id
int64
0
3.29k
file_name
stringlengths
4
37
programming_language
stringclasses
2 values
method_name
stringlengths
3
112
code_before
stringlengths
701
809k
code_after
stringlengths
701
809k
func_before
stringlengths
40
60.4k
func_after
stringlengths
43
61.2k
diff
stringlengths
67
133k
num_lines_added
int64
1
1.49k
num_lines_deleted
int64
1
1.13k
num_lines_in_file
float64
23
18.6k
num_tokens_in_file
float64
129
172k
num_lines_in_method
int64
1
259
num_tokens_in_method
int64
10
1.29k
method_complexity
int64
1
110
repo
stringclasses
267 values
cve_id
stringlengths
13
16
cwe_id
stringclasses
8 values
2,354
print-pim.c
C
pim_print
/* * Copyright (c) 1995, 1996 * The Regents of the University of California. All rights reserved. * * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that: (1) source code distributions * retain the above copyright notice and this paragraph in its entirety, (2) * distributions including binary code include the above copyright notice and * this paragraph in its entirety in the documentation or other materials * provided with the distribution, and (3) all advertising materials mentioning * features or use of this software display the following acknowledgement: * ``This product includes software developed by the University of California, * Lawrence Berkeley Laboratory and its contributors.'' Neither the name of * the University nor the names of its contributors may be used to endorse * or promote products derived from this software without specific prior * written permission. * THIS SOFTWARE IS PROVIDED ``AS IS'' AND WITHOUT ANY EXPRESS OR IMPLIED * WARRANTIES, INCLUDING, WITHOUT LIMITATION, THE IMPLIED WARRANTIES OF * MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE. 
*/ /* \summary: Protocol Independent Multicast (PIM) printer */ #ifdef HAVE_CONFIG_H #include "config.h" #endif #include <netdissect-stdinc.h> #include "netdissect.h" #include "addrtoname.h" #include "extract.h" #include "ip.h" #include "ip6.h" #include "ipproto.h" #define PIMV1_TYPE_QUERY 0 #define PIMV1_TYPE_REGISTER 1 #define PIMV1_TYPE_REGISTER_STOP 2 #define PIMV1_TYPE_JOIN_PRUNE 3 #define PIMV1_TYPE_RP_REACHABILITY 4 #define PIMV1_TYPE_ASSERT 5 #define PIMV1_TYPE_GRAFT 6 #define PIMV1_TYPE_GRAFT_ACK 7 static const struct tok pimv1_type_str[] = { { PIMV1_TYPE_QUERY, "Query" }, { PIMV1_TYPE_REGISTER, "Register" }, { PIMV1_TYPE_REGISTER_STOP, "Register-Stop" }, { PIMV1_TYPE_JOIN_PRUNE, "Join/Prune" }, { PIMV1_TYPE_RP_REACHABILITY, "RP-reachable" }, { PIMV1_TYPE_ASSERT, "Assert" }, { PIMV1_TYPE_GRAFT, "Graft" }, { PIMV1_TYPE_GRAFT_ACK, "Graft-ACK" }, { 0, NULL } }; #define PIMV2_TYPE_HELLO 0 #define PIMV2_TYPE_REGISTER 1 #define PIMV2_TYPE_REGISTER_STOP 2 #define PIMV2_TYPE_JOIN_PRUNE 3 #define PIMV2_TYPE_BOOTSTRAP 4 #define PIMV2_TYPE_ASSERT 5 #define PIMV2_TYPE_GRAFT 6 #define PIMV2_TYPE_GRAFT_ACK 7 #define PIMV2_TYPE_CANDIDATE_RP 8 #define PIMV2_TYPE_PRUNE_REFRESH 9 #define PIMV2_TYPE_DF_ELECTION 10 #define PIMV2_TYPE_ECMP_REDIRECT 11 static const struct tok pimv2_type_values[] = { { PIMV2_TYPE_HELLO, "Hello" }, { PIMV2_TYPE_REGISTER, "Register" }, { PIMV2_TYPE_REGISTER_STOP, "Register Stop" }, { PIMV2_TYPE_JOIN_PRUNE, "Join / Prune" }, { PIMV2_TYPE_BOOTSTRAP, "Bootstrap" }, { PIMV2_TYPE_ASSERT, "Assert" }, { PIMV2_TYPE_GRAFT, "Graft" }, { PIMV2_TYPE_GRAFT_ACK, "Graft Acknowledgement" }, { PIMV2_TYPE_CANDIDATE_RP, "Candidate RP Advertisement" }, { PIMV2_TYPE_PRUNE_REFRESH, "Prune Refresh" }, { PIMV2_TYPE_DF_ELECTION, "DF Election" }, { PIMV2_TYPE_ECMP_REDIRECT, "ECMP Redirect" }, { 0, NULL} }; #define PIMV2_HELLO_OPTION_HOLDTIME 1 #define PIMV2_HELLO_OPTION_LANPRUNEDELAY 2 #define PIMV2_HELLO_OPTION_DR_PRIORITY_OLD 18 #define PIMV2_HELLO_OPTION_DR_PRIORITY 19 
#define PIMV2_HELLO_OPTION_GENID 20 #define PIMV2_HELLO_OPTION_REFRESH_CAP 21 #define PIMV2_HELLO_OPTION_BIDIR_CAP 22 #define PIMV2_HELLO_OPTION_ADDRESS_LIST 24 #define PIMV2_HELLO_OPTION_ADDRESS_LIST_OLD 65001 static const struct tok pimv2_hello_option_values[] = { { PIMV2_HELLO_OPTION_HOLDTIME, "Hold Time" }, { PIMV2_HELLO_OPTION_LANPRUNEDELAY, "LAN Prune Delay" }, { PIMV2_HELLO_OPTION_DR_PRIORITY_OLD, "DR Priority (Old)" }, { PIMV2_HELLO_OPTION_DR_PRIORITY, "DR Priority" }, { PIMV2_HELLO_OPTION_GENID, "Generation ID" }, { PIMV2_HELLO_OPTION_REFRESH_CAP, "State Refresh Capability" }, { PIMV2_HELLO_OPTION_BIDIR_CAP, "Bi-Directional Capability" }, { PIMV2_HELLO_OPTION_ADDRESS_LIST, "Address List" }, { PIMV2_HELLO_OPTION_ADDRESS_LIST_OLD, "Address List (Old)" }, { 0, NULL} }; #define PIMV2_REGISTER_FLAG_LEN 4 #define PIMV2_REGISTER_FLAG_BORDER 0x80000000 #define PIMV2_REGISTER_FLAG_NULL 0x40000000 static const struct tok pimv2_register_flag_values[] = { { PIMV2_REGISTER_FLAG_BORDER, "Border" }, { PIMV2_REGISTER_FLAG_NULL, "Null" }, { 0, NULL} }; /* * XXX: We consider a case where IPv6 is not ready yet for portability, * but PIM dependent defintions should be independent of IPv6... */ struct pim { uint8_t pim_typever; /* upper 4bit: PIM version number; 2 for PIMv2 */ /* lower 4bit: the PIM message type, currently they are: * Hello, Register, Register-Stop, Join/Prune, * Bootstrap, Assert, Graft (PIM-DM only), * Graft-Ack (PIM-DM only), C-RP-Adv */ #define PIM_VER(x) (((x) & 0xf0) >> 4) #define PIM_TYPE(x) ((x) & 0x0f) u_char pim_rsv; /* Reserved */ u_short pim_cksum; /* IP style check sum */ }; static void pimv2_print(netdissect_options *, register const u_char *bp, register u_int len, const u_char *); static void pimv1_join_prune_print(netdissect_options *ndo, register const u_char *bp, register u_int len) { int ngroups, njoin, nprune; int njp; /* If it's a single group and a single source, use 1-line output. 
*/ if (ND_TTEST2(bp[0], 30) && bp[11] == 1 && ((njoin = EXTRACT_16BITS(&bp[20])) + EXTRACT_16BITS(&bp[22])) == 1) { int hold; ND_PRINT((ndo, " RPF %s ", ipaddr_string(ndo, bp))); hold = EXTRACT_16BITS(&bp[6]); if (hold != 180) { ND_PRINT((ndo, "Hold ")); unsigned_relts_print(ndo, hold); } ND_PRINT((ndo, "%s (%s/%d, %s", njoin ? "Join" : "Prune", ipaddr_string(ndo, &bp[26]), bp[25] & 0x3f, ipaddr_string(ndo, &bp[12]))); if (EXTRACT_32BITS(&bp[16]) != 0xffffffff) ND_PRINT((ndo, "/%s", ipaddr_string(ndo, &bp[16]))); ND_PRINT((ndo, ") %s%s %s", (bp[24] & 0x01) ? "Sparse" : "Dense", (bp[25] & 0x80) ? " WC" : "", (bp[25] & 0x40) ? "RP" : "SPT")); return; } ND_TCHECK2(bp[0], sizeof(struct in_addr)); if (ndo->ndo_vflag > 1) ND_PRINT((ndo, "\n")); ND_PRINT((ndo, " Upstream Nbr: %s", ipaddr_string(ndo, bp))); ND_TCHECK2(bp[6], 2); if (ndo->ndo_vflag > 1) ND_PRINT((ndo, "\n")); ND_PRINT((ndo, " Hold time: ")); unsigned_relts_print(ndo, EXTRACT_16BITS(&bp[6])); if (ndo->ndo_vflag < 2) return; bp += 8; len -= 8; ND_TCHECK2(bp[0], 4); ngroups = bp[3]; bp += 4; len -= 4; while (ngroups--) { /* * XXX - does the address have length "addrlen" and the * mask length "maddrlen"? */ ND_TCHECK2(bp[0], sizeof(struct in_addr)); ND_PRINT((ndo, "\n\tGroup: %s", ipaddr_string(ndo, bp))); ND_TCHECK2(bp[4], sizeof(struct in_addr)); if (EXTRACT_32BITS(&bp[4]) != 0xffffffff) ND_PRINT((ndo, "/%s", ipaddr_string(ndo, &bp[4]))); ND_TCHECK2(bp[8], 4); njoin = EXTRACT_16BITS(&bp[8]); nprune = EXTRACT_16BITS(&bp[10]); ND_PRINT((ndo, " joined: %d pruned: %d", njoin, nprune)); bp += 12; len -= 12; for (njp = 0; njp < (njoin + nprune); njp++) { const char *type; if (njp < njoin) type = "Join "; else type = "Prune"; ND_TCHECK2(bp[0], 6); ND_PRINT((ndo, "\n\t%s %s%s%s%s/%d", type, (bp[0] & 0x01) ? "Sparse " : "Dense ", (bp[1] & 0x80) ? "WC " : "", (bp[1] & 0x40) ? 
"RP " : "SPT ", ipaddr_string(ndo, &bp[2]), bp[1] & 0x3f)); bp += 6; len -= 6; } } return; trunc: ND_PRINT((ndo, "[|pim]")); return; } void pimv1_print(netdissect_options *ndo, register const u_char *bp, register u_int len) { register const u_char *ep; register u_char type; ep = (const u_char *)ndo->ndo_snapend; if (bp >= ep) return; ND_TCHECK(bp[1]); type = bp[1]; ND_PRINT((ndo, " %s", tok2str(pimv1_type_str, "[type %u]", type))); switch (type) { case PIMV1_TYPE_QUERY: if (ND_TTEST(bp[8])) { switch (bp[8] >> 4) { case 0: ND_PRINT((ndo, " Dense-mode")); break; case 1: ND_PRINT((ndo, " Sparse-mode")); break; case 2: ND_PRINT((ndo, " Sparse-Dense-mode")); break; default: ND_PRINT((ndo, " mode-%d", bp[8] >> 4)); break; } } if (ndo->ndo_vflag) { ND_TCHECK2(bp[10],2); ND_PRINT((ndo, " (Hold-time ")); unsigned_relts_print(ndo, EXTRACT_16BITS(&bp[10])); ND_PRINT((ndo, ")")); } break; case PIMV1_TYPE_REGISTER: ND_TCHECK2(bp[8], 20); /* ip header */ ND_PRINT((ndo, " for %s > %s", ipaddr_string(ndo, &bp[20]), ipaddr_string(ndo, &bp[24]))); break; case PIMV1_TYPE_REGISTER_STOP: ND_TCHECK2(bp[12], sizeof(struct in_addr)); ND_PRINT((ndo, " for %s > %s", ipaddr_string(ndo, &bp[8]), ipaddr_string(ndo, &bp[12]))); break; case PIMV1_TYPE_RP_REACHABILITY: if (ndo->ndo_vflag) { ND_TCHECK2(bp[22], 2); ND_PRINT((ndo, " group %s", ipaddr_string(ndo, &bp[8]))); if (EXTRACT_32BITS(&bp[12]) != 0xffffffff) ND_PRINT((ndo, "/%s", ipaddr_string(ndo, &bp[12]))); ND_PRINT((ndo, " RP %s hold ", ipaddr_string(ndo, &bp[16]))); unsigned_relts_print(ndo, EXTRACT_16BITS(&bp[22])); } break; case PIMV1_TYPE_ASSERT: ND_TCHECK2(bp[16], sizeof(struct in_addr)); ND_PRINT((ndo, " for %s > %s", ipaddr_string(ndo, &bp[16]), ipaddr_string(ndo, &bp[8]))); if (EXTRACT_32BITS(&bp[12]) != 0xffffffff) ND_PRINT((ndo, "/%s", ipaddr_string(ndo, &bp[12]))); ND_TCHECK2(bp[24], 4); ND_PRINT((ndo, " %s pref %d metric %d", (bp[20] & 0x80) ? 
"RP-tree" : "SPT", EXTRACT_32BITS(&bp[20]) & 0x7fffffff, EXTRACT_32BITS(&bp[24]))); break; case PIMV1_TYPE_JOIN_PRUNE: case PIMV1_TYPE_GRAFT: case PIMV1_TYPE_GRAFT_ACK: if (ndo->ndo_vflag) pimv1_join_prune_print(ndo, &bp[8], len - 8); break; } ND_TCHECK(bp[4]); if ((bp[4] >> 4) != 1) ND_PRINT((ndo, " [v%d]", bp[4] >> 4)); return; trunc: ND_PRINT((ndo, "[|pim]")); return; } /* * auto-RP is a cisco protocol, documented at * ftp://ftpeng.cisco.com/ipmulticast/specs/pim-autorp-spec01.txt * * This implements version 1+, dated Sept 9, 1998. */ void cisco_autorp_print(netdissect_options *ndo, register const u_char *bp, register u_int len) { int type; int numrps; int hold; ND_TCHECK(bp[0]); ND_PRINT((ndo, " auto-rp ")); type = bp[0]; switch (type) { case 0x11: ND_PRINT((ndo, "candidate-advert")); break; case 0x12: ND_PRINT((ndo, "mapping")); break; default: ND_PRINT((ndo, "type-0x%02x", type)); break; } ND_TCHECK(bp[1]); numrps = bp[1]; ND_TCHECK2(bp[2], 2); ND_PRINT((ndo, " Hold ")); hold = EXTRACT_16BITS(&bp[2]); if (hold) unsigned_relts_print(ndo, EXTRACT_16BITS(&bp[2])); else ND_PRINT((ndo, "FOREVER")); /* Next 4 bytes are reserved. */ bp += 8; len -= 8; /*XXX skip unless -v? */ /* * Rest of packet: * numrps entries of the form: * 32 bits: RP * 6 bits: reserved * 2 bits: PIM version supported, bit 0 is "supports v1", 1 is "v2". * 8 bits: # of entries for this RP * each entry: 7 bits: reserved, 1 bit: negative, * 8 bits: mask 32 bits: source * lather, rinse, repeat. 
*/ while (numrps--) { int nentries; char s; ND_TCHECK2(bp[0], 4); ND_PRINT((ndo, " RP %s", ipaddr_string(ndo, bp))); ND_TCHECK(bp[4]); switch (bp[4] & 0x3) { case 0: ND_PRINT((ndo, " PIMv?")); break; case 1: ND_PRINT((ndo, " PIMv1")); break; case 2: ND_PRINT((ndo, " PIMv2")); break; case 3: ND_PRINT((ndo, " PIMv1+2")); break; } if (bp[4] & 0xfc) ND_PRINT((ndo, " [rsvd=0x%02x]", bp[4] & 0xfc)); ND_TCHECK(bp[5]); nentries = bp[5]; bp += 6; len -= 6; s = ' '; for (; nentries; nentries--) { ND_TCHECK2(bp[0], 6); ND_PRINT((ndo, "%c%s%s/%d", s, bp[0] & 1 ? "!" : "", ipaddr_string(ndo, &bp[2]), bp[1])); if (bp[0] & 0x02) { ND_PRINT((ndo, " bidir")); } if (bp[0] & 0xfc) { ND_PRINT((ndo, "[rsvd=0x%02x]", bp[0] & 0xfc)); } s = ','; bp += 6; len -= 6; } } return; trunc: ND_PRINT((ndo, "[|autorp]")); return; } void pim_print(netdissect_options *ndo, register const u_char *bp, register u_int len, const u_char *bp2) { register const u_char *ep; register const struct pim *pim = (const struct pim *)bp; ep = (const u_char *)ndo->ndo_snapend; if (bp >= ep) return; #ifdef notyet /* currently we see only version and type */ ND_TCHECK(pim->pim_rsv); #endif switch (PIM_VER(pim->pim_typever)) { case 2: if (!ndo->ndo_vflag) { ND_PRINT((ndo, "PIMv%u, %s, length %u", PIM_VER(pim->pim_typever), tok2str(pimv2_type_values,"Unknown Type",PIM_TYPE(pim->pim_typever)), len)); return; } else { ND_PRINT((ndo, "PIMv%u, length %u\n\t%s", PIM_VER(pim->pim_typever), len, tok2str(pimv2_type_values,"Unknown Type",PIM_TYPE(pim->pim_typever)))); pimv2_print(ndo, bp, len, bp2); } break; default: ND_PRINT((ndo, "PIMv%u, length %u", PIM_VER(pim->pim_typever), len)); break; } return; } /* * PIMv2 uses encoded address representations. * * The last PIM-SM I-D before RFC2117 was published specified the * following representation for unicast addresses. However, RFC2117 * specified no encoding for unicast addresses with the unicast * address length specified in the header. 
Therefore, we have to * guess which encoding is being used (Cisco's PIMv2 implementation * uses the non-RFC encoding). RFC2117 turns a previously "Reserved" * field into a 'unicast-address-length-in-bytes' field. We guess * that it's the draft encoding if this reserved field is zero. * * RFC2362 goes back to the encoded format, and calls the addr length * field "reserved" again. * * The first byte is the address family, from: * * 0 Reserved * 1 IP (IP version 4) * 2 IP6 (IP version 6) * 3 NSAP * 4 HDLC (8-bit multidrop) * 5 BBN 1822 * 6 802 (includes all 802 media plus Ethernet "canonical format") * 7 E.163 * 8 E.164 (SMDS, Frame Relay, ATM) * 9 F.69 (Telex) * 10 X.121 (X.25, Frame Relay) * 11 IPX * 12 Appletalk * 13 Decnet IV * 14 Banyan Vines * 15 E.164 with NSAP format subaddress * * In addition, the second byte is an "Encoding". 0 is the default * encoding for the address family, and no other encodings are currently * specified. * */ static int pimv2_addr_len; enum pimv2_addrtype { pimv2_unicast, pimv2_group, pimv2_source }; /* 0 1 2 3 * 0 1 2 3 4 5 6 7 8 9 0 1 2 3 4 5 6 7 8 9 0 1 2 3 4 5 6 7 8 9 0 1 * +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+ * | Addr Family | Encoding Type | Unicast Address | * +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+++++++ * 0 1 2 3 * 0 1 2 3 4 5 6 7 8 9 0 1 2 3 4 5 6 7 8 9 0 1 2 3 4 5 6 7 8 9 0 1 * +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+ * | Addr Family | Encoding Type | Reserved | Mask Len | * +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+ * | Group multicast Address | * +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+ * 0 1 2 3 * 0 1 2 3 4 5 6 7 8 9 0 1 2 3 4 5 6 7 8 9 0 1 2 3 4 5 6 7 8 9 0 1 * +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+ * | Addr Family | Encoding Type | Rsrvd |S|W|R| Mask Len | * +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+ * | Source Address | * 
+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+ */ static int pimv2_addr_print(netdissect_options *ndo, const u_char *bp, enum pimv2_addrtype at, int silent) { int af; int len, hdrlen; ND_TCHECK(bp[0]); if (pimv2_addr_len == 0) { ND_TCHECK(bp[1]); switch (bp[0]) { case 1: af = AF_INET; len = sizeof(struct in_addr); break; case 2: af = AF_INET6; len = sizeof(struct in6_addr); break; default: return -1; } if (bp[1] != 0) return -1; hdrlen = 2; } else { switch (pimv2_addr_len) { case sizeof(struct in_addr): af = AF_INET; break; case sizeof(struct in6_addr): af = AF_INET6; break; default: return -1; break; } len = pimv2_addr_len; hdrlen = 0; } bp += hdrlen; switch (at) { case pimv2_unicast: ND_TCHECK2(bp[0], len); if (af == AF_INET) { if (!silent) ND_PRINT((ndo, "%s", ipaddr_string(ndo, bp))); } else if (af == AF_INET6) { if (!silent) ND_PRINT((ndo, "%s", ip6addr_string(ndo, bp))); } return hdrlen + len; case pimv2_group: case pimv2_source: ND_TCHECK2(bp[0], len + 2); if (af == AF_INET) { if (!silent) { ND_PRINT((ndo, "%s", ipaddr_string(ndo, bp + 2))); if (bp[1] != 32) ND_PRINT((ndo, "/%u", bp[1])); } } else if (af == AF_INET6) { if (!silent) { ND_PRINT((ndo, "%s", ip6addr_string(ndo, bp + 2))); if (bp[1] != 128) ND_PRINT((ndo, "/%u", bp[1])); } } if (bp[0] && !silent) { if (at == pimv2_group) { ND_PRINT((ndo, "(0x%02x)", bp[0])); } else { ND_PRINT((ndo, "(%s%s%s", bp[0] & 0x04 ? "S" : "", bp[0] & 0x02 ? "W" : "", bp[0] & 0x01 ? "R" : "")); if (bp[0] & 0xf8) { ND_PRINT((ndo, "+0x%02x", bp[0] & 0xf8)); } ND_PRINT((ndo, ")")); } } return hdrlen + 2 + len; default: return -1; } trunc: return -1; } enum checksum_status { CORRECT, INCORRECT, UNVERIFIED }; static enum checksum_status pimv2_check_checksum(netdissect_options *ndo, const u_char *bp, const u_char *bp2, u_int len) { const struct ip *ip; u_int cksum; if (!ND_TTEST2(bp[0], len)) { /* We don't have all the data. 
*/ return (UNVERIFIED); } ip = (const struct ip *)bp2; if (IP_V(ip) == 4) { struct cksum_vec vec[1]; vec[0].ptr = bp; vec[0].len = len; cksum = in_cksum(vec, 1); return (cksum ? INCORRECT : CORRECT); } else if (IP_V(ip) == 6) { const struct ip6_hdr *ip6; ip6 = (const struct ip6_hdr *)bp2; cksum = nextproto6_cksum(ndo, ip6, bp, len, len, IPPROTO_PIM); return (cksum ? INCORRECT : CORRECT); } else { return (UNVERIFIED); } } static void pimv2_print(netdissect_options *ndo, register const u_char *bp, register u_int len, const u_char *bp2) { register const u_char *ep; register const struct pim *pim = (const struct pim *)bp; int advance; enum checksum_status cksum_status; ep = (const u_char *)ndo->ndo_snapend; if (bp >= ep) return; if (ep > bp + len) ep = bp + len; ND_TCHECK(pim->pim_rsv); pimv2_addr_len = pim->pim_rsv; if (pimv2_addr_len != 0) ND_PRINT((ndo, ", RFC2117-encoding")); ND_PRINT((ndo, ", cksum 0x%04x ", EXTRACT_16BITS(&pim->pim_cksum))); if (EXTRACT_16BITS(&pim->pim_cksum) == 0) { ND_PRINT((ndo, "(unverified)")); } else { if (PIM_TYPE(pim->pim_typever) == PIMV2_TYPE_REGISTER) { /* * The checksum only covers the packet header, * not the encapsulated packet. */ cksum_status = pimv2_check_checksum(ndo, bp, bp2, 8); if (cksum_status == INCORRECT) { /* * To quote RFC 4601, "For interoperability * reasons, a message carrying a checksum * calculated over the entire PIM Register * message should also be accepted." */ cksum_status = pimv2_check_checksum(ndo, bp, bp2, len); } } else { /* * The checksum covers the entire packet. 
*/ cksum_status = pimv2_check_checksum(ndo, bp, bp2, len); } switch (cksum_status) { case CORRECT: ND_PRINT((ndo, "(correct)")); break; case INCORRECT: ND_PRINT((ndo, "(incorrect)")); break; case UNVERIFIED: ND_PRINT((ndo, "(unverified)")); break; } } switch (PIM_TYPE(pim->pim_typever)) { case PIMV2_TYPE_HELLO: { uint16_t otype, olen; bp += 4; while (bp < ep) { ND_TCHECK2(bp[0], 4); otype = EXTRACT_16BITS(&bp[0]); olen = EXTRACT_16BITS(&bp[2]); ND_TCHECK2(bp[0], 4 + olen); ND_PRINT((ndo, "\n\t %s Option (%u), length %u, Value: ", tok2str(pimv2_hello_option_values, "Unknown", otype), otype, olen)); bp += 4; switch (otype) { case PIMV2_HELLO_OPTION_HOLDTIME: if (olen != 2) { ND_PRINT((ndo, "ERROR: Option Length != 2 Bytes (%u)", olen)); } else { unsigned_relts_print(ndo, EXTRACT_16BITS(bp)); } break; case PIMV2_HELLO_OPTION_LANPRUNEDELAY: if (olen != 4) { ND_PRINT((ndo, "ERROR: Option Length != 4 Bytes (%u)", olen)); } else { char t_bit; uint16_t lan_delay, override_interval; lan_delay = EXTRACT_16BITS(bp); override_interval = EXTRACT_16BITS(bp+2); t_bit = (lan_delay & 0x8000)? 
1 : 0; lan_delay &= ~0x8000; ND_PRINT((ndo, "\n\t T-bit=%d, LAN delay %dms, Override interval %dms", t_bit, lan_delay, override_interval)); } break; case PIMV2_HELLO_OPTION_DR_PRIORITY_OLD: case PIMV2_HELLO_OPTION_DR_PRIORITY: switch (olen) { case 0: ND_PRINT((ndo, "Bi-Directional Capability (Old)")); break; case 4: ND_PRINT((ndo, "%u", EXTRACT_32BITS(bp))); break; default: ND_PRINT((ndo, "ERROR: Option Length != 4 Bytes (%u)", olen)); break; } break; case PIMV2_HELLO_OPTION_GENID: if (olen != 4) { ND_PRINT((ndo, "ERROR: Option Length != 4 Bytes (%u)", olen)); } else { ND_PRINT((ndo, "0x%08x", EXTRACT_32BITS(bp))); } break; case PIMV2_HELLO_OPTION_REFRESH_CAP: if (olen != 4) { ND_PRINT((ndo, "ERROR: Option Length != 4 Bytes (%u)", olen)); } else { ND_PRINT((ndo, "v%d", *bp)); if (*(bp+1) != 0) { ND_PRINT((ndo, ", interval ")); unsigned_relts_print(ndo, *(bp+1)); } if (EXTRACT_16BITS(bp+2) != 0) { ND_PRINT((ndo, " ?0x%04x?", EXTRACT_16BITS(bp+2))); } } break; case PIMV2_HELLO_OPTION_BIDIR_CAP: break; case PIMV2_HELLO_OPTION_ADDRESS_LIST_OLD: case PIMV2_HELLO_OPTION_ADDRESS_LIST: if (ndo->ndo_vflag > 1) { const u_char *ptr = bp; while (ptr < (bp+olen)) { ND_PRINT((ndo, "\n\t ")); advance = pimv2_addr_print(ndo, ptr, pimv2_unicast, 0); if (advance < 0) { ND_PRINT((ndo, "...")); break; } ptr += advance; } } break; default: if (ndo->ndo_vflag <= 1) print_unknown_data(ndo, bp, "\n\t ", olen); break; } /* do we want to see an additionally hexdump ? 
*/ if (ndo->ndo_vflag> 1) print_unknown_data(ndo, bp, "\n\t ", olen); bp += olen; } break; } case PIMV2_TYPE_REGISTER: { const struct ip *ip; ND_TCHECK2(*(bp + 4), PIMV2_REGISTER_FLAG_LEN); ND_PRINT((ndo, ", Flags [ %s ]\n\t", tok2str(pimv2_register_flag_values, "none", EXTRACT_32BITS(bp+4)))); bp += 8; len -= 8; /* encapsulated multicast packet */ ip = (const struct ip *)bp; switch (IP_V(ip)) { case 0: /* Null header */ ND_PRINT((ndo, "IP-Null-header %s > %s", ipaddr_string(ndo, &ip->ip_src), ipaddr_string(ndo, &ip->ip_dst))); break; case 4: /* IPv4 */ ip_print(ndo, bp, len); break; case 6: /* IPv6 */ ip6_print(ndo, bp, len); break; default: ND_PRINT((ndo, "IP ver %d", IP_V(ip))); break; } break; } case PIMV2_TYPE_REGISTER_STOP: bp += 4; len -= 4; if (bp >= ep) break; ND_PRINT((ndo, " group=")); if ((advance = pimv2_addr_print(ndo, bp, pimv2_group, 0)) < 0) { ND_PRINT((ndo, "...")); break; } bp += advance; len -= advance; if (bp >= ep) break; ND_PRINT((ndo, " source=")); if ((advance = pimv2_addr_print(ndo, bp, pimv2_unicast, 0)) < 0) { ND_PRINT((ndo, "...")); break; } bp += advance; len -= advance; break; case PIMV2_TYPE_JOIN_PRUNE: case PIMV2_TYPE_GRAFT: case PIMV2_TYPE_GRAFT_ACK: /* * 0 1 2 3 * 0 1 2 3 4 5 6 7 8 9 0 1 2 3 4 5 6 7 8 9 0 1 2 3 4 5 6 7 8 9 0 1 * +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+ * |PIM Ver| Type | Addr length | Checksum | * +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+ * | Unicast-Upstream Neighbor Address | * +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+ * | Reserved | Num groups | Holdtime | * +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+ * | Encoded-Multicast Group Address-1 | * +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+ * | Number of Joined Sources | Number of Pruned Sources | * +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+ * | Encoded-Joined Source Address-1 | * 
+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+ * | . | * | . | * +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+ * | Encoded-Joined Source Address-n | * +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+ * | Encoded-Pruned Source Address-1 | * +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+ * | . | * | . | * +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+ * | Encoded-Pruned Source Address-n | * +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+ * | . | * | . | * +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+ * | Encoded-Multicast Group Address-n | * +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+ */ { uint8_t ngroup; uint16_t holdtime; uint16_t njoin; uint16_t nprune; int i, j; bp += 4; len -= 4; if (PIM_TYPE(pim->pim_typever) != 7) { /*not for Graft-ACK*/ if (bp >= ep) break; ND_PRINT((ndo, ", upstream-neighbor: ")); if ((advance = pimv2_addr_print(ndo, bp, pimv2_unicast, 0)) < 0) { ND_PRINT((ndo, "...")); break; } bp += advance; len -= advance; } if (bp + 4 > ep) break; ngroup = bp[1]; holdtime = EXTRACT_16BITS(&bp[2]); ND_PRINT((ndo, "\n\t %u group(s)", ngroup)); if (PIM_TYPE(pim->pim_typever) != 7) { /*not for Graft-ACK*/ ND_PRINT((ndo, ", holdtime: ")); if (holdtime == 0xffff) ND_PRINT((ndo, "infinite")); else unsigned_relts_print(ndo, holdtime); } bp += 4; len -= 4; for (i = 0; i < ngroup; i++) { if (bp >= ep) goto jp_done; ND_PRINT((ndo, "\n\t group #%u: ", i+1)); if ((advance = pimv2_addr_print(ndo, bp, pimv2_group, 0)) < 0) { ND_PRINT((ndo, "...)")); goto jp_done; } bp += advance; len -= advance; if (bp + 4 > ep) { ND_PRINT((ndo, "...)")); goto jp_done; } njoin = EXTRACT_16BITS(&bp[0]); nprune = EXTRACT_16BITS(&bp[2]); ND_PRINT((ndo, ", joined sources: %u, pruned sources: %u", njoin, nprune)); bp += 4; len -= 4; for (j = 0; j < njoin; j++) { ND_PRINT((ndo, "\n\t joined source #%u: ", j+1)); if ((advance = 
pimv2_addr_print(ndo, bp, pimv2_source, 0)) < 0) { ND_PRINT((ndo, "...)")); goto jp_done; } bp += advance; len -= advance; } for (j = 0; j < nprune; j++) { ND_PRINT((ndo, "\n\t pruned source #%u: ", j+1)); if ((advance = pimv2_addr_print(ndo, bp, pimv2_source, 0)) < 0) { ND_PRINT((ndo, "...)")); goto jp_done; } bp += advance; len -= advance; } } jp_done: break; } case PIMV2_TYPE_BOOTSTRAP: { int i, j, frpcnt; bp += 4; /* Fragment Tag, Hash Mask len, and BSR-priority */ if (bp + sizeof(uint16_t) >= ep) break; ND_PRINT((ndo, " tag=%x", EXTRACT_16BITS(bp))); bp += sizeof(uint16_t); if (bp >= ep) break; ND_PRINT((ndo, " hashmlen=%d", bp[0])); if (bp + 1 >= ep) break; ND_PRINT((ndo, " BSRprio=%d", bp[1])); bp += 2; /* Encoded-Unicast-BSR-Address */ if (bp >= ep) break; ND_PRINT((ndo, " BSR=")); if ((advance = pimv2_addr_print(ndo, bp, pimv2_unicast, 0)) < 0) { ND_PRINT((ndo, "...")); break; } bp += advance; for (i = 0; bp < ep; i++) { /* Encoded-Group Address */ ND_PRINT((ndo, " (group%d: ", i)); if ((advance = pimv2_addr_print(ndo, bp, pimv2_group, 0)) < 0) { ND_PRINT((ndo, "...)")); goto bs_done; } bp += advance; /* RP-Count, Frag RP-Cnt, and rsvd */ if (bp >= ep) { ND_PRINT((ndo, "...)")); goto bs_done; } ND_PRINT((ndo, " RPcnt=%d", bp[0])); if (bp + 1 >= ep) { ND_PRINT((ndo, "...)")); goto bs_done; } ND_PRINT((ndo, " FRPcnt=%d", frpcnt = bp[1])); bp += 4; for (j = 0; j < frpcnt && bp < ep; j++) { /* each RP info */ ND_PRINT((ndo, " RP%d=", j)); if ((advance = pimv2_addr_print(ndo, bp, pimv2_unicast, 0)) < 0) { ND_PRINT((ndo, "...)")); goto bs_done; } bp += advance; if (bp + 1 >= ep) { ND_PRINT((ndo, "...)")); goto bs_done; } ND_PRINT((ndo, ",holdtime=")); unsigned_relts_print(ndo, EXTRACT_16BITS(bp)); if (bp + 2 >= ep) { ND_PRINT((ndo, "...)")); goto bs_done; } ND_PRINT((ndo, ",prio=%d", bp[2])); bp += 4; } ND_PRINT((ndo, ")")); } bs_done: break; } case PIMV2_TYPE_ASSERT: bp += 4; len -= 4; if (bp >= ep) break; ND_PRINT((ndo, " group=")); if ((advance = 
pimv2_addr_print(ndo, bp, pimv2_group, 0)) < 0) { ND_PRINT((ndo, "...")); break; } bp += advance; len -= advance; if (bp >= ep) break; ND_PRINT((ndo, " src=")); if ((advance = pimv2_addr_print(ndo, bp, pimv2_unicast, 0)) < 0) { ND_PRINT((ndo, "...")); break; } bp += advance; len -= advance; if (bp + 8 > ep) break; if (bp[0] & 0x80) ND_PRINT((ndo, " RPT")); ND_PRINT((ndo, " pref=%u", EXTRACT_32BITS(&bp[0]) & 0x7fffffff)); ND_PRINT((ndo, " metric=%u", EXTRACT_32BITS(&bp[4]))); break; case PIMV2_TYPE_CANDIDATE_RP: { int i, pfxcnt; bp += 4; /* Prefix-Cnt, Priority, and Holdtime */ if (bp >= ep) break; ND_PRINT((ndo, " prefix-cnt=%d", bp[0])); pfxcnt = bp[0]; if (bp + 1 >= ep) break; ND_PRINT((ndo, " prio=%d", bp[1])); if (bp + 3 >= ep) break; ND_PRINT((ndo, " holdtime=")); unsigned_relts_print(ndo, EXTRACT_16BITS(&bp[2])); bp += 4; /* Encoded-Unicast-RP-Address */ if (bp >= ep) break; ND_PRINT((ndo, " RP=")); if ((advance = pimv2_addr_print(ndo, bp, pimv2_unicast, 0)) < 0) { ND_PRINT((ndo, "...")); break; } bp += advance; /* Encoded-Group Addresses */ for (i = 0; i < pfxcnt && bp < ep; i++) { ND_PRINT((ndo, " Group%d=", i)); if ((advance = pimv2_addr_print(ndo, bp, pimv2_group, 0)) < 0) { ND_PRINT((ndo, "...")); break; } bp += advance; } break; } case PIMV2_TYPE_PRUNE_REFRESH: ND_PRINT((ndo, " src=")); if ((advance = pimv2_addr_print(ndo, bp, pimv2_unicast, 0)) < 0) { ND_PRINT((ndo, "...")); break; } bp += advance; ND_PRINT((ndo, " grp=")); if ((advance = pimv2_addr_print(ndo, bp, pimv2_group, 0)) < 0) { ND_PRINT((ndo, "...")); break; } bp += advance; ND_PRINT((ndo, " forwarder=")); if ((advance = pimv2_addr_print(ndo, bp, pimv2_unicast, 0)) < 0) { ND_PRINT((ndo, "...")); break; } bp += advance; ND_TCHECK2(bp[0], 2); ND_PRINT((ndo, " TUNR ")); unsigned_relts_print(ndo, EXTRACT_16BITS(bp)); break; default: ND_PRINT((ndo, " [type %d]", PIM_TYPE(pim->pim_typever))); break; } return; trunc: ND_PRINT((ndo, "[|pim]")); } /* * Local Variables: * c-style: whitesmith * 
c-basic-offset: 8 * End: */
/* * Copyright (c) 1995, 1996 * The Regents of the University of California. All rights reserved. * * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that: (1) source code distributions * retain the above copyright notice and this paragraph in its entirety, (2) * distributions including binary code include the above copyright notice and * this paragraph in its entirety in the documentation or other materials * provided with the distribution, and (3) all advertising materials mentioning * features or use of this software display the following acknowledgement: * ``This product includes software developed by the University of California, * Lawrence Berkeley Laboratory and its contributors.'' Neither the name of * the University nor the names of its contributors may be used to endorse * or promote products derived from this software without specific prior * written permission. * THIS SOFTWARE IS PROVIDED ``AS IS'' AND WITHOUT ANY EXPRESS OR IMPLIED * WARRANTIES, INCLUDING, WITHOUT LIMITATION, THE IMPLIED WARRANTIES OF * MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE. 
*/ /* \summary: Protocol Independent Multicast (PIM) printer */ #ifdef HAVE_CONFIG_H #include "config.h" #endif #include <netdissect-stdinc.h> #include "netdissect.h" #include "addrtoname.h" #include "extract.h" #include "ip.h" #include "ip6.h" #include "ipproto.h" #define PIMV1_TYPE_QUERY 0 #define PIMV1_TYPE_REGISTER 1 #define PIMV1_TYPE_REGISTER_STOP 2 #define PIMV1_TYPE_JOIN_PRUNE 3 #define PIMV1_TYPE_RP_REACHABILITY 4 #define PIMV1_TYPE_ASSERT 5 #define PIMV1_TYPE_GRAFT 6 #define PIMV1_TYPE_GRAFT_ACK 7 static const struct tok pimv1_type_str[] = { { PIMV1_TYPE_QUERY, "Query" }, { PIMV1_TYPE_REGISTER, "Register" }, { PIMV1_TYPE_REGISTER_STOP, "Register-Stop" }, { PIMV1_TYPE_JOIN_PRUNE, "Join/Prune" }, { PIMV1_TYPE_RP_REACHABILITY, "RP-reachable" }, { PIMV1_TYPE_ASSERT, "Assert" }, { PIMV1_TYPE_GRAFT, "Graft" }, { PIMV1_TYPE_GRAFT_ACK, "Graft-ACK" }, { 0, NULL } }; #define PIMV2_TYPE_HELLO 0 #define PIMV2_TYPE_REGISTER 1 #define PIMV2_TYPE_REGISTER_STOP 2 #define PIMV2_TYPE_JOIN_PRUNE 3 #define PIMV2_TYPE_BOOTSTRAP 4 #define PIMV2_TYPE_ASSERT 5 #define PIMV2_TYPE_GRAFT 6 #define PIMV2_TYPE_GRAFT_ACK 7 #define PIMV2_TYPE_CANDIDATE_RP 8 #define PIMV2_TYPE_PRUNE_REFRESH 9 #define PIMV2_TYPE_DF_ELECTION 10 #define PIMV2_TYPE_ECMP_REDIRECT 11 static const struct tok pimv2_type_values[] = { { PIMV2_TYPE_HELLO, "Hello" }, { PIMV2_TYPE_REGISTER, "Register" }, { PIMV2_TYPE_REGISTER_STOP, "Register Stop" }, { PIMV2_TYPE_JOIN_PRUNE, "Join / Prune" }, { PIMV2_TYPE_BOOTSTRAP, "Bootstrap" }, { PIMV2_TYPE_ASSERT, "Assert" }, { PIMV2_TYPE_GRAFT, "Graft" }, { PIMV2_TYPE_GRAFT_ACK, "Graft Acknowledgement" }, { PIMV2_TYPE_CANDIDATE_RP, "Candidate RP Advertisement" }, { PIMV2_TYPE_PRUNE_REFRESH, "Prune Refresh" }, { PIMV2_TYPE_DF_ELECTION, "DF Election" }, { PIMV2_TYPE_ECMP_REDIRECT, "ECMP Redirect" }, { 0, NULL} }; #define PIMV2_HELLO_OPTION_HOLDTIME 1 #define PIMV2_HELLO_OPTION_LANPRUNEDELAY 2 #define PIMV2_HELLO_OPTION_DR_PRIORITY_OLD 18 #define PIMV2_HELLO_OPTION_DR_PRIORITY 19 
#define PIMV2_HELLO_OPTION_GENID 20 #define PIMV2_HELLO_OPTION_REFRESH_CAP 21 #define PIMV2_HELLO_OPTION_BIDIR_CAP 22 #define PIMV2_HELLO_OPTION_ADDRESS_LIST 24 #define PIMV2_HELLO_OPTION_ADDRESS_LIST_OLD 65001 static const struct tok pimv2_hello_option_values[] = { { PIMV2_HELLO_OPTION_HOLDTIME, "Hold Time" }, { PIMV2_HELLO_OPTION_LANPRUNEDELAY, "LAN Prune Delay" }, { PIMV2_HELLO_OPTION_DR_PRIORITY_OLD, "DR Priority (Old)" }, { PIMV2_HELLO_OPTION_DR_PRIORITY, "DR Priority" }, { PIMV2_HELLO_OPTION_GENID, "Generation ID" }, { PIMV2_HELLO_OPTION_REFRESH_CAP, "State Refresh Capability" }, { PIMV2_HELLO_OPTION_BIDIR_CAP, "Bi-Directional Capability" }, { PIMV2_HELLO_OPTION_ADDRESS_LIST, "Address List" }, { PIMV2_HELLO_OPTION_ADDRESS_LIST_OLD, "Address List (Old)" }, { 0, NULL} }; #define PIMV2_REGISTER_FLAG_LEN 4 #define PIMV2_REGISTER_FLAG_BORDER 0x80000000 #define PIMV2_REGISTER_FLAG_NULL 0x40000000 static const struct tok pimv2_register_flag_values[] = { { PIMV2_REGISTER_FLAG_BORDER, "Border" }, { PIMV2_REGISTER_FLAG_NULL, "Null" }, { 0, NULL} }; /* * XXX: We consider a case where IPv6 is not ready yet for portability, * but PIM dependent defintions should be independent of IPv6... */ struct pim { uint8_t pim_typever; /* upper 4bit: PIM version number; 2 for PIMv2 */ /* lower 4bit: the PIM message type, currently they are: * Hello, Register, Register-Stop, Join/Prune, * Bootstrap, Assert, Graft (PIM-DM only), * Graft-Ack (PIM-DM only), C-RP-Adv */ #define PIM_VER(x) (((x) & 0xf0) >> 4) #define PIM_TYPE(x) ((x) & 0x0f) u_char pim_rsv; /* Reserved */ u_short pim_cksum; /* IP style check sum */ }; static void pimv2_print(netdissect_options *, register const u_char *bp, register u_int len, const u_char *); static void pimv1_join_prune_print(netdissect_options *ndo, register const u_char *bp, register u_int len) { int ngroups, njoin, nprune; int njp; /* If it's a single group and a single source, use 1-line output. 
*/ if (ND_TTEST2(bp[0], 30) && bp[11] == 1 && ((njoin = EXTRACT_16BITS(&bp[20])) + EXTRACT_16BITS(&bp[22])) == 1) { int hold; ND_PRINT((ndo, " RPF %s ", ipaddr_string(ndo, bp))); hold = EXTRACT_16BITS(&bp[6]); if (hold != 180) { ND_PRINT((ndo, "Hold ")); unsigned_relts_print(ndo, hold); } ND_PRINT((ndo, "%s (%s/%d, %s", njoin ? "Join" : "Prune", ipaddr_string(ndo, &bp[26]), bp[25] & 0x3f, ipaddr_string(ndo, &bp[12]))); if (EXTRACT_32BITS(&bp[16]) != 0xffffffff) ND_PRINT((ndo, "/%s", ipaddr_string(ndo, &bp[16]))); ND_PRINT((ndo, ") %s%s %s", (bp[24] & 0x01) ? "Sparse" : "Dense", (bp[25] & 0x80) ? " WC" : "", (bp[25] & 0x40) ? "RP" : "SPT")); return; } if (len < sizeof(struct in_addr)) goto trunc; ND_TCHECK2(bp[0], sizeof(struct in_addr)); if (ndo->ndo_vflag > 1) ND_PRINT((ndo, "\n")); ND_PRINT((ndo, " Upstream Nbr: %s", ipaddr_string(ndo, bp))); bp += 4; len -= 4; if (len < 4) goto trunc; ND_TCHECK2(bp[2], 2); if (ndo->ndo_vflag > 1) ND_PRINT((ndo, "\n")); ND_PRINT((ndo, " Hold time: ")); unsigned_relts_print(ndo, EXTRACT_16BITS(&bp[2])); if (ndo->ndo_vflag < 2) return; bp += 4; len -= 4; if (len < 4) goto trunc; ND_TCHECK2(bp[0], 4); ngroups = bp[3]; bp += 4; len -= 4; while (ngroups--) { /* * XXX - does the address have length "addrlen" and the * mask length "maddrlen"? 
*/ if (len < 4) goto trunc; ND_TCHECK2(bp[0], sizeof(struct in_addr)); ND_PRINT((ndo, "\n\tGroup: %s", ipaddr_string(ndo, bp))); bp += 4; len -= 4; if (len < 4) goto trunc; ND_TCHECK2(bp[0], sizeof(struct in_addr)); if (EXTRACT_32BITS(&bp[0]) != 0xffffffff) ND_PRINT((ndo, "/%s", ipaddr_string(ndo, &bp[0]))); bp += 4; len -= 4; if (len < 4) goto trunc; ND_TCHECK2(bp[0], 4); njoin = EXTRACT_16BITS(&bp[0]); nprune = EXTRACT_16BITS(&bp[2]); ND_PRINT((ndo, " joined: %d pruned: %d", njoin, nprune)); bp += 4; len -= 4; for (njp = 0; njp < (njoin + nprune); njp++) { const char *type; if (njp < njoin) type = "Join "; else type = "Prune"; if (len < 6) goto trunc; ND_TCHECK2(bp[0], 6); ND_PRINT((ndo, "\n\t%s %s%s%s%s/%d", type, (bp[0] & 0x01) ? "Sparse " : "Dense ", (bp[1] & 0x80) ? "WC " : "", (bp[1] & 0x40) ? "RP " : "SPT ", ipaddr_string(ndo, &bp[2]), bp[1] & 0x3f)); bp += 6; len -= 6; } } return; trunc: ND_PRINT((ndo, "[|pim]")); return; } void pimv1_print(netdissect_options *ndo, register const u_char *bp, register u_int len) { register u_char type; ND_TCHECK(bp[1]); type = bp[1]; ND_PRINT((ndo, " %s", tok2str(pimv1_type_str, "[type %u]", type))); switch (type) { case PIMV1_TYPE_QUERY: if (ND_TTEST(bp[8])) { switch (bp[8] >> 4) { case 0: ND_PRINT((ndo, " Dense-mode")); break; case 1: ND_PRINT((ndo, " Sparse-mode")); break; case 2: ND_PRINT((ndo, " Sparse-Dense-mode")); break; default: ND_PRINT((ndo, " mode-%d", bp[8] >> 4)); break; } } if (ndo->ndo_vflag) { ND_TCHECK2(bp[10],2); ND_PRINT((ndo, " (Hold-time ")); unsigned_relts_print(ndo, EXTRACT_16BITS(&bp[10])); ND_PRINT((ndo, ")")); } break; case PIMV1_TYPE_REGISTER: ND_TCHECK2(bp[8], 20); /* ip header */ ND_PRINT((ndo, " for %s > %s", ipaddr_string(ndo, &bp[20]), ipaddr_string(ndo, &bp[24]))); break; case PIMV1_TYPE_REGISTER_STOP: ND_TCHECK2(bp[12], sizeof(struct in_addr)); ND_PRINT((ndo, " for %s > %s", ipaddr_string(ndo, &bp[8]), ipaddr_string(ndo, &bp[12]))); break; case PIMV1_TYPE_RP_REACHABILITY: if 
(ndo->ndo_vflag) { ND_TCHECK2(bp[22], 2); ND_PRINT((ndo, " group %s", ipaddr_string(ndo, &bp[8]))); if (EXTRACT_32BITS(&bp[12]) != 0xffffffff) ND_PRINT((ndo, "/%s", ipaddr_string(ndo, &bp[12]))); ND_PRINT((ndo, " RP %s hold ", ipaddr_string(ndo, &bp[16]))); unsigned_relts_print(ndo, EXTRACT_16BITS(&bp[22])); } break; case PIMV1_TYPE_ASSERT: ND_TCHECK2(bp[16], sizeof(struct in_addr)); ND_PRINT((ndo, " for %s > %s", ipaddr_string(ndo, &bp[16]), ipaddr_string(ndo, &bp[8]))); if (EXTRACT_32BITS(&bp[12]) != 0xffffffff) ND_PRINT((ndo, "/%s", ipaddr_string(ndo, &bp[12]))); ND_TCHECK2(bp[24], 4); ND_PRINT((ndo, " %s pref %d metric %d", (bp[20] & 0x80) ? "RP-tree" : "SPT", EXTRACT_32BITS(&bp[20]) & 0x7fffffff, EXTRACT_32BITS(&bp[24]))); break; case PIMV1_TYPE_JOIN_PRUNE: case PIMV1_TYPE_GRAFT: case PIMV1_TYPE_GRAFT_ACK: if (ndo->ndo_vflag) { if (len < 8) goto trunc; pimv1_join_prune_print(ndo, &bp[8], len - 8); } break; } ND_TCHECK(bp[4]); if ((bp[4] >> 4) != 1) ND_PRINT((ndo, " [v%d]", bp[4] >> 4)); return; trunc: ND_PRINT((ndo, "[|pim]")); return; } /* * auto-RP is a cisco protocol, documented at * ftp://ftpeng.cisco.com/ipmulticast/specs/pim-autorp-spec01.txt * * This implements version 1+, dated Sept 9, 1998. */ void cisco_autorp_print(netdissect_options *ndo, register const u_char *bp, register u_int len) { int type; int numrps; int hold; if (len < 8) goto trunc; ND_TCHECK(bp[0]); ND_PRINT((ndo, " auto-rp ")); type = bp[0]; switch (type) { case 0x11: ND_PRINT((ndo, "candidate-advert")); break; case 0x12: ND_PRINT((ndo, "mapping")); break; default: ND_PRINT((ndo, "type-0x%02x", type)); break; } ND_TCHECK(bp[1]); numrps = bp[1]; ND_TCHECK2(bp[2], 2); ND_PRINT((ndo, " Hold ")); hold = EXTRACT_16BITS(&bp[2]); if (hold) unsigned_relts_print(ndo, EXTRACT_16BITS(&bp[2])); else ND_PRINT((ndo, "FOREVER")); /* Next 4 bytes are reserved. */ bp += 8; len -= 8; /*XXX skip unless -v? 
*/ /* * Rest of packet: * numrps entries of the form: * 32 bits: RP * 6 bits: reserved * 2 bits: PIM version supported, bit 0 is "supports v1", 1 is "v2". * 8 bits: # of entries for this RP * each entry: 7 bits: reserved, 1 bit: negative, * 8 bits: mask 32 bits: source * lather, rinse, repeat. */ while (numrps--) { int nentries; char s; if (len < 4) goto trunc; ND_TCHECK2(bp[0], 4); ND_PRINT((ndo, " RP %s", ipaddr_string(ndo, bp))); bp += 4; len -= 4; if (len < 1) goto trunc; ND_TCHECK(bp[0]); switch (bp[0] & 0x3) { case 0: ND_PRINT((ndo, " PIMv?")); break; case 1: ND_PRINT((ndo, " PIMv1")); break; case 2: ND_PRINT((ndo, " PIMv2")); break; case 3: ND_PRINT((ndo, " PIMv1+2")); break; } if (bp[0] & 0xfc) ND_PRINT((ndo, " [rsvd=0x%02x]", bp[0] & 0xfc)); bp += 1; len -= 1; if (len < 1) goto trunc; ND_TCHECK(bp[0]); nentries = bp[0]; bp += 1; len -= 1; s = ' '; for (; nentries; nentries--) { if (len < 6) goto trunc; ND_TCHECK2(bp[0], 6); ND_PRINT((ndo, "%c%s%s/%d", s, bp[0] & 1 ? "!" : "", ipaddr_string(ndo, &bp[2]), bp[1])); if (bp[0] & 0x02) { ND_PRINT((ndo, " bidir")); } if (bp[0] & 0xfc) { ND_PRINT((ndo, "[rsvd=0x%02x]", bp[0] & 0xfc)); } s = ','; bp += 6; len -= 6; } } return; trunc: ND_PRINT((ndo, "[|autorp]")); return; } void pim_print(netdissect_options *ndo, register const u_char *bp, register u_int len, const u_char *bp2) { register const struct pim *pim = (const struct pim *)bp; #ifdef notyet /* currently we see only version and type */ ND_TCHECK(pim->pim_rsv); #endif ND_TCHECK(pim->pim_typever); switch (PIM_VER(pim->pim_typever)) { case 2: if (!ndo->ndo_vflag) { ND_PRINT((ndo, "PIMv%u, %s, length %u", PIM_VER(pim->pim_typever), tok2str(pimv2_type_values,"Unknown Type",PIM_TYPE(pim->pim_typever)), len)); return; } else { ND_PRINT((ndo, "PIMv%u, length %u\n\t%s", PIM_VER(pim->pim_typever), len, tok2str(pimv2_type_values,"Unknown Type",PIM_TYPE(pim->pim_typever)))); pimv2_print(ndo, bp, len, bp2); } break; default: ND_PRINT((ndo, "PIMv%u, length %u", 
PIM_VER(pim->pim_typever), len)); break; } return; trunc: ND_PRINT((ndo, "[|pim]")); return; } /* * PIMv2 uses encoded address representations. * * The last PIM-SM I-D before RFC2117 was published specified the * following representation for unicast addresses. However, RFC2117 * specified no encoding for unicast addresses with the unicast * address length specified in the header. Therefore, we have to * guess which encoding is being used (Cisco's PIMv2 implementation * uses the non-RFC encoding). RFC2117 turns a previously "Reserved" * field into a 'unicast-address-length-in-bytes' field. We guess * that it's the draft encoding if this reserved field is zero. * * RFC2362 goes back to the encoded format, and calls the addr length * field "reserved" again. * * The first byte is the address family, from: * * 0 Reserved * 1 IP (IP version 4) * 2 IP6 (IP version 6) * 3 NSAP * 4 HDLC (8-bit multidrop) * 5 BBN 1822 * 6 802 (includes all 802 media plus Ethernet "canonical format") * 7 E.163 * 8 E.164 (SMDS, Frame Relay, ATM) * 9 F.69 (Telex) * 10 X.121 (X.25, Frame Relay) * 11 IPX * 12 Appletalk * 13 Decnet IV * 14 Banyan Vines * 15 E.164 with NSAP format subaddress * * In addition, the second byte is an "Encoding". 0 is the default * encoding for the address family, and no other encodings are currently * specified. 
* */ enum pimv2_addrtype { pimv2_unicast, pimv2_group, pimv2_source }; /* 0 1 2 3 * 0 1 2 3 4 5 6 7 8 9 0 1 2 3 4 5 6 7 8 9 0 1 2 3 4 5 6 7 8 9 0 1 * +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+ * | Addr Family | Encoding Type | Unicast Address | * +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+++++++ * 0 1 2 3 * 0 1 2 3 4 5 6 7 8 9 0 1 2 3 4 5 6 7 8 9 0 1 2 3 4 5 6 7 8 9 0 1 * +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+ * | Addr Family | Encoding Type | Reserved | Mask Len | * +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+ * | Group multicast Address | * +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+ * 0 1 2 3 * 0 1 2 3 4 5 6 7 8 9 0 1 2 3 4 5 6 7 8 9 0 1 2 3 4 5 6 7 8 9 0 1 * +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+ * | Addr Family | Encoding Type | Rsrvd |S|W|R| Mask Len | * +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+ * | Source Address | * +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+ */ static int pimv2_addr_print(netdissect_options *ndo, const u_char *bp, u_int len, enum pimv2_addrtype at, u_int addr_len, int silent) { int af; int hdrlen; if (addr_len == 0) { if (len < 2) goto trunc; ND_TCHECK(bp[1]); switch (bp[0]) { case 1: af = AF_INET; addr_len = (u_int)sizeof(struct in_addr); break; case 2: af = AF_INET6; addr_len = (u_int)sizeof(struct in6_addr); break; default: return -1; } if (bp[1] != 0) return -1; hdrlen = 2; } else { switch (addr_len) { case sizeof(struct in_addr): af = AF_INET; break; case sizeof(struct in6_addr): af = AF_INET6; break; default: return -1; break; } hdrlen = 0; } bp += hdrlen; len -= hdrlen; switch (at) { case pimv2_unicast: if (len < addr_len) goto trunc; ND_TCHECK2(bp[0], addr_len); if (af == AF_INET) { if (!silent) ND_PRINT((ndo, "%s", ipaddr_string(ndo, bp))); } else if (af == AF_INET6) { if (!silent) ND_PRINT((ndo, "%s", ip6addr_string(ndo, bp))); } return hdrlen + addr_len; 
case pimv2_group: case pimv2_source: if (len < addr_len + 2) goto trunc; ND_TCHECK2(bp[0], addr_len + 2); if (af == AF_INET) { if (!silent) { ND_PRINT((ndo, "%s", ipaddr_string(ndo, bp + 2))); if (bp[1] != 32) ND_PRINT((ndo, "/%u", bp[1])); } } else if (af == AF_INET6) { if (!silent) { ND_PRINT((ndo, "%s", ip6addr_string(ndo, bp + 2))); if (bp[1] != 128) ND_PRINT((ndo, "/%u", bp[1])); } } if (bp[0] && !silent) { if (at == pimv2_group) { ND_PRINT((ndo, "(0x%02x)", bp[0])); } else { ND_PRINT((ndo, "(%s%s%s", bp[0] & 0x04 ? "S" : "", bp[0] & 0x02 ? "W" : "", bp[0] & 0x01 ? "R" : "")); if (bp[0] & 0xf8) { ND_PRINT((ndo, "+0x%02x", bp[0] & 0xf8)); } ND_PRINT((ndo, ")")); } } return hdrlen + 2 + addr_len; default: return -1; } trunc: return -1; } enum checksum_status { CORRECT, INCORRECT, UNVERIFIED }; static enum checksum_status pimv2_check_checksum(netdissect_options *ndo, const u_char *bp, const u_char *bp2, u_int len) { const struct ip *ip; u_int cksum; if (!ND_TTEST2(bp[0], len)) { /* We don't have all the data. */ return (UNVERIFIED); } ip = (const struct ip *)bp2; if (IP_V(ip) == 4) { struct cksum_vec vec[1]; vec[0].ptr = bp; vec[0].len = len; cksum = in_cksum(vec, 1); return (cksum ? INCORRECT : CORRECT); } else if (IP_V(ip) == 6) { const struct ip6_hdr *ip6; ip6 = (const struct ip6_hdr *)bp2; cksum = nextproto6_cksum(ndo, ip6, bp, len, len, IPPROTO_PIM); return (cksum ? 
INCORRECT : CORRECT); } else { return (UNVERIFIED); } } static void pimv2_print(netdissect_options *ndo, register const u_char *bp, register u_int len, const u_char *bp2) { register const u_char *ep; register const struct pim *pim = (const struct pim *)bp; int advance; enum checksum_status cksum_status; int pimv2_addr_len; ep = (const u_char *)ndo->ndo_snapend; if (bp >= ep) return; if (ep > bp + len) ep = bp + len; if (len < 2) goto trunc; ND_TCHECK(pim->pim_rsv); pimv2_addr_len = pim->pim_rsv; if (pimv2_addr_len != 0) ND_PRINT((ndo, ", RFC2117-encoding")); if (len < 4) goto trunc; ND_TCHECK(pim->pim_cksum); ND_PRINT((ndo, ", cksum 0x%04x ", EXTRACT_16BITS(&pim->pim_cksum))); if (EXTRACT_16BITS(&pim->pim_cksum) == 0) { ND_PRINT((ndo, "(unverified)")); } else { if (PIM_TYPE(pim->pim_typever) == PIMV2_TYPE_REGISTER) { /* * The checksum only covers the packet header, * not the encapsulated packet. */ cksum_status = pimv2_check_checksum(ndo, bp, bp2, 8); if (cksum_status == INCORRECT) { /* * To quote RFC 4601, "For interoperability * reasons, a message carrying a checksum * calculated over the entire PIM Register * message should also be accepted." */ cksum_status = pimv2_check_checksum(ndo, bp, bp2, len); } } else { /* * The checksum covers the entire packet. 
*/ cksum_status = pimv2_check_checksum(ndo, bp, bp2, len); } switch (cksum_status) { case CORRECT: ND_PRINT((ndo, "(correct)")); break; case INCORRECT: ND_PRINT((ndo, "(incorrect)")); break; case UNVERIFIED: ND_PRINT((ndo, "(unverified)")); break; } } bp += 4; len -= 4; switch (PIM_TYPE(pim->pim_typever)) { case PIMV2_TYPE_HELLO: { uint16_t otype, olen; while (len > 0) { if (len < 4) goto trunc; ND_TCHECK2(bp[0], 4); otype = EXTRACT_16BITS(&bp[0]); olen = EXTRACT_16BITS(&bp[2]); ND_PRINT((ndo, "\n\t %s Option (%u), length %u, Value: ", tok2str(pimv2_hello_option_values, "Unknown", otype), otype, olen)); bp += 4; len -= 4; if (len < olen) goto trunc; ND_TCHECK2(bp[0], olen); switch (otype) { case PIMV2_HELLO_OPTION_HOLDTIME: if (olen != 2) { ND_PRINT((ndo, "ERROR: Option Length != 2 Bytes (%u)", olen)); } else { unsigned_relts_print(ndo, EXTRACT_16BITS(bp)); } break; case PIMV2_HELLO_OPTION_LANPRUNEDELAY: if (olen != 4) { ND_PRINT((ndo, "ERROR: Option Length != 4 Bytes (%u)", olen)); } else { char t_bit; uint16_t lan_delay, override_interval; lan_delay = EXTRACT_16BITS(bp); override_interval = EXTRACT_16BITS(bp+2); t_bit = (lan_delay & 0x8000)? 
1 : 0; lan_delay &= ~0x8000; ND_PRINT((ndo, "\n\t T-bit=%d, LAN delay %dms, Override interval %dms", t_bit, lan_delay, override_interval)); } break; case PIMV2_HELLO_OPTION_DR_PRIORITY_OLD: case PIMV2_HELLO_OPTION_DR_PRIORITY: switch (olen) { case 0: ND_PRINT((ndo, "Bi-Directional Capability (Old)")); break; case 4: ND_PRINT((ndo, "%u", EXTRACT_32BITS(bp))); break; default: ND_PRINT((ndo, "ERROR: Option Length != 4 Bytes (%u)", olen)); break; } break; case PIMV2_HELLO_OPTION_GENID: if (olen != 4) { ND_PRINT((ndo, "ERROR: Option Length != 4 Bytes (%u)", olen)); } else { ND_PRINT((ndo, "0x%08x", EXTRACT_32BITS(bp))); } break; case PIMV2_HELLO_OPTION_REFRESH_CAP: if (olen != 4) { ND_PRINT((ndo, "ERROR: Option Length != 4 Bytes (%u)", olen)); } else { ND_PRINT((ndo, "v%d", *bp)); if (*(bp+1) != 0) { ND_PRINT((ndo, ", interval ")); unsigned_relts_print(ndo, *(bp+1)); } if (EXTRACT_16BITS(bp+2) != 0) { ND_PRINT((ndo, " ?0x%04x?", EXTRACT_16BITS(bp+2))); } } break; case PIMV2_HELLO_OPTION_BIDIR_CAP: break; case PIMV2_HELLO_OPTION_ADDRESS_LIST_OLD: case PIMV2_HELLO_OPTION_ADDRESS_LIST: if (ndo->ndo_vflag > 1) { const u_char *ptr = bp; u_int plen = len; while (ptr < (bp+olen)) { ND_PRINT((ndo, "\n\t ")); advance = pimv2_addr_print(ndo, ptr, plen, pimv2_unicast, pimv2_addr_len, 0); if (advance < 0) goto trunc; ptr += advance; plen -= advance; } } break; default: if (ndo->ndo_vflag <= 1) print_unknown_data(ndo, bp, "\n\t ", olen); break; } /* do we want to see an additionally hexdump ? 
*/ if (ndo->ndo_vflag> 1) print_unknown_data(ndo, bp, "\n\t ", olen); bp += olen; len -= olen; } break; } case PIMV2_TYPE_REGISTER: { const struct ip *ip; if (len < 4) goto trunc; ND_TCHECK2(*bp, PIMV2_REGISTER_FLAG_LEN); ND_PRINT((ndo, ", Flags [ %s ]\n\t", tok2str(pimv2_register_flag_values, "none", EXTRACT_32BITS(bp)))); bp += 4; len -= 4; /* encapsulated multicast packet */ if (len == 0) goto trunc; ip = (const struct ip *)bp; ND_TCHECK(ip->ip_vhl); switch (IP_V(ip)) { case 0: /* Null header */ ND_TCHECK(ip->ip_dst); ND_PRINT((ndo, "IP-Null-header %s > %s", ipaddr_string(ndo, &ip->ip_src), ipaddr_string(ndo, &ip->ip_dst))); break; case 4: /* IPv4 */ ip_print(ndo, bp, len); break; case 6: /* IPv6 */ ip6_print(ndo, bp, len); break; default: ND_PRINT((ndo, "IP ver %d", IP_V(ip))); break; } break; } case PIMV2_TYPE_REGISTER_STOP: ND_PRINT((ndo, " group=")); if ((advance = pimv2_addr_print(ndo, bp, len, pimv2_group, pimv2_addr_len, 0)) < 0) goto trunc; bp += advance; len -= advance; ND_PRINT((ndo, " source=")); if ((advance = pimv2_addr_print(ndo, bp, len, pimv2_unicast, pimv2_addr_len, 0)) < 0) goto trunc; bp += advance; len -= advance; break; case PIMV2_TYPE_JOIN_PRUNE: case PIMV2_TYPE_GRAFT: case PIMV2_TYPE_GRAFT_ACK: /* * 0 1 2 3 * 0 1 2 3 4 5 6 7 8 9 0 1 2 3 4 5 6 7 8 9 0 1 2 3 4 5 6 7 8 9 0 1 * +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+ * |PIM Ver| Type | Addr length | Checksum | * +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+ * | Unicast-Upstream Neighbor Address | * +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+ * | Reserved | Num groups | Holdtime | * +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+ * | Encoded-Multicast Group Address-1 | * +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+ * | Number of Joined Sources | Number of Pruned Sources | * +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+ * | Encoded-Joined Source Address-1 | * 
+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+ * | . | * | . | * +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+ * | Encoded-Joined Source Address-n | * +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+ * | Encoded-Pruned Source Address-1 | * +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+ * | . | * | . | * +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+ * | Encoded-Pruned Source Address-n | * +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+ * | . | * | . | * +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+ * | Encoded-Multicast Group Address-n | * +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+ */ { uint8_t ngroup; uint16_t holdtime; uint16_t njoin; uint16_t nprune; int i, j; if (PIM_TYPE(pim->pim_typever) != 7) { /*not for Graft-ACK*/ ND_PRINT((ndo, ", upstream-neighbor: ")); if ((advance = pimv2_addr_print(ndo, bp, len, pimv2_unicast, pimv2_addr_len, 0)) < 0) goto trunc; bp += advance; len -= advance; } if (len < 4) goto trunc; ND_TCHECK2(*bp, 4); ngroup = bp[1]; holdtime = EXTRACT_16BITS(&bp[2]); ND_PRINT((ndo, "\n\t %u group(s)", ngroup)); if (PIM_TYPE(pim->pim_typever) != 7) { /*not for Graft-ACK*/ ND_PRINT((ndo, ", holdtime: ")); if (holdtime == 0xffff) ND_PRINT((ndo, "infinite")); else unsigned_relts_print(ndo, holdtime); } bp += 4; len -= 4; for (i = 0; i < ngroup; i++) { ND_PRINT((ndo, "\n\t group #%u: ", i+1)); if ((advance = pimv2_addr_print(ndo, bp, len, pimv2_group, pimv2_addr_len, 0)) < 0) goto trunc; bp += advance; len -= advance; if (len < 4) goto trunc; ND_TCHECK2(*bp, 4); njoin = EXTRACT_16BITS(&bp[0]); nprune = EXTRACT_16BITS(&bp[2]); ND_PRINT((ndo, ", joined sources: %u, pruned sources: %u", njoin, nprune)); bp += 4; len -= 4; for (j = 0; j < njoin; j++) { ND_PRINT((ndo, "\n\t joined source #%u: ", j+1)); if ((advance = pimv2_addr_print(ndo, bp, len, pimv2_source, pimv2_addr_len, 0)) < 0) goto trunc; bp += 
advance; len -= advance; } for (j = 0; j < nprune; j++) { ND_PRINT((ndo, "\n\t pruned source #%u: ", j+1)); if ((advance = pimv2_addr_print(ndo, bp, len, pimv2_source, pimv2_addr_len, 0)) < 0) goto trunc; bp += advance; len -= advance; } } break; } case PIMV2_TYPE_BOOTSTRAP: { int i, j, frpcnt; /* Fragment Tag, Hash Mask len, and BSR-priority */ if (len < 2) goto trunc; ND_TCHECK_16BITS(bp); ND_PRINT((ndo, " tag=%x", EXTRACT_16BITS(bp))); bp += 2; len -= 2; if (len < 1) goto trunc; ND_TCHECK(bp[0]); ND_PRINT((ndo, " hashmlen=%d", bp[0])); if (len < 2) goto trunc; ND_TCHECK(bp[2]); ND_PRINT((ndo, " BSRprio=%d", bp[1])); bp += 2; len -= 2; /* Encoded-Unicast-BSR-Address */ ND_PRINT((ndo, " BSR=")); if ((advance = pimv2_addr_print(ndo, bp, len, pimv2_unicast, pimv2_addr_len, 0)) < 0) goto trunc; bp += advance; len -= advance; for (i = 0; bp < ep; i++) { /* Encoded-Group Address */ ND_PRINT((ndo, " (group%d: ", i)); if ((advance = pimv2_addr_print(ndo, bp, len, pimv2_group, pimv2_addr_len, 0)) < 0) goto trunc; bp += advance; len -= advance; /* RP-Count, Frag RP-Cnt, and rsvd */ if (len < 1) goto trunc; ND_TCHECK(bp[0]); ND_PRINT((ndo, " RPcnt=%d", bp[0])); if (len < 2) goto trunc; ND_TCHECK(bp[1]); ND_PRINT((ndo, " FRPcnt=%d", frpcnt = bp[1])); if (len < 4) goto trunc; bp += 4; len -= 4; for (j = 0; j < frpcnt && bp < ep; j++) { /* each RP info */ ND_PRINT((ndo, " RP%d=", j)); if ((advance = pimv2_addr_print(ndo, bp, len, pimv2_unicast, pimv2_addr_len, 0)) < 0) goto trunc; bp += advance; len -= advance; if (len < 2) goto trunc; ND_TCHECK_16BITS(bp); ND_PRINT((ndo, ",holdtime=")); unsigned_relts_print(ndo, EXTRACT_16BITS(bp)); if (len < 3) goto trunc; ND_TCHECK(bp[2]); ND_PRINT((ndo, ",prio=%d", bp[2])); if (len < 4) goto trunc; bp += 4; len -= 4; } ND_PRINT((ndo, ")")); } break; } case PIMV2_TYPE_ASSERT: ND_PRINT((ndo, " group=")); if ((advance = pimv2_addr_print(ndo, bp, len, pimv2_group, pimv2_addr_len, 0)) < 0) goto trunc; bp += advance; len -= advance; 
ND_PRINT((ndo, " src=")); if ((advance = pimv2_addr_print(ndo, bp, len, pimv2_unicast, pimv2_addr_len, 0)) < 0) goto trunc; bp += advance; len -= advance; if (len < 8) goto trunc; ND_TCHECK2(*bp, 8); if (bp[0] & 0x80) ND_PRINT((ndo, " RPT")); ND_PRINT((ndo, " pref=%u", EXTRACT_32BITS(&bp[0]) & 0x7fffffff)); ND_PRINT((ndo, " metric=%u", EXTRACT_32BITS(&bp[4]))); break; case PIMV2_TYPE_CANDIDATE_RP: { int i, pfxcnt; /* Prefix-Cnt, Priority, and Holdtime */ if (len < 1) goto trunc; ND_TCHECK(bp[0]); ND_PRINT((ndo, " prefix-cnt=%d", bp[0])); pfxcnt = bp[0]; if (len < 2) goto trunc; ND_TCHECK(bp[1]); ND_PRINT((ndo, " prio=%d", bp[1])); if (len < 4) goto trunc; ND_TCHECK_16BITS(&bp[2]); ND_PRINT((ndo, " holdtime=")); unsigned_relts_print(ndo, EXTRACT_16BITS(&bp[2])); bp += 4; len -= 4; /* Encoded-Unicast-RP-Address */ ND_PRINT((ndo, " RP=")); if ((advance = pimv2_addr_print(ndo, bp, len, pimv2_unicast, pimv2_addr_len, 0)) < 0) goto trunc; bp += advance; len -= advance; /* Encoded-Group Addresses */ for (i = 0; i < pfxcnt && bp < ep; i++) { ND_PRINT((ndo, " Group%d=", i)); if ((advance = pimv2_addr_print(ndo, bp, len, pimv2_group, pimv2_addr_len, 0)) < 0) goto trunc; bp += advance; len -= advance; } break; } case PIMV2_TYPE_PRUNE_REFRESH: ND_PRINT((ndo, " src=")); if ((advance = pimv2_addr_print(ndo, bp, len, pimv2_unicast, pimv2_addr_len, 0)) < 0) goto trunc; bp += advance; len -= advance; ND_PRINT((ndo, " grp=")); if ((advance = pimv2_addr_print(ndo, bp, len, pimv2_group, pimv2_addr_len, 0)) < 0) goto trunc; bp += advance; len -= advance; ND_PRINT((ndo, " forwarder=")); if ((advance = pimv2_addr_print(ndo, bp, len, pimv2_unicast, pimv2_addr_len, 0)) < 0) goto trunc; bp += advance; len -= advance; if (len < 2) goto trunc; ND_TCHECK_16BITS(bp); ND_PRINT((ndo, " TUNR ")); unsigned_relts_print(ndo, EXTRACT_16BITS(bp)); break; default: ND_PRINT((ndo, " [type %d]", PIM_TYPE(pim->pim_typever))); break; } return; trunc: ND_PRINT((ndo, "[|pim]")); } /* * Local Variables: * 
c-style: whitesmith * c-basic-offset: 8 * End: */
pim_print(netdissect_options *ndo, register const u_char *bp, register u_int len, const u_char *bp2) { register const u_char *ep; register const struct pim *pim = (const struct pim *)bp; ep = (const u_char *)ndo->ndo_snapend; if (bp >= ep) return; #ifdef notyet /* currently we see only version and type */ ND_TCHECK(pim->pim_rsv); #endif switch (PIM_VER(pim->pim_typever)) { case 2: if (!ndo->ndo_vflag) { ND_PRINT((ndo, "PIMv%u, %s, length %u", PIM_VER(pim->pim_typever), tok2str(pimv2_type_values,"Unknown Type",PIM_TYPE(pim->pim_typever)), len)); return; } else { ND_PRINT((ndo, "PIMv%u, length %u\n\t%s", PIM_VER(pim->pim_typever), len, tok2str(pimv2_type_values,"Unknown Type",PIM_TYPE(pim->pim_typever)))); pimv2_print(ndo, bp, len, bp2); } break; default: ND_PRINT((ndo, "PIMv%u, length %u", PIM_VER(pim->pim_typever), len)); break; } return; }
pim_print(netdissect_options *ndo, register const u_char *bp, register u_int len, const u_char *bp2) { register const struct pim *pim = (const struct pim *)bp; #ifdef notyet /* currently we see only version and type */ ND_TCHECK(pim->pim_rsv); #endif ND_TCHECK(pim->pim_typever); switch (PIM_VER(pim->pim_typever)) { case 2: if (!ndo->ndo_vflag) { ND_PRINT((ndo, "PIMv%u, %s, length %u", PIM_VER(pim->pim_typever), tok2str(pimv2_type_values,"Unknown Type",PIM_TYPE(pim->pim_typever)), len)); return; } else { ND_PRINT((ndo, "PIMv%u, length %u\n\t%s", PIM_VER(pim->pim_typever), len, tok2str(pimv2_type_values,"Unknown Type",PIM_TYPE(pim->pim_typever)))); pimv2_print(ndo, bp, len, bp2); } break; default: ND_PRINT((ndo, "PIMv%u, length %u", PIM_VER(pim->pim_typever), len)); break; } return; trunc: ND_PRINT((ndo, "[|pim]")); return; }
{'added': [(172, '\tif (len < sizeof(struct in_addr))'), (173, '\t\tgoto trunc;'), (178, '\tbp += 4;'), (179, '\tlen -= 4;'), (180, '\tif (len < 4)'), (181, '\t\tgoto trunc;'), (182, '\tND_TCHECK2(bp[2], 2);'), (186, '\tunsigned_relts_print(ndo, EXTRACT_16BITS(&bp[2]));'), (189, '\tbp += 4;'), (190, '\tlen -= 4;'), (192, '\tif (len < 4)'), (193, '\t\tgoto trunc;'), (203, '\t\tif (len < 4)'), (204, '\t\t\tgoto trunc;'), (207, '\t\tbp += 4;'), (208, '\t\tlen -= 4;'), (209, '\t\tif (len < 4)'), (210, '\t\t\tgoto trunc;'), (211, '\t\tND_TCHECK2(bp[0], sizeof(struct in_addr));'), (212, '\t\tif (EXTRACT_32BITS(&bp[0]) != 0xffffffff)'), (213, '\t\t\tND_PRINT((ndo, "/%s", ipaddr_string(ndo, &bp[0])));'), (214, '\t\tbp += 4;'), (215, '\t\tlen -= 4;'), (216, '\t\tif (len < 4)'), (217, '\t\t\tgoto trunc;'), (218, '\t\tND_TCHECK2(bp[0], 4);'), (219, '\t\tnjoin = EXTRACT_16BITS(&bp[0]);'), (220, '\t\tnprune = EXTRACT_16BITS(&bp[2]);'), (222, '\t\tbp += 4;'), (223, '\t\tlen -= 4;'), (231, '\t\t\tif (len < 6)'), (232, '\t\t\t\tgoto trunc;'), (238, '\t\t\t ipaddr_string(ndo, &bp[2]),'), (239, '\t\t\t bp[1] & 0x3f));'), (321, '\t\tif (ndo->ndo_vflag) {'), (322, '\t\t\tif (len < 8)'), (323, '\t\t\t\tgoto trunc;'), (325, '\t\t}'), (352, '\tif (len < 8)'), (353, '\t\tgoto trunc;'), (401, '\t\tif (len < 4)'), (402, '\t\t\tgoto trunc;'), (405, '\t\tbp += 4;'), (406, '\t\tlen -= 4;'), (407, '\t\tif (len < 1)'), (408, '\t\t\tgoto trunc;'), (409, '\t\tND_TCHECK(bp[0]);'), (410, '\t\tswitch (bp[0] & 0x3) {'), (420, '\t\tif (bp[0] & 0xfc)'), (421, '\t\t\tND_PRINT((ndo, " [rsvd=0x%02x]", bp[0] & 0xfc));'), (422, '\t\tbp += 1;'), (423, '\t\tlen -= 1;'), (424, '\t\tif (len < 1)'), (425, '\t\t\tgoto trunc;'), (426, '\t\tND_TCHECK(bp[0]);'), (427, '\t\tnentries = bp[0];'), (428, '\t\tbp += 1;'), (429, '\t\tlen -= 1;'), (432, '\t\t\tif (len < 6)'), (433, '\t\t\t\tgoto trunc;'), (464, '\tND_TCHECK(pim->pim_typever);'), (488, ''), (489, 'trunc:'), (490, '\tND_PRINT((ndo, "[|pim]"));'), (491, 
'\treturn;'), (560, ' const u_char *bp, u_int len, enum pimv2_addrtype at,'), (561, ' u_int addr_len, int silent)'), (564, '\tint hdrlen;'), (566, '\tif (addr_len == 0) {'), (567, '\t\tif (len < 2)'), (568, '\t\t\tgoto trunc;'), (573, '\t\t\taddr_len = (u_int)sizeof(struct in_addr);'), (577, '\t\t\taddr_len = (u_int)sizeof(struct in6_addr);'), (586, '\t\tswitch (addr_len) {'), (601, '\tlen -= hdrlen;'), (604, '\t\tif (len < addr_len)'), (605, '\t\t\tgoto trunc;'), (606, '\t\tND_TCHECK2(bp[0], addr_len);'), (615, '\t\treturn hdrlen + addr_len;'), (618, '\t\tif (len < addr_len + 2)'), (619, '\t\t\tgoto trunc;'), (620, '\t\tND_TCHECK2(bp[0], addr_len + 2);'), (649, '\t\treturn hdrlen + 2 + addr_len;'), (701, '\tint pimv2_addr_len;'), (708, '\tif (len < 2)'), (709, '\t\tgoto trunc;'), (715, '\tif (len < 4)'), (716, '\t\tgoto trunc;'), (717, '\tND_TCHECK(pim->pim_cksum);'), (758, '\tbp += 4;'), (759, '\tlen -= 4;'), (765, '\t\twhile (len > 0) {'), (766, '\t\t\tif (len < 4)'), (767, '\t\t\t\tgoto trunc;'), (776, '\t\t\tlen -= 4;'), (778, '\t\t\tif (len < olen)'), (779, '\t\t\t\tgoto trunc;'), (780, '\t\t\tND_TCHECK2(bp[0], olen);'), (850, '\t\t\t\t\tu_int plen = len;'), (853, '\t\t\t\t\t\tadvance = pimv2_addr_print(ndo, ptr, plen, pimv2_unicast, pimv2_addr_len, 0);'), (854, '\t\t\t\t\t\tif (advance < 0)'), (855, '\t\t\t\t\t\t\tgoto trunc;'), (857, '\t\t\t\t\t\tplen -= advance;'), (870, '\t\t\tlen -= olen;'), (879, '\t\tif (len < 4)'), (880, '\t\t\tgoto trunc;'), (881, '\t\tND_TCHECK2(*bp, PIMV2_REGISTER_FLAG_LEN);'), (886, '\t\t EXTRACT_32BITS(bp))));'), (888, '\t\tbp += 4; len -= 4;'), (890, '\t\tif (len == 0)'), (891, '\t\t\tgoto trunc;'), (893, '\t\tND_TCHECK(ip->ip_vhl);'), (896, '\t\t\tND_TCHECK(ip->ip_dst);'), (919, '\t\tif ((advance = pimv2_addr_print(ndo, bp, len, pimv2_group, pimv2_addr_len, 0)) < 0)'), (920, '\t\t\tgoto trunc;'), (923, '\t\tif ((advance = pimv2_addr_print(ndo, bp, len, pimv2_unicast, pimv2_addr_len, 0)) < 0)'), (924, '\t\t\tgoto trunc;'), (977, 
'\t\t\tif ((advance = pimv2_addr_print(ndo, bp, len, pimv2_unicast, pimv2_addr_len, 0)) < 0)'), (978, '\t\t\t\tgoto trunc;'), (981, '\t\tif (len < 4)'), (982, '\t\t\tgoto trunc;'), (983, '\t\tND_TCHECK2(*bp, 4);'), (997, '\t\t\tif ((advance = pimv2_addr_print(ndo, bp, len, pimv2_group, pimv2_addr_len, 0)) < 0)'), (998, '\t\t\t\tgoto trunc;'), (1000, '\t\t\tif (len < 4)'), (1001, '\t\t\t\tgoto trunc;'), (1002, '\t\t\tND_TCHECK2(*bp, 4);'), (1009, '\t\t\t\tif ((advance = pimv2_addr_print(ndo, bp, len, pimv2_source, pimv2_addr_len, 0)) < 0)'), (1010, '\t\t\t\t\tgoto trunc;'), (1015, '\t\t\t\tif ((advance = pimv2_addr_print(ndo, bp, len, pimv2_source, pimv2_addr_len, 0)) < 0)'), (1016, '\t\t\t\t\tgoto trunc;'), (1028, '\t\tif (len < 2)'), (1029, '\t\t\tgoto trunc;'), (1030, '\t\tND_TCHECK_16BITS(bp);'), (1032, '\t\tbp += 2;'), (1033, '\t\tlen -= 2;'), (1034, '\t\tif (len < 1)'), (1035, '\t\t\tgoto trunc;'), (1036, '\t\tND_TCHECK(bp[0]);'), (1038, '\t\tif (len < 2)'), (1039, '\t\t\tgoto trunc;'), (1040, '\t\tND_TCHECK(bp[2]);'), (1043, '\t\tlen -= 2;'), (1047, '\t\tif ((advance = pimv2_addr_print(ndo, bp, len, pimv2_unicast, pimv2_addr_len, 0)) < 0)'), (1048, '\t\t\tgoto trunc;'), (1050, '\t\tlen -= advance;'), (1055, '\t\t\tif ((advance = pimv2_addr_print(ndo, bp, len, pimv2_group, pimv2_addr_len, 0)) < 0)'), (1056, '\t\t\t\tgoto trunc;'), (1058, '\t\t\tlen -= advance;'), (1061, '\t\t\tif (len < 1)'), (1062, '\t\t\t\tgoto trunc;'), (1063, '\t\t\tND_TCHECK(bp[0]);'), (1065, '\t\t\tif (len < 2)'), (1066, '\t\t\t\tgoto trunc;'), (1067, '\t\t\tND_TCHECK(bp[1]);'), (1069, '\t\t\tif (len < 4)'), (1070, '\t\t\t\tgoto trunc;'), (1072, '\t\t\tlen -= 4;'), (1077, '\t\t\t\tif ((advance = pimv2_addr_print(ndo, bp, len,'), (1079, '\t\t\t\t\t\t\t\tpimv2_addr_len,'), (1080, '\t\t\t\t\t\t\t\t0)) < 0)'), (1081, '\t\t\t\t\tgoto trunc;'), (1083, '\t\t\t\tlen -= advance;'), (1085, '\t\t\t\tif (len < 2)'), (1086, '\t\t\t\t\tgoto trunc;'), (1087, '\t\t\t\tND_TCHECK_16BITS(bp);'), (1090, 
'\t\t\t\tif (len < 3)'), (1091, '\t\t\t\t\tgoto trunc;'), (1092, '\t\t\t\tND_TCHECK(bp[2]);'), (1094, '\t\t\t\tif (len < 4)'), (1095, '\t\t\t\t\tgoto trunc;'), (1097, '\t\t\t\tlen -= 4;'), (1105, '\t\tif ((advance = pimv2_addr_print(ndo, bp, len, pimv2_group, pimv2_addr_len, 0)) < 0)'), (1106, '\t\t\tgoto trunc;'), (1109, '\t\tif ((advance = pimv2_addr_print(ndo, bp, len, pimv2_unicast, pimv2_addr_len, 0)) < 0)'), (1110, '\t\t\tgoto trunc;'), (1112, '\t\tif (len < 8)'), (1113, '\t\t\tgoto trunc;'), (1114, '\t\tND_TCHECK2(*bp, 8);'), (1126, '\t\tif (len < 1)'), (1127, '\t\t\tgoto trunc;'), (1128, '\t\tND_TCHECK(bp[0]);'), (1131, '\t\tif (len < 2)'), (1132, '\t\t\tgoto trunc;'), (1133, '\t\tND_TCHECK(bp[1]);'), (1135, '\t\tif (len < 4)'), (1136, '\t\t\tgoto trunc;'), (1137, '\t\tND_TCHECK_16BITS(&bp[2]);'), (1141, '\t\tlen -= 4;'), (1145, '\t\tif ((advance = pimv2_addr_print(ndo, bp, len, pimv2_unicast, pimv2_addr_len, 0)) < 0)'), (1146, '\t\t\tgoto trunc;'), (1148, '\t\tlen -= advance;'), (1153, '\t\t\tif ((advance = pimv2_addr_print(ndo, bp, len, pimv2_group, pimv2_addr_len, 0)) < 0)'), (1154, '\t\t\t\tgoto trunc;'), (1156, '\t\t\tlen -= advance;'), (1163, '\t\tif ((advance = pimv2_addr_print(ndo, bp, len, pimv2_unicast, pimv2_addr_len, 0)) < 0)'), (1164, '\t\t\tgoto trunc;'), (1166, '\t\tlen -= advance;'), (1168, '\t\tif ((advance = pimv2_addr_print(ndo, bp, len, pimv2_group, pimv2_addr_len, 0)) < 0)'), (1169, '\t\t\tgoto trunc;'), (1171, '\t\tlen -= advance;'), (1173, '\t\tif ((advance = pimv2_addr_print(ndo, bp, len, pimv2_unicast, pimv2_addr_len, 0)) < 0)'), (1174, '\t\t\tgoto trunc;'), (1176, '\t\tlen -= advance;'), (1177, '\t\tif (len < 2)'), (1178, '\t\t\tgoto trunc;'), (1179, '\t\tND_TCHECK_16BITS(bp);')], 'deleted': [(176, '\tND_TCHECK2(bp[6], 2);'), (180, '\tunsigned_relts_print(ndo, EXTRACT_16BITS(&bp[6]));'), (183, '\tbp += 8;'), (184, '\tlen -= 8;'), (197, '\t\tND_TCHECK2(bp[4], sizeof(struct in_addr));'), (198, '\t\tif (EXTRACT_32BITS(&bp[4]) != 
0xffffffff)'), (199, '\t\t\tND_PRINT((ndo, "/%s", ipaddr_string(ndo, &bp[4])));'), (200, '\t\tND_TCHECK2(bp[8], 4);'), (201, '\t\tnjoin = EXTRACT_16BITS(&bp[8]);'), (202, '\t\tnprune = EXTRACT_16BITS(&bp[10]);'), (204, '\t\tbp += 12;'), (205, '\t\tlen -= 12;'), (218, '\t\t\tipaddr_string(ndo, &bp[2]), bp[1] & 0x3f));'), (233, '\tregister const u_char *ep;'), (236, '\tep = (const u_char *)ndo->ndo_snapend;'), (237, '\tif (bp >= ep)'), (238, '\t\treturn;'), (239, ''), (305, '\t\tif (ndo->ndo_vflag)'), (382, '\t\tND_TCHECK(bp[4]);'), (383, '\t\tswitch (bp[4] & 0x3) {'), (393, '\t\tif (bp[4] & 0xfc)'), (394, '\t\t\tND_PRINT((ndo, " [rsvd=0x%02x]", bp[4] & 0xfc));'), (395, '\t\tND_TCHECK(bp[5]);'), (396, '\t\tnentries = bp[5];'), (397, '\t\tbp += 6; len -= 6;'), (424, '\tregister const u_char *ep;'), (427, '\tep = (const u_char *)ndo->ndo_snapend;'), (428, '\tif (bp >= ep)'), (429, '\t\treturn;'), (499, 'static int pimv2_addr_len;'), (500, ''), (527, ' const u_char *bp, enum pimv2_addrtype at, int silent)'), (530, '\tint len, hdrlen;'), (532, '\tND_TCHECK(bp[0]);'), (533, ''), (534, '\tif (pimv2_addr_len == 0) {'), (539, '\t\t\tlen = sizeof(struct in_addr);'), (543, '\t\t\tlen = sizeof(struct in6_addr);'), (552, '\t\tswitch (pimv2_addr_len) {'), (563, '\t\tlen = pimv2_addr_len;'), (570, '\t\tND_TCHECK2(bp[0], len);'), (579, '\t\treturn hdrlen + len;'), (582, '\t\tND_TCHECK2(bp[0], len + 2);'), (611, '\t\treturn hdrlen + 2 + len;'), (719, '\t\tbp += 4;'), (720, '\t\twhile (bp < ep) {'), (724, '\t\t\tND_TCHECK2(bp[0], 4 + olen);'), (802, '\t\t\t\t\t\tadvance = pimv2_addr_print(ndo, ptr, pimv2_unicast, 0);'), (803, '\t\t\t\t\t\tif (advance < 0) {'), (804, '\t\t\t\t\t\t\tND_PRINT((ndo, "..."));'), (805, '\t\t\t\t\t\t\tbreak;'), (806, '\t\t\t\t\t\t}'), (828, '\t\tND_TCHECK2(*(bp + 4), PIMV2_REGISTER_FLAG_LEN);'), (833, '\t\t EXTRACT_32BITS(bp+4))));'), (835, '\t\tbp += 8; len -= 8;'), (861, '\t\tbp += 4; len -= 4;'), (862, '\t\tif (bp >= ep)'), (863, '\t\t\tbreak;'), (865, 
'\t\tif ((advance = pimv2_addr_print(ndo, bp, pimv2_group, 0)) < 0) {'), (866, '\t\t\tND_PRINT((ndo, "..."));'), (867, '\t\t\tbreak;'), (868, '\t\t}'), (870, '\t\tif (bp >= ep)'), (871, '\t\t\tbreak;'), (873, '\t\tif ((advance = pimv2_addr_print(ndo, bp, pimv2_unicast, 0)) < 0) {'), (874, '\t\t\tND_PRINT((ndo, "..."));'), (875, '\t\t\tbreak;'), (876, '\t\t}'), (927, '\t\tbp += 4; len -= 4;'), (929, '\t\t\tif (bp >= ep)'), (930, '\t\t\t\tbreak;'), (932, '\t\t\tif ((advance = pimv2_addr_print(ndo, bp, pimv2_unicast, 0)) < 0) {'), (933, '\t\t\t\tND_PRINT((ndo, "..."));'), (934, '\t\t\t\tbreak;'), (935, '\t\t\t}'), (938, '\t\tif (bp + 4 > ep)'), (939, '\t\t\tbreak;'), (952, '\t\t\tif (bp >= ep)'), (953, '\t\t\t\tgoto jp_done;'), (955, '\t\t\tif ((advance = pimv2_addr_print(ndo, bp, pimv2_group, 0)) < 0) {'), (956, '\t\t\t\tND_PRINT((ndo, "...)"));'), (957, '\t\t\t\tgoto jp_done;'), (958, '\t\t\t}'), (960, '\t\t\tif (bp + 4 > ep) {'), (961, '\t\t\t\tND_PRINT((ndo, "...)"));'), (962, '\t\t\t\tgoto jp_done;'), (963, '\t\t\t}'), (970, '\t\t\t\tif ((advance = pimv2_addr_print(ndo, bp, pimv2_source, 0)) < 0) {'), (971, '\t\t\t\t\tND_PRINT((ndo, "...)"));'), (972, '\t\t\t\t\tgoto jp_done;'), (973, '\t\t\t\t}'), (978, '\t\t\t\tif ((advance = pimv2_addr_print(ndo, bp, pimv2_source, 0)) < 0) {'), (979, '\t\t\t\t\tND_PRINT((ndo, "...)"));'), (980, '\t\t\t\t\tgoto jp_done;'), (981, '\t\t\t\t}'), (985, '\tjp_done:'), (992, '\t\tbp += 4;'), (995, '\t\tif (bp + sizeof(uint16_t) >= ep) break;'), (997, '\t\tbp += sizeof(uint16_t);'), (998, '\t\tif (bp >= ep) break;'), (1000, '\t\tif (bp + 1 >= ep) break;'), (1005, '\t\tif (bp >= ep) break;'), (1007, '\t\tif ((advance = pimv2_addr_print(ndo, bp, pimv2_unicast, 0)) < 0) {'), (1008, '\t\t\tND_PRINT((ndo, "..."));'), (1009, '\t\t\tbreak;'), (1010, '\t\t}'), (1016, '\t\t\tif ((advance = pimv2_addr_print(ndo, bp, pimv2_group, 0))'), (1017, '\t\t\t < 0) {'), (1018, '\t\t\t\tND_PRINT((ndo, "...)"));'), (1019, '\t\t\t\tgoto bs_done;'), (1020, 
'\t\t\t}'), (1024, '\t\t\tif (bp >= ep) {'), (1025, '\t\t\t\tND_PRINT((ndo, "...)"));'), (1026, '\t\t\t\tgoto bs_done;'), (1027, '\t\t\t}'), (1029, '\t\t\tif (bp + 1 >= ep) {'), (1030, '\t\t\t\tND_PRINT((ndo, "...)"));'), (1031, '\t\t\t\tgoto bs_done;'), (1032, '\t\t\t}'), (1039, '\t\t\t\tif ((advance = pimv2_addr_print(ndo, bp,'), (1041, '\t\t\t\t\t\t\t\t0)) < 0) {'), (1042, '\t\t\t\t\tND_PRINT((ndo, "...)"));'), (1043, '\t\t\t\t\tgoto bs_done;'), (1044, '\t\t\t\t}'), (1047, '\t\t\t\tif (bp + 1 >= ep) {'), (1048, '\t\t\t\t\tND_PRINT((ndo, "...)"));'), (1049, '\t\t\t\t\tgoto bs_done;'), (1050, '\t\t\t\t}'), (1053, '\t\t\t\tif (bp + 2 >= ep) {'), (1054, '\t\t\t\t\tND_PRINT((ndo, "...)"));'), (1055, '\t\t\t\t\tgoto bs_done;'), (1056, '\t\t\t\t}'), (1062, '\t bs_done:'), (1066, '\t\tbp += 4; len -= 4;'), (1067, '\t\tif (bp >= ep)'), (1068, '\t\t\tbreak;'), (1070, '\t\tif ((advance = pimv2_addr_print(ndo, bp, pimv2_group, 0)) < 0) {'), (1071, '\t\t\tND_PRINT((ndo, "..."));'), (1072, '\t\t\tbreak;'), (1073, '\t\t}'), (1075, '\t\tif (bp >= ep)'), (1076, '\t\t\tbreak;'), (1078, '\t\tif ((advance = pimv2_addr_print(ndo, bp, pimv2_unicast, 0)) < 0) {'), (1079, '\t\t\tND_PRINT((ndo, "..."));'), (1080, '\t\t\tbreak;'), (1081, '\t\t}'), (1083, '\t\tif (bp + 8 > ep)'), (1084, '\t\t\tbreak;'), (1094, '\t\tbp += 4;'), (1097, '\t\tif (bp >= ep) break;'), (1100, '\t\tif (bp + 1 >= ep) break;'), (1102, '\t\tif (bp + 3 >= ep) break;'), (1108, '\t\tif (bp >= ep) break;'), (1110, '\t\tif ((advance = pimv2_addr_print(ndo, bp, pimv2_unicast, 0)) < 0) {'), (1111, '\t\t\tND_PRINT((ndo, "..."));'), (1112, '\t\t\tbreak;'), (1113, '\t\t}'), (1119, '\t\t\tif ((advance = pimv2_addr_print(ndo, bp, pimv2_group, 0))'), (1120, '\t\t\t < 0) {'), (1121, '\t\t\t\tND_PRINT((ndo, "..."));'), (1122, '\t\t\t\tbreak;'), (1123, '\t\t\t}'), (1131, '\t\tif ((advance = pimv2_addr_print(ndo, bp, pimv2_unicast, 0)) < 0) {'), (1132, '\t\t\tND_PRINT((ndo, "..."));'), (1133, '\t\t\tbreak;'), (1134, '\t\t}'), (1137, 
'\t\tif ((advance = pimv2_addr_print(ndo, bp, pimv2_group, 0)) < 0) {'), (1138, '\t\t\tND_PRINT((ndo, "..."));'), (1139, '\t\t\tbreak;'), (1140, '\t\t}'), (1143, '\t\tif ((advance = pimv2_addr_print(ndo, bp, pimv2_unicast, 0)) < 0) {'), (1144, '\t\t\tND_PRINT((ndo, "..."));'), (1145, '\t\t\tbreak;'), (1146, '\t\t}'), (1148, '\t\tND_TCHECK2(bp[0], 2);')]}
207
176
890
5,856
33
199
5
https://github.com/the-tcpdump-group/tcpdump
CVE-2017-13030
CWE-125
3,280
class.c
C++
r_bin_java_source_code_file_attr_new
/* Apache 2.0 - Copyright 2007-2022 - pancake and dso class.c rewrite: Adam Pridgen <dso@rice.edu || adam.pridgen@thecoverofnight.com> */ #include <stdio.h> #include <string.h> #include <stdlib.h> #include <stdarg.h> #include <r_types.h> #include <r_util.h> #include <r_bin.h> #include <math.h> #include <sdb.h> #include "class.h" #include "dsojson.h" #ifdef IFDBG #undef IFDBG #endif #define DO_THE_DBG 0 #define IFDBG if (DO_THE_DBG) #define IFINT if (0) #define MAX_CPITEMS 8192 R_API char *U(r_bin_java_unmangle_method)(const char *flags, const char *name, const char *params, const char *r_value); R_API int r_bin_java_is_fm_type_private(RBinJavaField *fm_type); R_API int r_bin_java_is_fm_type_protected(RBinJavaField *fm_type); R_API ut32 U(r_bin_java_swap_uint)(ut32 x); // R_API const char * r_bin_java_get_this_class_name(RBinJavaObj *bin); R_API void U(add_cp_objs_to_sdb)(RBinJavaObj * bin); R_API void U(add_field_infos_to_sdb)(RBinJavaObj * bin); R_API void U(add_method_infos_to_sdb)(RBinJavaObj * bin); R_API RList *retrieve_all_access_string_and_value(RBinJavaAccessFlags *access_flags); R_API char *retrieve_access_string(ut16 flags, RBinJavaAccessFlags *access_flags); R_API ut16 calculate_access_value(const char *access_flags_str, RBinJavaAccessFlags *access_flags); R_API int r_bin_java_new_bin(RBinJavaObj *bin, ut64 loadaddr, Sdb *kv, const ut8 *buf, ut64 len); R_API int extract_type_value(const char *arg_str, char **output); R_API int r_bin_java_check_reset_cp_obj(RBinJavaCPTypeObj *cp_obj, ut8 tag); R_API ut8 *r_bin_java_cp_get_4bytes(ut8 tag, ut32 *out_sz, const ut8 *buf, const ut64 len); R_API ut8 *r_bin_java_cp_get_8bytes(ut8 tag, ut32 *out_sz, const ut8 *buf, const ut64 len); R_API ut8 *r_bin_java_cp_get_utf8(ut8 tag, ut32 *out_sz, const ut8 *buf, const ut64 len); R_API RBinJavaCPTypeObj *r_bin_java_get_item_from_bin_cp_list(RBinJavaObj *bin, ut64 idx); R_API RBinJavaCPTypeObj *r_bin_java_get_item_from_cp_item_list(RList *cp_list, ut64 idx); // Allocs for 
objects R_API RBinJavaCPTypeObj *r_bin_java_class_cp_new(RBinJavaObj *bin, ut8 *buffer, ut64 offset); R_API RBinJavaCPTypeObj *r_bin_java_fieldref_cp_new(RBinJavaObj *bin, ut8 *buffer, ut64 offset); R_API RBinJavaCPTypeObj *r_bin_java_methodref_cp_new(RBinJavaObj *bin, ut8 *buffer, ut64 offset); R_API RBinJavaCPTypeObj *r_bin_java_interfacemethodref_cp_new(RBinJavaObj *bin, ut8 *buffer, ut64 offset); R_API RBinJavaCPTypeObj *r_bin_java_name_and_type_cp_new(RBinJavaObj *bin, ut8 *buffer, ut64 offset); R_API RBinJavaCPTypeObj *r_bin_java_string_cp_new(RBinJavaObj *bin, ut8 *buffer, ut64 offset); R_API RBinJavaCPTypeObj *r_bin_java_integer_cp_new(RBinJavaObj *bin, ut8 *buffer, ut64 offset); R_API RBinJavaCPTypeObj *r_bin_java_float_cp_new(RBinJavaObj *bin, ut8 *buffer, ut64 offset); R_API RBinJavaCPTypeObj *r_bin_java_long_cp_new(RBinJavaObj *bin, ut8 *buffer, ut64 offset); R_API RBinJavaCPTypeObj *r_bin_java_double_cp_new(RBinJavaObj *bin, ut8 *buffer, ut64 offset); R_API RBinJavaCPTypeObj *r_bin_java_utf8_cp_new(RBinJavaObj *bin, ut8 *buffer, ut64 offset); R_API RBinJavaCPTypeObj *r_bin_java_do_nothing_new(RBinJavaObj *bin, ut8 *buffer, ut64 sz); R_API RBinJavaCPTypeObj *r_bin_java_clone_cp_item(RBinJavaCPTypeObj *obj); R_API RBinJavaCPTypeObj *r_bin_java_clone_cp_idx(RBinJavaObj *bin, ut32 idx); R_API RBinJavaCPTypeObj *r_bin_java_methodhandle_cp_new(RBinJavaObj *bin, ut8 *buffer, ut64 sz); R_API RBinJavaCPTypeObj *r_bin_java_methodtype_cp_new(RBinJavaObj *bin, ut8 *buffer, ut64 sz); R_API RBinJavaCPTypeObj *r_bin_java_invokedynamic_cp_new(RBinJavaObj *bin, ut8 *buffer, ut64 sz); // Deallocs for type objects R_API void r_bin_java_default_free(void /*RBinJavaCPTypeObj*/ *obj); R_API void r_bin_java_obj_free(void /*RBinJavaCPTypeObj*/ *obj); R_API void r_bin_java_utf8_info_free(void /*RBinJavaCPTypeObj*/ *obj); R_API void r_bin_java_do_nothing_free(void /*RBinJavaCPTypeObj*/ *obj); R_API void r_bin_java_fmtype_free(void /*RBinJavaField*/ *fm_type); // handle freeing 
the lists // handle the reading of the various field R_API RBinJavaAttrInfo *r_bin_java_read_next_attr(RBinJavaObj *bin, const ut64 offset, const ut8 *buf, const ut64 len); R_API RBinJavaCPTypeObj *r_bin_java_read_next_constant_pool_item(RBinJavaObj *bin, const ut64 offset, const ut8 *buf, ut64 len); R_API RBinJavaAttrMetas *r_bin_java_get_attr_type_by_name(const char *name); R_API RBinJavaCPTypeObj *r_bin_java_get_java_null_cp(void); R_API ut64 r_bin_java_read_class_file2(RBinJavaObj *bin, const ut64 offset, const ut8 *buf, ut64 len); R_API RBinJavaAttrInfo *r_bin_java_get_attr_from_field(RBinJavaField *field, R_BIN_JAVA_ATTR_TYPE attr_type, ut32 pos); R_API RBinJavaField *r_bin_java_read_next_field(RBinJavaObj *bin, const ut64 offset, const ut8 *buffer, const ut64 len); R_API RBinJavaField *r_bin_java_read_next_method(RBinJavaObj *bin, const ut64 offset, const ut8 *buffer, const ut64 len); R_API void r_bin_java_print_utf8_cp_summary(RBinJavaCPTypeObj *obj); R_API void r_bin_java_print_name_and_type_cp_summary(RBinJavaCPTypeObj *obj); R_API void r_bin_java_print_double_cp_summary(RBinJavaCPTypeObj *obj); R_API void r_bin_java_print_long_cp_summary(RBinJavaCPTypeObj *obj); R_API void r_bin_java_print_float_cp_summary(RBinJavaCPTypeObj *obj); R_API void r_bin_java_print_integer_cp_summary(RBinJavaCPTypeObj *obj); R_API void r_bin_java_print_string_cp_summary(RBinJavaCPTypeObj *obj); R_API void r_bin_java_print_classref_cp_summary(RBinJavaCPTypeObj *obj); R_API void r_bin_java_print_fieldref_cp_summary(RBinJavaCPTypeObj *obj); R_API void r_bin_java_print_methodref_cp_summary(RBinJavaCPTypeObj *obj); R_API void r_bin_java_print_interfacemethodref_cp_summary(RBinJavaCPTypeObj *obj); R_API void r_bin_java_print_unknown_cp_summary(RBinJavaCPTypeObj *obj); R_API void r_bin_java_print_null_cp_summary(RBinJavaCPTypeObj *obj); R_API void r_bin_java_print_unknown_attr_summary(RBinJavaAttrInfo *attr); R_API void r_bin_java_print_methodhandle_cp_summary(RBinJavaCPTypeObj *obj); 
R_API void r_bin_java_print_methodtype_cp_summary(RBinJavaCPTypeObj *obj); R_API void r_bin_java_print_invokedynamic_cp_summary(RBinJavaCPTypeObj *obj); R_API RBinJavaCPTypeObj *r_bin_java_unknown_cp_new(RBinJavaObj *bin, ut8 *buffer, ut64 sz); R_API RBinJavaInterfaceInfo *r_bin_java_interface_new(RBinJavaObj *bin, const ut8 *buf, ut64 sz); R_API RBinJavaInterfaceInfo *r_bin_java_read_next_interface_item(RBinJavaObj *bin, const ut64 offset, const ut8 *buf, ut64 len); R_API void r_bin_java_interface_free(void /*RBinJavaInterfaceInfo*/ *obj); R_API void r_bin_java_stack_frame_free(void /*RBinJavaStackMapFrame*/ *obj); R_API void r_bin_java_stack_map_table_attr_free(void /*RBinJavaAttrInfo*/ *attr); R_API void r_bin_java_verification_info_free(void /*RBinJavaVerificationObj*/ *obj); R_API void r_bin_java_print_stack_map_table_attr_summary(RBinJavaAttrInfo *obj); R_API void r_bin_java_print_stack_map_frame_summary(RBinJavaStackMapFrame *obj); R_API void r_bin_java_print_verification_info_summary(RBinJavaVerificationObj *obj); R_API RBinJavaStackMapFrame *r_bin_java_build_stack_frame_from_local_variable_table(RBinJavaObj *bin, RBinJavaAttrInfo *attr); R_API void U(r_bin_java_print_stack_map_append_frame_summary)(RBinJavaStackMapFrame * obj); R_API void U(r_bin_java_stack_frame_default_free)(void /*RBinJavaStackMapFrame*/ *stack_frame); // R_API void U(r_bin_java_stack_frame_do_nothing_free)(void /*RBinJavaStackMapFrame*/ *stack_frame); // R_API void U(r_bin_java_stack_frame_do_nothing_new)(RBinJavaObj * bin, RBinJavaStackMapFrame * stack_frame, ut64 offset); R_API RBinJavaStackMapFrame *r_bin_java_stack_map_frame_new(ut8 *buffer, ut64 sz, RBinJavaStackMapFrame *p_frame, ut64 buf_offset); // R_API RBinJavaStackMapFrame* r_bin_java_stack_map_frame_new (ut8* buffer, ut64 sz, ut64 buf_offset); R_API RBinJavaElementValue *r_bin_java_element_value_new(ut8 *buffer, ut64 sz, ut64 buf_offset); // R_API RBinJavaVerificationObj* r_bin_java_read_next_verification_info_new(ut8* 
buffer, ut64 sz, ut64 buf_offset); R_API RBinJavaAnnotation *r_bin_java_annotation_new(ut8 *buffer, ut64 sz, ut64 buf_offset); R_API RBinJavaElementValuePair *r_bin_java_element_pair_new(ut8 *buffer, ut64 sz, ut64 buf_offset); R_API RBinJavaElementValue *r_bin_java_element_value_new(ut8 *buffer, ut64 sz, ut64 buf_offset); // R_API RBinJavaBootStrapArgument* r_bin_java_bootstrap_method_argument_new(ut8* buffer, ut64 sz, ut64 buf_offset); R_API RBinJavaBootStrapMethod *r_bin_java_bootstrap_method_new(ut8 *buffer, ut64 sz, ut64 buf_offset); R_API RBinJavaAnnotationsArray *r_bin_java_annotation_array_new(ut8 *buffer, ut64 sz, ut64 buf_offset); R_API RBinJavaElementValueMetas *r_bin_java_get_ev_meta_from_tag(ut8 tag); R_API RBinJavaCPTypeMetas *U(r_bin_java_get_cp_meta_from_tag)(ut8 tag); R_API void r_bin_java_inner_classes_attr_entry_free(void /*RBinJavaClassesAttribute*/ *attr); R_API void r_bin_java_annotation_default_attr_free(void /*RBinJavaAttrInfo*/ *attr); R_API void r_bin_java_enclosing_methods_attr_free(void /*RBinJavaAttrInfo*/ *attr); R_API void r_bin_java_local_variable_type_table_attr_entry_free(void /*RBinJavaLocalVariableTypeAttribute*/ *lvattr); R_API void r_bin_java_local_variable_type_table_attr_free(void /*RBinJavaAttrInfo*/ *attr); R_API void r_bin_java_signature_attr_free(void /*RBinJavaAttrInfo*/ *attr); R_API void r_bin_java_source_debug_attr_free(void /*RBinJavaAttrInfo*/ *attr); R_API void r_bin_java_element_value_free(void /*RBinJavaElementValue*/ *element_value); R_API void r_bin_java_element_pair_free(void /*RBinJavaElementValuePair*/ *evp); R_API void r_bin_java_annotation_free(void /*RBinJavaAnnotation*/ *annotation); R_API void r_bin_java_rtv_annotations_attr_free(void /*RBinJavaAttrInfo*/ *attr); R_API void r_bin_java_rti_annotations_attr_free(void /*RBinJavaAttrInfo*/ *attr); R_API void r_bin_java_annotation_array_free(void /*RBinJavaAnnotationsArray*/ *annotation_array); R_API void r_bin_java_bootstrap_methods_attr_free(void 
/*RBinJavaAttrInfo*/ *attr); R_API void r_bin_java_bootstrap_method_free(void /*RBinJavaBootStrapMethod*/ *bsm); R_API void r_bin_java_bootstrap_method_argument_free(void /*RBinJavaBootStrapArgument*/ *bsm_arg); R_API void r_bin_java_rtvp_annotations_attr_free(void /*RBinJavaAttrInfo*/ *attr); R_API void r_bin_java_rtip_annotations_attr_free(void /*RBinJavaAttrInfo*/ *attr); R_API void r_bin_java_unknown_attr_free(void /*RBinJavaAttrInfo*/ *attr); R_API void r_bin_java_code_attr_free(void /*RBinJavaAttrInfo*/ *attr); R_API void r_bin_java_constant_value_attr_free(void /*RBinJavaAttrInfo*/ *attr); R_API void r_bin_java_deprecated_attr_free(void /*RBinJavaAttrInfo*/ *attr); R_API void r_bin_java_exceptions_attr_free(void /*RBinJavaAttrInfo*/ *attr); R_API void r_bin_java_inner_classes_attr_free(void /*RBinJavaAttrInfo*/ *attr); R_API void r_bin_java_line_number_table_attr_free(void /*RBinJavaAttrInfo*/ *attr); R_API void r_bin_java_local_variable_table_attr_free(void /*RBinJavaAttrInfo*/ *attr); R_API void r_bin_java_source_code_file_attr_free(void /*RBinJavaAttrInfo*/ *attr); R_API void r_bin_java_synthetic_attr_free(void /*RBinJavaAttrInfo*/ *attr); R_API void r_bin_java_print_annotation_default_attr_summary(RBinJavaAttrInfo *attr); R_API void r_bin_java_print_enclosing_methods_attr_summary(RBinJavaAttrInfo *attr); R_API void r_bin_java_print_local_variable_type_attr_summary(RBinJavaLocalVariableTypeAttribute *lvattr); R_API void r_bin_java_print_local_variable_type_table_attr_summary(RBinJavaAttrInfo *attr); R_API void r_bin_java_print_signature_attr_summary(RBinJavaAttrInfo *attr); R_API void r_bin_java_print_source_debug_attr_summary(RBinJavaAttrInfo *attr); R_API void r_bin_java_print_element_value_summary(RBinJavaElementValue *element_value); R_API void r_bin_java_print_annotation_summary(RBinJavaAnnotation *annotation); R_API void r_bin_java_print_element_pair_summary(RBinJavaElementValuePair *evp); R_API void 
r_bin_java_print_bootstrap_methods_attr_summary(RBinJavaAttrInfo *attr); // R_API void r_bin_java_bootstrap_method_summary(RBinJavaBootStrapMethod *bsm); // R_API void r_bin_java_bootstrap_method_argument_summary(RBinJavaBootStrapArgument *bsm_arg); R_API void r_bin_java_print_rtv_annotations_attr_summary(RBinJavaAttrInfo *attr); R_API void r_bin_java_print_rti_annotations_attr_summary(RBinJavaAttrInfo *attr); R_API void r_bin_java_print_annotation_array_summary(RBinJavaAnnotationsArray *annotation_array); R_API void r_bin_java_print_rtvp_annotations_attr_summary(RBinJavaAttrInfo *attr); R_API void r_bin_java_print_rtip_annotations_attr_summary(RBinJavaAttrInfo *attr); R_API void r_bin_java_attribute_free(void /*RBinJavaAttrInfo*/ *attr); R_API void r_bin_java_constant_pool(void /*RBinJavaCPTypeObj*/ *obj); R_API void r_bin_java_print_field_summary(RBinJavaField *field); // R_API void r_bin_java_print_interface_summary(RBinJavaField *field); R_API void r_bin_java_print_method_summary(RBinJavaField *field); R_API void r_bin_java_print_code_exceptions_attr_summary(RBinJavaExceptionEntry *exc_entry); R_API void r_bin_java_print_code_attr_summary(RBinJavaAttrInfo *attr); R_API void r_bin_java_print_constant_value_attr_summary(RBinJavaAttrInfo *attr); R_API void r_bin_java_print_deprecated_attr_summary(RBinJavaAttrInfo *attr); R_API void r_bin_java_print_exceptions_attr_summary(RBinJavaAttrInfo *attr); R_API void r_bin_java_print_classes_attr_summary(RBinJavaClassesAttribute *icattr); R_API void r_bin_java_print_inner_classes_attr_summary(RBinJavaAttrInfo *attr); R_API void r_bin_java_print_line_number_table_attr_summary(RBinJavaAttrInfo *attr); R_API void r_bin_java_print_local_variable_attr_summary(RBinJavaLocalVariableAttribute *lvattr); R_API void r_bin_java_print_local_variable_table_attr_summary(RBinJavaAttrInfo *attr); R_API void r_bin_java_print_source_code_file_attr_summary(RBinJavaAttrInfo *attr); R_API void 
r_bin_java_print_synthetic_attr_summary(RBinJavaAttrInfo *attr); R_API void r_bin_java_print_attr_summary(RBinJavaAttrInfo *attr); R_API RBinJavaAttrInfo *r_bin_java_read_next_attr_from_buffer(RBinJavaObj *bin, ut8 *buffer, st64 sz, st64 buf_offset); R_API RBinJavaAttrInfo *r_bin_java_unknown_attr_new(RBinJavaObj *bin, ut8 *buf, ut64 sz, ut64 buf_offset); R_API RBinJavaAttrInfo *r_bin_java_annotation_default_attr_new(RBinJavaObj *bin, ut8 *buf, ut64 sz, ut64 buf_offset); R_API RBinJavaAttrInfo *r_bin_java_enclosing_methods_attr_new(RBinJavaObj *bin, ut8 *buf, ut64 sz, ut64 buf_offset); R_API RBinJavaAttrInfo *r_bin_java_local_variable_type_table_attr_new(RBinJavaObj *bin, ut8 *buf, ut64 sz, ut64 buf_offset); R_API RBinJavaAttrInfo *r_bin_java_signature_attr_new(RBinJavaObj *bin, ut8 *buf, ut64 sz, ut64 buf_offset); R_API RBinJavaAttrInfo *r_bin_java_source_debug_attr_new(RBinJavaObj *bin, ut8 *buf, ut64 sz, ut64 buf_offset); R_API RBinJavaAttrInfo *r_bin_java_bootstrap_methods_attr_new(RBinJavaObj *bin, ut8 *buf, ut64 sz, ut64 buf_offset); R_API RBinJavaAttrInfo *r_bin_java_rtv_annotations_attr_new(RBinJavaObj *bin, ut8 *buf, ut64 sz, ut64 buf_offset); R_API RBinJavaAttrInfo *r_bin_java_rti_annotations_attr_new(RBinJavaObj *bin, ut8 *buf, ut64 sz, ut64 buf_offset); R_API RBinJavaAttrInfo *r_bin_java_rtvp_annotations_attr_new(RBinJavaObj *bin, ut8 *buf, ut64 sz, ut64 buf_offset); R_API RBinJavaAttrInfo *r_bin_java_rtip_annotations_attr_new(RBinJavaObj *bin, ut8 *buf, ut64 sz, ut64 buf_offset); R_API RBinJavaAttrInfo *r_bin_java_code_attr_new(RBinJavaObj *bin, ut8 *buf, ut64 sz, ut64 buf_offset); R_API RBinJavaAttrInfo *r_bin_java_constant_value_attr_new(RBinJavaObj *bin, ut8 *buf, ut64 sz, ut64 buf_offset); R_API RBinJavaAttrInfo *r_bin_java_deprecated_attr_new(RBinJavaObj *bin, ut8 *buf, ut64 sz, ut64 buf_offset); R_API RBinJavaAttrInfo *r_bin_java_exceptions_attr_new(RBinJavaObj *bin, ut8 *buf, ut64 sz, ut64 buf_offset); R_API RBinJavaAttrInfo 
*r_bin_java_inner_classes_attr_new(RBinJavaObj *bin, ut8 *buf, ut64 sz, ut64 buf_offset); R_API RBinJavaAttrInfo *r_bin_java_line_number_table_attr_new(RBinJavaObj *bin, ut8 *buf, ut64 sz, ut64 buf_offset); R_API RBinJavaAttrInfo *r_bin_java_local_variable_table_attr_new(RBinJavaObj *bin, ut8 *buf, ut64 sz, ut64 buf_offset); R_API RBinJavaAttrInfo *r_bin_java_source_code_file_attr_new(RBinJavaObj *bin, ut8 *buf, ut64 sz, ut64 buf_offset); R_API RBinJavaAttrInfo *r_bin_java_stack_map_table_attr_new(RBinJavaObj *bin, ut8 *buf, ut64 sz, ut64 buf_offset); R_API RBinJavaAttrInfo *r_bin_java_synthetic_attr_new(RBinJavaObj *bin, ut8 *buf, ut64 sz, ut64 buf_offset); R_API ut64 r_bin_java_unknown_attr_calc_size(RBinJavaAttrInfo *attr); R_API ut64 r_bin_java_annotation_default_attr_calc_size(RBinJavaAttrInfo *attr); R_API ut64 r_bin_java_enclosing_methods_attr_calc_size(RBinJavaAttrInfo *attr); R_API ut64 r_bin_java_local_variable_type_table_attr_calc_size(RBinJavaAttrInfo *attr); R_API ut64 r_bin_java_signature_attr_calc_size(RBinJavaAttrInfo *attr); R_API ut64 r_bin_java_source_debug_attr_calc_size(RBinJavaAttrInfo *attr); R_API ut64 r_bin_java_bootstrap_methods_attr_calc_size(RBinJavaAttrInfo *attr); R_API ut64 r_bin_java_rtv_annotations_attr_calc_size(RBinJavaAttrInfo *attr); R_API ut64 r_bin_java_rti_annotations_attr_calc_size(RBinJavaAttrInfo *attr); R_API ut64 r_bin_java_rtvp_annotations_attr_calc_size(RBinJavaAttrInfo *attr); R_API ut64 r_bin_java_rtip_annotations_attr_calc_size(RBinJavaAttrInfo *attr); R_API ut64 r_bin_java_code_attr_calc_size(RBinJavaAttrInfo *attr); R_API ut64 r_bin_java_constant_value_attr_calc_size(RBinJavaAttrInfo *attr); R_API ut64 r_bin_java_deprecated_attr_calc_size(RBinJavaAttrInfo *attr); R_API ut64 r_bin_java_exceptions_attr_calc_size(RBinJavaAttrInfo *attr); R_API ut64 r_bin_java_inner_classes_attr_calc_size(RBinJavaAttrInfo *attr); R_API ut64 r_bin_java_line_number_table_attr_calc_size(RBinJavaAttrInfo *attr); R_API ut64 
r_bin_java_local_variable_table_attr_calc_size(RBinJavaAttrInfo *attr); R_API ut64 r_bin_java_source_code_file_attr_calc_size(RBinJavaAttrInfo *attr); R_API ut64 r_bin_java_stack_map_table_attr_calc_size(RBinJavaAttrInfo *attr); R_API ut64 r_bin_java_synthetic_attr_calc_size(RBinJavaAttrInfo *attr); R_API ut64 r_bin_java_bootstrap_method_calc_size(RBinJavaBootStrapMethod *bsm); R_API ut64 r_bin_java_element_pair_calc_size(RBinJavaElementValuePair *evp); R_API ut64 r_bin_java_element_value_calc_size(RBinJavaElementValue *element_value); R_API ut64 r_bin_java_unknown_cp_calc_size(RBinJavaCPTypeObj *obj); R_API ut64 r_bin_java_class_cp_calc_size(RBinJavaCPTypeObj *obj); R_API ut64 r_bin_java_fieldref_cp_calc_size(RBinJavaCPTypeObj *obj); R_API ut64 r_bin_java_methodref_cp_calc_size(RBinJavaCPTypeObj *obj); R_API ut64 r_bin_java_interfacemethodref_cp_calc_size(RBinJavaCPTypeObj *obj); R_API ut64 r_bin_java_name_and_type_cp_calc_size(RBinJavaCPTypeObj *obj); R_API ut64 r_bin_java_string_cp_calc_size(RBinJavaCPTypeObj *obj); R_API ut64 r_bin_java_integer_cp_calc_size(RBinJavaCPTypeObj *obj); R_API ut64 r_bin_java_float_cp_calc_size(RBinJavaCPTypeObj *obj); R_API ut64 r_bin_java_long_cp_calc_size(RBinJavaCPTypeObj *obj); R_API ut64 r_bin_java_double_cp_calc_size(RBinJavaCPTypeObj *obj); R_API ut64 r_bin_java_utf8_cp_calc_size(RBinJavaCPTypeObj *obj); R_API ut64 r_bin_java_do_nothing_calc_size(RBinJavaCPTypeObj *obj); R_API ut64 r_bin_java_methodhandle_cp_calc_size(RBinJavaCPTypeObj *obj); R_API ut64 r_bin_java_methodtype_cp_calc_size(RBinJavaCPTypeObj *obj); R_API ut64 r_bin_java_invokedynamic_cp_calc_size(RBinJavaCPTypeObj *obj); R_API RBinJavaStackMapFrame *r_bin_java_default_stack_frame(void); R_API RList *r_bin_java_find_cp_const_by_val_float(RBinJavaObj *bin_obj, const ut8 *bytes, ut32 len); R_API RList *r_bin_java_find_cp_const_by_val_double(RBinJavaObj *bin_obj, const ut8 *bytes, ut32 len); R_API RList *r_bin_java_find_cp_const_by_val_int(RBinJavaObj *bin_obj, 
const ut8 *bytes, ut32 len); R_API RList *r_bin_java_find_cp_const_by_val_long(RBinJavaObj *bin_obj, const ut8 *bytes, ut32 len); R_API RList *r_bin_java_find_cp_const_by_val_utf8(RBinJavaObj *bin_obj, const ut8 *bytes, ut32 len); R_API ut8 *r_bin_java_cp_append_classref_and_name(RBinJavaObj *bin, ut32 *out_sz, const char *classname, const ut32 classname_len); R_API ut8 *U(r_bin_java_cp_append_ref_cname_fname_ftype)(RBinJavaObj * bin, ut32 * out_sz, ut8 tag, const char *cname, const ut32 c_len, const char *fname, const ut32 f_len, const char *tname, const ut32 t_len); R_API ut8 *r_bin_java_cp_get_classref(RBinJavaObj *bin, ut32 *out_sz, const char *classname, const ut32 classname_len, const ut16 name_idx); R_API ut8 *U(r_bin_java_cp_get_method_ref)(RBinJavaObj * bin, ut32 * out_sz, ut16 class_idx, ut16 name_and_type_idx); R_API ut8 *U(r_bin_java_cp_get_field_ref)(RBinJavaObj * bin, ut32 * out_sz, ut16 class_idx, ut16 name_and_type_idx); R_API ut8 *r_bin_java_cp_get_fm_ref(RBinJavaObj *bin, ut32 *out_sz, ut8 tag, ut16 class_idx, ut16 name_and_type_idx); R_API ut8 *r_bin_java_cp_get_2_ut16(RBinJavaObj *bin, ut32 *out_sz, ut8 tag, ut16 ut16_one, ut16 ut16_two); R_API ut8 *r_bin_java_cp_get_name_type(RBinJavaObj *bin, ut32 *out_sz, ut16 name_idx, ut16 type_idx); static char *convert_string(const char *bytes, ut32 len) { ut32 idx = 0, pos = 0; ut32 str_sz = 32 * len + 1; char *cpy_buffer = len > 0 ? 
malloc (str_sz) : NULL; if (!cpy_buffer) { return cpy_buffer; } // 4x is the increase from byte to \xHH where HH represents hexed byte memset (cpy_buffer, 0, str_sz); while (idx < len && pos < len) { if (dso_json_char_needs_hexing (bytes[idx])) { if (pos + 2 < len) { free (cpy_buffer); return NULL; } sprintf (cpy_buffer + pos, "\\x%02x", bytes[idx]); pos += 4; } else { cpy_buffer[pos] = bytes[idx]; pos++; } idx++; } return cpy_buffer; } // taken from LLVM Code Byte Swap // TODO: move into r_util R_API ut32 U(r_bin_java_swap_uint)(ut32 x) { const ut32 Byte0 = x & 0x000000FF; const ut32 Byte1 = x & 0x0000FF00; const ut32 Byte2 = x & 0x00FF0000; const ut32 Byte3 = x & 0xFF000000; return (Byte0 << 24) | (Byte1 << 8) | (Byte2 >> 8) | (Byte3 >> 24); } static RBinJavaAccessFlags FIELD_ACCESS_FLAGS[] = { { "public", R_BIN_JAVA_FIELD_ACC_PUBLIC, 6 }, { "private", R_BIN_JAVA_FIELD_ACC_PRIVATE, 7 }, { "protected", R_BIN_JAVA_FIELD_ACC_PROTECTED, 9 }, { "static", R_BIN_JAVA_FIELD_ACC_STATIC, 6 }, { "final", R_BIN_JAVA_FIELD_ACC_FINAL, 5 }, { "undefined.0x0020", 0x0020, 16 }, { "volatile", R_BIN_JAVA_FIELD_ACC_VOLATILE, 8 }, { "transient", R_BIN_JAVA_FIELD_ACC_TRANSIENT, 9 }, { "undefined.0x0100", 0x0100, 16 }, { "undefined.0x0200", 0x0200, 16 }, { "undefined.0x0400", 0x0400, 16 }, { "undefined.0x0800", 0x0800, 16 }, { "synthetic", R_BIN_JAVA_FIELD_ACC_SYNTHETIC, 9 }, { "undefined.0x2000", 0x2000, 16 }, { "enum", R_BIN_JAVA_FIELD_ACC_ENUM, 16 }, { "undefined.0x8000", 0x8000, 16 }, { NULL, 0, 0 } }; static RBinJavaAccessFlags METHOD_ACCESS_FLAGS[] = { { "public", R_BIN_JAVA_METHOD_ACC_PUBLIC, 6 }, { "private", R_BIN_JAVA_METHOD_ACC_PRIVATE, 7 }, { "protected", R_BIN_JAVA_METHOD_ACC_PROTECTED, 9 }, { "static", R_BIN_JAVA_METHOD_ACC_STATIC, 6 }, { "final", R_BIN_JAVA_METHOD_ACC_FINAL, 5 }, { "synchronized", R_BIN_JAVA_METHOD_ACC_SYNCHRONIZED, 12 }, { "bridge", R_BIN_JAVA_METHOD_ACC_BRIDGE, 6 }, { "varargs", R_BIN_JAVA_METHOD_ACC_VARARGS, 7 }, { "native", 
R_BIN_JAVA_METHOD_ACC_NATIVE, 6 }, { "interface", R_BIN_JAVA_METHOD_ACC_INTERFACE, 9 }, { "abstract", R_BIN_JAVA_METHOD_ACC_ABSTRACT, 8 }, { "strict", R_BIN_JAVA_METHOD_ACC_STRICT, 6 }, { "synthetic", R_BIN_JAVA_METHOD_ACC_SYNTHETIC, 9 }, { "annotation", R_BIN_JAVA_METHOD_ACC_ANNOTATION, 10 }, { "enum", R_BIN_JAVA_METHOD_ACC_ENUM, 4 }, { "undefined.0x8000", 0x8000, 16 }, { NULL, 0, 0 } }; // XXX - Fix these there are some incorrect ongs static RBinJavaAccessFlags CLASS_ACCESS_FLAGS[] = { { "public", R_BIN_JAVA_CLASS_ACC_PUBLIC, 6 }, { "undefined.0x0002", 0x0002, 16 }, { "undefined.0x0004", 0x0004, 16 }, { "undefined.0x0008", 0x0008, 16 }, { "final", R_BIN_JAVA_CLASS_ACC_FINAL, 5 }, { "super", R_BIN_JAVA_CLASS_ACC_SUPER, 5 }, { "undefined.0x0040", 0x0040, 16 }, { "undefined.0x0080", 0x0080, 16 }, { "undefined.0x0100", 0x0100, 16 }, { "interface", R_BIN_JAVA_CLASS_ACC_INTERFACE, 9 }, { "abstract", R_BIN_JAVA_CLASS_ACC_ABSTRACT, 8 }, { "undefined.0x0800", 0x0800, 16 }, { "synthetic", R_BIN_JAVA_CLASS_ACC_SYNTHETIC, 9 }, { "annotation", R_BIN_JAVA_CLASS_ACC_ANNOTATION, 10 }, { "enum", R_BIN_JAVA_CLASS_ACC_ENUM, 4 }, { "undefined.0x8000", 0x8000, 16 }, { NULL, 0, 0 } }; static RBinJavaRefMetas R_BIN_JAVA_REF_METAS[] = { { "Unknown", R_BIN_JAVA_REF_UNKNOWN }, { "GetField", R_BIN_JAVA_REF_GETFIELD }, { "GetStatic", R_BIN_JAVA_REF_GETSTATIC }, { "PutField", R_BIN_JAVA_REF_PUTFIELD }, { "PutStatic", R_BIN_JAVA_REF_PUTSTATIC }, { "InvokeVirtual", R_BIN_JAVA_REF_INVOKEVIRTUAL }, { "InvokeStatic", R_BIN_JAVA_REF_INVOKESTATIC }, { "InvokeSpecial", R_BIN_JAVA_REF_INVOKESPECIAL }, { "NewInvokeSpecial", R_BIN_JAVA_REF_NEWINVOKESPECIAL }, { "InvokeInterface", R_BIN_JAVA_REF_INVOKEINTERFACE } }; static const ut16 R_BIN_JAVA_ELEMENT_VALUE_METAS_SZ = 14; static R_TH_LOCAL bool R_BIN_JAVA_NULL_TYPE_INITTED = false; static R_TH_LOCAL RBinJavaObj *R_BIN_JAVA_GLOBAL_BIN = NULL; static RBinJavaElementValueMetas R_BIN_JAVA_ELEMENT_VALUE_METAS[] = { { "Byte", R_BIN_JAVA_EV_TAG_BYTE, NULL }, 
{ "Char", R_BIN_JAVA_EV_TAG_CHAR, NULL }, { "Double", R_BIN_JAVA_EV_TAG_DOUBLE, NULL }, { "Float", R_BIN_JAVA_EV_TAG_FLOAT, NULL }, { "Integer", R_BIN_JAVA_EV_TAG_INT, NULL }, { "Long", R_BIN_JAVA_EV_TAG_LONG, NULL }, { "Short", R_BIN_JAVA_EV_TAG_SHORT, NULL }, { "Boolean", R_BIN_JAVA_EV_TAG_BOOLEAN, NULL }, { "Array of ", R_BIN_JAVA_EV_TAG_ARRAY, NULL }, { "String", R_BIN_JAVA_EV_TAG_STRING, NULL }, { "Enum", R_BIN_JAVA_EV_TAG_ENUM, NULL }, { "Class", R_BIN_JAVA_EV_TAG_CLASS, NULL }, { "Annotation", R_BIN_JAVA_EV_TAG_ANNOTATION, NULL }, { "Unknown", R_BIN_JAVA_EV_TAG_UNKNOWN, NULL }, }; static RBinJavaVerificationMetas R_BIN_JAVA_VERIFICATION_METAS[] = { { "Top", R_BIN_JAVA_STACKMAP_TOP }, { "Integer", R_BIN_JAVA_STACKMAP_INTEGER }, { "Float", R_BIN_JAVA_STACKMAP_FLOAT }, { "Double", R_BIN_JAVA_STACKMAP_DOUBLE }, { "Long", R_BIN_JAVA_STACKMAP_LONG }, { "NULL", R_BIN_JAVA_STACKMAP_NULL }, { "This", R_BIN_JAVA_STACKMAP_THIS }, { "Object", R_BIN_JAVA_STACKMAP_OBJECT }, { "Uninitialized", R_BIN_JAVA_STACKMAP_UNINIT }, { "Unknown", R_BIN_JAVA_STACKMAP_UNKNOWN } }; static RBinJavaStackMapFrameMetas R_BIN_JAVA_STACK_MAP_FRAME_METAS[] = { { "ImplicitStackFrame", R_BIN_JAVA_STACK_FRAME_IMPLICIT, NULL }, { "Same", R_BIN_JAVA_STACK_FRAME_SAME, NULL }, { "SameLocals1StackItem", R_BIN_JAVA_STACK_FRAME_SAME_LOCALS_1, NULL }, { "Chop", R_BIN_JAVA_STACK_FRAME_CHOP, NULL }, { "SameFrameExtended", R_BIN_JAVA_STACK_FRAME_SAME_FRAME_EXTENDED, NULL }, { "Append", R_BIN_JAVA_STACK_FRAME_APPEND, NULL }, { "FullFrame", R_BIN_JAVA_STACK_FRAME_FULL_FRAME, NULL }, { "Reserved", R_BIN_JAVA_STACK_FRAME_RESERVED, NULL } }; static RBinJavaCPTypeObjectAllocs R_BIN_ALLOCS_CONSTANTS[] = { { r_bin_java_do_nothing_new, r_bin_java_do_nothing_free, r_bin_java_print_null_cp_summary, r_bin_java_do_nothing_calc_size, r_bin_java_print_null_cp_stringify }, { r_bin_java_utf8_cp_new, r_bin_java_utf8_info_free, r_bin_java_print_utf8_cp_summary, r_bin_java_utf8_cp_calc_size, r_bin_java_print_utf8_cp_stringify 
}, { r_bin_java_unknown_cp_new, r_bin_java_default_free, r_bin_java_print_unknown_cp_summary, r_bin_java_unknown_cp_calc_size, r_bin_java_print_unknown_cp_stringify }, { r_bin_java_integer_cp_new, r_bin_java_default_free, r_bin_java_print_integer_cp_summary, r_bin_java_integer_cp_calc_size, r_bin_java_print_integer_cp_stringify }, { r_bin_java_float_cp_new, r_bin_java_default_free, r_bin_java_print_float_cp_summary, r_bin_java_float_cp_calc_size, r_bin_java_print_float_cp_stringify }, { r_bin_java_long_cp_new, r_bin_java_default_free, r_bin_java_print_long_cp_summary, r_bin_java_long_cp_calc_size, r_bin_java_print_long_cp_stringify }, { r_bin_java_double_cp_new, r_bin_java_default_free, r_bin_java_print_double_cp_summary, r_bin_java_double_cp_calc_size, r_bin_java_print_double_cp_stringify }, { r_bin_java_class_cp_new, r_bin_java_default_free, r_bin_java_print_classref_cp_summary, r_bin_java_class_cp_calc_size, r_bin_java_print_classref_cp_stringify }, { r_bin_java_string_cp_new, r_bin_java_default_free, r_bin_java_print_string_cp_summary, r_bin_java_string_cp_calc_size, r_bin_java_print_string_cp_stringify }, { r_bin_java_fieldref_cp_new, r_bin_java_default_free, r_bin_java_print_fieldref_cp_summary, r_bin_java_fieldref_cp_calc_size, r_bin_java_print_fieldref_cp_stringify }, { r_bin_java_methodref_cp_new, r_bin_java_default_free, r_bin_java_print_methodref_cp_summary, r_bin_java_methodref_cp_calc_size, r_bin_java_print_methodref_cp_stringify }, { r_bin_java_interfacemethodref_cp_new, r_bin_java_default_free, r_bin_java_print_interfacemethodref_cp_summary, r_bin_java_interfacemethodref_cp_calc_size, r_bin_java_print_interfacemethodref_cp_stringify }, { r_bin_java_name_and_type_cp_new, r_bin_java_default_free, r_bin_java_print_name_and_type_cp_summary, r_bin_java_name_and_type_cp_calc_size, r_bin_java_print_name_and_type_cp_stringify }, { NULL, NULL, NULL, NULL, NULL }, { NULL, NULL, NULL, NULL, NULL }, { r_bin_java_methodhandle_cp_new, r_bin_java_default_free, 
r_bin_java_print_methodhandle_cp_summary, r_bin_java_methodhandle_cp_calc_size, r_bin_java_print_methodhandle_cp_stringify }, { r_bin_java_methodtype_cp_new, r_bin_java_default_free, r_bin_java_print_methodtype_cp_summary, r_bin_java_methodtype_cp_calc_size, r_bin_java_print_methodtype_cp_stringify }, { NULL, NULL, NULL, NULL, NULL }, { r_bin_java_invokedynamic_cp_new, r_bin_java_default_free, r_bin_java_print_invokedynamic_cp_summary, r_bin_java_invokedynamic_cp_calc_size, r_bin_java_print_invokedynamic_cp_stringify }, }; static RBinJavaCPTypeObj R_BIN_JAVA_NULL_TYPE; static ut8 R_BIN_JAVA_CP_METAS_SZ = 12; static RBinJavaCPTypeMetas R_BIN_JAVA_CP_METAS[] = { // Each field has a name pointer and a tag field { "NULL", R_BIN_JAVA_CP_NULL, 0, &R_BIN_ALLOCS_CONSTANTS[0] }, { "Utf8", R_BIN_JAVA_CP_UTF8, 3, &R_BIN_ALLOCS_CONSTANTS[1] }, // 2 bytes = length, N bytes string (containts a pointer in the field) { "Unknown", R_BIN_JAVA_CP_UNKNOWN, 0, &R_BIN_ALLOCS_CONSTANTS[2] }, { "Integer", R_BIN_JAVA_CP_INTEGER, 5, &R_BIN_ALLOCS_CONSTANTS[3] }, // 4 bytes { "Float", R_BIN_JAVA_CP_FLOAT, 5, &R_BIN_ALLOCS_CONSTANTS[4] }, // 4 bytes { "Long", R_BIN_JAVA_CP_LONG, 9, &R_BIN_ALLOCS_CONSTANTS[5] }, // 4 high 4 low { "Double", R_BIN_JAVA_CP_DOUBLE, 9, &R_BIN_ALLOCS_CONSTANTS[6] }, // 4 high 4 low { "Class", R_BIN_JAVA_CP_CLASS, 3, &R_BIN_ALLOCS_CONSTANTS[7] }, // 2 name_idx { "String", R_BIN_JAVA_CP_STRING, 3, &R_BIN_ALLOCS_CONSTANTS[8] }, // 2 string_idx { "FieldRef", R_BIN_JAVA_CP_FIELDREF, 5, &R_BIN_ALLOCS_CONSTANTS[9] }, // 2 class idx, 2 name/type_idx { "MethodRef", R_BIN_JAVA_CP_METHODREF, 5, &R_BIN_ALLOCS_CONSTANTS[10] }, // 2 class idx, 2 name/type_idx { "InterfaceMethodRef", R_BIN_JAVA_CP_INTERFACEMETHOD_REF, 5, &R_BIN_ALLOCS_CONSTANTS[11] }, // 2 class idx, 2 name/type_idx { "NameAndType", R_BIN_JAVA_CP_NAMEANDTYPE, 5, &R_BIN_ALLOCS_CONSTANTS[12] }, // 4 high 4 low { "Unknown", R_BIN_JAVA_CP_UNKNOWN, 0, &R_BIN_ALLOCS_CONSTANTS[2] }, { "Unknown", R_BIN_JAVA_CP_UNKNOWN, 0, 
&R_BIN_ALLOCS_CONSTANTS[2] }, { "MethodHandle", R_BIN_JAVA_CP_METHODHANDLE, 4, &R_BIN_ALLOCS_CONSTANTS[15] }, // 4 high 4 low { "MethodType", R_BIN_JAVA_CP_METHODTYPE, 3, &R_BIN_ALLOCS_CONSTANTS[16] }, // 4 high 4 low { "Unknown", R_BIN_JAVA_CP_UNKNOWN, 0, &R_BIN_ALLOCS_CONSTANTS[2] }, { "InvokeDynamic", R_BIN_JAVA_CP_INVOKEDYNAMIC, 5, &R_BIN_ALLOCS_CONSTANTS[18] }, // 4 high 4 low }; static RBinJavaAttrInfoObjectAllocs RBIN_JAVA_ATTRS_ALLOCS[] = { { r_bin_java_annotation_default_attr_new, r_bin_java_annotation_default_attr_free, r_bin_java_print_annotation_default_attr_summary, r_bin_java_annotation_default_attr_calc_size }, { r_bin_java_bootstrap_methods_attr_new, r_bin_java_bootstrap_methods_attr_free, r_bin_java_print_bootstrap_methods_attr_summary, r_bin_java_bootstrap_methods_attr_calc_size }, { r_bin_java_code_attr_new, r_bin_java_code_attr_free, r_bin_java_print_code_attr_summary, r_bin_java_code_attr_calc_size }, { r_bin_java_constant_value_attr_new, r_bin_java_constant_value_attr_free, r_bin_java_print_constant_value_attr_summary, r_bin_java_constant_value_attr_calc_size }, { r_bin_java_deprecated_attr_new, r_bin_java_deprecated_attr_free, r_bin_java_print_deprecated_attr_summary, r_bin_java_deprecated_attr_calc_size }, { r_bin_java_enclosing_methods_attr_new, r_bin_java_enclosing_methods_attr_free, r_bin_java_print_enclosing_methods_attr_summary, r_bin_java_enclosing_methods_attr_calc_size }, { r_bin_java_exceptions_attr_new, r_bin_java_exceptions_attr_free, r_bin_java_print_exceptions_attr_summary, r_bin_java_exceptions_attr_calc_size }, { r_bin_java_inner_classes_attr_new, r_bin_java_inner_classes_attr_free, r_bin_java_print_inner_classes_attr_summary, r_bin_java_inner_classes_attr_calc_size }, { r_bin_java_line_number_table_attr_new, r_bin_java_line_number_table_attr_free, r_bin_java_print_line_number_table_attr_summary, r_bin_java_line_number_table_attr_calc_size }, { r_bin_java_local_variable_table_attr_new, 
r_bin_java_local_variable_table_attr_free, r_bin_java_print_local_variable_table_attr_summary, r_bin_java_local_variable_table_attr_calc_size }, { r_bin_java_local_variable_type_table_attr_new, r_bin_java_local_variable_type_table_attr_free, r_bin_java_print_local_variable_type_table_attr_summary, r_bin_java_local_variable_type_table_attr_calc_size }, { r_bin_java_rti_annotations_attr_new, r_bin_java_rti_annotations_attr_free, r_bin_java_print_rti_annotations_attr_summary, r_bin_java_rti_annotations_attr_calc_size }, { r_bin_java_rtip_annotations_attr_new, r_bin_java_rtip_annotations_attr_free, r_bin_java_print_rtip_annotations_attr_summary, r_bin_java_rtip_annotations_attr_calc_size }, { r_bin_java_rtv_annotations_attr_new, r_bin_java_rtv_annotations_attr_free, r_bin_java_print_rtv_annotations_attr_summary, r_bin_java_rtv_annotations_attr_calc_size }, { r_bin_java_rtvp_annotations_attr_new, r_bin_java_rtvp_annotations_attr_free, r_bin_java_print_rtvp_annotations_attr_summary, r_bin_java_rtvp_annotations_attr_calc_size }, { r_bin_java_signature_attr_new, r_bin_java_signature_attr_free, r_bin_java_print_signature_attr_summary, r_bin_java_signature_attr_calc_size }, { r_bin_java_source_debug_attr_new, r_bin_java_source_debug_attr_free, r_bin_java_print_source_debug_attr_summary, r_bin_java_source_debug_attr_calc_size }, { r_bin_java_source_code_file_attr_new, r_bin_java_source_code_file_attr_free, r_bin_java_print_source_code_file_attr_summary, r_bin_java_source_code_file_attr_calc_size }, { r_bin_java_stack_map_table_attr_new, r_bin_java_stack_map_table_attr_free, r_bin_java_print_stack_map_table_attr_summary, r_bin_java_stack_map_table_attr_calc_size }, { r_bin_java_synthetic_attr_new, r_bin_java_synthetic_attr_free, r_bin_java_print_synthetic_attr_summary, r_bin_java_synthetic_attr_calc_size }, { r_bin_java_unknown_attr_new, r_bin_java_unknown_attr_free, r_bin_java_print_unknown_attr_summary, r_bin_java_unknown_attr_calc_size } }; // R_API ut32 
RBIN_JAVA_ATTRS_METAS_SZ = 21; static ut32 RBIN_JAVA_ATTRS_METAS_SZ = 20; static RBinJavaAttrMetas RBIN_JAVA_ATTRS_METAS[] = { { "AnnotationDefault", R_BIN_JAVA_ATTR_TYPE_ANNOTATION_DEFAULT_ATTR, &RBIN_JAVA_ATTRS_ALLOCS[0] }, { "BootstrapMethods", R_BIN_JAVA_ATTR_TYPE_BOOTSTRAP_METHODS_ATTR, &RBIN_JAVA_ATTRS_ALLOCS[1] }, { "Code", R_BIN_JAVA_ATTR_TYPE_CODE_ATTR, &RBIN_JAVA_ATTRS_ALLOCS[2] }, { "ConstantValue", R_BIN_JAVA_ATTR_TYPE_CONST_VALUE_ATTR, &RBIN_JAVA_ATTRS_ALLOCS[3] }, { "Deperecated", R_BIN_JAVA_ATTR_TYPE_DEPRECATED_ATTR, &RBIN_JAVA_ATTRS_ALLOCS[4] }, { "EnclosingMethod", R_BIN_JAVA_ATTR_TYPE_ENCLOSING_METHOD_ATTR, &RBIN_JAVA_ATTRS_ALLOCS[5] }, { "Exceptions", R_BIN_JAVA_ATTR_TYPE_EXCEPTIONS_ATTR, &RBIN_JAVA_ATTRS_ALLOCS[6] }, { "InnerClasses", R_BIN_JAVA_ATTR_TYPE_INNER_CLASSES_ATTR, &RBIN_JAVA_ATTRS_ALLOCS[7] }, { "LineNumberTable", R_BIN_JAVA_ATTR_TYPE_LINE_NUMBER_TABLE_ATTR, &RBIN_JAVA_ATTRS_ALLOCS[8] }, { "LocalVariableTable", R_BIN_JAVA_ATTR_TYPE_LOCAL_VARIABLE_TABLE_ATTR, &RBIN_JAVA_ATTRS_ALLOCS[9] }, { "LocalVariableTypeTable", R_BIN_JAVA_ATTR_TYPE_LOCAL_VARIABLE_TYPE_TABLE_ATTR, &RBIN_JAVA_ATTRS_ALLOCS[10] }, { "RuntimeInvisibleAnnotations", R_BIN_JAVA_ATTR_TYPE_RUNTIME_INVISIBLE_ANNOTATION_ATTR, &RBIN_JAVA_ATTRS_ALLOCS[11] }, { "RuntimeInvisibleParameterAnnotations", R_BIN_JAVA_ATTR_TYPE_RUNTIME_INVISIBLE_PARAMETER_ANNOTATION_ATTR, &RBIN_JAVA_ATTRS_ALLOCS[12] }, { "RuntimeVisibleAnnotations", R_BIN_JAVA_ATTR_TYPE_RUNTIME_VISIBLE_ANNOTATION_ATTR, &RBIN_JAVA_ATTRS_ALLOCS[13] }, { "RuntimeVisibleParameterAnnotations", R_BIN_JAVA_ATTR_TYPE_RUNTIME_VISIBLE_PARAMETER_ANNOTATION_ATTR, &RBIN_JAVA_ATTRS_ALLOCS[14] }, { "Signature", R_BIN_JAVA_ATTR_TYPE_SIGNATURE_ATTR, &RBIN_JAVA_ATTRS_ALLOCS[15] }, { "SourceDebugExtension", R_BIN_JAVA_ATTR_TYPE_SOURCE_DEBUG_EXTENTSION_ATTR, &RBIN_JAVA_ATTRS_ALLOCS[16] }, { "SourceFile", R_BIN_JAVA_ATTR_TYPE_SOURCE_FILE_ATTR, &RBIN_JAVA_ATTRS_ALLOCS[17] }, { "StackMapTable", R_BIN_JAVA_ATTR_TYPE_STACK_MAP_TABLE_ATTR, 
&RBIN_JAVA_ATTRS_ALLOCS[18] }, // { "StackMap", R_BIN_JAVA_ATTR_TYPE_STACK_MAP_TABLE_ATTR, &RBIN_JAVA_ATTRS_ALLOCS[18]}, { "Synthetic", R_BIN_JAVA_ATTR_TYPE_SYNTHETIC_ATTR, &RBIN_JAVA_ATTRS_ALLOCS[19] }, { "Unknown", R_BIN_JAVA_ATTR_TYPE_UNKNOWN_ATTR, &RBIN_JAVA_ATTRS_ALLOCS[20] } }; R_API bool r_bin_java_is_old_format(RBinJavaObj *bin) { return bin->cf.major[1] == 45 && bin->cf.minor[1] <= 2; } R_API void r_bin_java_reset_bin_info(RBinJavaObj *bin) { free (bin->cf2.flags_str); free (bin->cf2.this_class_name); r_list_free (bin->imports_list); r_list_free (bin->methods_list); r_list_free (bin->fields_list); r_list_free (bin->attrs_list); r_list_free (bin->cp_list); r_list_free (bin->interfaces_list); r_str_constpool_fini (&bin->constpool); r_str_constpool_init (&bin->constpool); bin->cf2.flags_str = strdup ("unknown"); bin->cf2.this_class_name = strdup ("unknown"); bin->imports_list = r_list_newf (free); bin->methods_list = r_list_newf (r_bin_java_fmtype_free); bin->fields_list = r_list_newf (r_bin_java_fmtype_free); bin->attrs_list = r_list_newf (r_bin_java_attribute_free); bin->cp_list = r_list_newf (r_bin_java_constant_pool); bin->interfaces_list = r_list_newf (r_bin_java_interface_free); } R_API char *r_bin_java_unmangle_method(const char *flags, const char *name, const char *params, const char *r_value) { RList *the_list = params ? r_bin_java_extract_type_values (params) : r_list_new (); RListIter *iter = NULL; // second case removes leading space if no flags are given const char *fmt = flags ? 
"%s %s %s (%s)" : "%s%s %s (%s)"; char *str = NULL, *f_val_str = NULL, *r_val_str = NULL, *prototype = NULL, *p_val_str = NULL; ut32 params_idx = 0, params_len = 0, prototype_len = 0; if (!extract_type_value (r_value, &r_val_str)) { r_list_free (the_list); return NULL; } if (!r_val_str) { r_val_str = strdup ("UNKNOWN"); } f_val_str = strdup (r_str_get (flags)); r_list_foreach (the_list, iter, str) { params_len += strlen (str); if (params_idx > 0) { params_len += 2; } params_idx++; } if (params_len > 0) { ut32 offset = 0; params_len += 1; p_val_str = malloc (params_len); r_list_foreach (the_list, iter, str) { if (offset != 0) { offset += snprintf (p_val_str + offset, params_len - offset, ", %s", str); } else { offset += snprintf (p_val_str + offset, params_len - offset, "%s", str); } } } else { p_val_str = strdup (""); } prototype_len += (flags ? strlen (flags) + 1 : 0); // space vs no space prototype_len += strlen (name) + 1; // name + space prototype_len += strlen (r_val_str) + 1; // r_value + space prototype_len += strlen (p_val_str) + 3; // space + l_paren + params + r_paren prototype_len += 1; // null prototype = malloc (prototype_len); /// TODO enable this function and start using it to demangle strings snprintf (prototype, prototype_len, fmt, f_val_str, r_val_str, name, p_val_str); free (f_val_str); free (r_val_str); free (p_val_str); r_list_free (the_list); return prototype; } R_API char *r_bin_java_unmangle(const char *flags, const char *name, const char *descriptor) { ut32 l_paren_pos = -1, r_paren_pos = -1; char *result = NULL; ut32 desc_len = descriptor && *descriptor ? strlen (descriptor) : 0, name_len = name && *name ? strlen (name) : 0, flags_len = flags && *flags ? 
strlen (flags) : 0, i = 0; if (desc_len == 0 || name == 0) { return NULL; } for (i = 0; i < desc_len; i++) { if (descriptor[i] == '(') { l_paren_pos = i; } else if (l_paren_pos != (ut32) - 1 && descriptor[i] == ')') { r_paren_pos = i; break; } } // handle field case; if (l_paren_pos == (ut32) - 1 && r_paren_pos == (ut32) - 1) { char *unmangle_field_desc = NULL; ut32 len = extract_type_value (descriptor, &unmangle_field_desc); if (len == 0) { eprintf ("Warning: attempting to unmangle invalid type descriptor.\n"); free (unmangle_field_desc); return result; } if (flags_len > 0) { len += (flags_len + name_len + 5); // space and null result = malloc (len); snprintf (result, len, "%s %s %s", flags, unmangle_field_desc, name); } else { len += (name_len + 5); // space and null result = malloc (len); snprintf (result, len, "%s %s", unmangle_field_desc, name); } free (unmangle_field_desc); } else if (l_paren_pos != (ut32) - 1 && r_paren_pos != (ut32) - 1 && l_paren_pos < r_paren_pos) { // params_len account for l_paren + 1 and null ut32 params_len = r_paren_pos - (l_paren_pos + 1) != 0 ? r_paren_pos - (l_paren_pos + 1) + 1 : 0; char *params = params_len ? 
malloc (params_len) : NULL; const char *rvalue = descriptor + r_paren_pos + 1; if (params) { snprintf (params, params_len, "%s", descriptor + l_paren_pos + 1); } result = r_bin_java_unmangle_method (flags, name, params, rvalue); free (params); } return result; } R_API DsoJsonObj *r_bin_java_get_bin_obj_json(RBinJavaObj *bin) { DsoJsonObj *imports_list = r_bin_java_get_import_json_definitions (bin); DsoJsonObj *fields_list = r_bin_java_get_field_json_definitions (bin); DsoJsonObj *methods_list = r_bin_java_get_method_json_definitions (bin); // interfaces_list = r_bin_java_get_interface_json_definitions (bin); DsoJsonObj *class_dict = r_bin_java_get_class_info_json (bin); char *res = dso_json_obj_to_str (methods_list); // eprintf ("Resulting methods json: \n%s\n", res); free (res); if (dso_json_dict_insert_str_key_obj (class_dict, "methods", methods_list)) { // dso_json_list_free (methods_list); dso_json_obj_del (methods_list); } res = dso_json_obj_to_str (fields_list); // eprintf ("Resulting fields json: \n%s\n", res); free (res); if (dso_json_dict_insert_str_key_obj (class_dict, "fields", fields_list)) { // dso_json_list_free (fields_list); dso_json_obj_del (fields_list); } res = dso_json_obj_to_str (imports_list); // eprintf ("Resulting imports json: \n%s\n", res); free (res); if (dso_json_dict_insert_str_key_obj (class_dict, "imports", imports_list)) { // dso_json_list_free (imports_list); dso_json_obj_del (imports_list); } // res = dso_json_obj_to_str (interfaces_list); // eprintf ("Resulting interfaces json: \n%s\n", res); // free (res); // dso_json_dict_insert_str_key_obj (class_dict, "interfaces", interfaces_list); res = dso_json_obj_to_str (class_dict); // eprintf ("Resulting class info json: \n%s\n", res); free (res); // dso_json_obj_del (class_dict); return class_dict; } R_API DsoJsonObj *r_bin_java_get_import_json_definitions(RBinJavaObj *bin) { RList *the_list; DsoJsonObj *json_list = dso_json_list_new (); RListIter *iter = NULL; char *new_str; if (!bin 
|| !(the_list = r_bin_java_get_lib_names (bin))) { return json_list; } r_list_foreach (the_list, iter, new_str) { char *tmp = new_str; // eprintf ("Processing string: %s\n", new_str); while (*tmp) { if (*tmp == '/') { *tmp = '.'; } tmp++; } // eprintf ("adding string: %s\n", new_str); dso_json_list_append_str (json_list, new_str); } r_list_free (the_list); return json_list; } R_API DsoJsonObj *r_bin_java_get_class_info_json(RBinJavaObj *bin) { RList *classes = r_bin_java_get_classes (bin); DsoJsonObj *interfaces_list = dso_json_list_new (); DsoJsonObj *class_info_dict = dso_json_dict_new (); RBinClass *class_ = r_list_get_n (classes, 0); if (class_) { int dummy = 0; RListIter *iter; RBinClass *class_v = NULL; // add access flags like in methods bool is_public = ((class_->visibility & R_BIN_JAVA_CLASS_ACC_PUBLIC) != 0); bool is_final = ((class_->visibility & R_BIN_JAVA_CLASS_ACC_FINAL) != 0); bool is_super = ((class_->visibility & R_BIN_JAVA_CLASS_ACC_SUPER) != 0); bool is_interface = ((class_->visibility & R_BIN_JAVA_CLASS_ACC_INTERFACE) != 0); bool is_abstract = ((class_->visibility & R_BIN_JAVA_CLASS_ACC_ABSTRACT) != 0); bool is_synthetic = ((class_->visibility & R_BIN_JAVA_CLASS_ACC_SYNTHETIC) != 0); bool is_annotation = ((class_->visibility & R_BIN_JAVA_CLASS_ACC_ANNOTATION) != 0); bool is_enum = ((class_->visibility & R_BIN_JAVA_CLASS_ACC_ENUM) != 0); dso_json_dict_insert_str_key_num (class_info_dict, "access_flags", class_->visibility); dso_json_dict_insert_str_key_num (class_info_dict, "is_public", is_public); dso_json_dict_insert_str_key_num (class_info_dict, "is_final", is_final); dso_json_dict_insert_str_key_num (class_info_dict, "is_super", is_super); dso_json_dict_insert_str_key_num (class_info_dict, "is_interface", is_interface); dso_json_dict_insert_str_key_num (class_info_dict, "is_abstract", is_abstract); dso_json_dict_insert_str_key_num (class_info_dict, "is_synthetic", is_synthetic); dso_json_dict_insert_str_key_num (class_info_dict, 
"is_annotation", is_annotation); dso_json_dict_insert_str_key_num (class_info_dict, "is_enum", is_enum); dso_json_dict_insert_str_key_str (class_info_dict, "name", class_->name); if (!class_->super) { DsoJsonObj *str = dso_json_str_new (); if (dso_json_dict_insert_str_key_obj (class_info_dict, "super", str)) { dso_json_str_free (str); } } else { dso_json_dict_insert_str_key_str (class_info_dict, "super", class_->super); } r_list_foreach (classes, iter, class_v) { if (!dummy) { dummy++; continue; } // enumerate all interface classes and append them to the interfaces if ((class_v->visibility & R_BIN_JAVA_CLASS_ACC_INTERFACE) != 0) { dso_json_list_append_str (interfaces_list, class_v->name); } } } if (dso_json_dict_insert_str_key_obj (class_info_dict, "interfaces", interfaces_list)) { // dso_json_list_free (interfaces_list); dso_json_obj_del (interfaces_list); } r_list_free (classes); return class_info_dict; } R_API DsoJsonObj *r_bin_java_get_interface_json_definitions(RBinJavaObj *bin) { RList *the_list; DsoJsonObj *json_list = dso_json_list_new (); RListIter *iter = NULL; char *new_str; if (!bin || !(the_list = r_bin_java_get_interface_names (bin))) { return json_list; } r_list_foreach (the_list, iter, new_str) { char *tmp = new_str; // eprintf ("Processing string: %s\n", new_str); while (*tmp) { if (*tmp == '/') { *tmp = '.'; } tmp++; } // eprintf ("adding string: %s\n", new_str); dso_json_list_append_str (json_list, new_str); } r_list_free (the_list); return json_list; } R_API DsoJsonObj *r_bin_java_get_method_json_definitions(RBinJavaObj *bin) { RBinJavaField *fm_type = NULL; RListIter *iter = NULL; DsoJsonObj *json_list = dso_json_list_new (); if (!bin) { return json_list; } r_list_foreach (bin->methods_list, iter, fm_type) { DsoJsonObj *method_proto = r_bin_java_get_method_json_definition (bin, fm_type); // eprintf ("Method json: %s\n", method_proto); dso_json_list_append (json_list, method_proto); } return json_list; } R_API DsoJsonObj 
*r_bin_java_get_field_json_definitions(RBinJavaObj *bin) { RBinJavaField *fm_type = NULL; RListIter *iter = NULL; DsoJsonObj *json_list = dso_json_list_new (); if (!bin) { return json_list; } r_list_foreach (bin->fields_list, iter, fm_type) { DsoJsonObj *field_proto = r_bin_java_get_field_json_definition (bin, fm_type); // eprintf ("Field json: %s\n", field_proto); dso_json_list_append (json_list, field_proto); } return json_list; } R_API char *r_bin_java_create_method_fq_str(const char *klass, const char *name, const char *signature) { if (!klass) { klass = "null_class"; } if (!name) { name = "null_name"; } if (!signature) { signature = "null_signature"; } return r_str_newf ("%s.%s.%s", klass, name, signature); } R_API char *r_bin_java_create_field_fq_str(const char *klass, const char *name, const char *signature) { if (!klass) { klass = "null_class"; } if (!name) { name = "null_name"; } if (!signature) { signature = "null_signature"; } return r_str_newf ("%s %s.%s", signature, klass, name); } R_API DsoJsonObj *r_bin_java_get_fm_type_definition_json(RBinJavaObj *bin, RBinJavaField *fm_type, int is_method) { ut64 addr = UT64_MAX; char *prototype = NULL, *fq_name = NULL; bool is_native = ((fm_type->flags & R_BIN_JAVA_METHOD_ACC_NATIVE) != 0); bool is_static = ((fm_type->flags & R_BIN_JAVA_METHOD_ACC_STATIC) != 0); bool is_synthetic = ((fm_type->flags & R_BIN_JAVA_METHOD_ACC_SYNTHETIC) != 0); bool is_private = ((fm_type->flags & R_BIN_JAVA_METHOD_ACC_PRIVATE) != 0); bool is_public = ((fm_type->flags & R_BIN_JAVA_METHOD_ACC_PUBLIC) != 0); bool is_protected = ((fm_type->flags & R_BIN_JAVA_METHOD_ACC_PROTECTED) != 0); bool is_super = ((fm_type->flags & R_BIN_JAVA_CLASS_ACC_SUPER) != 0); DsoJsonObj *fm_type_dict = dso_json_dict_new (); dso_json_dict_insert_str_key_num (fm_type_dict, "access_flags", fm_type->flags); dso_json_dict_insert_str_key_num (fm_type_dict, "is_method", is_method); dso_json_dict_insert_str_key_num (fm_type_dict, "is_native", is_native); 
dso_json_dict_insert_str_key_num (fm_type_dict, "is_synthetic", is_synthetic); dso_json_dict_insert_str_key_num (fm_type_dict, "is_private", is_private); dso_json_dict_insert_str_key_num (fm_type_dict, "is_public", is_public); dso_json_dict_insert_str_key_num (fm_type_dict, "is_static", is_static); dso_json_dict_insert_str_key_num (fm_type_dict, "is_protected", is_protected); dso_json_dict_insert_str_key_num (fm_type_dict, "is_super", is_super); addr = r_bin_java_get_method_code_offset (fm_type); if (addr == 0) { addr = fm_type->file_offset; } addr += bin->loadaddr; dso_json_dict_insert_str_key_num (fm_type_dict, "addr", addr); dso_json_dict_insert_str_key_num (fm_type_dict, "offset", fm_type->file_offset + bin->loadaddr); dso_json_dict_insert_str_key_str (fm_type_dict, "class_name", fm_type->class_name); dso_json_dict_insert_str_key_str (fm_type_dict, "signature", fm_type->descriptor); dso_json_dict_insert_str_key_str (fm_type_dict, "name", fm_type->name); if (is_method) { fq_name = r_bin_java_create_method_fq_str (fm_type->class_name, fm_type->name, fm_type->descriptor); } else { fq_name = r_bin_java_create_field_fq_str (fm_type->class_name, fm_type->name, fm_type->descriptor); } dso_json_dict_insert_str_key_str (fm_type_dict, "fq_name", fq_name); prototype = r_bin_java_unmangle (fm_type->flags_str, fm_type->name, fm_type->descriptor); dso_json_dict_insert_str_key_str (fm_type_dict, "prototype", prototype); free (prototype); free (fq_name); return fm_type_dict; } R_API char *r_bin_java_get_method_definition(RBinJavaField *fm_type) { return r_bin_java_unmangle (fm_type->flags_str, fm_type->name, fm_type->descriptor); } R_API char *r_bin_java_get_field_definition(RBinJavaField *fm_type) { return r_bin_java_unmangle (fm_type->flags_str, fm_type->name, fm_type->descriptor); } R_API DsoJsonObj *r_bin_java_get_method_json_definition(RBinJavaObj *bin, RBinJavaField *fm_type) { return r_bin_java_get_fm_type_definition_json (bin, fm_type, 1); } R_API DsoJsonObj 
*r_bin_java_get_field_json_definition(RBinJavaObj *bin, RBinJavaField *fm_type) { return r_bin_java_get_fm_type_definition_json (bin, fm_type, 0); } R_API int r_bin_java_extract_reference_name(const char *input_str, char **ref_str, ut8 array_cnt) { char *new_str = NULL; ut32 str_len = array_cnt ? (array_cnt + 1) * 2 : 0; const char *str_pos = input_str; int consumed = 0, len = 0; if (!str_pos || *str_pos != 'L' || !*str_pos) { return -1; } consumed++; str_pos++; while (*str_pos && *str_pos != ';') { str_pos++; len++; consumed++; } str_pos = input_str + 1; free (*ref_str); str_len += len; *ref_str = malloc (str_len + 1); new_str = *ref_str; memcpy (new_str, str_pos, str_len); new_str[str_len] = 0; while (*new_str) { if (*new_str == '/') { *new_str = '.'; } new_str++; } return len + 2; } R_API void UNUSED_FUNCTION(r_bin_java_print_prototypes)(RBinJavaObj * bin) { RList *the_list = r_bin_java_get_method_definitions (bin); RListIter *iter; char *str; r_list_foreach (the_list, iter, str) { eprintf ("%s;\n", str); } r_list_free (the_list); } R_API char *get_type_value_str(const char *arg_str, ut8 array_cnt) { ut32 str_len = array_cnt ? 
(array_cnt + 1) * 2 + strlen (arg_str) : strlen (arg_str); char *str = malloc (str_len + 1); ut32 bytes_written = snprintf (str, str_len + 1, "%s", arg_str); while (array_cnt > 0) { strcpy (str + bytes_written, "[]"); bytes_written += 2; array_cnt--; } return str; } R_API int extract_type_value(const char *arg_str, char **output) { ut8 found_one = 0, array_cnt = 0; ut32 len = 0, consumed = 0; char *str = NULL; if (!arg_str || !output) { return 0; } if (output && *output && *output != NULL) { R_FREE (*output); } while (arg_str && *arg_str && !found_one) { len = 1; // handle the end of an object switch (*arg_str) { case 'V': str = get_type_value_str ("void", array_cnt); break; case 'J': str = get_type_value_str ("long", array_cnt); array_cnt = 0; break; case 'I': str = get_type_value_str ("int", array_cnt); array_cnt = 0; break; case 'D': str = get_type_value_str ("double", array_cnt); array_cnt = 0; break; case 'F': str = get_type_value_str ("float", array_cnt); array_cnt = 0; break; case 'B': str = get_type_value_str ("byte", array_cnt); array_cnt = 0; break; case 'C': str = get_type_value_str ("char", array_cnt); array_cnt = 0; break; case 'Z': str = get_type_value_str ("boolean", array_cnt); array_cnt = 0; break; case 'S': str = get_type_value_str ("short", array_cnt); array_cnt = 0; break; case '[': array_cnt++; break; case 'L': len = r_bin_java_extract_reference_name (arg_str, &str, array_cnt); array_cnt = 0; break; case '(': str = strdup ("("); break; case ')': str = strdup (")"); break; default: return 0; } if (len < 1) { break; } consumed += len; arg_str += len; if (str) { *output = str; break; } } return consumed; } R_API RList *r_bin_java_extract_type_values(const char *arg_str) { RList *list_args = r_list_new (); if (!list_args) { return NULL; } char *str = NULL; const char *str_cur_pos = NULL; ut32 len = 0; if (!arg_str) { return list_args; } str_cur_pos = arg_str; list_args->free = free; while (str_cur_pos && *str_cur_pos) { // handle the end of an 
object len = extract_type_value (str_cur_pos, &str); if (len < 1) { r_list_free (list_args); return NULL; } str_cur_pos += len; r_list_append (list_args, str); str = NULL; } return list_args; } R_API int r_bin_java_is_fm_type_private(RBinJavaField *fm_type) { if (fm_type && fm_type->type == R_BIN_JAVA_FIELD_TYPE_METHOD) { return fm_type->flags & R_BIN_JAVA_METHOD_ACC_PRIVATE; } if (fm_type && fm_type->type == R_BIN_JAVA_FIELD_TYPE_FIELD) { return fm_type->flags & R_BIN_JAVA_FIELD_ACC_PRIVATE; } return 0; } R_API int r_bin_java_is_fm_type_protected(RBinJavaField *fm_type) { if (fm_type && fm_type->type == R_BIN_JAVA_FIELD_TYPE_METHOD) { return fm_type->flags & R_BIN_JAVA_METHOD_ACC_PROTECTED; } if (fm_type && fm_type->type == R_BIN_JAVA_FIELD_TYPE_FIELD) { return fm_type->flags & R_BIN_JAVA_FIELD_ACC_PROTECTED; } return 0; } R_API RList *r_bin_java_get_args(RBinJavaField *fm_type) { RList *the_list = r_bin_java_extract_type_values (fm_type->descriptor); RList *arg_list = r_list_new (); ut8 in_args = 0; RListIter *desc_iter; char *str; r_list_foreach (the_list, desc_iter, str) { if (str && *str == '(') { in_args = 1; continue; } if (str && *str == ')') { break; } if (in_args && str) { r_list_append (arg_list, strdup (str)); } } r_list_free (the_list); return arg_list; } R_API RList *r_bin_java_get_ret(RBinJavaField *fm_type) { RList *the_list = r_bin_java_extract_type_values (fm_type->descriptor); RList *ret_list = r_list_new (); ut8 in_ret = 0; RListIter *desc_iter; char *str; r_list_foreach (the_list, desc_iter, str) { if (str && *str != ')') { in_ret = 0; } if (in_ret) { r_list_append (ret_list, strdup (str)); } } r_list_free (the_list); return ret_list; } R_API char *r_bin_java_get_this_class_name(RBinJavaObj *bin) { return (bin->cf2.this_class_name ? 
strdup (bin->cf2.this_class_name) : strdup ("unknown")); } R_API ut16 calculate_access_value(const char *access_flags_str, RBinJavaAccessFlags *access_flags) { ut16 result = 0; ut16 size = strlen (access_flags_str) + 1; char *p_flags, *my_flags = malloc (size); RBinJavaAccessFlags *iter = NULL; if (size < 5 || !my_flags) { free (my_flags); return result; } memcpy (my_flags, access_flags_str, size); p_flags = strtok (my_flags, " "); while (p_flags && access_flags) { int idx = 0; do { iter = &access_flags[idx]; if (!iter || !iter->str) { continue; } if (iter->len > 0 && iter->len != 16) { if (!strncmp (iter->str, p_flags, iter->len)) { result |= iter->value; } } idx++; } while (access_flags[idx].str != NULL); p_flags = strtok (NULL, " "); } free (my_flags); return result; } R_API RList *retrieve_all_access_string_and_value(RBinJavaAccessFlags *access_flags) { const char *fmt = "%s = 0x%04x"; RList *result = r_list_new (); if (!result) { return NULL; } result->free = free; int i = 0; for (i = 0; access_flags[i].str != NULL; i++) { char *str = malloc (50); if (!str) { r_list_free (result); return NULL; } snprintf (str, 49, fmt, access_flags[i].str, access_flags[i].value); r_list_append (result, str); } return result; } R_API char *retrieve_access_string(ut16 flags, RBinJavaAccessFlags *access_flags) { char *outbuffer = NULL, *cur_pos = NULL; ut16 i; ut16 max_str_len = 0; for (i = 0; access_flags[i].str != NULL; i++) { if (flags & access_flags[i].value) { max_str_len += (strlen (access_flags[i].str) + 1); if (max_str_len < strlen (access_flags[i].str)) { return NULL; } } } max_str_len++; outbuffer = (char *) malloc (max_str_len); if (outbuffer) { memset (outbuffer, 0, max_str_len); cur_pos = outbuffer; for (i = 0; access_flags[i].str != NULL; i++) { if (flags & access_flags[i].value) { ut8 len = strlen (access_flags[i].str); const char *the_string = access_flags[i].str; memcpy (cur_pos, the_string, len); memcpy (cur_pos + len, " ", 1); cur_pos += len + 1; } } if 
(cur_pos != outbuffer) { *(cur_pos - 1) = 0; } } return outbuffer; } R_API char *retrieve_method_access_string(ut16 flags) { return retrieve_access_string (flags, METHOD_ACCESS_FLAGS); } R_API char *retrieve_field_access_string(ut16 flags) { return retrieve_access_string (flags, FIELD_ACCESS_FLAGS); } R_API char *retrieve_class_method_access_string(ut16 flags) { return retrieve_access_string (flags, CLASS_ACCESS_FLAGS); } R_API char *r_bin_java_build_obj_key(RBinJavaObj *bin) { char *cname = r_bin_java_get_this_class_name (bin); char *jvcname = cname? r_str_newf ("%d.%s.class", bin->id, cname) : r_str_newf ("%d._unknown_.class", bin->id); free (cname); return jvcname; } R_API bool sdb_iterate_build_list(void *user, const char *k, const char *v) { RList *bin_objs_list = (RList *) user; size_t value = (size_t) sdb_atoi (v); RBinJavaObj *bin_obj = NULL; IFDBG eprintf ("Found %s == %"PFMT64x " bin_objs db\n", k, (ut64) value); if (value != 0 && value != (size_t) -1) { bin_obj = (RBinJavaObj *) value; r_list_append (bin_objs_list, bin_obj); } return true; } R_API RBinJavaCPTypeObj *r_bin_java_get_java_null_cp(void) { if (R_BIN_JAVA_NULL_TYPE_INITTED) { return &R_BIN_JAVA_NULL_TYPE; } memset (&R_BIN_JAVA_NULL_TYPE, 0, sizeof (R_BIN_JAVA_NULL_TYPE)); R_BIN_JAVA_NULL_TYPE.metas = R_NEW0 (RBinJavaMetaInfo); if (!R_BIN_JAVA_NULL_TYPE.metas) { return NULL; } memset (R_BIN_JAVA_NULL_TYPE.metas, 0, sizeof (RBinJavaMetaInfo)); R_BIN_JAVA_NULL_TYPE.metas->type_info = &R_BIN_JAVA_CP_METAS[0]; R_BIN_JAVA_NULL_TYPE.metas->ord = 0; R_BIN_JAVA_NULL_TYPE.file_offset = 0; R_BIN_JAVA_NULL_TYPE_INITTED = true; return &R_BIN_JAVA_NULL_TYPE; } R_API RBinJavaElementValueMetas *r_bin_java_get_ev_meta_from_tag(ut8 tag) { ut16 i = 0; RBinJavaElementValueMetas *res = &R_BIN_JAVA_ELEMENT_VALUE_METAS[13]; for (i = 0; i < R_BIN_JAVA_ELEMENT_VALUE_METAS_SZ; i++) { if (tag == R_BIN_JAVA_ELEMENT_VALUE_METAS[i].tag) { res = &R_BIN_JAVA_ELEMENT_VALUE_METAS[i]; break; } } return res; } R_API ut8 
r_bin_java_quick_check(ut8 expected_tag, ut8 actual_tag, ut32 actual_len, const char *name) { ut8 res = 0; if (expected_tag > R_BIN_JAVA_CP_METAS_SZ) { eprintf ("Invalid tag '%d' expected 0x%02x for %s.\n", actual_tag, expected_tag, name); res = 1; } else if (expected_tag != actual_tag) { eprintf ("Invalid tag '%d' expected 0x%02x for %s.\n", actual_tag, expected_tag, name); res = 1; } else if (actual_len < R_BIN_JAVA_CP_METAS[expected_tag].len) { eprintf ("Unable to parse '%d' expected sz=0x%02x got 0x%02x for %s.\n", actual_tag, R_BIN_JAVA_CP_METAS[expected_tag].len, actual_len, name); res = 2; } return res; } R_API ut64 r_bin_java_raw_to_long(const ut8 *raw, ut64 offset) { return R_BIN_JAVA_LONG (raw, offset); } // yanked from careercup, because i am lazy: // 1) dont want to figure out how make radare use math library // 2) dont feel like figuring it out when google does it in O(1). R_API double my_pow(ut64 base, int exp) { ut8 flag = 0; ut64 res = 1; if (exp < 0) { flag = 1; exp *= -1; } while (exp) { if (exp & 1) { res *= base; } exp >>= 1; base *= base; IFDBG eprintf ("Result: %"PFMT64d ", base: %"PFMT64d ", exp: %d\n", res, base, exp); } if (flag == 0) { return 1.0 * res; } return (1.0 / res); } R_API double r_bin_java_raw_to_double(const ut8 *raw, ut64 offset) { ut64 bits = R_BIN_JAVA_LONG (raw, offset); int s = ((bits >> 63) == 0) ? 1 : -1; int e = (int) ((bits >> 52) & 0x7ffL); long m = (e == 0) ? 
(bits & 0xfffffffffffffLL) << 1 : (bits & 0xfffffffffffffLL) | 0x10000000000000LL; double res = 0.0; IFDBG eprintf ("Convert Long to Double: %08"PFMT64x "\n", bits); if (bits == 0x7ff0000000000000LL) { return INFINITY; } if (bits == 0xfff0000000000000LL) { return -INFINITY; } if (0x7ff0000000000001LL <= bits && bits <= 0x7fffffffffffffffLL) { return NAN; } if (0xfff0000000000001LL <= bits && bits <= 0xffffffffffffffffLL) { return NAN; } res = s * m * my_pow (2, e - 1075);// XXXX TODO Get double to work correctly here IFDBG eprintf (" High-bytes = %02x %02x %02x %02x\n", raw[0], raw[1], raw[2], raw[3]); IFDBG eprintf (" Low-bytes = %02x %02x %02x %02x\n", raw[4], raw[5], raw[6], raw[7]); IFDBG eprintf ("Convert Long to Double s: %d, m: 0x%08lx, e: 0x%08x, res: %f\n", s, m, e, res); return res; } R_API RBinJavaField *r_bin_java_read_next_method(RBinJavaObj *bin, const ut64 offset, const ut8 *buf, const ut64 len) { ut32 i, idx; const ut8 *f_buf = buf + offset; ut64 adv = 0; RBinJavaCPTypeObj *item = NULL; if (!bin || offset + 8 >= len) { return NULL; } RBinJavaField *method = (RBinJavaField *) R_NEW0 (RBinJavaField); if (!method) { eprintf ("Unable to allocate memory for method information\n"); return NULL; } method->metas = (RBinJavaMetaInfo *) R_NEW0 (RBinJavaMetaInfo); if (!method->metas) { eprintf ("Unable to allocate memory for meta information\n"); free (method); return NULL; } method->file_offset = offset; method->flags = R_BIN_JAVA_USHORT (f_buf, 0); method->flags_str = retrieve_method_access_string (method->flags); // need to subtract 1 for the idx method->name_idx = R_BIN_JAVA_USHORT (f_buf, 2); method->descriptor_idx = R_BIN_JAVA_USHORT (f_buf, 4); method->attr_count = R_BIN_JAVA_USHORT (f_buf, 6); method->attributes = r_list_newf (r_bin_java_attribute_free); method->type = R_BIN_JAVA_FIELD_TYPE_METHOD; method->metas->ord = bin->method_idx; adv += 8; idx = method->name_idx; item = r_bin_java_get_item_from_bin_cp_list (bin, idx); method->name = 
r_bin_java_get_utf8_from_bin_cp_list (bin, (ut32) (method->name_idx)); IFDBG eprintf ("Method name_idx: %d, which is: ord: %d, name: %s, value: %s\n", idx, item->metas->ord, ((RBinJavaCPTypeMetas *)item->metas->type_info)->name, method->name); if (!method->name) { method->name = (char *) malloc (21); snprintf ((char *) method->name, 20, "sym.method_%08x", method->metas->ord); IFDBG eprintf ("r_bin_java_read_next_method: Unable to find the name for 0x%02x index.\n", method->name_idx); } idx = method->descriptor_idx; item = r_bin_java_get_item_from_bin_cp_list (bin, idx); method->descriptor = r_bin_java_get_utf8_from_bin_cp_list (bin, (ut32) method->descriptor_idx); IFDBG eprintf ("Method descriptor_idx: %d, which is: ord: %d, name: %s, value: %s\n", idx, item->metas->ord, ((RBinJavaCPTypeMetas *)item->metas->type_info)->name, method->descriptor); if (!method->descriptor) { method->descriptor = r_str_dup (NULL, "NULL"); IFDBG eprintf ("r_bin_java_read_next_method: Unable to find the descriptor for 0x%02x index.\n", method->descriptor_idx); } IFDBG eprintf ("Looking for a NameAndType CP with name_idx: %d descriptor_idx: %d\n", method->name_idx, method->descriptor_idx); method->field_ref_cp_obj = r_bin_java_find_cp_ref_info_from_name_and_type (bin, method->name_idx, method->descriptor_idx); if (method->field_ref_cp_obj) { IFDBG eprintf ("Found the obj.\n"); item = r_bin_java_get_item_from_bin_cp_list (bin, method->field_ref_cp_obj->info.cp_method.class_idx); IFDBG eprintf ("Method class reference value: %d, which is: ord: %d, name: %s\n", method->field_ref_cp_obj->info.cp_method.class_idx, item->metas->ord, ((RBinJavaCPTypeMetas *)item->metas->type_info)->name); method->class_name = r_bin_java_get_item_name_from_bin_cp_list (bin, item); IFDBG eprintf ("Method requesting ref_cp_obj the following which is: ord: %d, name: %s\n", method->field_ref_cp_obj->metas->ord, ((RBinJavaCPTypeMetas *)method->field_ref_cp_obj->metas->type_info)->name); IFDBG eprintf ("MethodRef class 
name resolves to: %s\n", method->class_name); if (!method->class_name) { method->class_name = r_str_dup (NULL, "NULL"); } } else { // XXX - default to this class? method->field_ref_cp_obj = r_bin_java_get_item_from_bin_cp_list (bin, bin->cf2.this_class); method->class_name = r_bin_java_get_item_name_from_bin_cp_list (bin, method->field_ref_cp_obj); } IFDBG eprintf ("Parsing %s(%s)\n", method->name, method->descriptor); if (method->attr_count > 0) { method->attr_offset = adv + offset; RBinJavaAttrInfo *attr = NULL; for (i = 0; i < method->attr_count; i++) { attr = r_bin_java_read_next_attr (bin, adv + offset, buf, len); if (!attr) { eprintf ("[X] r_bin_java: Error unable to parse remainder of classfile after Method Attribute: %d.\n", i); break; } if ((r_bin_java_get_attr_type_by_name (attr->name))->type == R_BIN_JAVA_ATTR_TYPE_CODE_ATTR) { // This is necessary for determing the appropriate number of bytes when readin // uoffset, ustack, ulocalvar values bin->cur_method_code_length = attr->info.code_attr.code_length; bin->offset_sz = 2;// (attr->info.code_attr.code_length > 65535) ? 4 : 2; bin->ustack_sz = 2;// (attr->info.code_attr.max_stack > 65535) ? 4 : 2; bin->ulocalvar_sz = 2;// (attr->info.code_attr.max_locals > 65535) ? 
4 : 2; } IFDBG eprintf ("Parsing @ 0x%"PFMT64x " (%s) = 0x%"PFMT64x " bytes\n", attr->file_offset, attr->name, attr->size); r_list_append (method->attributes, attr); adv += attr->size; if (adv + offset >= len) { eprintf ("[X] r_bin_java: Error unable to parse remainder of classfile after Method Attribute: %d.\n", i); break; } } } method->size = adv; // reset after parsing the method attributes IFDBG eprintf ("Parsing @ 0x%"PFMT64x " %s(%s) = 0x%"PFMT64x " bytes\n", method->file_offset, method->name, method->descriptor, method->size); return method; } R_API RBinJavaField *r_bin_java_read_next_field(RBinJavaObj *bin, const ut64 offset, const ut8 *buffer, const ut64 len) { RBinJavaAttrInfo *attr; ut32 i, idx; ut8 buf[8]; RBinJavaCPTypeObj *item = NULL; const ut8 *f_buf = buffer + offset; ut64 adv = 0; if (!bin || offset + 8 >= len) { return NULL; } RBinJavaField *field = (RBinJavaField *) R_NEW0 (RBinJavaField); if (!field) { eprintf ("Unable to allocate memory for field information\n"); return NULL; } field->metas = (RBinJavaMetaInfo *) R_NEW0 (RBinJavaMetaInfo); if (!field->metas) { eprintf ("Unable to allocate memory for meta information\n"); free (field); return NULL; } memcpy (buf, f_buf, 8); field->file_offset = offset; field->flags = R_BIN_JAVA_USHORT (buf, 0); field->flags_str = retrieve_field_access_string (field->flags); field->name_idx = R_BIN_JAVA_USHORT (buf, 2); field->descriptor_idx = R_BIN_JAVA_USHORT (buf, 4); field->attr_count = R_BIN_JAVA_USHORT (buf, 6); field->attributes = r_list_newf (r_bin_java_attribute_free); field->type = R_BIN_JAVA_FIELD_TYPE_FIELD; adv += 8; field->metas->ord = bin->field_idx; idx = field->name_idx; item = r_bin_java_get_item_from_bin_cp_list (bin, idx); field->name = r_bin_java_get_utf8_from_bin_cp_list (bin, (ut32) (field->name_idx)); IFDBG eprintf ("Field name_idx: %d, which is: ord: %d, name: %s, value: %s\n", idx, item->metas->ord, ((RBinJavaCPTypeMetas *)item->metas->type_info)->name, field->name); if (!field->name) { 
field->name = (char *) malloc (21); snprintf ((char *) field->name, 20, "sym.field_%08x", field->metas->ord); IFDBG eprintf ("r_bin_java_read_next_field: Unable to find the name for 0x%02x index.\n", field->name_idx); } idx = field->descriptor_idx; item = r_bin_java_get_item_from_bin_cp_list (bin, idx); field->descriptor = r_bin_java_get_utf8_from_bin_cp_list (bin, (ut32) field->descriptor_idx); IFDBG eprintf ("Field descriptor_idx: %d, which is: ord: %d, name: %s, value: %s\n", idx, item->metas->ord, ((RBinJavaCPTypeMetas *)item->metas->type_info)->name, field->descriptor); if (!field->descriptor) { field->descriptor = r_str_dup (NULL, "NULL"); IFDBG eprintf ("r_bin_java_read_next_field: Unable to find the descriptor for 0x%02x index.\n", field->descriptor_idx); } IFDBG eprintf ("Looking for a NameAndType CP with name_idx: %d descriptor_idx: %d\n", field->name_idx, field->descriptor_idx); field->field_ref_cp_obj = r_bin_java_find_cp_ref_info_from_name_and_type (bin, field->name_idx, field->descriptor_idx); if (field->field_ref_cp_obj) { IFDBG eprintf ("Found the obj.\n"); item = r_bin_java_get_item_from_bin_cp_list (bin, field->field_ref_cp_obj->info.cp_field.class_idx); IFDBG eprintf ("Field class reference value: %d, which is: ord: %d, name: %s\n", field->field_ref_cp_obj->info.cp_field.class_idx, item->metas->ord, ((RBinJavaCPTypeMetas *)item->metas->type_info)->name); field->class_name = r_bin_java_get_item_name_from_bin_cp_list (bin, item); IFDBG eprintf ("Field requesting ref_cp_obj the following which is: ord: %d, name: %s\n", field->field_ref_cp_obj->metas->ord, ((RBinJavaCPTypeMetas *)field->field_ref_cp_obj->metas->type_info)->name); IFDBG eprintf ("FieldRef class name resolves to: %s\n", field->class_name); if (!field->class_name) { field->class_name = r_str_dup (NULL, "NULL"); } } else { // XXX - default to this class? 
field->field_ref_cp_obj = r_bin_java_get_item_from_bin_cp_list (bin, bin->cf2.this_class); field->class_name = r_bin_java_get_item_name_from_bin_cp_list (bin, field->field_ref_cp_obj); } IFDBG eprintf ("Parsing %s(%s)", field->name, field->descriptor); if (field->attr_count > 0) { field->attr_offset = adv + offset; for (i = 0; i < field->attr_count && offset + adv < len; i++) { attr = r_bin_java_read_next_attr (bin, offset + adv, buffer, len); if (!attr) { eprintf ("[X] r_bin_java: Error unable to parse remainder of classfile after Field Attribute: %d.\n", i); free (field->metas); free (field); return NULL; } if ((r_bin_java_get_attr_type_by_name (attr->name))->type == R_BIN_JAVA_ATTR_TYPE_CODE_ATTR) { // This is necessary for determing the appropriate number of bytes when readin // uoffset, ustack, ulocalvar values bin->cur_method_code_length = attr->info.code_attr.code_length; bin->offset_sz = 2;// (attr->info.code_attr.code_length > 65535) ? 4 : 2; bin->ustack_sz = 2;// (attr->info.code_attr.max_stack > 65535) ? 4 : 2; bin->ulocalvar_sz = 2;// (attr->info.code_attr.max_locals > 65535) ? 
4 : 2; } r_list_append (field->attributes, attr); adv += attr->size; if (adv + offset >= len) { eprintf ("[X] r_bin_java: Error unable to parse remainder of classfile after Field Attribute: %d.\n", i); r_bin_java_fmtype_free (field); return NULL; } } } field->size = adv; return field; } R_API RBinJavaCPTypeObj *r_bin_java_clone_cp_idx(RBinJavaObj *bin, ut32 idx) { RBinJavaCPTypeObj *obj = NULL; if (bin) { obj = r_bin_java_get_item_from_bin_cp_list (bin, idx); } return r_bin_java_clone_cp_item (obj); } R_API RBinJavaCPTypeObj *r_bin_java_clone_cp_item(RBinJavaCPTypeObj *obj) { RBinJavaCPTypeObj *clone_obj = NULL; if (!obj) { return clone_obj; } clone_obj = R_NEW0 (RBinJavaCPTypeObj); if (clone_obj) { memcpy (clone_obj, obj, sizeof (RBinJavaCPTypeObj)); clone_obj->metas = (RBinJavaMetaInfo *) R_NEW0 (RBinJavaMetaInfo); clone_obj->metas->type_info = (void *) &R_BIN_JAVA_CP_METAS[clone_obj->tag]; clone_obj->name = strdup (obj->name? obj->name: "unk"); if (obj->tag == R_BIN_JAVA_CP_UTF8) { clone_obj->info.cp_utf8.bytes = (ut8 *) malloc (obj->info.cp_utf8.length + 1); if (clone_obj->info.cp_utf8.bytes) { memcpy (clone_obj->info.cp_utf8.bytes, obj->info.cp_utf8.bytes, clone_obj->info.cp_utf8.length); } else { // TODO: eprintf allocation error } } } return clone_obj; } R_API RBinJavaCPTypeObj *r_bin_java_read_next_constant_pool_item(RBinJavaObj *bin, const ut64 offset, const ut8 *buf, ut64 len) { RBinJavaCPTypeMetas *java_constant_info = NULL; ut8 tag = 0; ut64 buf_sz = 0; ut8 *cp_buf = NULL; ut32 str_len = 0; RBinJavaCPTypeObj *java_obj = NULL; tag = buf[offset]; if (tag > R_BIN_JAVA_CP_METAS_SZ) { eprintf ("Invalid tag '%d' at offset 0x%08"PFMT64x "\n", tag, (ut64) offset); return NULL; #if 0 java_obj = r_bin_java_unknown_cp_new (bin, &tag, 1); if (java_obj != NULL && java_obj->metas != NULL) { java_obj->file_offset = offset; java_obj->loadaddr = bin->loadaddr; } return NULL; // early error to avoid future overflows // return java_obj; #endif } java_constant_info = 
&R_BIN_JAVA_CP_METAS[tag]; if (java_constant_info->tag == 0 || java_constant_info->tag == 2) { return java_obj; } buf_sz += java_constant_info->len; if (java_constant_info->tag == 1) { if (offset + 32 < len) { str_len = R_BIN_JAVA_USHORT (buf, offset + 1); buf_sz += str_len; } else { return NULL; } } cp_buf = calloc (buf_sz, 1); if (!cp_buf) { return java_obj; } if (offset + buf_sz < len) { memcpy (cp_buf, (ut8 *) buf + offset, buf_sz); IFDBG eprintf ("Parsed the tag '%d':%s and create object from offset 0x%08"PFMT64x ".\n", tag, R_BIN_JAVA_CP_METAS[tag].name, offset); java_obj = (*java_constant_info->allocs->new_obj)(bin, cp_buf, buf_sz); if (java_obj != NULL && java_obj->metas != NULL) { java_obj->file_offset = offset; // IFDBG eprintf ("java_obj->file_offset = 0x%08"PFMT64x".\n",java_obj->file_offset); } else if (!java_obj) { eprintf ("Unable to parse the tag '%d' and create valid object.\n", tag); } else if (!java_obj->metas) { eprintf ("Unable to parse the tag '%d' and create valid object.\n", tag); } else { eprintf ("Failed to set the java_obj->metas-file_offset for '%d' offset is(0x%08"PFMT64x ").\n", tag, offset); } } free (cp_buf); return java_obj; } R_API RBinJavaInterfaceInfo *r_bin_java_read_next_interface_item(RBinJavaObj *bin, const ut64 offset, const ut8 *buf, const ut64 len) { ut8 idx[2] = { 0 }; RBinJavaInterfaceInfo *ifobj; const ut8 *if_buf = buf + offset; if (offset + 2 >= len) { return NULL; } memcpy (&idx, if_buf, 2); ifobj = r_bin_java_interface_new (bin, if_buf, len - offset); if (ifobj) { ifobj->file_offset = offset; } return ifobj; } // R_API void addrow (RBinJavaObj *bin, int addr, int line) { // int n = bin->lines.count++; //// XXX. 
possible memleak // bin->lines.addr = realloc (bin->lines.addr, sizeof (int)*n+1); // bin->lines.addr[n] = addr; // bin->lines.line = realloc (bin->lines.line, sizeof (int)*n+1); // bin->lines.line[n] = line; // } // R_API struct r_bin_java_cp_item_t* r_bin_java_get_item_from_cp_CP(RBinJavaObj *bin, int i) { // return (i<0||i>bin->cf.cp_count)? &cp_null_item: &bin->cp_items[i]; // } R_API char *r_bin_java_get_utf8_from_bin_cp_list(RBinJavaObj *bin, ut64 idx) { /* Search through the Constant Pool list for the given CP Index. If the idx not found by directly going to the list index, the list will be walked and then the IDX will be checked. rvalue: new char* for caller to free. */ if (bin == NULL) { return NULL; } return r_bin_java_get_utf8_from_cp_item_list (bin->cp_list, idx); } R_API ut32 r_bin_java_get_utf8_len_from_bin_cp_list(RBinJavaObj *bin, ut64 idx) { /* Search through the Constant Pool list for the given CP Index. If the idx not found by directly going to the list index, the list will be walked and then the IDX will be checked. rvalue: new char* for caller to free. */ if (bin == NULL) { return 0; } return r_bin_java_get_utf8_len_from_cp_item_list (bin->cp_list, idx); } R_API char *r_bin_java_get_name_from_bin_cp_list(RBinJavaObj *bin, ut64 idx) { /* Search through the Constant Pool list for the given CP Index. If the idx not found by directly going to the list index, the list will be walked and then the IDX will be checked. rvalue: new char* for caller to free. */ if (bin == NULL) { return NULL; } return r_bin_java_get_name_from_cp_item_list (bin->cp_list, idx); } R_API char *r_bin_java_get_desc_from_bin_cp_list(RBinJavaObj *bin, ut64 idx) { /* Search through the Constant Pool list for the given CP Index. If the idx not found by directly going to the list index, the list will be walked and then the IDX will be checked. rvalue: new char* for caller to free. 
*/ if (bin == NULL) { return NULL; } return r_bin_java_get_desc_from_cp_item_list (bin->cp_list, idx); } R_API RBinJavaCPTypeObj *r_bin_java_get_item_from_bin_cp_list(RBinJavaObj *bin, ut64 idx) { /* Search through the Constant Pool list for the given CP Index. If the idx not found by directly going to the list index, the list will be walked and then the IDX will be checked. rvalue: RBinJavaObj* (user does NOT free). */ if (bin == NULL) { return NULL; } if (idx > bin->cp_count || idx == 0) { return r_bin_java_get_java_null_cp (); } return r_bin_java_get_item_from_cp_item_list (bin->cp_list, idx); } R_API char *r_bin_java_get_item_name_from_bin_cp_list(RBinJavaObj *bin, RBinJavaCPTypeObj *obj) { char *res = NULL; /* Given a constant poool object Class, FieldRef, MethodRef, or InterfaceMethodRef return the actual descriptor string. @param cp_list: RList of RBinJavaCPTypeObj * @param obj object to look up the name for @rvalue char* (user frees) or NULL */ if (bin && obj) { res = r_bin_java_get_item_name_from_cp_item_list ( bin->cp_list, obj, MAX_CPITEMS); } return res; } R_API char *r_bin_java_get_item_desc_from_bin_cp_list(RBinJavaObj *bin, RBinJavaCPTypeObj *obj) { /* Given a constant poool object Class, FieldRef, MethodRef, or InterfaceMethodRef return the actual descriptor string. @param cp_list: RList of RBinJavaCPTypeObj * @param obj object to look up the name for @rvalue char* (user frees) or NULL */ return bin? r_bin_java_get_item_desc_from_cp_item_list (bin->cp_list, obj, MAX_CPITEMS): NULL; } R_API char *r_bin_java_get_utf8_from_cp_item_list(RList *cp_list, ut64 idx) { /* Search through the Constant Pool list for the given CP Index. If the idx not found by directly going to the list index, the list will be walked and then the IDX will be checked. rvalue: new char* for caller to free. 
*/ char *value = NULL; RListIter *iter; if (!cp_list) { return NULL; } RBinJavaCPTypeObj *item = (RBinJavaCPTypeObj *) r_list_get_n (cp_list, idx); if (item && item->tag == R_BIN_JAVA_CP_UTF8 && item->metas->ord == idx) { value = convert_string ((const char *) item->info.cp_utf8.bytes, item->info.cp_utf8.length); } if (!value) { r_list_foreach (cp_list, iter, item) { if (item && (item->tag == R_BIN_JAVA_CP_UTF8) && item->metas->ord == idx) { value = convert_string ((const char *) item->info.cp_utf8.bytes, item->info.cp_utf8.length); break; } } } return value; } R_API ut32 r_bin_java_get_utf8_len_from_cp_item_list(RList *cp_list, ut64 idx) { /* Search through the Constant Pool list for the given CP Index. If the idx not found by directly going to the list index, the list will be walked and then the IDX will be checked. rvalue: new ut32 . */ ut32 value = -1; RListIter *iter; if (!cp_list) { return 0; } RBinJavaCPTypeObj *item = (RBinJavaCPTypeObj *) r_list_get_n (cp_list, idx); if (item && (item->tag == R_BIN_JAVA_CP_UTF8) && item->metas->ord == idx) { value = item->info.cp_utf8.length; } if (value == -1) { r_list_foreach (cp_list, iter, item) { if (item && (item->tag == R_BIN_JAVA_CP_UTF8) && item->metas->ord == idx) { value = item->info.cp_utf8.length; break; } } } return value; } R_API RBinJavaCPTypeObj *r_bin_java_get_item_from_cp_item_list(RList *cp_list, ut64 idx) { /* Search through the Constant Pool list for the given CP Index. rvalue: RBinJavaObj * */ RBinJavaCPTypeObj *item = NULL; if (cp_list == NULL) { return NULL; } item = (RBinJavaCPTypeObj *) r_list_get_n (cp_list, idx); return item; } R_API char *r_bin_java_get_item_name_from_cp_item_list(RList *cp_list, RBinJavaCPTypeObj *obj, int depth) { /* Given a constant poool object Class, FieldRef, MethodRef, or InterfaceMethodRef return the actual descriptor string. 
@param cp_list: RList of RBinJavaCPTypeObj * @param obj object to look up the name for @rvalue ut8* (user frees) or NULL */ if (!obj || !cp_list || depth < 0) { return NULL; } switch (obj->tag) { case R_BIN_JAVA_CP_NAMEANDTYPE: return r_bin_java_get_utf8_from_cp_item_list ( cp_list, obj->info.cp_name_and_type.name_idx); case R_BIN_JAVA_CP_CLASS: return r_bin_java_get_utf8_from_cp_item_list ( cp_list, obj->info.cp_class.name_idx); // XXX - Probably not good form, but they are the same memory structure case R_BIN_JAVA_CP_FIELDREF: case R_BIN_JAVA_CP_INTERFACEMETHOD_REF: case R_BIN_JAVA_CP_METHODREF: obj = r_bin_java_get_item_from_cp_item_list ( cp_list, obj->info.cp_method.name_and_type_idx); return r_bin_java_get_item_name_from_cp_item_list ( cp_list, obj, depth - 1); default: return NULL; case 0: IFDBG eprintf ("Invalid 0 tag in the constant pool\n"); return NULL; } return NULL; } R_API char *r_bin_java_get_name_from_cp_item_list(RList *cp_list, ut64 idx) { /* Given a constant poool object Class, FieldRef, MethodRef, or InterfaceMethodRef return the actual descriptor string. @param cp_list: RList of RBinJavaCPTypeObj * @param obj object to look up the name for @rvalue ut8* (user frees) or NULL */ RBinJavaCPTypeObj *obj = r_bin_java_get_item_from_cp_item_list ( cp_list, idx); if (obj && cp_list) { return r_bin_java_get_item_name_from_cp_item_list ( cp_list, obj, MAX_CPITEMS); } return NULL; } R_API char *r_bin_java_get_item_desc_from_cp_item_list(RList *cp_list, RBinJavaCPTypeObj *obj, int depth) { /* Given a constant poool object FieldRef, MethodRef, or InterfaceMethodRef return the actual descriptor string. 
@rvalue ut8* (user frees) or NULL */ if (!obj || !cp_list || depth < 0) { return NULL; } switch (obj->tag) { case R_BIN_JAVA_CP_NAMEANDTYPE: return r_bin_java_get_utf8_from_cp_item_list (cp_list, obj->info.cp_name_and_type.descriptor_idx); // XXX - Probably not good form, but they are the same memory structure case R_BIN_JAVA_CP_FIELDREF: case R_BIN_JAVA_CP_INTERFACEMETHOD_REF: case R_BIN_JAVA_CP_METHODREF: obj = r_bin_java_get_item_from_cp_item_list (cp_list, obj->info.cp_method.name_and_type_idx); return r_bin_java_get_item_desc_from_cp_item_list ( cp_list, obj, depth - 1); default: return NULL; } return NULL; } R_API char *r_bin_java_get_desc_from_cp_item_list(RList *cp_list, ut64 idx) { /* Given a constant poool object FieldRef, MethodRef, or InterfaceMethodRef return the actual descriptor string. @rvalue ut8* (user frees) or NULL */ RBinJavaCPTypeObj *obj = r_bin_java_get_item_from_cp_item_list (cp_list, idx); if (!cp_list) { return NULL; } return r_bin_java_get_item_desc_from_cp_item_list (cp_list, obj, MAX_CPITEMS); } R_API RBinJavaAttrInfo *r_bin_java_get_method_code_attribute(const RBinJavaField *method) { /* Search through a methods attributes and return the code attr. rvalue: RBinJavaAttrInfo* if found otherwise NULL. */ RBinJavaAttrInfo *res = NULL, *attr = NULL; RListIter *iter; if (method) { r_list_foreach (method->attributes, iter, attr) { if (attr && (attr->type == R_BIN_JAVA_ATTR_TYPE_CODE_ATTR)) { res = attr; break; } } } return res; } R_API RBinJavaAttrInfo *r_bin_java_get_attr_from_field(RBinJavaField *field, R_BIN_JAVA_ATTR_TYPE attr_type, ut32 pos) { /* Search through the Attribute list for the given type starting at position pos. 
rvalue: NULL or the first occurrence of attr_type after pos */ RBinJavaAttrInfo *attr = NULL, *item; RListIter *iter; ut32 i = 0; if (field) { r_list_foreach (field->attributes, iter, item) { // Note the increment happens after the comparison if ((i++) >= pos) { if (item && (item->type == attr_type)) { attr = item; break; } } } } return attr; } R_API ut8 *r_bin_java_get_attr_buf(RBinJavaObj *bin, ut64 sz, const ut64 offset, const ut8 *buf, const ut64 len) { ut8 *attr_buf = NULL; int pending = len - offset; const ut8 *a_buf = offset + buf; attr_buf = (ut8 *) calloc (pending + 1, 1); if (!attr_buf) { eprintf ("Unable to allocate enough bytes (0x%04"PFMT64x ") to read in the attribute.\n", sz); return attr_buf; } memcpy (attr_buf, a_buf, pending); // sz+1); return attr_buf; } R_API RBinJavaAttrInfo *r_bin_java_default_attr_new(RBinJavaObj *bin, ut8 *buffer, ut64 sz, ut64 buf_offset) { // NOTE: this function receives the buffer offset in the original buffer, // but the buffer is already point to that particular offset. // XXX - all the code that relies on this function should probably be modified // so that the original buffer pointer is passed in and then the buffer+buf_offset // points to the correct location. 
RBinJavaAttrInfo *attr = R_NEW0 (RBinJavaAttrInfo); if (!attr) { return NULL; } RBinJavaAttrMetas *type_info = NULL; attr->metas = R_NEW0 (RBinJavaMetaInfo); if (!attr->metas) { free (attr); return NULL; } attr->is_attr_in_old_format = r_bin_java_is_old_format(bin); attr->file_offset = buf_offset; attr->name_idx = R_BIN_JAVA_USHORT (buffer, 0); attr->length = R_BIN_JAVA_UINT (buffer, 2); attr->size = R_BIN_JAVA_UINT (buffer, 2) + 6; attr->name = r_bin_java_get_utf8_from_bin_cp_list (R_BIN_JAVA_GLOBAL_BIN, attr->name_idx); if (!attr->name) { // Something bad has happened attr->name = r_str_dup (NULL, "NULL"); eprintf ("r_bin_java_default_attr_new: Unable to find the name for %d index.\n", attr->name_idx); } type_info = r_bin_java_get_attr_type_by_name (attr->name); attr->metas->ord = (R_BIN_JAVA_GLOBAL_BIN->attr_idx++); attr->metas->type_info = (void *) type_info; // IFDBG eprintf (" Addrs for type_info [tag=%d]: 0x%08"PFMT64x"\n", type_val, &attr->metas->type_info); return attr; } R_API RBinJavaAttrMetas *r_bin_java_get_attr_type_by_name(const char *name) { // TODO: use sdb/hashtable here int i; for (i = 0; i < RBIN_JAVA_ATTRS_METAS_SZ; i++) { if (!strcmp ((const char *) name, RBIN_JAVA_ATTRS_METAS[i].name)) { return &RBIN_JAVA_ATTRS_METAS[i]; } } return &RBIN_JAVA_ATTRS_METAS[R_BIN_JAVA_ATTR_TYPE_UNKNOWN_ATTR]; } R_API RBinJavaAttrInfo *r_bin_java_read_next_attr(RBinJavaObj *bin, const ut64 offset, const ut8 *buf, const ut64 buf_len) { const ut8 *a_buf = offset + buf; ut8 attr_idx_len = 6; if (offset + 6 > buf_len) { eprintf ("[X] r_bin_java: Error unable to parse remainder of classfile in Attribute offset " "(0x%"PFMT64x ") > len of remaining bytes (0x%"PFMT64x ").\n", offset, buf_len); return NULL; } // ut16 attr_idx, ut32 length of attr. 
ut32 sz = R_BIN_JAVA_UINT (a_buf, 2) + attr_idx_len; // r_bin_java_read_int (bin, buf_offset+2) + attr_idx_len; if (sz + offset > buf_len) { eprintf ("[X] r_bin_java: Error unable to parse remainder of classfile in Attribute len " "(0x%x) + offset (0x%"PFMT64x ") exceeds length of buffer (0x%"PFMT64x ").\n", sz, offset, buf_len); return NULL; } // when reading the attr bytes, need to also // include the initial 6 bytes, which // are not included in the attribute length // , // sz, buf_offset, buf_offset+sz); ut8 *buffer = r_bin_java_get_attr_buf (bin, sz, offset, buf, buf_len); RBinJavaAttrInfo *attr = NULL; // printf ("%d %d %d\n", sz, buf_len, offset); if (offset < buf_len) { attr = r_bin_java_read_next_attr_from_buffer (bin, buffer, buf_len - offset, offset); free (buffer); if (!attr) { return NULL; } attr->size = sz; } else { free (buffer); eprintf ("IS OOB\n"); } return attr; } R_API RBinJavaAttrInfo *r_bin_java_read_next_attr_from_buffer(RBinJavaObj *bin, ut8 *buffer, st64 sz, st64 buf_offset) { RBinJavaAttrInfo *attr = NULL; st64 nsz; if (!buffer || ((int) sz) < 4 || buf_offset < 0) { eprintf ("r_bin_Java_read_next_attr_from_buffer: invalid buffer size %d\n", (int) sz); return NULL; } ut16 name_idx = R_BIN_JAVA_USHORT (buffer, 0); ut64 offset = 2; nsz = R_BIN_JAVA_UINT (buffer, offset); // DEAD INCREMENT offset += 4; char *name = r_bin_java_get_utf8_from_bin_cp_list (R_BIN_JAVA_GLOBAL_BIN, name_idx); if (!name) { name = strdup ("unknown"); } IFDBG eprintf ("r_bin_java_read_next_attr: name_idx = %d is %s\n", name_idx, name); RBinJavaAttrMetas *type_info = r_bin_java_get_attr_type_by_name (name); if (type_info) { IFDBG eprintf ("Typeinfo: %s, was %s\n", type_info->name, name); // printf ("SZ %d %d %d\n", nsz, sz, buf_offset); if (nsz > sz) { free (name); return NULL; } if ((attr = type_info->allocs->new_obj (bin, buffer, nsz, buf_offset))) { attr->metas->ord = (R_BIN_JAVA_GLOBAL_BIN->attr_idx++); } } else { eprintf ("r_bin_java_read_next_attr_from_buffer: 
Cannot find type_info for %s\n", name); } free (name); return attr; } R_API ut64 r_bin_java_read_class_file2(RBinJavaObj *bin, const ut64 offset, const ut8 *obuf, ut64 len) { const ut8 *cf2_buf = obuf + offset; RBinJavaCPTypeObj *this_class_cp_obj = NULL; IFDBG eprintf ("\n0x%"PFMT64x " Offset before reading the cf2 structure\n", offset); /* Reading the following fields: ut16 access_flags; ut16 this_class; ut16 super_class; */ if (cf2_buf + 6 > obuf + len) { return 0; } bin->cf2.cf2_size = 6; bin->cf2.access_flags = R_BIN_JAVA_USHORT (cf2_buf, 0); bin->cf2.this_class = R_BIN_JAVA_USHORT (cf2_buf, 2); bin->cf2.super_class = R_BIN_JAVA_USHORT (cf2_buf, 4); free (bin->cf2.flags_str); free (bin->cf2.this_class_name); bin->cf2.flags_str = retrieve_class_method_access_string (bin->cf2.access_flags); this_class_cp_obj = r_bin_java_get_item_from_bin_cp_list (bin, bin->cf2.this_class); bin->cf2.this_class_name = r_bin_java_get_item_name_from_bin_cp_list (bin, this_class_cp_obj); IFDBG eprintf ("This class flags are: %s\n", bin->cf2.flags_str); return bin->cf2.cf2_size; } R_API ut64 r_bin_java_parse_cp_pool(RBinJavaObj *bin, const ut64 offset, const ut8 *buf, const ut64 len) { int ord = 0; ut64 adv = 0; RBinJavaCPTypeObj *obj = NULL; const ut8 *cp_buf = buf + offset; r_list_free (bin->cp_list); bin->cp_list = r_list_newf (r_bin_java_constant_pool); bin->cp_offset = offset; memcpy ((char *) &bin->cp_count, cp_buf, 2); bin->cp_count = R_BIN_JAVA_USHORT (cp_buf, 0) - 1; adv += 2; IFDBG eprintf ("ConstantPoolCount %d\n", bin->cp_count); r_list_append (bin->cp_list, r_bin_java_get_java_null_cp ()); for (ord = 1, bin->cp_idx = 0; bin->cp_idx < bin->cp_count && adv < len; ord++, bin->cp_idx++) { obj = r_bin_java_read_next_constant_pool_item (bin, offset + adv, buf, len); if (obj) { // IFDBG eprintf ("SUCCESS Read ConstantPoolItem %d\n", i); obj->metas->ord = ord; obj->idx = ord; r_list_append (bin->cp_list, obj); if (obj->tag == R_BIN_JAVA_CP_LONG || obj->tag == 
		    R_BIN_JAVA_CP_DOUBLE) {
				// Long and Double constants occupy two constant-pool slots
				// (JVM spec 4.4.5); pad the list with a null placeholder so
				// ordinals keep lining up with list positions.
				// i++;
				ord++;
				bin->cp_idx++;
				r_list_append (bin->cp_list, &R_BIN_JAVA_NULL_TYPE);
			}
			IFDBG ((RBinJavaCPTypeMetas *) obj->metas->type_info)->allocs->print_summary (obj);
			// Advance by the on-disk size of the entry just parsed.
			adv += ((RBinJavaCPTypeMetas *) obj->metas->type_info)->allocs->calc_size (obj);
			if (offset + adv > len) {
				eprintf ("[X] r_bin_java: Error unable to parse remainder of classfile after Constant Pool Object: %d.\n", ord);
				break;
			}
		} else {
			IFDBG eprintf ("Failed to read ConstantPoolItem %d\n", bin->cp_idx);
			break;
		}
	}
	// Update the imports
	r_bin_java_set_imports (bin);
	bin->cp_size = adv;
	return bin->cp_size;
}

/*
 * Parse the interfaces table that follows the cf2 section of a class file.
 * Rebuilds bin->interfaces_list and records interfaces_offset/size/count.
 * Returns the number of bytes consumed (0 when len is too short).
 */
R_API ut64 r_bin_java_parse_interfaces(RBinJavaObj *bin, const ut64 offset, const ut8 *buf, const ut64 len) {
	int i = 0;
	ut64 adv = 0;
	RBinJavaInterfaceInfo *interfaces_obj;
	const ut8 *if_buf = buf + offset;
	bin->cp_offset = offset;	// NOTE(review): also overwrites cp_offset here — looks suspicious, verify intent
	bin->interfaces_offset = offset;
	r_list_free (bin->interfaces_list);
	bin->interfaces_list = r_list_newf (r_bin_java_interface_free);
	// Need at least 2 bytes for the interfaces_count ushort.
	if (offset + 2 > len) {
		bin->interfaces_size = 0;
		return 0;
	}
	bin->interfaces_count = R_BIN_JAVA_USHORT (if_buf, 0);
	adv += 2;
	IFDBG eprintf ("Interfaces count: %d\n", bin->interfaces_count);
	if (bin->interfaces_count > 0) {
		for (i = 0; i < bin->interfaces_count; i++) {
			interfaces_obj = r_bin_java_read_next_interface_item (bin, offset + adv, buf, len);
			if (interfaces_obj) {
				r_list_append (bin->interfaces_list, interfaces_obj);
				adv += interfaces_obj->size;
				// Stop as soon as parsing would run past the buffer.
				if (offset + adv > len) {
					eprintf ("[X] r_bin_java: Error unable to parse remainder of classfile after Interface: %d.\n", i);
					break;
				}
			} else {
				break;
			}
		}
	}
	bin->interfaces_size = adv;
	return adv;
}

/*
 * Parse the fields table. Rebuilds bin->fields_list and records
 * fields_offset/size/count. Returns bytes consumed, or UT64_MAX when the
 * buffer is too short to even hold the fields_count ushort.
 */
R_API ut64 r_bin_java_parse_fields(RBinJavaObj *bin, const ut64 offset, const ut8 *buf, const ut64 len) {
	int i = 0;
	ut64 adv = 0;
	RBinJavaField *field;
	const ut8 *fm_buf = buf + offset;
	r_list_free (bin->fields_list);
	bin->fields_list = r_list_newf (r_bin_java_fmtype_free);
	bin->fields_offset = offset;
	if (offset + 2 >= len) {
		return UT64_MAX;
	}
	bin->fields_count =
R_BIN_JAVA_USHORT (fm_buf, 0); adv += 2; IFDBG eprintf ("Fields count: %d 0x%"PFMT64x "\n", bin->fields_count, bin->fields_offset); if (bin->fields_count > 0) { for (i = 0; i < bin->fields_count; i++, bin->field_idx++) { field = r_bin_java_read_next_field (bin, offset + adv, buf, len); if (field) { adv += field->size; r_list_append (bin->fields_list, field); IFDBG r_bin_java_print_field_summary(field); if (adv + offset > len) { eprintf ("[X] r_bin_java: Error unable to parse remainder of classfile after Field: %d.\n", i); break; } } else { IFDBG eprintf ("Failed to read Field %d\n", i); break; } } } bin->fields_size = adv; return adv; } R_API ut64 r_bin_java_parse_attrs(RBinJavaObj *bin, const ut64 offset, const ut8 *buf, const ut64 len) { int i = 0; ut64 adv = 0; const ut8 *a_buf = buf + offset; if (offset + 2 >= len) { // Check if we can read that USHORT return UT64_MAX; } r_list_free (bin->attrs_list); bin->attrs_list = r_list_newf (r_bin_java_attribute_free); bin->attrs_offset = offset; bin->attrs_count = R_BIN_JAVA_USHORT (a_buf, adv); adv += 2; if (bin->attrs_count > 0) { for (i = 0; i < bin->attrs_count; i++, bin->attr_idx++) { RBinJavaAttrInfo *attr = r_bin_java_read_next_attr (bin, offset + adv, buf, len); if (!attr) { // eprintf ("[X] r_bin_java: Error unable to parse remainder of classfile after Attribute: %d.\n", i); break; } r_list_append (bin->attrs_list, attr); adv += attr->size; if (adv + offset >= len) { // eprintf ("[X] r_bin_java: Error unable to parse remainder of classfile after Attribute: %d.\n", i); break; } } } bin->attrs_size = adv; return adv; } R_API ut64 r_bin_java_parse_methods(RBinJavaObj *bin, const ut64 offset, const ut8 *buf, const ut64 len) { int i = 0; ut64 adv = 0; RBinJavaField *method; const ut8 *fm_buf = buf + offset; r_list_free (bin->methods_list); bin->methods_list = r_list_newf (r_bin_java_fmtype_free); if (offset + 2 >= len) { return 0LL; } bin->methods_offset = offset; bin->methods_count = R_BIN_JAVA_USHORT (fm_buf, 0); 
adv += 2; IFDBG eprintf ("Methods count: %d 0x%"PFMT64x "\n", bin->methods_count, bin->methods_offset); bin->main = NULL; bin->entrypoint = NULL; bin->main_code_attr = NULL; bin->entrypoint_code_attr = NULL; for (i = 0; i < bin->methods_count; i++, bin->method_idx++) { method = r_bin_java_read_next_method (bin, offset + adv, buf, len); if (method) { adv += method->size; r_list_append (bin->methods_list, method); } // Update Main, Init, or Class Init if (method && !strcmp ((const char *) method->name, "main")) { bin->main = method; // get main code attr bin->main_code_attr = r_bin_java_get_attr_from_field (method, R_BIN_JAVA_ATTR_TYPE_CODE_ATTR, 0); } else if (method && (!strcmp ((const char *) method->name, "<init>") || !strcmp ((const char *) method->name, "init"))) { IFDBG eprintf ("Found an init function.\n"); bin->entrypoint = method; bin->entrypoint_code_attr = r_bin_java_get_attr_from_field (method, R_BIN_JAVA_ATTR_TYPE_CODE_ATTR, 0); } else if (method && (!strcmp ((const char *) method->name, "<cinit>") || !strcmp ((const char *) method->name, "cinit"))) { bin->cf2.this_class_entrypoint = method; bin->cf2.this_class_entrypoint_code_attr = r_bin_java_get_attr_from_field (method, R_BIN_JAVA_ATTR_TYPE_CODE_ATTR, 0); } if (adv + offset > len) { eprintf ("[X] r_bin_java: Error unable to parse remainder of classfile after Method: %d.\n", i); break; } IFDBG r_bin_java_print_field_summary(method); } bin->methods_size = adv; return adv; } R_API int r_bin_java_new_bin(RBinJavaObj *bin, ut64 loadaddr, Sdb *kv, const ut8 *buf, ut64 len) { R_BIN_JAVA_GLOBAL_BIN = bin; if (!r_str_constpool_init (&bin->constpool)) { return false; } bin->lines.count = 0; bin->loadaddr = loadaddr; r_bin_java_get_java_null_cp (); bin->id = r_num_rand (UT32_MAX); bin->kv = kv ? 
kv : sdb_new (NULL, NULL, 0); bin->AllJavaBinObjs = NULL; return r_bin_java_load_bin (bin, buf, len); } R_API int r_bin_java_load_bin(RBinJavaObj *bin, const ut8 *buf, ut64 buf_sz) { ut64 adv = 0; R_BIN_JAVA_GLOBAL_BIN = bin; if (!bin) { return false; } r_bin_java_reset_bin_info (bin); memcpy ((ut8 *) &bin->cf, buf, 10); if (memcmp (bin->cf.cafebabe, "\xCA\xFE\xBA\xBE", 4)) { eprintf ("r_bin_java_new_bin: Invalid header (%02x %02x %02x %02x)\n", bin->cf.cafebabe[0], bin->cf.cafebabe[1], bin->cf.cafebabe[2], bin->cf.cafebabe[3]); return false; } if (bin->cf.major[0] == bin->cf.major[1] && bin->cf.major[0] == 0) { eprintf ("Java CLASS with MACH0 header?\n"); return false; } adv += 8; // -2 so that the cp_count will be parsed adv += r_bin_java_parse_cp_pool (bin, adv, buf, buf_sz); if (adv > buf_sz) { eprintf ("[X] r_bin_java: Error unable to parse remainder of classfile after Constant Pool.\n"); return true; } adv += r_bin_java_read_class_file2 (bin, adv, buf, buf_sz); if (adv > buf_sz) { eprintf ("[X] r_bin_java: Error unable to parse remainder of classfile after class file info.\n"); return true; } IFDBG eprintf ("This class: %d %s\n", bin->cf2.this_class, bin->cf2.this_class_name); IFDBG eprintf ("0x%"PFMT64x " Access flags: 0x%04x\n", adv, bin->cf2.access_flags); adv += r_bin_java_parse_interfaces (bin, adv, buf, buf_sz); if (adv > buf_sz) { eprintf ("[X] r_bin_java: Error unable to parse remainder of classfile after Interfaces.\n"); return true; } adv += r_bin_java_parse_fields (bin, adv, buf, buf_sz); if (adv > buf_sz) { eprintf ("[X] r_bin_java: Error unable to parse remainder of classfile after Fields.\n"); return true; } adv += r_bin_java_parse_methods (bin, adv, buf, buf_sz); if (adv > buf_sz) { eprintf ("[X] r_bin_java: Error unable to parse remainder of classfile after Methods.\n"); return true; } adv += r_bin_java_parse_attrs (bin, adv, buf, buf_sz); bin->calc_size = adv; // if (adv > buf_sz) { // eprintf ("[X] r_bin_java: Error unable to parse remainder 
of classfile after Attributes.\n"); // return true; // } // add_cp_objs_to_sdb(bin); // add_method_infos_to_sdb(bin); // add_field_infos_to_sdb(bin); return true; } R_API char *r_bin_java_get_version(RBinJavaObj *bin) { return r_str_newf ("0x%02x%02x 0x%02x%02x", bin->cf.major[1], bin->cf.major[0], bin->cf.minor[1], bin->cf.minor[0]); } R_API RList *r_bin_java_get_entrypoints(RBinJavaObj *bin) { RListIter *iter = NULL, *iter_tmp = NULL; RBinJavaField *fm_type; RList *ret = r_list_newf (free); if (!ret) { return NULL; } r_list_foreach_safe (bin->methods_list, iter, iter_tmp, fm_type) { if (!strcmp (fm_type->name, "main") || !strcmp (fm_type->name, "<init>") || !strcmp (fm_type->name, "<clinit>") || strstr (fm_type->flags_str, "static")) { RBinAddr *addr = R_NEW0 (RBinAddr); if (addr) { addr->vaddr = addr->paddr = \ r_bin_java_get_method_code_offset (fm_type) + bin->loadaddr; addr->hpaddr = fm_type->file_offset; r_list_append (ret, addr); } } } return ret; } R_API RBinJavaField *r_bin_java_get_method_code_attribute_with_addr(RBinJavaObj *bin, ut64 addr) { RListIter *iter = NULL, *iter_tmp = NULL; RBinJavaField *fm_type, *res = NULL; if (!bin && R_BIN_JAVA_GLOBAL_BIN) { bin = R_BIN_JAVA_GLOBAL_BIN; } if (!bin) { eprintf ("Attempting to analyse function when the R_BIN_JAVA_GLOBAL_BIN has not been set.\n"); return NULL; } r_list_foreach_safe (bin->methods_list, iter, iter_tmp, fm_type) { ut64 offset = r_bin_java_get_method_code_offset (fm_type) + bin->loadaddr, size = r_bin_java_get_method_code_size (fm_type); if (addr >= offset && addr <= size + offset) { res = fm_type; } } return res; } R_API RBinAddr *r_bin_java_get_entrypoint(RBinJavaObj *bin, int sym) { RBinAddr *ret = NULL; ret = R_NEW0 (RBinAddr); if (!ret) { return NULL; } ret->paddr = UT64_MAX; switch (sym) { case R_BIN_SYM_ENTRY: case R_BIN_SYM_INIT: ret->paddr = r_bin_java_find_method_offset (bin, "<init>"); if (ret->paddr == UT64_MAX) { ret->paddr = r_bin_java_find_method_offset (bin, "<cinit>"); } break; 
case R_BIN_SYM_FINI: ret->paddr = UT64_MAX; break; case R_BIN_SYM_MAIN: ret->paddr = r_bin_java_find_method_offset (bin, "main"); break; default: ret->paddr = -1; } if (ret->paddr != -1) { ret->paddr += bin->loadaddr; } return ret; } R_API ut64 r_bin_java_get_method_code_size(RBinJavaField *fm_type) { RListIter *attr_iter = NULL, *attr_iter_tmp = NULL; RBinJavaAttrInfo *attr = NULL; ut64 sz = 0; r_list_foreach_safe (fm_type->attributes, attr_iter, attr_iter_tmp, attr) { if (attr->type == R_BIN_JAVA_ATTR_TYPE_CODE_ATTR) { sz = attr->info.code_attr.code_length; break; } } return sz; } R_API ut64 r_bin_java_find_method_offset(RBinJavaObj *bin, const char *method_name) { RListIter *attr_iter = NULL, *attr_iter_tmp = NULL; RBinJavaField *method = NULL; ut64 offset = -1; r_list_foreach_safe (bin->methods_list, attr_iter, attr_iter_tmp, method) { if (method && !strcmp ((const char *) method->name, method_name)) { offset = r_bin_java_get_method_code_offset (method) + bin->loadaddr; break; } } return offset; } R_API ut64 r_bin_java_get_method_code_offset(RBinJavaField *fm_type) { RListIter *attr_iter = NULL, *attr_iter_tmp = NULL; RBinJavaAttrInfo *attr = NULL; ut64 offset = 0; r_list_foreach_safe (fm_type->attributes, attr_iter, attr_iter_tmp, attr) { if (attr->type == R_BIN_JAVA_ATTR_TYPE_CODE_ATTR) { offset = attr->info.code_attr.code_offset; break; } } return offset; } R_API RBinField *r_bin_java_allocate_rbinfield(void) { RBinField *t = (RBinField *) malloc (sizeof (RBinField)); if (t) { memset (t, 0, sizeof (RBinField)); } return t; } R_API RBinField *r_bin_java_create_new_rbinfield_from_field(RBinJavaField *fm_type, ut64 baddr) { RBinField *field = r_bin_java_allocate_rbinfield (); if (field) { field->name = strdup (fm_type->name); field->paddr = fm_type->file_offset + baddr; field->visibility = fm_type->flags; } return field; } R_API RBinSymbol *r_bin_java_create_new_symbol_from_field(RBinJavaField *fm_type, ut64 baddr) { RBinSymbol *sym = R_NEW0 (RBinSymbol); if 
(!fm_type || !fm_type->field_ref_cp_obj || fm_type->field_ref_cp_obj == &R_BIN_JAVA_NULL_TYPE) { R_FREE (sym); } if (sym) { sym->name = strdup (fm_type->name); // strncpy (sym->type, fm_type->descriptor, R_BIN_SIZEOF_STRINGS); if (fm_type->type == R_BIN_JAVA_FIELD_TYPE_METHOD) { sym->type = R_BIN_TYPE_FUNC_STR; sym->paddr = r_bin_java_get_method_code_offset (fm_type); sym->vaddr = r_bin_java_get_method_code_offset (fm_type) + baddr; sym->size = r_bin_java_get_method_code_size (fm_type); } else { sym->type = "FIELD"; sym->paddr = fm_type->file_offset;// r_bin_java_get_method_code_offset (fm_type); sym->vaddr = fm_type->file_offset + baddr; sym->size = fm_type->size; } if (r_bin_java_is_fm_type_protected (fm_type)) { sym->bind = R_BIN_BIND_LOCAL_STR; } else if (r_bin_java_is_fm_type_private (fm_type)) { sym->bind = R_BIN_BIND_LOCAL_STR; } else if (r_bin_java_is_fm_type_protected (fm_type)) { sym->bind = R_BIN_BIND_GLOBAL_STR; } sym->forwarder = "NONE"; if (fm_type->class_name) { sym->classname = strdup (fm_type->class_name); } else { sym->classname = strdup ("UNKNOWN"); // dupped names? 
} sym->ordinal = fm_type->metas->ord; sym->visibility = fm_type->flags; if (fm_type->flags_str) { sym->visibility_str = strdup (fm_type->flags_str); } } return sym; } R_API RBinSymbol *r_bin_java_create_new_symbol_from_fm_type_meta(RBinJavaField *fm_type, ut64 baddr) { RBinSymbol *sym = R_NEW0 (RBinSymbol); if (!sym || !fm_type || !fm_type->field_ref_cp_obj || fm_type->field_ref_cp_obj == &R_BIN_JAVA_NULL_TYPE) { free (sym); return NULL; } // ut32 new_name_len = strlen (fm_type->name) + strlen ("_meta") + 1; // char *new_name = malloc (new_name_len); sym->name = r_str_newf ("meta_%s", fm_type->name); if (fm_type->type == R_BIN_JAVA_FIELD_TYPE_METHOD) { sym->type = "FUNC_META"; } else { sym->type = "FIELD_META"; } if (r_bin_java_is_fm_type_protected (fm_type)) { sym->bind = R_BIN_BIND_LOCAL_STR; } else if (r_bin_java_is_fm_type_private (fm_type)) { sym->bind = R_BIN_BIND_LOCAL_STR; } else if (r_bin_java_is_fm_type_protected (fm_type)) { sym->bind = R_BIN_BIND_GLOBAL_STR; } sym->forwarder = "NONE"; if (fm_type->class_name) { sym->classname = strdup (fm_type->class_name); } else { sym->classname = strdup ("UNKNOWN"); } sym->paddr = fm_type->file_offset;// r_bin_java_get_method_code_offset (fm_type); sym->vaddr = fm_type->file_offset + baddr; sym->ordinal = fm_type->metas->ord; sym->size = fm_type->size; sym->visibility = fm_type->flags; if (fm_type->flags_str) { sym->visibility_str = strdup (fm_type->flags_str); } return sym; } R_API RBinSymbol *r_bin_java_create_new_symbol_from_ref(RBinJavaObj *bin, RBinJavaCPTypeObj *obj, ut64 baddr) { RBinSymbol *sym = R_NEW0 (RBinSymbol); if (!sym) { return NULL; } char *class_name, *name, *type_name; if (!obj || (obj->tag != R_BIN_JAVA_CP_METHODREF && obj->tag != R_BIN_JAVA_CP_INTERFACEMETHOD_REF && obj->tag != R_BIN_JAVA_CP_FIELDREF)) { R_FREE (sym); return sym; } if (sym) { class_name = r_bin_java_get_name_from_bin_cp_list (bin, obj->info.cp_method.class_idx); name = r_bin_java_get_name_from_bin_cp_list (bin, 
obj->info.cp_method.name_and_type_idx); type_name = r_bin_java_get_name_from_bin_cp_list (bin, obj->info.cp_method.name_and_type_idx); if (name) { sym->name = name; name = NULL; } if (type_name) { sym->type = r_str_constpool_get (&bin->constpool, type_name); R_FREE (type_name); } if (class_name) { sym->classname = strdup (class_name); } sym->paddr = obj->file_offset + baddr; sym->vaddr = obj->file_offset + baddr; sym->ordinal = obj->metas->ord; sym->size = 0; } return sym; } // TODO: vaddr+vsize break things if set R_API RList *r_bin_java_get_sections(RBinJavaObj *bin) { RBinSection *section = NULL; RList *sections = r_list_newf (free); ut64 baddr = bin->loadaddr; RBinJavaField *fm_type; RListIter *iter = NULL; if (bin->cp_count > 0) { section = R_NEW0 (RBinSection); if (section) { section->name = strdup ("constant_pool"); section->paddr = bin->cp_offset + baddr; section->size = bin->cp_size; #if 0 section->vsize = section->size; section->vaddr = 0x10; // XXX // bin->cp_offset; // + baddr; #endif section->vaddr = baddr; // section->vaddr = section->paddr; // section->vsize = section->size; section->perm = R_PERM_R; section->add = true; r_list_append (sections, section); } section = NULL; } if (bin->fields_count > 0) { section = R_NEW0 (RBinSection); if (section) { section->name = strdup ("fields"); section->size = bin->fields_size; section->paddr = bin->fields_offset + baddr; #if 0 section->vsize = section->size; section->vaddr = section->paddr; #endif section->perm = R_PERM_R; section->add = true; r_list_append (sections, section); section = NULL; r_list_foreach (bin->fields_list, iter, fm_type) { if (fm_type->attr_offset == 0) { continue; } section = R_NEW0 (RBinSection); if (section) { section->name = r_str_newf ("attrs.%s", fm_type->name); section->size = fm_type->size - (fm_type->file_offset - fm_type->attr_offset); #if 0 section->vsize = section->size; section->vaddr = section->paddr; #endif section->paddr = fm_type->attr_offset + baddr; section->perm = 
R_PERM_R; section->add = true; r_list_append (sections, section); } } } } if (bin->methods_count > 0) { section = R_NEW0 (RBinSection); if (section) { section->name = strdup ("methods"); section->paddr = bin->methods_offset + baddr; section->size = bin->methods_size; // section->vaddr = section->paddr; // section->vsize = section->size; section->perm = R_PERM_RX; section->add = true; r_list_append (sections, section); section = NULL; r_list_foreach (bin->methods_list, iter, fm_type) { if (fm_type->attr_offset == 0) { continue; } section = R_NEW0 (RBinSection); if (section) { section->name = r_str_newf ("attrs.%s", fm_type->name); section->size = fm_type->size - (fm_type->file_offset - fm_type->attr_offset); // section->vsize = section->size; // section->vaddr = section->paddr; section->paddr = fm_type->attr_offset + baddr; section->perm = R_PERM_R | R_PERM_X; section->add = true; r_list_append (sections, section); } } } } if (bin->interfaces_count > 0) { section = R_NEW0 (RBinSection); if (section) { section->name = strdup ("interfaces"); section->paddr = bin->interfaces_offset + baddr; section->size = bin->interfaces_size; // section->vaddr = section->paddr; // section->vsize = section->size; section->perm = R_PERM_R; section->add = true; r_list_append (sections, section); } section = NULL; } if (bin->attrs_count > 0) { section = R_NEW0 (RBinSection); if (section) { section->name = strdup ("attributes"); section->paddr = bin->attrs_offset + baddr; section->size = bin->attrs_size; // section->vaddr = section->paddr; // section->vsize = section->size; section->perm = R_PERM_R; section->perm = R_PERM_R; section->add = true; r_list_append (sections, section); } section = NULL; } return sections; } R_API RList *r_bin_java_enum_class_methods(RBinJavaObj *bin, ut16 class_idx) { RList *methods = r_list_newf (free); RListIter *iter; RBinJavaField *field; r_list_foreach (bin->methods_list, iter, field) { if (field->field_ref_cp_obj && 0) { if ((field && 
field->field_ref_cp_obj->metas->ord == class_idx)) { RBinSymbol *sym = r_bin_java_create_new_symbol_from_ref ( bin, field->field_ref_cp_obj, bin->loadaddr); if (sym) { r_list_append (methods, sym); } } } else { RBinSymbol *sym = R_NEW0 (RBinSymbol); sym->name = strdup (field->name); // func defintion // sym->paddr = field->file_offset + bin->loadaddr; // code implementation sym->paddr = r_bin_java_get_method_code_offset (field); sym->vaddr = sym->paddr; // + bin->loadaddr; r_list_append (methods, sym); } } return methods; } R_API RList *r_bin_java_enum_class_fields(RBinJavaObj *bin, ut16 class_idx) { RList *fields = r_list_newf (free); RListIter *iter; RBinJavaField *fm_type; RBinField *field = NULL; r_list_foreach (bin->fields_list, iter, fm_type) { if (fm_type) { if (fm_type && fm_type->field_ref_cp_obj && fm_type->field_ref_cp_obj->metas->ord == class_idx) { field = r_bin_java_create_new_rbinfield_from_field (fm_type, bin->loadaddr); if (field) { r_list_append (fields, field); } } } } return fields; } R_API int is_class_interface(RBinJavaObj *bin, RBinJavaCPTypeObj *cp_obj) { RBinJavaInterfaceInfo *ifobj; RListIter *iter; int res = false; r_list_foreach (bin->interfaces_list, iter, ifobj) { if (ifobj) { res = cp_obj == ifobj->cp_class; if (res) { break; } } } return res; } /* R_API RList * r_bin_java_get_interface_classes(RBinJavaObj * bin) { RList *interfaces_names = r_list_new (); RListIter *iter; RBinJavaInterfaceInfo *ifobj; r_list_foreach(bin->interfaces_list, iter, iinfo) { RBinClass *class_ = R_NEW0 (RBinClass); RBinJavaCPTypeObj *cp_obj = ; if (ifobj && ifobj->name) { ut8 * name = strdup(ifobj->name); r_list_append(interfaces_names, name); } } return interfaces_names; } */ R_API RList *r_bin_java_get_lib_names(RBinJavaObj *bin) { RList *lib_names = r_list_newf (free); RListIter *iter; RBinJavaCPTypeObj *cp_obj = NULL; if (!bin) { return lib_names; } r_list_foreach (bin->cp_list, iter, cp_obj) { if (cp_obj && cp_obj->tag == R_BIN_JAVA_CP_CLASS && 
(bin->cf2.this_class != cp_obj->info.cp_class.name_idx || !is_class_interface (bin, cp_obj))) { char *name = r_bin_java_get_item_name_from_bin_cp_list (bin, cp_obj); if (name) { r_list_append (lib_names, name); } } } return lib_names; } R_API void r_bin_java_classes_free(void /*RBinClass*/ *k) { RBinClass *klass = k; if (klass) { r_list_free (klass->methods); r_list_free (klass->fields); free (klass->name); free (klass->super); free (klass->visibility_str); free (klass); } } R_API RList *r_bin_java_get_classes(RBinJavaObj *bin) { RList *classes = r_list_newf (r_bin_java_classes_free); RListIter *iter; RBinJavaCPTypeObj *cp_obj = NULL; RBinJavaCPTypeObj *this_class_cp_obj = r_bin_java_get_item_from_bin_cp_list (bin, bin->cf2.this_class); ut32 idx = 0; RBinClass *k = R_NEW0 (RBinClass); if (!k) { r_list_free (classes); return NULL; } k->visibility = bin->cf2.access_flags; if (bin->cf2.flags_str) { k->visibility_str = strdup (bin->cf2.flags_str); } k->methods = r_bin_java_enum_class_methods (bin, bin->cf2.this_class); k->fields = r_bin_java_enum_class_fields (bin, bin->cf2.this_class); k->name = r_bin_java_get_this_class_name (bin); k->super = r_bin_java_get_name_from_bin_cp_list (bin, bin->cf2.super_class); k->index = (idx++); r_list_append (classes, k); r_list_foreach (bin->cp_list, iter, cp_obj) { if (cp_obj && cp_obj->tag == R_BIN_JAVA_CP_CLASS && (this_class_cp_obj != cp_obj && is_class_interface (bin, cp_obj))) { k = R_NEW0 (RBinClass); if (!k) { break; } k->methods = r_bin_java_enum_class_methods (bin, cp_obj->info.cp_class.name_idx); k->fields = r_bin_java_enum_class_fields (bin, cp_obj->info.cp_class.name_idx); k->index = idx; k->name = r_bin_java_get_item_name_from_bin_cp_list (bin, cp_obj); r_list_append (classes, k); idx++; } } return classes; } R_API RBinSymbol *r_bin_java_create_new_symbol_from_invoke_dynamic(RBinJavaCPTypeObj *obj, ut64 baddr) { if (!obj || (obj->tag != R_BIN_JAVA_CP_INVOKEDYNAMIC)) { return NULL; } return 
r_bin_java_create_new_symbol_from_cp_idx (obj->info.cp_invoke_dynamic.name_and_type_index, baddr); } R_API RBinSymbol *r_bin_java_create_new_symbol_from_cp_idx(ut32 cp_idx, ut64 baddr) { RBinSymbol *sym = NULL; RBinJavaCPTypeObj *obj = r_bin_java_get_item_from_bin_cp_list ( R_BIN_JAVA_GLOBAL_BIN, cp_idx); if (obj) { switch (obj->tag) { case R_BIN_JAVA_CP_METHODREF: case R_BIN_JAVA_CP_FIELDREF: case R_BIN_JAVA_CP_INTERFACEMETHOD_REF: sym = r_bin_java_create_new_symbol_from_ref (R_BIN_JAVA_GLOBAL_BIN, obj, baddr); break; case R_BIN_JAVA_CP_INVOKEDYNAMIC: sym = r_bin_java_create_new_symbol_from_invoke_dynamic (obj, baddr); break; default: break; } } return sym; } R_API RList *U(r_bin_java_get_fields)(RBinJavaObj * bin) { RListIter *iter = NULL, *iter_tmp = NULL; RList *fields = r_list_new (); RBinJavaField *fm_type; RBinField *field; r_list_foreach_safe (bin->fields_list, iter, iter_tmp, fm_type) { field = r_bin_java_create_new_rbinfield_from_field (fm_type, bin->loadaddr); if (field) { r_list_append (fields, field); } } return fields; } R_API void r_bin_add_import(RBinJavaObj *bin, RBinJavaCPTypeObj *obj, const char *type) { RBinImport *imp = R_NEW0 (RBinImport); char *class_name = r_bin_java_get_name_from_bin_cp_list (bin, obj->info.cp_method.class_idx); char *name = r_bin_java_get_name_from_bin_cp_list (bin, obj->info.cp_method.name_and_type_idx); char *descriptor = r_bin_java_get_desc_from_bin_cp_list (bin, obj->info.cp_method.name_and_type_idx); class_name = class_name ? class_name : strdup ("INVALID CLASS NAME INDEX"); name = name ? name : strdup ("InvalidNameIndex"); descriptor = descriptor ? 
descriptor : strdup ("INVALID DESCRIPTOR INDEX"); imp->classname = class_name; imp->name = name; imp->bind = "NONE"; imp->type = r_str_constpool_get (&bin->constpool, type); imp->descriptor = descriptor; imp->ordinal = obj->idx; r_list_append (bin->imports_list, imp); } R_API void r_bin_java_set_imports(RBinJavaObj *bin) { RListIter *iter = NULL; RBinJavaCPTypeObj *obj = NULL; r_list_free (bin->imports_list); bin->imports_list = r_list_newf (free); r_list_foreach (bin->cp_list, iter, obj) { const char *type = NULL; switch (obj->tag) { case R_BIN_JAVA_CP_METHODREF: type = "METHOD"; break; case R_BIN_JAVA_CP_INTERFACEMETHOD_REF: type = "FIELD"; break; case R_BIN_JAVA_CP_FIELDREF: type = "INTERFACE_METHOD"; break; default: type = NULL; break; } if (type) { r_bin_add_import (bin, obj, type); } } } R_API RList *r_bin_java_get_imports(RBinJavaObj *bin) { RList *ret = r_list_newf (free); RBinImport *import = NULL; RListIter *iter; r_list_foreach (bin->imports_list, iter, import) { RBinImport *n_import = R_NEW0 (RBinImport); if (!n_import) { r_list_free (ret); return NULL; } memcpy (n_import, import, sizeof (RBinImport)); r_list_append (ret, n_import); } return ret; } R_API RList *r_bin_java_get_symbols(RBinJavaObj *bin) { RListIter *iter = NULL, *iter_tmp = NULL; RList *imports, *symbols = r_list_newf (free); RBinSymbol *sym = NULL; RBinImport *imp; RBinJavaField *fm_type; r_list_foreach_safe (bin->methods_list, iter, iter_tmp, fm_type) { sym = r_bin_java_create_new_symbol_from_field (fm_type, bin->loadaddr); if (sym) { r_list_append (symbols, (void *) sym); } sym = r_bin_java_create_new_symbol_from_fm_type_meta (fm_type, bin->loadaddr); if (sym) { r_list_append (symbols, (void *) sym); } } r_list_foreach_safe (bin->fields_list, iter, iter_tmp, fm_type) { sym = r_bin_java_create_new_symbol_from_field (fm_type, bin->loadaddr); if (sym) { r_list_append (symbols, (void *) sym); } sym = r_bin_java_create_new_symbol_from_fm_type_meta (fm_type, bin->loadaddr); if (sym) { 
r_list_append (symbols, (void *) sym);
		}
	}
	bin->lang = "java";
	if (bin->cf.major[1] >= 46) {
		// Derive a language/version string from the class-file major
		// version (46 == Java 1.2, 49 == Java 5, ...).
		switch (bin->cf.major[1]) {
		static char lang[32]; // static: bin->lang keeps pointing at it after return
		int langid;
		case 46:
		case 47:
		case 48:
			langid = 2 + (bin->cf.major[1] - 46);
			snprintf (lang, sizeof (lang) - 1, "java 1.%d", langid);
			bin->lang = lang;
			break;
		default:
			langid = 5 + (bin->cf.major[1] - 49);
			snprintf (lang, sizeof (lang) - 1, "java %d", langid);
			bin->lang = lang;
		}
	}
	imports = r_bin_java_get_imports (bin);
	r_list_foreach (imports, iter, imp) {
		sym = R_NEW0 (RBinSymbol);
		if (!sym) {
			break;
		}
		// Any kotlin/jvm import marks the whole binary as Kotlin.
		if (imp->classname && !strncmp (imp->classname, "kotlin/jvm", 10)) {
			bin->lang = "kotlin";
		}
		sym->name = strdup (imp->name);
		sym->is_imported = true;
		if (!sym->name) {
			free (sym);
			break;
		}
		sym->type = "import";
		if (!sym->type) {
			free (sym);
			break;
		}
		sym->vaddr = sym->paddr = imp->ordinal;
		sym->ordinal = imp->ordinal;
		r_list_append (symbols, (void *) sym);
	}
	r_list_free (imports);
	return symbols;
}

// Exposes every constant-pool UTF8 entry as an RBinString.
R_API RList *r_bin_java_get_strings(RBinJavaObj *bin) {
	RList *strings = r_list_newf (free);
	RBinString *str = NULL;
	RListIter *iter = NULL, *iter_tmp = NULL;
	RBinJavaCPTypeObj *cp_obj = NULL;
	r_list_foreach_safe (bin->cp_list, iter, iter_tmp, cp_obj) {
		if (cp_obj && cp_obj->tag == R_BIN_JAVA_CP_UTF8) {
			str = (RBinString *) R_NEW0 (RBinString);
			if (str) {
				str->paddr = cp_obj->file_offset + bin->loadaddr;
				str->ordinal = cp_obj->metas->ord;
				str->size = cp_obj->info.cp_utf8.length + 3;
				str->length = cp_obj->info.cp_utf8.length;
				if (str->size > 0) {
					// Copy is capped at R_BIN_JAVA_MAXSTR.
					str->string = r_str_ndup ((const char *) cp_obj->info.cp_utf8.bytes, R_BIN_JAVA_MAXSTR);
				}
				r_list_append (strings, (void *) str);
			}
		}
	}
	return strings;
}

// Releases every list and string owned by the RBinJavaObj, then the
// object itself. Always returns NULL so callers can reset their pointer.
R_API void *r_bin_java_free(RBinJavaObj *bin) {
	char *bin_obj_key = NULL;
	if (!bin) {
		return NULL;
	}
	// Delete the bin object from the data base.
bin_obj_key = r_bin_java_build_obj_key (bin);
	// if (bin->AllJavaBinObjs && sdb_exists (bin->AllJavaBinObjs, bin_obj_key)) {
	// sdb_unset (bin->AllJavaBinObjs, bin_obj_key, 0);
	// }
	free (bin_obj_key);
	r_list_free (bin->imports_list);
	// XXX - Need to remove all keys belonging to this class from
	// the share meta information sdb.
	// TODO e.g. iterate over bin->kv and delete all obj, func, etc. keys
	// sdb_free (bin->kv);
	// free up the constant pool list
	r_list_free (bin->cp_list);
	// free up the fields list
	r_list_free (bin->fields_list);
	// free up methods list
	r_list_free (bin->methods_list);
	// free up interfaces list
	r_list_free (bin->interfaces_list);
	r_list_free (bin->attrs_list);
	// TODO: XXX if a class list of all inner classes
	// are formed then this will need to be updated
	free (bin->cf2.flags_str);
	free (bin->cf2.this_class_name);
	if (bin == R_BIN_JAVA_GLOBAL_BIN) {
		R_BIN_JAVA_GLOBAL_BIN = NULL;
	}
	free (bin->file);
	r_str_constpool_fini (&bin->constpool);
	free (bin);
	return NULL;
}

// Allocates an RBinJavaObj and parses the class file from the buffer's
// backing data; on parse failure the partially built object is freed
// and NULL is returned (via r_bin_java_free).
R_API RBinJavaObj *r_bin_java_new_buf(RBuffer *buf, ut64 loadaddr, Sdb *kv) {
	RBinJavaObj *bin = R_NEW0 (RBinJavaObj);
	if (!bin) {
		return NULL;
	}
	ut64 tmpsz;
	const ut8 *tmp = r_buf_data (buf, &tmpsz);
	if (!r_bin_java_new_bin (bin, loadaddr, kv, tmp, tmpsz)) {
		return r_bin_java_free (bin);
	}
	return bin;
}

// Dispatches attribute destruction to the per-type delete_obj callback
// registered in the attribute's metas.
R_API void r_bin_java_attribute_free(void /*RBinJavaAttrInfo*/ *a) {
	RBinJavaAttrInfo *attr = a;
	if (attr) {
		IFDBG eprintf ("Deleting attr %s, %p\n", attr->name, attr);
		if (attr && attr->metas && attr->metas->type_info) {
			RBinJavaAttrMetas *a = attr->metas->type_info;
			if (a && a->allocs && a->allocs->delete_obj) {
				a->allocs->delete_obj (attr);
			}
		}
		// free (attr->metas);
		// free (attr);
	}
}

// Frees a constant-pool object through its type-specific destructor,
// skipping the shared NULL sentinel object.
R_API void r_bin_java_constant_pool(void /*RBinJavaCPTypeObj*/ *o) {
	RBinJavaCPTypeObj *obj = o;
	if (obj != &R_BIN_JAVA_NULL_TYPE) {
		((RBinJavaCPTypeMetas *) obj->metas->type_info)->allocs->delete_obj (obj);
	}
}

// Frees a field/method descriptor and everything it owns.
R_API void r_bin_java_fmtype_free(void /*RBinJavaField*/ *f) {
	RBinJavaField
*fm_type = f;
	if (!fm_type) {
		return;
	}
	free (fm_type->descriptor);
	free (fm_type->name);
	free (fm_type->flags_str);
	free (fm_type->class_name);
	free (fm_type->metas);
	r_list_free (fm_type->attributes);
	free (fm_type);
}
// Start Free the various attribute types

// Generic destructor for attributes with no type-specific payload.
R_API void r_bin_java_unknown_attr_free(void /*RBinJavaAttrInfo*/ *a) {
	RBinJavaAttrInfo *attr = a;
	if (attr) {
		free (attr->name);
		free (attr->metas);
		free (attr);
	}
}

// Frees one LocalVariableTable entry.
R_API void r_bin_java_local_variable_table_attr_entry_free(void /*RBinJavaLocalVariableAttribute*/ *a) {
	RBinJavaLocalVariableAttribute *lvattr = a;
	if (lvattr) {
		free (lvattr->descriptor);
		free (lvattr->name);
		free (lvattr);
	}
}

// Frees a LocalVariableTable attribute and its entry list.
R_API void r_bin_java_local_variable_table_attr_free(void /*RBinJavaAttrInfo*/ *a) {
	RBinJavaAttrInfo *attr = a;
	if (attr) {
		free (attr->name);
		free (attr->metas);
		r_list_free (attr->info.local_variable_table_attr.local_variable_table);
		free (attr);
	}
}

// Frees one LocalVariableTypeTable entry.
R_API void r_bin_java_local_variable_type_table_attr_entry_free(void /*RBinJavaLocalVariableTypeAttribute*/ *a) {
	RBinJavaLocalVariableTypeAttribute *attr = a;
	if (attr) {
		free (attr->name);
		free (attr->signature);
		free (attr);
	}
}

// Frees a LocalVariableTypeTable attribute and its entry list.
R_API void r_bin_java_local_variable_type_table_attr_free(void /*RBinJavaAttrInfo*/ *a) {
	RBinJavaAttrInfo *attr = a;
	if (attr) {
		free (attr->name);
		free (attr->metas);
		r_list_free (attr->info.local_variable_type_table_attr.local_variable_table);
		free (attr);
	}
}

// Frees a Deprecated attribute (marker only, no payload).
R_API void r_bin_java_deprecated_attr_free(void /*RBinJavaAttrInfo*/ *a) {
	RBinJavaAttrInfo *attr = a;
	if (attr) {
		free (attr->name);
		free (attr->metas);
		free (attr);
	}
}

// Frees an EnclosingMethod attribute and its resolved name strings.
R_API void r_bin_java_enclosing_methods_attr_free(void /*RBinJavaAttrInfo*/ *a) {
	RBinJavaAttrInfo *attr = a;
	if (attr) {
		free (attr->name);
		free (attr->metas);
		free (attr->info.enclosing_method_attr.class_name);
		free (attr->info.enclosing_method_attr.method_name);
		free (attr->info.enclosing_method_attr.method_descriptor);
		free (attr);
	}
}

// Frees a Synthetic attribute (marker only, no payload).
R_API void r_bin_java_synthetic_attr_free(void /*RBinJavaAttrInfo*/ *a) {
RBinJavaAttrInfo *attr = a;
	if (attr) {
		free (attr->name);
		free (attr->metas);
		free (attr);
	}
}

// Frees a ConstantValue attribute (index-only payload).
R_API void r_bin_java_constant_value_attr_free(void /*RBinJavaAttrInfo*/ *a) {
	RBinJavaAttrInfo *attr = a;
	if (attr) {
		free (attr->name);
		free (attr->metas);
		free (attr);
	}
}

// Frees a LineNumberTable attribute and its entries.
R_API void r_bin_java_line_number_table_attr_free(void /*RBinJavaAttrInfo*/ *a) {
	RBinJavaAttrInfo *attr = a;
	if (attr) {
		free (attr->name);
		free (attr->metas);
		r_list_free (attr->info.line_number_table_attr.line_number_table);
		free (attr);
	}
}

// Frees a Code attribute: implicit stack frame, nested attributes,
// bytecode buffer and exception table.
R_API void r_bin_java_code_attr_free(void /*RBinJavaAttrInfo*/ *a) {
	RBinJavaAttrInfo *attr = a;
	if (attr) {
		// XXX - Intentional memory leak here. When one of the
		// Code attributes is parsed, the code (the r_bin_java)
		// is not properly parsing the class file
		r_bin_java_stack_frame_free (attr->info.code_attr.implicit_frame);
		r_list_free (attr->info.code_attr.attributes);
		free (attr->info.code_attr.code);
		r_list_free (attr->info.code_attr.exception_table);
		free (attr->name);
		free (attr->metas);
		free (attr);
	}
}

// Frees an Exceptions attribute and its index table.
R_API void r_bin_java_exceptions_attr_free(void /*RBinJavaAttrInfo*/ *a) {
	RBinJavaAttrInfo *attr = a;
	if (attr) {
		free (attr->name);
		free (attr->metas);
		free (attr->info.exceptions_attr.exception_idx_table);
		free (attr);
	}
}

// Frees one InnerClasses entry.
R_API void r_bin_java_inner_classes_attr_entry_free(void /*RBinJavaClassesAttribute*/ *a) {
	RBinJavaClassesAttribute *attr = a;
	if (attr) {
		free (attr->name);
		free (attr->flags_str);
		free (attr);
	}
}

// Frees an InnerClasses attribute and its class list.
R_API void r_bin_java_inner_classes_attr_free(void /*RBinJavaAttrInfo*/ *a) {
	RBinJavaAttrInfo *attr = a;
	if (attr) {
		free (attr->name);
		free (attr->metas);
		r_list_free (attr->info.inner_classes_attr.classes);
		free (attr);
	}
}

// Frees a Signature attribute and its resolved signature string.
R_API void r_bin_java_signature_attr_free(void /*RBinJavaAttrInfo*/ *a) {
	RBinJavaAttrInfo *attr = a;
	if (attr) {
		free (attr->name);
		free (attr->metas);
		free (attr->info.signature_attr.signature);
		free (attr);
	}
}

// Frees a SourceDebugExtension attribute and its payload.
R_API void r_bin_java_source_debug_attr_free(void /*RBinJavaAttrInfo*/ *a) {
	RBinJavaAttrInfo
*attr = a; if (attr) { free (attr->name); free (attr->metas); free (attr->info.debug_extensions.debug_extension); free (attr); } } R_API void r_bin_java_source_code_file_attr_free(void /*RBinJavaAttrInfo*/ *a) { RBinJavaAttrInfo *attr = a; if (attr) { free (attr->name); free (attr->metas); free (attr); } } R_API void r_bin_java_stack_map_table_attr_free(void /*RBinJavaAttrInfo*/ *a) { RBinJavaAttrInfo *attr = a; if (attr) { free (attr->name); free (attr->metas); r_list_free (attr->info.stack_map_table_attr.stack_map_frame_entries); free (attr); } } R_API void r_bin_java_stack_frame_free(void /*RBinJavaStackMapFrame*/ *o) { RBinJavaStackMapFrame *obj = o; if (obj) { r_list_free (obj->local_items); r_list_free (obj->stack_items); free (obj->metas); free (obj); } } R_API void r_bin_java_verification_info_free(void /*RBinJavaVerificationObj*/ *o) { RBinJavaVerificationObj *obj = o; // eprintf ("Freeing verification object\n"); if (obj) { free (obj->name); free (obj); } } R_API void r_bin_java_interface_free(void /*RBinJavaInterfaceInfo*/ *o) { RBinJavaInterfaceInfo *obj = o; if (obj) { free (obj->name); free (obj); } } // End Free the various attribute types // Start the various attibute types new R_API ut64 r_bin_java_attr_calc_size(RBinJavaAttrInfo *attr) { return attr ? ((RBinJavaAttrMetas *) attr->metas->type_info)->allocs->calc_size (attr) : 0; } R_API ut64 r_bin_java_unknown_attr_calc_size(RBinJavaAttrInfo *attr) { return attr ? 6 : 0; } R_API RBinJavaAttrInfo *r_bin_java_unknown_attr_new(RBinJavaObj *bin, ut8 *buffer, ut64 sz, ut64 buf_offset) { return r_bin_java_default_attr_new (bin, buffer, sz, buf_offset); } R_API ut64 r_bin_java_code_attr_calc_size(RBinJavaAttrInfo *attr) { RListIter *iter; // RListIter *iter_tmp; ut64 size = 0; bool is_attr_in_old_format = attr->is_attr_in_old_format; if (attr) { // attr = r_bin_java_default_attr_new (buffer, sz, buf_offset); size += is_attr_in_old_format ? 
4 : 6; // attr->info.code_attr.max_stack = R_BIN_JAVA_USHORT (buffer, 0); size += is_attr_in_old_format ? 1 : 2; // attr->info.code_attr.max_locals = R_BIN_JAVA_USHORT (buffer, 2); size += is_attr_in_old_format ? 1 : 2; // attr->info.code_attr.code_length = R_BIN_JAVA_UINT (buffer, 4); size += is_attr_in_old_format ? 2 : 4; if (attr->info.code_attr.code) { size += attr->info.code_attr.code_length; } // attr->info.code_attr.exception_table_length = R_BIN_JAVA_USHORT (buffer, offset); size += 2; // RBinJavaExceptionEntry *exc_entry; // r_list_foreach_safe (attr->info.code_attr.exception_table, iter, iter_tmp, exc_entry) { r_list_foreach_iter (attr->info.code_attr.exception_table, iter) { // exc_entry->start_pc = R_BIN_JAVA_USHORT (buffer,offset); size += 2; // exc_entry->end_pc = R_BIN_JAVA_USHORT (buffer,offset); size += 2; // exc_entry->handler_pc = R_BIN_JAVA_USHORT (buffer,offset); size += 2; // exc_entry->catch_type = R_BIN_JAVA_USHORT (buffer, offset); size += 2; } // attr->info.code_attr.attributes_count = R_BIN_JAVA_USHORT (buffer, offset); size += 2; // RBinJavaAttrInfo *_attr; if (attr->info.code_attr.attributes_count > 0) { // r_list_foreach_safe (attr->info.code_attr.attributes, iter, iter_tmp, _attr) { r_list_foreach_iter (attr->info.code_attr.attributes, iter) { size += r_bin_java_attr_calc_size (attr); } } } return size; } R_API RBinJavaAttrInfo *r_bin_java_code_attr_new(RBinJavaObj *bin, ut8 *buffer, ut64 sz, ut64 buf_offset) { RBinJavaAttrInfo *_attr = NULL; ut32 k = 0, curpos; ut64 offset = 0; RBinJavaAttrInfo *attr = r_bin_java_default_attr_new (bin, buffer, sz, buf_offset); if (!attr) { return NULL; } if (sz < 16 || sz > buf_offset) {// sz > buf_offset) { free (attr); return NULL; } offset += 6; attr->type = R_BIN_JAVA_ATTR_TYPE_CODE_ATTR; attr->info.code_attr.max_stack = attr->is_attr_in_old_format ? buffer[offset] : R_BIN_JAVA_USHORT (buffer, offset); offset += attr->is_attr_in_old_format ? 
1 : 2;
	attr->info.code_attr.max_locals = attr->is_attr_in_old_format ? buffer[offset] : R_BIN_JAVA_USHORT (buffer, offset);
	offset += attr->is_attr_in_old_format ? 1 : 2;
	attr->info.code_attr.code_length = attr->is_attr_in_old_format ? R_BIN_JAVA_USHORT(buffer, offset) : R_BIN_JAVA_UINT (buffer, offset);
	offset += attr->is_attr_in_old_format ? 2 : 4;
	// BUG: possible unsigned integer overflow here
	attr->info.code_attr.code_offset = buf_offset + offset;
	// NOTE(review): code_length comes straight from the file and is not
	// validated against sz before this allocation.
	attr->info.code_attr.code = (ut8 *) malloc (attr->info.code_attr.code_length);
	if (!attr->info.code_attr.code) {
		eprintf ("Handling Code Attributes: Unable to allocate memory "
			"(%u bytes) for a code.\n", attr->info.code_attr.code_length);
		return attr;
	}
	R_BIN_JAVA_GLOBAL_BIN->current_code_attr = attr;
	{
		int len = attr->info.code_attr.code_length;
		memset (attr->info.code_attr.code, 0, len);
		if (offset + len >= sz) {
			// Truncated input: keep the zeroed buffer and bail out early.
			return attr;
		}
		memcpy (attr->info.code_attr.code, buffer + offset, len);
		offset += len;
	}
	attr->info.code_attr.exception_table_length = R_BIN_JAVA_USHORT (buffer, offset);
	offset += 2;
	attr->info.code_attr.exception_table = r_list_newf (free);
	for (k = 0; k < attr->info.code_attr.exception_table_length; k++) {
		curpos = buf_offset + offset;
		// NOTE(review): this compares a file-absolute position against
		// the local buffer size — verify intent.
		if (curpos + 8 > sz) {
			return attr;
		}
		RBinJavaExceptionEntry *e = R_NEW0 (RBinJavaExceptionEntry);
		if (!e) {
			// NOTE(review): freeing only attr here leaks its name/metas/
			// code/exception_table; the typed destructor would be safer.
			free (attr);
			return NULL;
		}
		e->file_offset = curpos;
		e->start_pc = R_BIN_JAVA_USHORT (buffer, offset);
		offset += 2;
		e->end_pc = R_BIN_JAVA_USHORT (buffer, offset);
		offset += 2;
		e->handler_pc = R_BIN_JAVA_USHORT (buffer, offset);
		offset += 2;
		e->catch_type = R_BIN_JAVA_USHORT (buffer, offset);
		offset += 2;
		r_list_append (attr->info.code_attr.exception_table, e);
		e->size = 8;
	}
	attr->info.code_attr.attributes_count = R_BIN_JAVA_USHORT (buffer, offset);
	offset += 2;
	// IFDBG eprintf ("  code Attributes_count: %d\n", attr->info.code_attr.attributes_count);
	// XXX - attr->info.code_attr.attributes is not freed because one of the code attributes is improperly parsed.
attr->info.code_attr.attributes = r_list_newf (r_bin_java_attribute_free);
	if (attr->info.code_attr.attributes_count > 0) {
		// Parse each nested attribute from the remaining bytes.
		for (k = 0; k < attr->info.code_attr.attributes_count; k++) {
			int size = (offset < sz) ? sz - offset : 0;
			if (size > sz || size <= 0) {
				break;
			}
			_attr = r_bin_java_read_next_attr_from_buffer (bin, buffer + offset, size, buf_offset + offset);
			if (!_attr) {
				eprintf ("[X] r_bin_java_code_attr_new: Error unable to parse remainder of classfile after Method's Code Attribute: %d.\n", k);
				break;
			}
			IFDBG eprintf ("Parsing @ 0x%"PFMT64x " (%s) = 0x%"PFMT64x " bytes, %p\n", _attr->file_offset, _attr->name, _attr->size, _attr);
			offset += _attr->size;
			r_list_append (attr->info.code_attr.attributes, _attr);
			if (_attr->type == R_BIN_JAVA_ATTR_TYPE_LOCAL_VARIABLE_TABLE_ATTR) {
				IFDBG eprintf ("Parsed the LocalVariableTable, preparing the implicit mthod frame.\n");
				// r_bin_java_print_attr_summary(_attr);
				attr->info.code_attr.implicit_frame = r_bin_java_build_stack_frame_from_local_variable_table (R_BIN_JAVA_GLOBAL_BIN, _attr);
				attr->info.code_attr.implicit_frame->file_offset = buf_offset;
				IFDBG r_bin_java_print_stack_map_frame_summary(attr->info.code_attr.implicit_frame);
				// r_list_append (attr->info.code_attr.attributes, attr->info.code_attr.implicit_frame);
			}
			// if (offset > sz) {
			// eprintf ("[X] r_bin_java: Error unable to parse remainder of classfile after Attribute: %d.\n", k);
			// break;
			// }
		}
	}
	if (attr->info.code_attr.implicit_frame == NULL) {
		// build a default implicit_frame
		attr->info.code_attr.implicit_frame = r_bin_java_default_stack_frame ();
		// r_list_append (attr->info.code_attr.attributes, attr->info.code_attr.implicit_frame);
	}
	attr->size = offset;
	return attr;
}

// Parses a ConstantValue attribute (2-byte constant-pool index).
R_API RBinJavaAttrInfo *r_bin_java_constant_value_attr_new(RBinJavaObj *bin, ut8 *buffer, ut64 sz, ut64 buf_offset) {
	ut64 offset = 6;
	RBinJavaAttrInfo *attr = r_bin_java_default_attr_new (bin, buffer, sz, buf_offset);
	if (attr) {
		attr->type = R_BIN_JAVA_ATTR_TYPE_CONST_VALUE_ATTR;
attr->info.constant_value_attr.constantvalue_idx = R_BIN_JAVA_USHORT (buffer, offset); offset += 2; attr->size = offset; } // IFDBG r_bin_java_print_constant_value_attr_summary(attr); return attr; } R_API ut64 r_bin_java_constant_value_attr_calc_size(RBinJavaAttrInfo *attr) { return attr ? 8 : 0; } R_API RBinJavaAttrInfo *r_bin_java_deprecated_attr_new(RBinJavaObj *bin, ut8 *buffer, ut64 sz, ut64 buf_offset) { RBinJavaAttrInfo *attr = r_bin_java_default_attr_new (bin, buffer, sz, buf_offset); if (attr) { attr->type = R_BIN_JAVA_ATTR_TYPE_DEPRECATED_ATTR; attr->size = 6; } // IFDBG r_bin_java_print_deprecated_attr_summary(attr); return attr; } R_API ut64 r_bin_java_deprecated_attr_calc_size(RBinJavaAttrInfo *attr) { return attr ? 6 : 0; } R_API RBinJavaAttrInfo *r_bin_java_signature_attr_new(RBinJavaObj *bin, ut8 *buffer, ut64 sz, ut64 buf_offset) { if (sz < 8) { return NULL; } RBinJavaAttrInfo *attr = r_bin_java_default_attr_new (bin, buffer, sz, buf_offset); if (!attr) { return NULL; } ut64 offset = 6; attr->type = R_BIN_JAVA_ATTR_TYPE_SIGNATURE_ATTR; // attr->info.source_file_attr.sourcefile_idx = R_BIN_JAVA_USHORT (buffer, offset); // offset += 2; attr->info.signature_attr.signature_idx = R_BIN_JAVA_USHORT (buffer, offset); offset += 2; attr->info.signature_attr.signature = r_bin_java_get_utf8_from_bin_cp_list ( R_BIN_JAVA_GLOBAL_BIN, attr->info.signature_attr.signature_idx); if (!attr->info.signature_attr.signature) { eprintf ("r_bin_java_signature_attr_new: Unable to resolve the " "Signature UTF8 String Index: 0x%02x\n", attr->info.signature_attr.signature_idx); } attr->size = offset; // IFDBG r_bin_java_print_source_code_file_attr_summary(attr); return attr; } R_API ut64 r_bin_java_signature_attr_calc_size(RBinJavaAttrInfo *attr) { ut64 size = 0; if (attr == NULL) { // TODO eprintf allocation fail return size; } size += 6; // attr->info.source_file_attr.sourcefile_idx = R_BIN_JAVA_USHORT (buffer, offset); size += 2; // attr->info.signature_attr.signature_idx 
= R_BIN_JAVA_USHORT (buffer, offset); size += 2; return size; } R_API RBinJavaAttrInfo *r_bin_java_enclosing_methods_attr_new(RBinJavaObj *bin, ut8 *buffer, ut64 sz, ut64 buf_offset) { ut64 offset = 6; RBinJavaAttrInfo *attr = r_bin_java_default_attr_new (bin, buffer, sz, buf_offset); if (!attr || sz < 10) { free (attr); return NULL; } attr->type = R_BIN_JAVA_ATTR_TYPE_ENCLOSING_METHOD_ATTR; attr->info.enclosing_method_attr.class_idx = R_BIN_JAVA_USHORT (buffer, offset); offset += 2; attr->info.enclosing_method_attr.method_idx = R_BIN_JAVA_USHORT (buffer, offset); offset += 2; attr->info.enclosing_method_attr.class_name = r_bin_java_get_name_from_bin_cp_list (R_BIN_JAVA_GLOBAL_BIN, attr->info.enclosing_method_attr.class_idx); if (attr->info.enclosing_method_attr.class_name == NULL) { eprintf ("Could not resolve enclosing class name for the enclosed method.\n"); } attr->info.enclosing_method_attr.method_name = r_bin_java_get_name_from_bin_cp_list (R_BIN_JAVA_GLOBAL_BIN, attr->info.enclosing_method_attr.method_idx); if (attr->info.enclosing_method_attr.class_name == NULL) { eprintf ("Could not resolve method descriptor for the enclosed method.\n"); } attr->info.enclosing_method_attr.method_descriptor = r_bin_java_get_desc_from_bin_cp_list (R_BIN_JAVA_GLOBAL_BIN, attr->info.enclosing_method_attr.method_idx); if (attr->info.enclosing_method_attr.method_name == NULL) { eprintf ("Could not resolve method name for the enclosed method.\n"); } attr->size = offset; return attr; } R_API ut64 r_bin_java_enclosing_methods_attr_calc_size(RBinJavaAttrInfo *attr) { ut64 size = 0; if (attr) { size += 6; // attr->info.enclosing_method_attr.class_idx = R_BIN_JAVA_USHORT (buffer, offset); size += 2; // attr->info.enclosing_method_attr.method_idx = R_BIN_JAVA_USHORT (buffer, offset); size += 2; } return size; } R_API RBinJavaAttrInfo *r_bin_java_exceptions_attr_new(RBinJavaObj *bin, ut8 *buffer, ut64 sz, ut64 buf_offset) { ut32 i = 0, offset = 0; ut64 size; if (sz < 8) { return NULL; } 
RBinJavaAttrInfo *attr = r_bin_java_default_attr_new (bin, buffer, sz, buf_offset);
	offset += 6;
	if (!attr) {
		return attr;
	}
	// NOTE(review): this sets the LINE_NUMBER_TABLE type constant inside
	// the Exceptions-attribute parser — looks like a copy-paste slip;
	// confirm against the attribute-type dispatch table before changing.
	attr->type = R_BIN_JAVA_ATTR_TYPE_LINE_NUMBER_TABLE_ATTR;
	attr->info.exceptions_attr.number_of_exceptions = R_BIN_JAVA_USHORT (buffer, offset);
	offset += 2;
	size = sizeof (ut16) * attr->info.exceptions_attr.number_of_exceptions;
	// Overflow guard for the multiplication above.
	if (size < attr->info.exceptions_attr.number_of_exceptions) {
		free (attr);
		return NULL;
	}
	attr->info.exceptions_attr.exception_idx_table = (ut16 *) malloc (size);
	if (!attr->info.exceptions_attr.exception_idx_table) {
		free (attr);
		return NULL;
	}
	for (i = 0; i < attr->info.exceptions_attr.number_of_exceptions; i++) {
		// Stop reading at the end of the buffer.
		if (offset + 2 > sz) {
			break;
		}
		attr->info.exceptions_attr.exception_idx_table[i] = R_BIN_JAVA_USHORT (buffer, offset);
		offset += 2;
	}
	attr->size = offset;
	// IFDBG r_bin_java_print_exceptions_attr_summary(attr);
	return attr;
}

R_API ut64 r_bin_java_exceptions_attr_calc_size(RBinJavaAttrInfo *attr) {
	ut64 size = 0, i = 0;
	if (attr) {
		size += 6;
		for (i = 0; i < attr->info.exceptions_attr.number_of_exceptions; i++) {
			// attr->info.exceptions_attr.exception_idx_table[i] = R_BIN_JAVA_USHORT (buffer, offset);
			size += 2;
		}
	}
	return size;
}

// Parses an InnerClasses attribute: a table of inner-class records with
// names resolved from the constant pool.
R_API RBinJavaAttrInfo *r_bin_java_inner_classes_attr_new(RBinJavaObj *bin, ut8 *buffer, ut64 sz, ut64 buf_offset) {
	RBinJavaClassesAttribute *icattr;
	RBinJavaAttrInfo *attr = NULL;
	RBinJavaCPTypeObj *obj;
	ut32 i = 0;
	ut64 offset = 0, curpos;
	attr = r_bin_java_default_attr_new (bin, buffer, sz, buf_offset);
	offset += 6;
	if (buf_offset + offset + 8 > sz) {
		eprintf ("Invalid amount of inner classes\n");
		// NOTE(review): attr allocated above leaks on this early return.
		return NULL;
	}
	if (attr == NULL) {
		// TODO eprintf
		return attr;
	}
	attr->type = R_BIN_JAVA_ATTR_TYPE_INNER_CLASSES_ATTR;
	attr->info.inner_classes_attr.number_of_classes = R_BIN_JAVA_USHORT (buffer, offset);
	offset += 2;
	attr->info.inner_classes_attr.classes = r_list_newf (r_bin_java_inner_classes_attr_entry_free);
	for (i = 0; i < attr->info.inner_classes_attr.number_of_classes;
i++) {
		curpos = buf_offset + offset;
		if (buf_offset + offset + 8 > sz) {
			eprintf ("Invalid amount of inner classes\n");
			break;
		}
		icattr = R_NEW0 (RBinJavaClassesAttribute);
		if (!icattr) {
			break;
		}
		icattr->inner_class_info_idx = R_BIN_JAVA_USHORT (buffer, offset);
		offset += 2;
		icattr->outer_class_info_idx = R_BIN_JAVA_USHORT (buffer, offset);
		offset += 2;
		icattr->inner_name_idx = R_BIN_JAVA_USHORT (buffer, offset);
		offset += 2;
		icattr->inner_class_access_flags = R_BIN_JAVA_USHORT (buffer, offset);
		offset += 2;
		icattr->flags_str = retrieve_class_method_access_string (icattr->inner_class_access_flags);
		icattr->file_offset = curpos;
		icattr->size = 8;
		obj = r_bin_java_get_item_from_bin_cp_list (R_BIN_JAVA_GLOBAL_BIN, icattr->inner_name_idx);
		if (!obj) {
			eprintf ("BINCPLIS IS HULL %d\n", icattr->inner_name_idx);
		}
		icattr->name = r_bin_java_get_item_name_from_bin_cp_list (R_BIN_JAVA_GLOBAL_BIN, obj);
		if (!icattr->name) {
			// Fall back to the inner_class_info entry for a usable name.
			obj = r_bin_java_get_item_from_bin_cp_list (R_BIN_JAVA_GLOBAL_BIN, icattr->inner_class_info_idx);
			if (!obj) {
				eprintf ("BINCPLIST IS NULL %d\n", icattr->inner_class_info_idx);
			}
			icattr->name = r_bin_java_get_item_name_from_bin_cp_list (R_BIN_JAVA_GLOBAL_BIN, obj);
			if (!icattr->name) {
				icattr->name = r_str_dup (NULL, "NULL");
				eprintf ("r_bin_java_inner_classes_attr: Unable to find the name for %d index.\n", icattr->inner_name_idx);
				// NOTE(review): freeing icattr here leaks its name and
				// flags_str strings.
				free (icattr);
				break;
			}
		}
		IFDBG eprintf ("r_bin_java_inner_classes_attr: Inner class name %d is %s.\n", icattr->inner_name_idx, icattr->name);
		r_list_append (attr->info.inner_classes_attr.classes, (void *) icattr);
	}
	attr->size = offset;
	// IFDBG r_bin_java_print_inner_classes_attr_summary(attr);
	return attr;
}

// Fixed per-entry size of one InnerClasses record (four u16 fields).
R_API ut64 r_bin_java_inner_class_attr_calc_size(RBinJavaClassesAttribute *icattr) {
	ut64 size = 0;
	if (icattr) {
		// icattr->inner_class_info_idx = R_BIN_JAVA_USHORT (buffer, offset);
		size += 2;
		// icattr->outer_class_info_idx = R_BIN_JAVA_USHORT (buffer, offset);
		size += 2;
		// icattr->inner_name_idx = R_BIN_JAVA_USHORT (buffer, offset);
		size += 2;
		// icattr->inner_class_access_flags = R_BIN_JAVA_USHORT (buffer, offset);
		size += 2;
	}
	return size;
}

R_API ut64 r_bin_java_inner_classes_attr_calc_size(RBinJavaAttrInfo *attr) {
	RBinJavaClassesAttribute *icattr = NULL;
	RListIter *iter;
	ut64 size = 6;
	if (!attr) {
		return 0;
	}
	r_list_foreach (attr->info.inner_classes_attr.classes, iter, icattr) {
		size += r_bin_java_inner_class_attr_calc_size (icattr);
	}
	return size;
}

// Parses a LineNumberTable attribute (pairs of start_pc/line_number).
R_API RBinJavaAttrInfo *r_bin_java_line_number_table_attr_new(RBinJavaObj *bin, ut8 *buffer, ut64 sz, ut64 buf_offset) {
	ut32 i = 0;
	ut64 curpos, offset = 0;
	if (sz < 6) {
		return NULL;
	}
	RBinJavaAttrInfo *attr = r_bin_java_default_attr_new (bin, buffer, sz, buf_offset);
	if (!attr) {
		return NULL;
	}
	offset += 6;
	attr->type = R_BIN_JAVA_ATTR_TYPE_LINE_NUMBER_TABLE_ATTR;
	attr->info.line_number_table_attr.line_number_table_length = R_BIN_JAVA_USHORT (buffer, offset);
	offset += 2;
	attr->info.line_number_table_attr.line_number_table = r_list_newf (free);
	ut32 linenum_len = attr->info.line_number_table_attr.line_number_table_length;
	RList *linenum_list = attr->info.line_number_table_attr.line_number_table;
	for (i = 0; i < linenum_len; i++) {
		curpos = buf_offset + offset;
		// eprintf ("%"PFMT64x" %"PFMT64x"\n", curpos, sz);
		// XXX if (curpos + 8 >= sz) break;
		RBinJavaLineNumberAttribute *lnattr = R_NEW0 (RBinJavaLineNumberAttribute);
		if (!lnattr) {
			break;
		}
		// wtf it works
		if (offset - 2 > sz) {
			R_FREE (lnattr);
			break;
		}
		lnattr->start_pc = R_BIN_JAVA_USHORT (buffer, offset);
		offset += 2;
		lnattr->line_number = R_BIN_JAVA_USHORT (buffer, offset);
		offset += 2;
		lnattr->file_offset = curpos;
		lnattr->size = 4;
		r_list_append (linenum_list, lnattr);
	}
	attr->size = offset;
	return attr;
}

R_API ut64 r_bin_java_line_number_table_attr_calc_size(RBinJavaAttrInfo *attr) {
	ut64 size = 6;
	// RBinJavaLineNumberAttribute *lnattr;
	RListIter *iter;
	// RListIter *iter_tmp;
	if (!attr) {
		return 0LL;
	}
	// r_list_foreach_safe (attr->info.line_number_table_attr.line_number_table, iter, iter_tmp, lnattr) {
	r_list_foreach_iter (attr->info.line_number_table_attr.line_number_table, iter) {
		// lnattr->start_pc = R_BIN_JAVA_USHORT (buffer, offset);
		size += 2;
		// lnattr->line_number = R_BIN_JAVA_USHORT (buffer, offset);
		size += 2;
	}
	return size;
}

// Parses a SourceDebugExtension attribute: copies up to attr->length
// bytes of raw debug data, clamped to the remaining buffer.
R_API RBinJavaAttrInfo *r_bin_java_source_debug_attr_new(RBinJavaObj *bin, ut8 *buffer, ut64 sz, ut64 buf_offset) {
	ut64 offset = 6;
	RBinJavaAttrInfo *attr = r_bin_java_default_attr_new (bin, buffer, sz, buf_offset);
	if (!attr) {
		return NULL;
	}
	attr->type = R_BIN_JAVA_ATTR_TYPE_SOURCE_DEBUG_EXTENTSION_ATTR;
	if (attr->length == 0) {
		eprintf ("r_bin_java_source_debug_attr_new: Attempting to allocate 0 bytes for debug_extension.\n");
		attr->info.debug_extensions.debug_extension = NULL;
		return attr;
	} else if ((attr->length + offset) > sz) {
		// NOTE(review): the message prints (offset + sz); the available
		// byte count is more likely (sz - offset).
		eprintf ("r_bin_java_source_debug_attr_new: Expected %d byte(s) got %" PFMT64d " bytes for debug_extension.\n", attr->length, (offset + sz));
	}
	attr->info.debug_extensions.debug_extension = (ut8 *) malloc (attr->length);
	if (attr->info.debug_extensions.debug_extension && (attr->length > (sz - offset))) {
		// Truncated input: copy only what the buffer still holds.
		memcpy (attr->info.debug_extensions.debug_extension, buffer + offset, sz - offset);
	} else if (attr->info.debug_extensions.debug_extension) {
		memcpy (attr->info.debug_extensions.debug_extension, buffer + offset, attr->length);
	} else {
		eprintf ("r_bin_java_source_debug_attr_new: Unable to allocate the data for the debug_extension.\n");
	}
	offset += attr->length;
	attr->size = offset;
	return attr;
}

R_API ut64 r_bin_java_source_debug_attr_calc_size(RBinJavaAttrInfo *attr) {
	ut64 size = 6;
	if (!attr) {
		return 0LL;
	}
	if (attr->info.debug_extensions.debug_extension) {
		size += attr->length;
	}
	return size;
}

R_API ut64 r_bin_java_local_variable_table_attr_calc_size(RBinJavaAttrInfo *attr) {
	ut64 size = 0;
	// ut64 offset = 0;
	RListIter *iter;
	// RBinJavaLocalVariableAttribute *lvattr;
	if (!attr) {
		return 0LL;
	}
	size += 6;
	// attr->info.local_variable_table_attr.table_length = R_BIN_JAVA_USHORT (buffer, offset);
	size += 2;
	// r_list_foreach (attr->info.local_variable_table_attr.local_variable_table, iter, lvattr) {
	r_list_foreach_iter (attr->info.local_variable_table_attr.local_variable_table, iter) {
		// lvattr->start_pc = R_BIN_JAVA_USHORT (buffer, offset);
		size += 2;
		// lvattr->length = R_BIN_JAVA_USHORT (buffer, offset);
		size += 2;
		// lvattr->name_idx = R_BIN_JAVA_USHORT (buffer, offset);
		size += 2;
		// lvattr->descriptor_idx = R_BIN_JAVA_USHORT (buffer, offset);
		size += 2;
		// lvattr->index = R_BIN_JAVA_USHORT (buffer, offset);
		size += 2;
	}
	return size;
}

// Parses a LocalVariableTable attribute: per-variable pc range, name,
// descriptor and slot index, with strings resolved from the constant pool.
R_API RBinJavaAttrInfo *r_bin_java_local_variable_table_attr_new(RBinJavaObj *bin, ut8 *buffer, ut64 sz, ut64 buf_offset) {
	RBinJavaLocalVariableAttribute *lvattr;
	ut64 curpos = 0, offset = 6;
	RBinJavaAttrInfo *attr;
	ut32 i = 0;
	if (!buffer || sz < 1) {
		return NULL;
	}
	attr = r_bin_java_default_attr_new (bin, buffer, sz, buf_offset);
	if (!attr) {
		return NULL;
	}
	attr->type = R_BIN_JAVA_ATTR_TYPE_LOCAL_VARIABLE_TABLE_ATTR;
	attr->info.local_variable_table_attr.table_length = R_BIN_JAVA_USHORT (buffer, offset);
	offset += 2;
	attr->info.local_variable_table_attr.local_variable_table = r_list_newf (r_bin_java_local_variable_table_attr_entry_free);
	for (i = 0; i < attr->info.local_variable_table_attr.table_length; i++) {
		// Each record is 10 bytes; stop at the end of the buffer.
		if (offset + 10 > sz) {
			break;
		}
		curpos = buf_offset + offset;
		lvattr = R_NEW0 (RBinJavaLocalVariableAttribute);
		if (!lvattr) {
			break;
		}
		lvattr->start_pc = R_BIN_JAVA_USHORT (buffer, offset);
		offset += 2;
		lvattr->length = R_BIN_JAVA_USHORT (buffer, offset);
		offset += 2;
		lvattr->name_idx = R_BIN_JAVA_USHORT (buffer, offset);
		offset += 2;
		lvattr->descriptor_idx = R_BIN_JAVA_USHORT (buffer, offset);
		offset += 2;
		lvattr->index = R_BIN_JAVA_USHORT (buffer, offset);
		offset += 2;
		lvattr->file_offset = curpos;
		lvattr->name = r_bin_java_get_utf8_from_bin_cp_list (R_BIN_JAVA_GLOBAL_BIN, lvattr->name_idx);
		lvattr->size = 10;
		if
(!lvattr->name) {
			lvattr->name = strdup ("NULL");
			eprintf ("r_bin_java_local_variable_table_attr_new: Unable to find the name for %d index.\n", lvattr->name_idx);
		}
		lvattr->descriptor = r_bin_java_get_utf8_from_bin_cp_list (R_BIN_JAVA_GLOBAL_BIN, lvattr->descriptor_idx);
		if (!lvattr->descriptor) {
			lvattr->descriptor = strdup ("NULL");
			eprintf ("r_bin_java_local_variable_table_attr_new: Unable to find the descriptor for %d index.\n", lvattr->descriptor_idx);
		}
		r_list_append (attr->info.local_variable_table_attr.local_variable_table, lvattr);
	}
	attr->size = offset;
	// IFDBG r_bin_java_print_local_variable_table_attr_summary(attr);
	return attr;
}

R_API ut64 r_bin_java_local_variable_type_table_attr_calc_size(RBinJavaAttrInfo *attr) {
	// RBinJavaLocalVariableTypeAttribute *lvattr;
	RListIter *iter;
	ut64 size = 0;
	if (attr) {
		RList *list = attr->info.local_variable_type_table_attr.local_variable_table;
		size += 6;
		// attr->info.local_variable_type_table_attr.table_length = R_BIN_JAVA_USHORT (buffer, offset);
		size += 2;
		// r_list_foreach (list, iter, lvattr) {
		r_list_foreach_iter (list, iter) {
			// lvattr->start_pc = R_BIN_JAVA_USHORT (buffer, offset);
			size += 2;
			// lvattr->length = R_BIN_JAVA_USHORT (buffer, offset);
			size += 2;
			// lvattr->name_idx = R_BIN_JAVA_USHORT (buffer, offset);
			size += 2;
			// lvattr->signature_idx = R_BIN_JAVA_USHORT (buffer, offset);
			size += 2;
			// lvattr->index = R_BIN_JAVA_USHORT (buffer, offset);
			size += 2;
		}
	}
	return size;
}

// Parses a LocalVariableTypeTable attribute (like LocalVariableTable
// but carrying generic signatures instead of descriptors).
R_API RBinJavaAttrInfo *r_bin_java_local_variable_type_table_attr_new(RBinJavaObj *bin, ut8 *buffer, ut64 sz, ut64 buf_offset) {
	RBinJavaLocalVariableTypeAttribute *lvattr;
	ut64 offset = 6;
	ut32 i = 0;
	// NOTE(review): buf_offset is dropped here (0 is passed), unlike the
	// sibling parsers — confirm whether file offsets are wanted.
	RBinJavaAttrInfo *attr = r_bin_java_default_attr_new (bin, buffer, sz, 0);
	if (!attr) {
		return NULL;
	}
	attr->type = R_BIN_JAVA_ATTR_TYPE_LOCAL_VARIABLE_TYPE_TABLE_ATTR;
	attr->info.local_variable_type_table_attr.table_length = R_BIN_JAVA_USHORT (buffer, offset);
	offset += 2;
attr->info.local_variable_type_table_attr.local_variable_table = r_list_newf (r_bin_java_local_variable_type_table_attr_entry_free); for (i = 0; i < attr->info.local_variable_type_table_attr.table_length; i++) { ut64 curpos = buf_offset + offset; lvattr = R_NEW0 (RBinJavaLocalVariableTypeAttribute); if (!lvattr) { perror ("calloc"); break; } if (offset + 10 > sz) { eprintf ("oob"); free (lvattr); break; } lvattr->start_pc = R_BIN_JAVA_USHORT (buffer, offset); offset += 2; lvattr->length = R_BIN_JAVA_USHORT (buffer, offset); offset += 2; lvattr->name_idx = R_BIN_JAVA_USHORT (buffer, offset); offset += 2; lvattr->signature_idx = R_BIN_JAVA_USHORT (buffer, offset); offset += 2; lvattr->index = R_BIN_JAVA_USHORT (buffer, offset); offset += 2; lvattr->file_offset = curpos; lvattr->name = r_bin_java_get_utf8_from_bin_cp_list (R_BIN_JAVA_GLOBAL_BIN, lvattr->name_idx); lvattr->size = 10; if (!lvattr->name) { lvattr->name = strdup ("NULL"); eprintf ("r_bin_java_local_variable_type_table_attr_new: Unable to find the name for %d index.\n", lvattr->name_idx); } lvattr->signature = r_bin_java_get_utf8_from_bin_cp_list (R_BIN_JAVA_GLOBAL_BIN, lvattr->signature_idx); if (!lvattr->signature) { lvattr->signature = strdup ("NULL"); eprintf ("r_bin_java_local_variable_type_table_attr_new: Unable to find the descriptor for %d index.\n", lvattr->signature_idx); } r_list_append (attr->info.local_variable_type_table_attr.local_variable_table, lvattr); } // IFDBG r_bin_java_print_local_variable_type_table_attr_summary(attr); attr->size = offset; return attr; } R_API RBinJavaAttrInfo *r_bin_java_source_code_file_attr_new(RBinJavaObj *bin, ut8 *buffer, ut64 sz, ut64 buf_offset) { if (!sz) { return NULL; } ut64 offset = 0; RBinJavaAttrInfo *attr = r_bin_java_default_attr_new (bin, buffer, sz, buf_offset); offset += 6; if (!attr) { return NULL; } attr->type = R_BIN_JAVA_ATTR_TYPE_SOURCE_FILE_ATTR; // if (buffer + offset > buffer + sz) return NULL; attr->info.source_file_attr.sourcefile_idx = 
R_BIN_JAVA_USHORT (buffer, offset); offset += 2; attr->size = offset; // IFDBG r_bin_java_print_source_code_file_attr_summary(attr); return attr; } R_API ut64 r_bin_java_source_code_file_attr_calc_size(RBinJavaAttrInfo *attr) { return attr ? 8 : 0; } R_API RBinJavaAttrInfo *r_bin_java_synthetic_attr_new(RBinJavaObj *bin, ut8 *buffer, ut64 sz, ut64 buf_offset) { ut64 offset = 0; RBinJavaAttrInfo *attr = r_bin_java_default_attr_new (bin, buffer, sz, buf_offset); if (!attr) { return NULL; } offset += 6; attr->type = R_BIN_JAVA_ATTR_TYPE_SYNTHETIC_ATTR; attr->size = offset; return attr; } R_API ut64 r_bin_java_synthetic_attr_calc_size(RBinJavaAttrInfo *attr) { return attr ? 12 : 6; } R_API RBinJavaInterfaceInfo *r_bin_java_interface_new(RBinJavaObj *bin, const ut8 *buffer, ut64 sz) { IFDBG eprintf ("Parsing RBinJavaInterfaceInfo\n"); RBinJavaInterfaceInfo *ifobj = R_NEW0 (RBinJavaInterfaceInfo); if (ifobj) { if (buffer) { ifobj->class_info_idx = R_BIN_JAVA_USHORT (buffer, 0); ifobj->cp_class = r_bin_java_get_item_from_bin_cp_list (bin, ifobj->class_info_idx); if (ifobj->cp_class) { ifobj->name = r_bin_java_get_item_name_from_bin_cp_list (bin, ifobj->cp_class); } else { ifobj->name = r_str_dup (NULL, "NULL"); } ifobj->size = 2; } else { ifobj->class_info_idx = 0; ifobj->name = r_str_dup (NULL, "NULL"); } } return ifobj; } R_API RBinJavaVerificationObj *r_bin_java_verification_info_from_type(RBinJavaObj *bin, R_BIN_JAVA_STACKMAP_TYPE type, ut32 value) { RBinJavaVerificationObj *se = R_NEW0 (RBinJavaVerificationObj); if (!se) { return NULL; } se->tag = type; if (se->tag == R_BIN_JAVA_STACKMAP_OBJECT) { se->info.obj_val_cp_idx = (ut16) value; } else if (se->tag == R_BIN_JAVA_STACKMAP_UNINIT) { /*if (bin->offset_sz == 4) { se->info.uninit_offset = value; } else { se->info.uninit_offset = (ut16) value; }*/ se->info.uninit_offset = (ut16) value; } return se; } R_API RBinJavaVerificationObj *r_bin_java_read_from_buffer_verification_info_new(ut8 *buffer, ut64 sz, ut64 
buf_offset) { ut64 offset = 0; RBinJavaVerificationObj *se = R_NEW0 (RBinJavaVerificationObj); if (!se) { return NULL; } se->file_offset = buf_offset; se->tag = buffer[offset]; offset += 1; if (se->tag == R_BIN_JAVA_STACKMAP_OBJECT) { se->info.obj_val_cp_idx = R_BIN_JAVA_USHORT (buffer, offset); offset += 2; } else if (se->tag == R_BIN_JAVA_STACKMAP_UNINIT) { se->info.uninit_offset = R_BIN_JAVA_USHORT (buffer, offset); offset += 2; } if (R_BIN_JAVA_STACKMAP_UNINIT < se->tag) { r_bin_java_verification_info_free (se); return NULL; } se->size = offset; return se; } R_API ut64 rbin_java_verification_info_calc_size(RBinJavaVerificationObj *se) { ut64 sz = 1; if (!se) { return 0; } // r_buf_read_at (bin->b, offset, (ut8*)(&se->tag), 1) switch (se->tag) { case R_BIN_JAVA_STACKMAP_OBJECT: // r_buf_read_at (bin->b, offset+1, (ut8*)buf, 2) sz += 2; break; case R_BIN_JAVA_STACKMAP_UNINIT: // r_buf_read_at (bin->b, offset+1, (ut8*)buf, 2) sz += 2; break; } return sz; } R_API RBinJavaStackMapFrameMetas *r_bin_java_determine_stack_frame_type(ut8 tag) { ut8 type_value = 0; if (tag < 64) { type_value = R_BIN_JAVA_STACK_FRAME_SAME; } else if (tag < 128) { type_value = R_BIN_JAVA_STACK_FRAME_SAME_LOCALS_1; } else if (247 < tag && tag < 251) { type_value = R_BIN_JAVA_STACK_FRAME_CHOP; } else if (tag == 251) { type_value = R_BIN_JAVA_STACK_FRAME_SAME_FRAME_EXTENDED; } else if (251 < tag && tag < 255) { type_value = R_BIN_JAVA_STACK_FRAME_APPEND; } else if (tag == 255) { type_value = R_BIN_JAVA_STACK_FRAME_FULL_FRAME; } else { type_value = R_BIN_JAVA_STACK_FRAME_RESERVED; } return &R_BIN_JAVA_STACK_MAP_FRAME_METAS[type_value]; } R_API ut64 r_bin_java_stack_map_frame_calc_size(RBinJavaStackMapFrame *sf) { ut64 size = 0; RListIter *iter, *iter_tmp; RBinJavaVerificationObj *se; if (sf) { // sf->tag = buffer[offset]; size += 1; switch (sf->type) { case R_BIN_JAVA_STACK_FRAME_SAME: // Nothing to read break; case R_BIN_JAVA_STACK_FRAME_SAME_LOCALS_1: r_list_foreach_safe (sf->stack_items, 
iter, iter_tmp, se) { size += rbin_java_verification_info_calc_size (se); } break; case R_BIN_JAVA_STACK_FRAME_CHOP: // sf->offset_delta = R_BIN_JAVA_USHORT (buffer, offset); size += 2; break; case R_BIN_JAVA_STACK_FRAME_SAME_FRAME_EXTENDED: // sf->offset_delta = R_BIN_JAVA_USHORT (buffer, offset); size += 2; r_list_foreach_safe (sf->stack_items, iter, iter_tmp, se) { size += rbin_java_verification_info_calc_size (se); } break; case R_BIN_JAVA_STACK_FRAME_APPEND: // sf->offset_delta = R_BIN_JAVA_USHORT (buffer, offset); size += 2; r_list_foreach_safe (sf->stack_items, iter, iter_tmp, se) { size += rbin_java_verification_info_calc_size (se); } break; case R_BIN_JAVA_STACK_FRAME_FULL_FRAME: // sf->offset_delta = R_BIN_JAVA_USHORT (buffer, offset); size += 2; // sf->number_of_locals = R_BIN_JAVA_USHORT (buffer, offset); size += 2; r_list_foreach_safe (sf->local_items, iter, iter_tmp, se) { size += rbin_java_verification_info_calc_size (se); } // sf->number_of_stack_items = R_BIN_JAVA_USHORT (buffer, offset); size += 2; r_list_foreach_safe (sf->stack_items, iter, iter_tmp, se) { size += rbin_java_verification_info_calc_size (se); } break; default: eprintf ("Unknown type\n"); break; } } return size; } R_API RBinJavaStackMapFrame *r_bin_java_stack_map_frame_new(ut8 *buffer, ut64 sz, RBinJavaStackMapFrame *p_frame, ut64 buf_offset) { RBinJavaStackMapFrame *stack_frame = r_bin_java_default_stack_frame (); RBinJavaVerificationObj *se = NULL; ut64 offset = 0; if (!stack_frame) { return NULL; } stack_frame->tag = buffer[offset]; offset += 1; stack_frame->metas->type_info = (void *) r_bin_java_determine_stack_frame_type (stack_frame->tag); stack_frame->type = ((RBinJavaStackMapFrameMetas *) stack_frame->metas->type_info)->type; stack_frame->file_offset = buf_offset; stack_frame->p_stack_frame = p_frame; switch (stack_frame->type) { case R_BIN_JAVA_STACK_FRAME_SAME: // Maybe? 1. Copy the previous frames locals and set the locals count. 
// copy_type_info_to_stack_frame_list_up_to_idx (p_frame->local_items, stack_frame->local_items, idx); if (p_frame) { stack_frame->number_of_locals = p_frame->number_of_locals; } else { IFINT eprintf ("><?><\n"); IFDBG eprintf ("Unable to set previous stackframe with the number of locals (current info.code_attr.implicit_frame was probably not set :/)"); } IFDBG eprintf ("r_bin_java_stack_map_frame_new: TODO Stack Frame Same Locals Condition is untested, so there may be issues.\n"); break; case R_BIN_JAVA_STACK_FRAME_SAME_LOCALS_1: // 1. Read the stack type stack_frame->number_of_stack_items = 1; se = r_bin_java_read_from_buffer_verification_info_new (buffer + offset, sz - offset, buf_offset + offset); IFDBG eprintf ("r_bin_java_stack_map_frame_new: Parsed R_BIN_JAVA_STACK_FRAME_SAME_LOCALS_1.\n"); if (se) { offset += se->size; } else { eprintf ("r_bin_java_stack_map_frame_new: Unable to parse the Stack Items for the stack frame.\n"); r_bin_java_stack_frame_free (stack_frame); return NULL; } r_list_append (stack_frame->stack_items, (void *) se); // Maybe? 3. Copy the previous frames locals and set the locals count. // copy_type_info_to_stack_frame_list_up_to_idx (p_frame->local_items, stack_frame->local_items, idx); if (p_frame) { stack_frame->number_of_locals = p_frame->number_of_locals; } else { IFDBG eprintf ("Unable to set previous stackframe with the number of locals (current info.code_attr.implicit_frame was probably not set :/)"); } IFDBG eprintf ("r_bin_java_stack_map_frame_new: TODO Stack Frame Same Locals 1 Stack Element Condition is untested, so there may be issues.\n"); break; case R_BIN_JAVA_STACK_FRAME_CHOP: // 1. Calculate the max index we want to copy from the list of the // previous frames locals IFDBG eprintf ("r_bin_java_stack_map_frame_new: Parsing R_BIN_JAVA_STACK_FRAME_CHOP.\n"); // ut16 k = 251 - stack_frame->tag; /*, idx = p_frame->number_of_locals - k; */ // 2. 
read the uoffset value stack_frame->offset_delta = R_BIN_JAVA_USHORT (buffer, offset); offset += 2; // Maybe? 3. Copy the previous frames locals and set the locals count. // copy_type_info_to_stack_frame_list_up_to_idx (p_frame->local_items, stack_frame->local_items, idx); if (p_frame) { stack_frame->number_of_locals = p_frame->number_of_locals; } else { IFINT eprintf ("><?><\n"); IFDBG eprintf ("Unable to set previous stackframe with the number of locals (current info.code_attr.implicit_frame was probably not set :/)"); } IFDBG eprintf ("r_bin_java_stack_map_frame_new: TODO Stack Frame Chop Condition is untested, so there may be issues.\n"); break; case R_BIN_JAVA_STACK_FRAME_SAME_FRAME_EXTENDED: IFDBG eprintf ("r_bin_java_stack_map_frame_new: Parsing R_BIN_JAVA_STACK_FRAME_SAME_FRAME_EXTENDED.\n"); // 1. Read the uoffset stack_frame->offset_delta = R_BIN_JAVA_USHORT (buffer, offset); offset += 2; // 2. Read the stack element type stack_frame->number_of_stack_items = 1; se = r_bin_java_read_from_buffer_verification_info_new (buffer + offset, sz - offset, buf_offset + offset); if (se) { offset += se->size; } else { eprintf ("r_bin_java_stack_map_frame_new: Unable to parse the Stack Items for the stack frame.\n"); r_bin_java_stack_frame_free (stack_frame); return NULL; } r_list_append (stack_frame->stack_items, (void *) se); // Maybe? 3. 
Copy the previous frames locals to the current locals // copy_type_info_to_stack_frame_list_up_to_idx (p_frame->local_items, stack_frame->local_items, idx); if (p_frame) { stack_frame->number_of_locals = p_frame->number_of_locals; } else { IFINT eprintf ("><?><\n"); IFDBG eprintf ("Unable to set previous stackframe with the number of locals (current info.code_attr.implicit_frame was probably not set :/)"); } IFDBG eprintf ("r_bin_java_stack_map_frame_new: TODO Stack Frame Same Locals Frame Stack 1 Extended Condition is untested, so there may be issues.\n"); break; case R_BIN_JAVA_STACK_FRAME_APPEND: IFDBG eprintf ("r_bin_java_stack_map_frame_new: Parsing R_BIN_JAVA_STACK_FRAME_APPEND.\n"); // 1. Calculate the max index we want to copy from the list of the // previous frames locals ut16 k = stack_frame->tag - 251; ut32 i = 0; // 2. Read the uoffset stack_frame->offset_delta = R_BIN_JAVA_USHORT (buffer, offset); offset += 2; // Maybe? 3. Copy the previous frames locals to the current locals // copy_type_info_to_stack_frame_list_up_to_idx (p_frame->local_items, stack_frame->local_items, idx); // 4. 
Read off the rest of the appended locals types for (i = 0; i < k; i++) { if (offset >= sz) { break; } IFDBG eprintf ("r_bin_java_stack_map_frame_new: Parsing verifying the k'th frame: %d of %d.\n", i, k); se = r_bin_java_read_from_buffer_verification_info_new (buffer + offset, sz - offset, buf_offset + offset); IFDBG eprintf ("r_bin_java_stack_map_frame_new: Completed Parsing\n"); if (se) { offset += se->size; } else { eprintf ("r_bin_java_stack_map_frame_new: Unable to parse the locals for the stack frame.\n"); r_bin_java_stack_frame_free (stack_frame); return NULL; } r_list_append (stack_frame->local_items, (void *) se); } IFDBG eprintf ("r_bin_java_stack_map_frame_new: Breaking out of loop"); IFDBG eprintf ("p_frame: %p\n", p_frame); if (p_frame) { stack_frame->number_of_locals = p_frame->number_of_locals + k; } else { IFINT eprintf ("><?><\n"); IFDBG eprintf ("Unable to set previous stackframe with the number of locals (current info.code_attr.implicit_frame was probably not set :/)"); } IFDBG eprintf ("r_bin_java_stack_map_frame_new: TODO Stack Frame Same Locals Frame Stack 1 Extended Condition is untested, so there may be issues.\n"); break; case R_BIN_JAVA_STACK_FRAME_FULL_FRAME: IFDBG eprintf ("r_bin_java_stack_map_frame_new: Parsing R_BIN_JAVA_STACK_FRAME_FULL_FRAME.\n"); stack_frame->offset_delta = R_BIN_JAVA_USHORT (buffer, offset); offset += 2; // IFDBG eprintf ("r_bin_java_stack_map_frame_new: Code Size > 65535, read(%d byte(s)), offset = 0x%08x.\n", var_sz, stack_frame->offset_delta); // Read the number of variables based on the max # local variable stack_frame->number_of_locals = R_BIN_JAVA_USHORT (buffer, offset); offset += 2; // IFDBG eprintf ("r_bin_java_stack_map_frame_new: Max ulocalvar > 65535, read(%d byte(s)), number_of_locals = 0x%08x.\n", var_sz, stack_frame->number_of_locals); IFDBG r_bin_java_print_stack_map_frame_summary(stack_frame); // read the number of locals off the stack for (i = 0; i < stack_frame->number_of_locals; i++) { if 
(offset >= sz) { break; } se = r_bin_java_read_from_buffer_verification_info_new (buffer + offset, sz - offset, buf_offset + offset); if (se) { offset += se->size; // r_list_append (stack_frame->local_items, (void *) se); } else { eprintf ("r_bin_java_stack_map_frame_new: Unable to parse the locals for the stack frame.\n"); r_bin_java_stack_frame_free (stack_frame); return NULL; } r_list_append (stack_frame->local_items, (void *) se); } // Read the number of stack items based on the max size of stack stack_frame->number_of_stack_items = R_BIN_JAVA_USHORT (buffer, offset); offset += 2; // IFDBG eprintf ("r_bin_java_stack_map_frame_new: Max ustack items > 65535, read(%d byte(s)), number_of_locals = 0x%08x.\n", var_sz, stack_frame->number_of_stack_items); // read the stack items for (i = 0; i < stack_frame->number_of_stack_items; i++) { if (offset >= sz) { break; } se = r_bin_java_read_from_buffer_verification_info_new (buffer + offset, sz - offset, buf_offset + offset); if (se) { offset += se->size; // r_list_append (stack_frame->stack_items, (void *) se); } else { eprintf ("r_bin_java_stack_map_frame_new: Unable to parse the stack items for the stack frame.\n"); r_bin_java_stack_frame_free (stack_frame); return NULL; } r_list_append (stack_frame->local_items, (void *) se); } break; default: eprintf ("java: Unknown type\n"); break; } // IFDBG eprintf ("Created a stack frame at offset(0x%08"PFMT64x") of size: %d\n", buf_offset, stack_frame->size);//r_bin_java_print_stack_map_frame_summary(stack_frame); stack_frame->size = offset; // IFDBG r_bin_java_print_stack_map_frame_summary(stack_frame); return stack_frame; } R_API ut16 r_bin_java_find_cp_class_ref_from_name_idx(RBinJavaObj *bin, ut16 name_idx) { ut16 pos, len = (ut16) r_list_length (bin->cp_list); RBinJavaCPTypeObj *item; for (pos = 0; pos < len; pos++) { item = (RBinJavaCPTypeObj *) r_list_get_n (bin->cp_list, pos); if (item && item->tag == R_BIN_JAVA_CP_CLASS && item->info.cp_class.name_idx == name_idx) { 
break; } } return (pos != len) ? pos : 0; } R_API RBinJavaStackMapFrame *r_bin_java_default_stack_frame(void) { RBinJavaStackMapFrame *sf = R_NEW0 (RBinJavaStackMapFrame); if (!sf) { return NULL; } sf->metas = R_NEW0 (RBinJavaMetaInfo); if (!sf->metas) { free (sf); return NULL; } sf->metas->type_info = (void *) &R_BIN_JAVA_STACK_MAP_FRAME_METAS[R_BIN_JAVA_STACK_FRAME_IMPLICIT]; sf->type = ((RBinJavaStackMapFrameMetas *) sf->metas->type_info)->type; sf->local_items = r_list_newf (r_bin_java_verification_info_free); sf->stack_items = r_list_newf (r_bin_java_verification_info_free); sf->number_of_stack_items = 0; sf->number_of_locals = 0; return sf; } R_API RBinJavaStackMapFrame *r_bin_java_build_stack_frame_from_local_variable_table(RBinJavaObj *bin, RBinJavaAttrInfo *attr) { RBinJavaStackMapFrame *sf = r_bin_java_default_stack_frame (); RBinJavaLocalVariableAttribute *lvattr = NULL; RBinJavaVerificationObj *type_item; RListIter *iter = NULL; ut32 value_cnt = 0; ut8 value; if (!sf || !bin || !attr || attr->type != R_BIN_JAVA_ATTR_TYPE_LOCAL_VARIABLE_TABLE_ATTR) { eprintf ("Attempting to create a stack_map frame from a bad attribute.\n"); return sf; } sf->number_of_locals = attr->info.local_variable_table_attr.table_length; r_list_foreach (attr->info.local_variable_table_attr.local_variable_table, iter, lvattr) { ut32 pos = 0; // knock the array Types while (lvattr->descriptor[pos] == '[') { pos++; } value = lvattr->descriptor[pos]; // IFDBG eprintf ("Found the following type value: %c at pos %d in %s\n", value, pos, lvattr->descriptor); switch (value) { case 'I': case 'Z': case 'S': case 'B': case 'C': type_item = r_bin_java_verification_info_from_type (bin, R_BIN_JAVA_STACKMAP_INTEGER, 0); break; case 'F': type_item = r_bin_java_verification_info_from_type (bin, R_BIN_JAVA_STACKMAP_FLOAT, 0); break; case 'D': type_item = r_bin_java_verification_info_from_type (bin, R_BIN_JAVA_STACKMAP_DOUBLE, 0); break; case 'J': type_item = r_bin_java_verification_info_from_type 
(bin, R_BIN_JAVA_STACKMAP_LONG, 0); break; case 'L': // TODO: FIXME write something that will iterate over the CP Pool and find the // CONSTANT_Class_info referencing this { ut16 idx = r_bin_java_find_cp_class_ref_from_name_idx (bin, lvattr->name_idx); type_item = r_bin_java_verification_info_from_type (bin, R_BIN_JAVA_STACKMAP_OBJECT, idx); } break; default: eprintf ("r_bin_java_build_stack_frame_from_local_variable_table: " "not sure how to handle: name: %s, type: %s\n", lvattr->name, lvattr->descriptor); type_item = r_bin_java_verification_info_from_type (bin, R_BIN_JAVA_STACKMAP_NULL, 0); } if (type_item) { r_list_append (sf->local_items, (void *) type_item); } value_cnt++; } if (value_cnt != attr->info.local_variable_table_attr.table_length) { IFDBG eprintf ("r_bin_java_build_stack_frame_from_local_variable_table: " "Number of locals not accurate. Expected %d but got %d", attr->info.local_variable_table_attr.table_length, value_cnt); } return sf; } R_API ut64 r_bin_java_stack_map_table_attr_calc_size(RBinJavaAttrInfo *attr) { ut64 size = 0; RListIter *iter, *iter_tmp; RBinJavaStackMapFrame *sf; if (attr) { // attr = r_bin_java_default_attr_new (buffer, sz, buf_offset); size += 6; // IFDBG r_bin_java_print_source_code_file_attr_summary(attr); // Current spec does not call for variable sizes. 
// attr->info.stack_map_table_attr.number_of_entries = R_BIN_JAVA_USHORT (buffer, offset); size += 2; r_list_foreach_safe (attr->info.stack_map_table_attr.stack_map_frame_entries, iter, iter_tmp, sf) { size += r_bin_java_stack_map_frame_calc_size (sf); } } return size; } R_API RBinJavaAttrInfo *r_bin_java_stack_map_table_attr_new(RBinJavaObj *bin, ut8 *buffer, ut64 sz, ut64 buf_offset) { ut32 i = 0; ut64 offset = 0; RBinJavaStackMapFrame *stack_frame = NULL, *new_stack_frame = NULL; if (sz < 10) { return NULL; } RBinJavaAttrInfo *attr = r_bin_java_default_attr_new (bin, buffer, sz, buf_offset); offset += 6; IFDBG eprintf("r_bin_java_stack_map_table_attr_new: New stack map allocated.\n"); if (!attr) { return NULL; } attr->info.stack_map_table_attr.stack_map_frame_entries = r_list_newf (r_bin_java_stack_frame_free); // IFDBG r_bin_java_print_source_code_file_attr_summary(attr); // Current spec does not call for variable sizes. attr->info.stack_map_table_attr.number_of_entries = R_BIN_JAVA_USHORT (buffer, offset); offset += 2; IFDBG eprintf ("r_bin_java_stack_map_table_attr_new: Processing stack map, summary is:\n"); IFDBG r_bin_java_print_stack_map_table_attr_summary(attr); for (i = 0; i < attr->info.stack_map_table_attr.number_of_entries; i++) { // read next stack frame IFDBG eprintf ("Reading StackMap Entry #%d @ 0x%08"PFMT64x "\n", i, buf_offset + offset); if (stack_frame == NULL && R_BIN_JAVA_GLOBAL_BIN && R_BIN_JAVA_GLOBAL_BIN->current_code_attr) { IFDBG eprintf ("Setting an implicit frame at #%d @ 0x%08"PFMT64x "\n", i, buf_offset + offset); stack_frame = R_BIN_JAVA_GLOBAL_BIN->current_code_attr->info.code_attr.implicit_frame; } IFDBG eprintf ("Reading StackMap Entry #%d @ 0x%08"PFMT64x ", current stack_frame: %p\n", i, buf_offset + offset, stack_frame); if (offset >= sz) { r_bin_java_stack_map_table_attr_free (attr); return NULL; } new_stack_frame = r_bin_java_stack_map_frame_new (buffer + offset, sz - offset, stack_frame, buf_offset + offset); if 
(new_stack_frame) { offset += new_stack_frame->size; // append stack frame to the list r_list_append (attr->info.stack_map_table_attr.stack_map_frame_entries, (void *) new_stack_frame); stack_frame = new_stack_frame; } else { eprintf ("r_bin_java_stack_map_table_attr_new: Unable to parse the stack frame for the stack map table.\n"); r_bin_java_stack_map_table_attr_free (attr); attr = NULL; break; } } if (attr) { attr->size = offset; } return attr; } // End attribute types new // Start new Constant Pool Types R_API RBinJavaCPTypeObj *r_bin_java_do_nothing_new(RBinJavaObj *bin, ut8 *buffer, ut64 sz) { return (RBinJavaCPTypeObj *) NULL; } R_API ut64 r_bin_java_do_nothing_calc_size(RBinJavaCPTypeObj *obj) { return 0; } R_API void r_bin_java_do_nothing_free(void /*RBinJavaCPTypeObj*/ *obj) { return; } R_API RBinJavaCPTypeObj *r_bin_java_unknown_cp_new(RBinJavaObj *bin, ut8 *buffer, ut64 sz) { ut8 tag = buffer[0]; RBinJavaCPTypeObj *obj = NULL; obj = (RBinJavaCPTypeObj *) malloc (sizeof (RBinJavaCPTypeObj)); if (obj) { memset (obj, 0, sizeof (RBinJavaCPTypeObj)); obj->tag = tag; obj->metas = R_NEW0 (RBinJavaMetaInfo); obj->metas->type_info = (void *) &R_BIN_JAVA_CP_METAS[R_BIN_JAVA_CP_UNKNOWN]; } return obj; } R_API ut64 r_bin_java_unknown_cp_calc_size(RBinJavaCPTypeObj *obj) { return 1LL; } R_API RBinJavaCPTypeObj *r_bin_java_class_cp_new(RBinJavaObj *bin, ut8 *buffer, ut64 sz) { ut8 tag = buffer[0]; int quick_check = r_bin_java_quick_check (R_BIN_JAVA_CP_CLASS, tag, sz, "Class"); if (quick_check > 0) { return NULL; } RBinJavaCPTypeObj *obj = R_NEW0 (RBinJavaCPTypeObj); if (obj) { obj->tag = tag; obj->metas = R_NEW0 (RBinJavaMetaInfo); obj->metas->type_info = (void *) &R_BIN_JAVA_CP_METAS[tag]; obj->info.cp_class.name_idx = R_BIN_JAVA_USHORT (buffer, 1); } return obj; } R_API ut64 r_bin_java_class_cp_calc_size(RBinJavaCPTypeObj *obj) { ut64 size = 0; // ut8 tag = buffer[0]; size += 1; // obj->info.cp_class.name_idx = R_BIN_JAVA_USHORT (buffer, 1); size += 2; return 
size; } R_API RBinJavaCPTypeObj *r_bin_java_fieldref_cp_new(RBinJavaObj *bin, ut8 *buffer, ut64 sz) { ut8 tag = buffer[0]; RBinJavaCPTypeObj *obj = NULL; int quick_check = 0; quick_check = r_bin_java_quick_check (R_BIN_JAVA_CP_FIELDREF, tag, sz, "FieldRef"); if (quick_check > 0) { return obj; } obj = (RBinJavaCPTypeObj *) malloc (sizeof (RBinJavaCPTypeObj)); if (obj) { memset (obj, 0, sizeof (RBinJavaCPTypeObj)); obj->tag = tag; obj->metas = R_NEW0 (RBinJavaMetaInfo); obj->metas->type_info = (void *) &R_BIN_JAVA_CP_METAS[tag]; obj->info.cp_field.class_idx = R_BIN_JAVA_USHORT (buffer, 1); obj->info.cp_field.name_and_type_idx = R_BIN_JAVA_USHORT (buffer, 3); } return (RBinJavaCPTypeObj *) obj; } R_API ut64 r_bin_java_fieldref_cp_calc_size(RBinJavaCPTypeObj *obj) { ut64 size = 0; // tag size += 1; // obj->info.cp_field.class_idx = R_BIN_JAVA_USHORT (buffer, 1); size += 2; // obj->info.cp_field.name_and_type_idx = R_BIN_JAVA_USHORT (buffer, 3); size += 2; return size; } R_API RBinJavaCPTypeObj *r_bin_java_methodref_cp_new(RBinJavaObj *bin, ut8 *buffer, ut64 sz) { ut8 tag = buffer[0]; RBinJavaCPTypeObj *obj = NULL; int quick_check = 0; quick_check = r_bin_java_quick_check (R_BIN_JAVA_CP_METHODREF, tag, sz, "MethodRef"); if (quick_check > 0) { return obj; } obj = (RBinJavaCPTypeObj *) malloc (sizeof (RBinJavaCPTypeObj)); if (obj) { memset (obj, 0, sizeof (RBinJavaCPTypeObj)); obj->tag = tag; obj->metas = R_NEW0 (RBinJavaMetaInfo); obj->metas->type_info = (void *) &R_BIN_JAVA_CP_METAS[tag]; obj->info.cp_method.class_idx = R_BIN_JAVA_USHORT (buffer, 1); obj->info.cp_method.name_and_type_idx = R_BIN_JAVA_USHORT (buffer, 3); } return obj; } R_API ut64 r_bin_java_methodref_cp_calc_size(RBinJavaCPTypeObj *obj) { ut64 size = 0; // tag size += 1; // obj->info.cp_method.class_idx = R_BIN_JAVA_USHORT (buffer, 1); size += 2; // obj->info.cp_method.name_and_type_idx = R_BIN_JAVA_USHORT (buffer, 3); size += 2; return size; } R_API RBinJavaCPTypeObj 
*r_bin_java_interfacemethodref_cp_new(RBinJavaObj *bin, ut8 *buffer, ut64 sz) { ut8 tag = buffer[0]; int quick_check = r_bin_java_quick_check (R_BIN_JAVA_CP_INTERFACEMETHOD_REF, tag, sz, "InterfaceMethodRef"); if (quick_check > 0) { return NULL; } RBinJavaCPTypeObj *obj = R_NEW0 (RBinJavaCPTypeObj); if (obj) { obj->tag = tag; obj->metas = R_NEW0 (RBinJavaMetaInfo); obj->metas->type_info = (void *) &R_BIN_JAVA_CP_METAS[tag]; obj->name = r_str_dup (NULL, (const char *) R_BIN_JAVA_CP_METAS[tag].name); obj->info.cp_interface.class_idx = R_BIN_JAVA_USHORT (buffer, 1); obj->info.cp_interface.name_and_type_idx = R_BIN_JAVA_USHORT (buffer, 3); } return obj; } R_API ut64 r_bin_java_interfacemethodref_cp_calc_size(RBinJavaCPTypeObj *obj) { ut64 size = 0; // tag size += 1; // obj->info.cp_interface.class_idx = R_BIN_JAVA_USHORT (buffer, 1); size += 2; // obj->info.cp_interface.name_and_type_idx = R_BIN_JAVA_USHORT (buffer, 3); size += 2; return size; } R_API RBinJavaCPTypeObj *r_bin_java_string_cp_new(RBinJavaObj *bin, ut8 *buffer, ut64 sz) { ut8 tag = buffer[0]; int quick_check = r_bin_java_quick_check (R_BIN_JAVA_CP_STRING, tag, sz, "String"); if (quick_check > 0) { return NULL; } RBinJavaCPTypeObj *obj = R_NEW0 (RBinJavaCPTypeObj); if (obj) { obj->tag = tag; obj->metas = R_NEW0 (RBinJavaMetaInfo); obj->metas->type_info = (void *) &R_BIN_JAVA_CP_METAS[tag]; obj->name = r_str_dup (NULL, (const char *) R_BIN_JAVA_CP_METAS[tag].name); obj->info.cp_string.string_idx = R_BIN_JAVA_USHORT (buffer, 1); } return obj; } R_API ut64 r_bin_java_string_cp_calc_size(RBinJavaCPTypeObj *obj) { ut64 size = 0; // tag size += 1; // obj->info.cp_string.string_idx = R_BIN_JAVA_USHORT (buffer, 1); size += 2; return size; } R_API RBinJavaCPTypeObj *r_bin_java_integer_cp_new(RBinJavaObj *bin, ut8 *buffer, ut64 sz) { ut8 tag = buffer[0]; RBinJavaCPTypeObj *obj = NULL; int quick_check = 0; quick_check = r_bin_java_quick_check (R_BIN_JAVA_CP_INTEGER, tag, sz, "Integer"); if (quick_check > 0) { return 
obj; } obj = (RBinJavaCPTypeObj *) R_NEW0 (RBinJavaCPTypeObj); if (obj) { obj->tag = tag; obj->metas = R_NEW0 (RBinJavaMetaInfo); obj->metas->type_info = (void *) &R_BIN_JAVA_CP_METAS[tag]; obj->name = r_str_dup (NULL, (const char *) R_BIN_JAVA_CP_METAS[tag].name); memset (&obj->info.cp_integer.bytes, 0, sizeof (obj->info.cp_integer.bytes)); memcpy (&obj->info.cp_integer.bytes.raw, buffer + 1, 4); } return obj; } R_API ut64 r_bin_java_integer_cp_calc_size(RBinJavaCPTypeObj *obj) { ut64 size = 0; // tag size += 1; // obj->info.cp_string.string_idx = R_BIN_JAVA_USHORT (buffer, 1); size += 4; return size; } R_API RBinJavaCPTypeObj *r_bin_java_float_cp_new(RBinJavaObj *bin, ut8 *buffer, ut64 sz) { ut8 tag = buffer[0]; RBinJavaCPTypeObj *obj = NULL; int quick_check = 0; quick_check = r_bin_java_quick_check (R_BIN_JAVA_CP_FLOAT, tag, sz, "Float"); if (quick_check > 0) { return obj; } obj = (RBinJavaCPTypeObj *) calloc (1, sizeof (RBinJavaCPTypeObj)); if (obj) { obj->tag = tag; obj->metas = R_NEW0 (RBinJavaMetaInfo); obj->metas->type_info = (void *) &R_BIN_JAVA_CP_METAS[tag]; obj->name = r_str_dup (NULL, (const char *) R_BIN_JAVA_CP_METAS[tag].name); memset (&obj->info.cp_float.bytes, 0, sizeof (obj->info.cp_float.bytes)); memcpy (&obj->info.cp_float.bytes.raw, buffer, 4); } return (RBinJavaCPTypeObj *) obj; } R_API ut64 r_bin_java_float_cp_calc_size(RBinJavaCPTypeObj *obj) { ut64 size = 0; // tag size += 1; // obj->info.cp_string.string_idx = R_BIN_JAVA_USHORT (buffer, 1); size += 4; return size; } R_API RBinJavaCPTypeObj *r_bin_java_long_cp_new(RBinJavaObj *bin, ut8 *buffer, ut64 sz) { ut8 tag = buffer[0]; RBinJavaCPTypeObj *obj = NULL; int quick_check = 0; quick_check = r_bin_java_quick_check (R_BIN_JAVA_CP_LONG, tag, sz, "Long"); if (quick_check > 0) { return obj; } obj = (RBinJavaCPTypeObj *) malloc (sizeof (RBinJavaCPTypeObj)); if (obj) { memset (obj, 0, sizeof (RBinJavaCPTypeObj)); obj->tag = tag; obj->metas = R_NEW0 (RBinJavaMetaInfo); obj->metas->type_info = 
(void *) &R_BIN_JAVA_CP_METAS[tag]; obj->name = r_str_dup (NULL, (const char *) R_BIN_JAVA_CP_METAS[tag].name); memset (&obj->info.cp_long.bytes, 0, sizeof (obj->info.cp_long.bytes)); memcpy (&(obj->info.cp_long.bytes), buffer + 1, 8); } return obj; } R_API ut64 r_bin_java_long_cp_calc_size(RBinJavaCPTypeObj *obj) { ut64 size = 0; // tag size += 1; // obj->info.cp_string.string_idx = R_BIN_JAVA_USHORT (buffer, 1); size += 8; return size; } R_API RBinJavaCPTypeObj *r_bin_java_double_cp_new(RBinJavaObj *bin, ut8 *buffer, ut64 sz) { ut8 tag = buffer[0]; RBinJavaCPTypeObj *obj = NULL; int quick_check = 0; quick_check = r_bin_java_quick_check (R_BIN_JAVA_CP_DOUBLE, tag, sz, "Double"); if (quick_check > 0) { return (RBinJavaCPTypeObj *) obj; } obj = (RBinJavaCPTypeObj *) malloc (sizeof (RBinJavaCPTypeObj)); if (obj) { memset (obj, 0, sizeof (RBinJavaCPTypeObj)); obj->tag = tag; obj->metas = R_NEW0 (RBinJavaMetaInfo); obj->metas->type_info = (void *) &R_BIN_JAVA_CP_METAS[tag]; obj->name = r_str_dup (NULL, (const char *) R_BIN_JAVA_CP_METAS[tag].name); memset (&obj->info.cp_double.bytes, 0, sizeof (obj->info.cp_double.bytes)); memcpy (&obj->info.cp_double.bytes, buffer + 1, 8); } return obj; } R_API ut64 r_bin_java_double_cp_calc_size(RBinJavaCPTypeObj *obj) { ut64 size = 0; // tag size += 1; // obj->info.cp_string.string_idx = R_BIN_JAVA_USHORT (buffer, 1); size += 8; return size; } R_API RBinJavaCPTypeObj *r_bin_java_utf8_cp_new(RBinJavaObj *bin, ut8 *buffer, ut64 sz) { ut8 tag = buffer[0]; RBinJavaCPTypeObj *obj; int quick_check = r_bin_java_quick_check (R_BIN_JAVA_CP_UTF8, tag, sz, "Utf8"); if (quick_check > 0) { return NULL; } if ((obj = R_NEW0 (RBinJavaCPTypeObj))) { obj->tag = tag; obj->metas = R_NEW0 (RBinJavaMetaInfo); obj->metas->type_info = (void *) &R_BIN_JAVA_CP_METAS[tag]; obj->name = r_str_dup (NULL, (const char *) R_BIN_JAVA_CP_METAS[tag].name); obj->info.cp_utf8.length = R_BIN_JAVA_USHORT (buffer, 1); obj->info.cp_utf8.bytes = (ut8 *) malloc 
(obj->info.cp_utf8.length + 1);
	// Tail of the UTF8 constant-pool reader: copy at most `length` bytes
	// (clamped to the remaining `sz - 3` input bytes) into the freshly
	// allocated, NUL-padded payload buffer.
	if (obj->info.cp_utf8.bytes) {
		memset (obj->info.cp_utf8.bytes, 0, obj->info.cp_utf8.length + 1);
		if (obj->info.cp_utf8.length < (sz - 3)) {
			memcpy (obj->info.cp_utf8.bytes, buffer + 3, (sz - 3));
			obj->info.cp_utf8.length = sz - 3;
		} else {
			memcpy (obj->info.cp_utf8.bytes, buffer + 3, obj->info.cp_utf8.length);
		}
		obj->value = obj->info.cp_utf8.bytes;
	} else {
		// Payload allocation failed: discard the partially built object.
		r_bin_java_obj_free (obj);
		obj = NULL;
	}
	}
	return obj;
}
// Serialized size of a UTF8 constant-pool entry: 1 tag byte + 2 length
// bytes + the string payload.
R_API ut64 r_bin_java_utf8_cp_calc_size(RBinJavaCPTypeObj *obj) {
	ut64 size = 0;
	size += 1;
	if (obj && R_BIN_JAVA_CP_UTF8 == obj->tag) {
		size += 2;
		size += obj->info.cp_utf8.length;
	}
	return size;
}
// Parse a CONSTANT_NameAndType entry (tag + name index + descriptor index)
// from `buffer` (`sz` bytes available). Returns NULL when the quick sanity
// check fails or allocation fails.
R_API RBinJavaCPTypeObj *r_bin_java_name_and_type_cp_new(RBinJavaObj *bin, ut8 *buffer, ut64 sz) {
	ut8 tag = buffer[0];
	RBinJavaCPTypeObj *obj = NULL;
	int quick_check = 0;
	quick_check = r_bin_java_quick_check (R_BIN_JAVA_CP_NAMEANDTYPE, tag, sz, "RBinJavaCPTypeNameAndType");
	if (quick_check > 0) {
		return obj;
	}
	obj = R_NEW0 (RBinJavaCPTypeObj);
	if (obj) {
		// NOTE(review): obj->metas is dereferenced right after R_NEW0
		// with no NULL check — assumes allocation never fails.
		obj->metas = R_NEW0 (RBinJavaMetaInfo);
		obj->metas->type_info = (void *) &R_BIN_JAVA_CP_METAS[tag];
		obj->name = r_str_dup (NULL, (const char *) R_BIN_JAVA_CP_METAS[tag].name);;
		obj->tag = tag;
		obj->info.cp_name_and_type.name_idx = R_BIN_JAVA_USHORT (buffer, 1);
		obj->info.cp_name_and_type.descriptor_idx = R_BIN_JAVA_USHORT (buffer, 3);
	}
	return obj;
}
// Serialized size of a NameAndType entry: tag + two ut16 indices.
R_API ut64 r_bin_java_name_and_type_cp_calc_size(RBinJavaCPTypeObj *obj) {
	ut64 size = 0;
	if (obj) {
		size += 1;
		// obj->info.cp_name_and_type.name_idx = R_BIN_JAVA_USHORT (buffer, 1);
		size += 2;
		// obj->info.cp_name_and_type.descriptor_idx = R_BIN_JAVA_USHORT (buffer, 3);
		size += 2;
	}
	return size;
}
// Parse a CONSTANT_MethodType entry (tag + ut16 descriptor index).
R_API RBinJavaCPTypeObj *r_bin_java_methodtype_cp_new(RBinJavaObj *bin, ut8 *buffer, ut64 sz) {
	ut8 tag = buffer[0];
	int quick_check = r_bin_java_quick_check (R_BIN_JAVA_CP_METHODTYPE, tag, sz, "RBinJavaCPTypeMethodType");
	if (quick_check > 0) {
		return NULL;
	}
	RBinJavaCPTypeObj *obj = R_NEW0 (RBinJavaCPTypeObj);
	if (obj) {
		obj->metas = R_NEW0 (RBinJavaMetaInfo);
	obj->metas->type_info = (void *) &R_BIN_JAVA_CP_METAS[tag];
		obj->name = r_str_dup (NULL, (const char *) R_BIN_JAVA_CP_METAS[tag].name);;
		obj->tag = tag;
		obj->info.cp_method_type.descriptor_index = R_BIN_JAVA_USHORT (buffer, 1);
	}
	return obj;
}
// Serialized size of a MethodType entry: tag + ut16 descriptor index.
R_API ut64 r_bin_java_methodtype_cp_calc_size(RBinJavaCPTypeObj *obj) {
	ut64 size = 0;
	size += 1;
	// obj->info.cp_method_type.descriptor_index = R_BIN_JAVA_USHORT (buffer, 1);
	size += 2;
	return size;
}
// Parse a CONSTANT_MethodHandle entry (tag + reference kind byte + ut16
// reference index).
R_API RBinJavaCPTypeObj *r_bin_java_methodhandle_cp_new(RBinJavaObj *bin, ut8 *buffer, ut64 sz) {
	ut8 tag = buffer[0];
	int quick_check = r_bin_java_quick_check (R_BIN_JAVA_CP_METHODHANDLE, tag, sz, "RBinJavaCPTypeMethodHandle");
	if (quick_check > 0) {
		return NULL;
	}
	RBinJavaCPTypeObj *obj = R_NEW0 (RBinJavaCPTypeObj);
	if (obj) {
		obj->metas = R_NEW0 (RBinJavaMetaInfo);
		obj->metas->type_info = (void *) &R_BIN_JAVA_CP_METAS[tag];
		obj->name = r_str_dup (NULL, (const char *) R_BIN_JAVA_CP_METAS[tag].name);;
		obj->tag = tag;
		obj->info.cp_method_handle.reference_kind = buffer[1];
		obj->info.cp_method_handle.reference_index = R_BIN_JAVA_USHORT (buffer, 2);
	}
	return obj;
}
// NOTE(review): only tag (1) + reference index (2) are counted here; the
// reference_kind byte read above is not included — confirm against the
// on-disk layout (JVM spec gives CONSTANT_MethodHandle 4 bytes total).
R_API ut64 r_bin_java_methodhandle_cp_calc_size(RBinJavaCPTypeObj *obj) {
	ut64 size = 0;
	size += 1;
	// obj->info.cp_method_handle.reference_index = R_BIN_JAVA_USHORT (buffer, 2);
	size += 2;
	return size;
}
// Parse a CONSTANT_InvokeDynamic entry (tag + bootstrap-method-attr index
// + name-and-type index). The body continues in the next chunk.
R_API RBinJavaCPTypeObj *r_bin_java_invokedynamic_cp_new(RBinJavaObj *bin, ut8 *buffer, ut64 sz) {
	ut8 tag = buffer[0];
	RBinJavaCPTypeObj *obj;
	int quick_check = r_bin_java_quick_check (R_BIN_JAVA_CP_INVOKEDYNAMIC, tag, sz, "RBinJavaCPTypeMethodHandle");
	if (quick_check > 0) {
		return NULL;
	}
	if ((obj = R_NEW0 (RBinJavaCPTypeObj))) {
		obj->metas = R_NEW0 (RBinJavaMetaInfo);
		obj->metas->type_info = (void *) &R_BIN_JAVA_CP_METAS[tag];
		obj->name = r_str_dup (NULL, (const char *) R_BIN_JAVA_CP_METAS[tag].name);;
		obj->tag = tag;
		obj->info.cp_invoke_dynamic.bootstrap_method_attr_index = R_BIN_JAVA_USHORT (buffer, 1);
obj->info.cp_invoke_dynamic.name_and_type_index = R_BIN_JAVA_USHORT (buffer, 3); } return obj; } R_API int r_bin_java_check_reset_cp_obj(RBinJavaCPTypeObj *cp_obj, ut8 tag) { bool res = false; if (tag < R_BIN_JAVA_CP_METAS_SZ) { if (tag != cp_obj->tag) { if (cp_obj->tag == R_BIN_JAVA_CP_UTF8) { R_FREE (cp_obj->info.cp_utf8.bytes); cp_obj->info.cp_utf8.length = 0; R_FREE (cp_obj->name); } cp_obj->tag = tag; cp_obj->metas->type_info = (void *) &R_BIN_JAVA_CP_METAS[tag]; cp_obj->name = strdup (R_BIN_JAVA_CP_METAS[tag].name); res = true; } else { eprintf ("Invalid tag\n"); } } else { eprintf ("Invalid tag '%d'.\n", tag); } return res; } R_API ut8 *r_bin_java_cp_get_4bytes(ut8 tag, ut32 *out_sz, const ut8 *buf, const ut64 len) { ut8 *buffer = malloc (5); if (!buffer) { return NULL; } ut32 val = 0; if (!buffer || len < 4) { if (out_sz) { *out_sz = 0; } free (buffer); return NULL; } buffer[0] = tag; val = R_BIN_JAVA_UINT (buf, 0); memcpy (buffer + 1, (const char *) &val, 4); *out_sz = 5; return buffer; } R_API ut8 *r_bin_java_cp_get_8bytes(ut8 tag, ut32 *out_sz, const ut8 *buf, const ut64 len) { ut8 *buffer = malloc (10); if (!buffer) { return NULL; } ut64 val = 0; if (len < 8) { *out_sz = 0; free (buffer); return NULL; } buffer[0] = tag; val = r_bin_java_raw_to_long (buf, 0); memcpy (buffer + 1, (const char *) &val, 8); *out_sz = 9; return buffer; } R_API ut8 *r_bin_java_cp_append_classref_and_name(RBinJavaObj *bin, ut32 *out_sz, const char *classname, const ut32 classname_len) { ut16 use_name_idx = bin->cp_idx + 1; ut8 *bytes = NULL, *name_bytes = NULL; name_bytes = r_bin_java_cp_get_utf8 (R_BIN_JAVA_CP_UTF8, out_sz, (const ut8 *) classname, classname_len); if (*out_sz > 0 && name_bytes) { ut8 *idx_addr = (ut8 *) &use_name_idx; bytes = malloc (*out_sz + 3); memcpy (bytes, name_bytes, *out_sz); bytes[*out_sz + 0] = R_BIN_JAVA_CP_CLASS; bytes[*out_sz + 1] = idx_addr[1]; bytes[*out_sz + 2] = idx_addr[0]; *out_sz += 3; } free (name_bytes); return bytes; } R_API ut8 
*r_bin_java_cp_get_fref_bytes(RBinJavaObj *bin, ut32 *out_sz, ut8 tag, ut16 cn_idx, ut16 fn_idx, ut16 ft_idx) { ut8 *bytes = NULL, *fnt_bytes = NULL; RBinJavaCPTypeObj *ref_cp_obj = NULL; ut16 fnt_idx = 0, cref_idx = 0; ut32 fnt_len = 0; ut16 ref_cp_obj_idx = r_bin_java_find_cp_class_ref_from_name_idx (bin, cn_idx); if (!ref_cp_obj_idx) { return NULL; } ref_cp_obj = r_bin_java_get_item_from_bin_cp_list (bin, ref_cp_obj_idx); if (ref_cp_obj) { cref_idx = ref_cp_obj->idx; } ref_cp_obj = r_bin_java_find_cp_name_and_type_info (bin, fn_idx, ft_idx); if (ref_cp_obj) { fnt_idx = ref_cp_obj->idx; } else { fnt_bytes = r_bin_java_cp_get_name_type (bin, &fnt_len, fn_idx, ft_idx); fnt_idx = bin->cp_idx + 1; } if (cref_idx && fnt_idx) { bytes = r_bin_java_cp_get_fm_ref (bin, out_sz, tag, cref_idx, fnt_idx); if (fnt_bytes) { ut8 *tbuf = malloc (fnt_len + *out_sz); if (!tbuf) { free (bytes); free (fnt_bytes); return NULL; } // copy the bytes to the new buffer memcpy (tbuf, fnt_bytes, fnt_len); memcpy (tbuf + fnt_len, bytes, *out_sz); // update the values free old buffer *out_sz += fnt_len; free (bytes); bytes = tbuf; } } free (fnt_bytes); return bytes; } R_API ut8 *r_bin_java_cp_get_classref(RBinJavaObj *bin, ut32 *out_sz, const char *classname, const ut32 classname_len, const ut16 name_idx) { ut16 use_name_idx = -1; ut8 *bytes = NULL; if (name_idx == (ut16) - 1 && classname && *classname && classname_len > 0) { // find class_name_idx by class name RList *results = r_bin_java_find_cp_const_by_val_utf8 (bin, (const ut8 *) classname, classname_len); if (r_list_length (results) == 1) { use_name_idx = (ut16) * ((ut32 *) r_list_get_n (results, 0)); } r_list_free (results); } else if (name_idx != (ut16) - 1 && name_idx != 0) { use_name_idx = name_idx; } if (use_name_idx == (ut16) - 1 && classname && *classname && classname_len > 0) { bytes = r_bin_java_cp_append_classref_and_name (bin, out_sz, classname, classname_len); } else if (use_name_idx != (ut16) - 1) { ut8 *idx_addr = (ut8 *) 
&use_name_idx; bytes = malloc (3); if (!bytes) { return NULL; } bytes[0] = R_BIN_JAVA_CP_CLASS; bytes[1] = idx_addr[1]; bytes[2] = idx_addr[0]; *out_sz += 3; } return bytes; } R_API ut8 *r_bin_java_cp_get_fm_ref(RBinJavaObj *bin, ut32 *out_sz, ut8 tag, ut16 class_idx, ut16 name_and_type_idx) { return r_bin_java_cp_get_2_ut16 (bin, out_sz, tag, class_idx, name_and_type_idx); } R_API ut8 *r_bin_java_cp_get_2_ut16(RBinJavaObj *bin, ut32 *out_sz, ut8 tag, ut16 ut16_one, ut16 ut16_two) { ut8 *bytes = malloc (7); if (!bytes) { return NULL; } ut8 *idx_addr = NULL; bytes[*out_sz] = tag; *out_sz += 1; idx_addr = (ut8 *) &ut16_one; bytes[*out_sz + 1] = idx_addr[1]; bytes[*out_sz + 2] = idx_addr[0]; *out_sz += 3; idx_addr = (ut8 *) &ut16_two; bytes[*out_sz + 1] = idx_addr[1]; bytes[*out_sz + 2] = idx_addr[0]; *out_sz += 3; return bytes; } R_API ut8 *r_bin_java_cp_get_name_type(RBinJavaObj *bin, ut32 *out_sz, ut16 name_idx, ut16 type_idx) { return r_bin_java_cp_get_2_ut16 (bin, out_sz, R_BIN_JAVA_CP_NAMEANDTYPE, name_idx, type_idx); } R_API ut8 *r_bin_java_cp_get_utf8(ut8 tag, ut32 *out_sz, const ut8 *buf, const ut64 len) { ut8 *buffer = NULL; ut16 sz = 0; ut16 t = (ut16) len; if (len > 0 && len > (ut16) - 1) { *out_sz = 0; return NULL; } sz = R_BIN_JAVA_USHORT (((ut8 *) (ut16 *) &t), 0); *out_sz = 3 + t; // tag + sz + bytes buffer = malloc (*out_sz + 3); if (!buffer) { return NULL; } // XXX - excess bytes are created to ensure null for string operations. 
memset (buffer, 0, *out_sz + 3); buffer[0] = tag; memcpy (buffer + 1, (const char *) &sz, 2); memcpy (buffer + 3, buf, *out_sz - 3); return buffer; } R_API ut64 r_bin_java_invokedynamic_cp_calc_size(RBinJavaCPTypeObj *obj) { ut64 size = 0; size += 1; // obj->info.cp_invoke_dynamic.bootstrap_method_attr_index = R_BIN_JAVA_USHORT (buffer, 1); size += 2; // obj->info.cp_invoke_dynamic.name_and_type_index = R_BIN_JAVA_USHORT (buffer, 3); size += 2; return size; } // End new Constant Pool types // Start free Constant Pool types R_API void r_bin_java_default_free(void /* RBinJavaCPTypeObj*/ *o) { RBinJavaCPTypeObj *obj = o; if (obj) { free (obj->metas); free (obj->name); free (obj->value); free (obj); } } R_API void r_bin_java_utf8_info_free(void /* RBinJavaCPTypeObj*/ *o) { RBinJavaCPTypeObj *obj = o; if (obj) { free (obj->name); free (obj->metas); free (obj->info.cp_utf8.bytes); free (obj); } } // Deallocs for type objects R_API void r_bin_java_obj_free(void /*RBinJavaCPTypeObj*/ *o) { RBinJavaCPTypeObj *obj = o; ((RBinJavaCPTypeMetas *) obj->metas->type_info)->allocs->delete_obj (obj); } R_API void r_bin_java_print_attr_summary(RBinJavaAttrInfo *attr) { if (attr == NULL) { eprintf ("Attempting to print an invalid RBinJavaAttrInfo *.\n"); return; } ((RBinJavaAttrMetas *) attr->metas->type_info)->allocs->print_summary (attr); } R_API void r_bin_java_print_source_debug_attr_summary(RBinJavaAttrInfo *attr) { ut32 i = 0; if (attr == NULL) { eprintf ("Attempting to print an invalid RBinJavaSourceDebugExtensionAttr *.\n"); return; } printf ("Source Debug Extension Attribute Information:\n"); printf (" Attribute Offset: 0x%08"PFMT64x "\n", attr->file_offset); printf (" Attribute Name Index: %d (%s)\n", attr->name_idx, attr->name); printf (" Extension Length: %d\n", attr->length); printf (" Source Debug Extension value: \n"); for (i = 0; i < attr->length; i++) { printf ("%c", attr->info.debug_extensions.debug_extension[i]); } printf ("\n Source Debug Extension End\n"); } R_API 
void r_bin_java_print_unknown_attr_summary(RBinJavaAttrInfo *attr) { if (attr == NULL) { eprintf ("Attempting to print an invalid RBinJavaAttrInfo *Unknown.\n"); return; } printf ("Unknown Attribute Information:\n"); printf (" Attribute Offset: 0x%08"PFMT64x "\n", attr->file_offset); printf (" Attribute Name Index: %d (%s)\n", attr->name_idx, attr->name); printf (" Attribute Length: %d\n", attr->length); } R_API void r_bin_java_print_code_exceptions_attr_summary(RBinJavaExceptionEntry *exc_entry) { if (exc_entry == NULL) { eprintf ("Attempting to print an invalid RBinJavaExceptionEntry *.\n"); return; } printf (" Exception Table Entry Information\n"); printf (" offset: 0x%08"PFMT64x"\n", exc_entry->file_offset); printf (" catch_type: %d\n", exc_entry->catch_type); printf (" start_pc: 0x%04x\n", exc_entry->start_pc); printf (" end_pc: 0x%04x\n", exc_entry->end_pc); printf (" handler_pc: 0x%04x\n", exc_entry->handler_pc); } // End free Constant Pool types R_API void r_bin_java_print_code_attr_summary(RBinJavaAttrInfo *attr) { RListIter *iter = NULL, *iter_tmp = NULL; RBinJavaExceptionEntry *exc_entry = NULL; RBinJavaAttrInfo *_attr = NULL; if (!attr) { eprintf ("Attempting to print an invalid RBinJavaAttrInfo *Code.\n"); return; } printf ("Code Attribute Information:\n"); printf (" Attribute Offset: 0x%08"PFMT64x "\n", attr->file_offset); printf (" Attribute Name Index: %d (%s)\n", attr->name_idx, attr->name); printf (" Attribute Length: %d, Attribute Count: %d\n", attr->length, attr->info.code_attr.attributes_count); printf (" Max Stack: %d\n", attr->info.code_attr.max_stack); printf (" Max Locals: %d\n", attr->info.code_attr.max_locals); printf (" Code Length: %d\n", attr->info.code_attr.code_length); printf (" Code At Offset: 0x%08"PFMT64x "\n", (ut64) attr->info.code_attr.code_offset); printf ("Code Attribute Exception Table Information:\n"); printf (" Exception Table Length: %d\n", attr->info.code_attr.exception_table_length); if 
(attr->info.code_attr.exception_table) { // Delete the attr entries r_list_foreach_safe (attr->info.code_attr.exception_table, iter, iter_tmp, exc_entry) { r_bin_java_print_code_exceptions_attr_summary (exc_entry); } } printf (" Implicit Method Stack Frame:\n"); r_bin_java_print_stack_map_frame_summary (attr->info.code_attr.implicit_frame); printf ("Code Attribute Attributes Information:\n"); if (attr->info.code_attr.attributes && attr->info.code_attr.attributes_count > 0) { printf (" Code Attribute Attributes Count: %d\n", attr->info.code_attr.attributes_count); r_list_foreach_safe (attr->info.code_attr.attributes, iter, iter_tmp, _attr) { r_bin_java_print_attr_summary (_attr); } } } R_API void r_bin_java_print_constant_value_attr_summary(RBinJavaAttrInfo *attr) { if (!attr) { eprintf ("Attempting to print an invalid RBinJavaAttrInfo *ConstantValue.\n"); return; } printf ("Constant Value Attribute Information:\n"); printf (" Attribute Offset: 0x%08"PFMT64x "\n", attr->file_offset); printf (" Attribute Name Index: %d (%s)\n", attr->name_idx, attr->name); printf (" Attribute Length: %d\n", attr->length); printf (" ConstantValue Index: %d\n", attr->info.constant_value_attr.constantvalue_idx); } R_API void r_bin_java_print_deprecated_attr_summary(RBinJavaAttrInfo *attr) { if (!attr) { eprintf ("Attempting to print an invalid RBinJavaAttrInfo *Deperecated.\n"); return; } printf ("Deperecated Attribute Information:\n"); printf (" Attribute Offset: 0x%08"PFMT64x "\n", attr->file_offset); printf (" Attribute Name Index: %d (%s)\n", attr->name_idx, attr->name); printf (" Attribute Length: %d\n", attr->length); } R_API void r_bin_java_print_enclosing_methods_attr_summary(RBinJavaAttrInfo *attr) { if (!attr) { eprintf ("Attempting to print an invalid RBinJavaAttrInfo *Deperecated.\n"); return; } printf ("Enclosing Method Attribute Information:\n"); printf (" Attribute Offset: 0x%08"PFMT64x "\n", attr->file_offset); printf (" Attribute Name Index: %d (%s)\n", attr->name_idx, 
attr->name);
	printf (" Attribute Length: %d\n", attr->length);
	printf (" Class Info Index : 0x%02x\n", attr->info.enclosing_method_attr.class_idx);
	printf (" Method Name and Type Index : 0x%02x\n", attr->info.enclosing_method_attr.method_idx);
	printf (" Class Name : %s\n", attr->info.enclosing_method_attr.class_name);
	printf (" Method Name and Desc : %s %s\n", attr->info.enclosing_method_attr.method_name, attr->info.enclosing_method_attr.method_descriptor);
}
// Print the exception index table of an Exceptions attribute.
R_API void r_bin_java_print_exceptions_attr_summary(RBinJavaAttrInfo *attr) {
	ut32 i = 0;
	if (!attr) {
		eprintf ("Attempting to print an invalid RBinJavaAttrInfo *Exceptions.\n");
		return;
	}
	printf ("Exceptions Attribute Information:\n");
	printf (" Attribute Offset: 0x%08"PFMT64x "\n", attr->file_offset);
	printf (" Attribute Name Index: %d (%s)\n", attr->name_idx, attr->name);
	printf (" Attribute Length: %d\n", attr->length);
	for (i = 0; i < attr->info.exceptions_attr.number_of_exceptions; i++) {
		printf (" Exceptions Attribute Index[%d]: %d\n", i, attr->info.exceptions_attr.exception_idx_table[i]);
	}
}
// Print a single InnerClasses element (stderr).
R_API void r_bin_java_print_classes_attr_summary(RBinJavaClassesAttribute *icattr) {
	if (!icattr) {
		eprintf ("Attempting to print an invalid RBinJavaClassesAttribute* (InnerClasses element).\n");
		return;
	}
	eprintf (" Inner Classes Class Attribute Offset: 0x%08"PFMT64x "\n", icattr->file_offset);
	eprintf (" Inner Classes Class Attribute Class Name (%d): %s\n", icattr->inner_name_idx, icattr->name);
	eprintf (" Inner Classes Class Attribute Class inner_class_info_idx: %d\n", icattr->inner_class_info_idx);
	eprintf (" Inner Classes Class Attribute Class inner_class_access_flags: 0x%02x %s\n", icattr->inner_class_access_flags, icattr->flags_str);
	eprintf (" Inner Classes Class Attribute Class outer_class_info_idx: %d\n", icattr->outer_class_info_idx);
	eprintf (" Inner Classes Class Field Information:\n");
	r_bin_java_print_field_summary (icattr->clint_field);
	// NOTE(review): the field-summary header and call are emitted twice
	// here — looks like copy-paste duplication; confirm before removing.
	eprintf (" Inner Classes Class Field Information:\n");
	r_bin_java_print_field_summary (icattr->clint_field);
	eprintf (" Inner Classes Class Attr Info Information:\n");
	r_bin_java_print_attr_summary (icattr->clint_attr);
}
// Print every InnerClasses element of the attribute.
R_API void r_bin_java_print_inner_classes_attr_summary(RBinJavaAttrInfo *attr) {
	RBinJavaClassesAttribute *icattr;
	RListIter *iter, *iter_tmp;
	if (!attr) {
		eprintf ("Attempting to print an invalid RBinJavaAttrInfo *InnerClasses.\n");
		return;
	}
	printf ("Inner Classes Attribute Information:\n");
	printf (" Attribute Offset: 0x%08"PFMT64x "\n", attr->file_offset);
	printf (" Attribute Name Index: %d (%s)\n", attr->name_idx, attr->name);
	printf (" Attribute Length: %d\n", attr->length);
	r_list_foreach_safe (attr->info.inner_classes_attr.classes, iter, iter_tmp, icattr) {
		r_bin_java_print_classes_attr_summary (icattr);
	}
}
// Print one LineNumberTable row.
R_API void r_bin_java_print_line_number_attr_summary(RBinJavaLineNumberAttribute *lnattr) {
	if (!lnattr) {
		eprintf ("Attempting to print an invalid RBinJavaLineNumberAttribute *.\n");
		return;
	}
	printf (" Line Number Attribute Offset: 0x%08"PFMT64x "\n", lnattr->file_offset);
	printf (" Line Number Attribute StartPC: %d\n", lnattr->start_pc);
	printf (" Line Number Attribute LineNumber: %d\n", lnattr->line_number);
}
// Print the whole LineNumberTable attribute.
R_API void r_bin_java_print_line_number_table_attr_summary(RBinJavaAttrInfo *attr) {
	RBinJavaLineNumberAttribute *lnattr;
	RListIter *iter, *iter_tmp;
	if (!attr) {
		eprintf ("Attempting to print an invalid RBinJavaAttrInfo *LineNumberTable.\n");
		return;
	}
	printf ("Line Number Table Attribute Information:\n");
	printf (" Attribute Offset: 0x%08"PFMT64x "\n", attr->file_offset);
	printf (" Attribute Name Index: %d (%s)\n", attr->name_idx, attr->name);
	printf (" Attribute Length: %d\n", attr->length);
	r_list_foreach_safe (attr->info.line_number_table_attr.line_number_table, iter, iter_tmp, lnattr) {
		r_bin_java_print_line_number_attr_summary (lnattr);
	}
}
// Print one LocalVariableTable row.
R_API void r_bin_java_print_local_variable_attr_summary(RBinJavaLocalVariableAttribute *lvattr) {
	if (!lvattr) {
		eprintf ("Attempting to print an invalid RBinJavaLocalVariableAttribute *.\n");
		return;
	}
	printf (" Local Variable Attribute offset: 0x%08"PFMT64x "\n", lvattr->file_offset);
	printf (" Local Variable Attribute start_pc: %d\n", lvattr->start_pc);
	printf (" Local Variable Attribute Length: %d\n", lvattr->length);
	printf (" Local Variable Attribute name_idx: %d\n", lvattr->name_idx);
	printf (" Local Variable Attribute name: %s\n", lvattr->name);
	printf (" Local Variable Attribute descriptor_idx: %d\n", lvattr->descriptor_idx);
	printf (" Local Variable Attribute descriptor: %s\n", lvattr->descriptor);
	printf (" Local Variable Attribute index: %d\n", lvattr->index);
}
// Print the whole LocalVariableTable attribute.
R_API void r_bin_java_print_local_variable_table_attr_summary(RBinJavaAttrInfo *attr) {
	RBinJavaLocalVariableAttribute *lvattr;
	RListIter *iter, *iter_tmp;
	if (attr == NULL) {
		eprintf ("Attempting to print an invalid RBinJavaAttrInfo *LocalVariableTable.\n");
		return;
	}
	printf ("Local Variable Table Attribute Information:\n");
	printf (" Attribute Offset: 0x%08"PFMT64x "\n", attr->file_offset);
	printf (" Attribute Name Index: %d (%s)\n", attr->name_idx, attr->name);
	printf (" Attribute Length: %d\n", attr->length);
	r_list_foreach_safe (attr->info.local_variable_table_attr.local_variable_table, iter, iter_tmp, lvattr) {
		r_bin_java_print_local_variable_attr_summary (lvattr);
	}
}
// Print one LocalVariableTypeTable row (stderr, unlike the printers above).
R_API void r_bin_java_print_local_variable_type_attr_summary(RBinJavaLocalVariableTypeAttribute *lvattr) {
	if (!lvattr) {
		eprintf ("Attempting to print an invalid RBinJavaLocalVariableTypeAttribute *.\n");
		return;
	}
	eprintf (" Local Variable Type Attribute offset: 0x%08"PFMT64x "\n", lvattr->file_offset);
	eprintf (" Local Variable Type Attribute start_pc: %d\n", lvattr->start_pc);
	eprintf (" Local Variable Type Attribute Length: %d\n", lvattr->length);
	eprintf (" Local Variable Type Attribute name_idx: %d\n", lvattr->name_idx);
	eprintf (" Local Variable Type Attribute name: %s\n", lvattr->name);
	eprintf (" Local Variable Type Attribute signature_idx: %d\n", lvattr->signature_idx);
	eprintf (" Local Variable Type Attribute signature: %s\n", lvattr->signature);
	eprintf (" Local Variable Type Attribute index: %d\n", lvattr->index);
}
// Print the whole LocalVariableTypeTable attribute (stderr).
R_API void r_bin_java_print_local_variable_type_table_attr_summary(RBinJavaAttrInfo *attr) {
	RBinJavaLocalVariableTypeAttribute *lvtattr;
	RListIter *iter, *iter_tmp;
	if (!attr) {
		eprintf ("Attempting to print an invalid RBinJavaAttrInfo *LocalVariableTable.\n");
		return;
	}
	eprintf ("Local Variable Type Table Attribute Information:\n");
	eprintf (" Attribute Offset: 0x%08"PFMT64x "\n", attr->file_offset);
	eprintf (" Attribute Name Index: %d (%s)\n", attr->name_idx, attr->name);
	eprintf (" Attribute Length: %d\n", attr->length);
	r_list_foreach_safe (attr->info.local_variable_type_table_attr.local_variable_table, iter, iter_tmp, lvtattr) {
		r_bin_java_print_local_variable_type_attr_summary (lvtattr);
	}
}
// Print a Signature attribute.
R_API void r_bin_java_print_signature_attr_summary(RBinJavaAttrInfo *attr) {
	if (!attr) {
		eprintf ("Attempting to print an invalid RBinJavaAttrInfo *SignatureAttr.\n");
		return;
	}
	printf ("Signature Attribute Information:\n");
	printf (" Attribute Offset: 0x%08"PFMT64x "\n", attr->file_offset);
	printf (" Attribute Name Index: %d (%s)\n", attr->name_idx, attr->name);
	printf (" Attribute Length: %d\n", attr->length);
	printf (" Signature UTF8 Index: %d\n", attr->info.signature_attr.signature_idx);
	printf (" Signature string: %s\n", attr->info.signature_attr.signature);
}
// Print a SourceFile attribute.
R_API void r_bin_java_print_source_code_file_attr_summary(RBinJavaAttrInfo *attr) {
	if (!attr) {
		eprintf ("Attempting to print an invalid RBinJavaAttrInfo *SourceFile.\n");
		return;
	}
	printf ("Source File Attribute Information:\n");
	printf (" Attribute Offset: 0x%08"PFMT64x "\n", attr->file_offset);
	printf (" Attribute Name Index: %d (%s)\n", attr->name_idx, attr->name);
	printf (" Attribute Length: %d\n", attr->length);
	printf (" Source File Index: %d\n", attr->info.source_file_attr.sourcefile_idx);
}
R_API void
r_bin_java_print_synthetic_attr_summary(RBinJavaAttrInfo *attr) {
	if (attr == NULL) {
		eprintf ("Attempting to print an invalid RBinJavaAttrInfo *Synthetic.\n");
		return;
	}
	printf ("Synthetic Attribute Information:\n");
	printf (" Attribute Offset: 0x%08"PFMT64x "\n", attr->file_offset);
	printf (" Attribute Name Index: %d (%s)\n", attr->name_idx, attr->name);
	printf (" Attribute Length: %d\n", attr->length);
	// NOTE(review): reads source_file_attr.sourcefile_idx for a Synthetic
	// attribute — likely copy-paste from the SourceFile printer; confirm.
	printf (" Attribute Index: %d\n", attr->info.source_file_attr.sourcefile_idx);
}
// Print a StackMapTable attribute and all of its frames.
R_API void r_bin_java_print_stack_map_table_attr_summary(RBinJavaAttrInfo *attr) {
	RListIter *iter, *iter_tmp;
	RBinJavaStackMapFrame *frame;
	if (!attr) {
		eprintf ("Attempting to print an invalid RBinJavaStackMapTableAttr* .\n");
		return;
	}
	printf ("StackMapTable Attribute Information:\n");
	printf (" Attribute Offset: 0x%08"PFMT64x "\n", attr->file_offset);
	printf (" Attribute Name Index: %d (%s)\n", attr->name_idx, attr->name);
	printf (" Attribute Length: %d\n", attr->length);
	printf (" StackMapTable Method Code Size: 0x%08x\n", attr->info.stack_map_table_attr.code_size);
	printf (" StackMapTable Frame Entries: 0x%08x\n", attr->info.stack_map_table_attr.number_of_entries);
	printf (" StackMapTable Frames:\n");
	RList *ptrList = attr->info.stack_map_table_attr.stack_map_frame_entries;
	if (ptrList) {
		r_list_foreach_safe (ptrList, iter, iter_tmp, frame) {
			r_bin_java_print_stack_map_frame_summary (frame);
		}
	}
}
// Print one stack-map frame: tag, counts, then locals and stack items.
R_API void r_bin_java_print_stack_map_frame_summary(RBinJavaStackMapFrame *obj) {
	RListIter *iter, *iter_tmp;
	RBinJavaVerificationObj *ver_obj;
	if (!obj) {
		eprintf ("Attempting to print an invalid RBinJavaStackMapFrame* .\n");
		return;
	}
	printf ("Stack Map Frame Information\n");
	printf (" Tag Value = 0x%02x Name: %s\n", obj->tag, ((RBinJavaStackMapFrameMetas *) obj->metas->type_info)->name);
	printf (" Offset: 0x%08"PFMT64x "\n", obj->file_offset);
	printf (" Local Variable Count = 0x%04x\n", obj->number_of_locals);
	printf (" Stack Items Count = 0x%04x\n", obj->number_of_stack_items);
	printf (" Local Variables:\n");
	RList *ptrList = obj->local_items;
	r_list_foreach_safe (ptrList, iter, iter_tmp, ver_obj) {
		r_bin_java_print_verification_info_summary (ver_obj);
	}
	printf (" Stack Items:\n");
	ptrList = obj->stack_items;
	r_list_foreach_safe (ptrList, iter, iter_tmp, ver_obj) {
		r_bin_java_print_verification_info_summary (ver_obj);
	}
}
// Print one verification-type entry; clamps unknown tags to UNKNOWN before
// indexing the metas table.
R_API void r_bin_java_print_verification_info_summary(RBinJavaVerificationObj *obj) {
	ut8 tag_value = R_BIN_JAVA_STACKMAP_UNKNOWN;
	if (!obj) {
		eprintf ("Attempting to print an invalid RBinJavaVerificationObj* .\n");
		return;
	}
	if (obj->tag < R_BIN_JAVA_STACKMAP_UNKNOWN) {
		tag_value = obj->tag;
	}
	printf ("Verification Information\n");
	printf (" Offset: 0x%08"PFMT64x "", obj->file_offset);
	printf (" Tag Value = 0x%02x\n", obj->tag);
	printf (" Name = %s\n", R_BIN_JAVA_VERIFICATION_METAS[tag_value].name);
	if (obj->tag == R_BIN_JAVA_STACKMAP_OBJECT) {
		printf (" Object Constant Pool Index = 0x%x\n", obj->info.obj_val_cp_idx);
	} else if (obj->tag == R_BIN_JAVA_STACKMAP_UNINIT) {
		printf (" Uninitialized Object offset in code = 0x%x\n", obj->info.uninit_offset);
	}
}
// Print a field; dispatches to the method printer for method-typed fields.
R_API void r_bin_java_print_field_summary(RBinJavaField *field) {
	RBinJavaAttrInfo *attr;
	RListIter *iter, *iter_tmp;
	if (field) {
		if (field->type == R_BIN_JAVA_FIELD_TYPE_METHOD) {
			r_bin_java_print_method_summary (field);
		} else {
#if 0
			r_bin_java_print_interface_summary (field);
#else
			printf ("Field Summary Information:\n");
			printf (" File Offset: 0x%08"PFMT64x "\n", field->file_offset);
			printf (" Name Index: %d (%s)\n", field->name_idx, field->name);
			printf (" Descriptor Index: %d (%s)\n", field->descriptor_idx, field->descriptor);
			printf (" Access Flags: 0x%02x (%s)\n", field->flags, field->flags_str);
			printf (" Field Attributes Count: %d\n", field->attr_count);
			printf (" Field Attributes:\n");
			r_list_foreach_safe (field->attributes, iter, iter_tmp, attr) {
				r_bin_java_print_attr_summary (attr);
			}
#endif
		}
	} else {
		eprintf ("Attempting to print an invalid RBinJavaField* Field.\n");
	}
}
// Print a method-typed field with its attributes.
R_API void r_bin_java_print_method_summary(RBinJavaField *field) {
	RBinJavaAttrInfo *attr;
	RListIter *iter, *iter_tmp;
	if (field == NULL) {
		eprintf ("Attempting to print an invalid RBinJavaField* Method.\n");
		return;
	}
	printf ("Method Summary Information:\n");
	printf (" File Offset: 0x%08"PFMT64x "\n", field->file_offset);
	printf (" Name Index: %d (%s)\n", field->name_idx, field->name);
	printf (" Descriptor Index: %d (%s)\n", field->descriptor_idx, field->descriptor);
	printf (" Access Flags: 0x%02x (%s)\n", field->flags, field->flags_str);
	printf (" Method Attributes Count: %d\n", field->attr_count);
	printf (" Method Attributes:\n");
	r_list_foreach_safe (field->attributes, iter, iter_tmp, attr) {
		r_bin_java_print_attr_summary (attr);
	}
}
/*
R_API void r_bin_java_print_interface_summary(ut16 idx) {//RBinJavaField *field) {
	RBinJavaAttrInfo *attr;
	RBinJavaCPTypeObj *class_info;
	RListIter *iter, *iter_tmp;
	if (field == NULL) {
		eprintf ("Attempting to print an invalid RBinJavaField* Interface.\n");
		return;
	}
	eprintf ("Interface Summary Information:\n");
	eprintf (" File offset: 0x%08"PFMT64x"", field->file_offset);
	eprintf (" Access Flags: %d\n", field->flags);
	eprintf (" Name Index: %d (%s)\n", field->name_idx, field->name);
	eprintf (" Descriptor Index: %d (%s)\n", field->descriptor_idx, field->descriptor);
	eprintf (" Interface Attributes Count: %d\n", field->attr_count);
	eprintf (" Interface Attributes:\n");
	r_list_foreach_safe (field->attributes, iter, iter_tmp, attr) {
		r_bin_java_print_attr_summary(attr);
	}
}
*/
// Print an InterfaceMethodRef constant-pool entry (stderr); the final
// argument of the last call continues in the next chunk.
R_API void r_bin_java_print_interfacemethodref_cp_summary(RBinJavaCPTypeObj *obj) {
	if (!obj) {
		eprintf ("Attempting to print an invalid RBinJavaCPTypeObj* InterfaceMethodRef.\n");
		return;
	}
	eprintf ("InterfaceMethodRef ConstantPool Type (%d) ", obj->metas->ord);
	eprintf (" Offset: 0x%08"PFMT64x"", obj->file_offset);
	eprintf (" Class Index = %d\n", obj->info.cp_interface.class_idx);
	eprintf (" Name and type Index = %d\n",
obj->info.cp_interface.name_and_type_idx);
}
// Stringify an InterfaceMethodRef entry: "ord.offset.name.class.nat".
R_API char *r_bin_java_print_interfacemethodref_cp_stringify(RBinJavaCPTypeObj *obj) {
	return r_str_newf ("%d.0x%04"PFMT64x ".%s.%d.%d",
		obj->metas->ord, obj->file_offset + obj->loadaddr,
		((RBinJavaCPTypeMetas *) obj->metas->type_info)->name,
		obj->info.cp_interface.class_idx, obj->info.cp_interface.name_and_type_idx);
}
// Print a MethodHandle entry: reference kind and index (stderr).
R_API void r_bin_java_print_methodhandle_cp_summary(RBinJavaCPTypeObj *obj) {
	ut8 ref_kind;
	if (!obj) {
		eprintf ("Attempting to print an invalid RBinJavaCPTypeObj* RBinJavaCPTypeMethodHandle.\n");
		return;
	}
	ref_kind = obj->info.cp_method_handle.reference_kind;
	eprintf ("MethodHandle ConstantPool Type (%d) ", obj->metas->ord);
	eprintf (" Offset: 0x%08"PFMT64x"", obj->file_offset);
	eprintf (" Reference Kind = (0x%02x) %s\n", ref_kind, R_BIN_JAVA_REF_METAS[ref_kind].name);
	eprintf (" Reference Index = %d\n", obj->info.cp_method_handle.reference_index);
}
// Stringify a MethodHandle entry: "ord.offset.name.kindname.index".
R_API char *r_bin_java_print_methodhandle_cp_stringify(RBinJavaCPTypeObj *obj) {
	ut8 ref_kind = obj->info.cp_method_handle.reference_kind;
	return r_str_newf ("%d.0x%04"PFMT64x ".%s.%s.%d",
		obj->metas->ord, obj->file_offset + obj->loadaddr,
		((RBinJavaCPTypeMetas *) obj->metas->type_info)->name,
		R_BIN_JAVA_REF_METAS[ref_kind].name,
		obj->info.cp_method_handle.reference_index);
}
// Print a MethodType entry (stdout).
R_API void r_bin_java_print_methodtype_cp_summary(RBinJavaCPTypeObj *obj) {
	if (!obj) {
		eprintf ("Attempting to print an invalid RBinJavaCPTypeObj* RBinJavaCPTypeMethodType.\n");
		return;
	}
	printf ("MethodType ConstantPool Type (%d) ", obj->metas->ord);
	printf (" Offset: 0x%08"PFMT64x "", obj->file_offset);
	printf (" Descriptor Index = 0x%02x\n", obj->info.cp_method_type.descriptor_index);
}
// Stringify a MethodType entry: "ord.offset.name.descriptor_index".
R_API char *r_bin_java_print_methodtype_cp_stringify(RBinJavaCPTypeObj *obj) {
	return r_str_newf ("%d.0x%04"PFMT64x ".%s.%d",
		obj->metas->ord, obj->file_offset + obj->loadaddr,
		((RBinJavaCPTypeMetas *) obj->metas->type_info)->name,
		obj->info.cp_method_type.descriptor_index);
}
// Print an InvokeDynamic entry (stderr).
R_API void r_bin_java_print_invokedynamic_cp_summary(RBinJavaCPTypeObj *obj) {
	if (!obj) {
		eprintf ("Attempting to print an invalid RBinJavaCPTypeObj* RBinJavaCPTypeInvokeDynamic.\n");
		return;
	}
	eprintf ("InvokeDynamic ConstantPool Type (%d) ", obj->metas->ord);
	eprintf (" Offset: 0x%08"PFMT64x"", obj->file_offset);
	eprintf (" Bootstrap Method Attr Index = (0x%02x)\n", obj->info.cp_invoke_dynamic.bootstrap_method_attr_index);
	eprintf (" Bootstrap Name and Type Index = (0x%02x)\n", obj->info.cp_invoke_dynamic.name_and_type_index);
}
// Stringify an InvokeDynamic entry: "ord.offset.name.bootstrap.nat".
R_API char *r_bin_java_print_invokedynamic_cp_stringify(RBinJavaCPTypeObj *obj) {
	return r_str_newf ("%d.0x%04"PFMT64x ".%s.%d.%d",
		obj->metas->ord, obj->file_offset + obj->loadaddr,
		((RBinJavaCPTypeMetas *) obj->metas->type_info)->name,
		obj->info.cp_invoke_dynamic.bootstrap_method_attr_index,
		obj->info.cp_invoke_dynamic.name_and_type_index);
}
// Print a MethodRef entry (stderr).
R_API void r_bin_java_print_methodref_cp_summary(RBinJavaCPTypeObj *obj) {
	if (!obj) {
		eprintf ("Attempting to print an invalid RBinJavaCPTypeObj* MethodRef.\n");
		return;
	}
	eprintf ("MethodRef ConstantPool Type (%d) ", obj->metas->ord);
	eprintf (" Offset: 0x%08"PFMT64x"", obj->file_offset);
	eprintf (" Class Index = %d\n", obj->info.cp_method.class_idx);
	eprintf (" Name and type Index = %d\n", obj->info.cp_method.name_and_type_idx);
}
// Stringify a MethodRef entry: "ord.offset.name.class.nat".
R_API char *r_bin_java_print_methodref_cp_stringify(RBinJavaCPTypeObj *obj) {
	return r_str_newf ("%d.0x%04"PFMT64x ".%s.%d.%d",
		obj->metas->ord, obj->file_offset + obj->loadaddr,
		((RBinJavaCPTypeMetas *) obj->metas->type_info)->name,
		obj->info.cp_method.class_idx, obj->info.cp_method.name_and_type_idx);
}
// Print a FieldRef entry (stderr).
R_API void r_bin_java_print_fieldref_cp_summary(RBinJavaCPTypeObj *obj) {
	if (!obj) {
		eprintf ("Attempting to print an invalid RBinJavaCPTypeObj* FieldRef.\n");
		return;
	}
	eprintf ("FieldRef ConstantPool Type (%d) ", obj->metas->ord);
	eprintf (" Offset: 0x%08"PFMT64x"", obj->file_offset);
	eprintf (" Class Index = %d\n", obj->info.cp_field.class_idx);
	eprintf (" Name and type Index = %d\n", obj->info.cp_field.name_and_type_idx);
}
// Stringify a FieldRef entry, retrying once with a 50%-larger buffer when
// the first snprintf reports truncation.
R_API char *r_bin_java_print_fieldref_cp_stringify(RBinJavaCPTypeObj *obj) {
	ut32 size = 255, consumed = 0;
	char *value = malloc (size);
	if (value) {
		memset (value, 0, size);
		consumed = snprintf (value, size, "%d.0x%04"PFMT64x ".%s.%d.%d",
			obj->metas->ord, obj->file_offset + obj->loadaddr,
			((RBinJavaCPTypeMetas *) obj->metas->type_info)->name,
			obj->info.cp_field.class_idx, obj->info.cp_field.name_and_type_idx);
		if (consumed >= size - 1) {
			free (value);
			size += size >> 1;
			value = malloc (size);
			if (value) {
				memset (value, 0, size);
				(void)snprintf (value, size, "%d.0x%04"PFMT64x ".%s.%d.%d",
					obj->metas->ord, obj->file_offset + obj->loadaddr,
					((RBinJavaCPTypeMetas *) obj->metas->type_info)->name,
					obj->info.cp_field.class_idx, obj->info.cp_field.name_and_type_idx);
			}
		}
	}
	return value;
}
// Print a ClassRef entry (stderr).
R_API void r_bin_java_print_classref_cp_summary(RBinJavaCPTypeObj *obj) {
	if (!obj) {
		eprintf ("Attempting to print an invalid RBinJavaCPTypeObj* ClassRef.\n");
		return;
	}
	eprintf ("ClassRef ConstantPool Type (%d) ", obj->metas->ord);
	eprintf (" Offset: 0x%08"PFMT64x"", obj->file_offset);
	eprintf (" Name Index = %d\n", obj->info.cp_class.name_idx);
}
// Stringify a ClassRef entry, same grow-and-retry pattern as FieldRef.
R_API char *r_bin_java_print_classref_cp_stringify(RBinJavaCPTypeObj *obj) {
	ut32 size = 255, consumed = 0;
	char *value = malloc (size);
	if (value) {
		memset (value, 0, size);
		consumed = snprintf (value, size, "%d.0x%04"PFMT64x ".%s.%d",
			obj->metas->ord, obj->file_offset + obj->loadaddr,
			((RBinJavaCPTypeMetas *) obj->metas->type_info)->name,
			obj->info.cp_class.name_idx);
		if (consumed >= size - 1) {
			free (value);
			size += size >> 1;
			value = malloc (size);
			if (value) {
				memset (value, 0, size);
				(void)snprintf (value, size, "%d.0x%04"PFMT64x ".%s.%d",
					obj->metas->ord, obj->file_offset + obj->loadaddr,
					((RBinJavaCPTypeMetas *) obj->metas->type_info)->name,
					obj->info.cp_class.name_idx);
			}
		}
	}
	return value;
}
// Print a String entry; the body continues in the next chunk.
R_API void r_bin_java_print_string_cp_summary(RBinJavaCPTypeObj *obj) {
	if (!obj) {
eprintf ("Attempting to print an invalid RBinJavaCPTypeObj* String.\n"); return; } printf ("String ConstantPool Type (%d) ", obj->metas->ord); printf (" Offset: 0x%08"PFMT64x "", obj->file_offset); printf (" String Index = %d\n", obj->info.cp_string.string_idx); } R_API char *r_bin_java_print_string_cp_stringify(RBinJavaCPTypeObj *obj) { ut32 size = 255, consumed = 0; char *value = malloc (size); if (value) { memset (value, 0, size); consumed = snprintf (value, size, "%d.0x%04"PFMT64x ".%s.%d", obj->metas->ord, obj->file_offset + obj->loadaddr, ((RBinJavaCPTypeMetas *) obj->metas->type_info)->name, obj->info.cp_string.string_idx); if (consumed >= size - 1) { free (value); size += size >> 1; value = malloc (size); if (value) { memset (value, 0, size); (void)snprintf (value, size, "%d.0x%04"PFMT64x ".%s.%d", obj->metas->ord, obj->file_offset, ((RBinJavaCPTypeMetas *) obj->metas->type_info)->name, obj->info.cp_string.string_idx); } } } return value; } R_API void r_bin_java_print_integer_cp_summary(RBinJavaCPTypeObj *obj) { ut8 *b = NULL; if (!obj) { eprintf ("Attempting to print an invalid RBinJavaCPTypeObj* Integer.\n"); return; } b = obj->info.cp_integer.bytes.raw; eprintf ("Integer ConstantPool Type (%d) ", obj->metas->ord); eprintf (" Offset: 0x%08"PFMT64x"", obj->file_offset); eprintf (" bytes = %02x %02x %02x %02x\n", b[0], b[1], b[2], b[3]); eprintf (" integer = %d\n", R_BIN_JAVA_UINT (obj->info.cp_integer.bytes.raw, 0)); } R_API char *r_bin_java_print_integer_cp_stringify(RBinJavaCPTypeObj *obj) { ut32 size = 255, consumed = 0; char *value = malloc (size); if (value) { memset (value, 0, size); consumed = snprintf (value, size, "%d.0x%04"PFMT64x ".%s.0x%08x", obj->metas->ord, obj->file_offset + obj->loadaddr, ((RBinJavaCPTypeMetas *) obj->metas->type_info)->name, R_BIN_JAVA_UINT (obj->info.cp_integer.bytes.raw, 0)); if (consumed >= size - 1) { free (value); size += size >> 1; value = malloc (size); if (value) { memset (value, 0, size); (void)snprintf (value, 
size, "%d.0x%04"PFMT64x ".%s.0x%08x", obj->metas->ord, obj->file_offset + obj->loadaddr, ((RBinJavaCPTypeMetas *) obj->metas->type_info)->name, R_BIN_JAVA_UINT (obj->info.cp_integer.bytes.raw, 0)); } } } return value; } R_API void r_bin_java_print_float_cp_summary(RBinJavaCPTypeObj *obj) { ut8 *b = NULL; if (!obj) { eprintf ("Attempting to print an invalid RBinJavaCPTypeObj* Double.\n"); return; } b = obj->info.cp_float.bytes.raw; printf ("Float ConstantPool Type (%d) ", obj->metas->ord); printf (" Offset: 0x%08"PFMT64x "", obj->file_offset); printf (" Bytes = %02x %02x %02x %02x\n", b[0], b[1], b[2], b[3]); printf (" Float = %f\n", R_BIN_JAVA_FLOAT (obj->info.cp_float.bytes.raw, 0)); } R_API char *r_bin_java_print_float_cp_stringify(RBinJavaCPTypeObj *obj) { ut32 size = 255, consumed = 0; char *value = malloc (size); if (value) { memset (value, 0, size); consumed = snprintf (value, size, "%d.0x%04"PFMT64x ".%s.%f", obj->metas->ord, obj->file_offset + obj->loadaddr, ((RBinJavaCPTypeMetas *) obj->metas->type_info)->name, R_BIN_JAVA_FLOAT (obj->info.cp_float.bytes.raw, 0)); if (consumed >= size - 1) { free (value); size += size >> 1; value = malloc (size); if (value) { memset (value, 0, size); (void)snprintf (value, size, "%d.0x%04"PFMT64x ".%s.%f", obj->metas->ord, obj->file_offset + obj->loadaddr, ((RBinJavaCPTypeMetas *) obj->metas->type_info)->name, R_BIN_JAVA_FLOAT (obj->info.cp_float.bytes.raw, 0)); } } } return value; } R_API void r_bin_java_print_long_cp_summary(RBinJavaCPTypeObj *obj) { ut8 *b = NULL; if (!obj) { eprintf ("Attempting to print an invalid RBinJavaCPTypeObj* Long.\n"); return; } b = obj->info.cp_long.bytes.raw; printf ("Long ConstantPool Type (%d) ", obj->metas->ord); printf (" Offset: 0x%08"PFMT64x "", obj->file_offset); printf (" High-Bytes = %02x %02x %02x %02x\n", b[0], b[1], b[2], b[3]); printf (" Low-Bytes = %02x %02x %02x %02x\n", b[4], b[5], b[6], b[7]); printf (" Long = %08"PFMT64x "\n", r_bin_java_raw_to_long 
(obj->info.cp_long.bytes.raw, 0)); } R_API char *r_bin_java_print_long_cp_stringify(RBinJavaCPTypeObj *obj) { ut32 size = 255, consumed = 0; char *value = malloc (size); if (value) { memset (value, 0, size); consumed = snprintf (value, size, "%d.0x%04"PFMT64x ".%s.0x%08"PFMT64x "", obj->metas->ord, obj->file_offset, ((RBinJavaCPTypeMetas *) obj->metas->type_info)->name, r_bin_java_raw_to_long (obj->info.cp_long.bytes.raw, 0)); if (consumed >= size - 1) { free (value); size += size >> 1; value = malloc (size); if (value) { memset (value, 0, size); (void)snprintf (value, size, "%d.0x%04"PFMT64x ".%s.0x%08"PFMT64x "", obj->metas->ord, obj->file_offset, ((RBinJavaCPTypeMetas *) obj->metas->type_info)->name, r_bin_java_raw_to_long (obj->info.cp_long.bytes.raw, 0)); } } } return value; } R_API void r_bin_java_print_double_cp_summary(RBinJavaCPTypeObj *obj) { ut8 *b = NULL; if (!obj) { eprintf ("Attempting to print an invalid RBinJavaCPTypeObj* Double.\n"); return; } b = obj->info.cp_double.bytes.raw; printf ("Double ConstantPool Type (%d) ", obj->metas->ord); printf (" Offset: 0x%08"PFMT64x "", obj->file_offset); printf (" High-Bytes = %02x %02x %02x %02x\n", b[0], b[1], b[2], b[3]); printf (" Low-Bytes = %02x %02x %02x %02x\n", b[4], b[5], b[6], b[7]); printf (" Double = %f\n", r_bin_java_raw_to_double (obj->info.cp_double.bytes.raw, 0)); } R_API char *r_bin_java_print_double_cp_stringify(RBinJavaCPTypeObj *obj) { ut32 size = 255, consumed = 0; char *value = malloc (size); if (value) { memset (value, 0, size); consumed = snprintf (value, size, "%d.0x%04"PFMT64x ".%s.%f", obj->metas->ord, obj->file_offset, ((RBinJavaCPTypeMetas *) obj->metas->type_info)->name, r_bin_java_raw_to_double (obj->info.cp_double.bytes.raw, 0)); if (consumed >= size - 1) { free (value); size += size >> 1; value = malloc (size); if (value) { memset (value, 0, size); (void)snprintf (value, size, "%d.0x%04"PFMT64x ".%s.%f", obj->metas->ord, obj->file_offset, ((RBinJavaCPTypeMetas *) 
obj->metas->type_info)->name, r_bin_java_raw_to_double (obj->info.cp_double.bytes.raw, 0)); } } } return value; } R_API void r_bin_java_print_name_and_type_cp_summary(RBinJavaCPTypeObj *obj) { if (!obj) { eprintf ("Attempting to print an invalid RBinJavaCPTypeObj* Name_And_Type.\n"); return; } printf ("Name_And_Type ConstantPool Type (%d) ", obj->metas->ord); printf (" Offset: 0x%08"PFMT64x "", obj->file_offset); printf (" name_idx = (%d)\n", obj->info.cp_name_and_type.name_idx); printf (" descriptor_idx = (%d)\n", obj->info.cp_name_and_type.descriptor_idx); } R_API char *r_bin_java_print_name_and_type_cp_stringify(RBinJavaCPTypeObj *obj) { ut32 size = 255, consumed = 0; char *value = malloc (size); if (value) { memset (value, 0, size); consumed = snprintf (value, size, "%d.0x%04"PFMT64x ".%s.%d.%d", obj->metas->ord, obj->file_offset + obj->loadaddr, ((RBinJavaCPTypeMetas *) obj->metas->type_info)->name, obj->info.cp_name_and_type.name_idx, obj->info.cp_name_and_type.descriptor_idx); if (consumed >= size - 1) { free (value); size += size >> 1; value = malloc (size); if (value) { memset (value, 0, size); (void)snprintf (value, size, "%d.0x%04"PFMT64x ".%s.%d.%d", obj->metas->ord, obj->file_offset + obj->loadaddr, ((RBinJavaCPTypeMetas *) obj->metas->type_info)->name, obj->info.cp_name_and_type.name_idx, obj->info.cp_name_and_type.descriptor_idx); } } } return value; } R_API void r_bin_java_print_utf8_cp_summary(RBinJavaCPTypeObj *obj) { if (!obj) { eprintf ("Attempting to print an invalid RBinJavaCPTypeObj* Utf8.\n"); return; } char *str = convert_string ((const char *) obj->info.cp_utf8.bytes, obj->info.cp_utf8.length); eprintf ("UTF8 ConstantPool Type (%d) ", obj->metas->ord); eprintf (" Offset: 0x%08"PFMT64x"", obj->file_offset); eprintf (" length = %d\n", obj->info.cp_utf8.length); eprintf (" utf8 = %s\n", str); free (str); } R_API char *r_bin_java_print_utf8_cp_stringify(RBinJavaCPTypeObj *obj) { char *utf8_str = r_hex_bin2strdup (obj->info.cp_utf8.bytes, 
obj->info.cp_utf8.length); char *res = r_str_newf ("%d.0x%04"PFMT64x ".%s.%d.%s", obj->metas->ord, obj->file_offset + obj->loadaddr, ((RBinJavaCPTypeMetas *) obj->metas->type_info)->name, obj->info.cp_utf8.length, utf8_str); free (utf8_str); return res; } R_API void r_bin_java_print_null_cp_summary(RBinJavaCPTypeObj *obj) { eprintf ("Unknown ConstantPool Type Tag: 0x%04x .\n", obj->tag); } R_API char *r_bin_java_print_null_cp_stringify(RBinJavaCPTypeObj *obj) { return r_str_newf ("%d.0x%04"PFMT64x ".%s", obj->metas->ord, obj->file_offset + obj->loadaddr, ((RBinJavaCPTypeMetas *) obj->metas->type_info)->name); } R_API void r_bin_java_print_unknown_cp_summary(RBinJavaCPTypeObj *obj) { eprintf ("NULL ConstantPool Type.\n"); } R_API char *r_bin_java_print_unknown_cp_stringify(RBinJavaCPTypeObj *obj) { return r_str_newf ("%d.0x%04"PFMT64x ".%s", obj->metas->ord, obj->file_offset + obj->loadaddr, ((RBinJavaCPTypeMetas *) obj->metas->type_info)->name); } R_API RBinJavaElementValuePair *r_bin_java_element_pair_new(ut8 *buffer, ut64 sz, ut64 buf_offset) { if (!buffer || sz < 8) { return NULL; } RBinJavaElementValuePair *evp = R_NEW0 (RBinJavaElementValuePair); if (!evp) { return NULL; } // TODO: What is the signifigance of evp element evp->element_name_idx = R_BIN_JAVA_USHORT (buffer, 0); ut64 offset = 2; evp->file_offset = buf_offset; evp->name = r_bin_java_get_utf8_from_bin_cp_list (R_BIN_JAVA_GLOBAL_BIN, evp->element_name_idx); if (!evp->name) { // TODO: eprintf unable to find the name for the given index eprintf ("ElementValue Name is invalid.\n"); evp->name = strdup ("UNKNOWN"); } if (offset >= sz) { free (evp); return NULL; } evp->value = r_bin_java_element_value_new (buffer + offset, sz - offset, buf_offset + offset); if (evp->value) { offset += evp->value->size; if (offset >= sz) { free (evp->value); free (evp); return NULL; } } evp->size = offset; return evp; } R_API void r_bin_java_print_element_pair_summary(RBinJavaElementValuePair *evp) { if (!evp) { eprintf 
("Attempting to print an invalid RBinJavaElementValuePair *pair.\n"); return; } printf ("Element Value Pair information:\n"); printf (" EV Pair File Offset: 0x%08"PFMT64x "\n", evp->file_offset); printf (" EV Pair Element Name index: 0x%02x\n", evp->element_name_idx); printf (" EV Pair Element Name: %s\n", evp->name); printf (" EV Pair Element Value:\n"); r_bin_java_print_element_value_summary (evp->value); } R_API void r_bin_java_print_element_value_summary(RBinJavaElementValue *element_value) { RBinJavaCPTypeObj *obj; RBinJavaElementValue *ev_element = NULL; RListIter *iter = NULL, *iter_tmp = NULL; char *name; if (!element_value) { eprintf ("Attempting to print an invalid RBinJavaElementValuePair *pair.\n"); return; } name = ((RBinJavaElementValueMetas *) element_value->metas->type_info)->name; eprintf ("Element Value information:\n"); eprintf (" EV Pair File Offset: 0x%08"PFMT64x "\n", element_value->file_offset); eprintf (" EV Value Type (%d): %s\n", element_value->tag, name); switch (element_value->tag) { case R_BIN_JAVA_EV_TAG_BYTE: case R_BIN_JAVA_EV_TAG_CHAR: case R_BIN_JAVA_EV_TAG_DOUBLE: case R_BIN_JAVA_EV_TAG_FLOAT: case R_BIN_JAVA_EV_TAG_INT: case R_BIN_JAVA_EV_TAG_LONG: case R_BIN_JAVA_EV_TAG_SHORT: case R_BIN_JAVA_EV_TAG_BOOLEAN: case R_BIN_JAVA_EV_TAG_STRING: eprintf (" EV Value Constant Value index: 0x%02x\n", element_value->value.const_value.const_value_idx); eprintf (" EV Value Constant Value Information:\n"); obj = element_value->value.const_value.const_value_cp_obj; if (obj && obj->metas && obj->metas->type_info) { ((RBinJavaCPTypeMetas *) obj->metas->type_info)->allocs->print_summary (obj); } break; case R_BIN_JAVA_EV_TAG_ENUM: eprintf (" EV Value Enum Constant Value Const Name Index: 0x%02x\n", element_value->value.enum_const_value.const_name_idx); eprintf (" EV Value Enum Constant Value Type Name Index: 0x%02x\n", element_value->value.enum_const_value.type_name_idx); eprintf (" EV Value Enum Constant Value Const CP Information:\n"); obj = 
element_value->value.enum_const_value.const_name_cp_obj; if (obj && obj->metas && obj->metas->type_info) { ((RBinJavaCPTypeMetas *) obj->metas->type_info)->allocs->print_summary (obj); } eprintf (" EV Value Enum Constant Value Type CP Information:\n"); obj = element_value->value.enum_const_value.type_name_cp_obj; if (obj && obj->metas && obj->metas->type_info) { ((RBinJavaCPTypeMetas *) obj->metas->type_info)->allocs->print_summary (obj); } break; case R_BIN_JAVA_EV_TAG_CLASS: eprintf (" EV Value Class Info Index: 0x%02x\n", element_value->value.class_value.class_info_idx); eprintf (" EV Value Class Info CP Information:\n"); obj = element_value->value.class_value.class_info_cp_obj; if (obj && obj->metas && obj->metas->type_info) { ((RBinJavaCPTypeMetas *) obj->metas->type_info)->allocs->print_summary (obj); } break; case R_BIN_JAVA_EV_TAG_ARRAY: eprintf (" EV Value Array Value Number of Values: 0x%04x\n", element_value->value.array_value.num_values); eprintf (" EV Value Array Values\n"); r_list_foreach_safe (element_value->value.array_value.values, iter, iter_tmp, ev_element) { r_bin_java_print_element_value_summary (ev_element); } break; case R_BIN_JAVA_EV_TAG_ANNOTATION: eprintf (" EV Annotation Information:\n"); r_bin_java_print_annotation_summary (&element_value->value.annotation_value); break; default: // eprintf unable to handle tag break; } } R_API void r_bin_java_element_pair_free(void /*RBinJavaElementValuePair*/ *e) { RBinJavaElementValuePair *evp = e; if (evp) { free (evp->name); r_bin_java_element_value_free (evp->value); free (evp); } evp = NULL; } R_API void r_bin_java_element_value_free(void /*RBinJavaElementValue*/ *e) { RBinJavaElementValue *element_value = e; RListIter *iter = NULL, *iter_tmp = NULL; RBinJavaCPTypeObj *obj = NULL; RBinJavaElementValue *ev_element = NULL; if (element_value) { R_FREE (element_value->metas); switch (element_value->tag) { case R_BIN_JAVA_EV_TAG_BYTE: case R_BIN_JAVA_EV_TAG_CHAR: case R_BIN_JAVA_EV_TAG_DOUBLE: case 
R_BIN_JAVA_EV_TAG_FLOAT: case R_BIN_JAVA_EV_TAG_INT: case R_BIN_JAVA_EV_TAG_LONG: case R_BIN_JAVA_EV_TAG_SHORT: case R_BIN_JAVA_EV_TAG_BOOLEAN: case R_BIN_JAVA_EV_TAG_STRING: // Delete the CP Type Object obj = element_value->value.const_value.const_value_cp_obj; if (obj && obj->metas) { ((RBinJavaCPTypeMetas *) obj->metas->type_info)->allocs->delete_obj (obj); } break; case R_BIN_JAVA_EV_TAG_ENUM: // Delete the CP Type Objects obj = element_value->value.enum_const_value.const_name_cp_obj; if (obj && obj->metas) { RBinJavaCPTypeMetas *ti = obj->metas->type_info; if (ti && ti->allocs && ti->allocs->delete_obj) { ti->allocs->delete_obj (obj); } } obj = element_value->value.enum_const_value.type_name_cp_obj; if (obj && obj->metas) { RBinJavaCPTypeMetas *tm = obj->metas->type_info; if (tm && tm->allocs && tm->allocs->delete_obj) { tm->allocs->delete_obj (obj); } } break; case R_BIN_JAVA_EV_TAG_CLASS: // Delete the CP Type Object obj = element_value->value.class_value.class_info_cp_obj; if (obj && obj->metas) { ((RBinJavaCPTypeMetas *) obj->metas->type_info)->allocs->delete_obj (obj); } break; case R_BIN_JAVA_EV_TAG_ARRAY: // Delete the Element Value array List r_list_foreach_safe (element_value->value.array_value.values, iter, iter_tmp, ev_element) { if (ev_element) { r_bin_java_element_value_free (ev_element); } else { // TODO eprintf evps value was NULL } // r_list_delete (element_value->value.array_value.values, iter); ev_element = NULL; } r_list_free (element_value->value.array_value.values); break; case R_BIN_JAVA_EV_TAG_ANNOTATION: // Delete the Annotations List r_list_free (element_value->value.annotation_value.element_value_pairs); break; default: // eprintf unable to free the tag break; } free (element_value); } } R_API ut64 r_bin_java_annotation_default_attr_calc_size(RBinJavaAttrInfo *attr) { ut64 size = 0; if (attr) { // attr = r_bin_java_default_attr_new (buffer, sz, buf_offset); size += 6; // attr->info.annotation_default_attr.default_value = 
r_bin_java_element_value_new (buffer+offset, sz-offset, buf_offset+offset); size += r_bin_java_element_value_calc_size (attr->info.annotation_default_attr.default_value); } return size; } R_API RBinJavaAttrInfo *r_bin_java_annotation_default_attr_new(RBinJavaObj *bin, ut8 *buffer, ut64 sz, ut64 buf_offset) { ut64 offset = 0; RBinJavaAttrInfo *attr = NULL; attr = r_bin_java_default_attr_new (bin, buffer, sz, buf_offset); offset += 6; if (attr && sz >= offset) { attr->type = R_BIN_JAVA_ATTR_TYPE_ANNOTATION_DEFAULT_ATTR; attr->info.annotation_default_attr.default_value = r_bin_java_element_value_new (buffer + offset, sz - offset, buf_offset + offset); if (attr->info.annotation_default_attr.default_value) { offset += attr->info.annotation_default_attr.default_value->size; } } r_bin_java_print_annotation_default_attr_summary (attr); return attr; } static void delete_obj(RBinJavaCPTypeObj *obj) { if (obj && obj->metas && obj->metas->type_info) { RBinJavaCPTypeMetas *ti = obj->metas->type_info; if (ti && ti->allocs && ti->allocs->delete_obj) { ti->allocs->delete_obj (obj); } } } R_API void r_bin_java_annotation_default_attr_free(void /*RBinJavaAttrInfo*/ *a) { RBinJavaAttrInfo *attr = a; RBinJavaElementValue *ev_element = NULL; RListIter *iter = NULL, *iter_tmp = NULL; if (!attr || attr->type != R_BIN_JAVA_ATTR_TYPE_ANNOTATION_DEFAULT_ATTR) { return; } RBinJavaElementValue *element_value = attr->info.annotation_default_attr.default_value; if (!element_value) { return; } switch (element_value->tag) { case R_BIN_JAVA_EV_TAG_BYTE: case R_BIN_JAVA_EV_TAG_CHAR: case R_BIN_JAVA_EV_TAG_DOUBLE: case R_BIN_JAVA_EV_TAG_FLOAT: case R_BIN_JAVA_EV_TAG_INT: case R_BIN_JAVA_EV_TAG_LONG: case R_BIN_JAVA_EV_TAG_SHORT: case R_BIN_JAVA_EV_TAG_BOOLEAN: case R_BIN_JAVA_EV_TAG_STRING: // Delete the CP Type Object delete_obj (element_value->value.const_value.const_value_cp_obj); break; case R_BIN_JAVA_EV_TAG_ENUM: // Delete the CP Type Objects delete_obj 
(element_value->value.enum_const_value.const_name_cp_obj); break; case R_BIN_JAVA_EV_TAG_CLASS: // Delete the CP Type Object delete_obj (element_value->value.class_value.class_info_cp_obj); break; case R_BIN_JAVA_EV_TAG_ARRAY: // Delete the Element Value array List r_list_foreach_safe (element_value->value.array_value.values, iter, iter_tmp, ev_element) { r_bin_java_element_value_free (ev_element); // r_list_delete (element_value->value.array_value.values, iter); ev_element = NULL; } r_list_free (element_value->value.array_value.values); break; case R_BIN_JAVA_EV_TAG_ANNOTATION: // Delete the Annotations List r_list_free (element_value->value.annotation_value.element_value_pairs); break; default: // eprintf unable to free the tag break; } if (attr) { free (attr->name); free (attr->metas); free (attr); } } R_API RBinJavaAnnotation *r_bin_java_annotation_new(ut8 *buffer, ut64 sz, ut64 buf_offset) { ut32 i = 0; RBinJavaAnnotation *annotation = NULL; RBinJavaElementValuePair *evps = NULL; ut64 offset = 0; annotation = R_NEW0 (RBinJavaAnnotation); if (!annotation) { return NULL; } // (ut16) read and set annotation_value.type_idx; annotation->type_idx = R_BIN_JAVA_USHORT (buffer, offset); offset += 2; // (ut16) read and set annotation_value.num_element_value_pairs; annotation->num_element_value_pairs = R_BIN_JAVA_USHORT (buffer, offset); offset += 2; annotation->element_value_pairs = r_list_newf (r_bin_java_element_pair_free); // read annotation_value.num_element_value_pairs, and append to annotation_value.element_value_pairs for (i = 0; i < annotation->num_element_value_pairs; i++) { if (offset > sz) { break; } evps = r_bin_java_element_pair_new (buffer + offset, sz - offset, buf_offset + offset); if (evps) { offset += evps->size; r_list_append (annotation->element_value_pairs, (void *) evps); } } annotation->size = offset; return annotation; } R_API ut64 r_bin_java_annotation_calc_size(RBinJavaAnnotation *annotation) { ut64 sz = 0; RListIter *iter, *iter_tmp; 
RBinJavaElementValuePair *evps = NULL; if (!annotation) { // TODO eprintf allocation fail return sz; } // annotation->type_idx = R_BIN_JAVA_USHORT (buffer, offset); sz += 2; // annotation->num_element_value_pairs = R_BIN_JAVA_USHORT (buffer, offset); sz += 2; r_list_foreach_safe (annotation->element_value_pairs, iter, iter_tmp, evps) { if (evps) { sz += r_bin_java_element_pair_calc_size (evps); } } return sz; } R_API void r_bin_java_annotation_free(void /*RBinJavaAnnotation*/ *a) { RBinJavaAnnotation *annotation = a; if (annotation) { r_list_free (annotation->element_value_pairs); free (annotation); } } R_API void r_bin_java_print_annotation_summary(RBinJavaAnnotation *annotation) { RListIter *iter = NULL, *iter_tmp = NULL; RBinJavaElementValuePair *evp = NULL; if (!annotation) { // TODO eprintf invalid annotation return; } printf (" Annotation Type Index: 0x%02x\n", annotation->type_idx); printf (" Annotation Number of EV Pairs: 0x%04x\n", annotation->num_element_value_pairs); printf (" Annotation EV Pair Values:\n"); if (annotation->element_value_pairs) { r_list_foreach_safe (annotation->element_value_pairs, iter, iter_tmp, evp) { r_bin_java_print_element_pair_summary (evp); } } } R_API ut64 r_bin_java_element_pair_calc_size(RBinJavaElementValuePair *evp) { ut64 sz = 0; if (evp == NULL) { return sz; } // evp->element_name_idx = r_bin_java_read_short(bin, bin->b->cur); sz += 2; // evp->value = r_bin_java_element_value_new (bin, offset+2); if (evp->value) { sz += r_bin_java_element_value_calc_size (evp->value); } return sz; } R_API ut64 r_bin_java_element_value_calc_size(RBinJavaElementValue *element_value) { RListIter *iter, *iter_tmp; RBinJavaElementValue *ev_element; RBinJavaElementValuePair *evps; ut64 sz = 0; if (!element_value) { return sz; } // tag sz += 1; switch (element_value->tag) { case R_BIN_JAVA_EV_TAG_BYTE: case R_BIN_JAVA_EV_TAG_CHAR: case R_BIN_JAVA_EV_TAG_DOUBLE: case R_BIN_JAVA_EV_TAG_FLOAT: case R_BIN_JAVA_EV_TAG_INT: case 
R_BIN_JAVA_EV_TAG_LONG: case R_BIN_JAVA_EV_TAG_SHORT: case R_BIN_JAVA_EV_TAG_BOOLEAN: case R_BIN_JAVA_EV_TAG_STRING: // look up value in bin->cp_list // (ut16) read and set const_value.const_value_idx // element_value->value.const_value.const_value_idx = r_bin_java_read_short(bin, bin->b->cur); sz += 2; break; case R_BIN_JAVA_EV_TAG_ENUM: // (ut16) read and set enum_const_value.type_name_idx // element_value->value.enum_const_value.type_name_idx = r_bin_java_read_short(bin, bin->b->cur); sz += 2; // (ut16) read and set enum_const_value.const_name_idx // element_value->value.enum_const_value.const_name_idx = r_bin_java_read_short(bin, bin->b->cur); sz += 2; break; case R_BIN_JAVA_EV_TAG_CLASS: // (ut16) read and set class_value.class_info_idx // element_value->value.class_value.class_info_idx = r_bin_java_read_short(bin, bin->b->cur); sz += 2; break; case R_BIN_JAVA_EV_TAG_ARRAY: // (ut16) read and set array_value.num_values // element_value->value.array_value.num_values = r_bin_java_read_short(bin, bin->b->cur); sz += 2; r_list_foreach_safe (element_value->value.array_value.values, iter, iter_tmp, ev_element) { if (ev_element) { sz += r_bin_java_element_value_calc_size (ev_element); } } break; case R_BIN_JAVA_EV_TAG_ANNOTATION: // annotation new is not used here. 
// (ut16) read and set annotation_value.type_idx; // element_value->value.annotation_value.type_idx = r_bin_java_read_short(bin, bin->b->cur); sz += 2; // (ut16) read and set annotation_value.num_element_value_pairs; // element_value->value.annotation_value.num_element_value_pairs = r_bin_java_read_short(bin, bin->b->cur); sz += 2; element_value->value.annotation_value.element_value_pairs = r_list_newf (r_bin_java_element_pair_free); r_list_foreach_safe (element_value->value.annotation_value.element_value_pairs, iter, iter_tmp, evps) { if (evps) { sz += r_bin_java_element_pair_calc_size (evps); } } break; default: // eprintf unable to handle tag break; } return sz; } R_API RBinJavaElementValue *r_bin_java_element_value_new(ut8 *buffer, ut64 sz, ut64 buf_offset) { ut32 i = 0; ut64 offset = 0; RBinJavaElementValue *element_value = R_NEW0 (RBinJavaElementValue); if (!element_value) { return NULL; } RBinJavaElementValuePair *evps = NULL; element_value->metas = R_NEW0 (RBinJavaMetaInfo); if (!element_value->metas) { R_FREE (element_value); return NULL; } element_value->file_offset = buf_offset; element_value->tag = buffer[offset]; element_value->size += 1; offset += 1; element_value->metas->type_info = (void *) r_bin_java_get_ev_meta_from_tag (element_value->tag); switch (element_value->tag) { case R_BIN_JAVA_EV_TAG_BYTE: case R_BIN_JAVA_EV_TAG_CHAR: case R_BIN_JAVA_EV_TAG_DOUBLE: case R_BIN_JAVA_EV_TAG_FLOAT: case R_BIN_JAVA_EV_TAG_INT: case R_BIN_JAVA_EV_TAG_LONG: case R_BIN_JAVA_EV_TAG_SHORT: case R_BIN_JAVA_EV_TAG_BOOLEAN: case R_BIN_JAVA_EV_TAG_STRING: // look up value in bin->cp_list // (ut16) read and set const_value.const_value_idx element_value->value.const_value.const_value_idx = R_BIN_JAVA_USHORT (buffer, offset); element_value->size += 2; // look-up, deep copy, and set const_value.const_value_cp_obj element_value->value.const_value.const_value_cp_obj = r_bin_java_clone_cp_idx (R_BIN_JAVA_GLOBAL_BIN, element_value->value.const_value.const_value_idx); break; 
case R_BIN_JAVA_EV_TAG_ENUM: // (ut16) read and set enum_const_value.type_name_idx element_value->value.enum_const_value.type_name_idx = R_BIN_JAVA_USHORT (buffer, offset); element_value->size += 2; offset += 2; // (ut16) read and set enum_const_value.const_name_idx element_value->value.enum_const_value.const_name_idx = R_BIN_JAVA_USHORT (buffer, offset); element_value->size += 2; offset += 2; // look up type_name_index in bin->cp_list // look-up, deep copy, and set enum_const_value.const_name_cp_obj element_value->value.enum_const_value.const_name_cp_obj = r_bin_java_clone_cp_idx (R_BIN_JAVA_GLOBAL_BIN, element_value->value.enum_const_value.const_name_idx); // look-up, deep copy, and set enum_const_value.type_name_cp_obj element_value->value.enum_const_value.type_name_cp_obj = r_bin_java_clone_cp_idx (R_BIN_JAVA_GLOBAL_BIN, element_value->value.enum_const_value.type_name_idx); break; case R_BIN_JAVA_EV_TAG_CLASS: // (ut16) read and set class_value.class_info_idx element_value->value.class_value.class_info_idx = R_BIN_JAVA_USHORT (buffer, offset); element_value->size += 2; offset += 2; // look up type_name_index in bin->cp_list // look-up, deep copy, and set class_value.class_info_cp_obj element_value->value.class_value.class_info_cp_obj = r_bin_java_clone_cp_idx (R_BIN_JAVA_GLOBAL_BIN, element_value->value.class_value.class_info_idx); break; case R_BIN_JAVA_EV_TAG_ARRAY: // (ut16) read and set array_value.num_values element_value->value.array_value.num_values = R_BIN_JAVA_USHORT (buffer, offset); element_value->size += 2; offset += 2; element_value->value.array_value.values = r_list_new (); for (i = 0; i < element_value->value.array_value.num_values; i++) { if (offset >= sz) { break; } RBinJavaElementValue *ev_element = r_bin_java_element_value_new (buffer + offset, sz - offset, buf_offset + offset); if (ev_element) { element_value->size += ev_element->size; offset += ev_element->size; // read array_value.num_values, and append to array_value.values r_list_append 
(element_value->value.array_value.values, (void *) ev_element); } } break; case R_BIN_JAVA_EV_TAG_ANNOTATION: // annotation new is not used here. // (ut16) read and set annotation_value.type_idx; if (offset + 8 < sz) { element_value->value.annotation_value.type_idx = R_BIN_JAVA_USHORT (buffer, offset); element_value->size += 2; offset += 2; // (ut16) read and set annotation_value.num_element_value_pairs; element_value->value.annotation_value.num_element_value_pairs = R_BIN_JAVA_USHORT (buffer, offset); element_value->size += 2; offset += 2; } element_value->value.annotation_value.element_value_pairs = r_list_newf (r_bin_java_element_pair_free); // read annotation_value.num_element_value_pairs, and append to annotation_value.element_value_pairs for (i = 0; i < element_value->value.annotation_value.num_element_value_pairs; i++) { if (offset > sz) { break; } evps = r_bin_java_element_pair_new (buffer + offset, sz - offset, buf_offset + offset); if (evps) { element_value->size += evps->size; offset += evps->size; } if (evps == NULL) { // TODO: eprintf error when reading element pair } r_list_append (element_value->value.annotation_value.element_value_pairs, (void *) evps); } break; default: // eprintf unable to handle tag break; } return element_value; } R_API void r_bin_java_bootstrap_method_argument_free(void /*RBinJavaBootStrapArgument*/ *b) { RBinJavaBootStrapArgument *bsm_arg = b; if (bsm_arg) { RBinJavaCPTypeMetas *tm = (RBinJavaCPTypeMetas*)bsm_arg->argument_info_cp_obj; if (tm) { if (tm && (size_t)(tm->allocs) > 1024 && tm->allocs->delete_obj) { tm->allocs->delete_obj (tm); } bsm_arg->argument_info_cp_obj = NULL; } free (bsm_arg); } } R_API void r_bin_java_print_bootstrap_method_argument_summary(RBinJavaBootStrapArgument *bsm_arg) { if (!bsm_arg) { eprintf ("Attempting to print an invalid RBinJavaBootStrapArgument *.\n"); return; } eprintf ("Bootstrap Method Argument Information:\n"); eprintf (" Offset: 0x%08"PFMT64x"", bsm_arg->file_offset); eprintf (" 
Name_And_Type Index = (0x%02x)\n", bsm_arg->argument_info_idx); if (bsm_arg->argument_info_cp_obj) { eprintf (" Bootstrap Method Argument Type and Name Info:\n"); ((RBinJavaCPTypeMetas *) bsm_arg->argument_info_cp_obj)->allocs->print_summary (bsm_arg->argument_info_cp_obj); } else { eprintf (" Bootstrap Method Argument Type and Name Info: INVALID\n"); } } R_API void r_bin_java_print_bootstrap_method_summary(RBinJavaBootStrapMethod *bsm) { RBinJavaBootStrapArgument *bsm_arg = NULL; RListIter *iter = NULL, *iter_tmp = NULL; if (!bsm) { eprintf ("Attempting to print an invalid RBinJavaBootStrapArgument *.\n"); return; } eprintf ("Bootstrap Method Information:\n"); eprintf (" Offset: 0x%08"PFMT64x"", bsm->file_offset); eprintf (" Method Reference Index = (0x%02x)\n", bsm->bootstrap_method_ref); eprintf (" Number of Method Arguments = (0x%02x)\n", bsm->num_bootstrap_arguments); if (bsm->bootstrap_arguments) { r_list_foreach_safe (bsm->bootstrap_arguments, iter, iter_tmp, bsm_arg) { if (bsm_arg) { r_bin_java_print_bootstrap_method_argument_summary (bsm_arg); } } } else { eprintf (" Bootstrap Method Argument: NONE \n"); } } R_API RBinJavaBootStrapArgument *r_bin_java_bootstrap_method_argument_new(ut8 *buffer, ut64 sz, ut64 buf_offset) { ut64 offset = 0; RBinJavaBootStrapArgument *bsm_arg = (RBinJavaBootStrapArgument *) malloc (sizeof (RBinJavaBootStrapArgument)); if (!bsm_arg) { // TODO eprintf failed to allocate bytes for bootstrap_method. 
return bsm_arg; } memset (bsm_arg, 0, sizeof (RBinJavaBootStrapArgument)); bsm_arg->file_offset = buf_offset; bsm_arg->argument_info_idx = R_BIN_JAVA_USHORT (buffer, offset); offset += 2; bsm_arg->argument_info_cp_obj = r_bin_java_clone_cp_idx (R_BIN_JAVA_GLOBAL_BIN, bsm_arg->argument_info_idx); bsm_arg->size = offset; return bsm_arg; } R_API void r_bin_java_bootstrap_method_free(void /*/RBinJavaBootStrapMethod*/ *b) { RBinJavaBootStrapMethod *bsm = b; RListIter *iter, *iter_tmp; RBinJavaBootStrapArgument *obj = NULL; if (bsm) { if (bsm->bootstrap_arguments) { r_list_foreach_safe (bsm->bootstrap_arguments, iter, iter_tmp, obj) { if (obj) { r_bin_java_bootstrap_method_argument_free (obj); } // r_list_delete (bsm->bootstrap_arguments, iter); } r_list_free (bsm->bootstrap_arguments); bsm->bootstrap_arguments = NULL; } free (bsm); } } R_API RBinJavaBootStrapMethod *r_bin_java_bootstrap_method_new(ut8 *buffer, ut64 sz, ut64 buf_offset) { RBinJavaBootStrapArgument *bsm_arg = NULL; ut32 i = 0; ut64 offset = 0; RBinJavaBootStrapMethod *bsm = R_NEW0 (RBinJavaBootStrapMethod); if (!bsm) { // TODO eprintf failed to allocate bytes for bootstrap_method. return bsm; } memset (bsm, 0, sizeof (RBinJavaBootStrapMethod)); bsm->file_offset = buf_offset; bsm->bootstrap_method_ref = R_BIN_JAVA_USHORT (buffer, offset); offset += 2; bsm->num_bootstrap_arguments = R_BIN_JAVA_USHORT (buffer, offset); offset += 2; bsm->bootstrap_arguments = r_list_new (); for (i = 0; i < bsm->num_bootstrap_arguments; i++) { if (offset >= sz) { break; } // bsm_arg = r_bin_java_bootstrap_method_argument_new (bin, bin->b->cur); bsm_arg = r_bin_java_bootstrap_method_argument_new (buffer + offset, sz - offset, buf_offset + offset); if (bsm_arg) { offset += bsm_arg->size; r_list_append (bsm->bootstrap_arguments, (void *) bsm_arg); } else { // TODO eprintf Failed to read the %d boot strap method. 
} } bsm->size = offset; return bsm; } R_API void r_bin_java_print_bootstrap_methods_attr_summary(RBinJavaAttrInfo *attr) { RListIter *iter, *iter_tmp; RBinJavaBootStrapMethod *obj = NULL; if (!attr || attr->type == R_BIN_JAVA_ATTR_TYPE_BOOTSTRAP_METHODS_ATTR) { eprintf ("Unable to print attribue summary for RBinJavaAttrInfo *RBinJavaBootstrapMethodsAttr"); return; } eprintf ("Bootstrap Methods Attribute Information Information:\n"); eprintf (" Attribute Offset: 0x%08"PFMT64x"", attr->file_offset); eprintf (" Length: 0x%08x", attr->length); eprintf (" Number of Method Arguments = (0x%02x)\n", attr->info.bootstrap_methods_attr.num_bootstrap_methods); if (attr->info.bootstrap_methods_attr.bootstrap_methods) { r_list_foreach_safe (attr->info.bootstrap_methods_attr.bootstrap_methods, iter, iter_tmp, obj) { if (obj) { r_bin_java_print_bootstrap_method_summary (obj); } } } else { eprintf (" Bootstrap Methods: NONE \n"); } } R_API void r_bin_java_bootstrap_methods_attr_free(void /*RBinJavaAttrInfo*/ *a) { RBinJavaAttrInfo *attr = a; if (attr && attr->type == R_BIN_JAVA_ATTR_TYPE_BOOTSTRAP_METHODS_ATTR) { free (attr->name); free (attr->metas); r_list_free (attr->info.bootstrap_methods_attr.bootstrap_methods); free (attr); } } R_API ut64 r_bin_java_bootstrap_methods_attr_calc_size(RBinJavaAttrInfo *attr) { RListIter *iter, *iter_tmp; RBinJavaBootStrapMethod *bsm = NULL; ut64 size = 0; if (attr) { size += 6; // attr->info.bootstrap_methods_attr.num_bootstrap_methods = R_BIN_JAVA_USHORT (buffer, offset); size += 2; r_list_foreach_safe (attr->info.bootstrap_methods_attr.bootstrap_methods, iter, iter_tmp, bsm) { if (bsm) { size += r_bin_java_bootstrap_method_calc_size (bsm); } else { // TODO eprintf Failed to read the %d boot strap method. 
} } } return size; } R_API ut64 r_bin_java_bootstrap_arg_calc_size(RBinJavaBootStrapArgument *bsm_arg) { ut64 size = 0; if (bsm_arg) { // bsm_arg->argument_info_idx = R_BIN_JAVA_USHORT (buffer, offset); size += 2; } return size; } R_API ut64 r_bin_java_bootstrap_method_calc_size(RBinJavaBootStrapMethod *bsm) { RListIter *iter, *iter_tmp; RBinJavaBootStrapArgument *bsm_arg = NULL; ut64 size = 0; if (bsm) { size += 6; // bsm->bootstrap_method_ref = R_BIN_JAVA_USHORT (buffer, offset); size += 2; // bsm->num_bootstrap_arguments = R_BIN_JAVA_USHORT (buffer, offset); size += 2; r_list_foreach_safe (bsm->bootstrap_arguments, iter, iter_tmp, bsm_arg) { if (bsm_arg) { size += r_bin_java_bootstrap_arg_calc_size (bsm_arg); } else { // TODO eprintf Failed to read the %d boot strap method. } } } return size; } R_API RBinJavaAttrInfo *r_bin_java_bootstrap_methods_attr_new(RBinJavaObj *bin, ut8 *buffer, ut64 sz, ut64 buf_offset) { ut32 i = 0; RBinJavaBootStrapMethod *bsm = NULL; ut64 offset = 0; RBinJavaAttrInfo *attr = r_bin_java_default_attr_new (bin, buffer, sz, buf_offset); offset += 6; if (attr) { attr->type = R_BIN_JAVA_ATTR_TYPE_BOOTSTRAP_METHODS_ATTR; attr->info.bootstrap_methods_attr.num_bootstrap_methods = R_BIN_JAVA_USHORT (buffer, offset); offset += 2; attr->info.bootstrap_methods_attr.bootstrap_methods = r_list_newf (r_bin_java_bootstrap_method_free); for (i = 0; i < attr->info.bootstrap_methods_attr.num_bootstrap_methods; i++) { // bsm = r_bin_java_bootstrap_method_new (bin, bin->b->cur); if (offset >= sz) { break; } bsm = r_bin_java_bootstrap_method_new (buffer + offset, sz - offset, buf_offset + offset); if (bsm) { offset += bsm->size; r_list_append (attr->info.bootstrap_methods_attr.bootstrap_methods, (void *) bsm); } else { // TODO eprintf Failed to read the %d boot strap method. 
} } attr->size = offset; } return attr; } R_API void r_bin_java_print_annotation_default_attr_summary(RBinJavaAttrInfo *attr) { if (attr && attr->type == R_BIN_JAVA_ATTR_TYPE_ANNOTATION_DEFAULT_ATTR) { eprintf ("Annotation Default Attribute Information:\n"); eprintf (" Attribute Offset: 0x%08"PFMT64x "\n", attr->file_offset); eprintf (" Attribute Name Index: %d (%s)\n", attr->name_idx, attr->name); eprintf (" Attribute Length: %d\n", attr->length); r_bin_java_print_element_value_summary ((attr->info.annotation_default_attr.default_value)); } else { // TODO: eprintf attr is invalid } } R_API void r_bin_java_annotation_array_free(void /*RBinJavaAnnotationsArray*/ *a) { RBinJavaAnnotationsArray *annotation_array = a; RListIter *iter = NULL, *iter_tmp = NULL; RBinJavaAnnotation *annotation; if (!annotation_array->annotations) { // TODO eprintf return; } r_list_foreach_safe (annotation_array->annotations, iter, iter_tmp, annotation) { if (annotation) { r_bin_java_annotation_free (annotation); } // r_list_delete (annotation_array->annotations, iter); } r_list_free (annotation_array->annotations); free (annotation_array); } R_API void r_bin_java_print_annotation_array_summary(RBinJavaAnnotationsArray *annotation_array) { RListIter *iter = NULL, *iter_tmp = NULL; RBinJavaAnnotation *annotation; if (!annotation_array->annotations) { // TODO eprintf return; } eprintf (" Annotation Array Information:\n"); eprintf (" Number of Annotation Array Elements: %d\n", annotation_array->num_annotations); r_list_foreach_safe (annotation_array->annotations, iter, iter_tmp, annotation) { r_bin_java_print_annotation_summary (annotation); } } R_API RBinJavaAnnotationsArray *r_bin_java_annotation_array_new(ut8 *buffer, ut64 sz, ut64 buf_offset) { RBinJavaAnnotation *annotation; RBinJavaAnnotationsArray *annotation_array; ut32 i; ut64 offset = 0; annotation_array = (RBinJavaAnnotationsArray *) malloc (sizeof (RBinJavaAnnotationsArray)); if (!annotation_array) { // TODO eprintf return NULL; } 
annotation_array->num_annotations = R_BIN_JAVA_USHORT (buffer, offset); offset += 2; annotation_array->annotations = r_list_new (); for (i = 0; i < annotation_array->num_annotations; i++) { if (offset > sz) { break; } annotation = r_bin_java_annotation_new (buffer + offset, sz - offset, buf_offset + offset); if (annotation) { offset += annotation->size; r_list_append (annotation_array->annotations, (void *) annotation); } } annotation_array->size = offset; return annotation_array; } R_API RBinJavaAttrInfo *r_bin_java_rtv_annotations_attr_new(RBinJavaObj *bin, ut8 *buffer, ut64 sz, ut64 buf_offset) { ut32 i = 0; ut64 offset = 0; if (buf_offset + 8 > sz) { return NULL; } RBinJavaAttrInfo *attr = r_bin_java_default_attr_new (bin, buffer, sz, buf_offset); offset += 6; if (attr) { attr->type = R_BIN_JAVA_ATTR_TYPE_RUNTIME_VISIBLE_ANNOTATION_ATTR; attr->info.annotation_array.num_annotations = R_BIN_JAVA_USHORT (buffer, offset); offset += 2; attr->info.annotation_array.annotations = r_list_newf (r_bin_java_annotation_free); for (i = 0; i < attr->info.annotation_array.num_annotations; i++) { if (offset >= sz) { break; } RBinJavaAnnotation *annotation = r_bin_java_annotation_new (buffer + offset, sz - offset, buf_offset + offset); if (annotation) { offset += annotation->size; r_list_append (attr->info.annotation_array.annotations, (void *) annotation); } } attr->size = offset; } return attr; } R_API ut64 r_bin_java_annotation_array_calc_size(RBinJavaAnnotationsArray *annotation_array) { ut64 size = 0; RListIter *iter = NULL, *iter_tmp = NULL; RBinJavaAnnotation *annotation; if (!annotation_array->annotations) { // TODO eprintf return size; } // annotation_array->num_annotations = R_BIN_JAVA_USHORT (buffer, offset); size += 2; r_list_foreach_safe (annotation_array->annotations, iter, iter_tmp, annotation) { size += r_bin_java_annotation_calc_size (annotation); } return size; } R_API ut64 r_bin_java_rtv_annotations_attr_calc_size(RBinJavaAttrInfo *attr) { ut64 size = 0; if 
(!attr) { // TODO eprintf allocation fail return size; } size += (6 + r_bin_java_annotation_array_calc_size (&(attr->info.annotation_array))); return size; } R_API RBinJavaAttrInfo *r_bin_java_rti_annotations_attr_new(RBinJavaObj *bin, ut8 *buffer, ut64 sz, ut64 buf_offset) { ut32 i = 0; RBinJavaAttrInfo *attr = NULL; ut64 offset = 0; attr = r_bin_java_default_attr_new (bin, buffer, sz, buf_offset); offset += 6; if (attr) { attr->type = R_BIN_JAVA_ATTR_TYPE_RUNTIME_INVISIBLE_ANNOTATION_ATTR; attr->info.annotation_array.num_annotations = R_BIN_JAVA_USHORT (buffer, offset); offset += 2; attr->info.annotation_array.annotations = r_list_newf (r_bin_java_annotation_free); for (i = 0; i < attr->info.rtv_annotations_attr.num_annotations; i++) { if (offset >= sz) { break; } RBinJavaAnnotation *annotation = r_bin_java_annotation_new (buffer + offset, sz - offset, buf_offset + offset); if (annotation) { offset += annotation->size; } r_list_append (attr->info.annotation_array.annotations, (void *) annotation); } attr->size = offset; } return attr; } R_API ut64 r_bin_java_rti_annotations_attr_calc_size(RBinJavaAttrInfo *attr) { ut64 size = 0; if (!attr) { // TODO eprintf allocation fail return size; } size += (6 + r_bin_java_annotation_array_calc_size (&(attr->info.annotation_array))); return size; } R_API void r_bin_java_rtv_annotations_attr_free(void /*RBinJavaAttrInfo*/ *a) { RBinJavaAttrInfo *attr = a; if (attr && attr->type == R_BIN_JAVA_ATTR_TYPE_RUNTIME_VISIBLE_ANNOTATION_ATTR) { r_list_free (attr->info.annotation_array.annotations); free (attr->metas); free (attr->name); free (attr); } } R_API void r_bin_java_rti_annotations_attr_free(void /*RBinJavaAttrInfo*/ *a) { RBinJavaAttrInfo *attr = a; if (attr && attr->type == R_BIN_JAVA_ATTR_TYPE_RUNTIME_INVISIBLE_ANNOTATION_ATTR) { r_list_free (attr->info.annotation_array.annotations); free (attr->metas); free (attr->name); free (attr); } } R_API void r_bin_java_print_rtv_annotations_attr_summary(RBinJavaAttrInfo *attr) { if 
(attr && attr->type == R_BIN_JAVA_ATTR_TYPE_RUNTIME_VISIBLE_ANNOTATION_ATTR) { printf ("Runtime Visible Annotations Attribute Information:\n"); printf (" Attribute Offset: 0x%08"PFMT64x "\n", attr->file_offset); printf (" Attribute Name Index: %d (%s)\n", attr->name_idx, attr->name); printf (" Attribute Length: %d\n", attr->length); r_bin_java_print_annotation_array_summary (&attr->info.annotation_array); } } R_API void r_bin_java_print_rti_annotations_attr_summary(RBinJavaAttrInfo *attr) { if (attr && attr->type == R_BIN_JAVA_ATTR_TYPE_RUNTIME_INVISIBLE_ANNOTATION_ATTR) { printf ("Runtime Invisible Annotations Attribute Information:\n"); printf (" Attribute Offset: 0x%08"PFMT64x "\n", attr->file_offset); printf (" Attribute Name Index: %d (%s)\n", attr->name_idx, attr->name); printf (" Attribute Length: %d\n", attr->length); r_bin_java_print_annotation_array_summary (&attr->info.annotation_array); } } R_API ut64 r_bin_java_rtip_annotations_attr_calc_size(RBinJavaAttrInfo *attr) { ut64 size = 0; RListIter *iter = NULL, *iter_tmp = NULL; RBinJavaAnnotationsArray *annotation_array; if (!attr) { // TODO eprintf allocation fail return size; } // attr->info.rtip_annotations_attr.num_parameters = buffer[offset]; size += (6 + 1); r_list_foreach_safe (attr->info.rtip_annotations_attr.parameter_annotations, iter, iter_tmp, annotation_array) { if (annotation_array) { size += r_bin_java_annotation_array_calc_size (annotation_array); } } return size; } R_API RBinJavaAttrInfo *r_bin_java_rtip_annotations_attr_new(RBinJavaObj *bin, ut8 *buffer, ut64 sz, ut64 buf_offset) { ut32 i = 0; RBinJavaAttrInfo *attr = NULL; ut64 offset = 0; attr = r_bin_java_default_attr_new (bin, buffer, sz, buf_offset); offset += 6; if (attr) { attr->type = R_BIN_JAVA_ATTR_TYPE_RUNTIME_INVISIBLE_PARAMETER_ANNOTATION_ATTR; attr->info.rtip_annotations_attr.num_parameters = buffer[offset]; offset += 1; attr->info.rtip_annotations_attr.parameter_annotations = r_list_newf (r_bin_java_annotation_array_free); 
for (i = 0; i < attr->info.rtip_annotations_attr.num_parameters; i++) { if (offset >= sz) { break; } RBinJavaAnnotationsArray *annotation_array = r_bin_java_annotation_array_new ( buffer + offset, sz - offset, buf_offset + offset); if (annotation_array) { offset += annotation_array->size; r_list_append (attr->info.rtip_annotations_attr.parameter_annotations, (void *) annotation_array); } } attr->size = offset; } return attr; } R_API RBinJavaAttrInfo *r_bin_java_rtvp_annotations_attr_new(RBinJavaObj *bin, ut8 *buffer, ut64 sz, ut64 buf_offset) { ut32 i = 0; RBinJavaAttrInfo *attr = NULL; ut64 offset = 0; attr = r_bin_java_default_attr_new (bin, buffer, sz, buf_offset); offset += 6; RBinJavaAnnotationsArray *annotation_array; if (attr) { attr->type = R_BIN_JAVA_ATTR_TYPE_RUNTIME_VISIBLE_PARAMETER_ANNOTATION_ATTR; attr->info.rtvp_annotations_attr.num_parameters = buffer[offset]; offset += 1; attr->info.rtvp_annotations_attr.parameter_annotations = r_list_newf (r_bin_java_annotation_array_free); for (i = 0; i < attr->info.rtvp_annotations_attr.num_parameters; i++) { if (offset > sz) { break; } annotation_array = r_bin_java_annotation_array_new (buffer + offset, sz - offset, buf_offset + offset); if (annotation_array) { offset += annotation_array->size; } r_list_append (attr->info.rtvp_annotations_attr.parameter_annotations, (void *) annotation_array); } attr->size = offset; } return attr; } R_API ut64 r_bin_java_rtvp_annotations_attr_calc_size(RBinJavaAttrInfo *attr) { ut64 size = 0; RListIter *iter = NULL, *iter_tmp = NULL; RBinJavaAnnotationsArray *annotation_array; if (!attr) { return size; } size += (6 + 1); r_list_foreach_safe (attr->info.rtvp_annotations_attr.parameter_annotations, iter, iter_tmp, annotation_array) { if (annotation_array) { size += r_bin_java_annotation_array_calc_size ( annotation_array); } } return size; } R_API void r_bin_java_rtvp_annotations_attr_free(void /*RBinJavaAttrInfo*/ *a) { RBinJavaAttrInfo *attr = a; if (attr) { if (attr->type == 
R_BIN_JAVA_ATTR_TYPE_RUNTIME_VISIBLE_PARAMETER_ANNOTATION_ATTR) { r_list_free (attr->info.rtvp_annotations_attr.parameter_annotations); } free (attr->name); free (attr->metas); free (attr); } } R_API void r_bin_java_rtip_annotations_attr_free(void /*RBinJavaAttrInfo*/ *a) { RBinJavaAttrInfo *attr = a; if (attr) { // && attr->type == R_BIN_JAVA_ATTR_TYPE_RUNTIME_INVISIBLE_PARAMETER_ANNOTATION_ATTR) { r_list_free (attr->info.rtip_annotations_attr.parameter_annotations); free (attr->metas); free (attr->name); free (attr); } } R_API void r_bin_java_print_rtvp_annotations_attr_summary(RBinJavaAttrInfo *attr) { RBinJavaAnnotationsArray *annotation_array = NULL; RListIter *iter = NULL, *iter_tmp = NULL; if (attr && attr->type == R_BIN_JAVA_ATTR_TYPE_RUNTIME_VISIBLE_PARAMETER_ANNOTATION_ATTR) { eprintf ("Runtime Visible Parameter Annotations Attribute Information:\n"); eprintf (" Attribute Offset: 0x%08"PFMT64x "\n", attr->file_offset); eprintf (" Attribute Name Index: %d (%s)\n", attr->name_idx, attr->name); eprintf (" Attribute Length: %d\n", attr->length); eprintf (" Number of Runtime Invisible Parameters: %d\n", attr->info.rtvp_annotations_attr.num_parameters); r_list_foreach_safe (attr->info.rtvp_annotations_attr.parameter_annotations, iter, iter_tmp, annotation_array) { r_bin_java_print_annotation_array_summary (annotation_array); } } } R_API void r_bin_java_print_rtip_annotations_attr_summary(RBinJavaAttrInfo *attr) { RBinJavaAnnotationsArray *annotation_array = NULL; RListIter *iter = NULL, *iter_tmp = NULL; if (attr && attr->type == R_BIN_JAVA_ATTR_TYPE_RUNTIME_INVISIBLE_PARAMETER_ANNOTATION_ATTR) { eprintf ("Runtime Invisible Parameter Annotations Attribute Information:\n"); eprintf (" Attribute Offset: 0x%08"PFMT64x "\n", attr->file_offset); eprintf (" Attribute Name Index: %d (%s)\n", attr->name_idx, attr->name); eprintf (" Attribute Length: %d\n", attr->length); eprintf (" Number of Runtime Invisible Parameters: %d\n", 
attr->info.rtip_annotations_attr.num_parameters); r_list_foreach_safe (attr->info.rtip_annotations_attr.parameter_annotations, iter, iter_tmp, annotation_array) { r_bin_java_print_annotation_array_summary (annotation_array); } } } R_API RBinJavaCPTypeObj *r_bin_java_find_cp_name_and_type_info(RBinJavaObj *bin, ut16 name_idx, ut16 descriptor_idx) { RListIter *iter, *iter_tmp; RBinJavaCPTypeObj *res = NULL, *obj = NULL; IFDBG eprintf ("Looking for name_idx: %d and descriptor_idx: %d\n", name_idx, descriptor_idx); r_list_foreach_safe (bin->cp_list, iter, iter_tmp, obj) { if (obj && obj->tag == R_BIN_JAVA_CP_NAMEANDTYPE) { IFDBG eprintf ("RBinJavaCPTypeNameAndType has name_idx: %d and descriptor_idx: %d\n", obj->info.cp_name_and_type.name_idx, obj->info.cp_name_and_type.descriptor_idx); if (obj->info.cp_name_and_type.name_idx == name_idx && obj->info.cp_name_and_type.descriptor_idx == descriptor_idx) { res = obj; break; } } } return res; } R_API char *r_bin_java_resolve_cp_idx_type(RBinJavaObj *BIN_OBJ, int idx) { RBinJavaCPTypeObj *item = NULL; char *str = NULL; if (BIN_OBJ && BIN_OBJ->cp_count < 1) { // r_bin_java_new_bin(BIN_OBJ); return NULL; } item = (RBinJavaCPTypeObj *) r_bin_java_get_item_from_bin_cp_list (BIN_OBJ, idx); if (item) { str = strdup (((RBinJavaCPTypeMetas *) item->metas->type_info)->name); } else { str = strdup ("INVALID"); } return str; } R_API RBinJavaCPTypeObj *r_bin_java_find_cp_ref_info_from_name_and_type(RBinJavaObj *bin, ut16 name_idx, ut16 descriptor_idx) { RBinJavaCPTypeObj *obj = r_bin_java_find_cp_name_and_type_info (bin, name_idx, descriptor_idx); if (obj) { return r_bin_java_find_cp_ref_info (bin, obj->metas->ord); } return NULL; } R_API RBinJavaCPTypeObj *r_bin_java_find_cp_ref_info(RBinJavaObj *bin, ut16 name_and_type_idx) { RListIter *iter, *iter_tmp; RBinJavaCPTypeObj *res = NULL, *obj = NULL; r_list_foreach_safe (bin->cp_list, iter, iter_tmp, obj) { if (obj->tag == R_BIN_JAVA_CP_FIELDREF && obj->info.cp_field.name_and_type_idx == 
name_and_type_idx) { res = obj; break; } else if (obj->tag == R_BIN_JAVA_CP_METHODREF && obj->info.cp_method.name_and_type_idx == name_and_type_idx) { res = obj; break; } } return res; } R_API char *r_bin_java_resolve(RBinJavaObj *BIN_OBJ, int idx, ut8 space_bn_name_type) { // TODO XXX FIXME add a size parameter to the str when it is passed in RBinJavaCPTypeObj *item = NULL, *item2 = NULL; char *class_str = NULL, *name_str = NULL, *desc_str = NULL, *string_str = NULL, *empty = "", *cp_name = NULL, *str = NULL; if (BIN_OBJ && BIN_OBJ->cp_count < 1) { // r_bin_java_new_bin(BIN_OBJ); return NULL; } item = (RBinJavaCPTypeObj *) r_bin_java_get_item_from_bin_cp_list (BIN_OBJ, idx); if (item) { cp_name = ((RBinJavaCPTypeMetas *) item->metas->type_info)->name; IFDBG eprintf ("java_resolve Resolved: (%d) %s\n", idx, cp_name); } else { str = malloc (512); if (str) { snprintf (str, 512, "(%d) INVALID CP_OBJ", idx); } return str; } if (strcmp (cp_name, "Class") == 0) { item2 = (RBinJavaCPTypeObj *) r_bin_java_get_item_from_bin_cp_list (BIN_OBJ, idx); // str = r_bin_java_get_name_from_bin_cp_list (BIN_OBJ, idx-1); class_str = r_bin_java_get_item_name_from_bin_cp_list (BIN_OBJ, item); if (!class_str) { class_str = empty; } name_str = r_bin_java_get_item_name_from_bin_cp_list (BIN_OBJ, item2); if (!name_str) { name_str = empty; } desc_str = r_bin_java_get_item_desc_from_bin_cp_list (BIN_OBJ, item2); if (!desc_str) { desc_str = empty; } str = r_str_newf ("%s%s%s", name_str, space_bn_name_type ? 
" " : "", desc_str); if (class_str != empty) { free (class_str); } if (name_str != empty) { free (name_str); } if (desc_str != empty) { free (desc_str); } } else if (!strcmp (cp_name, "MethodRef") || !strcmp (cp_name, "FieldRef") || !strcmp (cp_name, "InterfaceMethodRef")) { /* * The MethodRef, FieldRef, and InterfaceMethodRef structures */ class_str = r_bin_java_get_name_from_bin_cp_list (BIN_OBJ, item->info.cp_method.class_idx); if (!class_str) { class_str = empty; } name_str = r_bin_java_get_item_name_from_bin_cp_list (BIN_OBJ, item); if (!name_str) { name_str = empty; } desc_str = r_bin_java_get_item_desc_from_bin_cp_list (BIN_OBJ, item); if (!desc_str) { desc_str = empty; } str = r_str_newf ("%s/%s%s%s", class_str, name_str, space_bn_name_type ? " " : "", desc_str); if (class_str != empty) { free (class_str); } if (name_str != empty) { free (name_str); } if (desc_str != empty) { free (desc_str); } } else if (!strcmp (cp_name, "String")) { string_str = r_bin_java_get_utf8_from_bin_cp_list (BIN_OBJ, item->info.cp_string.string_idx); str = NULL; IFDBG eprintf ("java_resolve String got: (%d) %s\n", item->info.cp_string.string_idx, string_str); if (!string_str) { string_str = empty; } str = r_str_newf ("\"%s\"", string_str); IFDBG eprintf ("java_resolve String return: %s\n", str); if (string_str != empty) { free (string_str); } } else if (!strcmp (cp_name, "Utf8")) { char *tmp_str = convert_string ((const char *) item->info.cp_utf8.bytes, item->info.cp_utf8.length); ut32 tmp_str_len = tmp_str ? 
strlen (tmp_str) + 4 : 0; if (tmp_str) { str = malloc (tmp_str_len + 4); snprintf (str, tmp_str_len + 4, "\"%s\"", tmp_str); } free (tmp_str); } else if (!strcmp (cp_name, "Long")) { str = r_str_newf ("0x%"PFMT64x, r_bin_java_raw_to_long (item->info.cp_long.bytes.raw, 0)); } else if (!strcmp (cp_name, "Double")) { str = r_str_newf ("%f", r_bin_java_raw_to_double (item->info.cp_double.bytes.raw, 0)); } else if (!strcmp (cp_name, "Integer")) { str = r_str_newf ("0x%08x", R_BIN_JAVA_UINT (item->info.cp_integer.bytes.raw, 0)); } else if (!strcmp (cp_name, "Float")) { str = r_str_newf ("%f", R_BIN_JAVA_FLOAT (item->info.cp_float.bytes.raw, 0)); } else if (!strcmp (cp_name, "NameAndType")) { name_str = r_bin_java_get_item_name_from_bin_cp_list (BIN_OBJ, item); if (!name_str) { name_str = empty; } desc_str = r_bin_java_get_item_desc_from_bin_cp_list (BIN_OBJ, item); if (!desc_str) { desc_str = empty; } str = r_str_newf ("%s%s%s", name_str, space_bn_name_type ? " " : "", desc_str); if (name_str != empty) { free (name_str); } if (desc_str != empty) { free (desc_str); } } else { str = strdup ("(null)"); } return str; } R_API ut8 r_bin_java_does_cp_idx_ref_method(RBinJavaObj *BIN_OBJ, int idx) { RBinJavaField *fm_type = NULL; RListIter *iter; ut8 res = 0; r_list_foreach (BIN_OBJ->methods_list, iter, fm_type) { if (fm_type->field_ref_cp_obj->metas->ord == idx) { res = 1; break; } } return res; } R_API ut8 r_bin_java_does_cp_idx_ref_field(RBinJavaObj *BIN_OBJ, int idx) { RBinJavaField *fm_type = NULL; RListIter *iter; ut8 res = 0; r_list_foreach (BIN_OBJ->fields_list, iter, fm_type) { if (fm_type->field_ref_cp_obj->metas->ord == idx) { res = 1; break; } } return res; } R_API char *r_bin_java_get_method_name(RBinJavaObj *bin_obj, ut32 idx) { char *name = NULL; if (idx < r_list_length (bin_obj->methods_list)) { RBinJavaField *fm_type = r_list_get_n (bin_obj->methods_list, idx); name = strdup (fm_type->name); } return name; } R_API RList *r_bin_java_get_method_num_name(RBinJavaObj 
*bin_obj) { ut32 i = 0; RListIter *iter; RBinJavaField *fm_type; RList *res = r_list_newf (free); r_list_foreach (bin_obj->methods_list, iter, fm_type) { char *str = r_str_newf ("%d %s", i, fm_type->name); r_list_append (res, str); i++; } return res; } /* R_API int r_bin_java_does_cp_obj_ref_idx (RBinJavaObj *bin_obj, RBinJavaCPTypeObj *cp_obj, ut16 idx) { int res = false; RBinJavaCPTypeObj *t_obj = NULL; if (cp_obj) { switch (cp_obj->tag) { case R_BIN_JAVA_CP_NULL: break; case R_BIN_JAVA_CP_UTF8: break; case R_BIN_JAVA_CP_UNKNOWN: break; case R_BIN_JAVA_CP_INTEGER: break; case R_BIN_JAVA_CP_FLOAT: break; case R_BIN_JAVA_CP_LONG: break; case R_BIN_JAVA_CP_DOUBLE: break; case R_BIN_JAVA_CP_CLASS: res = idx == cp_obj->info.cp_class.name_idx ? true : false; break; case R_BIN_JAVA_CP_STRING: res = idx == cp_obj->info.cp_string.string_idx ? true : false; break; case R_BIN_JAVA_CP_METHODREF: break;// check if idx is referenced here case R_BIN_JAVA_CP_INTERFACEMETHOD_REF: break; // check if idx is referenced here case R_BIN_JAVA_CP_FIELDREF: t_obj = r_bin_java_get_item_from_cp (bin_obj, cp_obj->info.cp_method.class_idx); res = r_bin_java_does_cp_obj_ref_idx (bin_obj, t_obj, idx); if (res == true) break; t_obj = r_bin_java_get_item_from_cp (bin_obj, cp_obj->info.cp_method.name_and_type_idx); res = r_bin_java_does_cp_obj_ref_idx (bin_obj, t_obj, idx); break; case R_BIN_JAVA_CP_NAMEANDTYPE: break;// check if idx is referenced here obj->info.cp_name_and_type.name_idx case R_BIN_JAVA_CP_METHODHANDLE: break;// check if idx is referenced here case R_BIN_JAVA_CP_METHODTYPE: break;// check if idx is referenced here case R_BIN_JAVA_CP_INVOKEDYNAMIC: break;// check if idx is referenced here } } } */ R_API RList *r_bin_java_find_cp_const_by_val_long(RBinJavaObj *bin_obj, const ut8 *bytes, ut32 len) { RList *res = r_list_newf (free); ut32 *v = NULL; RListIter *iter; RBinJavaCPTypeObj *cp_obj; eprintf ("Looking for 0x%08x\n", R_BIN_JAVA_UINT (bytes, 0)); r_list_foreach 
(bin_obj->cp_list, iter, cp_obj) { if (cp_obj->tag == R_BIN_JAVA_CP_LONG) { if (len == 8 && r_bin_java_raw_to_long (cp_obj->info.cp_long.bytes.raw, 0) == r_bin_java_raw_to_long (bytes, 0)) { // TODO: we can safely store a ut32 inside the list without having to allocate it v = malloc (sizeof (ut32)); if (!v) { r_list_free (res); return NULL; } *v = cp_obj->idx; r_list_append (res, v); } } } return res; } R_API RList *r_bin_java_find_cp_const_by_val_double(RBinJavaObj *bin_obj, const ut8 *bytes, ut32 len) { RList *res = r_list_newf (free); ut32 *v = NULL; RListIter *iter; RBinJavaCPTypeObj *cp_obj; eprintf ("Looking for %f\n", r_bin_java_raw_to_double (bytes, 0)); r_list_foreach (bin_obj->cp_list, iter, cp_obj) { if (cp_obj->tag == R_BIN_JAVA_CP_DOUBLE) { if (len == 8 && r_bin_java_raw_to_double (cp_obj->info.cp_long.bytes.raw, 0) == r_bin_java_raw_to_double (bytes, 0)) { v = malloc (sizeof (ut32)); if (!v) { r_list_free (res); return NULL; } *v = cp_obj->idx; r_list_append (res, v); } } } return res; } R_API RList *r_bin_java_find_cp_const_by_val_float(RBinJavaObj *bin_obj, const ut8 *bytes, ut32 len) { RList *res = r_list_newf (free); ut32 *v = NULL; RListIter *iter; RBinJavaCPTypeObj *cp_obj; eprintf ("Looking for %f\n", R_BIN_JAVA_FLOAT (bytes, 0)); r_list_foreach (bin_obj->cp_list, iter, cp_obj) { if (cp_obj->tag == R_BIN_JAVA_CP_FLOAT) { if (len == 4 && R_BIN_JAVA_FLOAT (cp_obj->info.cp_long.bytes.raw, 0) == R_BIN_JAVA_FLOAT (bytes, 0)) { v = malloc (sizeof (ut32)); if (!v) { r_list_free (res); return NULL; } *v = cp_obj->idx; r_list_append (res, v); } } } return res; } R_API RList *r_bin_java_find_cp_const_by_val(RBinJavaObj *bin_obj, const ut8 *bytes, ut32 len, const char t) { switch (t) { case R_BIN_JAVA_CP_UTF8: return r_bin_java_find_cp_const_by_val_utf8 (bin_obj, bytes, len); case R_BIN_JAVA_CP_INTEGER: return r_bin_java_find_cp_const_by_val_int (bin_obj, bytes, len); case R_BIN_JAVA_CP_FLOAT: return r_bin_java_find_cp_const_by_val_float (bin_obj, bytes, 
len); case R_BIN_JAVA_CP_LONG: return r_bin_java_find_cp_const_by_val_long (bin_obj, bytes, len); case R_BIN_JAVA_CP_DOUBLE: return r_bin_java_find_cp_const_by_val_double (bin_obj, bytes, len); case R_BIN_JAVA_CP_UNKNOWN: default: eprintf ("Failed to perform the search for: %s\n", bytes); return r_list_new (); } } R_API void U(add_cp_objs_to_sdb)(RBinJavaObj * bin) { /* Add Constant Pool Serialized Object to an Array the key for this info is: Key: java.<classname>.cp_obj Each Value varies by type: In general its: <ordinal>.<file_offset>.<type_name>.[type specific stuff] Example: UTF-8: <ordinal>.<file_offset>.<type_name>.<strlen>.<hexlified(str)> Integer: <ordinal>.<file_offset>.<type_name>.<abs(int)> Long: <ordinal>.<file_offset>.<type_name>.abs(long)> FieldRef/MethodRef: <ordinal>.<file_offset>.<type_name>.<class_idx>.<name_and_type_idx> */ ut32 idx = 0, class_name_inheap = 1; RBinJavaCPTypeObj *cp_obj = NULL; char *key = NULL, *value = NULL; char str_cnt[40]; char *class_name = r_bin_java_get_this_class_name (bin); ut32 key_buf_size = 0; if (!class_name) { class_name = "unknown"; class_name_inheap = 0; } // 4 - format, 8 number, 1 null byte, 7 "unknown" key_buf_size = strlen (class_name) + 4 + 8 + 1; key = malloc (key_buf_size); if (!key) { if (class_name_inheap) { free (class_name); } return; } snprintf (key, key_buf_size - 1, "%s.cp_count", class_name); key[key_buf_size - 1] = 0; snprintf (str_cnt, 39, "%d", bin->cp_count); str_cnt[39] = 0; sdb_set (bin->kv, key, value, 0); // sdb_alist(bin->kv, key); for (idx = 0; idx < bin->cp_count; idx++) { snprintf (key, key_buf_size - 1, "%s.cp.%d", class_name, idx); key[key_buf_size - 1] = 0; cp_obj = (RBinJavaCPTypeObj *) r_bin_java_get_item_from_bin_cp_list (bin, idx); IFDBG eprintf ("Adding %s to the sdb.\n", key); if (cp_obj) { value = ((RBinJavaCPTypeMetas *) cp_obj->metas->type_info)-> allocs->stringify_obj (cp_obj); sdb_set (bin->kv, key, value, 0); free (value); } } if (class_name_inheap) { free (class_name); } 
free (key); } R_API void U(add_field_infos_to_sdb)(RBinJavaObj * bin) { /* *** Experimental and May Change *** Add field information to an Array the key for this info variable depenedent on addr, method ordinal, etc. Key 1, mapping to method key: java.<file_offset> = <field_key> Key 3, method description <field_key>.info = [<access str>, <class_name>, <name>, <signature>] key 4, method meta <field_key>.meta = [<file_offset>, ?] */ RListIter *iter = NULL, *iter_tmp = NULL; RBinJavaField *fm_type; ut32 key_size = 255, value_buffer_size = 1024, class_name_inheap = 1; char *field_key = NULL, *field_key_value = NULL, *value_buffer = NULL; char *class_name = r_bin_java_get_this_class_name (bin); if (!class_name) { class_name = "unknown"; class_name_inheap = 0; } key_size += strlen (class_name); value_buffer_size += strlen (class_name); field_key = malloc (key_size); value_buffer = malloc (value_buffer_size); field_key_value = malloc (key_size); snprintf (field_key, key_size, "%s.methods", class_name); field_key[key_size - 1] = 0; r_list_foreach_safe (bin->fields_list, iter, iter_tmp, fm_type) { char number_buffer[80]; ut64 file_offset = fm_type->file_offset + bin->loadaddr; snprintf (number_buffer, sizeof (number_buffer), "0x%04"PFMT64x, file_offset); IFDBG eprintf ("Inserting: []%s = %s\n", field_key, number_buffer); sdb_array_push (bin->kv, field_key, number_buffer, 0); } r_list_foreach_safe (bin->fields_list, iter, iter_tmp, fm_type) { ut64 field_offset = fm_type->file_offset + bin->loadaddr; // generate method specific key & value snprintf (field_key, key_size, "%s.0x%04"PFMT64x, class_name, field_offset); field_key[key_size - 1] = 0; snprintf (field_key_value, key_size, "%s.0x%04"PFMT64x ".field", class_name, field_offset); field_key_value[key_size - 1] = 0; sdb_set (bin->kv, field_key, field_key_value, 0); IFDBG eprintf ("Inserting: %s = %s\n", field_key, field_key_value); // generate info key, and place values in method info array snprintf (field_key, key_size, 
"%s.info", field_key_value); field_key[key_size - 1] = 0; snprintf (value_buffer, value_buffer_size, "%s", fm_type->flags_str); value_buffer[value_buffer_size - 1] = 0; sdb_array_push (bin->kv, field_key, value_buffer, 0); IFDBG eprintf ("Inserting: []%s = %s\n", field_key, value_buffer); snprintf (value_buffer, value_buffer_size, "%s", fm_type->class_name); value_buffer[value_buffer_size - 1] = 0; sdb_array_push (bin->kv, field_key, value_buffer, 0); IFDBG eprintf ("Inserting: []%s = %s\n", field_key, value_buffer); snprintf (value_buffer, value_buffer_size, "%s", fm_type->name); value_buffer[value_buffer_size - 1] = 0; sdb_array_push (bin->kv, field_key, value_buffer, 0); IFDBG eprintf ("Inserting: []%s = %s\n", field_key, value_buffer); snprintf (value_buffer, value_buffer_size, "%s", fm_type->descriptor); value_buffer[value_buffer_size - 1] = 0; sdb_array_push (bin->kv, field_key, value_buffer, 0); IFDBG eprintf ("Inserting: []%s = %s\n", field_key, value_buffer); } free (field_key); free (field_key_value); free (value_buffer); if (class_name_inheap) { free (class_name); } } R_API void U(add_method_infos_to_sdb)(RBinJavaObj * bin) { /* *** Experimental and May Change *** Add Mehtod information to an Array the key for this info variable depenedent on addr, method ordinal, etc. Key 1, mapping to method key: java.<file_offset> = <method_key> Key 2, basic code information <method_key>.code = [<addr>, <size>] Key 3, method description <method_key>.info = [<access str>, <class_name>, <name>, <signature>,] key 4, method meta <method_key>.meta = [<file_offset>, ?] // TODO in key 3 add <class_name>? e.g. <access str>.<name>.<signature> Note: method name not used because of collisions with operator overloading also take note that code offset and the method offset are not the same values. 
*/ RListIter *iter = NULL, *iter_tmp = NULL; RBinJavaField *fm_type; ut32 key_size = 255, value_buffer_size = 1024, class_name_inheap = 1; char *method_key = NULL, *method_key_value = NULL, *value_buffer = NULL; char *class_name = r_bin_java_get_this_class_name (bin); ut64 baddr = bin->loadaddr; if (!class_name) { class_name = "unknown"; class_name_inheap = 0; } key_size += strlen (class_name); value_buffer_size += strlen (class_name); method_key = malloc (key_size); value_buffer = malloc (value_buffer_size); method_key_value = malloc (key_size); snprintf (method_key, key_size, "%s.methods", class_name); method_key[key_size - 1] = 0; r_list_foreach_safe (bin->methods_list, iter, iter_tmp, fm_type) { char number_buffer[80]; ut64 file_offset = fm_type->file_offset + baddr; snprintf (number_buffer, sizeof (number_buffer), "0x%04"PFMT64x, file_offset); sdb_array_push (bin->kv, method_key, number_buffer, 0); } r_list_foreach_safe (bin->methods_list, iter, iter_tmp, fm_type) { ut64 code_offset = r_bin_java_get_method_code_offset (fm_type) + baddr, code_size = r_bin_java_get_method_code_size (fm_type), method_offset = fm_type->file_offset + baddr; // generate method specific key & value snprintf (method_key, key_size, "%s.0x%04"PFMT64x, class_name, code_offset); method_key[key_size - 1] = 0; snprintf (method_key_value, key_size, "%s.0x%04"PFMT64x ".method", class_name, method_offset); method_key_value[key_size - 1] = 0; IFDBG eprintf ("Adding %s to sdb_array: %s\n", method_key_value, method_key); sdb_set (bin->kv, method_key, method_key_value, 0); // generate code key and values snprintf (method_key, key_size, "%s.code", method_key_value); method_key[key_size - 1] = 0; snprintf (value_buffer, value_buffer_size, "0x%04"PFMT64x, code_offset); value_buffer[value_buffer_size - 1] = 0; sdb_array_push (bin->kv, method_key, value_buffer, 0); snprintf (value_buffer, value_buffer_size, "0x%04"PFMT64x, code_size); value_buffer[value_buffer_size - 1] = 0; sdb_array_push (bin->kv, 
method_key, value_buffer, 0); // generate info key, and place values in method info array snprintf (method_key, key_size, "%s.info", method_key_value); method_key[key_size - 1] = 0; snprintf (value_buffer, value_buffer_size, "%s", fm_type->flags_str); value_buffer[value_buffer_size - 1] = 0; IFDBG eprintf ("Adding %s to sdb_array: %s\n", value_buffer, method_key); sdb_array_push (bin->kv, method_key, value_buffer, 0); snprintf (value_buffer, value_buffer_size, "%s", fm_type->class_name); value_buffer[value_buffer_size - 1] = 0; IFDBG eprintf ("Adding %s to sdb_array: %s\n", value_buffer, method_key); sdb_array_push (bin->kv, method_key, value_buffer, 0); snprintf (value_buffer, value_buffer_size, "%s", fm_type->name); value_buffer[value_buffer_size - 1] = 0; IFDBG eprintf ("Adding %s to sdb_array: %s\n", value_buffer, method_key); sdb_array_push (bin->kv, method_key, value_buffer, 0); snprintf (value_buffer, value_buffer_size, "%s", fm_type->descriptor); value_buffer[value_buffer_size - 1] = 0; IFDBG eprintf ("Adding %s to sdb_array: %s\n", value_buffer, method_key); sdb_array_push (bin->kv, method_key, value_buffer, 0); } free (method_key); free (method_key_value); free (value_buffer); if (class_name_inheap) { free (class_name); } } R_API RList *U(r_bin_java_get_args_from_bin)(RBinJavaObj * bin_obj, ut64 addr) { RBinJavaField *fm_type = r_bin_java_get_method_code_attribute_with_addr (bin_obj, addr); return fm_type ? r_bin_java_get_args (fm_type) : NULL; } R_API RList *U(r_bin_java_get_ret_from_bin)(RBinJavaObj * bin_obj, ut64 addr) { RBinJavaField *fm_type = r_bin_java_get_method_code_attribute_with_addr (bin_obj, addr); return fm_type ? r_bin_java_get_ret (fm_type) : NULL; } R_API char *U(r_bin_java_get_fcn_name_from_bin)(RBinJavaObj * bin_obj, ut64 addr) { RBinJavaField *fm_type = r_bin_java_get_method_code_attribute_with_addr (bin_obj, addr); return fm_type && fm_type->name ? 
strdup (fm_type->name) : NULL; } R_API int U(r_bin_java_is_method_static)(RBinJavaObj * bin_obj, ut64 addr) { RBinJavaField *fm_type = r_bin_java_get_method_code_attribute_with_addr (bin_obj, addr); return fm_type && fm_type->flags & R_BIN_JAVA_METHOD_ACC_STATIC; } R_API int U(r_bin_java_is_method_private)(RBinJavaObj * bin_obj, ut64 addr) { return r_bin_java_is_fm_type_private (r_bin_java_get_method_code_attribute_with_addr (bin_obj, addr)); } R_API int U(r_bin_java_is_method_protected)(RBinJavaObj * bin_obj, ut64 addr) { return r_bin_java_is_fm_type_protected ( r_bin_java_get_method_code_attribute_with_addr (bin_obj, addr)); } R_API int r_bin_java_print_method_idx_summary(RBinJavaObj *bin_obj, ut32 idx) { int res = false; if (idx < r_list_length (bin_obj->methods_list)) { RBinJavaField *fm_type = r_list_get_n (bin_obj->methods_list, idx); r_bin_java_print_method_summary (fm_type); res = true; } return res; } R_API ut32 r_bin_java_get_method_count(RBinJavaObj *bin_obj) { return r_list_length (bin_obj->methods_list); } R_API RList *r_bin_java_get_interface_names(RBinJavaObj *bin) { RList *interfaces_names = r_list_new (); RListIter *iter; RBinJavaInterfaceInfo *ifobj; r_list_foreach (bin->interfaces_list, iter, ifobj) { if (ifobj && ifobj->name) { r_list_append (interfaces_names, strdup (ifobj->name)); } } return interfaces_names; } R_API ut64 r_bin_java_get_main(RBinJavaObj *bin) { if (bin->main_code_attr) { return bin->main_code_attr->info.code_attr.code_offset + bin->loadaddr; } return 0; } R_API RBinJavaObj *r_bin_java_new(const char *file, ut64 loadaddr, Sdb *kv) { RBinJavaObj *bin = R_NEW0 (RBinJavaObj); if (!bin) { return NULL; } bin->file = strdup (file); size_t sz; ut8 *buf = (ut8 *)r_file_slurp (file, &sz); bin->size = sz; if (!buf) { return r_bin_java_free (bin); } if (!r_bin_java_new_bin (bin, loadaddr, kv, buf, bin->size)) { r_bin_java_free (bin); bin = NULL; } free (buf); return bin; } R_API ut64 r_bin_java_get_class_entrypoint(RBinJavaObj *bin) { if 
(bin->cf2.this_class_entrypoint_code_attr) { return bin->cf2.this_class_entrypoint_code_attr->info.code_attr.code_offset; } return 0; } R_API RList *r_bin_java_get_method_exception_table_with_addr(RBinJavaObj *bin, ut64 addr) { RListIter *iter = NULL, *iter_tmp = NULL; RBinJavaField *fm_type, *res = NULL; if (!bin && R_BIN_JAVA_GLOBAL_BIN) { bin = R_BIN_JAVA_GLOBAL_BIN; } if (!bin) { eprintf ("Attempting to analyse function when the R_BIN_JAVA_GLOBAL_BIN has not been set.\n"); return NULL; } r_list_foreach_safe (bin->methods_list, iter, iter_tmp, fm_type) { ut64 offset = r_bin_java_get_method_code_offset (fm_type) + bin->loadaddr, size = r_bin_java_get_method_code_size (fm_type); if (addr >= offset && addr <= size + offset) { res = fm_type; } } if (res) { RBinJavaAttrInfo *code_attr = r_bin_java_get_method_code_attribute (res); return code_attr->info.code_attr.exception_table; } return NULL; } R_API const RList *r_bin_java_get_methods_list(RBinJavaObj *bin) { if (bin) { return bin->methods_list; } if (R_BIN_JAVA_GLOBAL_BIN) { return R_BIN_JAVA_GLOBAL_BIN->methods_list; } return NULL; } R_API RList *r_bin_java_get_bin_obj_list_thru_obj(RBinJavaObj *bin_obj) { RList *the_list; Sdb *sdb; if (!bin_obj) { return NULL; } sdb = bin_obj->AllJavaBinObjs; if (!sdb) { return NULL; } the_list = r_list_new (); if (!the_list) { return NULL; } sdb_foreach (sdb, sdb_iterate_build_list, (void *) the_list); return the_list; } R_API RList *r_bin_java_extract_all_bin_type_values(RBinJavaObj *bin_obj) { RListIter *fm_type_iter; RList *all_types = r_list_new (); RBinJavaField *fm_type; // get all field types r_list_foreach (bin_obj->fields_list, fm_type_iter, fm_type) { char *desc = NULL; if (!extract_type_value (fm_type->descriptor, &desc)) { return NULL; } IFDBG eprintf ("Adding field type: %s\n", desc); r_list_append (all_types, desc); } // get all method types r_list_foreach (bin_obj->methods_list, fm_type_iter, fm_type) { RList *the_list = r_bin_java_extract_type_values 
(fm_type->descriptor); RListIter *desc_iter; char *str; r_list_foreach (the_list, desc_iter, str) { if (str && *str != '(' && *str != ')') { r_list_append (all_types, strdup (str)); IFDBG eprintf ("Adding method type: %s\n", str); } } r_list_free (the_list); } return all_types; } R_API RList *r_bin_java_get_method_definitions(RBinJavaObj *bin) { RBinJavaField *fm_type = NULL; RList *the_list = r_list_new (); if (!the_list) { return NULL; } RListIter *iter = NULL; if (!bin) { return the_list; } r_list_foreach (bin->methods_list, iter, fm_type) { char *method_proto = r_bin_java_get_method_definition (fm_type); // eprintf ("Method prototype: %s\n", method_proto); r_list_append (the_list, method_proto); } return the_list; } R_API RList *r_bin_java_get_field_definitions(RBinJavaObj *bin) { RBinJavaField *fm_type = NULL; RList *the_list = r_list_new (); if (!the_list) { return NULL; } RListIter *iter = NULL; if (!bin) { return the_list; } r_list_foreach (bin->fields_list, iter, fm_type) { char *field_def = r_bin_java_get_field_definition (fm_type); // eprintf ("Field def: %s, %s, %s, %s\n", fm_type->name, fm_type->descriptor, fm_type->flags_str, field_def); r_list_append (the_list, field_def); } return the_list; } R_API RList *r_bin_java_get_import_definitions(RBinJavaObj *bin) { RList *the_list = r_bin_java_get_lib_names (bin); RListIter *iter = NULL; char *new_str; if (!bin || !the_list) { return the_list; } r_list_foreach (the_list, iter, new_str) { while (*new_str) { if (*new_str == '/') { *new_str = '.'; } new_str++; } } return the_list; } R_API RList *r_bin_java_get_field_offsets(RBinJavaObj *bin) { RBinJavaField *fm_type = NULL; RList *the_list = r_list_new (); if (!the_list) { return NULL; } RListIter *iter = NULL; ut64 *paddr = NULL; if (!bin) { return the_list; } the_list->free = free; r_list_foreach (bin->fields_list, iter, fm_type) { paddr = malloc (sizeof(ut64)); if (!paddr) { r_list_free (the_list); return NULL; } *paddr = fm_type->file_offset + 
bin->loadaddr; // eprintf ("Field def: %s, %s, %s, %s\n", fm_type->name, fm_type->descriptor, fm_type->flags_str, field_def); r_list_append (the_list, paddr); } return the_list; } R_API RList *r_bin_java_get_method_offsets(RBinJavaObj *bin) { RBinJavaField *fm_type = NULL; RList *the_list = r_list_new (); RListIter *iter = NULL; ut64 *paddr = NULL; if (!bin) { return the_list; } the_list->free = free; r_list_foreach (bin->methods_list, iter, fm_type) { paddr = R_NEW0 (ut64); *paddr = fm_type->file_offset + bin->loadaddr; r_list_append (the_list, paddr); } return the_list; } R_API ut16 r_bin_java_calculate_field_access_value(const char *access_flags_str) { return calculate_access_value (access_flags_str, FIELD_ACCESS_FLAGS); } R_API ut16 r_bin_java_calculate_class_access_value(const char *access_flags_str) { return calculate_access_value (access_flags_str, CLASS_ACCESS_FLAGS); } R_API ut16 r_bin_java_calculate_method_access_value(const char *access_flags_str) { return calculate_access_value (access_flags_str, METHOD_ACCESS_FLAGS); } R_API RList *retrieve_all_method_access_string_and_value(void) { return retrieve_all_access_string_and_value (METHOD_ACCESS_FLAGS); } R_API RList *retrieve_all_field_access_string_and_value(void) { return retrieve_all_access_string_and_value (FIELD_ACCESS_FLAGS); } R_API RList *retrieve_all_class_access_string_and_value(void) { return retrieve_all_access_string_and_value (CLASS_ACCESS_FLAGS); } R_API char *r_bin_java_resolve_with_space(RBinJavaObj *obj, int idx) { return r_bin_java_resolve (obj, idx, 1); } R_API char *r_bin_java_resolve_without_space(RBinJavaObj *obj, int idx) { return r_bin_java_resolve (obj, idx, 0); } R_API char *r_bin_java_resolve_b64_encode(RBinJavaObj *BIN_OBJ, ut16 idx) { RBinJavaCPTypeObj *item = NULL, *item2 = NULL; char *class_str = NULL, *name_str = NULL, *desc_str = NULL, *string_str = NULL, *empty = "", *cp_name = NULL, *str = NULL, *out = NULL; int memory_alloc = 0; if (BIN_OBJ && BIN_OBJ->cp_count < 1) { 
// r_bin_java_new_bin(BIN_OBJ); return NULL; } item = (RBinJavaCPTypeObj *) r_bin_java_get_item_from_bin_cp_list (BIN_OBJ, idx); if (item) { cp_name = ((RBinJavaCPTypeMetas *) item->metas->type_info)->name; IFDBG eprintf ("java_resolve Resolved: (%d) %s\n", idx, cp_name); } else { return NULL; } if (!strcmp (cp_name, "Class")) { item2 = (RBinJavaCPTypeObj *) r_bin_java_get_item_from_bin_cp_list (BIN_OBJ, idx); // str = r_bin_java_get_name_from_bin_cp_list (BIN_OBJ, idx-1); class_str = r_bin_java_get_item_name_from_bin_cp_list (BIN_OBJ, item); if (!class_str) { class_str = empty; } name_str = r_bin_java_get_item_name_from_bin_cp_list (BIN_OBJ, item2); if (!name_str) { name_str = empty; } desc_str = r_bin_java_get_item_desc_from_bin_cp_list (BIN_OBJ, item2); if (!desc_str) { desc_str = empty; } memory_alloc = strlen (class_str) + strlen (name_str) + strlen (desc_str) + 3; if (memory_alloc) { str = malloc (memory_alloc); if (str) { snprintf (str, memory_alloc, "%s%s", name_str, desc_str); out = r_base64_encode_dyn ((const char *) str, strlen (str)); free (str); str = out; } } if (class_str != empty) { free (class_str); } if (name_str != empty) { free (name_str); } if (desc_str != empty) { free (desc_str); } } else if (strcmp (cp_name, "MethodRef") == 0 || strcmp (cp_name, "FieldRef") == 0 || strcmp (cp_name, "InterfaceMethodRef") == 0) { /* * The MethodRef, FieldRef, and InterfaceMethodRef structures */ class_str = r_bin_java_get_name_from_bin_cp_list (BIN_OBJ, item->info.cp_method.class_idx); if (!class_str) { class_str = empty; } name_str = r_bin_java_get_item_name_from_bin_cp_list (BIN_OBJ, item); if (!name_str) { name_str = empty; } desc_str = r_bin_java_get_item_desc_from_bin_cp_list (BIN_OBJ, item); if (!desc_str) { desc_str = empty; } memory_alloc = strlen (class_str) + strlen (name_str) + strlen (desc_str) + 3; if (memory_alloc) { str = malloc (memory_alloc); if (str) { snprintf (str, memory_alloc, "%s/%s%s", class_str, name_str, desc_str); out = 
r_base64_encode_dyn ((const char *) str, strlen (str)); free (str); str = out; } } if (class_str != empty) { free (class_str); } if (name_str != empty) { free (name_str); } if (desc_str != empty) { free (desc_str); } } else if (strcmp (cp_name, "String") == 0) { ut32 length = r_bin_java_get_utf8_len_from_bin_cp_list (BIN_OBJ, item->info.cp_string.string_idx); string_str = r_bin_java_get_utf8_from_bin_cp_list (BIN_OBJ, item->info.cp_string.string_idx); str = NULL; IFDBG eprintf ("java_resolve String got: (%d) %s\n", item->info.cp_string.string_idx, string_str); if (!string_str) { string_str = empty; length = strlen (empty); } memory_alloc = length + 3; if (memory_alloc) { str = malloc (memory_alloc); if (str) { snprintf (str, memory_alloc, "\"%s\"", string_str); out = r_base64_encode_dyn ((const char *) str, strlen (str)); free (str); str = out; } } IFDBG eprintf ("java_resolve String return: %s\n", str); if (string_str != empty) { free (string_str); } } else if (strcmp (cp_name, "Utf8") == 0) { ut64 sz = item->info.cp_utf8.length ? 
item->info.cp_utf8.length + 10 : 10; str = malloc (sz); memset (str, 0, sz); if (sz > 10) { r_base64_encode (str, item->info.cp_utf8.bytes, item->info.cp_utf8.length); } } else if (strcmp (cp_name, "Long") == 0) { str = malloc (34); if (str) { snprintf (str, 34, "0x%"PFMT64x, r_bin_java_raw_to_long (item->info.cp_long.bytes.raw, 0)); out = r_base64_encode_dyn ((const char *) str, strlen (str)); free (str); str = out; } } else if (strcmp (cp_name, "Double") == 0) { str = malloc (1000); if (str) { snprintf (str, 1000, "%f", r_bin_java_raw_to_double (item->info.cp_double.bytes.raw, 0)); out = r_base64_encode_dyn ((const char *) str, strlen (str)); free (str); str = out; } } else if (strcmp (cp_name, "Integer") == 0) { str = calloc (34, 1); if (str) { snprintf (str, 34, "0x%08x", R_BIN_JAVA_UINT (item->info.cp_integer.bytes.raw, 0)); out = r_base64_encode_dyn ((const char *) str, strlen (str)); free (str); str = out; } } else if (strcmp (cp_name, "Float") == 0) { str = malloc (34); if (str) { snprintf (str, 34, "%f", R_BIN_JAVA_FLOAT (item->info.cp_float.bytes.raw, 0)); out = r_base64_encode_dyn ((const char *) str, strlen (str)); free (str); str = out; } } else if (!strcmp (cp_name, "NameAndType")) { name_str = r_bin_java_get_item_name_from_bin_cp_list (BIN_OBJ, item); if (!name_str) { name_str = empty; } desc_str = r_bin_java_get_item_desc_from_bin_cp_list (BIN_OBJ, item); if (!desc_str) { desc_str = empty; } memory_alloc = strlen (name_str) + strlen (desc_str) + 3; if (memory_alloc) { str = malloc (memory_alloc); if (str) { snprintf (str, memory_alloc, "%s %s", name_str, desc_str); out = r_base64_encode_dyn ((const char *) str, strlen (str)); free (str); str = out; } } if (name_str != empty) { free (name_str); } if (desc_str != empty) { free (desc_str); } } else { str = r_base64_encode_dyn ((const char *) "(null)", 6); } return str; } R_API ut64 r_bin_java_resolve_cp_idx_address(RBinJavaObj *BIN_OBJ, int idx) { RBinJavaCPTypeObj *item = NULL; ut64 addr = -1; if 
	(BIN_OBJ && BIN_OBJ->cp_count < 1) {
		return -1;
	}
	item = (RBinJavaCPTypeObj *) r_bin_java_get_item_from_bin_cp_list (BIN_OBJ, idx);
	if (item) {
		// rebase the entry's file offset with the load address
		addr = item->file_offset + item->loadaddr;
	}
	return addr;
}

/* Stringify constant-pool entry `idx` via the entry's own stringify_obj
 * callback. Returns a heap string owned by the caller, or NULL. */
R_API char *r_bin_java_resolve_cp_idx_to_string(RBinJavaObj *BIN_OBJ, int idx) {
	RBinJavaCPTypeObj *item = NULL;
	char *value = NULL;
	if (BIN_OBJ && BIN_OBJ->cp_count < 1) {
		// empty constant pool: nothing to resolve
		return NULL;
	}
	item = (RBinJavaCPTypeObj *) r_bin_java_get_item_from_bin_cp_list (BIN_OBJ, idx);
	if (item) {
		// dispatch through the per-tag vtable attached to the entry
		value = ((RBinJavaCPTypeMetas *)
			item->metas->type_info)->
			allocs->stringify_obj (item);
	}
	return value;
}

/* Print a summary of constant-pool entry `idx` via its per-tag callback.
 * Returns true when the entry exists, false otherwise. */
R_API int r_bin_java_resolve_cp_idx_print_summary(RBinJavaObj *BIN_OBJ, int idx) {
	RBinJavaCPTypeObj *item = NULL;
	if (BIN_OBJ && BIN_OBJ->cp_count < 1) {
		return false;
	}
	item = (RBinJavaCPTypeObj *) r_bin_java_get_item_from_bin_cp_list (BIN_OBJ, idx);
	if (item) {
		((RBinJavaCPTypeMetas *)
		item->metas->type_info)->
		allocs->print_summary (item);
	} else {
		eprintf ("Error: Invalid CP Object.\n");
	}
	return item ?
	true : false;
}

/* Resolve constant-pool entry `idx` into a freshly allocated ConstJavaValue
 * tagged by `result->type` ("ref", "str", "long", "double", "int", "float",
 * or "unknown"). Caller releases it with r_bin_java_free_const_value. */
R_API ConstJavaValue *U(r_bin_java_resolve_to_const_value)(RBinJavaObj * BIN_OBJ, int idx) {
	// TODO XXX FIXME add a size parameter to the str when it is passed in
	RBinJavaCPTypeObj *item = NULL, *item2 = NULL;
	ConstJavaValue *result = R_NEW0 (ConstJavaValue);
	if (!result) {
		return NULL;
	}
	char *class_str = NULL,
	*name_str = NULL,
	*desc_str = NULL,
	*string_str = NULL,
	*empty = "",
	*cp_name = NULL;
	result->type = "unknown";
	if (BIN_OBJ && BIN_OBJ->cp_count < 1) {
		// r_bin_java_new_bin(BIN_OBJ);
		return result;
	}
	item = (RBinJavaCPTypeObj *) r_bin_java_get_item_from_bin_cp_list (BIN_OBJ, idx);
	if (!item) {
		// unresolvable index: hand back the "unknown"-typed value
		return result;
	}
	cp_name = ((RBinJavaCPTypeMetas *) item->metas->type_info)->name;
	IFDBG eprintf ("java_resolve Resolved: (%d) %s\n", idx, cp_name);
	if (strcmp (cp_name, "Class") == 0) {
		// NOTE(review): item2 is looked up with the same idx as item; the
		// commented-out line below suggests idx-1 may have been intended.
		item2 = (RBinJavaCPTypeObj *) r_bin_java_get_item_from_bin_cp_list (BIN_OBJ, idx);
		// str = r_bin_java_get_name_from_bin_cp_list (BIN_OBJ, idx-1);
		class_str = r_bin_java_get_item_name_from_bin_cp_list (BIN_OBJ, item);
		if (!class_str) {
			class_str = empty;
		}
		name_str = r_bin_java_get_item_name_from_bin_cp_list (BIN_OBJ, item2);
		if (!name_str) {
			name_str = empty;
		}
		desc_str = r_bin_java_get_item_desc_from_bin_cp_list (BIN_OBJ, item2);
		if (!desc_str) {
			desc_str = empty;
		}
		// NOTE(review): R_NEW0 result is dereferenced without a NULL check
		result->value._ref = R_NEW0 (_JavaRef);
		result->type = "ref";
		result->value._ref->class_name = strdup (class_str);
		result->value._ref->name = strdup (name_str);
		result->value._ref->desc = strdup (desc_str);
		if (class_str != empty) {
			free (class_str);
		}
		if (name_str != empty) {
			free (name_str);
		}
		if (desc_str != empty) {
			free (desc_str);
		}
	} else if (strcmp (cp_name, "MethodRef") == 0 ||
	strcmp (cp_name, "FieldRef") == 0 ||
	strcmp (cp_name, "InterfaceMethodRef") == 0) {
		/*
		 * The MethodRef, FieldRef, and InterfaceMethodRef structures
		 */
		class_str = r_bin_java_get_name_from_bin_cp_list (BIN_OBJ, item->info.cp_method.class_idx);
		if (!class_str) {
			class_str = empty;
		}
		name_str = r_bin_java_get_item_name_from_bin_cp_list (BIN_OBJ, item);
		if (!name_str) {
			name_str = empty;
		}
		desc_str = r_bin_java_get_item_desc_from_bin_cp_list (BIN_OBJ, item);
		if (!desc_str) {
			desc_str = empty;
		}
		// NOTE(review): R_NEW0 result is dereferenced without a NULL check
		result->value._ref = R_NEW0 (_JavaRef);
		result->type = "ref";
		result->value._ref->class_name = strdup (class_str);
		result->value._ref->name = strdup (name_str);
		result->value._ref->desc = strdup (desc_str);
		if (class_str != empty) {
			free (class_str);
		}
		if (name_str != empty) {
			free (name_str);
		}
		if (desc_str != empty) {
			free (desc_str);
		}
	} else if (strcmp (cp_name, "String") == 0) {
		ut32 length = r_bin_java_get_utf8_len_from_bin_cp_list (BIN_OBJ, item->info.cp_string.string_idx);
		string_str = r_bin_java_get_utf8_from_bin_cp_list (BIN_OBJ, item->info.cp_string.string_idx);
		IFDBG eprintf ("java_resolve String got: (%d) %s\n", item->info.cp_string.string_idx, string_str);
		if (!string_str) {
			string_str = empty;
			length = strlen (empty);
		}
		result->type = "str";
		// NOTE(review): R_NEW0 result is dereferenced without a NULL check
		result->value._str = R_NEW0 (struct java_const_value_str_t);
		result->value._str->len = length;
		if (length > 0) {
			result->value._str->str = r_str_ndup (string_str, length);
		} else {
			result->value._str->str = strdup ("");
		}
		if (string_str != empty) {
			free (string_str);
		}
	} else if (strcmp (cp_name, "Utf8") == 0) {
		result->type = "str";
		result->value._str = R_NEW0 (struct java_const_value_str_t);
		// NOTE(review): malloc result unchecked; length 0 yields malloc(0)
		result->value._str->str = malloc (item->info.cp_utf8.length);
		result->value._str->len = item->info.cp_utf8.length;
		memcpy (result->value._str->str, item->info.cp_utf8.bytes, item->info.cp_utf8.length);
	} else if (strcmp (cp_name, "Long") == 0) {
		result->type = "long";
		result->value._long = r_bin_java_raw_to_long (item->info.cp_long.bytes.raw, 0);
	} else if (strcmp (cp_name, "Double") == 0) {
		result->type = "double";
		result->value._double = r_bin_java_raw_to_double (item->info.cp_double.bytes.raw, 0);
	} else if (strcmp (cp_name, "Integer") == 0) {
		result->type = "int";
		result->value._int = R_BIN_JAVA_UINT (item->info.cp_integer.bytes.raw, 0);
	} else if (strcmp (cp_name, "Float") == 0) {
		result->type = "float";
		result->value._float = R_BIN_JAVA_FLOAT (item->info.cp_float.bytes.raw, 0);
	} else if (strcmp (cp_name, "NameAndType") == 0) {
		// NOTE(review): R_NEW0 result is dereferenced without a NULL check
		result->value._ref = R_NEW0 (struct java_const_value_ref_t);
		result->type = "ref";
		name_str = r_bin_java_get_item_name_from_bin_cp_list (BIN_OBJ, item);
		if (!name_str) {
			name_str = empty;
		}
		desc_str = r_bin_java_get_item_desc_from_bin_cp_list (BIN_OBJ, item);
		if (!desc_str) {
			desc_str = empty;
		}
		result->value._ref->class_name = strdup (empty);
		result->value._ref->name = strdup (name_str);
		result->value._ref->desc = strdup (desc_str);
		if (name_str != empty) {
			free (name_str);
		}
		if (desc_str != empty) {
			free (desc_str);
		}
		result->value._ref->is_method = r_bin_java_does_cp_idx_ref_method (BIN_OBJ, idx);
		result->value._ref->is_field = r_bin_java_does_cp_idx_ref_field (BIN_OBJ, idx);
	}
	return result;
}

/* Release a ConstJavaValue produced by r_bin_java_resolve_to_const_value.
 * Dispatches on the first characters of the type tag ('r'=ref, "st"=str). */
R_API void U(r_bin_java_free_const_value)(ConstJavaValue * cp_value) {
	char first_char = cp_value && cp_value->type ? *cp_value->type : 0,
	second_char = cp_value && cp_value->type ?
*(cp_value->type + 1) : 0; switch (first_char) { case 'r': if (cp_value && cp_value->value._ref) { free (cp_value->value._ref->class_name); free (cp_value->value._ref->name); free (cp_value->value._ref->desc); } break; case 's': if (second_char == 't' && cp_value->value._str) { free (cp_value->value._str->str); } break; } free (cp_value); } R_API char *r_bin_java_get_field_name(RBinJavaObj *bin_obj, ut32 idx) { char *name = NULL; if (idx < r_list_length (bin_obj->fields_list)) { RBinJavaField *fm_type = r_list_get_n (bin_obj->fields_list, idx); name = strdup (fm_type->name); } return name; } R_API int r_bin_java_print_field_idx_summary(RBinJavaObj *bin_obj, ut32 idx) { int res = false; if (idx < r_list_length (bin_obj->fields_list)) { RBinJavaField *fm_type = r_list_get_n (bin_obj->fields_list, idx); r_bin_java_print_field_summary (fm_type); res = true; } return res; } R_API ut32 r_bin_java_get_field_count(RBinJavaObj *bin_obj) { return r_list_length (bin_obj->fields_list); } R_API RList *r_bin_java_get_field_num_name(RBinJavaObj *bin_obj) { ut32 i = 0; RBinJavaField *fm_type; RListIter *iter = NULL; RList *res = r_list_newf (free); r_list_foreach (bin_obj->fields_list, iter, fm_type) { ut32 len = strlen (fm_type->name) + 30; char *str = malloc (len); if (!str) { r_list_free (res); return NULL; } snprintf (str, len, "%d %s", i, fm_type->name); ++i; r_list_append (res, str); } return res; } R_API RList *r_bin_java_find_cp_const_by_val_utf8(RBinJavaObj *bin_obj, const ut8 *bytes, ut32 len) { RList *res = r_list_newf (free); ut32 *v = NULL; RListIter *iter; RBinJavaCPTypeObj *cp_obj; IFDBG eprintf ("In UTF-8 Looking for %s\n", bytes); r_list_foreach (bin_obj->cp_list, iter, cp_obj) { if (cp_obj->tag == R_BIN_JAVA_CP_UTF8) { IFDBG eprintf ("In UTF-8 Looking @ %s\n", cp_obj->info.cp_utf8.bytes); IFDBG eprintf ("UTF-8 len = %d and memcmp = %d\n", cp_obj->info.cp_utf8.length, memcmp (bytes, cp_obj->info.cp_utf8.bytes, len)); if (len == cp_obj->info.cp_utf8.length && 
!memcmp (bytes, cp_obj->info.cp_utf8.bytes, len)) { v = malloc (sizeof (ut32)); if (!v) { r_list_free (res); return NULL; } *v = cp_obj->metas->ord; IFDBG eprintf ("Found a match adding idx: %d\n", *v); r_list_append (res, v); } } } return res; } R_API RList *r_bin_java_find_cp_const_by_val_int(RBinJavaObj *bin_obj, const ut8 *bytes, ut32 len) { RList *res = r_list_newf (free); ut32 *v = NULL; RListIter *iter; RBinJavaCPTypeObj *cp_obj; eprintf ("Looking for 0x%08x\n", (ut32) R_BIN_JAVA_UINT (bytes, 0)); r_list_foreach (bin_obj->cp_list, iter, cp_obj) { if (cp_obj->tag == R_BIN_JAVA_CP_INTEGER) { if (len == 4 && R_BIN_JAVA_UINT (bytes, 0) == R_BIN_JAVA_UINT (cp_obj->info.cp_integer.bytes.raw, 0)) { v = malloc (sizeof (ut32)); if (!v) { r_list_free (res); return NULL; } *v = cp_obj->idx; r_list_append (res, v); } } } return res; } R_API char r_bin_java_resolve_cp_idx_tag(RBinJavaObj *BIN_OBJ, int idx) { RBinJavaCPTypeObj *item = NULL; if (BIN_OBJ && BIN_OBJ->cp_count < 1) { // r_bin_java_new_bin(BIN_OBJ); return R_BIN_JAVA_CP_UNKNOWN; } item = (RBinJavaCPTypeObj *) r_bin_java_get_item_from_bin_cp_list (BIN_OBJ, idx); if (item) { return item->tag; } return R_BIN_JAVA_CP_UNKNOWN; } R_API int U(r_bin_java_integer_cp_set)(RBinJavaObj * bin, ut16 idx, ut32 val) { RBinJavaCPTypeObj *cp_obj = r_bin_java_get_item_from_bin_cp_list (bin, idx); if (!cp_obj) { return false; } ut8 bytes[4] = { 0 }; if (cp_obj->tag != R_BIN_JAVA_CP_INTEGER && cp_obj->tag != R_BIN_JAVA_CP_FLOAT) { eprintf ("Not supporting the overwrite of CP Objects with one of a different size.\n"); return false; } r_bin_java_check_reset_cp_obj (cp_obj, R_BIN_JAVA_CP_INTEGER); cp_obj->tag = R_BIN_JAVA_CP_INTEGER; memcpy (bytes, (const char *) &val, 4); val = R_BIN_JAVA_UINT (bytes, 0); memcpy (&cp_obj->info.cp_integer.bytes.raw, (const char *) &val, 4); return true; } R_API int U(r_bin_java_float_cp_set)(RBinJavaObj * bin, ut16 idx, float val) { RBinJavaCPTypeObj *cp_obj = r_bin_java_get_item_from_bin_cp_list 
(bin, idx); if (!cp_obj) { return false; } ut8 bytes[4] = { 0 }; if (cp_obj->tag != R_BIN_JAVA_CP_INTEGER && cp_obj->tag != R_BIN_JAVA_CP_FLOAT) { eprintf ("Not supporting the overwrite of CP Objects with one of a different size.\n"); return false; } r_bin_java_check_reset_cp_obj (cp_obj, R_BIN_JAVA_CP_FLOAT); cp_obj->tag = R_BIN_JAVA_CP_FLOAT; memcpy (bytes, (const char *) &val, 4); float *foo = (float*) bytes; val = *foo; //(float)R_BIN_JAVA_UINT (bytes, 0); memcpy (&cp_obj->info.cp_float.bytes.raw, (const char *) &val, 4); return true; } R_API int U(r_bin_java_long_cp_set)(RBinJavaObj * bin, ut16 idx, ut64 val) { RBinJavaCPTypeObj *cp_obj = r_bin_java_get_item_from_bin_cp_list (bin, idx); if (!cp_obj) { return false; } ut8 bytes[8] = { 0 }; if (cp_obj->tag != R_BIN_JAVA_CP_LONG && cp_obj->tag != R_BIN_JAVA_CP_DOUBLE) { eprintf ("Not supporting the overwrite of CP Objects with one of a different size.\n"); return false; } r_bin_java_check_reset_cp_obj (cp_obj, R_BIN_JAVA_CP_LONG); cp_obj->tag = R_BIN_JAVA_CP_LONG; memcpy (bytes, (const char *) &val, 8); val = r_bin_java_raw_to_long (bytes, 0); memcpy (&cp_obj->info.cp_long.bytes.raw, (const char *) &val, 8); return true; } R_API int U(r_bin_java_double_cp_set)(RBinJavaObj * bin, ut16 idx, ut32 val) { RBinJavaCPTypeObj *cp_obj = r_bin_java_get_item_from_bin_cp_list (bin, idx); if (!cp_obj) { return false; } ut8 bytes[8] = { 0 }; if (cp_obj->tag != R_BIN_JAVA_CP_LONG && cp_obj->tag != R_BIN_JAVA_CP_DOUBLE) { eprintf ("Not supporting the overwrite of CP Objects with one of a different size.\n"); return false; } r_bin_java_check_reset_cp_obj (cp_obj, R_BIN_JAVA_CP_DOUBLE); cp_obj->tag = R_BIN_JAVA_CP_DOUBLE; ut64 val64 = val; memcpy (bytes, (const char *) &val64, 8); val64 = r_bin_java_raw_to_long (bytes, 0); memcpy (&cp_obj->info.cp_double.bytes.raw, (const char *) &val64, 8); return true; } R_API int U(r_bin_java_utf8_cp_set)(RBinJavaObj * bin, ut16 idx, const ut8 * buffer, ut32 len) { RBinJavaCPTypeObj *cp_obj = 
r_bin_java_get_item_from_bin_cp_list (bin, idx); if (!cp_obj) { return false; } eprintf ("Writing %d byte(s) (%s)\n", len, buffer); // r_bin_java_check_reset_cp_obj(cp_obj, R_BIN_JAVA_CP_INTEGER); if (cp_obj->tag != R_BIN_JAVA_CP_UTF8) { eprintf ("Not supporting the overwrite of CP Objects with one of a different size.\n"); return false; } if (cp_obj->info.cp_utf8.length != len) { eprintf ("Not supporting the resize, rewriting utf8 string up to %d byte(s).\n", cp_obj->info.cp_utf8.length); if (cp_obj->info.cp_utf8.length > len) { eprintf ("Remaining %d byte(s) will be filled with \\x00.\n", cp_obj->info.cp_utf8.length - len); } } memcpy (cp_obj->info.cp_utf8.bytes, buffer, cp_obj->info.cp_utf8.length); if (cp_obj->info.cp_utf8.length > len) { memset (cp_obj->info.cp_utf8.bytes + len, 0, cp_obj->info.cp_utf8.length - len); } return true; } R_API ut8 *r_bin_java_cp_get_bytes(ut8 tag, ut32 *out_sz, const ut8 *buf, const ut64 len) { if (!out_sz) { return NULL; } if (out_sz) { *out_sz = 0; } switch (tag) { case R_BIN_JAVA_CP_INTEGER: case R_BIN_JAVA_CP_FLOAT: return r_bin_java_cp_get_4bytes (tag, out_sz, buf, len); case R_BIN_JAVA_CP_LONG: case R_BIN_JAVA_CP_DOUBLE: return r_bin_java_cp_get_8bytes (tag, out_sz, buf, len); case R_BIN_JAVA_CP_UTF8: return r_bin_java_cp_get_utf8 (tag, out_sz, buf, len); } return NULL; } R_API ut32 r_bin_java_cp_get_size(RBinJavaObj *bin, ut16 idx) { RBinJavaCPTypeObj *cp_obj = r_bin_java_get_item_from_bin_cp_list (bin, idx); switch (cp_obj->tag) { case R_BIN_JAVA_CP_INTEGER: case R_BIN_JAVA_CP_FLOAT: return 1 + 4; case R_BIN_JAVA_CP_LONG: case R_BIN_JAVA_CP_DOUBLE: return 1 + 8; case R_BIN_JAVA_CP_UTF8: return 1 + 2 + cp_obj->info.cp_utf8.length; } return 0; } R_API ut64 r_bin_java_get_method_start(RBinJavaObj *bin, RBinJavaField *fm_type) { return r_bin_java_get_method_code_offset (fm_type) + bin->loadaddr; } R_API ut64 r_bin_java_get_method_end(RBinJavaObj *bin, RBinJavaField *fm_type) { return r_bin_java_get_method_code_offset (fm_type) 
+ bin->loadaddr + +r_bin_java_get_method_code_size (fm_type); } R_API ut8 *U(r_bin_java_cp_append_method_ref)(RBinJavaObj * bin, ut32 * out_sz, ut16 cn_idx, ut16 fn_idx, ut16 ft_idx) { return r_bin_java_cp_get_fref_bytes (bin, out_sz, R_BIN_JAVA_CP_METHODREF, cn_idx, fn_idx, ft_idx); } R_API ut8 *U(r_bin_java_cp_append_field_ref)(RBinJavaObj * bin, ut32 * out_sz, ut16 cn_idx, ut16 fn_idx, ut16 ft_idx) { return r_bin_java_cp_get_fref_bytes (bin, out_sz, R_BIN_JAVA_CP_FIELDREF, cn_idx, fn_idx, ft_idx); } R_API char *r_bin_java_unmangle_without_flags(const char *name, const char *descriptor) { return r_bin_java_unmangle (NULL, name, descriptor); } R_API void U(r_bin_java_print_stack_map_append_frame_summary)(RBinJavaStackMapFrame * obj) { RListIter *iter, *iter_tmp; RList *ptrList; RBinJavaVerificationObj *ver_obj; printf ("Stack Map Frame Information\n"); printf (" Tag Value = 0x%02x Name: %s\n", obj->tag, ((RBinJavaStackMapFrameMetas *) obj->metas->type_info)->name); printf (" Offset: 0x%08"PFMT64x "\n", obj->file_offset); printf (" Local Variable Count = 0x%04x\n", obj->number_of_locals); printf (" Local Variables:\n"); ptrList = obj->local_items; r_list_foreach_safe (ptrList, iter, iter_tmp, ver_obj) { r_bin_java_print_verification_info_summary (ver_obj); } printf (" Stack Items Count = 0x%04x\n", obj->number_of_stack_items); printf (" Stack Items:\n"); ptrList = obj->stack_items; r_list_foreach_safe (ptrList, iter, iter_tmp, ver_obj) { r_bin_java_print_verification_info_summary (ver_obj); } } R_API void U(r_bin_java_stack_frame_default_free)(void *s) { RBinJavaStackMapFrame *stack_frame = s; if (stack_frame) { free (stack_frame->metas); free (stack_frame); } } // R_API void U(r_bin_java_stack_frame_do_nothing_free)(void /*RBinJavaStackMapFrame*/ *stack_frame) {} // R_API void U(r_bin_java_stack_frame_do_nothing_new)(RBinJavaObj * bin, RBinJavaStackMapFrame * stack_frame, ut64 offset) {} R_API RBinJavaCPTypeMetas *U(r_bin_java_get_cp_meta_from_tag)(ut8 tag) { ut16 
i = 0; // set default to unknown. RBinJavaCPTypeMetas *res = &R_BIN_JAVA_CP_METAS[2]; for (i = 0; i < R_BIN_JAVA_CP_METAS_SZ; i++) { if (tag == R_BIN_JAVA_CP_METAS[i].tag) { res = &R_BIN_JAVA_CP_METAS[i]; break; } } return res; } R_API ut8 *U(r_bin_java_cp_append_ref_cname_fname_ftype)(RBinJavaObj * bin, ut32 * out_sz, ut8 tag, const char *cname, const ut32 c_len, const char *fname, const ut32 f_len, const char *tname, const ut32 t_len) { ut32 cn_len = 0, fn_len = 0, ft_len = 0, total_len; ut16 cn_idx = 0, fn_idx = 0, ft_idx = 0; ut8 *bytes = NULL, *cn_bytes = NULL, *fn_bytes = NULL, *ft_bytes = NULL, *cref_bytes = NULL, *fref_bytes = NULL, *fnt_bytes = NULL; *out_sz = 0; cn_bytes = r_bin_java_cp_get_utf8 (R_BIN_JAVA_CP_UTF8, &cn_len, (const ut8 *) cname, c_len); cn_idx = bin->cp_idx + 1; if (cn_bytes) { fn_bytes = r_bin_java_cp_get_utf8 (R_BIN_JAVA_CP_UTF8, &fn_len, (const ut8 *) fname, f_len); fn_idx = bin->cp_idx + 2; } if (fn_bytes) { ft_bytes = r_bin_java_cp_get_utf8 (R_BIN_JAVA_CP_UTF8, &ft_len, (const ut8 *) tname, t_len); ft_idx = bin->cp_idx + 3; } if (cn_bytes && fn_bytes && ft_bytes) { ut32 cref_len = 0, fnt_len = 0, fref_len = 0; ut32 cref_idx = 0, fnt_idx = 0; cref_bytes = r_bin_java_cp_get_classref (bin, &cref_len, NULL, 0, cn_idx); cref_idx = bin->cp_idx + 3; fnt_bytes = r_bin_java_cp_get_name_type (bin, &fnt_len, fn_idx, ft_idx); fnt_idx = bin->cp_idx + 4; fref_bytes = r_bin_java_cp_get_2_ut16 (bin, &fref_len, tag, cref_idx, fnt_idx); if (cref_bytes && fref_bytes && fnt_bytes) { total_len = cn_len + fn_len + ft_len + cref_len + fnt_len + fref_len + 2; if (total_len < cn_len) { goto beach; } bytes = calloc (1, total_len); // class name bytes if (*out_sz + cn_len >= total_len) { goto beach; } memcpy (bytes, cn_bytes + *out_sz, cn_len); *out_sz += cn_len; // field name bytes if (*out_sz + fn_len >= total_len) { goto beach; } memcpy (bytes, fn_bytes + *out_sz, fn_len); *out_sz += fn_len; // field type bytes if (*out_sz + ft_len >= total_len) { goto 
beach; } memcpy (bytes, ft_bytes + *out_sz, ft_len); *out_sz += ft_len; // class ref bytes if (*out_sz + cref_len >= total_len) { goto beach; } memcpy (bytes, cref_bytes + *out_sz, cref_len); *out_sz += fn_len; // field name and type bytes if (*out_sz + fnt_len >= total_len) { goto beach; } memcpy (bytes, fnt_bytes + *out_sz, fnt_len); *out_sz += fnt_len; // field ref bytes if (*out_sz + fref_len >= total_len) { goto beach; } memcpy (bytes, fref_bytes + *out_sz, fref_len); *out_sz += fref_len; } } beach: free (cn_bytes); free (ft_bytes); free (fn_bytes); free (fnt_bytes); free (fref_bytes); free (cref_bytes); return bytes; } R_API ut8 *U(r_bin_java_cp_get_method_ref)(RBinJavaObj * bin, ut32 * out_sz, ut16 class_idx, ut16 name_and_type_idx) { return r_bin_java_cp_get_fm_ref (bin, out_sz, R_BIN_JAVA_CP_METHODREF, class_idx, name_and_type_idx); } R_API ut8 *U(r_bin_java_cp_get_field_ref)(RBinJavaObj * bin, ut32 * out_sz, ut16 class_idx, ut16 name_and_type_idx) { return r_bin_java_cp_get_fm_ref (bin, out_sz, R_BIN_JAVA_CP_FIELDREF, class_idx, name_and_type_idx); } R_API void U(deinit_java_type_null)(void) { free (R_BIN_JAVA_NULL_TYPE.metas); } R_API RBinJavaCPTypeObj *r_bin_java_get_item_from_cp(RBinJavaObj *bin, int i) { if (i < 1 || i > bin->cf.cp_count) { return &R_BIN_JAVA_NULL_TYPE; } RBinJavaCPTypeObj *obj = (RBinJavaCPTypeObj *) r_list_get_n (bin->cp_list, i); return obj ? obj : &R_BIN_JAVA_NULL_TYPE; } R_API void U(copy_type_info_to_stack_frame_list)(RList * type_list, RList * sf_list) { RListIter *iter, *iter_tmp; RBinJavaVerificationObj *ver_obj, *new_ver_obj; if (!type_list || !sf_list) { return; } r_list_foreach_safe (type_list, iter, iter_tmp, ver_obj) { new_ver_obj = (RBinJavaVerificationObj *) malloc (sizeof (RBinJavaVerificationObj)); // FIXME: how to handle failed memory allocation? 
if (new_ver_obj && ver_obj) { memcpy (new_ver_obj, ver_obj, sizeof (RBinJavaVerificationObj)); if (!r_list_append (sf_list, (void *) new_ver_obj)) { R_FREE (new_ver_obj); } } else { R_FREE (new_ver_obj); } } } R_API void U(copy_type_info_to_stack_frame_list_up_to_idx)(RList * type_list, RList * sf_list, ut64 idx) { RListIter *iter, *iter_tmp; RBinJavaVerificationObj *ver_obj, *new_ver_obj; ut32 pos = 0; if (!type_list || !sf_list) { return; } r_list_foreach_safe (type_list, iter, iter_tmp, ver_obj) { new_ver_obj = (RBinJavaVerificationObj *) malloc (sizeof (RBinJavaVerificationObj)); // FIXME: how to handle failed memory allocation? if (new_ver_obj && ver_obj) { memcpy (new_ver_obj, ver_obj, sizeof (RBinJavaVerificationObj)); if (!r_list_append (sf_list, (void *) new_ver_obj)) { R_FREE (new_ver_obj); } } else { R_FREE (new_ver_obj); } pos++; if (pos == idx) { break; } } } R_API ut8 *r_bin_java_cp_get_idx_bytes(RBinJavaObj *bin, ut16 idx, ut32 *out_sz) { RBinJavaCPTypeObj *cp_obj = r_bin_java_get_item_from_bin_cp_list (bin, idx); if (!cp_obj || !out_sz) { return NULL; } if (out_sz) { *out_sz = 0; } switch (cp_obj->tag) { case R_BIN_JAVA_CP_INTEGER: case R_BIN_JAVA_CP_FLOAT: return r_bin_java_cp_get_4bytes (cp_obj->tag, out_sz, cp_obj->info.cp_integer.bytes.raw, 5); case R_BIN_JAVA_CP_LONG: case R_BIN_JAVA_CP_DOUBLE: return r_bin_java_cp_get_4bytes (cp_obj->tag, out_sz, cp_obj->info.cp_long.bytes.raw, 9); case R_BIN_JAVA_CP_UTF8: // eprintf ("Getting idx: %d = %p (3+0x%"PFMT64x")\n", idx, cp_obj, cp_obj->info.cp_utf8.length); if (cp_obj->info.cp_utf8.length > 0) { return r_bin_java_cp_get_utf8 (cp_obj->tag, out_sz, cp_obj->info.cp_utf8.bytes, cp_obj->info.cp_utf8.length); } } return NULL; } R_API int r_bin_java_valid_class(const ut8 *buf, ut64 buf_sz) { RBinJavaObj *bin = R_NEW0 (RBinJavaObj), *cur_bin = R_BIN_JAVA_GLOBAL_BIN; if (!bin) { return false; } int res = r_bin_java_load_bin (bin, buf, buf_sz); if (bin->calc_size == buf_sz) { res = true; } r_bin_java_free 
(bin); R_BIN_JAVA_GLOBAL_BIN = cur_bin; return res; } R_API ut64 r_bin_java_calc_class_size(ut8 *bytes, ut64 size) { RBinJavaObj *bin = R_NEW0 (RBinJavaObj); if (!bin) { return false; } RBinJavaObj *cur_bin = R_BIN_JAVA_GLOBAL_BIN; ut64 bin_size = UT64_MAX; if (bin) { if (r_bin_java_load_bin (bin, bytes, size)) { bin_size = bin->calc_size; } r_bin_java_free (bin); R_BIN_JAVA_GLOBAL_BIN = cur_bin; } return bin_size; } R_API int U(r_bin_java_get_cp_idx_with_name)(RBinJavaObj * bin_obj, const char *name, ut32 len) { RListIter *iter; RBinJavaCPTypeObj *obj; r_list_foreach (bin_obj->cp_list, iter, obj) { if (obj->tag == R_BIN_JAVA_CP_UTF8) { if (!strncmp (name, (const char *) obj->info.cp_utf8.bytes, len)) { return obj->metas->ord; } } } return 0; }
/* Apache 2.0 - Copyright 2007-2022 - pancake and dso class.c rewrite: Adam Pridgen <dso@rice.edu || adam.pridgen@thecoverofnight.com> */ #include <stdio.h> #include <string.h> #include <stdlib.h> #include <stdarg.h> #include <r_types.h> #include <r_util.h> #include <r_bin.h> #include <math.h> #include <sdb.h> #include "class.h" #include "dsojson.h" #ifdef IFDBG #undef IFDBG #endif #define DO_THE_DBG 0 #define IFDBG if (DO_THE_DBG) #define IFINT if (0) #define MAX_CPITEMS 8192 R_API char *U(r_bin_java_unmangle_method)(const char *flags, const char *name, const char *params, const char *r_value); R_API int r_bin_java_is_fm_type_private(RBinJavaField *fm_type); R_API int r_bin_java_is_fm_type_protected(RBinJavaField *fm_type); R_API ut32 U(r_bin_java_swap_uint)(ut32 x); // R_API const char * r_bin_java_get_this_class_name(RBinJavaObj *bin); R_API void U(add_cp_objs_to_sdb)(RBinJavaObj * bin); R_API void U(add_field_infos_to_sdb)(RBinJavaObj * bin); R_API void U(add_method_infos_to_sdb)(RBinJavaObj * bin); R_API RList *retrieve_all_access_string_and_value(RBinJavaAccessFlags *access_flags); R_API char *retrieve_access_string(ut16 flags, RBinJavaAccessFlags *access_flags); R_API ut16 calculate_access_value(const char *access_flags_str, RBinJavaAccessFlags *access_flags); R_API int r_bin_java_new_bin(RBinJavaObj *bin, ut64 loadaddr, Sdb *kv, const ut8 *buf, ut64 len); R_API int extract_type_value(const char *arg_str, char **output); R_API int r_bin_java_check_reset_cp_obj(RBinJavaCPTypeObj *cp_obj, ut8 tag); R_API ut8 *r_bin_java_cp_get_4bytes(ut8 tag, ut32 *out_sz, const ut8 *buf, const ut64 len); R_API ut8 *r_bin_java_cp_get_8bytes(ut8 tag, ut32 *out_sz, const ut8 *buf, const ut64 len); R_API ut8 *r_bin_java_cp_get_utf8(ut8 tag, ut32 *out_sz, const ut8 *buf, const ut64 len); R_API RBinJavaCPTypeObj *r_bin_java_get_item_from_bin_cp_list(RBinJavaObj *bin, ut64 idx); R_API RBinJavaCPTypeObj *r_bin_java_get_item_from_cp_item_list(RList *cp_list, ut64 idx); // Allocs for 
objects R_API RBinJavaCPTypeObj *r_bin_java_class_cp_new(RBinJavaObj *bin, ut8 *buffer, ut64 offset); R_API RBinJavaCPTypeObj *r_bin_java_fieldref_cp_new(RBinJavaObj *bin, ut8 *buffer, ut64 offset); R_API RBinJavaCPTypeObj *r_bin_java_methodref_cp_new(RBinJavaObj *bin, ut8 *buffer, ut64 offset); R_API RBinJavaCPTypeObj *r_bin_java_interfacemethodref_cp_new(RBinJavaObj *bin, ut8 *buffer, ut64 offset); R_API RBinJavaCPTypeObj *r_bin_java_name_and_type_cp_new(RBinJavaObj *bin, ut8 *buffer, ut64 offset); R_API RBinJavaCPTypeObj *r_bin_java_string_cp_new(RBinJavaObj *bin, ut8 *buffer, ut64 offset); R_API RBinJavaCPTypeObj *r_bin_java_integer_cp_new(RBinJavaObj *bin, ut8 *buffer, ut64 offset); R_API RBinJavaCPTypeObj *r_bin_java_float_cp_new(RBinJavaObj *bin, ut8 *buffer, ut64 offset); R_API RBinJavaCPTypeObj *r_bin_java_long_cp_new(RBinJavaObj *bin, ut8 *buffer, ut64 offset); R_API RBinJavaCPTypeObj *r_bin_java_double_cp_new(RBinJavaObj *bin, ut8 *buffer, ut64 offset); R_API RBinJavaCPTypeObj *r_bin_java_utf8_cp_new(RBinJavaObj *bin, ut8 *buffer, ut64 offset); R_API RBinJavaCPTypeObj *r_bin_java_do_nothing_new(RBinJavaObj *bin, ut8 *buffer, ut64 sz); R_API RBinJavaCPTypeObj *r_bin_java_clone_cp_item(RBinJavaCPTypeObj *obj); R_API RBinJavaCPTypeObj *r_bin_java_clone_cp_idx(RBinJavaObj *bin, ut32 idx); R_API RBinJavaCPTypeObj *r_bin_java_methodhandle_cp_new(RBinJavaObj *bin, ut8 *buffer, ut64 sz); R_API RBinJavaCPTypeObj *r_bin_java_methodtype_cp_new(RBinJavaObj *bin, ut8 *buffer, ut64 sz); R_API RBinJavaCPTypeObj *r_bin_java_invokedynamic_cp_new(RBinJavaObj *bin, ut8 *buffer, ut64 sz); // Deallocs for type objects R_API void r_bin_java_default_free(void /*RBinJavaCPTypeObj*/ *obj); R_API void r_bin_java_obj_free(void /*RBinJavaCPTypeObj*/ *obj); R_API void r_bin_java_utf8_info_free(void /*RBinJavaCPTypeObj*/ *obj); R_API void r_bin_java_do_nothing_free(void /*RBinJavaCPTypeObj*/ *obj); R_API void r_bin_java_fmtype_free(void /*RBinJavaField*/ *fm_type); // handle freeing 
the lists // handle the reading of the various field R_API RBinJavaAttrInfo *r_bin_java_read_next_attr(RBinJavaObj *bin, const ut64 offset, const ut8 *buf, const ut64 len); R_API RBinJavaCPTypeObj *r_bin_java_read_next_constant_pool_item(RBinJavaObj *bin, const ut64 offset, const ut8 *buf, ut64 len); R_API RBinJavaAttrMetas *r_bin_java_get_attr_type_by_name(const char *name); R_API RBinJavaCPTypeObj *r_bin_java_get_java_null_cp(void); R_API ut64 r_bin_java_read_class_file2(RBinJavaObj *bin, const ut64 offset, const ut8 *buf, ut64 len); R_API RBinJavaAttrInfo *r_bin_java_get_attr_from_field(RBinJavaField *field, R_BIN_JAVA_ATTR_TYPE attr_type, ut32 pos); R_API RBinJavaField *r_bin_java_read_next_field(RBinJavaObj *bin, const ut64 offset, const ut8 *buffer, const ut64 len); R_API RBinJavaField *r_bin_java_read_next_method(RBinJavaObj *bin, const ut64 offset, const ut8 *buffer, const ut64 len); R_API void r_bin_java_print_utf8_cp_summary(RBinJavaCPTypeObj *obj); R_API void r_bin_java_print_name_and_type_cp_summary(RBinJavaCPTypeObj *obj); R_API void r_bin_java_print_double_cp_summary(RBinJavaCPTypeObj *obj); R_API void r_bin_java_print_long_cp_summary(RBinJavaCPTypeObj *obj); R_API void r_bin_java_print_float_cp_summary(RBinJavaCPTypeObj *obj); R_API void r_bin_java_print_integer_cp_summary(RBinJavaCPTypeObj *obj); R_API void r_bin_java_print_string_cp_summary(RBinJavaCPTypeObj *obj); R_API void r_bin_java_print_classref_cp_summary(RBinJavaCPTypeObj *obj); R_API void r_bin_java_print_fieldref_cp_summary(RBinJavaCPTypeObj *obj); R_API void r_bin_java_print_methodref_cp_summary(RBinJavaCPTypeObj *obj); R_API void r_bin_java_print_interfacemethodref_cp_summary(RBinJavaCPTypeObj *obj); R_API void r_bin_java_print_unknown_cp_summary(RBinJavaCPTypeObj *obj); R_API void r_bin_java_print_null_cp_summary(RBinJavaCPTypeObj *obj); R_API void r_bin_java_print_unknown_attr_summary(RBinJavaAttrInfo *attr); R_API void r_bin_java_print_methodhandle_cp_summary(RBinJavaCPTypeObj *obj); 
R_API void r_bin_java_print_methodtype_cp_summary(RBinJavaCPTypeObj *obj); R_API void r_bin_java_print_invokedynamic_cp_summary(RBinJavaCPTypeObj *obj); R_API RBinJavaCPTypeObj *r_bin_java_unknown_cp_new(RBinJavaObj *bin, ut8 *buffer, ut64 sz); R_API RBinJavaInterfaceInfo *r_bin_java_interface_new(RBinJavaObj *bin, const ut8 *buf, ut64 sz); R_API RBinJavaInterfaceInfo *r_bin_java_read_next_interface_item(RBinJavaObj *bin, const ut64 offset, const ut8 *buf, ut64 len); R_API void r_bin_java_interface_free(void /*RBinJavaInterfaceInfo*/ *obj); R_API void r_bin_java_stack_frame_free(void /*RBinJavaStackMapFrame*/ *obj); R_API void r_bin_java_stack_map_table_attr_free(void /*RBinJavaAttrInfo*/ *attr); R_API void r_bin_java_verification_info_free(void /*RBinJavaVerificationObj*/ *obj); R_API void r_bin_java_print_stack_map_table_attr_summary(RBinJavaAttrInfo *obj); R_API void r_bin_java_print_stack_map_frame_summary(RBinJavaStackMapFrame *obj); R_API void r_bin_java_print_verification_info_summary(RBinJavaVerificationObj *obj); R_API RBinJavaStackMapFrame *r_bin_java_build_stack_frame_from_local_variable_table(RBinJavaObj *bin, RBinJavaAttrInfo *attr); R_API void U(r_bin_java_print_stack_map_append_frame_summary)(RBinJavaStackMapFrame * obj); R_API void U(r_bin_java_stack_frame_default_free)(void /*RBinJavaStackMapFrame*/ *stack_frame); // R_API void U(r_bin_java_stack_frame_do_nothing_free)(void /*RBinJavaStackMapFrame*/ *stack_frame); // R_API void U(r_bin_java_stack_frame_do_nothing_new)(RBinJavaObj * bin, RBinJavaStackMapFrame * stack_frame, ut64 offset); R_API RBinJavaStackMapFrame *r_bin_java_stack_map_frame_new(ut8 *buffer, ut64 sz, RBinJavaStackMapFrame *p_frame, ut64 buf_offset); // R_API RBinJavaStackMapFrame* r_bin_java_stack_map_frame_new (ut8* buffer, ut64 sz, ut64 buf_offset); R_API RBinJavaElementValue *r_bin_java_element_value_new(ut8 *buffer, ut64 sz, ut64 buf_offset); // R_API RBinJavaVerificationObj* r_bin_java_read_next_verification_info_new(ut8* 
buffer, ut64 sz, ut64 buf_offset); R_API RBinJavaAnnotation *r_bin_java_annotation_new(ut8 *buffer, ut64 sz, ut64 buf_offset); R_API RBinJavaElementValuePair *r_bin_java_element_pair_new(ut8 *buffer, ut64 sz, ut64 buf_offset); R_API RBinJavaElementValue *r_bin_java_element_value_new(ut8 *buffer, ut64 sz, ut64 buf_offset); // R_API RBinJavaBootStrapArgument* r_bin_java_bootstrap_method_argument_new(ut8* buffer, ut64 sz, ut64 buf_offset); R_API RBinJavaBootStrapMethod *r_bin_java_bootstrap_method_new(ut8 *buffer, ut64 sz, ut64 buf_offset); R_API RBinJavaAnnotationsArray *r_bin_java_annotation_array_new(ut8 *buffer, ut64 sz, ut64 buf_offset); R_API RBinJavaElementValueMetas *r_bin_java_get_ev_meta_from_tag(ut8 tag); R_API RBinJavaCPTypeMetas *U(r_bin_java_get_cp_meta_from_tag)(ut8 tag); R_API void r_bin_java_inner_classes_attr_entry_free(void /*RBinJavaClassesAttribute*/ *attr); R_API void r_bin_java_annotation_default_attr_free(void /*RBinJavaAttrInfo*/ *attr); R_API void r_bin_java_enclosing_methods_attr_free(void /*RBinJavaAttrInfo*/ *attr); R_API void r_bin_java_local_variable_type_table_attr_entry_free(void /*RBinJavaLocalVariableTypeAttribute*/ *lvattr); R_API void r_bin_java_local_variable_type_table_attr_free(void /*RBinJavaAttrInfo*/ *attr); R_API void r_bin_java_signature_attr_free(void /*RBinJavaAttrInfo*/ *attr); R_API void r_bin_java_source_debug_attr_free(void /*RBinJavaAttrInfo*/ *attr); R_API void r_bin_java_element_value_free(void /*RBinJavaElementValue*/ *element_value); R_API void r_bin_java_element_pair_free(void /*RBinJavaElementValuePair*/ *evp); R_API void r_bin_java_annotation_free(void /*RBinJavaAnnotation*/ *annotation); R_API void r_bin_java_rtv_annotations_attr_free(void /*RBinJavaAttrInfo*/ *attr); R_API void r_bin_java_rti_annotations_attr_free(void /*RBinJavaAttrInfo*/ *attr); R_API void r_bin_java_annotation_array_free(void /*RBinJavaAnnotationsArray*/ *annotation_array); R_API void r_bin_java_bootstrap_methods_attr_free(void 
/*RBinJavaAttrInfo*/ *attr); R_API void r_bin_java_bootstrap_method_free(void /*RBinJavaBootStrapMethod*/ *bsm); R_API void r_bin_java_bootstrap_method_argument_free(void /*RBinJavaBootStrapArgument*/ *bsm_arg); R_API void r_bin_java_rtvp_annotations_attr_free(void /*RBinJavaAttrInfo*/ *attr); R_API void r_bin_java_rtip_annotations_attr_free(void /*RBinJavaAttrInfo*/ *attr); R_API void r_bin_java_unknown_attr_free(void /*RBinJavaAttrInfo*/ *attr); R_API void r_bin_java_code_attr_free(void /*RBinJavaAttrInfo*/ *attr); R_API void r_bin_java_constant_value_attr_free(void /*RBinJavaAttrInfo*/ *attr); R_API void r_bin_java_deprecated_attr_free(void /*RBinJavaAttrInfo*/ *attr); R_API void r_bin_java_exceptions_attr_free(void /*RBinJavaAttrInfo*/ *attr); R_API void r_bin_java_inner_classes_attr_free(void /*RBinJavaAttrInfo*/ *attr); R_API void r_bin_java_line_number_table_attr_free(void /*RBinJavaAttrInfo*/ *attr); R_API void r_bin_java_local_variable_table_attr_free(void /*RBinJavaAttrInfo*/ *attr); R_API void r_bin_java_source_code_file_attr_free(void /*RBinJavaAttrInfo*/ *attr); R_API void r_bin_java_synthetic_attr_free(void /*RBinJavaAttrInfo*/ *attr); R_API void r_bin_java_print_annotation_default_attr_summary(RBinJavaAttrInfo *attr); R_API void r_bin_java_print_enclosing_methods_attr_summary(RBinJavaAttrInfo *attr); R_API void r_bin_java_print_local_variable_type_attr_summary(RBinJavaLocalVariableTypeAttribute *lvattr); R_API void r_bin_java_print_local_variable_type_table_attr_summary(RBinJavaAttrInfo *attr); R_API void r_bin_java_print_signature_attr_summary(RBinJavaAttrInfo *attr); R_API void r_bin_java_print_source_debug_attr_summary(RBinJavaAttrInfo *attr); R_API void r_bin_java_print_element_value_summary(RBinJavaElementValue *element_value); R_API void r_bin_java_print_annotation_summary(RBinJavaAnnotation *annotation); R_API void r_bin_java_print_element_pair_summary(RBinJavaElementValuePair *evp); R_API void 
r_bin_java_print_bootstrap_methods_attr_summary(RBinJavaAttrInfo *attr); // R_API void r_bin_java_bootstrap_method_summary(RBinJavaBootStrapMethod *bsm); // R_API void r_bin_java_bootstrap_method_argument_summary(RBinJavaBootStrapArgument *bsm_arg); R_API void r_bin_java_print_rtv_annotations_attr_summary(RBinJavaAttrInfo *attr); R_API void r_bin_java_print_rti_annotations_attr_summary(RBinJavaAttrInfo *attr); R_API void r_bin_java_print_annotation_array_summary(RBinJavaAnnotationsArray *annotation_array); R_API void r_bin_java_print_rtvp_annotations_attr_summary(RBinJavaAttrInfo *attr); R_API void r_bin_java_print_rtip_annotations_attr_summary(RBinJavaAttrInfo *attr); R_API void r_bin_java_attribute_free(void /*RBinJavaAttrInfo*/ *attr); R_API void r_bin_java_constant_pool(void /*RBinJavaCPTypeObj*/ *obj); R_API void r_bin_java_print_field_summary(RBinJavaField *field); // R_API void r_bin_java_print_interface_summary(RBinJavaField *field); R_API void r_bin_java_print_method_summary(RBinJavaField *field); R_API void r_bin_java_print_code_exceptions_attr_summary(RBinJavaExceptionEntry *exc_entry); R_API void r_bin_java_print_code_attr_summary(RBinJavaAttrInfo *attr); R_API void r_bin_java_print_constant_value_attr_summary(RBinJavaAttrInfo *attr); R_API void r_bin_java_print_deprecated_attr_summary(RBinJavaAttrInfo *attr); R_API void r_bin_java_print_exceptions_attr_summary(RBinJavaAttrInfo *attr); R_API void r_bin_java_print_classes_attr_summary(RBinJavaClassesAttribute *icattr); R_API void r_bin_java_print_inner_classes_attr_summary(RBinJavaAttrInfo *attr); R_API void r_bin_java_print_line_number_table_attr_summary(RBinJavaAttrInfo *attr); R_API void r_bin_java_print_local_variable_attr_summary(RBinJavaLocalVariableAttribute *lvattr); R_API void r_bin_java_print_local_variable_table_attr_summary(RBinJavaAttrInfo *attr); R_API void r_bin_java_print_source_code_file_attr_summary(RBinJavaAttrInfo *attr); R_API void 
r_bin_java_print_synthetic_attr_summary(RBinJavaAttrInfo *attr); R_API void r_bin_java_print_attr_summary(RBinJavaAttrInfo *attr); R_API RBinJavaAttrInfo *r_bin_java_read_next_attr_from_buffer(RBinJavaObj *bin, ut8 *buffer, st64 sz, st64 buf_offset); R_API RBinJavaAttrInfo *r_bin_java_unknown_attr_new(RBinJavaObj *bin, ut8 *buf, ut64 sz, ut64 buf_offset); R_API RBinJavaAttrInfo *r_bin_java_annotation_default_attr_new(RBinJavaObj *bin, ut8 *buf, ut64 sz, ut64 buf_offset); R_API RBinJavaAttrInfo *r_bin_java_enclosing_methods_attr_new(RBinJavaObj *bin, ut8 *buf, ut64 sz, ut64 buf_offset); R_API RBinJavaAttrInfo *r_bin_java_local_variable_type_table_attr_new(RBinJavaObj *bin, ut8 *buf, ut64 sz, ut64 buf_offset); R_API RBinJavaAttrInfo *r_bin_java_signature_attr_new(RBinJavaObj *bin, ut8 *buf, ut64 sz, ut64 buf_offset); R_API RBinJavaAttrInfo *r_bin_java_source_debug_attr_new(RBinJavaObj *bin, ut8 *buf, ut64 sz, ut64 buf_offset); R_API RBinJavaAttrInfo *r_bin_java_bootstrap_methods_attr_new(RBinJavaObj *bin, ut8 *buf, ut64 sz, ut64 buf_offset); R_API RBinJavaAttrInfo *r_bin_java_rtv_annotations_attr_new(RBinJavaObj *bin, ut8 *buf, ut64 sz, ut64 buf_offset); R_API RBinJavaAttrInfo *r_bin_java_rti_annotations_attr_new(RBinJavaObj *bin, ut8 *buf, ut64 sz, ut64 buf_offset); R_API RBinJavaAttrInfo *r_bin_java_rtvp_annotations_attr_new(RBinJavaObj *bin, ut8 *buf, ut64 sz, ut64 buf_offset); R_API RBinJavaAttrInfo *r_bin_java_rtip_annotations_attr_new(RBinJavaObj *bin, ut8 *buf, ut64 sz, ut64 buf_offset); R_API RBinJavaAttrInfo *r_bin_java_code_attr_new(RBinJavaObj *bin, ut8 *buf, ut64 sz, ut64 buf_offset); R_API RBinJavaAttrInfo *r_bin_java_constant_value_attr_new(RBinJavaObj *bin, ut8 *buf, ut64 sz, ut64 buf_offset); R_API RBinJavaAttrInfo *r_bin_java_deprecated_attr_new(RBinJavaObj *bin, ut8 *buf, ut64 sz, ut64 buf_offset); R_API RBinJavaAttrInfo *r_bin_java_exceptions_attr_new(RBinJavaObj *bin, ut8 *buf, ut64 sz, ut64 buf_offset); R_API RBinJavaAttrInfo 
*r_bin_java_inner_classes_attr_new(RBinJavaObj *bin, ut8 *buf, ut64 sz, ut64 buf_offset); R_API RBinJavaAttrInfo *r_bin_java_line_number_table_attr_new(RBinJavaObj *bin, ut8 *buf, ut64 sz, ut64 buf_offset); R_API RBinJavaAttrInfo *r_bin_java_local_variable_table_attr_new(RBinJavaObj *bin, ut8 *buf, ut64 sz, ut64 buf_offset); R_API RBinJavaAttrInfo *r_bin_java_source_code_file_attr_new(RBinJavaObj *bin, ut8 *buf, ut64 sz, ut64 buf_offset); R_API RBinJavaAttrInfo *r_bin_java_stack_map_table_attr_new(RBinJavaObj *bin, ut8 *buf, ut64 sz, ut64 buf_offset); R_API RBinJavaAttrInfo *r_bin_java_synthetic_attr_new(RBinJavaObj *bin, ut8 *buf, ut64 sz, ut64 buf_offset); R_API ut64 r_bin_java_unknown_attr_calc_size(RBinJavaAttrInfo *attr); R_API ut64 r_bin_java_annotation_default_attr_calc_size(RBinJavaAttrInfo *attr); R_API ut64 r_bin_java_enclosing_methods_attr_calc_size(RBinJavaAttrInfo *attr); R_API ut64 r_bin_java_local_variable_type_table_attr_calc_size(RBinJavaAttrInfo *attr); R_API ut64 r_bin_java_signature_attr_calc_size(RBinJavaAttrInfo *attr); R_API ut64 r_bin_java_source_debug_attr_calc_size(RBinJavaAttrInfo *attr); R_API ut64 r_bin_java_bootstrap_methods_attr_calc_size(RBinJavaAttrInfo *attr); R_API ut64 r_bin_java_rtv_annotations_attr_calc_size(RBinJavaAttrInfo *attr); R_API ut64 r_bin_java_rti_annotations_attr_calc_size(RBinJavaAttrInfo *attr); R_API ut64 r_bin_java_rtvp_annotations_attr_calc_size(RBinJavaAttrInfo *attr); R_API ut64 r_bin_java_rtip_annotations_attr_calc_size(RBinJavaAttrInfo *attr); R_API ut64 r_bin_java_code_attr_calc_size(RBinJavaAttrInfo *attr); R_API ut64 r_bin_java_constant_value_attr_calc_size(RBinJavaAttrInfo *attr); R_API ut64 r_bin_java_deprecated_attr_calc_size(RBinJavaAttrInfo *attr); R_API ut64 r_bin_java_exceptions_attr_calc_size(RBinJavaAttrInfo *attr); R_API ut64 r_bin_java_inner_classes_attr_calc_size(RBinJavaAttrInfo *attr); R_API ut64 r_bin_java_line_number_table_attr_calc_size(RBinJavaAttrInfo *attr); R_API ut64 
r_bin_java_local_variable_table_attr_calc_size(RBinJavaAttrInfo *attr); R_API ut64 r_bin_java_source_code_file_attr_calc_size(RBinJavaAttrInfo *attr); R_API ut64 r_bin_java_stack_map_table_attr_calc_size(RBinJavaAttrInfo *attr); R_API ut64 r_bin_java_synthetic_attr_calc_size(RBinJavaAttrInfo *attr); R_API ut64 r_bin_java_bootstrap_method_calc_size(RBinJavaBootStrapMethod *bsm); R_API ut64 r_bin_java_element_pair_calc_size(RBinJavaElementValuePair *evp); R_API ut64 r_bin_java_element_value_calc_size(RBinJavaElementValue *element_value); R_API ut64 r_bin_java_unknown_cp_calc_size(RBinJavaCPTypeObj *obj); R_API ut64 r_bin_java_class_cp_calc_size(RBinJavaCPTypeObj *obj); R_API ut64 r_bin_java_fieldref_cp_calc_size(RBinJavaCPTypeObj *obj); R_API ut64 r_bin_java_methodref_cp_calc_size(RBinJavaCPTypeObj *obj); R_API ut64 r_bin_java_interfacemethodref_cp_calc_size(RBinJavaCPTypeObj *obj); R_API ut64 r_bin_java_name_and_type_cp_calc_size(RBinJavaCPTypeObj *obj); R_API ut64 r_bin_java_string_cp_calc_size(RBinJavaCPTypeObj *obj); R_API ut64 r_bin_java_integer_cp_calc_size(RBinJavaCPTypeObj *obj); R_API ut64 r_bin_java_float_cp_calc_size(RBinJavaCPTypeObj *obj); R_API ut64 r_bin_java_long_cp_calc_size(RBinJavaCPTypeObj *obj); R_API ut64 r_bin_java_double_cp_calc_size(RBinJavaCPTypeObj *obj); R_API ut64 r_bin_java_utf8_cp_calc_size(RBinJavaCPTypeObj *obj); R_API ut64 r_bin_java_do_nothing_calc_size(RBinJavaCPTypeObj *obj); R_API ut64 r_bin_java_methodhandle_cp_calc_size(RBinJavaCPTypeObj *obj); R_API ut64 r_bin_java_methodtype_cp_calc_size(RBinJavaCPTypeObj *obj); R_API ut64 r_bin_java_invokedynamic_cp_calc_size(RBinJavaCPTypeObj *obj); R_API RBinJavaStackMapFrame *r_bin_java_default_stack_frame(void); R_API RList *r_bin_java_find_cp_const_by_val_float(RBinJavaObj *bin_obj, const ut8 *bytes, ut32 len); R_API RList *r_bin_java_find_cp_const_by_val_double(RBinJavaObj *bin_obj, const ut8 *bytes, ut32 len); R_API RList *r_bin_java_find_cp_const_by_val_int(RBinJavaObj *bin_obj, 
const ut8 *bytes, ut32 len); R_API RList *r_bin_java_find_cp_const_by_val_long(RBinJavaObj *bin_obj, const ut8 *bytes, ut32 len); R_API RList *r_bin_java_find_cp_const_by_val_utf8(RBinJavaObj *bin_obj, const ut8 *bytes, ut32 len); R_API ut8 *r_bin_java_cp_append_classref_and_name(RBinJavaObj *bin, ut32 *out_sz, const char *classname, const ut32 classname_len); R_API ut8 *U(r_bin_java_cp_append_ref_cname_fname_ftype)(RBinJavaObj * bin, ut32 * out_sz, ut8 tag, const char *cname, const ut32 c_len, const char *fname, const ut32 f_len, const char *tname, const ut32 t_len); R_API ut8 *r_bin_java_cp_get_classref(RBinJavaObj *bin, ut32 *out_sz, const char *classname, const ut32 classname_len, const ut16 name_idx); R_API ut8 *U(r_bin_java_cp_get_method_ref)(RBinJavaObj * bin, ut32 * out_sz, ut16 class_idx, ut16 name_and_type_idx); R_API ut8 *U(r_bin_java_cp_get_field_ref)(RBinJavaObj * bin, ut32 * out_sz, ut16 class_idx, ut16 name_and_type_idx); R_API ut8 *r_bin_java_cp_get_fm_ref(RBinJavaObj *bin, ut32 *out_sz, ut8 tag, ut16 class_idx, ut16 name_and_type_idx); R_API ut8 *r_bin_java_cp_get_2_ut16(RBinJavaObj *bin, ut32 *out_sz, ut8 tag, ut16 ut16_one, ut16 ut16_two); R_API ut8 *r_bin_java_cp_get_name_type(RBinJavaObj *bin, ut32 *out_sz, ut16 name_idx, ut16 type_idx); static char *convert_string(const char *bytes, ut32 len) { ut32 idx = 0, pos = 0; ut32 str_sz = 32 * len + 1; char *cpy_buffer = len > 0 ? 
malloc (str_sz) : NULL; if (!cpy_buffer) { return cpy_buffer; } // 4x is the increase from byte to \xHH where HH represents hexed byte memset (cpy_buffer, 0, str_sz); while (idx < len && pos < len) { if (dso_json_char_needs_hexing (bytes[idx])) { if (pos + 2 < len) { free (cpy_buffer); return NULL; } sprintf (cpy_buffer + pos, "\\x%02x", bytes[idx]); pos += 4; } else { cpy_buffer[pos] = bytes[idx]; pos++; } idx++; } return cpy_buffer; } // taken from LLVM Code Byte Swap // TODO: move into r_util R_API ut32 U(r_bin_java_swap_uint)(ut32 x) { const ut32 Byte0 = x & 0x000000FF; const ut32 Byte1 = x & 0x0000FF00; const ut32 Byte2 = x & 0x00FF0000; const ut32 Byte3 = x & 0xFF000000; return (Byte0 << 24) | (Byte1 << 8) | (Byte2 >> 8) | (Byte3 >> 24); } static RBinJavaAccessFlags FIELD_ACCESS_FLAGS[] = { { "public", R_BIN_JAVA_FIELD_ACC_PUBLIC, 6 }, { "private", R_BIN_JAVA_FIELD_ACC_PRIVATE, 7 }, { "protected", R_BIN_JAVA_FIELD_ACC_PROTECTED, 9 }, { "static", R_BIN_JAVA_FIELD_ACC_STATIC, 6 }, { "final", R_BIN_JAVA_FIELD_ACC_FINAL, 5 }, { "undefined.0x0020", 0x0020, 16 }, { "volatile", R_BIN_JAVA_FIELD_ACC_VOLATILE, 8 }, { "transient", R_BIN_JAVA_FIELD_ACC_TRANSIENT, 9 }, { "undefined.0x0100", 0x0100, 16 }, { "undefined.0x0200", 0x0200, 16 }, { "undefined.0x0400", 0x0400, 16 }, { "undefined.0x0800", 0x0800, 16 }, { "synthetic", R_BIN_JAVA_FIELD_ACC_SYNTHETIC, 9 }, { "undefined.0x2000", 0x2000, 16 }, { "enum", R_BIN_JAVA_FIELD_ACC_ENUM, 16 }, { "undefined.0x8000", 0x8000, 16 }, { NULL, 0, 0 } }; static RBinJavaAccessFlags METHOD_ACCESS_FLAGS[] = { { "public", R_BIN_JAVA_METHOD_ACC_PUBLIC, 6 }, { "private", R_BIN_JAVA_METHOD_ACC_PRIVATE, 7 }, { "protected", R_BIN_JAVA_METHOD_ACC_PROTECTED, 9 }, { "static", R_BIN_JAVA_METHOD_ACC_STATIC, 6 }, { "final", R_BIN_JAVA_METHOD_ACC_FINAL, 5 }, { "synchronized", R_BIN_JAVA_METHOD_ACC_SYNCHRONIZED, 12 }, { "bridge", R_BIN_JAVA_METHOD_ACC_BRIDGE, 6 }, { "varargs", R_BIN_JAVA_METHOD_ACC_VARARGS, 7 }, { "native", 
R_BIN_JAVA_METHOD_ACC_NATIVE, 6 }, { "interface", R_BIN_JAVA_METHOD_ACC_INTERFACE, 9 }, { "abstract", R_BIN_JAVA_METHOD_ACC_ABSTRACT, 8 }, { "strict", R_BIN_JAVA_METHOD_ACC_STRICT, 6 }, { "synthetic", R_BIN_JAVA_METHOD_ACC_SYNTHETIC, 9 }, { "annotation", R_BIN_JAVA_METHOD_ACC_ANNOTATION, 10 }, { "enum", R_BIN_JAVA_METHOD_ACC_ENUM, 4 }, { "undefined.0x8000", 0x8000, 16 }, { NULL, 0, 0 } }; // XXX - Fix these there are some incorrect ongs static RBinJavaAccessFlags CLASS_ACCESS_FLAGS[] = { { "public", R_BIN_JAVA_CLASS_ACC_PUBLIC, 6 }, { "undefined.0x0002", 0x0002, 16 }, { "undefined.0x0004", 0x0004, 16 }, { "undefined.0x0008", 0x0008, 16 }, { "final", R_BIN_JAVA_CLASS_ACC_FINAL, 5 }, { "super", R_BIN_JAVA_CLASS_ACC_SUPER, 5 }, { "undefined.0x0040", 0x0040, 16 }, { "undefined.0x0080", 0x0080, 16 }, { "undefined.0x0100", 0x0100, 16 }, { "interface", R_BIN_JAVA_CLASS_ACC_INTERFACE, 9 }, { "abstract", R_BIN_JAVA_CLASS_ACC_ABSTRACT, 8 }, { "undefined.0x0800", 0x0800, 16 }, { "synthetic", R_BIN_JAVA_CLASS_ACC_SYNTHETIC, 9 }, { "annotation", R_BIN_JAVA_CLASS_ACC_ANNOTATION, 10 }, { "enum", R_BIN_JAVA_CLASS_ACC_ENUM, 4 }, { "undefined.0x8000", 0x8000, 16 }, { NULL, 0, 0 } }; static RBinJavaRefMetas R_BIN_JAVA_REF_METAS[] = { { "Unknown", R_BIN_JAVA_REF_UNKNOWN }, { "GetField", R_BIN_JAVA_REF_GETFIELD }, { "GetStatic", R_BIN_JAVA_REF_GETSTATIC }, { "PutField", R_BIN_JAVA_REF_PUTFIELD }, { "PutStatic", R_BIN_JAVA_REF_PUTSTATIC }, { "InvokeVirtual", R_BIN_JAVA_REF_INVOKEVIRTUAL }, { "InvokeStatic", R_BIN_JAVA_REF_INVOKESTATIC }, { "InvokeSpecial", R_BIN_JAVA_REF_INVOKESPECIAL }, { "NewInvokeSpecial", R_BIN_JAVA_REF_NEWINVOKESPECIAL }, { "InvokeInterface", R_BIN_JAVA_REF_INVOKEINTERFACE } }; static const ut16 R_BIN_JAVA_ELEMENT_VALUE_METAS_SZ = 14; static R_TH_LOCAL bool R_BIN_JAVA_NULL_TYPE_INITTED = false; static R_TH_LOCAL RBinJavaObj *R_BIN_JAVA_GLOBAL_BIN = NULL; static RBinJavaElementValueMetas R_BIN_JAVA_ELEMENT_VALUE_METAS[] = { { "Byte", R_BIN_JAVA_EV_TAG_BYTE, NULL }, 
{ "Char", R_BIN_JAVA_EV_TAG_CHAR, NULL }, { "Double", R_BIN_JAVA_EV_TAG_DOUBLE, NULL }, { "Float", R_BIN_JAVA_EV_TAG_FLOAT, NULL }, { "Integer", R_BIN_JAVA_EV_TAG_INT, NULL }, { "Long", R_BIN_JAVA_EV_TAG_LONG, NULL }, { "Short", R_BIN_JAVA_EV_TAG_SHORT, NULL }, { "Boolean", R_BIN_JAVA_EV_TAG_BOOLEAN, NULL }, { "Array of ", R_BIN_JAVA_EV_TAG_ARRAY, NULL }, { "String", R_BIN_JAVA_EV_TAG_STRING, NULL }, { "Enum", R_BIN_JAVA_EV_TAG_ENUM, NULL }, { "Class", R_BIN_JAVA_EV_TAG_CLASS, NULL }, { "Annotation", R_BIN_JAVA_EV_TAG_ANNOTATION, NULL }, { "Unknown", R_BIN_JAVA_EV_TAG_UNKNOWN, NULL }, }; static RBinJavaVerificationMetas R_BIN_JAVA_VERIFICATION_METAS[] = { { "Top", R_BIN_JAVA_STACKMAP_TOP }, { "Integer", R_BIN_JAVA_STACKMAP_INTEGER }, { "Float", R_BIN_JAVA_STACKMAP_FLOAT }, { "Double", R_BIN_JAVA_STACKMAP_DOUBLE }, { "Long", R_BIN_JAVA_STACKMAP_LONG }, { "NULL", R_BIN_JAVA_STACKMAP_NULL }, { "This", R_BIN_JAVA_STACKMAP_THIS }, { "Object", R_BIN_JAVA_STACKMAP_OBJECT }, { "Uninitialized", R_BIN_JAVA_STACKMAP_UNINIT }, { "Unknown", R_BIN_JAVA_STACKMAP_UNKNOWN } }; static RBinJavaStackMapFrameMetas R_BIN_JAVA_STACK_MAP_FRAME_METAS[] = { { "ImplicitStackFrame", R_BIN_JAVA_STACK_FRAME_IMPLICIT, NULL }, { "Same", R_BIN_JAVA_STACK_FRAME_SAME, NULL }, { "SameLocals1StackItem", R_BIN_JAVA_STACK_FRAME_SAME_LOCALS_1, NULL }, { "Chop", R_BIN_JAVA_STACK_FRAME_CHOP, NULL }, { "SameFrameExtended", R_BIN_JAVA_STACK_FRAME_SAME_FRAME_EXTENDED, NULL }, { "Append", R_BIN_JAVA_STACK_FRAME_APPEND, NULL }, { "FullFrame", R_BIN_JAVA_STACK_FRAME_FULL_FRAME, NULL }, { "Reserved", R_BIN_JAVA_STACK_FRAME_RESERVED, NULL } }; static RBinJavaCPTypeObjectAllocs R_BIN_ALLOCS_CONSTANTS[] = { { r_bin_java_do_nothing_new, r_bin_java_do_nothing_free, r_bin_java_print_null_cp_summary, r_bin_java_do_nothing_calc_size, r_bin_java_print_null_cp_stringify }, { r_bin_java_utf8_cp_new, r_bin_java_utf8_info_free, r_bin_java_print_utf8_cp_summary, r_bin_java_utf8_cp_calc_size, r_bin_java_print_utf8_cp_stringify 
}, { r_bin_java_unknown_cp_new, r_bin_java_default_free, r_bin_java_print_unknown_cp_summary, r_bin_java_unknown_cp_calc_size, r_bin_java_print_unknown_cp_stringify }, { r_bin_java_integer_cp_new, r_bin_java_default_free, r_bin_java_print_integer_cp_summary, r_bin_java_integer_cp_calc_size, r_bin_java_print_integer_cp_stringify }, { r_bin_java_float_cp_new, r_bin_java_default_free, r_bin_java_print_float_cp_summary, r_bin_java_float_cp_calc_size, r_bin_java_print_float_cp_stringify }, { r_bin_java_long_cp_new, r_bin_java_default_free, r_bin_java_print_long_cp_summary, r_bin_java_long_cp_calc_size, r_bin_java_print_long_cp_stringify }, { r_bin_java_double_cp_new, r_bin_java_default_free, r_bin_java_print_double_cp_summary, r_bin_java_double_cp_calc_size, r_bin_java_print_double_cp_stringify }, { r_bin_java_class_cp_new, r_bin_java_default_free, r_bin_java_print_classref_cp_summary, r_bin_java_class_cp_calc_size, r_bin_java_print_classref_cp_stringify }, { r_bin_java_string_cp_new, r_bin_java_default_free, r_bin_java_print_string_cp_summary, r_bin_java_string_cp_calc_size, r_bin_java_print_string_cp_stringify }, { r_bin_java_fieldref_cp_new, r_bin_java_default_free, r_bin_java_print_fieldref_cp_summary, r_bin_java_fieldref_cp_calc_size, r_bin_java_print_fieldref_cp_stringify }, { r_bin_java_methodref_cp_new, r_bin_java_default_free, r_bin_java_print_methodref_cp_summary, r_bin_java_methodref_cp_calc_size, r_bin_java_print_methodref_cp_stringify }, { r_bin_java_interfacemethodref_cp_new, r_bin_java_default_free, r_bin_java_print_interfacemethodref_cp_summary, r_bin_java_interfacemethodref_cp_calc_size, r_bin_java_print_interfacemethodref_cp_stringify }, { r_bin_java_name_and_type_cp_new, r_bin_java_default_free, r_bin_java_print_name_and_type_cp_summary, r_bin_java_name_and_type_cp_calc_size, r_bin_java_print_name_and_type_cp_stringify }, { NULL, NULL, NULL, NULL, NULL }, { NULL, NULL, NULL, NULL, NULL }, { r_bin_java_methodhandle_cp_new, r_bin_java_default_free, 
r_bin_java_print_methodhandle_cp_summary, r_bin_java_methodhandle_cp_calc_size, r_bin_java_print_methodhandle_cp_stringify }, { r_bin_java_methodtype_cp_new, r_bin_java_default_free, r_bin_java_print_methodtype_cp_summary, r_bin_java_methodtype_cp_calc_size, r_bin_java_print_methodtype_cp_stringify }, { NULL, NULL, NULL, NULL, NULL }, { r_bin_java_invokedynamic_cp_new, r_bin_java_default_free, r_bin_java_print_invokedynamic_cp_summary, r_bin_java_invokedynamic_cp_calc_size, r_bin_java_print_invokedynamic_cp_stringify }, }; static RBinJavaCPTypeObj R_BIN_JAVA_NULL_TYPE; static ut8 R_BIN_JAVA_CP_METAS_SZ = 12; static RBinJavaCPTypeMetas R_BIN_JAVA_CP_METAS[] = { // Each field has a name pointer and a tag field { "NULL", R_BIN_JAVA_CP_NULL, 0, &R_BIN_ALLOCS_CONSTANTS[0] }, { "Utf8", R_BIN_JAVA_CP_UTF8, 3, &R_BIN_ALLOCS_CONSTANTS[1] }, // 2 bytes = length, N bytes string (containts a pointer in the field) { "Unknown", R_BIN_JAVA_CP_UNKNOWN, 0, &R_BIN_ALLOCS_CONSTANTS[2] }, { "Integer", R_BIN_JAVA_CP_INTEGER, 5, &R_BIN_ALLOCS_CONSTANTS[3] }, // 4 bytes { "Float", R_BIN_JAVA_CP_FLOAT, 5, &R_BIN_ALLOCS_CONSTANTS[4] }, // 4 bytes { "Long", R_BIN_JAVA_CP_LONG, 9, &R_BIN_ALLOCS_CONSTANTS[5] }, // 4 high 4 low { "Double", R_BIN_JAVA_CP_DOUBLE, 9, &R_BIN_ALLOCS_CONSTANTS[6] }, // 4 high 4 low { "Class", R_BIN_JAVA_CP_CLASS, 3, &R_BIN_ALLOCS_CONSTANTS[7] }, // 2 name_idx { "String", R_BIN_JAVA_CP_STRING, 3, &R_BIN_ALLOCS_CONSTANTS[8] }, // 2 string_idx { "FieldRef", R_BIN_JAVA_CP_FIELDREF, 5, &R_BIN_ALLOCS_CONSTANTS[9] }, // 2 class idx, 2 name/type_idx { "MethodRef", R_BIN_JAVA_CP_METHODREF, 5, &R_BIN_ALLOCS_CONSTANTS[10] }, // 2 class idx, 2 name/type_idx { "InterfaceMethodRef", R_BIN_JAVA_CP_INTERFACEMETHOD_REF, 5, &R_BIN_ALLOCS_CONSTANTS[11] }, // 2 class idx, 2 name/type_idx { "NameAndType", R_BIN_JAVA_CP_NAMEANDTYPE, 5, &R_BIN_ALLOCS_CONSTANTS[12] }, // 4 high 4 low { "Unknown", R_BIN_JAVA_CP_UNKNOWN, 0, &R_BIN_ALLOCS_CONSTANTS[2] }, { "Unknown", R_BIN_JAVA_CP_UNKNOWN, 0, 
&R_BIN_ALLOCS_CONSTANTS[2] }, { "MethodHandle", R_BIN_JAVA_CP_METHODHANDLE, 4, &R_BIN_ALLOCS_CONSTANTS[15] }, // 4 high 4 low { "MethodType", R_BIN_JAVA_CP_METHODTYPE, 3, &R_BIN_ALLOCS_CONSTANTS[16] }, // 4 high 4 low { "Unknown", R_BIN_JAVA_CP_UNKNOWN, 0, &R_BIN_ALLOCS_CONSTANTS[2] }, { "InvokeDynamic", R_BIN_JAVA_CP_INVOKEDYNAMIC, 5, &R_BIN_ALLOCS_CONSTANTS[18] }, // 4 high 4 low }; static RBinJavaAttrInfoObjectAllocs RBIN_JAVA_ATTRS_ALLOCS[] = { { r_bin_java_annotation_default_attr_new, r_bin_java_annotation_default_attr_free, r_bin_java_print_annotation_default_attr_summary, r_bin_java_annotation_default_attr_calc_size }, { r_bin_java_bootstrap_methods_attr_new, r_bin_java_bootstrap_methods_attr_free, r_bin_java_print_bootstrap_methods_attr_summary, r_bin_java_bootstrap_methods_attr_calc_size }, { r_bin_java_code_attr_new, r_bin_java_code_attr_free, r_bin_java_print_code_attr_summary, r_bin_java_code_attr_calc_size }, { r_bin_java_constant_value_attr_new, r_bin_java_constant_value_attr_free, r_bin_java_print_constant_value_attr_summary, r_bin_java_constant_value_attr_calc_size }, { r_bin_java_deprecated_attr_new, r_bin_java_deprecated_attr_free, r_bin_java_print_deprecated_attr_summary, r_bin_java_deprecated_attr_calc_size }, { r_bin_java_enclosing_methods_attr_new, r_bin_java_enclosing_methods_attr_free, r_bin_java_print_enclosing_methods_attr_summary, r_bin_java_enclosing_methods_attr_calc_size }, { r_bin_java_exceptions_attr_new, r_bin_java_exceptions_attr_free, r_bin_java_print_exceptions_attr_summary, r_bin_java_exceptions_attr_calc_size }, { r_bin_java_inner_classes_attr_new, r_bin_java_inner_classes_attr_free, r_bin_java_print_inner_classes_attr_summary, r_bin_java_inner_classes_attr_calc_size }, { r_bin_java_line_number_table_attr_new, r_bin_java_line_number_table_attr_free, r_bin_java_print_line_number_table_attr_summary, r_bin_java_line_number_table_attr_calc_size }, { r_bin_java_local_variable_table_attr_new, 
r_bin_java_local_variable_table_attr_free, r_bin_java_print_local_variable_table_attr_summary, r_bin_java_local_variable_table_attr_calc_size }, { r_bin_java_local_variable_type_table_attr_new, r_bin_java_local_variable_type_table_attr_free, r_bin_java_print_local_variable_type_table_attr_summary, r_bin_java_local_variable_type_table_attr_calc_size }, { r_bin_java_rti_annotations_attr_new, r_bin_java_rti_annotations_attr_free, r_bin_java_print_rti_annotations_attr_summary, r_bin_java_rti_annotations_attr_calc_size }, { r_bin_java_rtip_annotations_attr_new, r_bin_java_rtip_annotations_attr_free, r_bin_java_print_rtip_annotations_attr_summary, r_bin_java_rtip_annotations_attr_calc_size }, { r_bin_java_rtv_annotations_attr_new, r_bin_java_rtv_annotations_attr_free, r_bin_java_print_rtv_annotations_attr_summary, r_bin_java_rtv_annotations_attr_calc_size }, { r_bin_java_rtvp_annotations_attr_new, r_bin_java_rtvp_annotations_attr_free, r_bin_java_print_rtvp_annotations_attr_summary, r_bin_java_rtvp_annotations_attr_calc_size }, { r_bin_java_signature_attr_new, r_bin_java_signature_attr_free, r_bin_java_print_signature_attr_summary, r_bin_java_signature_attr_calc_size }, { r_bin_java_source_debug_attr_new, r_bin_java_source_debug_attr_free, r_bin_java_print_source_debug_attr_summary, r_bin_java_source_debug_attr_calc_size }, { r_bin_java_source_code_file_attr_new, r_bin_java_source_code_file_attr_free, r_bin_java_print_source_code_file_attr_summary, r_bin_java_source_code_file_attr_calc_size }, { r_bin_java_stack_map_table_attr_new, r_bin_java_stack_map_table_attr_free, r_bin_java_print_stack_map_table_attr_summary, r_bin_java_stack_map_table_attr_calc_size }, { r_bin_java_synthetic_attr_new, r_bin_java_synthetic_attr_free, r_bin_java_print_synthetic_attr_summary, r_bin_java_synthetic_attr_calc_size }, { r_bin_java_unknown_attr_new, r_bin_java_unknown_attr_free, r_bin_java_print_unknown_attr_summary, r_bin_java_unknown_attr_calc_size } }; // R_API ut32 
RBIN_JAVA_ATTRS_METAS_SZ = 21; static ut32 RBIN_JAVA_ATTRS_METAS_SZ = 20; static RBinJavaAttrMetas RBIN_JAVA_ATTRS_METAS[] = { { "AnnotationDefault", R_BIN_JAVA_ATTR_TYPE_ANNOTATION_DEFAULT_ATTR, &RBIN_JAVA_ATTRS_ALLOCS[0] }, { "BootstrapMethods", R_BIN_JAVA_ATTR_TYPE_BOOTSTRAP_METHODS_ATTR, &RBIN_JAVA_ATTRS_ALLOCS[1] }, { "Code", R_BIN_JAVA_ATTR_TYPE_CODE_ATTR, &RBIN_JAVA_ATTRS_ALLOCS[2] }, { "ConstantValue", R_BIN_JAVA_ATTR_TYPE_CONST_VALUE_ATTR, &RBIN_JAVA_ATTRS_ALLOCS[3] }, { "Deperecated", R_BIN_JAVA_ATTR_TYPE_DEPRECATED_ATTR, &RBIN_JAVA_ATTRS_ALLOCS[4] }, { "EnclosingMethod", R_BIN_JAVA_ATTR_TYPE_ENCLOSING_METHOD_ATTR, &RBIN_JAVA_ATTRS_ALLOCS[5] }, { "Exceptions", R_BIN_JAVA_ATTR_TYPE_EXCEPTIONS_ATTR, &RBIN_JAVA_ATTRS_ALLOCS[6] }, { "InnerClasses", R_BIN_JAVA_ATTR_TYPE_INNER_CLASSES_ATTR, &RBIN_JAVA_ATTRS_ALLOCS[7] }, { "LineNumberTable", R_BIN_JAVA_ATTR_TYPE_LINE_NUMBER_TABLE_ATTR, &RBIN_JAVA_ATTRS_ALLOCS[8] }, { "LocalVariableTable", R_BIN_JAVA_ATTR_TYPE_LOCAL_VARIABLE_TABLE_ATTR, &RBIN_JAVA_ATTRS_ALLOCS[9] }, { "LocalVariableTypeTable", R_BIN_JAVA_ATTR_TYPE_LOCAL_VARIABLE_TYPE_TABLE_ATTR, &RBIN_JAVA_ATTRS_ALLOCS[10] }, { "RuntimeInvisibleAnnotations", R_BIN_JAVA_ATTR_TYPE_RUNTIME_INVISIBLE_ANNOTATION_ATTR, &RBIN_JAVA_ATTRS_ALLOCS[11] }, { "RuntimeInvisibleParameterAnnotations", R_BIN_JAVA_ATTR_TYPE_RUNTIME_INVISIBLE_PARAMETER_ANNOTATION_ATTR, &RBIN_JAVA_ATTRS_ALLOCS[12] }, { "RuntimeVisibleAnnotations", R_BIN_JAVA_ATTR_TYPE_RUNTIME_VISIBLE_ANNOTATION_ATTR, &RBIN_JAVA_ATTRS_ALLOCS[13] }, { "RuntimeVisibleParameterAnnotations", R_BIN_JAVA_ATTR_TYPE_RUNTIME_VISIBLE_PARAMETER_ANNOTATION_ATTR, &RBIN_JAVA_ATTRS_ALLOCS[14] }, { "Signature", R_BIN_JAVA_ATTR_TYPE_SIGNATURE_ATTR, &RBIN_JAVA_ATTRS_ALLOCS[15] }, { "SourceDebugExtension", R_BIN_JAVA_ATTR_TYPE_SOURCE_DEBUG_EXTENTSION_ATTR, &RBIN_JAVA_ATTRS_ALLOCS[16] }, { "SourceFile", R_BIN_JAVA_ATTR_TYPE_SOURCE_FILE_ATTR, &RBIN_JAVA_ATTRS_ALLOCS[17] }, { "StackMapTable", R_BIN_JAVA_ATTR_TYPE_STACK_MAP_TABLE_ATTR, 
&RBIN_JAVA_ATTRS_ALLOCS[18] },
	// { "StackMap", R_BIN_JAVA_ATTR_TYPE_STACK_MAP_TABLE_ATTR, &RBIN_JAVA_ATTRS_ALLOCS[18]},
	{ "Synthetic", R_BIN_JAVA_ATTR_TYPE_SYNTHETIC_ATTR, &RBIN_JAVA_ATTRS_ALLOCS[19] },
	{ "Unknown", R_BIN_JAVA_ATTR_TYPE_UNKNOWN_ATTR, &RBIN_JAVA_ATTRS_ALLOCS[20] }
};

// True for pre-StackMap class files: major version 45, minor <= 2.
// cf.major / cf.minor are raw 2-byte big-endian fields; index [1] is the LSB.
R_API bool r_bin_java_is_old_format(RBinJavaObj *bin) {
	return bin->cf.major[1] == 45 && bin->cf.minor[1] <= 2;
}

// Free and re-create every mutable piece of bin state (flags string, class
// name, constpool and all entity lists), leaving the object in a pristine
// "unknown" state ready for a fresh parse.
R_API void r_bin_java_reset_bin_info(RBinJavaObj *bin) {
	free (bin->cf2.flags_str);
	free (bin->cf2.this_class_name);
	r_list_free (bin->imports_list);
	r_list_free (bin->methods_list);
	r_list_free (bin->fields_list);
	r_list_free (bin->attrs_list);
	r_list_free (bin->cp_list);
	r_list_free (bin->interfaces_list);
	r_str_constpool_fini (&bin->constpool);
	r_str_constpool_init (&bin->constpool);
	bin->cf2.flags_str = strdup ("unknown");
	bin->cf2.this_class_name = strdup ("unknown");
	bin->imports_list = r_list_newf (free);
	bin->methods_list = r_list_newf (r_bin_java_fmtype_free);
	bin->fields_list = r_list_newf (r_bin_java_fmtype_free);
	bin->attrs_list = r_list_newf (r_bin_java_attribute_free);
	bin->cp_list = r_list_newf (r_bin_java_constant_pool);
	bin->interfaces_list = r_list_newf (r_bin_java_interface_free);
}

// Build a readable prototype "<flags> <ret> <name> (<params>)" from already
// split descriptor pieces. `params` is the raw parameter descriptor (may be
// NULL), `r_value` the raw return-type descriptor. Returns a heap string the
// caller frees, or NULL when the return descriptor cannot be decoded.
R_API char *r_bin_java_unmangle_method(const char *flags, const char *name, const char *params, const char *r_value) {
	RList *the_list = params ? r_bin_java_extract_type_values (params) : r_list_new ();
	RListIter *iter = NULL;
	// second case removes leading space if no flags are given
	const char *fmt = flags ? "%s %s %s (%s)" : "%s%s %s (%s)";
	char *str = NULL, *f_val_str = NULL, *r_val_str = NULL, *prototype = NULL, *p_val_str = NULL;
	ut32 params_idx = 0, params_len = 0, prototype_len = 0;
	if (!extract_type_value (r_value, &r_val_str)) {
		r_list_free (the_list);
		return NULL;
	}
	if (!r_val_str) {
		r_val_str = strdup ("UNKNOWN");
	}
	f_val_str = strdup (r_str_get (flags));
	// first pass: measure the joined parameter list (", " between entries)
	r_list_foreach (the_list, iter, str) {
		params_len += strlen (str);
		if (params_idx > 0) {
			params_len += 2;
		}
		params_idx++;
	}
	if (params_len > 0) {
		ut32 offset = 0;
		params_len += 1;
		// NOTE(review): malloc result is used unchecked below — assumes
		// allocation succeeds.
		p_val_str = malloc (params_len);
		// second pass: join the decoded parameter types
		r_list_foreach (the_list, iter, str) {
			if (offset != 0) {
				offset += snprintf (p_val_str + offset, params_len - offset, ", %s", str);
			} else {
				offset += snprintf (p_val_str + offset, params_len - offset, "%s", str);
			}
		}
	} else {
		p_val_str = strdup ("");
	}
	prototype_len += (flags ? strlen (flags) + 1 : 0); // space vs no space
	prototype_len += strlen (name) + 1; // name + space
	prototype_len += strlen (r_val_str) + 1; // r_value + space
	prototype_len += strlen (p_val_str) + 3; // space + l_paren + params + r_paren
	prototype_len += 1; // null
	prototype = malloc (prototype_len);
	/// TODO enable this function and start using it to demangle strings
	snprintf (prototype, prototype_len, fmt, f_val_str, r_val_str, name, p_val_str);
	free (f_val_str);
	free (r_val_str);
	free (p_val_str);
	r_list_free (the_list);
	return prototype;
}

// Turn a raw JVM descriptor plus flags and name into a readable prototype.
// Without parentheses the descriptor is treated as a field type; otherwise
// it is split at '(' / ')' and handed to r_bin_java_unmangle_method.
// Returns a heap string (caller frees) or NULL on bad input.
R_API char *r_bin_java_unmangle(const char *flags, const char *name, const char *descriptor) {
	ut32 l_paren_pos = -1, r_paren_pos = -1;
	char *result = NULL;
	ut32 desc_len = descriptor && *descriptor ? strlen (descriptor) : 0,
	name_len = name && *name ? strlen (name) : 0,
	flags_len = flags && *flags ? strlen (flags) : 0,
	i = 0;
	if (desc_len == 0 || name == 0) {
		return NULL;
	}
	// locate the parameter list delimiters, if any
	for (i = 0; i < desc_len; i++) {
		if (descriptor[i] == '(') {
			l_paren_pos = i;
		} else if (l_paren_pos != (ut32) - 1 && descriptor[i] == ')') {
			r_paren_pos = i;
			break;
		}
	}
	// handle field case;
	if (l_paren_pos == (ut32) - 1 && r_paren_pos == (ut32) - 1) {
		char *unmangle_field_desc = NULL;
		ut32 len = extract_type_value (descriptor, &unmangle_field_desc);
		if (len == 0) {
			eprintf ("Warning: attempting to unmangle invalid type descriptor.\n");
			free (unmangle_field_desc);
			return result;
		}
		if (flags_len > 0) {
			len += (flags_len + name_len + 5); // space and null
			result = malloc (len);
			snprintf (result, len, "%s %s %s", flags, unmangle_field_desc, name);
		} else {
			len += (name_len + 5); // space and null
			result = malloc (len);
			snprintf (result, len, "%s %s", unmangle_field_desc, name);
		}
		free (unmangle_field_desc);
	} else if (l_paren_pos != (ut32) - 1 && r_paren_pos != (ut32) - 1 && l_paren_pos < r_paren_pos) {
		// params_len account for l_paren + 1 and null
		ut32 params_len = r_paren_pos - (l_paren_pos + 1) != 0 ? r_paren_pos - (l_paren_pos + 1) + 1 : 0;
		char *params = params_len ? malloc (params_len) : NULL;
		const char *rvalue = descriptor + r_paren_pos + 1;
		if (params) {
			snprintf (params, params_len, "%s", descriptor + l_paren_pos + 1);
		}
		result = r_bin_java_unmangle_method (flags, name, params, rvalue);
		free (params);
	}
	return result;
}

// Assemble the whole-class JSON description: class info dict plus "methods",
// "fields" and "imports" lists. On a successful insert ownership of each
// sub-list transfers to class_dict; on failure the sub-list is deleted here.
R_API DsoJsonObj *r_bin_java_get_bin_obj_json(RBinJavaObj *bin) {
	DsoJsonObj *imports_list = r_bin_java_get_import_json_definitions (bin);
	DsoJsonObj *fields_list = r_bin_java_get_field_json_definitions (bin);
	DsoJsonObj *methods_list = r_bin_java_get_method_json_definitions (bin);
	// interfaces_list = r_bin_java_get_interface_json_definitions (bin);
	DsoJsonObj *class_dict = r_bin_java_get_class_info_json (bin);
	char *res = dso_json_obj_to_str (methods_list);
	// eprintf ("Resulting methods json: \n%s\n", res);
	free (res);
	if (dso_json_dict_insert_str_key_obj (class_dict, "methods", methods_list)) {
		// dso_json_list_free (methods_list);
		dso_json_obj_del (methods_list);
	}
	res = dso_json_obj_to_str (fields_list);
	// eprintf ("Resulting fields json: \n%s\n", res);
	free (res);
	if (dso_json_dict_insert_str_key_obj (class_dict, "fields", fields_list)) {
		// dso_json_list_free (fields_list);
		dso_json_obj_del (fields_list);
	}
	res = dso_json_obj_to_str (imports_list);
	// eprintf ("Resulting imports json: \n%s\n", res);
	free (res);
	if (dso_json_dict_insert_str_key_obj (class_dict, "imports", imports_list)) {
		// dso_json_list_free (imports_list);
		dso_json_obj_del (imports_list);
	}
	// res = dso_json_obj_to_str (interfaces_list);
	// eprintf ("Resulting interfaces json: \n%s\n", res);
	// free (res);
	// dso_json_dict_insert_str_key_obj (class_dict, "interfaces", interfaces_list);
	res = dso_json_obj_to_str (class_dict);
	// eprintf ("Resulting class info json: \n%s\n", res);
	free (res);
	// dso_json_obj_del (class_dict);
	return class_dict;
}

// Collect imported library names as a JSON list, converting '/' package
// separators to '.'. Returns an empty list when bin is NULL or has no libs.
R_API DsoJsonObj *r_bin_java_get_import_json_definitions(RBinJavaObj *bin) {
	RList *the_list;
	DsoJsonObj *json_list = dso_json_list_new ();
	RListIter *iter = NULL;
	char *new_str;
	if (!bin
|| !(the_list = r_bin_java_get_lib_names (bin))) {
		return json_list;
	}
	r_list_foreach (the_list, iter, new_str) {
		char *tmp = new_str;
		// eprintf ("Processing string: %s\n", new_str);
		// rewrite '/' package separators as '.' in place
		while (*tmp) {
			if (*tmp == '/') {
				*tmp = '.';
			}
			tmp++;
		}
		// eprintf ("adding string: %s\n", new_str);
		dso_json_list_append_str (json_list, new_str);
	}
	r_list_free (the_list);
	return json_list;
}

// Describe the first class in the binary as a JSON dict: raw access flags,
// decoded access booleans, name, super class, plus the names of any classes
// in the list that are themselves interfaces.
R_API DsoJsonObj *r_bin_java_get_class_info_json(RBinJavaObj *bin) {
	RList *classes = r_bin_java_get_classes (bin);
	DsoJsonObj *interfaces_list = dso_json_list_new ();
	DsoJsonObj *class_info_dict = dso_json_dict_new ();
	RBinClass *class_ = r_list_get_n (classes, 0);
	if (class_) {
		int dummy = 0;
		RListIter *iter;
		RBinClass *class_v = NULL;
		// add access flags like in methods
		bool is_public = ((class_->visibility & R_BIN_JAVA_CLASS_ACC_PUBLIC) != 0);
		bool is_final = ((class_->visibility & R_BIN_JAVA_CLASS_ACC_FINAL) != 0);
		bool is_super = ((class_->visibility & R_BIN_JAVA_CLASS_ACC_SUPER) != 0);
		bool is_interface = ((class_->visibility & R_BIN_JAVA_CLASS_ACC_INTERFACE) != 0);
		bool is_abstract = ((class_->visibility & R_BIN_JAVA_CLASS_ACC_ABSTRACT) != 0);
		bool is_synthetic = ((class_->visibility & R_BIN_JAVA_CLASS_ACC_SYNTHETIC) != 0);
		bool is_annotation = ((class_->visibility & R_BIN_JAVA_CLASS_ACC_ANNOTATION) != 0);
		bool is_enum = ((class_->visibility & R_BIN_JAVA_CLASS_ACC_ENUM) != 0);
		dso_json_dict_insert_str_key_num (class_info_dict, "access_flags", class_->visibility);
		dso_json_dict_insert_str_key_num (class_info_dict, "is_public", is_public);
		dso_json_dict_insert_str_key_num (class_info_dict, "is_final", is_final);
		dso_json_dict_insert_str_key_num (class_info_dict, "is_super", is_super);
		dso_json_dict_insert_str_key_num (class_info_dict, "is_interface", is_interface);
		dso_json_dict_insert_str_key_num (class_info_dict, "is_abstract", is_abstract);
		dso_json_dict_insert_str_key_num (class_info_dict, "is_synthetic", is_synthetic);
		dso_json_dict_insert_str_key_num (class_info_dict, "is_annotation", is_annotation);
		dso_json_dict_insert_str_key_num (class_info_dict, "is_enum", is_enum);
		dso_json_dict_insert_str_key_str (class_info_dict, "name", class_->name);
		if (!class_->super) {
			// no super class: store an empty JSON string instead
			DsoJsonObj *str = dso_json_str_new ();
			if (dso_json_dict_insert_str_key_obj (class_info_dict, "super", str)) {
				dso_json_str_free (str);
			}
		} else {
			dso_json_dict_insert_str_key_str (class_info_dict, "super", class_->super);
		}
		// skip entry 0 (the class itself) via `dummy`
		r_list_foreach (classes, iter, class_v) {
			if (!dummy) {
				dummy++;
				continue;
			}
			// enumerate all interface classes and append them to the interfaces
			if ((class_v->visibility & R_BIN_JAVA_CLASS_ACC_INTERFACE) != 0) {
				dso_json_list_append_str (interfaces_list, class_v->name);
			}
		}
	}
	if (dso_json_dict_insert_str_key_obj (class_info_dict, "interfaces", interfaces_list)) {
		// dso_json_list_free (interfaces_list);
		dso_json_obj_del (interfaces_list);
	}
	r_list_free (classes);
	return class_info_dict;
}

// Collect implemented interface names as a JSON list, converting '/'
// package separators to '.'. Empty list when bin is NULL or has none.
R_API DsoJsonObj *r_bin_java_get_interface_json_definitions(RBinJavaObj *bin) {
	RList *the_list;
	DsoJsonObj *json_list = dso_json_list_new ();
	RListIter *iter = NULL;
	char *new_str;
	if (!bin || !(the_list = r_bin_java_get_interface_names (bin))) {
		return json_list;
	}
	r_list_foreach (the_list, iter, new_str) {
		char *tmp = new_str;
		// eprintf ("Processing string: %s\n", new_str);
		while (*tmp) {
			if (*tmp == '/') {
				*tmp = '.';
			}
			tmp++;
		}
		// eprintf ("adding string: %s\n", new_str);
		dso_json_list_append_str (json_list, new_str);
	}
	r_list_free (the_list);
	return json_list;
}

// One JSON definition per method in the binary; empty list when bin is NULL.
R_API DsoJsonObj *r_bin_java_get_method_json_definitions(RBinJavaObj *bin) {
	RBinJavaField *fm_type = NULL;
	RListIter *iter = NULL;
	DsoJsonObj *json_list = dso_json_list_new ();
	if (!bin) {
		return json_list;
	}
	r_list_foreach (bin->methods_list, iter, fm_type) {
		DsoJsonObj *method_proto = r_bin_java_get_method_json_definition (bin, fm_type);
		// eprintf ("Method json: %s\n", method_proto);
		dso_json_list_append (json_list, method_proto);
	}
	return json_list;
}

R_API DsoJsonObj
*r_bin_java_get_field_json_definitions(RBinJavaObj *bin) {
	// One JSON definition per field in the binary; empty list when bin is NULL.
	RBinJavaField *fm_type = NULL;
	RListIter *iter = NULL;
	DsoJsonObj *json_list = dso_json_list_new ();
	if (!bin) {
		return json_list;
	}
	r_list_foreach (bin->fields_list, iter, fm_type) {
		DsoJsonObj *field_proto = r_bin_java_get_field_json_definition (bin, fm_type);
		// eprintf ("Field json: %s\n", field_proto);
		dso_json_list_append (json_list, field_proto);
	}
	return json_list;
}

// "<class>.<name>.<signature>" fully-qualified method string; NULL arguments
// are replaced with "null_*" placeholders. Caller frees the result.
R_API char *r_bin_java_create_method_fq_str(const char *klass, const char *name, const char *signature) {
	if (!klass) {
		klass = "null_class";
	}
	if (!name) {
		name = "null_name";
	}
	if (!signature) {
		signature = "null_signature";
	}
	return r_str_newf ("%s.%s.%s", klass, name, signature);
}

// "<signature> <class>.<name>" fully-qualified field string; NULL arguments
// are replaced with "null_*" placeholders. Caller frees the result.
R_API char *r_bin_java_create_field_fq_str(const char *klass, const char *name, const char *signature) {
	if (!klass) {
		klass = "null_class";
	}
	if (!name) {
		name = "null_name";
	}
	if (!signature) {
		signature = "null_signature";
	}
	return r_str_newf ("%s %s.%s", signature, klass, name);
}

// Serialize one field/method entry into a JSON dict: decoded access-flag
// booleans, load address, names, signature, fq_name and unmangled prototype.
// `is_method` selects method vs field fq-name formatting.
R_API DsoJsonObj *r_bin_java_get_fm_type_definition_json(RBinJavaObj *bin, RBinJavaField *fm_type, int is_method) {
	ut64 addr = UT64_MAX;
	char *prototype = NULL, *fq_name = NULL;
	bool is_native = ((fm_type->flags & R_BIN_JAVA_METHOD_ACC_NATIVE) != 0);
	bool is_static = ((fm_type->flags & R_BIN_JAVA_METHOD_ACC_STATIC) != 0);
	bool is_synthetic = ((fm_type->flags & R_BIN_JAVA_METHOD_ACC_SYNTHETIC) != 0);
	bool is_private = ((fm_type->flags & R_BIN_JAVA_METHOD_ACC_PRIVATE) != 0);
	bool is_public = ((fm_type->flags & R_BIN_JAVA_METHOD_ACC_PUBLIC) != 0);
	bool is_protected = ((fm_type->flags & R_BIN_JAVA_METHOD_ACC_PROTECTED) != 0);
	bool is_super = ((fm_type->flags & R_BIN_JAVA_CLASS_ACC_SUPER) != 0);
	DsoJsonObj *fm_type_dict = dso_json_dict_new ();
	dso_json_dict_insert_str_key_num (fm_type_dict, "access_flags", fm_type->flags);
	dso_json_dict_insert_str_key_num (fm_type_dict, "is_method", is_method);
	dso_json_dict_insert_str_key_num (fm_type_dict, "is_native", is_native);
	dso_json_dict_insert_str_key_num (fm_type_dict, "is_synthetic", is_synthetic);
	dso_json_dict_insert_str_key_num (fm_type_dict, "is_private", is_private);
	dso_json_dict_insert_str_key_num (fm_type_dict, "is_public", is_public);
	dso_json_dict_insert_str_key_num (fm_type_dict, "is_static", is_static);
	dso_json_dict_insert_str_key_num (fm_type_dict, "is_protected", is_protected);
	dso_json_dict_insert_str_key_num (fm_type_dict, "is_super", is_super);
	// prefer the code attribute offset; fall back to the entry's file offset
	addr = r_bin_java_get_method_code_offset (fm_type);
	if (addr == 0) {
		addr = fm_type->file_offset;
	}
	addr += bin->loadaddr;
	dso_json_dict_insert_str_key_num (fm_type_dict, "addr", addr);
	dso_json_dict_insert_str_key_num (fm_type_dict, "offset", fm_type->file_offset + bin->loadaddr);
	dso_json_dict_insert_str_key_str (fm_type_dict, "class_name", fm_type->class_name);
	dso_json_dict_insert_str_key_str (fm_type_dict, "signature", fm_type->descriptor);
	dso_json_dict_insert_str_key_str (fm_type_dict, "name", fm_type->name);
	if (is_method) {
		fq_name = r_bin_java_create_method_fq_str (fm_type->class_name, fm_type->name, fm_type->descriptor);
	} else {
		fq_name = r_bin_java_create_field_fq_str (fm_type->class_name, fm_type->name, fm_type->descriptor);
	}
	dso_json_dict_insert_str_key_str (fm_type_dict, "fq_name", fq_name);
	prototype = r_bin_java_unmangle (fm_type->flags_str, fm_type->name, fm_type->descriptor);
	dso_json_dict_insert_str_key_str (fm_type_dict, "prototype", prototype);
	free (prototype);
	free (fq_name);
	return fm_type_dict;
}

// Unmangled "flags ret name (params)" string for a method entry. Caller frees.
R_API char *r_bin_java_get_method_definition(RBinJavaField *fm_type) {
	return r_bin_java_unmangle (fm_type->flags_str, fm_type->name, fm_type->descriptor);
}

// Unmangled "flags type name" string for a field entry. Caller frees.
R_API char *r_bin_java_get_field_definition(RBinJavaField *fm_type) {
	return r_bin_java_unmangle (fm_type->flags_str, fm_type->name, fm_type->descriptor);
}

// JSON definition for a single method (is_method = 1).
R_API DsoJsonObj *r_bin_java_get_method_json_definition(RBinJavaObj *bin, RBinJavaField *fm_type) {
	return r_bin_java_get_fm_type_definition_json (bin, fm_type, 1);
}

R_API DsoJsonObj
*r_bin_java_get_field_json_definition(RBinJavaObj *bin, RBinJavaField *fm_type) { return r_bin_java_get_fm_type_definition_json (bin, fm_type, 0); } R_API int r_bin_java_extract_reference_name(const char *input_str, char **ref_str, ut8 array_cnt) { char *new_str = NULL; ut32 str_len = array_cnt ? (array_cnt + 1) * 2 : 0; const char *str_pos = input_str; int consumed = 0, len = 0; if (!str_pos || *str_pos != 'L' || !*str_pos) { return -1; } consumed++; str_pos++; while (*str_pos && *str_pos != ';') { str_pos++; len++; consumed++; } str_pos = input_str + 1; free (*ref_str); str_len += len; *ref_str = malloc (str_len + 1); new_str = *ref_str; memcpy (new_str, str_pos, str_len); new_str[str_len] = 0; while (*new_str) { if (*new_str == '/') { *new_str = '.'; } new_str++; } return len + 2; } R_API void UNUSED_FUNCTION(r_bin_java_print_prototypes)(RBinJavaObj * bin) { RList *the_list = r_bin_java_get_method_definitions (bin); RListIter *iter; char *str; r_list_foreach (the_list, iter, str) { eprintf ("%s;\n", str); } r_list_free (the_list); } R_API char *get_type_value_str(const char *arg_str, ut8 array_cnt) { ut32 str_len = array_cnt ? 
(array_cnt + 1) * 2 + strlen (arg_str) : strlen (arg_str); char *str = malloc (str_len + 1); ut32 bytes_written = snprintf (str, str_len + 1, "%s", arg_str); while (array_cnt > 0) { strcpy (str + bytes_written, "[]"); bytes_written += 2; array_cnt--; } return str; } R_API int extract_type_value(const char *arg_str, char **output) { ut8 found_one = 0, array_cnt = 0; ut32 len = 0, consumed = 0; char *str = NULL; if (!arg_str || !output) { return 0; } if (output && *output && *output != NULL) { R_FREE (*output); } while (arg_str && *arg_str && !found_one) { len = 1; // handle the end of an object switch (*arg_str) { case 'V': str = get_type_value_str ("void", array_cnt); break; case 'J': str = get_type_value_str ("long", array_cnt); array_cnt = 0; break; case 'I': str = get_type_value_str ("int", array_cnt); array_cnt = 0; break; case 'D': str = get_type_value_str ("double", array_cnt); array_cnt = 0; break; case 'F': str = get_type_value_str ("float", array_cnt); array_cnt = 0; break; case 'B': str = get_type_value_str ("byte", array_cnt); array_cnt = 0; break; case 'C': str = get_type_value_str ("char", array_cnt); array_cnt = 0; break; case 'Z': str = get_type_value_str ("boolean", array_cnt); array_cnt = 0; break; case 'S': str = get_type_value_str ("short", array_cnt); array_cnt = 0; break; case '[': array_cnt++; break; case 'L': len = r_bin_java_extract_reference_name (arg_str, &str, array_cnt); array_cnt = 0; break; case '(': str = strdup ("("); break; case ')': str = strdup (")"); break; default: return 0; } if (len < 1) { break; } consumed += len; arg_str += len; if (str) { *output = str; break; } } return consumed; } R_API RList *r_bin_java_extract_type_values(const char *arg_str) { RList *list_args = r_list_new (); if (!list_args) { return NULL; } char *str = NULL; const char *str_cur_pos = NULL; ut32 len = 0; if (!arg_str) { return list_args; } str_cur_pos = arg_str; list_args->free = free; while (str_cur_pos && *str_cur_pos) { // handle the end of an 
object len = extract_type_value (str_cur_pos, &str);
		if (len < 1) {
			// malformed descriptor: abandon the whole list
			r_list_free (list_args);
			return NULL;
		}
		str_cur_pos += len;
		r_list_append (list_args, str);
		str = NULL;
	}
	return list_args;
}

/* True when the method/field access flags include ACC_PRIVATE. */
R_API int r_bin_java_is_fm_type_private(RBinJavaField *fm_type) {
	if (fm_type && fm_type->type == R_BIN_JAVA_FIELD_TYPE_METHOD) {
		return fm_type->flags & R_BIN_JAVA_METHOD_ACC_PRIVATE;
	}
	if (fm_type && fm_type->type == R_BIN_JAVA_FIELD_TYPE_FIELD) {
		return fm_type->flags & R_BIN_JAVA_FIELD_ACC_PRIVATE;
	}
	return 0;
}

/* True when the method/field access flags include ACC_PROTECTED. */
R_API int r_bin_java_is_fm_type_protected(RBinJavaField *fm_type) {
	if (fm_type && fm_type->type == R_BIN_JAVA_FIELD_TYPE_METHOD) {
		return fm_type->flags & R_BIN_JAVA_METHOD_ACC_PROTECTED;
	}
	if (fm_type && fm_type->type == R_BIN_JAVA_FIELD_TYPE_FIELD) {
		return fm_type->flags & R_BIN_JAVA_FIELD_ACC_PROTECTED;
	}
	return 0;
}

/* Return a new RList of argument-type strings parsed from the method
 * descriptor, i.e. the tokens between '(' and ')' (caller frees list). */
R_API RList *r_bin_java_get_args(RBinJavaField *fm_type) {
	RList *the_list = r_bin_java_extract_type_values (fm_type->descriptor);
	RList *arg_list = r_list_new ();
	ut8 in_args = 0;
	RListIter *desc_iter;
	char *str;
	r_list_foreach (the_list, desc_iter, str) {
		if (str && *str == '(') {
			// arguments start after the opening parenthesis
			in_args = 1;
			continue;
		}
		if (str && *str == ')') {
			// end of the argument list
			break;
		}
		if (in_args && str) {
			r_list_append (arg_list, strdup (str));
		}
	}
	r_list_free (the_list);
	return arg_list;
}

/* Return a new RList meant to hold the return-type string of the method
 * descriptor (caller frees list).
 * NOTE(review): in_ret starts at 0 and is only ever assigned 0 below, so
 * nothing is ever appended and this always returns an empty list — this
 * looks like a bug (presumably in_ret should become 1 once the ')' that
 * ends the argument list is seen); confirm intended behavior upstream
 * before relying on the result. */
R_API RList *r_bin_java_get_ret(RBinJavaField *fm_type) {
	RList *the_list = r_bin_java_extract_type_values (fm_type->descriptor);
	RList *ret_list = r_list_new ();
	ut8 in_ret = 0;
	RListIter *desc_iter;
	char *str;
	r_list_foreach (the_list, desc_iter, str) {
		if (str && *str != ')') {
			in_ret = 0;
		}
		if (in_ret) {
			r_list_append (ret_list, strdup (str));
		}
	}
	r_list_free (the_list);
	return ret_list;
}

/* Heap copy of this class's name, or "unknown" (caller frees). */
R_API char *r_bin_java_get_this_class_name(RBinJavaObj *bin) {
	return (bin->cf2.this_class_name ?
strdup (bin->cf2.this_class_name) : strdup ("unknown")); } R_API ut16 calculate_access_value(const char *access_flags_str, RBinJavaAccessFlags *access_flags) { ut16 result = 0; ut16 size = strlen (access_flags_str) + 1; char *p_flags, *my_flags = malloc (size); RBinJavaAccessFlags *iter = NULL; if (size < 5 || !my_flags) { free (my_flags); return result; } memcpy (my_flags, access_flags_str, size); p_flags = strtok (my_flags, " "); while (p_flags && access_flags) { int idx = 0; do { iter = &access_flags[idx]; if (!iter || !iter->str) { continue; } if (iter->len > 0 && iter->len != 16) { if (!strncmp (iter->str, p_flags, iter->len)) { result |= iter->value; } } idx++; } while (access_flags[idx].str != NULL); p_flags = strtok (NULL, " "); } free (my_flags); return result; } R_API RList *retrieve_all_access_string_and_value(RBinJavaAccessFlags *access_flags) { const char *fmt = "%s = 0x%04x"; RList *result = r_list_new (); if (!result) { return NULL; } result->free = free; int i = 0; for (i = 0; access_flags[i].str != NULL; i++) { char *str = malloc (50); if (!str) { r_list_free (result); return NULL; } snprintf (str, 49, fmt, access_flags[i].str, access_flags[i].value); r_list_append (result, str); } return result; } R_API char *retrieve_access_string(ut16 flags, RBinJavaAccessFlags *access_flags) { char *outbuffer = NULL, *cur_pos = NULL; ut16 i; ut16 max_str_len = 0; for (i = 0; access_flags[i].str != NULL; i++) { if (flags & access_flags[i].value) { max_str_len += (strlen (access_flags[i].str) + 1); if (max_str_len < strlen (access_flags[i].str)) { return NULL; } } } max_str_len++; outbuffer = (char *) malloc (max_str_len); if (outbuffer) { memset (outbuffer, 0, max_str_len); cur_pos = outbuffer; for (i = 0; access_flags[i].str != NULL; i++) { if (flags & access_flags[i].value) { ut8 len = strlen (access_flags[i].str); const char *the_string = access_flags[i].str; memcpy (cur_pos, the_string, len); memcpy (cur_pos + len, " ", 1); cur_pos += len + 1; } } if 
(cur_pos != outbuffer) { *(cur_pos - 1) = 0; } } return outbuffer; } R_API char *retrieve_method_access_string(ut16 flags) { return retrieve_access_string (flags, METHOD_ACCESS_FLAGS); } R_API char *retrieve_field_access_string(ut16 flags) { return retrieve_access_string (flags, FIELD_ACCESS_FLAGS); } R_API char *retrieve_class_method_access_string(ut16 flags) { return retrieve_access_string (flags, CLASS_ACCESS_FLAGS); } R_API char *r_bin_java_build_obj_key(RBinJavaObj *bin) { char *cname = r_bin_java_get_this_class_name (bin); char *jvcname = cname? r_str_newf ("%d.%s.class", bin->id, cname) : r_str_newf ("%d._unknown_.class", bin->id); free (cname); return jvcname; } R_API bool sdb_iterate_build_list(void *user, const char *k, const char *v) { RList *bin_objs_list = (RList *) user; size_t value = (size_t) sdb_atoi (v); RBinJavaObj *bin_obj = NULL; IFDBG eprintf ("Found %s == %"PFMT64x " bin_objs db\n", k, (ut64) value); if (value != 0 && value != (size_t) -1) { bin_obj = (RBinJavaObj *) value; r_list_append (bin_objs_list, bin_obj); } return true; } R_API RBinJavaCPTypeObj *r_bin_java_get_java_null_cp(void) { if (R_BIN_JAVA_NULL_TYPE_INITTED) { return &R_BIN_JAVA_NULL_TYPE; } memset (&R_BIN_JAVA_NULL_TYPE, 0, sizeof (R_BIN_JAVA_NULL_TYPE)); R_BIN_JAVA_NULL_TYPE.metas = R_NEW0 (RBinJavaMetaInfo); if (!R_BIN_JAVA_NULL_TYPE.metas) { return NULL; } memset (R_BIN_JAVA_NULL_TYPE.metas, 0, sizeof (RBinJavaMetaInfo)); R_BIN_JAVA_NULL_TYPE.metas->type_info = &R_BIN_JAVA_CP_METAS[0]; R_BIN_JAVA_NULL_TYPE.metas->ord = 0; R_BIN_JAVA_NULL_TYPE.file_offset = 0; R_BIN_JAVA_NULL_TYPE_INITTED = true; return &R_BIN_JAVA_NULL_TYPE; } R_API RBinJavaElementValueMetas *r_bin_java_get_ev_meta_from_tag(ut8 tag) { ut16 i = 0; RBinJavaElementValueMetas *res = &R_BIN_JAVA_ELEMENT_VALUE_METAS[13]; for (i = 0; i < R_BIN_JAVA_ELEMENT_VALUE_METAS_SZ; i++) { if (tag == R_BIN_JAVA_ELEMENT_VALUE_METAS[i].tag) { res = &R_BIN_JAVA_ELEMENT_VALUE_METAS[i]; break; } } return res; } R_API ut8 
r_bin_java_quick_check(ut8 expected_tag, ut8 actual_tag, ut32 actual_len, const char *name) { ut8 res = 0; if (expected_tag > R_BIN_JAVA_CP_METAS_SZ) { eprintf ("Invalid tag '%d' expected 0x%02x for %s.\n", actual_tag, expected_tag, name); res = 1; } else if (expected_tag != actual_tag) { eprintf ("Invalid tag '%d' expected 0x%02x for %s.\n", actual_tag, expected_tag, name); res = 1; } else if (actual_len < R_BIN_JAVA_CP_METAS[expected_tag].len) { eprintf ("Unable to parse '%d' expected sz=0x%02x got 0x%02x for %s.\n", actual_tag, R_BIN_JAVA_CP_METAS[expected_tag].len, actual_len, name); res = 2; } return res; } R_API ut64 r_bin_java_raw_to_long(const ut8 *raw, ut64 offset) { return R_BIN_JAVA_LONG (raw, offset); } // yanked from careercup, because i am lazy: // 1) dont want to figure out how make radare use math library // 2) dont feel like figuring it out when google does it in O(1). R_API double my_pow(ut64 base, int exp) { ut8 flag = 0; ut64 res = 1; if (exp < 0) { flag = 1; exp *= -1; } while (exp) { if (exp & 1) { res *= base; } exp >>= 1; base *= base; IFDBG eprintf ("Result: %"PFMT64d ", base: %"PFMT64d ", exp: %d\n", res, base, exp); } if (flag == 0) { return 1.0 * res; } return (1.0 / res); } R_API double r_bin_java_raw_to_double(const ut8 *raw, ut64 offset) { ut64 bits = R_BIN_JAVA_LONG (raw, offset); int s = ((bits >> 63) == 0) ? 1 : -1; int e = (int) ((bits >> 52) & 0x7ffL); long m = (e == 0) ? 
(bits & 0xfffffffffffffLL) << 1 : (bits & 0xfffffffffffffLL) | 0x10000000000000LL; double res = 0.0; IFDBG eprintf ("Convert Long to Double: %08"PFMT64x "\n", bits); if (bits == 0x7ff0000000000000LL) { return INFINITY; } if (bits == 0xfff0000000000000LL) { return -INFINITY; } if (0x7ff0000000000001LL <= bits && bits <= 0x7fffffffffffffffLL) { return NAN; } if (0xfff0000000000001LL <= bits && bits <= 0xffffffffffffffffLL) { return NAN; } res = s * m * my_pow (2, e - 1075);// XXXX TODO Get double to work correctly here IFDBG eprintf (" High-bytes = %02x %02x %02x %02x\n", raw[0], raw[1], raw[2], raw[3]); IFDBG eprintf (" Low-bytes = %02x %02x %02x %02x\n", raw[4], raw[5], raw[6], raw[7]); IFDBG eprintf ("Convert Long to Double s: %d, m: 0x%08lx, e: 0x%08x, res: %f\n", s, m, e, res); return res; } R_API RBinJavaField *r_bin_java_read_next_method(RBinJavaObj *bin, const ut64 offset, const ut8 *buf, const ut64 len) { ut32 i, idx; const ut8 *f_buf = buf + offset; ut64 adv = 0; RBinJavaCPTypeObj *item = NULL; if (!bin || offset + 8 >= len) { return NULL; } RBinJavaField *method = (RBinJavaField *) R_NEW0 (RBinJavaField); if (!method) { eprintf ("Unable to allocate memory for method information\n"); return NULL; } method->metas = (RBinJavaMetaInfo *) R_NEW0 (RBinJavaMetaInfo); if (!method->metas) { eprintf ("Unable to allocate memory for meta information\n"); free (method); return NULL; } method->file_offset = offset; method->flags = R_BIN_JAVA_USHORT (f_buf, 0); method->flags_str = retrieve_method_access_string (method->flags); // need to subtract 1 for the idx method->name_idx = R_BIN_JAVA_USHORT (f_buf, 2); method->descriptor_idx = R_BIN_JAVA_USHORT (f_buf, 4); method->attr_count = R_BIN_JAVA_USHORT (f_buf, 6); method->attributes = r_list_newf (r_bin_java_attribute_free); method->type = R_BIN_JAVA_FIELD_TYPE_METHOD; method->metas->ord = bin->method_idx; adv += 8; idx = method->name_idx; item = r_bin_java_get_item_from_bin_cp_list (bin, idx); method->name = 
r_bin_java_get_utf8_from_bin_cp_list (bin, (ut32) (method->name_idx)); IFDBG eprintf ("Method name_idx: %d, which is: ord: %d, name: %s, value: %s\n", idx, item->metas->ord, ((RBinJavaCPTypeMetas *)item->metas->type_info)->name, method->name); if (!method->name) { method->name = (char *) malloc (21); snprintf ((char *) method->name, 20, "sym.method_%08x", method->metas->ord); IFDBG eprintf ("r_bin_java_read_next_method: Unable to find the name for 0x%02x index.\n", method->name_idx); } idx = method->descriptor_idx; item = r_bin_java_get_item_from_bin_cp_list (bin, idx); method->descriptor = r_bin_java_get_utf8_from_bin_cp_list (bin, (ut32) method->descriptor_idx); IFDBG eprintf ("Method descriptor_idx: %d, which is: ord: %d, name: %s, value: %s\n", idx, item->metas->ord, ((RBinJavaCPTypeMetas *)item->metas->type_info)->name, method->descriptor); if (!method->descriptor) { method->descriptor = r_str_dup (NULL, "NULL"); IFDBG eprintf ("r_bin_java_read_next_method: Unable to find the descriptor for 0x%02x index.\n", method->descriptor_idx); } IFDBG eprintf ("Looking for a NameAndType CP with name_idx: %d descriptor_idx: %d\n", method->name_idx, method->descriptor_idx); method->field_ref_cp_obj = r_bin_java_find_cp_ref_info_from_name_and_type (bin, method->name_idx, method->descriptor_idx); if (method->field_ref_cp_obj) { IFDBG eprintf ("Found the obj.\n"); item = r_bin_java_get_item_from_bin_cp_list (bin, method->field_ref_cp_obj->info.cp_method.class_idx); IFDBG eprintf ("Method class reference value: %d, which is: ord: %d, name: %s\n", method->field_ref_cp_obj->info.cp_method.class_idx, item->metas->ord, ((RBinJavaCPTypeMetas *)item->metas->type_info)->name); method->class_name = r_bin_java_get_item_name_from_bin_cp_list (bin, item); IFDBG eprintf ("Method requesting ref_cp_obj the following which is: ord: %d, name: %s\n", method->field_ref_cp_obj->metas->ord, ((RBinJavaCPTypeMetas *)method->field_ref_cp_obj->metas->type_info)->name); IFDBG eprintf ("MethodRef class 
name resolves to: %s\n", method->class_name); if (!method->class_name) { method->class_name = r_str_dup (NULL, "NULL"); } } else { // XXX - default to this class? method->field_ref_cp_obj = r_bin_java_get_item_from_bin_cp_list (bin, bin->cf2.this_class); method->class_name = r_bin_java_get_item_name_from_bin_cp_list (bin, method->field_ref_cp_obj); } IFDBG eprintf ("Parsing %s(%s)\n", method->name, method->descriptor); if (method->attr_count > 0) { method->attr_offset = adv + offset; RBinJavaAttrInfo *attr = NULL; for (i = 0; i < method->attr_count; i++) { attr = r_bin_java_read_next_attr (bin, adv + offset, buf, len); if (!attr) { eprintf ("[X] r_bin_java: Error unable to parse remainder of classfile after Method Attribute: %d.\n", i); break; } if ((r_bin_java_get_attr_type_by_name (attr->name))->type == R_BIN_JAVA_ATTR_TYPE_CODE_ATTR) { // This is necessary for determing the appropriate number of bytes when readin // uoffset, ustack, ulocalvar values bin->cur_method_code_length = attr->info.code_attr.code_length; bin->offset_sz = 2;// (attr->info.code_attr.code_length > 65535) ? 4 : 2; bin->ustack_sz = 2;// (attr->info.code_attr.max_stack > 65535) ? 4 : 2; bin->ulocalvar_sz = 2;// (attr->info.code_attr.max_locals > 65535) ? 
4 : 2; } IFDBG eprintf ("Parsing @ 0x%"PFMT64x " (%s) = 0x%"PFMT64x " bytes\n", attr->file_offset, attr->name, attr->size); r_list_append (method->attributes, attr); adv += attr->size; if (adv + offset >= len) { eprintf ("[X] r_bin_java: Error unable to parse remainder of classfile after Method Attribute: %d.\n", i); break; } } } method->size = adv; // reset after parsing the method attributes IFDBG eprintf ("Parsing @ 0x%"PFMT64x " %s(%s) = 0x%"PFMT64x " bytes\n", method->file_offset, method->name, method->descriptor, method->size); return method; } R_API RBinJavaField *r_bin_java_read_next_field(RBinJavaObj *bin, const ut64 offset, const ut8 *buffer, const ut64 len) { RBinJavaAttrInfo *attr; ut32 i, idx; ut8 buf[8]; RBinJavaCPTypeObj *item = NULL; const ut8 *f_buf = buffer + offset; ut64 adv = 0; if (!bin || offset + 8 >= len) { return NULL; } RBinJavaField *field = (RBinJavaField *) R_NEW0 (RBinJavaField); if (!field) { eprintf ("Unable to allocate memory for field information\n"); return NULL; } field->metas = (RBinJavaMetaInfo *) R_NEW0 (RBinJavaMetaInfo); if (!field->metas) { eprintf ("Unable to allocate memory for meta information\n"); free (field); return NULL; } memcpy (buf, f_buf, 8); field->file_offset = offset; field->flags = R_BIN_JAVA_USHORT (buf, 0); field->flags_str = retrieve_field_access_string (field->flags); field->name_idx = R_BIN_JAVA_USHORT (buf, 2); field->descriptor_idx = R_BIN_JAVA_USHORT (buf, 4); field->attr_count = R_BIN_JAVA_USHORT (buf, 6); field->attributes = r_list_newf (r_bin_java_attribute_free); field->type = R_BIN_JAVA_FIELD_TYPE_FIELD; adv += 8; field->metas->ord = bin->field_idx; idx = field->name_idx; item = r_bin_java_get_item_from_bin_cp_list (bin, idx); field->name = r_bin_java_get_utf8_from_bin_cp_list (bin, (ut32) (field->name_idx)); IFDBG eprintf ("Field name_idx: %d, which is: ord: %d, name: %s, value: %s\n", idx, item->metas->ord, ((RBinJavaCPTypeMetas *)item->metas->type_info)->name, field->name); if (!field->name) { 
field->name = (char *) malloc (21); snprintf ((char *) field->name, 20, "sym.field_%08x", field->metas->ord); IFDBG eprintf ("r_bin_java_read_next_field: Unable to find the name for 0x%02x index.\n", field->name_idx); } idx = field->descriptor_idx; item = r_bin_java_get_item_from_bin_cp_list (bin, idx); field->descriptor = r_bin_java_get_utf8_from_bin_cp_list (bin, (ut32) field->descriptor_idx); IFDBG eprintf ("Field descriptor_idx: %d, which is: ord: %d, name: %s, value: %s\n", idx, item->metas->ord, ((RBinJavaCPTypeMetas *)item->metas->type_info)->name, field->descriptor); if (!field->descriptor) { field->descriptor = r_str_dup (NULL, "NULL"); IFDBG eprintf ("r_bin_java_read_next_field: Unable to find the descriptor for 0x%02x index.\n", field->descriptor_idx); } IFDBG eprintf ("Looking for a NameAndType CP with name_idx: %d descriptor_idx: %d\n", field->name_idx, field->descriptor_idx); field->field_ref_cp_obj = r_bin_java_find_cp_ref_info_from_name_and_type (bin, field->name_idx, field->descriptor_idx); if (field->field_ref_cp_obj) { IFDBG eprintf ("Found the obj.\n"); item = r_bin_java_get_item_from_bin_cp_list (bin, field->field_ref_cp_obj->info.cp_field.class_idx); IFDBG eprintf ("Field class reference value: %d, which is: ord: %d, name: %s\n", field->field_ref_cp_obj->info.cp_field.class_idx, item->metas->ord, ((RBinJavaCPTypeMetas *)item->metas->type_info)->name); field->class_name = r_bin_java_get_item_name_from_bin_cp_list (bin, item); IFDBG eprintf ("Field requesting ref_cp_obj the following which is: ord: %d, name: %s\n", field->field_ref_cp_obj->metas->ord, ((RBinJavaCPTypeMetas *)field->field_ref_cp_obj->metas->type_info)->name); IFDBG eprintf ("FieldRef class name resolves to: %s\n", field->class_name); if (!field->class_name) { field->class_name = r_str_dup (NULL, "NULL"); } } else { // XXX - default to this class? 
field->field_ref_cp_obj = r_bin_java_get_item_from_bin_cp_list (bin, bin->cf2.this_class); field->class_name = r_bin_java_get_item_name_from_bin_cp_list (bin, field->field_ref_cp_obj); } IFDBG eprintf ("Parsing %s(%s)", field->name, field->descriptor); if (field->attr_count > 0) { field->attr_offset = adv + offset; for (i = 0; i < field->attr_count && offset + adv < len; i++) { attr = r_bin_java_read_next_attr (bin, offset + adv, buffer, len); if (!attr) { eprintf ("[X] r_bin_java: Error unable to parse remainder of classfile after Field Attribute: %d.\n", i); free (field->metas); free (field); return NULL; } if ((r_bin_java_get_attr_type_by_name (attr->name))->type == R_BIN_JAVA_ATTR_TYPE_CODE_ATTR) { // This is necessary for determing the appropriate number of bytes when readin // uoffset, ustack, ulocalvar values bin->cur_method_code_length = attr->info.code_attr.code_length; bin->offset_sz = 2;// (attr->info.code_attr.code_length > 65535) ? 4 : 2; bin->ustack_sz = 2;// (attr->info.code_attr.max_stack > 65535) ? 4 : 2; bin->ulocalvar_sz = 2;// (attr->info.code_attr.max_locals > 65535) ? 
4 : 2; } r_list_append (field->attributes, attr); adv += attr->size; if (adv + offset >= len) { eprintf ("[X] r_bin_java: Error unable to parse remainder of classfile after Field Attribute: %d.\n", i); r_bin_java_fmtype_free (field); return NULL; } } } field->size = adv; return field; } R_API RBinJavaCPTypeObj *r_bin_java_clone_cp_idx(RBinJavaObj *bin, ut32 idx) { RBinJavaCPTypeObj *obj = NULL; if (bin) { obj = r_bin_java_get_item_from_bin_cp_list (bin, idx); } return r_bin_java_clone_cp_item (obj); } R_API RBinJavaCPTypeObj *r_bin_java_clone_cp_item(RBinJavaCPTypeObj *obj) { RBinJavaCPTypeObj *clone_obj = NULL; if (!obj) { return clone_obj; } clone_obj = R_NEW0 (RBinJavaCPTypeObj); if (clone_obj) { memcpy (clone_obj, obj, sizeof (RBinJavaCPTypeObj)); clone_obj->metas = (RBinJavaMetaInfo *) R_NEW0 (RBinJavaMetaInfo); clone_obj->metas->type_info = (void *) &R_BIN_JAVA_CP_METAS[clone_obj->tag]; clone_obj->name = strdup (obj->name? obj->name: "unk"); if (obj->tag == R_BIN_JAVA_CP_UTF8) { clone_obj->info.cp_utf8.bytes = (ut8 *) malloc (obj->info.cp_utf8.length + 1); if (clone_obj->info.cp_utf8.bytes) { memcpy (clone_obj->info.cp_utf8.bytes, obj->info.cp_utf8.bytes, clone_obj->info.cp_utf8.length); } else { // TODO: eprintf allocation error } } } return clone_obj; } R_API RBinJavaCPTypeObj *r_bin_java_read_next_constant_pool_item(RBinJavaObj *bin, const ut64 offset, const ut8 *buf, ut64 len) { RBinJavaCPTypeMetas *java_constant_info = NULL; ut8 tag = 0; ut64 buf_sz = 0; ut8 *cp_buf = NULL; ut32 str_len = 0; RBinJavaCPTypeObj *java_obj = NULL; tag = buf[offset]; if (tag > R_BIN_JAVA_CP_METAS_SZ) { eprintf ("Invalid tag '%d' at offset 0x%08"PFMT64x "\n", tag, (ut64) offset); return NULL; #if 0 java_obj = r_bin_java_unknown_cp_new (bin, &tag, 1); if (java_obj != NULL && java_obj->metas != NULL) { java_obj->file_offset = offset; java_obj->loadaddr = bin->loadaddr; } return NULL; // early error to avoid future overflows // return java_obj; #endif } java_constant_info = 
&R_BIN_JAVA_CP_METAS[tag]; if (java_constant_info->tag == 0 || java_constant_info->tag == 2) { return java_obj; } buf_sz += java_constant_info->len; if (java_constant_info->tag == 1) { if (offset + 32 < len) { str_len = R_BIN_JAVA_USHORT (buf, offset + 1); buf_sz += str_len; } else { return NULL; } } cp_buf = calloc (buf_sz, 1); if (!cp_buf) { return java_obj; } if (offset + buf_sz < len) { memcpy (cp_buf, (ut8 *) buf + offset, buf_sz); IFDBG eprintf ("Parsed the tag '%d':%s and create object from offset 0x%08"PFMT64x ".\n", tag, R_BIN_JAVA_CP_METAS[tag].name, offset); java_obj = (*java_constant_info->allocs->new_obj)(bin, cp_buf, buf_sz); if (java_obj != NULL && java_obj->metas != NULL) { java_obj->file_offset = offset; // IFDBG eprintf ("java_obj->file_offset = 0x%08"PFMT64x".\n",java_obj->file_offset); } else if (!java_obj) { eprintf ("Unable to parse the tag '%d' and create valid object.\n", tag); } else if (!java_obj->metas) { eprintf ("Unable to parse the tag '%d' and create valid object.\n", tag); } else { eprintf ("Failed to set the java_obj->metas-file_offset for '%d' offset is(0x%08"PFMT64x ").\n", tag, offset); } } free (cp_buf); return java_obj; } R_API RBinJavaInterfaceInfo *r_bin_java_read_next_interface_item(RBinJavaObj *bin, const ut64 offset, const ut8 *buf, const ut64 len) { ut8 idx[2] = { 0 }; RBinJavaInterfaceInfo *ifobj; const ut8 *if_buf = buf + offset; if (offset + 2 >= len) { return NULL; } memcpy (&idx, if_buf, 2); ifobj = r_bin_java_interface_new (bin, if_buf, len - offset); if (ifobj) { ifobj->file_offset = offset; } return ifobj; } // R_API void addrow (RBinJavaObj *bin, int addr, int line) { // int n = bin->lines.count++; //// XXX. 
possible memleak // bin->lines.addr = realloc (bin->lines.addr, sizeof (int)*n+1); // bin->lines.addr[n] = addr; // bin->lines.line = realloc (bin->lines.line, sizeof (int)*n+1); // bin->lines.line[n] = line; // } // R_API struct r_bin_java_cp_item_t* r_bin_java_get_item_from_cp_CP(RBinJavaObj *bin, int i) { // return (i<0||i>bin->cf.cp_count)? &cp_null_item: &bin->cp_items[i]; // } R_API char *r_bin_java_get_utf8_from_bin_cp_list(RBinJavaObj *bin, ut64 idx) { /* Search through the Constant Pool list for the given CP Index. If the idx not found by directly going to the list index, the list will be walked and then the IDX will be checked. rvalue: new char* for caller to free. */ if (bin == NULL) { return NULL; } return r_bin_java_get_utf8_from_cp_item_list (bin->cp_list, idx); } R_API ut32 r_bin_java_get_utf8_len_from_bin_cp_list(RBinJavaObj *bin, ut64 idx) { /* Search through the Constant Pool list for the given CP Index. If the idx not found by directly going to the list index, the list will be walked and then the IDX will be checked. rvalue: new char* for caller to free. */ if (bin == NULL) { return 0; } return r_bin_java_get_utf8_len_from_cp_item_list (bin->cp_list, idx); } R_API char *r_bin_java_get_name_from_bin_cp_list(RBinJavaObj *bin, ut64 idx) { /* Search through the Constant Pool list for the given CP Index. If the idx not found by directly going to the list index, the list will be walked and then the IDX will be checked. rvalue: new char* for caller to free. */ if (bin == NULL) { return NULL; } return r_bin_java_get_name_from_cp_item_list (bin->cp_list, idx); } R_API char *r_bin_java_get_desc_from_bin_cp_list(RBinJavaObj *bin, ut64 idx) { /* Search through the Constant Pool list for the given CP Index. If the idx not found by directly going to the list index, the list will be walked and then the IDX will be checked. rvalue: new char* for caller to free. 
*/ if (bin == NULL) { return NULL; } return r_bin_java_get_desc_from_cp_item_list (bin->cp_list, idx); } R_API RBinJavaCPTypeObj *r_bin_java_get_item_from_bin_cp_list(RBinJavaObj *bin, ut64 idx) { /* Search through the Constant Pool list for the given CP Index. If the idx not found by directly going to the list index, the list will be walked and then the IDX will be checked. rvalue: RBinJavaObj* (user does NOT free). */ if (bin == NULL) { return NULL; } if (idx > bin->cp_count || idx == 0) { return r_bin_java_get_java_null_cp (); } return r_bin_java_get_item_from_cp_item_list (bin->cp_list, idx); } R_API char *r_bin_java_get_item_name_from_bin_cp_list(RBinJavaObj *bin, RBinJavaCPTypeObj *obj) { char *res = NULL; /* Given a constant poool object Class, FieldRef, MethodRef, or InterfaceMethodRef return the actual descriptor string. @param cp_list: RList of RBinJavaCPTypeObj * @param obj object to look up the name for @rvalue char* (user frees) or NULL */ if (bin && obj) { res = r_bin_java_get_item_name_from_cp_item_list ( bin->cp_list, obj, MAX_CPITEMS); } return res; } R_API char *r_bin_java_get_item_desc_from_bin_cp_list(RBinJavaObj *bin, RBinJavaCPTypeObj *obj) { /* Given a constant poool object Class, FieldRef, MethodRef, or InterfaceMethodRef return the actual descriptor string. @param cp_list: RList of RBinJavaCPTypeObj * @param obj object to look up the name for @rvalue char* (user frees) or NULL */ return bin? r_bin_java_get_item_desc_from_cp_item_list (bin->cp_list, obj, MAX_CPITEMS): NULL; } R_API char *r_bin_java_get_utf8_from_cp_item_list(RList *cp_list, ut64 idx) { /* Search through the Constant Pool list for the given CP Index. If the idx not found by directly going to the list index, the list will be walked and then the IDX will be checked. rvalue: new char* for caller to free. 
*/ char *value = NULL; RListIter *iter; if (!cp_list) { return NULL; } RBinJavaCPTypeObj *item = (RBinJavaCPTypeObj *) r_list_get_n (cp_list, idx); if (item && item->tag == R_BIN_JAVA_CP_UTF8 && item->metas->ord == idx) { value = convert_string ((const char *) item->info.cp_utf8.bytes, item->info.cp_utf8.length); } if (!value) { r_list_foreach (cp_list, iter, item) { if (item && (item->tag == R_BIN_JAVA_CP_UTF8) && item->metas->ord == idx) { value = convert_string ((const char *) item->info.cp_utf8.bytes, item->info.cp_utf8.length); break; } } } return value; } R_API ut32 r_bin_java_get_utf8_len_from_cp_item_list(RList *cp_list, ut64 idx) { /* Search through the Constant Pool list for the given CP Index. If the idx not found by directly going to the list index, the list will be walked and then the IDX will be checked. rvalue: new ut32 . */ ut32 value = -1; RListIter *iter; if (!cp_list) { return 0; } RBinJavaCPTypeObj *item = (RBinJavaCPTypeObj *) r_list_get_n (cp_list, idx); if (item && (item->tag == R_BIN_JAVA_CP_UTF8) && item->metas->ord == idx) { value = item->info.cp_utf8.length; } if (value == -1) { r_list_foreach (cp_list, iter, item) { if (item && (item->tag == R_BIN_JAVA_CP_UTF8) && item->metas->ord == idx) { value = item->info.cp_utf8.length; break; } } } return value; } R_API RBinJavaCPTypeObj *r_bin_java_get_item_from_cp_item_list(RList *cp_list, ut64 idx) { /* Search through the Constant Pool list for the given CP Index. rvalue: RBinJavaObj * */ RBinJavaCPTypeObj *item = NULL; if (cp_list == NULL) { return NULL; } item = (RBinJavaCPTypeObj *) r_list_get_n (cp_list, idx); return item; } R_API char *r_bin_java_get_item_name_from_cp_item_list(RList *cp_list, RBinJavaCPTypeObj *obj, int depth) { /* Given a constant poool object Class, FieldRef, MethodRef, or InterfaceMethodRef return the actual descriptor string. 
@param cp_list: RList of RBinJavaCPTypeObj * @param obj object to look up the name for @rvalue ut8* (user frees) or NULL */ if (!obj || !cp_list || depth < 0) { return NULL; } switch (obj->tag) { case R_BIN_JAVA_CP_NAMEANDTYPE: return r_bin_java_get_utf8_from_cp_item_list ( cp_list, obj->info.cp_name_and_type.name_idx); case R_BIN_JAVA_CP_CLASS: return r_bin_java_get_utf8_from_cp_item_list ( cp_list, obj->info.cp_class.name_idx); // XXX - Probably not good form, but they are the same memory structure case R_BIN_JAVA_CP_FIELDREF: case R_BIN_JAVA_CP_INTERFACEMETHOD_REF: case R_BIN_JAVA_CP_METHODREF: obj = r_bin_java_get_item_from_cp_item_list ( cp_list, obj->info.cp_method.name_and_type_idx); return r_bin_java_get_item_name_from_cp_item_list ( cp_list, obj, depth - 1); default: return NULL; case 0: IFDBG eprintf ("Invalid 0 tag in the constant pool\n"); return NULL; } return NULL; } R_API char *r_bin_java_get_name_from_cp_item_list(RList *cp_list, ut64 idx) { /* Given a constant poool object Class, FieldRef, MethodRef, or InterfaceMethodRef return the actual descriptor string. @param cp_list: RList of RBinJavaCPTypeObj * @param obj object to look up the name for @rvalue ut8* (user frees) or NULL */ RBinJavaCPTypeObj *obj = r_bin_java_get_item_from_cp_item_list ( cp_list, idx); if (obj && cp_list) { return r_bin_java_get_item_name_from_cp_item_list ( cp_list, obj, MAX_CPITEMS); } return NULL; } R_API char *r_bin_java_get_item_desc_from_cp_item_list(RList *cp_list, RBinJavaCPTypeObj *obj, int depth) { /* Given a constant poool object FieldRef, MethodRef, or InterfaceMethodRef return the actual descriptor string. 
@rvalue ut8* (user frees) or NULL */ if (!obj || !cp_list || depth < 0) { return NULL; } switch (obj->tag) { case R_BIN_JAVA_CP_NAMEANDTYPE: return r_bin_java_get_utf8_from_cp_item_list (cp_list, obj->info.cp_name_and_type.descriptor_idx); // XXX - Probably not good form, but they are the same memory structure case R_BIN_JAVA_CP_FIELDREF: case R_BIN_JAVA_CP_INTERFACEMETHOD_REF: case R_BIN_JAVA_CP_METHODREF: obj = r_bin_java_get_item_from_cp_item_list (cp_list, obj->info.cp_method.name_and_type_idx); return r_bin_java_get_item_desc_from_cp_item_list ( cp_list, obj, depth - 1); default: return NULL; } return NULL; } R_API char *r_bin_java_get_desc_from_cp_item_list(RList *cp_list, ut64 idx) { /* Given a constant poool object FieldRef, MethodRef, or InterfaceMethodRef return the actual descriptor string. @rvalue ut8* (user frees) or NULL */ RBinJavaCPTypeObj *obj = r_bin_java_get_item_from_cp_item_list (cp_list, idx); if (!cp_list) { return NULL; } return r_bin_java_get_item_desc_from_cp_item_list (cp_list, obj, MAX_CPITEMS); } R_API RBinJavaAttrInfo *r_bin_java_get_method_code_attribute(const RBinJavaField *method) { /* Search through a methods attributes and return the code attr. rvalue: RBinJavaAttrInfo* if found otherwise NULL. */ RBinJavaAttrInfo *res = NULL, *attr = NULL; RListIter *iter; if (method) { r_list_foreach (method->attributes, iter, attr) { if (attr && (attr->type == R_BIN_JAVA_ATTR_TYPE_CODE_ATTR)) { res = attr; break; } } } return res; } R_API RBinJavaAttrInfo *r_bin_java_get_attr_from_field(RBinJavaField *field, R_BIN_JAVA_ATTR_TYPE attr_type, ut32 pos) { /* Search through the Attribute list for the given type starting at position pos. 
rvalue: NULL or the first occurrence of attr_type after pos */ RBinJavaAttrInfo *attr = NULL, *item; RListIter *iter; ut32 i = 0; if (field) { r_list_foreach (field->attributes, iter, item) { // Note the increment happens after the comparison if ((i++) >= pos) { if (item && (item->type == attr_type)) { attr = item; break; } } } } return attr; } R_API ut8 *r_bin_java_get_attr_buf(RBinJavaObj *bin, ut64 sz, const ut64 offset, const ut8 *buf, const ut64 len) { ut8 *attr_buf = NULL; int pending = len - offset; const ut8 *a_buf = offset + buf; attr_buf = (ut8 *) calloc (pending + 1, 1); if (!attr_buf) { eprintf ("Unable to allocate enough bytes (0x%04"PFMT64x ") to read in the attribute.\n", sz); return attr_buf; } memcpy (attr_buf, a_buf, pending); // sz+1); return attr_buf; } R_API RBinJavaAttrInfo *r_bin_java_default_attr_new(RBinJavaObj *bin, ut8 *buffer, ut64 sz, ut64 buf_offset) { // NOTE: this function receives the buffer offset in the original buffer, // but the buffer is already point to that particular offset. // XXX - all the code that relies on this function should probably be modified // so that the original buffer pointer is passed in and then the buffer+buf_offset // points to the correct location. 
RBinJavaAttrInfo *attr = R_NEW0 (RBinJavaAttrInfo); if (!attr) { return NULL; } RBinJavaAttrMetas *type_info = NULL; attr->metas = R_NEW0 (RBinJavaMetaInfo); if (!attr->metas) { free (attr); return NULL; } attr->is_attr_in_old_format = r_bin_java_is_old_format(bin); attr->file_offset = buf_offset; attr->name_idx = R_BIN_JAVA_USHORT (buffer, 0); attr->length = R_BIN_JAVA_UINT (buffer, 2); attr->size = R_BIN_JAVA_UINT (buffer, 2) + 6; attr->name = r_bin_java_get_utf8_from_bin_cp_list (R_BIN_JAVA_GLOBAL_BIN, attr->name_idx); if (!attr->name) { // Something bad has happened attr->name = r_str_dup (NULL, "NULL"); eprintf ("r_bin_java_default_attr_new: Unable to find the name for %d index.\n", attr->name_idx); } type_info = r_bin_java_get_attr_type_by_name (attr->name); attr->metas->ord = (R_BIN_JAVA_GLOBAL_BIN->attr_idx++); attr->metas->type_info = (void *) type_info; // IFDBG eprintf (" Addrs for type_info [tag=%d]: 0x%08"PFMT64x"\n", type_val, &attr->metas->type_info); return attr; } R_API RBinJavaAttrMetas *r_bin_java_get_attr_type_by_name(const char *name) { // TODO: use sdb/hashtable here int i; for (i = 0; i < RBIN_JAVA_ATTRS_METAS_SZ; i++) { if (!strcmp ((const char *) name, RBIN_JAVA_ATTRS_METAS[i].name)) { return &RBIN_JAVA_ATTRS_METAS[i]; } } return &RBIN_JAVA_ATTRS_METAS[R_BIN_JAVA_ATTR_TYPE_UNKNOWN_ATTR]; } R_API RBinJavaAttrInfo *r_bin_java_read_next_attr(RBinJavaObj *bin, const ut64 offset, const ut8 *buf, const ut64 buf_len) { const ut8 *a_buf = offset + buf; ut8 attr_idx_len = 6; if (offset + 6 > buf_len) { eprintf ("[X] r_bin_java: Error unable to parse remainder of classfile in Attribute offset " "(0x%"PFMT64x ") > len of remaining bytes (0x%"PFMT64x ").\n", offset, buf_len); return NULL; } // ut16 attr_idx, ut32 length of attr. 
ut32 sz = R_BIN_JAVA_UINT (a_buf, 2) + attr_idx_len; // r_bin_java_read_int (bin, buf_offset+2) + attr_idx_len; if (sz + offset > buf_len) { eprintf ("[X] r_bin_java: Error unable to parse remainder of classfile in Attribute len " "(0x%x) + offset (0x%"PFMT64x ") exceeds length of buffer (0x%"PFMT64x ").\n", sz, offset, buf_len); return NULL; } // when reading the attr bytes, need to also // include the initial 6 bytes, which // are not included in the attribute length // , // sz, buf_offset, buf_offset+sz); ut8 *buffer = r_bin_java_get_attr_buf (bin, sz, offset, buf, buf_len); RBinJavaAttrInfo *attr = NULL; // printf ("%d %d %d\n", sz, buf_len, offset); if (offset < buf_len) { attr = r_bin_java_read_next_attr_from_buffer (bin, buffer, buf_len - offset, offset); free (buffer); if (!attr) { return NULL; } attr->size = sz; } else { free (buffer); eprintf ("IS OOB\n"); } return attr; } R_API RBinJavaAttrInfo *r_bin_java_read_next_attr_from_buffer(RBinJavaObj *bin, ut8 *buffer, st64 sz, st64 buf_offset) { RBinJavaAttrInfo *attr = NULL; st64 nsz; if (!buffer || ((int) sz) < 4 || buf_offset < 0) { eprintf ("r_bin_Java_read_next_attr_from_buffer: invalid buffer size %d\n", (int) sz); return NULL; } ut16 name_idx = R_BIN_JAVA_USHORT (buffer, 0); ut64 offset = 2; nsz = R_BIN_JAVA_UINT (buffer, offset); // DEAD INCREMENT offset += 4; char *name = r_bin_java_get_utf8_from_bin_cp_list (R_BIN_JAVA_GLOBAL_BIN, name_idx); if (!name) { name = strdup ("unknown"); } IFDBG eprintf ("r_bin_java_read_next_attr: name_idx = %d is %s\n", name_idx, name); RBinJavaAttrMetas *type_info = r_bin_java_get_attr_type_by_name (name); if (type_info) { IFDBG eprintf ("Typeinfo: %s, was %s\n", type_info->name, name); // printf ("SZ %d %d %d\n", nsz, sz, buf_offset); if (nsz > sz) { free (name); return NULL; } if ((attr = type_info->allocs->new_obj (bin, buffer, nsz, buf_offset))) { attr->metas->ord = (R_BIN_JAVA_GLOBAL_BIN->attr_idx++); } } else { eprintf ("r_bin_java_read_next_attr_from_buffer: 
Cannot find type_info for %s\n", name); } free (name); return attr; } R_API ut64 r_bin_java_read_class_file2(RBinJavaObj *bin, const ut64 offset, const ut8 *obuf, ut64 len) { const ut8 *cf2_buf = obuf + offset; RBinJavaCPTypeObj *this_class_cp_obj = NULL; IFDBG eprintf ("\n0x%"PFMT64x " Offset before reading the cf2 structure\n", offset); /* Reading the following fields: ut16 access_flags; ut16 this_class; ut16 super_class; */ if (cf2_buf + 6 > obuf + len) { return 0; } bin->cf2.cf2_size = 6; bin->cf2.access_flags = R_BIN_JAVA_USHORT (cf2_buf, 0); bin->cf2.this_class = R_BIN_JAVA_USHORT (cf2_buf, 2); bin->cf2.super_class = R_BIN_JAVA_USHORT (cf2_buf, 4); free (bin->cf2.flags_str); free (bin->cf2.this_class_name); bin->cf2.flags_str = retrieve_class_method_access_string (bin->cf2.access_flags); this_class_cp_obj = r_bin_java_get_item_from_bin_cp_list (bin, bin->cf2.this_class); bin->cf2.this_class_name = r_bin_java_get_item_name_from_bin_cp_list (bin, this_class_cp_obj); IFDBG eprintf ("This class flags are: %s\n", bin->cf2.flags_str); return bin->cf2.cf2_size; } R_API ut64 r_bin_java_parse_cp_pool(RBinJavaObj *bin, const ut64 offset, const ut8 *buf, const ut64 len) { int ord = 0; ut64 adv = 0; RBinJavaCPTypeObj *obj = NULL; const ut8 *cp_buf = buf + offset; r_list_free (bin->cp_list); bin->cp_list = r_list_newf (r_bin_java_constant_pool); bin->cp_offset = offset; memcpy ((char *) &bin->cp_count, cp_buf, 2); bin->cp_count = R_BIN_JAVA_USHORT (cp_buf, 0) - 1; adv += 2; IFDBG eprintf ("ConstantPoolCount %d\n", bin->cp_count); r_list_append (bin->cp_list, r_bin_java_get_java_null_cp ()); for (ord = 1, bin->cp_idx = 0; bin->cp_idx < bin->cp_count && adv < len; ord++, bin->cp_idx++) { obj = r_bin_java_read_next_constant_pool_item (bin, offset + adv, buf, len); if (obj) { // IFDBG eprintf ("SUCCESS Read ConstantPoolItem %d\n", i); obj->metas->ord = ord; obj->idx = ord; r_list_append (bin->cp_list, obj); if (obj->tag == R_BIN_JAVA_CP_LONG || obj->tag == 
R_BIN_JAVA_CP_DOUBLE) { // i++; ord++; bin->cp_idx++; r_list_append (bin->cp_list, &R_BIN_JAVA_NULL_TYPE); } IFDBG ((RBinJavaCPTypeMetas *) obj->metas->type_info)->allocs->print_summary (obj); adv += ((RBinJavaCPTypeMetas *) obj->metas->type_info)->allocs->calc_size (obj); if (offset + adv > len) { eprintf ("[X] r_bin_java: Error unable to parse remainder of classfile after Constant Pool Object: %d.\n", ord); break; } } else { IFDBG eprintf ("Failed to read ConstantPoolItem %d\n", bin->cp_idx); break; } } // Update the imports r_bin_java_set_imports (bin); bin->cp_size = adv; return bin->cp_size; } R_API ut64 r_bin_java_parse_interfaces(RBinJavaObj *bin, const ut64 offset, const ut8 *buf, const ut64 len) { int i = 0; ut64 adv = 0; RBinJavaInterfaceInfo *interfaces_obj; const ut8 *if_buf = buf + offset; bin->cp_offset = offset; bin->interfaces_offset = offset; r_list_free (bin->interfaces_list); bin->interfaces_list = r_list_newf (r_bin_java_interface_free); if (offset + 2 > len) { bin->interfaces_size = 0; return 0; } bin->interfaces_count = R_BIN_JAVA_USHORT (if_buf, 0); adv += 2; IFDBG eprintf ("Interfaces count: %d\n", bin->interfaces_count); if (bin->interfaces_count > 0) { for (i = 0; i < bin->interfaces_count; i++) { interfaces_obj = r_bin_java_read_next_interface_item (bin, offset + adv, buf, len); if (interfaces_obj) { r_list_append (bin->interfaces_list, interfaces_obj); adv += interfaces_obj->size; if (offset + adv > len) { eprintf ("[X] r_bin_java: Error unable to parse remainder of classfile after Interface: %d.\n", i); break; } } else { break; } } } bin->interfaces_size = adv; return adv; } R_API ut64 r_bin_java_parse_fields(RBinJavaObj *bin, const ut64 offset, const ut8 *buf, const ut64 len) { int i = 0; ut64 adv = 0; RBinJavaField *field; const ut8 *fm_buf = buf + offset; r_list_free (bin->fields_list); bin->fields_list = r_list_newf (r_bin_java_fmtype_free); bin->fields_offset = offset; if (offset + 2 >= len) { return UT64_MAX; } bin->fields_count = 
R_BIN_JAVA_USHORT (fm_buf, 0); adv += 2; IFDBG eprintf ("Fields count: %d 0x%"PFMT64x "\n", bin->fields_count, bin->fields_offset); if (bin->fields_count > 0) { for (i = 0; i < bin->fields_count; i++, bin->field_idx++) { field = r_bin_java_read_next_field (bin, offset + adv, buf, len); if (field) { adv += field->size; r_list_append (bin->fields_list, field); IFDBG r_bin_java_print_field_summary(field); if (adv + offset > len) { eprintf ("[X] r_bin_java: Error unable to parse remainder of classfile after Field: %d.\n", i); break; } } else { IFDBG eprintf ("Failed to read Field %d\n", i); break; } } } bin->fields_size = adv; return adv; } R_API ut64 r_bin_java_parse_attrs(RBinJavaObj *bin, const ut64 offset, const ut8 *buf, const ut64 len) { int i = 0; ut64 adv = 0; const ut8 *a_buf = buf + offset; if (offset + 2 >= len) { // Check if we can read that USHORT return UT64_MAX; } r_list_free (bin->attrs_list); bin->attrs_list = r_list_newf (r_bin_java_attribute_free); bin->attrs_offset = offset; bin->attrs_count = R_BIN_JAVA_USHORT (a_buf, adv); adv += 2; if (bin->attrs_count > 0) { for (i = 0; i < bin->attrs_count; i++, bin->attr_idx++) { RBinJavaAttrInfo *attr = r_bin_java_read_next_attr (bin, offset + adv, buf, len); if (!attr) { // eprintf ("[X] r_bin_java: Error unable to parse remainder of classfile after Attribute: %d.\n", i); break; } r_list_append (bin->attrs_list, attr); adv += attr->size; if (adv + offset >= len) { // eprintf ("[X] r_bin_java: Error unable to parse remainder of classfile after Attribute: %d.\n", i); break; } } } bin->attrs_size = adv; return adv; } R_API ut64 r_bin_java_parse_methods(RBinJavaObj *bin, const ut64 offset, const ut8 *buf, const ut64 len) { int i = 0; ut64 adv = 0; RBinJavaField *method; const ut8 *fm_buf = buf + offset; r_list_free (bin->methods_list); bin->methods_list = r_list_newf (r_bin_java_fmtype_free); if (offset + 2 >= len) { return 0LL; } bin->methods_offset = offset; bin->methods_count = R_BIN_JAVA_USHORT (fm_buf, 0); 
adv += 2; IFDBG eprintf ("Methods count: %d 0x%"PFMT64x "\n", bin->methods_count, bin->methods_offset); bin->main = NULL; bin->entrypoint = NULL; bin->main_code_attr = NULL; bin->entrypoint_code_attr = NULL; for (i = 0; i < bin->methods_count; i++, bin->method_idx++) { method = r_bin_java_read_next_method (bin, offset + adv, buf, len); if (method) { adv += method->size; r_list_append (bin->methods_list, method); } // Update Main, Init, or Class Init if (method && !strcmp ((const char *) method->name, "main")) { bin->main = method; // get main code attr bin->main_code_attr = r_bin_java_get_attr_from_field (method, R_BIN_JAVA_ATTR_TYPE_CODE_ATTR, 0); } else if (method && (!strcmp ((const char *) method->name, "<init>") || !strcmp ((const char *) method->name, "init"))) { IFDBG eprintf ("Found an init function.\n"); bin->entrypoint = method; bin->entrypoint_code_attr = r_bin_java_get_attr_from_field (method, R_BIN_JAVA_ATTR_TYPE_CODE_ATTR, 0); } else if (method && (!strcmp ((const char *) method->name, "<cinit>") || !strcmp ((const char *) method->name, "cinit"))) { bin->cf2.this_class_entrypoint = method; bin->cf2.this_class_entrypoint_code_attr = r_bin_java_get_attr_from_field (method, R_BIN_JAVA_ATTR_TYPE_CODE_ATTR, 0); } if (adv + offset > len) { eprintf ("[X] r_bin_java: Error unable to parse remainder of classfile after Method: %d.\n", i); break; } IFDBG r_bin_java_print_field_summary(method); } bin->methods_size = adv; return adv; } R_API int r_bin_java_new_bin(RBinJavaObj *bin, ut64 loadaddr, Sdb *kv, const ut8 *buf, ut64 len) { R_BIN_JAVA_GLOBAL_BIN = bin; if (!r_str_constpool_init (&bin->constpool)) { return false; } bin->lines.count = 0; bin->loadaddr = loadaddr; r_bin_java_get_java_null_cp (); bin->id = r_num_rand (UT32_MAX); bin->kv = kv ? 
kv : sdb_new (NULL, NULL, 0); bin->AllJavaBinObjs = NULL; return r_bin_java_load_bin (bin, buf, len); } R_API int r_bin_java_load_bin(RBinJavaObj *bin, const ut8 *buf, ut64 buf_sz) { ut64 adv = 0; R_BIN_JAVA_GLOBAL_BIN = bin; if (!bin) { return false; } r_bin_java_reset_bin_info (bin); memcpy ((ut8 *) &bin->cf, buf, 10); if (memcmp (bin->cf.cafebabe, "\xCA\xFE\xBA\xBE", 4)) { eprintf ("r_bin_java_new_bin: Invalid header (%02x %02x %02x %02x)\n", bin->cf.cafebabe[0], bin->cf.cafebabe[1], bin->cf.cafebabe[2], bin->cf.cafebabe[3]); return false; } if (bin->cf.major[0] == bin->cf.major[1] && bin->cf.major[0] == 0) { eprintf ("Java CLASS with MACH0 header?\n"); return false; } adv += 8; // -2 so that the cp_count will be parsed adv += r_bin_java_parse_cp_pool (bin, adv, buf, buf_sz); if (adv > buf_sz) { eprintf ("[X] r_bin_java: Error unable to parse remainder of classfile after Constant Pool.\n"); return true; } adv += r_bin_java_read_class_file2 (bin, adv, buf, buf_sz); if (adv > buf_sz) { eprintf ("[X] r_bin_java: Error unable to parse remainder of classfile after class file info.\n"); return true; } IFDBG eprintf ("This class: %d %s\n", bin->cf2.this_class, bin->cf2.this_class_name); IFDBG eprintf ("0x%"PFMT64x " Access flags: 0x%04x\n", adv, bin->cf2.access_flags); adv += r_bin_java_parse_interfaces (bin, adv, buf, buf_sz); if (adv > buf_sz) { eprintf ("[X] r_bin_java: Error unable to parse remainder of classfile after Interfaces.\n"); return true; } adv += r_bin_java_parse_fields (bin, adv, buf, buf_sz); if (adv > buf_sz) { eprintf ("[X] r_bin_java: Error unable to parse remainder of classfile after Fields.\n"); return true; } adv += r_bin_java_parse_methods (bin, adv, buf, buf_sz); if (adv > buf_sz) { eprintf ("[X] r_bin_java: Error unable to parse remainder of classfile after Methods.\n"); return true; } adv += r_bin_java_parse_attrs (bin, adv, buf, buf_sz); bin->calc_size = adv; // if (adv > buf_sz) { // eprintf ("[X] r_bin_java: Error unable to parse remainder 
of classfile after Attributes.\n"); // return true; // } // add_cp_objs_to_sdb(bin); // add_method_infos_to_sdb(bin); // add_field_infos_to_sdb(bin); return true; } R_API char *r_bin_java_get_version(RBinJavaObj *bin) { return r_str_newf ("0x%02x%02x 0x%02x%02x", bin->cf.major[1], bin->cf.major[0], bin->cf.minor[1], bin->cf.minor[0]); } R_API RList *r_bin_java_get_entrypoints(RBinJavaObj *bin) { RListIter *iter = NULL, *iter_tmp = NULL; RBinJavaField *fm_type; RList *ret = r_list_newf (free); if (!ret) { return NULL; } r_list_foreach_safe (bin->methods_list, iter, iter_tmp, fm_type) { if (!strcmp (fm_type->name, "main") || !strcmp (fm_type->name, "<init>") || !strcmp (fm_type->name, "<clinit>") || strstr (fm_type->flags_str, "static")) { RBinAddr *addr = R_NEW0 (RBinAddr); if (addr) { addr->vaddr = addr->paddr = \ r_bin_java_get_method_code_offset (fm_type) + bin->loadaddr; addr->hpaddr = fm_type->file_offset; r_list_append (ret, addr); } } } return ret; } R_API RBinJavaField *r_bin_java_get_method_code_attribute_with_addr(RBinJavaObj *bin, ut64 addr) { RListIter *iter = NULL, *iter_tmp = NULL; RBinJavaField *fm_type, *res = NULL; if (!bin && R_BIN_JAVA_GLOBAL_BIN) { bin = R_BIN_JAVA_GLOBAL_BIN; } if (!bin) { eprintf ("Attempting to analyse function when the R_BIN_JAVA_GLOBAL_BIN has not been set.\n"); return NULL; } r_list_foreach_safe (bin->methods_list, iter, iter_tmp, fm_type) { ut64 offset = r_bin_java_get_method_code_offset (fm_type) + bin->loadaddr, size = r_bin_java_get_method_code_size (fm_type); if (addr >= offset && addr <= size + offset) { res = fm_type; } } return res; } R_API RBinAddr *r_bin_java_get_entrypoint(RBinJavaObj *bin, int sym) { RBinAddr *ret = NULL; ret = R_NEW0 (RBinAddr); if (!ret) { return NULL; } ret->paddr = UT64_MAX; switch (sym) { case R_BIN_SYM_ENTRY: case R_BIN_SYM_INIT: ret->paddr = r_bin_java_find_method_offset (bin, "<init>"); if (ret->paddr == UT64_MAX) { ret->paddr = r_bin_java_find_method_offset (bin, "<cinit>"); } break; 
case R_BIN_SYM_FINI: ret->paddr = UT64_MAX; break; case R_BIN_SYM_MAIN: ret->paddr = r_bin_java_find_method_offset (bin, "main"); break; default: ret->paddr = -1; } if (ret->paddr != -1) { ret->paddr += bin->loadaddr; } return ret; } R_API ut64 r_bin_java_get_method_code_size(RBinJavaField *fm_type) { RListIter *attr_iter = NULL, *attr_iter_tmp = NULL; RBinJavaAttrInfo *attr = NULL; ut64 sz = 0; r_list_foreach_safe (fm_type->attributes, attr_iter, attr_iter_tmp, attr) { if (attr->type == R_BIN_JAVA_ATTR_TYPE_CODE_ATTR) { sz = attr->info.code_attr.code_length; break; } } return sz; } R_API ut64 r_bin_java_find_method_offset(RBinJavaObj *bin, const char *method_name) { RListIter *attr_iter = NULL, *attr_iter_tmp = NULL; RBinJavaField *method = NULL; ut64 offset = -1; r_list_foreach_safe (bin->methods_list, attr_iter, attr_iter_tmp, method) { if (method && !strcmp ((const char *) method->name, method_name)) { offset = r_bin_java_get_method_code_offset (method) + bin->loadaddr; break; } } return offset; } R_API ut64 r_bin_java_get_method_code_offset(RBinJavaField *fm_type) { RListIter *attr_iter = NULL, *attr_iter_tmp = NULL; RBinJavaAttrInfo *attr = NULL; ut64 offset = 0; r_list_foreach_safe (fm_type->attributes, attr_iter, attr_iter_tmp, attr) { if (attr->type == R_BIN_JAVA_ATTR_TYPE_CODE_ATTR) { offset = attr->info.code_attr.code_offset; break; } } return offset; } R_API RBinField *r_bin_java_allocate_rbinfield(void) { RBinField *t = (RBinField *) malloc (sizeof (RBinField)); if (t) { memset (t, 0, sizeof (RBinField)); } return t; } R_API RBinField *r_bin_java_create_new_rbinfield_from_field(RBinJavaField *fm_type, ut64 baddr) { RBinField *field = r_bin_java_allocate_rbinfield (); if (field) { field->name = strdup (fm_type->name); field->paddr = fm_type->file_offset + baddr; field->visibility = fm_type->flags; } return field; } R_API RBinSymbol *r_bin_java_create_new_symbol_from_field(RBinJavaField *fm_type, ut64 baddr) { RBinSymbol *sym = R_NEW0 (RBinSymbol); if 
(!fm_type || !fm_type->field_ref_cp_obj || fm_type->field_ref_cp_obj == &R_BIN_JAVA_NULL_TYPE) { R_FREE (sym); } if (sym) { sym->name = strdup (fm_type->name); // strncpy (sym->type, fm_type->descriptor, R_BIN_SIZEOF_STRINGS); if (fm_type->type == R_BIN_JAVA_FIELD_TYPE_METHOD) { sym->type = R_BIN_TYPE_FUNC_STR; sym->paddr = r_bin_java_get_method_code_offset (fm_type); sym->vaddr = r_bin_java_get_method_code_offset (fm_type) + baddr; sym->size = r_bin_java_get_method_code_size (fm_type); } else { sym->type = "FIELD"; sym->paddr = fm_type->file_offset;// r_bin_java_get_method_code_offset (fm_type); sym->vaddr = fm_type->file_offset + baddr; sym->size = fm_type->size; } if (r_bin_java_is_fm_type_protected (fm_type)) { sym->bind = R_BIN_BIND_LOCAL_STR; } else if (r_bin_java_is_fm_type_private (fm_type)) { sym->bind = R_BIN_BIND_LOCAL_STR; } else if (r_bin_java_is_fm_type_protected (fm_type)) { sym->bind = R_BIN_BIND_GLOBAL_STR; } sym->forwarder = "NONE"; if (fm_type->class_name) { sym->classname = strdup (fm_type->class_name); } else { sym->classname = strdup ("UNKNOWN"); // dupped names? 
} sym->ordinal = fm_type->metas->ord; sym->visibility = fm_type->flags; if (fm_type->flags_str) { sym->visibility_str = strdup (fm_type->flags_str); } } return sym; } R_API RBinSymbol *r_bin_java_create_new_symbol_from_fm_type_meta(RBinJavaField *fm_type, ut64 baddr) { RBinSymbol *sym = R_NEW0 (RBinSymbol); if (!sym || !fm_type || !fm_type->field_ref_cp_obj || fm_type->field_ref_cp_obj == &R_BIN_JAVA_NULL_TYPE) { free (sym); return NULL; } // ut32 new_name_len = strlen (fm_type->name) + strlen ("_meta") + 1; // char *new_name = malloc (new_name_len); sym->name = r_str_newf ("meta_%s", fm_type->name); if (fm_type->type == R_BIN_JAVA_FIELD_TYPE_METHOD) { sym->type = "FUNC_META"; } else { sym->type = "FIELD_META"; } if (r_bin_java_is_fm_type_protected (fm_type)) { sym->bind = R_BIN_BIND_LOCAL_STR; } else if (r_bin_java_is_fm_type_private (fm_type)) { sym->bind = R_BIN_BIND_LOCAL_STR; } else if (r_bin_java_is_fm_type_protected (fm_type)) { sym->bind = R_BIN_BIND_GLOBAL_STR; } sym->forwarder = "NONE"; if (fm_type->class_name) { sym->classname = strdup (fm_type->class_name); } else { sym->classname = strdup ("UNKNOWN"); } sym->paddr = fm_type->file_offset;// r_bin_java_get_method_code_offset (fm_type); sym->vaddr = fm_type->file_offset + baddr; sym->ordinal = fm_type->metas->ord; sym->size = fm_type->size; sym->visibility = fm_type->flags; if (fm_type->flags_str) { sym->visibility_str = strdup (fm_type->flags_str); } return sym; } R_API RBinSymbol *r_bin_java_create_new_symbol_from_ref(RBinJavaObj *bin, RBinJavaCPTypeObj *obj, ut64 baddr) { RBinSymbol *sym = R_NEW0 (RBinSymbol); if (!sym) { return NULL; } char *class_name, *name, *type_name; if (!obj || (obj->tag != R_BIN_JAVA_CP_METHODREF && obj->tag != R_BIN_JAVA_CP_INTERFACEMETHOD_REF && obj->tag != R_BIN_JAVA_CP_FIELDREF)) { R_FREE (sym); return sym; } if (sym) { class_name = r_bin_java_get_name_from_bin_cp_list (bin, obj->info.cp_method.class_idx); name = r_bin_java_get_name_from_bin_cp_list (bin, 
obj->info.cp_method.name_and_type_idx); type_name = r_bin_java_get_name_from_bin_cp_list (bin, obj->info.cp_method.name_and_type_idx); if (name) { sym->name = name; name = NULL; } if (type_name) { sym->type = r_str_constpool_get (&bin->constpool, type_name); R_FREE (type_name); } if (class_name) { sym->classname = strdup (class_name); } sym->paddr = obj->file_offset + baddr; sym->vaddr = obj->file_offset + baddr; sym->ordinal = obj->metas->ord; sym->size = 0; } return sym; } // TODO: vaddr+vsize break things if set R_API RList *r_bin_java_get_sections(RBinJavaObj *bin) { RBinSection *section = NULL; RList *sections = r_list_newf (free); ut64 baddr = bin->loadaddr; RBinJavaField *fm_type; RListIter *iter = NULL; if (bin->cp_count > 0) { section = R_NEW0 (RBinSection); if (section) { section->name = strdup ("constant_pool"); section->paddr = bin->cp_offset + baddr; section->size = bin->cp_size; #if 0 section->vsize = section->size; section->vaddr = 0x10; // XXX // bin->cp_offset; // + baddr; #endif section->vaddr = baddr; // section->vaddr = section->paddr; // section->vsize = section->size; section->perm = R_PERM_R; section->add = true; r_list_append (sections, section); } section = NULL; } if (bin->fields_count > 0) { section = R_NEW0 (RBinSection); if (section) { section->name = strdup ("fields"); section->size = bin->fields_size; section->paddr = bin->fields_offset + baddr; #if 0 section->vsize = section->size; section->vaddr = section->paddr; #endif section->perm = R_PERM_R; section->add = true; r_list_append (sections, section); section = NULL; r_list_foreach (bin->fields_list, iter, fm_type) { if (fm_type->attr_offset == 0) { continue; } section = R_NEW0 (RBinSection); if (section) { section->name = r_str_newf ("attrs.%s", fm_type->name); section->size = fm_type->size - (fm_type->file_offset - fm_type->attr_offset); #if 0 section->vsize = section->size; section->vaddr = section->paddr; #endif section->paddr = fm_type->attr_offset + baddr; section->perm = 
R_PERM_R; section->add = true; r_list_append (sections, section); } } } } if (bin->methods_count > 0) { section = R_NEW0 (RBinSection); if (section) { section->name = strdup ("methods"); section->paddr = bin->methods_offset + baddr; section->size = bin->methods_size; // section->vaddr = section->paddr; // section->vsize = section->size; section->perm = R_PERM_RX; section->add = true; r_list_append (sections, section); section = NULL; r_list_foreach (bin->methods_list, iter, fm_type) { if (fm_type->attr_offset == 0) { continue; } section = R_NEW0 (RBinSection); if (section) { section->name = r_str_newf ("attrs.%s", fm_type->name); section->size = fm_type->size - (fm_type->file_offset - fm_type->attr_offset); // section->vsize = section->size; // section->vaddr = section->paddr; section->paddr = fm_type->attr_offset + baddr; section->perm = R_PERM_R | R_PERM_X; section->add = true; r_list_append (sections, section); } } } } if (bin->interfaces_count > 0) { section = R_NEW0 (RBinSection); if (section) { section->name = strdup ("interfaces"); section->paddr = bin->interfaces_offset + baddr; section->size = bin->interfaces_size; // section->vaddr = section->paddr; // section->vsize = section->size; section->perm = R_PERM_R; section->add = true; r_list_append (sections, section); } section = NULL; } if (bin->attrs_count > 0) { section = R_NEW0 (RBinSection); if (section) { section->name = strdup ("attributes"); section->paddr = bin->attrs_offset + baddr; section->size = bin->attrs_size; // section->vaddr = section->paddr; // section->vsize = section->size; section->perm = R_PERM_R; section->perm = R_PERM_R; section->add = true; r_list_append (sections, section); } section = NULL; } return sections; } R_API RList *r_bin_java_enum_class_methods(RBinJavaObj *bin, ut16 class_idx) { RList *methods = r_list_newf (free); RListIter *iter; RBinJavaField *field; r_list_foreach (bin->methods_list, iter, field) { if (field->field_ref_cp_obj && 0) { if ((field && 
field->field_ref_cp_obj->metas->ord == class_idx)) { RBinSymbol *sym = r_bin_java_create_new_symbol_from_ref ( bin, field->field_ref_cp_obj, bin->loadaddr); if (sym) { r_list_append (methods, sym); } } } else { RBinSymbol *sym = R_NEW0 (RBinSymbol); sym->name = strdup (field->name); // func defintion // sym->paddr = field->file_offset + bin->loadaddr; // code implementation sym->paddr = r_bin_java_get_method_code_offset (field); sym->vaddr = sym->paddr; // + bin->loadaddr; r_list_append (methods, sym); } } return methods; } R_API RList *r_bin_java_enum_class_fields(RBinJavaObj *bin, ut16 class_idx) { RList *fields = r_list_newf (free); RListIter *iter; RBinJavaField *fm_type; RBinField *field = NULL; r_list_foreach (bin->fields_list, iter, fm_type) { if (fm_type) { if (fm_type && fm_type->field_ref_cp_obj && fm_type->field_ref_cp_obj->metas->ord == class_idx) { field = r_bin_java_create_new_rbinfield_from_field (fm_type, bin->loadaddr); if (field) { r_list_append (fields, field); } } } } return fields; } R_API int is_class_interface(RBinJavaObj *bin, RBinJavaCPTypeObj *cp_obj) { RBinJavaInterfaceInfo *ifobj; RListIter *iter; int res = false; r_list_foreach (bin->interfaces_list, iter, ifobj) { if (ifobj) { res = cp_obj == ifobj->cp_class; if (res) { break; } } } return res; } /* R_API RList * r_bin_java_get_interface_classes(RBinJavaObj * bin) { RList *interfaces_names = r_list_new (); RListIter *iter; RBinJavaInterfaceInfo *ifobj; r_list_foreach(bin->interfaces_list, iter, iinfo) { RBinClass *class_ = R_NEW0 (RBinClass); RBinJavaCPTypeObj *cp_obj = ; if (ifobj && ifobj->name) { ut8 * name = strdup(ifobj->name); r_list_append(interfaces_names, name); } } return interfaces_names; } */ R_API RList *r_bin_java_get_lib_names(RBinJavaObj *bin) { RList *lib_names = r_list_newf (free); RListIter *iter; RBinJavaCPTypeObj *cp_obj = NULL; if (!bin) { return lib_names; } r_list_foreach (bin->cp_list, iter, cp_obj) { if (cp_obj && cp_obj->tag == R_BIN_JAVA_CP_CLASS && 
(bin->cf2.this_class != cp_obj->info.cp_class.name_idx || !is_class_interface (bin, cp_obj))) { char *name = r_bin_java_get_item_name_from_bin_cp_list (bin, cp_obj); if (name) { r_list_append (lib_names, name); } } } return lib_names; } R_API void r_bin_java_classes_free(void /*RBinClass*/ *k) { RBinClass *klass = k; if (klass) { r_list_free (klass->methods); r_list_free (klass->fields); free (klass->name); free (klass->super); free (klass->visibility_str); free (klass); } } R_API RList *r_bin_java_get_classes(RBinJavaObj *bin) { RList *classes = r_list_newf (r_bin_java_classes_free); RListIter *iter; RBinJavaCPTypeObj *cp_obj = NULL; RBinJavaCPTypeObj *this_class_cp_obj = r_bin_java_get_item_from_bin_cp_list (bin, bin->cf2.this_class); ut32 idx = 0; RBinClass *k = R_NEW0 (RBinClass); if (!k) { r_list_free (classes); return NULL; } k->visibility = bin->cf2.access_flags; if (bin->cf2.flags_str) { k->visibility_str = strdup (bin->cf2.flags_str); } k->methods = r_bin_java_enum_class_methods (bin, bin->cf2.this_class); k->fields = r_bin_java_enum_class_fields (bin, bin->cf2.this_class); k->name = r_bin_java_get_this_class_name (bin); k->super = r_bin_java_get_name_from_bin_cp_list (bin, bin->cf2.super_class); k->index = (idx++); r_list_append (classes, k); r_list_foreach (bin->cp_list, iter, cp_obj) { if (cp_obj && cp_obj->tag == R_BIN_JAVA_CP_CLASS && (this_class_cp_obj != cp_obj && is_class_interface (bin, cp_obj))) { k = R_NEW0 (RBinClass); if (!k) { break; } k->methods = r_bin_java_enum_class_methods (bin, cp_obj->info.cp_class.name_idx); k->fields = r_bin_java_enum_class_fields (bin, cp_obj->info.cp_class.name_idx); k->index = idx; k->name = r_bin_java_get_item_name_from_bin_cp_list (bin, cp_obj); r_list_append (classes, k); idx++; } } return classes; } R_API RBinSymbol *r_bin_java_create_new_symbol_from_invoke_dynamic(RBinJavaCPTypeObj *obj, ut64 baddr) { if (!obj || (obj->tag != R_BIN_JAVA_CP_INVOKEDYNAMIC)) { return NULL; } return 
r_bin_java_create_new_symbol_from_cp_idx (obj->info.cp_invoke_dynamic.name_and_type_index, baddr); } R_API RBinSymbol *r_bin_java_create_new_symbol_from_cp_idx(ut32 cp_idx, ut64 baddr) { RBinSymbol *sym = NULL; RBinJavaCPTypeObj *obj = r_bin_java_get_item_from_bin_cp_list ( R_BIN_JAVA_GLOBAL_BIN, cp_idx); if (obj) { switch (obj->tag) { case R_BIN_JAVA_CP_METHODREF: case R_BIN_JAVA_CP_FIELDREF: case R_BIN_JAVA_CP_INTERFACEMETHOD_REF: sym = r_bin_java_create_new_symbol_from_ref (R_BIN_JAVA_GLOBAL_BIN, obj, baddr); break; case R_BIN_JAVA_CP_INVOKEDYNAMIC: sym = r_bin_java_create_new_symbol_from_invoke_dynamic (obj, baddr); break; default: break; } } return sym; } R_API RList *U(r_bin_java_get_fields)(RBinJavaObj * bin) { RListIter *iter = NULL, *iter_tmp = NULL; RList *fields = r_list_new (); RBinJavaField *fm_type; RBinField *field; r_list_foreach_safe (bin->fields_list, iter, iter_tmp, fm_type) { field = r_bin_java_create_new_rbinfield_from_field (fm_type, bin->loadaddr); if (field) { r_list_append (fields, field); } } return fields; } R_API void r_bin_add_import(RBinJavaObj *bin, RBinJavaCPTypeObj *obj, const char *type) { RBinImport *imp = R_NEW0 (RBinImport); char *class_name = r_bin_java_get_name_from_bin_cp_list (bin, obj->info.cp_method.class_idx); char *name = r_bin_java_get_name_from_bin_cp_list (bin, obj->info.cp_method.name_and_type_idx); char *descriptor = r_bin_java_get_desc_from_bin_cp_list (bin, obj->info.cp_method.name_and_type_idx); class_name = class_name ? class_name : strdup ("INVALID CLASS NAME INDEX"); name = name ? name : strdup ("InvalidNameIndex"); descriptor = descriptor ? 
descriptor : strdup ("INVALID DESCRIPTOR INDEX"); imp->classname = class_name; imp->name = name; imp->bind = "NONE"; imp->type = r_str_constpool_get (&bin->constpool, type); imp->descriptor = descriptor; imp->ordinal = obj->idx; r_list_append (bin->imports_list, imp); } R_API void r_bin_java_set_imports(RBinJavaObj *bin) { RListIter *iter = NULL; RBinJavaCPTypeObj *obj = NULL; r_list_free (bin->imports_list); bin->imports_list = r_list_newf (free); r_list_foreach (bin->cp_list, iter, obj) { const char *type = NULL; switch (obj->tag) { case R_BIN_JAVA_CP_METHODREF: type = "METHOD"; break; case R_BIN_JAVA_CP_INTERFACEMETHOD_REF: type = "FIELD"; break; case R_BIN_JAVA_CP_FIELDREF: type = "INTERFACE_METHOD"; break; default: type = NULL; break; } if (type) { r_bin_add_import (bin, obj, type); } } } R_API RList *r_bin_java_get_imports(RBinJavaObj *bin) { RList *ret = r_list_newf (free); RBinImport *import = NULL; RListIter *iter; r_list_foreach (bin->imports_list, iter, import) { RBinImport *n_import = R_NEW0 (RBinImport); if (!n_import) { r_list_free (ret); return NULL; } memcpy (n_import, import, sizeof (RBinImport)); r_list_append (ret, n_import); } return ret; } R_API RList *r_bin_java_get_symbols(RBinJavaObj *bin) { RListIter *iter = NULL, *iter_tmp = NULL; RList *imports, *symbols = r_list_newf (free); RBinSymbol *sym = NULL; RBinImport *imp; RBinJavaField *fm_type; r_list_foreach_safe (bin->methods_list, iter, iter_tmp, fm_type) { sym = r_bin_java_create_new_symbol_from_field (fm_type, bin->loadaddr); if (sym) { r_list_append (symbols, (void *) sym); } sym = r_bin_java_create_new_symbol_from_fm_type_meta (fm_type, bin->loadaddr); if (sym) { r_list_append (symbols, (void *) sym); } } r_list_foreach_safe (bin->fields_list, iter, iter_tmp, fm_type) { sym = r_bin_java_create_new_symbol_from_field (fm_type, bin->loadaddr); if (sym) { r_list_append (symbols, (void *) sym); } sym = r_bin_java_create_new_symbol_from_fm_type_meta (fm_type, bin->loadaddr); if (sym) { 
r_list_append (symbols, (void *) sym); } } bin->lang = "java"; if (bin->cf.major[1] >= 46) { switch (bin->cf.major[1]) { static char lang[32]; int langid; case 46: case 47: case 48: langid = 2 + (bin->cf.major[1] - 46); snprintf (lang, sizeof (lang) - 1, "java 1.%d", langid); bin->lang = lang; break; default: langid = 5 + (bin->cf.major[1] - 49); snprintf (lang, sizeof (lang) - 1, "java %d", langid); bin->lang = lang; } } imports = r_bin_java_get_imports (bin); r_list_foreach (imports, iter, imp) { sym = R_NEW0 (RBinSymbol); if (!sym) { break; } if (imp->classname && !strncmp (imp->classname, "kotlin/jvm", 10)) { bin->lang = "kotlin"; } sym->name = strdup (imp->name); sym->is_imported = true; if (!sym->name) { free (sym); break; } sym->type = "import"; if (!sym->type) { free (sym); break; } sym->vaddr = sym->paddr = imp->ordinal; sym->ordinal = imp->ordinal; r_list_append (symbols, (void *) sym); } r_list_free (imports); return symbols; } R_API RList *r_bin_java_get_strings(RBinJavaObj *bin) { RList *strings = r_list_newf (free); RBinString *str = NULL; RListIter *iter = NULL, *iter_tmp = NULL; RBinJavaCPTypeObj *cp_obj = NULL; r_list_foreach_safe (bin->cp_list, iter, iter_tmp, cp_obj) { if (cp_obj && cp_obj->tag == R_BIN_JAVA_CP_UTF8) { str = (RBinString *) R_NEW0 (RBinString); if (str) { str->paddr = cp_obj->file_offset + bin->loadaddr; str->ordinal = cp_obj->metas->ord; str->size = cp_obj->info.cp_utf8.length + 3; str->length = cp_obj->info.cp_utf8.length; if (str->size > 0) { str->string = r_str_ndup ((const char *) cp_obj->info.cp_utf8.bytes, R_BIN_JAVA_MAXSTR); } r_list_append (strings, (void *) str); } } } return strings; } R_API void *r_bin_java_free(RBinJavaObj *bin) { char *bin_obj_key = NULL; if (!bin) { return NULL; } // Delete the bin object from the data base. 
bin_obj_key = r_bin_java_build_obj_key (bin); // if (bin->AllJavaBinObjs && sdb_exists (bin->AllJavaBinObjs, bin_obj_key)) { // sdb_unset (bin->AllJavaBinObjs, bin_obj_key, 0); // } free (bin_obj_key); r_list_free (bin->imports_list); // XXX - Need to remove all keys belonging to this class from // the share meta information sdb. // TODO e.g. iterate over bin->kv and delete all obj, func, etc. keys // sdb_free (bin->kv); // free up the constant pool list r_list_free (bin->cp_list); // free up the fields list r_list_free (bin->fields_list); // free up methods list r_list_free (bin->methods_list); // free up interfaces list r_list_free (bin->interfaces_list); r_list_free (bin->attrs_list); // TODO: XXX if a class list of all inner classes // are formed then this will need to be updated free (bin->cf2.flags_str); free (bin->cf2.this_class_name); if (bin == R_BIN_JAVA_GLOBAL_BIN) { R_BIN_JAVA_GLOBAL_BIN = NULL; } free (bin->file); r_str_constpool_fini (&bin->constpool); free (bin); return NULL; } R_API RBinJavaObj *r_bin_java_new_buf(RBuffer *buf, ut64 loadaddr, Sdb *kv) { RBinJavaObj *bin = R_NEW0 (RBinJavaObj); if (!bin) { return NULL; } ut64 tmpsz; const ut8 *tmp = r_buf_data (buf, &tmpsz); if (!r_bin_java_new_bin (bin, loadaddr, kv, tmp, tmpsz)) { return r_bin_java_free (bin); } return bin; } R_API void r_bin_java_attribute_free(void /*RBinJavaAttrInfo*/ *a) { RBinJavaAttrInfo *attr = a; if (attr) { IFDBG eprintf ("Deleting attr %s, %p\n", attr->name, attr); if (attr && attr->metas && attr->metas->type_info) { RBinJavaAttrMetas *a = attr->metas->type_info; if (a && a->allocs && a->allocs->delete_obj) { a->allocs->delete_obj (attr); } } // free (attr->metas); // free (attr); } } R_API void r_bin_java_constant_pool(void /*RBinJavaCPTypeObj*/ *o) { RBinJavaCPTypeObj *obj = o; if (obj != &R_BIN_JAVA_NULL_TYPE) { ((RBinJavaCPTypeMetas *) obj->metas->type_info)->allocs->delete_obj (obj); } } R_API void r_bin_java_fmtype_free(void /*RBinJavaField*/ *f) { RBinJavaField 
*fm_type = f; if (!fm_type) { return; } free (fm_type->descriptor); free (fm_type->name); free (fm_type->flags_str); free (fm_type->class_name); free (fm_type->metas); r_list_free (fm_type->attributes); free (fm_type); } // Start Free the various attribute types R_API void r_bin_java_unknown_attr_free(void /*RBinJavaAttrInfo*/ *a) { RBinJavaAttrInfo *attr = a; if (attr) { free (attr->name); free (attr->metas); free (attr); } } R_API void r_bin_java_local_variable_table_attr_entry_free(void /*RBinJavaLocalVariableAttribute*/ *a) { RBinJavaLocalVariableAttribute *lvattr = a; if (lvattr) { free (lvattr->descriptor); free (lvattr->name); free (lvattr); } } R_API void r_bin_java_local_variable_table_attr_free(void /*RBinJavaAttrInfo*/ *a) { RBinJavaAttrInfo *attr = a; if (attr) { free (attr->name); free (attr->metas); r_list_free (attr->info.local_variable_table_attr.local_variable_table); free (attr); } } R_API void r_bin_java_local_variable_type_table_attr_entry_free(void /*RBinJavaLocalVariableTypeAttribute*/ *a) { RBinJavaLocalVariableTypeAttribute *attr = a; if (attr) { free (attr->name); free (attr->signature); free (attr); } } R_API void r_bin_java_local_variable_type_table_attr_free(void /*RBinJavaAttrInfo*/ *a) { RBinJavaAttrInfo *attr = a; if (attr) { free (attr->name); free (attr->metas); r_list_free (attr->info.local_variable_type_table_attr.local_variable_table); free (attr); } } R_API void r_bin_java_deprecated_attr_free(void /*RBinJavaAttrInfo*/ *a) { RBinJavaAttrInfo *attr = a; if (attr) { free (attr->name); free (attr->metas); free (attr); } } R_API void r_bin_java_enclosing_methods_attr_free(void /*RBinJavaAttrInfo*/ *a) { RBinJavaAttrInfo *attr = a; if (attr) { free (attr->name); free (attr->metas); free (attr->info.enclosing_method_attr.class_name); free (attr->info.enclosing_method_attr.method_name); free (attr->info.enclosing_method_attr.method_descriptor); free (attr); } } R_API void r_bin_java_synthetic_attr_free(void /*RBinJavaAttrInfo*/ *a) { 
RBinJavaAttrInfo *attr = a; if (attr) { free (attr->name); free (attr->metas); free (attr); } } R_API void r_bin_java_constant_value_attr_free(void /*RBinJavaAttrInfo*/ *a) { RBinJavaAttrInfo *attr = a; if (attr) { free (attr->name); free (attr->metas); free (attr); } } R_API void r_bin_java_line_number_table_attr_free(void /*RBinJavaAttrInfo*/ *a) { RBinJavaAttrInfo *attr = a; if (attr) { free (attr->name); free (attr->metas); r_list_free (attr->info.line_number_table_attr.line_number_table); free (attr); } } R_API void r_bin_java_code_attr_free(void /*RBinJavaAttrInfo*/ *a) { RBinJavaAttrInfo *attr = a; if (attr) { // XXX - Intentional memory leak here. When one of the // Code attributes is parsed, the code (the r_bin_java) // is not properly parsing the class file r_bin_java_stack_frame_free (attr->info.code_attr.implicit_frame); r_list_free (attr->info.code_attr.attributes); free (attr->info.code_attr.code); r_list_free (attr->info.code_attr.exception_table); free (attr->name); free (attr->metas); free (attr); } } R_API void r_bin_java_exceptions_attr_free(void /*RBinJavaAttrInfo*/ *a) { RBinJavaAttrInfo *attr = a; if (attr) { free (attr->name); free (attr->metas); free (attr->info.exceptions_attr.exception_idx_table); free (attr); } } R_API void r_bin_java_inner_classes_attr_entry_free(void /*RBinJavaClassesAttribute*/ *a) { RBinJavaClassesAttribute *attr = a; if (attr) { free (attr->name); free (attr->flags_str); free (attr); } } R_API void r_bin_java_inner_classes_attr_free(void /*RBinJavaAttrInfo*/ *a) { RBinJavaAttrInfo *attr = a; if (attr) { free (attr->name); free (attr->metas); r_list_free (attr->info.inner_classes_attr.classes); free (attr); } } R_API void r_bin_java_signature_attr_free(void /*RBinJavaAttrInfo*/ *a) { RBinJavaAttrInfo *attr = a; if (attr) { free (attr->name); free (attr->metas); free (attr->info.signature_attr.signature); free (attr); } } R_API void r_bin_java_source_debug_attr_free(void /*RBinJavaAttrInfo*/ *a) { RBinJavaAttrInfo 
*attr = a; if (attr) { free (attr->name); free (attr->metas); free (attr->info.debug_extensions.debug_extension); free (attr); } } R_API void r_bin_java_source_code_file_attr_free(void /*RBinJavaAttrInfo*/ *a) { RBinJavaAttrInfo *attr = a; if (attr) { free (attr->name); free (attr->metas); free (attr); } } R_API void r_bin_java_stack_map_table_attr_free(void /*RBinJavaAttrInfo*/ *a) { RBinJavaAttrInfo *attr = a; if (attr) { free (attr->name); free (attr->metas); r_list_free (attr->info.stack_map_table_attr.stack_map_frame_entries); free (attr); } } R_API void r_bin_java_stack_frame_free(void /*RBinJavaStackMapFrame*/ *o) { RBinJavaStackMapFrame *obj = o; if (obj) { r_list_free (obj->local_items); r_list_free (obj->stack_items); free (obj->metas); free (obj); } } R_API void r_bin_java_verification_info_free(void /*RBinJavaVerificationObj*/ *o) { RBinJavaVerificationObj *obj = o; // eprintf ("Freeing verification object\n"); if (obj) { free (obj->name); free (obj); } } R_API void r_bin_java_interface_free(void /*RBinJavaInterfaceInfo*/ *o) { RBinJavaInterfaceInfo *obj = o; if (obj) { free (obj->name); free (obj); } } // End Free the various attribute types // Start the various attibute types new R_API ut64 r_bin_java_attr_calc_size(RBinJavaAttrInfo *attr) { return attr ? ((RBinJavaAttrMetas *) attr->metas->type_info)->allocs->calc_size (attr) : 0; } R_API ut64 r_bin_java_unknown_attr_calc_size(RBinJavaAttrInfo *attr) { return attr ? 6 : 0; } R_API RBinJavaAttrInfo *r_bin_java_unknown_attr_new(RBinJavaObj *bin, ut8 *buffer, ut64 sz, ut64 buf_offset) { return r_bin_java_default_attr_new (bin, buffer, sz, buf_offset); } R_API ut64 r_bin_java_code_attr_calc_size(RBinJavaAttrInfo *attr) { RListIter *iter; // RListIter *iter_tmp; ut64 size = 0; bool is_attr_in_old_format = attr->is_attr_in_old_format; if (attr) { // attr = r_bin_java_default_attr_new (buffer, sz, buf_offset); size += is_attr_in_old_format ? 
4 : 6; // attr->info.code_attr.max_stack = R_BIN_JAVA_USHORT (buffer, 0); size += is_attr_in_old_format ? 1 : 2; // attr->info.code_attr.max_locals = R_BIN_JAVA_USHORT (buffer, 2); size += is_attr_in_old_format ? 1 : 2; // attr->info.code_attr.code_length = R_BIN_JAVA_UINT (buffer, 4); size += is_attr_in_old_format ? 2 : 4; if (attr->info.code_attr.code) { size += attr->info.code_attr.code_length; } // attr->info.code_attr.exception_table_length = R_BIN_JAVA_USHORT (buffer, offset); size += 2; // RBinJavaExceptionEntry *exc_entry; // r_list_foreach_safe (attr->info.code_attr.exception_table, iter, iter_tmp, exc_entry) { r_list_foreach_iter (attr->info.code_attr.exception_table, iter) { // exc_entry->start_pc = R_BIN_JAVA_USHORT (buffer,offset); size += 2; // exc_entry->end_pc = R_BIN_JAVA_USHORT (buffer,offset); size += 2; // exc_entry->handler_pc = R_BIN_JAVA_USHORT (buffer,offset); size += 2; // exc_entry->catch_type = R_BIN_JAVA_USHORT (buffer, offset); size += 2; } // attr->info.code_attr.attributes_count = R_BIN_JAVA_USHORT (buffer, offset); size += 2; // RBinJavaAttrInfo *_attr; if (attr->info.code_attr.attributes_count > 0) { // r_list_foreach_safe (attr->info.code_attr.attributes, iter, iter_tmp, _attr) { r_list_foreach_iter (attr->info.code_attr.attributes, iter) { size += r_bin_java_attr_calc_size (attr); } } } return size; } R_API RBinJavaAttrInfo *r_bin_java_code_attr_new(RBinJavaObj *bin, ut8 *buffer, ut64 sz, ut64 buf_offset) { RBinJavaAttrInfo *_attr = NULL; ut32 k = 0, curpos; ut64 offset = 0; RBinJavaAttrInfo *attr = r_bin_java_default_attr_new (bin, buffer, sz, buf_offset); if (!attr) { return NULL; } if (sz < 16 || sz > buf_offset) {// sz > buf_offset) { free (attr); return NULL; } offset += 6; attr->type = R_BIN_JAVA_ATTR_TYPE_CODE_ATTR; attr->info.code_attr.max_stack = attr->is_attr_in_old_format ? buffer[offset] : R_BIN_JAVA_USHORT (buffer, offset); offset += attr->is_attr_in_old_format ? 
1 : 2; attr->info.code_attr.max_locals = attr->is_attr_in_old_format ? buffer[offset] : R_BIN_JAVA_USHORT (buffer, offset); offset += attr->is_attr_in_old_format ? 1 : 2; attr->info.code_attr.code_length = attr->is_attr_in_old_format ? R_BIN_JAVA_USHORT(buffer, offset) : R_BIN_JAVA_UINT (buffer, offset); offset += attr->is_attr_in_old_format ? 2 : 4; // BUG: possible unsigned integer overflow here attr->info.code_attr.code_offset = buf_offset + offset; attr->info.code_attr.code = (ut8 *) malloc (attr->info.code_attr.code_length); if (!attr->info.code_attr.code) { eprintf ("Handling Code Attributes: Unable to allocate memory " "(%u bytes) for a code.\n", attr->info.code_attr.code_length); return attr; } R_BIN_JAVA_GLOBAL_BIN->current_code_attr = attr; { int len = attr->info.code_attr.code_length; memset (attr->info.code_attr.code, 0, len); if (offset + len >= sz) { return attr; } memcpy (attr->info.code_attr.code, buffer + offset, len); offset += len; } attr->info.code_attr.exception_table_length = R_BIN_JAVA_USHORT (buffer, offset); offset += 2; attr->info.code_attr.exception_table = r_list_newf (free); for (k = 0; k < attr->info.code_attr.exception_table_length; k++) { curpos = buf_offset + offset; if (curpos + 8 > sz) { return attr; } RBinJavaExceptionEntry *e = R_NEW0 (RBinJavaExceptionEntry); if (!e) { free (attr); return NULL; } e->file_offset = curpos; e->start_pc = R_BIN_JAVA_USHORT (buffer, offset); offset += 2; e->end_pc = R_BIN_JAVA_USHORT (buffer, offset); offset += 2; e->handler_pc = R_BIN_JAVA_USHORT (buffer, offset); offset += 2; e->catch_type = R_BIN_JAVA_USHORT (buffer, offset); offset += 2; r_list_append (attr->info.code_attr.exception_table, e); e->size = 8; } attr->info.code_attr.attributes_count = R_BIN_JAVA_USHORT (buffer, offset); offset += 2; // IFDBG eprintf (" code Attributes_count: %d\n", attr->info.code_attr.attributes_count); // XXX - attr->info.code_attr.attributes is not freed because one of the code attributes is improperly parsed. 
attr->info.code_attr.attributes = r_list_newf (r_bin_java_attribute_free); if (attr->info.code_attr.attributes_count > 0) { for (k = 0; k < attr->info.code_attr.attributes_count; k++) { int size = (offset < sz) ? sz - offset : 0; if (size > sz || size <= 0) { break; } _attr = r_bin_java_read_next_attr_from_buffer (bin, buffer + offset, size, buf_offset + offset); if (!_attr) { eprintf ("[X] r_bin_java_code_attr_new: Error unable to parse remainder of classfile after Method's Code Attribute: %d.\n", k); break; } IFDBG eprintf ("Parsing @ 0x%"PFMT64x " (%s) = 0x%"PFMT64x " bytes, %p\n", _attr->file_offset, _attr->name, _attr->size, _attr); offset += _attr->size; r_list_append (attr->info.code_attr.attributes, _attr); if (_attr->type == R_BIN_JAVA_ATTR_TYPE_LOCAL_VARIABLE_TABLE_ATTR) { IFDBG eprintf ("Parsed the LocalVariableTable, preparing the implicit mthod frame.\n"); // r_bin_java_print_attr_summary(_attr); attr->info.code_attr.implicit_frame = r_bin_java_build_stack_frame_from_local_variable_table (R_BIN_JAVA_GLOBAL_BIN, _attr); attr->info.code_attr.implicit_frame->file_offset = buf_offset; IFDBG r_bin_java_print_stack_map_frame_summary(attr->info.code_attr.implicit_frame); // r_list_append (attr->info.code_attr.attributes, attr->info.code_attr.implicit_frame); } // if (offset > sz) { // eprintf ("[X] r_bin_java: Error unable to parse remainder of classfile after Attribute: %d.\n", k); // break; // } } } if (attr->info.code_attr.implicit_frame == NULL) { // build a default implicit_frame attr->info.code_attr.implicit_frame = r_bin_java_default_stack_frame (); // r_list_append (attr->info.code_attr.attributes, attr->info.code_attr.implicit_frame); } attr->size = offset; return attr; } R_API RBinJavaAttrInfo *r_bin_java_constant_value_attr_new(RBinJavaObj *bin, ut8 *buffer, ut64 sz, ut64 buf_offset) { ut64 offset = 6; RBinJavaAttrInfo *attr = r_bin_java_default_attr_new (bin, buffer, sz, buf_offset); if (attr) { attr->type = R_BIN_JAVA_ATTR_TYPE_CONST_VALUE_ATTR; 
attr->info.constant_value_attr.constantvalue_idx = R_BIN_JAVA_USHORT (buffer, offset); offset += 2; attr->size = offset; } // IFDBG r_bin_java_print_constant_value_attr_summary(attr); return attr; } R_API ut64 r_bin_java_constant_value_attr_calc_size(RBinJavaAttrInfo *attr) { return attr ? 8 : 0; } R_API RBinJavaAttrInfo *r_bin_java_deprecated_attr_new(RBinJavaObj *bin, ut8 *buffer, ut64 sz, ut64 buf_offset) { RBinJavaAttrInfo *attr = r_bin_java_default_attr_new (bin, buffer, sz, buf_offset); if (attr) { attr->type = R_BIN_JAVA_ATTR_TYPE_DEPRECATED_ATTR; attr->size = 6; } // IFDBG r_bin_java_print_deprecated_attr_summary(attr); return attr; } R_API ut64 r_bin_java_deprecated_attr_calc_size(RBinJavaAttrInfo *attr) { return attr ? 6 : 0; } R_API RBinJavaAttrInfo *r_bin_java_signature_attr_new(RBinJavaObj *bin, ut8 *buffer, ut64 sz, ut64 buf_offset) { if (sz < 8) { return NULL; } RBinJavaAttrInfo *attr = r_bin_java_default_attr_new (bin, buffer, sz, buf_offset); if (!attr) { return NULL; } ut64 offset = 6; attr->type = R_BIN_JAVA_ATTR_TYPE_SIGNATURE_ATTR; // attr->info.source_file_attr.sourcefile_idx = R_BIN_JAVA_USHORT (buffer, offset); // offset += 2; attr->info.signature_attr.signature_idx = R_BIN_JAVA_USHORT (buffer, offset); offset += 2; attr->info.signature_attr.signature = r_bin_java_get_utf8_from_bin_cp_list ( R_BIN_JAVA_GLOBAL_BIN, attr->info.signature_attr.signature_idx); if (!attr->info.signature_attr.signature) { eprintf ("r_bin_java_signature_attr_new: Unable to resolve the " "Signature UTF8 String Index: 0x%02x\n", attr->info.signature_attr.signature_idx); } attr->size = offset; // IFDBG r_bin_java_print_source_code_file_attr_summary(attr); return attr; } R_API ut64 r_bin_java_signature_attr_calc_size(RBinJavaAttrInfo *attr) { ut64 size = 0; if (attr == NULL) { // TODO eprintf allocation fail return size; } size += 6; // attr->info.source_file_attr.sourcefile_idx = R_BIN_JAVA_USHORT (buffer, offset); size += 2; // attr->info.signature_attr.signature_idx 
= R_BIN_JAVA_USHORT (buffer, offset); size += 2; return size; } R_API RBinJavaAttrInfo *r_bin_java_enclosing_methods_attr_new(RBinJavaObj *bin, ut8 *buffer, ut64 sz, ut64 buf_offset) { ut64 offset = 6; if (sz < 8) { return NULL; } RBinJavaAttrInfo *attr = r_bin_java_default_attr_new (bin, buffer, sz, buf_offset); if (!attr || sz < 10) { free (attr); return NULL; } attr->type = R_BIN_JAVA_ATTR_TYPE_ENCLOSING_METHOD_ATTR; attr->info.enclosing_method_attr.class_idx = R_BIN_JAVA_USHORT (buffer, offset); offset += 2; attr->info.enclosing_method_attr.method_idx = R_BIN_JAVA_USHORT (buffer, offset); offset += 2; attr->info.enclosing_method_attr.class_name = r_bin_java_get_name_from_bin_cp_list (R_BIN_JAVA_GLOBAL_BIN, attr->info.enclosing_method_attr.class_idx); if (attr->info.enclosing_method_attr.class_name == NULL) { eprintf ("Could not resolve enclosing class name for the enclosed method.\n"); } attr->info.enclosing_method_attr.method_name = r_bin_java_get_name_from_bin_cp_list (R_BIN_JAVA_GLOBAL_BIN, attr->info.enclosing_method_attr.method_idx); if (attr->info.enclosing_method_attr.class_name == NULL) { eprintf ("Could not resolve method descriptor for the enclosed method.\n"); } attr->info.enclosing_method_attr.method_descriptor = r_bin_java_get_desc_from_bin_cp_list (R_BIN_JAVA_GLOBAL_BIN, attr->info.enclosing_method_attr.method_idx); if (attr->info.enclosing_method_attr.method_name == NULL) { eprintf ("Could not resolve method name for the enclosed method.\n"); } attr->size = offset; return attr; } R_API ut64 r_bin_java_enclosing_methods_attr_calc_size(RBinJavaAttrInfo *attr) { ut64 size = 0; if (attr) { size += 6; // attr->info.enclosing_method_attr.class_idx = R_BIN_JAVA_USHORT (buffer, offset); size += 2; // attr->info.enclosing_method_attr.method_idx = R_BIN_JAVA_USHORT (buffer, offset); size += 2; } return size; } R_API RBinJavaAttrInfo *r_bin_java_exceptions_attr_new(RBinJavaObj *bin, ut8 *buffer, ut64 sz, ut64 buf_offset) { ut32 i = 0, offset = 0; ut64 size; 
if (sz < 8) { return NULL; } RBinJavaAttrInfo *attr = r_bin_java_default_attr_new (bin, buffer, sz, buf_offset); offset += 6; if (!attr) { return attr; } attr->type = R_BIN_JAVA_ATTR_TYPE_LINE_NUMBER_TABLE_ATTR; attr->info.exceptions_attr.number_of_exceptions = R_BIN_JAVA_USHORT (buffer, offset); offset += 2; size = sizeof (ut16) * attr->info.exceptions_attr.number_of_exceptions; if (size < attr->info.exceptions_attr.number_of_exceptions) { free (attr); return NULL; } attr->info.exceptions_attr.exception_idx_table = (ut16 *) malloc (size); if (!attr->info.exceptions_attr.exception_idx_table) { free (attr); return NULL; } for (i = 0; i < attr->info.exceptions_attr.number_of_exceptions; i++) { if (offset + 2 > sz) { break; } attr->info.exceptions_attr.exception_idx_table[i] = R_BIN_JAVA_USHORT (buffer, offset); offset += 2; } attr->size = offset; // IFDBG r_bin_java_print_exceptions_attr_summary(attr); return attr; } R_API ut64 r_bin_java_exceptions_attr_calc_size(RBinJavaAttrInfo *attr) { ut64 size = 0, i = 0; if (attr) { size += 6; for (i = 0; i < attr->info.exceptions_attr.number_of_exceptions; i++) { // attr->info.exceptions_attr.exception_idx_table[i] = R_BIN_JAVA_USHORT (buffer, offset); size += 2; } } return size; } R_API RBinJavaAttrInfo *r_bin_java_inner_classes_attr_new(RBinJavaObj *bin, ut8 *buffer, ut64 sz, ut64 buf_offset) { RBinJavaClassesAttribute *icattr; RBinJavaCPTypeObj *obj; ut32 i = 0; ut64 offset = 0, curpos; if (sz < 8) { return NULL; } RBinJavaAttrInfo *attr = r_bin_java_default_attr_new (bin, buffer, sz, buf_offset); if (!attr) { return NULL; } offset += 6; attr->type = R_BIN_JAVA_ATTR_TYPE_INNER_CLASSES_ATTR; attr->info.inner_classes_attr.number_of_classes = R_BIN_JAVA_USHORT (buffer, offset); offset += 2; attr->info.inner_classes_attr.classes = r_list_newf (r_bin_java_inner_classes_attr_entry_free); for (i = 0; i < attr->info.inner_classes_attr.number_of_classes; i++) { curpos = buf_offset + offset; if (offset + 8 > sz) { eprintf ("Invalid 
amount of inner classes\n"); break; } icattr = R_NEW0 (RBinJavaClassesAttribute); if (!icattr) { break; } icattr->inner_class_info_idx = R_BIN_JAVA_USHORT (buffer, offset); offset += 2; icattr->outer_class_info_idx = R_BIN_JAVA_USHORT (buffer, offset); offset += 2; icattr->inner_name_idx = R_BIN_JAVA_USHORT (buffer, offset); offset += 2; icattr->inner_class_access_flags = R_BIN_JAVA_USHORT (buffer, offset); offset += 2; icattr->flags_str = retrieve_class_method_access_string (icattr->inner_class_access_flags); icattr->file_offset = curpos; icattr->size = 8; obj = r_bin_java_get_item_from_bin_cp_list (R_BIN_JAVA_GLOBAL_BIN, icattr->inner_name_idx); if (!obj) { eprintf ("BINCPLIS IS HULL %d\n", icattr->inner_name_idx); } icattr->name = r_bin_java_get_item_name_from_bin_cp_list (R_BIN_JAVA_GLOBAL_BIN, obj); if (!icattr->name) { obj = r_bin_java_get_item_from_bin_cp_list (R_BIN_JAVA_GLOBAL_BIN, icattr->inner_class_info_idx); if (!obj) { eprintf ("BINCPLIST IS NULL %d\n", icattr->inner_class_info_idx); } icattr->name = r_bin_java_get_item_name_from_bin_cp_list (R_BIN_JAVA_GLOBAL_BIN, obj); if (!icattr->name) { icattr->name = r_str_dup (NULL, "NULL"); eprintf ("r_bin_java_inner_classes_attr: Unable to find the name for %d index.\n", icattr->inner_name_idx); free (icattr); break; } } IFDBG eprintf ("r_bin_java_inner_classes_attr: Inner class name %d is %s.\n", icattr->inner_name_idx, icattr->name); r_list_append (attr->info.inner_classes_attr.classes, (void *) icattr); } attr->size = offset; // IFDBG r_bin_java_print_inner_classes_attr_summary(attr); return attr; } R_API ut64 r_bin_java_inner_class_attr_calc_size(RBinJavaClassesAttribute *icattr) { ut64 size = 0; if (icattr) { // icattr->inner_class_info_idx = R_BIN_JAVA_USHORT (buffer, offset); size += 2; // icattr->outer_class_info_idx = R_BIN_JAVA_USHORT (buffer, offset); size += 2; // icattr->inner_name_idx = R_BIN_JAVA_USHORT (buffer, offset); size += 2; // icattr->inner_class_access_flags = R_BIN_JAVA_USHORT 
(buffer, offset); size += 2; } return size; } R_API ut64 r_bin_java_inner_classes_attr_calc_size(RBinJavaAttrInfo *attr) { RBinJavaClassesAttribute *icattr = NULL; RListIter *iter; ut64 size = 6; if (!attr) { return 0; } r_list_foreach (attr->info.inner_classes_attr.classes, iter, icattr) { size += r_bin_java_inner_class_attr_calc_size (icattr); } return size; } R_API RBinJavaAttrInfo *r_bin_java_line_number_table_attr_new(RBinJavaObj *bin, ut8 *buffer, ut64 sz, ut64 buf_offset) { ut32 i = 0; ut64 curpos, offset = 0; if (sz < 6) { return NULL; } RBinJavaAttrInfo *attr = r_bin_java_default_attr_new (bin, buffer, sz, buf_offset); if (!attr) { return NULL; } offset += 6; attr->type = R_BIN_JAVA_ATTR_TYPE_LINE_NUMBER_TABLE_ATTR; attr->info.line_number_table_attr.line_number_table_length = R_BIN_JAVA_USHORT (buffer, offset); offset += 2; attr->info.line_number_table_attr.line_number_table = r_list_newf (free); ut32 linenum_len = attr->info.line_number_table_attr.line_number_table_length; RList *linenum_list = attr->info.line_number_table_attr.line_number_table; for (i = 0; i < linenum_len; i++) { curpos = buf_offset + offset; // eprintf ("%"PFMT64x" %"PFMT64x"\n", curpos, sz); // XXX if (curpos + 8 >= sz) break; RBinJavaLineNumberAttribute *lnattr = R_NEW0 (RBinJavaLineNumberAttribute); if (!lnattr) { break; } // wtf it works if (offset - 2 > sz) { R_FREE (lnattr); break; } lnattr->start_pc = R_BIN_JAVA_USHORT (buffer, offset); offset += 2; lnattr->line_number = R_BIN_JAVA_USHORT (buffer, offset); offset += 2; lnattr->file_offset = curpos; lnattr->size = 4; r_list_append (linenum_list, lnattr); } attr->size = offset; return attr; } R_API ut64 r_bin_java_line_number_table_attr_calc_size(RBinJavaAttrInfo *attr) { ut64 size = 6; // RBinJavaLineNumberAttribute *lnattr; RListIter *iter; // RListIter *iter_tmp; if (!attr) { return 0LL; } // r_list_foreach_safe (attr->info.line_number_table_attr.line_number_table, iter, iter_tmp, lnattr) { r_list_foreach_iter 
(attr->info.line_number_table_attr.line_number_table, iter) { // lnattr->start_pc = R_BIN_JAVA_USHORT (buffer, offset); size += 2; // lnattr->line_number = R_BIN_JAVA_USHORT (buffer, offset); size += 2; } return size; } R_API RBinJavaAttrInfo *r_bin_java_source_debug_attr_new(RBinJavaObj *bin, ut8 *buffer, ut64 sz, ut64 buf_offset) { ut64 offset = 6; if (sz < 8) { return NULL; } RBinJavaAttrInfo *attr = r_bin_java_default_attr_new (bin, buffer, sz, buf_offset); if (!attr) { return NULL; } attr->type = R_BIN_JAVA_ATTR_TYPE_SOURCE_DEBUG_EXTENTSION_ATTR; if (attr->length == 0) { eprintf ("r_bin_java_source_debug_attr_new: Attempting to allocate 0 bytes for debug_extension.\n"); attr->info.debug_extensions.debug_extension = NULL; return attr; } else if ((attr->length + offset) > sz) { eprintf ("r_bin_java_source_debug_attr_new: Expected %d byte(s) got %" PFMT64d " bytes for debug_extension.\n", attr->length, (offset + sz)); } attr->info.debug_extensions.debug_extension = (ut8 *) malloc (attr->length); if (attr->info.debug_extensions.debug_extension && (attr->length > (sz - offset))) { memcpy (attr->info.debug_extensions.debug_extension, buffer + offset, sz - offset); } else if (attr->info.debug_extensions.debug_extension) { memcpy (attr->info.debug_extensions.debug_extension, buffer + offset, attr->length); } else { eprintf ("r_bin_java_source_debug_attr_new: Unable to allocate the data for the debug_extension.\n"); } offset += attr->length; attr->size = offset; return attr; } R_API ut64 r_bin_java_source_debug_attr_calc_size(RBinJavaAttrInfo *attr) { ut64 size = 6; if (!attr) { return 0LL; } if (attr->info.debug_extensions.debug_extension) { size += attr->length; } return size; } R_API ut64 r_bin_java_local_variable_table_attr_calc_size(RBinJavaAttrInfo *attr) { ut64 size = 0; // ut64 offset = 0; RListIter *iter; // RBinJavaLocalVariableAttribute *lvattr; if (!attr) { return 0LL; } size += 6; // attr->info.local_variable_table_attr.table_length = R_BIN_JAVA_USHORT 
(buffer, offset); size += 2; // r_list_foreach (attr->info.local_variable_table_attr.local_variable_table, iter, lvattr) { r_list_foreach_iter (attr->info.local_variable_table_attr.local_variable_table, iter) { // lvattr->start_pc = R_BIN_JAVA_USHORT (buffer, offset); size += 2; // lvattr->length = R_BIN_JAVA_USHORT (buffer, offset); size += 2; // lvattr->name_idx = R_BIN_JAVA_USHORT (buffer, offset); size += 2; // lvattr->descriptor_idx = R_BIN_JAVA_USHORT (buffer, offset); size += 2; // lvattr->index = R_BIN_JAVA_USHORT (buffer, offset); size += 2; } return size; } R_API RBinJavaAttrInfo *r_bin_java_local_variable_table_attr_new(RBinJavaObj *bin, ut8 *buffer, ut64 sz, ut64 buf_offset) { RBinJavaLocalVariableAttribute *lvattr; ut64 curpos = 0, offset = 6; ut32 i = 0; if (!bin || !buffer || sz < 8) { return NULL; } RBinJavaAttrInfo *attr = r_bin_java_default_attr_new (bin, buffer, sz, buf_offset); if (!attr) { return NULL; } attr->type = R_BIN_JAVA_ATTR_TYPE_LOCAL_VARIABLE_TABLE_ATTR; attr->info.local_variable_table_attr.table_length = R_BIN_JAVA_USHORT (buffer, offset); offset += 2; attr->info.local_variable_table_attr.local_variable_table =\ r_list_newf (r_bin_java_local_variable_table_attr_entry_free); for (i = 0; i < attr->info.local_variable_table_attr.table_length; i++) { if (offset + 10 > sz) { break; } curpos = buf_offset + offset; lvattr = R_NEW0 (RBinJavaLocalVariableAttribute); if (!lvattr) { break; } lvattr->start_pc = R_BIN_JAVA_USHORT (buffer, offset); offset += 2; lvattr->length = R_BIN_JAVA_USHORT (buffer, offset); offset += 2; lvattr->name_idx = R_BIN_JAVA_USHORT (buffer, offset); offset += 2; lvattr->descriptor_idx = R_BIN_JAVA_USHORT (buffer, offset); offset += 2; lvattr->index = R_BIN_JAVA_USHORT (buffer, offset); offset += 2; lvattr->file_offset = curpos; lvattr->name = r_bin_java_get_utf8_from_bin_cp_list (R_BIN_JAVA_GLOBAL_BIN, lvattr->name_idx); lvattr->size = 10; if (!lvattr->name) { lvattr->name = strdup ("NULL"); eprintf 
("r_bin_java_local_variable_table_attr_new: Unable to find the name for %d index.\n", lvattr->name_idx); } lvattr->descriptor = r_bin_java_get_utf8_from_bin_cp_list (R_BIN_JAVA_GLOBAL_BIN, lvattr->descriptor_idx); if (!lvattr->descriptor) { lvattr->descriptor = strdup ("NULL"); eprintf ("r_bin_java_local_variable_table_attr_new: Unable to find the descriptor for %d index.\n", lvattr->descriptor_idx); } r_list_append (attr->info.local_variable_table_attr.local_variable_table, lvattr); } attr->size = offset; // IFDBG r_bin_java_print_local_variable_table_attr_summary(attr); return attr; } R_API ut64 r_bin_java_local_variable_type_table_attr_calc_size(RBinJavaAttrInfo *attr) { // RBinJavaLocalVariableTypeAttribute *lvattr; RListIter *iter; ut64 size = 0; if (attr) { RList *list = attr->info.local_variable_type_table_attr.local_variable_table; size += 6; // attr->info.local_variable_type_table_attr.table_length = R_BIN_JAVA_USHORT (buffer, offset); size += 2; // r_list_foreach (list, iter, lvattr) { r_list_foreach_iter (list, iter) { // lvattr->start_pc = R_BIN_JAVA_USHORT (buffer, offset); size += 2; // lvattr->length = R_BIN_JAVA_USHORT (buffer, offset); size += 2; // lvattr->name_idx = R_BIN_JAVA_USHORT (buffer, offset); size += 2; // lvattr->signature_idx = R_BIN_JAVA_USHORT (buffer, offset); size += 2; // lvattr->index = R_BIN_JAVA_USHORT (buffer, offset); size += 2; } } return size; } R_API RBinJavaAttrInfo *r_bin_java_local_variable_type_table_attr_new(RBinJavaObj *bin, ut8 *buffer, ut64 sz, ut64 buf_offset) { if (sz < 8) { return NULL; } RBinJavaLocalVariableTypeAttribute *lvattr; ut64 offset = 6; ut32 i = 0; RBinJavaAttrInfo *attr = r_bin_java_default_attr_new (bin, buffer, sz, 0); if (!attr) { return NULL; } attr->type = R_BIN_JAVA_ATTR_TYPE_LOCAL_VARIABLE_TYPE_TABLE_ATTR; attr->info.local_variable_type_table_attr.table_length = R_BIN_JAVA_USHORT (buffer, offset); offset += 2; attr->info.local_variable_type_table_attr.local_variable_table = r_list_newf 
(r_bin_java_local_variable_type_table_attr_entry_free); for (i = 0; i < attr->info.local_variable_type_table_attr.table_length; i++) { ut64 curpos = buf_offset + offset; lvattr = R_NEW0 (RBinJavaLocalVariableTypeAttribute); if (!lvattr) { perror ("calloc"); break; } if (offset + 10 > sz) { eprintf ("oob"); free (lvattr); break; } lvattr->start_pc = R_BIN_JAVA_USHORT (buffer, offset); offset += 2; lvattr->length = R_BIN_JAVA_USHORT (buffer, offset); offset += 2; lvattr->name_idx = R_BIN_JAVA_USHORT (buffer, offset); offset += 2; lvattr->signature_idx = R_BIN_JAVA_USHORT (buffer, offset); offset += 2; lvattr->index = R_BIN_JAVA_USHORT (buffer, offset); offset += 2; lvattr->file_offset = curpos; lvattr->name = r_bin_java_get_utf8_from_bin_cp_list (R_BIN_JAVA_GLOBAL_BIN, lvattr->name_idx); lvattr->size = 10; if (!lvattr->name) { lvattr->name = strdup ("NULL"); eprintf ("r_bin_java_local_variable_type_table_attr_new: Unable to find the name for %d index.\n", lvattr->name_idx); } lvattr->signature = r_bin_java_get_utf8_from_bin_cp_list (R_BIN_JAVA_GLOBAL_BIN, lvattr->signature_idx); if (!lvattr->signature) { lvattr->signature = strdup ("NULL"); eprintf ("r_bin_java_local_variable_type_table_attr_new: Unable to find the descriptor for %d index.\n", lvattr->signature_idx); } r_list_append (attr->info.local_variable_type_table_attr.local_variable_table, lvattr); } // IFDBG r_bin_java_print_local_variable_type_table_attr_summary(attr); attr->size = offset; return attr; } R_API RBinJavaAttrInfo *r_bin_java_source_code_file_attr_new(RBinJavaObj *bin, ut8 *buffer, ut64 sz, ut64 buf_offset) { if (!sz || sz == UT64_MAX) { return NULL; } #if 0 /// XXX this breaks tests if (sz < 8) { return NULL; } #endif ut64 offset = 0; RBinJavaAttrInfo *attr = r_bin_java_default_attr_new (bin, buffer, sz, buf_offset); offset += 6; if (attr) { attr->type = R_BIN_JAVA_ATTR_TYPE_SOURCE_FILE_ATTR; attr->info.source_file_attr.sourcefile_idx = R_BIN_JAVA_USHORT (buffer, offset); offset += 2; 
attr->size = offset; // IFDBG r_bin_java_print_source_code_file_attr_summary(attr); } return attr; } R_API ut64 r_bin_java_source_code_file_attr_calc_size(RBinJavaAttrInfo *attr) { return attr ? 8 : 0; } R_API RBinJavaAttrInfo *r_bin_java_synthetic_attr_new(RBinJavaObj *bin, ut8 *buffer, ut64 sz, ut64 buf_offset) { if (sz < 8) { return NULL; } RBinJavaAttrInfo *attr = r_bin_java_default_attr_new (bin, buffer, sz, buf_offset); if (!attr) { return NULL; } attr->type = R_BIN_JAVA_ATTR_TYPE_SYNTHETIC_ATTR; attr->size = 6; return attr; } R_API ut64 r_bin_java_synthetic_attr_calc_size(RBinJavaAttrInfo *attr) { return attr ? 12 : 6; } R_API RBinJavaInterfaceInfo *r_bin_java_interface_new(RBinJavaObj *bin, const ut8 *buffer, ut64 sz) { IFDBG eprintf ("Parsing RBinJavaInterfaceInfo\n"); RBinJavaInterfaceInfo *ifobj = R_NEW0 (RBinJavaInterfaceInfo); if (ifobj) { if (buffer) { ifobj->class_info_idx = R_BIN_JAVA_USHORT (buffer, 0); ifobj->cp_class = r_bin_java_get_item_from_bin_cp_list (bin, ifobj->class_info_idx); if (ifobj->cp_class) { ifobj->name = r_bin_java_get_item_name_from_bin_cp_list (bin, ifobj->cp_class); } else { ifobj->name = r_str_dup (NULL, "NULL"); } ifobj->size = 2; } else { ifobj->class_info_idx = 0; ifobj->name = r_str_dup (NULL, "NULL"); } } return ifobj; } R_API RBinJavaVerificationObj *r_bin_java_verification_info_from_type(RBinJavaObj *bin, R_BIN_JAVA_STACKMAP_TYPE type, ut32 value) { RBinJavaVerificationObj *se = R_NEW0 (RBinJavaVerificationObj); if (se) { se->tag = type; if (se->tag == R_BIN_JAVA_STACKMAP_OBJECT) { se->info.obj_val_cp_idx = (ut16) value; } else if (se->tag == R_BIN_JAVA_STACKMAP_UNINIT) { se->info.uninit_offset = (ut16) value; } } return se; } R_API RBinJavaVerificationObj *r_bin_java_read_from_buffer_verification_info_new(ut8 *buffer, ut64 sz, ut64 buf_offset) { if (sz < 8) { return NULL; } ut64 offset = 0; RBinJavaVerificationObj *se = R_NEW0 (RBinJavaVerificationObj); if (!se) { return NULL; } se->file_offset = buf_offset; se->tag = 
buffer[offset]; offset += 1; if (se->tag == R_BIN_JAVA_STACKMAP_OBJECT) { se->info.obj_val_cp_idx = R_BIN_JAVA_USHORT (buffer, offset); offset += 2; } else if (se->tag == R_BIN_JAVA_STACKMAP_UNINIT) { se->info.uninit_offset = R_BIN_JAVA_USHORT (buffer, offset); offset += 2; } if (R_BIN_JAVA_STACKMAP_UNINIT < se->tag) { r_bin_java_verification_info_free (se); return NULL; } se->size = offset; return se; } R_API ut64 rbin_java_verification_info_calc_size(RBinJavaVerificationObj *se) { ut64 sz = 1; if (!se) { return 0; } // r_buf_read_at (bin->b, offset, (ut8*)(&se->tag), 1) switch (se->tag) { case R_BIN_JAVA_STACKMAP_OBJECT: // r_buf_read_at (bin->b, offset+1, (ut8*)buf, 2) sz += 2; break; case R_BIN_JAVA_STACKMAP_UNINIT: // r_buf_read_at (bin->b, offset+1, (ut8*)buf, 2) sz += 2; break; } return sz; } R_API RBinJavaStackMapFrameMetas *r_bin_java_determine_stack_frame_type(ut8 tag) { ut8 type_value = 0; if (tag < 64) { type_value = R_BIN_JAVA_STACK_FRAME_SAME; } else if (tag < 128) { type_value = R_BIN_JAVA_STACK_FRAME_SAME_LOCALS_1; } else if (247 < tag && tag < 251) { type_value = R_BIN_JAVA_STACK_FRAME_CHOP; } else if (tag == 251) { type_value = R_BIN_JAVA_STACK_FRAME_SAME_FRAME_EXTENDED; } else if (251 < tag && tag < 255) { type_value = R_BIN_JAVA_STACK_FRAME_APPEND; } else if (tag == 255) { type_value = R_BIN_JAVA_STACK_FRAME_FULL_FRAME; } else { type_value = R_BIN_JAVA_STACK_FRAME_RESERVED; } return &R_BIN_JAVA_STACK_MAP_FRAME_METAS[type_value]; } R_API ut64 r_bin_java_stack_map_frame_calc_size(RBinJavaStackMapFrame *sf) { ut64 size = 0; RListIter *iter, *iter_tmp; RBinJavaVerificationObj *se; if (sf) { // sf->tag = buffer[offset]; size += 1; switch (sf->type) { case R_BIN_JAVA_STACK_FRAME_SAME: // Nothing to read break; case R_BIN_JAVA_STACK_FRAME_SAME_LOCALS_1: r_list_foreach_safe (sf->stack_items, iter, iter_tmp, se) { size += rbin_java_verification_info_calc_size (se); } break; case R_BIN_JAVA_STACK_FRAME_CHOP: // sf->offset_delta = R_BIN_JAVA_USHORT 
(buffer, offset); size += 2; break; case R_BIN_JAVA_STACK_FRAME_SAME_FRAME_EXTENDED: // sf->offset_delta = R_BIN_JAVA_USHORT (buffer, offset); size += 2; r_list_foreach_safe (sf->stack_items, iter, iter_tmp, se) { size += rbin_java_verification_info_calc_size (se); } break; case R_BIN_JAVA_STACK_FRAME_APPEND: // sf->offset_delta = R_BIN_JAVA_USHORT (buffer, offset); size += 2; r_list_foreach_safe (sf->stack_items, iter, iter_tmp, se) { size += rbin_java_verification_info_calc_size (se); } break; case R_BIN_JAVA_STACK_FRAME_FULL_FRAME: // sf->offset_delta = R_BIN_JAVA_USHORT (buffer, offset); size += 2; // sf->number_of_locals = R_BIN_JAVA_USHORT (buffer, offset); size += 2; r_list_foreach_safe (sf->local_items, iter, iter_tmp, se) { size += rbin_java_verification_info_calc_size (se); } // sf->number_of_stack_items = R_BIN_JAVA_USHORT (buffer, offset); size += 2; r_list_foreach_safe (sf->stack_items, iter, iter_tmp, se) { size += rbin_java_verification_info_calc_size (se); } break; default: eprintf ("Unknown type\n"); break; } } return size; } R_API RBinJavaStackMapFrame *r_bin_java_stack_map_frame_new(ut8 *buffer, ut64 sz, RBinJavaStackMapFrame *p_frame, ut64 buf_offset) { if (sz < 8) { return NULL; } RBinJavaStackMapFrame *stack_frame = r_bin_java_default_stack_frame (); RBinJavaVerificationObj *se = NULL; ut64 offset = 0; if (!stack_frame) { return NULL; } stack_frame->tag = buffer[offset]; offset += 1; stack_frame->metas->type_info = (void *) r_bin_java_determine_stack_frame_type (stack_frame->tag); stack_frame->type = ((RBinJavaStackMapFrameMetas *) stack_frame->metas->type_info)->type; stack_frame->file_offset = buf_offset; stack_frame->p_stack_frame = p_frame; switch (stack_frame->type) { case R_BIN_JAVA_STACK_FRAME_SAME: // Maybe? 1. Copy the previous frames locals and set the locals count. 
// copy_type_info_to_stack_frame_list_up_to_idx (p_frame->local_items, stack_frame->local_items, idx); if (p_frame) { stack_frame->number_of_locals = p_frame->number_of_locals; } else { IFINT eprintf ("><?><\n"); IFDBG eprintf ("Unable to set previous stackframe with the number of locals (current info.code_attr.implicit_frame was probably not set :/)"); } IFDBG eprintf ("r_bin_java_stack_map_frame_new: TODO Stack Frame Same Locals Condition is untested, so there may be issues.\n"); break; case R_BIN_JAVA_STACK_FRAME_SAME_LOCALS_1: // 1. Read the stack type stack_frame->number_of_stack_items = 1; se = r_bin_java_read_from_buffer_verification_info_new (buffer + offset, sz - offset, buf_offset + offset); IFDBG eprintf ("r_bin_java_stack_map_frame_new: Parsed R_BIN_JAVA_STACK_FRAME_SAME_LOCALS_1.\n"); if (se) { offset += se->size; } else { eprintf ("r_bin_java_stack_map_frame_new: Unable to parse the Stack Items for the stack frame.\n"); r_bin_java_stack_frame_free (stack_frame); return NULL; } r_list_append (stack_frame->stack_items, (void *) se); // Maybe? 3. Copy the previous frames locals and set the locals count. // copy_type_info_to_stack_frame_list_up_to_idx (p_frame->local_items, stack_frame->local_items, idx); if (p_frame) { stack_frame->number_of_locals = p_frame->number_of_locals; } else { IFDBG eprintf ("Unable to set previous stackframe with the number of locals (current info.code_attr.implicit_frame was probably not set :/)"); } IFDBG eprintf ("r_bin_java_stack_map_frame_new: TODO Stack Frame Same Locals 1 Stack Element Condition is untested, so there may be issues.\n"); break; case R_BIN_JAVA_STACK_FRAME_CHOP: // 1. Calculate the max index we want to copy from the list of the // previous frames locals IFDBG eprintf ("r_bin_java_stack_map_frame_new: Parsing R_BIN_JAVA_STACK_FRAME_CHOP.\n"); // ut16 k = 251 - stack_frame->tag; /*, idx = p_frame->number_of_locals - k; */ // 2. 
read the uoffset value stack_frame->offset_delta = R_BIN_JAVA_USHORT (buffer, offset); offset += 2; // Maybe? 3. Copy the previous frames locals and set the locals count. // copy_type_info_to_stack_frame_list_up_to_idx (p_frame->local_items, stack_frame->local_items, idx); if (p_frame) { stack_frame->number_of_locals = p_frame->number_of_locals; } else { IFINT eprintf ("><?><\n"); IFDBG eprintf ("Unable to set previous stackframe with the number of locals (current info.code_attr.implicit_frame was probably not set :/)"); } IFDBG eprintf ("r_bin_java_stack_map_frame_new: TODO Stack Frame Chop Condition is untested, so there may be issues.\n"); break; case R_BIN_JAVA_STACK_FRAME_SAME_FRAME_EXTENDED: IFDBG eprintf ("r_bin_java_stack_map_frame_new: Parsing R_BIN_JAVA_STACK_FRAME_SAME_FRAME_EXTENDED.\n"); // 1. Read the uoffset stack_frame->offset_delta = R_BIN_JAVA_USHORT (buffer, offset); offset += 2; // 2. Read the stack element type stack_frame->number_of_stack_items = 1; se = r_bin_java_read_from_buffer_verification_info_new (buffer + offset, sz - offset, buf_offset + offset); if (se) { offset += se->size; } else { eprintf ("r_bin_java_stack_map_frame_new: Unable to parse the Stack Items for the stack frame.\n"); r_bin_java_stack_frame_free (stack_frame); return NULL; } r_list_append (stack_frame->stack_items, (void *) se); // Maybe? 3. 
Copy the previous frames locals to the current locals // copy_type_info_to_stack_frame_list_up_to_idx (p_frame->local_items, stack_frame->local_items, idx); if (p_frame) { stack_frame->number_of_locals = p_frame->number_of_locals; } else { IFINT eprintf ("><?><\n"); IFDBG eprintf ("Unable to set previous stackframe with the number of locals (current info.code_attr.implicit_frame was probably not set :/)"); } IFDBG eprintf ("r_bin_java_stack_map_frame_new: TODO Stack Frame Same Locals Frame Stack 1 Extended Condition is untested, so there may be issues.\n"); break; case R_BIN_JAVA_STACK_FRAME_APPEND: IFDBG eprintf ("r_bin_java_stack_map_frame_new: Parsing R_BIN_JAVA_STACK_FRAME_APPEND.\n"); // 1. Calculate the max index we want to copy from the list of the // previous frames locals ut16 k = stack_frame->tag - 251; ut32 i = 0; // 2. Read the uoffset stack_frame->offset_delta = R_BIN_JAVA_USHORT (buffer, offset); offset += 2; // Maybe? 3. Copy the previous frames locals to the current locals // copy_type_info_to_stack_frame_list_up_to_idx (p_frame->local_items, stack_frame->local_items, idx); // 4. 
Read off the rest of the appended locals types for (i = 0; i < k; i++) { if (offset >= sz) { break; } IFDBG eprintf ("r_bin_java_stack_map_frame_new: Parsing verifying the k'th frame: %d of %d.\n", i, k); se = r_bin_java_read_from_buffer_verification_info_new (buffer + offset, sz - offset, buf_offset + offset); IFDBG eprintf ("r_bin_java_stack_map_frame_new: Completed Parsing\n"); if (se) { offset += se->size; } else { eprintf ("r_bin_java_stack_map_frame_new: Unable to parse the locals for the stack frame.\n"); r_bin_java_stack_frame_free (stack_frame); return NULL; } r_list_append (stack_frame->local_items, (void *) se); } IFDBG eprintf ("r_bin_java_stack_map_frame_new: Breaking out of loop"); IFDBG eprintf ("p_frame: %p\n", p_frame); if (p_frame) { stack_frame->number_of_locals = p_frame->number_of_locals + k; } else { IFINT eprintf ("><?><\n"); IFDBG eprintf ("Unable to set previous stackframe with the number of locals (current info.code_attr.implicit_frame was probably not set :/)"); } IFDBG eprintf ("r_bin_java_stack_map_frame_new: TODO Stack Frame Same Locals Frame Stack 1 Extended Condition is untested, so there may be issues.\n"); break; case R_BIN_JAVA_STACK_FRAME_FULL_FRAME: IFDBG eprintf ("r_bin_java_stack_map_frame_new: Parsing R_BIN_JAVA_STACK_FRAME_FULL_FRAME.\n"); stack_frame->offset_delta = R_BIN_JAVA_USHORT (buffer, offset); offset += 2; // IFDBG eprintf ("r_bin_java_stack_map_frame_new: Code Size > 65535, read(%d byte(s)), offset = 0x%08x.\n", var_sz, stack_frame->offset_delta); // Read the number of variables based on the max # local variable stack_frame->number_of_locals = R_BIN_JAVA_USHORT (buffer, offset); offset += 2; // IFDBG eprintf ("r_bin_java_stack_map_frame_new: Max ulocalvar > 65535, read(%d byte(s)), number_of_locals = 0x%08x.\n", var_sz, stack_frame->number_of_locals); IFDBG r_bin_java_print_stack_map_frame_summary(stack_frame); // read the number of locals off the stack for (i = 0; i < stack_frame->number_of_locals; i++) { if 
(offset >= sz) { break; } se = r_bin_java_read_from_buffer_verification_info_new (buffer + offset, sz - offset, buf_offset + offset); if (se) { offset += se->size; // r_list_append (stack_frame->local_items, (void *) se); } else { eprintf ("r_bin_java_stack_map_frame_new: Unable to parse the locals for the stack frame.\n"); r_bin_java_stack_frame_free (stack_frame); return NULL; } r_list_append (stack_frame->local_items, (void *) se); } // Read the number of stack items based on the max size of stack stack_frame->number_of_stack_items = R_BIN_JAVA_USHORT (buffer, offset); offset += 2; // IFDBG eprintf ("r_bin_java_stack_map_frame_new: Max ustack items > 65535, read(%d byte(s)), number_of_locals = 0x%08x.\n", var_sz, stack_frame->number_of_stack_items); // read the stack items for (i = 0; i < stack_frame->number_of_stack_items; i++) { if (offset >= sz) { break; } se = r_bin_java_read_from_buffer_verification_info_new (buffer + offset, sz - offset, buf_offset + offset); if (se) { offset += se->size; // r_list_append (stack_frame->stack_items, (void *) se); } else { eprintf ("r_bin_java_stack_map_frame_new: Unable to parse the stack items for the stack frame.\n"); r_bin_java_stack_frame_free (stack_frame); return NULL; } r_list_append (stack_frame->local_items, (void *) se); } break; default: eprintf ("java: Unknown type\n"); break; } // IFDBG eprintf ("Created a stack frame at offset(0x%08"PFMT64x") of size: %d\n", buf_offset, stack_frame->size);//r_bin_java_print_stack_map_frame_summary(stack_frame); stack_frame->size = offset; // IFDBG r_bin_java_print_stack_map_frame_summary(stack_frame); return stack_frame; } R_API ut16 r_bin_java_find_cp_class_ref_from_name_idx(RBinJavaObj *bin, ut16 name_idx) { ut16 pos, len = (ut16) r_list_length (bin->cp_list); RBinJavaCPTypeObj *item; for (pos = 0; pos < len; pos++) { item = (RBinJavaCPTypeObj *) r_list_get_n (bin->cp_list, pos); if (item && item->tag == R_BIN_JAVA_CP_CLASS && item->info.cp_class.name_idx == name_idx) { 
break; } } return (pos != len) ? pos : 0; } R_API RBinJavaStackMapFrame *r_bin_java_default_stack_frame(void) { RBinJavaStackMapFrame *sf = R_NEW0 (RBinJavaStackMapFrame); if (!sf) { return NULL; } sf->metas = R_NEW0 (RBinJavaMetaInfo); if (!sf->metas) { free (sf); return NULL; } sf->metas->type_info = (void *) &R_BIN_JAVA_STACK_MAP_FRAME_METAS[R_BIN_JAVA_STACK_FRAME_IMPLICIT]; sf->type = ((RBinJavaStackMapFrameMetas *) sf->metas->type_info)->type; sf->local_items = r_list_newf (r_bin_java_verification_info_free); sf->stack_items = r_list_newf (r_bin_java_verification_info_free); sf->number_of_stack_items = 0; sf->number_of_locals = 0; return sf; } R_API RBinJavaStackMapFrame *r_bin_java_build_stack_frame_from_local_variable_table(RBinJavaObj *bin, RBinJavaAttrInfo *attr) { RBinJavaStackMapFrame *sf = r_bin_java_default_stack_frame (); RBinJavaLocalVariableAttribute *lvattr = NULL; RBinJavaVerificationObj *type_item; RListIter *iter = NULL; ut32 value_cnt = 0; ut8 value; if (!sf || !bin || !attr || attr->type != R_BIN_JAVA_ATTR_TYPE_LOCAL_VARIABLE_TABLE_ATTR) { eprintf ("Attempting to create a stack_map frame from a bad attribute.\n"); return sf; } sf->number_of_locals = attr->info.local_variable_table_attr.table_length; r_list_foreach (attr->info.local_variable_table_attr.local_variable_table, iter, lvattr) { ut32 pos = 0; // knock the array Types while (lvattr->descriptor[pos] == '[') { pos++; } value = lvattr->descriptor[pos]; // IFDBG eprintf ("Found the following type value: %c at pos %d in %s\n", value, pos, lvattr->descriptor); switch (value) { case 'I': case 'Z': case 'S': case 'B': case 'C': type_item = r_bin_java_verification_info_from_type (bin, R_BIN_JAVA_STACKMAP_INTEGER, 0); break; case 'F': type_item = r_bin_java_verification_info_from_type (bin, R_BIN_JAVA_STACKMAP_FLOAT, 0); break; case 'D': type_item = r_bin_java_verification_info_from_type (bin, R_BIN_JAVA_STACKMAP_DOUBLE, 0); break; case 'J': type_item = r_bin_java_verification_info_from_type 
(bin, R_BIN_JAVA_STACKMAP_LONG, 0); break; case 'L': // TODO: FIXME write something that will iterate over the CP Pool and find the // CONSTANT_Class_info referencing this { ut16 idx = r_bin_java_find_cp_class_ref_from_name_idx (bin, lvattr->name_idx); type_item = r_bin_java_verification_info_from_type (bin, R_BIN_JAVA_STACKMAP_OBJECT, idx); } break; default: eprintf ("r_bin_java_build_stack_frame_from_local_variable_table: " "not sure how to handle: name: %s, type: %s\n", lvattr->name, lvattr->descriptor); type_item = r_bin_java_verification_info_from_type (bin, R_BIN_JAVA_STACKMAP_NULL, 0); } if (type_item) { r_list_append (sf->local_items, (void *) type_item); } value_cnt++; } if (value_cnt != attr->info.local_variable_table_attr.table_length) { IFDBG eprintf ("r_bin_java_build_stack_frame_from_local_variable_table: " "Number of locals not accurate. Expected %d but got %d", attr->info.local_variable_table_attr.table_length, value_cnt); } return sf; } R_API ut64 r_bin_java_stack_map_table_attr_calc_size(RBinJavaAttrInfo *attr) { ut64 size = 0; RListIter *iter, *iter_tmp; RBinJavaStackMapFrame *sf; if (attr) { // attr = r_bin_java_default_attr_new (buffer, sz, buf_offset); size += 6; // IFDBG r_bin_java_print_source_code_file_attr_summary(attr); // Current spec does not call for variable sizes. 
// attr->info.stack_map_table_attr.number_of_entries = R_BIN_JAVA_USHORT (buffer, offset); size += 2; r_list_foreach_safe (attr->info.stack_map_table_attr.stack_map_frame_entries, iter, iter_tmp, sf) { size += r_bin_java_stack_map_frame_calc_size (sf); } } return size; } R_API RBinJavaAttrInfo *r_bin_java_stack_map_table_attr_new(RBinJavaObj *bin, ut8 *buffer, ut64 sz, ut64 buf_offset) { ut32 i = 0; ut64 offset = 0; if (sz < 8) { return NULL; } RBinJavaStackMapFrame *stack_frame = NULL, *new_stack_frame = NULL; if (sz < 10) { return NULL; } RBinJavaAttrInfo *attr = r_bin_java_default_attr_new (bin, buffer, sz, buf_offset); offset += 6; IFDBG eprintf ("r_bin_java_stack_map_table_attr_new: New stack map allocated.\n"); if (!attr) { return NULL; } attr->info.stack_map_table_attr.stack_map_frame_entries = r_list_newf (r_bin_java_stack_frame_free); // IFDBG r_bin_java_print_source_code_file_attr_summary(attr); // Current spec does not call for variable sizes. attr->info.stack_map_table_attr.number_of_entries = R_BIN_JAVA_USHORT (buffer, offset); offset += 2; IFDBG eprintf ("r_bin_java_stack_map_table_attr_new: Processing stack map, summary is:\n"); IFDBG r_bin_java_print_stack_map_table_attr_summary(attr); for (i = 0; i < attr->info.stack_map_table_attr.number_of_entries; i++) { // read next stack frame IFDBG eprintf ("Reading StackMap Entry #%d @ 0x%08"PFMT64x "\n", i, buf_offset + offset); if (stack_frame == NULL && R_BIN_JAVA_GLOBAL_BIN && R_BIN_JAVA_GLOBAL_BIN->current_code_attr) { IFDBG eprintf ("Setting an implicit frame at #%d @ 0x%08"PFMT64x "\n", i, buf_offset + offset); stack_frame = R_BIN_JAVA_GLOBAL_BIN->current_code_attr->info.code_attr.implicit_frame; } IFDBG eprintf ("Reading StackMap Entry #%d @ 0x%08"PFMT64x ", current stack_frame: %p\n", i, buf_offset + offset, stack_frame); if (offset >= sz) { r_bin_java_stack_map_table_attr_free (attr); return NULL; } new_stack_frame = r_bin_java_stack_map_frame_new (buffer + offset, sz - offset, stack_frame, 
buf_offset + offset); if (new_stack_frame) { offset += new_stack_frame->size; // append stack frame to the list r_list_append (attr->info.stack_map_table_attr.stack_map_frame_entries, (void *) new_stack_frame); stack_frame = new_stack_frame; } else { eprintf ("r_bin_java_stack_map_table_attr_new: Unable to parse the stack frame for the stack map table.\n"); r_bin_java_stack_map_table_attr_free (attr); attr = NULL; break; } } if (attr) { attr->size = offset; } return attr; } // End attribute types new // Start new Constant Pool Types R_API RBinJavaCPTypeObj *r_bin_java_do_nothing_new(RBinJavaObj *bin, ut8 *buffer, ut64 sz) { return (RBinJavaCPTypeObj *) NULL; } R_API ut64 r_bin_java_do_nothing_calc_size(RBinJavaCPTypeObj *obj) { return 0; } R_API void r_bin_java_do_nothing_free(void /*RBinJavaCPTypeObj*/ *obj) { return; } R_API RBinJavaCPTypeObj *r_bin_java_unknown_cp_new(RBinJavaObj *bin, ut8 *buffer, ut64 sz) { ut8 tag = buffer[0]; RBinJavaCPTypeObj *obj = NULL; obj = (RBinJavaCPTypeObj *) malloc (sizeof (RBinJavaCPTypeObj)); if (obj) { memset (obj, 0, sizeof (RBinJavaCPTypeObj)); obj->tag = tag; obj->metas = R_NEW0 (RBinJavaMetaInfo); obj->metas->type_info = (void *) &R_BIN_JAVA_CP_METAS[R_BIN_JAVA_CP_UNKNOWN]; } return obj; } R_API ut64 r_bin_java_unknown_cp_calc_size(RBinJavaCPTypeObj *obj) { return 1LL; } R_API RBinJavaCPTypeObj *r_bin_java_class_cp_new(RBinJavaObj *bin, ut8 *buffer, ut64 sz) { ut8 tag = buffer[0]; int quick_check = r_bin_java_quick_check (R_BIN_JAVA_CP_CLASS, tag, sz, "Class"); if (quick_check > 0) { return NULL; } RBinJavaCPTypeObj *obj = R_NEW0 (RBinJavaCPTypeObj); if (obj) { obj->tag = tag; obj->metas = R_NEW0 (RBinJavaMetaInfo); obj->metas->type_info = (void *) &R_BIN_JAVA_CP_METAS[tag]; obj->info.cp_class.name_idx = R_BIN_JAVA_USHORT (buffer, 1); } return obj; } R_API ut64 r_bin_java_class_cp_calc_size(RBinJavaCPTypeObj *obj) { ut64 size = 0; // ut8 tag = buffer[0]; size += 1; // obj->info.cp_class.name_idx = R_BIN_JAVA_USHORT (buffer, 
1); size += 2; return size; } R_API RBinJavaCPTypeObj *r_bin_java_fieldref_cp_new(RBinJavaObj *bin, ut8 *buffer, ut64 sz) { ut8 tag = buffer[0]; RBinJavaCPTypeObj *obj = NULL; int quick_check = 0; quick_check = r_bin_java_quick_check (R_BIN_JAVA_CP_FIELDREF, tag, sz, "FieldRef"); if (quick_check > 0) { return obj; } obj = (RBinJavaCPTypeObj *) malloc (sizeof (RBinJavaCPTypeObj)); if (obj) { memset (obj, 0, sizeof (RBinJavaCPTypeObj)); obj->tag = tag; obj->metas = R_NEW0 (RBinJavaMetaInfo); obj->metas->type_info = (void *) &R_BIN_JAVA_CP_METAS[tag]; obj->info.cp_field.class_idx = R_BIN_JAVA_USHORT (buffer, 1); obj->info.cp_field.name_and_type_idx = R_BIN_JAVA_USHORT (buffer, 3); } return (RBinJavaCPTypeObj *) obj; } R_API ut64 r_bin_java_fieldref_cp_calc_size(RBinJavaCPTypeObj *obj) { ut64 size = 0; // tag size += 1; // obj->info.cp_field.class_idx = R_BIN_JAVA_USHORT (buffer, 1); size += 2; // obj->info.cp_field.name_and_type_idx = R_BIN_JAVA_USHORT (buffer, 3); size += 2; return size; } R_API RBinJavaCPTypeObj *r_bin_java_methodref_cp_new(RBinJavaObj *bin, ut8 *buffer, ut64 sz) { ut8 tag = buffer[0]; RBinJavaCPTypeObj *obj = NULL; int quick_check = 0; quick_check = r_bin_java_quick_check (R_BIN_JAVA_CP_METHODREF, tag, sz, "MethodRef"); if (quick_check > 0) { return obj; } obj = (RBinJavaCPTypeObj *) malloc (sizeof (RBinJavaCPTypeObj)); if (obj) { memset (obj, 0, sizeof (RBinJavaCPTypeObj)); obj->tag = tag; obj->metas = R_NEW0 (RBinJavaMetaInfo); obj->metas->type_info = (void *) &R_BIN_JAVA_CP_METAS[tag]; obj->info.cp_method.class_idx = R_BIN_JAVA_USHORT (buffer, 1); obj->info.cp_method.name_and_type_idx = R_BIN_JAVA_USHORT (buffer, 3); } return obj; } R_API ut64 r_bin_java_methodref_cp_calc_size(RBinJavaCPTypeObj *obj) { ut64 size = 0; // tag size += 1; // obj->info.cp_method.class_idx = R_BIN_JAVA_USHORT (buffer, 1); size += 2; // obj->info.cp_method.name_and_type_idx = R_BIN_JAVA_USHORT (buffer, 3); size += 2; return size; } R_API RBinJavaCPTypeObj 
*r_bin_java_interfacemethodref_cp_new(RBinJavaObj *bin, ut8 *buffer, ut64 sz) { ut8 tag = buffer[0]; int quick_check = r_bin_java_quick_check (R_BIN_JAVA_CP_INTERFACEMETHOD_REF, tag, sz, "InterfaceMethodRef"); if (quick_check > 0) { return NULL; } RBinJavaCPTypeObj *obj = R_NEW0 (RBinJavaCPTypeObj); if (obj) { obj->tag = tag; obj->metas = R_NEW0 (RBinJavaMetaInfo); obj->metas->type_info = (void *) &R_BIN_JAVA_CP_METAS[tag]; obj->name = r_str_dup (NULL, (const char *) R_BIN_JAVA_CP_METAS[tag].name); obj->info.cp_interface.class_idx = R_BIN_JAVA_USHORT (buffer, 1); obj->info.cp_interface.name_and_type_idx = R_BIN_JAVA_USHORT (buffer, 3); } return obj; } R_API ut64 r_bin_java_interfacemethodref_cp_calc_size(RBinJavaCPTypeObj *obj) { ut64 size = 0; // tag size += 1; // obj->info.cp_interface.class_idx = R_BIN_JAVA_USHORT (buffer, 1); size += 2; // obj->info.cp_interface.name_and_type_idx = R_BIN_JAVA_USHORT (buffer, 3); size += 2; return size; } R_API RBinJavaCPTypeObj *r_bin_java_string_cp_new(RBinJavaObj *bin, ut8 *buffer, ut64 sz) { ut8 tag = buffer[0]; int quick_check = r_bin_java_quick_check (R_BIN_JAVA_CP_STRING, tag, sz, "String"); if (quick_check > 0) { return NULL; } RBinJavaCPTypeObj *obj = R_NEW0 (RBinJavaCPTypeObj); if (obj) { obj->tag = tag; obj->metas = R_NEW0 (RBinJavaMetaInfo); obj->metas->type_info = (void *) &R_BIN_JAVA_CP_METAS[tag]; obj->name = r_str_dup (NULL, (const char *) R_BIN_JAVA_CP_METAS[tag].name); obj->info.cp_string.string_idx = R_BIN_JAVA_USHORT (buffer, 1); } return obj; } R_API ut64 r_bin_java_string_cp_calc_size(RBinJavaCPTypeObj *obj) { ut64 size = 0; // tag size += 1; // obj->info.cp_string.string_idx = R_BIN_JAVA_USHORT (buffer, 1); size += 2; return size; } R_API RBinJavaCPTypeObj *r_bin_java_integer_cp_new(RBinJavaObj *bin, ut8 *buffer, ut64 sz) { ut8 tag = buffer[0]; RBinJavaCPTypeObj *obj = NULL; int quick_check = 0; quick_check = r_bin_java_quick_check (R_BIN_JAVA_CP_INTEGER, tag, sz, "Integer"); if (quick_check > 0) { return 
obj; } obj = (RBinJavaCPTypeObj *) R_NEW0 (RBinJavaCPTypeObj); if (obj) { obj->tag = tag; obj->metas = R_NEW0 (RBinJavaMetaInfo); obj->metas->type_info = (void *) &R_BIN_JAVA_CP_METAS[tag]; obj->name = r_str_dup (NULL, (const char *) R_BIN_JAVA_CP_METAS[tag].name); memset (&obj->info.cp_integer.bytes, 0, sizeof (obj->info.cp_integer.bytes)); memcpy (&obj->info.cp_integer.bytes.raw, buffer + 1, 4); } return obj; } R_API ut64 r_bin_java_integer_cp_calc_size(RBinJavaCPTypeObj *obj) { ut64 size = 0; // tag size += 1; // obj->info.cp_string.string_idx = R_BIN_JAVA_USHORT (buffer, 1); size += 4; return size; } R_API RBinJavaCPTypeObj *r_bin_java_float_cp_new(RBinJavaObj *bin, ut8 *buffer, ut64 sz) { ut8 tag = buffer[0]; RBinJavaCPTypeObj *obj = NULL; int quick_check = 0; quick_check = r_bin_java_quick_check (R_BIN_JAVA_CP_FLOAT, tag, sz, "Float"); if (quick_check > 0) { return obj; } obj = (RBinJavaCPTypeObj *) calloc (1, sizeof (RBinJavaCPTypeObj)); if (obj) { obj->tag = tag; obj->metas = R_NEW0 (RBinJavaMetaInfo); obj->metas->type_info = (void *) &R_BIN_JAVA_CP_METAS[tag]; obj->name = r_str_dup (NULL, (const char *) R_BIN_JAVA_CP_METAS[tag].name); memset (&obj->info.cp_float.bytes, 0, sizeof (obj->info.cp_float.bytes)); memcpy (&obj->info.cp_float.bytes.raw, buffer, 4); } return (RBinJavaCPTypeObj *) obj; } R_API ut64 r_bin_java_float_cp_calc_size(RBinJavaCPTypeObj *obj) { ut64 size = 0; // tag size += 1; // obj->info.cp_string.string_idx = R_BIN_JAVA_USHORT (buffer, 1); size += 4; return size; } R_API RBinJavaCPTypeObj *r_bin_java_long_cp_new(RBinJavaObj *bin, ut8 *buffer, ut64 sz) { ut8 tag = buffer[0]; RBinJavaCPTypeObj *obj = NULL; int quick_check = 0; quick_check = r_bin_java_quick_check (R_BIN_JAVA_CP_LONG, tag, sz, "Long"); if (quick_check > 0) { return obj; } obj = (RBinJavaCPTypeObj *) malloc (sizeof (RBinJavaCPTypeObj)); if (obj) { memset (obj, 0, sizeof (RBinJavaCPTypeObj)); obj->tag = tag; obj->metas = R_NEW0 (RBinJavaMetaInfo); obj->metas->type_info = 
(void *) &R_BIN_JAVA_CP_METAS[tag]; obj->name = r_str_dup (NULL, (const char *) R_BIN_JAVA_CP_METAS[tag].name); memset (&obj->info.cp_long.bytes, 0, sizeof (obj->info.cp_long.bytes)); memcpy (&(obj->info.cp_long.bytes), buffer + 1, 8); } return obj; } R_API ut64 r_bin_java_long_cp_calc_size(RBinJavaCPTypeObj *obj) { ut64 size = 0; // tag size += 1; // obj->info.cp_string.string_idx = R_BIN_JAVA_USHORT (buffer, 1); size += 8; return size; } R_API RBinJavaCPTypeObj *r_bin_java_double_cp_new(RBinJavaObj *bin, ut8 *buffer, ut64 sz) { ut8 tag = buffer[0]; RBinJavaCPTypeObj *obj = NULL; int quick_check = 0; quick_check = r_bin_java_quick_check (R_BIN_JAVA_CP_DOUBLE, tag, sz, "Double"); if (quick_check > 0) { return (RBinJavaCPTypeObj *) obj; } obj = (RBinJavaCPTypeObj *) malloc (sizeof (RBinJavaCPTypeObj)); if (obj) { memset (obj, 0, sizeof (RBinJavaCPTypeObj)); obj->tag = tag; obj->metas = R_NEW0 (RBinJavaMetaInfo); obj->metas->type_info = (void *) &R_BIN_JAVA_CP_METAS[tag]; obj->name = r_str_dup (NULL, (const char *) R_BIN_JAVA_CP_METAS[tag].name); memset (&obj->info.cp_double.bytes, 0, sizeof (obj->info.cp_double.bytes)); memcpy (&obj->info.cp_double.bytes, buffer + 1, 8); } return obj; } R_API ut64 r_bin_java_double_cp_calc_size(RBinJavaCPTypeObj *obj) { ut64 size = 0; // tag size += 1; // obj->info.cp_string.string_idx = R_BIN_JAVA_USHORT (buffer, 1); size += 8; return size; } R_API RBinJavaCPTypeObj *r_bin_java_utf8_cp_new(RBinJavaObj *bin, ut8 *buffer, ut64 sz) { ut8 tag = buffer[0]; RBinJavaCPTypeObj *obj; int quick_check = r_bin_java_quick_check (R_BIN_JAVA_CP_UTF8, tag, sz, "Utf8"); if (quick_check > 0) { return NULL; } if ((obj = R_NEW0 (RBinJavaCPTypeObj))) { obj->tag = tag; obj->metas = R_NEW0 (RBinJavaMetaInfo); obj->metas->type_info = (void *) &R_BIN_JAVA_CP_METAS[tag]; obj->name = r_str_dup (NULL, (const char *) R_BIN_JAVA_CP_METAS[tag].name); obj->info.cp_utf8.length = R_BIN_JAVA_USHORT (buffer, 1); obj->info.cp_utf8.bytes = (ut8 *) malloc 
(obj->info.cp_utf8.length + 1); if (obj->info.cp_utf8.bytes) { memset (obj->info.cp_utf8.bytes, 0, obj->info.cp_utf8.length + 1); if (obj->info.cp_utf8.length < (sz - 3)) { memcpy (obj->info.cp_utf8.bytes, buffer + 3, (sz - 3)); obj->info.cp_utf8.length = sz - 3; } else { memcpy (obj->info.cp_utf8.bytes, buffer + 3, obj->info.cp_utf8.length); } obj->value = obj->info.cp_utf8.bytes; } else { r_bin_java_obj_free (obj); obj = NULL; } } return obj; } R_API ut64 r_bin_java_utf8_cp_calc_size(RBinJavaCPTypeObj *obj) { ut64 size = 0; size += 1; if (obj && R_BIN_JAVA_CP_UTF8 == obj->tag) { size += 2; size += obj->info.cp_utf8.length; } return size; } R_API RBinJavaCPTypeObj *r_bin_java_name_and_type_cp_new(RBinJavaObj *bin, ut8 *buffer, ut64 sz) { ut8 tag = buffer[0]; RBinJavaCPTypeObj *obj = NULL; int quick_check = 0; quick_check = r_bin_java_quick_check (R_BIN_JAVA_CP_NAMEANDTYPE, tag, sz, "RBinJavaCPTypeNameAndType"); if (quick_check > 0) { return obj; } obj = R_NEW0 (RBinJavaCPTypeObj); if (obj) { obj->metas = R_NEW0 (RBinJavaMetaInfo); obj->metas->type_info = (void *) &R_BIN_JAVA_CP_METAS[tag]; obj->name = r_str_dup (NULL, (const char *) R_BIN_JAVA_CP_METAS[tag].name);; obj->tag = tag; obj->info.cp_name_and_type.name_idx = R_BIN_JAVA_USHORT (buffer, 1); obj->info.cp_name_and_type.descriptor_idx = R_BIN_JAVA_USHORT (buffer, 3); } return obj; } R_API ut64 r_bin_java_name_and_type_cp_calc_size(RBinJavaCPTypeObj *obj) { ut64 size = 0; if (obj) { size += 1; // obj->info.cp_name_and_type.name_idx = R_BIN_JAVA_USHORT (buffer, 1); size += 2; // obj->info.cp_name_and_type.descriptor_idx = R_BIN_JAVA_USHORT (buffer, 3); size += 2; } return size; } R_API RBinJavaCPTypeObj *r_bin_java_methodtype_cp_new(RBinJavaObj *bin, ut8 *buffer, ut64 sz) { ut8 tag = buffer[0]; int quick_check = r_bin_java_quick_check (R_BIN_JAVA_CP_METHODTYPE, tag, sz, "RBinJavaCPTypeMethodType"); if (quick_check > 0) { return NULL; } RBinJavaCPTypeObj *obj = R_NEW0 (RBinJavaCPTypeObj); if (obj) { obj->metas = 
R_NEW0 (RBinJavaMetaInfo); obj->metas->type_info = (void *) &R_BIN_JAVA_CP_METAS[tag]; obj->name = r_str_dup (NULL, (const char *) R_BIN_JAVA_CP_METAS[tag].name);; obj->tag = tag; obj->info.cp_method_type.descriptor_index = R_BIN_JAVA_USHORT (buffer, 1); } return obj; } R_API ut64 r_bin_java_methodtype_cp_calc_size(RBinJavaCPTypeObj *obj) { ut64 size = 0; size += 1; // obj->info.cp_method_type.descriptor_index = R_BIN_JAVA_USHORT (buffer, 1); size += 2; return size; } R_API RBinJavaCPTypeObj *r_bin_java_methodhandle_cp_new(RBinJavaObj *bin, ut8 *buffer, ut64 sz) { ut8 tag = buffer[0]; int quick_check = r_bin_java_quick_check (R_BIN_JAVA_CP_METHODHANDLE, tag, sz, "RBinJavaCPTypeMethodHandle"); if (quick_check > 0) { return NULL; } RBinJavaCPTypeObj *obj = R_NEW0 (RBinJavaCPTypeObj); if (obj) { obj->metas = R_NEW0 (RBinJavaMetaInfo); obj->metas->type_info = (void *) &R_BIN_JAVA_CP_METAS[tag]; obj->name = r_str_dup (NULL, (const char *) R_BIN_JAVA_CP_METAS[tag].name);; obj->tag = tag; obj->info.cp_method_handle.reference_kind = buffer[1]; obj->info.cp_method_handle.reference_index = R_BIN_JAVA_USHORT (buffer, 2); } return obj; } R_API ut64 r_bin_java_methodhandle_cp_calc_size(RBinJavaCPTypeObj *obj) { ut64 size = 0; size += 1; // obj->info.cp_method_handle.reference_index = R_BIN_JAVA_USHORT (buffer, 2); size += 2; return size; } R_API RBinJavaCPTypeObj *r_bin_java_invokedynamic_cp_new(RBinJavaObj *bin, ut8 *buffer, ut64 sz) { ut8 tag = buffer[0]; RBinJavaCPTypeObj *obj; int quick_check = r_bin_java_quick_check (R_BIN_JAVA_CP_INVOKEDYNAMIC, tag, sz, "RBinJavaCPTypeMethodHandle"); if (quick_check > 0) { return NULL; } if ((obj = R_NEW0 (RBinJavaCPTypeObj))) { obj->metas = R_NEW0 (RBinJavaMetaInfo); obj->metas->type_info = (void *) &R_BIN_JAVA_CP_METAS[tag]; obj->name = r_str_dup (NULL, (const char *) R_BIN_JAVA_CP_METAS[tag].name);; obj->tag = tag; obj->info.cp_invoke_dynamic.bootstrap_method_attr_index = R_BIN_JAVA_USHORT (buffer, 1); 
obj->info.cp_invoke_dynamic.name_and_type_index = R_BIN_JAVA_USHORT (buffer, 3); } return obj; } R_API int r_bin_java_check_reset_cp_obj(RBinJavaCPTypeObj *cp_obj, ut8 tag) { bool res = false; if (tag < R_BIN_JAVA_CP_METAS_SZ) { if (tag != cp_obj->tag) { if (cp_obj->tag == R_BIN_JAVA_CP_UTF8) { R_FREE (cp_obj->info.cp_utf8.bytes); cp_obj->info.cp_utf8.length = 0; R_FREE (cp_obj->name); } cp_obj->tag = tag; cp_obj->metas->type_info = (void *) &R_BIN_JAVA_CP_METAS[tag]; cp_obj->name = strdup (R_BIN_JAVA_CP_METAS[tag].name); res = true; } else { eprintf ("Invalid tag\n"); } } else { eprintf ("Invalid tag '%d'.\n", tag); } return res; } R_API ut8 *r_bin_java_cp_get_4bytes(ut8 tag, ut32 *out_sz, const ut8 *buf, const ut64 len) { ut8 *buffer = malloc (5); if (!buffer) { return NULL; } ut32 val = 0; if (!buffer || len < 4) { if (out_sz) { *out_sz = 0; } free (buffer); return NULL; } buffer[0] = tag; val = R_BIN_JAVA_UINT (buf, 0); memcpy (buffer + 1, (const char *) &val, 4); *out_sz = 5; return buffer; } R_API ut8 *r_bin_java_cp_get_8bytes(ut8 tag, ut32 *out_sz, const ut8 *buf, const ut64 len) { ut8 *buffer = malloc (10); if (!buffer) { return NULL; } ut64 val = 0; if (len < 8) { *out_sz = 0; free (buffer); return NULL; } buffer[0] = tag; val = r_bin_java_raw_to_long (buf, 0); memcpy (buffer + 1, (const char *) &val, 8); *out_sz = 9; return buffer; } R_API ut8 *r_bin_java_cp_append_classref_and_name(RBinJavaObj *bin, ut32 *out_sz, const char *classname, const ut32 classname_len) { ut16 use_name_idx = bin->cp_idx + 1; ut8 *bytes = NULL, *name_bytes = NULL; name_bytes = r_bin_java_cp_get_utf8 (R_BIN_JAVA_CP_UTF8, out_sz, (const ut8 *) classname, classname_len); if (*out_sz > 0 && name_bytes) { ut8 *idx_addr = (ut8 *) &use_name_idx; bytes = malloc (*out_sz + 3); memcpy (bytes, name_bytes, *out_sz); bytes[*out_sz + 0] = R_BIN_JAVA_CP_CLASS; bytes[*out_sz + 1] = idx_addr[1]; bytes[*out_sz + 2] = idx_addr[0]; *out_sz += 3; } free (name_bytes); return bytes; } R_API ut8 
*r_bin_java_cp_get_fref_bytes(RBinJavaObj *bin, ut32 *out_sz, ut8 tag, ut16 cn_idx, ut16 fn_idx, ut16 ft_idx) { ut8 *bytes = NULL, *fnt_bytes = NULL; RBinJavaCPTypeObj *ref_cp_obj = NULL; ut16 fnt_idx = 0, cref_idx = 0; ut32 fnt_len = 0; ut16 ref_cp_obj_idx = r_bin_java_find_cp_class_ref_from_name_idx (bin, cn_idx); if (!ref_cp_obj_idx) { return NULL; } ref_cp_obj = r_bin_java_get_item_from_bin_cp_list (bin, ref_cp_obj_idx); if (ref_cp_obj) { cref_idx = ref_cp_obj->idx; } ref_cp_obj = r_bin_java_find_cp_name_and_type_info (bin, fn_idx, ft_idx); if (ref_cp_obj) { fnt_idx = ref_cp_obj->idx; } else { fnt_bytes = r_bin_java_cp_get_name_type (bin, &fnt_len, fn_idx, ft_idx); fnt_idx = bin->cp_idx + 1; } if (cref_idx && fnt_idx) { bytes = r_bin_java_cp_get_fm_ref (bin, out_sz, tag, cref_idx, fnt_idx); if (fnt_bytes) { ut8 *tbuf = malloc (fnt_len + *out_sz); if (!tbuf) { free (bytes); free (fnt_bytes); return NULL; } // copy the bytes to the new buffer memcpy (tbuf, fnt_bytes, fnt_len); memcpy (tbuf + fnt_len, bytes, *out_sz); // update the values free old buffer *out_sz += fnt_len; free (bytes); bytes = tbuf; } } free (fnt_bytes); return bytes; } R_API ut8 *r_bin_java_cp_get_classref(RBinJavaObj *bin, ut32 *out_sz, const char *classname, const ut32 classname_len, const ut16 name_idx) { ut16 use_name_idx = -1; ut8 *bytes = NULL; if (name_idx == (ut16) - 1 && classname && *classname && classname_len > 0) { // find class_name_idx by class name RList *results = r_bin_java_find_cp_const_by_val_utf8 (bin, (const ut8 *) classname, classname_len); if (r_list_length (results) == 1) { use_name_idx = (ut16) * ((ut32 *) r_list_get_n (results, 0)); } r_list_free (results); } else if (name_idx != (ut16) - 1 && name_idx != 0) { use_name_idx = name_idx; } if (use_name_idx == (ut16) - 1 && classname && *classname && classname_len > 0) { bytes = r_bin_java_cp_append_classref_and_name (bin, out_sz, classname, classname_len); } else if (use_name_idx != (ut16) - 1) { ut8 *idx_addr = (ut8 *) 
&use_name_idx; bytes = malloc (3); if (!bytes) { return NULL; } bytes[0] = R_BIN_JAVA_CP_CLASS; bytes[1] = idx_addr[1]; bytes[2] = idx_addr[0]; *out_sz += 3; } return bytes; } R_API ut8 *r_bin_java_cp_get_fm_ref(RBinJavaObj *bin, ut32 *out_sz, ut8 tag, ut16 class_idx, ut16 name_and_type_idx) { return r_bin_java_cp_get_2_ut16 (bin, out_sz, tag, class_idx, name_and_type_idx); } R_API ut8 *r_bin_java_cp_get_2_ut16(RBinJavaObj *bin, ut32 *out_sz, ut8 tag, ut16 ut16_one, ut16 ut16_two) { ut8 *bytes = malloc (7); if (!bytes) { return NULL; } ut8 *idx_addr = NULL; bytes[*out_sz] = tag; *out_sz += 1; idx_addr = (ut8 *) &ut16_one; bytes[*out_sz + 1] = idx_addr[1]; bytes[*out_sz + 2] = idx_addr[0]; *out_sz += 3; idx_addr = (ut8 *) &ut16_two; bytes[*out_sz + 1] = idx_addr[1]; bytes[*out_sz + 2] = idx_addr[0]; *out_sz += 3; return bytes; } R_API ut8 *r_bin_java_cp_get_name_type(RBinJavaObj *bin, ut32 *out_sz, ut16 name_idx, ut16 type_idx) { return r_bin_java_cp_get_2_ut16 (bin, out_sz, R_BIN_JAVA_CP_NAMEANDTYPE, name_idx, type_idx); } R_API ut8 *r_bin_java_cp_get_utf8(ut8 tag, ut32 *out_sz, const ut8 *buf, const ut64 len) { ut8 *buffer = NULL; ut16 sz = 0; ut16 t = (ut16) len; if (len > 0 && len > (ut16) - 1) { *out_sz = 0; return NULL; } sz = R_BIN_JAVA_USHORT (((ut8 *) (ut16 *) &t), 0); *out_sz = 3 + t; // tag + sz + bytes buffer = malloc (*out_sz + 3); if (!buffer) { return NULL; } // XXX - excess bytes are created to ensure null for string operations. 
memset (buffer, 0, *out_sz + 3); buffer[0] = tag; memcpy (buffer + 1, (const char *) &sz, 2); memcpy (buffer + 3, buf, *out_sz - 3); return buffer; } R_API ut64 r_bin_java_invokedynamic_cp_calc_size(RBinJavaCPTypeObj *obj) { ut64 size = 0; size += 1; // obj->info.cp_invoke_dynamic.bootstrap_method_attr_index = R_BIN_JAVA_USHORT (buffer, 1); size += 2; // obj->info.cp_invoke_dynamic.name_and_type_index = R_BIN_JAVA_USHORT (buffer, 3); size += 2; return size; } // End new Constant Pool types // Start free Constant Pool types R_API void r_bin_java_default_free(void /* RBinJavaCPTypeObj*/ *o) { RBinJavaCPTypeObj *obj = o; if (obj) { free (obj->metas); free (obj->name); free (obj->value); free (obj); } } R_API void r_bin_java_utf8_info_free(void /* RBinJavaCPTypeObj*/ *o) { RBinJavaCPTypeObj *obj = o; if (obj) { free (obj->name); free (obj->metas); free (obj->info.cp_utf8.bytes); free (obj); } } // Deallocs for type objects R_API void r_bin_java_obj_free(void /*RBinJavaCPTypeObj*/ *o) { RBinJavaCPTypeObj *obj = o; ((RBinJavaCPTypeMetas *) obj->metas->type_info)->allocs->delete_obj (obj); } R_API void r_bin_java_print_attr_summary(RBinJavaAttrInfo *attr) { if (attr == NULL) { eprintf ("Attempting to print an invalid RBinJavaAttrInfo *.\n"); return; } ((RBinJavaAttrMetas *) attr->metas->type_info)->allocs->print_summary (attr); } R_API void r_bin_java_print_source_debug_attr_summary(RBinJavaAttrInfo *attr) { ut32 i = 0; if (attr == NULL) { eprintf ("Attempting to print an invalid RBinJavaSourceDebugExtensionAttr *.\n"); return; } printf ("Source Debug Extension Attribute Information:\n"); printf (" Attribute Offset: 0x%08"PFMT64x "\n", attr->file_offset); printf (" Attribute Name Index: %d (%s)\n", attr->name_idx, attr->name); printf (" Extension Length: %d\n", attr->length); printf (" Source Debug Extension value: \n"); for (i = 0; i < attr->length; i++) { printf ("%c", attr->info.debug_extensions.debug_extension[i]); } printf ("\n Source Debug Extension End\n"); } R_API 
void r_bin_java_print_unknown_attr_summary(RBinJavaAttrInfo *attr) { if (attr == NULL) { eprintf ("Attempting to print an invalid RBinJavaAttrInfo *Unknown.\n"); return; } printf ("Unknown Attribute Information:\n"); printf (" Attribute Offset: 0x%08"PFMT64x "\n", attr->file_offset); printf (" Attribute Name Index: %d (%s)\n", attr->name_idx, attr->name); printf (" Attribute Length: %d\n", attr->length); } R_API void r_bin_java_print_code_exceptions_attr_summary(RBinJavaExceptionEntry *exc_entry) { if (exc_entry == NULL) { eprintf ("Attempting to print an invalid RBinJavaExceptionEntry *.\n"); return; } printf (" Exception Table Entry Information\n"); printf (" offset: 0x%08"PFMT64x"\n", exc_entry->file_offset); printf (" catch_type: %d\n", exc_entry->catch_type); printf (" start_pc: 0x%04x\n", exc_entry->start_pc); printf (" end_pc: 0x%04x\n", exc_entry->end_pc); printf (" handler_pc: 0x%04x\n", exc_entry->handler_pc); } // End free Constant Pool types R_API void r_bin_java_print_code_attr_summary(RBinJavaAttrInfo *attr) { RListIter *iter = NULL, *iter_tmp = NULL; RBinJavaExceptionEntry *exc_entry = NULL; RBinJavaAttrInfo *_attr = NULL; if (!attr) { eprintf ("Attempting to print an invalid RBinJavaAttrInfo *Code.\n"); return; } printf ("Code Attribute Information:\n"); printf (" Attribute Offset: 0x%08"PFMT64x "\n", attr->file_offset); printf (" Attribute Name Index: %d (%s)\n", attr->name_idx, attr->name); printf (" Attribute Length: %d, Attribute Count: %d\n", attr->length, attr->info.code_attr.attributes_count); printf (" Max Stack: %d\n", attr->info.code_attr.max_stack); printf (" Max Locals: %d\n", attr->info.code_attr.max_locals); printf (" Code Length: %d\n", attr->info.code_attr.code_length); printf (" Code At Offset: 0x%08"PFMT64x "\n", (ut64) attr->info.code_attr.code_offset); printf ("Code Attribute Exception Table Information:\n"); printf (" Exception Table Length: %d\n", attr->info.code_attr.exception_table_length); if 
(attr->info.code_attr.exception_table) { // Delete the attr entries r_list_foreach_safe (attr->info.code_attr.exception_table, iter, iter_tmp, exc_entry) { r_bin_java_print_code_exceptions_attr_summary (exc_entry); } } printf (" Implicit Method Stack Frame:\n"); r_bin_java_print_stack_map_frame_summary (attr->info.code_attr.implicit_frame); printf ("Code Attribute Attributes Information:\n"); if (attr->info.code_attr.attributes && attr->info.code_attr.attributes_count > 0) { printf (" Code Attribute Attributes Count: %d\n", attr->info.code_attr.attributes_count); r_list_foreach_safe (attr->info.code_attr.attributes, iter, iter_tmp, _attr) { r_bin_java_print_attr_summary (_attr); } } } R_API void r_bin_java_print_constant_value_attr_summary(RBinJavaAttrInfo *attr) { if (!attr) { eprintf ("Attempting to print an invalid RBinJavaAttrInfo *ConstantValue.\n"); return; } printf ("Constant Value Attribute Information:\n"); printf (" Attribute Offset: 0x%08"PFMT64x "\n", attr->file_offset); printf (" Attribute Name Index: %d (%s)\n", attr->name_idx, attr->name); printf (" Attribute Length: %d\n", attr->length); printf (" ConstantValue Index: %d\n", attr->info.constant_value_attr.constantvalue_idx); } R_API void r_bin_java_print_deprecated_attr_summary(RBinJavaAttrInfo *attr) { if (!attr) { eprintf ("Attempting to print an invalid RBinJavaAttrInfo *Deperecated.\n"); return; } printf ("Deperecated Attribute Information:\n"); printf (" Attribute Offset: 0x%08"PFMT64x "\n", attr->file_offset); printf (" Attribute Name Index: %d (%s)\n", attr->name_idx, attr->name); printf (" Attribute Length: %d\n", attr->length); } R_API void r_bin_java_print_enclosing_methods_attr_summary(RBinJavaAttrInfo *attr) { if (!attr) { eprintf ("Attempting to print an invalid RBinJavaAttrInfo *Deperecated.\n"); return; } printf ("Enclosing Method Attribute Information:\n"); printf (" Attribute Offset: 0x%08"PFMT64x "\n", attr->file_offset); printf (" Attribute Name Index: %d (%s)\n", attr->name_idx, 
attr->name); printf (" Attribute Length: %d\n", attr->length); printf (" Class Info Index : 0x%02x\n", attr->info.enclosing_method_attr.class_idx); printf (" Method Name and Type Index : 0x%02x\n", attr->info.enclosing_method_attr.method_idx); printf (" Class Name : %s\n", attr->info.enclosing_method_attr.class_name); printf (" Method Name and Desc : %s %s\n", attr->info.enclosing_method_attr.method_name, attr->info.enclosing_method_attr.method_descriptor); } R_API void r_bin_java_print_exceptions_attr_summary(RBinJavaAttrInfo *attr) { ut32 i = 0; if (!attr) { eprintf ("Attempting to print an invalid RBinJavaAttrInfo *Exceptions.\n"); return; } printf ("Exceptions Attribute Information:\n"); printf (" Attribute Offset: 0x%08"PFMT64x "\n", attr->file_offset); printf (" Attribute Name Index: %d (%s)\n", attr->name_idx, attr->name); printf (" Attribute Length: %d\n", attr->length); for (i = 0; i < attr->info.exceptions_attr.number_of_exceptions; i++) { printf (" Exceptions Attribute Index[%d]: %d\n", i, attr->info.exceptions_attr.exception_idx_table[i]); } } R_API void r_bin_java_print_classes_attr_summary(RBinJavaClassesAttribute *icattr) { if (!icattr) { eprintf ("Attempting to print an invalid RBinJavaClassesAttribute* (InnerClasses element).\n"); return; } eprintf (" Inner Classes Class Attribute Offset: 0x%08"PFMT64x "\n", icattr->file_offset); eprintf (" Inner Classes Class Attribute Class Name (%d): %s\n", icattr->inner_name_idx, icattr->name); eprintf (" Inner Classes Class Attribute Class inner_class_info_idx: %d\n", icattr->inner_class_info_idx); eprintf (" Inner Classes Class Attribute Class inner_class_access_flags: 0x%02x %s\n", icattr->inner_class_access_flags, icattr->flags_str); eprintf (" Inner Classes Class Attribute Class outer_class_info_idx: %d\n", icattr->outer_class_info_idx); eprintf (" Inner Classes Class Field Information:\n"); r_bin_java_print_field_summary (icattr->clint_field); eprintf (" Inner Classes Class Field Information:\n"); 
r_bin_java_print_field_summary (icattr->clint_field); eprintf (" Inner Classes Class Attr Info Information:\n"); r_bin_java_print_attr_summary (icattr->clint_attr); } R_API void r_bin_java_print_inner_classes_attr_summary(RBinJavaAttrInfo *attr) { RBinJavaClassesAttribute *icattr; RListIter *iter, *iter_tmp; if (!attr) { eprintf ("Attempting to print an invalid RBinJavaAttrInfo *InnerClasses.\n"); return; } printf ("Inner Classes Attribute Information:\n"); printf (" Attribute Offset: 0x%08"PFMT64x "\n", attr->file_offset); printf (" Attribute Name Index: %d (%s)\n", attr->name_idx, attr->name); printf (" Attribute Length: %d\n", attr->length); r_list_foreach_safe (attr->info.inner_classes_attr.classes, iter, iter_tmp, icattr) { r_bin_java_print_classes_attr_summary (icattr); } } R_API void r_bin_java_print_line_number_attr_summary(RBinJavaLineNumberAttribute *lnattr) { if (!lnattr) { eprintf ("Attempting to print an invalid RBinJavaLineNumberAttribute *.\n"); return; } printf (" Line Number Attribute Offset: 0x%08"PFMT64x "\n", lnattr->file_offset); printf (" Line Number Attribute StartPC: %d\n", lnattr->start_pc); printf (" Line Number Attribute LineNumber: %d\n", lnattr->line_number); } R_API void r_bin_java_print_line_number_table_attr_summary(RBinJavaAttrInfo *attr) { RBinJavaLineNumberAttribute *lnattr; RListIter *iter, *iter_tmp; if (!attr) { eprintf ("Attempting to print an invalid RBinJavaAttrInfo *LineNumberTable.\n"); return; } printf ("Line Number Table Attribute Information:\n"); printf (" Attribute Offset: 0x%08"PFMT64x "\n", attr->file_offset); printf (" Attribute Name Index: %d (%s)\n", attr->name_idx, attr->name); printf (" Attribute Length: %d\n", attr->length); r_list_foreach_safe (attr->info.line_number_table_attr.line_number_table, iter, iter_tmp, lnattr) { r_bin_java_print_line_number_attr_summary (lnattr); } } R_API void r_bin_java_print_local_variable_attr_summary(RBinJavaLocalVariableAttribute *lvattr) { if (!lvattr) { eprintf ("Attempting 
to print an invalid RBinJavaLocalVariableAttribute *.\n"); return; } printf (" Local Variable Attribute offset: 0x%08"PFMT64x "\n", lvattr->file_offset); printf (" Local Variable Attribute start_pc: %d\n", lvattr->start_pc); printf (" Local Variable Attribute Length: %d\n", lvattr->length); printf (" Local Variable Attribute name_idx: %d\n", lvattr->name_idx); printf (" Local Variable Attribute name: %s\n", lvattr->name); printf (" Local Variable Attribute descriptor_idx: %d\n", lvattr->descriptor_idx); printf (" Local Variable Attribute descriptor: %s\n", lvattr->descriptor); printf (" Local Variable Attribute index: %d\n", lvattr->index); } R_API void r_bin_java_print_local_variable_table_attr_summary(RBinJavaAttrInfo *attr) { RBinJavaLocalVariableAttribute *lvattr; RListIter *iter, *iter_tmp; if (attr == NULL) { eprintf ("Attempting to print an invalid RBinJavaAttrInfo *LocalVariableTable.\n"); return; } printf ("Local Variable Table Attribute Information:\n"); printf (" Attribute Offset: 0x%08"PFMT64x "\n", attr->file_offset); printf (" Attribute Name Index: %d (%s)\n", attr->name_idx, attr->name); printf (" Attribute Length: %d\n", attr->length); r_list_foreach_safe (attr->info.local_variable_table_attr.local_variable_table, iter, iter_tmp, lvattr) { r_bin_java_print_local_variable_attr_summary (lvattr); } } R_API void r_bin_java_print_local_variable_type_attr_summary(RBinJavaLocalVariableTypeAttribute *lvattr) { if (!lvattr) { eprintf ("Attempting to print an invalid RBinJavaLocalVariableTypeAttribute *.\n"); return; } eprintf (" Local Variable Type Attribute offset: 0x%08"PFMT64x "\n", lvattr->file_offset); eprintf (" Local Variable Type Attribute start_pc: %d\n", lvattr->start_pc); eprintf (" Local Variable Type Attribute Length: %d\n", lvattr->length); eprintf (" Local Variable Type Attribute name_idx: %d\n", lvattr->name_idx); eprintf (" Local Variable Type Attribute name: %s\n", lvattr->name); eprintf (" Local Variable Type Attribute signature_idx: 
%d\n", lvattr->signature_idx); eprintf (" Local Variable Type Attribute signature: %s\n", lvattr->signature); eprintf (" Local Variable Type Attribute index: %d\n", lvattr->index); } R_API void r_bin_java_print_local_variable_type_table_attr_summary(RBinJavaAttrInfo *attr) { RBinJavaLocalVariableTypeAttribute *lvtattr; RListIter *iter, *iter_tmp; if (!attr) { eprintf ("Attempting to print an invalid RBinJavaAttrInfo *LocalVariableTable.\n"); return; } eprintf ("Local Variable Type Table Attribute Information:\n"); eprintf (" Attribute Offset: 0x%08"PFMT64x "\n", attr->file_offset); eprintf (" Attribute Name Index: %d (%s)\n", attr->name_idx, attr->name); eprintf (" Attribute Length: %d\n", attr->length); r_list_foreach_safe (attr->info.local_variable_type_table_attr.local_variable_table, iter, iter_tmp, lvtattr) { r_bin_java_print_local_variable_type_attr_summary (lvtattr); } } R_API void r_bin_java_print_signature_attr_summary(RBinJavaAttrInfo *attr) { if (!attr) { eprintf ("Attempting to print an invalid RBinJavaAttrInfo *SignatureAttr.\n"); return; } printf ("Signature Attribute Information:\n"); printf (" Attribute Offset: 0x%08"PFMT64x "\n", attr->file_offset); printf (" Attribute Name Index: %d (%s)\n", attr->name_idx, attr->name); printf (" Attribute Length: %d\n", attr->length); printf (" Signature UTF8 Index: %d\n", attr->info.signature_attr.signature_idx); printf (" Signature string: %s\n", attr->info.signature_attr.signature); } R_API void r_bin_java_print_source_code_file_attr_summary(RBinJavaAttrInfo *attr) { if (!attr) { eprintf ("Attempting to print an invalid RBinJavaAttrInfo *SourceFile.\n"); return; } printf ("Source File Attribute Information:\n"); printf (" Attribute Offset: 0x%08"PFMT64x "\n", attr->file_offset); printf (" Attribute Name Index: %d (%s)\n", attr->name_idx, attr->name); printf (" Attribute Length: %d\n", attr->length); printf (" Source File Index: %d\n", attr->info.source_file_attr.sourcefile_idx); } R_API void 
r_bin_java_print_synthetic_attr_summary(RBinJavaAttrInfo *attr) { if (attr == NULL) { eprintf ("Attempting to print an invalid RBinJavaAttrInfo *Synthetic.\n"); return; } printf ("Synthetic Attribute Information:\n"); printf (" Attribute Offset: 0x%08"PFMT64x "\n", attr->file_offset); printf (" Attribute Name Index: %d (%s)\n", attr->name_idx, attr->name); printf (" Attribute Length: %d\n", attr->length); printf (" Attribute Index: %d\n", attr->info.source_file_attr.sourcefile_idx); } R_API void r_bin_java_print_stack_map_table_attr_summary(RBinJavaAttrInfo *attr) { RListIter *iter, *iter_tmp; RBinJavaStackMapFrame *frame; if (!attr) { eprintf ("Attempting to print an invalid RBinJavaStackMapTableAttr* .\n"); return; } printf ("StackMapTable Attribute Information:\n"); printf (" Attribute Offset: 0x%08"PFMT64x "\n", attr->file_offset); printf (" Attribute Name Index: %d (%s)\n", attr->name_idx, attr->name); printf (" Attribute Length: %d\n", attr->length); printf (" StackMapTable Method Code Size: 0x%08x\n", attr->info.stack_map_table_attr.code_size); printf (" StackMapTable Frame Entries: 0x%08x\n", attr->info.stack_map_table_attr.number_of_entries); printf (" StackMapTable Frames:\n"); RList *ptrList = attr->info.stack_map_table_attr.stack_map_frame_entries; if (ptrList) { r_list_foreach_safe (ptrList, iter, iter_tmp, frame) { r_bin_java_print_stack_map_frame_summary (frame); } } } R_API void r_bin_java_print_stack_map_frame_summary(RBinJavaStackMapFrame *obj) { RListIter *iter, *iter_tmp; RBinJavaVerificationObj *ver_obj; if (!obj) { eprintf ("Attempting to print an invalid RBinJavaStackMapFrame* .\n"); return; } printf ("Stack Map Frame Information\n"); printf (" Tag Value = 0x%02x Name: %s\n", obj->tag, ((RBinJavaStackMapFrameMetas *) obj->metas->type_info)->name); printf (" Offset: 0x%08"PFMT64x "\n", obj->file_offset); printf (" Local Variable Count = 0x%04x\n", obj->number_of_locals); printf (" Stack Items Count = 0x%04x\n", obj->number_of_stack_items); 
printf (" Local Variables:\n"); RList *ptrList = obj->local_items; r_list_foreach_safe (ptrList, iter, iter_tmp, ver_obj) { r_bin_java_print_verification_info_summary (ver_obj); } printf (" Stack Items:\n"); ptrList = obj->stack_items; r_list_foreach_safe (ptrList, iter, iter_tmp, ver_obj) { r_bin_java_print_verification_info_summary (ver_obj); } } R_API void r_bin_java_print_verification_info_summary(RBinJavaVerificationObj *obj) { ut8 tag_value = R_BIN_JAVA_STACKMAP_UNKNOWN; if (!obj) { eprintf ("Attempting to print an invalid RBinJavaVerificationObj* .\n"); return; } if (obj->tag < R_BIN_JAVA_STACKMAP_UNKNOWN) { tag_value = obj->tag; } printf ("Verification Information\n"); printf (" Offset: 0x%08"PFMT64x "", obj->file_offset); printf (" Tag Value = 0x%02x\n", obj->tag); printf (" Name = %s\n", R_BIN_JAVA_VERIFICATION_METAS[tag_value].name); if (obj->tag == R_BIN_JAVA_STACKMAP_OBJECT) { printf (" Object Constant Pool Index = 0x%x\n", obj->info.obj_val_cp_idx); } else if (obj->tag == R_BIN_JAVA_STACKMAP_UNINIT) { printf (" Uninitialized Object offset in code = 0x%x\n", obj->info.uninit_offset); } } R_API void r_bin_java_print_field_summary(RBinJavaField *field) { RBinJavaAttrInfo *attr; RListIter *iter, *iter_tmp; if (field) { if (field->type == R_BIN_JAVA_FIELD_TYPE_METHOD) { r_bin_java_print_method_summary (field); } else { #if 0 r_bin_java_print_interface_summary (field); #else printf ("Field Summary Information:\n"); printf (" File Offset: 0x%08"PFMT64x "\n", field->file_offset); printf (" Name Index: %d (%s)\n", field->name_idx, field->name); printf (" Descriptor Index: %d (%s)\n", field->descriptor_idx, field->descriptor); printf (" Access Flags: 0x%02x (%s)\n", field->flags, field->flags_str); printf (" Field Attributes Count: %d\n", field->attr_count); printf (" Field Attributes:\n"); r_list_foreach_safe (field->attributes, iter, iter_tmp, attr) { r_bin_java_print_attr_summary (attr); } #endif } } else { eprintf ("Attempting to print an invalid 
RBinJavaField* Field.\n"); } } R_API void r_bin_java_print_method_summary(RBinJavaField *field) { RBinJavaAttrInfo *attr; RListIter *iter, *iter_tmp; if (field == NULL) { eprintf ("Attempting to print an invalid RBinJavaField* Method.\n"); return; } printf ("Method Summary Information:\n"); printf (" File Offset: 0x%08"PFMT64x "\n", field->file_offset); printf (" Name Index: %d (%s)\n", field->name_idx, field->name); printf (" Descriptor Index: %d (%s)\n", field->descriptor_idx, field->descriptor); printf (" Access Flags: 0x%02x (%s)\n", field->flags, field->flags_str); printf (" Method Attributes Count: %d\n", field->attr_count); printf (" Method Attributes:\n"); r_list_foreach_safe (field->attributes, iter, iter_tmp, attr) { r_bin_java_print_attr_summary (attr); } } /* R_API void r_bin_java_print_interface_summary(ut16 idx) {//RBinJavaField *field) { RBinJavaAttrInfo *attr; RBinJavaCPTypeObj *class_info; RListIter *iter, *iter_tmp; if (field == NULL) { eprintf ("Attempting to print an invalid RBinJavaField* Interface.\n"); return; } eprintf ("Interface Summary Information:\n"); eprintf (" File offset: 0x%08"PFMT64x"", field->file_offset); eprintf (" Access Flags: %d\n", field->flags); eprintf (" Name Index: %d (%s)\n", field->name_idx, field->name); eprintf (" Descriptor Index: %d (%s)\n", field->descriptor_idx, field->descriptor); eprintf (" Interface Attributes Count: %d\n", field->attr_count); eprintf (" Interface Attributes:\n"); r_list_foreach_safe (field->attributes, iter, iter_tmp, attr) { r_bin_java_print_attr_summary(attr); } } */ R_API void r_bin_java_print_interfacemethodref_cp_summary(RBinJavaCPTypeObj *obj) { if (!obj) { eprintf ("Attempting to print an invalid RBinJavaCPTypeObj* InterfaceMethodRef.\n"); return; } eprintf ("InterfaceMethodRef ConstantPool Type (%d) ", obj->metas->ord); eprintf (" Offset: 0x%08"PFMT64x"", obj->file_offset); eprintf (" Class Index = %d\n", obj->info.cp_interface.class_idx); eprintf (" Name and type Index = %d\n", 
obj->info.cp_interface.name_and_type_idx); } R_API char *r_bin_java_print_interfacemethodref_cp_stringify(RBinJavaCPTypeObj *obj) { return r_str_newf ("%d.0x%04"PFMT64x ".%s.%d.%d", obj->metas->ord, obj->file_offset + obj->loadaddr, ((RBinJavaCPTypeMetas *) obj->metas->type_info)->name, obj->info.cp_interface.class_idx, obj->info.cp_interface.name_and_type_idx); } R_API void r_bin_java_print_methodhandle_cp_summary(RBinJavaCPTypeObj *obj) { ut8 ref_kind; if (!obj) { eprintf ("Attempting to print an invalid RBinJavaCPTypeObj* RBinJavaCPTypeMethodHandle.\n"); return; } ref_kind = obj->info.cp_method_handle.reference_kind; eprintf ("MethodHandle ConstantPool Type (%d) ", obj->metas->ord); eprintf (" Offset: 0x%08"PFMT64x"", obj->file_offset); eprintf (" Reference Kind = (0x%02x) %s\n", ref_kind, R_BIN_JAVA_REF_METAS[ref_kind].name); eprintf (" Reference Index = %d\n", obj->info.cp_method_handle.reference_index); } R_API char *r_bin_java_print_methodhandle_cp_stringify(RBinJavaCPTypeObj *obj) { ut8 ref_kind = obj->info.cp_method_handle.reference_kind; return r_str_newf ("%d.0x%04"PFMT64x ".%s.%s.%d", obj->metas->ord, obj->file_offset + obj->loadaddr, ((RBinJavaCPTypeMetas *) obj->metas->type_info)->name, R_BIN_JAVA_REF_METAS[ref_kind].name, obj->info.cp_method_handle.reference_index); } R_API void r_bin_java_print_methodtype_cp_summary(RBinJavaCPTypeObj *obj) { if (!obj) { eprintf ("Attempting to print an invalid RBinJavaCPTypeObj* RBinJavaCPTypeMethodType.\n"); return; } printf ("MethodType ConstantPool Type (%d) ", obj->metas->ord); printf (" Offset: 0x%08"PFMT64x "", obj->file_offset); printf (" Descriptor Index = 0x%02x\n", obj->info.cp_method_type.descriptor_index); } R_API char *r_bin_java_print_methodtype_cp_stringify(RBinJavaCPTypeObj *obj) { return r_str_newf ("%d.0x%04"PFMT64x ".%s.%d", obj->metas->ord, obj->file_offset + obj->loadaddr, ((RBinJavaCPTypeMetas *) obj->metas->type_info)->name, obj->info.cp_method_type.descriptor_index); } R_API void 
r_bin_java_print_invokedynamic_cp_summary(RBinJavaCPTypeObj *obj) { if (!obj) { eprintf ("Attempting to print an invalid RBinJavaCPTypeObj* RBinJavaCPTypeInvokeDynamic.\n"); return; } eprintf ("InvokeDynamic ConstantPool Type (%d) ", obj->metas->ord); eprintf (" Offset: 0x%08"PFMT64x"", obj->file_offset); eprintf (" Bootstrap Method Attr Index = (0x%02x)\n", obj->info.cp_invoke_dynamic.bootstrap_method_attr_index); eprintf (" Bootstrap Name and Type Index = (0x%02x)\n", obj->info.cp_invoke_dynamic.name_and_type_index); } R_API char *r_bin_java_print_invokedynamic_cp_stringify(RBinJavaCPTypeObj *obj) { return r_str_newf ("%d.0x%04"PFMT64x ".%s.%d.%d", obj->metas->ord, obj->file_offset + obj->loadaddr, ((RBinJavaCPTypeMetas *) obj->metas->type_info)->name, obj->info.cp_invoke_dynamic.bootstrap_method_attr_index, obj->info.cp_invoke_dynamic.name_and_type_index); } R_API void r_bin_java_print_methodref_cp_summary(RBinJavaCPTypeObj *obj) { if (!obj) { eprintf ("Attempting to print an invalid RBinJavaCPTypeObj* MethodRef.\n"); return; } eprintf ("MethodRef ConstantPool Type (%d) ", obj->metas->ord); eprintf (" Offset: 0x%08"PFMT64x"", obj->file_offset); eprintf (" Class Index = %d\n", obj->info.cp_method.class_idx); eprintf (" Name and type Index = %d\n", obj->info.cp_method.name_and_type_idx); } R_API char *r_bin_java_print_methodref_cp_stringify(RBinJavaCPTypeObj *obj) { return r_str_newf ("%d.0x%04"PFMT64x ".%s.%d.%d", obj->metas->ord, obj->file_offset + obj->loadaddr, ((RBinJavaCPTypeMetas *) obj->metas->type_info)->name, obj->info.cp_method.class_idx, obj->info.cp_method.name_and_type_idx); } R_API void r_bin_java_print_fieldref_cp_summary(RBinJavaCPTypeObj *obj) { if (!obj) { eprintf ("Attempting to print an invalid RBinJavaCPTypeObj* FieldRef.\n"); return; } eprintf ("FieldRef ConstantPool Type (%d) ", obj->metas->ord); eprintf (" Offset: 0x%08"PFMT64x"", obj->file_offset); eprintf (" Class Index = %d\n", obj->info.cp_field.class_idx); eprintf (" Name and type 
Index = %d\n", obj->info.cp_field.name_and_type_idx); } R_API char *r_bin_java_print_fieldref_cp_stringify(RBinJavaCPTypeObj *obj) { ut32 size = 255, consumed = 0; char *value = malloc (size); if (value) { memset (value, 0, size); consumed = snprintf (value, size, "%d.0x%04"PFMT64x ".%s.%d.%d", obj->metas->ord, obj->file_offset + obj->loadaddr, ((RBinJavaCPTypeMetas *) obj->metas->type_info)->name, obj->info.cp_field.class_idx, obj->info.cp_field.name_and_type_idx); if (consumed >= size - 1) { free (value); size += size >> 1; value = malloc (size); if (value) { memset (value, 0, size); (void)snprintf (value, size, "%d.0x%04"PFMT64x ".%s.%d.%d", obj->metas->ord, obj->file_offset + obj->loadaddr, ((RBinJavaCPTypeMetas *) obj->metas->type_info)->name, obj->info.cp_field.class_idx, obj->info.cp_field.name_and_type_idx); } } } return value; } R_API void r_bin_java_print_classref_cp_summary(RBinJavaCPTypeObj *obj) { if (!obj) { eprintf ("Attempting to print an invalid RBinJavaCPTypeObj* ClassRef.\n"); return; } eprintf ("ClassRef ConstantPool Type (%d) ", obj->metas->ord); eprintf (" Offset: 0x%08"PFMT64x"", obj->file_offset); eprintf (" Name Index = %d\n", obj->info.cp_class.name_idx); } R_API char *r_bin_java_print_classref_cp_stringify(RBinJavaCPTypeObj *obj) { ut32 size = 255, consumed = 0; char *value = malloc (size); if (value) { memset (value, 0, size); consumed = snprintf (value, size, "%d.0x%04"PFMT64x ".%s.%d", obj->metas->ord, obj->file_offset + obj->loadaddr, ((RBinJavaCPTypeMetas *) obj->metas->type_info)->name, obj->info.cp_class.name_idx); if (consumed >= size - 1) { free (value); size += size >> 1; value = malloc (size); if (value) { memset (value, 0, size); (void)snprintf (value, size, "%d.0x%04"PFMT64x ".%s.%d", obj->metas->ord, obj->file_offset + obj->loadaddr, ((RBinJavaCPTypeMetas *) obj->metas->type_info)->name, obj->info.cp_class.name_idx); } } } return value; } R_API void r_bin_java_print_string_cp_summary(RBinJavaCPTypeObj *obj) { if (!obj) { 
eprintf ("Attempting to print an invalid RBinJavaCPTypeObj* String.\n"); return; } printf ("String ConstantPool Type (%d) ", obj->metas->ord); printf (" Offset: 0x%08"PFMT64x "", obj->file_offset); printf (" String Index = %d\n", obj->info.cp_string.string_idx); } R_API char *r_bin_java_print_string_cp_stringify(RBinJavaCPTypeObj *obj) { ut32 size = 255, consumed = 0; char *value = malloc (size); if (value) { memset (value, 0, size); consumed = snprintf (value, size, "%d.0x%04"PFMT64x ".%s.%d", obj->metas->ord, obj->file_offset + obj->loadaddr, ((RBinJavaCPTypeMetas *) obj->metas->type_info)->name, obj->info.cp_string.string_idx); if (consumed >= size - 1) { free (value); size += size >> 1; value = malloc (size); if (value) { memset (value, 0, size); (void)snprintf (value, size, "%d.0x%04"PFMT64x ".%s.%d", obj->metas->ord, obj->file_offset, ((RBinJavaCPTypeMetas *) obj->metas->type_info)->name, obj->info.cp_string.string_idx); } } } return value; } R_API void r_bin_java_print_integer_cp_summary(RBinJavaCPTypeObj *obj) { ut8 *b = NULL; if (!obj) { eprintf ("Attempting to print an invalid RBinJavaCPTypeObj* Integer.\n"); return; } b = obj->info.cp_integer.bytes.raw; eprintf ("Integer ConstantPool Type (%d) ", obj->metas->ord); eprintf (" Offset: 0x%08"PFMT64x"", obj->file_offset); eprintf (" bytes = %02x %02x %02x %02x\n", b[0], b[1], b[2], b[3]); eprintf (" integer = %d\n", R_BIN_JAVA_UINT (obj->info.cp_integer.bytes.raw, 0)); } R_API char *r_bin_java_print_integer_cp_stringify(RBinJavaCPTypeObj *obj) { ut32 size = 255, consumed = 0; char *value = malloc (size); if (value) { memset (value, 0, size); consumed = snprintf (value, size, "%d.0x%04"PFMT64x ".%s.0x%08x", obj->metas->ord, obj->file_offset + obj->loadaddr, ((RBinJavaCPTypeMetas *) obj->metas->type_info)->name, R_BIN_JAVA_UINT (obj->info.cp_integer.bytes.raw, 0)); if (consumed >= size - 1) { free (value); size += size >> 1; value = malloc (size); if (value) { memset (value, 0, size); (void)snprintf (value, 
size, "%d.0x%04"PFMT64x ".%s.0x%08x", obj->metas->ord, obj->file_offset + obj->loadaddr, ((RBinJavaCPTypeMetas *) obj->metas->type_info)->name, R_BIN_JAVA_UINT (obj->info.cp_integer.bytes.raw, 0)); } } } return value; } R_API void r_bin_java_print_float_cp_summary(RBinJavaCPTypeObj *obj) { ut8 *b = NULL; if (!obj) { eprintf ("Attempting to print an invalid RBinJavaCPTypeObj* Double.\n"); return; } b = obj->info.cp_float.bytes.raw; printf ("Float ConstantPool Type (%d) ", obj->metas->ord); printf (" Offset: 0x%08"PFMT64x "", obj->file_offset); printf (" Bytes = %02x %02x %02x %02x\n", b[0], b[1], b[2], b[3]); printf (" Float = %f\n", R_BIN_JAVA_FLOAT (obj->info.cp_float.bytes.raw, 0)); } R_API char *r_bin_java_print_float_cp_stringify(RBinJavaCPTypeObj *obj) { ut32 size = 255, consumed = 0; char *value = malloc (size); if (value) { memset (value, 0, size); consumed = snprintf (value, size, "%d.0x%04"PFMT64x ".%s.%f", obj->metas->ord, obj->file_offset + obj->loadaddr, ((RBinJavaCPTypeMetas *) obj->metas->type_info)->name, R_BIN_JAVA_FLOAT (obj->info.cp_float.bytes.raw, 0)); if (consumed >= size - 1) { free (value); size += size >> 1; value = malloc (size); if (value) { memset (value, 0, size); (void)snprintf (value, size, "%d.0x%04"PFMT64x ".%s.%f", obj->metas->ord, obj->file_offset + obj->loadaddr, ((RBinJavaCPTypeMetas *) obj->metas->type_info)->name, R_BIN_JAVA_FLOAT (obj->info.cp_float.bytes.raw, 0)); } } } return value; } R_API void r_bin_java_print_long_cp_summary(RBinJavaCPTypeObj *obj) { ut8 *b = NULL; if (!obj) { eprintf ("Attempting to print an invalid RBinJavaCPTypeObj* Long.\n"); return; } b = obj->info.cp_long.bytes.raw; printf ("Long ConstantPool Type (%d) ", obj->metas->ord); printf (" Offset: 0x%08"PFMT64x "", obj->file_offset); printf (" High-Bytes = %02x %02x %02x %02x\n", b[0], b[1], b[2], b[3]); printf (" Low-Bytes = %02x %02x %02x %02x\n", b[4], b[5], b[6], b[7]); printf (" Long = %08"PFMT64x "\n", r_bin_java_raw_to_long 
(obj->info.cp_long.bytes.raw, 0)); } R_API char *r_bin_java_print_long_cp_stringify(RBinJavaCPTypeObj *obj) { ut32 size = 255, consumed = 0; char *value = malloc (size); if (value) { memset (value, 0, size); consumed = snprintf (value, size, "%d.0x%04"PFMT64x ".%s.0x%08"PFMT64x "", obj->metas->ord, obj->file_offset, ((RBinJavaCPTypeMetas *) obj->metas->type_info)->name, r_bin_java_raw_to_long (obj->info.cp_long.bytes.raw, 0)); if (consumed >= size - 1) { free (value); size += size >> 1; value = malloc (size); if (value) { memset (value, 0, size); (void)snprintf (value, size, "%d.0x%04"PFMT64x ".%s.0x%08"PFMT64x "", obj->metas->ord, obj->file_offset, ((RBinJavaCPTypeMetas *) obj->metas->type_info)->name, r_bin_java_raw_to_long (obj->info.cp_long.bytes.raw, 0)); } } } return value; } R_API void r_bin_java_print_double_cp_summary(RBinJavaCPTypeObj *obj) { ut8 *b = NULL; if (!obj) { eprintf ("Attempting to print an invalid RBinJavaCPTypeObj* Double.\n"); return; } b = obj->info.cp_double.bytes.raw; printf ("Double ConstantPool Type (%d) ", obj->metas->ord); printf (" Offset: 0x%08"PFMT64x "", obj->file_offset); printf (" High-Bytes = %02x %02x %02x %02x\n", b[0], b[1], b[2], b[3]); printf (" Low-Bytes = %02x %02x %02x %02x\n", b[4], b[5], b[6], b[7]); printf (" Double = %f\n", r_bin_java_raw_to_double (obj->info.cp_double.bytes.raw, 0)); } R_API char *r_bin_java_print_double_cp_stringify(RBinJavaCPTypeObj *obj) { ut32 size = 255, consumed = 0; char *value = malloc (size); if (value) { memset (value, 0, size); consumed = snprintf (value, size, "%d.0x%04"PFMT64x ".%s.%f", obj->metas->ord, obj->file_offset, ((RBinJavaCPTypeMetas *) obj->metas->type_info)->name, r_bin_java_raw_to_double (obj->info.cp_double.bytes.raw, 0)); if (consumed >= size - 1) { free (value); size += size >> 1; value = malloc (size); if (value) { memset (value, 0, size); (void)snprintf (value, size, "%d.0x%04"PFMT64x ".%s.%f", obj->metas->ord, obj->file_offset, ((RBinJavaCPTypeMetas *) 
obj->metas->type_info)->name, r_bin_java_raw_to_double (obj->info.cp_double.bytes.raw, 0)); } } } return value; } R_API void r_bin_java_print_name_and_type_cp_summary(RBinJavaCPTypeObj *obj) { if (!obj) { eprintf ("Attempting to print an invalid RBinJavaCPTypeObj* Name_And_Type.\n"); return; } printf ("Name_And_Type ConstantPool Type (%d) ", obj->metas->ord); printf (" Offset: 0x%08"PFMT64x "", obj->file_offset); printf (" name_idx = (%d)\n", obj->info.cp_name_and_type.name_idx); printf (" descriptor_idx = (%d)\n", obj->info.cp_name_and_type.descriptor_idx); } R_API char *r_bin_java_print_name_and_type_cp_stringify(RBinJavaCPTypeObj *obj) { ut32 size = 255, consumed = 0; char *value = malloc (size); if (value) { memset (value, 0, size); consumed = snprintf (value, size, "%d.0x%04"PFMT64x ".%s.%d.%d", obj->metas->ord, obj->file_offset + obj->loadaddr, ((RBinJavaCPTypeMetas *) obj->metas->type_info)->name, obj->info.cp_name_and_type.name_idx, obj->info.cp_name_and_type.descriptor_idx); if (consumed >= size - 1) { free (value); size += size >> 1; value = malloc (size); if (value) { memset (value, 0, size); (void)snprintf (value, size, "%d.0x%04"PFMT64x ".%s.%d.%d", obj->metas->ord, obj->file_offset + obj->loadaddr, ((RBinJavaCPTypeMetas *) obj->metas->type_info)->name, obj->info.cp_name_and_type.name_idx, obj->info.cp_name_and_type.descriptor_idx); } } } return value; } R_API void r_bin_java_print_utf8_cp_summary(RBinJavaCPTypeObj *obj) { if (!obj) { eprintf ("Attempting to print an invalid RBinJavaCPTypeObj* Utf8.\n"); return; } char *str = convert_string ((const char *) obj->info.cp_utf8.bytes, obj->info.cp_utf8.length); eprintf ("UTF8 ConstantPool Type (%d) ", obj->metas->ord); eprintf (" Offset: 0x%08"PFMT64x"", obj->file_offset); eprintf (" length = %d\n", obj->info.cp_utf8.length); eprintf (" utf8 = %s\n", str); free (str); } R_API char *r_bin_java_print_utf8_cp_stringify(RBinJavaCPTypeObj *obj) { char *utf8_str = r_hex_bin2strdup (obj->info.cp_utf8.bytes, 
obj->info.cp_utf8.length); char *res = r_str_newf ("%d.0x%04"PFMT64x ".%s.%d.%s", obj->metas->ord, obj->file_offset + obj->loadaddr, ((RBinJavaCPTypeMetas *) obj->metas->type_info)->name, obj->info.cp_utf8.length, utf8_str); free (utf8_str); return res; } R_API void r_bin_java_print_null_cp_summary(RBinJavaCPTypeObj *obj) { eprintf ("Unknown ConstantPool Type Tag: 0x%04x .\n", obj->tag); } R_API char *r_bin_java_print_null_cp_stringify(RBinJavaCPTypeObj *obj) { return r_str_newf ("%d.0x%04"PFMT64x ".%s", obj->metas->ord, obj->file_offset + obj->loadaddr, ((RBinJavaCPTypeMetas *) obj->metas->type_info)->name); } R_API void r_bin_java_print_unknown_cp_summary(RBinJavaCPTypeObj *obj) { eprintf ("NULL ConstantPool Type.\n"); } R_API char *r_bin_java_print_unknown_cp_stringify(RBinJavaCPTypeObj *obj) { return r_str_newf ("%d.0x%04"PFMT64x ".%s", obj->metas->ord, obj->file_offset + obj->loadaddr, ((RBinJavaCPTypeMetas *) obj->metas->type_info)->name); } R_API RBinJavaElementValuePair *r_bin_java_element_pair_new(ut8 *buffer, ut64 sz, ut64 buf_offset) { if (!buffer || sz < 8) { return NULL; } RBinJavaElementValuePair *evp = R_NEW0 (RBinJavaElementValuePair); if (!evp) { return NULL; } // TODO: What is the signifigance of evp element evp->element_name_idx = R_BIN_JAVA_USHORT (buffer, 0); ut64 offset = 2; evp->file_offset = buf_offset; evp->name = r_bin_java_get_utf8_from_bin_cp_list (R_BIN_JAVA_GLOBAL_BIN, evp->element_name_idx); if (!evp->name) { // TODO: eprintf unable to find the name for the given index eprintf ("ElementValue Name is invalid.\n"); evp->name = strdup ("UNKNOWN"); } if (offset >= sz) { free (evp); return NULL; } evp->value = r_bin_java_element_value_new (buffer + offset, sz - offset, buf_offset + offset); if (evp->value) { offset += evp->value->size; if (offset >= sz) { free (evp->value); free (evp); return NULL; } } evp->size = offset; return evp; } R_API void r_bin_java_print_element_pair_summary(RBinJavaElementValuePair *evp) { if (!evp) { eprintf 
("Attempting to print an invalid RBinJavaElementValuePair *pair.\n"); return; } printf ("Element Value Pair information:\n"); printf (" EV Pair File Offset: 0x%08"PFMT64x "\n", evp->file_offset); printf (" EV Pair Element Name index: 0x%02x\n", evp->element_name_idx); printf (" EV Pair Element Name: %s\n", evp->name); printf (" EV Pair Element Value:\n"); r_bin_java_print_element_value_summary (evp->value); } R_API void r_bin_java_print_element_value_summary(RBinJavaElementValue *element_value) { RBinJavaCPTypeObj *obj; RBinJavaElementValue *ev_element = NULL; RListIter *iter = NULL, *iter_tmp = NULL; char *name; if (!element_value) { eprintf ("Attempting to print an invalid RBinJavaElementValuePair *pair.\n"); return; } name = ((RBinJavaElementValueMetas *) element_value->metas->type_info)->name; eprintf ("Element Value information:\n"); eprintf (" EV Pair File Offset: 0x%08"PFMT64x "\n", element_value->file_offset); eprintf (" EV Value Type (%d): %s\n", element_value->tag, name); switch (element_value->tag) { case R_BIN_JAVA_EV_TAG_BYTE: case R_BIN_JAVA_EV_TAG_CHAR: case R_BIN_JAVA_EV_TAG_DOUBLE: case R_BIN_JAVA_EV_TAG_FLOAT: case R_BIN_JAVA_EV_TAG_INT: case R_BIN_JAVA_EV_TAG_LONG: case R_BIN_JAVA_EV_TAG_SHORT: case R_BIN_JAVA_EV_TAG_BOOLEAN: case R_BIN_JAVA_EV_TAG_STRING: eprintf (" EV Value Constant Value index: 0x%02x\n", element_value->value.const_value.const_value_idx); eprintf (" EV Value Constant Value Information:\n"); obj = element_value->value.const_value.const_value_cp_obj; if (obj && obj->metas && obj->metas->type_info) { ((RBinJavaCPTypeMetas *) obj->metas->type_info)->allocs->print_summary (obj); } break; case R_BIN_JAVA_EV_TAG_ENUM: eprintf (" EV Value Enum Constant Value Const Name Index: 0x%02x\n", element_value->value.enum_const_value.const_name_idx); eprintf (" EV Value Enum Constant Value Type Name Index: 0x%02x\n", element_value->value.enum_const_value.type_name_idx); eprintf (" EV Value Enum Constant Value Const CP Information:\n"); obj = 
element_value->value.enum_const_value.const_name_cp_obj; if (obj && obj->metas && obj->metas->type_info) { ((RBinJavaCPTypeMetas *) obj->metas->type_info)->allocs->print_summary (obj); } eprintf (" EV Value Enum Constant Value Type CP Information:\n"); obj = element_value->value.enum_const_value.type_name_cp_obj; if (obj && obj->metas && obj->metas->type_info) { ((RBinJavaCPTypeMetas *) obj->metas->type_info)->allocs->print_summary (obj); } break; case R_BIN_JAVA_EV_TAG_CLASS: eprintf (" EV Value Class Info Index: 0x%02x\n", element_value->value.class_value.class_info_idx); eprintf (" EV Value Class Info CP Information:\n"); obj = element_value->value.class_value.class_info_cp_obj; if (obj && obj->metas && obj->metas->type_info) { ((RBinJavaCPTypeMetas *) obj->metas->type_info)->allocs->print_summary (obj); } break; case R_BIN_JAVA_EV_TAG_ARRAY: eprintf (" EV Value Array Value Number of Values: 0x%04x\n", element_value->value.array_value.num_values); eprintf (" EV Value Array Values\n"); r_list_foreach_safe (element_value->value.array_value.values, iter, iter_tmp, ev_element) { r_bin_java_print_element_value_summary (ev_element); } break; case R_BIN_JAVA_EV_TAG_ANNOTATION: eprintf (" EV Annotation Information:\n"); r_bin_java_print_annotation_summary (&element_value->value.annotation_value); break; default: // eprintf unable to handle tag break; } } R_API void r_bin_java_element_pair_free(void /*RBinJavaElementValuePair*/ *e) { RBinJavaElementValuePair *evp = e; if (evp) { free (evp->name); r_bin_java_element_value_free (evp->value); free (evp); } evp = NULL; } R_API void r_bin_java_element_value_free(void /*RBinJavaElementValue*/ *e) { RBinJavaElementValue *element_value = e; RListIter *iter = NULL, *iter_tmp = NULL; RBinJavaCPTypeObj *obj = NULL; RBinJavaElementValue *ev_element = NULL; if (element_value) { R_FREE (element_value->metas); switch (element_value->tag) { case R_BIN_JAVA_EV_TAG_BYTE: case R_BIN_JAVA_EV_TAG_CHAR: case R_BIN_JAVA_EV_TAG_DOUBLE: case 
R_BIN_JAVA_EV_TAG_FLOAT: case R_BIN_JAVA_EV_TAG_INT: case R_BIN_JAVA_EV_TAG_LONG: case R_BIN_JAVA_EV_TAG_SHORT: case R_BIN_JAVA_EV_TAG_BOOLEAN: case R_BIN_JAVA_EV_TAG_STRING: // Delete the CP Type Object obj = element_value->value.const_value.const_value_cp_obj; if (obj && obj->metas) { ((RBinJavaCPTypeMetas *) obj->metas->type_info)->allocs->delete_obj (obj); } break; case R_BIN_JAVA_EV_TAG_ENUM: // Delete the CP Type Objects obj = element_value->value.enum_const_value.const_name_cp_obj; if (obj && obj->metas) { RBinJavaCPTypeMetas *ti = obj->metas->type_info; if (ti && ti->allocs && ti->allocs->delete_obj) { ti->allocs->delete_obj (obj); } } obj = element_value->value.enum_const_value.type_name_cp_obj; if (obj && obj->metas) { RBinJavaCPTypeMetas *tm = obj->metas->type_info; if (tm && tm->allocs && tm->allocs->delete_obj) { tm->allocs->delete_obj (obj); } } break; case R_BIN_JAVA_EV_TAG_CLASS: // Delete the CP Type Object obj = element_value->value.class_value.class_info_cp_obj; if (obj && obj->metas) { ((RBinJavaCPTypeMetas *) obj->metas->type_info)->allocs->delete_obj (obj); } break; case R_BIN_JAVA_EV_TAG_ARRAY: // Delete the Element Value array List r_list_foreach_safe (element_value->value.array_value.values, iter, iter_tmp, ev_element) { if (ev_element) { r_bin_java_element_value_free (ev_element); } else { // TODO eprintf evps value was NULL } // r_list_delete (element_value->value.array_value.values, iter); ev_element = NULL; } r_list_free (element_value->value.array_value.values); break; case R_BIN_JAVA_EV_TAG_ANNOTATION: // Delete the Annotations List r_list_free (element_value->value.annotation_value.element_value_pairs); break; default: // eprintf unable to free the tag break; } free (element_value); } } R_API ut64 r_bin_java_annotation_default_attr_calc_size(RBinJavaAttrInfo *attr) { ut64 size = 0; if (attr) { // attr = r_bin_java_default_attr_new (buffer, sz, buf_offset); size += 6; // attr->info.annotation_default_attr.default_value = 
r_bin_java_element_value_new (buffer+offset, sz-offset, buf_offset+offset); size += r_bin_java_element_value_calc_size (attr->info.annotation_default_attr.default_value); } return size; } R_API RBinJavaAttrInfo *r_bin_java_annotation_default_attr_new(RBinJavaObj *bin, ut8 *buffer, ut64 sz, ut64 buf_offset) { ut64 offset = 0; if (sz < 8) { return NULL; } RBinJavaAttrInfo *attr = r_bin_java_default_attr_new (bin, buffer, sz, buf_offset); offset += 6; if (attr && sz >= offset) { attr->type = R_BIN_JAVA_ATTR_TYPE_ANNOTATION_DEFAULT_ATTR; attr->info.annotation_default_attr.default_value = r_bin_java_element_value_new (buffer + offset, sz - offset, buf_offset + offset); if (attr->info.annotation_default_attr.default_value) { offset += attr->info.annotation_default_attr.default_value->size; } } r_bin_java_print_annotation_default_attr_summary (attr); return attr; } static void delete_obj(RBinJavaCPTypeObj *obj) { if (obj && obj->metas && obj->metas->type_info) { RBinJavaCPTypeMetas *ti = obj->metas->type_info; if (ti && ti->allocs && ti->allocs->delete_obj) { ti->allocs->delete_obj (obj); } } } R_API void r_bin_java_annotation_default_attr_free(void /*RBinJavaAttrInfo*/ *a) { RBinJavaAttrInfo *attr = a; RBinJavaElementValue *ev_element = NULL; RListIter *iter = NULL, *iter_tmp = NULL; if (!attr || attr->type != R_BIN_JAVA_ATTR_TYPE_ANNOTATION_DEFAULT_ATTR) { return; } RBinJavaElementValue *element_value = attr->info.annotation_default_attr.default_value; if (!element_value) { return; } switch (element_value->tag) { case R_BIN_JAVA_EV_TAG_BYTE: case R_BIN_JAVA_EV_TAG_CHAR: case R_BIN_JAVA_EV_TAG_DOUBLE: case R_BIN_JAVA_EV_TAG_FLOAT: case R_BIN_JAVA_EV_TAG_INT: case R_BIN_JAVA_EV_TAG_LONG: case R_BIN_JAVA_EV_TAG_SHORT: case R_BIN_JAVA_EV_TAG_BOOLEAN: case R_BIN_JAVA_EV_TAG_STRING: // Delete the CP Type Object delete_obj (element_value->value.const_value.const_value_cp_obj); break; case R_BIN_JAVA_EV_TAG_ENUM: // Delete the CP Type Objects delete_obj 
(element_value->value.enum_const_value.const_name_cp_obj); break; case R_BIN_JAVA_EV_TAG_CLASS: // Delete the CP Type Object delete_obj (element_value->value.class_value.class_info_cp_obj); break; case R_BIN_JAVA_EV_TAG_ARRAY: // Delete the Element Value array List r_list_foreach_safe (element_value->value.array_value.values, iter, iter_tmp, ev_element) { r_bin_java_element_value_free (ev_element); // r_list_delete (element_value->value.array_value.values, iter); ev_element = NULL; } r_list_free (element_value->value.array_value.values); break; case R_BIN_JAVA_EV_TAG_ANNOTATION: // Delete the Annotations List r_list_free (element_value->value.annotation_value.element_value_pairs); break; default: // eprintf unable to free the tag break; } if (attr) { free (attr->name); free (attr->metas); free (attr); } } R_API RBinJavaAnnotation *r_bin_java_annotation_new(ut8 *buffer, ut64 sz, ut64 buf_offset) { ut32 i = 0; RBinJavaElementValuePair *evps = NULL; ut64 offset = 0; if (sz < 8) { return NULL; } RBinJavaAnnotation *annotation = R_NEW0 (RBinJavaAnnotation); if (!annotation) { return NULL; } // (ut16) read and set annotation_value.type_idx; annotation->type_idx = R_BIN_JAVA_USHORT (buffer, offset); offset += 2; // (ut16) read and set annotation_value.num_element_value_pairs; annotation->num_element_value_pairs = R_BIN_JAVA_USHORT (buffer, offset); offset += 2; annotation->element_value_pairs = r_list_newf (r_bin_java_element_pair_free); // read annotation_value.num_element_value_pairs, and append to annotation_value.element_value_pairs for (i = 0; i < annotation->num_element_value_pairs; i++) { if (offset > sz) { break; } evps = r_bin_java_element_pair_new (buffer + offset, sz - offset, buf_offset + offset); if (evps) { offset += evps->size; r_list_append (annotation->element_value_pairs, (void *) evps); } } annotation->size = offset; return annotation; } R_API ut64 r_bin_java_annotation_calc_size(RBinJavaAnnotation *annotation) { ut64 sz = 0; RListIter *iter, *iter_tmp; 
RBinJavaElementValuePair *evps = NULL; if (!annotation) { // TODO eprintf allocation fail return sz; } // annotation->type_idx = R_BIN_JAVA_USHORT (buffer, offset); sz += 2; // annotation->num_element_value_pairs = R_BIN_JAVA_USHORT (buffer, offset); sz += 2; r_list_foreach_safe (annotation->element_value_pairs, iter, iter_tmp, evps) { if (evps) { sz += r_bin_java_element_pair_calc_size (evps); } } return sz; } R_API void r_bin_java_annotation_free(void /*RBinJavaAnnotation*/ *a) { RBinJavaAnnotation *annotation = a; if (annotation) { r_list_free (annotation->element_value_pairs); free (annotation); } } R_API void r_bin_java_print_annotation_summary(RBinJavaAnnotation *annotation) { RListIter *iter = NULL, *iter_tmp = NULL; RBinJavaElementValuePair *evp = NULL; if (!annotation) { // TODO eprintf invalid annotation return; } printf (" Annotation Type Index: 0x%02x\n", annotation->type_idx); printf (" Annotation Number of EV Pairs: 0x%04x\n", annotation->num_element_value_pairs); printf (" Annotation EV Pair Values:\n"); if (annotation->element_value_pairs) { r_list_foreach_safe (annotation->element_value_pairs, iter, iter_tmp, evp) { r_bin_java_print_element_pair_summary (evp); } } } R_API ut64 r_bin_java_element_pair_calc_size(RBinJavaElementValuePair *evp) { ut64 sz = 2; if (evp && evp->value) { // evp->element_name_idx = r_bin_java_read_short(bin, bin->b->cur); // evp->value = r_bin_java_element_value_new (bin, offset+2); sz += r_bin_java_element_value_calc_size (evp->value); } return sz; } R_API ut64 r_bin_java_element_value_calc_size(RBinJavaElementValue *element_value) { RListIter *iter, *iter_tmp; RBinJavaElementValue *ev_element; RBinJavaElementValuePair *evps; ut64 sz = 0; if (!element_value) { return sz; } // tag sz += 1; switch (element_value->tag) { case R_BIN_JAVA_EV_TAG_BYTE: case R_BIN_JAVA_EV_TAG_CHAR: case R_BIN_JAVA_EV_TAG_DOUBLE: case R_BIN_JAVA_EV_TAG_FLOAT: case R_BIN_JAVA_EV_TAG_INT: case R_BIN_JAVA_EV_TAG_LONG: case R_BIN_JAVA_EV_TAG_SHORT: 
case R_BIN_JAVA_EV_TAG_BOOLEAN: case R_BIN_JAVA_EV_TAG_STRING: // look up value in bin->cp_list // (ut16) read and set const_value.const_value_idx // element_value->value.const_value.const_value_idx = r_bin_java_read_short(bin, bin->b->cur); sz += 2; break; case R_BIN_JAVA_EV_TAG_ENUM: // (ut16) read and set enum_const_value.type_name_idx // element_value->value.enum_const_value.type_name_idx = r_bin_java_read_short(bin, bin->b->cur); sz += 2; // (ut16) read and set enum_const_value.const_name_idx // element_value->value.enum_const_value.const_name_idx = r_bin_java_read_short(bin, bin->b->cur); sz += 2; break; case R_BIN_JAVA_EV_TAG_CLASS: // (ut16) read and set class_value.class_info_idx // element_value->value.class_value.class_info_idx = r_bin_java_read_short(bin, bin->b->cur); sz += 2; break; case R_BIN_JAVA_EV_TAG_ARRAY: // (ut16) read and set array_value.num_values // element_value->value.array_value.num_values = r_bin_java_read_short(bin, bin->b->cur); sz += 2; r_list_foreach_safe (element_value->value.array_value.values, iter, iter_tmp, ev_element) { if (ev_element) { sz += r_bin_java_element_value_calc_size (ev_element); } } break; case R_BIN_JAVA_EV_TAG_ANNOTATION: // annotation new is not used here. 
// (ut16) read and set annotation_value.type_idx; // element_value->value.annotation_value.type_idx = r_bin_java_read_short(bin, bin->b->cur); sz += 2; // (ut16) read and set annotation_value.num_element_value_pairs; // element_value->value.annotation_value.num_element_value_pairs = r_bin_java_read_short(bin, bin->b->cur); sz += 2; element_value->value.annotation_value.element_value_pairs = r_list_newf (r_bin_java_element_pair_free); r_list_foreach_safe (element_value->value.annotation_value.element_value_pairs, iter, iter_tmp, evps) { if (evps) { sz += r_bin_java_element_pair_calc_size (evps); } } break; default: // eprintf unable to handle tag break; } return sz; } R_API RBinJavaElementValue *r_bin_java_element_value_new(ut8 *buffer, ut64 sz, ut64 buf_offset) { ut32 i = 0; ut64 offset = 0; if (sz < 8) { return NULL; } RBinJavaElementValue *element_value = R_NEW0 (RBinJavaElementValue); if (!element_value) { return NULL; } RBinJavaElementValuePair *evps = NULL; element_value->metas = R_NEW0 (RBinJavaMetaInfo); if (!element_value->metas) { R_FREE (element_value); return NULL; } element_value->file_offset = buf_offset; element_value->tag = buffer[offset]; element_value->size += 1; offset += 1; element_value->metas->type_info = (void *) r_bin_java_get_ev_meta_from_tag (element_value->tag); switch (element_value->tag) { case R_BIN_JAVA_EV_TAG_BYTE: case R_BIN_JAVA_EV_TAG_CHAR: case R_BIN_JAVA_EV_TAG_DOUBLE: case R_BIN_JAVA_EV_TAG_FLOAT: case R_BIN_JAVA_EV_TAG_INT: case R_BIN_JAVA_EV_TAG_LONG: case R_BIN_JAVA_EV_TAG_SHORT: case R_BIN_JAVA_EV_TAG_BOOLEAN: case R_BIN_JAVA_EV_TAG_STRING: // look up value in bin->cp_list // (ut16) read and set const_value.const_value_idx element_value->value.const_value.const_value_idx = R_BIN_JAVA_USHORT (buffer, offset); element_value->size += 2; // look-up, deep copy, and set const_value.const_value_cp_obj element_value->value.const_value.const_value_cp_obj = r_bin_java_clone_cp_idx (R_BIN_JAVA_GLOBAL_BIN, 
element_value->value.const_value.const_value_idx); break; case R_BIN_JAVA_EV_TAG_ENUM: // (ut16) read and set enum_const_value.type_name_idx element_value->value.enum_const_value.type_name_idx = R_BIN_JAVA_USHORT (buffer, offset); element_value->size += 2; offset += 2; // (ut16) read and set enum_const_value.const_name_idx element_value->value.enum_const_value.const_name_idx = R_BIN_JAVA_USHORT (buffer, offset); element_value->size += 2; offset += 2; // look up type_name_index in bin->cp_list // look-up, deep copy, and set enum_const_value.const_name_cp_obj element_value->value.enum_const_value.const_name_cp_obj = r_bin_java_clone_cp_idx (R_BIN_JAVA_GLOBAL_BIN, element_value->value.enum_const_value.const_name_idx); // look-up, deep copy, and set enum_const_value.type_name_cp_obj element_value->value.enum_const_value.type_name_cp_obj = r_bin_java_clone_cp_idx (R_BIN_JAVA_GLOBAL_BIN, element_value->value.enum_const_value.type_name_idx); break; case R_BIN_JAVA_EV_TAG_CLASS: // (ut16) read and set class_value.class_info_idx element_value->value.class_value.class_info_idx = R_BIN_JAVA_USHORT (buffer, offset); element_value->size += 2; offset += 2; // look up type_name_index in bin->cp_list // look-up, deep copy, and set class_value.class_info_cp_obj element_value->value.class_value.class_info_cp_obj = r_bin_java_clone_cp_idx (R_BIN_JAVA_GLOBAL_BIN, element_value->value.class_value.class_info_idx); break; case R_BIN_JAVA_EV_TAG_ARRAY: // (ut16) read and set array_value.num_values element_value->value.array_value.num_values = R_BIN_JAVA_USHORT (buffer, offset); element_value->size += 2; offset += 2; element_value->value.array_value.values = r_list_new (); for (i = 0; i < element_value->value.array_value.num_values; i++) { if (offset >= sz) { break; } RBinJavaElementValue *ev_element = r_bin_java_element_value_new (buffer + offset, sz - offset, buf_offset + offset); if (ev_element) { element_value->size += ev_element->size; offset += ev_element->size; // read 
array_value.num_values, and append to array_value.values r_list_append (element_value->value.array_value.values, (void *) ev_element); } } break; case R_BIN_JAVA_EV_TAG_ANNOTATION: // annotation new is not used here. // (ut16) read and set annotation_value.type_idx; if (offset + 8 < sz) { element_value->value.annotation_value.type_idx = R_BIN_JAVA_USHORT (buffer, offset); element_value->size += 2; offset += 2; // (ut16) read and set annotation_value.num_element_value_pairs; element_value->value.annotation_value.num_element_value_pairs = R_BIN_JAVA_USHORT (buffer, offset); element_value->size += 2; offset += 2; } element_value->value.annotation_value.element_value_pairs = r_list_newf (r_bin_java_element_pair_free); // read annotation_value.num_element_value_pairs, and append to annotation_value.element_value_pairs for (i = 0; i < element_value->value.annotation_value.num_element_value_pairs; i++) { if (offset > sz) { break; } evps = r_bin_java_element_pair_new (buffer + offset, sz - offset, buf_offset + offset); if (evps) { element_value->size += evps->size; offset += evps->size; } if (evps == NULL) { // TODO: eprintf error when reading element pair } r_list_append (element_value->value.annotation_value.element_value_pairs, (void *) evps); } break; default: // eprintf unable to handle tag break; } return element_value; } R_API void r_bin_java_bootstrap_method_argument_free(void /*RBinJavaBootStrapArgument*/ *b) { RBinJavaBootStrapArgument *bsm_arg = b; if (bsm_arg) { RBinJavaCPTypeMetas *tm = (RBinJavaCPTypeMetas*)bsm_arg->argument_info_cp_obj; if (tm) { if (tm && (size_t)(tm->allocs) > 1024 && tm->allocs->delete_obj) { tm->allocs->delete_obj (tm); } bsm_arg->argument_info_cp_obj = NULL; } free (bsm_arg); } } R_API void r_bin_java_print_bootstrap_method_argument_summary(RBinJavaBootStrapArgument *bsm_arg) { if (!bsm_arg) { eprintf ("Attempting to print an invalid RBinJavaBootStrapArgument *.\n"); return; } eprintf ("Bootstrap Method Argument Information:\n"); 
eprintf (" Offset: 0x%08"PFMT64x"", bsm_arg->file_offset); eprintf (" Name_And_Type Index = (0x%02x)\n", bsm_arg->argument_info_idx); if (bsm_arg->argument_info_cp_obj) { eprintf (" Bootstrap Method Argument Type and Name Info:\n"); ((RBinJavaCPTypeMetas *) bsm_arg->argument_info_cp_obj)->allocs->print_summary (bsm_arg->argument_info_cp_obj); } else { eprintf (" Bootstrap Method Argument Type and Name Info: INVALID\n"); } } R_API void r_bin_java_print_bootstrap_method_summary(RBinJavaBootStrapMethod *bsm) { RBinJavaBootStrapArgument *bsm_arg = NULL; RListIter *iter = NULL, *iter_tmp = NULL; if (!bsm) { eprintf ("Attempting to print an invalid RBinJavaBootStrapArgument *.\n"); return; } eprintf ("Bootstrap Method Information:\n"); eprintf (" Offset: 0x%08"PFMT64x"", bsm->file_offset); eprintf (" Method Reference Index = (0x%02x)\n", bsm->bootstrap_method_ref); eprintf (" Number of Method Arguments = (0x%02x)\n", bsm->num_bootstrap_arguments); if (bsm->bootstrap_arguments) { r_list_foreach_safe (bsm->bootstrap_arguments, iter, iter_tmp, bsm_arg) { if (bsm_arg) { r_bin_java_print_bootstrap_method_argument_summary (bsm_arg); } } } else { eprintf (" Bootstrap Method Argument: NONE \n"); } } R_API RBinJavaBootStrapArgument *r_bin_java_bootstrap_method_argument_new(ut8 *buffer, ut64 sz, ut64 buf_offset) { ut64 offset = 0; RBinJavaBootStrapArgument *bsm_arg = (RBinJavaBootStrapArgument *) malloc (sizeof (RBinJavaBootStrapArgument)); if (!bsm_arg) { // TODO eprintf failed to allocate bytes for bootstrap_method. 
return bsm_arg; } memset (bsm_arg, 0, sizeof (RBinJavaBootStrapArgument)); bsm_arg->file_offset = buf_offset; bsm_arg->argument_info_idx = R_BIN_JAVA_USHORT (buffer, offset); offset += 2; bsm_arg->argument_info_cp_obj = r_bin_java_clone_cp_idx (R_BIN_JAVA_GLOBAL_BIN, bsm_arg->argument_info_idx); bsm_arg->size = offset; return bsm_arg; } R_API void r_bin_java_bootstrap_method_free(void /*/RBinJavaBootStrapMethod*/ *b) { RBinJavaBootStrapMethod *bsm = b; RListIter *iter, *iter_tmp; RBinJavaBootStrapArgument *obj = NULL; if (bsm) { if (bsm->bootstrap_arguments) { r_list_foreach_safe (bsm->bootstrap_arguments, iter, iter_tmp, obj) { if (obj) { r_bin_java_bootstrap_method_argument_free (obj); } // r_list_delete (bsm->bootstrap_arguments, iter); } r_list_free (bsm->bootstrap_arguments); bsm->bootstrap_arguments = NULL; } free (bsm); } } R_API RBinJavaBootStrapMethod *r_bin_java_bootstrap_method_new(ut8 *buffer, ut64 sz, ut64 buf_offset) { RBinJavaBootStrapArgument *bsm_arg = NULL; ut32 i = 0; ut64 offset = 0; RBinJavaBootStrapMethod *bsm = R_NEW0 (RBinJavaBootStrapMethod); if (!bsm) { // TODO eprintf failed to allocate bytes for bootstrap_method. return bsm; } memset (bsm, 0, sizeof (RBinJavaBootStrapMethod)); bsm->file_offset = buf_offset; bsm->bootstrap_method_ref = R_BIN_JAVA_USHORT (buffer, offset); offset += 2; bsm->num_bootstrap_arguments = R_BIN_JAVA_USHORT (buffer, offset); offset += 2; bsm->bootstrap_arguments = r_list_new (); for (i = 0; i < bsm->num_bootstrap_arguments; i++) { if (offset >= sz) { break; } // bsm_arg = r_bin_java_bootstrap_method_argument_new (bin, bin->b->cur); bsm_arg = r_bin_java_bootstrap_method_argument_new (buffer + offset, sz - offset, buf_offset + offset); if (bsm_arg) { offset += bsm_arg->size; r_list_append (bsm->bootstrap_arguments, (void *) bsm_arg); } else { // TODO eprintf Failed to read the %d boot strap method. 
} } bsm->size = offset; return bsm; } R_API void r_bin_java_print_bootstrap_methods_attr_summary(RBinJavaAttrInfo *attr) { RListIter *iter, *iter_tmp; RBinJavaBootStrapMethod *obj = NULL; if (!attr || attr->type == R_BIN_JAVA_ATTR_TYPE_BOOTSTRAP_METHODS_ATTR) { eprintf ("Unable to print attribue summary for RBinJavaAttrInfo *RBinJavaBootstrapMethodsAttr"); return; } eprintf ("Bootstrap Methods Attribute Information Information:\n"); eprintf (" Attribute Offset: 0x%08"PFMT64x"", attr->file_offset); eprintf (" Length: 0x%08x", attr->length); eprintf (" Number of Method Arguments = (0x%02x)\n", attr->info.bootstrap_methods_attr.num_bootstrap_methods); if (attr->info.bootstrap_methods_attr.bootstrap_methods) { r_list_foreach_safe (attr->info.bootstrap_methods_attr.bootstrap_methods, iter, iter_tmp, obj) { if (obj) { r_bin_java_print_bootstrap_method_summary (obj); } } } else { eprintf (" Bootstrap Methods: NONE \n"); } } R_API void r_bin_java_bootstrap_methods_attr_free(void /*RBinJavaAttrInfo*/ *a) { RBinJavaAttrInfo *attr = a; if (attr && attr->type == R_BIN_JAVA_ATTR_TYPE_BOOTSTRAP_METHODS_ATTR) { free (attr->name); free (attr->metas); r_list_free (attr->info.bootstrap_methods_attr.bootstrap_methods); free (attr); } } R_API ut64 r_bin_java_bootstrap_methods_attr_calc_size(RBinJavaAttrInfo *attr) { RListIter *iter, *iter_tmp; RBinJavaBootStrapMethod *bsm = NULL; ut64 size = 0; if (attr) { size += 6; // attr->info.bootstrap_methods_attr.num_bootstrap_methods = R_BIN_JAVA_USHORT (buffer, offset); size += 2; r_list_foreach_safe (attr->info.bootstrap_methods_attr.bootstrap_methods, iter, iter_tmp, bsm) { if (bsm) { size += r_bin_java_bootstrap_method_calc_size (bsm); } else { // TODO eprintf Failed to read the %d boot strap method. 
} } } return size; } R_API ut64 r_bin_java_bootstrap_arg_calc_size(RBinJavaBootStrapArgument *bsm_arg) { ut64 size = 0; if (bsm_arg) { // bsm_arg->argument_info_idx = R_BIN_JAVA_USHORT (buffer, offset); size += 2; } return size; } R_API ut64 r_bin_java_bootstrap_method_calc_size(RBinJavaBootStrapMethod *bsm) { RListIter *iter, *iter_tmp; RBinJavaBootStrapArgument *bsm_arg = NULL; ut64 size = 0; if (bsm) { size += 6; // bsm->bootstrap_method_ref = R_BIN_JAVA_USHORT (buffer, offset); size += 2; // bsm->num_bootstrap_arguments = R_BIN_JAVA_USHORT (buffer, offset); size += 2; r_list_foreach_safe (bsm->bootstrap_arguments, iter, iter_tmp, bsm_arg) { if (bsm_arg) { size += r_bin_java_bootstrap_arg_calc_size (bsm_arg); } else { // TODO eprintf Failed to read the %d boot strap method. } } } return size; } R_API RBinJavaAttrInfo *r_bin_java_bootstrap_methods_attr_new(RBinJavaObj *bin, ut8 *buffer, ut64 sz, ut64 buf_offset) { ut32 i = 0; RBinJavaBootStrapMethod *bsm = NULL; ut64 offset = 0; RBinJavaAttrInfo *attr = r_bin_java_default_attr_new (bin, buffer, sz, buf_offset); offset += 6; if (attr) { attr->type = R_BIN_JAVA_ATTR_TYPE_BOOTSTRAP_METHODS_ATTR; attr->info.bootstrap_methods_attr.num_bootstrap_methods = R_BIN_JAVA_USHORT (buffer, offset); offset += 2; attr->info.bootstrap_methods_attr.bootstrap_methods = r_list_newf (r_bin_java_bootstrap_method_free); for (i = 0; i < attr->info.bootstrap_methods_attr.num_bootstrap_methods; i++) { // bsm = r_bin_java_bootstrap_method_new (bin, bin->b->cur); if (offset >= sz) { break; } bsm = r_bin_java_bootstrap_method_new (buffer + offset, sz - offset, buf_offset + offset); if (bsm) { offset += bsm->size; r_list_append (attr->info.bootstrap_methods_attr.bootstrap_methods, (void *) bsm); } else { // TODO eprintf Failed to read the %d boot strap method. 
} } attr->size = offset; } return attr; } R_API void r_bin_java_print_annotation_default_attr_summary(RBinJavaAttrInfo *attr) { if (attr && attr->type == R_BIN_JAVA_ATTR_TYPE_ANNOTATION_DEFAULT_ATTR) { eprintf ("Annotation Default Attribute Information:\n"); eprintf (" Attribute Offset: 0x%08"PFMT64x "\n", attr->file_offset); eprintf (" Attribute Name Index: %d (%s)\n", attr->name_idx, attr->name); eprintf (" Attribute Length: %d\n", attr->length); r_bin_java_print_element_value_summary ((attr->info.annotation_default_attr.default_value)); } else { // TODO: eprintf attr is invalid } } R_API void r_bin_java_annotation_array_free(void /*RBinJavaAnnotationsArray*/ *a) { RBinJavaAnnotationsArray *annotation_array = a; RListIter *iter = NULL, *iter_tmp = NULL; RBinJavaAnnotation *annotation; if (!annotation_array->annotations) { // TODO eprintf return; } r_list_foreach_safe (annotation_array->annotations, iter, iter_tmp, annotation) { if (annotation) { r_bin_java_annotation_free (annotation); } // r_list_delete (annotation_array->annotations, iter); } r_list_free (annotation_array->annotations); free (annotation_array); } R_API void r_bin_java_print_annotation_array_summary(RBinJavaAnnotationsArray *annotation_array) { RListIter *iter = NULL, *iter_tmp = NULL; RBinJavaAnnotation *annotation; if (!annotation_array->annotations) { // TODO eprintf return; } eprintf (" Annotation Array Information:\n"); eprintf (" Number of Annotation Array Elements: %d\n", annotation_array->num_annotations); r_list_foreach_safe (annotation_array->annotations, iter, iter_tmp, annotation) { r_bin_java_print_annotation_summary (annotation); } } R_API RBinJavaAnnotationsArray *r_bin_java_annotation_array_new(ut8 *buffer, ut64 sz, ut64 buf_offset) { RBinJavaAnnotation *annotation; RBinJavaAnnotationsArray *annotation_array; ut32 i; ut64 offset = 0; annotation_array = (RBinJavaAnnotationsArray *) malloc (sizeof (RBinJavaAnnotationsArray)); if (!annotation_array) { // TODO eprintf return NULL; } 
annotation_array->num_annotations = R_BIN_JAVA_USHORT (buffer, offset); offset += 2; annotation_array->annotations = r_list_new (); for (i = 0; i < annotation_array->num_annotations; i++) { if (offset > sz) { break; } annotation = r_bin_java_annotation_new (buffer + offset, sz - offset, buf_offset + offset); if (annotation) { offset += annotation->size; r_list_append (annotation_array->annotations, (void *) annotation); } } annotation_array->size = offset; return annotation_array; } R_API RBinJavaAttrInfo *r_bin_java_rtv_annotations_attr_new(RBinJavaObj *bin, ut8 *buffer, ut64 sz, ut64 buf_offset) { ut32 i = 0; ut64 offset = 0; if (sz < 8) { return NULL; } RBinJavaAttrInfo *attr = r_bin_java_default_attr_new (bin, buffer, sz, buf_offset); offset += 6; if (attr) { attr->type = R_BIN_JAVA_ATTR_TYPE_RUNTIME_VISIBLE_ANNOTATION_ATTR; attr->info.annotation_array.num_annotations = R_BIN_JAVA_USHORT (buffer, offset); offset += 2; attr->info.annotation_array.annotations = r_list_newf (r_bin_java_annotation_free); for (i = 0; i < attr->info.annotation_array.num_annotations; i++) { if (offset >= sz) { break; } RBinJavaAnnotation *annotation = r_bin_java_annotation_new (buffer + offset, sz - offset, buf_offset + offset); if (annotation) { offset += annotation->size; r_list_append (attr->info.annotation_array.annotations, (void *) annotation); } } attr->size = offset; } return attr; } R_API ut64 r_bin_java_annotation_array_calc_size(RBinJavaAnnotationsArray *annotation_array) { ut64 size = 0; RListIter *iter = NULL, *iter_tmp = NULL; RBinJavaAnnotation *annotation; if (!annotation_array->annotations) { // TODO eprintf return size; } // annotation_array->num_annotations = R_BIN_JAVA_USHORT (buffer, offset); size += 2; r_list_foreach_safe (annotation_array->annotations, iter, iter_tmp, annotation) { size += r_bin_java_annotation_calc_size (annotation); } return size; } R_API ut64 r_bin_java_rtv_annotations_attr_calc_size(RBinJavaAttrInfo *attr) { ut64 size = 0; if (!attr) { // 
TODO eprintf allocation fail return size; } size += (6 + r_bin_java_annotation_array_calc_size (&(attr->info.annotation_array))); return size; } R_API RBinJavaAttrInfo *r_bin_java_rti_annotations_attr_new(RBinJavaObj *bin, ut8 *buffer, ut64 sz, ut64 buf_offset) { ut32 i = 0; RBinJavaAttrInfo *attr = NULL; ut64 offset = 0; attr = r_bin_java_default_attr_new (bin, buffer, sz, buf_offset); offset += 6; if (attr) { attr->type = R_BIN_JAVA_ATTR_TYPE_RUNTIME_INVISIBLE_ANNOTATION_ATTR; attr->info.annotation_array.num_annotations = R_BIN_JAVA_USHORT (buffer, offset); offset += 2; attr->info.annotation_array.annotations = r_list_newf (r_bin_java_annotation_free); for (i = 0; i < attr->info.rtv_annotations_attr.num_annotations; i++) { if (offset >= sz) { break; } RBinJavaAnnotation *annotation = r_bin_java_annotation_new (buffer + offset, sz - offset, buf_offset + offset); if (annotation) { offset += annotation->size; } r_list_append (attr->info.annotation_array.annotations, (void *) annotation); } attr->size = offset; } return attr; } R_API ut64 r_bin_java_rti_annotations_attr_calc_size(RBinJavaAttrInfo *attr) { ut64 size = 0; if (!attr) { // TODO eprintf allocation fail return size; } size += (6 + r_bin_java_annotation_array_calc_size (&(attr->info.annotation_array))); return size; } R_API void r_bin_java_rtv_annotations_attr_free(void /*RBinJavaAttrInfo*/ *a) { RBinJavaAttrInfo *attr = a; if (attr && attr->type == R_BIN_JAVA_ATTR_TYPE_RUNTIME_VISIBLE_ANNOTATION_ATTR) { r_list_free (attr->info.annotation_array.annotations); free (attr->metas); free (attr->name); free (attr); } } R_API void r_bin_java_rti_annotations_attr_free(void /*RBinJavaAttrInfo*/ *a) { RBinJavaAttrInfo *attr = a; if (attr && attr->type == R_BIN_JAVA_ATTR_TYPE_RUNTIME_INVISIBLE_ANNOTATION_ATTR) { r_list_free (attr->info.annotation_array.annotations); free (attr->metas); free (attr->name); free (attr); } } R_API void r_bin_java_print_rtv_annotations_attr_summary(RBinJavaAttrInfo *attr) { if (attr && 
attr->type == R_BIN_JAVA_ATTR_TYPE_RUNTIME_VISIBLE_ANNOTATION_ATTR) { printf ("Runtime Visible Annotations Attribute Information:\n"); printf (" Attribute Offset: 0x%08"PFMT64x "\n", attr->file_offset); printf (" Attribute Name Index: %d (%s)\n", attr->name_idx, attr->name); printf (" Attribute Length: %d\n", attr->length); r_bin_java_print_annotation_array_summary (&attr->info.annotation_array); } } R_API void r_bin_java_print_rti_annotations_attr_summary(RBinJavaAttrInfo *attr) { if (attr && attr->type == R_BIN_JAVA_ATTR_TYPE_RUNTIME_INVISIBLE_ANNOTATION_ATTR) { printf ("Runtime Invisible Annotations Attribute Information:\n"); printf (" Attribute Offset: 0x%08"PFMT64x "\n", attr->file_offset); printf (" Attribute Name Index: %d (%s)\n", attr->name_idx, attr->name); printf (" Attribute Length: %d\n", attr->length); r_bin_java_print_annotation_array_summary (&attr->info.annotation_array); } } R_API ut64 r_bin_java_rtip_annotations_attr_calc_size(RBinJavaAttrInfo *attr) { ut64 size = 0; RListIter *iter = NULL, *iter_tmp = NULL; RBinJavaAnnotationsArray *annotation_array; if (!attr) { // TODO eprintf allocation fail return size; } // attr->info.rtip_annotations_attr.num_parameters = buffer[offset]; size += (6 + 1); r_list_foreach_safe (attr->info.rtip_annotations_attr.parameter_annotations, iter, iter_tmp, annotation_array) { if (annotation_array) { size += r_bin_java_annotation_array_calc_size (annotation_array); } } return size; } R_API RBinJavaAttrInfo *r_bin_java_rtip_annotations_attr_new(RBinJavaObj *bin, ut8 *buffer, ut64 sz, ut64 buf_offset) { ut32 i = 0; RBinJavaAttrInfo *attr = NULL; ut64 offset = 0; attr = r_bin_java_default_attr_new (bin, buffer, sz, buf_offset); offset += 6; if (attr) { attr->type = R_BIN_JAVA_ATTR_TYPE_RUNTIME_INVISIBLE_PARAMETER_ANNOTATION_ATTR; attr->info.rtip_annotations_attr.num_parameters = buffer[offset]; offset += 1; attr->info.rtip_annotations_attr.parameter_annotations = r_list_newf (r_bin_java_annotation_array_free); for (i = 
0; i < attr->info.rtip_annotations_attr.num_parameters; i++) { if (offset >= sz) { break; } RBinJavaAnnotationsArray *annotation_array = r_bin_java_annotation_array_new ( buffer + offset, sz - offset, buf_offset + offset); if (annotation_array) { offset += annotation_array->size; r_list_append (attr->info.rtip_annotations_attr.parameter_annotations, (void *) annotation_array); } } attr->size = offset; } return attr; } R_API RBinJavaAttrInfo *r_bin_java_rtvp_annotations_attr_new(RBinJavaObj *bin, ut8 *buffer, ut64 sz, ut64 buf_offset) { ut32 i = 0; RBinJavaAttrInfo *attr = NULL; ut64 offset = 0; attr = r_bin_java_default_attr_new (bin, buffer, sz, buf_offset); offset += 6; RBinJavaAnnotationsArray *annotation_array; if (attr) { attr->type = R_BIN_JAVA_ATTR_TYPE_RUNTIME_VISIBLE_PARAMETER_ANNOTATION_ATTR; attr->info.rtvp_annotations_attr.num_parameters = buffer[offset]; offset += 1; attr->info.rtvp_annotations_attr.parameter_annotations = r_list_newf (r_bin_java_annotation_array_free); for (i = 0; i < attr->info.rtvp_annotations_attr.num_parameters; i++) { if (offset > sz) { break; } annotation_array = r_bin_java_annotation_array_new (buffer + offset, sz - offset, buf_offset + offset); if (annotation_array) { offset += annotation_array->size; } r_list_append (attr->info.rtvp_annotations_attr.parameter_annotations, (void *) annotation_array); } attr->size = offset; } return attr; } R_API ut64 r_bin_java_rtvp_annotations_attr_calc_size(RBinJavaAttrInfo *attr) { ut64 size = 0; RListIter *iter = NULL, *iter_tmp = NULL; RBinJavaAnnotationsArray *annotation_array; if (!attr) { return size; } size += (6 + 1); r_list_foreach_safe (attr->info.rtvp_annotations_attr.parameter_annotations, iter, iter_tmp, annotation_array) { if (annotation_array) { size += r_bin_java_annotation_array_calc_size ( annotation_array); } } return size; } R_API void r_bin_java_rtvp_annotations_attr_free(void /*RBinJavaAttrInfo*/ *a) { RBinJavaAttrInfo *attr = a; if (attr) { if (attr->type == 
R_BIN_JAVA_ATTR_TYPE_RUNTIME_VISIBLE_PARAMETER_ANNOTATION_ATTR) {
			r_list_free (attr->info.rtvp_annotations_attr.parameter_annotations);
		}
		free (attr->name);
		free (attr->metas);
		free (attr);
	}
}

// Destructor for a RuntimeInvisibleParameterAnnotations attribute.
R_API void r_bin_java_rtip_annotations_attr_free(void /*RBinJavaAttrInfo*/ *a) {
	RBinJavaAttrInfo *attr = a;
	if (attr) {
		// && attr->type == R_BIN_JAVA_ATTR_TYPE_RUNTIME_INVISIBLE_PARAMETER_ANNOTATION_ATTR) {
		r_list_free (attr->info.rtip_annotations_attr.parameter_annotations);
		free (attr->metas);
		free (attr->name);
		free (attr);
	}
}

// Summary printer for RuntimeVisibleParameterAnnotations.
R_API void r_bin_java_print_rtvp_annotations_attr_summary(RBinJavaAttrInfo *attr) {
	RBinJavaAnnotationsArray *annotation_array = NULL;
	RListIter *iter = NULL, *iter_tmp = NULL;
	if (attr && attr->type == R_BIN_JAVA_ATTR_TYPE_RUNTIME_VISIBLE_PARAMETER_ANNOTATION_ATTR) {
		eprintf ("Runtime Visible Parameter Annotations Attribute Information:\n");
		eprintf (" Attribute Offset: 0x%08"PFMT64x "\n", attr->file_offset);
		eprintf (" Attribute Name Index: %d (%s)\n", attr->name_idx, attr->name);
		eprintf (" Attribute Length: %d\n", attr->length);
		eprintf (" Number of Runtime Invisible Parameters: %d\n", attr->info.rtvp_annotations_attr.num_parameters);
		r_list_foreach_safe (attr->info.rtvp_annotations_attr.parameter_annotations, iter, iter_tmp, annotation_array) {
			r_bin_java_print_annotation_array_summary (annotation_array);
		}
	}
}

// Summary printer for RuntimeInvisibleParameterAnnotations.
R_API void r_bin_java_print_rtip_annotations_attr_summary(RBinJavaAttrInfo *attr) {
	RBinJavaAnnotationsArray *annotation_array = NULL;
	RListIter *iter = NULL, *iter_tmp = NULL;
	if (attr && attr->type == R_BIN_JAVA_ATTR_TYPE_RUNTIME_INVISIBLE_PARAMETER_ANNOTATION_ATTR) {
		eprintf ("Runtime Invisible Parameter Annotations Attribute Information:\n");
		eprintf (" Attribute Offset: 0x%08"PFMT64x "\n", attr->file_offset);
		eprintf (" Attribute Name Index: %d (%s)\n", attr->name_idx, attr->name);
		eprintf (" Attribute Length: %d\n", attr->length);
		eprintf (" Number of Runtime Invisible Parameters: %d\n",
attr->info.rtip_annotations_attr.num_parameters);
		r_list_foreach_safe (attr->info.rtip_annotations_attr.parameter_annotations, iter, iter_tmp, annotation_array) {
			r_bin_java_print_annotation_array_summary (annotation_array);
		}
	}
}

// Linear scan of the constant pool for a NameAndType entry matching both
// the given name index and descriptor index; returns the entry or NULL.
R_API RBinJavaCPTypeObj *r_bin_java_find_cp_name_and_type_info(RBinJavaObj *bin, ut16 name_idx, ut16 descriptor_idx) {
	RListIter *iter, *iter_tmp;
	RBinJavaCPTypeObj *res = NULL, *obj = NULL;
	IFDBG eprintf ("Looking for name_idx: %d and descriptor_idx: %d\n", name_idx, descriptor_idx);
	r_list_foreach_safe (bin->cp_list, iter, iter_tmp, obj) {
		if (obj && obj->tag == R_BIN_JAVA_CP_NAMEANDTYPE) {
			IFDBG eprintf ("RBinJavaCPTypeNameAndType has name_idx: %d and descriptor_idx: %d\n",
			obj->info.cp_name_and_type.name_idx, obj->info.cp_name_and_type.descriptor_idx);
			if (obj->info.cp_name_and_type.name_idx == name_idx &&
			obj->info.cp_name_and_type.descriptor_idx == descriptor_idx) {
				res = obj;
				break;
			}
		}
	}
	return res;
}

// Return a heap-allocated copy of the CP entry's type name ("Class",
// "MethodRef", ...) or "INVALID" when the index does not resolve.
// Caller frees the result.
R_API char *r_bin_java_resolve_cp_idx_type(RBinJavaObj *BIN_OBJ, int idx) {
	RBinJavaCPTypeObj *item = NULL;
	char *str = NULL;
	if (BIN_OBJ && BIN_OBJ->cp_count < 1) {
		// r_bin_java_new_bin(BIN_OBJ);
		return NULL;
	}
	item = (RBinJavaCPTypeObj *) r_bin_java_get_item_from_bin_cp_list (BIN_OBJ, idx);
	if (item) {
		str = strdup (((RBinJavaCPTypeMetas *) item->metas->type_info)->name);
	} else {
		str = strdup ("INVALID");
	}
	return str;
}

// Find a Field/Method ref that points at the NameAndType entry matching
// (name_idx, descriptor_idx); NULL when no NameAndType matches.
R_API RBinJavaCPTypeObj *r_bin_java_find_cp_ref_info_from_name_and_type(RBinJavaObj *bin, ut16 name_idx, ut16 descriptor_idx) {
	RBinJavaCPTypeObj *obj = r_bin_java_find_cp_name_and_type_info (bin, name_idx, descriptor_idx);
	if (obj) {
		return r_bin_java_find_cp_ref_info (bin, obj->metas->ord);
	}
	return NULL;
}

// Scan the constant pool for the first FieldRef or MethodRef whose
// name_and_type_idx equals the given ordinal.
R_API RBinJavaCPTypeObj *r_bin_java_find_cp_ref_info(RBinJavaObj *bin, ut16 name_and_type_idx) {
	RListIter *iter, *iter_tmp;
	RBinJavaCPTypeObj *res = NULL, *obj = NULL;
	r_list_foreach_safe (bin->cp_list, iter, iter_tmp, obj) {
		if (obj->tag == R_BIN_JAVA_CP_FIELDREF &&
		obj->info.cp_field.name_and_type_idx ==
name_and_type_idx) {
			res = obj;
			break;
		} else if (obj->tag == R_BIN_JAVA_CP_METHODREF &&
		obj->info.cp_method.name_and_type_idx == name_and_type_idx) {
			res = obj;
			break;
		}
	}
	return res;
}

// Resolve a constant-pool index to a human-readable string, dispatching on
// the entry's type name. Returns a heap string the caller must free, or NULL
// when the pool is empty. space_bn_name_type inserts " " between name and
// descriptor for Class/Ref/NameAndType entries.
R_API char *r_bin_java_resolve(RBinJavaObj *BIN_OBJ, int idx, ut8 space_bn_name_type) {
	// TODO XXX FIXME add a size parameter to the str when it is passed in
	RBinJavaCPTypeObj *item = NULL, *item2 = NULL;
	char *class_str = NULL,
	*name_str = NULL,
	*desc_str = NULL,
	*string_str = NULL,
	*empty = "",
	*cp_name = NULL,
	*str = NULL;
	if (BIN_OBJ && BIN_OBJ->cp_count < 1) {
		// r_bin_java_new_bin(BIN_OBJ);
		return NULL;
	}
	item = (RBinJavaCPTypeObj *) r_bin_java_get_item_from_bin_cp_list (BIN_OBJ, idx);
	if (item) {
		// cp_name points into the item's metas; do not free it.
		cp_name = ((RBinJavaCPTypeMetas *) item->metas->type_info)->name;
		IFDBG eprintf ("java_resolve Resolved: (%d) %s\n", idx, cp_name);
	} else {
		str = malloc (512);
		if (str) {
			snprintf (str, 512, "(%d) INVALID CP_OBJ", idx);
		}
		return str;
	}
	if (strcmp (cp_name, "Class") == 0) {
		item2 = (RBinJavaCPTypeObj *) r_bin_java_get_item_from_bin_cp_list (BIN_OBJ, idx);
		// str = r_bin_java_get_name_from_bin_cp_list (BIN_OBJ, idx-1);
		class_str = r_bin_java_get_item_name_from_bin_cp_list (BIN_OBJ, item);
		if (!class_str) {
			class_str = empty;
		}
		name_str = r_bin_java_get_item_name_from_bin_cp_list (BIN_OBJ, item2);
		if (!name_str) {
			name_str = empty;
		}
		desc_str = r_bin_java_get_item_desc_from_bin_cp_list (BIN_OBJ, item2);
		if (!desc_str) {
			desc_str = empty;
		}
		str = r_str_newf ("%s%s%s", name_str, space_bn_name_type ? " " : "", desc_str);
		if (class_str != empty) {
			free (class_str);
		}
		if (name_str != empty) {
			free (name_str);
		}
		if (desc_str != empty) {
			free (desc_str);
		}
	} else if (!strcmp (cp_name, "MethodRef") ||
	!strcmp (cp_name, "FieldRef") ||
	!strcmp (cp_name, "InterfaceMethodRef")) {
		/*
		* The MethodRef, FieldRef, and InterfaceMethodRef structures
		*/
		class_str = r_bin_java_get_name_from_bin_cp_list (BIN_OBJ, item->info.cp_method.class_idx);
		if (!class_str) {
			class_str = empty;
		}
		name_str = r_bin_java_get_item_name_from_bin_cp_list (BIN_OBJ, item);
		if (!name_str) {
			name_str = empty;
		}
		desc_str = r_bin_java_get_item_desc_from_bin_cp_list (BIN_OBJ, item);
		if (!desc_str) {
			desc_str = empty;
		}
		str = r_str_newf ("%s/%s%s%s", class_str, name_str, space_bn_name_type ? " " : "", desc_str);
		if (class_str != empty) {
			free (class_str);
		}
		if (name_str != empty) {
			free (name_str);
		}
		if (desc_str != empty) {
			free (desc_str);
		}
	} else if (!strcmp (cp_name, "String")) {
		string_str = r_bin_java_get_utf8_from_bin_cp_list (BIN_OBJ, item->info.cp_string.string_idx);
		str = NULL;
		IFDBG eprintf ("java_resolve String got: (%d) %s\n", item->info.cp_string.string_idx, string_str);
		if (!string_str) {
			string_str = empty;
		}
		str = r_str_newf ("\"%s\"", string_str);
		IFDBG eprintf ("java_resolve String return: %s\n", str);
		if (string_str != empty) {
			free (string_str);
		}
	} else if (!strcmp (cp_name, "Utf8")) {
		char *tmp_str = convert_string ((const char *) item->info.cp_utf8.bytes, item->info.cp_utf8.length);
		ut32 tmp_str_len = tmp_str ?
strlen (tmp_str) + 4 : 0; if (tmp_str) { str = malloc (tmp_str_len + 4); snprintf (str, tmp_str_len + 4, "\"%s\"", tmp_str); } free (tmp_str); } else if (!strcmp (cp_name, "Long")) { str = r_str_newf ("0x%"PFMT64x, r_bin_java_raw_to_long (item->info.cp_long.bytes.raw, 0)); } else if (!strcmp (cp_name, "Double")) { str = r_str_newf ("%f", r_bin_java_raw_to_double (item->info.cp_double.bytes.raw, 0)); } else if (!strcmp (cp_name, "Integer")) { str = r_str_newf ("0x%08x", R_BIN_JAVA_UINT (item->info.cp_integer.bytes.raw, 0)); } else if (!strcmp (cp_name, "Float")) { str = r_str_newf ("%f", R_BIN_JAVA_FLOAT (item->info.cp_float.bytes.raw, 0)); } else if (!strcmp (cp_name, "NameAndType")) { name_str = r_bin_java_get_item_name_from_bin_cp_list (BIN_OBJ, item); if (!name_str) { name_str = empty; } desc_str = r_bin_java_get_item_desc_from_bin_cp_list (BIN_OBJ, item); if (!desc_str) { desc_str = empty; } str = r_str_newf ("%s%s%s", name_str, space_bn_name_type ? " " : "", desc_str); if (name_str != empty) { free (name_str); } if (desc_str != empty) { free (desc_str); } } else { str = strdup ("(null)"); } return str; } R_API ut8 r_bin_java_does_cp_idx_ref_method(RBinJavaObj *BIN_OBJ, int idx) { RBinJavaField *fm_type = NULL; RListIter *iter; ut8 res = 0; r_list_foreach (BIN_OBJ->methods_list, iter, fm_type) { if (fm_type->field_ref_cp_obj->metas->ord == idx) { res = 1; break; } } return res; } R_API ut8 r_bin_java_does_cp_idx_ref_field(RBinJavaObj *BIN_OBJ, int idx) { RBinJavaField *fm_type = NULL; RListIter *iter; ut8 res = 0; r_list_foreach (BIN_OBJ->fields_list, iter, fm_type) { if (fm_type->field_ref_cp_obj->metas->ord == idx) { res = 1; break; } } return res; } R_API char *r_bin_java_get_method_name(RBinJavaObj *bin_obj, ut32 idx) { char *name = NULL; if (idx < r_list_length (bin_obj->methods_list)) { RBinJavaField *fm_type = r_list_get_n (bin_obj->methods_list, idx); name = strdup (fm_type->name); } return name; } R_API RList *r_bin_java_get_method_num_name(RBinJavaObj 
*bin_obj) {
	// Build a list of "ordinal name" strings, one per method.
	ut32 i = 0;
	RListIter *iter;
	RBinJavaField *fm_type;
	RList *res = r_list_newf (free);
	r_list_foreach (bin_obj->methods_list, iter, fm_type) {
		char *str = r_str_newf ("%d %s", i, fm_type->name);
		r_list_append (res, str);
		i++;
	}
	return res;
}

/*
   R_API int r_bin_java_does_cp_obj_ref_idx (RBinJavaObj *bin_obj, RBinJavaCPTypeObj *cp_obj, ut16 idx) {
        int res = false;
        RBinJavaCPTypeObj *t_obj = NULL;
        if (cp_obj) {
                switch (cp_obj->tag) {
                case R_BIN_JAVA_CP_NULL: break;
                case R_BIN_JAVA_CP_UTF8: break;
                case R_BIN_JAVA_CP_UNKNOWN: break;
                case R_BIN_JAVA_CP_INTEGER: break;
                case R_BIN_JAVA_CP_FLOAT: break;
                case R_BIN_JAVA_CP_LONG: break;
                case R_BIN_JAVA_CP_DOUBLE: break;
                case R_BIN_JAVA_CP_CLASS:
                        res = idx == cp_obj->info.cp_class.name_idx ? true : false;
                        break;
                case R_BIN_JAVA_CP_STRING:
                        res = idx == cp_obj->info.cp_string.string_idx ? true : false;
                        break;
                case R_BIN_JAVA_CP_METHODREF: break;// check if idx is referenced here
                case R_BIN_JAVA_CP_INTERFACEMETHOD_REF: break; // check if idx is referenced here
                case R_BIN_JAVA_CP_FIELDREF:
                        t_obj = r_bin_java_get_item_from_cp (bin_obj, cp_obj->info.cp_method.class_idx);
                        res = r_bin_java_does_cp_obj_ref_idx (bin_obj, t_obj, idx);
                        if (res == true) break;
                        t_obj = r_bin_java_get_item_from_cp (bin_obj, cp_obj->info.cp_method.name_and_type_idx);
                        res = r_bin_java_does_cp_obj_ref_idx (bin_obj, t_obj, idx);
                        break;
                case R_BIN_JAVA_CP_NAMEANDTYPE: break;// check if idx is referenced here obj->info.cp_name_and_type.name_idx
                case R_BIN_JAVA_CP_METHODHANDLE: break;// check if idx is referenced here
                case R_BIN_JAVA_CP_METHODTYPE: break;// check if idx is referenced here
                case R_BIN_JAVA_CP_INVOKEDYNAMIC: break;// check if idx is referenced here
                }
        }
   }
 */

// Collect the CP ordinals of every Long entry whose raw value equals the
// 8-byte value in `bytes`. Returns an owned list of malloc'd ut32 ordinals.
R_API RList *r_bin_java_find_cp_const_by_val_long(RBinJavaObj *bin_obj, const ut8 *bytes, ut32 len) {
	RList *res = r_list_newf (free);
	ut32 *v = NULL;
	RListIter *iter;
	RBinJavaCPTypeObj *cp_obj;
	eprintf ("Looking for 0x%08x\n", R_BIN_JAVA_UINT (bytes, 0));
	r_list_foreach (bin_obj->cp_list, iter, cp_obj) {
		if (cp_obj->tag == R_BIN_JAVA_CP_LONG) {
			if (len == 8 && r_bin_java_raw_to_long (cp_obj->info.cp_long.bytes.raw, 0) == r_bin_java_raw_to_long (bytes, 0)) {
				// TODO: we can safely store a ut32 inside the list without having to allocate it
				v = malloc (sizeof (ut32));
				if (!v) {
					r_list_free (res);
					return NULL;
				}
				*v = cp_obj->idx;
				r_list_append (res, v);
			}
		}
	}
	return res;
}

// Same as the Long variant, but matches Double entries by value.
R_API RList *r_bin_java_find_cp_const_by_val_double(RBinJavaObj *bin_obj, const ut8 *bytes, ut32 len) {
	RList *res = r_list_newf (free);
	ut32 *v = NULL;
	RListIter *iter;
	RBinJavaCPTypeObj *cp_obj;
	eprintf ("Looking for %f\n", r_bin_java_raw_to_double (bytes, 0));
	r_list_foreach (bin_obj->cp_list, iter, cp_obj) {
		if (cp_obj->tag == R_BIN_JAVA_CP_DOUBLE) {
			if (len == 8 && r_bin_java_raw_to_double (cp_obj->info.cp_long.bytes.raw, 0) == r_bin_java_raw_to_double (bytes, 0)) {
				v = malloc (sizeof (ut32));
				if (!v) {
					r_list_free (res);
					return NULL;
				}
				*v = cp_obj->idx;
				r_list_append (res, v);
			}
		}
	}
	return res;
}

// Same as the Long variant, but matches 4-byte Float entries.
R_API RList *r_bin_java_find_cp_const_by_val_float(RBinJavaObj *bin_obj, const ut8 *bytes, ut32 len) {
	RList *res = r_list_newf (free);
	ut32 *v = NULL;
	RListIter *iter;
	RBinJavaCPTypeObj *cp_obj;
	eprintf ("Looking for %f\n", R_BIN_JAVA_FLOAT (bytes, 0));
	r_list_foreach (bin_obj->cp_list, iter, cp_obj) {
		if (cp_obj->tag == R_BIN_JAVA_CP_FLOAT) {
			if (len == 4 && R_BIN_JAVA_FLOAT (cp_obj->info.cp_long.bytes.raw, 0) == R_BIN_JAVA_FLOAT (bytes, 0)) {
				v = malloc (sizeof (ut32));
				if (!v) {
					r_list_free (res);
					return NULL;
				}
				*v = cp_obj->idx;
				r_list_append (res, v);
			}
		}
	}
	return res;
}

// Dispatch the by-value CP search on the constant's tag type.
R_API RList *r_bin_java_find_cp_const_by_val(RBinJavaObj *bin_obj, const ut8 *bytes, ut32 len, const char t) {
	switch (t) {
	case R_BIN_JAVA_CP_UTF8: return r_bin_java_find_cp_const_by_val_utf8 (bin_obj, bytes, len);
	case R_BIN_JAVA_CP_INTEGER: return r_bin_java_find_cp_const_by_val_int (bin_obj, bytes, len);
	case R_BIN_JAVA_CP_FLOAT: return r_bin_java_find_cp_const_by_val_float (bin_obj, bytes,
len); case R_BIN_JAVA_CP_LONG: return r_bin_java_find_cp_const_by_val_long (bin_obj, bytes, len); case R_BIN_JAVA_CP_DOUBLE: return r_bin_java_find_cp_const_by_val_double (bin_obj, bytes, len); case R_BIN_JAVA_CP_UNKNOWN: default: eprintf ("Failed to perform the search for: %s\n", bytes); return r_list_new (); } } R_API void U(add_cp_objs_to_sdb)(RBinJavaObj * bin) { /* Add Constant Pool Serialized Object to an Array the key for this info is: Key: java.<classname>.cp_obj Each Value varies by type: In general its: <ordinal>.<file_offset>.<type_name>.[type specific stuff] Example: UTF-8: <ordinal>.<file_offset>.<type_name>.<strlen>.<hexlified(str)> Integer: <ordinal>.<file_offset>.<type_name>.<abs(int)> Long: <ordinal>.<file_offset>.<type_name>.abs(long)> FieldRef/MethodRef: <ordinal>.<file_offset>.<type_name>.<class_idx>.<name_and_type_idx> */ ut32 idx = 0, class_name_inheap = 1; RBinJavaCPTypeObj *cp_obj = NULL; char *key = NULL, *value = NULL; char str_cnt[40]; char *class_name = r_bin_java_get_this_class_name (bin); ut32 key_buf_size = 0; if (!class_name) { class_name = "unknown"; class_name_inheap = 0; } // 4 - format, 8 number, 1 null byte, 7 "unknown" key_buf_size = strlen (class_name) + 4 + 8 + 1; key = malloc (key_buf_size); if (!key) { if (class_name_inheap) { free (class_name); } return; } snprintf (key, key_buf_size - 1, "%s.cp_count", class_name); key[key_buf_size - 1] = 0; snprintf (str_cnt, 39, "%d", bin->cp_count); str_cnt[39] = 0; sdb_set (bin->kv, key, value, 0); // sdb_alist(bin->kv, key); for (idx = 0; idx < bin->cp_count; idx++) { snprintf (key, key_buf_size - 1, "%s.cp.%d", class_name, idx); key[key_buf_size - 1] = 0; cp_obj = (RBinJavaCPTypeObj *) r_bin_java_get_item_from_bin_cp_list (bin, idx); IFDBG eprintf ("Adding %s to the sdb.\n", key); if (cp_obj) { value = ((RBinJavaCPTypeMetas *) cp_obj->metas->type_info)-> allocs->stringify_obj (cp_obj); sdb_set (bin->kv, key, value, 0); free (value); } } if (class_name_inheap) { free (class_name); } 
free (key); } R_API void U(add_field_infos_to_sdb)(RBinJavaObj * bin) { /* *** Experimental and May Change *** Add field information to an Array the key for this info variable depenedent on addr, method ordinal, etc. Key 1, mapping to method key: java.<file_offset> = <field_key> Key 3, method description <field_key>.info = [<access str>, <class_name>, <name>, <signature>] key 4, method meta <field_key>.meta = [<file_offset>, ?] */ RListIter *iter = NULL, *iter_tmp = NULL; RBinJavaField *fm_type; ut32 key_size = 255, value_buffer_size = 1024, class_name_inheap = 1; char *field_key = NULL, *field_key_value = NULL, *value_buffer = NULL; char *class_name = r_bin_java_get_this_class_name (bin); if (!class_name) { class_name = "unknown"; class_name_inheap = 0; } key_size += strlen (class_name); value_buffer_size += strlen (class_name); field_key = malloc (key_size); value_buffer = malloc (value_buffer_size); field_key_value = malloc (key_size); snprintf (field_key, key_size, "%s.methods", class_name); field_key[key_size - 1] = 0; r_list_foreach_safe (bin->fields_list, iter, iter_tmp, fm_type) { char number_buffer[80]; ut64 file_offset = fm_type->file_offset + bin->loadaddr; snprintf (number_buffer, sizeof (number_buffer), "0x%04"PFMT64x, file_offset); IFDBG eprintf ("Inserting: []%s = %s\n", field_key, number_buffer); sdb_array_push (bin->kv, field_key, number_buffer, 0); } r_list_foreach_safe (bin->fields_list, iter, iter_tmp, fm_type) { ut64 field_offset = fm_type->file_offset + bin->loadaddr; // generate method specific key & value snprintf (field_key, key_size, "%s.0x%04"PFMT64x, class_name, field_offset); field_key[key_size - 1] = 0; snprintf (field_key_value, key_size, "%s.0x%04"PFMT64x ".field", class_name, field_offset); field_key_value[key_size - 1] = 0; sdb_set (bin->kv, field_key, field_key_value, 0); IFDBG eprintf ("Inserting: %s = %s\n", field_key, field_key_value); // generate info key, and place values in method info array snprintf (field_key, key_size, 
"%s.info", field_key_value); field_key[key_size - 1] = 0; snprintf (value_buffer, value_buffer_size, "%s", fm_type->flags_str); value_buffer[value_buffer_size - 1] = 0; sdb_array_push (bin->kv, field_key, value_buffer, 0); IFDBG eprintf ("Inserting: []%s = %s\n", field_key, value_buffer); snprintf (value_buffer, value_buffer_size, "%s", fm_type->class_name); value_buffer[value_buffer_size - 1] = 0; sdb_array_push (bin->kv, field_key, value_buffer, 0); IFDBG eprintf ("Inserting: []%s = %s\n", field_key, value_buffer); snprintf (value_buffer, value_buffer_size, "%s", fm_type->name); value_buffer[value_buffer_size - 1] = 0; sdb_array_push (bin->kv, field_key, value_buffer, 0); IFDBG eprintf ("Inserting: []%s = %s\n", field_key, value_buffer); snprintf (value_buffer, value_buffer_size, "%s", fm_type->descriptor); value_buffer[value_buffer_size - 1] = 0; sdb_array_push (bin->kv, field_key, value_buffer, 0); IFDBG eprintf ("Inserting: []%s = %s\n", field_key, value_buffer); } free (field_key); free (field_key_value); free (value_buffer); if (class_name_inheap) { free (class_name); } } R_API void U(add_method_infos_to_sdb)(RBinJavaObj * bin) { /* *** Experimental and May Change *** Add Mehtod information to an Array the key for this info variable depenedent on addr, method ordinal, etc. Key 1, mapping to method key: java.<file_offset> = <method_key> Key 2, basic code information <method_key>.code = [<addr>, <size>] Key 3, method description <method_key>.info = [<access str>, <class_name>, <name>, <signature>,] key 4, method meta <method_key>.meta = [<file_offset>, ?] // TODO in key 3 add <class_name>? e.g. <access str>.<name>.<signature> Note: method name not used because of collisions with operator overloading also take note that code offset and the method offset are not the same values. 
	*/
	RListIter *iter = NULL, *iter_tmp = NULL;
	RBinJavaField *fm_type;
	// base sizes are enlarged below by the class-name length
	ut32 key_size = 255, value_buffer_size = 1024, class_name_inheap = 1;
	char *method_key = NULL, *method_key_value = NULL, *value_buffer = NULL;
	char *class_name = r_bin_java_get_this_class_name (bin);
	ut64 baddr = bin->loadaddr;
	if (!class_name) {
		// fall back to a static string; must not be freed at the end
		class_name = "unknown";
		class_name_inheap = 0;
	}
	key_size += strlen (class_name);
	value_buffer_size += strlen (class_name);
	// NOTE(review): these mallocs are unchecked; snprintf below would crash on OOM
	method_key = malloc (key_size);
	value_buffer = malloc (value_buffer_size);
	method_key_value = malloc (key_size);
	snprintf (method_key, key_size, "%s.methods", class_name);
	method_key[key_size - 1] = 0;
	// first pass: record every method's file offset under <class>.methods
	r_list_foreach_safe (bin->methods_list, iter, iter_tmp, fm_type) {
		char number_buffer[80];
		ut64 file_offset = fm_type->file_offset + baddr;
		snprintf (number_buffer, sizeof (number_buffer), "0x%04"PFMT64x, file_offset);
		sdb_array_push (bin->kv, method_key, number_buffer, 0);
	}
	// second pass: per-method keys, code array and the 4-element info array
	r_list_foreach_safe (bin->methods_list, iter, iter_tmp, fm_type) {
		ut64 code_offset = r_bin_java_get_method_code_offset (fm_type) + baddr,
		code_size = r_bin_java_get_method_code_size (fm_type),
		method_offset = fm_type->file_offset + baddr;
		// generate method specific key & value
		snprintf (method_key, key_size, "%s.0x%04"PFMT64x, class_name, code_offset);
		method_key[key_size - 1] = 0;
		snprintf (method_key_value, key_size, "%s.0x%04"PFMT64x ".method", class_name, method_offset);
		method_key_value[key_size - 1] = 0;
		IFDBG eprintf ("Adding %s to sdb_array: %s\n", method_key_value, method_key);
		sdb_set (bin->kv, method_key, method_key_value, 0);
		// generate code key and values
		snprintf (method_key, key_size, "%s.code", method_key_value);
		method_key[key_size - 1] = 0;
		snprintf (value_buffer, value_buffer_size, "0x%04"PFMT64x, code_offset);
		value_buffer[value_buffer_size - 1] = 0;
		sdb_array_push (bin->kv, method_key, value_buffer, 0);
		snprintf (value_buffer, value_buffer_size, "0x%04"PFMT64x, code_size);
		value_buffer[value_buffer_size - 1] = 0;
		sdb_array_push (bin->kv,
			method_key, value_buffer, 0);
		// generate info key, and place values in method info array
		snprintf (method_key, key_size, "%s.info", method_key_value);
		method_key[key_size - 1] = 0;
		// info array order: flags, class name, method name, descriptor
		snprintf (value_buffer, value_buffer_size, "%s", fm_type->flags_str);
		value_buffer[value_buffer_size - 1] = 0;
		IFDBG eprintf ("Adding %s to sdb_array: %s\n", value_buffer, method_key);
		sdb_array_push (bin->kv, method_key, value_buffer, 0);
		snprintf (value_buffer, value_buffer_size, "%s", fm_type->class_name);
		value_buffer[value_buffer_size - 1] = 0;
		IFDBG eprintf ("Adding %s to sdb_array: %s\n", value_buffer, method_key);
		sdb_array_push (bin->kv, method_key, value_buffer, 0);
		snprintf (value_buffer, value_buffer_size, "%s", fm_type->name);
		value_buffer[value_buffer_size - 1] = 0;
		IFDBG eprintf ("Adding %s to sdb_array: %s\n", value_buffer, method_key);
		sdb_array_push (bin->kv, method_key, value_buffer, 0);
		snprintf (value_buffer, value_buffer_size, "%s", fm_type->descriptor);
		value_buffer[value_buffer_size - 1] = 0;
		IFDBG eprintf ("Adding %s to sdb_array: %s\n", value_buffer, method_key);
		sdb_array_push (bin->kv, method_key, value_buffer, 0);
	}
	free (method_key);
	free (method_key_value);
	free (value_buffer);
	if (class_name_inheap) {
		free (class_name);
	}
}

// Argument list of the method whose code attribute contains addr, or NULL.
R_API RList *U(r_bin_java_get_args_from_bin)(RBinJavaObj * bin_obj, ut64 addr) {
	RBinJavaField *fm_type = r_bin_java_get_method_code_attribute_with_addr (bin_obj, addr);
	return fm_type ? r_bin_java_get_args (fm_type) : NULL;
}

// Return-type list of the method whose code attribute contains addr, or NULL.
R_API RList *U(r_bin_java_get_ret_from_bin)(RBinJavaObj * bin_obj, ut64 addr) {
	RBinJavaField *fm_type = r_bin_java_get_method_code_attribute_with_addr (bin_obj, addr);
	return fm_type ? r_bin_java_get_ret (fm_type) : NULL;
}

// Heap-allocated copy of the name of the method containing addr; caller frees.
R_API char *U(r_bin_java_get_fcn_name_from_bin)(RBinJavaObj * bin_obj, ut64 addr) {
	RBinJavaField *fm_type = r_bin_java_get_method_code_attribute_with_addr (bin_obj, addr);
	return fm_type && fm_type->name ?
strdup (fm_type->name) : NULL; }

// Nonzero when the method containing addr has ACC_STATIC set
// (&& binds looser than &, so this is fm_type && (flags & STATIC)).
R_API int U(r_bin_java_is_method_static)(RBinJavaObj * bin_obj, ut64 addr) {
	RBinJavaField *fm_type = r_bin_java_get_method_code_attribute_with_addr (bin_obj, addr);
	return fm_type && fm_type->flags & R_BIN_JAVA_METHOD_ACC_STATIC;
}

// True when the method containing addr is declared private.
R_API int U(r_bin_java_is_method_private)(RBinJavaObj * bin_obj, ut64 addr) {
	return r_bin_java_is_fm_type_private (r_bin_java_get_method_code_attribute_with_addr (bin_obj, addr));
}

// True when the method containing addr is declared protected.
R_API int U(r_bin_java_is_method_protected)(RBinJavaObj * bin_obj, ut64 addr) {
	return r_bin_java_is_fm_type_protected (
		r_bin_java_get_method_code_attribute_with_addr (bin_obj, addr));
}

// Print a summary of the idx-th method; returns true when idx is in range.
R_API int r_bin_java_print_method_idx_summary(RBinJavaObj *bin_obj, ut32 idx) {
	int res = false;
	if (idx < r_list_length (bin_obj->methods_list)) {
		RBinJavaField *fm_type = r_list_get_n (bin_obj->methods_list, idx);
		r_bin_java_print_method_summary (fm_type);
		res = true;
	}
	return res;
}

R_API ut32 r_bin_java_get_method_count(RBinJavaObj *bin_obj) {
	return r_list_length (bin_obj->methods_list);
}

// Newly allocated list of strdup'ed interface names; caller owns the list.
R_API RList *r_bin_java_get_interface_names(RBinJavaObj *bin) {
	RList *interfaces_names = r_list_new ();
	RListIter *iter;
	RBinJavaInterfaceInfo *ifobj;
	r_list_foreach (bin->interfaces_list, iter, ifobj) {
		if (ifobj && ifobj->name) {
			r_list_append (interfaces_names, strdup (ifobj->name));
		}
	}
	return interfaces_names;
}

// Load address of the main() code attribute, or 0 when there is none.
R_API ut64 r_bin_java_get_main(RBinJavaObj *bin) {
	if (bin->main_code_attr) {
		return bin->main_code_attr->info.code_attr.code_offset + bin->loadaddr;
	}
	return 0;
}

// Slurp a class file from disk and parse it; returns NULL on any failure.
R_API RBinJavaObj *r_bin_java_new(const char *file, ut64 loadaddr, Sdb *kv) {
	RBinJavaObj *bin = R_NEW0 (RBinJavaObj);
	if (!bin) {
		return NULL;
	}
	bin->file = strdup (file);
	size_t sz;
	ut8 *buf = (ut8 *)r_file_slurp (file, &sz);
	bin->size = sz;
	if (!buf) {
		// r_bin_java_free returns NULL, so this reports failure
		return r_bin_java_free (bin);
	}
	if (!r_bin_java_new_bin (bin, loadaddr, kv, buf, bin->size)) {
		r_bin_java_free (bin);
		bin = NULL;
	}
	free (buf);
	return bin;
}

// File offset of this class's entrypoint code attribute, or 0.
R_API ut64 r_bin_java_get_class_entrypoint(RBinJavaObj *bin) {
	if (bin->cf2.this_class_entrypoint_code_attr) {
		return bin->cf2.this_class_entrypoint_code_attr->info.code_attr.code_offset;
	}
	return 0;
}

// Exception table of the method whose code range contains addr, or NULL.
// Falls back to R_BIN_JAVA_GLOBAL_BIN when bin is NULL.
R_API RList *r_bin_java_get_method_exception_table_with_addr(RBinJavaObj *bin, ut64 addr) {
	RListIter *iter = NULL, *iter_tmp = NULL;
	RBinJavaField *fm_type, *res = NULL;
	if (!bin && R_BIN_JAVA_GLOBAL_BIN) {
		bin = R_BIN_JAVA_GLOBAL_BIN;
	}
	if (!bin) {
		eprintf ("Attempting to analyse function when the R_BIN_JAVA_GLOBAL_BIN has not been set.\n");
		return NULL;
	}
	r_list_foreach_safe (bin->methods_list, iter, iter_tmp, fm_type) {
		ut64 offset = r_bin_java_get_method_code_offset (fm_type) + bin->loadaddr,
		size = r_bin_java_get_method_code_size (fm_type);
		if (addr >= offset && addr <= size + offset) {
			res = fm_type;
		}
	}
	if (res) {
		RBinJavaAttrInfo *code_attr = r_bin_java_get_method_code_attribute (res);
		return code_attr->info.code_attr.exception_table;
	}
	return NULL;
}

R_API const RList *r_bin_java_get_methods_list(RBinJavaObj *bin) {
	if (bin) {
		return bin->methods_list;
	}
	if (R_BIN_JAVA_GLOBAL_BIN) {
		return R_BIN_JAVA_GLOBAL_BIN->methods_list;
	}
	return NULL;
}

// Build a list of all bin objects registered in bin_obj->AllJavaBinObjs.
R_API RList *r_bin_java_get_bin_obj_list_thru_obj(RBinJavaObj *bin_obj) {
	RList *the_list;
	Sdb *sdb;
	if (!bin_obj) {
		return NULL;
	}
	sdb = bin_obj->AllJavaBinObjs;
	if (!sdb) {
		return NULL;
	}
	the_list = r_list_new ();
	if (!the_list) {
		return NULL;
	}
	sdb_foreach (sdb, sdb_iterate_build_list, (void *) the_list);
	return the_list;
}

// Collect the type strings from all field and method descriptors.
// NOTE(review): on extract_type_value failure the partially filled
// all_types list is leaked — confirm intended.
R_API RList *r_bin_java_extract_all_bin_type_values(RBinJavaObj *bin_obj) {
	RListIter *fm_type_iter;
	RList *all_types = r_list_new ();
	RBinJavaField *fm_type;
	// get all field types
	r_list_foreach (bin_obj->fields_list, fm_type_iter, fm_type) {
		char *desc = NULL;
		if (!extract_type_value (fm_type->descriptor, &desc)) {
			return NULL;
		}
		IFDBG eprintf ("Adding field type: %s\n", desc);
		r_list_append (all_types, desc);
	}
	// get all method types
	r_list_foreach (bin_obj->methods_list, fm_type_iter, fm_type) {
		RList *the_list = r_bin_java_extract_type_values (fm_type->descriptor);
		RListIter *desc_iter;
		char *str;
		r_list_foreach (the_list, desc_iter, str) {
			if (str && *str != '(' && *str != ')') {
				r_list_append (all_types, strdup (str));
				IFDBG eprintf ("Adding method type: %s\n", str);
			}
		}
		r_list_free (the_list);
	}
	return all_types;
}

// One prototype string per method; caller owns the strings.
R_API RList *r_bin_java_get_method_definitions(RBinJavaObj *bin) {
	RBinJavaField *fm_type = NULL;
	RList *the_list = r_list_new ();
	if (!the_list) {
		return NULL;
	}
	RListIter *iter = NULL;
	if (!bin) {
		return the_list;
	}
	r_list_foreach (bin->methods_list, iter, fm_type) {
		char *method_proto = r_bin_java_get_method_definition (fm_type);
		// eprintf ("Method prototype: %s\n", method_proto);
		r_list_append (the_list, method_proto);
	}
	return the_list;
}

// One definition string per field; caller owns the strings.
R_API RList *r_bin_java_get_field_definitions(RBinJavaObj *bin) {
	RBinJavaField *fm_type = NULL;
	RList *the_list = r_list_new ();
	if (!the_list) {
		return NULL;
	}
	RListIter *iter = NULL;
	if (!bin) {
		return the_list;
	}
	r_list_foreach (bin->fields_list, iter, fm_type) {
		char *field_def = r_bin_java_get_field_definition (fm_type);
		// eprintf ("Field def: %s, %s, %s, %s\n", fm_type->name, fm_type->descriptor, fm_type->flags_str, field_def);
		r_list_append (the_list, field_def);
	}
	return the_list;
}

// Library names with '/' rewritten to '.' (JVM internal form -> Java form).
R_API RList *r_bin_java_get_import_definitions(RBinJavaObj *bin) {
	RList *the_list = r_bin_java_get_lib_names (bin);
	RListIter *iter = NULL;
	char *new_str;
	if (!bin || !the_list) {
		return the_list;
	}
	r_list_foreach (the_list, iter, new_str) {
		while (*new_str) {
			if (*new_str == '/') {
				*new_str = '.';
			}
			new_str++;
		}
	}
	return the_list;
}

// List of heap-allocated ut64 field offsets (file offset + loadaddr).
R_API RList *r_bin_java_get_field_offsets(RBinJavaObj *bin) {
	RBinJavaField *fm_type = NULL;
	RList *the_list = r_list_new ();
	if (!the_list) {
		return NULL;
	}
	RListIter *iter = NULL;
	ut64 *paddr = NULL;
	if (!bin) {
		return the_list;
	}
	the_list->free = free;
	r_list_foreach (bin->fields_list, iter, fm_type) {
		paddr = malloc (sizeof(ut64));
		if (!paddr) {
			r_list_free (the_list);
			return NULL;
		}
		*paddr = fm_type->file_offset + bin->loadaddr;
		// eprintf ("Field def: %s, %s, %s, %s\n", fm_type->name, fm_type->descriptor, fm_type->flags_str, field_def);
		r_list_append (the_list, paddr);
	}
	return the_list;
}

// List of heap-allocated ut64 method offsets (file offset + loadaddr).
// NOTE(review): R_NEW0 result is not NULL-checked here, unlike the field
// variant above — confirm intended.
R_API RList *r_bin_java_get_method_offsets(RBinJavaObj *bin) {
	RBinJavaField *fm_type = NULL;
	RList *the_list = r_list_new ();
	RListIter *iter = NULL;
	ut64 *paddr = NULL;
	if (!bin) {
		return the_list;
	}
	the_list->free = free;
	r_list_foreach (bin->methods_list, iter, fm_type) {
		paddr = R_NEW0 (ut64);
		*paddr = fm_type->file_offset + bin->loadaddr;
		r_list_append (the_list, paddr);
	}
	return the_list;
}

// Access-flag string -> numeric value, per flag table.
R_API ut16 r_bin_java_calculate_field_access_value(const char *access_flags_str) {
	return calculate_access_value (access_flags_str, FIELD_ACCESS_FLAGS);
}

R_API ut16 r_bin_java_calculate_class_access_value(const char *access_flags_str) {
	return calculate_access_value (access_flags_str, CLASS_ACCESS_FLAGS);
}

R_API ut16 r_bin_java_calculate_method_access_value(const char *access_flags_str) {
	return calculate_access_value (access_flags_str, METHOD_ACCESS_FLAGS);
}

R_API RList *retrieve_all_method_access_string_and_value(void) {
	return retrieve_all_access_string_and_value (METHOD_ACCESS_FLAGS);
}

R_API RList *retrieve_all_field_access_string_and_value(void) {
	return retrieve_all_access_string_and_value (FIELD_ACCESS_FLAGS);
}

R_API RList *retrieve_all_class_access_string_and_value(void) {
	return retrieve_all_access_string_and_value (CLASS_ACCESS_FLAGS);
}

R_API char *r_bin_java_resolve_with_space(RBinJavaObj *obj, int idx) {
	return r_bin_java_resolve (obj, idx, 1);
}

R_API char *r_bin_java_resolve_without_space(RBinJavaObj *obj, int idx) {
	return r_bin_java_resolve (obj, idx, 0);
}

// Resolve constant-pool entry idx to a base64-encoded string; caller frees.
R_API char *r_bin_java_resolve_b64_encode(RBinJavaObj *BIN_OBJ, ut16 idx) {
	RBinJavaCPTypeObj *item = NULL, *item2 = NULL;
	char *class_str = NULL, *name_str = NULL, *desc_str = NULL,
	*string_str = NULL, *empty = "", *cp_name = NULL, *str = NULL, *out = NULL;
	int memory_alloc = 0;
	if (BIN_OBJ && BIN_OBJ->cp_count < 1) {
// r_bin_java_new_bin(BIN_OBJ);
		return NULL;
	}
	item = (RBinJavaCPTypeObj *) r_bin_java_get_item_from_bin_cp_list (BIN_OBJ, idx);
	if (item) {
		cp_name = ((RBinJavaCPTypeMetas *) item->metas->type_info)->name;
		IFDBG eprintf ("java_resolve Resolved: (%d) %s\n", idx, cp_name);
	} else {
		return NULL;
	}
	// dispatch on the constant-pool tag name; each branch builds a plain
	// string then base64-encodes it via r_base64_encode_dyn
	if (!strcmp (cp_name, "Class")) {
		item2 = (RBinJavaCPTypeObj *) r_bin_java_get_item_from_bin_cp_list (BIN_OBJ, idx);
		// str = r_bin_java_get_name_from_bin_cp_list (BIN_OBJ, idx-1);
		class_str = r_bin_java_get_item_name_from_bin_cp_list (BIN_OBJ, item);
		if (!class_str) {
			class_str = empty;
		}
		name_str = r_bin_java_get_item_name_from_bin_cp_list (BIN_OBJ, item2);
		if (!name_str) {
			name_str = empty;
		}
		desc_str = r_bin_java_get_item_desc_from_bin_cp_list (BIN_OBJ, item2);
		if (!desc_str) {
			desc_str = empty;
		}
		memory_alloc = strlen (class_str) + strlen (name_str) + strlen (desc_str) + 3;
		if (memory_alloc) {
			str = malloc (memory_alloc);
			if (str) {
				snprintf (str, memory_alloc, "%s%s", name_str, desc_str);
				out = r_base64_encode_dyn ((const char *) str, strlen (str));
				free (str);
				str = out;
			}
		}
		// the empty-string sentinel is static; only free heap results
		if (class_str != empty) {
			free (class_str);
		}
		if (name_str != empty) {
			free (name_str);
		}
		if (desc_str != empty) {
			free (desc_str);
		}
	} else if (strcmp (cp_name, "MethodRef") == 0 || strcmp (cp_name, "FieldRef") == 0 || strcmp (cp_name, "InterfaceMethodRef") == 0) {
		/*
		* The MethodRef, FieldRef, and InterfaceMethodRef structures
		*/
		class_str = r_bin_java_get_name_from_bin_cp_list (BIN_OBJ, item->info.cp_method.class_idx);
		if (!class_str) {
			class_str = empty;
		}
		name_str = r_bin_java_get_item_name_from_bin_cp_list (BIN_OBJ, item);
		if (!name_str) {
			name_str = empty;
		}
		desc_str = r_bin_java_get_item_desc_from_bin_cp_list (BIN_OBJ, item);
		if (!desc_str) {
			desc_str = empty;
		}
		memory_alloc = strlen (class_str) + strlen (name_str) + strlen (desc_str) + 3;
		if (memory_alloc) {
			str = malloc (memory_alloc);
			if (str) {
				snprintf (str, memory_alloc, "%s/%s%s", class_str, name_str, desc_str);
				out = r_base64_encode_dyn ((const char *) str, strlen (str));
				free (str);
				str = out;
			}
		}
		if (class_str != empty) {
			free (class_str);
		}
		if (name_str != empty) {
			free (name_str);
		}
		if (desc_str != empty) {
			free (desc_str);
		}
	} else if (strcmp (cp_name, "String") == 0) {
		ut32 length = r_bin_java_get_utf8_len_from_bin_cp_list (BIN_OBJ, item->info.cp_string.string_idx);
		string_str = r_bin_java_get_utf8_from_bin_cp_list (BIN_OBJ, item->info.cp_string.string_idx);
		str = NULL;
		IFDBG eprintf ("java_resolve String got: (%d) %s\n", item->info.cp_string.string_idx, string_str);
		if (!string_str) {
			string_str = empty;
			length = strlen (empty);
		}
		memory_alloc = length + 3;
		if (memory_alloc) {
			str = malloc (memory_alloc);
			if (str) {
				snprintf (str, memory_alloc, "\"%s\"", string_str);
				out = r_base64_encode_dyn ((const char *) str, strlen (str));
				free (str);
				str = out;
			}
		}
		IFDBG eprintf ("java_resolve String return: %s\n", str);
		if (string_str != empty) {
			free (string_str);
		}
	} else if (strcmp (cp_name, "Utf8") == 0) {
		ut64 sz = item->info.cp_utf8.length ?
			item->info.cp_utf8.length + 10 : 10;
		// NOTE(review): malloc result is not checked before memset — OOM crash
		str = malloc (sz);
		memset (str, 0, sz);
		if (sz > 10) {
			r_base64_encode (str, item->info.cp_utf8.bytes, item->info.cp_utf8.length);
		}
	} else if (strcmp (cp_name, "Long") == 0) {
		str = malloc (34);
		if (str) {
			snprintf (str, 34, "0x%"PFMT64x, r_bin_java_raw_to_long (item->info.cp_long.bytes.raw, 0));
			out = r_base64_encode_dyn ((const char *) str, strlen (str));
			free (str);
			str = out;
		}
	} else if (strcmp (cp_name, "Double") == 0) {
		str = malloc (1000);
		if (str) {
			snprintf (str, 1000, "%f", r_bin_java_raw_to_double (item->info.cp_double.bytes.raw, 0));
			out = r_base64_encode_dyn ((const char *) str, strlen (str));
			free (str);
			str = out;
		}
	} else if (strcmp (cp_name, "Integer") == 0) {
		str = calloc (34, 1);
		if (str) {
			snprintf (str, 34, "0x%08x", R_BIN_JAVA_UINT (item->info.cp_integer.bytes.raw, 0));
			out = r_base64_encode_dyn ((const char *) str, strlen (str));
			free (str);
			str = out;
		}
	} else if (strcmp (cp_name, "Float") == 0) {
		str = malloc (34);
		if (str) {
			snprintf (str, 34, "%f", R_BIN_JAVA_FLOAT (item->info.cp_float.bytes.raw, 0));
			out = r_base64_encode_dyn ((const char *) str, strlen (str));
			free (str);
			str = out;
		}
	} else if (!strcmp (cp_name, "NameAndType")) {
		name_str = r_bin_java_get_item_name_from_bin_cp_list (BIN_OBJ, item);
		if (!name_str) {
			name_str = empty;
		}
		desc_str = r_bin_java_get_item_desc_from_bin_cp_list (BIN_OBJ, item);
		if (!desc_str) {
			desc_str = empty;
		}
		memory_alloc = strlen (name_str) + strlen (desc_str) + 3;
		if (memory_alloc) {
			str = malloc (memory_alloc);
			if (str) {
				snprintf (str, memory_alloc, "%s %s", name_str, desc_str);
				out = r_base64_encode_dyn ((const char *) str, strlen (str));
				free (str);
				str = out;
			}
		}
		if (name_str != empty) {
			free (name_str);
		}
		if (desc_str != empty) {
			free (desc_str);
		}
	} else {
		// unknown tag: return base64 of "(null)"
		str = r_base64_encode_dyn ((const char *) "(null)", 6);
	}
	return str;
}

// Load address of constant-pool entry idx, or (ut64)-1 when unresolvable.
R_API ut64 r_bin_java_resolve_cp_idx_address(RBinJavaObj *BIN_OBJ, int idx) {
	RBinJavaCPTypeObj *item = NULL;
	ut64 addr = -1;
	if (BIN_OBJ && BIN_OBJ->cp_count < 1) {
		return -1;
	}
	item = (RBinJavaCPTypeObj *) r_bin_java_get_item_from_bin_cp_list (BIN_OBJ, idx);
	if (item) {
		addr = item->file_offset + item->loadaddr;
	}
	return addr;
}

// Stringify constant-pool entry idx via its per-type allocs vtable.
R_API char *r_bin_java_resolve_cp_idx_to_string(RBinJavaObj *BIN_OBJ, int idx) {
	RBinJavaCPTypeObj *item = NULL;
	char *value = NULL;
	if (BIN_OBJ && BIN_OBJ->cp_count < 1) {
		return NULL;
	}
	item = (RBinJavaCPTypeObj *) r_bin_java_get_item_from_bin_cp_list (BIN_OBJ, idx);
	if (item) {
		value = ((RBinJavaCPTypeMetas *) item->metas->type_info)->
		allocs->stringify_obj (item);
	}
	return value;
}

// Print a summary of constant-pool entry idx; true when the entry exists.
R_API int r_bin_java_resolve_cp_idx_print_summary(RBinJavaObj *BIN_OBJ, int idx) {
	RBinJavaCPTypeObj *item = NULL;
	if (BIN_OBJ && BIN_OBJ->cp_count < 1) {
		return false;
	}
	item = (RBinJavaCPTypeObj *) r_bin_java_get_item_from_bin_cp_list (BIN_OBJ, idx);
	if (item) {
		((RBinJavaCPTypeMetas *) item->metas->type_info)->
		allocs->print_summary (item);
	} else {
		eprintf ("Error: Invalid CP Object.\n");
	}
	return item ?
true : false; }

// Resolve constant-pool entry idx into a tagged ConstJavaValue union.
// Caller releases the result with r_bin_java_free_const_value.
R_API ConstJavaValue *U(r_bin_java_resolve_to_const_value)(RBinJavaObj * BIN_OBJ, int idx) {
	// TODO XXX FIXME add a size parameter to the str when it is passed in
	RBinJavaCPTypeObj *item = NULL, *item2 = NULL;
	ConstJavaValue *result = R_NEW0 (ConstJavaValue);
	if (!result) {
		return NULL;
	}
	char *class_str = NULL, *name_str = NULL, *desc_str = NULL, *string_str = NULL, *empty = "", *cp_name = NULL;
	result->type = "unknown";
	if (BIN_OBJ && BIN_OBJ->cp_count < 1) {
		// r_bin_java_new_bin(BIN_OBJ);
		return result;
	}
	item = (RBinJavaCPTypeObj *) r_bin_java_get_item_from_bin_cp_list (BIN_OBJ, idx);
	if (!item) {
		return result;
	}
	cp_name = ((RBinJavaCPTypeMetas *) item->metas->type_info)->name;
	IFDBG eprintf ("java_resolve Resolved: (%d) %s\n", idx, cp_name);
	if (strcmp (cp_name, "Class") == 0) {
		item2 = (RBinJavaCPTypeObj *) r_bin_java_get_item_from_bin_cp_list (BIN_OBJ, idx);
		// str = r_bin_java_get_name_from_bin_cp_list (BIN_OBJ, idx-1);
		class_str = r_bin_java_get_item_name_from_bin_cp_list (BIN_OBJ, item);
		if (!class_str) {
			class_str = empty;
		}
		name_str = r_bin_java_get_item_name_from_bin_cp_list (BIN_OBJ, item2);
		if (!name_str) {
			name_str = empty;
		}
		desc_str = r_bin_java_get_item_desc_from_bin_cp_list (BIN_OBJ, item2);
		if (!desc_str) {
			desc_str = empty;
		}
		// NOTE(review): R_NEW0 result is dereferenced unchecked below
		result->value._ref = R_NEW0 (_JavaRef);
		result->type = "ref";
		result->value._ref->class_name = strdup (class_str);
		result->value._ref->name = strdup (name_str);
		result->value._ref->desc = strdup (desc_str);
		if (class_str != empty) {
			free (class_str);
		}
		if (name_str != empty) {
			free (name_str);
		}
		if (desc_str != empty) {
			free (desc_str);
		}
	} else if (strcmp (cp_name, "MethodRef") == 0 || strcmp (cp_name, "FieldRef") == 0 || strcmp (cp_name, "InterfaceMethodRef") == 0) {
		/*
		* The MethodRef, FieldRef, and InterfaceMethodRef structures
		*/
		class_str = r_bin_java_get_name_from_bin_cp_list (BIN_OBJ, item->info.cp_method.class_idx);
		if (!class_str) {
			class_str = empty;
		}
		name_str = r_bin_java_get_item_name_from_bin_cp_list (BIN_OBJ, item);
		if (!name_str) {
			name_str = empty;
		}
		desc_str = r_bin_java_get_item_desc_from_bin_cp_list (BIN_OBJ, item);
		if (!desc_str) {
			desc_str = empty;
		}
		result->value._ref = R_NEW0 (_JavaRef);
		result->type = "ref";
		result->value._ref->class_name = strdup (class_str);
		result->value._ref->name = strdup (name_str);
		result->value._ref->desc = strdup (desc_str);
		if (class_str != empty) {
			free (class_str);
		}
		if (name_str != empty) {
			free (name_str);
		}
		if (desc_str != empty) {
			free (desc_str);
		}
	} else if (strcmp (cp_name, "String") == 0) {
		ut32 length = r_bin_java_get_utf8_len_from_bin_cp_list (BIN_OBJ, item->info.cp_string.string_idx);
		string_str = r_bin_java_get_utf8_from_bin_cp_list (BIN_OBJ, item->info.cp_string.string_idx);
		IFDBG eprintf ("java_resolve String got: (%d) %s\n", item->info.cp_string.string_idx, string_str);
		if (!string_str) {
			string_str = empty;
			length = strlen (empty);
		}
		result->type = "str";
		result->value._str = R_NEW0 (struct java_const_value_str_t);
		result->value._str->len = length;
		if (length > 0) {
			result->value._str->str = r_str_ndup (string_str, length);
		} else {
			result->value._str->str = strdup ("");
		}
		if (string_str != empty) {
			free (string_str);
		}
	} else if (strcmp (cp_name, "Utf8") == 0) {
		result->type = "str";
		result->value._str = R_NEW0 (struct java_const_value_str_t);
		// NOTE(review): malloc/memcpy are unchecked; the copy is NOT
		// NUL-terminated — consumers must use ->len
		result->value._str->str = malloc (item->info.cp_utf8.length);
		result->value._str->len = item->info.cp_utf8.length;
		memcpy (result->value._str->str, item->info.cp_utf8.bytes, item->info.cp_utf8.length);
	} else if (strcmp (cp_name, "Long") == 0) {
		result->type = "long";
		result->value._long = r_bin_java_raw_to_long (item->info.cp_long.bytes.raw, 0);
	} else if (strcmp (cp_name, "Double") == 0) {
		result->type = "double";
		result->value._double = r_bin_java_raw_to_double (item->info.cp_double.bytes.raw, 0);
	} else if (strcmp (cp_name, "Integer") == 0) {
		result->type = "int";
		result->value._int = R_BIN_JAVA_UINT (item->info.cp_integer.bytes.raw, 0);
	} else if (strcmp (cp_name, "Float") == 0) {
		result->type = "float";
		result->value._float = R_BIN_JAVA_FLOAT (item->info.cp_float.bytes.raw, 0);
	} else if (strcmp (cp_name, "NameAndType") == 0) {
		result->value._ref = R_NEW0 (struct java_const_value_ref_t);
		result->type = "ref";
		name_str = r_bin_java_get_item_name_from_bin_cp_list (BIN_OBJ, item);
		if (!name_str) {
			name_str = empty;
		}
		desc_str = r_bin_java_get_item_desc_from_bin_cp_list (BIN_OBJ, item);
		if (!desc_str) {
			desc_str = empty;
		}
		result->value._ref->class_name = strdup (empty);
		result->value._ref->name = strdup (name_str);
		result->value._ref->desc = strdup (desc_str);
		if (name_str != empty) {
			free (name_str);
		}
		if (desc_str != empty) {
			free (desc_str);
		}
		result->value._ref->is_method = r_bin_java_does_cp_idx_ref_method (BIN_OBJ, idx);
		result->value._ref->is_field = r_bin_java_does_cp_idx_ref_field (BIN_OBJ, idx);
	}
	return result;
}

// Free a ConstJavaValue; dispatches on the first letters of ->type
// ('r' = "ref", "st" = "str"; numeric types need no inner free).
R_API void U(r_bin_java_free_const_value)(ConstJavaValue * cp_value) {
	char first_char = cp_value && cp_value->type ? *cp_value->type : 0,
	second_char = cp_value && cp_value->type ?
		*(cp_value->type + 1) : 0;
	switch (first_char) {
	case 'r':
		if (cp_value && cp_value->value._ref) {
			free (cp_value->value._ref->class_name);
			free (cp_value->value._ref->name);
			free (cp_value->value._ref->desc);
		}
		break;
	case 's':
		if (second_char == 't' && cp_value->value._str) {
			free (cp_value->value._str->str);
		}
		break;
	}
	free (cp_value);
}

// strdup'ed name of the idx-th field, or NULL when out of range.
R_API char *r_bin_java_get_field_name(RBinJavaObj *bin_obj, ut32 idx) {
	char *name = NULL;
	if (idx < r_list_length (bin_obj->fields_list)) {
		RBinJavaField *fm_type = r_list_get_n (bin_obj->fields_list, idx);
		name = strdup (fm_type->name);
	}
	return name;
}

// Print a summary of the idx-th field; returns true when idx is in range.
R_API int r_bin_java_print_field_idx_summary(RBinJavaObj *bin_obj, ut32 idx) {
	int res = false;
	if (idx < r_list_length (bin_obj->fields_list)) {
		RBinJavaField *fm_type = r_list_get_n (bin_obj->fields_list, idx);
		r_bin_java_print_field_summary (fm_type);
		res = true;
	}
	return res;
}

R_API ut32 r_bin_java_get_field_count(RBinJavaObj *bin_obj) {
	return r_list_length (bin_obj->fields_list);
}

// List of "<ordinal> <field name>" strings; caller owns the list.
R_API RList *r_bin_java_get_field_num_name(RBinJavaObj *bin_obj) {
	ut32 i = 0;
	RBinJavaField *fm_type;
	RListIter *iter = NULL;
	RList *res = r_list_newf (free);
	r_list_foreach (bin_obj->fields_list, iter, fm_type) {
		ut32 len = strlen (fm_type->name) + 30;
		char *str = malloc (len);
		if (!str) {
			r_list_free (res);
			return NULL;
		}
		snprintf (str, len, "%d %s", i, fm_type->name);
		++i;
		r_list_append (res, str);
	}
	return res;
}

// Constant-pool ordinals of every Utf8 entry exactly matching bytes[0..len).
R_API RList *r_bin_java_find_cp_const_by_val_utf8(RBinJavaObj *bin_obj, const ut8 *bytes, ut32 len) {
	RList *res = r_list_newf (free);
	ut32 *v = NULL;
	RListIter *iter;
	RBinJavaCPTypeObj *cp_obj;
	IFDBG eprintf ("In UTF-8 Looking for %s\n", bytes);
	r_list_foreach (bin_obj->cp_list, iter, cp_obj) {
		if (cp_obj->tag == R_BIN_JAVA_CP_UTF8) {
			IFDBG eprintf ("In UTF-8 Looking @ %s\n", cp_obj->info.cp_utf8.bytes);
			IFDBG eprintf ("UTF-8 len = %d and memcmp = %d\n", cp_obj->info.cp_utf8.length, memcmp (bytes, cp_obj->info.cp_utf8.bytes, len));
			if (len == cp_obj->info.cp_utf8.length &&
!memcmp (bytes, cp_obj->info.cp_utf8.bytes, len)) { v = malloc (sizeof (ut32)); if (!v) { r_list_free (res); return NULL; } *v = cp_obj->metas->ord; IFDBG eprintf ("Found a match adding idx: %d\n", *v); r_list_append (res, v); } } } return res; } R_API RList *r_bin_java_find_cp_const_by_val_int(RBinJavaObj *bin_obj, const ut8 *bytes, ut32 len) { RList *res = r_list_newf (free); ut32 *v = NULL; RListIter *iter; RBinJavaCPTypeObj *cp_obj; eprintf ("Looking for 0x%08x\n", (ut32) R_BIN_JAVA_UINT (bytes, 0)); r_list_foreach (bin_obj->cp_list, iter, cp_obj) { if (cp_obj->tag == R_BIN_JAVA_CP_INTEGER) { if (len == 4 && R_BIN_JAVA_UINT (bytes, 0) == R_BIN_JAVA_UINT (cp_obj->info.cp_integer.bytes.raw, 0)) { v = malloc (sizeof (ut32)); if (!v) { r_list_free (res); return NULL; } *v = cp_obj->idx; r_list_append (res, v); } } } return res; } R_API char r_bin_java_resolve_cp_idx_tag(RBinJavaObj *BIN_OBJ, int idx) { RBinJavaCPTypeObj *item = NULL; if (BIN_OBJ && BIN_OBJ->cp_count < 1) { // r_bin_java_new_bin(BIN_OBJ); return R_BIN_JAVA_CP_UNKNOWN; } item = (RBinJavaCPTypeObj *) r_bin_java_get_item_from_bin_cp_list (BIN_OBJ, idx); if (item) { return item->tag; } return R_BIN_JAVA_CP_UNKNOWN; } R_API int U(r_bin_java_integer_cp_set)(RBinJavaObj * bin, ut16 idx, ut32 val) { RBinJavaCPTypeObj *cp_obj = r_bin_java_get_item_from_bin_cp_list (bin, idx); if (!cp_obj) { return false; } ut8 bytes[4] = { 0 }; if (cp_obj->tag != R_BIN_JAVA_CP_INTEGER && cp_obj->tag != R_BIN_JAVA_CP_FLOAT) { eprintf ("Not supporting the overwrite of CP Objects with one of a different size.\n"); return false; } r_bin_java_check_reset_cp_obj (cp_obj, R_BIN_JAVA_CP_INTEGER); cp_obj->tag = R_BIN_JAVA_CP_INTEGER; memcpy (bytes, (const char *) &val, 4); val = R_BIN_JAVA_UINT (bytes, 0); memcpy (&cp_obj->info.cp_integer.bytes.raw, (const char *) &val, 4); return true; } R_API int U(r_bin_java_float_cp_set)(RBinJavaObj * bin, ut16 idx, float val) { RBinJavaCPTypeObj *cp_obj = r_bin_java_get_item_from_bin_cp_list 
(bin, idx); if (!cp_obj) { return false; } ut8 bytes[4] = { 0 }; if (cp_obj->tag != R_BIN_JAVA_CP_INTEGER && cp_obj->tag != R_BIN_JAVA_CP_FLOAT) { eprintf ("Not supporting the overwrite of CP Objects with one of a different size.\n"); return false; } r_bin_java_check_reset_cp_obj (cp_obj, R_BIN_JAVA_CP_FLOAT); cp_obj->tag = R_BIN_JAVA_CP_FLOAT; memcpy (bytes, (const char *) &val, 4); float *foo = (float*) bytes; val = *foo; //(float)R_BIN_JAVA_UINT (bytes, 0); memcpy (&cp_obj->info.cp_float.bytes.raw, (const char *) &val, 4); return true; } R_API int U(r_bin_java_long_cp_set)(RBinJavaObj * bin, ut16 idx, ut64 val) { RBinJavaCPTypeObj *cp_obj = r_bin_java_get_item_from_bin_cp_list (bin, idx); if (!cp_obj) { return false; } ut8 bytes[8] = { 0 }; if (cp_obj->tag != R_BIN_JAVA_CP_LONG && cp_obj->tag != R_BIN_JAVA_CP_DOUBLE) { eprintf ("Not supporting the overwrite of CP Objects with one of a different size.\n"); return false; } r_bin_java_check_reset_cp_obj (cp_obj, R_BIN_JAVA_CP_LONG); cp_obj->tag = R_BIN_JAVA_CP_LONG; memcpy (bytes, (const char *) &val, 8); val = r_bin_java_raw_to_long (bytes, 0); memcpy (&cp_obj->info.cp_long.bytes.raw, (const char *) &val, 8); return true; } R_API int U(r_bin_java_double_cp_set)(RBinJavaObj * bin, ut16 idx, ut32 val) { RBinJavaCPTypeObj *cp_obj = r_bin_java_get_item_from_bin_cp_list (bin, idx); if (!cp_obj) { return false; } ut8 bytes[8] = { 0 }; if (cp_obj->tag != R_BIN_JAVA_CP_LONG && cp_obj->tag != R_BIN_JAVA_CP_DOUBLE) { eprintf ("Not supporting the overwrite of CP Objects with one of a different size.\n"); return false; } r_bin_java_check_reset_cp_obj (cp_obj, R_BIN_JAVA_CP_DOUBLE); cp_obj->tag = R_BIN_JAVA_CP_DOUBLE; ut64 val64 = val; memcpy (bytes, (const char *) &val64, 8); val64 = r_bin_java_raw_to_long (bytes, 0); memcpy (&cp_obj->info.cp_double.bytes.raw, (const char *) &val64, 8); return true; } R_API int U(r_bin_java_utf8_cp_set)(RBinJavaObj * bin, ut16 idx, const ut8 * buffer, ut32 len) { RBinJavaCPTypeObj *cp_obj = 
r_bin_java_get_item_from_bin_cp_list (bin, idx); if (!cp_obj) { return false; } eprintf ("Writing %d byte(s) (%s)\n", len, buffer); // r_bin_java_check_reset_cp_obj(cp_obj, R_BIN_JAVA_CP_INTEGER); if (cp_obj->tag != R_BIN_JAVA_CP_UTF8) { eprintf ("Not supporting the overwrite of CP Objects with one of a different size.\n"); return false; } if (cp_obj->info.cp_utf8.length != len) { eprintf ("Not supporting the resize, rewriting utf8 string up to %d byte(s).\n", cp_obj->info.cp_utf8.length); if (cp_obj->info.cp_utf8.length > len) { eprintf ("Remaining %d byte(s) will be filled with \\x00.\n", cp_obj->info.cp_utf8.length - len); } } memcpy (cp_obj->info.cp_utf8.bytes, buffer, cp_obj->info.cp_utf8.length); if (cp_obj->info.cp_utf8.length > len) { memset (cp_obj->info.cp_utf8.bytes + len, 0, cp_obj->info.cp_utf8.length - len); } return true; } R_API ut8 *r_bin_java_cp_get_bytes(ut8 tag, ut32 *out_sz, const ut8 *buf, const ut64 len) { if (!out_sz) { return NULL; } if (out_sz) { *out_sz = 0; } switch (tag) { case R_BIN_JAVA_CP_INTEGER: case R_BIN_JAVA_CP_FLOAT: return r_bin_java_cp_get_4bytes (tag, out_sz, buf, len); case R_BIN_JAVA_CP_LONG: case R_BIN_JAVA_CP_DOUBLE: return r_bin_java_cp_get_8bytes (tag, out_sz, buf, len); case R_BIN_JAVA_CP_UTF8: return r_bin_java_cp_get_utf8 (tag, out_sz, buf, len); } return NULL; } R_API ut32 r_bin_java_cp_get_size(RBinJavaObj *bin, ut16 idx) { RBinJavaCPTypeObj *cp_obj = r_bin_java_get_item_from_bin_cp_list (bin, idx); switch (cp_obj->tag) { case R_BIN_JAVA_CP_INTEGER: case R_BIN_JAVA_CP_FLOAT: return 1 + 4; case R_BIN_JAVA_CP_LONG: case R_BIN_JAVA_CP_DOUBLE: return 1 + 8; case R_BIN_JAVA_CP_UTF8: return 1 + 2 + cp_obj->info.cp_utf8.length; } return 0; } R_API ut64 r_bin_java_get_method_start(RBinJavaObj *bin, RBinJavaField *fm_type) { return r_bin_java_get_method_code_offset (fm_type) + bin->loadaddr; } R_API ut64 r_bin_java_get_method_end(RBinJavaObj *bin, RBinJavaField *fm_type) { return r_bin_java_get_method_code_offset (fm_type) 
+ bin->loadaddr + +r_bin_java_get_method_code_size (fm_type); } R_API ut8 *U(r_bin_java_cp_append_method_ref)(RBinJavaObj * bin, ut32 * out_sz, ut16 cn_idx, ut16 fn_idx, ut16 ft_idx) { return r_bin_java_cp_get_fref_bytes (bin, out_sz, R_BIN_JAVA_CP_METHODREF, cn_idx, fn_idx, ft_idx); } R_API ut8 *U(r_bin_java_cp_append_field_ref)(RBinJavaObj * bin, ut32 * out_sz, ut16 cn_idx, ut16 fn_idx, ut16 ft_idx) { return r_bin_java_cp_get_fref_bytes (bin, out_sz, R_BIN_JAVA_CP_FIELDREF, cn_idx, fn_idx, ft_idx); } R_API char *r_bin_java_unmangle_without_flags(const char *name, const char *descriptor) { return r_bin_java_unmangle (NULL, name, descriptor); } R_API void U(r_bin_java_print_stack_map_append_frame_summary)(RBinJavaStackMapFrame * obj) { RListIter *iter, *iter_tmp; RList *ptrList; RBinJavaVerificationObj *ver_obj; printf ("Stack Map Frame Information\n"); printf (" Tag Value = 0x%02x Name: %s\n", obj->tag, ((RBinJavaStackMapFrameMetas *) obj->metas->type_info)->name); printf (" Offset: 0x%08"PFMT64x "\n", obj->file_offset); printf (" Local Variable Count = 0x%04x\n", obj->number_of_locals); printf (" Local Variables:\n"); ptrList = obj->local_items; r_list_foreach_safe (ptrList, iter, iter_tmp, ver_obj) { r_bin_java_print_verification_info_summary (ver_obj); } printf (" Stack Items Count = 0x%04x\n", obj->number_of_stack_items); printf (" Stack Items:\n"); ptrList = obj->stack_items; r_list_foreach_safe (ptrList, iter, iter_tmp, ver_obj) { r_bin_java_print_verification_info_summary (ver_obj); } } R_API void U(r_bin_java_stack_frame_default_free)(void *s) { RBinJavaStackMapFrame *stack_frame = s; if (stack_frame) { free (stack_frame->metas); free (stack_frame); } } // R_API void U(r_bin_java_stack_frame_do_nothing_free)(void /*RBinJavaStackMapFrame*/ *stack_frame) {} // R_API void U(r_bin_java_stack_frame_do_nothing_new)(RBinJavaObj * bin, RBinJavaStackMapFrame * stack_frame, ut64 offset) {} R_API RBinJavaCPTypeMetas *U(r_bin_java_get_cp_meta_from_tag)(ut8 tag) { ut16 
i = 0; // set default to unknown. RBinJavaCPTypeMetas *res = &R_BIN_JAVA_CP_METAS[2]; for (i = 0; i < R_BIN_JAVA_CP_METAS_SZ; i++) { if (tag == R_BIN_JAVA_CP_METAS[i].tag) { res = &R_BIN_JAVA_CP_METAS[i]; break; } } return res; } R_API ut8 *U(r_bin_java_cp_append_ref_cname_fname_ftype)(RBinJavaObj * bin, ut32 * out_sz, ut8 tag, const char *cname, const ut32 c_len, const char *fname, const ut32 f_len, const char *tname, const ut32 t_len) { ut32 cn_len = 0, fn_len = 0, ft_len = 0, total_len; ut16 cn_idx = 0, fn_idx = 0, ft_idx = 0; ut8 *bytes = NULL, *cn_bytes = NULL, *fn_bytes = NULL, *ft_bytes = NULL, *cref_bytes = NULL, *fref_bytes = NULL, *fnt_bytes = NULL; *out_sz = 0; cn_bytes = r_bin_java_cp_get_utf8 (R_BIN_JAVA_CP_UTF8, &cn_len, (const ut8 *) cname, c_len); cn_idx = bin->cp_idx + 1; if (cn_bytes) { fn_bytes = r_bin_java_cp_get_utf8 (R_BIN_JAVA_CP_UTF8, &fn_len, (const ut8 *) fname, f_len); fn_idx = bin->cp_idx + 2; } if (fn_bytes) { ft_bytes = r_bin_java_cp_get_utf8 (R_BIN_JAVA_CP_UTF8, &ft_len, (const ut8 *) tname, t_len); ft_idx = bin->cp_idx + 3; } if (cn_bytes && fn_bytes && ft_bytes) { ut32 cref_len = 0, fnt_len = 0, fref_len = 0; ut32 cref_idx = 0, fnt_idx = 0; cref_bytes = r_bin_java_cp_get_classref (bin, &cref_len, NULL, 0, cn_idx); cref_idx = bin->cp_idx + 3; fnt_bytes = r_bin_java_cp_get_name_type (bin, &fnt_len, fn_idx, ft_idx); fnt_idx = bin->cp_idx + 4; fref_bytes = r_bin_java_cp_get_2_ut16 (bin, &fref_len, tag, cref_idx, fnt_idx); if (cref_bytes && fref_bytes && fnt_bytes) { total_len = cn_len + fn_len + ft_len + cref_len + fnt_len + fref_len + 2; if (total_len < cn_len) { goto beach; } bytes = calloc (1, total_len); // class name bytes if (*out_sz + cn_len >= total_len) { goto beach; } memcpy (bytes, cn_bytes + *out_sz, cn_len); *out_sz += cn_len; // field name bytes if (*out_sz + fn_len >= total_len) { goto beach; } memcpy (bytes, fn_bytes + *out_sz, fn_len); *out_sz += fn_len; // field type bytes if (*out_sz + ft_len >= total_len) { goto 
beach; } memcpy (bytes, ft_bytes + *out_sz, ft_len); *out_sz += ft_len; // class ref bytes if (*out_sz + cref_len >= total_len) { goto beach; } memcpy (bytes, cref_bytes + *out_sz, cref_len); *out_sz += fn_len; // field name and type bytes if (*out_sz + fnt_len >= total_len) { goto beach; } memcpy (bytes, fnt_bytes + *out_sz, fnt_len); *out_sz += fnt_len; // field ref bytes if (*out_sz + fref_len >= total_len) { goto beach; } memcpy (bytes, fref_bytes + *out_sz, fref_len); *out_sz += fref_len; } } beach: free (cn_bytes); free (ft_bytes); free (fn_bytes); free (fnt_bytes); free (fref_bytes); free (cref_bytes); return bytes; } R_API ut8 *U(r_bin_java_cp_get_method_ref)(RBinJavaObj * bin, ut32 * out_sz, ut16 class_idx, ut16 name_and_type_idx) { return r_bin_java_cp_get_fm_ref (bin, out_sz, R_BIN_JAVA_CP_METHODREF, class_idx, name_and_type_idx); } R_API ut8 *U(r_bin_java_cp_get_field_ref)(RBinJavaObj * bin, ut32 * out_sz, ut16 class_idx, ut16 name_and_type_idx) { return r_bin_java_cp_get_fm_ref (bin, out_sz, R_BIN_JAVA_CP_FIELDREF, class_idx, name_and_type_idx); } R_API void U(deinit_java_type_null)(void) { free (R_BIN_JAVA_NULL_TYPE.metas); } R_API RBinJavaCPTypeObj *r_bin_java_get_item_from_cp(RBinJavaObj *bin, int i) { if (i < 1 || i > bin->cf.cp_count) { return &R_BIN_JAVA_NULL_TYPE; } RBinJavaCPTypeObj *obj = (RBinJavaCPTypeObj *) r_list_get_n (bin->cp_list, i); return obj ? obj : &R_BIN_JAVA_NULL_TYPE; } R_API void U(copy_type_info_to_stack_frame_list)(RList * type_list, RList * sf_list) { RListIter *iter, *iter_tmp; RBinJavaVerificationObj *ver_obj, *new_ver_obj; if (!type_list || !sf_list) { return; } r_list_foreach_safe (type_list, iter, iter_tmp, ver_obj) { new_ver_obj = (RBinJavaVerificationObj *) malloc (sizeof (RBinJavaVerificationObj)); // FIXME: how to handle failed memory allocation? 
if (new_ver_obj && ver_obj) { memcpy (new_ver_obj, ver_obj, sizeof (RBinJavaVerificationObj)); if (!r_list_append (sf_list, (void *) new_ver_obj)) { R_FREE (new_ver_obj); } } else { R_FREE (new_ver_obj); } } } R_API void U(copy_type_info_to_stack_frame_list_up_to_idx)(RList * type_list, RList * sf_list, ut64 idx) { RListIter *iter, *iter_tmp; RBinJavaVerificationObj *ver_obj, *new_ver_obj; ut32 pos = 0; if (!type_list || !sf_list) { return; } r_list_foreach_safe (type_list, iter, iter_tmp, ver_obj) { new_ver_obj = (RBinJavaVerificationObj *) malloc (sizeof (RBinJavaVerificationObj)); // FIXME: how to handle failed memory allocation? if (new_ver_obj && ver_obj) { memcpy (new_ver_obj, ver_obj, sizeof (RBinJavaVerificationObj)); if (!r_list_append (sf_list, (void *) new_ver_obj)) { R_FREE (new_ver_obj); } } else { R_FREE (new_ver_obj); } pos++; if (pos == idx) { break; } } } R_API ut8 *r_bin_java_cp_get_idx_bytes(RBinJavaObj *bin, ut16 idx, ut32 *out_sz) { RBinJavaCPTypeObj *cp_obj = r_bin_java_get_item_from_bin_cp_list (bin, idx); if (!cp_obj || !out_sz) { return NULL; } if (out_sz) { *out_sz = 0; } switch (cp_obj->tag) { case R_BIN_JAVA_CP_INTEGER: case R_BIN_JAVA_CP_FLOAT: return r_bin_java_cp_get_4bytes (cp_obj->tag, out_sz, cp_obj->info.cp_integer.bytes.raw, 5); case R_BIN_JAVA_CP_LONG: case R_BIN_JAVA_CP_DOUBLE: return r_bin_java_cp_get_4bytes (cp_obj->tag, out_sz, cp_obj->info.cp_long.bytes.raw, 9); case R_BIN_JAVA_CP_UTF8: // eprintf ("Getting idx: %d = %p (3+0x%"PFMT64x")\n", idx, cp_obj, cp_obj->info.cp_utf8.length); if (cp_obj->info.cp_utf8.length > 0) { return r_bin_java_cp_get_utf8 (cp_obj->tag, out_sz, cp_obj->info.cp_utf8.bytes, cp_obj->info.cp_utf8.length); } } return NULL; } R_API int r_bin_java_valid_class(const ut8 *buf, ut64 buf_sz) { RBinJavaObj *bin = R_NEW0 (RBinJavaObj), *cur_bin = R_BIN_JAVA_GLOBAL_BIN; if (!bin) { return false; } int res = r_bin_java_load_bin (bin, buf, buf_sz); if (bin->calc_size == buf_sz) { res = true; } r_bin_java_free 
(bin); R_BIN_JAVA_GLOBAL_BIN = cur_bin; return res; } R_API ut64 r_bin_java_calc_class_size(ut8 *bytes, ut64 size) { RBinJavaObj *bin = R_NEW0 (RBinJavaObj); if (!bin) { return false; } RBinJavaObj *cur_bin = R_BIN_JAVA_GLOBAL_BIN; ut64 bin_size = UT64_MAX; if (bin) { if (r_bin_java_load_bin (bin, bytes, size)) { bin_size = bin->calc_size; } r_bin_java_free (bin); R_BIN_JAVA_GLOBAL_BIN = cur_bin; } return bin_size; } R_API int U(r_bin_java_get_cp_idx_with_name)(RBinJavaObj * bin_obj, const char *name, ut32 len) { RListIter *iter; RBinJavaCPTypeObj *obj; r_list_foreach (bin_obj->cp_list, iter, obj) { if (obj->tag == R_BIN_JAVA_CP_UTF8) { if (!strncmp (name, (const char *) obj->info.cp_utf8.bytes, len)) { return obj->metas->ord; } } } return 0; }
R_API RBinJavaAttrInfo *r_bin_java_source_code_file_attr_new(RBinJavaObj *bin, ut8 *buffer, ut64 sz, ut64 buf_offset) { if (!sz) { return NULL; } ut64 offset = 0; RBinJavaAttrInfo *attr = r_bin_java_default_attr_new (bin, buffer, sz, buf_offset); offset += 6; if (!attr) { return NULL; } attr->type = R_BIN_JAVA_ATTR_TYPE_SOURCE_FILE_ATTR; // if (buffer + offset > buffer + sz) return NULL; attr->info.source_file_attr.sourcefile_idx = R_BIN_JAVA_USHORT (buffer, offset); offset += 2; attr->size = offset; // IFDBG r_bin_java_print_source_code_file_attr_summary(attr); return attr; }
R_API RBinJavaAttrInfo *r_bin_java_source_code_file_attr_new(RBinJavaObj *bin, ut8 *buffer, ut64 sz, ut64 buf_offset) { if (!sz || sz == UT64_MAX) { return NULL; } #if 0 /// XXX this breaks tests if (sz < 8) { return NULL; } #endif ut64 offset = 0; RBinJavaAttrInfo *attr = r_bin_java_default_attr_new (bin, buffer, sz, buf_offset); offset += 6; if (attr) { attr->type = R_BIN_JAVA_ATTR_TYPE_SOURCE_FILE_ATTR; attr->info.source_file_attr.sourcefile_idx = R_BIN_JAVA_USHORT (buffer, offset); offset += 2; attr->size = offset; // IFDBG r_bin_java_print_source_code_file_attr_summary(attr); } return attr; }
{'added': [(3630, '\tif (sz < 8) {'), (3631, '\t\treturn NULL;'), (3632, '\t}'), (3724, '\tif (sz < 8) {'), (3727, '\tRBinJavaAttrInfo *attr = r_bin_java_default_attr_new (bin, buffer, sz, buf_offset);'), (3728, '\tif (!attr) {'), (3729, '\t\treturn NULL;'), (3731, '\toffset += 6;'), (3738, '\t\tif (offset + 8 > sz) {'), (3876, '\tif (sz < 8) {'), (3877, '\t\treturn NULL;'), (3878, '\t}'), (3947, '\tif (!bin || !buffer || sz < 8) {'), (3950, '\tRBinJavaAttrInfo *attr = r_bin_java_default_attr_new (bin, buffer, sz, buf_offset);'), (4024, '\tif (sz < 8) {'), (4025, '\t\treturn NULL;'), (4026, '\t}'), (4080, '\tif (!sz || sz == UT64_MAX) {'), (4081, '\t\treturn NULL;'), (4082, '\t}'), (4083, '#if 0'), (4084, '\t/// XXX this breaks tests'), (4085, '\tif (sz < 8) {'), (4088, '#endif'), (4092, '\tif (attr) {'), (4093, '\t\tattr->type = R_BIN_JAVA_ATTR_TYPE_SOURCE_FILE_ATTR;'), (4094, '\t\tattr->info.source_file_attr.sourcefile_idx = R_BIN_JAVA_USHORT (buffer, offset);'), (4095, '\t\toffset += 2;'), (4096, '\t\tattr->size = offset;'), (4097, '\t\t// IFDBG r_bin_java_print_source_code_file_attr_summary(attr);'), (4107, '\tif (sz < 8) {'), (4108, '\t\treturn NULL;'), (4109, '\t}'), (4115, '\tattr->size = 6;'), (4146, '\tif (se) {'), (4147, '\t\tse->tag = type;'), (4148, '\t\tif (se->tag == R_BIN_JAVA_STACKMAP_OBJECT) {'), (4149, '\t\t\tse->info.obj_val_cp_idx = (ut16) value;'), (4150, '\t\t} else if (se->tag == R_BIN_JAVA_STACKMAP_UNINIT) {'), (4151, '\t\t\tse->info.uninit_offset = (ut16) value;'), (4152, '\t\t}'), (4158, '\tif (sz < 8) {'), (4159, '\t\treturn NULL;'), (4160, '\t}'), (4280, '\tif (sz < 8) {'), (4281, '\t\treturn NULL;'), (4282, '\t}'), (4586, '\tif (sz < 8) {'), (4587, '\t\treturn NULL;'), (4588, '\t}'), (4595, '\tIFDBG eprintf ("r_bin_java_stack_map_table_attr_new: New stack map allocated.\\n");'), (6370, '\tif (sz < 8) {'), (6371, '\t\treturn NULL;'), (6372, '\t}'), (6373, '\tRBinJavaAttrInfo *attr = r_bin_java_default_attr_new (bin, buffer, sz, 
buf_offset);'), (6455, '\tif (sz < 8) {'), (6456, '\t\treturn NULL;'), (6457, '\t}'), (6458, '\tRBinJavaAnnotation *annotation = R_NEW0 (RBinJavaAnnotation);'), (6530, '\tut64 sz = 2;'), (6531, '\tif (evp && evp->value) {'), (6532, '\t\t// evp->element_name_idx = r_bin_java_read_short(bin, bin->b->cur);'), (6533, '\t\t// evp->value = r_bin_java_element_value_new (bin, offset+2);'), (6612, '\tif (sz < 8) {'), (6613, '\t\treturn NULL;'), (6614, '\t}'), (7030, '\tif (sz < 8) {')], 'deleted': [(3718, '\tRBinJavaAttrInfo *attr = NULL;'), (3722, '\tattr = r_bin_java_default_attr_new (bin, buffer, sz, buf_offset);'), (3723, '\toffset += 6;'), (3724, '\tif (buf_offset + offset + 8 > sz) {'), (3725, '\t\teprintf ("Invalid amount of inner classes\\n");'), (3728, '\tif (attr == NULL) {'), (3729, '\t\t// TODO eprintf'), (3730, '\t\treturn attr;'), (3738, '\t\tif (buf_offset + offset + 8 > sz) {'), (3943, '\tRBinJavaAttrInfo *attr;'), (3945, '\tif (!buffer || sz < 1) {'), (3948, '\tattr = r_bin_java_default_attr_new (bin, buffer, sz, buf_offset);'), (4075, '\tif (!sz) {'), (4081, '\tif (!attr) {'), (4082, '\t\treturn NULL;'), (4084, '\tattr->type = R_BIN_JAVA_ATTR_TYPE_SOURCE_FILE_ATTR;'), (4085, '\t// if (buffer + offset > buffer + sz) return NULL;'), (4086, '\tattr->info.source_file_attr.sourcefile_idx = R_BIN_JAVA_USHORT (buffer, offset);'), (4087, '\toffset += 2;'), (4088, '\tattr->size = offset;'), (4089, '\t// IFDBG r_bin_java_print_source_code_file_attr_summary(attr);'), (4098, '\tut64 offset = 0;'), (4103, '\toffset += 6;'), (4105, '\tattr->size = offset;'), (4136, '\tif (!se) {'), (4137, '\t\treturn NULL;'), (4138, '\t}'), (4139, '\tse->tag = type;'), (4140, '\tif (se->tag == R_BIN_JAVA_STACKMAP_OBJECT) {'), (4141, '\t\tse->info.obj_val_cp_idx = (ut16) value;'), (4142, '\t} else if (se->tag == R_BIN_JAVA_STACKMAP_UNINIT) {'), (4143, '\t\t/*if (bin->offset_sz == 4) {'), (4144, '\t\tse->info.uninit_offset = value;'), (4145, '\t\t} else {'), (4146, 
'\t\tse->info.uninit_offset = (ut16) value;'), (4147, '\t\t}*/'), (4148, '\t\tse->info.uninit_offset = (ut16) value;'), (4582, '\tIFDBG eprintf("r_bin_java_stack_map_table_attr_new: New stack map allocated.\\n");'), (6357, '\tRBinJavaAttrInfo *attr = NULL;'), (6358, '\tattr = r_bin_java_default_attr_new (bin, buffer, sz, buf_offset);'), (6438, '\tRBinJavaAnnotation *annotation = NULL;'), (6441, '\tannotation = R_NEW0 (RBinJavaAnnotation);'), (6513, '\tut64 sz = 0;'), (6514, '\tif (evp == NULL) {'), (6515, '\t\treturn sz;'), (6516, '\t}'), (6517, '\t// evp->element_name_idx = r_bin_java_read_short(bin, bin->b->cur);'), (6518, '\tsz += 2;'), (6519, '\t// evp->value = r_bin_java_element_value_new (bin, offset+2);'), (6520, '\tif (evp->value) {'), (7014, '\tif (buf_offset + 8 > sz) {')]}
67
51
7,961
56,436
16
96
3
https://github.com/radareorg/radare2
CVE-2022-0519
CWE-119
1,114
hevcdec.c
C
decode_nal_unit
/* * HEVC video Decoder * * Copyright (C) 2012 - 2013 Guillaume Martres * Copyright (C) 2012 - 2013 Mickael Raulet * Copyright (C) 2012 - 2013 Gildas Cocherel * Copyright (C) 2012 - 2013 Wassim Hamidouche * * This file is part of FFmpeg. * * FFmpeg is free software; you can redistribute it and/or * modify it under the terms of the GNU Lesser General Public * License as published by the Free Software Foundation; either * version 2.1 of the License, or (at your option) any later version. * * FFmpeg is distributed in the hope that it will be useful, * but WITHOUT ANY WARRANTY; without even the implied warranty of * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU * Lesser General Public License for more details. * * You should have received a copy of the GNU Lesser General Public * License along with FFmpeg; if not, write to the Free Software * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA */ #include "libavutil/attributes.h" #include "libavutil/common.h" #include "libavutil/display.h" #include "libavutil/internal.h" #include "libavutil/mastering_display_metadata.h" #include "libavutil/md5.h" #include "libavutil/opt.h" #include "libavutil/pixdesc.h" #include "libavutil/stereo3d.h" #include "bswapdsp.h" #include "bytestream.h" #include "cabac_functions.h" #include "golomb.h" #include "hevc.h" #include "hevc_data.h" #include "hevc_parse.h" #include "hevcdec.h" #include "hwaccel.h" #include "profiles.h" const uint8_t ff_hevc_pel_weight[65] = { [2] = 0, [4] = 1, [6] = 2, [8] = 3, [12] = 4, [16] = 5, [24] = 6, [32] = 7, [48] = 8, [64] = 9 }; /** * NOTE: Each function hls_foo correspond to the function foo in the * specification (HLS stands for High Level Syntax). 
*/ /** * Section 5.7 */ /* free everything allocated by pic_arrays_init() */ static void pic_arrays_free(HEVCContext *s) { av_freep(&s->sao); av_freep(&s->deblock); av_freep(&s->skip_flag); av_freep(&s->tab_ct_depth); av_freep(&s->tab_ipm); av_freep(&s->cbf_luma); av_freep(&s->is_pcm); av_freep(&s->qp_y_tab); av_freep(&s->tab_slice_address); av_freep(&s->filter_slice_edges); av_freep(&s->horizontal_bs); av_freep(&s->vertical_bs); av_freep(&s->sh.entry_point_offset); av_freep(&s->sh.size); av_freep(&s->sh.offset); av_buffer_pool_uninit(&s->tab_mvf_pool); av_buffer_pool_uninit(&s->rpl_tab_pool); } /* allocate arrays that depend on frame dimensions */ static int pic_arrays_init(HEVCContext *s, const HEVCSPS *sps) { int log2_min_cb_size = sps->log2_min_cb_size; int width = sps->width; int height = sps->height; int pic_size_in_ctb = ((width >> log2_min_cb_size) + 1) * ((height >> log2_min_cb_size) + 1); int ctb_count = sps->ctb_width * sps->ctb_height; int min_pu_size = sps->min_pu_width * sps->min_pu_height; s->bs_width = (width >> 2) + 1; s->bs_height = (height >> 2) + 1; s->sao = av_mallocz_array(ctb_count, sizeof(*s->sao)); s->deblock = av_mallocz_array(ctb_count, sizeof(*s->deblock)); if (!s->sao || !s->deblock) goto fail; s->skip_flag = av_malloc_array(sps->min_cb_height, sps->min_cb_width); s->tab_ct_depth = av_malloc_array(sps->min_cb_height, sps->min_cb_width); if (!s->skip_flag || !s->tab_ct_depth) goto fail; s->cbf_luma = av_malloc_array(sps->min_tb_width, sps->min_tb_height); s->tab_ipm = av_mallocz(min_pu_size); s->is_pcm = av_malloc_array(sps->min_pu_width + 1, sps->min_pu_height + 1); if (!s->tab_ipm || !s->cbf_luma || !s->is_pcm) goto fail; s->filter_slice_edges = av_mallocz(ctb_count); s->tab_slice_address = av_malloc_array(pic_size_in_ctb, sizeof(*s->tab_slice_address)); s->qp_y_tab = av_malloc_array(pic_size_in_ctb, sizeof(*s->qp_y_tab)); if (!s->qp_y_tab || !s->filter_slice_edges || !s->tab_slice_address) goto fail; s->horizontal_bs = 
av_mallocz_array(s->bs_width, s->bs_height); s->vertical_bs = av_mallocz_array(s->bs_width, s->bs_height); if (!s->horizontal_bs || !s->vertical_bs) goto fail; s->tab_mvf_pool = av_buffer_pool_init(min_pu_size * sizeof(MvField), av_buffer_allocz); s->rpl_tab_pool = av_buffer_pool_init(ctb_count * sizeof(RefPicListTab), av_buffer_allocz); if (!s->tab_mvf_pool || !s->rpl_tab_pool) goto fail; return 0; fail: pic_arrays_free(s); return AVERROR(ENOMEM); } static int pred_weight_table(HEVCContext *s, GetBitContext *gb) { int i = 0; int j = 0; uint8_t luma_weight_l0_flag[16]; uint8_t chroma_weight_l0_flag[16]; uint8_t luma_weight_l1_flag[16]; uint8_t chroma_weight_l1_flag[16]; int luma_log2_weight_denom; luma_log2_weight_denom = get_ue_golomb_long(gb); if (luma_log2_weight_denom < 0 || luma_log2_weight_denom > 7) { av_log(s->avctx, AV_LOG_ERROR, "luma_log2_weight_denom %d is invalid\n", luma_log2_weight_denom); return AVERROR_INVALIDDATA; } s->sh.luma_log2_weight_denom = av_clip_uintp2(luma_log2_weight_denom, 3); if (s->ps.sps->chroma_format_idc != 0) { int64_t chroma_log2_weight_denom = luma_log2_weight_denom + (int64_t)get_se_golomb(gb); if (chroma_log2_weight_denom < 0 || chroma_log2_weight_denom > 7) { av_log(s->avctx, AV_LOG_ERROR, "chroma_log2_weight_denom %"PRId64" is invalid\n", chroma_log2_weight_denom); return AVERROR_INVALIDDATA; } s->sh.chroma_log2_weight_denom = chroma_log2_weight_denom; } for (i = 0; i < s->sh.nb_refs[L0]; i++) { luma_weight_l0_flag[i] = get_bits1(gb); if (!luma_weight_l0_flag[i]) { s->sh.luma_weight_l0[i] = 1 << s->sh.luma_log2_weight_denom; s->sh.luma_offset_l0[i] = 0; } } if (s->ps.sps->chroma_format_idc != 0) { for (i = 0; i < s->sh.nb_refs[L0]; i++) chroma_weight_l0_flag[i] = get_bits1(gb); } else { for (i = 0; i < s->sh.nb_refs[L0]; i++) chroma_weight_l0_flag[i] = 0; } for (i = 0; i < s->sh.nb_refs[L0]; i++) { if (luma_weight_l0_flag[i]) { int delta_luma_weight_l0 = get_se_golomb(gb); s->sh.luma_weight_l0[i] = (1 << 
s->sh.luma_log2_weight_denom) + delta_luma_weight_l0; s->sh.luma_offset_l0[i] = get_se_golomb(gb); } if (chroma_weight_l0_flag[i]) { for (j = 0; j < 2; j++) { int delta_chroma_weight_l0 = get_se_golomb(gb); int delta_chroma_offset_l0 = get_se_golomb(gb); if ( (int8_t)delta_chroma_weight_l0 != delta_chroma_weight_l0 || delta_chroma_offset_l0 < -(1<<17) || delta_chroma_offset_l0 > (1<<17)) { return AVERROR_INVALIDDATA; } s->sh.chroma_weight_l0[i][j] = (1 << s->sh.chroma_log2_weight_denom) + delta_chroma_weight_l0; s->sh.chroma_offset_l0[i][j] = av_clip((delta_chroma_offset_l0 - ((128 * s->sh.chroma_weight_l0[i][j]) >> s->sh.chroma_log2_weight_denom) + 128), -128, 127); } } else { s->sh.chroma_weight_l0[i][0] = 1 << s->sh.chroma_log2_weight_denom; s->sh.chroma_offset_l0[i][0] = 0; s->sh.chroma_weight_l0[i][1] = 1 << s->sh.chroma_log2_weight_denom; s->sh.chroma_offset_l0[i][1] = 0; } } if (s->sh.slice_type == HEVC_SLICE_B) { for (i = 0; i < s->sh.nb_refs[L1]; i++) { luma_weight_l1_flag[i] = get_bits1(gb); if (!luma_weight_l1_flag[i]) { s->sh.luma_weight_l1[i] = 1 << s->sh.luma_log2_weight_denom; s->sh.luma_offset_l1[i] = 0; } } if (s->ps.sps->chroma_format_idc != 0) { for (i = 0; i < s->sh.nb_refs[L1]; i++) chroma_weight_l1_flag[i] = get_bits1(gb); } else { for (i = 0; i < s->sh.nb_refs[L1]; i++) chroma_weight_l1_flag[i] = 0; } for (i = 0; i < s->sh.nb_refs[L1]; i++) { if (luma_weight_l1_flag[i]) { int delta_luma_weight_l1 = get_se_golomb(gb); s->sh.luma_weight_l1[i] = (1 << s->sh.luma_log2_weight_denom) + delta_luma_weight_l1; s->sh.luma_offset_l1[i] = get_se_golomb(gb); } if (chroma_weight_l1_flag[i]) { for (j = 0; j < 2; j++) { int delta_chroma_weight_l1 = get_se_golomb(gb); int delta_chroma_offset_l1 = get_se_golomb(gb); if ( (int8_t)delta_chroma_weight_l1 != delta_chroma_weight_l1 || delta_chroma_offset_l1 < -(1<<17) || delta_chroma_offset_l1 > (1<<17)) { return AVERROR_INVALIDDATA; } s->sh.chroma_weight_l1[i][j] = (1 << s->sh.chroma_log2_weight_denom) + 
delta_chroma_weight_l1; s->sh.chroma_offset_l1[i][j] = av_clip((delta_chroma_offset_l1 - ((128 * s->sh.chroma_weight_l1[i][j]) >> s->sh.chroma_log2_weight_denom) + 128), -128, 127); } } else { s->sh.chroma_weight_l1[i][0] = 1 << s->sh.chroma_log2_weight_denom; s->sh.chroma_offset_l1[i][0] = 0; s->sh.chroma_weight_l1[i][1] = 1 << s->sh.chroma_log2_weight_denom; s->sh.chroma_offset_l1[i][1] = 0; } } } return 0; } static int decode_lt_rps(HEVCContext *s, LongTermRPS *rps, GetBitContext *gb) { const HEVCSPS *sps = s->ps.sps; int max_poc_lsb = 1 << sps->log2_max_poc_lsb; int prev_delta_msb = 0; unsigned int nb_sps = 0, nb_sh; int i; rps->nb_refs = 0; if (!sps->long_term_ref_pics_present_flag) return 0; if (sps->num_long_term_ref_pics_sps > 0) nb_sps = get_ue_golomb_long(gb); nb_sh = get_ue_golomb_long(gb); if (nb_sps > sps->num_long_term_ref_pics_sps) return AVERROR_INVALIDDATA; if (nb_sh + (uint64_t)nb_sps > FF_ARRAY_ELEMS(rps->poc)) return AVERROR_INVALIDDATA; rps->nb_refs = nb_sh + nb_sps; for (i = 0; i < rps->nb_refs; i++) { uint8_t delta_poc_msb_present; if (i < nb_sps) { uint8_t lt_idx_sps = 0; if (sps->num_long_term_ref_pics_sps > 1) lt_idx_sps = get_bits(gb, av_ceil_log2(sps->num_long_term_ref_pics_sps)); rps->poc[i] = sps->lt_ref_pic_poc_lsb_sps[lt_idx_sps]; rps->used[i] = sps->used_by_curr_pic_lt_sps_flag[lt_idx_sps]; } else { rps->poc[i] = get_bits(gb, sps->log2_max_poc_lsb); rps->used[i] = get_bits1(gb); } delta_poc_msb_present = get_bits1(gb); if (delta_poc_msb_present) { int64_t delta = get_ue_golomb_long(gb); int64_t poc; if (i && i != nb_sps) delta += prev_delta_msb; poc = rps->poc[i] + s->poc - delta * max_poc_lsb - s->sh.pic_order_cnt_lsb; if (poc != (int32_t)poc) return AVERROR_INVALIDDATA; rps->poc[i] = poc; prev_delta_msb = delta; } } return 0; } static void export_stream_params(AVCodecContext *avctx, const HEVCParamSets *ps, const HEVCSPS *sps) { const HEVCVPS *vps = (const HEVCVPS*)ps->vps_list[sps->vps_id]->data; const HEVCWindow *ow = 
&sps->output_window; unsigned int num = 0, den = 0; avctx->pix_fmt = sps->pix_fmt; avctx->coded_width = sps->width; avctx->coded_height = sps->height; avctx->width = sps->width - ow->left_offset - ow->right_offset; avctx->height = sps->height - ow->top_offset - ow->bottom_offset; avctx->has_b_frames = sps->temporal_layer[sps->max_sub_layers - 1].num_reorder_pics; avctx->profile = sps->ptl.general_ptl.profile_idc; avctx->level = sps->ptl.general_ptl.level_idc; ff_set_sar(avctx, sps->vui.sar); if (sps->vui.video_signal_type_present_flag) avctx->color_range = sps->vui.video_full_range_flag ? AVCOL_RANGE_JPEG : AVCOL_RANGE_MPEG; else avctx->color_range = AVCOL_RANGE_MPEG; if (sps->vui.colour_description_present_flag) { avctx->color_primaries = sps->vui.colour_primaries; avctx->color_trc = sps->vui.transfer_characteristic; avctx->colorspace = sps->vui.matrix_coeffs; } else { avctx->color_primaries = AVCOL_PRI_UNSPECIFIED; avctx->color_trc = AVCOL_TRC_UNSPECIFIED; avctx->colorspace = AVCOL_SPC_UNSPECIFIED; } if (vps->vps_timing_info_present_flag) { num = vps->vps_num_units_in_tick; den = vps->vps_time_scale; } else if (sps->vui.vui_timing_info_present_flag) { num = sps->vui.vui_num_units_in_tick; den = sps->vui.vui_time_scale; } if (num != 0 && den != 0) av_reduce(&avctx->framerate.den, &avctx->framerate.num, num, den, 1 << 30); } static enum AVPixelFormat get_format(HEVCContext *s, const HEVCSPS *sps) { #define HWACCEL_MAX (CONFIG_HEVC_DXVA2_HWACCEL + \ CONFIG_HEVC_D3D11VA_HWACCEL * 2 + \ CONFIG_HEVC_NVDEC_HWACCEL + \ CONFIG_HEVC_VAAPI_HWACCEL + \ CONFIG_HEVC_VIDEOTOOLBOX_HWACCEL + \ CONFIG_HEVC_VDPAU_HWACCEL) enum AVPixelFormat pix_fmts[HWACCEL_MAX + 2], *fmt = pix_fmts; switch (sps->pix_fmt) { case AV_PIX_FMT_YUV420P: case AV_PIX_FMT_YUVJ420P: #if CONFIG_HEVC_DXVA2_HWACCEL *fmt++ = AV_PIX_FMT_DXVA2_VLD; #endif #if CONFIG_HEVC_D3D11VA_HWACCEL *fmt++ = AV_PIX_FMT_D3D11VA_VLD; *fmt++ = AV_PIX_FMT_D3D11; #endif #if CONFIG_HEVC_VAAPI_HWACCEL *fmt++ = AV_PIX_FMT_VAAPI; 
#endif #if CONFIG_HEVC_VDPAU_HWACCEL *fmt++ = AV_PIX_FMT_VDPAU; #endif #if CONFIG_HEVC_NVDEC_HWACCEL *fmt++ = AV_PIX_FMT_CUDA; #endif #if CONFIG_HEVC_VIDEOTOOLBOX_HWACCEL *fmt++ = AV_PIX_FMT_VIDEOTOOLBOX; #endif break; case AV_PIX_FMT_YUV420P10: #if CONFIG_HEVC_DXVA2_HWACCEL *fmt++ = AV_PIX_FMT_DXVA2_VLD; #endif #if CONFIG_HEVC_D3D11VA_HWACCEL *fmt++ = AV_PIX_FMT_D3D11VA_VLD; *fmt++ = AV_PIX_FMT_D3D11; #endif #if CONFIG_HEVC_VAAPI_HWACCEL *fmt++ = AV_PIX_FMT_VAAPI; #endif #if CONFIG_HEVC_VIDEOTOOLBOX_HWACCEL *fmt++ = AV_PIX_FMT_VIDEOTOOLBOX; #endif #if CONFIG_HEVC_NVDEC_HWACCEL *fmt++ = AV_PIX_FMT_CUDA; #endif break; case AV_PIX_FMT_YUV420P12: case AV_PIX_FMT_YUV444P: case AV_PIX_FMT_YUV444P10: case AV_PIX_FMT_YUV444P12: #if CONFIG_HEVC_NVDEC_HWACCEL *fmt++ = AV_PIX_FMT_CUDA; #endif break; } *fmt++ = sps->pix_fmt; *fmt = AV_PIX_FMT_NONE; return ff_thread_get_format(s->avctx, pix_fmts); } static int set_sps(HEVCContext *s, const HEVCSPS *sps, enum AVPixelFormat pix_fmt) { int ret, i; pic_arrays_free(s); s->ps.sps = NULL; s->ps.vps = NULL; if (!sps) return 0; ret = pic_arrays_init(s, sps); if (ret < 0) goto fail; export_stream_params(s->avctx, &s->ps, sps); s->avctx->pix_fmt = pix_fmt; ff_hevc_pred_init(&s->hpc, sps->bit_depth); ff_hevc_dsp_init (&s->hevcdsp, sps->bit_depth); ff_videodsp_init (&s->vdsp, sps->bit_depth); for (i = 0; i < 3; i++) { av_freep(&s->sao_pixel_buffer_h[i]); av_freep(&s->sao_pixel_buffer_v[i]); } if (sps->sao_enabled && !s->avctx->hwaccel) { int c_count = (sps->chroma_format_idc != 0) ? 
3 : 1; int c_idx; for(c_idx = 0; c_idx < c_count; c_idx++) { int w = sps->width >> sps->hshift[c_idx]; int h = sps->height >> sps->vshift[c_idx]; s->sao_pixel_buffer_h[c_idx] = av_malloc((w * 2 * sps->ctb_height) << sps->pixel_shift); s->sao_pixel_buffer_v[c_idx] = av_malloc((h * 2 * sps->ctb_width) << sps->pixel_shift); } } s->ps.sps = sps; s->ps.vps = (HEVCVPS*) s->ps.vps_list[s->ps.sps->vps_id]->data; return 0; fail: pic_arrays_free(s); s->ps.sps = NULL; return ret; } static int hls_slice_header(HEVCContext *s) { GetBitContext *gb = &s->HEVClc->gb; SliceHeader *sh = &s->sh; int i, ret; // Coded parameters sh->first_slice_in_pic_flag = get_bits1(gb); if ((IS_IDR(s) || IS_BLA(s)) && sh->first_slice_in_pic_flag) { s->seq_decode = (s->seq_decode + 1) & 0xff; s->max_ra = INT_MAX; if (IS_IDR(s)) ff_hevc_clear_refs(s); } sh->no_output_of_prior_pics_flag = 0; if (IS_IRAP(s)) sh->no_output_of_prior_pics_flag = get_bits1(gb); sh->pps_id = get_ue_golomb_long(gb); if (sh->pps_id >= HEVC_MAX_PPS_COUNT || !s->ps.pps_list[sh->pps_id]) { av_log(s->avctx, AV_LOG_ERROR, "PPS id out of range: %d\n", sh->pps_id); return AVERROR_INVALIDDATA; } if (!sh->first_slice_in_pic_flag && s->ps.pps != (HEVCPPS*)s->ps.pps_list[sh->pps_id]->data) { av_log(s->avctx, AV_LOG_ERROR, "PPS changed between slices.\n"); return AVERROR_INVALIDDATA; } s->ps.pps = (HEVCPPS*)s->ps.pps_list[sh->pps_id]->data; if (s->nal_unit_type == HEVC_NAL_CRA_NUT && s->last_eos == 1) sh->no_output_of_prior_pics_flag = 1; if (s->ps.sps != (HEVCSPS*)s->ps.sps_list[s->ps.pps->sps_id]->data) { const HEVCSPS *sps = (HEVCSPS*)s->ps.sps_list[s->ps.pps->sps_id]->data; const HEVCSPS *last_sps = s->ps.sps; enum AVPixelFormat pix_fmt; if (last_sps && IS_IRAP(s) && s->nal_unit_type != HEVC_NAL_CRA_NUT) { if (sps->width != last_sps->width || sps->height != last_sps->height || sps->temporal_layer[sps->max_sub_layers - 1].max_dec_pic_buffering != last_sps->temporal_layer[last_sps->max_sub_layers - 1].max_dec_pic_buffering) 
sh->no_output_of_prior_pics_flag = 0; } ff_hevc_clear_refs(s); ret = set_sps(s, sps, sps->pix_fmt); if (ret < 0) return ret; pix_fmt = get_format(s, sps); if (pix_fmt < 0) return pix_fmt; s->avctx->pix_fmt = pix_fmt; s->seq_decode = (s->seq_decode + 1) & 0xff; s->max_ra = INT_MAX; } sh->dependent_slice_segment_flag = 0; if (!sh->first_slice_in_pic_flag) { int slice_address_length; if (s->ps.pps->dependent_slice_segments_enabled_flag) sh->dependent_slice_segment_flag = get_bits1(gb); slice_address_length = av_ceil_log2(s->ps.sps->ctb_width * s->ps.sps->ctb_height); sh->slice_segment_addr = get_bitsz(gb, slice_address_length); if (sh->slice_segment_addr >= s->ps.sps->ctb_width * s->ps.sps->ctb_height) { av_log(s->avctx, AV_LOG_ERROR, "Invalid slice segment address: %u.\n", sh->slice_segment_addr); return AVERROR_INVALIDDATA; } if (!sh->dependent_slice_segment_flag) { sh->slice_addr = sh->slice_segment_addr; s->slice_idx++; } } else { sh->slice_segment_addr = sh->slice_addr = 0; s->slice_idx = 0; s->slice_initialized = 0; } if (!sh->dependent_slice_segment_flag) { s->slice_initialized = 0; for (i = 0; i < s->ps.pps->num_extra_slice_header_bits; i++) skip_bits(gb, 1); // slice_reserved_undetermined_flag[] sh->slice_type = get_ue_golomb_long(gb); if (!(sh->slice_type == HEVC_SLICE_I || sh->slice_type == HEVC_SLICE_P || sh->slice_type == HEVC_SLICE_B)) { av_log(s->avctx, AV_LOG_ERROR, "Unknown slice type: %d.\n", sh->slice_type); return AVERROR_INVALIDDATA; } if (IS_IRAP(s) && sh->slice_type != HEVC_SLICE_I) { av_log(s->avctx, AV_LOG_ERROR, "Inter slices in an IRAP frame.\n"); return AVERROR_INVALIDDATA; } // when flag is not present, picture is inferred to be output sh->pic_output_flag = 1; if (s->ps.pps->output_flag_present_flag) sh->pic_output_flag = get_bits1(gb); if (s->ps.sps->separate_colour_plane_flag) sh->colour_plane_id = get_bits(gb, 2); if (!IS_IDR(s)) { int poc, pos; sh->pic_order_cnt_lsb = get_bits(gb, s->ps.sps->log2_max_poc_lsb); poc = 
ff_hevc_compute_poc(s->ps.sps, s->pocTid0, sh->pic_order_cnt_lsb, s->nal_unit_type); if (!sh->first_slice_in_pic_flag && poc != s->poc) { av_log(s->avctx, AV_LOG_WARNING, "Ignoring POC change between slices: %d -> %d\n", s->poc, poc); if (s->avctx->err_recognition & AV_EF_EXPLODE) return AVERROR_INVALIDDATA; poc = s->poc; } s->poc = poc; sh->short_term_ref_pic_set_sps_flag = get_bits1(gb); pos = get_bits_left(gb); if (!sh->short_term_ref_pic_set_sps_flag) { ret = ff_hevc_decode_short_term_rps(gb, s->avctx, &sh->slice_rps, s->ps.sps, 1); if (ret < 0) return ret; sh->short_term_rps = &sh->slice_rps; } else { int numbits, rps_idx; if (!s->ps.sps->nb_st_rps) { av_log(s->avctx, AV_LOG_ERROR, "No ref lists in the SPS.\n"); return AVERROR_INVALIDDATA; } numbits = av_ceil_log2(s->ps.sps->nb_st_rps); rps_idx = numbits > 0 ? get_bits(gb, numbits) : 0; sh->short_term_rps = &s->ps.sps->st_rps[rps_idx]; } sh->short_term_ref_pic_set_size = pos - get_bits_left(gb); pos = get_bits_left(gb); ret = decode_lt_rps(s, &sh->long_term_rps, gb); if (ret < 0) { av_log(s->avctx, AV_LOG_WARNING, "Invalid long term RPS.\n"); if (s->avctx->err_recognition & AV_EF_EXPLODE) return AVERROR_INVALIDDATA; } sh->long_term_ref_pic_set_size = pos - get_bits_left(gb); if (s->ps.sps->sps_temporal_mvp_enabled_flag) sh->slice_temporal_mvp_enabled_flag = get_bits1(gb); else sh->slice_temporal_mvp_enabled_flag = 0; } else { s->sh.short_term_rps = NULL; s->poc = 0; } /* 8.3.1 */ if (sh->first_slice_in_pic_flag && s->temporal_id == 0 && s->nal_unit_type != HEVC_NAL_TRAIL_N && s->nal_unit_type != HEVC_NAL_TSA_N && s->nal_unit_type != HEVC_NAL_STSA_N && s->nal_unit_type != HEVC_NAL_RADL_N && s->nal_unit_type != HEVC_NAL_RADL_R && s->nal_unit_type != HEVC_NAL_RASL_N && s->nal_unit_type != HEVC_NAL_RASL_R) s->pocTid0 = s->poc; if (s->ps.sps->sao_enabled) { sh->slice_sample_adaptive_offset_flag[0] = get_bits1(gb); if (s->ps.sps->chroma_format_idc) { sh->slice_sample_adaptive_offset_flag[1] = 
sh->slice_sample_adaptive_offset_flag[2] = get_bits1(gb); } } else { sh->slice_sample_adaptive_offset_flag[0] = 0; sh->slice_sample_adaptive_offset_flag[1] = 0; sh->slice_sample_adaptive_offset_flag[2] = 0; } sh->nb_refs[L0] = sh->nb_refs[L1] = 0; if (sh->slice_type == HEVC_SLICE_P || sh->slice_type == HEVC_SLICE_B) { int nb_refs; sh->nb_refs[L0] = s->ps.pps->num_ref_idx_l0_default_active; if (sh->slice_type == HEVC_SLICE_B) sh->nb_refs[L1] = s->ps.pps->num_ref_idx_l1_default_active; if (get_bits1(gb)) { // num_ref_idx_active_override_flag sh->nb_refs[L0] = get_ue_golomb_long(gb) + 1; if (sh->slice_type == HEVC_SLICE_B) sh->nb_refs[L1] = get_ue_golomb_long(gb) + 1; } if (sh->nb_refs[L0] > HEVC_MAX_REFS || sh->nb_refs[L1] > HEVC_MAX_REFS) { av_log(s->avctx, AV_LOG_ERROR, "Too many refs: %d/%d.\n", sh->nb_refs[L0], sh->nb_refs[L1]); return AVERROR_INVALIDDATA; } sh->rpl_modification_flag[0] = 0; sh->rpl_modification_flag[1] = 0; nb_refs = ff_hevc_frame_nb_refs(s); if (!nb_refs) { av_log(s->avctx, AV_LOG_ERROR, "Zero refs for a frame with P or B slices.\n"); return AVERROR_INVALIDDATA; } if (s->ps.pps->lists_modification_present_flag && nb_refs > 1) { sh->rpl_modification_flag[0] = get_bits1(gb); if (sh->rpl_modification_flag[0]) { for (i = 0; i < sh->nb_refs[L0]; i++) sh->list_entry_lx[0][i] = get_bits(gb, av_ceil_log2(nb_refs)); } if (sh->slice_type == HEVC_SLICE_B) { sh->rpl_modification_flag[1] = get_bits1(gb); if (sh->rpl_modification_flag[1] == 1) for (i = 0; i < sh->nb_refs[L1]; i++) sh->list_entry_lx[1][i] = get_bits(gb, av_ceil_log2(nb_refs)); } } if (sh->slice_type == HEVC_SLICE_B) sh->mvd_l1_zero_flag = get_bits1(gb); if (s->ps.pps->cabac_init_present_flag) sh->cabac_init_flag = get_bits1(gb); else sh->cabac_init_flag = 0; sh->collocated_ref_idx = 0; if (sh->slice_temporal_mvp_enabled_flag) { sh->collocated_list = L0; if (sh->slice_type == HEVC_SLICE_B) sh->collocated_list = !get_bits1(gb); if (sh->nb_refs[sh->collocated_list] > 1) { sh->collocated_ref_idx 
= get_ue_golomb_long(gb); if (sh->collocated_ref_idx >= sh->nb_refs[sh->collocated_list]) { av_log(s->avctx, AV_LOG_ERROR, "Invalid collocated_ref_idx: %d.\n", sh->collocated_ref_idx); return AVERROR_INVALIDDATA; } } } if ((s->ps.pps->weighted_pred_flag && sh->slice_type == HEVC_SLICE_P) || (s->ps.pps->weighted_bipred_flag && sh->slice_type == HEVC_SLICE_B)) { int ret = pred_weight_table(s, gb); if (ret < 0) return ret; } sh->max_num_merge_cand = 5 - get_ue_golomb_long(gb); if (sh->max_num_merge_cand < 1 || sh->max_num_merge_cand > 5) { av_log(s->avctx, AV_LOG_ERROR, "Invalid number of merging MVP candidates: %d.\n", sh->max_num_merge_cand); return AVERROR_INVALIDDATA; } } sh->slice_qp_delta = get_se_golomb(gb); if (s->ps.pps->pic_slice_level_chroma_qp_offsets_present_flag) { sh->slice_cb_qp_offset = get_se_golomb(gb); sh->slice_cr_qp_offset = get_se_golomb(gb); } else { sh->slice_cb_qp_offset = 0; sh->slice_cr_qp_offset = 0; } if (s->ps.pps->chroma_qp_offset_list_enabled_flag) sh->cu_chroma_qp_offset_enabled_flag = get_bits1(gb); else sh->cu_chroma_qp_offset_enabled_flag = 0; if (s->ps.pps->deblocking_filter_control_present_flag) { int deblocking_filter_override_flag = 0; if (s->ps.pps->deblocking_filter_override_enabled_flag) deblocking_filter_override_flag = get_bits1(gb); if (deblocking_filter_override_flag) { sh->disable_deblocking_filter_flag = get_bits1(gb); if (!sh->disable_deblocking_filter_flag) { int beta_offset_div2 = get_se_golomb(gb); int tc_offset_div2 = get_se_golomb(gb) ; if (beta_offset_div2 < -6 || beta_offset_div2 > 6 || tc_offset_div2 < -6 || tc_offset_div2 > 6) { av_log(s->avctx, AV_LOG_ERROR, "Invalid deblock filter offsets: %d, %d\n", beta_offset_div2, tc_offset_div2); return AVERROR_INVALIDDATA; } sh->beta_offset = beta_offset_div2 * 2; sh->tc_offset = tc_offset_div2 * 2; } } else { sh->disable_deblocking_filter_flag = s->ps.pps->disable_dbf; sh->beta_offset = s->ps.pps->beta_offset; sh->tc_offset = s->ps.pps->tc_offset; } } else { 
sh->disable_deblocking_filter_flag = 0; sh->beta_offset = 0; sh->tc_offset = 0; } if (s->ps.pps->seq_loop_filter_across_slices_enabled_flag && (sh->slice_sample_adaptive_offset_flag[0] || sh->slice_sample_adaptive_offset_flag[1] || !sh->disable_deblocking_filter_flag)) { sh->slice_loop_filter_across_slices_enabled_flag = get_bits1(gb); } else { sh->slice_loop_filter_across_slices_enabled_flag = s->ps.pps->seq_loop_filter_across_slices_enabled_flag; } } else if (!s->slice_initialized) { av_log(s->avctx, AV_LOG_ERROR, "Independent slice segment missing.\n"); return AVERROR_INVALIDDATA; } sh->num_entry_point_offsets = 0; if (s->ps.pps->tiles_enabled_flag || s->ps.pps->entropy_coding_sync_enabled_flag) { unsigned num_entry_point_offsets = get_ue_golomb_long(gb); // It would be possible to bound this tighter but this here is simpler if (num_entry_point_offsets > get_bits_left(gb)) { av_log(s->avctx, AV_LOG_ERROR, "num_entry_point_offsets %d is invalid\n", num_entry_point_offsets); return AVERROR_INVALIDDATA; } sh->num_entry_point_offsets = num_entry_point_offsets; if (sh->num_entry_point_offsets > 0) { int offset_len = get_ue_golomb_long(gb) + 1; if (offset_len < 1 || offset_len > 32) { sh->num_entry_point_offsets = 0; av_log(s->avctx, AV_LOG_ERROR, "offset_len %d is invalid\n", offset_len); return AVERROR_INVALIDDATA; } av_freep(&sh->entry_point_offset); av_freep(&sh->offset); av_freep(&sh->size); sh->entry_point_offset = av_malloc_array(sh->num_entry_point_offsets, sizeof(unsigned)); sh->offset = av_malloc_array(sh->num_entry_point_offsets, sizeof(int)); sh->size = av_malloc_array(sh->num_entry_point_offsets, sizeof(int)); if (!sh->entry_point_offset || !sh->offset || !sh->size) { sh->num_entry_point_offsets = 0; av_log(s->avctx, AV_LOG_ERROR, "Failed to allocate memory\n"); return AVERROR(ENOMEM); } for (i = 0; i < sh->num_entry_point_offsets; i++) { unsigned val = get_bits_long(gb, offset_len); sh->entry_point_offset[i] = val + 1; // +1; // +1 to get the size } if 
(s->threads_number > 1 && (s->ps.pps->num_tile_rows > 1 || s->ps.pps->num_tile_columns > 1)) { s->enable_parallel_tiles = 0; // TODO: you can enable tiles in parallel here s->threads_number = 1; } else s->enable_parallel_tiles = 0; } else s->enable_parallel_tiles = 0; } if (s->ps.pps->slice_header_extension_present_flag) { unsigned int length = get_ue_golomb_long(gb); if (length*8LL > get_bits_left(gb)) { av_log(s->avctx, AV_LOG_ERROR, "too many slice_header_extension_data_bytes\n"); return AVERROR_INVALIDDATA; } for (i = 0; i < length; i++) skip_bits(gb, 8); // slice_header_extension_data_byte } // Inferred parameters sh->slice_qp = 26U + s->ps.pps->pic_init_qp_minus26 + sh->slice_qp_delta; if (sh->slice_qp > 51 || sh->slice_qp < -s->ps.sps->qp_bd_offset) { av_log(s->avctx, AV_LOG_ERROR, "The slice_qp %d is outside the valid range " "[%d, 51].\n", sh->slice_qp, -s->ps.sps->qp_bd_offset); return AVERROR_INVALIDDATA; } sh->slice_ctb_addr_rs = sh->slice_segment_addr; if (!s->sh.slice_ctb_addr_rs && s->sh.dependent_slice_segment_flag) { av_log(s->avctx, AV_LOG_ERROR, "Impossible slice segment.\n"); return AVERROR_INVALIDDATA; } if (get_bits_left(gb) < 0) { av_log(s->avctx, AV_LOG_ERROR, "Overread slice header by %d bits\n", -get_bits_left(gb)); return AVERROR_INVALIDDATA; } s->HEVClc->first_qp_group = !s->sh.dependent_slice_segment_flag; if (!s->ps.pps->cu_qp_delta_enabled_flag) s->HEVClc->qp_y = s->sh.slice_qp; s->slice_initialized = 1; s->HEVClc->tu.cu_qp_offset_cb = 0; s->HEVClc->tu.cu_qp_offset_cr = 0; return 0; } #define CTB(tab, x, y) ((tab)[(y) * s->ps.sps->ctb_width + (x)]) #define SET_SAO(elem, value) \ do { \ if (!sao_merge_up_flag && !sao_merge_left_flag) \ sao->elem = value; \ else if (sao_merge_left_flag) \ sao->elem = CTB(s->sao, rx-1, ry).elem; \ else if (sao_merge_up_flag) \ sao->elem = CTB(s->sao, rx, ry-1).elem; \ else \ sao->elem = 0; \ } while (0) static void hls_sao_param(HEVCContext *s, int rx, int ry) { HEVCLocalContext *lc = s->HEVClc; int 
sao_merge_left_flag = 0; /* completes the "int sao_merge_left_flag" declaration begun on the previous extracted line */
    int sao_merge_up_flag   = 0;
    SAOParams *sao          = &CTB(s->sao, rx, ry);
    int c_idx, i;

    /* SAO merge flags are only parsed when SAO is enabled for luma or chroma,
     * and only from neighbouring CTBs that are available (ctb_left/up_flag). */
    if (s->sh.slice_sample_adaptive_offset_flag[0] ||
        s->sh.slice_sample_adaptive_offset_flag[1]) {
        if (rx > 0) {
            if (lc->ctb_left_flag)
                sao_merge_left_flag = ff_hevc_sao_merge_flag_decode(s);
        }
        if (ry > 0 && !sao_merge_left_flag) {
            if (lc->ctb_up_flag)
                sao_merge_up_flag = ff_hevc_sao_merge_flag_decode(s);
        }
    }

    /* One pass per colour component (luma only when chroma_format_idc == 0). */
    for (c_idx = 0; c_idx < (s->ps.sps->chroma_format_idc ? 3 : 1); c_idx++) {
        int log2_sao_offset_scale = c_idx == 0 ? s->ps.pps->log2_sao_offset_scale_luma :
                                                 s->ps.pps->log2_sao_offset_scale_chroma;

        if (!s->sh.slice_sample_adaptive_offset_flag[c_idx]) {
            sao->type_idx[c_idx] = SAO_NOT_APPLIED;
            continue;
        }

        /* Cr (c_idx == 2) reuses the type and EO class parsed for Cb. */
        if (c_idx == 2) {
            sao->type_idx[2] = sao->type_idx[1];
            sao->eo_class[2] = sao->eo_class[1];
        } else {
            SET_SAO(type_idx[c_idx], ff_hevc_sao_type_idx_decode(s));
        }

        if (sao->type_idx[c_idx] == SAO_NOT_APPLIED)
            continue;

        for (i = 0; i < 4; i++)
            SET_SAO(offset_abs[c_idx][i], ff_hevc_sao_offset_abs_decode(s));

        if (sao->type_idx[c_idx] == SAO_BAND) {
            /* Band offsets: a sign is only coded for non-zero magnitudes. */
            for (i = 0; i < 4; i++) {
                if (sao->offset_abs[c_idx][i]) {
                    SET_SAO(offset_sign[c_idx][i],
                            ff_hevc_sao_offset_sign_decode(s));
                } else {
                    sao->offset_sign[c_idx][i] = 0;
                }
            }
            SET_SAO(band_position[c_idx], ff_hevc_sao_band_position_decode(s));
        } else if (c_idx != 2) {
            SET_SAO(eo_class[c_idx], ff_hevc_sao_eo_class_decode(s));
        }

        // Inferred parameters
        sao->offset_val[c_idx][0] = 0;
        for (i = 0; i < 4; i++) {
            sao->offset_val[c_idx][i + 1] = sao->offset_abs[c_idx][i];
            if (sao->type_idx[c_idx] == SAO_EDGE) {
                /* Edge offsets: the last two offsets are implicitly negative. */
                if (i > 1)
                    sao->offset_val[c_idx][i + 1] = -sao->offset_val[c_idx][i + 1];
            } else if (sao->offset_sign[c_idx][i]) {
                sao->offset_val[c_idx][i + 1] = -sao->offset_val[c_idx][i + 1];
            }
            sao->offset_val[c_idx][i + 1] *= 1 << log2_sao_offset_scale;
        }
    }
}

#undef SET_SAO
#undef CTB

/* Parse cross-component prediction syntax for chroma component idx and derive
 * lc->tu.res_scale_val. NOTE: body continues on the next extracted line. */
static int hls_cross_component_pred(HEVCContext *s, int idx) {
    HEVCLocalContext *lc = s->HEVClc;
    int log2_res_scale_abs_plus1 = ff_hevc_log2_res_scale_abs(s,
idx); if (log2_res_scale_abs_plus1 != 0) { int res_scale_sign_flag = ff_hevc_res_scale_sign_flag(s, idx); lc->tu.res_scale_val = (1 << (log2_res_scale_abs_plus1 - 1)) * (1 - 2 * res_scale_sign_flag); } else { lc->tu.res_scale_val = 0; } return 0; } static int hls_transform_unit(HEVCContext *s, int x0, int y0, int xBase, int yBase, int cb_xBase, int cb_yBase, int log2_cb_size, int log2_trafo_size, int blk_idx, int cbf_luma, int *cbf_cb, int *cbf_cr) { HEVCLocalContext *lc = s->HEVClc; const int log2_trafo_size_c = log2_trafo_size - s->ps.sps->hshift[1]; int i; if (lc->cu.pred_mode == MODE_INTRA) { int trafo_size = 1 << log2_trafo_size; ff_hevc_set_neighbour_available(s, x0, y0, trafo_size, trafo_size); s->hpc.intra_pred[log2_trafo_size - 2](s, x0, y0, 0); } if (cbf_luma || cbf_cb[0] || cbf_cr[0] || (s->ps.sps->chroma_format_idc == 2 && (cbf_cb[1] || cbf_cr[1]))) { int scan_idx = SCAN_DIAG; int scan_idx_c = SCAN_DIAG; int cbf_chroma = cbf_cb[0] || cbf_cr[0] || (s->ps.sps->chroma_format_idc == 2 && (cbf_cb[1] || cbf_cr[1])); if (s->ps.pps->cu_qp_delta_enabled_flag && !lc->tu.is_cu_qp_delta_coded) { lc->tu.cu_qp_delta = ff_hevc_cu_qp_delta_abs(s); if (lc->tu.cu_qp_delta != 0) if (ff_hevc_cu_qp_delta_sign_flag(s) == 1) lc->tu.cu_qp_delta = -lc->tu.cu_qp_delta; lc->tu.is_cu_qp_delta_coded = 1; if (lc->tu.cu_qp_delta < -(26 + s->ps.sps->qp_bd_offset / 2) || lc->tu.cu_qp_delta > (25 + s->ps.sps->qp_bd_offset / 2)) { av_log(s->avctx, AV_LOG_ERROR, "The cu_qp_delta %d is outside the valid range " "[%d, %d].\n", lc->tu.cu_qp_delta, -(26 + s->ps.sps->qp_bd_offset / 2), (25 + s->ps.sps->qp_bd_offset / 2)); return AVERROR_INVALIDDATA; } ff_hevc_set_qPy(s, cb_xBase, cb_yBase, log2_cb_size); } if (s->sh.cu_chroma_qp_offset_enabled_flag && cbf_chroma && !lc->cu.cu_transquant_bypass_flag && !lc->tu.is_cu_chroma_qp_offset_coded) { int cu_chroma_qp_offset_flag = ff_hevc_cu_chroma_qp_offset_flag(s); if (cu_chroma_qp_offset_flag) { int cu_chroma_qp_offset_idx = 0; if 
(s->ps.pps->chroma_qp_offset_list_len_minus1 > 0) { cu_chroma_qp_offset_idx = ff_hevc_cu_chroma_qp_offset_idx(s); av_log(s->avctx, AV_LOG_ERROR, "cu_chroma_qp_offset_idx not yet tested.\n"); } lc->tu.cu_qp_offset_cb = s->ps.pps->cb_qp_offset_list[cu_chroma_qp_offset_idx]; lc->tu.cu_qp_offset_cr = s->ps.pps->cr_qp_offset_list[cu_chroma_qp_offset_idx]; } else { lc->tu.cu_qp_offset_cb = 0; lc->tu.cu_qp_offset_cr = 0; } lc->tu.is_cu_chroma_qp_offset_coded = 1; } if (lc->cu.pred_mode == MODE_INTRA && log2_trafo_size < 4) { if (lc->tu.intra_pred_mode >= 6 && lc->tu.intra_pred_mode <= 14) { scan_idx = SCAN_VERT; } else if (lc->tu.intra_pred_mode >= 22 && lc->tu.intra_pred_mode <= 30) { scan_idx = SCAN_HORIZ; } if (lc->tu.intra_pred_mode_c >= 6 && lc->tu.intra_pred_mode_c <= 14) { scan_idx_c = SCAN_VERT; } else if (lc->tu.intra_pred_mode_c >= 22 && lc->tu.intra_pred_mode_c <= 30) { scan_idx_c = SCAN_HORIZ; } } lc->tu.cross_pf = 0; if (cbf_luma) ff_hevc_hls_residual_coding(s, x0, y0, log2_trafo_size, scan_idx, 0); if (s->ps.sps->chroma_format_idc && (log2_trafo_size > 2 || s->ps.sps->chroma_format_idc == 3)) { int trafo_size_h = 1 << (log2_trafo_size_c + s->ps.sps->hshift[1]); int trafo_size_v = 1 << (log2_trafo_size_c + s->ps.sps->vshift[1]); lc->tu.cross_pf = (s->ps.pps->cross_component_prediction_enabled_flag && cbf_luma && (lc->cu.pred_mode == MODE_INTER || (lc->tu.chroma_mode_c == 4))); if (lc->tu.cross_pf) { hls_cross_component_pred(s, 0); } for (i = 0; i < (s->ps.sps->chroma_format_idc == 2 ? 
2 : 1); i++) { if (lc->cu.pred_mode == MODE_INTRA) { ff_hevc_set_neighbour_available(s, x0, y0 + (i << log2_trafo_size_c), trafo_size_h, trafo_size_v); s->hpc.intra_pred[log2_trafo_size_c - 2](s, x0, y0 + (i << log2_trafo_size_c), 1); } if (cbf_cb[i]) ff_hevc_hls_residual_coding(s, x0, y0 + (i << log2_trafo_size_c), log2_trafo_size_c, scan_idx_c, 1); else if (lc->tu.cross_pf) { ptrdiff_t stride = s->frame->linesize[1]; int hshift = s->ps.sps->hshift[1]; int vshift = s->ps.sps->vshift[1]; int16_t *coeffs_y = (int16_t*)lc->edge_emu_buffer; int16_t *coeffs = (int16_t*)lc->edge_emu_buffer2; int size = 1 << log2_trafo_size_c; uint8_t *dst = &s->frame->data[1][(y0 >> vshift) * stride + ((x0 >> hshift) << s->ps.sps->pixel_shift)]; for (i = 0; i < (size * size); i++) { coeffs[i] = ((lc->tu.res_scale_val * coeffs_y[i]) >> 3); } s->hevcdsp.add_residual[log2_trafo_size_c-2](dst, coeffs, stride); } } if (lc->tu.cross_pf) { hls_cross_component_pred(s, 1); } for (i = 0; i < (s->ps.sps->chroma_format_idc == 2 ? 
2 : 1); i++) { if (lc->cu.pred_mode == MODE_INTRA) { ff_hevc_set_neighbour_available(s, x0, y0 + (i << log2_trafo_size_c), trafo_size_h, trafo_size_v); s->hpc.intra_pred[log2_trafo_size_c - 2](s, x0, y0 + (i << log2_trafo_size_c), 2); } if (cbf_cr[i]) ff_hevc_hls_residual_coding(s, x0, y0 + (i << log2_trafo_size_c), log2_trafo_size_c, scan_idx_c, 2); else if (lc->tu.cross_pf) { ptrdiff_t stride = s->frame->linesize[2]; int hshift = s->ps.sps->hshift[2]; int vshift = s->ps.sps->vshift[2]; int16_t *coeffs_y = (int16_t*)lc->edge_emu_buffer; int16_t *coeffs = (int16_t*)lc->edge_emu_buffer2; int size = 1 << log2_trafo_size_c; uint8_t *dst = &s->frame->data[2][(y0 >> vshift) * stride + ((x0 >> hshift) << s->ps.sps->pixel_shift)]; for (i = 0; i < (size * size); i++) { coeffs[i] = ((lc->tu.res_scale_val * coeffs_y[i]) >> 3); } s->hevcdsp.add_residual[log2_trafo_size_c-2](dst, coeffs, stride); } } } else if (s->ps.sps->chroma_format_idc && blk_idx == 3) { int trafo_size_h = 1 << (log2_trafo_size + 1); int trafo_size_v = 1 << (log2_trafo_size + s->ps.sps->vshift[1]); for (i = 0; i < (s->ps.sps->chroma_format_idc == 2 ? 2 : 1); i++) { if (lc->cu.pred_mode == MODE_INTRA) { ff_hevc_set_neighbour_available(s, xBase, yBase + (i << log2_trafo_size), trafo_size_h, trafo_size_v); s->hpc.intra_pred[log2_trafo_size - 2](s, xBase, yBase + (i << log2_trafo_size), 1); } if (cbf_cb[i]) ff_hevc_hls_residual_coding(s, xBase, yBase + (i << log2_trafo_size), log2_trafo_size, scan_idx_c, 1); } for (i = 0; i < (s->ps.sps->chroma_format_idc == 2 ? 
2 : 1); i++) { if (lc->cu.pred_mode == MODE_INTRA) { ff_hevc_set_neighbour_available(s, xBase, yBase + (i << log2_trafo_size), trafo_size_h, trafo_size_v); s->hpc.intra_pred[log2_trafo_size - 2](s, xBase, yBase + (i << log2_trafo_size), 2); } if (cbf_cr[i]) ff_hevc_hls_residual_coding(s, xBase, yBase + (i << log2_trafo_size), log2_trafo_size, scan_idx_c, 2); } } } else if (s->ps.sps->chroma_format_idc && lc->cu.pred_mode == MODE_INTRA) { if (log2_trafo_size > 2 || s->ps.sps->chroma_format_idc == 3) { int trafo_size_h = 1 << (log2_trafo_size_c + s->ps.sps->hshift[1]); int trafo_size_v = 1 << (log2_trafo_size_c + s->ps.sps->vshift[1]); ff_hevc_set_neighbour_available(s, x0, y0, trafo_size_h, trafo_size_v); s->hpc.intra_pred[log2_trafo_size_c - 2](s, x0, y0, 1); s->hpc.intra_pred[log2_trafo_size_c - 2](s, x0, y0, 2); if (s->ps.sps->chroma_format_idc == 2) { ff_hevc_set_neighbour_available(s, x0, y0 + (1 << log2_trafo_size_c), trafo_size_h, trafo_size_v); s->hpc.intra_pred[log2_trafo_size_c - 2](s, x0, y0 + (1 << log2_trafo_size_c), 1); s->hpc.intra_pred[log2_trafo_size_c - 2](s, x0, y0 + (1 << log2_trafo_size_c), 2); } } else if (blk_idx == 3) { int trafo_size_h = 1 << (log2_trafo_size + 1); int trafo_size_v = 1 << (log2_trafo_size + s->ps.sps->vshift[1]); ff_hevc_set_neighbour_available(s, xBase, yBase, trafo_size_h, trafo_size_v); s->hpc.intra_pred[log2_trafo_size - 2](s, xBase, yBase, 1); s->hpc.intra_pred[log2_trafo_size - 2](s, xBase, yBase, 2); if (s->ps.sps->chroma_format_idc == 2) { ff_hevc_set_neighbour_available(s, xBase, yBase + (1 << (log2_trafo_size)), trafo_size_h, trafo_size_v); s->hpc.intra_pred[log2_trafo_size - 2](s, xBase, yBase + (1 << (log2_trafo_size)), 1); s->hpc.intra_pred[log2_trafo_size - 2](s, xBase, yBase + (1 << (log2_trafo_size)), 2); } } } return 0; } static void set_deblocking_bypass(HEVCContext *s, int x0, int y0, int log2_cb_size) { int cb_size = 1 << log2_cb_size; int log2_min_pu_size = s->ps.sps->log2_min_pu_size; int min_pu_width = 
s->ps.sps->min_pu_width; /* completes the "int min_pu_width =" declaration begun on the previous extracted line */
    int x_end = FFMIN(x0 + cb_size, s->ps.sps->width);
    int y_end = FFMIN(y0 + cb_size, s->ps.sps->height);
    int i, j;

    /* Tag every min-PU position covered by the (frame-clipped) coding block
     * with the value 2 in is_pcm, which marks it as transquant-bypassed. */
    for (j = (y0 >> log2_min_pu_size); j < (y_end >> log2_min_pu_size); j++)
        for (i = (x0 >> log2_min_pu_size); i < (x_end >> log2_min_pu_size); i++)
            s->is_pcm[i + j * min_pu_width] = 2;
}

/*
 * Recursively parse one transform-tree (residual quad-tree) node at (x0, y0).
 * Decodes or infers split_transform_flag and the chroma CBFs at this depth,
 * then either recurses into four sub-nodes or decodes a leaf transform unit.
 * Returns 0 on success or a negative error code.
 */
static int hls_transform_tree(HEVCContext *s, int x0, int y0,
                              int xBase, int yBase, int cb_xBase, int cb_yBase,
                              int log2_cb_size, int log2_trafo_size,
                              int trafo_depth, int blk_idx,
                              const int *base_cbf_cb, const int *base_cbf_cr)
{
    HEVCLocalContext *lc = s->HEVClc;
    uint8_t split_transform_flag;
    int cbf_cb[2];
    int cbf_cr[2];
    int ret;

    /* Local copies of the parent's chroma CBFs; may be overwritten below. */
    cbf_cb[0] = base_cbf_cb[0];
    cbf_cb[1] = base_cbf_cb[1];
    cbf_cr[0] = base_cbf_cr[0];
    cbf_cr[1] = base_cbf_cr[1];

    /* Select the intra prediction modes used by this node; with intra NxN
     * splitting the per-block modes take effect at depth 1. */
    if (lc->cu.intra_split_flag) {
        if (trafo_depth == 1) {
            lc->tu.intra_pred_mode = lc->pu.intra_pred_mode[blk_idx];
            if (s->ps.sps->chroma_format_idc == 3) {
                lc->tu.intra_pred_mode_c = lc->pu.intra_pred_mode_c[blk_idx];
                lc->tu.chroma_mode_c     = lc->pu.chroma_mode_c[blk_idx];
            } else {
                lc->tu.intra_pred_mode_c = lc->pu.intra_pred_mode_c[0];
                lc->tu.chroma_mode_c     = lc->pu.chroma_mode_c[0];
            }
        }
    } else {
        lc->tu.intra_pred_mode   = lc->pu.intra_pred_mode[0];
        lc->tu.intra_pred_mode_c = lc->pu.intra_pred_mode_c[0];
        lc->tu.chroma_mode_c     = lc->pu.chroma_mode_c[0];
    }

    /* split_transform_flag is parsed from the bitstream when the size/depth
     * constraints allow a choice, otherwise it is inferred. */
    if (log2_trafo_size <= s->ps.sps->log2_max_trafo_size &&
        log2_trafo_size >  s->ps.sps->log2_min_tb_size    &&
        trafo_depth     <  lc->cu.max_trafo_depth         &&
        !(lc->cu.intra_split_flag && trafo_depth == 0)) {
        split_transform_flag = ff_hevc_split_transform_flag_decode(s, log2_trafo_size);
    } else {
        int inter_split = s->ps.sps->max_transform_hierarchy_depth_inter == 0 &&
                          lc->cu.pred_mode == MODE_INTER &&
                          lc->cu.part_mode != PART_2Nx2N &&
                          trafo_depth == 0;

        split_transform_flag = log2_trafo_size > s->ps.sps->log2_max_trafo_size ||
                               (lc->cu.intra_split_flag && trafo_depth == 0) ||
                               inter_split;
    }

    /* Chroma CBFs: parsed only when chroma is present and the transform is
     * large enough (or 4:4:4). NOTE: the condition continues on the next
     * extracted line. */
    if (s->ps.sps->chroma_format_idc && (log2_trafo_size > 2 || s->ps.sps->chroma_format_idc ==
3)) { if (trafo_depth == 0 || cbf_cb[0]) { cbf_cb[0] = ff_hevc_cbf_cb_cr_decode(s, trafo_depth); if (s->ps.sps->chroma_format_idc == 2 && (!split_transform_flag || log2_trafo_size == 3)) { cbf_cb[1] = ff_hevc_cbf_cb_cr_decode(s, trafo_depth); } } if (trafo_depth == 0 || cbf_cr[0]) { cbf_cr[0] = ff_hevc_cbf_cb_cr_decode(s, trafo_depth); if (s->ps.sps->chroma_format_idc == 2 && (!split_transform_flag || log2_trafo_size == 3)) { cbf_cr[1] = ff_hevc_cbf_cb_cr_decode(s, trafo_depth); } } } if (split_transform_flag) { const int trafo_size_split = 1 << (log2_trafo_size - 1); const int x1 = x0 + trafo_size_split; const int y1 = y0 + trafo_size_split; #define SUBDIVIDE(x, y, idx) \ do { \ ret = hls_transform_tree(s, x, y, x0, y0, cb_xBase, cb_yBase, log2_cb_size, \ log2_trafo_size - 1, trafo_depth + 1, idx, \ cbf_cb, cbf_cr); \ if (ret < 0) \ return ret; \ } while (0) SUBDIVIDE(x0, y0, 0); SUBDIVIDE(x1, y0, 1); SUBDIVIDE(x0, y1, 2); SUBDIVIDE(x1, y1, 3); #undef SUBDIVIDE } else { int min_tu_size = 1 << s->ps.sps->log2_min_tb_size; int log2_min_tu_size = s->ps.sps->log2_min_tb_size; int min_tu_width = s->ps.sps->min_tb_width; int cbf_luma = 1; if (lc->cu.pred_mode == MODE_INTRA || trafo_depth != 0 || cbf_cb[0] || cbf_cr[0] || (s->ps.sps->chroma_format_idc == 2 && (cbf_cb[1] || cbf_cr[1]))) { cbf_luma = ff_hevc_cbf_luma_decode(s, trafo_depth); } ret = hls_transform_unit(s, x0, y0, xBase, yBase, cb_xBase, cb_yBase, log2_cb_size, log2_trafo_size, blk_idx, cbf_luma, cbf_cb, cbf_cr); if (ret < 0) return ret; // TODO: store cbf_luma somewhere else if (cbf_luma) { int i, j; for (i = 0; i < (1 << log2_trafo_size); i += min_tu_size) for (j = 0; j < (1 << log2_trafo_size); j += min_tu_size) { int x_tu = (x0 + j) >> log2_min_tu_size; int y_tu = (y0 + i) >> log2_min_tu_size; s->cbf_luma[y_tu * min_tu_width + x_tu] = 1; } } if (!s->sh.disable_deblocking_filter_flag) { ff_hevc_deblocking_boundary_strengths(s, x0, y0, log2_trafo_size); if (s->ps.pps->transquant_bypass_enable_flag && 
lc->cu.cu_transquant_bypass_flag) /* completes the condition begun on the previous extracted line */
                set_deblocking_bypass(s, x0, y0, log2_trafo_size);
        }
    }
    return 0;
}

/*
 * Decode one PCM-coded block: raw luma (and, when chroma is present, chroma)
 * samples are read directly from the bitstream and written into the frame.
 * Returns 0 on success or a negative error code from init_get_bits().
 */
static int hls_pcm_sample(HEVCContext *s, int x0, int y0, int log2_cb_size)
{
    HEVCLocalContext *lc = s->HEVClc;
    GetBitContext gb;
    int cb_size = 1 << log2_cb_size;
    ptrdiff_t stride0 = s->frame->linesize[0];
    ptrdiff_t stride1 = s->frame->linesize[1];
    ptrdiff_t stride2 = s->frame->linesize[2];
    /* Destination pointers for the Y, Cb and Cr planes at (x0, y0),
     * adjusted for chroma subsampling (hshift/vshift) and sample size. */
    uint8_t *dst0 = &s->frame->data[0][y0 * stride0 + (x0 << s->ps.sps->pixel_shift)];
    uint8_t *dst1 = &s->frame->data[1][(y0 >> s->ps.sps->vshift[1]) * stride1 +
                                       ((x0 >> s->ps.sps->hshift[1]) << s->ps.sps->pixel_shift)];
    uint8_t *dst2 = &s->frame->data[2][(y0 >> s->ps.sps->vshift[2]) * stride2 +
                                       ((x0 >> s->ps.sps->hshift[2]) << s->ps.sps->pixel_shift)];

    /* Total PCM payload size in bits: luma plus both chroma planes. */
    int length = cb_size * cb_size * s->ps.sps->pcm.bit_depth +
                 (((cb_size >> s->ps.sps->hshift[1]) * (cb_size >> s->ps.sps->vshift[1])) +
                  ((cb_size >> s->ps.sps->hshift[2]) * (cb_size >> s->ps.sps->vshift[2]))) *
                 s->ps.sps->pcm.bit_depth_chroma;
    const uint8_t *pcm = skip_bytes(&lc->cc, (length + 7) >> 3);
    int ret;

    if (!s->sh.disable_deblocking_filter_flag)
        ff_hevc_deblocking_boundary_strengths(s, x0, y0, log2_cb_size);

    ret = init_get_bits(&gb, pcm, length);
    if (ret < 0)
        return ret;

    s->hevcdsp.put_pcm(dst0, stride0, cb_size, cb_size, &gb, s->ps.sps->pcm.bit_depth);
    if (s->ps.sps->chroma_format_idc) {
        s->hevcdsp.put_pcm(dst1, stride1,
                           cb_size >> s->ps.sps->hshift[1],
                           cb_size >> s->ps.sps->vshift[1],
                           &gb, s->ps.sps->pcm.bit_depth_chroma);
        s->hevcdsp.put_pcm(dst2, stride2,
                           cb_size >> s->ps.sps->hshift[2],
                           cb_size >> s->ps.sps->vshift[2],
                           &gb, s->ps.sps->pcm.bit_depth_chroma);
    }
    return 0;
}

/**
 * 8.5.3.2.2.1 Luma sample unidirectional interpolation process
 *
 * @param s HEVC decoding context
 * @param dst target buffer for block data at block position
 * @param dststride stride of the dst buffer
 * @param ref reference picture buffer at origin (0, 0)
 * @param mv motion vector (relative to block position) to get pixel data from
 * @param x_off
horizontal position of block from origin (0, 0) * @param y_off vertical position of block from origin (0, 0) * @param block_w width of block * @param block_h height of block * @param luma_weight weighting factor applied to the luma prediction * @param luma_offset additive offset applied to the luma prediction value */ static void luma_mc_uni(HEVCContext *s, uint8_t *dst, ptrdiff_t dststride, AVFrame *ref, const Mv *mv, int x_off, int y_off, int block_w, int block_h, int luma_weight, int luma_offset) { HEVCLocalContext *lc = s->HEVClc; uint8_t *src = ref->data[0]; ptrdiff_t srcstride = ref->linesize[0]; int pic_width = s->ps.sps->width; int pic_height = s->ps.sps->height; int mx = mv->x & 3; int my = mv->y & 3; int weight_flag = (s->sh.slice_type == HEVC_SLICE_P && s->ps.pps->weighted_pred_flag) || (s->sh.slice_type == HEVC_SLICE_B && s->ps.pps->weighted_bipred_flag); int idx = ff_hevc_pel_weight[block_w]; x_off += mv->x >> 2; y_off += mv->y >> 2; src += y_off * srcstride + (x_off * (1 << s->ps.sps->pixel_shift)); if (x_off < QPEL_EXTRA_BEFORE || y_off < QPEL_EXTRA_AFTER || x_off >= pic_width - block_w - QPEL_EXTRA_AFTER || y_off >= pic_height - block_h - QPEL_EXTRA_AFTER) { const ptrdiff_t edge_emu_stride = EDGE_EMU_BUFFER_STRIDE << s->ps.sps->pixel_shift; int offset = QPEL_EXTRA_BEFORE * srcstride + (QPEL_EXTRA_BEFORE << s->ps.sps->pixel_shift); int buf_offset = QPEL_EXTRA_BEFORE * edge_emu_stride + (QPEL_EXTRA_BEFORE << s->ps.sps->pixel_shift); s->vdsp.emulated_edge_mc(lc->edge_emu_buffer, src - offset, edge_emu_stride, srcstride, block_w + QPEL_EXTRA, block_h + QPEL_EXTRA, x_off - QPEL_EXTRA_BEFORE, y_off - QPEL_EXTRA_BEFORE, pic_width, pic_height); src = lc->edge_emu_buffer + buf_offset; srcstride = edge_emu_stride; } if (!weight_flag) s->hevcdsp.put_hevc_qpel_uni[idx][!!my][!!mx](dst, dststride, src, srcstride, block_h, mx, my, block_w); else s->hevcdsp.put_hevc_qpel_uni_w[idx][!!my][!!mx](dst, dststride, src, srcstride, block_h, s->sh.luma_log2_weight_denom, 
luma_weight, luma_offset, mx, my, block_w); } /** * 8.5.3.2.2.1 Luma sample bidirectional interpolation process * * @param s HEVC decoding context * @param dst target buffer for block data at block position * @param dststride stride of the dst buffer * @param ref0 reference picture0 buffer at origin (0, 0) * @param mv0 motion vector0 (relative to block position) to get pixel data from * @param x_off horizontal position of block from origin (0, 0) * @param y_off vertical position of block from origin (0, 0) * @param block_w width of block * @param block_h height of block * @param ref1 reference picture1 buffer at origin (0, 0) * @param mv1 motion vector1 (relative to block position) to get pixel data from * @param current_mv current motion vector structure */ static void luma_mc_bi(HEVCContext *s, uint8_t *dst, ptrdiff_t dststride, AVFrame *ref0, const Mv *mv0, int x_off, int y_off, int block_w, int block_h, AVFrame *ref1, const Mv *mv1, struct MvField *current_mv) { HEVCLocalContext *lc = s->HEVClc; ptrdiff_t src0stride = ref0->linesize[0]; ptrdiff_t src1stride = ref1->linesize[0]; int pic_width = s->ps.sps->width; int pic_height = s->ps.sps->height; int mx0 = mv0->x & 3; int my0 = mv0->y & 3; int mx1 = mv1->x & 3; int my1 = mv1->y & 3; int weight_flag = (s->sh.slice_type == HEVC_SLICE_P && s->ps.pps->weighted_pred_flag) || (s->sh.slice_type == HEVC_SLICE_B && s->ps.pps->weighted_bipred_flag); int x_off0 = x_off + (mv0->x >> 2); int y_off0 = y_off + (mv0->y >> 2); int x_off1 = x_off + (mv1->x >> 2); int y_off1 = y_off + (mv1->y >> 2); int idx = ff_hevc_pel_weight[block_w]; uint8_t *src0 = ref0->data[0] + y_off0 * src0stride + (int)((unsigned)x_off0 << s->ps.sps->pixel_shift); uint8_t *src1 = ref1->data[0] + y_off1 * src1stride + (int)((unsigned)x_off1 << s->ps.sps->pixel_shift); if (x_off0 < QPEL_EXTRA_BEFORE || y_off0 < QPEL_EXTRA_AFTER || x_off0 >= pic_width - block_w - QPEL_EXTRA_AFTER || y_off0 >= pic_height - block_h - QPEL_EXTRA_AFTER) { const ptrdiff_t 
edge_emu_stride = EDGE_EMU_BUFFER_STRIDE << s->ps.sps->pixel_shift; int offset = QPEL_EXTRA_BEFORE * src0stride + (QPEL_EXTRA_BEFORE << s->ps.sps->pixel_shift); int buf_offset = QPEL_EXTRA_BEFORE * edge_emu_stride + (QPEL_EXTRA_BEFORE << s->ps.sps->pixel_shift); s->vdsp.emulated_edge_mc(lc->edge_emu_buffer, src0 - offset, edge_emu_stride, src0stride, block_w + QPEL_EXTRA, block_h + QPEL_EXTRA, x_off0 - QPEL_EXTRA_BEFORE, y_off0 - QPEL_EXTRA_BEFORE, pic_width, pic_height); src0 = lc->edge_emu_buffer + buf_offset; src0stride = edge_emu_stride; } if (x_off1 < QPEL_EXTRA_BEFORE || y_off1 < QPEL_EXTRA_AFTER || x_off1 >= pic_width - block_w - QPEL_EXTRA_AFTER || y_off1 >= pic_height - block_h - QPEL_EXTRA_AFTER) { const ptrdiff_t edge_emu_stride = EDGE_EMU_BUFFER_STRIDE << s->ps.sps->pixel_shift; int offset = QPEL_EXTRA_BEFORE * src1stride + (QPEL_EXTRA_BEFORE << s->ps.sps->pixel_shift); int buf_offset = QPEL_EXTRA_BEFORE * edge_emu_stride + (QPEL_EXTRA_BEFORE << s->ps.sps->pixel_shift); s->vdsp.emulated_edge_mc(lc->edge_emu_buffer2, src1 - offset, edge_emu_stride, src1stride, block_w + QPEL_EXTRA, block_h + QPEL_EXTRA, x_off1 - QPEL_EXTRA_BEFORE, y_off1 - QPEL_EXTRA_BEFORE, pic_width, pic_height); src1 = lc->edge_emu_buffer2 + buf_offset; src1stride = edge_emu_stride; } s->hevcdsp.put_hevc_qpel[idx][!!my0][!!mx0](lc->tmp, src0, src0stride, block_h, mx0, my0, block_w); if (!weight_flag) s->hevcdsp.put_hevc_qpel_bi[idx][!!my1][!!mx1](dst, dststride, src1, src1stride, lc->tmp, block_h, mx1, my1, block_w); else s->hevcdsp.put_hevc_qpel_bi_w[idx][!!my1][!!mx1](dst, dststride, src1, src1stride, lc->tmp, block_h, s->sh.luma_log2_weight_denom, s->sh.luma_weight_l0[current_mv->ref_idx[0]], s->sh.luma_weight_l1[current_mv->ref_idx[1]], s->sh.luma_offset_l0[current_mv->ref_idx[0]], s->sh.luma_offset_l1[current_mv->ref_idx[1]], mx1, my1, block_w); } /** * 8.5.3.2.2.2 Chroma sample uniprediction interpolation process * * @param s HEVC decoding context * @param dst1 target buffer for 
block data at block position (U plane) * @param dst2 target buffer for block data at block position (V plane) * @param dststride stride of the dst1 and dst2 buffers * @param ref reference picture buffer at origin (0, 0) * @param mv motion vector (relative to block position) to get pixel data from * @param x_off horizontal position of block from origin (0, 0) * @param y_off vertical position of block from origin (0, 0) * @param block_w width of block * @param block_h height of block * @param chroma_weight weighting factor applied to the chroma prediction * @param chroma_offset additive offset applied to the chroma prediction value */ static void chroma_mc_uni(HEVCContext *s, uint8_t *dst0, ptrdiff_t dststride, uint8_t *src0, ptrdiff_t srcstride, int reflist, int x_off, int y_off, int block_w, int block_h, struct MvField *current_mv, int chroma_weight, int chroma_offset) { HEVCLocalContext *lc = s->HEVClc; int pic_width = s->ps.sps->width >> s->ps.sps->hshift[1]; int pic_height = s->ps.sps->height >> s->ps.sps->vshift[1]; const Mv *mv = &current_mv->mv[reflist]; int weight_flag = (s->sh.slice_type == HEVC_SLICE_P && s->ps.pps->weighted_pred_flag) || (s->sh.slice_type == HEVC_SLICE_B && s->ps.pps->weighted_bipred_flag); int idx = ff_hevc_pel_weight[block_w]; int hshift = s->ps.sps->hshift[1]; int vshift = s->ps.sps->vshift[1]; intptr_t mx = av_mod_uintp2(mv->x, 2 + hshift); intptr_t my = av_mod_uintp2(mv->y, 2 + vshift); intptr_t _mx = mx << (1 - hshift); intptr_t _my = my << (1 - vshift); x_off += mv->x >> (2 + hshift); y_off += mv->y >> (2 + vshift); src0 += y_off * srcstride + (x_off * (1 << s->ps.sps->pixel_shift)); if (x_off < EPEL_EXTRA_BEFORE || y_off < EPEL_EXTRA_AFTER || x_off >= pic_width - block_w - EPEL_EXTRA_AFTER || y_off >= pic_height - block_h - EPEL_EXTRA_AFTER) { const int edge_emu_stride = EDGE_EMU_BUFFER_STRIDE << s->ps.sps->pixel_shift; int offset0 = EPEL_EXTRA_BEFORE * (srcstride + (1 << s->ps.sps->pixel_shift)); int buf_offset0 = 
EPEL_EXTRA_BEFORE * (edge_emu_stride + (1 << s->ps.sps->pixel_shift)); s->vdsp.emulated_edge_mc(lc->edge_emu_buffer, src0 - offset0, edge_emu_stride, srcstride, block_w + EPEL_EXTRA, block_h + EPEL_EXTRA, x_off - EPEL_EXTRA_BEFORE, y_off - EPEL_EXTRA_BEFORE, pic_width, pic_height); src0 = lc->edge_emu_buffer + buf_offset0; srcstride = edge_emu_stride; } if (!weight_flag) s->hevcdsp.put_hevc_epel_uni[idx][!!my][!!mx](dst0, dststride, src0, srcstride, block_h, _mx, _my, block_w); else s->hevcdsp.put_hevc_epel_uni_w[idx][!!my][!!mx](dst0, dststride, src0, srcstride, block_h, s->sh.chroma_log2_weight_denom, chroma_weight, chroma_offset, _mx, _my, block_w); } /** * 8.5.3.2.2.2 Chroma sample bidirectional interpolation process * * @param s HEVC decoding context * @param dst target buffer for block data at block position * @param dststride stride of the dst buffer * @param ref0 reference picture0 buffer at origin (0, 0) * @param mv0 motion vector0 (relative to block position) to get pixel data from * @param x_off horizontal position of block from origin (0, 0) * @param y_off vertical position of block from origin (0, 0) * @param block_w width of block * @param block_h height of block * @param ref1 reference picture1 buffer at origin (0, 0) * @param mv1 motion vector1 (relative to block position) to get pixel data from * @param current_mv current motion vector structure * @param cidx chroma component(cb, cr) */ static void chroma_mc_bi(HEVCContext *s, uint8_t *dst0, ptrdiff_t dststride, AVFrame *ref0, AVFrame *ref1, int x_off, int y_off, int block_w, int block_h, struct MvField *current_mv, int cidx) { HEVCLocalContext *lc = s->HEVClc; uint8_t *src1 = ref0->data[cidx+1]; uint8_t *src2 = ref1->data[cidx+1]; ptrdiff_t src1stride = ref0->linesize[cidx+1]; ptrdiff_t src2stride = ref1->linesize[cidx+1]; int weight_flag = (s->sh.slice_type == HEVC_SLICE_P && s->ps.pps->weighted_pred_flag) || (s->sh.slice_type == HEVC_SLICE_B && s->ps.pps->weighted_bipred_flag); int pic_width = 
s->ps.sps->width >> s->ps.sps->hshift[1]; int pic_height = s->ps.sps->height >> s->ps.sps->vshift[1]; Mv *mv0 = &current_mv->mv[0]; Mv *mv1 = &current_mv->mv[1]; int hshift = s->ps.sps->hshift[1]; int vshift = s->ps.sps->vshift[1]; intptr_t mx0 = av_mod_uintp2(mv0->x, 2 + hshift); intptr_t my0 = av_mod_uintp2(mv0->y, 2 + vshift); intptr_t mx1 = av_mod_uintp2(mv1->x, 2 + hshift); intptr_t my1 = av_mod_uintp2(mv1->y, 2 + vshift); intptr_t _mx0 = mx0 << (1 - hshift); intptr_t _my0 = my0 << (1 - vshift); intptr_t _mx1 = mx1 << (1 - hshift); intptr_t _my1 = my1 << (1 - vshift); int x_off0 = x_off + (mv0->x >> (2 + hshift)); int y_off0 = y_off + (mv0->y >> (2 + vshift)); int x_off1 = x_off + (mv1->x >> (2 + hshift)); int y_off1 = y_off + (mv1->y >> (2 + vshift)); int idx = ff_hevc_pel_weight[block_w]; src1 += y_off0 * src1stride + (int)((unsigned)x_off0 << s->ps.sps->pixel_shift); src2 += y_off1 * src2stride + (int)((unsigned)x_off1 << s->ps.sps->pixel_shift); if (x_off0 < EPEL_EXTRA_BEFORE || y_off0 < EPEL_EXTRA_AFTER || x_off0 >= pic_width - block_w - EPEL_EXTRA_AFTER || y_off0 >= pic_height - block_h - EPEL_EXTRA_AFTER) { const int edge_emu_stride = EDGE_EMU_BUFFER_STRIDE << s->ps.sps->pixel_shift; int offset1 = EPEL_EXTRA_BEFORE * (src1stride + (1 << s->ps.sps->pixel_shift)); int buf_offset1 = EPEL_EXTRA_BEFORE * (edge_emu_stride + (1 << s->ps.sps->pixel_shift)); s->vdsp.emulated_edge_mc(lc->edge_emu_buffer, src1 - offset1, edge_emu_stride, src1stride, block_w + EPEL_EXTRA, block_h + EPEL_EXTRA, x_off0 - EPEL_EXTRA_BEFORE, y_off0 - EPEL_EXTRA_BEFORE, pic_width, pic_height); src1 = lc->edge_emu_buffer + buf_offset1; src1stride = edge_emu_stride; } if (x_off1 < EPEL_EXTRA_BEFORE || y_off1 < EPEL_EXTRA_AFTER || x_off1 >= pic_width - block_w - EPEL_EXTRA_AFTER || y_off1 >= pic_height - block_h - EPEL_EXTRA_AFTER) { const int edge_emu_stride = EDGE_EMU_BUFFER_STRIDE << s->ps.sps->pixel_shift; int offset1 = EPEL_EXTRA_BEFORE * (src2stride + (1 << s->ps.sps->pixel_shift)); 
int buf_offset1 = EPEL_EXTRA_BEFORE * (edge_emu_stride + (1 << s->ps.sps->pixel_shift)); s->vdsp.emulated_edge_mc(lc->edge_emu_buffer2, src2 - offset1, edge_emu_stride, src2stride, block_w + EPEL_EXTRA, block_h + EPEL_EXTRA, x_off1 - EPEL_EXTRA_BEFORE, y_off1 - EPEL_EXTRA_BEFORE, pic_width, pic_height); src2 = lc->edge_emu_buffer2 + buf_offset1; src2stride = edge_emu_stride; } s->hevcdsp.put_hevc_epel[idx][!!my0][!!mx0](lc->tmp, src1, src1stride, block_h, _mx0, _my0, block_w); if (!weight_flag) s->hevcdsp.put_hevc_epel_bi[idx][!!my1][!!mx1](dst0, s->frame->linesize[cidx+1], src2, src2stride, lc->tmp, block_h, _mx1, _my1, block_w); else s->hevcdsp.put_hevc_epel_bi_w[idx][!!my1][!!mx1](dst0, s->frame->linesize[cidx+1], src2, src2stride, lc->tmp, block_h, s->sh.chroma_log2_weight_denom, s->sh.chroma_weight_l0[current_mv->ref_idx[0]][cidx], s->sh.chroma_weight_l1[current_mv->ref_idx[1]][cidx], s->sh.chroma_offset_l0[current_mv->ref_idx[0]][cidx], s->sh.chroma_offset_l1[current_mv->ref_idx[1]][cidx], _mx1, _my1, block_w); } static void hevc_await_progress(HEVCContext *s, HEVCFrame *ref, const Mv *mv, int y0, int height) { if (s->threads_type == FF_THREAD_FRAME ) { int y = FFMAX(0, (mv->y >> 2) + y0 + height + 9); ff_thread_await_progress(&ref->tf, y, 0); } } static void hevc_luma_mv_mvp_mode(HEVCContext *s, int x0, int y0, int nPbW, int nPbH, int log2_cb_size, int part_idx, int merge_idx, MvField *mv) { HEVCLocalContext *lc = s->HEVClc; enum InterPredIdc inter_pred_idc = PRED_L0; int mvp_flag; ff_hevc_set_neighbour_available(s, x0, y0, nPbW, nPbH); mv->pred_flag = 0; if (s->sh.slice_type == HEVC_SLICE_B) inter_pred_idc = ff_hevc_inter_pred_idc_decode(s, nPbW, nPbH); if (inter_pred_idc != PRED_L1) { if (s->sh.nb_refs[L0]) mv->ref_idx[0]= ff_hevc_ref_idx_lx_decode(s, s->sh.nb_refs[L0]); mv->pred_flag = PF_L0; ff_hevc_hls_mvd_coding(s, x0, y0, 0); mvp_flag = ff_hevc_mvp_lx_flag_decode(s); ff_hevc_luma_mv_mvp_mode(s, x0, y0, nPbW, nPbH, log2_cb_size, part_idx, merge_idx, 
mv, mvp_flag, 0); mv->mv[0].x += lc->pu.mvd.x; mv->mv[0].y += lc->pu.mvd.y; } if (inter_pred_idc != PRED_L0) { if (s->sh.nb_refs[L1]) mv->ref_idx[1]= ff_hevc_ref_idx_lx_decode(s, s->sh.nb_refs[L1]); if (s->sh.mvd_l1_zero_flag == 1 && inter_pred_idc == PRED_BI) { AV_ZERO32(&lc->pu.mvd); } else { ff_hevc_hls_mvd_coding(s, x0, y0, 1); } mv->pred_flag += PF_L1; mvp_flag = ff_hevc_mvp_lx_flag_decode(s); ff_hevc_luma_mv_mvp_mode(s, x0, y0, nPbW, nPbH, log2_cb_size, part_idx, merge_idx, mv, mvp_flag, 1); mv->mv[1].x += lc->pu.mvd.x; mv->mv[1].y += lc->pu.mvd.y; } } static void hls_prediction_unit(HEVCContext *s, int x0, int y0, int nPbW, int nPbH, int log2_cb_size, int partIdx, int idx) { #define POS(c_idx, x, y) \ &s->frame->data[c_idx][((y) >> s->ps.sps->vshift[c_idx]) * s->frame->linesize[c_idx] + \ (((x) >> s->ps.sps->hshift[c_idx]) << s->ps.sps->pixel_shift)] HEVCLocalContext *lc = s->HEVClc; int merge_idx = 0; struct MvField current_mv = {{{ 0 }}}; int min_pu_width = s->ps.sps->min_pu_width; MvField *tab_mvf = s->ref->tab_mvf; RefPicList *refPicList = s->ref->refPicList; HEVCFrame *ref0 = NULL, *ref1 = NULL; uint8_t *dst0 = POS(0, x0, y0); uint8_t *dst1 = POS(1, x0, y0); uint8_t *dst2 = POS(2, x0, y0); int log2_min_cb_size = s->ps.sps->log2_min_cb_size; int min_cb_width = s->ps.sps->min_cb_width; int x_cb = x0 >> log2_min_cb_size; int y_cb = y0 >> log2_min_cb_size; int x_pu, y_pu; int i, j; int skip_flag = SAMPLE_CTB(s->skip_flag, x_cb, y_cb); if (!skip_flag) lc->pu.merge_flag = ff_hevc_merge_flag_decode(s); if (skip_flag || lc->pu.merge_flag) { if (s->sh.max_num_merge_cand > 1) merge_idx = ff_hevc_merge_idx_decode(s); else merge_idx = 0; ff_hevc_luma_mv_merge_mode(s, x0, y0, nPbW, nPbH, log2_cb_size, partIdx, merge_idx, &current_mv); } else { hevc_luma_mv_mvp_mode(s, x0, y0, nPbW, nPbH, log2_cb_size, partIdx, merge_idx, &current_mv); } x_pu = x0 >> s->ps.sps->log2_min_pu_size; y_pu = y0 >> s->ps.sps->log2_min_pu_size; for (j = 0; j < nPbH >> 
s->ps.sps->log2_min_pu_size; j++) for (i = 0; i < nPbW >> s->ps.sps->log2_min_pu_size; i++) tab_mvf[(y_pu + j) * min_pu_width + x_pu + i] = current_mv; if (current_mv.pred_flag & PF_L0) { ref0 = refPicList[0].ref[current_mv.ref_idx[0]]; if (!ref0) return; hevc_await_progress(s, ref0, &current_mv.mv[0], y0, nPbH); } if (current_mv.pred_flag & PF_L1) { ref1 = refPicList[1].ref[current_mv.ref_idx[1]]; if (!ref1) return; hevc_await_progress(s, ref1, &current_mv.mv[1], y0, nPbH); } if (current_mv.pred_flag == PF_L0) { int x0_c = x0 >> s->ps.sps->hshift[1]; int y0_c = y0 >> s->ps.sps->vshift[1]; int nPbW_c = nPbW >> s->ps.sps->hshift[1]; int nPbH_c = nPbH >> s->ps.sps->vshift[1]; luma_mc_uni(s, dst0, s->frame->linesize[0], ref0->frame, &current_mv.mv[0], x0, y0, nPbW, nPbH, s->sh.luma_weight_l0[current_mv.ref_idx[0]], s->sh.luma_offset_l0[current_mv.ref_idx[0]]); if (s->ps.sps->chroma_format_idc) { chroma_mc_uni(s, dst1, s->frame->linesize[1], ref0->frame->data[1], ref0->frame->linesize[1], 0, x0_c, y0_c, nPbW_c, nPbH_c, &current_mv, s->sh.chroma_weight_l0[current_mv.ref_idx[0]][0], s->sh.chroma_offset_l0[current_mv.ref_idx[0]][0]); chroma_mc_uni(s, dst2, s->frame->linesize[2], ref0->frame->data[2], ref0->frame->linesize[2], 0, x0_c, y0_c, nPbW_c, nPbH_c, &current_mv, s->sh.chroma_weight_l0[current_mv.ref_idx[0]][1], s->sh.chroma_offset_l0[current_mv.ref_idx[0]][1]); } } else if (current_mv.pred_flag == PF_L1) { int x0_c = x0 >> s->ps.sps->hshift[1]; int y0_c = y0 >> s->ps.sps->vshift[1]; int nPbW_c = nPbW >> s->ps.sps->hshift[1]; int nPbH_c = nPbH >> s->ps.sps->vshift[1]; luma_mc_uni(s, dst0, s->frame->linesize[0], ref1->frame, &current_mv.mv[1], x0, y0, nPbW, nPbH, s->sh.luma_weight_l1[current_mv.ref_idx[1]], s->sh.luma_offset_l1[current_mv.ref_idx[1]]); if (s->ps.sps->chroma_format_idc) { chroma_mc_uni(s, dst1, s->frame->linesize[1], ref1->frame->data[1], ref1->frame->linesize[1], 1, x0_c, y0_c, nPbW_c, nPbH_c, &current_mv, 
s->sh.chroma_weight_l1[current_mv.ref_idx[1]][0], s->sh.chroma_offset_l1[current_mv.ref_idx[1]][0]); chroma_mc_uni(s, dst2, s->frame->linesize[2], ref1->frame->data[2], ref1->frame->linesize[2], 1, x0_c, y0_c, nPbW_c, nPbH_c, &current_mv, s->sh.chroma_weight_l1[current_mv.ref_idx[1]][1], s->sh.chroma_offset_l1[current_mv.ref_idx[1]][1]); } } else if (current_mv.pred_flag == PF_BI) { int x0_c = x0 >> s->ps.sps->hshift[1]; int y0_c = y0 >> s->ps.sps->vshift[1]; int nPbW_c = nPbW >> s->ps.sps->hshift[1]; int nPbH_c = nPbH >> s->ps.sps->vshift[1]; luma_mc_bi(s, dst0, s->frame->linesize[0], ref0->frame, &current_mv.mv[0], x0, y0, nPbW, nPbH, ref1->frame, &current_mv.mv[1], &current_mv); if (s->ps.sps->chroma_format_idc) { chroma_mc_bi(s, dst1, s->frame->linesize[1], ref0->frame, ref1->frame, x0_c, y0_c, nPbW_c, nPbH_c, &current_mv, 0); chroma_mc_bi(s, dst2, s->frame->linesize[2], ref0->frame, ref1->frame, x0_c, y0_c, nPbW_c, nPbH_c, &current_mv, 1); } } } /** * 8.4.1 */ static int luma_intra_pred_mode(HEVCContext *s, int x0, int y0, int pu_size, int prev_intra_luma_pred_flag) { HEVCLocalContext *lc = s->HEVClc; int x_pu = x0 >> s->ps.sps->log2_min_pu_size; int y_pu = y0 >> s->ps.sps->log2_min_pu_size; int min_pu_width = s->ps.sps->min_pu_width; int size_in_pus = pu_size >> s->ps.sps->log2_min_pu_size; int x0b = av_mod_uintp2(x0, s->ps.sps->log2_ctb_size); int y0b = av_mod_uintp2(y0, s->ps.sps->log2_ctb_size); int cand_up = (lc->ctb_up_flag || y0b) ? s->tab_ipm[(y_pu - 1) * min_pu_width + x_pu] : INTRA_DC; int cand_left = (lc->ctb_left_flag || x0b) ? 
s->tab_ipm[y_pu * min_pu_width + x_pu - 1] : INTRA_DC; int y_ctb = (y0 >> (s->ps.sps->log2_ctb_size)) << (s->ps.sps->log2_ctb_size); MvField *tab_mvf = s->ref->tab_mvf; int intra_pred_mode; int candidate[3]; int i, j; // intra_pred_mode prediction does not cross vertical CTB boundaries if ((y0 - 1) < y_ctb) cand_up = INTRA_DC; if (cand_left == cand_up) { if (cand_left < 2) { candidate[0] = INTRA_PLANAR; candidate[1] = INTRA_DC; candidate[2] = INTRA_ANGULAR_26; } else { candidate[0] = cand_left; candidate[1] = 2 + ((cand_left - 2 - 1 + 32) & 31); candidate[2] = 2 + ((cand_left - 2 + 1) & 31); } } else { candidate[0] = cand_left; candidate[1] = cand_up; if (candidate[0] != INTRA_PLANAR && candidate[1] != INTRA_PLANAR) { candidate[2] = INTRA_PLANAR; } else if (candidate[0] != INTRA_DC && candidate[1] != INTRA_DC) { candidate[2] = INTRA_DC; } else { candidate[2] = INTRA_ANGULAR_26; } } if (prev_intra_luma_pred_flag) { intra_pred_mode = candidate[lc->pu.mpm_idx]; } else { if (candidate[0] > candidate[1]) FFSWAP(uint8_t, candidate[0], candidate[1]); if (candidate[0] > candidate[2]) FFSWAP(uint8_t, candidate[0], candidate[2]); if (candidate[1] > candidate[2]) FFSWAP(uint8_t, candidate[1], candidate[2]); intra_pred_mode = lc->pu.rem_intra_luma_pred_mode; for (i = 0; i < 3; i++) if (intra_pred_mode >= candidate[i]) intra_pred_mode++; } /* write the intra prediction units into the mv array */ if (!size_in_pus) size_in_pus = 1; for (i = 0; i < size_in_pus; i++) { memset(&s->tab_ipm[(y_pu + i) * min_pu_width + x_pu], intra_pred_mode, size_in_pus); for (j = 0; j < size_in_pus; j++) { tab_mvf[(y_pu + j) * min_pu_width + x_pu + i].pred_flag = PF_INTRA; } } return intra_pred_mode; } static av_always_inline void set_ct_depth(HEVCContext *s, int x0, int y0, int log2_cb_size, int ct_depth) { int length = (1 << log2_cb_size) >> s->ps.sps->log2_min_cb_size; int x_cb = x0 >> s->ps.sps->log2_min_cb_size; int y_cb = y0 >> s->ps.sps->log2_min_cb_size; int y; for (y = 0; y < length; y++) 
memset(&s->tab_ct_depth[(y_cb + y) * s->ps.sps->min_cb_width + x_cb], ct_depth, length); } static const uint8_t tab_mode_idx[] = { 0, 1, 2, 2, 2, 2, 3, 5, 7, 8, 10, 12, 13, 15, 17, 18, 19, 20, 21, 22, 23, 23, 24, 24, 25, 25, 26, 27, 27, 28, 28, 29, 29, 30, 31}; static void intra_prediction_unit(HEVCContext *s, int x0, int y0, int log2_cb_size) { HEVCLocalContext *lc = s->HEVClc; static const uint8_t intra_chroma_table[4] = { 0, 26, 10, 1 }; uint8_t prev_intra_luma_pred_flag[4]; int split = lc->cu.part_mode == PART_NxN; int pb_size = (1 << log2_cb_size) >> split; int side = split + 1; int chroma_mode; int i, j; for (i = 0; i < side; i++) for (j = 0; j < side; j++) prev_intra_luma_pred_flag[2 * i + j] = ff_hevc_prev_intra_luma_pred_flag_decode(s); for (i = 0; i < side; i++) { for (j = 0; j < side; j++) { if (prev_intra_luma_pred_flag[2 * i + j]) lc->pu.mpm_idx = ff_hevc_mpm_idx_decode(s); else lc->pu.rem_intra_luma_pred_mode = ff_hevc_rem_intra_luma_pred_mode_decode(s); lc->pu.intra_pred_mode[2 * i + j] = luma_intra_pred_mode(s, x0 + pb_size * j, y0 + pb_size * i, pb_size, prev_intra_luma_pred_flag[2 * i + j]); } } if (s->ps.sps->chroma_format_idc == 3) { for (i = 0; i < side; i++) { for (j = 0; j < side; j++) { lc->pu.chroma_mode_c[2 * i + j] = chroma_mode = ff_hevc_intra_chroma_pred_mode_decode(s); if (chroma_mode != 4) { if (lc->pu.intra_pred_mode[2 * i + j] == intra_chroma_table[chroma_mode]) lc->pu.intra_pred_mode_c[2 * i + j] = 34; else lc->pu.intra_pred_mode_c[2 * i + j] = intra_chroma_table[chroma_mode]; } else { lc->pu.intra_pred_mode_c[2 * i + j] = lc->pu.intra_pred_mode[2 * i + j]; } } } } else if (s->ps.sps->chroma_format_idc == 2) { int mode_idx; lc->pu.chroma_mode_c[0] = chroma_mode = ff_hevc_intra_chroma_pred_mode_decode(s); if (chroma_mode != 4) { if (lc->pu.intra_pred_mode[0] == intra_chroma_table[chroma_mode]) mode_idx = 34; else mode_idx = intra_chroma_table[chroma_mode]; } else { mode_idx = lc->pu.intra_pred_mode[0]; } lc->pu.intra_pred_mode_c[0] 
= tab_mode_idx[mode_idx];
} else if (s->ps.sps->chroma_format_idc != 0) {
    /* 4:2:0: the decoded chroma mode index maps straight through the table,
     * with the special case that a chroma mode equal to the luma mode is
     * re-encoded as angular 34 */
    chroma_mode = ff_hevc_intra_chroma_pred_mode_decode(s);
    if (chroma_mode != 4) {
        if (lc->pu.intra_pred_mode[0] == intra_chroma_table[chroma_mode])
            lc->pu.intra_pred_mode_c[0] = 34;
        else
            lc->pu.intra_pred_mode_c[0] = intra_chroma_table[chroma_mode];
    } else {
        /* chroma mode 4 is the derived mode (DM): reuse the luma mode */
        lc->pu.intra_pred_mode_c[0] = lc->pu.intra_pred_mode[0];
    }
}
}

/*
 * Fill the per-PU tables for a CU with default intra values: every covered
 * entry of tab_ipm becomes INTRA_DC, and (for intra CUs only) the covered
 * MvField entries are flagged PF_INTRA so inter prediction treats them as
 * unavailable neighbours.
 */
static void intra_prediction_unit_default_value(HEVCContext *s,
                                                int x0, int y0,
                                                int log2_cb_size)
{
    HEVCLocalContext *lc = s->HEVClc;
    int pb_size          = 1 << log2_cb_size;
    int size_in_pus      = pb_size >> s->ps.sps->log2_min_pu_size;
    int min_pu_width     = s->ps.sps->min_pu_width;
    MvField *tab_mvf     = s->ref->tab_mvf;
    int x_pu             = x0 >> s->ps.sps->log2_min_pu_size;
    int y_pu             = y0 >> s->ps.sps->log2_min_pu_size;
    int j, k;

    /* a block smaller than one min PU still owns one table entry */
    if (size_in_pus == 0)
        size_in_pus = 1;
    for (j = 0; j < size_in_pus; j++)
        memset(&s->tab_ipm[(y_pu + j) * min_pu_width + x_pu], INTRA_DC, size_in_pus);
    if (lc->cu.pred_mode == MODE_INTRA)
        for (j = 0; j < size_in_pus; j++)
            for (k = 0; k < size_in_pus; k++)
                tab_mvf[(y_pu + j) * min_pu_width + x_pu + k].pred_flag = PF_INTRA;
}

/* Decode one coding unit: prediction mode, partitioning, intra/inter
 * prediction data, PCM samples or the residual transform tree. */
static int hls_coding_unit(HEVCContext *s, int x0, int y0, int log2_cb_size)
{
    int cb_size           = 1 << log2_cb_size;
    HEVCLocalContext *lc  = s->HEVClc;
    int log2_min_cb_size  = s->ps.sps->log2_min_cb_size;
    int length            = cb_size >> log2_min_cb_size;
    int min_cb_width      = s->ps.sps->min_cb_width;
    int x_cb              = x0 >> log2_min_cb_size;
    int y_cb              = y0 >> log2_min_cb_size;
    int idx               = log2_cb_size - 2;
    /* mask used to detect crossing into a new quantization group */
    int qp_block_mask     = (1<<(s->ps.sps->log2_ctb_size - s->ps.pps->diff_cu_qp_delta_depth)) - 1;
    int x, y, ret;

    lc->cu.x                = x0;
    lc->cu.y                = y0;
    lc->cu.pred_mode        = MODE_INTRA;
    lc->cu.part_mode        = PART_2Nx2N;
    lc->cu.intra_split_flag = 0;

    SAMPLE_CTB(s->skip_flag, x_cb, y_cb) = 0;
    for (x = 0; x < 4; x++)
        lc->pu.intra_pred_mode[x] = 1;
    if (s->ps.pps->transquant_bypass_enable_flag) {
        lc->cu.cu_transquant_bypass_flag = ff_hevc_cu_transquant_bypass_flag_decode(s);
        if (lc->cu.cu_transquant_bypass_flag)
set_deblocking_bypass(s, x0, y0, log2_cb_size); } else lc->cu.cu_transquant_bypass_flag = 0; if (s->sh.slice_type != HEVC_SLICE_I) { uint8_t skip_flag = ff_hevc_skip_flag_decode(s, x0, y0, x_cb, y_cb); x = y_cb * min_cb_width + x_cb; for (y = 0; y < length; y++) { memset(&s->skip_flag[x], skip_flag, length); x += min_cb_width; } lc->cu.pred_mode = skip_flag ? MODE_SKIP : MODE_INTER; } else { x = y_cb * min_cb_width + x_cb; for (y = 0; y < length; y++) { memset(&s->skip_flag[x], 0, length); x += min_cb_width; } } if (SAMPLE_CTB(s->skip_flag, x_cb, y_cb)) { hls_prediction_unit(s, x0, y0, cb_size, cb_size, log2_cb_size, 0, idx); intra_prediction_unit_default_value(s, x0, y0, log2_cb_size); if (!s->sh.disable_deblocking_filter_flag) ff_hevc_deblocking_boundary_strengths(s, x0, y0, log2_cb_size); } else { int pcm_flag = 0; if (s->sh.slice_type != HEVC_SLICE_I) lc->cu.pred_mode = ff_hevc_pred_mode_decode(s); if (lc->cu.pred_mode != MODE_INTRA || log2_cb_size == s->ps.sps->log2_min_cb_size) { lc->cu.part_mode = ff_hevc_part_mode_decode(s, log2_cb_size); lc->cu.intra_split_flag = lc->cu.part_mode == PART_NxN && lc->cu.pred_mode == MODE_INTRA; } if (lc->cu.pred_mode == MODE_INTRA) { if (lc->cu.part_mode == PART_2Nx2N && s->ps.sps->pcm_enabled_flag && log2_cb_size >= s->ps.sps->pcm.log2_min_pcm_cb_size && log2_cb_size <= s->ps.sps->pcm.log2_max_pcm_cb_size) { pcm_flag = ff_hevc_pcm_flag_decode(s); } if (pcm_flag) { intra_prediction_unit_default_value(s, x0, y0, log2_cb_size); ret = hls_pcm_sample(s, x0, y0, log2_cb_size); if (s->ps.sps->pcm.loop_filter_disable_flag) set_deblocking_bypass(s, x0, y0, log2_cb_size); if (ret < 0) return ret; } else { intra_prediction_unit(s, x0, y0, log2_cb_size); } } else { intra_prediction_unit_default_value(s, x0, y0, log2_cb_size); switch (lc->cu.part_mode) { case PART_2Nx2N: hls_prediction_unit(s, x0, y0, cb_size, cb_size, log2_cb_size, 0, idx); break; case PART_2NxN: hls_prediction_unit(s, x0, y0, cb_size, cb_size / 2, log2_cb_size, 0, 
idx); hls_prediction_unit(s, x0, y0 + cb_size / 2, cb_size, cb_size / 2, log2_cb_size, 1, idx); break; case PART_Nx2N: hls_prediction_unit(s, x0, y0, cb_size / 2, cb_size, log2_cb_size, 0, idx - 1); hls_prediction_unit(s, x0 + cb_size / 2, y0, cb_size / 2, cb_size, log2_cb_size, 1, idx - 1); break; case PART_2NxnU: hls_prediction_unit(s, x0, y0, cb_size, cb_size / 4, log2_cb_size, 0, idx); hls_prediction_unit(s, x0, y0 + cb_size / 4, cb_size, cb_size * 3 / 4, log2_cb_size, 1, idx); break; case PART_2NxnD: hls_prediction_unit(s, x0, y0, cb_size, cb_size * 3 / 4, log2_cb_size, 0, idx); hls_prediction_unit(s, x0, y0 + cb_size * 3 / 4, cb_size, cb_size / 4, log2_cb_size, 1, idx); break; case PART_nLx2N: hls_prediction_unit(s, x0, y0, cb_size / 4, cb_size, log2_cb_size, 0, idx - 2); hls_prediction_unit(s, x0 + cb_size / 4, y0, cb_size * 3 / 4, cb_size, log2_cb_size, 1, idx - 2); break; case PART_nRx2N: hls_prediction_unit(s, x0, y0, cb_size * 3 / 4, cb_size, log2_cb_size, 0, idx - 2); hls_prediction_unit(s, x0 + cb_size * 3 / 4, y0, cb_size / 4, cb_size, log2_cb_size, 1, idx - 2); break; case PART_NxN: hls_prediction_unit(s, x0, y0, cb_size / 2, cb_size / 2, log2_cb_size, 0, idx - 1); hls_prediction_unit(s, x0 + cb_size / 2, y0, cb_size / 2, cb_size / 2, log2_cb_size, 1, idx - 1); hls_prediction_unit(s, x0, y0 + cb_size / 2, cb_size / 2, cb_size / 2, log2_cb_size, 2, idx - 1); hls_prediction_unit(s, x0 + cb_size / 2, y0 + cb_size / 2, cb_size / 2, cb_size / 2, log2_cb_size, 3, idx - 1); break; } } if (!pcm_flag) { int rqt_root_cbf = 1; if (lc->cu.pred_mode != MODE_INTRA && !(lc->cu.part_mode == PART_2Nx2N && lc->pu.merge_flag)) { rqt_root_cbf = ff_hevc_no_residual_syntax_flag_decode(s); } if (rqt_root_cbf) { const static int cbf[2] = { 0 }; lc->cu.max_trafo_depth = lc->cu.pred_mode == MODE_INTRA ? 
s->ps.sps->max_transform_hierarchy_depth_intra + lc->cu.intra_split_flag : s->ps.sps->max_transform_hierarchy_depth_inter; ret = hls_transform_tree(s, x0, y0, x0, y0, x0, y0, log2_cb_size, log2_cb_size, 0, 0, cbf, cbf); if (ret < 0) return ret; } else { if (!s->sh.disable_deblocking_filter_flag) ff_hevc_deblocking_boundary_strengths(s, x0, y0, log2_cb_size); } } } if (s->ps.pps->cu_qp_delta_enabled_flag && lc->tu.is_cu_qp_delta_coded == 0) ff_hevc_set_qPy(s, x0, y0, log2_cb_size); x = y_cb * min_cb_width + x_cb; for (y = 0; y < length; y++) { memset(&s->qp_y_tab[x], lc->qp_y, length); x += min_cb_width; } if(((x0 + (1<<log2_cb_size)) & qp_block_mask) == 0 && ((y0 + (1<<log2_cb_size)) & qp_block_mask) == 0) { lc->qPy_pred = lc->qp_y; } set_ct_depth(s, x0, y0, log2_cb_size, lc->ct_depth); return 0; } static int hls_coding_quadtree(HEVCContext *s, int x0, int y0, int log2_cb_size, int cb_depth) { HEVCLocalContext *lc = s->HEVClc; const int cb_size = 1 << log2_cb_size; int ret; int split_cu; lc->ct_depth = cb_depth; if (x0 + cb_size <= s->ps.sps->width && y0 + cb_size <= s->ps.sps->height && log2_cb_size > s->ps.sps->log2_min_cb_size) { split_cu = ff_hevc_split_coding_unit_flag_decode(s, cb_depth, x0, y0); } else { split_cu = (log2_cb_size > s->ps.sps->log2_min_cb_size); } if (s->ps.pps->cu_qp_delta_enabled_flag && log2_cb_size >= s->ps.sps->log2_ctb_size - s->ps.pps->diff_cu_qp_delta_depth) { lc->tu.is_cu_qp_delta_coded = 0; lc->tu.cu_qp_delta = 0; } if (s->sh.cu_chroma_qp_offset_enabled_flag && log2_cb_size >= s->ps.sps->log2_ctb_size - s->ps.pps->diff_cu_chroma_qp_offset_depth) { lc->tu.is_cu_chroma_qp_offset_coded = 0; } if (split_cu) { int qp_block_mask = (1<<(s->ps.sps->log2_ctb_size - s->ps.pps->diff_cu_qp_delta_depth)) - 1; const int cb_size_split = cb_size >> 1; const int x1 = x0 + cb_size_split; const int y1 = y0 + cb_size_split; int more_data = 0; more_data = hls_coding_quadtree(s, x0, y0, log2_cb_size - 1, cb_depth + 1); if (more_data < 0) return more_data; 
if (more_data && x1 < s->ps.sps->width) {
    more_data = hls_coding_quadtree(s, x1, y0, log2_cb_size - 1, cb_depth + 1);
    if (more_data < 0)
        return more_data;
}
if (more_data && y1 < s->ps.sps->height) {
    more_data = hls_coding_quadtree(s, x0, y1, log2_cb_size - 1, cb_depth + 1);
    if (more_data < 0)
        return more_data;
}
if (more_data && x1 < s->ps.sps->width &&
    y1 < s->ps.sps->height) {
    more_data = hls_coding_quadtree(s, x1, y1, log2_cb_size - 1, cb_depth + 1);
    if (more_data < 0)
        return more_data;
}

/* re-seed the QP predictor when the recursion leaves a quantization group */
if(((x0 + (1<<log2_cb_size)) & qp_block_mask) == 0 &&
   ((y0 + (1<<log2_cb_size)) & qp_block_mask) == 0)
    lc->qPy_pred = lc->qp_y;

if (more_data)
    return ((x1 + cb_size_split) < s->ps.sps->width ||
            (y1 + cb_size_split) < s->ps.sps->height);
else
    return 0;
} else {
    ret = hls_coding_unit(s, x0, y0, log2_cb_size);
    if (ret < 0)
        return ret;
    /* end_of_slice_flag is only present after the last CU of a CTB, i.e.
     * when this CU touches a CTB boundary or the picture border */
    if ((!((x0 + cb_size) % (1 << (s->ps.sps->log2_ctb_size))) ||
         (x0 + cb_size >= s->ps.sps->width)) &&
        (!((y0 + cb_size) % (1 << (s->ps.sps->log2_ctb_size))) ||
         (y0 + cb_size >= s->ps.sps->height))) {
        int end_of_slice_flag = ff_hevc_end_of_slice_flag_decode(s);
        return !end_of_slice_flag;
    } else {
        return 1;
    }
}

return 0;
}

/*
 * Record per-CTB context before decoding: slice membership, tile/row
 * horizontal end position, and availability flags of the left/up/up-right/
 * up-left neighbouring CTBs (used for prediction and CABAC context).
 */
static void hls_decode_neighbour(HEVCContext *s, int x_ctb, int y_ctb,
                                 int ctb_addr_ts)
{
    HEVCLocalContext *lc  = s->HEVClc;
    int ctb_size          = 1 << s->ps.sps->log2_ctb_size;
    int ctb_addr_rs       = s->ps.pps->ctb_addr_ts_to_rs[ctb_addr_ts];
    int ctb_addr_in_slice = ctb_addr_rs - s->sh.slice_addr;

    s->tab_slice_address[ctb_addr_rs] = s->sh.slice_addr;

    if (s->ps.pps->entropy_coding_sync_enabled_flag) {
        /* WPP: each CTB row restarts the QP group at its first CTB */
        if (x_ctb == 0 && (y_ctb & (ctb_size - 1)) == 0)
            lc->first_qp_group = 1;
        lc->end_of_tiles_x = s->ps.sps->width;
    } else if (s->ps.pps->tiles_enabled_flag) {
        /* on entry into a new tile, recompute its right edge */
        if (ctb_addr_ts && s->ps.pps->tile_id[ctb_addr_ts] != s->ps.pps->tile_id[ctb_addr_ts - 1]) {
            int idxX = s->ps.pps->col_idxX[x_ctb >> s->ps.sps->log2_ctb_size];
            lc->end_of_tiles_x = x_ctb + (s->ps.pps->column_width[idxX] << s->ps.sps->log2_ctb_size);
            lc->first_qp_group = 1;
        }
    } else {
lc->end_of_tiles_x = s->ps.sps->width;
}

lc->end_of_tiles_y = FFMIN(y_ctb + ctb_size, s->ps.sps->height);

/* boundary_flags: mark which neighbouring CTBs lie across a tile or slice
 * border and are therefore unavailable for prediction */
lc->boundary_flags = 0;
if (s->ps.pps->tiles_enabled_flag) {
    if (x_ctb > 0 && s->ps.pps->tile_id[ctb_addr_ts] != s->ps.pps->tile_id[s->ps.pps->ctb_addr_rs_to_ts[ctb_addr_rs - 1]])
        lc->boundary_flags |= BOUNDARY_LEFT_TILE;
    if (x_ctb > 0 && s->tab_slice_address[ctb_addr_rs] != s->tab_slice_address[ctb_addr_rs - 1])
        lc->boundary_flags |= BOUNDARY_LEFT_SLICE;
    if (y_ctb > 0 && s->ps.pps->tile_id[ctb_addr_ts] != s->ps.pps->tile_id[s->ps.pps->ctb_addr_rs_to_ts[ctb_addr_rs - s->ps.sps->ctb_width]])
        lc->boundary_flags |= BOUNDARY_UPPER_TILE;
    if (y_ctb > 0 && s->tab_slice_address[ctb_addr_rs] != s->tab_slice_address[ctb_addr_rs - s->ps.sps->ctb_width])
        lc->boundary_flags |= BOUNDARY_UPPER_SLICE;
} else {
    if (ctb_addr_in_slice <= 0)
        lc->boundary_flags |= BOUNDARY_LEFT_SLICE;
    if (ctb_addr_in_slice < s->ps.sps->ctb_width)
        lc->boundary_flags |= BOUNDARY_UPPER_SLICE;
}

/* neighbour availability: inside the picture, inside the current slice
 * segment, and not across a tile boundary */
lc->ctb_left_flag = ((x_ctb > 0) && (ctb_addr_in_slice > 0) && !(lc->boundary_flags & BOUNDARY_LEFT_TILE));
lc->ctb_up_flag   = ((y_ctb > 0) && (ctb_addr_in_slice >= s->ps.sps->ctb_width) && !(lc->boundary_flags & BOUNDARY_UPPER_TILE));
lc->ctb_up_right_flag = ((y_ctb > 0)  && (ctb_addr_in_slice+1 >= s->ps.sps->ctb_width) && (s->ps.pps->tile_id[ctb_addr_ts] == s->ps.pps->tile_id[s->ps.pps->ctb_addr_rs_to_ts[ctb_addr_rs+1 - s->ps.sps->ctb_width]]));
lc->ctb_up_left_flag = ((x_ctb > 0) && (y_ctb > 0)  && (ctb_addr_in_slice-1 >= s->ps.sps->ctb_width) && (s->ps.pps->tile_id[ctb_addr_ts] == s->ps.pps->tile_id[s->ps.pps->ctb_addr_rs_to_ts[ctb_addr_rs-1 - s->ps.sps->ctb_width]]));
}

/*
 * Single-threaded slice decoding entry point (runs via avctx->execute):
 * walks the CTBs of the slice in tile-scan order, initializing CABAC, SAO
 * and deblocking parameters per CTB, then decoding its coding quadtree and
 * running the in-loop filters. Returns the final CTB address in tile-scan
 * order, or a negative error code.
 */
static int hls_decode_entry(AVCodecContext *avctxt, void *isFilterThread)
{
    HEVCContext *s  = avctxt->priv_data;
    int ctb_size    = 1 << s->ps.sps->log2_ctb_size;
    int more_data   = 1;
    int x_ctb       = 0;
    int y_ctb       = 0;
    int ctb_addr_ts = s->ps.pps->ctb_addr_rs_to_ts[s->sh.slice_ctb_addr_rs];
    int ret;

    /* a dependent slice segment cannot be the first segment of a picture */
    if (!ctb_addr_ts && s->sh.dependent_slice_segment_flag)
{ av_log(s->avctx, AV_LOG_ERROR, "Impossible initial tile.\n"); return AVERROR_INVALIDDATA; } if (s->sh.dependent_slice_segment_flag) { int prev_rs = s->ps.pps->ctb_addr_ts_to_rs[ctb_addr_ts - 1]; if (s->tab_slice_address[prev_rs] != s->sh.slice_addr) { av_log(s->avctx, AV_LOG_ERROR, "Previous slice segment missing\n"); return AVERROR_INVALIDDATA; } } while (more_data && ctb_addr_ts < s->ps.sps->ctb_size) { int ctb_addr_rs = s->ps.pps->ctb_addr_ts_to_rs[ctb_addr_ts]; x_ctb = (ctb_addr_rs % ((s->ps.sps->width + ctb_size - 1) >> s->ps.sps->log2_ctb_size)) << s->ps.sps->log2_ctb_size; y_ctb = (ctb_addr_rs / ((s->ps.sps->width + ctb_size - 1) >> s->ps.sps->log2_ctb_size)) << s->ps.sps->log2_ctb_size; hls_decode_neighbour(s, x_ctb, y_ctb, ctb_addr_ts); ret = ff_hevc_cabac_init(s, ctb_addr_ts); if (ret < 0) { s->tab_slice_address[ctb_addr_rs] = -1; return ret; } hls_sao_param(s, x_ctb >> s->ps.sps->log2_ctb_size, y_ctb >> s->ps.sps->log2_ctb_size); s->deblock[ctb_addr_rs].beta_offset = s->sh.beta_offset; s->deblock[ctb_addr_rs].tc_offset = s->sh.tc_offset; s->filter_slice_edges[ctb_addr_rs] = s->sh.slice_loop_filter_across_slices_enabled_flag; more_data = hls_coding_quadtree(s, x_ctb, y_ctb, s->ps.sps->log2_ctb_size, 0); if (more_data < 0) { s->tab_slice_address[ctb_addr_rs] = -1; return more_data; } ctb_addr_ts++; ff_hevc_save_states(s, ctb_addr_ts); ff_hevc_hls_filters(s, x_ctb, y_ctb, ctb_size); } if (x_ctb + ctb_size >= s->ps.sps->width && y_ctb + ctb_size >= s->ps.sps->height) ff_hevc_hls_filter(s, x_ctb, y_ctb, ctb_size); return ctb_addr_ts; } static int hls_slice_data(HEVCContext *s) { int arg[2]; int ret[2]; arg[0] = 0; arg[1] = 1; s->avctx->execute(s->avctx, hls_decode_entry, arg, ret , 1, sizeof(int)); return ret[0]; } static int hls_decode_entry_wpp(AVCodecContext *avctxt, void *input_ctb_row, int job, int self_id) { HEVCContext *s1 = avctxt->priv_data, *s; HEVCLocalContext *lc; int ctb_size = 1<< s1->ps.sps->log2_ctb_size; int more_data = 1; int *ctb_row_p = 
input_ctb_row; int ctb_row = ctb_row_p[job]; int ctb_addr_rs = s1->sh.slice_ctb_addr_rs + ctb_row * ((s1->ps.sps->width + ctb_size - 1) >> s1->ps.sps->log2_ctb_size); int ctb_addr_ts = s1->ps.pps->ctb_addr_rs_to_ts[ctb_addr_rs]; int thread = ctb_row % s1->threads_number; int ret; s = s1->sList[self_id]; lc = s->HEVClc; if(ctb_row) { ret = init_get_bits8(&lc->gb, s->data + s->sh.offset[ctb_row - 1], s->sh.size[ctb_row - 1]); if (ret < 0) goto error; ff_init_cabac_decoder(&lc->cc, s->data + s->sh.offset[(ctb_row)-1], s->sh.size[ctb_row - 1]); } while(more_data && ctb_addr_ts < s->ps.sps->ctb_size) { int x_ctb = (ctb_addr_rs % s->ps.sps->ctb_width) << s->ps.sps->log2_ctb_size; int y_ctb = (ctb_addr_rs / s->ps.sps->ctb_width) << s->ps.sps->log2_ctb_size; hls_decode_neighbour(s, x_ctb, y_ctb, ctb_addr_ts); ff_thread_await_progress2(s->avctx, ctb_row, thread, SHIFT_CTB_WPP); if (atomic_load(&s1->wpp_err)) { ff_thread_report_progress2(s->avctx, ctb_row , thread, SHIFT_CTB_WPP); return 0; } ret = ff_hevc_cabac_init(s, ctb_addr_ts); if (ret < 0) goto error; hls_sao_param(s, x_ctb >> s->ps.sps->log2_ctb_size, y_ctb >> s->ps.sps->log2_ctb_size); more_data = hls_coding_quadtree(s, x_ctb, y_ctb, s->ps.sps->log2_ctb_size, 0); if (more_data < 0) { ret = more_data; goto error; } ctb_addr_ts++; ff_hevc_save_states(s, ctb_addr_ts); ff_thread_report_progress2(s->avctx, ctb_row, thread, 1); ff_hevc_hls_filters(s, x_ctb, y_ctb, ctb_size); if (!more_data && (x_ctb+ctb_size) < s->ps.sps->width && ctb_row != s->sh.num_entry_point_offsets) { atomic_store(&s1->wpp_err, 1); ff_thread_report_progress2(s->avctx, ctb_row ,thread, SHIFT_CTB_WPP); return 0; } if ((x_ctb+ctb_size) >= s->ps.sps->width && (y_ctb+ctb_size) >= s->ps.sps->height ) { ff_hevc_hls_filter(s, x_ctb, y_ctb, ctb_size); ff_thread_report_progress2(s->avctx, ctb_row , thread, SHIFT_CTB_WPP); return ctb_addr_ts; } ctb_addr_rs = s->ps.pps->ctb_addr_ts_to_rs[ctb_addr_ts]; x_ctb+=ctb_size; if(x_ctb >= s->ps.sps->width) { break; } } 
ff_thread_report_progress2(s->avctx, ctb_row ,thread, SHIFT_CTB_WPP);
return 0;
error:
/* flag the failure and release any rows waiting on this one so the other
 * WPP worker threads do not deadlock */
/* NOTE(review): ctb_addr_rs is bounded only indirectly by the entry-point
 * sanity check in hls_slice_data_wpp - confirm it cannot index past
 * tab_slice_address for corrupted bitstreams */
s->tab_slice_address[ctb_addr_rs] = -1;
atomic_store(&s1->wpp_err, 1);
ff_thread_report_progress2(s->avctx, ctb_row ,thread, SHIFT_CTB_WPP);
return ret;
}

/*
 * Decode slice data using wavefront parallel processing: splits the slice
 * into per-CTB-row substreams at the signalled entry points (adjusting the
 * raw offsets for emulation-prevention bytes) and dispatches one
 * hls_decode_entry_wpp job per row. Returns the summed job results, or a
 * negative error code.
 */
static int hls_slice_data_wpp(HEVCContext *s, const H2645NAL *nal)
{
    const uint8_t *data  = nal->data;
    int length           = nal->size;
    HEVCLocalContext *lc = s->HEVClc;
    int *ret = av_malloc_array(s->sh.num_entry_point_offsets + 1, sizeof(int));
    int *arg = av_malloc_array(s->sh.num_entry_point_offsets + 1, sizeof(int));
    int64_t offset;
    int64_t startheader, cmpt = 0;
    int i, j, res = 0;

    if (!ret || !arg) {
        av_free(ret);
        av_free(arg);
        return AVERROR(ENOMEM);
    }

    /* reject slices whose entry points would address CTB rows beyond the
     * picture */
    if (s->sh.slice_ctb_addr_rs + s->sh.num_entry_point_offsets * s->ps.sps->ctb_width >= s->ps.sps->ctb_width * s->ps.sps->ctb_height) {
        av_log(s->avctx, AV_LOG_ERROR, "WPP ctb addresses are wrong (%d %d %d %d)\n",
               s->sh.slice_ctb_addr_rs, s->sh.num_entry_point_offsets,
               s->ps.sps->ctb_width, s->ps.sps->ctb_height
        );
        res = AVERROR_INVALIDDATA;
        goto error;
    }

    ff_alloc_entries(s->avctx, s->sh.num_entry_point_offsets + 1);

    /* lazily create per-thread context clones on first WPP use */
    if (!s->sList[1]) {
        /* NOTE(review): the av_malloc/av_mallocz results are used without a
         * NULL check; an allocation failure here would be dereferenced -
         * verify against current upstream handling */
        for (i = 1; i < s->threads_number; i++) {
            s->sList[i] = av_malloc(sizeof(HEVCContext));
            memcpy(s->sList[i], s, sizeof(HEVCContext));
            s->HEVClcList[i] = av_mallocz(sizeof(HEVCLocalContext));
            s->sList[i]->HEVClc = s->HEVClcList[i];
        }
    }

    offset = (lc->gb.index >> 3);

    /* entry_point_offset values refer to the raw NAL payload; count the
     * emulation-prevention bytes (skipped_bytes) inside each span so the
     * offsets can be remapped onto the unescaped buffer */
    for (j = 0, cmpt = 0, startheader = offset + s->sh.entry_point_offset[0]; j < nal->skipped_bytes; j++) {
        if (nal->skipped_bytes_pos[j] >= offset && nal->skipped_bytes_pos[j] < startheader) {
            startheader--;
            cmpt++;
        }
    }

    for (i = 1; i < s->sh.num_entry_point_offsets; i++) {
        offset += (s->sh.entry_point_offset[i - 1] - cmpt);
        for (j = 0, cmpt = 0, startheader = offset + s->sh.entry_point_offset[i]; j < nal->skipped_bytes; j++) {
            if (nal->skipped_bytes_pos[j] >= offset && nal->skipped_bytes_pos[j] < startheader) {
                startheader--;
                cmpt++;
            }
        }
        s->sh.size[i - 1] = s->sh.entry_point_offset[i] - cmpt;
s->sh.offset[i - 1] = offset; } if (s->sh.num_entry_point_offsets != 0) { offset += s->sh.entry_point_offset[s->sh.num_entry_point_offsets - 1] - cmpt; if (length < offset) { av_log(s->avctx, AV_LOG_ERROR, "entry_point_offset table is corrupted\n"); res = AVERROR_INVALIDDATA; goto error; } s->sh.size[s->sh.num_entry_point_offsets - 1] = length - offset; s->sh.offset[s->sh.num_entry_point_offsets - 1] = offset; } s->data = data; for (i = 1; i < s->threads_number; i++) { s->sList[i]->HEVClc->first_qp_group = 1; s->sList[i]->HEVClc->qp_y = s->sList[0]->HEVClc->qp_y; memcpy(s->sList[i], s, sizeof(HEVCContext)); s->sList[i]->HEVClc = s->HEVClcList[i]; } atomic_store(&s->wpp_err, 0); ff_reset_entries(s->avctx); for (i = 0; i <= s->sh.num_entry_point_offsets; i++) { arg[i] = i; ret[i] = 0; } if (s->ps.pps->entropy_coding_sync_enabled_flag) s->avctx->execute2(s->avctx, hls_decode_entry_wpp, arg, ret, s->sh.num_entry_point_offsets + 1); for (i = 0; i <= s->sh.num_entry_point_offsets; i++) res += ret[i]; error: av_free(ret); av_free(arg); return res; } static int set_side_data(HEVCContext *s) { AVFrame *out = s->ref->frame; if (s->sei.frame_packing.present && s->sei.frame_packing.arrangement_type >= 3 && s->sei.frame_packing.arrangement_type <= 5 && s->sei.frame_packing.content_interpretation_type > 0 && s->sei.frame_packing.content_interpretation_type < 3) { AVStereo3D *stereo = av_stereo3d_create_side_data(out); if (!stereo) return AVERROR(ENOMEM); switch (s->sei.frame_packing.arrangement_type) { case 3: if (s->sei.frame_packing.quincunx_subsampling) stereo->type = AV_STEREO3D_SIDEBYSIDE_QUINCUNX; else stereo->type = AV_STEREO3D_SIDEBYSIDE; break; case 4: stereo->type = AV_STEREO3D_TOPBOTTOM; break; case 5: stereo->type = AV_STEREO3D_FRAMESEQUENCE; break; } if (s->sei.frame_packing.content_interpretation_type == 2) stereo->flags = AV_STEREO3D_FLAG_INVERT; if (s->sei.frame_packing.arrangement_type == 5) { if (s->sei.frame_packing.current_frame_is_frame0_flag) stereo->view = 
AV_STEREO3D_VIEW_LEFT; else stereo->view = AV_STEREO3D_VIEW_RIGHT; } } if (s->sei.display_orientation.present && (s->sei.display_orientation.anticlockwise_rotation || s->sei.display_orientation.hflip || s->sei.display_orientation.vflip)) { double angle = s->sei.display_orientation.anticlockwise_rotation * 360 / (double) (1 << 16); AVFrameSideData *rotation = av_frame_new_side_data(out, AV_FRAME_DATA_DISPLAYMATRIX, sizeof(int32_t) * 9); if (!rotation) return AVERROR(ENOMEM); av_display_rotation_set((int32_t *)rotation->data, angle); av_display_matrix_flip((int32_t *)rotation->data, s->sei.display_orientation.hflip, s->sei.display_orientation.vflip); } // Decrement the mastering display flag when IRAP frame has no_rasl_output_flag=1 // so the side data persists for the entire coded video sequence. if (s->sei.mastering_display.present > 0 && IS_IRAP(s) && s->no_rasl_output_flag) { s->sei.mastering_display.present--; } if (s->sei.mastering_display.present) { // HEVC uses a g,b,r ordering, which we convert to a more natural r,g,b const int mapping[3] = {2, 0, 1}; const int chroma_den = 50000; const int luma_den = 10000; int i; AVMasteringDisplayMetadata *metadata = av_mastering_display_metadata_create_side_data(out); if (!metadata) return AVERROR(ENOMEM); for (i = 0; i < 3; i++) { const int j = mapping[i]; metadata->display_primaries[i][0].num = s->sei.mastering_display.display_primaries[j][0]; metadata->display_primaries[i][0].den = chroma_den; metadata->display_primaries[i][1].num = s->sei.mastering_display.display_primaries[j][1]; metadata->display_primaries[i][1].den = chroma_den; } metadata->white_point[0].num = s->sei.mastering_display.white_point[0]; metadata->white_point[0].den = chroma_den; metadata->white_point[1].num = s->sei.mastering_display.white_point[1]; metadata->white_point[1].den = chroma_den; metadata->max_luminance.num = s->sei.mastering_display.max_luminance; metadata->max_luminance.den = luma_den; metadata->min_luminance.num = 
s->sei.mastering_display.min_luminance; metadata->min_luminance.den = luma_den; metadata->has_luminance = 1; metadata->has_primaries = 1; av_log(s->avctx, AV_LOG_DEBUG, "Mastering Display Metadata:\n"); av_log(s->avctx, AV_LOG_DEBUG, "r(%5.4f,%5.4f) g(%5.4f,%5.4f) b(%5.4f %5.4f) wp(%5.4f, %5.4f)\n", av_q2d(metadata->display_primaries[0][0]), av_q2d(metadata->display_primaries[0][1]), av_q2d(metadata->display_primaries[1][0]), av_q2d(metadata->display_primaries[1][1]), av_q2d(metadata->display_primaries[2][0]), av_q2d(metadata->display_primaries[2][1]), av_q2d(metadata->white_point[0]), av_q2d(metadata->white_point[1])); av_log(s->avctx, AV_LOG_DEBUG, "min_luminance=%f, max_luminance=%f\n", av_q2d(metadata->min_luminance), av_q2d(metadata->max_luminance)); } // Decrement the mastering display flag when IRAP frame has no_rasl_output_flag=1 // so the side data persists for the entire coded video sequence. if (s->sei.content_light.present > 0 && IS_IRAP(s) && s->no_rasl_output_flag) { s->sei.content_light.present--; } if (s->sei.content_light.present) { AVContentLightMetadata *metadata = av_content_light_metadata_create_side_data(out); if (!metadata) return AVERROR(ENOMEM); metadata->MaxCLL = s->sei.content_light.max_content_light_level; metadata->MaxFALL = s->sei.content_light.max_pic_average_light_level; av_log(s->avctx, AV_LOG_DEBUG, "Content Light Level Metadata:\n"); av_log(s->avctx, AV_LOG_DEBUG, "MaxCLL=%d, MaxFALL=%d\n", metadata->MaxCLL, metadata->MaxFALL); } if (s->sei.a53_caption.a53_caption) { AVFrameSideData* sd = av_frame_new_side_data(out, AV_FRAME_DATA_A53_CC, s->sei.a53_caption.a53_caption_size); if (sd) memcpy(sd->data, s->sei.a53_caption.a53_caption, s->sei.a53_caption.a53_caption_size); av_freep(&s->sei.a53_caption.a53_caption); s->sei.a53_caption.a53_caption_size = 0; s->avctx->properties |= FF_CODEC_PROPERTY_CLOSED_CAPTIONS; } if (s->sei.alternative_transfer.present && 
av_color_transfer_name(s->sei.alternative_transfer.preferred_transfer_characteristics) && s->sei.alternative_transfer.preferred_transfer_characteristics != AVCOL_TRC_UNSPECIFIED) { s->avctx->color_trc = out->color_trc = s->sei.alternative_transfer.preferred_transfer_characteristics; } return 0; } static int hevc_frame_start(HEVCContext *s) { HEVCLocalContext *lc = s->HEVClc; int pic_size_in_ctb = ((s->ps.sps->width >> s->ps.sps->log2_min_cb_size) + 1) * ((s->ps.sps->height >> s->ps.sps->log2_min_cb_size) + 1); int ret; memset(s->horizontal_bs, 0, s->bs_width * s->bs_height); memset(s->vertical_bs, 0, s->bs_width * s->bs_height); memset(s->cbf_luma, 0, s->ps.sps->min_tb_width * s->ps.sps->min_tb_height); memset(s->is_pcm, 0, (s->ps.sps->min_pu_width + 1) * (s->ps.sps->min_pu_height + 1)); memset(s->tab_slice_address, -1, pic_size_in_ctb * sizeof(*s->tab_slice_address)); s->is_decoded = 0; s->first_nal_type = s->nal_unit_type; s->no_rasl_output_flag = IS_IDR(s) || IS_BLA(s) || (s->nal_unit_type == HEVC_NAL_CRA_NUT && s->last_eos); if (s->ps.pps->tiles_enabled_flag) lc->end_of_tiles_x = s->ps.pps->column_width[0] << s->ps.sps->log2_ctb_size; ret = ff_hevc_set_new_ref(s, &s->frame, s->poc); if (ret < 0) goto fail; ret = ff_hevc_frame_rps(s); if (ret < 0) { av_log(s->avctx, AV_LOG_ERROR, "Error constructing the frame RPS.\n"); goto fail; } s->ref->frame->key_frame = IS_IRAP(s); ret = set_side_data(s); if (ret < 0) goto fail; s->frame->pict_type = 3 - s->sh.slice_type; if (!IS_IRAP(s)) ff_hevc_bump_frame(s); av_frame_unref(s->output_frame); ret = ff_hevc_output_frame(s, s->output_frame, 0); if (ret < 0) goto fail; if (!s->avctx->hwaccel) ff_thread_finish_setup(s->avctx); return 0; fail: if (s->ref) ff_hevc_unref_frame(s, s->ref, ~0); s->ref = NULL; return ret; } static int decode_nal_unit(HEVCContext *s, const H2645NAL *nal) { HEVCLocalContext *lc = s->HEVClc; GetBitContext *gb = &lc->gb; int ctb_addr_ts, ret; *gb = nal->gb; s->nal_unit_type = nal->type; s->temporal_id = 
nal->temporal_id; switch (s->nal_unit_type) { case HEVC_NAL_VPS: if (s->avctx->hwaccel && s->avctx->hwaccel->decode_params) { ret = s->avctx->hwaccel->decode_params(s->avctx, nal->type, nal->raw_data, nal->raw_size); if (ret < 0) goto fail; } ret = ff_hevc_decode_nal_vps(gb, s->avctx, &s->ps); if (ret < 0) goto fail; break; case HEVC_NAL_SPS: if (s->avctx->hwaccel && s->avctx->hwaccel->decode_params) { ret = s->avctx->hwaccel->decode_params(s->avctx, nal->type, nal->raw_data, nal->raw_size); if (ret < 0) goto fail; } ret = ff_hevc_decode_nal_sps(gb, s->avctx, &s->ps, s->apply_defdispwin); if (ret < 0) goto fail; break; case HEVC_NAL_PPS: if (s->avctx->hwaccel && s->avctx->hwaccel->decode_params) { ret = s->avctx->hwaccel->decode_params(s->avctx, nal->type, nal->raw_data, nal->raw_size); if (ret < 0) goto fail; } ret = ff_hevc_decode_nal_pps(gb, s->avctx, &s->ps); if (ret < 0) goto fail; break; case HEVC_NAL_SEI_PREFIX: case HEVC_NAL_SEI_SUFFIX: if (s->avctx->hwaccel && s->avctx->hwaccel->decode_params) { ret = s->avctx->hwaccel->decode_params(s->avctx, nal->type, nal->raw_data, nal->raw_size); if (ret < 0) goto fail; } ret = ff_hevc_decode_nal_sei(gb, s->avctx, &s->sei, &s->ps, s->nal_unit_type); if (ret < 0) goto fail; break; case HEVC_NAL_TRAIL_R: case HEVC_NAL_TRAIL_N: case HEVC_NAL_TSA_N: case HEVC_NAL_TSA_R: case HEVC_NAL_STSA_N: case HEVC_NAL_STSA_R: case HEVC_NAL_BLA_W_LP: case HEVC_NAL_BLA_W_RADL: case HEVC_NAL_BLA_N_LP: case HEVC_NAL_IDR_W_RADL: case HEVC_NAL_IDR_N_LP: case HEVC_NAL_CRA_NUT: case HEVC_NAL_RADL_N: case HEVC_NAL_RADL_R: case HEVC_NAL_RASL_N: case HEVC_NAL_RASL_R: ret = hls_slice_header(s); if (ret < 0) return ret; if ( (s->avctx->skip_frame >= AVDISCARD_BIDIR && s->sh.slice_type == HEVC_SLICE_B) || (s->avctx->skip_frame >= AVDISCARD_NONINTRA && s->sh.slice_type != HEVC_SLICE_I) || (s->avctx->skip_frame >= AVDISCARD_NONKEY && !IS_IRAP(s))) { break; } if (s->sh.first_slice_in_pic_flag) { if (s->ref) { av_log(s->avctx, AV_LOG_ERROR, "Two slices 
reporting being the first in the same frame.\n"); goto fail; } if (s->max_ra == INT_MAX) { if (s->nal_unit_type == HEVC_NAL_CRA_NUT || IS_BLA(s)) { s->max_ra = s->poc; } else { if (IS_IDR(s)) s->max_ra = INT_MIN; } } if ((s->nal_unit_type == HEVC_NAL_RASL_R || s->nal_unit_type == HEVC_NAL_RASL_N) && s->poc <= s->max_ra) { s->is_decoded = 0; break; } else { if (s->nal_unit_type == HEVC_NAL_RASL_R && s->poc > s->max_ra) s->max_ra = INT_MIN; } s->overlap ++; ret = hevc_frame_start(s); if (ret < 0) return ret; } else if (!s->ref) { av_log(s->avctx, AV_LOG_ERROR, "First slice in a frame missing.\n"); goto fail; } if (s->nal_unit_type != s->first_nal_type) { av_log(s->avctx, AV_LOG_ERROR, "Non-matching NAL types of the VCL NALUs: %d %d\n", s->first_nal_type, s->nal_unit_type); return AVERROR_INVALIDDATA; } if (!s->sh.dependent_slice_segment_flag && s->sh.slice_type != HEVC_SLICE_I) { ret = ff_hevc_slice_rpl(s); if (ret < 0) { av_log(s->avctx, AV_LOG_WARNING, "Error constructing the reference lists for the current slice.\n"); goto fail; } } if (s->sh.first_slice_in_pic_flag && s->avctx->hwaccel) { ret = s->avctx->hwaccel->start_frame(s->avctx, NULL, 0); if (ret < 0) goto fail; } if (s->avctx->hwaccel) { ret = s->avctx->hwaccel->decode_slice(s->avctx, nal->raw_data, nal->raw_size); if (ret < 0) goto fail; } else { if (s->threads_number > 1 && s->sh.num_entry_point_offsets > 0) ctb_addr_ts = hls_slice_data_wpp(s, nal); else ctb_addr_ts = hls_slice_data(s); if (ctb_addr_ts >= (s->ps.sps->ctb_width * s->ps.sps->ctb_height)) { s->is_decoded = 1; } if (ctb_addr_ts < 0) { ret = ctb_addr_ts; goto fail; } } break; case HEVC_NAL_EOS_NUT: case HEVC_NAL_EOB_NUT: s->seq_decode = (s->seq_decode + 1) & 0xff; s->max_ra = INT_MAX; break; case HEVC_NAL_AUD: case HEVC_NAL_FD_NUT: break; default: av_log(s->avctx, AV_LOG_INFO, "Skipping NAL unit %d\n", s->nal_unit_type); } return 0; fail: if (s->avctx->err_recognition & AV_EF_EXPLODE) return ret; return 0; } static int 
decode_nal_units(HEVCContext *s, const uint8_t *buf, int length) { int i, ret = 0; int eos_at_start = 1; s->ref = NULL; s->last_eos = s->eos; s->eos = 0; s->overlap = 0; /* split the input packet into NAL units, so we know the upper bound on the * number of slices in the frame */ ret = ff_h2645_packet_split(&s->pkt, buf, length, s->avctx, s->is_nalff, s->nal_length_size, s->avctx->codec_id, 1, 0); if (ret < 0) { av_log(s->avctx, AV_LOG_ERROR, "Error splitting the input into NAL units.\n"); return ret; } for (i = 0; i < s->pkt.nb_nals; i++) { if (s->pkt.nals[i].type == HEVC_NAL_EOB_NUT || s->pkt.nals[i].type == HEVC_NAL_EOS_NUT) { if (eos_at_start) { s->last_eos = 1; } else { s->eos = 1; } } else { eos_at_start = 0; } } /* decode the NAL units */ for (i = 0; i < s->pkt.nb_nals; i++) { H2645NAL *nal = &s->pkt.nals[i]; if (s->avctx->skip_frame >= AVDISCARD_ALL || (s->avctx->skip_frame >= AVDISCARD_NONREF && ff_hevc_nal_is_nonref(nal->type))) continue; ret = decode_nal_unit(s, nal); if (ret >= 0 && s->overlap > 2) ret = AVERROR_INVALIDDATA; if (ret < 0) { av_log(s->avctx, AV_LOG_WARNING, "Error parsing NAL unit #%d.\n", i); goto fail; } } fail: if (s->ref && s->threads_type == FF_THREAD_FRAME) ff_thread_report_progress(&s->ref->tf, INT_MAX, 0); return ret; } static void print_md5(void *log_ctx, int level, uint8_t md5[16]) { int i; for (i = 0; i < 16; i++) av_log(log_ctx, level, "%02"PRIx8, md5[i]); } static int verify_md5(HEVCContext *s, AVFrame *frame) { const AVPixFmtDescriptor *desc = av_pix_fmt_desc_get(frame->format); int pixel_shift; int i, j; if (!desc) return AVERROR(EINVAL); pixel_shift = desc->comp[0].depth > 8; av_log(s->avctx, AV_LOG_DEBUG, "Verifying checksum for frame with POC %d: ", s->poc); /* the checksums are LE, so we have to byteswap for >8bpp formats * on BE arches */ #if HAVE_BIGENDIAN if (pixel_shift && !s->checksum_buf) { av_fast_malloc(&s->checksum_buf, &s->checksum_buf_size, FFMAX3(frame->linesize[0], frame->linesize[1], frame->linesize[2])); 
if (!s->checksum_buf) return AVERROR(ENOMEM); } #endif for (i = 0; frame->data[i]; i++) { int width = s->avctx->coded_width; int height = s->avctx->coded_height; int w = (i == 1 || i == 2) ? (width >> desc->log2_chroma_w) : width; int h = (i == 1 || i == 2) ? (height >> desc->log2_chroma_h) : height; uint8_t md5[16]; av_md5_init(s->md5_ctx); for (j = 0; j < h; j++) { const uint8_t *src = frame->data[i] + j * frame->linesize[i]; #if HAVE_BIGENDIAN if (pixel_shift) { s->bdsp.bswap16_buf((uint16_t *) s->checksum_buf, (const uint16_t *) src, w); src = s->checksum_buf; } #endif av_md5_update(s->md5_ctx, src, w << pixel_shift); } av_md5_final(s->md5_ctx, md5); if (!memcmp(md5, s->sei.picture_hash.md5[i], 16)) { av_log (s->avctx, AV_LOG_DEBUG, "plane %d - correct ", i); print_md5(s->avctx, AV_LOG_DEBUG, md5); av_log (s->avctx, AV_LOG_DEBUG, "; "); } else { av_log (s->avctx, AV_LOG_ERROR, "mismatching checksum of plane %d - ", i); print_md5(s->avctx, AV_LOG_ERROR, md5); av_log (s->avctx, AV_LOG_ERROR, " != "); print_md5(s->avctx, AV_LOG_ERROR, s->sei.picture_hash.md5[i]); av_log (s->avctx, AV_LOG_ERROR, "\n"); return AVERROR_INVALIDDATA; } } av_log(s->avctx, AV_LOG_DEBUG, "\n"); return 0; } static int hevc_decode_extradata(HEVCContext *s, uint8_t *buf, int length, int first) { int ret, i; ret = ff_hevc_decode_extradata(buf, length, &s->ps, &s->sei, &s->is_nalff, &s->nal_length_size, s->avctx->err_recognition, s->apply_defdispwin, s->avctx); if (ret < 0) return ret; /* export stream parameters from the first SPS */ for (i = 0; i < FF_ARRAY_ELEMS(s->ps.sps_list); i++) { if (first && s->ps.sps_list[i]) { const HEVCSPS *sps = (const HEVCSPS*)s->ps.sps_list[i]->data; export_stream_params(s->avctx, &s->ps, sps); break; } } return 0; } static int hevc_decode_frame(AVCodecContext *avctx, void *data, int *got_output, AVPacket *avpkt) { int ret; int new_extradata_size; uint8_t *new_extradata; HEVCContext *s = avctx->priv_data; if (!avpkt->size) { ret = ff_hevc_output_frame(s, data, 
1); if (ret < 0) return ret; *got_output = ret; return 0; } new_extradata = av_packet_get_side_data(avpkt, AV_PKT_DATA_NEW_EXTRADATA, &new_extradata_size); if (new_extradata && new_extradata_size > 0) { ret = hevc_decode_extradata(s, new_extradata, new_extradata_size, 0); if (ret < 0) return ret; } s->ref = NULL; ret = decode_nal_units(s, avpkt->data, avpkt->size); if (ret < 0) return ret; if (avctx->hwaccel) { if (s->ref && (ret = avctx->hwaccel->end_frame(avctx)) < 0) { av_log(avctx, AV_LOG_ERROR, "hardware accelerator failed to decode picture\n"); ff_hevc_unref_frame(s, s->ref, ~0); return ret; } } else { /* verify the SEI checksum */ if (avctx->err_recognition & AV_EF_CRCCHECK && s->is_decoded && s->sei.picture_hash.is_md5) { ret = verify_md5(s, s->ref->frame); if (ret < 0 && avctx->err_recognition & AV_EF_EXPLODE) { ff_hevc_unref_frame(s, s->ref, ~0); return ret; } } } s->sei.picture_hash.is_md5 = 0; if (s->is_decoded) { av_log(avctx, AV_LOG_DEBUG, "Decoded frame with POC %d.\n", s->poc); s->is_decoded = 0; } if (s->output_frame->buf[0]) { av_frame_move_ref(data, s->output_frame); *got_output = 1; } return avpkt->size; } static int hevc_ref_frame(HEVCContext *s, HEVCFrame *dst, HEVCFrame *src) { int ret; ret = ff_thread_ref_frame(&dst->tf, &src->tf); if (ret < 0) return ret; dst->tab_mvf_buf = av_buffer_ref(src->tab_mvf_buf); if (!dst->tab_mvf_buf) goto fail; dst->tab_mvf = src->tab_mvf; dst->rpl_tab_buf = av_buffer_ref(src->rpl_tab_buf); if (!dst->rpl_tab_buf) goto fail; dst->rpl_tab = src->rpl_tab; dst->rpl_buf = av_buffer_ref(src->rpl_buf); if (!dst->rpl_buf) goto fail; dst->poc = src->poc; dst->ctb_count = src->ctb_count; dst->flags = src->flags; dst->sequence = src->sequence; if (src->hwaccel_picture_private) { dst->hwaccel_priv_buf = av_buffer_ref(src->hwaccel_priv_buf); if (!dst->hwaccel_priv_buf) goto fail; dst->hwaccel_picture_private = dst->hwaccel_priv_buf->data; } return 0; fail: ff_hevc_unref_frame(s, dst, ~0); return AVERROR(ENOMEM); } static 
av_cold int hevc_decode_free(AVCodecContext *avctx) { HEVCContext *s = avctx->priv_data; int i; pic_arrays_free(s); av_freep(&s->md5_ctx); av_freep(&s->cabac_state); for (i = 0; i < 3; i++) { av_freep(&s->sao_pixel_buffer_h[i]); av_freep(&s->sao_pixel_buffer_v[i]); } av_frame_free(&s->output_frame); for (i = 0; i < FF_ARRAY_ELEMS(s->DPB); i++) { ff_hevc_unref_frame(s, &s->DPB[i], ~0); av_frame_free(&s->DPB[i].frame); } ff_hevc_ps_uninit(&s->ps); av_freep(&s->sh.entry_point_offset); av_freep(&s->sh.offset); av_freep(&s->sh.size); for (i = 1; i < s->threads_number; i++) { HEVCLocalContext *lc = s->HEVClcList[i]; if (lc) { av_freep(&s->HEVClcList[i]); av_freep(&s->sList[i]); } } if (s->HEVClc == s->HEVClcList[0]) s->HEVClc = NULL; av_freep(&s->HEVClcList[0]); ff_h2645_packet_uninit(&s->pkt); return 0; } static av_cold int hevc_init_context(AVCodecContext *avctx) { HEVCContext *s = avctx->priv_data; int i; s->avctx = avctx; s->HEVClc = av_mallocz(sizeof(HEVCLocalContext)); if (!s->HEVClc) goto fail; s->HEVClcList[0] = s->HEVClc; s->sList[0] = s; s->cabac_state = av_malloc(HEVC_CONTEXTS); if (!s->cabac_state) goto fail; s->output_frame = av_frame_alloc(); if (!s->output_frame) goto fail; for (i = 0; i < FF_ARRAY_ELEMS(s->DPB); i++) { s->DPB[i].frame = av_frame_alloc(); if (!s->DPB[i].frame) goto fail; s->DPB[i].tf.f = s->DPB[i].frame; } s->max_ra = INT_MAX; s->md5_ctx = av_md5_alloc(); if (!s->md5_ctx) goto fail; ff_bswapdsp_init(&s->bdsp); s->context_initialized = 1; s->eos = 0; ff_hevc_reset_sei(&s->sei); return 0; fail: hevc_decode_free(avctx); return AVERROR(ENOMEM); } #if HAVE_THREADS static int hevc_update_thread_context(AVCodecContext *dst, const AVCodecContext *src) { HEVCContext *s = dst->priv_data; HEVCContext *s0 = src->priv_data; int i, ret; if (!s->context_initialized) { ret = hevc_init_context(dst); if (ret < 0) return ret; } for (i = 0; i < FF_ARRAY_ELEMS(s->DPB); i++) { ff_hevc_unref_frame(s, &s->DPB[i], ~0); if (s0->DPB[i].frame->buf[0]) { ret = 
hevc_ref_frame(s, &s->DPB[i], &s0->DPB[i]); if (ret < 0) return ret; } } if (s->ps.sps != s0->ps.sps) s->ps.sps = NULL; for (i = 0; i < FF_ARRAY_ELEMS(s->ps.vps_list); i++) { av_buffer_unref(&s->ps.vps_list[i]); if (s0->ps.vps_list[i]) { s->ps.vps_list[i] = av_buffer_ref(s0->ps.vps_list[i]); if (!s->ps.vps_list[i]) return AVERROR(ENOMEM); } } for (i = 0; i < FF_ARRAY_ELEMS(s->ps.sps_list); i++) { av_buffer_unref(&s->ps.sps_list[i]); if (s0->ps.sps_list[i]) { s->ps.sps_list[i] = av_buffer_ref(s0->ps.sps_list[i]); if (!s->ps.sps_list[i]) return AVERROR(ENOMEM); } } for (i = 0; i < FF_ARRAY_ELEMS(s->ps.pps_list); i++) { av_buffer_unref(&s->ps.pps_list[i]); if (s0->ps.pps_list[i]) { s->ps.pps_list[i] = av_buffer_ref(s0->ps.pps_list[i]); if (!s->ps.pps_list[i]) return AVERROR(ENOMEM); } } if (s->ps.sps != s0->ps.sps) if ((ret = set_sps(s, s0->ps.sps, src->pix_fmt)) < 0) return ret; s->seq_decode = s0->seq_decode; s->seq_output = s0->seq_output; s->pocTid0 = s0->pocTid0; s->max_ra = s0->max_ra; s->eos = s0->eos; s->no_rasl_output_flag = s0->no_rasl_output_flag; s->is_nalff = s0->is_nalff; s->nal_length_size = s0->nal_length_size; s->threads_number = s0->threads_number; s->threads_type = s0->threads_type; if (s0->eos) { s->seq_decode = (s->seq_decode + 1) & 0xff; s->max_ra = INT_MAX; } s->sei.frame_packing = s0->sei.frame_packing; s->sei.display_orientation = s0->sei.display_orientation; s->sei.mastering_display = s0->sei.mastering_display; s->sei.content_light = s0->sei.content_light; s->sei.alternative_transfer = s0->sei.alternative_transfer; return 0; } #endif static av_cold int hevc_decode_init(AVCodecContext *avctx) { HEVCContext *s = avctx->priv_data; int ret; avctx->internal->allocate_progress = 1; ret = hevc_init_context(avctx); if (ret < 0) return ret; s->enable_parallel_tiles = 0; s->sei.picture_timing.picture_struct = 0; s->eos = 1; atomic_init(&s->wpp_err, 0); if(avctx->active_thread_type & FF_THREAD_SLICE) s->threads_number = avctx->thread_count; else 
s->threads_number = 1; if (avctx->extradata_size > 0 && avctx->extradata) { ret = hevc_decode_extradata(s, avctx->extradata, avctx->extradata_size, 1); if (ret < 0) { hevc_decode_free(avctx); return ret; } } if((avctx->active_thread_type & FF_THREAD_FRAME) && avctx->thread_count > 1) s->threads_type = FF_THREAD_FRAME; else s->threads_type = FF_THREAD_SLICE; return 0; } #if HAVE_THREADS static av_cold int hevc_init_thread_copy(AVCodecContext *avctx) { HEVCContext *s = avctx->priv_data; int ret; memset(s, 0, sizeof(*s)); ret = hevc_init_context(avctx); if (ret < 0) return ret; return 0; } #endif static void hevc_decode_flush(AVCodecContext *avctx) { HEVCContext *s = avctx->priv_data; ff_hevc_flush_dpb(s); s->max_ra = INT_MAX; s->eos = 1; } #define OFFSET(x) offsetof(HEVCContext, x) #define PAR (AV_OPT_FLAG_DECODING_PARAM | AV_OPT_FLAG_VIDEO_PARAM) static const AVOption options[] = { { "apply_defdispwin", "Apply default display window from VUI", OFFSET(apply_defdispwin), AV_OPT_TYPE_BOOL, {.i64 = 0}, 0, 1, PAR }, { "strict-displaywin", "stricly apply default display window size", OFFSET(apply_defdispwin), AV_OPT_TYPE_BOOL, {.i64 = 0}, 0, 1, PAR }, { NULL }, }; static const AVClass hevc_decoder_class = { .class_name = "HEVC decoder", .item_name = av_default_item_name, .option = options, .version = LIBAVUTIL_VERSION_INT, }; AVCodec ff_hevc_decoder = { .name = "hevc", .long_name = NULL_IF_CONFIG_SMALL("HEVC (High Efficiency Video Coding)"), .type = AVMEDIA_TYPE_VIDEO, .id = AV_CODEC_ID_HEVC, .priv_data_size = sizeof(HEVCContext), .priv_class = &hevc_decoder_class, .init = hevc_decode_init, .close = hevc_decode_free, .decode = hevc_decode_frame, .flush = hevc_decode_flush, .update_thread_context = ONLY_IF_THREADS_ENABLED(hevc_update_thread_context), .init_thread_copy = ONLY_IF_THREADS_ENABLED(hevc_init_thread_copy), .capabilities = AV_CODEC_CAP_DR1 | AV_CODEC_CAP_DELAY | AV_CODEC_CAP_SLICE_THREADS | AV_CODEC_CAP_FRAME_THREADS, .caps_internal = FF_CODEC_CAP_INIT_THREADSAFE 
| FF_CODEC_CAP_EXPORTS_CROPPING, .profiles = NULL_IF_CONFIG_SMALL(ff_hevc_profiles), .hw_configs = (const AVCodecHWConfigInternal*[]) { #if CONFIG_HEVC_DXVA2_HWACCEL HWACCEL_DXVA2(hevc), #endif #if CONFIG_HEVC_D3D11VA_HWACCEL HWACCEL_D3D11VA(hevc), #endif #if CONFIG_HEVC_D3D11VA2_HWACCEL HWACCEL_D3D11VA2(hevc), #endif #if CONFIG_HEVC_NVDEC_HWACCEL HWACCEL_NVDEC(hevc), #endif #if CONFIG_HEVC_VAAPI_HWACCEL HWACCEL_VAAPI(hevc), #endif #if CONFIG_HEVC_VDPAU_HWACCEL HWACCEL_VDPAU(hevc), #endif #if CONFIG_HEVC_VIDEOTOOLBOX_HWACCEL HWACCEL_VIDEOTOOLBOX(hevc), #endif NULL }, };
/*
 * HEVC video Decoder
 *
 * Copyright (C) 2012 - 2013 Guillaume Martres
 * Copyright (C) 2012 - 2013 Mickael Raulet
 * Copyright (C) 2012 - 2013 Gildas Cocherel
 * Copyright (C) 2012 - 2013 Wassim Hamidouche
 *
 * This file is part of FFmpeg.
 *
 * FFmpeg is free software; you can redistribute it and/or
 * modify it under the terms of the GNU Lesser General Public
 * License as published by the Free Software Foundation; either
 * version 2.1 of the License, or (at your option) any later version.
 *
 * FFmpeg is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
 * Lesser General Public License for more details.
 *
 * You should have received a copy of the GNU Lesser General Public
 * License along with FFmpeg; if not, write to the Free Software
 * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
 */

#include "libavutil/attributes.h"
#include "libavutil/common.h"
#include "libavutil/display.h"
#include "libavutil/internal.h"
#include "libavutil/mastering_display_metadata.h"
#include "libavutil/md5.h"
#include "libavutil/opt.h"
#include "libavutil/pixdesc.h"
#include "libavutil/stereo3d.h"

#include "bswapdsp.h"
#include "bytestream.h"
#include "cabac_functions.h"
#include "golomb.h"
#include "hevc.h"
#include "hevc_data.h"
#include "hevc_parse.h"
#include "hevcdec.h"
#include "hwaccel.h"
#include "profiles.h"

/* Map a prediction block width (2..64) to its weight table index. */
const uint8_t ff_hevc_pel_weight[65] = {
    [2]  = 0, [4]  = 1, [6]  = 2, [8]  = 3, [12] = 4,
    [16] = 5, [24] = 6, [32] = 7, [48] = 8, [64] = 9
};

/**
 * NOTE: Each function hls_foo correspond to the function foo in the
 * specification (HLS stands for High Level Syntax).
*/ /** * Section 5.7 */ /* free everything allocated by pic_arrays_init() */ static void pic_arrays_free(HEVCContext *s) { av_freep(&s->sao); av_freep(&s->deblock); av_freep(&s->skip_flag); av_freep(&s->tab_ct_depth); av_freep(&s->tab_ipm); av_freep(&s->cbf_luma); av_freep(&s->is_pcm); av_freep(&s->qp_y_tab); av_freep(&s->tab_slice_address); av_freep(&s->filter_slice_edges); av_freep(&s->horizontal_bs); av_freep(&s->vertical_bs); av_freep(&s->sh.entry_point_offset); av_freep(&s->sh.size); av_freep(&s->sh.offset); av_buffer_pool_uninit(&s->tab_mvf_pool); av_buffer_pool_uninit(&s->rpl_tab_pool); } /* allocate arrays that depend on frame dimensions */ static int pic_arrays_init(HEVCContext *s, const HEVCSPS *sps) { int log2_min_cb_size = sps->log2_min_cb_size; int width = sps->width; int height = sps->height; int pic_size_in_ctb = ((width >> log2_min_cb_size) + 1) * ((height >> log2_min_cb_size) + 1); int ctb_count = sps->ctb_width * sps->ctb_height; int min_pu_size = sps->min_pu_width * sps->min_pu_height; s->bs_width = (width >> 2) + 1; s->bs_height = (height >> 2) + 1; s->sao = av_mallocz_array(ctb_count, sizeof(*s->sao)); s->deblock = av_mallocz_array(ctb_count, sizeof(*s->deblock)); if (!s->sao || !s->deblock) goto fail; s->skip_flag = av_malloc_array(sps->min_cb_height, sps->min_cb_width); s->tab_ct_depth = av_malloc_array(sps->min_cb_height, sps->min_cb_width); if (!s->skip_flag || !s->tab_ct_depth) goto fail; s->cbf_luma = av_malloc_array(sps->min_tb_width, sps->min_tb_height); s->tab_ipm = av_mallocz(min_pu_size); s->is_pcm = av_malloc_array(sps->min_pu_width + 1, sps->min_pu_height + 1); if (!s->tab_ipm || !s->cbf_luma || !s->is_pcm) goto fail; s->filter_slice_edges = av_mallocz(ctb_count); s->tab_slice_address = av_malloc_array(pic_size_in_ctb, sizeof(*s->tab_slice_address)); s->qp_y_tab = av_malloc_array(pic_size_in_ctb, sizeof(*s->qp_y_tab)); if (!s->qp_y_tab || !s->filter_slice_edges || !s->tab_slice_address) goto fail; s->horizontal_bs = 
av_mallocz_array(s->bs_width, s->bs_height); s->vertical_bs = av_mallocz_array(s->bs_width, s->bs_height); if (!s->horizontal_bs || !s->vertical_bs) goto fail; s->tab_mvf_pool = av_buffer_pool_init(min_pu_size * sizeof(MvField), av_buffer_allocz); s->rpl_tab_pool = av_buffer_pool_init(ctb_count * sizeof(RefPicListTab), av_buffer_allocz); if (!s->tab_mvf_pool || !s->rpl_tab_pool) goto fail; return 0; fail: pic_arrays_free(s); return AVERROR(ENOMEM); } static int pred_weight_table(HEVCContext *s, GetBitContext *gb) { int i = 0; int j = 0; uint8_t luma_weight_l0_flag[16]; uint8_t chroma_weight_l0_flag[16]; uint8_t luma_weight_l1_flag[16]; uint8_t chroma_weight_l1_flag[16]; int luma_log2_weight_denom; luma_log2_weight_denom = get_ue_golomb_long(gb); if (luma_log2_weight_denom < 0 || luma_log2_weight_denom > 7) { av_log(s->avctx, AV_LOG_ERROR, "luma_log2_weight_denom %d is invalid\n", luma_log2_weight_denom); return AVERROR_INVALIDDATA; } s->sh.luma_log2_weight_denom = av_clip_uintp2(luma_log2_weight_denom, 3); if (s->ps.sps->chroma_format_idc != 0) { int64_t chroma_log2_weight_denom = luma_log2_weight_denom + (int64_t)get_se_golomb(gb); if (chroma_log2_weight_denom < 0 || chroma_log2_weight_denom > 7) { av_log(s->avctx, AV_LOG_ERROR, "chroma_log2_weight_denom %"PRId64" is invalid\n", chroma_log2_weight_denom); return AVERROR_INVALIDDATA; } s->sh.chroma_log2_weight_denom = chroma_log2_weight_denom; } for (i = 0; i < s->sh.nb_refs[L0]; i++) { luma_weight_l0_flag[i] = get_bits1(gb); if (!luma_weight_l0_flag[i]) { s->sh.luma_weight_l0[i] = 1 << s->sh.luma_log2_weight_denom; s->sh.luma_offset_l0[i] = 0; } } if (s->ps.sps->chroma_format_idc != 0) { for (i = 0; i < s->sh.nb_refs[L0]; i++) chroma_weight_l0_flag[i] = get_bits1(gb); } else { for (i = 0; i < s->sh.nb_refs[L0]; i++) chroma_weight_l0_flag[i] = 0; } for (i = 0; i < s->sh.nb_refs[L0]; i++) { if (luma_weight_l0_flag[i]) { int delta_luma_weight_l0 = get_se_golomb(gb); s->sh.luma_weight_l0[i] = (1 << 
s->sh.luma_log2_weight_denom) + delta_luma_weight_l0; s->sh.luma_offset_l0[i] = get_se_golomb(gb); } if (chroma_weight_l0_flag[i]) { for (j = 0; j < 2; j++) { int delta_chroma_weight_l0 = get_se_golomb(gb); int delta_chroma_offset_l0 = get_se_golomb(gb); if ( (int8_t)delta_chroma_weight_l0 != delta_chroma_weight_l0 || delta_chroma_offset_l0 < -(1<<17) || delta_chroma_offset_l0 > (1<<17)) { return AVERROR_INVALIDDATA; } s->sh.chroma_weight_l0[i][j] = (1 << s->sh.chroma_log2_weight_denom) + delta_chroma_weight_l0; s->sh.chroma_offset_l0[i][j] = av_clip((delta_chroma_offset_l0 - ((128 * s->sh.chroma_weight_l0[i][j]) >> s->sh.chroma_log2_weight_denom) + 128), -128, 127); } } else { s->sh.chroma_weight_l0[i][0] = 1 << s->sh.chroma_log2_weight_denom; s->sh.chroma_offset_l0[i][0] = 0; s->sh.chroma_weight_l0[i][1] = 1 << s->sh.chroma_log2_weight_denom; s->sh.chroma_offset_l0[i][1] = 0; } } if (s->sh.slice_type == HEVC_SLICE_B) { for (i = 0; i < s->sh.nb_refs[L1]; i++) { luma_weight_l1_flag[i] = get_bits1(gb); if (!luma_weight_l1_flag[i]) { s->sh.luma_weight_l1[i] = 1 << s->sh.luma_log2_weight_denom; s->sh.luma_offset_l1[i] = 0; } } if (s->ps.sps->chroma_format_idc != 0) { for (i = 0; i < s->sh.nb_refs[L1]; i++) chroma_weight_l1_flag[i] = get_bits1(gb); } else { for (i = 0; i < s->sh.nb_refs[L1]; i++) chroma_weight_l1_flag[i] = 0; } for (i = 0; i < s->sh.nb_refs[L1]; i++) { if (luma_weight_l1_flag[i]) { int delta_luma_weight_l1 = get_se_golomb(gb); s->sh.luma_weight_l1[i] = (1 << s->sh.luma_log2_weight_denom) + delta_luma_weight_l1; s->sh.luma_offset_l1[i] = get_se_golomb(gb); } if (chroma_weight_l1_flag[i]) { for (j = 0; j < 2; j++) { int delta_chroma_weight_l1 = get_se_golomb(gb); int delta_chroma_offset_l1 = get_se_golomb(gb); if ( (int8_t)delta_chroma_weight_l1 != delta_chroma_weight_l1 || delta_chroma_offset_l1 < -(1<<17) || delta_chroma_offset_l1 > (1<<17)) { return AVERROR_INVALIDDATA; } s->sh.chroma_weight_l1[i][j] = (1 << s->sh.chroma_log2_weight_denom) + 
delta_chroma_weight_l1; s->sh.chroma_offset_l1[i][j] = av_clip((delta_chroma_offset_l1 - ((128 * s->sh.chroma_weight_l1[i][j]) >> s->sh.chroma_log2_weight_denom) + 128), -128, 127); } } else { s->sh.chroma_weight_l1[i][0] = 1 << s->sh.chroma_log2_weight_denom; s->sh.chroma_offset_l1[i][0] = 0; s->sh.chroma_weight_l1[i][1] = 1 << s->sh.chroma_log2_weight_denom; s->sh.chroma_offset_l1[i][1] = 0; } } } return 0; } static int decode_lt_rps(HEVCContext *s, LongTermRPS *rps, GetBitContext *gb) { const HEVCSPS *sps = s->ps.sps; int max_poc_lsb = 1 << sps->log2_max_poc_lsb; int prev_delta_msb = 0; unsigned int nb_sps = 0, nb_sh; int i; rps->nb_refs = 0; if (!sps->long_term_ref_pics_present_flag) return 0; if (sps->num_long_term_ref_pics_sps > 0) nb_sps = get_ue_golomb_long(gb); nb_sh = get_ue_golomb_long(gb); if (nb_sps > sps->num_long_term_ref_pics_sps) return AVERROR_INVALIDDATA; if (nb_sh + (uint64_t)nb_sps > FF_ARRAY_ELEMS(rps->poc)) return AVERROR_INVALIDDATA; rps->nb_refs = nb_sh + nb_sps; for (i = 0; i < rps->nb_refs; i++) { uint8_t delta_poc_msb_present; if (i < nb_sps) { uint8_t lt_idx_sps = 0; if (sps->num_long_term_ref_pics_sps > 1) lt_idx_sps = get_bits(gb, av_ceil_log2(sps->num_long_term_ref_pics_sps)); rps->poc[i] = sps->lt_ref_pic_poc_lsb_sps[lt_idx_sps]; rps->used[i] = sps->used_by_curr_pic_lt_sps_flag[lt_idx_sps]; } else { rps->poc[i] = get_bits(gb, sps->log2_max_poc_lsb); rps->used[i] = get_bits1(gb); } delta_poc_msb_present = get_bits1(gb); if (delta_poc_msb_present) { int64_t delta = get_ue_golomb_long(gb); int64_t poc; if (i && i != nb_sps) delta += prev_delta_msb; poc = rps->poc[i] + s->poc - delta * max_poc_lsb - s->sh.pic_order_cnt_lsb; if (poc != (int32_t)poc) return AVERROR_INVALIDDATA; rps->poc[i] = poc; prev_delta_msb = delta; } } return 0; } static void export_stream_params(AVCodecContext *avctx, const HEVCParamSets *ps, const HEVCSPS *sps) { const HEVCVPS *vps = (const HEVCVPS*)ps->vps_list[sps->vps_id]->data; const HEVCWindow *ow = 
&sps->output_window;
    unsigned int num = 0, den = 0;

    avctx->pix_fmt         = sps->pix_fmt;
    avctx->coded_width     = sps->width;
    avctx->coded_height    = sps->height;
    /* Displayed size = coded size minus the conformance cropping window. */
    avctx->width           = sps->width  - ow->left_offset - ow->right_offset;
    avctx->height          = sps->height - ow->top_offset  - ow->bottom_offset;
    avctx->has_b_frames    = sps->temporal_layer[sps->max_sub_layers - 1].num_reorder_pics;
    avctx->profile         = sps->ptl.general_ptl.profile_idc;
    avctx->level           = sps->ptl.general_ptl.level_idc;

    ff_set_sar(avctx, sps->vui.sar);

    if (sps->vui.video_signal_type_present_flag)
        avctx->color_range = sps->vui.video_full_range_flag ? AVCOL_RANGE_JPEG
                                                            : AVCOL_RANGE_MPEG;
    else
        avctx->color_range = AVCOL_RANGE_MPEG;

    if (sps->vui.colour_description_present_flag) {
        avctx->color_primaries = sps->vui.colour_primaries;
        avctx->color_trc       = sps->vui.transfer_characteristic;
        avctx->colorspace      = sps->vui.matrix_coeffs;
    } else {
        avctx->color_primaries = AVCOL_PRI_UNSPECIFIED;
        avctx->color_trc       = AVCOL_TRC_UNSPECIFIED;
        avctx->colorspace      = AVCOL_SPC_UNSPECIFIED;
    }

    /* VPS timing info takes precedence over the SPS VUI timing info. */
    if (vps->vps_timing_info_present_flag) {
        num = vps->vps_num_units_in_tick;
        den = vps->vps_time_scale;
    } else if (sps->vui.vui_timing_info_present_flag) {
        num = sps->vui.vui_num_units_in_tick;
        den = sps->vui.vui_time_scale;
    }

    if (num != 0 && den != 0)
        av_reduce(&avctx->framerate.den, &avctx->framerate.num,
                  num, den, 1 << 30);
}

/**
 * Build the list of candidate output pixel formats for this SPS —
 * hardware-accelerated formats first (compile-time gated), then the
 * software format — and let the frame-threading format negotiation pick one.
 *
 * @return the negotiated pixel format from ff_thread_get_format()
 */
static enum AVPixelFormat get_format(HEVCContext *s, const HEVCSPS *sps)
{
    /* Worst-case number of hwaccel entries (D3D11VA contributes two). */
#define HWACCEL_MAX (CONFIG_HEVC_DXVA2_HWACCEL + \
                     CONFIG_HEVC_D3D11VA_HWACCEL * 2 + \
                     CONFIG_HEVC_NVDEC_HWACCEL + \
                     CONFIG_HEVC_VAAPI_HWACCEL + \
                     CONFIG_HEVC_VIDEOTOOLBOX_HWACCEL + \
                     CONFIG_HEVC_VDPAU_HWACCEL)
    /* +2: one slot for the software format, one for the AV_PIX_FMT_NONE
     * terminator. */
    enum AVPixelFormat pix_fmts[HWACCEL_MAX + 2], *fmt = pix_fmts;

    switch (sps->pix_fmt) {
    case AV_PIX_FMT_YUV420P:
    case AV_PIX_FMT_YUVJ420P:
#if CONFIG_HEVC_DXVA2_HWACCEL
        *fmt++ = AV_PIX_FMT_DXVA2_VLD;
#endif
#if CONFIG_HEVC_D3D11VA_HWACCEL
        *fmt++ = AV_PIX_FMT_D3D11VA_VLD;
        *fmt++ = AV_PIX_FMT_D3D11;
#endif
#if CONFIG_HEVC_VAAPI_HWACCEL
        *fmt++ = AV_PIX_FMT_VAAPI;
#endif
#if CONFIG_HEVC_VDPAU_HWACCEL
        *fmt++ = AV_PIX_FMT_VDPAU;
#endif
#if CONFIG_HEVC_NVDEC_HWACCEL
        *fmt++ = AV_PIX_FMT_CUDA;
#endif
#if CONFIG_HEVC_VIDEOTOOLBOX_HWACCEL
        *fmt++ = AV_PIX_FMT_VIDEOTOOLBOX;
#endif
        break;
    case AV_PIX_FMT_YUV420P10:
        /* 10-bit 4:2:0: note VDPAU is not offered here. */
#if CONFIG_HEVC_DXVA2_HWACCEL
        *fmt++ = AV_PIX_FMT_DXVA2_VLD;
#endif
#if CONFIG_HEVC_D3D11VA_HWACCEL
        *fmt++ = AV_PIX_FMT_D3D11VA_VLD;
        *fmt++ = AV_PIX_FMT_D3D11;
#endif
#if CONFIG_HEVC_VAAPI_HWACCEL
        *fmt++ = AV_PIX_FMT_VAAPI;
#endif
#if CONFIG_HEVC_VIDEOTOOLBOX_HWACCEL
        *fmt++ = AV_PIX_FMT_VIDEOTOOLBOX;
#endif
#if CONFIG_HEVC_NVDEC_HWACCEL
        *fmt++ = AV_PIX_FMT_CUDA;
#endif
        break;
    case AV_PIX_FMT_YUV420P12:
    case AV_PIX_FMT_YUV444P:
    case AV_PIX_FMT_YUV444P10:
    case AV_PIX_FMT_YUV444P12:
        /* Only NVDEC handles 12-bit and 4:4:4 layouts. */
#if CONFIG_HEVC_NVDEC_HWACCEL
        *fmt++ = AV_PIX_FMT_CUDA;
#endif
        break;
    }

    /* Software decoding is always the last (fallback) candidate. */
    *fmt++ = sps->pix_fmt;
    *fmt   = AV_PIX_FMT_NONE;

    return ff_thread_get_format(s->avctx, pix_fmts);
}

/**
 * Activate a new SPS: rebuild the per-picture arrays, export stream
 * parameters, re-initialize the DSP contexts for the new bit depth, and
 * (re)allocate the SAO edge-pixel buffers. (Continues on the following
 * source line; errors unwind via the fail label there.)
 */
static int set_sps(HEVCContext *s, const HEVCSPS *sps,
                   enum AVPixelFormat pix_fmt)
{
    int ret, i;

    /* Drop any state belonging to the previously active SPS first. */
    pic_arrays_free(s);
    s->ps.sps = NULL;
    s->ps.vps = NULL;

    if (!sps)
        return 0;

    ret = pic_arrays_init(s, sps);
    if (ret < 0)
        goto fail;

    export_stream_params(s->avctx, &s->ps, sps);

    s->avctx->pix_fmt = pix_fmt;

    /* Function tables depend on the bit depth of the new SPS. */
    ff_hevc_pred_init(&s->hpc,     sps->bit_depth);
    ff_hevc_dsp_init (&s->hevcdsp, sps->bit_depth);
    ff_videodsp_init (&s->vdsp,    sps->bit_depth);

    for (i = 0; i < 3; i++) {
        av_freep(&s->sao_pixel_buffer_h[i]);
        av_freep(&s->sao_pixel_buffer_v[i]);
    }

    /* SAO scratch buffers are only needed for software decoding. */
    if (sps->sao_enabled && !s->avctx->hwaccel) {
        int c_count = (sps->chroma_format_idc != 0) ?
3 : 1; int c_idx; for(c_idx = 0; c_idx < c_count; c_idx++) { int w = sps->width >> sps->hshift[c_idx]; int h = sps->height >> sps->vshift[c_idx]; s->sao_pixel_buffer_h[c_idx] = av_malloc((w * 2 * sps->ctb_height) << sps->pixel_shift); s->sao_pixel_buffer_v[c_idx] = av_malloc((h * 2 * sps->ctb_width) << sps->pixel_shift); } } s->ps.sps = sps; s->ps.vps = (HEVCVPS*) s->ps.vps_list[s->ps.sps->vps_id]->data; return 0; fail: pic_arrays_free(s); s->ps.sps = NULL; return ret; } static int hls_slice_header(HEVCContext *s) { GetBitContext *gb = &s->HEVClc->gb; SliceHeader *sh = &s->sh; int i, ret; // Coded parameters sh->first_slice_in_pic_flag = get_bits1(gb); if (s->ref && sh->first_slice_in_pic_flag) { av_log(s->avctx, AV_LOG_ERROR, "Two slices reporting being the first in the same frame.\n"); return 1; // This slice will be skiped later, do not corrupt state } if ((IS_IDR(s) || IS_BLA(s)) && sh->first_slice_in_pic_flag) { s->seq_decode = (s->seq_decode + 1) & 0xff; s->max_ra = INT_MAX; if (IS_IDR(s)) ff_hevc_clear_refs(s); } sh->no_output_of_prior_pics_flag = 0; if (IS_IRAP(s)) sh->no_output_of_prior_pics_flag = get_bits1(gb); sh->pps_id = get_ue_golomb_long(gb); if (sh->pps_id >= HEVC_MAX_PPS_COUNT || !s->ps.pps_list[sh->pps_id]) { av_log(s->avctx, AV_LOG_ERROR, "PPS id out of range: %d\n", sh->pps_id); return AVERROR_INVALIDDATA; } if (!sh->first_slice_in_pic_flag && s->ps.pps != (HEVCPPS*)s->ps.pps_list[sh->pps_id]->data) { av_log(s->avctx, AV_LOG_ERROR, "PPS changed between slices.\n"); return AVERROR_INVALIDDATA; } s->ps.pps = (HEVCPPS*)s->ps.pps_list[sh->pps_id]->data; if (s->nal_unit_type == HEVC_NAL_CRA_NUT && s->last_eos == 1) sh->no_output_of_prior_pics_flag = 1; if (s->ps.sps != (HEVCSPS*)s->ps.sps_list[s->ps.pps->sps_id]->data) { const HEVCSPS *sps = (HEVCSPS*)s->ps.sps_list[s->ps.pps->sps_id]->data; const HEVCSPS *last_sps = s->ps.sps; enum AVPixelFormat pix_fmt; if (last_sps && IS_IRAP(s) && s->nal_unit_type != HEVC_NAL_CRA_NUT) { if (sps->width != 
last_sps->width || sps->height != last_sps->height || sps->temporal_layer[sps->max_sub_layers - 1].max_dec_pic_buffering != last_sps->temporal_layer[last_sps->max_sub_layers - 1].max_dec_pic_buffering) sh->no_output_of_prior_pics_flag = 0; } ff_hevc_clear_refs(s); ret = set_sps(s, sps, sps->pix_fmt); if (ret < 0) return ret; pix_fmt = get_format(s, sps); if (pix_fmt < 0) return pix_fmt; s->avctx->pix_fmt = pix_fmt; s->seq_decode = (s->seq_decode + 1) & 0xff; s->max_ra = INT_MAX; } sh->dependent_slice_segment_flag = 0; if (!sh->first_slice_in_pic_flag) { int slice_address_length; if (s->ps.pps->dependent_slice_segments_enabled_flag) sh->dependent_slice_segment_flag = get_bits1(gb); slice_address_length = av_ceil_log2(s->ps.sps->ctb_width * s->ps.sps->ctb_height); sh->slice_segment_addr = get_bitsz(gb, slice_address_length); if (sh->slice_segment_addr >= s->ps.sps->ctb_width * s->ps.sps->ctb_height) { av_log(s->avctx, AV_LOG_ERROR, "Invalid slice segment address: %u.\n", sh->slice_segment_addr); return AVERROR_INVALIDDATA; } if (!sh->dependent_slice_segment_flag) { sh->slice_addr = sh->slice_segment_addr; s->slice_idx++; } } else { sh->slice_segment_addr = sh->slice_addr = 0; s->slice_idx = 0; s->slice_initialized = 0; } if (!sh->dependent_slice_segment_flag) { s->slice_initialized = 0; for (i = 0; i < s->ps.pps->num_extra_slice_header_bits; i++) skip_bits(gb, 1); // slice_reserved_undetermined_flag[] sh->slice_type = get_ue_golomb_long(gb); if (!(sh->slice_type == HEVC_SLICE_I || sh->slice_type == HEVC_SLICE_P || sh->slice_type == HEVC_SLICE_B)) { av_log(s->avctx, AV_LOG_ERROR, "Unknown slice type: %d.\n", sh->slice_type); return AVERROR_INVALIDDATA; } if (IS_IRAP(s) && sh->slice_type != HEVC_SLICE_I) { av_log(s->avctx, AV_LOG_ERROR, "Inter slices in an IRAP frame.\n"); return AVERROR_INVALIDDATA; } // when flag is not present, picture is inferred to be output sh->pic_output_flag = 1; if (s->ps.pps->output_flag_present_flag) sh->pic_output_flag = get_bits1(gb); if 
(s->ps.sps->separate_colour_plane_flag) sh->colour_plane_id = get_bits(gb, 2); if (!IS_IDR(s)) { int poc, pos; sh->pic_order_cnt_lsb = get_bits(gb, s->ps.sps->log2_max_poc_lsb); poc = ff_hevc_compute_poc(s->ps.sps, s->pocTid0, sh->pic_order_cnt_lsb, s->nal_unit_type); if (!sh->first_slice_in_pic_flag && poc != s->poc) { av_log(s->avctx, AV_LOG_WARNING, "Ignoring POC change between slices: %d -> %d\n", s->poc, poc); if (s->avctx->err_recognition & AV_EF_EXPLODE) return AVERROR_INVALIDDATA; poc = s->poc; } s->poc = poc; sh->short_term_ref_pic_set_sps_flag = get_bits1(gb); pos = get_bits_left(gb); if (!sh->short_term_ref_pic_set_sps_flag) { ret = ff_hevc_decode_short_term_rps(gb, s->avctx, &sh->slice_rps, s->ps.sps, 1); if (ret < 0) return ret; sh->short_term_rps = &sh->slice_rps; } else { int numbits, rps_idx; if (!s->ps.sps->nb_st_rps) { av_log(s->avctx, AV_LOG_ERROR, "No ref lists in the SPS.\n"); return AVERROR_INVALIDDATA; } numbits = av_ceil_log2(s->ps.sps->nb_st_rps); rps_idx = numbits > 0 ? 
get_bits(gb, numbits) : 0; sh->short_term_rps = &s->ps.sps->st_rps[rps_idx]; } sh->short_term_ref_pic_set_size = pos - get_bits_left(gb); pos = get_bits_left(gb); ret = decode_lt_rps(s, &sh->long_term_rps, gb); if (ret < 0) { av_log(s->avctx, AV_LOG_WARNING, "Invalid long term RPS.\n"); if (s->avctx->err_recognition & AV_EF_EXPLODE) return AVERROR_INVALIDDATA; } sh->long_term_ref_pic_set_size = pos - get_bits_left(gb); if (s->ps.sps->sps_temporal_mvp_enabled_flag) sh->slice_temporal_mvp_enabled_flag = get_bits1(gb); else sh->slice_temporal_mvp_enabled_flag = 0; } else { s->sh.short_term_rps = NULL; s->poc = 0; } /* 8.3.1 */ if (sh->first_slice_in_pic_flag && s->temporal_id == 0 && s->nal_unit_type != HEVC_NAL_TRAIL_N && s->nal_unit_type != HEVC_NAL_TSA_N && s->nal_unit_type != HEVC_NAL_STSA_N && s->nal_unit_type != HEVC_NAL_RADL_N && s->nal_unit_type != HEVC_NAL_RADL_R && s->nal_unit_type != HEVC_NAL_RASL_N && s->nal_unit_type != HEVC_NAL_RASL_R) s->pocTid0 = s->poc; if (s->ps.sps->sao_enabled) { sh->slice_sample_adaptive_offset_flag[0] = get_bits1(gb); if (s->ps.sps->chroma_format_idc) { sh->slice_sample_adaptive_offset_flag[1] = sh->slice_sample_adaptive_offset_flag[2] = get_bits1(gb); } } else { sh->slice_sample_adaptive_offset_flag[0] = 0; sh->slice_sample_adaptive_offset_flag[1] = 0; sh->slice_sample_adaptive_offset_flag[2] = 0; } sh->nb_refs[L0] = sh->nb_refs[L1] = 0; if (sh->slice_type == HEVC_SLICE_P || sh->slice_type == HEVC_SLICE_B) { int nb_refs; sh->nb_refs[L0] = s->ps.pps->num_ref_idx_l0_default_active; if (sh->slice_type == HEVC_SLICE_B) sh->nb_refs[L1] = s->ps.pps->num_ref_idx_l1_default_active; if (get_bits1(gb)) { // num_ref_idx_active_override_flag sh->nb_refs[L0] = get_ue_golomb_long(gb) + 1; if (sh->slice_type == HEVC_SLICE_B) sh->nb_refs[L1] = get_ue_golomb_long(gb) + 1; } if (sh->nb_refs[L0] > HEVC_MAX_REFS || sh->nb_refs[L1] > HEVC_MAX_REFS) { av_log(s->avctx, AV_LOG_ERROR, "Too many refs: %d/%d.\n", sh->nb_refs[L0], sh->nb_refs[L1]); return 
AVERROR_INVALIDDATA; } sh->rpl_modification_flag[0] = 0; sh->rpl_modification_flag[1] = 0; nb_refs = ff_hevc_frame_nb_refs(s); if (!nb_refs) { av_log(s->avctx, AV_LOG_ERROR, "Zero refs for a frame with P or B slices.\n"); return AVERROR_INVALIDDATA; } if (s->ps.pps->lists_modification_present_flag && nb_refs > 1) { sh->rpl_modification_flag[0] = get_bits1(gb); if (sh->rpl_modification_flag[0]) { for (i = 0; i < sh->nb_refs[L0]; i++) sh->list_entry_lx[0][i] = get_bits(gb, av_ceil_log2(nb_refs)); } if (sh->slice_type == HEVC_SLICE_B) { sh->rpl_modification_flag[1] = get_bits1(gb); if (sh->rpl_modification_flag[1] == 1) for (i = 0; i < sh->nb_refs[L1]; i++) sh->list_entry_lx[1][i] = get_bits(gb, av_ceil_log2(nb_refs)); } } if (sh->slice_type == HEVC_SLICE_B) sh->mvd_l1_zero_flag = get_bits1(gb); if (s->ps.pps->cabac_init_present_flag) sh->cabac_init_flag = get_bits1(gb); else sh->cabac_init_flag = 0; sh->collocated_ref_idx = 0; if (sh->slice_temporal_mvp_enabled_flag) { sh->collocated_list = L0; if (sh->slice_type == HEVC_SLICE_B) sh->collocated_list = !get_bits1(gb); if (sh->nb_refs[sh->collocated_list] > 1) { sh->collocated_ref_idx = get_ue_golomb_long(gb); if (sh->collocated_ref_idx >= sh->nb_refs[sh->collocated_list]) { av_log(s->avctx, AV_LOG_ERROR, "Invalid collocated_ref_idx: %d.\n", sh->collocated_ref_idx); return AVERROR_INVALIDDATA; } } } if ((s->ps.pps->weighted_pred_flag && sh->slice_type == HEVC_SLICE_P) || (s->ps.pps->weighted_bipred_flag && sh->slice_type == HEVC_SLICE_B)) { int ret = pred_weight_table(s, gb); if (ret < 0) return ret; } sh->max_num_merge_cand = 5 - get_ue_golomb_long(gb); if (sh->max_num_merge_cand < 1 || sh->max_num_merge_cand > 5) { av_log(s->avctx, AV_LOG_ERROR, "Invalid number of merging MVP candidates: %d.\n", sh->max_num_merge_cand); return AVERROR_INVALIDDATA; } } sh->slice_qp_delta = get_se_golomb(gb); if (s->ps.pps->pic_slice_level_chroma_qp_offsets_present_flag) { sh->slice_cb_qp_offset = get_se_golomb(gb); 
sh->slice_cr_qp_offset = get_se_golomb(gb); } else { sh->slice_cb_qp_offset = 0; sh->slice_cr_qp_offset = 0; } if (s->ps.pps->chroma_qp_offset_list_enabled_flag) sh->cu_chroma_qp_offset_enabled_flag = get_bits1(gb); else sh->cu_chroma_qp_offset_enabled_flag = 0; if (s->ps.pps->deblocking_filter_control_present_flag) { int deblocking_filter_override_flag = 0; if (s->ps.pps->deblocking_filter_override_enabled_flag) deblocking_filter_override_flag = get_bits1(gb); if (deblocking_filter_override_flag) { sh->disable_deblocking_filter_flag = get_bits1(gb); if (!sh->disable_deblocking_filter_flag) { int beta_offset_div2 = get_se_golomb(gb); int tc_offset_div2 = get_se_golomb(gb) ; if (beta_offset_div2 < -6 || beta_offset_div2 > 6 || tc_offset_div2 < -6 || tc_offset_div2 > 6) { av_log(s->avctx, AV_LOG_ERROR, "Invalid deblock filter offsets: %d, %d\n", beta_offset_div2, tc_offset_div2); return AVERROR_INVALIDDATA; } sh->beta_offset = beta_offset_div2 * 2; sh->tc_offset = tc_offset_div2 * 2; } } else { sh->disable_deblocking_filter_flag = s->ps.pps->disable_dbf; sh->beta_offset = s->ps.pps->beta_offset; sh->tc_offset = s->ps.pps->tc_offset; } } else { sh->disable_deblocking_filter_flag = 0; sh->beta_offset = 0; sh->tc_offset = 0; } if (s->ps.pps->seq_loop_filter_across_slices_enabled_flag && (sh->slice_sample_adaptive_offset_flag[0] || sh->slice_sample_adaptive_offset_flag[1] || !sh->disable_deblocking_filter_flag)) { sh->slice_loop_filter_across_slices_enabled_flag = get_bits1(gb); } else { sh->slice_loop_filter_across_slices_enabled_flag = s->ps.pps->seq_loop_filter_across_slices_enabled_flag; } } else if (!s->slice_initialized) { av_log(s->avctx, AV_LOG_ERROR, "Independent slice segment missing.\n"); return AVERROR_INVALIDDATA; } sh->num_entry_point_offsets = 0; if (s->ps.pps->tiles_enabled_flag || s->ps.pps->entropy_coding_sync_enabled_flag) { unsigned num_entry_point_offsets = get_ue_golomb_long(gb); // It would be possible to bound this tighter but this here is simpler 
if (num_entry_point_offsets > get_bits_left(gb)) { av_log(s->avctx, AV_LOG_ERROR, "num_entry_point_offsets %d is invalid\n", num_entry_point_offsets); return AVERROR_INVALIDDATA; } sh->num_entry_point_offsets = num_entry_point_offsets; if (sh->num_entry_point_offsets > 0) { int offset_len = get_ue_golomb_long(gb) + 1; if (offset_len < 1 || offset_len > 32) { sh->num_entry_point_offsets = 0; av_log(s->avctx, AV_LOG_ERROR, "offset_len %d is invalid\n", offset_len); return AVERROR_INVALIDDATA; } av_freep(&sh->entry_point_offset); av_freep(&sh->offset); av_freep(&sh->size); sh->entry_point_offset = av_malloc_array(sh->num_entry_point_offsets, sizeof(unsigned)); sh->offset = av_malloc_array(sh->num_entry_point_offsets, sizeof(int)); sh->size = av_malloc_array(sh->num_entry_point_offsets, sizeof(int)); if (!sh->entry_point_offset || !sh->offset || !sh->size) { sh->num_entry_point_offsets = 0; av_log(s->avctx, AV_LOG_ERROR, "Failed to allocate memory\n"); return AVERROR(ENOMEM); } for (i = 0; i < sh->num_entry_point_offsets; i++) { unsigned val = get_bits_long(gb, offset_len); sh->entry_point_offset[i] = val + 1; // +1; // +1 to get the size } if (s->threads_number > 1 && (s->ps.pps->num_tile_rows > 1 || s->ps.pps->num_tile_columns > 1)) { s->enable_parallel_tiles = 0; // TODO: you can enable tiles in parallel here s->threads_number = 1; } else s->enable_parallel_tiles = 0; } else s->enable_parallel_tiles = 0; } if (s->ps.pps->slice_header_extension_present_flag) { unsigned int length = get_ue_golomb_long(gb); if (length*8LL > get_bits_left(gb)) { av_log(s->avctx, AV_LOG_ERROR, "too many slice_header_extension_data_bytes\n"); return AVERROR_INVALIDDATA; } for (i = 0; i < length; i++) skip_bits(gb, 8); // slice_header_extension_data_byte } // Inferred parameters sh->slice_qp = 26U + s->ps.pps->pic_init_qp_minus26 + sh->slice_qp_delta; if (sh->slice_qp > 51 || sh->slice_qp < -s->ps.sps->qp_bd_offset) { av_log(s->avctx, AV_LOG_ERROR, "The slice_qp %d is outside the valid 
range " "[%d, 51].\n",
               sh->slice_qp, -s->ps.sps->qp_bd_offset);
        return AVERROR_INVALIDDATA;
    }

    sh->slice_ctb_addr_rs = sh->slice_segment_addr;

    /* A dependent slice segment cannot start at CTB address 0. */
    if (!s->sh.slice_ctb_addr_rs && s->sh.dependent_slice_segment_flag) {
        av_log(s->avctx, AV_LOG_ERROR, "Impossible slice segment.\n");
        return AVERROR_INVALIDDATA;
    }

    /* Reject slice headers that consumed more bits than were available. */
    if (get_bits_left(gb) < 0) {
        av_log(s->avctx, AV_LOG_ERROR,
               "Overread slice header by %d bits\n", -get_bits_left(gb));
        return AVERROR_INVALIDDATA;
    }

    s->HEVClc->first_qp_group = !s->sh.dependent_slice_segment_flag;

    if (!s->ps.pps->cu_qp_delta_enabled_flag)
        s->HEVClc->qp_y = s->sh.slice_qp;

    s->slice_initialized = 1;
    s->HEVClc->tu.cu_qp_offset_cb = 0;
    s->HEVClc->tu.cu_qp_offset_cr = 0;

    return 0;
}

/* Access the per-CTB table entry at CTB coordinates (x, y);
 * relies on s->ps.sps being in scope at the expansion site. */
#define CTB(tab, x, y) ((tab)[(y) * s->ps.sps->ctb_width + (x)])

/* Assign a SAO field: take the decoded value, or copy it from the left/up
 * neighbour CTB when the corresponding merge flag is set. Relies on the
 * locals sao_merge_left_flag / sao_merge_up_flag / sao / rx / ry being in
 * scope at the expansion site (hls_sao_param below). */
#define SET_SAO(elem, value)                            \
do {                                                    \
    if (!sao_merge_up_flag && !sao_merge_left_flag)     \
        sao->elem = value;                              \
    else if (sao_merge_left_flag)                       \
        sao->elem = CTB(s->sao, rx-1, ry).elem;         \
    else if (sao_merge_up_flag)                         \
        sao->elem = CTB(s->sao, rx, ry-1).elem;         \
    else                                                \
        sao->elem = 0;                                  \
} while (0)

/**
 * Decode the SAO (sample adaptive offset) parameters for the CTB at
 * CTB coordinates (rx, ry), handling merge-left/merge-up inheritance,
 * and derive the final signed, scaled offset values.
 */
static void hls_sao_param(HEVCContext *s, int rx, int ry)
{
    HEVCLocalContext *lc    = s->HEVClc;
    int sao_merge_left_flag = 0;
    int sao_merge_up_flag   = 0;
    SAOParams *sao          = &CTB(s->sao, rx, ry);
    int c_idx, i;

    /* Merge flags are only coded when SAO is on for luma or chroma, and only
     * when a left/up neighbour exists within the same slice/tile. */
    if (s->sh.slice_sample_adaptive_offset_flag[0] ||
        s->sh.slice_sample_adaptive_offset_flag[1]) {
        if (rx > 0) {
            if (lc->ctb_left_flag)
                sao_merge_left_flag = ff_hevc_sao_merge_flag_decode(s);
        }
        if (ry > 0 && !sao_merge_left_flag) {
            if (lc->ctb_up_flag)
                sao_merge_up_flag = ff_hevc_sao_merge_flag_decode(s);
        }
    }

    for (c_idx = 0; c_idx < (s->ps.sps->chroma_format_idc ? 3 : 1); c_idx++) {
        int log2_sao_offset_scale = c_idx == 0 ? s->ps.pps->log2_sao_offset_scale_luma :
                                                 s->ps.pps->log2_sao_offset_scale_chroma;

        if (!s->sh.slice_sample_adaptive_offset_flag[c_idx]) {
            sao->type_idx[c_idx] = SAO_NOT_APPLIED;
            continue;
        }

        /* Cr (c_idx == 2) shares type and EO class with Cb. */
        if (c_idx == 2) {
            sao->type_idx[2] = sao->type_idx[1];
            sao->eo_class[2] = sao->eo_class[1];
        } else {
            SET_SAO(type_idx[c_idx], ff_hevc_sao_type_idx_decode(s));
        }

        if (sao->type_idx[c_idx] == SAO_NOT_APPLIED)
            continue;

        for (i = 0; i < 4; i++)
            SET_SAO(offset_abs[c_idx][i], ff_hevc_sao_offset_abs_decode(s));

        if (sao->type_idx[c_idx] == SAO_BAND) {
            /* Band offsets carry explicit signs (only for nonzero magnitudes). */
            for (i = 0; i < 4; i++) {
                if (sao->offset_abs[c_idx][i]) {
                    SET_SAO(offset_sign[c_idx][i],
                            ff_hevc_sao_offset_sign_decode(s));
                } else {
                    sao->offset_sign[c_idx][i] = 0;
                }
            }
            SET_SAO(band_position[c_idx], ff_hevc_sao_band_position_decode(s));
        } else if (c_idx != 2) {
            SET_SAO(eo_class[c_idx], ff_hevc_sao_eo_class_decode(s));
        }

        // Inferred parameters
        sao->offset_val[c_idx][0] = 0;
        for (i = 0; i < 4; i++) {
            sao->offset_val[c_idx][i + 1] = sao->offset_abs[c_idx][i];
            if (sao->type_idx[c_idx] == SAO_EDGE) {
                /* For edge offset, categories 3 and 4 are implicitly negative. */
                if (i > 1)
                    sao->offset_val[c_idx][i + 1] = -sao->offset_val[c_idx][i + 1];
            } else if (sao->offset_sign[c_idx][i]) {
                sao->offset_val[c_idx][i + 1] = -sao->offset_val[c_idx][i + 1];
            }
            sao->offset_val[c_idx][i + 1] *= 1 << log2_sao_offset_scale;
        }
    }
}

#undef SET_SAO
#undef CTB

/**
 * Decode the cross-component prediction residual scale for chroma plane idx
 * and store it in lc->tu.res_scale_val (0 when not used; otherwise a signed
 * power of two derived from the coded magnitude and sign).
 */
static int hls_cross_component_pred(HEVCContext *s, int idx) {
    HEVCLocalContext *lc    = s->HEVClc;
    int log2_res_scale_abs_plus1 = ff_hevc_log2_res_scale_abs(s, idx);

    if (log2_res_scale_abs_plus1 !=  0) {
        int res_scale_sign_flag = ff_hevc_res_scale_sign_flag(s, idx);
        lc->tu.res_scale_val = (1 << (log2_res_scale_abs_plus1 - 1)) *
                               (1 - 2 * res_scale_sign_flag);
    } else {
        lc->tu.res_scale_val = 0;
    }

    return 0;
}

/**
 * Decode one transform unit: intra prediction, QP deltas / chroma QP
 * offsets, and the residual coding for luma and chroma.
 * (Continues on the following source lines.)
 */
static int hls_transform_unit(HEVCContext *s, int x0, int y0,
                              int xBase, int yBase, int cb_xBase, int cb_yBase,
                              int log2_cb_size, int log2_trafo_size,
                              int blk_idx, int cbf_luma, int *cbf_cb, int *cbf_cr)
{
    HEVCLocalContext *lc = s->HEVClc;
    const int log2_trafo_size_c =
log2_trafo_size - s->ps.sps->hshift[1]; int i; if (lc->cu.pred_mode == MODE_INTRA) { int trafo_size = 1 << log2_trafo_size; ff_hevc_set_neighbour_available(s, x0, y0, trafo_size, trafo_size); s->hpc.intra_pred[log2_trafo_size - 2](s, x0, y0, 0); } if (cbf_luma || cbf_cb[0] || cbf_cr[0] || (s->ps.sps->chroma_format_idc == 2 && (cbf_cb[1] || cbf_cr[1]))) { int scan_idx = SCAN_DIAG; int scan_idx_c = SCAN_DIAG; int cbf_chroma = cbf_cb[0] || cbf_cr[0] || (s->ps.sps->chroma_format_idc == 2 && (cbf_cb[1] || cbf_cr[1])); if (s->ps.pps->cu_qp_delta_enabled_flag && !lc->tu.is_cu_qp_delta_coded) { lc->tu.cu_qp_delta = ff_hevc_cu_qp_delta_abs(s); if (lc->tu.cu_qp_delta != 0) if (ff_hevc_cu_qp_delta_sign_flag(s) == 1) lc->tu.cu_qp_delta = -lc->tu.cu_qp_delta; lc->tu.is_cu_qp_delta_coded = 1; if (lc->tu.cu_qp_delta < -(26 + s->ps.sps->qp_bd_offset / 2) || lc->tu.cu_qp_delta > (25 + s->ps.sps->qp_bd_offset / 2)) { av_log(s->avctx, AV_LOG_ERROR, "The cu_qp_delta %d is outside the valid range " "[%d, %d].\n", lc->tu.cu_qp_delta, -(26 + s->ps.sps->qp_bd_offset / 2), (25 + s->ps.sps->qp_bd_offset / 2)); return AVERROR_INVALIDDATA; } ff_hevc_set_qPy(s, cb_xBase, cb_yBase, log2_cb_size); } if (s->sh.cu_chroma_qp_offset_enabled_flag && cbf_chroma && !lc->cu.cu_transquant_bypass_flag && !lc->tu.is_cu_chroma_qp_offset_coded) { int cu_chroma_qp_offset_flag = ff_hevc_cu_chroma_qp_offset_flag(s); if (cu_chroma_qp_offset_flag) { int cu_chroma_qp_offset_idx = 0; if (s->ps.pps->chroma_qp_offset_list_len_minus1 > 0) { cu_chroma_qp_offset_idx = ff_hevc_cu_chroma_qp_offset_idx(s); av_log(s->avctx, AV_LOG_ERROR, "cu_chroma_qp_offset_idx not yet tested.\n"); } lc->tu.cu_qp_offset_cb = s->ps.pps->cb_qp_offset_list[cu_chroma_qp_offset_idx]; lc->tu.cu_qp_offset_cr = s->ps.pps->cr_qp_offset_list[cu_chroma_qp_offset_idx]; } else { lc->tu.cu_qp_offset_cb = 0; lc->tu.cu_qp_offset_cr = 0; } lc->tu.is_cu_chroma_qp_offset_coded = 1; } if (lc->cu.pred_mode == MODE_INTRA && log2_trafo_size < 4) { if 
(lc->tu.intra_pred_mode >= 6 && lc->tu.intra_pred_mode <= 14) { scan_idx = SCAN_VERT; } else if (lc->tu.intra_pred_mode >= 22 && lc->tu.intra_pred_mode <= 30) { scan_idx = SCAN_HORIZ; } if (lc->tu.intra_pred_mode_c >= 6 && lc->tu.intra_pred_mode_c <= 14) { scan_idx_c = SCAN_VERT; } else if (lc->tu.intra_pred_mode_c >= 22 && lc->tu.intra_pred_mode_c <= 30) { scan_idx_c = SCAN_HORIZ; } } lc->tu.cross_pf = 0; if (cbf_luma) ff_hevc_hls_residual_coding(s, x0, y0, log2_trafo_size, scan_idx, 0); if (s->ps.sps->chroma_format_idc && (log2_trafo_size > 2 || s->ps.sps->chroma_format_idc == 3)) { int trafo_size_h = 1 << (log2_trafo_size_c + s->ps.sps->hshift[1]); int trafo_size_v = 1 << (log2_trafo_size_c + s->ps.sps->vshift[1]); lc->tu.cross_pf = (s->ps.pps->cross_component_prediction_enabled_flag && cbf_luma && (lc->cu.pred_mode == MODE_INTER || (lc->tu.chroma_mode_c == 4))); if (lc->tu.cross_pf) { hls_cross_component_pred(s, 0); } for (i = 0; i < (s->ps.sps->chroma_format_idc == 2 ? 2 : 1); i++) { if (lc->cu.pred_mode == MODE_INTRA) { ff_hevc_set_neighbour_available(s, x0, y0 + (i << log2_trafo_size_c), trafo_size_h, trafo_size_v); s->hpc.intra_pred[log2_trafo_size_c - 2](s, x0, y0 + (i << log2_trafo_size_c), 1); } if (cbf_cb[i]) ff_hevc_hls_residual_coding(s, x0, y0 + (i << log2_trafo_size_c), log2_trafo_size_c, scan_idx_c, 1); else if (lc->tu.cross_pf) { ptrdiff_t stride = s->frame->linesize[1]; int hshift = s->ps.sps->hshift[1]; int vshift = s->ps.sps->vshift[1]; int16_t *coeffs_y = (int16_t*)lc->edge_emu_buffer; int16_t *coeffs = (int16_t*)lc->edge_emu_buffer2; int size = 1 << log2_trafo_size_c; uint8_t *dst = &s->frame->data[1][(y0 >> vshift) * stride + ((x0 >> hshift) << s->ps.sps->pixel_shift)]; for (i = 0; i < (size * size); i++) { coeffs[i] = ((lc->tu.res_scale_val * coeffs_y[i]) >> 3); } s->hevcdsp.add_residual[log2_trafo_size_c-2](dst, coeffs, stride); } } if (lc->tu.cross_pf) { hls_cross_component_pred(s, 1); } for (i = 0; i < (s->ps.sps->chroma_format_idc == 2 
? 2 : 1); i++) { if (lc->cu.pred_mode == MODE_INTRA) { ff_hevc_set_neighbour_available(s, x0, y0 + (i << log2_trafo_size_c), trafo_size_h, trafo_size_v); s->hpc.intra_pred[log2_trafo_size_c - 2](s, x0, y0 + (i << log2_trafo_size_c), 2); } if (cbf_cr[i]) ff_hevc_hls_residual_coding(s, x0, y0 + (i << log2_trafo_size_c), log2_trafo_size_c, scan_idx_c, 2); else if (lc->tu.cross_pf) { ptrdiff_t stride = s->frame->linesize[2]; int hshift = s->ps.sps->hshift[2]; int vshift = s->ps.sps->vshift[2]; int16_t *coeffs_y = (int16_t*)lc->edge_emu_buffer; int16_t *coeffs = (int16_t*)lc->edge_emu_buffer2; int size = 1 << log2_trafo_size_c; uint8_t *dst = &s->frame->data[2][(y0 >> vshift) * stride + ((x0 >> hshift) << s->ps.sps->pixel_shift)]; for (i = 0; i < (size * size); i++) { coeffs[i] = ((lc->tu.res_scale_val * coeffs_y[i]) >> 3); } s->hevcdsp.add_residual[log2_trafo_size_c-2](dst, coeffs, stride); } } } else if (s->ps.sps->chroma_format_idc && blk_idx == 3) { int trafo_size_h = 1 << (log2_trafo_size + 1); int trafo_size_v = 1 << (log2_trafo_size + s->ps.sps->vshift[1]); for (i = 0; i < (s->ps.sps->chroma_format_idc == 2 ? 2 : 1); i++) { if (lc->cu.pred_mode == MODE_INTRA) { ff_hevc_set_neighbour_available(s, xBase, yBase + (i << log2_trafo_size), trafo_size_h, trafo_size_v); s->hpc.intra_pred[log2_trafo_size - 2](s, xBase, yBase + (i << log2_trafo_size), 1); } if (cbf_cb[i]) ff_hevc_hls_residual_coding(s, xBase, yBase + (i << log2_trafo_size), log2_trafo_size, scan_idx_c, 1); } for (i = 0; i < (s->ps.sps->chroma_format_idc == 2 ? 
2 : 1); i++) { if (lc->cu.pred_mode == MODE_INTRA) { ff_hevc_set_neighbour_available(s, xBase, yBase + (i << log2_trafo_size), trafo_size_h, trafo_size_v); s->hpc.intra_pred[log2_trafo_size - 2](s, xBase, yBase + (i << log2_trafo_size), 2); } if (cbf_cr[i]) ff_hevc_hls_residual_coding(s, xBase, yBase + (i << log2_trafo_size), log2_trafo_size, scan_idx_c, 2); } } } else if (s->ps.sps->chroma_format_idc && lc->cu.pred_mode == MODE_INTRA) { if (log2_trafo_size > 2 || s->ps.sps->chroma_format_idc == 3) { int trafo_size_h = 1 << (log2_trafo_size_c + s->ps.sps->hshift[1]); int trafo_size_v = 1 << (log2_trafo_size_c + s->ps.sps->vshift[1]); ff_hevc_set_neighbour_available(s, x0, y0, trafo_size_h, trafo_size_v); s->hpc.intra_pred[log2_trafo_size_c - 2](s, x0, y0, 1); s->hpc.intra_pred[log2_trafo_size_c - 2](s, x0, y0, 2); if (s->ps.sps->chroma_format_idc == 2) { ff_hevc_set_neighbour_available(s, x0, y0 + (1 << log2_trafo_size_c), trafo_size_h, trafo_size_v); s->hpc.intra_pred[log2_trafo_size_c - 2](s, x0, y0 + (1 << log2_trafo_size_c), 1); s->hpc.intra_pred[log2_trafo_size_c - 2](s, x0, y0 + (1 << log2_trafo_size_c), 2); } } else if (blk_idx == 3) { int trafo_size_h = 1 << (log2_trafo_size + 1); int trafo_size_v = 1 << (log2_trafo_size + s->ps.sps->vshift[1]); ff_hevc_set_neighbour_available(s, xBase, yBase, trafo_size_h, trafo_size_v); s->hpc.intra_pred[log2_trafo_size - 2](s, xBase, yBase, 1); s->hpc.intra_pred[log2_trafo_size - 2](s, xBase, yBase, 2); if (s->ps.sps->chroma_format_idc == 2) { ff_hevc_set_neighbour_available(s, xBase, yBase + (1 << (log2_trafo_size)), trafo_size_h, trafo_size_v); s->hpc.intra_pred[log2_trafo_size - 2](s, xBase, yBase + (1 << (log2_trafo_size)), 1); s->hpc.intra_pred[log2_trafo_size - 2](s, xBase, yBase + (1 << (log2_trafo_size)), 2); } } } return 0; } static void set_deblocking_bypass(HEVCContext *s, int x0, int y0, int log2_cb_size) { int cb_size = 1 << log2_cb_size; int log2_min_pu_size = s->ps.sps->log2_min_pu_size; int min_pu_width = 
s->ps.sps->min_pu_width; int x_end = FFMIN(x0 + cb_size, s->ps.sps->width); int y_end = FFMIN(y0 + cb_size, s->ps.sps->height); int i, j; for (j = (y0 >> log2_min_pu_size); j < (y_end >> log2_min_pu_size); j++) for (i = (x0 >> log2_min_pu_size); i < (x_end >> log2_min_pu_size); i++) s->is_pcm[i + j * min_pu_width] = 2; } static int hls_transform_tree(HEVCContext *s, int x0, int y0, int xBase, int yBase, int cb_xBase, int cb_yBase, int log2_cb_size, int log2_trafo_size, int trafo_depth, int blk_idx, const int *base_cbf_cb, const int *base_cbf_cr) { HEVCLocalContext *lc = s->HEVClc; uint8_t split_transform_flag; int cbf_cb[2]; int cbf_cr[2]; int ret; cbf_cb[0] = base_cbf_cb[0]; cbf_cb[1] = base_cbf_cb[1]; cbf_cr[0] = base_cbf_cr[0]; cbf_cr[1] = base_cbf_cr[1]; if (lc->cu.intra_split_flag) { if (trafo_depth == 1) { lc->tu.intra_pred_mode = lc->pu.intra_pred_mode[blk_idx]; if (s->ps.sps->chroma_format_idc == 3) { lc->tu.intra_pred_mode_c = lc->pu.intra_pred_mode_c[blk_idx]; lc->tu.chroma_mode_c = lc->pu.chroma_mode_c[blk_idx]; } else { lc->tu.intra_pred_mode_c = lc->pu.intra_pred_mode_c[0]; lc->tu.chroma_mode_c = lc->pu.chroma_mode_c[0]; } } } else { lc->tu.intra_pred_mode = lc->pu.intra_pred_mode[0]; lc->tu.intra_pred_mode_c = lc->pu.intra_pred_mode_c[0]; lc->tu.chroma_mode_c = lc->pu.chroma_mode_c[0]; } if (log2_trafo_size <= s->ps.sps->log2_max_trafo_size && log2_trafo_size > s->ps.sps->log2_min_tb_size && trafo_depth < lc->cu.max_trafo_depth && !(lc->cu.intra_split_flag && trafo_depth == 0)) { split_transform_flag = ff_hevc_split_transform_flag_decode(s, log2_trafo_size); } else { int inter_split = s->ps.sps->max_transform_hierarchy_depth_inter == 0 && lc->cu.pred_mode == MODE_INTER && lc->cu.part_mode != PART_2Nx2N && trafo_depth == 0; split_transform_flag = log2_trafo_size > s->ps.sps->log2_max_trafo_size || (lc->cu.intra_split_flag && trafo_depth == 0) || inter_split; } if (s->ps.sps->chroma_format_idc && (log2_trafo_size > 2 || s->ps.sps->chroma_format_idc == 
3)) { if (trafo_depth == 0 || cbf_cb[0]) { cbf_cb[0] = ff_hevc_cbf_cb_cr_decode(s, trafo_depth); if (s->ps.sps->chroma_format_idc == 2 && (!split_transform_flag || log2_trafo_size == 3)) { cbf_cb[1] = ff_hevc_cbf_cb_cr_decode(s, trafo_depth); } } if (trafo_depth == 0 || cbf_cr[0]) { cbf_cr[0] = ff_hevc_cbf_cb_cr_decode(s, trafo_depth); if (s->ps.sps->chroma_format_idc == 2 && (!split_transform_flag || log2_trafo_size == 3)) { cbf_cr[1] = ff_hevc_cbf_cb_cr_decode(s, trafo_depth); } } } if (split_transform_flag) { const int trafo_size_split = 1 << (log2_trafo_size - 1); const int x1 = x0 + trafo_size_split; const int y1 = y0 + trafo_size_split; #define SUBDIVIDE(x, y, idx) \ do { \ ret = hls_transform_tree(s, x, y, x0, y0, cb_xBase, cb_yBase, log2_cb_size, \ log2_trafo_size - 1, trafo_depth + 1, idx, \ cbf_cb, cbf_cr); \ if (ret < 0) \ return ret; \ } while (0) SUBDIVIDE(x0, y0, 0); SUBDIVIDE(x1, y0, 1); SUBDIVIDE(x0, y1, 2); SUBDIVIDE(x1, y1, 3); #undef SUBDIVIDE } else { int min_tu_size = 1 << s->ps.sps->log2_min_tb_size; int log2_min_tu_size = s->ps.sps->log2_min_tb_size; int min_tu_width = s->ps.sps->min_tb_width; int cbf_luma = 1; if (lc->cu.pred_mode == MODE_INTRA || trafo_depth != 0 || cbf_cb[0] || cbf_cr[0] || (s->ps.sps->chroma_format_idc == 2 && (cbf_cb[1] || cbf_cr[1]))) { cbf_luma = ff_hevc_cbf_luma_decode(s, trafo_depth); } ret = hls_transform_unit(s, x0, y0, xBase, yBase, cb_xBase, cb_yBase, log2_cb_size, log2_trafo_size, blk_idx, cbf_luma, cbf_cb, cbf_cr); if (ret < 0) return ret; // TODO: store cbf_luma somewhere else if (cbf_luma) { int i, j; for (i = 0; i < (1 << log2_trafo_size); i += min_tu_size) for (j = 0; j < (1 << log2_trafo_size); j += min_tu_size) { int x_tu = (x0 + j) >> log2_min_tu_size; int y_tu = (y0 + i) >> log2_min_tu_size; s->cbf_luma[y_tu * min_tu_width + x_tu] = 1; } } if (!s->sh.disable_deblocking_filter_flag) { ff_hevc_deblocking_boundary_strengths(s, x0, y0, log2_trafo_size); if (s->ps.pps->transquant_bypass_enable_flag && 
lc->cu.cu_transquant_bypass_flag)
                /* Transquant-bypass blocks are marked so deblocking skips them. */
                set_deblocking_bypass(s, x0, y0, log2_trafo_size);
        }
    }

    return 0;
}

/**
 * Decode a PCM (raw sample) coding block: read the uncompressed luma and
 * chroma samples straight from the bitstream and write them into the frame.
 *
 * @param s            decoder context
 * @param x0, y0       luma position of the coding block
 * @param log2_cb_size log2 of the coding block size
 * @return 0 on success, a negative AVERROR code on failure
 */
static int hls_pcm_sample(HEVCContext *s, int x0, int y0, int log2_cb_size)
{
    HEVCLocalContext *lc = s->HEVClc;
    GetBitContext gb;
    int cb_size   = 1 << log2_cb_size;
    ptrdiff_t stride0 = s->frame->linesize[0];
    ptrdiff_t stride1 = s->frame->linesize[1];
    ptrdiff_t stride2 = s->frame->linesize[2];
    /* Destination pointers for the three planes; chroma coordinates are the
     * luma coordinates scaled by the per-plane subsampling shifts. */
    uint8_t *dst0 = &s->frame->data[0][y0 * stride0 + (x0 << s->ps.sps->pixel_shift)];
    uint8_t *dst1 = &s->frame->data[1][(y0 >> s->ps.sps->vshift[1]) * stride1 + ((x0 >> s->ps.sps->hshift[1]) << s->ps.sps->pixel_shift)];
    uint8_t *dst2 = &s->frame->data[2][(y0 >> s->ps.sps->vshift[2]) * stride2 + ((x0 >> s->ps.sps->hshift[2]) << s->ps.sps->pixel_shift)];

    /* Total payload in bits: luma samples plus both subsampled chroma planes,
     * each at its own PCM bit depth. */
    int length = cb_size * cb_size * s->ps.sps->pcm.bit_depth +
                 (((cb_size >> s->ps.sps->hshift[1]) * (cb_size >> s->ps.sps->vshift[1])) +
                  ((cb_size >> s->ps.sps->hshift[2]) * (cb_size >> s->ps.sps->vshift[2]))) *
                 s->ps.sps->pcm.bit_depth_chroma;
    const uint8_t *pcm = skip_bytes(&lc->cc, (length + 7) >> 3);
    int ret;

    if (!s->sh.disable_deblocking_filter_flag)
        ff_hevc_deblocking_boundary_strengths(s, x0, y0, log2_cb_size);

    /* init_get_bits() validates pcm/length and fails on a NULL buffer
     * (e.g. when skip_bytes() overran the input). */
    ret = init_get_bits(&gb, pcm, length);
    if (ret < 0)
        return ret;

    s->hevcdsp.put_pcm(dst0, stride0, cb_size, cb_size, &gb, s->ps.sps->pcm.bit_depth);
    if (s->ps.sps->chroma_format_idc) {
        s->hevcdsp.put_pcm(dst1, stride1,
                           cb_size >> s->ps.sps->hshift[1],
                           cb_size >> s->ps.sps->vshift[1],
                           &gb, s->ps.sps->pcm.bit_depth_chroma);
        s->hevcdsp.put_pcm(dst2, stride2,
                           cb_size >> s->ps.sps->hshift[2],
                           cb_size >> s->ps.sps->vshift[2],
                           &gb, s->ps.sps->pcm.bit_depth_chroma);
    }

    return 0;
}

/**
 * 8.5.3.2.2.1 Luma sample unidirectional interpolation process
 *
 * @param s HEVC decoding context
 * @param dst target buffer for block data at block position
 * @param dststride stride of the dst buffer
 * @param ref reference picture buffer at origin (0, 0)
 * @param mv motion vector (relative to block position) to get pixel data from
 * @param x_off
horizontal position of block from origin (0, 0) * @param y_off vertical position of block from origin (0, 0) * @param block_w width of block * @param block_h height of block * @param luma_weight weighting factor applied to the luma prediction * @param luma_offset additive offset applied to the luma prediction value */ static void luma_mc_uni(HEVCContext *s, uint8_t *dst, ptrdiff_t dststride, AVFrame *ref, const Mv *mv, int x_off, int y_off, int block_w, int block_h, int luma_weight, int luma_offset) { HEVCLocalContext *lc = s->HEVClc; uint8_t *src = ref->data[0]; ptrdiff_t srcstride = ref->linesize[0]; int pic_width = s->ps.sps->width; int pic_height = s->ps.sps->height; int mx = mv->x & 3; int my = mv->y & 3; int weight_flag = (s->sh.slice_type == HEVC_SLICE_P && s->ps.pps->weighted_pred_flag) || (s->sh.slice_type == HEVC_SLICE_B && s->ps.pps->weighted_bipred_flag); int idx = ff_hevc_pel_weight[block_w]; x_off += mv->x >> 2; y_off += mv->y >> 2; src += y_off * srcstride + (x_off * (1 << s->ps.sps->pixel_shift)); if (x_off < QPEL_EXTRA_BEFORE || y_off < QPEL_EXTRA_AFTER || x_off >= pic_width - block_w - QPEL_EXTRA_AFTER || y_off >= pic_height - block_h - QPEL_EXTRA_AFTER) { const ptrdiff_t edge_emu_stride = EDGE_EMU_BUFFER_STRIDE << s->ps.sps->pixel_shift; int offset = QPEL_EXTRA_BEFORE * srcstride + (QPEL_EXTRA_BEFORE << s->ps.sps->pixel_shift); int buf_offset = QPEL_EXTRA_BEFORE * edge_emu_stride + (QPEL_EXTRA_BEFORE << s->ps.sps->pixel_shift); s->vdsp.emulated_edge_mc(lc->edge_emu_buffer, src - offset, edge_emu_stride, srcstride, block_w + QPEL_EXTRA, block_h + QPEL_EXTRA, x_off - QPEL_EXTRA_BEFORE, y_off - QPEL_EXTRA_BEFORE, pic_width, pic_height); src = lc->edge_emu_buffer + buf_offset; srcstride = edge_emu_stride; } if (!weight_flag) s->hevcdsp.put_hevc_qpel_uni[idx][!!my][!!mx](dst, dststride, src, srcstride, block_h, mx, my, block_w); else s->hevcdsp.put_hevc_qpel_uni_w[idx][!!my][!!mx](dst, dststride, src, srcstride, block_h, s->sh.luma_log2_weight_denom, 
luma_weight, luma_offset, mx, my, block_w); } /** * 8.5.3.2.2.1 Luma sample bidirectional interpolation process * * @param s HEVC decoding context * @param dst target buffer for block data at block position * @param dststride stride of the dst buffer * @param ref0 reference picture0 buffer at origin (0, 0) * @param mv0 motion vector0 (relative to block position) to get pixel data from * @param x_off horizontal position of block from origin (0, 0) * @param y_off vertical position of block from origin (0, 0) * @param block_w width of block * @param block_h height of block * @param ref1 reference picture1 buffer at origin (0, 0) * @param mv1 motion vector1 (relative to block position) to get pixel data from * @param current_mv current motion vector structure */ static void luma_mc_bi(HEVCContext *s, uint8_t *dst, ptrdiff_t dststride, AVFrame *ref0, const Mv *mv0, int x_off, int y_off, int block_w, int block_h, AVFrame *ref1, const Mv *mv1, struct MvField *current_mv) { HEVCLocalContext *lc = s->HEVClc; ptrdiff_t src0stride = ref0->linesize[0]; ptrdiff_t src1stride = ref1->linesize[0]; int pic_width = s->ps.sps->width; int pic_height = s->ps.sps->height; int mx0 = mv0->x & 3; int my0 = mv0->y & 3; int mx1 = mv1->x & 3; int my1 = mv1->y & 3; int weight_flag = (s->sh.slice_type == HEVC_SLICE_P && s->ps.pps->weighted_pred_flag) || (s->sh.slice_type == HEVC_SLICE_B && s->ps.pps->weighted_bipred_flag); int x_off0 = x_off + (mv0->x >> 2); int y_off0 = y_off + (mv0->y >> 2); int x_off1 = x_off + (mv1->x >> 2); int y_off1 = y_off + (mv1->y >> 2); int idx = ff_hevc_pel_weight[block_w]; uint8_t *src0 = ref0->data[0] + y_off0 * src0stride + (int)((unsigned)x_off0 << s->ps.sps->pixel_shift); uint8_t *src1 = ref1->data[0] + y_off1 * src1stride + (int)((unsigned)x_off1 << s->ps.sps->pixel_shift); if (x_off0 < QPEL_EXTRA_BEFORE || y_off0 < QPEL_EXTRA_AFTER || x_off0 >= pic_width - block_w - QPEL_EXTRA_AFTER || y_off0 >= pic_height - block_h - QPEL_EXTRA_AFTER) { const ptrdiff_t 
edge_emu_stride = EDGE_EMU_BUFFER_STRIDE << s->ps.sps->pixel_shift; int offset = QPEL_EXTRA_BEFORE * src0stride + (QPEL_EXTRA_BEFORE << s->ps.sps->pixel_shift); int buf_offset = QPEL_EXTRA_BEFORE * edge_emu_stride + (QPEL_EXTRA_BEFORE << s->ps.sps->pixel_shift); s->vdsp.emulated_edge_mc(lc->edge_emu_buffer, src0 - offset, edge_emu_stride, src0stride, block_w + QPEL_EXTRA, block_h + QPEL_EXTRA, x_off0 - QPEL_EXTRA_BEFORE, y_off0 - QPEL_EXTRA_BEFORE, pic_width, pic_height); src0 = lc->edge_emu_buffer + buf_offset; src0stride = edge_emu_stride; } if (x_off1 < QPEL_EXTRA_BEFORE || y_off1 < QPEL_EXTRA_AFTER || x_off1 >= pic_width - block_w - QPEL_EXTRA_AFTER || y_off1 >= pic_height - block_h - QPEL_EXTRA_AFTER) { const ptrdiff_t edge_emu_stride = EDGE_EMU_BUFFER_STRIDE << s->ps.sps->pixel_shift; int offset = QPEL_EXTRA_BEFORE * src1stride + (QPEL_EXTRA_BEFORE << s->ps.sps->pixel_shift); int buf_offset = QPEL_EXTRA_BEFORE * edge_emu_stride + (QPEL_EXTRA_BEFORE << s->ps.sps->pixel_shift); s->vdsp.emulated_edge_mc(lc->edge_emu_buffer2, src1 - offset, edge_emu_stride, src1stride, block_w + QPEL_EXTRA, block_h + QPEL_EXTRA, x_off1 - QPEL_EXTRA_BEFORE, y_off1 - QPEL_EXTRA_BEFORE, pic_width, pic_height); src1 = lc->edge_emu_buffer2 + buf_offset; src1stride = edge_emu_stride; } s->hevcdsp.put_hevc_qpel[idx][!!my0][!!mx0](lc->tmp, src0, src0stride, block_h, mx0, my0, block_w); if (!weight_flag) s->hevcdsp.put_hevc_qpel_bi[idx][!!my1][!!mx1](dst, dststride, src1, src1stride, lc->tmp, block_h, mx1, my1, block_w); else s->hevcdsp.put_hevc_qpel_bi_w[idx][!!my1][!!mx1](dst, dststride, src1, src1stride, lc->tmp, block_h, s->sh.luma_log2_weight_denom, s->sh.luma_weight_l0[current_mv->ref_idx[0]], s->sh.luma_weight_l1[current_mv->ref_idx[1]], s->sh.luma_offset_l0[current_mv->ref_idx[0]], s->sh.luma_offset_l1[current_mv->ref_idx[1]], mx1, my1, block_w); } /** * 8.5.3.2.2.2 Chroma sample uniprediction interpolation process * * @param s HEVC decoding context * @param dst1 target buffer for 
block data at block position (U plane) * @param dst2 target buffer for block data at block position (V plane) * @param dststride stride of the dst1 and dst2 buffers * @param ref reference picture buffer at origin (0, 0) * @param mv motion vector (relative to block position) to get pixel data from * @param x_off horizontal position of block from origin (0, 0) * @param y_off vertical position of block from origin (0, 0) * @param block_w width of block * @param block_h height of block * @param chroma_weight weighting factor applied to the chroma prediction * @param chroma_offset additive offset applied to the chroma prediction value */ static void chroma_mc_uni(HEVCContext *s, uint8_t *dst0, ptrdiff_t dststride, uint8_t *src0, ptrdiff_t srcstride, int reflist, int x_off, int y_off, int block_w, int block_h, struct MvField *current_mv, int chroma_weight, int chroma_offset) { HEVCLocalContext *lc = s->HEVClc; int pic_width = s->ps.sps->width >> s->ps.sps->hshift[1]; int pic_height = s->ps.sps->height >> s->ps.sps->vshift[1]; const Mv *mv = &current_mv->mv[reflist]; int weight_flag = (s->sh.slice_type == HEVC_SLICE_P && s->ps.pps->weighted_pred_flag) || (s->sh.slice_type == HEVC_SLICE_B && s->ps.pps->weighted_bipred_flag); int idx = ff_hevc_pel_weight[block_w]; int hshift = s->ps.sps->hshift[1]; int vshift = s->ps.sps->vshift[1]; intptr_t mx = av_mod_uintp2(mv->x, 2 + hshift); intptr_t my = av_mod_uintp2(mv->y, 2 + vshift); intptr_t _mx = mx << (1 - hshift); intptr_t _my = my << (1 - vshift); x_off += mv->x >> (2 + hshift); y_off += mv->y >> (2 + vshift); src0 += y_off * srcstride + (x_off * (1 << s->ps.sps->pixel_shift)); if (x_off < EPEL_EXTRA_BEFORE || y_off < EPEL_EXTRA_AFTER || x_off >= pic_width - block_w - EPEL_EXTRA_AFTER || y_off >= pic_height - block_h - EPEL_EXTRA_AFTER) { const int edge_emu_stride = EDGE_EMU_BUFFER_STRIDE << s->ps.sps->pixel_shift; int offset0 = EPEL_EXTRA_BEFORE * (srcstride + (1 << s->ps.sps->pixel_shift)); int buf_offset0 = 
EPEL_EXTRA_BEFORE * (edge_emu_stride + (1 << s->ps.sps->pixel_shift)); s->vdsp.emulated_edge_mc(lc->edge_emu_buffer, src0 - offset0, edge_emu_stride, srcstride, block_w + EPEL_EXTRA, block_h + EPEL_EXTRA, x_off - EPEL_EXTRA_BEFORE, y_off - EPEL_EXTRA_BEFORE, pic_width, pic_height); src0 = lc->edge_emu_buffer + buf_offset0; srcstride = edge_emu_stride; } if (!weight_flag) s->hevcdsp.put_hevc_epel_uni[idx][!!my][!!mx](dst0, dststride, src0, srcstride, block_h, _mx, _my, block_w); else s->hevcdsp.put_hevc_epel_uni_w[idx][!!my][!!mx](dst0, dststride, src0, srcstride, block_h, s->sh.chroma_log2_weight_denom, chroma_weight, chroma_offset, _mx, _my, block_w); } /** * 8.5.3.2.2.2 Chroma sample bidirectional interpolation process * * @param s HEVC decoding context * @param dst target buffer for block data at block position * @param dststride stride of the dst buffer * @param ref0 reference picture0 buffer at origin (0, 0) * @param mv0 motion vector0 (relative to block position) to get pixel data from * @param x_off horizontal position of block from origin (0, 0) * @param y_off vertical position of block from origin (0, 0) * @param block_w width of block * @param block_h height of block * @param ref1 reference picture1 buffer at origin (0, 0) * @param mv1 motion vector1 (relative to block position) to get pixel data from * @param current_mv current motion vector structure * @param cidx chroma component(cb, cr) */ static void chroma_mc_bi(HEVCContext *s, uint8_t *dst0, ptrdiff_t dststride, AVFrame *ref0, AVFrame *ref1, int x_off, int y_off, int block_w, int block_h, struct MvField *current_mv, int cidx) { HEVCLocalContext *lc = s->HEVClc; uint8_t *src1 = ref0->data[cidx+1]; uint8_t *src2 = ref1->data[cidx+1]; ptrdiff_t src1stride = ref0->linesize[cidx+1]; ptrdiff_t src2stride = ref1->linesize[cidx+1]; int weight_flag = (s->sh.slice_type == HEVC_SLICE_P && s->ps.pps->weighted_pred_flag) || (s->sh.slice_type == HEVC_SLICE_B && s->ps.pps->weighted_bipred_flag); int pic_width = 
s->ps.sps->width >> s->ps.sps->hshift[1]; int pic_height = s->ps.sps->height >> s->ps.sps->vshift[1]; Mv *mv0 = &current_mv->mv[0]; Mv *mv1 = &current_mv->mv[1]; int hshift = s->ps.sps->hshift[1]; int vshift = s->ps.sps->vshift[1]; intptr_t mx0 = av_mod_uintp2(mv0->x, 2 + hshift); intptr_t my0 = av_mod_uintp2(mv0->y, 2 + vshift); intptr_t mx1 = av_mod_uintp2(mv1->x, 2 + hshift); intptr_t my1 = av_mod_uintp2(mv1->y, 2 + vshift); intptr_t _mx0 = mx0 << (1 - hshift); intptr_t _my0 = my0 << (1 - vshift); intptr_t _mx1 = mx1 << (1 - hshift); intptr_t _my1 = my1 << (1 - vshift); int x_off0 = x_off + (mv0->x >> (2 + hshift)); int y_off0 = y_off + (mv0->y >> (2 + vshift)); int x_off1 = x_off + (mv1->x >> (2 + hshift)); int y_off1 = y_off + (mv1->y >> (2 + vshift)); int idx = ff_hevc_pel_weight[block_w]; src1 += y_off0 * src1stride + (int)((unsigned)x_off0 << s->ps.sps->pixel_shift); src2 += y_off1 * src2stride + (int)((unsigned)x_off1 << s->ps.sps->pixel_shift); if (x_off0 < EPEL_EXTRA_BEFORE || y_off0 < EPEL_EXTRA_AFTER || x_off0 >= pic_width - block_w - EPEL_EXTRA_AFTER || y_off0 >= pic_height - block_h - EPEL_EXTRA_AFTER) { const int edge_emu_stride = EDGE_EMU_BUFFER_STRIDE << s->ps.sps->pixel_shift; int offset1 = EPEL_EXTRA_BEFORE * (src1stride + (1 << s->ps.sps->pixel_shift)); int buf_offset1 = EPEL_EXTRA_BEFORE * (edge_emu_stride + (1 << s->ps.sps->pixel_shift)); s->vdsp.emulated_edge_mc(lc->edge_emu_buffer, src1 - offset1, edge_emu_stride, src1stride, block_w + EPEL_EXTRA, block_h + EPEL_EXTRA, x_off0 - EPEL_EXTRA_BEFORE, y_off0 - EPEL_EXTRA_BEFORE, pic_width, pic_height); src1 = lc->edge_emu_buffer + buf_offset1; src1stride = edge_emu_stride; } if (x_off1 < EPEL_EXTRA_BEFORE || y_off1 < EPEL_EXTRA_AFTER || x_off1 >= pic_width - block_w - EPEL_EXTRA_AFTER || y_off1 >= pic_height - block_h - EPEL_EXTRA_AFTER) { const int edge_emu_stride = EDGE_EMU_BUFFER_STRIDE << s->ps.sps->pixel_shift; int offset1 = EPEL_EXTRA_BEFORE * (src2stride + (1 << s->ps.sps->pixel_shift)); 
int buf_offset1 = EPEL_EXTRA_BEFORE * (edge_emu_stride + (1 << s->ps.sps->pixel_shift)); s->vdsp.emulated_edge_mc(lc->edge_emu_buffer2, src2 - offset1, edge_emu_stride, src2stride, block_w + EPEL_EXTRA, block_h + EPEL_EXTRA, x_off1 - EPEL_EXTRA_BEFORE, y_off1 - EPEL_EXTRA_BEFORE, pic_width, pic_height); src2 = lc->edge_emu_buffer2 + buf_offset1; src2stride = edge_emu_stride; } s->hevcdsp.put_hevc_epel[idx][!!my0][!!mx0](lc->tmp, src1, src1stride, block_h, _mx0, _my0, block_w); if (!weight_flag) s->hevcdsp.put_hevc_epel_bi[idx][!!my1][!!mx1](dst0, s->frame->linesize[cidx+1], src2, src2stride, lc->tmp, block_h, _mx1, _my1, block_w); else s->hevcdsp.put_hevc_epel_bi_w[idx][!!my1][!!mx1](dst0, s->frame->linesize[cidx+1], src2, src2stride, lc->tmp, block_h, s->sh.chroma_log2_weight_denom, s->sh.chroma_weight_l0[current_mv->ref_idx[0]][cidx], s->sh.chroma_weight_l1[current_mv->ref_idx[1]][cidx], s->sh.chroma_offset_l0[current_mv->ref_idx[0]][cidx], s->sh.chroma_offset_l1[current_mv->ref_idx[1]][cidx], _mx1, _my1, block_w); } static void hevc_await_progress(HEVCContext *s, HEVCFrame *ref, const Mv *mv, int y0, int height) { if (s->threads_type == FF_THREAD_FRAME ) { int y = FFMAX(0, (mv->y >> 2) + y0 + height + 9); ff_thread_await_progress(&ref->tf, y, 0); } } static void hevc_luma_mv_mvp_mode(HEVCContext *s, int x0, int y0, int nPbW, int nPbH, int log2_cb_size, int part_idx, int merge_idx, MvField *mv) { HEVCLocalContext *lc = s->HEVClc; enum InterPredIdc inter_pred_idc = PRED_L0; int mvp_flag; ff_hevc_set_neighbour_available(s, x0, y0, nPbW, nPbH); mv->pred_flag = 0; if (s->sh.slice_type == HEVC_SLICE_B) inter_pred_idc = ff_hevc_inter_pred_idc_decode(s, nPbW, nPbH); if (inter_pred_idc != PRED_L1) { if (s->sh.nb_refs[L0]) mv->ref_idx[0]= ff_hevc_ref_idx_lx_decode(s, s->sh.nb_refs[L0]); mv->pred_flag = PF_L0; ff_hevc_hls_mvd_coding(s, x0, y0, 0); mvp_flag = ff_hevc_mvp_lx_flag_decode(s); ff_hevc_luma_mv_mvp_mode(s, x0, y0, nPbW, nPbH, log2_cb_size, part_idx, merge_idx, 
mv, mvp_flag, 0); mv->mv[0].x += lc->pu.mvd.x; mv->mv[0].y += lc->pu.mvd.y; } if (inter_pred_idc != PRED_L0) { if (s->sh.nb_refs[L1]) mv->ref_idx[1]= ff_hevc_ref_idx_lx_decode(s, s->sh.nb_refs[L1]); if (s->sh.mvd_l1_zero_flag == 1 && inter_pred_idc == PRED_BI) { AV_ZERO32(&lc->pu.mvd); } else { ff_hevc_hls_mvd_coding(s, x0, y0, 1); } mv->pred_flag += PF_L1; mvp_flag = ff_hevc_mvp_lx_flag_decode(s); ff_hevc_luma_mv_mvp_mode(s, x0, y0, nPbW, nPbH, log2_cb_size, part_idx, merge_idx, mv, mvp_flag, 1); mv->mv[1].x += lc->pu.mvd.x; mv->mv[1].y += lc->pu.mvd.y; } } static void hls_prediction_unit(HEVCContext *s, int x0, int y0, int nPbW, int nPbH, int log2_cb_size, int partIdx, int idx) { #define POS(c_idx, x, y) \ &s->frame->data[c_idx][((y) >> s->ps.sps->vshift[c_idx]) * s->frame->linesize[c_idx] + \ (((x) >> s->ps.sps->hshift[c_idx]) << s->ps.sps->pixel_shift)] HEVCLocalContext *lc = s->HEVClc; int merge_idx = 0; struct MvField current_mv = {{{ 0 }}}; int min_pu_width = s->ps.sps->min_pu_width; MvField *tab_mvf = s->ref->tab_mvf; RefPicList *refPicList = s->ref->refPicList; HEVCFrame *ref0 = NULL, *ref1 = NULL; uint8_t *dst0 = POS(0, x0, y0); uint8_t *dst1 = POS(1, x0, y0); uint8_t *dst2 = POS(2, x0, y0); int log2_min_cb_size = s->ps.sps->log2_min_cb_size; int min_cb_width = s->ps.sps->min_cb_width; int x_cb = x0 >> log2_min_cb_size; int y_cb = y0 >> log2_min_cb_size; int x_pu, y_pu; int i, j; int skip_flag = SAMPLE_CTB(s->skip_flag, x_cb, y_cb); if (!skip_flag) lc->pu.merge_flag = ff_hevc_merge_flag_decode(s); if (skip_flag || lc->pu.merge_flag) { if (s->sh.max_num_merge_cand > 1) merge_idx = ff_hevc_merge_idx_decode(s); else merge_idx = 0; ff_hevc_luma_mv_merge_mode(s, x0, y0, nPbW, nPbH, log2_cb_size, partIdx, merge_idx, &current_mv); } else { hevc_luma_mv_mvp_mode(s, x0, y0, nPbW, nPbH, log2_cb_size, partIdx, merge_idx, &current_mv); } x_pu = x0 >> s->ps.sps->log2_min_pu_size; y_pu = y0 >> s->ps.sps->log2_min_pu_size; for (j = 0; j < nPbH >> 
s->ps.sps->log2_min_pu_size; j++) for (i = 0; i < nPbW >> s->ps.sps->log2_min_pu_size; i++) tab_mvf[(y_pu + j) * min_pu_width + x_pu + i] = current_mv; if (current_mv.pred_flag & PF_L0) { ref0 = refPicList[0].ref[current_mv.ref_idx[0]]; if (!ref0) return; hevc_await_progress(s, ref0, &current_mv.mv[0], y0, nPbH); } if (current_mv.pred_flag & PF_L1) { ref1 = refPicList[1].ref[current_mv.ref_idx[1]]; if (!ref1) return; hevc_await_progress(s, ref1, &current_mv.mv[1], y0, nPbH); } if (current_mv.pred_flag == PF_L0) { int x0_c = x0 >> s->ps.sps->hshift[1]; int y0_c = y0 >> s->ps.sps->vshift[1]; int nPbW_c = nPbW >> s->ps.sps->hshift[1]; int nPbH_c = nPbH >> s->ps.sps->vshift[1]; luma_mc_uni(s, dst0, s->frame->linesize[0], ref0->frame, &current_mv.mv[0], x0, y0, nPbW, nPbH, s->sh.luma_weight_l0[current_mv.ref_idx[0]], s->sh.luma_offset_l0[current_mv.ref_idx[0]]); if (s->ps.sps->chroma_format_idc) { chroma_mc_uni(s, dst1, s->frame->linesize[1], ref0->frame->data[1], ref0->frame->linesize[1], 0, x0_c, y0_c, nPbW_c, nPbH_c, &current_mv, s->sh.chroma_weight_l0[current_mv.ref_idx[0]][0], s->sh.chroma_offset_l0[current_mv.ref_idx[0]][0]); chroma_mc_uni(s, dst2, s->frame->linesize[2], ref0->frame->data[2], ref0->frame->linesize[2], 0, x0_c, y0_c, nPbW_c, nPbH_c, &current_mv, s->sh.chroma_weight_l0[current_mv.ref_idx[0]][1], s->sh.chroma_offset_l0[current_mv.ref_idx[0]][1]); } } else if (current_mv.pred_flag == PF_L1) { int x0_c = x0 >> s->ps.sps->hshift[1]; int y0_c = y0 >> s->ps.sps->vshift[1]; int nPbW_c = nPbW >> s->ps.sps->hshift[1]; int nPbH_c = nPbH >> s->ps.sps->vshift[1]; luma_mc_uni(s, dst0, s->frame->linesize[0], ref1->frame, &current_mv.mv[1], x0, y0, nPbW, nPbH, s->sh.luma_weight_l1[current_mv.ref_idx[1]], s->sh.luma_offset_l1[current_mv.ref_idx[1]]); if (s->ps.sps->chroma_format_idc) { chroma_mc_uni(s, dst1, s->frame->linesize[1], ref1->frame->data[1], ref1->frame->linesize[1], 1, x0_c, y0_c, nPbW_c, nPbH_c, &current_mv, 
s->sh.chroma_weight_l1[current_mv.ref_idx[1]][0], s->sh.chroma_offset_l1[current_mv.ref_idx[1]][0]); chroma_mc_uni(s, dst2, s->frame->linesize[2], ref1->frame->data[2], ref1->frame->linesize[2], 1, x0_c, y0_c, nPbW_c, nPbH_c, &current_mv, s->sh.chroma_weight_l1[current_mv.ref_idx[1]][1], s->sh.chroma_offset_l1[current_mv.ref_idx[1]][1]); } } else if (current_mv.pred_flag == PF_BI) { int x0_c = x0 >> s->ps.sps->hshift[1]; int y0_c = y0 >> s->ps.sps->vshift[1]; int nPbW_c = nPbW >> s->ps.sps->hshift[1]; int nPbH_c = nPbH >> s->ps.sps->vshift[1]; luma_mc_bi(s, dst0, s->frame->linesize[0], ref0->frame, &current_mv.mv[0], x0, y0, nPbW, nPbH, ref1->frame, &current_mv.mv[1], &current_mv); if (s->ps.sps->chroma_format_idc) { chroma_mc_bi(s, dst1, s->frame->linesize[1], ref0->frame, ref1->frame, x0_c, y0_c, nPbW_c, nPbH_c, &current_mv, 0); chroma_mc_bi(s, dst2, s->frame->linesize[2], ref0->frame, ref1->frame, x0_c, y0_c, nPbW_c, nPbH_c, &current_mv, 1); } } } /** * 8.4.1 */ static int luma_intra_pred_mode(HEVCContext *s, int x0, int y0, int pu_size, int prev_intra_luma_pred_flag) { HEVCLocalContext *lc = s->HEVClc; int x_pu = x0 >> s->ps.sps->log2_min_pu_size; int y_pu = y0 >> s->ps.sps->log2_min_pu_size; int min_pu_width = s->ps.sps->min_pu_width; int size_in_pus = pu_size >> s->ps.sps->log2_min_pu_size; int x0b = av_mod_uintp2(x0, s->ps.sps->log2_ctb_size); int y0b = av_mod_uintp2(y0, s->ps.sps->log2_ctb_size); int cand_up = (lc->ctb_up_flag || y0b) ? s->tab_ipm[(y_pu - 1) * min_pu_width + x_pu] : INTRA_DC; int cand_left = (lc->ctb_left_flag || x0b) ? 
s->tab_ipm[y_pu * min_pu_width + x_pu - 1] : INTRA_DC; int y_ctb = (y0 >> (s->ps.sps->log2_ctb_size)) << (s->ps.sps->log2_ctb_size); MvField *tab_mvf = s->ref->tab_mvf; int intra_pred_mode; int candidate[3]; int i, j; // intra_pred_mode prediction does not cross vertical CTB boundaries if ((y0 - 1) < y_ctb) cand_up = INTRA_DC; if (cand_left == cand_up) { if (cand_left < 2) { candidate[0] = INTRA_PLANAR; candidate[1] = INTRA_DC; candidate[2] = INTRA_ANGULAR_26; } else { candidate[0] = cand_left; candidate[1] = 2 + ((cand_left - 2 - 1 + 32) & 31); candidate[2] = 2 + ((cand_left - 2 + 1) & 31); } } else { candidate[0] = cand_left; candidate[1] = cand_up; if (candidate[0] != INTRA_PLANAR && candidate[1] != INTRA_PLANAR) { candidate[2] = INTRA_PLANAR; } else if (candidate[0] != INTRA_DC && candidate[1] != INTRA_DC) { candidate[2] = INTRA_DC; } else { candidate[2] = INTRA_ANGULAR_26; } } if (prev_intra_luma_pred_flag) { intra_pred_mode = candidate[lc->pu.mpm_idx]; } else { if (candidate[0] > candidate[1]) FFSWAP(uint8_t, candidate[0], candidate[1]); if (candidate[0] > candidate[2]) FFSWAP(uint8_t, candidate[0], candidate[2]); if (candidate[1] > candidate[2]) FFSWAP(uint8_t, candidate[1], candidate[2]); intra_pred_mode = lc->pu.rem_intra_luma_pred_mode; for (i = 0; i < 3; i++) if (intra_pred_mode >= candidate[i]) intra_pred_mode++; } /* write the intra prediction units into the mv array */ if (!size_in_pus) size_in_pus = 1; for (i = 0; i < size_in_pus; i++) { memset(&s->tab_ipm[(y_pu + i) * min_pu_width + x_pu], intra_pred_mode, size_in_pus); for (j = 0; j < size_in_pus; j++) { tab_mvf[(y_pu + j) * min_pu_width + x_pu + i].pred_flag = PF_INTRA; } } return intra_pred_mode; } static av_always_inline void set_ct_depth(HEVCContext *s, int x0, int y0, int log2_cb_size, int ct_depth) { int length = (1 << log2_cb_size) >> s->ps.sps->log2_min_cb_size; int x_cb = x0 >> s->ps.sps->log2_min_cb_size; int y_cb = y0 >> s->ps.sps->log2_min_cb_size; int y; for (y = 0; y < length; y++) 
memset(&s->tab_ct_depth[(y_cb + y) * s->ps.sps->min_cb_width + x_cb], ct_depth, length); } static const uint8_t tab_mode_idx[] = { 0, 1, 2, 2, 2, 2, 3, 5, 7, 8, 10, 12, 13, 15, 17, 18, 19, 20, 21, 22, 23, 23, 24, 24, 25, 25, 26, 27, 27, 28, 28, 29, 29, 30, 31}; static void intra_prediction_unit(HEVCContext *s, int x0, int y0, int log2_cb_size) { HEVCLocalContext *lc = s->HEVClc; static const uint8_t intra_chroma_table[4] = { 0, 26, 10, 1 }; uint8_t prev_intra_luma_pred_flag[4]; int split = lc->cu.part_mode == PART_NxN; int pb_size = (1 << log2_cb_size) >> split; int side = split + 1; int chroma_mode; int i, j; for (i = 0; i < side; i++) for (j = 0; j < side; j++) prev_intra_luma_pred_flag[2 * i + j] = ff_hevc_prev_intra_luma_pred_flag_decode(s); for (i = 0; i < side; i++) { for (j = 0; j < side; j++) { if (prev_intra_luma_pred_flag[2 * i + j]) lc->pu.mpm_idx = ff_hevc_mpm_idx_decode(s); else lc->pu.rem_intra_luma_pred_mode = ff_hevc_rem_intra_luma_pred_mode_decode(s); lc->pu.intra_pred_mode[2 * i + j] = luma_intra_pred_mode(s, x0 + pb_size * j, y0 + pb_size * i, pb_size, prev_intra_luma_pred_flag[2 * i + j]); } } if (s->ps.sps->chroma_format_idc == 3) { for (i = 0; i < side; i++) { for (j = 0; j < side; j++) { lc->pu.chroma_mode_c[2 * i + j] = chroma_mode = ff_hevc_intra_chroma_pred_mode_decode(s); if (chroma_mode != 4) { if (lc->pu.intra_pred_mode[2 * i + j] == intra_chroma_table[chroma_mode]) lc->pu.intra_pred_mode_c[2 * i + j] = 34; else lc->pu.intra_pred_mode_c[2 * i + j] = intra_chroma_table[chroma_mode]; } else { lc->pu.intra_pred_mode_c[2 * i + j] = lc->pu.intra_pred_mode[2 * i + j]; } } } } else if (s->ps.sps->chroma_format_idc == 2) { int mode_idx; lc->pu.chroma_mode_c[0] = chroma_mode = ff_hevc_intra_chroma_pred_mode_decode(s); if (chroma_mode != 4) { if (lc->pu.intra_pred_mode[0] == intra_chroma_table[chroma_mode]) mode_idx = 34; else mode_idx = intra_chroma_table[chroma_mode]; } else { mode_idx = lc->pu.intra_pred_mode[0]; } lc->pu.intra_pred_mode_c[0] 
= tab_mode_idx[mode_idx]; } else if (s->ps.sps->chroma_format_idc != 0) { chroma_mode = ff_hevc_intra_chroma_pred_mode_decode(s); if (chroma_mode != 4) { if (lc->pu.intra_pred_mode[0] == intra_chroma_table[chroma_mode]) lc->pu.intra_pred_mode_c[0] = 34; else lc->pu.intra_pred_mode_c[0] = intra_chroma_table[chroma_mode]; } else { lc->pu.intra_pred_mode_c[0] = lc->pu.intra_pred_mode[0]; } } } static void intra_prediction_unit_default_value(HEVCContext *s, int x0, int y0, int log2_cb_size) { HEVCLocalContext *lc = s->HEVClc; int pb_size = 1 << log2_cb_size; int size_in_pus = pb_size >> s->ps.sps->log2_min_pu_size; int min_pu_width = s->ps.sps->min_pu_width; MvField *tab_mvf = s->ref->tab_mvf; int x_pu = x0 >> s->ps.sps->log2_min_pu_size; int y_pu = y0 >> s->ps.sps->log2_min_pu_size; int j, k; if (size_in_pus == 0) size_in_pus = 1; for (j = 0; j < size_in_pus; j++) memset(&s->tab_ipm[(y_pu + j) * min_pu_width + x_pu], INTRA_DC, size_in_pus); if (lc->cu.pred_mode == MODE_INTRA) for (j = 0; j < size_in_pus; j++) for (k = 0; k < size_in_pus; k++) tab_mvf[(y_pu + j) * min_pu_width + x_pu + k].pred_flag = PF_INTRA; } static int hls_coding_unit(HEVCContext *s, int x0, int y0, int log2_cb_size) { int cb_size = 1 << log2_cb_size; HEVCLocalContext *lc = s->HEVClc; int log2_min_cb_size = s->ps.sps->log2_min_cb_size; int length = cb_size >> log2_min_cb_size; int min_cb_width = s->ps.sps->min_cb_width; int x_cb = x0 >> log2_min_cb_size; int y_cb = y0 >> log2_min_cb_size; int idx = log2_cb_size - 2; int qp_block_mask = (1<<(s->ps.sps->log2_ctb_size - s->ps.pps->diff_cu_qp_delta_depth)) - 1; int x, y, ret; lc->cu.x = x0; lc->cu.y = y0; lc->cu.pred_mode = MODE_INTRA; lc->cu.part_mode = PART_2Nx2N; lc->cu.intra_split_flag = 0; SAMPLE_CTB(s->skip_flag, x_cb, y_cb) = 0; for (x = 0; x < 4; x++) lc->pu.intra_pred_mode[x] = 1; if (s->ps.pps->transquant_bypass_enable_flag) { lc->cu.cu_transquant_bypass_flag = ff_hevc_cu_transquant_bypass_flag_decode(s); if (lc->cu.cu_transquant_bypass_flag) 
set_deblocking_bypass(s, x0, y0, log2_cb_size); } else lc->cu.cu_transquant_bypass_flag = 0; if (s->sh.slice_type != HEVC_SLICE_I) { uint8_t skip_flag = ff_hevc_skip_flag_decode(s, x0, y0, x_cb, y_cb); x = y_cb * min_cb_width + x_cb; for (y = 0; y < length; y++) { memset(&s->skip_flag[x], skip_flag, length); x += min_cb_width; } lc->cu.pred_mode = skip_flag ? MODE_SKIP : MODE_INTER; } else { x = y_cb * min_cb_width + x_cb; for (y = 0; y < length; y++) { memset(&s->skip_flag[x], 0, length); x += min_cb_width; } } if (SAMPLE_CTB(s->skip_flag, x_cb, y_cb)) { hls_prediction_unit(s, x0, y0, cb_size, cb_size, log2_cb_size, 0, idx); intra_prediction_unit_default_value(s, x0, y0, log2_cb_size); if (!s->sh.disable_deblocking_filter_flag) ff_hevc_deblocking_boundary_strengths(s, x0, y0, log2_cb_size); } else { int pcm_flag = 0; if (s->sh.slice_type != HEVC_SLICE_I) lc->cu.pred_mode = ff_hevc_pred_mode_decode(s); if (lc->cu.pred_mode != MODE_INTRA || log2_cb_size == s->ps.sps->log2_min_cb_size) { lc->cu.part_mode = ff_hevc_part_mode_decode(s, log2_cb_size); lc->cu.intra_split_flag = lc->cu.part_mode == PART_NxN && lc->cu.pred_mode == MODE_INTRA; } if (lc->cu.pred_mode == MODE_INTRA) { if (lc->cu.part_mode == PART_2Nx2N && s->ps.sps->pcm_enabled_flag && log2_cb_size >= s->ps.sps->pcm.log2_min_pcm_cb_size && log2_cb_size <= s->ps.sps->pcm.log2_max_pcm_cb_size) { pcm_flag = ff_hevc_pcm_flag_decode(s); } if (pcm_flag) { intra_prediction_unit_default_value(s, x0, y0, log2_cb_size); ret = hls_pcm_sample(s, x0, y0, log2_cb_size); if (s->ps.sps->pcm.loop_filter_disable_flag) set_deblocking_bypass(s, x0, y0, log2_cb_size); if (ret < 0) return ret; } else { intra_prediction_unit(s, x0, y0, log2_cb_size); } } else { intra_prediction_unit_default_value(s, x0, y0, log2_cb_size); switch (lc->cu.part_mode) { case PART_2Nx2N: hls_prediction_unit(s, x0, y0, cb_size, cb_size, log2_cb_size, 0, idx); break; case PART_2NxN: hls_prediction_unit(s, x0, y0, cb_size, cb_size / 2, log2_cb_size, 0, 
idx); hls_prediction_unit(s, x0, y0 + cb_size / 2, cb_size, cb_size / 2, log2_cb_size, 1, idx); break; case PART_Nx2N: hls_prediction_unit(s, x0, y0, cb_size / 2, cb_size, log2_cb_size, 0, idx - 1); hls_prediction_unit(s, x0 + cb_size / 2, y0, cb_size / 2, cb_size, log2_cb_size, 1, idx - 1); break; case PART_2NxnU: hls_prediction_unit(s, x0, y0, cb_size, cb_size / 4, log2_cb_size, 0, idx); hls_prediction_unit(s, x0, y0 + cb_size / 4, cb_size, cb_size * 3 / 4, log2_cb_size, 1, idx); break; case PART_2NxnD: hls_prediction_unit(s, x0, y0, cb_size, cb_size * 3 / 4, log2_cb_size, 0, idx); hls_prediction_unit(s, x0, y0 + cb_size * 3 / 4, cb_size, cb_size / 4, log2_cb_size, 1, idx); break; case PART_nLx2N: hls_prediction_unit(s, x0, y0, cb_size / 4, cb_size, log2_cb_size, 0, idx - 2); hls_prediction_unit(s, x0 + cb_size / 4, y0, cb_size * 3 / 4, cb_size, log2_cb_size, 1, idx - 2); break; case PART_nRx2N: hls_prediction_unit(s, x0, y0, cb_size * 3 / 4, cb_size, log2_cb_size, 0, idx - 2); hls_prediction_unit(s, x0 + cb_size * 3 / 4, y0, cb_size / 4, cb_size, log2_cb_size, 1, idx - 2); break; case PART_NxN: hls_prediction_unit(s, x0, y0, cb_size / 2, cb_size / 2, log2_cb_size, 0, idx - 1); hls_prediction_unit(s, x0 + cb_size / 2, y0, cb_size / 2, cb_size / 2, log2_cb_size, 1, idx - 1); hls_prediction_unit(s, x0, y0 + cb_size / 2, cb_size / 2, cb_size / 2, log2_cb_size, 2, idx - 1); hls_prediction_unit(s, x0 + cb_size / 2, y0 + cb_size / 2, cb_size / 2, cb_size / 2, log2_cb_size, 3, idx - 1); break; } } if (!pcm_flag) { int rqt_root_cbf = 1; if (lc->cu.pred_mode != MODE_INTRA && !(lc->cu.part_mode == PART_2Nx2N && lc->pu.merge_flag)) { rqt_root_cbf = ff_hevc_no_residual_syntax_flag_decode(s); } if (rqt_root_cbf) { const static int cbf[2] = { 0 }; lc->cu.max_trafo_depth = lc->cu.pred_mode == MODE_INTRA ? 
s->ps.sps->max_transform_hierarchy_depth_intra + lc->cu.intra_split_flag : s->ps.sps->max_transform_hierarchy_depth_inter; ret = hls_transform_tree(s, x0, y0, x0, y0, x0, y0, log2_cb_size, log2_cb_size, 0, 0, cbf, cbf); if (ret < 0) return ret; } else { if (!s->sh.disable_deblocking_filter_flag) ff_hevc_deblocking_boundary_strengths(s, x0, y0, log2_cb_size); } } } if (s->ps.pps->cu_qp_delta_enabled_flag && lc->tu.is_cu_qp_delta_coded == 0) ff_hevc_set_qPy(s, x0, y0, log2_cb_size); x = y_cb * min_cb_width + x_cb; for (y = 0; y < length; y++) { memset(&s->qp_y_tab[x], lc->qp_y, length); x += min_cb_width; } if(((x0 + (1<<log2_cb_size)) & qp_block_mask) == 0 && ((y0 + (1<<log2_cb_size)) & qp_block_mask) == 0) { lc->qPy_pred = lc->qp_y; } set_ct_depth(s, x0, y0, log2_cb_size, lc->ct_depth); return 0; } static int hls_coding_quadtree(HEVCContext *s, int x0, int y0, int log2_cb_size, int cb_depth) { HEVCLocalContext *lc = s->HEVClc; const int cb_size = 1 << log2_cb_size; int ret; int split_cu; lc->ct_depth = cb_depth; if (x0 + cb_size <= s->ps.sps->width && y0 + cb_size <= s->ps.sps->height && log2_cb_size > s->ps.sps->log2_min_cb_size) { split_cu = ff_hevc_split_coding_unit_flag_decode(s, cb_depth, x0, y0); } else { split_cu = (log2_cb_size > s->ps.sps->log2_min_cb_size); } if (s->ps.pps->cu_qp_delta_enabled_flag && log2_cb_size >= s->ps.sps->log2_ctb_size - s->ps.pps->diff_cu_qp_delta_depth) { lc->tu.is_cu_qp_delta_coded = 0; lc->tu.cu_qp_delta = 0; } if (s->sh.cu_chroma_qp_offset_enabled_flag && log2_cb_size >= s->ps.sps->log2_ctb_size - s->ps.pps->diff_cu_chroma_qp_offset_depth) { lc->tu.is_cu_chroma_qp_offset_coded = 0; } if (split_cu) { int qp_block_mask = (1<<(s->ps.sps->log2_ctb_size - s->ps.pps->diff_cu_qp_delta_depth)) - 1; const int cb_size_split = cb_size >> 1; const int x1 = x0 + cb_size_split; const int y1 = y0 + cb_size_split; int more_data = 0; more_data = hls_coding_quadtree(s, x0, y0, log2_cb_size - 1, cb_depth + 1); if (more_data < 0) return more_data; 
if (more_data && x1 < s->ps.sps->width) { more_data = hls_coding_quadtree(s, x1, y0, log2_cb_size - 1, cb_depth + 1); if (more_data < 0) return more_data; } if (more_data && y1 < s->ps.sps->height) { more_data = hls_coding_quadtree(s, x0, y1, log2_cb_size - 1, cb_depth + 1); if (more_data < 0) return more_data; } if (more_data && x1 < s->ps.sps->width && y1 < s->ps.sps->height) { more_data = hls_coding_quadtree(s, x1, y1, log2_cb_size - 1, cb_depth + 1); if (more_data < 0) return more_data; } if(((x0 + (1<<log2_cb_size)) & qp_block_mask) == 0 && ((y0 + (1<<log2_cb_size)) & qp_block_mask) == 0) lc->qPy_pred = lc->qp_y; if (more_data) return ((x1 + cb_size_split) < s->ps.sps->width || (y1 + cb_size_split) < s->ps.sps->height); else return 0; } else { ret = hls_coding_unit(s, x0, y0, log2_cb_size); if (ret < 0) return ret; if ((!((x0 + cb_size) % (1 << (s->ps.sps->log2_ctb_size))) || (x0 + cb_size >= s->ps.sps->width)) && (!((y0 + cb_size) % (1 << (s->ps.sps->log2_ctb_size))) || (y0 + cb_size >= s->ps.sps->height))) { int end_of_slice_flag = ff_hevc_end_of_slice_flag_decode(s); return !end_of_slice_flag; } else { return 1; } } return 0; } static void hls_decode_neighbour(HEVCContext *s, int x_ctb, int y_ctb, int ctb_addr_ts) { HEVCLocalContext *lc = s->HEVClc; int ctb_size = 1 << s->ps.sps->log2_ctb_size; int ctb_addr_rs = s->ps.pps->ctb_addr_ts_to_rs[ctb_addr_ts]; int ctb_addr_in_slice = ctb_addr_rs - s->sh.slice_addr; s->tab_slice_address[ctb_addr_rs] = s->sh.slice_addr; if (s->ps.pps->entropy_coding_sync_enabled_flag) { if (x_ctb == 0 && (y_ctb & (ctb_size - 1)) == 0) lc->first_qp_group = 1; lc->end_of_tiles_x = s->ps.sps->width; } else if (s->ps.pps->tiles_enabled_flag) { if (ctb_addr_ts && s->ps.pps->tile_id[ctb_addr_ts] != s->ps.pps->tile_id[ctb_addr_ts - 1]) { int idxX = s->ps.pps->col_idxX[x_ctb >> s->ps.sps->log2_ctb_size]; lc->end_of_tiles_x = x_ctb + (s->ps.pps->column_width[idxX] << s->ps.sps->log2_ctb_size); lc->first_qp_group = 1; } } else { 
lc->end_of_tiles_x = s->ps.sps->width; } lc->end_of_tiles_y = FFMIN(y_ctb + ctb_size, s->ps.sps->height); lc->boundary_flags = 0; if (s->ps.pps->tiles_enabled_flag) { if (x_ctb > 0 && s->ps.pps->tile_id[ctb_addr_ts] != s->ps.pps->tile_id[s->ps.pps->ctb_addr_rs_to_ts[ctb_addr_rs - 1]]) lc->boundary_flags |= BOUNDARY_LEFT_TILE; if (x_ctb > 0 && s->tab_slice_address[ctb_addr_rs] != s->tab_slice_address[ctb_addr_rs - 1]) lc->boundary_flags |= BOUNDARY_LEFT_SLICE; if (y_ctb > 0 && s->ps.pps->tile_id[ctb_addr_ts] != s->ps.pps->tile_id[s->ps.pps->ctb_addr_rs_to_ts[ctb_addr_rs - s->ps.sps->ctb_width]]) lc->boundary_flags |= BOUNDARY_UPPER_TILE; if (y_ctb > 0 && s->tab_slice_address[ctb_addr_rs] != s->tab_slice_address[ctb_addr_rs - s->ps.sps->ctb_width]) lc->boundary_flags |= BOUNDARY_UPPER_SLICE; } else { if (ctb_addr_in_slice <= 0) lc->boundary_flags |= BOUNDARY_LEFT_SLICE; if (ctb_addr_in_slice < s->ps.sps->ctb_width) lc->boundary_flags |= BOUNDARY_UPPER_SLICE; } lc->ctb_left_flag = ((x_ctb > 0) && (ctb_addr_in_slice > 0) && !(lc->boundary_flags & BOUNDARY_LEFT_TILE)); lc->ctb_up_flag = ((y_ctb > 0) && (ctb_addr_in_slice >= s->ps.sps->ctb_width) && !(lc->boundary_flags & BOUNDARY_UPPER_TILE)); lc->ctb_up_right_flag = ((y_ctb > 0) && (ctb_addr_in_slice+1 >= s->ps.sps->ctb_width) && (s->ps.pps->tile_id[ctb_addr_ts] == s->ps.pps->tile_id[s->ps.pps->ctb_addr_rs_to_ts[ctb_addr_rs+1 - s->ps.sps->ctb_width]])); lc->ctb_up_left_flag = ((x_ctb > 0) && (y_ctb > 0) && (ctb_addr_in_slice-1 >= s->ps.sps->ctb_width) && (s->ps.pps->tile_id[ctb_addr_ts] == s->ps.pps->tile_id[s->ps.pps->ctb_addr_rs_to_ts[ctb_addr_rs-1 - s->ps.sps->ctb_width]])); } static int hls_decode_entry(AVCodecContext *avctxt, void *isFilterThread) { HEVCContext *s = avctxt->priv_data; int ctb_size = 1 << s->ps.sps->log2_ctb_size; int more_data = 1; int x_ctb = 0; int y_ctb = 0; int ctb_addr_ts = s->ps.pps->ctb_addr_rs_to_ts[s->sh.slice_ctb_addr_rs]; int ret; if (!ctb_addr_ts && s->sh.dependent_slice_segment_flag) 
    /* A dependent slice segment cannot be the first segment of the
     * bitstream: there is no previous segment to inherit state from. */
    {
        av_log(s->avctx, AV_LOG_ERROR, "Impossible initial tile.\n");
        return AVERROR_INVALIDDATA;
    }

    if (s->sh.dependent_slice_segment_flag) {
        /* A dependent segment continues the previous one; verify that the
         * CTB immediately before this segment really belongs to the slice
         * we claim to continue. */
        int prev_rs = s->ps.pps->ctb_addr_ts_to_rs[ctb_addr_ts - 1];
        if (s->tab_slice_address[prev_rs] != s->sh.slice_addr) {
            av_log(s->avctx, AV_LOG_ERROR, "Previous slice segment missing\n");
            return AVERROR_INVALIDDATA;
        }
    }

    /* Decode CTBs in tile-scan order until the entropy coder reports the
     * end of the slice data or the last CTB of the picture is reached. */
    while (more_data && ctb_addr_ts < s->ps.sps->ctb_size) {
        int ctb_addr_rs = s->ps.pps->ctb_addr_ts_to_rs[ctb_addr_ts];

        /* Convert the raster-scan CTB address into luma sample coordinates. */
        x_ctb = (ctb_addr_rs % ((s->ps.sps->width + ctb_size - 1) >> s->ps.sps->log2_ctb_size)) << s->ps.sps->log2_ctb_size;
        y_ctb = (ctb_addr_rs / ((s->ps.sps->width + ctb_size - 1) >> s->ps.sps->log2_ctb_size)) << s->ps.sps->log2_ctb_size;
        hls_decode_neighbour(s, x_ctb, y_ctb, ctb_addr_ts);

        ret = ff_hevc_cabac_init(s, ctb_addr_ts);
        if (ret < 0) {
            /* Mark the CTB as belonging to no slice so later consistency
             * checks on tab_slice_address fail cleanly. */
            s->tab_slice_address[ctb_addr_rs] = -1;
            return ret;
        }

        hls_sao_param(s, x_ctb >> s->ps.sps->log2_ctb_size, y_ctb >> s->ps.sps->log2_ctb_size);

        /* Per-CTB deblocking parameters come from the slice header. */
        s->deblock[ctb_addr_rs].beta_offset = s->sh.beta_offset;
        s->deblock[ctb_addr_rs].tc_offset = s->sh.tc_offset;
        s->filter_slice_edges[ctb_addr_rs] = s->sh.slice_loop_filter_across_slices_enabled_flag;

        more_data = hls_coding_quadtree(s, x_ctb, y_ctb, s->ps.sps->log2_ctb_size, 0);
        if (more_data < 0) {
            s->tab_slice_address[ctb_addr_rs] = -1;
            return more_data;
        }

        ctb_addr_ts++;
        /* Snapshot CABAC state for WPP/tiles, then run the in-loop filters
         * on the CTB just completed. */
        ff_hevc_save_states(s, ctb_addr_ts);
        ff_hevc_hls_filters(s, x_ctb, y_ctb, ctb_size);
    }

    /* Once the bottom-right CTB has been decoded, run the final filter pass. */
    if (x_ctb + ctb_size >= s->ps.sps->width &&
        y_ctb + ctb_size >= s->ps.sps->height)
        ff_hevc_hls_filter(s, x_ctb, y_ctb, ctb_size);

    return ctb_addr_ts;
}

/* Decode the slice data single-threaded through the avctx execute()
 * callback; returns the result of hls_decode_entry (last CTB address in
 * tile-scan order, or a negative error code). */
static int hls_slice_data(HEVCContext *s)
{
    int arg[2];
    int ret[2];

    arg[0] = 0;
    arg[1] = 1;

    s->avctx->execute(s->avctx, hls_decode_entry, arg, ret , 1, sizeof(int));
    return ret[0];
}

/* Worker entry point for wavefront parallel processing (WPP): each job
 * decodes one CTB row, synchronizing with the row above via the thread
 * progress mechanism. */
static int hls_decode_entry_wpp(AVCodecContext *avctxt, void *input_ctb_row, int job, int self_id)
{
    HEVCContext *s1 = avctxt->priv_data, *s;
    HEVCLocalContext *lc;
    int ctb_size = 1<< s1->ps.sps->log2_ctb_size;
    int more_data = 1;
    int *ctb_row_p =
input_ctb_row;
    int ctb_row = ctb_row_p[job];
    /* First CTB of this row in raster order, then its tile-scan address. */
    int ctb_addr_rs = s1->sh.slice_ctb_addr_rs + ctb_row * ((s1->ps.sps->width + ctb_size - 1) >> s1->ps.sps->log2_ctb_size);
    int ctb_addr_ts = s1->ps.pps->ctb_addr_rs_to_ts[ctb_addr_rs];
    int thread = ctb_row % s1->threads_number;
    int ret;

    s = s1->sList[self_id];
    lc = s->HEVClc;

    if(ctb_row) {
        /* Rows after the first start at their own entry point: re-init the
         * bitreader and CABAC decoder at the recorded offset/size. */
        ret = init_get_bits8(&lc->gb, s->data + s->sh.offset[ctb_row - 1], s->sh.size[ctb_row - 1]);
        if (ret < 0)
            goto error;
        ff_init_cabac_decoder(&lc->cc, s->data + s->sh.offset[(ctb_row)-1], s->sh.size[ctb_row - 1]);
    }

    while(more_data && ctb_addr_ts < s->ps.sps->ctb_size) {
        int x_ctb = (ctb_addr_rs % s->ps.sps->ctb_width) << s->ps.sps->log2_ctb_size;
        int y_ctb = (ctb_addr_rs / s->ps.sps->ctb_width) << s->ps.sps->log2_ctb_size;

        hls_decode_neighbour(s, x_ctb, y_ctb, ctb_addr_ts);

        /* Wait until the row above is far enough ahead (WPP dependency). */
        ff_thread_await_progress2(s->avctx, ctb_row, thread, SHIFT_CTB_WPP);

        /* If another row already hit an error, bail out but still report
         * progress so waiting threads are not blocked forever. */
        if (atomic_load(&s1->wpp_err)) {
            ff_thread_report_progress2(s->avctx, ctb_row , thread, SHIFT_CTB_WPP);
            return 0;
        }

        ret = ff_hevc_cabac_init(s, ctb_addr_ts);
        if (ret < 0)
            goto error;
        hls_sao_param(s, x_ctb >> s->ps.sps->log2_ctb_size, y_ctb >> s->ps.sps->log2_ctb_size);
        more_data = hls_coding_quadtree(s, x_ctb, y_ctb, s->ps.sps->log2_ctb_size, 0);

        if (more_data < 0) {
            ret = more_data;
            goto error;
        }

        ctb_addr_ts++;

        ff_hevc_save_states(s, ctb_addr_ts);
        ff_thread_report_progress2(s->avctx, ctb_row, thread, 1);
        ff_hevc_hls_filters(s, x_ctb, y_ctb, ctb_size);

        /* The entropy coder ended the slice before the end of the row even
         * though more rows follow: the entry point table is inconsistent,
         * flag the error for all rows. */
        if (!more_data && (x_ctb+ctb_size) < s->ps.sps->width && ctb_row != s->sh.num_entry_point_offsets) {
            atomic_store(&s1->wpp_err, 1);
            ff_thread_report_progress2(s->avctx, ctb_row ,thread, SHIFT_CTB_WPP);
            return 0;
        }

        /* Bottom-right CTB of the picture: final filter pass, done. */
        if ((x_ctb+ctb_size) >= s->ps.sps->width && (y_ctb+ctb_size) >= s->ps.sps->height ) {
            ff_hevc_hls_filter(s, x_ctb, y_ctb, ctb_size);
            ff_thread_report_progress2(s->avctx, ctb_row , thread, SHIFT_CTB_WPP);
            return ctb_addr_ts;
        }
        ctb_addr_rs = s->ps.pps->ctb_addr_ts_to_rs[ctb_addr_ts];
        x_ctb+=ctb_size;

        /* End of this row reached; this job is finished. */
        if(x_ctb >= s->ps.sps->width) {
            break;
        }
    }
ff_thread_report_progress2(s->avctx, ctb_row ,thread, SHIFT_CTB_WPP); return 0; error: s->tab_slice_address[ctb_addr_rs] = -1; atomic_store(&s1->wpp_err, 1); ff_thread_report_progress2(s->avctx, ctb_row ,thread, SHIFT_CTB_WPP); return ret; } static int hls_slice_data_wpp(HEVCContext *s, const H2645NAL *nal) { const uint8_t *data = nal->data; int length = nal->size; HEVCLocalContext *lc = s->HEVClc; int *ret = av_malloc_array(s->sh.num_entry_point_offsets + 1, sizeof(int)); int *arg = av_malloc_array(s->sh.num_entry_point_offsets + 1, sizeof(int)); int64_t offset; int64_t startheader, cmpt = 0; int i, j, res = 0; if (!ret || !arg) { av_free(ret); av_free(arg); return AVERROR(ENOMEM); } if (s->sh.slice_ctb_addr_rs + s->sh.num_entry_point_offsets * s->ps.sps->ctb_width >= s->ps.sps->ctb_width * s->ps.sps->ctb_height) { av_log(s->avctx, AV_LOG_ERROR, "WPP ctb addresses are wrong (%d %d %d %d)\n", s->sh.slice_ctb_addr_rs, s->sh.num_entry_point_offsets, s->ps.sps->ctb_width, s->ps.sps->ctb_height ); res = AVERROR_INVALIDDATA; goto error; } ff_alloc_entries(s->avctx, s->sh.num_entry_point_offsets + 1); if (!s->sList[1]) { for (i = 1; i < s->threads_number; i++) { s->sList[i] = av_malloc(sizeof(HEVCContext)); memcpy(s->sList[i], s, sizeof(HEVCContext)); s->HEVClcList[i] = av_mallocz(sizeof(HEVCLocalContext)); s->sList[i]->HEVClc = s->HEVClcList[i]; } } offset = (lc->gb.index >> 3); for (j = 0, cmpt = 0, startheader = offset + s->sh.entry_point_offset[0]; j < nal->skipped_bytes; j++) { if (nal->skipped_bytes_pos[j] >= offset && nal->skipped_bytes_pos[j] < startheader) { startheader--; cmpt++; } } for (i = 1; i < s->sh.num_entry_point_offsets; i++) { offset += (s->sh.entry_point_offset[i - 1] - cmpt); for (j = 0, cmpt = 0, startheader = offset + s->sh.entry_point_offset[i]; j < nal->skipped_bytes; j++) { if (nal->skipped_bytes_pos[j] >= offset && nal->skipped_bytes_pos[j] < startheader) { startheader--; cmpt++; } } s->sh.size[i - 1] = s->sh.entry_point_offset[i] - cmpt; 
s->sh.offset[i - 1] = offset; } if (s->sh.num_entry_point_offsets != 0) { offset += s->sh.entry_point_offset[s->sh.num_entry_point_offsets - 1] - cmpt; if (length < offset) { av_log(s->avctx, AV_LOG_ERROR, "entry_point_offset table is corrupted\n"); res = AVERROR_INVALIDDATA; goto error; } s->sh.size[s->sh.num_entry_point_offsets - 1] = length - offset; s->sh.offset[s->sh.num_entry_point_offsets - 1] = offset; } s->data = data; for (i = 1; i < s->threads_number; i++) { s->sList[i]->HEVClc->first_qp_group = 1; s->sList[i]->HEVClc->qp_y = s->sList[0]->HEVClc->qp_y; memcpy(s->sList[i], s, sizeof(HEVCContext)); s->sList[i]->HEVClc = s->HEVClcList[i]; } atomic_store(&s->wpp_err, 0); ff_reset_entries(s->avctx); for (i = 0; i <= s->sh.num_entry_point_offsets; i++) { arg[i] = i; ret[i] = 0; } if (s->ps.pps->entropy_coding_sync_enabled_flag) s->avctx->execute2(s->avctx, hls_decode_entry_wpp, arg, ret, s->sh.num_entry_point_offsets + 1); for (i = 0; i <= s->sh.num_entry_point_offsets; i++) res += ret[i]; error: av_free(ret); av_free(arg); return res; } static int set_side_data(HEVCContext *s) { AVFrame *out = s->ref->frame; if (s->sei.frame_packing.present && s->sei.frame_packing.arrangement_type >= 3 && s->sei.frame_packing.arrangement_type <= 5 && s->sei.frame_packing.content_interpretation_type > 0 && s->sei.frame_packing.content_interpretation_type < 3) { AVStereo3D *stereo = av_stereo3d_create_side_data(out); if (!stereo) return AVERROR(ENOMEM); switch (s->sei.frame_packing.arrangement_type) { case 3: if (s->sei.frame_packing.quincunx_subsampling) stereo->type = AV_STEREO3D_SIDEBYSIDE_QUINCUNX; else stereo->type = AV_STEREO3D_SIDEBYSIDE; break; case 4: stereo->type = AV_STEREO3D_TOPBOTTOM; break; case 5: stereo->type = AV_STEREO3D_FRAMESEQUENCE; break; } if (s->sei.frame_packing.content_interpretation_type == 2) stereo->flags = AV_STEREO3D_FLAG_INVERT; if (s->sei.frame_packing.arrangement_type == 5) { if (s->sei.frame_packing.current_frame_is_frame0_flag) stereo->view = 
AV_STEREO3D_VIEW_LEFT; else stereo->view = AV_STEREO3D_VIEW_RIGHT; } } if (s->sei.display_orientation.present && (s->sei.display_orientation.anticlockwise_rotation || s->sei.display_orientation.hflip || s->sei.display_orientation.vflip)) { double angle = s->sei.display_orientation.anticlockwise_rotation * 360 / (double) (1 << 16); AVFrameSideData *rotation = av_frame_new_side_data(out, AV_FRAME_DATA_DISPLAYMATRIX, sizeof(int32_t) * 9); if (!rotation) return AVERROR(ENOMEM); av_display_rotation_set((int32_t *)rotation->data, angle); av_display_matrix_flip((int32_t *)rotation->data, s->sei.display_orientation.hflip, s->sei.display_orientation.vflip); } // Decrement the mastering display flag when IRAP frame has no_rasl_output_flag=1 // so the side data persists for the entire coded video sequence. if (s->sei.mastering_display.present > 0 && IS_IRAP(s) && s->no_rasl_output_flag) { s->sei.mastering_display.present--; } if (s->sei.mastering_display.present) { // HEVC uses a g,b,r ordering, which we convert to a more natural r,g,b const int mapping[3] = {2, 0, 1}; const int chroma_den = 50000; const int luma_den = 10000; int i; AVMasteringDisplayMetadata *metadata = av_mastering_display_metadata_create_side_data(out); if (!metadata) return AVERROR(ENOMEM); for (i = 0; i < 3; i++) { const int j = mapping[i]; metadata->display_primaries[i][0].num = s->sei.mastering_display.display_primaries[j][0]; metadata->display_primaries[i][0].den = chroma_den; metadata->display_primaries[i][1].num = s->sei.mastering_display.display_primaries[j][1]; metadata->display_primaries[i][1].den = chroma_den; } metadata->white_point[0].num = s->sei.mastering_display.white_point[0]; metadata->white_point[0].den = chroma_den; metadata->white_point[1].num = s->sei.mastering_display.white_point[1]; metadata->white_point[1].den = chroma_den; metadata->max_luminance.num = s->sei.mastering_display.max_luminance; metadata->max_luminance.den = luma_den; metadata->min_luminance.num = 
s->sei.mastering_display.min_luminance; metadata->min_luminance.den = luma_den; metadata->has_luminance = 1; metadata->has_primaries = 1; av_log(s->avctx, AV_LOG_DEBUG, "Mastering Display Metadata:\n"); av_log(s->avctx, AV_LOG_DEBUG, "r(%5.4f,%5.4f) g(%5.4f,%5.4f) b(%5.4f %5.4f) wp(%5.4f, %5.4f)\n", av_q2d(metadata->display_primaries[0][0]), av_q2d(metadata->display_primaries[0][1]), av_q2d(metadata->display_primaries[1][0]), av_q2d(metadata->display_primaries[1][1]), av_q2d(metadata->display_primaries[2][0]), av_q2d(metadata->display_primaries[2][1]), av_q2d(metadata->white_point[0]), av_q2d(metadata->white_point[1])); av_log(s->avctx, AV_LOG_DEBUG, "min_luminance=%f, max_luminance=%f\n", av_q2d(metadata->min_luminance), av_q2d(metadata->max_luminance)); } // Decrement the mastering display flag when IRAP frame has no_rasl_output_flag=1 // so the side data persists for the entire coded video sequence. if (s->sei.content_light.present > 0 && IS_IRAP(s) && s->no_rasl_output_flag) { s->sei.content_light.present--; } if (s->sei.content_light.present) { AVContentLightMetadata *metadata = av_content_light_metadata_create_side_data(out); if (!metadata) return AVERROR(ENOMEM); metadata->MaxCLL = s->sei.content_light.max_content_light_level; metadata->MaxFALL = s->sei.content_light.max_pic_average_light_level; av_log(s->avctx, AV_LOG_DEBUG, "Content Light Level Metadata:\n"); av_log(s->avctx, AV_LOG_DEBUG, "MaxCLL=%d, MaxFALL=%d\n", metadata->MaxCLL, metadata->MaxFALL); } if (s->sei.a53_caption.a53_caption) { AVFrameSideData* sd = av_frame_new_side_data(out, AV_FRAME_DATA_A53_CC, s->sei.a53_caption.a53_caption_size); if (sd) memcpy(sd->data, s->sei.a53_caption.a53_caption, s->sei.a53_caption.a53_caption_size); av_freep(&s->sei.a53_caption.a53_caption); s->sei.a53_caption.a53_caption_size = 0; s->avctx->properties |= FF_CODEC_PROPERTY_CLOSED_CAPTIONS; } if (s->sei.alternative_transfer.present && 
av_color_transfer_name(s->sei.alternative_transfer.preferred_transfer_characteristics) && s->sei.alternative_transfer.preferred_transfer_characteristics != AVCOL_TRC_UNSPECIFIED) { s->avctx->color_trc = out->color_trc = s->sei.alternative_transfer.preferred_transfer_characteristics; } return 0; } static int hevc_frame_start(HEVCContext *s) { HEVCLocalContext *lc = s->HEVClc; int pic_size_in_ctb = ((s->ps.sps->width >> s->ps.sps->log2_min_cb_size) + 1) * ((s->ps.sps->height >> s->ps.sps->log2_min_cb_size) + 1); int ret; memset(s->horizontal_bs, 0, s->bs_width * s->bs_height); memset(s->vertical_bs, 0, s->bs_width * s->bs_height); memset(s->cbf_luma, 0, s->ps.sps->min_tb_width * s->ps.sps->min_tb_height); memset(s->is_pcm, 0, (s->ps.sps->min_pu_width + 1) * (s->ps.sps->min_pu_height + 1)); memset(s->tab_slice_address, -1, pic_size_in_ctb * sizeof(*s->tab_slice_address)); s->is_decoded = 0; s->first_nal_type = s->nal_unit_type; s->no_rasl_output_flag = IS_IDR(s) || IS_BLA(s) || (s->nal_unit_type == HEVC_NAL_CRA_NUT && s->last_eos); if (s->ps.pps->tiles_enabled_flag) lc->end_of_tiles_x = s->ps.pps->column_width[0] << s->ps.sps->log2_ctb_size; ret = ff_hevc_set_new_ref(s, &s->frame, s->poc); if (ret < 0) goto fail; ret = ff_hevc_frame_rps(s); if (ret < 0) { av_log(s->avctx, AV_LOG_ERROR, "Error constructing the frame RPS.\n"); goto fail; } s->ref->frame->key_frame = IS_IRAP(s); ret = set_side_data(s); if (ret < 0) goto fail; s->frame->pict_type = 3 - s->sh.slice_type; if (!IS_IRAP(s)) ff_hevc_bump_frame(s); av_frame_unref(s->output_frame); ret = ff_hevc_output_frame(s, s->output_frame, 0); if (ret < 0) goto fail; if (!s->avctx->hwaccel) ff_thread_finish_setup(s->avctx); return 0; fail: if (s->ref) ff_hevc_unref_frame(s, s->ref, ~0); s->ref = NULL; return ret; } static int decode_nal_unit(HEVCContext *s, const H2645NAL *nal) { HEVCLocalContext *lc = s->HEVClc; GetBitContext *gb = &lc->gb; int ctb_addr_ts, ret; *gb = nal->gb; s->nal_unit_type = nal->type; s->temporal_id = 
nal->temporal_id; switch (s->nal_unit_type) { case HEVC_NAL_VPS: if (s->avctx->hwaccel && s->avctx->hwaccel->decode_params) { ret = s->avctx->hwaccel->decode_params(s->avctx, nal->type, nal->raw_data, nal->raw_size); if (ret < 0) goto fail; } ret = ff_hevc_decode_nal_vps(gb, s->avctx, &s->ps); if (ret < 0) goto fail; break; case HEVC_NAL_SPS: if (s->avctx->hwaccel && s->avctx->hwaccel->decode_params) { ret = s->avctx->hwaccel->decode_params(s->avctx, nal->type, nal->raw_data, nal->raw_size); if (ret < 0) goto fail; } ret = ff_hevc_decode_nal_sps(gb, s->avctx, &s->ps, s->apply_defdispwin); if (ret < 0) goto fail; break; case HEVC_NAL_PPS: if (s->avctx->hwaccel && s->avctx->hwaccel->decode_params) { ret = s->avctx->hwaccel->decode_params(s->avctx, nal->type, nal->raw_data, nal->raw_size); if (ret < 0) goto fail; } ret = ff_hevc_decode_nal_pps(gb, s->avctx, &s->ps); if (ret < 0) goto fail; break; case HEVC_NAL_SEI_PREFIX: case HEVC_NAL_SEI_SUFFIX: if (s->avctx->hwaccel && s->avctx->hwaccel->decode_params) { ret = s->avctx->hwaccel->decode_params(s->avctx, nal->type, nal->raw_data, nal->raw_size); if (ret < 0) goto fail; } ret = ff_hevc_decode_nal_sei(gb, s->avctx, &s->sei, &s->ps, s->nal_unit_type); if (ret < 0) goto fail; break; case HEVC_NAL_TRAIL_R: case HEVC_NAL_TRAIL_N: case HEVC_NAL_TSA_N: case HEVC_NAL_TSA_R: case HEVC_NAL_STSA_N: case HEVC_NAL_STSA_R: case HEVC_NAL_BLA_W_LP: case HEVC_NAL_BLA_W_RADL: case HEVC_NAL_BLA_N_LP: case HEVC_NAL_IDR_W_RADL: case HEVC_NAL_IDR_N_LP: case HEVC_NAL_CRA_NUT: case HEVC_NAL_RADL_N: case HEVC_NAL_RADL_R: case HEVC_NAL_RASL_N: case HEVC_NAL_RASL_R: ret = hls_slice_header(s); if (ret < 0) return ret; if (ret == 1) { ret = AVERROR_INVALIDDATA; goto fail; } if ( (s->avctx->skip_frame >= AVDISCARD_BIDIR && s->sh.slice_type == HEVC_SLICE_B) || (s->avctx->skip_frame >= AVDISCARD_NONINTRA && s->sh.slice_type != HEVC_SLICE_I) || (s->avctx->skip_frame >= AVDISCARD_NONKEY && !IS_IRAP(s))) { break; } if (s->sh.first_slice_in_pic_flag) { 
if (s->max_ra == INT_MAX) { if (s->nal_unit_type == HEVC_NAL_CRA_NUT || IS_BLA(s)) { s->max_ra = s->poc; } else { if (IS_IDR(s)) s->max_ra = INT_MIN; } } if ((s->nal_unit_type == HEVC_NAL_RASL_R || s->nal_unit_type == HEVC_NAL_RASL_N) && s->poc <= s->max_ra) { s->is_decoded = 0; break; } else { if (s->nal_unit_type == HEVC_NAL_RASL_R && s->poc > s->max_ra) s->max_ra = INT_MIN; } s->overlap ++; ret = hevc_frame_start(s); if (ret < 0) return ret; } else if (!s->ref) { av_log(s->avctx, AV_LOG_ERROR, "First slice in a frame missing.\n"); goto fail; } if (s->nal_unit_type != s->first_nal_type) { av_log(s->avctx, AV_LOG_ERROR, "Non-matching NAL types of the VCL NALUs: %d %d\n", s->first_nal_type, s->nal_unit_type); return AVERROR_INVALIDDATA; } if (!s->sh.dependent_slice_segment_flag && s->sh.slice_type != HEVC_SLICE_I) { ret = ff_hevc_slice_rpl(s); if (ret < 0) { av_log(s->avctx, AV_LOG_WARNING, "Error constructing the reference lists for the current slice.\n"); goto fail; } } if (s->sh.first_slice_in_pic_flag && s->avctx->hwaccel) { ret = s->avctx->hwaccel->start_frame(s->avctx, NULL, 0); if (ret < 0) goto fail; } if (s->avctx->hwaccel) { ret = s->avctx->hwaccel->decode_slice(s->avctx, nal->raw_data, nal->raw_size); if (ret < 0) goto fail; } else { if (s->threads_number > 1 && s->sh.num_entry_point_offsets > 0) ctb_addr_ts = hls_slice_data_wpp(s, nal); else ctb_addr_ts = hls_slice_data(s); if (ctb_addr_ts >= (s->ps.sps->ctb_width * s->ps.sps->ctb_height)) { s->is_decoded = 1; } if (ctb_addr_ts < 0) { ret = ctb_addr_ts; goto fail; } } break; case HEVC_NAL_EOS_NUT: case HEVC_NAL_EOB_NUT: s->seq_decode = (s->seq_decode + 1) & 0xff; s->max_ra = INT_MAX; break; case HEVC_NAL_AUD: case HEVC_NAL_FD_NUT: break; default: av_log(s->avctx, AV_LOG_INFO, "Skipping NAL unit %d\n", s->nal_unit_type); } return 0; fail: if (s->avctx->err_recognition & AV_EF_EXPLODE) return ret; return 0; } static int decode_nal_units(HEVCContext *s, const uint8_t *buf, int length) { int i, ret = 0; int 
eos_at_start = 1; s->ref = NULL; s->last_eos = s->eos; s->eos = 0; s->overlap = 0; /* split the input packet into NAL units, so we know the upper bound on the * number of slices in the frame */ ret = ff_h2645_packet_split(&s->pkt, buf, length, s->avctx, s->is_nalff, s->nal_length_size, s->avctx->codec_id, 1, 0); if (ret < 0) { av_log(s->avctx, AV_LOG_ERROR, "Error splitting the input into NAL units.\n"); return ret; } for (i = 0; i < s->pkt.nb_nals; i++) { if (s->pkt.nals[i].type == HEVC_NAL_EOB_NUT || s->pkt.nals[i].type == HEVC_NAL_EOS_NUT) { if (eos_at_start) { s->last_eos = 1; } else { s->eos = 1; } } else { eos_at_start = 0; } } /* decode the NAL units */ for (i = 0; i < s->pkt.nb_nals; i++) { H2645NAL *nal = &s->pkt.nals[i]; if (s->avctx->skip_frame >= AVDISCARD_ALL || (s->avctx->skip_frame >= AVDISCARD_NONREF && ff_hevc_nal_is_nonref(nal->type))) continue; ret = decode_nal_unit(s, nal); if (ret >= 0 && s->overlap > 2) ret = AVERROR_INVALIDDATA; if (ret < 0) { av_log(s->avctx, AV_LOG_WARNING, "Error parsing NAL unit #%d.\n", i); goto fail; } } fail: if (s->ref && s->threads_type == FF_THREAD_FRAME) ff_thread_report_progress(&s->ref->tf, INT_MAX, 0); return ret; } static void print_md5(void *log_ctx, int level, uint8_t md5[16]) { int i; for (i = 0; i < 16; i++) av_log(log_ctx, level, "%02"PRIx8, md5[i]); } static int verify_md5(HEVCContext *s, AVFrame *frame) { const AVPixFmtDescriptor *desc = av_pix_fmt_desc_get(frame->format); int pixel_shift; int i, j; if (!desc) return AVERROR(EINVAL); pixel_shift = desc->comp[0].depth > 8; av_log(s->avctx, AV_LOG_DEBUG, "Verifying checksum for frame with POC %d: ", s->poc); /* the checksums are LE, so we have to byteswap for >8bpp formats * on BE arches */ #if HAVE_BIGENDIAN if (pixel_shift && !s->checksum_buf) { av_fast_malloc(&s->checksum_buf, &s->checksum_buf_size, FFMAX3(frame->linesize[0], frame->linesize[1], frame->linesize[2])); if (!s->checksum_buf) return AVERROR(ENOMEM); } #endif for (i = 0; frame->data[i]; i++) 
{ int width = s->avctx->coded_width; int height = s->avctx->coded_height; int w = (i == 1 || i == 2) ? (width >> desc->log2_chroma_w) : width; int h = (i == 1 || i == 2) ? (height >> desc->log2_chroma_h) : height; uint8_t md5[16]; av_md5_init(s->md5_ctx); for (j = 0; j < h; j++) { const uint8_t *src = frame->data[i] + j * frame->linesize[i]; #if HAVE_BIGENDIAN if (pixel_shift) { s->bdsp.bswap16_buf((uint16_t *) s->checksum_buf, (const uint16_t *) src, w); src = s->checksum_buf; } #endif av_md5_update(s->md5_ctx, src, w << pixel_shift); } av_md5_final(s->md5_ctx, md5); if (!memcmp(md5, s->sei.picture_hash.md5[i], 16)) { av_log (s->avctx, AV_LOG_DEBUG, "plane %d - correct ", i); print_md5(s->avctx, AV_LOG_DEBUG, md5); av_log (s->avctx, AV_LOG_DEBUG, "; "); } else { av_log (s->avctx, AV_LOG_ERROR, "mismatching checksum of plane %d - ", i); print_md5(s->avctx, AV_LOG_ERROR, md5); av_log (s->avctx, AV_LOG_ERROR, " != "); print_md5(s->avctx, AV_LOG_ERROR, s->sei.picture_hash.md5[i]); av_log (s->avctx, AV_LOG_ERROR, "\n"); return AVERROR_INVALIDDATA; } } av_log(s->avctx, AV_LOG_DEBUG, "\n"); return 0; } static int hevc_decode_extradata(HEVCContext *s, uint8_t *buf, int length, int first) { int ret, i; ret = ff_hevc_decode_extradata(buf, length, &s->ps, &s->sei, &s->is_nalff, &s->nal_length_size, s->avctx->err_recognition, s->apply_defdispwin, s->avctx); if (ret < 0) return ret; /* export stream parameters from the first SPS */ for (i = 0; i < FF_ARRAY_ELEMS(s->ps.sps_list); i++) { if (first && s->ps.sps_list[i]) { const HEVCSPS *sps = (const HEVCSPS*)s->ps.sps_list[i]->data; export_stream_params(s->avctx, &s->ps, sps); break; } } return 0; } static int hevc_decode_frame(AVCodecContext *avctx, void *data, int *got_output, AVPacket *avpkt) { int ret; int new_extradata_size; uint8_t *new_extradata; HEVCContext *s = avctx->priv_data; if (!avpkt->size) { ret = ff_hevc_output_frame(s, data, 1); if (ret < 0) return ret; *got_output = ret; return 0; } new_extradata = 
av_packet_get_side_data(avpkt, AV_PKT_DATA_NEW_EXTRADATA, &new_extradata_size); if (new_extradata && new_extradata_size > 0) { ret = hevc_decode_extradata(s, new_extradata, new_extradata_size, 0); if (ret < 0) return ret; } s->ref = NULL; ret = decode_nal_units(s, avpkt->data, avpkt->size); if (ret < 0) return ret; if (avctx->hwaccel) { if (s->ref && (ret = avctx->hwaccel->end_frame(avctx)) < 0) { av_log(avctx, AV_LOG_ERROR, "hardware accelerator failed to decode picture\n"); ff_hevc_unref_frame(s, s->ref, ~0); return ret; } } else { /* verify the SEI checksum */ if (avctx->err_recognition & AV_EF_CRCCHECK && s->is_decoded && s->sei.picture_hash.is_md5) { ret = verify_md5(s, s->ref->frame); if (ret < 0 && avctx->err_recognition & AV_EF_EXPLODE) { ff_hevc_unref_frame(s, s->ref, ~0); return ret; } } } s->sei.picture_hash.is_md5 = 0; if (s->is_decoded) { av_log(avctx, AV_LOG_DEBUG, "Decoded frame with POC %d.\n", s->poc); s->is_decoded = 0; } if (s->output_frame->buf[0]) { av_frame_move_ref(data, s->output_frame); *got_output = 1; } return avpkt->size; } static int hevc_ref_frame(HEVCContext *s, HEVCFrame *dst, HEVCFrame *src) { int ret; ret = ff_thread_ref_frame(&dst->tf, &src->tf); if (ret < 0) return ret; dst->tab_mvf_buf = av_buffer_ref(src->tab_mvf_buf); if (!dst->tab_mvf_buf) goto fail; dst->tab_mvf = src->tab_mvf; dst->rpl_tab_buf = av_buffer_ref(src->rpl_tab_buf); if (!dst->rpl_tab_buf) goto fail; dst->rpl_tab = src->rpl_tab; dst->rpl_buf = av_buffer_ref(src->rpl_buf); if (!dst->rpl_buf) goto fail; dst->poc = src->poc; dst->ctb_count = src->ctb_count; dst->flags = src->flags; dst->sequence = src->sequence; if (src->hwaccel_picture_private) { dst->hwaccel_priv_buf = av_buffer_ref(src->hwaccel_priv_buf); if (!dst->hwaccel_priv_buf) goto fail; dst->hwaccel_picture_private = dst->hwaccel_priv_buf->data; } return 0; fail: ff_hevc_unref_frame(s, dst, ~0); return AVERROR(ENOMEM); } static av_cold int hevc_decode_free(AVCodecContext *avctx) { HEVCContext *s = 
avctx->priv_data; int i; pic_arrays_free(s); av_freep(&s->md5_ctx); av_freep(&s->cabac_state); for (i = 0; i < 3; i++) { av_freep(&s->sao_pixel_buffer_h[i]); av_freep(&s->sao_pixel_buffer_v[i]); } av_frame_free(&s->output_frame); for (i = 0; i < FF_ARRAY_ELEMS(s->DPB); i++) { ff_hevc_unref_frame(s, &s->DPB[i], ~0); av_frame_free(&s->DPB[i].frame); } ff_hevc_ps_uninit(&s->ps); av_freep(&s->sh.entry_point_offset); av_freep(&s->sh.offset); av_freep(&s->sh.size); for (i = 1; i < s->threads_number; i++) { HEVCLocalContext *lc = s->HEVClcList[i]; if (lc) { av_freep(&s->HEVClcList[i]); av_freep(&s->sList[i]); } } if (s->HEVClc == s->HEVClcList[0]) s->HEVClc = NULL; av_freep(&s->HEVClcList[0]); ff_h2645_packet_uninit(&s->pkt); return 0; } static av_cold int hevc_init_context(AVCodecContext *avctx) { HEVCContext *s = avctx->priv_data; int i; s->avctx = avctx; s->HEVClc = av_mallocz(sizeof(HEVCLocalContext)); if (!s->HEVClc) goto fail; s->HEVClcList[0] = s->HEVClc; s->sList[0] = s; s->cabac_state = av_malloc(HEVC_CONTEXTS); if (!s->cabac_state) goto fail; s->output_frame = av_frame_alloc(); if (!s->output_frame) goto fail; for (i = 0; i < FF_ARRAY_ELEMS(s->DPB); i++) { s->DPB[i].frame = av_frame_alloc(); if (!s->DPB[i].frame) goto fail; s->DPB[i].tf.f = s->DPB[i].frame; } s->max_ra = INT_MAX; s->md5_ctx = av_md5_alloc(); if (!s->md5_ctx) goto fail; ff_bswapdsp_init(&s->bdsp); s->context_initialized = 1; s->eos = 0; ff_hevc_reset_sei(&s->sei); return 0; fail: hevc_decode_free(avctx); return AVERROR(ENOMEM); } #if HAVE_THREADS static int hevc_update_thread_context(AVCodecContext *dst, const AVCodecContext *src) { HEVCContext *s = dst->priv_data; HEVCContext *s0 = src->priv_data; int i, ret; if (!s->context_initialized) { ret = hevc_init_context(dst); if (ret < 0) return ret; } for (i = 0; i < FF_ARRAY_ELEMS(s->DPB); i++) { ff_hevc_unref_frame(s, &s->DPB[i], ~0); if (s0->DPB[i].frame->buf[0]) { ret = hevc_ref_frame(s, &s->DPB[i], &s0->DPB[i]); if (ret < 0) return ret; } } if 
(s->ps.sps != s0->ps.sps) s->ps.sps = NULL; for (i = 0; i < FF_ARRAY_ELEMS(s->ps.vps_list); i++) { av_buffer_unref(&s->ps.vps_list[i]); if (s0->ps.vps_list[i]) { s->ps.vps_list[i] = av_buffer_ref(s0->ps.vps_list[i]); if (!s->ps.vps_list[i]) return AVERROR(ENOMEM); } } for (i = 0; i < FF_ARRAY_ELEMS(s->ps.sps_list); i++) { av_buffer_unref(&s->ps.sps_list[i]); if (s0->ps.sps_list[i]) { s->ps.sps_list[i] = av_buffer_ref(s0->ps.sps_list[i]); if (!s->ps.sps_list[i]) return AVERROR(ENOMEM); } } for (i = 0; i < FF_ARRAY_ELEMS(s->ps.pps_list); i++) { av_buffer_unref(&s->ps.pps_list[i]); if (s0->ps.pps_list[i]) { s->ps.pps_list[i] = av_buffer_ref(s0->ps.pps_list[i]); if (!s->ps.pps_list[i]) return AVERROR(ENOMEM); } } if (s->ps.sps != s0->ps.sps) if ((ret = set_sps(s, s0->ps.sps, src->pix_fmt)) < 0) return ret; s->seq_decode = s0->seq_decode; s->seq_output = s0->seq_output; s->pocTid0 = s0->pocTid0; s->max_ra = s0->max_ra; s->eos = s0->eos; s->no_rasl_output_flag = s0->no_rasl_output_flag; s->is_nalff = s0->is_nalff; s->nal_length_size = s0->nal_length_size; s->threads_number = s0->threads_number; s->threads_type = s0->threads_type; if (s0->eos) { s->seq_decode = (s->seq_decode + 1) & 0xff; s->max_ra = INT_MAX; } s->sei.frame_packing = s0->sei.frame_packing; s->sei.display_orientation = s0->sei.display_orientation; s->sei.mastering_display = s0->sei.mastering_display; s->sei.content_light = s0->sei.content_light; s->sei.alternative_transfer = s0->sei.alternative_transfer; return 0; } #endif static av_cold int hevc_decode_init(AVCodecContext *avctx) { HEVCContext *s = avctx->priv_data; int ret; avctx->internal->allocate_progress = 1; ret = hevc_init_context(avctx); if (ret < 0) return ret; s->enable_parallel_tiles = 0; s->sei.picture_timing.picture_struct = 0; s->eos = 1; atomic_init(&s->wpp_err, 0); if(avctx->active_thread_type & FF_THREAD_SLICE) s->threads_number = avctx->thread_count; else s->threads_number = 1; if (avctx->extradata_size > 0 && avctx->extradata) { ret = 
hevc_decode_extradata(s, avctx->extradata, avctx->extradata_size, 1); if (ret < 0) { hevc_decode_free(avctx); return ret; } } if((avctx->active_thread_type & FF_THREAD_FRAME) && avctx->thread_count > 1) s->threads_type = FF_THREAD_FRAME; else s->threads_type = FF_THREAD_SLICE; return 0; } #if HAVE_THREADS static av_cold int hevc_init_thread_copy(AVCodecContext *avctx) { HEVCContext *s = avctx->priv_data; int ret; memset(s, 0, sizeof(*s)); ret = hevc_init_context(avctx); if (ret < 0) return ret; return 0; } #endif static void hevc_decode_flush(AVCodecContext *avctx) { HEVCContext *s = avctx->priv_data; ff_hevc_flush_dpb(s); s->max_ra = INT_MAX; s->eos = 1; } #define OFFSET(x) offsetof(HEVCContext, x) #define PAR (AV_OPT_FLAG_DECODING_PARAM | AV_OPT_FLAG_VIDEO_PARAM) static const AVOption options[] = { { "apply_defdispwin", "Apply default display window from VUI", OFFSET(apply_defdispwin), AV_OPT_TYPE_BOOL, {.i64 = 0}, 0, 1, PAR }, { "strict-displaywin", "stricly apply default display window size", OFFSET(apply_defdispwin), AV_OPT_TYPE_BOOL, {.i64 = 0}, 0, 1, PAR }, { NULL }, }; static const AVClass hevc_decoder_class = { .class_name = "HEVC decoder", .item_name = av_default_item_name, .option = options, .version = LIBAVUTIL_VERSION_INT, }; AVCodec ff_hevc_decoder = { .name = "hevc", .long_name = NULL_IF_CONFIG_SMALL("HEVC (High Efficiency Video Coding)"), .type = AVMEDIA_TYPE_VIDEO, .id = AV_CODEC_ID_HEVC, .priv_data_size = sizeof(HEVCContext), .priv_class = &hevc_decoder_class, .init = hevc_decode_init, .close = hevc_decode_free, .decode = hevc_decode_frame, .flush = hevc_decode_flush, .update_thread_context = ONLY_IF_THREADS_ENABLED(hevc_update_thread_context), .init_thread_copy = ONLY_IF_THREADS_ENABLED(hevc_init_thread_copy), .capabilities = AV_CODEC_CAP_DR1 | AV_CODEC_CAP_DELAY | AV_CODEC_CAP_SLICE_THREADS | AV_CODEC_CAP_FRAME_THREADS, .caps_internal = FF_CODEC_CAP_INIT_THREADSAFE | FF_CODEC_CAP_EXPORTS_CROPPING, .profiles = 
NULL_IF_CONFIG_SMALL(ff_hevc_profiles), .hw_configs = (const AVCodecHWConfigInternal*[]) { #if CONFIG_HEVC_DXVA2_HWACCEL HWACCEL_DXVA2(hevc), #endif #if CONFIG_HEVC_D3D11VA_HWACCEL HWACCEL_D3D11VA(hevc), #endif #if CONFIG_HEVC_D3D11VA2_HWACCEL HWACCEL_D3D11VA2(hevc), #endif #if CONFIG_HEVC_NVDEC_HWACCEL HWACCEL_NVDEC(hevc), #endif #if CONFIG_HEVC_VAAPI_HWACCEL HWACCEL_VAAPI(hevc), #endif #if CONFIG_HEVC_VDPAU_HWACCEL HWACCEL_VDPAU(hevc), #endif #if CONFIG_HEVC_VIDEOTOOLBOX_HWACCEL HWACCEL_VIDEOTOOLBOX(hevc), #endif NULL }, };
static int decode_nal_unit(HEVCContext *s, const H2645NAL *nal) { HEVCLocalContext *lc = s->HEVClc; GetBitContext *gb = &lc->gb; int ctb_addr_ts, ret; *gb = nal->gb; s->nal_unit_type = nal->type; s->temporal_id = nal->temporal_id; switch (s->nal_unit_type) { case HEVC_NAL_VPS: if (s->avctx->hwaccel && s->avctx->hwaccel->decode_params) { ret = s->avctx->hwaccel->decode_params(s->avctx, nal->type, nal->raw_data, nal->raw_size); if (ret < 0) goto fail; } ret = ff_hevc_decode_nal_vps(gb, s->avctx, &s->ps); if (ret < 0) goto fail; break; case HEVC_NAL_SPS: if (s->avctx->hwaccel && s->avctx->hwaccel->decode_params) { ret = s->avctx->hwaccel->decode_params(s->avctx, nal->type, nal->raw_data, nal->raw_size); if (ret < 0) goto fail; } ret = ff_hevc_decode_nal_sps(gb, s->avctx, &s->ps, s->apply_defdispwin); if (ret < 0) goto fail; break; case HEVC_NAL_PPS: if (s->avctx->hwaccel && s->avctx->hwaccel->decode_params) { ret = s->avctx->hwaccel->decode_params(s->avctx, nal->type, nal->raw_data, nal->raw_size); if (ret < 0) goto fail; } ret = ff_hevc_decode_nal_pps(gb, s->avctx, &s->ps); if (ret < 0) goto fail; break; case HEVC_NAL_SEI_PREFIX: case HEVC_NAL_SEI_SUFFIX: if (s->avctx->hwaccel && s->avctx->hwaccel->decode_params) { ret = s->avctx->hwaccel->decode_params(s->avctx, nal->type, nal->raw_data, nal->raw_size); if (ret < 0) goto fail; } ret = ff_hevc_decode_nal_sei(gb, s->avctx, &s->sei, &s->ps, s->nal_unit_type); if (ret < 0) goto fail; break; case HEVC_NAL_TRAIL_R: case HEVC_NAL_TRAIL_N: case HEVC_NAL_TSA_N: case HEVC_NAL_TSA_R: case HEVC_NAL_STSA_N: case HEVC_NAL_STSA_R: case HEVC_NAL_BLA_W_LP: case HEVC_NAL_BLA_W_RADL: case HEVC_NAL_BLA_N_LP: case HEVC_NAL_IDR_W_RADL: case HEVC_NAL_IDR_N_LP: case HEVC_NAL_CRA_NUT: case HEVC_NAL_RADL_N: case HEVC_NAL_RADL_R: case HEVC_NAL_RASL_N: case HEVC_NAL_RASL_R: ret = hls_slice_header(s); if (ret < 0) return ret; if ( (s->avctx->skip_frame >= AVDISCARD_BIDIR && s->sh.slice_type == HEVC_SLICE_B) || (s->avctx->skip_frame >= 
AVDISCARD_NONINTRA && s->sh.slice_type != HEVC_SLICE_I) || (s->avctx->skip_frame >= AVDISCARD_NONKEY && !IS_IRAP(s))) { break; } if (s->sh.first_slice_in_pic_flag) { if (s->ref) { av_log(s->avctx, AV_LOG_ERROR, "Two slices reporting being the first in the same frame.\n"); goto fail; } if (s->max_ra == INT_MAX) { if (s->nal_unit_type == HEVC_NAL_CRA_NUT || IS_BLA(s)) { s->max_ra = s->poc; } else { if (IS_IDR(s)) s->max_ra = INT_MIN; } } if ((s->nal_unit_type == HEVC_NAL_RASL_R || s->nal_unit_type == HEVC_NAL_RASL_N) && s->poc <= s->max_ra) { s->is_decoded = 0; break; } else { if (s->nal_unit_type == HEVC_NAL_RASL_R && s->poc > s->max_ra) s->max_ra = INT_MIN; } s->overlap ++; ret = hevc_frame_start(s); if (ret < 0) return ret; } else if (!s->ref) { av_log(s->avctx, AV_LOG_ERROR, "First slice in a frame missing.\n"); goto fail; } if (s->nal_unit_type != s->first_nal_type) { av_log(s->avctx, AV_LOG_ERROR, "Non-matching NAL types of the VCL NALUs: %d %d\n", s->first_nal_type, s->nal_unit_type); return AVERROR_INVALIDDATA; } if (!s->sh.dependent_slice_segment_flag && s->sh.slice_type != HEVC_SLICE_I) { ret = ff_hevc_slice_rpl(s); if (ret < 0) { av_log(s->avctx, AV_LOG_WARNING, "Error constructing the reference lists for the current slice.\n"); goto fail; } } if (s->sh.first_slice_in_pic_flag && s->avctx->hwaccel) { ret = s->avctx->hwaccel->start_frame(s->avctx, NULL, 0); if (ret < 0) goto fail; } if (s->avctx->hwaccel) { ret = s->avctx->hwaccel->decode_slice(s->avctx, nal->raw_data, nal->raw_size); if (ret < 0) goto fail; } else { if (s->threads_number > 1 && s->sh.num_entry_point_offsets > 0) ctb_addr_ts = hls_slice_data_wpp(s, nal); else ctb_addr_ts = hls_slice_data(s); if (ctb_addr_ts >= (s->ps.sps->ctb_width * s->ps.sps->ctb_height)) { s->is_decoded = 1; } if (ctb_addr_ts < 0) { ret = ctb_addr_ts; goto fail; } } break; case HEVC_NAL_EOS_NUT: case HEVC_NAL_EOB_NUT: s->seq_decode = (s->seq_decode + 1) & 0xff; s->max_ra = INT_MAX; break; case HEVC_NAL_AUD: case 
HEVC_NAL_FD_NUT: break; default: av_log(s->avctx, AV_LOG_INFO, "Skipping NAL unit %d\n", s->nal_unit_type); } return 0; fail: if (s->avctx->err_recognition & AV_EF_EXPLODE) return ret; return 0; }
static int decode_nal_unit(HEVCContext *s, const H2645NAL *nal) { HEVCLocalContext *lc = s->HEVClc; GetBitContext *gb = &lc->gb; int ctb_addr_ts, ret; *gb = nal->gb; s->nal_unit_type = nal->type; s->temporal_id = nal->temporal_id; switch (s->nal_unit_type) { case HEVC_NAL_VPS: if (s->avctx->hwaccel && s->avctx->hwaccel->decode_params) { ret = s->avctx->hwaccel->decode_params(s->avctx, nal->type, nal->raw_data, nal->raw_size); if (ret < 0) goto fail; } ret = ff_hevc_decode_nal_vps(gb, s->avctx, &s->ps); if (ret < 0) goto fail; break; case HEVC_NAL_SPS: if (s->avctx->hwaccel && s->avctx->hwaccel->decode_params) { ret = s->avctx->hwaccel->decode_params(s->avctx, nal->type, nal->raw_data, nal->raw_size); if (ret < 0) goto fail; } ret = ff_hevc_decode_nal_sps(gb, s->avctx, &s->ps, s->apply_defdispwin); if (ret < 0) goto fail; break; case HEVC_NAL_PPS: if (s->avctx->hwaccel && s->avctx->hwaccel->decode_params) { ret = s->avctx->hwaccel->decode_params(s->avctx, nal->type, nal->raw_data, nal->raw_size); if (ret < 0) goto fail; } ret = ff_hevc_decode_nal_pps(gb, s->avctx, &s->ps); if (ret < 0) goto fail; break; case HEVC_NAL_SEI_PREFIX: case HEVC_NAL_SEI_SUFFIX: if (s->avctx->hwaccel && s->avctx->hwaccel->decode_params) { ret = s->avctx->hwaccel->decode_params(s->avctx, nal->type, nal->raw_data, nal->raw_size); if (ret < 0) goto fail; } ret = ff_hevc_decode_nal_sei(gb, s->avctx, &s->sei, &s->ps, s->nal_unit_type); if (ret < 0) goto fail; break; case HEVC_NAL_TRAIL_R: case HEVC_NAL_TRAIL_N: case HEVC_NAL_TSA_N: case HEVC_NAL_TSA_R: case HEVC_NAL_STSA_N: case HEVC_NAL_STSA_R: case HEVC_NAL_BLA_W_LP: case HEVC_NAL_BLA_W_RADL: case HEVC_NAL_BLA_N_LP: case HEVC_NAL_IDR_W_RADL: case HEVC_NAL_IDR_N_LP: case HEVC_NAL_CRA_NUT: case HEVC_NAL_RADL_N: case HEVC_NAL_RADL_R: case HEVC_NAL_RASL_N: case HEVC_NAL_RASL_R: ret = hls_slice_header(s); if (ret < 0) return ret; if (ret == 1) { ret = AVERROR_INVALIDDATA; goto fail; } if ( (s->avctx->skip_frame >= AVDISCARD_BIDIR && 
s->sh.slice_type == HEVC_SLICE_B) || (s->avctx->skip_frame >= AVDISCARD_NONINTRA && s->sh.slice_type != HEVC_SLICE_I) || (s->avctx->skip_frame >= AVDISCARD_NONKEY && !IS_IRAP(s))) { break; } if (s->sh.first_slice_in_pic_flag) { if (s->max_ra == INT_MAX) { if (s->nal_unit_type == HEVC_NAL_CRA_NUT || IS_BLA(s)) { s->max_ra = s->poc; } else { if (IS_IDR(s)) s->max_ra = INT_MIN; } } if ((s->nal_unit_type == HEVC_NAL_RASL_R || s->nal_unit_type == HEVC_NAL_RASL_N) && s->poc <= s->max_ra) { s->is_decoded = 0; break; } else { if (s->nal_unit_type == HEVC_NAL_RASL_R && s->poc > s->max_ra) s->max_ra = INT_MIN; } s->overlap ++; ret = hevc_frame_start(s); if (ret < 0) return ret; } else if (!s->ref) { av_log(s->avctx, AV_LOG_ERROR, "First slice in a frame missing.\n"); goto fail; } if (s->nal_unit_type != s->first_nal_type) { av_log(s->avctx, AV_LOG_ERROR, "Non-matching NAL types of the VCL NALUs: %d %d\n", s->first_nal_type, s->nal_unit_type); return AVERROR_INVALIDDATA; } if (!s->sh.dependent_slice_segment_flag && s->sh.slice_type != HEVC_SLICE_I) { ret = ff_hevc_slice_rpl(s); if (ret < 0) { av_log(s->avctx, AV_LOG_WARNING, "Error constructing the reference lists for the current slice.\n"); goto fail; } } if (s->sh.first_slice_in_pic_flag && s->avctx->hwaccel) { ret = s->avctx->hwaccel->start_frame(s->avctx, NULL, 0); if (ret < 0) goto fail; } if (s->avctx->hwaccel) { ret = s->avctx->hwaccel->decode_slice(s->avctx, nal->raw_data, nal->raw_size); if (ret < 0) goto fail; } else { if (s->threads_number > 1 && s->sh.num_entry_point_offsets > 0) ctb_addr_ts = hls_slice_data_wpp(s, nal); else ctb_addr_ts = hls_slice_data(s); if (ctb_addr_ts >= (s->ps.sps->ctb_width * s->ps.sps->ctb_height)) { s->is_decoded = 1; } if (ctb_addr_ts < 0) { ret = ctb_addr_ts; goto fail; } } break; case HEVC_NAL_EOS_NUT: case HEVC_NAL_EOB_NUT: s->seq_decode = (s->seq_decode + 1) & 0xff; s->max_ra = INT_MAX; break; case HEVC_NAL_AUD: case HEVC_NAL_FD_NUT: break; default: av_log(s->avctx, AV_LOG_INFO, 
"Skipping NAL unit %d\n", s->nal_unit_type); } return 0; fail: if (s->avctx->err_recognition & AV_EF_EXPLODE) return ret; return 0; }
{'added': [(491, ' if (s->ref && sh->first_slice_in_pic_flag) {'), (492, ' av_log(s->avctx, AV_LOG_ERROR, "Two slices reporting being the first in the same frame.\\n");'), (493, ' return 1; // This slice will be skiped later, do not corrupt state'), (494, ' }'), (495, ''), (2926, ' if (ret == 1) {'), (2927, ' ret = AVERROR_INVALIDDATA;'), (2928, ' goto fail;'), (2929, ' }'), (2930, '')], 'deleted': [(2930, ' if (s->ref) {'), (2931, ' av_log(s->avctx, AV_LOG_ERROR, "Two slices reporting being the first in the same frame.\\n");'), (2932, ' goto fail;'), (2933, ' }')]}
10
4
2,905
26,838
173
1,050
76
https://github.com/FFmpeg/FFmpeg
CVE-2019-11338
CWE-476
1,738
xdelta3-test.h
C
do_cmd
/* xdelta 3 - delta compression tools and library Copyright (C) 2001, * 2003, 2004, 2005, 2006, 2007, 2008, 2009, 2010, 2011, 2012. * Joshua P. MacDonald * * This program is free software; you can redistribute it and/or modify * it under the terms of the GNU General Public License as published by * the Free Software Foundation; either version 2 of the License, or * (at your option) any later version. * * This program is distributed in the hope that it will be useful, * but WITHOUT ANY WARRANTY; without even the implied warranty of * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the * GNU General Public License for more details. * * You should have received a copy of the GNU General Public License * along with this program; if not, write to the Free Software * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA */ /* This is public-domain Mersenne Twister code, * attributed to Michael Brundage. Thanks! * http://www.qbrundage.com/michaelb/pubs/essays/random_number_generation.html */ static const uint32_t TEST_SEED1 = 5489UL; #define MT_LEN 624 #define MT_IA 397 static const uint32_t UPPER_MASK = 0x80000000; static const uint32_t LOWER_MASK = 0x7FFFFFFF; static const uint32_t MATRIX_A = 0x9908B0DF; #ifndef SHELL_TESTS #define SHELL_TESTS 1 #endif typedef struct mtrand mtrand; struct mtrand { int mt_index_; uint32_t mt_buffer_[MT_LEN]; }; int test_compare_files (const char* tgt, const char *rec); void mt_init(mtrand *mt, uint32_t seed); uint32_t mt_random (mtrand *mt); int test_setup (void); void mt_init(mtrand *mt, uint32_t seed) { int i; mt->mt_buffer_[0] = seed; mt->mt_index_ = MT_LEN; for (i = 1; i < MT_LEN; i++) { /* See Knuth TAOCP Vol2. 3rd Ed. P.106 for multiplier. */ /* In the previous versions, MSBs of the seed affect */ /* only MSBs of the array mt[]. 
*/ /* 2002/01/09 modified by Makoto Matsumoto */ mt->mt_buffer_[i] = (1812433253UL * (mt->mt_buffer_[i-1] ^ (mt->mt_buffer_[i-1] >> 30)) + i); } } uint32_t mt_random (mtrand *mt) { uint32_t y; unsigned long mag01[2]; mag01[0] = 0; mag01[1] = MATRIX_A; if (mt->mt_index_ >= MT_LEN) { int kk; for (kk = 0; kk < MT_LEN - MT_IA; kk++) { y = (mt->mt_buffer_[kk] & UPPER_MASK) | (mt->mt_buffer_[kk + 1] & LOWER_MASK); mt->mt_buffer_[kk] = mt->mt_buffer_[kk + MT_IA] ^ (y >> 1) ^ mag01[y & 0x1UL]; } for (;kk < MT_LEN - 1; kk++) { y = (mt->mt_buffer_[kk] & UPPER_MASK) | (mt->mt_buffer_[kk + 1] & LOWER_MASK); mt->mt_buffer_[kk] = mt->mt_buffer_[kk + (MT_IA - MT_LEN)] ^ (y >> 1) ^ mag01[y & 0x1UL]; } y = (mt->mt_buffer_[MT_LEN - 1] & UPPER_MASK) | (mt->mt_buffer_[0] & LOWER_MASK); mt->mt_buffer_[MT_LEN - 1] = mt->mt_buffer_[MT_IA - 1] ^ (y >> 1) ^ mag01[y & 0x1UL]; mt->mt_index_ = 0; } y = mt->mt_buffer_[mt->mt_index_++]; y ^= (y >> 11); y ^= (y << 7) & 0x9d2c5680UL; y ^= (y << 15) & 0xefc60000UL; y ^= (y >> 18); return y; } static mtrand static_mtrand; #include <math.h> static uint32_t mt_exp_rand (uint32_t mean, uint32_t max_value) { double mean_d = mean; double erand = log (1.0 / (mt_random (&static_mtrand) / (double)UINT32_MAX)); uint32_t x = (uint32_t) (mean_d * erand + 0.5); return min (x, max_value); } #if SHELL_TESTS #include <sys/wait.h> #endif #define MSG_IS(x) (stream->msg != NULL && strcmp ((x), stream->msg) == 0) static const usize_t TWO_MEGS_AND_DELTA = (3 << 20); static const usize_t ADDR_CACHE_ROUNDS = 10000; static const usize_t TEST_FILE_MEAN = 16384; static const double TEST_ADD_MEAN = 128; static const double TEST_ADD_MAX = 512; static const double TEST_ADD_RATIO = 0.1; static const double TEST_EPSILON = 0.25; #define TESTBUFSIZE (1024 * 16) #define TESTFILESIZE (1024) static char TEST_TARGET_FILE[TESTFILESIZE]; static char TEST_SOURCE_FILE[TESTFILESIZE]; static char TEST_DELTA_FILE[TESTFILESIZE]; static char TEST_RECON_FILE[TESTFILESIZE]; static char 
TEST_RECON2_FILE[TESTFILESIZE]; static char TEST_COPY_FILE[TESTFILESIZE]; static char TEST_NOPERM_FILE[TESTFILESIZE]; #define CHECK(cond) if (!(cond)) { XPR(NT "check failure: " #cond); abort(); } #if SHELL_TESTS /* Use a fixed soft config so that test values are fixed. See also * test_compress_text(). */ static const char* test_softcfg_str = "-C9,3,4,8,2,36,70"; #endif /*********************************************************************** TEST HELPERS ***********************************************************************/ static void DOT (void) { XPR(NTR "."); } static int do_cmd (xd3_stream *stream, const char *buf) { int ret; if ((ret = system (buf)) != 0) { if (WIFEXITED (ret)) { stream->msg = "command exited non-zero"; IF_DEBUG1 (XPR(NT "command was: %s\n", buf)); } else { stream->msg = "abnormal command termination"; } return XD3_INTERNAL; } return 0; } static int do_fail (xd3_stream *stream, const char *buf) { int ret; ret = system (buf); if (! WIFEXITED (ret) || WEXITSTATUS (ret) != 1) { stream->msg = "command should have not succeeded"; XPR(NT "command was %s\n", buf); return XD3_INTERNAL; } return 0; } /* Test that the exponential distribution actually produces its mean. 
*/ static int test_random_numbers (xd3_stream *stream, int ignore) { usize_t i; usize_t sum = 0; usize_t mean = 50; usize_t n_rounds = 1000000; double average, error; double allowed_error = 0.1; mt_init (& static_mtrand, 0x9f73f7fe); for (i = 0; i < n_rounds; i += 1) { sum += mt_exp_rand (mean, USIZE_T_MAX); } average = (double) sum / (double) n_rounds; error = average - (double) mean; if (error < allowed_error && error > -allowed_error) { return 0; } /*XPR(NT "error is %f\n", error);*/ stream->msg = "random distribution looks broken"; return XD3_INTERNAL; } static void test_unlink (char* file) { int ret; if ((ret = unlink (file)) != 0 && errno != ENOENT) { XPR(NT "unlink %s failed: %s\n", file, strerror(ret)); } } static void test_cleanup (void) { #if 1 test_unlink (TEST_TARGET_FILE); test_unlink (TEST_SOURCE_FILE); test_unlink (TEST_DELTA_FILE); test_unlink (TEST_RECON_FILE); test_unlink (TEST_RECON2_FILE); test_unlink (TEST_COPY_FILE); test_unlink (TEST_NOPERM_FILE); #endif } int test_setup (void) { static int x = 0; x++; snprintf_func (TEST_TARGET_FILE, TESTFILESIZE, "/tmp/xdtest.target.%d", x); snprintf_func (TEST_SOURCE_FILE, TESTFILESIZE, "/tmp/xdtest.source.%d", x); snprintf_func (TEST_DELTA_FILE, TESTFILESIZE, "/tmp/xdtest.delta.%d", x); snprintf_func (TEST_RECON_FILE, TESTFILESIZE, "/tmp/xdtest.recon.%d", x); snprintf_func (TEST_RECON2_FILE, TESTFILESIZE, "/tmp/xdtest.recon2.%d", x); snprintf_func (TEST_COPY_FILE, TESTFILESIZE, "/tmp/xdtest.copy.%d", x); snprintf_func (TEST_NOPERM_FILE, TESTFILESIZE, "/tmp/xdtest.noperm.%d", x); test_cleanup(); return 0; } static int test_make_inputs (xd3_stream *stream, xoff_t *ss_out, xoff_t *ts_out) { usize_t ts = (mt_random (&static_mtrand) % TEST_FILE_MEAN) + TEST_FILE_MEAN / 2; usize_t ss = (mt_random (&static_mtrand) % TEST_FILE_MEAN) + TEST_FILE_MEAN / 2; uint8_t *buf = (uint8_t*) malloc (ts + ss), *sbuf = buf, *tbuf = buf + ss; usize_t sadd = 0, sadd_max = (usize_t)(ss * TEST_ADD_RATIO); FILE *tf = NULL, *sf = 
NULL; usize_t i, j; int ret; if (buf == NULL) { return ENOMEM; } if ((tf = fopen (TEST_TARGET_FILE, "w")) == NULL || (ss_out != NULL && (sf = fopen (TEST_SOURCE_FILE, "w")) == NULL)) { stream->msg = "write failed"; ret = get_errno (); goto failure; } if (ss_out != NULL) { for (i = 0; i < ss; ) { sbuf[i++] = (uint8_t) mt_random (&static_mtrand); } } /* Then modify the data to produce copies, everything not copied is * an add. The following logic produces the TEST_ADD_RATIO. The * variable SADD contains the number of adds so far, which should * not exceed SADD_MAX. */ /* XPR(NT "ss = %u ts = %u\n", ss, ts); */ for (i = 0; i < ts; ) { usize_t left = ts - i; usize_t next = mt_exp_rand ((uint32_t) TEST_ADD_MEAN, (uint32_t) TEST_ADD_MAX); usize_t add_left = sadd_max - sadd; double add_prob = (left == 0) ? 0 : (add_left / (double) left); int do_copy; next = min (left, next); do_copy = (next > add_left || (mt_random (&static_mtrand) / \ (double)USIZE_T_MAX) >= add_prob); if (ss_out == NULL) { do_copy &= (i > 0); } else { do_copy &= (ss - next) > 0; } if (do_copy) { /* Copy */ size_t offset = mt_random (&static_mtrand) % ((ss_out == NULL) ? i : (ss - next)); /* XPR(NT "[%u] copy %u at %u ", i, next, offset); */ for (j = 0; j < next; j += 1) { char c = ((ss_out == NULL) ? 
tbuf : sbuf)[offset + j]; /* XPR(NT "%x%x", (c >> 4) & 0xf, c & 0xf); */ tbuf[i++] = c; } /* XPR(NT "\n"); */ } else { /* Add */ /* XPR(NT "[%u] add %u ", i, next); */ for (j = 0; j < next; j += 1) { char c = (char) mt_random (&static_mtrand); /* XPR(NT "%x%x", (c >> 4) & 0xf, c & 0xf); */ tbuf[i++] = c; } /* XPR(NT "\n"); */ sadd += next; } } /* XPR(NT "sadd = %u max = %u\n", sadd, sadd_max); */ if ((fwrite (tbuf, 1, ts, tf) != ts) || (ss_out != NULL && (fwrite (sbuf, 1, ss, sf) != ss))) { stream->msg = "write failed"; ret = get_errno (); goto failure; } if ((ret = fclose (tf)) || (ss_out != NULL && (ret = fclose (sf)))) { stream->msg = "close failed"; ret = get_errno (); goto failure; } if (ts_out) { (*ts_out) = ts; } if (ss_out) { (*ss_out) = ss; } failure: free (buf); return ret; } int test_compare_files (const char* tgt, const char *rec) { FILE *orig, *recons; static uint8_t obuf[TESTBUFSIZE], rbuf[TESTBUFSIZE]; xoff_t offset = 0; size_t i; size_t oc, rc; xoff_t diffs = 0; if ((orig = fopen (tgt, "r")) == NULL) { XPR(NT "open %s failed\n", tgt); return get_errno (); } if ((recons = fopen (rec, "r")) == NULL) { XPR(NT "open %s failed\n", rec); return get_errno (); } for (;;) { oc = fread (obuf, 1, TESTBUFSIZE, orig); rc = fread (rbuf, 1, TESTBUFSIZE, recons); if (oc != rc) { return XD3_INTERNAL; } if (oc == 0) { break; } for (i = 0; i < oc; i += 1) { if (obuf[i] != rbuf[i]) { XPR(NT "byte %u (read %u @ %"Q"u) %d != %d\n", (int)i, (int)oc, offset, obuf[i], rbuf[i]); diffs++; return XD3_INTERNAL; } } offset += oc; } fclose (orig); fclose (recons); if (diffs != 0) { return XD3_INTERNAL; } return 0; } static int test_save_copy (const char *origname) { char buf[TESTBUFSIZE]; int ret; snprintf_func (buf, TESTBUFSIZE, "cp -f %s %s", origname, TEST_COPY_FILE); if ((ret = system (buf)) != 0) { return XD3_INTERNAL; } return 0; } static int test_file_size (const char* file, xoff_t *size) { struct stat sbuf; int ret; (*size) = 0; if (stat (file, & sbuf) < 0) { ret = 
get_errno (); XPR(NT "stat failed: %s: %s\n", file, strerror (ret)); return ret; } if (! S_ISREG (sbuf.st_mode)) { ret = XD3_INTERNAL; XPR(NT "not a regular file: %s: %s\n", file, strerror (ret)); return ret; } (*size) = sbuf.st_size; return 0; } /*********************************************************************** READ OFFSET ***********************************************************************/ /* Common test for read_integer errors: encodes a 64-bit value and * then attempts to read as a 32-bit value. If TRUNC is non-zero, * attempts to get errors by shortening the input, otherwise it should * overflow. Expects XD3_INTERNAL and MSG. */ static int test_read_integer_error (xd3_stream *stream, usize_t trunto, const char *msg) { uint64_t eval = 1ULL << 34; uint32_t rval; xd3_output *buf = NULL; const uint8_t *max; const uint8_t *inp; int ret; buf = xd3_alloc_output (stream, buf); if ((ret = xd3_emit_uint64_t (stream, & buf, eval))) { goto fail; } again: inp = buf->base; max = buf->base + buf->next - trunto; if ((ret = xd3_read_uint32_t (stream, & inp, max, & rval)) != XD3_INVALID_INPUT || !MSG_IS (msg)) { ret = XD3_INTERNAL; } else if (trunto && trunto < buf->next) { trunto += 1; goto again; } else { ret = 0; } fail: xd3_free_output (stream, buf); return ret; } /* Test integer overflow using the above routine. */ static int test_decode_integer_overflow (xd3_stream *stream, int unused) { return test_read_integer_error (stream, 0, "overflow in read_intger"); } /* Test integer EOI using the above routine. */ static int test_decode_integer_end_of_input (xd3_stream *stream, int unused) { return test_read_integer_error (stream, 1, "end-of-input in read_integer"); } /* Test that emit_integer/decode_integer/sizeof_integer/read_integer * work on correct inputs. Tests powers of (2^7), plus or minus, up * to the maximum value. 
*/ #define TEST_ENCODE_DECODE_INTEGER(TYPE,ONE,MAX) \ xd3_output *rbuf = NULL; \ xd3_output *dbuf = NULL; \ TYPE values[64]; \ usize_t nvalues = 0; \ usize_t i; \ int ret = 0; \ \ for (i = 0; i < (sizeof (TYPE) * 8); i += 7) \ { \ values[nvalues++] = (ONE << i) - ONE; \ values[nvalues++] = (ONE << i); \ values[nvalues++] = (ONE << i) + ONE; \ } \ \ values[nvalues++] = MAX-ONE; \ values[nvalues++] = MAX; \ \ rbuf = xd3_alloc_output (stream, rbuf); \ dbuf = xd3_alloc_output (stream, dbuf); \ \ for (i = 0; i < nvalues; i += 1) \ { \ const uint8_t *max; \ const uint8_t *inp; \ TYPE val; \ \ DOT (); \ rbuf->next = 0; \ \ if ((ret = xd3_emit_ ## TYPE (stream, & rbuf, values[i])) || \ (ret = xd3_emit_ ## TYPE (stream, & dbuf, values[i]))) \ { \ goto fail; \ } \ \ inp = rbuf->base; \ max = rbuf->base + rbuf->next; \ \ if (rbuf->next != xd3_sizeof_ ## TYPE (values[i])) \ { \ ret = XD3_INTERNAL; \ goto fail; \ } \ \ if ((ret = xd3_read_ ## TYPE (stream, & inp, max, & val))) \ { \ goto fail; \ } \ \ if (val != values[i]) \ { \ ret = XD3_INTERNAL; \ goto fail; \ } \ \ DOT (); \ } \ \ stream->next_in = dbuf->base; \ stream->avail_in = dbuf->next; \ \ for (i = 0; i < nvalues; i += 1) \ { \ TYPE val; \ \ if ((ret = xd3_decode_ ## TYPE (stream, & val))) \ { \ goto fail; \ } \ \ if (val != values[i]) \ { \ ret = XD3_INTERNAL; \ goto fail; \ } \ } \ \ if (stream->avail_in != 0) \ { \ ret = XD3_INTERNAL; \ goto fail; \ } \ \ fail: \ xd3_free_output (stream, rbuf); \ xd3_free_output (stream, dbuf); \ \ return ret static int test_encode_decode_uint32_t (xd3_stream *stream, int unused) { TEST_ENCODE_DECODE_INTEGER(uint32_t,1U,UINT32_MAX); } static int test_encode_decode_uint64_t (xd3_stream *stream, int unused) { TEST_ENCODE_DECODE_INTEGER(uint64_t,1ULL,UINT64_MAX); } static int test_usize_t_overflow (xd3_stream *stream, int unused) { if (USIZE_T_OVERFLOW (USIZE_T_MAX, 0)) { goto fail; } if (USIZE_T_OVERFLOW (0, USIZE_T_MAX)) { goto fail; } if (USIZE_T_OVERFLOW (USIZE_T_MAX / 2, 
USIZE_T_MAX / 2)) { goto fail; } if (USIZE_T_OVERFLOW (USIZE_T_MAX / 2, USIZE_T_MAX / 2 + 1)) { goto fail; } if (! USIZE_T_OVERFLOW (USIZE_T_MAX, 1)) { goto fail; } if (! USIZE_T_OVERFLOW (1, USIZE_T_MAX)) { goto fail; } if (! USIZE_T_OVERFLOW (USIZE_T_MAX / 2 + 1, USIZE_T_MAX / 2 + 1)) { goto fail; } return 0; fail: stream->msg = "incorrect overflow computation"; return XD3_INTERNAL; } static int test_forward_match (xd3_stream *stream, int unused) { usize_t i; uint8_t buf1[256], buf2[256]; memset(buf1, 0, 256); memset(buf2, 0, 256); for (i = 0; i < 256; i++) { CHECK(xd3_forward_match(buf1, buf2, i) == (int)i); } for (i = 0; i < 255; i++) { buf2[i] = 1; CHECK(xd3_forward_match(buf1, buf2, 256) == (int)i); buf2[i] = 0; } return 0; } /*********************************************************************** Address cache ***********************************************************************/ static int test_address_cache (xd3_stream *stream, int unused) { int ret; usize_t i; usize_t offset; usize_t *addrs; uint8_t *big_buf, *buf_max; const uint8_t *buf; xd3_output *outp; uint8_t *modes; int mode_counts[16]; stream->acache.s_near = stream->code_table_desc->near_modes; stream->acache.s_same = stream->code_table_desc->same_modes; if ((ret = xd3_encode_init_partial (stream))) { return ret; } addrs = (usize_t*) xd3_alloc (stream, sizeof (usize_t), ADDR_CACHE_ROUNDS); modes = (uint8_t*) xd3_alloc (stream, sizeof (uint8_t), ADDR_CACHE_ROUNDS); memset (mode_counts, 0, sizeof (mode_counts)); memset (modes, 0, ADDR_CACHE_ROUNDS); addrs[0] = 0; mt_init (& static_mtrand, 0x9f73f7fc); /* First pass: encode addresses */ xd3_init_cache (& stream->acache); for (offset = 1; offset < ADDR_CACHE_ROUNDS; offset += 1) { double p; usize_t addr; usize_t prev_i; usize_t nearby; p = (mt_random (&static_mtrand) / (double)USIZE_T_MAX); prev_i = mt_random (&static_mtrand) % offset; nearby = (mt_random (&static_mtrand) % 256) % offset; nearby = max (1U, nearby); if (p < 0.1) { addr = 
addrs[offset-nearby]; } else if (p < 0.4) { addr = min (addrs[prev_i] + nearby, offset-1); } else { addr = prev_i; } if ((ret = xd3_encode_address (stream, addr, offset, & modes[offset]))) { return ret; } addrs[offset] = addr; mode_counts[modes[offset]] += 1; } /* Copy addresses into a contiguous buffer. */ big_buf = (uint8_t*) xd3_alloc (stream, xd3_sizeof_output (ADDR_HEAD (stream)), 1); for (offset = 0, outp = ADDR_HEAD (stream); outp != NULL; offset += outp->next, outp = outp->next_page) { memcpy (big_buf + offset, outp->base, outp->next); } buf_max = big_buf + offset; buf = big_buf; /* Second pass: decode addresses */ xd3_init_cache (& stream->acache); for (offset = 1; offset < ADDR_CACHE_ROUNDS; offset += 1) { uint32_t addr; if ((ret = xd3_decode_address (stream, offset, modes[offset], & buf, buf_max, & addr))) { return ret; } if (addr != addrs[offset]) { stream->msg = "incorrect decoded address"; return XD3_INTERNAL; } } /* Check that every byte, mode was used. */ if (buf != buf_max) { stream->msg = "address bytes not used"; return XD3_INTERNAL; } for (i = 0; i < (2 + stream->acache.s_same + stream->acache.s_near); i += 1) { if (mode_counts[i] == 0) { stream->msg = "address mode not used"; return XD3_INTERNAL; } } xd3_free (stream, modes); xd3_free (stream, addrs); xd3_free (stream, big_buf); return 0; } /*********************************************************************** Encode and decode with single bit error ***********************************************************************/ /* It compresses from 256 to around 185 bytes. * Avoids matching addresses that are a single-bit difference. * Avoids matching address 0. */ static const uint8_t test_text[] = "this is a story\n" "abouttttttttttt\n" "- his is a stor\n" "- about nothing " " all. 
boutique -" "his story is a -" "about " "what happens all" " the time what -" "am I ttttttt the" " person said, so" " what, per son -" " gory story is -" " about nothing -" "tttttt to test -" "his sto nothing"; static const uint8_t test_apphead[] = "header test"; static int test_compress_text (xd3_stream *stream, uint8_t *encoded, usize_t *encoded_size) { int ret; xd3_config cfg; int oflags = stream->flags; int flags = stream->flags | XD3_FLUSH; xd3_free_stream (stream); xd3_init_config (& cfg, flags); /* This configuration is fixed so that the "expected non-error" the counts in * decompress_single_bit_errors are too. See test_coftcfg_str. */ cfg.smatch_cfg = XD3_SMATCH_SOFT; cfg.smatcher_soft.name = "test"; cfg.smatcher_soft.large_look = 64; /* no source, not used */ cfg.smatcher_soft.large_step = 64; /* no source, not used */ cfg.smatcher_soft.small_look = 4; cfg.smatcher_soft.small_chain = 128; cfg.smatcher_soft.small_lchain = 16; cfg.smatcher_soft.max_lazy = 8; cfg.smatcher_soft.long_enough = 128; xd3_config_stream (stream, & cfg); (*encoded_size) = 0; xd3_set_appheader (stream, test_apphead, (usize_t) strlen ((char*) test_apphead)); if ((ret = xd3_encode_stream (stream, test_text, sizeof (test_text), encoded, encoded_size, 4*sizeof (test_text)))) { goto fail; } if ((ret = xd3_close_stream (stream))) { goto fail; } fail: xd3_free_stream (stream); xd3_init_config (& cfg, oflags); xd3_config_stream (stream, & cfg); return ret; } static int test_decompress_text (xd3_stream *stream, uint8_t *enc, usize_t enc_size, usize_t test_desize) { xd3_config cfg; char decoded[sizeof (test_text)]; uint8_t *apphead; usize_t apphead_size; usize_t decoded_size; const char *msg; int ret; usize_t pos = 0; int flags = stream->flags; usize_t take; input: /* Test decoding test_desize input bytes at a time */ take = min (enc_size - pos, test_desize); CHECK(take > 0); xd3_avail_input (stream, enc + pos, take); again: ret = xd3_decode_input (stream); pos += take; take = 0; switch (ret) { 
/* NOTE(review): the cases below are the tail of a decode-test switch whose
 * opening lines precede this chunk.  It drains XD3_OUTPUT, verifies the
 * decoded window and the application header, then tears the stream down.
 * Kept token-for-token. */
case XD3_OUTPUT: break;
case XD3_WINSTART:
case XD3_GOTHEADER: goto again;
case XD3_INPUT:
  if (pos < enc_size) { goto input; }
  /* else fallthrough */
case XD3_WINFINISH:
default: goto fail;
}

CHECK(ret == XD3_OUTPUT);
CHECK(pos == enc_size);

/* The decoded window must be exactly the size of the test text. */
if (stream->avail_out != sizeof (test_text))
  {
    stream->msg = "incorrect output size";
    ret = XD3_INTERNAL;
    goto fail;
  }

decoded_size = stream->avail_out;
memcpy (decoded, stream->next_out, stream->avail_out);

xd3_consume_output (stream);

/* The application header must round-trip unchanged. */
if ((ret = xd3_get_appheader (stream, & apphead, & apphead_size))) { goto fail; }

if (apphead_size != strlen ((char*) test_apphead) ||
    memcmp (apphead, test_apphead, strlen ((char*) test_apphead)) != 0)
  {
    stream->msg = "incorrect appheader";
    ret = XD3_INTERNAL;
    goto fail;
  }

if ((ret = xd3_decode_input (stream)) != XD3_WINFINISH ||
    (ret = xd3_close_stream (stream)) != 0) { goto fail; }

if (decoded_size != sizeof (test_text) ||
    memcmp (decoded, test_text, sizeof (test_text)) != 0)
  {
    stream->msg = "incorrect output text";
    ret = EIO;
  }

 fail:
  /* Preserve the error message across the stream re-initialization so the
   * caller can still report it. */
  msg = stream->msg;
  xd3_free_stream (stream);
  xd3_init_config (& cfg, flags);
  xd3_config_stream (stream, & cfg);
  stream->msg = msg;
  return ret;
}

/* Encodes the test text, then flips every bit of the encoded delta one at a
 * time and decodes, counting the corruptions that do NOT cause a decode
 * failure.  The count must equal expected_non_failures.  With XD3_ADLER32
 * enabled, a final-content mismatch (EIO) must never slip past the checksum. */
static int
test_decompress_single_bit_error (xd3_stream *stream, int expected_non_failures)
{
  int ret;
  usize_t i;
  uint8_t encoded[4*sizeof (test_text)]; /* make room for alt code table */
  usize_t encoded_size;
  int non_failures = 0;
  int cksum = (stream->flags & XD3_ADLER32) != 0;

//#define DEBUG_TEST_FAILURES
#ifndef DEBUG_TEST_FAILURES
#define TEST_FAILURES()
#else
  /* For checking non-failure cases by hand, enable this macro and run
   * xdelta printdelta with print_cpymode disabled.  Every non-failure
   * should change a copy address mode, which doesn't cause a failure
   * because the address cache starts out with all zeros.

     ./xdelta3 test
     for i in test_text.xz.*; do ./xdelta3 printdelta $i > $i.out;
     diff $i.out test_text.xz.0.out; done

   */
  system ("rm -rf test_text.*");
  {
    char buf[TESTBUFSIZE];
    FILE *f;
    snprintf_func (buf, TESTBUFSIZE, "test_text");
    f = fopen (buf, "w");
    fwrite (test_text,1,sizeof (test_text),f);
    fclose (f);
  }
#define TEST_FAILURES()                                                 \
  do {                                                                  \
    char buf[TESTBUFSIZE];                                              \
    FILE *f;                                                            \
    snprintf_func (buf, TESTBUFSIZE, "test_text.xz.%d", non_failures);  \
    f = fopen (buf, "w");                                               \
    fwrite (encoded,1,encoded_size,f);                                  \
    fclose (f);                                                         \
  } while (0)
#endif

  /* Force the inefficient (always-run) secondary coders so the secondary
   * paths are exercised too. */
  stream->sec_data.inefficient = 1;
  stream->sec_inst.inefficient = 1;
  stream->sec_addr.inefficient = 1;

  /* Encode text, test correct input */
  if ((ret = test_compress_text (stream, encoded, & encoded_size)))
    {
      /*stream->msg = "without error: encode failure";*/
      return ret;
    }
  if ((ret = test_decompress_text (stream, encoded, encoded_size,
				   sizeof (test_text) / 4)))
    {
      /*stream->msg = "without error: decode failure";*/
      return ret;
    }

  TEST_FAILURES();

  for (i = 0; i < encoded_size*8; i += 1)
    {
      /* Single bit error. */
      encoded[i/8] ^= 1 << (i%8);

      if ((ret = test_decompress_text (stream, encoded, encoded_size,
				       sizeof (test_text))) == 0)
	{
	  non_failures += 1;
#ifdef DEBUG_TEST_FAILURES
	  XPR(NT "%u[%u] non-failure %u\n", i/8, i%8, non_failures);
#endif
	  TEST_FAILURES();
	}
      else
	{
	  /*XPR(NT "%u[%u] failure: %s\n", i/8, i%8, stream->msg);*/
	}

      /* decompress_text returns EIO when the final memcmp() fails, but that
       * should never happen with checksumming on. */
      if (cksum && ret == EIO)
	{
	  /*XPR(NT "%u[%u] cksum mismatch\n", i/8, i%8);*/
	  stream->msg = "checksum mismatch";
	  return XD3_INTERNAL;
	}

      /* Undo single bit error.
 */
      encoded[i/8] ^= 1 << (i%8);
    }

  /* Test correct input again */
  if ((ret = test_decompress_text (stream, encoded, encoded_size, 1)))
    {
      /*stream->msg = "without error: decode failure";*/
      return ret;
    }

  /* Check expected non-failures */
  if (non_failures != expected_non_failures)
    {
      XPR(NT "non-failures %u; expected %u",
	  non_failures, expected_non_failures);
      stream->msg = "incorrect";
      return XD3_INTERNAL;
    }

  DOT ();
  return 0;
}

/***********************************************************************
 Secondary compression tests
 ***********************************************************************/

#if SECONDARY_ANY

/* A distribution generator: emits a byte stream into DATA whose symbol
 * statistics stress a particular aspect of the secondary coders. */
typedef int (*sec_dist_func) (xd3_stream *stream, xd3_output *data);

static int sec_dist_func1 (xd3_stream *stream, xd3_output *data);
static int sec_dist_func2 (xd3_stream *stream, xd3_output *data);
static int sec_dist_func3 (xd3_stream *stream, xd3_output *data);
static int sec_dist_func4 (xd3_stream *stream, xd3_output *data);
static int sec_dist_func5 (xd3_stream *stream, xd3_output *data);
static int sec_dist_func6 (xd3_stream *stream, xd3_output *data);
static int sec_dist_func7 (xd3_stream *stream, xd3_output *data);
static int sec_dist_func8 (xd3_stream *stream, xd3_output *data);
static int sec_dist_func9 (xd3_stream *stream, xd3_output *data);
static int sec_dist_func10 (xd3_stream *stream, xd3_output *data);
static int sec_dist_func11 (xd3_stream *stream, xd3_output *data);

static sec_dist_func sec_dists[] =
{
  sec_dist_func1,
  sec_dist_func2,
  sec_dist_func3,
  sec_dist_func4,
  sec_dist_func5,
  sec_dist_func6,
  sec_dist_func7,
  sec_dist_func8,
  sec_dist_func9,
  sec_dist_func10,
  sec_dist_func11,
};

/* Test distribution: 100 bytes of the same character (13). */
static int
sec_dist_func1 (xd3_stream *stream, xd3_output *data)
{
  int i, ret;
  for (i = 0; i < 100; i += 1)
    {
      if ((ret = xd3_emit_byte (stream, & data, 13))) { return ret; }
    }
  return 0;
}

/* Test distribution: uniform covering half the alphabet.
*/
static int
sec_dist_func2 (xd3_stream *stream, xd3_output *data)
{
  int i, ret;
  for (i = 0; i < ALPHABET_SIZE; i += 1)
    {
      if ((ret = xd3_emit_byte (stream, & data, i%(ALPHABET_SIZE/2)))) { return ret; }
    }
  return 0;
}

/* Test distribution: uniform covering the entire alphabet. */
static int
sec_dist_func3 (xd3_stream *stream, xd3_output *data)
{
  int i, ret;
  for (i = 0; i < ALPHABET_SIZE; i += 1)
    {
      if ((ret = xd3_emit_byte (stream, & data, i%ALPHABET_SIZE))) { return ret; }
    }
  return 0;
}

/* Test distribution: An exponential distribution covering half the alphabet */
static int
sec_dist_func4 (xd3_stream *stream, xd3_output *data)
{
  int i, ret, x;
  for (i = 0; i < ALPHABET_SIZE*20; i += 1)
    {
      x = mt_exp_rand (10, ALPHABET_SIZE/2);
      if ((ret = xd3_emit_byte (stream, & data, x))) { return ret; }
    }
  return 0;
}

/* Test distribution: An exponential distribution covering the entire alphabet */
static int
sec_dist_func5 (xd3_stream *stream, xd3_output *data)
{
  int i, ret, x;
  for (i = 0; i < ALPHABET_SIZE*20; i += 1)
    {
      x = mt_exp_rand (10, ALPHABET_SIZE-1);
      if ((ret = xd3_emit_byte (stream, & data, x))) { return ret; }
    }
  return 0;
}

/* Test distribution: An uniform random distribution covering half the alphabet */
static int
sec_dist_func6 (xd3_stream *stream, xd3_output *data)
{
  int i, ret, x;
  for (i = 0; i < ALPHABET_SIZE*20; i += 1)
    {
      x = mt_random (&static_mtrand) % (ALPHABET_SIZE/2);
      if ((ret = xd3_emit_byte (stream, & data, x))) { return ret; }
    }
  return 0;
}

/* Test distribution: An uniform random distribution covering the entire alphabet */
static int
sec_dist_func7 (xd3_stream *stream, xd3_output *data)
{
  int i, ret, x;
  for (i = 0; i < ALPHABET_SIZE*200; i += 1)
    {
      x = mt_random (&static_mtrand) % ALPHABET_SIZE;
      if ((ret = xd3_emit_byte (stream, & data, x))) { return ret; }
    }
  return 0;
}

/* Test distribution: A small number of frequent characters, difficult
 * to divide into many groups */
static int
sec_dist_func8 (xd3_stream *stream, xd3_output *data)
{
  int i, ret;
  /* Four widely-spaced symbols, repeated in a fixed cycle. */
  for (i = 0; i < ALPHABET_SIZE*5; i += 1)
    {
      if ((ret = xd3_emit_byte (stream, & data, 0))) { return ret; }
      if ((ret = xd3_emit_byte (stream, & data, 64))) { return ret; }
      if ((ret = xd3_emit_byte (stream, & data, 128))) { return ret; }
      if ((ret = xd3_emit_byte (stream, & data, 255))) { return ret; }
    }
  return 0;
}

/* Test distribution: One that causes many FGK block promotions (found a bug) */
static int
sec_dist_func9 (xd3_stream *stream, xd3_output *data)
{
  int i, ret;
  int ramp = 0;
  int rcount = 0;
  int prom = 0;
  int pcount = 0;

  /* 200 was long enough to trigger it--only when stricter checking
   * that counted all blocks was turned on, but it seems I deleted
   * this code.  (missing fgk_free_block on line 398). */
  for (i = 0; i < ALPHABET_SIZE*200; i += 1)
    {
    repeat:
      if (ramp < ALPHABET_SIZE)
	{
	  /* Initially Nth symbol has (N+1) frequency */
	  if (rcount <= ramp)
	    {
	      rcount += 1;
	      if ((ret = xd3_emit_byte (stream, & data, ramp))) { return ret; }
	      continue;
	    }

	  ramp += 1;
	  rcount = 0;
	  goto repeat;
	}

      /* Thereafter, promote least freq to max freq */
      if (pcount == ALPHABET_SIZE)
	{
	  pcount = 0;
	  prom = (prom + 1) % ALPHABET_SIZE;
	}

      pcount += 1;
      if ((ret = xd3_emit_byte (stream, & data, prom))) { return ret; }
    }

  return 0;
}

/* Test distribution: freq[i] == i*i, creates a 21-bit code length, fixed in 3.0r.
*/
static int
sec_dist_func10 (xd3_stream *stream, xd3_output *data)
{
  int i, j, ret;
  for (i = 0; i < ALPHABET_SIZE; i += 1)
    {
      for (j = 0; j <= (i*i); j += 1)
	{
	  if ((ret = xd3_emit_byte (stream, & data, i))) { return ret; }
	}
    }
  return 0;
}

/* Test distribution: fibonacci */
static int
sec_dist_func11 (xd3_stream *stream, xd3_output *data)
{
  int sum0 = 0;
  int sum1 = 1;
  int i, j, ret;
  /* Symbol i is emitted fib(i+2) times (the running Fibonacci sum). */
  for (i = 0; i < 33; ++i)
    {
      for (j = 0; j < (sum0 + sum1); ++j)
	{
	  if ((ret = xd3_emit_byte (stream, & data, i))) { return ret; }
	}
      sum0 = sum1;
      sum1 = j;
    }
  return 0;
}

/* Decode COMPRESS_SIZE bytes of DEC_INPUT with the secondary coder SEC and
 * check that exactly INPUT_SIZE bytes come out, matching DEC_CORRECT.
 * DEC_OUTPUT is caller-provided scratch of at least INPUT_SIZE bytes. */
static int
test_secondary_decode (xd3_stream         *stream,
		       const xd3_sec_type *sec,
		       usize_t             input_size,
		       usize_t             compress_size,
		       const uint8_t      *dec_input,
		       const uint8_t      *dec_correct,
		       uint8_t            *dec_output)
{
  int ret;
  xd3_sec_stream *dec_stream;
  const uint8_t *dec_input_used, *dec_input_end;
  uint8_t *dec_output_used, *dec_output_end;

  if ((dec_stream = sec->alloc (stream)) == NULL) { return ENOMEM; }

  if ((ret = sec->init (stream, dec_stream, 0)) != 0) { goto fail; }

  dec_input_used = dec_input;
  dec_input_end  = dec_input + compress_size;

  dec_output_used = dec_output;
  dec_output_end  = dec_output + input_size;

  if ((ret = sec->decode (stream, dec_stream,
			  & dec_input_used, dec_input_end,
			  & dec_output_used, dec_output_end)))
    {
      goto fail;
    }

  /* The decoder must consume all input and produce all output exactly. */
  if (dec_input_used != dec_input_end)
    {
      stream->msg = "unused input";
      ret = XD3_INTERNAL;
      goto fail;
    }

  if (dec_output_used != dec_output_end)
    {
      stream->msg = "unfinished output";
      ret = XD3_INTERNAL;
      goto fail;
    }

  if (memcmp (dec_output, dec_correct, input_size) != 0)
    {
      stream->msg = "incorrect output";
      ret = XD3_INTERNAL;
      goto fail;
    }

 fail:
  sec->destroy (stream, dec_stream);
  return ret;
}

/* Round-trip every distribution in sec_dists[] through the secondary coder
 * SEC for group counts 1..GROUPS, then flip single bits in the first 10
 * compressed bytes to probe the decoder for faults. */
static int
test_secondary (xd3_stream *stream, const xd3_sec_type *sec, usize_t groups)
{
  usize_t test_i;
  int ret;
  xd3_output *in_head, *out_head, *p;
  usize_t p_off, input_size, compress_size;
  uint8_t *dec_input = NULL, *dec_output = NULL, *dec_correct = NULL;
  xd3_sec_stream *enc_stream;
  xd3_sec_cfg cfg;

  memset (& cfg, 0, sizeof (cfg));

  cfg.inefficient = 1;

  for (cfg.ngroups = 1; cfg.ngroups <= groups; cfg.ngroups += 1)
    {
      XPR(NTR "\n...");
      for (test_i = 0; test_i < SIZEOF_ARRAY (sec_dists); test_i += 1)
	{
	  /* Fixed seed so every distribution is reproducible. */
	  mt_init (& static_mtrand, 0x9f73f7fc);

	  in_head  = xd3_alloc_output (stream, NULL);
	  out_head = xd3_alloc_output (stream, NULL);
	  enc_stream = sec->alloc (stream);
	  dec_input = NULL;
	  dec_output = NULL;
	  dec_correct = NULL;

	  if (in_head == NULL || out_head == NULL || enc_stream == NULL)
	    {
	      goto nomem;
	    }

	  if ((ret = sec_dists[test_i] (stream, in_head))) { goto fail; }

	  if ((ret = sec->init (stream, enc_stream, 1)) != 0) { goto fail; }

	  /* Encode data */
	  if ((ret = sec->encode (stream, enc_stream, in_head, out_head, & cfg)))
	    {
	      XPR(NT "test %u: encode: %s", test_i, stream->msg);
	      goto fail;
	    }

	  /* Calculate sizes, allocate contiguous arrays for decoding */
	  input_size    = xd3_sizeof_output (in_head);
	  compress_size = xd3_sizeof_output (out_head);

	  XPR(NTR "%.3f", 8.0 * (double) compress_size / (double) input_size);

	  if ((dec_input   = (uint8_t*) xd3_alloc (stream, compress_size, 1)) == NULL ||
	      (dec_output  = (uint8_t*) xd3_alloc (stream, input_size, 1)) == NULL ||
	      (dec_correct = (uint8_t*) xd3_alloc (stream, input_size, 1)) == NULL)
	    {
	      goto nomem;
	    }

	  /* Fill the compressed data array */
	  for (p_off = 0, p = out_head; p != NULL;
	       p_off += p->next, p = p->next_page)
	    {
	      memcpy (dec_input + p_off, p->base, p->next);
	    }
	  CHECK(p_off == compress_size);

	  /* Fill the input data array */
	  for (p_off = 0, p = in_head; p != NULL;
	       p_off += p->next, p = p->next_page)
	    {
	      memcpy (dec_correct + p_off, p->base, p->next);
	    }
	  CHECK(p_off == input_size);

	  if ((ret = test_secondary_decode (stream, sec, input_size,
					    compress_size, dec_input,
					    dec_correct, dec_output)))
	    {
	      XPR(NT "test %u: decode: %s", test_i, stream->msg);
	      goto fail;
	    }

	  /* Single-bit error test, only cover the first 10 bytes.
	   * Some non-failures are expected in the Huffman case:
	   * Changing the clclen array, for example, may not harm the
	   * decoding.  Really looking for faults here.
 */
	  {
	    int i;
	    int bytes = min (compress_size, 10U);
	    for (i = 0; i < bytes * 8; i += 1)
	      {
		dec_input[i/8] ^= 1 << (i%8);

		if ((ret = test_secondary_decode (stream, sec, input_size,
						  compress_size, dec_input,
						  dec_correct, dec_output)) == 0)
		  {
		    /*XPR(NT "test %u: decode single-bit [%u/%u]
		      error non-failure", test_i, i/8, i%8);*/
		  }

		dec_input[i/8] ^= 1 << (i%8);

		if ((i % (2*bytes)) == (2*bytes)-1)
		  {
		    DOT ();
		  }
	      }
	    ret = 0;
	  }

	  if (0)
	    {
	    nomem:
	      ret = ENOMEM;
	    }

	fail:
	  sec->destroy (stream, enc_stream);
	  xd3_free_output (stream, in_head);
	  xd3_free_output (stream, out_head);
	  xd3_free (stream, dec_input);
	  xd3_free (stream, dec_output);
	  xd3_free (stream, dec_correct);

	  if (ret != 0) { return ret; }
	}
    }

  return 0;
}

IF_FGK (static int test_secondary_fgk  (xd3_stream *stream, usize_t gp)
	{ return test_secondary (stream, & fgk_sec_type, gp); })
IF_DJW (static int test_secondary_huff (xd3_stream *stream, usize_t gp)
	{ return test_secondary (stream, & djw_sec_type, gp); })
IF_LZMA (static int test_secondary_lzma (xd3_stream *stream, usize_t gp)
	{ return test_secondary (stream, & lzma_sec_type, gp); })
#endif

/***********************************************************************
 TEST INSTRUCTION TABLE
 ***********************************************************************/

/* Test that xd3_choose_instruction() does the right thing for its code
 * table.
*/ static int test_choose_instruction (xd3_stream *stream, int ignore) { int i; stream->code_table = (*stream->code_table_func) (); for (i = 0; i < 256; i += 1) { const xd3_dinst *d = stream->code_table + i; xd3_rinst prev, inst; CHECK(d->type1 > 0); memset (& prev, 0, sizeof (prev)); memset (& inst, 0, sizeof (inst)); if (d->type2 == 0) { inst.type = d->type1; if ((inst.size = d->size1) == 0) { inst.size = TESTBUFSIZE; } XD3_CHOOSE_INSTRUCTION (stream, NULL, & inst); if (inst.code2 != 0 || inst.code1 != i) { stream->msg = "wrong single instruction"; return XD3_INTERNAL; } } else { prev.type = d->type1; prev.size = d->size1; inst.type = d->type2; inst.size = d->size2; XD3_CHOOSE_INSTRUCTION (stream, & prev, & inst); if (prev.code2 != i) { stream->msg = "wrong double instruction"; return XD3_INTERNAL; } } } return 0; } /*********************************************************************** TEST INSTRUCTION TABLE CODING ***********************************************************************/ #if GENERIC_ENCODE_TABLES /* Test that encoding and decoding a code table works */ static int test_encode_code_table (xd3_stream *stream, int ignore) { int ret; const uint8_t *comp_data; usize_t comp_size; if ((ret = xd3_compute_alternate_table_encoding (stream, & comp_data, & comp_size))) { return ret; } stream->acache.s_near = __alternate_code_table_desc.near_modes; stream->acache.s_same = __alternate_code_table_desc.same_modes; if ((ret = xd3_apply_table_encoding (stream, comp_data, comp_size))) { return ret; } if (memcmp (stream->code_table, xd3_alternate_code_table (), sizeof (xd3_dinst) * 256) != 0) { stream->msg = "wrong code table reconstruction"; return XD3_INTERNAL; } return 0; } #endif /*********************************************************************** 64BIT STREAMING ***********************************************************************/ /* This test encodes and decodes a series of 1 megabyte windows, each * containing a long run of zeros along with a single 
xoff_t size
 * record to indicate the sequence. */
static int
test_streaming (xd3_stream *in_stream, uint8_t *encbuf, uint8_t *decbuf,
		uint8_t *delbuf, usize_t megs)
{
  xd3_stream estream, dstream;
  int ret;
  usize_t i, delsize, decsize;
  xd3_config cfg;

  xd3_init_config (& cfg, in_stream->flags);
  cfg.flags |= XD3_COMPLEVEL_6;

  if ((ret = xd3_config_stream (& estream, & cfg)) ||
      (ret = xd3_config_stream (& dstream, & cfg)))
    {
      goto fail;
    }

  for (i = 0; i < megs; i += 1)
    {
      /* Stamp the window index into the first word so each 1MB window is
       * distinct. */
      ((usize_t*) encbuf)[0] = i;

      if ((i % 200) == 199) { DOT (); }

      if ((ret = xd3_process_stream (1, & estream, xd3_encode_input, 0,
				     encbuf, 1 << 20,
				     delbuf, & delsize, 1 << 20)))
	{
	  in_stream->msg = estream.msg;
	  goto fail;
	}

      if ((ret = xd3_process_stream (0, & dstream, xd3_decode_input, 0,
				     delbuf, delsize,
				     decbuf, & decsize, 1 << 20)))
	{
	  in_stream->msg = dstream.msg;
	  goto fail;
	}

      if (decsize != 1 << 20 ||
	  memcmp (encbuf, decbuf, 1 << 20) != 0)
	{
	  in_stream->msg = "wrong result";
	  ret = XD3_INTERNAL;
	  goto fail;
	}
    }

  if ((ret = xd3_close_stream (& estream)) ||
      (ret = xd3_close_stream (& dstream)))
    {
      goto fail;
    }

 fail:
  xd3_free_stream (& estream);
  xd3_free_stream (& dstream);

  return ret;
}

/* Run tests of data streaming of over and around 4GB of data. */
static int
test_compressed_stream_overflow (xd3_stream *stream, int ignore)
{
  int ret;
  int i;
  uint8_t *buf;

  if ((buf = (uint8_t*) malloc (TWO_MEGS_AND_DELTA)) == NULL) { return ENOMEM; }

  memset (buf, 0, TWO_MEGS_AND_DELTA);
  for (i = 0; i < (2 << 20); i += 256)
    {
      /* Randomly offset each 256-byte ramp so the data is compressible but
       * not trivially repetitive. */
      int j;
      int off = mt_random(& static_mtrand) % 10;
      for (j = 0; j < 256; j++)
	{
	  buf[i + j] = j + off;
	}
    }

  /* Test overflow of a 32-bit file offset.
 */
  if (SIZEOF_XOFF_T == 4)
    {
      /* (1 << 12) + 1 one-megabyte windows exceed a 32-bit offset; the
       * decoder is expected to reject with an overflow diagnostic. */
      ret = test_streaming (stream,
			    buf,
			    buf + (1 << 20),
			    buf + (2 << 20),
			    (1 << 12) + 1);

      if (ret == XD3_INVALID_INPUT && MSG_IS ("decoder file offset overflow"))
	{
	  ret = 0;
	}
      else
	{
	  XPR(NT XD3_LIB_ERRMSG (stream, ret));
	  stream->msg = "expected overflow condition";
	  ret = XD3_INTERNAL;
	  goto fail;
	}
    }

  /* Test transfer of exactly 32bits worth of data. */
  if ((ret = test_streaming (stream,
			     buf,
			     buf + (1 << 20),
			     buf + (2 << 20),
			     1 << 12)))
    {
      goto fail;
    }
 fail:
  free (buf);
  return ret;
}

/***********************************************************************
 COMMAND LINE
 ***********************************************************************/

#if SHELL_TESTS

/* For each pair of command templates in the array below, test that
 * encoding and decoding commands work.  Also check for the expected
 * size delta, which should be approximately TEST_ADD_RATIO times the
 * file size created by test_make_inputs.  Due to differences in the
 * application header, it is suppressed (-A) so that all delta files
 * are the same.
*/
static int
test_command_line_arguments (xd3_stream *stream, int ignore)
{
  int i, ret;

  /* Each pair is (encode template, decode template); %s slots are filled
   * with program name, soft-config string, and file names below. */
  static const char* cmdpairs[] =
  {
    /* standard input, output */
    "%s %s -A < %s > %s", "%s -d < %s > %s",
    "%s %s -A -e < %s > %s", "%s -d < %s > %s",
    "%s %s -A= encode < %s > %s", "%s decode < %s > %s",
    "%s %s -A -q encode < %s > %s", "%s -qdq < %s > %s",

    /* file input, standard output */
    "%s %s -A= %s > %s", "%s -d %s > %s",
    "%s %s -A -e %s > %s", "%s -d %s > %s",
    "%s %s encode -A= %s > %s", "%s decode %s > %s",

    /* file input, output */
    "%s %s -A= %s %s", "%s -d %s %s",
    "%s %s -A -e %s %s", "%s -d %s %s",
    "%s %s -A= encode %s %s", "%s decode %s %s",

    /* option placement */
    "%s %s -A -f %s %s", "%s -f -d %s %s",
    "%s %s -e -A= %s %s", "%s -d -f %s %s",
    "%s %s -f encode -A= %s %s", "%s -f decode -f %s %s",
  };

  char ecmd[TESTBUFSIZE], dcmd[TESTBUFSIZE];
  int pairs = SIZEOF_ARRAY (cmdpairs) / 2;
  xoff_t tsize;
  xoff_t dsize;
  double ratio;

  mt_init (& static_mtrand, 0x9f73f7fc);

  for (i = 0; i < pairs; i += 1)
    {
      test_setup ();
      if ((ret = test_make_inputs (stream, NULL, & tsize))) { return ret; }

      snprintf_func (ecmd, TESTBUFSIZE, cmdpairs[2*i], program_name,
		test_softcfg_str, TEST_TARGET_FILE, TEST_DELTA_FILE);
      snprintf_func (dcmd, TESTBUFSIZE, cmdpairs[2*i+1], program_name,
		TEST_DELTA_FILE, TEST_RECON_FILE);

      /* Encode and decode. */
      if ((ret = system (ecmd)) != 0)
	{
	  XPR(NT "encode command: %s\n", ecmd);
	  stream->msg = "encode cmd failed";
	  return XD3_INTERNAL;
	}

      if ((ret = system (dcmd)) != 0)
	{
	  XPR(NT "decode command: %s\n", dcmd);
	  stream->msg = "decode cmd failed";
	  return XD3_INTERNAL;
	}

      /* Compare the target file. */
      if ((ret = test_compare_files (TEST_TARGET_FILE, TEST_RECON_FILE)))
	{
	  return ret;
	}

      if ((ret = test_file_size (TEST_DELTA_FILE, & dsize)))
	{
	  return ret;
	}

      ratio = (double) dsize / (double) tsize;

      /* Check that it is not too small, not too large.
 */
      if (ratio >= TEST_ADD_RATIO + TEST_EPSILON)
	{
	  XPR(NT "test encode with size ratio %.4f, "
	     "expected < %.4f (%"Q"u, %"Q"u)\n",
	    ratio, TEST_ADD_RATIO + TEST_EPSILON, dsize, tsize);
	  stream->msg = "strange encoding";
	  return XD3_INTERNAL;
	}

      if (ratio <= TEST_ADD_RATIO * (1.0 - 2 * TEST_EPSILON))
	{
	  XPR(NT "test encode with size ratio %.4f, "
	     "expected > %.4f\n",
	    ratio, TEST_ADD_RATIO - TEST_EPSILON);
	  stream->msg = "strange encoding";
	  return XD3_INTERNAL;
	}

      /* Also check that test_compare_files works.  The delta and original should
       * not be identical. */
      if ((ret = test_compare_files (TEST_DELTA_FILE, TEST_TARGET_FILE)) == 0)
	{
	  stream->msg = "broken test_compare_files";
	  return XD3_INTERNAL;
	}

      test_cleanup ();
      DOT ();
    }

  return 0;
}

/* Run "printhdr" on INPUT and grep its output for a line starting with
 * LINE_START and containing MATCHES; YES_OR_NO selects whether the grep is
 * expected to succeed or fail. */
static int
check_vcdiff_header (xd3_stream *stream,
		     const char *input,
		     const char *line_start,
		     const char *matches,
		     int yes_or_no)
{
  int ret;
  char vcmd[TESTBUFSIZE], gcmd[TESTBUFSIZE];

  snprintf_func (vcmd, TESTBUFSIZE, "%s printhdr -f %s %s",
	    program_name, input, TEST_RECON2_FILE);

  if ((ret = system (vcmd)) != 0)
    {
      XPR(NT "printhdr command: %s\n", vcmd);
      stream->msg = "printhdr cmd failed";
      return XD3_INTERNAL;
    }

  snprintf_func (gcmd, TESTBUFSIZE, "grep \"%s.*%s.*\" %s > /dev/null",
	    line_start, matches, TEST_RECON2_FILE);

  if (yes_or_no)
    {
      if ((ret = do_cmd (stream, gcmd)))
	{
	  XPR(NT "%s\n", gcmd);
	  return ret;
	}
    }
  else
    {
      if ((ret = do_fail (stream, gcmd)))
	{
	  XPR(NT "%s\n", gcmd);
	  return ret;
	}
    }

  return 0;
}

/* Encode with options selected by VARIANT (adler32 / apphead / secondary
 * bits), recode with the subset selected by CHANGE toggled, then verify the
 * recoded header flags and a full decode. */
static int
test_recode_command2 (xd3_stream *stream, int has_source,
		      int variant, int change)
{
  int has_adler32 = (variant & 0x1) != 0;
  int has_apphead = (variant & 0x2) != 0;
  int has_secondary = (variant & 0x4) != 0;

  int change_adler32 = (change & 0x1) != 0;
  int change_apphead = (change & 0x2) != 0;
  int change_secondary = (change & 0x4) != 0;

  int recoded_adler32 = change_adler32 ? !has_adler32 : has_adler32;
  int recoded_apphead = change_apphead ? !has_apphead : has_apphead;
  int recoded_secondary = change_secondary ?
!has_secondary : has_secondary;

  char ecmd[TESTBUFSIZE], recmd[TESTBUFSIZE], dcmd[TESTBUFSIZE];
  xoff_t tsize, ssize;
  int ret;

  test_setup ();

  if ((ret = test_make_inputs (stream, has_source ? & ssize : NULL, & tsize)))
    {
      return ret;
    }

  /* First encode */
  snprintf_func (ecmd, TESTBUFSIZE, "%s %s -f %s %s %s %s %s %s %s",
	    program_name, test_softcfg_str,
	    has_adler32 ? "" : "-n ",
	    has_apphead ? "-A=encode_apphead " : "-A= ",
	    has_secondary ? "-S djw " : "-S none ",
	    has_source ? "-s " : "",
	    has_source ? TEST_SOURCE_FILE : "",
	    TEST_TARGET_FILE,
	    TEST_DELTA_FILE);

  if ((ret = system (ecmd)) != 0)
    {
      XPR(NT "encode command: %s\n", ecmd);
      stream->msg = "encode cmd failed";
      return XD3_INTERNAL;
    }

  /* Now recode */
  snprintf_func (recmd, TESTBUFSIZE, "%s recode %s -f %s %s %s %s %s",
	    program_name, test_softcfg_str,
	    recoded_adler32 ? "" : "-n ",
	    /* Only pass an -A option when the apphead is being changed. */
	    !change_apphead ? "" :
	        (recoded_apphead ? "-A=recode_apphead " : "-A= "),
	    recoded_secondary ? "-S djw " : "-S none ",
	    TEST_DELTA_FILE,
	    TEST_COPY_FILE);

  if ((ret = system (recmd)) != 0)
    {
      XPR(NT "recode command: %s\n", recmd);
      stream->msg = "recode cmd failed";
      return XD3_INTERNAL;
    }

  /* Check recode changes. */
  if ((ret = check_vcdiff_header (stream,
				  TEST_COPY_FILE,
				  "VCDIFF window indicator",
				  "VCD_SOURCE",
				  has_source))) { return ret; }

  if ((ret = check_vcdiff_header (stream,
				  TEST_COPY_FILE,
				  "VCDIFF header indicator",
				  "VCD_SECONDARY",
				  recoded_secondary))) { return ret; }

  if ((ret = check_vcdiff_header (stream,
				  TEST_COPY_FILE,
				  "VCDIFF window indicator",
				  "VCD_ADLER32",
				  /* Recode can't generate an adler32
				   * checksum, it can only preserve it or
				   * remove it.
 */
				  has_adler32 && recoded_adler32)))
    {
      return ret;
    }

  if (!change_apphead)
    {
      if ((ret = check_vcdiff_header (stream,
				      TEST_COPY_FILE,
				      "VCDIFF header indicator",
				      "VCD_APPHEADER",
				      has_apphead))) { return ret; }
      if ((ret = check_vcdiff_header (stream,
				      TEST_COPY_FILE,
				      "VCDIFF application header",
				      "encode_apphead",
				      has_apphead))) { return ret; }
    }
  else
    {
      if ((ret = check_vcdiff_header (stream,
				      TEST_COPY_FILE,
				      "VCDIFF header indicator",
				      "VCD_APPHEADER",
				      recoded_apphead))) { return ret; }
      if (recoded_apphead &&
	  (ret = check_vcdiff_header (stream,
				      TEST_COPY_FILE,
				      "VCDIFF application header",
				      "recode_apphead",
				      1))) { return ret; }
    }

  /* Now decode */
  snprintf_func (dcmd, TESTBUFSIZE, "%s -fd %s %s %s %s ",
	    program_name,
	    has_source ? "-s " : "",
	    has_source ? TEST_SOURCE_FILE : "",
	    TEST_COPY_FILE,
	    TEST_RECON_FILE);

  if ((ret = system (dcmd)) != 0)
    {
      XPR(NT "decode command: %s\n", dcmd);
      stream->msg = "decode cmd failed";
      return XD3_INTERNAL;
    }

  /* Now compare. */
  if ((ret = test_compare_files (TEST_TARGET_FILE, TEST_RECON_FILE)))
    {
      return ret;
    }

  return 0;
}

/* Drives test_recode_command2 over all combinations of source presence,
 * encode-time options, and recode-time toggles. */
static int
test_recode_command (xd3_stream *stream, int ignore)
{
  /* Things to test:
   * - with and without a source file (recode does not change)
   *
   * (recode may or may not change -- 8 variations)
   * - with and without adler32
   * - with and without app header
   * - with and without secondary
   */
  int has_source;
  int variant;
  int change;
  int ret;

  for (has_source = 0; has_source < 2; has_source++)
    {
      for (variant = 0; variant < 8; variant++)
	{
	  for (change = 0; change < 8; change++)
	    {
	      if ((ret = test_recode_command2 (stream, has_source,
					       variant, change)))
		{
		  return ret;
		}
	    }
	  DOT ();
	}
    }

  return 0;
}
#endif

/***********************************************************************
 EXTERNAL I/O DECOMPRESSION/RECOMPRESSION
 ***********************************************************************/

#if EXTERNAL_COMPRESSION
/* This performs one step of the test_externally_compressed_io
 * function described below.
It builds a pipe containing both Xdelta
 * and external compression/decompression that should not modify the
 * data passing through. */
static int
test_compressed_pipe (xd3_stream *stream, main_extcomp *ext, char* buf,
		      const char* comp_options, const char* decomp_options,
		      int do_ext_recomp, const char* msg)
{
  int ret;
  char decomp_buf[TESTBUFSIZE];

  if (do_ext_recomp)
    {
      snprintf_func (decomp_buf, TESTBUFSIZE,
		" | %s %s", ext->decomp_cmdname, ext->decomp_options);
    }
  else
    {
      decomp_buf[0] = 0;
    }

  /* external-compress | xdelta3 encode | xdelta3 decode [| external-decompress] */
  snprintf_func (buf, TESTBUFSIZE, "%s %s < %s | %s %s | %s %s%s > %s",
	    ext->recomp_cmdname, ext->recomp_options,
	    TEST_TARGET_FILE,
	    program_name, comp_options,
	    program_name, decomp_options,
	    decomp_buf,
	    TEST_RECON_FILE);

  if ((ret = system (buf)) != 0)
    {
      stream->msg = msg;
      return XD3_INTERNAL;
    }

  if ((ret = test_compare_files (TEST_TARGET_FILE, TEST_RECON_FILE)))
    {
      return XD3_INTERNAL;
    }

  DOT ();
  return 0;
}

/* We want to test that a pipe such as:
 *
 * --> | gzip -cf | xdelta3 -cf | xdelta3 -dcf | gzip -dcf | -->
 *
 * is transparent, i.e., does not modify the stream of data.  However,
 * we also want to verify that at the center the data is properly
 * compressed, i.e., that we do not just have a re-compressed gzip
 * format, that we have an VCDIFF format.  We do this in two steps.
 * First test the above pipe, then test with suppressed output
 * recompression (-D).  The result should be the original input:
 *
 * --> | gzip -cf | xdelta3 -cf | xdelta3 -Ddcf | -->
 *
 * Finally we want to test that -D also disables input decompression:
 *
 * --> | gzip -cf | xdelta3 -Dcf | xdelta3 -Ddcf | gzip -dcf | -->
 */
static int
test_externally_compressed_io (xd3_stream *stream, int ignore)
{
  usize_t i;
  int ret;
  char buf[TESTBUFSIZE];

  mt_init (& static_mtrand, 0x9f73f7fc);

  if ((ret = test_make_inputs (stream, NULL, NULL))) { return ret; }

  for (i = 0; i < SIZEOF_ARRAY (extcomp_types); i += 1)
    {
      main_extcomp *ext = & extcomp_types[i];

      /* Test for the existence of the external command first, if not skip.
 */
      snprintf_func (buf, TESTBUFSIZE, "%s %s < /dev/null > /dev/null",
		ext->recomp_cmdname, ext->recomp_options);

      if ((ret = system (buf)) != 0)
	{
	  XPR(NT "%s=0", ext->recomp_cmdname);
	  continue;
	}

      if ((ret = test_compressed_pipe (stream, ext, buf, "-cfq", "-dcfq", 1,
				       "compression failed: identity pipe")) ||
	  (ret = test_compressed_pipe (stream, ext, buf, "-cfq", "-Rdcfq", 0,
				       "compression failed: without recompression")) ||
	  (ret = test_compressed_pipe (stream, ext, buf, "-Dcfq", "-Rdcfq", 1,
				       "compression failed: without decompression")))
	{
	  return ret;
	}
    }

  return 0;
}

/* This tests the proper functioning of external decompression for
 * source files.  The source and target files are identical and
 * compressed by gzip.  Decoding such a delta with recompression
 * disabled (-R) should produce the original, uncompressed
 * source/target file.  Then it checks with output recompression
 * enabled--in this case the output should be a compressed copy of the
 * original source/target file.  Then it checks that encoding with
 * decompression disabled works--the compressed files are identical
 * and decoding them should always produce a compressed output,
 * regardless of -R since the encoded delta file had decompression
 * disabled.. */
static int
test_source_decompression (xd3_stream *stream, int ignore)
{
  int ret;
  char buf[TESTBUFSIZE];
  const main_extcomp *ext;
  xoff_t dsize;

  mt_init (& static_mtrand, 0x9f73f7fc);

  test_setup ();
  if ((ret = test_make_inputs (stream, NULL, NULL))) { return ret; }

  /* Use gzip. */
  if ((ext = main_get_compressor ("G")) == NULL)
    {
      XPR(NT "skipped");
      return 0;
    }

  /* Save an uncompressed copy. */
  if ((ret = test_save_copy (TEST_TARGET_FILE))) { return ret; }

  /* Compress the source. */
  snprintf_func (buf, TESTBUFSIZE, "%s -1 %s < %s > %s",
	    ext->recomp_cmdname,
	    ext->recomp_options, TEST_COPY_FILE, TEST_SOURCE_FILE);
  if ((ret = do_cmd (stream, buf))) { return ret; }

  /* Compress the target.
*/
  snprintf_func (buf, TESTBUFSIZE, "%s -9 %s < %s > %s",
	    ext->recomp_cmdname,
	    ext->recomp_options, TEST_COPY_FILE, TEST_TARGET_FILE);
  if ((ret = do_cmd (stream, buf))) { return ret; }

  /* Now the two identical files are compressed.  Delta-encode the target,
   * with decompression. */
  snprintf_func (buf, TESTBUFSIZE, "%s -e -vfq -s%s %s %s", program_name,
	    TEST_SOURCE_FILE, TEST_TARGET_FILE, TEST_DELTA_FILE);
  if ((ret = do_cmd (stream, buf))) { return ret; }

  /* Check that the compressed file is small (b/c inputs are
   * identical). */
  if ((ret = test_file_size (TEST_DELTA_FILE, & dsize)))
    {
      return ret;
    }

  /* Deltas for identical files should be very small. */
  if (dsize > 200)
    {
      XPR(NT "external compression did not happen\n");
      stream->msg = "external compression did not happen";
      return XD3_INTERNAL;
    }

  /* Decode the delta file with recompression disabled, should get an
   * uncompressed file out. */
  snprintf_func (buf, TESTBUFSIZE, "%s -v -dq -R -s%s %s %s", program_name,
	    TEST_SOURCE_FILE, TEST_DELTA_FILE, TEST_RECON_FILE);
  if ((ret = do_cmd (stream, buf))) { return ret; }
  if ((ret = test_compare_files (TEST_COPY_FILE, TEST_RECON_FILE)))
    {
      return ret;
    }

  /* Decode the delta file with recompression, should get a compressed file
   * out.  But we can't compare compressed files directly.
 */
  snprintf_func (buf, TESTBUFSIZE, "%s -v -dqf -s%s %s %s", program_name,
	    TEST_SOURCE_FILE, TEST_DELTA_FILE, TEST_RECON_FILE);
  if ((ret = do_cmd (stream, buf))) { return ret; }

  /* Decompress the recoded output externally, then compare plaintexts. */
  snprintf_func (buf, TESTBUFSIZE, "%s %s < %s > %s",
	    ext->decomp_cmdname, ext->decomp_options,
	    TEST_RECON_FILE, TEST_RECON2_FILE);
  if ((ret = do_cmd (stream, buf))) { return ret; }
  if ((ret = test_compare_files (TEST_COPY_FILE, TEST_RECON2_FILE)))
    {
      return ret;
    }

  /* Encode with decompression disabled */
  snprintf_func (buf, TESTBUFSIZE, "%s -e -D -vfq -s%s %s %s", program_name,
	    TEST_SOURCE_FILE, TEST_TARGET_FILE, TEST_DELTA_FILE);
  if ((ret = do_cmd (stream, buf))) { return ret; }

  /* Decode the delta file with decompression disabled, should get the
   * identical compressed file out. */
  snprintf_func (buf, TESTBUFSIZE, "%s -d -D -vfq -s%s %s %s", program_name,
	    TEST_SOURCE_FILE, TEST_DELTA_FILE, TEST_RECON_FILE);
  if ((ret = do_cmd (stream, buf))) { return ret; }
  if ((ret = test_compare_files (TEST_TARGET_FILE, TEST_RECON_FILE)))
    {
      return ret;
    }

  test_cleanup();
  return 0;
}
#endif

/***********************************************************************
 FORCE, STDOUT
 ***********************************************************************/

/* This tests that output will not overwrite an existing file unless
 * -f was specified.  The test is for encoding (the same code handles
 * it for decoding). */
static int
test_force_behavior (xd3_stream *stream, int ignore)
{
  int ret;
  char buf[TESTBUFSIZE];

  /* Create empty target file */
  test_setup ();
  snprintf_func (buf, TESTBUFSIZE, "cp /dev/null %s", TEST_TARGET_FILE);
  if ((ret = do_cmd (stream, buf))) { return ret; }

  /* Encode to delta file */
  snprintf_func (buf, TESTBUFSIZE, "%s -e %s %s", program_name,
	    TEST_TARGET_FILE, TEST_DELTA_FILE);
  if ((ret = do_cmd (stream, buf))) { return ret; }

  /* Encode again, should fail.
*/
  snprintf_func (buf, TESTBUFSIZE, "%s -q -e %s %s ", program_name,
	    TEST_TARGET_FILE, TEST_DELTA_FILE);
  if ((ret = do_fail (stream, buf))) { return ret; }

  /* Force it, should succeed. */
  snprintf_func (buf, TESTBUFSIZE, "%s -f -e %s %s", program_name,
	    TEST_TARGET_FILE, TEST_DELTA_FILE);
  if ((ret = do_cmd (stream, buf))) { return ret; }

  test_cleanup();
  return 0;
}

/* This checks the proper operation of the -c flag.  When specified
 * the default output becomes stdout, otherwise the input must be
 * provided (encode) or it may be defaulted (decode w/ app header). */
static int
test_stdout_behavior (xd3_stream *stream, int ignore)
{
  int ret;
  char buf[TESTBUFSIZE];

  test_setup();
  snprintf_func (buf, TESTBUFSIZE, "cp /dev/null %s", TEST_TARGET_FILE);
  if ((ret = do_cmd (stream, buf))) { return ret; }

  /* Without -c, encode writes to delta file */
  snprintf_func (buf, TESTBUFSIZE, "%s -e %s %s", program_name,
	    TEST_TARGET_FILE, TEST_DELTA_FILE);
  if ((ret = do_cmd (stream, buf))) { return ret; }

  /* With -c, encode writes to stdout */
  snprintf_func (buf, TESTBUFSIZE, "%s -e -c %s > %s", program_name,
	    TEST_TARGET_FILE, TEST_DELTA_FILE);
  if ((ret = do_cmd (stream, buf))) { return ret; }

  /* Without -c, decode writes to target file name, but it fails because the
   * file exists. */
  snprintf_func (buf, TESTBUFSIZE, "%s -q -d %s ", program_name,
	    TEST_DELTA_FILE);
  if ((ret = do_fail (stream, buf))) { return ret; }

  /* With -c, decode writes to stdout */
  snprintf_func (buf, TESTBUFSIZE, "%s -d -c %s > /dev/null", program_name,
	    TEST_DELTA_FILE);
  if ((ret = do_cmd (stream, buf))) { return ret; }

  test_cleanup();
  return 0;
}

/* This tests that the no-output flag (-J) works.
 */
static int
test_no_output (xd3_stream *stream, int ignore)
{
  int ret;
  char buf[TESTBUFSIZE];

  test_setup ();

  /* An unwritable file (mode 0000) to prove -J never opens the output. */
  snprintf_func (buf, TESTBUFSIZE, "touch %s && chmod 0000 %s",
	    TEST_NOPERM_FILE, TEST_NOPERM_FILE);
  if ((ret = do_cmd (stream, buf))) { return ret; }

  if ((ret = test_make_inputs (stream, NULL, NULL))) { return ret; }

  /* Try no_output encode w/out unwritable output file */
  snprintf_func (buf, TESTBUFSIZE, "%s -q -f -e %s %s", program_name,
	    TEST_TARGET_FILE, TEST_NOPERM_FILE);
  if ((ret = do_fail (stream, buf))) { return ret; }
  snprintf_func (buf, TESTBUFSIZE, "%s -J -e %s %s", program_name,
	    TEST_TARGET_FILE, TEST_NOPERM_FILE);
  if ((ret = do_cmd (stream, buf))) { return ret; }

  /* Now really write the delta to test decode no-output */
  snprintf_func (buf, TESTBUFSIZE, "%s -e %s %s", program_name,
	    TEST_TARGET_FILE, TEST_DELTA_FILE);
  if ((ret = do_cmd (stream, buf))) { return ret; }

  snprintf_func (buf, TESTBUFSIZE, "%s -q -f -d %s %s", program_name,
	    TEST_DELTA_FILE, TEST_NOPERM_FILE);
  if ((ret = do_fail (stream, buf))) { return ret; }
  snprintf_func (buf, TESTBUFSIZE, "%s -J -d %s %s", program_name,
	    TEST_DELTA_FILE, TEST_NOPERM_FILE);
  if ((ret = do_cmd (stream, buf))) { return ret; }

  test_cleanup ();
  return 0;
}

/***********************************************************************
 Source identical optimization
 ***********************************************************************/

/* Computing a delta should be fastest when the two inputs are
 * identical, this checks it.  The library is called to compute a
 * delta between a 10000 byte file, 1000 byte winsize, 500 byte source
 * blocksize.  The same buffer is used for both source and target. */
static int
test_identical_behavior (xd3_stream *stream, int ignore)
{
#define IDB_TGTSZ 10000  /* Not a power of two b/c of hard-coded expectations below.
*/
#define IDB_BLKSZ 512
#define IDB_WINSZ 1000
#define IDB_DELSZ 1000
#define IDB_WINCNT (IDB_TGTSZ / IDB_WINSZ)

  int ret, i;
  uint8_t buf[IDB_TGTSZ];
  uint8_t del[IDB_DELSZ];
  uint8_t rec[IDB_TGTSZ];
  xd3_source source;
  int nextencwin = 0;
  int winstarts = 0, winfinishes = 0;
  usize_t delpos = 0, recsize;
  xd3_config config;

  memset(&source, 0, sizeof(source));

  /* Random target data; the same buffer doubles as the source. */
  for (i = 0; i < IDB_TGTSZ; i += 1)
    {
      buf[i] = (uint8_t) mt_random (&static_mtrand);
    }

  stream->winsize = IDB_WINSZ;
  source.blksize  = IDB_BLKSZ;
  source.name     = "";
  source.curblk   = NULL;
  source.curblkno = 0;

  if ((ret = xd3_set_source (stream, & source))) { goto fail; }

  /* Compute a delta between identical source and targets. */
  for (;;)
    {
      ret = xd3_encode_input (stream);

      if (ret == XD3_INPUT)
	{
	  /* Feed the next window of the (identical) target. */
	  xd3_avail_input (stream, buf + (IDB_WINSZ * nextencwin), IDB_WINSZ);
	  nextencwin += 1;
	  continue;
	}

      if (ret == XD3_GETSRCBLK)
	{
	  /* Serve source blocks out of the same buffer. */
	  source.curblkno = source.getblkno;
	  source.onblk    = IDB_BLKSZ;
	  source.curblk   = buf + source.getblkno * IDB_BLKSZ;
	  continue;
	}

      if (ret == XD3_WINSTART) { winstarts++; continue; }
      if (ret == XD3_WINFINISH)
	{
	  winfinishes++;
	  if (winfinishes == IDB_WINCNT) { break; }
	  continue;
	}

      if (ret != XD3_OUTPUT) { goto fail; }

      CHECK(delpos + stream->avail_out <= IDB_DELSZ);

      memcpy (del + delpos, stream->next_out, stream->avail_out);
      delpos += stream->avail_out;

      xd3_consume_output (stream);
    }

  CHECK(winfinishes == IDB_WINCNT);
  CHECK(winstarts == IDB_WINCNT);
  CHECK(nextencwin == IDB_WINCNT);

  /* Reset.  Decode with the whole buffer as a single source block. */
  memset(&source, 0, sizeof(source));
  source.blksize  = IDB_TGTSZ;
  source.onblk    = IDB_TGTSZ;
  source.curblk   = buf;
  source.curblkno = 0;

  if ((ret = xd3_close_stream (stream))) { goto fail; }
  xd3_free_stream (stream);
  xd3_init_config (& config, 0);
  if ((ret = xd3_config_stream (stream, & config))) { goto fail; }
  if ((ret = xd3_set_source_and_size (stream, & source, IDB_TGTSZ))) { goto fail; }

  /* Decode. */
  if ((ret = xd3_decode_stream (stream, del, delpos, rec, & recsize, IDB_TGTSZ))) { goto fail; }

  /* Check result size and data. */
  if (recsize != IDB_TGTSZ)
    {
      stream->msg = "wrong size reconstruction";
      goto fail;
    }
  if (memcmp (rec, buf, IDB_TGTSZ) != 0)
    {
      stream->msg = "wrong data reconstruction";
      goto fail;
    }

  /* Check that there was one copy per window. */
  IF_DEBUG (if (stream->n_scpy != IDB_WINCNT ||
		stream->n_add != 0 ||
		stream->n_run != 0)
    {
      stream->msg = "wrong copy count";
      goto fail;
    });

  /* Check that no checksums were computed because the initial match
   * was presumed. */
  IF_DEBUG (if (stream->large_ckcnt != 0)
    {
      stream->msg = "wrong checksum behavior";
      goto fail;
    });

  ret = 0;
 fail:
  return ret;
}

/***********************************************************************
 String matching test
 ***********************************************************************/

/* Check particular matching behaviors by calling
 * xd3_string_match_soft directly with specific arguments.
 * Expected results are rendered as "R<pos>/<size>" (run) and
 * "C<pos>/<size>@<addr>" (copy) by test_string_matching below. */
typedef struct _string_match_test string_match_test;

typedef enum
{
  SM_NONE    = 0,
  SM_LAZY    = (1 << 1),
} string_match_flags;

struct _string_match_test
{
  const char *input;
  int         flags;
  const char *result;
};

static const string_match_test match_tests[] =
{
  /* nothing */
  { "1234567890", SM_NONE, "" },

  /* basic run, copy */
  { "11111111112323232323", SM_NONE, "R0/10 C12/8@10" },

  /* no run smaller than MIN_RUN=8 */
  { "1111111", SM_NONE, "C1/6@0" },
  { "11111111", SM_NONE, "R0/8" },

  /* simple promotion: the third copy address depends on promotion */
  { "ABCDEF_ABCDEF^ABCDEF", SM_NONE, "C7/6@0 C14/6@7" },
  /* { "ABCDEF_ABCDEF^ABCDEF", SM_PROMOTE, "C7/6@0 C14/6@0" }, forgotten */

  /* simple lazy: there is a better copy starting with "23 X" than "123 " */
  { "123 23 XYZ 123 XYZ", SM_NONE, "C11/4@0" },
  { "123 23 XYZ 123 XYZ", SM_LAZY, "C11/4@0 C12/6@4" },

  /* trylazy: no lazy matches unless there are at least two characters beyond
   * the first match */
  { "2123_121212", SM_LAZY, "C7/4@5" },
  { "2123_1212123", SM_LAZY, "C7/4@5" },
  { "2123_1212123_", SM_LAZY, "C7/4@5 C8/5@0" },

  /* trylazy: no lazy matches if the copy is >= MAXLAZY=10 */
  {
"2123_121212123_", SM_LAZY, "C7/6@5 C10/5@0" },
  { "2123_12121212123_", SM_LAZY, "C7/8@5 C12/5@0" },
  { "2123_1212121212123_", SM_LAZY, "C7/10@5" },

  /* lazy run: check a run overlapped by a longer copy */
  { "11111112 111111112 1", SM_LAZY, "C1/6@0 R9/8 C10/10@0" },

  /* lazy match: match_length,run_l >= min_match tests, shouldn't get any
   * copies within the run, no run within the copy */
  { "^________^________ ", SM_LAZY, "R1/8 C9/9@0" },

  /* chain depth: it only goes back 10.  this checks that the 10th match hits
   * and the 11th misses. */
  { "1234 1234_1234-1234=1234+1234[1234]1234{1234}1234<1234 ", SM_NONE,
    "C5/4@0 C10/4@5 C15/4@10 C20/4@15 C25/4@20 C30/4@25 C35/4@30 C40/4@35 C45/4@40 C50/5@0" },
  { "1234 1234_1234-1234=1234+1234[1234]1234{1234}1234<1234>1234 ", SM_NONE,
    "C5/4@0 C10/4@5 C15/4@10 C20/4@15 C25/4@20 C30/4@25 C35/4@30 C40/4@35 C45/4@40 C50/4@45 C55/4@50" },

  /* ssmatch test */
  { "ABCDE___ABCDE*** BCDE***", SM_NONE, "C8/5@0 C17/4@1" },
  /*{ "ABCDE___ABCDE*** BCDE***", SM_SSMATCH, "C8/5@0 C17/7@9" }, forgotten */
};

/* Runs the soft string matcher over each match_tests[] input with a fixed
 * small configuration, renders the produced instruction list as text, and
 * compares it with the expected result string. */
static int
test_string_matching (xd3_stream *stream, int ignore)
{
  usize_t i;
  int ret;
  xd3_config config;
  char rbuf[TESTBUFSIZE];

  for (i = 0; i < SIZEOF_ARRAY (match_tests); i += 1)
    {
      const string_match_test *test = & match_tests[i];
      char *rptr = rbuf;
      usize_t len = (usize_t) strlen (test->input);

      xd3_free_stream (stream);
      xd3_init_config (& config, 0);

      /* Fixed soft-matcher parameters so expected results stay stable. */
      config.smatch_cfg = XD3_SMATCH_SOFT;
      config.smatcher_soft.large_look   = 4;
      config.smatcher_soft.large_step   = 4;
      config.smatcher_soft.small_look   = 4;
      config.smatcher_soft.small_chain  = 10;
      config.smatcher_soft.small_lchain = 10;
      config.smatcher_soft.max_lazy     = (test->flags & SM_LAZY) ? 10 : 0;
      config.smatcher_soft.long_enough  = 10;

      if ((ret = xd3_config_stream (stream, & config))) { return ret; }
      if ((ret = xd3_encode_init_full (stream))) { return ret; }

      xd3_avail_input (stream, (uint8_t*)test->input, len);

      if ((ret = stream->smatcher.string_match (stream))) { return ret; }

      /* Render the instruction list: R<pos>/<size> or C<pos>/<size>@<addr>,
       * space-separated. */
      *rptr = 0;
      while (! xd3_rlist_empty (& stream->iopt_used))
	{
	  xd3_rinst *inst = xd3_rlist_pop_front (& stream->iopt_used);

	  switch (inst->type)
	    {
	    case XD3_RUN: *rptr++ = 'R'; break;
	    case XD3_CPY: *rptr++ = 'C'; break;
	    default: CHECK(0);
	    }

	  snprintf_func (rptr, rbuf+TESTBUFSIZE-rptr, "%d/%d",
			 inst->pos, inst->size);
	  rptr += strlen (rptr);

	  if (inst->type == XD3_CPY)
	    {
	      *rptr++ = '@';
	      snprintf_func (rptr, rbuf+TESTBUFSIZE-rptr, "%"Q"d", inst->addr);
	      rptr += strlen (rptr);
	    }

	  *rptr++ = ' ';

	  xd3_rlist_push_back (& stream->iopt_free, inst);
	}

      /* Strip the trailing space. */
      if (rptr != rbuf)
	{
	  rptr -= 1;
	  *rptr = 0;
	}

      if (strcmp (rbuf, test->result) != 0)
	{
	  XPR(NT "test %u: expected %s: got %s", i, test->result, rbuf);
	  stream->msg = "wrong result";
	  return XD3_INTERNAL;
	}
    }

  return 0;
}

/*
 * This is a test for many overlapping instructions.  It must be a lazy
 * matcher.
 */
static int
test_iopt_flush_instructions (xd3_stream *stream, int ignore)
{
  int ret, i;
  usize_t tpos = 0;
  usize_t delta_size, recon_size;
  xd3_config config;
  uint8_t target[TESTBUFSIZE];
  uint8_t delta[TESTBUFSIZE];
  uint8_t recon[TESTBUFSIZE];

  xd3_free_stream (stream);
  xd3_init_config (& config, 0);

  config.smatch_cfg = XD3_SMATCH_SOFT;
  config.smatcher_soft.large_look   = 16;
  config.smatcher_soft.large_step   = 16;
  config.smatcher_soft.small_look   = 4;
  config.smatcher_soft.small_chain  = 128;
  config.smatcher_soft.small_lchain = 16;
  config.smatcher_soft.max_lazy     = 8;
  config.smatcher_soft.long_enough  = 128;

  if ((ret = xd3_config_stream (stream, & config))) { return ret; }

  /* Build a target full of short, heavily-overlapping patterns. */
  for (i = 1; i < 250; i++)
    {
      target[tpos++] = i;
      target[tpos++] = i+1;
      target[tpos++] = i+2;
      target[tpos++] = i+3;
      target[tpos++] = 0;
    }
  for (i = 1; i < 253; i++)
    {
      target[tpos++] = i;
    }

  if ((ret = xd3_encode_stream (stream, target, tpos,
				delta, & delta_size, sizeof (delta))))
    {
      return ret;
    }

  xd3_free_stream(stream);
  if ((ret = xd3_config_stream (stream, & config)))
    {
      return ret;
    }

  if ((ret = xd3_decode_stream (stream, delta, delta_size,
				recon, & recon_size, sizeof (recon))))
    {
      return ret;
    }

  CHECK(tpos ==
recon_size);
  CHECK(memcmp(target, recon, recon_size) == 0);

  return 0;
}

/*
 * This tests the 32/64bit ambiguity for source-window matching.
 */
static int
test_source_cksum_offset (xd3_stream *stream, int ignore)
{
  xd3_source source;

  // Inputs are:
  struct {
    xoff_t cpos;   // stream->srcwin_cksum_pos;
    xoff_t ipos;   // stream->total_in;
    xoff_t size;   // stream->src->size;

    usize_t input; // input 32-bit offset
    xoff_t output; // output 64-bit offset

  } cksum_test[] = {
    // If cpos is <= 2^32
    { 1, 1, 1, 1, 1 },

#if XD3_USE_LARGEFILE64
//    cpos            ipos            size            input         output
//    0x____xxxxxULL, 0x____xxxxxULL, 0x____xxxxxULL, 0x___xxxxxUL, 0x____xxxxxULL
    { 0x100100000ULL, 0x100000000ULL, 0x100200000ULL, 0x00000000UL, 0x100000000ULL },
    { 0x100100000ULL, 0x100000000ULL, 0x100200000ULL, 0xF0000000UL, 0x0F0000000ULL },

    { 0x100200000ULL, 0x100100000ULL, 0x100200000ULL, 0x00300000UL, 0x000300000ULL },

    { 25771983104ULL, 25770000000ULL, 26414808769ULL, 2139216707UL, 23614053187ULL },
#endif

    { 0, 0, 0, 0, 0 },  /* cpos==0 terminates the table */
  }, *test_ptr;

  stream->src = &source;

  for (test_ptr = cksum_test; test_ptr->cpos; test_ptr++) {
    xoff_t r;
    stream->srcwin_cksum_pos = test_ptr->cpos;
    stream->total_in = test_ptr->ipos;

    r = xd3_source_cksum_offset(stream, test_ptr->input);
    CHECK(r == test_ptr->output);
  }
  return 0;
}

/* In-memory round-trip: encode a slightly modified copy of test_text
 * against test_text, decode it back, and check size and content. */
static int
test_in_memory (xd3_stream *stream, int ignore)
{
  // test_text is 256 bytes
  uint8_t ibuf[sizeof(test_text)];
  uint8_t dbuf[sizeof(test_text)];
  uint8_t obuf[sizeof(test_text)];
  usize_t size = sizeof(test_text);
  usize_t dsize, osize;
  int r1, r2;
  int eflags = SECONDARY_DJW ?
XD3_SEC_DJW : 0;

  memcpy(ibuf, test_text, size);
  memset(ibuf + 128, 0, 16);  /* introduce a 16-byte difference */

  r1 = xd3_encode_memory(ibuf, size,
			 test_text, size,
			 dbuf, &dsize, size, eflags);

  r2 = xd3_decode_memory(dbuf, dsize,
			 test_text, size,
			 obuf, &osize, size, 0);

  /* The delta must be non-empty but well under half the input size. */
  if (r1 != 0 || r2 != 0 || dsize >= (size/2) || dsize < 1 ||
      osize != size) {
    stream->msg = "encode/decode size error";
    return XD3_INTERNAL;
  }

  if (memcmp(obuf, ibuf, size) != 0) {
    stream->msg = "encode/decode data error";
    return XD3_INTERNAL;
  }

  return 0;
}

/***********************************************************************
 TEST MAIN
 ***********************************************************************/

/* Runs every self-test in sequence; stops at the first failure.
 * Returns EXIT_SUCCESS/EXIT_FAILURE for main(). */
static int
xd3_selftest (void)
{
/* NOTE(review): in the condition below, `==` binds tighter than `=`, so
 * `ret = xd3_config_stream (...) == 0` stores the *boolean* result (1 on
 * success, 0 on failure).  On a config failure ret is 0 and the
 * `if (ret != 0) goto failure` does not fire -- presumably harmless since
 * config failures are not expected here; verify against upstream before
 * changing. */
#define DO_TEST(fn,flags,arg)                                         \
  do {                                                                \
    xd3_stream stream;                                                \
    xd3_config config;                                                \
    xd3_init_config (& config, flags);                                \
    XPR(NT "testing " #fn "%s...",                                    \
	flags ? (" (" #flags ")") : "");                              \
    if ((ret = xd3_config_stream (& stream, & config) == 0) &&        \
	(ret = test_ ## fn (& stream, arg)) == 0) {                   \
      XPR(NTR " success\n");                                          \
    } else {                                                          \
      XPR(NTR " failed: %s: %s\n", xd3_errstring (& stream),          \
	  xd3_mainerror (ret)); }                                     \
    xd3_free_stream (& stream);                                       \
    if (ret != 0) { goto failure; }                                   \
  } while (0)

  int ret;

  DO_TEST (random_numbers, 0, 0);
  DO_TEST (decode_integer_end_of_input, 0, 0);
  DO_TEST (decode_integer_overflow, 0, 0);
  DO_TEST (encode_decode_uint32_t, 0, 0);
  DO_TEST (encode_decode_uint64_t, 0, 0);
  DO_TEST (usize_t_overflow, 0, 0);
  DO_TEST (forward_match, 0, 0);
  DO_TEST (address_cache, 0, 0);
  IF_GENCODETBL (DO_TEST (address_cache, XD3_ALT_CODE_TABLE, 0));

  DO_TEST (string_matching, 0, 0);
  DO_TEST (choose_instruction, 0, 0);
  DO_TEST (identical_behavior, 0, 0);
  DO_TEST (in_memory, 0, 0);

  IF_GENCODETBL (DO_TEST (choose_instruction, XD3_ALT_CODE_TABLE, 0));
  IF_GENCODETBL (DO_TEST (encode_code_table, 0, 0));

  DO_TEST (iopt_flush_instructions, 0, 0);
  DO_TEST (source_cksum_offset, 0, 0);

  /* The third argument is the expected number of single-bit corruptions
   * that do NOT cause a decode failure. */
  DO_TEST (decompress_single_bit_error, 0, 3);
  DO_TEST (decompress_single_bit_error, XD3_ADLER32, 3);

  IF_LZMA (DO_TEST (decompress_single_bit_error, XD3_SEC_LZMA, 54));
  IF_FGK (DO_TEST (decompress_single_bit_error, XD3_SEC_FGK, 3));
  IF_DJW (DO_TEST (decompress_single_bit_error, XD3_SEC_DJW, 8));

  /* There are many expected non-failures for ALT_CODE_TABLE because
   * not all of the instruction codes are used. */
  IF_GENCODETBL (
    DO_TEST (decompress_single_bit_error, XD3_ALT_CODE_TABLE, 224));

#if SHELL_TESTS
  DO_TEST (force_behavior, 0, 0);
  DO_TEST (stdout_behavior, 0, 0);
  DO_TEST (no_output, 0, 0);
  DO_TEST (command_line_arguments, 0, 0);

#if EXTERNAL_COMPRESSION
  DO_TEST (source_decompression, 0, 0);
  DO_TEST (externally_compressed_io, 0, 0);
#endif

  DO_TEST (recode_command, 0, 0);
#endif

  IF_LZMA (DO_TEST (secondary_lzma, 0, 1));
  IF_DJW (DO_TEST (secondary_huff, 0, DJW_MAX_GROUPS));
  IF_FGK (DO_TEST (secondary_fgk, 0, 1));

  DO_TEST (compressed_stream_overflow, 0, 0);
  IF_LZMA (DO_TEST (compressed_stream_overflow, XD3_SEC_LZMA, 0));

failure:
  test_cleanup ();
  return ret == 0 ? EXIT_SUCCESS : EXIT_FAILURE;
#undef DO_TEST
}
/* xdelta 3 - delta compression tools and library Copyright (C) 2001,
 * 2003, 2004, 2005, 2006, 2007, 2008, 2009, 2010, 2011, 2012.
 * Joshua P. MacDonald
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation; either version 2 of the License, or
 * (at your option) any later version.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, write to the Free Software
 * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA  02111-1307  USA
 */

/* This is public-domain Mersenne Twister code,
 * attributed to Michael Brundage.  Thanks!
 * http://www.qbrundage.com/michaelb/pubs/essays/random_number_generation.html
 */
static const uint32_t TEST_SEED1 = 5489UL;
#define MT_LEN 624
#define MT_IA 397
static const uint32_t UPPER_MASK = 0x80000000;
static const uint32_t LOWER_MASK = 0x7FFFFFFF;
static const uint32_t MATRIX_A = 0x9908B0DF;

#ifndef SHELL_TESTS
#define SHELL_TESTS 1
#endif

typedef struct mtrand mtrand;

/* MT19937 generator state: 624-word buffer plus the read index. */
struct mtrand {
  int mt_index_;
  uint32_t mt_buffer_[MT_LEN];
};

int test_compare_files (const char* tgt, const char *rec);
void mt_init(mtrand *mt, uint32_t seed);
uint32_t mt_random (mtrand *mt);
int test_setup (void);

/* Seed the generator using the 2002 Matsumoto initialization. */
void mt_init(mtrand *mt, uint32_t seed) {
  int i;
  mt->mt_buffer_[0] = seed;
  mt->mt_index_ = MT_LEN;
  for (i = 1; i < MT_LEN; i++) {
    /* See Knuth TAOCP Vol2. 3rd Ed. P.106 for multiplier. */
    /* In the previous versions, MSBs of the seed affect   */
    /* only MSBs of the array mt[].                        */
    /* 2002/01/09 modified by Makoto Matsumoto             */
    mt->mt_buffer_[i] =
      (1812433253UL * (mt->mt_buffer_[i-1] ^ (mt->mt_buffer_[i-1] >> 30)) + i);
  }
}

/* Return the next 32-bit pseudo-random value, regenerating the whole
 * 624-word block (the "twist") when the buffer is exhausted. */
uint32_t mt_random (mtrand *mt) {
  uint32_t y;
  unsigned long mag01[2];
  mag01[0] = 0;
  mag01[1] = MATRIX_A;

  if (mt->mt_index_ >= MT_LEN) {
    int kk;

    for (kk = 0; kk < MT_LEN - MT_IA; kk++) {
      y = (mt->mt_buffer_[kk] & UPPER_MASK) |
	(mt->mt_buffer_[kk + 1] & LOWER_MASK);
      mt->mt_buffer_[kk] = mt->mt_buffer_[kk + MT_IA] ^
	(y >> 1) ^ mag01[y & 0x1UL];
    }
    for (;kk < MT_LEN - 1; kk++) {
      y = (mt->mt_buffer_[kk] & UPPER_MASK) |
	(mt->mt_buffer_[kk + 1] & LOWER_MASK);
      mt->mt_buffer_[kk] = mt->mt_buffer_[kk + (MT_IA - MT_LEN)] ^
	(y >> 1) ^ mag01[y & 0x1UL];
    }
    y = (mt->mt_buffer_[MT_LEN - 1] & UPPER_MASK) |
      (mt->mt_buffer_[0] & LOWER_MASK);
    mt->mt_buffer_[MT_LEN - 1] = mt->mt_buffer_[MT_IA - 1] ^
      (y >> 1) ^ mag01[y & 0x1UL];
    mt->mt_index_ = 0;
  }

  /* Standard MT19937 tempering. */
  y = mt->mt_buffer_[mt->mt_index_++];
  y ^= (y >> 11);
  y ^= (y << 7) & 0x9d2c5680UL;
  y ^= (y << 15) & 0xefc60000UL;
  y ^= (y >> 18);
  return y;
}

static mtrand static_mtrand;

#include <math.h>

/* Exponentially-distributed random value with the given mean, clamped
 * to max_value. */
static uint32_t
mt_exp_rand (uint32_t mean, uint32_t max_value)
{
  double mean_d = mean;
  double erand  = log (1.0 / (mt_random (&static_mtrand) /
			      (double)UINT32_MAX));
  uint32_t x = (uint32_t) (mean_d * erand + 0.5);

  return min (x, max_value);
}

#if SHELL_TESTS
#include <sys/wait.h>
#endif

/* True when the stream's error message equals x. */
#define MSG_IS(x) (stream->msg != NULL && strcmp ((x), stream->msg) == 0)

static const usize_t TWO_MEGS_AND_DELTA = (3 << 20);
static const usize_t ADDR_CACHE_ROUNDS = 10000;

static const usize_t TEST_FILE_MEAN    = 16384;
static const double TEST_ADD_MEAN     = 128;
static const double TEST_ADD_MAX      = 512;
static const double TEST_ADD_RATIO    = 0.1;
static const double TEST_EPSILON      = 0.25;

#define TESTBUFSIZE (1024 * 16)

#define TESTFILESIZE (1024)

/* Per-run temp-file names, filled in by test_setup(). */
static char   TEST_TARGET_FILE[TESTFILESIZE];
static char   TEST_SOURCE_FILE[TESTFILESIZE];
static char   TEST_DELTA_FILE[TESTFILESIZE];
static char   TEST_RECON_FILE[TESTFILESIZE];
static char
TEST_RECON2_FILE[TESTFILESIZE];
static char   TEST_COPY_FILE[TESTFILESIZE];
static char   TEST_NOPERM_FILE[TESTFILESIZE];

/* Abort the process when an internal test invariant fails. */
#define CHECK(cond) if (!(cond)) { XPR(NT "check failure: " #cond); abort(); }

#if SHELL_TESTS
/* Use a fixed soft config so that test values are fixed.  See also
 * test_compress_text(). */
static const char* test_softcfg_str = "-C9,3,4,8,2,36,70";
#endif

/***********************************************************************
 TEST HELPERS
 ***********************************************************************/

/* Progress indicator. */
static void DOT (void) { XPR(NTR "."); }

/* Run a shell command and expect it to exit with status 0. */
static int
do_cmd (xd3_stream *stream, const char *buf)
{
  int ret;
  if ((ret = system (buf)) != 0)
    {
      if (WIFEXITED (ret))
	{
	  stream->msg = "command exited non-zero";
	  IF_DEBUG1 (XPR(NT "command was: %s\n", buf));
	}
      else
	{
	  stream->msg = "abnormal command termination";
	}
      return ret;
    }
  return 0;
}

/* Run a shell command and expect it to exit with status exactly 1
 * (the xdelta3 CLI failure status). */
static int
do_fail (xd3_stream *stream, const char *buf)
{
  int ret;
  ret = system (buf);
  if (! WIFEXITED (ret) || WEXITSTATUS (ret) != 1)
    {
      stream->msg = "command should have not succeeded";
      XPR(NT "command was %s\n", buf);
      return XD3_INTERNAL;
    }
  return 0;
}

/* Test that the exponential distribution actually produces its mean.
*/ static int test_random_numbers (xd3_stream *stream, int ignore) { usize_t i; usize_t sum = 0; usize_t mean = 50; usize_t n_rounds = 1000000; double average, error; double allowed_error = 0.1; mt_init (& static_mtrand, 0x9f73f7fe); for (i = 0; i < n_rounds; i += 1) { sum += mt_exp_rand (mean, USIZE_T_MAX); } average = (double) sum / (double) n_rounds; error = average - (double) mean; if (error < allowed_error && error > -allowed_error) { return 0; } /*XPR(NT "error is %f\n", error);*/ stream->msg = "random distribution looks broken"; return XD3_INTERNAL; } static void test_unlink (char* file) { int ret; if ((ret = unlink (file)) != 0 && errno != ENOENT) { XPR(NT "unlink %s failed: %s\n", file, strerror(ret)); } } static void test_cleanup (void) { #if 1 test_unlink (TEST_TARGET_FILE); test_unlink (TEST_SOURCE_FILE); test_unlink (TEST_DELTA_FILE); test_unlink (TEST_RECON_FILE); test_unlink (TEST_RECON2_FILE); test_unlink (TEST_COPY_FILE); test_unlink (TEST_NOPERM_FILE); #endif } int test_setup (void) { static int x = 0; x++; snprintf_func (TEST_TARGET_FILE, TESTFILESIZE, "/tmp/xdtest.target.%d", x); snprintf_func (TEST_SOURCE_FILE, TESTFILESIZE, "/tmp/xdtest.source.%d", x); snprintf_func (TEST_DELTA_FILE, TESTFILESIZE, "/tmp/xdtest.delta.%d", x); snprintf_func (TEST_RECON_FILE, TESTFILESIZE, "/tmp/xdtest.recon.%d", x); snprintf_func (TEST_RECON2_FILE, TESTFILESIZE, "/tmp/xdtest.recon2.%d", x); snprintf_func (TEST_COPY_FILE, TESTFILESIZE, "/tmp/xdtest.copy.%d", x); snprintf_func (TEST_NOPERM_FILE, TESTFILESIZE, "/tmp/xdtest.noperm.%d", x); test_cleanup(); return 0; } static int test_make_inputs (xd3_stream *stream, xoff_t *ss_out, xoff_t *ts_out) { usize_t ts = (mt_random (&static_mtrand) % TEST_FILE_MEAN) + TEST_FILE_MEAN / 2; usize_t ss = (mt_random (&static_mtrand) % TEST_FILE_MEAN) + TEST_FILE_MEAN / 2; uint8_t *buf = (uint8_t*) malloc (ts + ss), *sbuf = buf, *tbuf = buf + ss; usize_t sadd = 0, sadd_max = (usize_t)(ss * TEST_ADD_RATIO); FILE *tf = NULL, *sf = 
NULL; usize_t i, j; int ret; if (buf == NULL) { return ENOMEM; } if ((tf = fopen (TEST_TARGET_FILE, "w")) == NULL || (ss_out != NULL && (sf = fopen (TEST_SOURCE_FILE, "w")) == NULL)) { stream->msg = "write failed"; ret = get_errno (); goto failure; } if (ss_out != NULL) { for (i = 0; i < ss; ) { sbuf[i++] = (uint8_t) mt_random (&static_mtrand); } } /* Then modify the data to produce copies, everything not copied is * an add. The following logic produces the TEST_ADD_RATIO. The * variable SADD contains the number of adds so far, which should * not exceed SADD_MAX. */ /* XPR(NT "ss = %u ts = %u\n", ss, ts); */ for (i = 0; i < ts; ) { usize_t left = ts - i; usize_t next = mt_exp_rand ((uint32_t) TEST_ADD_MEAN, (uint32_t) TEST_ADD_MAX); usize_t add_left = sadd_max - sadd; double add_prob = (left == 0) ? 0 : (add_left / (double) left); int do_copy; next = min (left, next); do_copy = (next > add_left || (mt_random (&static_mtrand) / \ (double)USIZE_T_MAX) >= add_prob); if (ss_out == NULL) { do_copy &= (i > 0); } else { do_copy &= (ss - next) > 0; } if (do_copy) { /* Copy */ size_t offset = mt_random (&static_mtrand) % ((ss_out == NULL) ? i : (ss - next)); /* XPR(NT "[%u] copy %u at %u ", i, next, offset); */ for (j = 0; j < next; j += 1) { char c = ((ss_out == NULL) ? 
tbuf : sbuf)[offset + j]; /* XPR(NT "%x%x", (c >> 4) & 0xf, c & 0xf); */ tbuf[i++] = c; } /* XPR(NT "\n"); */ } else { /* Add */ /* XPR(NT "[%u] add %u ", i, next); */ for (j = 0; j < next; j += 1) { char c = (char) mt_random (&static_mtrand); /* XPR(NT "%x%x", (c >> 4) & 0xf, c & 0xf); */ tbuf[i++] = c; } /* XPR(NT "\n"); */ sadd += next; } } /* XPR(NT "sadd = %u max = %u\n", sadd, sadd_max); */ if ((fwrite (tbuf, 1, ts, tf) != ts) || (ss_out != NULL && (fwrite (sbuf, 1, ss, sf) != ss))) { stream->msg = "write failed"; ret = get_errno (); goto failure; } if ((ret = fclose (tf)) || (ss_out != NULL && (ret = fclose (sf)))) { stream->msg = "close failed"; ret = get_errno (); goto failure; } if (ts_out) { (*ts_out) = ts; } if (ss_out) { (*ss_out) = ss; } failure: free (buf); return ret; } int test_compare_files (const char* tgt, const char *rec) { FILE *orig, *recons; static uint8_t obuf[TESTBUFSIZE], rbuf[TESTBUFSIZE]; xoff_t offset = 0; size_t i; size_t oc, rc; xoff_t diffs = 0; if ((orig = fopen (tgt, "r")) == NULL) { XPR(NT "open %s failed\n", tgt); return get_errno (); } if ((recons = fopen (rec, "r")) == NULL) { XPR(NT "open %s failed\n", rec); return get_errno (); } for (;;) { oc = fread (obuf, 1, TESTBUFSIZE, orig); rc = fread (rbuf, 1, TESTBUFSIZE, recons); if (oc != rc) { return XD3_INTERNAL; } if (oc == 0) { break; } for (i = 0; i < oc; i += 1) { if (obuf[i] != rbuf[i]) { XPR(NT "byte %u (read %u @ %"Q"u) %d != %d\n", (int)i, (int)oc, offset, obuf[i], rbuf[i]); diffs++; return XD3_INTERNAL; } } offset += oc; } fclose (orig); fclose (recons); if (diffs != 0) { return XD3_INTERNAL; } return 0; } static int test_copy_to (const char *from, const char *to) { char buf[TESTBUFSIZE]; int ret; snprintf_func (buf, TESTBUFSIZE, "cp -f %s %s", from, to); if ((ret = system (buf)) != 0) { return XD3_INTERNAL; } return 0; } static int test_save_copy (const char *origname) { return test_copy_to(origname, TEST_COPY_FILE); } static int test_file_size (const char* file, 
xoff_t *size)
{
  struct stat sbuf;
  int ret;
  (*size) = 0;

  if (stat (file, & sbuf) < 0)
    {
      ret = get_errno ();
      XPR(NT "stat failed: %s: %s\n", file, strerror (ret));
      return ret;
    }

  if (! S_ISREG (sbuf.st_mode))
    {
      ret = XD3_INTERNAL;
      /* NOTE(review): strerror(XD3_INTERNAL) prints a library error code
       * through strerror -- the text is meaningless but harmless. */
      XPR(NT "not a regular file: %s: %s\n", file, strerror (ret));
      return ret;
    }

  (*size) = sbuf.st_size;
  return 0;
}

/***********************************************************************
 READ OFFSET
 ***********************************************************************/

/* Common test for read_integer errors: encodes a 64-bit value and
 * then attempts to read as a 32-bit value.  If TRUNC is non-zero,
 * attempts to get errors by shortening the input, otherwise it should
 * overflow.  Expects XD3_INVALID_INPUT and MSG. */
static int
test_read_integer_error (xd3_stream *stream, usize_t trunto, const char *msg)
{
  uint64_t eval = 1ULL << 34;   /* does not fit in 32 bits */
  uint32_t rval;
  xd3_output *buf = NULL;
  const uint8_t *max;
  const uint8_t *inp;
  int ret;

  buf = xd3_alloc_output (stream, buf);

  if ((ret = xd3_emit_uint64_t (stream, & buf, eval)))
    {
      goto fail;
    }

 again:
  inp = buf->base;
  max = buf->base + buf->next - trunto;

  if ((ret = xd3_read_uint32_t (stream, & inp, max, & rval)) !=
      XD3_INVALID_INPUT ||
      !MSG_IS (msg))
    {
      ret = XD3_INTERNAL;
    }
  else if (trunto && trunto < buf->next)
    {
      /* Keep shortening the input by one byte until nothing remains. */
      trunto += 1;
      goto again;
    }
  else
    {
      ret = 0;
    }

 fail:
  xd3_free_output (stream, buf);
  return ret;
}

/* Test integer overflow using the above routine. */
static int
test_decode_integer_overflow (xd3_stream *stream, int unused)
{
  /* NOTE(review): "read_intger" matches the (misspelled) message string
   * the library emits -- do not "fix" the spelling here without changing
   * the library string as well. */
  return test_read_integer_error (stream, 0, "overflow in read_intger");
}

/* Test integer EOI using the above routine. */
static int
test_decode_integer_end_of_input (xd3_stream *stream, int unused)
{
  return test_read_integer_error (stream, 1, "end-of-input in read_integer");
}

/* Test that emit_integer/decode_integer/sizeof_integer/read_integer
 * work on correct inputs.  Tests powers of (2^7), plus or minus, up
 * to the maximum value.
*/
/* Shared body for the uint32/uint64 round-trip tests below; expanded
 * inside a function that has `stream` in scope.  Emits each value, checks
 * sizeof_, reads it back directly, then decodes the whole sequence via
 * the stream's decode path. */
#define TEST_ENCODE_DECODE_INTEGER(TYPE,ONE,MAX)                       \
  xd3_output *rbuf = NULL;                                             \
  xd3_output *dbuf = NULL;                                             \
  TYPE values[64];                                                     \
  usize_t nvalues = 0;                                                 \
  usize_t i;                                                           \
  int ret = 0;                                                         \
                                                                       \
  for (i = 0; i < (sizeof (TYPE) * 8); i += 7)                         \
    {                                                                  \
      values[nvalues++] = (ONE << i) - ONE;                            \
      values[nvalues++] = (ONE << i);                                  \
      values[nvalues++] = (ONE << i) + ONE;                            \
    }                                                                  \
                                                                       \
  values[nvalues++] = MAX-ONE;                                         \
  values[nvalues++] = MAX;                                             \
                                                                       \
  rbuf = xd3_alloc_output (stream, rbuf);                              \
  dbuf = xd3_alloc_output (stream, dbuf);                              \
                                                                       \
  for (i = 0; i < nvalues; i += 1)                                     \
    {                                                                  \
      const uint8_t *max;                                              \
      const uint8_t *inp;                                              \
      TYPE val;                                                        \
                                                                       \
      DOT ();                                                          \
      rbuf->next = 0;                                                  \
                                                                       \
      if ((ret = xd3_emit_ ## TYPE (stream, & rbuf, values[i])) ||     \
	  (ret = xd3_emit_ ## TYPE (stream, & dbuf, values[i])))       \
	{                                                              \
	  goto fail;                                                   \
	}                                                              \
                                                                       \
      inp = rbuf->base;                                                \
      max = rbuf->base + rbuf->next;                                   \
                                                                       \
      if (rbuf->next != xd3_sizeof_ ## TYPE (values[i]))               \
	{                                                              \
	  ret = XD3_INTERNAL;                                          \
	  goto fail;                                                   \
	}                                                              \
                                                                       \
      if ((ret = xd3_read_ ## TYPE (stream, & inp, max, & val)))       \
	{                                                              \
	  goto fail;                                                   \
	}                                                              \
                                                                       \
      if (val != values[i])                                            \
	{                                                              \
	  ret = XD3_INTERNAL;                                          \
	  goto fail;                                                   \
	}                                                              \
                                                                       \
      DOT ();                                                          \
    }                                                                  \
                                                                       \
  stream->next_in  = dbuf->base;                                       \
  stream->avail_in = dbuf->next;                                       \
                                                                       \
  for (i = 0; i < nvalues; i += 1)                                     \
    {                                                                  \
      TYPE val;                                                        \
                                                                       \
      if ((ret = xd3_decode_ ## TYPE (stream, & val)))                 \
	{                                                              \
	  goto fail;                                                   \
	}                                                              \
                                                                       \
      if (val != values[i])                                            \
	{                                                              \
	  ret = XD3_INTERNAL;                                          \
	  goto fail;                                                   \
	}                                                              \
    }                                                                  \
                                                                       \
  if (stream->avail_in != 0)                                           \
    {                                                                  \
      ret = XD3_INTERNAL;                                              \
      goto fail;                                                       \
    }                                                                  \
                                                                       \
 fail:                                                                 \
  xd3_free_output (stream, rbuf);                                      \
  xd3_free_output (stream, dbuf);                                      \
                                                                       \
  return ret

static int
test_encode_decode_uint32_t (xd3_stream *stream, int unused)
{
  TEST_ENCODE_DECODE_INTEGER(uint32_t,1U,UINT32_MAX);
}

static int
test_encode_decode_uint64_t (xd3_stream *stream, int unused)
{
  TEST_ENCODE_DECODE_INTEGER(uint64_t,1ULL,UINT64_MAX);
}

/* Check USIZE_T_OVERFLOW at and around the boundary. */
static int
test_usize_t_overflow (xd3_stream *stream, int unused)
{
  if (USIZE_T_OVERFLOW (USIZE_T_MAX, 0)) { goto fail; }
  if (USIZE_T_OVERFLOW (0, USIZE_T_MAX)) { goto fail; }
  if (USIZE_T_OVERFLOW (USIZE_T_MAX / 2, USIZE_T_MAX / 2)) { goto fail; }
  if (USIZE_T_OVERFLOW (USIZE_T_MAX / 2, USIZE_T_MAX / 2 + 1)) { goto fail; }
  if (! USIZE_T_OVERFLOW (USIZE_T_MAX, 1)) { goto fail; }
  if (! USIZE_T_OVERFLOW (1, USIZE_T_MAX)) { goto fail; }
  if (! USIZE_T_OVERFLOW (USIZE_T_MAX / 2 + 1, USIZE_T_MAX / 2 + 1)) { goto fail; }

  return 0;

 fail:
  stream->msg = "incorrect overflow computation";
  return XD3_INTERNAL;
}

/* xd3_forward_match must return the length of the common prefix. */
static int
test_forward_match (xd3_stream *stream, int unused)
{
  usize_t i;
  uint8_t buf1[256], buf2[256];

  memset(buf1, 0, 256);
  memset(buf2, 0, 256);

  for (i = 0; i < 256; i++)
    {
      CHECK(xd3_forward_match(buf1, buf2, i) == (int)i);
    }

  for (i = 0; i < 255; i++)
    {
      buf2[i] = 1;
      CHECK(xd3_forward_match(buf1, buf2, 256) == (int)i);
      buf2[i] = 0;
    }

  return 0;
}

/***********************************************************************
 Address cache
 ***********************************************************************/

/* Encode a long pseudo-random sequence of addresses, then decode them
 * from the serialized form and verify every byte and every address
 * mode was exercised. */
static int
test_address_cache (xd3_stream *stream, int unused)
{
  int ret;
  usize_t i;
  usize_t offset;
  usize_t *addrs;
  uint8_t *big_buf, *buf_max;
  const uint8_t *buf;
  xd3_output *outp;
  uint8_t *modes;
  int mode_counts[16];

  stream->acache.s_near = stream->code_table_desc->near_modes;
  stream->acache.s_same = stream->code_table_desc->same_modes;

  if ((ret = xd3_encode_init_partial (stream))) { return ret; }

  addrs = (usize_t*) xd3_alloc (stream, sizeof (usize_t), ADDR_CACHE_ROUNDS);
  modes = (uint8_t*) xd3_alloc (stream, sizeof (uint8_t), ADDR_CACHE_ROUNDS);

  memset (mode_counts, 0, sizeof (mode_counts));
  memset (modes, 0, ADDR_CACHE_ROUNDS);

  addrs[0] = 0;

  mt_init (& static_mtrand, 0x9f73f7fc);

  /* First pass: encode addresses */
  xd3_init_cache (& stream->acache);

  for (offset = 1; offset < ADDR_CACHE_ROUNDS; offset += 1)
    {
      double p;
      usize_t addr;
      usize_t prev_i;
      usize_t nearby;

      p      = (mt_random (&static_mtrand) / (double)USIZE_T_MAX);
      prev_i = mt_random (&static_mtrand) % offset;
      nearby = (mt_random (&static_mtrand) % 256) % offset;
      nearby = max (1U, nearby);

      /* Mix of same-address, near-address and arbitrary addresses so
       * every cache mode gets used. */
      if (p < 0.1)      { addr =
addrs[offset-nearby]; }
      else if (p < 0.4) { addr = min (addrs[prev_i] + nearby, offset-1); }
      else              { addr = prev_i; }

      if ((ret = xd3_encode_address (stream, addr, offset, & modes[offset]))) { return ret; }

      addrs[offset] = addr;
      mode_counts[modes[offset]] += 1;
    }

  /* Copy addresses into a contiguous buffer. */
  big_buf = (uint8_t*) xd3_alloc (stream, xd3_sizeof_output (ADDR_HEAD (stream)), 1);

  for (offset = 0, outp = ADDR_HEAD (stream); outp != NULL;
       offset += outp->next, outp = outp->next_page)
    {
      memcpy (big_buf + offset, outp->base, outp->next);
    }

  buf_max = big_buf + offset;
  buf     = big_buf;

  /* Second pass: decode addresses */
  xd3_init_cache (& stream->acache);

  for (offset = 1; offset < ADDR_CACHE_ROUNDS; offset += 1)
    {
      uint32_t addr;

      if ((ret = xd3_decode_address (stream, offset, modes[offset],
				     & buf, buf_max, & addr))) { return ret; }

      if (addr != addrs[offset])
	{
	  stream->msg = "incorrect decoded address";
	  return XD3_INTERNAL;
	}
    }

  /* Check that every byte, mode was used. */
  if (buf != buf_max)
    {
      stream->msg = "address bytes not used";
      return XD3_INTERNAL;
    }

  for (i = 0; i < (2 + stream->acache.s_same + stream->acache.s_near); i += 1)
    {
      if (mode_counts[i] == 0)
	{
	  stream->msg = "address mode not used";
	  return XD3_INTERNAL;
	}
    }

  xd3_free (stream, modes);
  xd3_free (stream, addrs);
  xd3_free (stream, big_buf);

  return 0;
}

/***********************************************************************
 Encode and decode with single bit error
 ***********************************************************************/

/* It compresses from 256 to around 185 bytes.
 * Avoids matching addresses that are a single-bit difference.
 * Avoids matching address 0.
 *
 * NOTE(review): this literal was reconstructed from a
 * whitespace-collapsed copy assuming 16-character segments (256 bytes
 * including the NUL, per the comment in test_in_memory) -- verify the
 * interior spacing against the upstream file. */
static const uint8_t test_text[] =
"this is a story\n"
"abouttttttttttt\n"
"- his is a stor\n"
"- about nothing "
" all. boutique -"
"his story is a -"
"about           "
"what happens all"
" the time what -"
"am I ttttttt the"
" person said, so"
" what, per son -"
" gory story is -"
" about nothing -"
"tttttt to test -"
"his sto nothing";

static const uint8_t test_apphead[] = "header test";

/* Encode test_text with a fixed soft-matcher configuration (so the
 * expected bit-error non-failure counts stay stable), attaching
 * test_apphead as the application header.  Restores the stream's
 * original flags before returning. */
static int
test_compress_text (xd3_stream *stream,
		    uint8_t *encoded,
		    usize_t *encoded_size)
{
  int ret;
  xd3_config cfg;
  int oflags = stream->flags;
  int flags  = stream->flags | XD3_FLUSH;

  xd3_free_stream (stream);
  xd3_init_config (& cfg, flags);

  /* This configuration is fixed so that the "expected non-error" counts in
   * decompress_single_bit_errors are too.  See test_softcfg_str. */
  cfg.smatch_cfg = XD3_SMATCH_SOFT;
  cfg.smatcher_soft.name = "test";
  cfg.smatcher_soft.large_look   = 64; /* no source, not used */
  cfg.smatcher_soft.large_step   = 64; /* no source, not used */
  cfg.smatcher_soft.small_look   = 4;
  cfg.smatcher_soft.small_chain  = 128;
  cfg.smatcher_soft.small_lchain = 16;
  cfg.smatcher_soft.max_lazy     = 8;
  cfg.smatcher_soft.long_enough  = 128;

  xd3_config_stream (stream, & cfg);

  (*encoded_size) = 0;

  xd3_set_appheader (stream, test_apphead,
		     (usize_t) strlen ((char*) test_apphead));

  if ((ret = xd3_encode_stream (stream, test_text, sizeof (test_text),
				encoded, encoded_size, 4*sizeof (test_text))))
    {
      goto fail;
    }

  if ((ret = xd3_close_stream (stream)))
    {
      goto fail;
    }

 fail:
  /* Restore the caller's flags. */
  xd3_free_stream (stream);
  xd3_init_config (& cfg, oflags);
  xd3_config_stream (stream, & cfg);
  return ret;
}

/* Decode a delta produced by test_compress_text, feeding at most
 * test_desize input bytes per call, and verify the output text and the
 * application header. */
static int
test_decompress_text (xd3_stream *stream, uint8_t *enc, usize_t enc_size,
		      usize_t test_desize)
{
  xd3_config cfg;
  char decoded[sizeof (test_text)];
  uint8_t *apphead;
  usize_t apphead_size;
  usize_t decoded_size;
  const char *msg;
  int  ret;
  usize_t pos = 0;
  int flags = stream->flags;
  usize_t take;

 input:
  /* Test decoding test_desize input bytes at a time */
  take = min (enc_size - pos, test_desize);
  CHECK(take > 0);

  xd3_avail_input (stream, enc + pos, take);
 again:
  ret = xd3_decode_input (stream);
  pos += take;
  take = 0;

  switch (ret)
    {
case XD3_OUTPUT: break; case XD3_WINSTART: case XD3_GOTHEADER: goto again; case XD3_INPUT: if (pos < enc_size) { goto input; } /* else fallthrough */ case XD3_WINFINISH: default: goto fail; } CHECK(ret == XD3_OUTPUT); CHECK(pos == enc_size); if (stream->avail_out != sizeof (test_text)) { stream->msg = "incorrect output size"; ret = XD3_INTERNAL; goto fail; } decoded_size = stream->avail_out; memcpy (decoded, stream->next_out, stream->avail_out); xd3_consume_output (stream); if ((ret = xd3_get_appheader (stream, & apphead, & apphead_size))) { goto fail; } if (apphead_size != strlen ((char*) test_apphead) || memcmp (apphead, test_apphead, strlen ((char*) test_apphead)) != 0) { stream->msg = "incorrect appheader"; ret = XD3_INTERNAL; goto fail; } if ((ret = xd3_decode_input (stream)) != XD3_WINFINISH || (ret = xd3_close_stream (stream)) != 0) { goto fail; } if (decoded_size != sizeof (test_text) || memcmp (decoded, test_text, sizeof (test_text)) != 0) { stream->msg = "incorrect output text"; ret = EIO; } fail: msg = stream->msg; xd3_free_stream (stream); xd3_init_config (& cfg, flags); xd3_config_stream (stream, & cfg); stream->msg = msg; return ret; } static int test_decompress_single_bit_error (xd3_stream *stream, int expected_non_failures) { int ret; usize_t i; uint8_t encoded[4*sizeof (test_text)]; /* make room for alt code table */ usize_t encoded_size; int non_failures = 0; int cksum = (stream->flags & XD3_ADLER32) != 0; //#define DEBUG_TEST_FAILURES #ifndef DEBUG_TEST_FAILURES #define TEST_FAILURES() #else /* For checking non-failure cases by hand, enable this macro and run * xdelta printdelta with print_cpymode disabled. Every non-failure * should change a copy address mode, which doesn't cause a failure * because the address cache starts out with all zeros. 
./xdelta3 test for i in test_text.xz.*; do ./xdelta3 printdelta $i > $i.out; diff $i.out test_text.xz.0.out; done */ system ("rm -rf test_text.*"); { char buf[TESTBUFSIZE]; FILE *f; snprintf_func (buf, TESTBUFSIZE, "test_text"); f = fopen (buf, "w"); fwrite (test_text,1,sizeof (test_text),f); fclose (f); } #define TEST_FAILURES() \ do { \ char buf[TESTBUFSIZE]; \ FILE *f; \ snprintf_func (buf, TESTBUFSIZE, "test_text.xz.%d", non_failures); \ f = fopen (buf, "w"); \ fwrite (encoded,1,encoded_size,f); \ fclose (f); \ } while (0) #endif stream->sec_data.inefficient = 1; stream->sec_inst.inefficient = 1; stream->sec_addr.inefficient = 1; /* Encode text, test correct input */ if ((ret = test_compress_text (stream, encoded, & encoded_size))) { /*stream->msg = "without error: encode failure";*/ return ret; } if ((ret = test_decompress_text (stream, encoded, encoded_size, sizeof (test_text) / 4))) { /*stream->msg = "without error: decode failure";*/ return ret; } TEST_FAILURES(); for (i = 0; i < encoded_size*8; i += 1) { /* Single bit error. */ encoded[i/8] ^= 1 << (i%8); if ((ret = test_decompress_text (stream, encoded, encoded_size, sizeof (test_text))) == 0) { non_failures += 1; #ifdef DEBUG_TEST_FAILURES XPR(NT "%u[%u] non-failure %u\n", i/8, i%8, non_failures); #endif TEST_FAILURES(); } else { /*XPR(NT "%u[%u] failure: %s\n", i/8, i%8, stream->msg);*/ } /* decompress_text returns EIO when the final memcmp() fails, but that * should never happen with checksumming on. */ if (cksum && ret == EIO) { /*XPR(NT "%u[%u] cksum mismatch\n", i/8, i%8);*/ stream->msg = "checksum mismatch"; return XD3_INTERNAL; } /* Undo single bit error. 
*/
      encoded[i/8] ^= 1 << (i%8);
    }

  /* Test correct input again */
  if ((ret = test_decompress_text (stream, encoded, encoded_size, 1)))
    {
      /*stream->msg = "without error: decode failure";*/
      return ret;
    }

  /* Check expected non-failures */
  if (non_failures != expected_non_failures)
    {
      XPR(NT "non-failures %u; expected %u",
	  non_failures, expected_non_failures);
      stream->msg = "incorrect";
      return XD3_INTERNAL;
    }

  DOT ();
  return 0;
}

/***********************************************************************
 Secondary compression tests
 ***********************************************************************/

#if SECONDARY_ANY

/* Common signature shared by the test distributions below: each one
 * emits a synthetic byte sequence into DATA via xd3_emit_byte,
 * returning 0 on success or a non-zero error code. */
typedef int (*sec_dist_func) (xd3_stream *stream, xd3_output *data);

static int sec_dist_func1 (xd3_stream *stream, xd3_output *data);
static int sec_dist_func2 (xd3_stream *stream, xd3_output *data);
static int sec_dist_func3 (xd3_stream *stream, xd3_output *data);
static int sec_dist_func4 (xd3_stream *stream, xd3_output *data);
static int sec_dist_func5 (xd3_stream *stream, xd3_output *data);
static int sec_dist_func6 (xd3_stream *stream, xd3_output *data);
static int sec_dist_func7 (xd3_stream *stream, xd3_output *data);
static int sec_dist_func8 (xd3_stream *stream, xd3_output *data);
static int sec_dist_func9 (xd3_stream *stream, xd3_output *data);
static int sec_dist_func10 (xd3_stream *stream, xd3_output *data);
static int sec_dist_func11 (xd3_stream *stream, xd3_output *data);

/* Table of all test distributions; the secondary-compression tests
 * iterate over every entry. */
static sec_dist_func sec_dists[] =
{
  sec_dist_func1,
  sec_dist_func2,
  sec_dist_func3,
  sec_dist_func4,
  sec_dist_func5,
  sec_dist_func6,
  sec_dist_func7,
  sec_dist_func8,
  sec_dist_func9,
  sec_dist_func10,
  sec_dist_func11,
};

/* Test distribution: 100 bytes of the same character (13). */
static int
sec_dist_func1 (xd3_stream *stream, xd3_output *data)
{
  int i, ret;
  for (i = 0; i < 100; i += 1)
    {
      if ((ret = xd3_emit_byte (stream, & data, 13))) { return ret; }
    }
  return 0;
}

/* Test distribution: uniform covering half the alphabet.
*/
static int
sec_dist_func2 (xd3_stream *stream, xd3_output *data)
{
  int i, ret;
  for (i = 0; i < ALPHABET_SIZE; i += 1)
    {
      if ((ret = xd3_emit_byte (stream, & data, i%(ALPHABET_SIZE/2)))) { return ret; }
    }
  return 0;
}

/* Test distribution: uniform covering the entire alphabet. */
static int
sec_dist_func3 (xd3_stream *stream, xd3_output *data)
{
  int i, ret;
  for (i = 0; i < ALPHABET_SIZE; i += 1)
    {
      if ((ret = xd3_emit_byte (stream, & data, i%ALPHABET_SIZE))) { return ret; }
    }
  return 0;
}

/* Test distribution: an exponential distribution covering half the
 * alphabet (see mt_exp_rand for the exact shape). */
static int
sec_dist_func4 (xd3_stream *stream, xd3_output *data)
{
  int i, ret, x;
  for (i = 0; i < ALPHABET_SIZE*20; i += 1)
    {
      x = mt_exp_rand (10, ALPHABET_SIZE/2);
      if ((ret = xd3_emit_byte (stream, & data, x))) { return ret; }
    }
  return 0;
}

/* Test distribution: an exponential distribution covering the entire
 * alphabet. */
static int
sec_dist_func5 (xd3_stream *stream, xd3_output *data)
{
  int i, ret, x;
  for (i = 0; i < ALPHABET_SIZE*20; i += 1)
    {
      x = mt_exp_rand (10, ALPHABET_SIZE-1);
      if ((ret = xd3_emit_byte (stream, & data, x))) { return ret; }
    }
  return 0;
}

/* Test distribution: a uniform random distribution covering half the
 * alphabet. */
static int
sec_dist_func6 (xd3_stream *stream, xd3_output *data)
{
  int i, ret, x;
  for (i = 0; i < ALPHABET_SIZE*20; i += 1)
    {
      x = mt_random (&static_mtrand) % (ALPHABET_SIZE/2);
      if ((ret = xd3_emit_byte (stream, & data, x))) { return ret; }
    }
  return 0;
}

/* Test distribution: a uniform random distribution covering the entire
 * alphabet. */
static int
sec_dist_func7 (xd3_stream *stream, xd3_output *data)
{
  int i, ret, x;
  for (i = 0; i < ALPHABET_SIZE*200; i += 1)
    {
      x = mt_random (&static_mtrand) % ALPHABET_SIZE;
      if ((ret = xd3_emit_byte (stream, & data, x))) { return ret; }
    }
  return 0;
}

/* Test distribution: a small number of frequent characters, difficult
 * to divide into many groups */
static int
sec_dist_func8 (xd3_stream *stream, xd3_output *data)
{
  int i, ret;
  for
(i = 0; i < ALPHABET_SIZE*5; i += 1)
    {
      /* Four widely spaced symbols, repeated: hard to split into many
       * frequency groups. */
      if ((ret = xd3_emit_byte (stream, & data, 0)))   { return ret; }
      if ((ret = xd3_emit_byte (stream, & data, 64)))  { return ret; }
      if ((ret = xd3_emit_byte (stream, & data, 128))) { return ret; }
      if ((ret = xd3_emit_byte (stream, & data, 255))) { return ret; }
    }
  return 0;
}

/* Test distribution: one that causes many FGK block promotions (found a bug) */
static int
sec_dist_func9 (xd3_stream *stream, xd3_output *data)
{
  int i, ret;
  int ramp   = 0;
  int rcount = 0;
  int prom   = 0;
  int pcount = 0;

  /* 200 was long enough to trigger it--only when stricter checking
   * that counted all blocks was turned on, but it seems I deleted
   * this code.  (missing fgk_free_block on line 398). */
  for (i = 0; i < ALPHABET_SIZE*200; i += 1)
    {
    repeat:
      /* Phase 1: ramp up so that initially the Nth symbol is emitted
       * (N+1) times. */
      if (ramp < ALPHABET_SIZE)
	{
	  /* Initially Nth symbol has (N+1) frequency */
	  if (rcount <= ramp)
	    {
	      rcount += 1;
	      if ((ret = xd3_emit_byte (stream, & data, ramp))) { return ret; }
	      continue;
	    }

	  ramp += 1;
	  rcount = 0;
	  goto repeat;
	}

      /* Thereafter, promote least freq to max freq */
      if (pcount == ALPHABET_SIZE)
	{
	  pcount = 0;
	  prom   = (prom + 1) % ALPHABET_SIZE;
	}

      pcount += 1;
      if ((ret = xd3_emit_byte (stream, & data, prom))) { return ret; }
    }

  return 0;
}

/* Test distribution: freq[i] == i*i, creates a 21-bit code length, fixed in 3.0r.
*/ static int sec_dist_func10 (xd3_stream *stream, xd3_output *data) { int i, j, ret; for (i = 0; i < ALPHABET_SIZE; i += 1) { for (j = 0; j <= (i*i); j += 1) { if ((ret = xd3_emit_byte (stream, & data, i))) { return ret; } } } return 0; } /* Test distribution: fibonacci */ static int sec_dist_func11 (xd3_stream *stream, xd3_output *data) { int sum0 = 0; int sum1 = 1; int i, j, ret; for (i = 0; i < 33; ++i) { for (j = 0; j < (sum0 + sum1); ++j) { if ((ret = xd3_emit_byte (stream, & data, i))) { return ret; } } sum0 = sum1; sum1 = j; } return 0; } static int test_secondary_decode (xd3_stream *stream, const xd3_sec_type *sec, usize_t input_size, usize_t compress_size, const uint8_t *dec_input, const uint8_t *dec_correct, uint8_t *dec_output) { int ret; xd3_sec_stream *dec_stream; const uint8_t *dec_input_used, *dec_input_end; uint8_t *dec_output_used, *dec_output_end; if ((dec_stream = sec->alloc (stream)) == NULL) { return ENOMEM; } if ((ret = sec->init (stream, dec_stream, 0)) != 0) { goto fail; } dec_input_used = dec_input; dec_input_end = dec_input + compress_size; dec_output_used = dec_output; dec_output_end = dec_output + input_size; if ((ret = sec->decode (stream, dec_stream, & dec_input_used, dec_input_end, & dec_output_used, dec_output_end))) { goto fail; } if (dec_input_used != dec_input_end) { stream->msg = "unused input"; ret = XD3_INTERNAL; goto fail; } if (dec_output_used != dec_output_end) { stream->msg = "unfinished output"; ret = XD3_INTERNAL; goto fail; } if (memcmp (dec_output, dec_correct, input_size) != 0) { stream->msg = "incorrect output"; ret = XD3_INTERNAL; goto fail; } fail: sec->destroy (stream, dec_stream); return ret; } static int test_secondary (xd3_stream *stream, const xd3_sec_type *sec, usize_t groups) { usize_t test_i; int ret; xd3_output *in_head, *out_head, *p; usize_t p_off, input_size, compress_size; uint8_t *dec_input = NULL, *dec_output = NULL, *dec_correct = NULL; xd3_sec_stream *enc_stream; xd3_sec_cfg cfg; memset (& cfg, 0, 
sizeof (cfg)); cfg.inefficient = 1; for (cfg.ngroups = 1; cfg.ngroups <= groups; cfg.ngroups += 1) { XPR(NTR "\n..."); for (test_i = 0; test_i < SIZEOF_ARRAY (sec_dists); test_i += 1) { mt_init (& static_mtrand, 0x9f73f7fc); in_head = xd3_alloc_output (stream, NULL); out_head = xd3_alloc_output (stream, NULL); enc_stream = sec->alloc (stream); dec_input = NULL; dec_output = NULL; dec_correct = NULL; if (in_head == NULL || out_head == NULL || enc_stream == NULL) { goto nomem; } if ((ret = sec_dists[test_i] (stream, in_head))) { goto fail; } if ((ret = sec->init (stream, enc_stream, 1)) != 0) { goto fail; } /* Encode data */ if ((ret = sec->encode (stream, enc_stream, in_head, out_head, & cfg))) { XPR(NT "test %u: encode: %s", test_i, stream->msg); goto fail; } /* Calculate sizes, allocate contiguous arrays for decoding */ input_size = xd3_sizeof_output (in_head); compress_size = xd3_sizeof_output (out_head); XPR(NTR "%.3f", 8.0 * (double) compress_size / (double) input_size); if ((dec_input = (uint8_t*) xd3_alloc (stream, compress_size, 1)) == NULL || (dec_output = (uint8_t*) xd3_alloc (stream, input_size, 1)) == NULL || (dec_correct = (uint8_t*) xd3_alloc (stream, input_size, 1)) == NULL) { goto nomem; } /* Fill the compressed data array */ for (p_off = 0, p = out_head; p != NULL; p_off += p->next, p = p->next_page) { memcpy (dec_input + p_off, p->base, p->next); } CHECK(p_off == compress_size); /* Fill the input data array */ for (p_off = 0, p = in_head; p != NULL; p_off += p->next, p = p->next_page) { memcpy (dec_correct + p_off, p->base, p->next); } CHECK(p_off == input_size); if ((ret = test_secondary_decode (stream, sec, input_size, compress_size, dec_input, dec_correct, dec_output))) { XPR(NT "test %u: decode: %s", test_i, stream->msg); goto fail; } /* Single-bit error test, only cover the first 10 bytes. * Some non-failures are expected in the Huffman case: * Changing the clclen array, for example, may not harm the * decoding. Really looking for faults here. 
*/
      {
	int i;
	int bytes = min (compress_size, 10U);
	for (i = 0; i < bytes * 8; i += 1)
	  {
	    /* Flip one bit, attempt to decode, flip it back. */
	    dec_input[i/8] ^= 1 << (i%8);
	    if ((ret = test_secondary_decode (stream, sec, input_size,
					      compress_size, dec_input,
					      dec_correct, dec_output)) == 0)
	      {
		/*XPR(NT "test %u: decode single-bit [%u/%u] error non-failure",
		     test_i, i/8, i%8);*/
	      }
	    dec_input[i/8] ^= 1 << (i%8);

	    if ((i % (2*bytes)) == (2*bytes)-1) { DOT (); }
	  }
	ret = 0;
      }

      if (0)
	{
	nomem:
	  ret = ENOMEM;
	}

      /* Shared cleanup for both the success and failure paths. */
    fail:
      sec->destroy (stream, enc_stream);
      xd3_free_output (stream, in_head);
      xd3_free_output (stream, out_head);
      xd3_free (stream, dec_input);
      xd3_free (stream, dec_output);
      xd3_free (stream, dec_correct);

      if (ret != 0) { return ret; }
    }
  }

  return 0;
}

/* Per-coder entry points, compiled in only when the corresponding
 * secondary coder (FGK / DJW huffman / LZMA) is configured. */
IF_FGK (static int test_secondary_fgk (xd3_stream *stream, usize_t gp)
	{ return test_secondary (stream, & fgk_sec_type, gp); })
IF_DJW (static int test_secondary_huff (xd3_stream *stream, usize_t gp)
	{ return test_secondary (stream, & djw_sec_type, gp); })
IF_LZMA (static int test_secondary_lzma (xd3_stream *stream, usize_t gp)
	 { return test_secondary (stream, & lzma_sec_type, gp); })

#endif

/***********************************************************************
 TEST INSTRUCTION TABLE
 ***********************************************************************/

/* Test that xd3_choose_instruction() does the right thing for its code
 * table.
*/ static int test_choose_instruction (xd3_stream *stream, int ignore) { int i; stream->code_table = (*stream->code_table_func) (); for (i = 0; i < 256; i += 1) { const xd3_dinst *d = stream->code_table + i; xd3_rinst prev, inst; CHECK(d->type1 > 0); memset (& prev, 0, sizeof (prev)); memset (& inst, 0, sizeof (inst)); if (d->type2 == 0) { inst.type = d->type1; if ((inst.size = d->size1) == 0) { inst.size = TESTBUFSIZE; } XD3_CHOOSE_INSTRUCTION (stream, NULL, & inst); if (inst.code2 != 0 || inst.code1 != i) { stream->msg = "wrong single instruction"; return XD3_INTERNAL; } } else { prev.type = d->type1; prev.size = d->size1; inst.type = d->type2; inst.size = d->size2; XD3_CHOOSE_INSTRUCTION (stream, & prev, & inst); if (prev.code2 != i) { stream->msg = "wrong double instruction"; return XD3_INTERNAL; } } } return 0; } /*********************************************************************** TEST INSTRUCTION TABLE CODING ***********************************************************************/ #if GENERIC_ENCODE_TABLES /* Test that encoding and decoding a code table works */ static int test_encode_code_table (xd3_stream *stream, int ignore) { int ret; const uint8_t *comp_data; usize_t comp_size; if ((ret = xd3_compute_alternate_table_encoding (stream, & comp_data, & comp_size))) { return ret; } stream->acache.s_near = __alternate_code_table_desc.near_modes; stream->acache.s_same = __alternate_code_table_desc.same_modes; if ((ret = xd3_apply_table_encoding (stream, comp_data, comp_size))) { return ret; } if (memcmp (stream->code_table, xd3_alternate_code_table (), sizeof (xd3_dinst) * 256) != 0) { stream->msg = "wrong code table reconstruction"; return XD3_INTERNAL; } return 0; } #endif /*********************************************************************** 64BIT STREAMING ***********************************************************************/ /* This test encodes and decodes a series of 1 megabyte windows, each * containing a long run of zeros along with a single 
xoff_t size * record to indicate the sequence. */ static int test_streaming (xd3_stream *in_stream, uint8_t *encbuf, uint8_t *decbuf, uint8_t *delbuf, usize_t megs) { xd3_stream estream, dstream; int ret; usize_t i, delsize, decsize; xd3_config cfg; xd3_init_config (& cfg, in_stream->flags); cfg.flags |= XD3_COMPLEVEL_6; if ((ret = xd3_config_stream (& estream, & cfg)) || (ret = xd3_config_stream (& dstream, & cfg))) { goto fail; } for (i = 0; i < megs; i += 1) { ((usize_t*) encbuf)[0] = i; if ((i % 200) == 199) { DOT (); } if ((ret = xd3_process_stream (1, & estream, xd3_encode_input, 0, encbuf, 1 << 20, delbuf, & delsize, 1 << 20))) { in_stream->msg = estream.msg; goto fail; } if ((ret = xd3_process_stream (0, & dstream, xd3_decode_input, 0, delbuf, delsize, decbuf, & decsize, 1 << 20))) { in_stream->msg = dstream.msg; goto fail; } if (decsize != 1 << 20 || memcmp (encbuf, decbuf, 1 << 20) != 0) { in_stream->msg = "wrong result"; ret = XD3_INTERNAL; goto fail; } } if ((ret = xd3_close_stream (& estream)) || (ret = xd3_close_stream (& dstream))) { goto fail; } fail: xd3_free_stream (& estream); xd3_free_stream (& dstream); return ret; } /* Run tests of data streaming of over and around 4GB of data. */ static int test_compressed_stream_overflow (xd3_stream *stream, int ignore) { int ret; int i; uint8_t *buf; if ((buf = (uint8_t*) malloc (TWO_MEGS_AND_DELTA)) == NULL) { return ENOMEM; } memset (buf, 0, TWO_MEGS_AND_DELTA); for (i = 0; i < (2 << 20); i += 256) { int j; int off = mt_random(& static_mtrand) % 10; for (j = 0; j < 256; j++) { buf[i + j] = j + off; } } /* Test overflow of a 32-bit file offset. 
*/ if (SIZEOF_XOFF_T == 4) { ret = test_streaming (stream, buf, buf + (1 << 20), buf + (2 << 20), (1 << 12) + 1); if (ret == XD3_INVALID_INPUT && MSG_IS ("decoder file offset overflow")) { ret = 0; } else { XPR(NT XD3_LIB_ERRMSG (stream, ret)); stream->msg = "expected overflow condition"; ret = XD3_INTERNAL; goto fail; } } /* Test transfer of exactly 32bits worth of data. */ if ((ret = test_streaming (stream, buf, buf + (1 << 20), buf + (2 << 20), 1 << 12))) { goto fail; } fail: free (buf); return ret; } /*********************************************************************** COMMAND LINE ***********************************************************************/ #if SHELL_TESTS /* For each pair of command templates in the array below, test that * encoding and decoding commands work. Also check for the expected * size delta, which should be approximately TEST_ADD_RATIO times the * file size created by test_make_inputs. Due to differences in the * application header, it is suppressed (-A) so that all delta files * are the same. 
*/ static int test_command_line_arguments (xd3_stream *stream, int ignore) { int i, ret; static const char* cmdpairs[] = { /* standard input, output */ "%s %s -A < %s > %s", "%s -d < %s > %s", "%s %s -A -e < %s > %s", "%s -d < %s > %s", "%s %s -A= encode < %s > %s", "%s decode < %s > %s", "%s %s -A -q encode < %s > %s", "%s -qdq < %s > %s", /* file input, standard output */ "%s %s -A= %s > %s", "%s -d %s > %s", "%s %s -A -e %s > %s", "%s -d %s > %s", "%s %s encode -A= %s > %s", "%s decode %s > %s", /* file input, output */ "%s %s -A= %s %s", "%s -d %s %s", "%s %s -A -e %s %s", "%s -d %s %s", "%s %s -A= encode %s %s", "%s decode %s %s", /* option placement */ "%s %s -A -f %s %s", "%s -f -d %s %s", "%s %s -e -A= %s %s", "%s -d -f %s %s", "%s %s -f encode -A= %s %s", "%s -f decode -f %s %s", }; char ecmd[TESTBUFSIZE], dcmd[TESTBUFSIZE]; int pairs = SIZEOF_ARRAY (cmdpairs) / 2; xoff_t tsize; xoff_t dsize; double ratio; mt_init (& static_mtrand, 0x9f73f7fc); for (i = 0; i < pairs; i += 1) { test_setup (); if ((ret = test_make_inputs (stream, NULL, & tsize))) { return ret; } snprintf_func (ecmd, TESTBUFSIZE, cmdpairs[2*i], program_name, test_softcfg_str, TEST_TARGET_FILE, TEST_DELTA_FILE); snprintf_func (dcmd, TESTBUFSIZE, cmdpairs[2*i+1], program_name, TEST_DELTA_FILE, TEST_RECON_FILE); /* Encode and decode. */ if ((ret = system (ecmd)) != 0) { XPR(NT "encode command: %s\n", ecmd); stream->msg = "encode cmd failed"; return XD3_INTERNAL; } if ((ret = system (dcmd)) != 0) { XPR(NT "decode command: %s\n", dcmd); stream->msg = "decode cmd failed"; return XD3_INTERNAL; } /* Compare the target file. */ if ((ret = test_compare_files (TEST_TARGET_FILE, TEST_RECON_FILE))) { return ret; } if ((ret = test_file_size (TEST_DELTA_FILE, & dsize))) { return ret; } ratio = (double) dsize / (double) tsize; /* Check that it is not too small, not too large. 
*/ if (ratio >= TEST_ADD_RATIO + TEST_EPSILON) { XPR(NT "test encode with size ratio %.4f, " "expected < %.4f (%"Q"u, %"Q"u)\n", ratio, TEST_ADD_RATIO + TEST_EPSILON, dsize, tsize); stream->msg = "strange encoding"; return XD3_INTERNAL; } if (ratio <= TEST_ADD_RATIO * (1.0 - 2 * TEST_EPSILON)) { XPR(NT "test encode with size ratio %.4f, " "expected > %.4f\n", ratio, TEST_ADD_RATIO - TEST_EPSILON); stream->msg = "strange encoding"; return XD3_INTERNAL; } /* Also check that test_compare_files works. The delta and original should * not be identical. */ if ((ret = test_compare_files (TEST_DELTA_FILE, TEST_TARGET_FILE)) == 0) { stream->msg = "broken test_compare_files"; return XD3_INTERNAL; } test_cleanup (); DOT (); } return 0; } static int check_vcdiff_header (xd3_stream *stream, const char *input, const char *line_start, const char *matches, int yes_or_no) { int ret; char vcmd[TESTBUFSIZE], gcmd[TESTBUFSIZE]; snprintf_func (vcmd, TESTBUFSIZE, "%s printhdr -f %s %s", program_name, input, TEST_RECON2_FILE); if ((ret = system (vcmd)) != 0) { XPR(NT "printhdr command: %s\n", vcmd); stream->msg = "printhdr cmd failed"; return XD3_INTERNAL; } snprintf_func (gcmd, TESTBUFSIZE, "grep \"%s.*%s.*\" %s > /dev/null", line_start, matches, TEST_RECON2_FILE); if (yes_or_no) { if ((ret = do_cmd (stream, gcmd))) { XPR(NT "%s\n", gcmd); return ret; } } else { if ((ret = do_fail (stream, gcmd))) { XPR(NT "%s\n", gcmd); return ret; } } return 0; } static int test_recode_command2 (xd3_stream *stream, int has_source, int variant, int change) { int has_adler32 = (variant & 0x1) != 0; int has_apphead = (variant & 0x2) != 0; int has_secondary = (variant & 0x4) != 0; int change_adler32 = (change & 0x1) != 0; int change_apphead = (change & 0x2) != 0; int change_secondary = (change & 0x4) != 0; int recoded_adler32 = change_adler32 ? !has_adler32 : has_adler32; int recoded_apphead = change_apphead ? !has_apphead : has_apphead; int recoded_secondary = change_secondary ? 
!has_secondary : has_secondary; char ecmd[TESTBUFSIZE], recmd[TESTBUFSIZE], dcmd[TESTBUFSIZE]; xoff_t tsize, ssize; int ret; test_setup (); if ((ret = test_make_inputs (stream, has_source ? & ssize : NULL, & tsize))) { return ret; } /* First encode */ snprintf_func (ecmd, TESTBUFSIZE, "%s %s -f %s %s %s %s %s %s %s", program_name, test_softcfg_str, has_adler32 ? "" : "-n ", has_apphead ? "-A=encode_apphead " : "-A= ", has_secondary ? "-S djw " : "-S none ", has_source ? "-s " : "", has_source ? TEST_SOURCE_FILE : "", TEST_TARGET_FILE, TEST_DELTA_FILE); if ((ret = system (ecmd)) != 0) { XPR(NT "encode command: %s\n", ecmd); stream->msg = "encode cmd failed"; return XD3_INTERNAL; } /* Now recode */ snprintf_func (recmd, TESTBUFSIZE, "%s recode %s -f %s %s %s %s %s", program_name, test_softcfg_str, recoded_adler32 ? "" : "-n ", !change_apphead ? "" : (recoded_apphead ? "-A=recode_apphead " : "-A= "), recoded_secondary ? "-S djw " : "-S none ", TEST_DELTA_FILE, TEST_COPY_FILE); if ((ret = system (recmd)) != 0) { XPR(NT "recode command: %s\n", recmd); stream->msg = "recode cmd failed"; return XD3_INTERNAL; } /* Check recode changes. */ if ((ret = check_vcdiff_header (stream, TEST_COPY_FILE, "VCDIFF window indicator", "VCD_SOURCE", has_source))) { return ret; } if ((ret = check_vcdiff_header (stream, TEST_COPY_FILE, "VCDIFF header indicator", "VCD_SECONDARY", recoded_secondary))) { return ret; } if ((ret = check_vcdiff_header (stream, TEST_COPY_FILE, "VCDIFF window indicator", "VCD_ADLER32", /* Recode can't generate an adler32 * checksum, it can only preserve it or * remove it. 
*/ has_adler32 && recoded_adler32))) { return ret; } if (!change_apphead) { if ((ret = check_vcdiff_header (stream, TEST_COPY_FILE, "VCDIFF header indicator", "VCD_APPHEADER", has_apphead))) { return ret; } if ((ret = check_vcdiff_header (stream, TEST_COPY_FILE, "VCDIFF application header", "encode_apphead", has_apphead))) { return ret; } } else { if ((ret = check_vcdiff_header (stream, TEST_COPY_FILE, "VCDIFF header indicator", "VCD_APPHEADER", recoded_apphead))) { return ret; } if (recoded_apphead && (ret = check_vcdiff_header (stream, TEST_COPY_FILE, "VCDIFF application header", "recode_apphead", 1))) { return ret; } } /* Now decode */ snprintf_func (dcmd, TESTBUFSIZE, "%s -fd %s %s %s %s ", program_name, has_source ? "-s " : "", has_source ? TEST_SOURCE_FILE : "", TEST_COPY_FILE, TEST_RECON_FILE); if ((ret = system (dcmd)) != 0) { XPR(NT "decode command: %s\n", dcmd); stream->msg = "decode cmd failed"; return XD3_INTERNAL; } /* Now compare. */ if ((ret = test_compare_files (TEST_TARGET_FILE, TEST_RECON_FILE))) { return ret; } return 0; } static int test_recode_command (xd3_stream *stream, int ignore) { /* Things to test: * - with and without a source file (recode does not change) * * (recode may or may not change -- 8 variations) * - with and without adler32 * - with and without app header * - with and without secondary */ int has_source; int variant; int change; int ret; for (has_source = 0; has_source < 2; has_source++) { for (variant = 0; variant < 8; variant++) { for (change = 0; change < 8; change++) { if ((ret = test_recode_command2 (stream, has_source, variant, change))) { return ret; } } DOT (); } } return 0; } #endif /*********************************************************************** EXTERNAL I/O DECOMPRESSION/RECOMPRESSION ***********************************************************************/ #if EXTERNAL_COMPRESSION /* This performs one step of the test_externally_compressed_io * function described below. 
It builds a pipe containing both Xdelta * and external compression/decompression that should not modify the * data passing through. */ static int test_compressed_pipe (xd3_stream *stream, main_extcomp *ext, char* buf, const char* comp_options, const char* decomp_options, int do_ext_recomp, const char* msg) { int ret; char decomp_buf[TESTBUFSIZE]; if (do_ext_recomp) { snprintf_func (decomp_buf, TESTBUFSIZE, " | %s %s", ext->decomp_cmdname, ext->decomp_options); } else { decomp_buf[0] = 0; } snprintf_func (buf, TESTBUFSIZE, "%s %s < %s | %s %s | %s %s%s > %s", ext->recomp_cmdname, ext->recomp_options, TEST_TARGET_FILE, program_name, comp_options, program_name, decomp_options, decomp_buf, TEST_RECON_FILE); if ((ret = system (buf)) != 0) { stream->msg = msg; return XD3_INTERNAL; } if ((ret = test_compare_files (TEST_TARGET_FILE, TEST_RECON_FILE))) { return XD3_INTERNAL; } DOT (); return 0; } /* We want to test that a pipe such as: * * --> | gzip -cf | xdelta3 -cf | xdelta3 -dcf | gzip -dcf | --> * * is transparent, i.e., does not modify the stream of data. However, * we also want to verify that at the center the data is properly * compressed, i.e., that we do not just have a re-compressed gzip * format, that we have an VCDIFF format. We do this in two steps. * First test the above pipe, then test with suppressed output * recompression (-D). The result should be the original input: * * --> | gzip -cf | xdelta3 -cf | xdelta3 -Ddcf | --> * * Finally we want to test that -D also disables input decompression: * * --> | gzip -cf | xdelta3 -Dcf | xdelta3 -Ddcf | gzip -dcf | --> */ static int test_externally_compressed_io (xd3_stream *stream, int ignore) { usize_t i; int ret; char buf[TESTBUFSIZE]; mt_init (& static_mtrand, 0x9f73f7fc); if ((ret = test_make_inputs (stream, NULL, NULL))) { return ret; } for (i = 0; i < SIZEOF_ARRAY (extcomp_types); i += 1) { main_extcomp *ext = & extcomp_types[i]; /* Test for the existence of the external command first, if not skip. 
*/ snprintf_func (buf, TESTBUFSIZE, "%s %s < /dev/null > /dev/null", ext->recomp_cmdname, ext->recomp_options); if ((ret = system (buf)) != 0) { XPR(NT "%s=0", ext->recomp_cmdname); continue; } if ((ret = test_compressed_pipe (stream, ext, buf, "-cfq", "-dcfq", 1, "compression failed: identity pipe")) || (ret = test_compressed_pipe (stream, ext, buf, "-cfq", "-Rdcfq", 0, "compression failed: without recompression")) || (ret = test_compressed_pipe (stream, ext, buf, "-Dcfq", "-Rdcfq", 1, "compression failed: without decompression"))) { return ret; } } return 0; } /* This tests the proper functioning of external decompression for * source files. The source and target files are identical and * compressed by gzip. Decoding such a delta with recompression * disbaled (-R) should produce the original, uncompressed * source/target file. Then it checks with output recompression * enabled--in this case the output should be a compressed copy of the * original source/target file. Then it checks that encoding with * decompression disabled works--the compressed files are identical * and decoding them should always produce a compressed output, * regardless of -R since the encoded delta file had decompression * disabled.. */ static int test_source_decompression (xd3_stream *stream, int ignore) { int ret; char buf[TESTBUFSIZE]; const main_extcomp *ext; xoff_t dsize; mt_init (& static_mtrand, 0x9f73f7fc); test_setup (); if ((ret = test_make_inputs (stream, NULL, NULL))) { return ret; } /* Use gzip. */ if ((ext = main_get_compressor ("G")) == NULL) { XPR(NT "skipped"); return 0; } /* Save an uncompressed copy. */ if ((ret = test_save_copy (TEST_TARGET_FILE))) { return ret; } /* Compress the source. */ snprintf_func (buf, TESTBUFSIZE, "%s -1 %s < %s > %s", ext->recomp_cmdname, ext->recomp_options, TEST_COPY_FILE, TEST_SOURCE_FILE); if ((ret = do_cmd (stream, buf))) { return ret; } /* Compress the target. 
*/ snprintf_func (buf, TESTBUFSIZE, "%s -9 %s < %s > %s", ext->recomp_cmdname, ext->recomp_options, TEST_COPY_FILE, TEST_TARGET_FILE); if ((ret = do_cmd (stream, buf))) { return ret; } /* Now the two identical files are compressed. Delta-encode the target, * with decompression. */ snprintf_func (buf, TESTBUFSIZE, "%s -e -vfq -s%s %s %s", program_name, TEST_SOURCE_FILE, TEST_TARGET_FILE, TEST_DELTA_FILE); if ((ret = do_cmd (stream, buf))) { return ret; } /* Check that the compressed file is small (b/c inputs are * identical). */ if ((ret = test_file_size (TEST_DELTA_FILE, & dsize))) { return ret; } /* Deltas for identical files should be very small. */ if (dsize > 200) { XPR(NT "external compression did not happen\n"); stream->msg = "external compression did not happen"; return XD3_INTERNAL; } /* Decode the delta file with recompression disabled, should get an * uncompressed file out. */ snprintf_func (buf, TESTBUFSIZE, "%s -v -dq -R -s%s %s %s", program_name, TEST_SOURCE_FILE, TEST_DELTA_FILE, TEST_RECON_FILE); if ((ret = do_cmd (stream, buf))) { return ret; } if ((ret = test_compare_files (TEST_COPY_FILE, TEST_RECON_FILE))) { return ret; } /* Decode the delta file with recompression, should get a compressed file * out. But we can't compare compressed files directly. 
*/ snprintf_func (buf, TESTBUFSIZE, "%s -v -dqf -s%s %s %s", program_name, TEST_SOURCE_FILE, TEST_DELTA_FILE, TEST_RECON_FILE); if ((ret = do_cmd (stream, buf))) { return ret; } snprintf_func (buf, TESTBUFSIZE, "%s %s < %s > %s", ext->decomp_cmdname, ext->decomp_options, TEST_RECON_FILE, TEST_RECON2_FILE); if ((ret = do_cmd (stream, buf))) { return ret; } if ((ret = test_compare_files (TEST_COPY_FILE, TEST_RECON2_FILE))) { return ret; } /* Encode with decompression disabled */ snprintf_func (buf, TESTBUFSIZE, "%s -e -D -vfq -s%s %s %s", program_name, TEST_SOURCE_FILE, TEST_TARGET_FILE, TEST_DELTA_FILE); if ((ret = do_cmd (stream, buf))) { return ret; } /* Decode the delta file with decompression disabled, should get the * identical compressed file out. */ snprintf_func (buf, TESTBUFSIZE, "%s -d -D -vfq -s%s %s %s", program_name, TEST_SOURCE_FILE, TEST_DELTA_FILE, TEST_RECON_FILE); if ((ret = do_cmd (stream, buf))) { return ret; } if ((ret = test_compare_files (TEST_TARGET_FILE, TEST_RECON_FILE))) { return ret; } test_cleanup(); return 0; } #endif /*********************************************************************** FORCE, STDOUT ***********************************************************************/ /* This tests that output will not overwrite an existing file unless * -f was specified. The test is for encoding (the same code handles * it for decoding). */ static int test_force_behavior (xd3_stream *stream, int ignore) { int ret; char buf[TESTBUFSIZE]; /* Create empty target file */ test_setup (); snprintf_func (buf, TESTBUFSIZE, "cp /dev/null %s", TEST_TARGET_FILE); if ((ret = do_cmd (stream, buf))) { return ret; } /* Encode to delta file */ snprintf_func (buf, TESTBUFSIZE, "%s -e %s %s", program_name, TEST_TARGET_FILE, TEST_DELTA_FILE); if ((ret = do_cmd (stream, buf))) { return ret; } /* Encode again, should fail. 
*/ snprintf_func (buf, TESTBUFSIZE, "%s -q -e %s %s ", program_name, TEST_TARGET_FILE, TEST_DELTA_FILE); if ((ret = do_fail (stream, buf))) { return ret; } /* Force it, should succeed. */ snprintf_func (buf, TESTBUFSIZE, "%s -f -e %s %s", program_name, TEST_TARGET_FILE, TEST_DELTA_FILE); if ((ret = do_cmd (stream, buf))) { return ret; } test_cleanup(); return 0; } /* This checks the proper operation of the -c flag. When specified * the default output becomes stdout, otherwise the input must be * provided (encode) or it may be defaulted (decode w/ app header). */ static int test_stdout_behavior (xd3_stream *stream, int ignore) { int ret; char buf[TESTBUFSIZE]; test_setup(); snprintf_func (buf, TESTBUFSIZE, "cp /dev/null %s", TEST_TARGET_FILE); if ((ret = do_cmd (stream, buf))) { return ret; } /* Without -c, encode writes to delta file */ snprintf_func (buf, TESTBUFSIZE, "%s -e %s %s", program_name, TEST_TARGET_FILE, TEST_DELTA_FILE); if ((ret = do_cmd (stream, buf))) { return ret; } /* With -c, encode writes to stdout */ snprintf_func (buf, TESTBUFSIZE, "%s -e -c %s > %s", program_name, TEST_TARGET_FILE, TEST_DELTA_FILE); if ((ret = do_cmd (stream, buf))) { return ret; } /* Without -c, decode writes to target file name, but it fails because the * file exists. */ snprintf_func (buf, TESTBUFSIZE, "%s -q -d %s ", program_name, TEST_DELTA_FILE); if ((ret = do_fail (stream, buf))) { return ret; } /* With -c, decode writes to stdout */ snprintf_func (buf, TESTBUFSIZE, "%s -d -c %s > /dev/null", program_name, TEST_DELTA_FILE); if ((ret = do_cmd (stream, buf))) { return ret; } test_cleanup(); return 0; } /* This tests that the no-output flag (-J) works. 
*/ static int test_no_output (xd3_stream *stream, int ignore) { int ret; char buf[TESTBUFSIZE]; test_setup (); snprintf_func (buf, TESTBUFSIZE, "touch %s && chmod 0000 %s", TEST_NOPERM_FILE, TEST_NOPERM_FILE); if ((ret = do_cmd (stream, buf))) { return ret; } if ((ret = test_make_inputs (stream, NULL, NULL))) { return ret; } /* Try no_output encode w/out unwritable output file */ snprintf_func (buf, TESTBUFSIZE, "%s -q -f -e %s %s", program_name, TEST_TARGET_FILE, TEST_NOPERM_FILE); if ((ret = do_fail (stream, buf))) { return ret; } snprintf_func (buf, TESTBUFSIZE, "%s -J -e %s %s", program_name, TEST_TARGET_FILE, TEST_NOPERM_FILE); if ((ret = do_cmd (stream, buf))) { return ret; } /* Now really write the delta to test decode no-output */ snprintf_func (buf, TESTBUFSIZE, "%s -e %s %s", program_name, TEST_TARGET_FILE, TEST_DELTA_FILE); if ((ret = do_cmd (stream, buf))) { return ret; } snprintf_func (buf, TESTBUFSIZE, "%s -q -f -d %s %s", program_name, TEST_DELTA_FILE, TEST_NOPERM_FILE); if ((ret = do_fail (stream, buf))) { return ret; } snprintf_func (buf, TESTBUFSIZE, "%s -J -d %s %s", program_name, TEST_DELTA_FILE, TEST_NOPERM_FILE); if ((ret = do_cmd (stream, buf))) { return ret; } test_cleanup (); return 0; } /* This tests that the default appheader works */ static int test_appheader (xd3_stream *stream, int ignore) { int i; int ret; char buf[TESTBUFSIZE]; char bogus[TESTBUFSIZE]; xoff_t ssize, tsize; test_setup (); if ((ret = test_make_inputs (stream, &ssize, &tsize))) { return ret; } snprintf_func (buf, TESTBUFSIZE, "%s -q -f -e -s %s %s %s", program_name, TEST_SOURCE_FILE, TEST_TARGET_FILE, TEST_DELTA_FILE); if ((ret = do_cmd (stream, buf))) { return ret; } if ((ret = test_copy_to (program_name, TEST_RECON2_FILE))) { return ret; } snprintf_func (buf, TESTBUFSIZE, "chmod 0700 %s", TEST_RECON2_FILE); if ((ret = do_cmd (stream, buf))) { return ret; } if ((ret = test_save_copy (TEST_TARGET_FILE))) { return ret; } if ((ret = test_copy_to (TEST_SOURCE_FILE, 
TEST_TARGET_FILE))) { return ret; } if ((ret = test_compare_files (TEST_TARGET_FILE, TEST_COPY_FILE)) == 0) { return XD3_INVALID; // I.e., files are different! } // Test that the target file is restored. snprintf_func (buf, TESTBUFSIZE, "(cd /tmp && %s -q -f -d %s)", TEST_RECON2_FILE, TEST_DELTA_FILE); if ((ret = do_cmd (stream, buf))) { return ret; } if ((ret = test_compare_files (TEST_TARGET_FILE, TEST_COPY_FILE)) != 0) { return ret; } // Test a malicious string w/ entries > 4 in the appheader by having // the encoder write it: for (i = 0; i < TESTBUFSIZE / 4; ++i) { bogus[2*i] = 'G'; bogus[2*i+1] = '/'; } bogus[TESTBUFSIZE/2-1] = 0; snprintf_func (buf, TESTBUFSIZE, "%s -q -f -A=%s -e -s %s %s %s", program_name, bogus, TEST_SOURCE_FILE, TEST_TARGET_FILE, TEST_DELTA_FILE); if ((ret = do_cmd (stream, buf))) { return ret; } // Then read it: snprintf_func (buf, TESTBUFSIZE, "(cd /tmp && %s -q -f -d %s)", TEST_RECON2_FILE, TEST_DELTA_FILE); if ((ret = do_cmd (stream, buf)) == 0) { return XD3_INVALID; // Impossible } if (!WIFEXITED(ret)) { return XD3_INVALID; // Must have crashed! } return 0; } /*********************************************************************** Source identical optimization ***********************************************************************/ /* Computing a delta should be fastest when the two inputs are * identical, this checks it. The library is called to compute a * delta between a 10000 byte file, 1000 byte winsize, 500 byte source * blocksize. The same buffer is used for both source and target. */ static int test_identical_behavior (xd3_stream *stream, int ignore) { #define IDB_TGTSZ 10000 /* Not a power of two b/c of hard-coded expectations below. 
*/ #define IDB_BLKSZ 512 #define IDB_WINSZ 1000 #define IDB_DELSZ 1000 #define IDB_WINCNT (IDB_TGTSZ / IDB_WINSZ) int ret, i; uint8_t buf[IDB_TGTSZ]; uint8_t del[IDB_DELSZ]; uint8_t rec[IDB_TGTSZ]; xd3_source source; int nextencwin = 0; int winstarts = 0, winfinishes = 0; usize_t delpos = 0, recsize; xd3_config config; memset(&source, 0, sizeof(source)); for (i = 0; i < IDB_TGTSZ; i += 1) { buf[i] = (uint8_t) mt_random (&static_mtrand); } stream->winsize = IDB_WINSZ; source.blksize = IDB_BLKSZ; source.name = ""; source.curblk = NULL; source.curblkno = 0; if ((ret = xd3_set_source (stream, & source))) { goto fail; } /* Compute an delta between identical source and targets. */ for (;;) { ret = xd3_encode_input (stream); if (ret == XD3_INPUT) { xd3_avail_input (stream, buf + (IDB_WINSZ * nextencwin), IDB_WINSZ); nextencwin += 1; continue; } if (ret == XD3_GETSRCBLK) { source.curblkno = source.getblkno; source.onblk = IDB_BLKSZ; source.curblk = buf + source.getblkno * IDB_BLKSZ; continue; } if (ret == XD3_WINSTART) { winstarts++; continue; } if (ret == XD3_WINFINISH) { winfinishes++; if (winfinishes == IDB_WINCNT) { break; } continue; } if (ret != XD3_OUTPUT) { goto fail; } CHECK(delpos + stream->avail_out <= IDB_DELSZ); memcpy (del + delpos, stream->next_out, stream->avail_out); delpos += stream->avail_out; xd3_consume_output (stream); } CHECK(winfinishes == IDB_WINCNT); CHECK(winstarts == IDB_WINCNT); CHECK(nextencwin == IDB_WINCNT); /* Reset. */ memset(&source, 0, sizeof(source)); source.blksize = IDB_TGTSZ; source.onblk = IDB_TGTSZ; source.curblk = buf; source.curblkno = 0; if ((ret = xd3_close_stream (stream))) { goto fail; } xd3_free_stream (stream); xd3_init_config (& config, 0); if ((ret = xd3_config_stream (stream, & config))) { goto fail; } if ((ret = xd3_set_source_and_size (stream, & source, IDB_TGTSZ))) { goto fail; } /* Decode. */ if ((ret = xd3_decode_stream (stream, del, delpos, rec, & recsize, IDB_TGTSZ))) { goto fail; } /* Check result size and data. 
*/ if (recsize != IDB_TGTSZ) { stream->msg = "wrong size reconstruction"; goto fail; } if (memcmp (rec, buf, IDB_TGTSZ) != 0) { stream->msg = "wrong data reconstruction"; goto fail; } /* Check that there was one copy per window. */ IF_DEBUG (if (stream->n_scpy != IDB_WINCNT || stream->n_add != 0 || stream->n_run != 0) { stream->msg = "wrong copy count"; goto fail; }); /* Check that no checksums were computed because the initial match was presumed. */ IF_DEBUG (if (stream->large_ckcnt != 0) { stream->msg = "wrong checksum behavior"; goto fail; }); ret = 0; fail: return ret; } /*********************************************************************** String matching test ***********************************************************************/ /* Check particular matching behaviors by calling * xd3_string_match_soft directly with specific arguments. */ typedef struct _string_match_test string_match_test; typedef enum { SM_NONE = 0, SM_LAZY = (1 << 1), } string_match_flags; struct _string_match_test { const char *input; int flags; const char *result; }; static const string_match_test match_tests[] = { /* nothing */ { "1234567890", SM_NONE, "" }, /* basic run, copy */ { "11111111112323232323", SM_NONE, "R0/10 C12/8@10" }, /* no run smaller than MIN_RUN=8 */ { "1111111", SM_NONE, "C1/6@0" }, { "11111111", SM_NONE, "R0/8" }, /* simple promotion: the third copy address depends on promotion */ { "ABCDEF_ABCDEF^ABCDEF", SM_NONE, "C7/6@0 C14/6@7" }, /* { "ABCDEF_ABCDEF^ABCDEF", SM_PROMOTE, "C7/6@0 C14/6@0" }, forgotten */ /* simple lazy: there is a better copy starting with "23 X" than "123 " */ { "123 23 XYZ 123 XYZ", SM_NONE, "C11/4@0" }, { "123 23 XYZ 123 XYZ", SM_LAZY, "C11/4@0 C12/6@4" }, /* trylazy: no lazy matches unless there are at least two characters beyond * the first match */ { "2123_121212", SM_LAZY, "C7/4@5" }, { "2123_1212123", SM_LAZY, "C7/4@5" }, { "2123_1212123_", SM_LAZY, "C7/4@5 C8/5@0" }, /* trylazy: no lazy matches if the copy is >= MAXLAZY=10 */ { 
"2123_121212123_", SM_LAZY, "C7/6@5 C10/5@0" }, { "2123_12121212123_", SM_LAZY, "C7/8@5 C12/5@0" }, { "2123_1212121212123_", SM_LAZY, "C7/10@5" }, /* lazy run: check a run overlapped by a longer copy */ { "11111112 111111112 1", SM_LAZY, "C1/6@0 R9/8 C10/10@0" }, /* lazy match: match_length,run_l >= min_match tests, shouldn't get any * copies within the run, no run within the copy */ { "^________^________ ", SM_LAZY, "R1/8 C9/9@0" }, /* chain depth: it only goes back 10. this checks that the 10th match hits * and the 11th misses. */ { "1234 1234_1234-1234=1234+1234[1234]1234{1234}1234<1234 ", SM_NONE, "C5/4@0 C10/4@5 C15/4@10 C20/4@15 C25/4@20 C30/4@25 C35/4@30 C40/4@35 C45/4@40 C50/5@0" }, { "1234 1234_1234-1234=1234+1234[1234]1234{1234}1234<1234>1234 ", SM_NONE, "C5/4@0 C10/4@5 C15/4@10 C20/4@15 C25/4@20 C30/4@25 C35/4@30 C40/4@35 C45/4@40 C50/4@45 C55/4@50" }, /* ssmatch test */ { "ABCDE___ABCDE*** BCDE***", SM_NONE, "C8/5@0 C17/4@1" }, /*{ "ABCDE___ABCDE*** BCDE***", SM_SSMATCH, "C8/5@0 C17/7@9" }, forgotten */ }; static int test_string_matching (xd3_stream *stream, int ignore) { usize_t i; int ret; xd3_config config; char rbuf[TESTBUFSIZE]; for (i = 0; i < SIZEOF_ARRAY (match_tests); i += 1) { const string_match_test *test = & match_tests[i]; char *rptr = rbuf; usize_t len = (usize_t) strlen (test->input); xd3_free_stream (stream); xd3_init_config (& config, 0); config.smatch_cfg = XD3_SMATCH_SOFT; config.smatcher_soft.large_look = 4; config.smatcher_soft.large_step = 4; config.smatcher_soft.small_look = 4; config.smatcher_soft.small_chain = 10; config.smatcher_soft.small_lchain = 10; config.smatcher_soft.max_lazy = (test->flags & SM_LAZY) ? 10 : 0; config.smatcher_soft.long_enough = 10; if ((ret = xd3_config_stream (stream, & config))) { return ret; } if ((ret = xd3_encode_init_full (stream))) { return ret; } xd3_avail_input (stream, (uint8_t*)test->input, len); if ((ret = stream->smatcher.string_match (stream))) { return ret; } *rptr = 0; while (! 
xd3_rlist_empty (& stream->iopt_used)) { xd3_rinst *inst = xd3_rlist_pop_front (& stream->iopt_used); switch (inst->type) { case XD3_RUN: *rptr++ = 'R'; break; case XD3_CPY: *rptr++ = 'C'; break; default: CHECK(0); } snprintf_func (rptr, rbuf+TESTBUFSIZE-rptr, "%d/%d", inst->pos, inst->size); rptr += strlen (rptr); if (inst->type == XD3_CPY) { *rptr++ = '@'; snprintf_func (rptr, rbuf+TESTBUFSIZE-rptr, "%"Q"d", inst->addr); rptr += strlen (rptr); } *rptr++ = ' '; xd3_rlist_push_back (& stream->iopt_free, inst); } if (rptr != rbuf) { rptr -= 1; *rptr = 0; } if (strcmp (rbuf, test->result) != 0) { XPR(NT "test %u: expected %s: got %s", i, test->result, rbuf); stream->msg = "wrong result"; return XD3_INTERNAL; } } return 0; } /* * This is a test for many overlapping instructions. It must be a lazy * matcher. */ static int test_iopt_flush_instructions (xd3_stream *stream, int ignore) { int ret, i; usize_t tpos = 0; usize_t delta_size, recon_size; xd3_config config; uint8_t target[TESTBUFSIZE]; uint8_t delta[TESTBUFSIZE]; uint8_t recon[TESTBUFSIZE]; xd3_free_stream (stream); xd3_init_config (& config, 0); config.smatch_cfg = XD3_SMATCH_SOFT; config.smatcher_soft.large_look = 16; config.smatcher_soft.large_step = 16; config.smatcher_soft.small_look = 4; config.smatcher_soft.small_chain = 128; config.smatcher_soft.small_lchain = 16; config.smatcher_soft.max_lazy = 8; config.smatcher_soft.long_enough = 128; if ((ret = xd3_config_stream (stream, & config))) { return ret; } for (i = 1; i < 250; i++) { target[tpos++] = i; target[tpos++] = i+1; target[tpos++] = i+2; target[tpos++] = i+3; target[tpos++] = 0; } for (i = 1; i < 253; i++) { target[tpos++] = i; } if ((ret = xd3_encode_stream (stream, target, tpos, delta, & delta_size, sizeof (delta)))) { return ret; } xd3_free_stream(stream); if ((ret = xd3_config_stream (stream, & config))) { return ret; } if ((ret = xd3_decode_stream (stream, delta, delta_size, recon, & recon_size, sizeof (recon)))) { return ret; } CHECK(tpos == 
recon_size); CHECK(memcmp(target, recon, recon_size) == 0); return 0; } /* * This tests the 32/64bit ambiguity for source-window matching. */ static int test_source_cksum_offset (xd3_stream *stream, int ignore) { xd3_source source; // Inputs are: struct { xoff_t cpos; // stream->srcwin_cksum_pos; xoff_t ipos; // stream->total_in; xoff_t size; // stream->src->size; usize_t input; // input 32-bit offset xoff_t output; // output 64-bit offset } cksum_test[] = { // If cpos is <= 2^32 { 1, 1, 1, 1, 1 }, #if XD3_USE_LARGEFILE64 // cpos ipos size input output // 0x____xxxxxULL, 0x____xxxxxULL, 0x____xxxxxULL, 0x___xxxxxUL, 0x____xxxxxULL { 0x100100000ULL, 0x100000000ULL, 0x100200000ULL, 0x00000000UL, 0x100000000ULL }, { 0x100100000ULL, 0x100000000ULL, 0x100200000ULL, 0xF0000000UL, 0x0F0000000ULL }, { 0x100200000ULL, 0x100100000ULL, 0x100200000ULL, 0x00300000UL, 0x000300000ULL }, { 25771983104ULL, 25770000000ULL, 26414808769ULL, 2139216707UL, 23614053187ULL }, #endif { 0, 0, 0, 0, 0 }, }, *test_ptr; stream->src = &source; for (test_ptr = cksum_test; test_ptr->cpos; test_ptr++) { xoff_t r; stream->srcwin_cksum_pos = test_ptr->cpos; stream->total_in = test_ptr->ipos; r = xd3_source_cksum_offset(stream, test_ptr->input); CHECK(r == test_ptr->output); } return 0; } static int test_in_memory (xd3_stream *stream, int ignore) { // test_text is 256 bytes uint8_t ibuf[sizeof(test_text)]; uint8_t dbuf[sizeof(test_text)]; uint8_t obuf[sizeof(test_text)]; usize_t size = sizeof(test_text); usize_t dsize, osize; int r1, r2; int eflags = SECONDARY_DJW ? 
XD3_SEC_DJW : 0; memcpy(ibuf, test_text, size); memset(ibuf + 128, 0, 16); r1 = xd3_encode_memory(ibuf, size, test_text, size, dbuf, &dsize, size, eflags); r2 = xd3_decode_memory(dbuf, dsize, test_text, size, obuf, &osize, size, 0); if (r1 != 0 || r2 != 0 || dsize >= (size/2) || dsize < 1 || osize != size) { stream->msg = "encode/decode size error"; return XD3_INTERNAL; } if (memcmp(obuf, ibuf, size) != 0) { stream->msg = "encode/decode data error"; return XD3_INTERNAL; } return 0; } /*********************************************************************** TEST MAIN ***********************************************************************/ static int xd3_selftest (void) { #define DO_TEST(fn,flags,arg) \ do { \ xd3_stream stream; \ xd3_config config; \ xd3_init_config (& config, flags); \ XPR(NT "testing " #fn "%s...", \ flags ? (" (" #flags ")") : ""); \ if ((ret = xd3_config_stream (& stream, & config) == 0) && \ (ret = test_ ## fn (& stream, arg)) == 0) { \ XPR(NTR " success\n"); \ } else { \ XPR(NTR " failed: %s: %s\n", xd3_errstring (& stream), \ xd3_mainerror (ret)); } \ xd3_free_stream (& stream); \ if (ret != 0) { goto failure; } \ } while (0) int ret; DO_TEST (random_numbers, 0, 0); DO_TEST (decode_integer_end_of_input, 0, 0); DO_TEST (decode_integer_overflow, 0, 0); DO_TEST (encode_decode_uint32_t, 0, 0); DO_TEST (encode_decode_uint64_t, 0, 0); DO_TEST (usize_t_overflow, 0, 0); DO_TEST (forward_match, 0, 0); DO_TEST (address_cache, 0, 0); IF_GENCODETBL (DO_TEST (address_cache, XD3_ALT_CODE_TABLE, 0)); DO_TEST (string_matching, 0, 0); DO_TEST (choose_instruction, 0, 0); DO_TEST (identical_behavior, 0, 0); DO_TEST (in_memory, 0, 0); IF_GENCODETBL (DO_TEST (choose_instruction, XD3_ALT_CODE_TABLE, 0)); IF_GENCODETBL (DO_TEST (encode_code_table, 0, 0)); DO_TEST (iopt_flush_instructions, 0, 0); DO_TEST (source_cksum_offset, 0, 0); DO_TEST (decompress_single_bit_error, 0, 3); DO_TEST (decompress_single_bit_error, XD3_ADLER32, 3); IF_LZMA (DO_TEST 
(decompress_single_bit_error, XD3_SEC_LZMA, 54)); IF_FGK (DO_TEST (decompress_single_bit_error, XD3_SEC_FGK, 3)); IF_DJW (DO_TEST (decompress_single_bit_error, XD3_SEC_DJW, 8)); /* There are many expected non-failures for ALT_CODE_TABLE because * not all of the instruction codes are used. */ IF_GENCODETBL ( DO_TEST (decompress_single_bit_error, XD3_ALT_CODE_TABLE, 224)); #if SHELL_TESTS DO_TEST (force_behavior, 0, 0); DO_TEST (stdout_behavior, 0, 0); DO_TEST (no_output, 0, 0); DO_TEST (appheader, 0, 0); DO_TEST (command_line_arguments, 0, 0); #if EXTERNAL_COMPRESSION DO_TEST (source_decompression, 0, 0); DO_TEST (externally_compressed_io, 0, 0); #endif DO_TEST (recode_command, 0, 0); #endif IF_LZMA (DO_TEST (secondary_lzma, 0, 1)); IF_DJW (DO_TEST (secondary_huff, 0, DJW_MAX_GROUPS)); IF_FGK (DO_TEST (secondary_fgk, 0, 1)); DO_TEST (compressed_stream_overflow, 0, 0); IF_LZMA (DO_TEST (compressed_stream_overflow, XD3_SEC_LZMA, 0)); failure: test_cleanup (); return ret == 0 ? EXIT_SUCCESS : EXIT_FAILURE; #undef DO_TEST }
static int do_cmd (xd3_stream *stream, const char *buf) { int ret; if ((ret = system (buf)) != 0) { if (WIFEXITED (ret)) { stream->msg = "command exited non-zero"; IF_DEBUG1 (XPR(NT "command was: %s\n", buf)); } else { stream->msg = "abnormal command termination"; } return XD3_INTERNAL; } return 0; }
static int do_cmd (xd3_stream *stream, const char *buf) { int ret; if ((ret = system (buf)) != 0) { if (WIFEXITED (ret)) { stream->msg = "command exited non-zero"; IF_DEBUG1 (XPR(NT "command was: %s\n", buf)); } else { stream->msg = "abnormal command termination"; } return ret; } return 0; }
{'added': [(2, ' * 2003, 2004, 2005, 2006, 2007, 2008, 2009, 2010, 2011, 2012.'), (57, '\t(1812433253UL * (mt->mt_buffer_[i-1] ^'), (72, ' y = (mt->mt_buffer_[kk] & UPPER_MASK) |'), (74, ' mt->mt_buffer_[kk] = mt->mt_buffer_[kk + MT_IA] ^'), (78, ' y = (mt->mt_buffer_[kk] & UPPER_MASK) |'), (80, ' mt->mt_buffer_[kk] = mt->mt_buffer_[kk + (MT_IA - MT_LEN)] ^'), (83, ' y = (mt->mt_buffer_[MT_LEN - 1] & UPPER_MASK) |'), (85, ' mt->mt_buffer_[MT_LEN - 1] = mt->mt_buffer_[MT_IA - 1] ^'), (169, ' return ret;'), (260, ' usize_t ts = (mt_random (&static_mtrand) % TEST_FILE_MEAN) +'), (261, ' TEST_FILE_MEAN / 2;'), (262, ' usize_t ss = (mt_random (&static_mtrand) % TEST_FILE_MEAN) +'), (263, ' TEST_FILE_MEAN / 2;'), (414, '\t XPR(NT "byte %u (read %u @ %"Q"u) %d != %d\\n",'), (426, ' if (diffs != 0)'), (434, 'test_copy_to (const char *from, const char *to)'), (439, ' snprintf_func (buf, TESTBUFSIZE, "cp -f %s %s", from, to);'), (449, 'static int'), (450, 'test_save_copy (const char *origname)'), (451, '{'), (452, ' return test_copy_to(origname, TEST_COPY_FILE);'), (453, '}'), (454, ''), (510, ' if ((ret = xd3_read_uint32_t (stream, & inp, max, & rval)) !='), (1665, ' for (i = 0; i < (2 << 20); i += 256)'), (1669, ' for (j = 0; j < 256; j++)'), (1694, ' if ((ret = test_streaming (stream,'), (1695, '\t\t\t buf,'), (1696, '\t\t\t buf + (1 << 20),'), (1697, '\t\t\t buf + (2 << 20),'), (1698, '\t\t\t 1 << 12)))'), (1900, ' snprintf_func (ecmd, TESTBUFSIZE, "%s %s -f %s %s %s %s %s %s %s",'), (1921, '\t !change_apphead ? 
"" :'), (2372, '/* This tests that the default appheader works */'), (2373, 'static int'), (2374, 'test_appheader (xd3_stream *stream, int ignore)'), (2375, '{'), (2376, ' int i;'), (2377, ' int ret;'), (2378, ' char buf[TESTBUFSIZE];'), (2379, ' char bogus[TESTBUFSIZE];'), (2380, ' xoff_t ssize, tsize;'), (2381, ' test_setup ();'), (2382, ''), (2383, ' if ((ret = test_make_inputs (stream, &ssize, &tsize))) { return ret; }'), (2384, ''), (2385, ' snprintf_func (buf, TESTBUFSIZE, "%s -q -f -e -s %s %s %s", program_name,'), (2386, '\t\t TEST_SOURCE_FILE, TEST_TARGET_FILE, TEST_DELTA_FILE);'), (2387, ' if ((ret = do_cmd (stream, buf))) { return ret; }'), (2388, ''), (2389, ' if ((ret = test_copy_to (program_name, TEST_RECON2_FILE))) { return ret; }'), (2390, ''), (2391, ' snprintf_func (buf, TESTBUFSIZE, "chmod 0700 %s", TEST_RECON2_FILE);'), (2392, ' if ((ret = do_cmd (stream, buf))) { return ret; }'), (2393, ''), (2394, ' if ((ret = test_save_copy (TEST_TARGET_FILE))) { return ret; }'), (2395, ' if ((ret = test_copy_to (TEST_SOURCE_FILE, TEST_TARGET_FILE))) { return ret; }'), (2396, ''), (2397, ' if ((ret = test_compare_files (TEST_TARGET_FILE, TEST_COPY_FILE)) == 0)'), (2398, ' {'), (2399, ' return XD3_INVALID; // I.e., files are different!'), (2400, ' }'), (2401, ''), (2402, ' // Test that the target file is restored.'), (2403, ' snprintf_func (buf, TESTBUFSIZE, "(cd /tmp && %s -q -f -d %s)",'), (2404, '\t\t TEST_RECON2_FILE,'), (2405, '\t\t TEST_DELTA_FILE);'), (2406, ' if ((ret = do_cmd (stream, buf))) { return ret; }'), (2407, ''), (2408, ' if ((ret = test_compare_files (TEST_TARGET_FILE, TEST_COPY_FILE)) != 0)'), (2409, ' {'), (2410, ' return ret;'), (2411, ' }'), (2412, ''), (2413, ' // Test a malicious string w/ entries > 4 in the appheader by having'), (2414, ' // the encoder write it:'), (2415, ' for (i = 0; i < TESTBUFSIZE / 4; ++i)'), (2416, ' {'), (2417, " bogus[2*i] = 'G';"), (2418, " bogus[2*i+1] = '/';"), (2419, ' }'), (2420, ' bogus[TESTBUFSIZE/2-1] 
= 0;'), (2421, ''), (2422, ' snprintf_func (buf, TESTBUFSIZE,'), (2423, '\t\t "%s -q -f -A=%s -e -s %s %s %s", program_name, bogus,'), (2424, '\t\t TEST_SOURCE_FILE, TEST_TARGET_FILE, TEST_DELTA_FILE);'), (2425, ' if ((ret = do_cmd (stream, buf))) { return ret; }'), (2426, ' // Then read it:'), (2427, ' snprintf_func (buf, TESTBUFSIZE, "(cd /tmp && %s -q -f -d %s)",'), (2428, '\t\t TEST_RECON2_FILE,'), (2429, '\t\t TEST_DELTA_FILE);'), (2430, ' if ((ret = do_cmd (stream, buf)) == 0)'), (2431, ' {'), (2432, ' return XD3_INVALID; // Impossible'), (2433, ' }'), (2434, ' if (!WIFEXITED(ret))'), (2435, ' {'), (2436, ' return XD3_INVALID; // Must have crashed!'), (2437, ' }'), (2438, ''), (2439, ' return 0;'), (2440, '}'), (2441, ''), (2684, '\t snprintf_func (rptr, rbuf+TESTBUFSIZE-rptr, "%d/%d",'), (2929, ' DO_TEST (appheader, 0, 0);')], 'deleted': [(2, ' * 2003, 2004, 2005, 2006, 2007, 2008, 2009, 2010, 2011, 2012.'), (57, '\t(1812433253UL * (mt->mt_buffer_[i-1] ^'), (72, ' y = (mt->mt_buffer_[kk] & UPPER_MASK) |'), (74, ' mt->mt_buffer_[kk] = mt->mt_buffer_[kk + MT_IA] ^'), (78, ' y = (mt->mt_buffer_[kk] & UPPER_MASK) |'), (80, ' mt->mt_buffer_[kk] = mt->mt_buffer_[kk + (MT_IA - MT_LEN)] ^'), (83, ' y = (mt->mt_buffer_[MT_LEN - 1] & UPPER_MASK) |'), (85, ' mt->mt_buffer_[MT_LEN - 1] = mt->mt_buffer_[MT_IA - 1] ^'), (169, ' return XD3_INTERNAL;'), (260, ' usize_t ts = (mt_random (&static_mtrand) % TEST_FILE_MEAN) + TEST_FILE_MEAN / 2;'), (261, ' usize_t ss = (mt_random (&static_mtrand) % TEST_FILE_MEAN) + TEST_FILE_MEAN / 2;'), (412, '\t XPR(NT "byte %u (read %u @ %"Q"u) %d != %d\\n",'), (424, ' if (diffs != 0)'), (432, 'test_save_copy (const char *origname)'), (437, ' snprintf_func (buf, TESTBUFSIZE, "cp -f %s %s", origname, TEST_COPY_FILE);'), (502, ' if ((ret = xd3_read_uint32_t (stream, & inp, max, & rval)) !='), (1657, ' for (i = 0; i < (2 << 20); i += 256)'), (1661, ' for (j = 0; j < 256; j++)'), (1686, ' if ((ret = test_streaming (stream,'), (1687, '\t\t\t 
buf,'), (1688, '\t\t\t buf + (1 << 20),'), (1689, '\t\t\t buf + (2 << 20),'), (1690, '\t\t\t 1 << 12)))'), (1892, ' snprintf_func (ecmd, TESTBUFSIZE, "%s %s -f %s %s %s %s %s %s %s",'), (1913, '\t !change_apphead ? "" :'), (2606, '\t snprintf_func (rptr, rbuf+TESTBUFSIZE-rptr, "%d/%d",')]}
105
26
2,023
12,828
18
72
3
https://github.com/jmacd/xdelta-devel
CVE-2014-9765
CWE-119
3,066
cdf.c
C
cdf_read_short_sector
/*- * Copyright (c) 2008 Christos Zoulas * All rights reserved. * * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following conditions * are met: * 1. Redistributions of source code must retain the above copyright * notice, this list of conditions and the following disclaimer. * 2. Redistributions in binary form must reproduce the above copyright * notice, this list of conditions and the following disclaimer in the * documentation and/or other materials provided with the distribution. * * THIS SOFTWARE IS PROVIDED BY THE NETBSD FOUNDATION, INC. AND CONTRIBUTORS * ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED * TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE FOUNDATION OR CONTRIBUTORS * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE * POSSIBILITY OF SUCH DAMAGE. */ /* * Parse Composite Document Files, the format used in Microsoft Office * document files before they switched to zipped XML. * Info from: http://sc.openoffice.org/compdocfileformat.pdf * * N.B. This is the "Composite Document File" format, and not the * "Compound Document Format", nor the "Channel Definition Format". 
*/ #include "file.h" #ifndef lint FILE_RCSID("@(#)$File: cdf.c,v 1.45 2011/08/28 08:38:48 christos Exp $") #endif #include <assert.h> #ifdef CDF_DEBUG #include <err.h> #endif #include <stdlib.h> #include <unistd.h> #include <string.h> #include <time.h> #include <ctype.h> #ifdef HAVE_LIMITS_H #include <limits.h> #endif #ifndef EFTYPE #define EFTYPE EINVAL #endif #include "cdf.h" #ifdef CDF_DEBUG #define DPRINTF(a) printf a, fflush(stdout) #else #define DPRINTF(a) #endif static union { char s[4]; uint32_t u; } cdf_bo; #define NEED_SWAP (cdf_bo.u == (uint32_t)0x01020304) #define CDF_TOLE8(x) ((uint64_t)(NEED_SWAP ? _cdf_tole8(x) : (uint64_t)(x))) #define CDF_TOLE4(x) ((uint32_t)(NEED_SWAP ? _cdf_tole4(x) : (uint32_t)(x))) #define CDF_TOLE2(x) ((uint16_t)(NEED_SWAP ? _cdf_tole2(x) : (uint16_t)(x))) #define CDF_GETUINT32(x, y) cdf_getuint32(x, y) /* * swap a short */ static uint16_t _cdf_tole2(uint16_t sv) { uint16_t rv; uint8_t *s = (uint8_t *)(void *)&sv; uint8_t *d = (uint8_t *)(void *)&rv; d[0] = s[1]; d[1] = s[0]; return rv; } /* * swap an int */ static uint32_t _cdf_tole4(uint32_t sv) { uint32_t rv; uint8_t *s = (uint8_t *)(void *)&sv; uint8_t *d = (uint8_t *)(void *)&rv; d[0] = s[3]; d[1] = s[2]; d[2] = s[1]; d[3] = s[0]; return rv; } /* * swap a quad */ static uint64_t _cdf_tole8(uint64_t sv) { uint64_t rv; uint8_t *s = (uint8_t *)(void *)&sv; uint8_t *d = (uint8_t *)(void *)&rv; d[0] = s[7]; d[1] = s[6]; d[2] = s[5]; d[3] = s[4]; d[4] = s[3]; d[5] = s[2]; d[6] = s[1]; d[7] = s[0]; return rv; } /* * grab a uint32_t from a possibly unaligned address, and return it in * the native host order. 
*/ static uint32_t cdf_getuint32(const uint8_t *p, size_t offs) { uint32_t rv; (void)memcpy(&rv, p + offs * sizeof(uint32_t), sizeof(rv)); return CDF_TOLE4(rv); } #define CDF_UNPACK(a) \ (void)memcpy(&(a), &buf[len], sizeof(a)), len += sizeof(a) #define CDF_UNPACKA(a) \ (void)memcpy((a), &buf[len], sizeof(a)), len += sizeof(a) uint16_t cdf_tole2(uint16_t sv) { return CDF_TOLE2(sv); } uint32_t cdf_tole4(uint32_t sv) { return CDF_TOLE4(sv); } uint64_t cdf_tole8(uint64_t sv) { return CDF_TOLE8(sv); } void cdf_swap_header(cdf_header_t *h) { size_t i; h->h_magic = CDF_TOLE8(h->h_magic); h->h_uuid[0] = CDF_TOLE8(h->h_uuid[0]); h->h_uuid[1] = CDF_TOLE8(h->h_uuid[1]); h->h_revision = CDF_TOLE2(h->h_revision); h->h_version = CDF_TOLE2(h->h_version); h->h_byte_order = CDF_TOLE2(h->h_byte_order); h->h_sec_size_p2 = CDF_TOLE2(h->h_sec_size_p2); h->h_short_sec_size_p2 = CDF_TOLE2(h->h_short_sec_size_p2); h->h_num_sectors_in_sat = CDF_TOLE4(h->h_num_sectors_in_sat); h->h_secid_first_directory = CDF_TOLE4(h->h_secid_first_directory); h->h_min_size_standard_stream = CDF_TOLE4(h->h_min_size_standard_stream); h->h_secid_first_sector_in_short_sat = CDF_TOLE4((uint32_t)h->h_secid_first_sector_in_short_sat); h->h_num_sectors_in_short_sat = CDF_TOLE4(h->h_num_sectors_in_short_sat); h->h_secid_first_sector_in_master_sat = CDF_TOLE4((uint32_t)h->h_secid_first_sector_in_master_sat); h->h_num_sectors_in_master_sat = CDF_TOLE4(h->h_num_sectors_in_master_sat); for (i = 0; i < __arraycount(h->h_master_sat); i++) h->h_master_sat[i] = CDF_TOLE4((uint32_t)h->h_master_sat[i]); } void cdf_unpack_header(cdf_header_t *h, char *buf) { size_t i; size_t len = 0; CDF_UNPACK(h->h_magic); CDF_UNPACKA(h->h_uuid); CDF_UNPACK(h->h_revision); CDF_UNPACK(h->h_version); CDF_UNPACK(h->h_byte_order); CDF_UNPACK(h->h_sec_size_p2); CDF_UNPACK(h->h_short_sec_size_p2); CDF_UNPACKA(h->h_unused0); CDF_UNPACK(h->h_num_sectors_in_sat); CDF_UNPACK(h->h_secid_first_directory); CDF_UNPACKA(h->h_unused1); 
CDF_UNPACK(h->h_min_size_standard_stream); CDF_UNPACK(h->h_secid_first_sector_in_short_sat); CDF_UNPACK(h->h_num_sectors_in_short_sat); CDF_UNPACK(h->h_secid_first_sector_in_master_sat); CDF_UNPACK(h->h_num_sectors_in_master_sat); for (i = 0; i < __arraycount(h->h_master_sat); i++) CDF_UNPACK(h->h_master_sat[i]); } void cdf_swap_dir(cdf_directory_t *d) { d->d_namelen = CDF_TOLE2(d->d_namelen); d->d_left_child = CDF_TOLE4((uint32_t)d->d_left_child); d->d_right_child = CDF_TOLE4((uint32_t)d->d_right_child); d->d_storage = CDF_TOLE4((uint32_t)d->d_storage); d->d_storage_uuid[0] = CDF_TOLE8(d->d_storage_uuid[0]); d->d_storage_uuid[1] = CDF_TOLE8(d->d_storage_uuid[1]); d->d_flags = CDF_TOLE4(d->d_flags); d->d_created = CDF_TOLE8((uint64_t)d->d_created); d->d_modified = CDF_TOLE8((uint64_t)d->d_modified); d->d_stream_first_sector = CDF_TOLE4((uint32_t)d->d_stream_first_sector); d->d_size = CDF_TOLE4(d->d_size); } void cdf_swap_class(cdf_classid_t *d) { d->cl_dword = CDF_TOLE4(d->cl_dword); d->cl_word[0] = CDF_TOLE2(d->cl_word[0]); d->cl_word[1] = CDF_TOLE2(d->cl_word[1]); } void cdf_unpack_dir(cdf_directory_t *d, char *buf) { size_t len = 0; CDF_UNPACKA(d->d_name); CDF_UNPACK(d->d_namelen); CDF_UNPACK(d->d_type); CDF_UNPACK(d->d_color); CDF_UNPACK(d->d_left_child); CDF_UNPACK(d->d_right_child); CDF_UNPACK(d->d_storage); CDF_UNPACKA(d->d_storage_uuid); CDF_UNPACK(d->d_flags); CDF_UNPACK(d->d_created); CDF_UNPACK(d->d_modified); CDF_UNPACK(d->d_stream_first_sector); CDF_UNPACK(d->d_size); CDF_UNPACK(d->d_unused0); } static int cdf_check_stream_offset(const cdf_stream_t *sst, const cdf_header_t *h, const void *p, size_t tail, int line) { const char *b = (const char *)sst->sst_tab; const char *e = ((const char *)p) + tail; (void)&line; if (e >= b && (size_t)(e - b) < CDF_SEC_SIZE(h) * sst->sst_len) return 0; DPRINTF(("%d: offset begin %p end %p %" SIZE_T_FORMAT "u" " >= %" SIZE_T_FORMAT "u [%" SIZE_T_FORMAT "u %" SIZE_T_FORMAT "u]\n", line, b, e, (size_t)(e - b), 
CDF_SEC_SIZE(h) * sst->sst_len, CDF_SEC_SIZE(h), sst->sst_len)); errno = EFTYPE; return -1; } static ssize_t cdf_read(const cdf_info_t *info, off_t off, void *buf, size_t len) { size_t siz = (size_t)off + len; if ((off_t)(off + len) != (off_t)siz) { errno = EINVAL; return -1; } if (info->i_buf != NULL && info->i_len >= siz) { (void)memcpy(buf, &info->i_buf[off], len); return (ssize_t)len; } if (info->i_fd == -1) return -1; if (lseek(info->i_fd, off, SEEK_SET) == (off_t)-1) return -1; if (read(info->i_fd, buf, len) != (ssize_t)len) return -1; return (ssize_t)len; } int cdf_read_header(const cdf_info_t *info, cdf_header_t *h) { char buf[512]; (void)memcpy(cdf_bo.s, "\01\02\03\04", 4); if (cdf_read(info, (off_t)0, buf, sizeof(buf)) == -1) return -1; cdf_unpack_header(h, buf); cdf_swap_header(h); if (h->h_magic != CDF_MAGIC) { DPRINTF(("Bad magic 0x%" INT64_T_FORMAT "x != 0x%" INT64_T_FORMAT "x\n", (unsigned long long)h->h_magic, (unsigned long long)CDF_MAGIC)); goto out; } if (h->h_sec_size_p2 > 20) { DPRINTF(("Bad sector size 0x%u\n", h->h_sec_size_p2)); goto out; } if (h->h_short_sec_size_p2 > 20) { DPRINTF(("Bad short sector size 0x%u\n", h->h_short_sec_size_p2)); goto out; } return 0; out: errno = EFTYPE; return -1; } ssize_t cdf_read_sector(const cdf_info_t *info, void *buf, size_t offs, size_t len, const cdf_header_t *h, cdf_secid_t id) { assert((size_t)CDF_SEC_SIZE(h) == len); return cdf_read(info, (off_t)CDF_SEC_POS(h, id), ((char *)buf) + offs, len); } ssize_t cdf_read_short_sector(const cdf_stream_t *sst, void *buf, size_t offs, size_t len, const cdf_header_t *h, cdf_secid_t id) { assert((size_t)CDF_SHORT_SEC_SIZE(h) == len); (void)memcpy(((char *)buf) + offs, ((const char *)sst->sst_tab) + CDF_SHORT_SEC_POS(h, id), len); return len; } /* * Read the sector allocation table. 
*/ int cdf_read_sat(const cdf_info_t *info, cdf_header_t *h, cdf_sat_t *sat) { size_t i, j, k; size_t ss = CDF_SEC_SIZE(h); cdf_secid_t *msa, mid, sec; size_t nsatpersec = (ss / sizeof(mid)) - 1; for (i = 0; i < __arraycount(h->h_master_sat); i++) if (h->h_master_sat[i] == CDF_SECID_FREE) break; #define CDF_SEC_LIMIT (UINT32_MAX / (4 * ss)) if ((nsatpersec > 0 && h->h_num_sectors_in_master_sat > CDF_SEC_LIMIT / nsatpersec) || i > CDF_SEC_LIMIT) { DPRINTF(("Number of sectors in master SAT too big %u %" SIZE_T_FORMAT "u\n", h->h_num_sectors_in_master_sat, i)); errno = EFTYPE; return -1; } sat->sat_len = h->h_num_sectors_in_master_sat * nsatpersec + i; DPRINTF(("sat_len = %" SIZE_T_FORMAT "u ss = %" SIZE_T_FORMAT "u\n", sat->sat_len, ss)); if ((sat->sat_tab = CAST(cdf_secid_t *, calloc(sat->sat_len, ss))) == NULL) return -1; for (i = 0; i < __arraycount(h->h_master_sat); i++) { if (h->h_master_sat[i] < 0) break; if (cdf_read_sector(info, sat->sat_tab, ss * i, ss, h, h->h_master_sat[i]) != (ssize_t)ss) { DPRINTF(("Reading sector %d", h->h_master_sat[i])); goto out1; } } if ((msa = CAST(cdf_secid_t *, calloc(1, ss))) == NULL) goto out1; mid = h->h_secid_first_sector_in_master_sat; for (j = 0; j < h->h_num_sectors_in_master_sat; j++) { if (mid < 0) goto out; if (j >= CDF_LOOP_LIMIT) { DPRINTF(("Reading master sector loop limit")); errno = EFTYPE; goto out2; } if (cdf_read_sector(info, msa, 0, ss, h, mid) != (ssize_t)ss) { DPRINTF(("Reading master sector %d", mid)); goto out2; } for (k = 0; k < nsatpersec; k++, i++) { sec = CDF_TOLE4((uint32_t)msa[k]); if (sec < 0) goto out; if (i >= sat->sat_len) { DPRINTF(("Out of bounds reading MSA %u >= %u", i, sat->sat_len)); errno = EFTYPE; goto out2; } if (cdf_read_sector(info, sat->sat_tab, ss * i, ss, h, sec) != (ssize_t)ss) { DPRINTF(("Reading sector %d", CDF_TOLE4(msa[k]))); goto out2; } } mid = CDF_TOLE4((uint32_t)msa[nsatpersec]); } out: sat->sat_len = i; free(msa); return 0; out2: free(msa); out1: free(sat->sat_tab); return 
-1; } size_t cdf_count_chain(const cdf_sat_t *sat, cdf_secid_t sid, size_t size) { size_t i, j; cdf_secid_t maxsector = (cdf_secid_t)(sat->sat_len * size); DPRINTF(("Chain:")); for (j = i = 0; sid >= 0; i++, j++) { DPRINTF((" %d", sid)); if (j >= CDF_LOOP_LIMIT) { DPRINTF(("Counting chain loop limit")); errno = EFTYPE; return (size_t)-1; } if (sid > maxsector) { DPRINTF(("Sector %d > %d\n", sid, maxsector)); errno = EFTYPE; return (size_t)-1; } sid = CDF_TOLE4((uint32_t)sat->sat_tab[sid]); } DPRINTF(("\n")); return i; } int cdf_read_long_sector_chain(const cdf_info_t *info, const cdf_header_t *h, const cdf_sat_t *sat, cdf_secid_t sid, size_t len, cdf_stream_t *scn) { size_t ss = CDF_SEC_SIZE(h), i, j; ssize_t nr; scn->sst_len = cdf_count_chain(sat, sid, ss); scn->sst_dirlen = len; if (scn->sst_len == (size_t)-1) return -1; scn->sst_tab = calloc(scn->sst_len, ss); if (scn->sst_tab == NULL) return -1; for (j = i = 0; sid >= 0; i++, j++) { if (j >= CDF_LOOP_LIMIT) { DPRINTF(("Read long sector chain loop limit")); errno = EFTYPE; goto out; } if (i >= scn->sst_len) { DPRINTF(("Out of bounds reading long sector chain " "%u > %u\n", i, scn->sst_len)); errno = EFTYPE; goto out; } if ((nr = cdf_read_sector(info, scn->sst_tab, i * ss, ss, h, sid)) != (ssize_t)ss) { if (i == scn->sst_len - 1 && nr > 0) { /* Last sector might be truncated */ return 0; } DPRINTF(("Reading long sector chain %d", sid)); goto out; } sid = CDF_TOLE4((uint32_t)sat->sat_tab[sid]); } return 0; out: free(scn->sst_tab); return -1; } int cdf_read_short_sector_chain(const cdf_header_t *h, const cdf_sat_t *ssat, const cdf_stream_t *sst, cdf_secid_t sid, size_t len, cdf_stream_t *scn) { size_t ss = CDF_SHORT_SEC_SIZE(h), i, j; scn->sst_len = cdf_count_chain(ssat, sid, CDF_SEC_SIZE(h)); scn->sst_dirlen = len; if (sst->sst_tab == NULL || scn->sst_len == (size_t)-1) return -1; scn->sst_tab = calloc(scn->sst_len, ss); if (scn->sst_tab == NULL) return -1; for (j = i = 0; sid >= 0; i++, j++) { if (j >= 
CDF_LOOP_LIMIT) { DPRINTF(("Read short sector chain loop limit")); errno = EFTYPE; goto out; } if (i >= scn->sst_len) { DPRINTF(("Out of bounds reading short sector chain " "%u > %u\n", i, scn->sst_len)); errno = EFTYPE; goto out; } if (cdf_read_short_sector(sst, scn->sst_tab, i * ss, ss, h, sid) != (ssize_t)ss) { DPRINTF(("Reading short sector chain %d", sid)); goto out; } sid = CDF_TOLE4((uint32_t)ssat->sat_tab[sid]); } return 0; out: free(scn->sst_tab); return -1; } int cdf_read_sector_chain(const cdf_info_t *info, const cdf_header_t *h, const cdf_sat_t *sat, const cdf_sat_t *ssat, const cdf_stream_t *sst, cdf_secid_t sid, size_t len, cdf_stream_t *scn) { if (len < h->h_min_size_standard_stream && sst->sst_tab != NULL) return cdf_read_short_sector_chain(h, ssat, sst, sid, len, scn); else return cdf_read_long_sector_chain(info, h, sat, sid, len, scn); } int cdf_read_dir(const cdf_info_t *info, const cdf_header_t *h, const cdf_sat_t *sat, cdf_dir_t *dir) { size_t i, j; size_t ss = CDF_SEC_SIZE(h), ns, nd; char *buf; cdf_secid_t sid = h->h_secid_first_directory; ns = cdf_count_chain(sat, sid, ss); if (ns == (size_t)-1) return -1; nd = ss / CDF_DIRECTORY_SIZE; dir->dir_len = ns * nd; dir->dir_tab = CAST(cdf_directory_t *, calloc(dir->dir_len, sizeof(dir->dir_tab[0]))); if (dir->dir_tab == NULL) return -1; if ((buf = CAST(char *, malloc(ss))) == NULL) { free(dir->dir_tab); return -1; } for (j = i = 0; i < ns; i++, j++) { if (j >= CDF_LOOP_LIMIT) { DPRINTF(("Read dir loop limit")); errno = EFTYPE; goto out; } if (cdf_read_sector(info, buf, 0, ss, h, sid) != (ssize_t)ss) { DPRINTF(("Reading directory sector %d", sid)); goto out; } for (j = 0; j < nd; j++) { cdf_unpack_dir(&dir->dir_tab[i * nd + j], &buf[j * CDF_DIRECTORY_SIZE]); } sid = CDF_TOLE4((uint32_t)sat->sat_tab[sid]); } if (NEED_SWAP) for (i = 0; i < dir->dir_len; i++) cdf_swap_dir(&dir->dir_tab[i]); free(buf); return 0; out: free(dir->dir_tab); free(buf); return -1; } int cdf_read_ssat(const cdf_info_t *info, 
const cdf_header_t *h, const cdf_sat_t *sat, cdf_sat_t *ssat) { size_t i, j; size_t ss = CDF_SEC_SIZE(h); cdf_secid_t sid = h->h_secid_first_sector_in_short_sat; ssat->sat_len = cdf_count_chain(sat, sid, CDF_SEC_SIZE(h)); if (ssat->sat_len == (size_t)-1) return -1; ssat->sat_tab = CAST(cdf_secid_t *, calloc(ssat->sat_len, ss)); if (ssat->sat_tab == NULL) return -1; for (j = i = 0; sid >= 0; i++, j++) { if (j >= CDF_LOOP_LIMIT) { DPRINTF(("Read short sat sector loop limit")); errno = EFTYPE; goto out; } if (i >= ssat->sat_len) { DPRINTF(("Out of bounds reading short sector chain " "%u > %u\n", i, ssat->sat_len)); errno = EFTYPE; goto out; } if (cdf_read_sector(info, ssat->sat_tab, i * ss, ss, h, sid) != (ssize_t)ss) { DPRINTF(("Reading short sat sector %d", sid)); goto out; } sid = CDF_TOLE4((uint32_t)sat->sat_tab[sid]); } return 0; out: free(ssat->sat_tab); return -1; } int cdf_read_short_stream(const cdf_info_t *info, const cdf_header_t *h, const cdf_sat_t *sat, const cdf_dir_t *dir, cdf_stream_t *scn) { size_t i; const cdf_directory_t *d; for (i = 0; i < dir->dir_len; i++) if (dir->dir_tab[i].d_type == CDF_DIR_TYPE_ROOT_STORAGE) break; /* If the it is not there, just fake it; some docs don't have it */ if (i == dir->dir_len) goto out; d = &dir->dir_tab[i]; /* If the it is not there, just fake it; some docs don't have it */ if (d->d_stream_first_sector < 0) goto out; return cdf_read_long_sector_chain(info, h, sat, d->d_stream_first_sector, d->d_size, scn); out: scn->sst_tab = NULL; scn->sst_len = 0; scn->sst_dirlen = 0; return 0; } static int cdf_namecmp(const char *d, const uint16_t *s, size_t l) { for (; l--; d++, s++) if (*d != CDF_TOLE2(*s)) return (unsigned char)*d - CDF_TOLE2(*s); return 0; } int cdf_read_summary_info(const cdf_info_t *info, const cdf_header_t *h, const cdf_sat_t *sat, const cdf_sat_t *ssat, const cdf_stream_t *sst, const cdf_dir_t *dir, cdf_stream_t *scn) { size_t i; const cdf_directory_t *d; static const char name[] = 
"\05SummaryInformation"; for (i = dir->dir_len; i > 0; i--) if (dir->dir_tab[i - 1].d_type == CDF_DIR_TYPE_USER_STREAM && cdf_namecmp(name, dir->dir_tab[i - 1].d_name, sizeof(name)) == 0) break; if (i == 0) { DPRINTF(("Cannot find summary information section\n")); errno = ESRCH; return -1; } d = &dir->dir_tab[i - 1]; return cdf_read_sector_chain(info, h, sat, ssat, sst, d->d_stream_first_sector, d->d_size, scn); } int cdf_read_property_info(const cdf_stream_t *sst, const cdf_header_t *h, uint32_t offs, cdf_property_info_t **info, size_t *count, size_t *maxcount) { const cdf_section_header_t *shp; cdf_section_header_t sh; const uint8_t *p, *q, *e; int16_t s16; int32_t s32; uint32_t u32; int64_t s64; uint64_t u64; cdf_timestamp_t tp; size_t i, o, o4, nelements, j; cdf_property_info_t *inp; if (offs > UINT32_MAX / 4) { errno = EFTYPE; goto out; } shp = CAST(const cdf_section_header_t *, (const void *) ((const char *)sst->sst_tab + offs)); if (cdf_check_stream_offset(sst, h, shp, sizeof(*shp), __LINE__) == -1) goto out; sh.sh_len = CDF_TOLE4(shp->sh_len); #define CDF_SHLEN_LIMIT (UINT32_MAX / 8) if (sh.sh_len > CDF_SHLEN_LIMIT) { errno = EFTYPE; goto out; } sh.sh_properties = CDF_TOLE4(shp->sh_properties); #define CDF_PROP_LIMIT (UINT32_MAX / (4 * sizeof(*inp))) if (sh.sh_properties > CDF_PROP_LIMIT) goto out; DPRINTF(("section len: %u properties %u\n", sh.sh_len, sh.sh_properties)); if (*maxcount) { if (*maxcount > CDF_PROP_LIMIT) goto out; *maxcount += sh.sh_properties; inp = CAST(cdf_property_info_t *, realloc(*info, *maxcount * sizeof(*inp))); } else { *maxcount = sh.sh_properties; inp = CAST(cdf_property_info_t *, malloc(*maxcount * sizeof(*inp))); } if (inp == NULL) goto out; *info = inp; inp += *count; *count += sh.sh_properties; p = CAST(const uint8_t *, (const void *) ((const char *)(const void *)sst->sst_tab + offs + sizeof(sh))); e = CAST(const uint8_t *, (const void *) (((const char *)(const void *)shp) + sh.sh_len)); if (cdf_check_stream_offset(sst, h, e, 
0, __LINE__) == -1) goto out; for (i = 0; i < sh.sh_properties; i++) { q = (const uint8_t *)(const void *) ((const char *)(const void *)p + CDF_GETUINT32(p, (i << 1) + 1)) - 2 * sizeof(uint32_t); if (q > e) { DPRINTF(("Ran of the end %p > %p\n", q, e)); goto out; } inp[i].pi_id = CDF_GETUINT32(p, i << 1); inp[i].pi_type = CDF_GETUINT32(q, 0); DPRINTF(("%d) id=%x type=%x offs=%x,%d\n", i, inp[i].pi_id, inp[i].pi_type, q - p, CDF_GETUINT32(p, (i << 1) + 1))); if (inp[i].pi_type & CDF_VECTOR) { nelements = CDF_GETUINT32(q, 1); o = 2; } else { nelements = 1; o = 1; } o4 = o * sizeof(uint32_t); if (inp[i].pi_type & (CDF_ARRAY|CDF_BYREF|CDF_RESERVED)) goto unknown; switch (inp[i].pi_type & CDF_TYPEMASK) { case CDF_NULL: case CDF_EMPTY: break; case CDF_SIGNED16: if (inp[i].pi_type & CDF_VECTOR) goto unknown; (void)memcpy(&s16, &q[o4], sizeof(s16)); inp[i].pi_s16 = CDF_TOLE2(s16); break; case CDF_SIGNED32: if (inp[i].pi_type & CDF_VECTOR) goto unknown; (void)memcpy(&s32, &q[o4], sizeof(s32)); inp[i].pi_s32 = CDF_TOLE4((uint32_t)s32); break; case CDF_BOOL: case CDF_UNSIGNED32: if (inp[i].pi_type & CDF_VECTOR) goto unknown; (void)memcpy(&u32, &q[o4], sizeof(u32)); inp[i].pi_u32 = CDF_TOLE4(u32); break; case CDF_SIGNED64: if (inp[i].pi_type & CDF_VECTOR) goto unknown; (void)memcpy(&s64, &q[o4], sizeof(s64)); inp[i].pi_s64 = CDF_TOLE8((uint64_t)s64); break; case CDF_UNSIGNED64: if (inp[i].pi_type & CDF_VECTOR) goto unknown; (void)memcpy(&u64, &q[o4], sizeof(u64)); inp[i].pi_u64 = CDF_TOLE8((uint64_t)u64); break; case CDF_LENGTH32_STRING: case CDF_LENGTH32_WSTRING: if (nelements > 1) { size_t nelem = inp - *info; if (*maxcount > CDF_PROP_LIMIT || nelements > CDF_PROP_LIMIT) goto out; *maxcount += nelements; inp = CAST(cdf_property_info_t *, realloc(*info, *maxcount * sizeof(*inp))); if (inp == NULL) goto out; *info = inp; inp = *info + nelem; } DPRINTF(("nelements = %d\n", nelements)); for (j = 0; j < nelements; j++, i++) { uint32_t l = CDF_GETUINT32(q, o); inp[i].pi_str.s_len 
= l; inp[i].pi_str.s_buf = (const char *) (const void *)(&q[o4 + sizeof(l)]); DPRINTF(("l = %d, r = %d, s = %s\n", l, CDF_ROUND(l, sizeof(l)), inp[i].pi_str.s_buf)); l = 4 + (uint32_t)CDF_ROUND(l, sizeof(l)); o += l >> 2; o4 = o * sizeof(uint32_t); } i--; break; case CDF_FILETIME: if (inp[i].pi_type & CDF_VECTOR) goto unknown; (void)memcpy(&tp, &q[o4], sizeof(tp)); inp[i].pi_tp = CDF_TOLE8((uint64_t)tp); break; case CDF_CLIPBOARD: if (inp[i].pi_type & CDF_VECTOR) goto unknown; break; default: unknown: DPRINTF(("Don't know how to deal with %x\n", inp[i].pi_type)); goto out; } } return 0; out: free(*info); return -1; } int cdf_unpack_summary_info(const cdf_stream_t *sst, const cdf_header_t *h, cdf_summary_info_header_t *ssi, cdf_property_info_t **info, size_t *count) { size_t i, maxcount; const cdf_summary_info_header_t *si = CAST(const cdf_summary_info_header_t *, sst->sst_tab); const cdf_section_declaration_t *sd = CAST(const cdf_section_declaration_t *, (const void *) ((const char *)sst->sst_tab + CDF_SECTION_DECLARATION_OFFSET)); if (cdf_check_stream_offset(sst, h, si, sizeof(*si), __LINE__) == -1 || cdf_check_stream_offset(sst, h, sd, sizeof(*sd), __LINE__) == -1) return -1; ssi->si_byte_order = CDF_TOLE2(si->si_byte_order); ssi->si_os_version = CDF_TOLE2(si->si_os_version); ssi->si_os = CDF_TOLE2(si->si_os); ssi->si_class = si->si_class; cdf_swap_class(&ssi->si_class); ssi->si_count = CDF_TOLE2(si->si_count); *count = 0; maxcount = 0; *info = NULL; for (i = 0; i < CDF_TOLE4(si->si_count); i++) { if (i >= CDF_LOOP_LIMIT) { DPRINTF(("Unpack summary info loop limit")); errno = EFTYPE; return -1; } if (cdf_read_property_info(sst, h, CDF_TOLE4(sd->sd_offset), info, count, &maxcount) == -1) return -1; } return 0; } int cdf_print_classid(char *buf, size_t buflen, const cdf_classid_t *id) { return snprintf(buf, buflen, "%.8x-%.4x-%.4x-%.2x%.2x-" "%.2x%.2x%.2x%.2x%.2x%.2x", id->cl_dword, id->cl_word[0], id->cl_word[1], id->cl_two[0], id->cl_two[1], id->cl_six[0], 
id->cl_six[1], id->cl_six[2], id->cl_six[3], id->cl_six[4], id->cl_six[5]); } static const struct { uint32_t v; const char *n; } vn[] = { { CDF_PROPERTY_CODE_PAGE, "Code page" }, { CDF_PROPERTY_TITLE, "Title" }, { CDF_PROPERTY_SUBJECT, "Subject" }, { CDF_PROPERTY_AUTHOR, "Author" }, { CDF_PROPERTY_KEYWORDS, "Keywords" }, { CDF_PROPERTY_COMMENTS, "Comments" }, { CDF_PROPERTY_TEMPLATE, "Template" }, { CDF_PROPERTY_LAST_SAVED_BY, "Last Saved By" }, { CDF_PROPERTY_REVISION_NUMBER, "Revision Number" }, { CDF_PROPERTY_TOTAL_EDITING_TIME, "Total Editing Time" }, { CDF_PROPERTY_LAST_PRINTED, "Last Printed" }, { CDF_PROPERTY_CREATE_TIME, "Create Time/Date" }, { CDF_PROPERTY_LAST_SAVED_TIME, "Last Saved Time/Date" }, { CDF_PROPERTY_NUMBER_OF_PAGES, "Number of Pages" }, { CDF_PROPERTY_NUMBER_OF_WORDS, "Number of Words" }, { CDF_PROPERTY_NUMBER_OF_CHARACTERS, "Number of Characters" }, { CDF_PROPERTY_THUMBNAIL, "Thumbnail" }, { CDF_PROPERTY_NAME_OF_APPLICATION, "Name of Creating Application" }, { CDF_PROPERTY_SECURITY, "Security" }, { CDF_PROPERTY_LOCALE_ID, "Locale ID" }, }; int cdf_print_property_name(char *buf, size_t bufsiz, uint32_t p) { size_t i; for (i = 0; i < __arraycount(vn); i++) if (vn[i].v == p) return snprintf(buf, bufsiz, "%s", vn[i].n); return snprintf(buf, bufsiz, "0x%x", p); } int cdf_print_elapsed_time(char *buf, size_t bufsiz, cdf_timestamp_t ts) { int len = 0; int days, hours, mins, secs; ts /= CDF_TIME_PREC; secs = (int)(ts % 60); ts /= 60; mins = (int)(ts % 60); ts /= 60; hours = (int)(ts % 24); ts /= 24; days = (int)ts; if (days) { len += snprintf(buf + len, bufsiz - len, "%dd+", days); if ((size_t)len >= bufsiz) return len; } if (days || hours) { len += snprintf(buf + len, bufsiz - len, "%.2d:", hours); if ((size_t)len >= bufsiz) return len; } len += snprintf(buf + len, bufsiz - len, "%.2d:", mins); if ((size_t)len >= bufsiz) return len; len += snprintf(buf + len, bufsiz - len, "%.2d", secs); return len; } #ifdef CDF_DEBUG void cdf_dump_header(const 
cdf_header_t *h) { size_t i; #define DUMP(a, b) (void)fprintf(stderr, "%40.40s = " a "\n", # b, h->h_ ## b) #define DUMP2(a, b) (void)fprintf(stderr, "%40.40s = " a " (" a ")\n", # b, \ h->h_ ## b, 1 << h->h_ ## b) DUMP("%d", revision); DUMP("%d", version); DUMP("0x%x", byte_order); DUMP2("%d", sec_size_p2); DUMP2("%d", short_sec_size_p2); DUMP("%d", num_sectors_in_sat); DUMP("%d", secid_first_directory); DUMP("%d", min_size_standard_stream); DUMP("%d", secid_first_sector_in_short_sat); DUMP("%d", num_sectors_in_short_sat); DUMP("%d", secid_first_sector_in_master_sat); DUMP("%d", num_sectors_in_master_sat); for (i = 0; i < __arraycount(h->h_master_sat); i++) { if (h->h_master_sat[i] == CDF_SECID_FREE) break; (void)fprintf(stderr, "%35.35s[%.3zu] = %d\n", "master_sat", i, h->h_master_sat[i]); } } void cdf_dump_sat(const char *prefix, const cdf_sat_t *sat, size_t size) { size_t i, j, s = size / sizeof(cdf_secid_t); for (i = 0; i < sat->sat_len; i++) { (void)fprintf(stderr, "%s[%" SIZE_T_FORMAT "u]:\n%.6d: ", prefix, i, i * s); for (j = 0; j < s; j++) { (void)fprintf(stderr, "%5d, ", CDF_TOLE4(sat->sat_tab[s * i + j])); if ((j + 1) % 10 == 0) (void)fprintf(stderr, "\n%.6d: ", i * s + j + 1); } (void)fprintf(stderr, "\n"); } } void cdf_dump(void *v, size_t len) { size_t i, j; unsigned char *p = v; char abuf[16]; (void)fprintf(stderr, "%.4x: ", 0); for (i = 0, j = 0; i < len; i++, p++) { (void)fprintf(stderr, "%.2x ", *p); abuf[j++] = isprint(*p) ? *p : '.'; if (j == 16) { j = 0; abuf[15] = '\0'; (void)fprintf(stderr, "%s\n%.4x: ", abuf, i + 1); } } (void)fprintf(stderr, "\n"); } void cdf_dump_stream(const cdf_header_t *h, const cdf_stream_t *sst) { size_t ss = sst->sst_dirlen < h->h_min_size_standard_stream ? 
CDF_SHORT_SEC_SIZE(h) : CDF_SEC_SIZE(h); cdf_dump(sst->sst_tab, ss * sst->sst_len); } void cdf_dump_dir(const cdf_info_t *info, const cdf_header_t *h, const cdf_sat_t *sat, const cdf_sat_t *ssat, const cdf_stream_t *sst, const cdf_dir_t *dir) { size_t i, j; cdf_directory_t *d; char name[__arraycount(d->d_name)]; cdf_stream_t scn; struct timespec ts; static const char *types[] = { "empty", "user storage", "user stream", "lockbytes", "property", "root storage" }; for (i = 0; i < dir->dir_len; i++) { d = &dir->dir_tab[i]; for (j = 0; j < sizeof(name); j++) name[j] = (char)CDF_TOLE2(d->d_name[j]); (void)fprintf(stderr, "Directory %" SIZE_T_FORMAT "u: %s\n", i, name); if (d->d_type < __arraycount(types)) (void)fprintf(stderr, "Type: %s\n", types[d->d_type]); else (void)fprintf(stderr, "Type: %d\n", d->d_type); (void)fprintf(stderr, "Color: %s\n", d->d_color ? "black" : "red"); (void)fprintf(stderr, "Left child: %d\n", d->d_left_child); (void)fprintf(stderr, "Right child: %d\n", d->d_right_child); (void)fprintf(stderr, "Flags: 0x%x\n", d->d_flags); cdf_timestamp_to_timespec(&ts, d->d_created); (void)fprintf(stderr, "Created %s", cdf_ctime(&ts.tv_sec)); cdf_timestamp_to_timespec(&ts, d->d_modified); (void)fprintf(stderr, "Modified %s", cdf_ctime(&ts.tv_sec)); (void)fprintf(stderr, "Stream %d\n", d->d_stream_first_sector); (void)fprintf(stderr, "Size %d\n", d->d_size); switch (d->d_type) { case CDF_DIR_TYPE_USER_STORAGE: (void)fprintf(stderr, "Storage: %d\n", d->d_storage); break; case CDF_DIR_TYPE_USER_STREAM: if (sst == NULL) break; if (cdf_read_sector_chain(info, h, sat, ssat, sst, d->d_stream_first_sector, d->d_size, &scn) == -1) { warn("Can't read stream for %s at %d len %d", name, d->d_stream_first_sector, d->d_size); break; } cdf_dump_stream(h, &scn); free(scn.sst_tab); break; default: break; } } } void cdf_dump_property_info(const cdf_property_info_t *info, size_t count) { cdf_timestamp_t tp; struct timespec ts; char buf[64]; size_t i, j; for (i = 0; i < count; 
i++) { cdf_print_property_name(buf, sizeof(buf), info[i].pi_id); (void)fprintf(stderr, "%" SIZE_T_FORMAT "u) %s: ", i, buf); switch (info[i].pi_type) { case CDF_NULL: break; case CDF_SIGNED16: (void)fprintf(stderr, "signed 16 [%hd]\n", info[i].pi_s16); break; case CDF_SIGNED32: (void)fprintf(stderr, "signed 32 [%d]\n", info[i].pi_s32); break; case CDF_UNSIGNED32: (void)fprintf(stderr, "unsigned 32 [%u]\n", info[i].pi_u32); break; case CDF_LENGTH32_STRING: (void)fprintf(stderr, "string %u [%.*s]\n", info[i].pi_str.s_len, info[i].pi_str.s_len, info[i].pi_str.s_buf); break; case CDF_LENGTH32_WSTRING: (void)fprintf(stderr, "string %u [", info[i].pi_str.s_len); for (j = 0; j < info[i].pi_str.s_len - 1; j++) (void)fputc(info[i].pi_str.s_buf[j << 1], stderr); (void)fprintf(stderr, "]\n"); break; case CDF_FILETIME: tp = info[i].pi_tp; if (tp < 1000000000000000LL) { cdf_print_elapsed_time(buf, sizeof(buf), tp); (void)fprintf(stderr, "timestamp %s\n", buf); } else { cdf_timestamp_to_timespec(&ts, tp); (void)fprintf(stderr, "timestamp %s", cdf_ctime(&ts.tv_sec)); } break; case CDF_CLIPBOARD: (void)fprintf(stderr, "CLIPBOARD %u\n", info[i].pi_u32); break; default: DPRINTF(("Don't know how to deal with %x\n", info[i].pi_type)); break; } } } void cdf_dump_summary_info(const cdf_header_t *h, const cdf_stream_t *sst) { char buf[128]; cdf_summary_info_header_t ssi; cdf_property_info_t *info; size_t count; (void)&h; if (cdf_unpack_summary_info(sst, h, &ssi, &info, &count) == -1) return; (void)fprintf(stderr, "Endian: %x\n", ssi.si_byte_order); (void)fprintf(stderr, "Os Version %d.%d\n", ssi.si_os_version & 0xff, ssi.si_os_version >> 8); (void)fprintf(stderr, "Os %d\n", ssi.si_os); cdf_print_classid(buf, sizeof(buf), &ssi.si_class); (void)fprintf(stderr, "Class %s\n", buf); (void)fprintf(stderr, "Count %d\n", ssi.si_count); cdf_dump_property_info(info, count); free(info); } #endif #ifdef TEST int main(int argc, char *argv[]) { int i; cdf_header_t h; cdf_sat_t sat, ssat; cdf_stream_t 
sst, scn; cdf_dir_t dir; cdf_info_t info; if (argc < 2) { (void)fprintf(stderr, "Usage: %s <filename>\n", getprogname()); return -1; } info.i_buf = NULL; info.i_len = 0; for (i = 1; i < argc; i++) { if ((info.i_fd = open(argv[1], O_RDONLY)) == -1) err(1, "Cannot open `%s'", argv[1]); if (cdf_read_header(&info, &h) == -1) err(1, "Cannot read header"); #ifdef CDF_DEBUG cdf_dump_header(&h); #endif if (cdf_read_sat(&info, &h, &sat) == -1) err(1, "Cannot read sat"); #ifdef CDF_DEBUG cdf_dump_sat("SAT", &sat, CDF_SEC_SIZE(&h)); #endif if (cdf_read_ssat(&info, &h, &sat, &ssat) == -1) err(1, "Cannot read ssat"); #ifdef CDF_DEBUG cdf_dump_sat("SSAT", &ssat, CDF_SHORT_SEC_SIZE(&h)); #endif if (cdf_read_dir(&info, &h, &sat, &dir) == -1) err(1, "Cannot read dir"); if (cdf_read_short_stream(&info, &h, &sat, &dir, &sst) == -1) err(1, "Cannot read short stream"); #ifdef CDF_DEBUG cdf_dump_stream(&h, &sst); #endif #ifdef CDF_DEBUG cdf_dump_dir(&info, &h, &sat, &ssat, &sst, &dir); #endif if (cdf_read_summary_info(&info, &h, &sat, &ssat, &sst, &dir, &scn) == -1) err(1, "Cannot read summary info"); #ifdef CDF_DEBUG cdf_dump_summary_info(&h, &scn); #endif (void)close(info.i_fd); } return 0; } #endif
/*- * Copyright (c) 2008 Christos Zoulas * All rights reserved. * * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following conditions * are met: * 1. Redistributions of source code must retain the above copyright * notice, this list of conditions and the following disclaimer. * 2. Redistributions in binary form must reproduce the above copyright * notice, this list of conditions and the following disclaimer in the * documentation and/or other materials provided with the distribution. * * THIS SOFTWARE IS PROVIDED BY THE NETBSD FOUNDATION, INC. AND CONTRIBUTORS * ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED * TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE FOUNDATION OR CONTRIBUTORS * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE * POSSIBILITY OF SUCH DAMAGE. */ /* * Parse Composite Document Files, the format used in Microsoft Office * document files before they switched to zipped XML. * Info from: http://sc.openoffice.org/compdocfileformat.pdf * * N.B. This is the "Composite Document File" format, and not the * "Compound Document Format", nor the "Channel Definition Format". 
*/ #include "file.h" #ifndef lint FILE_RCSID("@(#)$File: cdf.c,v 1.46 2011/09/16 21:23:59 christos Exp $") #endif #include <assert.h> #ifdef CDF_DEBUG #include <err.h> #endif #include <stdlib.h> #include <unistd.h> #include <string.h> #include <time.h> #include <ctype.h> #ifdef HAVE_LIMITS_H #include <limits.h> #endif #ifndef EFTYPE #define EFTYPE EINVAL #endif #include "cdf.h" #ifdef CDF_DEBUG #define DPRINTF(a) printf a, fflush(stdout) #else #define DPRINTF(a) #endif static union { char s[4]; uint32_t u; } cdf_bo; #define NEED_SWAP (cdf_bo.u == (uint32_t)0x01020304) #define CDF_TOLE8(x) ((uint64_t)(NEED_SWAP ? _cdf_tole8(x) : (uint64_t)(x))) #define CDF_TOLE4(x) ((uint32_t)(NEED_SWAP ? _cdf_tole4(x) : (uint32_t)(x))) #define CDF_TOLE2(x) ((uint16_t)(NEED_SWAP ? _cdf_tole2(x) : (uint16_t)(x))) #define CDF_GETUINT32(x, y) cdf_getuint32(x, y) /* * swap a short */ static uint16_t _cdf_tole2(uint16_t sv) { uint16_t rv; uint8_t *s = (uint8_t *)(void *)&sv; uint8_t *d = (uint8_t *)(void *)&rv; d[0] = s[1]; d[1] = s[0]; return rv; } /* * swap an int */ static uint32_t _cdf_tole4(uint32_t sv) { uint32_t rv; uint8_t *s = (uint8_t *)(void *)&sv; uint8_t *d = (uint8_t *)(void *)&rv; d[0] = s[3]; d[1] = s[2]; d[2] = s[1]; d[3] = s[0]; return rv; } /* * swap a quad */ static uint64_t _cdf_tole8(uint64_t sv) { uint64_t rv; uint8_t *s = (uint8_t *)(void *)&sv; uint8_t *d = (uint8_t *)(void *)&rv; d[0] = s[7]; d[1] = s[6]; d[2] = s[5]; d[3] = s[4]; d[4] = s[3]; d[5] = s[2]; d[6] = s[1]; d[7] = s[0]; return rv; } /* * grab a uint32_t from a possibly unaligned address, and return it in * the native host order. 
*/ static uint32_t cdf_getuint32(const uint8_t *p, size_t offs) { uint32_t rv; (void)memcpy(&rv, p + offs * sizeof(uint32_t), sizeof(rv)); return CDF_TOLE4(rv); } #define CDF_UNPACK(a) \ (void)memcpy(&(a), &buf[len], sizeof(a)), len += sizeof(a) #define CDF_UNPACKA(a) \ (void)memcpy((a), &buf[len], sizeof(a)), len += sizeof(a) uint16_t cdf_tole2(uint16_t sv) { return CDF_TOLE2(sv); } uint32_t cdf_tole4(uint32_t sv) { return CDF_TOLE4(sv); } uint64_t cdf_tole8(uint64_t sv) { return CDF_TOLE8(sv); } void cdf_swap_header(cdf_header_t *h) { size_t i; h->h_magic = CDF_TOLE8(h->h_magic); h->h_uuid[0] = CDF_TOLE8(h->h_uuid[0]); h->h_uuid[1] = CDF_TOLE8(h->h_uuid[1]); h->h_revision = CDF_TOLE2(h->h_revision); h->h_version = CDF_TOLE2(h->h_version); h->h_byte_order = CDF_TOLE2(h->h_byte_order); h->h_sec_size_p2 = CDF_TOLE2(h->h_sec_size_p2); h->h_short_sec_size_p2 = CDF_TOLE2(h->h_short_sec_size_p2); h->h_num_sectors_in_sat = CDF_TOLE4(h->h_num_sectors_in_sat); h->h_secid_first_directory = CDF_TOLE4(h->h_secid_first_directory); h->h_min_size_standard_stream = CDF_TOLE4(h->h_min_size_standard_stream); h->h_secid_first_sector_in_short_sat = CDF_TOLE4((uint32_t)h->h_secid_first_sector_in_short_sat); h->h_num_sectors_in_short_sat = CDF_TOLE4(h->h_num_sectors_in_short_sat); h->h_secid_first_sector_in_master_sat = CDF_TOLE4((uint32_t)h->h_secid_first_sector_in_master_sat); h->h_num_sectors_in_master_sat = CDF_TOLE4(h->h_num_sectors_in_master_sat); for (i = 0; i < __arraycount(h->h_master_sat); i++) h->h_master_sat[i] = CDF_TOLE4((uint32_t)h->h_master_sat[i]); } void cdf_unpack_header(cdf_header_t *h, char *buf) { size_t i; size_t len = 0; CDF_UNPACK(h->h_magic); CDF_UNPACKA(h->h_uuid); CDF_UNPACK(h->h_revision); CDF_UNPACK(h->h_version); CDF_UNPACK(h->h_byte_order); CDF_UNPACK(h->h_sec_size_p2); CDF_UNPACK(h->h_short_sec_size_p2); CDF_UNPACKA(h->h_unused0); CDF_UNPACK(h->h_num_sectors_in_sat); CDF_UNPACK(h->h_secid_first_directory); CDF_UNPACKA(h->h_unused1); 
CDF_UNPACK(h->h_min_size_standard_stream); CDF_UNPACK(h->h_secid_first_sector_in_short_sat); CDF_UNPACK(h->h_num_sectors_in_short_sat); CDF_UNPACK(h->h_secid_first_sector_in_master_sat); CDF_UNPACK(h->h_num_sectors_in_master_sat); for (i = 0; i < __arraycount(h->h_master_sat); i++) CDF_UNPACK(h->h_master_sat[i]); } void cdf_swap_dir(cdf_directory_t *d) { d->d_namelen = CDF_TOLE2(d->d_namelen); d->d_left_child = CDF_TOLE4((uint32_t)d->d_left_child); d->d_right_child = CDF_TOLE4((uint32_t)d->d_right_child); d->d_storage = CDF_TOLE4((uint32_t)d->d_storage); d->d_storage_uuid[0] = CDF_TOLE8(d->d_storage_uuid[0]); d->d_storage_uuid[1] = CDF_TOLE8(d->d_storage_uuid[1]); d->d_flags = CDF_TOLE4(d->d_flags); d->d_created = CDF_TOLE8((uint64_t)d->d_created); d->d_modified = CDF_TOLE8((uint64_t)d->d_modified); d->d_stream_first_sector = CDF_TOLE4((uint32_t)d->d_stream_first_sector); d->d_size = CDF_TOLE4(d->d_size); } void cdf_swap_class(cdf_classid_t *d) { d->cl_dword = CDF_TOLE4(d->cl_dword); d->cl_word[0] = CDF_TOLE2(d->cl_word[0]); d->cl_word[1] = CDF_TOLE2(d->cl_word[1]); } void cdf_unpack_dir(cdf_directory_t *d, char *buf) { size_t len = 0; CDF_UNPACKA(d->d_name); CDF_UNPACK(d->d_namelen); CDF_UNPACK(d->d_type); CDF_UNPACK(d->d_color); CDF_UNPACK(d->d_left_child); CDF_UNPACK(d->d_right_child); CDF_UNPACK(d->d_storage); CDF_UNPACKA(d->d_storage_uuid); CDF_UNPACK(d->d_flags); CDF_UNPACK(d->d_created); CDF_UNPACK(d->d_modified); CDF_UNPACK(d->d_stream_first_sector); CDF_UNPACK(d->d_size); CDF_UNPACK(d->d_unused0); } static int cdf_check_stream_offset(const cdf_stream_t *sst, const cdf_header_t *h, const void *p, size_t tail, int line) { const char *b = (const char *)sst->sst_tab; const char *e = ((const char *)p) + tail; (void)&line; if (e >= b && (size_t)(e - b) < CDF_SEC_SIZE(h) * sst->sst_len) return 0; DPRINTF(("%d: offset begin %p end %p %" SIZE_T_FORMAT "u" " >= %" SIZE_T_FORMAT "u [%" SIZE_T_FORMAT "u %" SIZE_T_FORMAT "u]\n", line, b, e, (size_t)(e - b), 
CDF_SEC_SIZE(h) * sst->sst_len, CDF_SEC_SIZE(h), sst->sst_len));
	errno = EFTYPE;
	return -1;
}

/*
 * Read `len' bytes at offset `off' from the CDF file, either out of the
 * caller-supplied in-memory buffer (when the whole file is available) or
 * via lseek()/read() on the file descriptor.  Returns the number of bytes
 * read, or -1 with errno set on failure.
 */
static ssize_t
cdf_read(const cdf_info_t *info, off_t off, void *buf, size_t len)
{
	size_t siz = (size_t)off + len;

	/* Reject offset+length combinations that overflow off_t. */
	if ((off_t)(off + len) != (off_t)siz) {
		errno = EINVAL;
		return -1;
	}

	if (info->i_buf != NULL && info->i_len >= siz) {
		(void)memcpy(buf, &info->i_buf[off], len);
		return (ssize_t)len;
	}

	if (info->i_fd == -1)
		return -1;

	if (lseek(info->i_fd, off, SEEK_SET) == (off_t)-1)
		return -1;

	if (read(info->i_fd, buf, len) != (ssize_t)len)
		return -1;

	return (ssize_t)len;
}

/*
 * Read and validate the 512-byte CDF header: check the magic number and
 * sanity-check the power-of-two sector sizes.  Returns 0 on success, or
 * -1 with errno = EFTYPE on a malformed header.
 */
int
cdf_read_header(const cdf_info_t *info, cdf_header_t *h)
{
	char buf[512];

	/* Prime the byte-order probe used by the CDF_TOLE* macros. */
	(void)memcpy(cdf_bo.s, "\01\02\03\04", 4);
	if (cdf_read(info, (off_t)0, buf, sizeof(buf)) == -1)
		return -1;
	cdf_unpack_header(h, buf);
	cdf_swap_header(h);
	if (h->h_magic != CDF_MAGIC) {
		DPRINTF(("Bad magic 0x%" INT64_T_FORMAT "x != 0x%"
		    INT64_T_FORMAT "x\n",
		    (unsigned long long)h->h_magic,
		    (unsigned long long)CDF_MAGIC));
		goto out;
	}
	/* 2^20 = 1MB sectors is already huge; anything larger is bogus. */
	if (h->h_sec_size_p2 > 20) {
		DPRINTF(("Bad sector size 0x%u\n", h->h_sec_size_p2));
		goto out;
	}
	if (h->h_short_sec_size_p2 > 20) {
		DPRINTF(("Bad short sector size 0x%u\n",
		    h->h_short_sec_size_p2));
		goto out;
	}
	return 0;
out:
	errno = EFTYPE;
	return -1;
}

/*
 * Read one regular sector `id' into buf+offs.  `len' must equal the
 * header's sector size.
 */
ssize_t
cdf_read_sector(const cdf_info_t *info, void *buf, size_t offs, size_t len,
    const cdf_header_t *h, cdf_secid_t id)
{
	size_t ss = CDF_SEC_SIZE(h);
	size_t pos = CDF_SEC_POS(h, id);

	assert(ss == len);
	return cdf_read(info, (off_t)pos, ((char *)buf) + offs, len);
}

/*
 * Copy one short sector `id' out of the short-stream container `sst'
 * into buf+offs.  `len' must equal the header's short-sector size.
 *
 * SECURITY: `id' comes from the untrusted input file; the bounds check
 * against sst_len below prevents an out-of-bounds read from a crafted
 * file.  Do not remove it.
 */
ssize_t
cdf_read_short_sector(const cdf_stream_t *sst, void *buf, size_t offs,
    size_t len, const cdf_header_t *h, cdf_secid_t id)
{
	size_t ss = CDF_SHORT_SEC_SIZE(h);
	size_t pos = CDF_SHORT_SEC_POS(h, id);

	assert(ss == len);
	if (sst->sst_len < (size_t)id) {
		DPRINTF(("bad sector id %d > %d\n", id, sst->sst_len));
		return -1;
	}
	(void)memcpy(((char *)buf) + offs,
	    ((const char *)sst->sst_tab) + pos, len);
	return len;
}

/*
 * Read the sector allocation table.
*/ int cdf_read_sat(const cdf_info_t *info, cdf_header_t *h, cdf_sat_t *sat) { size_t i, j, k; size_t ss = CDF_SEC_SIZE(h); cdf_secid_t *msa, mid, sec; size_t nsatpersec = (ss / sizeof(mid)) - 1; for (i = 0; i < __arraycount(h->h_master_sat); i++) if (h->h_master_sat[i] == CDF_SECID_FREE) break; #define CDF_SEC_LIMIT (UINT32_MAX / (4 * ss)) if ((nsatpersec > 0 && h->h_num_sectors_in_master_sat > CDF_SEC_LIMIT / nsatpersec) || i > CDF_SEC_LIMIT) { DPRINTF(("Number of sectors in master SAT too big %u %" SIZE_T_FORMAT "u\n", h->h_num_sectors_in_master_sat, i)); errno = EFTYPE; return -1; } sat->sat_len = h->h_num_sectors_in_master_sat * nsatpersec + i; DPRINTF(("sat_len = %" SIZE_T_FORMAT "u ss = %" SIZE_T_FORMAT "u\n", sat->sat_len, ss)); if ((sat->sat_tab = CAST(cdf_secid_t *, calloc(sat->sat_len, ss))) == NULL) return -1; for (i = 0; i < __arraycount(h->h_master_sat); i++) { if (h->h_master_sat[i] < 0) break; if (cdf_read_sector(info, sat->sat_tab, ss * i, ss, h, h->h_master_sat[i]) != (ssize_t)ss) { DPRINTF(("Reading sector %d", h->h_master_sat[i])); goto out1; } } if ((msa = CAST(cdf_secid_t *, calloc(1, ss))) == NULL) goto out1; mid = h->h_secid_first_sector_in_master_sat; for (j = 0; j < h->h_num_sectors_in_master_sat; j++) { if (mid < 0) goto out; if (j >= CDF_LOOP_LIMIT) { DPRINTF(("Reading master sector loop limit")); errno = EFTYPE; goto out2; } if (cdf_read_sector(info, msa, 0, ss, h, mid) != (ssize_t)ss) { DPRINTF(("Reading master sector %d", mid)); goto out2; } for (k = 0; k < nsatpersec; k++, i++) { sec = CDF_TOLE4((uint32_t)msa[k]); if (sec < 0) goto out; if (i >= sat->sat_len) { DPRINTF(("Out of bounds reading MSA %u >= %u", i, sat->sat_len)); errno = EFTYPE; goto out2; } if (cdf_read_sector(info, sat->sat_tab, ss * i, ss, h, sec) != (ssize_t)ss) { DPRINTF(("Reading sector %d", CDF_TOLE4(msa[k]))); goto out2; } } mid = CDF_TOLE4((uint32_t)msa[nsatpersec]); } out: sat->sat_len = i; free(msa); return 0; out2: free(msa); out1: free(sat->sat_tab); return 
-1; } size_t cdf_count_chain(const cdf_sat_t *sat, cdf_secid_t sid, size_t size) { size_t i, j; cdf_secid_t maxsector = (cdf_secid_t)(sat->sat_len * size); DPRINTF(("Chain:")); for (j = i = 0; sid >= 0; i++, j++) { DPRINTF((" %d", sid)); if (j >= CDF_LOOP_LIMIT) { DPRINTF(("Counting chain loop limit")); errno = EFTYPE; return (size_t)-1; } if (sid > maxsector) { DPRINTF(("Sector %d > %d\n", sid, maxsector)); errno = EFTYPE; return (size_t)-1; } sid = CDF_TOLE4((uint32_t)sat->sat_tab[sid]); } DPRINTF(("\n")); return i; } int cdf_read_long_sector_chain(const cdf_info_t *info, const cdf_header_t *h, const cdf_sat_t *sat, cdf_secid_t sid, size_t len, cdf_stream_t *scn) { size_t ss = CDF_SEC_SIZE(h), i, j; ssize_t nr; scn->sst_len = cdf_count_chain(sat, sid, ss); scn->sst_dirlen = len; if (scn->sst_len == (size_t)-1) return -1; scn->sst_tab = calloc(scn->sst_len, ss); if (scn->sst_tab == NULL) return -1; for (j = i = 0; sid >= 0; i++, j++) { if (j >= CDF_LOOP_LIMIT) { DPRINTF(("Read long sector chain loop limit")); errno = EFTYPE; goto out; } if (i >= scn->sst_len) { DPRINTF(("Out of bounds reading long sector chain " "%u > %u\n", i, scn->sst_len)); errno = EFTYPE; goto out; } if ((nr = cdf_read_sector(info, scn->sst_tab, i * ss, ss, h, sid)) != (ssize_t)ss) { if (i == scn->sst_len - 1 && nr > 0) { /* Last sector might be truncated */ return 0; } DPRINTF(("Reading long sector chain %d", sid)); goto out; } sid = CDF_TOLE4((uint32_t)sat->sat_tab[sid]); } return 0; out: free(scn->sst_tab); return -1; } int cdf_read_short_sector_chain(const cdf_header_t *h, const cdf_sat_t *ssat, const cdf_stream_t *sst, cdf_secid_t sid, size_t len, cdf_stream_t *scn) { size_t ss = CDF_SHORT_SEC_SIZE(h), i, j; scn->sst_len = cdf_count_chain(ssat, sid, CDF_SEC_SIZE(h)); scn->sst_dirlen = len; if (sst->sst_tab == NULL || scn->sst_len == (size_t)-1) return -1; scn->sst_tab = calloc(scn->sst_len, ss); if (scn->sst_tab == NULL) return -1; for (j = i = 0; sid >= 0; i++, j++) { if (j >= 
CDF_LOOP_LIMIT) { DPRINTF(("Read short sector chain loop limit")); errno = EFTYPE; goto out; } if (i >= scn->sst_len) { DPRINTF(("Out of bounds reading short sector chain " "%u > %u\n", i, scn->sst_len)); errno = EFTYPE; goto out; } if (cdf_read_short_sector(sst, scn->sst_tab, i * ss, ss, h, sid) != (ssize_t)ss) { DPRINTF(("Reading short sector chain %d", sid)); goto out; } sid = CDF_TOLE4((uint32_t)ssat->sat_tab[sid]); } return 0; out: free(scn->sst_tab); return -1; } int cdf_read_sector_chain(const cdf_info_t *info, const cdf_header_t *h, const cdf_sat_t *sat, const cdf_sat_t *ssat, const cdf_stream_t *sst, cdf_secid_t sid, size_t len, cdf_stream_t *scn) { if (len < h->h_min_size_standard_stream && sst->sst_tab != NULL) return cdf_read_short_sector_chain(h, ssat, sst, sid, len, scn); else return cdf_read_long_sector_chain(info, h, sat, sid, len, scn); } int cdf_read_dir(const cdf_info_t *info, const cdf_header_t *h, const cdf_sat_t *sat, cdf_dir_t *dir) { size_t i, j; size_t ss = CDF_SEC_SIZE(h), ns, nd; char *buf; cdf_secid_t sid = h->h_secid_first_directory; ns = cdf_count_chain(sat, sid, ss); if (ns == (size_t)-1) return -1; nd = ss / CDF_DIRECTORY_SIZE; dir->dir_len = ns * nd; dir->dir_tab = CAST(cdf_directory_t *, calloc(dir->dir_len, sizeof(dir->dir_tab[0]))); if (dir->dir_tab == NULL) return -1; if ((buf = CAST(char *, malloc(ss))) == NULL) { free(dir->dir_tab); return -1; } for (j = i = 0; i < ns; i++, j++) { if (j >= CDF_LOOP_LIMIT) { DPRINTF(("Read dir loop limit")); errno = EFTYPE; goto out; } if (cdf_read_sector(info, buf, 0, ss, h, sid) != (ssize_t)ss) { DPRINTF(("Reading directory sector %d", sid)); goto out; } for (j = 0; j < nd; j++) { cdf_unpack_dir(&dir->dir_tab[i * nd + j], &buf[j * CDF_DIRECTORY_SIZE]); } sid = CDF_TOLE4((uint32_t)sat->sat_tab[sid]); } if (NEED_SWAP) for (i = 0; i < dir->dir_len; i++) cdf_swap_dir(&dir->dir_tab[i]); free(buf); return 0; out: free(dir->dir_tab); free(buf); return -1; } int cdf_read_ssat(const cdf_info_t *info, 
const cdf_header_t *h, const cdf_sat_t *sat, cdf_sat_t *ssat) { size_t i, j; size_t ss = CDF_SEC_SIZE(h); cdf_secid_t sid = h->h_secid_first_sector_in_short_sat; ssat->sat_len = cdf_count_chain(sat, sid, CDF_SEC_SIZE(h)); if (ssat->sat_len == (size_t)-1) return -1; ssat->sat_tab = CAST(cdf_secid_t *, calloc(ssat->sat_len, ss)); if (ssat->sat_tab == NULL) return -1; for (j = i = 0; sid >= 0; i++, j++) { if (j >= CDF_LOOP_LIMIT) { DPRINTF(("Read short sat sector loop limit")); errno = EFTYPE; goto out; } if (i >= ssat->sat_len) { DPRINTF(("Out of bounds reading short sector chain " "%u > %u\n", i, ssat->sat_len)); errno = EFTYPE; goto out; } if (cdf_read_sector(info, ssat->sat_tab, i * ss, ss, h, sid) != (ssize_t)ss) { DPRINTF(("Reading short sat sector %d", sid)); goto out; } sid = CDF_TOLE4((uint32_t)sat->sat_tab[sid]); } return 0; out: free(ssat->sat_tab); return -1; } int cdf_read_short_stream(const cdf_info_t *info, const cdf_header_t *h, const cdf_sat_t *sat, const cdf_dir_t *dir, cdf_stream_t *scn) { size_t i; const cdf_directory_t *d; for (i = 0; i < dir->dir_len; i++) if (dir->dir_tab[i].d_type == CDF_DIR_TYPE_ROOT_STORAGE) break; /* If the it is not there, just fake it; some docs don't have it */ if (i == dir->dir_len) goto out; d = &dir->dir_tab[i]; /* If the it is not there, just fake it; some docs don't have it */ if (d->d_stream_first_sector < 0) goto out; return cdf_read_long_sector_chain(info, h, sat, d->d_stream_first_sector, d->d_size, scn); out: scn->sst_tab = NULL; scn->sst_len = 0; scn->sst_dirlen = 0; return 0; } static int cdf_namecmp(const char *d, const uint16_t *s, size_t l) { for (; l--; d++, s++) if (*d != CDF_TOLE2(*s)) return (unsigned char)*d - CDF_TOLE2(*s); return 0; } int cdf_read_summary_info(const cdf_info_t *info, const cdf_header_t *h, const cdf_sat_t *sat, const cdf_sat_t *ssat, const cdf_stream_t *sst, const cdf_dir_t *dir, cdf_stream_t *scn) { size_t i; const cdf_directory_t *d; static const char name[] = 
"\05SummaryInformation"; for (i = dir->dir_len; i > 0; i--) if (dir->dir_tab[i - 1].d_type == CDF_DIR_TYPE_USER_STREAM && cdf_namecmp(name, dir->dir_tab[i - 1].d_name, sizeof(name)) == 0) break; if (i == 0) { DPRINTF(("Cannot find summary information section\n")); errno = ESRCH; return -1; } d = &dir->dir_tab[i - 1]; return cdf_read_sector_chain(info, h, sat, ssat, sst, d->d_stream_first_sector, d->d_size, scn); } int cdf_read_property_info(const cdf_stream_t *sst, const cdf_header_t *h, uint32_t offs, cdf_property_info_t **info, size_t *count, size_t *maxcount) { const cdf_section_header_t *shp; cdf_section_header_t sh; const uint8_t *p, *q, *e; int16_t s16; int32_t s32; uint32_t u32; int64_t s64; uint64_t u64; cdf_timestamp_t tp; size_t i, o, o4, nelements, j; cdf_property_info_t *inp; if (offs > UINT32_MAX / 4) { errno = EFTYPE; goto out; } shp = CAST(const cdf_section_header_t *, (const void *) ((const char *)sst->sst_tab + offs)); if (cdf_check_stream_offset(sst, h, shp, sizeof(*shp), __LINE__) == -1) goto out; sh.sh_len = CDF_TOLE4(shp->sh_len); #define CDF_SHLEN_LIMIT (UINT32_MAX / 8) if (sh.sh_len > CDF_SHLEN_LIMIT) { errno = EFTYPE; goto out; } sh.sh_properties = CDF_TOLE4(shp->sh_properties); #define CDF_PROP_LIMIT (UINT32_MAX / (4 * sizeof(*inp))) if (sh.sh_properties > CDF_PROP_LIMIT) goto out; DPRINTF(("section len: %u properties %u\n", sh.sh_len, sh.sh_properties)); if (*maxcount) { if (*maxcount > CDF_PROP_LIMIT) goto out; *maxcount += sh.sh_properties; inp = CAST(cdf_property_info_t *, realloc(*info, *maxcount * sizeof(*inp))); } else { *maxcount = sh.sh_properties; inp = CAST(cdf_property_info_t *, malloc(*maxcount * sizeof(*inp))); } if (inp == NULL) goto out; *info = inp; inp += *count; *count += sh.sh_properties; p = CAST(const uint8_t *, (const void *) ((const char *)(const void *)sst->sst_tab + offs + sizeof(sh))); e = CAST(const uint8_t *, (const void *) (((const char *)(const void *)shp) + sh.sh_len)); if (cdf_check_stream_offset(sst, h, e, 
0, __LINE__) == -1) goto out; for (i = 0; i < sh.sh_properties; i++) { q = (const uint8_t *)(const void *) ((const char *)(const void *)p + CDF_GETUINT32(p, (i << 1) + 1)) - 2 * sizeof(uint32_t); if (q > e) { DPRINTF(("Ran of the end %p > %p\n", q, e)); goto out; } inp[i].pi_id = CDF_GETUINT32(p, i << 1); inp[i].pi_type = CDF_GETUINT32(q, 0); DPRINTF(("%d) id=%x type=%x offs=%x,%d\n", i, inp[i].pi_id, inp[i].pi_type, q - p, CDF_GETUINT32(p, (i << 1) + 1))); if (inp[i].pi_type & CDF_VECTOR) { nelements = CDF_GETUINT32(q, 1); o = 2; } else { nelements = 1; o = 1; } o4 = o * sizeof(uint32_t); if (inp[i].pi_type & (CDF_ARRAY|CDF_BYREF|CDF_RESERVED)) goto unknown; switch (inp[i].pi_type & CDF_TYPEMASK) { case CDF_NULL: case CDF_EMPTY: break; case CDF_SIGNED16: if (inp[i].pi_type & CDF_VECTOR) goto unknown; (void)memcpy(&s16, &q[o4], sizeof(s16)); inp[i].pi_s16 = CDF_TOLE2(s16); break; case CDF_SIGNED32: if (inp[i].pi_type & CDF_VECTOR) goto unknown; (void)memcpy(&s32, &q[o4], sizeof(s32)); inp[i].pi_s32 = CDF_TOLE4((uint32_t)s32); break; case CDF_BOOL: case CDF_UNSIGNED32: if (inp[i].pi_type & CDF_VECTOR) goto unknown; (void)memcpy(&u32, &q[o4], sizeof(u32)); inp[i].pi_u32 = CDF_TOLE4(u32); break; case CDF_SIGNED64: if (inp[i].pi_type & CDF_VECTOR) goto unknown; (void)memcpy(&s64, &q[o4], sizeof(s64)); inp[i].pi_s64 = CDF_TOLE8((uint64_t)s64); break; case CDF_UNSIGNED64: if (inp[i].pi_type & CDF_VECTOR) goto unknown; (void)memcpy(&u64, &q[o4], sizeof(u64)); inp[i].pi_u64 = CDF_TOLE8((uint64_t)u64); break; case CDF_LENGTH32_STRING: case CDF_LENGTH32_WSTRING: if (nelements > 1) { size_t nelem = inp - *info; if (*maxcount > CDF_PROP_LIMIT || nelements > CDF_PROP_LIMIT) goto out; *maxcount += nelements; inp = CAST(cdf_property_info_t *, realloc(*info, *maxcount * sizeof(*inp))); if (inp == NULL) goto out; *info = inp; inp = *info + nelem; } DPRINTF(("nelements = %d\n", nelements)); for (j = 0; j < nelements; j++, i++) { uint32_t l = CDF_GETUINT32(q, o); inp[i].pi_str.s_len 
= l; inp[i].pi_str.s_buf = (const char *) (const void *)(&q[o4 + sizeof(l)]); DPRINTF(("l = %d, r = %d, s = %s\n", l, CDF_ROUND(l, sizeof(l)), inp[i].pi_str.s_buf)); l = 4 + (uint32_t)CDF_ROUND(l, sizeof(l)); o += l >> 2; if (q + o >= e) goto out; o4 = o * sizeof(uint32_t); } i--; break; case CDF_FILETIME: if (inp[i].pi_type & CDF_VECTOR) goto unknown; (void)memcpy(&tp, &q[o4], sizeof(tp)); inp[i].pi_tp = CDF_TOLE8((uint64_t)tp); break; case CDF_CLIPBOARD: if (inp[i].pi_type & CDF_VECTOR) goto unknown; break; default: unknown: DPRINTF(("Don't know how to deal with %x\n", inp[i].pi_type)); goto out; } } return 0; out: free(*info); return -1; } int cdf_unpack_summary_info(const cdf_stream_t *sst, const cdf_header_t *h, cdf_summary_info_header_t *ssi, cdf_property_info_t **info, size_t *count) { size_t i, maxcount; const cdf_summary_info_header_t *si = CAST(const cdf_summary_info_header_t *, sst->sst_tab); const cdf_section_declaration_t *sd = CAST(const cdf_section_declaration_t *, (const void *) ((const char *)sst->sst_tab + CDF_SECTION_DECLARATION_OFFSET)); if (cdf_check_stream_offset(sst, h, si, sizeof(*si), __LINE__) == -1 || cdf_check_stream_offset(sst, h, sd, sizeof(*sd), __LINE__) == -1) return -1; ssi->si_byte_order = CDF_TOLE2(si->si_byte_order); ssi->si_os_version = CDF_TOLE2(si->si_os_version); ssi->si_os = CDF_TOLE2(si->si_os); ssi->si_class = si->si_class; cdf_swap_class(&ssi->si_class); ssi->si_count = CDF_TOLE2(si->si_count); *count = 0; maxcount = 0; *info = NULL; for (i = 0; i < CDF_TOLE4(si->si_count); i++) { if (i >= CDF_LOOP_LIMIT) { DPRINTF(("Unpack summary info loop limit")); errno = EFTYPE; return -1; } if (cdf_read_property_info(sst, h, CDF_TOLE4(sd->sd_offset), info, count, &maxcount) == -1) return -1; } return 0; } int cdf_print_classid(char *buf, size_t buflen, const cdf_classid_t *id) { return snprintf(buf, buflen, "%.8x-%.4x-%.4x-%.2x%.2x-" "%.2x%.2x%.2x%.2x%.2x%.2x", id->cl_dword, id->cl_word[0], id->cl_word[1], id->cl_two[0], 
id->cl_two[1], id->cl_six[0], id->cl_six[1], id->cl_six[2], id->cl_six[3], id->cl_six[4], id->cl_six[5]); } static const struct { uint32_t v; const char *n; } vn[] = { { CDF_PROPERTY_CODE_PAGE, "Code page" }, { CDF_PROPERTY_TITLE, "Title" }, { CDF_PROPERTY_SUBJECT, "Subject" }, { CDF_PROPERTY_AUTHOR, "Author" }, { CDF_PROPERTY_KEYWORDS, "Keywords" }, { CDF_PROPERTY_COMMENTS, "Comments" }, { CDF_PROPERTY_TEMPLATE, "Template" }, { CDF_PROPERTY_LAST_SAVED_BY, "Last Saved By" }, { CDF_PROPERTY_REVISION_NUMBER, "Revision Number" }, { CDF_PROPERTY_TOTAL_EDITING_TIME, "Total Editing Time" }, { CDF_PROPERTY_LAST_PRINTED, "Last Printed" }, { CDF_PROPERTY_CREATE_TIME, "Create Time/Date" }, { CDF_PROPERTY_LAST_SAVED_TIME, "Last Saved Time/Date" }, { CDF_PROPERTY_NUMBER_OF_PAGES, "Number of Pages" }, { CDF_PROPERTY_NUMBER_OF_WORDS, "Number of Words" }, { CDF_PROPERTY_NUMBER_OF_CHARACTERS, "Number of Characters" }, { CDF_PROPERTY_THUMBNAIL, "Thumbnail" }, { CDF_PROPERTY_NAME_OF_APPLICATION, "Name of Creating Application" }, { CDF_PROPERTY_SECURITY, "Security" }, { CDF_PROPERTY_LOCALE_ID, "Locale ID" }, }; int cdf_print_property_name(char *buf, size_t bufsiz, uint32_t p) { size_t i; for (i = 0; i < __arraycount(vn); i++) if (vn[i].v == p) return snprintf(buf, bufsiz, "%s", vn[i].n); return snprintf(buf, bufsiz, "0x%x", p); } int cdf_print_elapsed_time(char *buf, size_t bufsiz, cdf_timestamp_t ts) { int len = 0; int days, hours, mins, secs; ts /= CDF_TIME_PREC; secs = (int)(ts % 60); ts /= 60; mins = (int)(ts % 60); ts /= 60; hours = (int)(ts % 24); ts /= 24; days = (int)ts; if (days) { len += snprintf(buf + len, bufsiz - len, "%dd+", days); if ((size_t)len >= bufsiz) return len; } if (days || hours) { len += snprintf(buf + len, bufsiz - len, "%.2d:", hours); if ((size_t)len >= bufsiz) return len; } len += snprintf(buf + len, bufsiz - len, "%.2d:", mins); if ((size_t)len >= bufsiz) return len; len += snprintf(buf + len, bufsiz - len, "%.2d", secs); return len; } #ifdef CDF_DEBUG 
void cdf_dump_header(const cdf_header_t *h) { size_t i; #define DUMP(a, b) (void)fprintf(stderr, "%40.40s = " a "\n", # b, h->h_ ## b) #define DUMP2(a, b) (void)fprintf(stderr, "%40.40s = " a " (" a ")\n", # b, \ h->h_ ## b, 1 << h->h_ ## b) DUMP("%d", revision); DUMP("%d", version); DUMP("0x%x", byte_order); DUMP2("%d", sec_size_p2); DUMP2("%d", short_sec_size_p2); DUMP("%d", num_sectors_in_sat); DUMP("%d", secid_first_directory); DUMP("%d", min_size_standard_stream); DUMP("%d", secid_first_sector_in_short_sat); DUMP("%d", num_sectors_in_short_sat); DUMP("%d", secid_first_sector_in_master_sat); DUMP("%d", num_sectors_in_master_sat); for (i = 0; i < __arraycount(h->h_master_sat); i++) { if (h->h_master_sat[i] == CDF_SECID_FREE) break; (void)fprintf(stderr, "%35.35s[%.3zu] = %d\n", "master_sat", i, h->h_master_sat[i]); } } void cdf_dump_sat(const char *prefix, const cdf_sat_t *sat, size_t size) { size_t i, j, s = size / sizeof(cdf_secid_t); for (i = 0; i < sat->sat_len; i++) { (void)fprintf(stderr, "%s[%" SIZE_T_FORMAT "u]:\n%.6d: ", prefix, i, i * s); for (j = 0; j < s; j++) { (void)fprintf(stderr, "%5d, ", CDF_TOLE4(sat->sat_tab[s * i + j])); if ((j + 1) % 10 == 0) (void)fprintf(stderr, "\n%.6d: ", i * s + j + 1); } (void)fprintf(stderr, "\n"); } } void cdf_dump(void *v, size_t len) { size_t i, j; unsigned char *p = v; char abuf[16]; (void)fprintf(stderr, "%.4x: ", 0); for (i = 0, j = 0; i < len; i++, p++) { (void)fprintf(stderr, "%.2x ", *p); abuf[j++] = isprint(*p) ? *p : '.'; if (j == 16) { j = 0; abuf[15] = '\0'; (void)fprintf(stderr, "%s\n%.4x: ", abuf, i + 1); } } (void)fprintf(stderr, "\n"); } void cdf_dump_stream(const cdf_header_t *h, const cdf_stream_t *sst) { size_t ss = sst->sst_dirlen < h->h_min_size_standard_stream ? 
CDF_SHORT_SEC_SIZE(h) : CDF_SEC_SIZE(h); cdf_dump(sst->sst_tab, ss * sst->sst_len); } void cdf_dump_dir(const cdf_info_t *info, const cdf_header_t *h, const cdf_sat_t *sat, const cdf_sat_t *ssat, const cdf_stream_t *sst, const cdf_dir_t *dir) { size_t i, j; cdf_directory_t *d; char name[__arraycount(d->d_name)]; cdf_stream_t scn; struct timespec ts; static const char *types[] = { "empty", "user storage", "user stream", "lockbytes", "property", "root storage" }; for (i = 0; i < dir->dir_len; i++) { d = &dir->dir_tab[i]; for (j = 0; j < sizeof(name); j++) name[j] = (char)CDF_TOLE2(d->d_name[j]); (void)fprintf(stderr, "Directory %" SIZE_T_FORMAT "u: %s\n", i, name); if (d->d_type < __arraycount(types)) (void)fprintf(stderr, "Type: %s\n", types[d->d_type]); else (void)fprintf(stderr, "Type: %d\n", d->d_type); (void)fprintf(stderr, "Color: %s\n", d->d_color ? "black" : "red"); (void)fprintf(stderr, "Left child: %d\n", d->d_left_child); (void)fprintf(stderr, "Right child: %d\n", d->d_right_child); (void)fprintf(stderr, "Flags: 0x%x\n", d->d_flags); cdf_timestamp_to_timespec(&ts, d->d_created); (void)fprintf(stderr, "Created %s", cdf_ctime(&ts.tv_sec)); cdf_timestamp_to_timespec(&ts, d->d_modified); (void)fprintf(stderr, "Modified %s", cdf_ctime(&ts.tv_sec)); (void)fprintf(stderr, "Stream %d\n", d->d_stream_first_sector); (void)fprintf(stderr, "Size %d\n", d->d_size); switch (d->d_type) { case CDF_DIR_TYPE_USER_STORAGE: (void)fprintf(stderr, "Storage: %d\n", d->d_storage); break; case CDF_DIR_TYPE_USER_STREAM: if (sst == NULL) break; if (cdf_read_sector_chain(info, h, sat, ssat, sst, d->d_stream_first_sector, d->d_size, &scn) == -1) { warn("Can't read stream for %s at %d len %d", name, d->d_stream_first_sector, d->d_size); break; } cdf_dump_stream(h, &scn); free(scn.sst_tab); break; default: break; } } } void cdf_dump_property_info(const cdf_property_info_t *info, size_t count) { cdf_timestamp_t tp; struct timespec ts; char buf[64]; size_t i, j; for (i = 0; i < count; 
i++) { cdf_print_property_name(buf, sizeof(buf), info[i].pi_id); (void)fprintf(stderr, "%" SIZE_T_FORMAT "u) %s: ", i, buf); switch (info[i].pi_type) { case CDF_NULL: break; case CDF_SIGNED16: (void)fprintf(stderr, "signed 16 [%hd]\n", info[i].pi_s16); break; case CDF_SIGNED32: (void)fprintf(stderr, "signed 32 [%d]\n", info[i].pi_s32); break; case CDF_UNSIGNED32: (void)fprintf(stderr, "unsigned 32 [%u]\n", info[i].pi_u32); break; case CDF_LENGTH32_STRING: (void)fprintf(stderr, "string %u [%.*s]\n", info[i].pi_str.s_len, info[i].pi_str.s_len, info[i].pi_str.s_buf); break; case CDF_LENGTH32_WSTRING: (void)fprintf(stderr, "string %u [", info[i].pi_str.s_len); for (j = 0; j < info[i].pi_str.s_len - 1; j++) (void)fputc(info[i].pi_str.s_buf[j << 1], stderr); (void)fprintf(stderr, "]\n"); break; case CDF_FILETIME: tp = info[i].pi_tp; if (tp < 1000000000000000LL) { cdf_print_elapsed_time(buf, sizeof(buf), tp); (void)fprintf(stderr, "timestamp %s\n", buf); } else { cdf_timestamp_to_timespec(&ts, tp); (void)fprintf(stderr, "timestamp %s", cdf_ctime(&ts.tv_sec)); } break; case CDF_CLIPBOARD: (void)fprintf(stderr, "CLIPBOARD %u\n", info[i].pi_u32); break; default: DPRINTF(("Don't know how to deal with %x\n", info[i].pi_type)); break; } } } void cdf_dump_summary_info(const cdf_header_t *h, const cdf_stream_t *sst) { char buf[128]; cdf_summary_info_header_t ssi; cdf_property_info_t *info; size_t count; (void)&h; if (cdf_unpack_summary_info(sst, h, &ssi, &info, &count) == -1) return; (void)fprintf(stderr, "Endian: %x\n", ssi.si_byte_order); (void)fprintf(stderr, "Os Version %d.%d\n", ssi.si_os_version & 0xff, ssi.si_os_version >> 8); (void)fprintf(stderr, "Os %d\n", ssi.si_os); cdf_print_classid(buf, sizeof(buf), &ssi.si_class); (void)fprintf(stderr, "Class %s\n", buf); (void)fprintf(stderr, "Count %d\n", ssi.si_count); cdf_dump_property_info(info, count); free(info); } #endif #ifdef TEST int main(int argc, char *argv[]) { int i; cdf_header_t h; cdf_sat_t sat, ssat; cdf_stream_t 
sst, scn; cdf_dir_t dir; cdf_info_t info; if (argc < 2) { (void)fprintf(stderr, "Usage: %s <filename>\n", getprogname()); return -1; } info.i_buf = NULL; info.i_len = 0; for (i = 1; i < argc; i++) { if ((info.i_fd = open(argv[1], O_RDONLY)) == -1) err(1, "Cannot open `%s'", argv[1]); if (cdf_read_header(&info, &h) == -1) err(1, "Cannot read header"); #ifdef CDF_DEBUG cdf_dump_header(&h); #endif if (cdf_read_sat(&info, &h, &sat) == -1) err(1, "Cannot read sat"); #ifdef CDF_DEBUG cdf_dump_sat("SAT", &sat, CDF_SEC_SIZE(&h)); #endif if (cdf_read_ssat(&info, &h, &sat, &ssat) == -1) err(1, "Cannot read ssat"); #ifdef CDF_DEBUG cdf_dump_sat("SSAT", &ssat, CDF_SHORT_SEC_SIZE(&h)); #endif if (cdf_read_dir(&info, &h, &sat, &dir) == -1) err(1, "Cannot read dir"); if (cdf_read_short_stream(&info, &h, &sat, &dir, &sst) == -1) err(1, "Cannot read short stream"); #ifdef CDF_DEBUG cdf_dump_stream(&h, &sst); #endif #ifdef CDF_DEBUG cdf_dump_dir(&info, &h, &sat, &ssat, &sst, &dir); #endif if (cdf_read_summary_info(&info, &h, &sat, &ssat, &sst, &dir, &scn) == -1) err(1, "Cannot read summary info"); #ifdef CDF_DEBUG cdf_dump_summary_info(&h, &scn); #endif (void)close(info.i_fd); } return 0; } #endif
cdf_read_short_sector(const cdf_stream_t *sst, void *buf, size_t offs, size_t len, const cdf_header_t *h, cdf_secid_t id) { assert((size_t)CDF_SHORT_SEC_SIZE(h) == len); (void)memcpy(((char *)buf) + offs, ((const char *)sst->sst_tab) + CDF_SHORT_SEC_POS(h, id), len); return len; }
cdf_read_short_sector(const cdf_stream_t *sst, void *buf, size_t offs, size_t len, const cdf_header_t *h, cdf_secid_t id) { size_t ss = CDF_SHORT_SEC_SIZE(h); size_t pos = CDF_SHORT_SEC_POS(h, id); assert(ss == len); if (sst->sst_len < (size_t)id) { DPRINTF(("bad sector id %d > %d\n", id, sst->sst_len)); return -1; } (void)memcpy(((char *)buf) + offs, ((const char *)sst->sst_tab) + pos, len); return len; }
{'added': [(38, 'FILE_RCSID("@(#)$File: cdf.c,v 1.46 2011/09/16 21:23:59 christos Exp $")'), (344, '\tsize_t ss = CDF_SEC_SIZE(h);'), (345, '\tsize_t pos = CDF_SEC_POS(h, id);'), (346, '\tassert(ss == len);'), (347, '\treturn cdf_read(info, (off_t)pos, ((char *)buf) + offs, len);'), (354, '\tsize_t ss = CDF_SHORT_SEC_SIZE(h);'), (355, '\tsize_t pos = CDF_SHORT_SEC_POS(h, id);'), (356, '\tassert(ss == len);'), (357, '\tif (sst->sst_len < (size_t)id) {'), (358, '\t\tDPRINTF(("bad sector id %d > %d\\n", id, sst->sst_len));'), (359, '\t\treturn -1;'), (360, '\t}'), (362, '\t ((const char *)sst->sst_tab) + pos, len);'), (878, '\t\t\t\tif (q + o >= e)'), (879, '\t\t\t\t\tgoto out;')], 'deleted': [(38, 'FILE_RCSID("@(#)$File: cdf.c,v 1.45 2011/08/28 08:38:48 christos Exp $")'), (344, '\tassert((size_t)CDF_SEC_SIZE(h) == len);'), (345, '\treturn cdf_read(info, (off_t)CDF_SEC_POS(h, id),'), (346, '\t ((char *)buf) + offs, len);'), (353, '\tassert((size_t)CDF_SHORT_SEC_SIZE(h) == len);'), (355, '\t ((const char *)sst->sst_tab) + CDF_SHORT_SEC_POS(h, id), len);')]}
15
6
1,092
8,038
8
79
1
https://github.com/glensc/file
CVE-2012-1571
CWE-119
2,691
arp_tables.c
C
mark_source_chains
/* * Packet matching code for ARP packets. * * Based heavily, if not almost entirely, upon ip_tables.c framework. * * Some ARP specific bits are: * * Copyright (C) 2002 David S. Miller (davem@redhat.com) * Copyright (C) 2006-2009 Patrick McHardy <kaber@trash.net> * */ #define pr_fmt(fmt) KBUILD_MODNAME ": " fmt #include <linux/kernel.h> #include <linux/skbuff.h> #include <linux/netdevice.h> #include <linux/capability.h> #include <linux/if_arp.h> #include <linux/kmod.h> #include <linux/vmalloc.h> #include <linux/proc_fs.h> #include <linux/module.h> #include <linux/init.h> #include <linux/mutex.h> #include <linux/err.h> #include <net/compat.h> #include <net/sock.h> #include <asm/uaccess.h> #include <linux/netfilter/x_tables.h> #include <linux/netfilter_arp/arp_tables.h> #include "../../netfilter/xt_repldata.h" MODULE_LICENSE("GPL"); MODULE_AUTHOR("David S. Miller <davem@redhat.com>"); MODULE_DESCRIPTION("arptables core"); /*#define DEBUG_ARP_TABLES*/ /*#define DEBUG_ARP_TABLES_USER*/ #ifdef DEBUG_ARP_TABLES #define dprintf(format, args...) pr_debug(format, ## args) #else #define dprintf(format, args...) #endif #ifdef DEBUG_ARP_TABLES_USER #define duprintf(format, args...) pr_debug(format, ## args) #else #define duprintf(format, args...) #endif #ifdef CONFIG_NETFILTER_DEBUG #define ARP_NF_ASSERT(x) WARN_ON(!(x)) #else #define ARP_NF_ASSERT(x) #endif void *arpt_alloc_initial_table(const struct xt_table *info) { return xt_alloc_initial_table(arpt, ARPT); } EXPORT_SYMBOL_GPL(arpt_alloc_initial_table); static inline int arp_devaddr_compare(const struct arpt_devaddr_info *ap, const char *hdr_addr, int len) { int i, ret; if (len > ARPT_DEV_ADDR_LEN_MAX) len = ARPT_DEV_ADDR_LEN_MAX; ret = 0; for (i = 0; i < len; i++) ret |= (hdr_addr[i] ^ ap->addr[i]) & ap->mask[i]; return ret != 0; } /* * Unfortunately, _b and _mask are not aligned to an int (or long int) * Some arches dont care, unrolling the loop is a win on them. * For other arches, we only have a 16bit alignement. 
*/ static unsigned long ifname_compare(const char *_a, const char *_b, const char *_mask) { #ifdef CONFIG_HAVE_EFFICIENT_UNALIGNED_ACCESS unsigned long ret = ifname_compare_aligned(_a, _b, _mask); #else unsigned long ret = 0; const u16 *a = (const u16 *)_a; const u16 *b = (const u16 *)_b; const u16 *mask = (const u16 *)_mask; int i; for (i = 0; i < IFNAMSIZ/sizeof(u16); i++) ret |= (a[i] ^ b[i]) & mask[i]; #endif return ret; } /* Returns whether packet matches rule or not. */ static inline int arp_packet_match(const struct arphdr *arphdr, struct net_device *dev, const char *indev, const char *outdev, const struct arpt_arp *arpinfo) { const char *arpptr = (char *)(arphdr + 1); const char *src_devaddr, *tgt_devaddr; __be32 src_ipaddr, tgt_ipaddr; long ret; #define FWINV(bool, invflg) ((bool) ^ !!(arpinfo->invflags & (invflg))) if (FWINV((arphdr->ar_op & arpinfo->arpop_mask) != arpinfo->arpop, ARPT_INV_ARPOP)) { dprintf("ARP operation field mismatch.\n"); dprintf("ar_op: %04x info->arpop: %04x info->arpop_mask: %04x\n", arphdr->ar_op, arpinfo->arpop, arpinfo->arpop_mask); return 0; } if (FWINV((arphdr->ar_hrd & arpinfo->arhrd_mask) != arpinfo->arhrd, ARPT_INV_ARPHRD)) { dprintf("ARP hardware address format mismatch.\n"); dprintf("ar_hrd: %04x info->arhrd: %04x info->arhrd_mask: %04x\n", arphdr->ar_hrd, arpinfo->arhrd, arpinfo->arhrd_mask); return 0; } if (FWINV((arphdr->ar_pro & arpinfo->arpro_mask) != arpinfo->arpro, ARPT_INV_ARPPRO)) { dprintf("ARP protocol address format mismatch.\n"); dprintf("ar_pro: %04x info->arpro: %04x info->arpro_mask: %04x\n", arphdr->ar_pro, arpinfo->arpro, arpinfo->arpro_mask); return 0; } if (FWINV((arphdr->ar_hln & arpinfo->arhln_mask) != arpinfo->arhln, ARPT_INV_ARPHLN)) { dprintf("ARP hardware address length mismatch.\n"); dprintf("ar_hln: %02x info->arhln: %02x info->arhln_mask: %02x\n", arphdr->ar_hln, arpinfo->arhln, arpinfo->arhln_mask); return 0; } src_devaddr = arpptr; arpptr += dev->addr_len; memcpy(&src_ipaddr, arpptr, 
sizeof(u32)); arpptr += sizeof(u32); tgt_devaddr = arpptr; arpptr += dev->addr_len; memcpy(&tgt_ipaddr, arpptr, sizeof(u32)); if (FWINV(arp_devaddr_compare(&arpinfo->src_devaddr, src_devaddr, dev->addr_len), ARPT_INV_SRCDEVADDR) || FWINV(arp_devaddr_compare(&arpinfo->tgt_devaddr, tgt_devaddr, dev->addr_len), ARPT_INV_TGTDEVADDR)) { dprintf("Source or target device address mismatch.\n"); return 0; } if (FWINV((src_ipaddr & arpinfo->smsk.s_addr) != arpinfo->src.s_addr, ARPT_INV_SRCIP) || FWINV(((tgt_ipaddr & arpinfo->tmsk.s_addr) != arpinfo->tgt.s_addr), ARPT_INV_TGTIP)) { dprintf("Source or target IP address mismatch.\n"); dprintf("SRC: %pI4. Mask: %pI4. Target: %pI4.%s\n", &src_ipaddr, &arpinfo->smsk.s_addr, &arpinfo->src.s_addr, arpinfo->invflags & ARPT_INV_SRCIP ? " (INV)" : ""); dprintf("TGT: %pI4 Mask: %pI4 Target: %pI4.%s\n", &tgt_ipaddr, &arpinfo->tmsk.s_addr, &arpinfo->tgt.s_addr, arpinfo->invflags & ARPT_INV_TGTIP ? " (INV)" : ""); return 0; } /* Look for ifname matches. */ ret = ifname_compare(indev, arpinfo->iniface, arpinfo->iniface_mask); if (FWINV(ret != 0, ARPT_INV_VIA_IN)) { dprintf("VIA in mismatch (%s vs %s).%s\n", indev, arpinfo->iniface, arpinfo->invflags & ARPT_INV_VIA_IN ? " (INV)" : ""); return 0; } ret = ifname_compare(outdev, arpinfo->outiface, arpinfo->outiface_mask); if (FWINV(ret != 0, ARPT_INV_VIA_OUT)) { dprintf("VIA out mismatch (%s vs %s).%s\n", outdev, arpinfo->outiface, arpinfo->invflags & ARPT_INV_VIA_OUT ? 
" (INV)" : ""); return 0; } return 1; #undef FWINV } static inline int arp_checkentry(const struct arpt_arp *arp) { if (arp->flags & ~ARPT_F_MASK) { duprintf("Unknown flag bits set: %08X\n", arp->flags & ~ARPT_F_MASK); return 0; } if (arp->invflags & ~ARPT_INV_MASK) { duprintf("Unknown invflag bits set: %08X\n", arp->invflags & ~ARPT_INV_MASK); return 0; } return 1; } static unsigned int arpt_error(struct sk_buff *skb, const struct xt_action_param *par) { net_err_ratelimited("arp_tables: error: '%s'\n", (const char *)par->targinfo); return NF_DROP; } static inline const struct xt_entry_target * arpt_get_target_c(const struct arpt_entry *e) { return arpt_get_target((struct arpt_entry *)e); } static inline struct arpt_entry * get_entry(const void *base, unsigned int offset) { return (struct arpt_entry *)(base + offset); } static inline struct arpt_entry *arpt_next_entry(const struct arpt_entry *entry) { return (void *)entry + entry->next_offset; } unsigned int arpt_do_table(struct sk_buff *skb, const struct nf_hook_state *state, struct xt_table *table) { unsigned int hook = state->hook; static const char nulldevname[IFNAMSIZ] __attribute__((aligned(sizeof(long)))); unsigned int verdict = NF_DROP; const struct arphdr *arp; struct arpt_entry *e, **jumpstack; const char *indev, *outdev; const void *table_base; unsigned int cpu, stackidx = 0; const struct xt_table_info *private; struct xt_action_param acpar; unsigned int addend; if (!pskb_may_pull(skb, arp_hdr_len(skb->dev))) return NF_DROP; indev = state->in ? state->in->name : nulldevname; outdev = state->out ? state->out->name : nulldevname; local_bh_disable(); addend = xt_write_recseq_begin(); private = table->private; cpu = smp_processor_id(); /* * Ensure we load private-> members after we've fetched the base * pointer. */ smp_read_barrier_depends(); table_base = private->entries; jumpstack = (struct arpt_entry **)private->jumpstack[cpu]; /* No TEE support for arptables, so no need to switch to alternate * stack. 
All targets that reenter must return absolute verdicts. */ e = get_entry(table_base, private->hook_entry[hook]); acpar.net = state->net; acpar.in = state->in; acpar.out = state->out; acpar.hooknum = hook; acpar.family = NFPROTO_ARP; acpar.hotdrop = false; arp = arp_hdr(skb); do { const struct xt_entry_target *t; struct xt_counters *counter; if (!arp_packet_match(arp, skb->dev, indev, outdev, &e->arp)) { e = arpt_next_entry(e); continue; } counter = xt_get_this_cpu_counter(&e->counters); ADD_COUNTER(*counter, arp_hdr_len(skb->dev), 1); t = arpt_get_target_c(e); /* Standard target? */ if (!t->u.kernel.target->target) { int v; v = ((struct xt_standard_target *)t)->verdict; if (v < 0) { /* Pop from stack? */ if (v != XT_RETURN) { verdict = (unsigned int)(-v) - 1; break; } if (stackidx == 0) { e = get_entry(table_base, private->underflow[hook]); } else { e = jumpstack[--stackidx]; e = arpt_next_entry(e); } continue; } if (table_base + v != arpt_next_entry(e)) { jumpstack[stackidx++] = e; } e = get_entry(table_base, v); continue; } acpar.target = t->u.kernel.target; acpar.targinfo = t->data; verdict = t->u.kernel.target->target(skb, &acpar); /* Target might have changed stuff. */ arp = arp_hdr(skb); if (verdict == XT_CONTINUE) e = arpt_next_entry(e); else /* Verdict */ break; } while (!acpar.hotdrop); xt_write_recseq_end(addend); local_bh_enable(); if (acpar.hotdrop) return NF_DROP; else return verdict; } /* All zeroes == unconditional rule. */ static inline bool unconditional(const struct arpt_arp *arp) { static const struct arpt_arp uncond; return memcmp(arp, &uncond, sizeof(uncond)) == 0; } /* Figures out from what hook each rule can be called: returns 0 if * there are loops. Puts hook bitmask in comefrom. */ static int mark_source_chains(const struct xt_table_info *newinfo, unsigned int valid_hooks, void *entry0) { unsigned int hook; /* No recursion; use packet counter to save back ptrs (reset * to 0 as we leave), and comefrom to save source hook bitmask. 
*/ for (hook = 0; hook < NF_ARP_NUMHOOKS; hook++) { unsigned int pos = newinfo->hook_entry[hook]; struct arpt_entry *e = (struct arpt_entry *)(entry0 + pos); if (!(valid_hooks & (1 << hook))) continue; /* Set initial back pointer. */ e->counters.pcnt = pos; for (;;) { const struct xt_standard_target *t = (void *)arpt_get_target_c(e); int visited = e->comefrom & (1 << hook); if (e->comefrom & (1 << NF_ARP_NUMHOOKS)) { pr_notice("arptables: loop hook %u pos %u %08X.\n", hook, pos, e->comefrom); return 0; } e->comefrom |= ((1 << hook) | (1 << NF_ARP_NUMHOOKS)); /* Unconditional return/END. */ if ((e->target_offset == sizeof(struct arpt_entry) && (strcmp(t->target.u.user.name, XT_STANDARD_TARGET) == 0) && t->verdict < 0 && unconditional(&e->arp)) || visited) { unsigned int oldpos, size; if ((strcmp(t->target.u.user.name, XT_STANDARD_TARGET) == 0) && t->verdict < -NF_MAX_VERDICT - 1) { duprintf("mark_source_chains: bad " "negative verdict (%i)\n", t->verdict); return 0; } /* Return: backtrack through the last * big jump. */ do { e->comefrom ^= (1<<NF_ARP_NUMHOOKS); oldpos = pos; pos = e->counters.pcnt; e->counters.pcnt = 0; /* We're at the start. */ if (pos == oldpos) goto next; e = (struct arpt_entry *) (entry0 + pos); } while (oldpos == pos + e->next_offset); /* Move along one */ size = e->next_offset; e = (struct arpt_entry *) (entry0 + pos + size); e->counters.pcnt = pos; pos += size; } else { int newpos = t->verdict; if (strcmp(t->target.u.user.name, XT_STANDARD_TARGET) == 0 && newpos >= 0) { if (newpos > newinfo->size - sizeof(struct arpt_entry)) { duprintf("mark_source_chains: " "bad verdict (%i)\n", newpos); return 0; } /* This a jump; chase it. */ duprintf("Jump rule %u -> %u\n", pos, newpos); } else { /* ... 
this is a fallthru */ newpos = pos + e->next_offset; } e = (struct arpt_entry *) (entry0 + newpos); e->counters.pcnt = pos; pos = newpos; } } next: duprintf("Finished chain %u\n", hook); } return 1; } static inline int check_entry(const struct arpt_entry *e) { const struct xt_entry_target *t; if (!arp_checkentry(&e->arp)) return -EINVAL; if (e->target_offset + sizeof(struct xt_entry_target) > e->next_offset) return -EINVAL; t = arpt_get_target_c(e); if (e->target_offset + t->u.target_size > e->next_offset) return -EINVAL; return 0; } static inline int check_target(struct arpt_entry *e, const char *name) { struct xt_entry_target *t = arpt_get_target(e); int ret; struct xt_tgchk_param par = { .table = name, .entryinfo = e, .target = t->u.kernel.target, .targinfo = t->data, .hook_mask = e->comefrom, .family = NFPROTO_ARP, }; ret = xt_check_target(&par, t->u.target_size - sizeof(*t), 0, false); if (ret < 0) { duprintf("arp_tables: check failed for `%s'.\n", t->u.kernel.target->name); return ret; } return 0; } static inline int find_check_entry(struct arpt_entry *e, const char *name, unsigned int size) { struct xt_entry_target *t; struct xt_target *target; int ret; e->counters.pcnt = xt_percpu_counter_alloc(); if (IS_ERR_VALUE(e->counters.pcnt)) return -ENOMEM; t = arpt_get_target(e); target = xt_request_find_target(NFPROTO_ARP, t->u.user.name, t->u.user.revision); if (IS_ERR(target)) { duprintf("find_check_entry: `%s' not found\n", t->u.user.name); ret = PTR_ERR(target); goto out; } t->u.kernel.target = target; ret = check_target(e, name); if (ret) goto err; return 0; err: module_put(t->u.kernel.target->me); out: xt_percpu_counter_free(e->counters.pcnt); return ret; } static bool check_underflow(const struct arpt_entry *e) { const struct xt_entry_target *t; unsigned int verdict; if (!unconditional(&e->arp)) return false; t = arpt_get_target_c(e); if (strcmp(t->u.user.name, XT_STANDARD_TARGET) != 0) return false; verdict = ((struct xt_standard_target *)t)->verdict; 
verdict = -verdict - 1;
	/* Only an unconditional ACCEPT or DROP qualifies as an underflow. */
	return verdict == NF_DROP || verdict == NF_ACCEPT;
}

/*
 * Validate the size/alignment of one entry inside the user-supplied
 * blob and record per-hook entry/underflow offsets in @newinfo.
 *
 * @e:            entry to check (points into the copied-in blob)
 * @newinfo:      table info being built; hook_entry[]/underflow[] updated
 * @base/@limit:  start and one-past-end of the blob
 * @hook_entries: user-claimed per-hook start offsets
 * @underflows:   user-claimed per-hook underflow offsets
 * @valid_hooks:  bitmask of hooks this table actually uses
 *
 * Returns 0 on success or -EINVAL on malformed input.  Everything here
 * is untrusted userspace data, hence the strict bounds checking.
 */
static inline int check_entry_size_and_hooks(struct arpt_entry *e,
					     struct xt_table_info *newinfo,
					     const unsigned char *base,
					     const unsigned char *limit,
					     const unsigned int *hook_entries,
					     const unsigned int *underflows,
					     unsigned int valid_hooks)
{
	unsigned int h;
	int err;

	/*
	 * The entry must be properly aligned and must fit — header and
	 * the full next_offset span — inside the blob.
	 */
	if ((unsigned long)e % __alignof__(struct arpt_entry) != 0 ||
	    (unsigned char *)e + sizeof(struct arpt_entry) >= limit ||
	    (unsigned char *)e + e->next_offset > limit) {
		duprintf("Bad offset %p\n", e);
		return -EINVAL;
	}

	/* next_offset must leave room for the entry plus one target. */
	if (e->next_offset
	    < sizeof(struct arpt_entry) + sizeof(struct xt_entry_target)) {
		duprintf("checking: element %p size %u\n",
			 e, e->next_offset);
		return -EINVAL;
	}

	/* Per-entry sanity (flags/invflags, target_offset bounds). */
	err = check_entry(e);
	if (err)
		return err;

	/* Check hooks & underflows */
	for (h = 0; h < NF_ARP_NUMHOOKS; h++) {
		if (!(valid_hooks & (1 << h)))
			continue;
		if ((unsigned char *)e - base == hook_entries[h])
			newinfo->hook_entry[h] = hook_entries[h];
		if ((unsigned char *)e - base == underflows[h]) {
			/*
			 * An underflow is the chain's fallback rule; it
			 * must be unconditional so traversal always
			 * terminates with a real verdict.
			 */
			if (!check_underflow(e)) {
				pr_err("Underflows must be unconditional and "
				       "use the STANDARD target with "
				       "ACCEPT/DROP\n");
				return -EINVAL;
			}
			newinfo->underflow[h] = underflows[h];
		}
	}

	/* Clear counters and comefrom */
	e->counters = ((struct xt_counters) { 0, 0 });
	e->comefrom = 0;
	return 0;
}

/*
 * Undo everything check_target()/find_check_entry() set up for one
 * rule: run the target's destructor, drop its module reference and
 * free the percpu counter.
 */
static inline void cleanup_entry(struct arpt_entry *e)
{
	struct xt_tgdtor_param par;
	struct xt_entry_target *t;

	t = arpt_get_target(e);
	par.target = t->u.kernel.target;
	par.targinfo = t->data;
	par.family = NFPROTO_ARP;
	if (par.target->destroy != NULL)
		par.target->destroy(&par);
	module_put(par.target->me);
	xt_percpu_counter_free(e->counters.pcnt);
}

/* Checks and translates the user-supplied table segment (held in
 * newinfo).
*/ static int translate_table(struct xt_table_info *newinfo, void *entry0, const struct arpt_replace *repl) { struct arpt_entry *iter; unsigned int i; int ret = 0; newinfo->size = repl->size; newinfo->number = repl->num_entries; /* Init all hooks to impossible value. */ for (i = 0; i < NF_ARP_NUMHOOKS; i++) { newinfo->hook_entry[i] = 0xFFFFFFFF; newinfo->underflow[i] = 0xFFFFFFFF; } duprintf("translate_table: size %u\n", newinfo->size); i = 0; /* Walk through entries, checking offsets. */ xt_entry_foreach(iter, entry0, newinfo->size) { ret = check_entry_size_and_hooks(iter, newinfo, entry0, entry0 + repl->size, repl->hook_entry, repl->underflow, repl->valid_hooks); if (ret != 0) break; ++i; if (strcmp(arpt_get_target(iter)->u.user.name, XT_ERROR_TARGET) == 0) ++newinfo->stacksize; } duprintf("translate_table: ARPT_ENTRY_ITERATE gives %d\n", ret); if (ret != 0) return ret; if (i != repl->num_entries) { duprintf("translate_table: %u not %u entries\n", i, repl->num_entries); return -EINVAL; } /* Check hooks all assigned */ for (i = 0; i < NF_ARP_NUMHOOKS; i++) { /* Only hooks which are valid */ if (!(repl->valid_hooks & (1 << i))) continue; if (newinfo->hook_entry[i] == 0xFFFFFFFF) { duprintf("Invalid hook entry %u %u\n", i, repl->hook_entry[i]); return -EINVAL; } if (newinfo->underflow[i] == 0xFFFFFFFF) { duprintf("Invalid underflow %u %u\n", i, repl->underflow[i]); return -EINVAL; } } if (!mark_source_chains(newinfo, repl->valid_hooks, entry0)) { duprintf("Looping hook\n"); return -ELOOP; } /* Finally, each sanity check must pass */ i = 0; xt_entry_foreach(iter, entry0, newinfo->size) { ret = find_check_entry(iter, repl->name, repl->size); if (ret != 0) break; ++i; } if (ret != 0) { xt_entry_foreach(iter, entry0, newinfo->size) { if (i-- == 0) break; cleanup_entry(iter); } return ret; } return ret; } static void get_counters(const struct xt_table_info *t, struct xt_counters counters[]) { struct arpt_entry *iter; unsigned int cpu; unsigned int i; 
for_each_possible_cpu(cpu) { seqcount_t *s = &per_cpu(xt_recseq, cpu); i = 0; xt_entry_foreach(iter, t->entries, t->size) { struct xt_counters *tmp; u64 bcnt, pcnt; unsigned int start; tmp = xt_get_per_cpu_counter(&iter->counters, cpu); do { start = read_seqcount_begin(s); bcnt = tmp->bcnt; pcnt = tmp->pcnt; } while (read_seqcount_retry(s, start)); ADD_COUNTER(counters[i], bcnt, pcnt); ++i; } } } static struct xt_counters *alloc_counters(const struct xt_table *table) { unsigned int countersize; struct xt_counters *counters; const struct xt_table_info *private = table->private; /* We need atomic snapshot of counters: rest doesn't change * (other than comefrom, which userspace doesn't care * about). */ countersize = sizeof(struct xt_counters) * private->number; counters = vzalloc(countersize); if (counters == NULL) return ERR_PTR(-ENOMEM); get_counters(private, counters); return counters; } static int copy_entries_to_user(unsigned int total_size, const struct xt_table *table, void __user *userptr) { unsigned int off, num; const struct arpt_entry *e; struct xt_counters *counters; struct xt_table_info *private = table->private; int ret = 0; void *loc_cpu_entry; counters = alloc_counters(table); if (IS_ERR(counters)) return PTR_ERR(counters); loc_cpu_entry = private->entries; /* ... then copy entire thing ... */ if (copy_to_user(userptr, loc_cpu_entry, total_size) != 0) { ret = -EFAULT; goto free_counters; } /* FIXME: use iterator macros --RR */ /* ... 
then go back and fix counters and names */ for (off = 0, num = 0; off < total_size; off += e->next_offset, num++){ const struct xt_entry_target *t; e = (struct arpt_entry *)(loc_cpu_entry + off); if (copy_to_user(userptr + off + offsetof(struct arpt_entry, counters), &counters[num], sizeof(counters[num])) != 0) { ret = -EFAULT; goto free_counters; } t = arpt_get_target_c(e); if (copy_to_user(userptr + off + e->target_offset + offsetof(struct xt_entry_target, u.user.name), t->u.kernel.target->name, strlen(t->u.kernel.target->name)+1) != 0) { ret = -EFAULT; goto free_counters; } } free_counters: vfree(counters); return ret; } #ifdef CONFIG_COMPAT static void compat_standard_from_user(void *dst, const void *src) { int v = *(compat_int_t *)src; if (v > 0) v += xt_compat_calc_jump(NFPROTO_ARP, v); memcpy(dst, &v, sizeof(v)); } static int compat_standard_to_user(void __user *dst, const void *src) { compat_int_t cv = *(int *)src; if (cv > 0) cv -= xt_compat_calc_jump(NFPROTO_ARP, cv); return copy_to_user(dst, &cv, sizeof(cv)) ? 
-EFAULT : 0; } static int compat_calc_entry(const struct arpt_entry *e, const struct xt_table_info *info, const void *base, struct xt_table_info *newinfo) { const struct xt_entry_target *t; unsigned int entry_offset; int off, i, ret; off = sizeof(struct arpt_entry) - sizeof(struct compat_arpt_entry); entry_offset = (void *)e - base; t = arpt_get_target_c(e); off += xt_compat_target_offset(t->u.kernel.target); newinfo->size -= off; ret = xt_compat_add_offset(NFPROTO_ARP, entry_offset, off); if (ret) return ret; for (i = 0; i < NF_ARP_NUMHOOKS; i++) { if (info->hook_entry[i] && (e < (struct arpt_entry *)(base + info->hook_entry[i]))) newinfo->hook_entry[i] -= off; if (info->underflow[i] && (e < (struct arpt_entry *)(base + info->underflow[i]))) newinfo->underflow[i] -= off; } return 0; } static int compat_table_info(const struct xt_table_info *info, struct xt_table_info *newinfo) { struct arpt_entry *iter; const void *loc_cpu_entry; int ret; if (!newinfo || !info) return -EINVAL; /* we dont care about newinfo->entries */ memcpy(newinfo, info, offsetof(struct xt_table_info, entries)); newinfo->initial_entries = 0; loc_cpu_entry = info->entries; xt_compat_init_offsets(NFPROTO_ARP, info->number); xt_entry_foreach(iter, loc_cpu_entry, info->size) { ret = compat_calc_entry(iter, info, loc_cpu_entry, newinfo); if (ret != 0) return ret; } return 0; } #endif static int get_info(struct net *net, void __user *user, const int *len, int compat) { char name[XT_TABLE_MAXNAMELEN]; struct xt_table *t; int ret; if (*len != sizeof(struct arpt_getinfo)) { duprintf("length %u != %Zu\n", *len, sizeof(struct arpt_getinfo)); return -EINVAL; } if (copy_from_user(name, user, sizeof(name)) != 0) return -EFAULT; name[XT_TABLE_MAXNAMELEN-1] = '\0'; #ifdef CONFIG_COMPAT if (compat) xt_compat_lock(NFPROTO_ARP); #endif t = try_then_request_module(xt_find_table_lock(net, NFPROTO_ARP, name), "arptable_%s", name); if (!IS_ERR_OR_NULL(t)) { struct arpt_getinfo info; const struct xt_table_info *private 
= t->private; #ifdef CONFIG_COMPAT struct xt_table_info tmp; if (compat) { ret = compat_table_info(private, &tmp); xt_compat_flush_offsets(NFPROTO_ARP); private = &tmp; } #endif memset(&info, 0, sizeof(info)); info.valid_hooks = t->valid_hooks; memcpy(info.hook_entry, private->hook_entry, sizeof(info.hook_entry)); memcpy(info.underflow, private->underflow, sizeof(info.underflow)); info.num_entries = private->number; info.size = private->size; strcpy(info.name, name); if (copy_to_user(user, &info, *len) != 0) ret = -EFAULT; else ret = 0; xt_table_unlock(t); module_put(t->me); } else ret = t ? PTR_ERR(t) : -ENOENT; #ifdef CONFIG_COMPAT if (compat) xt_compat_unlock(NFPROTO_ARP); #endif return ret; } static int get_entries(struct net *net, struct arpt_get_entries __user *uptr, const int *len) { int ret; struct arpt_get_entries get; struct xt_table *t; if (*len < sizeof(get)) { duprintf("get_entries: %u < %Zu\n", *len, sizeof(get)); return -EINVAL; } if (copy_from_user(&get, uptr, sizeof(get)) != 0) return -EFAULT; if (*len != sizeof(struct arpt_get_entries) + get.size) { duprintf("get_entries: %u != %Zu\n", *len, sizeof(struct arpt_get_entries) + get.size); return -EINVAL; } t = xt_find_table_lock(net, NFPROTO_ARP, get.name); if (!IS_ERR_OR_NULL(t)) { const struct xt_table_info *private = t->private; duprintf("t->private->number = %u\n", private->number); if (get.size == private->size) ret = copy_entries_to_user(private->size, t, uptr->entrytable); else { duprintf("get_entries: I've got %u not %u!\n", private->size, get.size); ret = -EAGAIN; } module_put(t->me); xt_table_unlock(t); } else ret = t ? 
PTR_ERR(t) : -ENOENT;

	return ret;
}

/*
 * Swap @newinfo in as the active ruleset of table @name and tear down
 * the old one.
 *
 * @net:          owning network namespace
 * @name:         table name (already NUL-terminated by the caller)
 * @valid_hooks:  hook mask the new ruleset claims; must match the table
 * @newinfo:      fully translated replacement ruleset
 * @num_counters: number of counter slots the caller expects back
 * @counters_ptr: userspace buffer receiving the old table's counters
 *
 * Returns 0 on success or a negative errno; on failure @newinfo is left
 * for the caller to free.  Unwinds via labels in reverse acquisition
 * order (counters -> module ref -> table lock).
 */
static int __do_replace(struct net *net, const char *name,
			unsigned int valid_hooks,
			struct xt_table_info *newinfo,
			unsigned int num_counters,
			void __user *counters_ptr)
{
	int ret;
	struct xt_table *t;
	struct xt_table_info *oldinfo;
	struct xt_counters *counters;
	void *loc_cpu_old_entry;
	struct arpt_entry *iter;

	ret = 0;
	counters = vzalloc(num_counters * sizeof(struct xt_counters));
	if (!counters) {
		ret = -ENOMEM;
		goto out;
	}

	t = try_then_request_module(xt_find_table_lock(net, NFPROTO_ARP, name),
				    "arptable_%s", name);
	if (IS_ERR_OR_NULL(t)) {
		ret = t ? PTR_ERR(t) : -ENOENT;
		goto free_newinfo_counters_untrans;
	}

	/* You lied! */
	if (valid_hooks != t->valid_hooks) {
		duprintf("Valid hook crap: %08X vs %08X\n",
			 valid_hooks, t->valid_hooks);
		ret = -EINVAL;
		goto put_module;
	}

	/* Atomically publish the new ruleset; oldinfo is the retired one. */
	oldinfo = xt_replace_table(t, num_counters, newinfo, &ret);
	if (!oldinfo)
		goto put_module;

	/* Update module usage count based on number of rules */
	duprintf("do_replace: oldnum=%u, initnum=%u, newnum=%u\n",
		 oldinfo->number, oldinfo->initial_entries, newinfo->number);
	/*
	 * Drop one reference when the old table had user rules or the new
	 * one is back to only built-ins; drop a second when both hold
	 * (table went from "in use" back to pristine).
	 */
	if ((oldinfo->number > oldinfo->initial_entries) ||
	    (newinfo->number <= oldinfo->initial_entries))
		module_put(t->me);
	if ((oldinfo->number > oldinfo->initial_entries) &&
	    (newinfo->number <= oldinfo->initial_entries))
		module_put(t->me);

	/* Get the old counters, and synchronize with replace */
	get_counters(oldinfo, counters);

	/* Decrease module usage counts and free resource */
	loc_cpu_old_entry = oldinfo->entries;
	xt_entry_foreach(iter, loc_cpu_old_entry, oldinfo->size)
		cleanup_entry(iter);

	xt_free_table_info(oldinfo);
	if (copy_to_user(counters_ptr, counters,
			 sizeof(struct xt_counters) * num_counters) != 0) {
		/* Silent error, can't fail, new table is already in place */
		net_warn_ratelimited("arptables: counters copy to user failed while replacing table\n");
	}
	vfree(counters);
	xt_table_unlock(t);
	return ret;

 put_module:
	module_put(t->me);
	xt_table_unlock(t);
 free_newinfo_counters_untrans:
	vfree(counters);
out: return ret; } static int do_replace(struct net *net, const void __user *user, unsigned int len) { int ret; struct arpt_replace tmp; struct xt_table_info *newinfo; void *loc_cpu_entry; struct arpt_entry *iter; if (copy_from_user(&tmp, user, sizeof(tmp)) != 0) return -EFAULT; /* overflow check */ if (tmp.num_counters >= INT_MAX / sizeof(struct xt_counters)) return -ENOMEM; if (tmp.num_counters == 0) return -EINVAL; tmp.name[sizeof(tmp.name)-1] = 0; newinfo = xt_alloc_table_info(tmp.size); if (!newinfo) return -ENOMEM; loc_cpu_entry = newinfo->entries; if (copy_from_user(loc_cpu_entry, user + sizeof(tmp), tmp.size) != 0) { ret = -EFAULT; goto free_newinfo; } ret = translate_table(newinfo, loc_cpu_entry, &tmp); if (ret != 0) goto free_newinfo; duprintf("arp_tables: Translated table\n"); ret = __do_replace(net, tmp.name, tmp.valid_hooks, newinfo, tmp.num_counters, tmp.counters); if (ret) goto free_newinfo_untrans; return 0; free_newinfo_untrans: xt_entry_foreach(iter, loc_cpu_entry, newinfo->size) cleanup_entry(iter); free_newinfo: xt_free_table_info(newinfo); return ret; } static int do_add_counters(struct net *net, const void __user *user, unsigned int len, int compat) { unsigned int i; struct xt_counters_info tmp; struct xt_counters *paddc; unsigned int num_counters; const char *name; int size; void *ptmp; struct xt_table *t; const struct xt_table_info *private; int ret = 0; struct arpt_entry *iter; unsigned int addend; #ifdef CONFIG_COMPAT struct compat_xt_counters_info compat_tmp; if (compat) { ptmp = &compat_tmp; size = sizeof(struct compat_xt_counters_info); } else #endif { ptmp = &tmp; size = sizeof(struct xt_counters_info); } if (copy_from_user(ptmp, user, size) != 0) return -EFAULT; #ifdef CONFIG_COMPAT if (compat) { num_counters = compat_tmp.num_counters; name = compat_tmp.name; } else #endif { num_counters = tmp.num_counters; name = tmp.name; } if (len != size + num_counters * sizeof(struct xt_counters)) return -EINVAL; paddc = vmalloc(len - size); if 
(!paddc) return -ENOMEM; if (copy_from_user(paddc, user + size, len - size) != 0) { ret = -EFAULT; goto free; } t = xt_find_table_lock(net, NFPROTO_ARP, name); if (IS_ERR_OR_NULL(t)) { ret = t ? PTR_ERR(t) : -ENOENT; goto free; } local_bh_disable(); private = t->private; if (private->number != num_counters) { ret = -EINVAL; goto unlock_up_free; } i = 0; addend = xt_write_recseq_begin(); xt_entry_foreach(iter, private->entries, private->size) { struct xt_counters *tmp; tmp = xt_get_this_cpu_counter(&iter->counters); ADD_COUNTER(*tmp, paddc[i].bcnt, paddc[i].pcnt); ++i; } xt_write_recseq_end(addend); unlock_up_free: local_bh_enable(); xt_table_unlock(t); module_put(t->me); free: vfree(paddc); return ret; } #ifdef CONFIG_COMPAT static inline void compat_release_entry(struct compat_arpt_entry *e) { struct xt_entry_target *t; t = compat_arpt_get_target(e); module_put(t->u.kernel.target->me); } static inline int check_compat_entry_size_and_hooks(struct compat_arpt_entry *e, struct xt_table_info *newinfo, unsigned int *size, const unsigned char *base, const unsigned char *limit, const unsigned int *hook_entries, const unsigned int *underflows, const char *name) { struct xt_entry_target *t; struct xt_target *target; unsigned int entry_offset; int ret, off, h; duprintf("check_compat_entry_size_and_hooks %p\n", e); if ((unsigned long)e % __alignof__(struct compat_arpt_entry) != 0 || (unsigned char *)e + sizeof(struct compat_arpt_entry) >= limit || (unsigned char *)e + e->next_offset > limit) { duprintf("Bad offset %p, limit = %p\n", e, limit); return -EINVAL; } if (e->next_offset < sizeof(struct compat_arpt_entry) + sizeof(struct compat_xt_entry_target)) { duprintf("checking: element %p size %u\n", e, e->next_offset); return -EINVAL; } /* For purposes of check_entry casting the compat entry is fine */ ret = check_entry((struct arpt_entry *)e); if (ret) return ret; off = sizeof(struct arpt_entry) - sizeof(struct compat_arpt_entry); entry_offset = (void *)e - (void *)base; t = 
compat_arpt_get_target(e); target = xt_request_find_target(NFPROTO_ARP, t->u.user.name, t->u.user.revision); if (IS_ERR(target)) { duprintf("check_compat_entry_size_and_hooks: `%s' not found\n", t->u.user.name); ret = PTR_ERR(target); goto out; } t->u.kernel.target = target; off += xt_compat_target_offset(target); *size += off; ret = xt_compat_add_offset(NFPROTO_ARP, entry_offset, off); if (ret) goto release_target; /* Check hooks & underflows */ for (h = 0; h < NF_ARP_NUMHOOKS; h++) { if ((unsigned char *)e - base == hook_entries[h]) newinfo->hook_entry[h] = hook_entries[h]; if ((unsigned char *)e - base == underflows[h]) newinfo->underflow[h] = underflows[h]; } /* Clear counters and comefrom */ memset(&e->counters, 0, sizeof(e->counters)); e->comefrom = 0; return 0; release_target: module_put(t->u.kernel.target->me); out: return ret; } static int compat_copy_entry_from_user(struct compat_arpt_entry *e, void **dstptr, unsigned int *size, const char *name, struct xt_table_info *newinfo, unsigned char *base) { struct xt_entry_target *t; struct xt_target *target; struct arpt_entry *de; unsigned int origsize; int ret, h; ret = 0; origsize = *size; de = (struct arpt_entry *)*dstptr; memcpy(de, e, sizeof(struct arpt_entry)); memcpy(&de->counters, &e->counters, sizeof(e->counters)); *dstptr += sizeof(struct arpt_entry); *size += sizeof(struct arpt_entry) - sizeof(struct compat_arpt_entry); de->target_offset = e->target_offset - (origsize - *size); t = compat_arpt_get_target(e); target = t->u.kernel.target; xt_compat_target_from_user(t, dstptr, size); de->next_offset = e->next_offset - (origsize - *size); for (h = 0; h < NF_ARP_NUMHOOKS; h++) { if ((unsigned char *)de - base < newinfo->hook_entry[h]) newinfo->hook_entry[h] -= origsize - *size; if ((unsigned char *)de - base < newinfo->underflow[h]) newinfo->underflow[h] -= origsize - *size; } return ret; } static int translate_compat_table(const char *name, unsigned int valid_hooks, struct xt_table_info **pinfo, void 
**pentry0, unsigned int total_size, unsigned int number, unsigned int *hook_entries, unsigned int *underflows) { unsigned int i, j; struct xt_table_info *newinfo, *info; void *pos, *entry0, *entry1; struct compat_arpt_entry *iter0; struct arpt_entry *iter1; unsigned int size; int ret = 0; info = *pinfo; entry0 = *pentry0; size = total_size; info->number = number; /* Init all hooks to impossible value. */ for (i = 0; i < NF_ARP_NUMHOOKS; i++) { info->hook_entry[i] = 0xFFFFFFFF; info->underflow[i] = 0xFFFFFFFF; } duprintf("translate_compat_table: size %u\n", info->size); j = 0; xt_compat_lock(NFPROTO_ARP); xt_compat_init_offsets(NFPROTO_ARP, number); /* Walk through entries, checking offsets. */ xt_entry_foreach(iter0, entry0, total_size) { ret = check_compat_entry_size_and_hooks(iter0, info, &size, entry0, entry0 + total_size, hook_entries, underflows, name); if (ret != 0) goto out_unlock; ++j; } ret = -EINVAL; if (j != number) { duprintf("translate_compat_table: %u not %u entries\n", j, number); goto out_unlock; } /* Check hooks all assigned */ for (i = 0; i < NF_ARP_NUMHOOKS; i++) { /* Only hooks which are valid */ if (!(valid_hooks & (1 << i))) continue; if (info->hook_entry[i] == 0xFFFFFFFF) { duprintf("Invalid hook entry %u %u\n", i, hook_entries[i]); goto out_unlock; } if (info->underflow[i] == 0xFFFFFFFF) { duprintf("Invalid underflow %u %u\n", i, underflows[i]); goto out_unlock; } } ret = -ENOMEM; newinfo = xt_alloc_table_info(size); if (!newinfo) goto out_unlock; newinfo->number = number; for (i = 0; i < NF_ARP_NUMHOOKS; i++) { newinfo->hook_entry[i] = info->hook_entry[i]; newinfo->underflow[i] = info->underflow[i]; } entry1 = newinfo->entries; pos = entry1; size = total_size; xt_entry_foreach(iter0, entry0, total_size) { ret = compat_copy_entry_from_user(iter0, &pos, &size, name, newinfo, entry1); if (ret != 0) break; } xt_compat_flush_offsets(NFPROTO_ARP); xt_compat_unlock(NFPROTO_ARP); if (ret) goto free_newinfo; ret = -ELOOP; if 
(!mark_source_chains(newinfo, valid_hooks, entry1)) goto free_newinfo; i = 0; xt_entry_foreach(iter1, entry1, newinfo->size) { iter1->counters.pcnt = xt_percpu_counter_alloc(); if (IS_ERR_VALUE(iter1->counters.pcnt)) { ret = -ENOMEM; break; } ret = check_target(iter1, name); if (ret != 0) { xt_percpu_counter_free(iter1->counters.pcnt); break; } ++i; if (strcmp(arpt_get_target(iter1)->u.user.name, XT_ERROR_TARGET) == 0) ++newinfo->stacksize; } if (ret) { /* * The first i matches need cleanup_entry (calls ->destroy) * because they had called ->check already. The other j-i * entries need only release. */ int skip = i; j -= i; xt_entry_foreach(iter0, entry0, newinfo->size) { if (skip-- > 0) continue; if (j-- == 0) break; compat_release_entry(iter0); } xt_entry_foreach(iter1, entry1, newinfo->size) { if (i-- == 0) break; cleanup_entry(iter1); } xt_free_table_info(newinfo); return ret; } *pinfo = newinfo; *pentry0 = entry1; xt_free_table_info(info); return 0; free_newinfo: xt_free_table_info(newinfo); out: xt_entry_foreach(iter0, entry0, total_size) { if (j-- == 0) break; compat_release_entry(iter0); } return ret; out_unlock: xt_compat_flush_offsets(NFPROTO_ARP); xt_compat_unlock(NFPROTO_ARP); goto out; } struct compat_arpt_replace { char name[XT_TABLE_MAXNAMELEN]; u32 valid_hooks; u32 num_entries; u32 size; u32 hook_entry[NF_ARP_NUMHOOKS]; u32 underflow[NF_ARP_NUMHOOKS]; u32 num_counters; compat_uptr_t counters; struct compat_arpt_entry entries[0]; }; static int compat_do_replace(struct net *net, void __user *user, unsigned int len) { int ret; struct compat_arpt_replace tmp; struct xt_table_info *newinfo; void *loc_cpu_entry; struct arpt_entry *iter; if (copy_from_user(&tmp, user, sizeof(tmp)) != 0) return -EFAULT; /* overflow check */ if (tmp.size >= INT_MAX / num_possible_cpus()) return -ENOMEM; if (tmp.num_counters >= INT_MAX / sizeof(struct xt_counters)) return -ENOMEM; if (tmp.num_counters == 0) return -EINVAL; tmp.name[sizeof(tmp.name)-1] = 0; newinfo = 
xt_alloc_table_info(tmp.size); if (!newinfo) return -ENOMEM; loc_cpu_entry = newinfo->entries; if (copy_from_user(loc_cpu_entry, user + sizeof(tmp), tmp.size) != 0) { ret = -EFAULT; goto free_newinfo; } ret = translate_compat_table(tmp.name, tmp.valid_hooks, &newinfo, &loc_cpu_entry, tmp.size, tmp.num_entries, tmp.hook_entry, tmp.underflow); if (ret != 0) goto free_newinfo; duprintf("compat_do_replace: Translated table\n"); ret = __do_replace(net, tmp.name, tmp.valid_hooks, newinfo, tmp.num_counters, compat_ptr(tmp.counters)); if (ret) goto free_newinfo_untrans; return 0; free_newinfo_untrans: xt_entry_foreach(iter, loc_cpu_entry, newinfo->size) cleanup_entry(iter); free_newinfo: xt_free_table_info(newinfo); return ret; } static int compat_do_arpt_set_ctl(struct sock *sk, int cmd, void __user *user, unsigned int len) { int ret; if (!ns_capable(sock_net(sk)->user_ns, CAP_NET_ADMIN)) return -EPERM; switch (cmd) { case ARPT_SO_SET_REPLACE: ret = compat_do_replace(sock_net(sk), user, len); break; case ARPT_SO_SET_ADD_COUNTERS: ret = do_add_counters(sock_net(sk), user, len, 1); break; default: duprintf("do_arpt_set_ctl: unknown request %i\n", cmd); ret = -EINVAL; } return ret; } static int compat_copy_entry_to_user(struct arpt_entry *e, void __user **dstptr, compat_uint_t *size, struct xt_counters *counters, unsigned int i) { struct xt_entry_target *t; struct compat_arpt_entry __user *ce; u_int16_t target_offset, next_offset; compat_uint_t origsize; int ret; origsize = *size; ce = (struct compat_arpt_entry __user *)*dstptr; if (copy_to_user(ce, e, sizeof(struct arpt_entry)) != 0 || copy_to_user(&ce->counters, &counters[i], sizeof(counters[i])) != 0) return -EFAULT; *dstptr += sizeof(struct compat_arpt_entry); *size -= sizeof(struct arpt_entry) - sizeof(struct compat_arpt_entry); target_offset = e->target_offset - (origsize - *size); t = arpt_get_target(e); ret = xt_compat_target_to_user(t, dstptr, size); if (ret) return ret; next_offset = e->next_offset - (origsize - 
*size); if (put_user(target_offset, &ce->target_offset) != 0 || put_user(next_offset, &ce->next_offset) != 0) return -EFAULT; return 0; } static int compat_copy_entries_to_user(unsigned int total_size, struct xt_table *table, void __user *userptr) { struct xt_counters *counters; const struct xt_table_info *private = table->private; void __user *pos; unsigned int size; int ret = 0; unsigned int i = 0; struct arpt_entry *iter; counters = alloc_counters(table); if (IS_ERR(counters)) return PTR_ERR(counters); pos = userptr; size = total_size; xt_entry_foreach(iter, private->entries, total_size) { ret = compat_copy_entry_to_user(iter, &pos, &size, counters, i++); if (ret != 0) break; } vfree(counters); return ret; } struct compat_arpt_get_entries { char name[XT_TABLE_MAXNAMELEN]; compat_uint_t size; struct compat_arpt_entry entrytable[0]; }; static int compat_get_entries(struct net *net, struct compat_arpt_get_entries __user *uptr, int *len) { int ret; struct compat_arpt_get_entries get; struct xt_table *t; if (*len < sizeof(get)) { duprintf("compat_get_entries: %u < %zu\n", *len, sizeof(get)); return -EINVAL; } if (copy_from_user(&get, uptr, sizeof(get)) != 0) return -EFAULT; if (*len != sizeof(struct compat_arpt_get_entries) + get.size) { duprintf("compat_get_entries: %u != %zu\n", *len, sizeof(get) + get.size); return -EINVAL; } xt_compat_lock(NFPROTO_ARP); t = xt_find_table_lock(net, NFPROTO_ARP, get.name); if (!IS_ERR_OR_NULL(t)) { const struct xt_table_info *private = t->private; struct xt_table_info info; duprintf("t->private->number = %u\n", private->number); ret = compat_table_info(private, &info); if (!ret && get.size == info.size) { ret = compat_copy_entries_to_user(private->size, t, uptr->entrytable); } else if (!ret) { duprintf("compat_get_entries: I've got %u not %u!\n", private->size, get.size); ret = -EAGAIN; } xt_compat_flush_offsets(NFPROTO_ARP); module_put(t->me); xt_table_unlock(t); } else ret = t ? 
PTR_ERR(t) : -ENOENT;
	xt_compat_unlock(NFPROTO_ARP);
	return ret;
}

/* Forward declaration: the compat get handler falls through to the native one
 * for commands that need no 32-bit translation. */
static int do_arpt_get_ctl(struct sock *, int, void __user *, int *);

/*
 * COMPAT getsockopt entry point.  INFO and ENTRIES need 32-bit structure
 * layouts and are handled by the compat helpers; everything else is
 * layout-identical and is delegated to do_arpt_get_ctl().
 * Requires CAP_NET_ADMIN in the socket's user namespace.
 */
static int compat_do_arpt_get_ctl(struct sock *sk, int cmd, void __user *user,
				  int *len)
{
	int ret;

	if (!ns_capable(sock_net(sk)->user_ns, CAP_NET_ADMIN))
		return -EPERM;

	switch (cmd) {
	case ARPT_SO_GET_INFO:
		ret = get_info(sock_net(sk), user, len, 1);
		break;
	case ARPT_SO_GET_ENTRIES:
		ret = compat_get_entries(sock_net(sk), user, len);
		break;
	default:
		ret = do_arpt_get_ctl(sk, cmd, user, len);
	}
	return ret;
}
#endif

/*
 * Native setsockopt entry point: full table replacement and per-rule counter
 * addition.  Requires CAP_NET_ADMIN in the socket's user namespace.
 */
static int do_arpt_set_ctl(struct sock *sk, int cmd, void __user *user,
			   unsigned int len)
{
	int ret;

	if (!ns_capable(sock_net(sk)->user_ns, CAP_NET_ADMIN))
		return -EPERM;

	switch (cmd) {
	case ARPT_SO_SET_REPLACE:
		ret = do_replace(sock_net(sk), user, len);
		break;

	case ARPT_SO_SET_ADD_COUNTERS:
		ret = do_add_counters(sock_net(sk), user, len, 0);
		break;

	default:
		duprintf("do_arpt_set_ctl: unknown request %i\n", cmd);
		ret = -EINVAL;
	}

	return ret;
}

/*
 * Native getsockopt entry point: table info, entry dump, and target-revision
 * lookup.  Requires CAP_NET_ADMIN in the socket's user namespace.
 */
static int do_arpt_get_ctl(struct sock *sk, int cmd, void __user *user,
			   int *len)
{
	int ret;

	if (!ns_capable(sock_net(sk)->user_ns, CAP_NET_ADMIN))
		return -EPERM;

	switch (cmd) {
	case ARPT_SO_GET_INFO:
		ret = get_info(sock_net(sk), user, len, 0);
		break;

	case ARPT_SO_GET_ENTRIES:
		ret = get_entries(sock_net(sk), user, len);
		break;

	case ARPT_SO_GET_REVISION_TARGET: {
		struct xt_get_revision rev;

		if (*len != sizeof(rev)) {
			ret = -EINVAL;
			break;
		}
		if (copy_from_user(&rev, user, sizeof(rev)) != 0) {
			ret = -EFAULT;
			break;
		}
		/* Force NUL termination on the user-supplied target name. */
		rev.name[sizeof(rev.name)-1] = 0;

		try_then_request_module(xt_find_revision(NFPROTO_ARP, rev.name,
							 rev.revision, 1, &ret),
					"arpt_%s", rev.name);
		break;
	}

	default:
		duprintf("do_arpt_get_ctl: unknown request %i\n", cmd);
		ret = -EINVAL;
	}

	return ret;
}

/*
 * Common teardown used by both arpt_unregister_table() and the error path of
 * arpt_register_table(): detach the table from x_tables, destroy every rule's
 * target, drop the module reference taken for non-initial rules, and free the
 * table info.
 */
static void __arpt_unregister_table(struct xt_table *table)
{
	struct xt_table_info *private;
	void *loc_cpu_entry;
	struct module *table_owner = table->me;
	struct arpt_entry *iter;

	private = xt_unregister_table(table);

	/* Decrease module usage counts and free resources */
	loc_cpu_entry = private->entries;
	xt_entry_foreach(iter, loc_cpu_entry, private->size)
		cleanup_entry(iter);
	if (private->number > private->initial_entries)
		module_put(table_owner);
	xt_free_table_info(private);
}

/*
 * Register an ARP table for a network namespace: copy and validate the
 * initial ruleset, register it with x_tables, publish the table pointer in
 * *res, then install the netfilter hooks.  On hook-registration failure the
 * half-constructed table is torn down again and *res is cleared.
 */
int arpt_register_table(struct net *net,
			const struct xt_table *table,
			const struct arpt_replace *repl,
			const struct nf_hook_ops *ops,
			struct xt_table **res)
{
	int ret;
	struct xt_table_info *newinfo;
	struct xt_table_info bootstrap = {0};
	void *loc_cpu_entry;
	struct xt_table *new_table;

	newinfo = xt_alloc_table_info(repl->size);
	if (!newinfo)
		return -ENOMEM;

	loc_cpu_entry = newinfo->entries;
	memcpy(loc_cpu_entry, repl->entries, repl->size);

	ret = translate_table(newinfo, loc_cpu_entry, repl);
	duprintf("arpt_register_table: translate table gives %d\n", ret);
	if (ret != 0)
		goto out_free;

	new_table = xt_register_table(net, table, &bootstrap, newinfo);
	if (IS_ERR(new_table)) {
		ret = PTR_ERR(new_table);
		goto out_free;
	}

	/* set res now, will see skbs right after nf_register_net_hooks */
	WRITE_ONCE(*res, new_table);

	ret = nf_register_net_hooks(net, ops, hweight32(table->valid_hooks));
	if (ret != 0) {
		__arpt_unregister_table(new_table);
		*res = NULL;
	}

	return ret;

out_free:
	xt_free_table_info(newinfo);
	return ret;
}

/* Remove the hooks first so no packets are in flight, then free the table. */
void arpt_unregister_table(struct net *net, struct xt_table *table,
			   const struct nf_hook_ops *ops)
{
	nf_unregister_net_hooks(net, ops, hweight32(table->valid_hooks));
	__arpt_unregister_table(table);
}

/* The built-in targets: standard (NULL) and error.
 */
static struct xt_target arpt_builtin_tg[] __read_mostly = {
	{
		/* Standard target: verdict stored inline, no target function. */
		.name             = XT_STANDARD_TARGET,
		.targetsize       = sizeof(int),
		.family           = NFPROTO_ARP,
#ifdef CONFIG_COMPAT
		.compatsize       = sizeof(compat_int_t),
		.compat_from_user = compat_standard_from_user,
		.compat_to_user   = compat_standard_to_user,
#endif
	},
	{
		/* Error target: terminates a chain; payload is an error name. */
		.name             = XT_ERROR_TARGET,
		.target           = arpt_error,
		.targetsize       = XT_FUNCTION_MAXNAMELEN,
		.family           = NFPROTO_ARP,
	},
};

/* sockopt registration: wires the ARPT_SO_* commands to the handlers above. */
static struct nf_sockopt_ops arpt_sockopts = {
	.pf		= PF_INET,
	.set_optmin	= ARPT_BASE_CTL,
	.set_optmax	= ARPT_SO_SET_MAX+1,
	.set		= do_arpt_set_ctl,
#ifdef CONFIG_COMPAT
	.compat_set	= compat_do_arpt_set_ctl,
#endif
	.get_optmin	= ARPT_BASE_CTL,
	.get_optmax	= ARPT_SO_GET_MAX+1,
	.get		= do_arpt_get_ctl,
#ifdef CONFIG_COMPAT
	.compat_get	= compat_do_arpt_get_ctl,
#endif
	.owner		= THIS_MODULE,
};

/* Per-netns init/exit: register the ARP protocol with x_tables. */
static int __net_init arp_tables_net_init(struct net *net)
{
	return xt_proto_init(net, NFPROTO_ARP);
}

static void __net_exit arp_tables_net_exit(struct net *net)
{
	xt_proto_fini(net, NFPROTO_ARP);
}

static struct pernet_operations arp_tables_net_ops = {
	.init = arp_tables_net_init,
	.exit = arp_tables_net_exit,
};

/*
 * Module init: pernet subsystem, built-in targets, then the sockopt
 * interface.  Error paths unwind in reverse order of registration.
 */
static int __init arp_tables_init(void)
{
	int ret;

	ret = register_pernet_subsys(&arp_tables_net_ops);
	if (ret < 0)
		goto err1;

	/* No one else will be downing sem now, so we won't sleep */
	ret = xt_register_targets(arpt_builtin_tg, ARRAY_SIZE(arpt_builtin_tg));
	if (ret < 0)
		goto err2;

	/* Register setsockopt */
	ret = nf_register_sockopt(&arpt_sockopts);
	if (ret < 0)
		goto err4;

	pr_info("arp_tables: (C) 2002 David S. Miller\n");
	return 0;

err4:
	xt_unregister_targets(arpt_builtin_tg, ARRAY_SIZE(arpt_builtin_tg));
err2:
	unregister_pernet_subsys(&arp_tables_net_ops);
err1:
	return ret;
}

/* Module exit: unregister in the opposite order of arp_tables_init(). */
static void __exit arp_tables_fini(void)
{
	nf_unregister_sockopt(&arpt_sockopts);
	xt_unregister_targets(arpt_builtin_tg, ARRAY_SIZE(arpt_builtin_tg));
	unregister_pernet_subsys(&arp_tables_net_ops);
}

EXPORT_SYMBOL(arpt_register_table);
EXPORT_SYMBOL(arpt_unregister_table);
EXPORT_SYMBOL(arpt_do_table);

module_init(arp_tables_init);
module_exit(arp_tables_fini);
/*
 * Packet matching code for ARP packets.
 *
 * Based heavily, if not almost entirely, upon ip_tables.c framework.
 *
 * Some ARP specific bits are:
 *
 * Copyright (C) 2002 David S. Miller (davem@redhat.com)
 * Copyright (C) 2006-2009 Patrick McHardy <kaber@trash.net>
 *
 */
#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
#include <linux/kernel.h>
#include <linux/skbuff.h>
#include <linux/netdevice.h>
#include <linux/capability.h>
#include <linux/if_arp.h>
#include <linux/kmod.h>
#include <linux/vmalloc.h>
#include <linux/proc_fs.h>
#include <linux/module.h>
#include <linux/init.h>
#include <linux/mutex.h>
#include <linux/err.h>
#include <net/compat.h>
#include <net/sock.h>
#include <asm/uaccess.h>

#include <linux/netfilter/x_tables.h>
#include <linux/netfilter_arp/arp_tables.h>
#include "../../netfilter/xt_repldata.h"

MODULE_LICENSE("GPL");
MODULE_AUTHOR("David S. Miller <davem@redhat.com>");
MODULE_DESCRIPTION("arptables core");

/*#define DEBUG_ARP_TABLES*/
/*#define DEBUG_ARP_TABLES_USER*/

/* dprintf: packet-path debug; duprintf: userspace-interface debug.
 * Both compile to nothing unless the corresponding DEBUG_* macro is set. */
#ifdef DEBUG_ARP_TABLES
#define dprintf(format, args...) pr_debug(format, ## args)
#else
#define dprintf(format, args...)
#endif

#ifdef DEBUG_ARP_TABLES_USER
#define duprintf(format, args...) pr_debug(format, ## args)
#else
#define duprintf(format, args...)
#endif

#ifdef CONFIG_NETFILTER_DEBUG
#define ARP_NF_ASSERT(x)	WARN_ON(!(x))
#else
#define ARP_NF_ASSERT(x)
#endif

/* Build the boilerplate initial (empty) table for this protocol family. */
void *arpt_alloc_initial_table(const struct xt_table *info)
{
	return xt_alloc_initial_table(arpt, ARPT);
}
EXPORT_SYMBOL_GPL(arpt_alloc_initial_table);

/*
 * Masked comparison of a hardware address from the ARP header against the
 * address/mask pair in the rule.  Returns 0 on match, nonzero on mismatch.
 */
static inline int arp_devaddr_compare(const struct arpt_devaddr_info *ap,
				      const char *hdr_addr, int len)
{
	int i, ret;

	if (len > ARPT_DEV_ADDR_LEN_MAX)
		len = ARPT_DEV_ADDR_LEN_MAX;

	ret = 0;
	for (i = 0; i < len; i++)
		ret |= (hdr_addr[i] ^ ap->addr[i]) & ap->mask[i];

	return ret != 0;
}

/*
 * Unfortunately, _b and _mask are not aligned to an int (or long int)
 * Some arches dont care, unrolling the loop is a win on them.
 * For other arches, we only have a 16bit alignement.
 */
static unsigned long ifname_compare(const char *_a, const char *_b,
				    const char *_mask)
{
#ifdef CONFIG_HAVE_EFFICIENT_UNALIGNED_ACCESS
	unsigned long ret = ifname_compare_aligned(_a, _b, _mask);
#else
	unsigned long ret = 0;
	const u16 *a = (const u16 *)_a;
	const u16 *b = (const u16 *)_b;
	const u16 *mask = (const u16 *)_mask;
	int i;

	/* Masked comparison 16 bits at a time; IFNAMSIZ is even. */
	for (i = 0; i < IFNAMSIZ/sizeof(u16); i++)
		ret |= (a[i] ^ b[i]) & mask[i];
#endif
	return ret;
}

/* Returns whether packet matches rule or not. */
static inline int arp_packet_match(const struct arphdr *arphdr,
				   struct net_device *dev,
				   const char *indev,
				   const char *outdev,
				   const struct arpt_arp *arpinfo)
{
	/* Variable-length fields follow the fixed ARP header:
	 * sender hw addr, sender IP, target hw addr, target IP. */
	const char *arpptr = (char *)(arphdr + 1);
	const char *src_devaddr, *tgt_devaddr;
	__be32 src_ipaddr, tgt_ipaddr;
	long ret;

	/* XOR with the rule's invert flag so "!" rules read naturally. */
#define FWINV(bool, invflg) ((bool) ^ !!(arpinfo->invflags & (invflg)))

	if (FWINV((arphdr->ar_op & arpinfo->arpop_mask) != arpinfo->arpop,
		  ARPT_INV_ARPOP)) {
		dprintf("ARP operation field mismatch.\n");
		dprintf("ar_op: %04x info->arpop: %04x info->arpop_mask: %04x\n",
			arphdr->ar_op, arpinfo->arpop, arpinfo->arpop_mask);
		return 0;
	}

	if (FWINV((arphdr->ar_hrd & arpinfo->arhrd_mask) != arpinfo->arhrd,
		  ARPT_INV_ARPHRD)) {
		dprintf("ARP hardware address format mismatch.\n");
		dprintf("ar_hrd: %04x info->arhrd: %04x info->arhrd_mask: %04x\n",
			arphdr->ar_hrd, arpinfo->arhrd, arpinfo->arhrd_mask);
		return 0;
	}

	if (FWINV((arphdr->ar_pro & arpinfo->arpro_mask) != arpinfo->arpro,
		  ARPT_INV_ARPPRO)) {
		dprintf("ARP protocol address format mismatch.\n");
		dprintf("ar_pro: %04x info->arpro: %04x info->arpro_mask: %04x\n",
			arphdr->ar_pro, arpinfo->arpro, arpinfo->arpro_mask);
		return 0;
	}

	if (FWINV((arphdr->ar_hln & arpinfo->arhln_mask) != arpinfo->arhln,
		  ARPT_INV_ARPHLN)) {
		dprintf("ARP hardware address length mismatch.\n");
		dprintf("ar_hln: %02x info->arhln: %02x info->arhln_mask: %02x\n",
			arphdr->ar_hln, arpinfo->arhln, arpinfo->arhln_mask);
		return 0;
	}

	src_devaddr = arpptr;
	arpptr += dev->addr_len;
	/* IP addresses may be unaligned in the packet; copy them out. */
	memcpy(&src_ipaddr, arpptr, sizeof(u32));
	arpptr += sizeof(u32);
	tgt_devaddr = arpptr;
	arpptr += dev->addr_len;
	memcpy(&tgt_ipaddr, arpptr, sizeof(u32));

	if (FWINV(arp_devaddr_compare(&arpinfo->src_devaddr, src_devaddr, dev->addr_len),
		  ARPT_INV_SRCDEVADDR) ||
	    FWINV(arp_devaddr_compare(&arpinfo->tgt_devaddr, tgt_devaddr, dev->addr_len),
		  ARPT_INV_TGTDEVADDR)) {
		dprintf("Source or target device address mismatch.\n");
		return 0;
	}

	if (FWINV((src_ipaddr & arpinfo->smsk.s_addr) != arpinfo->src.s_addr,
		  ARPT_INV_SRCIP) ||
	    FWINV(((tgt_ipaddr & arpinfo->tmsk.s_addr) != arpinfo->tgt.s_addr),
		  ARPT_INV_TGTIP)) {
		dprintf("Source or target IP address mismatch.\n");

		dprintf("SRC: %pI4. Mask: %pI4. Target: %pI4.%s\n",
			&src_ipaddr,
			&arpinfo->smsk.s_addr,
			&arpinfo->src.s_addr,
			arpinfo->invflags & ARPT_INV_SRCIP ? " (INV)" : "");
		dprintf("TGT: %pI4 Mask: %pI4 Target: %pI4.%s\n",
			&tgt_ipaddr,
			&arpinfo->tmsk.s_addr,
			&arpinfo->tgt.s_addr,
			arpinfo->invflags & ARPT_INV_TGTIP ? " (INV)" : "");
		return 0;
	}

	/* Look for ifname matches.  */
	ret = ifname_compare(indev, arpinfo->iniface, arpinfo->iniface_mask);

	if (FWINV(ret != 0, ARPT_INV_VIA_IN)) {
		dprintf("VIA in mismatch (%s vs %s).%s\n",
			indev, arpinfo->iniface,
			arpinfo->invflags & ARPT_INV_VIA_IN ? " (INV)" : "");
		return 0;
	}

	ret = ifname_compare(outdev, arpinfo->outiface, arpinfo->outiface_mask);

	if (FWINV(ret != 0, ARPT_INV_VIA_OUT)) {
		dprintf("VIA out mismatch (%s vs %s).%s\n",
			outdev, arpinfo->outiface,
			arpinfo->invflags & ARPT_INV_VIA_OUT ? " (INV)" : "");
		return 0;
	}

	return 1;
#undef FWINV
}

/* Reject rules that set flag or invert-flag bits this kernel doesn't know. */
static inline int arp_checkentry(const struct arpt_arp *arp)
{
	if (arp->flags & ~ARPT_F_MASK) {
		duprintf("Unknown flag bits set: %08X\n",
			 arp->flags & ~ARPT_F_MASK);
		return 0;
	}
	if (arp->invflags & ~ARPT_INV_MASK) {
		duprintf("Unknown invflag bits set: %08X\n",
			 arp->invflags & ~ARPT_INV_MASK);
		return 0;
	}

	return 1;
}

/* Target function of the built-in ERROR target: should never run on a
 * correctly translated table; drops the packet and logs. */
static unsigned int
arpt_error(struct sk_buff *skb, const struct xt_action_param *par)
{
	net_err_ratelimited("arp_tables: error: '%s'\n",
			    (const char *)par->targinfo);

	return NF_DROP;
}

/* const-preserving wrapper around arpt_get_target(). */
static inline const struct xt_entry_target *
arpt_get_target_c(const struct arpt_entry *e)
{
	return arpt_get_target((struct arpt_entry *)e);
}

/* Entry at byte offset 'offset' from the start of the rule blob. */
static inline struct arpt_entry *
get_entry(const void *base, unsigned int offset)
{
	return (struct arpt_entry *)(base + offset);
}

/* The entry immediately following 'entry' in the blob. */
static inline
struct arpt_entry *arpt_next_entry(const struct arpt_entry *entry)
{
	return (void *)entry + entry->next_offset;
}

/*
 * Main packet-path evaluator: walk the chain for this hook, matching each
 * rule and applying its target until a verdict is reached.  Jumps/returns
 * are handled with a per-cpu jumpstack; counters are updated under the
 * xt_write_recseq sequence so readers can take consistent snapshots.
 */
unsigned int arpt_do_table(struct sk_buff *skb,
			   const struct nf_hook_state *state,
			   struct xt_table *table)
{
	unsigned int hook = state->hook;
	static const char nulldevname[IFNAMSIZ] __attribute__((aligned(sizeof(long))));
	unsigned int verdict = NF_DROP;
	const struct arphdr *arp;
	struct arpt_entry *e, **jumpstack;
	const char *indev, *outdev;
	const void *table_base;
	unsigned int cpu, stackidx = 0;
	const struct xt_table_info *private;
	struct xt_action_param acpar;
	unsigned int addend;

	if (!pskb_may_pull(skb, arp_hdr_len(skb->dev)))
		return NF_DROP;

	indev = state->in ? state->in->name : nulldevname;
	outdev = state->out ? state->out->name : nulldevname;

	local_bh_disable();
	addend = xt_write_recseq_begin();
	private = table->private;
	cpu     = smp_processor_id();
	/*
	 * Ensure we load private-> members after we've fetched the base
	 * pointer.
	 */
	smp_read_barrier_depends();
	table_base = private->entries;
	jumpstack  = (struct arpt_entry **)private->jumpstack[cpu];

	/* No TEE support for arptables, so no need to switch to alternate
	 * stack.  All targets that reenter must return
	 * absolute verdicts.
	 */
	e = get_entry(table_base, private->hook_entry[hook]);

	acpar.net     = state->net;
	acpar.in      = state->in;
	acpar.out     = state->out;
	acpar.hooknum = hook;
	acpar.family  = NFPROTO_ARP;
	acpar.hotdrop = false;

	arp = arp_hdr(skb);
	do {
		const struct xt_entry_target *t;
		struct xt_counters *counter;

		if (!arp_packet_match(arp, skb->dev, indev, outdev, &e->arp)) {
			e = arpt_next_entry(e);
			continue;
		}

		counter = xt_get_this_cpu_counter(&e->counters);
		ADD_COUNTER(*counter, arp_hdr_len(skb->dev), 1);

		t = arpt_get_target_c(e);

		/* Standard target? */
		if (!t->u.kernel.target->target) {
			int v;

			v = ((struct xt_standard_target *)t)->verdict;
			if (v < 0) {
				/* Pop from stack? */
				if (v != XT_RETURN) {
					verdict = (unsigned int)(-v) - 1;
					break;
				}
				if (stackidx == 0) {
					e = get_entry(table_base,
						      private->underflow[hook]);
				} else {
					e = jumpstack[--stackidx];
					e = arpt_next_entry(e);
				}
				continue;
			}
			if (table_base + v
			    != arpt_next_entry(e)) {
				jumpstack[stackidx++] = e;
			}

			e = get_entry(table_base, v);
			continue;
		}

		acpar.target   = t->u.kernel.target;
		acpar.targinfo = t->data;
		verdict = t->u.kernel.target->target(skb, &acpar);

		/* Target might have changed stuff. */
		arp = arp_hdr(skb);

		if (verdict == XT_CONTINUE)
			e = arpt_next_entry(e);
		else
			/* Verdict */
			break;
	} while (!acpar.hotdrop);
	xt_write_recseq_end(addend);
	local_bh_enable();

	if (acpar.hotdrop)
		return NF_DROP;
	else
		return verdict;
}

/* All zeroes == unconditional rule. */
static inline bool unconditional(const struct arpt_entry *e)
{
	static const struct arpt_arp uncond;

	return e->target_offset == sizeof(struct arpt_entry) &&
	       memcmp(&e->arp, &uncond, sizeof(uncond)) == 0;
}

/* Figures out from what hook each rule can be called: returns 0 if
 * there are loops.  Puts hook bitmask in comefrom.
 */
static int mark_source_chains(const struct xt_table_info *newinfo,
			      unsigned int valid_hooks, void *entry0)
{
	unsigned int hook;

	/* No recursion; use packet counter to save back ptrs (reset
	 * to 0 as we leave), and comefrom to save source hook bitmask.
	 */
	for (hook = 0; hook < NF_ARP_NUMHOOKS; hook++) {
		unsigned int pos = newinfo->hook_entry[hook];
		struct arpt_entry *e
			= (struct arpt_entry *)(entry0 + pos);

		if (!(valid_hooks & (1 << hook)))
			continue;

		/* Set initial back pointer. */
		e->counters.pcnt = pos;

		for (;;) {
			const struct xt_standard_target *t
				= (void *)arpt_get_target_c(e);
			int visited = e->comefrom & (1 << hook);

			/* Seeing this entry again from the same hook means a loop. */
			if (e->comefrom & (1 << NF_ARP_NUMHOOKS)) {
				pr_notice("arptables: loop hook %u pos %u %08X.\n",
				       hook, pos, e->comefrom);
				return 0;
			}
			e->comefrom
				|= ((1 << hook) | (1 << NF_ARP_NUMHOOKS));

			/* Unconditional return/END. */
			if ((unconditional(e) &&
			     (strcmp(t->target.u.user.name,
				     XT_STANDARD_TARGET) == 0) &&
			     t->verdict < 0) || visited) {
				unsigned int oldpos, size;

				if ((strcmp(t->target.u.user.name,
					    XT_STANDARD_TARGET) == 0) &&
				    t->verdict < -NF_MAX_VERDICT - 1) {
					duprintf("mark_source_chains: bad "
						"negative verdict (%i)\n",
								t->verdict);
					return 0;
				}

				/* Return: backtrack through the last
				 * big jump.
				 */
				do {
					e->comefrom ^= (1<<NF_ARP_NUMHOOKS);
					oldpos = pos;
					pos = e->counters.pcnt;
					e->counters.pcnt = 0;

					/* We're at the start. */
					if (pos == oldpos)
						goto next;

					e = (struct arpt_entry *)
						(entry0 + pos);
				} while (oldpos == pos + e->next_offset);

				/* Move along one */
				size = e->next_offset;
				e = (struct arpt_entry *)
					(entry0 + pos + size);
				e->counters.pcnt = pos;
				pos += size;
			} else {
				int newpos = t->verdict;

				if (strcmp(t->target.u.user.name,
					   XT_STANDARD_TARGET) == 0 &&
				    newpos >= 0) {
					if (newpos > newinfo->size -
						sizeof(struct arpt_entry)) {
						duprintf("mark_source_chains: "
							"bad verdict (%i)\n",
								newpos);
						return 0;
					}

					/* This a jump; chase it. */
					duprintf("Jump rule %u -> %u\n",
						 pos, newpos);
				} else {
					/* ... this is a fallthru */
					newpos = pos + e->next_offset;
				}
				e = (struct arpt_entry *)
					(entry0 + newpos);
				e->counters.pcnt = pos;
				pos = newpos;
			}
		}
next:
		duprintf("Finished chain %u\n", hook);
	}
	return 1;
}

/* Basic sanity: valid match flags and a target that fits inside the entry. */
static inline int check_entry(const struct arpt_entry *e)
{
	const struct xt_entry_target *t;

	if (!arp_checkentry(&e->arp))
		return -EINVAL;

	if (e->target_offset + sizeof(struct xt_entry_target) > e->next_offset)
		return -EINVAL;

	t = arpt_get_target_c(e);
	if (e->target_offset + t->u.target_size > e->next_offset)
		return -EINVAL;

	return 0;
}

/* Run the target's own checkentry hook via xt_check_target(). */
static inline int check_target(struct arpt_entry *e, const char *name)
{
	struct xt_entry_target *t = arpt_get_target(e);
	int ret;
	struct xt_tgchk_param par = {
		.table     = name,
		.entryinfo = e,
		.target    = t->u.kernel.target,
		.targinfo  = t->data,
		.hook_mask = e->comefrom,
		.family    = NFPROTO_ARP,
	};

	ret = xt_check_target(&par, t->u.target_size - sizeof(*t), 0, false);
	if (ret < 0) {
		duprintf("arp_tables: check failed for `%s'.\n",
			 t->u.kernel.target->name);
		return ret;
	}
	return 0;
}

/*
 * Allocate the per-cpu counter for an entry, resolve its target module
 * (loading it on demand), and validate the target.  On failure the module
 * reference and counter are released again.
 */
static inline int
find_check_entry(struct arpt_entry *e, const char *name, unsigned int size)
{
	struct xt_entry_target *t;
	struct xt_target *target;
	int ret;

	e->counters.pcnt = xt_percpu_counter_alloc();
	if (IS_ERR_VALUE(e->counters.pcnt))
		return -ENOMEM;

	t = arpt_get_target(e);
	target = xt_request_find_target(NFPROTO_ARP, t->u.user.name,
					t->u.user.revision);
	if (IS_ERR(target)) {
		duprintf("find_check_entry: `%s' not found\n", t->u.user.name);
		ret = PTR_ERR(target);
		goto out;
	}
	t->u.kernel.target = target;

	ret = check_target(e, name);
	if (ret)
		goto err;
	return 0;
err:
	module_put(t->u.kernel.target->me);
out:
	xt_percpu_counter_free(e->counters.pcnt);

	return ret;
}

/* An underflow (chain default) must be an unconditional standard
 * ACCEPT or DROP verdict. */
static bool check_underflow(const struct arpt_entry *e)
{
	const struct xt_entry_target *t;
	unsigned int verdict;

	if (!unconditional(e))
		return false;
	t = arpt_get_target_c(e);
	if (strcmp(t->u.user.name, XT_STANDARD_TARGET) != 0)
		return false;
	verdict = ((struct xt_standard_target *)t)->verdict;
	verdict = -verdict - 1;
	return verdict == NF_DROP || verdict == NF_ACCEPT;
}

/*
 * Validate one entry's alignment, bounds, and minimum size, then record any
 * hook-entry/underflow offsets that land exactly on this entry.
 * NOTE(review): user-supplied hook offsets are only matched by equality
 * here; offsets that match no entry start are rejected later in
 * translate_table() via the 0xFFFFFFFF sentinel.
 */
static inline int check_entry_size_and_hooks(struct arpt_entry *e,
					     struct xt_table_info *newinfo,
					     const unsigned char *base,
					     const unsigned char *limit,
					     const unsigned int *hook_entries,
					     const unsigned int *underflows,
					     unsigned int valid_hooks)
{
	unsigned int h;
	int err;

	if ((unsigned long)e % __alignof__(struct arpt_entry) != 0 ||
	    (unsigned char *)e + sizeof(struct arpt_entry) >= limit ||
	    (unsigned char *)e + e->next_offset > limit) {
		duprintf("Bad offset %p\n", e);
		return -EINVAL;
	}

	if (e->next_offset
	    < sizeof(struct arpt_entry) + sizeof(struct xt_entry_target)) {
		duprintf("checking: element %p size %u\n",
			 e, e->next_offset);
		return -EINVAL;
	}

	err = check_entry(e);
	if (err)
		return err;

	/* Check hooks & underflows */
	for (h = 0; h < NF_ARP_NUMHOOKS; h++) {
		if (!(valid_hooks & (1 << h)))
			continue;
		if ((unsigned char *)e - base == hook_entries[h])
			newinfo->hook_entry[h] = hook_entries[h];
		if ((unsigned char *)e - base == underflows[h]) {
			if (!check_underflow(e)) {
				pr_debug("Underflows must be unconditional and "
					 "use the STANDARD target with "
					 "ACCEPT/DROP\n");
				return -EINVAL;
			}
			newinfo->underflow[h] = underflows[h];
		}
	}

	/* Clear counters and comefrom */
	e->counters = ((struct xt_counters) { 0, 0 });
	e->comefrom = 0;
	return 0;
}

/* Undo find_check_entry(): destroy the target, drop its module ref, and
 * free the per-cpu counter. */
static inline void cleanup_entry(struct arpt_entry *e)
{
	struct xt_tgdtor_param par;
	struct xt_entry_target *t;

	t = arpt_get_target(e);
	par.target   = t->u.kernel.target;
	par.targinfo = t->data;
	par.family   = NFPROTO_ARP;
	if (par.target->destroy != NULL)
		par.target->destroy(&par);
	module_put(par.target->me);
	xt_percpu_counter_free(e->counters.pcnt);
}

/* Checks and translates the user-supplied table segment (held in
 * newinfo).
 */
static int translate_table(struct xt_table_info *newinfo, void *entry0,
			   const struct arpt_replace *repl)
{
	struct arpt_entry *iter;
	unsigned int i;
	int ret = 0;

	newinfo->size = repl->size;
	newinfo->number = repl->num_entries;

	/* Init all hooks to impossible value. */
	for (i = 0; i < NF_ARP_NUMHOOKS; i++) {
		newinfo->hook_entry[i] = 0xFFFFFFFF;
		newinfo->underflow[i] = 0xFFFFFFFF;
	}

	duprintf("translate_table: size %u\n", newinfo->size);
	i = 0;

	/* Walk through entries, checking offsets. */
	xt_entry_foreach(iter, entry0, newinfo->size) {
		ret = check_entry_size_and_hooks(iter, newinfo, entry0,
						 entry0 + repl->size,
						 repl->hook_entry,
						 repl->underflow,
						 repl->valid_hooks);
		if (ret != 0)
			break;
		++i;
		if (strcmp(arpt_get_target(iter)->u.user.name,
		    XT_ERROR_TARGET) == 0)
			++newinfo->stacksize;
	}
	duprintf("translate_table: ARPT_ENTRY_ITERATE gives %d\n", ret);
	if (ret != 0)
		return ret;

	if (i != repl->num_entries) {
		duprintf("translate_table: %u not %u entries\n",
			 i, repl->num_entries);
		return -EINVAL;
	}

	/* Check hooks all assigned */
	for (i = 0; i < NF_ARP_NUMHOOKS; i++) {
		/* Only hooks which are valid */
		if (!(repl->valid_hooks & (1 << i)))
			continue;
		if (newinfo->hook_entry[i] == 0xFFFFFFFF) {
			duprintf("Invalid hook entry %u %u\n",
				 i, repl->hook_entry[i]);
			return -EINVAL;
		}
		if (newinfo->underflow[i] == 0xFFFFFFFF) {
			duprintf("Invalid underflow %u %u\n",
				 i, repl->underflow[i]);
			return -EINVAL;
		}
	}

	if (!mark_source_chains(newinfo, repl->valid_hooks, entry0)) {
		duprintf("Looping hook\n");
		return -ELOOP;
	}

	/* Finally, each sanity check must pass */
	i = 0;
	xt_entry_foreach(iter, entry0, newinfo->size) {
		ret = find_check_entry(iter, repl->name, repl->size);
		if (ret != 0)
			break;
		++i;
	}

	if (ret != 0) {
		/* Roll back the i entries that already passed find_check_entry. */
		xt_entry_foreach(iter, entry0, newinfo->size) {
			if (i-- == 0)
				break;
			cleanup_entry(iter);
		}
		return ret;
	}

	return ret;
}

/*
 * Sum per-cpu counters into the caller's array, using the per-cpu seqcount
 * to get a consistent bcnt/pcnt pair per rule per cpu.
 */
static void get_counters(const struct xt_table_info *t,
			 struct xt_counters counters[])
{
	struct arpt_entry *iter;
	unsigned int cpu;
	unsigned int i;

	for_each_possible_cpu(cpu) {
		seqcount_t *s = &per_cpu(xt_recseq, cpu);

		i = 0;
		xt_entry_foreach(iter, t->entries, t->size) {
			struct xt_counters *tmp;
			u64 bcnt, pcnt;
			unsigned int start;

			tmp = xt_get_per_cpu_counter(&iter->counters, cpu);
			do {
				start = read_seqcount_begin(s);
				bcnt = tmp->bcnt;
				pcnt = tmp->pcnt;
			} while (read_seqcount_retry(s, start));

			ADD_COUNTER(counters[i], bcnt, pcnt);
			++i;
		}
	}
}

/* Allocate and fill a snapshot of all rule counters; caller vfree()s it. */
static struct xt_counters *alloc_counters(const struct xt_table *table)
{
	unsigned int countersize;
	struct xt_counters *counters;
	const struct xt_table_info *private = table->private;

	/* We need atomic snapshot of counters: rest doesn't change
	 * (other than comefrom, which userspace doesn't care
	 * about).
	 */
	countersize = sizeof(struct xt_counters) * private->number;
	counters = vzalloc(countersize);

	if (counters == NULL)
		return ERR_PTR(-ENOMEM);

	get_counters(private, counters);

	return counters;
}

/*
 * Dump the whole rule blob to userspace, then patch in the live counter
 * snapshot and the kernel-resolved target names entry by entry.
 */
static int copy_entries_to_user(unsigned int total_size,
				const struct xt_table *table,
				void __user *userptr)
{
	unsigned int off, num;
	const struct arpt_entry *e;
	struct xt_counters *counters;
	struct xt_table_info *private = table->private;
	int ret = 0;
	void *loc_cpu_entry;

	counters = alloc_counters(table);
	if (IS_ERR(counters))
		return PTR_ERR(counters);

	loc_cpu_entry = private->entries;
	/* ... then copy entire thing ... */
	if (copy_to_user(userptr, loc_cpu_entry, total_size) != 0) {
		ret = -EFAULT;
		goto free_counters;
	}

	/* FIXME: use iterator macros --RR */
	/* ... then go back and fix counters and names */
	for (off = 0, num = 0; off < total_size; off += e->next_offset, num++){
		const struct xt_entry_target *t;

		e = (struct arpt_entry *)(loc_cpu_entry + off);
		if (copy_to_user(userptr + off
				 + offsetof(struct arpt_entry, counters),
				 &counters[num],
				 sizeof(counters[num])) != 0) {
			ret = -EFAULT;
			goto free_counters;
		}

		t = arpt_get_target_c(e);
		if (copy_to_user(userptr + off + e->target_offset
				 + offsetof(struct xt_entry_target,
					    u.user.name),
				 t->u.kernel.target->name,
				 strlen(t->u.kernel.target->name)+1) != 0) {
			ret = -EFAULT;
			goto free_counters;
		}
	}

free_counters:
	vfree(counters);
	return ret;
}

#ifdef CONFIG_COMPAT
/* Translate a 32-bit standard verdict to native: positive verdicts are
 * jump offsets and must be adjusted for the layout difference. */
static void compat_standard_from_user(void *dst, const void *src)
{
	int v = *(compat_int_t *)src;

	if (v > 0)
		v += xt_compat_calc_jump(NFPROTO_ARP, v);
	memcpy(dst, &v, sizeof(v));
}

/* Inverse of compat_standard_from_user() for dumps to 32-bit userspace. */
static int compat_standard_to_user(void __user *dst, const void *src)
{
	compat_int_t cv = *(int *)src;

	if (cv > 0)
		cv -= xt_compat_calc_jump(NFPROTO_ARP, cv);
	return copy_to_user(dst, &cv, sizeof(cv)) ? -EFAULT : 0;
}

/*
 * Record this entry's native-vs-compat size delta with x_tables and shrink
 * the reported table size and any hook/underflow offsets that lie past it.
 */
static int compat_calc_entry(const struct arpt_entry *e,
			     const struct xt_table_info *info,
			     const void *base, struct xt_table_info *newinfo)
{
	const struct xt_entry_target *t;
	unsigned int entry_offset;
	int off, i, ret;

	off = sizeof(struct arpt_entry) - sizeof(struct compat_arpt_entry);
	entry_offset = (void *)e - base;

	t = arpt_get_target_c(e);
	off += xt_compat_target_offset(t->u.kernel.target);
	newinfo->size -= off;
	ret = xt_compat_add_offset(NFPROTO_ARP, entry_offset, off);
	if (ret)
		return ret;

	for (i = 0; i < NF_ARP_NUMHOOKS; i++) {
		if (info->hook_entry[i] &&
		    (e < (struct arpt_entry *)(base + info->hook_entry[i])))
			newinfo->hook_entry[i] -= off;
		if (info->underflow[i] &&
		    (e < (struct arpt_entry *)(base + info->underflow[i])))
			newinfo->underflow[i] -= off;
	}
	return 0;
}

/* Compute the compat-layout sizes/offsets for a whole native table. */
static int compat_table_info(const struct xt_table_info *info,
			     struct xt_table_info *newinfo)
{
	struct arpt_entry *iter;
	const void *loc_cpu_entry;
	int ret;

	if (!newinfo || !info)
		return -EINVAL;

	/* we dont care about newinfo->entries */
	memcpy(newinfo, info, offsetof(struct xt_table_info, entries));
	newinfo->initial_entries = 0;
	loc_cpu_entry = info->entries;
	xt_compat_init_offsets(NFPROTO_ARP, info->number);
	xt_entry_foreach(iter, loc_cpu_entry, info->size) {
		ret = compat_calc_entry(iter, info, loc_cpu_entry, newinfo);
		if (ret != 0)
			return ret;
	}
	return 0;
}
#endif

/*
 * ARPT_SO_GET_INFO: return hook entry points, underflows, entry count and
 * blob size for the named table.  With compat != 0 the sizes/offsets are
 * first recomputed for the 32-bit layout.
 */
static int get_info(struct net *net, void __user *user,
		    const int *len, int compat)
{
	char name[XT_TABLE_MAXNAMELEN];
	struct xt_table *t;
	int ret;

	if (*len != sizeof(struct arpt_getinfo)) {
		duprintf("length %u != %Zu\n", *len,
			 sizeof(struct arpt_getinfo));
		return -EINVAL;
	}

	if (copy_from_user(name, user, sizeof(name)) != 0)
		return -EFAULT;

	name[XT_TABLE_MAXNAMELEN-1] = '\0';
#ifdef CONFIG_COMPAT
	if (compat)
		xt_compat_lock(NFPROTO_ARP);
#endif
	t = try_then_request_module(xt_find_table_lock(net, NFPROTO_ARP, name),
				    "arptable_%s", name);
	if (!IS_ERR_OR_NULL(t)) {
		struct arpt_getinfo info;
		const struct xt_table_info *private = t->private;
#ifdef CONFIG_COMPAT
		struct xt_table_info tmp;

		if (compat) {
			ret = compat_table_info(private, &tmp);
			xt_compat_flush_offsets(NFPROTO_ARP);
			private = &tmp;
		}
#endif
		memset(&info, 0, sizeof(info));
		info.valid_hooks = t->valid_hooks;
		memcpy(info.hook_entry, private->hook_entry,
		       sizeof(info.hook_entry));
		memcpy(info.underflow, private->underflow,
		       sizeof(info.underflow));
		info.num_entries = private->number;
		info.size = private->size;
		strcpy(info.name, name);

		if (copy_to_user(user, &info, *len) != 0)
			ret = -EFAULT;
		else
			ret = 0;
		xt_table_unlock(t);
		module_put(t->me);
	} else
		ret = t ? PTR_ERR(t) : -ENOENT;
#ifdef CONFIG_COMPAT
	if (compat)
		xt_compat_unlock(NFPROTO_ARP);
#endif
	return ret;
}

/*
 * ARPT_SO_GET_ENTRIES: dump the named table's rules.  The caller must pass
 * the exact current blob size (from GET_INFO); -EAGAIN signals the table
 * changed in between.
 */
static int get_entries(struct net *net, struct arpt_get_entries __user *uptr,
		       const int *len)
{
	int ret;
	struct arpt_get_entries get;
	struct xt_table *t;

	if (*len < sizeof(get)) {
		duprintf("get_entries: %u < %Zu\n", *len, sizeof(get));
		return -EINVAL;
	}
	if (copy_from_user(&get, uptr, sizeof(get)) != 0)
		return -EFAULT;
	if (*len != sizeof(struct arpt_get_entries) + get.size) {
		duprintf("get_entries: %u != %Zu\n", *len,
			 sizeof(struct arpt_get_entries) + get.size);
		return -EINVAL;
	}

	t = xt_find_table_lock(net, NFPROTO_ARP, get.name);
	if (!IS_ERR_OR_NULL(t)) {
		const struct xt_table_info *private = t->private;

		duprintf("t->private->number = %u\n",
			 private->number);
		if (get.size == private->size)
			ret = copy_entries_to_user(private->size,
						   t, uptr->entrytable);
		else {
			duprintf("get_entries: I've got %u not %u!\n",
				 private->size, get.size);
			ret = -EAGAIN;
		}
		module_put(t->me);
		xt_table_unlock(t);
	} else
		ret = t ? PTR_ERR(t) : -ENOENT;

	return ret;
}

/*
 * Swap a validated new table into place, snapshot the old table's counters
 * for userspace, and destroy the old table.  Module refcounts are adjusted
 * for the difference in non-initial rule counts between old and new tables.
 */
static int __do_replace(struct net *net, const char *name,
			unsigned int valid_hooks,
			struct xt_table_info *newinfo,
			unsigned int num_counters,
			void __user *counters_ptr)
{
	int ret;
	struct xt_table *t;
	struct xt_table_info *oldinfo;
	struct xt_counters *counters;
	void *loc_cpu_old_entry;
	struct arpt_entry *iter;

	ret = 0;
	counters = vzalloc(num_counters * sizeof(struct xt_counters));
	if (!counters) {
		ret = -ENOMEM;
		goto out;
	}

	t = try_then_request_module(xt_find_table_lock(net, NFPROTO_ARP, name),
				    "arptable_%s", name);
	if (IS_ERR_OR_NULL(t)) {
		ret = t ? PTR_ERR(t) : -ENOENT;
		goto free_newinfo_counters_untrans;
	}

	/* You lied! */
	if (valid_hooks != t->valid_hooks) {
		duprintf("Valid hook crap: %08X vs %08X\n",
			 valid_hooks, t->valid_hooks);
		ret = -EINVAL;
		goto put_module;
	}

	oldinfo = xt_replace_table(t, num_counters, newinfo, &ret);
	if (!oldinfo)
		goto put_module;

	/* Update module usage count based on number of rules */
	duprintf("do_replace: oldnum=%u, initnum=%u, newnum=%u\n",
		oldinfo->number, oldinfo->initial_entries, newinfo->number);
	if ((oldinfo->number > oldinfo->initial_entries) ||
	    (newinfo->number <= oldinfo->initial_entries))
		module_put(t->me);
	if ((oldinfo->number > oldinfo->initial_entries) &&
	    (newinfo->number <= oldinfo->initial_entries))
		module_put(t->me);

	/* Get the old counters, and synchronize with replace */
	get_counters(oldinfo, counters);

	/* Decrease module usage counts and free resource */
	loc_cpu_old_entry = oldinfo->entries;
	xt_entry_foreach(iter, loc_cpu_old_entry, oldinfo->size)
		cleanup_entry(iter);

	xt_free_table_info(oldinfo);
	if (copy_to_user(counters_ptr, counters,
			 sizeof(struct xt_counters) * num_counters) != 0) {
		/* Silent error, can't fail, new table is already in place */
		net_warn_ratelimited("arptables: counters copy to user failed while replacing table\n");
	}
	vfree(counters);
	xt_table_unlock(t);
	return ret;

 put_module:
	module_put(t->me);
	xt_table_unlock(t);
 free_newinfo_counters_untrans:
	vfree(counters);
 out:
	return ret;
}

/*
 * ARPT_SO_SET_REPLACE (native layout): copy the replacement blob from
 * userspace, translate/validate it, then swap it in via __do_replace().
 */
static int do_replace(struct net *net, const void __user *user,
		      unsigned int len)
{
	int ret;
	struct arpt_replace tmp;
	struct xt_table_info *newinfo;
	void *loc_cpu_entry;
	struct arpt_entry *iter;

	if (copy_from_user(&tmp, user, sizeof(tmp)) != 0)
		return -EFAULT;

	/* overflow check */
	if (tmp.num_counters >= INT_MAX / sizeof(struct xt_counters))
		return -ENOMEM;
	if (tmp.num_counters == 0)
		return -EINVAL;

	tmp.name[sizeof(tmp.name)-1] = 0;

	newinfo = xt_alloc_table_info(tmp.size);
	if (!newinfo)
		return -ENOMEM;

	loc_cpu_entry = newinfo->entries;
	if (copy_from_user(loc_cpu_entry, user + sizeof(tmp),
			   tmp.size) != 0) {
		ret = -EFAULT;
		goto free_newinfo;
	}

	ret = translate_table(newinfo, loc_cpu_entry, &tmp);
	if (ret != 0)
		goto free_newinfo;

	duprintf("arp_tables: Translated table\n");

	ret = __do_replace(net, tmp.name, tmp.valid_hooks, newinfo,
			   tmp.num_counters, tmp.counters);
	if (ret)
		goto free_newinfo_untrans;
	return 0;

 free_newinfo_untrans:
	xt_entry_foreach(iter, loc_cpu_entry, newinfo->size)
		cleanup_entry(iter);
 free_newinfo:
	xt_free_table_info(newinfo);
	return ret;
}

/*
 * ARPT_SO_SET_ADD_COUNTERS: add user-supplied byte/packet counts to this
 * cpu's counters for every rule.  compat selects the 32-bit header layout.
 */
static int do_add_counters(struct net *net, const void __user *user,
			   unsigned int len, int compat)
{
	unsigned int i;
	struct xt_counters_info tmp;
	struct xt_counters *paddc;
	unsigned int num_counters;
	const char *name;
	int size;
	void *ptmp;
	struct xt_table *t;
	const struct xt_table_info *private;
	int ret = 0;
	struct arpt_entry *iter;
	unsigned int addend;
#ifdef CONFIG_COMPAT
	struct compat_xt_counters_info compat_tmp;

	if (compat) {
		ptmp = &compat_tmp;
		size = sizeof(struct compat_xt_counters_info);
	} else
#endif
	{
		ptmp = &tmp;
		size = sizeof(struct xt_counters_info);
	}

	if (copy_from_user(ptmp, user, size) != 0)
		return -EFAULT;

#ifdef CONFIG_COMPAT
	if (compat) {
		num_counters = compat_tmp.num_counters;
		name = compat_tmp.name;
	} else
#endif
	{
		num_counters = tmp.num_counters;
		name = tmp.name;
	}

	if (len != size + num_counters * sizeof(struct xt_counters))
		return -EINVAL;

	paddc = vmalloc(len - size);
	if (!paddc)
		return -ENOMEM;

	if (copy_from_user(paddc, user + size, len - size) != 0) {
		ret = -EFAULT;
		goto free;
	}

	t = xt_find_table_lock(net, NFPROTO_ARP, name);
	if (IS_ERR_OR_NULL(t)) {
		ret = t ? PTR_ERR(t) : -ENOENT;
		goto free;
	}

	local_bh_disable();
	private = t->private;
	if (private->number != num_counters) {
		ret = -EINVAL;
		goto unlock_up_free;
	}

	i = 0;

	addend = xt_write_recseq_begin();
	xt_entry_foreach(iter,  private->entries, private->size) {
		struct xt_counters *tmp;

		tmp = xt_get_this_cpu_counter(&iter->counters);
		ADD_COUNTER(*tmp, paddc[i].bcnt, paddc[i].pcnt);
		++i;
	}
	xt_write_recseq_end(addend);
 unlock_up_free:
	local_bh_enable();
	xt_table_unlock(t);
	module_put(t->me);
 free:
	vfree(paddc);

	return ret;
}

#ifdef CONFIG_COMPAT
/* Drop the module reference taken for a compat entry's target. */
static inline void compat_release_entry(struct compat_arpt_entry *e)
{
	struct xt_entry_target *t;

	t = compat_arpt_get_target(e);
	module_put(t->u.kernel.target->me);
}

/*
 * Compat analogue of check_entry_size_and_hooks(): validate one 32-bit
 * entry, resolve its target, and register the size delta so offsets can be
 * translated to the native layout.
 */
static inline int
check_compat_entry_size_and_hooks(struct compat_arpt_entry *e,
				  struct xt_table_info *newinfo,
				  unsigned int *size,
				  const unsigned char *base,
				  const unsigned char *limit,
				  const unsigned int *hook_entries,
				  const unsigned int *underflows,
				  const char *name)
{
	struct xt_entry_target *t;
	struct xt_target *target;
	unsigned int entry_offset;
	int ret, off, h;

	duprintf("check_compat_entry_size_and_hooks %p\n", e);
	if ((unsigned long)e % __alignof__(struct compat_arpt_entry) != 0 ||
	    (unsigned char *)e + sizeof(struct compat_arpt_entry) >= limit ||
	    (unsigned char *)e + e->next_offset > limit) {
		duprintf("Bad offset %p, limit = %p\n", e, limit);
		return -EINVAL;
	}

	if (e->next_offset < sizeof(struct compat_arpt_entry) +
			     sizeof(struct compat_xt_entry_target)) {
		duprintf("checking: element %p size %u\n",
			 e, e->next_offset);
		return -EINVAL;
	}

	/* For purposes of check_entry casting the compat entry is fine */
	ret = check_entry((struct arpt_entry *)e);
	if (ret)
		return ret;

	off = sizeof(struct arpt_entry) - sizeof(struct compat_arpt_entry);
	entry_offset = (void *)e - (void *)base;

	t = compat_arpt_get_target(e);
	target = xt_request_find_target(NFPROTO_ARP, t->u.user.name,
					t->u.user.revision);
	if (IS_ERR(target)) {
		duprintf("check_compat_entry_size_and_hooks: `%s' not found\n",
			 t->u.user.name);
		ret = PTR_ERR(target);
		goto out;
	}
	t->u.kernel.target = target;

	off += xt_compat_target_offset(target);
	*size += off;
	ret = xt_compat_add_offset(NFPROTO_ARP, entry_offset, off);
	if (ret)
		goto release_target;

	/* Check hooks & underflows */
	for (h = 0; h < NF_ARP_NUMHOOKS; h++) {
		if ((unsigned char *)e - base == hook_entries[h])
			newinfo->hook_entry[h] = hook_entries[h];
		if ((unsigned char *)e - base == underflows[h])
			newinfo->underflow[h] = underflows[h];
	}

	/* Clear counters and comefrom */
	memset(&e->counters, 0, sizeof(e->counters));
	e->comefrom = 0;
	return 0;

release_target:
	module_put(t->u.kernel.target->me);
out:
	return ret;
}

/*
 * Convert one validated compat entry into native layout at *dstptr,
 * adjusting target/next offsets and any hook offsets past this entry.
 */
static int
compat_copy_entry_from_user(struct compat_arpt_entry *e, void **dstptr,
			    unsigned int *size, const char *name,
			    struct xt_table_info *newinfo, unsigned char *base)
{
	struct xt_entry_target *t;
	struct xt_target *target;
	struct arpt_entry *de;
	unsigned int origsize;
	int ret, h;

	ret = 0;
	origsize = *size;
	de = (struct arpt_entry *)*dstptr;
	memcpy(de, e, sizeof(struct arpt_entry));
	memcpy(&de->counters, &e->counters, sizeof(e->counters));

	*dstptr += sizeof(struct arpt_entry);
	*size += sizeof(struct arpt_entry) - sizeof(struct compat_arpt_entry);

	de->target_offset = e->target_offset - (origsize - *size);
	t = compat_arpt_get_target(e);
	target = t->u.kernel.target;
	xt_compat_target_from_user(t, dstptr, size);

	de->next_offset = e->next_offset - (origsize - *size);
	for (h = 0; h < NF_ARP_NUMHOOKS; h++) {
		if ((unsigned char *)de - base < newinfo->hook_entry[h])
			newinfo->hook_entry[h] -= origsize - *size;
		if ((unsigned char *)de - base < newinfo->underflow[h])
			newinfo->underflow[h] -= origsize - *size;
	}
	return ret;
}

/*
 * Translate a full 32-bit compat table blob into a freshly allocated native
 * table: two passes (validate+size, then convert), all under xt_compat_lock.
 */
static int translate_compat_table(const char *name,
				  unsigned int valid_hooks,
				  struct xt_table_info **pinfo,
				  void **pentry0,
				  unsigned int total_size,
				  unsigned int number,
				  unsigned int *hook_entries,
				  unsigned int *underflows)
{
	unsigned int i, j;
	struct xt_table_info *newinfo, *info;
	void *pos, *entry0, *entry1;
	struct compat_arpt_entry *iter0;
	struct arpt_entry *iter1;
	unsigned int size;
	int ret = 0;

	info = *pinfo;
	entry0 = *pentry0;
	size = total_size;
	info->number = number;

	/* Init all hooks to impossible value. */
	for (i = 0; i < NF_ARP_NUMHOOKS; i++) {
		info->hook_entry[i] = 0xFFFFFFFF;
		info->underflow[i] = 0xFFFFFFFF;
	}

	duprintf("translate_compat_table: size %u\n", info->size);
	j = 0;
	xt_compat_lock(NFPROTO_ARP);
	xt_compat_init_offsets(NFPROTO_ARP, number);
	/* Walk through entries, checking offsets. */
	xt_entry_foreach(iter0, entry0, total_size) {
		ret = check_compat_entry_size_and_hooks(iter0, info, &size,
							entry0,
							entry0 + total_size,
							hook_entries,
							underflows,
							name);
		if (ret != 0)
			goto out_unlock;
		++j;
	}

	ret = -EINVAL;
	if (j != number) {
		duprintf("translate_compat_table: %u not %u entries\n",
			 j, number);
		goto out_unlock;
	}

	/* Check hooks all assigned */
	for (i = 0; i < NF_ARP_NUMHOOKS; i++) {
		/* Only hooks which are valid */
		if (!(valid_hooks & (1 << i)))
			continue;
		if (info->hook_entry[i] == 0xFFFFFFFF) {
			duprintf("Invalid hook entry %u %u\n",
				 i, hook_entries[i]);
			goto out_unlock;
		}
		if (info->underflow[i] == 0xFFFFFFFF) {
			duprintf("Invalid underflow %u %u\n",
				 i, underflows[i]);
			goto out_unlock;
		}
	}

	ret = -ENOMEM;
	newinfo = xt_alloc_table_info(size);
	if (!newinfo)
		goto out_unlock;

	newinfo->number = number;
	for (i = 0; i < NF_ARP_NUMHOOKS; i++) {
		newinfo->hook_entry[i] = info->hook_entry[i];
		newinfo->underflow[i] = info->underflow[i];
	}
	entry1 = newinfo->entries;
	pos = entry1;
	size = total_size;
	xt_entry_foreach(iter0, entry0, total_size) {
		ret = compat_copy_entry_from_user(iter0, &pos, &size,
						  name, newinfo, entry1);
		if (ret != 0)
			break;
	}
	xt_compat_flush_offsets(NFPROTO_ARP);
	xt_compat_unlock(NFPROTO_ARP);
	if (ret)
		goto free_newinfo;

	ret = -ELOOP;
	if
(!mark_source_chains(newinfo, valid_hooks, entry1)) goto free_newinfo; i = 0; xt_entry_foreach(iter1, entry1, newinfo->size) { iter1->counters.pcnt = xt_percpu_counter_alloc(); if (IS_ERR_VALUE(iter1->counters.pcnt)) { ret = -ENOMEM; break; } ret = check_target(iter1, name); if (ret != 0) { xt_percpu_counter_free(iter1->counters.pcnt); break; } ++i; if (strcmp(arpt_get_target(iter1)->u.user.name, XT_ERROR_TARGET) == 0) ++newinfo->stacksize; } if (ret) { /* * The first i matches need cleanup_entry (calls ->destroy) * because they had called ->check already. The other j-i * entries need only release. */ int skip = i; j -= i; xt_entry_foreach(iter0, entry0, newinfo->size) { if (skip-- > 0) continue; if (j-- == 0) break; compat_release_entry(iter0); } xt_entry_foreach(iter1, entry1, newinfo->size) { if (i-- == 0) break; cleanup_entry(iter1); } xt_free_table_info(newinfo); return ret; } *pinfo = newinfo; *pentry0 = entry1; xt_free_table_info(info); return 0; free_newinfo: xt_free_table_info(newinfo); out: xt_entry_foreach(iter0, entry0, total_size) { if (j-- == 0) break; compat_release_entry(iter0); } return ret; out_unlock: xt_compat_flush_offsets(NFPROTO_ARP); xt_compat_unlock(NFPROTO_ARP); goto out; } struct compat_arpt_replace { char name[XT_TABLE_MAXNAMELEN]; u32 valid_hooks; u32 num_entries; u32 size; u32 hook_entry[NF_ARP_NUMHOOKS]; u32 underflow[NF_ARP_NUMHOOKS]; u32 num_counters; compat_uptr_t counters; struct compat_arpt_entry entries[0]; }; static int compat_do_replace(struct net *net, void __user *user, unsigned int len) { int ret; struct compat_arpt_replace tmp; struct xt_table_info *newinfo; void *loc_cpu_entry; struct arpt_entry *iter; if (copy_from_user(&tmp, user, sizeof(tmp)) != 0) return -EFAULT; /* overflow check */ if (tmp.size >= INT_MAX / num_possible_cpus()) return -ENOMEM; if (tmp.num_counters >= INT_MAX / sizeof(struct xt_counters)) return -ENOMEM; if (tmp.num_counters == 0) return -EINVAL; tmp.name[sizeof(tmp.name)-1] = 0; newinfo = 
xt_alloc_table_info(tmp.size); if (!newinfo) return -ENOMEM; loc_cpu_entry = newinfo->entries; if (copy_from_user(loc_cpu_entry, user + sizeof(tmp), tmp.size) != 0) { ret = -EFAULT; goto free_newinfo; } ret = translate_compat_table(tmp.name, tmp.valid_hooks, &newinfo, &loc_cpu_entry, tmp.size, tmp.num_entries, tmp.hook_entry, tmp.underflow); if (ret != 0) goto free_newinfo; duprintf("compat_do_replace: Translated table\n"); ret = __do_replace(net, tmp.name, tmp.valid_hooks, newinfo, tmp.num_counters, compat_ptr(tmp.counters)); if (ret) goto free_newinfo_untrans; return 0; free_newinfo_untrans: xt_entry_foreach(iter, loc_cpu_entry, newinfo->size) cleanup_entry(iter); free_newinfo: xt_free_table_info(newinfo); return ret; } static int compat_do_arpt_set_ctl(struct sock *sk, int cmd, void __user *user, unsigned int len) { int ret; if (!ns_capable(sock_net(sk)->user_ns, CAP_NET_ADMIN)) return -EPERM; switch (cmd) { case ARPT_SO_SET_REPLACE: ret = compat_do_replace(sock_net(sk), user, len); break; case ARPT_SO_SET_ADD_COUNTERS: ret = do_add_counters(sock_net(sk), user, len, 1); break; default: duprintf("do_arpt_set_ctl: unknown request %i\n", cmd); ret = -EINVAL; } return ret; } static int compat_copy_entry_to_user(struct arpt_entry *e, void __user **dstptr, compat_uint_t *size, struct xt_counters *counters, unsigned int i) { struct xt_entry_target *t; struct compat_arpt_entry __user *ce; u_int16_t target_offset, next_offset; compat_uint_t origsize; int ret; origsize = *size; ce = (struct compat_arpt_entry __user *)*dstptr; if (copy_to_user(ce, e, sizeof(struct arpt_entry)) != 0 || copy_to_user(&ce->counters, &counters[i], sizeof(counters[i])) != 0) return -EFAULT; *dstptr += sizeof(struct compat_arpt_entry); *size -= sizeof(struct arpt_entry) - sizeof(struct compat_arpt_entry); target_offset = e->target_offset - (origsize - *size); t = arpt_get_target(e); ret = xt_compat_target_to_user(t, dstptr, size); if (ret) return ret; next_offset = e->next_offset - (origsize - 
*size); if (put_user(target_offset, &ce->target_offset) != 0 || put_user(next_offset, &ce->next_offset) != 0) return -EFAULT; return 0; } static int compat_copy_entries_to_user(unsigned int total_size, struct xt_table *table, void __user *userptr) { struct xt_counters *counters; const struct xt_table_info *private = table->private; void __user *pos; unsigned int size; int ret = 0; unsigned int i = 0; struct arpt_entry *iter; counters = alloc_counters(table); if (IS_ERR(counters)) return PTR_ERR(counters); pos = userptr; size = total_size; xt_entry_foreach(iter, private->entries, total_size) { ret = compat_copy_entry_to_user(iter, &pos, &size, counters, i++); if (ret != 0) break; } vfree(counters); return ret; } struct compat_arpt_get_entries { char name[XT_TABLE_MAXNAMELEN]; compat_uint_t size; struct compat_arpt_entry entrytable[0]; }; static int compat_get_entries(struct net *net, struct compat_arpt_get_entries __user *uptr, int *len) { int ret; struct compat_arpt_get_entries get; struct xt_table *t; if (*len < sizeof(get)) { duprintf("compat_get_entries: %u < %zu\n", *len, sizeof(get)); return -EINVAL; } if (copy_from_user(&get, uptr, sizeof(get)) != 0) return -EFAULT; if (*len != sizeof(struct compat_arpt_get_entries) + get.size) { duprintf("compat_get_entries: %u != %zu\n", *len, sizeof(get) + get.size); return -EINVAL; } xt_compat_lock(NFPROTO_ARP); t = xt_find_table_lock(net, NFPROTO_ARP, get.name); if (!IS_ERR_OR_NULL(t)) { const struct xt_table_info *private = t->private; struct xt_table_info info; duprintf("t->private->number = %u\n", private->number); ret = compat_table_info(private, &info); if (!ret && get.size == info.size) { ret = compat_copy_entries_to_user(private->size, t, uptr->entrytable); } else if (!ret) { duprintf("compat_get_entries: I've got %u not %u!\n", private->size, get.size); ret = -EAGAIN; } xt_compat_flush_offsets(NFPROTO_ARP); module_put(t->me); xt_table_unlock(t); } else ret = t ? 
PTR_ERR(t) : -ENOENT; xt_compat_unlock(NFPROTO_ARP); return ret; } static int do_arpt_get_ctl(struct sock *, int, void __user *, int *); static int compat_do_arpt_get_ctl(struct sock *sk, int cmd, void __user *user, int *len) { int ret; if (!ns_capable(sock_net(sk)->user_ns, CAP_NET_ADMIN)) return -EPERM; switch (cmd) { case ARPT_SO_GET_INFO: ret = get_info(sock_net(sk), user, len, 1); break; case ARPT_SO_GET_ENTRIES: ret = compat_get_entries(sock_net(sk), user, len); break; default: ret = do_arpt_get_ctl(sk, cmd, user, len); } return ret; } #endif static int do_arpt_set_ctl(struct sock *sk, int cmd, void __user *user, unsigned int len) { int ret; if (!ns_capable(sock_net(sk)->user_ns, CAP_NET_ADMIN)) return -EPERM; switch (cmd) { case ARPT_SO_SET_REPLACE: ret = do_replace(sock_net(sk), user, len); break; case ARPT_SO_SET_ADD_COUNTERS: ret = do_add_counters(sock_net(sk), user, len, 0); break; default: duprintf("do_arpt_set_ctl: unknown request %i\n", cmd); ret = -EINVAL; } return ret; } static int do_arpt_get_ctl(struct sock *sk, int cmd, void __user *user, int *len) { int ret; if (!ns_capable(sock_net(sk)->user_ns, CAP_NET_ADMIN)) return -EPERM; switch (cmd) { case ARPT_SO_GET_INFO: ret = get_info(sock_net(sk), user, len, 0); break; case ARPT_SO_GET_ENTRIES: ret = get_entries(sock_net(sk), user, len); break; case ARPT_SO_GET_REVISION_TARGET: { struct xt_get_revision rev; if (*len != sizeof(rev)) { ret = -EINVAL; break; } if (copy_from_user(&rev, user, sizeof(rev)) != 0) { ret = -EFAULT; break; } rev.name[sizeof(rev.name)-1] = 0; try_then_request_module(xt_find_revision(NFPROTO_ARP, rev.name, rev.revision, 1, &ret), "arpt_%s", rev.name); break; } default: duprintf("do_arpt_get_ctl: unknown request %i\n", cmd); ret = -EINVAL; } return ret; } static void __arpt_unregister_table(struct xt_table *table) { struct xt_table_info *private; void *loc_cpu_entry; struct module *table_owner = table->me; struct arpt_entry *iter; private = xt_unregister_table(table); /* Decrease 
module usage counts and free resources */ loc_cpu_entry = private->entries; xt_entry_foreach(iter, loc_cpu_entry, private->size) cleanup_entry(iter); if (private->number > private->initial_entries) module_put(table_owner); xt_free_table_info(private); } int arpt_register_table(struct net *net, const struct xt_table *table, const struct arpt_replace *repl, const struct nf_hook_ops *ops, struct xt_table **res) { int ret; struct xt_table_info *newinfo; struct xt_table_info bootstrap = {0}; void *loc_cpu_entry; struct xt_table *new_table; newinfo = xt_alloc_table_info(repl->size); if (!newinfo) return -ENOMEM; loc_cpu_entry = newinfo->entries; memcpy(loc_cpu_entry, repl->entries, repl->size); ret = translate_table(newinfo, loc_cpu_entry, repl); duprintf("arpt_register_table: translate table gives %d\n", ret); if (ret != 0) goto out_free; new_table = xt_register_table(net, table, &bootstrap, newinfo); if (IS_ERR(new_table)) { ret = PTR_ERR(new_table); goto out_free; } /* set res now, will see skbs right after nf_register_net_hooks */ WRITE_ONCE(*res, new_table); ret = nf_register_net_hooks(net, ops, hweight32(table->valid_hooks)); if (ret != 0) { __arpt_unregister_table(new_table); *res = NULL; } return ret; out_free: xt_free_table_info(newinfo); return ret; } void arpt_unregister_table(struct net *net, struct xt_table *table, const struct nf_hook_ops *ops) { nf_unregister_net_hooks(net, ops, hweight32(table->valid_hooks)); __arpt_unregister_table(table); } /* The built-in targets: standard (NULL) and error. 
*/ static struct xt_target arpt_builtin_tg[] __read_mostly = { { .name = XT_STANDARD_TARGET, .targetsize = sizeof(int), .family = NFPROTO_ARP, #ifdef CONFIG_COMPAT .compatsize = sizeof(compat_int_t), .compat_from_user = compat_standard_from_user, .compat_to_user = compat_standard_to_user, #endif }, { .name = XT_ERROR_TARGET, .target = arpt_error, .targetsize = XT_FUNCTION_MAXNAMELEN, .family = NFPROTO_ARP, }, }; static struct nf_sockopt_ops arpt_sockopts = { .pf = PF_INET, .set_optmin = ARPT_BASE_CTL, .set_optmax = ARPT_SO_SET_MAX+1, .set = do_arpt_set_ctl, #ifdef CONFIG_COMPAT .compat_set = compat_do_arpt_set_ctl, #endif .get_optmin = ARPT_BASE_CTL, .get_optmax = ARPT_SO_GET_MAX+1, .get = do_arpt_get_ctl, #ifdef CONFIG_COMPAT .compat_get = compat_do_arpt_get_ctl, #endif .owner = THIS_MODULE, }; static int __net_init arp_tables_net_init(struct net *net) { return xt_proto_init(net, NFPROTO_ARP); } static void __net_exit arp_tables_net_exit(struct net *net) { xt_proto_fini(net, NFPROTO_ARP); } static struct pernet_operations arp_tables_net_ops = { .init = arp_tables_net_init, .exit = arp_tables_net_exit, }; static int __init arp_tables_init(void) { int ret; ret = register_pernet_subsys(&arp_tables_net_ops); if (ret < 0) goto err1; /* No one else will be downing sem now, so we won't sleep */ ret = xt_register_targets(arpt_builtin_tg, ARRAY_SIZE(arpt_builtin_tg)); if (ret < 0) goto err2; /* Register setsockopt */ ret = nf_register_sockopt(&arpt_sockopts); if (ret < 0) goto err4; pr_info("arp_tables: (C) 2002 David S. 
Miller\n"); return 0; err4: xt_unregister_targets(arpt_builtin_tg, ARRAY_SIZE(arpt_builtin_tg)); err2: unregister_pernet_subsys(&arp_tables_net_ops); err1: return ret; } static void __exit arp_tables_fini(void) { nf_unregister_sockopt(&arpt_sockopts); xt_unregister_targets(arpt_builtin_tg, ARRAY_SIZE(arpt_builtin_tg)); unregister_pernet_subsys(&arp_tables_net_ops); } EXPORT_SYMBOL(arpt_register_table); EXPORT_SYMBOL(arpt_unregister_table); EXPORT_SYMBOL(arpt_do_table); module_init(arp_tables_init); module_exit(arp_tables_fini);
static int mark_source_chains(const struct xt_table_info *newinfo, unsigned int valid_hooks, void *entry0) { unsigned int hook; /* No recursion; use packet counter to save back ptrs (reset * to 0 as we leave), and comefrom to save source hook bitmask. */ for (hook = 0; hook < NF_ARP_NUMHOOKS; hook++) { unsigned int pos = newinfo->hook_entry[hook]; struct arpt_entry *e = (struct arpt_entry *)(entry0 + pos); if (!(valid_hooks & (1 << hook))) continue; /* Set initial back pointer. */ e->counters.pcnt = pos; for (;;) { const struct xt_standard_target *t = (void *)arpt_get_target_c(e); int visited = e->comefrom & (1 << hook); if (e->comefrom & (1 << NF_ARP_NUMHOOKS)) { pr_notice("arptables: loop hook %u pos %u %08X.\n", hook, pos, e->comefrom); return 0; } e->comefrom |= ((1 << hook) | (1 << NF_ARP_NUMHOOKS)); /* Unconditional return/END. */ if ((e->target_offset == sizeof(struct arpt_entry) && (strcmp(t->target.u.user.name, XT_STANDARD_TARGET) == 0) && t->verdict < 0 && unconditional(&e->arp)) || visited) { unsigned int oldpos, size; if ((strcmp(t->target.u.user.name, XT_STANDARD_TARGET) == 0) && t->verdict < -NF_MAX_VERDICT - 1) { duprintf("mark_source_chains: bad " "negative verdict (%i)\n", t->verdict); return 0; } /* Return: backtrack through the last * big jump. */ do { e->comefrom ^= (1<<NF_ARP_NUMHOOKS); oldpos = pos; pos = e->counters.pcnt; e->counters.pcnt = 0; /* We're at the start. */ if (pos == oldpos) goto next; e = (struct arpt_entry *) (entry0 + pos); } while (oldpos == pos + e->next_offset); /* Move along one */ size = e->next_offset; e = (struct arpt_entry *) (entry0 + pos + size); e->counters.pcnt = pos; pos += size; } else { int newpos = t->verdict; if (strcmp(t->target.u.user.name, XT_STANDARD_TARGET) == 0 && newpos >= 0) { if (newpos > newinfo->size - sizeof(struct arpt_entry)) { duprintf("mark_source_chains: " "bad verdict (%i)\n", newpos); return 0; } /* This a jump; chase it. */ duprintf("Jump rule %u -> %u\n", pos, newpos); } else { /* ... 
this is a fallthru */ newpos = pos + e->next_offset; } e = (struct arpt_entry *) (entry0 + newpos); e->counters.pcnt = pos; pos = newpos; } } next: duprintf("Finished chain %u\n", hook); } return 1; }
static int mark_source_chains(const struct xt_table_info *newinfo, unsigned int valid_hooks, void *entry0) { unsigned int hook; /* No recursion; use packet counter to save back ptrs (reset * to 0 as we leave), and comefrom to save source hook bitmask. */ for (hook = 0; hook < NF_ARP_NUMHOOKS; hook++) { unsigned int pos = newinfo->hook_entry[hook]; struct arpt_entry *e = (struct arpt_entry *)(entry0 + pos); if (!(valid_hooks & (1 << hook))) continue; /* Set initial back pointer. */ e->counters.pcnt = pos; for (;;) { const struct xt_standard_target *t = (void *)arpt_get_target_c(e); int visited = e->comefrom & (1 << hook); if (e->comefrom & (1 << NF_ARP_NUMHOOKS)) { pr_notice("arptables: loop hook %u pos %u %08X.\n", hook, pos, e->comefrom); return 0; } e->comefrom |= ((1 << hook) | (1 << NF_ARP_NUMHOOKS)); /* Unconditional return/END. */ if ((unconditional(e) && (strcmp(t->target.u.user.name, XT_STANDARD_TARGET) == 0) && t->verdict < 0) || visited) { unsigned int oldpos, size; if ((strcmp(t->target.u.user.name, XT_STANDARD_TARGET) == 0) && t->verdict < -NF_MAX_VERDICT - 1) { duprintf("mark_source_chains: bad " "negative verdict (%i)\n", t->verdict); return 0; } /* Return: backtrack through the last * big jump. */ do { e->comefrom ^= (1<<NF_ARP_NUMHOOKS); oldpos = pos; pos = e->counters.pcnt; e->counters.pcnt = 0; /* We're at the start. */ if (pos == oldpos) goto next; e = (struct arpt_entry *) (entry0 + pos); } while (oldpos == pos + e->next_offset); /* Move along one */ size = e->next_offset; e = (struct arpt_entry *) (entry0 + pos + size); e->counters.pcnt = pos; pos += size; } else { int newpos = t->verdict; if (strcmp(t->target.u.user.name, XT_STANDARD_TARGET) == 0 && newpos >= 0) { if (newpos > newinfo->size - sizeof(struct arpt_entry)) { duprintf("mark_source_chains: " "bad verdict (%i)\n", newpos); return 0; } /* This a jump; chase it. */ duprintf("Jump rule %u -> %u\n", pos, newpos); } else { /* ... 
this is a fallthru */ newpos = pos + e->next_offset; } e = (struct arpt_entry *) (entry0 + newpos); e->counters.pcnt = pos; pos = newpos; } } next: duprintf("Finished chain %u\n", hook); } return 1; }
{'added': [(362, 'static inline bool unconditional(const struct arpt_entry *e)'), (366, '\treturn e->target_offset == sizeof(struct arpt_entry) &&'), (367, '\t memcmp(&e->arp, &uncond, sizeof(uncond)) == 0;'), (406, '\t\t\tif ((unconditional(e) &&'), (409, '\t\t\t t->verdict < 0) || visited) {'), (554, '\tif (!unconditional(e))'), (601, '\t\t\t\tpr_debug("Underflows must be unconditional and "'), (602, '\t\t\t\t\t "use the STANDARD target with "'), (603, '\t\t\t\t\t "ACCEPT/DROP\\n");')], 'deleted': [(362, 'static inline bool unconditional(const struct arpt_arp *arp)'), (366, '\treturn memcmp(arp, &uncond, sizeof(uncond)) == 0;'), (405, '\t\t\tif ((e->target_offset == sizeof(struct arpt_entry) &&'), (408, '\t\t\t t->verdict < 0 && unconditional(&e->arp)) ||'), (409, '\t\t\t visited) {'), (554, '\tif (!unconditional(&e->arp))'), (601, '\t\t\t\tpr_err("Underflows must be unconditional and "'), (602, '\t\t\t\t "use the STANDARD target with "'), (603, '\t\t\t\t "ACCEPT/DROP\\n");')]}
9
9
1,537
9,605
79
496
17
https://github.com/torvalds/linux
CVE-2016-3134
CWE-119
550
dvb-usb-firmware.c
C
usb_cypress_load_firmware
/* dvb-usb-firmware.c is part of the DVB USB library. * * Copyright (C) 2004-6 Patrick Boettcher (patrick.boettcher@posteo.de) * see dvb-usb-init.c for copyright information. * * This file contains functions for downloading the firmware to Cypress FX 1 and 2 based devices. * * FIXME: This part does actually not belong to dvb-usb, but to the usb-subsystem. */ #include "dvb-usb-common.h" #include <linux/usb.h> struct usb_cypress_controller { int id; const char *name; /* name of the usb controller */ u16 cpu_cs_register; /* needs to be restarted, when the firmware has been downloaded. */ }; static struct usb_cypress_controller cypress[] = { { .id = DEVICE_SPECIFIC, .name = "Device specific", .cpu_cs_register = 0 }, { .id = CYPRESS_AN2135, .name = "Cypress AN2135", .cpu_cs_register = 0x7f92 }, { .id = CYPRESS_AN2235, .name = "Cypress AN2235", .cpu_cs_register = 0x7f92 }, { .id = CYPRESS_FX2, .name = "Cypress FX2", .cpu_cs_register = 0xe600 }, }; /* * load a firmware packet to the device */ static int usb_cypress_writemem(struct usb_device *udev,u16 addr,u8 *data, u8 len) { return usb_control_msg(udev, usb_sndctrlpipe(udev,0), 0xa0, USB_TYPE_VENDOR, addr, 0x00, data, len, 5000); } int usb_cypress_load_firmware(struct usb_device *udev, const struct firmware *fw, int type) { struct hexline *hx; u8 reset; int ret,pos=0; hx = kmalloc(sizeof(*hx), GFP_KERNEL); if (!hx) return -ENOMEM; /* stop the CPU */ reset = 1; if ((ret = usb_cypress_writemem(udev,cypress[type].cpu_cs_register,&reset,1)) != 1) err("could not stop the USB controller CPU."); while ((ret = dvb_usb_get_hexline(fw, hx, &pos)) > 0) { deb_fw("writing to address 0x%04x (buffer: 0x%02x %02x)\n", hx->addr, hx->len, hx->chk); ret = usb_cypress_writemem(udev, hx->addr, hx->data, hx->len); if (ret != hx->len) { err("error while transferring firmware (transferred size: %d, block size: %d)", ret, hx->len); ret = -EINVAL; break; } } if (ret < 0) { err("firmware download failed at %d with %d",pos,ret); kfree(hx); return 
ret; } if (ret == 0) { /* restart the CPU */ reset = 0; if (ret || usb_cypress_writemem(udev,cypress[type].cpu_cs_register,&reset,1) != 1) { err("could not restart the USB controller CPU."); ret = -EINVAL; } } else ret = -EIO; kfree(hx); return ret; } EXPORT_SYMBOL(usb_cypress_load_firmware); int dvb_usb_download_firmware(struct usb_device *udev, struct dvb_usb_device_properties *props) { int ret; const struct firmware *fw = NULL; if ((ret = request_firmware(&fw, props->firmware, &udev->dev)) != 0) { err("did not find the firmware file. (%s) Please see linux/Documentation/dvb/ for more details on firmware-problems. (%d)", props->firmware,ret); return ret; } info("downloading firmware from file '%s'",props->firmware); switch (props->usb_ctrl) { case CYPRESS_AN2135: case CYPRESS_AN2235: case CYPRESS_FX2: ret = usb_cypress_load_firmware(udev, fw, props->usb_ctrl); break; case DEVICE_SPECIFIC: if (props->download_firmware) ret = props->download_firmware(udev,fw); else { err("BUG: driver didn't specified a download_firmware-callback, although it claims to have a DEVICE_SPECIFIC one."); ret = -EINVAL; } break; default: ret = -EINVAL; break; } release_firmware(fw); return ret; } int dvb_usb_get_hexline(const struct firmware *fw, struct hexline *hx, int *pos) { u8 *b = (u8 *) &fw->data[*pos]; int data_offs = 4; if (*pos >= fw->size) return 0; memset(hx,0,sizeof(struct hexline)); hx->len = b[0]; if ((*pos + hx->len + 4) >= fw->size) return -EINVAL; hx->addr = b[1] | (b[2] << 8); hx->type = b[3]; if (hx->type == 0x04) { /* b[4] and b[5] are the Extended linear address record data field */ hx->addr |= (b[4] << 24) | (b[5] << 16); /* hx->len -= 2; data_offs += 2; */ } memcpy(hx->data,&b[data_offs],hx->len); hx->chk = b[hx->len + data_offs]; *pos += hx->len + 5; return *pos; } EXPORT_SYMBOL(dvb_usb_get_hexline);
/* dvb-usb-firmware.c is part of the DVB USB library. * * Copyright (C) 2004-6 Patrick Boettcher (patrick.boettcher@posteo.de) * see dvb-usb-init.c for copyright information. * * This file contains functions for downloading the firmware to Cypress FX 1 and 2 based devices. * * FIXME: This part does actually not belong to dvb-usb, but to the usb-subsystem. */ #include "dvb-usb-common.h" #include <linux/usb.h> struct usb_cypress_controller { int id; const char *name; /* name of the usb controller */ u16 cpu_cs_register; /* needs to be restarted, when the firmware has been downloaded. */ }; static struct usb_cypress_controller cypress[] = { { .id = DEVICE_SPECIFIC, .name = "Device specific", .cpu_cs_register = 0 }, { .id = CYPRESS_AN2135, .name = "Cypress AN2135", .cpu_cs_register = 0x7f92 }, { .id = CYPRESS_AN2235, .name = "Cypress AN2235", .cpu_cs_register = 0x7f92 }, { .id = CYPRESS_FX2, .name = "Cypress FX2", .cpu_cs_register = 0xe600 }, }; /* * load a firmware packet to the device */ static int usb_cypress_writemem(struct usb_device *udev,u16 addr,u8 *data, u8 len) { return usb_control_msg(udev, usb_sndctrlpipe(udev,0), 0xa0, USB_TYPE_VENDOR, addr, 0x00, data, len, 5000); } int usb_cypress_load_firmware(struct usb_device *udev, const struct firmware *fw, int type) { struct hexline *hx; u8 *buf; int ret, pos = 0; u16 cpu_cs_register = cypress[type].cpu_cs_register; buf = kmalloc(sizeof(*hx), GFP_KERNEL); if (!buf) return -ENOMEM; hx = (struct hexline *)buf; /* stop the CPU */ buf[0] = 1; if (usb_cypress_writemem(udev, cpu_cs_register, buf, 1) != 1) err("could not stop the USB controller CPU."); while ((ret = dvb_usb_get_hexline(fw, hx, &pos)) > 0) { deb_fw("writing to address 0x%04x (buffer: 0x%02x %02x)\n", hx->addr, hx->len, hx->chk); ret = usb_cypress_writemem(udev, hx->addr, hx->data, hx->len); if (ret != hx->len) { err("error while transferring firmware (transferred size: %d, block size: %d)", ret, hx->len); ret = -EINVAL; break; } } if (ret < 0) { 
err("firmware download failed at %d with %d",pos,ret); kfree(buf); return ret; } if (ret == 0) { /* restart the CPU */ buf[0] = 0; if (usb_cypress_writemem(udev, cpu_cs_register, buf, 1) != 1) { err("could not restart the USB controller CPU."); ret = -EINVAL; } } else ret = -EIO; kfree(buf); return ret; } EXPORT_SYMBOL(usb_cypress_load_firmware); int dvb_usb_download_firmware(struct usb_device *udev, struct dvb_usb_device_properties *props) { int ret; const struct firmware *fw = NULL; if ((ret = request_firmware(&fw, props->firmware, &udev->dev)) != 0) { err("did not find the firmware file. (%s) Please see linux/Documentation/dvb/ for more details on firmware-problems. (%d)", props->firmware,ret); return ret; } info("downloading firmware from file '%s'",props->firmware); switch (props->usb_ctrl) { case CYPRESS_AN2135: case CYPRESS_AN2235: case CYPRESS_FX2: ret = usb_cypress_load_firmware(udev, fw, props->usb_ctrl); break; case DEVICE_SPECIFIC: if (props->download_firmware) ret = props->download_firmware(udev,fw); else { err("BUG: driver didn't specified a download_firmware-callback, although it claims to have a DEVICE_SPECIFIC one."); ret = -EINVAL; } break; default: ret = -EINVAL; break; } release_firmware(fw); return ret; } int dvb_usb_get_hexline(const struct firmware *fw, struct hexline *hx, int *pos) { u8 *b = (u8 *) &fw->data[*pos]; int data_offs = 4; if (*pos >= fw->size) return 0; memset(hx,0,sizeof(struct hexline)); hx->len = b[0]; if ((*pos + hx->len + 4) >= fw->size) return -EINVAL; hx->addr = b[1] | (b[2] << 8); hx->type = b[3]; if (hx->type == 0x04) { /* b[4] and b[5] are the Extended linear address record data field */ hx->addr |= (b[4] << 24) | (b[5] << 16); /* hx->len -= 2; data_offs += 2; */ } memcpy(hx->data,&b[data_offs],hx->len); hx->chk = b[hx->len + data_offs]; *pos += hx->len + 5; return *pos; } EXPORT_SYMBOL(dvb_usb_get_hexline);
int usb_cypress_load_firmware(struct usb_device *udev, const struct firmware *fw, int type) { struct hexline *hx; u8 reset; int ret,pos=0; hx = kmalloc(sizeof(*hx), GFP_KERNEL); if (!hx) return -ENOMEM; /* stop the CPU */ reset = 1; if ((ret = usb_cypress_writemem(udev,cypress[type].cpu_cs_register,&reset,1)) != 1) err("could not stop the USB controller CPU."); while ((ret = dvb_usb_get_hexline(fw, hx, &pos)) > 0) { deb_fw("writing to address 0x%04x (buffer: 0x%02x %02x)\n", hx->addr, hx->len, hx->chk); ret = usb_cypress_writemem(udev, hx->addr, hx->data, hx->len); if (ret != hx->len) { err("error while transferring firmware (transferred size: %d, block size: %d)", ret, hx->len); ret = -EINVAL; break; } } if (ret < 0) { err("firmware download failed at %d with %d",pos,ret); kfree(hx); return ret; } if (ret == 0) { /* restart the CPU */ reset = 0; if (ret || usb_cypress_writemem(udev,cypress[type].cpu_cs_register,&reset,1) != 1) { err("could not restart the USB controller CPU."); ret = -EINVAL; } } else ret = -EIO; kfree(hx); return ret; }
int usb_cypress_load_firmware(struct usb_device *udev, const struct firmware *fw, int type) { struct hexline *hx; u8 *buf; int ret, pos = 0; u16 cpu_cs_register = cypress[type].cpu_cs_register; buf = kmalloc(sizeof(*hx), GFP_KERNEL); if (!buf) return -ENOMEM; hx = (struct hexline *)buf; /* stop the CPU */ buf[0] = 1; if (usb_cypress_writemem(udev, cpu_cs_register, buf, 1) != 1) err("could not stop the USB controller CPU."); while ((ret = dvb_usb_get_hexline(fw, hx, &pos)) > 0) { deb_fw("writing to address 0x%04x (buffer: 0x%02x %02x)\n", hx->addr, hx->len, hx->chk); ret = usb_cypress_writemem(udev, hx->addr, hx->data, hx->len); if (ret != hx->len) { err("error while transferring firmware (transferred size: %d, block size: %d)", ret, hx->len); ret = -EINVAL; break; } } if (ret < 0) { err("firmware download failed at %d with %d",pos,ret); kfree(buf); return ret; } if (ret == 0) { /* restart the CPU */ buf[0] = 0; if (usb_cypress_writemem(udev, cpu_cs_register, buf, 1) != 1) { err("could not restart the USB controller CPU."); ret = -EINVAL; } } else ret = -EIO; kfree(buf); return ret; }
{'added': [(39, '\tu8 *buf;'), (40, '\tint ret, pos = 0;'), (41, '\tu16 cpu_cs_register = cypress[type].cpu_cs_register;'), (43, '\tbuf = kmalloc(sizeof(*hx), GFP_KERNEL);'), (44, '\tif (!buf)'), (46, '\thx = (struct hexline *)buf;'), (49, '\tbuf[0] = 1;'), (50, '\tif (usb_cypress_writemem(udev, cpu_cs_register, buf, 1) != 1)'), (66, '\t\tkfree(buf);'), (72, '\t\tbuf[0] = 0;'), (73, '\t\tif (usb_cypress_writemem(udev, cpu_cs_register, buf, 1) != 1) {'), (80, '\tkfree(buf);')], 'deleted': [(39, '\tu8 reset;'), (40, '\tint ret,pos=0;'), (42, '\thx = kmalloc(sizeof(*hx), GFP_KERNEL);'), (43, '\tif (!hx)'), (47, '\treset = 1;'), (48, '\tif ((ret = usb_cypress_writemem(udev,cypress[type].cpu_cs_register,&reset,1)) != 1)'), (64, '\t\tkfree(hx);'), (70, '\t\treset = 0;'), (71, '\t\tif (ret || usb_cypress_writemem(udev,cypress[type].cpu_cs_register,&reset,1) != 1) {'), (78, '\tkfree(hx);')]}
12
10
111
784
37
259
9
https://github.com/torvalds/linux
CVE-2017-8061
CWE-119
2,296
ioapic.c
C
rtc_irq_eoi_tracking_reset
/* * Copyright (C) 2001 MandrakeSoft S.A. * Copyright 2010 Red Hat, Inc. and/or its affiliates. * * MandrakeSoft S.A. * 43, rue d'Aboukir * 75002 Paris - France * http://www.linux-mandrake.com/ * http://www.mandrakesoft.com/ * * This library is free software; you can redistribute it and/or * modify it under the terms of the GNU Lesser General Public * License as published by the Free Software Foundation; either * version 2 of the License, or (at your option) any later version. * * This library is distributed in the hope that it will be useful, * but WITHOUT ANY WARRANTY; without even the implied warranty of * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU * Lesser General Public License for more details. * * You should have received a copy of the GNU Lesser General Public * License along with this library; if not, write to the Free Software * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA * * Yunhong Jiang <yunhong.jiang@intel.com> * Yaozu (Eddie) Dong <eddie.dong@intel.com> * Based on Xen 3.1 code. */ #include <linux/kvm_host.h> #include <linux/kvm.h> #include <linux/mm.h> #include <linux/highmem.h> #include <linux/smp.h> #include <linux/hrtimer.h> #include <linux/io.h> #include <linux/slab.h> #include <linux/export.h> #include <asm/processor.h> #include <asm/page.h> #include <asm/current.h> #include <trace/events/kvm.h> #include "ioapic.h" #include "lapic.h" #include "irq.h" #if 0 #define ioapic_debug(fmt,arg...) printk(KERN_WARNING fmt,##arg) #else #define ioapic_debug(fmt, arg...) 
#endif static int ioapic_service(struct kvm_ioapic *vioapic, int irq, bool line_status); static unsigned long ioapic_read_indirect(struct kvm_ioapic *ioapic, unsigned long addr, unsigned long length) { unsigned long result = 0; switch (ioapic->ioregsel) { case IOAPIC_REG_VERSION: result = ((((IOAPIC_NUM_PINS - 1) & 0xff) << 16) | (IOAPIC_VERSION_ID & 0xff)); break; case IOAPIC_REG_APIC_ID: case IOAPIC_REG_ARB_ID: result = ((ioapic->id & 0xf) << 24); break; default: { u32 redir_index = (ioapic->ioregsel - 0x10) >> 1; u64 redir_content; if (redir_index < IOAPIC_NUM_PINS) redir_content = ioapic->redirtbl[redir_index].bits; else redir_content = ~0ULL; result = (ioapic->ioregsel & 0x1) ? (redir_content >> 32) & 0xffffffff : redir_content & 0xffffffff; break; } } return result; } static void rtc_irq_eoi_tracking_reset(struct kvm_ioapic *ioapic) { ioapic->rtc_status.pending_eoi = 0; bitmap_zero(ioapic->rtc_status.dest_map.map, KVM_MAX_VCPUS); } static void kvm_rtc_eoi_tracking_restore_all(struct kvm_ioapic *ioapic); static void rtc_status_pending_eoi_check_valid(struct kvm_ioapic *ioapic) { if (WARN_ON(ioapic->rtc_status.pending_eoi < 0)) kvm_rtc_eoi_tracking_restore_all(ioapic); } static void __rtc_irq_eoi_tracking_restore_one(struct kvm_vcpu *vcpu) { bool new_val, old_val; struct kvm_ioapic *ioapic = vcpu->kvm->arch.vioapic; struct dest_map *dest_map = &ioapic->rtc_status.dest_map; union kvm_ioapic_redirect_entry *e; e = &ioapic->redirtbl[RTC_GSI]; if (!kvm_apic_match_dest(vcpu, NULL, 0, e->fields.dest_id, e->fields.dest_mode)) return; new_val = kvm_apic_pending_eoi(vcpu, e->fields.vector); old_val = test_bit(vcpu->vcpu_id, dest_map->map); if (new_val == old_val) return; if (new_val) { __set_bit(vcpu->vcpu_id, dest_map->map); dest_map->vectors[vcpu->vcpu_id] = e->fields.vector; ioapic->rtc_status.pending_eoi++; } else { __clear_bit(vcpu->vcpu_id, dest_map->map); ioapic->rtc_status.pending_eoi--; rtc_status_pending_eoi_check_valid(ioapic); } } void 
kvm_rtc_eoi_tracking_restore_one(struct kvm_vcpu *vcpu) { struct kvm_ioapic *ioapic = vcpu->kvm->arch.vioapic; spin_lock(&ioapic->lock); __rtc_irq_eoi_tracking_restore_one(vcpu); spin_unlock(&ioapic->lock); } static void kvm_rtc_eoi_tracking_restore_all(struct kvm_ioapic *ioapic) { struct kvm_vcpu *vcpu; int i; if (RTC_GSI >= IOAPIC_NUM_PINS) return; rtc_irq_eoi_tracking_reset(ioapic); kvm_for_each_vcpu(i, vcpu, ioapic->kvm) __rtc_irq_eoi_tracking_restore_one(vcpu); } static void rtc_irq_eoi(struct kvm_ioapic *ioapic, struct kvm_vcpu *vcpu) { if (test_and_clear_bit(vcpu->vcpu_id, ioapic->rtc_status.dest_map.map)) { --ioapic->rtc_status.pending_eoi; rtc_status_pending_eoi_check_valid(ioapic); } } static bool rtc_irq_check_coalesced(struct kvm_ioapic *ioapic) { if (ioapic->rtc_status.pending_eoi > 0) return true; /* coalesced */ return false; } static int ioapic_set_irq(struct kvm_ioapic *ioapic, unsigned int irq, int irq_level, bool line_status) { union kvm_ioapic_redirect_entry entry; u32 mask = 1 << irq; u32 old_irr; int edge, ret; entry = ioapic->redirtbl[irq]; edge = (entry.fields.trig_mode == IOAPIC_EDGE_TRIG); if (!irq_level) { ioapic->irr &= ~mask; ret = 1; goto out; } /* * Return 0 for coalesced interrupts; for edge-triggered interrupts, * this only happens if a previous edge has not been delivered due * do masking. For level interrupts, the remote_irr field tells * us if the interrupt is waiting for an EOI. * * RTC is special: it is edge-triggered, but userspace likes to know * if it has been already ack-ed via EOI because coalesced RTC * interrupts lead to time drift in Windows guests. So we track * EOI manually for the RTC interrupt. 
*/ if (irq == RTC_GSI && line_status && rtc_irq_check_coalesced(ioapic)) { ret = 0; goto out; } old_irr = ioapic->irr; ioapic->irr |= mask; if (edge) ioapic->irr_delivered &= ~mask; if ((edge && old_irr == ioapic->irr) || (!edge && entry.fields.remote_irr)) { ret = 0; goto out; } ret = ioapic_service(ioapic, irq, line_status); out: trace_kvm_ioapic_set_irq(entry.bits, irq, ret == 0); return ret; } static void kvm_ioapic_inject_all(struct kvm_ioapic *ioapic, unsigned long irr) { u32 idx; rtc_irq_eoi_tracking_reset(ioapic); for_each_set_bit(idx, &irr, IOAPIC_NUM_PINS) ioapic_set_irq(ioapic, idx, 1, true); kvm_rtc_eoi_tracking_restore_all(ioapic); } void kvm_ioapic_scan_entry(struct kvm_vcpu *vcpu, ulong *ioapic_handled_vectors) { struct kvm_ioapic *ioapic = vcpu->kvm->arch.vioapic; struct dest_map *dest_map = &ioapic->rtc_status.dest_map; union kvm_ioapic_redirect_entry *e; int index; spin_lock(&ioapic->lock); /* Make sure we see any missing RTC EOI */ if (test_bit(vcpu->vcpu_id, dest_map->map)) __set_bit(dest_map->vectors[vcpu->vcpu_id], ioapic_handled_vectors); for (index = 0; index < IOAPIC_NUM_PINS; index++) { e = &ioapic->redirtbl[index]; if (e->fields.trig_mode == IOAPIC_LEVEL_TRIG || kvm_irq_has_notifier(ioapic->kvm, KVM_IRQCHIP_IOAPIC, index) || index == RTC_GSI) { if (kvm_apic_match_dest(vcpu, NULL, 0, e->fields.dest_id, e->fields.dest_mode) || (e->fields.trig_mode == IOAPIC_EDGE_TRIG && kvm_apic_pending_eoi(vcpu, e->fields.vector))) __set_bit(e->fields.vector, ioapic_handled_vectors); } } spin_unlock(&ioapic->lock); } void kvm_vcpu_request_scan_ioapic(struct kvm *kvm) { struct kvm_ioapic *ioapic = kvm->arch.vioapic; if (!ioapic) return; kvm_make_scan_ioapic_request(kvm); } static void ioapic_write_indirect(struct kvm_ioapic *ioapic, u32 val) { unsigned index; bool mask_before, mask_after; union kvm_ioapic_redirect_entry *e; switch (ioapic->ioregsel) { case IOAPIC_REG_VERSION: /* Writes are ignored. 
*/ break; case IOAPIC_REG_APIC_ID: ioapic->id = (val >> 24) & 0xf; break; case IOAPIC_REG_ARB_ID: break; default: index = (ioapic->ioregsel - 0x10) >> 1; ioapic_debug("change redir index %x val %x\n", index, val); if (index >= IOAPIC_NUM_PINS) return; e = &ioapic->redirtbl[index]; mask_before = e->fields.mask; if (ioapic->ioregsel & 1) { e->bits &= 0xffffffff; e->bits |= (u64) val << 32; } else { e->bits &= ~0xffffffffULL; e->bits |= (u32) val; e->fields.remote_irr = 0; } mask_after = e->fields.mask; if (mask_before != mask_after) kvm_fire_mask_notifiers(ioapic->kvm, KVM_IRQCHIP_IOAPIC, index, mask_after); if (e->fields.trig_mode == IOAPIC_LEVEL_TRIG && ioapic->irr & (1 << index)) ioapic_service(ioapic, index, false); kvm_vcpu_request_scan_ioapic(ioapic->kvm); break; } } static int ioapic_service(struct kvm_ioapic *ioapic, int irq, bool line_status) { union kvm_ioapic_redirect_entry *entry = &ioapic->redirtbl[irq]; struct kvm_lapic_irq irqe; int ret; if (entry->fields.mask) return -1; ioapic_debug("dest=%x dest_mode=%x delivery_mode=%x " "vector=%x trig_mode=%x\n", entry->fields.dest_id, entry->fields.dest_mode, entry->fields.delivery_mode, entry->fields.vector, entry->fields.trig_mode); irqe.dest_id = entry->fields.dest_id; irqe.vector = entry->fields.vector; irqe.dest_mode = entry->fields.dest_mode; irqe.trig_mode = entry->fields.trig_mode; irqe.delivery_mode = entry->fields.delivery_mode << 8; irqe.level = 1; irqe.shorthand = 0; irqe.msi_redir_hint = false; if (irqe.trig_mode == IOAPIC_EDGE_TRIG) ioapic->irr_delivered |= 1 << irq; if (irq == RTC_GSI && line_status) { /* * pending_eoi cannot ever become negative (see * rtc_status_pending_eoi_check_valid) and the caller * ensures that it is only called if it is >= zero, namely * if rtc_irq_check_coalesced returns false). */ BUG_ON(ioapic->rtc_status.pending_eoi != 0); ret = kvm_irq_delivery_to_apic(ioapic->kvm, NULL, &irqe, &ioapic->rtc_status.dest_map); ioapic->rtc_status.pending_eoi = (ret < 0 ? 
0 : ret); } else ret = kvm_irq_delivery_to_apic(ioapic->kvm, NULL, &irqe, NULL); if (ret && irqe.trig_mode == IOAPIC_LEVEL_TRIG) entry->fields.remote_irr = 1; return ret; } int kvm_ioapic_set_irq(struct kvm_ioapic *ioapic, int irq, int irq_source_id, int level, bool line_status) { int ret, irq_level; BUG_ON(irq < 0 || irq >= IOAPIC_NUM_PINS); spin_lock(&ioapic->lock); irq_level = __kvm_irq_line_state(&ioapic->irq_states[irq], irq_source_id, level); ret = ioapic_set_irq(ioapic, irq, irq_level, line_status); spin_unlock(&ioapic->lock); return ret; } void kvm_ioapic_clear_all(struct kvm_ioapic *ioapic, int irq_source_id) { int i; spin_lock(&ioapic->lock); for (i = 0; i < KVM_IOAPIC_NUM_PINS; i++) __clear_bit(irq_source_id, &ioapic->irq_states[i]); spin_unlock(&ioapic->lock); } static void kvm_ioapic_eoi_inject_work(struct work_struct *work) { int i; struct kvm_ioapic *ioapic = container_of(work, struct kvm_ioapic, eoi_inject.work); spin_lock(&ioapic->lock); for (i = 0; i < IOAPIC_NUM_PINS; i++) { union kvm_ioapic_redirect_entry *ent = &ioapic->redirtbl[i]; if (ent->fields.trig_mode != IOAPIC_LEVEL_TRIG) continue; if (ioapic->irr & (1 << i) && !ent->fields.remote_irr) ioapic_service(ioapic, i, false); } spin_unlock(&ioapic->lock); } #define IOAPIC_SUCCESSIVE_IRQ_MAX_COUNT 10000 static void __kvm_ioapic_update_eoi(struct kvm_vcpu *vcpu, struct kvm_ioapic *ioapic, int vector, int trigger_mode) { struct dest_map *dest_map = &ioapic->rtc_status.dest_map; struct kvm_lapic *apic = vcpu->arch.apic; int i; /* RTC special handling */ if (test_bit(vcpu->vcpu_id, dest_map->map) && vector == dest_map->vectors[vcpu->vcpu_id]) rtc_irq_eoi(ioapic, vcpu); for (i = 0; i < IOAPIC_NUM_PINS; i++) { union kvm_ioapic_redirect_entry *ent = &ioapic->redirtbl[i]; if (ent->fields.vector != vector) continue; /* * We are dropping lock while calling ack notifiers because ack * notifier callbacks for assigned devices call into IOAPIC * recursively. 
Since remote_irr is cleared only after call * to notifiers if the same vector will be delivered while lock * is dropped it will be put into irr and will be delivered * after ack notifier returns. */ spin_unlock(&ioapic->lock); kvm_notify_acked_irq(ioapic->kvm, KVM_IRQCHIP_IOAPIC, i); spin_lock(&ioapic->lock); if (trigger_mode != IOAPIC_LEVEL_TRIG || kvm_lapic_get_reg(apic, APIC_SPIV) & APIC_SPIV_DIRECTED_EOI) continue; ASSERT(ent->fields.trig_mode == IOAPIC_LEVEL_TRIG); ent->fields.remote_irr = 0; if (!ent->fields.mask && (ioapic->irr & (1 << i))) { ++ioapic->irq_eoi[i]; if (ioapic->irq_eoi[i] == IOAPIC_SUCCESSIVE_IRQ_MAX_COUNT) { /* * Real hardware does not deliver the interrupt * immediately during eoi broadcast, and this * lets a buggy guest make slow progress * even if it does not correctly handle a * level-triggered interrupt. Emulate this * behavior if we detect an interrupt storm. */ schedule_delayed_work(&ioapic->eoi_inject, HZ / 100); ioapic->irq_eoi[i] = 0; trace_kvm_ioapic_delayed_eoi_inj(ent->bits); } else { ioapic_service(ioapic, i, false); } } else { ioapic->irq_eoi[i] = 0; } } } void kvm_ioapic_update_eoi(struct kvm_vcpu *vcpu, int vector, int trigger_mode) { struct kvm_ioapic *ioapic = vcpu->kvm->arch.vioapic; spin_lock(&ioapic->lock); __kvm_ioapic_update_eoi(vcpu, ioapic, vector, trigger_mode); spin_unlock(&ioapic->lock); } static inline struct kvm_ioapic *to_ioapic(struct kvm_io_device *dev) { return container_of(dev, struct kvm_ioapic, dev); } static inline int ioapic_in_range(struct kvm_ioapic *ioapic, gpa_t addr) { return ((addr >= ioapic->base_address && (addr < ioapic->base_address + IOAPIC_MEM_LENGTH))); } static int ioapic_mmio_read(struct kvm_vcpu *vcpu, struct kvm_io_device *this, gpa_t addr, int len, void *val) { struct kvm_ioapic *ioapic = to_ioapic(this); u32 result; if (!ioapic_in_range(ioapic, addr)) return -EOPNOTSUPP; ioapic_debug("addr %lx\n", (unsigned long)addr); ASSERT(!(addr & 0xf)); /* check alignment */ addr &= 0xff; 
spin_lock(&ioapic->lock); switch (addr) { case IOAPIC_REG_SELECT: result = ioapic->ioregsel; break; case IOAPIC_REG_WINDOW: result = ioapic_read_indirect(ioapic, addr, len); break; default: result = 0; break; } spin_unlock(&ioapic->lock); switch (len) { case 8: *(u64 *) val = result; break; case 1: case 2: case 4: memcpy(val, (char *)&result, len); break; default: printk(KERN_WARNING "ioapic: wrong length %d\n", len); } return 0; } static int ioapic_mmio_write(struct kvm_vcpu *vcpu, struct kvm_io_device *this, gpa_t addr, int len, const void *val) { struct kvm_ioapic *ioapic = to_ioapic(this); u32 data; if (!ioapic_in_range(ioapic, addr)) return -EOPNOTSUPP; ioapic_debug("ioapic_mmio_write addr=%p len=%d val=%p\n", (void*)addr, len, val); ASSERT(!(addr & 0xf)); /* check alignment */ switch (len) { case 8: case 4: data = *(u32 *) val; break; case 2: data = *(u16 *) val; break; case 1: data = *(u8 *) val; break; default: printk(KERN_WARNING "ioapic: Unsupported size %d\n", len); return 0; } addr &= 0xff; spin_lock(&ioapic->lock); switch (addr) { case IOAPIC_REG_SELECT: ioapic->ioregsel = data & 0xFF; /* 8-bit register */ break; case IOAPIC_REG_WINDOW: ioapic_write_indirect(ioapic, data); break; default: break; } spin_unlock(&ioapic->lock); return 0; } static void kvm_ioapic_reset(struct kvm_ioapic *ioapic) { int i; cancel_delayed_work_sync(&ioapic->eoi_inject); for (i = 0; i < IOAPIC_NUM_PINS; i++) ioapic->redirtbl[i].fields.mask = 1; ioapic->base_address = IOAPIC_DEFAULT_BASE_ADDRESS; ioapic->ioregsel = 0; ioapic->irr = 0; ioapic->irr_delivered = 0; ioapic->id = 0; memset(ioapic->irq_eoi, 0x00, sizeof(ioapic->irq_eoi)); rtc_irq_eoi_tracking_reset(ioapic); } static const struct kvm_io_device_ops ioapic_mmio_ops = { .read = ioapic_mmio_read, .write = ioapic_mmio_write, }; int kvm_ioapic_init(struct kvm *kvm) { struct kvm_ioapic *ioapic; int ret; ioapic = kzalloc(sizeof(struct kvm_ioapic), GFP_KERNEL); if (!ioapic) return -ENOMEM; spin_lock_init(&ioapic->lock); 
INIT_DELAYED_WORK(&ioapic->eoi_inject, kvm_ioapic_eoi_inject_work); kvm->arch.vioapic = ioapic; kvm_ioapic_reset(ioapic); kvm_iodevice_init(&ioapic->dev, &ioapic_mmio_ops); ioapic->kvm = kvm; mutex_lock(&kvm->slots_lock); ret = kvm_io_bus_register_dev(kvm, KVM_MMIO_BUS, ioapic->base_address, IOAPIC_MEM_LENGTH, &ioapic->dev); mutex_unlock(&kvm->slots_lock); if (ret < 0) { kvm->arch.vioapic = NULL; kfree(ioapic); return ret; } kvm_vcpu_request_scan_ioapic(kvm); return ret; } void kvm_ioapic_destroy(struct kvm *kvm) { struct kvm_ioapic *ioapic = kvm->arch.vioapic; cancel_delayed_work_sync(&ioapic->eoi_inject); kvm_io_bus_unregister_dev(kvm, KVM_MMIO_BUS, &ioapic->dev); kvm->arch.vioapic = NULL; kfree(ioapic); } int kvm_get_ioapic(struct kvm *kvm, struct kvm_ioapic_state *state) { struct kvm_ioapic *ioapic = ioapic_irqchip(kvm); if (!ioapic) return -EINVAL; spin_lock(&ioapic->lock); memcpy(state, ioapic, sizeof(struct kvm_ioapic_state)); state->irr &= ~ioapic->irr_delivered; spin_unlock(&ioapic->lock); return 0; } int kvm_set_ioapic(struct kvm *kvm, struct kvm_ioapic_state *state) { struct kvm_ioapic *ioapic = ioapic_irqchip(kvm); if (!ioapic) return -EINVAL; spin_lock(&ioapic->lock); memcpy(ioapic, state, sizeof(struct kvm_ioapic_state)); ioapic->irr = 0; ioapic->irr_delivered = 0; kvm_vcpu_request_scan_ioapic(kvm); kvm_ioapic_inject_all(ioapic, state->irr); spin_unlock(&ioapic->lock); return 0; }
/* * Copyright (C) 2001 MandrakeSoft S.A. * Copyright 2010 Red Hat, Inc. and/or its affiliates. * * MandrakeSoft S.A. * 43, rue d'Aboukir * 75002 Paris - France * http://www.linux-mandrake.com/ * http://www.mandrakesoft.com/ * * This library is free software; you can redistribute it and/or * modify it under the terms of the GNU Lesser General Public * License as published by the Free Software Foundation; either * version 2 of the License, or (at your option) any later version. * * This library is distributed in the hope that it will be useful, * but WITHOUT ANY WARRANTY; without even the implied warranty of * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU * Lesser General Public License for more details. * * You should have received a copy of the GNU Lesser General Public * License along with this library; if not, write to the Free Software * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA * * Yunhong Jiang <yunhong.jiang@intel.com> * Yaozu (Eddie) Dong <eddie.dong@intel.com> * Based on Xen 3.1 code. */ #include <linux/kvm_host.h> #include <linux/kvm.h> #include <linux/mm.h> #include <linux/highmem.h> #include <linux/smp.h> #include <linux/hrtimer.h> #include <linux/io.h> #include <linux/slab.h> #include <linux/export.h> #include <asm/processor.h> #include <asm/page.h> #include <asm/current.h> #include <trace/events/kvm.h> #include "ioapic.h" #include "lapic.h" #include "irq.h" #if 0 #define ioapic_debug(fmt,arg...) printk(KERN_WARNING fmt,##arg) #else #define ioapic_debug(fmt, arg...) 
#endif static int ioapic_service(struct kvm_ioapic *vioapic, int irq, bool line_status); static unsigned long ioapic_read_indirect(struct kvm_ioapic *ioapic, unsigned long addr, unsigned long length) { unsigned long result = 0; switch (ioapic->ioregsel) { case IOAPIC_REG_VERSION: result = ((((IOAPIC_NUM_PINS - 1) & 0xff) << 16) | (IOAPIC_VERSION_ID & 0xff)); break; case IOAPIC_REG_APIC_ID: case IOAPIC_REG_ARB_ID: result = ((ioapic->id & 0xf) << 24); break; default: { u32 redir_index = (ioapic->ioregsel - 0x10) >> 1; u64 redir_content; if (redir_index < IOAPIC_NUM_PINS) redir_content = ioapic->redirtbl[redir_index].bits; else redir_content = ~0ULL; result = (ioapic->ioregsel & 0x1) ? (redir_content >> 32) & 0xffffffff : redir_content & 0xffffffff; break; } } return result; } static void rtc_irq_eoi_tracking_reset(struct kvm_ioapic *ioapic) { ioapic->rtc_status.pending_eoi = 0; bitmap_zero(ioapic->rtc_status.dest_map.map, KVM_MAX_VCPU_ID); } static void kvm_rtc_eoi_tracking_restore_all(struct kvm_ioapic *ioapic); static void rtc_status_pending_eoi_check_valid(struct kvm_ioapic *ioapic) { if (WARN_ON(ioapic->rtc_status.pending_eoi < 0)) kvm_rtc_eoi_tracking_restore_all(ioapic); } static void __rtc_irq_eoi_tracking_restore_one(struct kvm_vcpu *vcpu) { bool new_val, old_val; struct kvm_ioapic *ioapic = vcpu->kvm->arch.vioapic; struct dest_map *dest_map = &ioapic->rtc_status.dest_map; union kvm_ioapic_redirect_entry *e; e = &ioapic->redirtbl[RTC_GSI]; if (!kvm_apic_match_dest(vcpu, NULL, 0, e->fields.dest_id, e->fields.dest_mode)) return; new_val = kvm_apic_pending_eoi(vcpu, e->fields.vector); old_val = test_bit(vcpu->vcpu_id, dest_map->map); if (new_val == old_val) return; if (new_val) { __set_bit(vcpu->vcpu_id, dest_map->map); dest_map->vectors[vcpu->vcpu_id] = e->fields.vector; ioapic->rtc_status.pending_eoi++; } else { __clear_bit(vcpu->vcpu_id, dest_map->map); ioapic->rtc_status.pending_eoi--; rtc_status_pending_eoi_check_valid(ioapic); } } void 
kvm_rtc_eoi_tracking_restore_one(struct kvm_vcpu *vcpu) { struct kvm_ioapic *ioapic = vcpu->kvm->arch.vioapic; spin_lock(&ioapic->lock); __rtc_irq_eoi_tracking_restore_one(vcpu); spin_unlock(&ioapic->lock); } static void kvm_rtc_eoi_tracking_restore_all(struct kvm_ioapic *ioapic) { struct kvm_vcpu *vcpu; int i; if (RTC_GSI >= IOAPIC_NUM_PINS) return; rtc_irq_eoi_tracking_reset(ioapic); kvm_for_each_vcpu(i, vcpu, ioapic->kvm) __rtc_irq_eoi_tracking_restore_one(vcpu); } static void rtc_irq_eoi(struct kvm_ioapic *ioapic, struct kvm_vcpu *vcpu) { if (test_and_clear_bit(vcpu->vcpu_id, ioapic->rtc_status.dest_map.map)) { --ioapic->rtc_status.pending_eoi; rtc_status_pending_eoi_check_valid(ioapic); } } static bool rtc_irq_check_coalesced(struct kvm_ioapic *ioapic) { if (ioapic->rtc_status.pending_eoi > 0) return true; /* coalesced */ return false; } static int ioapic_set_irq(struct kvm_ioapic *ioapic, unsigned int irq, int irq_level, bool line_status) { union kvm_ioapic_redirect_entry entry; u32 mask = 1 << irq; u32 old_irr; int edge, ret; entry = ioapic->redirtbl[irq]; edge = (entry.fields.trig_mode == IOAPIC_EDGE_TRIG); if (!irq_level) { ioapic->irr &= ~mask; ret = 1; goto out; } /* * Return 0 for coalesced interrupts; for edge-triggered interrupts, * this only happens if a previous edge has not been delivered due * do masking. For level interrupts, the remote_irr field tells * us if the interrupt is waiting for an EOI. * * RTC is special: it is edge-triggered, but userspace likes to know * if it has been already ack-ed via EOI because coalesced RTC * interrupts lead to time drift in Windows guests. So we track * EOI manually for the RTC interrupt. 
*/ if (irq == RTC_GSI && line_status && rtc_irq_check_coalesced(ioapic)) { ret = 0; goto out; } old_irr = ioapic->irr; ioapic->irr |= mask; if (edge) ioapic->irr_delivered &= ~mask; if ((edge && old_irr == ioapic->irr) || (!edge && entry.fields.remote_irr)) { ret = 0; goto out; } ret = ioapic_service(ioapic, irq, line_status); out: trace_kvm_ioapic_set_irq(entry.bits, irq, ret == 0); return ret; } static void kvm_ioapic_inject_all(struct kvm_ioapic *ioapic, unsigned long irr) { u32 idx; rtc_irq_eoi_tracking_reset(ioapic); for_each_set_bit(idx, &irr, IOAPIC_NUM_PINS) ioapic_set_irq(ioapic, idx, 1, true); kvm_rtc_eoi_tracking_restore_all(ioapic); } void kvm_ioapic_scan_entry(struct kvm_vcpu *vcpu, ulong *ioapic_handled_vectors) { struct kvm_ioapic *ioapic = vcpu->kvm->arch.vioapic; struct dest_map *dest_map = &ioapic->rtc_status.dest_map; union kvm_ioapic_redirect_entry *e; int index; spin_lock(&ioapic->lock); /* Make sure we see any missing RTC EOI */ if (test_bit(vcpu->vcpu_id, dest_map->map)) __set_bit(dest_map->vectors[vcpu->vcpu_id], ioapic_handled_vectors); for (index = 0; index < IOAPIC_NUM_PINS; index++) { e = &ioapic->redirtbl[index]; if (e->fields.trig_mode == IOAPIC_LEVEL_TRIG || kvm_irq_has_notifier(ioapic->kvm, KVM_IRQCHIP_IOAPIC, index) || index == RTC_GSI) { if (kvm_apic_match_dest(vcpu, NULL, 0, e->fields.dest_id, e->fields.dest_mode) || (e->fields.trig_mode == IOAPIC_EDGE_TRIG && kvm_apic_pending_eoi(vcpu, e->fields.vector))) __set_bit(e->fields.vector, ioapic_handled_vectors); } } spin_unlock(&ioapic->lock); } void kvm_vcpu_request_scan_ioapic(struct kvm *kvm) { struct kvm_ioapic *ioapic = kvm->arch.vioapic; if (!ioapic) return; kvm_make_scan_ioapic_request(kvm); } static void ioapic_write_indirect(struct kvm_ioapic *ioapic, u32 val) { unsigned index; bool mask_before, mask_after; union kvm_ioapic_redirect_entry *e; switch (ioapic->ioregsel) { case IOAPIC_REG_VERSION: /* Writes are ignored. 
*/ break; case IOAPIC_REG_APIC_ID: ioapic->id = (val >> 24) & 0xf; break; case IOAPIC_REG_ARB_ID: break; default: index = (ioapic->ioregsel - 0x10) >> 1; ioapic_debug("change redir index %x val %x\n", index, val); if (index >= IOAPIC_NUM_PINS) return; e = &ioapic->redirtbl[index]; mask_before = e->fields.mask; if (ioapic->ioregsel & 1) { e->bits &= 0xffffffff; e->bits |= (u64) val << 32; } else { e->bits &= ~0xffffffffULL; e->bits |= (u32) val; e->fields.remote_irr = 0; } mask_after = e->fields.mask; if (mask_before != mask_after) kvm_fire_mask_notifiers(ioapic->kvm, KVM_IRQCHIP_IOAPIC, index, mask_after); if (e->fields.trig_mode == IOAPIC_LEVEL_TRIG && ioapic->irr & (1 << index)) ioapic_service(ioapic, index, false); kvm_vcpu_request_scan_ioapic(ioapic->kvm); break; } } static int ioapic_service(struct kvm_ioapic *ioapic, int irq, bool line_status) { union kvm_ioapic_redirect_entry *entry = &ioapic->redirtbl[irq]; struct kvm_lapic_irq irqe; int ret; if (entry->fields.mask) return -1; ioapic_debug("dest=%x dest_mode=%x delivery_mode=%x " "vector=%x trig_mode=%x\n", entry->fields.dest_id, entry->fields.dest_mode, entry->fields.delivery_mode, entry->fields.vector, entry->fields.trig_mode); irqe.dest_id = entry->fields.dest_id; irqe.vector = entry->fields.vector; irqe.dest_mode = entry->fields.dest_mode; irqe.trig_mode = entry->fields.trig_mode; irqe.delivery_mode = entry->fields.delivery_mode << 8; irqe.level = 1; irqe.shorthand = 0; irqe.msi_redir_hint = false; if (irqe.trig_mode == IOAPIC_EDGE_TRIG) ioapic->irr_delivered |= 1 << irq; if (irq == RTC_GSI && line_status) { /* * pending_eoi cannot ever become negative (see * rtc_status_pending_eoi_check_valid) and the caller * ensures that it is only called if it is >= zero, namely * if rtc_irq_check_coalesced returns false). */ BUG_ON(ioapic->rtc_status.pending_eoi != 0); ret = kvm_irq_delivery_to_apic(ioapic->kvm, NULL, &irqe, &ioapic->rtc_status.dest_map); ioapic->rtc_status.pending_eoi = (ret < 0 ? 
0 : ret); } else ret = kvm_irq_delivery_to_apic(ioapic->kvm, NULL, &irqe, NULL); if (ret && irqe.trig_mode == IOAPIC_LEVEL_TRIG) entry->fields.remote_irr = 1; return ret; } int kvm_ioapic_set_irq(struct kvm_ioapic *ioapic, int irq, int irq_source_id, int level, bool line_status) { int ret, irq_level; BUG_ON(irq < 0 || irq >= IOAPIC_NUM_PINS); spin_lock(&ioapic->lock); irq_level = __kvm_irq_line_state(&ioapic->irq_states[irq], irq_source_id, level); ret = ioapic_set_irq(ioapic, irq, irq_level, line_status); spin_unlock(&ioapic->lock); return ret; } void kvm_ioapic_clear_all(struct kvm_ioapic *ioapic, int irq_source_id) { int i; spin_lock(&ioapic->lock); for (i = 0; i < KVM_IOAPIC_NUM_PINS; i++) __clear_bit(irq_source_id, &ioapic->irq_states[i]); spin_unlock(&ioapic->lock); } static void kvm_ioapic_eoi_inject_work(struct work_struct *work) { int i; struct kvm_ioapic *ioapic = container_of(work, struct kvm_ioapic, eoi_inject.work); spin_lock(&ioapic->lock); for (i = 0; i < IOAPIC_NUM_PINS; i++) { union kvm_ioapic_redirect_entry *ent = &ioapic->redirtbl[i]; if (ent->fields.trig_mode != IOAPIC_LEVEL_TRIG) continue; if (ioapic->irr & (1 << i) && !ent->fields.remote_irr) ioapic_service(ioapic, i, false); } spin_unlock(&ioapic->lock); } #define IOAPIC_SUCCESSIVE_IRQ_MAX_COUNT 10000 static void __kvm_ioapic_update_eoi(struct kvm_vcpu *vcpu, struct kvm_ioapic *ioapic, int vector, int trigger_mode) { struct dest_map *dest_map = &ioapic->rtc_status.dest_map; struct kvm_lapic *apic = vcpu->arch.apic; int i; /* RTC special handling */ if (test_bit(vcpu->vcpu_id, dest_map->map) && vector == dest_map->vectors[vcpu->vcpu_id]) rtc_irq_eoi(ioapic, vcpu); for (i = 0; i < IOAPIC_NUM_PINS; i++) { union kvm_ioapic_redirect_entry *ent = &ioapic->redirtbl[i]; if (ent->fields.vector != vector) continue; /* * We are dropping lock while calling ack notifiers because ack * notifier callbacks for assigned devices call into IOAPIC * recursively. 
Since remote_irr is cleared only after call * to notifiers if the same vector will be delivered while lock * is dropped it will be put into irr and will be delivered * after ack notifier returns. */ spin_unlock(&ioapic->lock); kvm_notify_acked_irq(ioapic->kvm, KVM_IRQCHIP_IOAPIC, i); spin_lock(&ioapic->lock); if (trigger_mode != IOAPIC_LEVEL_TRIG || kvm_lapic_get_reg(apic, APIC_SPIV) & APIC_SPIV_DIRECTED_EOI) continue; ASSERT(ent->fields.trig_mode == IOAPIC_LEVEL_TRIG); ent->fields.remote_irr = 0; if (!ent->fields.mask && (ioapic->irr & (1 << i))) { ++ioapic->irq_eoi[i]; if (ioapic->irq_eoi[i] == IOAPIC_SUCCESSIVE_IRQ_MAX_COUNT) { /* * Real hardware does not deliver the interrupt * immediately during eoi broadcast, and this * lets a buggy guest make slow progress * even if it does not correctly handle a * level-triggered interrupt. Emulate this * behavior if we detect an interrupt storm. */ schedule_delayed_work(&ioapic->eoi_inject, HZ / 100); ioapic->irq_eoi[i] = 0; trace_kvm_ioapic_delayed_eoi_inj(ent->bits); } else { ioapic_service(ioapic, i, false); } } else { ioapic->irq_eoi[i] = 0; } } } void kvm_ioapic_update_eoi(struct kvm_vcpu *vcpu, int vector, int trigger_mode) { struct kvm_ioapic *ioapic = vcpu->kvm->arch.vioapic; spin_lock(&ioapic->lock); __kvm_ioapic_update_eoi(vcpu, ioapic, vector, trigger_mode); spin_unlock(&ioapic->lock); } static inline struct kvm_ioapic *to_ioapic(struct kvm_io_device *dev) { return container_of(dev, struct kvm_ioapic, dev); } static inline int ioapic_in_range(struct kvm_ioapic *ioapic, gpa_t addr) { return ((addr >= ioapic->base_address && (addr < ioapic->base_address + IOAPIC_MEM_LENGTH))); } static int ioapic_mmio_read(struct kvm_vcpu *vcpu, struct kvm_io_device *this, gpa_t addr, int len, void *val) { struct kvm_ioapic *ioapic = to_ioapic(this); u32 result; if (!ioapic_in_range(ioapic, addr)) return -EOPNOTSUPP; ioapic_debug("addr %lx\n", (unsigned long)addr); ASSERT(!(addr & 0xf)); /* check alignment */ addr &= 0xff; 
spin_lock(&ioapic->lock); switch (addr) { case IOAPIC_REG_SELECT: result = ioapic->ioregsel; break; case IOAPIC_REG_WINDOW: result = ioapic_read_indirect(ioapic, addr, len); break; default: result = 0; break; } spin_unlock(&ioapic->lock); switch (len) { case 8: *(u64 *) val = result; break; case 1: case 2: case 4: memcpy(val, (char *)&result, len); break; default: printk(KERN_WARNING "ioapic: wrong length %d\n", len); } return 0; } static int ioapic_mmio_write(struct kvm_vcpu *vcpu, struct kvm_io_device *this, gpa_t addr, int len, const void *val) { struct kvm_ioapic *ioapic = to_ioapic(this); u32 data; if (!ioapic_in_range(ioapic, addr)) return -EOPNOTSUPP; ioapic_debug("ioapic_mmio_write addr=%p len=%d val=%p\n", (void*)addr, len, val); ASSERT(!(addr & 0xf)); /* check alignment */ switch (len) { case 8: case 4: data = *(u32 *) val; break; case 2: data = *(u16 *) val; break; case 1: data = *(u8 *) val; break; default: printk(KERN_WARNING "ioapic: Unsupported size %d\n", len); return 0; } addr &= 0xff; spin_lock(&ioapic->lock); switch (addr) { case IOAPIC_REG_SELECT: ioapic->ioregsel = data & 0xFF; /* 8-bit register */ break; case IOAPIC_REG_WINDOW: ioapic_write_indirect(ioapic, data); break; default: break; } spin_unlock(&ioapic->lock); return 0; } static void kvm_ioapic_reset(struct kvm_ioapic *ioapic) { int i; cancel_delayed_work_sync(&ioapic->eoi_inject); for (i = 0; i < IOAPIC_NUM_PINS; i++) ioapic->redirtbl[i].fields.mask = 1; ioapic->base_address = IOAPIC_DEFAULT_BASE_ADDRESS; ioapic->ioregsel = 0; ioapic->irr = 0; ioapic->irr_delivered = 0; ioapic->id = 0; memset(ioapic->irq_eoi, 0x00, sizeof(ioapic->irq_eoi)); rtc_irq_eoi_tracking_reset(ioapic); } static const struct kvm_io_device_ops ioapic_mmio_ops = { .read = ioapic_mmio_read, .write = ioapic_mmio_write, }; int kvm_ioapic_init(struct kvm *kvm) { struct kvm_ioapic *ioapic; int ret; ioapic = kzalloc(sizeof(struct kvm_ioapic), GFP_KERNEL); if (!ioapic) return -ENOMEM; spin_lock_init(&ioapic->lock); 
INIT_DELAYED_WORK(&ioapic->eoi_inject, kvm_ioapic_eoi_inject_work); kvm->arch.vioapic = ioapic; kvm_ioapic_reset(ioapic); kvm_iodevice_init(&ioapic->dev, &ioapic_mmio_ops); ioapic->kvm = kvm; mutex_lock(&kvm->slots_lock); ret = kvm_io_bus_register_dev(kvm, KVM_MMIO_BUS, ioapic->base_address, IOAPIC_MEM_LENGTH, &ioapic->dev); mutex_unlock(&kvm->slots_lock); if (ret < 0) { kvm->arch.vioapic = NULL; kfree(ioapic); return ret; } kvm_vcpu_request_scan_ioapic(kvm); return ret; } void kvm_ioapic_destroy(struct kvm *kvm) { struct kvm_ioapic *ioapic = kvm->arch.vioapic; cancel_delayed_work_sync(&ioapic->eoi_inject); kvm_io_bus_unregister_dev(kvm, KVM_MMIO_BUS, &ioapic->dev); kvm->arch.vioapic = NULL; kfree(ioapic); } int kvm_get_ioapic(struct kvm *kvm, struct kvm_ioapic_state *state) { struct kvm_ioapic *ioapic = ioapic_irqchip(kvm); if (!ioapic) return -EINVAL; spin_lock(&ioapic->lock); memcpy(state, ioapic, sizeof(struct kvm_ioapic_state)); state->irr &= ~ioapic->irr_delivered; spin_unlock(&ioapic->lock); return 0; } int kvm_set_ioapic(struct kvm *kvm, struct kvm_ioapic_state *state) { struct kvm_ioapic *ioapic = ioapic_irqchip(kvm); if (!ioapic) return -EINVAL; spin_lock(&ioapic->lock); memcpy(ioapic, state, sizeof(struct kvm_ioapic_state)); ioapic->irr = 0; ioapic->irr_delivered = 0; kvm_vcpu_request_scan_ioapic(kvm); kvm_ioapic_inject_all(ioapic, state->irr); spin_unlock(&ioapic->lock); return 0; }
static void rtc_irq_eoi_tracking_reset(struct kvm_ioapic *ioapic) { ioapic->rtc_status.pending_eoi = 0; bitmap_zero(ioapic->rtc_status.dest_map.map, KVM_MAX_VCPUS); }
static void rtc_irq_eoi_tracking_reset(struct kvm_ioapic *ioapic) { ioapic->rtc_status.pending_eoi = 0; bitmap_zero(ioapic->rtc_status.dest_map.map, KVM_MAX_VCPU_ID); }
{'added': [(97, '\tbitmap_zero(ioapic->rtc_status.dest_map.map, KVM_MAX_VCPU_ID);')], 'deleted': [(97, '\tbitmap_zero(ioapic->rtc_status.dest_map.map, KVM_MAX_VCPUS);')]}
1
1
498
3,135
5
30
1
https://github.com/torvalds/linux
CVE-2016-9777
CWE-125
1,297
spl_array.c
C
spl_array_get_dimension_ptr_ptr
/* +----------------------------------------------------------------------+ | PHP Version 5 | +----------------------------------------------------------------------+ | Copyright (c) 1997-2016 The PHP Group | +----------------------------------------------------------------------+ | This source file is subject to version 3.01 of the PHP license, | | that is bundled with this package in the file LICENSE, and is | | available through the world-wide-web at the following url: | | http://www.php.net/license/3_01.txt | | If you did not receive a copy of the PHP license and are unable to | | obtain it through the world-wide-web, please send a note to | | license@php.net so we can mail you a copy immediately. | +----------------------------------------------------------------------+ | Authors: Marcus Boerger <helly@php.net> | +----------------------------------------------------------------------+ */ /* $Id$ */ #ifdef HAVE_CONFIG_H # include "config.h" #endif #include "php.h" #include "php_ini.h" #include "ext/standard/info.h" #include "ext/standard/php_var.h" #include "ext/standard/php_smart_str.h" #include "zend_interfaces.h" #include "zend_exceptions.h" #include "php_spl.h" #include "spl_functions.h" #include "spl_engine.h" #include "spl_iterators.h" #include "spl_array.h" #include "spl_exceptions.h" zend_object_handlers spl_handler_ArrayObject; PHPAPI zend_class_entry *spl_ce_ArrayObject; zend_object_handlers spl_handler_ArrayIterator; PHPAPI zend_class_entry *spl_ce_ArrayIterator; PHPAPI zend_class_entry *spl_ce_RecursiveArrayIterator; #define SPL_ARRAY_STD_PROP_LIST 0x00000001 #define SPL_ARRAY_ARRAY_AS_PROPS 0x00000002 #define SPL_ARRAY_CHILD_ARRAYS_ONLY 0x00000004 #define SPL_ARRAY_OVERLOADED_REWIND 0x00010000 #define SPL_ARRAY_OVERLOADED_VALID 0x00020000 #define SPL_ARRAY_OVERLOADED_KEY 0x00040000 #define SPL_ARRAY_OVERLOADED_CURRENT 0x00080000 #define SPL_ARRAY_OVERLOADED_NEXT 0x00100000 #define SPL_ARRAY_IS_REF 0x01000000 #define SPL_ARRAY_IS_SELF 0x02000000 
#define SPL_ARRAY_USE_OTHER 0x04000000 #define SPL_ARRAY_INT_MASK 0xFFFF0000 #define SPL_ARRAY_CLONE_MASK 0x0300FFFF #define SPL_ARRAY_METHOD_NO_ARG 0 #define SPL_ARRAY_METHOD_USE_ARG 1 #define SPL_ARRAY_METHOD_MAY_USER_ARG 2 typedef struct _spl_array_object { zend_object std; zval *array; zval *retval; HashPosition pos; ulong pos_h; int ar_flags; int is_self; zend_function *fptr_offset_get; zend_function *fptr_offset_set; zend_function *fptr_offset_has; zend_function *fptr_offset_del; zend_function *fptr_count; zend_class_entry* ce_get_iterator; HashTable *debug_info; unsigned char nApplyCount; } spl_array_object; static inline HashTable *spl_array_get_hash_table(spl_array_object* intern, int check_std_props TSRMLS_DC) { /* {{{ */ if ((intern->ar_flags & SPL_ARRAY_IS_SELF) != 0) { if (!intern->std.properties) { rebuild_object_properties(&intern->std); } return intern->std.properties; } else if ((intern->ar_flags & SPL_ARRAY_USE_OTHER) && (check_std_props == 0 || (intern->ar_flags & SPL_ARRAY_STD_PROP_LIST) == 0) && Z_TYPE_P(intern->array) == IS_OBJECT) { spl_array_object *other = (spl_array_object*)zend_object_store_get_object(intern->array TSRMLS_CC); return spl_array_get_hash_table(other, check_std_props TSRMLS_CC); } else if ((intern->ar_flags & ((check_std_props ? 
SPL_ARRAY_STD_PROP_LIST : 0) | SPL_ARRAY_IS_SELF)) != 0) { if (!intern->std.properties) { rebuild_object_properties(&intern->std); } return intern->std.properties; } else { return HASH_OF(intern->array); } } /* }}} */ static void spl_array_rewind(spl_array_object *intern TSRMLS_DC); static void spl_array_update_pos(spl_array_object* intern) /* {{{ */ { Bucket *pos = intern->pos; if (pos != NULL) { intern->pos_h = pos->h; } } /* }}} */ static void spl_array_set_pos(spl_array_object* intern, HashPosition pos) /* {{{ */ { intern->pos = pos; spl_array_update_pos(intern); } /* }}} */ SPL_API int spl_hash_verify_pos_ex(spl_array_object * intern, HashTable * ht TSRMLS_DC) /* {{{ */ { Bucket *p; /* IS_CONSISTENT(ht);*/ /* HASH_PROTECT_RECURSION(ht);*/ p = ht->arBuckets[intern->pos_h & ht->nTableMask]; while (p != NULL) { if (p == intern->pos) { return SUCCESS; } p = p->pNext; } /* HASH_UNPROTECT_RECURSION(ht); */ spl_array_rewind(intern TSRMLS_CC); return FAILURE; } /* }}} */ SPL_API int spl_hash_verify_pos(spl_array_object * intern TSRMLS_DC) /* {{{ */ { HashTable *ht = spl_array_get_hash_table(intern, 0 TSRMLS_CC); return spl_hash_verify_pos_ex(intern, ht TSRMLS_CC); } /* }}} */ /* {{{ spl_array_object_free_storage */ static void spl_array_object_free_storage(void *object TSRMLS_DC) { spl_array_object *intern = (spl_array_object *)object; zend_object_std_dtor(&intern->std TSRMLS_CC); zval_ptr_dtor(&intern->array); zval_ptr_dtor(&intern->retval); if (intern->debug_info != NULL) { zend_hash_destroy(intern->debug_info); efree(intern->debug_info); } efree(object); } /* }}} */ zend_object_iterator *spl_array_get_iterator(zend_class_entry *ce, zval *object, int by_ref TSRMLS_DC); /* {{{ spl_array_object_new_ex */ static zend_object_value spl_array_object_new_ex(zend_class_entry *class_type, spl_array_object **obj, zval *orig, int clone_orig TSRMLS_DC) { zend_object_value retval = {0}; spl_array_object *intern; zval *tmp; zend_class_entry * parent = class_type; int inherited = 
0; intern = emalloc(sizeof(spl_array_object)); memset(intern, 0, sizeof(spl_array_object)); *obj = intern; ALLOC_INIT_ZVAL(intern->retval); zend_object_std_init(&intern->std, class_type TSRMLS_CC); object_properties_init(&intern->std, class_type); intern->ar_flags = 0; intern->debug_info = NULL; intern->ce_get_iterator = spl_ce_ArrayIterator; if (orig) { spl_array_object *other = (spl_array_object*)zend_object_store_get_object(orig TSRMLS_CC); intern->ar_flags &= ~ SPL_ARRAY_CLONE_MASK; intern->ar_flags |= (other->ar_flags & SPL_ARRAY_CLONE_MASK); intern->ce_get_iterator = other->ce_get_iterator; if (clone_orig) { intern->array = other->array; if (Z_OBJ_HT_P(orig) == &spl_handler_ArrayObject) { MAKE_STD_ZVAL(intern->array); array_init(intern->array); zend_hash_copy(HASH_OF(intern->array), HASH_OF(other->array), (copy_ctor_func_t) zval_add_ref, &tmp, sizeof(zval*)); } if (Z_OBJ_HT_P(orig) == &spl_handler_ArrayIterator) { Z_ADDREF_P(other->array); } } else { intern->array = orig; Z_ADDREF_P(intern->array); intern->ar_flags |= SPL_ARRAY_IS_REF | SPL_ARRAY_USE_OTHER; } } else { MAKE_STD_ZVAL(intern->array); array_init(intern->array); intern->ar_flags &= ~SPL_ARRAY_IS_REF; } retval.handle = zend_objects_store_put(intern, (zend_objects_store_dtor_t)zend_objects_destroy_object, (zend_objects_free_object_storage_t) spl_array_object_free_storage, NULL TSRMLS_CC); while (parent) { if (parent == spl_ce_ArrayIterator || parent == spl_ce_RecursiveArrayIterator) { retval.handlers = &spl_handler_ArrayIterator; class_type->get_iterator = spl_array_get_iterator; break; } else if (parent == spl_ce_ArrayObject) { retval.handlers = &spl_handler_ArrayObject; break; } parent = parent->parent; inherited = 1; } if (!parent) { /* this must never happen */ php_error_docref(NULL TSRMLS_CC, E_COMPILE_ERROR, "Internal compiler error, Class is not child of ArrayObject or ArrayIterator"); } if (inherited) { zend_hash_find(&class_type->function_table, "offsetget", sizeof("offsetget"), (void **) 
&intern->fptr_offset_get); if (intern->fptr_offset_get->common.scope == parent) { intern->fptr_offset_get = NULL; } zend_hash_find(&class_type->function_table, "offsetset", sizeof("offsetset"), (void **) &intern->fptr_offset_set); if (intern->fptr_offset_set->common.scope == parent) { intern->fptr_offset_set = NULL; } zend_hash_find(&class_type->function_table, "offsetexists", sizeof("offsetexists"), (void **) &intern->fptr_offset_has); if (intern->fptr_offset_has->common.scope == parent) { intern->fptr_offset_has = NULL; } zend_hash_find(&class_type->function_table, "offsetunset", sizeof("offsetunset"), (void **) &intern->fptr_offset_del); if (intern->fptr_offset_del->common.scope == parent) { intern->fptr_offset_del = NULL; } zend_hash_find(&class_type->function_table, "count", sizeof("count"), (void **) &intern->fptr_count); if (intern->fptr_count->common.scope == parent) { intern->fptr_count = NULL; } } /* Cache iterator functions if ArrayIterator or derived. Check current's */ /* cache since only current is always required */ if (retval.handlers == &spl_handler_ArrayIterator) { if (!class_type->iterator_funcs.zf_current) { zend_hash_find(&class_type->function_table, "rewind", sizeof("rewind"), (void **) &class_type->iterator_funcs.zf_rewind); zend_hash_find(&class_type->function_table, "valid", sizeof("valid"), (void **) &class_type->iterator_funcs.zf_valid); zend_hash_find(&class_type->function_table, "key", sizeof("key"), (void **) &class_type->iterator_funcs.zf_key); zend_hash_find(&class_type->function_table, "current", sizeof("current"), (void **) &class_type->iterator_funcs.zf_current); zend_hash_find(&class_type->function_table, "next", sizeof("next"), (void **) &class_type->iterator_funcs.zf_next); } if (inherited) { if (class_type->iterator_funcs.zf_rewind->common.scope != parent) intern->ar_flags |= SPL_ARRAY_OVERLOADED_REWIND; if (class_type->iterator_funcs.zf_valid->common.scope != parent) intern->ar_flags |= SPL_ARRAY_OVERLOADED_VALID; if 
(class_type->iterator_funcs.zf_key->common.scope != parent) intern->ar_flags |= SPL_ARRAY_OVERLOADED_KEY; if (class_type->iterator_funcs.zf_current->common.scope != parent) intern->ar_flags |= SPL_ARRAY_OVERLOADED_CURRENT; if (class_type->iterator_funcs.zf_next->common.scope != parent) intern->ar_flags |= SPL_ARRAY_OVERLOADED_NEXT; } } spl_array_rewind(intern TSRMLS_CC); return retval; } /* }}} */ /* {{{ spl_array_object_new */ static zend_object_value spl_array_object_new(zend_class_entry *class_type TSRMLS_DC) { spl_array_object *tmp; return spl_array_object_new_ex(class_type, &tmp, NULL, 0 TSRMLS_CC); } /* }}} */ /* {{{ spl_array_object_clone */ static zend_object_value spl_array_object_clone(zval *zobject TSRMLS_DC) { zend_object_value new_obj_val; zend_object *old_object; zend_object *new_object; zend_object_handle handle = Z_OBJ_HANDLE_P(zobject); spl_array_object *intern; old_object = zend_objects_get_address(zobject TSRMLS_CC); new_obj_val = spl_array_object_new_ex(old_object->ce, &intern, zobject, 1 TSRMLS_CC); new_object = &intern->std; zend_objects_clone_members(new_object, new_obj_val, old_object, handle TSRMLS_CC); return new_obj_val; } /* }}} */ static zval **spl_array_get_dimension_ptr_ptr(int check_inherited, zval *object, zval *offset, int type TSRMLS_DC) /* {{{ */ { spl_array_object *intern = (spl_array_object*)zend_object_store_get_object(object TSRMLS_CC); zval **retval; char *key; uint len; long index; HashTable *ht = spl_array_get_hash_table(intern, 0 TSRMLS_CC); if (!offset) { return &EG(uninitialized_zval_ptr); } if ((type == BP_VAR_W || type == BP_VAR_RW) && (ht->nApplyCount > 0)) { zend_error(E_WARNING, "Modification of ArrayObject during sorting is prohibited"); return &EG(error_zval_ptr);; } switch (Z_TYPE_P(offset)) { case IS_STRING: key = Z_STRVAL_P(offset); len = Z_STRLEN_P(offset) + 1; string_offest: if (zend_symtable_find(ht, key, len, (void **) &retval) == FAILURE) { switch (type) { case BP_VAR_R: zend_error(E_NOTICE, "Undefined 
index: %s", key); case BP_VAR_UNSET: case BP_VAR_IS: retval = &EG(uninitialized_zval_ptr); break; case BP_VAR_RW: zend_error(E_NOTICE,"Undefined index: %s", key); case BP_VAR_W: { zval *value; ALLOC_INIT_ZVAL(value); zend_symtable_update(ht, key, len, (void**)&value, sizeof(void*), (void **)&retval); } } } return retval; case IS_NULL: key = ""; len = 1; goto string_offest; case IS_RESOURCE: zend_error(E_STRICT, "Resource ID#%ld used as offset, casting to integer (%ld)", Z_LVAL_P(offset), Z_LVAL_P(offset)); case IS_DOUBLE: case IS_BOOL: case IS_LONG: if (offset->type == IS_DOUBLE) { index = (long)Z_DVAL_P(offset); } else { index = Z_LVAL_P(offset); } if (zend_hash_index_find(ht, index, (void **) &retval) == FAILURE) { switch (type) { case BP_VAR_R: zend_error(E_NOTICE, "Undefined offset: %ld", index); case BP_VAR_UNSET: case BP_VAR_IS: retval = &EG(uninitialized_zval_ptr); break; case BP_VAR_RW: zend_error(E_NOTICE, "Undefined offset: %ld", index); case BP_VAR_W: { zval *value; ALLOC_INIT_ZVAL(value); zend_hash_index_update(ht, index, (void**)&value, sizeof(void*), (void **)&retval); } } } return retval; default: zend_error(E_WARNING, "Illegal offset type"); return (type == BP_VAR_W || type == BP_VAR_RW) ? 
&EG(error_zval_ptr) : &EG(uninitialized_zval_ptr); } } /* }}} */ static zval *spl_array_read_dimension_ex(int check_inherited, zval *object, zval *offset, int type TSRMLS_DC) /* {{{ */ { zval **ret; if (check_inherited) { spl_array_object *intern = (spl_array_object*)zend_object_store_get_object(object TSRMLS_CC); if (intern->fptr_offset_get) { zval *rv; if (!offset) { ALLOC_INIT_ZVAL(offset); } else { SEPARATE_ARG_IF_REF(offset); } zend_call_method_with_1_params(&object, Z_OBJCE_P(object), &intern->fptr_offset_get, "offsetGet", &rv, offset); zval_ptr_dtor(&offset); if (rv) { zval_ptr_dtor(&intern->retval); MAKE_STD_ZVAL(intern->retval); ZVAL_ZVAL(intern->retval, rv, 1, 1); return intern->retval; } return EG(uninitialized_zval_ptr); } } ret = spl_array_get_dimension_ptr_ptr(check_inherited, object, offset, type TSRMLS_CC); /* When in a write context, * ZE has to be fooled into thinking this is in a reference set * by separating (if necessary) and returning as an is_ref=1 zval (even if refcount == 1) */ if ((type == BP_VAR_W || type == BP_VAR_RW || type == BP_VAR_UNSET) && !Z_ISREF_PP(ret) && ret != &EG(uninitialized_zval_ptr)) { if (Z_REFCOUNT_PP(ret) > 1) { zval *newval; /* Separate */ MAKE_STD_ZVAL(newval); *newval = **ret; zval_copy_ctor(newval); Z_SET_REFCOUNT_P(newval, 1); /* Replace */ Z_DELREF_PP(ret); *ret = newval; } Z_SET_ISREF_PP(ret); } return *ret; } /* }}} */ static zval *spl_array_read_dimension(zval *object, zval *offset, int type TSRMLS_DC) /* {{{ */ { return spl_array_read_dimension_ex(1, object, offset, type TSRMLS_CC); } /* }}} */ static void spl_array_write_dimension_ex(int check_inherited, zval *object, zval *offset, zval *value TSRMLS_DC) /* {{{ */ { spl_array_object *intern = (spl_array_object*)zend_object_store_get_object(object TSRMLS_CC); long index; HashTable *ht; if (check_inherited && intern->fptr_offset_set) { if (!offset) { ALLOC_INIT_ZVAL(offset); } else { SEPARATE_ARG_IF_REF(offset); } zend_call_method_with_2_params(&object, 
Z_OBJCE_P(object), &intern->fptr_offset_set, "offsetSet", NULL, offset, value); zval_ptr_dtor(&offset); return; } if (!offset) { ht = spl_array_get_hash_table(intern, 0 TSRMLS_CC); if (ht->nApplyCount > 0) { zend_error(E_WARNING, "Modification of ArrayObject during sorting is prohibited"); return; } Z_ADDREF_P(value); zend_hash_next_index_insert(ht, (void**)&value, sizeof(void*), NULL); return; } switch(Z_TYPE_P(offset)) { case IS_STRING: ht = spl_array_get_hash_table(intern, 0 TSRMLS_CC); if (ht->nApplyCount > 0) { zend_error(E_WARNING, "Modification of ArrayObject during sorting is prohibited"); return; } Z_ADDREF_P(value); zend_symtable_update(ht, Z_STRVAL_P(offset), Z_STRLEN_P(offset)+1, (void**)&value, sizeof(void*), NULL); return; case IS_DOUBLE: case IS_RESOURCE: case IS_BOOL: case IS_LONG: ht = spl_array_get_hash_table(intern, 0 TSRMLS_CC); if (ht->nApplyCount > 0) { zend_error(E_WARNING, "Modification of ArrayObject during sorting is prohibited"); return; } if (offset->type == IS_DOUBLE) { index = (long)Z_DVAL_P(offset); } else { index = Z_LVAL_P(offset); } Z_ADDREF_P(value); zend_hash_index_update(ht, index, (void**)&value, sizeof(void*), NULL); return; case IS_NULL: ht = spl_array_get_hash_table(intern, 0 TSRMLS_CC); if (ht->nApplyCount > 0) { zend_error(E_WARNING, "Modification of ArrayObject during sorting is prohibited"); return; } Z_ADDREF_P(value); zend_hash_next_index_insert(ht, (void**)&value, sizeof(void*), NULL); return; default: zend_error(E_WARNING, "Illegal offset type"); return; } } /* }}} */ static void spl_array_write_dimension(zval *object, zval *offset, zval *value TSRMLS_DC) /* {{{ */ { spl_array_write_dimension_ex(1, object, offset, value TSRMLS_CC); } /* }}} */ static void spl_array_unset_dimension_ex(int check_inherited, zval *object, zval *offset TSRMLS_DC) /* {{{ */ { spl_array_object *intern = (spl_array_object*)zend_object_store_get_object(object TSRMLS_CC); long index; HashTable *ht; if (check_inherited && 
intern->fptr_offset_del) { SEPARATE_ARG_IF_REF(offset); zend_call_method_with_1_params(&object, Z_OBJCE_P(object), &intern->fptr_offset_del, "offsetUnset", NULL, offset); zval_ptr_dtor(&offset); return; } switch(Z_TYPE_P(offset)) { case IS_STRING: ht = spl_array_get_hash_table(intern, 0 TSRMLS_CC); if (ht->nApplyCount > 0) { zend_error(E_WARNING, "Modification of ArrayObject during sorting is prohibited"); return; } if (ht == &EG(symbol_table)) { if (zend_delete_global_variable(Z_STRVAL_P(offset), Z_STRLEN_P(offset) TSRMLS_CC)) { zend_error(E_NOTICE,"Undefined index: %s", Z_STRVAL_P(offset)); } } else { if (zend_symtable_del(ht, Z_STRVAL_P(offset), Z_STRLEN_P(offset)+1) == FAILURE) { zend_error(E_NOTICE,"Undefined index: %s", Z_STRVAL_P(offset)); } else { spl_array_object *obj = intern; while (1) { if ((obj->ar_flags & SPL_ARRAY_IS_SELF) != 0) { break; } else if (Z_TYPE_P(obj->array) == IS_OBJECT) { if ((obj->ar_flags & SPL_ARRAY_USE_OTHER) == 0) { obj = (spl_array_object*)zend_object_store_get_object(obj->array TSRMLS_CC); break; } else { obj = (spl_array_object*)zend_object_store_get_object(obj->array TSRMLS_CC); } } else { obj = NULL; break; } } if (obj) { zend_property_info *property_info = zend_get_property_info(obj->std.ce, offset, 1 TSRMLS_CC); if (property_info && (property_info->flags & ZEND_ACC_STATIC) == 0 && property_info->offset >= 0) { obj->std.properties_table[property_info->offset] = NULL; } } } } break; case IS_DOUBLE: case IS_RESOURCE: case IS_BOOL: case IS_LONG: if (offset->type == IS_DOUBLE) { index = (long)Z_DVAL_P(offset); } else { index = Z_LVAL_P(offset); } ht = spl_array_get_hash_table(intern, 0 TSRMLS_CC); if (ht->nApplyCount > 0) { zend_error(E_WARNING, "Modification of ArrayObject during sorting is prohibited"); return; } if (zend_hash_index_del(ht, index) == FAILURE) { zend_error(E_NOTICE,"Undefined offset: %ld", Z_LVAL_P(offset)); } break; default: zend_error(E_WARNING, "Illegal offset type"); return; } spl_hash_verify_pos(intern 
TSRMLS_CC); /* call rewind on FAILURE */ } /* }}} */ static void spl_array_unset_dimension(zval *object, zval *offset TSRMLS_DC) /* {{{ */ { spl_array_unset_dimension_ex(1, object, offset TSRMLS_CC); } /* }}} */ static int spl_array_has_dimension_ex(int check_inherited, zval *object, zval *offset, int check_empty TSRMLS_DC) /* {{{ */ { spl_array_object *intern = (spl_array_object*)zend_object_store_get_object(object TSRMLS_CC); long index; zval *rv, *value = NULL, **tmp; if (check_inherited && intern->fptr_offset_has) { zval *offset_tmp = offset; SEPARATE_ARG_IF_REF(offset_tmp); zend_call_method_with_1_params(&object, Z_OBJCE_P(object), &intern->fptr_offset_has, "offsetExists", &rv, offset_tmp); zval_ptr_dtor(&offset_tmp); if (rv && zend_is_true(rv)) { zval_ptr_dtor(&rv); if (check_empty != 1) { return 1; } else if (intern->fptr_offset_get) { value = spl_array_read_dimension_ex(1, object, offset, BP_VAR_R TSRMLS_CC); } } else { if (rv) { zval_ptr_dtor(&rv); } return 0; } } if (!value) { HashTable *ht = spl_array_get_hash_table(intern, 0 TSRMLS_CC); switch(Z_TYPE_P(offset)) { case IS_STRING: if (zend_symtable_find(ht, Z_STRVAL_P(offset), Z_STRLEN_P(offset)+1, (void **) &tmp) != FAILURE) { if (check_empty == 2) { return 1; } } else { return 0; } break; case IS_DOUBLE: case IS_RESOURCE: case IS_BOOL: case IS_LONG: if (offset->type == IS_DOUBLE) { index = (long)Z_DVAL_P(offset); } else { index = Z_LVAL_P(offset); } if (zend_hash_index_find(ht, index, (void **)&tmp) != FAILURE) { if (check_empty == 2) { return 1; } } else { return 0; } break; default: zend_error(E_WARNING, "Illegal offset type"); return 0; } if (check_empty && check_inherited && intern->fptr_offset_get) { value = spl_array_read_dimension_ex(1, object, offset, BP_VAR_R TSRMLS_CC); } else { value = *tmp; } } return check_empty ? 
zend_is_true(value) : Z_TYPE_P(value) != IS_NULL; } /* }}} */ static int spl_array_has_dimension(zval *object, zval *offset, int check_empty TSRMLS_DC) /* {{{ */ { return spl_array_has_dimension_ex(1, object, offset, check_empty TSRMLS_CC); } /* }}} */ /* {{{ spl_array_object_verify_pos_ex */ static inline int spl_array_object_verify_pos_ex(spl_array_object *object, HashTable *ht, const char *msg_prefix TSRMLS_DC) { if (!ht) { php_error_docref(NULL TSRMLS_CC, E_NOTICE, "%sArray was modified outside object and is no longer an array", msg_prefix); return FAILURE; } if (object->pos && (object->ar_flags & SPL_ARRAY_IS_REF) && spl_hash_verify_pos_ex(object, ht TSRMLS_CC) == FAILURE) { php_error_docref(NULL TSRMLS_CC, E_NOTICE, "%sArray was modified outside object and internal position is no longer valid", msg_prefix); return FAILURE; } return SUCCESS; } /* }}} */ /* {{{ spl_array_object_verify_pos */ static inline int spl_array_object_verify_pos(spl_array_object *object, HashTable *ht TSRMLS_DC) { return spl_array_object_verify_pos_ex(object, ht, "" TSRMLS_CC); } /* }}} */ /* {{{ proto bool ArrayObject::offsetExists(mixed $index) proto bool ArrayIterator::offsetExists(mixed $index) Returns whether the requested $index exists. */ SPL_METHOD(Array, offsetExists) { zval *index; if (zend_parse_parameters(ZEND_NUM_ARGS() TSRMLS_CC, "z", &index) == FAILURE) { return; } RETURN_BOOL(spl_array_has_dimension_ex(0, getThis(), index, 2 TSRMLS_CC)); } /* }}} */ /* {{{ proto mixed ArrayObject::offsetGet(mixed $index) proto mixed ArrayIterator::offsetGet(mixed $index) Returns the value at the specified $index. 
*/ SPL_METHOD(Array, offsetGet) { zval *index, *value; if (zend_parse_parameters(ZEND_NUM_ARGS() TSRMLS_CC, "z", &index) == FAILURE) { return; } value = spl_array_read_dimension_ex(0, getThis(), index, BP_VAR_R TSRMLS_CC); RETURN_ZVAL(value, 1, 0); } /* }}} */ /* {{{ proto void ArrayObject::offsetSet(mixed $index, mixed $newval) proto void ArrayIterator::offsetSet(mixed $index, mixed $newval) Sets the value at the specified $index to $newval. */ SPL_METHOD(Array, offsetSet) { zval *index, *value; if (zend_parse_parameters(ZEND_NUM_ARGS() TSRMLS_CC, "zz", &index, &value) == FAILURE) { return; } spl_array_write_dimension_ex(0, getThis(), index, value TSRMLS_CC); } /* }}} */ void spl_array_iterator_append(zval *object, zval *append_value TSRMLS_DC) /* {{{ */ { spl_array_object *intern = (spl_array_object*)zend_object_store_get_object(object TSRMLS_CC); HashTable *aht = spl_array_get_hash_table(intern, 0 TSRMLS_CC); if (!aht) { php_error_docref(NULL TSRMLS_CC, E_NOTICE, "Array was modified outside object and is no longer an array"); return; } if (Z_TYPE_P(intern->array) == IS_OBJECT) { php_error_docref(NULL TSRMLS_CC, E_RECOVERABLE_ERROR, "Cannot append properties to objects, use %s::offsetSet() instead", Z_OBJCE_P(object)->name); return; } spl_array_write_dimension(object, NULL, append_value TSRMLS_CC); if (!intern->pos) { spl_array_set_pos(intern, aht->pListTail); } } /* }}} */ /* {{{ proto void ArrayObject::append(mixed $newval) proto void ArrayIterator::append(mixed $newval) Appends the value (cannot be called for objects). */ SPL_METHOD(Array, append) { zval *value; if (zend_parse_parameters(ZEND_NUM_ARGS() TSRMLS_CC, "z", &value) == FAILURE) { return; } spl_array_iterator_append(getThis(), value TSRMLS_CC); } /* }}} */ /* {{{ proto void ArrayObject::offsetUnset(mixed $index) proto void ArrayIterator::offsetUnset(mixed $index) Unsets the value at the specified $index. 
*/ SPL_METHOD(Array, offsetUnset) { zval *index; if (zend_parse_parameters(ZEND_NUM_ARGS() TSRMLS_CC, "z", &index) == FAILURE) { return; } spl_array_unset_dimension_ex(0, getThis(), index TSRMLS_CC); } /* }}} */ /* {{{ proto array ArrayObject::getArrayCopy() proto array ArrayIterator::getArrayCopy() Return a copy of the contained array */ SPL_METHOD(Array, getArrayCopy) { zval *object = getThis(), *tmp; spl_array_object *intern = (spl_array_object*)zend_object_store_get_object(object TSRMLS_CC); array_init(return_value); zend_hash_copy(HASH_OF(return_value), spl_array_get_hash_table(intern, 0 TSRMLS_CC), (copy_ctor_func_t) zval_add_ref, &tmp, sizeof(zval*)); } /* }}} */ static HashTable *spl_array_get_properties(zval *object TSRMLS_DC) /* {{{ */ { spl_array_object *intern = (spl_array_object*)zend_object_store_get_object(object TSRMLS_CC); HashTable *result; if (intern->nApplyCount > 1) { php_error_docref(NULL TSRMLS_CC, E_ERROR, "Nesting level too deep - recursive dependency?"); } intern->nApplyCount++; result = spl_array_get_hash_table(intern, 1 TSRMLS_CC); intern->nApplyCount--; return result; } /* }}} */ static HashTable* spl_array_get_debug_info(zval *obj, int *is_temp TSRMLS_DC) /* {{{ */ { spl_array_object *intern = (spl_array_object*)zend_object_store_get_object(obj TSRMLS_CC); zval *tmp, *storage; int name_len; char *zname; zend_class_entry *base; *is_temp = 0; if (!intern->std.properties) { rebuild_object_properties(&intern->std); } if (HASH_OF(intern->array) == intern->std.properties) { return intern->std.properties; } else { if (intern->debug_info == NULL) { ALLOC_HASHTABLE(intern->debug_info); ZEND_INIT_SYMTABLE_EX(intern->debug_info, zend_hash_num_elements(intern->std.properties) + 1, 0); } if (intern->debug_info->nApplyCount == 0) { zend_hash_clean(intern->debug_info); zend_hash_copy(intern->debug_info, intern->std.properties, (copy_ctor_func_t) zval_add_ref, (void *) &tmp, sizeof(zval *)); storage = intern->array; zval_add_ref(&storage); base = 
(Z_OBJ_HT_P(obj) == &spl_handler_ArrayIterator) ? spl_ce_ArrayIterator : spl_ce_ArrayObject; zname = spl_gen_private_prop_name(base, "storage", sizeof("storage")-1, &name_len TSRMLS_CC); zend_symtable_update(intern->debug_info, zname, name_len+1, &storage, sizeof(zval *), NULL); efree(zname); } return intern->debug_info; } } /* }}} */ static HashTable *spl_array_get_gc(zval *object, zval ***gc_data, int *gc_data_count TSRMLS_DC) /* {{{ */ { spl_array_object *intern = (spl_array_object*)zend_object_store_get_object(object TSRMLS_CC); *gc_data = &intern->array; *gc_data_count = 1; return zend_std_get_properties(object TSRMLS_CC); } /* }}} */ static zval *spl_array_read_property(zval *object, zval *member, int type, const zend_literal *key TSRMLS_DC) /* {{{ */ { spl_array_object *intern = (spl_array_object*)zend_object_store_get_object(object TSRMLS_CC); if ((intern->ar_flags & SPL_ARRAY_ARRAY_AS_PROPS) != 0 && !std_object_handlers.has_property(object, member, 2, key TSRMLS_CC)) { return spl_array_read_dimension(object, member, type TSRMLS_CC); } return std_object_handlers.read_property(object, member, type, key TSRMLS_CC); } /* }}} */ static void spl_array_write_property(zval *object, zval *member, zval *value, const zend_literal *key TSRMLS_DC) /* {{{ */ { spl_array_object *intern = (spl_array_object*)zend_object_store_get_object(object TSRMLS_CC); if ((intern->ar_flags & SPL_ARRAY_ARRAY_AS_PROPS) != 0 && !std_object_handlers.has_property(object, member, 2, key TSRMLS_CC)) { spl_array_write_dimension(object, member, value TSRMLS_CC); return; } std_object_handlers.write_property(object, member, value, key TSRMLS_CC); } /* }}} */ static zval **spl_array_get_property_ptr_ptr(zval *object, zval *member, int type, const zend_literal *key TSRMLS_DC) /* {{{ */ { spl_array_object *intern = (spl_array_object*)zend_object_store_get_object(object TSRMLS_CC); if ((intern->ar_flags & SPL_ARRAY_ARRAY_AS_PROPS) != 0 && !std_object_handlers.has_property(object, member, 2, key 
TSRMLS_CC)) { return spl_array_get_dimension_ptr_ptr(1, object, member, type TSRMLS_CC); } return std_object_handlers.get_property_ptr_ptr(object, member, type, key TSRMLS_CC); } /* }}} */ static int spl_array_has_property(zval *object, zval *member, int has_set_exists, const zend_literal *key TSRMLS_DC) /* {{{ */ { spl_array_object *intern = (spl_array_object*)zend_object_store_get_object(object TSRMLS_CC); if ((intern->ar_flags & SPL_ARRAY_ARRAY_AS_PROPS) != 0 && !std_object_handlers.has_property(object, member, 2, key TSRMLS_CC)) { return spl_array_has_dimension(object, member, has_set_exists TSRMLS_CC); } return std_object_handlers.has_property(object, member, has_set_exists, key TSRMLS_CC); } /* }}} */ static void spl_array_unset_property(zval *object, zval *member, const zend_literal *key TSRMLS_DC) /* {{{ */ { spl_array_object *intern = (spl_array_object*)zend_object_store_get_object(object TSRMLS_CC); if ((intern->ar_flags & SPL_ARRAY_ARRAY_AS_PROPS) != 0 && !std_object_handlers.has_property(object, member, 2, key TSRMLS_CC)) { spl_array_unset_dimension(object, member TSRMLS_CC); spl_array_rewind(intern TSRMLS_CC); /* because deletion might invalidate position */ return; } std_object_handlers.unset_property(object, member, key TSRMLS_CC); } /* }}} */ static int spl_array_compare_objects(zval *o1, zval *o2 TSRMLS_DC) /* {{{ */ { HashTable *ht1, *ht2; spl_array_object *intern1, *intern2; int result = 0; zval temp_zv; intern1 = (spl_array_object*)zend_object_store_get_object(o1 TSRMLS_CC); intern2 = (spl_array_object*)zend_object_store_get_object(o2 TSRMLS_CC); ht1 = spl_array_get_hash_table(intern1, 0 TSRMLS_CC); ht2 = spl_array_get_hash_table(intern2, 0 TSRMLS_CC); zend_compare_symbol_tables(&temp_zv, ht1, ht2 TSRMLS_CC); result = (int)Z_LVAL(temp_zv); /* if we just compared std.properties, don't do it again */ if (result == 0 && !(ht1 == intern1->std.properties && ht2 == intern2->std.properties)) { result = std_object_handlers.compare_objects(o1, o2 
TSRMLS_CC); } return result; } /* }}} */ static int spl_array_skip_protected(spl_array_object *intern, HashTable *aht TSRMLS_DC) /* {{{ */ { char *string_key; uint string_length; ulong num_key; if (Z_TYPE_P(intern->array) == IS_OBJECT) { do { if (zend_hash_get_current_key_ex(aht, &string_key, &string_length, &num_key, 0, &intern->pos) == HASH_KEY_IS_STRING) { /* zend_hash_get_current_key_ex() should never set * string_length to 0 when returning HASH_KEY_IS_STRING, but we * may as well be defensive and consider that successful. * Beyond that, we're looking for protected keys (which will * have a null byte at string_key[0]), but want to avoid * skipping completely empty keys (which will also have the * null byte, but a string_length of 1). */ if (!string_length || string_key[0] || string_length == 1) { return SUCCESS; } } else { return SUCCESS; } if (zend_hash_has_more_elements_ex(aht, &intern->pos) != SUCCESS) { return FAILURE; } zend_hash_move_forward_ex(aht, &intern->pos); spl_array_update_pos(intern); } while (1); } return FAILURE; } /* }}} */ static int spl_array_next_no_verify(spl_array_object *intern, HashTable *aht TSRMLS_DC) /* {{{ */ { zend_hash_move_forward_ex(aht, &intern->pos); spl_array_update_pos(intern); if (Z_TYPE_P(intern->array) == IS_OBJECT) { return spl_array_skip_protected(intern, aht TSRMLS_CC); } else { return zend_hash_has_more_elements_ex(aht, &intern->pos); } } /* }}} */ static int spl_array_next_ex(spl_array_object *intern, HashTable *aht TSRMLS_DC) /* {{{ */ { if ((intern->ar_flags & SPL_ARRAY_IS_REF) && spl_hash_verify_pos_ex(intern, aht TSRMLS_CC) == FAILURE) { php_error_docref(NULL TSRMLS_CC, E_NOTICE, "Array was modified outside object and internal position is no longer valid"); return FAILURE; } return spl_array_next_no_verify(intern, aht TSRMLS_CC); } /* }}} */ static int spl_array_next(spl_array_object *intern TSRMLS_DC) /* {{{ */ { HashTable *aht = spl_array_get_hash_table(intern, 0 TSRMLS_CC); return spl_array_next_ex(intern, aht 
TSRMLS_CC); } /* }}} */ /* define an overloaded iterator structure */ typedef struct { zend_user_iterator intern; spl_array_object *object; } spl_array_it; static void spl_array_it_dtor(zend_object_iterator *iter TSRMLS_DC) /* {{{ */ { spl_array_it *iterator = (spl_array_it *)iter; zend_user_it_invalidate_current(iter TSRMLS_CC); zval_ptr_dtor((zval**)&iterator->intern.it.data); efree(iterator); } /* }}} */ static int spl_array_it_valid(zend_object_iterator *iter TSRMLS_DC) /* {{{ */ { spl_array_it *iterator = (spl_array_it *)iter; spl_array_object *object = iterator->object; HashTable *aht = spl_array_get_hash_table(object, 0 TSRMLS_CC); if (object->ar_flags & SPL_ARRAY_OVERLOADED_VALID) { return zend_user_it_valid(iter TSRMLS_CC); } else { if (spl_array_object_verify_pos_ex(object, aht, "ArrayIterator::valid(): " TSRMLS_CC) == FAILURE) { return FAILURE; } return zend_hash_has_more_elements_ex(aht, &object->pos); } } /* }}} */ static void spl_array_it_get_current_data(zend_object_iterator *iter, zval ***data TSRMLS_DC) /* {{{ */ { spl_array_it *iterator = (spl_array_it *)iter; spl_array_object *object = iterator->object; HashTable *aht = spl_array_get_hash_table(object, 0 TSRMLS_CC); if (object->ar_flags & SPL_ARRAY_OVERLOADED_CURRENT) { zend_user_it_get_current_data(iter, data TSRMLS_CC); } else { if (zend_hash_get_current_data_ex(aht, (void**)data, &object->pos) == FAILURE) { *data = NULL; } } } /* }}} */ static void spl_array_it_get_current_key(zend_object_iterator *iter, zval *key TSRMLS_DC) /* {{{ */ { spl_array_it *iterator = (spl_array_it *)iter; spl_array_object *object = iterator->object; HashTable *aht = spl_array_get_hash_table(object, 0 TSRMLS_CC); if (object->ar_flags & SPL_ARRAY_OVERLOADED_KEY) { zend_user_it_get_current_key(iter, key TSRMLS_CC); } else { if (spl_array_object_verify_pos_ex(object, aht, "ArrayIterator::current(): " TSRMLS_CC) == FAILURE) { ZVAL_NULL(key); } else { zend_hash_get_current_key_zval_ex(aht, key, &object->pos); } } } /* }}} 
*/

/* next() handler: either delegate to an overloaded next(), or advance the
 * internal hash position — re-validating it first when the backing array is
 * held by reference and may have been modified from outside. */
static void spl_array_it_move_forward(zend_object_iterator *iter TSRMLS_DC) /* {{{ */
{
	spl_array_it *iterator = (spl_array_it *)iter;
	spl_array_object *object = iterator->object;
	HashTable *aht = spl_array_get_hash_table(object, 0 TSRMLS_CC);

	if (object->ar_flags & SPL_ARRAY_OVERLOADED_NEXT) {
		zend_user_it_move_forward(iter TSRMLS_CC);
	} else {
		zend_user_it_invalidate_current(iter TSRMLS_CC);
		if (!aht) {
			/* NOTE(review): message names current() although this is the
			 * next() path — apparent upstream copy/paste quirk; the string
			 * is runtime output and is kept as-is. */
			php_error_docref(NULL TSRMLS_CC, E_NOTICE, "ArrayIterator::current(): Array was modified outside object and is no longer an array");
			return;
		}

		if ((object->ar_flags & SPL_ARRAY_IS_REF) && spl_hash_verify_pos_ex(object, aht TSRMLS_CC) == FAILURE) {
			php_error_docref(NULL TSRMLS_CC, E_NOTICE, "ArrayIterator::next(): Array was modified outside object and internal position is no longer valid");
		} else {
			spl_array_next_no_verify(object, aht TSRMLS_CC);
		}
	}
}
/* }}} */

/* Reset the stored position to the first (non-protected) element of aht. */
static void spl_array_rewind_ex(spl_array_object *intern, HashTable *aht TSRMLS_DC) /* {{{ */
{
	zend_hash_internal_pointer_reset_ex(aht, &intern->pos);
	spl_array_update_pos(intern);
	spl_array_skip_protected(intern, aht TSRMLS_CC);
}
/* }}} */

/* Rewind with a NULL-table guard: the backing storage may have been
 * replaced from outside the object with a non-array value. */
static void spl_array_rewind(spl_array_object *intern TSRMLS_DC) /* {{{ */
{
	HashTable *aht = spl_array_get_hash_table(intern, 0 TSRMLS_CC);

	if (!aht) {
		php_error_docref(NULL TSRMLS_CC, E_NOTICE, "ArrayIterator::rewind(): Array was modified outside object and is no longer an array");
		return;
	}

	spl_array_rewind_ex(intern, aht TSRMLS_CC);
}
/* }}} */

/* rewind() handler for the overloaded iterator. */
static void spl_array_it_rewind(zend_object_iterator *iter TSRMLS_DC) /* {{{ */
{
	spl_array_it *iterator = (spl_array_it *)iter;
	spl_array_object *object = iterator->object;

	if (object->ar_flags & SPL_ARRAY_OVERLOADED_REWIND) {
		zend_user_it_rewind(iter TSRMLS_CC);
	} else {
		zend_user_it_invalidate_current(iter TSRMLS_CC);
		spl_array_rewind(object TSRMLS_CC);
	}
}
/* }}} */

/* {{{ spl_array_set_array
 * Bind a new backing store (array or object) to the wrapper.
 * just_array != 0 means only the array argument was supplied by the caller. */
static void spl_array_set_array(zval *object, spl_array_object *intern, zval **array, long ar_flags, int just_array 
TSRMLS_DC)
{
	if (Z_TYPE_PP(array) == IS_ARRAY) {
		/* copy-on-write split so external writers don't alias our storage */
		SEPARATE_ZVAL_IF_NOT_REF(array);
	}

	if (Z_TYPE_PP(array) == IS_OBJECT && (Z_OBJ_HT_PP(array) == &spl_handler_ArrayObject || Z_OBJ_HT_PP(array) == &spl_handler_ArrayIterator)) {
		/* Another ArrayObject/ArrayIterator: share its storage. */
		zval_ptr_dtor(&intern->array);
		if (just_array) {
			spl_array_object *other = (spl_array_object*)zend_object_store_get_object(*array TSRMLS_CC);
			ar_flags = other->ar_flags & ~SPL_ARRAY_INT_MASK;
		}
		ar_flags |= SPL_ARRAY_USE_OTHER;
		intern->array = *array;
	} else {
		if (Z_TYPE_PP(array) != IS_OBJECT && Z_TYPE_PP(array) != IS_ARRAY) {
			zend_throw_exception(spl_ce_InvalidArgumentException, "Passed variable is not an array or object, using empty array instead", 0 TSRMLS_CC);
			return;
		}
		zval_ptr_dtor(&intern->array);
		intern->array = *array;
	}
	if (object == *array) {
		/* object handed to itself: iterate over its own properties */
		intern->ar_flags |= SPL_ARRAY_IS_SELF;
		intern->ar_flags &= ~SPL_ARRAY_USE_OTHER;
	} else {
		intern->ar_flags &= ~SPL_ARRAY_IS_SELF;
	}
	intern->ar_flags |= ar_flags;
	Z_ADDREF_P(intern->array);
	if (Z_TYPE_PP(array) == IS_OBJECT) {
		zend_object_get_properties_t handler = Z_OBJ_HANDLER_PP(array, get_properties);
		/* reject objects whose property table cannot be used directly */
		if ((handler != std_object_handlers.get_properties && handler != spl_array_get_properties)
		|| !spl_array_get_hash_table(intern, 0 TSRMLS_CC)) {
			zend_throw_exception_ex(spl_ce_InvalidArgumentException, 0 TSRMLS_CC, "Overloaded object of type %s is not compatible with %s", Z_OBJCE_PP(array)->name, intern->std.ce->name);
		}
	}
	spl_array_rewind(intern TSRMLS_CC);
}
/* }}} */

/* iterator handler table */
zend_object_iterator_funcs spl_array_it_funcs = {
	spl_array_it_dtor,
	spl_array_it_valid,
	spl_array_it_get_current_data,
	spl_array_it_get_current_key,
	spl_array_it_move_forward,
	spl_array_it_rewind
};

/* get_iterator handler shared by ArrayIterator and RecursiveArrayIterator;
 * refuses by-reference foreach when current() is overloaded. */
zend_object_iterator *spl_array_get_iterator(zend_class_entry *ce, zval *object, int by_ref TSRMLS_DC) /* {{{ */
{
	spl_array_it *iterator;
	spl_array_object *array_object = (spl_array_object*)zend_object_store_get_object(object TSRMLS_CC);

	if (by_ref && (array_object->ar_flags & 
SPL_ARRAY_OVERLOADED_CURRENT)) {
		/* foreach ($it as &$v) cannot work when current() is user-defined */
		zend_error(E_ERROR, "An iterator cannot be used with foreach by reference");
	}

	iterator = emalloc(sizeof(spl_array_it));

	Z_ADDREF_P(object); /* keep the wrapped object alive; released in it_dtor */
	iterator->intern.it.data = (void*)object;
	iterator->intern.it.funcs = &spl_array_it_funcs;
	iterator->intern.ce = ce;
	iterator->intern.value = NULL;
	iterator->object = array_object;

	return (zend_object_iterator*)iterator;
}
/* }}} */

/* {{{ proto void ArrayObject::__construct(array|object ar = array() [, int flags = 0 [, string iterator_class = "ArrayIterator"]])
       proto void ArrayIterator::__construct(array|object ar = array() [, int flags = 0])
   Constructs a new array iterator from a path. */
SPL_METHOD(Array, __construct)
{
	zval *object = getThis();
	spl_array_object *intern;
	zval **array;
	long ar_flags = 0;
	zend_class_entry *ce_get_iterator = spl_ce_Iterator;
	zend_error_handling error_handling;

	if (ZEND_NUM_ARGS() == 0) {
		return; /* nothing to do */
	}

	/* report argument errors as InvalidArgumentException instead of E_* */
	zend_replace_error_handling(EH_THROW, spl_ce_InvalidArgumentException, &error_handling TSRMLS_CC);

	intern = (spl_array_object*)zend_object_store_get_object(object TSRMLS_CC);

	if (zend_parse_parameters(ZEND_NUM_ARGS() TSRMLS_CC, "Z|lC", &array, &ar_flags, &ce_get_iterator) == FAILURE) {
		zend_restore_error_handling(&error_handling TSRMLS_CC);
		return;
	}

	if (ZEND_NUM_ARGS() > 2) {
		intern->ce_get_iterator = ce_get_iterator;
	}

	/* strip internal-only flag bits from the user-supplied value */
	ar_flags &= ~SPL_ARRAY_INT_MASK;

	spl_array_set_array(object, intern, array, ar_flags, ZEND_NUM_ARGS() == 1 TSRMLS_CC);

	zend_restore_error_handling(&error_handling TSRMLS_CC);
}
/* }}} */

/* {{{ proto void ArrayObject::setIteratorClass(string iterator_class)
   Set the class used in getIterator. 
*/
SPL_METHOD(Array, setIteratorClass)
{
	zval *object = getThis();
	spl_array_object *intern = (spl_array_object*)zend_object_store_get_object(object TSRMLS_CC);
	zend_class_entry * ce_get_iterator = spl_ce_Iterator;

	if (zend_parse_parameters(ZEND_NUM_ARGS() TSRMLS_CC, "C", &ce_get_iterator) == FAILURE) {
		return;
	}

	intern->ce_get_iterator = ce_get_iterator;
}
/* }}} */

/* {{{ proto string ArrayObject::getIteratorClass()
   Get the class used in getIterator. */
SPL_METHOD(Array, getIteratorClass)
{
	zval *object = getThis();
	spl_array_object *intern = (spl_array_object*)zend_object_store_get_object(object TSRMLS_CC);

	if (zend_parse_parameters_none() == FAILURE) {
		return;
	}

	RETURN_STRING(intern->ce_get_iterator->name, 1);
}
/* }}} */

/* {{{ proto int ArrayObject::getFlags()
   Get flags */
SPL_METHOD(Array, getFlags)
{
	zval *object = getThis();
	spl_array_object *intern = (spl_array_object*)zend_object_store_get_object(object TSRMLS_CC);

	if (zend_parse_parameters_none() == FAILURE) {
		return;
	}

	/* internal bits are never exposed to userland */
	RETURN_LONG(intern->ar_flags & ~SPL_ARRAY_INT_MASK);
}
/* }}} */

/* {{{ proto void ArrayObject::setFlags(int flags)
   Set flags */
SPL_METHOD(Array, setFlags)
{
	zval *object = getThis();
	spl_array_object *intern = (spl_array_object*)zend_object_store_get_object(object TSRMLS_CC);
	long ar_flags = 0;

	if (zend_parse_parameters(ZEND_NUM_ARGS() TSRMLS_CC, "l", &ar_flags) == FAILURE) {
		return;
	}

	/* keep internal bits, replace only the user-visible ones */
	intern->ar_flags = (intern->ar_flags & SPL_ARRAY_INT_MASK) | (ar_flags & ~SPL_ARRAY_INT_MASK);
}
/* }}} */

/* {{{ proto Array|Object ArrayObject::exchangeArray(Array|Object ar = array())
   Replace the referenced array or object with a new one and return the old one (right now copy - to be changed) */
SPL_METHOD(Array, exchangeArray)
{
	zval *object = getThis(), *tmp, **array;
	spl_array_object *intern = (spl_array_object*)zend_object_store_get_object(object TSRMLS_CC);

	/* NOTE(review): the old storage is copied before arguments are parsed and
	 * spl_array_get_hash_table() is assumed non-NULL here — verify against
	 * the "modified outside object" failure mode handled elsewhere. */
	array_init(return_value);
	zend_hash_copy(HASH_OF(return_value), spl_array_get_hash_table(intern, 0 TSRMLS_CC), (copy_ctor_func_t) 
zval_add_ref, &tmp, sizeof(zval*));
	if (zend_parse_parameters(ZEND_NUM_ARGS() TSRMLS_CC, "Z", &array) == FAILURE) {
		return;
	}

	spl_array_set_array(object, intern, array, 0L, 1 TSRMLS_CC);
}
/* }}} */

/* {{{ proto ArrayIterator ArrayObject::getIterator()
   Create a new iterator from a ArrayObject instance */
SPL_METHOD(Array, getIterator)
{
	zval *object = getThis();
	spl_array_object *intern = (spl_array_object*)zend_object_store_get_object(object TSRMLS_CC);
	spl_array_object *iterator;
	HashTable *aht = spl_array_get_hash_table(intern, 0 TSRMLS_CC);

	if (zend_parse_parameters_none() == FAILURE) {
		return;
	}

	if (!aht) {
		php_error_docref(NULL TSRMLS_CC, E_NOTICE, "Array was modified outside object and is no longer an array");
		return;
	}

	/* instantiate intern->ce_get_iterator wrapping this object (no clone) */
	return_value->type = IS_OBJECT;
	return_value->value.obj = spl_array_object_new_ex(intern->ce_get_iterator, &iterator, object, 0 TSRMLS_CC);
	Z_SET_REFCOUNT_P(return_value, 1);
	Z_SET_ISREF_P(return_value);
}
/* }}} */

/* {{{ proto void ArrayIterator::rewind()
   Rewind array back to the start */
SPL_METHOD(Array, rewind)
{
	zval *object = getThis();
	spl_array_object *intern = (spl_array_object*)zend_object_store_get_object(object TSRMLS_CC);

	if (zend_parse_parameters_none() == FAILURE) {
		return;
	}

	spl_array_rewind(intern TSRMLS_CC);
}
/* }}} */

/* {{{ proto void ArrayIterator::seek(int $position)
   Seek to position. 
*/
SPL_METHOD(Array, seek)
{
	long opos, position;
	zval *object = getThis();
	spl_array_object *intern = (spl_array_object*)zend_object_store_get_object(object TSRMLS_CC);
	HashTable *aht = spl_array_get_hash_table(intern, 0 TSRMLS_CC);
	int result;

	if (zend_parse_parameters(ZEND_NUM_ARGS() TSRMLS_CC, "l", &position) == FAILURE) {
		return;
	}

	if (!aht) {
		php_error_docref(NULL TSRMLS_CC, E_NOTICE, "Array was modified outside object and is no longer an array");
		return;
	}

	opos = position; /* remember the requested offset for the exception text */

	if (position >= 0) { /* negative values are not supported */
		/* O(position): rewind, then step forward element by element */
		spl_array_rewind(intern TSRMLS_CC);
		result = SUCCESS;

		while (position-- > 0 && (result = spl_array_next(intern TSRMLS_CC)) == SUCCESS);

		if (result == SUCCESS && zend_hash_has_more_elements_ex(aht, &intern->pos) == SUCCESS) {
			return; /* ok */
		}
	}
	zend_throw_exception_ex(spl_ce_OutOfBoundsException, 0 TSRMLS_CC, "Seek position %ld is out of range", opos);
}
/* }}} */

/* Count the elements of the backing store.  Plain arrays use the hash's
 * element count directly; object-backed storage is walked element by
 * element (protected members may be skipped along the way). */
int static spl_array_object_count_elements_helper(spl_array_object *intern, long *count TSRMLS_DC) /* {{{ */
{
	HashTable *aht = spl_array_get_hash_table(intern, 0 TSRMLS_CC);
	HashPosition pos;

	if (!aht) {
		php_error_docref(NULL TSRMLS_CC, E_NOTICE, "Array was modified outside object and is no longer an array");
		*count = 0;
		return FAILURE;
	}

	if (Z_TYPE_P(intern->array) == IS_OBJECT) {
		/* We need to store the 'pos' since we'll modify it in the functions
		 * we're going to call and which do not support 'pos' as parameter. 
*/
		pos = intern->pos;
		*count = 0;
		spl_array_rewind(intern TSRMLS_CC);
		while(intern->pos && spl_array_next(intern TSRMLS_CC) == SUCCESS) {
			(*count)++;
		}
		spl_array_set_pos(intern, pos); /* restore the caller-visible position */
		return SUCCESS;
	} else {
		*count = zend_hash_num_elements(aht);
		return SUCCESS;
	}
}
/* }}} */

/* count_elements object handler: prefer a user-defined count() method when
 * present (its result is coerced to long); otherwise use the helper above. */
int spl_array_object_count_elements(zval *object, long *count TSRMLS_DC) /* {{{ */
{
	spl_array_object *intern = (spl_array_object*)zend_object_store_get_object(object TSRMLS_CC);

	if (intern->fptr_count) {
		zval *rv;
		zend_call_method_with_0_params(&object, intern->std.ce, &intern->fptr_count, "count", &rv);
		if (rv) {
			/* cache the converted result in intern->retval, releasing the old one */
			zval_ptr_dtor(&intern->retval);
			MAKE_STD_ZVAL(intern->retval);
			ZVAL_ZVAL(intern->retval, rv, 1, 1);
			convert_to_long(intern->retval);
			*count = (long) Z_LVAL_P(intern->retval);
			return SUCCESS;
		}
		*count = 0;
		return FAILURE;
	}
	return spl_array_object_count_elements_helper(intern, count TSRMLS_CC);
}
/* }}} */

/* {{{ proto int ArrayObject::count()
       proto int ArrayIterator::count()
   Return the number of elements in the Iterator. 
*/
SPL_METHOD(Array, count)
{
	long count;
	spl_array_object *intern = (spl_array_object*)zend_object_store_get_object(getThis() TSRMLS_CC);

	if (zend_parse_parameters_none() == FAILURE) {
		return;
	}

	spl_array_object_count_elements_helper(intern, &count TSRMLS_CC);

	RETURN_LONG(count);
}
/* }}} */

/* Invoke the global function fname on the backing hashtable in place; used
 * to implement asort/ksort/uasort/uksort/natsort/natcasesort.  The hash is
 * wrapped in a temporary IS_ARRAY zval, and nApplyCount is raised around the
 * call so re-entrant modification (e.g. from a sort callback) is rejected. */
static void spl_array_method(INTERNAL_FUNCTION_PARAMETERS, char *fname, int fname_len, int use_arg) /* {{{ */
{
	spl_array_object *intern = (spl_array_object*)zend_object_store_get_object(getThis() TSRMLS_CC);
	HashTable *aht = spl_array_get_hash_table(intern, 0 TSRMLS_CC);
	zval *tmp, *arg = NULL;
	zval *retval_ptr = NULL;

	MAKE_STD_ZVAL(tmp);
	Z_TYPE_P(tmp) = IS_ARRAY;
	Z_ARRVAL_P(tmp) = aht;

	if (!use_arg) {
		aht->nApplyCount++;
		zend_call_method(NULL, NULL, NULL, fname, fname_len, &retval_ptr, 1, tmp, NULL TSRMLS_CC);
		aht->nApplyCount--;
	} else if (use_arg == SPL_ARRAY_METHOD_MAY_USER_ARG) {
		if (zend_parse_parameters_ex(ZEND_PARSE_PARAMS_QUIET, ZEND_NUM_ARGS() TSRMLS_CC, "|z", &arg) == FAILURE) {
			/* neutralize tmp before freeing so the hashtable survives */
			Z_TYPE_P(tmp) = IS_NULL;
			zval_ptr_dtor(&tmp);
			zend_throw_exception(spl_ce_BadMethodCallException, "Function expects one argument at most", 0 TSRMLS_CC);
			return;
		}
		aht->nApplyCount++;
		zend_call_method(NULL, NULL, NULL, fname, fname_len, &retval_ptr, arg? 
2 : 1, tmp, arg TSRMLS_CC);
		aht->nApplyCount--;
	} else {
		if (ZEND_NUM_ARGS() != 1 || zend_parse_parameters_ex(ZEND_PARSE_PARAMS_QUIET, ZEND_NUM_ARGS() TSRMLS_CC, "z", &arg) == FAILURE) {
			Z_TYPE_P(tmp) = IS_NULL;
			zval_ptr_dtor(&tmp);
			zend_throw_exception(spl_ce_BadMethodCallException, "Function expects exactly one argument", 0 TSRMLS_CC);
			return;
		}
		aht->nApplyCount++;
		zend_call_method(NULL, NULL, NULL, fname, fname_len, &retval_ptr, 2, tmp, arg TSRMLS_CC);
		aht->nApplyCount--;
	}
	Z_TYPE_P(tmp) = IS_NULL; /* we want to destroy the zval, not the hashtable */
	zval_ptr_dtor(&tmp);
	if (retval_ptr) {
		COPY_PZVAL_TO_ZVAL(*return_value, retval_ptr);
	}
}
/* }}} */

/* Generate a thin SPL method body that forwards to spl_array_method(). */
#define SPL_ARRAY_METHOD(cname, fname, use_arg) \
SPL_METHOD(cname, fname) \
{ \
	spl_array_method(INTERNAL_FUNCTION_PARAM_PASSTHRU, #fname, sizeof(#fname)-1, use_arg); \
}

/* {{{ proto int ArrayObject::asort([int $sort_flags = SORT_REGULAR ])
       proto int ArrayIterator::asort([int $sort_flags = SORT_REGULAR ])
   Sort the entries by values. */
SPL_ARRAY_METHOD(Array, asort, SPL_ARRAY_METHOD_MAY_USER_ARG) /* }}} */

/* {{{ proto int ArrayObject::ksort([int $sort_flags = SORT_REGULAR ])
       proto int ArrayIterator::ksort([int $sort_flags = SORT_REGULAR ])
   Sort the entries by key. */
SPL_ARRAY_METHOD(Array, ksort, SPL_ARRAY_METHOD_MAY_USER_ARG) /* }}} */

/* {{{ proto int ArrayObject::uasort(callback cmp_function)
       proto int ArrayIterator::uasort(callback cmp_function)
   Sort the entries by values user defined function. */
SPL_ARRAY_METHOD(Array, uasort, SPL_ARRAY_METHOD_USE_ARG) /* }}} */

/* {{{ proto int ArrayObject::uksort(callback cmp_function)
       proto int ArrayIterator::uksort(callback cmp_function)
   Sort the entries by key using user defined function. */
SPL_ARRAY_METHOD(Array, uksort, SPL_ARRAY_METHOD_USE_ARG) /* }}} */

/* {{{ proto int ArrayObject::natsort()
       proto int ArrayIterator::natsort()
   Sort the entries by values using "natural order" algorithm. 
*/
SPL_ARRAY_METHOD(Array, natsort, SPL_ARRAY_METHOD_NO_ARG) /* }}} */

/* {{{ proto int ArrayObject::natcasesort()
       proto int ArrayIterator::natcasesort()
   Sort the entries by key using case insensitive "natural order" algorithm. */
SPL_ARRAY_METHOD(Array, natcasesort, SPL_ARRAY_METHOD_NO_ARG) /* }}} */

/* {{{ proto mixed|NULL ArrayIterator::current()
   Return current array entry */
SPL_METHOD(Array, current)
{
	zval *object = getThis();
	spl_array_object *intern = (spl_array_object*)zend_object_store_get_object(object TSRMLS_CC);
	zval **entry;
	HashTable *aht = spl_array_get_hash_table(intern, 0 TSRMLS_CC);

	if (zend_parse_parameters_none() == FAILURE) {
		return;
	}

	if (spl_array_object_verify_pos(intern, aht TSRMLS_CC) == FAILURE) {
		return;
	}

	if (zend_hash_get_current_data_ex(aht, (void **) &entry, &intern->pos) == FAILURE) {
		return;
	}
	RETVAL_ZVAL(*entry, 1, 0);
}
/* }}} */

/* {{{ proto mixed|NULL ArrayIterator::key()
   Return current array key */
SPL_METHOD(Array, key)
{
	if (zend_parse_parameters_none() == FAILURE) {
		return;
	}

	spl_array_iterator_key(getThis(), return_value TSRMLS_CC);
}
/* }}} */

/* Shared key() implementation (exported; also used by other SPL code). */
void spl_array_iterator_key(zval *object, zval *return_value TSRMLS_DC) /* {{{ */
{
	spl_array_object *intern = (spl_array_object*)zend_object_store_get_object(object TSRMLS_CC);
	HashTable *aht = spl_array_get_hash_table(intern, 0 TSRMLS_CC);

	if (spl_array_object_verify_pos(intern, aht TSRMLS_CC) == FAILURE) {
		return;
	}

	zend_hash_get_current_key_zval_ex(aht, return_value, &intern->pos);
}
/* }}} */

/* {{{ proto void ArrayIterator::next()
   Move to next entry */
SPL_METHOD(Array, next)
{
	zval *object = getThis();
	spl_array_object *intern = (spl_array_object*)zend_object_store_get_object(object TSRMLS_CC);
	HashTable *aht = spl_array_get_hash_table(intern, 0 TSRMLS_CC);

	if (zend_parse_parameters_none() == FAILURE) {
		return;
	}

	if (spl_array_object_verify_pos(intern, aht TSRMLS_CC) == FAILURE) {
		return;
	}

	spl_array_next_no_verify(intern, aht TSRMLS_CC);
}
/* }}} */

/* {{{ proto bool 
ArrayIterator::valid()
   Check whether array contains more entries */
SPL_METHOD(Array, valid)
{
	zval *object = getThis();
	spl_array_object *intern = (spl_array_object*)zend_object_store_get_object(object TSRMLS_CC);
	HashTable *aht = spl_array_get_hash_table(intern, 0 TSRMLS_CC);

	if (zend_parse_parameters_none() == FAILURE) {
		return;
	}

	if (spl_array_object_verify_pos(intern, aht TSRMLS_CC) == FAILURE) {
		RETURN_FALSE;
	} else {
		RETURN_BOOL(zend_hash_has_more_elements_ex(aht, &intern->pos) == SUCCESS);
	}
}
/* }}} */

/* {{{ proto bool RecursiveArrayIterator::hasChildren()
   Check whether current element has children (e.g. is an array) */
SPL_METHOD(Array, hasChildren)
{
	zval *object = getThis(), **entry;
	spl_array_object *intern = (spl_array_object*)zend_object_store_get_object(object TSRMLS_CC);
	HashTable *aht = spl_array_get_hash_table(intern, 0 TSRMLS_CC);

	if (zend_parse_parameters_none() == FAILURE) {
		return;
	}

	if (spl_array_object_verify_pos(intern, aht TSRMLS_CC) == FAILURE) {
		RETURN_FALSE;
	}

	if (zend_hash_get_current_data_ex(aht, (void **) &entry, &intern->pos) == FAILURE) {
		RETURN_FALSE;
	}

	/* objects only count as children unless CHILD_ARRAYS_ONLY is set */
	RETURN_BOOL(Z_TYPE_PP(entry) == IS_ARRAY || (Z_TYPE_PP(entry) == IS_OBJECT && (intern->ar_flags & SPL_ARRAY_CHILD_ARRAYS_ONLY) == 0));
}
/* }}} */

/* {{{ proto object RecursiveArrayIterator::getChildren()
   Create a sub iterator for the current element (same class as $this) */
SPL_METHOD(Array, getChildren)
{
	zval *object = getThis(), **entry, *flags;
	spl_array_object *intern = (spl_array_object*)zend_object_store_get_object(object TSRMLS_CC);
	HashTable *aht = spl_array_get_hash_table(intern, 0 TSRMLS_CC);

	if (zend_parse_parameters_none() == FAILURE) {
		return;
	}

	if (spl_array_object_verify_pos(intern, aht TSRMLS_CC) == FAILURE) {
		return;
	}

	if (zend_hash_get_current_data_ex(aht, (void **) &entry, &intern->pos) == FAILURE) {
		return;
	}

	if (Z_TYPE_PP(entry) == IS_OBJECT) {
		if ((intern->ar_flags & SPL_ARRAY_CHILD_ARRAYS_ONLY) != 0) {
			return;
		}
		if (instanceof_function(Z_OBJCE_PP(entry), 
Z_OBJCE_P(getThis()) TSRMLS_CC)) {
			/* child already is an instance of our class: return it directly */
			RETURN_ZVAL(*entry, 1, 0);
		}
	}

	/* otherwise wrap the entry in a fresh instance of our own class */
	MAKE_STD_ZVAL(flags);
	ZVAL_LONG(flags, SPL_ARRAY_USE_OTHER | intern->ar_flags);
	spl_instantiate_arg_ex2(Z_OBJCE_P(getThis()), &return_value, 0, *entry, flags TSRMLS_CC);
	zval_ptr_dtor(&flags);
}
/* }}} */

/* {{{ proto string ArrayObject::serialize()
   Serialize the object.  Format: "x:<flags>[<storage>;]m:<members>" */
SPL_METHOD(Array, serialize)
{
	zval *object = getThis();
	spl_array_object *intern = (spl_array_object*)zend_object_store_get_object(object TSRMLS_CC);
	HashTable *aht = spl_array_get_hash_table(intern, 0 TSRMLS_CC);
	zval members, *pmembers;
	php_serialize_data_t var_hash;
	smart_str buf = {0};
	zval *flags;

	if (zend_parse_parameters_none() == FAILURE) {
		return;
	}

	if (!aht) {
		php_error_docref(NULL TSRMLS_CC, E_NOTICE, "Array was modified outside object and is no longer an array");
		return;
	}

	PHP_VAR_SERIALIZE_INIT(var_hash);

	MAKE_STD_ZVAL(flags);
	ZVAL_LONG(flags, (intern->ar_flags & SPL_ARRAY_CLONE_MASK));

	/* storage */
	smart_str_appendl(&buf, "x:", 2);
	php_var_serialize(&buf, &flags, &var_hash TSRMLS_CC);
	zval_ptr_dtor(&flags);

	if (!(intern->ar_flags & SPL_ARRAY_IS_SELF)) {
		php_var_serialize(&buf, &intern->array, &var_hash TSRMLS_CC);
		smart_str_appendc(&buf, ';');
	}

	/* members */
	smart_str_appendl(&buf, "m:", 2);

	INIT_PZVAL(&members);
	if (!intern->std.properties) {
		rebuild_object_properties(&intern->std);
	}
	/* stack zval borrowing the properties table (not addref'd; INIT_PZVAL above) */
	Z_ARRVAL(members) = intern->std.properties;
	Z_TYPE(members) = IS_ARRAY;
	pmembers = &members;
	php_var_serialize(&buf, &pmembers, &var_hash TSRMLS_CC); /* finishes the string */

	/* done */
	PHP_VAR_SERIALIZE_DESTROY(var_hash);

	if (buf.c) {
		RETURN_STRINGL(buf.c, buf.len, 0);
	}

	RETURN_NULL();
}
/* }}} */

/* {{{ proto void ArrayObject::unserialize(string serialized)
 * unserialize the object
 */
SPL_METHOD(Array, unserialize)
{
	spl_array_object *intern = (spl_array_object*)zend_object_store_get_object(getThis() TSRMLS_CC);

	char *buf;
	int buf_len;
	const unsigned char *p, *s;
	php_unserialize_data_t var_hash;
	zval *pmembers, *pflags = NULL;
HashTable *aht;
	long flags;

	if (zend_parse_parameters(ZEND_NUM_ARGS() TSRMLS_CC, "s", &buf, &buf_len) == FAILURE) {
		return;
	}

	if (buf_len == 0) {
		return;
	}

	aht = spl_array_get_hash_table(intern, 0 TSRMLS_CC);
	if (aht->nApplyCount > 0) {
		/* refuse to replace storage that a sort callback is currently iterating */
		zend_error(E_WARNING, "Modification of ArrayObject during sorting is prohibited");
		return;
	}

	/* storage */
	s = p = (const unsigned char*)buf;
	PHP_VAR_UNSERIALIZE_INIT(var_hash);

	if (*p!= 'x' || *++p != ':') {
		goto outexcept;
	}
	++p;

	ALLOC_INIT_ZVAL(pflags);
	if (!php_var_unserialize(&pflags, &p, s + buf_len, &var_hash TSRMLS_CC) || Z_TYPE_P(pflags) != IS_LONG) {
		goto outexcept;
	}

	/* register with var_hash so back-references cannot free it early */
	var_push_dtor(&var_hash, &pflags);

	--p; /* for ';' */
	flags = Z_LVAL_P(pflags);
	/* flags needs to be verified and we also need to verify whether the next
	 * thing we get is ';'. After that we require an 'm' or something else
	 * where 'm' stands for members and anything else should be an array. If
	 * neither 'a' or 'm' follows we have an error. */

	if (*p != ';') {
		goto outexcept;
	}
	++p;

	if (*p!='m') {
		if (*p!='a' && *p!='O' && *p!='C' && *p!='r') {
			goto outexcept;
		}
		intern->ar_flags &= ~SPL_ARRAY_CLONE_MASK;
		intern->ar_flags |= flags & SPL_ARRAY_CLONE_MASK;
		zval_ptr_dtor(&intern->array);
		ALLOC_INIT_ZVAL(intern->array);
		if (!php_var_unserialize(&intern->array, &p, s + buf_len, &var_hash TSRMLS_CC)) {
			goto outexcept;
		}
		/* same use-after-free protection for the new storage zval */
		var_push_dtor(&var_hash, &intern->array);
	}
	if (*p != ';') {
		goto outexcept;
	}
	++p;

	/* members */
	if (*p!= 'm' || *++p != ':') {
		goto outexcept;
	}
	++p;

	ALLOC_INIT_ZVAL(pmembers);
	if (!php_var_unserialize(&pmembers, &p, s + buf_len, &var_hash TSRMLS_CC) || Z_TYPE_P(pmembers) != IS_ARRAY) {
		zval_ptr_dtor(&pmembers);
		goto outexcept;
	}

	var_push_dtor(&var_hash, &pmembers);

	/* copy members */
	if (!intern->std.properties) {
		rebuild_object_properties(&intern->std);
	}
	zend_hash_copy(intern->std.properties, Z_ARRVAL_P(pmembers), (copy_ctor_func_t) zval_add_ref, (void *) NULL, sizeof(zval *));
	zval_ptr_dtor(&pmembers);

	/* done reading $serialized */
PHP_VAR_UNSERIALIZE_DESTROY(var_hash);
	if (pflags) {
		zval_ptr_dtor(&pflags);
	}
	return;

outexcept:
	/* common error exit: tear down unserialize bookkeeping, release the
	 * flags zval and report the byte offset where parsing failed */
	PHP_VAR_UNSERIALIZE_DESTROY(var_hash);
	if (pflags) {
		zval_ptr_dtor(&pflags);
	}
	zend_throw_exception_ex(spl_ce_UnexpectedValueException, 0 TSRMLS_CC, "Error at offset %ld of %d bytes", (long)((char*)p - buf), buf_len);
	return;

} /* }}} */

/* {{{ arginfo and function table */
ZEND_BEGIN_ARG_INFO_EX(arginfo_array___construct, 0, 0, 0)
	ZEND_ARG_INFO(0, array)
ZEND_END_ARG_INFO()

ZEND_BEGIN_ARG_INFO_EX(arginfo_array_offsetGet, 0, 0, 1)
	ZEND_ARG_INFO(0, index)
ZEND_END_ARG_INFO()

ZEND_BEGIN_ARG_INFO_EX(arginfo_array_offsetSet, 0, 0, 2)
	ZEND_ARG_INFO(0, index)
	ZEND_ARG_INFO(0, newval)
ZEND_END_ARG_INFO()

ZEND_BEGIN_ARG_INFO(arginfo_array_append, 0)
	ZEND_ARG_INFO(0, value)
ZEND_END_ARG_INFO()

ZEND_BEGIN_ARG_INFO(arginfo_array_seek, 0)
	ZEND_ARG_INFO(0, position)
ZEND_END_ARG_INFO()

ZEND_BEGIN_ARG_INFO(arginfo_array_exchangeArray, 0)
	ZEND_ARG_INFO(0, array)
ZEND_END_ARG_INFO()

ZEND_BEGIN_ARG_INFO(arginfo_array_setFlags, 0)
	ZEND_ARG_INFO(0, flags)
ZEND_END_ARG_INFO()

ZEND_BEGIN_ARG_INFO(arginfo_array_setIteratorClass, 0)
	ZEND_ARG_INFO(0, iteratorClass)
ZEND_END_ARG_INFO()

ZEND_BEGIN_ARG_INFO(arginfo_array_uXsort, 0)
	ZEND_ARG_INFO(0, cmp_function)
ZEND_END_ARG_INFO();

ZEND_BEGIN_ARG_INFO(arginfo_array_unserialize, 0)
	ZEND_ARG_INFO(0, serialized)
ZEND_END_ARG_INFO();

ZEND_BEGIN_ARG_INFO(arginfo_array_void, 0)
ZEND_END_ARG_INFO()

/* method table for ArrayObject */
static const zend_function_entry spl_funcs_ArrayObject[] = {
	SPL_ME(Array, __construct,   arginfo_array___construct,      ZEND_ACC_PUBLIC)
	SPL_ME(Array, offsetExists,  arginfo_array_offsetGet,        ZEND_ACC_PUBLIC)
	SPL_ME(Array, offsetGet,     arginfo_array_offsetGet,        ZEND_ACC_PUBLIC)
	SPL_ME(Array, offsetSet,     arginfo_array_offsetSet,        ZEND_ACC_PUBLIC)
	SPL_ME(Array, offsetUnset,   arginfo_array_offsetGet,        ZEND_ACC_PUBLIC)
	SPL_ME(Array, append,        arginfo_array_append,           ZEND_ACC_PUBLIC)
	SPL_ME(Array, getArrayCopy,  arginfo_array_void,             ZEND_ACC_PUBLIC)
	SPL_ME(Array, count, 
arginfo_array_void,             ZEND_ACC_PUBLIC)
	SPL_ME(Array, getFlags,         arginfo_array_void,          ZEND_ACC_PUBLIC)
	SPL_ME(Array, setFlags,         arginfo_array_setFlags,      ZEND_ACC_PUBLIC)
	SPL_ME(Array, asort,            arginfo_array_void,          ZEND_ACC_PUBLIC)
	SPL_ME(Array, ksort,            arginfo_array_void,          ZEND_ACC_PUBLIC)
	SPL_ME(Array, uasort,           arginfo_array_uXsort,        ZEND_ACC_PUBLIC)
	SPL_ME(Array, uksort,           arginfo_array_uXsort,        ZEND_ACC_PUBLIC)
	SPL_ME(Array, natsort,          arginfo_array_void,          ZEND_ACC_PUBLIC)
	SPL_ME(Array, natcasesort,      arginfo_array_void,          ZEND_ACC_PUBLIC)
	SPL_ME(Array, unserialize,      arginfo_array_unserialize,   ZEND_ACC_PUBLIC)
	SPL_ME(Array, serialize,        arginfo_array_void,          ZEND_ACC_PUBLIC)
	/* ArrayObject specific */
	SPL_ME(Array, getIterator,      arginfo_array_void,          ZEND_ACC_PUBLIC)
	SPL_ME(Array, exchangeArray,    arginfo_array_exchangeArray, ZEND_ACC_PUBLIC)
	SPL_ME(Array, setIteratorClass, arginfo_array_setIteratorClass, ZEND_ACC_PUBLIC)
	SPL_ME(Array, getIteratorClass, arginfo_array_void,          ZEND_ACC_PUBLIC)
	PHP_FE_END
};

/* method table for ArrayIterator (same SPL_METHOD(Array, ...) bodies) */
static const zend_function_entry spl_funcs_ArrayIterator[] = {
	SPL_ME(Array, __construct,   arginfo_array___construct,      ZEND_ACC_PUBLIC)
	SPL_ME(Array, offsetExists,  arginfo_array_offsetGet,        ZEND_ACC_PUBLIC)
	SPL_ME(Array, offsetGet,     arginfo_array_offsetGet,        ZEND_ACC_PUBLIC)
	SPL_ME(Array, offsetSet,     arginfo_array_offsetSet,        ZEND_ACC_PUBLIC)
	SPL_ME(Array, offsetUnset,   arginfo_array_offsetGet,        ZEND_ACC_PUBLIC)
	SPL_ME(Array, append,        arginfo_array_append,           ZEND_ACC_PUBLIC)
	SPL_ME(Array, getArrayCopy,  arginfo_array_void,             ZEND_ACC_PUBLIC)
	SPL_ME(Array, count,         arginfo_array_void,             ZEND_ACC_PUBLIC)
	SPL_ME(Array, getFlags,      arginfo_array_void,             ZEND_ACC_PUBLIC)
	SPL_ME(Array, setFlags,      arginfo_array_setFlags,         ZEND_ACC_PUBLIC)
	SPL_ME(Array, asort,         arginfo_array_void,             ZEND_ACC_PUBLIC)
	SPL_ME(Array, ksort,         arginfo_array_void,             ZEND_ACC_PUBLIC)
	SPL_ME(Array, uasort,        arginfo_array_uXsort,           ZEND_ACC_PUBLIC)
	SPL_ME(Array, uksort,        arginfo_array_uXsort,           ZEND_ACC_PUBLIC)
	SPL_ME(Array, natsort,       arginfo_array_void, 
ZEND_ACC_PUBLIC)
	SPL_ME(Array, natcasesort,   arginfo_array_void,             ZEND_ACC_PUBLIC)
	SPL_ME(Array, unserialize,   arginfo_array_unserialize,      ZEND_ACC_PUBLIC)
	SPL_ME(Array, serialize,     arginfo_array_void,             ZEND_ACC_PUBLIC)
	/* ArrayIterator specific */
	SPL_ME(Array, rewind,        arginfo_array_void,             ZEND_ACC_PUBLIC)
	SPL_ME(Array, current,       arginfo_array_void,             ZEND_ACC_PUBLIC)
	SPL_ME(Array, key,           arginfo_array_void,             ZEND_ACC_PUBLIC)
	SPL_ME(Array, next,          arginfo_array_void,             ZEND_ACC_PUBLIC)
	SPL_ME(Array, valid,         arginfo_array_void,             ZEND_ACC_PUBLIC)
	SPL_ME(Array, seek,          arginfo_array_seek,             ZEND_ACC_PUBLIC)
	PHP_FE_END
};

/* RecursiveArrayIterator only adds the two recursion methods. */
static const zend_function_entry spl_funcs_RecursiveArrayIterator[] = {
	SPL_ME(Array, hasChildren,   arginfo_array_void,             ZEND_ACC_PUBLIC)
	SPL_ME(Array, getChildren,   arginfo_array_void,             ZEND_ACC_PUBLIC)
	PHP_FE_END
};
/* }}} */

/* {{{ PHP_MINIT_FUNCTION(spl_array)
 * Register the three classes, their interfaces, custom object handlers
 * and class constants at module startup. */
PHP_MINIT_FUNCTION(spl_array)
{
	REGISTER_SPL_STD_CLASS_EX(ArrayObject, spl_array_object_new, spl_funcs_ArrayObject);
	REGISTER_SPL_IMPLEMENTS(ArrayObject, Aggregate);
	REGISTER_SPL_IMPLEMENTS(ArrayObject, ArrayAccess);
	REGISTER_SPL_IMPLEMENTS(ArrayObject, Serializable);
	REGISTER_SPL_IMPLEMENTS(ArrayObject, Countable);
	memcpy(&spl_handler_ArrayObject, zend_get_std_object_handlers(), sizeof(zend_object_handlers));

	spl_handler_ArrayObject.clone_obj = spl_array_object_clone;
	spl_handler_ArrayObject.read_dimension = spl_array_read_dimension;
	spl_handler_ArrayObject.write_dimension = spl_array_write_dimension;
	spl_handler_ArrayObject.unset_dimension = spl_array_unset_dimension;
	spl_handler_ArrayObject.has_dimension = spl_array_has_dimension;
	spl_handler_ArrayObject.count_elements = spl_array_object_count_elements;

	spl_handler_ArrayObject.get_properties = spl_array_get_properties;
	spl_handler_ArrayObject.get_debug_info = spl_array_get_debug_info;
	spl_handler_ArrayObject.get_gc = spl_array_get_gc;
	spl_handler_ArrayObject.read_property = spl_array_read_property;
	spl_handler_ArrayObject.write_property = 
spl_array_write_property;
	spl_handler_ArrayObject.get_property_ptr_ptr = spl_array_get_property_ptr_ptr;
	spl_handler_ArrayObject.has_property = spl_array_has_property;
	spl_handler_ArrayObject.unset_property = spl_array_unset_property;
	spl_handler_ArrayObject.compare_objects = spl_array_compare_objects;

	REGISTER_SPL_STD_CLASS_EX(ArrayIterator, spl_array_object_new, spl_funcs_ArrayIterator);
	REGISTER_SPL_IMPLEMENTS(ArrayIterator, Iterator);
	REGISTER_SPL_IMPLEMENTS(ArrayIterator, ArrayAccess);
	REGISTER_SPL_IMPLEMENTS(ArrayIterator, SeekableIterator);
	REGISTER_SPL_IMPLEMENTS(ArrayIterator, Serializable);
	REGISTER_SPL_IMPLEMENTS(ArrayIterator, Countable);
	/* ArrayIterator inherits ArrayObject's handlers plus a get_iterator */
	memcpy(&spl_handler_ArrayIterator, &spl_handler_ArrayObject, sizeof(zend_object_handlers));
	spl_ce_ArrayIterator->get_iterator = spl_array_get_iterator;

	REGISTER_SPL_SUB_CLASS_EX(RecursiveArrayIterator, ArrayIterator, spl_array_object_new, spl_funcs_RecursiveArrayIterator);
	REGISTER_SPL_IMPLEMENTS(RecursiveArrayIterator, RecursiveIterator);
	spl_ce_RecursiveArrayIterator->get_iterator = spl_array_get_iterator;

	REGISTER_SPL_CLASS_CONST_LONG(ArrayObject,   "STD_PROP_LIST",  SPL_ARRAY_STD_PROP_LIST);
	REGISTER_SPL_CLASS_CONST_LONG(ArrayObject,   "ARRAY_AS_PROPS", SPL_ARRAY_ARRAY_AS_PROPS);

	REGISTER_SPL_CLASS_CONST_LONG(ArrayIterator, "STD_PROP_LIST",  SPL_ARRAY_STD_PROP_LIST);
	REGISTER_SPL_CLASS_CONST_LONG(ArrayIterator, "ARRAY_AS_PROPS", SPL_ARRAY_ARRAY_AS_PROPS);

	REGISTER_SPL_CLASS_CONST_LONG(RecursiveArrayIterator, "CHILD_ARRAYS_ONLY", SPL_ARRAY_CHILD_ARRAYS_ONLY);

	return SUCCESS;
}
/* }}} */

/*
 * Local variables:
 * tab-width: 4
 * c-basic-offset: 4
 * End:
 * vim600: fdm=marker
 * vim: noet sw=4 ts=4
 */
/* +----------------------------------------------------------------------+ | PHP Version 5 | +----------------------------------------------------------------------+ | Copyright (c) 1997-2016 The PHP Group | +----------------------------------------------------------------------+ | This source file is subject to version 3.01 of the PHP license, | | that is bundled with this package in the file LICENSE, and is | | available through the world-wide-web at the following url: | | http://www.php.net/license/3_01.txt | | If you did not receive a copy of the PHP license and are unable to | | obtain it through the world-wide-web, please send a note to | | license@php.net so we can mail you a copy immediately. | +----------------------------------------------------------------------+ | Authors: Marcus Boerger <helly@php.net> | +----------------------------------------------------------------------+ */ /* $Id$ */ #ifdef HAVE_CONFIG_H # include "config.h" #endif #include "php.h" #include "php_ini.h" #include "ext/standard/info.h" #include "ext/standard/php_var.h" #include "ext/standard/php_smart_str.h" #include "zend_interfaces.h" #include "zend_exceptions.h" #include "php_spl.h" #include "spl_functions.h" #include "spl_engine.h" #include "spl_iterators.h" #include "spl_array.h" #include "spl_exceptions.h" zend_object_handlers spl_handler_ArrayObject; PHPAPI zend_class_entry *spl_ce_ArrayObject; zend_object_handlers spl_handler_ArrayIterator; PHPAPI zend_class_entry *spl_ce_ArrayIterator; PHPAPI zend_class_entry *spl_ce_RecursiveArrayIterator; #define SPL_ARRAY_STD_PROP_LIST 0x00000001 #define SPL_ARRAY_ARRAY_AS_PROPS 0x00000002 #define SPL_ARRAY_CHILD_ARRAYS_ONLY 0x00000004 #define SPL_ARRAY_OVERLOADED_REWIND 0x00010000 #define SPL_ARRAY_OVERLOADED_VALID 0x00020000 #define SPL_ARRAY_OVERLOADED_KEY 0x00040000 #define SPL_ARRAY_OVERLOADED_CURRENT 0x00080000 #define SPL_ARRAY_OVERLOADED_NEXT 0x00100000 #define SPL_ARRAY_IS_REF 0x01000000 #define SPL_ARRAY_IS_SELF 0x02000000 
#define SPL_ARRAY_USE_OTHER 0x04000000 #define SPL_ARRAY_INT_MASK 0xFFFF0000 #define SPL_ARRAY_CLONE_MASK 0x0300FFFF #define SPL_ARRAY_METHOD_NO_ARG 0 #define SPL_ARRAY_METHOD_USE_ARG 1 #define SPL_ARRAY_METHOD_MAY_USER_ARG 2 typedef struct _spl_array_object { zend_object std; zval *array; zval *retval; HashPosition pos; ulong pos_h; int ar_flags; int is_self; zend_function *fptr_offset_get; zend_function *fptr_offset_set; zend_function *fptr_offset_has; zend_function *fptr_offset_del; zend_function *fptr_count; zend_class_entry* ce_get_iterator; HashTable *debug_info; unsigned char nApplyCount; } spl_array_object; static inline HashTable *spl_array_get_hash_table(spl_array_object* intern, int check_std_props TSRMLS_DC) { /* {{{ */ if ((intern->ar_flags & SPL_ARRAY_IS_SELF) != 0) { if (!intern->std.properties) { rebuild_object_properties(&intern->std); } return intern->std.properties; } else if ((intern->ar_flags & SPL_ARRAY_USE_OTHER) && (check_std_props == 0 || (intern->ar_flags & SPL_ARRAY_STD_PROP_LIST) == 0) && Z_TYPE_P(intern->array) == IS_OBJECT) { spl_array_object *other = (spl_array_object*)zend_object_store_get_object(intern->array TSRMLS_CC); return spl_array_get_hash_table(other, check_std_props TSRMLS_CC); } else if ((intern->ar_flags & ((check_std_props ? 
SPL_ARRAY_STD_PROP_LIST : 0) | SPL_ARRAY_IS_SELF)) != 0) { if (!intern->std.properties) { rebuild_object_properties(&intern->std); } return intern->std.properties; } else { return HASH_OF(intern->array); } } /* }}} */ static void spl_array_rewind(spl_array_object *intern TSRMLS_DC); static void spl_array_update_pos(spl_array_object* intern) /* {{{ */ { Bucket *pos = intern->pos; if (pos != NULL) { intern->pos_h = pos->h; } } /* }}} */ static void spl_array_set_pos(spl_array_object* intern, HashPosition pos) /* {{{ */ { intern->pos = pos; spl_array_update_pos(intern); } /* }}} */ SPL_API int spl_hash_verify_pos_ex(spl_array_object * intern, HashTable * ht TSRMLS_DC) /* {{{ */ { Bucket *p; /* IS_CONSISTENT(ht);*/ /* HASH_PROTECT_RECURSION(ht);*/ p = ht->arBuckets[intern->pos_h & ht->nTableMask]; while (p != NULL) { if (p == intern->pos) { return SUCCESS; } p = p->pNext; } /* HASH_UNPROTECT_RECURSION(ht); */ spl_array_rewind(intern TSRMLS_CC); return FAILURE; } /* }}} */ SPL_API int spl_hash_verify_pos(spl_array_object * intern TSRMLS_DC) /* {{{ */ { HashTable *ht = spl_array_get_hash_table(intern, 0 TSRMLS_CC); return spl_hash_verify_pos_ex(intern, ht TSRMLS_CC); } /* }}} */ /* {{{ spl_array_object_free_storage */ static void spl_array_object_free_storage(void *object TSRMLS_DC) { spl_array_object *intern = (spl_array_object *)object; zend_object_std_dtor(&intern->std TSRMLS_CC); zval_ptr_dtor(&intern->array); zval_ptr_dtor(&intern->retval); if (intern->debug_info != NULL) { zend_hash_destroy(intern->debug_info); efree(intern->debug_info); } efree(object); } /* }}} */ zend_object_iterator *spl_array_get_iterator(zend_class_entry *ce, zval *object, int by_ref TSRMLS_DC); /* {{{ spl_array_object_new_ex */ static zend_object_value spl_array_object_new_ex(zend_class_entry *class_type, spl_array_object **obj, zval *orig, int clone_orig TSRMLS_DC) { zend_object_value retval = {0}; spl_array_object *intern; zval *tmp; zend_class_entry * parent = class_type; int inherited = 
0; intern = emalloc(sizeof(spl_array_object)); memset(intern, 0, sizeof(spl_array_object)); *obj = intern; ALLOC_INIT_ZVAL(intern->retval); zend_object_std_init(&intern->std, class_type TSRMLS_CC); object_properties_init(&intern->std, class_type); intern->ar_flags = 0; intern->debug_info = NULL; intern->ce_get_iterator = spl_ce_ArrayIterator; if (orig) { spl_array_object *other = (spl_array_object*)zend_object_store_get_object(orig TSRMLS_CC); intern->ar_flags &= ~ SPL_ARRAY_CLONE_MASK; intern->ar_flags |= (other->ar_flags & SPL_ARRAY_CLONE_MASK); intern->ce_get_iterator = other->ce_get_iterator; if (clone_orig) { intern->array = other->array; if (Z_OBJ_HT_P(orig) == &spl_handler_ArrayObject) { MAKE_STD_ZVAL(intern->array); array_init(intern->array); zend_hash_copy(HASH_OF(intern->array), HASH_OF(other->array), (copy_ctor_func_t) zval_add_ref, &tmp, sizeof(zval*)); } if (Z_OBJ_HT_P(orig) == &spl_handler_ArrayIterator) { Z_ADDREF_P(other->array); } } else { intern->array = orig; Z_ADDREF_P(intern->array); intern->ar_flags |= SPL_ARRAY_IS_REF | SPL_ARRAY_USE_OTHER; } } else { MAKE_STD_ZVAL(intern->array); array_init(intern->array); intern->ar_flags &= ~SPL_ARRAY_IS_REF; } retval.handle = zend_objects_store_put(intern, (zend_objects_store_dtor_t)zend_objects_destroy_object, (zend_objects_free_object_storage_t) spl_array_object_free_storage, NULL TSRMLS_CC); while (parent) { if (parent == spl_ce_ArrayIterator || parent == spl_ce_RecursiveArrayIterator) { retval.handlers = &spl_handler_ArrayIterator; class_type->get_iterator = spl_array_get_iterator; break; } else if (parent == spl_ce_ArrayObject) { retval.handlers = &spl_handler_ArrayObject; break; } parent = parent->parent; inherited = 1; } if (!parent) { /* this must never happen */ php_error_docref(NULL TSRMLS_CC, E_COMPILE_ERROR, "Internal compiler error, Class is not child of ArrayObject or ArrayIterator"); } if (inherited) { zend_hash_find(&class_type->function_table, "offsetget", sizeof("offsetget"), (void **) 
&intern->fptr_offset_get); if (intern->fptr_offset_get->common.scope == parent) { intern->fptr_offset_get = NULL; } zend_hash_find(&class_type->function_table, "offsetset", sizeof("offsetset"), (void **) &intern->fptr_offset_set); if (intern->fptr_offset_set->common.scope == parent) { intern->fptr_offset_set = NULL; } zend_hash_find(&class_type->function_table, "offsetexists", sizeof("offsetexists"), (void **) &intern->fptr_offset_has); if (intern->fptr_offset_has->common.scope == parent) { intern->fptr_offset_has = NULL; } zend_hash_find(&class_type->function_table, "offsetunset", sizeof("offsetunset"), (void **) &intern->fptr_offset_del); if (intern->fptr_offset_del->common.scope == parent) { intern->fptr_offset_del = NULL; } zend_hash_find(&class_type->function_table, "count", sizeof("count"), (void **) &intern->fptr_count); if (intern->fptr_count->common.scope == parent) { intern->fptr_count = NULL; } } /* Cache iterator functions if ArrayIterator or derived. Check current's */ /* cache since only current is always required */ if (retval.handlers == &spl_handler_ArrayIterator) { if (!class_type->iterator_funcs.zf_current) { zend_hash_find(&class_type->function_table, "rewind", sizeof("rewind"), (void **) &class_type->iterator_funcs.zf_rewind); zend_hash_find(&class_type->function_table, "valid", sizeof("valid"), (void **) &class_type->iterator_funcs.zf_valid); zend_hash_find(&class_type->function_table, "key", sizeof("key"), (void **) &class_type->iterator_funcs.zf_key); zend_hash_find(&class_type->function_table, "current", sizeof("current"), (void **) &class_type->iterator_funcs.zf_current); zend_hash_find(&class_type->function_table, "next", sizeof("next"), (void **) &class_type->iterator_funcs.zf_next); } if (inherited) { if (class_type->iterator_funcs.zf_rewind->common.scope != parent) intern->ar_flags |= SPL_ARRAY_OVERLOADED_REWIND; if (class_type->iterator_funcs.zf_valid->common.scope != parent) intern->ar_flags |= SPL_ARRAY_OVERLOADED_VALID; if 
(class_type->iterator_funcs.zf_key->common.scope != parent) intern->ar_flags |= SPL_ARRAY_OVERLOADED_KEY; if (class_type->iterator_funcs.zf_current->common.scope != parent) intern->ar_flags |= SPL_ARRAY_OVERLOADED_CURRENT; if (class_type->iterator_funcs.zf_next->common.scope != parent) intern->ar_flags |= SPL_ARRAY_OVERLOADED_NEXT; } } spl_array_rewind(intern TSRMLS_CC); return retval; } /* }}} */ /* {{{ spl_array_object_new */ static zend_object_value spl_array_object_new(zend_class_entry *class_type TSRMLS_DC) { spl_array_object *tmp; return spl_array_object_new_ex(class_type, &tmp, NULL, 0 TSRMLS_CC); } /* }}} */ /* {{{ spl_array_object_clone */ static zend_object_value spl_array_object_clone(zval *zobject TSRMLS_DC) { zend_object_value new_obj_val; zend_object *old_object; zend_object *new_object; zend_object_handle handle = Z_OBJ_HANDLE_P(zobject); spl_array_object *intern; old_object = zend_objects_get_address(zobject TSRMLS_CC); new_obj_val = spl_array_object_new_ex(old_object->ce, &intern, zobject, 1 TSRMLS_CC); new_object = &intern->std; zend_objects_clone_members(new_object, new_obj_val, old_object, handle TSRMLS_CC); return new_obj_val; } /* }}} */ static zval **spl_array_get_dimension_ptr_ptr(int check_inherited, zval *object, zval *offset, int type TSRMLS_DC) /* {{{ */ { spl_array_object *intern = (spl_array_object*)zend_object_store_get_object(object TSRMLS_CC); zval **retval; char *key; uint len; long index; HashTable *ht = spl_array_get_hash_table(intern, 0 TSRMLS_CC); if (!offset || !ht) { return &EG(uninitialized_zval_ptr); } if ((type == BP_VAR_W || type == BP_VAR_RW) && (ht->nApplyCount > 0)) { zend_error(E_WARNING, "Modification of ArrayObject during sorting is prohibited"); return &EG(error_zval_ptr);; } switch (Z_TYPE_P(offset)) { case IS_STRING: key = Z_STRVAL_P(offset); len = Z_STRLEN_P(offset) + 1; string_offest: if (zend_symtable_find(ht, key, len, (void **) &retval) == FAILURE) { switch (type) { case BP_VAR_R: zend_error(E_NOTICE, 
"Undefined index: %s", key); case BP_VAR_UNSET: case BP_VAR_IS: retval = &EG(uninitialized_zval_ptr); break; case BP_VAR_RW: zend_error(E_NOTICE,"Undefined index: %s", key); case BP_VAR_W: { zval *value; ALLOC_INIT_ZVAL(value); zend_symtable_update(ht, key, len, (void**)&value, sizeof(void*), (void **)&retval); } } } return retval; case IS_NULL: key = ""; len = 1; goto string_offest; case IS_RESOURCE: zend_error(E_STRICT, "Resource ID#%ld used as offset, casting to integer (%ld)", Z_LVAL_P(offset), Z_LVAL_P(offset)); case IS_DOUBLE: case IS_BOOL: case IS_LONG: if (offset->type == IS_DOUBLE) { index = (long)Z_DVAL_P(offset); } else { index = Z_LVAL_P(offset); } if (zend_hash_index_find(ht, index, (void **) &retval) == FAILURE) { switch (type) { case BP_VAR_R: zend_error(E_NOTICE, "Undefined offset: %ld", index); case BP_VAR_UNSET: case BP_VAR_IS: retval = &EG(uninitialized_zval_ptr); break; case BP_VAR_RW: zend_error(E_NOTICE, "Undefined offset: %ld", index); case BP_VAR_W: { zval *value; ALLOC_INIT_ZVAL(value); zend_hash_index_update(ht, index, (void**)&value, sizeof(void*), (void **)&retval); } } } return retval; default: zend_error(E_WARNING, "Illegal offset type"); return (type == BP_VAR_W || type == BP_VAR_RW) ? 
&EG(error_zval_ptr) : &EG(uninitialized_zval_ptr); } } /* }}} */ static zval *spl_array_read_dimension_ex(int check_inherited, zval *object, zval *offset, int type TSRMLS_DC) /* {{{ */ { zval **ret; if (check_inherited) { spl_array_object *intern = (spl_array_object*)zend_object_store_get_object(object TSRMLS_CC); if (intern->fptr_offset_get) { zval *rv; if (!offset) { ALLOC_INIT_ZVAL(offset); } else { SEPARATE_ARG_IF_REF(offset); } zend_call_method_with_1_params(&object, Z_OBJCE_P(object), &intern->fptr_offset_get, "offsetGet", &rv, offset); zval_ptr_dtor(&offset); if (rv) { zval_ptr_dtor(&intern->retval); MAKE_STD_ZVAL(intern->retval); ZVAL_ZVAL(intern->retval, rv, 1, 1); return intern->retval; } return EG(uninitialized_zval_ptr); } } ret = spl_array_get_dimension_ptr_ptr(check_inherited, object, offset, type TSRMLS_CC); /* When in a write context, * ZE has to be fooled into thinking this is in a reference set * by separating (if necessary) and returning as an is_ref=1 zval (even if refcount == 1) */ if ((type == BP_VAR_W || type == BP_VAR_RW || type == BP_VAR_UNSET) && !Z_ISREF_PP(ret) && ret != &EG(uninitialized_zval_ptr)) { if (Z_REFCOUNT_PP(ret) > 1) { zval *newval; /* Separate */ MAKE_STD_ZVAL(newval); *newval = **ret; zval_copy_ctor(newval); Z_SET_REFCOUNT_P(newval, 1); /* Replace */ Z_DELREF_PP(ret); *ret = newval; } Z_SET_ISREF_PP(ret); } return *ret; } /* }}} */ static zval *spl_array_read_dimension(zval *object, zval *offset, int type TSRMLS_DC) /* {{{ */ { return spl_array_read_dimension_ex(1, object, offset, type TSRMLS_CC); } /* }}} */ static void spl_array_write_dimension_ex(int check_inherited, zval *object, zval *offset, zval *value TSRMLS_DC) /* {{{ */ { spl_array_object *intern = (spl_array_object*)zend_object_store_get_object(object TSRMLS_CC); long index; HashTable *ht; if (check_inherited && intern->fptr_offset_set) { if (!offset) { ALLOC_INIT_ZVAL(offset); } else { SEPARATE_ARG_IF_REF(offset); } zend_call_method_with_2_params(&object, 
Z_OBJCE_P(object), &intern->fptr_offset_set, "offsetSet", NULL, offset, value); zval_ptr_dtor(&offset); return; } if (!offset) { ht = spl_array_get_hash_table(intern, 0 TSRMLS_CC); if (ht->nApplyCount > 0) { zend_error(E_WARNING, "Modification of ArrayObject during sorting is prohibited"); return; } Z_ADDREF_P(value); zend_hash_next_index_insert(ht, (void**)&value, sizeof(void*), NULL); return; } switch(Z_TYPE_P(offset)) { case IS_STRING: ht = spl_array_get_hash_table(intern, 0 TSRMLS_CC); if (ht->nApplyCount > 0) { zend_error(E_WARNING, "Modification of ArrayObject during sorting is prohibited"); return; } Z_ADDREF_P(value); zend_symtable_update(ht, Z_STRVAL_P(offset), Z_STRLEN_P(offset)+1, (void**)&value, sizeof(void*), NULL); return; case IS_DOUBLE: case IS_RESOURCE: case IS_BOOL: case IS_LONG: ht = spl_array_get_hash_table(intern, 0 TSRMLS_CC); if (ht->nApplyCount > 0) { zend_error(E_WARNING, "Modification of ArrayObject during sorting is prohibited"); return; } if (offset->type == IS_DOUBLE) { index = (long)Z_DVAL_P(offset); } else { index = Z_LVAL_P(offset); } Z_ADDREF_P(value); zend_hash_index_update(ht, index, (void**)&value, sizeof(void*), NULL); return; case IS_NULL: ht = spl_array_get_hash_table(intern, 0 TSRMLS_CC); if (ht->nApplyCount > 0) { zend_error(E_WARNING, "Modification of ArrayObject during sorting is prohibited"); return; } Z_ADDREF_P(value); zend_hash_next_index_insert(ht, (void**)&value, sizeof(void*), NULL); return; default: zend_error(E_WARNING, "Illegal offset type"); return; } } /* }}} */ static void spl_array_write_dimension(zval *object, zval *offset, zval *value TSRMLS_DC) /* {{{ */ { spl_array_write_dimension_ex(1, object, offset, value TSRMLS_CC); } /* }}} */ static void spl_array_unset_dimension_ex(int check_inherited, zval *object, zval *offset TSRMLS_DC) /* {{{ */ { spl_array_object *intern = (spl_array_object*)zend_object_store_get_object(object TSRMLS_CC); long index; HashTable *ht; if (check_inherited && 
intern->fptr_offset_del) { SEPARATE_ARG_IF_REF(offset); zend_call_method_with_1_params(&object, Z_OBJCE_P(object), &intern->fptr_offset_del, "offsetUnset", NULL, offset); zval_ptr_dtor(&offset); return; } switch(Z_TYPE_P(offset)) { case IS_STRING: ht = spl_array_get_hash_table(intern, 0 TSRMLS_CC); if (ht->nApplyCount > 0) { zend_error(E_WARNING, "Modification of ArrayObject during sorting is prohibited"); return; } if (ht == &EG(symbol_table)) { if (zend_delete_global_variable(Z_STRVAL_P(offset), Z_STRLEN_P(offset) TSRMLS_CC)) { zend_error(E_NOTICE,"Undefined index: %s", Z_STRVAL_P(offset)); } } else { if (zend_symtable_del(ht, Z_STRVAL_P(offset), Z_STRLEN_P(offset)+1) == FAILURE) { zend_error(E_NOTICE,"Undefined index: %s", Z_STRVAL_P(offset)); } else { spl_array_object *obj = intern; while (1) { if ((obj->ar_flags & SPL_ARRAY_IS_SELF) != 0) { break; } else if (Z_TYPE_P(obj->array) == IS_OBJECT) { if ((obj->ar_flags & SPL_ARRAY_USE_OTHER) == 0) { obj = (spl_array_object*)zend_object_store_get_object(obj->array TSRMLS_CC); break; } else { obj = (spl_array_object*)zend_object_store_get_object(obj->array TSRMLS_CC); } } else { obj = NULL; break; } } if (obj) { zend_property_info *property_info = zend_get_property_info(obj->std.ce, offset, 1 TSRMLS_CC); if (property_info && (property_info->flags & ZEND_ACC_STATIC) == 0 && property_info->offset >= 0) { obj->std.properties_table[property_info->offset] = NULL; } } } } break; case IS_DOUBLE: case IS_RESOURCE: case IS_BOOL: case IS_LONG: if (offset->type == IS_DOUBLE) { index = (long)Z_DVAL_P(offset); } else { index = Z_LVAL_P(offset); } ht = spl_array_get_hash_table(intern, 0 TSRMLS_CC); if (ht->nApplyCount > 0) { zend_error(E_WARNING, "Modification of ArrayObject during sorting is prohibited"); return; } if (zend_hash_index_del(ht, index) == FAILURE) { zend_error(E_NOTICE,"Undefined offset: %ld", Z_LVAL_P(offset)); } break; default: zend_error(E_WARNING, "Illegal offset type"); return; } spl_hash_verify_pos(intern 
TSRMLS_CC); /* call rewind on FAILURE */ } /* }}} */ static void spl_array_unset_dimension(zval *object, zval *offset TSRMLS_DC) /* {{{ */ { spl_array_unset_dimension_ex(1, object, offset TSRMLS_CC); } /* }}} */ static int spl_array_has_dimension_ex(int check_inherited, zval *object, zval *offset, int check_empty TSRMLS_DC) /* {{{ */ { spl_array_object *intern = (spl_array_object*)zend_object_store_get_object(object TSRMLS_CC); long index; zval *rv, *value = NULL, **tmp; if (check_inherited && intern->fptr_offset_has) { zval *offset_tmp = offset; SEPARATE_ARG_IF_REF(offset_tmp); zend_call_method_with_1_params(&object, Z_OBJCE_P(object), &intern->fptr_offset_has, "offsetExists", &rv, offset_tmp); zval_ptr_dtor(&offset_tmp); if (rv && zend_is_true(rv)) { zval_ptr_dtor(&rv); if (check_empty != 1) { return 1; } else if (intern->fptr_offset_get) { value = spl_array_read_dimension_ex(1, object, offset, BP_VAR_R TSRMLS_CC); } } else { if (rv) { zval_ptr_dtor(&rv); } return 0; } } if (!value) { HashTable *ht = spl_array_get_hash_table(intern, 0 TSRMLS_CC); switch(Z_TYPE_P(offset)) { case IS_STRING: if (zend_symtable_find(ht, Z_STRVAL_P(offset), Z_STRLEN_P(offset)+1, (void **) &tmp) != FAILURE) { if (check_empty == 2) { return 1; } } else { return 0; } break; case IS_DOUBLE: case IS_RESOURCE: case IS_BOOL: case IS_LONG: if (offset->type == IS_DOUBLE) { index = (long)Z_DVAL_P(offset); } else { index = Z_LVAL_P(offset); } if (zend_hash_index_find(ht, index, (void **)&tmp) != FAILURE) { if (check_empty == 2) { return 1; } } else { return 0; } break; default: zend_error(E_WARNING, "Illegal offset type"); return 0; } if (check_empty && check_inherited && intern->fptr_offset_get) { value = spl_array_read_dimension_ex(1, object, offset, BP_VAR_R TSRMLS_CC); } else { value = *tmp; } } return check_empty ? 
zend_is_true(value) : Z_TYPE_P(value) != IS_NULL; } /* }}} */ static int spl_array_has_dimension(zval *object, zval *offset, int check_empty TSRMLS_DC) /* {{{ */ { return spl_array_has_dimension_ex(1, object, offset, check_empty TSRMLS_CC); } /* }}} */ /* {{{ spl_array_object_verify_pos_ex */ static inline int spl_array_object_verify_pos_ex(spl_array_object *object, HashTable *ht, const char *msg_prefix TSRMLS_DC) { if (!ht) { php_error_docref(NULL TSRMLS_CC, E_NOTICE, "%sArray was modified outside object and is no longer an array", msg_prefix); return FAILURE; } if (object->pos && (object->ar_flags & SPL_ARRAY_IS_REF) && spl_hash_verify_pos_ex(object, ht TSRMLS_CC) == FAILURE) { php_error_docref(NULL TSRMLS_CC, E_NOTICE, "%sArray was modified outside object and internal position is no longer valid", msg_prefix); return FAILURE; } return SUCCESS; } /* }}} */ /* {{{ spl_array_object_verify_pos */ static inline int spl_array_object_verify_pos(spl_array_object *object, HashTable *ht TSRMLS_DC) { return spl_array_object_verify_pos_ex(object, ht, "" TSRMLS_CC); } /* }}} */ /* {{{ proto bool ArrayObject::offsetExists(mixed $index) proto bool ArrayIterator::offsetExists(mixed $index) Returns whether the requested $index exists. */ SPL_METHOD(Array, offsetExists) { zval *index; if (zend_parse_parameters(ZEND_NUM_ARGS() TSRMLS_CC, "z", &index) == FAILURE) { return; } RETURN_BOOL(spl_array_has_dimension_ex(0, getThis(), index, 2 TSRMLS_CC)); } /* }}} */ /* {{{ proto mixed ArrayObject::offsetGet(mixed $index) proto mixed ArrayIterator::offsetGet(mixed $index) Returns the value at the specified $index. 
*/ SPL_METHOD(Array, offsetGet) { zval *index, *value; if (zend_parse_parameters(ZEND_NUM_ARGS() TSRMLS_CC, "z", &index) == FAILURE) { return; } value = spl_array_read_dimension_ex(0, getThis(), index, BP_VAR_R TSRMLS_CC); RETURN_ZVAL(value, 1, 0); } /* }}} */ /* {{{ proto void ArrayObject::offsetSet(mixed $index, mixed $newval) proto void ArrayIterator::offsetSet(mixed $index, mixed $newval) Sets the value at the specified $index to $newval. */ SPL_METHOD(Array, offsetSet) { zval *index, *value; if (zend_parse_parameters(ZEND_NUM_ARGS() TSRMLS_CC, "zz", &index, &value) == FAILURE) { return; } spl_array_write_dimension_ex(0, getThis(), index, value TSRMLS_CC); } /* }}} */ void spl_array_iterator_append(zval *object, zval *append_value TSRMLS_DC) /* {{{ */ { spl_array_object *intern = (spl_array_object*)zend_object_store_get_object(object TSRMLS_CC); HashTable *aht = spl_array_get_hash_table(intern, 0 TSRMLS_CC); if (!aht) { php_error_docref(NULL TSRMLS_CC, E_NOTICE, "Array was modified outside object and is no longer an array"); return; } if (Z_TYPE_P(intern->array) == IS_OBJECT) { php_error_docref(NULL TSRMLS_CC, E_RECOVERABLE_ERROR, "Cannot append properties to objects, use %s::offsetSet() instead", Z_OBJCE_P(object)->name); return; } spl_array_write_dimension(object, NULL, append_value TSRMLS_CC); if (!intern->pos) { spl_array_set_pos(intern, aht->pListTail); } } /* }}} */ /* {{{ proto void ArrayObject::append(mixed $newval) proto void ArrayIterator::append(mixed $newval) Appends the value (cannot be called for objects). */ SPL_METHOD(Array, append) { zval *value; if (zend_parse_parameters(ZEND_NUM_ARGS() TSRMLS_CC, "z", &value) == FAILURE) { return; } spl_array_iterator_append(getThis(), value TSRMLS_CC); } /* }}} */ /* {{{ proto void ArrayObject::offsetUnset(mixed $index) proto void ArrayIterator::offsetUnset(mixed $index) Unsets the value at the specified $index. 
*/ SPL_METHOD(Array, offsetUnset) { zval *index; if (zend_parse_parameters(ZEND_NUM_ARGS() TSRMLS_CC, "z", &index) == FAILURE) { return; } spl_array_unset_dimension_ex(0, getThis(), index TSRMLS_CC); } /* }}} */ /* {{{ proto array ArrayObject::getArrayCopy() proto array ArrayIterator::getArrayCopy() Return a copy of the contained array */ SPL_METHOD(Array, getArrayCopy) { zval *object = getThis(), *tmp; spl_array_object *intern = (spl_array_object*)zend_object_store_get_object(object TSRMLS_CC); array_init(return_value); zend_hash_copy(HASH_OF(return_value), spl_array_get_hash_table(intern, 0 TSRMLS_CC), (copy_ctor_func_t) zval_add_ref, &tmp, sizeof(zval*)); } /* }}} */ static HashTable *spl_array_get_properties(zval *object TSRMLS_DC) /* {{{ */ { spl_array_object *intern = (spl_array_object*)zend_object_store_get_object(object TSRMLS_CC); HashTable *result; if (intern->nApplyCount > 1) { php_error_docref(NULL TSRMLS_CC, E_ERROR, "Nesting level too deep - recursive dependency?"); } intern->nApplyCount++; result = spl_array_get_hash_table(intern, 1 TSRMLS_CC); intern->nApplyCount--; return result; } /* }}} */ static HashTable* spl_array_get_debug_info(zval *obj, int *is_temp TSRMLS_DC) /* {{{ */ { spl_array_object *intern = (spl_array_object*)zend_object_store_get_object(obj TSRMLS_CC); zval *tmp, *storage; int name_len; char *zname; zend_class_entry *base; *is_temp = 0; if (!intern->std.properties) { rebuild_object_properties(&intern->std); } if (HASH_OF(intern->array) == intern->std.properties) { return intern->std.properties; } else { if (intern->debug_info == NULL) { ALLOC_HASHTABLE(intern->debug_info); ZEND_INIT_SYMTABLE_EX(intern->debug_info, zend_hash_num_elements(intern->std.properties) + 1, 0); } if (intern->debug_info->nApplyCount == 0) { zend_hash_clean(intern->debug_info); zend_hash_copy(intern->debug_info, intern->std.properties, (copy_ctor_func_t) zval_add_ref, (void *) &tmp, sizeof(zval *)); storage = intern->array; zval_add_ref(&storage); base = 
(Z_OBJ_HT_P(obj) == &spl_handler_ArrayIterator) ? spl_ce_ArrayIterator : spl_ce_ArrayObject; zname = spl_gen_private_prop_name(base, "storage", sizeof("storage")-1, &name_len TSRMLS_CC); zend_symtable_update(intern->debug_info, zname, name_len+1, &storage, sizeof(zval *), NULL); efree(zname); } return intern->debug_info; } } /* }}} */ static HashTable *spl_array_get_gc(zval *object, zval ***gc_data, int *gc_data_count TSRMLS_DC) /* {{{ */ { spl_array_object *intern = (spl_array_object*)zend_object_store_get_object(object TSRMLS_CC); *gc_data = &intern->array; *gc_data_count = 1; return zend_std_get_properties(object TSRMLS_CC); } /* }}} */ static zval *spl_array_read_property(zval *object, zval *member, int type, const zend_literal *key TSRMLS_DC) /* {{{ */ { spl_array_object *intern = (spl_array_object*)zend_object_store_get_object(object TSRMLS_CC); if ((intern->ar_flags & SPL_ARRAY_ARRAY_AS_PROPS) != 0 && !std_object_handlers.has_property(object, member, 2, key TSRMLS_CC)) { return spl_array_read_dimension(object, member, type TSRMLS_CC); } return std_object_handlers.read_property(object, member, type, key TSRMLS_CC); } /* }}} */ static void spl_array_write_property(zval *object, zval *member, zval *value, const zend_literal *key TSRMLS_DC) /* {{{ */ { spl_array_object *intern = (spl_array_object*)zend_object_store_get_object(object TSRMLS_CC); if ((intern->ar_flags & SPL_ARRAY_ARRAY_AS_PROPS) != 0 && !std_object_handlers.has_property(object, member, 2, key TSRMLS_CC)) { spl_array_write_dimension(object, member, value TSRMLS_CC); return; } std_object_handlers.write_property(object, member, value, key TSRMLS_CC); } /* }}} */ static zval **spl_array_get_property_ptr_ptr(zval *object, zval *member, int type, const zend_literal *key TSRMLS_DC) /* {{{ */ { spl_array_object *intern = (spl_array_object*)zend_object_store_get_object(object TSRMLS_CC); if ((intern->ar_flags & SPL_ARRAY_ARRAY_AS_PROPS) != 0 && !std_object_handlers.has_property(object, member, 2, key 
TSRMLS_CC)) { return spl_array_get_dimension_ptr_ptr(1, object, member, type TSRMLS_CC); } return std_object_handlers.get_property_ptr_ptr(object, member, type, key TSRMLS_CC); } /* }}} */ static int spl_array_has_property(zval *object, zval *member, int has_set_exists, const zend_literal *key TSRMLS_DC) /* {{{ */ { spl_array_object *intern = (spl_array_object*)zend_object_store_get_object(object TSRMLS_CC); if ((intern->ar_flags & SPL_ARRAY_ARRAY_AS_PROPS) != 0 && !std_object_handlers.has_property(object, member, 2, key TSRMLS_CC)) { return spl_array_has_dimension(object, member, has_set_exists TSRMLS_CC); } return std_object_handlers.has_property(object, member, has_set_exists, key TSRMLS_CC); } /* }}} */ static void spl_array_unset_property(zval *object, zval *member, const zend_literal *key TSRMLS_DC) /* {{{ */ { spl_array_object *intern = (spl_array_object*)zend_object_store_get_object(object TSRMLS_CC); if ((intern->ar_flags & SPL_ARRAY_ARRAY_AS_PROPS) != 0 && !std_object_handlers.has_property(object, member, 2, key TSRMLS_CC)) { spl_array_unset_dimension(object, member TSRMLS_CC); spl_array_rewind(intern TSRMLS_CC); /* because deletion might invalidate position */ return; } std_object_handlers.unset_property(object, member, key TSRMLS_CC); } /* }}} */ static int spl_array_compare_objects(zval *o1, zval *o2 TSRMLS_DC) /* {{{ */ { HashTable *ht1, *ht2; spl_array_object *intern1, *intern2; int result = 0; zval temp_zv; intern1 = (spl_array_object*)zend_object_store_get_object(o1 TSRMLS_CC); intern2 = (spl_array_object*)zend_object_store_get_object(o2 TSRMLS_CC); ht1 = spl_array_get_hash_table(intern1, 0 TSRMLS_CC); ht2 = spl_array_get_hash_table(intern2, 0 TSRMLS_CC); zend_compare_symbol_tables(&temp_zv, ht1, ht2 TSRMLS_CC); result = (int)Z_LVAL(temp_zv); /* if we just compared std.properties, don't do it again */ if (result == 0 && !(ht1 == intern1->std.properties && ht2 == intern2->std.properties)) { result = std_object_handlers.compare_objects(o1, o2 
TSRMLS_CC); } return result; } /* }}} */ static int spl_array_skip_protected(spl_array_object *intern, HashTable *aht TSRMLS_DC) /* {{{ */ { char *string_key; uint string_length; ulong num_key; if (Z_TYPE_P(intern->array) == IS_OBJECT) { do { if (zend_hash_get_current_key_ex(aht, &string_key, &string_length, &num_key, 0, &intern->pos) == HASH_KEY_IS_STRING) { /* zend_hash_get_current_key_ex() should never set * string_length to 0 when returning HASH_KEY_IS_STRING, but we * may as well be defensive and consider that successful. * Beyond that, we're looking for protected keys (which will * have a null byte at string_key[0]), but want to avoid * skipping completely empty keys (which will also have the * null byte, but a string_length of 1). */ if (!string_length || string_key[0] || string_length == 1) { return SUCCESS; } } else { return SUCCESS; } if (zend_hash_has_more_elements_ex(aht, &intern->pos) != SUCCESS) { return FAILURE; } zend_hash_move_forward_ex(aht, &intern->pos); spl_array_update_pos(intern); } while (1); } return FAILURE; } /* }}} */ static int spl_array_next_no_verify(spl_array_object *intern, HashTable *aht TSRMLS_DC) /* {{{ */ { zend_hash_move_forward_ex(aht, &intern->pos); spl_array_update_pos(intern); if (Z_TYPE_P(intern->array) == IS_OBJECT) { return spl_array_skip_protected(intern, aht TSRMLS_CC); } else { return zend_hash_has_more_elements_ex(aht, &intern->pos); } } /* }}} */ static int spl_array_next_ex(spl_array_object *intern, HashTable *aht TSRMLS_DC) /* {{{ */ { if ((intern->ar_flags & SPL_ARRAY_IS_REF) && spl_hash_verify_pos_ex(intern, aht TSRMLS_CC) == FAILURE) { php_error_docref(NULL TSRMLS_CC, E_NOTICE, "Array was modified outside object and internal position is no longer valid"); return FAILURE; } return spl_array_next_no_verify(intern, aht TSRMLS_CC); } /* }}} */ static int spl_array_next(spl_array_object *intern TSRMLS_DC) /* {{{ */ { HashTable *aht = spl_array_get_hash_table(intern, 0 TSRMLS_CC); return spl_array_next_ex(intern, aht 
TSRMLS_CC); } /* }}} */ /* define an overloaded iterator structure */ typedef struct { zend_user_iterator intern; spl_array_object *object; } spl_array_it; static void spl_array_it_dtor(zend_object_iterator *iter TSRMLS_DC) /* {{{ */ { spl_array_it *iterator = (spl_array_it *)iter; zend_user_it_invalidate_current(iter TSRMLS_CC); zval_ptr_dtor((zval**)&iterator->intern.it.data); efree(iterator); } /* }}} */ static int spl_array_it_valid(zend_object_iterator *iter TSRMLS_DC) /* {{{ */ { spl_array_it *iterator = (spl_array_it *)iter; spl_array_object *object = iterator->object; HashTable *aht = spl_array_get_hash_table(object, 0 TSRMLS_CC); if (object->ar_flags & SPL_ARRAY_OVERLOADED_VALID) { return zend_user_it_valid(iter TSRMLS_CC); } else { if (spl_array_object_verify_pos_ex(object, aht, "ArrayIterator::valid(): " TSRMLS_CC) == FAILURE) { return FAILURE; } return zend_hash_has_more_elements_ex(aht, &object->pos); } } /* }}} */ static void spl_array_it_get_current_data(zend_object_iterator *iter, zval ***data TSRMLS_DC) /* {{{ */ { spl_array_it *iterator = (spl_array_it *)iter; spl_array_object *object = iterator->object; HashTable *aht = spl_array_get_hash_table(object, 0 TSRMLS_CC); if (object->ar_flags & SPL_ARRAY_OVERLOADED_CURRENT) { zend_user_it_get_current_data(iter, data TSRMLS_CC); } else { if (zend_hash_get_current_data_ex(aht, (void**)data, &object->pos) == FAILURE) { *data = NULL; } } } /* }}} */ static void spl_array_it_get_current_key(zend_object_iterator *iter, zval *key TSRMLS_DC) /* {{{ */ { spl_array_it *iterator = (spl_array_it *)iter; spl_array_object *object = iterator->object; HashTable *aht = spl_array_get_hash_table(object, 0 TSRMLS_CC); if (object->ar_flags & SPL_ARRAY_OVERLOADED_KEY) { zend_user_it_get_current_key(iter, key TSRMLS_CC); } else { if (spl_array_object_verify_pos_ex(object, aht, "ArrayIterator::current(): " TSRMLS_CC) == FAILURE) { ZVAL_NULL(key); } else { zend_hash_get_current_key_zval_ex(aht, key, &object->pos); } } } /* }}} 
*/

/* Iterator handler: advance to the next element.
 * Dispatches to user-level next() when SPL_ARRAY_OVERLOADED_NEXT is set;
 * otherwise re-validates the position (for by-ref wrapped arrays) and steps. */
static void spl_array_it_move_forward(zend_object_iterator *iter TSRMLS_DC) /* {{{ */
{
	spl_array_it *iterator = (spl_array_it *)iter;
	spl_array_object *object = iterator->object;
	HashTable *aht = spl_array_get_hash_table(object, 0 TSRMLS_CC);

	if (object->ar_flags & SPL_ARRAY_OVERLOADED_NEXT) {
		zend_user_it_move_forward(iter TSRMLS_CC);
	} else {
		zend_user_it_invalidate_current(iter TSRMLS_CC);
		if (!aht) {
			/* backing value was replaced from outside and is no longer an array */
			php_error_docref(NULL TSRMLS_CC, E_NOTICE, "ArrayIterator::current(): Array was modified outside object and is no longer an array");
			return;
		}

		if ((object->ar_flags & SPL_ARRAY_IS_REF) && spl_hash_verify_pos_ex(object, aht TSRMLS_CC) == FAILURE) {
			php_error_docref(NULL TSRMLS_CC, E_NOTICE, "ArrayIterator::next(): Array was modified outside object and internal position is no longer valid");
		} else {
			spl_array_next_no_verify(object, aht TSRMLS_CC);
		}
	}
}
/* }}} */

/* Reset the stored position to the first element of aht, then skip any
 * entries hidden by spl_array_skip_protected (e.g. non-public properties). */
static void spl_array_rewind_ex(spl_array_object *intern, HashTable *aht TSRMLS_DC) /* {{{ */
{
	zend_hash_internal_pointer_reset_ex(aht, &intern->pos);
	spl_array_update_pos(intern);
	spl_array_skip_protected(intern, aht TSRMLS_CC);
}
/* }}} */

/* Rewind with a NULL-table guard: emits a notice instead of crashing when the
 * wrapped value was replaced from outside and is no longer an array. */
static void spl_array_rewind(spl_array_object *intern TSRMLS_DC) /* {{{ */
{
	HashTable *aht = spl_array_get_hash_table(intern, 0 TSRMLS_CC);

	if (!aht) {
		php_error_docref(NULL TSRMLS_CC, E_NOTICE, "ArrayIterator::rewind(): Array was modified outside object and is no longer an array");
		return;
	}

	spl_array_rewind_ex(intern, aht TSRMLS_CC);
}
/* }}} */

/* Iterator handler: rewind. Dispatches to the user-level rewind() when
 * SPL_ARRAY_OVERLOADED_REWIND is set. */
static void spl_array_it_rewind(zend_object_iterator *iter TSRMLS_DC) /* {{{ */
{
	spl_array_it *iterator = (spl_array_it *)iter;
	spl_array_object *object = iterator->object;

	if (object->ar_flags & SPL_ARRAY_OVERLOADED_REWIND) {
		zend_user_it_rewind(iter TSRMLS_CC);
	} else {
		zend_user_it_invalidate_current(iter TSRMLS_CC);
		spl_array_rewind(object TSRMLS_CC);
	}
}
/* }}} */

/* {{{ spl_array_set_array
 * Install *array as the object's backing storage.
 * - Plain arrays are separated (copy-on-write break) unless passed by ref.
 * - Another ArrayObject/ArrayIterator is shared via SPL_ARRAY_USE_OTHER,
 *   inheriting its flags when just_array is set.
 * - Non-array/non-object input throws InvalidArgumentException and leaves the
 *   current storage untouched.
 * - An object whose get_properties handler is overloaded (and which yields no
 *   usable hash table) throws InvalidArgumentException.
 * Takes a new reference on the stored zval and rewinds the position. */
static void spl_array_set_array(zval *object, spl_array_object *intern, zval **array, long ar_flags, int just_array TSRMLS_DC) {
	if (Z_TYPE_PP(array) == IS_ARRAY) {
		SEPARATE_ZVAL_IF_NOT_REF(array);
	}

	if (Z_TYPE_PP(array) == IS_OBJECT && (Z_OBJ_HT_PP(array) == &spl_handler_ArrayObject || Z_OBJ_HT_PP(array) == &spl_handler_ArrayIterator)) {
		zval_ptr_dtor(&intern->array);
		if (just_array) {
			spl_array_object *other = (spl_array_object*)zend_object_store_get_object(*array TSRMLS_CC);
			ar_flags = other->ar_flags & ~SPL_ARRAY_INT_MASK;
		}
		ar_flags |= SPL_ARRAY_USE_OTHER;
		intern->array = *array;
	} else {
		if (Z_TYPE_PP(array) != IS_OBJECT && Z_TYPE_PP(array) != IS_ARRAY) {
			zend_throw_exception(spl_ce_InvalidArgumentException, "Passed variable is not an array or object, using empty array instead", 0 TSRMLS_CC);
			return;
		}
		zval_ptr_dtor(&intern->array);
		intern->array = *array;
	}
	if (object == *array) {
		intern->ar_flags |= SPL_ARRAY_IS_SELF;
		intern->ar_flags &= ~SPL_ARRAY_USE_OTHER;
	} else {
		intern->ar_flags &= ~SPL_ARRAY_IS_SELF;
	}
	intern->ar_flags |= ar_flags;
	Z_ADDREF_P(intern->array);
	if (Z_TYPE_PP(array) == IS_OBJECT) {
		zend_object_get_properties_t handler = Z_OBJ_HANDLER_PP(array, get_properties);
		if ((handler != std_object_handlers.get_properties && handler != spl_array_get_properties) || !spl_array_get_hash_table(intern, 0 TSRMLS_CC)) {
			zend_throw_exception_ex(spl_ce_InvalidArgumentException, 0 TSRMLS_CC, "Overloaded object of type %s is not compatible with %s", Z_OBJCE_PP(array)->name, intern->std.ce->name);
		}
	}

	spl_array_rewind(intern TSRMLS_CC);
}
/* }}} */

/* iterator handler table */
zend_object_iterator_funcs spl_array_it_funcs = {
	spl_array_it_dtor,
	spl_array_it_valid,
	spl_array_it_get_current_data,
	spl_array_it_get_current_key,
	spl_array_it_move_forward,
	spl_array_it_rewind
};

/* get_iterator handler for ArrayIterator/RecursiveArrayIterator.
 * Refuses foreach-by-reference when current() is overloaded (the overloaded
 * value cannot be handed out as a reference). Takes a reference on object;
 * released in spl_array_it_dtor. */
zend_object_iterator *spl_array_get_iterator(zend_class_entry *ce, zval *object, int by_ref TSRMLS_DC) /* {{{ */
{
	spl_array_it *iterator;
	spl_array_object *array_object = (spl_array_object*)zend_object_store_get_object(object TSRMLS_CC);

	if (by_ref && (array_object->ar_flags & SPL_ARRAY_OVERLOADED_CURRENT)) {
		zend_error(E_ERROR, "An iterator cannot be used with foreach by reference");
	}

	iterator = emalloc(sizeof(spl_array_it));

	Z_ADDREF_P(object);
	iterator->intern.it.data = (void*)object;
	iterator->intern.it.funcs = &spl_array_it_funcs;
	iterator->intern.ce = ce;
	iterator->intern.value = NULL;
	iterator->object = array_object;

	return (zend_object_iterator*)iterator;
}
/* }}} */

/* {{{ proto void ArrayObject::__construct(array|object ar = array() [, int flags = 0 [, string iterator_class = "ArrayIterator"]])
       proto void ArrayIterator::__construct(array|object ar = array() [, int flags = 0])
   Constructs a new array iterator from a path.
   Parse errors are raised as InvalidArgumentException (EH_THROW) for the
   duration of parameter parsing. With no arguments this is a no-op. */
SPL_METHOD(Array, __construct)
{
	zval *object = getThis();
	spl_array_object *intern;
	zval **array;
	long ar_flags = 0;
	zend_class_entry *ce_get_iterator = spl_ce_Iterator;
	zend_error_handling error_handling;

	if (ZEND_NUM_ARGS() == 0) {
		return; /* nothing to do */
	}

	zend_replace_error_handling(EH_THROW, spl_ce_InvalidArgumentException, &error_handling TSRMLS_CC);

	intern = (spl_array_object*)zend_object_store_get_object(object TSRMLS_CC);

	if (zend_parse_parameters(ZEND_NUM_ARGS() TSRMLS_CC, "Z|lC", &array, &ar_flags, &ce_get_iterator) == FAILURE) {
		zend_restore_error_handling(&error_handling TSRMLS_CC);
		return;
	}

	if (ZEND_NUM_ARGS() > 2) {
		intern->ce_get_iterator = ce_get_iterator;
	}

	/* internal-only bits may not be set from user land */
	ar_flags &= ~SPL_ARRAY_INT_MASK;

	spl_array_set_array(object, intern, array, ar_flags, ZEND_NUM_ARGS() == 1 TSRMLS_CC);

	zend_restore_error_handling(&error_handling TSRMLS_CC);
}
/* }}} */

/* {{{ proto void ArrayObject::setIteratorClass(string iterator_class)
   Set the class used in getIterator.
*/
SPL_METHOD(Array, setIteratorClass)
{
	zval *object = getThis();
	spl_array_object *intern = (spl_array_object*)zend_object_store_get_object(object TSRMLS_CC);
	zend_class_entry * ce_get_iterator = spl_ce_Iterator;

	/* "C" requires a valid class name; the resolved entry replaces the default */
	if (zend_parse_parameters(ZEND_NUM_ARGS() TSRMLS_CC, "C", &ce_get_iterator) == FAILURE) {
		return;
	}

	intern->ce_get_iterator = ce_get_iterator;
}
/* }}} */

/* {{{ proto string ArrayObject::getIteratorClass()
   Get the class used in getIterator. */
SPL_METHOD(Array, getIteratorClass)
{
	zval *object = getThis();
	spl_array_object *intern = (spl_array_object*)zend_object_store_get_object(object TSRMLS_CC);

	if (zend_parse_parameters_none() == FAILURE) {
		return;
	}

	RETURN_STRING(intern->ce_get_iterator->name, 1);
}
/* }}} */

/* {{{ proto int ArrayObject::getFlags()
   Get flags (internal-only bits are masked out). */
SPL_METHOD(Array, getFlags)
{
	zval *object = getThis();
	spl_array_object *intern = (spl_array_object*)zend_object_store_get_object(object TSRMLS_CC);

	if (zend_parse_parameters_none() == FAILURE) {
		return;
	}

	RETURN_LONG(intern->ar_flags & ~SPL_ARRAY_INT_MASK);
}
/* }}} */

/* {{{ proto void ArrayObject::setFlags(int flags)
   Set flags; internal bits (SPL_ARRAY_INT_MASK) are preserved, user bits replaced. */
SPL_METHOD(Array, setFlags)
{
	zval *object = getThis();
	spl_array_object *intern = (spl_array_object*)zend_object_store_get_object(object TSRMLS_CC);
	long ar_flags = 0;

	if (zend_parse_parameters(ZEND_NUM_ARGS() TSRMLS_CC, "l", &ar_flags) == FAILURE) {
		return;
	}

	intern->ar_flags = (intern->ar_flags & SPL_ARRAY_INT_MASK) | (ar_flags & ~SPL_ARRAY_INT_MASK);
}
/* }}} */

/* {{{ proto Array|Object ArrayObject::exchangeArray(Array|Object ar = array())
   Replace the referenced array or object with a new one and return the old one (right now copy - to be changed).
   NOTE(review): return_value is populated with a copy of the old storage BEFORE
   parameters are parsed, so on a parse failure the copy is still built. */
SPL_METHOD(Array, exchangeArray)
{
	zval *object = getThis(), *tmp, **array;
	spl_array_object *intern = (spl_array_object*)zend_object_store_get_object(object TSRMLS_CC);

	array_init(return_value);
	zend_hash_copy(HASH_OF(return_value), spl_array_get_hash_table(intern, 0 TSRMLS_CC), (copy_ctor_func_t) zval_add_ref, &tmp, sizeof(zval*));
	if (zend_parse_parameters(ZEND_NUM_ARGS() TSRMLS_CC, "Z", &array) == FAILURE) {
		return;
	}

	spl_array_set_array(object, intern, array, 0L, 1 TSRMLS_CC);
}
/* }}} */

/* {{{ proto ArrayIterator ArrayObject::getIterator()
   Create a new iterator from a ArrayObject instance. Uses ce_get_iterator
   (settable via setIteratorClass) as the iterator's class. */
SPL_METHOD(Array, getIterator)
{
	zval *object = getThis();
	spl_array_object *intern = (spl_array_object*)zend_object_store_get_object(object TSRMLS_CC);
	spl_array_object *iterator;
	HashTable *aht = spl_array_get_hash_table(intern, 0 TSRMLS_CC);

	if (zend_parse_parameters_none() == FAILURE) {
		return;
	}

	if (!aht) {
		php_error_docref(NULL TSRMLS_CC, E_NOTICE, "Array was modified outside object and is no longer an array");
		return;
	}

	return_value->type = IS_OBJECT;
	return_value->value.obj = spl_array_object_new_ex(intern->ce_get_iterator, &iterator, object, 0 TSRMLS_CC);
	Z_SET_REFCOUNT_P(return_value, 1);
	Z_SET_ISREF_P(return_value);
}
/* }}} */

/* {{{ proto void ArrayIterator::rewind()
   Rewind array back to the start */
SPL_METHOD(Array, rewind)
{
	zval *object = getThis();
	spl_array_object *intern = (spl_array_object*)zend_object_store_get_object(object TSRMLS_CC);

	if (zend_parse_parameters_none() == FAILURE) {
		return;
	}

	spl_array_rewind(intern TSRMLS_CC);
}
/* }}} */

/* {{{ proto void ArrayIterator::seek(int $position)
   Seek to position.
*/
SPL_METHOD(Array, seek)
{
	long opos, position;
	zval *object = getThis();
	spl_array_object *intern = (spl_array_object*)zend_object_store_get_object(object TSRMLS_CC);
	HashTable *aht = spl_array_get_hash_table(intern, 0 TSRMLS_CC);
	int result;

	if (zend_parse_parameters(ZEND_NUM_ARGS() TSRMLS_CC, "l", &position) == FAILURE) {
		return;
	}

	if (!aht) {
		php_error_docref(NULL TSRMLS_CC, E_NOTICE, "Array was modified outside object and is no longer an array");
		return;
	}

	opos = position; /* keep the requested offset for the error message */

	if (position >= 0) { /* negative values are not supported */
		/* O(position): rewind then step forward one element at a time */
		spl_array_rewind(intern TSRMLS_CC);
		result = SUCCESS;

		while (position-- > 0 && (result = spl_array_next(intern TSRMLS_CC)) == SUCCESS);

		if (result == SUCCESS && zend_hash_has_more_elements_ex(aht, &intern->pos) == SUCCESS) {
			return; /* ok */
		}
	}
	zend_throw_exception_ex(spl_ce_OutOfBoundsException, 0 TSRMLS_CC, "Seek position %ld is out of range", opos);
}
/* }}} */

/* Count elements in the backing storage.
 * For a wrapped object the elements are counted by iterating (so protected
 * entries are skipped); for a plain array zend_hash_num_elements is used.
 * On a missing backing table *count is set to 0 and FAILURE is returned. */
int static spl_array_object_count_elements_helper(spl_array_object *intern, long *count TSRMLS_DC) /* {{{ */
{
	HashTable *aht = spl_array_get_hash_table(intern, 0 TSRMLS_CC);
	HashPosition pos;

	if (!aht) {
		php_error_docref(NULL TSRMLS_CC, E_NOTICE, "Array was modified outside object and is no longer an array");
		*count = 0;
		return FAILURE;
	}

	if (Z_TYPE_P(intern->array) == IS_OBJECT) {
		/* We need to store the 'pos' since we'll modify it in the functions
		 * we're going to call and which do not support 'pos' as parameter. */
		pos = intern->pos;
		*count = 0;
		spl_array_rewind(intern TSRMLS_CC);
		while (intern->pos && spl_array_next(intern TSRMLS_CC) == SUCCESS) {
			(*count)++;
		}
		spl_array_set_pos(intern, pos);
		return SUCCESS;
	} else {
		*count = zend_hash_num_elements(aht);
		return SUCCESS;
	}
}
/* }}} */

/* count_elements object handler: defers to a user-defined count() method
 * (fptr_count) when present, converting its return value to long; otherwise
 * counts via the helper above. */
int spl_array_object_count_elements(zval *object, long *count TSRMLS_DC) /* {{{ */
{
	spl_array_object *intern = (spl_array_object*)zend_object_store_get_object(object TSRMLS_CC);

	if (intern->fptr_count) {
		zval *rv;
		zend_call_method_with_0_params(&object, intern->std.ce, &intern->fptr_count, "count", &rv);
		if (rv) {
			zval_ptr_dtor(&intern->retval);
			MAKE_STD_ZVAL(intern->retval);
			ZVAL_ZVAL(intern->retval, rv, 1, 1);
			convert_to_long(intern->retval);
			*count = (long) Z_LVAL_P(intern->retval);
			return SUCCESS;
		}
		*count = 0;
		return FAILURE;
	}
	return spl_array_object_count_elements_helper(intern, count TSRMLS_CC);
}
/* }}} */

/* {{{ proto int ArrayObject::count()
       proto int ArrayIterator::count()
   Return the number of elements in the Iterator.
*/
SPL_METHOD(Array, count)
{
	long count;
	spl_array_object *intern = (spl_array_object*)zend_object_store_get_object(getThis() TSRMLS_CC);

	if (zend_parse_parameters_none() == FAILURE) {
		return;
	}

	/* helper sets count to 0 on failure, so count is always defined here */
	spl_array_object_count_elements_helper(intern, &count TSRMLS_CC);

	RETURN_LONG(count);
} /* }}} */

/* Shared implementation for the sort-family methods (asort, ksort, uasort,
 * uksort, natsort, natcasesort). Wraps the backing hash table in a temporary
 * zval and calls the global PHP function fname on it.
 * use_arg selects the calling convention:
 *   0 (NO_ARG)       - no user argument allowed
 *   MAY_USER_ARG     - zero or one argument (sort flags)
 *   USE_ARG          - exactly one argument (comparison callback)
 * nApplyCount is raised around the call so that re-entrant modification of
 * the ArrayObject during sorting can be detected and rejected elsewhere. */
static void spl_array_method(INTERNAL_FUNCTION_PARAMETERS, char *fname, int fname_len, int use_arg) /* {{{ */
{
	spl_array_object *intern = (spl_array_object*)zend_object_store_get_object(getThis() TSRMLS_CC);
	HashTable *aht = spl_array_get_hash_table(intern, 0 TSRMLS_CC);
	zval *tmp, *arg = NULL;
	zval *retval_ptr = NULL;

	/* borrow the hash table inside a stack-owned zval shell */
	MAKE_STD_ZVAL(tmp);
	Z_TYPE_P(tmp) = IS_ARRAY;
	Z_ARRVAL_P(tmp) = aht;

	if (!use_arg) {
		aht->nApplyCount++;
		zend_call_method(NULL, NULL, NULL, fname, fname_len, &retval_ptr, 1, tmp, NULL TSRMLS_CC);
		aht->nApplyCount--;
	} else if (use_arg == SPL_ARRAY_METHOD_MAY_USER_ARG) {
		if (zend_parse_parameters_ex(ZEND_PARSE_PARAMS_QUIET, ZEND_NUM_ARGS() TSRMLS_CC, "|z", &arg) == FAILURE) {
			Z_TYPE_P(tmp) = IS_NULL;
			zval_ptr_dtor(&tmp);
			zend_throw_exception(spl_ce_BadMethodCallException, "Function expects one argument at most", 0 TSRMLS_CC);
			return;
		}
		aht->nApplyCount++;
		zend_call_method(NULL, NULL, NULL, fname, fname_len, &retval_ptr, arg ? 2 : 1, tmp, arg TSRMLS_CC);
		aht->nApplyCount--;
	} else {
		if (ZEND_NUM_ARGS() != 1 || zend_parse_parameters_ex(ZEND_PARSE_PARAMS_QUIET, ZEND_NUM_ARGS() TSRMLS_CC, "z", &arg) == FAILURE) {
			Z_TYPE_P(tmp) = IS_NULL;
			zval_ptr_dtor(&tmp);
			zend_throw_exception(spl_ce_BadMethodCallException, "Function expects exactly one argument", 0 TSRMLS_CC);
			return;
		}
		aht->nApplyCount++;
		zend_call_method(NULL, NULL, NULL, fname, fname_len, &retval_ptr, 2, tmp, arg TSRMLS_CC);
		aht->nApplyCount--;
	}
	Z_TYPE_P(tmp) = IS_NULL; /* we want to destroy the zval, not the hashtable */
	zval_ptr_dtor(&tmp);
	if (retval_ptr) {
		COPY_PZVAL_TO_ZVAL(*return_value, retval_ptr);
	}
} /* }}} */

/* Expand to an SPL method body that forwards to spl_array_method with the
 * matching global function name. */
#define SPL_ARRAY_METHOD(cname, fname, use_arg) \
SPL_METHOD(cname, fname) \
{ \
	spl_array_method(INTERNAL_FUNCTION_PARAM_PASSTHRU, #fname, sizeof(#fname)-1, use_arg); \
}

/* {{{ proto int ArrayObject::asort([int $sort_flags = SORT_REGULAR ])
       proto int ArrayIterator::asort([int $sort_flags = SORT_REGULAR ])
   Sort the entries by values. */
SPL_ARRAY_METHOD(Array, asort, SPL_ARRAY_METHOD_MAY_USER_ARG) /* }}} */

/* {{{ proto int ArrayObject::ksort([int $sort_flags = SORT_REGULAR ])
       proto int ArrayIterator::ksort([int $sort_flags = SORT_REGULAR ])
   Sort the entries by key. */
SPL_ARRAY_METHOD(Array, ksort, SPL_ARRAY_METHOD_MAY_USER_ARG) /* }}} */

/* {{{ proto int ArrayObject::uasort(callback cmp_function)
       proto int ArrayIterator::uasort(callback cmp_function)
   Sort the entries by values user defined function. */
SPL_ARRAY_METHOD(Array, uasort, SPL_ARRAY_METHOD_USE_ARG) /* }}} */

/* {{{ proto int ArrayObject::uksort(callback cmp_function)
       proto int ArrayIterator::uksort(callback cmp_function)
   Sort the entries by key using user defined function. */
SPL_ARRAY_METHOD(Array, uksort, SPL_ARRAY_METHOD_USE_ARG) /* }}} */

/* {{{ proto int ArrayObject::natsort()
       proto int ArrayIterator::natsort()
   Sort the entries by values using "natural order" algorithm.
*/
SPL_ARRAY_METHOD(Array, natsort, SPL_ARRAY_METHOD_NO_ARG) /* }}} */

/* {{{ proto int ArrayObject::natcasesort()
       proto int ArrayIterator::natcasesort()
   Sort the entries by key using case insensitive "natural order" algorithm. */
SPL_ARRAY_METHOD(Array, natcasesort, SPL_ARRAY_METHOD_NO_ARG) /* }}} */

/* {{{ proto mixed|NULL ArrayIterator::current()
   Return current array entry; returns NULL implicitly when the position is
   invalid or yields no element. */
SPL_METHOD(Array, current)
{
	zval *object = getThis();
	spl_array_object *intern = (spl_array_object*)zend_object_store_get_object(object TSRMLS_CC);
	zval **entry;
	HashTable *aht = spl_array_get_hash_table(intern, 0 TSRMLS_CC);

	if (zend_parse_parameters_none() == FAILURE) {
		return;
	}

	if (spl_array_object_verify_pos(intern, aht TSRMLS_CC) == FAILURE) {
		return;
	}

	if (zend_hash_get_current_data_ex(aht, (void **) &entry, &intern->pos) == FAILURE) {
		return;
	}
	RETVAL_ZVAL(*entry, 1, 0);
}
/* }}} */

/* {{{ proto mixed|NULL ArrayIterator::key()
   Return current array key */
SPL_METHOD(Array, key)
{
	if (zend_parse_parameters_none() == FAILURE) {
		return;
	}

	spl_array_iterator_key(getThis(), return_value TSRMLS_CC);
} /* }}} */

/* Fetch the current key into return_value after validating the position.
 * Exported so other SPL code can reuse it besides ArrayIterator::key(). */
void spl_array_iterator_key(zval *object, zval *return_value TSRMLS_DC) /* {{{ */
{
	spl_array_object *intern = (spl_array_object*)zend_object_store_get_object(object TSRMLS_CC);
	HashTable *aht = spl_array_get_hash_table(intern, 0 TSRMLS_CC);

	if (spl_array_object_verify_pos(intern, aht TSRMLS_CC) == FAILURE) {
		return;
	}

	zend_hash_get_current_key_zval_ex(aht, return_value, &intern->pos);
}
/* }}} */

/* {{{ proto void ArrayIterator::next()
   Move to next entry */
SPL_METHOD(Array, next)
{
	zval *object = getThis();
	spl_array_object *intern = (spl_array_object*)zend_object_store_get_object(object TSRMLS_CC);
	HashTable *aht = spl_array_get_hash_table(intern, 0 TSRMLS_CC);

	if (zend_parse_parameters_none() == FAILURE) {
		return;
	}

	if (spl_array_object_verify_pos(intern, aht TSRMLS_CC) == FAILURE) {
		return;
	}

	spl_array_next_no_verify(intern, aht TSRMLS_CC);
}
/* }}} */

/* {{{ proto bool
   ArrayIterator::valid()
   Check whether array contains more entries */
SPL_METHOD(Array, valid)
{
	zval *object = getThis();
	spl_array_object *intern = (spl_array_object*)zend_object_store_get_object(object TSRMLS_CC);
	HashTable *aht = spl_array_get_hash_table(intern, 0 TSRMLS_CC);

	if (zend_parse_parameters_none() == FAILURE) {
		return;
	}

	if (spl_array_object_verify_pos(intern, aht TSRMLS_CC) == FAILURE) {
		RETURN_FALSE;
	} else {
		RETURN_BOOL(zend_hash_has_more_elements_ex(aht, &intern->pos) == SUCCESS);
	}
}
/* }}} */

/* {{{ proto bool RecursiveArrayIterator::hasChildren()
   Check whether current element has children (e.g. is an array).
   Objects only qualify when CHILD_ARRAYS_ONLY is not set. */
SPL_METHOD(Array, hasChildren)
{
	zval *object = getThis(), **entry;
	spl_array_object *intern = (spl_array_object*)zend_object_store_get_object(object TSRMLS_CC);
	HashTable *aht = spl_array_get_hash_table(intern, 0 TSRMLS_CC);

	if (zend_parse_parameters_none() == FAILURE) {
		return;
	}

	if (spl_array_object_verify_pos(intern, aht TSRMLS_CC) == FAILURE) {
		RETURN_FALSE;
	}

	if (zend_hash_get_current_data_ex(aht, (void **) &entry, &intern->pos) == FAILURE) {
		RETURN_FALSE;
	}

	RETURN_BOOL(Z_TYPE_PP(entry) == IS_ARRAY || (Z_TYPE_PP(entry) == IS_OBJECT && (intern->ar_flags & SPL_ARRAY_CHILD_ARRAYS_ONLY) == 0));
}
/* }}} */

/* {{{ proto object RecursiveArrayIterator::getChildren()
   Create a sub iterator for the current element (same class as $this).
   An object entry that is an instance of $this's class is returned directly;
   otherwise a new iterator of $this's class is instantiated over the entry. */
SPL_METHOD(Array, getChildren)
{
	zval *object = getThis(), **entry, *flags;
	spl_array_object *intern = (spl_array_object*)zend_object_store_get_object(object TSRMLS_CC);
	HashTable *aht = spl_array_get_hash_table(intern, 0 TSRMLS_CC);

	if (zend_parse_parameters_none() == FAILURE) {
		return;
	}

	if (spl_array_object_verify_pos(intern, aht TSRMLS_CC) == FAILURE) {
		return;
	}

	if (zend_hash_get_current_data_ex(aht, (void **) &entry, &intern->pos) == FAILURE) {
		return;
	}

	if (Z_TYPE_PP(entry) == IS_OBJECT) {
		if ((intern->ar_flags & SPL_ARRAY_CHILD_ARRAYS_ONLY) != 0) {
			return;
		}
		if (instanceof_function(Z_OBJCE_PP(entry), Z_OBJCE_P(getThis()) TSRMLS_CC)) {
			RETURN_ZVAL(*entry, 1, 0);
		}
	}

	MAKE_STD_ZVAL(flags);
	ZVAL_LONG(flags, SPL_ARRAY_USE_OTHER | intern->ar_flags);
	spl_instantiate_arg_ex2(Z_OBJCE_P(getThis()), &return_value, 0, *entry, flags TSRMLS_CC);
	zval_ptr_dtor(&flags);
}
/* }}} */

/* {{{ proto string ArrayObject::serialize()
   Serialize the object.
   Format: "x:" <flags> [<storage> ";"] "m:" <members>
   (storage is omitted when the object wraps itself, SPL_ARRAY_IS_SELF). */
SPL_METHOD(Array, serialize)
{
	zval *object = getThis();
	spl_array_object *intern = (spl_array_object*)zend_object_store_get_object(object TSRMLS_CC);
	HashTable *aht = spl_array_get_hash_table(intern, 0 TSRMLS_CC);
	zval members, *pmembers;
	php_serialize_data_t var_hash;
	smart_str buf = {0};
	zval *flags;

	if (zend_parse_parameters_none() == FAILURE) {
		return;
	}

	if (!aht) {
		php_error_docref(NULL TSRMLS_CC, E_NOTICE, "Array was modified outside object and is no longer an array");
		return;
	}

	PHP_VAR_SERIALIZE_INIT(var_hash);

	MAKE_STD_ZVAL(flags);
	ZVAL_LONG(flags, (intern->ar_flags & SPL_ARRAY_CLONE_MASK));

	/* storage */
	smart_str_appendl(&buf, "x:", 2);
	php_var_serialize(&buf, &flags, &var_hash TSRMLS_CC);
	zval_ptr_dtor(&flags);

	if (!(intern->ar_flags & SPL_ARRAY_IS_SELF)) {
		php_var_serialize(&buf, &intern->array, &var_hash TSRMLS_CC);
		smart_str_appendc(&buf, ';');
	}

	/* members */
	smart_str_appendl(&buf, "m:", 2);
	INIT_PZVAL(&members);
	if (!intern->std.properties) {
		rebuild_object_properties(&intern->std);
	}
	Z_ARRVAL(members) = intern->std.properties;
	Z_TYPE(members) = IS_ARRAY;
	pmembers = &members;
	php_var_serialize(&buf, &pmembers, &var_hash TSRMLS_CC); /* finishes the string */

	/* done */
	PHP_VAR_SERIALIZE_DESTROY(var_hash);

	if (buf.c) {
		RETURN_STRINGL(buf.c, buf.len, 0);
	}

	RETURN_NULL();
} /* }}} */

/* {{{ proto void ArrayObject::unserialize(string serialized)
 * unserialize the object.
 * Parses the format produced by serialize() above; any malformed input jumps
 * to outexcept, which cleans up and throws UnexpectedValueException.
 * Refuses to run while the storage is being sorted (nApplyCount > 0). */
SPL_METHOD(Array, unserialize)
{
	spl_array_object *intern = (spl_array_object*)zend_object_store_get_object(getThis() TSRMLS_CC);

	char *buf;
	int buf_len;
	const unsigned char *p, *s;
	php_unserialize_data_t var_hash;
	zval *pmembers, *pflags = NULL;
	HashTable *aht;
	long flags;

	if (zend_parse_parameters(ZEND_NUM_ARGS() TSRMLS_CC, "s", &buf, &buf_len) == FAILURE) {
		return;
	}

	if (buf_len == 0) {
		return;
	}

	aht = spl_array_get_hash_table(intern, 0 TSRMLS_CC);
	if (aht->nApplyCount > 0) {
		zend_error(E_WARNING, "Modification of ArrayObject during sorting is prohibited");
		return;
	}

	/* storage */
	s = p = (const unsigned char*)buf;
	PHP_VAR_UNSERIALIZE_INIT(var_hash);

	if (*p!= 'x' || *++p != ':') {
		goto outexcept;
	}
	++p;

	ALLOC_INIT_ZVAL(pflags);
	if (!php_var_unserialize(&pflags, &p, s + buf_len, &var_hash TSRMLS_CC) || Z_TYPE_P(pflags) != IS_LONG) {
		goto outexcept;
	}

	var_push_dtor(&var_hash, &pflags);
	--p; /* for ';' */
	flags = Z_LVAL_P(pflags);
	/* flags needs to be verified and we also need to verify whether the next
	 * thing we get is ';'. After that we require an 'm' or somethign else
	 * where 'm' stands for members and anything else should be an array. If
	 * neither 'a' or 'm' follows we have an error. */

	if (*p != ';') {
		goto outexcept;
	}
	++p;

	if (*p!='m') {
		if (*p!='a' && *p!='O' && *p!='C' && *p!='r') {
			goto outexcept;
		}
		/* only the clone-relevant flag bits are restored from input */
		intern->ar_flags &= ~SPL_ARRAY_CLONE_MASK;
		intern->ar_flags |= flags & SPL_ARRAY_CLONE_MASK;
		zval_ptr_dtor(&intern->array);
		ALLOC_INIT_ZVAL(intern->array);
		if (!php_var_unserialize(&intern->array, &p, s + buf_len, &var_hash TSRMLS_CC) || (Z_TYPE_P(intern->array) != IS_ARRAY && Z_TYPE_P(intern->array) != IS_OBJECT)) {
			zval_ptr_dtor(&intern->array);
			goto outexcept;
		}
		var_push_dtor(&var_hash, &intern->array);
	}
	if (*p != ';') {
		goto outexcept;
	}
	++p;

	/* members */
	if (*p!= 'm' || *++p != ':') {
		goto outexcept;
	}
	++p;

	ALLOC_INIT_ZVAL(pmembers);
	if (!php_var_unserialize(&pmembers, &p, s + buf_len, &var_hash TSRMLS_CC) || Z_TYPE_P(pmembers) != IS_ARRAY) {
		zval_ptr_dtor(&pmembers);
		goto outexcept;
	}

	var_push_dtor(&var_hash, &pmembers);

	/* copy members */
	if (!intern->std.properties) {
		rebuild_object_properties(&intern->std);
	}
	zend_hash_copy(intern->std.properties, Z_ARRVAL_P(pmembers), (copy_ctor_func_t) zval_add_ref, (void *) NULL, sizeof(zval *));
	zval_ptr_dtor(&pmembers);

	/* done reading $serialized */

	PHP_VAR_UNSERIALIZE_DESTROY(var_hash);
	if (pflags) {
		zval_ptr_dtor(&pflags);
	}
	return;

outexcept:
	PHP_VAR_UNSERIALIZE_DESTROY(var_hash);
	if (pflags) {
		zval_ptr_dtor(&pflags);
	}
	zend_throw_exception_ex(spl_ce_UnexpectedValueException, 0 TSRMLS_CC, "Error at offset %ld of %d bytes", (long)((char*)p - buf), buf_len);
	return;
} /* }}} */

/* {{{ arginfo and function table */
ZEND_BEGIN_ARG_INFO_EX(arginfo_array___construct, 0, 0, 0)
	ZEND_ARG_INFO(0, array)
ZEND_END_ARG_INFO()

ZEND_BEGIN_ARG_INFO_EX(arginfo_array_offsetGet, 0, 0, 1)
	ZEND_ARG_INFO(0, index)
ZEND_END_ARG_INFO()

ZEND_BEGIN_ARG_INFO_EX(arginfo_array_offsetSet, 0, 0, 2)
	ZEND_ARG_INFO(0, index)
	ZEND_ARG_INFO(0, newval)
ZEND_END_ARG_INFO()

ZEND_BEGIN_ARG_INFO(arginfo_array_append, 0)
	ZEND_ARG_INFO(0, value)
ZEND_END_ARG_INFO()

ZEND_BEGIN_ARG_INFO(arginfo_array_seek, 0)
	ZEND_ARG_INFO(0, position)
ZEND_END_ARG_INFO()

ZEND_BEGIN_ARG_INFO(arginfo_array_exchangeArray, 0)
	ZEND_ARG_INFO(0, array)
ZEND_END_ARG_INFO()

ZEND_BEGIN_ARG_INFO(arginfo_array_setFlags, 0)
	ZEND_ARG_INFO(0, flags)
ZEND_END_ARG_INFO()

ZEND_BEGIN_ARG_INFO(arginfo_array_setIteratorClass, 0)
	ZEND_ARG_INFO(0, iteratorClass)
ZEND_END_ARG_INFO()

ZEND_BEGIN_ARG_INFO(arginfo_array_uXsort, 0)
	ZEND_ARG_INFO(0, cmp_function)
ZEND_END_ARG_INFO();

ZEND_BEGIN_ARG_INFO(arginfo_array_unserialize, 0)
	ZEND_ARG_INFO(0, serialized)
ZEND_END_ARG_INFO();

ZEND_BEGIN_ARG_INFO(arginfo_array_void, 0)
ZEND_END_ARG_INFO()

static const zend_function_entry spl_funcs_ArrayObject[] = {
	SPL_ME(Array, __construct,      arginfo_array___construct,      ZEND_ACC_PUBLIC)
	SPL_ME(Array, offsetExists,     arginfo_array_offsetGet,        ZEND_ACC_PUBLIC)
	SPL_ME(Array, offsetGet,        arginfo_array_offsetGet,        ZEND_ACC_PUBLIC)
	SPL_ME(Array, offsetSet,        arginfo_array_offsetSet,        ZEND_ACC_PUBLIC)
	SPL_ME(Array, offsetUnset,      arginfo_array_offsetGet,        ZEND_ACC_PUBLIC)
	SPL_ME(Array, append,           arginfo_array_append,           ZEND_ACC_PUBLIC)
	SPL_ME(Array, getArrayCopy,     arginfo_array_void,             ZEND_ACC_PUBLIC)
	SPL_ME(Array, count,            arginfo_array_void,             ZEND_ACC_PUBLIC)
	SPL_ME(Array, getFlags,         arginfo_array_void,             ZEND_ACC_PUBLIC)
	SPL_ME(Array, setFlags,         arginfo_array_setFlags,         ZEND_ACC_PUBLIC)
	SPL_ME(Array, asort,            arginfo_array_void,             ZEND_ACC_PUBLIC)
	SPL_ME(Array, ksort,            arginfo_array_void,             ZEND_ACC_PUBLIC)
	SPL_ME(Array, uasort,           arginfo_array_uXsort,           ZEND_ACC_PUBLIC)
	SPL_ME(Array, uksort,           arginfo_array_uXsort,           ZEND_ACC_PUBLIC)
	SPL_ME(Array, natsort,          arginfo_array_void,             ZEND_ACC_PUBLIC)
	SPL_ME(Array, natcasesort,      arginfo_array_void,             ZEND_ACC_PUBLIC)
	SPL_ME(Array, unserialize,      arginfo_array_unserialize,      ZEND_ACC_PUBLIC)
	SPL_ME(Array, serialize,        arginfo_array_void,             ZEND_ACC_PUBLIC)
	/* ArrayObject specific */
	SPL_ME(Array, getIterator,      arginfo_array_void,             ZEND_ACC_PUBLIC)
	SPL_ME(Array, exchangeArray,    arginfo_array_exchangeArray,    ZEND_ACC_PUBLIC)
	SPL_ME(Array, setIteratorClass, arginfo_array_setIteratorClass, ZEND_ACC_PUBLIC)
	SPL_ME(Array, getIteratorClass, arginfo_array_void,             ZEND_ACC_PUBLIC)
	PHP_FE_END
};

static const zend_function_entry spl_funcs_ArrayIterator[] = {
	SPL_ME(Array, __construct,      arginfo_array___construct,      ZEND_ACC_PUBLIC)
	SPL_ME(Array, offsetExists,     arginfo_array_offsetGet,        ZEND_ACC_PUBLIC)
	SPL_ME(Array, offsetGet,        arginfo_array_offsetGet,        ZEND_ACC_PUBLIC)
	SPL_ME(Array, offsetSet,        arginfo_array_offsetSet,        ZEND_ACC_PUBLIC)
	SPL_ME(Array, offsetUnset,      arginfo_array_offsetGet,        ZEND_ACC_PUBLIC)
	SPL_ME(Array, append,           arginfo_array_append,           ZEND_ACC_PUBLIC)
	SPL_ME(Array, getArrayCopy,     arginfo_array_void,             ZEND_ACC_PUBLIC)
	SPL_ME(Array, count,            arginfo_array_void,             ZEND_ACC_PUBLIC)
	SPL_ME(Array, getFlags,         arginfo_array_void,             ZEND_ACC_PUBLIC)
	SPL_ME(Array, setFlags,         arginfo_array_setFlags,         ZEND_ACC_PUBLIC)
	SPL_ME(Array, asort,            arginfo_array_void,             ZEND_ACC_PUBLIC)
	SPL_ME(Array, ksort,            arginfo_array_void,             ZEND_ACC_PUBLIC)
	SPL_ME(Array, uasort,           arginfo_array_uXsort,           ZEND_ACC_PUBLIC)
	SPL_ME(Array, uksort,           arginfo_array_uXsort,           ZEND_ACC_PUBLIC)
	SPL_ME(Array, natsort,          arginfo_array_void,             ZEND_ACC_PUBLIC)
	SPL_ME(Array, natcasesort,      arginfo_array_void,             ZEND_ACC_PUBLIC)
	SPL_ME(Array, unserialize,      arginfo_array_unserialize,      ZEND_ACC_PUBLIC)
	SPL_ME(Array, serialize,        arginfo_array_void,             ZEND_ACC_PUBLIC)
	/* ArrayIterator specific */
	SPL_ME(Array, rewind,           arginfo_array_void,             ZEND_ACC_PUBLIC)
	SPL_ME(Array, current,          arginfo_array_void,             ZEND_ACC_PUBLIC)
	SPL_ME(Array, key,              arginfo_array_void,             ZEND_ACC_PUBLIC)
	SPL_ME(Array, next,             arginfo_array_void,             ZEND_ACC_PUBLIC)
	SPL_ME(Array, valid,            arginfo_array_void,             ZEND_ACC_PUBLIC)
	SPL_ME(Array, seek,             arginfo_array_seek,             ZEND_ACC_PUBLIC)
	PHP_FE_END
};

static const zend_function_entry spl_funcs_RecursiveArrayIterator[] = {
	SPL_ME(Array, hasChildren,      arginfo_array_void,             ZEND_ACC_PUBLIC)
	SPL_ME(Array, getChildren,      arginfo_array_void,             ZEND_ACC_PUBLIC)
	PHP_FE_END
};
/* }}} */

/* {{{ PHP_MINIT_FUNCTION(spl_array)
 * Register ArrayObject, ArrayIterator and RecursiveArrayIterator, install
 * the overloaded object handlers, and register class constants. */
PHP_MINIT_FUNCTION(spl_array)
{
	REGISTER_SPL_STD_CLASS_EX(ArrayObject, spl_array_object_new, spl_funcs_ArrayObject);
	REGISTER_SPL_IMPLEMENTS(ArrayObject, Aggregate);
	REGISTER_SPL_IMPLEMENTS(ArrayObject, ArrayAccess);
	REGISTER_SPL_IMPLEMENTS(ArrayObject, Serializable);
	REGISTER_SPL_IMPLEMENTS(ArrayObject, Countable);
	memcpy(&spl_handler_ArrayObject, zend_get_std_object_handlers(), sizeof(zend_object_handlers));

	spl_handler_ArrayObject.clone_obj = spl_array_object_clone;
	spl_handler_ArrayObject.read_dimension = spl_array_read_dimension;
	spl_handler_ArrayObject.write_dimension = spl_array_write_dimension;
	spl_handler_ArrayObject.unset_dimension = spl_array_unset_dimension;
	spl_handler_ArrayObject.has_dimension = spl_array_has_dimension;
	spl_handler_ArrayObject.count_elements = spl_array_object_count_elements;

	spl_handler_ArrayObject.get_properties = spl_array_get_properties;
	spl_handler_ArrayObject.get_debug_info = spl_array_get_debug_info;
	spl_handler_ArrayObject.get_gc = spl_array_get_gc;
	spl_handler_ArrayObject.read_property = spl_array_read_property;
	spl_handler_ArrayObject.write_property = spl_array_write_property;
	spl_handler_ArrayObject.get_property_ptr_ptr = spl_array_get_property_ptr_ptr;
	spl_handler_ArrayObject.has_property = spl_array_has_property;
	spl_handler_ArrayObject.unset_property = spl_array_unset_property;

	spl_handler_ArrayObject.compare_objects = spl_array_compare_objects;

	REGISTER_SPL_STD_CLASS_EX(ArrayIterator, spl_array_object_new, spl_funcs_ArrayIterator);
	REGISTER_SPL_IMPLEMENTS(ArrayIterator, Iterator);
	REGISTER_SPL_IMPLEMENTS(ArrayIterator, ArrayAccess);
	REGISTER_SPL_IMPLEMENTS(ArrayIterator, SeekableIterator);
	REGISTER_SPL_IMPLEMENTS(ArrayIterator, Serializable);
	REGISTER_SPL_IMPLEMENTS(ArrayIterator, Countable);
	/* ArrayIterator shares ArrayObject's handlers plus a get_iterator hook */
	memcpy(&spl_handler_ArrayIterator, &spl_handler_ArrayObject, sizeof(zend_object_handlers));
	spl_ce_ArrayIterator->get_iterator = spl_array_get_iterator;

	REGISTER_SPL_SUB_CLASS_EX(RecursiveArrayIterator, ArrayIterator, spl_array_object_new, spl_funcs_RecursiveArrayIterator);
	REGISTER_SPL_IMPLEMENTS(RecursiveArrayIterator, RecursiveIterator);
	spl_ce_RecursiveArrayIterator->get_iterator = spl_array_get_iterator;

	REGISTER_SPL_CLASS_CONST_LONG(ArrayObject,   "STD_PROP_LIST",    SPL_ARRAY_STD_PROP_LIST);
	REGISTER_SPL_CLASS_CONST_LONG(ArrayObject,   "ARRAY_AS_PROPS",   SPL_ARRAY_ARRAY_AS_PROPS);

	REGISTER_SPL_CLASS_CONST_LONG(ArrayIterator, "STD_PROP_LIST",    SPL_ARRAY_STD_PROP_LIST);
	REGISTER_SPL_CLASS_CONST_LONG(ArrayIterator, "ARRAY_AS_PROPS",   SPL_ARRAY_ARRAY_AS_PROPS);

	REGISTER_SPL_CLASS_CONST_LONG(RecursiveArrayIterator, "CHILD_ARRAYS_ONLY", SPL_ARRAY_CHILD_ARRAYS_ONLY);

	return SUCCESS;
}
/* }}} */

/*
 * Local variables:
 * tab-width: 4
 * c-basic-offset: 4
 * End:
 * vim600: fdm=marker
 * vim: noet sw=4 ts=4
 */
/* Resolve the zval** slot in the backing hash table for a dimension access
 * ($obj[$offset]), creating the slot for write contexts.
 *
 * check_inherited: unused here (kept for handler-signature parity).
 * type: one of BP_VAR_R / BP_VAR_IS / BP_VAR_UNSET / BP_VAR_W / BP_VAR_RW;
 *       read-ish contexts return uninitialized_zval_ptr for missing keys
 *       (with a notice for R/RW), write contexts insert a NULL zval.
 *
 * Returns &EG(uninitialized_zval_ptr) when the offset cannot be resolved and
 * &EG(error_zval_ptr) when a write is rejected. */
static zval **spl_array_get_dimension_ptr_ptr(int check_inherited, zval *object, zval *offset, int type TSRMLS_DC) /* {{{ */
{
	spl_array_object *intern = (spl_array_object*)zend_object_store_get_object(object TSRMLS_CC);
	zval **retval;
	char *key;
	uint len;
	long index;
	HashTable *ht = spl_array_get_hash_table(intern, 0 TSRMLS_CC);

	/* BUG FIX: ht may be NULL when the wrapped value was replaced from
	 * outside and is no longer an array (the other accessors in this file
	 * guard this the same way); dereferencing it below would crash. */
	if (!offset || !ht) {
		return &EG(uninitialized_zval_ptr);
	}

	/* writes are forbidden while a sort-family call holds the table */
	if ((type == BP_VAR_W || type == BP_VAR_RW) && (ht->nApplyCount > 0)) {
		zend_error(E_WARNING, "Modification of ArrayObject during sorting is prohibited");
		return &EG(error_zval_ptr);
	}

	switch (Z_TYPE_P(offset)) {
	case IS_STRING:
		key = Z_STRVAL_P(offset);
		len = Z_STRLEN_P(offset) + 1;
string_offest:
		if (zend_symtable_find(ht, key, len, (void **) &retval) == FAILURE) {
			switch (type) {
				case BP_VAR_R:
					zend_error(E_NOTICE, "Undefined index: %s", key);
					/* fallthrough */
				case BP_VAR_UNSET:
				case BP_VAR_IS:
					retval = &EG(uninitialized_zval_ptr);
					break;
				case BP_VAR_RW:
					zend_error(E_NOTICE,"Undefined index: %s", key);
					/* fallthrough */
				case BP_VAR_W: {
					zval *value;
					ALLOC_INIT_ZVAL(value);
					zend_symtable_update(ht, key, len, (void**)&value, sizeof(void*), (void **)&retval);
				}
			}
		}
		return retval;
	case IS_NULL:
		/* NULL offset behaves as the empty-string key */
		key = "";
		len = 1;
		goto string_offest;
	case IS_RESOURCE:
		zend_error(E_STRICT, "Resource ID#%ld used as offset, casting to integer (%ld)", Z_LVAL_P(offset), Z_LVAL_P(offset));
		/* fallthrough */
	case IS_DOUBLE:
	case IS_BOOL:
	case IS_LONG:
		if (offset->type == IS_DOUBLE) {
			index = (long)Z_DVAL_P(offset);
		} else {
			index = Z_LVAL_P(offset);
		}
		if (zend_hash_index_find(ht, index, (void **) &retval) == FAILURE) {
			switch (type) {
				case BP_VAR_R:
					zend_error(E_NOTICE, "Undefined offset: %ld", index);
					/* fallthrough */
				case BP_VAR_UNSET:
				case BP_VAR_IS:
					retval = &EG(uninitialized_zval_ptr);
					break;
				case BP_VAR_RW:
					zend_error(E_NOTICE, "Undefined offset: %ld", index);
					/* fallthrough */
				case BP_VAR_W: {
					zval *value;
					ALLOC_INIT_ZVAL(value);
					zend_hash_index_update(ht, index, (void**)&value, sizeof(void*), (void **)&retval);
				}
			}
		}
		return retval;
	default:
		zend_error(E_WARNING, "Illegal offset type");
		return (type == BP_VAR_W || type == BP_VAR_RW) ? &EG(error_zval_ptr) : &EG(uninitialized_zval_ptr);
	}
} /* }}} */
static zval **spl_array_get_dimension_ptr_ptr(int check_inherited, zval *object, zval *offset, int type TSRMLS_DC) /* {{{ */ { spl_array_object *intern = (spl_array_object*)zend_object_store_get_object(object TSRMLS_CC); zval **retval; char *key; uint len; long index; HashTable *ht = spl_array_get_hash_table(intern, 0 TSRMLS_CC); if (!offset || !ht) { return &EG(uninitialized_zval_ptr); } if ((type == BP_VAR_W || type == BP_VAR_RW) && (ht->nApplyCount > 0)) { zend_error(E_WARNING, "Modification of ArrayObject during sorting is prohibited"); return &EG(error_zval_ptr);; } switch (Z_TYPE_P(offset)) { case IS_STRING: key = Z_STRVAL_P(offset); len = Z_STRLEN_P(offset) + 1; string_offest: if (zend_symtable_find(ht, key, len, (void **) &retval) == FAILURE) { switch (type) { case BP_VAR_R: zend_error(E_NOTICE, "Undefined index: %s", key); case BP_VAR_UNSET: case BP_VAR_IS: retval = &EG(uninitialized_zval_ptr); break; case BP_VAR_RW: zend_error(E_NOTICE,"Undefined index: %s", key); case BP_VAR_W: { zval *value; ALLOC_INIT_ZVAL(value); zend_symtable_update(ht, key, len, (void**)&value, sizeof(void*), (void **)&retval); } } } return retval; case IS_NULL: key = ""; len = 1; goto string_offest; case IS_RESOURCE: zend_error(E_STRICT, "Resource ID#%ld used as offset, casting to integer (%ld)", Z_LVAL_P(offset), Z_LVAL_P(offset)); case IS_DOUBLE: case IS_BOOL: case IS_LONG: if (offset->type == IS_DOUBLE) { index = (long)Z_DVAL_P(offset); } else { index = Z_LVAL_P(offset); } if (zend_hash_index_find(ht, index, (void **) &retval) == FAILURE) { switch (type) { case BP_VAR_R: zend_error(E_NOTICE, "Undefined offset: %ld", index); case BP_VAR_UNSET: case BP_VAR_IS: retval = &EG(uninitialized_zval_ptr); break; case BP_VAR_RW: zend_error(E_NOTICE, "Undefined offset: %ld", index); case BP_VAR_W: { zval *value; ALLOC_INIT_ZVAL(value); zend_hash_index_update(ht, index, (void**)&value, sizeof(void*), (void **)&retval); } } } return retval; default: zend_error(E_WARNING, "Illegal offset 
type"); return (type == BP_VAR_W || type == BP_VAR_RW) ? &EG(error_zval_ptr) : &EG(uninitialized_zval_ptr); } } /* }}} */
{'added': [(311, '\tif (!offset || !ht) {'), (629, '\t\t\tcase IS_STRING:'), (641, '\t\t\tcase IS_BOOL:'), (1813, '\t\tif (!php_var_unserialize(&intern->array, &p, s + buf_len, &var_hash TSRMLS_CC)'), (1814, '\t\t\t\t|| (Z_TYPE_P(intern->array) != IS_ARRAY && Z_TYPE_P(intern->array) != IS_OBJECT)) {'), (1815, '\t\t\tzval_ptr_dtor(&intern->array);')], 'deleted': [(311, '\tif (!offset) {'), (629, '\t\t\tcase IS_STRING:'), (641, '\t\t\tcase IS_BOOL:'), (1813, '\t\tif (!php_var_unserialize(&intern->array, &p, s + buf_len, &var_hash TSRMLS_CC)) {')]}
6
4
1,540
10,658
76
469
26
https://github.com/php/php-src
CVE-2016-7417
CWE-20
1,429
regexp.c
C
regtilde
/* vi:set ts=8 sts=4 sw=4 noet: * * Handling of regular expressions: vim_regcomp(), vim_regexec(), vim_regsub() */ // By default: do not create debugging logs or files related to regular // expressions, even when compiling with -DDEBUG. // Uncomment the second line to get the regexp debugging. #undef DEBUG // #define DEBUG #include "vim.h" #ifdef DEBUG // show/save debugging data when BT engine is used # define BT_REGEXP_DUMP // save the debugging data to a file instead of displaying it # define BT_REGEXP_LOG # define BT_REGEXP_DEBUG_LOG # define BT_REGEXP_DEBUG_LOG_NAME "bt_regexp_debug.log" #endif #ifdef FEAT_RELTIME static sig_atomic_t dummy_timeout_flag = 0; static volatile sig_atomic_t *timeout_flag = &dummy_timeout_flag; #endif /* * Magic characters have a special meaning, they don't match literally. * Magic characters are negative. This separates them from literal characters * (possibly multi-byte). Only ASCII characters can be Magic. */ #define Magic(x) ((int)(x) - 256) #define un_Magic(x) ((x) + 256) #define is_Magic(x) ((x) < 0) static int no_Magic(int x) { if (is_Magic(x)) return un_Magic(x); return x; } static int toggle_Magic(int x) { if (is_Magic(x)) return un_Magic(x); return Magic(x); } #ifdef FEAT_RELTIME void init_regexp_timeout(long msec) { timeout_flag = start_timeout(msec); } void disable_regexp_timeout(void) { stop_timeout(); timeout_flag = &dummy_timeout_flag; } #endif /* * The first byte of the BT regexp internal "program" is actually this magic * number; the start node begins in the second byte. It's used to catch the * most severe mutilation of the program by the caller. */ #define REGMAGIC 0234 /* * Utility definitions. 
*/ #define UCHARAT(p) ((int)*(char_u *)(p)) // Used for an error (down from) vim_regcomp(): give the error message, set // rc_did_emsg and return NULL #define EMSG_RET_NULL(m) return (emsg((m)), rc_did_emsg = TRUE, (void *)NULL) #define IEMSG_RET_NULL(m) return (iemsg((m)), rc_did_emsg = TRUE, (void *)NULL) #define EMSG_RET_FAIL(m) return (emsg((m)), rc_did_emsg = TRUE, FAIL) #define EMSG2_RET_NULL(m, c) return (semsg((const char *)(m), (c) ? "" : "\\"), rc_did_emsg = TRUE, (void *)NULL) #define EMSG3_RET_NULL(m, c, a) return (semsg((const char *)(m), (c) ? "" : "\\", (a)), rc_did_emsg = TRUE, (void *)NULL) #define EMSG2_RET_FAIL(m, c) return (semsg((const char *)(m), (c) ? "" : "\\"), rc_did_emsg = TRUE, FAIL) #define EMSG_ONE_RET_NULL EMSG2_RET_NULL(_(e_invalid_item_in_str_brackets), reg_magic == MAGIC_ALL) #define MAX_LIMIT (32767L << 16L) #define NOT_MULTI 0 #define MULTI_ONE 1 #define MULTI_MULT 2 // return values for regmatch() #define RA_FAIL 1 // something failed, abort #define RA_CONT 2 // continue in inner loop #define RA_BREAK 3 // break inner loop #define RA_MATCH 4 // successful match #define RA_NOMATCH 5 // didn't match /* * Return NOT_MULTI if c is not a "multi" operator. * Return MULTI_ONE if c is a single "multi" operator. * Return MULTI_MULT if c is a multi "multi" operator. */ static int re_multi_type(int c) { if (c == Magic('@') || c == Magic('=') || c == Magic('?')) return MULTI_ONE; if (c == Magic('*') || c == Magic('+') || c == Magic('{')) return MULTI_MULT; return NOT_MULTI; } static char_u *reg_prev_sub = NULL; /* * REGEXP_INRANGE contains all characters which are always special in a [] * range after '\'. * REGEXP_ABBR contains all characters which act as abbreviations after '\'. * These are: * \n - New line (NL). * \r - Carriage Return (CR). * \t - Tab (TAB). * \e - Escape (ESC). * \b - Backspace (Ctrl_H). 
* \d - Character code in decimal, eg \d123 * \o - Character code in octal, eg \o80 * \x - Character code in hex, eg \x4a * \u - Multibyte character code, eg \u20ac * \U - Long multibyte character code, eg \U12345678 */ static char_u REGEXP_INRANGE[] = "]^-n\\"; static char_u REGEXP_ABBR[] = "nrtebdoxuU"; /* * Translate '\x' to its control character, except "\n", which is Magic. */ static int backslash_trans(int c) { switch (c) { case 'r': return CAR; case 't': return TAB; case 'e': return ESC; case 'b': return BS; } return c; } /* * Check for a character class name "[:name:]". "pp" points to the '['. * Returns one of the CLASS_ items. CLASS_NONE means that no item was * recognized. Otherwise "pp" is advanced to after the item. */ static int get_char_class(char_u **pp) { static const char *(class_names[]) = { "alnum:]", #define CLASS_ALNUM 0 "alpha:]", #define CLASS_ALPHA 1 "blank:]", #define CLASS_BLANK 2 "cntrl:]", #define CLASS_CNTRL 3 "digit:]", #define CLASS_DIGIT 4 "graph:]", #define CLASS_GRAPH 5 "lower:]", #define CLASS_LOWER 6 "print:]", #define CLASS_PRINT 7 "punct:]", #define CLASS_PUNCT 8 "space:]", #define CLASS_SPACE 9 "upper:]", #define CLASS_UPPER 10 "xdigit:]", #define CLASS_XDIGIT 11 "tab:]", #define CLASS_TAB 12 "return:]", #define CLASS_RETURN 13 "backspace:]", #define CLASS_BACKSPACE 14 "escape:]", #define CLASS_ESCAPE 15 "ident:]", #define CLASS_IDENT 16 "keyword:]", #define CLASS_KEYWORD 17 "fname:]", #define CLASS_FNAME 18 }; #define CLASS_NONE 99 int i; if ((*pp)[1] == ':') { for (i = 0; i < (int)ARRAY_LENGTH(class_names); ++i) if (STRNCMP(*pp + 2, class_names[i], STRLEN(class_names[i])) == 0) { *pp += STRLEN(class_names[i]) + 2; return i; } } return CLASS_NONE; } /* * Specific version of character class functions. * Using a table to keep this fast. 
*/ static short class_tab[256]; #define RI_DIGIT 0x01 #define RI_HEX 0x02 #define RI_OCTAL 0x04 #define RI_WORD 0x08 #define RI_HEAD 0x10 #define RI_ALPHA 0x20 #define RI_LOWER 0x40 #define RI_UPPER 0x80 #define RI_WHITE 0x100 static void init_class_tab(void) { int i; static int done = FALSE; if (done) return; for (i = 0; i < 256; ++i) { if (i >= '0' && i <= '7') class_tab[i] = RI_DIGIT + RI_HEX + RI_OCTAL + RI_WORD; else if (i >= '8' && i <= '9') class_tab[i] = RI_DIGIT + RI_HEX + RI_WORD; else if (i >= 'a' && i <= 'f') class_tab[i] = RI_HEX + RI_WORD + RI_HEAD + RI_ALPHA + RI_LOWER; else if (i >= 'g' && i <= 'z') class_tab[i] = RI_WORD + RI_HEAD + RI_ALPHA + RI_LOWER; else if (i >= 'A' && i <= 'F') class_tab[i] = RI_HEX + RI_WORD + RI_HEAD + RI_ALPHA + RI_UPPER; else if (i >= 'G' && i <= 'Z') class_tab[i] = RI_WORD + RI_HEAD + RI_ALPHA + RI_UPPER; else if (i == '_') class_tab[i] = RI_WORD + RI_HEAD; else class_tab[i] = 0; } class_tab[' '] |= RI_WHITE; class_tab['\t'] |= RI_WHITE; done = TRUE; } #define ri_digit(c) ((c) < 0x100 && (class_tab[c] & RI_DIGIT)) #define ri_hex(c) ((c) < 0x100 && (class_tab[c] & RI_HEX)) #define ri_octal(c) ((c) < 0x100 && (class_tab[c] & RI_OCTAL)) #define ri_word(c) ((c) < 0x100 && (class_tab[c] & RI_WORD)) #define ri_head(c) ((c) < 0x100 && (class_tab[c] & RI_HEAD)) #define ri_alpha(c) ((c) < 0x100 && (class_tab[c] & RI_ALPHA)) #define ri_lower(c) ((c) < 0x100 && (class_tab[c] & RI_LOWER)) #define ri_upper(c) ((c) < 0x100 && (class_tab[c] & RI_UPPER)) #define ri_white(c) ((c) < 0x100 && (class_tab[c] & RI_WHITE)) // flags for regflags #define RF_ICASE 1 // ignore case #define RF_NOICASE 2 // don't ignore case #define RF_HASNL 4 // can match a NL #define RF_ICOMBINE 8 // ignore combining characters #define RF_LOOKBH 16 // uses "\@<=" or "\@<!" /* * Global work variables for vim_regcomp(). */ static char_u *regparse; // Input-scan pointer. static int regnpar; // () count. 
static int wants_nfa; // regex should use NFA engine #ifdef FEAT_SYN_HL static int regnzpar; // \z() count. static int re_has_z; // \z item detected #endif static unsigned regflags; // RF_ flags for prog #if defined(FEAT_SYN_HL) || defined(PROTO) static int had_eol; // TRUE when EOL found by vim_regcomp() #endif static magic_T reg_magic; // magicness of the pattern static int reg_string; // matching with a string instead of a buffer // line static int reg_strict; // "[abc" is illegal /* * META contains all characters that may be magic, except '^' and '$'. */ // META[] is used often enough to justify turning it into a table. static char_u META_flags[] = { 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, // % & ( ) * + . 0, 0, 0, 0, 0, 1, 1, 0, 1, 1, 1, 1, 0, 0, 1, 0, // 1 2 3 4 5 6 7 8 9 < = > ? 0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 1, 1, 1, 1, // @ A C D F H I K L M O 1, 1, 0, 1, 1, 0, 1, 0, 1, 1, 0, 1, 1, 1, 0, 1, // P S U V W X Z [ _ 1, 0, 0, 1, 0, 1, 1, 1, 1, 0, 1, 1, 0, 0, 0, 1, // a c d f h i k l m n o 0, 1, 0, 1, 1, 0, 1, 0, 1, 1, 0, 1, 1, 1, 1, 1, // p s u v w x z { | ~ 1, 0, 0, 1, 0, 1, 1, 1, 1, 0, 1, 1, 1, 0, 1 }; static int curchr; // currently parsed character // Previous character. 
Note: prevchr is sometimes -1 when we are not at the // start, eg in /[ ^I]^ the pattern was never found even if it existed, // because ^ was taken to be magic -- webb static int prevchr; static int prevprevchr; // previous-previous character static int nextchr; // used for ungetchr() // arguments for reg() #define REG_NOPAREN 0 // toplevel reg() #define REG_PAREN 1 // \(\) #define REG_ZPAREN 2 // \z(\) #define REG_NPAREN 3 // \%(\) typedef struct { char_u *regparse; int prevchr_len; int curchr; int prevchr; int prevprevchr; int nextchr; int at_start; int prev_at_start; int regnpar; } parse_state_T; static void initchr(char_u *); static int getchr(void); static void skipchr_keepstart(void); static int peekchr(void); static void skipchr(void); static void ungetchr(void); static long gethexchrs(int maxinputlen); static long getoctchrs(void); static long getdecchrs(void); static int coll_get_char(void); static int prog_magic_wrong(void); static int cstrncmp(char_u *s1, char_u *s2, int *n); static char_u *cstrchr(char_u *, int); static int re_mult_next(char *what); static int reg_iswordc(int); #ifdef FEAT_EVAL static void report_re_switch(char_u *pat); #endif static regengine_T bt_regengine; static regengine_T nfa_regengine; /* * Return TRUE if compiled regular expression "prog" can match a line break. */ int re_multiline(regprog_T *prog) { return (prog->regflags & RF_HASNL); } /* * Check for an equivalence class name "[=a=]". "pp" points to the '['. * Returns a character representing the class. Zero means that no item was * recognized. Otherwise "pp" is advanced to after the item. */ static int get_equi_class(char_u **pp) { int c; int l = 1; char_u *p = *pp; if (p[1] == '=' && p[2] != NUL) { if (has_mbyte) l = (*mb_ptr2len)(p + 2); if (p[l + 2] == '=' && p[l + 3] == ']') { if (has_mbyte) c = mb_ptr2char(p + 2); else c = p[2]; *pp += l + 4; return c; } } return 0; } /* * Check for a collating element "[.a.]". "pp" points to the '['. * Returns a character. 
Zero means that no item was recognized. Otherwise * "pp" is advanced to after the item. * Currently only single characters are recognized! */ static int get_coll_element(char_u **pp) { int c; int l = 1; char_u *p = *pp; if (p[0] != NUL && p[1] == '.' && p[2] != NUL) { if (has_mbyte) l = (*mb_ptr2len)(p + 2); if (p[l + 2] == '.' && p[l + 3] == ']') { if (has_mbyte) c = mb_ptr2char(p + 2); else c = p[2]; *pp += l + 4; return c; } } return 0; } static int reg_cpo_lit; // 'cpoptions' contains 'l' flag static int reg_cpo_bsl; // 'cpoptions' contains '\' flag static void get_cpo_flags(void) { reg_cpo_lit = vim_strchr(p_cpo, CPO_LITERAL) != NULL; reg_cpo_bsl = vim_strchr(p_cpo, CPO_BACKSL) != NULL; } /* * Skip over a "[]" range. * "p" must point to the character after the '['. * The returned pointer is on the matching ']', or the terminating NUL. */ static char_u * skip_anyof(char_u *p) { int l; if (*p == '^') // Complement of range. ++p; if (*p == ']' || *p == '-') ++p; while (*p != NUL && *p != ']') { if (has_mbyte && (l = (*mb_ptr2len)(p)) > 1) p += l; else if (*p == '-') { ++p; if (*p != ']' && *p != NUL) MB_PTR_ADV(p); } else if (*p == '\\' && !reg_cpo_bsl && (vim_strchr(REGEXP_INRANGE, p[1]) != NULL || (!reg_cpo_lit && vim_strchr(REGEXP_ABBR, p[1]) != NULL))) p += 2; else if (*p == '[') { if (get_char_class(&p) == CLASS_NONE && get_equi_class(&p) == 0 && get_coll_element(&p) == 0 && *p != NUL) ++p; // it is not a class name and not NUL } else ++p; } return p; } /* * Skip past regular expression. * Stop at end of "startp" or where "delim" is found ('/', '?', etc). * Take care of characters with a backslash in front of it. * Skip strings inside [ and ]. */ char_u * skip_regexp( char_u *startp, int delim, int magic) { return skip_regexp_ex(startp, delim, magic, NULL, NULL, NULL); } /* * Call skip_regexp() and when the delimiter does not match give an error and * return NULL. 
*/ char_u * skip_regexp_err( char_u *startp, int delim, int magic) { char_u *p = skip_regexp(startp, delim, magic); if (*p != delim) { semsg(_(e_missing_delimiter_after_search_pattern_str), startp); return NULL; } return p; } /* * skip_regexp() with extra arguments: * When "newp" is not NULL and "dirc" is '?', make an allocated copy of the * expression and change "\?" to "?". If "*newp" is not NULL the expression * is changed in-place. * If a "\?" is changed to "?" then "dropped" is incremented, unless NULL. * If "magic_val" is not NULL, returns the effective magicness of the pattern */ char_u * skip_regexp_ex( char_u *startp, int dirc, int magic, char_u **newp, int *dropped, magic_T *magic_val) { magic_T mymagic; char_u *p = startp; if (magic) mymagic = MAGIC_ON; else mymagic = MAGIC_OFF; get_cpo_flags(); for (; p[0] != NUL; MB_PTR_ADV(p)) { if (p[0] == dirc) // found end of regexp break; if ((p[0] == '[' && mymagic >= MAGIC_ON) || (p[0] == '\\' && p[1] == '[' && mymagic <= MAGIC_OFF)) { p = skip_anyof(p + 1); if (p[0] == NUL) break; } else if (p[0] == '\\' && p[1] != NUL) { if (dirc == '?' && newp != NULL && p[1] == '?') { // change "\?" to "?", make a copy first. if (*newp == NULL) { *newp = vim_strsave(startp); if (*newp != NULL) p = *newp + (p - startp); } if (dropped != NULL) ++*dropped; if (*newp != NULL) STRMOVE(p, p + 1); else ++p; } else ++p; // skip next character if (*p == 'v') mymagic = MAGIC_ALL; else if (*p == 'V') mymagic = MAGIC_NONE; } } if (magic_val != NULL) *magic_val = mymagic; return p; } /* * Functions for getting characters from the regexp input. */ static int prevchr_len; // byte length of previous char static int at_start; // True when on the first character static int prev_at_start; // True when on the second character /* * Start parsing at "str". 
*/ static void initchr(char_u *str) { regparse = str; prevchr_len = 0; curchr = prevprevchr = prevchr = nextchr = -1; at_start = TRUE; prev_at_start = FALSE; } /* * Save the current parse state, so that it can be restored and parsing * starts in the same state again. */ static void save_parse_state(parse_state_T *ps) { ps->regparse = regparse; ps->prevchr_len = prevchr_len; ps->curchr = curchr; ps->prevchr = prevchr; ps->prevprevchr = prevprevchr; ps->nextchr = nextchr; ps->at_start = at_start; ps->prev_at_start = prev_at_start; ps->regnpar = regnpar; } /* * Restore a previously saved parse state. */ static void restore_parse_state(parse_state_T *ps) { regparse = ps->regparse; prevchr_len = ps->prevchr_len; curchr = ps->curchr; prevchr = ps->prevchr; prevprevchr = ps->prevprevchr; nextchr = ps->nextchr; at_start = ps->at_start; prev_at_start = ps->prev_at_start; regnpar = ps->regnpar; } /* * Get the next character without advancing. */ static int peekchr(void) { static int after_slash = FALSE; if (curchr == -1) { switch (curchr = regparse[0]) { case '.': case '[': case '~': // magic when 'magic' is on if (reg_magic >= MAGIC_ON) curchr = Magic(curchr); break; case '(': case ')': case '{': case '%': case '+': case '=': case '?': case '@': case '!': case '&': case '|': case '<': case '>': case '#': // future ext. case '"': // future ext. case '\'': // future ext. case ',': // future ext. case '-': // future ext. case ':': // future ext. case ';': // future ext. case '`': // future ext. case '/': // Can't be used in / command // magic only after "\v" if (reg_magic == MAGIC_ALL) curchr = Magic(curchr); break; case '*': // * is not magic as the very first character, eg "?*ptr", when // after '^', eg "/^*ptr" and when after "\(", "\|", "\&". 
But // "\(\*" is not magic, thus must be magic if "after_slash" if (reg_magic >= MAGIC_ON && !at_start && !(prev_at_start && prevchr == Magic('^')) && (after_slash || (prevchr != Magic('(') && prevchr != Magic('&') && prevchr != Magic('|')))) curchr = Magic('*'); break; case '^': // '^' is only magic as the very first character and if it's after // "\(", "\|", "\&' or "\n" if (reg_magic >= MAGIC_OFF && (at_start || reg_magic == MAGIC_ALL || prevchr == Magic('(') || prevchr == Magic('|') || prevchr == Magic('&') || prevchr == Magic('n') || (no_Magic(prevchr) == '(' && prevprevchr == Magic('%')))) { curchr = Magic('^'); at_start = TRUE; prev_at_start = FALSE; } break; case '$': // '$' is only magic as the very last char and if it's in front of // either "\|", "\)", "\&", or "\n" if (reg_magic >= MAGIC_OFF) { char_u *p = regparse + 1; int is_magic_all = (reg_magic == MAGIC_ALL); // ignore \c \C \m \M \v \V and \Z after '$' while (p[0] == '\\' && (p[1] == 'c' || p[1] == 'C' || p[1] == 'm' || p[1] == 'M' || p[1] == 'v' || p[1] == 'V' || p[1] == 'Z')) { if (p[1] == 'v') is_magic_all = TRUE; else if (p[1] == 'm' || p[1] == 'M' || p[1] == 'V') is_magic_all = FALSE; p += 2; } if (p[0] == NUL || (p[0] == '\\' && (p[1] == '|' || p[1] == '&' || p[1] == ')' || p[1] == 'n')) || (is_magic_all && (p[0] == '|' || p[0] == '&' || p[0] == ')')) || reg_magic == MAGIC_ALL) curchr = Magic('$'); } break; case '\\': { int c = regparse[1]; if (c == NUL) curchr = '\\'; // trailing '\' else if (c <= '~' && META_flags[c]) { /* * META contains everything that may be magic sometimes, * except ^ and $ ("\^" and "\$" are only magic after * "\V"). We now fetch the next character and toggle its * magicness. Therefore, \ is so meta-magic that it is * not in META. 
*/ curchr = -1; prev_at_start = at_start; at_start = FALSE; // be able to say "/\*ptr" ++regparse; ++after_slash; peekchr(); --regparse; --after_slash; curchr = toggle_Magic(curchr); } else if (vim_strchr(REGEXP_ABBR, c)) { /* * Handle abbreviations, like "\t" for TAB -- webb */ curchr = backslash_trans(c); } else if (reg_magic == MAGIC_NONE && (c == '$' || c == '^')) curchr = toggle_Magic(c); else { /* * Next character can never be (made) magic? * Then backslashing it won't do anything. */ if (has_mbyte) curchr = (*mb_ptr2char)(regparse + 1); else curchr = c; } break; } default: if (has_mbyte) curchr = (*mb_ptr2char)(regparse); } } return curchr; } /* * Eat one lexed character. Do this in a way that we can undo it. */ static void skipchr(void) { // peekchr() eats a backslash, do the same here if (*regparse == '\\') prevchr_len = 1; else prevchr_len = 0; if (regparse[prevchr_len] != NUL) { if (enc_utf8) // exclude composing chars that mb_ptr2len does include prevchr_len += utf_ptr2len(regparse + prevchr_len); else if (has_mbyte) prevchr_len += (*mb_ptr2len)(regparse + prevchr_len); else ++prevchr_len; } regparse += prevchr_len; prev_at_start = at_start; at_start = FALSE; prevprevchr = prevchr; prevchr = curchr; curchr = nextchr; // use previously unget char, or -1 nextchr = -1; } /* * Skip a character while keeping the value of prev_at_start for at_start. * prevchr and prevprevchr are also kept. */ static void skipchr_keepstart(void) { int as = prev_at_start; int pr = prevchr; int prpr = prevprevchr; skipchr(); at_start = as; prevchr = pr; prevprevchr = prpr; } /* * Get the next character from the pattern. We know about magic and such, so * therefore we need a lexical analyzer. */ static int getchr(void) { int chr = peekchr(); skipchr(); return chr; } /* * put character back. Works only once! 
*/ static void ungetchr(void) { nextchr = curchr; curchr = prevchr; prevchr = prevprevchr; at_start = prev_at_start; prev_at_start = FALSE; // Backup regparse, so that it's at the same position as before the // getchr(). regparse -= prevchr_len; } /* * Get and return the value of the hex string at the current position. * Return -1 if there is no valid hex number. * The position is updated: * blahblah\%x20asdf * before-^ ^-after * The parameter controls the maximum number of input characters. This will be * 2 when reading a \%x20 sequence and 4 when reading a \%u20AC sequence. */ static long gethexchrs(int maxinputlen) { long_u nr = 0; int c; int i; for (i = 0; i < maxinputlen; ++i) { c = regparse[0]; if (!vim_isxdigit(c)) break; nr <<= 4; nr |= hex2nr(c); ++regparse; } if (i == 0) return -1; return (long)nr; } /* * Get and return the value of the decimal string immediately after the * current position. Return -1 for invalid. Consumes all digits. */ static long getdecchrs(void) { long_u nr = 0; int c; int i; for (i = 0; ; ++i) { c = regparse[0]; if (c < '0' || c > '9') break; nr *= 10; nr += c - '0'; ++regparse; curchr = -1; // no longer valid } if (i == 0) return -1; return (long)nr; } /* * get and return the value of the octal string immediately after the current * position. Return -1 for invalid, or 0-255 for valid. Smart enough to handle * numbers > 377 correctly (for example, 400 is treated as 40) and doesn't * treat 8 or 9 as recognised characters. Position is updated: * blahblah\%o210asdf * before-^ ^-after */ static long getoctchrs(void) { long_u nr = 0; int c; int i; for (i = 0; i < 3 && nr < 040; ++i) { c = regparse[0]; if (c < '0' || c > '7') break; nr <<= 3; nr |= hex2nr(c); ++regparse; } if (i == 0) return -1; return (long)nr; } /* * read_limits - Read two integers to be taken as a minimum and maximum. * If the first character is '-', then the range is reversed. * Should end with 'end'. 
If minval is missing, zero is default, if maxval is * missing, a very big number is the default. */ static int read_limits(long *minval, long *maxval) { int reverse = FALSE; char_u *first_char; long tmp; if (*regparse == '-') { // Starts with '-', so reverse the range later regparse++; reverse = TRUE; } first_char = regparse; *minval = getdigits(&regparse); if (*regparse == ',') // There is a comma { if (vim_isdigit(*++regparse)) *maxval = getdigits(&regparse); else *maxval = MAX_LIMIT; } else if (VIM_ISDIGIT(*first_char)) *maxval = *minval; // It was \{n} or \{-n} else *maxval = MAX_LIMIT; // It was \{} or \{-} if (*regparse == '\\') regparse++; // Allow either \{...} or \{...\} if (*regparse != '}') EMSG2_RET_FAIL(_(e_syntax_error_in_str_curlies), reg_magic == MAGIC_ALL); /* * Reverse the range if there was a '-', or make sure it is in the right * order otherwise. */ if ((!reverse && *minval > *maxval) || (reverse && *minval < *maxval)) { tmp = *minval; *minval = *maxval; *maxval = tmp; } skipchr(); // let's be friends with the lexer again return OK; } /* * vim_regexec and friends */ /* * Global work variables for vim_regexec(). */ static void cleanup_subexpr(void); #ifdef FEAT_SYN_HL static void cleanup_zsubexpr(void); #endif static void reg_nextline(void); static int match_with_backref(linenr_T start_lnum, colnr_T start_col, linenr_T end_lnum, colnr_T end_col, int *bytelen); /* * Sometimes need to save a copy of a line. Since alloc()/free() is very * slow, we keep one allocated piece of memory and only re-allocate it when * it's too small. It's freed in bt_regexec_both() when finished. */ static char_u *reg_tofree = NULL; static unsigned reg_tofreelen; /* * Structure used to store the execution state of the regex engine. 
* Which ones are set depends on whether a single-line or multi-line match is * done: * single-line multi-line * reg_match &regmatch_T NULL * reg_mmatch NULL &regmmatch_T * reg_startp reg_match->startp <invalid> * reg_endp reg_match->endp <invalid> * reg_startpos <invalid> reg_mmatch->startpos * reg_endpos <invalid> reg_mmatch->endpos * reg_win NULL window in which to search * reg_buf curbuf buffer in which to search * reg_firstlnum <invalid> first line in which to search * reg_maxline 0 last line nr * reg_line_lbr FALSE or TRUE FALSE */ typedef struct { regmatch_T *reg_match; regmmatch_T *reg_mmatch; char_u **reg_startp; char_u **reg_endp; lpos_T *reg_startpos; lpos_T *reg_endpos; win_T *reg_win; buf_T *reg_buf; linenr_T reg_firstlnum; linenr_T reg_maxline; int reg_line_lbr; // "\n" in string is line break // The current match-position is stord in these variables: linenr_T lnum; // line number, relative to first line char_u *line; // start of current line char_u *input; // current input, points into "line" int need_clear_subexpr; // subexpressions still need to be cleared #ifdef FEAT_SYN_HL int need_clear_zsubexpr; // extmatch subexpressions still need to be // cleared #endif // Internal copy of 'ignorecase'. It is set at each call to vim_regexec(). // Normally it gets the value of "rm_ic" or "rmm_ic", but when the pattern // contains '\c' or '\C' the value is overruled. int reg_ic; // Similar to "reg_ic", but only for 'combining' characters. Set with \Z // flag in the regexp. Defaults to false, always. int reg_icombine; // Copy of "rmm_maxcol": maximum column to search for a match. Zero when // there is no maximum. colnr_T reg_maxcol; // State for the NFA engine regexec. int nfa_has_zend; // NFA regexp \ze operator encountered. int nfa_has_backref; // NFA regexp \1 .. \9 encountered. int nfa_nsubexpr; // Number of sub expressions actually being used // during execution. 1 if only the whole match // (subexpr 0) is used. 
// listid is global, so that it increases on recursive calls to // nfa_regmatch(), which means we don't have to clear the lastlist field of // all the states. int nfa_listid; int nfa_alt_listid; #ifdef FEAT_SYN_HL int nfa_has_zsubexpr; // NFA regexp has \z( ), set zsubexpr. #endif } regexec_T; static regexec_T rex; static int rex_in_use = FALSE; /* * Return TRUE if character 'c' is included in 'iskeyword' option for * "reg_buf" buffer. */ static int reg_iswordc(int c) { return vim_iswordc_buf(c, rex.reg_buf); } /* * Get pointer to the line "lnum", which is relative to "reg_firstlnum". */ static char_u * reg_getline(linenr_T lnum) { // when looking behind for a match/no-match lnum is negative. But we // can't go before line 1 if (rex.reg_firstlnum + lnum < 1) return NULL; if (lnum > rex.reg_maxline) // Must have matched the "\n" in the last line. return (char_u *)""; return ml_get_buf(rex.reg_buf, rex.reg_firstlnum + lnum, FALSE); } #ifdef FEAT_SYN_HL static char_u *reg_startzp[NSUBEXP]; // Workspace to mark beginning static char_u *reg_endzp[NSUBEXP]; // and end of \z(...\) matches static lpos_T reg_startzpos[NSUBEXP]; // idem, beginning pos static lpos_T reg_endzpos[NSUBEXP]; // idem, end pos #endif // TRUE if using multi-line regexp. #define REG_MULTI (rex.reg_match == NULL) #ifdef FEAT_SYN_HL /* * Create a new extmatch and mark it as referenced once. */ static reg_extmatch_T * make_extmatch(void) { reg_extmatch_T *em; em = ALLOC_CLEAR_ONE(reg_extmatch_T); if (em != NULL) em->refcnt = 1; return em; } /* * Add a reference to an extmatch. */ reg_extmatch_T * ref_extmatch(reg_extmatch_T *em) { if (em != NULL) em->refcnt++; return em; } /* * Remove a reference to an extmatch. If there are no references left, free * the info. */ void unref_extmatch(reg_extmatch_T *em) { int i; if (em != NULL && --em->refcnt <= 0) { for (i = 0; i < NSUBEXP; ++i) vim_free(em->matches[i]); vim_free(em); } } #endif /* * Get class of previous character. 
*/ static int reg_prev_class(void) { if (rex.input > rex.line) return mb_get_class_buf(rex.input - 1 - (*mb_head_off)(rex.line, rex.input - 1), rex.reg_buf); return -1; } /* * Return TRUE if the current rex.input position matches the Visual area. */ static int reg_match_visual(void) { pos_T top, bot; linenr_T lnum; colnr_T col; win_T *wp = rex.reg_win == NULL ? curwin : rex.reg_win; int mode; colnr_T start, end; colnr_T start2, end2; colnr_T cols; colnr_T curswant; // Check if the buffer is the current buffer and not using a string. if (rex.reg_buf != curbuf || VIsual.lnum == 0 || !REG_MULTI) return FALSE; if (VIsual_active) { if (LT_POS(VIsual, wp->w_cursor)) { top = VIsual; bot = wp->w_cursor; } else { top = wp->w_cursor; bot = VIsual; } mode = VIsual_mode; curswant = wp->w_curswant; } else { if (LT_POS(curbuf->b_visual.vi_start, curbuf->b_visual.vi_end)) { top = curbuf->b_visual.vi_start; bot = curbuf->b_visual.vi_end; } else { top = curbuf->b_visual.vi_end; bot = curbuf->b_visual.vi_start; } mode = curbuf->b_visual.vi_mode; curswant = curbuf->b_visual.vi_curswant; } lnum = rex.lnum + rex.reg_firstlnum; if (lnum < top.lnum || lnum > bot.lnum) return FALSE; col = (colnr_T)(rex.input - rex.line); if (mode == 'v') { if ((lnum == top.lnum && col < top.col) || (lnum == bot.lnum && col >= bot.col + (*p_sel != 'e'))) return FALSE; } else if (mode == Ctrl_V) { getvvcol(wp, &top, &start, NULL, &end); getvvcol(wp, &bot, &start2, NULL, &end2); if (start2 < start) start = start2; if (end2 > end) end = end2; if (top.col == MAXCOL || bot.col == MAXCOL || curswant == MAXCOL) end = MAXCOL; // getvvcol() flushes rex.line, need to get it again rex.line = reg_getline(rex.lnum); rex.input = rex.line + col; cols = win_linetabsize(wp, rex.line, col); if (cols < start || cols > end - (*p_sel == 'e')) return FALSE; } return TRUE; } /* * Check the regexp program for its magic number. * Return TRUE if it's wrong. */ static int prog_magic_wrong(void) { regprog_T *prog; prog = REG_MULTI ? 
rex.reg_mmatch->regprog : rex.reg_match->regprog; if (prog->engine == &nfa_regengine) // For NFA matcher we don't check the magic return FALSE; if (UCHARAT(((bt_regprog_T *)prog)->program) != REGMAGIC) { emsg(_(e_corrupted_regexp_program)); return TRUE; } return FALSE; } /* * Cleanup the subexpressions, if this wasn't done yet. * This construction is used to clear the subexpressions only when they are * used (to increase speed). */ static void cleanup_subexpr(void) { if (rex.need_clear_subexpr) { if (REG_MULTI) { // Use 0xff to set lnum to -1 vim_memset(rex.reg_startpos, 0xff, sizeof(lpos_T) * NSUBEXP); vim_memset(rex.reg_endpos, 0xff, sizeof(lpos_T) * NSUBEXP); } else { vim_memset(rex.reg_startp, 0, sizeof(char_u *) * NSUBEXP); vim_memset(rex.reg_endp, 0, sizeof(char_u *) * NSUBEXP); } rex.need_clear_subexpr = FALSE; } } #ifdef FEAT_SYN_HL static void cleanup_zsubexpr(void) { if (rex.need_clear_zsubexpr) { if (REG_MULTI) { // Use 0xff to set lnum to -1 vim_memset(reg_startzpos, 0xff, sizeof(lpos_T) * NSUBEXP); vim_memset(reg_endzpos, 0xff, sizeof(lpos_T) * NSUBEXP); } else { vim_memset(reg_startzp, 0, sizeof(char_u *) * NSUBEXP); vim_memset(reg_endzp, 0, sizeof(char_u *) * NSUBEXP); } rex.need_clear_zsubexpr = FALSE; } } #endif /* * Advance rex.lnum, rex.line and rex.input to the next line. */ static void reg_nextline(void) { rex.line = reg_getline(++rex.lnum); rex.input = rex.line; fast_breakcheck(); } /* * Check whether a backreference matches. * Returns RA_FAIL, RA_NOMATCH or RA_MATCH. * If "bytelen" is not NULL, it is set to the byte length of the match in the * last line. */ static int match_with_backref( linenr_T start_lnum, colnr_T start_col, linenr_T end_lnum, colnr_T end_col, int *bytelen) { linenr_T clnum = start_lnum; colnr_T ccol = start_col; int len; char_u *p; if (bytelen != NULL) *bytelen = 0; for (;;) { // Since getting one line may invalidate the other, need to make copy. // Slow! 
if (rex.line != reg_tofree) { len = (int)STRLEN(rex.line); if (reg_tofree == NULL || len >= (int)reg_tofreelen) { len += 50; // get some extra vim_free(reg_tofree); reg_tofree = alloc(len); if (reg_tofree == NULL) return RA_FAIL; // out of memory! reg_tofreelen = len; } STRCPY(reg_tofree, rex.line); rex.input = reg_tofree + (rex.input - rex.line); rex.line = reg_tofree; } // Get the line to compare with. p = reg_getline(clnum); if (clnum == end_lnum) len = end_col - ccol; else len = (int)STRLEN(p + ccol); if (cstrncmp(p + ccol, rex.input, &len) != 0) return RA_NOMATCH; // doesn't match if (bytelen != NULL) *bytelen += len; if (clnum == end_lnum) break; // match and at end! if (rex.lnum >= rex.reg_maxline) return RA_NOMATCH; // text too short // Advance to next line. reg_nextline(); if (bytelen != NULL) *bytelen = 0; ++clnum; ccol = 0; if (got_int) return RA_FAIL; } // found a match! Note that rex.line may now point to a copy of the line, // that should not matter. return RA_MATCH; } /* * Used in a place where no * or \+ can follow. 
*/ static int re_mult_next(char *what) { if (re_multi_type(peekchr()) == MULTI_MULT) { semsg(_(e_nfa_regexp_cannot_repeat_str), what); rc_did_emsg = TRUE; return FAIL; } return OK; } typedef struct { int a, b, c; } decomp_T; // 0xfb20 - 0xfb4f static decomp_T decomp_table[0xfb4f-0xfb20+1] = { {0x5e2,0,0}, // 0xfb20 alt ayin {0x5d0,0,0}, // 0xfb21 alt alef {0x5d3,0,0}, // 0xfb22 alt dalet {0x5d4,0,0}, // 0xfb23 alt he {0x5db,0,0}, // 0xfb24 alt kaf {0x5dc,0,0}, // 0xfb25 alt lamed {0x5dd,0,0}, // 0xfb26 alt mem-sofit {0x5e8,0,0}, // 0xfb27 alt resh {0x5ea,0,0}, // 0xfb28 alt tav {'+', 0, 0}, // 0xfb29 alt plus {0x5e9, 0x5c1, 0}, // 0xfb2a shin+shin-dot {0x5e9, 0x5c2, 0}, // 0xfb2b shin+sin-dot {0x5e9, 0x5c1, 0x5bc}, // 0xfb2c shin+shin-dot+dagesh {0x5e9, 0x5c2, 0x5bc}, // 0xfb2d shin+sin-dot+dagesh {0x5d0, 0x5b7, 0}, // 0xfb2e alef+patah {0x5d0, 0x5b8, 0}, // 0xfb2f alef+qamats {0x5d0, 0x5b4, 0}, // 0xfb30 alef+hiriq {0x5d1, 0x5bc, 0}, // 0xfb31 bet+dagesh {0x5d2, 0x5bc, 0}, // 0xfb32 gimel+dagesh {0x5d3, 0x5bc, 0}, // 0xfb33 dalet+dagesh {0x5d4, 0x5bc, 0}, // 0xfb34 he+dagesh {0x5d5, 0x5bc, 0}, // 0xfb35 vav+dagesh {0x5d6, 0x5bc, 0}, // 0xfb36 zayin+dagesh {0xfb37, 0, 0}, // 0xfb37 -- UNUSED {0x5d8, 0x5bc, 0}, // 0xfb38 tet+dagesh {0x5d9, 0x5bc, 0}, // 0xfb39 yud+dagesh {0x5da, 0x5bc, 0}, // 0xfb3a kaf sofit+dagesh {0x5db, 0x5bc, 0}, // 0xfb3b kaf+dagesh {0x5dc, 0x5bc, 0}, // 0xfb3c lamed+dagesh {0xfb3d, 0, 0}, // 0xfb3d -- UNUSED {0x5de, 0x5bc, 0}, // 0xfb3e mem+dagesh {0xfb3f, 0, 0}, // 0xfb3f -- UNUSED {0x5e0, 0x5bc, 0}, // 0xfb40 nun+dagesh {0x5e1, 0x5bc, 0}, // 0xfb41 samech+dagesh {0xfb42, 0, 0}, // 0xfb42 -- UNUSED {0x5e3, 0x5bc, 0}, // 0xfb43 pe sofit+dagesh {0x5e4, 0x5bc,0}, // 0xfb44 pe+dagesh {0xfb45, 0, 0}, // 0xfb45 -- UNUSED {0x5e6, 0x5bc, 0}, // 0xfb46 tsadi+dagesh {0x5e7, 0x5bc, 0}, // 0xfb47 qof+dagesh {0x5e8, 0x5bc, 0}, // 0xfb48 resh+dagesh {0x5e9, 0x5bc, 0}, // 0xfb49 shin+dagesh {0x5ea, 0x5bc, 0}, // 0xfb4a tav+dagesh {0x5d5, 0x5b9, 0}, // 
0xfb4b vav+holam {0x5d1, 0x5bf, 0}, // 0xfb4c bet+rafe {0x5db, 0x5bf, 0}, // 0xfb4d kaf+rafe {0x5e4, 0x5bf, 0}, // 0xfb4e pe+rafe {0x5d0, 0x5dc, 0} // 0xfb4f alef-lamed }; static void mb_decompose(int c, int *c1, int *c2, int *c3) { decomp_T d; if (c >= 0xfb20 && c <= 0xfb4f) { d = decomp_table[c - 0xfb20]; *c1 = d.a; *c2 = d.b; *c3 = d.c; } else { *c1 = c; *c2 = *c3 = 0; } } /* * Compare two strings, ignore case if rex.reg_ic set. * Return 0 if strings match, non-zero otherwise. * Correct the length "*n" when composing characters are ignored. */ static int cstrncmp(char_u *s1, char_u *s2, int *n) { int result; if (!rex.reg_ic) result = STRNCMP(s1, s2, *n); else result = MB_STRNICMP(s1, s2, *n); // if it failed and it's utf8 and we want to combineignore: if (result != 0 && enc_utf8 && rex.reg_icombine) { char_u *str1, *str2; int c1, c2, c11, c12; int junk; // we have to handle the strcmp ourselves, since it is necessary to // deal with the composing characters by ignoring them: str1 = s1; str2 = s2; c1 = c2 = 0; while ((int)(str1 - s1) < *n) { c1 = mb_ptr2char_adv(&str1); c2 = mb_ptr2char_adv(&str2); // Decompose the character if necessary, into 'base' characters. // Currently hard-coded for Hebrew, Arabic to be done... if (c1 != c2 && (!rex.reg_ic || utf_fold(c1) != utf_fold(c2))) { // decomposition necessary? mb_decompose(c1, &c11, &junk, &junk); mb_decompose(c2, &c12, &junk, &junk); c1 = c11; c2 = c12; if (c11 != c12 && (!rex.reg_ic || utf_fold(c11) != utf_fold(c12))) break; } } result = c2 - c1; if (result == 0) *n = (int)(str2 - s2); } return result; } /* * cstrchr: This function is used a lot for simple searches, keep it fast! */ static char_u * cstrchr(char_u *s, int c) { char_u *p; int cc; if (!rex.reg_ic || (!enc_utf8 && mb_char2len(c) > 1)) return vim_strchr(s, c); // tolower() and toupper() can be slow, comparing twice should be a lot // faster (esp. when using MS Visual C++!). // For UTF-8 need to use folded case. 
if (enc_utf8 && c > 0x80) cc = utf_fold(c); else if (MB_ISUPPER(c)) cc = MB_TOLOWER(c); else if (MB_ISLOWER(c)) cc = MB_TOUPPER(c); else return vim_strchr(s, c); if (has_mbyte) { for (p = s; *p != NUL; p += (*mb_ptr2len)(p)) { if (enc_utf8 && c > 0x80) { if (utf_fold(utf_ptr2char(p)) == cc) return p; } else if (*p == c || *p == cc) return p; } } else // Faster version for when there are no multi-byte characters. for (p = s; *p != NUL; ++p) if (*p == c || *p == cc) return p; return NULL; } //////////////////////////////////////////////////////////////// // regsub stuff // //////////////////////////////////////////////////////////////// /* * We should define ftpr as a pointer to a function returning a pointer to * a function returning a pointer to a function ... * This is impossible, so we declare a pointer to a function returning a * void pointer. This should work for all compilers. */ typedef void (*(*fptr_T)(int *, int)); static int vim_regsub_both(char_u *source, typval_T *expr, char_u *dest, int destlen, int flags); static fptr_T do_upper(int *d, int c) { *d = MB_TOUPPER(c); return (fptr_T)NULL; } static fptr_T do_Upper(int *d, int c) { *d = MB_TOUPPER(c); return (fptr_T)do_Upper; } static fptr_T do_lower(int *d, int c) { *d = MB_TOLOWER(c); return (fptr_T)NULL; } static fptr_T do_Lower(int *d, int c) { *d = MB_TOLOWER(c); return (fptr_T)do_Lower; } /* * regtilde(): Replace tildes in the pattern by the old pattern. * * Short explanation of the tilde: It stands for the previous replacement * pattern. If that previous pattern also contains a ~ we should go back a * step further... But we insert the previous pattern into the current one * and remember that. * This still does not handle the case where "magic" changes. So require the * user to keep his hands off of "magic". * * The tildes are parsed once before the first call to vim_regsub(). 
 */
    char_u *
regtilde(char_u *source, int magic)
{
    // Expand every (unescaped) '~' in "source" to the previous replacement
    // string "reg_prev_sub", then remember the expanded string as the new
    // previous replacement.
    // Ownership: the returned pointer is also stored in "reg_prev_sub" (or a
    // copy of it is) — the caller must NOT free the result.
    char_u	*newsub = source;
    char_u	*tmpsub;
    char_u	*p;
    int		len;
    int		prevlen;

    for (p = newsub; *p; ++p)
    {
	if ((*p == '~' && magic) || (*p == '\\' && *(p + 1) == '~' && !magic))
	{
	    if (reg_prev_sub != NULL)
	    {
		// length = len(newsub) - 1 + len(prev_sub) + 1
		prevlen = (int)STRLEN(reg_prev_sub);
		tmpsub = alloc(STRLEN(newsub) + prevlen);
		if (tmpsub != NULL)
		{
		    // copy prefix
		    len = (int)(p - newsub);	// not including ~
		    mch_memmove(tmpsub, newsub, (size_t)len);
		    // interpret tilde
		    mch_memmove(tmpsub + len, reg_prev_sub, (size_t)prevlen);
		    // copy postfix
		    if (!magic)
			++p;			// back off backslash
		    STRCPY(tmpsub + len + prevlen, p + 1);

		    if (newsub != source)	// already allocated newsub
			vim_free(newsub);
		    newsub = tmpsub;
		    // continue scanning just after the inserted text; the
		    // inserted previous pattern itself is not rescanned
		    p = newsub + len + prevlen;
		}
	    }
	    else if (magic)
		STRMOVE(p, p + 1);	// remove '~'
	    else
		STRMOVE(p, p + 2);	// remove '\~'
	    --p;
	}
	else
	{
	    if (*p == '\\' && p[1])	// skip escaped characters
		++p;
	    if (has_mbyte)
		p += (*mb_ptr2len)(p) - 1;
	}
    }

    // Remember the expansion for the next '~'.
    vim_free(reg_prev_sub);
    if (newsub != source)	// newsub was allocated, just keep it
	reg_prev_sub = newsub;
    else			// no ~ found, need to save newsub
	reg_prev_sub = vim_strsave(newsub);
    return newsub;
}

#ifdef FEAT_EVAL
static int can_f_submatch = FALSE;	// TRUE when submatch() can be used

// These pointers are used for reg_submatch().  Needed for when the
// substitution string is an expression that contains a call to substitute()
// and submatch().
typedef struct {
    regmatch_T	*sm_match;	// single-line match, or NULL
    regmmatch_T	*sm_mmatch;	// multi-line match, or NULL
    linenr_T	sm_firstlnum;	// first line of the match
    linenr_T	sm_maxline;	// last line nr relative to sm_firstlnum
    int		sm_line_lbr;	// "\n" in string is line break
} regsubmatch_T;

static regsubmatch_T rsm;  // can only be used when can_f_submatch is TRUE
#endif

#ifdef FEAT_EVAL

/*
 * Put the submatches in "argv[argskip]" which is a list passed into
 * call_func() by vim_regsub_both().
*/ static int fill_submatch_list(int argc UNUSED, typval_T *argv, int argskip, int argcount) { listitem_T *li; int i; char_u *s; typval_T *listarg = argv + argskip; if (argcount == argskip) // called function doesn't take a submatches argument return argskip; // Relies on sl_list to be the first item in staticList10_T. init_static_list((staticList10_T *)(listarg->vval.v_list)); // There are always 10 list items in staticList10_T. li = listarg->vval.v_list->lv_first; for (i = 0; i < 10; ++i) { s = rsm.sm_match->startp[i]; if (s == NULL || rsm.sm_match->endp[i] == NULL) s = NULL; else s = vim_strnsave(s, rsm.sm_match->endp[i] - s); li->li_tv.v_type = VAR_STRING; li->li_tv.vval.v_string = s; li = li->li_next; } return argskip + 1; } static void clear_submatch_list(staticList10_T *sl) { int i; for (i = 0; i < 10; ++i) vim_free(sl->sl_items[i].li_tv.vval.v_string); } #endif /* * vim_regsub() - perform substitutions after a vim_regexec() or * vim_regexec_multi() match. * * If "flags" has REGSUB_COPY really copy into "dest[destlen]". * Oterwise nothing is copied, only compue the length of the result. * * If "flags" has REGSUB_MAGIC then behave like 'magic' is set. * * If "flags" has REGSUB_BACKSLASH a backslash will be removed later, need to * double them to keep them, and insert a backslash before a CR to avoid it * being replaced with a line break later. * * Note: The matched text must not change between the call of * vim_regexec()/vim_regexec_multi() and vim_regsub()! It would make the back * references invalid! * * Returns the size of the replacement, including terminating NUL. */ int vim_regsub( regmatch_T *rmp, char_u *source, typval_T *expr, char_u *dest, int destlen, int flags) { int result; regexec_T rex_save; int rex_in_use_save = rex_in_use; if (rex_in_use) // Being called recursively, save the state. 
rex_save = rex; rex_in_use = TRUE; rex.reg_match = rmp; rex.reg_mmatch = NULL; rex.reg_maxline = 0; rex.reg_buf = curbuf; rex.reg_line_lbr = TRUE; result = vim_regsub_both(source, expr, dest, destlen, flags); rex_in_use = rex_in_use_save; if (rex_in_use) rex = rex_save; return result; } int vim_regsub_multi( regmmatch_T *rmp, linenr_T lnum, char_u *source, char_u *dest, int destlen, int flags) { int result; regexec_T rex_save; int rex_in_use_save = rex_in_use; if (rex_in_use) // Being called recursively, save the state. rex_save = rex; rex_in_use = TRUE; rex.reg_match = NULL; rex.reg_mmatch = rmp; rex.reg_buf = curbuf; // always works on the current buffer! rex.reg_firstlnum = lnum; rex.reg_maxline = curbuf->b_ml.ml_line_count - lnum; rex.reg_line_lbr = FALSE; result = vim_regsub_both(source, NULL, dest, destlen, flags); rex_in_use = rex_in_use_save; if (rex_in_use) rex = rex_save; return result; } #if defined(FEAT_EVAL) || defined(PROTO) // When nesting more than a couple levels it's probably a mistake. # define MAX_REGSUB_NESTING 4 static char_u *eval_result[MAX_REGSUB_NESTING] = {NULL, NULL, NULL, NULL}; # if defined(EXITFREE) || defined(PROTO) void free_resub_eval_result(void) { int i; for (i = 0; i < MAX_REGSUB_NESTING; ++i) VIM_CLEAR(eval_result[i]); } # endif #endif static int vim_regsub_both( char_u *source, typval_T *expr, char_u *dest, int destlen, int flags) { char_u *src; char_u *dst; char_u *s; int c; int cc; int no = -1; fptr_T func_all = (fptr_T)NULL; fptr_T func_one = (fptr_T)NULL; linenr_T clnum = 0; // init for GCC int len = 0; // init for GCC #ifdef FEAT_EVAL static int nesting = 0; int nested; #endif int copy = flags & REGSUB_COPY; // Be paranoid... 
if ((source == NULL && expr == NULL) || dest == NULL) { emsg(_(e_null_argument)); return 0; } if (prog_magic_wrong()) return 0; #ifdef FEAT_EVAL if (nesting == MAX_REGSUB_NESTING) { emsg(_(e_substitute_nesting_too_deep)); return 0; } nested = nesting; #endif src = source; dst = dest; /* * When the substitute part starts with "\=" evaluate it as an expression. */ if (expr != NULL || (source[0] == '\\' && source[1] == '=')) { #ifdef FEAT_EVAL // To make sure that the length doesn't change between checking the // length and copying the string, and to speed up things, the // resulting string is saved from the call with // "flags & REGSUB_COPY" == 0 to the call with // "flags & REGSUB_COPY" != 0. if (copy) { if (eval_result[nested] != NULL) { STRCPY(dest, eval_result[nested]); dst += STRLEN(eval_result[nested]); VIM_CLEAR(eval_result[nested]); } } else { int prev_can_f_submatch = can_f_submatch; regsubmatch_T rsm_save; VIM_CLEAR(eval_result[nested]); // The expression may contain substitute(), which calls us // recursively. Make sure submatch() gets the text from the first // level. if (can_f_submatch) rsm_save = rsm; can_f_submatch = TRUE; rsm.sm_match = rex.reg_match; rsm.sm_mmatch = rex.reg_mmatch; rsm.sm_firstlnum = rex.reg_firstlnum; rsm.sm_maxline = rex.reg_maxline; rsm.sm_line_lbr = rex.reg_line_lbr; // Although unlikely, it is possible that the expression invokes a // substitute command (it might fail, but still). Therefore keep // an array of eval results. 
++nesting; if (expr != NULL) { typval_T argv[2]; char_u buf[NUMBUFLEN]; typval_T rettv; staticList10_T matchList; funcexe_T funcexe; rettv.v_type = VAR_STRING; rettv.vval.v_string = NULL; argv[0].v_type = VAR_LIST; argv[0].vval.v_list = &matchList.sl_list; matchList.sl_list.lv_len = 0; CLEAR_FIELD(funcexe); funcexe.fe_argv_func = fill_submatch_list; funcexe.fe_evaluate = TRUE; if (expr->v_type == VAR_FUNC) { s = expr->vval.v_string; call_func(s, -1, &rettv, 1, argv, &funcexe); } else if (expr->v_type == VAR_PARTIAL) { partial_T *partial = expr->vval.v_partial; s = partial_name(partial); funcexe.fe_partial = partial; call_func(s, -1, &rettv, 1, argv, &funcexe); } else if (expr->v_type == VAR_INSTR) { exe_typval_instr(expr, &rettv); } if (matchList.sl_list.lv_len > 0) // fill_submatch_list() was called clear_submatch_list(&matchList); if (rettv.v_type == VAR_UNKNOWN) // something failed, no need to report another error eval_result[nested] = NULL; else { eval_result[nested] = tv_get_string_buf_chk(&rettv, buf); if (eval_result[nested] != NULL) eval_result[nested] = vim_strsave(eval_result[nested]); } clear_tv(&rettv); } else if (substitute_instr != NULL) // Execute instructions from ISN_SUBSTITUTE. eval_result[nested] = exe_substitute_instr(); else eval_result[nested] = eval_to_string(source + 2, TRUE); --nesting; if (eval_result[nested] != NULL) { int had_backslash = FALSE; for (s = eval_result[nested]; *s != NUL; MB_PTR_ADV(s)) { // Change NL to CR, so that it becomes a line break, // unless called from vim_regexec_nl(). // Skip over a backslashed character. if (*s == NL && !rsm.sm_line_lbr) *s = CAR; else if (*s == '\\' && s[1] != NUL) { ++s; /* Change NL to CR here too, so that this works: * :s/abc\\\ndef/\="aaa\\\nbbb"/ on text: * abc\ * def * Not when called from vim_regexec_nl(). */ if (*s == NL && !rsm.sm_line_lbr) *s = CAR; had_backslash = TRUE; } } if (had_backslash && (flags & REGSUB_BACKSLASH)) { // Backslashes will be consumed, need to double them. 
s = vim_strsave_escaped(eval_result[nested], (char_u *)"\\"); if (s != NULL) { vim_free(eval_result[nested]); eval_result[nested] = s; } } dst += STRLEN(eval_result[nested]); } can_f_submatch = prev_can_f_submatch; if (can_f_submatch) rsm = rsm_save; } #endif } else while ((c = *src++) != NUL) { if (c == '&' && (flags & REGSUB_MAGIC)) no = 0; else if (c == '\\' && *src != NUL) { if (*src == '&' && !(flags & REGSUB_MAGIC)) { ++src; no = 0; } else if ('0' <= *src && *src <= '9') { no = *src++ - '0'; } else if (vim_strchr((char_u *)"uUlLeE", *src)) { switch (*src++) { case 'u': func_one = (fptr_T)do_upper; continue; case 'U': func_all = (fptr_T)do_Upper; continue; case 'l': func_one = (fptr_T)do_lower; continue; case 'L': func_all = (fptr_T)do_Lower; continue; case 'e': case 'E': func_one = func_all = (fptr_T)NULL; continue; } } } if (no < 0) // Ordinary character. { if (c == K_SPECIAL && src[0] != NUL && src[1] != NUL) { // Copy a special key as-is. if (copy) { if (dst + 3 > dest + destlen) { iemsg("vim_regsub_both(): not enough space"); return 0; } *dst++ = c; *dst++ = *src++; *dst++ = *src++; } else { dst += 3; src += 2; } continue; } if (c == '\\' && *src != NUL) { // Check for abbreviations -- webb switch (*src) { case 'r': c = CAR; ++src; break; case 'n': c = NL; ++src; break; case 't': c = TAB; ++src; break; // Oh no! \e already has meaning in subst pat :-( // case 'e': c = ESC; ++src; break; case 'b': c = Ctrl_H; ++src; break; // If "backslash" is TRUE the backslash will be removed // later. Used to insert a literal CR. default: if (flags & REGSUB_BACKSLASH) { if (copy) { if (dst + 1 > dest + destlen) { iemsg("vim_regsub_both(): not enough space"); return 0; } *dst = '\\'; } ++dst; } c = *src++; } } else if (has_mbyte) c = mb_ptr2char(src - 1); // Write to buffer, if copy is set. 
if (func_one != (fptr_T)NULL) // Turbo C complains without the typecast func_one = (fptr_T)(func_one(&cc, c)); else if (func_all != (fptr_T)NULL) // Turbo C complains without the typecast func_all = (fptr_T)(func_all(&cc, c)); else // just copy cc = c; if (has_mbyte) { int totlen = mb_ptr2len(src - 1); int charlen = mb_char2len(cc); if (copy) { if (dst + charlen > dest + destlen) { iemsg("vim_regsub_both(): not enough space"); return 0; } mb_char2bytes(cc, dst); } dst += charlen - 1; if (enc_utf8) { int clen = utf_ptr2len(src - 1); // If the character length is shorter than "totlen", there // are composing characters; copy them as-is. if (clen < totlen) { if (copy) { if (dst + totlen - clen > dest + destlen) { iemsg("vim_regsub_both(): not enough space"); return 0; } mch_memmove(dst + 1, src - 1 + clen, (size_t)(totlen - clen)); } dst += totlen - clen; } } src += totlen - 1; } else if (copy) { if (dst + 1 > dest + destlen) { iemsg("vim_regsub_both(): not enough space"); return 0; } *dst = cc; } dst++; } else { if (REG_MULTI) { clnum = rex.reg_mmatch->startpos[no].lnum; if (clnum < 0 || rex.reg_mmatch->endpos[no].lnum < 0) s = NULL; else { s = reg_getline(clnum) + rex.reg_mmatch->startpos[no].col; if (rex.reg_mmatch->endpos[no].lnum == clnum) len = rex.reg_mmatch->endpos[no].col - rex.reg_mmatch->startpos[no].col; else len = (int)STRLEN(s); } } else { s = rex.reg_match->startp[no]; if (rex.reg_match->endp[no] == NULL) s = NULL; else len = (int)(rex.reg_match->endp[no] - s); } if (s != NULL) { for (;;) { if (len == 0) { if (REG_MULTI) { if (rex.reg_mmatch->endpos[no].lnum == clnum) break; if (copy) { if (dst + 1 > dest + destlen) { iemsg("vim_regsub_both(): not enough space"); return 0; } *dst = CAR; } ++dst; s = reg_getline(++clnum); if (rex.reg_mmatch->endpos[no].lnum == clnum) len = rex.reg_mmatch->endpos[no].col; else len = (int)STRLEN(s); } else break; } else if (*s == NUL) // we hit NUL. 
{ if (copy) iemsg(_(e_damaged_match_string)); goto exit; } else { if ((flags & REGSUB_BACKSLASH) && (*s == CAR || *s == '\\')) { /* * Insert a backslash in front of a CR, otherwise * it will be replaced by a line break. * Number of backslashes will be halved later, * double them here. */ if (copy) { if (dst + 2 > dest + destlen) { iemsg("vim_regsub_both(): not enough space"); return 0; } dst[0] = '\\'; dst[1] = *s; } dst += 2; } else { if (has_mbyte) c = mb_ptr2char(s); else c = *s; if (func_one != (fptr_T)NULL) // Turbo C complains without the typecast func_one = (fptr_T)(func_one(&cc, c)); else if (func_all != (fptr_T)NULL) // Turbo C complains without the typecast func_all = (fptr_T)(func_all(&cc, c)); else // just copy cc = c; if (has_mbyte) { int l; int charlen; // Copy composing characters separately, one // at a time. if (enc_utf8) l = utf_ptr2len(s) - 1; else l = mb_ptr2len(s) - 1; s += l; len -= l; charlen = mb_char2len(cc); if (copy) { if (dst + charlen > dest + destlen) { iemsg("vim_regsub_both(): not enough space"); return 0; } mb_char2bytes(cc, dst); } dst += charlen - 1; } else if (copy) { if (dst + 1 > dest + destlen) { iemsg("vim_regsub_both(): not enough space"); return 0; } *dst = cc; } dst++; } ++s; --len; } } } no = -1; } } if (copy) *dst = NUL; exit: return (int)((dst - dest) + 1); } #ifdef FEAT_EVAL /* * Call reg_getline() with the line numbers from the submatch. If a * substitute() was used the reg_maxline and other values have been * overwritten. */ static char_u * reg_getline_submatch(linenr_T lnum) { char_u *s; linenr_T save_first = rex.reg_firstlnum; linenr_T save_max = rex.reg_maxline; rex.reg_firstlnum = rsm.sm_firstlnum; rex.reg_maxline = rsm.sm_maxline; s = reg_getline(lnum); rex.reg_firstlnum = save_first; rex.reg_maxline = save_max; return s; } /* * Used for the submatch() function: get the string from the n'th submatch in * allocated memory. * Returns NULL when not in a ":s" command and for a non-existing submatch. 
*/ char_u * reg_submatch(int no) { char_u *retval = NULL; char_u *s; int len; int round; linenr_T lnum; if (!can_f_submatch || no < 0) return NULL; if (rsm.sm_match == NULL) { /* * First round: compute the length and allocate memory. * Second round: copy the text. */ for (round = 1; round <= 2; ++round) { lnum = rsm.sm_mmatch->startpos[no].lnum; if (lnum < 0 || rsm.sm_mmatch->endpos[no].lnum < 0) return NULL; s = reg_getline_submatch(lnum); if (s == NULL) // anti-crash check, cannot happen? break; s += rsm.sm_mmatch->startpos[no].col; if (rsm.sm_mmatch->endpos[no].lnum == lnum) { // Within one line: take form start to end col. len = rsm.sm_mmatch->endpos[no].col - rsm.sm_mmatch->startpos[no].col; if (round == 2) vim_strncpy(retval, s, len); ++len; } else { // Multiple lines: take start line from start col, middle // lines completely and end line up to end col. len = (int)STRLEN(s); if (round == 2) { STRCPY(retval, s); retval[len] = '\n'; } ++len; ++lnum; while (lnum < rsm.sm_mmatch->endpos[no].lnum) { s = reg_getline_submatch(lnum++); if (round == 2) STRCPY(retval + len, s); len += (int)STRLEN(s); if (round == 2) retval[len] = '\n'; ++len; } if (round == 2) STRNCPY(retval + len, reg_getline_submatch(lnum), rsm.sm_mmatch->endpos[no].col); len += rsm.sm_mmatch->endpos[no].col; if (round == 2) retval[len] = NUL; ++len; } if (retval == NULL) { retval = alloc(len); if (retval == NULL) return NULL; } } } else { s = rsm.sm_match->startp[no]; if (s == NULL || rsm.sm_match->endp[no] == NULL) retval = NULL; else retval = vim_strnsave(s, rsm.sm_match->endp[no] - s); } return retval; } /* * Used for the submatch() function with the optional non-zero argument: get * the list of strings from the n'th submatch in allocated memory with NULs * represented in NLs. * Returns a list of allocated strings. Returns NULL when not in a ":s" * command, for a non-existing submatch and for any error. 
*/ list_T * reg_submatch_list(int no) { char_u *s; linenr_T slnum; linenr_T elnum; colnr_T scol; colnr_T ecol; int i; list_T *list; int error = FALSE; if (!can_f_submatch || no < 0) return NULL; if (rsm.sm_match == NULL) { slnum = rsm.sm_mmatch->startpos[no].lnum; elnum = rsm.sm_mmatch->endpos[no].lnum; if (slnum < 0 || elnum < 0) return NULL; scol = rsm.sm_mmatch->startpos[no].col; ecol = rsm.sm_mmatch->endpos[no].col; list = list_alloc(); if (list == NULL) return NULL; s = reg_getline_submatch(slnum) + scol; if (slnum == elnum) { if (list_append_string(list, s, ecol - scol) == FAIL) error = TRUE; } else { if (list_append_string(list, s, -1) == FAIL) error = TRUE; for (i = 1; i < elnum - slnum; i++) { s = reg_getline_submatch(slnum + i); if (list_append_string(list, s, -1) == FAIL) error = TRUE; } s = reg_getline_submatch(elnum); if (list_append_string(list, s, ecol) == FAIL) error = TRUE; } } else { s = rsm.sm_match->startp[no]; if (s == NULL || rsm.sm_match->endp[no] == NULL) return NULL; list = list_alloc(); if (list == NULL) return NULL; if (list_append_string(list, s, (int)(rsm.sm_match->endp[no] - s)) == FAIL) error = TRUE; } if (error) { list_free(list); return NULL; } ++list->lv_refcount; return list; } #endif /* * Initialize the values used for matching against multiple lines */ static void init_regexec_multi( regmmatch_T *rmp, win_T *win, // window in which to search or NULL buf_T *buf, // buffer in which to search linenr_T lnum) // nr of line to start looking for match { rex.reg_match = NULL; rex.reg_mmatch = rmp; rex.reg_buf = buf; rex.reg_win = win; rex.reg_firstlnum = lnum; rex.reg_maxline = rex.reg_buf->b_ml.ml_line_count - lnum; rex.reg_line_lbr = FALSE; rex.reg_ic = rmp->rmm_ic; rex.reg_icombine = FALSE; rex.reg_maxcol = rmp->rmm_maxcol; } #include "regexp_bt.c" static regengine_T bt_regengine = { bt_regcomp, bt_regfree, bt_regexec_nl, bt_regexec_multi, }; #include "regexp_nfa.c" static regengine_T nfa_regengine = { nfa_regcomp, nfa_regfree, 
nfa_regexec_nl, nfa_regexec_multi, }; // Which regexp engine to use? Needed for vim_regcomp(). // Must match with 'regexpengine'. static int regexp_engine = 0; #ifdef DEBUG static char_u regname[][30] = { "AUTOMATIC Regexp Engine", "BACKTRACKING Regexp Engine", "NFA Regexp Engine" }; #endif /* * Compile a regular expression into internal code. * Returns the program in allocated memory. * Use vim_regfree() to free the memory. * Returns NULL for an error. */ regprog_T * vim_regcomp(char_u *expr_arg, int re_flags) { regprog_T *prog = NULL; char_u *expr = expr_arg; int called_emsg_before; regexp_engine = p_re; // Check for prefix "\%#=", that sets the regexp engine if (STRNCMP(expr, "\\%#=", 4) == 0) { int newengine = expr[4] - '0'; if (newengine == AUTOMATIC_ENGINE || newengine == BACKTRACKING_ENGINE || newengine == NFA_ENGINE) { regexp_engine = expr[4] - '0'; expr += 5; #ifdef DEBUG smsg("New regexp mode selected (%d): %s", regexp_engine, regname[newengine]); #endif } else { emsg(_(e_percent_hash_can_only_be_followed_by_zero_one_two_automatic_engine_will_be_used)); regexp_engine = AUTOMATIC_ENGINE; } } #ifdef DEBUG bt_regengine.expr = expr; nfa_regengine.expr = expr; #endif // reg_iswordc() uses rex.reg_buf rex.reg_buf = curbuf; /* * First try the NFA engine, unless backtracking was requested. */ called_emsg_before = called_emsg; if (regexp_engine != BACKTRACKING_ENGINE) prog = nfa_regengine.regcomp(expr, re_flags + (regexp_engine == AUTOMATIC_ENGINE ? RE_AUTO : 0)); else prog = bt_regengine.regcomp(expr, re_flags); // Check for error compiling regexp with initial engine. if (prog == NULL) { #ifdef BT_REGEXP_DEBUG_LOG if (regexp_engine == BACKTRACKING_ENGINE) // debugging log for BT engine { FILE *f; f = fopen(BT_REGEXP_DEBUG_LOG_NAME, "a"); if (f) { fprintf(f, "Syntax error in \"%s\"\n", expr); fclose(f); } else semsg("(NFA) Could not open \"%s\" to write !!!", BT_REGEXP_DEBUG_LOG_NAME); } #endif /* * If the NFA engine failed, try the backtracking engine. 
* The NFA engine also fails for patterns that it can't handle well * but are still valid patterns, thus a retry should work. * But don't try if an error message was given. */ if (regexp_engine == AUTOMATIC_ENGINE && called_emsg == called_emsg_before) { regexp_engine = BACKTRACKING_ENGINE; #ifdef FEAT_EVAL report_re_switch(expr); #endif prog = bt_regengine.regcomp(expr, re_flags); } } if (prog != NULL) { // Store the info needed to call regcomp() again when the engine turns // out to be very slow when executing it. prog->re_engine = regexp_engine; prog->re_flags = re_flags; } return prog; } /* * Free a compiled regexp program, returned by vim_regcomp(). */ void vim_regfree(regprog_T *prog) { if (prog != NULL) prog->engine->regfree(prog); } #if defined(EXITFREE) || defined(PROTO) void free_regexp_stuff(void) { ga_clear(&regstack); ga_clear(&backpos); vim_free(reg_tofree); vim_free(reg_prev_sub); } #endif #ifdef FEAT_EVAL static void report_re_switch(char_u *pat) { if (p_verbose > 0) { verbose_enter(); msg_puts(_("Switching to backtracking RE engine for pattern: ")); msg_puts((char *)pat); verbose_leave(); } } #endif #if defined(FEAT_X11) || defined(PROTO) /* * Return whether "prog" is currently being executed. */ int regprog_in_use(regprog_T *prog) { return prog->re_in_use; } #endif /* * Match a regexp against a string. * "rmp->regprog" must be a compiled regexp as returned by vim_regcomp(). * Note: "rmp->regprog" may be freed and changed. * Uses curbuf for line count and 'iskeyword'. * When "nl" is TRUE consider a "\n" in "line" to be a line break. * * Return TRUE if there is a match, FALSE if not. */ static int vim_regexec_string( regmatch_T *rmp, char_u *line, // string to match against colnr_T col, // column to start looking for match int nl) { int result; regexec_T rex_save; int rex_in_use_save = rex_in_use; // Cannot use the same prog recursively, it contains state. 
if (rmp->regprog->re_in_use) { emsg(_(e_cannot_use_pattern_recursively)); return FALSE; } rmp->regprog->re_in_use = TRUE; if (rex_in_use) // Being called recursively, save the state. rex_save = rex; rex_in_use = TRUE; rex.reg_startp = NULL; rex.reg_endp = NULL; rex.reg_startpos = NULL; rex.reg_endpos = NULL; result = rmp->regprog->engine->regexec_nl(rmp, line, col, nl); rmp->regprog->re_in_use = FALSE; // NFA engine aborted because it's very slow. if (rmp->regprog->re_engine == AUTOMATIC_ENGINE && result == NFA_TOO_EXPENSIVE) { int save_p_re = p_re; int re_flags = rmp->regprog->re_flags; char_u *pat = vim_strsave(((nfa_regprog_T *)rmp->regprog)->pattern); p_re = BACKTRACKING_ENGINE; vim_regfree(rmp->regprog); if (pat != NULL) { #ifdef FEAT_EVAL report_re_switch(pat); #endif rmp->regprog = vim_regcomp(pat, re_flags); if (rmp->regprog != NULL) { rmp->regprog->re_in_use = TRUE; result = rmp->regprog->engine->regexec_nl(rmp, line, col, nl); rmp->regprog->re_in_use = FALSE; } vim_free(pat); } p_re = save_p_re; } rex_in_use = rex_in_use_save; if (rex_in_use) rex = rex_save; return result > 0; } /* * Note: "*prog" may be freed and changed. * Return TRUE if there is a match, FALSE if not. */ int vim_regexec_prog( regprog_T **prog, int ignore_case, char_u *line, colnr_T col) { int r; regmatch_T regmatch; regmatch.regprog = *prog; regmatch.rm_ic = ignore_case; r = vim_regexec_string(&regmatch, line, col, FALSE); *prog = regmatch.regprog; return r; } /* * Note: "rmp->regprog" may be freed and changed. * Return TRUE if there is a match, FALSE if not. */ int vim_regexec(regmatch_T *rmp, char_u *line, colnr_T col) { return vim_regexec_string(rmp, line, col, FALSE); } /* * Like vim_regexec(), but consider a "\n" in "line" to be a line break. * Note: "rmp->regprog" may be freed and changed. * Return TRUE if there is a match, FALSE if not. 
*/ int vim_regexec_nl(regmatch_T *rmp, char_u *line, colnr_T col) { return vim_regexec_string(rmp, line, col, TRUE); } /* * Match a regexp against multiple lines. * "rmp->regprog" must be a compiled regexp as returned by vim_regcomp(). * Note: "rmp->regprog" may be freed and changed, even set to NULL. * Uses curbuf for line count and 'iskeyword'. * * Return zero if there is no match. Return number of lines contained in the * match otherwise. */ long vim_regexec_multi( regmmatch_T *rmp, win_T *win, // window in which to search or NULL buf_T *buf, // buffer in which to search linenr_T lnum, // nr of line to start looking for match colnr_T col, // column to start looking for match int *timed_out) // flag is set when timeout limit reached { int result; regexec_T rex_save; int rex_in_use_save = rex_in_use; // Cannot use the same prog recursively, it contains state. if (rmp->regprog->re_in_use) { emsg(_(e_cannot_use_pattern_recursively)); return FALSE; } rmp->regprog->re_in_use = TRUE; if (rex_in_use) // Being called recursively, save the state. rex_save = rex; rex_in_use = TRUE; result = rmp->regprog->engine->regexec_multi( rmp, win, buf, lnum, col, timed_out); rmp->regprog->re_in_use = FALSE; // NFA engine aborted because it's very slow. if (rmp->regprog->re_engine == AUTOMATIC_ENGINE && result == NFA_TOO_EXPENSIVE) { int save_p_re = p_re; int re_flags = rmp->regprog->re_flags; char_u *pat = vim_strsave(((nfa_regprog_T *)rmp->regprog)->pattern); p_re = BACKTRACKING_ENGINE; if (pat != NULL) { regprog_T *prev_prog = rmp->regprog; #ifdef FEAT_EVAL report_re_switch(pat); #endif #ifdef FEAT_SYN_HL // checking for \z misuse was already done when compiling for NFA, // allow all here reg_do_extmatch = REX_ALL; #endif rmp->regprog = vim_regcomp(pat, re_flags); #ifdef FEAT_SYN_HL reg_do_extmatch = 0; #endif if (rmp->regprog == NULL) { // Somehow compiling the pattern failed now, put back the // previous one to avoid "regprog" becoming NULL. 
rmp->regprog = prev_prog; } else { vim_regfree(prev_prog); rmp->regprog->re_in_use = TRUE; result = rmp->regprog->engine->regexec_multi( rmp, win, buf, lnum, col, timed_out); rmp->regprog->re_in_use = FALSE; } vim_free(pat); } p_re = save_p_re; } rex_in_use = rex_in_use_save; if (rex_in_use) rex = rex_save; return result <= 0 ? 0 : result; }
/* vi:set ts=8 sts=4 sw=4 noet:
 *
 * Handling of regular expressions: vim_regcomp(), vim_regexec(), vim_regsub()
 */

// By default: do not create debugging logs or files related to regular
// expressions, even when compiling with -DDEBUG.
// Uncomment the second line to get the regexp debugging.
#undef DEBUG
// #define DEBUG

#include "vim.h"

#ifdef DEBUG
// show/save debugging data when BT engine is used
# define BT_REGEXP_DUMP
// save the debugging data to a file instead of displaying it
# define BT_REGEXP_LOG
# define BT_REGEXP_DEBUG_LOG
# define BT_REGEXP_DEBUG_LOG_NAME	"bt_regexp_debug.log"
#endif

#ifdef FEAT_RELTIME
// Points at "dummy_timeout_flag" when no timeout is active; a timer callback
// flips the pointed-at flag when the regexp search times out.
static sig_atomic_t dummy_timeout_flag = 0;
static volatile sig_atomic_t *timeout_flag = &dummy_timeout_flag;
#endif

/*
 * Magic characters have a special meaning, they don't match literally.
 * Magic characters are negative.  This separates them from literal characters
 * (possibly multi-byte).  Only ASCII characters can be Magic.
 */
#define Magic(x)	((int)(x) - 256)
#define un_Magic(x)	((x) + 256)
#define is_Magic(x)	((x) < 0)

// Strip the Magic property from a character, if present.
    static int
no_Magic(int x)
{
    if (is_Magic(x))
	return un_Magic(x);
    return x;
}

// Flip a character between its Magic and literal form.
    static int
toggle_Magic(int x)
{
    if (is_Magic(x))
	return un_Magic(x);
    return Magic(x);
}

#ifdef FEAT_RELTIME
// Start a timeout for regexp execution: after "msec" the flag pointed at by
// "timeout_flag" is set.
    void
init_regexp_timeout(long msec)
{
    timeout_flag = start_timeout(msec);
}

// Stop the regexp timeout and point back at the (never set) dummy flag.
    void
disable_regexp_timeout(void)
{
    stop_timeout();
    timeout_flag = &dummy_timeout_flag;
}
#endif

/*
 * The first byte of the BT regexp internal "program" is actually this magic
 * number; the start node begins in the second byte.  It's used to catch the
 * most severe mutilation of the program by the caller.
 */
#define REGMAGIC	0234

/*
 * Utility definitions.
 */
#define UCHARAT(p)	((int)*(char_u *)(p))

// Used for an error (down from) vim_regcomp(): give the error message, set
// rc_did_emsg and return NULL
#define EMSG_RET_NULL(m) return (emsg((m)), rc_did_emsg = TRUE, (void *)NULL)
#define IEMSG_RET_NULL(m) return (iemsg((m)), rc_did_emsg = TRUE, (void *)NULL)
#define EMSG_RET_FAIL(m) return (emsg((m)), rc_did_emsg = TRUE, FAIL)
#define EMSG2_RET_NULL(m, c) return (semsg((const char *)(m), (c) ? "" : "\\"), rc_did_emsg = TRUE, (void *)NULL)
#define EMSG3_RET_NULL(m, c, a) return (semsg((const char *)(m), (c) ? "" : "\\", (a)), rc_did_emsg = TRUE, (void *)NULL)
#define EMSG2_RET_FAIL(m, c) return (semsg((const char *)(m), (c) ? "" : "\\"), rc_did_emsg = TRUE, FAIL)
#define EMSG_ONE_RET_NULL EMSG2_RET_NULL(_(e_invalid_item_in_str_brackets), reg_magic == MAGIC_ALL)

// Maximum value for \{n,m} repeat counts.
#define MAX_LIMIT	(32767L << 16L)

#define NOT_MULTI	0
#define MULTI_ONE	1
#define MULTI_MULT	2

// return values for regmatch()
#define	RA_FAIL		1	// something failed, abort
#define	RA_CONT		2	// continue in inner loop
#define	RA_BREAK	3	// break inner loop
#define	RA_MATCH	4	// successful match
#define	RA_NOMATCH	5	// didn't match

/*
 * Return NOT_MULTI if c is not a "multi" operator.
 * Return MULTI_ONE if c is a single "multi" operator.
 * Return MULTI_MULT if c is a multi "multi" operator.
 */
    static int
re_multi_type(int c)
{
    if (c == Magic('@') || c == Magic('=') || c == Magic('?'))
	return MULTI_ONE;
    if (c == Magic('*') || c == Magic('+') || c == Magic('{'))
	return MULTI_MULT;
    return NOT_MULTI;
}

// Last substitute string, used by regsub; freed in free_regexp_stuff().
static char_u		*reg_prev_sub = NULL;

/*
 * REGEXP_INRANGE contains all characters which are always special in a []
 * range after '\'.
 * REGEXP_ABBR contains all characters which act as abbreviations after '\'.
 * These are:
 *  \n	- New line (NL).
 *  \r	- Carriage Return (CR).
 *  \t	- Tab (TAB).
 *  \e	- Escape (ESC).
 *  \b	- Backspace (Ctrl_H).
 *  \d	- Character code in decimal, eg \d123
 *  \o	- Character code in octal, eg \o80
 *  \x	- Character code in hex, eg \x4a
 *  \u	- Multibyte character code, eg \u20ac
 *  \U	- Long multibyte character code, eg \U12345678
 */
static char_u REGEXP_INRANGE[] = "]^-n\\";
static char_u REGEXP_ABBR[] = "nrtebdoxuU";

/*
 * Translate '\x' to its control character, except "\n", which is Magic.
 */
    static int
backslash_trans(int c)
{
    switch (c)
    {
	case 'r':   return CAR;
	case 't':   return TAB;
	case 'e':   return ESC;
	case 'b':   return BS;
    }
    return c;
}

/*
 * Check for a character class name "[:name:]".  "pp" points to the '['.
 * Returns one of the CLASS_ items.  CLASS_NONE means that no item was
 * recognized.  Otherwise "pp" is advanced to after the item.
 */
    static int
get_char_class(char_u **pp)
{
    // NOTE: the order of these strings must match the CLASS_ defines below;
    // the loop index doubles as the return value.
    static const char *(class_names[]) =
    {
	"alnum:]",
#define CLASS_ALNUM 0
	"alpha:]",
#define CLASS_ALPHA 1
	"blank:]",
#define CLASS_BLANK 2
	"cntrl:]",
#define CLASS_CNTRL 3
	"digit:]",
#define CLASS_DIGIT 4
	"graph:]",
#define CLASS_GRAPH 5
	"lower:]",
#define CLASS_LOWER 6
	"print:]",
#define CLASS_PRINT 7
	"punct:]",
#define CLASS_PUNCT 8
	"space:]",
#define CLASS_SPACE 9
	"upper:]",
#define CLASS_UPPER 10
	"xdigit:]",
#define CLASS_XDIGIT 11
	"tab:]",
#define CLASS_TAB 12
	"return:]",
#define CLASS_RETURN 13
	"backspace:]",
#define CLASS_BACKSPACE 14
	"escape:]",
#define CLASS_ESCAPE 15
	"ident:]",
#define CLASS_IDENT 16
	"keyword:]",
#define CLASS_KEYWORD 17
	"fname:]",
#define CLASS_FNAME 18
    };
#define CLASS_NONE	99
    int i;

    if ((*pp)[1] == ':')
    {
	for (i = 0; i < (int)ARRAY_LENGTH(class_names); ++i)
	    if (STRNCMP(*pp + 2, class_names[i], STRLEN(class_names[i])) == 0)
	    {
		*pp += STRLEN(class_names[i]) + 2;
		return i;
	    }
    }
    return CLASS_NONE;
}

/*
 * Specific version of character class functions.
 * Using a table to keep this fast.
 */
static short	class_tab[256];

#define	    RI_DIGIT	0x01
#define	    RI_HEX	0x02
#define	    RI_OCTAL	0x04
#define	    RI_WORD	0x08
#define	    RI_HEAD	0x10
#define	    RI_ALPHA	0x20
#define	    RI_LOWER	0x40
#define	    RI_UPPER	0x80
#define	    RI_WHITE	0x100

// Fill class_tab[] with the RI_ flags for each ASCII byte; done lazily once.
    static void
init_class_tab(void)
{
    int		i;
    static int	done = FALSE;

    if (done)
	return;

    for (i = 0; i < 256; ++i)
    {
	if (i >= '0' && i <= '7')
	    class_tab[i] = RI_DIGIT + RI_HEX + RI_OCTAL + RI_WORD;
	else if (i >= '8' && i <= '9')
	    class_tab[i] = RI_DIGIT + RI_HEX + RI_WORD;
	else if (i >= 'a' && i <= 'f')
	    class_tab[i] = RI_HEX + RI_WORD + RI_HEAD + RI_ALPHA + RI_LOWER;
	else if (i >= 'g' && i <= 'z')
	    class_tab[i] = RI_WORD + RI_HEAD + RI_ALPHA + RI_LOWER;
	else if (i >= 'A' && i <= 'F')
	    class_tab[i] = RI_HEX + RI_WORD + RI_HEAD + RI_ALPHA + RI_UPPER;
	else if (i >= 'G' && i <= 'Z')
	    class_tab[i] = RI_WORD + RI_HEAD + RI_ALPHA + RI_UPPER;
	else if (i == '_')
	    class_tab[i] = RI_WORD + RI_HEAD;
	else
	    class_tab[i] = 0;
    }
    class_tab[' '] |= RI_WHITE;
    class_tab['\t'] |= RI_WHITE;
    done = TRUE;
}

// Bytes >= 0x100 never belong to any of these classes.
#define ri_digit(c)	((c) < 0x100 && (class_tab[c] & RI_DIGIT))
#define ri_hex(c)	((c) < 0x100 && (class_tab[c] & RI_HEX))
#define ri_octal(c)	((c) < 0x100 && (class_tab[c] & RI_OCTAL))
#define ri_word(c)	((c) < 0x100 && (class_tab[c] & RI_WORD))
#define ri_head(c)	((c) < 0x100 && (class_tab[c] & RI_HEAD))
#define ri_alpha(c)	((c) < 0x100 && (class_tab[c] & RI_ALPHA))
#define ri_lower(c)	((c) < 0x100 && (class_tab[c] & RI_LOWER))
#define ri_upper(c)	((c) < 0x100 && (class_tab[c] & RI_UPPER))
#define ri_white(c)	((c) < 0x100 && (class_tab[c] & RI_WHITE))

// flags for regflags
#define RF_ICASE    1	// ignore case
#define RF_NOICASE  2	// don't ignore case
#define RF_HASNL    4	// can match a NL
#define RF_ICOMBINE 8	// ignore combining characters
#define RF_LOOKBH   16	// uses "\@<=" or "\@<!"

/*
 * Global work variables for vim_regcomp().
 */

static char_u	*regparse;	// Input-scan pointer.
static int	regnpar;	// () count.
static int	wants_nfa;	// regex should use NFA engine
#ifdef FEAT_SYN_HL
static int	regnzpar;	// \z() count.
static int	re_has_z;	// \z item detected
#endif
static unsigned	regflags;	// RF_ flags for prog
#if defined(FEAT_SYN_HL) || defined(PROTO)
static int	had_eol;	// TRUE when EOL found by vim_regcomp()
#endif
static magic_T	reg_magic;	// magicness of the pattern
static int	reg_string;	// matching with a string instead of a buffer
				// line
static int	reg_strict;	// "[abc" is illegal

/*
 * META contains all characters that may be magic, except '^' and '$'.
 */

// META[] is used often enough to justify turning it into a table.
// Indexed by ASCII value; 1 means the character may be magic after '\'.
static char_u META_flags[] = {
    0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
    0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
//		   %  &     (  )  *  +	      .
    0, 0, 0, 0, 0, 1, 1, 0, 1, 1, 1, 1, 0, 0, 1, 0,
//     1  2  3	4  5  6  7  8  9	<  =  >  ?
    0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 1, 1, 1, 1,
//  @  A     C	D     F     H  I     K	L  M	 O
    1, 1, 0, 1, 1, 0, 1, 0, 1, 1, 0, 1, 1, 1, 0, 1,
//  P	     S	   U  V  W  X	 Z  [		 _
    1, 0, 0, 1, 0, 1, 1, 1, 1, 0, 1, 1, 0, 0, 0, 1,
//     a     c	d     f  h  i	 k  l  m  n  o
    0, 1, 0, 1, 1, 0, 1, 0, 1, 1, 0, 1, 1, 1, 1, 1,
//  p	  s	u  v  w  x	z  {  |	    ~
    1, 0, 0, 1, 0, 1, 1, 1, 1, 0, 1, 1, 1, 0, 1
};

static int	curchr;		// currently parsed character
// Previous character.  Note: prevchr is sometimes -1 when we are not at the
// start, eg in /[ ^I]^ the pattern was never found even if it existed,
// because ^ was taken to be magic -- webb
static int	prevchr;
static int	prevprevchr;	// previous-previous character
static int	nextchr;	// used for ungetchr()

// arguments for reg()
#define REG_NOPAREN	0	// toplevel reg()
#define REG_PAREN	1	// \(\)
#define REG_ZPAREN	2	// \z(\)
#define REG_NPAREN	3	// \%(\)

// Snapshot of the lexer state, used by save/restore_parse_state() so that
// the parser can look ahead and rewind.
typedef struct
{
     char_u	*regparse;
     int	prevchr_len;
     int	curchr;
     int	prevchr;
     int	prevprevchr;
     int	nextchr;
     int	at_start;
     int	prev_at_start;
     int	regnpar;
} parse_state_T;

static void	initchr(char_u *);
static int	getchr(void);
static void	skipchr_keepstart(void);
static int	peekchr(void);
static void	skipchr(void);
static void	ungetchr(void);
static long	gethexchrs(int maxinputlen);
static long	getoctchrs(void);
static long	getdecchrs(void);
static int	coll_get_char(void);
static int	prog_magic_wrong(void);
static int	cstrncmp(char_u *s1, char_u *s2, int *n);
static char_u	*cstrchr(char_u *, int);
static int	re_mult_next(char *what);
static int	reg_iswordc(int);
#ifdef FEAT_EVAL
static void report_re_switch(char_u *pat);
#endif

static regengine_T bt_regengine;
static regengine_T nfa_regengine;

/*
 * Return TRUE if compiled regular expression "prog" can match a line break.
 */
    int
re_multiline(regprog_T *prog)
{
    return (prog->regflags & RF_HASNL);
}

/*
 * Check for an equivalence class name "[=a=]".  "pp" points to the '['.
 * Returns a character representing the class.  Zero means that no item was
 * recognized.  Otherwise "pp" is advanced to after the item.
 */
    static int
get_equi_class(char_u **pp)
{
    int		c;
    int		l = 1;
    char_u	*p = *pp;

    if (p[1] == '=' && p[2] != NUL)
    {
	if (has_mbyte)
	    l = (*mb_ptr2len)(p + 2);
	if (p[l + 2] == '=' && p[l + 3] == ']')
	{
	    if (has_mbyte)
		c = mb_ptr2char(p + 2);
	    else
		c = p[2];
	    *pp += l + 4;
	    return c;
	}
    }
    return 0;
}

/*
 * Check for a collating element "[.a.]".  "pp" points to the '['.
 * Returns a character.  Zero means that no item was recognized.  Otherwise
 * "pp" is advanced to after the item.
 * Currently only single characters are recognized!
 */
    static int
get_coll_element(char_u **pp)
{
    int		c;
    int		l = 1;
    char_u	*p = *pp;

    if (p[0] != NUL && p[1] == '.' && p[2] != NUL)
    {
	if (has_mbyte)
	    l = (*mb_ptr2len)(p + 2);
	if (p[l + 2] == '.' && p[l + 3] == ']')
	{
	    if (has_mbyte)
		c = mb_ptr2char(p + 2);
	    else
		c = p[2];
	    *pp += l + 4;
	    return c;
	}
    }
    return 0;
}

static int reg_cpo_lit;	    // 'cpoptions' contains 'l' flag
static int reg_cpo_bsl;	    // 'cpoptions' contains '\' flag

// Cache the 'cpoptions' flags that influence pattern parsing.
    static void
get_cpo_flags(void)
{
    reg_cpo_lit = vim_strchr(p_cpo, CPO_LITERAL) != NULL;
    reg_cpo_bsl = vim_strchr(p_cpo, CPO_BACKSL) != NULL;
}

/*
 * Skip over a "[]" range.
 * "p" must point to the character after the '['.
 * The returned pointer is on the matching ']', or the terminating NUL.
 */
    static char_u *
skip_anyof(char_u *p)
{
    int		l;

    if (*p == '^')	// Complement of range.
	++p;
    if (*p == ']' || *p == '-')
	++p;
    while (*p != NUL && *p != ']')
    {
	if (has_mbyte && (l = (*mb_ptr2len)(p)) > 1)
	    p += l;
	else
	    if (*p == '-')
	    {
		++p;
		if (*p != ']' && *p != NUL)
		    MB_PTR_ADV(p);
	    }
	else if (*p == '\\'
		&& !reg_cpo_bsl
		&& (vim_strchr(REGEXP_INRANGE, p[1]) != NULL
		    || (!reg_cpo_lit
			&& vim_strchr(REGEXP_ABBR, p[1]) != NULL)))
	    p += 2;
	else if (*p == '[')
	{
	    // "[:name:]", "[=a=]" and "[.a.]" advance "p" themselves.
	    if (get_char_class(&p) == CLASS_NONE
		    && get_equi_class(&p) == 0
		    && get_coll_element(&p) == 0
		    && *p != NUL)
		++p; // it is not a class name and not NUL
	}
	else
	    ++p;
    }

    return p;
}

/*
 * Skip past regular expression.
 * Stop at end of "startp" or where "delim" is found ('/', '?', etc).
 * Take care of characters with a backslash in front of it.
 * Skip strings inside [ and ].
 */
    char_u *
skip_regexp(
    char_u	*startp,
    int		delim,
    int		magic)
{
    return skip_regexp_ex(startp, delim, magic, NULL, NULL, NULL);
}

/*
 * Call skip_regexp() and when the delimiter does not match give an error and
 * return NULL.
 */
    char_u *
skip_regexp_err(
    char_u	*startp,
    int		delim,
    int		magic)
{
    char_u *p = skip_regexp(startp, delim, magic);

    if (*p != delim)
    {
	semsg(_(e_missing_delimiter_after_search_pattern_str), startp);
	return NULL;
    }
    return p;
}

/*
 * skip_regexp() with extra arguments:
 * When "newp" is not NULL and "dirc" is '?', make an allocated copy of the
 * expression and change "\?" to "?".  If "*newp" is not NULL the expression
 * is changed in-place.
 * If a "\?" is changed to "?" then "dropped" is incremented, unless NULL.
 * If "magic_val" is not NULL, returns the effective magicness of the pattern
 */
    char_u *
skip_regexp_ex(
    char_u	*startp,
    int		dirc,
    int		magic,
    char_u	**newp,
    int		*dropped,
    magic_T	*magic_val)
{
    magic_T	mymagic;
    char_u	*p = startp;

    if (magic)
	mymagic = MAGIC_ON;
    else
	mymagic = MAGIC_OFF;
    get_cpo_flags();

    for (; p[0] != NUL; MB_PTR_ADV(p))
    {
	if (p[0] == dirc)	// found end of regexp
	    break;
	if ((p[0] == '[' && mymagic >= MAGIC_ON)
		|| (p[0] == '\\' && p[1] == '[' && mymagic <= MAGIC_OFF))
	{
	    p = skip_anyof(p + 1);
	    if (p[0] == NUL)
		break;
	}
	else if (p[0] == '\\' && p[1] != NUL)
	{
	    if (dirc == '?' && newp != NULL && p[1] == '?')
	    {
		// change "\?" to "?", make a copy first.
		if (*newp == NULL)
		{
		    *newp = vim_strsave(startp);
		    if (*newp != NULL)
			p = *newp + (p - startp);
		}
		if (dropped != NULL)
		    ++*dropped;
		if (*newp != NULL)
		    STRMOVE(p, p + 1);
		else
		    ++p;
	    }
	    else
		++p;    // skip next character
	    // "\v" / "\V" change the magicness for the rest of the pattern.
	    if (*p == 'v')
		mymagic = MAGIC_ALL;
	    else if (*p == 'V')
		mymagic = MAGIC_NONE;
	}
    }
    if (magic_val != NULL)
	*magic_val = mymagic;
    return p;
}

/*
 * Functions for getting characters from the regexp input.
 */

static int	prevchr_len;	// byte length of previous char
static int	at_start;	// True when on the first character
static int	prev_at_start;  // True when on the second character

/*
 * Start parsing at "str".
 */
    static void
initchr(char_u *str)
{
    regparse = str;
    prevchr_len = 0;
    curchr = prevprevchr = prevchr = nextchr = -1;
    at_start = TRUE;
    prev_at_start = FALSE;
}

/*
 * Save the current parse state, so that it can be restored and parsing
 * starts in the same state again.
 */
    static void
save_parse_state(parse_state_T *ps)
{
    ps->regparse = regparse;
    ps->prevchr_len = prevchr_len;
    ps->curchr = curchr;
    ps->prevchr = prevchr;
    ps->prevprevchr = prevprevchr;
    ps->nextchr = nextchr;
    ps->at_start = at_start;
    ps->prev_at_start = prev_at_start;
    ps->regnpar = regnpar;
}

/*
 * Restore a previously saved parse state.
 */
    static void
restore_parse_state(parse_state_T *ps)
{
    regparse = ps->regparse;
    prevchr_len = ps->prevchr_len;
    curchr = ps->curchr;
    prevchr = ps->prevchr;
    prevprevchr = ps->prevprevchr;
    nextchr = ps->nextchr;
    at_start = ps->at_start;
    prev_at_start = ps->prev_at_start;
    regnpar = ps->regnpar;
}

/*
 * Get the next character without advancing.
 */
    static int
peekchr(void)
{
    static int	after_slash = FALSE;

    if (curchr == -1)
    {
	switch (curchr = regparse[0])
	{
	case '.':
	case '[':
	case '~':
	    // magic when 'magic' is on
	    if (reg_magic >= MAGIC_ON)
		curchr = Magic(curchr);
	    break;
	case '(':
	case ')':
	case '{':
	case '%':
	case '+':
	case '=':
	case '?':
	case '@':
	case '!':
	case '&':
	case '|':
	case '<':
	case '>':
	case '#':	// future ext.
	case '"':	// future ext.
	case '\'':	// future ext.
	case ',':	// future ext.
	case '-':	// future ext.
	case ':':	// future ext.
	case ';':	// future ext.
	case '`':	// future ext.
	case '/':	// Can't be used in / command
	    // magic only after "\v"
	    if (reg_magic == MAGIC_ALL)
		curchr = Magic(curchr);
	    break;
	case '*':
	    // * is not magic as the very first character, eg "?*ptr", when
	    // after '^', eg "/^*ptr" and when after "\(", "\|", "\&".  But
	    // "\(\*" is not magic, thus must be magic if "after_slash"
	    if (reg_magic >= MAGIC_ON
		    && !at_start
		    && !(prev_at_start && prevchr == Magic('^'))
		    && (after_slash
			|| (prevchr != Magic('(')
			    && prevchr != Magic('&')
			    && prevchr != Magic('|'))))
		curchr = Magic('*');
	    break;
	case '^':
	    // '^' is only magic as the very first character and if it's after
	    // "\(", "\|", "\&' or "\n"
	    if (reg_magic >= MAGIC_OFF
		    && (at_start
			|| reg_magic == MAGIC_ALL
			|| prevchr == Magic('(')
			|| prevchr == Magic('|')
			|| prevchr == Magic('&')
			|| prevchr == Magic('n')
			|| (no_Magic(prevchr) == '('
			    && prevprevchr == Magic('%'))))
	    {
		curchr = Magic('^');
		at_start = TRUE;
		prev_at_start = FALSE;
	    }
	    break;
	case '$':
	    // '$' is only magic as the very last char and if it's in front of
	    // either "\|", "\)", "\&", or "\n"
	    if (reg_magic >= MAGIC_OFF)
	    {
		char_u *p = regparse + 1;
		int is_magic_all = (reg_magic == MAGIC_ALL);

		// ignore \c \C \m \M \v \V and \Z after '$'
		while (p[0] == '\\' && (p[1] == 'c' || p[1] == 'C'
				|| p[1] == 'm' || p[1] == 'M'
				|| p[1] == 'v' || p[1] == 'V' || p[1] == 'Z'))
		{
		    if (p[1] == 'v')
			is_magic_all = TRUE;
		    else if (p[1] == 'm' || p[1] == 'M' || p[1] == 'V')
			is_magic_all = FALSE;
		    p += 2;
		}
		if (p[0] == NUL
			|| (p[0] == '\\'
			    && (p[1] == '|' || p[1] == '&' || p[1] == ')'
				|| p[1] == 'n'))
			|| (is_magic_all
			       && (p[0] == '|' || p[0] == '&' || p[0] == ')'))
			|| reg_magic == MAGIC_ALL)
		    curchr = Magic('$');
	    }
	    break;
	case '\\':
	    {
		int c = regparse[1];

		if (c == NUL)
		    curchr = '\\';	// trailing '\'
		else if (c <= '~' && META_flags[c])
		{
		    /*
		     * META contains everything that may be magic sometimes,
		     * except ^ and $ ("\^" and "\$" are only magic after
		     * "\V").  We now fetch the next character and toggle its
		     * magicness.  Therefore, \ is so meta-magic that it is
		     * not in META.
		     */
		    curchr = -1;
		    prev_at_start = at_start;
		    at_start = FALSE;	// be able to say "/\*ptr"
		    ++regparse;
		    ++after_slash;
		    peekchr();
		    --regparse;
		    --after_slash;
		    curchr = toggle_Magic(curchr);
		}
		else if (vim_strchr(REGEXP_ABBR, c))
		{
		    /*
		     * Handle abbreviations, like "\t" for TAB -- webb
		     */
		    curchr = backslash_trans(c);
		}
		else if (reg_magic == MAGIC_NONE && (c == '$' || c == '^'))
		    curchr = toggle_Magic(c);
		else
		{
		    /*
		     * Next character can never be (made) magic?
		     * Then backslashing it won't do anything.
		     */
		    if (has_mbyte)
			curchr = (*mb_ptr2char)(regparse + 1);
		    else
			curchr = c;
		}
		break;
	    }

	default:
	    if (has_mbyte)
		curchr = (*mb_ptr2char)(regparse);
	}
    }

    return curchr;
}

/*
 * Eat one lexed character.  Do this in a way that we can undo it.
 */
    static void
skipchr(void)
{
    // peekchr() eats a backslash, do the same here
    if (*regparse == '\\')
	prevchr_len = 1;
    else
	prevchr_len = 0;
    if (regparse[prevchr_len] != NUL)
    {
	if (enc_utf8)
	    // exclude composing chars that mb_ptr2len does include
	    prevchr_len += utf_ptr2len(regparse + prevchr_len);
	else if (has_mbyte)
	    prevchr_len += (*mb_ptr2len)(regparse + prevchr_len);
	else
	    ++prevchr_len;
    }
    regparse += prevchr_len;
    prev_at_start = at_start;
    at_start = FALSE;
    prevprevchr = prevchr;
    prevchr = curchr;
    curchr = nextchr;	    // use previously unget char, or -1
    nextchr = -1;
}

/*
 * Skip a character while keeping the value of prev_at_start for at_start.
 * prevchr and prevprevchr are also kept.
 */
    static void
skipchr_keepstart(void)
{
    int as = prev_at_start;
    int pr = prevchr;
    int prpr = prevprevchr;

    skipchr();
    at_start = as;
    prevchr = pr;
    prevprevchr = prpr;
}

/*
 * Get the next character from the pattern.  We know about magic and such, so
 * therefore we need a lexical analyzer.
 */
    static int
getchr(void)
{
    int chr = peekchr();

    skipchr();
    return chr;
}

/*
 * put character back.  Works only once!
 */
    static void
ungetchr(void)
{
    nextchr = curchr;
    curchr = prevchr;
    prevchr = prevprevchr;
    at_start = prev_at_start;
    prev_at_start = FALSE;

    // Backup regparse, so that it's at the same position as before the
    // getchr().
    regparse -= prevchr_len;
}

/*
 * Get and return the value of the hex string at the current position.
 * Return -1 if there is no valid hex number.
 * The position is updated:
 *     blahblah\%x20asdf
 *	   before-^ ^-after
 * The parameter controls the maximum number of input characters.  This will be
 * 2 when reading a \%x20 sequence and 4 when reading a \%u20AC sequence.
 */
    static long
gethexchrs(int maxinputlen)
{
    long_u	nr = 0;
    int		c;
    int		i;

    for (i = 0; i < maxinputlen; ++i)
    {
	c = regparse[0];
	if (!vim_isxdigit(c))
	    break;
	nr <<= 4;
	nr |= hex2nr(c);
	++regparse;
    }

    if (i == 0)
	return -1;
    return (long)nr;
}

/*
 * Get and return the value of the decimal string immediately after the
 * current position.  Return -1 for invalid.  Consumes all digits.
 */
    static long
getdecchrs(void)
{
    long_u	nr = 0;
    int		c;
    int		i;

    for (i = 0; ; ++i)
    {
	c = regparse[0];
	if (c < '0' || c > '9')
	    break;
	nr *= 10;
	nr += c - '0';
	++regparse;
	curchr = -1;	// no longer valid
    }

    if (i == 0)
	return -1;
    return (long)nr;
}

/*
 * get and return the value of the octal string immediately after the current
 * position. Return -1 for invalid, or 0-255 for valid. Smart enough to handle
 * numbers > 377 correctly (for example, 400 is treated as 40) and doesn't
 * treat 8 or 9 as recognised characters. Position is updated:
 *     blahblah\%o210asdf
 *	   before-^  ^-after
 */
    static long
getoctchrs(void)
{
    long_u	nr = 0;
    int		c;
    int		i;

    for (i = 0; i < 3 && nr < 040; ++i)
    {
	c = regparse[0];
	if (c < '0' || c > '7')
	    break;
	nr <<= 3;
	nr |= hex2nr(c);
	++regparse;
    }

    if (i == 0)
	return -1;
    return (long)nr;
}

/*
 * read_limits - Read two integers to be taken as a minimum and maximum.
 * If the first character is '-', then the range is reversed.
 * Should end with 'end'.  If minval is missing, zero is default, if maxval is
 * missing, a very big number is the default.
 */
    static int
read_limits(long *minval, long *maxval)
{
    int		reverse = FALSE;
    char_u	*first_char;
    long	tmp;

    if (*regparse == '-')
    {
	// Starts with '-', so reverse the range later
	regparse++;
	reverse = TRUE;
    }
    first_char = regparse;
    *minval = getdigits(&regparse);
    if (*regparse == ',')	    // There is a comma
    {
	if (vim_isdigit(*++regparse))
	    *maxval = getdigits(&regparse);
	else
	    *maxval = MAX_LIMIT;
    }
    else if (VIM_ISDIGIT(*first_char))
	*maxval = *minval;	    // It was \{n} or \{-n}
    else
	*maxval = MAX_LIMIT;	    // It was \{} or \{-}
    if (*regparse == '\\')
	regparse++;	// Allow either \{...} or \{...\}
    if (*regparse != '}')
	EMSG2_RET_FAIL(_(e_syntax_error_in_str_curlies),
						       reg_magic == MAGIC_ALL);

    /*
     * Reverse the range if there was a '-', or make sure it is in the right
     * order otherwise.
     */
    if ((!reverse && *minval > *maxval) || (reverse && *minval < *maxval))
    {
	tmp = *minval;
	*minval = *maxval;
	*maxval = tmp;
    }
    skipchr();		// let's be friends with the lexer again
    return OK;
}

/*
 * vim_regexec and friends
 */

/*
 * Global work variables for vim_regexec().
 */

static void	cleanup_subexpr(void);
#ifdef FEAT_SYN_HL
static void	cleanup_zsubexpr(void);
#endif
static void	reg_nextline(void);
static int	match_with_backref(linenr_T start_lnum, colnr_T start_col, linenr_T end_lnum, colnr_T end_col, int *bytelen);

/*
 * Sometimes need to save a copy of a line.  Since alloc()/free() is very
 * slow, we keep one allocated piece of memory and only re-allocate it when
 * it's too small.  It's freed in bt_regexec_both() when finished.
 */
static char_u	*reg_tofree = NULL;
static unsigned	reg_tofreelen;

/*
 * Structure used to store the execution state of the regex engine.
 * Which ones are set depends on whether a single-line or multi-line match is
 * done:
 *			single-line		multi-line
 * reg_match		&regmatch_T		NULL
 * reg_mmatch		NULL			&regmmatch_T
 * reg_startp		reg_match->startp	<invalid>
 * reg_endp		reg_match->endp		<invalid>
 * reg_startpos		<invalid>		reg_mmatch->startpos
 * reg_endpos		<invalid>		reg_mmatch->endpos
 * reg_win		NULL			window in which to search
 * reg_buf		curbuf			buffer in which to search
 * reg_firstlnum	<invalid>		first line in which to search
 * reg_maxline		0			last line nr
 * reg_line_lbr		FALSE or TRUE		FALSE
 */
typedef struct {
    regmatch_T		*reg_match;
    regmmatch_T		*reg_mmatch;

    char_u		**reg_startp;
    char_u		**reg_endp;
    lpos_T		*reg_startpos;
    lpos_T		*reg_endpos;

    win_T		*reg_win;
    buf_T		*reg_buf;
    linenr_T		reg_firstlnum;
    linenr_T		reg_maxline;
    int			reg_line_lbr;	    // "\n" in string is line break

    // The current match-position is stored in these variables:
    linenr_T	lnum;		// line number, relative to first line
    char_u	*line;		// start of current line
    char_u	*input;		// current input, points into "line"

    int	need_clear_subexpr;	// subexpressions still need to be cleared
#ifdef FEAT_SYN_HL
    int	need_clear_zsubexpr;	// extmatch subexpressions still need to be
				// cleared
#endif

    // Internal copy of 'ignorecase'.  It is set at each call to vim_regexec().
    // Normally it gets the value of "rm_ic" or "rmm_ic", but when the pattern
    // contains '\c' or '\C' the value is overruled.
    int			reg_ic;

    // Similar to "reg_ic", but only for 'combining' characters.  Set with \Z
    // flag in the regexp.  Defaults to false, always.
    int			reg_icombine;

    // Copy of "rmm_maxcol": maximum column to search for a match.  Zero when
    // there is no maximum.
    colnr_T		reg_maxcol;

    // State for the NFA engine regexec.
    int nfa_has_zend;	    // NFA regexp \ze operator encountered.
    int nfa_has_backref;    // NFA regexp \1 .. \9 encountered.
    int nfa_nsubexpr;	    // Number of sub expressions actually being used
			    // during execution.  1 if only the whole match
			    // (subexpr 0) is used.
    // listid is global, so that it increases on recursive calls to
    // nfa_regmatch(), which means we don't have to clear the lastlist field of
    // all the states.
    int nfa_listid;
    int nfa_alt_listid;

#ifdef FEAT_SYN_HL
    int nfa_has_zsubexpr;   // NFA regexp has \z( ), set zsubexpr.
#endif
} regexec_T;

static regexec_T	rex;
static int		rex_in_use = FALSE;

/*
 * Return TRUE if character 'c' is included in 'iskeyword' option for
 * "reg_buf" buffer.
 */
    static int
reg_iswordc(int c)
{
    return vim_iswordc_buf(c, rex.reg_buf);
}

/*
 * Get pointer to the line "lnum", which is relative to "reg_firstlnum".
 */
    static char_u *
reg_getline(linenr_T lnum)
{
    // when looking behind for a match/no-match lnum is negative.  But we
    // can't go before line 1
    if (rex.reg_firstlnum + lnum < 1)
	return NULL;
    if (lnum > rex.reg_maxline)
	// Must have matched the "\n" in the last line.
	return (char_u *)"";
    return ml_get_buf(rex.reg_buf, rex.reg_firstlnum + lnum, FALSE);
}

#ifdef FEAT_SYN_HL
static char_u	*reg_startzp[NSUBEXP];	// Workspace to mark beginning
static char_u	*reg_endzp[NSUBEXP];	//   and end of \z(...\) matches
static lpos_T	reg_startzpos[NSUBEXP];	// idem, beginning pos
static lpos_T	reg_endzpos[NSUBEXP];	// idem, end pos
#endif

// TRUE if using multi-line regexp.
#define REG_MULTI	(rex.reg_match == NULL)

#ifdef FEAT_SYN_HL
/*
 * Create a new extmatch and mark it as referenced once.
 */
    static reg_extmatch_T *
make_extmatch(void)
{
    reg_extmatch_T	*em;

    em = ALLOC_CLEAR_ONE(reg_extmatch_T);
    if (em != NULL)
	em->refcnt = 1;
    return em;
}

/*
 * Add a reference to an extmatch.
 */
    reg_extmatch_T *
ref_extmatch(reg_extmatch_T *em)
{
    if (em != NULL)
	em->refcnt++;
    return em;
}

/*
 * Remove a reference to an extmatch.  If there are no references left, free
 * the info.
 */
    void
unref_extmatch(reg_extmatch_T *em)
{
    int i;

    if (em != NULL && --em->refcnt <= 0)
    {
	for (i = 0; i < NSUBEXP; ++i)
	    vim_free(em->matches[i]);
	vim_free(em);
    }
}
#endif

/*
 * Get class of previous character.
 */
    static int
reg_prev_class(void)
{
    // Returns -1 when at the start of the line (no previous character).
    if (rex.input > rex.line)
	return mb_get_class_buf(rex.input - 1
		     - (*mb_head_off)(rex.line, rex.input - 1), rex.reg_buf);
    return -1;
}

/*
 * Return TRUE if the current rex.input position matches the Visual area.
 */
    static int
reg_match_visual(void)
{
    pos_T	top, bot;
    linenr_T    lnum;
    colnr_T	col;
    win_T	*wp = rex.reg_win == NULL ? curwin : rex.reg_win;
    int		mode;
    colnr_T	start, end;
    colnr_T	start2, end2;
    colnr_T	cols;
    colnr_T	curswant;

    // Check if the buffer is the current buffer and not using a string.
    if (rex.reg_buf != curbuf || VIsual.lnum == 0 || !REG_MULTI)
	return FALSE;

    if (VIsual_active)
    {
	// Visual mode is active: use the live selection, normalized so that
	// "top" is before "bot".
	if (LT_POS(VIsual, wp->w_cursor))
	{
	    top = VIsual;
	    bot = wp->w_cursor;
	}
	else
	{
	    top = wp->w_cursor;
	    bot = VIsual;
	}
	mode = VIsual_mode;
	curswant = wp->w_curswant;
    }
    else
    {
	// Not active: use the last Visual area stored in the buffer.
	if (LT_POS(curbuf->b_visual.vi_start, curbuf->b_visual.vi_end))
	{
	    top = curbuf->b_visual.vi_start;
	    bot = curbuf->b_visual.vi_end;
	}
	else
	{
	    top = curbuf->b_visual.vi_end;
	    bot = curbuf->b_visual.vi_start;
	}
	mode = curbuf->b_visual.vi_mode;
	curswant = curbuf->b_visual.vi_curswant;
    }
    lnum = rex.lnum + rex.reg_firstlnum;
    if (lnum < top.lnum || lnum > bot.lnum)
	return FALSE;

    col = (colnr_T)(rex.input - rex.line);
    if (mode == 'v')
    {
	// Character-wise Visual: compare byte columns on the edge lines.
	if ((lnum == top.lnum && col < top.col)
		|| (lnum == bot.lnum && col >= bot.col + (*p_sel != 'e')))
	    return FALSE;
    }
    else if (mode == Ctrl_V)
    {
	// Block-wise Visual: compare virtual columns.
	getvvcol(wp, &top, &start, NULL, &end);
	getvvcol(wp, &bot, &start2, NULL, &end2);
	if (start2 < start)
	    start = start2;
	if (end2 > end)
	    end = end2;
	if (top.col == MAXCOL || bot.col == MAXCOL || curswant == MAXCOL)
	    end = MAXCOL;

	// getvvcol() flushes rex.line, need to get it again
	rex.line = reg_getline(rex.lnum);
	rex.input = rex.line + col;

	cols = win_linetabsize(wp, rex.line, col);
	if (cols < start || cols > end - (*p_sel == 'e'))
	    return FALSE;
    }
    return TRUE;
}

/*
 * Check the regexp program for its magic number.
 * Return TRUE if it's wrong.
 */
    static int
prog_magic_wrong(void)
{
    regprog_T	*prog;

    prog = REG_MULTI ? rex.reg_mmatch->regprog : rex.reg_match->regprog;
    if (prog->engine == &nfa_regengine)
	// For NFA matcher we don't check the magic
	return FALSE;

    if (UCHARAT(((bt_regprog_T *)prog)->program) != REGMAGIC)
    {
	emsg(_(e_corrupted_regexp_program));
	return TRUE;
    }
    return FALSE;
}

/*
 * Cleanup the subexpressions, if this wasn't done yet.
 * This construction is used to clear the subexpressions only when they are
 * used (to increase speed).
 */
    static void
cleanup_subexpr(void)
{
    if (rex.need_clear_subexpr)
    {
	if (REG_MULTI)
	{
	    // Use 0xff to set lnum to -1
	    vim_memset(rex.reg_startpos, 0xff, sizeof(lpos_T) * NSUBEXP);
	    vim_memset(rex.reg_endpos, 0xff, sizeof(lpos_T) * NSUBEXP);
	}
	else
	{
	    vim_memset(rex.reg_startp, 0, sizeof(char_u *) * NSUBEXP);
	    vim_memset(rex.reg_endp, 0, sizeof(char_u *) * NSUBEXP);
	}
	rex.need_clear_subexpr = FALSE;
    }
}

#ifdef FEAT_SYN_HL
// Same as cleanup_subexpr() but for the \z(...\) workspace.
    static void
cleanup_zsubexpr(void)
{
    if (rex.need_clear_zsubexpr)
    {
	if (REG_MULTI)
	{
	    // Use 0xff to set lnum to -1
	    vim_memset(reg_startzpos, 0xff, sizeof(lpos_T) * NSUBEXP);
	    vim_memset(reg_endzpos, 0xff, sizeof(lpos_T) * NSUBEXP);
	}
	else
	{
	    vim_memset(reg_startzp, 0, sizeof(char_u *) * NSUBEXP);
	    vim_memset(reg_endzp, 0, sizeof(char_u *) * NSUBEXP);
	}
	rex.need_clear_zsubexpr = FALSE;
    }
}
#endif

/*
 * Advance rex.lnum, rex.line and rex.input to the next line.
 */
    static void
reg_nextline(void)
{
    rex.line = reg_getline(++rex.lnum);
    rex.input = rex.line;
    fast_breakcheck();
}

/*
 * Check whether a backreference matches.
 * Returns RA_FAIL, RA_NOMATCH or RA_MATCH.
 * If "bytelen" is not NULL, it is set to the byte length of the match in the
 * last line.
 */
    static int
match_with_backref(
    linenr_T start_lnum,
    colnr_T  start_col,
    linenr_T end_lnum,
    colnr_T  end_col,
    int	     *bytelen)
{
    linenr_T	clnum = start_lnum;
    colnr_T	ccol = start_col;
    int		len;
    char_u	*p;

    if (bytelen != NULL)
	*bytelen = 0;
    for (;;)
    {
	// Since getting one line may invalidate the other, need to make copy.
	// Slow!
	if (rex.line != reg_tofree)
	{
	    len = (int)STRLEN(rex.line);
	    if (reg_tofree == NULL || len >= (int)reg_tofreelen)
	    {
		len += 50;	// get some extra
		vim_free(reg_tofree);
		reg_tofree = alloc(len);
		if (reg_tofree == NULL)
		    return RA_FAIL; // out of memory!
		reg_tofreelen = len;
	    }
	    STRCPY(reg_tofree, rex.line);
	    // Re-base rex.input into the copy so the offset is preserved.
	    rex.input = reg_tofree + (rex.input - rex.line);
	    rex.line = reg_tofree;
	}

	// Get the line to compare with.
	p = reg_getline(clnum);
	if (clnum == end_lnum)
	    len = end_col - ccol;
	else
	    len = (int)STRLEN(p + ccol);

	if (cstrncmp(p + ccol, rex.input, &len) != 0)
	    return RA_NOMATCH;  // doesn't match
	if (bytelen != NULL)
	    *bytelen += len;
	if (clnum == end_lnum)
	    break;		// match and at end!
	if (rex.lnum >= rex.reg_maxline)
	    return RA_NOMATCH;  // text too short

	// Advance to next line.
	reg_nextline();
	if (bytelen != NULL)
	    *bytelen = 0;
	++clnum;
	ccol = 0;
	if (got_int)
	    return RA_FAIL;
    }

    // found a match!  Note that rex.line may now point to a copy of the line,
    // that should not matter.
    return RA_MATCH;
}

/*
 * Used in a place where no * or \+ can follow.
 */
    static int
re_mult_next(char *what)
{
    // Reject a multi after "what" (e.g. "\zs*"); gives an error message.
    if (re_multi_type(peekchr()) == MULTI_MULT)
    {
	semsg(_(e_nfa_regexp_cannot_repeat_str), what);
	rc_did_emsg = TRUE;
	return FAIL;
    }
    return OK;
}

// One decomposition entry: up to three 'base' code points.
typedef struct
{
    int a, b, c;
} decomp_T;

// Decompositions for Hebrew presentation forms, 0xfb20 - 0xfb4f.
static decomp_T decomp_table[0xfb4f-0xfb20+1] =
{
    {0x5e2,0,0},		// 0xfb20	alt ayin
    {0x5d0,0,0},		// 0xfb21	alt alef
    {0x5d3,0,0},		// 0xfb22	alt dalet
    {0x5d4,0,0},		// 0xfb23	alt he
    {0x5db,0,0},		// 0xfb24	alt kaf
    {0x5dc,0,0},		// 0xfb25	alt lamed
    {0x5dd,0,0},		// 0xfb26	alt mem-sofit
    {0x5e8,0,0},		// 0xfb27	alt resh
    {0x5ea,0,0},		// 0xfb28	alt tav
    {'+', 0, 0},		// 0xfb29	alt plus
    {0x5e9, 0x5c1, 0},		// 0xfb2a	shin+shin-dot
    {0x5e9, 0x5c2, 0},		// 0xfb2b	shin+sin-dot
    {0x5e9, 0x5c1, 0x5bc},	// 0xfb2c	shin+shin-dot+dagesh
    {0x5e9, 0x5c2, 0x5bc},	// 0xfb2d	shin+sin-dot+dagesh
    {0x5d0, 0x5b7, 0},		// 0xfb2e	alef+patah
    {0x5d0, 0x5b8, 0},		// 0xfb2f	alef+qamats
    {0x5d0, 0x5b4, 0},		// 0xfb30	alef+hiriq
    {0x5d1, 0x5bc, 0},		// 0xfb31	bet+dagesh
    {0x5d2, 0x5bc, 0},		// 0xfb32	gimel+dagesh
    {0x5d3, 0x5bc, 0},		// 0xfb33	dalet+dagesh
    {0x5d4, 0x5bc, 0},		// 0xfb34	he+dagesh
    {0x5d5, 0x5bc, 0},		// 0xfb35	vav+dagesh
    {0x5d6, 0x5bc, 0},		// 0xfb36	zayin+dagesh
    {0xfb37, 0, 0},		// 0xfb37 -- UNUSED
    {0x5d8, 0x5bc, 0},		// 0xfb38	tet+dagesh
    {0x5d9, 0x5bc, 0},		// 0xfb39	yud+dagesh
    {0x5da, 0x5bc, 0},		// 0xfb3a	kaf sofit+dagesh
    {0x5db, 0x5bc, 0},		// 0xfb3b	kaf+dagesh
    {0x5dc, 0x5bc, 0},		// 0xfb3c	lamed+dagesh
    {0xfb3d, 0, 0},		// 0xfb3d -- UNUSED
    {0x5de, 0x5bc, 0},		// 0xfb3e	mem+dagesh
    {0xfb3f, 0, 0},		// 0xfb3f -- UNUSED
    {0x5e0, 0x5bc, 0},		// 0xfb40	nun+dagesh
    {0x5e1, 0x5bc, 0},		// 0xfb41	samech+dagesh
    {0xfb42, 0, 0},		// 0xfb42 -- UNUSED
    {0x5e3, 0x5bc, 0},		// 0xfb43	pe sofit+dagesh
    {0x5e4, 0x5bc,0},		// 0xfb44	pe+dagesh
    {0xfb45, 0, 0},		// 0xfb45 -- UNUSED
    {0x5e6, 0x5bc, 0},		// 0xfb46	tsadi+dagesh
    {0x5e7, 0x5bc, 0},		// 0xfb47	qof+dagesh
    {0x5e8, 0x5bc, 0},		// 0xfb48	resh+dagesh
    {0x5e9, 0x5bc, 0},		// 0xfb49	shin+dagesh
    {0x5ea, 0x5bc, 0},		// 0xfb4a	tav+dagesh
    {0x5d5, 0x5b9, 0},		// 0xfb4b	vav+holam
    {0x5d1, 0x5bf, 0},		// 0xfb4c	bet+rafe
    {0x5db, 0x5bf, 0},		// 0xfb4d	kaf+rafe
    {0x5e4, 0x5bf, 0},		// 0xfb4e	pe+rafe
    {0x5d0, 0x5dc, 0}		// 0xfb4f	alef-lamed
};

// Decompose "c" into up to three base characters.  Only Hebrew presentation
// forms (0xfb20-0xfb4f) are decomposed; any other character is returned
// unchanged with *c2 and *c3 set to zero.
    static void
mb_decompose(int c, int *c1, int *c2, int *c3)
{
    decomp_T d;

    if (c >= 0xfb20 && c <= 0xfb4f)
    {
	d = decomp_table[c - 0xfb20];
	*c1 = d.a;
	*c2 = d.b;
	*c3 = d.c;
    }
    else
    {
	*c1 = c;
	*c2 = *c3 = 0;
    }
}

/*
 * Compare two strings, ignore case if rex.reg_ic set.
 * Return 0 if strings match, non-zero otherwise.
 * Correct the length "*n" when composing characters are ignored.
 */
    static int
cstrncmp(char_u *s1, char_u *s2, int *n)
{
    int		result;

    if (!rex.reg_ic)
	result = STRNCMP(s1, s2, *n);
    else
	result = MB_STRNICMP(s1, s2, *n);

    // if it failed and it's utf8 and we want to combineignore:
    if (result != 0 && enc_utf8 && rex.reg_icombine)
    {
	char_u	*str1, *str2;
	int	c1, c2, c11, c12;
	int	junk;

	// we have to handle the strcmp ourselves, since it is necessary to
	// deal with the composing characters by ignoring them:
	str1 = s1;
	str2 = s2;
	c1 = c2 = 0;
	while ((int)(str1 - s1) < *n)
	{
	    c1 = mb_ptr2char_adv(&str1);
	    c2 = mb_ptr2char_adv(&str2);

	    // Decompose the character if necessary, into 'base' characters.
	    // Currently hard-coded for Hebrew, Arabic to be done...
	    if (c1 != c2 && (!rex.reg_ic || utf_fold(c1) != utf_fold(c2)))
	    {
		// decomposition necessary?
		mb_decompose(c1, &c11, &junk, &junk);
		mb_decompose(c2, &c12, &junk, &junk);
		c1 = c11;
		c2 = c12;
		if (c11 != c12
			    && (!rex.reg_ic || utf_fold(c11) != utf_fold(c12)))
		    break;
	    }
	}
	result = c2 - c1;
	if (result == 0)
	    *n = (int)(str2 - s2);
    }

    return result;
}

/*
 * cstrchr: This function is used a lot for simple searches, keep it fast!
 */
    static char_u *
cstrchr(char_u *s, int c)
{
    char_u	*p;
    int		cc;

    if (!rex.reg_ic || (!enc_utf8 && mb_char2len(c) > 1))
	return vim_strchr(s, c);

    // tolower() and toupper() can be slow, comparing twice should be a lot
    // faster (esp. when using MS Visual C++!).
    // For UTF-8 need to use folded case.
    if (enc_utf8 && c > 0x80)
	cc = utf_fold(c);
    else if (MB_ISUPPER(c))
	cc = MB_TOLOWER(c);
    else if (MB_ISLOWER(c))
	cc = MB_TOUPPER(c);
    else
	// Character has no case counterpart: plain search is enough.
	return vim_strchr(s, c);

    if (has_mbyte)
    {
	for (p = s; *p != NUL; p += (*mb_ptr2len)(p))
	{
	    if (enc_utf8 && c > 0x80)
	    {
		if (utf_fold(utf_ptr2char(p)) == cc)
		    return p;
	    }
	    else if (*p == c || *p == cc)
		return p;
	}
    }
    else
	// Faster version for when there are no multi-byte characters.
	for (p = s; *p != NUL; ++p)
	    if (*p == c || *p == cc)
		return p;

    return NULL;
}

////////////////////////////////////////////////////////////////
//		      regsub stuff			       //
////////////////////////////////////////////////////////////////

/*
 * We should define ftpr as a pointer to a function returning a pointer to
 * a function returning a pointer to a function ...
 * This is impossible, so we declare a pointer to a function returning a
 * void pointer.  This should work for all compilers.
 */
typedef void (*(*fptr_T)(int *, int));

static int vim_regsub_both(char_u *source, typval_T *expr, char_u *dest, int destlen, int flags);

// Case-conversion helpers for \u, \U, \l, \L in the replacement string.
// The "one-shot" variants return NULL so they apply to a single character;
// the "sticky" variants return themselves so they keep applying.
    static fptr_T
do_upper(int *d, int c)
{
    *d = MB_TOUPPER(c);

    return (fptr_T)NULL;
}

    static fptr_T
do_Upper(int *d, int c)
{
    *d = MB_TOUPPER(c);

    return (fptr_T)do_Upper;
}

    static fptr_T
do_lower(int *d, int c)
{
    *d = MB_TOLOWER(c);

    return (fptr_T)NULL;
}

    static fptr_T
do_Lower(int *d, int c)
{
    *d = MB_TOLOWER(c);

    return (fptr_T)do_Lower;
}

/*
 * regtilde(): Replace tildes in the pattern by the old pattern.
 *
 * Short explanation of the tilde: It stands for the previous replacement
 * pattern.  If that previous pattern also contains a ~ we should go back a
 * step further...  But we insert the previous pattern into the current one
 * and remember that.
 * This still does not handle the case where "magic" changes.  So require the
 * user to keep his hands off of "magic".
 *
 * The tildes are parsed once before the first call to vim_regsub().
*/ char_u * regtilde(char_u *source, int magic) { char_u *newsub = source; char_u *tmpsub; char_u *p; int len; int prevlen; for (p = newsub; *p; ++p) { if ((*p == '~' && magic) || (*p == '\\' && *(p + 1) == '~' && !magic)) { if (reg_prev_sub != NULL) { // length = len(newsub) - 1 + len(prev_sub) + 1 prevlen = (int)STRLEN(reg_prev_sub); tmpsub = alloc(STRLEN(newsub) + prevlen); if (tmpsub != NULL) { // copy prefix len = (int)(p - newsub); // not including ~ mch_memmove(tmpsub, newsub, (size_t)len); // interpret tilde mch_memmove(tmpsub + len, reg_prev_sub, (size_t)prevlen); // copy postfix if (!magic) ++p; // back off backslash STRCPY(tmpsub + len + prevlen, p + 1); if (newsub != source) // already allocated newsub vim_free(newsub); newsub = tmpsub; p = newsub + len + prevlen; } } else if (magic) STRMOVE(p, p + 1); // remove '~' else STRMOVE(p, p + 2); // remove '\~' --p; } else { if (*p == '\\' && p[1]) // skip escaped characters ++p; if (has_mbyte) p += (*mb_ptr2len)(p) - 1; } } // Store a copy of newsub in reg_prev_sub. It is always allocated, // because recursive calls may make the returned string invalid. vim_free(reg_prev_sub); reg_prev_sub = vim_strsave(newsub); return newsub; } #ifdef FEAT_EVAL static int can_f_submatch = FALSE; // TRUE when submatch() can be used // These pointers are used for reg_submatch(). Needed for when the // substitution string is an expression that contains a call to substitute() // and submatch(). typedef struct { regmatch_T *sm_match; regmmatch_T *sm_mmatch; linenr_T sm_firstlnum; linenr_T sm_maxline; int sm_line_lbr; } regsubmatch_T; static regsubmatch_T rsm; // can only be used when can_f_submatch is TRUE #endif #ifdef FEAT_EVAL /* * Put the submatches in "argv[argskip]" which is a list passed into * call_func() by vim_regsub_both(). 
 */
    static int
fill_submatch_list(int argc UNUSED, typval_T *argv, int argskip, int argcount)
{
    listitem_T	*li;
    int		i;
    char_u	*s;
    typval_T	*listarg = argv + argskip;

    if (argcount == argskip)
	// called function doesn't take a submatches argument
	return argskip;

    // Relies on sl_list to be the first item in staticList10_T.
    init_static_list((staticList10_T *)(listarg->vval.v_list));

    // There are always 10 list items in staticList10_T.
    li = listarg->vval.v_list->lv_first;
    for (i = 0; i < 10; ++i)
    {
	s = rsm.sm_match->startp[i];
	if (s == NULL || rsm.sm_match->endp[i] == NULL)
	    s = NULL;
	else
	    s = vim_strnsave(s, rsm.sm_match->endp[i] - s);
	li->li_tv.v_type = VAR_STRING;
	li->li_tv.vval.v_string = s;
	li = li->li_next;
    }
    return argskip + 1;
}

// Free the strings stored by fill_submatch_list().
    static void
clear_submatch_list(staticList10_T *sl)
{
    int i;

    for (i = 0; i < 10; ++i)
	vim_free(sl->sl_items[i].li_tv.vval.v_string);
}
#endif

/*
 * vim_regsub() - perform substitutions after a vim_regexec() or
 * vim_regexec_multi() match.
 *
 * If "flags" has REGSUB_COPY really copy into "dest[destlen]".
 * Oterwise nothing is copied, only compue the length of the result.
 *
 * If "flags" has REGSUB_MAGIC then behave like 'magic' is set.
 *
 * If "flags" has REGSUB_BACKSLASH a backslash will be removed later, need to
 * double them to keep them, and insert a backslash before a CR to avoid it
 * being replaced with a line break later.
 *
 * Note: The matched text must not change between the call of
 * vim_regexec()/vim_regexec_multi() and vim_regsub()!  It would make the back
 * references invalid!
 *
 * Returns the size of the replacement, including terminating NUL.
 */
    int
vim_regsub(
    regmatch_T	*rmp,
    char_u	*source,
    typval_T	*expr,
    char_u	*dest,
    int		destlen,
    int		flags)
{
    int		result;
    regexec_T	rex_save;
    int		rex_in_use_save = rex_in_use;

    if (rex_in_use)
	// Being called recursively, save the state.
	rex_save = rex;
    rex_in_use = TRUE;

    rex.reg_match = rmp;
    rex.reg_mmatch = NULL;
    rex.reg_maxline = 0;
    rex.reg_buf = curbuf;
    rex.reg_line_lbr = TRUE;
    result = vim_regsub_both(source, expr, dest, destlen, flags);

    rex_in_use = rex_in_use_save;
    if (rex_in_use)
	rex = rex_save;

    return result;
}

// Multi-line variant of vim_regsub(); always operates on the current buffer.
    int
vim_regsub_multi(
    regmmatch_T	*rmp,
    linenr_T	lnum,
    char_u	*source,
    char_u	*dest,
    int		destlen,
    int		flags)
{
    int		result;
    regexec_T	rex_save;
    int		rex_in_use_save = rex_in_use;

    if (rex_in_use)
	// Being called recursively, save the state.
	rex_save = rex;
    rex_in_use = TRUE;

    rex.reg_match = NULL;
    rex.reg_mmatch = rmp;
    rex.reg_buf = curbuf;	// always works on the current buffer!
    rex.reg_firstlnum = lnum;
    rex.reg_maxline = curbuf->b_ml.ml_line_count - lnum;
    rex.reg_line_lbr = FALSE;
    result = vim_regsub_both(source, NULL, dest, destlen, flags);

    rex_in_use = rex_in_use_save;
    if (rex_in_use)
	rex = rex_save;

    return result;
}

#if defined(FEAT_EVAL) || defined(PROTO)
// When nesting more than a couple levels it's probably a mistake.
# define MAX_REGSUB_NESTING 4
static char_u   *eval_result[MAX_REGSUB_NESTING] = {NULL, NULL, NULL, NULL};

# if defined(EXITFREE) || defined(PROTO)
    void
free_resub_eval_result(void)
{
    int		i;

    for (i = 0; i < MAX_REGSUB_NESTING; ++i)
	VIM_CLEAR(eval_result[i]);
}
# endif
#endif

// Worker for vim_regsub()/vim_regsub_multi().  Called twice per substitute:
// once without REGSUB_COPY to compute the length, once with it to copy.
    static int
vim_regsub_both(
    char_u	*source,
    typval_T	*expr,
    char_u	*dest,
    int		destlen,
    int		flags)
{
    char_u	*src;
    char_u	*dst;
    char_u	*s;
    int		c;
    int		cc;
    int		no = -1;
    fptr_T	func_all = (fptr_T)NULL;
    fptr_T	func_one = (fptr_T)NULL;
    linenr_T	clnum = 0;	// init for GCC
    int		len = 0;	// init for GCC
#ifdef FEAT_EVAL
    static int  nesting = 0;
    int		nested;
#endif
    int		copy = flags & REGSUB_COPY;

    // Be paranoid...
    if ((source == NULL && expr == NULL) || dest == NULL)
    {
	emsg(_(e_null_argument));
	return 0;
    }
    if (prog_magic_wrong())
	return 0;
#ifdef FEAT_EVAL
    if (nesting == MAX_REGSUB_NESTING)
    {
	emsg(_(e_substitute_nesting_too_deep));
	return 0;
    }
    nested = nesting;
#endif
    src = source;
    dst = dest;

    /*
     * When the substitute part starts with "\=" evaluate it as an expression.
     */
    if (expr != NULL || (source[0] == '\\' && source[1] == '='))
    {
#ifdef FEAT_EVAL
	// To make sure that the length doesn't change between checking the
	// length and copying the string, and to speed up things, the
	// resulting string is saved from the call with
	// "flags & REGSUB_COPY" == 0 to the call with
	// "flags & REGSUB_COPY" != 0.
	if (copy)
	{
	    if (eval_result[nested] != NULL)
	    {
		STRCPY(dest, eval_result[nested]);
		dst += STRLEN(eval_result[nested]);
		VIM_CLEAR(eval_result[nested]);
	    }
	}
	else
	{
	    int		    prev_can_f_submatch = can_f_submatch;
	    regsubmatch_T   rsm_save;

	    VIM_CLEAR(eval_result[nested]);

	    // The expression may contain substitute(), which calls us
	    // recursively.  Make sure submatch() gets the text from the first
	    // level.
	    if (can_f_submatch)
		rsm_save = rsm;
	    can_f_submatch = TRUE;
	    rsm.sm_match = rex.reg_match;
	    rsm.sm_mmatch = rex.reg_mmatch;
	    rsm.sm_firstlnum = rex.reg_firstlnum;
	    rsm.sm_maxline = rex.reg_maxline;
	    rsm.sm_line_lbr = rex.reg_line_lbr;

	    // Although unlikely, it is possible that the expression invokes a
	    // substitute command (it might fail, but still).  Therefore keep
	    // an array of eval results.
	    ++nesting;
	    if (expr != NULL)
	    {
		typval_T	argv[2];
		char_u		buf[NUMBUFLEN];
		typval_T	rettv;
		staticList10_T	matchList;
		funcexe_T	funcexe;

		rettv.v_type = VAR_STRING;
		rettv.vval.v_string = NULL;
		argv[0].v_type = VAR_LIST;
		argv[0].vval.v_list = &matchList.sl_list;
		matchList.sl_list.lv_len = 0;
		CLEAR_FIELD(funcexe);
		funcexe.fe_argv_func = fill_submatch_list;
		funcexe.fe_evaluate = TRUE;
		if (expr->v_type == VAR_FUNC)
		{
		    s = expr->vval.v_string;
		    call_func(s, -1, &rettv, 1, argv, &funcexe);
		}
		else if (expr->v_type == VAR_PARTIAL)
		{
		    partial_T   *partial = expr->vval.v_partial;

		    s = partial_name(partial);
		    funcexe.fe_partial = partial;
		    call_func(s, -1, &rettv, 1, argv, &funcexe);
		}
		else if (expr->v_type == VAR_INSTR)
		{
		    exe_typval_instr(expr, &rettv);
		}
		if (matchList.sl_list.lv_len > 0)
		    // fill_submatch_list() was called
		    clear_submatch_list(&matchList);

		if (rettv.v_type == VAR_UNKNOWN)
		    // something failed, no need to report another error
		    eval_result[nested] = NULL;
		else
		{
		    eval_result[nested] = tv_get_string_buf_chk(&rettv, buf);
		    if (eval_result[nested] != NULL)
			eval_result[nested] = vim_strsave(eval_result[nested]);
		}
		clear_tv(&rettv);
	    }
	    else if (substitute_instr != NULL)
		// Execute instructions from ISN_SUBSTITUTE.
		eval_result[nested] = exe_substitute_instr();
	    else
		eval_result[nested] = eval_to_string(source + 2, TRUE);
	    --nesting;

	    if (eval_result[nested] != NULL)
	    {
		int had_backslash = FALSE;

		for (s = eval_result[nested]; *s != NUL; MB_PTR_ADV(s))
		{
		    // Change NL to CR, so that it becomes a line break,
		    // unless called from vim_regexec_nl().
		    // Skip over a backslashed character.
		    if (*s == NL && !rsm.sm_line_lbr)
			*s = CAR;
		    else if (*s == '\\' && s[1] != NUL)
		    {
			++s;
			/* Change NL to CR here too, so that this works:
			 * :s/abc\\\ndef/\="aaa\\\nbbb"/  on text:
			 *   abc\
			 *   def
			 * Not when called from vim_regexec_nl().
			 */
			if (*s == NL && !rsm.sm_line_lbr)
			    *s = CAR;
			had_backslash = TRUE;
		    }
		}
		if (had_backslash && (flags & REGSUB_BACKSLASH))
		{
		    // Backslashes will be consumed, need to double them.
		    s = vim_strsave_escaped(eval_result[nested],
							      (char_u *)"\\");
		    if (s != NULL)
		    {
			vim_free(eval_result[nested]);
			eval_result[nested] = s;
		    }
		}

		dst += STRLEN(eval_result[nested]);
	    }

	    can_f_submatch = prev_can_f_submatch;
	    if (can_f_submatch)
		rsm = rsm_save;
	}
#endif
    }
    else
      while ((c = *src++) != NUL)
      {
	if (c == '&' && (flags & REGSUB_MAGIC))
	    no = 0;
	else if (c == '\\' && *src != NUL)
	{
	    if (*src == '&' && !(flags & REGSUB_MAGIC))
	    {
		++src;
		no = 0;
	    }
	    else if ('0' <= *src && *src <= '9')
	    {
		no = *src++ - '0';
	    }
	    else if (vim_strchr((char_u *)"uUlLeE", *src))
	    {
		switch (*src++)
		{
		case 'u':   func_one = (fptr_T)do_upper;
			    continue;
		case 'U':   func_all = (fptr_T)do_Upper;
			    continue;
		case 'l':   func_one = (fptr_T)do_lower;
			    continue;
		case 'L':   func_all = (fptr_T)do_Lower;
			    continue;
		case 'e':
		case 'E':   func_one = func_all = (fptr_T)NULL;
			    continue;
		}
	    }
	}
	if (no < 0)	      // Ordinary character.
	{
	    if (c == K_SPECIAL && src[0] != NUL && src[1] != NUL)
	    {
		// Copy a special key as-is.
		if (copy)
		{
		    if (dst + 3 > dest + destlen)
		    {
			iemsg("vim_regsub_both(): not enough space");
			return 0;
		    }
		    *dst++ = c;
		    *dst++ = *src++;
		    *dst++ = *src++;
		}
		else
		{
		    dst += 3;
		    src += 2;
		}
		continue;
	    }

	    if (c == '\\' && *src != NUL)
	    {
		// Check for abbreviations -- webb
		switch (*src)
		{
		    case 'r':	c = CAR;	++src;	break;
		    case 'n':	c = NL;		++src;	break;
		    case 't':	c = TAB;	++src;	break;
		 // Oh no!  \e already has meaning in subst pat :-(
		 // case 'e':   c = ESC;	++src;	break;
		    case 'b':	c = Ctrl_H;	++src;	break;

		    // If "backslash" is TRUE the backslash will be removed
		    // later.  Used to insert a literal CR.
		    default:	if (flags & REGSUB_BACKSLASH)
				{
				    if (copy)
				    {
					if (dst + 1 > dest + destlen)
					{
					    iemsg("vim_regsub_both(): not enough space");
					    return 0;
					}
					*dst = '\\';
				    }
				    ++dst;
				}
				c = *src++;
		}
	    }
	    else if (has_mbyte)
		c = mb_ptr2char(src - 1);

	    // Write to buffer, if copy is set.
	    if (func_one != (fptr_T)NULL)
		// Turbo C complains without the typecast
		func_one = (fptr_T)(func_one(&cc, c));
	    else if (func_all != (fptr_T)NULL)
		// Turbo C complains without the typecast
		func_all = (fptr_T)(func_all(&cc, c));
	    else // just copy
		cc = c;

	    if (has_mbyte)
	    {
		int totlen = mb_ptr2len(src - 1);
		int charlen = mb_char2len(cc);

		if (copy)
		{
		    if (dst + charlen > dest + destlen)
		    {
			iemsg("vim_regsub_both(): not enough space");
			return 0;
		    }
		    mb_char2bytes(cc, dst);
		}
		dst += charlen - 1;
		if (enc_utf8)
		{
		    int clen = utf_ptr2len(src - 1);

		    // If the character length is shorter than "totlen", there
		    // are composing characters; copy them as-is.
		    if (clen < totlen)
		    {
			if (copy)
			{
			    if (dst + totlen - clen > dest + destlen)
			    {
				iemsg("vim_regsub_both(): not enough space");
				return 0;
			    }
			    mch_memmove(dst + 1, src - 1 + clen,
						     (size_t)(totlen - clen));
			}
			dst += totlen - clen;
		    }
		}
		src += totlen - 1;
	    }
	    else if (copy)
	    {
		if (dst + 1 > dest + destlen)
		{
		    iemsg("vim_regsub_both(): not enough space");
		    return 0;
		}
		*dst = cc;
	    }
	    dst++;
	}
	else
	{
	    // Insert the text of capture group "no".
	    if (REG_MULTI)
	    {
		clnum = rex.reg_mmatch->startpos[no].lnum;
		if (clnum < 0 || rex.reg_mmatch->endpos[no].lnum < 0)
		    s = NULL;
		else
		{
		    s = reg_getline(clnum) + rex.reg_mmatch->startpos[no].col;
		    if (rex.reg_mmatch->endpos[no].lnum == clnum)
			len = rex.reg_mmatch->endpos[no].col
					   - rex.reg_mmatch->startpos[no].col;
		    else
			len = (int)STRLEN(s);
		}
	    }
	    else
	    {
		s = rex.reg_match->startp[no];
		if (rex.reg_match->endp[no] == NULL)
		    s = NULL;
		else
		    len = (int)(rex.reg_match->endp[no] - s);
	    }
	    if (s != NULL)
	    {
		for (;;)
		{
		    if (len == 0)
		    {
			if (REG_MULTI)
			{
			    if (rex.reg_mmatch->endpos[no].lnum == clnum)
				break;
			    if (copy)
			    {
				if (dst + 1 > dest + destlen)
				{
				    iemsg("vim_regsub_both(): not enough space");
				    return 0;
				}
				*dst = CAR;
			    }
			    ++dst;
			    s = reg_getline(++clnum);
			    if (rex.reg_mmatch->endpos[no].lnum == clnum)
				len = rex.reg_mmatch->endpos[no].col;
			    else
				len = (int)STRLEN(s);
			}
			else
			    break;
		    }
		    else if (*s == NUL) // we hit NUL.
		    {
			if (copy)
			    iemsg(_(e_damaged_match_string));
			goto exit;
		    }
		    else
		    {
			if ((flags & REGSUB_BACKSLASH)
						  && (*s == CAR || *s == '\\'))
			{
			    /*
			     * Insert a backslash in front of a CR, otherwise
			     * it will be replaced by a line break.
			     * Number of backslashes will be halved later,
			     * double them here.
			     */
			    if (copy)
			    {
				if (dst + 2 > dest + destlen)
				{
				    iemsg("vim_regsub_both(): not enough space");
				    return 0;
				}
				dst[0] = '\\';
				dst[1] = *s;
			    }
			    dst += 2;
			}
			else
			{
			    if (has_mbyte)
				c = mb_ptr2char(s);
			    else
				c = *s;

			    if (func_one != (fptr_T)NULL)
				// Turbo C complains without the typecast
				func_one = (fptr_T)(func_one(&cc, c));
			    else if (func_all != (fptr_T)NULL)
				// Turbo C complains without the typecast
				func_all = (fptr_T)(func_all(&cc, c));
			    else // just copy
				cc = c;

			    if (has_mbyte)
			    {
				int l;
				int charlen;

				// Copy composing characters separately, one
				// at a time.
				if (enc_utf8)
				    l = utf_ptr2len(s) - 1;
				else
				    l = mb_ptr2len(s) - 1;

				s += l;
				len -= l;
				charlen = mb_char2len(cc);
				if (copy)
				{
				    if (dst + charlen > dest + destlen)
				    {
					iemsg("vim_regsub_both(): not enough space");
					return 0;
				    }
				    mb_char2bytes(cc, dst);
				}
				dst += charlen - 1;
			    }
			    else if (copy)
			    {
				if (dst + 1 > dest + destlen)
				{
				    iemsg("vim_regsub_both(): not enough space");
				    return 0;
				}
				*dst = cc;
			    }
			    dst++;
			}

			++s;
			--len;
		    }
		}
	    }
	    no = -1;
	}
      }
    if (copy)
	*dst = NUL;

exit:
    return (int)((dst - dest) + 1);
}

#ifdef FEAT_EVAL
/*
 * Call reg_getline() with the line numbers from the submatch.  If a
 * substitute() was used the reg_maxline and other values have been
 * overwritten.
 */
    static char_u *
reg_getline_submatch(linenr_T lnum)
{
    char_u *s;
    linenr_T save_first = rex.reg_firstlnum;
    linenr_T save_max = rex.reg_maxline;

    rex.reg_firstlnum = rsm.sm_firstlnum;
    rex.reg_maxline = rsm.sm_maxline;

    s = reg_getline(lnum);

    rex.reg_firstlnum = save_first;
    rex.reg_maxline = save_max;
    return s;
}

/*
 * Used for the submatch() function: get the string from the n'th submatch in
 * allocated memory.
 * Returns NULL when not in a ":s" command and for a non-existing submatch.
 */
    char_u *
reg_submatch(int no)
{
    char_u	*retval = NULL;
    char_u	*s;
    int		len;
    int		round;
    linenr_T	lnum;

    if (!can_f_submatch || no < 0)
	return NULL;

    if (rsm.sm_match == NULL)
    {
	/*
	 * First round: compute the length and allocate memory.
	 * Second round: copy the text.
	 */
	for (round = 1; round <= 2; ++round)
	{
	    lnum = rsm.sm_mmatch->startpos[no].lnum;
	    if (lnum < 0 || rsm.sm_mmatch->endpos[no].lnum < 0)
		return NULL;

	    s = reg_getline_submatch(lnum);
	    if (s == NULL)  // anti-crash check, cannot happen?
		break;
	    s += rsm.sm_mmatch->startpos[no].col;
	    if (rsm.sm_mmatch->endpos[no].lnum == lnum)
	    {
		// Within one line: take form start to end col.
		len = rsm.sm_mmatch->endpos[no].col
					  - rsm.sm_mmatch->startpos[no].col;
		if (round == 2)
		    vim_strncpy(retval, s, len);
		++len;
	    }
	    else
	    {
		// Multiple lines: take start line from start col, middle
		// lines completely and end line up to end col.
		len = (int)STRLEN(s);
		if (round == 2)
		{
		    STRCPY(retval, s);
		    retval[len] = '\n';
		}
		++len;
		++lnum;
		while (lnum < rsm.sm_mmatch->endpos[no].lnum)
		{
		    s = reg_getline_submatch(lnum++);
		    if (round == 2)
			STRCPY(retval + len, s);
		    len += (int)STRLEN(s);
		    if (round == 2)
			retval[len] = '\n';
		    ++len;
		}
		if (round == 2)
		    STRNCPY(retval + len, reg_getline_submatch(lnum),
					     rsm.sm_mmatch->endpos[no].col);
		len += rsm.sm_mmatch->endpos[no].col;
		if (round == 2)
		    retval[len] = NUL;
		++len;
	    }

	    if (retval == NULL)
	    {
		retval = alloc(len);
		if (retval == NULL)
		    return NULL;
	    }
	}
    }
    else
    {
	// Single-line match: copy directly from the stored pointers.
	s = rsm.sm_match->startp[no];
	if (s == NULL || rsm.sm_match->endp[no] == NULL)
	    retval = NULL;
	else
	    retval = vim_strnsave(s, rsm.sm_match->endp[no] - s);
    }

    return retval;
}

/*
 * Used for the submatch() function with the optional non-zero argument: get
 * the list of strings from the n'th submatch in allocated memory with NULs
 * represented in NLs.
 * Returns a list of allocated strings.  Returns NULL when not in a ":s"
 * command, for a non-existing submatch and for any error.
 */
    list_T *
reg_submatch_list(int no)
{
    char_u	*s;
    linenr_T	slnum;
    linenr_T	elnum;
    colnr_T	scol;
    colnr_T	ecol;
    int		i;
    list_T	*list;
    int		error = FALSE;

    if (!can_f_submatch || no < 0)
	return NULL;

    if (rsm.sm_match == NULL)
    {
	// Multi-line match: one list item per line of the submatch.
	slnum = rsm.sm_mmatch->startpos[no].lnum;
	elnum = rsm.sm_mmatch->endpos[no].lnum;
	if (slnum < 0 || elnum < 0)
	    return NULL;

	scol = rsm.sm_mmatch->startpos[no].col;
	ecol = rsm.sm_mmatch->endpos[no].col;

	list = list_alloc();
	if (list == NULL)
	    return NULL;

	s = reg_getline_submatch(slnum) + scol;
	if (slnum == elnum)
	{
	    if (list_append_string(list, s, ecol - scol) == FAIL)
		error = TRUE;
	}
	else
	{
	    if (list_append_string(list, s, -1) == FAIL)
		error = TRUE;
	    for (i = 1; i < elnum - slnum; i++)
	    {
		s = reg_getline_submatch(slnum + i);
		if (list_append_string(list, s, -1) == FAIL)
		    error = TRUE;
	    }
	    s = reg_getline_submatch(elnum);
	    if (list_append_string(list, s, ecol) == FAIL)
		error = TRUE;
	}
    }
    else
    {
	s = rsm.sm_match->startp[no];
	if (s == NULL || rsm.sm_match->endp[no] == NULL)
	    return NULL;
	list = list_alloc();
	if (list == NULL)
	    return NULL;
	if (list_append_string(list, s,
				 (int)(rsm.sm_match->endp[no] - s)) == FAIL)
	    error = TRUE;
    }

    if (error)
    {
	list_free(list);
	return NULL;
    }
    ++list->lv_refcount;
    return list;
}
#endif

/*
 * Initialize the values used for matching against multiple lines
 */
    static void
init_regexec_multi(
	regmmatch_T	*rmp,
	win_T		*win,	// window in which to search or NULL
	buf_T		*buf,	// buffer in which to search
	linenr_T	lnum)	// nr of line to start looking for match
{
    rex.reg_match = NULL;
    rex.reg_mmatch = rmp;
    rex.reg_buf = buf;
    rex.reg_win = win;
    rex.reg_firstlnum = lnum;
    rex.reg_maxline = rex.reg_buf->b_ml.ml_line_count - lnum;
    rex.reg_line_lbr = FALSE;
    rex.reg_ic = rmp->rmm_ic;
    rex.reg_icombine = FALSE;
    rex.reg_maxcol = rmp->rmm_maxcol;
}

// The two regexp engines are compiled into this translation unit.
#include "regexp_bt.c"

static regengine_T bt_regengine =
{
    bt_regcomp,
    bt_regfree,
    bt_regexec_nl,
    bt_regexec_multi,
};

#include "regexp_nfa.c"

static regengine_T nfa_regengine =
{
    nfa_regcomp,
    nfa_regfree,
    nfa_regexec_nl,
    nfa_regexec_multi,
};

// Which regexp engine to use? Needed for vim_regcomp().
// Must match with 'regexpengine'.
static int regexp_engine = 0;

#ifdef DEBUG
static char_u regname[][30] = {
		    "AUTOMATIC Regexp Engine",
		    "BACKTRACKING Regexp Engine",
		    "NFA Regexp Engine"
			    };
#endif

/*
 * Compile a regular expression into internal code.
 * Returns the program in allocated memory.
 * Use vim_regfree() to free the memory.
 * Returns NULL for an error.
 */
    regprog_T *
vim_regcomp(char_u *expr_arg, int re_flags)
{
    regprog_T   *prog = NULL;
    char_u	*expr = expr_arg;
    int		called_emsg_before;

    regexp_engine = p_re;

    // Check for prefix "\%#=", that sets the regexp engine
    if (STRNCMP(expr, "\\%#=", 4) == 0)
    {
	int newengine = expr[4] - '0';

	if (newengine == AUTOMATIC_ENGINE
	    || newengine == BACKTRACKING_ENGINE
	    || newengine == NFA_ENGINE)
	{
	    regexp_engine = expr[4] - '0';
	    expr += 5;
#ifdef DEBUG
	    smsg("New regexp mode selected (%d): %s",
					   regexp_engine, regname[newengine]);
#endif
	}
	else
	{
	    emsg(_(e_percent_hash_can_only_be_followed_by_zero_one_two_automatic_engine_will_be_used));
	    regexp_engine = AUTOMATIC_ENGINE;
	}
    }
#ifdef DEBUG
    bt_regengine.expr = expr;
    nfa_regengine.expr = expr;
#endif
    // reg_iswordc() uses rex.reg_buf
    rex.reg_buf = curbuf;

    /*
     * First try the NFA engine, unless backtracking was requested.
     */
    called_emsg_before = called_emsg;
    if (regexp_engine != BACKTRACKING_ENGINE)
	prog = nfa_regengine.regcomp(expr,
		re_flags + (regexp_engine == AUTOMATIC_ENGINE ? RE_AUTO : 0));
    else
	prog = bt_regengine.regcomp(expr, re_flags);

    // Check for error compiling regexp with initial engine.
    if (prog == NULL)
    {
#ifdef BT_REGEXP_DEBUG_LOG
	if (regexp_engine == BACKTRACKING_ENGINE)   // debugging log for BT engine
	{
	    FILE *f;
	    f = fopen(BT_REGEXP_DEBUG_LOG_NAME, "a");
	    if (f)
	    {
		fprintf(f, "Syntax error in \"%s\"\n", expr);
		fclose(f);
	    }
	    else
		semsg("(NFA) Could not open \"%s\" to write !!!",
			BT_REGEXP_DEBUG_LOG_NAME);
	}
#endif
	/*
	 * If the NFA engine failed, try the backtracking engine.
	 * The NFA engine also fails for patterns that it can't handle well
	 * but are still valid patterns, thus a retry should work.
	 * But don't try if an error message was given.
	 */
	if (regexp_engine == AUTOMATIC_ENGINE
					  && called_emsg == called_emsg_before)
	{
	    regexp_engine = BACKTRACKING_ENGINE;
#ifdef FEAT_EVAL
	    report_re_switch(expr);
#endif
	    prog = bt_regengine.regcomp(expr, re_flags);
	}
    }

    if (prog != NULL)
    {
	// Store the info needed to call regcomp() again when the engine turns
	// out to be very slow when executing it.
	prog->re_engine = regexp_engine;
	prog->re_flags = re_flags;
    }

    return prog;
}

/*
 * Free a compiled regexp program, returned by vim_regcomp().
 */
    void
vim_regfree(regprog_T *prog)
{
    if (prog != NULL)
	prog->engine->regfree(prog);
}

#if defined(EXITFREE) || defined(PROTO)
    void
free_regexp_stuff(void)
{
    ga_clear(&regstack);
    ga_clear(&backpos);
    vim_free(reg_tofree);
    vim_free(reg_prev_sub);
}
#endif

#ifdef FEAT_EVAL
// Report (with 'verbose' set) that the backtracking engine is used instead.
    static void
report_re_switch(char_u *pat)
{
    if (p_verbose > 0)
    {
	verbose_enter();
	msg_puts(_("Switching to backtracking RE engine for pattern: "));
	msg_puts((char *)pat);
	verbose_leave();
    }
}
#endif

#if defined(FEAT_X11) || defined(PROTO)
/*
 * Return whether "prog" is currently being executed.
 */
    int
regprog_in_use(regprog_T *prog)
{
    return prog->re_in_use;
}
#endif

/*
 * Match a regexp against a string.
 * "rmp->regprog" must be a compiled regexp as returned by vim_regcomp().
 * Note: "rmp->regprog" may be freed and changed.
 * Uses curbuf for line count and 'iskeyword'.
 * When "nl" is TRUE consider a "\n" in "line" to be a line break.
 *
 * Return TRUE if there is a match, FALSE if not.
 */
    static int
vim_regexec_string(
    regmatch_T	*rmp,
    char_u	*line,  // string to match against
    colnr_T	col,    // column to start looking for match
    int		nl)
{
    int		result;
    regexec_T	rex_save;
    int		rex_in_use_save = rex_in_use;

    // Cannot use the same prog recursively, it contains state.
    if (rmp->regprog->re_in_use)
    {
	emsg(_(e_cannot_use_pattern_recursively));
	return FALSE;
    }
    rmp->regprog->re_in_use = TRUE;

    if (rex_in_use)
	// Being called recursively, save the state.
	rex_save = rex;
    rex_in_use = TRUE;

    rex.reg_startp = NULL;
    rex.reg_endp = NULL;
    rex.reg_startpos = NULL;
    rex.reg_endpos = NULL;

    result = rmp->regprog->engine->regexec_nl(rmp, line, col, nl);
    rmp->regprog->re_in_use = FALSE;

    // NFA engine aborted because it's very slow.
    if (rmp->regprog->re_engine == AUTOMATIC_ENGINE
					       && result == NFA_TOO_EXPENSIVE)
    {
	int    save_p_re = p_re;
	int    re_flags = rmp->regprog->re_flags;
	char_u *pat = vim_strsave(((nfa_regprog_T *)rmp->regprog)->pattern);

	p_re = BACKTRACKING_ENGINE;
	vim_regfree(rmp->regprog);
	if (pat != NULL)
	{
#ifdef FEAT_EVAL
	    report_re_switch(pat);
#endif
	    rmp->regprog = vim_regcomp(pat, re_flags);
	    if (rmp->regprog != NULL)
	    {
		rmp->regprog->re_in_use = TRUE;
		result = rmp->regprog->engine->regexec_nl(rmp, line, col, nl);
		rmp->regprog->re_in_use = FALSE;
	    }
	    vim_free(pat);
	}

	p_re = save_p_re;
    }

    rex_in_use = rex_in_use_save;
    if (rex_in_use)
	rex = rex_save;

    return result > 0;
}

/*
 * Note: "*prog" may be freed and changed.
 * Return TRUE if there is a match, FALSE if not.
 */
    int
vim_regexec_prog(
    regprog_T	**prog,
    int		ignore_case,
    char_u	*line,
    colnr_T	col)
{
    int		r;
    regmatch_T	regmatch;

    regmatch.regprog = *prog;
    regmatch.rm_ic = ignore_case;
    r = vim_regexec_string(&regmatch, line, col, FALSE);
    *prog = regmatch.regprog;
    return r;
}

/*
 * Note: "rmp->regprog" may be freed and changed.
 * Return TRUE if there is a match, FALSE if not.
 */
    int
vim_regexec(regmatch_T *rmp, char_u *line, colnr_T col)
{
    return vim_regexec_string(rmp, line, col, FALSE);
}

/*
 * Like vim_regexec(), but consider a "\n" in "line" to be a line break.
 * Note: "rmp->regprog" may be freed and changed.
 * Return TRUE if there is a match, FALSE if not.
*/ int vim_regexec_nl(regmatch_T *rmp, char_u *line, colnr_T col) { return vim_regexec_string(rmp, line, col, TRUE); } /* * Match a regexp against multiple lines. * "rmp->regprog" must be a compiled regexp as returned by vim_regcomp(). * Note: "rmp->regprog" may be freed and changed, even set to NULL. * Uses curbuf for line count and 'iskeyword'. * * Return zero if there is no match. Return number of lines contained in the * match otherwise. */ long vim_regexec_multi( regmmatch_T *rmp, win_T *win, // window in which to search or NULL buf_T *buf, // buffer in which to search linenr_T lnum, // nr of line to start looking for match colnr_T col, // column to start looking for match int *timed_out) // flag is set when timeout limit reached { int result; regexec_T rex_save; int rex_in_use_save = rex_in_use; // Cannot use the same prog recursively, it contains state. if (rmp->regprog->re_in_use) { emsg(_(e_cannot_use_pattern_recursively)); return FALSE; } rmp->regprog->re_in_use = TRUE; if (rex_in_use) // Being called recursively, save the state. rex_save = rex; rex_in_use = TRUE; result = rmp->regprog->engine->regexec_multi( rmp, win, buf, lnum, col, timed_out); rmp->regprog->re_in_use = FALSE; // NFA engine aborted because it's very slow. if (rmp->regprog->re_engine == AUTOMATIC_ENGINE && result == NFA_TOO_EXPENSIVE) { int save_p_re = p_re; int re_flags = rmp->regprog->re_flags; char_u *pat = vim_strsave(((nfa_regprog_T *)rmp->regprog)->pattern); p_re = BACKTRACKING_ENGINE; if (pat != NULL) { regprog_T *prev_prog = rmp->regprog; #ifdef FEAT_EVAL report_re_switch(pat); #endif #ifdef FEAT_SYN_HL // checking for \z misuse was already done when compiling for NFA, // allow all here reg_do_extmatch = REX_ALL; #endif rmp->regprog = vim_regcomp(pat, re_flags); #ifdef FEAT_SYN_HL reg_do_extmatch = 0; #endif if (rmp->regprog == NULL) { // Somehow compiling the pattern failed now, put back the // previous one to avoid "regprog" becoming NULL. 
rmp->regprog = prev_prog; } else { vim_regfree(prev_prog); rmp->regprog->re_in_use = TRUE; result = rmp->regprog->engine->regexec_multi( rmp, win, buf, lnum, col, timed_out); rmp->regprog->re_in_use = FALSE; } vim_free(pat); } p_re = save_p_re; } rex_in_use = rex_in_use_save; if (rex_in_use) rex = rex_save; return result <= 0 ? 0 : result; }
regtilde(char_u *source, int magic) { char_u *newsub = source; char_u *tmpsub; char_u *p; int len; int prevlen; for (p = newsub; *p; ++p) { if ((*p == '~' && magic) || (*p == '\\' && *(p + 1) == '~' && !magic)) { if (reg_prev_sub != NULL) { // length = len(newsub) - 1 + len(prev_sub) + 1 prevlen = (int)STRLEN(reg_prev_sub); tmpsub = alloc(STRLEN(newsub) + prevlen); if (tmpsub != NULL) { // copy prefix len = (int)(p - newsub); // not including ~ mch_memmove(tmpsub, newsub, (size_t)len); // interpret tilde mch_memmove(tmpsub + len, reg_prev_sub, (size_t)prevlen); // copy postfix if (!magic) ++p; // back off backslash STRCPY(tmpsub + len + prevlen, p + 1); if (newsub != source) // already allocated newsub vim_free(newsub); newsub = tmpsub; p = newsub + len + prevlen; } } else if (magic) STRMOVE(p, p + 1); // remove '~' else STRMOVE(p, p + 2); // remove '\~' --p; } else { if (*p == '\\' && p[1]) // skip escaped characters ++p; if (has_mbyte) p += (*mb_ptr2len)(p) - 1; } } vim_free(reg_prev_sub); if (newsub != source) // newsub was allocated, just keep it reg_prev_sub = newsub; else // no ~ found, need to save newsub reg_prev_sub = vim_strsave(newsub); return newsub; }
regtilde(char_u *source, int magic) { char_u *newsub = source; char_u *tmpsub; char_u *p; int len; int prevlen; for (p = newsub; *p; ++p) { if ((*p == '~' && magic) || (*p == '\\' && *(p + 1) == '~' && !magic)) { if (reg_prev_sub != NULL) { // length = len(newsub) - 1 + len(prev_sub) + 1 prevlen = (int)STRLEN(reg_prev_sub); tmpsub = alloc(STRLEN(newsub) + prevlen); if (tmpsub != NULL) { // copy prefix len = (int)(p - newsub); // not including ~ mch_memmove(tmpsub, newsub, (size_t)len); // interpret tilde mch_memmove(tmpsub + len, reg_prev_sub, (size_t)prevlen); // copy postfix if (!magic) ++p; // back off backslash STRCPY(tmpsub + len + prevlen, p + 1); if (newsub != source) // already allocated newsub vim_free(newsub); newsub = tmpsub; p = newsub + len + prevlen; } } else if (magic) STRMOVE(p, p + 1); // remove '~' else STRMOVE(p, p + 2); // remove '\~' --p; } else { if (*p == '\\' && p[1]) // skip escaped characters ++p; if (has_mbyte) p += (*mb_ptr2len)(p) - 1; } } // Store a copy of newsub in reg_prev_sub. It is always allocated, // because recursive calls may make the returned string invalid. vim_free(reg_prev_sub); reg_prev_sub = vim_strsave(newsub); return newsub; }
{'added': [(1769, ' // Store a copy of newsub in reg_prev_sub. It is always allocated,'), (1770, ' // because recursive calls may make the returned string invalid.'), (1772, ' reg_prev_sub = vim_strsave(newsub);'), (1773, '')], 'deleted': [(1770, ' if (newsub != source)\t// newsub was allocated, just keep it'), (1771, '\treg_prev_sub = newsub;'), (1772, ' else\t\t\t// no ~ found, need to save newsub'), (1773, '\treg_prev_sub = vim_strsave(newsub);')]}
4
4
2,094
10,923
50
283
16
https://github.com/vim/vim
CVE-2022-2345
CWE-416
2,760
skcipher.c
C
crypto_skcipher_init_tfm
/* * Symmetric key cipher operations. * * Generic encrypt/decrypt wrapper for ciphers, handles operations across * multiple page boundaries by using temporary blocks. In user context, * the kernel is given a chance to schedule us once per page. * * Copyright (c) 2015 Herbert Xu <herbert@gondor.apana.org.au> * * This program is free software; you can redistribute it and/or modify it * under the terms of the GNU General Public License as published by the Free * Software Foundation; either version 2 of the License, or (at your option) * any later version. * */ #include <crypto/internal/aead.h> #include <crypto/internal/skcipher.h> #include <crypto/scatterwalk.h> #include <linux/bug.h> #include <linux/cryptouser.h> #include <linux/compiler.h> #include <linux/list.h> #include <linux/module.h> #include <linux/rtnetlink.h> #include <linux/seq_file.h> #include <net/netlink.h> #include "internal.h" enum { SKCIPHER_WALK_PHYS = 1 << 0, SKCIPHER_WALK_SLOW = 1 << 1, SKCIPHER_WALK_COPY = 1 << 2, SKCIPHER_WALK_DIFF = 1 << 3, SKCIPHER_WALK_SLEEP = 1 << 4, }; struct skcipher_walk_buffer { struct list_head entry; struct scatter_walk dst; unsigned int len; u8 *data; u8 buffer[]; }; static int skcipher_walk_next(struct skcipher_walk *walk); static inline void skcipher_unmap(struct scatter_walk *walk, void *vaddr) { if (PageHighMem(scatterwalk_page(walk))) kunmap_atomic(vaddr); } static inline void *skcipher_map(struct scatter_walk *walk) { struct page *page = scatterwalk_page(walk); return (PageHighMem(page) ? 
kmap_atomic(page) : page_address(page)) + offset_in_page(walk->offset); } static inline void skcipher_map_src(struct skcipher_walk *walk) { walk->src.virt.addr = skcipher_map(&walk->in); } static inline void skcipher_map_dst(struct skcipher_walk *walk) { walk->dst.virt.addr = skcipher_map(&walk->out); } static inline void skcipher_unmap_src(struct skcipher_walk *walk) { skcipher_unmap(&walk->in, walk->src.virt.addr); } static inline void skcipher_unmap_dst(struct skcipher_walk *walk) { skcipher_unmap(&walk->out, walk->dst.virt.addr); } static inline gfp_t skcipher_walk_gfp(struct skcipher_walk *walk) { return walk->flags & SKCIPHER_WALK_SLEEP ? GFP_KERNEL : GFP_ATOMIC; } /* Get a spot of the specified length that does not straddle a page. * The caller needs to ensure that there is enough space for this operation. */ static inline u8 *skcipher_get_spot(u8 *start, unsigned int len) { u8 *end_page = (u8 *)(((unsigned long)(start + len - 1)) & PAGE_MASK); return max(start, end_page); } static int skcipher_done_slow(struct skcipher_walk *walk, unsigned int bsize) { u8 *addr; addr = (u8 *)ALIGN((unsigned long)walk->buffer, walk->alignmask + 1); addr = skcipher_get_spot(addr, bsize); scatterwalk_copychunks(addr, &walk->out, bsize, (walk->flags & SKCIPHER_WALK_PHYS) ? 
2 : 1); return 0; } int skcipher_walk_done(struct skcipher_walk *walk, int err) { unsigned int n = walk->nbytes - err; unsigned int nbytes; nbytes = walk->total - n; if (unlikely(err < 0)) { nbytes = 0; n = 0; } else if (likely(!(walk->flags & (SKCIPHER_WALK_PHYS | SKCIPHER_WALK_SLOW | SKCIPHER_WALK_COPY | SKCIPHER_WALK_DIFF)))) { unmap_src: skcipher_unmap_src(walk); } else if (walk->flags & SKCIPHER_WALK_DIFF) { skcipher_unmap_dst(walk); goto unmap_src; } else if (walk->flags & SKCIPHER_WALK_COPY) { skcipher_map_dst(walk); memcpy(walk->dst.virt.addr, walk->page, n); skcipher_unmap_dst(walk); } else if (unlikely(walk->flags & SKCIPHER_WALK_SLOW)) { if (WARN_ON(err)) { err = -EINVAL; nbytes = 0; } else n = skcipher_done_slow(walk, n); } if (err > 0) err = 0; walk->total = nbytes; walk->nbytes = nbytes; scatterwalk_advance(&walk->in, n); scatterwalk_advance(&walk->out, n); scatterwalk_done(&walk->in, 0, nbytes); scatterwalk_done(&walk->out, 1, nbytes); if (nbytes) { crypto_yield(walk->flags & SKCIPHER_WALK_SLEEP ? CRYPTO_TFM_REQ_MAY_SLEEP : 0); return skcipher_walk_next(walk); } /* Short-circuit for the common/fast path. 
*/ if (!((unsigned long)walk->buffer | (unsigned long)walk->page)) goto out; if (walk->flags & SKCIPHER_WALK_PHYS) goto out; if (walk->iv != walk->oiv) memcpy(walk->oiv, walk->iv, walk->ivsize); if (walk->buffer != walk->page) kfree(walk->buffer); if (walk->page) free_page((unsigned long)walk->page); out: return err; } EXPORT_SYMBOL_GPL(skcipher_walk_done); void skcipher_walk_complete(struct skcipher_walk *walk, int err) { struct skcipher_walk_buffer *p, *tmp; list_for_each_entry_safe(p, tmp, &walk->buffers, entry) { u8 *data; if (err) goto done; data = p->data; if (!data) { data = PTR_ALIGN(&p->buffer[0], walk->alignmask + 1); data = skcipher_get_spot(data, walk->stride); } scatterwalk_copychunks(data, &p->dst, p->len, 1); if (offset_in_page(p->data) + p->len + walk->stride > PAGE_SIZE) free_page((unsigned long)p->data); done: list_del(&p->entry); kfree(p); } if (!err && walk->iv != walk->oiv) memcpy(walk->oiv, walk->iv, walk->ivsize); if (walk->buffer != walk->page) kfree(walk->buffer); if (walk->page) free_page((unsigned long)walk->page); } EXPORT_SYMBOL_GPL(skcipher_walk_complete); static void skcipher_queue_write(struct skcipher_walk *walk, struct skcipher_walk_buffer *p) { p->dst = walk->out; list_add_tail(&p->entry, &walk->buffers); } static int skcipher_next_slow(struct skcipher_walk *walk, unsigned int bsize) { bool phys = walk->flags & SKCIPHER_WALK_PHYS; unsigned alignmask = walk->alignmask; struct skcipher_walk_buffer *p; unsigned a; unsigned n; u8 *buffer; void *v; if (!phys) { if (!walk->buffer) walk->buffer = walk->page; buffer = walk->buffer; if (buffer) goto ok; } /* Start with the minimum alignment of kmalloc. */ a = crypto_tfm_ctx_alignment() - 1; n = bsize; if (phys) { /* Calculate the minimum alignment of p->buffer. */ a &= (sizeof(*p) ^ (sizeof(*p) - 1)) >> 1; n += sizeof(*p); } /* Minimum size to align p->buffer by alignmask. */ n += alignmask & ~a; /* Minimum size to ensure p->buffer does not straddle a page. 
*/ n += (bsize - 1) & ~(alignmask | a); v = kzalloc(n, skcipher_walk_gfp(walk)); if (!v) return skcipher_walk_done(walk, -ENOMEM); if (phys) { p = v; p->len = bsize; skcipher_queue_write(walk, p); buffer = p->buffer; } else { walk->buffer = v; buffer = v; } ok: walk->dst.virt.addr = PTR_ALIGN(buffer, alignmask + 1); walk->dst.virt.addr = skcipher_get_spot(walk->dst.virt.addr, bsize); walk->src.virt.addr = walk->dst.virt.addr; scatterwalk_copychunks(walk->src.virt.addr, &walk->in, bsize, 0); walk->nbytes = bsize; walk->flags |= SKCIPHER_WALK_SLOW; return 0; } static int skcipher_next_copy(struct skcipher_walk *walk) { struct skcipher_walk_buffer *p; u8 *tmp = walk->page; skcipher_map_src(walk); memcpy(tmp, walk->src.virt.addr, walk->nbytes); skcipher_unmap_src(walk); walk->src.virt.addr = tmp; walk->dst.virt.addr = tmp; if (!(walk->flags & SKCIPHER_WALK_PHYS)) return 0; p = kmalloc(sizeof(*p), skcipher_walk_gfp(walk)); if (!p) return -ENOMEM; p->data = walk->page; p->len = walk->nbytes; skcipher_queue_write(walk, p); if (offset_in_page(walk->page) + walk->nbytes + walk->stride > PAGE_SIZE) walk->page = NULL; else walk->page += walk->nbytes; return 0; } static int skcipher_next_fast(struct skcipher_walk *walk) { unsigned long diff; walk->src.phys.page = scatterwalk_page(&walk->in); walk->src.phys.offset = offset_in_page(walk->in.offset); walk->dst.phys.page = scatterwalk_page(&walk->out); walk->dst.phys.offset = offset_in_page(walk->out.offset); if (walk->flags & SKCIPHER_WALK_PHYS) return 0; diff = walk->src.phys.offset - walk->dst.phys.offset; diff |= walk->src.virt.page - walk->dst.virt.page; skcipher_map_src(walk); walk->dst.virt.addr = walk->src.virt.addr; if (diff) { walk->flags |= SKCIPHER_WALK_DIFF; skcipher_map_dst(walk); } return 0; } static int skcipher_walk_next(struct skcipher_walk *walk) { unsigned int bsize; unsigned int n; int err; walk->flags &= ~(SKCIPHER_WALK_SLOW | SKCIPHER_WALK_COPY | SKCIPHER_WALK_DIFF); n = walk->total; bsize = 
min(walk->stride, max(n, walk->blocksize)); n = scatterwalk_clamp(&walk->in, n); n = scatterwalk_clamp(&walk->out, n); if (unlikely(n < bsize)) { if (unlikely(walk->total < walk->blocksize)) return skcipher_walk_done(walk, -EINVAL); slow_path: err = skcipher_next_slow(walk, bsize); goto set_phys_lowmem; } if (unlikely((walk->in.offset | walk->out.offset) & walk->alignmask)) { if (!walk->page) { gfp_t gfp = skcipher_walk_gfp(walk); walk->page = (void *)__get_free_page(gfp); if (!walk->page) goto slow_path; } walk->nbytes = min_t(unsigned, n, PAGE_SIZE - offset_in_page(walk->page)); walk->flags |= SKCIPHER_WALK_COPY; err = skcipher_next_copy(walk); goto set_phys_lowmem; } walk->nbytes = n; return skcipher_next_fast(walk); set_phys_lowmem: if (!err && (walk->flags & SKCIPHER_WALK_PHYS)) { walk->src.phys.page = virt_to_page(walk->src.virt.addr); walk->dst.phys.page = virt_to_page(walk->dst.virt.addr); walk->src.phys.offset &= PAGE_SIZE - 1; walk->dst.phys.offset &= PAGE_SIZE - 1; } return err; } EXPORT_SYMBOL_GPL(skcipher_walk_next); static int skcipher_copy_iv(struct skcipher_walk *walk) { unsigned a = crypto_tfm_ctx_alignment() - 1; unsigned alignmask = walk->alignmask; unsigned ivsize = walk->ivsize; unsigned bs = walk->stride; unsigned aligned_bs; unsigned size; u8 *iv; aligned_bs = ALIGN(bs, alignmask); /* Minimum size to align buffer by alignmask. */ size = alignmask & ~a; if (walk->flags & SKCIPHER_WALK_PHYS) size += ivsize; else { size += aligned_bs + ivsize; /* Minimum size to ensure buffer does not straddle a page. 
*/ size += (bs - 1) & ~(alignmask | a); } walk->buffer = kmalloc(size, skcipher_walk_gfp(walk)); if (!walk->buffer) return -ENOMEM; iv = PTR_ALIGN(walk->buffer, alignmask + 1); iv = skcipher_get_spot(iv, bs) + aligned_bs; walk->iv = memcpy(iv, walk->iv, walk->ivsize); return 0; } static int skcipher_walk_first(struct skcipher_walk *walk) { walk->nbytes = 0; if (WARN_ON_ONCE(in_irq())) return -EDEADLK; if (unlikely(!walk->total)) return 0; walk->buffer = NULL; if (unlikely(((unsigned long)walk->iv & walk->alignmask))) { int err = skcipher_copy_iv(walk); if (err) return err; } walk->page = NULL; walk->nbytes = walk->total; return skcipher_walk_next(walk); } static int skcipher_walk_skcipher(struct skcipher_walk *walk, struct skcipher_request *req) { struct crypto_skcipher *tfm = crypto_skcipher_reqtfm(req); scatterwalk_start(&walk->in, req->src); scatterwalk_start(&walk->out, req->dst); walk->total = req->cryptlen; walk->iv = req->iv; walk->oiv = req->iv; walk->flags &= ~SKCIPHER_WALK_SLEEP; walk->flags |= req->base.flags & CRYPTO_TFM_REQ_MAY_SLEEP ? SKCIPHER_WALK_SLEEP : 0; walk->blocksize = crypto_skcipher_blocksize(tfm); walk->stride = crypto_skcipher_walksize(tfm); walk->ivsize = crypto_skcipher_ivsize(tfm); walk->alignmask = crypto_skcipher_alignmask(tfm); return skcipher_walk_first(walk); } int skcipher_walk_virt(struct skcipher_walk *walk, struct skcipher_request *req, bool atomic) { int err; walk->flags &= ~SKCIPHER_WALK_PHYS; err = skcipher_walk_skcipher(walk, req); walk->flags &= atomic ? 
~SKCIPHER_WALK_SLEEP : ~0; return err; } EXPORT_SYMBOL_GPL(skcipher_walk_virt); void skcipher_walk_atomise(struct skcipher_walk *walk) { walk->flags &= ~SKCIPHER_WALK_SLEEP; } EXPORT_SYMBOL_GPL(skcipher_walk_atomise); int skcipher_walk_async(struct skcipher_walk *walk, struct skcipher_request *req) { walk->flags |= SKCIPHER_WALK_PHYS; INIT_LIST_HEAD(&walk->buffers); return skcipher_walk_skcipher(walk, req); } EXPORT_SYMBOL_GPL(skcipher_walk_async); static int skcipher_walk_aead_common(struct skcipher_walk *walk, struct aead_request *req, bool atomic) { struct crypto_aead *tfm = crypto_aead_reqtfm(req); int err; walk->flags &= ~SKCIPHER_WALK_PHYS; scatterwalk_start(&walk->in, req->src); scatterwalk_start(&walk->out, req->dst); scatterwalk_copychunks(NULL, &walk->in, req->assoclen, 2); scatterwalk_copychunks(NULL, &walk->out, req->assoclen, 2); walk->iv = req->iv; walk->oiv = req->iv; if (req->base.flags & CRYPTO_TFM_REQ_MAY_SLEEP) walk->flags |= SKCIPHER_WALK_SLEEP; else walk->flags &= ~SKCIPHER_WALK_SLEEP; walk->blocksize = crypto_aead_blocksize(tfm); walk->stride = crypto_aead_chunksize(tfm); walk->ivsize = crypto_aead_ivsize(tfm); walk->alignmask = crypto_aead_alignmask(tfm); err = skcipher_walk_first(walk); if (atomic) walk->flags &= ~SKCIPHER_WALK_SLEEP; return err; } int skcipher_walk_aead(struct skcipher_walk *walk, struct aead_request *req, bool atomic) { walk->total = req->cryptlen; return skcipher_walk_aead_common(walk, req, atomic); } EXPORT_SYMBOL_GPL(skcipher_walk_aead); int skcipher_walk_aead_encrypt(struct skcipher_walk *walk, struct aead_request *req, bool atomic) { walk->total = req->cryptlen; return skcipher_walk_aead_common(walk, req, atomic); } EXPORT_SYMBOL_GPL(skcipher_walk_aead_encrypt); int skcipher_walk_aead_decrypt(struct skcipher_walk *walk, struct aead_request *req, bool atomic) { struct crypto_aead *tfm = crypto_aead_reqtfm(req); walk->total = req->cryptlen - crypto_aead_authsize(tfm); return skcipher_walk_aead_common(walk, req, atomic); 
} EXPORT_SYMBOL_GPL(skcipher_walk_aead_decrypt); static unsigned int crypto_skcipher_extsize(struct crypto_alg *alg) { if (alg->cra_type == &crypto_blkcipher_type) return sizeof(struct crypto_blkcipher *); if (alg->cra_type == &crypto_ablkcipher_type || alg->cra_type == &crypto_givcipher_type) return sizeof(struct crypto_ablkcipher *); return crypto_alg_extsize(alg); } static int skcipher_setkey_blkcipher(struct crypto_skcipher *tfm, const u8 *key, unsigned int keylen) { struct crypto_blkcipher **ctx = crypto_skcipher_ctx(tfm); struct crypto_blkcipher *blkcipher = *ctx; int err; crypto_blkcipher_clear_flags(blkcipher, ~0); crypto_blkcipher_set_flags(blkcipher, crypto_skcipher_get_flags(tfm) & CRYPTO_TFM_REQ_MASK); err = crypto_blkcipher_setkey(blkcipher, key, keylen); crypto_skcipher_set_flags(tfm, crypto_blkcipher_get_flags(blkcipher) & CRYPTO_TFM_RES_MASK); return err; } static int skcipher_crypt_blkcipher(struct skcipher_request *req, int (*crypt)(struct blkcipher_desc *, struct scatterlist *, struct scatterlist *, unsigned int)) { struct crypto_skcipher *tfm = crypto_skcipher_reqtfm(req); struct crypto_blkcipher **ctx = crypto_skcipher_ctx(tfm); struct blkcipher_desc desc = { .tfm = *ctx, .info = req->iv, .flags = req->base.flags, }; return crypt(&desc, req->dst, req->src, req->cryptlen); } static int skcipher_encrypt_blkcipher(struct skcipher_request *req) { struct crypto_skcipher *skcipher = crypto_skcipher_reqtfm(req); struct crypto_tfm *tfm = crypto_skcipher_tfm(skcipher); struct blkcipher_alg *alg = &tfm->__crt_alg->cra_blkcipher; return skcipher_crypt_blkcipher(req, alg->encrypt); } static int skcipher_decrypt_blkcipher(struct skcipher_request *req) { struct crypto_skcipher *skcipher = crypto_skcipher_reqtfm(req); struct crypto_tfm *tfm = crypto_skcipher_tfm(skcipher); struct blkcipher_alg *alg = &tfm->__crt_alg->cra_blkcipher; return skcipher_crypt_blkcipher(req, alg->decrypt); } static void crypto_exit_skcipher_ops_blkcipher(struct crypto_tfm *tfm) { 
struct crypto_blkcipher **ctx = crypto_tfm_ctx(tfm); crypto_free_blkcipher(*ctx); } static int crypto_init_skcipher_ops_blkcipher(struct crypto_tfm *tfm) { struct crypto_alg *calg = tfm->__crt_alg; struct crypto_skcipher *skcipher = __crypto_skcipher_cast(tfm); struct crypto_blkcipher **ctx = crypto_tfm_ctx(tfm); struct crypto_blkcipher *blkcipher; struct crypto_tfm *btfm; if (!crypto_mod_get(calg)) return -EAGAIN; btfm = __crypto_alloc_tfm(calg, CRYPTO_ALG_TYPE_BLKCIPHER, CRYPTO_ALG_TYPE_MASK); if (IS_ERR(btfm)) { crypto_mod_put(calg); return PTR_ERR(btfm); } blkcipher = __crypto_blkcipher_cast(btfm); *ctx = blkcipher; tfm->exit = crypto_exit_skcipher_ops_blkcipher; skcipher->setkey = skcipher_setkey_blkcipher; skcipher->encrypt = skcipher_encrypt_blkcipher; skcipher->decrypt = skcipher_decrypt_blkcipher; skcipher->ivsize = crypto_blkcipher_ivsize(blkcipher); skcipher->keysize = calg->cra_blkcipher.max_keysize; return 0; } static int skcipher_setkey_ablkcipher(struct crypto_skcipher *tfm, const u8 *key, unsigned int keylen) { struct crypto_ablkcipher **ctx = crypto_skcipher_ctx(tfm); struct crypto_ablkcipher *ablkcipher = *ctx; int err; crypto_ablkcipher_clear_flags(ablkcipher, ~0); crypto_ablkcipher_set_flags(ablkcipher, crypto_skcipher_get_flags(tfm) & CRYPTO_TFM_REQ_MASK); err = crypto_ablkcipher_setkey(ablkcipher, key, keylen); crypto_skcipher_set_flags(tfm, crypto_ablkcipher_get_flags(ablkcipher) & CRYPTO_TFM_RES_MASK); return err; } static int skcipher_crypt_ablkcipher(struct skcipher_request *req, int (*crypt)(struct ablkcipher_request *)) { struct crypto_skcipher *tfm = crypto_skcipher_reqtfm(req); struct crypto_ablkcipher **ctx = crypto_skcipher_ctx(tfm); struct ablkcipher_request *subreq = skcipher_request_ctx(req); ablkcipher_request_set_tfm(subreq, *ctx); ablkcipher_request_set_callback(subreq, skcipher_request_flags(req), req->base.complete, req->base.data); ablkcipher_request_set_crypt(subreq, req->src, req->dst, req->cryptlen, req->iv); return 
crypt(subreq); } static int skcipher_encrypt_ablkcipher(struct skcipher_request *req) { struct crypto_skcipher *skcipher = crypto_skcipher_reqtfm(req); struct crypto_tfm *tfm = crypto_skcipher_tfm(skcipher); struct ablkcipher_alg *alg = &tfm->__crt_alg->cra_ablkcipher; return skcipher_crypt_ablkcipher(req, alg->encrypt); } static int skcipher_decrypt_ablkcipher(struct skcipher_request *req) { struct crypto_skcipher *skcipher = crypto_skcipher_reqtfm(req); struct crypto_tfm *tfm = crypto_skcipher_tfm(skcipher); struct ablkcipher_alg *alg = &tfm->__crt_alg->cra_ablkcipher; return skcipher_crypt_ablkcipher(req, alg->decrypt); } static void crypto_exit_skcipher_ops_ablkcipher(struct crypto_tfm *tfm) { struct crypto_ablkcipher **ctx = crypto_tfm_ctx(tfm); crypto_free_ablkcipher(*ctx); } static int crypto_init_skcipher_ops_ablkcipher(struct crypto_tfm *tfm) { struct crypto_alg *calg = tfm->__crt_alg; struct crypto_skcipher *skcipher = __crypto_skcipher_cast(tfm); struct crypto_ablkcipher **ctx = crypto_tfm_ctx(tfm); struct crypto_ablkcipher *ablkcipher; struct crypto_tfm *abtfm; if (!crypto_mod_get(calg)) return -EAGAIN; abtfm = __crypto_alloc_tfm(calg, 0, 0); if (IS_ERR(abtfm)) { crypto_mod_put(calg); return PTR_ERR(abtfm); } ablkcipher = __crypto_ablkcipher_cast(abtfm); *ctx = ablkcipher; tfm->exit = crypto_exit_skcipher_ops_ablkcipher; skcipher->setkey = skcipher_setkey_ablkcipher; skcipher->encrypt = skcipher_encrypt_ablkcipher; skcipher->decrypt = skcipher_decrypt_ablkcipher; skcipher->ivsize = crypto_ablkcipher_ivsize(ablkcipher); skcipher->reqsize = crypto_ablkcipher_reqsize(ablkcipher) + sizeof(struct ablkcipher_request); skcipher->keysize = calg->cra_ablkcipher.max_keysize; return 0; } static void crypto_skcipher_exit_tfm(struct crypto_tfm *tfm) { struct crypto_skcipher *skcipher = __crypto_skcipher_cast(tfm); struct skcipher_alg *alg = crypto_skcipher_alg(skcipher); alg->exit(skcipher); } static int crypto_skcipher_init_tfm(struct crypto_tfm *tfm) { struct 
crypto_skcipher *skcipher = __crypto_skcipher_cast(tfm); struct skcipher_alg *alg = crypto_skcipher_alg(skcipher); if (tfm->__crt_alg->cra_type == &crypto_blkcipher_type) return crypto_init_skcipher_ops_blkcipher(tfm); if (tfm->__crt_alg->cra_type == &crypto_ablkcipher_type || tfm->__crt_alg->cra_type == &crypto_givcipher_type) return crypto_init_skcipher_ops_ablkcipher(tfm); skcipher->setkey = alg->setkey; skcipher->encrypt = alg->encrypt; skcipher->decrypt = alg->decrypt; skcipher->ivsize = alg->ivsize; skcipher->keysize = alg->max_keysize; if (alg->exit) skcipher->base.exit = crypto_skcipher_exit_tfm; if (alg->init) return alg->init(skcipher); return 0; } static void crypto_skcipher_free_instance(struct crypto_instance *inst) { struct skcipher_instance *skcipher = container_of(inst, struct skcipher_instance, s.base); skcipher->free(skcipher); } static void crypto_skcipher_show(struct seq_file *m, struct crypto_alg *alg) __maybe_unused; static void crypto_skcipher_show(struct seq_file *m, struct crypto_alg *alg) { struct skcipher_alg *skcipher = container_of(alg, struct skcipher_alg, base); seq_printf(m, "type : skcipher\n"); seq_printf(m, "async : %s\n", alg->cra_flags & CRYPTO_ALG_ASYNC ? 
"yes" : "no"); seq_printf(m, "blocksize : %u\n", alg->cra_blocksize); seq_printf(m, "min keysize : %u\n", skcipher->min_keysize); seq_printf(m, "max keysize : %u\n", skcipher->max_keysize); seq_printf(m, "ivsize : %u\n", skcipher->ivsize); seq_printf(m, "chunksize : %u\n", skcipher->chunksize); seq_printf(m, "walksize : %u\n", skcipher->walksize); } #ifdef CONFIG_NET static int crypto_skcipher_report(struct sk_buff *skb, struct crypto_alg *alg) { struct crypto_report_blkcipher rblkcipher; struct skcipher_alg *skcipher = container_of(alg, struct skcipher_alg, base); strncpy(rblkcipher.type, "skcipher", sizeof(rblkcipher.type)); strncpy(rblkcipher.geniv, "<none>", sizeof(rblkcipher.geniv)); rblkcipher.blocksize = alg->cra_blocksize; rblkcipher.min_keysize = skcipher->min_keysize; rblkcipher.max_keysize = skcipher->max_keysize; rblkcipher.ivsize = skcipher->ivsize; if (nla_put(skb, CRYPTOCFGA_REPORT_BLKCIPHER, sizeof(struct crypto_report_blkcipher), &rblkcipher)) goto nla_put_failure; return 0; nla_put_failure: return -EMSGSIZE; } #else static int crypto_skcipher_report(struct sk_buff *skb, struct crypto_alg *alg) { return -ENOSYS; } #endif static const struct crypto_type crypto_skcipher_type2 = { .extsize = crypto_skcipher_extsize, .init_tfm = crypto_skcipher_init_tfm, .free = crypto_skcipher_free_instance, #ifdef CONFIG_PROC_FS .show = crypto_skcipher_show, #endif .report = crypto_skcipher_report, .maskclear = ~CRYPTO_ALG_TYPE_MASK, .maskset = CRYPTO_ALG_TYPE_BLKCIPHER_MASK, .type = CRYPTO_ALG_TYPE_SKCIPHER, .tfmsize = offsetof(struct crypto_skcipher, base), }; int crypto_grab_skcipher(struct crypto_skcipher_spawn *spawn, const char *name, u32 type, u32 mask) { spawn->base.frontend = &crypto_skcipher_type2; return crypto_grab_spawn(&spawn->base, name, type, mask); } EXPORT_SYMBOL_GPL(crypto_grab_skcipher); struct crypto_skcipher *crypto_alloc_skcipher(const char *alg_name, u32 type, u32 mask) { return crypto_alloc_tfm(alg_name, &crypto_skcipher_type2, type, mask); } 
EXPORT_SYMBOL_GPL(crypto_alloc_skcipher); int crypto_has_skcipher2(const char *alg_name, u32 type, u32 mask) { return crypto_type_has_alg(alg_name, &crypto_skcipher_type2, type, mask); } EXPORT_SYMBOL_GPL(crypto_has_skcipher2); static int skcipher_prepare_alg(struct skcipher_alg *alg) { struct crypto_alg *base = &alg->base; if (alg->ivsize > PAGE_SIZE / 8 || alg->chunksize > PAGE_SIZE / 8 || alg->walksize > PAGE_SIZE / 8) return -EINVAL; if (!alg->chunksize) alg->chunksize = base->cra_blocksize; if (!alg->walksize) alg->walksize = alg->chunksize; base->cra_type = &crypto_skcipher_type2; base->cra_flags &= ~CRYPTO_ALG_TYPE_MASK; base->cra_flags |= CRYPTO_ALG_TYPE_SKCIPHER; return 0; } int crypto_register_skcipher(struct skcipher_alg *alg) { struct crypto_alg *base = &alg->base; int err; err = skcipher_prepare_alg(alg); if (err) return err; return crypto_register_alg(base); } EXPORT_SYMBOL_GPL(crypto_register_skcipher); void crypto_unregister_skcipher(struct skcipher_alg *alg) { crypto_unregister_alg(&alg->base); } EXPORT_SYMBOL_GPL(crypto_unregister_skcipher); int crypto_register_skciphers(struct skcipher_alg *algs, int count) { int i, ret; for (i = 0; i < count; i++) { ret = crypto_register_skcipher(&algs[i]); if (ret) goto err; } return 0; err: for (--i; i >= 0; --i) crypto_unregister_skcipher(&algs[i]); return ret; } EXPORT_SYMBOL_GPL(crypto_register_skciphers); void crypto_unregister_skciphers(struct skcipher_alg *algs, int count) { int i; for (i = count - 1; i >= 0; --i) crypto_unregister_skcipher(&algs[i]); } EXPORT_SYMBOL_GPL(crypto_unregister_skciphers); int skcipher_register_instance(struct crypto_template *tmpl, struct skcipher_instance *inst) { int err; err = skcipher_prepare_alg(&inst->alg); if (err) return err; return crypto_register_instance(tmpl, skcipher_crypto_instance(inst)); } EXPORT_SYMBOL_GPL(skcipher_register_instance); MODULE_LICENSE("GPL"); MODULE_DESCRIPTION("Symmetric key cipher type");
/* * Symmetric key cipher operations. * * Generic encrypt/decrypt wrapper for ciphers, handles operations across * multiple page boundaries by using temporary blocks. In user context, * the kernel is given a chance to schedule us once per page. * * Copyright (c) 2015 Herbert Xu <herbert@gondor.apana.org.au> * * This program is free software; you can redistribute it and/or modify it * under the terms of the GNU General Public License as published by the Free * Software Foundation; either version 2 of the License, or (at your option) * any later version. * */ #include <crypto/internal/aead.h> #include <crypto/internal/skcipher.h> #include <crypto/scatterwalk.h> #include <linux/bug.h> #include <linux/cryptouser.h> #include <linux/compiler.h> #include <linux/list.h> #include <linux/module.h> #include <linux/rtnetlink.h> #include <linux/seq_file.h> #include <net/netlink.h> #include "internal.h" enum { SKCIPHER_WALK_PHYS = 1 << 0, SKCIPHER_WALK_SLOW = 1 << 1, SKCIPHER_WALK_COPY = 1 << 2, SKCIPHER_WALK_DIFF = 1 << 3, SKCIPHER_WALK_SLEEP = 1 << 4, }; struct skcipher_walk_buffer { struct list_head entry; struct scatter_walk dst; unsigned int len; u8 *data; u8 buffer[]; }; static int skcipher_walk_next(struct skcipher_walk *walk); static inline void skcipher_unmap(struct scatter_walk *walk, void *vaddr) { if (PageHighMem(scatterwalk_page(walk))) kunmap_atomic(vaddr); } static inline void *skcipher_map(struct scatter_walk *walk) { struct page *page = scatterwalk_page(walk); return (PageHighMem(page) ? 
kmap_atomic(page) : page_address(page)) + offset_in_page(walk->offset); } static inline void skcipher_map_src(struct skcipher_walk *walk) { walk->src.virt.addr = skcipher_map(&walk->in); } static inline void skcipher_map_dst(struct skcipher_walk *walk) { walk->dst.virt.addr = skcipher_map(&walk->out); } static inline void skcipher_unmap_src(struct skcipher_walk *walk) { skcipher_unmap(&walk->in, walk->src.virt.addr); } static inline void skcipher_unmap_dst(struct skcipher_walk *walk) { skcipher_unmap(&walk->out, walk->dst.virt.addr); } static inline gfp_t skcipher_walk_gfp(struct skcipher_walk *walk) { return walk->flags & SKCIPHER_WALK_SLEEP ? GFP_KERNEL : GFP_ATOMIC; } /* Get a spot of the specified length that does not straddle a page. * The caller needs to ensure that there is enough space for this operation. */ static inline u8 *skcipher_get_spot(u8 *start, unsigned int len) { u8 *end_page = (u8 *)(((unsigned long)(start + len - 1)) & PAGE_MASK); return max(start, end_page); } static int skcipher_done_slow(struct skcipher_walk *walk, unsigned int bsize) { u8 *addr; addr = (u8 *)ALIGN((unsigned long)walk->buffer, walk->alignmask + 1); addr = skcipher_get_spot(addr, bsize); scatterwalk_copychunks(addr, &walk->out, bsize, (walk->flags & SKCIPHER_WALK_PHYS) ? 
2 : 1); return 0; } int skcipher_walk_done(struct skcipher_walk *walk, int err) { unsigned int n = walk->nbytes - err; unsigned int nbytes; nbytes = walk->total - n; if (unlikely(err < 0)) { nbytes = 0; n = 0; } else if (likely(!(walk->flags & (SKCIPHER_WALK_PHYS | SKCIPHER_WALK_SLOW | SKCIPHER_WALK_COPY | SKCIPHER_WALK_DIFF)))) { unmap_src: skcipher_unmap_src(walk); } else if (walk->flags & SKCIPHER_WALK_DIFF) { skcipher_unmap_dst(walk); goto unmap_src; } else if (walk->flags & SKCIPHER_WALK_COPY) { skcipher_map_dst(walk); memcpy(walk->dst.virt.addr, walk->page, n); skcipher_unmap_dst(walk); } else if (unlikely(walk->flags & SKCIPHER_WALK_SLOW)) { if (WARN_ON(err)) { err = -EINVAL; nbytes = 0; } else n = skcipher_done_slow(walk, n); } if (err > 0) err = 0; walk->total = nbytes; walk->nbytes = nbytes; scatterwalk_advance(&walk->in, n); scatterwalk_advance(&walk->out, n); scatterwalk_done(&walk->in, 0, nbytes); scatterwalk_done(&walk->out, 1, nbytes); if (nbytes) { crypto_yield(walk->flags & SKCIPHER_WALK_SLEEP ? CRYPTO_TFM_REQ_MAY_SLEEP : 0); return skcipher_walk_next(walk); } /* Short-circuit for the common/fast path. 
*/ if (!((unsigned long)walk->buffer | (unsigned long)walk->page)) goto out; if (walk->flags & SKCIPHER_WALK_PHYS) goto out; if (walk->iv != walk->oiv) memcpy(walk->oiv, walk->iv, walk->ivsize); if (walk->buffer != walk->page) kfree(walk->buffer); if (walk->page) free_page((unsigned long)walk->page); out: return err; } EXPORT_SYMBOL_GPL(skcipher_walk_done); void skcipher_walk_complete(struct skcipher_walk *walk, int err) { struct skcipher_walk_buffer *p, *tmp; list_for_each_entry_safe(p, tmp, &walk->buffers, entry) { u8 *data; if (err) goto done; data = p->data; if (!data) { data = PTR_ALIGN(&p->buffer[0], walk->alignmask + 1); data = skcipher_get_spot(data, walk->stride); } scatterwalk_copychunks(data, &p->dst, p->len, 1); if (offset_in_page(p->data) + p->len + walk->stride > PAGE_SIZE) free_page((unsigned long)p->data); done: list_del(&p->entry); kfree(p); } if (!err && walk->iv != walk->oiv) memcpy(walk->oiv, walk->iv, walk->ivsize); if (walk->buffer != walk->page) kfree(walk->buffer); if (walk->page) free_page((unsigned long)walk->page); } EXPORT_SYMBOL_GPL(skcipher_walk_complete); static void skcipher_queue_write(struct skcipher_walk *walk, struct skcipher_walk_buffer *p) { p->dst = walk->out; list_add_tail(&p->entry, &walk->buffers); } static int skcipher_next_slow(struct skcipher_walk *walk, unsigned int bsize) { bool phys = walk->flags & SKCIPHER_WALK_PHYS; unsigned alignmask = walk->alignmask; struct skcipher_walk_buffer *p; unsigned a; unsigned n; u8 *buffer; void *v; if (!phys) { if (!walk->buffer) walk->buffer = walk->page; buffer = walk->buffer; if (buffer) goto ok; } /* Start with the minimum alignment of kmalloc. */ a = crypto_tfm_ctx_alignment() - 1; n = bsize; if (phys) { /* Calculate the minimum alignment of p->buffer. */ a &= (sizeof(*p) ^ (sizeof(*p) - 1)) >> 1; n += sizeof(*p); } /* Minimum size to align p->buffer by alignmask. */ n += alignmask & ~a; /* Minimum size to ensure p->buffer does not straddle a page. 
*/ n += (bsize - 1) & ~(alignmask | a); v = kzalloc(n, skcipher_walk_gfp(walk)); if (!v) return skcipher_walk_done(walk, -ENOMEM); if (phys) { p = v; p->len = bsize; skcipher_queue_write(walk, p); buffer = p->buffer; } else { walk->buffer = v; buffer = v; } ok: walk->dst.virt.addr = PTR_ALIGN(buffer, alignmask + 1); walk->dst.virt.addr = skcipher_get_spot(walk->dst.virt.addr, bsize); walk->src.virt.addr = walk->dst.virt.addr; scatterwalk_copychunks(walk->src.virt.addr, &walk->in, bsize, 0); walk->nbytes = bsize; walk->flags |= SKCIPHER_WALK_SLOW; return 0; } static int skcipher_next_copy(struct skcipher_walk *walk) { struct skcipher_walk_buffer *p; u8 *tmp = walk->page; skcipher_map_src(walk); memcpy(tmp, walk->src.virt.addr, walk->nbytes); skcipher_unmap_src(walk); walk->src.virt.addr = tmp; walk->dst.virt.addr = tmp; if (!(walk->flags & SKCIPHER_WALK_PHYS)) return 0; p = kmalloc(sizeof(*p), skcipher_walk_gfp(walk)); if (!p) return -ENOMEM; p->data = walk->page; p->len = walk->nbytes; skcipher_queue_write(walk, p); if (offset_in_page(walk->page) + walk->nbytes + walk->stride > PAGE_SIZE) walk->page = NULL; else walk->page += walk->nbytes; return 0; } static int skcipher_next_fast(struct skcipher_walk *walk) { unsigned long diff; walk->src.phys.page = scatterwalk_page(&walk->in); walk->src.phys.offset = offset_in_page(walk->in.offset); walk->dst.phys.page = scatterwalk_page(&walk->out); walk->dst.phys.offset = offset_in_page(walk->out.offset); if (walk->flags & SKCIPHER_WALK_PHYS) return 0; diff = walk->src.phys.offset - walk->dst.phys.offset; diff |= walk->src.virt.page - walk->dst.virt.page; skcipher_map_src(walk); walk->dst.virt.addr = walk->src.virt.addr; if (diff) { walk->flags |= SKCIPHER_WALK_DIFF; skcipher_map_dst(walk); } return 0; } static int skcipher_walk_next(struct skcipher_walk *walk) { unsigned int bsize; unsigned int n; int err; walk->flags &= ~(SKCIPHER_WALK_SLOW | SKCIPHER_WALK_COPY | SKCIPHER_WALK_DIFF); n = walk->total; bsize = 
min(walk->stride, max(n, walk->blocksize)); n = scatterwalk_clamp(&walk->in, n); n = scatterwalk_clamp(&walk->out, n); if (unlikely(n < bsize)) { if (unlikely(walk->total < walk->blocksize)) return skcipher_walk_done(walk, -EINVAL); slow_path: err = skcipher_next_slow(walk, bsize); goto set_phys_lowmem; } if (unlikely((walk->in.offset | walk->out.offset) & walk->alignmask)) { if (!walk->page) { gfp_t gfp = skcipher_walk_gfp(walk); walk->page = (void *)__get_free_page(gfp); if (!walk->page) goto slow_path; } walk->nbytes = min_t(unsigned, n, PAGE_SIZE - offset_in_page(walk->page)); walk->flags |= SKCIPHER_WALK_COPY; err = skcipher_next_copy(walk); goto set_phys_lowmem; } walk->nbytes = n; return skcipher_next_fast(walk); set_phys_lowmem: if (!err && (walk->flags & SKCIPHER_WALK_PHYS)) { walk->src.phys.page = virt_to_page(walk->src.virt.addr); walk->dst.phys.page = virt_to_page(walk->dst.virt.addr); walk->src.phys.offset &= PAGE_SIZE - 1; walk->dst.phys.offset &= PAGE_SIZE - 1; } return err; } EXPORT_SYMBOL_GPL(skcipher_walk_next); static int skcipher_copy_iv(struct skcipher_walk *walk) { unsigned a = crypto_tfm_ctx_alignment() - 1; unsigned alignmask = walk->alignmask; unsigned ivsize = walk->ivsize; unsigned bs = walk->stride; unsigned aligned_bs; unsigned size; u8 *iv; aligned_bs = ALIGN(bs, alignmask); /* Minimum size to align buffer by alignmask. */ size = alignmask & ~a; if (walk->flags & SKCIPHER_WALK_PHYS) size += ivsize; else { size += aligned_bs + ivsize; /* Minimum size to ensure buffer does not straddle a page. 
*/ size += (bs - 1) & ~(alignmask | a); } walk->buffer = kmalloc(size, skcipher_walk_gfp(walk)); if (!walk->buffer) return -ENOMEM; iv = PTR_ALIGN(walk->buffer, alignmask + 1); iv = skcipher_get_spot(iv, bs) + aligned_bs; walk->iv = memcpy(iv, walk->iv, walk->ivsize); return 0; } static int skcipher_walk_first(struct skcipher_walk *walk) { walk->nbytes = 0; if (WARN_ON_ONCE(in_irq())) return -EDEADLK; if (unlikely(!walk->total)) return 0; walk->buffer = NULL; if (unlikely(((unsigned long)walk->iv & walk->alignmask))) { int err = skcipher_copy_iv(walk); if (err) return err; } walk->page = NULL; walk->nbytes = walk->total; return skcipher_walk_next(walk); } static int skcipher_walk_skcipher(struct skcipher_walk *walk, struct skcipher_request *req) { struct crypto_skcipher *tfm = crypto_skcipher_reqtfm(req); scatterwalk_start(&walk->in, req->src); scatterwalk_start(&walk->out, req->dst); walk->total = req->cryptlen; walk->iv = req->iv; walk->oiv = req->iv; walk->flags &= ~SKCIPHER_WALK_SLEEP; walk->flags |= req->base.flags & CRYPTO_TFM_REQ_MAY_SLEEP ? SKCIPHER_WALK_SLEEP : 0; walk->blocksize = crypto_skcipher_blocksize(tfm); walk->stride = crypto_skcipher_walksize(tfm); walk->ivsize = crypto_skcipher_ivsize(tfm); walk->alignmask = crypto_skcipher_alignmask(tfm); return skcipher_walk_first(walk); } int skcipher_walk_virt(struct skcipher_walk *walk, struct skcipher_request *req, bool atomic) { int err; walk->flags &= ~SKCIPHER_WALK_PHYS; err = skcipher_walk_skcipher(walk, req); walk->flags &= atomic ? 
~SKCIPHER_WALK_SLEEP : ~0; return err; } EXPORT_SYMBOL_GPL(skcipher_walk_virt); void skcipher_walk_atomise(struct skcipher_walk *walk) { walk->flags &= ~SKCIPHER_WALK_SLEEP; } EXPORT_SYMBOL_GPL(skcipher_walk_atomise); int skcipher_walk_async(struct skcipher_walk *walk, struct skcipher_request *req) { walk->flags |= SKCIPHER_WALK_PHYS; INIT_LIST_HEAD(&walk->buffers); return skcipher_walk_skcipher(walk, req); } EXPORT_SYMBOL_GPL(skcipher_walk_async); static int skcipher_walk_aead_common(struct skcipher_walk *walk, struct aead_request *req, bool atomic) { struct crypto_aead *tfm = crypto_aead_reqtfm(req); int err; walk->flags &= ~SKCIPHER_WALK_PHYS; scatterwalk_start(&walk->in, req->src); scatterwalk_start(&walk->out, req->dst); scatterwalk_copychunks(NULL, &walk->in, req->assoclen, 2); scatterwalk_copychunks(NULL, &walk->out, req->assoclen, 2); walk->iv = req->iv; walk->oiv = req->iv; if (req->base.flags & CRYPTO_TFM_REQ_MAY_SLEEP) walk->flags |= SKCIPHER_WALK_SLEEP; else walk->flags &= ~SKCIPHER_WALK_SLEEP; walk->blocksize = crypto_aead_blocksize(tfm); walk->stride = crypto_aead_chunksize(tfm); walk->ivsize = crypto_aead_ivsize(tfm); walk->alignmask = crypto_aead_alignmask(tfm); err = skcipher_walk_first(walk); if (atomic) walk->flags &= ~SKCIPHER_WALK_SLEEP; return err; } int skcipher_walk_aead(struct skcipher_walk *walk, struct aead_request *req, bool atomic) { walk->total = req->cryptlen; return skcipher_walk_aead_common(walk, req, atomic); } EXPORT_SYMBOL_GPL(skcipher_walk_aead); int skcipher_walk_aead_encrypt(struct skcipher_walk *walk, struct aead_request *req, bool atomic) { walk->total = req->cryptlen; return skcipher_walk_aead_common(walk, req, atomic); } EXPORT_SYMBOL_GPL(skcipher_walk_aead_encrypt); int skcipher_walk_aead_decrypt(struct skcipher_walk *walk, struct aead_request *req, bool atomic) { struct crypto_aead *tfm = crypto_aead_reqtfm(req); walk->total = req->cryptlen - crypto_aead_authsize(tfm); return skcipher_walk_aead_common(walk, req, atomic); 
} EXPORT_SYMBOL_GPL(skcipher_walk_aead_decrypt); static unsigned int crypto_skcipher_extsize(struct crypto_alg *alg) { if (alg->cra_type == &crypto_blkcipher_type) return sizeof(struct crypto_blkcipher *); if (alg->cra_type == &crypto_ablkcipher_type || alg->cra_type == &crypto_givcipher_type) return sizeof(struct crypto_ablkcipher *); return crypto_alg_extsize(alg); } static int skcipher_setkey_blkcipher(struct crypto_skcipher *tfm, const u8 *key, unsigned int keylen) { struct crypto_blkcipher **ctx = crypto_skcipher_ctx(tfm); struct crypto_blkcipher *blkcipher = *ctx; int err; crypto_blkcipher_clear_flags(blkcipher, ~0); crypto_blkcipher_set_flags(blkcipher, crypto_skcipher_get_flags(tfm) & CRYPTO_TFM_REQ_MASK); err = crypto_blkcipher_setkey(blkcipher, key, keylen); crypto_skcipher_set_flags(tfm, crypto_blkcipher_get_flags(blkcipher) & CRYPTO_TFM_RES_MASK); return err; } static int skcipher_crypt_blkcipher(struct skcipher_request *req, int (*crypt)(struct blkcipher_desc *, struct scatterlist *, struct scatterlist *, unsigned int)) { struct crypto_skcipher *tfm = crypto_skcipher_reqtfm(req); struct crypto_blkcipher **ctx = crypto_skcipher_ctx(tfm); struct blkcipher_desc desc = { .tfm = *ctx, .info = req->iv, .flags = req->base.flags, }; return crypt(&desc, req->dst, req->src, req->cryptlen); } static int skcipher_encrypt_blkcipher(struct skcipher_request *req) { struct crypto_skcipher *skcipher = crypto_skcipher_reqtfm(req); struct crypto_tfm *tfm = crypto_skcipher_tfm(skcipher); struct blkcipher_alg *alg = &tfm->__crt_alg->cra_blkcipher; return skcipher_crypt_blkcipher(req, alg->encrypt); } static int skcipher_decrypt_blkcipher(struct skcipher_request *req) { struct crypto_skcipher *skcipher = crypto_skcipher_reqtfm(req); struct crypto_tfm *tfm = crypto_skcipher_tfm(skcipher); struct blkcipher_alg *alg = &tfm->__crt_alg->cra_blkcipher; return skcipher_crypt_blkcipher(req, alg->decrypt); } static void crypto_exit_skcipher_ops_blkcipher(struct crypto_tfm *tfm) { 
struct crypto_blkcipher **ctx = crypto_tfm_ctx(tfm); crypto_free_blkcipher(*ctx); } static int crypto_init_skcipher_ops_blkcipher(struct crypto_tfm *tfm) { struct crypto_alg *calg = tfm->__crt_alg; struct crypto_skcipher *skcipher = __crypto_skcipher_cast(tfm); struct crypto_blkcipher **ctx = crypto_tfm_ctx(tfm); struct crypto_blkcipher *blkcipher; struct crypto_tfm *btfm; if (!crypto_mod_get(calg)) return -EAGAIN; btfm = __crypto_alloc_tfm(calg, CRYPTO_ALG_TYPE_BLKCIPHER, CRYPTO_ALG_TYPE_MASK); if (IS_ERR(btfm)) { crypto_mod_put(calg); return PTR_ERR(btfm); } blkcipher = __crypto_blkcipher_cast(btfm); *ctx = blkcipher; tfm->exit = crypto_exit_skcipher_ops_blkcipher; skcipher->setkey = skcipher_setkey_blkcipher; skcipher->encrypt = skcipher_encrypt_blkcipher; skcipher->decrypt = skcipher_decrypt_blkcipher; skcipher->ivsize = crypto_blkcipher_ivsize(blkcipher); skcipher->keysize = calg->cra_blkcipher.max_keysize; return 0; } static int skcipher_setkey_ablkcipher(struct crypto_skcipher *tfm, const u8 *key, unsigned int keylen) { struct crypto_ablkcipher **ctx = crypto_skcipher_ctx(tfm); struct crypto_ablkcipher *ablkcipher = *ctx; int err; crypto_ablkcipher_clear_flags(ablkcipher, ~0); crypto_ablkcipher_set_flags(ablkcipher, crypto_skcipher_get_flags(tfm) & CRYPTO_TFM_REQ_MASK); err = crypto_ablkcipher_setkey(ablkcipher, key, keylen); crypto_skcipher_set_flags(tfm, crypto_ablkcipher_get_flags(ablkcipher) & CRYPTO_TFM_RES_MASK); return err; } static int skcipher_crypt_ablkcipher(struct skcipher_request *req, int (*crypt)(struct ablkcipher_request *)) { struct crypto_skcipher *tfm = crypto_skcipher_reqtfm(req); struct crypto_ablkcipher **ctx = crypto_skcipher_ctx(tfm); struct ablkcipher_request *subreq = skcipher_request_ctx(req); ablkcipher_request_set_tfm(subreq, *ctx); ablkcipher_request_set_callback(subreq, skcipher_request_flags(req), req->base.complete, req->base.data); ablkcipher_request_set_crypt(subreq, req->src, req->dst, req->cryptlen, req->iv); return 
crypt(subreq); } static int skcipher_encrypt_ablkcipher(struct skcipher_request *req) { struct crypto_skcipher *skcipher = crypto_skcipher_reqtfm(req); struct crypto_tfm *tfm = crypto_skcipher_tfm(skcipher); struct ablkcipher_alg *alg = &tfm->__crt_alg->cra_ablkcipher; return skcipher_crypt_ablkcipher(req, alg->encrypt); } static int skcipher_decrypt_ablkcipher(struct skcipher_request *req) { struct crypto_skcipher *skcipher = crypto_skcipher_reqtfm(req); struct crypto_tfm *tfm = crypto_skcipher_tfm(skcipher); struct ablkcipher_alg *alg = &tfm->__crt_alg->cra_ablkcipher; return skcipher_crypt_ablkcipher(req, alg->decrypt); } static void crypto_exit_skcipher_ops_ablkcipher(struct crypto_tfm *tfm) { struct crypto_ablkcipher **ctx = crypto_tfm_ctx(tfm); crypto_free_ablkcipher(*ctx); } static int crypto_init_skcipher_ops_ablkcipher(struct crypto_tfm *tfm) { struct crypto_alg *calg = tfm->__crt_alg; struct crypto_skcipher *skcipher = __crypto_skcipher_cast(tfm); struct crypto_ablkcipher **ctx = crypto_tfm_ctx(tfm); struct crypto_ablkcipher *ablkcipher; struct crypto_tfm *abtfm; if (!crypto_mod_get(calg)) return -EAGAIN; abtfm = __crypto_alloc_tfm(calg, 0, 0); if (IS_ERR(abtfm)) { crypto_mod_put(calg); return PTR_ERR(abtfm); } ablkcipher = __crypto_ablkcipher_cast(abtfm); *ctx = ablkcipher; tfm->exit = crypto_exit_skcipher_ops_ablkcipher; skcipher->setkey = skcipher_setkey_ablkcipher; skcipher->encrypt = skcipher_encrypt_ablkcipher; skcipher->decrypt = skcipher_decrypt_ablkcipher; skcipher->ivsize = crypto_ablkcipher_ivsize(ablkcipher); skcipher->reqsize = crypto_ablkcipher_reqsize(ablkcipher) + sizeof(struct ablkcipher_request); skcipher->keysize = calg->cra_ablkcipher.max_keysize; return 0; } static int skcipher_setkey_unaligned(struct crypto_skcipher *tfm, const u8 *key, unsigned int keylen) { unsigned long alignmask = crypto_skcipher_alignmask(tfm); struct skcipher_alg *cipher = crypto_skcipher_alg(tfm); u8 *buffer, *alignbuffer; unsigned long absize; int ret; absize 
= keylen + alignmask; buffer = kmalloc(absize, GFP_ATOMIC); if (!buffer) return -ENOMEM; alignbuffer = (u8 *)ALIGN((unsigned long)buffer, alignmask + 1); memcpy(alignbuffer, key, keylen); ret = cipher->setkey(tfm, alignbuffer, keylen); kzfree(buffer); return ret; } static int skcipher_setkey(struct crypto_skcipher *tfm, const u8 *key, unsigned int keylen) { struct skcipher_alg *cipher = crypto_skcipher_alg(tfm); unsigned long alignmask = crypto_skcipher_alignmask(tfm); if (keylen < cipher->min_keysize || keylen > cipher->max_keysize) { crypto_skcipher_set_flags(tfm, CRYPTO_TFM_RES_BAD_KEY_LEN); return -EINVAL; } if ((unsigned long)key & alignmask) return skcipher_setkey_unaligned(tfm, key, keylen); return cipher->setkey(tfm, key, keylen); } static void crypto_skcipher_exit_tfm(struct crypto_tfm *tfm) { struct crypto_skcipher *skcipher = __crypto_skcipher_cast(tfm); struct skcipher_alg *alg = crypto_skcipher_alg(skcipher); alg->exit(skcipher); } static int crypto_skcipher_init_tfm(struct crypto_tfm *tfm) { struct crypto_skcipher *skcipher = __crypto_skcipher_cast(tfm); struct skcipher_alg *alg = crypto_skcipher_alg(skcipher); if (tfm->__crt_alg->cra_type == &crypto_blkcipher_type) return crypto_init_skcipher_ops_blkcipher(tfm); if (tfm->__crt_alg->cra_type == &crypto_ablkcipher_type || tfm->__crt_alg->cra_type == &crypto_givcipher_type) return crypto_init_skcipher_ops_ablkcipher(tfm); skcipher->setkey = skcipher_setkey; skcipher->encrypt = alg->encrypt; skcipher->decrypt = alg->decrypt; skcipher->ivsize = alg->ivsize; skcipher->keysize = alg->max_keysize; if (alg->exit) skcipher->base.exit = crypto_skcipher_exit_tfm; if (alg->init) return alg->init(skcipher); return 0; } static void crypto_skcipher_free_instance(struct crypto_instance *inst) { struct skcipher_instance *skcipher = container_of(inst, struct skcipher_instance, s.base); skcipher->free(skcipher); } static void crypto_skcipher_show(struct seq_file *m, struct crypto_alg *alg) __maybe_unused; static void 
crypto_skcipher_show(struct seq_file *m, struct crypto_alg *alg) { struct skcipher_alg *skcipher = container_of(alg, struct skcipher_alg, base); seq_printf(m, "type : skcipher\n"); seq_printf(m, "async : %s\n", alg->cra_flags & CRYPTO_ALG_ASYNC ? "yes" : "no"); seq_printf(m, "blocksize : %u\n", alg->cra_blocksize); seq_printf(m, "min keysize : %u\n", skcipher->min_keysize); seq_printf(m, "max keysize : %u\n", skcipher->max_keysize); seq_printf(m, "ivsize : %u\n", skcipher->ivsize); seq_printf(m, "chunksize : %u\n", skcipher->chunksize); seq_printf(m, "walksize : %u\n", skcipher->walksize); } #ifdef CONFIG_NET static int crypto_skcipher_report(struct sk_buff *skb, struct crypto_alg *alg) { struct crypto_report_blkcipher rblkcipher; struct skcipher_alg *skcipher = container_of(alg, struct skcipher_alg, base); strncpy(rblkcipher.type, "skcipher", sizeof(rblkcipher.type)); strncpy(rblkcipher.geniv, "<none>", sizeof(rblkcipher.geniv)); rblkcipher.blocksize = alg->cra_blocksize; rblkcipher.min_keysize = skcipher->min_keysize; rblkcipher.max_keysize = skcipher->max_keysize; rblkcipher.ivsize = skcipher->ivsize; if (nla_put(skb, CRYPTOCFGA_REPORT_BLKCIPHER, sizeof(struct crypto_report_blkcipher), &rblkcipher)) goto nla_put_failure; return 0; nla_put_failure: return -EMSGSIZE; } #else static int crypto_skcipher_report(struct sk_buff *skb, struct crypto_alg *alg) { return -ENOSYS; } #endif static const struct crypto_type crypto_skcipher_type2 = { .extsize = crypto_skcipher_extsize, .init_tfm = crypto_skcipher_init_tfm, .free = crypto_skcipher_free_instance, #ifdef CONFIG_PROC_FS .show = crypto_skcipher_show, #endif .report = crypto_skcipher_report, .maskclear = ~CRYPTO_ALG_TYPE_MASK, .maskset = CRYPTO_ALG_TYPE_BLKCIPHER_MASK, .type = CRYPTO_ALG_TYPE_SKCIPHER, .tfmsize = offsetof(struct crypto_skcipher, base), }; int crypto_grab_skcipher(struct crypto_skcipher_spawn *spawn, const char *name, u32 type, u32 mask) { spawn->base.frontend = &crypto_skcipher_type2; return 
crypto_grab_spawn(&spawn->base, name, type, mask); } EXPORT_SYMBOL_GPL(crypto_grab_skcipher); struct crypto_skcipher *crypto_alloc_skcipher(const char *alg_name, u32 type, u32 mask) { return crypto_alloc_tfm(alg_name, &crypto_skcipher_type2, type, mask); } EXPORT_SYMBOL_GPL(crypto_alloc_skcipher); int crypto_has_skcipher2(const char *alg_name, u32 type, u32 mask) { return crypto_type_has_alg(alg_name, &crypto_skcipher_type2, type, mask); } EXPORT_SYMBOL_GPL(crypto_has_skcipher2); static int skcipher_prepare_alg(struct skcipher_alg *alg) { struct crypto_alg *base = &alg->base; if (alg->ivsize > PAGE_SIZE / 8 || alg->chunksize > PAGE_SIZE / 8 || alg->walksize > PAGE_SIZE / 8) return -EINVAL; if (!alg->chunksize) alg->chunksize = base->cra_blocksize; if (!alg->walksize) alg->walksize = alg->chunksize; base->cra_type = &crypto_skcipher_type2; base->cra_flags &= ~CRYPTO_ALG_TYPE_MASK; base->cra_flags |= CRYPTO_ALG_TYPE_SKCIPHER; return 0; } int crypto_register_skcipher(struct skcipher_alg *alg) { struct crypto_alg *base = &alg->base; int err; err = skcipher_prepare_alg(alg); if (err) return err; return crypto_register_alg(base); } EXPORT_SYMBOL_GPL(crypto_register_skcipher); void crypto_unregister_skcipher(struct skcipher_alg *alg) { crypto_unregister_alg(&alg->base); } EXPORT_SYMBOL_GPL(crypto_unregister_skcipher); int crypto_register_skciphers(struct skcipher_alg *algs, int count) { int i, ret; for (i = 0; i < count; i++) { ret = crypto_register_skcipher(&algs[i]); if (ret) goto err; } return 0; err: for (--i; i >= 0; --i) crypto_unregister_skcipher(&algs[i]); return ret; } EXPORT_SYMBOL_GPL(crypto_register_skciphers); void crypto_unregister_skciphers(struct skcipher_alg *algs, int count) { int i; for (i = count - 1; i >= 0; --i) crypto_unregister_skcipher(&algs[i]); } EXPORT_SYMBOL_GPL(crypto_unregister_skciphers); int skcipher_register_instance(struct crypto_template *tmpl, struct skcipher_instance *inst) { int err; err = skcipher_prepare_alg(&inst->alg); if (err) 
return err; return crypto_register_instance(tmpl, skcipher_crypto_instance(inst)); } EXPORT_SYMBOL_GPL(skcipher_register_instance); MODULE_LICENSE("GPL"); MODULE_DESCRIPTION("Symmetric key cipher type");
static int crypto_skcipher_init_tfm(struct crypto_tfm *tfm) { struct crypto_skcipher *skcipher = __crypto_skcipher_cast(tfm); struct skcipher_alg *alg = crypto_skcipher_alg(skcipher); if (tfm->__crt_alg->cra_type == &crypto_blkcipher_type) return crypto_init_skcipher_ops_blkcipher(tfm); if (tfm->__crt_alg->cra_type == &crypto_ablkcipher_type || tfm->__crt_alg->cra_type == &crypto_givcipher_type) return crypto_init_skcipher_ops_ablkcipher(tfm); skcipher->setkey = alg->setkey; skcipher->encrypt = alg->encrypt; skcipher->decrypt = alg->decrypt; skcipher->ivsize = alg->ivsize; skcipher->keysize = alg->max_keysize; if (alg->exit) skcipher->base.exit = crypto_skcipher_exit_tfm; if (alg->init) return alg->init(skcipher); return 0; }
static int crypto_skcipher_init_tfm(struct crypto_tfm *tfm) { struct crypto_skcipher *skcipher = __crypto_skcipher_cast(tfm); struct skcipher_alg *alg = crypto_skcipher_alg(skcipher); if (tfm->__crt_alg->cra_type == &crypto_blkcipher_type) return crypto_init_skcipher_ops_blkcipher(tfm); if (tfm->__crt_alg->cra_type == &crypto_ablkcipher_type || tfm->__crt_alg->cra_type == &crypto_givcipher_type) return crypto_init_skcipher_ops_ablkcipher(tfm); skcipher->setkey = skcipher_setkey; skcipher->encrypt = alg->encrypt; skcipher->decrypt = alg->decrypt; skcipher->ivsize = alg->ivsize; skcipher->keysize = alg->max_keysize; if (alg->exit) skcipher->base.exit = crypto_skcipher_exit_tfm; if (alg->init) return alg->init(skcipher); return 0; }
{'added': [(767, 'static int skcipher_setkey_unaligned(struct crypto_skcipher *tfm,'), (768, '\t\t\t\t const u8 *key, unsigned int keylen)'), (769, '{'), (770, '\tunsigned long alignmask = crypto_skcipher_alignmask(tfm);'), (771, '\tstruct skcipher_alg *cipher = crypto_skcipher_alg(tfm);'), (772, '\tu8 *buffer, *alignbuffer;'), (773, '\tunsigned long absize;'), (774, '\tint ret;'), (775, ''), (776, '\tabsize = keylen + alignmask;'), (777, '\tbuffer = kmalloc(absize, GFP_ATOMIC);'), (778, '\tif (!buffer)'), (779, '\t\treturn -ENOMEM;'), (780, ''), (781, '\talignbuffer = (u8 *)ALIGN((unsigned long)buffer, alignmask + 1);'), (782, '\tmemcpy(alignbuffer, key, keylen);'), (783, '\tret = cipher->setkey(tfm, alignbuffer, keylen);'), (784, '\tkzfree(buffer);'), (785, '\treturn ret;'), (786, '}'), (787, ''), (788, 'static int skcipher_setkey(struct crypto_skcipher *tfm, const u8 *key,'), (789, '\t\t\t unsigned int keylen)'), (790, '{'), (791, '\tstruct skcipher_alg *cipher = crypto_skcipher_alg(tfm);'), (792, '\tunsigned long alignmask = crypto_skcipher_alignmask(tfm);'), (793, ''), (794, '\tif (keylen < cipher->min_keysize || keylen > cipher->max_keysize) {'), (795, '\t\tcrypto_skcipher_set_flags(tfm, CRYPTO_TFM_RES_BAD_KEY_LEN);'), (796, '\t\treturn -EINVAL;'), (797, '\t}'), (798, ''), (799, '\tif ((unsigned long)key & alignmask)'), (800, '\t\treturn skcipher_setkey_unaligned(tfm, key, keylen);'), (801, ''), (802, '\treturn cipher->setkey(tfm, key, keylen);'), (803, '}'), (804, ''), (825, '\tskcipher->setkey = skcipher_setkey;')], 'deleted': [(787, '\tskcipher->setkey = alg->setkey;')]}
39
1
773
5,209
20
143
6
https://github.com/torvalds/linux
CVE-2017-9211
CWE-476
985
gup.c
C
gup_huge_pud
#include <linux/kernel.h> #include <linux/errno.h> #include <linux/err.h> #include <linux/spinlock.h> #include <linux/mm.h> #include <linux/memremap.h> #include <linux/pagemap.h> #include <linux/rmap.h> #include <linux/swap.h> #include <linux/swapops.h> #include <linux/sched/signal.h> #include <linux/rwsem.h> #include <linux/hugetlb.h> #include <asm/mmu_context.h> #include <asm/pgtable.h> #include <asm/tlbflush.h> #include "internal.h" struct follow_page_context { struct dev_pagemap *pgmap; unsigned int page_mask; }; static struct page *no_page_table(struct vm_area_struct *vma, unsigned int flags) { /* * When core dumping an enormous anonymous area that nobody * has touched so far, we don't want to allocate unnecessary pages or * page tables. Return error instead of NULL to skip handle_mm_fault, * then get_dump_page() will return NULL to leave a hole in the dump. * But we can only make this optimization where a hole would surely * be zero-filled if handle_mm_fault() actually did handle it. */ if ((flags & FOLL_DUMP) && (!vma->vm_ops || !vma->vm_ops->fault)) return ERR_PTR(-EFAULT); return NULL; } static int follow_pfn_pte(struct vm_area_struct *vma, unsigned long address, pte_t *pte, unsigned int flags) { /* No page to get reference */ if (flags & FOLL_GET) return -EFAULT; if (flags & FOLL_TOUCH) { pte_t entry = *pte; if (flags & FOLL_WRITE) entry = pte_mkdirty(entry); entry = pte_mkyoung(entry); if (!pte_same(*pte, entry)) { set_pte_at(vma->vm_mm, address, pte, entry); update_mmu_cache(vma, address, pte); } } /* Proper page table entry exists, but no corresponding struct page */ return -EEXIST; } /* * FOLL_FORCE can write to even unwritable pte's, but only * after we've gone through a COW cycle and they are dirty. 
*/ static inline bool can_follow_write_pte(pte_t pte, unsigned int flags) { return pte_write(pte) || ((flags & FOLL_FORCE) && (flags & FOLL_COW) && pte_dirty(pte)); } static struct page *follow_page_pte(struct vm_area_struct *vma, unsigned long address, pmd_t *pmd, unsigned int flags, struct dev_pagemap **pgmap) { struct mm_struct *mm = vma->vm_mm; struct page *page; spinlock_t *ptl; pte_t *ptep, pte; retry: if (unlikely(pmd_bad(*pmd))) return no_page_table(vma, flags); ptep = pte_offset_map_lock(mm, pmd, address, &ptl); pte = *ptep; if (!pte_present(pte)) { swp_entry_t entry; /* * KSM's break_ksm() relies upon recognizing a ksm page * even while it is being migrated, so for that case we * need migration_entry_wait(). */ if (likely(!(flags & FOLL_MIGRATION))) goto no_page; if (pte_none(pte)) goto no_page; entry = pte_to_swp_entry(pte); if (!is_migration_entry(entry)) goto no_page; pte_unmap_unlock(ptep, ptl); migration_entry_wait(mm, pmd, address); goto retry; } if ((flags & FOLL_NUMA) && pte_protnone(pte)) goto no_page; if ((flags & FOLL_WRITE) && !can_follow_write_pte(pte, flags)) { pte_unmap_unlock(ptep, ptl); return NULL; } page = vm_normal_page(vma, address, pte); if (!page && pte_devmap(pte) && (flags & FOLL_GET)) { /* * Only return device mapping pages in the FOLL_GET case since * they are only valid while holding the pgmap reference. 
*/ *pgmap = get_dev_pagemap(pte_pfn(pte), *pgmap); if (*pgmap) page = pte_page(pte); else goto no_page; } else if (unlikely(!page)) { if (flags & FOLL_DUMP) { /* Avoid special (like zero) pages in core dumps */ page = ERR_PTR(-EFAULT); goto out; } if (is_zero_pfn(pte_pfn(pte))) { page = pte_page(pte); } else { int ret; ret = follow_pfn_pte(vma, address, ptep, flags); page = ERR_PTR(ret); goto out; } } if (flags & FOLL_SPLIT && PageTransCompound(page)) { int ret; get_page(page); pte_unmap_unlock(ptep, ptl); lock_page(page); ret = split_huge_page(page); unlock_page(page); put_page(page); if (ret) return ERR_PTR(ret); goto retry; } if (flags & FOLL_GET) get_page(page); if (flags & FOLL_TOUCH) { if ((flags & FOLL_WRITE) && !pte_dirty(pte) && !PageDirty(page)) set_page_dirty(page); /* * pte_mkyoung() would be more correct here, but atomic care * is needed to avoid losing the dirty bit: it is easier to use * mark_page_accessed(). */ mark_page_accessed(page); } if ((flags & FOLL_MLOCK) && (vma->vm_flags & VM_LOCKED)) { /* Do not mlock pte-mapped THP */ if (PageTransCompound(page)) goto out; /* * The preliminary mapping check is mainly to avoid the * pointless overhead of lock_page on the ZERO_PAGE * which might bounce very badly if there is contention. * * If the page is already locked, we don't need to * handle it now - vmscan will handle it later if and * when it attempts to reclaim the page. */ if (page->mapping && trylock_page(page)) { lru_add_drain(); /* push cached pages to LRU */ /* * Because we lock page here, and migration is * blocked by the pte's page reference, and we * know the page is still mapped, we don't even * need to check for file-cache page truncation. 
*/ mlock_vma_page(page); unlock_page(page); } } out: pte_unmap_unlock(ptep, ptl); return page; no_page: pte_unmap_unlock(ptep, ptl); if (!pte_none(pte)) return NULL; return no_page_table(vma, flags); } static struct page *follow_pmd_mask(struct vm_area_struct *vma, unsigned long address, pud_t *pudp, unsigned int flags, struct follow_page_context *ctx) { pmd_t *pmd, pmdval; spinlock_t *ptl; struct page *page; struct mm_struct *mm = vma->vm_mm; pmd = pmd_offset(pudp, address); /* * The READ_ONCE() will stabilize the pmdval in a register or * on the stack so that it will stop changing under the code. */ pmdval = READ_ONCE(*pmd); if (pmd_none(pmdval)) return no_page_table(vma, flags); if (pmd_huge(pmdval) && vma->vm_flags & VM_HUGETLB) { page = follow_huge_pmd(mm, address, pmd, flags); if (page) return page; return no_page_table(vma, flags); } if (is_hugepd(__hugepd(pmd_val(pmdval)))) { page = follow_huge_pd(vma, address, __hugepd(pmd_val(pmdval)), flags, PMD_SHIFT); if (page) return page; return no_page_table(vma, flags); } retry: if (!pmd_present(pmdval)) { if (likely(!(flags & FOLL_MIGRATION))) return no_page_table(vma, flags); VM_BUG_ON(thp_migration_supported() && !is_pmd_migration_entry(pmdval)); if (is_pmd_migration_entry(pmdval)) pmd_migration_entry_wait(mm, pmd); pmdval = READ_ONCE(*pmd); /* * MADV_DONTNEED may convert the pmd to null because * mmap_sem is held in read mode */ if (pmd_none(pmdval)) return no_page_table(vma, flags); goto retry; } if (pmd_devmap(pmdval)) { ptl = pmd_lock(mm, pmd); page = follow_devmap_pmd(vma, address, pmd, flags, &ctx->pgmap); spin_unlock(ptl); if (page) return page; } if (likely(!pmd_trans_huge(pmdval))) return follow_page_pte(vma, address, pmd, flags, &ctx->pgmap); if ((flags & FOLL_NUMA) && pmd_protnone(pmdval)) return no_page_table(vma, flags); retry_locked: ptl = pmd_lock(mm, pmd); if (unlikely(pmd_none(*pmd))) { spin_unlock(ptl); return no_page_table(vma, flags); } if (unlikely(!pmd_present(*pmd))) { spin_unlock(ptl); if 
(likely(!(flags & FOLL_MIGRATION))) return no_page_table(vma, flags); pmd_migration_entry_wait(mm, pmd); goto retry_locked; } if (unlikely(!pmd_trans_huge(*pmd))) { spin_unlock(ptl); return follow_page_pte(vma, address, pmd, flags, &ctx->pgmap); } if (flags & FOLL_SPLIT) { int ret; page = pmd_page(*pmd); if (is_huge_zero_page(page)) { spin_unlock(ptl); ret = 0; split_huge_pmd(vma, pmd, address); if (pmd_trans_unstable(pmd)) ret = -EBUSY; } else { get_page(page); spin_unlock(ptl); lock_page(page); ret = split_huge_page(page); unlock_page(page); put_page(page); if (pmd_none(*pmd)) return no_page_table(vma, flags); } return ret ? ERR_PTR(ret) : follow_page_pte(vma, address, pmd, flags, &ctx->pgmap); } page = follow_trans_huge_pmd(vma, address, pmd, flags); spin_unlock(ptl); ctx->page_mask = HPAGE_PMD_NR - 1; return page; } static struct page *follow_pud_mask(struct vm_area_struct *vma, unsigned long address, p4d_t *p4dp, unsigned int flags, struct follow_page_context *ctx) { pud_t *pud; spinlock_t *ptl; struct page *page; struct mm_struct *mm = vma->vm_mm; pud = pud_offset(p4dp, address); if (pud_none(*pud)) return no_page_table(vma, flags); if (pud_huge(*pud) && vma->vm_flags & VM_HUGETLB) { page = follow_huge_pud(mm, address, pud, flags); if (page) return page; return no_page_table(vma, flags); } if (is_hugepd(__hugepd(pud_val(*pud)))) { page = follow_huge_pd(vma, address, __hugepd(pud_val(*pud)), flags, PUD_SHIFT); if (page) return page; return no_page_table(vma, flags); } if (pud_devmap(*pud)) { ptl = pud_lock(mm, pud); page = follow_devmap_pud(vma, address, pud, flags, &ctx->pgmap); spin_unlock(ptl); if (page) return page; } if (unlikely(pud_bad(*pud))) return no_page_table(vma, flags); return follow_pmd_mask(vma, address, pud, flags, ctx); } static struct page *follow_p4d_mask(struct vm_area_struct *vma, unsigned long address, pgd_t *pgdp, unsigned int flags, struct follow_page_context *ctx) { p4d_t *p4d; struct page *page; p4d = p4d_offset(pgdp, address); if 
(p4d_none(*p4d)) return no_page_table(vma, flags); BUILD_BUG_ON(p4d_huge(*p4d)); if (unlikely(p4d_bad(*p4d))) return no_page_table(vma, flags); if (is_hugepd(__hugepd(p4d_val(*p4d)))) { page = follow_huge_pd(vma, address, __hugepd(p4d_val(*p4d)), flags, P4D_SHIFT); if (page) return page; return no_page_table(vma, flags); } return follow_pud_mask(vma, address, p4d, flags, ctx); } /** * follow_page_mask - look up a page descriptor from a user-virtual address * @vma: vm_area_struct mapping @address * @address: virtual address to look up * @flags: flags modifying lookup behaviour * @ctx: contains dev_pagemap for %ZONE_DEVICE memory pinning and a * pointer to output page_mask * * @flags can have FOLL_ flags set, defined in <linux/mm.h> * * When getting pages from ZONE_DEVICE memory, the @ctx->pgmap caches * the device's dev_pagemap metadata to avoid repeating expensive lookups. * * On output, the @ctx->page_mask is set according to the size of the page. * * Return: the mapped (struct page *), %NULL if no mapping exists, or * an error pointer if there is a mapping to something not represented * by a page descriptor (see also vm_normal_page()). 
*/ struct page *follow_page_mask(struct vm_area_struct *vma, unsigned long address, unsigned int flags, struct follow_page_context *ctx) { pgd_t *pgd; struct page *page; struct mm_struct *mm = vma->vm_mm; ctx->page_mask = 0; /* make this handle hugepd */ page = follow_huge_addr(mm, address, flags & FOLL_WRITE); if (!IS_ERR(page)) { BUG_ON(flags & FOLL_GET); return page; } pgd = pgd_offset(mm, address); if (pgd_none(*pgd) || unlikely(pgd_bad(*pgd))) return no_page_table(vma, flags); if (pgd_huge(*pgd)) { page = follow_huge_pgd(mm, address, pgd, flags); if (page) return page; return no_page_table(vma, flags); } if (is_hugepd(__hugepd(pgd_val(*pgd)))) { page = follow_huge_pd(vma, address, __hugepd(pgd_val(*pgd)), flags, PGDIR_SHIFT); if (page) return page; return no_page_table(vma, flags); } return follow_p4d_mask(vma, address, pgd, flags, ctx); } struct page *follow_page(struct vm_area_struct *vma, unsigned long address, unsigned int foll_flags) { struct follow_page_context ctx = { NULL }; struct page *page; page = follow_page_mask(vma, address, foll_flags, &ctx); if (ctx.pgmap) put_dev_pagemap(ctx.pgmap); return page; } static int get_gate_page(struct mm_struct *mm, unsigned long address, unsigned int gup_flags, struct vm_area_struct **vma, struct page **page) { pgd_t *pgd; p4d_t *p4d; pud_t *pud; pmd_t *pmd; pte_t *pte; int ret = -EFAULT; /* user gate pages are read-only */ if (gup_flags & FOLL_WRITE) return -EFAULT; if (address > TASK_SIZE) pgd = pgd_offset_k(address); else pgd = pgd_offset_gate(mm, address); BUG_ON(pgd_none(*pgd)); p4d = p4d_offset(pgd, address); BUG_ON(p4d_none(*p4d)); pud = pud_offset(p4d, address); BUG_ON(pud_none(*pud)); pmd = pmd_offset(pud, address); if (!pmd_present(*pmd)) return -EFAULT; VM_BUG_ON(pmd_trans_huge(*pmd)); pte = pte_offset_map(pmd, address); if (pte_none(*pte)) goto unmap; *vma = get_gate_vma(mm); if (!page) goto out; *page = vm_normal_page(*vma, address, *pte); if (!*page) { if ((gup_flags & FOLL_DUMP) || 
!is_zero_pfn(pte_pfn(*pte))) goto unmap; *page = pte_page(*pte); /* * This should never happen (a device public page in the gate * area). */ if (is_device_public_page(*page)) goto unmap; } get_page(*page); out: ret = 0; unmap: pte_unmap(pte); return ret; } /* * mmap_sem must be held on entry. If @nonblocking != NULL and * *@flags does not include FOLL_NOWAIT, the mmap_sem may be released. * If it is, *@nonblocking will be set to 0 and -EBUSY returned. */ static int faultin_page(struct task_struct *tsk, struct vm_area_struct *vma, unsigned long address, unsigned int *flags, int *nonblocking) { unsigned int fault_flags = 0; vm_fault_t ret; /* mlock all present pages, but do not fault in new pages */ if ((*flags & (FOLL_POPULATE | FOLL_MLOCK)) == FOLL_MLOCK) return -ENOENT; if (*flags & FOLL_WRITE) fault_flags |= FAULT_FLAG_WRITE; if (*flags & FOLL_REMOTE) fault_flags |= FAULT_FLAG_REMOTE; if (nonblocking) fault_flags |= FAULT_FLAG_ALLOW_RETRY; if (*flags & FOLL_NOWAIT) fault_flags |= FAULT_FLAG_ALLOW_RETRY | FAULT_FLAG_RETRY_NOWAIT; if (*flags & FOLL_TRIED) { VM_WARN_ON_ONCE(fault_flags & FAULT_FLAG_ALLOW_RETRY); fault_flags |= FAULT_FLAG_TRIED; } ret = handle_mm_fault(vma, address, fault_flags); if (ret & VM_FAULT_ERROR) { int err = vm_fault_to_errno(ret, *flags); if (err) return err; BUG(); } if (tsk) { if (ret & VM_FAULT_MAJOR) tsk->maj_flt++; else tsk->min_flt++; } if (ret & VM_FAULT_RETRY) { if (nonblocking && !(fault_flags & FAULT_FLAG_RETRY_NOWAIT)) *nonblocking = 0; return -EBUSY; } /* * The VM_FAULT_WRITE bit tells us that do_wp_page has broken COW when * necessary, even if maybe_mkwrite decided not to set pte_write. We * can thus safely do subsequent page lookups as if they were reads. * But only do so when looping for pte_write is futile: in some cases * userspace may also be wanting to write to the gotten user page, * which a read fault here might prevent (a readonly page might get * reCOWed by userspace write). 
*/ if ((ret & VM_FAULT_WRITE) && !(vma->vm_flags & VM_WRITE)) *flags |= FOLL_COW; return 0; } static int check_vma_flags(struct vm_area_struct *vma, unsigned long gup_flags) { vm_flags_t vm_flags = vma->vm_flags; int write = (gup_flags & FOLL_WRITE); int foreign = (gup_flags & FOLL_REMOTE); if (vm_flags & (VM_IO | VM_PFNMAP)) return -EFAULT; if (gup_flags & FOLL_ANON && !vma_is_anonymous(vma)) return -EFAULT; if (write) { if (!(vm_flags & VM_WRITE)) { if (!(gup_flags & FOLL_FORCE)) return -EFAULT; /* * We used to let the write,force case do COW in a * VM_MAYWRITE VM_SHARED !VM_WRITE vma, so ptrace could * set a breakpoint in a read-only mapping of an * executable, without corrupting the file (yet only * when that file had been opened for writing!). * Anon pages in shared mappings are surprising: now * just reject it. */ if (!is_cow_mapping(vm_flags)) return -EFAULT; } } else if (!(vm_flags & VM_READ)) { if (!(gup_flags & FOLL_FORCE)) return -EFAULT; /* * Is there actually any vma we can reach here which does not * have VM_MAYREAD set? */ if (!(vm_flags & VM_MAYREAD)) return -EFAULT; } /* * gups are always data accesses, not instruction * fetches, so execute=false here */ if (!arch_vma_access_permitted(vma, write, false, foreign)) return -EFAULT; return 0; } /** * __get_user_pages() - pin user pages in memory * @tsk: task_struct of target task * @mm: mm_struct of target mm * @start: starting user address * @nr_pages: number of pages from start to pin * @gup_flags: flags modifying pin behaviour * @pages: array that receives pointers to the pages pinned. * Should be at least nr_pages long. Or NULL, if caller * only intends to ensure the pages are faulted in. * @vmas: array of pointers to vmas corresponding to each page. * Or NULL if the caller does not require them. * @nonblocking: whether waiting for disk IO or mmap_sem contention * * Returns number of pages pinned. This may be fewer than the number * requested. If nr_pages is 0 or negative, returns 0. 
If no pages * were pinned, returns -errno. Each page returned must be released * with a put_page() call when it is finished with. vmas will only * remain valid while mmap_sem is held. * * Must be called with mmap_sem held. It may be released. See below. * * __get_user_pages walks a process's page tables and takes a reference to * each struct page that each user address corresponds to at a given * instant. That is, it takes the page that would be accessed if a user * thread accesses the given user virtual address at that instant. * * This does not guarantee that the page exists in the user mappings when * __get_user_pages returns, and there may even be a completely different * page there in some cases (eg. if mmapped pagecache has been invalidated * and subsequently re faulted). However it does guarantee that the page * won't be freed completely. And mostly callers simply care that the page * contains data that was valid *at some point in time*. Typically, an IO * or similar operation cannot guarantee anything stronger anyway because * locks can't be held over the syscall boundary. * * If @gup_flags & FOLL_WRITE == 0, the page must not be written to. If * the page is written to, set_page_dirty (or set_page_dirty_lock, as * appropriate) must be called after the page is finished with, and * before put_page is called. * * If @nonblocking != NULL, __get_user_pages will not wait for disk IO * or mmap_sem contention, and if waiting is needed to pin all pages, * *@nonblocking will be set to 0. Further, if @gup_flags does not * include FOLL_NOWAIT, the mmap_sem will be released via up_read() in * this case. * * A caller using such a combination of @nonblocking and @gup_flags * must therefore hold the mmap_sem for reading only, and recognize * when it's been released. Otherwise, it must be held for either * reading or writing and will not be released. * * In most cases, get_user_pages or get_user_pages_fast should be used * instead of __get_user_pages. 
__get_user_pages should be used only if * you need some special @gup_flags. */ static long __get_user_pages(struct task_struct *tsk, struct mm_struct *mm, unsigned long start, unsigned long nr_pages, unsigned int gup_flags, struct page **pages, struct vm_area_struct **vmas, int *nonblocking) { long ret = 0, i = 0; struct vm_area_struct *vma = NULL; struct follow_page_context ctx = { NULL }; if (!nr_pages) return 0; VM_BUG_ON(!!pages != !!(gup_flags & FOLL_GET)); /* * If FOLL_FORCE is set then do not force a full fault as the hinting * fault information is unrelated to the reference behaviour of a task * using the address space */ if (!(gup_flags & FOLL_FORCE)) gup_flags |= FOLL_NUMA; do { struct page *page; unsigned int foll_flags = gup_flags; unsigned int page_increm; /* first iteration or cross vma bound */ if (!vma || start >= vma->vm_end) { vma = find_extend_vma(mm, start); if (!vma && in_gate_area(mm, start)) { ret = get_gate_page(mm, start & PAGE_MASK, gup_flags, &vma, pages ? &pages[i] : NULL); if (ret) goto out; ctx.page_mask = 0; goto next_page; } if (!vma || check_vma_flags(vma, gup_flags)) { ret = -EFAULT; goto out; } if (is_vm_hugetlb_page(vma)) { i = follow_hugetlb_page(mm, vma, pages, vmas, &start, &nr_pages, i, gup_flags, nonblocking); continue; } } retry: /* * If we have a pending SIGKILL, don't keep faulting pages and * potentially allocating memory. */ if (fatal_signal_pending(current)) { ret = -ERESTARTSYS; goto out; } cond_resched(); page = follow_page_mask(vma, start, foll_flags, &ctx); if (!page) { ret = faultin_page(tsk, vma, start, &foll_flags, nonblocking); switch (ret) { case 0: goto retry; case -EBUSY: ret = 0; /* FALLTHRU */ case -EFAULT: case -ENOMEM: case -EHWPOISON: goto out; case -ENOENT: goto next_page; } BUG(); } else if (PTR_ERR(page) == -EEXIST) { /* * Proper page table entry exists, but no corresponding * struct page. 
*/ goto next_page; } else if (IS_ERR(page)) { ret = PTR_ERR(page); goto out; } if (pages) { pages[i] = page; flush_anon_page(vma, page, start); flush_dcache_page(page); ctx.page_mask = 0; } next_page: if (vmas) { vmas[i] = vma; ctx.page_mask = 0; } page_increm = 1 + (~(start >> PAGE_SHIFT) & ctx.page_mask); if (page_increm > nr_pages) page_increm = nr_pages; i += page_increm; start += page_increm * PAGE_SIZE; nr_pages -= page_increm; } while (nr_pages); out: if (ctx.pgmap) put_dev_pagemap(ctx.pgmap); return i ? i : ret; } static bool vma_permits_fault(struct vm_area_struct *vma, unsigned int fault_flags) { bool write = !!(fault_flags & FAULT_FLAG_WRITE); bool foreign = !!(fault_flags & FAULT_FLAG_REMOTE); vm_flags_t vm_flags = write ? VM_WRITE : VM_READ; if (!(vm_flags & vma->vm_flags)) return false; /* * The architecture might have a hardware protection * mechanism other than read/write that can deny access. * * gup always represents data access, not instruction * fetches, so execute=false here: */ if (!arch_vma_access_permitted(vma, write, false, foreign)) return false; return true; } /* * fixup_user_fault() - manually resolve a user page fault * @tsk: the task_struct to use for page fault accounting, or * NULL if faults are not to be recorded. * @mm: mm_struct of target mm * @address: user address * @fault_flags:flags to pass down to handle_mm_fault() * @unlocked: did we unlock the mmap_sem while retrying, maybe NULL if caller * does not allow retry * * This is meant to be called in the specific scenario where for locking reasons * we try to access user memory in atomic context (within a pagefault_disable() * section), this returns -EFAULT, and we want to resolve the user fault before * trying again. * * Typically this is meant to be used by the futex code. 
* * The main difference with get_user_pages() is that this function will * unconditionally call handle_mm_fault() which will in turn perform all the * necessary SW fixup of the dirty and young bits in the PTE, while * get_user_pages() only guarantees to update these in the struct page. * * This is important for some architectures where those bits also gate the * access permission to the page because they are maintained in software. On * such architectures, gup() will not be enough to make a subsequent access * succeed. * * This function will not return with an unlocked mmap_sem. So it has not the * same semantics wrt the @mm->mmap_sem as does filemap_fault(). */ int fixup_user_fault(struct task_struct *tsk, struct mm_struct *mm, unsigned long address, unsigned int fault_flags, bool *unlocked) { struct vm_area_struct *vma; vm_fault_t ret, major = 0; if (unlocked) fault_flags |= FAULT_FLAG_ALLOW_RETRY; retry: vma = find_extend_vma(mm, address); if (!vma || address < vma->vm_start) return -EFAULT; if (!vma_permits_fault(vma, fault_flags)) return -EFAULT; ret = handle_mm_fault(vma, address, fault_flags); major |= ret & VM_FAULT_MAJOR; if (ret & VM_FAULT_ERROR) { int err = vm_fault_to_errno(ret, 0); if (err) return err; BUG(); } if (ret & VM_FAULT_RETRY) { down_read(&mm->mmap_sem); if (!(fault_flags & FAULT_FLAG_TRIED)) { *unlocked = true; fault_flags &= ~FAULT_FLAG_ALLOW_RETRY; fault_flags |= FAULT_FLAG_TRIED; goto retry; } } if (tsk) { if (major) tsk->maj_flt++; else tsk->min_flt++; } return 0; } EXPORT_SYMBOL_GPL(fixup_user_fault); static __always_inline long __get_user_pages_locked(struct task_struct *tsk, struct mm_struct *mm, unsigned long start, unsigned long nr_pages, struct page **pages, struct vm_area_struct **vmas, int *locked, unsigned int flags) { long ret, pages_done; bool lock_dropped; if (locked) { /* if VM_FAULT_RETRY can be returned, vmas become invalid */ BUG_ON(vmas); /* check caller initialized locked */ BUG_ON(*locked != 1); } if (pages) flags |= 
FOLL_GET; pages_done = 0; lock_dropped = false; for (;;) { ret = __get_user_pages(tsk, mm, start, nr_pages, flags, pages, vmas, locked); if (!locked) /* VM_FAULT_RETRY couldn't trigger, bypass */ return ret; /* VM_FAULT_RETRY cannot return errors */ if (!*locked) { BUG_ON(ret < 0); BUG_ON(ret >= nr_pages); } if (!pages) /* If it's a prefault don't insist harder */ return ret; if (ret > 0) { nr_pages -= ret; pages_done += ret; if (!nr_pages) break; } if (*locked) { /* * VM_FAULT_RETRY didn't trigger or it was a * FOLL_NOWAIT. */ if (!pages_done) pages_done = ret; break; } /* VM_FAULT_RETRY triggered, so seek to the faulting offset */ pages += ret; start += ret << PAGE_SHIFT; /* * Repeat on the address that fired VM_FAULT_RETRY * without FAULT_FLAG_ALLOW_RETRY but with * FAULT_FLAG_TRIED. */ *locked = 1; lock_dropped = true; down_read(&mm->mmap_sem); ret = __get_user_pages(tsk, mm, start, 1, flags | FOLL_TRIED, pages, NULL, NULL); if (ret != 1) { BUG_ON(ret > 1); if (!pages_done) pages_done = ret; break; } nr_pages--; pages_done++; if (!nr_pages) break; pages++; start += PAGE_SIZE; } if (lock_dropped && *locked) { /* * We must let the caller know we temporarily dropped the lock * and so the critical section protected by it was lost. */ up_read(&mm->mmap_sem); *locked = 0; } return pages_done; } /* * We can leverage the VM_FAULT_RETRY functionality in the page fault * paths better by using either get_user_pages_locked() or * get_user_pages_unlocked(). 
* * get_user_pages_locked() is suitable to replace the form: * * down_read(&mm->mmap_sem); * do_something() * get_user_pages(tsk, mm, ..., pages, NULL); * up_read(&mm->mmap_sem); * * to: * * int locked = 1; * down_read(&mm->mmap_sem); * do_something() * get_user_pages_locked(tsk, mm, ..., pages, &locked); * if (locked) * up_read(&mm->mmap_sem); */ long get_user_pages_locked(unsigned long start, unsigned long nr_pages, unsigned int gup_flags, struct page **pages, int *locked) { return __get_user_pages_locked(current, current->mm, start, nr_pages, pages, NULL, locked, gup_flags | FOLL_TOUCH); } EXPORT_SYMBOL(get_user_pages_locked); /* * get_user_pages_unlocked() is suitable to replace the form: * * down_read(&mm->mmap_sem); * get_user_pages(tsk, mm, ..., pages, NULL); * up_read(&mm->mmap_sem); * * with: * * get_user_pages_unlocked(tsk, mm, ..., pages); * * It is functionally equivalent to get_user_pages_fast so * get_user_pages_fast should be used instead if specific gup_flags * (e.g. FOLL_FORCE) are not required. */ long get_user_pages_unlocked(unsigned long start, unsigned long nr_pages, struct page **pages, unsigned int gup_flags) { struct mm_struct *mm = current->mm; int locked = 1; long ret; down_read(&mm->mmap_sem); ret = __get_user_pages_locked(current, mm, start, nr_pages, pages, NULL, &locked, gup_flags | FOLL_TOUCH); if (locked) up_read(&mm->mmap_sem); return ret; } EXPORT_SYMBOL(get_user_pages_unlocked); /* * get_user_pages_remote() - pin user pages in memory * @tsk: the task_struct to use for page fault accounting, or * NULL if faults are not to be recorded. * @mm: mm_struct of target mm * @start: starting user address * @nr_pages: number of pages from start to pin * @gup_flags: flags modifying lookup behaviour * @pages: array that receives pointers to the pages pinned. * Should be at least nr_pages long. Or NULL, if caller * only intends to ensure the pages are faulted in. * @vmas: array of pointers to vmas corresponding to each page. 
* Or NULL if the caller does not require them. * @locked: pointer to lock flag indicating whether lock is held and * subsequently whether VM_FAULT_RETRY functionality can be * utilised. Lock must initially be held. * * Returns number of pages pinned. This may be fewer than the number * requested. If nr_pages is 0 or negative, returns 0. If no pages * were pinned, returns -errno. Each page returned must be released * with a put_page() call when it is finished with. vmas will only * remain valid while mmap_sem is held. * * Must be called with mmap_sem held for read or write. * * get_user_pages walks a process's page tables and takes a reference to * each struct page that each user address corresponds to at a given * instant. That is, it takes the page that would be accessed if a user * thread accesses the given user virtual address at that instant. * * This does not guarantee that the page exists in the user mappings when * get_user_pages returns, and there may even be a completely different * page there in some cases (eg. if mmapped pagecache has been invalidated * and subsequently re faulted). However it does guarantee that the page * won't be freed completely. And mostly callers simply care that the page * contains data that was valid *at some point in time*. Typically, an IO * or similar operation cannot guarantee anything stronger anyway because * locks can't be held over the syscall boundary. * * If gup_flags & FOLL_WRITE == 0, the page must not be written to. If the page * is written to, set_page_dirty (or set_page_dirty_lock, as appropriate) must * be called after the page is finished with, and before put_page is called. * * get_user_pages is typically used for fewer-copy IO operations, to get a * handle on the memory by some means other than accesses via the user virtual * addresses. The pages may be submitted for DMA to devices or accessed via * their kernel linear mapping (via the kmap APIs). Care should be taken to * use the correct cache flushing APIs. 
* * See also get_user_pages_fast, for performance critical applications. * * get_user_pages should be phased out in favor of * get_user_pages_locked|unlocked or get_user_pages_fast. Nothing * should use get_user_pages because it cannot pass * FAULT_FLAG_ALLOW_RETRY to handle_mm_fault. */ long get_user_pages_remote(struct task_struct *tsk, struct mm_struct *mm, unsigned long start, unsigned long nr_pages, unsigned int gup_flags, struct page **pages, struct vm_area_struct **vmas, int *locked) { return __get_user_pages_locked(tsk, mm, start, nr_pages, pages, vmas, locked, gup_flags | FOLL_TOUCH | FOLL_REMOTE); } EXPORT_SYMBOL(get_user_pages_remote); /* * This is the same as get_user_pages_remote(), just with a * less-flexible calling convention where we assume that the task * and mm being operated on are the current task's and don't allow * passing of a locked parameter. We also obviously don't pass * FOLL_REMOTE in here. */ long get_user_pages(unsigned long start, unsigned long nr_pages, unsigned int gup_flags, struct page **pages, struct vm_area_struct **vmas) { return __get_user_pages_locked(current, current->mm, start, nr_pages, pages, vmas, NULL, gup_flags | FOLL_TOUCH); } EXPORT_SYMBOL(get_user_pages); #ifdef CONFIG_FS_DAX /* * This is the same as get_user_pages() in that it assumes we are * operating on the current task's mm, but it goes further to validate * that the vmas associated with the address range are suitable for * longterm elevated page reference counts. For example, filesystem-dax * mappings are subject to the lifetime enforced by the filesystem and * we need guarantees that longterm users like RDMA and V4L2 only * establish mappings that have a kernel enforced revocation mechanism. * * "longterm" == userspace controlled elevated page count lifetime. * Contrast this to iov_iter_get_pages() usages which are transient. 
*/ long get_user_pages_longterm(unsigned long start, unsigned long nr_pages, unsigned int gup_flags, struct page **pages, struct vm_area_struct **vmas_arg) { struct vm_area_struct **vmas = vmas_arg; struct vm_area_struct *vma_prev = NULL; long rc, i; if (!pages) return -EINVAL; if (!vmas) { vmas = kcalloc(nr_pages, sizeof(struct vm_area_struct *), GFP_KERNEL); if (!vmas) return -ENOMEM; } rc = get_user_pages(start, nr_pages, gup_flags, pages, vmas); for (i = 0; i < rc; i++) { struct vm_area_struct *vma = vmas[i]; if (vma == vma_prev) continue; vma_prev = vma; if (vma_is_fsdax(vma)) break; } /* * Either get_user_pages() failed, or the vma validation * succeeded, in either case we don't need to put_page() before * returning. */ if (i >= rc) goto out; for (i = 0; i < rc; i++) put_page(pages[i]); rc = -EOPNOTSUPP; out: if (vmas != vmas_arg) kfree(vmas); return rc; } EXPORT_SYMBOL(get_user_pages_longterm); #endif /* CONFIG_FS_DAX */ /** * populate_vma_page_range() - populate a range of pages in the vma. * @vma: target vma * @start: start address * @end: end address * @nonblocking: * * This takes care of mlocking the pages too if VM_LOCKED is set. * * return 0 on success, negative error code on error. * * vma->vm_mm->mmap_sem must be held. * * If @nonblocking is NULL, it may be held for read or write and will * be unperturbed. * * If @nonblocking is non-NULL, it must held for read only and may be * released. If it's released, *@nonblocking will be set to 0. 
*/ long populate_vma_page_range(struct vm_area_struct *vma, unsigned long start, unsigned long end, int *nonblocking) { struct mm_struct *mm = vma->vm_mm; unsigned long nr_pages = (end - start) / PAGE_SIZE; int gup_flags; VM_BUG_ON(start & ~PAGE_MASK); VM_BUG_ON(end & ~PAGE_MASK); VM_BUG_ON_VMA(start < vma->vm_start, vma); VM_BUG_ON_VMA(end > vma->vm_end, vma); VM_BUG_ON_MM(!rwsem_is_locked(&mm->mmap_sem), mm); gup_flags = FOLL_TOUCH | FOLL_POPULATE | FOLL_MLOCK; if (vma->vm_flags & VM_LOCKONFAULT) gup_flags &= ~FOLL_POPULATE; /* * We want to touch writable mappings with a write fault in order * to break COW, except for shared mappings because these don't COW * and we would not want to dirty them for nothing. */ if ((vma->vm_flags & (VM_WRITE | VM_SHARED)) == VM_WRITE) gup_flags |= FOLL_WRITE; /* * We want mlock to succeed for regions that have any permissions * other than PROT_NONE. */ if (vma->vm_flags & (VM_READ | VM_WRITE | VM_EXEC)) gup_flags |= FOLL_FORCE; /* * We made sure addr is within a VMA, so the following will * not result in a stack expansion that recurses back here. */ return __get_user_pages(current, mm, start, nr_pages, gup_flags, NULL, NULL, nonblocking); } /* * __mm_populate - populate and/or mlock pages within a range of address space. * * This is used to implement mlock() and the MAP_POPULATE / MAP_LOCKED mmap * flags. VMAs must be already marked with the desired vm_flags, and * mmap_sem must not be held. */ int __mm_populate(unsigned long start, unsigned long len, int ignore_errors) { struct mm_struct *mm = current->mm; unsigned long end, nstart, nend; struct vm_area_struct *vma = NULL; int locked = 0; long ret = 0; end = start + len; for (nstart = start; nstart < end; nstart = nend) { /* * We want to fault in pages for [nstart; end) address range. * Find first corresponding VMA. 
*/ if (!locked) { locked = 1; down_read(&mm->mmap_sem); vma = find_vma(mm, nstart); } else if (nstart >= vma->vm_end) vma = vma->vm_next; if (!vma || vma->vm_start >= end) break; /* * Set [nstart; nend) to intersection of desired address * range with the first VMA. Also, skip undesirable VMA types. */ nend = min(end, vma->vm_end); if (vma->vm_flags & (VM_IO | VM_PFNMAP)) continue; if (nstart < vma->vm_start) nstart = vma->vm_start; /* * Now fault in a range of pages. populate_vma_page_range() * double checks the vma flags, so that it won't mlock pages * if the vma was already munlocked. */ ret = populate_vma_page_range(vma, nstart, nend, &locked); if (ret < 0) { if (ignore_errors) { ret = 0; continue; /* continue at next VMA */ } break; } nend = nstart + ret * PAGE_SIZE; ret = 0; } if (locked) up_read(&mm->mmap_sem); return ret; /* 0 or negative error code */ } /** * get_dump_page() - pin user page in memory while writing it to core dump * @addr: user address * * Returns struct page pointer of user page pinned for dump, * to be freed afterwards by put_page(). * * Returns NULL on any kind of failure - a hole must then be inserted into * the corefile, to preserve alignment with its headers; and also returns * NULL wherever the ZERO_PAGE, or an anonymous pte_none, has been found - * allowing a hole to be left in the corefile to save diskspace. * * Called without mmap_sem, but after all other threads have been killed. */ #ifdef CONFIG_ELF_CORE struct page *get_dump_page(unsigned long addr) { struct vm_area_struct *vma; struct page *page; if (__get_user_pages(current, current->mm, addr, 1, FOLL_FORCE | FOLL_DUMP | FOLL_GET, &page, &vma, NULL) < 1) return NULL; flush_cache_page(vma, addr, page_to_pfn(page)); return page; } #endif /* CONFIG_ELF_CORE */ /* * Generic Fast GUP * * get_user_pages_fast attempts to pin user pages by walking the page * tables directly and avoids taking locks. 
Thus the walker needs to be * protected from page table pages being freed from under it, and should * block any THP splits. * * One way to achieve this is to have the walker disable interrupts, and * rely on IPIs from the TLB flushing code blocking before the page table * pages are freed. This is unsuitable for architectures that do not need * to broadcast an IPI when invalidating TLBs. * * Another way to achieve this is to batch up page table containing pages * belonging to more than one mm_user, then rcu_sched a callback to free those * pages. Disabling interrupts will allow the fast_gup walker to both block * the rcu_sched callback, and an IPI that we broadcast for splitting THPs * (which is a relatively rare event). The code below adopts this strategy. * * Before activating this code, please be aware that the following assumptions * are currently made: * * *) Either HAVE_RCU_TABLE_FREE is enabled, and tlb_remove_table() is used to * free pages containing page tables or TLB flushing requires IPI broadcast. * * *) ptes can be read atomically by the architecture. * * *) access_ok is sufficient to validate userspace address ranges. * * The last two assumptions can be relaxed by the addition of helper functions. * * This code is based heavily on the PowerPC implementation by Nick Piggin. */ #ifdef CONFIG_HAVE_GENERIC_GUP #ifndef gup_get_pte /* * We assume that the PTE can be read atomically. If this is not the case for * your architecture, please provide the helper. 
*/ static inline pte_t gup_get_pte(pte_t *ptep) { return READ_ONCE(*ptep); } #endif static void undo_dev_pagemap(int *nr, int nr_start, struct page **pages) { while ((*nr) - nr_start) { struct page *page = pages[--(*nr)]; ClearPageReferenced(page); put_page(page); } } #ifdef CONFIG_ARCH_HAS_PTE_SPECIAL static int gup_pte_range(pmd_t pmd, unsigned long addr, unsigned long end, int write, struct page **pages, int *nr) { struct dev_pagemap *pgmap = NULL; int nr_start = *nr, ret = 0; pte_t *ptep, *ptem; ptem = ptep = pte_offset_map(&pmd, addr); do { pte_t pte = gup_get_pte(ptep); struct page *head, *page; /* * Similar to the PMD case below, NUMA hinting must take slow * path using the pte_protnone check. */ if (pte_protnone(pte)) goto pte_unmap; if (!pte_access_permitted(pte, write)) goto pte_unmap; if (pte_devmap(pte)) { pgmap = get_dev_pagemap(pte_pfn(pte), pgmap); if (unlikely(!pgmap)) { undo_dev_pagemap(nr, nr_start, pages); goto pte_unmap; } } else if (pte_special(pte)) goto pte_unmap; VM_BUG_ON(!pfn_valid(pte_pfn(pte))); page = pte_page(pte); head = compound_head(page); if (!page_cache_get_speculative(head)) goto pte_unmap; if (unlikely(pte_val(pte) != pte_val(*ptep))) { put_page(head); goto pte_unmap; } VM_BUG_ON_PAGE(compound_head(page) != head, page); SetPageReferenced(page); pages[*nr] = page; (*nr)++; } while (ptep++, addr += PAGE_SIZE, addr != end); ret = 1; pte_unmap: if (pgmap) put_dev_pagemap(pgmap); pte_unmap(ptem); return ret; } #else /* * If we can't determine whether or not a pte is special, then fail immediately * for ptes. Note, we can still pin HugeTLB and THP as these are guaranteed not * to be special. * * For a futex to be placed on a THP tail page, get_futex_key requires a * __get_user_pages_fast implementation that can pin pages. Thus it's still * useful to have gup_huge_pmd even if we can't operate on ptes. 
*/ static int gup_pte_range(pmd_t pmd, unsigned long addr, unsigned long end, int write, struct page **pages, int *nr) { return 0; } #endif /* CONFIG_ARCH_HAS_PTE_SPECIAL */ #if defined(__HAVE_ARCH_PTE_DEVMAP) && defined(CONFIG_TRANSPARENT_HUGEPAGE) static int __gup_device_huge(unsigned long pfn, unsigned long addr, unsigned long end, struct page **pages, int *nr) { int nr_start = *nr; struct dev_pagemap *pgmap = NULL; do { struct page *page = pfn_to_page(pfn); pgmap = get_dev_pagemap(pfn, pgmap); if (unlikely(!pgmap)) { undo_dev_pagemap(nr, nr_start, pages); return 0; } SetPageReferenced(page); pages[*nr] = page; get_page(page); (*nr)++; pfn++; } while (addr += PAGE_SIZE, addr != end); if (pgmap) put_dev_pagemap(pgmap); return 1; } static int __gup_device_huge_pmd(pmd_t orig, pmd_t *pmdp, unsigned long addr, unsigned long end, struct page **pages, int *nr) { unsigned long fault_pfn; int nr_start = *nr; fault_pfn = pmd_pfn(orig) + ((addr & ~PMD_MASK) >> PAGE_SHIFT); if (!__gup_device_huge(fault_pfn, addr, end, pages, nr)) return 0; if (unlikely(pmd_val(orig) != pmd_val(*pmdp))) { undo_dev_pagemap(nr, nr_start, pages); return 0; } return 1; } static int __gup_device_huge_pud(pud_t orig, pud_t *pudp, unsigned long addr, unsigned long end, struct page **pages, int *nr) { unsigned long fault_pfn; int nr_start = *nr; fault_pfn = pud_pfn(orig) + ((addr & ~PUD_MASK) >> PAGE_SHIFT); if (!__gup_device_huge(fault_pfn, addr, end, pages, nr)) return 0; if (unlikely(pud_val(orig) != pud_val(*pudp))) { undo_dev_pagemap(nr, nr_start, pages); return 0; } return 1; } #else static int __gup_device_huge_pmd(pmd_t orig, pmd_t *pmdp, unsigned long addr, unsigned long end, struct page **pages, int *nr) { BUILD_BUG(); return 0; } static int __gup_device_huge_pud(pud_t pud, pud_t *pudp, unsigned long addr, unsigned long end, struct page **pages, int *nr) { BUILD_BUG(); return 0; } #endif static int gup_huge_pmd(pmd_t orig, pmd_t *pmdp, unsigned long addr, unsigned long end, int write, 
struct page **pages, int *nr) { struct page *head, *page; int refs; if (!pmd_access_permitted(orig, write)) return 0; if (pmd_devmap(orig)) return __gup_device_huge_pmd(orig, pmdp, addr, end, pages, nr); refs = 0; page = pmd_page(orig) + ((addr & ~PMD_MASK) >> PAGE_SHIFT); do { pages[*nr] = page; (*nr)++; page++; refs++; } while (addr += PAGE_SIZE, addr != end); head = compound_head(pmd_page(orig)); if (!page_cache_add_speculative(head, refs)) { *nr -= refs; return 0; } if (unlikely(pmd_val(orig) != pmd_val(*pmdp))) { *nr -= refs; while (refs--) put_page(head); return 0; } SetPageReferenced(head); return 1; } static int gup_huge_pud(pud_t orig, pud_t *pudp, unsigned long addr, unsigned long end, int write, struct page **pages, int *nr) { struct page *head, *page; int refs; if (!pud_access_permitted(orig, write)) return 0; if (pud_devmap(orig)) return __gup_device_huge_pud(orig, pudp, addr, end, pages, nr); refs = 0; page = pud_page(orig) + ((addr & ~PUD_MASK) >> PAGE_SHIFT); do { pages[*nr] = page; (*nr)++; page++; refs++; } while (addr += PAGE_SIZE, addr != end); head = compound_head(pud_page(orig)); if (!page_cache_add_speculative(head, refs)) { *nr -= refs; return 0; } if (unlikely(pud_val(orig) != pud_val(*pudp))) { *nr -= refs; while (refs--) put_page(head); return 0; } SetPageReferenced(head); return 1; } static int gup_huge_pgd(pgd_t orig, pgd_t *pgdp, unsigned long addr, unsigned long end, int write, struct page **pages, int *nr) { int refs; struct page *head, *page; if (!pgd_access_permitted(orig, write)) return 0; BUILD_BUG_ON(pgd_devmap(orig)); refs = 0; page = pgd_page(orig) + ((addr & ~PGDIR_MASK) >> PAGE_SHIFT); do { pages[*nr] = page; (*nr)++; page++; refs++; } while (addr += PAGE_SIZE, addr != end); head = compound_head(pgd_page(orig)); if (!page_cache_add_speculative(head, refs)) { *nr -= refs; return 0; } if (unlikely(pgd_val(orig) != pgd_val(*pgdp))) { *nr -= refs; while (refs--) put_page(head); return 0; } SetPageReferenced(head); return 1; } 
static int gup_pmd_range(pud_t pud, unsigned long addr, unsigned long end, int write, struct page **pages, int *nr) { unsigned long next; pmd_t *pmdp; pmdp = pmd_offset(&pud, addr); do { pmd_t pmd = READ_ONCE(*pmdp); next = pmd_addr_end(addr, end); if (!pmd_present(pmd)) return 0; if (unlikely(pmd_trans_huge(pmd) || pmd_huge(pmd) || pmd_devmap(pmd))) { /* * NUMA hinting faults need to be handled in the GUP * slowpath for accounting purposes and so that they * can be serialised against THP migration. */ if (pmd_protnone(pmd)) return 0; if (!gup_huge_pmd(pmd, pmdp, addr, next, write, pages, nr)) return 0; } else if (unlikely(is_hugepd(__hugepd(pmd_val(pmd))))) { /* * architecture have different format for hugetlbfs * pmd format and THP pmd format */ if (!gup_huge_pd(__hugepd(pmd_val(pmd)), addr, PMD_SHIFT, next, write, pages, nr)) return 0; } else if (!gup_pte_range(pmd, addr, next, write, pages, nr)) return 0; } while (pmdp++, addr = next, addr != end); return 1; } static int gup_pud_range(p4d_t p4d, unsigned long addr, unsigned long end, int write, struct page **pages, int *nr) { unsigned long next; pud_t *pudp; pudp = pud_offset(&p4d, addr); do { pud_t pud = READ_ONCE(*pudp); next = pud_addr_end(addr, end); if (pud_none(pud)) return 0; if (unlikely(pud_huge(pud))) { if (!gup_huge_pud(pud, pudp, addr, next, write, pages, nr)) return 0; } else if (unlikely(is_hugepd(__hugepd(pud_val(pud))))) { if (!gup_huge_pd(__hugepd(pud_val(pud)), addr, PUD_SHIFT, next, write, pages, nr)) return 0; } else if (!gup_pmd_range(pud, addr, next, write, pages, nr)) return 0; } while (pudp++, addr = next, addr != end); return 1; } static int gup_p4d_range(pgd_t pgd, unsigned long addr, unsigned long end, int write, struct page **pages, int *nr) { unsigned long next; p4d_t *p4dp; p4dp = p4d_offset(&pgd, addr); do { p4d_t p4d = READ_ONCE(*p4dp); next = p4d_addr_end(addr, end); if (p4d_none(p4d)) return 0; BUILD_BUG_ON(p4d_huge(p4d)); if (unlikely(is_hugepd(__hugepd(p4d_val(p4d))))) { if 
(!gup_huge_pd(__hugepd(p4d_val(p4d)), addr, P4D_SHIFT, next, write, pages, nr)) return 0; } else if (!gup_pud_range(p4d, addr, next, write, pages, nr)) return 0; } while (p4dp++, addr = next, addr != end); return 1; } static void gup_pgd_range(unsigned long addr, unsigned long end, int write, struct page **pages, int *nr) { unsigned long next; pgd_t *pgdp; pgdp = pgd_offset(current->mm, addr); do { pgd_t pgd = READ_ONCE(*pgdp); next = pgd_addr_end(addr, end); if (pgd_none(pgd)) return; if (unlikely(pgd_huge(pgd))) { if (!gup_huge_pgd(pgd, pgdp, addr, next, write, pages, nr)) return; } else if (unlikely(is_hugepd(__hugepd(pgd_val(pgd))))) { if (!gup_huge_pd(__hugepd(pgd_val(pgd)), addr, PGDIR_SHIFT, next, write, pages, nr)) return; } else if (!gup_p4d_range(pgd, addr, next, write, pages, nr)) return; } while (pgdp++, addr = next, addr != end); } #ifndef gup_fast_permitted /* * Check if it's allowed to use __get_user_pages_fast() for the range, or * we need to fall back to the slow version: */ bool gup_fast_permitted(unsigned long start, int nr_pages, int write) { unsigned long len, end; len = (unsigned long) nr_pages << PAGE_SHIFT; end = start + len; return end >= start; } #endif /* * Like get_user_pages_fast() except it's IRQ-safe in that it won't fall back to * the regular GUP. * Note a difference with get_user_pages_fast: this always returns the * number of pages pinned, 0 if no pages were pinned. */ int __get_user_pages_fast(unsigned long start, int nr_pages, int write, struct page **pages) { unsigned long len, end; unsigned long flags; int nr = 0; start &= PAGE_MASK; len = (unsigned long) nr_pages << PAGE_SHIFT; end = start + len; if (unlikely(!access_ok((void __user *)start, len))) return 0; /* * Disable interrupts. We use the nested form as we can already have * interrupts disabled by get_futex_key. * * With interrupts disabled, we block page table pages from being * freed from under us. 
See struct mmu_table_batch comments in * include/asm-generic/tlb.h for more details. * * We do not adopt an rcu_read_lock(.) here as we also want to * block IPIs that come from THPs splitting. */ if (gup_fast_permitted(start, nr_pages, write)) { local_irq_save(flags); gup_pgd_range(start, end, write, pages, &nr); local_irq_restore(flags); } return nr; } /** * get_user_pages_fast() - pin user pages in memory * @start: starting user address * @nr_pages: number of pages from start to pin * @write: whether pages will be written to * @pages: array that receives pointers to the pages pinned. * Should be at least nr_pages long. * * Attempt to pin user pages in memory without taking mm->mmap_sem. * If not successful, it will fall back to taking the lock and * calling get_user_pages(). * * Returns number of pages pinned. This may be fewer than the number * requested. If nr_pages is 0 or negative, returns 0. If no pages * were pinned, returns -errno. */ int get_user_pages_fast(unsigned long start, int nr_pages, int write, struct page **pages) { unsigned long addr, len, end; int nr = 0, ret = 0; start &= PAGE_MASK; addr = start; len = (unsigned long) nr_pages << PAGE_SHIFT; end = start + len; if (nr_pages <= 0) return 0; if (unlikely(!access_ok((void __user *)start, len))) return -EFAULT; if (gup_fast_permitted(start, nr_pages, write)) { local_irq_disable(); gup_pgd_range(addr, end, write, pages, &nr); local_irq_enable(); ret = nr; } if (nr < nr_pages) { /* Try to get the remaining pages with get_user_pages */ start += nr << PAGE_SHIFT; pages += nr; ret = get_user_pages_unlocked(start, nr_pages - nr, pages, write ? FOLL_WRITE : 0); /* Have to be a bit careful with return values */ if (nr > 0) { if (ret < 0) ret = nr; else ret += nr; } } return ret; } #endif /* CONFIG_HAVE_GENERIC_GUP */
#include <linux/kernel.h> #include <linux/errno.h> #include <linux/err.h> #include <linux/spinlock.h> #include <linux/mm.h> #include <linux/memremap.h> #include <linux/pagemap.h> #include <linux/rmap.h> #include <linux/swap.h> #include <linux/swapops.h> #include <linux/sched/signal.h> #include <linux/rwsem.h> #include <linux/hugetlb.h> #include <asm/mmu_context.h> #include <asm/pgtable.h> #include <asm/tlbflush.h> #include "internal.h" struct follow_page_context { struct dev_pagemap *pgmap; unsigned int page_mask; }; static struct page *no_page_table(struct vm_area_struct *vma, unsigned int flags) { /* * When core dumping an enormous anonymous area that nobody * has touched so far, we don't want to allocate unnecessary pages or * page tables. Return error instead of NULL to skip handle_mm_fault, * then get_dump_page() will return NULL to leave a hole in the dump. * But we can only make this optimization where a hole would surely * be zero-filled if handle_mm_fault() actually did handle it. */ if ((flags & FOLL_DUMP) && (!vma->vm_ops || !vma->vm_ops->fault)) return ERR_PTR(-EFAULT); return NULL; } static int follow_pfn_pte(struct vm_area_struct *vma, unsigned long address, pte_t *pte, unsigned int flags) { /* No page to get reference */ if (flags & FOLL_GET) return -EFAULT; if (flags & FOLL_TOUCH) { pte_t entry = *pte; if (flags & FOLL_WRITE) entry = pte_mkdirty(entry); entry = pte_mkyoung(entry); if (!pte_same(*pte, entry)) { set_pte_at(vma->vm_mm, address, pte, entry); update_mmu_cache(vma, address, pte); } } /* Proper page table entry exists, but no corresponding struct page */ return -EEXIST; } /* * FOLL_FORCE can write to even unwritable pte's, but only * after we've gone through a COW cycle and they are dirty. 
*/ static inline bool can_follow_write_pte(pte_t pte, unsigned int flags) { return pte_write(pte) || ((flags & FOLL_FORCE) && (flags & FOLL_COW) && pte_dirty(pte)); } static struct page *follow_page_pte(struct vm_area_struct *vma, unsigned long address, pmd_t *pmd, unsigned int flags, struct dev_pagemap **pgmap) { struct mm_struct *mm = vma->vm_mm; struct page *page; spinlock_t *ptl; pte_t *ptep, pte; retry: if (unlikely(pmd_bad(*pmd))) return no_page_table(vma, flags); ptep = pte_offset_map_lock(mm, pmd, address, &ptl); pte = *ptep; if (!pte_present(pte)) { swp_entry_t entry; /* * KSM's break_ksm() relies upon recognizing a ksm page * even while it is being migrated, so for that case we * need migration_entry_wait(). */ if (likely(!(flags & FOLL_MIGRATION))) goto no_page; if (pte_none(pte)) goto no_page; entry = pte_to_swp_entry(pte); if (!is_migration_entry(entry)) goto no_page; pte_unmap_unlock(ptep, ptl); migration_entry_wait(mm, pmd, address); goto retry; } if ((flags & FOLL_NUMA) && pte_protnone(pte)) goto no_page; if ((flags & FOLL_WRITE) && !can_follow_write_pte(pte, flags)) { pte_unmap_unlock(ptep, ptl); return NULL; } page = vm_normal_page(vma, address, pte); if (!page && pte_devmap(pte) && (flags & FOLL_GET)) { /* * Only return device mapping pages in the FOLL_GET case since * they are only valid while holding the pgmap reference. 
*/ *pgmap = get_dev_pagemap(pte_pfn(pte), *pgmap); if (*pgmap) page = pte_page(pte); else goto no_page; } else if (unlikely(!page)) { if (flags & FOLL_DUMP) { /* Avoid special (like zero) pages in core dumps */ page = ERR_PTR(-EFAULT); goto out; } if (is_zero_pfn(pte_pfn(pte))) { page = pte_page(pte); } else { int ret; ret = follow_pfn_pte(vma, address, ptep, flags); page = ERR_PTR(ret); goto out; } } if (flags & FOLL_SPLIT && PageTransCompound(page)) { int ret; get_page(page); pte_unmap_unlock(ptep, ptl); lock_page(page); ret = split_huge_page(page); unlock_page(page); put_page(page); if (ret) return ERR_PTR(ret); goto retry; } if (flags & FOLL_GET) { if (unlikely(!try_get_page(page))) { page = ERR_PTR(-ENOMEM); goto out; } } if (flags & FOLL_TOUCH) { if ((flags & FOLL_WRITE) && !pte_dirty(pte) && !PageDirty(page)) set_page_dirty(page); /* * pte_mkyoung() would be more correct here, but atomic care * is needed to avoid losing the dirty bit: it is easier to use * mark_page_accessed(). */ mark_page_accessed(page); } if ((flags & FOLL_MLOCK) && (vma->vm_flags & VM_LOCKED)) { /* Do not mlock pte-mapped THP */ if (PageTransCompound(page)) goto out; /* * The preliminary mapping check is mainly to avoid the * pointless overhead of lock_page on the ZERO_PAGE * which might bounce very badly if there is contention. * * If the page is already locked, we don't need to * handle it now - vmscan will handle it later if and * when it attempts to reclaim the page. */ if (page->mapping && trylock_page(page)) { lru_add_drain(); /* push cached pages to LRU */ /* * Because we lock page here, and migration is * blocked by the pte's page reference, and we * know the page is still mapped, we don't even * need to check for file-cache page truncation. 
*/ mlock_vma_page(page); unlock_page(page); } } out: pte_unmap_unlock(ptep, ptl); return page; no_page: pte_unmap_unlock(ptep, ptl); if (!pte_none(pte)) return NULL; return no_page_table(vma, flags); } static struct page *follow_pmd_mask(struct vm_area_struct *vma, unsigned long address, pud_t *pudp, unsigned int flags, struct follow_page_context *ctx) { pmd_t *pmd, pmdval; spinlock_t *ptl; struct page *page; struct mm_struct *mm = vma->vm_mm; pmd = pmd_offset(pudp, address); /* * The READ_ONCE() will stabilize the pmdval in a register or * on the stack so that it will stop changing under the code. */ pmdval = READ_ONCE(*pmd); if (pmd_none(pmdval)) return no_page_table(vma, flags); if (pmd_huge(pmdval) && vma->vm_flags & VM_HUGETLB) { page = follow_huge_pmd(mm, address, pmd, flags); if (page) return page; return no_page_table(vma, flags); } if (is_hugepd(__hugepd(pmd_val(pmdval)))) { page = follow_huge_pd(vma, address, __hugepd(pmd_val(pmdval)), flags, PMD_SHIFT); if (page) return page; return no_page_table(vma, flags); } retry: if (!pmd_present(pmdval)) { if (likely(!(flags & FOLL_MIGRATION))) return no_page_table(vma, flags); VM_BUG_ON(thp_migration_supported() && !is_pmd_migration_entry(pmdval)); if (is_pmd_migration_entry(pmdval)) pmd_migration_entry_wait(mm, pmd); pmdval = READ_ONCE(*pmd); /* * MADV_DONTNEED may convert the pmd to null because * mmap_sem is held in read mode */ if (pmd_none(pmdval)) return no_page_table(vma, flags); goto retry; } if (pmd_devmap(pmdval)) { ptl = pmd_lock(mm, pmd); page = follow_devmap_pmd(vma, address, pmd, flags, &ctx->pgmap); spin_unlock(ptl); if (page) return page; } if (likely(!pmd_trans_huge(pmdval))) return follow_page_pte(vma, address, pmd, flags, &ctx->pgmap); if ((flags & FOLL_NUMA) && pmd_protnone(pmdval)) return no_page_table(vma, flags); retry_locked: ptl = pmd_lock(mm, pmd); if (unlikely(pmd_none(*pmd))) { spin_unlock(ptl); return no_page_table(vma, flags); } if (unlikely(!pmd_present(*pmd))) { spin_unlock(ptl); if 
(likely(!(flags & FOLL_MIGRATION))) return no_page_table(vma, flags); pmd_migration_entry_wait(mm, pmd); goto retry_locked; } if (unlikely(!pmd_trans_huge(*pmd))) { spin_unlock(ptl); return follow_page_pte(vma, address, pmd, flags, &ctx->pgmap); } if (flags & FOLL_SPLIT) { int ret; page = pmd_page(*pmd); if (is_huge_zero_page(page)) { spin_unlock(ptl); ret = 0; split_huge_pmd(vma, pmd, address); if (pmd_trans_unstable(pmd)) ret = -EBUSY; } else { if (unlikely(!try_get_page(page))) { spin_unlock(ptl); return ERR_PTR(-ENOMEM); } spin_unlock(ptl); lock_page(page); ret = split_huge_page(page); unlock_page(page); put_page(page); if (pmd_none(*pmd)) return no_page_table(vma, flags); } return ret ? ERR_PTR(ret) : follow_page_pte(vma, address, pmd, flags, &ctx->pgmap); } page = follow_trans_huge_pmd(vma, address, pmd, flags); spin_unlock(ptl); ctx->page_mask = HPAGE_PMD_NR - 1; return page; } static struct page *follow_pud_mask(struct vm_area_struct *vma, unsigned long address, p4d_t *p4dp, unsigned int flags, struct follow_page_context *ctx) { pud_t *pud; spinlock_t *ptl; struct page *page; struct mm_struct *mm = vma->vm_mm; pud = pud_offset(p4dp, address); if (pud_none(*pud)) return no_page_table(vma, flags); if (pud_huge(*pud) && vma->vm_flags & VM_HUGETLB) { page = follow_huge_pud(mm, address, pud, flags); if (page) return page; return no_page_table(vma, flags); } if (is_hugepd(__hugepd(pud_val(*pud)))) { page = follow_huge_pd(vma, address, __hugepd(pud_val(*pud)), flags, PUD_SHIFT); if (page) return page; return no_page_table(vma, flags); } if (pud_devmap(*pud)) { ptl = pud_lock(mm, pud); page = follow_devmap_pud(vma, address, pud, flags, &ctx->pgmap); spin_unlock(ptl); if (page) return page; } if (unlikely(pud_bad(*pud))) return no_page_table(vma, flags); return follow_pmd_mask(vma, address, pud, flags, ctx); } static struct page *follow_p4d_mask(struct vm_area_struct *vma, unsigned long address, pgd_t *pgdp, unsigned int flags, struct follow_page_context *ctx) { 
p4d_t *p4d; struct page *page; p4d = p4d_offset(pgdp, address); if (p4d_none(*p4d)) return no_page_table(vma, flags); BUILD_BUG_ON(p4d_huge(*p4d)); if (unlikely(p4d_bad(*p4d))) return no_page_table(vma, flags); if (is_hugepd(__hugepd(p4d_val(*p4d)))) { page = follow_huge_pd(vma, address, __hugepd(p4d_val(*p4d)), flags, P4D_SHIFT); if (page) return page; return no_page_table(vma, flags); } return follow_pud_mask(vma, address, p4d, flags, ctx); } /** * follow_page_mask - look up a page descriptor from a user-virtual address * @vma: vm_area_struct mapping @address * @address: virtual address to look up * @flags: flags modifying lookup behaviour * @ctx: contains dev_pagemap for %ZONE_DEVICE memory pinning and a * pointer to output page_mask * * @flags can have FOLL_ flags set, defined in <linux/mm.h> * * When getting pages from ZONE_DEVICE memory, the @ctx->pgmap caches * the device's dev_pagemap metadata to avoid repeating expensive lookups. * * On output, the @ctx->page_mask is set according to the size of the page. * * Return: the mapped (struct page *), %NULL if no mapping exists, or * an error pointer if there is a mapping to something not represented * by a page descriptor (see also vm_normal_page()). 
*/ struct page *follow_page_mask(struct vm_area_struct *vma, unsigned long address, unsigned int flags, struct follow_page_context *ctx) { pgd_t *pgd; struct page *page; struct mm_struct *mm = vma->vm_mm; ctx->page_mask = 0; /* make this handle hugepd */ page = follow_huge_addr(mm, address, flags & FOLL_WRITE); if (!IS_ERR(page)) { BUG_ON(flags & FOLL_GET); return page; } pgd = pgd_offset(mm, address); if (pgd_none(*pgd) || unlikely(pgd_bad(*pgd))) return no_page_table(vma, flags); if (pgd_huge(*pgd)) { page = follow_huge_pgd(mm, address, pgd, flags); if (page) return page; return no_page_table(vma, flags); } if (is_hugepd(__hugepd(pgd_val(*pgd)))) { page = follow_huge_pd(vma, address, __hugepd(pgd_val(*pgd)), flags, PGDIR_SHIFT); if (page) return page; return no_page_table(vma, flags); } return follow_p4d_mask(vma, address, pgd, flags, ctx); } struct page *follow_page(struct vm_area_struct *vma, unsigned long address, unsigned int foll_flags) { struct follow_page_context ctx = { NULL }; struct page *page; page = follow_page_mask(vma, address, foll_flags, &ctx); if (ctx.pgmap) put_dev_pagemap(ctx.pgmap); return page; } static int get_gate_page(struct mm_struct *mm, unsigned long address, unsigned int gup_flags, struct vm_area_struct **vma, struct page **page) { pgd_t *pgd; p4d_t *p4d; pud_t *pud; pmd_t *pmd; pte_t *pte; int ret = -EFAULT; /* user gate pages are read-only */ if (gup_flags & FOLL_WRITE) return -EFAULT; if (address > TASK_SIZE) pgd = pgd_offset_k(address); else pgd = pgd_offset_gate(mm, address); BUG_ON(pgd_none(*pgd)); p4d = p4d_offset(pgd, address); BUG_ON(p4d_none(*p4d)); pud = pud_offset(p4d, address); BUG_ON(pud_none(*pud)); pmd = pmd_offset(pud, address); if (!pmd_present(*pmd)) return -EFAULT; VM_BUG_ON(pmd_trans_huge(*pmd)); pte = pte_offset_map(pmd, address); if (pte_none(*pte)) goto unmap; *vma = get_gate_vma(mm); if (!page) goto out; *page = vm_normal_page(*vma, address, *pte); if (!*page) { if ((gup_flags & FOLL_DUMP) || 
!is_zero_pfn(pte_pfn(*pte))) goto unmap; *page = pte_page(*pte); /* * This should never happen (a device public page in the gate * area). */ if (is_device_public_page(*page)) goto unmap; } if (unlikely(!try_get_page(*page))) { ret = -ENOMEM; goto unmap; } out: ret = 0; unmap: pte_unmap(pte); return ret; } /* * mmap_sem must be held on entry. If @nonblocking != NULL and * *@flags does not include FOLL_NOWAIT, the mmap_sem may be released. * If it is, *@nonblocking will be set to 0 and -EBUSY returned. */ static int faultin_page(struct task_struct *tsk, struct vm_area_struct *vma, unsigned long address, unsigned int *flags, int *nonblocking) { unsigned int fault_flags = 0; vm_fault_t ret; /* mlock all present pages, but do not fault in new pages */ if ((*flags & (FOLL_POPULATE | FOLL_MLOCK)) == FOLL_MLOCK) return -ENOENT; if (*flags & FOLL_WRITE) fault_flags |= FAULT_FLAG_WRITE; if (*flags & FOLL_REMOTE) fault_flags |= FAULT_FLAG_REMOTE; if (nonblocking) fault_flags |= FAULT_FLAG_ALLOW_RETRY; if (*flags & FOLL_NOWAIT) fault_flags |= FAULT_FLAG_ALLOW_RETRY | FAULT_FLAG_RETRY_NOWAIT; if (*flags & FOLL_TRIED) { VM_WARN_ON_ONCE(fault_flags & FAULT_FLAG_ALLOW_RETRY); fault_flags |= FAULT_FLAG_TRIED; } ret = handle_mm_fault(vma, address, fault_flags); if (ret & VM_FAULT_ERROR) { int err = vm_fault_to_errno(ret, *flags); if (err) return err; BUG(); } if (tsk) { if (ret & VM_FAULT_MAJOR) tsk->maj_flt++; else tsk->min_flt++; } if (ret & VM_FAULT_RETRY) { if (nonblocking && !(fault_flags & FAULT_FLAG_RETRY_NOWAIT)) *nonblocking = 0; return -EBUSY; } /* * The VM_FAULT_WRITE bit tells us that do_wp_page has broken COW when * necessary, even if maybe_mkwrite decided not to set pte_write. We * can thus safely do subsequent page lookups as if they were reads. 
* But only do so when looping for pte_write is futile: in some cases * userspace may also be wanting to write to the gotten user page, * which a read fault here might prevent (a readonly page might get * reCOWed by userspace write). */ if ((ret & VM_FAULT_WRITE) && !(vma->vm_flags & VM_WRITE)) *flags |= FOLL_COW; return 0; } static int check_vma_flags(struct vm_area_struct *vma, unsigned long gup_flags) { vm_flags_t vm_flags = vma->vm_flags; int write = (gup_flags & FOLL_WRITE); int foreign = (gup_flags & FOLL_REMOTE); if (vm_flags & (VM_IO | VM_PFNMAP)) return -EFAULT; if (gup_flags & FOLL_ANON && !vma_is_anonymous(vma)) return -EFAULT; if (write) { if (!(vm_flags & VM_WRITE)) { if (!(gup_flags & FOLL_FORCE)) return -EFAULT; /* * We used to let the write,force case do COW in a * VM_MAYWRITE VM_SHARED !VM_WRITE vma, so ptrace could * set a breakpoint in a read-only mapping of an * executable, without corrupting the file (yet only * when that file had been opened for writing!). * Anon pages in shared mappings are surprising: now * just reject it. */ if (!is_cow_mapping(vm_flags)) return -EFAULT; } } else if (!(vm_flags & VM_READ)) { if (!(gup_flags & FOLL_FORCE)) return -EFAULT; /* * Is there actually any vma we can reach here which does not * have VM_MAYREAD set? */ if (!(vm_flags & VM_MAYREAD)) return -EFAULT; } /* * gups are always data accesses, not instruction * fetches, so execute=false here */ if (!arch_vma_access_permitted(vma, write, false, foreign)) return -EFAULT; return 0; } /** * __get_user_pages() - pin user pages in memory * @tsk: task_struct of target task * @mm: mm_struct of target mm * @start: starting user address * @nr_pages: number of pages from start to pin * @gup_flags: flags modifying pin behaviour * @pages: array that receives pointers to the pages pinned. * Should be at least nr_pages long. Or NULL, if caller * only intends to ensure the pages are faulted in. * @vmas: array of pointers to vmas corresponding to each page. 
* Or NULL if the caller does not require them. * @nonblocking: whether waiting for disk IO or mmap_sem contention * * Returns number of pages pinned. This may be fewer than the number * requested. If nr_pages is 0 or negative, returns 0. If no pages * were pinned, returns -errno. Each page returned must be released * with a put_page() call when it is finished with. vmas will only * remain valid while mmap_sem is held. * * Must be called with mmap_sem held. It may be released. See below. * * __get_user_pages walks a process's page tables and takes a reference to * each struct page that each user address corresponds to at a given * instant. That is, it takes the page that would be accessed if a user * thread accesses the given user virtual address at that instant. * * This does not guarantee that the page exists in the user mappings when * __get_user_pages returns, and there may even be a completely different * page there in some cases (eg. if mmapped pagecache has been invalidated * and subsequently re faulted). However it does guarantee that the page * won't be freed completely. And mostly callers simply care that the page * contains data that was valid *at some point in time*. Typically, an IO * or similar operation cannot guarantee anything stronger anyway because * locks can't be held over the syscall boundary. * * If @gup_flags & FOLL_WRITE == 0, the page must not be written to. If * the page is written to, set_page_dirty (or set_page_dirty_lock, as * appropriate) must be called after the page is finished with, and * before put_page is called. * * If @nonblocking != NULL, __get_user_pages will not wait for disk IO * or mmap_sem contention, and if waiting is needed to pin all pages, * *@nonblocking will be set to 0. Further, if @gup_flags does not * include FOLL_NOWAIT, the mmap_sem will be released via up_read() in * this case. 
* * A caller using such a combination of @nonblocking and @gup_flags * must therefore hold the mmap_sem for reading only, and recognize * when it's been released. Otherwise, it must be held for either * reading or writing and will not be released. * * In most cases, get_user_pages or get_user_pages_fast should be used * instead of __get_user_pages. __get_user_pages should be used only if * you need some special @gup_flags. */ static long __get_user_pages(struct task_struct *tsk, struct mm_struct *mm, unsigned long start, unsigned long nr_pages, unsigned int gup_flags, struct page **pages, struct vm_area_struct **vmas, int *nonblocking) { long ret = 0, i = 0; struct vm_area_struct *vma = NULL; struct follow_page_context ctx = { NULL }; if (!nr_pages) return 0; VM_BUG_ON(!!pages != !!(gup_flags & FOLL_GET)); /* * If FOLL_FORCE is set then do not force a full fault as the hinting * fault information is unrelated to the reference behaviour of a task * using the address space */ if (!(gup_flags & FOLL_FORCE)) gup_flags |= FOLL_NUMA; do { struct page *page; unsigned int foll_flags = gup_flags; unsigned int page_increm; /* first iteration or cross vma bound */ if (!vma || start >= vma->vm_end) { vma = find_extend_vma(mm, start); if (!vma && in_gate_area(mm, start)) { ret = get_gate_page(mm, start & PAGE_MASK, gup_flags, &vma, pages ? &pages[i] : NULL); if (ret) goto out; ctx.page_mask = 0; goto next_page; } if (!vma || check_vma_flags(vma, gup_flags)) { ret = -EFAULT; goto out; } if (is_vm_hugetlb_page(vma)) { i = follow_hugetlb_page(mm, vma, pages, vmas, &start, &nr_pages, i, gup_flags, nonblocking); continue; } } retry: /* * If we have a pending SIGKILL, don't keep faulting pages and * potentially allocating memory. 
*/ if (fatal_signal_pending(current)) { ret = -ERESTARTSYS; goto out; } cond_resched(); page = follow_page_mask(vma, start, foll_flags, &ctx); if (!page) { ret = faultin_page(tsk, vma, start, &foll_flags, nonblocking); switch (ret) { case 0: goto retry; case -EBUSY: ret = 0; /* FALLTHRU */ case -EFAULT: case -ENOMEM: case -EHWPOISON: goto out; case -ENOENT: goto next_page; } BUG(); } else if (PTR_ERR(page) == -EEXIST) { /* * Proper page table entry exists, but no corresponding * struct page. */ goto next_page; } else if (IS_ERR(page)) { ret = PTR_ERR(page); goto out; } if (pages) { pages[i] = page; flush_anon_page(vma, page, start); flush_dcache_page(page); ctx.page_mask = 0; } next_page: if (vmas) { vmas[i] = vma; ctx.page_mask = 0; } page_increm = 1 + (~(start >> PAGE_SHIFT) & ctx.page_mask); if (page_increm > nr_pages) page_increm = nr_pages; i += page_increm; start += page_increm * PAGE_SIZE; nr_pages -= page_increm; } while (nr_pages); out: if (ctx.pgmap) put_dev_pagemap(ctx.pgmap); return i ? i : ret; } static bool vma_permits_fault(struct vm_area_struct *vma, unsigned int fault_flags) { bool write = !!(fault_flags & FAULT_FLAG_WRITE); bool foreign = !!(fault_flags & FAULT_FLAG_REMOTE); vm_flags_t vm_flags = write ? VM_WRITE : VM_READ; if (!(vm_flags & vma->vm_flags)) return false; /* * The architecture might have a hardware protection * mechanism other than read/write that can deny access. * * gup always represents data access, not instruction * fetches, so execute=false here: */ if (!arch_vma_access_permitted(vma, write, false, foreign)) return false; return true; } /* * fixup_user_fault() - manually resolve a user page fault * @tsk: the task_struct to use for page fault accounting, or * NULL if faults are not to be recorded. 
* @mm: mm_struct of target mm * @address: user address * @fault_flags:flags to pass down to handle_mm_fault() * @unlocked: did we unlock the mmap_sem while retrying, maybe NULL if caller * does not allow retry * * This is meant to be called in the specific scenario where for locking reasons * we try to access user memory in atomic context (within a pagefault_disable() * section), this returns -EFAULT, and we want to resolve the user fault before * trying again. * * Typically this is meant to be used by the futex code. * * The main difference with get_user_pages() is that this function will * unconditionally call handle_mm_fault() which will in turn perform all the * necessary SW fixup of the dirty and young bits in the PTE, while * get_user_pages() only guarantees to update these in the struct page. * * This is important for some architectures where those bits also gate the * access permission to the page because they are maintained in software. On * such architectures, gup() will not be enough to make a subsequent access * succeed. * * This function will not return with an unlocked mmap_sem. So it has not the * same semantics wrt the @mm->mmap_sem as does filemap_fault(). 
*/ int fixup_user_fault(struct task_struct *tsk, struct mm_struct *mm, unsigned long address, unsigned int fault_flags, bool *unlocked) { struct vm_area_struct *vma; vm_fault_t ret, major = 0; if (unlocked) fault_flags |= FAULT_FLAG_ALLOW_RETRY; retry: vma = find_extend_vma(mm, address); if (!vma || address < vma->vm_start) return -EFAULT; if (!vma_permits_fault(vma, fault_flags)) return -EFAULT; ret = handle_mm_fault(vma, address, fault_flags); major |= ret & VM_FAULT_MAJOR; if (ret & VM_FAULT_ERROR) { int err = vm_fault_to_errno(ret, 0); if (err) return err; BUG(); } if (ret & VM_FAULT_RETRY) { down_read(&mm->mmap_sem); if (!(fault_flags & FAULT_FLAG_TRIED)) { *unlocked = true; fault_flags &= ~FAULT_FLAG_ALLOW_RETRY; fault_flags |= FAULT_FLAG_TRIED; goto retry; } } if (tsk) { if (major) tsk->maj_flt++; else tsk->min_flt++; } return 0; } EXPORT_SYMBOL_GPL(fixup_user_fault); static __always_inline long __get_user_pages_locked(struct task_struct *tsk, struct mm_struct *mm, unsigned long start, unsigned long nr_pages, struct page **pages, struct vm_area_struct **vmas, int *locked, unsigned int flags) { long ret, pages_done; bool lock_dropped; if (locked) { /* if VM_FAULT_RETRY can be returned, vmas become invalid */ BUG_ON(vmas); /* check caller initialized locked */ BUG_ON(*locked != 1); } if (pages) flags |= FOLL_GET; pages_done = 0; lock_dropped = false; for (;;) { ret = __get_user_pages(tsk, mm, start, nr_pages, flags, pages, vmas, locked); if (!locked) /* VM_FAULT_RETRY couldn't trigger, bypass */ return ret; /* VM_FAULT_RETRY cannot return errors */ if (!*locked) { BUG_ON(ret < 0); BUG_ON(ret >= nr_pages); } if (!pages) /* If it's a prefault don't insist harder */ return ret; if (ret > 0) { nr_pages -= ret; pages_done += ret; if (!nr_pages) break; } if (*locked) { /* * VM_FAULT_RETRY didn't trigger or it was a * FOLL_NOWAIT. 
*/ if (!pages_done) pages_done = ret; break; } /* VM_FAULT_RETRY triggered, so seek to the faulting offset */ pages += ret; start += ret << PAGE_SHIFT; /* * Repeat on the address that fired VM_FAULT_RETRY * without FAULT_FLAG_ALLOW_RETRY but with * FAULT_FLAG_TRIED. */ *locked = 1; lock_dropped = true; down_read(&mm->mmap_sem); ret = __get_user_pages(tsk, mm, start, 1, flags | FOLL_TRIED, pages, NULL, NULL); if (ret != 1) { BUG_ON(ret > 1); if (!pages_done) pages_done = ret; break; } nr_pages--; pages_done++; if (!nr_pages) break; pages++; start += PAGE_SIZE; } if (lock_dropped && *locked) { /* * We must let the caller know we temporarily dropped the lock * and so the critical section protected by it was lost. */ up_read(&mm->mmap_sem); *locked = 0; } return pages_done; } /* * We can leverage the VM_FAULT_RETRY functionality in the page fault * paths better by using either get_user_pages_locked() or * get_user_pages_unlocked(). * * get_user_pages_locked() is suitable to replace the form: * * down_read(&mm->mmap_sem); * do_something() * get_user_pages(tsk, mm, ..., pages, NULL); * up_read(&mm->mmap_sem); * * to: * * int locked = 1; * down_read(&mm->mmap_sem); * do_something() * get_user_pages_locked(tsk, mm, ..., pages, &locked); * if (locked) * up_read(&mm->mmap_sem); */ long get_user_pages_locked(unsigned long start, unsigned long nr_pages, unsigned int gup_flags, struct page **pages, int *locked) { return __get_user_pages_locked(current, current->mm, start, nr_pages, pages, NULL, locked, gup_flags | FOLL_TOUCH); } EXPORT_SYMBOL(get_user_pages_locked); /* * get_user_pages_unlocked() is suitable to replace the form: * * down_read(&mm->mmap_sem); * get_user_pages(tsk, mm, ..., pages, NULL); * up_read(&mm->mmap_sem); * * with: * * get_user_pages_unlocked(tsk, mm, ..., pages); * * It is functionally equivalent to get_user_pages_fast so * get_user_pages_fast should be used instead if specific gup_flags * (e.g. FOLL_FORCE) are not required. 
*/ long get_user_pages_unlocked(unsigned long start, unsigned long nr_pages, struct page **pages, unsigned int gup_flags) { struct mm_struct *mm = current->mm; int locked = 1; long ret; down_read(&mm->mmap_sem); ret = __get_user_pages_locked(current, mm, start, nr_pages, pages, NULL, &locked, gup_flags | FOLL_TOUCH); if (locked) up_read(&mm->mmap_sem); return ret; } EXPORT_SYMBOL(get_user_pages_unlocked); /* * get_user_pages_remote() - pin user pages in memory * @tsk: the task_struct to use for page fault accounting, or * NULL if faults are not to be recorded. * @mm: mm_struct of target mm * @start: starting user address * @nr_pages: number of pages from start to pin * @gup_flags: flags modifying lookup behaviour * @pages: array that receives pointers to the pages pinned. * Should be at least nr_pages long. Or NULL, if caller * only intends to ensure the pages are faulted in. * @vmas: array of pointers to vmas corresponding to each page. * Or NULL if the caller does not require them. * @locked: pointer to lock flag indicating whether lock is held and * subsequently whether VM_FAULT_RETRY functionality can be * utilised. Lock must initially be held. * * Returns number of pages pinned. This may be fewer than the number * requested. If nr_pages is 0 or negative, returns 0. If no pages * were pinned, returns -errno. Each page returned must be released * with a put_page() call when it is finished with. vmas will only * remain valid while mmap_sem is held. * * Must be called with mmap_sem held for read or write. * * get_user_pages walks a process's page tables and takes a reference to * each struct page that each user address corresponds to at a given * instant. That is, it takes the page that would be accessed if a user * thread accesses the given user virtual address at that instant. * * This does not guarantee that the page exists in the user mappings when * get_user_pages returns, and there may even be a completely different * page there in some cases (eg. 
if mmapped pagecache has been invalidated * and subsequently re faulted). However it does guarantee that the page * won't be freed completely. And mostly callers simply care that the page * contains data that was valid *at some point in time*. Typically, an IO * or similar operation cannot guarantee anything stronger anyway because * locks can't be held over the syscall boundary. * * If gup_flags & FOLL_WRITE == 0, the page must not be written to. If the page * is written to, set_page_dirty (or set_page_dirty_lock, as appropriate) must * be called after the page is finished with, and before put_page is called. * * get_user_pages is typically used for fewer-copy IO operations, to get a * handle on the memory by some means other than accesses via the user virtual * addresses. The pages may be submitted for DMA to devices or accessed via * their kernel linear mapping (via the kmap APIs). Care should be taken to * use the correct cache flushing APIs. * * See also get_user_pages_fast, for performance critical applications. * * get_user_pages should be phased out in favor of * get_user_pages_locked|unlocked or get_user_pages_fast. Nothing * should use get_user_pages because it cannot pass * FAULT_FLAG_ALLOW_RETRY to handle_mm_fault. */ long get_user_pages_remote(struct task_struct *tsk, struct mm_struct *mm, unsigned long start, unsigned long nr_pages, unsigned int gup_flags, struct page **pages, struct vm_area_struct **vmas, int *locked) { return __get_user_pages_locked(tsk, mm, start, nr_pages, pages, vmas, locked, gup_flags | FOLL_TOUCH | FOLL_REMOTE); } EXPORT_SYMBOL(get_user_pages_remote); /* * This is the same as get_user_pages_remote(), just with a * less-flexible calling convention where we assume that the task * and mm being operated on are the current task's and don't allow * passing of a locked parameter. We also obviously don't pass * FOLL_REMOTE in here. 
*/ long get_user_pages(unsigned long start, unsigned long nr_pages, unsigned int gup_flags, struct page **pages, struct vm_area_struct **vmas) { return __get_user_pages_locked(current, current->mm, start, nr_pages, pages, vmas, NULL, gup_flags | FOLL_TOUCH); } EXPORT_SYMBOL(get_user_pages); #ifdef CONFIG_FS_DAX /* * This is the same as get_user_pages() in that it assumes we are * operating on the current task's mm, but it goes further to validate * that the vmas associated with the address range are suitable for * longterm elevated page reference counts. For example, filesystem-dax * mappings are subject to the lifetime enforced by the filesystem and * we need guarantees that longterm users like RDMA and V4L2 only * establish mappings that have a kernel enforced revocation mechanism. * * "longterm" == userspace controlled elevated page count lifetime. * Contrast this to iov_iter_get_pages() usages which are transient. */ long get_user_pages_longterm(unsigned long start, unsigned long nr_pages, unsigned int gup_flags, struct page **pages, struct vm_area_struct **vmas_arg) { struct vm_area_struct **vmas = vmas_arg; struct vm_area_struct *vma_prev = NULL; long rc, i; if (!pages) return -EINVAL; if (!vmas) { vmas = kcalloc(nr_pages, sizeof(struct vm_area_struct *), GFP_KERNEL); if (!vmas) return -ENOMEM; } rc = get_user_pages(start, nr_pages, gup_flags, pages, vmas); for (i = 0; i < rc; i++) { struct vm_area_struct *vma = vmas[i]; if (vma == vma_prev) continue; vma_prev = vma; if (vma_is_fsdax(vma)) break; } /* * Either get_user_pages() failed, or the vma validation * succeeded, in either case we don't need to put_page() before * returning. */ if (i >= rc) goto out; for (i = 0; i < rc; i++) put_page(pages[i]); rc = -EOPNOTSUPP; out: if (vmas != vmas_arg) kfree(vmas); return rc; } EXPORT_SYMBOL(get_user_pages_longterm); #endif /* CONFIG_FS_DAX */ /** * populate_vma_page_range() - populate a range of pages in the vma. 
* @vma: target vma * @start: start address * @end: end address * @nonblocking: * * This takes care of mlocking the pages too if VM_LOCKED is set. * * return 0 on success, negative error code on error. * * vma->vm_mm->mmap_sem must be held. * * If @nonblocking is NULL, it may be held for read or write and will * be unperturbed. * * If @nonblocking is non-NULL, it must held for read only and may be * released. If it's released, *@nonblocking will be set to 0. */ long populate_vma_page_range(struct vm_area_struct *vma, unsigned long start, unsigned long end, int *nonblocking) { struct mm_struct *mm = vma->vm_mm; unsigned long nr_pages = (end - start) / PAGE_SIZE; int gup_flags; VM_BUG_ON(start & ~PAGE_MASK); VM_BUG_ON(end & ~PAGE_MASK); VM_BUG_ON_VMA(start < vma->vm_start, vma); VM_BUG_ON_VMA(end > vma->vm_end, vma); VM_BUG_ON_MM(!rwsem_is_locked(&mm->mmap_sem), mm); gup_flags = FOLL_TOUCH | FOLL_POPULATE | FOLL_MLOCK; if (vma->vm_flags & VM_LOCKONFAULT) gup_flags &= ~FOLL_POPULATE; /* * We want to touch writable mappings with a write fault in order * to break COW, except for shared mappings because these don't COW * and we would not want to dirty them for nothing. */ if ((vma->vm_flags & (VM_WRITE | VM_SHARED)) == VM_WRITE) gup_flags |= FOLL_WRITE; /* * We want mlock to succeed for regions that have any permissions * other than PROT_NONE. */ if (vma->vm_flags & (VM_READ | VM_WRITE | VM_EXEC)) gup_flags |= FOLL_FORCE; /* * We made sure addr is within a VMA, so the following will * not result in a stack expansion that recurses back here. */ return __get_user_pages(current, mm, start, nr_pages, gup_flags, NULL, NULL, nonblocking); } /* * __mm_populate - populate and/or mlock pages within a range of address space. * * This is used to implement mlock() and the MAP_POPULATE / MAP_LOCKED mmap * flags. VMAs must be already marked with the desired vm_flags, and * mmap_sem must not be held. 
*/ int __mm_populate(unsigned long start, unsigned long len, int ignore_errors) { struct mm_struct *mm = current->mm; unsigned long end, nstart, nend; struct vm_area_struct *vma = NULL; int locked = 0; long ret = 0; end = start + len; for (nstart = start; nstart < end; nstart = nend) { /* * We want to fault in pages for [nstart; end) address range. * Find first corresponding VMA. */ if (!locked) { locked = 1; down_read(&mm->mmap_sem); vma = find_vma(mm, nstart); } else if (nstart >= vma->vm_end) vma = vma->vm_next; if (!vma || vma->vm_start >= end) break; /* * Set [nstart; nend) to intersection of desired address * range with the first VMA. Also, skip undesirable VMA types. */ nend = min(end, vma->vm_end); if (vma->vm_flags & (VM_IO | VM_PFNMAP)) continue; if (nstart < vma->vm_start) nstart = vma->vm_start; /* * Now fault in a range of pages. populate_vma_page_range() * double checks the vma flags, so that it won't mlock pages * if the vma was already munlocked. */ ret = populate_vma_page_range(vma, nstart, nend, &locked); if (ret < 0) { if (ignore_errors) { ret = 0; continue; /* continue at next VMA */ } break; } nend = nstart + ret * PAGE_SIZE; ret = 0; } if (locked) up_read(&mm->mmap_sem); return ret; /* 0 or negative error code */ } /** * get_dump_page() - pin user page in memory while writing it to core dump * @addr: user address * * Returns struct page pointer of user page pinned for dump, * to be freed afterwards by put_page(). * * Returns NULL on any kind of failure - a hole must then be inserted into * the corefile, to preserve alignment with its headers; and also returns * NULL wherever the ZERO_PAGE, or an anonymous pte_none, has been found - * allowing a hole to be left in the corefile to save diskspace. * * Called without mmap_sem, but after all other threads have been killed. 
*/ #ifdef CONFIG_ELF_CORE struct page *get_dump_page(unsigned long addr) { struct vm_area_struct *vma; struct page *page; if (__get_user_pages(current, current->mm, addr, 1, FOLL_FORCE | FOLL_DUMP | FOLL_GET, &page, &vma, NULL) < 1) return NULL; flush_cache_page(vma, addr, page_to_pfn(page)); return page; } #endif /* CONFIG_ELF_CORE */ /* * Generic Fast GUP * * get_user_pages_fast attempts to pin user pages by walking the page * tables directly and avoids taking locks. Thus the walker needs to be * protected from page table pages being freed from under it, and should * block any THP splits. * * One way to achieve this is to have the walker disable interrupts, and * rely on IPIs from the TLB flushing code blocking before the page table * pages are freed. This is unsuitable for architectures that do not need * to broadcast an IPI when invalidating TLBs. * * Another way to achieve this is to batch up page table containing pages * belonging to more than one mm_user, then rcu_sched a callback to free those * pages. Disabling interrupts will allow the fast_gup walker to both block * the rcu_sched callback, and an IPI that we broadcast for splitting THPs * (which is a relatively rare event). The code below adopts this strategy. * * Before activating this code, please be aware that the following assumptions * are currently made: * * *) Either HAVE_RCU_TABLE_FREE is enabled, and tlb_remove_table() is used to * free pages containing page tables or TLB flushing requires IPI broadcast. * * *) ptes can be read atomically by the architecture. * * *) access_ok is sufficient to validate userspace address ranges. * * The last two assumptions can be relaxed by the addition of helper functions. * * This code is based heavily on the PowerPC implementation by Nick Piggin. */ #ifdef CONFIG_HAVE_GENERIC_GUP #ifndef gup_get_pte /* * We assume that the PTE can be read atomically. If this is not the case for * your architecture, please provide the helper. 
*/ static inline pte_t gup_get_pte(pte_t *ptep) { return READ_ONCE(*ptep); } #endif static void undo_dev_pagemap(int *nr, int nr_start, struct page **pages) { while ((*nr) - nr_start) { struct page *page = pages[--(*nr)]; ClearPageReferenced(page); put_page(page); } } /* * Return the compund head page with ref appropriately incremented, * or NULL if that failed. */ static inline struct page *try_get_compound_head(struct page *page, int refs) { struct page *head = compound_head(page); if (WARN_ON_ONCE(page_ref_count(head) < 0)) return NULL; if (unlikely(!page_cache_add_speculative(head, refs))) return NULL; return head; } #ifdef CONFIG_ARCH_HAS_PTE_SPECIAL static int gup_pte_range(pmd_t pmd, unsigned long addr, unsigned long end, int write, struct page **pages, int *nr) { struct dev_pagemap *pgmap = NULL; int nr_start = *nr, ret = 0; pte_t *ptep, *ptem; ptem = ptep = pte_offset_map(&pmd, addr); do { pte_t pte = gup_get_pte(ptep); struct page *head, *page; /* * Similar to the PMD case below, NUMA hinting must take slow * path using the pte_protnone check. */ if (pte_protnone(pte)) goto pte_unmap; if (!pte_access_permitted(pte, write)) goto pte_unmap; if (pte_devmap(pte)) { pgmap = get_dev_pagemap(pte_pfn(pte), pgmap); if (unlikely(!pgmap)) { undo_dev_pagemap(nr, nr_start, pages); goto pte_unmap; } } else if (pte_special(pte)) goto pte_unmap; VM_BUG_ON(!pfn_valid(pte_pfn(pte))); page = pte_page(pte); head = try_get_compound_head(page, 1); if (!head) goto pte_unmap; if (unlikely(pte_val(pte) != pte_val(*ptep))) { put_page(head); goto pte_unmap; } VM_BUG_ON_PAGE(compound_head(page) != head, page); SetPageReferenced(page); pages[*nr] = page; (*nr)++; } while (ptep++, addr += PAGE_SIZE, addr != end); ret = 1; pte_unmap: if (pgmap) put_dev_pagemap(pgmap); pte_unmap(ptem); return ret; } #else /* * If we can't determine whether or not a pte is special, then fail immediately * for ptes. Note, we can still pin HugeTLB and THP as these are guaranteed not * to be special. 
* * For a futex to be placed on a THP tail page, get_futex_key requires a * __get_user_pages_fast implementation that can pin pages. Thus it's still * useful to have gup_huge_pmd even if we can't operate on ptes. */ static int gup_pte_range(pmd_t pmd, unsigned long addr, unsigned long end, int write, struct page **pages, int *nr) { return 0; } #endif /* CONFIG_ARCH_HAS_PTE_SPECIAL */ #if defined(__HAVE_ARCH_PTE_DEVMAP) && defined(CONFIG_TRANSPARENT_HUGEPAGE) static int __gup_device_huge(unsigned long pfn, unsigned long addr, unsigned long end, struct page **pages, int *nr) { int nr_start = *nr; struct dev_pagemap *pgmap = NULL; do { struct page *page = pfn_to_page(pfn); pgmap = get_dev_pagemap(pfn, pgmap); if (unlikely(!pgmap)) { undo_dev_pagemap(nr, nr_start, pages); return 0; } SetPageReferenced(page); pages[*nr] = page; get_page(page); (*nr)++; pfn++; } while (addr += PAGE_SIZE, addr != end); if (pgmap) put_dev_pagemap(pgmap); return 1; } static int __gup_device_huge_pmd(pmd_t orig, pmd_t *pmdp, unsigned long addr, unsigned long end, struct page **pages, int *nr) { unsigned long fault_pfn; int nr_start = *nr; fault_pfn = pmd_pfn(orig) + ((addr & ~PMD_MASK) >> PAGE_SHIFT); if (!__gup_device_huge(fault_pfn, addr, end, pages, nr)) return 0; if (unlikely(pmd_val(orig) != pmd_val(*pmdp))) { undo_dev_pagemap(nr, nr_start, pages); return 0; } return 1; } static int __gup_device_huge_pud(pud_t orig, pud_t *pudp, unsigned long addr, unsigned long end, struct page **pages, int *nr) { unsigned long fault_pfn; int nr_start = *nr; fault_pfn = pud_pfn(orig) + ((addr & ~PUD_MASK) >> PAGE_SHIFT); if (!__gup_device_huge(fault_pfn, addr, end, pages, nr)) return 0; if (unlikely(pud_val(orig) != pud_val(*pudp))) { undo_dev_pagemap(nr, nr_start, pages); return 0; } return 1; } #else static int __gup_device_huge_pmd(pmd_t orig, pmd_t *pmdp, unsigned long addr, unsigned long end, struct page **pages, int *nr) { BUILD_BUG(); return 0; } static int __gup_device_huge_pud(pud_t pud, pud_t 
*pudp, unsigned long addr, unsigned long end, struct page **pages, int *nr) { BUILD_BUG(); return 0; } #endif static int gup_huge_pmd(pmd_t orig, pmd_t *pmdp, unsigned long addr, unsigned long end, int write, struct page **pages, int *nr) { struct page *head, *page; int refs; if (!pmd_access_permitted(orig, write)) return 0; if (pmd_devmap(orig)) return __gup_device_huge_pmd(orig, pmdp, addr, end, pages, nr); refs = 0; page = pmd_page(orig) + ((addr & ~PMD_MASK) >> PAGE_SHIFT); do { pages[*nr] = page; (*nr)++; page++; refs++; } while (addr += PAGE_SIZE, addr != end); head = try_get_compound_head(pmd_page(orig), refs); if (!head) { *nr -= refs; return 0; } if (unlikely(pmd_val(orig) != pmd_val(*pmdp))) { *nr -= refs; while (refs--) put_page(head); return 0; } SetPageReferenced(head); return 1; } static int gup_huge_pud(pud_t orig, pud_t *pudp, unsigned long addr, unsigned long end, int write, struct page **pages, int *nr) { struct page *head, *page; int refs; if (!pud_access_permitted(orig, write)) return 0; if (pud_devmap(orig)) return __gup_device_huge_pud(orig, pudp, addr, end, pages, nr); refs = 0; page = pud_page(orig) + ((addr & ~PUD_MASK) >> PAGE_SHIFT); do { pages[*nr] = page; (*nr)++; page++; refs++; } while (addr += PAGE_SIZE, addr != end); head = try_get_compound_head(pud_page(orig), refs); if (!head) { *nr -= refs; return 0; } if (unlikely(pud_val(orig) != pud_val(*pudp))) { *nr -= refs; while (refs--) put_page(head); return 0; } SetPageReferenced(head); return 1; } static int gup_huge_pgd(pgd_t orig, pgd_t *pgdp, unsigned long addr, unsigned long end, int write, struct page **pages, int *nr) { int refs; struct page *head, *page; if (!pgd_access_permitted(orig, write)) return 0; BUILD_BUG_ON(pgd_devmap(orig)); refs = 0; page = pgd_page(orig) + ((addr & ~PGDIR_MASK) >> PAGE_SHIFT); do { pages[*nr] = page; (*nr)++; page++; refs++; } while (addr += PAGE_SIZE, addr != end); head = try_get_compound_head(pgd_page(orig), refs); if (!head) { *nr -= refs; return 
0; } if (unlikely(pgd_val(orig) != pgd_val(*pgdp))) { *nr -= refs; while (refs--) put_page(head); return 0; } SetPageReferenced(head); return 1; } static int gup_pmd_range(pud_t pud, unsigned long addr, unsigned long end, int write, struct page **pages, int *nr) { unsigned long next; pmd_t *pmdp; pmdp = pmd_offset(&pud, addr); do { pmd_t pmd = READ_ONCE(*pmdp); next = pmd_addr_end(addr, end); if (!pmd_present(pmd)) return 0; if (unlikely(pmd_trans_huge(pmd) || pmd_huge(pmd) || pmd_devmap(pmd))) { /* * NUMA hinting faults need to be handled in the GUP * slowpath for accounting purposes and so that they * can be serialised against THP migration. */ if (pmd_protnone(pmd)) return 0; if (!gup_huge_pmd(pmd, pmdp, addr, next, write, pages, nr)) return 0; } else if (unlikely(is_hugepd(__hugepd(pmd_val(pmd))))) { /* * architecture have different format for hugetlbfs * pmd format and THP pmd format */ if (!gup_huge_pd(__hugepd(pmd_val(pmd)), addr, PMD_SHIFT, next, write, pages, nr)) return 0; } else if (!gup_pte_range(pmd, addr, next, write, pages, nr)) return 0; } while (pmdp++, addr = next, addr != end); return 1; } static int gup_pud_range(p4d_t p4d, unsigned long addr, unsigned long end, int write, struct page **pages, int *nr) { unsigned long next; pud_t *pudp; pudp = pud_offset(&p4d, addr); do { pud_t pud = READ_ONCE(*pudp); next = pud_addr_end(addr, end); if (pud_none(pud)) return 0; if (unlikely(pud_huge(pud))) { if (!gup_huge_pud(pud, pudp, addr, next, write, pages, nr)) return 0; } else if (unlikely(is_hugepd(__hugepd(pud_val(pud))))) { if (!gup_huge_pd(__hugepd(pud_val(pud)), addr, PUD_SHIFT, next, write, pages, nr)) return 0; } else if (!gup_pmd_range(pud, addr, next, write, pages, nr)) return 0; } while (pudp++, addr = next, addr != end); return 1; } static int gup_p4d_range(pgd_t pgd, unsigned long addr, unsigned long end, int write, struct page **pages, int *nr) { unsigned long next; p4d_t *p4dp; p4dp = p4d_offset(&pgd, addr); do { p4d_t p4d = 
READ_ONCE(*p4dp); next = p4d_addr_end(addr, end); if (p4d_none(p4d)) return 0; BUILD_BUG_ON(p4d_huge(p4d)); if (unlikely(is_hugepd(__hugepd(p4d_val(p4d))))) { if (!gup_huge_pd(__hugepd(p4d_val(p4d)), addr, P4D_SHIFT, next, write, pages, nr)) return 0; } else if (!gup_pud_range(p4d, addr, next, write, pages, nr)) return 0; } while (p4dp++, addr = next, addr != end); return 1; } static void gup_pgd_range(unsigned long addr, unsigned long end, int write, struct page **pages, int *nr) { unsigned long next; pgd_t *pgdp; pgdp = pgd_offset(current->mm, addr); do { pgd_t pgd = READ_ONCE(*pgdp); next = pgd_addr_end(addr, end); if (pgd_none(pgd)) return; if (unlikely(pgd_huge(pgd))) { if (!gup_huge_pgd(pgd, pgdp, addr, next, write, pages, nr)) return; } else if (unlikely(is_hugepd(__hugepd(pgd_val(pgd))))) { if (!gup_huge_pd(__hugepd(pgd_val(pgd)), addr, PGDIR_SHIFT, next, write, pages, nr)) return; } else if (!gup_p4d_range(pgd, addr, next, write, pages, nr)) return; } while (pgdp++, addr = next, addr != end); } #ifndef gup_fast_permitted /* * Check if it's allowed to use __get_user_pages_fast() for the range, or * we need to fall back to the slow version: */ bool gup_fast_permitted(unsigned long start, int nr_pages, int write) { unsigned long len, end; len = (unsigned long) nr_pages << PAGE_SHIFT; end = start + len; return end >= start; } #endif /* * Like get_user_pages_fast() except it's IRQ-safe in that it won't fall back to * the regular GUP. * Note a difference with get_user_pages_fast: this always returns the * number of pages pinned, 0 if no pages were pinned. */ int __get_user_pages_fast(unsigned long start, int nr_pages, int write, struct page **pages) { unsigned long len, end; unsigned long flags; int nr = 0; start &= PAGE_MASK; len = (unsigned long) nr_pages << PAGE_SHIFT; end = start + len; if (unlikely(!access_ok((void __user *)start, len))) return 0; /* * Disable interrupts. We use the nested form as we can already have * interrupts disabled by get_futex_key. 
* * With interrupts disabled, we block page table pages from being * freed from under us. See struct mmu_table_batch comments in * include/asm-generic/tlb.h for more details. * * We do not adopt an rcu_read_lock(.) here as we also want to * block IPIs that come from THPs splitting. */ if (gup_fast_permitted(start, nr_pages, write)) { local_irq_save(flags); gup_pgd_range(start, end, write, pages, &nr); local_irq_restore(flags); } return nr; } /** * get_user_pages_fast() - pin user pages in memory * @start: starting user address * @nr_pages: number of pages from start to pin * @write: whether pages will be written to * @pages: array that receives pointers to the pages pinned. * Should be at least nr_pages long. * * Attempt to pin user pages in memory without taking mm->mmap_sem. * If not successful, it will fall back to taking the lock and * calling get_user_pages(). * * Returns number of pages pinned. This may be fewer than the number * requested. If nr_pages is 0 or negative, returns 0. If no pages * were pinned, returns -errno. */ int get_user_pages_fast(unsigned long start, int nr_pages, int write, struct page **pages) { unsigned long addr, len, end; int nr = 0, ret = 0; start &= PAGE_MASK; addr = start; len = (unsigned long) nr_pages << PAGE_SHIFT; end = start + len; if (nr_pages <= 0) return 0; if (unlikely(!access_ok((void __user *)start, len))) return -EFAULT; if (gup_fast_permitted(start, nr_pages, write)) { local_irq_disable(); gup_pgd_range(addr, end, write, pages, &nr); local_irq_enable(); ret = nr; } if (nr < nr_pages) { /* Try to get the remaining pages with get_user_pages */ start += nr << PAGE_SHIFT; pages += nr; ret = get_user_pages_unlocked(start, nr_pages - nr, pages, write ? FOLL_WRITE : 0); /* Have to be a bit careful with return values */ if (nr > 0) { if (ret < 0) ret = nr; else ret += nr; } } return ret; } #endif /* CONFIG_HAVE_GENERIC_GUP */
static int gup_huge_pud(pud_t orig, pud_t *pudp, unsigned long addr, unsigned long end, int write, struct page **pages, int *nr) { struct page *head, *page; int refs; if (!pud_access_permitted(orig, write)) return 0; if (pud_devmap(orig)) return __gup_device_huge_pud(orig, pudp, addr, end, pages, nr); refs = 0; page = pud_page(orig) + ((addr & ~PUD_MASK) >> PAGE_SHIFT); do { pages[*nr] = page; (*nr)++; page++; refs++; } while (addr += PAGE_SIZE, addr != end); head = compound_head(pud_page(orig)); if (!page_cache_add_speculative(head, refs)) { *nr -= refs; return 0; } if (unlikely(pud_val(orig) != pud_val(*pudp))) { *nr -= refs; while (refs--) put_page(head); return 0; } SetPageReferenced(head); return 1; }
static int gup_huge_pud(pud_t orig, pud_t *pudp, unsigned long addr, unsigned long end, int write, struct page **pages, int *nr) { struct page *head, *page; int refs; if (!pud_access_permitted(orig, write)) return 0; if (pud_devmap(orig)) return __gup_device_huge_pud(orig, pudp, addr, end, pages, nr); refs = 0; page = pud_page(orig) + ((addr & ~PUD_MASK) >> PAGE_SHIFT); do { pages[*nr] = page; (*nr)++; page++; refs++; } while (addr += PAGE_SIZE, addr != end); head = try_get_compound_head(pud_page(orig), refs); if (!head) { *nr -= refs; return 0; } if (unlikely(pud_val(orig) != pud_val(*pudp))) { *nr -= refs; while (refs--) put_page(head); return 0; } SetPageReferenced(head); return 1; }
{'added': [(160, '\tif (flags & FOLL_GET) {'), (161, '\t\tif (unlikely(!try_get_page(page))) {'), (162, '\t\t\tpage = ERR_PTR(-ENOMEM);'), (163, '\t\t\tgoto out;'), (164, '\t\t}'), (165, '\t}'), (302, '\t\t\tif (unlikely(!try_get_page(page))) {'), (303, '\t\t\t\tspin_unlock(ptl);'), (304, '\t\t\t\treturn ERR_PTR(-ENOMEM);'), (305, '\t\t\t}'), (507, '\tif (unlikely(!try_get_page(*page))) {'), (508, '\t\tret = -ENOMEM;'), (509, '\t\tgoto unmap;'), (510, '\t}'), (1406, '/*'), (1407, ' * Return the compund head page with ref appropriately incremented,'), (1408, ' * or NULL if that failed.'), (1409, ' */'), (1410, 'static inline struct page *try_get_compound_head(struct page *page, int refs)'), (1411, '{'), (1412, '\tstruct page *head = compound_head(page);'), (1413, '\tif (WARN_ON_ONCE(page_ref_count(head) < 0))'), (1414, '\t\treturn NULL;'), (1415, '\tif (unlikely(!page_cache_add_speculative(head, refs)))'), (1416, '\t\treturn NULL;'), (1417, '\treturn head;'), (1418, '}'), (1419, ''), (1455, '\t\thead = try_get_compound_head(page, 1);'), (1456, '\t\tif (!head)'), (1595, '\thead = try_get_compound_head(pmd_page(orig), refs);'), (1596, '\tif (!head) {'), (1633, '\thead = try_get_compound_head(pud_page(orig), refs);'), (1634, '\tif (!head) {'), (1670, '\thead = try_get_compound_head(pgd_page(orig), refs);'), (1671, '\tif (!head) {')], 'deleted': [(160, '\tif (flags & FOLL_GET)'), (161, '\t\tget_page(page);'), (298, '\t\t\tget_page(page);'), (500, '\tget_page(*page);'), (1430, '\t\thead = compound_head(page);'), (1432, '\t\tif (!page_cache_get_speculative(head))'), (1571, '\thead = compound_head(pmd_page(orig));'), (1572, '\tif (!page_cache_add_speculative(head, refs)) {'), (1609, '\thead = compound_head(pud_page(orig));'), (1610, '\tif (!page_cache_add_speculative(head, refs)) {'), (1646, '\thead = compound_head(pgd_page(orig));'), (1647, '\tif (!page_cache_add_speculative(head, refs)) {')]}
36
12
1,194
7,593
31
208
7
https://github.com/torvalds/linux
CVE-2019-11487
CWE-416
954
tif_predict.c
C
horAcc32
/* $Id$ */ /* * Copyright (c) 1988-1997 Sam Leffler * Copyright (c) 1991-1997 Silicon Graphics, Inc. * * Permission to use, copy, modify, distribute, and sell this software and * its documentation for any purpose is hereby granted without fee, provided * that (i) the above copyright notices and this permission notice appear in * all copies of the software and related documentation, and (ii) the names of * Sam Leffler and Silicon Graphics may not be used in any advertising or * publicity relating to the software without the specific, prior written * permission of Sam Leffler and Silicon Graphics. * * THE SOFTWARE IS PROVIDED "AS-IS" AND WITHOUT WARRANTY OF ANY KIND, * EXPRESS, IMPLIED OR OTHERWISE, INCLUDING WITHOUT LIMITATION, ANY * WARRANTY OF MERCHANTABILITY OR FITNESS FOR A PARTICULAR PURPOSE. * * IN NO EVENT SHALL SAM LEFFLER OR SILICON GRAPHICS BE LIABLE FOR * ANY SPECIAL, INCIDENTAL, INDIRECT OR CONSEQUENTIAL DAMAGES OF ANY KIND, * OR ANY DAMAGES WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, * WHETHER OR NOT ADVISED OF THE POSSIBILITY OF DAMAGE, AND ON ANY THEORY OF * LIABILITY, ARISING OUT OF OR IN CONNECTION WITH THE USE OR PERFORMANCE * OF THIS SOFTWARE. */ /* * TIFF Library. * * Predictor Tag Support (used by multiple codecs). 
*/ #include "tiffiop.h" #include "tif_predict.h" #define PredictorState(tif) ((TIFFPredictorState*) (tif)->tif_data) static void horAcc8(TIFF* tif, uint8* cp0, tmsize_t cc); static void horAcc16(TIFF* tif, uint8* cp0, tmsize_t cc); static void horAcc32(TIFF* tif, uint8* cp0, tmsize_t cc); static void swabHorAcc16(TIFF* tif, uint8* cp0, tmsize_t cc); static void swabHorAcc32(TIFF* tif, uint8* cp0, tmsize_t cc); static void horDiff8(TIFF* tif, uint8* cp0, tmsize_t cc); static void horDiff16(TIFF* tif, uint8* cp0, tmsize_t cc); static void horDiff32(TIFF* tif, uint8* cp0, tmsize_t cc); static void swabHorDiff16(TIFF* tif, uint8* cp0, tmsize_t cc); static void swabHorDiff32(TIFF* tif, uint8* cp0, tmsize_t cc); static void fpAcc(TIFF* tif, uint8* cp0, tmsize_t cc); static void fpDiff(TIFF* tif, uint8* cp0, tmsize_t cc); static int PredictorDecodeRow(TIFF* tif, uint8* op0, tmsize_t occ0, uint16 s); static int PredictorDecodeTile(TIFF* tif, uint8* op0, tmsize_t occ0, uint16 s); static int PredictorEncodeRow(TIFF* tif, uint8* bp, tmsize_t cc, uint16 s); static int PredictorEncodeTile(TIFF* tif, uint8* bp0, tmsize_t cc0, uint16 s); static int PredictorSetup(TIFF* tif) { static const char module[] = "PredictorSetup"; TIFFPredictorState* sp = PredictorState(tif); TIFFDirectory* td = &tif->tif_dir; switch (sp->predictor) /* no differencing */ { case PREDICTOR_NONE: return 1; case PREDICTOR_HORIZONTAL: if (td->td_bitspersample != 8 && td->td_bitspersample != 16 && td->td_bitspersample != 32) { TIFFErrorExt(tif->tif_clientdata, module, "Horizontal differencing \"Predictor\" not supported with %d-bit samples", td->td_bitspersample); return 0; } break; case PREDICTOR_FLOATINGPOINT: if (td->td_sampleformat != SAMPLEFORMAT_IEEEFP) { TIFFErrorExt(tif->tif_clientdata, module, "Floating point \"Predictor\" not supported with %d data format", td->td_sampleformat); return 0; } if (td->td_bitspersample != 16 && td->td_bitspersample != 24 && td->td_bitspersample != 32 && 
td->td_bitspersample != 64) { /* Should 64 be allowed? */ TIFFErrorExt(tif->tif_clientdata, module, "Floating point \"Predictor\" not supported with %d-bit samples", td->td_bitspersample); return 0; } break; default: TIFFErrorExt(tif->tif_clientdata, module, "\"Predictor\" value %d not supported", sp->predictor); return 0; } sp->stride = (td->td_planarconfig == PLANARCONFIG_CONTIG ? td->td_samplesperpixel : 1); /* * Calculate the scanline/tile-width size in bytes. */ if (isTiled(tif)) sp->rowsize = TIFFTileRowSize(tif); else sp->rowsize = TIFFScanlineSize(tif); if (sp->rowsize == 0) return 0; return 1; } static int PredictorSetupDecode(TIFF* tif) { TIFFPredictorState* sp = PredictorState(tif); TIFFDirectory* td = &tif->tif_dir; if (!(*sp->setupdecode)(tif) || !PredictorSetup(tif)) return 0; if (sp->predictor == 2) { switch (td->td_bitspersample) { case 8: sp->decodepfunc = horAcc8; break; case 16: sp->decodepfunc = horAcc16; break; case 32: sp->decodepfunc = horAcc32; break; } /* * Override default decoding method with one that does the * predictor stuff. */ if( tif->tif_decoderow != PredictorDecodeRow ) { sp->decoderow = tif->tif_decoderow; tif->tif_decoderow = PredictorDecodeRow; sp->decodestrip = tif->tif_decodestrip; tif->tif_decodestrip = PredictorDecodeTile; sp->decodetile = tif->tif_decodetile; tif->tif_decodetile = PredictorDecodeTile; } /* * If the data is horizontally differenced 16-bit data that * requires byte-swapping, then it must be byte swapped before * the accumulation step. We do this with a special-purpose * routine and override the normal post decoding logic that * the library setup when the directory was read. 
*/ if (tif->tif_flags & TIFF_SWAB) { if (sp->decodepfunc == horAcc16) { sp->decodepfunc = swabHorAcc16; tif->tif_postdecode = _TIFFNoPostDecode; } else if (sp->decodepfunc == horAcc32) { sp->decodepfunc = swabHorAcc32; tif->tif_postdecode = _TIFFNoPostDecode; } } } else if (sp->predictor == 3) { sp->decodepfunc = fpAcc; /* * Override default decoding method with one that does the * predictor stuff. */ if( tif->tif_decoderow != PredictorDecodeRow ) { sp->decoderow = tif->tif_decoderow; tif->tif_decoderow = PredictorDecodeRow; sp->decodestrip = tif->tif_decodestrip; tif->tif_decodestrip = PredictorDecodeTile; sp->decodetile = tif->tif_decodetile; tif->tif_decodetile = PredictorDecodeTile; } /* * The data should not be swapped outside of the floating * point predictor, the accumulation routine should return * byres in the native order. */ if (tif->tif_flags & TIFF_SWAB) { tif->tif_postdecode = _TIFFNoPostDecode; } /* * Allocate buffer to keep the decoded bytes before * rearranging in the right order */ } return 1; } static int PredictorSetupEncode(TIFF* tif) { TIFFPredictorState* sp = PredictorState(tif); TIFFDirectory* td = &tif->tif_dir; if (!(*sp->setupencode)(tif) || !PredictorSetup(tif)) return 0; if (sp->predictor == 2) { switch (td->td_bitspersample) { case 8: sp->encodepfunc = horDiff8; break; case 16: sp->encodepfunc = horDiff16; break; case 32: sp->encodepfunc = horDiff32; break; } /* * Override default encoding method with one that does the * predictor stuff. */ if( tif->tif_encoderow != PredictorEncodeRow ) { sp->encoderow = tif->tif_encoderow; tif->tif_encoderow = PredictorEncodeRow; sp->encodestrip = tif->tif_encodestrip; tif->tif_encodestrip = PredictorEncodeTile; sp->encodetile = tif->tif_encodetile; tif->tif_encodetile = PredictorEncodeTile; } /* * If the data is horizontally differenced 16-bit data that * requires byte-swapping, then it must be byte swapped after * the differentiation step. 
We do this with a special-purpose * routine and override the normal post decoding logic that * the library setup when the directory was read. */ if (tif->tif_flags & TIFF_SWAB) { if (sp->encodepfunc == horDiff16) { sp->encodepfunc = swabHorDiff16; tif->tif_postdecode = _TIFFNoPostDecode; } else if (sp->encodepfunc == horDiff32) { sp->encodepfunc = swabHorDiff32; tif->tif_postdecode = _TIFFNoPostDecode; } } } else if (sp->predictor == 3) { sp->encodepfunc = fpDiff; /* * Override default encoding method with one that does the * predictor stuff. */ if( tif->tif_encoderow != PredictorEncodeRow ) { sp->encoderow = tif->tif_encoderow; tif->tif_encoderow = PredictorEncodeRow; sp->encodestrip = tif->tif_encodestrip; tif->tif_encodestrip = PredictorEncodeTile; sp->encodetile = tif->tif_encodetile; tif->tif_encodetile = PredictorEncodeTile; } } return 1; } #define REPEAT4(n, op) \ switch (n) { \ default: { tmsize_t i; for (i = n-4; i > 0; i--) { op; } } \ case 4: op; \ case 3: op; \ case 2: op; \ case 1: op; \ case 0: ; \ } /* Remarks related to C standard compliance in all below functions : */ /* - to avoid any undefined behaviour, we only operate on unsigned types */ /* since the behaviour of "overflows" is defined (wrap over) */ /* - when storing into the byte stream, we explicitly mask with 0xff so */ /* as to make icc -check=conversions happy (not necessary by the standard) */ static void horAcc8(TIFF* tif, uint8* cp0, tmsize_t cc) { tmsize_t stride = PredictorState(tif)->stride; unsigned char* cp = (unsigned char*) cp0; assert((cc%stride)==0); if (cc > stride) { /* * Pipeline the most common cases. 
*/ if (stride == 3) { unsigned int cr = cp[0]; unsigned int cg = cp[1]; unsigned int cb = cp[2]; cc -= 3; cp += 3; while (cc>0) { cp[0] = (unsigned char) ((cr += cp[0]) & 0xff); cp[1] = (unsigned char) ((cg += cp[1]) & 0xff); cp[2] = (unsigned char) ((cb += cp[2]) & 0xff); cc -= 3; cp += 3; } } else if (stride == 4) { unsigned int cr = cp[0]; unsigned int cg = cp[1]; unsigned int cb = cp[2]; unsigned int ca = cp[3]; cc -= 4; cp += 4; while (cc>0) { cp[0] = (unsigned char) ((cr += cp[0]) & 0xff); cp[1] = (unsigned char) ((cg += cp[1]) & 0xff); cp[2] = (unsigned char) ((cb += cp[2]) & 0xff); cp[3] = (unsigned char) ((ca += cp[3]) & 0xff); cc -= 4; cp += 4; } } else { cc -= stride; do { REPEAT4(stride, cp[stride] = (unsigned char) ((cp[stride] + *cp) & 0xff); cp++) cc -= stride; } while (cc>0); } } } static void swabHorAcc16(TIFF* tif, uint8* cp0, tmsize_t cc) { uint16* wp = (uint16*) cp0; tmsize_t wc = cc / 2; TIFFSwabArrayOfShort(wp, wc); horAcc16(tif, cp0, cc); } static void horAcc16(TIFF* tif, uint8* cp0, tmsize_t cc) { tmsize_t stride = PredictorState(tif)->stride; uint16* wp = (uint16*) cp0; tmsize_t wc = cc / 2; assert((cc%(2*stride))==0); if (wc > stride) { wc -= stride; do { REPEAT4(stride, wp[stride] = (uint16)(((unsigned int)wp[stride] + (unsigned int)wp[0]) & 0xffff); wp++) wc -= stride; } while (wc > 0); } } static void swabHorAcc32(TIFF* tif, uint8* cp0, tmsize_t cc) { uint32* wp = (uint32*) cp0; tmsize_t wc = cc / 4; TIFFSwabArrayOfLong(wp, wc); horAcc32(tif, cp0, cc); } static void horAcc32(TIFF* tif, uint8* cp0, tmsize_t cc) { tmsize_t stride = PredictorState(tif)->stride; uint32* wp = (uint32*) cp0; tmsize_t wc = cc / 4; assert((cc%(4*stride))==0); if (wc > stride) { wc -= stride; do { REPEAT4(stride, wp[stride] += wp[0]; wp++) wc -= stride; } while (wc > 0); } } /* * Floating point predictor accumulation routine. 
*/ static void fpAcc(TIFF* tif, uint8* cp0, tmsize_t cc) { tmsize_t stride = PredictorState(tif)->stride; uint32 bps = tif->tif_dir.td_bitspersample / 8; tmsize_t wc = cc / bps; tmsize_t count = cc; uint8 *cp = (uint8 *) cp0; uint8 *tmp = (uint8 *)_TIFFmalloc(cc); assert((cc%(bps*stride))==0); if (!tmp) return; while (count > stride) { REPEAT4(stride, cp[stride] = (unsigned char) ((cp[stride] + cp[0]) & 0xff); cp++) count -= stride; } _TIFFmemcpy(tmp, cp0, cc); cp = (uint8 *) cp0; for (count = 0; count < wc; count++) { uint32 byte; for (byte = 0; byte < bps; byte++) { #if WORDS_BIGENDIAN cp[bps * count + byte] = tmp[byte * wc + count]; #else cp[bps * count + byte] = tmp[(bps - byte - 1) * wc + count]; #endif } } _TIFFfree(tmp); } /* * Decode a scanline and apply the predictor routine. */ static int PredictorDecodeRow(TIFF* tif, uint8* op0, tmsize_t occ0, uint16 s) { TIFFPredictorState *sp = PredictorState(tif); assert(sp != NULL); assert(sp->decoderow != NULL); assert(sp->decodepfunc != NULL); if ((*sp->decoderow)(tif, op0, occ0, s)) { (*sp->decodepfunc)(tif, op0, occ0); return 1; } else return 0; } /* * Decode a tile/strip and apply the predictor routine. * Note that horizontal differencing must be done on a * row-by-row basis. The width of a "row" has already * been calculated at pre-decode time according to the * strip/tile dimensions. 
*/ static int PredictorDecodeTile(TIFF* tif, uint8* op0, tmsize_t occ0, uint16 s) { TIFFPredictorState *sp = PredictorState(tif); assert(sp != NULL); assert(sp->decodetile != NULL); if ((*sp->decodetile)(tif, op0, occ0, s)) { tmsize_t rowsize = sp->rowsize; assert(rowsize > 0); assert((occ0%rowsize)==0); assert(sp->decodepfunc != NULL); while (occ0 > 0) { (*sp->decodepfunc)(tif, op0, rowsize); occ0 -= rowsize; op0 += rowsize; } return 1; } else return 0; } static void horDiff8(TIFF* tif, uint8* cp0, tmsize_t cc) { TIFFPredictorState* sp = PredictorState(tif); tmsize_t stride = sp->stride; unsigned char* cp = (unsigned char*) cp0; assert((cc%stride)==0); if (cc > stride) { cc -= stride; /* * Pipeline the most common cases. */ if (stride == 3) { unsigned int r1, g1, b1; unsigned int r2 = cp[0]; unsigned int g2 = cp[1]; unsigned int b2 = cp[2]; do { r1 = cp[3]; cp[3] = (unsigned char)((r1-r2)&0xff); r2 = r1; g1 = cp[4]; cp[4] = (unsigned char)((g1-g2)&0xff); g2 = g1; b1 = cp[5]; cp[5] = (unsigned char)((b1-b2)&0xff); b2 = b1; cp += 3; } while ((cc -= 3) > 0); } else if (stride == 4) { unsigned int r1, g1, b1, a1; unsigned int r2 = cp[0]; unsigned int g2 = cp[1]; unsigned int b2 = cp[2]; unsigned int a2 = cp[3]; do { r1 = cp[4]; cp[4] = (unsigned char)((r1-r2)&0xff); r2 = r1; g1 = cp[5]; cp[5] = (unsigned char)((g1-g2)&0xff); g2 = g1; b1 = cp[6]; cp[6] = (unsigned char)((b1-b2)&0xff); b2 = b1; a1 = cp[7]; cp[7] = (unsigned char)((a1-a2)&0xff); a2 = a1; cp += 4; } while ((cc -= 4) > 0); } else { cp += cc - 1; do { REPEAT4(stride, cp[stride] = (unsigned char)((cp[stride] - cp[0])&0xff); cp--) } while ((cc -= stride) > 0); } } } static void horDiff16(TIFF* tif, uint8* cp0, tmsize_t cc) { TIFFPredictorState* sp = PredictorState(tif); tmsize_t stride = sp->stride; uint16 *wp = (uint16*) cp0; tmsize_t wc = cc/2; assert((cc%(2*stride))==0); if (wc > stride) { wc -= stride; wp += wc - 1; do { REPEAT4(stride, wp[stride] = (uint16)(((unsigned int)wp[stride] - (unsigned 
int)wp[0]) & 0xffff); wp--) wc -= stride; } while (wc > 0); } } static void swabHorDiff16(TIFF* tif, uint8* cp0, tmsize_t cc) { uint16* wp = (uint16*) cp0; tmsize_t wc = cc / 2; horDiff16(tif, cp0, cc); TIFFSwabArrayOfShort(wp, wc); } static void horDiff32(TIFF* tif, uint8* cp0, tmsize_t cc) { TIFFPredictorState* sp = PredictorState(tif); tmsize_t stride = sp->stride; uint32 *wp = (uint32*) cp0; tmsize_t wc = cc/4; assert((cc%(4*stride))==0); if (wc > stride) { wc -= stride; wp += wc - 1; do { REPEAT4(stride, wp[stride] -= wp[0]; wp--) wc -= stride; } while (wc > 0); } } static void swabHorDiff32(TIFF* tif, uint8* cp0, tmsize_t cc) { uint32* wp = (uint32*) cp0; tmsize_t wc = cc / 4; horDiff32(tif, cp0, cc); TIFFSwabArrayOfLong(wp, wc); } /* * Floating point predictor differencing routine. */ static void fpDiff(TIFF* tif, uint8* cp0, tmsize_t cc) { tmsize_t stride = PredictorState(tif)->stride; uint32 bps = tif->tif_dir.td_bitspersample / 8; tmsize_t wc = cc / bps; tmsize_t count; uint8 *cp = (uint8 *) cp0; uint8 *tmp = (uint8 *)_TIFFmalloc(cc); assert((cc%(bps*stride))==0); if (!tmp) return; _TIFFmemcpy(tmp, cp0, cc); for (count = 0; count < wc; count++) { uint32 byte; for (byte = 0; byte < bps; byte++) { #if WORDS_BIGENDIAN cp[byte * wc + count] = tmp[bps * count + byte]; #else cp[(bps - byte - 1) * wc + count] = tmp[bps * count + byte]; #endif } } _TIFFfree(tmp); cp = (uint8 *) cp0; cp += cc - stride - 1; for (count = cc; count > stride; count -= stride) REPEAT4(stride, cp[stride] = (unsigned char)((cp[stride] - cp[0])&0xff); cp--) } static int PredictorEncodeRow(TIFF* tif, uint8* bp, tmsize_t cc, uint16 s) { TIFFPredictorState *sp = PredictorState(tif); assert(sp != NULL); assert(sp->encodepfunc != NULL); assert(sp->encoderow != NULL); /* XXX horizontal differencing alters user's data XXX */ (*sp->encodepfunc)(tif, bp, cc); return (*sp->encoderow)(tif, bp, cc, s); } static int PredictorEncodeTile(TIFF* tif, uint8* bp0, tmsize_t cc0, uint16 s) { static const char 
module[] = "PredictorEncodeTile"; TIFFPredictorState *sp = PredictorState(tif); uint8 *working_copy; tmsize_t cc = cc0, rowsize; unsigned char* bp; int result_code; assert(sp != NULL); assert(sp->encodepfunc != NULL); assert(sp->encodetile != NULL); /* * Do predictor manipulation in a working buffer to avoid altering * the callers buffer. http://trac.osgeo.org/gdal/ticket/1965 */ working_copy = (uint8*) _TIFFmalloc(cc0); if( working_copy == NULL ) { TIFFErrorExt(tif->tif_clientdata, module, "Out of memory allocating " TIFF_SSIZE_FORMAT " byte temp buffer.", cc0 ); return 0; } memcpy( working_copy, bp0, cc0 ); bp = working_copy; rowsize = sp->rowsize; assert(rowsize > 0); assert((cc0%rowsize)==0); while (cc > 0) { (*sp->encodepfunc)(tif, bp, rowsize); cc -= rowsize; bp += rowsize; } result_code = (*sp->encodetile)(tif, working_copy, cc0, s); _TIFFfree( working_copy ); return result_code; } #define FIELD_PREDICTOR (FIELD_CODEC+0) /* XXX */ static const TIFFField predictFields[] = { { TIFFTAG_PREDICTOR, 1, 1, TIFF_SHORT, 0, TIFF_SETGET_UINT16, TIFF_SETGET_UINT16, FIELD_PREDICTOR, FALSE, FALSE, "Predictor", NULL }, }; static int PredictorVSetField(TIFF* tif, uint32 tag, va_list ap) { TIFFPredictorState *sp = PredictorState(tif); assert(sp != NULL); assert(sp->vsetparent != NULL); switch (tag) { case TIFFTAG_PREDICTOR: sp->predictor = (uint16) va_arg(ap, uint16_vap); TIFFSetFieldBit(tif, FIELD_PREDICTOR); break; default: return (*sp->vsetparent)(tif, tag, ap); } tif->tif_flags |= TIFF_DIRTYDIRECT; return 1; } static int PredictorVGetField(TIFF* tif, uint32 tag, va_list ap) { TIFFPredictorState *sp = PredictorState(tif); assert(sp != NULL); assert(sp->vgetparent != NULL); switch (tag) { case TIFFTAG_PREDICTOR: *va_arg(ap, uint16*) = (uint16)sp->predictor; break; default: return (*sp->vgetparent)(tif, tag, ap); } return 1; } static void PredictorPrintDir(TIFF* tif, FILE* fd, long flags) { TIFFPredictorState* sp = PredictorState(tif); (void) flags; if 
(TIFFFieldSet(tif,FIELD_PREDICTOR)) { fprintf(fd, " Predictor: "); switch (sp->predictor) { case 1: fprintf(fd, "none "); break; case 2: fprintf(fd, "horizontal differencing "); break; case 3: fprintf(fd, "floating point predictor "); break; } fprintf(fd, "%u (0x%x)\n", sp->predictor, sp->predictor); } if (sp->printdir) (*sp->printdir)(tif, fd, flags); } int TIFFPredictorInit(TIFF* tif) { TIFFPredictorState* sp = PredictorState(tif); assert(sp != 0); /* * Merge codec-specific tag information. */ if (!_TIFFMergeFields(tif, predictFields, TIFFArrayCount(predictFields))) { TIFFErrorExt(tif->tif_clientdata, "TIFFPredictorInit", "Merging Predictor codec-specific tags failed"); return 0; } /* * Override parent get/set field methods. */ sp->vgetparent = tif->tif_tagmethods.vgetfield; tif->tif_tagmethods.vgetfield = PredictorVGetField;/* hook for predictor tag */ sp->vsetparent = tif->tif_tagmethods.vsetfield; tif->tif_tagmethods.vsetfield = PredictorVSetField;/* hook for predictor tag */ sp->printdir = tif->tif_tagmethods.printdir; tif->tif_tagmethods.printdir = PredictorPrintDir; /* hook for predictor tag */ sp->setupdecode = tif->tif_setupdecode; tif->tif_setupdecode = PredictorSetupDecode; sp->setupencode = tif->tif_setupencode; tif->tif_setupencode = PredictorSetupEncode; sp->predictor = 1; /* default value */ sp->encodepfunc = NULL; /* no predictor routine */ sp->decodepfunc = NULL; /* no predictor routine */ return 1; } int TIFFPredictorCleanup(TIFF* tif) { TIFFPredictorState* sp = PredictorState(tif); assert(sp != 0); tif->tif_tagmethods.vgetfield = sp->vgetparent; tif->tif_tagmethods.vsetfield = sp->vsetparent; tif->tif_tagmethods.printdir = sp->printdir; tif->tif_setupdecode = sp->setupdecode; tif->tif_setupencode = sp->setupencode; return 1; } /* vim: set ts=8 sts=8 sw=8 noet: */ /* * Local Variables: * mode: c * c-basic-offset: 8 * fill-column: 78 * End: */
/* $Id$ */ /* * Copyright (c) 1988-1997 Sam Leffler * Copyright (c) 1991-1997 Silicon Graphics, Inc. * * Permission to use, copy, modify, distribute, and sell this software and * its documentation for any purpose is hereby granted without fee, provided * that (i) the above copyright notices and this permission notice appear in * all copies of the software and related documentation, and (ii) the names of * Sam Leffler and Silicon Graphics may not be used in any advertising or * publicity relating to the software without the specific, prior written * permission of Sam Leffler and Silicon Graphics. * * THE SOFTWARE IS PROVIDED "AS-IS" AND WITHOUT WARRANTY OF ANY KIND, * EXPRESS, IMPLIED OR OTHERWISE, INCLUDING WITHOUT LIMITATION, ANY * WARRANTY OF MERCHANTABILITY OR FITNESS FOR A PARTICULAR PURPOSE. * * IN NO EVENT SHALL SAM LEFFLER OR SILICON GRAPHICS BE LIABLE FOR * ANY SPECIAL, INCIDENTAL, INDIRECT OR CONSEQUENTIAL DAMAGES OF ANY KIND, * OR ANY DAMAGES WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, * WHETHER OR NOT ADVISED OF THE POSSIBILITY OF DAMAGE, AND ON ANY THEORY OF * LIABILITY, ARISING OUT OF OR IN CONNECTION WITH THE USE OR PERFORMANCE * OF THIS SOFTWARE. */ /* * TIFF Library. * * Predictor Tag Support (used by multiple codecs). 
*/ #include "tiffiop.h" #include "tif_predict.h" #define PredictorState(tif) ((TIFFPredictorState*) (tif)->tif_data) static int horAcc8(TIFF* tif, uint8* cp0, tmsize_t cc); static int horAcc16(TIFF* tif, uint8* cp0, tmsize_t cc); static int horAcc32(TIFF* tif, uint8* cp0, tmsize_t cc); static int swabHorAcc16(TIFF* tif, uint8* cp0, tmsize_t cc); static int swabHorAcc32(TIFF* tif, uint8* cp0, tmsize_t cc); static int horDiff8(TIFF* tif, uint8* cp0, tmsize_t cc); static int horDiff16(TIFF* tif, uint8* cp0, tmsize_t cc); static int horDiff32(TIFF* tif, uint8* cp0, tmsize_t cc); static int swabHorDiff16(TIFF* tif, uint8* cp0, tmsize_t cc); static int swabHorDiff32(TIFF* tif, uint8* cp0, tmsize_t cc); static int fpAcc(TIFF* tif, uint8* cp0, tmsize_t cc); static int fpDiff(TIFF* tif, uint8* cp0, tmsize_t cc); static int PredictorDecodeRow(TIFF* tif, uint8* op0, tmsize_t occ0, uint16 s); static int PredictorDecodeTile(TIFF* tif, uint8* op0, tmsize_t occ0, uint16 s); static int PredictorEncodeRow(TIFF* tif, uint8* bp, tmsize_t cc, uint16 s); static int PredictorEncodeTile(TIFF* tif, uint8* bp0, tmsize_t cc0, uint16 s); static int PredictorSetup(TIFF* tif) { static const char module[] = "PredictorSetup"; TIFFPredictorState* sp = PredictorState(tif); TIFFDirectory* td = &tif->tif_dir; switch (sp->predictor) /* no differencing */ { case PREDICTOR_NONE: return 1; case PREDICTOR_HORIZONTAL: if (td->td_bitspersample != 8 && td->td_bitspersample != 16 && td->td_bitspersample != 32) { TIFFErrorExt(tif->tif_clientdata, module, "Horizontal differencing \"Predictor\" not supported with %d-bit samples", td->td_bitspersample); return 0; } break; case PREDICTOR_FLOATINGPOINT: if (td->td_sampleformat != SAMPLEFORMAT_IEEEFP) { TIFFErrorExt(tif->tif_clientdata, module, "Floating point \"Predictor\" not supported with %d data format", td->td_sampleformat); return 0; } if (td->td_bitspersample != 16 && td->td_bitspersample != 24 && td->td_bitspersample != 32 && td->td_bitspersample != 64) { 
/* Should 64 be allowed? */ TIFFErrorExt(tif->tif_clientdata, module, "Floating point \"Predictor\" not supported with %d-bit samples", td->td_bitspersample); return 0; } break; default: TIFFErrorExt(tif->tif_clientdata, module, "\"Predictor\" value %d not supported", sp->predictor); return 0; } sp->stride = (td->td_planarconfig == PLANARCONFIG_CONTIG ? td->td_samplesperpixel : 1); /* * Calculate the scanline/tile-width size in bytes. */ if (isTiled(tif)) sp->rowsize = TIFFTileRowSize(tif); else sp->rowsize = TIFFScanlineSize(tif); if (sp->rowsize == 0) return 0; return 1; } static int PredictorSetupDecode(TIFF* tif) { TIFFPredictorState* sp = PredictorState(tif); TIFFDirectory* td = &tif->tif_dir; if (!(*sp->setupdecode)(tif) || !PredictorSetup(tif)) return 0; if (sp->predictor == 2) { switch (td->td_bitspersample) { case 8: sp->decodepfunc = horAcc8; break; case 16: sp->decodepfunc = horAcc16; break; case 32: sp->decodepfunc = horAcc32; break; } /* * Override default decoding method with one that does the * predictor stuff. */ if( tif->tif_decoderow != PredictorDecodeRow ) { sp->decoderow = tif->tif_decoderow; tif->tif_decoderow = PredictorDecodeRow; sp->decodestrip = tif->tif_decodestrip; tif->tif_decodestrip = PredictorDecodeTile; sp->decodetile = tif->tif_decodetile; tif->tif_decodetile = PredictorDecodeTile; } /* * If the data is horizontally differenced 16-bit data that * requires byte-swapping, then it must be byte swapped before * the accumulation step. We do this with a special-purpose * routine and override the normal post decoding logic that * the library setup when the directory was read. 
*/ if (tif->tif_flags & TIFF_SWAB) { if (sp->decodepfunc == horAcc16) { sp->decodepfunc = swabHorAcc16; tif->tif_postdecode = _TIFFNoPostDecode; } else if (sp->decodepfunc == horAcc32) { sp->decodepfunc = swabHorAcc32; tif->tif_postdecode = _TIFFNoPostDecode; } } } else if (sp->predictor == 3) { sp->decodepfunc = fpAcc; /* * Override default decoding method with one that does the * predictor stuff. */ if( tif->tif_decoderow != PredictorDecodeRow ) { sp->decoderow = tif->tif_decoderow; tif->tif_decoderow = PredictorDecodeRow; sp->decodestrip = tif->tif_decodestrip; tif->tif_decodestrip = PredictorDecodeTile; sp->decodetile = tif->tif_decodetile; tif->tif_decodetile = PredictorDecodeTile; } /* * The data should not be swapped outside of the floating * point predictor, the accumulation routine should return * byres in the native order. */ if (tif->tif_flags & TIFF_SWAB) { tif->tif_postdecode = _TIFFNoPostDecode; } /* * Allocate buffer to keep the decoded bytes before * rearranging in the right order */ } return 1; } static int PredictorSetupEncode(TIFF* tif) { TIFFPredictorState* sp = PredictorState(tif); TIFFDirectory* td = &tif->tif_dir; if (!(*sp->setupencode)(tif) || !PredictorSetup(tif)) return 0; if (sp->predictor == 2) { switch (td->td_bitspersample) { case 8: sp->encodepfunc = horDiff8; break; case 16: sp->encodepfunc = horDiff16; break; case 32: sp->encodepfunc = horDiff32; break; } /* * Override default encoding method with one that does the * predictor stuff. */ if( tif->tif_encoderow != PredictorEncodeRow ) { sp->encoderow = tif->tif_encoderow; tif->tif_encoderow = PredictorEncodeRow; sp->encodestrip = tif->tif_encodestrip; tif->tif_encodestrip = PredictorEncodeTile; sp->encodetile = tif->tif_encodetile; tif->tif_encodetile = PredictorEncodeTile; } /* * If the data is horizontally differenced 16-bit data that * requires byte-swapping, then it must be byte swapped after * the differentiation step. 
We do this with a special-purpose * routine and override the normal post decoding logic that * the library setup when the directory was read. */ if (tif->tif_flags & TIFF_SWAB) { if (sp->encodepfunc == horDiff16) { sp->encodepfunc = swabHorDiff16; tif->tif_postdecode = _TIFFNoPostDecode; } else if (sp->encodepfunc == horDiff32) { sp->encodepfunc = swabHorDiff32; tif->tif_postdecode = _TIFFNoPostDecode; } } } else if (sp->predictor == 3) { sp->encodepfunc = fpDiff; /* * Override default encoding method with one that does the * predictor stuff. */ if( tif->tif_encoderow != PredictorEncodeRow ) { sp->encoderow = tif->tif_encoderow; tif->tif_encoderow = PredictorEncodeRow; sp->encodestrip = tif->tif_encodestrip; tif->tif_encodestrip = PredictorEncodeTile; sp->encodetile = tif->tif_encodetile; tif->tif_encodetile = PredictorEncodeTile; } } return 1; } #define REPEAT4(n, op) \ switch (n) { \ default: { tmsize_t i; for (i = n-4; i > 0; i--) { op; } } \ case 4: op; \ case 3: op; \ case 2: op; \ case 1: op; \ case 0: ; \ } /* Remarks related to C standard compliance in all below functions : */ /* - to avoid any undefined behaviour, we only operate on unsigned types */ /* since the behaviour of "overflows" is defined (wrap over) */ /* - when storing into the byte stream, we explicitly mask with 0xff so */ /* as to make icc -check=conversions happy (not necessary by the standard) */ static int horAcc8(TIFF* tif, uint8* cp0, tmsize_t cc) { tmsize_t stride = PredictorState(tif)->stride; unsigned char* cp = (unsigned char*) cp0; if((cc%stride)!=0) { TIFFErrorExt(tif->tif_clientdata, "horAcc8", "%s", "(cc%stride)!=0"); return 0; } if (cc > stride) { /* * Pipeline the most common cases. 
*/ if (stride == 3) { unsigned int cr = cp[0]; unsigned int cg = cp[1]; unsigned int cb = cp[2]; cc -= 3; cp += 3; while (cc>0) { cp[0] = (unsigned char) ((cr += cp[0]) & 0xff); cp[1] = (unsigned char) ((cg += cp[1]) & 0xff); cp[2] = (unsigned char) ((cb += cp[2]) & 0xff); cc -= 3; cp += 3; } } else if (stride == 4) { unsigned int cr = cp[0]; unsigned int cg = cp[1]; unsigned int cb = cp[2]; unsigned int ca = cp[3]; cc -= 4; cp += 4; while (cc>0) { cp[0] = (unsigned char) ((cr += cp[0]) & 0xff); cp[1] = (unsigned char) ((cg += cp[1]) & 0xff); cp[2] = (unsigned char) ((cb += cp[2]) & 0xff); cp[3] = (unsigned char) ((ca += cp[3]) & 0xff); cc -= 4; cp += 4; } } else { cc -= stride; do { REPEAT4(stride, cp[stride] = (unsigned char) ((cp[stride] + *cp) & 0xff); cp++) cc -= stride; } while (cc>0); } } return 1; } static int swabHorAcc16(TIFF* tif, uint8* cp0, tmsize_t cc) { uint16* wp = (uint16*) cp0; tmsize_t wc = cc / 2; TIFFSwabArrayOfShort(wp, wc); return horAcc16(tif, cp0, cc); } static int horAcc16(TIFF* tif, uint8* cp0, tmsize_t cc) { tmsize_t stride = PredictorState(tif)->stride; uint16* wp = (uint16*) cp0; tmsize_t wc = cc / 2; if((cc%(2*stride))!=0) { TIFFErrorExt(tif->tif_clientdata, "horAcc16", "%s", "cc%(2*stride))!=0"); return 0; } if (wc > stride) { wc -= stride; do { REPEAT4(stride, wp[stride] = (uint16)(((unsigned int)wp[stride] + (unsigned int)wp[0]) & 0xffff); wp++) wc -= stride; } while (wc > 0); } return 1; } static int swabHorAcc32(TIFF* tif, uint8* cp0, tmsize_t cc) { uint32* wp = (uint32*) cp0; tmsize_t wc = cc / 4; TIFFSwabArrayOfLong(wp, wc); return horAcc32(tif, cp0, cc); } static int horAcc32(TIFF* tif, uint8* cp0, tmsize_t cc) { tmsize_t stride = PredictorState(tif)->stride; uint32* wp = (uint32*) cp0; tmsize_t wc = cc / 4; if((cc%(4*stride))!=0) { TIFFErrorExt(tif->tif_clientdata, "horAcc32", "%s", "cc%(4*stride))!=0"); return 0; } if (wc > stride) { wc -= stride; do { REPEAT4(stride, wp[stride] += wp[0]; wp++) wc -= stride; } while (wc > 
0); } return 1; } /* * Floating point predictor accumulation routine. */ static int fpAcc(TIFF* tif, uint8* cp0, tmsize_t cc) { tmsize_t stride = PredictorState(tif)->stride; uint32 bps = tif->tif_dir.td_bitspersample / 8; tmsize_t wc = cc / bps; tmsize_t count = cc; uint8 *cp = (uint8 *) cp0; uint8 *tmp = (uint8 *)_TIFFmalloc(cc); if(cc%(bps*stride)!=0) { TIFFErrorExt(tif->tif_clientdata, "fpAcc", "%s", "cc%(bps*stride))!=0"); return 0; } if (!tmp) return 0; while (count > stride) { REPEAT4(stride, cp[stride] = (unsigned char) ((cp[stride] + cp[0]) & 0xff); cp++) count -= stride; } _TIFFmemcpy(tmp, cp0, cc); cp = (uint8 *) cp0; for (count = 0; count < wc; count++) { uint32 byte; for (byte = 0; byte < bps; byte++) { #if WORDS_BIGENDIAN cp[bps * count + byte] = tmp[byte * wc + count]; #else cp[bps * count + byte] = tmp[(bps - byte - 1) * wc + count]; #endif } } _TIFFfree(tmp); return 1; } /* * Decode a scanline and apply the predictor routine. */ static int PredictorDecodeRow(TIFF* tif, uint8* op0, tmsize_t occ0, uint16 s) { TIFFPredictorState *sp = PredictorState(tif); assert(sp != NULL); assert(sp->decoderow != NULL); assert(sp->decodepfunc != NULL); if ((*sp->decoderow)(tif, op0, occ0, s)) { return (*sp->decodepfunc)(tif, op0, occ0); } else return 0; } /* * Decode a tile/strip and apply the predictor routine. * Note that horizontal differencing must be done on a * row-by-row basis. The width of a "row" has already * been calculated at pre-decode time according to the * strip/tile dimensions. 
*/ static int PredictorDecodeTile(TIFF* tif, uint8* op0, tmsize_t occ0, uint16 s) { TIFFPredictorState *sp = PredictorState(tif); assert(sp != NULL); assert(sp->decodetile != NULL); if ((*sp->decodetile)(tif, op0, occ0, s)) { tmsize_t rowsize = sp->rowsize; assert(rowsize > 0); if((occ0%rowsize) !=0) { TIFFErrorExt(tif->tif_clientdata, "PredictorDecodeTile", "%s", "occ0%rowsize != 0"); return 0; } assert(sp->decodepfunc != NULL); while (occ0 > 0) { if( !(*sp->decodepfunc)(tif, op0, rowsize) ) return 0; occ0 -= rowsize; op0 += rowsize; } return 1; } else return 0; } static int horDiff8(TIFF* tif, uint8* cp0, tmsize_t cc) { TIFFPredictorState* sp = PredictorState(tif); tmsize_t stride = sp->stride; unsigned char* cp = (unsigned char*) cp0; if((cc%stride)!=0) { TIFFErrorExt(tif->tif_clientdata, "horDiff8", "%s", "(cc%stride)!=0"); return 0; } if (cc > stride) { cc -= stride; /* * Pipeline the most common cases. */ if (stride == 3) { unsigned int r1, g1, b1; unsigned int r2 = cp[0]; unsigned int g2 = cp[1]; unsigned int b2 = cp[2]; do { r1 = cp[3]; cp[3] = (unsigned char)((r1-r2)&0xff); r2 = r1; g1 = cp[4]; cp[4] = (unsigned char)((g1-g2)&0xff); g2 = g1; b1 = cp[5]; cp[5] = (unsigned char)((b1-b2)&0xff); b2 = b1; cp += 3; } while ((cc -= 3) > 0); } else if (stride == 4) { unsigned int r1, g1, b1, a1; unsigned int r2 = cp[0]; unsigned int g2 = cp[1]; unsigned int b2 = cp[2]; unsigned int a2 = cp[3]; do { r1 = cp[4]; cp[4] = (unsigned char)((r1-r2)&0xff); r2 = r1; g1 = cp[5]; cp[5] = (unsigned char)((g1-g2)&0xff); g2 = g1; b1 = cp[6]; cp[6] = (unsigned char)((b1-b2)&0xff); b2 = b1; a1 = cp[7]; cp[7] = (unsigned char)((a1-a2)&0xff); a2 = a1; cp += 4; } while ((cc -= 4) > 0); } else { cp += cc - 1; do { REPEAT4(stride, cp[stride] = (unsigned char)((cp[stride] - cp[0])&0xff); cp--) } while ((cc -= stride) > 0); } } return 1; } static int horDiff16(TIFF* tif, uint8* cp0, tmsize_t cc) { TIFFPredictorState* sp = PredictorState(tif); tmsize_t stride = sp->stride; uint16 *wp = 
(uint16*) cp0; tmsize_t wc = cc/2; if((cc%(2*stride))!=0) { TIFFErrorExt(tif->tif_clientdata, "horDiff8", "%s", "(cc%(2*stride))!=0"); return 0; } if (wc > stride) { wc -= stride; wp += wc - 1; do { REPEAT4(stride, wp[stride] = (uint16)(((unsigned int)wp[stride] - (unsigned int)wp[0]) & 0xffff); wp--) wc -= stride; } while (wc > 0); } return 1; } static int swabHorDiff16(TIFF* tif, uint8* cp0, tmsize_t cc) { uint16* wp = (uint16*) cp0; tmsize_t wc = cc / 2; if( !horDiff16(tif, cp0, cc) ) return 0; TIFFSwabArrayOfShort(wp, wc); return 1; } static int horDiff32(TIFF* tif, uint8* cp0, tmsize_t cc) { TIFFPredictorState* sp = PredictorState(tif); tmsize_t stride = sp->stride; uint32 *wp = (uint32*) cp0; tmsize_t wc = cc/4; if((cc%(4*stride))!=0) { TIFFErrorExt(tif->tif_clientdata, "horDiff32", "%s", "(cc%(4*stride))!=0"); return 0; } if (wc > stride) { wc -= stride; wp += wc - 1; do { REPEAT4(stride, wp[stride] -= wp[0]; wp--) wc -= stride; } while (wc > 0); } return 1; } static int swabHorDiff32(TIFF* tif, uint8* cp0, tmsize_t cc) { uint32* wp = (uint32*) cp0; tmsize_t wc = cc / 4; if( !horDiff32(tif, cp0, cc) ) return 0; TIFFSwabArrayOfLong(wp, wc); return 1; } /* * Floating point predictor differencing routine. 
*/ static int fpDiff(TIFF* tif, uint8* cp0, tmsize_t cc) { tmsize_t stride = PredictorState(tif)->stride; uint32 bps = tif->tif_dir.td_bitspersample / 8; tmsize_t wc = cc / bps; tmsize_t count; uint8 *cp = (uint8 *) cp0; uint8 *tmp = (uint8 *)_TIFFmalloc(cc); if((cc%(bps*stride))!=0) { TIFFErrorExt(tif->tif_clientdata, "fpDiff", "%s", "(cc%(bps*stride))!=0"); return 0; } if (!tmp) return 0; _TIFFmemcpy(tmp, cp0, cc); for (count = 0; count < wc; count++) { uint32 byte; for (byte = 0; byte < bps; byte++) { #if WORDS_BIGENDIAN cp[byte * wc + count] = tmp[bps * count + byte]; #else cp[(bps - byte - 1) * wc + count] = tmp[bps * count + byte]; #endif } } _TIFFfree(tmp); cp = (uint8 *) cp0; cp += cc - stride - 1; for (count = cc; count > stride; count -= stride) REPEAT4(stride, cp[stride] = (unsigned char)((cp[stride] - cp[0])&0xff); cp--) return 1; } static int PredictorEncodeRow(TIFF* tif, uint8* bp, tmsize_t cc, uint16 s) { TIFFPredictorState *sp = PredictorState(tif); assert(sp != NULL); assert(sp->encodepfunc != NULL); assert(sp->encoderow != NULL); /* XXX horizontal differencing alters user's data XXX */ if( !(*sp->encodepfunc)(tif, bp, cc) ) return 0; return (*sp->encoderow)(tif, bp, cc, s); } static int PredictorEncodeTile(TIFF* tif, uint8* bp0, tmsize_t cc0, uint16 s) { static const char module[] = "PredictorEncodeTile"; TIFFPredictorState *sp = PredictorState(tif); uint8 *working_copy; tmsize_t cc = cc0, rowsize; unsigned char* bp; int result_code; assert(sp != NULL); assert(sp->encodepfunc != NULL); assert(sp->encodetile != NULL); /* * Do predictor manipulation in a working buffer to avoid altering * the callers buffer. 
http://trac.osgeo.org/gdal/ticket/1965 */ working_copy = (uint8*) _TIFFmalloc(cc0); if( working_copy == NULL ) { TIFFErrorExt(tif->tif_clientdata, module, "Out of memory allocating " TIFF_SSIZE_FORMAT " byte temp buffer.", cc0 ); return 0; } memcpy( working_copy, bp0, cc0 ); bp = working_copy; rowsize = sp->rowsize; assert(rowsize > 0); if((cc0%rowsize)!=0) { TIFFErrorExt(tif->tif_clientdata, "PredictorEncodeTile", "%s", "(cc0%rowsize)!=0"); return 0; } while (cc > 0) { (*sp->encodepfunc)(tif, bp, rowsize); cc -= rowsize; bp += rowsize; } result_code = (*sp->encodetile)(tif, working_copy, cc0, s); _TIFFfree( working_copy ); return result_code; } #define FIELD_PREDICTOR (FIELD_CODEC+0) /* XXX */ static const TIFFField predictFields[] = { { TIFFTAG_PREDICTOR, 1, 1, TIFF_SHORT, 0, TIFF_SETGET_UINT16, TIFF_SETGET_UINT16, FIELD_PREDICTOR, FALSE, FALSE, "Predictor", NULL }, }; static int PredictorVSetField(TIFF* tif, uint32 tag, va_list ap) { TIFFPredictorState *sp = PredictorState(tif); assert(sp != NULL); assert(sp->vsetparent != NULL); switch (tag) { case TIFFTAG_PREDICTOR: sp->predictor = (uint16) va_arg(ap, uint16_vap); TIFFSetFieldBit(tif, FIELD_PREDICTOR); break; default: return (*sp->vsetparent)(tif, tag, ap); } tif->tif_flags |= TIFF_DIRTYDIRECT; return 1; } static int PredictorVGetField(TIFF* tif, uint32 tag, va_list ap) { TIFFPredictorState *sp = PredictorState(tif); assert(sp != NULL); assert(sp->vgetparent != NULL); switch (tag) { case TIFFTAG_PREDICTOR: *va_arg(ap, uint16*) = (uint16)sp->predictor; break; default: return (*sp->vgetparent)(tif, tag, ap); } return 1; } static void PredictorPrintDir(TIFF* tif, FILE* fd, long flags) { TIFFPredictorState* sp = PredictorState(tif); (void) flags; if (TIFFFieldSet(tif,FIELD_PREDICTOR)) { fprintf(fd, " Predictor: "); switch (sp->predictor) { case 1: fprintf(fd, "none "); break; case 2: fprintf(fd, "horizontal differencing "); break; case 3: fprintf(fd, "floating point predictor "); break; } fprintf(fd, "%u 
(0x%x)\n", sp->predictor, sp->predictor); } if (sp->printdir) (*sp->printdir)(tif, fd, flags); } int TIFFPredictorInit(TIFF* tif) { TIFFPredictorState* sp = PredictorState(tif); assert(sp != 0); /* * Merge codec-specific tag information. */ if (!_TIFFMergeFields(tif, predictFields, TIFFArrayCount(predictFields))) { TIFFErrorExt(tif->tif_clientdata, "TIFFPredictorInit", "Merging Predictor codec-specific tags failed"); return 0; } /* * Override parent get/set field methods. */ sp->vgetparent = tif->tif_tagmethods.vgetfield; tif->tif_tagmethods.vgetfield = PredictorVGetField;/* hook for predictor tag */ sp->vsetparent = tif->tif_tagmethods.vsetfield; tif->tif_tagmethods.vsetfield = PredictorVSetField;/* hook for predictor tag */ sp->printdir = tif->tif_tagmethods.printdir; tif->tif_tagmethods.printdir = PredictorPrintDir; /* hook for predictor tag */ sp->setupdecode = tif->tif_setupdecode; tif->tif_setupdecode = PredictorSetupDecode; sp->setupencode = tif->tif_setupencode; tif->tif_setupencode = PredictorSetupEncode; sp->predictor = 1; /* default value */ sp->encodepfunc = NULL; /* no predictor routine */ sp->decodepfunc = NULL; /* no predictor routine */ return 1; } int TIFFPredictorCleanup(TIFF* tif) { TIFFPredictorState* sp = PredictorState(tif); assert(sp != 0); tif->tif_tagmethods.vgetfield = sp->vgetparent; tif->tif_tagmethods.vsetfield = sp->vsetparent; tif->tif_tagmethods.printdir = sp->printdir; tif->tif_setupdecode = sp->setupdecode; tif->tif_setupencode = sp->setupencode; return 1; } /* vim: set ts=8 sts=8 sw=8 noet: */ /* * Local Variables: * mode: c * c-basic-offset: 8 * fill-column: 78 * End: */
horAcc32(TIFF* tif, uint8* cp0, tmsize_t cc) { tmsize_t stride = PredictorState(tif)->stride; uint32* wp = (uint32*) cp0; tmsize_t wc = cc / 4; assert((cc%(4*stride))==0); if (wc > stride) { wc -= stride; do { REPEAT4(stride, wp[stride] += wp[0]; wp++) wc -= stride; } while (wc > 0); } }
horAcc32(TIFF* tif, uint8* cp0, tmsize_t cc) { tmsize_t stride = PredictorState(tif)->stride; uint32* wp = (uint32*) cp0; tmsize_t wc = cc / 4; if((cc%(4*stride))!=0) { TIFFErrorExt(tif->tif_clientdata, "horAcc32", "%s", "cc%(4*stride))!=0"); return 0; } if (wc > stride) { wc -= stride; do { REPEAT4(stride, wp[stride] += wp[0]; wp++) wc -= stride; } while (wc > 0); } return 1; }
{'added': [(37, 'static int horAcc8(TIFF* tif, uint8* cp0, tmsize_t cc);'), (38, 'static int horAcc16(TIFF* tif, uint8* cp0, tmsize_t cc);'), (39, 'static int horAcc32(TIFF* tif, uint8* cp0, tmsize_t cc);'), (40, 'static int swabHorAcc16(TIFF* tif, uint8* cp0, tmsize_t cc);'), (41, 'static int swabHorAcc32(TIFF* tif, uint8* cp0, tmsize_t cc);'), (42, 'static int horDiff8(TIFF* tif, uint8* cp0, tmsize_t cc);'), (43, 'static int horDiff16(TIFF* tif, uint8* cp0, tmsize_t cc);'), (44, 'static int horDiff32(TIFF* tif, uint8* cp0, tmsize_t cc);'), (45, 'static int swabHorDiff16(TIFF* tif, uint8* cp0, tmsize_t cc);'), (46, 'static int swabHorDiff32(TIFF* tif, uint8* cp0, tmsize_t cc);'), (47, 'static int fpAcc(TIFF* tif, uint8* cp0, tmsize_t cc);'), (48, 'static int fpDiff(TIFF* tif, uint8* cp0, tmsize_t cc);'), (276, 'static int'), (282, ' if((cc%stride)!=0)'), (283, ' {'), (284, ' TIFFErrorExt(tif->tif_clientdata, "horAcc8",'), (285, ' "%s", "(cc%stride)!=0");'), (286, ' return 0;'), (287, ' }'), (288, ''), (330, '\treturn 1;'), (333, 'static int'), (340, ' return horAcc16(tif, cp0, cc);'), (343, 'static int'), (350, ' if((cc%(2*stride))!=0)'), (351, ' {'), (352, ' TIFFErrorExt(tif->tif_clientdata, "horAcc16",'), (353, ' "%s", "cc%(2*stride))!=0");'), (354, ' return 0;'), (355, ' }'), (364, '\treturn 1;'), (367, 'static int'), (374, '\treturn horAcc32(tif, cp0, cc);'), (377, 'static int'), (384, ' if((cc%(4*stride))!=0)'), (385, ' {'), (386, ' TIFFErrorExt(tif->tif_clientdata, "horAcc32",'), (387, ' "%s", "cc%(4*stride))!=0");'), (388, ' return 0;'), (389, ' }'), (398, '\treturn 1;'), (404, 'static int'), (414, ' if(cc%(bps*stride)!=0)'), (415, ' {'), (416, ' TIFFErrorExt(tif->tif_clientdata, "fpAcc",'), (417, ' "%s", "cc%(bps*stride))!=0");'), (418, ' return 0;'), (419, ' }'), (422, '\t\treturn 0;'), (444, ' return 1;'), (460, '\t\treturn (*sp->decodepfunc)(tif, op0, occ0);'), (483, '\t\tif((occ0%rowsize) !=0)'), (484, ' {'), (485, ' TIFFErrorExt(tif->tif_clientdata, 
"PredictorDecodeTile",'), (486, ' "%s", "occ0%rowsize != 0");'), (487, ' return 0;'), (488, ' }'), (491, '\t\t\tif( !(*sp->decodepfunc)(tif, op0, rowsize) )'), (492, ' return 0;'), (501, 'static int'), (508, ' if((cc%stride)!=0)'), (509, ' {'), (510, ' TIFFErrorExt(tif->tif_clientdata, "horDiff8",'), (511, ' "%s", "(cc%stride)!=0");'), (512, ' return 0;'), (513, ' }'), (551, '\treturn 1;'), (554, 'static int'), (562, ' if((cc%(2*stride))!=0)'), (563, ' {'), (564, ' TIFFErrorExt(tif->tif_clientdata, "horDiff8",'), (565, ' "%s", "(cc%(2*stride))!=0");'), (566, ' return 0;'), (567, ' }'), (577, '\treturn 1;'), (580, 'static int'), (586, ' if( !horDiff16(tif, cp0, cc) )'), (587, ' return 0;'), (590, ' return 1;'), (593, 'static int'), (601, ' if((cc%(4*stride))!=0)'), (602, ' {'), (603, ' TIFFErrorExt(tif->tif_clientdata, "horDiff32",'), (604, ' "%s", "(cc%(4*stride))!=0");'), (605, ' return 0;'), (606, ' }'), (616, '\treturn 1;'), (619, 'static int'), (625, ' if( !horDiff32(tif, cp0, cc) )'), (626, ' return 0;'), (629, ' return 1;'), (635, 'static int'), (645, ' if((cc%(bps*stride))!=0)'), (646, ' {'), (647, ' TIFFErrorExt(tif->tif_clientdata, "fpDiff",'), (648, ' "%s", "(cc%(bps*stride))!=0");'), (649, ' return 0;'), (650, ' }'), (652, '\t\treturn 0;'), (672, ' return 1;'), (685, '\tif( !(*sp->encodepfunc)(tif, bp, cc) )'), (686, ' return 0;'), (721, '\tif((cc0%rowsize)!=0)'), (722, ' {'), (723, ' TIFFErrorExt(tif->tif_clientdata, "PredictorEncodeTile",'), (724, ' "%s", "(cc0%rowsize)!=0");'), (725, ' return 0;'), (726, ' }')], 'deleted': [(37, 'static void horAcc8(TIFF* tif, uint8* cp0, tmsize_t cc);'), (38, 'static void horAcc16(TIFF* tif, uint8* cp0, tmsize_t cc);'), (39, 'static void horAcc32(TIFF* tif, uint8* cp0, tmsize_t cc);'), (40, 'static void swabHorAcc16(TIFF* tif, uint8* cp0, tmsize_t cc);'), (41, 'static void swabHorAcc32(TIFF* tif, uint8* cp0, tmsize_t cc);'), (42, 'static void horDiff8(TIFF* tif, uint8* cp0, tmsize_t cc);'), (43, 'static void 
horDiff16(TIFF* tif, uint8* cp0, tmsize_t cc);'), (44, 'static void horDiff32(TIFF* tif, uint8* cp0, tmsize_t cc);'), (45, 'static void swabHorDiff16(TIFF* tif, uint8* cp0, tmsize_t cc);'), (46, 'static void swabHorDiff32(TIFF* tif, uint8* cp0, tmsize_t cc);'), (47, 'static void fpAcc(TIFF* tif, uint8* cp0, tmsize_t cc);'), (48, 'static void fpDiff(TIFF* tif, uint8* cp0, tmsize_t cc);'), (276, 'static void'), (282, '\tassert((cc%stride)==0);'), (326, 'static void'), (333, ' horAcc16(tif, cp0, cc);'), (336, 'static void'), (343, '\tassert((cc%(2*stride))==0);'), (354, 'static void'), (361, '\thorAcc32(tif, cp0, cc);'), (364, 'static void'), (371, '\tassert((cc%(4*stride))==0);'), (385, 'static void'), (395, '\tassert((cc%(bps*stride))==0);'), (398, '\t\treturn;'), (435, '\t\t(*sp->decodepfunc)(tif, op0, occ0);'), (436, '\t\treturn 1;'), (459, '\t\tassert((occ0%rowsize)==0);'), (462, '\t\t\t(*sp->decodepfunc)(tif, op0, rowsize);'), (471, 'static void'), (478, '\tassert((cc%stride)==0);'), (518, 'static void'), (526, '\tassert((cc%(2*stride))==0);'), (538, 'static void'), (544, ' horDiff16(tif, cp0, cc);'), (549, 'static void'), (557, '\tassert((cc%(4*stride))==0);'), (569, 'static void'), (575, ' horDiff32(tif, cp0, cc);'), (583, 'static void'), (593, '\tassert((cc%(bps*stride))==0);'), (594, ''), (596, '\t\treturn;'), (628, '\t(*sp->encodepfunc)(tif, bp, cc);'), (663, '\tassert((cc0%rowsize)==0);')]}
108
45
637
4,451
14
100
3
https://github.com/vadz/libtiff
CVE-2016-9535
CWE-119
2,976
jas_image.c
C++
decode_twos_comp
/* * Copyright (c) 1999-2000 Image Power, Inc. and the University of * British Columbia. * Copyright (c) 2001-2003 Michael David Adams. * All rights reserved. */ /* __START_OF_JASPER_LICENSE__ * * JasPer License Version 2.0 * * Copyright (c) 2001-2006 Michael David Adams * Copyright (c) 1999-2000 Image Power, Inc. * Copyright (c) 1999-2000 The University of British Columbia * * All rights reserved. * * Permission is hereby granted, free of charge, to any person (the * "User") obtaining a copy of this software and associated documentation * files (the "Software"), to deal in the Software without restriction, * including without limitation the rights to use, copy, modify, merge, * publish, distribute, and/or sell copies of the Software, and to permit * persons to whom the Software is furnished to do so, subject to the * following conditions: * * 1. The above copyright notices and this permission notice (which * includes the disclaimer below) shall be included in all copies or * substantial portions of the Software. * * 2. The name of a copyright holder shall not be used to endorse or * promote products derived from the Software without specific prior * written permission. * * THIS DISCLAIMER OF WARRANTY CONSTITUTES AN ESSENTIAL PART OF THIS * LICENSE. NO USE OF THE SOFTWARE IS AUTHORIZED HEREUNDER EXCEPT UNDER * THIS DISCLAIMER. THE SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS * "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING * BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A * PARTICULAR PURPOSE AND NONINFRINGEMENT OF THIRD PARTY RIGHTS. IN NO * EVENT SHALL THE COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, OR ANY SPECIAL * INDIRECT OR CONSEQUENTIAL DAMAGES, OR ANY DAMAGES WHATSOEVER RESULTING * FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN ACTION OF CONTRACT, * NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF OR IN CONNECTION * WITH THE USE OR PERFORMANCE OF THIS SOFTWARE. 
NO ASSURANCES ARE * PROVIDED BY THE COPYRIGHT HOLDERS THAT THE SOFTWARE DOES NOT INFRINGE * THE PATENT OR OTHER INTELLECTUAL PROPERTY RIGHTS OF ANY OTHER ENTITY. * EACH COPYRIGHT HOLDER DISCLAIMS ANY LIABILITY TO THE USER FOR CLAIMS * BROUGHT BY ANY OTHER ENTITY BASED ON INFRINGEMENT OF INTELLECTUAL * PROPERTY RIGHTS OR OTHERWISE. AS A CONDITION TO EXERCISING THE RIGHTS * GRANTED HEREUNDER, EACH USER HEREBY ASSUMES SOLE RESPONSIBILITY TO SECURE * ANY OTHER INTELLECTUAL PROPERTY RIGHTS NEEDED, IF ANY. THE SOFTWARE * IS NOT FAULT-TOLERANT AND IS NOT INTENDED FOR USE IN MISSION-CRITICAL * SYSTEMS, SUCH AS THOSE USED IN THE OPERATION OF NUCLEAR FACILITIES, * AIRCRAFT NAVIGATION OR COMMUNICATION SYSTEMS, AIR TRAFFIC CONTROL * SYSTEMS, DIRECT LIFE SUPPORT MACHINES, OR WEAPONS SYSTEMS, IN WHICH * THE FAILURE OF THE SOFTWARE OR SYSTEM COULD LEAD DIRECTLY TO DEATH, * PERSONAL INJURY, OR SEVERE PHYSICAL OR ENVIRONMENTAL DAMAGE ("HIGH * RISK ACTIVITIES"). THE COPYRIGHT HOLDERS SPECIFICALLY DISCLAIM ANY * EXPRESS OR IMPLIED WARRANTY OF FITNESS FOR HIGH RISK ACTIVITIES. * * __END_OF_JASPER_LICENSE__ */ /* * Image Library * * $Id$ */ /******************************************************************************\ * Includes. \******************************************************************************/ #include <stdlib.h> #include <stdio.h> #include <string.h> #include <assert.h> #include <ctype.h> #include <inttypes.h> #include <stdbool.h> #include <limits.h> #include "jasper/jas_math.h" #include "jasper/jas_image.h" #include "jasper/jas_malloc.h" #include "jasper/jas_string.h" #include "jasper/jas_debug.h" /******************************************************************************\ * Types. \******************************************************************************/ #define FLOORDIV(x, y) ((x) / (y)) /******************************************************************************\ * Local prototypes. 
\******************************************************************************/ static jas_image_cmpt_t *jas_image_cmpt_create0(void); static void jas_image_cmpt_destroy(jas_image_cmpt_t *cmpt); static jas_image_cmpt_t *jas_image_cmpt_create(int_fast32_t tlx, int_fast32_t tly, int_fast32_t hstep, int_fast32_t vstep, int_fast32_t width, int_fast32_t height, uint_fast16_t depth, bool sgnd, uint_fast32_t inmem); static void jas_image_setbbox(jas_image_t *image); static jas_image_cmpt_t *jas_image_cmpt_copy(jas_image_cmpt_t *cmpt); static int jas_image_growcmpts(jas_image_t *image, int maxcmpts); static uint_fast32_t inttobits(jas_seqent_t v, int prec, bool sgnd); static jas_seqent_t bitstoint(uint_fast32_t v, int prec, bool sgnd); static int putint(jas_stream_t *out, int sgnd, int prec, long val); static int getint(jas_stream_t *in, int sgnd, int prec, long *val); static void jas_image_calcbbox2(jas_image_t *image, jas_image_coord_t *tlx, jas_image_coord_t *tly, jas_image_coord_t *brx, jas_image_coord_t *bry); static long uptomult(long x, long y); static long downtomult(long x, long y); static long convert(long val, int oldsgnd, int oldprec, int newsgnd, int newprec); static void jas_image_calcbbox2(jas_image_t *image, jas_image_coord_t *tlx, jas_image_coord_t *tly, jas_image_coord_t *brx, jas_image_coord_t *bry); /******************************************************************************\ * Global data. \******************************************************************************/ static int jas_image_numfmts = 0; static jas_image_fmtinfo_t jas_image_fmtinfos[JAS_IMAGE_MAXFMTS]; /******************************************************************************\ * Create and destroy operations. 
\******************************************************************************/ jas_image_t *jas_image_create(int numcmpts, jas_image_cmptparm_t *cmptparms, int clrspc) { jas_image_t *image; uint_fast32_t rawsize; uint_fast32_t inmem; int cmptno; jas_image_cmptparm_t *cmptparm; if (!(image = jas_image_create0())) { return 0; } image->clrspc_ = clrspc; image->maxcmpts_ = numcmpts; image->inmem_ = true; /* Allocate memory for the per-component information. */ if (!(image->cmpts_ = jas_alloc2(image->maxcmpts_, sizeof(jas_image_cmpt_t *)))) { jas_image_destroy(image); return 0; } /* Initialize in case of failure. */ for (cmptno = 0; cmptno < image->maxcmpts_; ++cmptno) { image->cmpts_[cmptno] = 0; } /* Compute the approximate raw size of the image. */ rawsize = 0; for (cmptno = 0, cmptparm = cmptparms; cmptno < numcmpts; ++cmptno, ++cmptparm) { rawsize += cmptparm->width * cmptparm->height * (cmptparm->prec + 7) / 8; } /* Decide whether to buffer the image data in memory, based on the raw size of the image. */ inmem = (rawsize < JAS_IMAGE_INMEMTHRESH); /* Create the individual image components. 
*/ for (cmptno = 0, cmptparm = cmptparms; cmptno < numcmpts; ++cmptno, ++cmptparm) { if (!(image->cmpts_[cmptno] = jas_image_cmpt_create(cmptparm->tlx, cmptparm->tly, cmptparm->hstep, cmptparm->vstep, cmptparm->width, cmptparm->height, cmptparm->prec, cmptparm->sgnd, inmem))) { jas_image_destroy(image); return 0; } ++image->numcmpts_; } /* Determine the bounding box for all of the components on the reference grid (i.e., the image area) */ jas_image_setbbox(image); return image; } jas_image_t *jas_image_create0() { jas_image_t *image; if (!(image = jas_malloc(sizeof(jas_image_t)))) { return 0; } image->tlx_ = 0; image->tly_ = 0; image->brx_ = 0; image->bry_ = 0; image->clrspc_ = JAS_CLRSPC_UNKNOWN; image->numcmpts_ = 0; image->maxcmpts_ = 0; image->cmpts_ = 0; image->inmem_ = true; image->cmprof_ = 0; return image; } jas_image_t *jas_image_copy(jas_image_t *image) { jas_image_t *newimage; int cmptno; if (!(newimage = jas_image_create0())) { goto error; } if (jas_image_growcmpts(newimage, image->numcmpts_)) { goto error; } for (cmptno = 0; cmptno < image->numcmpts_; ++cmptno) { if (!(newimage->cmpts_[cmptno] = jas_image_cmpt_copy(image->cmpts_[cmptno]))) { goto error; } ++newimage->numcmpts_; } jas_image_setbbox(newimage); if (image->cmprof_) { if (!(newimage->cmprof_ = jas_cmprof_copy(image->cmprof_))) goto error; } return newimage; error: if (newimage) { jas_image_destroy(newimage); } return 0; } static jas_image_cmpt_t *jas_image_cmpt_create0() { jas_image_cmpt_t *cmpt; if (!(cmpt = jas_malloc(sizeof(jas_image_cmpt_t)))) { return 0; } memset(cmpt, 0, sizeof(jas_image_cmpt_t)); cmpt->type_ = JAS_IMAGE_CT_UNKNOWN; return cmpt; } static jas_image_cmpt_t *jas_image_cmpt_copy(jas_image_cmpt_t *cmpt) { jas_image_cmpt_t *newcmpt; if (!(newcmpt = jas_image_cmpt_create0())) { return 0; } newcmpt->tlx_ = cmpt->tlx_; newcmpt->tly_ = cmpt->tly_; newcmpt->hstep_ = cmpt->hstep_; newcmpt->vstep_ = cmpt->vstep_; newcmpt->width_ = cmpt->width_; newcmpt->height_ = cmpt->height_; 
newcmpt->prec_ = cmpt->prec_; newcmpt->sgnd_ = cmpt->sgnd_; newcmpt->cps_ = cmpt->cps_; newcmpt->type_ = cmpt->type_; if (!(newcmpt->stream_ = jas_stream_memopen(0, 0))) { goto error; } if (jas_stream_seek(cmpt->stream_, 0, SEEK_SET)) { goto error; } if (jas_stream_copy(newcmpt->stream_, cmpt->stream_, -1)) { goto error; } if (jas_stream_seek(newcmpt->stream_, 0, SEEK_SET)) { goto error; } return newcmpt; error: if (newcmpt) { jas_image_cmpt_destroy(newcmpt); } return 0; } void jas_image_destroy(jas_image_t *image) { int i; if (image->cmpts_) { for (i = 0; i < image->numcmpts_; ++i) { jas_image_cmpt_destroy(image->cmpts_[i]); image->cmpts_[i] = 0; } jas_free(image->cmpts_); } if (image->cmprof_) jas_cmprof_destroy(image->cmprof_); jas_free(image); } static jas_image_cmpt_t *jas_image_cmpt_create(int_fast32_t tlx, int_fast32_t tly, int_fast32_t hstep, int_fast32_t vstep, int_fast32_t width, int_fast32_t height, uint_fast16_t depth, bool sgnd, uint_fast32_t inmem) { jas_image_cmpt_t *cmpt; size_t size; cmpt = 0; if (width < 0 || height < 0 || hstep <= 0 || vstep <= 0) { goto error; } if (!jas_safe_intfast32_add(tlx, width, 0) || !jas_safe_intfast32_add(tly, height, 0)) { goto error; } if (!(cmpt = jas_malloc(sizeof(jas_image_cmpt_t)))) { goto error; } cmpt->type_ = JAS_IMAGE_CT_UNKNOWN; cmpt->tlx_ = tlx; cmpt->tly_ = tly; cmpt->hstep_ = hstep; cmpt->vstep_ = vstep; cmpt->width_ = width; cmpt->height_ = height; cmpt->prec_ = depth; cmpt->sgnd_ = sgnd; cmpt->stream_ = 0; cmpt->cps_ = (depth + 7) / 8; // Compute the number of samples in the image component, while protecting // against overflow. // size = cmpt->width_ * cmpt->height_ * cmpt->cps_; if (!jas_safe_size_mul(cmpt->width_, cmpt->height_, &size) || !jas_safe_size_mul(size, cmpt->cps_, &size)) { goto error; } cmpt->stream_ = (inmem) ? jas_stream_memopen2(0, size) : jas_stream_tmpfile(); if (!cmpt->stream_) { goto error; } /* Zero the component data. 
This isn't necessary, but it is convenient for debugging purposes. */ /* Note: conversion of size - 1 to long can overflow */ if (size > 0) { if (size - 1 > LONG_MAX) { goto error; } if (jas_stream_seek(cmpt->stream_, size - 1, SEEK_SET) < 0 || jas_stream_putc(cmpt->stream_, 0) == EOF || jas_stream_seek(cmpt->stream_, 0, SEEK_SET) < 0) { goto error; } } return cmpt; error: if (cmpt) { jas_image_cmpt_destroy(cmpt); } return 0; } static void jas_image_cmpt_destroy(jas_image_cmpt_t *cmpt) { if (cmpt->stream_) { jas_stream_close(cmpt->stream_); } jas_free(cmpt); } /******************************************************************************\ * Load and save operations. \******************************************************************************/ jas_image_t *jas_image_decode(jas_stream_t *in, int fmt, char *optstr) { jas_image_fmtinfo_t *fmtinfo; jas_image_t *image; image = 0; /* If possible, try to determine the format of the input data. */ if (fmt < 0) { if ((fmt = jas_image_getfmt(in)) < 0) goto error; } /* Is it possible to decode an image represented in this format? */ if (!(fmtinfo = jas_image_lookupfmtbyid(fmt))) goto error; if (!fmtinfo->ops.decode) goto error; /* Decode the image. */ if (!(image = (*fmtinfo->ops.decode)(in, optstr))) goto error; /* Create a color profile if needed. */ if (!jas_clrspc_isunknown(image->clrspc_) && !jas_clrspc_isgeneric(image->clrspc_) && !image->cmprof_) { if (!(image->cmprof_ = jas_cmprof_createfromclrspc(jas_image_clrspc(image)))) goto error; } return image; error: if (image) jas_image_destroy(image); return 0; } int jas_image_encode(jas_image_t *image, jas_stream_t *out, int fmt, char *optstr) { jas_image_fmtinfo_t *fmtinfo; if (!(fmtinfo = jas_image_lookupfmtbyid(fmt))) { return -1; } return (fmtinfo->ops.encode) ? (*fmtinfo->ops.encode)(image, out, optstr) : (-1); } /******************************************************************************\ * Component read and write operations. 
\******************************************************************************/ int jas_image_readcmpt(jas_image_t *image, int cmptno, jas_image_coord_t x, jas_image_coord_t y, jas_image_coord_t width, jas_image_coord_t height, jas_matrix_t *data) { jas_image_cmpt_t *cmpt; jas_image_coord_t i; jas_image_coord_t j; int k; jas_seqent_t v; int c; jas_seqent_t *dr; jas_seqent_t *d; int drs; JAS_DBGLOG(10, ("jas_image_readcmpt(%p, %d, %ld, %ld, %ld, %ld, %p)\n", image, cmptno, JAS_CAST(long, x), JAS_CAST(long, y), JAS_CAST(long, width), JAS_CAST(long, height), data)); if (cmptno < 0 || cmptno >= image->numcmpts_) { return -1; } cmpt = image->cmpts_[cmptno]; if (x >= cmpt->width_ || y >= cmpt->height_ || x + width > cmpt->width_ || y + height > cmpt->height_) { return -1; } if (!jas_matrix_numrows(data) || !jas_matrix_numcols(data)) { return -1; } if (jas_matrix_numrows(data) != height || jas_matrix_numcols(data) != width) { if (jas_matrix_resize(data, height, width)) { return -1; } } dr = jas_matrix_getref(data, 0, 0); drs = jas_matrix_rowstep(data); for (i = 0; i < height; ++i, dr += drs) { d = dr; if (jas_stream_seek(cmpt->stream_, (cmpt->width_ * (y + i) + x) * cmpt->cps_, SEEK_SET) < 0) { return -1; } for (j = width; j > 0; --j, ++d) { v = 0; for (k = cmpt->cps_; k > 0; --k) { if ((c = jas_stream_getc(cmpt->stream_)) == EOF) { return -1; } v = (v << 8) | (c & 0xff); } *d = bitstoint(v, cmpt->prec_, cmpt->sgnd_); } } return 0; } int jas_image_writecmpt(jas_image_t *image, int cmptno, jas_image_coord_t x, jas_image_coord_t y, jas_image_coord_t width, jas_image_coord_t height, jas_matrix_t *data) { jas_image_cmpt_t *cmpt; jas_image_coord_t i; jas_image_coord_t j; jas_seqent_t *d; jas_seqent_t *dr; int drs; jas_seqent_t v; int k; int c; JAS_DBGLOG(10, ("jas_image_writecmpt(%p, %d, %ld, %ld, %ld, %ld, %p)\n", image, cmptno, JAS_CAST(long, x), JAS_CAST(long, y), JAS_CAST(long, width), JAS_CAST(long, height), data)); if (cmptno < 0 || cmptno >= image->numcmpts_) { return 
-1; } cmpt = image->cmpts_[cmptno]; if (x >= cmpt->width_ || y >= cmpt->height_ || x + width > cmpt->width_ || y + height > cmpt->height_) { return -1; } if (!jas_matrix_numrows(data) || !jas_matrix_numcols(data)) { return -1; } if (jas_matrix_numrows(data) != height || jas_matrix_numcols(data) != width) { return -1; } dr = jas_matrix_getref(data, 0, 0); drs = jas_matrix_rowstep(data); for (i = 0; i < height; ++i, dr += drs) { d = dr; if (jas_stream_seek(cmpt->stream_, (cmpt->width_ * (y + i) + x) * cmpt->cps_, SEEK_SET) < 0) { return -1; } for (j = width; j > 0; --j, ++d) { v = inttobits(*d, cmpt->prec_, cmpt->sgnd_); for (k = cmpt->cps_; k > 0; --k) { c = (v >> (8 * (cmpt->cps_ - 1))) & 0xff; if (jas_stream_putc(cmpt->stream_, (unsigned char) c) == EOF) { return -1; } v <<= 8; } } } return 0; } /******************************************************************************\ * File format operations. \******************************************************************************/ void jas_image_clearfmts() { int i; jas_image_fmtinfo_t *fmtinfo; for (i = 0; i < jas_image_numfmts; ++i) { fmtinfo = &jas_image_fmtinfos[i]; if (fmtinfo->name) { jas_free(fmtinfo->name); fmtinfo->name = 0; } if (fmtinfo->ext) { jas_free(fmtinfo->ext); fmtinfo->ext = 0; } if (fmtinfo->desc) { jas_free(fmtinfo->desc); fmtinfo->desc = 0; } } jas_image_numfmts = 0; } int jas_image_addfmt(int id, char *name, char *ext, char *desc, jas_image_fmtops_t *ops) { jas_image_fmtinfo_t *fmtinfo; assert(id >= 0 && name && ext && ops); if (jas_image_numfmts >= JAS_IMAGE_MAXFMTS) { return -1; } fmtinfo = &jas_image_fmtinfos[jas_image_numfmts]; fmtinfo->id = id; if (!(fmtinfo->name = jas_strdup(name))) { return -1; } if (!(fmtinfo->ext = jas_strdup(ext))) { jas_free(fmtinfo->name); return -1; } if (!(fmtinfo->desc = jas_strdup(desc))) { jas_free(fmtinfo->name); jas_free(fmtinfo->ext); return -1; } fmtinfo->ops = *ops; ++jas_image_numfmts; return 0; } int jas_image_strtofmt(char *name) { 
jas_image_fmtinfo_t *fmtinfo; if (!(fmtinfo = jas_image_lookupfmtbyname(name))) { return -1; } return fmtinfo->id; } char *jas_image_fmttostr(int fmt) { jas_image_fmtinfo_t *fmtinfo; if (!(fmtinfo = jas_image_lookupfmtbyid(fmt))) { return 0; } return fmtinfo->name; } int jas_image_getfmt(jas_stream_t *in) { jas_image_fmtinfo_t *fmtinfo; int found; int i; /* Check for data in each of the supported formats. */ found = 0; for (i = 0, fmtinfo = jas_image_fmtinfos; i < jas_image_numfmts; ++i, ++fmtinfo) { if (fmtinfo->ops.validate) { /* Is the input data valid for this format? */ JAS_DBGLOG(20, ("testing for format %s ... ", fmtinfo->name)); if (!(*fmtinfo->ops.validate)(in)) { JAS_DBGLOG(20, ("test succeeded\n")); found = 1; break; } JAS_DBGLOG(20, ("test failed\n")); } } return found ? fmtinfo->id : (-1); } int jas_image_fmtfromname(char *name) { int i; char *ext; jas_image_fmtinfo_t *fmtinfo; /* Get the file name extension. */ if (!(ext = strrchr(name, '.'))) { return -1; } ++ext; /* Try to find a format that uses this extension. */ for (i = 0, fmtinfo = jas_image_fmtinfos; i < jas_image_numfmts; ++i, ++fmtinfo) { /* Do we have a match? */ if (!strcmp(ext, fmtinfo->ext)) { return fmtinfo->id; } } return -1; } /******************************************************************************\ * Miscellaneous operations. 
\******************************************************************************/ bool jas_image_cmpt_domains_same(jas_image_t *image) { int cmptno; jas_image_cmpt_t *cmpt; jas_image_cmpt_t *cmpt0; cmpt0 = image->cmpts_[0]; for (cmptno = 1; cmptno < image->numcmpts_; ++cmptno) { cmpt = image->cmpts_[cmptno]; if (cmpt->tlx_ != cmpt0->tlx_ || cmpt->tly_ != cmpt0->tly_ || cmpt->hstep_ != cmpt0->hstep_ || cmpt->vstep_ != cmpt0->vstep_ || cmpt->width_ != cmpt0->width_ || cmpt->height_ != cmpt0->height_) { return 0; } } return 1; } uint_fast32_t jas_image_rawsize(jas_image_t *image) { uint_fast32_t rawsize; int cmptno; jas_image_cmpt_t *cmpt; rawsize = 0; for (cmptno = 0; cmptno < image->numcmpts_; ++cmptno) { cmpt = image->cmpts_[cmptno]; rawsize += (cmpt->width_ * cmpt->height_ * cmpt->prec_ + 7) / 8; } return rawsize; } void jas_image_delcmpt(jas_image_t *image, int cmptno) { if (cmptno >= image->numcmpts_) { return; } jas_image_cmpt_destroy(image->cmpts_[cmptno]); if (cmptno < image->numcmpts_) { memmove(&image->cmpts_[cmptno], &image->cmpts_[cmptno + 1], (image->numcmpts_ - 1 - cmptno) * sizeof(jas_image_cmpt_t *)); } --image->numcmpts_; jas_image_setbbox(image); } int jas_image_addcmpt(jas_image_t *image, int cmptno, jas_image_cmptparm_t *cmptparm) { jas_image_cmpt_t *newcmpt; if (cmptno < 0) { cmptno = image->numcmpts_; } assert(cmptno >= 0 && cmptno <= image->numcmpts_); if (image->numcmpts_ >= image->maxcmpts_) { if (jas_image_growcmpts(image, image->maxcmpts_ + 128)) { return -1; } } if (!(newcmpt = jas_image_cmpt_create(cmptparm->tlx, cmptparm->tly, cmptparm->hstep, cmptparm->vstep, cmptparm->width, cmptparm->height, cmptparm->prec, cmptparm->sgnd, 1))) { return -1; } if (cmptno < image->numcmpts_) { memmove(&image->cmpts_[cmptno + 1], &image->cmpts_[cmptno], (image->numcmpts_ - cmptno) * sizeof(jas_image_cmpt_t *)); } image->cmpts_[cmptno] = newcmpt; ++image->numcmpts_; jas_image_setbbox(image); return 0; } jas_image_fmtinfo_t *jas_image_lookupfmtbyid(int id) 
{ int i; jas_image_fmtinfo_t *fmtinfo; for (i = 0, fmtinfo = jas_image_fmtinfos; i < jas_image_numfmts; ++i, ++fmtinfo) { if (fmtinfo->id == id) { return fmtinfo; } } return 0; } jas_image_fmtinfo_t *jas_image_lookupfmtbyname(const char *name) { int i; jas_image_fmtinfo_t *fmtinfo; for (i = 0, fmtinfo = jas_image_fmtinfos; i < jas_image_numfmts; ++i, ++fmtinfo) { if (!strcmp(fmtinfo->name, name)) { return fmtinfo; } } return 0; } static uint_fast32_t inttobits(jas_seqent_t v, int prec, bool sgnd) { uint_fast32_t ret; ret = ((sgnd && v < 0) ? ((1 << prec) + v) : v) & JAS_ONES(prec); return ret; } static jas_seqent_t bitstoint(uint_fast32_t v, int prec, bool sgnd) { jas_seqent_t ret; v &= JAS_ONES(prec); ret = (sgnd && (v & (1 << (prec - 1)))) ? (v - (1 << prec)) : v; return ret; } static void jas_image_setbbox(jas_image_t *image) { jas_image_cmpt_t *cmpt; int cmptno; int_fast32_t x; int_fast32_t y; if (image->numcmpts_ > 0) { /* Determine the bounding box for all of the components on the reference grid (i.e., the image area) */ cmpt = image->cmpts_[0]; image->tlx_ = cmpt->tlx_; image->tly_ = cmpt->tly_; image->brx_ = cmpt->tlx_ + cmpt->hstep_ * (cmpt->width_ - 1) + 1; image->bry_ = cmpt->tly_ + cmpt->vstep_ * (cmpt->height_ - 1) + 1; for (cmptno = 1; cmptno < image->numcmpts_; ++cmptno) { cmpt = image->cmpts_[cmptno]; if (image->tlx_ > cmpt->tlx_) { image->tlx_ = cmpt->tlx_; } if (image->tly_ > cmpt->tly_) { image->tly_ = cmpt->tly_; } x = cmpt->tlx_ + cmpt->hstep_ * (cmpt->width_ - 1) + 1; if (image->brx_ < x) { image->brx_ = x; } y = cmpt->tly_ + cmpt->vstep_ * (cmpt->height_ - 1) + 1; if (image->bry_ < y) { image->bry_ = y; } } } else { image->tlx_ = 0; image->tly_ = 0; image->brx_ = 0; image->bry_ = 0; } } static int jas_image_growcmpts(jas_image_t *image, int maxcmpts) { jas_image_cmpt_t **newcmpts; int cmptno; newcmpts = (!image->cmpts_) ? 
jas_alloc2(maxcmpts, sizeof(jas_image_cmpt_t *)) : jas_realloc2(image->cmpts_, maxcmpts, sizeof(jas_image_cmpt_t *)); if (!newcmpts) { return -1; } image->cmpts_ = newcmpts; image->maxcmpts_ = maxcmpts; for (cmptno = image->numcmpts_; cmptno < image->maxcmpts_; ++cmptno) { image->cmpts_[cmptno] = 0; } return 0; } int jas_image_copycmpt(jas_image_t *dstimage, int dstcmptno, jas_image_t *srcimage, int srccmptno) { jas_image_cmpt_t *newcmpt; if (dstimage->numcmpts_ >= dstimage->maxcmpts_) { if (jas_image_growcmpts(dstimage, dstimage->maxcmpts_ + 128)) { return -1; } } if (!(newcmpt = jas_image_cmpt_copy(srcimage->cmpts_[srccmptno]))) { return -1; } if (dstcmptno < dstimage->numcmpts_) { memmove(&dstimage->cmpts_[dstcmptno + 1], &dstimage->cmpts_[dstcmptno], (dstimage->numcmpts_ - dstcmptno) * sizeof(jas_image_cmpt_t *)); } dstimage->cmpts_[dstcmptno] = newcmpt; ++dstimage->numcmpts_; jas_image_setbbox(dstimage); return 0; } void jas_image_dump(jas_image_t *image, FILE *out) { long buf[1024]; int cmptno; int n; int i; int width; int height; jas_image_cmpt_t *cmpt; for (cmptno = 0; cmptno < image->numcmpts_; ++cmptno) { cmpt = image->cmpts_[cmptno]; fprintf(out, "prec=%d, sgnd=%d, cmpttype=%"PRIiFAST32"\n", cmpt->prec_, cmpt->sgnd_, cmpt->type_); width = jas_image_cmptwidth(image, cmptno); height = jas_image_cmptheight(image, cmptno); n = JAS_MIN(16, width); if (jas_image_readcmpt2(image, cmptno, 0, 0, n, 1, buf)) { abort(); } for (i = 0; i < n; ++i) { fprintf(out, " f(%d,%d)=%ld", i, 0, buf[i]); } fprintf(out, "\n"); if (jas_image_readcmpt2(image, cmptno, width - n, height - 1, n, 1, buf)) { abort(); } for (i = 0; i < n; ++i) { fprintf(out, " f(%d,%d)=%ld", width - n + i, height - 1, buf[i]); } fprintf(out, "\n"); } } int jas_image_depalettize(jas_image_t *image, int cmptno, int numlutents, int_fast32_t *lutents, int dtype, int newcmptno) { jas_image_cmptparm_t cmptparms; int_fast32_t v; int i; int j; jas_image_cmpt_t *cmpt; cmpt = image->cmpts_[cmptno]; cmptparms.tlx 
= cmpt->tlx_; cmptparms.tly = cmpt->tly_; cmptparms.hstep = cmpt->hstep_; cmptparms.vstep = cmpt->vstep_; cmptparms.width = cmpt->width_; cmptparms.height = cmpt->height_; cmptparms.prec = JAS_IMAGE_CDT_GETPREC(dtype); cmptparms.sgnd = JAS_IMAGE_CDT_GETSGND(dtype); if (jas_image_addcmpt(image, newcmptno, &cmptparms)) { return -1; } if (newcmptno <= cmptno) { ++cmptno; cmpt = image->cmpts_[cmptno]; } for (j = 0; j < cmpt->height_; ++j) { for (i = 0; i < cmpt->width_; ++i) { v = jas_image_readcmptsample(image, cmptno, i, j); if (v < 0) { v = 0; } else if (v >= numlutents) { v = numlutents - 1; } jas_image_writecmptsample(image, newcmptno, i, j, lutents[v]); } } return 0; } int jas_image_readcmptsample(jas_image_t *image, int cmptno, int x, int y) { jas_image_cmpt_t *cmpt; uint_fast32_t v; int k; int c; cmpt = image->cmpts_[cmptno]; if (jas_stream_seek(cmpt->stream_, (cmpt->width_ * y + x) * cmpt->cps_, SEEK_SET) < 0) { return -1; } v = 0; for (k = cmpt->cps_; k > 0; --k) { if ((c = jas_stream_getc(cmpt->stream_)) == EOF) { return -1; } v = (v << 8) | (c & 0xff); } return bitstoint(v, cmpt->prec_, cmpt->sgnd_); } void jas_image_writecmptsample(jas_image_t *image, int cmptno, int x, int y, int_fast32_t v) { jas_image_cmpt_t *cmpt; uint_fast32_t t; int k; int c; cmpt = image->cmpts_[cmptno]; if (jas_stream_seek(cmpt->stream_, (cmpt->width_ * y + x) * cmpt->cps_, SEEK_SET) < 0) { return; } t = inttobits(v, cmpt->prec_, cmpt->sgnd_); for (k = cmpt->cps_; k > 0; --k) { c = (t >> (8 * (cmpt->cps_ - 1))) & 0xff; if (jas_stream_putc(cmpt->stream_, (unsigned char) c) == EOF) { return; } t <<= 8; } } int jas_image_getcmptbytype(jas_image_t *image, int ctype) { int cmptno; for (cmptno = 0; cmptno < image->numcmpts_; ++cmptno) { if (image->cmpts_[cmptno]->type_ == ctype) { return cmptno; } } return -1; } /***********************************************/ /***********************************************/ /***********************************************/ 
/***********************************************/ int jas_image_readcmpt2(jas_image_t *image, int cmptno, jas_image_coord_t x, jas_image_coord_t y, jas_image_coord_t width, jas_image_coord_t height, long *buf) { jas_image_cmpt_t *cmpt; jas_image_coord_t i; jas_image_coord_t j; long v; long *bufptr; if (cmptno < 0 || cmptno >= image->numcmpts_) goto error; cmpt = image->cmpts_[cmptno]; if (x < 0 || x >= cmpt->width_ || y < 0 || y >= cmpt->height_ || width < 0 || height < 0 || x + width > cmpt->width_ || y + height > cmpt->height_) goto error; bufptr = buf; for (i = 0; i < height; ++i) { if (jas_stream_seek(cmpt->stream_, (cmpt->width_ * (y + i) + x) * cmpt->cps_, SEEK_SET) < 0) goto error; for (j = 0; j < width; ++j) { if (getint(cmpt->stream_, cmpt->sgnd_, cmpt->prec_, &v)) goto error; *bufptr++ = v; } } return 0; error: return -1; } int jas_image_writecmpt2(jas_image_t *image, int cmptno, jas_image_coord_t x, jas_image_coord_t y, jas_image_coord_t width, jas_image_coord_t height, long *buf) { jas_image_cmpt_t *cmpt; jas_image_coord_t i; jas_image_coord_t j; long v; long *bufptr; if (cmptno < 0 || cmptno >= image->numcmpts_) goto error; cmpt = image->cmpts_[cmptno]; if (x < 0 || x >= cmpt->width_ || y < 0 || y >= cmpt->height_ || width < 0 || height < 0 || x + width > cmpt->width_ || y + height > cmpt->height_) goto error; bufptr = buf; for (i = 0; i < height; ++i) { if (jas_stream_seek(cmpt->stream_, (cmpt->width_ * (y + i) + x) * cmpt->cps_, SEEK_SET) < 0) goto error; for (j = 0; j < width; ++j) { v = *bufptr++; if (putint(cmpt->stream_, cmpt->sgnd_, cmpt->prec_, v)) goto error; } } return 0; error: return -1; } int jas_image_sampcmpt(jas_image_t *image, int cmptno, int newcmptno, jas_image_coord_t ho, jas_image_coord_t vo, jas_image_coord_t hs, jas_image_coord_t vs, int sgnd, int prec) { jas_image_cmpt_t *oldcmpt; jas_image_cmpt_t *newcmpt; int width; int height; jas_image_coord_t tlx; jas_image_coord_t tly; jas_image_coord_t brx; jas_image_coord_t bry; int i; 
int j; jas_image_cmptparm_t cmptparm; jas_image_coord_t ax; jas_image_coord_t ay; jas_image_coord_t bx; jas_image_coord_t by; jas_image_coord_t d0; jas_image_coord_t d1; jas_image_coord_t d2; jas_image_coord_t d3; jas_image_coord_t oldx; jas_image_coord_t oldy; jas_image_coord_t x; jas_image_coord_t y; long v; jas_image_coord_t cmptbrx; jas_image_coord_t cmptbry; assert(cmptno >= 0 && cmptno < image->numcmpts_); oldcmpt = image->cmpts_[cmptno]; assert(oldcmpt->tlx_ == 0 && oldcmpt->tly_ == 0); jas_image_calcbbox2(image, &tlx, &tly, &brx, &bry); width = FLOORDIV(brx - ho + hs, hs); height = FLOORDIV(bry - vo + vs, vs); cmptparm.tlx = ho; cmptparm.tly = vo; cmptparm.hstep = hs; cmptparm.vstep = vs; cmptparm.width = width; cmptparm.height = height; cmptparm.prec = prec; cmptparm.sgnd = sgnd; if (jas_image_addcmpt(image, newcmptno, &cmptparm)) goto error; cmptbrx = oldcmpt->tlx_ + (oldcmpt->width_ - 1) * oldcmpt->hstep_; cmptbry = oldcmpt->tly_ + (oldcmpt->height_ - 1) * oldcmpt->vstep_; newcmpt = image->cmpts_[newcmptno]; jas_stream_rewind(newcmpt->stream_); for (i = 0; i < height; ++i) { y = newcmpt->tly_ + newcmpt->vstep_ * i; for (j = 0; j < width; ++j) { x = newcmpt->tlx_ + newcmpt->hstep_ * j; ax = downtomult(x - oldcmpt->tlx_, oldcmpt->hstep_) + oldcmpt->tlx_; ay = downtomult(y - oldcmpt->tly_, oldcmpt->vstep_) + oldcmpt->tly_; bx = uptomult(x - oldcmpt->tlx_, oldcmpt->hstep_) + oldcmpt->tlx_; if (bx > cmptbrx) bx = cmptbrx; by = uptomult(y - oldcmpt->tly_, oldcmpt->vstep_) + oldcmpt->tly_; if (by > cmptbry) by = cmptbry; d0 = (ax - x) * (ax - x) + (ay - y) * (ay - y); d1 = (bx - x) * (bx - x) + (ay - y) * (ay - y); d2 = (bx - x) * (bx - x) + (by - y) * (by - y); d3 = (ax - x) * (ax - x) + (by - y) * (by - y); if (d0 <= d1 && d0 <= d2 && d0 <= d3) { oldx = (ax - oldcmpt->tlx_) / oldcmpt->hstep_; oldy = (ay - oldcmpt->tly_) / oldcmpt->vstep_; } else if (d1 <= d0 && d1 <= d2 && d1 <= d3) { oldx = (bx - oldcmpt->tlx_) / oldcmpt->hstep_; oldy = (ay - oldcmpt->tly_) 
/ oldcmpt->vstep_; } else if (d2 <= d0 && d2 <= d1 && d1 <= d3) { oldx = (bx - oldcmpt->tlx_) / oldcmpt->hstep_; oldy = (by - oldcmpt->tly_) / oldcmpt->vstep_; } else { oldx = (ax - oldcmpt->tlx_) / oldcmpt->hstep_; oldy = (by - oldcmpt->tly_) / oldcmpt->vstep_; } assert(oldx >= 0 && oldx < oldcmpt->width_ && oldy >= 0 && oldy < oldcmpt->height_); if (jas_stream_seek(oldcmpt->stream_, oldcmpt->cps_ * (oldy * oldcmpt->width_ + oldx), SEEK_SET) < 0) goto error; if (getint(oldcmpt->stream_, oldcmpt->sgnd_, oldcmpt->prec_, &v)) goto error; if (newcmpt->prec_ != oldcmpt->prec_ || newcmpt->sgnd_ != oldcmpt->sgnd_) { v = convert(v, oldcmpt->sgnd_, oldcmpt->prec_, newcmpt->sgnd_, newcmpt->prec_); } if (putint(newcmpt->stream_, newcmpt->sgnd_, newcmpt->prec_, v)) goto error; } } return 0; error: return -1; } int jas_image_ishomosamp(jas_image_t *image) { jas_image_coord_t hstep; jas_image_coord_t vstep; int result; int i; hstep = jas_image_cmpthstep(image, 0); vstep = jas_image_cmptvstep(image, 0); result = 1; for (i = 0; i < image->numcmpts_; ++i) { if (jas_image_cmpthstep(image, i) != hstep || jas_image_cmptvstep(image, i) != vstep) { result = 0; break; } } return result; } /* Note: This function defines a bounding box differently. 
*/ static void jas_image_calcbbox2(jas_image_t *image, jas_image_coord_t *tlx, jas_image_coord_t *tly, jas_image_coord_t *brx, jas_image_coord_t *bry) { jas_image_cmpt_t *cmpt; jas_image_coord_t tmptlx; jas_image_coord_t tmptly; jas_image_coord_t tmpbrx; jas_image_coord_t tmpbry; jas_image_coord_t t; int i; if (image->numcmpts_ > 0) { cmpt = image->cmpts_[0]; tmptlx = cmpt->tlx_; tmptly = cmpt->tly_; tmpbrx = cmpt->tlx_ + cmpt->hstep_ * (cmpt->width_ - 1); tmpbry = cmpt->tly_ + cmpt->vstep_ * (cmpt->height_ - 1); for (i = 0; i < image->numcmpts_; ++i) { cmpt = image->cmpts_[i]; if (cmpt->tlx_ < tmptlx) tmptlx = cmpt->tlx_; if (cmpt->tly_ < tmptly) tmptly = cmpt->tly_; t = cmpt->tlx_ + cmpt->hstep_ * (cmpt->width_ - 1); if (t > tmpbrx) tmpbrx = t; t = cmpt->tly_ + cmpt->vstep_ * (cmpt->height_ - 1); if (t > tmpbry) tmpbry = t; } } else { tmptlx = 0; tmptly = 0; tmpbrx = -1; tmpbry = -1; } *tlx = tmptlx; *tly = tmptly; *brx = tmpbrx; *bry = tmpbry; } static inline long decode_twos_comp(ulong c, int prec) { long result; assert(prec >= 2); jas_eprintf("warning: support for signed data is untested\n"); // NOTE: Is this correct? result = (c & ((1 << (prec - 1)) - 1)) - (c & (1 << (prec - 1))); return result; } static inline ulong encode_twos_comp(long n, int prec) { ulong result; assert(prec >= 2); jas_eprintf("warning: support for signed data is untested\n"); // NOTE: Is this correct? 
if (n < 0) { result = -n; result = (result ^ 0xffffffffUL) + 1; result &= (1 << prec) - 1; } else { result = n; } return result; } static int getint(jas_stream_t *in, int sgnd, int prec, long *val) { long v; int n; int c; assert((!sgnd && prec >= 1) || (sgnd && prec >= 2)); n = (prec + 7) / 8; v = 0; while (--n >= 0) { if ((c = jas_stream_getc(in)) == EOF) return -1; v = (v << 8) | c; } v &= ((1 << prec) - 1); if (sgnd) { *val = decode_twos_comp(v, prec); } else { *val = v; } return 0; } static int putint(jas_stream_t *out, int sgnd, int prec, long val) { int n; int c; bool s; ulong tmp; assert((!sgnd && prec >= 1) || (sgnd && prec >= 2)); if (sgnd) { val = encode_twos_comp(val, prec); } assert(val >= 0); val &= (1 << prec) - 1; n = (prec + 7) / 8; while (--n >= 0) { c = (val >> (n * 8)) & 0xff; if (jas_stream_putc(out, c) != c) return -1; } return 0; } static long convert(long val, int oldsgnd, int oldprec, int newsgnd, int newprec) { if (newsgnd != oldsgnd) { } if (newprec != oldprec) { if (newprec > oldprec) { val <<= newprec - oldprec; } else if (oldprec > newprec) { val >>= oldprec - newprec; } } return val; } static long downtomult(long x, long y) { assert(x >= 0); return (x / y) * y; } static long uptomult(long x, long y) { assert(x >= 0); return ((x + y - 1) / y) * y; } jas_image_t *jas_image_chclrspc(jas_image_t *image, jas_cmprof_t *outprof, int intent) { jas_image_t *inimage; int minhstep; int minvstep; int i; int j; int k; int n; int hstep; int vstep; int numinauxchans; int numoutauxchans; int numinclrchans; int numoutclrchans; int prec; jas_image_t *outimage; int cmpttype; int numoutchans; jas_cmprof_t *inprof; jas_cmprof_t *tmpprof; jas_image_cmptparm_t cmptparm; int width; int height; jas_cmxform_t *xform; jas_cmpixmap_t inpixmap; jas_cmpixmap_t outpixmap; jas_cmcmptfmt_t *incmptfmts; jas_cmcmptfmt_t *outcmptfmts; #if 0 jas_eprintf("IMAGE\n"); jas_image_dump(image, stderr); #endif outimage = 0; xform = 0; if (!(inimage = jas_image_copy(image))) goto 
error; image = 0; if (!jas_image_ishomosamp(inimage)) { minhstep = jas_image_cmpthstep(inimage, 0); minvstep = jas_image_cmptvstep(inimage, 0); for (i = 1; i < jas_image_numcmpts(inimage); ++i) { hstep = jas_image_cmpthstep(inimage, i); vstep = jas_image_cmptvstep(inimage, i); if (hstep < minhstep) { minhstep = hstep; } if (vstep < minvstep) { minvstep = vstep; } } n = jas_image_numcmpts(inimage); for (i = 0; i < n; ++i) { cmpttype = jas_image_cmpttype(inimage, i); if (jas_image_sampcmpt(inimage, i, i + 1, 0, 0, minhstep, minvstep, jas_image_cmptsgnd(inimage, i), jas_image_cmptprec(inimage, i))) { goto error; } jas_image_setcmpttype(inimage, i + 1, cmpttype); jas_image_delcmpt(inimage, i); } } width = jas_image_cmptwidth(inimage, 0); height = jas_image_cmptheight(inimage, 0); hstep = jas_image_cmpthstep(inimage, 0); vstep = jas_image_cmptvstep(inimage, 0); if (!(inprof = jas_image_cmprof(inimage))) { abort(); } numinclrchans = jas_clrspc_numchans(jas_cmprof_clrspc(inprof)); numinauxchans = jas_image_numcmpts(inimage) - numinclrchans; numoutclrchans = jas_clrspc_numchans(jas_cmprof_clrspc(outprof)); numoutauxchans = 0; numoutchans = numoutclrchans + numoutauxchans; prec = 8; if (!(outimage = jas_image_create0())) { goto error; } /* Create a component for each of the colorants. */ for (i = 0; i < numoutclrchans; ++i) { cmptparm.tlx = 0; cmptparm.tly = 0; cmptparm.hstep = hstep; cmptparm.vstep = vstep; cmptparm.width = width; cmptparm.height = height; cmptparm.prec = prec; cmptparm.sgnd = 0; if (jas_image_addcmpt(outimage, -1, &cmptparm)) goto error; jas_image_setcmpttype(outimage, i, JAS_IMAGE_CT_COLOR(i)); } #if 0 /* Copy the auxiliary components without modification. */ for (i = 0; i < jas_image_numcmpts(inimage); ++i) { if (!ISCOLOR(jas_image_cmpttype(inimage, i))) { jas_image_copycmpt(outimage, -1, inimage, i); /* XXX - need to specify laydown of component on ref. 
grid */ } } #endif if (!(tmpprof = jas_cmprof_copy(outprof))) goto error; assert(!jas_image_cmprof(outimage)); jas_image_setcmprof(outimage, tmpprof); tmpprof = 0; jas_image_setclrspc(outimage, jas_cmprof_clrspc(outprof)); if (!(xform = jas_cmxform_create(inprof, outprof, 0, JAS_CMXFORM_OP_FWD, intent, 0))) { goto error; } inpixmap.numcmpts = numinclrchans; if (!(incmptfmts = jas_alloc2(numinclrchans, sizeof(jas_cmcmptfmt_t)))) { abort(); } inpixmap.cmptfmts = incmptfmts; for (i = 0; i < numinclrchans; ++i) { j = jas_image_getcmptbytype(inimage, JAS_IMAGE_CT_COLOR(i)); assert(j >= 0); if (!(incmptfmts[i].buf = jas_alloc2(width, sizeof(long)))) { goto error; } incmptfmts[i].prec = jas_image_cmptprec(inimage, j); incmptfmts[i].sgnd = jas_image_cmptsgnd(inimage, j); incmptfmts[i].width = width; incmptfmts[i].height = 1; } outpixmap.numcmpts = numoutclrchans; if (!(outcmptfmts = jas_alloc2(numoutclrchans, sizeof(jas_cmcmptfmt_t)))) { abort(); } outpixmap.cmptfmts = outcmptfmts; for (i = 0; i < numoutclrchans; ++i) { j = jas_image_getcmptbytype(outimage, JAS_IMAGE_CT_COLOR(i)); assert(j >= 0); if (!(outcmptfmts[i].buf = jas_alloc2(width, sizeof(long)))) goto error; outcmptfmts[i].prec = jas_image_cmptprec(outimage, j); outcmptfmts[i].sgnd = jas_image_cmptsgnd(outimage, j); outcmptfmts[i].width = width; outcmptfmts[i].height = 1; } for (i = 0; i < height; ++i) { for (j = 0; j < numinclrchans; ++j) { k = jas_image_getcmptbytype(inimage, JAS_IMAGE_CT_COLOR(j)); if (jas_image_readcmpt2(inimage, k, 0, i, width, 1, incmptfmts[j].buf)) goto error; } jas_cmxform_apply(xform, &inpixmap, &outpixmap); for (j = 0; j < numoutclrchans; ++j) { k = jas_image_getcmptbytype(outimage, JAS_IMAGE_CT_COLOR(j)); if (jas_image_writecmpt2(outimage, k, 0, i, width, 1, outcmptfmts[j].buf)) goto error; } } for (i = 0; i < numoutclrchans; ++i) { jas_free(outcmptfmts[i].buf); } jas_free(outcmptfmts); for (i = 0; i < numinclrchans; ++i) { jas_free(incmptfmts[i].buf); } jas_free(incmptfmts); 
jas_cmxform_destroy(xform); jas_image_destroy(inimage); #if 0 jas_eprintf("INIMAGE\n"); jas_image_dump(inimage, stderr); jas_eprintf("OUTIMAGE\n"); jas_image_dump(outimage, stderr); #endif return outimage; error: if (xform) jas_cmxform_destroy(xform); if (inimage) jas_image_destroy(inimage); if (outimage) jas_image_destroy(outimage); return 0; }
/* * Copyright (c) 1999-2000 Image Power, Inc. and the University of * British Columbia. * Copyright (c) 2001-2003 Michael David Adams. * All rights reserved. */ /* __START_OF_JASPER_LICENSE__ * * JasPer License Version 2.0 * * Copyright (c) 2001-2006 Michael David Adams * Copyright (c) 1999-2000 Image Power, Inc. * Copyright (c) 1999-2000 The University of British Columbia * * All rights reserved. * * Permission is hereby granted, free of charge, to any person (the * "User") obtaining a copy of this software and associated documentation * files (the "Software"), to deal in the Software without restriction, * including without limitation the rights to use, copy, modify, merge, * publish, distribute, and/or sell copies of the Software, and to permit * persons to whom the Software is furnished to do so, subject to the * following conditions: * * 1. The above copyright notices and this permission notice (which * includes the disclaimer below) shall be included in all copies or * substantial portions of the Software. * * 2. The name of a copyright holder shall not be used to endorse or * promote products derived from the Software without specific prior * written permission. * * THIS DISCLAIMER OF WARRANTY CONSTITUTES AN ESSENTIAL PART OF THIS * LICENSE. NO USE OF THE SOFTWARE IS AUTHORIZED HEREUNDER EXCEPT UNDER * THIS DISCLAIMER. THE SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS * "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING * BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A * PARTICULAR PURPOSE AND NONINFRINGEMENT OF THIRD PARTY RIGHTS. IN NO * EVENT SHALL THE COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, OR ANY SPECIAL * INDIRECT OR CONSEQUENTIAL DAMAGES, OR ANY DAMAGES WHATSOEVER RESULTING * FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN ACTION OF CONTRACT, * NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF OR IN CONNECTION * WITH THE USE OR PERFORMANCE OF THIS SOFTWARE. 
NO ASSURANCES ARE * PROVIDED BY THE COPYRIGHT HOLDERS THAT THE SOFTWARE DOES NOT INFRINGE * THE PATENT OR OTHER INTELLECTUAL PROPERTY RIGHTS OF ANY OTHER ENTITY. * EACH COPYRIGHT HOLDER DISCLAIMS ANY LIABILITY TO THE USER FOR CLAIMS * BROUGHT BY ANY OTHER ENTITY BASED ON INFRINGEMENT OF INTELLECTUAL * PROPERTY RIGHTS OR OTHERWISE. AS A CONDITION TO EXERCISING THE RIGHTS * GRANTED HEREUNDER, EACH USER HEREBY ASSUMES SOLE RESPONSIBILITY TO SECURE * ANY OTHER INTELLECTUAL PROPERTY RIGHTS NEEDED, IF ANY. THE SOFTWARE * IS NOT FAULT-TOLERANT AND IS NOT INTENDED FOR USE IN MISSION-CRITICAL * SYSTEMS, SUCH AS THOSE USED IN THE OPERATION OF NUCLEAR FACILITIES, * AIRCRAFT NAVIGATION OR COMMUNICATION SYSTEMS, AIR TRAFFIC CONTROL * SYSTEMS, DIRECT LIFE SUPPORT MACHINES, OR WEAPONS SYSTEMS, IN WHICH * THE FAILURE OF THE SOFTWARE OR SYSTEM COULD LEAD DIRECTLY TO DEATH, * PERSONAL INJURY, OR SEVERE PHYSICAL OR ENVIRONMENTAL DAMAGE ("HIGH * RISK ACTIVITIES"). THE COPYRIGHT HOLDERS SPECIFICALLY DISCLAIM ANY * EXPRESS OR IMPLIED WARRANTY OF FITNESS FOR HIGH RISK ACTIVITIES. * * __END_OF_JASPER_LICENSE__ */ /* * Image Library * * $Id$ */ /******************************************************************************\ * Includes. \******************************************************************************/ #include <stdlib.h> #include <stdio.h> #include <string.h> #include <assert.h> #include <ctype.h> #include <inttypes.h> #include <stdbool.h> #include <limits.h> #include "jasper/jas_math.h" #include "jasper/jas_image.h" #include "jasper/jas_malloc.h" #include "jasper/jas_string.h" #include "jasper/jas_debug.h" /******************************************************************************\ * Types. \******************************************************************************/ #define FLOORDIV(x, y) ((x) / (y)) /******************************************************************************\ * Local prototypes. 
\******************************************************************************/ static jas_image_cmpt_t *jas_image_cmpt_create0(void); static void jas_image_cmpt_destroy(jas_image_cmpt_t *cmpt); static jas_image_cmpt_t *jas_image_cmpt_create(int_fast32_t tlx, int_fast32_t tly, int_fast32_t hstep, int_fast32_t vstep, int_fast32_t width, int_fast32_t height, uint_fast16_t depth, bool sgnd, uint_fast32_t inmem); static void jas_image_setbbox(jas_image_t *image); static jas_image_cmpt_t *jas_image_cmpt_copy(jas_image_cmpt_t *cmpt); static int jas_image_growcmpts(jas_image_t *image, int maxcmpts); static uint_fast32_t inttobits(jas_seqent_t v, int prec, bool sgnd); static jas_seqent_t bitstoint(uint_fast32_t v, int prec, bool sgnd); static int putint(jas_stream_t *out, int sgnd, int prec, long val); static int getint(jas_stream_t *in, int sgnd, int prec, long *val); static void jas_image_calcbbox2(jas_image_t *image, jas_image_coord_t *tlx, jas_image_coord_t *tly, jas_image_coord_t *brx, jas_image_coord_t *bry); static long uptomult(long x, long y); static long downtomult(long x, long y); static long convert(long val, int oldsgnd, int oldprec, int newsgnd, int newprec); static void jas_image_calcbbox2(jas_image_t *image, jas_image_coord_t *tlx, jas_image_coord_t *tly, jas_image_coord_t *brx, jas_image_coord_t *bry); /******************************************************************************\ * Global data. \******************************************************************************/ static int jas_image_numfmts = 0; static jas_image_fmtinfo_t jas_image_fmtinfos[JAS_IMAGE_MAXFMTS]; /******************************************************************************\ * Create and destroy operations. 
\******************************************************************************/ jas_image_t *jas_image_create(int numcmpts, jas_image_cmptparm_t *cmptparms, int clrspc) { jas_image_t *image; size_t rawsize; uint_fast32_t inmem; int cmptno; jas_image_cmptparm_t *cmptparm; image = 0; JAS_DBGLOG(100, ("jas_image_create(%d, %p, %d)\n", numcmpts, cmptparms, clrspc)); if (!(image = jas_image_create0())) { goto error; } image->clrspc_ = clrspc; image->maxcmpts_ = numcmpts; // image->inmem_ = true; /* Allocate memory for the per-component information. */ if (!(image->cmpts_ = jas_alloc2(image->maxcmpts_, sizeof(jas_image_cmpt_t *)))) { goto error; } /* Initialize in case of failure. */ for (cmptno = 0; cmptno < image->maxcmpts_; ++cmptno) { image->cmpts_[cmptno] = 0; } #if 0 /* Compute the approximate raw size of the image. */ rawsize = 0; for (cmptno = 0, cmptparm = cmptparms; cmptno < numcmpts; ++cmptno, ++cmptparm) { rawsize += cmptparm->width * cmptparm->height * (cmptparm->prec + 7) / 8; } /* Decide whether to buffer the image data in memory, based on the raw size of the image. */ inmem = (rawsize < JAS_IMAGE_INMEMTHRESH); #endif /* Create the individual image components. 
*/ for (cmptno = 0, cmptparm = cmptparms; cmptno < numcmpts; ++cmptno, ++cmptparm) { if (!jas_safe_size_mul3(cmptparm->width, cmptparm->height, (cmptparm->prec + 7), &rawsize)) { goto error; } rawsize /= 8; inmem = (rawsize < JAS_IMAGE_INMEMTHRESH); if (!(image->cmpts_[cmptno] = jas_image_cmpt_create(cmptparm->tlx, cmptparm->tly, cmptparm->hstep, cmptparm->vstep, cmptparm->width, cmptparm->height, cmptparm->prec, cmptparm->sgnd, inmem))) { goto error; } ++image->numcmpts_; } /* Determine the bounding box for all of the components on the reference grid (i.e., the image area) */ jas_image_setbbox(image); return image; error: if (image) { jas_image_destroy(image); } return 0; } jas_image_t *jas_image_create0() { jas_image_t *image; if (!(image = jas_malloc(sizeof(jas_image_t)))) { return 0; } image->tlx_ = 0; image->tly_ = 0; image->brx_ = 0; image->bry_ = 0; image->clrspc_ = JAS_CLRSPC_UNKNOWN; image->numcmpts_ = 0; image->maxcmpts_ = 0; image->cmpts_ = 0; // image->inmem_ = true; image->cmprof_ = 0; return image; } jas_image_t *jas_image_copy(jas_image_t *image) { jas_image_t *newimage; int cmptno; if (!(newimage = jas_image_create0())) { goto error; } if (jas_image_growcmpts(newimage, image->numcmpts_)) { goto error; } for (cmptno = 0; cmptno < image->numcmpts_; ++cmptno) { if (!(newimage->cmpts_[cmptno] = jas_image_cmpt_copy(image->cmpts_[cmptno]))) { goto error; } ++newimage->numcmpts_; } jas_image_setbbox(newimage); if (image->cmprof_) { if (!(newimage->cmprof_ = jas_cmprof_copy(image->cmprof_))) goto error; } return newimage; error: if (newimage) { jas_image_destroy(newimage); } return 0; } static jas_image_cmpt_t *jas_image_cmpt_create0() { jas_image_cmpt_t *cmpt; if (!(cmpt = jas_malloc(sizeof(jas_image_cmpt_t)))) { return 0; } memset(cmpt, 0, sizeof(jas_image_cmpt_t)); cmpt->type_ = JAS_IMAGE_CT_UNKNOWN; return cmpt; } static jas_image_cmpt_t *jas_image_cmpt_copy(jas_image_cmpt_t *cmpt) { jas_image_cmpt_t *newcmpt; if (!(newcmpt = jas_image_cmpt_create0())) 
{ return 0; } newcmpt->tlx_ = cmpt->tlx_; newcmpt->tly_ = cmpt->tly_; newcmpt->hstep_ = cmpt->hstep_; newcmpt->vstep_ = cmpt->vstep_; newcmpt->width_ = cmpt->width_; newcmpt->height_ = cmpt->height_; newcmpt->prec_ = cmpt->prec_; newcmpt->sgnd_ = cmpt->sgnd_; newcmpt->cps_ = cmpt->cps_; newcmpt->type_ = cmpt->type_; if (!(newcmpt->stream_ = jas_stream_memopen(0, 0))) { goto error; } if (jas_stream_seek(cmpt->stream_, 0, SEEK_SET)) { goto error; } if (jas_stream_copy(newcmpt->stream_, cmpt->stream_, -1)) { goto error; } if (jas_stream_seek(newcmpt->stream_, 0, SEEK_SET)) { goto error; } return newcmpt; error: if (newcmpt) { jas_image_cmpt_destroy(newcmpt); } return 0; } void jas_image_destroy(jas_image_t *image) { int i; if (image->cmpts_) { for (i = 0; i < image->numcmpts_; ++i) { jas_image_cmpt_destroy(image->cmpts_[i]); image->cmpts_[i] = 0; } jas_free(image->cmpts_); } if (image->cmprof_) jas_cmprof_destroy(image->cmprof_); jas_free(image); } static jas_image_cmpt_t *jas_image_cmpt_create(int_fast32_t tlx, int_fast32_t tly, int_fast32_t hstep, int_fast32_t vstep, int_fast32_t width, int_fast32_t height, uint_fast16_t depth, bool sgnd, uint_fast32_t inmem) { jas_image_cmpt_t *cmpt; size_t size; JAS_DBGLOG(100, ( "jas_image_cmpt_create(%ld, %ld, %ld, %ld, %ld, %ld, %d, %d, %d)\n", JAS_CAST(long, tlx), JAS_CAST(long, tly), JAS_CAST(long, hstep), JAS_CAST(long, vstep), JAS_CAST(long, width), JAS_CAST(long, height), JAS_CAST(int, depth), sgnd, inmem )); cmpt = 0; if (width < 0 || height < 0 || hstep <= 0 || vstep <= 0) { goto error; } if (!jas_safe_intfast32_add(tlx, width, 0) || !jas_safe_intfast32_add(tly, height, 0)) { goto error; } if (!jas_safe_intfast32_mul3(width, height, depth, 0)) { goto error; } if (!(cmpt = jas_malloc(sizeof(jas_image_cmpt_t)))) { goto error; } cmpt->type_ = JAS_IMAGE_CT_UNKNOWN; cmpt->tlx_ = tlx; cmpt->tly_ = tly; cmpt->hstep_ = hstep; cmpt->vstep_ = vstep; cmpt->width_ = width; cmpt->height_ = height; cmpt->prec_ = depth; cmpt->sgnd_ = 
sgnd; cmpt->stream_ = 0; cmpt->cps_ = (depth + 7) / 8; // Compute the number of samples in the image component, while protecting // against overflow. // size = cmpt->width_ * cmpt->height_ * cmpt->cps_; if (!jas_safe_size_mul3(cmpt->width_, cmpt->height_, cmpt->cps_, &size)) { goto error; } cmpt->stream_ = (inmem) ? jas_stream_memopen2(0, size) : jas_stream_tmpfile(); if (!cmpt->stream_) { goto error; } /* Zero the component data. This isn't necessary, but it is convenient for debugging purposes. */ /* Note: conversion of size - 1 to long can overflow */ if (size > 0) { if (size - 1 > LONG_MAX) { goto error; } if (jas_stream_seek(cmpt->stream_, size - 1, SEEK_SET) < 0 || jas_stream_putc(cmpt->stream_, 0) == EOF || jas_stream_seek(cmpt->stream_, 0, SEEK_SET) < 0) { goto error; } } return cmpt; error: if (cmpt) { jas_image_cmpt_destroy(cmpt); } return 0; } static void jas_image_cmpt_destroy(jas_image_cmpt_t *cmpt) { if (cmpt->stream_) { jas_stream_close(cmpt->stream_); } jas_free(cmpt); } /******************************************************************************\ * Load and save operations. \******************************************************************************/ jas_image_t *jas_image_decode(jas_stream_t *in, int fmt, char *optstr) { jas_image_fmtinfo_t *fmtinfo; jas_image_t *image; image = 0; /* If possible, try to determine the format of the input data. */ if (fmt < 0) { if ((fmt = jas_image_getfmt(in)) < 0) goto error; } /* Is it possible to decode an image represented in this format? */ if (!(fmtinfo = jas_image_lookupfmtbyid(fmt))) goto error; if (!fmtinfo->ops.decode) goto error; /* Decode the image. */ if (!(image = (*fmtinfo->ops.decode)(in, optstr))) goto error; /* Create a color profile if needed. 
*/ if (!jas_clrspc_isunknown(image->clrspc_) && !jas_clrspc_isgeneric(image->clrspc_) && !image->cmprof_) { if (!(image->cmprof_ = jas_cmprof_createfromclrspc(jas_image_clrspc(image)))) goto error; } return image; error: if (image) jas_image_destroy(image); return 0; } int jas_image_encode(jas_image_t *image, jas_stream_t *out, int fmt, char *optstr) { jas_image_fmtinfo_t *fmtinfo; if (!(fmtinfo = jas_image_lookupfmtbyid(fmt))) { return -1; } return (fmtinfo->ops.encode) ? (*fmtinfo->ops.encode)(image, out, optstr) : (-1); } /******************************************************************************\ * Component read and write operations. \******************************************************************************/ int jas_image_readcmpt(jas_image_t *image, int cmptno, jas_image_coord_t x, jas_image_coord_t y, jas_image_coord_t width, jas_image_coord_t height, jas_matrix_t *data) { jas_image_cmpt_t *cmpt; jas_image_coord_t i; jas_image_coord_t j; int k; jas_seqent_t v; int c; jas_seqent_t *dr; jas_seqent_t *d; int drs; JAS_DBGLOG(10, ("jas_image_readcmpt(%p, %d, %ld, %ld, %ld, %ld, %p)\n", image, cmptno, JAS_CAST(long, x), JAS_CAST(long, y), JAS_CAST(long, width), JAS_CAST(long, height), data)); if (cmptno < 0 || cmptno >= image->numcmpts_) { return -1; } cmpt = image->cmpts_[cmptno]; if (x >= cmpt->width_ || y >= cmpt->height_ || x + width > cmpt->width_ || y + height > cmpt->height_) { return -1; } if (!jas_matrix_numrows(data) || !jas_matrix_numcols(data)) { return -1; } if (jas_matrix_numrows(data) != height || jas_matrix_numcols(data) != width) { if (jas_matrix_resize(data, height, width)) { return -1; } } dr = jas_matrix_getref(data, 0, 0); drs = jas_matrix_rowstep(data); for (i = 0; i < height; ++i, dr += drs) { d = dr; if (jas_stream_seek(cmpt->stream_, (cmpt->width_ * (y + i) + x) * cmpt->cps_, SEEK_SET) < 0) { return -1; } for (j = width; j > 0; --j, ++d) { v = 0; for (k = cmpt->cps_; k > 0; --k) { if ((c = jas_stream_getc(cmpt->stream_)) == EOF) { 
return -1; } v = (v << 8) | (c & 0xff); } *d = bitstoint(v, cmpt->prec_, cmpt->sgnd_); } } return 0; } int jas_image_writecmpt(jas_image_t *image, int cmptno, jas_image_coord_t x, jas_image_coord_t y, jas_image_coord_t width, jas_image_coord_t height, jas_matrix_t *data) { jas_image_cmpt_t *cmpt; jas_image_coord_t i; jas_image_coord_t j; jas_seqent_t *d; jas_seqent_t *dr; int drs; jas_seqent_t v; int k; int c; JAS_DBGLOG(10, ("jas_image_writecmpt(%p, %d, %ld, %ld, %ld, %ld, %p)\n", image, cmptno, JAS_CAST(long, x), JAS_CAST(long, y), JAS_CAST(long, width), JAS_CAST(long, height), data)); if (cmptno < 0 || cmptno >= image->numcmpts_) { return -1; } cmpt = image->cmpts_[cmptno]; if (x >= cmpt->width_ || y >= cmpt->height_ || x + width > cmpt->width_ || y + height > cmpt->height_) { return -1; } if (!jas_matrix_numrows(data) || !jas_matrix_numcols(data)) { return -1; } if (jas_matrix_numrows(data) != height || jas_matrix_numcols(data) != width) { return -1; } dr = jas_matrix_getref(data, 0, 0); drs = jas_matrix_rowstep(data); for (i = 0; i < height; ++i, dr += drs) { d = dr; if (jas_stream_seek(cmpt->stream_, (cmpt->width_ * (y + i) + x) * cmpt->cps_, SEEK_SET) < 0) { return -1; } for (j = width; j > 0; --j, ++d) { v = inttobits(*d, cmpt->prec_, cmpt->sgnd_); for (k = cmpt->cps_; k > 0; --k) { c = (v >> (8 * (cmpt->cps_ - 1))) & 0xff; if (jas_stream_putc(cmpt->stream_, (unsigned char) c) == EOF) { return -1; } v <<= 8; } } } return 0; } /******************************************************************************\ * File format operations. 
\******************************************************************************/ void jas_image_clearfmts() { int i; jas_image_fmtinfo_t *fmtinfo; for (i = 0; i < jas_image_numfmts; ++i) { fmtinfo = &jas_image_fmtinfos[i]; if (fmtinfo->name) { jas_free(fmtinfo->name); fmtinfo->name = 0; } if (fmtinfo->ext) { jas_free(fmtinfo->ext); fmtinfo->ext = 0; } if (fmtinfo->desc) { jas_free(fmtinfo->desc); fmtinfo->desc = 0; } } jas_image_numfmts = 0; } int jas_image_addfmt(int id, char *name, char *ext, char *desc, jas_image_fmtops_t *ops) { jas_image_fmtinfo_t *fmtinfo; assert(id >= 0 && name && ext && ops); if (jas_image_numfmts >= JAS_IMAGE_MAXFMTS) { return -1; } fmtinfo = &jas_image_fmtinfos[jas_image_numfmts]; fmtinfo->id = id; if (!(fmtinfo->name = jas_strdup(name))) { return -1; } if (!(fmtinfo->ext = jas_strdup(ext))) { jas_free(fmtinfo->name); return -1; } if (!(fmtinfo->desc = jas_strdup(desc))) { jas_free(fmtinfo->name); jas_free(fmtinfo->ext); return -1; } fmtinfo->ops = *ops; ++jas_image_numfmts; return 0; } int jas_image_strtofmt(char *name) { jas_image_fmtinfo_t *fmtinfo; if (!(fmtinfo = jas_image_lookupfmtbyname(name))) { return -1; } return fmtinfo->id; } char *jas_image_fmttostr(int fmt) { jas_image_fmtinfo_t *fmtinfo; if (!(fmtinfo = jas_image_lookupfmtbyid(fmt))) { return 0; } return fmtinfo->name; } int jas_image_getfmt(jas_stream_t *in) { jas_image_fmtinfo_t *fmtinfo; int found; int i; /* Check for data in each of the supported formats. */ found = 0; for (i = 0, fmtinfo = jas_image_fmtinfos; i < jas_image_numfmts; ++i, ++fmtinfo) { if (fmtinfo->ops.validate) { /* Is the input data valid for this format? */ JAS_DBGLOG(20, ("testing for format %s ... ", fmtinfo->name)); if (!(*fmtinfo->ops.validate)(in)) { JAS_DBGLOG(20, ("test succeeded\n")); found = 1; break; } JAS_DBGLOG(20, ("test failed\n")); } } return found ? 
fmtinfo->id : (-1); } int jas_image_fmtfromname(char *name) { int i; char *ext; jas_image_fmtinfo_t *fmtinfo; /* Get the file name extension. */ if (!(ext = strrchr(name, '.'))) { return -1; } ++ext; /* Try to find a format that uses this extension. */ for (i = 0, fmtinfo = jas_image_fmtinfos; i < jas_image_numfmts; ++i, ++fmtinfo) { /* Do we have a match? */ if (!strcmp(ext, fmtinfo->ext)) { return fmtinfo->id; } } return -1; } /******************************************************************************\ * Miscellaneous operations. \******************************************************************************/ bool jas_image_cmpt_domains_same(jas_image_t *image) { int cmptno; jas_image_cmpt_t *cmpt; jas_image_cmpt_t *cmpt0; cmpt0 = image->cmpts_[0]; for (cmptno = 1; cmptno < image->numcmpts_; ++cmptno) { cmpt = image->cmpts_[cmptno]; if (cmpt->tlx_ != cmpt0->tlx_ || cmpt->tly_ != cmpt0->tly_ || cmpt->hstep_ != cmpt0->hstep_ || cmpt->vstep_ != cmpt0->vstep_ || cmpt->width_ != cmpt0->width_ || cmpt->height_ != cmpt0->height_) { return 0; } } return 1; } uint_fast32_t jas_image_rawsize(jas_image_t *image) { uint_fast32_t rawsize; int cmptno; jas_image_cmpt_t *cmpt; rawsize = 0; for (cmptno = 0; cmptno < image->numcmpts_; ++cmptno) { cmpt = image->cmpts_[cmptno]; rawsize += (cmpt->width_ * cmpt->height_ * cmpt->prec_ + 7) / 8; } return rawsize; } void jas_image_delcmpt(jas_image_t *image, int cmptno) { if (cmptno >= image->numcmpts_) { return; } jas_image_cmpt_destroy(image->cmpts_[cmptno]); if (cmptno < image->numcmpts_) { memmove(&image->cmpts_[cmptno], &image->cmpts_[cmptno + 1], (image->numcmpts_ - 1 - cmptno) * sizeof(jas_image_cmpt_t *)); } --image->numcmpts_; jas_image_setbbox(image); } int jas_image_addcmpt(jas_image_t *image, int cmptno, jas_image_cmptparm_t *cmptparm) { jas_image_cmpt_t *newcmpt; if (cmptno < 0) { cmptno = image->numcmpts_; } assert(cmptno >= 0 && cmptno <= image->numcmpts_); if (image->numcmpts_ >= image->maxcmpts_) { if 
(jas_image_growcmpts(image, image->maxcmpts_ + 128)) { return -1; } } if (!(newcmpt = jas_image_cmpt_create(cmptparm->tlx, cmptparm->tly, cmptparm->hstep, cmptparm->vstep, cmptparm->width, cmptparm->height, cmptparm->prec, cmptparm->sgnd, 1))) { return -1; } if (cmptno < image->numcmpts_) { memmove(&image->cmpts_[cmptno + 1], &image->cmpts_[cmptno], (image->numcmpts_ - cmptno) * sizeof(jas_image_cmpt_t *)); } image->cmpts_[cmptno] = newcmpt; ++image->numcmpts_; jas_image_setbbox(image); return 0; } jas_image_fmtinfo_t *jas_image_lookupfmtbyid(int id) { int i; jas_image_fmtinfo_t *fmtinfo; for (i = 0, fmtinfo = jas_image_fmtinfos; i < jas_image_numfmts; ++i, ++fmtinfo) { if (fmtinfo->id == id) { return fmtinfo; } } return 0; } jas_image_fmtinfo_t *jas_image_lookupfmtbyname(const char *name) { int i; jas_image_fmtinfo_t *fmtinfo; for (i = 0, fmtinfo = jas_image_fmtinfos; i < jas_image_numfmts; ++i, ++fmtinfo) { if (!strcmp(fmtinfo->name, name)) { return fmtinfo; } } return 0; } static uint_fast32_t inttobits(jas_seqent_t v, int prec, bool sgnd) { uint_fast32_t ret; ret = ((sgnd && v < 0) ? ((1 << prec) + v) : v) & JAS_ONES(prec); return ret; } static jas_seqent_t bitstoint(uint_fast32_t v, int prec, bool sgnd) { jas_seqent_t ret; v &= JAS_ONES(prec); ret = (sgnd && (v & (1 << (prec - 1)))) ? 
(v - (1 << prec)) : v; return ret; } static void jas_image_setbbox(jas_image_t *image) { jas_image_cmpt_t *cmpt; int cmptno; int_fast32_t x; int_fast32_t y; if (image->numcmpts_ > 0) { /* Determine the bounding box for all of the components on the reference grid (i.e., the image area) */ cmpt = image->cmpts_[0]; image->tlx_ = cmpt->tlx_; image->tly_ = cmpt->tly_; image->brx_ = cmpt->tlx_ + cmpt->hstep_ * (cmpt->width_ - 1) + 1; image->bry_ = cmpt->tly_ + cmpt->vstep_ * (cmpt->height_ - 1) + 1; for (cmptno = 1; cmptno < image->numcmpts_; ++cmptno) { cmpt = image->cmpts_[cmptno]; if (image->tlx_ > cmpt->tlx_) { image->tlx_ = cmpt->tlx_; } if (image->tly_ > cmpt->tly_) { image->tly_ = cmpt->tly_; } x = cmpt->tlx_ + cmpt->hstep_ * (cmpt->width_ - 1) + 1; if (image->brx_ < x) { image->brx_ = x; } y = cmpt->tly_ + cmpt->vstep_ * (cmpt->height_ - 1) + 1; if (image->bry_ < y) { image->bry_ = y; } } } else { image->tlx_ = 0; image->tly_ = 0; image->brx_ = 0; image->bry_ = 0; } } static int jas_image_growcmpts(jas_image_t *image, int maxcmpts) { jas_image_cmpt_t **newcmpts; int cmptno; newcmpts = (!image->cmpts_) ? 
jas_alloc2(maxcmpts, sizeof(jas_image_cmpt_t *)) : jas_realloc2(image->cmpts_, maxcmpts, sizeof(jas_image_cmpt_t *)); if (!newcmpts) { return -1; } image->cmpts_ = newcmpts; image->maxcmpts_ = maxcmpts; for (cmptno = image->numcmpts_; cmptno < image->maxcmpts_; ++cmptno) { image->cmpts_[cmptno] = 0; } return 0; } int jas_image_copycmpt(jas_image_t *dstimage, int dstcmptno, jas_image_t *srcimage, int srccmptno) { jas_image_cmpt_t *newcmpt; if (dstimage->numcmpts_ >= dstimage->maxcmpts_) { if (jas_image_growcmpts(dstimage, dstimage->maxcmpts_ + 128)) { return -1; } } if (!(newcmpt = jas_image_cmpt_copy(srcimage->cmpts_[srccmptno]))) { return -1; } if (dstcmptno < dstimage->numcmpts_) { memmove(&dstimage->cmpts_[dstcmptno + 1], &dstimage->cmpts_[dstcmptno], (dstimage->numcmpts_ - dstcmptno) * sizeof(jas_image_cmpt_t *)); } dstimage->cmpts_[dstcmptno] = newcmpt; ++dstimage->numcmpts_; jas_image_setbbox(dstimage); return 0; } void jas_image_dump(jas_image_t *image, FILE *out) { long buf[1024]; int cmptno; int n; int i; int width; int height; jas_image_cmpt_t *cmpt; for (cmptno = 0; cmptno < image->numcmpts_; ++cmptno) { cmpt = image->cmpts_[cmptno]; fprintf(out, "prec=%d, sgnd=%d, cmpttype=%"PRIiFAST32"\n", cmpt->prec_, cmpt->sgnd_, cmpt->type_); width = jas_image_cmptwidth(image, cmptno); height = jas_image_cmptheight(image, cmptno); n = JAS_MIN(16, width); if (jas_image_readcmpt2(image, cmptno, 0, 0, n, 1, buf)) { abort(); } for (i = 0; i < n; ++i) { fprintf(out, " f(%d,%d)=%ld", i, 0, buf[i]); } fprintf(out, "\n"); if (jas_image_readcmpt2(image, cmptno, width - n, height - 1, n, 1, buf)) { abort(); } for (i = 0; i < n; ++i) { fprintf(out, " f(%d,%d)=%ld", width - n + i, height - 1, buf[i]); } fprintf(out, "\n"); } } int jas_image_depalettize(jas_image_t *image, int cmptno, int numlutents, int_fast32_t *lutents, int dtype, int newcmptno) { jas_image_cmptparm_t cmptparms; int_fast32_t v; int i; int j; jas_image_cmpt_t *cmpt; cmpt = image->cmpts_[cmptno]; cmptparms.tlx 
= cmpt->tlx_; cmptparms.tly = cmpt->tly_; cmptparms.hstep = cmpt->hstep_; cmptparms.vstep = cmpt->vstep_; cmptparms.width = cmpt->width_; cmptparms.height = cmpt->height_; cmptparms.prec = JAS_IMAGE_CDT_GETPREC(dtype); cmptparms.sgnd = JAS_IMAGE_CDT_GETSGND(dtype); if (jas_image_addcmpt(image, newcmptno, &cmptparms)) { return -1; } if (newcmptno <= cmptno) { ++cmptno; cmpt = image->cmpts_[cmptno]; } for (j = 0; j < cmpt->height_; ++j) { for (i = 0; i < cmpt->width_; ++i) { v = jas_image_readcmptsample(image, cmptno, i, j); if (v < 0) { v = 0; } else if (v >= numlutents) { v = numlutents - 1; } jas_image_writecmptsample(image, newcmptno, i, j, lutents[v]); } } return 0; } int jas_image_readcmptsample(jas_image_t *image, int cmptno, int x, int y) { jas_image_cmpt_t *cmpt; uint_fast32_t v; int k; int c; cmpt = image->cmpts_[cmptno]; if (jas_stream_seek(cmpt->stream_, (cmpt->width_ * y + x) * cmpt->cps_, SEEK_SET) < 0) { return -1; } v = 0; for (k = cmpt->cps_; k > 0; --k) { if ((c = jas_stream_getc(cmpt->stream_)) == EOF) { return -1; } v = (v << 8) | (c & 0xff); } return bitstoint(v, cmpt->prec_, cmpt->sgnd_); } void jas_image_writecmptsample(jas_image_t *image, int cmptno, int x, int y, int_fast32_t v) { jas_image_cmpt_t *cmpt; uint_fast32_t t; int k; int c; cmpt = image->cmpts_[cmptno]; if (jas_stream_seek(cmpt->stream_, (cmpt->width_ * y + x) * cmpt->cps_, SEEK_SET) < 0) { return; } t = inttobits(v, cmpt->prec_, cmpt->sgnd_); for (k = cmpt->cps_; k > 0; --k) { c = (t >> (8 * (cmpt->cps_ - 1))) & 0xff; if (jas_stream_putc(cmpt->stream_, (unsigned char) c) == EOF) { return; } t <<= 8; } } int jas_image_getcmptbytype(jas_image_t *image, int ctype) { int cmptno; for (cmptno = 0; cmptno < image->numcmpts_; ++cmptno) { if (image->cmpts_[cmptno]->type_ == ctype) { return cmptno; } } return -1; } /***********************************************/ /***********************************************/ /***********************************************/ 
/***********************************************/ int jas_image_readcmpt2(jas_image_t *image, int cmptno, jas_image_coord_t x, jas_image_coord_t y, jas_image_coord_t width, jas_image_coord_t height, long *buf) { jas_image_cmpt_t *cmpt; jas_image_coord_t i; jas_image_coord_t j; long v; long *bufptr; if (cmptno < 0 || cmptno >= image->numcmpts_) goto error; cmpt = image->cmpts_[cmptno]; if (x < 0 || x >= cmpt->width_ || y < 0 || y >= cmpt->height_ || width < 0 || height < 0 || x + width > cmpt->width_ || y + height > cmpt->height_) goto error; bufptr = buf; for (i = 0; i < height; ++i) { if (jas_stream_seek(cmpt->stream_, (cmpt->width_ * (y + i) + x) * cmpt->cps_, SEEK_SET) < 0) goto error; for (j = 0; j < width; ++j) { if (getint(cmpt->stream_, cmpt->sgnd_, cmpt->prec_, &v)) goto error; *bufptr++ = v; } } return 0; error: return -1; } int jas_image_writecmpt2(jas_image_t *image, int cmptno, jas_image_coord_t x, jas_image_coord_t y, jas_image_coord_t width, jas_image_coord_t height, long *buf) { jas_image_cmpt_t *cmpt; jas_image_coord_t i; jas_image_coord_t j; long v; long *bufptr; if (cmptno < 0 || cmptno >= image->numcmpts_) goto error; cmpt = image->cmpts_[cmptno]; if (x < 0 || x >= cmpt->width_ || y < 0 || y >= cmpt->height_ || width < 0 || height < 0 || x + width > cmpt->width_ || y + height > cmpt->height_) goto error; bufptr = buf; for (i = 0; i < height; ++i) { if (jas_stream_seek(cmpt->stream_, (cmpt->width_ * (y + i) + x) * cmpt->cps_, SEEK_SET) < 0) goto error; for (j = 0; j < width; ++j) { v = *bufptr++; if (putint(cmpt->stream_, cmpt->sgnd_, cmpt->prec_, v)) goto error; } } return 0; error: return -1; } int jas_image_sampcmpt(jas_image_t *image, int cmptno, int newcmptno, jas_image_coord_t ho, jas_image_coord_t vo, jas_image_coord_t hs, jas_image_coord_t vs, int sgnd, int prec) { jas_image_cmpt_t *oldcmpt; jas_image_cmpt_t *newcmpt; int width; int height; jas_image_coord_t tlx; jas_image_coord_t tly; jas_image_coord_t brx; jas_image_coord_t bry; int i; 
int j; jas_image_cmptparm_t cmptparm; jas_image_coord_t ax; jas_image_coord_t ay; jas_image_coord_t bx; jas_image_coord_t by; jas_image_coord_t d0; jas_image_coord_t d1; jas_image_coord_t d2; jas_image_coord_t d3; jas_image_coord_t oldx; jas_image_coord_t oldy; jas_image_coord_t x; jas_image_coord_t y; long v; jas_image_coord_t cmptbrx; jas_image_coord_t cmptbry; assert(cmptno >= 0 && cmptno < image->numcmpts_); oldcmpt = image->cmpts_[cmptno]; assert(oldcmpt->tlx_ == 0 && oldcmpt->tly_ == 0); jas_image_calcbbox2(image, &tlx, &tly, &brx, &bry); width = FLOORDIV(brx - ho + hs, hs); height = FLOORDIV(bry - vo + vs, vs); cmptparm.tlx = ho; cmptparm.tly = vo; cmptparm.hstep = hs; cmptparm.vstep = vs; cmptparm.width = width; cmptparm.height = height; cmptparm.prec = prec; cmptparm.sgnd = sgnd; if (jas_image_addcmpt(image, newcmptno, &cmptparm)) goto error; cmptbrx = oldcmpt->tlx_ + (oldcmpt->width_ - 1) * oldcmpt->hstep_; cmptbry = oldcmpt->tly_ + (oldcmpt->height_ - 1) * oldcmpt->vstep_; newcmpt = image->cmpts_[newcmptno]; jas_stream_rewind(newcmpt->stream_); for (i = 0; i < height; ++i) { y = newcmpt->tly_ + newcmpt->vstep_ * i; for (j = 0; j < width; ++j) { x = newcmpt->tlx_ + newcmpt->hstep_ * j; ax = downtomult(x - oldcmpt->tlx_, oldcmpt->hstep_) + oldcmpt->tlx_; ay = downtomult(y - oldcmpt->tly_, oldcmpt->vstep_) + oldcmpt->tly_; bx = uptomult(x - oldcmpt->tlx_, oldcmpt->hstep_) + oldcmpt->tlx_; if (bx > cmptbrx) bx = cmptbrx; by = uptomult(y - oldcmpt->tly_, oldcmpt->vstep_) + oldcmpt->tly_; if (by > cmptbry) by = cmptbry; d0 = (ax - x) * (ax - x) + (ay - y) * (ay - y); d1 = (bx - x) * (bx - x) + (ay - y) * (ay - y); d2 = (bx - x) * (bx - x) + (by - y) * (by - y); d3 = (ax - x) * (ax - x) + (by - y) * (by - y); if (d0 <= d1 && d0 <= d2 && d0 <= d3) { oldx = (ax - oldcmpt->tlx_) / oldcmpt->hstep_; oldy = (ay - oldcmpt->tly_) / oldcmpt->vstep_; } else if (d1 <= d0 && d1 <= d2 && d1 <= d3) { oldx = (bx - oldcmpt->tlx_) / oldcmpt->hstep_; oldy = (ay - oldcmpt->tly_) 
/ oldcmpt->vstep_; } else if (d2 <= d0 && d2 <= d1 && d1 <= d3) { oldx = (bx - oldcmpt->tlx_) / oldcmpt->hstep_; oldy = (by - oldcmpt->tly_) / oldcmpt->vstep_; } else { oldx = (ax - oldcmpt->tlx_) / oldcmpt->hstep_; oldy = (by - oldcmpt->tly_) / oldcmpt->vstep_; } assert(oldx >= 0 && oldx < oldcmpt->width_ && oldy >= 0 && oldy < oldcmpt->height_); if (jas_stream_seek(oldcmpt->stream_, oldcmpt->cps_ * (oldy * oldcmpt->width_ + oldx), SEEK_SET) < 0) goto error; if (getint(oldcmpt->stream_, oldcmpt->sgnd_, oldcmpt->prec_, &v)) goto error; if (newcmpt->prec_ != oldcmpt->prec_ || newcmpt->sgnd_ != oldcmpt->sgnd_) { v = convert(v, oldcmpt->sgnd_, oldcmpt->prec_, newcmpt->sgnd_, newcmpt->prec_); } if (putint(newcmpt->stream_, newcmpt->sgnd_, newcmpt->prec_, v)) goto error; } } return 0; error: return -1; } int jas_image_ishomosamp(jas_image_t *image) { jas_image_coord_t hstep; jas_image_coord_t vstep; int result; int i; hstep = jas_image_cmpthstep(image, 0); vstep = jas_image_cmptvstep(image, 0); result = 1; for (i = 0; i < image->numcmpts_; ++i) { if (jas_image_cmpthstep(image, i) != hstep || jas_image_cmptvstep(image, i) != vstep) { result = 0; break; } } return result; } /* Note: This function defines a bounding box differently. 
*/ static void jas_image_calcbbox2(jas_image_t *image, jas_image_coord_t *tlx, jas_image_coord_t *tly, jas_image_coord_t *brx, jas_image_coord_t *bry) { jas_image_cmpt_t *cmpt; jas_image_coord_t tmptlx; jas_image_coord_t tmptly; jas_image_coord_t tmpbrx; jas_image_coord_t tmpbry; jas_image_coord_t t; int i; if (image->numcmpts_ > 0) { cmpt = image->cmpts_[0]; tmptlx = cmpt->tlx_; tmptly = cmpt->tly_; tmpbrx = cmpt->tlx_ + cmpt->hstep_ * (cmpt->width_ - 1); tmpbry = cmpt->tly_ + cmpt->vstep_ * (cmpt->height_ - 1); for (i = 0; i < image->numcmpts_; ++i) { cmpt = image->cmpts_[i]; if (cmpt->tlx_ < tmptlx) tmptlx = cmpt->tlx_; if (cmpt->tly_ < tmptly) tmptly = cmpt->tly_; t = cmpt->tlx_ + cmpt->hstep_ * (cmpt->width_ - 1); if (t > tmpbrx) tmpbrx = t; t = cmpt->tly_ + cmpt->vstep_ * (cmpt->height_ - 1); if (t > tmpbry) tmpbry = t; } } else { tmptlx = 0; tmptly = 0; tmpbrx = -1; tmpbry = -1; } *tlx = tmptlx; *tly = tmptly; *brx = tmpbrx; *bry = tmpbry; } static inline long decode_twos_comp(jas_ulong c, int prec) { long result; assert(prec >= 2); jas_eprintf("warning: support for signed data is untested\n"); // NOTE: Is this correct? result = (c & ((1 << (prec - 1)) - 1)) - (c & (1 << (prec - 1))); return result; } static inline jas_ulong encode_twos_comp(long n, int prec) { jas_ulong result; assert(prec >= 2); jas_eprintf("warning: support for signed data is untested\n"); // NOTE: Is this correct? 
if (n < 0) { result = -n; result = (result ^ 0xffffffffUL) + 1; result &= (1 << prec) - 1; } else { result = n; } return result; } static int getint(jas_stream_t *in, int sgnd, int prec, long *val) { long v; int n; int c; assert((!sgnd && prec >= 1) || (sgnd && prec >= 2)); n = (prec + 7) / 8; v = 0; while (--n >= 0) { if ((c = jas_stream_getc(in)) == EOF) return -1; v = (v << 8) | c; } v &= ((1 << prec) - 1); if (sgnd) { *val = decode_twos_comp(v, prec); } else { *val = v; } return 0; } static int putint(jas_stream_t *out, int sgnd, int prec, long val) { int n; int c; bool s; jas_ulong tmp; assert((!sgnd && prec >= 1) || (sgnd && prec >= 2)); if (sgnd) { val = encode_twos_comp(val, prec); } assert(val >= 0); val &= (1 << prec) - 1; n = (prec + 7) / 8; while (--n >= 0) { c = (val >> (n * 8)) & 0xff; if (jas_stream_putc(out, c) != c) return -1; } return 0; } static long convert(long val, int oldsgnd, int oldprec, int newsgnd, int newprec) { if (newsgnd != oldsgnd) { } if (newprec != oldprec) { if (newprec > oldprec) { val <<= newprec - oldprec; } else if (oldprec > newprec) { val >>= oldprec - newprec; } } return val; } static long downtomult(long x, long y) { assert(x >= 0); return (x / y) * y; } static long uptomult(long x, long y) { assert(x >= 0); return ((x + y - 1) / y) * y; } jas_image_t *jas_image_chclrspc(jas_image_t *image, jas_cmprof_t *outprof, int intent) { jas_image_t *inimage; int minhstep; int minvstep; int i; int j; int k; int n; int hstep; int vstep; int numinauxchans; int numoutauxchans; int numinclrchans; int numoutclrchans; int prec; jas_image_t *outimage; int cmpttype; int numoutchans; jas_cmprof_t *inprof; jas_cmprof_t *tmpprof; jas_image_cmptparm_t cmptparm; int width; int height; jas_cmxform_t *xform; jas_cmpixmap_t inpixmap; jas_cmpixmap_t outpixmap; jas_cmcmptfmt_t *incmptfmts; jas_cmcmptfmt_t *outcmptfmts; #if 0 jas_eprintf("IMAGE\n"); jas_image_dump(image, stderr); #endif outimage = 0; xform = 0; if (!(inimage = jas_image_copy(image))) 
goto error; image = 0; if (!jas_image_ishomosamp(inimage)) { minhstep = jas_image_cmpthstep(inimage, 0); minvstep = jas_image_cmptvstep(inimage, 0); for (i = 1; i < jas_image_numcmpts(inimage); ++i) { hstep = jas_image_cmpthstep(inimage, i); vstep = jas_image_cmptvstep(inimage, i); if (hstep < minhstep) { minhstep = hstep; } if (vstep < minvstep) { minvstep = vstep; } } n = jas_image_numcmpts(inimage); for (i = 0; i < n; ++i) { cmpttype = jas_image_cmpttype(inimage, i); if (jas_image_sampcmpt(inimage, i, i + 1, 0, 0, minhstep, minvstep, jas_image_cmptsgnd(inimage, i), jas_image_cmptprec(inimage, i))) { goto error; } jas_image_setcmpttype(inimage, i + 1, cmpttype); jas_image_delcmpt(inimage, i); } } width = jas_image_cmptwidth(inimage, 0); height = jas_image_cmptheight(inimage, 0); hstep = jas_image_cmpthstep(inimage, 0); vstep = jas_image_cmptvstep(inimage, 0); if (!(inprof = jas_image_cmprof(inimage))) { abort(); } numinclrchans = jas_clrspc_numchans(jas_cmprof_clrspc(inprof)); numinauxchans = jas_image_numcmpts(inimage) - numinclrchans; numoutclrchans = jas_clrspc_numchans(jas_cmprof_clrspc(outprof)); numoutauxchans = 0; numoutchans = numoutclrchans + numoutauxchans; prec = 8; if (!(outimage = jas_image_create0())) { goto error; } /* Create a component for each of the colorants. */ for (i = 0; i < numoutclrchans; ++i) { cmptparm.tlx = 0; cmptparm.tly = 0; cmptparm.hstep = hstep; cmptparm.vstep = vstep; cmptparm.width = width; cmptparm.height = height; cmptparm.prec = prec; cmptparm.sgnd = 0; if (jas_image_addcmpt(outimage, -1, &cmptparm)) goto error; jas_image_setcmpttype(outimage, i, JAS_IMAGE_CT_COLOR(i)); } #if 0 /* Copy the auxiliary components without modification. */ for (i = 0; i < jas_image_numcmpts(inimage); ++i) { if (!ISCOLOR(jas_image_cmpttype(inimage, i))) { jas_image_copycmpt(outimage, -1, inimage, i); /* XXX - need to specify laydown of component on ref. 
grid */ } } #endif if (!(tmpprof = jas_cmprof_copy(outprof))) goto error; assert(!jas_image_cmprof(outimage)); jas_image_setcmprof(outimage, tmpprof); tmpprof = 0; jas_image_setclrspc(outimage, jas_cmprof_clrspc(outprof)); if (!(xform = jas_cmxform_create(inprof, outprof, 0, JAS_CMXFORM_OP_FWD, intent, 0))) { goto error; } inpixmap.numcmpts = numinclrchans; if (!(incmptfmts = jas_alloc2(numinclrchans, sizeof(jas_cmcmptfmt_t)))) { abort(); } inpixmap.cmptfmts = incmptfmts; for (i = 0; i < numinclrchans; ++i) { j = jas_image_getcmptbytype(inimage, JAS_IMAGE_CT_COLOR(i)); assert(j >= 0); if (!(incmptfmts[i].buf = jas_alloc2(width, sizeof(long)))) { goto error; } incmptfmts[i].prec = jas_image_cmptprec(inimage, j); incmptfmts[i].sgnd = jas_image_cmptsgnd(inimage, j); incmptfmts[i].width = width; incmptfmts[i].height = 1; } outpixmap.numcmpts = numoutclrchans; if (!(outcmptfmts = jas_alloc2(numoutclrchans, sizeof(jas_cmcmptfmt_t)))) { abort(); } outpixmap.cmptfmts = outcmptfmts; for (i = 0; i < numoutclrchans; ++i) { j = jas_image_getcmptbytype(outimage, JAS_IMAGE_CT_COLOR(i)); assert(j >= 0); if (!(outcmptfmts[i].buf = jas_alloc2(width, sizeof(long)))) goto error; outcmptfmts[i].prec = jas_image_cmptprec(outimage, j); outcmptfmts[i].sgnd = jas_image_cmptsgnd(outimage, j); outcmptfmts[i].width = width; outcmptfmts[i].height = 1; } for (i = 0; i < height; ++i) { for (j = 0; j < numinclrchans; ++j) { k = jas_image_getcmptbytype(inimage, JAS_IMAGE_CT_COLOR(j)); if (jas_image_readcmpt2(inimage, k, 0, i, width, 1, incmptfmts[j].buf)) goto error; } jas_cmxform_apply(xform, &inpixmap, &outpixmap); for (j = 0; j < numoutclrchans; ++j) { k = jas_image_getcmptbytype(outimage, JAS_IMAGE_CT_COLOR(j)); if (jas_image_writecmpt2(outimage, k, 0, i, width, 1, outcmptfmts[j].buf)) goto error; } } for (i = 0; i < numoutclrchans; ++i) { jas_free(outcmptfmts[i].buf); } jas_free(outcmptfmts); for (i = 0; i < numinclrchans; ++i) { jas_free(incmptfmts[i].buf); } jas_free(incmptfmts); 
jas_cmxform_destroy(xform); jas_image_destroy(inimage); #if 0 jas_eprintf("INIMAGE\n"); jas_image_dump(inimage, stderr); jas_eprintf("OUTIMAGE\n"); jas_image_dump(outimage, stderr); #endif return outimage; error: if (xform) jas_cmxform_destroy(xform); if (inimage) jas_image_destroy(inimage); if (outimage) jas_image_destroy(outimage); return 0; }
static inline long decode_twos_comp(ulong c, int prec) { long result; assert(prec >= 2); jas_eprintf("warning: support for signed data is untested\n"); // NOTE: Is this correct? result = (c & ((1 << (prec - 1)) - 1)) - (c & (1 << (prec - 1))); return result; }
static inline long decode_twos_comp(jas_ulong c, int prec) { long result; assert(prec >= 2); jas_eprintf("warning: support for signed data is untested\n"); // NOTE: Is this correct? result = (c & ((1 << (prec - 1)) - 1)) - (c & (1 << (prec - 1))); return result; }
{'added': [(136, '\tsize_t rawsize;'), (141, '\timage = 0;'), (142, ''), (143, '\tJAS_DBGLOG(100, ("jas_image_create(%d, %p, %d)\\n", numcmpts, cmptparms,'), (144, '\t clrspc));'), (145, ''), (147, '\t\tgoto error;'), (152, '//\timage->inmem_ = true;'), (157, '\t\tgoto error;'), (164, '#if 0'), (175, '#endif'), (180, '\t\tif (!jas_safe_size_mul3(cmptparm->width, cmptparm->height,'), (181, '\t\t (cmptparm->prec + 7), &rawsize)) {'), (182, '\t\t\tgoto error;'), (183, '\t\t}'), (184, '\t\trawsize /= 8;'), (185, '\t\tinmem = (rawsize < JAS_IMAGE_INMEMTHRESH);'), (190, '\t\t\tgoto error;'), (200, ''), (201, 'error:'), (202, '\tif (image) {'), (203, '\t\tjas_image_destroy(image);'), (204, '\t}'), (205, '\treturn 0;'), (224, '//\timage->inmem_ = true;'), (336, '\tJAS_DBGLOG(100, ('), (337, '\t "jas_image_cmpt_create(%ld, %ld, %ld, %ld, %ld, %ld, %d, %d, %d)\\n",'), (338, '\t JAS_CAST(long, tlx),'), (339, '\t JAS_CAST(long, tly),'), (340, '\t JAS_CAST(long, hstep),'), (341, '\t JAS_CAST(long, vstep),'), (342, '\t JAS_CAST(long, width),'), (343, '\t JAS_CAST(long, height),'), (344, '\t JAS_CAST(int, depth),'), (345, '\t sgnd,'), (346, '\t inmem'), (347, '\t ));'), (348, ''), (357, '\tif (!jas_safe_intfast32_mul3(width, height, depth, 0)) {'), (358, '\t\tgoto error;'), (359, '\t}'), (380, '\tif (!jas_safe_size_mul3(cmpt->width_, cmpt->height_, cmpt->cps_, &size)) {'), (1314, 'static inline long decode_twos_comp(jas_ulong c, int prec)'), (1324, 'static inline jas_ulong encode_twos_comp(long n, int prec)'), (1326, '\tjas_ulong result;'), (1367, '\tjas_ulong tmp;')], 'deleted': [(136, '\tuint_fast32_t rawsize;'), (142, '\t\treturn 0;'), (147, '\timage->inmem_ = true;'), (152, '\t\tjas_image_destroy(image);'), (153, '\t\treturn 0;'), (178, '\t\t\tjas_image_destroy(image);'), (179, '\t\t\treturn 0;'), (207, '\timage->inmem_ = true;'), (347, '\tif (!jas_safe_size_mul(cmpt->width_, cmpt->height_, &size) ||'), (348, '\t !jas_safe_size_mul(size, cmpt->cps_, &size)) {'), (1282, 
'static inline long decode_twos_comp(ulong c, int prec)'), (1292, 'static inline ulong encode_twos_comp(long n, int prec)'), (1294, '\tulong result;'), (1335, '\tulong tmp;')]}
46
14
1,297
8,668
8
64
1
https://github.com/mdadams/jasper
CVE-2016-9395
CWE-20
394
snmp-ber.c
C
snmp_ber_encode_length
/* * Copyright (C) 2019 Yago Fontoura do Rosario <yago.rosario@hotmail.com.br> * All rights reserved. * * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following conditions * are met: * 1. Redistributions of source code must retain the above copyright * notice, this list of conditions and the following disclaimer. * 2. Redistributions in binary form must reproduce the above copyright * notice, this list of conditions and the following disclaimer in the * documentation and/or other materials provided with the distribution. * * 3. Neither the name of the copyright holder nor the names of its * contributors may be used to endorse or promote products derived * from this software without specific prior written permission. * * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS * ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS * FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE * COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES * (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR * SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, * STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED * OF THE POSSIBILITY OF SUCH DAMAGE. 
*/ /*---------------------------------------------------------------------------*/ /** * \file * An implementation of the Simple Network Management Protocol (RFC 3411-3418) * \author * Yago Fontoura do Rosario <yago.rosario@hotmail.com.br */ #include "contiki.h" #include "snmp.h" #include "snmp-ber.h" #define LOG_MODULE "SNMP [ber]" #define LOG_LEVEL LOG_LEVEL_SNMP /*---------------------------------------------------------------------------*/ unsigned char * snmp_ber_encode_type(unsigned char *out, uint32_t *out_len, uint8_t type) { *out-- = type; (*out_len)++; return out; } /*---------------------------------------------------------------------------*/ unsigned char * snmp_ber_encode_length(unsigned char *out, uint32_t *out_len, uint8_t length) { *out-- = length; (*out_len)++; return out; } /*---------------------------------------------------------------------------*/ unsigned char * snmp_ber_encode_integer(unsigned char *out, uint32_t *out_len, uint32_t number) { uint32_t original_out_len; original_out_len = *out_len; do { (*out_len)++; *out-- = (uint8_t)(number & 0xFF); number >>= 8; } while(number); out = snmp_ber_encode_length(out, out_len, ((*out_len - original_out_len) & 0xFF)); out = snmp_ber_encode_type(out, out_len, BER_DATA_TYPE_INTEGER); return out; } /*---------------------------------------------------------------------------*/ unsigned char * snmp_ber_encode_unsigned_integer(unsigned char *out, uint32_t *out_len, uint8_t type, uint32_t number) { uint32_t original_out_len; original_out_len = *out_len; do { (*out_len)++; *out-- = (uint8_t)(number & 0xFF); number >>= 8; } while(number); out = snmp_ber_encode_length(out, out_len, ((*out_len - original_out_len) & 0xFF)); out = snmp_ber_encode_type(out, out_len, type); return out; } /*---------------------------------------------------------------------------*/ unsigned char * snmp_ber_encode_string_len(unsigned char *out, uint32_t *out_len, const char *str, uint32_t length) { uint32_t i; str += length - 
1; for(i = 0; i < length; ++i) { (*out_len)++; *out-- = (uint8_t)*str--; } out = snmp_ber_encode_length(out, out_len, length); out = snmp_ber_encode_type(out, out_len, BER_DATA_TYPE_OCTET_STRING); return out; } /*---------------------------------------------------------------------------*/ unsigned char * snmp_ber_encode_null(unsigned char *out, uint32_t *out_len, uint8_t type) { (*out_len)++; *out-- = 0x00; out = snmp_ber_encode_type(out, out_len, type); return out; } /*---------------------------------------------------------------------------*/ unsigned char * snmp_ber_decode_type(unsigned char *buff, uint32_t *buff_len, uint8_t *type) { if(*buff_len == 0) { return NULL; } *type = *buff++; (*buff_len)--; return buff; } /*---------------------------------------------------------------------------*/ unsigned char * snmp_ber_decode_length(unsigned char *buff, uint32_t *buff_len, uint8_t *length) { if(*buff_len == 0) { return NULL; } *length = *buff++; (*buff_len)--; return buff; } /*---------------------------------------------------------------------------*/ unsigned char * snmp_ber_decode_integer(unsigned char *buf, uint32_t *buff_len, uint32_t *num) { uint8_t i, len, type; buf = snmp_ber_decode_type(buf, buff_len, &type); if(buf == NULL || type != BER_DATA_TYPE_INTEGER) { /* * Sanity check * Invalid type in buffer */ return NULL; } buf = snmp_ber_decode_length(buf, buff_len, &len); if(buf == NULL || len > 4) { /* * Sanity check * It will not fit in the uint32_t */ return NULL; } if(*buff_len < len) { return NULL; } *num = (uint32_t)(*buf++ & 0xFF); (*buff_len)--; for(i = 1; i < len; ++i) { *num <<= 8; *num |= (uint8_t)(*buf++ & 0xFF); (*buff_len)--; } return buf; } /*---------------------------------------------------------------------------*/ unsigned char * snmp_ber_decode_unsigned_integer(unsigned char *buf, uint32_t *buff_len, uint8_t expected_type, uint32_t *num) { uint8_t i, len, type; buf = snmp_ber_decode_type(buf, buff_len, &type); if(buf == NULL || 
type != expected_type) { /* * Sanity check * Invalid type in buffer */ return NULL; } buf = snmp_ber_decode_length(buf, buff_len, &len); if(buf == NULL || len > 4) { /* * Sanity check * It will not fit in the uint32_t */ return NULL; } if(*buff_len < len) { return NULL; } *num = (uint32_t)(*buf++ & 0xFF); (*buff_len)--; for(i = 1; i < len; ++i) { *num <<= 8; *num |= (uint8_t)(*buf++ & 0xFF); (*buff_len)--; } return buf; } /*---------------------------------------------------------------------------*/ unsigned char * snmp_ber_decode_string_len_buffer(unsigned char *buf, uint32_t *buff_len, const char **str, uint32_t *length) { uint8_t type, i, length_bytes; buf = snmp_ber_decode_type(buf, buff_len, &type); if(buf == NULL || type != BER_DATA_TYPE_OCTET_STRING) { /* * Sanity check * Invalid type in buffer */ return NULL; } if((*buf & 0x80) == 0) { *length = (uint32_t)*buf++; (*buff_len)--; } else { length_bytes = (uint8_t)(*buf++ & 0x7F); (*buff_len)--; if(length_bytes > 4) { /* * Sanity check * It will not fit in the uint32_t */ return NULL; } *length = (uint32_t)*buf++; (*buff_len)--; for(i = 1; i < length_bytes; ++i) { *length <<= 8; *length |= *buf++; (*buff_len)--; } } *str = (const char *)buf; *buff_len -= *length; return buf + *length; } /*---------------------------------------------------------------------------*/ unsigned char * snmp_ber_decode_null(unsigned char *buf, uint32_t *buff_len) { buf++; (*buff_len)--; buf++; (*buff_len)--; return buf; } /*---------------------------------------------------------------------------*/
/* * Copyright (C) 2019-2020 Yago Fontoura do Rosario <yago.rosario@hotmail.com.br> * All rights reserved. * * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following conditions * are met: * 1. Redistributions of source code must retain the above copyright * notice, this list of conditions and the following disclaimer. * 2. Redistributions in binary form must reproduce the above copyright * notice, this list of conditions and the following disclaimer in the * documentation and/or other materials provided with the distribution. * * 3. Neither the name of the copyright holder nor the names of its * contributors may be used to endorse or promote products derived * from this software without specific prior written permission. * * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS * ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS * FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE * COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES * (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR * SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, * STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED * OF THE POSSIBILITY OF SUCH DAMAGE. 
*/ /*---------------------------------------------------------------------------*/ /** * \file * SNMP Implementation of the BER encoding * \author * Yago Fontoura do Rosario <yago.rosario@hotmail.com.br */ #include "contiki.h" #include "snmp.h" #include "snmp-ber.h" #define LOG_MODULE "SNMP [ber]" #define LOG_LEVEL LOG_LEVEL_SNMP /*---------------------------------------------------------------------------*/ static inline int snmp_ber_encode_unsigned_integer(snmp_packet_t *snmp_packet, uint8_t type, uint32_t number) { uint16_t original_out_len; original_out_len = snmp_packet->used; do { if(snmp_packet->used == snmp_packet->max) { return 0; } *snmp_packet->out-- = (uint8_t)number & 0xFF; snmp_packet->used++; /* I'm not sure why but on MSPGCC the >> 8 operation goes haywire here */ #ifdef __MSPGCC__ number >>= 4; number >>= 4; #else /* __MSPGCC__ */ number >>= 8; #endif /* __MSPGCC__ */ } while(number); if(!snmp_ber_encode_length(snmp_packet, snmp_packet->used - original_out_len)) { return 0; } if(!snmp_ber_encode_type(snmp_packet, type)) { return 0; } return 1; } /*---------------------------------------------------------------------------*/ int snmp_ber_encode_type(snmp_packet_t *snmp_packet, uint8_t type) { if(snmp_packet->used == snmp_packet->max) { return 0; } *snmp_packet->out-- = type; snmp_packet->used++; return 1; } /*---------------------------------------------------------------------------*/ int snmp_ber_encode_length(snmp_packet_t *snmp_packet, uint16_t length) { if(length > 0xFF) { if(snmp_packet->used == snmp_packet->max) { return 0; } *snmp_packet->out-- = (uint8_t)length & 0xFF; snmp_packet->used++; if(snmp_packet->used == snmp_packet->max) { return 0; } *snmp_packet->out-- = (uint8_t)(length >> 8) & 0xFF; snmp_packet->used++; if(snmp_packet->used == snmp_packet->max) { return 0; } *snmp_packet->out-- = 0x82; snmp_packet->used++; } else if(length > 0x7F) { if(snmp_packet->used == snmp_packet->max) { return 0; } *snmp_packet->out-- = (uint8_t)length & 
0xFF; snmp_packet->used++; if(snmp_packet->used == snmp_packet->max) { return 0; } *snmp_packet->out-- = 0x81; snmp_packet->used++; } else { if(snmp_packet->used == snmp_packet->max) { return 0; } *snmp_packet->out-- = (uint8_t)length & 0x7F; snmp_packet->used++; } return 1; } /*---------------------------------------------------------------------------*/ int snmp_ber_encode_timeticks(snmp_packet_t *snmp_packet, uint32_t timeticks) { return snmp_ber_encode_unsigned_integer(snmp_packet, BER_DATA_TYPE_TIMETICKS, timeticks); } /*---------------------------------------------------------------------------*/ int snmp_ber_encode_integer(snmp_packet_t *snmp_packet, uint32_t number) { return snmp_ber_encode_unsigned_integer(snmp_packet, BER_DATA_TYPE_INTEGER, number); } /*---------------------------------------------------------------------------*/ int snmp_ber_encode_string_len(snmp_packet_t *snmp_packet, const char *str, uint32_t length) { uint32_t i; str += length - 1; for(i = 0; i < length; ++i) { if(snmp_packet->used == snmp_packet->max) { return 0; } *snmp_packet->out-- = (uint8_t)*str--; snmp_packet->used++; } if(!snmp_ber_encode_length(snmp_packet, length)) { return 0; } if(!snmp_ber_encode_type(snmp_packet, BER_DATA_TYPE_OCTET_STRING)) { return 0; } return 1; } /*---------------------------------------------------------------------------*/ int snmp_ber_encode_oid(snmp_packet_t *snmp_packet, snmp_oid_t *oid) { uint32_t val; uint16_t original_out_len; uint8_t pos; original_out_len = snmp_packet->used; pos = oid->length - 1; while(pos) { val = oid->data[pos]; if(snmp_packet->used == snmp_packet->max) { return 0; } *snmp_packet->out-- = (uint8_t)(val & 0x7F); snmp_packet->used++; val >>= 7; while(val) { if(snmp_packet->used == snmp_packet->max) { return 0; } *snmp_packet->out-- = (uint8_t)((val & 0x7F) | 0x80); snmp_packet->used++; val >>= 7; } pos--; } if(snmp_packet->used == snmp_packet->max) { return 0; } val = *(snmp_packet->out + 1) + 40 * oid->data[pos]; 
snmp_packet->used--; snmp_packet->out++; if(snmp_packet->used == snmp_packet->max) { return 0; } *snmp_packet->out-- = (uint8_t)(val & 0x7F); snmp_packet->used++; val >>= 7; while(val) { if(snmp_packet->used == snmp_packet->max) { return 0; } *snmp_packet->out-- = (uint8_t)((val & 0x7F) | 0x80); snmp_packet->used++; val >>= 7; } if(!snmp_ber_encode_length(snmp_packet, snmp_packet->used - original_out_len)) { return 0; } if(!snmp_ber_encode_type(snmp_packet, BER_DATA_TYPE_OBJECT_IDENTIFIER)) { return 0; } return 1; } /*---------------------------------------------------------------------------*/ int snmp_ber_encode_null(snmp_packet_t *snmp_packet, uint8_t type) { if(snmp_packet->used == snmp_packet->max) { return 0; } *snmp_packet->out-- = 0x00; snmp_packet->used++; return snmp_ber_encode_type(snmp_packet, type); } /*---------------------------------------------------------------------------*/ static inline int snmp_ber_decode_unsigned_integer(snmp_packet_t *snmp_packet, uint8_t expected_type, uint32_t *num) { uint8_t i, len, type; if(!snmp_ber_decode_type(snmp_packet, &type)) { return 0; } if(type != expected_type) { /* * Sanity check * Invalid type in buffer */ return 0; } if(!snmp_ber_decode_length(snmp_packet, &len)) { return 0; } if(len > 4) { /* * Sanity check * It will not fit in the uint32_t */ return 0; } if(snmp_packet->used == 0) { return 0; } *num = (uint32_t)(*snmp_packet->in++ & 0xFF); snmp_packet->used--; for(i = 1; i < len; ++i) { *num <<= 8; if(snmp_packet->used == 0) { return 0; } *num |= (uint8_t)(*snmp_packet->in++ & 0xFF); snmp_packet->used--; } return 1; } /*---------------------------------------------------------------------------*/ int snmp_ber_decode_type(snmp_packet_t *snmp_packet, uint8_t *type) { if(snmp_packet->used == 0) { return 0; } *type = *snmp_packet->in++; snmp_packet->used--; return 1; } /*---------------------------------------------------------------------------*/ int snmp_ber_decode_length(snmp_packet_t *snmp_packet, uint8_t 
*length) { if(snmp_packet->used == 0) { return 0; } *length = *snmp_packet->in++; snmp_packet->used--; return 1; } /*---------------------------------------------------------------------------*/ int snmp_ber_decode_timeticks(snmp_packet_t *snmp_packet, uint32_t *timeticks) { return snmp_ber_decode_unsigned_integer(snmp_packet, BER_DATA_TYPE_TIMETICKS, timeticks); } /*---------------------------------------------------------------------------*/ int snmp_ber_decode_integer(snmp_packet_t *snmp_packet, uint32_t *num) { return snmp_ber_decode_unsigned_integer(snmp_packet, BER_DATA_TYPE_INTEGER, num); } /*---------------------------------------------------------------------------*/ int snmp_ber_decode_string_len_buffer(snmp_packet_t *snmp_packet, const char **str, uint32_t *length) { uint8_t type, i, length_bytes; if(!snmp_ber_decode_type(snmp_packet, &type)) { return 0; } if(type != BER_DATA_TYPE_OCTET_STRING) { /* * Sanity check * Invalid type in buffer */ return 0; } if((*snmp_packet->in & 0x80) == 0) { if(snmp_packet->used == 0) { return 0; } *length = (uint32_t)*snmp_packet->in++; snmp_packet->used--; } else { if(snmp_packet->used == 0) { return 0; } length_bytes = (uint8_t)(*snmp_packet->in++ & 0x7F); snmp_packet->used--; if(length_bytes > 4) { /* * Sanity check * It will not fit in the uint32_t */ return 0; } if(snmp_packet->used == 0) { return 0; } *length = (uint32_t)*snmp_packet->in++; snmp_packet->used--; for(i = 1; i < length_bytes; ++i) { *length <<= 8; if(snmp_packet->used == 0) { return 0; } *length |= *snmp_packet->in++; snmp_packet->used--; } } *str = (const char *)snmp_packet->in; if(snmp_packet->used == 0 || snmp_packet->used - *length <= 0) { return 0; } snmp_packet->used -= *length; snmp_packet->in += *length; return 1; } /*---------------------------------------------------------------------------*/ int snmp_ber_decode_oid(snmp_packet_t *snmp_packet, snmp_oid_t *oid) { uint8_t *buf_end, type; uint8_t len, j; div_t first; 
if(!snmp_ber_decode_type(snmp_packet, &type)) { return 0; } if(type != BER_DATA_TYPE_OBJECT_IDENTIFIER) { return 0; } if(!snmp_ber_decode_length(snmp_packet, &len)) { return 0; } buf_end = snmp_packet->in + len; if(snmp_packet->used == 0) { return 0; } snmp_packet->used--; first = div(*snmp_packet->in++, 40); oid->length = 0; oid->data[oid->length++] = (uint32_t)first.quot; oid->data[oid->length++] = (uint32_t)first.rem; while(snmp_packet->in != buf_end) { if(oid->length >= SNMP_MSG_OID_MAX_LEN) { return 0; } if(snmp_packet->used == 0) { return 0; } oid->data[oid->length] = (uint32_t)(*snmp_packet->in & 0x7F); for(j = 0; j < 4; j++) { snmp_packet->used--; if((*snmp_packet->in++ & 0x80) == 0) { break; } if(snmp_packet->used == 0) { return 0; } oid->data[oid->length] <<= 7; oid->data[oid->length] |= (*snmp_packet->in & 0x7F); } oid->length++; } return 1; } /*---------------------------------------------------------------------------*/ int snmp_ber_decode_null(snmp_packet_t *snmp_packet) { if(snmp_packet->used == 0) { return 0; } snmp_packet->in++; snmp_packet->used--; if(snmp_packet->used == 0) { return 0; } snmp_packet->in++; snmp_packet->used--; return 1; } /*---------------------------------------------------------------------------*/
snmp_ber_encode_length(unsigned char *out, uint32_t *out_len, uint8_t length) { *out-- = length; (*out_len)++; return out; }
snmp_ber_encode_length(snmp_packet_t *snmp_packet, uint16_t length) { if(length > 0xFF) { if(snmp_packet->used == snmp_packet->max) { return 0; } *snmp_packet->out-- = (uint8_t)length & 0xFF; snmp_packet->used++; if(snmp_packet->used == snmp_packet->max) { return 0; } *snmp_packet->out-- = (uint8_t)(length >> 8) & 0xFF; snmp_packet->used++; if(snmp_packet->used == snmp_packet->max) { return 0; } *snmp_packet->out-- = 0x82; snmp_packet->used++; } else if(length > 0x7F) { if(snmp_packet->used == snmp_packet->max) { return 0; } *snmp_packet->out-- = (uint8_t)length & 0xFF; snmp_packet->used++; if(snmp_packet->used == snmp_packet->max) { return 0; } *snmp_packet->out-- = 0x81; snmp_packet->used++; } else { if(snmp_packet->used == snmp_packet->max) { return 0; } *snmp_packet->out-- = (uint8_t)length & 0x7F; snmp_packet->used++; } return 1; }
{'added': [(2, ' * Copyright (C) 2019-2020 Yago Fontoura do Rosario <yago.rosario@hotmail.com.br>'), (35, ' * SNMP Implementation of the BER encoding'), (49, 'static inline int'), (50, 'snmp_ber_encode_unsigned_integer(snmp_packet_t *snmp_packet, uint8_t type, uint32_t number)'), (52, ' uint16_t original_out_len;'), (54, ' original_out_len = snmp_packet->used;'), (56, ' if(snmp_packet->used == snmp_packet->max) {'), (57, ' return 0;'), (58, ' }'), (59, ''), (60, ' *snmp_packet->out-- = (uint8_t)number & 0xFF;'), (61, ' snmp_packet->used++;'), (62, " /* I'm not sure why but on MSPGCC the >> 8 operation goes haywire here */"), (63, '#ifdef __MSPGCC__'), (64, ' number >>= 4;'), (65, ' number >>= 4;'), (66, '#else /* __MSPGCC__ */'), (68, '#endif /* __MSPGCC__ */'), (71, ' if(!snmp_ber_encode_length(snmp_packet, snmp_packet->used - original_out_len)) {'), (72, ' return 0;'), (73, ' }'), (74, ''), (75, ' if(!snmp_ber_encode_type(snmp_packet, type)) {'), (76, ' return 0;'), (77, ' }'), (79, ' return 1;'), (82, 'int'), (83, 'snmp_ber_encode_type(snmp_packet_t *snmp_packet, uint8_t type)'), (85, ' if(snmp_packet->used == snmp_packet->max) {'), (86, ' return 0;'), (87, ' }'), (89, ' *snmp_packet->out-- = type;'), (90, ' snmp_packet->used++;'), (91, ''), (92, ' return 1;'), (93, '}'), (94, '/*---------------------------------------------------------------------------*/'), (95, 'int'), (96, 'snmp_ber_encode_length(snmp_packet_t *snmp_packet, uint16_t length)'), (97, '{'), (98, ' if(length > 0xFF) {'), (99, ' if(snmp_packet->used == snmp_packet->max) {'), (100, ' return 0;'), (101, ' }'), (102, ''), (103, ' *snmp_packet->out-- = (uint8_t)length & 0xFF;'), (104, ' snmp_packet->used++;'), (105, ''), (106, ' if(snmp_packet->used == snmp_packet->max) {'), (107, ' return 0;'), (108, ' }'), (109, ''), (110, ' *snmp_packet->out-- = (uint8_t)(length >> 8) & 0xFF;'), (111, ' snmp_packet->used++;'), (112, ''), (113, ' if(snmp_packet->used == snmp_packet->max) {'), (114, ' return 0;'), 
(115, ' }'), (116, ''), (117, ' *snmp_packet->out-- = 0x82;'), (118, ' snmp_packet->used++;'), (119, ' } else if(length > 0x7F) {'), (120, ' if(snmp_packet->used == snmp_packet->max) {'), (121, ' return 0;'), (122, ' }'), (123, ''), (124, ' *snmp_packet->out-- = (uint8_t)length & 0xFF;'), (125, ' snmp_packet->used++;'), (127, ' if(snmp_packet->used == snmp_packet->max) {'), (128, ' return 0;'), (129, ' }'), (131, ' *snmp_packet->out-- = 0x81;'), (132, ' snmp_packet->used++;'), (133, ' } else {'), (134, ' if(snmp_packet->used == snmp_packet->max) {'), (135, ' return 0;'), (136, ' }'), (137, ''), (138, ' *snmp_packet->out-- = (uint8_t)length & 0x7F;'), (139, ' snmp_packet->used++;'), (140, ' }'), (141, ''), (142, ' return 1;'), (143, '}'), (144, '/*---------------------------------------------------------------------------*/'), (145, 'int'), (146, 'snmp_ber_encode_timeticks(snmp_packet_t *snmp_packet, uint32_t timeticks)'), (147, '{'), (148, ' return snmp_ber_encode_unsigned_integer(snmp_packet, BER_DATA_TYPE_TIMETICKS, timeticks);'), (149, '}'), (150, '/*---------------------------------------------------------------------------*/'), (151, 'int'), (152, 'snmp_ber_encode_integer(snmp_packet_t *snmp_packet, uint32_t number)'), (153, '{'), (154, ' return snmp_ber_encode_unsigned_integer(snmp_packet, BER_DATA_TYPE_INTEGER, number);'), (157, 'int'), (158, 'snmp_ber_encode_string_len(snmp_packet_t *snmp_packet, const char *str, uint32_t length)'), (164, ' if(snmp_packet->used == snmp_packet->max) {'), (165, ' return 0;'), (166, ' }'), (167, ''), (168, ' *snmp_packet->out-- = (uint8_t)*str--;'), (169, ' snmp_packet->used++;'), (172, ' if(!snmp_ber_encode_length(snmp_packet, length)) {'), (173, ' return 0;'), (174, ' }'), (176, ' if(!snmp_ber_encode_type(snmp_packet, BER_DATA_TYPE_OCTET_STRING)) {'), (177, ' return 0;'), (178, ' }'), (180, ' return 1;'), (183, 'int'), (184, 'snmp_ber_encode_oid(snmp_packet_t *snmp_packet, snmp_oid_t *oid)'), (186, ' uint32_t val;'), (187, ' 
uint16_t original_out_len;'), (188, ' uint8_t pos;'), (189, ''), (190, ' original_out_len = snmp_packet->used;'), (191, ''), (192, ' pos = oid->length - 1;'), (193, ' while(pos) {'), (194, ' val = oid->data[pos];'), (195, ''), (196, ' if(snmp_packet->used == snmp_packet->max) {'), (197, ' return 0;'), (198, ' }'), (199, ''), (200, ' *snmp_packet->out-- = (uint8_t)(val & 0x7F);'), (201, ' snmp_packet->used++;'), (202, ' val >>= 7;'), (203, ''), (204, ' while(val) {'), (205, ' if(snmp_packet->used == snmp_packet->max) {'), (206, ' return 0;'), (207, ' }'), (208, ''), (209, ' *snmp_packet->out-- = (uint8_t)((val & 0x7F) | 0x80);'), (210, ' snmp_packet->used++;'), (211, ''), (212, ' val >>= 7;'), (213, ' }'), (214, ' pos--;'), (215, ' }'), (216, ''), (217, ' if(snmp_packet->used == snmp_packet->max) {'), (218, ' return 0;'), (219, ' }'), (220, ''), (221, ' val = *(snmp_packet->out + 1) + 40 * oid->data[pos];'), (222, ' snmp_packet->used--;'), (223, ' snmp_packet->out++;'), (224, ''), (225, ' if(snmp_packet->used == snmp_packet->max) {'), (226, ' return 0;'), (227, ' }'), (228, ''), (229, ' *snmp_packet->out-- = (uint8_t)(val & 0x7F);'), (230, ' snmp_packet->used++;'), (231, ''), (232, ' val >>= 7;'), (233, ''), (234, ' while(val) {'), (235, ' if(snmp_packet->used == snmp_packet->max) {'), (236, ' return 0;'), (237, ' }'), (238, ''), (239, ' *snmp_packet->out-- = (uint8_t)((val & 0x7F) | 0x80);'), (240, ' snmp_packet->used++;'), (241, ''), (242, ' val >>= 7;'), (243, ' }'), (244, ''), (245, ' if(!snmp_ber_encode_length(snmp_packet, snmp_packet->used - original_out_len)) {'), (246, ' return 0;'), (249, ' if(!snmp_ber_encode_type(snmp_packet, BER_DATA_TYPE_OBJECT_IDENTIFIER)) {'), (250, ' return 0;'), (251, ' }'), (253, ' return 1;'), (256, 'int'), (257, 'snmp_ber_encode_null(snmp_packet_t *snmp_packet, uint8_t type)'), (259, ''), (260, ' if(snmp_packet->used == snmp_packet->max) {'), (261, ' return 0;'), (264, ' *snmp_packet->out-- = 0x00;'), (265, ' 
snmp_packet->used++;'), (267, ' return snmp_ber_encode_type(snmp_packet, type);'), (270, 'static inline int'), (271, 'snmp_ber_decode_unsigned_integer(snmp_packet_t *snmp_packet, uint8_t expected_type, uint32_t *num)'), (275, ' if(!snmp_ber_decode_type(snmp_packet, &type)) {'), (276, ' return 0;'), (277, ' }'), (279, ' if(type != expected_type) {'), (284, ' return 0;'), (287, ' if(!snmp_ber_decode_length(snmp_packet, &len)) {'), (288, ' return 0;'), (289, ' }'), (291, ' if(len > 4) {'), (296, ' return 0;'), (299, ' if(snmp_packet->used == 0) {'), (300, ' return 0;'), (303, ' *num = (uint32_t)(*snmp_packet->in++ & 0xFF);'), (304, ' snmp_packet->used--;'), (305, ''), (308, ' if(snmp_packet->used == 0) {'), (309, ' return 0;'), (310, ' }'), (311, ' *num |= (uint8_t)(*snmp_packet->in++ & 0xFF);'), (312, ' snmp_packet->used--;'), (315, ' return 1;'), (318, 'int'), (319, 'snmp_ber_decode_type(snmp_packet_t *snmp_packet, uint8_t *type)'), (321, ' if(snmp_packet->used == 0) {'), (322, ' return 0;'), (325, ' *type = *snmp_packet->in++;'), (326, ' snmp_packet->used--;'), (328, ' return 1;'), (329, '}'), (330, '/*---------------------------------------------------------------------------*/'), (331, 'int'), (332, 'snmp_ber_decode_length(snmp_packet_t *snmp_packet, uint8_t *length)'), (333, '{'), (334, ' if(snmp_packet->used == 0) {'), (335, ' return 0;'), (338, ' *length = *snmp_packet->in++;'), (339, ' snmp_packet->used--;'), (341, ' return 1;'), (342, '}'), (343, '/*---------------------------------------------------------------------------*/'), (344, 'int'), (345, 'snmp_ber_decode_timeticks(snmp_packet_t *snmp_packet, uint32_t *timeticks)'), (346, '{'), (347, ' return snmp_ber_decode_unsigned_integer(snmp_packet, BER_DATA_TYPE_TIMETICKS, timeticks);'), (350, 'int'), (351, 'snmp_ber_decode_integer(snmp_packet_t *snmp_packet, uint32_t *num)'), (352, '{'), (353, ' return snmp_ber_decode_unsigned_integer(snmp_packet, BER_DATA_TYPE_INTEGER, num);'), (354, '}'), (355, 
'/*---------------------------------------------------------------------------*/'), (356, 'int'), (357, 'snmp_ber_decode_string_len_buffer(snmp_packet_t *snmp_packet, const char **str, uint32_t *length)'), (361, ' if(!snmp_ber_decode_type(snmp_packet, &type)) {'), (362, ' return 0;'), (363, ' }'), (365, ' if(type != BER_DATA_TYPE_OCTET_STRING) {'), (370, ' return 0;'), (373, ' if((*snmp_packet->in & 0x80) == 0) {'), (374, ''), (375, ' if(snmp_packet->used == 0) {'), (376, ' return 0;'), (377, ' }'), (378, ''), (379, ' *length = (uint32_t)*snmp_packet->in++;'), (380, ' snmp_packet->used--;'), (383, ' if(snmp_packet->used == 0) {'), (384, ' return 0;'), (385, ' }'), (386, ''), (387, ' length_bytes = (uint8_t)(*snmp_packet->in++ & 0x7F);'), (388, ' snmp_packet->used--;'), (389, ''), (395, ' return 0;'), (396, ' }'), (397, ''), (398, ' if(snmp_packet->used == 0) {'), (399, ' return 0;'), (402, ' *length = (uint32_t)*snmp_packet->in++;'), (403, ' snmp_packet->used--;'), (404, ''), (407, ''), (408, ' if(snmp_packet->used == 0) {'), (409, ' return 0;'), (410, ' }'), (411, ''), (412, ' *length |= *snmp_packet->in++;'), (413, ' snmp_packet->used--;'), (417, ' *str = (const char *)snmp_packet->in;'), (418, ''), (419, ' if(snmp_packet->used == 0 || snmp_packet->used - *length <= 0) {'), (420, ' return 0;'), (421, ' }'), (422, ''), (423, ' snmp_packet->used -= *length;'), (424, ' snmp_packet->in += *length;'), (425, ''), (426, ' return 1;'), (427, '}'), (428, '/*---------------------------------------------------------------------------*/'), (429, 'int'), (430, 'snmp_ber_decode_oid(snmp_packet_t *snmp_packet, snmp_oid_t *oid)'), (431, '{'), (432, ' uint8_t *buf_end, type;'), (433, ' uint8_t len, j;'), (434, ' div_t first;'), (435, ''), (436, ' if(!snmp_ber_decode_type(snmp_packet, &type)) {'), (437, ' return 0;'), (438, ' }'), (439, ''), (440, ' if(type != BER_DATA_TYPE_OBJECT_IDENTIFIER) {'), (441, ' return 0;'), (442, ' }'), (443, ''), (444, ' 
if(!snmp_ber_decode_length(snmp_packet, &len)) {'), (445, ' return 0;'), (446, ' }'), (447, ''), (448, ' buf_end = snmp_packet->in + len;'), (449, ''), (450, ' if(snmp_packet->used == 0) {'), (451, ' return 0;'), (452, ' }'), (453, ''), (454, ' snmp_packet->used--;'), (455, ' first = div(*snmp_packet->in++, 40);'), (456, ''), (457, ' oid->length = 0;'), (458, ''), (459, ' oid->data[oid->length++] = (uint32_t)first.quot;'), (460, ' oid->data[oid->length++] = (uint32_t)first.rem;'), (461, ''), (462, ' while(snmp_packet->in != buf_end) {'), (463, ' if(oid->length >= SNMP_MSG_OID_MAX_LEN) {'), (464, ' return 0;'), (465, ' }'), (467, ' if(snmp_packet->used == 0) {'), (468, ' return 0;'), (469, ' }'), (470, ' oid->data[oid->length] = (uint32_t)(*snmp_packet->in & 0x7F);'), (471, ' for(j = 0; j < 4; j++) {'), (472, ' snmp_packet->used--;'), (473, ' if((*snmp_packet->in++ & 0x80) == 0) {'), (474, ' break;'), (475, ' }'), (476, ''), (477, ' if(snmp_packet->used == 0) {'), (478, ' return 0;'), (479, ' }'), (480, ''), (481, ' oid->data[oid->length] <<= 7;'), (482, ' oid->data[oid->length] |= (*snmp_packet->in & 0x7F);'), (483, ' }'), (484, ''), (485, ' oid->length++;'), (486, ' }'), (487, ''), (488, ' return 1;'), (491, 'int'), (492, 'snmp_ber_decode_null(snmp_packet_t *snmp_packet)'), (494, ' if(snmp_packet->used == 0) {'), (495, ' return 0;'), (496, ' }'), (497, ''), (498, ' snmp_packet->in++;'), (499, ' snmp_packet->used--;'), (500, ''), (501, ' if(snmp_packet->used == 0) {'), (502, ' return 0;'), (503, ' }'), (505, ' snmp_packet->in++;'), (506, ' snmp_packet->used--;'), (508, ' return 1;')], 'deleted': [(2, ' * Copyright (C) 2019 Yago Fontoura do Rosario <yago.rosario@hotmail.com.br>'), (35, ' * An implementation of the Simple Network Management Protocol (RFC 3411-3418)'), (49, 'unsigned char *'), (50, 'snmp_ber_encode_type(unsigned char *out, uint32_t *out_len, uint8_t type)'), (52, ' *out-- = type;'), (53, ' (*out_len)++;'), (54, ' return out;'), (55, '}'), (56, 
'/*---------------------------------------------------------------------------*/'), (57, 'unsigned char *'), (58, 'snmp_ber_encode_length(unsigned char *out, uint32_t *out_len, uint8_t length)'), (59, '{'), (60, ' *out-- = length;'), (61, ' (*out_len)++;'), (62, ' return out;'), (63, '}'), (64, '/*---------------------------------------------------------------------------*/'), (65, 'unsigned char *'), (66, 'snmp_ber_encode_integer(unsigned char *out, uint32_t *out_len, uint32_t number)'), (67, '{'), (68, ' uint32_t original_out_len;'), (70, ' original_out_len = *out_len;'), (72, ' (*out_len)++;'), (73, ' *out-- = (uint8_t)(number & 0xFF);'), (77, ' out = snmp_ber_encode_length(out, out_len, ((*out_len - original_out_len) & 0xFF));'), (78, ' out = snmp_ber_encode_type(out, out_len, BER_DATA_TYPE_INTEGER);'), (80, ' return out;'), (83, 'unsigned char *'), (84, 'snmp_ber_encode_unsigned_integer(unsigned char *out, uint32_t *out_len, uint8_t type, uint32_t number)'), (86, ' uint32_t original_out_len;'), (88, ' original_out_len = *out_len;'), (89, ' do {'), (90, ' (*out_len)++;'), (91, ' *out-- = (uint8_t)(number & 0xFF);'), (92, ' number >>= 8;'), (93, ' } while(number);'), (95, ' out = snmp_ber_encode_length(out, out_len, ((*out_len - original_out_len) & 0xFF));'), (96, ' out = snmp_ber_encode_type(out, out_len, type);'), (98, ' return out;'), (101, 'unsigned char *'), (102, 'snmp_ber_encode_string_len(unsigned char *out, uint32_t *out_len, const char *str, uint32_t length)'), (108, ' (*out_len)++;'), (109, ' *out-- = (uint8_t)*str--;'), (112, ' out = snmp_ber_encode_length(out, out_len, length);'), (113, ' out = snmp_ber_encode_type(out, out_len, BER_DATA_TYPE_OCTET_STRING);'), (115, ' return out;'), (116, '}'), (117, '/*---------------------------------------------------------------------------*/'), (118, 'unsigned char *'), (119, 'snmp_ber_encode_null(unsigned char *out, uint32_t *out_len, uint8_t type)'), (120, '{'), (121, ' (*out_len)++;'), (122, ' *out-- = 
0x00;'), (123, ' out = snmp_ber_encode_type(out, out_len, type);'), (125, ' return out;'), (128, 'unsigned char *'), (129, 'snmp_ber_decode_type(unsigned char *buff, uint32_t *buff_len, uint8_t *type)'), (131, ' if(*buff_len == 0) {'), (132, ' return NULL;'), (135, ' *type = *buff++;'), (136, ' (*buff_len)--;'), (138, ' return buff;'), (141, 'unsigned char *'), (142, 'snmp_ber_decode_length(unsigned char *buff, uint32_t *buff_len, uint8_t *length)'), (144, ' if(*buff_len == 0) {'), (145, ' return NULL;'), (148, ' *length = *buff++;'), (149, ' (*buff_len)--;'), (151, ' return buff;'), (154, 'unsigned char *'), (155, 'snmp_ber_decode_integer(unsigned char *buf, uint32_t *buff_len, uint32_t *num)'), (159, ' buf = snmp_ber_decode_type(buf, buff_len, &type);'), (161, ' if(buf == NULL || type != BER_DATA_TYPE_INTEGER) {'), (166, ' return NULL;'), (169, ' buf = snmp_ber_decode_length(buf, buff_len, &len);'), (171, ' if(buf == NULL || len > 4) {'), (176, ' return NULL;'), (179, ' if(*buff_len < len) {'), (180, ' return NULL;'), (183, ' *num = (uint32_t)(*buf++ & 0xFF);'), (184, ' (*buff_len)--;'), (187, ' *num |= (uint8_t)(*buf++ & 0xFF);'), (188, ' (*buff_len)--;'), (191, ' return buf;'), (194, 'unsigned char *'), (195, 'snmp_ber_decode_unsigned_integer(unsigned char *buf, uint32_t *buff_len, uint8_t expected_type, uint32_t *num)'), (197, ' uint8_t i, len, type;'), (198, ''), (199, ' buf = snmp_ber_decode_type(buf, buff_len, &type);'), (200, ''), (201, ' if(buf == NULL || type != expected_type) {'), (202, ' /*'), (203, ' * Sanity check'), (204, ' * Invalid type in buffer'), (205, ' */'), (206, ' return NULL;'), (209, ' buf = snmp_ber_decode_length(buf, buff_len, &len);'), (210, ''), (211, ' if(buf == NULL || len > 4) {'), (212, ' /*'), (213, ' * Sanity check'), (214, ' * It will not fit in the uint32_t'), (215, ' */'), (216, ' return NULL;'), (217, ' }'), (219, ' if(*buff_len < len) {'), (220, ' return NULL;'), (223, ' *num = (uint32_t)(*buf++ & 0xFF);'), (224, ' 
(*buff_len)--;'), (225, ' for(i = 1; i < len; ++i) {'), (226, ' *num <<= 8;'), (227, ' *num |= (uint8_t)(*buf++ & 0xFF);'), (228, ' (*buff_len)--;'), (229, ' }'), (231, ' return buf;'), (234, 'unsigned char *'), (235, 'snmp_ber_decode_string_len_buffer(unsigned char *buf, uint32_t *buff_len, const char **str, uint32_t *length)'), (239, ' buf = snmp_ber_decode_type(buf, buff_len, &type);'), (241, ' if(buf == NULL || type != BER_DATA_TYPE_OCTET_STRING) {'), (246, ' return NULL;'), (249, ' if((*buf & 0x80) == 0) {'), (250, ' *length = (uint32_t)*buf++;'), (251, ' (*buff_len)--;'), (254, ' length_bytes = (uint8_t)(*buf++ & 0x7F);'), (255, ' (*buff_len)--;'), (261, ' return NULL;'), (264, ' *length = (uint32_t)*buf++;'), (265, ' (*buff_len)--;'), (268, ' *length |= *buf++;'), (269, ' (*buff_len)--;'), (273, ' *str = (const char *)buf;'), (274, ' *buff_len -= *length;'), (276, ' return buf + *length;'), (279, 'unsigned char *'), (280, 'snmp_ber_decode_null(unsigned char *buf, uint32_t *buff_len)'), (282, ' buf++;'), (283, ' (*buff_len)--;'), (285, ' buf++;'), (286, ' (*buff_len)--;'), (288, ' return buf;')]}
360
140
339
1,830
6
31
1
https://github.com/contiki-ng/contiki-ng
CVE-2020-12141
CWE-125
1,270
TLSInStream.cxx
C++
TLSInStream::readTLS
/* Copyright (C) 2002-2005 RealVNC Ltd. All Rights Reserved. * Copyright (C) 2005 Martin Koegler * Copyright (C) 2010 TigerVNC Team * * This is free software; you can redistribute it and/or modify * it under the terms of the GNU General Public License as published by * the Free Software Foundation; either version 2 of the License, or * (at your option) any later version. * * This software is distributed in the hope that it will be useful, * but WITHOUT ANY WARRANTY; without even the implied warranty of * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the * GNU General Public License for more details. * * You should have received a copy of the GNU General Public License * along with this software; if not, write to the Free Software * Foundation, Inc., 59 Temple Place - Suite 330, Boston, MA 02111-1307, * USA. */ #ifdef HAVE_CONFIG_H #include <config.h> #endif #include <rdr/Exception.h> #include <rdr/TLSException.h> #include <rdr/TLSInStream.h> #include <errno.h> #ifdef HAVE_GNUTLS using namespace rdr; enum { DEFAULT_BUF_SIZE = 16384 }; ssize_t TLSInStream::pull(gnutls_transport_ptr_t str, void* data, size_t size) { TLSInStream* self= (TLSInStream*) str; InStream *in = self->in; try { if (!in->check(1, 1, false)) { gnutls_transport_set_errno(self->session, EAGAIN); return -1; } if (in->getend() - in->getptr() < (ptrdiff_t)size) size = in->getend() - in->getptr(); in->readBytes(data, size); } catch (Exception& e) { gnutls_transport_set_errno(self->session, EINVAL); return -1; } return size; } TLSInStream::TLSInStream(InStream* _in, gnutls_session_t _session) : session(_session), in(_in), bufSize(DEFAULT_BUF_SIZE), offset(0) { gnutls_transport_ptr_t recv, send; ptr = end = start = new U8[bufSize]; gnutls_transport_set_pull_function(session, pull); gnutls_transport_get_ptr2(session, &recv, &send); gnutls_transport_set_ptr2(session, this, send); } TLSInStream::~TLSInStream() { gnutls_transport_set_pull_function(session, NULL); delete[] start; } int 
TLSInStream::pos() { return offset + ptr - start; } int TLSInStream::overrun(int itemSize, int nItems, bool wait) { if (itemSize > bufSize) throw Exception("TLSInStream overrun: max itemSize exceeded"); if (end - ptr != 0) memmove(start, ptr, end - ptr); offset += ptr - start; end -= ptr - start; ptr = start; while (end < start + itemSize) { int n = readTLS((U8*) end, start + bufSize - end, wait); if (!wait && n == 0) return 0; end += n; } if (itemSize * nItems > end - ptr) nItems = (end - ptr) / itemSize; return nItems; } int TLSInStream::readTLS(U8* buf, int len, bool wait) { int n; n = in->check(1, 1, wait); if (n == 0) return 0; n = gnutls_record_recv(session, (void *) buf, len); if (n == GNUTLS_E_INTERRUPTED || n == GNUTLS_E_AGAIN) return 0; if (n < 0) throw TLSException("readTLS", n); return n; } #endif
/* Copyright (C) 2002-2005 RealVNC Ltd. All Rights Reserved. * Copyright (C) 2005 Martin Koegler * Copyright (C) 2010 TigerVNC Team * * This is free software; you can redistribute it and/or modify * it under the terms of the GNU General Public License as published by * the Free Software Foundation; either version 2 of the License, or * (at your option) any later version. * * This software is distributed in the hope that it will be useful, * but WITHOUT ANY WARRANTY; without even the implied warranty of * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the * GNU General Public License for more details. * * You should have received a copy of the GNU General Public License * along with this software; if not, write to the Free Software * Foundation, Inc., 59 Temple Place - Suite 330, Boston, MA 02111-1307, * USA. */ #ifdef HAVE_CONFIG_H #include <config.h> #endif #include <rdr/Exception.h> #include <rdr/TLSException.h> #include <rdr/TLSInStream.h> #include <errno.h> #ifdef HAVE_GNUTLS using namespace rdr; enum { DEFAULT_BUF_SIZE = 16384 }; ssize_t TLSInStream::pull(gnutls_transport_ptr_t str, void* data, size_t size) { TLSInStream* self= (TLSInStream*) str; InStream *in = self->in; try { if (!in->check(1, 1, false)) { gnutls_transport_set_errno(self->session, EAGAIN); return -1; } if (in->getend() - in->getptr() < (ptrdiff_t)size) size = in->getend() - in->getptr(); in->readBytes(data, size); } catch (Exception& e) { gnutls_transport_set_errno(self->session, EINVAL); return -1; } return size; } TLSInStream::TLSInStream(InStream* _in, gnutls_session_t _session) : session(_session), in(_in), bufSize(DEFAULT_BUF_SIZE), offset(0) { gnutls_transport_ptr_t recv, send; ptr = end = start = new U8[bufSize]; gnutls_transport_set_pull_function(session, pull); gnutls_transport_get_ptr2(session, &recv, &send); gnutls_transport_set_ptr2(session, this, send); } TLSInStream::~TLSInStream() { gnutls_transport_set_pull_function(session, NULL); delete[] start; } size_t 
TLSInStream::pos() { return offset + ptr - start; } size_t TLSInStream::overrun(size_t itemSize, size_t nItems, bool wait) { if (itemSize > bufSize) throw Exception("TLSInStream overrun: max itemSize exceeded"); if (end - ptr != 0) memmove(start, ptr, end - ptr); offset += ptr - start; end -= ptr - start; ptr = start; while (end < start + itemSize) { size_t n = readTLS((U8*) end, start + bufSize - end, wait); if (!wait && n == 0) return 0; end += n; } if (itemSize * nItems > (size_t)(end - ptr)) nItems = (end - ptr) / itemSize; return nItems; } size_t TLSInStream::readTLS(U8* buf, size_t len, bool wait) { int n; n = in->check(1, 1, wait); if (n == 0) return 0; n = gnutls_record_recv(session, (void *) buf, len); if (n == GNUTLS_E_INTERRUPTED || n == GNUTLS_E_AGAIN) return 0; if (n < 0) throw TLSException("readTLS", n); return n; } #endif
int TLSInStream::readTLS(U8* buf, int len, bool wait) { int n; n = in->check(1, 1, wait); if (n == 0) return 0; n = gnutls_record_recv(session, (void *) buf, len); if (n == GNUTLS_E_INTERRUPTED || n == GNUTLS_E_AGAIN) return 0; if (n < 0) throw TLSException("readTLS", n); return n; }
size_t TLSInStream::readTLS(U8* buf, size_t len, bool wait) { int n; n = in->check(1, 1, wait); if (n == 0) return 0; n = gnutls_record_recv(session, (void *) buf, len); if (n == GNUTLS_E_INTERRUPTED || n == GNUTLS_E_AGAIN) return 0; if (n < 0) throw TLSException("readTLS", n); return n; }
{'added': [(78, 'size_t TLSInStream::pos()'), (83, 'size_t TLSInStream::overrun(size_t itemSize, size_t nItems, bool wait)'), (96, ' size_t n = readTLS((U8*) end, start + bufSize - end, wait);'), (102, ' if (itemSize * nItems > (size_t)(end - ptr))'), (108, 'size_t TLSInStream::readTLS(U8* buf, size_t len, bool wait)')], 'deleted': [(78, 'int TLSInStream::pos()'), (83, 'int TLSInStream::overrun(int itemSize, int nItems, bool wait)'), (96, ' int n = readTLS((U8*) end, start + bufSize - end, wait);'), (102, ' if (itemSize * nItems > end - ptr)'), (108, 'int TLSInStream::readTLS(U8* buf, int len, bool wait)')]}
5
5
74
489
12
86
5
https://github.com/CendioOssman/tigervnc
CVE-2019-15694
CWE-787
1,656
card-epass2003.c
C
decrypt_response
/* * Support for ePass2003 smart cards * * Copyright (C) 2008, Weitao Sun <weitao@ftsafe.com> * Copyright (C) 2011, Xiaoshuo Wu <xiaoshuo@ftsafe.com> * * This library is free software; you can redistribute it and/or * modify it under the terms of the GNU Lesser General Public * License as published by the Free Software Foundation; either * version 2.1 of the License, or (at your option) any later version. * * This library is distributed in the hope that it will be useful, * but WITHOUT ANY WARRANTY; without even the implied warranty of * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU * Lesser General Public License for more details. * * You should have received a copy of the GNU Lesser General Public * License along with this library; if not, write to the Free Software * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA */ #if HAVE_CONFIG_H #include "config.h" #endif #ifdef ENABLE_SM /* empty file without SM enabled */ #ifdef ENABLE_OPENSSL /* empty file without openssl */ #include <ctype.h> #include <stdlib.h> #include <string.h> #include <openssl/evp.h> #include <openssl/sha.h> #include "internal.h" #include "asn1.h" #include <ctype.h> #include <stdlib.h> #include <string.h> #include <openssl/evp.h> #include <openssl/sha.h> #include "internal.h" #include "asn1.h" #include "cardctl.h" static struct sc_atr_table epass2003_atrs[] = { /* This is a FIPS certified card using SCP01 security messaging. 
*/ {"3B:9F:95:81:31:FE:9F:00:66:46:53:05:10:00:11:71:df:00:00:00:6a:82:5e", "FF:FF:FF:FF:FF:00:FF:FF:FF:FF:FF:FF:00:00:00:ff:00:ff:ff:00:00:00:00", "FTCOS/ePass2003", SC_CARD_TYPE_ENTERSAFE_FTCOS_EPASS2003, 0, NULL }, {NULL, NULL, NULL, 0, 0, NULL} }; static struct sc_card_operations *iso_ops = NULL; static struct sc_card_operations epass2003_ops; static struct sc_card_driver epass2003_drv = { "epass2003", "epass2003", &epass2003_ops, NULL, 0, NULL }; #define KEY_TYPE_AES 0x01 /* FIPS mode */ #define KEY_TYPE_DES 0x02 /* Non-FIPS mode */ #define KEY_LEN_AES 16 #define KEY_LEN_DES 8 #define KEY_LEN_DES3 24 #define HASH_LEN 24 static unsigned char PIN_ID[2] = { ENTERSAFE_USER_PIN_ID, ENTERSAFE_SO_PIN_ID }; /*0x00:plain; 0x01:scp01 sm*/ #define SM_PLAIN 0x00 #define SM_SCP01 0x01 static unsigned char g_init_key_enc[16] = { 0x01, 0x02, 0x03, 0x04, 0x05, 0x06, 0x07, 0x08, 0x09, 0x0A, 0x0B, 0x0C, 0x0D, 0x0E, 0x0F, 0x10 }; static unsigned char g_init_key_mac[16] = { 0x01, 0x02, 0x03, 0x04, 0x05, 0x06, 0x07, 0x08, 0x09, 0x0A, 0x0B, 0x0C, 0x0D, 0x0E, 0x0F, 0x10 }; static unsigned char g_random[8] = { 0xBF, 0xC3, 0x29, 0x11, 0xC7, 0x18, 0xC3, 0x40 }; typedef struct epass2003_exdata_st { unsigned char sm; /* SM_PLAIN or SM_SCP01 */ unsigned char smtype; /* KEY_TYPE_AES or KEY_TYPE_DES */ unsigned char sk_enc[16]; /* encrypt session key */ unsigned char sk_mac[16]; /* mac session key */ unsigned char icv_mac[16]; /* instruction counter vector(for sm) */ unsigned char currAlg; /* current Alg */ unsigned int ecAlgFlags; /* Ec Alg mechanism type*/ } epass2003_exdata; #define REVERSE_ORDER4(x) ( \ ((unsigned long)x & 0xFF000000)>> 24 | \ ((unsigned long)x & 0x00FF0000)>> 8 | \ ((unsigned long)x & 0x0000FF00)<< 8 | \ ((unsigned long)x & 0x000000FF)<< 24) static const struct sc_card_error epass2003_errors[] = { { 0x6200, SC_ERROR_CARD_CMD_FAILED, "Warning: no information given, non-volatile memory is unchanged" }, { 0x6281, SC_ERROR_CORRUPTED_DATA, "Part of returned data may be 
corrupted" }, { 0x6282, SC_ERROR_FILE_END_REACHED, "End of file/record reached before reading Le bytes" }, { 0x6283, SC_ERROR_CARD_CMD_FAILED, "Selected file invalidated" }, { 0x6284, SC_ERROR_CARD_CMD_FAILED, "FCI not formatted according to ISO 7816-4" }, { 0x6300, SC_ERROR_PIN_CODE_INCORRECT, "Authentication failed"}, { 0x63C1, SC_ERROR_PIN_CODE_INCORRECT, "Authentication failed. One tries left"}, { 0x63C2, SC_ERROR_PIN_CODE_INCORRECT, "Authentication failed. Two tries left"}, { 0x63C3, SC_ERROR_PIN_CODE_INCORRECT, "Authentication failed"}, { 0x63C4, SC_ERROR_PIN_CODE_INCORRECT, "Authentication failed"}, { 0x63C5, SC_ERROR_PIN_CODE_INCORRECT, "Authentication failed"}, { 0x63C6, SC_ERROR_PIN_CODE_INCORRECT, "Authentication failed"}, { 0x63C7, SC_ERROR_PIN_CODE_INCORRECT, "Authentication failed"}, { 0x63C8, SC_ERROR_PIN_CODE_INCORRECT, "Authentication failed"}, { 0x63C9, SC_ERROR_PIN_CODE_INCORRECT, "Authentication failed"}, { 0x63CA, SC_ERROR_PIN_CODE_INCORRECT, "Authentication failed"}, { 0x6381, SC_ERROR_CARD_CMD_FAILED, "Warning: file filled up by last write" }, { 0x6581, SC_ERROR_MEMORY_FAILURE, "Memory failure" }, { 0x6700, SC_ERROR_WRONG_LENGTH, "Wrong length" }, { 0x6800, SC_ERROR_NO_CARD_SUPPORT, "Functions in CLA not supported" }, { 0x6881, SC_ERROR_NO_CARD_SUPPORT, "Logical channel not supported" }, { 0x6882, SC_ERROR_NO_CARD_SUPPORT, "Secure messaging not supported" }, { 0x6900, SC_ERROR_NOT_ALLOWED, "Command not allowed" }, { 0x6981, SC_ERROR_CARD_CMD_FAILED, "Command incompatible with file structure" }, { 0x6982, SC_ERROR_SECURITY_STATUS_NOT_SATISFIED, "Security status not satisfied" }, { 0x6983, SC_ERROR_AUTH_METHOD_BLOCKED, "Authentication method blocked" }, { 0x6984, SC_ERROR_REF_DATA_NOT_USABLE, "Referenced data not usable" }, { 0x6985, SC_ERROR_NOT_ALLOWED, "Conditions of use not satisfied" }, { 0x6986, SC_ERROR_NOT_ALLOWED, "Command not allowed (no current EF)" }, { 0x6987, SC_ERROR_INCORRECT_PARAMETERS,"Expected SM data objects missing" }, { 
0x6988, SC_ERROR_INCORRECT_PARAMETERS,"SM data objects incorrect" }, { 0x6A00, SC_ERROR_INCORRECT_PARAMETERS,"Wrong parameter(s) P1-P2" }, { 0x6A80, SC_ERROR_INCORRECT_PARAMETERS,"Incorrect parameters in the data field" }, { 0x6A81, SC_ERROR_NO_CARD_SUPPORT, "Function not supported" }, { 0x6A82, SC_ERROR_FILE_NOT_FOUND, "File not found" }, { 0x6A83, SC_ERROR_RECORD_NOT_FOUND, "Record not found" }, { 0x6A84, SC_ERROR_NOT_ENOUGH_MEMORY, "Not enough memory space in the file" }, { 0x6A85, SC_ERROR_INCORRECT_PARAMETERS,"Lc inconsistent with TLV structure" }, { 0x6A86, SC_ERROR_INCORRECT_PARAMETERS,"Incorrect parameters P1-P2" }, { 0x6A87, SC_ERROR_INCORRECT_PARAMETERS,"Lc inconsistent with P1-P2" }, { 0x6A88, SC_ERROR_DATA_OBJECT_NOT_FOUND,"Referenced data not found" }, { 0x6A89, SC_ERROR_FILE_ALREADY_EXISTS, "File already exists"}, { 0x6A8A, SC_ERROR_FILE_ALREADY_EXISTS, "DF name already exists"}, { 0x6B00, SC_ERROR_INCORRECT_PARAMETERS,"Wrong parameter(s) P1-P2" }, { 0x6D00, SC_ERROR_INS_NOT_SUPPORTED, "Instruction code not supported or invalid" }, { 0x6E00, SC_ERROR_CLASS_NOT_SUPPORTED, "Class not supported" }, { 0x6F00, SC_ERROR_CARD_CMD_FAILED, "No precise diagnosis" }, { 0x9000,SC_SUCCESS, NULL } }; static int epass2003_transmit_apdu(struct sc_card *card, struct sc_apdu *apdu); static int epass2003_select_file(struct sc_card *card, const sc_path_t * in_path, sc_file_t ** file_out); int epass2003_refresh(struct sc_card *card); static int hash_data(const unsigned char *data, size_t datalen, unsigned char *hash, unsigned int mechanismType); static int epass2003_check_sw(struct sc_card *card, unsigned int sw1, unsigned int sw2) { const int err_count = sizeof(epass2003_errors)/sizeof(epass2003_errors[0]); int i; /* Handle special cases here */ if (sw1 == 0x6C) { sc_log(card->ctx, "Wrong length; correct length is %d", sw2); return SC_ERROR_WRONG_LENGTH; } for (i = 0; i < err_count; i++) { if (epass2003_errors[i].SWs == ((sw1 << 8) | sw2)) { sc_log(card->ctx, "%s", 
epass2003_errors[i].errorstr); return epass2003_errors[i].errorno; } } sc_log(card->ctx, "Unknown SWs; SW1=%02X, SW2=%02X", sw1, sw2); return SC_ERROR_CARD_CMD_FAILED; } static int sc_transmit_apdu_t(sc_card_t *card, sc_apdu_t *apdu) { int r = sc_transmit_apdu(card, apdu); if ( ((0x69 == apdu->sw1) && (0x85 == apdu->sw2)) || ((0x69 == apdu->sw1) && (0x88 == apdu->sw2))) { epass2003_refresh(card); r = sc_transmit_apdu(card, apdu); } return r; } static int openssl_enc(const EVP_CIPHER * cipher, const unsigned char *key, const unsigned char *iv, const unsigned char *input, size_t length, unsigned char *output) { int r = SC_ERROR_INTERNAL; EVP_CIPHER_CTX * ctx = NULL; int outl = 0; int outl_tmp = 0; unsigned char iv_tmp[EVP_MAX_IV_LENGTH] = { 0 }; memcpy(iv_tmp, iv, EVP_MAX_IV_LENGTH); ctx = EVP_CIPHER_CTX_new(); if (ctx == NULL) goto out; EVP_EncryptInit_ex(ctx, cipher, NULL, key, iv_tmp); EVP_CIPHER_CTX_set_padding(ctx, 0); if (!EVP_EncryptUpdate(ctx, output, &outl, input, length)) goto out; if (!EVP_EncryptFinal_ex(ctx, output + outl, &outl_tmp)) goto out; r = SC_SUCCESS; out: if (ctx) EVP_CIPHER_CTX_free(ctx); return r; } static int openssl_dec(const EVP_CIPHER * cipher, const unsigned char *key, const unsigned char *iv, const unsigned char *input, size_t length, unsigned char *output) { int r = SC_ERROR_INTERNAL; EVP_CIPHER_CTX * ctx = NULL; int outl = 0; int outl_tmp = 0; unsigned char iv_tmp[EVP_MAX_IV_LENGTH] = { 0 }; memcpy(iv_tmp, iv, EVP_MAX_IV_LENGTH); ctx = EVP_CIPHER_CTX_new(); if (ctx == NULL) goto out; EVP_DecryptInit_ex(ctx, cipher, NULL, key, iv_tmp); EVP_CIPHER_CTX_set_padding(ctx, 0); if (!EVP_DecryptUpdate(ctx, output, &outl, input, length)) goto out; if (!EVP_DecryptFinal_ex(ctx, output + outl, &outl_tmp)) goto out; r = SC_SUCCESS; out: if (ctx) EVP_CIPHER_CTX_free(ctx); return r; } static int aes128_encrypt_ecb(const unsigned char *key, int keysize, const unsigned char *input, size_t length, unsigned char *output) { unsigned char 
iv[EVP_MAX_IV_LENGTH] = { 0 }; return openssl_enc(EVP_aes_128_ecb(), key, iv, input, length, output); } static int aes128_encrypt_cbc(const unsigned char *key, int keysize, unsigned char iv[16], const unsigned char *input, size_t length, unsigned char *output) { return openssl_enc(EVP_aes_128_cbc(), key, iv, input, length, output); } static int aes128_decrypt_cbc(const unsigned char *key, int keysize, unsigned char iv[16], const unsigned char *input, size_t length, unsigned char *output) { return openssl_dec(EVP_aes_128_cbc(), key, iv, input, length, output); } static int des3_encrypt_ecb(const unsigned char *key, int keysize, const unsigned char *input, int length, unsigned char *output) { unsigned char iv[EVP_MAX_IV_LENGTH] = { 0 }; unsigned char bKey[24] = { 0 }; if (keysize == 16) { memcpy(&bKey[0], key, 16); memcpy(&bKey[16], key, 8); } else { memcpy(&bKey[0], key, 24); } return openssl_enc(EVP_des_ede3(), bKey, iv, input, length, output); } static int des3_encrypt_cbc(const unsigned char *key, int keysize, unsigned char iv[EVP_MAX_IV_LENGTH], const unsigned char *input, size_t length, unsigned char *output) { unsigned char bKey[24] = { 0 }; if (keysize == 16) { memcpy(&bKey[0], key, 16); memcpy(&bKey[16], key, 8); } else { memcpy(&bKey[0], key, 24); } return openssl_enc(EVP_des_ede3_cbc(), bKey, iv, input, length, output); } static int des3_decrypt_cbc(const unsigned char *key, int keysize, unsigned char iv[EVP_MAX_IV_LENGTH], const unsigned char *input, size_t length, unsigned char *output) { unsigned char bKey[24] = { 0 }; if (keysize == 16) { memcpy(&bKey[0], key, 16); memcpy(&bKey[16], key, 8); } else { memcpy(&bKey[0], key, 24); } return openssl_dec(EVP_des_ede3_cbc(), bKey, iv, input, length, output); } static int des_encrypt_cbc(const unsigned char *key, int keysize, unsigned char iv[EVP_MAX_IV_LENGTH], const unsigned char *input, size_t length, unsigned char *output) { return openssl_enc(EVP_des_cbc(), key, iv, input, length, output); } static int 
des_decrypt_cbc(const unsigned char *key, int keysize, unsigned char iv[EVP_MAX_IV_LENGTH], const unsigned char *input, size_t length, unsigned char *output) { return openssl_dec(EVP_des_cbc(), key, iv, input, length, output); } static int openssl_dig(const EVP_MD * digest, const unsigned char *input, size_t length, unsigned char *output) { int r = 0; EVP_MD_CTX *ctx = NULL; unsigned outl = 0; ctx = EVP_MD_CTX_create(); if (ctx == NULL) { r = SC_ERROR_OUT_OF_MEMORY; goto err; } EVP_MD_CTX_init(ctx); EVP_DigestInit_ex(ctx, digest, NULL); if (!EVP_DigestUpdate(ctx, input, length)) { r = SC_ERROR_INTERNAL; goto err; } if (!EVP_DigestFinal_ex(ctx, output, &outl)) { r = SC_ERROR_INTERNAL; goto err; } r = SC_SUCCESS; err: if (ctx) EVP_MD_CTX_destroy(ctx); return r; } static int sha1_digest(const unsigned char *input, size_t length, unsigned char *output) { return openssl_dig(EVP_sha1(), input, length, output); } static int sha256_digest(const unsigned char *input, size_t length, unsigned char *output) { return openssl_dig(EVP_sha256(), input, length, output); } static int gen_init_key(struct sc_card *card, unsigned char *key_enc, unsigned char *key_mac, unsigned char *result, unsigned char key_type) { int r; struct sc_apdu apdu; unsigned char data[256] = { 0 }; unsigned char tmp_sm; unsigned long blocksize = 0; unsigned char cryptogram[256] = { 0 }; /* host cryptogram */ unsigned char iv[16] = { 0 }; epass2003_exdata *exdata = NULL; if (!card->drv_data) return SC_ERROR_INVALID_ARGUMENTS; exdata = (epass2003_exdata *)card->drv_data; LOG_FUNC_CALLED(card->ctx); sc_format_apdu(card, &apdu, SC_APDU_CASE_4_SHORT, 0x50, 0x00, 0x00); apdu.cla = 0x80; apdu.lc = apdu.datalen = sizeof(g_random); apdu.data = g_random; /* host random */ apdu.le = apdu.resplen = 28; apdu.resp = result; /* card random is result[12~19] */ tmp_sm = exdata->sm; exdata->sm = SM_PLAIN; r = epass2003_transmit_apdu(card, &apdu); exdata->sm = tmp_sm; LOG_TEST_RET(card->ctx, r, "APDU gen_init_key failed"); r = 
sc_check_sw(card, apdu.sw1, apdu.sw2); LOG_TEST_RET(card->ctx, r, "gen_init_key failed"); /* Step 1 - Generate Derivation data */ memcpy(data, &result[16], 4); memcpy(&data[4], g_random, 4); memcpy(&data[8], &result[12], 4); memcpy(&data[12], &g_random[4], 4); /* Step 2,3 - Create S-ENC/S-MAC Session Key */ if (KEY_TYPE_AES == key_type) { aes128_encrypt_ecb(key_enc, 16, data, 16, exdata->sk_enc); aes128_encrypt_ecb(key_mac, 16, data, 16, exdata->sk_mac); } else { des3_encrypt_ecb(key_enc, 16, data, 16, exdata->sk_enc); des3_encrypt_ecb(key_mac, 16, data, 16, exdata->sk_mac); } memcpy(data, g_random, 8); memcpy(&data[8], &result[12], 8); data[16] = 0x80; blocksize = (key_type == KEY_TYPE_AES ? 16 : 8); memset(&data[17], 0x00, blocksize - 1); /* calculate host cryptogram */ if (KEY_TYPE_AES == key_type) aes128_encrypt_cbc(exdata->sk_enc, 16, iv, data, 16 + blocksize, cryptogram); else des3_encrypt_cbc(exdata->sk_enc, 16, iv, data, 16 + blocksize, cryptogram); /* verify card cryptogram */ if (0 != memcmp(&cryptogram[16], &result[20], 8)) LOG_FUNC_RETURN(card->ctx, SC_ERROR_CARD_CMD_FAILED); LOG_FUNC_RETURN(card->ctx, SC_SUCCESS); } static int verify_init_key(struct sc_card *card, unsigned char *ran_key, unsigned char key_type) { int r; struct sc_apdu apdu; unsigned long blocksize = (key_type == KEY_TYPE_AES ? 
16 : 8); unsigned char data[256] = { 0 }; unsigned char cryptogram[256] = { 0 }; /* host cryptogram */ unsigned char iv[16] = { 0 }; unsigned char mac[256] = { 0 }; unsigned long i; unsigned char tmp_sm; epass2003_exdata *exdata = NULL; if (!card->drv_data) return SC_ERROR_INVALID_ARGUMENTS; exdata = (epass2003_exdata *)card->drv_data; LOG_FUNC_CALLED(card->ctx); memcpy(data, ran_key, 8); memcpy(&data[8], g_random, 8); data[16] = 0x80; memset(&data[17], 0x00, blocksize - 1); memset(iv, 0, 16); /* calculate host cryptogram */ if (KEY_TYPE_AES == key_type) { aes128_encrypt_cbc(exdata->sk_enc, 16, iv, data, 16 + blocksize, cryptogram); } else { des3_encrypt_cbc(exdata->sk_enc, 16, iv, data, 16 + blocksize, cryptogram); } memset(data, 0, sizeof(data)); memcpy(data, "\x84\x82\x03\x00\x10", 5); memcpy(&data[5], &cryptogram[16], 8); memcpy(&data[13], "\x80\x00\x00", 3); /* calculate mac icv */ memset(iv, 0x00, 16); if (KEY_TYPE_AES == key_type) { aes128_encrypt_cbc(exdata->sk_mac, 16, iv, data, 16, mac); i = 0; } else { des3_encrypt_cbc(exdata->sk_mac, 16, iv, data, 16, mac); i = 8; } /* save mac icv */ memset(exdata->icv_mac, 0x00, 16); memcpy(exdata->icv_mac, &mac[i], 8); /* verify host cryptogram */ memcpy(data, &cryptogram[16], 8); memcpy(&data[8], &mac[i], 8); sc_format_apdu(card, &apdu, SC_APDU_CASE_3_SHORT, 0x82, 0x03, 0x00); apdu.cla = 0x84; apdu.lc = apdu.datalen = 16; apdu.data = data; tmp_sm = exdata->sm; exdata->sm = SM_PLAIN; r = epass2003_transmit_apdu(card, &apdu); exdata->sm = tmp_sm; LOG_TEST_RET(card->ctx, r, "APDU verify_init_key failed"); r = sc_check_sw(card, apdu.sw1, apdu.sw2); LOG_TEST_RET(card->ctx, r, "verify_init_key failed"); return r; } static int mutual_auth(struct sc_card *card, unsigned char *key_enc, unsigned char *key_mac) { struct sc_context *ctx = card->ctx; int r; unsigned char result[256] = { 0 }; unsigned char ran_key[8] = { 0 }; epass2003_exdata *exdata = NULL; if (!card->drv_data) return SC_ERROR_INVALID_ARGUMENTS; exdata = 
(epass2003_exdata *)card->drv_data; LOG_FUNC_CALLED(ctx); r = gen_init_key(card, key_enc, key_mac, result, exdata->smtype); LOG_TEST_RET(ctx, r, "gen_init_key failed"); memcpy(ran_key, &result[12], 8); r = verify_init_key(card, ran_key, exdata->smtype); LOG_TEST_RET(ctx, r, "verify_init_key failed"); LOG_FUNC_RETURN(ctx, r); } int epass2003_refresh(struct sc_card *card) { int r = SC_SUCCESS; epass2003_exdata *exdata = NULL; if (!card->drv_data) return SC_ERROR_INVALID_ARGUMENTS; exdata = (epass2003_exdata *)card->drv_data; if (exdata->sm) { card->sm_ctx.sm_mode = 0; r = mutual_auth(card, g_init_key_enc, g_init_key_mac); card->sm_ctx.sm_mode = SM_MODE_TRANSMIT; LOG_TEST_RET(card->ctx, r, "mutual_auth failed"); } return r; } /* Data(TLV)=0x87|L|0x01+Cipher */ static int construct_data_tlv(struct sc_card *card, struct sc_apdu *apdu, unsigned char *apdu_buf, unsigned char *data_tlv, size_t * data_tlv_len, const unsigned char key_type) { size_t block_size = (KEY_TYPE_AES == key_type ? 16 : 8); unsigned char pad[4096] = { 0 }; size_t pad_len; size_t tlv_more; /* increased tlv length */ unsigned char iv[16] = { 0 }; epass2003_exdata *exdata = NULL; if (!card->drv_data) return SC_ERROR_INVALID_ARGUMENTS; exdata = (epass2003_exdata *)card->drv_data; /* padding */ apdu_buf[block_size] = 0x87; memcpy(pad, apdu->data, apdu->lc); pad[apdu->lc] = 0x80; if ((apdu->lc + 1) % block_size) pad_len = ((apdu->lc + 1) / block_size + 1) * block_size; else pad_len = apdu->lc + 1; /* encode Lc' */ if (pad_len > 0x7E) { /* Lc' > 0x7E, use extended APDU */ apdu_buf[block_size + 1] = 0x82; apdu_buf[block_size + 2] = (unsigned char)((pad_len + 1) / 0x100); apdu_buf[block_size + 3] = (unsigned char)((pad_len + 1) % 0x100); apdu_buf[block_size + 4] = 0x01; tlv_more = 5; } else { apdu_buf[block_size + 1] = (unsigned char)pad_len + 1; apdu_buf[block_size + 2] = 0x01; tlv_more = 3; } memcpy(data_tlv, &apdu_buf[block_size], tlv_more); /* encrypt Data */ if (KEY_TYPE_AES == key_type) 
aes128_encrypt_cbc(exdata->sk_enc, 16, iv, pad, pad_len, apdu_buf + block_size + tlv_more); else des3_encrypt_cbc(exdata->sk_enc, 16, iv, pad, pad_len, apdu_buf + block_size + tlv_more); memcpy(data_tlv + tlv_more, apdu_buf + block_size + tlv_more, pad_len); *data_tlv_len = tlv_more + pad_len; return 0; } /* Le(TLV)=0x97|L|Le */ static int construct_le_tlv(struct sc_apdu *apdu, unsigned char *apdu_buf, size_t data_tlv_len, unsigned char *le_tlv, size_t * le_tlv_len, const unsigned char key_type) { size_t block_size = (KEY_TYPE_AES == key_type ? 16 : 8); *(apdu_buf + block_size + data_tlv_len) = 0x97; if (apdu->le > 0x7F) { /* Le' > 0x7E, use extended APDU */ *(apdu_buf + block_size + data_tlv_len + 1) = 2; *(apdu_buf + block_size + data_tlv_len + 2) = (unsigned char)(apdu->le / 0x100); *(apdu_buf + block_size + data_tlv_len + 3) = (unsigned char)(apdu->le % 0x100); memcpy(le_tlv, apdu_buf + block_size + data_tlv_len, 4); *le_tlv_len = 4; } else { *(apdu_buf + block_size + data_tlv_len + 1) = 1; *(apdu_buf + block_size + data_tlv_len + 2) = (unsigned char)apdu->le; memcpy(le_tlv, apdu_buf + block_size + data_tlv_len, 3); *le_tlv_len = 3; } return 0; } /* MAC(TLV)=0x8e|0x08|MAC */ static int construct_mac_tlv(struct sc_card *card, unsigned char *apdu_buf, size_t data_tlv_len, size_t le_tlv_len, unsigned char *mac_tlv, size_t * mac_tlv_len, const unsigned char key_type) { size_t block_size = (KEY_TYPE_AES == key_type ? 16 : 8); unsigned char mac[4096] = { 0 }; size_t mac_len; unsigned char icv[16] = { 0 }; int i = (KEY_TYPE_AES == key_type ? 
15 : 7); epass2003_exdata *exdata = NULL; if (!card->drv_data) return SC_ERROR_INVALID_ARGUMENTS; exdata = (epass2003_exdata *)card->drv_data; if (0 == data_tlv_len && 0 == le_tlv_len) { mac_len = block_size; } else { /* padding */ *(apdu_buf + block_size + data_tlv_len + le_tlv_len) = 0x80; if ((data_tlv_len + le_tlv_len + 1) % block_size) mac_len = (((data_tlv_len + le_tlv_len + 1) / block_size) + 1) * block_size + block_size; else mac_len = data_tlv_len + le_tlv_len + 1 + block_size; memset((apdu_buf + block_size + data_tlv_len + le_tlv_len + 1), 0, (mac_len - (data_tlv_len + le_tlv_len + 1))); } /* increase icv */ for (; i >= 0; i--) { if (exdata->icv_mac[i] == 0xff) { exdata->icv_mac[i] = 0; } else { exdata->icv_mac[i]++; break; } } /* calculate MAC */ memset(icv, 0, sizeof(icv)); memcpy(icv, exdata->icv_mac, 16); if (KEY_TYPE_AES == key_type) { aes128_encrypt_cbc(exdata->sk_mac, 16, icv, apdu_buf, mac_len, mac); memcpy(mac_tlv + 2, &mac[mac_len - 16], 8); } else { unsigned char iv[EVP_MAX_IV_LENGTH] = { 0 }; unsigned char tmp[8] = { 0 }; des_encrypt_cbc(exdata->sk_mac, 8, icv, apdu_buf, mac_len, mac); des_decrypt_cbc(&exdata->sk_mac[8], 8, iv, &mac[mac_len - 8], 8, tmp); memset(iv, 0x00, sizeof iv); des_encrypt_cbc(exdata->sk_mac, 8, iv, tmp, 8, mac_tlv + 2); } *mac_tlv_len = 2 + 8; return 0; } /* According to GlobalPlatform Card Specification's SCP01 * encode APDU from * CLA INS P1 P2 [Lc] Data [Le] * to * CLA INS P1 P2 Lc' Data' [Le] * where * Data'=Data(TLV)+Le(TLV)+MAC(TLV) */ static int encode_apdu(struct sc_card *card, struct sc_apdu *plain, struct sc_apdu *sm, unsigned char *apdu_buf, size_t * apdu_buf_len) { size_t block_size = 0; unsigned char dataTLV[4096] = { 0 }; size_t data_tlv_len = 0; unsigned char le_tlv[256] = { 0 }; size_t le_tlv_len = 0; size_t mac_tlv_len = 10; size_t tmp_lc = 0; size_t tmp_le = 0; unsigned char mac_tlv[256] = { 0 }; epass2003_exdata *exdata = NULL; mac_tlv[0] = 0x8E; mac_tlv[1] = 8; /* size_t plain_le = 0; */ if 
(!card->drv_data) return SC_ERROR_INVALID_ARGUMENTS; exdata = (epass2003_exdata*)card->drv_data; block_size = (KEY_TYPE_DES == exdata->smtype ? 16 : 8); sm->cse = SC_APDU_CASE_4_SHORT; apdu_buf[0] = (unsigned char)plain->cla; apdu_buf[1] = (unsigned char)plain->ins; apdu_buf[2] = (unsigned char)plain->p1; apdu_buf[3] = (unsigned char)plain->p2; /* plain_le = plain->le; */ /* padding */ apdu_buf[4] = 0x80; memset(&apdu_buf[5], 0x00, block_size - 5); /* Data -> Data' */ if (plain->lc != 0) if (0 != construct_data_tlv(card, plain, apdu_buf, dataTLV, &data_tlv_len, exdata->smtype)) return -1; if (plain->le != 0 || (plain->le == 0 && plain->resplen != 0)) if (0 != construct_le_tlv(plain, apdu_buf, data_tlv_len, le_tlv, &le_tlv_len, exdata->smtype)) return -1; if (0 != construct_mac_tlv(card, apdu_buf, data_tlv_len, le_tlv_len, mac_tlv, &mac_tlv_len, exdata->smtype)) return -1; memset(apdu_buf + 4, 0, *apdu_buf_len - 4); sm->lc = sm->datalen = data_tlv_len + le_tlv_len + mac_tlv_len; if (sm->lc > 0xFF) { sm->cse = SC_APDU_CASE_4_EXT; apdu_buf[4] = (unsigned char)((sm->lc) / 0x10000); apdu_buf[5] = (unsigned char)(((sm->lc) / 0x100) % 0x100); apdu_buf[6] = (unsigned char)((sm->lc) % 0x100); tmp_lc = 3; } else { apdu_buf[4] = (unsigned char)sm->lc; tmp_lc = 1; } memcpy(apdu_buf + 4 + tmp_lc, dataTLV, data_tlv_len); memcpy(apdu_buf + 4 + tmp_lc + data_tlv_len, le_tlv, le_tlv_len); memcpy(apdu_buf + 4 + tmp_lc + data_tlv_len + le_tlv_len, mac_tlv, mac_tlv_len); memcpy((unsigned char *)sm->data, apdu_buf + 4 + tmp_lc, sm->datalen); *apdu_buf_len = 0; if (4 == le_tlv_len) { sm->cse = SC_APDU_CASE_4_EXT; *(apdu_buf + 4 + tmp_lc + sm->lc) = (unsigned char)(plain->le / 0x100); *(apdu_buf + 4 + tmp_lc + sm->lc + 1) = (unsigned char)(plain->le % 0x100); tmp_le = 2; } else if (3 == le_tlv_len) { *(apdu_buf + 4 + tmp_lc + sm->lc) = (unsigned char)plain->le; tmp_le = 1; } *apdu_buf_len += 4 + tmp_lc + data_tlv_len + le_tlv_len + mac_tlv_len + tmp_le; /* sm->le = calc_le(plain_le); */ 
return 0; } static int epass2003_sm_wrap_apdu(struct sc_card *card, struct sc_apdu *plain, struct sc_apdu *sm) { unsigned char buf[4096] = { 0 }; /* APDU buffer */ size_t buf_len = sizeof(buf); epass2003_exdata *exdata = NULL; if (!card->drv_data) return SC_ERROR_INVALID_ARGUMENTS; exdata = (epass2003_exdata *)card->drv_data; LOG_FUNC_CALLED(card->ctx); if (exdata->sm) plain->cla |= 0x0C; sm->cse = plain->cse; sm->cla = plain->cla; sm->ins = plain->ins; sm->p1 = plain->p1; sm->p2 = plain->p2; sm->lc = plain->lc; sm->le = plain->le; sm->control = plain->control; sm->flags = plain->flags; switch (sm->cla & 0x0C) { case 0x00: case 0x04: sm->datalen = plain->datalen; memcpy((void *)sm->data, plain->data, plain->datalen); sm->resplen = plain->resplen; memcpy(sm->resp, plain->resp, plain->resplen); break; case 0x0C: memset(buf, 0, sizeof(buf)); if (0 != encode_apdu(card, plain, sm, buf, &buf_len)) return SC_ERROR_CARD_CMD_FAILED; break; default: return SC_ERROR_INCORRECT_PARAMETERS; } return SC_SUCCESS; } /* According to GlobalPlatform Card Specification's SCP01 * decrypt APDU response from * ResponseData' SW1 SW2 * to * ResponseData SW1 SW2 * where * ResponseData'=Data(TLV)+SW12(TLV)+MAC(TLV) * where * Data(TLV)=0x87|L|Cipher * SW12(TLV)=0x99|0x02|SW1+SW2 * MAC(TLV)=0x8e|0x08|MAC */ static int decrypt_response(struct sc_card *card, unsigned char *in, size_t inlen, unsigned char *out, size_t * out_len) { size_t cipher_len; size_t i; unsigned char iv[16] = { 0 }; unsigned char plaintext[4096] = { 0 }; epass2003_exdata *exdata = NULL; if (!card->drv_data) return SC_ERROR_INVALID_ARGUMENTS; exdata = (epass2003_exdata *)card->drv_data; /* no cipher */ if (in[0] == 0x99) return 0; /* parse cipher length */ if (0x01 == in[2] && 0x82 != in[1]) { cipher_len = in[1]; i = 3; } else if (0x01 == in[3] && 0x81 == in[1]) { cipher_len = in[2]; i = 4; } else if (0x01 == in[4] && 0x82 == in[1]) { cipher_len = in[2] * 0x100; cipher_len += in[3]; i = 5; } else { return -1; } if (cipher_len 
	    < 2 || i+cipher_len > inlen || cipher_len > sizeof plaintext)
		return -1;

	/* decrypt the Data(TLV) value in-place into plaintext; the final
	 * ciphertext byte is the TLV pad indicator, hence cipher_len - 1 */
	if (KEY_TYPE_AES == exdata->smtype)
		aes128_decrypt_cbc(exdata->sk_enc, 16, iv, &in[i], cipher_len - 1, plaintext);
	else
		des3_decrypt_cbc(exdata->sk_enc, 16, iv, &in[i], cipher_len - 1, plaintext);

	/* unpadding: walk back over trailing bytes until the 0x80 pad
	 * marker is found (ISO padding: data || 0x80 || 0x00...) */
	while (0x80 != plaintext[cipher_len - 2] && (cipher_len - 2 > 0))
		cipher_len--;

	if (2 == cipher_len)
		return -1;	/* no pad marker found: malformed plaintext */

	memcpy(out, plaintext, cipher_len - 2);
	*out_len = cipher_len - 2;

	return 0;
}

/*
 * Unwrap an SM response APDU back into the plain APDU: decrypt the
 * response data when the secure channel is active, otherwise copy it
 * verbatim; the status words are passed through unchanged.
 */
static int
epass2003_sm_unwrap_apdu(struct sc_card *card, struct sc_apdu *sm, struct sc_apdu *plain)
{
	int r;
	size_t len = 0;
	epass2003_exdata *exdata = NULL;

	if (!card->drv_data)
		return SC_ERROR_INVALID_ARGUMENTS;

	exdata = (epass2003_exdata *)card->drv_data;

	LOG_FUNC_CALLED(card->ctx);

	/* only decode the body when the card reported success */
	r = sc_check_sw(card, sm->sw1, sm->sw2);
	if (r == SC_SUCCESS) {
		if (exdata->sm) {
			if (0 != decrypt_response(card, sm->resp, sm->resplen, plain->resp, &len))
				return SC_ERROR_CARD_CMD_FAILED;
		}
		else {
			memcpy(plain->resp, sm->resp, sm->resplen);
			len = sm->resplen;
		}
	}

	plain->resplen = len;
	plain->sw1 = sm->sw1;
	plain->sw2 = sm->sw2;

	sc_log(card->ctx, "unwrapped APDU: resplen %"SC_FORMAT_LEN_SIZE_T"u, SW %02X%02X", plain->resplen, plain->sw1, plain->sw2);
	LOG_FUNC_RETURN(card->ctx, SC_SUCCESS);
}

/*
 * Release an SM APDU previously built by epass2003_sm_get_wrapped_apdu().
 * If 'plain' is given, the SM response is first unwrapped into it; the
 * unwrap status is returned.  Frees data, resp and the APDU itself and
 * NULLs the caller's pointer.
 */
static int
epass2003_sm_free_wrapped_apdu(struct sc_card *card, struct sc_apdu *plain, struct sc_apdu **sm_apdu)
{
	struct sc_context *ctx = card->ctx;
	int rv = SC_SUCCESS;

	LOG_FUNC_CALLED(ctx);
	if (!sm_apdu)
		LOG_FUNC_RETURN(ctx, SC_ERROR_INVALID_ARGUMENTS);
	if (!(*sm_apdu))
		LOG_FUNC_RETURN(ctx, SC_SUCCESS);

	if (plain)
		rv = epass2003_sm_unwrap_apdu(card, *sm_apdu, plain);

	/* data is declared const in sc_apdu; cast away to free our buffer */
	if ((*sm_apdu)->data) {
		unsigned char * p = (unsigned char *)((*sm_apdu)->data);
		free(p);
	}
	if ((*sm_apdu)->resp) {
		free((*sm_apdu)->resp);
	}
	free(*sm_apdu);
	*sm_apdu = NULL;

	LOG_FUNC_RETURN(ctx, rv);
}

static int
epass2003_sm_get_wrapped_apdu(struct sc_card *card, struct sc_apdu *plain, struct sc_apdu **sm_apdu)
{
	struct sc_context *ctx =
	    card->ctx;
	struct sc_apdu *apdu = NULL;
	int rv;

	LOG_FUNC_CALLED(ctx);
	if (!plain || !sm_apdu)
		LOG_FUNC_RETURN(ctx, SC_ERROR_INVALID_ARGUMENTS);

	*sm_apdu = NULL;

	/* construct a new SM APDU from the original APDU; all three
	 * allocations are released via the goto-err cleanup on failure */
	apdu = calloc(1, sizeof(struct sc_apdu));
	if (!apdu) {
		rv = SC_ERROR_OUT_OF_MEMORY;
		goto err;
	}
	apdu->data = calloc (1, SC_MAX_EXT_APDU_BUFFER_SIZE);
	if (!apdu->data) {
		rv = SC_ERROR_OUT_OF_MEMORY;
		goto err;
	}
	apdu->resp = calloc (1, SC_MAX_EXT_APDU_BUFFER_SIZE);
	if (!apdu->resp) {
		rv = SC_ERROR_OUT_OF_MEMORY;
		goto err;
	}
	apdu->datalen = SC_MAX_EXT_APDU_BUFFER_SIZE;
	apdu->resplen = SC_MAX_EXT_APDU_BUFFER_SIZE;

	rv = epass2003_sm_wrap_apdu(card, plain, apdu);
	if (rv)   {
		/* NOTE(review): on wrap failure the APDU is freed (apdu is
		 * set to NULL by the free routine) and rv is overwritten by
		 * the free result — so *sm_apdu may be NULL with rv == 0 */
		rv = epass2003_sm_free_wrapped_apdu(card, NULL, &apdu);
		if (rv < 0)
			goto err;
	}

	*sm_apdu = apdu;
	apdu = NULL;
err:
	if (apdu)   {
		free((unsigned char *) apdu->data);
		free(apdu->resp);
		free(apdu);
		apdu = NULL;
	}

	LOG_FUNC_RETURN(ctx, rv);
}

/* Thin logging wrapper around sc_transmit_apdu_t(). */
static int
epass2003_transmit_apdu(struct sc_card *card, struct sc_apdu *apdu)
{
	int r;
	LOG_FUNC_CALLED(card->ctx);

	r = sc_transmit_apdu_t(card, apdu);
	LOG_TEST_RET(card->ctx, r, "APDU transmit failed");
	return r;
}

/*
 * GET DATA (INS 0xCA) for the proprietary tag 'type'; the first
 * 'datalen' response bytes are copied into 'data'.  For tag 0x86 the
 * secure channel is temporarily disabled around the exchange.
 */
static int
get_data(struct sc_card *card, unsigned char type, unsigned char *data, size_t datalen)
{
	int r;
	struct sc_apdu apdu;
	unsigned char resp[SC_MAX_APDU_BUFFER_SIZE] = { 0 };
	size_t resplen = SC_MAX_APDU_BUFFER_SIZE;
	epass2003_exdata *exdata = NULL;

	if (!card->drv_data)
		return SC_ERROR_INVALID_ARGUMENTS;

	exdata = (epass2003_exdata *)card->drv_data;

	LOG_FUNC_CALLED(card->ctx);

	sc_format_apdu(card, &apdu, SC_APDU_CASE_2_SHORT, 0xca, 0x01, type);
	apdu.resp = resp;
	apdu.le = 0;
	apdu.resplen = resplen;
	if (0x86 == type) {
		/* No SM temporarily: save the SM mode, send plain, restore */
		unsigned char tmp_sm = exdata->sm;
		exdata->sm = SM_PLAIN;
		r = sc_transmit_apdu(card, &apdu);
		exdata->sm = tmp_sm;
	}
	else {
		r = sc_transmit_apdu_t(card, &apdu);
	}
	LOG_TEST_RET(card->ctx, r, "APDU get_data failed");

	r = sc_check_sw(card, apdu.sw1, apdu.sw2);
	LOG_TEST_RET(card->ctx, r, "get_data failed");

	memcpy(data,
resp, datalen); return r; } /* card driver functions */ static int epass2003_match_card(struct sc_card *card) { int r; LOG_FUNC_CALLED(card->ctx); r = _sc_match_atr(card, epass2003_atrs, &card->type); if (r < 0) return 0; return 1; } static int epass2003_init(struct sc_card *card) { unsigned int flags; unsigned int ext_flags; unsigned char data[SC_MAX_APDU_BUFFER_SIZE] = { 0 }; size_t datalen = SC_MAX_APDU_BUFFER_SIZE; epass2003_exdata *exdata = NULL; LOG_FUNC_CALLED(card->ctx); card->name = "epass2003"; card->cla = 0x00; exdata = (epass2003_exdata *)calloc(1, sizeof(epass2003_exdata)); if (!exdata) return SC_ERROR_OUT_OF_MEMORY; card->drv_data = exdata; exdata->sm = SM_SCP01; /* decide FIPS/Non-FIPS mode */ if (SC_SUCCESS != get_data(card, 0x86, data, datalen)) return SC_ERROR_INVALID_CARD; if (0x01 == data[2]) exdata->smtype = KEY_TYPE_AES; else exdata->smtype = KEY_TYPE_DES; if (0x84 == data[14]) { if (0x00 == data[16]) { exdata->sm = SM_PLAIN; } } /* mutual authentication */ card->max_recv_size = 0xD8; card->max_send_size = 0xE8; card->sm_ctx.ops.open = epass2003_refresh; card->sm_ctx.ops.get_sm_apdu = epass2003_sm_get_wrapped_apdu; card->sm_ctx.ops.free_sm_apdu = epass2003_sm_free_wrapped_apdu; /* FIXME (VT): rather then set/unset 'g_sm', better to implement filter for APDUs to be wrapped */ epass2003_refresh(card); card->sm_ctx.sm_mode = SM_MODE_TRANSMIT; flags = SC_ALGORITHM_ONBOARD_KEY_GEN | SC_ALGORITHM_RSA_RAW | SC_ALGORITHM_RSA_HASH_NONE; _sc_card_add_rsa_alg(card, 512, flags, 0); _sc_card_add_rsa_alg(card, 768, flags, 0); _sc_card_add_rsa_alg(card, 1024, flags, 0); _sc_card_add_rsa_alg(card, 2048, flags, 0); //set EC Alg Flags flags = SC_ALGORITHM_ONBOARD_KEY_GEN|SC_ALGORITHM_ECDSA_HASH_SHA1|SC_ALGORITHM_ECDSA_HASH_SHA256|SC_ALGORITHM_ECDSA_HASH_NONE|SC_ALGORITHM_ECDSA_RAW; ext_flags = 0; _sc_card_add_ec_alg(card, 256, flags, ext_flags, NULL); card->caps = SC_CARD_CAP_RNG | SC_CARD_CAP_APDU_EXT; LOG_FUNC_RETURN(card->ctx, SC_SUCCESS); } static int 
epass2003_finish(sc_card_t *card) { epass2003_exdata *exdata = (epass2003_exdata *)card->drv_data; if (exdata) free(exdata); return SC_SUCCESS; } /* COS implement SFI as lower 5 bits of FID, and not allow same SFI at the * same DF, so use hook functions to increase/decrease FID by 0x20 */ static int epass2003_hook_path(struct sc_path *path, int inc) { u8 fid_h = path->value[path->len - 2]; u8 fid_l = path->value[path->len - 1]; switch (fid_h) { case 0x29: case 0x30: case 0x31: case 0x32: case 0x33: case 0x34: if (inc) fid_l = fid_l * FID_STEP; else fid_l = fid_l / FID_STEP; path->value[path->len - 1] = fid_l; return 1; default: break; } return 0; } static void epass2003_hook_file(struct sc_file *file, int inc) { int fidl = file->id & 0xff; int fidh = file->id & 0xff00; if (epass2003_hook_path(&file->path, inc)) { if (inc) file->id = fidh + fidl * FID_STEP; else file->id = fidh + fidl / FID_STEP; } } static int epass2003_select_fid_(struct sc_card *card, sc_path_t * in_path, sc_file_t ** file_out) { struct sc_apdu apdu; u8 buf[SC_MAX_APDU_BUFFER_SIZE] = { 0 }; u8 pathbuf[SC_MAX_PATH_SIZE], *path = pathbuf; int r, pathlen; sc_file_t *file = NULL; epass2003_hook_path(in_path, 1); memcpy(path, in_path->value, in_path->len); pathlen = in_path->len; sc_format_apdu(card, &apdu, SC_APDU_CASE_4_SHORT, 0xA4, 0x00, 0x00); switch (in_path->type) { case SC_PATH_TYPE_FILE_ID: apdu.p1 = 0; if (pathlen != 2) return SC_ERROR_INVALID_ARGUMENTS; break; default: LOG_FUNC_RETURN(card->ctx, SC_ERROR_INVALID_ARGUMENTS); } apdu.p2 = 0; /* first record, return FCI */ apdu.lc = pathlen; apdu.data = path; apdu.datalen = pathlen; if (file_out != NULL) { apdu.resp = buf; apdu.resplen = sizeof(buf); apdu.le = 0; } else { apdu.cse = (apdu.lc == 0) ? SC_APDU_CASE_1 : SC_APDU_CASE_3_SHORT; } if (path[0] == 0x29) { /* TODO:0x29 accords with FID prefix in profile */ /* Not allowed to select private key file, so fake fci. 
*/ /* 62 16 82 02 11 00 83 02 29 00 85 02 08 00 86 08 FF 90 90 90 FF FF FF FF */ apdu.resplen = 0x18; memcpy(apdu.resp, "\x6f\x16\x82\x02\x11\x00\x83\x02\x29\x00\x85\x02\x08\x00\x86\x08\xff\x90\x90\x90\xff\xff\xff\xff", apdu.resplen); apdu.resp[9] = path[1]; apdu.sw1 = 0x90; apdu.sw2 = 0x00; } else { r = sc_transmit_apdu_t(card, &apdu); LOG_TEST_RET(card->ctx, r, "APDU transmit failed"); } if (file_out == NULL) { if (apdu.sw1 == 0x61) LOG_FUNC_RETURN(card->ctx, 0); LOG_FUNC_RETURN(card->ctx, sc_check_sw(card, apdu.sw1, apdu.sw2)); } r = sc_check_sw(card, apdu.sw1, apdu.sw2); if (r) LOG_FUNC_RETURN(card->ctx, r); if (apdu.resplen < 2) LOG_FUNC_RETURN(card->ctx, SC_ERROR_UNKNOWN_DATA_RECEIVED); switch (apdu.resp[0]) { case 0x6F: file = sc_file_new(); if (file == NULL) LOG_FUNC_RETURN(card->ctx, SC_ERROR_OUT_OF_MEMORY); file->path = *in_path; if (card->ops->process_fci == NULL) { sc_file_free(file); LOG_FUNC_RETURN(card->ctx, SC_ERROR_NOT_SUPPORTED); } if ((size_t) apdu.resp[1] + 2 <= apdu.resplen) card->ops->process_fci(card, file, apdu.resp + 2, apdu.resp[1]); epass2003_hook_file(file, 0); *file_out = file; break; case 0x00: /* proprietary coding */ LOG_FUNC_RETURN(card->ctx, SC_ERROR_UNKNOWN_DATA_RECEIVED); break; default: LOG_FUNC_RETURN(card->ctx, SC_ERROR_UNKNOWN_DATA_RECEIVED); } return 0; } static int epass2003_select_fid(struct sc_card *card, unsigned int id_hi, unsigned int id_lo, sc_file_t ** file_out) { int r; sc_file_t *file = 0; sc_path_t path; memset(&path, 0, sizeof(path)); path.type = SC_PATH_TYPE_FILE_ID; path.value[0] = id_hi; path.value[1] = id_lo; path.len = 2; r = epass2003_select_fid_(card, &path, &file); LOG_TEST_RET(card->ctx, r, "APDU transmit failed"); /* update cache */ if (file && file->type == SC_FILE_TYPE_DF) { card->cache.current_path.type = SC_PATH_TYPE_PATH; card->cache.current_path.value[0] = 0x3f; card->cache.current_path.value[1] = 0x00; if (id_hi == 0x3f && id_lo == 0x00) { card->cache.current_path.len = 2; } else { 
card->cache.current_path.len = 4; card->cache.current_path.value[2] = id_hi; card->cache.current_path.value[3] = id_lo; } } if (file_out) *file_out = file; LOG_FUNC_RETURN(card->ctx, SC_SUCCESS); } static int epass2003_select_aid(struct sc_card *card, const sc_path_t * in_path, sc_file_t ** file_out) { int r = 0; if (card->cache.valid && card->cache.current_path.type == SC_PATH_TYPE_DF_NAME && card->cache.current_path.len == in_path->len && memcmp(card->cache.current_path.value, in_path->value, in_path->len) == 0) { if (file_out) { *file_out = sc_file_new(); if (!file_out) LOG_FUNC_RETURN(card->ctx, SC_ERROR_OUT_OF_MEMORY); } } else { r = iso_ops->select_file(card, in_path, file_out); LOG_TEST_RET(card->ctx, r, "APDU transmit failed"); /* update cache */ card->cache.current_path.type = SC_PATH_TYPE_DF_NAME; card->cache.current_path.len = in_path->len; memcpy(card->cache.current_path.value, in_path->value, in_path->len); } if (file_out) { sc_file_t *file = *file_out; file->type = SC_FILE_TYPE_DF; file->ef_structure = SC_FILE_EF_UNKNOWN; file->path.len = 0; file->size = 0; /* AID */ memcpy(file->name, in_path->value, in_path->len); file->namelen = in_path->len; file->id = 0x0000; } LOG_FUNC_RETURN(card->ctx, r); } static int epass2003_select_path(struct sc_card *card, const u8 pathbuf[16], const size_t len, sc_file_t ** file_out) { u8 n_pathbuf[SC_MAX_PATH_SIZE]; const u8 *path = pathbuf; size_t pathlen = len; int bMatch = -1; unsigned int i; int r; if (pathlen % 2 != 0 || pathlen > 6 || pathlen <= 0) LOG_FUNC_RETURN(card->ctx, SC_ERROR_INVALID_ARGUMENTS); /* if pathlen == 6 then the first FID must be MF (== 3F00) */ if (pathlen == 6 && (path[0] != 0x3f || path[1] != 0x00)) LOG_FUNC_RETURN(card->ctx, SC_ERROR_INVALID_ARGUMENTS); /* unify path (the first FID should be MF) */ if (path[0] != 0x3f || path[1] != 0x00) { n_pathbuf[0] = 0x3f; n_pathbuf[1] = 0x00; for (i = 0; i < pathlen; i++) n_pathbuf[i + 2] = pathbuf[i]; path = n_pathbuf; pathlen += 2; } /* check current 
working directory */ if (card->cache.valid && card->cache.current_path.type == SC_PATH_TYPE_PATH && card->cache.current_path.len >= 2 && card->cache.current_path.len <= pathlen) { bMatch = 0; for (i = 0; i < card->cache.current_path.len; i += 2) if (card->cache.current_path.value[i] == path[i] && card->cache.current_path.value[i + 1] == path[i + 1]) bMatch += 2; } if (card->cache.valid && bMatch > 2) { if (pathlen - bMatch == 2) { /* we are in the right directory */ return epass2003_select_fid(card, path[bMatch], path[bMatch + 1], file_out); } else if (pathlen - bMatch > 2) { /* two more steps to go */ sc_path_t new_path; /* first step: change directory */ r = epass2003_select_fid(card, path[bMatch], path[bMatch + 1], NULL); LOG_TEST_RET(card->ctx, r, "SELECT FILE (DF-ID) failed"); new_path.type = SC_PATH_TYPE_PATH; new_path.len = pathlen - bMatch - 2; memcpy(new_path.value, &(path[bMatch + 2]), new_path.len); /* final step: select file */ return epass2003_select_file(card, &new_path, file_out); } else { /* if (bMatch - pathlen == 0) */ /* done: we are already in the * requested directory */ sc_log(card->ctx, "cache hit\n"); /* copy file info (if necessary) */ if (file_out) { sc_file_t *file = sc_file_new(); if (!file) LOG_FUNC_RETURN(card->ctx, SC_ERROR_OUT_OF_MEMORY); file->id = (path[pathlen - 2] << 8) + path[pathlen - 1]; file->path = card->cache.current_path; file->type = SC_FILE_TYPE_DF; file->ef_structure = SC_FILE_EF_UNKNOWN; file->size = 0; file->namelen = 0; file->magic = SC_FILE_MAGIC; *file_out = file; } /* nothing left to do */ return SC_SUCCESS; } } else { /* no usable cache */ for (i = 0; i < pathlen - 2; i += 2) { r = epass2003_select_fid(card, path[i], path[i + 1], NULL); LOG_TEST_RET(card->ctx, r, "SELECT FILE (DF-ID) failed"); } return epass2003_select_fid(card, path[pathlen - 2], path[pathlen - 1], file_out); } } static int epass2003_select_file(struct sc_card *card, const sc_path_t * in_path, sc_file_t ** file_out) { int r; char 
pbuf[SC_MAX_PATH_STRING_SIZE]; LOG_FUNC_CALLED(card->ctx); r = sc_path_print(pbuf, sizeof(pbuf), &card->cache.current_path); if (r != SC_SUCCESS) pbuf[0] = '\0'; sc_log(card->ctx, "current path (%s, %s): %s (len: %"SC_FORMAT_LEN_SIZE_T"u)\n", card->cache.current_path.type == SC_PATH_TYPE_DF_NAME ? "aid" : "path", card->cache.valid ? "valid" : "invalid", pbuf, card->cache.current_path.len); switch (in_path->type) { case SC_PATH_TYPE_FILE_ID: if (in_path->len != 2) LOG_FUNC_RETURN(card->ctx, SC_ERROR_INVALID_ARGUMENTS); return epass2003_select_fid(card, in_path->value[0], in_path->value[1], file_out); case SC_PATH_TYPE_DF_NAME: return epass2003_select_aid(card, in_path, file_out); case SC_PATH_TYPE_PATH: return epass2003_select_path(card, in_path->value, in_path->len, file_out); default: LOG_FUNC_RETURN(card->ctx, SC_ERROR_INVALID_ARGUMENTS); } } static int epass2003_set_security_env(struct sc_card *card, const sc_security_env_t * env, int se_num) { struct sc_apdu apdu; u8 sbuf[SC_MAX_APDU_BUFFER_SIZE] = { 0 }; u8 *p; unsigned short fid = 0; int r, locked = 0; epass2003_exdata *exdata = NULL; if (!card->drv_data) return SC_ERROR_INVALID_ARGUMENTS; exdata = (epass2003_exdata *)card->drv_data; sc_format_apdu(card, &apdu, SC_APDU_CASE_3_SHORT, 0x22, 0x41, 0); p = sbuf; *p++ = 0x80; /* algorithm reference */ *p++ = 0x01; *p++ = 0x84; *p++ = 0x81; *p++ = 0x02; fid = 0x2900; fid += (unsigned short)(0x20 * (env->key_ref[0] & 0xff)); *p++ = fid >> 8; *p++ = fid & 0xff; r = p - sbuf; apdu.lc = r; apdu.datalen = r; apdu.data = sbuf; if (env->algorithm == SC_ALGORITHM_EC) { apdu.p2 = 0xB6; exdata->currAlg = SC_ALGORITHM_EC; if(env->algorithm_flags & SC_ALGORITHM_ECDSA_HASH_SHA1) { sbuf[2] = 0x91; exdata->ecAlgFlags = SC_ALGORITHM_ECDSA_HASH_SHA1; } else if (env->algorithm_flags & SC_ALGORITHM_ECDSA_HASH_SHA256) { sbuf[2] = 0x92; exdata->ecAlgFlags = SC_ALGORITHM_ECDSA_HASH_SHA256; } else { sc_log(card->ctx, "%0x Alg Not Support! 
", env->algorithm_flags); goto err; } } else if(env->algorithm == SC_ALGORITHM_RSA) { exdata->currAlg = SC_ALGORITHM_RSA; apdu.p2 = 0xB8; sc_log(card->ctx, "setenv RSA Algorithm alg_flags = %0x\n",env->algorithm_flags); } else { sc_log(card->ctx, "%0x Alg Not Support! ", env->algorithm); } if (se_num > 0) { r = sc_lock(card); LOG_TEST_RET(card->ctx, r, "sc_lock() failed"); locked = 1; } if (apdu.datalen != 0) { r = sc_transmit_apdu_t(card, &apdu); if (r) { sc_log(card->ctx, "%s: APDU transmit failed", sc_strerror(r)); goto err; } r = sc_check_sw(card, apdu.sw1, apdu.sw2); if (r) { sc_log(card->ctx, "%s: Card returned error", sc_strerror(r)); goto err; } } if (se_num <= 0) return 0; sc_format_apdu(card, &apdu, SC_APDU_CASE_3_SHORT, 0x22, 0xF2, se_num); r = sc_transmit_apdu_t(card, &apdu); sc_unlock(card); LOG_TEST_RET(card->ctx, r, "APDU transmit failed"); return sc_check_sw(card, apdu.sw1, apdu.sw2); err: if (locked) sc_unlock(card); return r; } static int epass2003_restore_security_env(struct sc_card *card, int se_num) { LOG_FUNC_CALLED(card->ctx); LOG_FUNC_RETURN(card->ctx, SC_SUCCESS); } static int epass2003_decipher(struct sc_card *card, const u8 * data, size_t datalen, u8 * out, size_t outlen) { int r; struct sc_apdu apdu; u8 rbuf[SC_MAX_APDU_BUFFER_SIZE] = { 0 }; u8 sbuf[SC_MAX_APDU_BUFFER_SIZE] = { 0 }; epass2003_exdata *exdata = NULL; LOG_FUNC_CALLED(card->ctx); if (!card->drv_data) return SC_ERROR_INVALID_ARGUMENTS; exdata = (epass2003_exdata *)card->drv_data; if(exdata->currAlg == SC_ALGORITHM_EC) { if(exdata->ecAlgFlags & SC_ALGORITHM_ECDSA_HASH_SHA1) { r = hash_data(data, datalen, sbuf, SC_ALGORITHM_ECDSA_HASH_SHA1); LOG_TEST_RET(card->ctx, r, "hash_data failed"); sc_format_apdu(card, &apdu, SC_APDU_CASE_3,0x2A, 0x9E, 0x9A); apdu.data = sbuf; apdu.lc = 0x14; apdu.datalen = 0x14; } else if (exdata->ecAlgFlags & SC_ALGORITHM_ECDSA_HASH_SHA256) { r = hash_data(data, datalen, sbuf, SC_ALGORITHM_ECDSA_HASH_SHA256); LOG_TEST_RET(card->ctx, r, "hash_data 
failed"); sc_format_apdu(card, &apdu, SC_APDU_CASE_3,0x2A, 0x9E, 0x9A); apdu.data = sbuf; apdu.lc = 0x20; apdu.datalen = 0x20; } else { return SC_ERROR_NOT_SUPPORTED; } apdu.resp = rbuf; apdu.resplen = sizeof(rbuf); apdu.le = 0; r = sc_transmit_apdu_t(card, &apdu); LOG_TEST_RET(card->ctx, r, "APDU transmit failed"); if (apdu.sw1 == 0x90 && apdu.sw2 == 0x00) { size_t len = apdu.resplen > outlen ? outlen : apdu.resplen; memcpy(out, apdu.resp, len); LOG_FUNC_RETURN(card->ctx, len); } LOG_FUNC_RETURN(card->ctx, sc_check_sw(card, apdu.sw1, apdu.sw2)); } else if(exdata->currAlg == SC_ALGORITHM_RSA) { sc_format_apdu(card, &apdu, SC_APDU_CASE_4_EXT, 0x2A, 0x80, 0x86); apdu.resp = rbuf; apdu.resplen = sizeof(rbuf); apdu.le = 0; memcpy(sbuf, data, datalen); apdu.data = sbuf; apdu.lc = datalen; apdu.datalen = datalen; } else { sc_format_apdu(card, &apdu, SC_APDU_CASE_4_EXT, 0x2A, 0x80, 0x86); apdu.resp = rbuf; apdu.resplen = sizeof(rbuf); apdu.le = 256; memcpy(sbuf, data, datalen); apdu.data = sbuf; apdu.lc = datalen; apdu.datalen = datalen; } r = sc_transmit_apdu_t(card, &apdu); LOG_TEST_RET(card->ctx, r, "APDU transmit failed"); if (apdu.sw1 == 0x90 && apdu.sw2 == 0x00) { size_t len = apdu.resplen > outlen ? 
outlen : apdu.resplen; memcpy(out, apdu.resp, len); LOG_FUNC_RETURN(card->ctx, len); } LOG_FUNC_RETURN(card->ctx, sc_check_sw(card, apdu.sw1, apdu.sw2)); } static int acl_to_ac_byte(struct sc_card *card, const struct sc_acl_entry *e) { if (e == NULL) return SC_ERROR_OBJECT_NOT_FOUND; switch (e->method) { case SC_AC_NONE: LOG_FUNC_RETURN(card->ctx, EPASS2003_AC_MAC_NOLESS | EPASS2003_AC_EVERYONE); case SC_AC_NEVER: LOG_FUNC_RETURN(card->ctx, EPASS2003_AC_MAC_NOLESS | EPASS2003_AC_NOONE); default: LOG_FUNC_RETURN(card->ctx, EPASS2003_AC_MAC_NOLESS | EPASS2003_AC_USER); } LOG_FUNC_RETURN(card->ctx, SC_ERROR_INCORRECT_PARAMETERS); } static int epass2003_process_fci(struct sc_card *card, sc_file_t * file, const u8 * buf, size_t buflen) { sc_context_t *ctx = card->ctx; size_t taglen, len = buflen; const u8 *tag = NULL, *p = buf; sc_log(ctx, "processing FCI bytes"); tag = sc_asn1_find_tag(ctx, p, len, 0x83, &taglen); if (tag != NULL && taglen == 2) { file->id = (tag[0] << 8) | tag[1]; sc_log(ctx, " file identifier: 0x%02X%02X", tag[0], tag[1]); } tag = sc_asn1_find_tag(ctx, p, len, 0x80, &taglen); if (tag != NULL && taglen > 0 && taglen < 3) { file->size = tag[0]; if (taglen == 2) file->size = (file->size << 8) + tag[1]; sc_log(ctx, " bytes in file: %"SC_FORMAT_LEN_SIZE_T"u", file->size); } if (tag == NULL) { tag = sc_asn1_find_tag(ctx, p, len, 0x81, &taglen); if (tag != NULL && taglen >= 2) { int bytes = (tag[0] << 8) + tag[1]; sc_log(ctx, " bytes in file: %d", bytes); file->size = bytes; } } tag = sc_asn1_find_tag(ctx, p, len, 0x82, &taglen); if (tag != NULL) { if (taglen > 0) { unsigned char byte = tag[0]; const char *type; if (byte == 0x38) { type = "DF"; file->type = SC_FILE_TYPE_DF; } else if (0x01 <= byte && byte <= 0x07) { type = "working EF"; file->type = SC_FILE_TYPE_WORKING_EF; switch (byte) { case 0x01: file->ef_structure = SC_FILE_EF_TRANSPARENT; break; case 0x02: file->ef_structure = SC_FILE_EF_LINEAR_FIXED; break; case 0x04: file->ef_structure = 
SC_FILE_EF_LINEAR_FIXED; break; case 0x03: case 0x05: case 0x06: case 0x07: break; default: break; } } else if (0x10 == byte) { type = "BSO"; file->type = SC_FILE_TYPE_BSO; } else if (0x11 <= byte) { type = "internal EF"; file->type = SC_FILE_TYPE_INTERNAL_EF; switch (byte) { case 0x11: break; case 0x12: break; default: break; } } else { type = "unknown"; file->type = SC_FILE_TYPE_INTERNAL_EF; } sc_log(ctx, "type %s, EF structure %d", type, byte); } } tag = sc_asn1_find_tag(ctx, p, len, 0x84, &taglen); if (tag != NULL && taglen > 0 && taglen <= 16) { memcpy(file->name, tag, taglen); file->namelen = taglen; sc_log_hex(ctx, "File name", file->name, file->namelen); if (!file->type) file->type = SC_FILE_TYPE_DF; } tag = sc_asn1_find_tag(ctx, p, len, 0x85, &taglen); if (tag != NULL && taglen) sc_file_set_prop_attr(file, tag, taglen); else file->prop_attr_len = 0; tag = sc_asn1_find_tag(ctx, p, len, 0xA5, &taglen); if (tag != NULL && taglen) sc_file_set_prop_attr(file, tag, taglen); tag = sc_asn1_find_tag(ctx, p, len, 0x86, &taglen); if (tag != NULL && taglen) sc_file_set_sec_attr(file, tag, taglen); tag = sc_asn1_find_tag(ctx, p, len, 0x8A, &taglen); if (tag != NULL && taglen == 1) { if (tag[0] == 0x01) file->status = SC_FILE_STATUS_CREATION; else if (tag[0] == 0x07 || tag[0] == 0x05) file->status = SC_FILE_STATUS_ACTIVATED; else if (tag[0] == 0x06 || tag[0] == 0x04) file->status = SC_FILE_STATUS_INVALIDATED; } file->magic = SC_FILE_MAGIC; return 0; } static int epass2003_construct_fci(struct sc_card *card, const sc_file_t * file, u8 * out, size_t * outlen) { u8 *p = out; u8 buf[64]; unsigned char ops[8] = { 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF }; int rv; unsigned ii; if (*outlen < 2) return SC_ERROR_BUFFER_TOO_SMALL; *p++ = 0x62; p++; if (file->type == SC_FILE_TYPE_WORKING_EF) { if (file->ef_structure == SC_FILE_EF_TRANSPARENT) { buf[0] = (file->size >> 8) & 0xFF; buf[1] = file->size & 0xFF; sc_asn1_put_tag(0x80, buf, 2, p, *outlen - (p - out), &p); } } if 
(file->type == SC_FILE_TYPE_DF) { buf[0] = 0x38; buf[1] = 0x00; sc_asn1_put_tag(0x82, buf, 2, p, *outlen - (p - out), &p); } else if (file->type == SC_FILE_TYPE_WORKING_EF) { buf[0] = file->ef_structure & 7; if (file->ef_structure == SC_FILE_EF_TRANSPARENT) { buf[1] = 0x00; sc_asn1_put_tag(0x82, buf, 2, p, *outlen - (p - out), &p); } else if (file->ef_structure == SC_FILE_EF_LINEAR_FIXED || file->ef_structure == SC_FILE_EF_LINEAR_VARIABLE) { buf[1] = 0x00; buf[2] = 0x00; buf[3] = 0x40; /* record length */ buf[4] = 0x00; /* record count */ sc_asn1_put_tag(0x82, buf, 5, p, *outlen - (p - out), &p); } else { return SC_ERROR_NOT_SUPPORTED; } } else if (file->type == SC_FILE_TYPE_INTERNAL_EF) { if (file->ef_structure == SC_CARDCTL_OBERTHUR_KEY_RSA_CRT || file->ef_structure == SC_CARDCTL_OBERTHUR_KEY_EC_CRT) { buf[0] = 0x11; buf[1] = 0x00; } else if (file->ef_structure == SC_CARDCTL_OBERTHUR_KEY_RSA_PUBLIC || file->ef_structure == SC_CARDCTL_OBERTHUR_KEY_EC_PUBLIC) { buf[0] = 0x12; buf[1] = 0x00; } else { return SC_ERROR_NOT_SUPPORTED; } sc_asn1_put_tag(0x82, buf, 2, p, *outlen - (p - out), &p); } else if (file->type == SC_FILE_TYPE_BSO) { buf[0] = 0x10; buf[1] = 0x00; sc_asn1_put_tag(0x82, buf, 2, p, *outlen - (p - out), &p); } buf[0] = (file->id >> 8) & 0xFF; buf[1] = file->id & 0xFF; sc_asn1_put_tag(0x83, buf, 2, p, *outlen - (p - out), &p); if (file->type == SC_FILE_TYPE_DF) { if (file->namelen != 0) { sc_asn1_put_tag(0x84, file->name, file->namelen, p, *outlen - (p - out), &p); } else { return SC_ERROR_INVALID_ARGUMENTS; } } if (file->type == SC_FILE_TYPE_DF) { unsigned char data[2] = {0x00, 0x7F}; /* 127 files at most */ sc_asn1_put_tag(0x85, data, sizeof(data), p, *outlen - (p - out), &p); } else if (file->type == SC_FILE_TYPE_BSO) { buf[0] = file->size & 0xff; sc_asn1_put_tag(0x85, buf, 1, p, *outlen - (p - out), &p); } else if (file->type == SC_FILE_TYPE_INTERNAL_EF) { if (file->ef_structure == SC_CARDCTL_OBERTHUR_KEY_RSA_CRT || file->ef_structure == 
SC_CARDCTL_OBERTHUR_KEY_RSA_PUBLIC|| file->ef_structure == SC_CARDCTL_OBERTHUR_KEY_EC_CRT|| file->ef_structure == SC_CARDCTL_OBERTHUR_KEY_EC_PUBLIC) { buf[0] = (file->size >> 8) & 0xFF; buf[1] = file->size & 0xFF; sc_asn1_put_tag(0x85, buf, 2, p, *outlen - (p - out), &p); } } if (file->sec_attr_len) { memcpy(buf, file->sec_attr, file->sec_attr_len); sc_asn1_put_tag(0x86, buf, file->sec_attr_len, p, *outlen - (p - out), &p); } else { sc_log(card->ctx, "SC_FILE_ACL"); if (file->type == SC_FILE_TYPE_DF) { ops[0] = SC_AC_OP_LIST_FILES; ops[1] = SC_AC_OP_CREATE; ops[3] = SC_AC_OP_DELETE; } else if (file->type == SC_FILE_TYPE_WORKING_EF) { if (file->ef_structure == SC_FILE_EF_TRANSPARENT) { ops[0] = SC_AC_OP_READ; ops[1] = SC_AC_OP_UPDATE; ops[3] = SC_AC_OP_DELETE; } else if (file->ef_structure == SC_FILE_EF_LINEAR_FIXED || file->ef_structure == SC_FILE_EF_LINEAR_VARIABLE) { ops[0] = SC_AC_OP_READ; ops[1] = SC_AC_OP_UPDATE; ops[2] = SC_AC_OP_WRITE; ops[3] = SC_AC_OP_DELETE; } else { return SC_ERROR_NOT_SUPPORTED; } } else if (file->type == SC_FILE_TYPE_BSO) { ops[0] = SC_AC_OP_UPDATE; ops[3] = SC_AC_OP_DELETE; } else if (file->type == SC_FILE_TYPE_INTERNAL_EF) { if (file->ef_structure == SC_CARDCTL_OBERTHUR_KEY_RSA_CRT || file->ef_structure == SC_CARDCTL_OBERTHUR_KEY_EC_CRT) { ops[1] = SC_AC_OP_UPDATE; ops[2] = SC_AC_OP_CRYPTO; ops[3] = SC_AC_OP_DELETE; } else if (file->ef_structure == SC_CARDCTL_OBERTHUR_KEY_RSA_PUBLIC|| file->ef_structure == SC_CARDCTL_OBERTHUR_KEY_EC_PUBLIC) { ops[0] = SC_AC_OP_READ; ops[1] = SC_AC_OP_UPDATE; ops[2] = SC_AC_OP_CRYPTO; ops[3] = SC_AC_OP_DELETE; } } else { return SC_ERROR_NOT_SUPPORTED; } for (ii = 0; ii < sizeof(ops); ii++) { const struct sc_acl_entry *entry; buf[ii] = 0xFF; if (ops[ii] == 0xFF) continue; entry = sc_file_get_acl_entry(file, ops[ii]); rv = acl_to_ac_byte(card, entry); LOG_TEST_RET(card->ctx, rv, "Invalid ACL"); buf[ii] = rv; } sc_asn1_put_tag(0x86, buf, sizeof(ops), p, *outlen - (p - out), &p); if(file->size == 256) { 
out[4]= 0x13; } } /* VT ??? */ if (file->ef_structure == SC_CARDCTL_OBERTHUR_KEY_RSA_PUBLIC|| file->ef_structure == SC_CARDCTL_OBERTHUR_KEY_EC_PUBLIC) { unsigned char data[2] = {0x00, 0x66}; sc_asn1_put_tag(0x87, data, sizeof(data), p, *outlen - (p - out), &p); if(file->size == 256) { out[4]= 0x14; } } out[1] = p - out - 2; *outlen = p - out; return 0; } static int epass2003_create_file(struct sc_card *card, sc_file_t * file) { int r; size_t len; u8 sbuf[SC_MAX_APDU_BUFFER_SIZE] = { 0 }; struct sc_apdu apdu; len = SC_MAX_APDU_BUFFER_SIZE; epass2003_hook_file(file, 1); if (card->ops->construct_fci == NULL) LOG_FUNC_RETURN(card->ctx, SC_ERROR_NOT_SUPPORTED); r = epass2003_construct_fci(card, file, sbuf, &len); LOG_TEST_RET(card->ctx, r, "construct_fci() failed"); sc_format_apdu(card, &apdu, SC_APDU_CASE_3_SHORT, 0xE0, 0x00, 0x00); apdu.lc = len; apdu.datalen = len; apdu.data = sbuf; r = sc_transmit_apdu_t(card, &apdu); LOG_TEST_RET(card->ctx, r, "APDU transmit failed"); r = sc_check_sw(card, apdu.sw1, apdu.sw2); LOG_TEST_RET(card->ctx, r, "APDU sw1/2 wrong"); epass2003_hook_file(file, 0); return r; } static int epass2003_delete_file(struct sc_card *card, const sc_path_t * path) { int r; u8 sbuf[2]; struct sc_apdu apdu; LOG_FUNC_CALLED(card->ctx); r = sc_select_file(card, path, NULL); epass2003_hook_path((struct sc_path *)path, 1); if (r == SC_SUCCESS) { sbuf[0] = path->value[path->len - 2]; sbuf[1] = path->value[path->len - 1]; sc_format_apdu(card, &apdu, SC_APDU_CASE_3_SHORT, 0xE4, 0x00, 0x00); apdu.lc = 2; apdu.datalen = 2; apdu.data = sbuf; } else { LOG_FUNC_RETURN(card->ctx, SC_ERROR_INVALID_ARGUMENTS); } r = sc_transmit_apdu_t(card, &apdu); LOG_TEST_RET(card->ctx, r, "APDU transmit failed"); r = sc_check_sw(card, apdu.sw1, apdu.sw2); LOG_TEST_RET(card->ctx, r, "Delete file failed"); LOG_FUNC_RETURN(card->ctx, r); } static int epass2003_list_files(struct sc_card *card, unsigned char *buf, size_t buflen) { struct sc_apdu apdu; unsigned char 
rbuf[SC_MAX_APDU_BUFFER_SIZE] = { 0 }; int r; SC_FUNC_CALLED(card->ctx, SC_LOG_DEBUG_VERBOSE); sc_format_apdu(card, &apdu, SC_APDU_CASE_1, 0x34, 0x00, 0x00); apdu.cla = 0x80; apdu.le = 0; apdu.resplen = sizeof(rbuf); apdu.resp = rbuf; r = sc_transmit_apdu_t(card, &apdu); LOG_TEST_RET(card->ctx, r, "APDU transmit failed"); r = sc_check_sw(card, apdu.sw1, apdu.sw2); LOG_TEST_RET(card->ctx, r, "Card returned error"); if (apdu.resplen == 0x100 && rbuf[0] == 0 && rbuf[1] == 0) LOG_FUNC_RETURN(card->ctx, 0); buflen = buflen < apdu.resplen ? buflen : apdu.resplen; memcpy(buf, rbuf, buflen); LOG_FUNC_RETURN(card->ctx, buflen); } static int internal_write_rsa_key_factor(struct sc_card *card, unsigned short fid, u8 factor, sc_pkcs15_bignum_t data) { int r; struct sc_apdu apdu; u8 sbuff[SC_MAX_EXT_APDU_BUFFER_SIZE] = { 0 }; LOG_FUNC_CALLED(card->ctx); sbuff[0] = ((fid & 0xff00) >> 8); sbuff[1] = (fid & 0x00ff); memcpy(&sbuff[2], data.data, data.len); // sc_mem_reverse(&sbuff[2], data.len); sc_format_apdu(card, &apdu, SC_APDU_CASE_3, 0xe7, factor, 0x00); apdu.cla = 0x80; apdu.lc = apdu.datalen = 2 + data.len; apdu.data = sbuff; r = sc_transmit_apdu_t(card, &apdu); LOG_TEST_RET(card->ctx, r, "APDU transmit failed"); r = sc_check_sw(card, apdu.sw1, apdu.sw2); LOG_TEST_RET(card->ctx, r, "Write rsa key factor failed"); LOG_FUNC_RETURN(card->ctx, SC_SUCCESS); } static int internal_write_rsa_key(struct sc_card *card, unsigned short fid, struct sc_pkcs15_prkey_rsa *rsa) { int r; LOG_FUNC_CALLED(card->ctx); r = internal_write_rsa_key_factor(card, fid, 0x02, rsa->modulus); LOG_TEST_RET(card->ctx, r, "write n failed"); r = internal_write_rsa_key_factor(card, fid, 0x03, rsa->d); LOG_TEST_RET(card->ctx, r, "write d failed"); LOG_FUNC_RETURN(card->ctx, SC_SUCCESS); } static int hash_data(const unsigned char *data, size_t datalen, unsigned char *hash, unsigned int mechanismType) { if ((NULL == data) || (NULL == hash)) return SC_ERROR_INVALID_ARGUMENTS; if(mechanismType & 
SC_ALGORITHM_ECDSA_HASH_SHA1) { unsigned char data_hash[24] = { 0 }; size_t len = 0; sha1_digest(data, datalen, data_hash); len = REVERSE_ORDER4(datalen); memcpy(&data_hash[20], &len, 4); memcpy(hash, data_hash, 24); } else if(mechanismType & SC_ALGORITHM_ECDSA_HASH_SHA256) { unsigned char data_hash[36] = { 0 }; size_t len = 0; sha256_digest(data, datalen, data_hash); len = REVERSE_ORDER4(datalen); memcpy(&data_hash[32], &len, 4); memcpy(hash, data_hash, 36); } else { return SC_ERROR_NOT_SUPPORTED; } return SC_SUCCESS; } static int install_secret_key(struct sc_card *card, unsigned char ktype, unsigned char kid, unsigned char useac, unsigned char modifyac, unsigned char EC, unsigned char *data, unsigned long dataLen) { int r; struct sc_apdu apdu; unsigned char isapp = 0x00; /* appendable */ unsigned char tmp_data[256] = { 0 }; tmp_data[0] = ktype; tmp_data[1] = kid; tmp_data[2] = useac; tmp_data[3] = modifyac; tmp_data[8] = 0xFF; if (0x04 == ktype || 0x06 == ktype) { tmp_data[4] = EPASS2003_AC_MAC_NOLESS | EPASS2003_AC_SO; tmp_data[5] = EPASS2003_AC_MAC_NOLESS | EPASS2003_AC_SO; tmp_data[7] = (kid == PIN_ID[0] ? 
EPASS2003_AC_USER : EPASS2003_AC_SO); tmp_data[9] = (EC << 4) | EC; } memcpy(&tmp_data[10], data, dataLen); sc_format_apdu(card, &apdu, SC_APDU_CASE_3_SHORT, 0xe3, isapp, 0x00); apdu.cla = 0x80; apdu.lc = apdu.datalen = 10 + dataLen; apdu.data = tmp_data; r = sc_transmit_apdu_t(card, &apdu); LOG_TEST_RET(card->ctx, r, "APDU install_secret_key failed"); r = sc_check_sw(card, apdu.sw1, apdu.sw2); LOG_TEST_RET(card->ctx, r, "install_secret_key failed"); return r; } static int internal_install_pre(struct sc_card *card) { int r; /* init key for enc */ r = install_secret_key(card, 0x01, 0x00, EPASS2003_AC_MAC_NOLESS | EPASS2003_AC_EVERYONE, EPASS2003_AC_MAC_NOLESS | EPASS2003_AC_EVERYONE, 0, g_init_key_enc, 16); LOG_TEST_RET(card->ctx, r, "Install init key failed"); /* init key for mac */ r = install_secret_key(card, 0x02, 0x00, EPASS2003_AC_MAC_NOLESS | EPASS2003_AC_EVERYONE, EPASS2003_AC_MAC_NOLESS | EPASS2003_AC_EVERYONE, 0, g_init_key_mac, 16); LOG_TEST_RET(card->ctx, r, "Install init key failed"); return r; } /* use external auth secret as pin */ static int internal_install_pin(struct sc_card *card, sc_epass2003_wkey_data * pin) { int r; unsigned char hash[HASH_LEN] = { 0 }; r = hash_data(pin->key_data.es_secret.key_val, pin->key_data.es_secret.key_len, hash, SC_ALGORITHM_ECDSA_HASH_SHA1); LOG_TEST_RET(card->ctx, r, "hash data failed"); r = install_secret_key(card, 0x04, pin->key_data.es_secret.kid, pin->key_data.es_secret.ac[0], pin->key_data.es_secret.ac[1], pin->key_data.es_secret.EC, hash, HASH_LEN); LOG_TEST_RET(card->ctx, r, "Install failed"); return r; } static int epass2003_write_key(struct sc_card *card, sc_epass2003_wkey_data * data) { LOG_FUNC_CALLED(card->ctx); if (data->type & SC_EPASS2003_KEY) { if (data->type == SC_EPASS2003_KEY_RSA) return internal_write_rsa_key(card, data->key_data.es_key.fid, data->key_data.es_key.rsa); else LOG_FUNC_RETURN(card->ctx, SC_ERROR_NOT_SUPPORTED); } else if (data->type & SC_EPASS2003_SECRET) { if (data->type == 
		/* (continuation of epass2003_write_key secret-key dispatch) */
		SC_EPASS2003_SECRET_PRE)
			return internal_install_pre(card);
		else if (data->type == SC_EPASS2003_SECRET_PIN)
			return internal_install_pin(card, data);
		else
			LOG_FUNC_RETURN(card->ctx, SC_ERROR_NOT_SUPPORTED);
	}
	else {
		LOG_FUNC_RETURN(card->ctx, SC_ERROR_NOT_SUPPORTED);
	}

	LOG_FUNC_RETURN(card->ctx, SC_SUCCESS);
}

/* Generate an RSA key pair on the card (INS 0x46), then read the public
 * modulus back (INS 0xB4, CLA 0x80) into data->modulus (malloc'd, owned
 * by the caller). */
static int
epass2003_gen_key(struct sc_card *card, sc_epass2003_gen_key_data * data)
{
	int r;
	size_t len = data->key_length;
	struct sc_apdu apdu;
	u8 rbuf[SC_MAX_EXT_APDU_BUFFER_SIZE] = { 0 };
	u8 sbuf[SC_MAX_EXT_APDU_BUFFER_SIZE] = { 0 };

	LOG_FUNC_CALLED(card->ctx);

	/* 0x02 selects the 2048-bit (256-byte) variant, 0x01 otherwise */
	if(len == 256) {
		sbuf[0] = 0x02;
	}
	else {
		sbuf[0] = 0x01;
	}

	sbuf[1] = (u8) ((len >> 8) & 0xff);
	sbuf[2] = (u8) (len & 0xff);
	sbuf[3] = (u8) ((data->prkey_id >> 8) & 0xFF);
	sbuf[4] = (u8) ((data->prkey_id) & 0xFF);
	sbuf[5] = (u8) ((data->pukey_id >> 8) & 0xFF);
	sbuf[6] = (u8) ((data->pukey_id) & 0xFF);

	/* generate key */
	sc_format_apdu(card, &apdu, SC_APDU_CASE_3_SHORT, 0x46, 0x00, 0x00);
	apdu.lc = apdu.datalen = 7;
	apdu.data = sbuf;

	r = sc_transmit_apdu_t(card, &apdu);
	LOG_TEST_RET(card->ctx, r, "APDU transmit failed");
	r = sc_check_sw(card, apdu.sw1, apdu.sw2);
	LOG_TEST_RET(card->ctx, r, "generate keypair failed");

	/* read public key */
	sc_format_apdu(card, &apdu, SC_APDU_CASE_3_SHORT, 0xb4, 0x02, 0x00);
	if(len == 256) {
		apdu.p1 = 0x00;
	}

	apdu.cla = 0x80;
	apdu.lc = apdu.datalen = 2;
	apdu.data = &sbuf[5];	/* public key id */
	apdu.resp = rbuf;
	apdu.resplen = sizeof(rbuf);
	apdu.le = 0x00;

	r = sc_transmit_apdu_t(card, &apdu);
	LOG_TEST_RET(card->ctx, r, "APDU transmit failed");
	r = sc_check_sw(card, apdu.sw1, apdu.sw2);
	LOG_TEST_RET(card->ctx, r, "get pukey failed");

	if (len < apdu.resplen)
		LOG_FUNC_RETURN(card->ctx, SC_ERROR_INVALID_ARGUMENTS);

	data->modulus = (u8 *) malloc(len);
	if (!data->modulus)
		LOG_FUNC_RETURN(card->ctx, SC_ERROR_OUT_OF_MEMORY);

	memcpy(data->modulus, rbuf, len);

	LOG_FUNC_RETURN(card->ctx, SC_SUCCESS);
}

/* Erase the whole card by deleting the MF
 * (body continues on the following source line). */
static int
epass2003_erase_card(struct sc_card *card)
{
	int r;

	LOG_FUNC_CALLED(card->ctx);
	/* (continuation of epass2003_erase_card) */
	sc_invalidate_cache(card);
	r = sc_delete_file(card, sc_get_mf_path());
	LOG_TEST_RET(card->ctx, r, "delete MF failed");
	LOG_FUNC_RETURN(card->ctx, r);
}

/* Read the 8-byte serial number (GET DATA tag 0x80) into both the card
 * structure and the caller-provided sc_serial_number_t. */
static int
epass2003_get_serialnr(struct sc_card *card, sc_serial_number_t * serial)
{
	u8 rbuf[8];
	size_t rbuf_len = sizeof(rbuf);

	LOG_FUNC_CALLED(card->ctx);

	if (SC_SUCCESS != get_data(card, 0x80, rbuf, rbuf_len))
		return SC_ERROR_CARD_CMD_FAILED;

	card->serialnr.len = serial->len = 8;
	memcpy(card->serialnr.value, rbuf, 8);
	memcpy(serial->value, rbuf, 8);

	LOG_FUNC_RETURN(card->ctx, SC_SUCCESS);
}

/* Driver-specific control entry point: key write, key generation,
 * card erase and serial-number retrieval. */
static int
epass2003_card_ctl(struct sc_card *card, unsigned long cmd, void *ptr)
{
	LOG_FUNC_CALLED(card->ctx);
	sc_log(card->ctx, "cmd is %0lx", cmd);

	switch (cmd) {
	case SC_CARDCTL_ENTERSAFE_WRITE_KEY:
		return epass2003_write_key(card, (sc_epass2003_wkey_data *) ptr);
	case SC_CARDCTL_ENTERSAFE_GENERATE_KEY:
		return epass2003_gen_key(card, (sc_epass2003_gen_key_data *) ptr);
	case SC_CARDCTL_ERASE_CARD:
		return epass2003_erase_card(card);
	case SC_CARDCTL_GET_SERIALNR:
		return epass2003_get_serialnr(card, (sc_serial_number_t *) ptr);
	default:
		return SC_ERROR_NOT_SUPPORTED;
	}
}

/* Force the fixed ASCII/zero-padded PIN layout this card expects;
 * 'num' selects the first or second 16-byte PIN slot in the APDU. */
static void
internal_sanitize_pin_info(struct sc_pin_cmd_pin *pin, unsigned int num)
{
	pin->encoding = SC_PIN_ENCODING_ASCII;
	pin->min_length = 4;
	pin->max_length = 16;
	pin->pad_length = 16;
	pin->offset = 5 + num * 16;
	pin->pad_char = 0x00;
}

/* Read the configured maximum PIN try counter from the dedicated file
 * 3F00/5015/9F00. */
static int
get_external_key_maxtries(struct sc_card *card, unsigned char *maxtries)
{
	unsigned char maxcounter[2] = { 0 };
	static const sc_path_t file_path = {
		{0x3f, 0x00, 0x50, 0x15, 0x9f, 0x00, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0},
		6,
		0,
		0,
		SC_PATH_TYPE_PATH,
		{{0}, 0}
	};
	int ret;

	ret = sc_select_file(card, &file_path, NULL);
	LOG_TEST_RET(card->ctx, ret, "select max counter file failed");
	ret = sc_read_binary(card, 0, maxcounter, 2, 0);
	LOG_TEST_RET(card->ctx, ret, "read max counter file failed");

	*maxtries = maxcounter[0];
	return SC_SUCCESS;
}

/* Query remaining tries for external key 'kid' by sending an empty
 * EXTERNAL AUTHENTICATE and decoding SW 63Cx
 * (declaration continues on the following source line). */
static int
get_external_key_retries(struct sc_card *card, unsigned char kid,
		/* (continuation of get_external_key_retries) */
		unsigned char *retries)
{
	int r;
	struct sc_apdu apdu;
	unsigned char random[16] = { 0 };

	r = sc_get_challenge(card, random, 8);
	LOG_TEST_RET(card->ctx, r, "get challenge get_external_key_retries failed");

	sc_format_apdu(card, &apdu, SC_APDU_CASE_1, 0x82, 0x01, 0x80 | kid);
	apdu.resp = NULL;
	apdu.resplen = 0;

	r = sc_transmit_apdu_t(card, &apdu);
	LOG_TEST_RET(card->ctx, r, "APDU get_external_key_retries failed");

	/* SW1/SW2 = 63Cx encodes x remaining tries */
	if (retries && ((0x63 == (apdu.sw1 & 0xff)) && (0xC0 == (apdu.sw2 & 0xf0)))) {
		*retries = (apdu.sw2 & 0x0f);
		r = SC_SUCCESS;
	}
	else {
		LOG_TEST_RET(card->ctx, r, "get_external_key_retries failed");
		r = SC_ERROR_CARD_CMD_FAILED;
	}

	return r;
}

/* GET CHALLENGE wrapper: always asks the ISO layer for 16 bytes and
 * copies at most 'len' of them to the caller; returns bytes copied. */
static int
epass2003_get_challenge(sc_card_t *card, u8 *rnd, size_t len)
{
	u8 rbuf[16];
	size_t out_len;
	int r;

	LOG_FUNC_CALLED(card->ctx);

	r = iso_ops->get_challenge(card, rbuf, sizeof rbuf);
	LOG_TEST_RET(card->ctx, r, "GET CHALLENGE cmd failed");

	if (len < (size_t) r) {
		out_len = len;
	} else {
		out_len = (size_t) r;
	}

	memcpy(rnd, rbuf, out_len);
	LOG_FUNC_RETURN(card->ctx, (int) out_len);
}

/* EXTERNAL AUTHENTICATE with key 'kid': hash the secret (SHA-1 + length
 * trailer), 3DES-CBC encrypt the card challenge under that hash, and send
 * the first 8 cipher bytes. */
static int
external_key_auth(struct sc_card *card, unsigned char kid,
		unsigned char *data, size_t datalen)
{
	int r;
	struct sc_apdu apdu;
	unsigned char random[16] = { 0 };
	unsigned char tmp_data[16] = { 0 };
	unsigned char hash[HASH_LEN] = { 0 };
	unsigned char iv[16] = { 0 };

	r = sc_get_challenge(card, random, 8);
	LOG_TEST_RET(card->ctx, r, "get challenge external_key_auth failed");

	r = hash_data(data, datalen, hash, SC_ALGORITHM_ECDSA_HASH_SHA1);
	LOG_TEST_RET(card->ctx, r, "hash data failed");

	des3_encrypt_cbc(hash, HASH_LEN, iv, random, 8, tmp_data);
	sc_format_apdu(card, &apdu, SC_APDU_CASE_3_SHORT, 0x82, 0x01, 0x80 | kid);
	apdu.lc = apdu.datalen = 8;
	apdu.data = tmp_data;

	r = sc_transmit_apdu_t(card, &apdu);
	LOG_TEST_RET(card->ctx, r, "APDU external_key_auth failed");
	r = sc_check_sw(card, apdu.sw1, apdu.sw2);
	LOG_TEST_RET(card->ctx, r, "external_key_auth failed");

	return r;
}

/* Replace the stored secret for key (ktype, kid) with the hash of the
 * new value, carrying the max-tries counter in both nibbles of byte 0
 * (declaration continues on the following source line). */
static int
update_secret_key(struct sc_card *card, unsigned
		/* (continuation of update_secret_key) */
		char ktype, unsigned char kid, const unsigned char *data,
		unsigned long datalen)
{
	int r;
	struct sc_apdu apdu;
	unsigned char hash[HASH_LEN] = { 0 };
	unsigned char tmp_data[256] = { 0 };
	unsigned char maxtries = 0;

	r = hash_data(data, datalen, hash, SC_ALGORITHM_ECDSA_HASH_SHA1);
	LOG_TEST_RET(card->ctx, r, "hash data failed");

	r = get_external_key_maxtries(card, &maxtries);
	LOG_TEST_RET(card->ctx, r, "get max counter failed");

	tmp_data[0] = (maxtries << 4) | maxtries;	/* retry counter nibbles */
	memcpy(&tmp_data[1], hash, HASH_LEN);
	sc_format_apdu(card, &apdu, SC_APDU_CASE_3_SHORT, 0xe5, ktype, kid);
	apdu.cla = 0x80;
	apdu.lc = apdu.datalen = 1 + HASH_LEN;
	apdu.data = tmp_data;

	r = sc_transmit_apdu_t(card, &apdu);
	LOG_TEST_RET(card->ctx, r, "APDU update_secret_key failed");
	r = sc_check_sw(card, apdu.sw1, apdu.sw2);
	LOG_TEST_RET(card->ctx, r, "update_secret_key failed");

	return r;
}

/* use external auth secret as pin */
/* PIN command entry point: GET_INFO reads the retry counters, UNBLOCK
 * verifies the SO key (kid+1), CHANGE installs a new secret, anything
 * else (VERIFY) authenticates with key 'kid'
 * (body continues on the following source line). */
static int
epass2003_pin_cmd(struct sc_card *card, struct sc_pin_cmd_data *data,
		int *tries_left)
{
	int r;
	u8 kid;
	u8 retries = 0;
	u8 pin_low = 3;	/* warn when fewer tries than this remain */
	unsigned char maxtries = 0;

	LOG_FUNC_CALLED(card->ctx);

	internal_sanitize_pin_info(&data->pin1, 0);
	internal_sanitize_pin_info(&data->pin2, 1);
	data->flags |= SC_PIN_CMD_NEED_PADDING;
	kid = data->pin_reference;

	/* get pin retries */
	if (data->cmd == SC_PIN_CMD_GET_INFO) {
		r = get_external_key_retries(card, 0x80 | kid, &retries);
		if (r == SC_SUCCESS) {
			data->pin1.tries_left = retries;
			if (tries_left)
				*tries_left = retries;

			r = get_external_key_maxtries(card, &maxtries);
			LOG_TEST_RET(card->ctx, r, "get max counter failed");

			data->pin1.max_tries = maxtries;
		}
		//remove below code, because the old implement only return PIN retries, now modify the code and return PIN status
		// return r;
	}
	else if (data->cmd == SC_PIN_CMD_UNBLOCK) {	/* verify */
		r = external_key_auth(card, (kid + 1), (unsigned char *)data->pin1.data,
				data->pin1.len);
		LOG_TEST_RET(card->ctx, r, "verify pin failed");
	}
	else if (data->cmd == SC_PIN_CMD_CHANGE || data->cmd ==
			/* (continuation of epass2003_pin_cmd)
			 * NOTE(review): SC_PIN_CMD_UNBLOCK here is unreachable — the
			 * preceding else-if already consumed UNBLOCK, so unblock only
			 * verifies and never installs the new secret; confirm intended. */
			SC_PIN_CMD_UNBLOCK) {	/* change */
		r = update_secret_key(card, 0x04, kid, data->pin2.data,
				(unsigned long)data->pin2.len);
		LOG_TEST_RET(card->ctx, r, "verify pin failed");
	}
	else {
		r = external_key_auth(card, kid, (unsigned char *)data->pin1.data,
				data->pin1.len);
		get_external_key_retries(card, 0x80 | kid, &retries);
		if (retries < pin_low)
			sc_log(card->ctx, "Verification failed (remaining tries: %d)",
					retries);
	}
	LOG_TEST_RET(card->ctx, r, "verify pin failed");

	if (r == SC_SUCCESS)
	{
		data->pin1.logged_in = SC_PIN_STATE_LOGGED_IN;
	}

	return r;
}

/* Build the driver operations table: start from the ISO 7816 defaults
 * and override everything this card handles specially. */
static struct sc_card_driver *
sc_get_driver(void)
{
	struct sc_card_driver *iso_drv = sc_get_iso7816_driver();

	if (iso_ops == NULL)
		iso_ops = iso_drv->ops;

	epass2003_ops = *iso_ops;

	epass2003_ops.match_card = epass2003_match_card;
	epass2003_ops.init = epass2003_init;
	epass2003_ops.finish = epass2003_finish;
	epass2003_ops.write_binary = NULL;
	epass2003_ops.write_record = NULL;
	epass2003_ops.select_file = epass2003_select_file;
	epass2003_ops.get_response = NULL;
	epass2003_ops.restore_security_env = epass2003_restore_security_env;
	epass2003_ops.set_security_env = epass2003_set_security_env;
	epass2003_ops.decipher = epass2003_decipher;
	epass2003_ops.compute_signature = epass2003_decipher;	/* same code path */
	epass2003_ops.create_file = epass2003_create_file;
	epass2003_ops.delete_file = epass2003_delete_file;
	epass2003_ops.list_files = epass2003_list_files;
	epass2003_ops.card_ctl = epass2003_card_ctl;
	epass2003_ops.process_fci = epass2003_process_fci;
	epass2003_ops.construct_fci = epass2003_construct_fci;
	epass2003_ops.pin_cmd = epass2003_pin_cmd;
	epass2003_ops.check_sw = epass2003_check_sw;
	epass2003_ops.get_challenge = epass2003_get_challenge;
	return &epass2003_drv;
}

/* Public entry point: return the ePass2003 card driver. */
struct sc_card_driver *
sc_get_epass2003_driver(void)
{
	return sc_get_driver();
}
#endif	/* #ifdef ENABLE_OPENSSL */
#endif	/* #ifdef ENABLE_SM */
/* * Support for ePass2003 smart cards * * Copyright (C) 2008, Weitao Sun <weitao@ftsafe.com> * Copyright (C) 2011, Xiaoshuo Wu <xiaoshuo@ftsafe.com> * * This library is free software; you can redistribute it and/or * modify it under the terms of the GNU Lesser General Public * License as published by the Free Software Foundation; either * version 2.1 of the License, or (at your option) any later version. * * This library is distributed in the hope that it will be useful, * but WITHOUT ANY WARRANTY; without even the implied warranty of * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU * Lesser General Public License for more details. * * You should have received a copy of the GNU Lesser General Public * License along with this library; if not, write to the Free Software * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA */ #if HAVE_CONFIG_H #include "config.h" #endif #ifdef ENABLE_SM /* empty file without SM enabled */ #ifdef ENABLE_OPENSSL /* empty file without openssl */ #include <ctype.h> #include <stdlib.h> #include <string.h> #include <openssl/evp.h> #include <openssl/sha.h> #include "internal.h" #include "asn1.h" #include <ctype.h> #include <stdlib.h> #include <string.h> #include <openssl/evp.h> #include <openssl/sha.h> #include "internal.h" #include "asn1.h" #include "cardctl.h" static struct sc_atr_table epass2003_atrs[] = { /* This is a FIPS certified card using SCP01 security messaging. 
	   */
	{"3B:9F:95:81:31:FE:9F:00:66:46:53:05:10:00:11:71:df:00:00:00:6a:82:5e",
	 "FF:FF:FF:FF:FF:00:FF:FF:FF:FF:FF:FF:00:00:00:ff:00:ff:ff:00:00:00:00",
	 "FTCOS/ePass2003", SC_CARD_TYPE_ENTERSAFE_FTCOS_EPASS2003, 0, NULL },
	{NULL, NULL, NULL, 0, 0, NULL}
};

static struct sc_card_operations *iso_ops = NULL;
static struct sc_card_operations epass2003_ops;

static struct sc_card_driver epass2003_drv = {
	"epass2003",	/* name */
	"epass2003",	/* short name */
	&epass2003_ops,
	NULL, 0, NULL
};

#define KEY_TYPE_AES	0x01	/* FIPS mode */
#define KEY_TYPE_DES	0x02	/* Non-FIPS mode */

#define KEY_LEN_AES	16
#define KEY_LEN_DES	8
#define KEY_LEN_DES3	24
#define HASH_LEN	24

static unsigned char PIN_ID[2] = { ENTERSAFE_USER_PIN_ID, ENTERSAFE_SO_PIN_ID };

/*0x00:plain; 0x01:scp01 sm*/
#define SM_PLAIN	0x00
#define SM_SCP01	0x01

/* Transport keys used to derive the secure-messaging session keys. */
static unsigned char g_init_key_enc[16] = {
	0x01, 0x02, 0x03, 0x04, 0x05, 0x06, 0x07, 0x08,
	0x09, 0x0A, 0x0B, 0x0C, 0x0D, 0x0E, 0x0F, 0x10
};

static unsigned char g_init_key_mac[16] = {
	0x01, 0x02, 0x03, 0x04, 0x05, 0x06, 0x07, 0x08,
	0x09, 0x0A, 0x0B, 0x0C, 0x0D, 0x0E, 0x0F, 0x10
};

/* Fixed host "random" used in the SCP01 mutual authentication. */
static unsigned char g_random[8] = {
	0xBF, 0xC3, 0x29, 0x11, 0xC7, 0x18, 0xC3, 0x40
};

/* Per-card driver state (card->drv_data). */
typedef struct epass2003_exdata_st {
	unsigned char sm;		/* SM_PLAIN or SM_SCP01 */
	unsigned char smtype;		/* KEY_TYPE_AES or KEY_TYPE_DES */
	unsigned char sk_enc[16];	/* encrypt session key */
	unsigned char sk_mac[16];	/* mac session key */
	unsigned char icv_mac[16];	/* instruction counter vector(for sm) */
	unsigned char currAlg;		/* current Alg */
	unsigned int  ecAlgFlags; 	/* Ec Alg mechanism type*/
} epass2003_exdata;

/* Byte-swap the low 32 bits of x (big<->little endian length encoding).
 * NOTE(review): macro argument 'x' is not parenthesized — safe for the
 * simple-variable uses in this file, but fragile for expressions. */
#define REVERSE_ORDER4(x)	(			\
		((unsigned long)x & 0xFF000000)>> 24	|	\
		((unsigned long)x & 0x00FF0000)>>  8	|	\
		((unsigned long)x & 0x0000FF00)<<  8	|	\
		((unsigned long)x & 0x000000FF)<< 24)

/* SW1/SW2 -> OpenSC error code translation table
 * (entries continue on the following source line). */
static const struct sc_card_error epass2003_errors[] = {
	{ 0x6200, SC_ERROR_CARD_CMD_FAILED, "Warning: no information given, non-volatile memory is unchanged" },
	{ 0x6281, SC_ERROR_CORRUPTED_DATA, "Part of returned data may be
corrupted" },
	{ 0x6282, SC_ERROR_FILE_END_REACHED, "End of file/record reached before reading Le bytes" },
	{ 0x6283, SC_ERROR_CARD_CMD_FAILED, "Selected file invalidated" },
	{ 0x6284, SC_ERROR_CARD_CMD_FAILED, "FCI not formatted according to ISO 7816-4" },
	/* 63Cx: authentication failed, x tries left (decoded elsewhere) */
	{ 0x6300, SC_ERROR_PIN_CODE_INCORRECT, "Authentication failed"},
	{ 0x63C1, SC_ERROR_PIN_CODE_INCORRECT, "Authentication failed. One tries left"},
	{ 0x63C2, SC_ERROR_PIN_CODE_INCORRECT, "Authentication failed. Two tries left"},
	{ 0x63C3, SC_ERROR_PIN_CODE_INCORRECT, "Authentication failed"},
	{ 0x63C4, SC_ERROR_PIN_CODE_INCORRECT, "Authentication failed"},
	{ 0x63C5, SC_ERROR_PIN_CODE_INCORRECT, "Authentication failed"},
	{ 0x63C6, SC_ERROR_PIN_CODE_INCORRECT, "Authentication failed"},
	{ 0x63C7, SC_ERROR_PIN_CODE_INCORRECT, "Authentication failed"},
	{ 0x63C8, SC_ERROR_PIN_CODE_INCORRECT, "Authentication failed"},
	{ 0x63C9, SC_ERROR_PIN_CODE_INCORRECT, "Authentication failed"},
	{ 0x63CA, SC_ERROR_PIN_CODE_INCORRECT, "Authentication failed"},
	{ 0x6381, SC_ERROR_CARD_CMD_FAILED, "Warning: file filled up by last write" },
	{ 0x6581, SC_ERROR_MEMORY_FAILURE, "Memory failure" },
	{ 0x6700, SC_ERROR_WRONG_LENGTH, "Wrong length" },
	{ 0x6800, SC_ERROR_NO_CARD_SUPPORT, "Functions in CLA not supported" },
	{ 0x6881, SC_ERROR_NO_CARD_SUPPORT, "Logical channel not supported" },
	{ 0x6882, SC_ERROR_NO_CARD_SUPPORT, "Secure messaging not supported" },
	{ 0x6900, SC_ERROR_NOT_ALLOWED, "Command not allowed" },
	{ 0x6981, SC_ERROR_CARD_CMD_FAILED, "Command incompatible with file structure" },
	{ 0x6982, SC_ERROR_SECURITY_STATUS_NOT_SATISFIED, "Security status not satisfied" },
	{ 0x6983, SC_ERROR_AUTH_METHOD_BLOCKED, "Authentication method blocked" },
	{ 0x6984, SC_ERROR_REF_DATA_NOT_USABLE, "Referenced data not usable" },
	{ 0x6985, SC_ERROR_NOT_ALLOWED, "Conditions of use not satisfied" },
	{ 0x6986, SC_ERROR_NOT_ALLOWED, "Command not allowed (no current EF)" },
	{ 0x6987, SC_ERROR_INCORRECT_PARAMETERS,"Expected SM data objects missing" },
	{
	0x6988, SC_ERROR_INCORRECT_PARAMETERS,"SM data objects incorrect" },
	{ 0x6A00, SC_ERROR_INCORRECT_PARAMETERS,"Wrong parameter(s) P1-P2" },
	{ 0x6A80, SC_ERROR_INCORRECT_PARAMETERS,"Incorrect parameters in the data field" },
	{ 0x6A81, SC_ERROR_NO_CARD_SUPPORT, "Function not supported" },
	{ 0x6A82, SC_ERROR_FILE_NOT_FOUND, "File not found" },
	{ 0x6A83, SC_ERROR_RECORD_NOT_FOUND, "Record not found" },
	{ 0x6A84, SC_ERROR_NOT_ENOUGH_MEMORY, "Not enough memory space in the file" },
	{ 0x6A85, SC_ERROR_INCORRECT_PARAMETERS,"Lc inconsistent with TLV structure" },
	{ 0x6A86, SC_ERROR_INCORRECT_PARAMETERS,"Incorrect parameters P1-P2" },
	{ 0x6A87, SC_ERROR_INCORRECT_PARAMETERS,"Lc inconsistent with P1-P2" },
	{ 0x6A88, SC_ERROR_DATA_OBJECT_NOT_FOUND,"Referenced data not found" },
	{ 0x6A89, SC_ERROR_FILE_ALREADY_EXISTS, "File already exists"},
	{ 0x6A8A, SC_ERROR_FILE_ALREADY_EXISTS, "DF name already exists"},
	{ 0x6B00, SC_ERROR_INCORRECT_PARAMETERS,"Wrong parameter(s) P1-P2" },
	{ 0x6D00, SC_ERROR_INS_NOT_SUPPORTED, "Instruction code not supported or invalid" },
	{ 0x6E00, SC_ERROR_CLASS_NOT_SUPPORTED, "Class not supported" },
	{ 0x6F00, SC_ERROR_CARD_CMD_FAILED, "No precise diagnosis" },
	{ 0x9000,SC_SUCCESS, NULL }
};

/* Forward declarations for helpers defined later in the file. */
static int epass2003_transmit_apdu(struct sc_card *card, struct sc_apdu *apdu);
static int epass2003_select_file(struct sc_card *card, const sc_path_t * in_path, sc_file_t ** file_out);
int epass2003_refresh(struct sc_card *card);
static int hash_data(const unsigned char *data, size_t datalen, unsigned char *hash, unsigned int mechanismType);

/* Translate SW1/SW2 into an OpenSC error via the table above;
 * 0x6C (wrong Le) is handled specially
 * (body continues on the following source line). */
static int
epass2003_check_sw(struct sc_card *card, unsigned int sw1, unsigned int sw2)
{
	const int err_count = sizeof(epass2003_errors)/sizeof(epass2003_errors[0]);
	int i;

	/* Handle special cases here */
	if (sw1 == 0x6C) {
		sc_log(card->ctx, "Wrong length; correct length is %d", sw2);
		return SC_ERROR_WRONG_LENGTH;
	}

	for (i = 0; i < err_count; i++) {
		if (epass2003_errors[i].SWs == ((sw1 << 8) | sw2)) {
			sc_log(card->ctx, "%s",
					/* (continuation of epass2003_check_sw) */
					epass2003_errors[i].errorstr);
			return epass2003_errors[i].errorno;
		}
	}

	sc_log(card->ctx, "Unknown SWs; SW1=%02X, SW2=%02X", sw1, sw2);
	return SC_ERROR_CARD_CMD_FAILED;
}

/* Transmit wrapper: on SW 6985/6988 (SM state lost) re-establish the
 * secure channel once and retry the APDU. */
static int
sc_transmit_apdu_t(sc_card_t *card, sc_apdu_t *apdu)
{
	int r = sc_transmit_apdu(card, apdu);

	if ( ((0x69 == apdu->sw1) && (0x85 == apdu->sw2)) ||
			((0x69 == apdu->sw1) && (0x88 == apdu->sw2))) {
		epass2003_refresh(card);
		r = sc_transmit_apdu(card, apdu);
	}
	return r;
}

/* Generic EVP encrypt helper: no padding, IV copied so the caller's IV
 * is not modified. Returns SC_SUCCESS or SC_ERROR_INTERNAL. */
static int
openssl_enc(const EVP_CIPHER * cipher, const unsigned char *key,
		const unsigned char *iv, const unsigned char *input,
		size_t length, unsigned char *output)
{
	int r = SC_ERROR_INTERNAL;
	EVP_CIPHER_CTX * ctx = NULL;
	int outl = 0;
	int outl_tmp = 0;
	unsigned char iv_tmp[EVP_MAX_IV_LENGTH] = { 0 };

	memcpy(iv_tmp, iv, EVP_MAX_IV_LENGTH);
	ctx = EVP_CIPHER_CTX_new();
	if (ctx == NULL)
		goto out;

	EVP_EncryptInit_ex(ctx, cipher, NULL, key, iv_tmp);
	EVP_CIPHER_CTX_set_padding(ctx, 0);

	if (!EVP_EncryptUpdate(ctx, output, &outl, input, length))
		goto out;

	if (!EVP_EncryptFinal_ex(ctx, output + outl, &outl_tmp))
		goto out;

	r = SC_SUCCESS;
out:
	if (ctx)
		EVP_CIPHER_CTX_free(ctx);
	return r;
}

/* Generic EVP decrypt helper, mirror of openssl_enc(). */
static int
openssl_dec(const EVP_CIPHER * cipher, const unsigned char *key,
		const unsigned char *iv, const unsigned char *input,
		size_t length, unsigned char *output)
{
	int r = SC_ERROR_INTERNAL;
	EVP_CIPHER_CTX * ctx = NULL;
	int outl = 0;
	int outl_tmp = 0;
	unsigned char iv_tmp[EVP_MAX_IV_LENGTH] = { 0 };

	memcpy(iv_tmp, iv, EVP_MAX_IV_LENGTH);
	ctx = EVP_CIPHER_CTX_new();
	if (ctx == NULL)
		goto out;

	EVP_DecryptInit_ex(ctx, cipher, NULL, key, iv_tmp);
	EVP_CIPHER_CTX_set_padding(ctx, 0);

	if (!EVP_DecryptUpdate(ctx, output, &outl, input, length))
		goto out;

	if (!EVP_DecryptFinal_ex(ctx, output + outl, &outl_tmp))
		goto out;

	r = SC_SUCCESS;
out:
	if (ctx)
		EVP_CIPHER_CTX_free(ctx);
	return r;
}

/* AES-128-ECB encrypt with a zero IV (IV unused in ECB)
 * (body continues on the following source line). */
static int
aes128_encrypt_ecb(const unsigned char *key, int keysize,
		const unsigned char *input, size_t length, unsigned char *output)
{
	unsigned char
	/* (continuation of aes128_encrypt_ecb) */
	iv[EVP_MAX_IV_LENGTH] = { 0 };
	return openssl_enc(EVP_aes_128_ecb(), key, iv, input, length, output);
}

/* AES-128-CBC encrypt. */
static int
aes128_encrypt_cbc(const unsigned char *key, int keysize, unsigned char iv[16],
		const unsigned char *input, size_t length, unsigned char *output)
{
	return openssl_enc(EVP_aes_128_cbc(), key, iv, input, length, output);
}

/* AES-128-CBC decrypt. */
static int
aes128_decrypt_cbc(const unsigned char *key, int keysize, unsigned char iv[16],
		const unsigned char *input, size_t length, unsigned char *output)
{
	return openssl_dec(EVP_aes_128_cbc(), key, iv, input, length, output);
}

/* 3DES-ECB encrypt; a 16-byte (2-key) input is expanded to K1|K2|K1. */
static int
des3_encrypt_ecb(const unsigned char *key, int keysize,
		const unsigned char *input, int length, unsigned char *output)
{
	unsigned char iv[EVP_MAX_IV_LENGTH] = { 0 };
	unsigned char bKey[24] = { 0 };

	if (keysize == 16) {
		memcpy(&bKey[0], key, 16);
		memcpy(&bKey[16], key, 8);
	}
	else {
		memcpy(&bKey[0], key, 24);
	}

	return openssl_enc(EVP_des_ede3(), bKey, iv, input, length, output);
}

/* 3DES-CBC encrypt; 2-key input expanded to K1|K2|K1 as above. */
static int
des3_encrypt_cbc(const unsigned char *key, int keysize, unsigned char iv[EVP_MAX_IV_LENGTH],
		const unsigned char *input, size_t length, unsigned char *output)
{
	unsigned char bKey[24] = { 0 };

	if (keysize == 16) {
		memcpy(&bKey[0], key, 16);
		memcpy(&bKey[16], key, 8);
	}
	else {
		memcpy(&bKey[0], key, 24);
	}

	return openssl_enc(EVP_des_ede3_cbc(), bKey, iv, input, length, output);
}

/* 3DES-CBC decrypt; 2-key input expanded to K1|K2|K1 as above. */
static int
des3_decrypt_cbc(const unsigned char *key, int keysize, unsigned char iv[EVP_MAX_IV_LENGTH],
		const unsigned char *input, size_t length, unsigned char *output)
{
	unsigned char bKey[24] = { 0 };

	if (keysize == 16) {
		memcpy(&bKey[0], key, 16);
		memcpy(&bKey[16], key, 8);
	}
	else {
		memcpy(&bKey[0], key, 24);
	}

	return openssl_dec(EVP_des_ede3_cbc(), bKey, iv, input, length, output);
}

/* Single-DES-CBC encrypt (used for retail MAC computation). */
static int
des_encrypt_cbc(const unsigned char *key, int keysize, unsigned char iv[EVP_MAX_IV_LENGTH],
		const unsigned char *input, size_t length, unsigned char *output)
{
	return openssl_enc(EVP_des_cbc(), key, iv, input, length, output);
}

/* Single-DES-CBC decrypt (declaration continues on the next source line). */
static int
/* (continuation: single-DES-CBC decrypt, used in retail MAC) */
des_decrypt_cbc(const unsigned char *key, int keysize, unsigned char iv[EVP_MAX_IV_LENGTH],
		const unsigned char *input, size_t length, unsigned char *output)
{
	return openssl_dec(EVP_des_cbc(), key, iv, input, length, output);
}

/* Generic EVP message-digest helper. */
static int
openssl_dig(const EVP_MD * digest, const unsigned char *input,
		size_t length, unsigned char *output)
{
	int r = 0;
	EVP_MD_CTX *ctx = NULL;
	unsigned outl = 0;

	ctx = EVP_MD_CTX_create();
	if (ctx == NULL) {
		r = SC_ERROR_OUT_OF_MEMORY;
		goto err;
	}

	EVP_MD_CTX_init(ctx);
	EVP_DigestInit_ex(ctx, digest, NULL);
	if (!EVP_DigestUpdate(ctx, input, length)) {
		r = SC_ERROR_INTERNAL;
		goto err;
	}

	if (!EVP_DigestFinal_ex(ctx, output, &outl)) {
		r = SC_ERROR_INTERNAL;
		goto err;
	}
	r = SC_SUCCESS;
err:
	if (ctx)
		EVP_MD_CTX_destroy(ctx);
	return r;
}

/* SHA-1 digest (20 bytes to 'output'). */
static int
sha1_digest(const unsigned char *input, size_t length, unsigned char *output)
{
	return openssl_dig(EVP_sha1(), input, length, output);
}

/* SHA-256 digest (32 bytes to 'output'). */
static int
sha256_digest(const unsigned char *input, size_t length, unsigned char *output)
{
	return openssl_dig(EVP_sha256(), input, length, output);
}

/* SCP01 INITIALIZE UPDATE (INS 0x50): exchange randoms with the card in
 * plain mode; card random and cryptogram land in 'result'
 * (body continues on the following source line). */
static int
gen_init_key(struct sc_card *card, unsigned char *key_enc, unsigned char *key_mac,
		unsigned char *result, unsigned char key_type)
{
	int r;
	struct sc_apdu apdu;
	unsigned char data[256] = { 0 };
	unsigned char tmp_sm;
	unsigned long blocksize = 0;
	unsigned char cryptogram[256] = { 0 };	/* host cryptogram */
	unsigned char iv[16] = { 0 };
	epass2003_exdata *exdata = NULL;

	if (!card->drv_data)
		return SC_ERROR_INVALID_ARGUMENTS;

	exdata = (epass2003_exdata *)card->drv_data;

	LOG_FUNC_CALLED(card->ctx);

	sc_format_apdu(card, &apdu, SC_APDU_CASE_4_SHORT, 0x50, 0x00, 0x00);
	apdu.cla = 0x80;
	apdu.lc = apdu.datalen = sizeof(g_random);
	apdu.data = g_random;	/* host random */
	apdu.le = apdu.resplen = 28;
	apdu.resp = result;	/* card random is result[12~19] */

	/* temporarily force plain transport for the handshake itself */
	tmp_sm = exdata->sm;
	exdata->sm = SM_PLAIN;
	r = epass2003_transmit_apdu(card, &apdu);
	exdata->sm = tmp_sm;
	LOG_TEST_RET(card->ctx, r, "APDU gen_init_key failed");

	r =
	/* (continuation of gen_init_key) */
	sc_check_sw(card, apdu.sw1, apdu.sw2);
	LOG_TEST_RET(card->ctx, r, "gen_init_key failed");

	/* Step 1 - Generate Derivation data */
	memcpy(data, &result[16], 4);
	memcpy(&data[4], g_random, 4);
	memcpy(&data[8], &result[12], 4);
	memcpy(&data[12], &g_random[4], 4);

	/* Step 2,3 - Create S-ENC/S-MAC Session Key */
	if (KEY_TYPE_AES == key_type) {
		aes128_encrypt_ecb(key_enc, 16, data, 16, exdata->sk_enc);
		aes128_encrypt_ecb(key_mac, 16, data, 16, exdata->sk_mac);
	}
	else {
		des3_encrypt_ecb(key_enc, 16, data, 16, exdata->sk_enc);
		des3_encrypt_ecb(key_mac, 16, data, 16, exdata->sk_mac);
	}

	/* host random || card random, 0x80-padded to the cipher block size */
	memcpy(data, g_random, 8);
	memcpy(&data[8], &result[12], 8);
	data[16] = 0x80;
	blocksize = (key_type == KEY_TYPE_AES ? 16 : 8);
	memset(&data[17], 0x00, blocksize - 1);

	/* calculate host cryptogram */
	if (KEY_TYPE_AES == key_type)
		aes128_encrypt_cbc(exdata->sk_enc, 16, iv, data, 16 + blocksize, cryptogram);
	else
		des3_encrypt_cbc(exdata->sk_enc, 16, iv, data, 16 + blocksize, cryptogram);

	/* verify card cryptogram */
	if (0 != memcmp(&cryptogram[16], &result[20], 8))
		LOG_FUNC_RETURN(card->ctx, SC_ERROR_CARD_CMD_FAILED);

	LOG_FUNC_RETURN(card->ctx, SC_SUCCESS);
}

/* SCP01 EXTERNAL AUTHENTICATE (INS 0x82, CLA 0x84): prove possession of
 * the session keys to the card and seed the MAC ICV
 * (body continues on the following source line). */
static int
verify_init_key(struct sc_card *card, unsigned char *ran_key, unsigned char key_type)
{
	int r;
	struct sc_apdu apdu;
	unsigned long blocksize = (key_type == KEY_TYPE_AES ?
			/* (continuation of verify_init_key) */
			16 : 8);
	unsigned char data[256] = { 0 };
	unsigned char cryptogram[256] = { 0 };	/* host cryptogram */
	unsigned char iv[16] = { 0 };
	unsigned char mac[256] = { 0 };
	unsigned long i;
	unsigned char tmp_sm;
	epass2003_exdata *exdata = NULL;

	if (!card->drv_data)
		return SC_ERROR_INVALID_ARGUMENTS;

	exdata = (epass2003_exdata *)card->drv_data;

	LOG_FUNC_CALLED(card->ctx);

	/* card random || host random, 0x80-padded to block size */
	memcpy(data, ran_key, 8);
	memcpy(&data[8], g_random, 8);
	data[16] = 0x80;
	memset(&data[17], 0x00, blocksize - 1);
	memset(iv, 0, 16);

	/* calculate host cryptogram */
	if (KEY_TYPE_AES == key_type) {
		aes128_encrypt_cbc(exdata->sk_enc, 16, iv, data, 16 + blocksize,
				cryptogram);
	}
	else {
		des3_encrypt_cbc(exdata->sk_enc, 16, iv, data, 16 + blocksize,
				cryptogram);
	}

	/* APDU header + cryptogram + padding for the initial MAC */
	memset(data, 0, sizeof(data));
	memcpy(data, "\x84\x82\x03\x00\x10", 5);
	memcpy(&data[5], &cryptogram[16], 8);
	memcpy(&data[13], "\x80\x00\x00", 3);

	/* calculate mac icv */
	memset(iv, 0x00, 16);
	if (KEY_TYPE_AES == key_type) {
		aes128_encrypt_cbc(exdata->sk_mac, 16, iv, data, 16, mac);
		i = 0;
	}
	else {
		des3_encrypt_cbc(exdata->sk_mac, 16, iv, data, 16, mac);
		i = 8;	/* DES: MAC is the last 8-byte block */
	}

	/* save mac icv */
	memset(exdata->icv_mac, 0x00, 16);
	memcpy(exdata->icv_mac, &mac[i], 8);

	/* verify host cryptogram */
	memcpy(data, &cryptogram[16], 8);
	memcpy(&data[8], &mac[i], 8);
	sc_format_apdu(card, &apdu, SC_APDU_CASE_3_SHORT, 0x82, 0x03, 0x00);
	apdu.cla = 0x84;
	apdu.lc = apdu.datalen = 16;
	apdu.data = data;

	/* the handshake APDU itself goes out in plain mode */
	tmp_sm = exdata->sm;
	exdata->sm = SM_PLAIN;
	r = epass2003_transmit_apdu(card, &apdu);
	exdata->sm = tmp_sm;
	LOG_TEST_RET(card->ctx, r, "APDU verify_init_key failed");
	r = sc_check_sw(card, apdu.sw1, apdu.sw2);
	LOG_TEST_RET(card->ctx, r, "verify_init_key failed");
	return r;
}

/* Full SCP01 mutual authentication: INITIALIZE UPDATE then EXTERNAL
 * AUTHENTICATE (body continues on the following source line). */
static int
mutual_auth(struct sc_card *card, unsigned char *key_enc,
		unsigned char *key_mac)
{
	struct sc_context *ctx = card->ctx;
	int r;
	unsigned char result[256] = { 0 };
	unsigned char ran_key[8] = { 0 };
	epass2003_exdata *exdata = NULL;

	if (!card->drv_data)
		return SC_ERROR_INVALID_ARGUMENTS;

	exdata =
	/* (continuation of mutual_auth) */
	(epass2003_exdata *)card->drv_data;

	LOG_FUNC_CALLED(ctx);

	r = gen_init_key(card, key_enc, key_mac, result, exdata->smtype);
	LOG_TEST_RET(ctx, r, "gen_init_key failed");
	memcpy(ran_key, &result[12], 8);

	r = verify_init_key(card, ran_key, exdata->smtype);
	LOG_TEST_RET(ctx, r, "verify_init_key failed");

	LOG_FUNC_RETURN(ctx, r);
}

/* Re-establish the SCP01 secure channel with the transport keys
 * (called after the card reports SM state loss). */
int
epass2003_refresh(struct sc_card *card)
{
	int r = SC_SUCCESS;
	epass2003_exdata *exdata = NULL;

	if (!card->drv_data)
		return SC_ERROR_INVALID_ARGUMENTS;

	exdata = (epass2003_exdata *)card->drv_data;

	if (exdata->sm) {
		card->sm_ctx.sm_mode = 0;
		r = mutual_auth(card, g_init_key_enc, g_init_key_mac);
		card->sm_ctx.sm_mode = SM_MODE_TRANSMIT;
		LOG_TEST_RET(card->ctx, r, "mutual_auth failed");
	}
	return r;
}

/* Data(TLV)=0x87|L|0x01+Cipher */
/* Build the encrypted-data TLV of an SM APDU: 0x80-pad the plaintext to
 * the cipher block size, encode Lc' (extended form above 0x7E), then
 * encrypt under the session key
 * (body continues on the following source line). */
static int
construct_data_tlv(struct sc_card *card, struct sc_apdu *apdu, unsigned char *apdu_buf,
		unsigned char *data_tlv, size_t * data_tlv_len,
		const unsigned char key_type)
{
	size_t block_size = (KEY_TYPE_AES == key_type ? 16 : 8);
	unsigned char pad[4096] = { 0 };
	size_t pad_len;
	size_t tlv_more;	/* increased tlv length */
	unsigned char iv[16] = { 0 };
	epass2003_exdata *exdata = NULL;

	if (!card->drv_data)
		return SC_ERROR_INVALID_ARGUMENTS;

	exdata = (epass2003_exdata *)card->drv_data;

	/* padding */
	apdu_buf[block_size] = 0x87;
	memcpy(pad, apdu->data, apdu->lc);
	pad[apdu->lc] = 0x80;

	if ((apdu->lc + 1) % block_size)
		pad_len = ((apdu->lc + 1) / block_size + 1) * block_size;
	else
		pad_len = apdu->lc + 1;

	/* encode Lc' */
	if (pad_len > 0x7E) {
		/* Lc' > 0x7E, use extended APDU */
		apdu_buf[block_size + 1] = 0x82;
		apdu_buf[block_size + 2] = (unsigned char)((pad_len + 1) / 0x100);
		apdu_buf[block_size + 3] = (unsigned char)((pad_len + 1) % 0x100);
		apdu_buf[block_size + 4] = 0x01;
		tlv_more = 5;
	}
	else {
		apdu_buf[block_size + 1] = (unsigned char)pad_len + 1;
		apdu_buf[block_size + 2] = 0x01;
		tlv_more = 3;
	}
	memcpy(data_tlv, &apdu_buf[block_size], tlv_more);

	/* encrypt Data */
	if (KEY_TYPE_AES == key_type)
		/* (continuation of construct_data_tlv: encrypt the padded data) */
		aes128_encrypt_cbc(exdata->sk_enc, 16, iv, pad, pad_len,
				apdu_buf + block_size + tlv_more);
	else
		des3_encrypt_cbc(exdata->sk_enc, 16, iv, pad, pad_len,
				apdu_buf + block_size + tlv_more);

	memcpy(data_tlv + tlv_more, apdu_buf + block_size + tlv_more, pad_len);
	*data_tlv_len = tlv_more + pad_len;
	return 0;
}

/* Le(TLV)=0x97|L|Le */
/* Build the Le TLV of an SM APDU; 2-byte Le above 0x7F. */
static int
construct_le_tlv(struct sc_apdu *apdu, unsigned char *apdu_buf, size_t data_tlv_len,
		unsigned char *le_tlv, size_t * le_tlv_len,
		const unsigned char key_type)
{
	size_t block_size = (KEY_TYPE_AES == key_type ? 16 : 8);

	*(apdu_buf + block_size + data_tlv_len) = 0x97;
	if (apdu->le > 0x7F) {
		/* Le' > 0x7E, use extended APDU */
		*(apdu_buf + block_size + data_tlv_len + 1) = 2;
		*(apdu_buf + block_size + data_tlv_len + 2) = (unsigned char)(apdu->le / 0x100);
		*(apdu_buf + block_size + data_tlv_len + 3) = (unsigned char)(apdu->le % 0x100);
		memcpy(le_tlv, apdu_buf + block_size + data_tlv_len, 4);
		*le_tlv_len = 4;
	}
	else {
		*(apdu_buf + block_size + data_tlv_len + 1) = 1;
		*(apdu_buf + block_size + data_tlv_len + 2) = (unsigned char)apdu->le;
		memcpy(le_tlv, apdu_buf + block_size + data_tlv_len, 3);
		*le_tlv_len = 3;
	}
	return 0;
}

/* MAC(TLV)=0x8e|0x08|MAC */
/* Compute the MAC TLV over the padded SM APDU using the incremented ICV;
 * AES uses CBC-MAC, DES uses a retail MAC (3-step DES)
 * (body continues on the following source line). */
static int
construct_mac_tlv(struct sc_card *card, unsigned char *apdu_buf, size_t data_tlv_len,
		size_t le_tlv_len, unsigned char *mac_tlv, size_t * mac_tlv_len,
		const unsigned char key_type)
{
	size_t block_size = (KEY_TYPE_AES == key_type ? 16 : 8);
	unsigned char mac[4096] = { 0 };
	size_t mac_len;
	unsigned char icv[16] = { 0 };
	int i = (KEY_TYPE_AES == key_type ?
/*
 * Body of construct_mac_tlv(): pads the MAC input (0x80 + zero fill to a
 * block boundary, plus one leading ICV block), increments the ICV counter
 * stored in exdata->icv_mac (big-endian increment with carry, starting from
 * the least significant byte), then computes the MAC:
 *  - AES: AES-128-CBC over the buffer, taking 8 bytes of the last block;
 *  - DES: CBC-MAC with a final 3DES "retail MAC" step (decrypt with the
 *    second key half, re-encrypt with the first).
 * The 8-byte MAC is written at mac_tlv+2; the 0x8E/0x08 tag bytes are set by
 * the caller (encode_apdu).
 */
15 : 7); epass2003_exdata *exdata = NULL; if (!card->drv_data) return SC_ERROR_INVALID_ARGUMENTS; exdata = (epass2003_exdata *)card->drv_data; if (0 == data_tlv_len && 0 == le_tlv_len) { mac_len = block_size; } else { /* padding */ *(apdu_buf + block_size + data_tlv_len + le_tlv_len) = 0x80; if ((data_tlv_len + le_tlv_len + 1) % block_size) mac_len = (((data_tlv_len + le_tlv_len + 1) / block_size) + 1) * block_size + block_size; else mac_len = data_tlv_len + le_tlv_len + 1 + block_size; memset((apdu_buf + block_size + data_tlv_len + le_tlv_len + 1), 0, (mac_len - (data_tlv_len + le_tlv_len + 1))); } /* increase icv */ for (; i >= 0; i--) { if (exdata->icv_mac[i] == 0xff) { exdata->icv_mac[i] = 0; } else { exdata->icv_mac[i]++; break; } } /* calculate MAC */ memset(icv, 0, sizeof(icv)); memcpy(icv, exdata->icv_mac, 16); if (KEY_TYPE_AES == key_type) { aes128_encrypt_cbc(exdata->sk_mac, 16, icv, apdu_buf, mac_len, mac); memcpy(mac_tlv + 2, &mac[mac_len - 16], 8); } else { unsigned char iv[EVP_MAX_IV_LENGTH] = { 0 }; unsigned char tmp[8] = { 0 }; des_encrypt_cbc(exdata->sk_mac, 8, icv, apdu_buf, mac_len, mac); des_decrypt_cbc(&exdata->sk_mac[8], 8, iv, &mac[mac_len - 8], 8, tmp); memset(iv, 0x00, sizeof iv); des_encrypt_cbc(exdata->sk_mac, 8, iv, tmp, 8, mac_tlv + 2); } *mac_tlv_len = 2 + 8; return 0; } /* According to GlobalPlatform Card Specification's SCP01 * encode APDU from * CLA INS P1 P2 [Lc] Data [Le] * to * CLA INS P1 P2 Lc' Data' [Le] * where * Data'=Data(TLV)+Le(TLV)+MAC(TLV) */ static int encode_apdu(struct sc_card *card, struct sc_apdu *plain, struct sc_apdu *sm, unsigned char *apdu_buf, size_t * apdu_buf_len) { size_t block_size = 0; unsigned char dataTLV[4096] = { 0 }; size_t data_tlv_len = 0; unsigned char le_tlv[256] = { 0 }; size_t le_tlv_len = 0; size_t mac_tlv_len = 10; size_t tmp_lc = 0; size_t tmp_le = 0; unsigned char mac_tlv[256] = { 0 }; epass2003_exdata *exdata = NULL; mac_tlv[0] = 0x8E; mac_tlv[1] = 8; /* size_t plain_le = 0; */ if 
/*
 * Body of encode_apdu(): wraps the plain APDU into the SM form
 * CLA INS P1 P2 Lc' Data' [Le] where Data' = Data TLV + Le TLV + MAC TLV,
 * switching to the extended-APDU case when Lc' exceeds 0xFF or when the Le
 * TLV used the extended encoding.
 * NOTE(review): block_size here is (KEY_TYPE_DES == smtype ? 16 : 8), i.e.
 * 16 for DES and 8 for AES - the opposite of every other helper in this
 * file, which uses (KEY_TYPE_AES == key_type ? 16 : 8).  This looks
 * inverted; confirm against the card's actual behavior before changing.
 */
(!card->drv_data) return SC_ERROR_INVALID_ARGUMENTS; exdata = (epass2003_exdata*)card->drv_data; block_size = (KEY_TYPE_DES == exdata->smtype ? 16 : 8); sm->cse = SC_APDU_CASE_4_SHORT; apdu_buf[0] = (unsigned char)plain->cla; apdu_buf[1] = (unsigned char)plain->ins; apdu_buf[2] = (unsigned char)plain->p1; apdu_buf[3] = (unsigned char)plain->p2; /* plain_le = plain->le; */ /* padding */ apdu_buf[4] = 0x80; memset(&apdu_buf[5], 0x00, block_size - 5); /* Data -> Data' */ if (plain->lc != 0) if (0 != construct_data_tlv(card, plain, apdu_buf, dataTLV, &data_tlv_len, exdata->smtype)) return -1; if (plain->le != 0 || (plain->le == 0 && plain->resplen != 0)) if (0 != construct_le_tlv(plain, apdu_buf, data_tlv_len, le_tlv, &le_tlv_len, exdata->smtype)) return -1; if (0 != construct_mac_tlv(card, apdu_buf, data_tlv_len, le_tlv_len, mac_tlv, &mac_tlv_len, exdata->smtype)) return -1; memset(apdu_buf + 4, 0, *apdu_buf_len - 4); sm->lc = sm->datalen = data_tlv_len + le_tlv_len + mac_tlv_len; if (sm->lc > 0xFF) { sm->cse = SC_APDU_CASE_4_EXT; apdu_buf[4] = (unsigned char)((sm->lc) / 0x10000); apdu_buf[5] = (unsigned char)(((sm->lc) / 0x100) % 0x100); apdu_buf[6] = (unsigned char)((sm->lc) % 0x100); tmp_lc = 3; } else { apdu_buf[4] = (unsigned char)sm->lc; tmp_lc = 1; } memcpy(apdu_buf + 4 + tmp_lc, dataTLV, data_tlv_len); memcpy(apdu_buf + 4 + tmp_lc + data_tlv_len, le_tlv, le_tlv_len); memcpy(apdu_buf + 4 + tmp_lc + data_tlv_len + le_tlv_len, mac_tlv, mac_tlv_len); memcpy((unsigned char *)sm->data, apdu_buf + 4 + tmp_lc, sm->datalen); *apdu_buf_len = 0; if (4 == le_tlv_len) { sm->cse = SC_APDU_CASE_4_EXT; *(apdu_buf + 4 + tmp_lc + sm->lc) = (unsigned char)(plain->le / 0x100); *(apdu_buf + 4 + tmp_lc + sm->lc + 1) = (unsigned char)(plain->le % 0x100); tmp_le = 2; } else if (3 == le_tlv_len) { *(apdu_buf + 4 + tmp_lc + sm->lc) = (unsigned char)plain->le; tmp_le = 1; } *apdu_buf_len += 4 + tmp_lc + data_tlv_len + le_tlv_len + mac_tlv_len + tmp_le; /* sm->le = calc_le(plain_le); */ 
/*
 * End of encode_apdu(), then:
 *
 * epass2003_sm_wrap_apdu(): copies the plain APDU's header/control fields
 * into the SM APDU; when the CLA secure-messaging bits (0x0C) are set it
 * wraps via encode_apdu(), otherwise it passes data/resp through unchanged.
 *
 * decrypt_response() begins here: unwraps an SCP01 response
 * (Data TLV 0x87 + SW TLV 0x99 + MAC TLV 0x8e).  A leading 0x99 means the
 * response carries no ciphertext.  The length parse handles one-byte,
 * 0x81-prefixed and 0x82-prefixed TLV lengths.
 */
return 0; } static int epass2003_sm_wrap_apdu(struct sc_card *card, struct sc_apdu *plain, struct sc_apdu *sm) { unsigned char buf[4096] = { 0 }; /* APDU buffer */ size_t buf_len = sizeof(buf); epass2003_exdata *exdata = NULL; if (!card->drv_data) return SC_ERROR_INVALID_ARGUMENTS; exdata = (epass2003_exdata *)card->drv_data; LOG_FUNC_CALLED(card->ctx); if (exdata->sm) plain->cla |= 0x0C; sm->cse = plain->cse; sm->cla = plain->cla; sm->ins = plain->ins; sm->p1 = plain->p1; sm->p2 = plain->p2; sm->lc = plain->lc; sm->le = plain->le; sm->control = plain->control; sm->flags = plain->flags; switch (sm->cla & 0x0C) { case 0x00: case 0x04: sm->datalen = plain->datalen; memcpy((void *)sm->data, plain->data, plain->datalen); sm->resplen = plain->resplen; memcpy(sm->resp, plain->resp, plain->resplen); break; case 0x0C: memset(buf, 0, sizeof(buf)); if (0 != encode_apdu(card, plain, sm, buf, &buf_len)) return SC_ERROR_CARD_CMD_FAILED; break; default: return SC_ERROR_INCORRECT_PARAMETERS; } return SC_SUCCESS; } /* According to GlobalPlatform Card Specification's SCP01 * decrypt APDU response from * ResponseData' SW1 SW2 * to * ResponseData SW1 SW2 * where * ResponseData'=Data(TLV)+SW12(TLV)+MAC(TLV) * where * Data(TLV)=0x87|L|Cipher * SW12(TLV)=0x99|0x02|SW1+SW2 * MAC(TLV)=0x8e|0x08|MAC */ static int decrypt_response(struct sc_card *card, unsigned char *in, size_t inlen, unsigned char *out, size_t * out_len) { size_t cipher_len; size_t i; unsigned char iv[16] = { 0 }; unsigned char plaintext[4096] = { 0 }; epass2003_exdata *exdata = NULL; if (!card->drv_data) return SC_ERROR_INVALID_ARGUMENTS; exdata = (epass2003_exdata *)card->drv_data; /* no cipher */ if (in[0] == 0x99) return 0; /* parse cipher length */ if (0x01 == in[2] && 0x82 != in[1]) { cipher_len = in[1]; i = 3; } else if (0x01 == in[3] && 0x81 == in[1]) { cipher_len = in[2]; i = 4; } else if (0x01 == in[4] && 0x82 == in[1]) { cipher_len = in[2] * 0x100; cipher_len += in[3]; i = 5; } else { return -1; } if (cipher_len 
/*
 * End of decrypt_response(): bounds-checks the parsed cipher length against
 * the input and the 4096-byte plaintext buffer before decrypting (AES-128 or
 * 3DES CBC with the session key), then strips the 0x80 padding and the
 * output-buffer size is verified before the final copy.
 *
 * epass2003_sm_unwrap_apdu(): on a successful SW, either decrypts the SM
 * response into the plain APDU or copies it through when SM is off.
 *
 * epass2003_sm_free_wrapped_apdu(): unwraps (if a plain APDU is given) and
 * frees the wrapped APDU and its data/resp buffers.
 *
 * epass2003_sm_get_wrapped_apdu() begins here.
 */
< 2 || i+cipher_len > inlen || cipher_len > sizeof plaintext) return -1; /* decrypt */ if (KEY_TYPE_AES == exdata->smtype) aes128_decrypt_cbc(exdata->sk_enc, 16, iv, &in[i], cipher_len - 1, plaintext); else des3_decrypt_cbc(exdata->sk_enc, 16, iv, &in[i], cipher_len - 1, plaintext); /* unpadding */ while (0x80 != plaintext[cipher_len - 2] && (cipher_len - 2 > 0)) cipher_len--; if (2 == cipher_len || *out_len < cipher_len - 2) return -1; memcpy(out, plaintext, cipher_len - 2); *out_len = cipher_len - 2; return 0; } static int epass2003_sm_unwrap_apdu(struct sc_card *card, struct sc_apdu *sm, struct sc_apdu *plain) { int r; size_t len = 0; epass2003_exdata *exdata = NULL; if (!card->drv_data) return SC_ERROR_INVALID_ARGUMENTS; exdata = (epass2003_exdata *)card->drv_data; LOG_FUNC_CALLED(card->ctx); r = sc_check_sw(card, sm->sw1, sm->sw2); if (r == SC_SUCCESS) { if (exdata->sm) { len = plain->resplen; if (0 != decrypt_response(card, sm->resp, sm->resplen, plain->resp, &len)) return SC_ERROR_CARD_CMD_FAILED; } else { memcpy(plain->resp, sm->resp, sm->resplen); len = sm->resplen; } } plain->resplen = len; plain->sw1 = sm->sw1; plain->sw2 = sm->sw2; sc_log(card->ctx, "unwrapped APDU: resplen %"SC_FORMAT_LEN_SIZE_T"u, SW %02X%02X", plain->resplen, plain->sw1, plain->sw2); LOG_FUNC_RETURN(card->ctx, SC_SUCCESS); } static int epass2003_sm_free_wrapped_apdu(struct sc_card *card, struct sc_apdu *plain, struct sc_apdu **sm_apdu) { struct sc_context *ctx = card->ctx; int rv = SC_SUCCESS; LOG_FUNC_CALLED(ctx); if (!sm_apdu) LOG_FUNC_RETURN(ctx, SC_ERROR_INVALID_ARGUMENTS); if (!(*sm_apdu)) LOG_FUNC_RETURN(ctx, SC_SUCCESS); if (plain) rv = epass2003_sm_unwrap_apdu(card, *sm_apdu, plain); if ((*sm_apdu)->data) { unsigned char * p = (unsigned char *)((*sm_apdu)->data); free(p); } if ((*sm_apdu)->resp) { free((*sm_apdu)->resp); } free(*sm_apdu); *sm_apdu = NULL; LOG_FUNC_RETURN(ctx, rv); } static int epass2003_sm_get_wrapped_apdu(struct sc_card *card, struct sc_apdu *plain, struct 
/*
 * Body of epass2003_sm_get_wrapped_apdu(): allocates a fresh sc_apdu with
 * SC_MAX_EXT_APDU_BUFFER_SIZE data/resp buffers, wraps the plain APDU via
 * epass2003_sm_wrap_apdu(), and hands ownership to the caller; the err path
 * releases everything on failure.
 *
 * epass2003_transmit_apdu(): thin logging wrapper around sc_transmit_apdu_t.
 *
 * get_data() begins here: GET DATA (INS 0xCA) for a tagged data object;
 * tag 0x86 is sent with SM temporarily forced off (exdata->sm toggled around
 * the transmit).
 */
sc_apdu **sm_apdu) { struct sc_context *ctx = card->ctx; struct sc_apdu *apdu = NULL; int rv; LOG_FUNC_CALLED(ctx); if (!plain || !sm_apdu) LOG_FUNC_RETURN(ctx, SC_ERROR_INVALID_ARGUMENTS); *sm_apdu = NULL; //construct new SM apdu from original apdu apdu = calloc(1, sizeof(struct sc_apdu)); if (!apdu) { rv = SC_ERROR_OUT_OF_MEMORY; goto err; } apdu->data = calloc (1, SC_MAX_EXT_APDU_BUFFER_SIZE); if (!apdu->data) { rv = SC_ERROR_OUT_OF_MEMORY; goto err; } apdu->resp = calloc (1, SC_MAX_EXT_APDU_BUFFER_SIZE); if (!apdu->resp) { rv = SC_ERROR_OUT_OF_MEMORY; goto err; } apdu->datalen = SC_MAX_EXT_APDU_BUFFER_SIZE; apdu->resplen = SC_MAX_EXT_APDU_BUFFER_SIZE; rv = epass2003_sm_wrap_apdu(card, plain, apdu); if (rv) { rv = epass2003_sm_free_wrapped_apdu(card, NULL, &apdu); if (rv < 0) goto err; } *sm_apdu = apdu; apdu = NULL; err: if (apdu) { free((unsigned char *) apdu->data); free(apdu->resp); free(apdu); apdu = NULL; } LOG_FUNC_RETURN(ctx, rv); } static int epass2003_transmit_apdu(struct sc_card *card, struct sc_apdu *apdu) { int r; LOG_FUNC_CALLED(card->ctx); r = sc_transmit_apdu_t(card, apdu); LOG_TEST_RET(card->ctx, r, "APDU transmit failed"); return r; } static int get_data(struct sc_card *card, unsigned char type, unsigned char *data, size_t datalen) { int r; struct sc_apdu apdu; unsigned char resp[SC_MAX_APDU_BUFFER_SIZE] = { 0 }; size_t resplen = SC_MAX_APDU_BUFFER_SIZE; epass2003_exdata *exdata = NULL; if (!card->drv_data) return SC_ERROR_INVALID_ARGUMENTS; exdata = (epass2003_exdata *)card->drv_data; LOG_FUNC_CALLED(card->ctx); sc_format_apdu(card, &apdu, SC_APDU_CASE_2_SHORT, 0xca, 0x01, type); apdu.resp = resp; apdu.le = 0; apdu.resplen = resplen; if (0x86 == type) { /* No SM temporarily */ unsigned char tmp_sm = exdata->sm; exdata->sm = SM_PLAIN; r = sc_transmit_apdu(card, &apdu); exdata->sm = tmp_sm; } else { r = sc_transmit_apdu_t(card, &apdu); } LOG_TEST_RET(card->ctx, r, "APDU get_data failed"); r = sc_check_sw(card, apdu.sw1, apdu.sw2); 
/*
 * End of get_data(): copies the response to the caller's buffer.
 * NOTE(review): memcpy(data, resp, datalen) copies the requested length
 * regardless of how many bytes the card actually returned (apdu.resplen) -
 * callers may read zero-padding; confirm this is intended.
 *
 * epass2003_match_card(): ATR match against epass2003_atrs.
 *
 * epass2003_init() begins here: allocates driver data, probes tag 0x86 to
 * decide AES vs DES session keys and plain vs SCP01 mode, installs the SM
 * callbacks, performs the initial mutual authentication, and registers the
 * supported RSA (512-2048, raw) and EC (256-bit ECDSA) algorithms.
 */
LOG_TEST_RET(card->ctx, r, "get_data failed"); memcpy(data, resp, datalen); return r; } /* card driver functions */ static int epass2003_match_card(struct sc_card *card) { int r; LOG_FUNC_CALLED(card->ctx); r = _sc_match_atr(card, epass2003_atrs, &card->type); if (r < 0) return 0; return 1; } static int epass2003_init(struct sc_card *card) { unsigned int flags; unsigned int ext_flags; unsigned char data[SC_MAX_APDU_BUFFER_SIZE] = { 0 }; size_t datalen = SC_MAX_APDU_BUFFER_SIZE; epass2003_exdata *exdata = NULL; LOG_FUNC_CALLED(card->ctx); card->name = "epass2003"; card->cla = 0x00; exdata = (epass2003_exdata *)calloc(1, sizeof(epass2003_exdata)); if (!exdata) return SC_ERROR_OUT_OF_MEMORY; card->drv_data = exdata; exdata->sm = SM_SCP01; /* decide FIPS/Non-FIPS mode */ if (SC_SUCCESS != get_data(card, 0x86, data, datalen)) return SC_ERROR_INVALID_CARD; if (0x01 == data[2]) exdata->smtype = KEY_TYPE_AES; else exdata->smtype = KEY_TYPE_DES; if (0x84 == data[14]) { if (0x00 == data[16]) { exdata->sm = SM_PLAIN; } } /* mutual authentication */ card->max_recv_size = 0xD8; card->max_send_size = 0xE8; card->sm_ctx.ops.open = epass2003_refresh; card->sm_ctx.ops.get_sm_apdu = epass2003_sm_get_wrapped_apdu; card->sm_ctx.ops.free_sm_apdu = epass2003_sm_free_wrapped_apdu; /* FIXME (VT): rather then set/unset 'g_sm', better to implement filter for APDUs to be wrapped */ epass2003_refresh(card); card->sm_ctx.sm_mode = SM_MODE_TRANSMIT; flags = SC_ALGORITHM_ONBOARD_KEY_GEN | SC_ALGORITHM_RSA_RAW | SC_ALGORITHM_RSA_HASH_NONE; _sc_card_add_rsa_alg(card, 512, flags, 0); _sc_card_add_rsa_alg(card, 768, flags, 0); _sc_card_add_rsa_alg(card, 1024, flags, 0); _sc_card_add_rsa_alg(card, 2048, flags, 0); //set EC Alg Flags flags = SC_ALGORITHM_ONBOARD_KEY_GEN|SC_ALGORITHM_ECDSA_HASH_SHA1|SC_ALGORITHM_ECDSA_HASH_SHA256|SC_ALGORITHM_ECDSA_HASH_NONE|SC_ALGORITHM_ECDSA_RAW; ext_flags = 0; _sc_card_add_ec_alg(card, 256, flags, ext_flags, NULL); card->caps = SC_CARD_CAP_RNG | 
/*
 * End of epass2003_init(), then:
 *
 * epass2003_finish(): frees the driver-private data.
 *
 * epass2003_hook_path() / epass2003_hook_file(): the COS encodes the SFI in
 * the low 5 bits of the FID and forbids duplicate SFIs in one DF, so these
 * helpers multiply/divide the low FID byte by FID_STEP on the way in/out for
 * the FID prefixes listed in the switch (0x29-0x34).
 *
 * epass2003_select_fid_(): SELECT by 2-byte file id.  Files under the 0x29
 * prefix (private key files) must not be selected, so a hard-coded fake FCI
 * is returned instead of transmitting; otherwise the response FCI (tag 0x6F)
 * is parsed via card->ops->process_fci and the hook is reversed on the
 * resulting file.
 */
SC_CARD_CAP_APDU_EXT; LOG_FUNC_RETURN(card->ctx, SC_SUCCESS); } static int epass2003_finish(sc_card_t *card) { epass2003_exdata *exdata = (epass2003_exdata *)card->drv_data; if (exdata) free(exdata); return SC_SUCCESS; } /* COS implement SFI as lower 5 bits of FID, and not allow same SFI at the * same DF, so use hook functions to increase/decrease FID by 0x20 */ static int epass2003_hook_path(struct sc_path *path, int inc) { u8 fid_h = path->value[path->len - 2]; u8 fid_l = path->value[path->len - 1]; switch (fid_h) { case 0x29: case 0x30: case 0x31: case 0x32: case 0x33: case 0x34: if (inc) fid_l = fid_l * FID_STEP; else fid_l = fid_l / FID_STEP; path->value[path->len - 1] = fid_l; return 1; default: break; } return 0; } static void epass2003_hook_file(struct sc_file *file, int inc) { int fidl = file->id & 0xff; int fidh = file->id & 0xff00; if (epass2003_hook_path(&file->path, inc)) { if (inc) file->id = fidh + fidl * FID_STEP; else file->id = fidh + fidl / FID_STEP; } } static int epass2003_select_fid_(struct sc_card *card, sc_path_t * in_path, sc_file_t ** file_out) { struct sc_apdu apdu; u8 buf[SC_MAX_APDU_BUFFER_SIZE] = { 0 }; u8 pathbuf[SC_MAX_PATH_SIZE], *path = pathbuf; int r, pathlen; sc_file_t *file = NULL; epass2003_hook_path(in_path, 1); memcpy(path, in_path->value, in_path->len); pathlen = in_path->len; sc_format_apdu(card, &apdu, SC_APDU_CASE_4_SHORT, 0xA4, 0x00, 0x00); switch (in_path->type) { case SC_PATH_TYPE_FILE_ID: apdu.p1 = 0; if (pathlen != 2) return SC_ERROR_INVALID_ARGUMENTS; break; default: LOG_FUNC_RETURN(card->ctx, SC_ERROR_INVALID_ARGUMENTS); } apdu.p2 = 0; /* first record, return FCI */ apdu.lc = pathlen; apdu.data = path; apdu.datalen = pathlen; if (file_out != NULL) { apdu.resp = buf; apdu.resplen = sizeof(buf); apdu.le = 0; } else { apdu.cse = (apdu.lc == 0) ? SC_APDU_CASE_1 : SC_APDU_CASE_3_SHORT; } if (path[0] == 0x29) { /* TODO:0x29 accords with FID prefix in profile */ /* Not allowed to select private key file, so fake fci. 
*/ /* 62 16 82 02 11 00 83 02 29 00 85 02 08 00 86 08 FF 90 90 90 FF FF FF FF */ apdu.resplen = 0x18; memcpy(apdu.resp, "\x6f\x16\x82\x02\x11\x00\x83\x02\x29\x00\x85\x02\x08\x00\x86\x08\xff\x90\x90\x90\xff\xff\xff\xff", apdu.resplen); apdu.resp[9] = path[1]; apdu.sw1 = 0x90; apdu.sw2 = 0x00; } else { r = sc_transmit_apdu_t(card, &apdu); LOG_TEST_RET(card->ctx, r, "APDU transmit failed"); } if (file_out == NULL) { if (apdu.sw1 == 0x61) LOG_FUNC_RETURN(card->ctx, 0); LOG_FUNC_RETURN(card->ctx, sc_check_sw(card, apdu.sw1, apdu.sw2)); } r = sc_check_sw(card, apdu.sw1, apdu.sw2); if (r) LOG_FUNC_RETURN(card->ctx, r); if (apdu.resplen < 2) LOG_FUNC_RETURN(card->ctx, SC_ERROR_UNKNOWN_DATA_RECEIVED); switch (apdu.resp[0]) { case 0x6F: file = sc_file_new(); if (file == NULL) LOG_FUNC_RETURN(card->ctx, SC_ERROR_OUT_OF_MEMORY); file->path = *in_path; if (card->ops->process_fci == NULL) { sc_file_free(file); LOG_FUNC_RETURN(card->ctx, SC_ERROR_NOT_SUPPORTED); } if ((size_t) apdu.resp[1] + 2 <= apdu.resplen) card->ops->process_fci(card, file, apdu.resp + 2, apdu.resp[1]); epass2003_hook_file(file, 0); *file_out = file; break; case 0x00: /* proprietary coding */ LOG_FUNC_RETURN(card->ctx, SC_ERROR_UNKNOWN_DATA_RECEIVED); break; default: LOG_FUNC_RETURN(card->ctx, SC_ERROR_UNKNOWN_DATA_RECEIVED); } return 0; } static int epass2003_select_fid(struct sc_card *card, unsigned int id_hi, unsigned int id_lo, sc_file_t ** file_out) { int r; sc_file_t *file = 0; sc_path_t path; memset(&path, 0, sizeof(path)); path.type = SC_PATH_TYPE_FILE_ID; path.value[0] = id_hi; path.value[1] = id_lo; path.len = 2; r = epass2003_select_fid_(card, &path, &file); LOG_TEST_RET(card->ctx, r, "APDU transmit failed"); /* update cache */ if (file && file->type == SC_FILE_TYPE_DF) { card->cache.current_path.type = SC_PATH_TYPE_PATH; card->cache.current_path.value[0] = 0x3f; card->cache.current_path.value[1] = 0x00; if (id_hi == 0x3f && id_lo == 0x00) { card->cache.current_path.len = 2; } else { 
/*
 * End of epass2003_select_fid() (path-cache update), then:
 *
 * epass2003_select_aid(): SELECT by AID with a DF-name cache short-circuit.
 * NOTE(review): inside the cache-hit branch, `if (!file_out)` tests the
 * pointer-to-pointer after `*file_out = sc_file_new()` - it presumably
 * should test `*file_out` for allocation failure; confirm and fix upstream.
 *
 * epass2003_select_path(): SELECT by path.  Normalizes the path to start at
 * the MF (3F00), walks the cached current path two bytes (one FID) at a
 * time to find the longest common prefix, and either selects the remaining
 * FIDs or recurses via epass2003_select_file() for multi-step descents.
 *
 * epass2003_select_file() begins here.
 */
card->cache.current_path.len = 4; card->cache.current_path.value[2] = id_hi; card->cache.current_path.value[3] = id_lo; } } if (file_out) *file_out = file; LOG_FUNC_RETURN(card->ctx, SC_SUCCESS); } static int epass2003_select_aid(struct sc_card *card, const sc_path_t * in_path, sc_file_t ** file_out) { int r = 0; if (card->cache.valid && card->cache.current_path.type == SC_PATH_TYPE_DF_NAME && card->cache.current_path.len == in_path->len && memcmp(card->cache.current_path.value, in_path->value, in_path->len) == 0) { if (file_out) { *file_out = sc_file_new(); if (!file_out) LOG_FUNC_RETURN(card->ctx, SC_ERROR_OUT_OF_MEMORY); } } else { r = iso_ops->select_file(card, in_path, file_out); LOG_TEST_RET(card->ctx, r, "APDU transmit failed"); /* update cache */ card->cache.current_path.type = SC_PATH_TYPE_DF_NAME; card->cache.current_path.len = in_path->len; memcpy(card->cache.current_path.value, in_path->value, in_path->len); } if (file_out) { sc_file_t *file = *file_out; file->type = SC_FILE_TYPE_DF; file->ef_structure = SC_FILE_EF_UNKNOWN; file->path.len = 0; file->size = 0; /* AID */ memcpy(file->name, in_path->value, in_path->len); file->namelen = in_path->len; file->id = 0x0000; } LOG_FUNC_RETURN(card->ctx, r); } static int epass2003_select_path(struct sc_card *card, const u8 pathbuf[16], const size_t len, sc_file_t ** file_out) { u8 n_pathbuf[SC_MAX_PATH_SIZE]; const u8 *path = pathbuf; size_t pathlen = len; int bMatch = -1; unsigned int i; int r; if (pathlen % 2 != 0 || pathlen > 6 || pathlen <= 0) LOG_FUNC_RETURN(card->ctx, SC_ERROR_INVALID_ARGUMENTS); /* if pathlen == 6 then the first FID must be MF (== 3F00) */ if (pathlen == 6 && (path[0] != 0x3f || path[1] != 0x00)) LOG_FUNC_RETURN(card->ctx, SC_ERROR_INVALID_ARGUMENTS); /* unify path (the first FID should be MF) */ if (path[0] != 0x3f || path[1] != 0x00) { n_pathbuf[0] = 0x3f; n_pathbuf[1] = 0x00; for (i = 0; i < pathlen; i++) n_pathbuf[i + 2] = pathbuf[i]; path = n_pathbuf; pathlen += 2; } /* check current 
working directory */ if (card->cache.valid && card->cache.current_path.type == SC_PATH_TYPE_PATH && card->cache.current_path.len >= 2 && card->cache.current_path.len <= pathlen) { bMatch = 0; for (i = 0; i < card->cache.current_path.len; i += 2) if (card->cache.current_path.value[i] == path[i] && card->cache.current_path.value[i + 1] == path[i + 1]) bMatch += 2; } if (card->cache.valid && bMatch > 2) { if (pathlen - bMatch == 2) { /* we are in the right directory */ return epass2003_select_fid(card, path[bMatch], path[bMatch + 1], file_out); } else if (pathlen - bMatch > 2) { /* two more steps to go */ sc_path_t new_path; /* first step: change directory */ r = epass2003_select_fid(card, path[bMatch], path[bMatch + 1], NULL); LOG_TEST_RET(card->ctx, r, "SELECT FILE (DF-ID) failed"); new_path.type = SC_PATH_TYPE_PATH; new_path.len = pathlen - bMatch - 2; memcpy(new_path.value, &(path[bMatch + 2]), new_path.len); /* final step: select file */ return epass2003_select_file(card, &new_path, file_out); } else { /* if (bMatch - pathlen == 0) */ /* done: we are already in the * requested directory */ sc_log(card->ctx, "cache hit\n"); /* copy file info (if necessary) */ if (file_out) { sc_file_t *file = sc_file_new(); if (!file) LOG_FUNC_RETURN(card->ctx, SC_ERROR_OUT_OF_MEMORY); file->id = (path[pathlen - 2] << 8) + path[pathlen - 1]; file->path = card->cache.current_path; file->type = SC_FILE_TYPE_DF; file->ef_structure = SC_FILE_EF_UNKNOWN; file->size = 0; file->namelen = 0; file->magic = SC_FILE_MAGIC; *file_out = file; } /* nothing left to do */ return SC_SUCCESS; } } else { /* no usable cache */ for (i = 0; i < pathlen - 2; i += 2) { r = epass2003_select_fid(card, path[i], path[i + 1], NULL); LOG_TEST_RET(card->ctx, r, "SELECT FILE (DF-ID) failed"); } return epass2003_select_fid(card, path[pathlen - 2], path[pathlen - 1], file_out); } } static int epass2003_select_file(struct sc_card *card, const sc_path_t * in_path, sc_file_t ** file_out) { int r; char 
/*
 * End of epass2003_select_file(): logs the cached path and dispatches on
 * path type to select-by-FID, select-by-AID, or select-by-path.
 *
 * epass2003_set_security_env(): MSE SET (INS 0x22).  The key-file FID is
 * derived as 0x2900 + 0x20 * key_ref.  For EC it records the requested
 * ECDSA hash (SHA-1 -> 0x91, SHA-256 -> 0x92) in exdata->ecAlgFlags; for RSA
 * it sets P2=0xB8.  With se_num > 0 the card is locked around the exchange
 * and an MSE RESTORE (P1=0xF2) follows.
 *
 * epass2003_restore_security_env(): no-op, always SC_SUCCESS.
 *
 * epass2003_decipher(): for EC, hashes the input (SHA-1 or SHA-256 per the
 * stored flags) and issues PSO COMPUTE DIGITAL SIGNATURE (0x2A 9E 9A); for
 * RSA it issues PSO DECIPHER (0x2A 80 86) with the raw input.
 */
pbuf[SC_MAX_PATH_STRING_SIZE]; LOG_FUNC_CALLED(card->ctx); r = sc_path_print(pbuf, sizeof(pbuf), &card->cache.current_path); if (r != SC_SUCCESS) pbuf[0] = '\0'; sc_log(card->ctx, "current path (%s, %s): %s (len: %"SC_FORMAT_LEN_SIZE_T"u)\n", card->cache.current_path.type == SC_PATH_TYPE_DF_NAME ? "aid" : "path", card->cache.valid ? "valid" : "invalid", pbuf, card->cache.current_path.len); switch (in_path->type) { case SC_PATH_TYPE_FILE_ID: if (in_path->len != 2) LOG_FUNC_RETURN(card->ctx, SC_ERROR_INVALID_ARGUMENTS); return epass2003_select_fid(card, in_path->value[0], in_path->value[1], file_out); case SC_PATH_TYPE_DF_NAME: return epass2003_select_aid(card, in_path, file_out); case SC_PATH_TYPE_PATH: return epass2003_select_path(card, in_path->value, in_path->len, file_out); default: LOG_FUNC_RETURN(card->ctx, SC_ERROR_INVALID_ARGUMENTS); } } static int epass2003_set_security_env(struct sc_card *card, const sc_security_env_t * env, int se_num) { struct sc_apdu apdu; u8 sbuf[SC_MAX_APDU_BUFFER_SIZE] = { 0 }; u8 *p; unsigned short fid = 0; int r, locked = 0; epass2003_exdata *exdata = NULL; if (!card->drv_data) return SC_ERROR_INVALID_ARGUMENTS; exdata = (epass2003_exdata *)card->drv_data; sc_format_apdu(card, &apdu, SC_APDU_CASE_3_SHORT, 0x22, 0x41, 0); p = sbuf; *p++ = 0x80; /* algorithm reference */ *p++ = 0x01; *p++ = 0x84; *p++ = 0x81; *p++ = 0x02; fid = 0x2900; fid += (unsigned short)(0x20 * (env->key_ref[0] & 0xff)); *p++ = fid >> 8; *p++ = fid & 0xff; r = p - sbuf; apdu.lc = r; apdu.datalen = r; apdu.data = sbuf; if (env->algorithm == SC_ALGORITHM_EC) { apdu.p2 = 0xB6; exdata->currAlg = SC_ALGORITHM_EC; if(env->algorithm_flags & SC_ALGORITHM_ECDSA_HASH_SHA1) { sbuf[2] = 0x91; exdata->ecAlgFlags = SC_ALGORITHM_ECDSA_HASH_SHA1; } else if (env->algorithm_flags & SC_ALGORITHM_ECDSA_HASH_SHA256) { sbuf[2] = 0x92; exdata->ecAlgFlags = SC_ALGORITHM_ECDSA_HASH_SHA256; } else { sc_log(card->ctx, "%0x Alg Not Support! 
", env->algorithm_flags); goto err; } } else if(env->algorithm == SC_ALGORITHM_RSA) { exdata->currAlg = SC_ALGORITHM_RSA; apdu.p2 = 0xB8; sc_log(card->ctx, "setenv RSA Algorithm alg_flags = %0x\n",env->algorithm_flags); } else { sc_log(card->ctx, "%0x Alg Not Support! ", env->algorithm); } if (se_num > 0) { r = sc_lock(card); LOG_TEST_RET(card->ctx, r, "sc_lock() failed"); locked = 1; } if (apdu.datalen != 0) { r = sc_transmit_apdu_t(card, &apdu); if (r) { sc_log(card->ctx, "%s: APDU transmit failed", sc_strerror(r)); goto err; } r = sc_check_sw(card, apdu.sw1, apdu.sw2); if (r) { sc_log(card->ctx, "%s: Card returned error", sc_strerror(r)); goto err; } } if (se_num <= 0) return 0; sc_format_apdu(card, &apdu, SC_APDU_CASE_3_SHORT, 0x22, 0xF2, se_num); r = sc_transmit_apdu_t(card, &apdu); sc_unlock(card); LOG_TEST_RET(card->ctx, r, "APDU transmit failed"); return sc_check_sw(card, apdu.sw1, apdu.sw2); err: if (locked) sc_unlock(card); return r; } static int epass2003_restore_security_env(struct sc_card *card, int se_num) { LOG_FUNC_CALLED(card->ctx); LOG_FUNC_RETURN(card->ctx, SC_SUCCESS); } static int epass2003_decipher(struct sc_card *card, const u8 * data, size_t datalen, u8 * out, size_t outlen) { int r; struct sc_apdu apdu; u8 rbuf[SC_MAX_APDU_BUFFER_SIZE] = { 0 }; u8 sbuf[SC_MAX_APDU_BUFFER_SIZE] = { 0 }; epass2003_exdata *exdata = NULL; LOG_FUNC_CALLED(card->ctx); if (!card->drv_data) return SC_ERROR_INVALID_ARGUMENTS; exdata = (epass2003_exdata *)card->drv_data; if(exdata->currAlg == SC_ALGORITHM_EC) { if(exdata->ecAlgFlags & SC_ALGORITHM_ECDSA_HASH_SHA1) { r = hash_data(data, datalen, sbuf, SC_ALGORITHM_ECDSA_HASH_SHA1); LOG_TEST_RET(card->ctx, r, "hash_data failed"); sc_format_apdu(card, &apdu, SC_APDU_CASE_3,0x2A, 0x9E, 0x9A); apdu.data = sbuf; apdu.lc = 0x14; apdu.datalen = 0x14; } else if (exdata->ecAlgFlags & SC_ALGORITHM_ECDSA_HASH_SHA256) { r = hash_data(data, datalen, sbuf, SC_ALGORITHM_ECDSA_HASH_SHA256); LOG_TEST_RET(card->ctx, r, "hash_data 
failed"); sc_format_apdu(card, &apdu, SC_APDU_CASE_3,0x2A, 0x9E, 0x9A); apdu.data = sbuf; apdu.lc = 0x20; apdu.datalen = 0x20; } else { return SC_ERROR_NOT_SUPPORTED; } apdu.resp = rbuf; apdu.resplen = sizeof(rbuf); apdu.le = 0; r = sc_transmit_apdu_t(card, &apdu); LOG_TEST_RET(card->ctx, r, "APDU transmit failed"); if (apdu.sw1 == 0x90 && apdu.sw2 == 0x00) { size_t len = apdu.resplen > outlen ? outlen : apdu.resplen; memcpy(out, apdu.resp, len); LOG_FUNC_RETURN(card->ctx, len); } LOG_FUNC_RETURN(card->ctx, sc_check_sw(card, apdu.sw1, apdu.sw2)); } else if(exdata->currAlg == SC_ALGORITHM_RSA) { sc_format_apdu(card, &apdu, SC_APDU_CASE_4_EXT, 0x2A, 0x80, 0x86); apdu.resp = rbuf; apdu.resplen = sizeof(rbuf); apdu.le = 0; memcpy(sbuf, data, datalen); apdu.data = sbuf; apdu.lc = datalen; apdu.datalen = datalen; } else { sc_format_apdu(card, &apdu, SC_APDU_CASE_4_EXT, 0x2A, 0x80, 0x86); apdu.resp = rbuf; apdu.resplen = sizeof(rbuf); apdu.le = 256; memcpy(sbuf, data, datalen); apdu.data = sbuf; apdu.lc = datalen; apdu.datalen = datalen; } r = sc_transmit_apdu_t(card, &apdu); LOG_TEST_RET(card->ctx, r, "APDU transmit failed"); if (apdu.sw1 == 0x90 && apdu.sw2 == 0x00) { size_t len = apdu.resplen > outlen ? 
/*
 * End of epass2003_decipher(): copies at most outlen response bytes and
 * returns the copied length (or the mapped status-word error).
 *
 * acl_to_ac_byte(): maps an OpenSC ACL entry to the card's access-control
 * byte - NONE -> EVERYONE, NEVER -> NOONE, everything else -> USER, all
 * OR-ed with EPASS2003_AC_MAC_NOLESS.
 *
 * epass2003_process_fci() begins here: parses FCI TLVs - 0x83 file id,
 * 0x80/0x81 size, 0x82 file descriptor byte (DF / working EF / BSO /
 * internal EF and the EF structure).
 */
outlen : apdu.resplen; memcpy(out, apdu.resp, len); LOG_FUNC_RETURN(card->ctx, len); } LOG_FUNC_RETURN(card->ctx, sc_check_sw(card, apdu.sw1, apdu.sw2)); } static int acl_to_ac_byte(struct sc_card *card, const struct sc_acl_entry *e) { if (e == NULL) return SC_ERROR_OBJECT_NOT_FOUND; switch (e->method) { case SC_AC_NONE: LOG_FUNC_RETURN(card->ctx, EPASS2003_AC_MAC_NOLESS | EPASS2003_AC_EVERYONE); case SC_AC_NEVER: LOG_FUNC_RETURN(card->ctx, EPASS2003_AC_MAC_NOLESS | EPASS2003_AC_NOONE); default: LOG_FUNC_RETURN(card->ctx, EPASS2003_AC_MAC_NOLESS | EPASS2003_AC_USER); } LOG_FUNC_RETURN(card->ctx, SC_ERROR_INCORRECT_PARAMETERS); } static int epass2003_process_fci(struct sc_card *card, sc_file_t * file, const u8 * buf, size_t buflen) { sc_context_t *ctx = card->ctx; size_t taglen, len = buflen; const u8 *tag = NULL, *p = buf; sc_log(ctx, "processing FCI bytes"); tag = sc_asn1_find_tag(ctx, p, len, 0x83, &taglen); if (tag != NULL && taglen == 2) { file->id = (tag[0] << 8) | tag[1]; sc_log(ctx, " file identifier: 0x%02X%02X", tag[0], tag[1]); } tag = sc_asn1_find_tag(ctx, p, len, 0x80, &taglen); if (tag != NULL && taglen > 0 && taglen < 3) { file->size = tag[0]; if (taglen == 2) file->size = (file->size << 8) + tag[1]; sc_log(ctx, " bytes in file: %"SC_FORMAT_LEN_SIZE_T"u", file->size); } if (tag == NULL) { tag = sc_asn1_find_tag(ctx, p, len, 0x81, &taglen); if (tag != NULL && taglen >= 2) { int bytes = (tag[0] << 8) + tag[1]; sc_log(ctx, " bytes in file: %d", bytes); file->size = bytes; } } tag = sc_asn1_find_tag(ctx, p, len, 0x82, &taglen); if (tag != NULL) { if (taglen > 0) { unsigned char byte = tag[0]; const char *type; if (byte == 0x38) { type = "DF"; file->type = SC_FILE_TYPE_DF; } else if (0x01 <= byte && byte <= 0x07) { type = "working EF"; file->type = SC_FILE_TYPE_WORKING_EF; switch (byte) { case 0x01: file->ef_structure = SC_FILE_EF_TRANSPARENT; break; case 0x02: file->ef_structure = SC_FILE_EF_LINEAR_FIXED; break; case 0x04: file->ef_structure = 
/*
 * End of epass2003_process_fci(): remaining FCI tags - 0x84 file name,
 * 0x85/0xA5 proprietary attributes, 0x86 security attributes, 0x8A life
 * cycle status (creation / activated / invalidated) - then marks the file
 * with SC_FILE_MAGIC.
 *
 * epass2003_construct_fci() begins here: the inverse operation, serializing
 * an sc_file into the 0x62-wrapped FCI the card expects (0x80 size for
 * transparent EFs, 0x82 descriptor, 0x83 id, ...).
 */
SC_FILE_EF_LINEAR_FIXED; break; case 0x03: case 0x05: case 0x06: case 0x07: break; default: break; } } else if (0x10 == byte) { type = "BSO"; file->type = SC_FILE_TYPE_BSO; } else if (0x11 <= byte) { type = "internal EF"; file->type = SC_FILE_TYPE_INTERNAL_EF; switch (byte) { case 0x11: break; case 0x12: break; default: break; } } else { type = "unknown"; file->type = SC_FILE_TYPE_INTERNAL_EF; } sc_log(ctx, "type %s, EF structure %d", type, byte); } } tag = sc_asn1_find_tag(ctx, p, len, 0x84, &taglen); if (tag != NULL && taglen > 0 && taglen <= 16) { memcpy(file->name, tag, taglen); file->namelen = taglen; sc_log_hex(ctx, "File name", file->name, file->namelen); if (!file->type) file->type = SC_FILE_TYPE_DF; } tag = sc_asn1_find_tag(ctx, p, len, 0x85, &taglen); if (tag != NULL && taglen) sc_file_set_prop_attr(file, tag, taglen); else file->prop_attr_len = 0; tag = sc_asn1_find_tag(ctx, p, len, 0xA5, &taglen); if (tag != NULL && taglen) sc_file_set_prop_attr(file, tag, taglen); tag = sc_asn1_find_tag(ctx, p, len, 0x86, &taglen); if (tag != NULL && taglen) sc_file_set_sec_attr(file, tag, taglen); tag = sc_asn1_find_tag(ctx, p, len, 0x8A, &taglen); if (tag != NULL && taglen == 1) { if (tag[0] == 0x01) file->status = SC_FILE_STATUS_CREATION; else if (tag[0] == 0x07 || tag[0] == 0x05) file->status = SC_FILE_STATUS_ACTIVATED; else if (tag[0] == 0x06 || tag[0] == 0x04) file->status = SC_FILE_STATUS_INVALIDATED; } file->magic = SC_FILE_MAGIC; return 0; } static int epass2003_construct_fci(struct sc_card *card, const sc_file_t * file, u8 * out, size_t * outlen) { u8 *p = out; u8 buf[64]; unsigned char ops[8] = { 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF }; int rv; unsigned ii; if (*outlen < 2) return SC_ERROR_BUFFER_TOO_SMALL; *p++ = 0x62; p++; if (file->type == SC_FILE_TYPE_WORKING_EF) { if (file->ef_structure == SC_FILE_EF_TRANSPARENT) { buf[0] = (file->size >> 8) & 0xFF; buf[1] = file->size & 0xFF; sc_asn1_put_tag(0x80, buf, 2, p, *outlen - (p - out), &p); } } if 
/*
 * Middle of epass2003_construct_fci(): emits the 0x82 file-descriptor TLV
 * (0x38 for DF; structure byte for working EFs, with record layout bytes for
 * linear files; 0x11/0x12 for internal key EFs; 0x10 for BSO), the 0x83
 * file-id TLV, the 0x84 DF name (required for DFs), and the 0x85
 * size/limit TLV per file type.
 */
(file->type == SC_FILE_TYPE_DF) { buf[0] = 0x38; buf[1] = 0x00; sc_asn1_put_tag(0x82, buf, 2, p, *outlen - (p - out), &p); } else if (file->type == SC_FILE_TYPE_WORKING_EF) { buf[0] = file->ef_structure & 7; if (file->ef_structure == SC_FILE_EF_TRANSPARENT) { buf[1] = 0x00; sc_asn1_put_tag(0x82, buf, 2, p, *outlen - (p - out), &p); } else if (file->ef_structure == SC_FILE_EF_LINEAR_FIXED || file->ef_structure == SC_FILE_EF_LINEAR_VARIABLE) { buf[1] = 0x00; buf[2] = 0x00; buf[3] = 0x40; /* record length */ buf[4] = 0x00; /* record count */ sc_asn1_put_tag(0x82, buf, 5, p, *outlen - (p - out), &p); } else { return SC_ERROR_NOT_SUPPORTED; } } else if (file->type == SC_FILE_TYPE_INTERNAL_EF) { if (file->ef_structure == SC_CARDCTL_OBERTHUR_KEY_RSA_CRT || file->ef_structure == SC_CARDCTL_OBERTHUR_KEY_EC_CRT) { buf[0] = 0x11; buf[1] = 0x00; } else if (file->ef_structure == SC_CARDCTL_OBERTHUR_KEY_RSA_PUBLIC || file->ef_structure == SC_CARDCTL_OBERTHUR_KEY_EC_PUBLIC) { buf[0] = 0x12; buf[1] = 0x00; } else { return SC_ERROR_NOT_SUPPORTED; } sc_asn1_put_tag(0x82, buf, 2, p, *outlen - (p - out), &p); } else if (file->type == SC_FILE_TYPE_BSO) { buf[0] = 0x10; buf[1] = 0x00; sc_asn1_put_tag(0x82, buf, 2, p, *outlen - (p - out), &p); } buf[0] = (file->id >> 8) & 0xFF; buf[1] = file->id & 0xFF; sc_asn1_put_tag(0x83, buf, 2, p, *outlen - (p - out), &p); if (file->type == SC_FILE_TYPE_DF) { if (file->namelen != 0) { sc_asn1_put_tag(0x84, file->name, file->namelen, p, *outlen - (p - out), &p); } else { return SC_ERROR_INVALID_ARGUMENTS; } } if (file->type == SC_FILE_TYPE_DF) { unsigned char data[2] = {0x00, 0x7F}; /* 127 files at most */ sc_asn1_put_tag(0x85, data, sizeof(data), p, *outlen - (p - out), &p); } else if (file->type == SC_FILE_TYPE_BSO) { buf[0] = file->size & 0xff; sc_asn1_put_tag(0x85, buf, 1, p, *outlen - (p - out), &p); } else if (file->type == SC_FILE_TYPE_INTERNAL_EF) { if (file->ef_structure == SC_CARDCTL_OBERTHUR_KEY_RSA_CRT || file->ef_structure == 
/*
 * ACL section of epass2003_construct_fci(): when the file carries explicit
 * security attributes (sec_attr) they are copied into the 0x86 TLV
 * verbatim; otherwise a per-file-type ops[] table maps slot positions to
 * SC_AC_OP_* operations and each slot is converted with acl_to_ac_byte()
 * (unused slots stay 0xFF).  The trailing `if (file->size == 256)
 * out[4] = 0x13;` tweaks a header byte for 256-byte files - intent not
 * evident from this chunk; treat as card-specific quirk.
 */
SC_CARDCTL_OBERTHUR_KEY_RSA_PUBLIC|| file->ef_structure == SC_CARDCTL_OBERTHUR_KEY_EC_CRT|| file->ef_structure == SC_CARDCTL_OBERTHUR_KEY_EC_PUBLIC) { buf[0] = (file->size >> 8) & 0xFF; buf[1] = file->size & 0xFF; sc_asn1_put_tag(0x85, buf, 2, p, *outlen - (p - out), &p); } } if (file->sec_attr_len) { memcpy(buf, file->sec_attr, file->sec_attr_len); sc_asn1_put_tag(0x86, buf, file->sec_attr_len, p, *outlen - (p - out), &p); } else { sc_log(card->ctx, "SC_FILE_ACL"); if (file->type == SC_FILE_TYPE_DF) { ops[0] = SC_AC_OP_LIST_FILES; ops[1] = SC_AC_OP_CREATE; ops[3] = SC_AC_OP_DELETE; } else if (file->type == SC_FILE_TYPE_WORKING_EF) { if (file->ef_structure == SC_FILE_EF_TRANSPARENT) { ops[0] = SC_AC_OP_READ; ops[1] = SC_AC_OP_UPDATE; ops[3] = SC_AC_OP_DELETE; } else if (file->ef_structure == SC_FILE_EF_LINEAR_FIXED || file->ef_structure == SC_FILE_EF_LINEAR_VARIABLE) { ops[0] = SC_AC_OP_READ; ops[1] = SC_AC_OP_UPDATE; ops[2] = SC_AC_OP_WRITE; ops[3] = SC_AC_OP_DELETE; } else { return SC_ERROR_NOT_SUPPORTED; } } else if (file->type == SC_FILE_TYPE_BSO) { ops[0] = SC_AC_OP_UPDATE; ops[3] = SC_AC_OP_DELETE; } else if (file->type == SC_FILE_TYPE_INTERNAL_EF) { if (file->ef_structure == SC_CARDCTL_OBERTHUR_KEY_RSA_CRT || file->ef_structure == SC_CARDCTL_OBERTHUR_KEY_EC_CRT) { ops[1] = SC_AC_OP_UPDATE; ops[2] = SC_AC_OP_CRYPTO; ops[3] = SC_AC_OP_DELETE; } else if (file->ef_structure == SC_CARDCTL_OBERTHUR_KEY_RSA_PUBLIC|| file->ef_structure == SC_CARDCTL_OBERTHUR_KEY_EC_PUBLIC) { ops[0] = SC_AC_OP_READ; ops[1] = SC_AC_OP_UPDATE; ops[2] = SC_AC_OP_CRYPTO; ops[3] = SC_AC_OP_DELETE; } } else { return SC_ERROR_NOT_SUPPORTED; } for (ii = 0; ii < sizeof(ops); ii++) { const struct sc_acl_entry *entry; buf[ii] = 0xFF; if (ops[ii] == 0xFF) continue; entry = sc_file_get_acl_entry(file, ops[ii]); rv = acl_to_ac_byte(card, entry); LOG_TEST_RET(card->ctx, rv, "Invalid ACL"); buf[ii] = rv; } sc_asn1_put_tag(0x86, buf, sizeof(ops), p, *outlen - (p - out), &p); if(file->size == 256) { 
out[4]= 0x13; } } /* VT ??? */ if (file->ef_structure == SC_CARDCTL_OBERTHUR_KEY_RSA_PUBLIC|| file->ef_structure == SC_CARDCTL_OBERTHUR_KEY_EC_PUBLIC) { unsigned char data[2] = {0x00, 0x66}; sc_asn1_put_tag(0x87, data, sizeof(data), p, *outlen - (p - out), &p); if(file->size == 256) { out[4]= 0x14; } } out[1] = p - out - 2; *outlen = p - out; return 0; } static int epass2003_create_file(struct sc_card *card, sc_file_t * file) { int r; size_t len; u8 sbuf[SC_MAX_APDU_BUFFER_SIZE] = { 0 }; struct sc_apdu apdu; len = SC_MAX_APDU_BUFFER_SIZE; epass2003_hook_file(file, 1); if (card->ops->construct_fci == NULL) LOG_FUNC_RETURN(card->ctx, SC_ERROR_NOT_SUPPORTED); r = epass2003_construct_fci(card, file, sbuf, &len); LOG_TEST_RET(card->ctx, r, "construct_fci() failed"); sc_format_apdu(card, &apdu, SC_APDU_CASE_3_SHORT, 0xE0, 0x00, 0x00); apdu.lc = len; apdu.datalen = len; apdu.data = sbuf; r = sc_transmit_apdu_t(card, &apdu); LOG_TEST_RET(card->ctx, r, "APDU transmit failed"); r = sc_check_sw(card, apdu.sw1, apdu.sw2); LOG_TEST_RET(card->ctx, r, "APDU sw1/2 wrong"); epass2003_hook_file(file, 0); return r; } static int epass2003_delete_file(struct sc_card *card, const sc_path_t * path) { int r; u8 sbuf[2]; struct sc_apdu apdu; LOG_FUNC_CALLED(card->ctx); r = sc_select_file(card, path, NULL); epass2003_hook_path((struct sc_path *)path, 1); if (r == SC_SUCCESS) { sbuf[0] = path->value[path->len - 2]; sbuf[1] = path->value[path->len - 1]; sc_format_apdu(card, &apdu, SC_APDU_CASE_3_SHORT, 0xE4, 0x00, 0x00); apdu.lc = 2; apdu.datalen = 2; apdu.data = sbuf; } else { LOG_FUNC_RETURN(card->ctx, SC_ERROR_INVALID_ARGUMENTS); } r = sc_transmit_apdu_t(card, &apdu); LOG_TEST_RET(card->ctx, r, "APDU transmit failed"); r = sc_check_sw(card, apdu.sw1, apdu.sw2); LOG_TEST_RET(card->ctx, r, "Delete file failed"); LOG_FUNC_RETURN(card->ctx, r); } static int epass2003_list_files(struct sc_card *card, unsigned char *buf, size_t buflen) { struct sc_apdu apdu; unsigned char 
rbuf[SC_MAX_APDU_BUFFER_SIZE] = { 0 }; int r; SC_FUNC_CALLED(card->ctx, SC_LOG_DEBUG_VERBOSE); sc_format_apdu(card, &apdu, SC_APDU_CASE_1, 0x34, 0x00, 0x00); apdu.cla = 0x80; apdu.le = 0; apdu.resplen = sizeof(rbuf); apdu.resp = rbuf; r = sc_transmit_apdu_t(card, &apdu); LOG_TEST_RET(card->ctx, r, "APDU transmit failed"); r = sc_check_sw(card, apdu.sw1, apdu.sw2); LOG_TEST_RET(card->ctx, r, "Card returned error"); if (apdu.resplen == 0x100 && rbuf[0] == 0 && rbuf[1] == 0) LOG_FUNC_RETURN(card->ctx, 0); buflen = buflen < apdu.resplen ? buflen : apdu.resplen; memcpy(buf, rbuf, buflen); LOG_FUNC_RETURN(card->ctx, buflen); } static int internal_write_rsa_key_factor(struct sc_card *card, unsigned short fid, u8 factor, sc_pkcs15_bignum_t data) { int r; struct sc_apdu apdu; u8 sbuff[SC_MAX_EXT_APDU_BUFFER_SIZE] = { 0 }; LOG_FUNC_CALLED(card->ctx); sbuff[0] = ((fid & 0xff00) >> 8); sbuff[1] = (fid & 0x00ff); memcpy(&sbuff[2], data.data, data.len); // sc_mem_reverse(&sbuff[2], data.len); sc_format_apdu(card, &apdu, SC_APDU_CASE_3, 0xe7, factor, 0x00); apdu.cla = 0x80; apdu.lc = apdu.datalen = 2 + data.len; apdu.data = sbuff; r = sc_transmit_apdu_t(card, &apdu); LOG_TEST_RET(card->ctx, r, "APDU transmit failed"); r = sc_check_sw(card, apdu.sw1, apdu.sw2); LOG_TEST_RET(card->ctx, r, "Write rsa key factor failed"); LOG_FUNC_RETURN(card->ctx, SC_SUCCESS); } static int internal_write_rsa_key(struct sc_card *card, unsigned short fid, struct sc_pkcs15_prkey_rsa *rsa) { int r; LOG_FUNC_CALLED(card->ctx); r = internal_write_rsa_key_factor(card, fid, 0x02, rsa->modulus); LOG_TEST_RET(card->ctx, r, "write n failed"); r = internal_write_rsa_key_factor(card, fid, 0x03, rsa->d); LOG_TEST_RET(card->ctx, r, "write d failed"); LOG_FUNC_RETURN(card->ctx, SC_SUCCESS); } static int hash_data(const unsigned char *data, size_t datalen, unsigned char *hash, unsigned int mechanismType) { if ((NULL == data) || (NULL == hash)) return SC_ERROR_INVALID_ARGUMENTS; if(mechanismType & 
SC_ALGORITHM_ECDSA_HASH_SHA1) { unsigned char data_hash[24] = { 0 }; size_t len = 0; sha1_digest(data, datalen, data_hash); len = REVERSE_ORDER4(datalen); memcpy(&data_hash[20], &len, 4); memcpy(hash, data_hash, 24); } else if(mechanismType & SC_ALGORITHM_ECDSA_HASH_SHA256) { unsigned char data_hash[36] = { 0 }; size_t len = 0; sha256_digest(data, datalen, data_hash); len = REVERSE_ORDER4(datalen); memcpy(&data_hash[32], &len, 4); memcpy(hash, data_hash, 36); } else { return SC_ERROR_NOT_SUPPORTED; } return SC_SUCCESS; } static int install_secret_key(struct sc_card *card, unsigned char ktype, unsigned char kid, unsigned char useac, unsigned char modifyac, unsigned char EC, unsigned char *data, unsigned long dataLen) { int r; struct sc_apdu apdu; unsigned char isapp = 0x00; /* appendable */ unsigned char tmp_data[256] = { 0 }; tmp_data[0] = ktype; tmp_data[1] = kid; tmp_data[2] = useac; tmp_data[3] = modifyac; tmp_data[8] = 0xFF; if (0x04 == ktype || 0x06 == ktype) { tmp_data[4] = EPASS2003_AC_MAC_NOLESS | EPASS2003_AC_SO; tmp_data[5] = EPASS2003_AC_MAC_NOLESS | EPASS2003_AC_SO; tmp_data[7] = (kid == PIN_ID[0] ? 
EPASS2003_AC_USER : EPASS2003_AC_SO); tmp_data[9] = (EC << 4) | EC; } memcpy(&tmp_data[10], data, dataLen); sc_format_apdu(card, &apdu, SC_APDU_CASE_3_SHORT, 0xe3, isapp, 0x00); apdu.cla = 0x80; apdu.lc = apdu.datalen = 10 + dataLen; apdu.data = tmp_data; r = sc_transmit_apdu_t(card, &apdu); LOG_TEST_RET(card->ctx, r, "APDU install_secret_key failed"); r = sc_check_sw(card, apdu.sw1, apdu.sw2); LOG_TEST_RET(card->ctx, r, "install_secret_key failed"); return r; } static int internal_install_pre(struct sc_card *card) { int r; /* init key for enc */ r = install_secret_key(card, 0x01, 0x00, EPASS2003_AC_MAC_NOLESS | EPASS2003_AC_EVERYONE, EPASS2003_AC_MAC_NOLESS | EPASS2003_AC_EVERYONE, 0, g_init_key_enc, 16); LOG_TEST_RET(card->ctx, r, "Install init key failed"); /* init key for mac */ r = install_secret_key(card, 0x02, 0x00, EPASS2003_AC_MAC_NOLESS | EPASS2003_AC_EVERYONE, EPASS2003_AC_MAC_NOLESS | EPASS2003_AC_EVERYONE, 0, g_init_key_mac, 16); LOG_TEST_RET(card->ctx, r, "Install init key failed"); return r; } /* use external auth secret as pin */ static int internal_install_pin(struct sc_card *card, sc_epass2003_wkey_data * pin) { int r; unsigned char hash[HASH_LEN] = { 0 }; r = hash_data(pin->key_data.es_secret.key_val, pin->key_data.es_secret.key_len, hash, SC_ALGORITHM_ECDSA_HASH_SHA1); LOG_TEST_RET(card->ctx, r, "hash data failed"); r = install_secret_key(card, 0x04, pin->key_data.es_secret.kid, pin->key_data.es_secret.ac[0], pin->key_data.es_secret.ac[1], pin->key_data.es_secret.EC, hash, HASH_LEN); LOG_TEST_RET(card->ctx, r, "Install failed"); return r; } static int epass2003_write_key(struct sc_card *card, sc_epass2003_wkey_data * data) { LOG_FUNC_CALLED(card->ctx); if (data->type & SC_EPASS2003_KEY) { if (data->type == SC_EPASS2003_KEY_RSA) return internal_write_rsa_key(card, data->key_data.es_key.fid, data->key_data.es_key.rsa); else LOG_FUNC_RETURN(card->ctx, SC_ERROR_NOT_SUPPORTED); } else if (data->type & SC_EPASS2003_SECRET) { if (data->type == 
SC_EPASS2003_SECRET_PRE) return internal_install_pre(card); else if (data->type == SC_EPASS2003_SECRET_PIN) return internal_install_pin(card, data); else LOG_FUNC_RETURN(card->ctx, SC_ERROR_NOT_SUPPORTED); } else { LOG_FUNC_RETURN(card->ctx, SC_ERROR_NOT_SUPPORTED); } LOG_FUNC_RETURN(card->ctx, SC_SUCCESS); } static int epass2003_gen_key(struct sc_card *card, sc_epass2003_gen_key_data * data) { int r; size_t len = data->key_length; struct sc_apdu apdu; u8 rbuf[SC_MAX_EXT_APDU_BUFFER_SIZE] = { 0 }; u8 sbuf[SC_MAX_EXT_APDU_BUFFER_SIZE] = { 0 }; LOG_FUNC_CALLED(card->ctx); if(len == 256) { sbuf[0] = 0x02; } else { sbuf[0] = 0x01; } sbuf[1] = (u8) ((len >> 8) & 0xff); sbuf[2] = (u8) (len & 0xff); sbuf[3] = (u8) ((data->prkey_id >> 8) & 0xFF); sbuf[4] = (u8) ((data->prkey_id) & 0xFF); sbuf[5] = (u8) ((data->pukey_id >> 8) & 0xFF); sbuf[6] = (u8) ((data->pukey_id) & 0xFF); /* generate key */ sc_format_apdu(card, &apdu, SC_APDU_CASE_3_SHORT, 0x46, 0x00, 0x00); apdu.lc = apdu.datalen = 7; apdu.data = sbuf; r = sc_transmit_apdu_t(card, &apdu); LOG_TEST_RET(card->ctx, r, "APDU transmit failed"); r = sc_check_sw(card, apdu.sw1, apdu.sw2); LOG_TEST_RET(card->ctx, r, "generate keypair failed"); /* read public key */ sc_format_apdu(card, &apdu, SC_APDU_CASE_3_SHORT, 0xb4, 0x02, 0x00); if(len == 256) { apdu.p1 = 0x00; } apdu.cla = 0x80; apdu.lc = apdu.datalen = 2; apdu.data = &sbuf[5]; apdu.resp = rbuf; apdu.resplen = sizeof(rbuf); apdu.le = 0x00; r = sc_transmit_apdu_t(card, &apdu); LOG_TEST_RET(card->ctx, r, "APDU transmit failed"); r = sc_check_sw(card, apdu.sw1, apdu.sw2); LOG_TEST_RET(card->ctx, r, "get pukey failed"); if (len < apdu.resplen) LOG_FUNC_RETURN(card->ctx, SC_ERROR_INVALID_ARGUMENTS); data->modulus = (u8 *) malloc(len); if (!data->modulus) LOG_FUNC_RETURN(card->ctx, SC_ERROR_OUT_OF_MEMORY); memcpy(data->modulus, rbuf, len); LOG_FUNC_RETURN(card->ctx, SC_SUCCESS); } static int epass2003_erase_card(struct sc_card *card) { int r; LOG_FUNC_CALLED(card->ctx); 
sc_invalidate_cache(card); r = sc_delete_file(card, sc_get_mf_path()); LOG_TEST_RET(card->ctx, r, "delete MF failed"); LOG_FUNC_RETURN(card->ctx, r); } static int epass2003_get_serialnr(struct sc_card *card, sc_serial_number_t * serial) { u8 rbuf[8]; size_t rbuf_len = sizeof(rbuf); LOG_FUNC_CALLED(card->ctx); if (SC_SUCCESS != get_data(card, 0x80, rbuf, rbuf_len)) return SC_ERROR_CARD_CMD_FAILED; card->serialnr.len = serial->len = 8; memcpy(card->serialnr.value, rbuf, 8); memcpy(serial->value, rbuf, 8); LOG_FUNC_RETURN(card->ctx, SC_SUCCESS); } static int epass2003_card_ctl(struct sc_card *card, unsigned long cmd, void *ptr) { LOG_FUNC_CALLED(card->ctx); sc_log(card->ctx, "cmd is %0lx", cmd); switch (cmd) { case SC_CARDCTL_ENTERSAFE_WRITE_KEY: return epass2003_write_key(card, (sc_epass2003_wkey_data *) ptr); case SC_CARDCTL_ENTERSAFE_GENERATE_KEY: return epass2003_gen_key(card, (sc_epass2003_gen_key_data *) ptr); case SC_CARDCTL_ERASE_CARD: return epass2003_erase_card(card); case SC_CARDCTL_GET_SERIALNR: return epass2003_get_serialnr(card, (sc_serial_number_t *) ptr); default: return SC_ERROR_NOT_SUPPORTED; } } static void internal_sanitize_pin_info(struct sc_pin_cmd_pin *pin, unsigned int num) { pin->encoding = SC_PIN_ENCODING_ASCII; pin->min_length = 4; pin->max_length = 16; pin->pad_length = 16; pin->offset = 5 + num * 16; pin->pad_char = 0x00; } static int get_external_key_maxtries(struct sc_card *card, unsigned char *maxtries) { unsigned char maxcounter[2] = { 0 }; static const sc_path_t file_path = { {0x3f, 0x00, 0x50, 0x15, 0x9f, 0x00, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0}, 6, 0, 0, SC_PATH_TYPE_PATH, {{0}, 0} }; int ret; ret = sc_select_file(card, &file_path, NULL); LOG_TEST_RET(card->ctx, ret, "select max counter file failed"); ret = sc_read_binary(card, 0, maxcounter, 2, 0); LOG_TEST_RET(card->ctx, ret, "read max counter file failed"); *maxtries = maxcounter[0]; return SC_SUCCESS; } static int get_external_key_retries(struct sc_card *card, unsigned char kid, 
unsigned char *retries) { int r; struct sc_apdu apdu; unsigned char random[16] = { 0 }; r = sc_get_challenge(card, random, 8); LOG_TEST_RET(card->ctx, r, "get challenge get_external_key_retries failed"); sc_format_apdu(card, &apdu, SC_APDU_CASE_1, 0x82, 0x01, 0x80 | kid); apdu.resp = NULL; apdu.resplen = 0; r = sc_transmit_apdu_t(card, &apdu); LOG_TEST_RET(card->ctx, r, "APDU get_external_key_retries failed"); if (retries && ((0x63 == (apdu.sw1 & 0xff)) && (0xC0 == (apdu.sw2 & 0xf0)))) { *retries = (apdu.sw2 & 0x0f); r = SC_SUCCESS; } else { LOG_TEST_RET(card->ctx, r, "get_external_key_retries failed"); r = SC_ERROR_CARD_CMD_FAILED; } return r; } static int epass2003_get_challenge(sc_card_t *card, u8 *rnd, size_t len) { u8 rbuf[16]; size_t out_len; int r; LOG_FUNC_CALLED(card->ctx); r = iso_ops->get_challenge(card, rbuf, sizeof rbuf); LOG_TEST_RET(card->ctx, r, "GET CHALLENGE cmd failed"); if (len < (size_t) r) { out_len = len; } else { out_len = (size_t) r; } memcpy(rnd, rbuf, out_len); LOG_FUNC_RETURN(card->ctx, (int) out_len); } static int external_key_auth(struct sc_card *card, unsigned char kid, unsigned char *data, size_t datalen) { int r; struct sc_apdu apdu; unsigned char random[16] = { 0 }; unsigned char tmp_data[16] = { 0 }; unsigned char hash[HASH_LEN] = { 0 }; unsigned char iv[16] = { 0 }; r = sc_get_challenge(card, random, 8); LOG_TEST_RET(card->ctx, r, "get challenge external_key_auth failed"); r = hash_data(data, datalen, hash, SC_ALGORITHM_ECDSA_HASH_SHA1); LOG_TEST_RET(card->ctx, r, "hash data failed"); des3_encrypt_cbc(hash, HASH_LEN, iv, random, 8, tmp_data); sc_format_apdu(card, &apdu, SC_APDU_CASE_3_SHORT, 0x82, 0x01, 0x80 | kid); apdu.lc = apdu.datalen = 8; apdu.data = tmp_data; r = sc_transmit_apdu_t(card, &apdu); LOG_TEST_RET(card->ctx, r, "APDU external_key_auth failed"); r = sc_check_sw(card, apdu.sw1, apdu.sw2); LOG_TEST_RET(card->ctx, r, "external_key_auth failed"); return r; } static int update_secret_key(struct sc_card *card, unsigned 
char ktype, unsigned char kid, const unsigned char *data, unsigned long datalen) { int r; struct sc_apdu apdu; unsigned char hash[HASH_LEN] = { 0 }; unsigned char tmp_data[256] = { 0 }; unsigned char maxtries = 0; r = hash_data(data, datalen, hash, SC_ALGORITHM_ECDSA_HASH_SHA1); LOG_TEST_RET(card->ctx, r, "hash data failed"); r = get_external_key_maxtries(card, &maxtries); LOG_TEST_RET(card->ctx, r, "get max counter failed"); tmp_data[0] = (maxtries << 4) | maxtries; memcpy(&tmp_data[1], hash, HASH_LEN); sc_format_apdu(card, &apdu, SC_APDU_CASE_3_SHORT, 0xe5, ktype, kid); apdu.cla = 0x80; apdu.lc = apdu.datalen = 1 + HASH_LEN; apdu.data = tmp_data; r = sc_transmit_apdu_t(card, &apdu); LOG_TEST_RET(card->ctx, r, "APDU update_secret_key failed"); r = sc_check_sw(card, apdu.sw1, apdu.sw2); LOG_TEST_RET(card->ctx, r, "update_secret_key failed"); return r; } /* use external auth secret as pin */ static int epass2003_pin_cmd(struct sc_card *card, struct sc_pin_cmd_data *data, int *tries_left) { int r; u8 kid; u8 retries = 0; u8 pin_low = 3; unsigned char maxtries = 0; LOG_FUNC_CALLED(card->ctx); internal_sanitize_pin_info(&data->pin1, 0); internal_sanitize_pin_info(&data->pin2, 1); data->flags |= SC_PIN_CMD_NEED_PADDING; kid = data->pin_reference; /* get pin retries */ if (data->cmd == SC_PIN_CMD_GET_INFO) { r = get_external_key_retries(card, 0x80 | kid, &retries); if (r == SC_SUCCESS) { data->pin1.tries_left = retries; if (tries_left) *tries_left = retries; r = get_external_key_maxtries(card, &maxtries); LOG_TEST_RET(card->ctx, r, "get max counter failed"); data->pin1.max_tries = maxtries; } //remove below code, because the old implement only return PIN retries, now modify the code and return PIN status // return r; } else if (data->cmd == SC_PIN_CMD_UNBLOCK) { /* verify */ r = external_key_auth(card, (kid + 1), (unsigned char *)data->pin1.data, data->pin1.len); LOG_TEST_RET(card->ctx, r, "verify pin failed"); } else if (data->cmd == SC_PIN_CMD_CHANGE || data->cmd == 
SC_PIN_CMD_UNBLOCK) { /* change */ r = update_secret_key(card, 0x04, kid, data->pin2.data, (unsigned long)data->pin2.len); LOG_TEST_RET(card->ctx, r, "verify pin failed"); } else { r = external_key_auth(card, kid, (unsigned char *)data->pin1.data, data->pin1.len); get_external_key_retries(card, 0x80 | kid, &retries); if (retries < pin_low) sc_log(card->ctx, "Verification failed (remaining tries: %d)", retries); } LOG_TEST_RET(card->ctx, r, "verify pin failed"); if (r == SC_SUCCESS) { data->pin1.logged_in = SC_PIN_STATE_LOGGED_IN; } return r; } static struct sc_card_driver *sc_get_driver(void) { struct sc_card_driver *iso_drv = sc_get_iso7816_driver(); if (iso_ops == NULL) iso_ops = iso_drv->ops; epass2003_ops = *iso_ops; epass2003_ops.match_card = epass2003_match_card; epass2003_ops.init = epass2003_init; epass2003_ops.finish = epass2003_finish; epass2003_ops.write_binary = NULL; epass2003_ops.write_record = NULL; epass2003_ops.select_file = epass2003_select_file; epass2003_ops.get_response = NULL; epass2003_ops.restore_security_env = epass2003_restore_security_env; epass2003_ops.set_security_env = epass2003_set_security_env; epass2003_ops.decipher = epass2003_decipher; epass2003_ops.compute_signature = epass2003_decipher; epass2003_ops.create_file = epass2003_create_file; epass2003_ops.delete_file = epass2003_delete_file; epass2003_ops.list_files = epass2003_list_files; epass2003_ops.card_ctl = epass2003_card_ctl; epass2003_ops.process_fci = epass2003_process_fci; epass2003_ops.construct_fci = epass2003_construct_fci; epass2003_ops.pin_cmd = epass2003_pin_cmd; epass2003_ops.check_sw = epass2003_check_sw; epass2003_ops.get_challenge = epass2003_get_challenge; return &epass2003_drv; } struct sc_card_driver *sc_get_epass2003_driver(void) { return sc_get_driver(); } #endif /* #ifdef ENABLE_OPENSSL */ #endif /* #ifdef ENABLE_SM */
decrypt_response(struct sc_card *card, unsigned char *in, size_t inlen, unsigned char *out, size_t * out_len) { size_t cipher_len; size_t i; unsigned char iv[16] = { 0 }; unsigned char plaintext[4096] = { 0 }; epass2003_exdata *exdata = NULL; if (!card->drv_data) return SC_ERROR_INVALID_ARGUMENTS; exdata = (epass2003_exdata *)card->drv_data; /* no cipher */ if (in[0] == 0x99) return 0; /* parse cipher length */ if (0x01 == in[2] && 0x82 != in[1]) { cipher_len = in[1]; i = 3; } else if (0x01 == in[3] && 0x81 == in[1]) { cipher_len = in[2]; i = 4; } else if (0x01 == in[4] && 0x82 == in[1]) { cipher_len = in[2] * 0x100; cipher_len += in[3]; i = 5; } else { return -1; } if (cipher_len < 2 || i+cipher_len > inlen || cipher_len > sizeof plaintext) return -1; /* decrypt */ if (KEY_TYPE_AES == exdata->smtype) aes128_decrypt_cbc(exdata->sk_enc, 16, iv, &in[i], cipher_len - 1, plaintext); else des3_decrypt_cbc(exdata->sk_enc, 16, iv, &in[i], cipher_len - 1, plaintext); /* unpadding */ while (0x80 != plaintext[cipher_len - 2] && (cipher_len - 2 > 0)) cipher_len--; if (2 == cipher_len) return -1; memcpy(out, plaintext, cipher_len - 2); *out_len = cipher_len - 2; return 0; }
decrypt_response(struct sc_card *card, unsigned char *in, size_t inlen, unsigned char *out, size_t * out_len) { size_t cipher_len; size_t i; unsigned char iv[16] = { 0 }; unsigned char plaintext[4096] = { 0 }; epass2003_exdata *exdata = NULL; if (!card->drv_data) return SC_ERROR_INVALID_ARGUMENTS; exdata = (epass2003_exdata *)card->drv_data; /* no cipher */ if (in[0] == 0x99) return 0; /* parse cipher length */ if (0x01 == in[2] && 0x82 != in[1]) { cipher_len = in[1]; i = 3; } else if (0x01 == in[3] && 0x81 == in[1]) { cipher_len = in[2]; i = 4; } else if (0x01 == in[4] && 0x82 == in[1]) { cipher_len = in[2] * 0x100; cipher_len += in[3]; i = 5; } else { return -1; } if (cipher_len < 2 || i+cipher_len > inlen || cipher_len > sizeof plaintext) return -1; /* decrypt */ if (KEY_TYPE_AES == exdata->smtype) aes128_decrypt_cbc(exdata->sk_enc, 16, iv, &in[i], cipher_len - 1, plaintext); else des3_decrypt_cbc(exdata->sk_enc, 16, iv, &in[i], cipher_len - 1, plaintext); /* unpadding */ while (0x80 != plaintext[cipher_len - 2] && (cipher_len - 2 > 0)) cipher_len--; if (2 == cipher_len || *out_len < cipher_len - 2) return -1; memcpy(out, plaintext, cipher_len - 2); *out_len = cipher_len - 2; return 0; }
{'added': [(954, '\tif (2 == cipher_len || *out_len < cipher_len - 2)'), (980, '\t\t\tlen = plain->resplen;')], 'deleted': [(954, '\tif (2 == cipher_len)')]}
2
1
2,168
16,042
42
326
16
https://github.com/OpenSC/OpenSC
CVE-2018-16391
CWE-119
718
featuremaptest.cpp
C++
testFeatTable
/* GRAPHITE2 LICENSING Copyright 2010, SIL International All rights reserved. This library is free software; you can redistribute it and/or modify it under the terms of the GNU Lesser General Public License as published by the Free Software Foundation; either version 2.1 of License, or (at your option) any later version. This program is distributed in the hope that it will be useful, but WITHOUT ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU Lesser General Public License for more details. You should also have received a copy of the GNU Lesser General Public License along with this library in the file named "LICENSE". If not, write to the Free Software Foundation, 51 Franklin Street, Suite 500, Boston, MA 02110-1335, USA or visit their web page on the internet at http://www.fsf.org/licenses/lgpl.html. */ #include <cstdlib> #include <stdexcept> #include <fstream> #include <iostream> #include <map> #include <string> #include <graphite2/Font.h> #include "inc/Endian.h" #include "inc/Face.h" #include "inc/FeatureMap.h" #include "inc/TtfUtil.h" #pragma pack(push, 1) using namespace graphite2; template<typename T> class _be { T _v; public: _be(const T & t) throw() {_v = be::swap<T>(t);} operator T () const throw() {return be::swap<T>(_v); } }; struct FeatHeader { _be<gr_uint16> m_major; _be<gr_uint16> m_minor; _be<gr_uint16> m_numFeat; _be<gr_uint16> m_reserved1; _be<gr_uint32> m_reserved2; }; struct FeatDefn { _be<gr_uint32> m_featId; _be<gr_uint16> m_numFeatSettings; _be<gr_uint16> m_reserved1; _be<gr_uint32> m_settingsOffset; _be<gr_uint16> m_flags; _be<gr_uint16> m_label; }; struct FeatSetting { _be<gr_int16> m_value; _be<gr_uint16> m_label; }; struct FeatTableTestA { FeatHeader m_header; FeatDefn m_defs[1]; FeatSetting m_settings[2]; }; const FeatTableTestA testDataA = { { 2, 0, 1, 0, 0}, {{0x41424344, 2, 0, sizeof(FeatHeader) + sizeof(FeatDefn), 0, 1}}, {{0,10},{1,11}} }; struct FeatTableTestB { 
FeatHeader m_header; FeatDefn m_defs[2]; FeatSetting m_settings[4]; }; const FeatTableTestB testDataB = { { 2, 0, 2, 0, 0}, {{0x41424344, 2, 0, sizeof(FeatHeader) + 2 * sizeof(FeatDefn), 0, 1}, {0x41424345, 2, 0, sizeof(FeatHeader) + 2 * sizeof(FeatDefn) + 2 * sizeof(FeatSetting), 0, 2}}, {{0,10},{1,11},{0,12},{1,13}} }; const FeatTableTestB testDataBunsorted = { { 2, 0, 2, 0, 0}, {{0x41424345, 2, 0, sizeof(FeatHeader) + 2 * sizeof(FeatDefn) + 2 * sizeof(FeatSetting), 0, 2}, {0x41424344, 2, 0, sizeof(FeatHeader) + 2 * sizeof(FeatDefn), 0, 1}}, {{0,10},{1,11},{0,12},{1,13}} }; struct FeatTableTestC { FeatHeader m_header; FeatDefn m_defs[3]; FeatSetting m_settings[7]; }; const FeatTableTestC testDataCunsorted = { { 2, 0, 3, 0, 0}, {{0x41424343, 3, 0, sizeof(FeatHeader) + 3 * sizeof(FeatDefn) + 4 * sizeof(FeatSetting), 0, 1}, {0x41424345, 2, 0, sizeof(FeatHeader) + 3 * sizeof(FeatDefn) + 2 * sizeof(FeatSetting), 0, 3}, {0x41424344, 2, 0, sizeof(FeatHeader) + 3 * sizeof(FeatDefn), 0, 2}}, {{0,10},{1,11},{0,12},{1,13},{0,14},{1,15},{2,16}} }; struct FeatTableTestD { FeatHeader m_header; FeatDefn m_defs[4]; FeatSetting m_settings[9]; }; const FeatTableTestD testDataDunsorted = { { 2, 0, 4, 0, 0}, {{400, 3, 0, sizeof(FeatHeader) + 4 * sizeof(FeatDefn) + 4 * sizeof(FeatSetting), 0, 1}, {100, 2, 0, sizeof(FeatHeader) + 4 * sizeof(FeatDefn) + 2 * sizeof(FeatSetting), 0, 3}, {300, 2, 0, sizeof(FeatHeader) + 4 * sizeof(FeatDefn), 0, 2}, {200, 2, 0, sizeof(FeatHeader) + 4 * sizeof(FeatDefn) + 7 * sizeof(FeatSetting), 0, 2} }, {{0,10},{1,11},{0,12},{10,13},{0,14},{1,15},{2,16},{2,17},{4,18}} }; struct FeatTableTestE { FeatHeader m_header; FeatDefn m_defs[5]; FeatSetting m_settings[11]; }; const FeatTableTestE testDataE = { { 2, 0, 5, 0, 0}, {{400, 3, 0, sizeof(FeatHeader) + 5 * sizeof(FeatDefn) + 4 * sizeof(FeatSetting), 0, 1}, {100, 2, 0, sizeof(FeatHeader) + 5 * sizeof(FeatDefn) + 2 * sizeof(FeatSetting), 0, 3}, {500, 2, 0, sizeof(FeatHeader) + 5 * sizeof(FeatDefn) + 9 * 
sizeof(FeatSetting), 0, 3}, {300, 2, 0, sizeof(FeatHeader) + 5 * sizeof(FeatDefn), 0, 2}, {200, 2, 0, sizeof(FeatHeader) + 5 * sizeof(FeatDefn) + 7 * sizeof(FeatSetting), 0, 2} }, {{0,10},{1,11},{0,12},{10,13},{0,14},{1,15},{2,16},{2,17},{4,18},{1,19},{2,20}} }; const FeatTableTestE testBadOffset = { { 2, 0, 5, 0, 0}, {{400, 3, 0, sizeof(FeatHeader) + 5 * sizeof(FeatDefn) + 4 * sizeof(FeatSetting), 0, 1}, {100, 2, 0, sizeof(FeatHeader) + 5 * sizeof(FeatDefn) + 2 * sizeof(FeatSetting), 0, 3}, {500, 2, 0, sizeof(FeatHeader) + 5 * sizeof(FeatDefn) + 9 * sizeof(FeatSetting), 0, 3}, {300, 2, 0, sizeof(FeatHeader) + 5 * sizeof(FeatDefn), 0, 2}, {200, 2, 0, sizeof(FeatHeader) + 5 * sizeof(FeatDefn) + 10 * sizeof(FeatSetting), 0, 2} }, {{0,10},{1,11},{0,12},{10,13},{0,14},{1,15},{2,16},{2,17},{4,18},{1,19},{2,20}} }; #pragma pack(pop) class face_handle { public: typedef std::pair<const void *, size_t> table_t; static const table_t no_table; face_handle(const char *backing_font_path = 0) : _header(0), _dir(0) { if (!backing_font_path) return; std::ifstream font_file(backing_font_path, std::ifstream::binary); const size_t font_size = size_t(font_file.seekg(0, std::ios::end).tellg()); font_file.seekg(0, std::ios::beg); _header = new char [font_size]; font_file.read(const_cast<char *>(_header), font_size); if (!TtfUtil::CheckHeader(_header)) throw std::runtime_error(std::string(backing_font_path) + ": invalid font"); size_t dir_off, dir_sz; if (!TtfUtil::GetTableDirInfo(_header, dir_off, dir_sz)) throw std::runtime_error(std::string(backing_font_path) + ": invalid font"); _dir = _header + dir_off; } void replace_table(const TtfUtil::Tag name, const void * const data, size_t len) throw() { _tables[name] = std::make_pair(data, len); } const table_t & operator [] (const TtfUtil::Tag name) const throw() { const table_t & table = _tables[name]; if (table.first) return table; size_t off, len; if (!TtfUtil::GetTableInfo(name, _header, _dir, off, len)) return no_table; return 
_tables[name] = table_t(_header + off, len); } static const gr_face_ops ops; private: static const void * get_table_fn(const void* afh, unsigned int name, size_t *len) { const face_handle & fh = *reinterpret_cast<const face_handle *>(afh); const table_t & t = fh[name]; *len = t.second; return t.first; } const char * _header, * _dir; mutable std::map<const TtfUtil::Tag, table_t> _tables; }; const face_handle::table_t face_handle::no_table = face_handle::table_t(reinterpret_cast<void *>(0),0); const gr_face_ops face_handle::ops = { sizeof(gr_face_ops), face_handle::get_table_fn, 0 }; template <typename T> void testAssert(const char * msg, const T b) { if (!b) { fprintf(stderr, msg, b); exit(1); } } template <typename T, typename R> void testAssertEqual(const char * msg, const T a, const R b) { if (a != T(b)) { fprintf(stderr, msg, a, T(b)); exit(1); } } face_handle dummyFace; template <class T> void testFeatTable(const T & table, const char * testName) { FeatureMap testFeatureMap; dummyFace.replace_table(TtfUtil::Tag::Feat, &table, sizeof(T)); gr_face * face = gr_make_face_with_ops(&dummyFace, &face_handle::ops, gr_face_dumbRendering); if (!face) throw std::runtime_error("failed to load font"); bool readStatus = testFeatureMap.readFeats(*face); testAssert("readFeats", readStatus); fprintf(stderr, testName, NULL); testAssertEqual("test num features %hu,%hu\n", testFeatureMap.numFeats(), table.m_header.m_numFeat); for (size_t i = 0; i < sizeof(table.m_defs) / sizeof(FeatDefn); i++) { const FeatureRef * ref = testFeatureMap.findFeatureRef(table.m_defs[i].m_featId); testAssert("test feat\n", ref); testAssertEqual("test feat settings %hu %hu\n", ref->getNumSettings(), table.m_defs[i].m_numFeatSettings); testAssertEqual("test feat label %hu %hu\n", ref->getNameId(), table.m_defs[i].m_label); size_t settingsIndex = (table.m_defs[i].m_settingsOffset - sizeof(FeatHeader) - (sizeof(FeatDefn) * table.m_header.m_numFeat)) / sizeof(FeatSetting); for (size_t j = 0; j < 
table.m_defs[i].m_numFeatSettings; j++) { testAssertEqual("setting label %hu %hu\n", ref->getSettingName(j), table.m_settings[settingsIndex+j].m_label); } } gr_face_destroy(face); } int main(int argc, char * argv[]) { gr_face * face = 0; try { if (argc != 2) throw std::length_error("not enough arguments: need a backing font"); dummyFace = face_handle(argv[1]); testFeatTable<FeatTableTestA>(testDataA, "A\n"); testFeatTable<FeatTableTestB>(testDataB, "B\n"); testFeatTable<FeatTableTestB>(testDataBunsorted, "Bu\n"); testFeatTable<FeatTableTestC>(testDataCunsorted, "C\n"); testFeatTable<FeatTableTestD>(testDataDunsorted, "D\n"); testFeatTable<FeatTableTestE>(testDataE, "E\n"); // test a bad settings offset stradling the end of the table FeatureMap testFeatureMap; dummyFace.replace_table(TtfUtil::Tag::Feat, &testBadOffset, sizeof testBadOffset); face = gr_make_face_with_ops(&dummyFace, &face_handle::ops, gr_face_dumbRendering); bool readStatus = testFeatureMap.readFeats(*face); testAssert("fail gracefully on bad table", !readStatus); } catch (std::exception & e) { fprintf(stderr, "%s: %s\n", argv[0], e.what()); gr_face_destroy(face); return 1; } gr_face_destroy(face); return 0; }
/* GRAPHITE2 LICENSING Copyright 2010, SIL International All rights reserved. This library is free software; you can redistribute it and/or modify it under the terms of the GNU Lesser General Public License as published by the Free Software Foundation; either version 2.1 of License, or (at your option) any later version. This program is distributed in the hope that it will be useful, but WITHOUT ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU Lesser General Public License for more details. You should also have received a copy of the GNU Lesser General Public License along with this library in the file named "LICENSE". If not, write to the Free Software Foundation, 51 Franklin Street, Suite 500, Boston, MA 02110-1335, USA or visit their web page on the internet at http://www.fsf.org/licenses/lgpl.html. */ #include <cstdlib> #include <stdexcept> #include <fstream> #include <iostream> #include <map> #include <string> #include <graphite2/Font.h> #include "inc/Endian.h" #include "inc/Face.h" #include "inc/FeatureMap.h" #include "inc/TtfUtil.h" #pragma pack(push, 1) using namespace graphite2; template<typename T> class _be { T _v; public: _be(const T & t) throw() {_v = be::swap<T>(t);} operator T () const throw() {return be::swap<T>(_v); } }; struct FeatHeader { _be<gr_uint16> m_major; _be<gr_uint16> m_minor; _be<gr_uint16> m_numFeat; _be<gr_uint16> m_reserved1; _be<gr_uint32> m_reserved2; }; struct FeatDefn { _be<gr_uint32> m_featId; _be<gr_uint16> m_numFeatSettings; _be<gr_uint16> m_reserved1; _be<gr_uint32> m_settingsOffset; _be<gr_uint16> m_flags; _be<gr_uint16> m_label; }; struct FeatSetting { _be<gr_int16> m_value; _be<gr_uint16> m_label; }; struct FeatTableTestA { FeatHeader m_header; FeatDefn m_defs[1]; FeatSetting m_settings[2]; }; const FeatTableTestA testDataA = { { 2, 0, 1, 0, 0}, {{0x41424344, 2, 0, sizeof(FeatHeader) + sizeof(FeatDefn), 0, 1}}, {{0,10},{1,11}} }; struct FeatTableTestB { 
FeatHeader m_header; FeatDefn m_defs[2]; FeatSetting m_settings[4]; }; const FeatTableTestB testDataB = { { 2, 0, 2, 0, 0}, {{0x41424344, 2, 0, sizeof(FeatHeader) + 2 * sizeof(FeatDefn), 0, 1}, {0x41424345, 2, 0, sizeof(FeatHeader) + 2 * sizeof(FeatDefn) + 2 * sizeof(FeatSetting), 0, 2}}, {{0,10},{1,11},{0,12},{1,13}} }; const FeatTableTestB testDataBunsorted = { { 2, 0, 2, 0, 0}, {{0x41424345, 2, 0, sizeof(FeatHeader) + 2 * sizeof(FeatDefn) + 2 * sizeof(FeatSetting), 0, 2}, {0x41424344, 2, 0, sizeof(FeatHeader) + 2 * sizeof(FeatDefn), 0, 1}}, {{0,10},{1,11},{0,12},{1,13}} }; struct FeatTableTestC { FeatHeader m_header; FeatDefn m_defs[3]; FeatSetting m_settings[7]; }; const FeatTableTestC testDataCunsorted = { { 2, 0, 3, 0, 0}, {{0x41424343, 3, 0, sizeof(FeatHeader) + 3 * sizeof(FeatDefn) + 4 * sizeof(FeatSetting), 0, 1}, {0x41424345, 2, 0, sizeof(FeatHeader) + 3 * sizeof(FeatDefn) + 2 * sizeof(FeatSetting), 0, 3}, {0x41424344, 2, 0, sizeof(FeatHeader) + 3 * sizeof(FeatDefn), 0, 2}}, {{0,10},{1,11},{0,12},{1,13},{0,14},{1,15},{2,16}} }; struct FeatTableTestD { FeatHeader m_header; FeatDefn m_defs[4]; FeatSetting m_settings[9]; }; const FeatTableTestD testDataDunsorted = { { 2, 0, 4, 0, 0}, {{400, 3, 0, sizeof(FeatHeader) + 4 * sizeof(FeatDefn) + 4 * sizeof(FeatSetting), 0, 1}, {100, 2, 0, sizeof(FeatHeader) + 4 * sizeof(FeatDefn) + 2 * sizeof(FeatSetting), 0, 3}, {300, 2, 0, sizeof(FeatHeader) + 4 * sizeof(FeatDefn), 0, 2}, {200, 2, 0, sizeof(FeatHeader) + 4 * sizeof(FeatDefn) + 7 * sizeof(FeatSetting), 0, 2} }, {{0,10},{1,11},{0,12},{10,13},{0,14},{1,15},{2,16},{2,17},{4,18}} }; struct FeatTableTestE { FeatHeader m_header; FeatDefn m_defs[5]; FeatSetting m_settings[11]; }; const FeatTableTestE testDataE = { { 2, 0, 5, 0, 0}, {{400, 3, 0, sizeof(FeatHeader) + 5 * sizeof(FeatDefn) + 4 * sizeof(FeatSetting), 0, 1}, {100, 2, 0, sizeof(FeatHeader) + 5 * sizeof(FeatDefn) + 2 * sizeof(FeatSetting), 0, 3}, {500, 2, 0, sizeof(FeatHeader) + 5 * sizeof(FeatDefn) + 9 * 
sizeof(FeatSetting), 0, 3}, {300, 2, 0, sizeof(FeatHeader) + 5 * sizeof(FeatDefn), 0, 2}, {200, 2, 0, sizeof(FeatHeader) + 5 * sizeof(FeatDefn) + 7 * sizeof(FeatSetting), 0, 2} }, {{0,10},{1,11},{0,12},{10,13},{0,14},{1,15},{2,16},{2,17},{4,18},{1,19},{2,20}} }; const FeatTableTestE testBadOffset = { { 2, 0, 5, 0, 0}, {{400, 3, 0, sizeof(FeatHeader) + 5 * sizeof(FeatDefn) + 4 * sizeof(FeatSetting), 0, 1}, {100, 2, 0, sizeof(FeatHeader) + 5 * sizeof(FeatDefn) + 2 * sizeof(FeatSetting), 0, 3}, {500, 2, 0, sizeof(FeatHeader) + 5 * sizeof(FeatDefn) + 9 * sizeof(FeatSetting), 0, 3}, {300, 2, 0, sizeof(FeatHeader) + 5 * sizeof(FeatDefn), 0, 2}, {200, 2, 0, sizeof(FeatHeader) + 5 * sizeof(FeatDefn) + 10 * sizeof(FeatSetting), 0, 2} }, {{0,10},{1,11},{0,12},{10,13},{0,14},{1,15},{2,16},{2,17},{4,18},{1,19},{2,20}} }; #pragma pack(pop) class face_handle { public: typedef std::pair<const void *, size_t> table_t; static const table_t no_table; face_handle(const char *backing_font_path = 0) : _header(0), _dir(0) { if (!backing_font_path) return; std::ifstream font_file(backing_font_path, std::ifstream::binary); const size_t font_size = size_t(font_file.seekg(0, std::ios::end).tellg()); font_file.seekg(0, std::ios::beg); _header = new char [font_size]; font_file.read(const_cast<char *>(_header), font_size); if (!TtfUtil::CheckHeader(_header)) throw std::runtime_error(std::string(backing_font_path) + ": invalid font"); size_t dir_off, dir_sz; if (!TtfUtil::GetTableDirInfo(_header, dir_off, dir_sz)) throw std::runtime_error(std::string(backing_font_path) + ": invalid font"); _dir = _header + dir_off; } void replace_table(const TtfUtil::Tag name, const void * const data, size_t len) throw() { _tables[name] = std::make_pair(data, len); } const table_t & operator [] (const TtfUtil::Tag name) const throw() { const table_t & table = _tables[name]; if (table.first) return table; size_t off, len; if (!TtfUtil::GetTableInfo(name, _header, _dir, off, len)) return no_table; return 
_tables[name] = table_t(_header + off, len); } static const gr_face_ops ops; private: static const void * get_table_fn(const void* afh, unsigned int name, size_t *len) { const face_handle & fh = *reinterpret_cast<const face_handle *>(afh); const table_t & t = fh[name]; *len = t.second; return t.first; } const char * _header, * _dir; mutable std::map<const TtfUtil::Tag, table_t> _tables; }; const face_handle::table_t face_handle::no_table = face_handle::table_t(reinterpret_cast<void *>(0),0); const gr_face_ops face_handle::ops = { sizeof(gr_face_ops), face_handle::get_table_fn, 0 }; template <typename T> void testAssert(const char * msg, const T b) { if (!b) { fprintf(stderr, msg, b); exit(1); } } template <typename T, typename R> void testAssertEqual(const char * msg, const T a, const R b) { if (a != T(b)) { fprintf(stderr, msg, a, T(b)); exit(1); } } face_handle dummyFace; template <class T> void testFeatTable(const T & table, const char * testName) { FeatureMap testFeatureMap; dummyFace.replace_table(TtfUtil::Tag::Feat, &table, sizeof(T)); gr_face * face = gr_make_face_with_ops(&dummyFace, &face_handle::ops, 0); if (!face) throw std::runtime_error("failed to load font"); bool readStatus = testFeatureMap.readFeats(*face); testAssert("readFeats", readStatus); fprintf(stderr, testName, NULL); testAssertEqual("test num features %hu,%hu\n", testFeatureMap.numFeats(), table.m_header.m_numFeat); for (size_t i = 0; i < sizeof(table.m_defs) / sizeof(FeatDefn); i++) { const FeatureRef * ref = testFeatureMap.findFeatureRef(table.m_defs[i].m_featId); testAssert("test feat\n", ref); testAssertEqual("test feat settings %hu %hu\n", ref->getNumSettings(), table.m_defs[i].m_numFeatSettings); testAssertEqual("test feat label %hu %hu\n", ref->getNameId(), table.m_defs[i].m_label); size_t settingsIndex = (table.m_defs[i].m_settingsOffset - sizeof(FeatHeader) - (sizeof(FeatDefn) * table.m_header.m_numFeat)) / sizeof(FeatSetting); for (size_t j = 0; j < 
table.m_defs[i].m_numFeatSettings; j++) { testAssertEqual("setting label %hu %hu\n", ref->getSettingName(j), table.m_settings[settingsIndex+j].m_label); } } gr_face_destroy(face); } int main(int argc, char * argv[]) { gr_face * face = 0; try { if (argc != 2) throw std::length_error("not enough arguments: need a backing font"); dummyFace = face_handle(argv[1]); testFeatTable<FeatTableTestA>(testDataA, "A\n"); testFeatTable<FeatTableTestB>(testDataB, "B\n"); testFeatTable<FeatTableTestB>(testDataBunsorted, "Bu\n"); testFeatTable<FeatTableTestC>(testDataCunsorted, "C\n"); testFeatTable<FeatTableTestD>(testDataDunsorted, "D\n"); testFeatTable<FeatTableTestE>(testDataE, "E\n"); // test a bad settings offset stradling the end of the table FeatureMap testFeatureMap; dummyFace.replace_table(TtfUtil::Tag::Feat, &testBadOffset, sizeof testBadOffset); face = gr_make_face_with_ops(&dummyFace, &face_handle::ops, 0); testAssert("fail gracefully on bad table", !face); } catch (std::exception & e) { fprintf(stderr, "%s: %s\n", argv[0], e.what()); gr_face_destroy(face); return 1; } gr_face_destroy(face); return 0; }
template <class T> void testFeatTable(const T & table, const char * testName) { FeatureMap testFeatureMap; dummyFace.replace_table(TtfUtil::Tag::Feat, &table, sizeof(T)); gr_face * face = gr_make_face_with_ops(&dummyFace, &face_handle::ops, gr_face_dumbRendering); if (!face) throw std::runtime_error("failed to load font"); bool readStatus = testFeatureMap.readFeats(*face); testAssert("readFeats", readStatus); fprintf(stderr, testName, NULL); testAssertEqual("test num features %hu,%hu\n", testFeatureMap.numFeats(), table.m_header.m_numFeat); for (size_t i = 0; i < sizeof(table.m_defs) / sizeof(FeatDefn); i++) { const FeatureRef * ref = testFeatureMap.findFeatureRef(table.m_defs[i].m_featId); testAssert("test feat\n", ref); testAssertEqual("test feat settings %hu %hu\n", ref->getNumSettings(), table.m_defs[i].m_numFeatSettings); testAssertEqual("test feat label %hu %hu\n", ref->getNameId(), table.m_defs[i].m_label); size_t settingsIndex = (table.m_defs[i].m_settingsOffset - sizeof(FeatHeader) - (sizeof(FeatDefn) * table.m_header.m_numFeat)) / sizeof(FeatSetting); for (size_t j = 0; j < table.m_defs[i].m_numFeatSettings; j++) { testAssertEqual("setting label %hu %hu\n", ref->getSettingName(j), table.m_settings[settingsIndex+j].m_label); } } gr_face_destroy(face); }
template <class T> void testFeatTable(const T & table, const char * testName) { FeatureMap testFeatureMap; dummyFace.replace_table(TtfUtil::Tag::Feat, &table, sizeof(T)); gr_face * face = gr_make_face_with_ops(&dummyFace, &face_handle::ops, 0); if (!face) throw std::runtime_error("failed to load font"); bool readStatus = testFeatureMap.readFeats(*face); testAssert("readFeats", readStatus); fprintf(stderr, testName, NULL); testAssertEqual("test num features %hu,%hu\n", testFeatureMap.numFeats(), table.m_header.m_numFeat); for (size_t i = 0; i < sizeof(table.m_defs) / sizeof(FeatDefn); i++) { const FeatureRef * ref = testFeatureMap.findFeatureRef(table.m_defs[i].m_featId); testAssert("test feat\n", ref); testAssertEqual("test feat settings %hu %hu\n", ref->getNumSettings(), table.m_defs[i].m_numFeatSettings); testAssertEqual("test feat label %hu %hu\n", ref->getNameId(), table.m_defs[i].m_label); size_t settingsIndex = (table.m_defs[i].m_settingsOffset - sizeof(FeatHeader) - (sizeof(FeatDefn) * table.m_header.m_numFeat)) / sizeof(FeatSetting); for (size_t j = 0; j < table.m_defs[i].m_numFeatSettings; j++) { testAssertEqual("setting label %hu %hu\n", ref->getSettingName(j), table.m_settings[settingsIndex+j].m_label); } } gr_face_destroy(face); }
{'added': [(246, ' gr_face * face = gr_make_face_with_ops(&dummyFace, &face_handle::ops, 0);'), (288, '\t\tface = gr_make_face_with_ops(&dummyFace, &face_handle::ops, 0);'), (289, '\t\ttestAssert("fail gracefully on bad table", !face);')], 'deleted': [(246, ' gr_face * face = gr_make_face_with_ops(&dummyFace, &face_handle::ops, gr_face_dumbRendering);'), (288, '\t\tface = gr_make_face_with_ops(&dummyFace, &face_handle::ops, gr_face_dumbRendering);'), (289, '\t\tbool readStatus = testFeatureMap.readFeats(*face);'), (290, '\t\ttestAssert("fail gracefully on bad table", !readStatus);')]}
3
4
240
2,381
26
290
4
https://github.com/silnrsi/graphite
CVE-2018-7999
CWE-476
3,082
ip6_tables.c
C
check_compat_entry_size_and_hooks
/* * Packet matching code. * * Copyright (C) 1999 Paul `Rusty' Russell & Michael J. Neuling * Copyright (C) 2000-2005 Netfilter Core Team <coreteam@netfilter.org> * Copyright (c) 2006-2010 Patrick McHardy <kaber@trash.net> * * This program is free software; you can redistribute it and/or modify * it under the terms of the GNU General Public License version 2 as * published by the Free Software Foundation. */ #define pr_fmt(fmt) KBUILD_MODNAME ": " fmt #include <linux/kernel.h> #include <linux/capability.h> #include <linux/in.h> #include <linux/skbuff.h> #include <linux/kmod.h> #include <linux/vmalloc.h> #include <linux/netdevice.h> #include <linux/module.h> #include <linux/poison.h> #include <linux/icmpv6.h> #include <net/ipv6.h> #include <net/compat.h> #include <asm/uaccess.h> #include <linux/mutex.h> #include <linux/proc_fs.h> #include <linux/err.h> #include <linux/cpumask.h> #include <linux/netfilter_ipv6/ip6_tables.h> #include <linux/netfilter/x_tables.h> #include <net/netfilter/nf_log.h> #include "../../netfilter/xt_repldata.h" MODULE_LICENSE("GPL"); MODULE_AUTHOR("Netfilter Core Team <coreteam@netfilter.org>"); MODULE_DESCRIPTION("IPv6 packet filter"); /*#define DEBUG_IP_FIREWALL*/ /*#define DEBUG_ALLOW_ALL*/ /* Useful for remote debugging */ /*#define DEBUG_IP_FIREWALL_USER*/ #ifdef DEBUG_IP_FIREWALL #define dprintf(format, args...) pr_info(format , ## args) #else #define dprintf(format, args...) #endif #ifdef DEBUG_IP_FIREWALL_USER #define duprintf(format, args...) pr_info(format , ## args) #else #define duprintf(format, args...) #endif #ifdef CONFIG_NETFILTER_DEBUG #define IP_NF_ASSERT(x) WARN_ON(!(x)) #else #define IP_NF_ASSERT(x) #endif #if 0 /* All the better to debug you with... 
*/ #define static #define inline #endif void *ip6t_alloc_initial_table(const struct xt_table *info) { return xt_alloc_initial_table(ip6t, IP6T); } EXPORT_SYMBOL_GPL(ip6t_alloc_initial_table); /* We keep a set of rules for each CPU, so we can avoid write-locking them in the softirq when updating the counters and therefore only need to read-lock in the softirq; doing a write_lock_bh() in user context stops packets coming through and allows user context to read the counters or update the rules. Hence the start of any table is given by get_table() below. */ /* Returns whether matches rule or not. */ /* Performance critical - called for every packet */ static inline bool ip6_packet_match(const struct sk_buff *skb, const char *indev, const char *outdev, const struct ip6t_ip6 *ip6info, unsigned int *protoff, int *fragoff, bool *hotdrop) { unsigned long ret; const struct ipv6hdr *ipv6 = ipv6_hdr(skb); #define FWINV(bool, invflg) ((bool) ^ !!(ip6info->invflags & (invflg))) if (FWINV(ipv6_masked_addr_cmp(&ipv6->saddr, &ip6info->smsk, &ip6info->src), IP6T_INV_SRCIP) || FWINV(ipv6_masked_addr_cmp(&ipv6->daddr, &ip6info->dmsk, &ip6info->dst), IP6T_INV_DSTIP)) { dprintf("Source or dest mismatch.\n"); /* dprintf("SRC: %u. Mask: %u. Target: %u.%s\n", ip->saddr, ipinfo->smsk.s_addr, ipinfo->src.s_addr, ipinfo->invflags & IP6T_INV_SRCIP ? " (INV)" : ""); dprintf("DST: %u. Mask: %u. Target: %u.%s\n", ip->daddr, ipinfo->dmsk.s_addr, ipinfo->dst.s_addr, ipinfo->invflags & IP6T_INV_DSTIP ? " (INV)" : "");*/ return false; } ret = ifname_compare_aligned(indev, ip6info->iniface, ip6info->iniface_mask); if (FWINV(ret != 0, IP6T_INV_VIA_IN)) { dprintf("VIA in mismatch (%s vs %s).%s\n", indev, ip6info->iniface, ip6info->invflags & IP6T_INV_VIA_IN ? 
" (INV)" : ""); return false; } ret = ifname_compare_aligned(outdev, ip6info->outiface, ip6info->outiface_mask); if (FWINV(ret != 0, IP6T_INV_VIA_OUT)) { dprintf("VIA out mismatch (%s vs %s).%s\n", outdev, ip6info->outiface, ip6info->invflags & IP6T_INV_VIA_OUT ? " (INV)" : ""); return false; } /* ... might want to do something with class and flowlabel here ... */ /* look for the desired protocol header */ if (ip6info->flags & IP6T_F_PROTO) { int protohdr; unsigned short _frag_off; protohdr = ipv6_find_hdr(skb, protoff, -1, &_frag_off, NULL); if (protohdr < 0) { if (_frag_off == 0) *hotdrop = true; return false; } *fragoff = _frag_off; dprintf("Packet protocol %hi ?= %s%hi.\n", protohdr, ip6info->invflags & IP6T_INV_PROTO ? "!":"", ip6info->proto); if (ip6info->proto == protohdr) { if (ip6info->invflags & IP6T_INV_PROTO) return false; return true; } /* We need match for the '-p all', too! */ if ((ip6info->proto != 0) && !(ip6info->invflags & IP6T_INV_PROTO)) return false; } return true; } /* should be ip6 safe */ static bool ip6_checkentry(const struct ip6t_ip6 *ipv6) { if (ipv6->flags & ~IP6T_F_MASK) { duprintf("Unknown flag bits set: %08X\n", ipv6->flags & ~IP6T_F_MASK); return false; } if (ipv6->invflags & ~IP6T_INV_MASK) { duprintf("Unknown invflag bits set: %08X\n", ipv6->invflags & ~IP6T_INV_MASK); return false; } return true; } static unsigned int ip6t_error(struct sk_buff *skb, const struct xt_action_param *par) { net_info_ratelimited("error: `%s'\n", (const char *)par->targinfo); return NF_DROP; } static inline struct ip6t_entry * get_entry(const void *base, unsigned int offset) { return (struct ip6t_entry *)(base + offset); } /* All zeroes == unconditional rule. 
*/ /* Mildly perf critical (only if packet tracing is on) */ static inline bool unconditional(const struct ip6t_ip6 *ipv6) { static const struct ip6t_ip6 uncond; return memcmp(ipv6, &uncond, sizeof(uncond)) == 0; } static inline const struct xt_entry_target * ip6t_get_target_c(const struct ip6t_entry *e) { return ip6t_get_target((struct ip6t_entry *)e); } #if IS_ENABLED(CONFIG_NETFILTER_XT_TARGET_TRACE) /* This cries for unification! */ static const char *const hooknames[] = { [NF_INET_PRE_ROUTING] = "PREROUTING", [NF_INET_LOCAL_IN] = "INPUT", [NF_INET_FORWARD] = "FORWARD", [NF_INET_LOCAL_OUT] = "OUTPUT", [NF_INET_POST_ROUTING] = "POSTROUTING", }; enum nf_ip_trace_comments { NF_IP6_TRACE_COMMENT_RULE, NF_IP6_TRACE_COMMENT_RETURN, NF_IP6_TRACE_COMMENT_POLICY, }; static const char *const comments[] = { [NF_IP6_TRACE_COMMENT_RULE] = "rule", [NF_IP6_TRACE_COMMENT_RETURN] = "return", [NF_IP6_TRACE_COMMENT_POLICY] = "policy", }; static struct nf_loginfo trace_loginfo = { .type = NF_LOG_TYPE_LOG, .u = { .log = { .level = LOGLEVEL_WARNING, .logflags = NF_LOG_MASK, }, }, }; /* Mildly perf critical (only if packet tracing is on) */ static inline int get_chainname_rulenum(const struct ip6t_entry *s, const struct ip6t_entry *e, const char *hookname, const char **chainname, const char **comment, unsigned int *rulenum) { const struct xt_standard_target *t = (void *)ip6t_get_target_c(s); if (strcmp(t->target.u.kernel.target->name, XT_ERROR_TARGET) == 0) { /* Head of user chain: ERROR target with chainname */ *chainname = t->target.data; (*rulenum) = 0; } else if (s == e) { (*rulenum)++; if (s->target_offset == sizeof(struct ip6t_entry) && strcmp(t->target.u.kernel.target->name, XT_STANDARD_TARGET) == 0 && t->verdict < 0 && unconditional(&s->ipv6)) { /* Tail of chains: STANDARD target (return/policy) */ *comment = *chainname == hookname ? 
comments[NF_IP6_TRACE_COMMENT_POLICY] : comments[NF_IP6_TRACE_COMMENT_RETURN]; } return 1; } else (*rulenum)++; return 0; } static void trace_packet(struct net *net, const struct sk_buff *skb, unsigned int hook, const struct net_device *in, const struct net_device *out, const char *tablename, const struct xt_table_info *private, const struct ip6t_entry *e) { const struct ip6t_entry *root; const char *hookname, *chainname, *comment; const struct ip6t_entry *iter; unsigned int rulenum = 0; root = get_entry(private->entries, private->hook_entry[hook]); hookname = chainname = hooknames[hook]; comment = comments[NF_IP6_TRACE_COMMENT_RULE]; xt_entry_foreach(iter, root, private->size - private->hook_entry[hook]) if (get_chainname_rulenum(iter, e, hookname, &chainname, &comment, &rulenum) != 0) break; nf_log_trace(net, AF_INET6, hook, skb, in, out, &trace_loginfo, "TRACE: %s:%s:%s:%u ", tablename, chainname, comment, rulenum); } #endif static inline struct ip6t_entry * ip6t_next_entry(const struct ip6t_entry *entry) { return (void *)entry + entry->next_offset; } /* Returns one of the generic firewall policies, like NF_ACCEPT. */ unsigned int ip6t_do_table(struct sk_buff *skb, const struct nf_hook_state *state, struct xt_table *table) { unsigned int hook = state->hook; static const char nulldevname[IFNAMSIZ] __attribute__((aligned(sizeof(long)))); /* Initializing verdict to NF_DROP keeps gcc happy. */ unsigned int verdict = NF_DROP; const char *indev, *outdev; const void *table_base; struct ip6t_entry *e, **jumpstack; unsigned int stackidx, cpu; const struct xt_table_info *private; struct xt_action_param acpar; unsigned int addend; /* Initialization */ stackidx = 0; indev = state->in ? state->in->name : nulldevname; outdev = state->out ? state->out->name : nulldevname; /* We handle fragments by dealing with the first fragment as * if it was a normal packet. 
All other fragments are treated * normally, except that they will NEVER match rules that ask * things we don't know, ie. tcp syn flag or ports). If the * rule is also a fragment-specific rule, non-fragments won't * match it. */ acpar.hotdrop = false; acpar.net = state->net; acpar.in = state->in; acpar.out = state->out; acpar.family = NFPROTO_IPV6; acpar.hooknum = hook; IP_NF_ASSERT(table->valid_hooks & (1 << hook)); local_bh_disable(); addend = xt_write_recseq_begin(); private = table->private; /* * Ensure we load private-> members after we've fetched the base * pointer. */ smp_read_barrier_depends(); cpu = smp_processor_id(); table_base = private->entries; jumpstack = (struct ip6t_entry **)private->jumpstack[cpu]; /* Switch to alternate jumpstack if we're being invoked via TEE. * TEE issues XT_CONTINUE verdict on original skb so we must not * clobber the jumpstack. * * For recursion via REJECT or SYNPROXY the stack will be clobbered * but it is no problem since absolute verdict is issued by these. */ if (static_key_false(&xt_tee_enabled)) jumpstack += private->stacksize * __this_cpu_read(nf_skb_duplicated); e = get_entry(table_base, private->hook_entry[hook]); do { const struct xt_entry_target *t; const struct xt_entry_match *ematch; struct xt_counters *counter; IP_NF_ASSERT(e); acpar.thoff = 0; if (!ip6_packet_match(skb, indev, outdev, &e->ipv6, &acpar.thoff, &acpar.fragoff, &acpar.hotdrop)) { no_match: e = ip6t_next_entry(e); continue; } xt_ematch_foreach(ematch, e) { acpar.match = ematch->u.kernel.match; acpar.matchinfo = ematch->data; if (!acpar.match->match(skb, &acpar)) goto no_match; } counter = xt_get_this_cpu_counter(&e->counters); ADD_COUNTER(*counter, skb->len, 1); t = ip6t_get_target_c(e); IP_NF_ASSERT(t->u.kernel.target); #if IS_ENABLED(CONFIG_NETFILTER_XT_TARGET_TRACE) /* The packet is traced: log it */ if (unlikely(skb->nf_trace)) trace_packet(state->net, skb, hook, state->in, state->out, table->name, private, e); #endif /* Standard target? 
*/ if (!t->u.kernel.target->target) { int v; v = ((struct xt_standard_target *)t)->verdict; if (v < 0) { /* Pop from stack? */ if (v != XT_RETURN) { verdict = (unsigned int)(-v) - 1; break; } if (stackidx == 0) e = get_entry(table_base, private->underflow[hook]); else e = ip6t_next_entry(jumpstack[--stackidx]); continue; } if (table_base + v != ip6t_next_entry(e) && !(e->ipv6.flags & IP6T_F_GOTO)) { jumpstack[stackidx++] = e; } e = get_entry(table_base, v); continue; } acpar.target = t->u.kernel.target; acpar.targinfo = t->data; verdict = t->u.kernel.target->target(skb, &acpar); if (verdict == XT_CONTINUE) e = ip6t_next_entry(e); else /* Verdict */ break; } while (!acpar.hotdrop); xt_write_recseq_end(addend); local_bh_enable(); #ifdef DEBUG_ALLOW_ALL return NF_ACCEPT; #else if (acpar.hotdrop) return NF_DROP; else return verdict; #endif } /* Figures out from what hook each rule can be called: returns 0 if there are loops. Puts hook bitmask in comefrom. */ static int mark_source_chains(const struct xt_table_info *newinfo, unsigned int valid_hooks, void *entry0) { unsigned int hook; /* No recursion; use packet counter to save back ptrs (reset to 0 as we leave), and comefrom to save source hook bitmask */ for (hook = 0; hook < NF_INET_NUMHOOKS; hook++) { unsigned int pos = newinfo->hook_entry[hook]; struct ip6t_entry *e = (struct ip6t_entry *)(entry0 + pos); if (!(valid_hooks & (1 << hook))) continue; /* Set initial back pointer. */ e->counters.pcnt = pos; for (;;) { const struct xt_standard_target *t = (void *)ip6t_get_target_c(e); int visited = e->comefrom & (1 << hook); if (e->comefrom & (1 << NF_INET_NUMHOOKS)) { pr_err("iptables: loop hook %u pos %u %08X.\n", hook, pos, e->comefrom); return 0; } e->comefrom |= ((1 << hook) | (1 << NF_INET_NUMHOOKS)); /* Unconditional return/END. 
*/ if ((e->target_offset == sizeof(struct ip6t_entry) && (strcmp(t->target.u.user.name, XT_STANDARD_TARGET) == 0) && t->verdict < 0 && unconditional(&e->ipv6)) || visited) { unsigned int oldpos, size; if ((strcmp(t->target.u.user.name, XT_STANDARD_TARGET) == 0) && t->verdict < -NF_MAX_VERDICT - 1) { duprintf("mark_source_chains: bad " "negative verdict (%i)\n", t->verdict); return 0; } /* Return: backtrack through the last big jump. */ do { e->comefrom ^= (1<<NF_INET_NUMHOOKS); #ifdef DEBUG_IP_FIREWALL_USER if (e->comefrom & (1 << NF_INET_NUMHOOKS)) { duprintf("Back unset " "on hook %u " "rule %u\n", hook, pos); } #endif oldpos = pos; pos = e->counters.pcnt; e->counters.pcnt = 0; /* We're at the start. */ if (pos == oldpos) goto next; e = (struct ip6t_entry *) (entry0 + pos); } while (oldpos == pos + e->next_offset); /* Move along one */ size = e->next_offset; e = (struct ip6t_entry *) (entry0 + pos + size); e->counters.pcnt = pos; pos += size; } else { int newpos = t->verdict; if (strcmp(t->target.u.user.name, XT_STANDARD_TARGET) == 0 && newpos >= 0) { if (newpos > newinfo->size - sizeof(struct ip6t_entry)) { duprintf("mark_source_chains: " "bad verdict (%i)\n", newpos); return 0; } /* This a jump; chase it. */ duprintf("Jump rule %u -> %u\n", pos, newpos); } else { /* ... 
this is a fallthru */ newpos = pos + e->next_offset; } e = (struct ip6t_entry *) (entry0 + newpos); e->counters.pcnt = pos; pos = newpos; } } next: duprintf("Finished chain %u\n", hook); } return 1; } static void cleanup_match(struct xt_entry_match *m, struct net *net) { struct xt_mtdtor_param par; par.net = net; par.match = m->u.kernel.match; par.matchinfo = m->data; par.family = NFPROTO_IPV6; if (par.match->destroy != NULL) par.match->destroy(&par); module_put(par.match->me); } static int check_entry(const struct ip6t_entry *e) { const struct xt_entry_target *t; if (!ip6_checkentry(&e->ipv6)) return -EINVAL; if (e->target_offset + sizeof(struct xt_entry_target) > e->next_offset) return -EINVAL; t = ip6t_get_target_c(e); if (e->target_offset + t->u.target_size > e->next_offset) return -EINVAL; return 0; } static int check_match(struct xt_entry_match *m, struct xt_mtchk_param *par) { const struct ip6t_ip6 *ipv6 = par->entryinfo; int ret; par->match = m->u.kernel.match; par->matchinfo = m->data; ret = xt_check_match(par, m->u.match_size - sizeof(*m), ipv6->proto, ipv6->invflags & IP6T_INV_PROTO); if (ret < 0) { duprintf("ip_tables: check failed for `%s'.\n", par.match->name); return ret; } return 0; } static int find_check_match(struct xt_entry_match *m, struct xt_mtchk_param *par) { struct xt_match *match; int ret; match = xt_request_find_match(NFPROTO_IPV6, m->u.user.name, m->u.user.revision); if (IS_ERR(match)) { duprintf("find_check_match: `%s' not found\n", m->u.user.name); return PTR_ERR(match); } m->u.kernel.match = match; ret = check_match(m, par); if (ret) goto err; return 0; err: module_put(m->u.kernel.match->me); return ret; } static int check_target(struct ip6t_entry *e, struct net *net, const char *name) { struct xt_entry_target *t = ip6t_get_target(e); struct xt_tgchk_param par = { .net = net, .table = name, .entryinfo = e, .target = t->u.kernel.target, .targinfo = t->data, .hook_mask = e->comefrom, .family = NFPROTO_IPV6, }; int ret; t = 
ip6t_get_target(e); ret = xt_check_target(&par, t->u.target_size - sizeof(*t), e->ipv6.proto, e->ipv6.invflags & IP6T_INV_PROTO); if (ret < 0) { duprintf("ip_tables: check failed for `%s'.\n", t->u.kernel.target->name); return ret; } return 0; } static int find_check_entry(struct ip6t_entry *e, struct net *net, const char *name, unsigned int size) { struct xt_entry_target *t; struct xt_target *target; int ret; unsigned int j; struct xt_mtchk_param mtpar; struct xt_entry_match *ematch; e->counters.pcnt = xt_percpu_counter_alloc(); if (IS_ERR_VALUE(e->counters.pcnt)) return -ENOMEM; j = 0; mtpar.net = net; mtpar.table = name; mtpar.entryinfo = &e->ipv6; mtpar.hook_mask = e->comefrom; mtpar.family = NFPROTO_IPV6; xt_ematch_foreach(ematch, e) { ret = find_check_match(ematch, &mtpar); if (ret != 0) goto cleanup_matches; ++j; } t = ip6t_get_target(e); target = xt_request_find_target(NFPROTO_IPV6, t->u.user.name, t->u.user.revision); if (IS_ERR(target)) { duprintf("find_check_entry: `%s' not found\n", t->u.user.name); ret = PTR_ERR(target); goto cleanup_matches; } t->u.kernel.target = target; ret = check_target(e, net, name); if (ret) goto err; return 0; err: module_put(t->u.kernel.target->me); cleanup_matches: xt_ematch_foreach(ematch, e) { if (j-- == 0) break; cleanup_match(ematch, net); } xt_percpu_counter_free(e->counters.pcnt); return ret; } static bool check_underflow(const struct ip6t_entry *e) { const struct xt_entry_target *t; unsigned int verdict; if (!unconditional(&e->ipv6)) return false; t = ip6t_get_target_c(e); if (strcmp(t->u.user.name, XT_STANDARD_TARGET) != 0) return false; verdict = ((struct xt_standard_target *)t)->verdict; verdict = -verdict - 1; return verdict == NF_DROP || verdict == NF_ACCEPT; } static int check_entry_size_and_hooks(struct ip6t_entry *e, struct xt_table_info *newinfo, const unsigned char *base, const unsigned char *limit, const unsigned int *hook_entries, const unsigned int *underflows, unsigned int valid_hooks) { unsigned int h; 
int err; if ((unsigned long)e % __alignof__(struct ip6t_entry) != 0 || (unsigned char *)e + sizeof(struct ip6t_entry) >= limit) { duprintf("Bad offset %p\n", e); return -EINVAL; } if (e->next_offset < sizeof(struct ip6t_entry) + sizeof(struct xt_entry_target)) { duprintf("checking: element %p size %u\n", e, e->next_offset); return -EINVAL; } err = check_entry(e); if (err) return err; /* Check hooks & underflows */ for (h = 0; h < NF_INET_NUMHOOKS; h++) { if (!(valid_hooks & (1 << h))) continue; if ((unsigned char *)e - base == hook_entries[h]) newinfo->hook_entry[h] = hook_entries[h]; if ((unsigned char *)e - base == underflows[h]) { if (!check_underflow(e)) { pr_err("Underflows must be unconditional and " "use the STANDARD target with " "ACCEPT/DROP\n"); return -EINVAL; } newinfo->underflow[h] = underflows[h]; } } /* Clear counters and comefrom */ e->counters = ((struct xt_counters) { 0, 0 }); e->comefrom = 0; return 0; } static void cleanup_entry(struct ip6t_entry *e, struct net *net) { struct xt_tgdtor_param par; struct xt_entry_target *t; struct xt_entry_match *ematch; /* Cleanup all matches */ xt_ematch_foreach(ematch, e) cleanup_match(ematch, net); t = ip6t_get_target(e); par.net = net; par.target = t->u.kernel.target; par.targinfo = t->data; par.family = NFPROTO_IPV6; if (par.target->destroy != NULL) par.target->destroy(&par); module_put(par.target->me); xt_percpu_counter_free(e->counters.pcnt); } /* Checks and translates the user-supplied table segment (held in newinfo) */ static int translate_table(struct net *net, struct xt_table_info *newinfo, void *entry0, const struct ip6t_replace *repl) { struct ip6t_entry *iter; unsigned int i; int ret = 0; newinfo->size = repl->size; newinfo->number = repl->num_entries; /* Init all hooks to impossible value. 
*/ for (i = 0; i < NF_INET_NUMHOOKS; i++) { newinfo->hook_entry[i] = 0xFFFFFFFF; newinfo->underflow[i] = 0xFFFFFFFF; } duprintf("translate_table: size %u\n", newinfo->size); i = 0; /* Walk through entries, checking offsets. */ xt_entry_foreach(iter, entry0, newinfo->size) { ret = check_entry_size_and_hooks(iter, newinfo, entry0, entry0 + repl->size, repl->hook_entry, repl->underflow, repl->valid_hooks); if (ret != 0) return ret; ++i; if (strcmp(ip6t_get_target(iter)->u.user.name, XT_ERROR_TARGET) == 0) ++newinfo->stacksize; } if (i != repl->num_entries) { duprintf("translate_table: %u not %u entries\n", i, repl->num_entries); return -EINVAL; } /* Check hooks all assigned */ for (i = 0; i < NF_INET_NUMHOOKS; i++) { /* Only hooks which are valid */ if (!(repl->valid_hooks & (1 << i))) continue; if (newinfo->hook_entry[i] == 0xFFFFFFFF) { duprintf("Invalid hook entry %u %u\n", i, repl->hook_entry[i]); return -EINVAL; } if (newinfo->underflow[i] == 0xFFFFFFFF) { duprintf("Invalid underflow %u %u\n", i, repl->underflow[i]); return -EINVAL; } } if (!mark_source_chains(newinfo, repl->valid_hooks, entry0)) return -ELOOP; /* Finally, each sanity check must pass */ i = 0; xt_entry_foreach(iter, entry0, newinfo->size) { ret = find_check_entry(iter, net, repl->name, repl->size); if (ret != 0) break; ++i; } if (ret != 0) { xt_entry_foreach(iter, entry0, newinfo->size) { if (i-- == 0) break; cleanup_entry(iter, net); } return ret; } return ret; } static void get_counters(const struct xt_table_info *t, struct xt_counters counters[]) { struct ip6t_entry *iter; unsigned int cpu; unsigned int i; for_each_possible_cpu(cpu) { seqcount_t *s = &per_cpu(xt_recseq, cpu); i = 0; xt_entry_foreach(iter, t->entries, t->size) { struct xt_counters *tmp; u64 bcnt, pcnt; unsigned int start; tmp = xt_get_per_cpu_counter(&iter->counters, cpu); do { start = read_seqcount_begin(s); bcnt = tmp->bcnt; pcnt = tmp->pcnt; } while (read_seqcount_retry(s, start)); ADD_COUNTER(counters[i], bcnt, pcnt); ++i; 
} } } static struct xt_counters *alloc_counters(const struct xt_table *table) { unsigned int countersize; struct xt_counters *counters; const struct xt_table_info *private = table->private; /* We need atomic snapshot of counters: rest doesn't change (other than comefrom, which userspace doesn't care about). */ countersize = sizeof(struct xt_counters) * private->number; counters = vzalloc(countersize); if (counters == NULL) return ERR_PTR(-ENOMEM); get_counters(private, counters); return counters; } static int copy_entries_to_user(unsigned int total_size, const struct xt_table *table, void __user *userptr) { unsigned int off, num; const struct ip6t_entry *e; struct xt_counters *counters; const struct xt_table_info *private = table->private; int ret = 0; const void *loc_cpu_entry; counters = alloc_counters(table); if (IS_ERR(counters)) return PTR_ERR(counters); loc_cpu_entry = private->entries; if (copy_to_user(userptr, loc_cpu_entry, total_size) != 0) { ret = -EFAULT; goto free_counters; } /* FIXME: use iterator macros --RR */ /* ... 
then go back and fix counters and names */ for (off = 0, num = 0; off < total_size; off += e->next_offset, num++){ unsigned int i; const struct xt_entry_match *m; const struct xt_entry_target *t; e = (struct ip6t_entry *)(loc_cpu_entry + off); if (copy_to_user(userptr + off + offsetof(struct ip6t_entry, counters), &counters[num], sizeof(counters[num])) != 0) { ret = -EFAULT; goto free_counters; } for (i = sizeof(struct ip6t_entry); i < e->target_offset; i += m->u.match_size) { m = (void *)e + i; if (copy_to_user(userptr + off + i + offsetof(struct xt_entry_match, u.user.name), m->u.kernel.match->name, strlen(m->u.kernel.match->name)+1) != 0) { ret = -EFAULT; goto free_counters; } } t = ip6t_get_target_c(e); if (copy_to_user(userptr + off + e->target_offset + offsetof(struct xt_entry_target, u.user.name), t->u.kernel.target->name, strlen(t->u.kernel.target->name)+1) != 0) { ret = -EFAULT; goto free_counters; } } free_counters: vfree(counters); return ret; } #ifdef CONFIG_COMPAT static void compat_standard_from_user(void *dst, const void *src) { int v = *(compat_int_t *)src; if (v > 0) v += xt_compat_calc_jump(AF_INET6, v); memcpy(dst, &v, sizeof(v)); } static int compat_standard_to_user(void __user *dst, const void *src) { compat_int_t cv = *(int *)src; if (cv > 0) cv -= xt_compat_calc_jump(AF_INET6, cv); return copy_to_user(dst, &cv, sizeof(cv)) ? 
-EFAULT : 0; } static int compat_calc_entry(const struct ip6t_entry *e, const struct xt_table_info *info, const void *base, struct xt_table_info *newinfo) { const struct xt_entry_match *ematch; const struct xt_entry_target *t; unsigned int entry_offset; int off, i, ret; off = sizeof(struct ip6t_entry) - sizeof(struct compat_ip6t_entry); entry_offset = (void *)e - base; xt_ematch_foreach(ematch, e) off += xt_compat_match_offset(ematch->u.kernel.match); t = ip6t_get_target_c(e); off += xt_compat_target_offset(t->u.kernel.target); newinfo->size -= off; ret = xt_compat_add_offset(AF_INET6, entry_offset, off); if (ret) return ret; for (i = 0; i < NF_INET_NUMHOOKS; i++) { if (info->hook_entry[i] && (e < (struct ip6t_entry *)(base + info->hook_entry[i]))) newinfo->hook_entry[i] -= off; if (info->underflow[i] && (e < (struct ip6t_entry *)(base + info->underflow[i]))) newinfo->underflow[i] -= off; } return 0; } static int compat_table_info(const struct xt_table_info *info, struct xt_table_info *newinfo) { struct ip6t_entry *iter; const void *loc_cpu_entry; int ret; if (!newinfo || !info) return -EINVAL; /* we dont care about newinfo->entries */ memcpy(newinfo, info, offsetof(struct xt_table_info, entries)); newinfo->initial_entries = 0; loc_cpu_entry = info->entries; xt_compat_init_offsets(AF_INET6, info->number); xt_entry_foreach(iter, loc_cpu_entry, info->size) { ret = compat_calc_entry(iter, info, loc_cpu_entry, newinfo); if (ret != 0) return ret; } return 0; } #endif static int get_info(struct net *net, void __user *user, const int *len, int compat) { char name[XT_TABLE_MAXNAMELEN]; struct xt_table *t; int ret; if (*len != sizeof(struct ip6t_getinfo)) { duprintf("length %u != %zu\n", *len, sizeof(struct ip6t_getinfo)); return -EINVAL; } if (copy_from_user(name, user, sizeof(name)) != 0) return -EFAULT; name[XT_TABLE_MAXNAMELEN-1] = '\0'; #ifdef CONFIG_COMPAT if (compat) xt_compat_lock(AF_INET6); #endif t = try_then_request_module(xt_find_table_lock(net, AF_INET6, name), 
"ip6table_%s", name); if (!IS_ERR_OR_NULL(t)) { struct ip6t_getinfo info; const struct xt_table_info *private = t->private; #ifdef CONFIG_COMPAT struct xt_table_info tmp; if (compat) { ret = compat_table_info(private, &tmp); xt_compat_flush_offsets(AF_INET6); private = &tmp; } #endif memset(&info, 0, sizeof(info)); info.valid_hooks = t->valid_hooks; memcpy(info.hook_entry, private->hook_entry, sizeof(info.hook_entry)); memcpy(info.underflow, private->underflow, sizeof(info.underflow)); info.num_entries = private->number; info.size = private->size; strcpy(info.name, name); if (copy_to_user(user, &info, *len) != 0) ret = -EFAULT; else ret = 0; xt_table_unlock(t); module_put(t->me); } else ret = t ? PTR_ERR(t) : -ENOENT; #ifdef CONFIG_COMPAT if (compat) xt_compat_unlock(AF_INET6); #endif return ret; } static int get_entries(struct net *net, struct ip6t_get_entries __user *uptr, const int *len) { int ret; struct ip6t_get_entries get; struct xt_table *t; if (*len < sizeof(get)) { duprintf("get_entries: %u < %zu\n", *len, sizeof(get)); return -EINVAL; } if (copy_from_user(&get, uptr, sizeof(get)) != 0) return -EFAULT; if (*len != sizeof(struct ip6t_get_entries) + get.size) { duprintf("get_entries: %u != %zu\n", *len, sizeof(get) + get.size); return -EINVAL; } t = xt_find_table_lock(net, AF_INET6, get.name); if (!IS_ERR_OR_NULL(t)) { struct xt_table_info *private = t->private; duprintf("t->private->number = %u\n", private->number); if (get.size == private->size) ret = copy_entries_to_user(private->size, t, uptr->entrytable); else { duprintf("get_entries: I've got %u not %u!\n", private->size, get.size); ret = -EAGAIN; } module_put(t->me); xt_table_unlock(t); } else ret = t ? 
PTR_ERR(t) : -ENOENT; return ret; } static int __do_replace(struct net *net, const char *name, unsigned int valid_hooks, struct xt_table_info *newinfo, unsigned int num_counters, void __user *counters_ptr) { int ret; struct xt_table *t; struct xt_table_info *oldinfo; struct xt_counters *counters; struct ip6t_entry *iter; ret = 0; counters = vzalloc(num_counters * sizeof(struct xt_counters)); if (!counters) { ret = -ENOMEM; goto out; } t = try_then_request_module(xt_find_table_lock(net, AF_INET6, name), "ip6table_%s", name); if (IS_ERR_OR_NULL(t)) { ret = t ? PTR_ERR(t) : -ENOENT; goto free_newinfo_counters_untrans; } /* You lied! */ if (valid_hooks != t->valid_hooks) { duprintf("Valid hook crap: %08X vs %08X\n", valid_hooks, t->valid_hooks); ret = -EINVAL; goto put_module; } oldinfo = xt_replace_table(t, num_counters, newinfo, &ret); if (!oldinfo) goto put_module; /* Update module usage count based on number of rules */ duprintf("do_replace: oldnum=%u, initnum=%u, newnum=%u\n", oldinfo->number, oldinfo->initial_entries, newinfo->number); if ((oldinfo->number > oldinfo->initial_entries) || (newinfo->number <= oldinfo->initial_entries)) module_put(t->me); if ((oldinfo->number > oldinfo->initial_entries) && (newinfo->number <= oldinfo->initial_entries)) module_put(t->me); /* Get the old counters, and synchronize with replace */ get_counters(oldinfo, counters); /* Decrease module usage counts and free resource */ xt_entry_foreach(iter, oldinfo->entries, oldinfo->size) cleanup_entry(iter, net); xt_free_table_info(oldinfo); if (copy_to_user(counters_ptr, counters, sizeof(struct xt_counters) * num_counters) != 0) { /* Silent error, can't fail, new table is already in place */ net_warn_ratelimited("ip6tables: counters copy to user failed while replacing table\n"); } vfree(counters); xt_table_unlock(t); return ret; put_module: module_put(t->me); xt_table_unlock(t); free_newinfo_counters_untrans: vfree(counters); out: return ret; } static int do_replace(struct net *net, 
const void __user *user, unsigned int len) { int ret; struct ip6t_replace tmp; struct xt_table_info *newinfo; void *loc_cpu_entry; struct ip6t_entry *iter; if (copy_from_user(&tmp, user, sizeof(tmp)) != 0) return -EFAULT; /* overflow check */ if (tmp.num_counters >= INT_MAX / sizeof(struct xt_counters)) return -ENOMEM; if (tmp.num_counters == 0) return -EINVAL; tmp.name[sizeof(tmp.name)-1] = 0; newinfo = xt_alloc_table_info(tmp.size); if (!newinfo) return -ENOMEM; loc_cpu_entry = newinfo->entries; if (copy_from_user(loc_cpu_entry, user + sizeof(tmp), tmp.size) != 0) { ret = -EFAULT; goto free_newinfo; } ret = translate_table(net, newinfo, loc_cpu_entry, &tmp); if (ret != 0) goto free_newinfo; duprintf("ip_tables: Translated table\n"); ret = __do_replace(net, tmp.name, tmp.valid_hooks, newinfo, tmp.num_counters, tmp.counters); if (ret) goto free_newinfo_untrans; return 0; free_newinfo_untrans: xt_entry_foreach(iter, loc_cpu_entry, newinfo->size) cleanup_entry(iter, net); free_newinfo: xt_free_table_info(newinfo); return ret; } static int do_add_counters(struct net *net, const void __user *user, unsigned int len, int compat) { unsigned int i; struct xt_counters_info tmp; struct xt_counters *paddc; unsigned int num_counters; char *name; int size; void *ptmp; struct xt_table *t; const struct xt_table_info *private; int ret = 0; struct ip6t_entry *iter; unsigned int addend; #ifdef CONFIG_COMPAT struct compat_xt_counters_info compat_tmp; if (compat) { ptmp = &compat_tmp; size = sizeof(struct compat_xt_counters_info); } else #endif { ptmp = &tmp; size = sizeof(struct xt_counters_info); } if (copy_from_user(ptmp, user, size) != 0) return -EFAULT; #ifdef CONFIG_COMPAT if (compat) { num_counters = compat_tmp.num_counters; name = compat_tmp.name; } else #endif { num_counters = tmp.num_counters; name = tmp.name; } if (len != size + num_counters * sizeof(struct xt_counters)) return -EINVAL; paddc = vmalloc(len - size); if (!paddc) return -ENOMEM; if (copy_from_user(paddc, user 
+ size, len - size) != 0) { ret = -EFAULT; goto free; } t = xt_find_table_lock(net, AF_INET6, name); if (IS_ERR_OR_NULL(t)) { ret = t ? PTR_ERR(t) : -ENOENT; goto free; } local_bh_disable(); private = t->private; if (private->number != num_counters) { ret = -EINVAL; goto unlock_up_free; } i = 0; addend = xt_write_recseq_begin(); xt_entry_foreach(iter, private->entries, private->size) { struct xt_counters *tmp; tmp = xt_get_this_cpu_counter(&iter->counters); ADD_COUNTER(*tmp, paddc[i].bcnt, paddc[i].pcnt); ++i; } xt_write_recseq_end(addend); unlock_up_free: local_bh_enable(); xt_table_unlock(t); module_put(t->me); free: vfree(paddc); return ret; } #ifdef CONFIG_COMPAT struct compat_ip6t_replace { char name[XT_TABLE_MAXNAMELEN]; u32 valid_hooks; u32 num_entries; u32 size; u32 hook_entry[NF_INET_NUMHOOKS]; u32 underflow[NF_INET_NUMHOOKS]; u32 num_counters; compat_uptr_t counters; /* struct xt_counters * */ struct compat_ip6t_entry entries[0]; }; static int compat_copy_entry_to_user(struct ip6t_entry *e, void __user **dstptr, unsigned int *size, struct xt_counters *counters, unsigned int i) { struct xt_entry_target *t; struct compat_ip6t_entry __user *ce; u_int16_t target_offset, next_offset; compat_uint_t origsize; const struct xt_entry_match *ematch; int ret = 0; origsize = *size; ce = (struct compat_ip6t_entry __user *)*dstptr; if (copy_to_user(ce, e, sizeof(struct ip6t_entry)) != 0 || copy_to_user(&ce->counters, &counters[i], sizeof(counters[i])) != 0) return -EFAULT; *dstptr += sizeof(struct compat_ip6t_entry); *size -= sizeof(struct ip6t_entry) - sizeof(struct compat_ip6t_entry); xt_ematch_foreach(ematch, e) { ret = xt_compat_match_to_user(ematch, dstptr, size); if (ret != 0) return ret; } target_offset = e->target_offset - (origsize - *size); t = ip6t_get_target(e); ret = xt_compat_target_to_user(t, dstptr, size); if (ret) return ret; next_offset = e->next_offset - (origsize - *size); if (put_user(target_offset, &ce->target_offset) != 0 || put_user(next_offset, 
&ce->next_offset) != 0) return -EFAULT; return 0; } static int compat_find_calc_match(struct xt_entry_match *m, const char *name, const struct ip6t_ip6 *ipv6, int *size) { struct xt_match *match; match = xt_request_find_match(NFPROTO_IPV6, m->u.user.name, m->u.user.revision); if (IS_ERR(match)) { duprintf("compat_check_calc_match: `%s' not found\n", m->u.user.name); return PTR_ERR(match); } m->u.kernel.match = match; *size += xt_compat_match_offset(match); return 0; } static void compat_release_entry(struct compat_ip6t_entry *e) { struct xt_entry_target *t; struct xt_entry_match *ematch; /* Cleanup all matches */ xt_ematch_foreach(ematch, e) module_put(ematch->u.kernel.match->me); t = compat_ip6t_get_target(e); module_put(t->u.kernel.target->me); } static int check_compat_entry_size_and_hooks(struct compat_ip6t_entry *e, struct xt_table_info *newinfo, unsigned int *size, const unsigned char *base, const unsigned char *limit, const unsigned int *hook_entries, const unsigned int *underflows, const char *name) { struct xt_entry_match *ematch; struct xt_entry_target *t; struct xt_target *target; unsigned int entry_offset; unsigned int j; int ret, off, h; duprintf("check_compat_entry_size_and_hooks %p\n", e); if ((unsigned long)e % __alignof__(struct compat_ip6t_entry) != 0 || (unsigned char *)e + sizeof(struct compat_ip6t_entry) >= limit) { duprintf("Bad offset %p, limit = %p\n", e, limit); return -EINVAL; } if (e->next_offset < sizeof(struct compat_ip6t_entry) + sizeof(struct compat_xt_entry_target)) { duprintf("checking: element %p size %u\n", e, e->next_offset); return -EINVAL; } /* For purposes of check_entry casting the compat entry is fine */ ret = check_entry((struct ip6t_entry *)e); if (ret) return ret; off = sizeof(struct ip6t_entry) - sizeof(struct compat_ip6t_entry); entry_offset = (void *)e - (void *)base; j = 0; xt_ematch_foreach(ematch, e) { ret = compat_find_calc_match(ematch, name, &e->ipv6, &off); if (ret != 0) goto release_matches; ++j; } t = 
compat_ip6t_get_target(e); target = xt_request_find_target(NFPROTO_IPV6, t->u.user.name, t->u.user.revision); if (IS_ERR(target)) { duprintf("check_compat_entry_size_and_hooks: `%s' not found\n", t->u.user.name); ret = PTR_ERR(target); goto release_matches; } t->u.kernel.target = target; off += xt_compat_target_offset(target); *size += off; ret = xt_compat_add_offset(AF_INET6, entry_offset, off); if (ret) goto out; /* Check hooks & underflows */ for (h = 0; h < NF_INET_NUMHOOKS; h++) { if ((unsigned char *)e - base == hook_entries[h]) newinfo->hook_entry[h] = hook_entries[h]; if ((unsigned char *)e - base == underflows[h]) newinfo->underflow[h] = underflows[h]; } /* Clear counters and comefrom */ memset(&e->counters, 0, sizeof(e->counters)); e->comefrom = 0; return 0; out: module_put(t->u.kernel.target->me); release_matches: xt_ematch_foreach(ematch, e) { if (j-- == 0) break; module_put(ematch->u.kernel.match->me); } return ret; } static int compat_copy_entry_from_user(struct compat_ip6t_entry *e, void **dstptr, unsigned int *size, const char *name, struct xt_table_info *newinfo, unsigned char *base) { struct xt_entry_target *t; struct ip6t_entry *de; unsigned int origsize; int ret, h; struct xt_entry_match *ematch; ret = 0; origsize = *size; de = (struct ip6t_entry *)*dstptr; memcpy(de, e, sizeof(struct ip6t_entry)); memcpy(&de->counters, &e->counters, sizeof(e->counters)); *dstptr += sizeof(struct ip6t_entry); *size += sizeof(struct ip6t_entry) - sizeof(struct compat_ip6t_entry); xt_ematch_foreach(ematch, e) { ret = xt_compat_match_from_user(ematch, dstptr, size); if (ret != 0) return ret; } de->target_offset = e->target_offset - (origsize - *size); t = compat_ip6t_get_target(e); xt_compat_target_from_user(t, dstptr, size); de->next_offset = e->next_offset - (origsize - *size); for (h = 0; h < NF_INET_NUMHOOKS; h++) { if ((unsigned char *)de - base < newinfo->hook_entry[h]) newinfo->hook_entry[h] -= origsize - *size; if ((unsigned char *)de - base < 
newinfo->underflow[h]) newinfo->underflow[h] -= origsize - *size; } return ret; } static int compat_check_entry(struct ip6t_entry *e, struct net *net, const char *name) { unsigned int j; int ret = 0; struct xt_mtchk_param mtpar; struct xt_entry_match *ematch; e->counters.pcnt = xt_percpu_counter_alloc(); if (IS_ERR_VALUE(e->counters.pcnt)) return -ENOMEM; j = 0; mtpar.net = net; mtpar.table = name; mtpar.entryinfo = &e->ipv6; mtpar.hook_mask = e->comefrom; mtpar.family = NFPROTO_IPV6; xt_ematch_foreach(ematch, e) { ret = check_match(ematch, &mtpar); if (ret != 0) goto cleanup_matches; ++j; } ret = check_target(e, net, name); if (ret) goto cleanup_matches; return 0; cleanup_matches: xt_ematch_foreach(ematch, e) { if (j-- == 0) break; cleanup_match(ematch, net); } xt_percpu_counter_free(e->counters.pcnt); return ret; } static int translate_compat_table(struct net *net, const char *name, unsigned int valid_hooks, struct xt_table_info **pinfo, void **pentry0, unsigned int total_size, unsigned int number, unsigned int *hook_entries, unsigned int *underflows) { unsigned int i, j; struct xt_table_info *newinfo, *info; void *pos, *entry0, *entry1; struct compat_ip6t_entry *iter0; struct ip6t_entry *iter1; unsigned int size; int ret = 0; info = *pinfo; entry0 = *pentry0; size = total_size; info->number = number; /* Init all hooks to impossible value. */ for (i = 0; i < NF_INET_NUMHOOKS; i++) { info->hook_entry[i] = 0xFFFFFFFF; info->underflow[i] = 0xFFFFFFFF; } duprintf("translate_compat_table: size %u\n", info->size); j = 0; xt_compat_lock(AF_INET6); xt_compat_init_offsets(AF_INET6, number); /* Walk through entries, checking offsets. 
*/ xt_entry_foreach(iter0, entry0, total_size) { ret = check_compat_entry_size_and_hooks(iter0, info, &size, entry0, entry0 + total_size, hook_entries, underflows, name); if (ret != 0) goto out_unlock; ++j; } ret = -EINVAL; if (j != number) { duprintf("translate_compat_table: %u not %u entries\n", j, number); goto out_unlock; } /* Check hooks all assigned */ for (i = 0; i < NF_INET_NUMHOOKS; i++) { /* Only hooks which are valid */ if (!(valid_hooks & (1 << i))) continue; if (info->hook_entry[i] == 0xFFFFFFFF) { duprintf("Invalid hook entry %u %u\n", i, hook_entries[i]); goto out_unlock; } if (info->underflow[i] == 0xFFFFFFFF) { duprintf("Invalid underflow %u %u\n", i, underflows[i]); goto out_unlock; } } ret = -ENOMEM; newinfo = xt_alloc_table_info(size); if (!newinfo) goto out_unlock; newinfo->number = number; for (i = 0; i < NF_INET_NUMHOOKS; i++) { newinfo->hook_entry[i] = info->hook_entry[i]; newinfo->underflow[i] = info->underflow[i]; } entry1 = newinfo->entries; pos = entry1; size = total_size; xt_entry_foreach(iter0, entry0, total_size) { ret = compat_copy_entry_from_user(iter0, &pos, &size, name, newinfo, entry1); if (ret != 0) break; } xt_compat_flush_offsets(AF_INET6); xt_compat_unlock(AF_INET6); if (ret) goto free_newinfo; ret = -ELOOP; if (!mark_source_chains(newinfo, valid_hooks, entry1)) goto free_newinfo; i = 0; xt_entry_foreach(iter1, entry1, newinfo->size) { ret = compat_check_entry(iter1, net, name); if (ret != 0) break; ++i; if (strcmp(ip6t_get_target(iter1)->u.user.name, XT_ERROR_TARGET) == 0) ++newinfo->stacksize; } if (ret) { /* * The first i matches need cleanup_entry (calls ->destroy) * because they had called ->check already. The other j-i * entries need only release. 
*/ int skip = i; j -= i; xt_entry_foreach(iter0, entry0, newinfo->size) { if (skip-- > 0) continue; if (j-- == 0) break; compat_release_entry(iter0); } xt_entry_foreach(iter1, entry1, newinfo->size) { if (i-- == 0) break; cleanup_entry(iter1, net); } xt_free_table_info(newinfo); return ret; } *pinfo = newinfo; *pentry0 = entry1; xt_free_table_info(info); return 0; free_newinfo: xt_free_table_info(newinfo); out: xt_entry_foreach(iter0, entry0, total_size) { if (j-- == 0) break; compat_release_entry(iter0); } return ret; out_unlock: xt_compat_flush_offsets(AF_INET6); xt_compat_unlock(AF_INET6); goto out; } static int compat_do_replace(struct net *net, void __user *user, unsigned int len) { int ret; struct compat_ip6t_replace tmp; struct xt_table_info *newinfo; void *loc_cpu_entry; struct ip6t_entry *iter; if (copy_from_user(&tmp, user, sizeof(tmp)) != 0) return -EFAULT; /* overflow check */ if (tmp.size >= INT_MAX / num_possible_cpus()) return -ENOMEM; if (tmp.num_counters >= INT_MAX / sizeof(struct xt_counters)) return -ENOMEM; if (tmp.num_counters == 0) return -EINVAL; tmp.name[sizeof(tmp.name)-1] = 0; newinfo = xt_alloc_table_info(tmp.size); if (!newinfo) return -ENOMEM; loc_cpu_entry = newinfo->entries; if (copy_from_user(loc_cpu_entry, user + sizeof(tmp), tmp.size) != 0) { ret = -EFAULT; goto free_newinfo; } ret = translate_compat_table(net, tmp.name, tmp.valid_hooks, &newinfo, &loc_cpu_entry, tmp.size, tmp.num_entries, tmp.hook_entry, tmp.underflow); if (ret != 0) goto free_newinfo; duprintf("compat_do_replace: Translated table\n"); ret = __do_replace(net, tmp.name, tmp.valid_hooks, newinfo, tmp.num_counters, compat_ptr(tmp.counters)); if (ret) goto free_newinfo_untrans; return 0; free_newinfo_untrans: xt_entry_foreach(iter, loc_cpu_entry, newinfo->size) cleanup_entry(iter, net); free_newinfo: xt_free_table_info(newinfo); return ret; } static int compat_do_ip6t_set_ctl(struct sock *sk, int cmd, void __user *user, unsigned int len) { int ret; if 
(!ns_capable(sock_net(sk)->user_ns, CAP_NET_ADMIN)) return -EPERM; switch (cmd) { case IP6T_SO_SET_REPLACE: ret = compat_do_replace(sock_net(sk), user, len); break; case IP6T_SO_SET_ADD_COUNTERS: ret = do_add_counters(sock_net(sk), user, len, 1); break; default: duprintf("do_ip6t_set_ctl: unknown request %i\n", cmd); ret = -EINVAL; } return ret; } struct compat_ip6t_get_entries { char name[XT_TABLE_MAXNAMELEN]; compat_uint_t size; struct compat_ip6t_entry entrytable[0]; }; static int compat_copy_entries_to_user(unsigned int total_size, struct xt_table *table, void __user *userptr) { struct xt_counters *counters; const struct xt_table_info *private = table->private; void __user *pos; unsigned int size; int ret = 0; unsigned int i = 0; struct ip6t_entry *iter; counters = alloc_counters(table); if (IS_ERR(counters)) return PTR_ERR(counters); pos = userptr; size = total_size; xt_entry_foreach(iter, private->entries, total_size) { ret = compat_copy_entry_to_user(iter, &pos, &size, counters, i++); if (ret != 0) break; } vfree(counters); return ret; } static int compat_get_entries(struct net *net, struct compat_ip6t_get_entries __user *uptr, int *len) { int ret; struct compat_ip6t_get_entries get; struct xt_table *t; if (*len < sizeof(get)) { duprintf("compat_get_entries: %u < %zu\n", *len, sizeof(get)); return -EINVAL; } if (copy_from_user(&get, uptr, sizeof(get)) != 0) return -EFAULT; if (*len != sizeof(struct compat_ip6t_get_entries) + get.size) { duprintf("compat_get_entries: %u != %zu\n", *len, sizeof(get) + get.size); return -EINVAL; } xt_compat_lock(AF_INET6); t = xt_find_table_lock(net, AF_INET6, get.name); if (!IS_ERR_OR_NULL(t)) { const struct xt_table_info *private = t->private; struct xt_table_info info; duprintf("t->private->number = %u\n", private->number); ret = compat_table_info(private, &info); if (!ret && get.size == info.size) { ret = compat_copy_entries_to_user(private->size, t, uptr->entrytable); } else if (!ret) { duprintf("compat_get_entries: I've 
got %u not %u!\n", private->size, get.size); ret = -EAGAIN; } xt_compat_flush_offsets(AF_INET6); module_put(t->me); xt_table_unlock(t); } else ret = t ? PTR_ERR(t) : -ENOENT; xt_compat_unlock(AF_INET6); return ret; } static int do_ip6t_get_ctl(struct sock *, int, void __user *, int *); static int compat_do_ip6t_get_ctl(struct sock *sk, int cmd, void __user *user, int *len) { int ret; if (!ns_capable(sock_net(sk)->user_ns, CAP_NET_ADMIN)) return -EPERM; switch (cmd) { case IP6T_SO_GET_INFO: ret = get_info(sock_net(sk), user, len, 1); break; case IP6T_SO_GET_ENTRIES: ret = compat_get_entries(sock_net(sk), user, len); break; default: ret = do_ip6t_get_ctl(sk, cmd, user, len); } return ret; } #endif static int do_ip6t_set_ctl(struct sock *sk, int cmd, void __user *user, unsigned int len) { int ret; if (!ns_capable(sock_net(sk)->user_ns, CAP_NET_ADMIN)) return -EPERM; switch (cmd) { case IP6T_SO_SET_REPLACE: ret = do_replace(sock_net(sk), user, len); break; case IP6T_SO_SET_ADD_COUNTERS: ret = do_add_counters(sock_net(sk), user, len, 0); break; default: duprintf("do_ip6t_set_ctl: unknown request %i\n", cmd); ret = -EINVAL; } return ret; } static int do_ip6t_get_ctl(struct sock *sk, int cmd, void __user *user, int *len) { int ret; if (!ns_capable(sock_net(sk)->user_ns, CAP_NET_ADMIN)) return -EPERM; switch (cmd) { case IP6T_SO_GET_INFO: ret = get_info(sock_net(sk), user, len, 0); break; case IP6T_SO_GET_ENTRIES: ret = get_entries(sock_net(sk), user, len); break; case IP6T_SO_GET_REVISION_MATCH: case IP6T_SO_GET_REVISION_TARGET: { struct xt_get_revision rev; int target; if (*len != sizeof(rev)) { ret = -EINVAL; break; } if (copy_from_user(&rev, user, sizeof(rev)) != 0) { ret = -EFAULT; break; } rev.name[sizeof(rev.name)-1] = 0; if (cmd == IP6T_SO_GET_REVISION_TARGET) target = 1; else target = 0; try_then_request_module(xt_find_revision(AF_INET6, rev.name, rev.revision, target, &ret), "ip6t_%s", rev.name); break; } default: duprintf("do_ip6t_get_ctl: unknown request %i\n", 
cmd); ret = -EINVAL; } return ret; } static void __ip6t_unregister_table(struct net *net, struct xt_table *table) { struct xt_table_info *private; void *loc_cpu_entry; struct module *table_owner = table->me; struct ip6t_entry *iter; private = xt_unregister_table(table); /* Decrease module usage counts and free resources */ loc_cpu_entry = private->entries; xt_entry_foreach(iter, loc_cpu_entry, private->size) cleanup_entry(iter, net); if (private->number > private->initial_entries) module_put(table_owner); xt_free_table_info(private); } int ip6t_register_table(struct net *net, const struct xt_table *table, const struct ip6t_replace *repl, const struct nf_hook_ops *ops, struct xt_table **res) { int ret; struct xt_table_info *newinfo; struct xt_table_info bootstrap = {0}; void *loc_cpu_entry; struct xt_table *new_table; newinfo = xt_alloc_table_info(repl->size); if (!newinfo) return -ENOMEM; loc_cpu_entry = newinfo->entries; memcpy(loc_cpu_entry, repl->entries, repl->size); ret = translate_table(net, newinfo, loc_cpu_entry, repl); if (ret != 0) goto out_free; new_table = xt_register_table(net, table, &bootstrap, newinfo); if (IS_ERR(new_table)) { ret = PTR_ERR(new_table); goto out_free; } /* set res now, will see skbs right after nf_register_net_hooks */ WRITE_ONCE(*res, new_table); ret = nf_register_net_hooks(net, ops, hweight32(table->valid_hooks)); if (ret != 0) { __ip6t_unregister_table(net, new_table); *res = NULL; } return ret; out_free: xt_free_table_info(newinfo); return ret; } void ip6t_unregister_table(struct net *net, struct xt_table *table, const struct nf_hook_ops *ops) { nf_unregister_net_hooks(net, ops, hweight32(table->valid_hooks)); __ip6t_unregister_table(net, table); } /* Returns 1 if the type and code is matched by the range, 0 otherwise */ static inline bool icmp6_type_code_match(u_int8_t test_type, u_int8_t min_code, u_int8_t max_code, u_int8_t type, u_int8_t code, bool invert) { return (type == test_type && code >= min_code && code <= max_code) 
^ invert; } static bool icmp6_match(const struct sk_buff *skb, struct xt_action_param *par) { const struct icmp6hdr *ic; struct icmp6hdr _icmph; const struct ip6t_icmp *icmpinfo = par->matchinfo; /* Must not be a fragment. */ if (par->fragoff != 0) return false; ic = skb_header_pointer(skb, par->thoff, sizeof(_icmph), &_icmph); if (ic == NULL) { /* We've been asked to examine this packet, and we * can't. Hence, no choice but to drop. */ duprintf("Dropping evil ICMP tinygram.\n"); par->hotdrop = true; return false; } return icmp6_type_code_match(icmpinfo->type, icmpinfo->code[0], icmpinfo->code[1], ic->icmp6_type, ic->icmp6_code, !!(icmpinfo->invflags&IP6T_ICMP_INV)); } /* Called when user tries to insert an entry of this type. */ static int icmp6_checkentry(const struct xt_mtchk_param *par) { const struct ip6t_icmp *icmpinfo = par->matchinfo; /* Must specify no unknown invflags */ return (icmpinfo->invflags & ~IP6T_ICMP_INV) ? -EINVAL : 0; } /* The built-in targets: standard (NULL) and error. 
*/ static struct xt_target ip6t_builtin_tg[] __read_mostly = { { .name = XT_STANDARD_TARGET, .targetsize = sizeof(int), .family = NFPROTO_IPV6, #ifdef CONFIG_COMPAT .compatsize = sizeof(compat_int_t), .compat_from_user = compat_standard_from_user, .compat_to_user = compat_standard_to_user, #endif }, { .name = XT_ERROR_TARGET, .target = ip6t_error, .targetsize = XT_FUNCTION_MAXNAMELEN, .family = NFPROTO_IPV6, }, }; static struct nf_sockopt_ops ip6t_sockopts = { .pf = PF_INET6, .set_optmin = IP6T_BASE_CTL, .set_optmax = IP6T_SO_SET_MAX+1, .set = do_ip6t_set_ctl, #ifdef CONFIG_COMPAT .compat_set = compat_do_ip6t_set_ctl, #endif .get_optmin = IP6T_BASE_CTL, .get_optmax = IP6T_SO_GET_MAX+1, .get = do_ip6t_get_ctl, #ifdef CONFIG_COMPAT .compat_get = compat_do_ip6t_get_ctl, #endif .owner = THIS_MODULE, }; static struct xt_match ip6t_builtin_mt[] __read_mostly = { { .name = "icmp6", .match = icmp6_match, .matchsize = sizeof(struct ip6t_icmp), .checkentry = icmp6_checkentry, .proto = IPPROTO_ICMPV6, .family = NFPROTO_IPV6, }, }; static int __net_init ip6_tables_net_init(struct net *net) { return xt_proto_init(net, NFPROTO_IPV6); } static void __net_exit ip6_tables_net_exit(struct net *net) { xt_proto_fini(net, NFPROTO_IPV6); } static struct pernet_operations ip6_tables_net_ops = { .init = ip6_tables_net_init, .exit = ip6_tables_net_exit, }; static int __init ip6_tables_init(void) { int ret; ret = register_pernet_subsys(&ip6_tables_net_ops); if (ret < 0) goto err1; /* No one else will be downing sem now, so we won't sleep */ ret = xt_register_targets(ip6t_builtin_tg, ARRAY_SIZE(ip6t_builtin_tg)); if (ret < 0) goto err2; ret = xt_register_matches(ip6t_builtin_mt, ARRAY_SIZE(ip6t_builtin_mt)); if (ret < 0) goto err4; /* Register setsockopt */ ret = nf_register_sockopt(&ip6t_sockopts); if (ret < 0) goto err5; pr_info("(C) 2000-2006 Netfilter Core Team\n"); return 0; err5: xt_unregister_matches(ip6t_builtin_mt, ARRAY_SIZE(ip6t_builtin_mt)); err4: 
xt_unregister_targets(ip6t_builtin_tg, ARRAY_SIZE(ip6t_builtin_tg)); err2: unregister_pernet_subsys(&ip6_tables_net_ops); err1: return ret; } static void __exit ip6_tables_fini(void) { nf_unregister_sockopt(&ip6t_sockopts); xt_unregister_matches(ip6t_builtin_mt, ARRAY_SIZE(ip6t_builtin_mt)); xt_unregister_targets(ip6t_builtin_tg, ARRAY_SIZE(ip6t_builtin_tg)); unregister_pernet_subsys(&ip6_tables_net_ops); } EXPORT_SYMBOL(ip6t_register_table); EXPORT_SYMBOL(ip6t_unregister_table); EXPORT_SYMBOL(ip6t_do_table); module_init(ip6_tables_init); module_exit(ip6_tables_fini);
/* * Packet matching code. * * Copyright (C) 1999 Paul `Rusty' Russell & Michael J. Neuling * Copyright (C) 2000-2005 Netfilter Core Team <coreteam@netfilter.org> * Copyright (c) 2006-2010 Patrick McHardy <kaber@trash.net> * * This program is free software; you can redistribute it and/or modify * it under the terms of the GNU General Public License version 2 as * published by the Free Software Foundation. */ #define pr_fmt(fmt) KBUILD_MODNAME ": " fmt #include <linux/kernel.h> #include <linux/capability.h> #include <linux/in.h> #include <linux/skbuff.h> #include <linux/kmod.h> #include <linux/vmalloc.h> #include <linux/netdevice.h> #include <linux/module.h> #include <linux/poison.h> #include <linux/icmpv6.h> #include <net/ipv6.h> #include <net/compat.h> #include <asm/uaccess.h> #include <linux/mutex.h> #include <linux/proc_fs.h> #include <linux/err.h> #include <linux/cpumask.h> #include <linux/netfilter_ipv6/ip6_tables.h> #include <linux/netfilter/x_tables.h> #include <net/netfilter/nf_log.h> #include "../../netfilter/xt_repldata.h" MODULE_LICENSE("GPL"); MODULE_AUTHOR("Netfilter Core Team <coreteam@netfilter.org>"); MODULE_DESCRIPTION("IPv6 packet filter"); /*#define DEBUG_IP_FIREWALL*/ /*#define DEBUG_ALLOW_ALL*/ /* Useful for remote debugging */ /*#define DEBUG_IP_FIREWALL_USER*/ #ifdef DEBUG_IP_FIREWALL #define dprintf(format, args...) pr_info(format , ## args) #else #define dprintf(format, args...) #endif #ifdef DEBUG_IP_FIREWALL_USER #define duprintf(format, args...) pr_info(format , ## args) #else #define duprintf(format, args...) #endif #ifdef CONFIG_NETFILTER_DEBUG #define IP_NF_ASSERT(x) WARN_ON(!(x)) #else #define IP_NF_ASSERT(x) #endif #if 0 /* All the better to debug you with... 
 */
#define static
#define inline
#endif

/* Kernel-internal entry point: clone the template initial table for this
 * protocol family (used when a table is first registered in a netns). */
void *ip6t_alloc_initial_table(const struct xt_table *info)
{
	return xt_alloc_initial_table(ip6t, IP6T);
}
EXPORT_SYMBOL_GPL(ip6t_alloc_initial_table);

/*
   We keep a set of rules for each CPU, so we can avoid write-locking
   them in the softirq when updating the counters and therefore
   only need to read-lock in the softirq; doing a write_lock_bh() in user
   context stops packets coming through and allows user context to read
   the counters or update the rules.

   Hence the start of any table is given by get_table() below.  */

/* Returns whether matches rule or not. */
/* Performance critical - called for every packet */
static inline bool
ip6_packet_match(const struct sk_buff *skb,
		 const char *indev,
		 const char *outdev,
		 const struct ip6t_ip6 *ip6info,
		 unsigned int *protoff,
		 int *fragoff, bool *hotdrop)
{
	unsigned long ret;
	const struct ipv6hdr *ipv6 = ipv6_hdr(skb);

/* XOR the raw comparison result with the rule's inversion flag. */
#define FWINV(bool, invflg) ((bool) ^ !!(ip6info->invflags & (invflg)))

	/* Source/destination address match under the rule's masks. */
	if (FWINV(ipv6_masked_addr_cmp(&ipv6->saddr, &ip6info->smsk,
				       &ip6info->src), IP6T_INV_SRCIP) ||
	    FWINV(ipv6_masked_addr_cmp(&ipv6->daddr, &ip6info->dmsk,
				       &ip6info->dst), IP6T_INV_DSTIP)) {
		dprintf("Source or dest mismatch.\n");
/*
		dprintf("SRC: %u. Mask: %u. Target: %u.%s\n", ip->saddr,
			ipinfo->smsk.s_addr, ipinfo->src.s_addr,
			ipinfo->invflags & IP6T_INV_SRCIP ? " (INV)" : "");
		dprintf("DST: %u. Mask: %u. Target: %u.%s\n", ip->daddr,
			ipinfo->dmsk.s_addr, ipinfo->dst.s_addr,
			ipinfo->invflags & IP6T_INV_DSTIP ? " (INV)" : "");*/
		return false;
	}

	/* Incoming interface name match (with optional '!' inversion). */
	ret = ifname_compare_aligned(indev, ip6info->iniface,
				     ip6info->iniface_mask);

	if (FWINV(ret != 0, IP6T_INV_VIA_IN)) {
		dprintf("VIA in mismatch (%s vs %s).%s\n",
			indev, ip6info->iniface,
			ip6info->invflags & IP6T_INV_VIA_IN ?
			" (INV)" : "");
		return false;
	}

	/* Outgoing interface name match (with optional '!' inversion). */
	ret = ifname_compare_aligned(outdev, ip6info->outiface,
				     ip6info->outiface_mask);

	if (FWINV(ret != 0, IP6T_INV_VIA_OUT)) {
		dprintf("VIA out mismatch (%s vs %s).%s\n",
			outdev, ip6info->outiface,
			ip6info->invflags & IP6T_INV_VIA_OUT ? " (INV)" : "");
		return false;
	}

/* ... might want to do something with class and flowlabel here ... */

	/* look for the desired protocol header */
	if (ip6info->flags & IP6T_F_PROTO) {
		int protohdr;
		unsigned short _frag_off;

		protohdr = ipv6_find_hdr(skb, protoff, -1, &_frag_off, NULL);
		if (protohdr < 0) {
			/* Extension-header walk failed: on a malformed
			 * first fragment (_frag_off == 0) ask for the packet
			 * to be dropped, otherwise just no-match. */
			if (_frag_off == 0)
				*hotdrop = true;
			return false;
		}
		*fragoff = _frag_off;

		dprintf("Packet protocol %hi ?= %s%hi.\n",
			protohdr,
			ip6info->invflags & IP6T_INV_PROTO ? "!":"",
			ip6info->proto);

		if (ip6info->proto == protohdr) {
			if (ip6info->invflags & IP6T_INV_PROTO)
				return false;
			return true;
		}

		/* We need match for the '-p all', too! */
		if ((ip6info->proto != 0) &&
		    !(ip6info->invflags & IP6T_INV_PROTO))
			return false;
	}
	return true;
}

/* should be ip6 safe */
/* Validate the IPv6-specific part of a rule: reject unknown flag or
 * inversion-flag bits supplied by userspace. */
static bool
ip6_checkentry(const struct ip6t_ip6 *ipv6)
{
	if (ipv6->flags & ~IP6T_F_MASK) {
		duprintf("Unknown flag bits set: %08X\n",
			 ipv6->flags & ~IP6T_F_MASK);
		return false;
	}
	if (ipv6->invflags & ~IP6T_INV_MASK) {
		duprintf("Unknown invflag bits set: %08X\n",
			 ipv6->invflags & ~IP6T_INV_MASK);
		return false;
	}
	return true;
}

/* Target of the built-in ERROR rule: log the error name (rate-limited)
 * and drop the packet. */
static unsigned int
ip6t_error(struct sk_buff *skb, const struct xt_action_param *par)
{
	net_info_ratelimited("error: `%s'\n", (const char *)par->targinfo);
	return NF_DROP;
}

/* Return the rule located 'offset' bytes from the start of the blob. */
static inline struct ip6t_entry *
get_entry(const void *base, unsigned int offset)
{
	return (struct ip6t_entry *)(base + offset);
}

/* All zeroes == unconditional rule.
*/ /* Mildly perf critical (only if packet tracing is on) */ static inline bool unconditional(const struct ip6t_ip6 *ipv6) { static const struct ip6t_ip6 uncond; return memcmp(ipv6, &uncond, sizeof(uncond)) == 0; } static inline const struct xt_entry_target * ip6t_get_target_c(const struct ip6t_entry *e) { return ip6t_get_target((struct ip6t_entry *)e); } #if IS_ENABLED(CONFIG_NETFILTER_XT_TARGET_TRACE) /* This cries for unification! */ static const char *const hooknames[] = { [NF_INET_PRE_ROUTING] = "PREROUTING", [NF_INET_LOCAL_IN] = "INPUT", [NF_INET_FORWARD] = "FORWARD", [NF_INET_LOCAL_OUT] = "OUTPUT", [NF_INET_POST_ROUTING] = "POSTROUTING", }; enum nf_ip_trace_comments { NF_IP6_TRACE_COMMENT_RULE, NF_IP6_TRACE_COMMENT_RETURN, NF_IP6_TRACE_COMMENT_POLICY, }; static const char *const comments[] = { [NF_IP6_TRACE_COMMENT_RULE] = "rule", [NF_IP6_TRACE_COMMENT_RETURN] = "return", [NF_IP6_TRACE_COMMENT_POLICY] = "policy", }; static struct nf_loginfo trace_loginfo = { .type = NF_LOG_TYPE_LOG, .u = { .log = { .level = LOGLEVEL_WARNING, .logflags = NF_LOG_MASK, }, }, }; /* Mildly perf critical (only if packet tracing is on) */ static inline int get_chainname_rulenum(const struct ip6t_entry *s, const struct ip6t_entry *e, const char *hookname, const char **chainname, const char **comment, unsigned int *rulenum) { const struct xt_standard_target *t = (void *)ip6t_get_target_c(s); if (strcmp(t->target.u.kernel.target->name, XT_ERROR_TARGET) == 0) { /* Head of user chain: ERROR target with chainname */ *chainname = t->target.data; (*rulenum) = 0; } else if (s == e) { (*rulenum)++; if (s->target_offset == sizeof(struct ip6t_entry) && strcmp(t->target.u.kernel.target->name, XT_STANDARD_TARGET) == 0 && t->verdict < 0 && unconditional(&s->ipv6)) { /* Tail of chains: STANDARD target (return/policy) */ *comment = *chainname == hookname ? 
comments[NF_IP6_TRACE_COMMENT_POLICY] : comments[NF_IP6_TRACE_COMMENT_RETURN]; } return 1; } else (*rulenum)++; return 0; } static void trace_packet(struct net *net, const struct sk_buff *skb, unsigned int hook, const struct net_device *in, const struct net_device *out, const char *tablename, const struct xt_table_info *private, const struct ip6t_entry *e) { const struct ip6t_entry *root; const char *hookname, *chainname, *comment; const struct ip6t_entry *iter; unsigned int rulenum = 0; root = get_entry(private->entries, private->hook_entry[hook]); hookname = chainname = hooknames[hook]; comment = comments[NF_IP6_TRACE_COMMENT_RULE]; xt_entry_foreach(iter, root, private->size - private->hook_entry[hook]) if (get_chainname_rulenum(iter, e, hookname, &chainname, &comment, &rulenum) != 0) break; nf_log_trace(net, AF_INET6, hook, skb, in, out, &trace_loginfo, "TRACE: %s:%s:%s:%u ", tablename, chainname, comment, rulenum); } #endif static inline struct ip6t_entry * ip6t_next_entry(const struct ip6t_entry *entry) { return (void *)entry + entry->next_offset; } /* Returns one of the generic firewall policies, like NF_ACCEPT. */ unsigned int ip6t_do_table(struct sk_buff *skb, const struct nf_hook_state *state, struct xt_table *table) { unsigned int hook = state->hook; static const char nulldevname[IFNAMSIZ] __attribute__((aligned(sizeof(long)))); /* Initializing verdict to NF_DROP keeps gcc happy. */ unsigned int verdict = NF_DROP; const char *indev, *outdev; const void *table_base; struct ip6t_entry *e, **jumpstack; unsigned int stackidx, cpu; const struct xt_table_info *private; struct xt_action_param acpar; unsigned int addend; /* Initialization */ stackidx = 0; indev = state->in ? state->in->name : nulldevname; outdev = state->out ? state->out->name : nulldevname; /* We handle fragments by dealing with the first fragment as * if it was a normal packet. 
All other fragments are treated * normally, except that they will NEVER match rules that ask * things we don't know, ie. tcp syn flag or ports). If the * rule is also a fragment-specific rule, non-fragments won't * match it. */ acpar.hotdrop = false; acpar.net = state->net; acpar.in = state->in; acpar.out = state->out; acpar.family = NFPROTO_IPV6; acpar.hooknum = hook; IP_NF_ASSERT(table->valid_hooks & (1 << hook)); local_bh_disable(); addend = xt_write_recseq_begin(); private = table->private; /* * Ensure we load private-> members after we've fetched the base * pointer. */ smp_read_barrier_depends(); cpu = smp_processor_id(); table_base = private->entries; jumpstack = (struct ip6t_entry **)private->jumpstack[cpu]; /* Switch to alternate jumpstack if we're being invoked via TEE. * TEE issues XT_CONTINUE verdict on original skb so we must not * clobber the jumpstack. * * For recursion via REJECT or SYNPROXY the stack will be clobbered * but it is no problem since absolute verdict is issued by these. */ if (static_key_false(&xt_tee_enabled)) jumpstack += private->stacksize * __this_cpu_read(nf_skb_duplicated); e = get_entry(table_base, private->hook_entry[hook]); do { const struct xt_entry_target *t; const struct xt_entry_match *ematch; struct xt_counters *counter; IP_NF_ASSERT(e); acpar.thoff = 0; if (!ip6_packet_match(skb, indev, outdev, &e->ipv6, &acpar.thoff, &acpar.fragoff, &acpar.hotdrop)) { no_match: e = ip6t_next_entry(e); continue; } xt_ematch_foreach(ematch, e) { acpar.match = ematch->u.kernel.match; acpar.matchinfo = ematch->data; if (!acpar.match->match(skb, &acpar)) goto no_match; } counter = xt_get_this_cpu_counter(&e->counters); ADD_COUNTER(*counter, skb->len, 1); t = ip6t_get_target_c(e); IP_NF_ASSERT(t->u.kernel.target); #if IS_ENABLED(CONFIG_NETFILTER_XT_TARGET_TRACE) /* The packet is traced: log it */ if (unlikely(skb->nf_trace)) trace_packet(state->net, skb, hook, state->in, state->out, table->name, private, e); #endif /* Standard target? 
*/ if (!t->u.kernel.target->target) { int v; v = ((struct xt_standard_target *)t)->verdict; if (v < 0) { /* Pop from stack? */ if (v != XT_RETURN) { verdict = (unsigned int)(-v) - 1; break; } if (stackidx == 0) e = get_entry(table_base, private->underflow[hook]); else e = ip6t_next_entry(jumpstack[--stackidx]); continue; } if (table_base + v != ip6t_next_entry(e) && !(e->ipv6.flags & IP6T_F_GOTO)) { jumpstack[stackidx++] = e; } e = get_entry(table_base, v); continue; } acpar.target = t->u.kernel.target; acpar.targinfo = t->data; verdict = t->u.kernel.target->target(skb, &acpar); if (verdict == XT_CONTINUE) e = ip6t_next_entry(e); else /* Verdict */ break; } while (!acpar.hotdrop); xt_write_recseq_end(addend); local_bh_enable(); #ifdef DEBUG_ALLOW_ALL return NF_ACCEPT; #else if (acpar.hotdrop) return NF_DROP; else return verdict; #endif } /* Figures out from what hook each rule can be called: returns 0 if there are loops. Puts hook bitmask in comefrom. */ static int mark_source_chains(const struct xt_table_info *newinfo, unsigned int valid_hooks, void *entry0) { unsigned int hook; /* No recursion; use packet counter to save back ptrs (reset to 0 as we leave), and comefrom to save source hook bitmask */ for (hook = 0; hook < NF_INET_NUMHOOKS; hook++) { unsigned int pos = newinfo->hook_entry[hook]; struct ip6t_entry *e = (struct ip6t_entry *)(entry0 + pos); if (!(valid_hooks & (1 << hook))) continue; /* Set initial back pointer. */ e->counters.pcnt = pos; for (;;) { const struct xt_standard_target *t = (void *)ip6t_get_target_c(e); int visited = e->comefrom & (1 << hook); if (e->comefrom & (1 << NF_INET_NUMHOOKS)) { pr_err("iptables: loop hook %u pos %u %08X.\n", hook, pos, e->comefrom); return 0; } e->comefrom |= ((1 << hook) | (1 << NF_INET_NUMHOOKS)); /* Unconditional return/END. 
*/ if ((e->target_offset == sizeof(struct ip6t_entry) && (strcmp(t->target.u.user.name, XT_STANDARD_TARGET) == 0) && t->verdict < 0 && unconditional(&e->ipv6)) || visited) { unsigned int oldpos, size; if ((strcmp(t->target.u.user.name, XT_STANDARD_TARGET) == 0) && t->verdict < -NF_MAX_VERDICT - 1) { duprintf("mark_source_chains: bad " "negative verdict (%i)\n", t->verdict); return 0; } /* Return: backtrack through the last big jump. */ do { e->comefrom ^= (1<<NF_INET_NUMHOOKS); #ifdef DEBUG_IP_FIREWALL_USER if (e->comefrom & (1 << NF_INET_NUMHOOKS)) { duprintf("Back unset " "on hook %u " "rule %u\n", hook, pos); } #endif oldpos = pos; pos = e->counters.pcnt; e->counters.pcnt = 0; /* We're at the start. */ if (pos == oldpos) goto next; e = (struct ip6t_entry *) (entry0 + pos); } while (oldpos == pos + e->next_offset); /* Move along one */ size = e->next_offset; e = (struct ip6t_entry *) (entry0 + pos + size); e->counters.pcnt = pos; pos += size; } else { int newpos = t->verdict; if (strcmp(t->target.u.user.name, XT_STANDARD_TARGET) == 0 && newpos >= 0) { if (newpos > newinfo->size - sizeof(struct ip6t_entry)) { duprintf("mark_source_chains: " "bad verdict (%i)\n", newpos); return 0; } /* This a jump; chase it. */ duprintf("Jump rule %u -> %u\n", pos, newpos); } else { /* ... 
this is a fallthru */ newpos = pos + e->next_offset; } e = (struct ip6t_entry *) (entry0 + newpos); e->counters.pcnt = pos; pos = newpos; } } next: duprintf("Finished chain %u\n", hook); } return 1; } static void cleanup_match(struct xt_entry_match *m, struct net *net) { struct xt_mtdtor_param par; par.net = net; par.match = m->u.kernel.match; par.matchinfo = m->data; par.family = NFPROTO_IPV6; if (par.match->destroy != NULL) par.match->destroy(&par); module_put(par.match->me); } static int check_entry(const struct ip6t_entry *e) { const struct xt_entry_target *t; if (!ip6_checkentry(&e->ipv6)) return -EINVAL; if (e->target_offset + sizeof(struct xt_entry_target) > e->next_offset) return -EINVAL; t = ip6t_get_target_c(e); if (e->target_offset + t->u.target_size > e->next_offset) return -EINVAL; return 0; } static int check_match(struct xt_entry_match *m, struct xt_mtchk_param *par) { const struct ip6t_ip6 *ipv6 = par->entryinfo; int ret; par->match = m->u.kernel.match; par->matchinfo = m->data; ret = xt_check_match(par, m->u.match_size - sizeof(*m), ipv6->proto, ipv6->invflags & IP6T_INV_PROTO); if (ret < 0) { duprintf("ip_tables: check failed for `%s'.\n", par.match->name); return ret; } return 0; } static int find_check_match(struct xt_entry_match *m, struct xt_mtchk_param *par) { struct xt_match *match; int ret; match = xt_request_find_match(NFPROTO_IPV6, m->u.user.name, m->u.user.revision); if (IS_ERR(match)) { duprintf("find_check_match: `%s' not found\n", m->u.user.name); return PTR_ERR(match); } m->u.kernel.match = match; ret = check_match(m, par); if (ret) goto err; return 0; err: module_put(m->u.kernel.match->me); return ret; } static int check_target(struct ip6t_entry *e, struct net *net, const char *name) { struct xt_entry_target *t = ip6t_get_target(e); struct xt_tgchk_param par = { .net = net, .table = name, .entryinfo = e, .target = t->u.kernel.target, .targinfo = t->data, .hook_mask = e->comefrom, .family = NFPROTO_IPV6, }; int ret; t = 
ip6t_get_target(e); /* redundant re-fetch: t already holds this value */
	ret = xt_check_target(&par, t->u.target_size - sizeof(*t),
			      e->ipv6.proto,
			      e->ipv6.invflags & IP6T_INV_PROTO);
	if (ret < 0) {
		duprintf("ip_tables: check failed for `%s'.\n",
			 t->u.kernel.target->name);
		return ret;
	}
	return 0;
}

/* Fully validate one rule: allocate its per-cpu counter, look up and
 * check every match, then look up and check the target.  On any failure
 * every match set up so far is torn down again (j counts successes). */
static int
find_check_entry(struct ip6t_entry *e, struct net *net, const char *name,
		 unsigned int size)
{
	struct xt_entry_target *t;
	struct xt_target *target;
	int ret;
	unsigned int j;
	struct xt_mtchk_param mtpar;
	struct xt_entry_match *ematch;

	e->counters.pcnt = xt_percpu_counter_alloc();
	if (IS_ERR_VALUE(e->counters.pcnt))
		return -ENOMEM;

	j = 0;
	mtpar.net = net;
	mtpar.table = name;
	mtpar.entryinfo = &e->ipv6;
	mtpar.hook_mask = e->comefrom;
	mtpar.family = NFPROTO_IPV6;
	xt_ematch_foreach(ematch, e) {
		ret = find_check_match(ematch, &mtpar);
		if (ret != 0)
			goto cleanup_matches;
		++j;
	}

	t = ip6t_get_target(e);
	target = xt_request_find_target(NFPROTO_IPV6, t->u.user.name,
					t->u.user.revision);
	if (IS_ERR(target)) {
		duprintf("find_check_entry: `%s' not found\n", t->u.user.name);
		ret = PTR_ERR(target);
		goto cleanup_matches;
	}
	t->u.kernel.target = target;

	ret = check_target(e, net, name);
	if (ret)
		goto err;
	return 0;
err:
	module_put(t->u.kernel.target->me);
cleanup_matches:
	/* Tear down only the j matches that were successfully set up. */
	xt_ematch_foreach(ematch, e) {
		if (j-- == 0)
			break;
		cleanup_match(ematch, net);
	}

	xt_percpu_counter_free(e->counters.pcnt);

	return ret;
}

/* An underflow (chain policy) rule must be unconditional and use the
 * STANDARD target with an absolute ACCEPT or DROP verdict. */
static bool check_underflow(const struct ip6t_entry *e)
{
	const struct xt_entry_target *t;
	unsigned int verdict;

	if (!unconditional(&e->ipv6))
		return false;
	t = ip6t_get_target_c(e);
	if (strcmp(t->u.user.name, XT_STANDARD_TARGET) != 0)
		return false;
	/* Standard verdicts are stored as -verdict - 1. */
	verdict = ((struct xt_standard_target *)t)->verdict;
	verdict = -verdict - 1;
	return verdict == NF_DROP || verdict == NF_ACCEPT;
}

/* Validate one entry's size/alignment against the blob limits and record
 * the hook entry points and underflows as they are encountered. */
static int
check_entry_size_and_hooks(struct ip6t_entry *e,
			   struct xt_table_info *newinfo,
			   const unsigned char *base,
			   const unsigned char *limit,
			   const unsigned int *hook_entries,
			   const unsigned int *underflows,
			   unsigned int valid_hooks)
{
	unsigned int h;
	int err;

	/* The entry must be properly aligned and must fit, together with
	 * its declared next_offset, inside the user-supplied blob. */
	if ((unsigned long)e % __alignof__(struct ip6t_entry) != 0 ||
	    (unsigned char *)e + sizeof(struct ip6t_entry) >= limit ||
	    (unsigned char *)e + e->next_offset > limit) {
		duprintf("Bad offset %p\n", e);
		return -EINVAL;
	}

	/* next_offset must leave room for the entry header plus at least a
	 * minimal target record. */
	if (e->next_offset
	    < sizeof(struct ip6t_entry) + sizeof(struct xt_entry_target)) {
		duprintf("checking: element %p size %u\n",
			 e, e->next_offset);
		return -EINVAL;
	}

	err = check_entry(e);
	if (err)
		return err;

	/* Check hooks & underflows */
	for (h = 0; h < NF_INET_NUMHOOKS; h++) {
		if (!(valid_hooks & (1 << h)))
			continue;
		if ((unsigned char *)e - base == hook_entries[h])
			newinfo->hook_entry[h] = hook_entries[h];
		if ((unsigned char *)e - base == underflows[h]) {
			if (!check_underflow(e)) {
				pr_err("Underflows must be unconditional and "
				       "use the STANDARD target with "
				       "ACCEPT/DROP\n");
				return -EINVAL;
			}
			newinfo->underflow[h] = underflows[h];
		}
	}

	/* Clear counters and comefrom */
	e->counters = ((struct xt_counters) { 0, 0 });
	e->comefrom = 0;
	return 0;
}

/* Tear down one fully set-up rule: destroy all matches, then the target,
 * dropping the module references and freeing the per-cpu counter. */
static void cleanup_entry(struct ip6t_entry *e, struct net *net)
{
	struct xt_tgdtor_param par;
	struct xt_entry_target *t;
	struct xt_entry_match *ematch;

	/* Cleanup all matches */
	xt_ematch_foreach(ematch, e)
		cleanup_match(ematch, net);
	t = ip6t_get_target(e);

	par.net = net;
	par.target = t->u.kernel.target;
	par.targinfo = t->data;
	par.family = NFPROTO_IPV6;
	if (par.target->destroy != NULL)
		par.target->destroy(&par);
	module_put(par.target->me);
	xt_percpu_counter_free(e->counters.pcnt);
}

/* Checks and translates the user-supplied table segment (held in newinfo) */
static int
translate_table(struct net *net, struct xt_table_info *newinfo, void *entry0,
		const struct ip6t_replace *repl)
{
	struct ip6t_entry *iter;
	unsigned int i;
	int ret = 0;

	newinfo->size = repl->size;
	newinfo->number = repl->num_entries;

	/* Init all hooks to impossible value.
*/ for (i = 0; i < NF_INET_NUMHOOKS; i++) { newinfo->hook_entry[i] = 0xFFFFFFFF; newinfo->underflow[i] = 0xFFFFFFFF; } duprintf("translate_table: size %u\n", newinfo->size); i = 0; /* Walk through entries, checking offsets. */ xt_entry_foreach(iter, entry0, newinfo->size) { ret = check_entry_size_and_hooks(iter, newinfo, entry0, entry0 + repl->size, repl->hook_entry, repl->underflow, repl->valid_hooks); if (ret != 0) return ret; ++i; if (strcmp(ip6t_get_target(iter)->u.user.name, XT_ERROR_TARGET) == 0) ++newinfo->stacksize; } if (i != repl->num_entries) { duprintf("translate_table: %u not %u entries\n", i, repl->num_entries); return -EINVAL; } /* Check hooks all assigned */ for (i = 0; i < NF_INET_NUMHOOKS; i++) { /* Only hooks which are valid */ if (!(repl->valid_hooks & (1 << i))) continue; if (newinfo->hook_entry[i] == 0xFFFFFFFF) { duprintf("Invalid hook entry %u %u\n", i, repl->hook_entry[i]); return -EINVAL; } if (newinfo->underflow[i] == 0xFFFFFFFF) { duprintf("Invalid underflow %u %u\n", i, repl->underflow[i]); return -EINVAL; } } if (!mark_source_chains(newinfo, repl->valid_hooks, entry0)) return -ELOOP; /* Finally, each sanity check must pass */ i = 0; xt_entry_foreach(iter, entry0, newinfo->size) { ret = find_check_entry(iter, net, repl->name, repl->size); if (ret != 0) break; ++i; } if (ret != 0) { xt_entry_foreach(iter, entry0, newinfo->size) { if (i-- == 0) break; cleanup_entry(iter, net); } return ret; } return ret; } static void get_counters(const struct xt_table_info *t, struct xt_counters counters[]) { struct ip6t_entry *iter; unsigned int cpu; unsigned int i; for_each_possible_cpu(cpu) { seqcount_t *s = &per_cpu(xt_recseq, cpu); i = 0; xt_entry_foreach(iter, t->entries, t->size) { struct xt_counters *tmp; u64 bcnt, pcnt; unsigned int start; tmp = xt_get_per_cpu_counter(&iter->counters, cpu); do { start = read_seqcount_begin(s); bcnt = tmp->bcnt; pcnt = tmp->pcnt; } while (read_seqcount_retry(s, start)); ADD_COUNTER(counters[i], bcnt, pcnt); ++i; 
} } } static struct xt_counters *alloc_counters(const struct xt_table *table) { unsigned int countersize; struct xt_counters *counters; const struct xt_table_info *private = table->private; /* We need atomic snapshot of counters: rest doesn't change (other than comefrom, which userspace doesn't care about). */ countersize = sizeof(struct xt_counters) * private->number; counters = vzalloc(countersize); if (counters == NULL) return ERR_PTR(-ENOMEM); get_counters(private, counters); return counters; } static int copy_entries_to_user(unsigned int total_size, const struct xt_table *table, void __user *userptr) { unsigned int off, num; const struct ip6t_entry *e; struct xt_counters *counters; const struct xt_table_info *private = table->private; int ret = 0; const void *loc_cpu_entry; counters = alloc_counters(table); if (IS_ERR(counters)) return PTR_ERR(counters); loc_cpu_entry = private->entries; if (copy_to_user(userptr, loc_cpu_entry, total_size) != 0) { ret = -EFAULT; goto free_counters; } /* FIXME: use iterator macros --RR */ /* ... 
then go back and fix counters and names */ for (off = 0, num = 0; off < total_size; off += e->next_offset, num++){ unsigned int i; const struct xt_entry_match *m; const struct xt_entry_target *t; e = (struct ip6t_entry *)(loc_cpu_entry + off); if (copy_to_user(userptr + off + offsetof(struct ip6t_entry, counters), &counters[num], sizeof(counters[num])) != 0) { ret = -EFAULT; goto free_counters; } for (i = sizeof(struct ip6t_entry); i < e->target_offset; i += m->u.match_size) { m = (void *)e + i; if (copy_to_user(userptr + off + i + offsetof(struct xt_entry_match, u.user.name), m->u.kernel.match->name, strlen(m->u.kernel.match->name)+1) != 0) { ret = -EFAULT; goto free_counters; } } t = ip6t_get_target_c(e); if (copy_to_user(userptr + off + e->target_offset + offsetof(struct xt_entry_target, u.user.name), t->u.kernel.target->name, strlen(t->u.kernel.target->name)+1) != 0) { ret = -EFAULT; goto free_counters; } } free_counters: vfree(counters); return ret; } #ifdef CONFIG_COMPAT static void compat_standard_from_user(void *dst, const void *src) { int v = *(compat_int_t *)src; if (v > 0) v += xt_compat_calc_jump(AF_INET6, v); memcpy(dst, &v, sizeof(v)); } static int compat_standard_to_user(void __user *dst, const void *src) { compat_int_t cv = *(int *)src; if (cv > 0) cv -= xt_compat_calc_jump(AF_INET6, cv); return copy_to_user(dst, &cv, sizeof(cv)) ? 
-EFAULT : 0; } static int compat_calc_entry(const struct ip6t_entry *e, const struct xt_table_info *info, const void *base, struct xt_table_info *newinfo) { const struct xt_entry_match *ematch; const struct xt_entry_target *t; unsigned int entry_offset; int off, i, ret; off = sizeof(struct ip6t_entry) - sizeof(struct compat_ip6t_entry); entry_offset = (void *)e - base; xt_ematch_foreach(ematch, e) off += xt_compat_match_offset(ematch->u.kernel.match); t = ip6t_get_target_c(e); off += xt_compat_target_offset(t->u.kernel.target); newinfo->size -= off; ret = xt_compat_add_offset(AF_INET6, entry_offset, off); if (ret) return ret; for (i = 0; i < NF_INET_NUMHOOKS; i++) { if (info->hook_entry[i] && (e < (struct ip6t_entry *)(base + info->hook_entry[i]))) newinfo->hook_entry[i] -= off; if (info->underflow[i] && (e < (struct ip6t_entry *)(base + info->underflow[i]))) newinfo->underflow[i] -= off; } return 0; } static int compat_table_info(const struct xt_table_info *info, struct xt_table_info *newinfo) { struct ip6t_entry *iter; const void *loc_cpu_entry; int ret; if (!newinfo || !info) return -EINVAL; /* we dont care about newinfo->entries */ memcpy(newinfo, info, offsetof(struct xt_table_info, entries)); newinfo->initial_entries = 0; loc_cpu_entry = info->entries; xt_compat_init_offsets(AF_INET6, info->number); xt_entry_foreach(iter, loc_cpu_entry, info->size) { ret = compat_calc_entry(iter, info, loc_cpu_entry, newinfo); if (ret != 0) return ret; } return 0; } #endif static int get_info(struct net *net, void __user *user, const int *len, int compat) { char name[XT_TABLE_MAXNAMELEN]; struct xt_table *t; int ret; if (*len != sizeof(struct ip6t_getinfo)) { duprintf("length %u != %zu\n", *len, sizeof(struct ip6t_getinfo)); return -EINVAL; } if (copy_from_user(name, user, sizeof(name)) != 0) return -EFAULT; name[XT_TABLE_MAXNAMELEN-1] = '\0'; #ifdef CONFIG_COMPAT if (compat) xt_compat_lock(AF_INET6); #endif t = try_then_request_module(xt_find_table_lock(net, AF_INET6, name), 
"ip6table_%s", name); if (!IS_ERR_OR_NULL(t)) { struct ip6t_getinfo info; const struct xt_table_info *private = t->private; #ifdef CONFIG_COMPAT struct xt_table_info tmp; if (compat) { ret = compat_table_info(private, &tmp); xt_compat_flush_offsets(AF_INET6); private = &tmp; } #endif memset(&info, 0, sizeof(info)); info.valid_hooks = t->valid_hooks; memcpy(info.hook_entry, private->hook_entry, sizeof(info.hook_entry)); memcpy(info.underflow, private->underflow, sizeof(info.underflow)); info.num_entries = private->number; info.size = private->size; strcpy(info.name, name); if (copy_to_user(user, &info, *len) != 0) ret = -EFAULT; else ret = 0; xt_table_unlock(t); module_put(t->me); } else ret = t ? PTR_ERR(t) : -ENOENT; #ifdef CONFIG_COMPAT if (compat) xt_compat_unlock(AF_INET6); #endif return ret; } static int get_entries(struct net *net, struct ip6t_get_entries __user *uptr, const int *len) { int ret; struct ip6t_get_entries get; struct xt_table *t; if (*len < sizeof(get)) { duprintf("get_entries: %u < %zu\n", *len, sizeof(get)); return -EINVAL; } if (copy_from_user(&get, uptr, sizeof(get)) != 0) return -EFAULT; if (*len != sizeof(struct ip6t_get_entries) + get.size) { duprintf("get_entries: %u != %zu\n", *len, sizeof(get) + get.size); return -EINVAL; } t = xt_find_table_lock(net, AF_INET6, get.name); if (!IS_ERR_OR_NULL(t)) { struct xt_table_info *private = t->private; duprintf("t->private->number = %u\n", private->number); if (get.size == private->size) ret = copy_entries_to_user(private->size, t, uptr->entrytable); else { duprintf("get_entries: I've got %u not %u!\n", private->size, get.size); ret = -EAGAIN; } module_put(t->me); xt_table_unlock(t); } else ret = t ? 
PTR_ERR(t) : -ENOENT; return ret; } static int __do_replace(struct net *net, const char *name, unsigned int valid_hooks, struct xt_table_info *newinfo, unsigned int num_counters, void __user *counters_ptr) { int ret; struct xt_table *t; struct xt_table_info *oldinfo; struct xt_counters *counters; struct ip6t_entry *iter; ret = 0; counters = vzalloc(num_counters * sizeof(struct xt_counters)); if (!counters) { ret = -ENOMEM; goto out; } t = try_then_request_module(xt_find_table_lock(net, AF_INET6, name), "ip6table_%s", name); if (IS_ERR_OR_NULL(t)) { ret = t ? PTR_ERR(t) : -ENOENT; goto free_newinfo_counters_untrans; } /* You lied! */ if (valid_hooks != t->valid_hooks) { duprintf("Valid hook crap: %08X vs %08X\n", valid_hooks, t->valid_hooks); ret = -EINVAL; goto put_module; } oldinfo = xt_replace_table(t, num_counters, newinfo, &ret); if (!oldinfo) goto put_module; /* Update module usage count based on number of rules */ duprintf("do_replace: oldnum=%u, initnum=%u, newnum=%u\n", oldinfo->number, oldinfo->initial_entries, newinfo->number); if ((oldinfo->number > oldinfo->initial_entries) || (newinfo->number <= oldinfo->initial_entries)) module_put(t->me); if ((oldinfo->number > oldinfo->initial_entries) && (newinfo->number <= oldinfo->initial_entries)) module_put(t->me); /* Get the old counters, and synchronize with replace */ get_counters(oldinfo, counters); /* Decrease module usage counts and free resource */ xt_entry_foreach(iter, oldinfo->entries, oldinfo->size) cleanup_entry(iter, net); xt_free_table_info(oldinfo); if (copy_to_user(counters_ptr, counters, sizeof(struct xt_counters) * num_counters) != 0) { /* Silent error, can't fail, new table is already in place */ net_warn_ratelimited("ip6tables: counters copy to user failed while replacing table\n"); } vfree(counters); xt_table_unlock(t); return ret; put_module: module_put(t->me); xt_table_unlock(t); free_newinfo_counters_untrans: vfree(counters); out: return ret; } static int do_replace(struct net *net, 
const void __user *user, unsigned int len) { int ret; struct ip6t_replace tmp; struct xt_table_info *newinfo; void *loc_cpu_entry; struct ip6t_entry *iter; if (copy_from_user(&tmp, user, sizeof(tmp)) != 0) return -EFAULT; /* overflow check */ if (tmp.num_counters >= INT_MAX / sizeof(struct xt_counters)) return -ENOMEM; if (tmp.num_counters == 0) return -EINVAL; tmp.name[sizeof(tmp.name)-1] = 0; newinfo = xt_alloc_table_info(tmp.size); if (!newinfo) return -ENOMEM; loc_cpu_entry = newinfo->entries; if (copy_from_user(loc_cpu_entry, user + sizeof(tmp), tmp.size) != 0) { ret = -EFAULT; goto free_newinfo; } ret = translate_table(net, newinfo, loc_cpu_entry, &tmp); if (ret != 0) goto free_newinfo; duprintf("ip_tables: Translated table\n"); ret = __do_replace(net, tmp.name, tmp.valid_hooks, newinfo, tmp.num_counters, tmp.counters); if (ret) goto free_newinfo_untrans; return 0; free_newinfo_untrans: xt_entry_foreach(iter, loc_cpu_entry, newinfo->size) cleanup_entry(iter, net); free_newinfo: xt_free_table_info(newinfo); return ret; } static int do_add_counters(struct net *net, const void __user *user, unsigned int len, int compat) { unsigned int i; struct xt_counters_info tmp; struct xt_counters *paddc; unsigned int num_counters; char *name; int size; void *ptmp; struct xt_table *t; const struct xt_table_info *private; int ret = 0; struct ip6t_entry *iter; unsigned int addend; #ifdef CONFIG_COMPAT struct compat_xt_counters_info compat_tmp; if (compat) { ptmp = &compat_tmp; size = sizeof(struct compat_xt_counters_info); } else #endif { ptmp = &tmp; size = sizeof(struct xt_counters_info); } if (copy_from_user(ptmp, user, size) != 0) return -EFAULT; #ifdef CONFIG_COMPAT if (compat) { num_counters = compat_tmp.num_counters; name = compat_tmp.name; } else #endif { num_counters = tmp.num_counters; name = tmp.name; } if (len != size + num_counters * sizeof(struct xt_counters)) return -EINVAL; paddc = vmalloc(len - size); if (!paddc) return -ENOMEM; if (copy_from_user(paddc, user 
+ size, len - size) != 0) { ret = -EFAULT; goto free; } t = xt_find_table_lock(net, AF_INET6, name); if (IS_ERR_OR_NULL(t)) { ret = t ? PTR_ERR(t) : -ENOENT; goto free; } local_bh_disable(); private = t->private; if (private->number != num_counters) { ret = -EINVAL; goto unlock_up_free; } i = 0; addend = xt_write_recseq_begin(); xt_entry_foreach(iter, private->entries, private->size) { struct xt_counters *tmp; tmp = xt_get_this_cpu_counter(&iter->counters); ADD_COUNTER(*tmp, paddc[i].bcnt, paddc[i].pcnt); ++i; } xt_write_recseq_end(addend); unlock_up_free: local_bh_enable(); xt_table_unlock(t); module_put(t->me); free: vfree(paddc); return ret; } #ifdef CONFIG_COMPAT struct compat_ip6t_replace { char name[XT_TABLE_MAXNAMELEN]; u32 valid_hooks; u32 num_entries; u32 size; u32 hook_entry[NF_INET_NUMHOOKS]; u32 underflow[NF_INET_NUMHOOKS]; u32 num_counters; compat_uptr_t counters; /* struct xt_counters * */ struct compat_ip6t_entry entries[0]; }; static int compat_copy_entry_to_user(struct ip6t_entry *e, void __user **dstptr, unsigned int *size, struct xt_counters *counters, unsigned int i) { struct xt_entry_target *t; struct compat_ip6t_entry __user *ce; u_int16_t target_offset, next_offset; compat_uint_t origsize; const struct xt_entry_match *ematch; int ret = 0; origsize = *size; ce = (struct compat_ip6t_entry __user *)*dstptr; if (copy_to_user(ce, e, sizeof(struct ip6t_entry)) != 0 || copy_to_user(&ce->counters, &counters[i], sizeof(counters[i])) != 0) return -EFAULT; *dstptr += sizeof(struct compat_ip6t_entry); *size -= sizeof(struct ip6t_entry) - sizeof(struct compat_ip6t_entry); xt_ematch_foreach(ematch, e) { ret = xt_compat_match_to_user(ematch, dstptr, size); if (ret != 0) return ret; } target_offset = e->target_offset - (origsize - *size); t = ip6t_get_target(e); ret = xt_compat_target_to_user(t, dstptr, size); if (ret) return ret; next_offset = e->next_offset - (origsize - *size); if (put_user(target_offset, &ce->target_offset) != 0 || put_user(next_offset, 
&ce->next_offset) != 0) return -EFAULT; return 0; } static int compat_find_calc_match(struct xt_entry_match *m, const char *name, const struct ip6t_ip6 *ipv6, int *size) { struct xt_match *match; match = xt_request_find_match(NFPROTO_IPV6, m->u.user.name, m->u.user.revision); if (IS_ERR(match)) { duprintf("compat_check_calc_match: `%s' not found\n", m->u.user.name); return PTR_ERR(match); } m->u.kernel.match = match; *size += xt_compat_match_offset(match); return 0; } static void compat_release_entry(struct compat_ip6t_entry *e) { struct xt_entry_target *t; struct xt_entry_match *ematch; /* Cleanup all matches */ xt_ematch_foreach(ematch, e) module_put(ematch->u.kernel.match->me); t = compat_ip6t_get_target(e); module_put(t->u.kernel.target->me); } static int check_compat_entry_size_and_hooks(struct compat_ip6t_entry *e, struct xt_table_info *newinfo, unsigned int *size, const unsigned char *base, const unsigned char *limit, const unsigned int *hook_entries, const unsigned int *underflows, const char *name) { struct xt_entry_match *ematch; struct xt_entry_target *t; struct xt_target *target; unsigned int entry_offset; unsigned int j; int ret, off, h; duprintf("check_compat_entry_size_and_hooks %p\n", e); if ((unsigned long)e % __alignof__(struct compat_ip6t_entry) != 0 || (unsigned char *)e + sizeof(struct compat_ip6t_entry) >= limit || (unsigned char *)e + e->next_offset > limit) { duprintf("Bad offset %p, limit = %p\n", e, limit); return -EINVAL; } if (e->next_offset < sizeof(struct compat_ip6t_entry) + sizeof(struct compat_xt_entry_target)) { duprintf("checking: element %p size %u\n", e, e->next_offset); return -EINVAL; } /* For purposes of check_entry casting the compat entry is fine */ ret = check_entry((struct ip6t_entry *)e); if (ret) return ret; off = sizeof(struct ip6t_entry) - sizeof(struct compat_ip6t_entry); entry_offset = (void *)e - (void *)base; j = 0; xt_ematch_foreach(ematch, e) { ret = compat_find_calc_match(ematch, name, &e->ipv6, &off); if (ret 
!= 0) goto release_matches; ++j; } t = compat_ip6t_get_target(e); target = xt_request_find_target(NFPROTO_IPV6, t->u.user.name, t->u.user.revision); if (IS_ERR(target)) { duprintf("check_compat_entry_size_and_hooks: `%s' not found\n", t->u.user.name); ret = PTR_ERR(target); goto release_matches; } t->u.kernel.target = target; off += xt_compat_target_offset(target); *size += off; ret = xt_compat_add_offset(AF_INET6, entry_offset, off); if (ret) goto out; /* Check hooks & underflows */ for (h = 0; h < NF_INET_NUMHOOKS; h++) { if ((unsigned char *)e - base == hook_entries[h]) newinfo->hook_entry[h] = hook_entries[h]; if ((unsigned char *)e - base == underflows[h]) newinfo->underflow[h] = underflows[h]; } /* Clear counters and comefrom */ memset(&e->counters, 0, sizeof(e->counters)); e->comefrom = 0; return 0; out: module_put(t->u.kernel.target->me); release_matches: xt_ematch_foreach(ematch, e) { if (j-- == 0) break; module_put(ematch->u.kernel.match->me); } return ret; } static int compat_copy_entry_from_user(struct compat_ip6t_entry *e, void **dstptr, unsigned int *size, const char *name, struct xt_table_info *newinfo, unsigned char *base) { struct xt_entry_target *t; struct ip6t_entry *de; unsigned int origsize; int ret, h; struct xt_entry_match *ematch; ret = 0; origsize = *size; de = (struct ip6t_entry *)*dstptr; memcpy(de, e, sizeof(struct ip6t_entry)); memcpy(&de->counters, &e->counters, sizeof(e->counters)); *dstptr += sizeof(struct ip6t_entry); *size += sizeof(struct ip6t_entry) - sizeof(struct compat_ip6t_entry); xt_ematch_foreach(ematch, e) { ret = xt_compat_match_from_user(ematch, dstptr, size); if (ret != 0) return ret; } de->target_offset = e->target_offset - (origsize - *size); t = compat_ip6t_get_target(e); xt_compat_target_from_user(t, dstptr, size); de->next_offset = e->next_offset - (origsize - *size); for (h = 0; h < NF_INET_NUMHOOKS; h++) { if ((unsigned char *)de - base < newinfo->hook_entry[h]) newinfo->hook_entry[h] -= origsize - *size; if 
((unsigned char *)de - base < newinfo->underflow[h]) newinfo->underflow[h] -= origsize - *size; } return ret; } static int compat_check_entry(struct ip6t_entry *e, struct net *net, const char *name) { unsigned int j; int ret = 0; struct xt_mtchk_param mtpar; struct xt_entry_match *ematch; e->counters.pcnt = xt_percpu_counter_alloc(); if (IS_ERR_VALUE(e->counters.pcnt)) return -ENOMEM; j = 0; mtpar.net = net; mtpar.table = name; mtpar.entryinfo = &e->ipv6; mtpar.hook_mask = e->comefrom; mtpar.family = NFPROTO_IPV6; xt_ematch_foreach(ematch, e) { ret = check_match(ematch, &mtpar); if (ret != 0) goto cleanup_matches; ++j; } ret = check_target(e, net, name); if (ret) goto cleanup_matches; return 0; cleanup_matches: xt_ematch_foreach(ematch, e) { if (j-- == 0) break; cleanup_match(ematch, net); } xt_percpu_counter_free(e->counters.pcnt); return ret; } static int translate_compat_table(struct net *net, const char *name, unsigned int valid_hooks, struct xt_table_info **pinfo, void **pentry0, unsigned int total_size, unsigned int number, unsigned int *hook_entries, unsigned int *underflows) { unsigned int i, j; struct xt_table_info *newinfo, *info; void *pos, *entry0, *entry1; struct compat_ip6t_entry *iter0; struct ip6t_entry *iter1; unsigned int size; int ret = 0; info = *pinfo; entry0 = *pentry0; size = total_size; info->number = number; /* Init all hooks to impossible value. */ for (i = 0; i < NF_INET_NUMHOOKS; i++) { info->hook_entry[i] = 0xFFFFFFFF; info->underflow[i] = 0xFFFFFFFF; } duprintf("translate_compat_table: size %u\n", info->size); j = 0; xt_compat_lock(AF_INET6); xt_compat_init_offsets(AF_INET6, number); /* Walk through entries, checking offsets. 
*/ xt_entry_foreach(iter0, entry0, total_size) { ret = check_compat_entry_size_and_hooks(iter0, info, &size, entry0, entry0 + total_size, hook_entries, underflows, name); if (ret != 0) goto out_unlock; ++j; } ret = -EINVAL; if (j != number) { duprintf("translate_compat_table: %u not %u entries\n", j, number); goto out_unlock; } /* Check hooks all assigned */ for (i = 0; i < NF_INET_NUMHOOKS; i++) { /* Only hooks which are valid */ if (!(valid_hooks & (1 << i))) continue; if (info->hook_entry[i] == 0xFFFFFFFF) { duprintf("Invalid hook entry %u %u\n", i, hook_entries[i]); goto out_unlock; } if (info->underflow[i] == 0xFFFFFFFF) { duprintf("Invalid underflow %u %u\n", i, underflows[i]); goto out_unlock; } } ret = -ENOMEM; newinfo = xt_alloc_table_info(size); if (!newinfo) goto out_unlock; newinfo->number = number; for (i = 0; i < NF_INET_NUMHOOKS; i++) { newinfo->hook_entry[i] = info->hook_entry[i]; newinfo->underflow[i] = info->underflow[i]; } entry1 = newinfo->entries; pos = entry1; size = total_size; xt_entry_foreach(iter0, entry0, total_size) { ret = compat_copy_entry_from_user(iter0, &pos, &size, name, newinfo, entry1); if (ret != 0) break; } xt_compat_flush_offsets(AF_INET6); xt_compat_unlock(AF_INET6); if (ret) goto free_newinfo; ret = -ELOOP; if (!mark_source_chains(newinfo, valid_hooks, entry1)) goto free_newinfo; i = 0; xt_entry_foreach(iter1, entry1, newinfo->size) { ret = compat_check_entry(iter1, net, name); if (ret != 0) break; ++i; if (strcmp(ip6t_get_target(iter1)->u.user.name, XT_ERROR_TARGET) == 0) ++newinfo->stacksize; } if (ret) { /* * The first i matches need cleanup_entry (calls ->destroy) * because they had called ->check already. The other j-i * entries need only release. 
*/ int skip = i; j -= i; xt_entry_foreach(iter0, entry0, newinfo->size) { if (skip-- > 0) continue; if (j-- == 0) break; compat_release_entry(iter0); } xt_entry_foreach(iter1, entry1, newinfo->size) { if (i-- == 0) break; cleanup_entry(iter1, net); } xt_free_table_info(newinfo); return ret; } *pinfo = newinfo; *pentry0 = entry1; xt_free_table_info(info); return 0; free_newinfo: xt_free_table_info(newinfo); out: xt_entry_foreach(iter0, entry0, total_size) { if (j-- == 0) break; compat_release_entry(iter0); } return ret; out_unlock: xt_compat_flush_offsets(AF_INET6); xt_compat_unlock(AF_INET6); goto out; } static int compat_do_replace(struct net *net, void __user *user, unsigned int len) { int ret; struct compat_ip6t_replace tmp; struct xt_table_info *newinfo; void *loc_cpu_entry; struct ip6t_entry *iter; if (copy_from_user(&tmp, user, sizeof(tmp)) != 0) return -EFAULT; /* overflow check */ if (tmp.size >= INT_MAX / num_possible_cpus()) return -ENOMEM; if (tmp.num_counters >= INT_MAX / sizeof(struct xt_counters)) return -ENOMEM; if (tmp.num_counters == 0) return -EINVAL; tmp.name[sizeof(tmp.name)-1] = 0; newinfo = xt_alloc_table_info(tmp.size); if (!newinfo) return -ENOMEM; loc_cpu_entry = newinfo->entries; if (copy_from_user(loc_cpu_entry, user + sizeof(tmp), tmp.size) != 0) { ret = -EFAULT; goto free_newinfo; } ret = translate_compat_table(net, tmp.name, tmp.valid_hooks, &newinfo, &loc_cpu_entry, tmp.size, tmp.num_entries, tmp.hook_entry, tmp.underflow); if (ret != 0) goto free_newinfo; duprintf("compat_do_replace: Translated table\n"); ret = __do_replace(net, tmp.name, tmp.valid_hooks, newinfo, tmp.num_counters, compat_ptr(tmp.counters)); if (ret) goto free_newinfo_untrans; return 0; free_newinfo_untrans: xt_entry_foreach(iter, loc_cpu_entry, newinfo->size) cleanup_entry(iter, net); free_newinfo: xt_free_table_info(newinfo); return ret; } static int compat_do_ip6t_set_ctl(struct sock *sk, int cmd, void __user *user, unsigned int len) { int ret; if 
(!ns_capable(sock_net(sk)->user_ns, CAP_NET_ADMIN)) return -EPERM; switch (cmd) { case IP6T_SO_SET_REPLACE: ret = compat_do_replace(sock_net(sk), user, len); break; case IP6T_SO_SET_ADD_COUNTERS: ret = do_add_counters(sock_net(sk), user, len, 1); break; default: duprintf("do_ip6t_set_ctl: unknown request %i\n", cmd); ret = -EINVAL; } return ret; } struct compat_ip6t_get_entries { char name[XT_TABLE_MAXNAMELEN]; compat_uint_t size; struct compat_ip6t_entry entrytable[0]; }; static int compat_copy_entries_to_user(unsigned int total_size, struct xt_table *table, void __user *userptr) { struct xt_counters *counters; const struct xt_table_info *private = table->private; void __user *pos; unsigned int size; int ret = 0; unsigned int i = 0; struct ip6t_entry *iter; counters = alloc_counters(table); if (IS_ERR(counters)) return PTR_ERR(counters); pos = userptr; size = total_size; xt_entry_foreach(iter, private->entries, total_size) { ret = compat_copy_entry_to_user(iter, &pos, &size, counters, i++); if (ret != 0) break; } vfree(counters); return ret; } static int compat_get_entries(struct net *net, struct compat_ip6t_get_entries __user *uptr, int *len) { int ret; struct compat_ip6t_get_entries get; struct xt_table *t; if (*len < sizeof(get)) { duprintf("compat_get_entries: %u < %zu\n", *len, sizeof(get)); return -EINVAL; } if (copy_from_user(&get, uptr, sizeof(get)) != 0) return -EFAULT; if (*len != sizeof(struct compat_ip6t_get_entries) + get.size) { duprintf("compat_get_entries: %u != %zu\n", *len, sizeof(get) + get.size); return -EINVAL; } xt_compat_lock(AF_INET6); t = xt_find_table_lock(net, AF_INET6, get.name); if (!IS_ERR_OR_NULL(t)) { const struct xt_table_info *private = t->private; struct xt_table_info info; duprintf("t->private->number = %u\n", private->number); ret = compat_table_info(private, &info); if (!ret && get.size == info.size) { ret = compat_copy_entries_to_user(private->size, t, uptr->entrytable); } else if (!ret) { duprintf("compat_get_entries: I've 
got %u not %u!\n", private->size, get.size); ret = -EAGAIN; } xt_compat_flush_offsets(AF_INET6); module_put(t->me); xt_table_unlock(t); } else ret = t ? PTR_ERR(t) : -ENOENT; xt_compat_unlock(AF_INET6); return ret; } static int do_ip6t_get_ctl(struct sock *, int, void __user *, int *); static int compat_do_ip6t_get_ctl(struct sock *sk, int cmd, void __user *user, int *len) { int ret; if (!ns_capable(sock_net(sk)->user_ns, CAP_NET_ADMIN)) return -EPERM; switch (cmd) { case IP6T_SO_GET_INFO: ret = get_info(sock_net(sk), user, len, 1); break; case IP6T_SO_GET_ENTRIES: ret = compat_get_entries(sock_net(sk), user, len); break; default: ret = do_ip6t_get_ctl(sk, cmd, user, len); } return ret; } #endif static int do_ip6t_set_ctl(struct sock *sk, int cmd, void __user *user, unsigned int len) { int ret; if (!ns_capable(sock_net(sk)->user_ns, CAP_NET_ADMIN)) return -EPERM; switch (cmd) { case IP6T_SO_SET_REPLACE: ret = do_replace(sock_net(sk), user, len); break; case IP6T_SO_SET_ADD_COUNTERS: ret = do_add_counters(sock_net(sk), user, len, 0); break; default: duprintf("do_ip6t_set_ctl: unknown request %i\n", cmd); ret = -EINVAL; } return ret; } static int do_ip6t_get_ctl(struct sock *sk, int cmd, void __user *user, int *len) { int ret; if (!ns_capable(sock_net(sk)->user_ns, CAP_NET_ADMIN)) return -EPERM; switch (cmd) { case IP6T_SO_GET_INFO: ret = get_info(sock_net(sk), user, len, 0); break; case IP6T_SO_GET_ENTRIES: ret = get_entries(sock_net(sk), user, len); break; case IP6T_SO_GET_REVISION_MATCH: case IP6T_SO_GET_REVISION_TARGET: { struct xt_get_revision rev; int target; if (*len != sizeof(rev)) { ret = -EINVAL; break; } if (copy_from_user(&rev, user, sizeof(rev)) != 0) { ret = -EFAULT; break; } rev.name[sizeof(rev.name)-1] = 0; if (cmd == IP6T_SO_GET_REVISION_TARGET) target = 1; else target = 0; try_then_request_module(xt_find_revision(AF_INET6, rev.name, rev.revision, target, &ret), "ip6t_%s", rev.name); break; } default: duprintf("do_ip6t_get_ctl: unknown request %i\n", 
cmd); ret = -EINVAL; } return ret; } static void __ip6t_unregister_table(struct net *net, struct xt_table *table) { struct xt_table_info *private; void *loc_cpu_entry; struct module *table_owner = table->me; struct ip6t_entry *iter; private = xt_unregister_table(table); /* Decrease module usage counts and free resources */ loc_cpu_entry = private->entries; xt_entry_foreach(iter, loc_cpu_entry, private->size) cleanup_entry(iter, net); if (private->number > private->initial_entries) module_put(table_owner); xt_free_table_info(private); } int ip6t_register_table(struct net *net, const struct xt_table *table, const struct ip6t_replace *repl, const struct nf_hook_ops *ops, struct xt_table **res) { int ret; struct xt_table_info *newinfo; struct xt_table_info bootstrap = {0}; void *loc_cpu_entry; struct xt_table *new_table; newinfo = xt_alloc_table_info(repl->size); if (!newinfo) return -ENOMEM; loc_cpu_entry = newinfo->entries; memcpy(loc_cpu_entry, repl->entries, repl->size); ret = translate_table(net, newinfo, loc_cpu_entry, repl); if (ret != 0) goto out_free; new_table = xt_register_table(net, table, &bootstrap, newinfo); if (IS_ERR(new_table)) { ret = PTR_ERR(new_table); goto out_free; } /* set res now, will see skbs right after nf_register_net_hooks */ WRITE_ONCE(*res, new_table); ret = nf_register_net_hooks(net, ops, hweight32(table->valid_hooks)); if (ret != 0) { __ip6t_unregister_table(net, new_table); *res = NULL; } return ret; out_free: xt_free_table_info(newinfo); return ret; } void ip6t_unregister_table(struct net *net, struct xt_table *table, const struct nf_hook_ops *ops) { nf_unregister_net_hooks(net, ops, hweight32(table->valid_hooks)); __ip6t_unregister_table(net, table); } /* Returns 1 if the type and code is matched by the range, 0 otherwise */ static inline bool icmp6_type_code_match(u_int8_t test_type, u_int8_t min_code, u_int8_t max_code, u_int8_t type, u_int8_t code, bool invert) { return (type == test_type && code >= min_code && code <= max_code) 
^ invert; } static bool icmp6_match(const struct sk_buff *skb, struct xt_action_param *par) { const struct icmp6hdr *ic; struct icmp6hdr _icmph; const struct ip6t_icmp *icmpinfo = par->matchinfo; /* Must not be a fragment. */ if (par->fragoff != 0) return false; ic = skb_header_pointer(skb, par->thoff, sizeof(_icmph), &_icmph); if (ic == NULL) { /* We've been asked to examine this packet, and we * can't. Hence, no choice but to drop. */ duprintf("Dropping evil ICMP tinygram.\n"); par->hotdrop = true; return false; } return icmp6_type_code_match(icmpinfo->type, icmpinfo->code[0], icmpinfo->code[1], ic->icmp6_type, ic->icmp6_code, !!(icmpinfo->invflags&IP6T_ICMP_INV)); } /* Called when user tries to insert an entry of this type. */ static int icmp6_checkentry(const struct xt_mtchk_param *par) { const struct ip6t_icmp *icmpinfo = par->matchinfo; /* Must specify no unknown invflags */ return (icmpinfo->invflags & ~IP6T_ICMP_INV) ? -EINVAL : 0; } /* The built-in targets: standard (NULL) and error. 
*/ static struct xt_target ip6t_builtin_tg[] __read_mostly = { { .name = XT_STANDARD_TARGET, .targetsize = sizeof(int), .family = NFPROTO_IPV6, #ifdef CONFIG_COMPAT .compatsize = sizeof(compat_int_t), .compat_from_user = compat_standard_from_user, .compat_to_user = compat_standard_to_user, #endif }, { .name = XT_ERROR_TARGET, .target = ip6t_error, .targetsize = XT_FUNCTION_MAXNAMELEN, .family = NFPROTO_IPV6, }, }; static struct nf_sockopt_ops ip6t_sockopts = { .pf = PF_INET6, .set_optmin = IP6T_BASE_CTL, .set_optmax = IP6T_SO_SET_MAX+1, .set = do_ip6t_set_ctl, #ifdef CONFIG_COMPAT .compat_set = compat_do_ip6t_set_ctl, #endif .get_optmin = IP6T_BASE_CTL, .get_optmax = IP6T_SO_GET_MAX+1, .get = do_ip6t_get_ctl, #ifdef CONFIG_COMPAT .compat_get = compat_do_ip6t_get_ctl, #endif .owner = THIS_MODULE, }; static struct xt_match ip6t_builtin_mt[] __read_mostly = { { .name = "icmp6", .match = icmp6_match, .matchsize = sizeof(struct ip6t_icmp), .checkentry = icmp6_checkentry, .proto = IPPROTO_ICMPV6, .family = NFPROTO_IPV6, }, }; static int __net_init ip6_tables_net_init(struct net *net) { return xt_proto_init(net, NFPROTO_IPV6); } static void __net_exit ip6_tables_net_exit(struct net *net) { xt_proto_fini(net, NFPROTO_IPV6); } static struct pernet_operations ip6_tables_net_ops = { .init = ip6_tables_net_init, .exit = ip6_tables_net_exit, }; static int __init ip6_tables_init(void) { int ret; ret = register_pernet_subsys(&ip6_tables_net_ops); if (ret < 0) goto err1; /* No one else will be downing sem now, so we won't sleep */ ret = xt_register_targets(ip6t_builtin_tg, ARRAY_SIZE(ip6t_builtin_tg)); if (ret < 0) goto err2; ret = xt_register_matches(ip6t_builtin_mt, ARRAY_SIZE(ip6t_builtin_mt)); if (ret < 0) goto err4; /* Register setsockopt */ ret = nf_register_sockopt(&ip6t_sockopts); if (ret < 0) goto err5; pr_info("(C) 2000-2006 Netfilter Core Team\n"); return 0; err5: xt_unregister_matches(ip6t_builtin_mt, ARRAY_SIZE(ip6t_builtin_mt)); err4: 
xt_unregister_targets(ip6t_builtin_tg, ARRAY_SIZE(ip6t_builtin_tg)); err2: unregister_pernet_subsys(&ip6_tables_net_ops); err1: return ret; } static void __exit ip6_tables_fini(void) { nf_unregister_sockopt(&ip6t_sockopts); xt_unregister_matches(ip6t_builtin_mt, ARRAY_SIZE(ip6t_builtin_mt)); xt_unregister_targets(ip6t_builtin_tg, ARRAY_SIZE(ip6t_builtin_tg)); unregister_pernet_subsys(&ip6_tables_net_ops); } EXPORT_SYMBOL(ip6t_register_table); EXPORT_SYMBOL(ip6t_unregister_table); EXPORT_SYMBOL(ip6t_do_table); module_init(ip6_tables_init); module_exit(ip6_tables_fini);
check_compat_entry_size_and_hooks(struct compat_ip6t_entry *e, struct xt_table_info *newinfo, unsigned int *size, const unsigned char *base, const unsigned char *limit, const unsigned int *hook_entries, const unsigned int *underflows, const char *name) { struct xt_entry_match *ematch; struct xt_entry_target *t; struct xt_target *target; unsigned int entry_offset; unsigned int j; int ret, off, h; duprintf("check_compat_entry_size_and_hooks %p\n", e); if ((unsigned long)e % __alignof__(struct compat_ip6t_entry) != 0 || (unsigned char *)e + sizeof(struct compat_ip6t_entry) >= limit) { duprintf("Bad offset %p, limit = %p\n", e, limit); return -EINVAL; } if (e->next_offset < sizeof(struct compat_ip6t_entry) + sizeof(struct compat_xt_entry_target)) { duprintf("checking: element %p size %u\n", e, e->next_offset); return -EINVAL; } /* For purposes of check_entry casting the compat entry is fine */ ret = check_entry((struct ip6t_entry *)e); if (ret) return ret; off = sizeof(struct ip6t_entry) - sizeof(struct compat_ip6t_entry); entry_offset = (void *)e - (void *)base; j = 0; xt_ematch_foreach(ematch, e) { ret = compat_find_calc_match(ematch, name, &e->ipv6, &off); if (ret != 0) goto release_matches; ++j; } t = compat_ip6t_get_target(e); target = xt_request_find_target(NFPROTO_IPV6, t->u.user.name, t->u.user.revision); if (IS_ERR(target)) { duprintf("check_compat_entry_size_and_hooks: `%s' not found\n", t->u.user.name); ret = PTR_ERR(target); goto release_matches; } t->u.kernel.target = target; off += xt_compat_target_offset(target); *size += off; ret = xt_compat_add_offset(AF_INET6, entry_offset, off); if (ret) goto out; /* Check hooks & underflows */ for (h = 0; h < NF_INET_NUMHOOKS; h++) { if ((unsigned char *)e - base == hook_entries[h]) newinfo->hook_entry[h] = hook_entries[h]; if ((unsigned char *)e - base == underflows[h]) newinfo->underflow[h] = underflows[h]; } /* Clear counters and comefrom */ memset(&e->counters, 0, sizeof(e->counters)); e->comefrom = 0; return 0; 
out: module_put(t->u.kernel.target->me); release_matches: xt_ematch_foreach(ematch, e) { if (j-- == 0) break; module_put(ematch->u.kernel.match->me); } return ret; }
check_compat_entry_size_and_hooks(struct compat_ip6t_entry *e, struct xt_table_info *newinfo, unsigned int *size, const unsigned char *base, const unsigned char *limit, const unsigned int *hook_entries, const unsigned int *underflows, const char *name) { struct xt_entry_match *ematch; struct xt_entry_target *t; struct xt_target *target; unsigned int entry_offset; unsigned int j; int ret, off, h; duprintf("check_compat_entry_size_and_hooks %p\n", e); if ((unsigned long)e % __alignof__(struct compat_ip6t_entry) != 0 || (unsigned char *)e + sizeof(struct compat_ip6t_entry) >= limit || (unsigned char *)e + e->next_offset > limit) { duprintf("Bad offset %p, limit = %p\n", e, limit); return -EINVAL; } if (e->next_offset < sizeof(struct compat_ip6t_entry) + sizeof(struct compat_xt_entry_target)) { duprintf("checking: element %p size %u\n", e, e->next_offset); return -EINVAL; } /* For purposes of check_entry casting the compat entry is fine */ ret = check_entry((struct ip6t_entry *)e); if (ret) return ret; off = sizeof(struct ip6t_entry) - sizeof(struct compat_ip6t_entry); entry_offset = (void *)e - (void *)base; j = 0; xt_ematch_foreach(ematch, e) { ret = compat_find_calc_match(ematch, name, &e->ipv6, &off); if (ret != 0) goto release_matches; ++j; } t = compat_ip6t_get_target(e); target = xt_request_find_target(NFPROTO_IPV6, t->u.user.name, t->u.user.revision); if (IS_ERR(target)) { duprintf("check_compat_entry_size_and_hooks: `%s' not found\n", t->u.user.name); ret = PTR_ERR(target); goto release_matches; } t->u.kernel.target = target; off += xt_compat_target_offset(target); *size += off; ret = xt_compat_add_offset(AF_INET6, entry_offset, off); if (ret) goto out; /* Check hooks & underflows */ for (h = 0; h < NF_INET_NUMHOOKS; h++) { if ((unsigned char *)e - base == hook_entries[h]) newinfo->hook_entry[h] = hook_entries[h]; if ((unsigned char *)e - base == underflows[h]) newinfo->underflow[h] = underflows[h]; } /* Clear counters and comefrom */ memset(&e->counters, 0, 
sizeof(e->counters)); e->comefrom = 0; return 0; out: module_put(t->u.kernel.target->me); release_matches: xt_ematch_foreach(ematch, e) { if (j-- == 0) break; module_put(ematch->u.kernel.match->me); } return ret; }
{'added': [(753, '\t (unsigned char *)e + sizeof(struct ip6t_entry) >= limit ||'), (754, '\t (unsigned char *)e + e->next_offset > limit) {'), (1508, '\t (unsigned char *)e + sizeof(struct compat_ip6t_entry) >= limit ||'), (1509, '\t (unsigned char *)e + e->next_offset > limit) {')], 'deleted': [(753, '\t (unsigned char *)e + sizeof(struct ip6t_entry) >= limit) {'), (1507, '\t (unsigned char *)e + sizeof(struct compat_ip6t_entry) >= limit) {')]}
4
2
1,807
11,142
73
503
12
https://github.com/torvalds/linux
CVE-2016-4998
CWE-119
536
burl.c
C++
burl_normalize_2F_to_slash_fix
#include "first.h" #include "burl.h" #include <string.h> #include "buffer.h" #include "base64.h" static const char hex_chars_uc[] = "0123456789ABCDEF"; /* everything except: ! $ & ' ( ) * + , - . / 0-9 : ; = ? @ A-Z _ a-z ~ */ static const char encoded_chars_http_uri_reqd[] = { /* 0 1 2 3 4 5 6 7 8 9 A B C D E F */ 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, /* 00 - 0F control chars */ 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, /* 10 - 1F */ 1, 0, 1, 1, 0, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, /* 20 - 2F space " # % */ 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 0, 1, 0, /* 30 - 3F < > */ 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, /* 40 - 4F */ 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 1, 1, 1, 0, /* 50 - 5F [ \ ] ^ */ 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, /* 60 - 6F ` */ 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 1, 1, 0, 1, /* 70 - 7F { | } DEL */ 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, /* 80 - 8F */ 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, /* 90 - 9F */ 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, /* A0 - AF */ 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, /* B0 - BF */ 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, /* C0 - CF */ 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, /* D0 - DF */ 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, /* E0 - EF */ 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, /* F0 - FF */ }; /* c (char) and n (nibble) MUST be unsigned integer types */ #define li_cton(c,n) \ (((n) = (c) - '0') <= 9 || (((n) = ((c)&0xdf) - 'A') <= 5 ? ((n) += 10) : 0)) /* b (byte) MUST be unsigned integer type * https://en.wikipedia.org/wiki/UTF-8 * reject overlong encodings of 7-byte ASCII and invalid UTF-8 * (but does not detect other overlong multi-byte encodings) */ #define li_utf8_invalid_byte(b) ((b) >= 0xF5 || ((b)|0x1) == 0xC1) static int burl_is_unreserved (const int c) { return (light_isalnum(c) || c == '-' || c == '.' 
|| c == '_' || c == '~'); } static int burl_normalize_basic_unreserved_fix (buffer *b, buffer *t, int i, int qs) { int j = i; const int used = (int)buffer_string_length(b); const unsigned char * const s = (unsigned char *)b->ptr; unsigned char * const p = (unsigned char *)buffer_string_prepare_copy(t,i+(used-i)*3+1); unsigned int n1, n2; memcpy(p, s, (size_t)i); for (; i < used; ++i, ++j) { if (!encoded_chars_http_uri_reqd[s[i]]) { if (s[i] == '?' && -1 == qs) qs = j; p[j] = s[i]; } else if (s[i]=='%' && li_cton(s[i+1], n1) && li_cton(s[i+2], n2)) { const unsigned int x = (n1 << 4) | n2; if (burl_is_unreserved(x)) { p[j] = x; } else { p[j] = '%'; p[++j] = hex_chars_uc[n1]; /*(s[i+1] & 0xdf)*/ p[++j] = hex_chars_uc[n2]; /*(s[i+2] & 0xdf)*/ if (li_utf8_invalid_byte(x)) qs = -2; } i+=2; } else if (s[i] == '#') break; /* ignore fragment */ else { p[j] = '%'; p[++j] = hex_chars_uc[(s[i] >> 4) & 0xF]; p[++j] = hex_chars_uc[s[i] & 0xF]; if (li_utf8_invalid_byte(s[i])) qs = -2; } } buffer_commit(t, (size_t)j); buffer_copy_buffer(b, t); return qs; } static int burl_normalize_basic_unreserved (buffer *b, buffer *t) { const unsigned char * const s = (unsigned char *)b->ptr; const int used = (int)buffer_string_length(b); unsigned int n1, n2, x; int qs = -1; for (int i = 0; i < used; ++i) { if (!encoded_chars_http_uri_reqd[s[i]]) { if (s[i] == '?' 
&& -1 == qs) qs = i; } else if (s[i]=='%' && li_cton(s[i+1], n1) && li_cton(s[i+2], n2) && !burl_is_unreserved((x = (n1 << 4) | n2))) { if (li_utf8_invalid_byte(x)) qs = -2; if (s[i+1] >= 'a') b->ptr[i+1] &= 0xdf; /* uppercase hex */ if (s[i+2] >= 'a') b->ptr[i+2] &= 0xdf; /* uppercase hex */ i+=2; } else if (s[i] == '#') { /* ignore fragment */ buffer_string_set_length(b, (size_t)i); break; } else { qs = burl_normalize_basic_unreserved_fix(b, t, i, qs); break; } } return qs; } static int burl_normalize_basic_required_fix (buffer *b, buffer *t, int i, int qs) { int j = i; const int used = (int)buffer_string_length(b); const unsigned char * const s = (unsigned char *)b->ptr; unsigned char * const p = (unsigned char *)buffer_string_prepare_copy(t,i+(used-i)*3+1); unsigned int n1, n2; memcpy(p, s, (size_t)i); for (; i < used; ++i, ++j) { if (!encoded_chars_http_uri_reqd[s[i]]) { if (s[i] == '?' && -1 == qs) qs = j; p[j] = s[i]; } else if (s[i]=='%' && li_cton(s[i+1], n1) && li_cton(s[i+2], n2)) { const unsigned int x = (n1 << 4) | n2; if (!encoded_chars_http_uri_reqd[x] && (qs < 0 ? (x!='/'&&x!='?') : (x!='&'&&x!='='&&x!=';'))) { p[j] = x; } else { p[j] = '%'; p[++j] = hex_chars_uc[n1]; /*(s[i+1] & 0xdf)*/ p[++j] = hex_chars_uc[n2]; /*(s[i+2] & 0xdf)*/ if (li_utf8_invalid_byte(x)) qs = -2; } i+=2; } else if (s[i] == '#') break; /* ignore fragment */ else { p[j] = '%'; p[++j] = hex_chars_uc[(s[i] >> 4) & 0xF]; p[++j] = hex_chars_uc[s[i] & 0xF]; if (li_utf8_invalid_byte(s[i])) qs = -2; } } buffer_commit(t, (size_t)j); buffer_copy_buffer(b, t); return qs; } static int burl_normalize_basic_required (buffer *b, buffer *t) { const unsigned char * const s = (unsigned char *)b->ptr; const int used = (int)buffer_string_length(b); unsigned int n1, n2, x; int qs = -1; for (int i = 0; i < used; ++i) { if (!encoded_chars_http_uri_reqd[s[i]]) { if (s[i] == '?' 
&& -1 == qs) qs = i; } else if (s[i]=='%' && li_cton(s[i+1], n1) && li_cton(s[i+2], n2) && (encoded_chars_http_uri_reqd[(x = (n1 << 4) | n2)] ||(qs < 0 ? (x=='/'||x=='?') : (x=='&'||x=='='||x==';')))){ if (li_utf8_invalid_byte(x)) qs = -2; if (s[i+1] >= 'a') b->ptr[i+1] &= 0xdf; /* uppercase hex */ if (s[i+2] >= 'a') b->ptr[i+2] &= 0xdf; /* uppercase hex */ i+=2; } else if (s[i] == '#') { /* ignore fragment */ buffer_string_set_length(b, (size_t)i); break; } else { qs = burl_normalize_basic_required_fix(b, t, i, qs); break; } } return qs; } static int burl_contains_ctrls (const buffer *b) { const char * const s = b->ptr; const int used = (int)buffer_string_length(b); for (int i = 0; i < used; ++i) { if (s[i] == '%' && (s[i+1] < '2' || (s[i+1] == '7' && s[i+2] == 'F'))) return 1; } return 0; } static void burl_normalize_qs20_to_plus_fix (buffer *b, int i) { char * const s = b->ptr; const int used = (int)buffer_string_length(b); int j = i; for (; i < used; ++i, ++j) { s[j] = s[i]; if (s[i] == '%' && s[i+1] == '2' && s[i+2] == '0') { s[j] = '+'; i+=2; } } buffer_string_set_length(b, j); } static void burl_normalize_qs20_to_plus (buffer *b, int qs) { const char * const s = b->ptr; const int used = qs < 0 ? 0 : (int)buffer_string_length(b); int i; if (qs < 0) return; for (i = qs+1; i < used; ++i) { if (s[i] == '%' && s[i+1] == '2' && s[i+2] == '0') break; } if (i != used) burl_normalize_qs20_to_plus_fix(b, i); } static int burl_normalize_2F_to_slash_fix (buffer *b, int qs, int i) { char * const s = b->ptr; const int blen = (int)buffer_string_length(b); const int used = qs < 0 ? 
blen : qs; int j = i; for (; i < used; ++i, ++j) { s[j] = s[i]; if (s[i] == '%' && s[i+1] == '2' && s[i+2] == 'F') { s[j] = '/'; i+=2; } } if (qs >= 0) { memmove(s+j, s+qs, blen - qs); j += blen - qs; } buffer_string_set_length(b, j); return qs; } static int burl_normalize_2F_to_slash (buffer *b, int qs, int flags) { /*("%2F" must already have been uppercased during normalization)*/ const char * const s = b->ptr; const int used = qs < 0 ? (int)buffer_string_length(b) : qs; for (int i = 0; i < used; ++i) { if (s[i] == '%' && s[i+1] == '2' && s[i+2] == 'F') { return (flags & HTTP_PARSEOPT_URL_NORMALIZE_PATH_2F_DECODE) ? burl_normalize_2F_to_slash_fix(b, qs, i) : -2; /*(flags & HTTP_PARSEOPT_URL_NORMALIZE_PATH_2F_REJECT)*/ } } return qs; } static int burl_normalize_path (buffer *b, buffer *t, int qs, int flags) { const unsigned char * const s = (unsigned char *)b->ptr; const int used = (int)buffer_string_length(b); int path_simplify = 0; for (int i = 0, len = qs < 0 ? used : qs; i < len; ++i) { if (s[i] == '.' && (s[i+1] != '.' || ++i) && (s[i+1] == '/' || s[i+1] == '?' || s[i+1] == '\0')) { path_simplify = 1; break; } do { ++i; } while (i < len && s[i] != '/'); if (s[i] == '/' && s[i+1] == '/') { /*(s[len] != '/')*/ path_simplify = 1; break; } } if (path_simplify) { if (flags & HTTP_PARSEOPT_URL_NORMALIZE_PATH_DOTSEG_REJECT) return -2; if (qs >= 0) { buffer_copy_string_len(t, b->ptr+qs, used - qs); buffer_string_set_length(b, qs); } buffer_path_simplify(b, b); if (qs >= 0) { qs = (int)buffer_string_length(b); buffer_append_string_len(b, CONST_BUF_LEN(t)); } } return qs; } int burl_normalize (buffer *b, buffer *t, int flags) { int qs; #if defined(__WIN32) || defined(__CYGWIN__) /* Windows and Cygwin treat '\\' as '/' if '\\' is present in path; * convert to '/' for consistency before percent-encoding * normalization which will convert '\\' to "%5C" in the URL. * (Clients still should not be sending '\\' unencoded in requests.) 
*/ if (flags & HTTP_PARSEOPT_URL_NORMALIZE_PATH_BACKSLASH_TRANS) { for (char *p = b->ptr; *p != '?' && *p != '\0'; ++p) { if (*p == '\\') *p = '/'; } } #endif qs = (flags & HTTP_PARSEOPT_URL_NORMALIZE_REQUIRED) ? burl_normalize_basic_required(b, t) : burl_normalize_basic_unreserved(b, t); if (-2 == qs) return -2; if (flags & HTTP_PARSEOPT_URL_NORMALIZE_CTRLS_REJECT) { if (burl_contains_ctrls(b)) return -2; } if (flags & (HTTP_PARSEOPT_URL_NORMALIZE_PATH_2F_DECODE |HTTP_PARSEOPT_URL_NORMALIZE_PATH_2F_REJECT)) { qs = burl_normalize_2F_to_slash(b, qs, flags); if (-2 == qs) return -2; } if (flags & (HTTP_PARSEOPT_URL_NORMALIZE_PATH_DOTSEG_REMOVE |HTTP_PARSEOPT_URL_NORMALIZE_PATH_DOTSEG_REJECT)) { qs = burl_normalize_path(b, t, qs, flags); if (-2 == qs) return -2; } if (flags & HTTP_PARSEOPT_URL_NORMALIZE_QUERY_20_PLUS) { if (qs >= 0) burl_normalize_qs20_to_plus(b, qs); } return qs; } static void burl_append_encode_nde (buffer * const b, const char * const str, const size_t len) { /* percent-encodes everything except unreserved - . 0-9 A-Z _ a-z ~ * unless already percent-encoded (does not double-encode) */ /* Note: not checking for invalid UTF-8 */ char * const p = buffer_string_prepare_append(b, len*3); unsigned int n1, n2; int j = 0; for (unsigned int i = 0; i < len; ++i, ++j) { if (str[i]=='%' && li_cton(str[i+1], n1) && li_cton(str[i+2], n2)) { const unsigned int x = (n1 << 4) | n2; if (burl_is_unreserved((int)x)) { p[j] = (char)x; } else { /* leave UTF-8, control chars, and required chars encoded */ p[j] = '%'; p[++j] = str[i+1]; p[++j] = str[i+2]; } i+=2; } else if (burl_is_unreserved(str[i])) { p[j] = str[i]; } else { p[j] = '%'; p[++j] = hex_chars_uc[(str[i] >> 4) & 0xF]; p[++j] = hex_chars_uc[str[i] & 0xF]; } } buffer_commit(b, j); } static void burl_append_encode_psnde (buffer * const b, const char * const str, const size_t len) { /* percent-encodes everything except unreserved - . 
0-9 A-Z _ a-z ~ plus / * unless already percent-encoded (does not double-encode) */ /* Note: not checking for invalid UTF-8 */ char * const p = buffer_string_prepare_append(b, len*3); unsigned int n1, n2; int j = 0; for (unsigned int i = 0; i < len; ++i, ++j) { if (str[i]=='%' && li_cton(str[i+1], n1) && li_cton(str[i+2], n2)) { const unsigned int x = (n1 << 4) | n2; if (burl_is_unreserved((int)x)) { p[j] = (char)x; } else { /* leave UTF-8, control chars, and required chars encoded */ p[j] = '%'; p[++j] = str[i+1]; p[++j] = str[i+2]; } i+=2; } else if (burl_is_unreserved(str[i]) || str[i] == '/') { p[j] = str[i]; } else { p[j] = '%'; p[++j] = hex_chars_uc[(str[i] >> 4) & 0xF]; p[++j] = hex_chars_uc[str[i] & 0xF]; } } buffer_commit(b, j); } static void burl_append_encode_all (buffer * const b, const char * const str, const size_t len) { /* percent-encodes everything except unreserved - . 0-9 A-Z _ a-z ~ * Note: double-encodes any existing '%') */ /* Note: not checking for invalid UTF-8 */ char * const p = buffer_string_prepare_append(b, len*3); int j = 0; for (unsigned int i = 0; i < len; ++i, ++j) { if (burl_is_unreserved(str[i])) { p[j] = str[i]; } else { p[j] = '%'; p[++j] = hex_chars_uc[(str[i] >> 4) & 0xF]; p[++j] = hex_chars_uc[str[i] & 0xF]; } } buffer_commit(b, j); } static void burl_offset_tolower (buffer * const b, const size_t off) { /*(skips over all percent-encodings, including encoding of alpha chars)*/ for (char *p = b->ptr+off; p[0]; ++p) { if (p[0] >= 'A' && p[0] <= 'Z') p[0] |= 0x20; else if (p[0]=='%' && light_isxdigit(p[1]) && light_isxdigit(p[2])) p+=2; } } static void burl_offset_toupper (buffer * const b, const size_t off) { /*(skips over all percent-encodings, including encoding of alpha chars)*/ for (char *p = b->ptr+off; p[0]; ++p) { if (p[0] >= 'a' && p[0] <= 'z') p[0] &= 0xdf; else if (p[0]=='%' && light_isxdigit(p[1]) && light_isxdigit(p[2])) p+=2; } } void burl_append (buffer * const b, const char * const str, const size_t len, const 
int flags) { size_t off = 0; if (0 == len) return; if (0 == flags) { buffer_append_string_len(b, str, len); return; } if (flags & (BURL_TOUPPER|BURL_TOLOWER)) off = buffer_string_length(b); if (flags & BURL_ENCODE_NONE) { buffer_append_string_len(b, str, len); } else if (flags & BURL_ENCODE_ALL) { burl_append_encode_all(b, str, len); } else if (flags & BURL_ENCODE_NDE) { burl_append_encode_nde(b, str, len); } else if (flags & BURL_ENCODE_PSNDE) { burl_append_encode_psnde(b, str, len); } else if (flags & BURL_ENCODE_B64U) { const unsigned char *s = (const unsigned char *)str; buffer_append_base64_encode_no_padding(b, s, len, BASE64_URL); } else if (flags & BURL_DECODE_B64U) { buffer_append_base64_decode(b, str, len, BASE64_URL); } /* note: not normalizing str, which could come from arbitrary header, * so it is possible that alpha chars are percent-encoded upper/lowercase */ if (flags & (BURL_TOLOWER|BURL_TOUPPER)) { (flags & BURL_TOLOWER) ? burl_offset_tolower(b, off) /*(flags & BURL_TOLOWER)*/ : burl_offset_toupper(b, off); /*(flags & BURL_TOUPPER)*/ } }
#include "first.h" #include "burl.h" #include <string.h> #include "buffer.h" #include "base64.h" static const char hex_chars_uc[] = "0123456789ABCDEF"; /* everything except: ! $ & ' ( ) * + , - . / 0-9 : ; = ? @ A-Z _ a-z ~ */ static const char encoded_chars_http_uri_reqd[] = { /* 0 1 2 3 4 5 6 7 8 9 A B C D E F */ 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, /* 00 - 0F control chars */ 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, /* 10 - 1F */ 1, 0, 1, 1, 0, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, /* 20 - 2F space " # % */ 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 0, 1, 0, /* 30 - 3F < > */ 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, /* 40 - 4F */ 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 1, 1, 1, 0, /* 50 - 5F [ \ ] ^ */ 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, /* 60 - 6F ` */ 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 1, 1, 0, 1, /* 70 - 7F { | } DEL */ 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, /* 80 - 8F */ 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, /* 90 - 9F */ 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, /* A0 - AF */ 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, /* B0 - BF */ 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, /* C0 - CF */ 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, /* D0 - DF */ 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, /* E0 - EF */ 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, /* F0 - FF */ }; /* c (char) and n (nibble) MUST be unsigned integer types */ #define li_cton(c,n) \ (((n) = (c) - '0') <= 9 || (((n) = ((c)&0xdf) - 'A') <= 5 ? ((n) += 10) : 0)) /* b (byte) MUST be unsigned integer type * https://en.wikipedia.org/wiki/UTF-8 * reject overlong encodings of 7-byte ASCII and invalid UTF-8 * (but does not detect other overlong multi-byte encodings) */ #define li_utf8_invalid_byte(b) ((b) >= 0xF5 || ((b)|0x1) == 0xC1) static int burl_is_unreserved (const int c) { return (light_isalnum(c) || c == '-' || c == '.' 
|| c == '_' || c == '~'); } static int burl_normalize_basic_unreserved_fix (buffer *b, buffer *t, int i, int qs) { int j = i; const int used = (int)buffer_string_length(b); const unsigned char * const s = (unsigned char *)b->ptr; unsigned char * const p = (unsigned char *)buffer_string_prepare_copy(t,i+(used-i)*3+1); unsigned int n1, n2; memcpy(p, s, (size_t)i); for (; i < used; ++i, ++j) { if (!encoded_chars_http_uri_reqd[s[i]]) { if (s[i] == '?' && -1 == qs) qs = j; p[j] = s[i]; } else if (s[i]=='%' && li_cton(s[i+1], n1) && li_cton(s[i+2], n2)) { const unsigned int x = (n1 << 4) | n2; if (burl_is_unreserved(x)) { p[j] = x; } else { p[j] = '%'; p[++j] = hex_chars_uc[n1]; /*(s[i+1] & 0xdf)*/ p[++j] = hex_chars_uc[n2]; /*(s[i+2] & 0xdf)*/ if (li_utf8_invalid_byte(x)) qs = -2; } i+=2; } else if (s[i] == '#') break; /* ignore fragment */ else { p[j] = '%'; p[++j] = hex_chars_uc[(s[i] >> 4) & 0xF]; p[++j] = hex_chars_uc[s[i] & 0xF]; if (li_utf8_invalid_byte(s[i])) qs = -2; } } buffer_commit(t, (size_t)j); buffer_copy_buffer(b, t); return qs; } static int burl_normalize_basic_unreserved (buffer *b, buffer *t) { const unsigned char * const s = (unsigned char *)b->ptr; const int used = (int)buffer_string_length(b); unsigned int n1, n2, x; int qs = -1; for (int i = 0; i < used; ++i) { if (!encoded_chars_http_uri_reqd[s[i]]) { if (s[i] == '?' 
&& -1 == qs) qs = i; } else if (s[i]=='%' && li_cton(s[i+1], n1) && li_cton(s[i+2], n2) && !burl_is_unreserved((x = (n1 << 4) | n2))) { if (li_utf8_invalid_byte(x)) qs = -2; if (s[i+1] >= 'a') b->ptr[i+1] &= 0xdf; /* uppercase hex */ if (s[i+2] >= 'a') b->ptr[i+2] &= 0xdf; /* uppercase hex */ i+=2; } else if (s[i] == '#') { /* ignore fragment */ buffer_string_set_length(b, (size_t)i); break; } else { qs = burl_normalize_basic_unreserved_fix(b, t, i, qs); break; } } return qs; } static int burl_normalize_basic_required_fix (buffer *b, buffer *t, int i, int qs) { int j = i; const int used = (int)buffer_string_length(b); const unsigned char * const s = (unsigned char *)b->ptr; unsigned char * const p = (unsigned char *)buffer_string_prepare_copy(t,i+(used-i)*3+1); unsigned int n1, n2; memcpy(p, s, (size_t)i); for (; i < used; ++i, ++j) { if (!encoded_chars_http_uri_reqd[s[i]]) { if (s[i] == '?' && -1 == qs) qs = j; p[j] = s[i]; } else if (s[i]=='%' && li_cton(s[i+1], n1) && li_cton(s[i+2], n2)) { const unsigned int x = (n1 << 4) | n2; if (!encoded_chars_http_uri_reqd[x] && (qs < 0 ? (x!='/'&&x!='?') : (x!='&'&&x!='='&&x!=';'))) { p[j] = x; } else { p[j] = '%'; p[++j] = hex_chars_uc[n1]; /*(s[i+1] & 0xdf)*/ p[++j] = hex_chars_uc[n2]; /*(s[i+2] & 0xdf)*/ if (li_utf8_invalid_byte(x)) qs = -2; } i+=2; } else if (s[i] == '#') break; /* ignore fragment */ else { p[j] = '%'; p[++j] = hex_chars_uc[(s[i] >> 4) & 0xF]; p[++j] = hex_chars_uc[s[i] & 0xF]; if (li_utf8_invalid_byte(s[i])) qs = -2; } } buffer_commit(t, (size_t)j); buffer_copy_buffer(b, t); return qs; } static int burl_normalize_basic_required (buffer *b, buffer *t) { const unsigned char * const s = (unsigned char *)b->ptr; const int used = (int)buffer_string_length(b); unsigned int n1, n2, x; int qs = -1; for (int i = 0; i < used; ++i) { if (!encoded_chars_http_uri_reqd[s[i]]) { if (s[i] == '?' 
&& -1 == qs) qs = i; } else if (s[i]=='%' && li_cton(s[i+1], n1) && li_cton(s[i+2], n2) && (encoded_chars_http_uri_reqd[(x = (n1 << 4) | n2)] ||(qs < 0 ? (x=='/'||x=='?') : (x=='&'||x=='='||x==';')))){ if (li_utf8_invalid_byte(x)) qs = -2; if (s[i+1] >= 'a') b->ptr[i+1] &= 0xdf; /* uppercase hex */ if (s[i+2] >= 'a') b->ptr[i+2] &= 0xdf; /* uppercase hex */ i+=2; } else if (s[i] == '#') { /* ignore fragment */ buffer_string_set_length(b, (size_t)i); break; } else { qs = burl_normalize_basic_required_fix(b, t, i, qs); break; } } return qs; } static int burl_contains_ctrls (const buffer *b) { const char * const s = b->ptr; const int used = (int)buffer_string_length(b); for (int i = 0; i < used; ++i) { if (s[i] == '%' && (s[i+1] < '2' || (s[i+1] == '7' && s[i+2] == 'F'))) return 1; } return 0; } static void burl_normalize_qs20_to_plus_fix (buffer *b, int i) { char * const s = b->ptr; const int used = (int)buffer_string_length(b); int j = i; for (; i < used; ++i, ++j) { s[j] = s[i]; if (s[i] == '%' && s[i+1] == '2' && s[i+2] == '0') { s[j] = '+'; i+=2; } } buffer_string_set_length(b, j); } static void burl_normalize_qs20_to_plus (buffer *b, int qs) { const char * const s = b->ptr; const int used = qs < 0 ? 0 : (int)buffer_string_length(b); int i; if (qs < 0) return; for (i = qs+1; i < used; ++i) { if (s[i] == '%' && s[i+1] == '2' && s[i+2] == '0') break; } if (i != used) burl_normalize_qs20_to_plus_fix(b, i); } static int burl_normalize_2F_to_slash_fix (buffer *b, int qs, int i) { char * const s = b->ptr; const int blen = (int)buffer_string_length(b); const int used = qs < 0 ? 
blen : qs; int j = i; for (; i < used; ++i, ++j) { s[j] = s[i]; if (s[i] == '%' && s[i+1] == '2' && s[i+2] == 'F') { s[j] = '/'; i+=2; } } if (qs >= 0) { const int qslen = blen - qs; memmove(s+j, s+qs, (size_t)qslen); qs = j; j += qslen; } buffer_string_set_length(b, j); return qs; } static int burl_normalize_2F_to_slash (buffer *b, int qs, int flags) { /*("%2F" must already have been uppercased during normalization)*/ const char * const s = b->ptr; const int used = qs < 0 ? (int)buffer_string_length(b) : qs; for (int i = 0; i < used; ++i) { if (s[i] == '%' && s[i+1] == '2' && s[i+2] == 'F') { return (flags & HTTP_PARSEOPT_URL_NORMALIZE_PATH_2F_DECODE) ? burl_normalize_2F_to_slash_fix(b, qs, i) : -2; /*(flags & HTTP_PARSEOPT_URL_NORMALIZE_PATH_2F_REJECT)*/ } } return qs; } static int burl_normalize_path (buffer *b, buffer *t, int qs, int flags) { const unsigned char * const s = (unsigned char *)b->ptr; const int used = (int)buffer_string_length(b); int path_simplify = 0; for (int i = 0, len = qs < 0 ? used : qs; i < len; ++i) { if (s[i] == '.' && (s[i+1] != '.' || ++i) && (s[i+1] == '/' || s[i+1] == '?' || s[i+1] == '\0')) { path_simplify = 1; break; } do { ++i; } while (i < len && s[i] != '/'); if (s[i] == '/' && s[i+1] == '/') { /*(s[len] != '/')*/ path_simplify = 1; break; } } if (path_simplify) { if (flags & HTTP_PARSEOPT_URL_NORMALIZE_PATH_DOTSEG_REJECT) return -2; if (qs >= 0) { buffer_copy_string_len(t, b->ptr+qs, used - qs); buffer_string_set_length(b, qs); } buffer_path_simplify(b, b); if (qs >= 0) { qs = (int)buffer_string_length(b); buffer_append_string_len(b, CONST_BUF_LEN(t)); } } return qs; } int burl_normalize (buffer *b, buffer *t, int flags) { int qs; #if defined(__WIN32) || defined(__CYGWIN__) /* Windows and Cygwin treat '\\' as '/' if '\\' is present in path; * convert to '/' for consistency before percent-encoding * normalization which will convert '\\' to "%5C" in the URL. * (Clients still should not be sending '\\' unencoded in requests.) 
*/ if (flags & HTTP_PARSEOPT_URL_NORMALIZE_PATH_BACKSLASH_TRANS) { for (char *p = b->ptr; *p != '?' && *p != '\0'; ++p) { if (*p == '\\') *p = '/'; } } #endif qs = (flags & HTTP_PARSEOPT_URL_NORMALIZE_REQUIRED) ? burl_normalize_basic_required(b, t) : burl_normalize_basic_unreserved(b, t); if (-2 == qs) return -2; if (flags & HTTP_PARSEOPT_URL_NORMALIZE_CTRLS_REJECT) { if (burl_contains_ctrls(b)) return -2; } if (flags & (HTTP_PARSEOPT_URL_NORMALIZE_PATH_2F_DECODE |HTTP_PARSEOPT_URL_NORMALIZE_PATH_2F_REJECT)) { qs = burl_normalize_2F_to_slash(b, qs, flags); if (-2 == qs) return -2; } if (flags & (HTTP_PARSEOPT_URL_NORMALIZE_PATH_DOTSEG_REMOVE |HTTP_PARSEOPT_URL_NORMALIZE_PATH_DOTSEG_REJECT)) { qs = burl_normalize_path(b, t, qs, flags); if (-2 == qs) return -2; } if (flags & HTTP_PARSEOPT_URL_NORMALIZE_QUERY_20_PLUS) { if (qs >= 0) burl_normalize_qs20_to_plus(b, qs); } return qs; } static void burl_append_encode_nde (buffer * const b, const char * const str, const size_t len) { /* percent-encodes everything except unreserved - . 0-9 A-Z _ a-z ~ * unless already percent-encoded (does not double-encode) */ /* Note: not checking for invalid UTF-8 */ char * const p = buffer_string_prepare_append(b, len*3); unsigned int n1, n2; int j = 0; for (unsigned int i = 0; i < len; ++i, ++j) { if (str[i]=='%' && li_cton(str[i+1], n1) && li_cton(str[i+2], n2)) { const unsigned int x = (n1 << 4) | n2; if (burl_is_unreserved((int)x)) { p[j] = (char)x; } else { /* leave UTF-8, control chars, and required chars encoded */ p[j] = '%'; p[++j] = str[i+1]; p[++j] = str[i+2]; } i+=2; } else if (burl_is_unreserved(str[i])) { p[j] = str[i]; } else { p[j] = '%'; p[++j] = hex_chars_uc[(str[i] >> 4) & 0xF]; p[++j] = hex_chars_uc[str[i] & 0xF]; } } buffer_commit(b, j); } static void burl_append_encode_psnde (buffer * const b, const char * const str, const size_t len) { /* percent-encodes everything except unreserved - . 
0-9 A-Z _ a-z ~ plus / * unless already percent-encoded (does not double-encode) */ /* Note: not checking for invalid UTF-8 */ char * const p = buffer_string_prepare_append(b, len*3); unsigned int n1, n2; int j = 0; for (unsigned int i = 0; i < len; ++i, ++j) { if (str[i]=='%' && li_cton(str[i+1], n1) && li_cton(str[i+2], n2)) { const unsigned int x = (n1 << 4) | n2; if (burl_is_unreserved((int)x)) { p[j] = (char)x; } else { /* leave UTF-8, control chars, and required chars encoded */ p[j] = '%'; p[++j] = str[i+1]; p[++j] = str[i+2]; } i+=2; } else if (burl_is_unreserved(str[i]) || str[i] == '/') { p[j] = str[i]; } else { p[j] = '%'; p[++j] = hex_chars_uc[(str[i] >> 4) & 0xF]; p[++j] = hex_chars_uc[str[i] & 0xF]; } } buffer_commit(b, j); } static void burl_append_encode_all (buffer * const b, const char * const str, const size_t len) { /* percent-encodes everything except unreserved - . 0-9 A-Z _ a-z ~ * Note: double-encodes any existing '%') */ /* Note: not checking for invalid UTF-8 */ char * const p = buffer_string_prepare_append(b, len*3); int j = 0; for (unsigned int i = 0; i < len; ++i, ++j) { if (burl_is_unreserved(str[i])) { p[j] = str[i]; } else { p[j] = '%'; p[++j] = hex_chars_uc[(str[i] >> 4) & 0xF]; p[++j] = hex_chars_uc[str[i] & 0xF]; } } buffer_commit(b, j); } static void burl_offset_tolower (buffer * const b, const size_t off) { /*(skips over all percent-encodings, including encoding of alpha chars)*/ for (char *p = b->ptr+off; p[0]; ++p) { if (p[0] >= 'A' && p[0] <= 'Z') p[0] |= 0x20; else if (p[0]=='%' && light_isxdigit(p[1]) && light_isxdigit(p[2])) p+=2; } } static void burl_offset_toupper (buffer * const b, const size_t off) { /*(skips over all percent-encodings, including encoding of alpha chars)*/ for (char *p = b->ptr+off; p[0]; ++p) { if (p[0] >= 'a' && p[0] <= 'z') p[0] &= 0xdf; else if (p[0]=='%' && light_isxdigit(p[1]) && light_isxdigit(p[2])) p+=2; } } void burl_append (buffer * const b, const char * const str, const size_t len, const 
int flags) { size_t off = 0; if (0 == len) return; if (0 == flags) { buffer_append_string_len(b, str, len); return; } if (flags & (BURL_TOUPPER|BURL_TOLOWER)) off = buffer_string_length(b); if (flags & BURL_ENCODE_NONE) { buffer_append_string_len(b, str, len); } else if (flags & BURL_ENCODE_ALL) { burl_append_encode_all(b, str, len); } else if (flags & BURL_ENCODE_NDE) { burl_append_encode_nde(b, str, len); } else if (flags & BURL_ENCODE_PSNDE) { burl_append_encode_psnde(b, str, len); } else if (flags & BURL_ENCODE_B64U) { const unsigned char *s = (const unsigned char *)str; buffer_append_base64_encode_no_padding(b, s, len, BASE64_URL); } else if (flags & BURL_DECODE_B64U) { buffer_append_base64_decode(b, str, len, BASE64_URL); } /* note: not normalizing str, which could come from arbitrary header, * so it is possible that alpha chars are percent-encoded upper/lowercase */ if (flags & (BURL_TOLOWER|BURL_TOUPPER)) { (flags & BURL_TOLOWER) ? burl_offset_tolower(b, off) /*(flags & BURL_TOLOWER)*/ : burl_offset_toupper(b, off); /*(flags & BURL_TOUPPER)*/ } }
/*
 * Decode percent-encoded "%2F" sequences to literal '/' in the path portion
 * of url buffer b, compacting the string in place.  i is the offset of the
 * first "%2F" (caller has already located it); qs is the offset of '?'
 * (start of query string) or -1 if the url has no query string.  The query
 * string bytes are not decoded; they are shifted down to follow the
 * (now shorter) path.  Returns the updated query-string offset.
 *
 * Fix for CVE-2019-11072: the original code returned the *stale* qs offset
 * even though the path shrank (j < qs after compaction), so subsequent
 * normalization steps used an out-of-bounds query-string boundary.  Update
 * qs to the new offset j before appending the query-string length.  Also
 * pass an explicitly non-negative size_t length to memmove (CWE-190).
 */
static int burl_normalize_2F_to_slash_fix (buffer *b, int qs, int i)
{
    char * const s = b->ptr;
    const int blen = (int)buffer_string_length(b);
    const int used = qs < 0 ? blen : qs;  /* decode only up to '?', if any */
    int j = i;
    for (; i < used; ++i, ++j) {
        s[j] = s[i];
        if (s[i] == '%' && s[i+1] == '2' && s[i+2] == 'F') {
            s[j] = '/';
            i+=2;
        }
    }
    if (qs >= 0) {
        /* move the query string down so it follows the shortened path */
        const int qslen = blen - qs;
        memmove(s+j, s+qs, (size_t)qslen);
        qs = j;      /* query string now begins at the new offset */
        j += qslen;
    }
    buffer_string_set_length(b, j);
    return qs;
}
/*
 * Decode percent-encoded "%2F" sequences to literal '/' in the path portion
 * of url buffer b, compacting the string in place.  i is the offset of the
 * first "%2F" (already located by the caller); qs is the offset of '?'
 * (start of query string) or -1 if no query string is present.  The query
 * string itself is not decoded; it is shifted down to follow the shortened
 * path.  Returns the (possibly updated) query-string offset.
 */
static int burl_normalize_2F_to_slash_fix (buffer *b, int qs, int i)
{
    char * const s = b->ptr;
    const int blen = (int)buffer_string_length(b);
    /* decode only within the path: stop at '?' when a query string exists */
    const int used = qs < 0 ? blen : qs;
    int j = i;
    for (; i < used; ++i, ++j) {
        s[j] = s[i];
        if (s[i] == '%' && s[i+1] == '2' && s[i+2] == 'F') {
            s[j] = '/';
            i+=2;
        }
    }
    if (qs >= 0) {
        /* shift query string down behind the compacted path; the new
         * query-string offset is j (path shrank by 2 bytes per "%2F") */
        const int qslen = blen - qs;
        memmove(s+j, s+qs, (size_t)qslen);
        qs = j;
        j += qslen;
    }
    buffer_string_set_length(b, j);
    return qs;
}
{'added': [(255, ' const int qslen = blen - qs;'), (256, ' memmove(s+j, s+qs, (size_t)qslen);'), (257, ' qs = j;'), (258, ' j += qslen;')], 'deleted': [(255, ' memmove(s+j, s+qs, blen - qs);'), (256, ' j += blen - qs;')]}
4
2
419
4,022
20
156
7
https://github.com/lighttpd/lighttpd1.4
CVE-2019-11072
CWE-190
504
jpc_t2dec.c
C
jpc_dec_decodepkts
/* * Copyright (c) 1999-2000 Image Power, Inc. and the University of * British Columbia. * Copyright (c) 2001-2003 Michael David Adams. * All rights reserved. */ /* __START_OF_JASPER_LICENSE__ * * JasPer License Version 2.0 * * Copyright (c) 2001-2006 Michael David Adams * Copyright (c) 1999-2000 Image Power, Inc. * Copyright (c) 1999-2000 The University of British Columbia * * All rights reserved. * * Permission is hereby granted, free of charge, to any person (the * "User") obtaining a copy of this software and associated documentation * files (the "Software"), to deal in the Software without restriction, * including without limitation the rights to use, copy, modify, merge, * publish, distribute, and/or sell copies of the Software, and to permit * persons to whom the Software is furnished to do so, subject to the * following conditions: * * 1. The above copyright notices and this permission notice (which * includes the disclaimer below) shall be included in all copies or * substantial portions of the Software. * * 2. The name of a copyright holder shall not be used to endorse or * promote products derived from the Software without specific prior * written permission. * * THIS DISCLAIMER OF WARRANTY CONSTITUTES AN ESSENTIAL PART OF THIS * LICENSE. NO USE OF THE SOFTWARE IS AUTHORIZED HEREUNDER EXCEPT UNDER * THIS DISCLAIMER. THE SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS * "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING * BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A * PARTICULAR PURPOSE AND NONINFRINGEMENT OF THIRD PARTY RIGHTS. IN NO * EVENT SHALL THE COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, OR ANY SPECIAL * INDIRECT OR CONSEQUENTIAL DAMAGES, OR ANY DAMAGES WHATSOEVER RESULTING * FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN ACTION OF CONTRACT, * NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF OR IN CONNECTION * WITH THE USE OR PERFORMANCE OF THIS SOFTWARE. 
NO ASSURANCES ARE * PROVIDED BY THE COPYRIGHT HOLDERS THAT THE SOFTWARE DOES NOT INFRINGE * THE PATENT OR OTHER INTELLECTUAL PROPERTY RIGHTS OF ANY OTHER ENTITY. * EACH COPYRIGHT HOLDER DISCLAIMS ANY LIABILITY TO THE USER FOR CLAIMS * BROUGHT BY ANY OTHER ENTITY BASED ON INFRINGEMENT OF INTELLECTUAL * PROPERTY RIGHTS OR OTHERWISE. AS A CONDITION TO EXERCISING THE RIGHTS * GRANTED HEREUNDER, EACH USER HEREBY ASSUMES SOLE RESPONSIBILITY TO SECURE * ANY OTHER INTELLECTUAL PROPERTY RIGHTS NEEDED, IF ANY. THE SOFTWARE * IS NOT FAULT-TOLERANT AND IS NOT INTENDED FOR USE IN MISSION-CRITICAL * SYSTEMS, SUCH AS THOSE USED IN THE OPERATION OF NUCLEAR FACILITIES, * AIRCRAFT NAVIGATION OR COMMUNICATION SYSTEMS, AIR TRAFFIC CONTROL * SYSTEMS, DIRECT LIFE SUPPORT MACHINES, OR WEAPONS SYSTEMS, IN WHICH * THE FAILURE OF THE SOFTWARE OR SYSTEM COULD LEAD DIRECTLY TO DEATH, * PERSONAL INJURY, OR SEVERE PHYSICAL OR ENVIRONMENTAL DAMAGE ("HIGH * RISK ACTIVITIES"). THE COPYRIGHT HOLDERS SPECIFICALLY DISCLAIM ANY * EXPRESS OR IMPLIED WARRANTY OF FITNESS FOR HIGH RISK ACTIVITIES. * * __END_OF_JASPER_LICENSE__ */ /* * Tier 2 Decoder * * $Id$ */ /******************************************************************************\ * Includes. 
\******************************************************************************/ #include <stdio.h> #include <stdlib.h> #include <assert.h> #include "jasper/jas_types.h" #include "jasper/jas_fix.h" #include "jasper/jas_malloc.h" #include "jasper/jas_math.h" #include "jasper/jas_stream.h" #include "jasper/jas_debug.h" #include "jpc_bs.h" #include "jpc_dec.h" #include "jpc_cs.h" #include "jpc_mqdec.h" #include "jpc_t2dec.h" #include "jpc_t1cod.h" #include "jpc_math.h" /******************************************************************************\ * \******************************************************************************/ long jpc_dec_lookahead(jas_stream_t *in); static int jpc_getcommacode(jpc_bitstream_t *in); static int jpc_getnumnewpasses(jpc_bitstream_t *in); static int jpc_dec_decodepkt(jpc_dec_t *dec, jas_stream_t *pkthdrstream, jas_stream_t *in, int compno, int lvlno, int prcno, int lyrno); /******************************************************************************\ * Code. 
\******************************************************************************/ static int jpc_getcommacode(jpc_bitstream_t *in) { int n; int v; n = 0; for (;;) { if ((v = jpc_bitstream_getbit(in)) < 0) { return -1; } if (jpc_bitstream_eof(in)) { return -1; } if (!v) { break; } ++n; } return n; } static int jpc_getnumnewpasses(jpc_bitstream_t *in) { int n; if ((n = jpc_bitstream_getbit(in)) > 0) { if ((n = jpc_bitstream_getbit(in)) > 0) { if ((n = jpc_bitstream_getbits(in, 2)) == 3) { if ((n = jpc_bitstream_getbits(in, 5)) == 31) { if ((n = jpc_bitstream_getbits(in, 7)) >= 0) { n += 36 + 1; } } else if (n >= 0) { n += 5 + 1; } } else if (n >= 0) { n += 2 + 1; } } else if (!n) { n += 2; } } else if (!n) { ++n; } return n; } static int jpc_dec_decodepkt(jpc_dec_t *dec, jas_stream_t *pkthdrstream, jas_stream_t *in, int compno, int rlvlno, int prcno, int lyrno) { jpc_bitstream_t *inb; jpc_dec_tcomp_t *tcomp; jpc_dec_rlvl_t *rlvl; jpc_dec_band_t *band; jpc_dec_cblk_t *cblk; int n; int m; int i; jpc_tagtreenode_t *leaf; int included; int ret; int numnewpasses; jpc_dec_seg_t *seg; int len; int present; int savenumnewpasses; int mycounter; jpc_ms_t *ms; jpc_dec_tile_t *tile; jpc_dec_ccp_t *ccp; jpc_dec_cp_t *cp; int bandno; jpc_dec_prc_t *prc; int usedcblkcnt; int cblkno; uint_fast32_t bodylen; bool discard; int passno; int maxpasses; int hdrlen; int hdroffstart; int hdroffend; /* Avoid compiler warning about possible use of uninitialized variable. */ bodylen = 0; discard = (lyrno >= dec->maxlyrs); tile = dec->curtile; cp = tile->cp; ccp = &cp->ccps[compno]; /* * Decode the packet header. */ /* Decode the SOP marker segment if present. 
*/ if (cp->csty & JPC_COD_SOP) { if (jpc_dec_lookahead(in) == JPC_MS_SOP) { if (!(ms = jpc_getms(in, dec->cstate))) { return -1; } if (jpc_ms_gettype(ms) != JPC_MS_SOP) { jpc_ms_destroy(ms); jas_eprintf("missing SOP marker segment\n"); return -1; } jpc_ms_destroy(ms); } } hdroffstart = jas_stream_getrwcount(pkthdrstream); if (!(inb = jpc_bitstream_sopen(pkthdrstream, "r"))) { return -1; } if ((present = jpc_bitstream_getbit(inb)) < 0) { return 1; } JAS_DBGLOG(10, ("\n", present)); JAS_DBGLOG(10, ("present=%d ", present)); /* Is the packet non-empty? */ if (present) { /* The packet is non-empty. */ tcomp = &tile->tcomps[compno]; rlvl = &tcomp->rlvls[rlvlno]; bodylen = 0; for (bandno = 0, band = rlvl->bands; bandno < rlvl->numbands; ++bandno, ++band) { if (!band->data) { continue; } prc = &band->prcs[prcno]; if (!prc->cblks) { continue; } usedcblkcnt = 0; for (cblkno = 0, cblk = prc->cblks; cblkno < prc->numcblks; ++cblkno, ++cblk) { ++usedcblkcnt; if (!cblk->numpasses) { leaf = jpc_tagtree_getleaf(prc->incltagtree, usedcblkcnt - 1); if ((included = jpc_tagtree_decode(prc->incltagtree, leaf, lyrno + 1, inb)) < 0) { return -1; } } else { if ((included = jpc_bitstream_getbit(inb)) < 0) { return -1; } } JAS_DBGLOG(10, ("\n")); JAS_DBGLOG(10, ("included=%d ", included)); if (!included) { continue; } if (!cblk->numpasses) { i = 1; leaf = jpc_tagtree_getleaf(prc->numimsbstagtree, usedcblkcnt - 1); for (;;) { if ((ret = jpc_tagtree_decode(prc->numimsbstagtree, leaf, i, inb)) < 0) { return -1; } if (ret) { break; } ++i; } cblk->numimsbs = i - 1; cblk->firstpassno = cblk->numimsbs * 3; } if ((numnewpasses = jpc_getnumnewpasses(inb)) < 0) { return -1; } JAS_DBGLOG(10, ("numnewpasses=%d ", numnewpasses)); seg = cblk->curseg; savenumnewpasses = numnewpasses; mycounter = 0; if (numnewpasses > 0) { if ((m = jpc_getcommacode(inb)) < 0) { return -1; } cblk->numlenbits += m; JAS_DBGLOG(10, ("increment=%d ", m)); while (numnewpasses > 0) { passno = cblk->firstpassno + cblk->numpasses 
+ mycounter; /* XXX - the maxpasses is not set precisely but this doesn't matter... */ maxpasses = JPC_SEGPASSCNT(passno, cblk->firstpassno, 10000, (ccp->cblkctx & JPC_COX_LAZY) != 0, (ccp->cblkctx & JPC_COX_TERMALL) != 0); if (!discard && !seg) { if (!(seg = jpc_seg_alloc())) { return -1; } jpc_seglist_insert(&cblk->segs, cblk->segs.tail, seg); if (!cblk->curseg) { cblk->curseg = seg; } seg->passno = passno; seg->type = JPC_SEGTYPE(seg->passno, cblk->firstpassno, (ccp->cblkctx & JPC_COX_LAZY) != 0); seg->maxpasses = maxpasses; } n = JAS_MIN(numnewpasses, maxpasses); mycounter += n; numnewpasses -= n; if ((len = jpc_bitstream_getbits(inb, cblk->numlenbits + jpc_floorlog2(n))) < 0) { return -1; } JAS_DBGLOG(10, ("len=%d ", len)); if (!discard) { seg->lyrno = lyrno; seg->numpasses += n; seg->cnt = len; seg = seg->next; } bodylen += len; } } cblk->numpasses += savenumnewpasses; } } jpc_bitstream_inalign(inb, 0, 0); } else { if (jpc_bitstream_inalign(inb, 0x7f, 0)) { jas_eprintf("alignment failed\n"); return -1; } } jpc_bitstream_close(inb); hdroffend = jas_stream_getrwcount(pkthdrstream); hdrlen = hdroffend - hdroffstart; if (jas_getdbglevel() >= 5) { jas_eprintf("hdrlen=%lu bodylen=%lu \n", (unsigned long) hdrlen, (unsigned long) bodylen); } if (cp->csty & JPC_COD_EPH) { if (jpc_dec_lookahead(pkthdrstream) == JPC_MS_EPH) { if (!(ms = jpc_getms(pkthdrstream, dec->cstate))) { jas_eprintf("cannot get (EPH) marker segment\n"); return -1; } if (jpc_ms_gettype(ms) != JPC_MS_EPH) { jpc_ms_destroy(ms); jas_eprintf("missing EPH marker segment\n"); return -1; } jpc_ms_destroy(ms); } } /* decode the packet body. 
*/ if (jas_getdbglevel() >= 1) { jas_eprintf("packet body offset=%06ld\n", (long) jas_stream_getrwcount(in)); } if (!discard) { tcomp = &tile->tcomps[compno]; rlvl = &tcomp->rlvls[rlvlno]; for (bandno = 0, band = rlvl->bands; bandno < rlvl->numbands; ++bandno, ++band) { if (!band->data) { continue; } prc = &band->prcs[prcno]; if (!prc->cblks) { continue; } for (cblkno = 0, cblk = prc->cblks; cblkno < prc->numcblks; ++cblkno, ++cblk) { seg = cblk->curseg; while (seg) { if (!seg->stream) { if (!(seg->stream = jas_stream_memopen(0, 0))) { return -1; } } #if 0 jas_eprintf("lyrno=%02d, compno=%02d, lvlno=%02d, prcno=%02d, bandno=%02d, cblkno=%02d, passno=%02d numpasses=%02d cnt=%d numbps=%d, numimsbs=%d\n", lyrno, compno, rlvlno, prcno, band - rlvl->bands, cblk - prc->cblks, seg->passno, seg->numpasses, seg->cnt, band->numbps, cblk->numimsbs); #endif if (seg->cnt > 0) { if (jpc_getdata(in, seg->stream, seg->cnt) < 0) { return -1; } seg->cnt = 0; } if (seg->numpasses >= seg->maxpasses) { cblk->curseg = seg->next; } seg = seg->next; } } } } else { if (jas_stream_gobble(in, bodylen) != JAS_CAST(int, bodylen)) { return -1; } } return 0; } /********************************************************************************************/ /********************************************************************************************/ int jpc_dec_decodepkts(jpc_dec_t *dec, jas_stream_t *pkthdrstream, jas_stream_t *in) { jpc_dec_tile_t *tile; jpc_pi_t *pi; int ret; tile = dec->curtile; pi = tile->pi; for (;;) { if (!tile->pkthdrstream || jas_stream_peekc(tile->pkthdrstream) == EOF) { switch (jpc_dec_lookahead(in)) { case JPC_MS_EOC: case JPC_MS_SOT: return 0; break; case JPC_MS_SOP: case JPC_MS_EPH: case 0: break; default: return -1; break; } } if ((ret = jpc_pi_next(pi))) { return ret; } if (dec->maxpkts >= 0 && dec->numpkts >= dec->maxpkts) { jas_eprintf("warning: stopping decode prematurely as requested\n"); return 0; } if (jas_getdbglevel() >= 1) { jas_eprintf("packet offset=%08ld 
prg=%d cmptno=%02d " "rlvlno=%02d prcno=%03d lyrno=%02d\n", (long) jas_stream_getrwcount(in), jpc_pi_prg(pi), jpc_pi_cmptno(pi), jpc_pi_rlvlno(pi), jpc_pi_prcno(pi), jpc_pi_lyrno(pi)); } if (jpc_dec_decodepkt(dec, pkthdrstream, in, jpc_pi_cmptno(pi), jpc_pi_rlvlno(pi), jpc_pi_prcno(pi), jpc_pi_lyrno(pi))) { return -1; } ++dec->numpkts; } return 0; } jpc_pi_t *jpc_dec_pi_create(jpc_dec_t *dec, jpc_dec_tile_t *tile) { jpc_pi_t *pi; int compno; jpc_picomp_t *picomp; jpc_pirlvl_t *pirlvl; jpc_dec_tcomp_t *tcomp; int rlvlno; jpc_dec_rlvl_t *rlvl; int prcno; int *prclyrno; jpc_dec_cmpt_t *cmpt; if (!(pi = jpc_pi_create0())) { return 0; } pi->numcomps = dec->numcomps; if (!(pi->picomps = jas_alloc2(pi->numcomps, sizeof(jpc_picomp_t)))) { jpc_pi_destroy(pi); return 0; } for (compno = 0, picomp = pi->picomps; compno < pi->numcomps; ++compno, ++picomp) { picomp->pirlvls = 0; } for (compno = 0, tcomp = tile->tcomps, picomp = pi->picomps; compno < pi->numcomps; ++compno, ++tcomp, ++picomp) { picomp->numrlvls = tcomp->numrlvls; if (!(picomp->pirlvls = jas_alloc2(picomp->numrlvls, sizeof(jpc_pirlvl_t)))) { jpc_pi_destroy(pi); return 0; } for (rlvlno = 0, pirlvl = picomp->pirlvls; rlvlno < picomp->numrlvls; ++rlvlno, ++pirlvl) { pirlvl->prclyrnos = 0; } for (rlvlno = 0, pirlvl = picomp->pirlvls, rlvl = tcomp->rlvls; rlvlno < picomp->numrlvls; ++rlvlno, ++pirlvl, ++rlvl) { /* XXX sizeof(long) should be sizeof different type */ pirlvl->numprcs = rlvl->numprcs; if (!(pirlvl->prclyrnos = jas_alloc2(pirlvl->numprcs, sizeof(long)))) { jpc_pi_destroy(pi); return 0; } } } pi->maxrlvls = 0; for (compno = 0, tcomp = tile->tcomps, picomp = pi->picomps, cmpt = dec->cmpts; compno < pi->numcomps; ++compno, ++tcomp, ++picomp, ++cmpt) { picomp->hsamp = cmpt->hstep; picomp->vsamp = cmpt->vstep; for (rlvlno = 0, pirlvl = picomp->pirlvls, rlvl = tcomp->rlvls; rlvlno < picomp->numrlvls; ++rlvlno, ++pirlvl, ++rlvl) { pirlvl->prcwidthexpn = rlvl->prcwidthexpn; pirlvl->prcheightexpn = 
rlvl->prcheightexpn; for (prcno = 0, prclyrno = pirlvl->prclyrnos; prcno < pirlvl->numprcs; ++prcno, ++prclyrno) { *prclyrno = 0; } pirlvl->numhprcs = rlvl->numhprcs; } if (pi->maxrlvls < tcomp->numrlvls) { pi->maxrlvls = tcomp->numrlvls; } } pi->numlyrs = tile->cp->numlyrs; pi->xstart = tile->xstart; pi->ystart = tile->ystart; pi->xend = tile->xend; pi->yend = tile->yend; pi->picomp = 0; pi->pirlvl = 0; pi->x = 0; pi->y = 0; pi->compno = 0; pi->rlvlno = 0; pi->prcno = 0; pi->lyrno = 0; pi->xstep = 0; pi->ystep = 0; pi->pchgno = -1; pi->defaultpchg.prgord = tile->cp->prgord; pi->defaultpchg.compnostart = 0; pi->defaultpchg.compnoend = pi->numcomps; pi->defaultpchg.rlvlnostart = 0; pi->defaultpchg.rlvlnoend = pi->maxrlvls; pi->defaultpchg.lyrnoend = pi->numlyrs; pi->pchg = 0; pi->valid = 0; return pi; } long jpc_dec_lookahead(jas_stream_t *in) { uint_fast16_t x; if (jpc_getuint16(in, &x)) { return -1; } if (jas_stream_ungetc(in, x & 0xff) == EOF || jas_stream_ungetc(in, x >> 8) == EOF) { return -1; } if (x >= JPC_MS_INMIN && x <= JPC_MS_INMAX) { return x; } return 0; }
/* * Copyright (c) 1999-2000 Image Power, Inc. and the University of * British Columbia. * Copyright (c) 2001-2003 Michael David Adams. * All rights reserved. */ /* __START_OF_JASPER_LICENSE__ * * JasPer License Version 2.0 * * Copyright (c) 2001-2006 Michael David Adams * Copyright (c) 1999-2000 Image Power, Inc. * Copyright (c) 1999-2000 The University of British Columbia * * All rights reserved. * * Permission is hereby granted, free of charge, to any person (the * "User") obtaining a copy of this software and associated documentation * files (the "Software"), to deal in the Software without restriction, * including without limitation the rights to use, copy, modify, merge, * publish, distribute, and/or sell copies of the Software, and to permit * persons to whom the Software is furnished to do so, subject to the * following conditions: * * 1. The above copyright notices and this permission notice (which * includes the disclaimer below) shall be included in all copies or * substantial portions of the Software. * * 2. The name of a copyright holder shall not be used to endorse or * promote products derived from the Software without specific prior * written permission. * * THIS DISCLAIMER OF WARRANTY CONSTITUTES AN ESSENTIAL PART OF THIS * LICENSE. NO USE OF THE SOFTWARE IS AUTHORIZED HEREUNDER EXCEPT UNDER * THIS DISCLAIMER. THE SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS * "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING * BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A * PARTICULAR PURPOSE AND NONINFRINGEMENT OF THIRD PARTY RIGHTS. IN NO * EVENT SHALL THE COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, OR ANY SPECIAL * INDIRECT OR CONSEQUENTIAL DAMAGES, OR ANY DAMAGES WHATSOEVER RESULTING * FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN ACTION OF CONTRACT, * NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF OR IN CONNECTION * WITH THE USE OR PERFORMANCE OF THIS SOFTWARE. 
NO ASSURANCES ARE * PROVIDED BY THE COPYRIGHT HOLDERS THAT THE SOFTWARE DOES NOT INFRINGE * THE PATENT OR OTHER INTELLECTUAL PROPERTY RIGHTS OF ANY OTHER ENTITY. * EACH COPYRIGHT HOLDER DISCLAIMS ANY LIABILITY TO THE USER FOR CLAIMS * BROUGHT BY ANY OTHER ENTITY BASED ON INFRINGEMENT OF INTELLECTUAL * PROPERTY RIGHTS OR OTHERWISE. AS A CONDITION TO EXERCISING THE RIGHTS * GRANTED HEREUNDER, EACH USER HEREBY ASSUMES SOLE RESPONSIBILITY TO SECURE * ANY OTHER INTELLECTUAL PROPERTY RIGHTS NEEDED, IF ANY. THE SOFTWARE * IS NOT FAULT-TOLERANT AND IS NOT INTENDED FOR USE IN MISSION-CRITICAL * SYSTEMS, SUCH AS THOSE USED IN THE OPERATION OF NUCLEAR FACILITIES, * AIRCRAFT NAVIGATION OR COMMUNICATION SYSTEMS, AIR TRAFFIC CONTROL * SYSTEMS, DIRECT LIFE SUPPORT MACHINES, OR WEAPONS SYSTEMS, IN WHICH * THE FAILURE OF THE SOFTWARE OR SYSTEM COULD LEAD DIRECTLY TO DEATH, * PERSONAL INJURY, OR SEVERE PHYSICAL OR ENVIRONMENTAL DAMAGE ("HIGH * RISK ACTIVITIES"). THE COPYRIGHT HOLDERS SPECIFICALLY DISCLAIM ANY * EXPRESS OR IMPLIED WARRANTY OF FITNESS FOR HIGH RISK ACTIVITIES. * * __END_OF_JASPER_LICENSE__ */ /* * Tier 2 Decoder * * $Id$ */ /******************************************************************************\ * Includes. 
\******************************************************************************/ #include <stdio.h> #include <stdlib.h> #include <assert.h> #include "jasper/jas_types.h" #include "jasper/jas_fix.h" #include "jasper/jas_malloc.h" #include "jasper/jas_math.h" #include "jasper/jas_stream.h" #include "jasper/jas_debug.h" #include "jpc_bs.h" #include "jpc_dec.h" #include "jpc_cs.h" #include "jpc_mqdec.h" #include "jpc_t2dec.h" #include "jpc_t1cod.h" #include "jpc_math.h" /******************************************************************************\ * \******************************************************************************/ long jpc_dec_lookahead(jas_stream_t *in); static int jpc_getcommacode(jpc_bitstream_t *in); static int jpc_getnumnewpasses(jpc_bitstream_t *in); static int jpc_dec_decodepkt(jpc_dec_t *dec, jas_stream_t *pkthdrstream, jas_stream_t *in, int compno, int lvlno, int prcno, int lyrno); /******************************************************************************\ * Code. 
\******************************************************************************/ static int jpc_getcommacode(jpc_bitstream_t *in) { int n; int v; n = 0; for (;;) { if ((v = jpc_bitstream_getbit(in)) < 0) { return -1; } if (jpc_bitstream_eof(in)) { return -1; } if (!v) { break; } ++n; } return n; } static int jpc_getnumnewpasses(jpc_bitstream_t *in) { int n; if ((n = jpc_bitstream_getbit(in)) > 0) { if ((n = jpc_bitstream_getbit(in)) > 0) { if ((n = jpc_bitstream_getbits(in, 2)) == 3) { if ((n = jpc_bitstream_getbits(in, 5)) == 31) { if ((n = jpc_bitstream_getbits(in, 7)) >= 0) { n += 36 + 1; } } else if (n >= 0) { n += 5 + 1; } } else if (n >= 0) { n += 2 + 1; } } else if (!n) { n += 2; } } else if (!n) { ++n; } return n; } static int jpc_dec_decodepkt(jpc_dec_t *dec, jas_stream_t *pkthdrstream, jas_stream_t *in, int compno, int rlvlno, int prcno, int lyrno) { jpc_bitstream_t *inb; jpc_dec_tcomp_t *tcomp; jpc_dec_rlvl_t *rlvl; jpc_dec_band_t *band; jpc_dec_cblk_t *cblk; int n; int m; int i; jpc_tagtreenode_t *leaf; int included; int ret; int numnewpasses; jpc_dec_seg_t *seg; int len; int present; int savenumnewpasses; int mycounter; jpc_ms_t *ms; jpc_dec_tile_t *tile; jpc_dec_ccp_t *ccp; jpc_dec_cp_t *cp; int bandno; jpc_dec_prc_t *prc; int usedcblkcnt; int cblkno; uint_fast32_t bodylen; bool discard; int passno; int maxpasses; int hdrlen; int hdroffstart; int hdroffend; /* Avoid compiler warning about possible use of uninitialized variable. */ bodylen = 0; discard = (lyrno >= dec->maxlyrs); tile = dec->curtile; cp = tile->cp; ccp = &cp->ccps[compno]; /* * Decode the packet header. */ /* Decode the SOP marker segment if present. 
*/ if (cp->csty & JPC_COD_SOP) { if (jpc_dec_lookahead(in) == JPC_MS_SOP) { if (!(ms = jpc_getms(in, dec->cstate))) { return -1; } if (jpc_ms_gettype(ms) != JPC_MS_SOP) { jpc_ms_destroy(ms); jas_eprintf("missing SOP marker segment\n"); return -1; } jpc_ms_destroy(ms); } } hdroffstart = jas_stream_getrwcount(pkthdrstream); if (!(inb = jpc_bitstream_sopen(pkthdrstream, "r"))) { return -1; } if ((present = jpc_bitstream_getbit(inb)) < 0) { return 1; } JAS_DBGLOG(10, ("\n", present)); JAS_DBGLOG(10, ("present=%d ", present)); /* Is the packet non-empty? */ if (present) { /* The packet is non-empty. */ tcomp = &tile->tcomps[compno]; rlvl = &tcomp->rlvls[rlvlno]; bodylen = 0; for (bandno = 0, band = rlvl->bands; bandno < rlvl->numbands; ++bandno, ++band) { if (!band->data) { continue; } prc = &band->prcs[prcno]; if (!prc->cblks) { continue; } usedcblkcnt = 0; for (cblkno = 0, cblk = prc->cblks; cblkno < prc->numcblks; ++cblkno, ++cblk) { ++usedcblkcnt; if (!cblk->numpasses) { leaf = jpc_tagtree_getleaf(prc->incltagtree, usedcblkcnt - 1); if ((included = jpc_tagtree_decode(prc->incltagtree, leaf, lyrno + 1, inb)) < 0) { return -1; } } else { if ((included = jpc_bitstream_getbit(inb)) < 0) { return -1; } } JAS_DBGLOG(10, ("\n")); JAS_DBGLOG(10, ("included=%d ", included)); if (!included) { continue; } if (!cblk->numpasses) { i = 1; leaf = jpc_tagtree_getleaf(prc->numimsbstagtree, usedcblkcnt - 1); for (;;) { if ((ret = jpc_tagtree_decode(prc->numimsbstagtree, leaf, i, inb)) < 0) { return -1; } if (ret) { break; } ++i; } cblk->numimsbs = i - 1; cblk->firstpassno = cblk->numimsbs * 3; } if ((numnewpasses = jpc_getnumnewpasses(inb)) < 0) { return -1; } JAS_DBGLOG(10, ("numnewpasses=%d ", numnewpasses)); seg = cblk->curseg; savenumnewpasses = numnewpasses; mycounter = 0; if (numnewpasses > 0) { if ((m = jpc_getcommacode(inb)) < 0) { return -1; } cblk->numlenbits += m; JAS_DBGLOG(10, ("increment=%d ", m)); while (numnewpasses > 0) { passno = cblk->firstpassno + cblk->numpasses 
+ mycounter; /* XXX - the maxpasses is not set precisely but this doesn't matter... */ maxpasses = JPC_SEGPASSCNT(passno, cblk->firstpassno, 10000, (ccp->cblkctx & JPC_COX_LAZY) != 0, (ccp->cblkctx & JPC_COX_TERMALL) != 0); if (!discard && !seg) { if (!(seg = jpc_seg_alloc())) { return -1; } jpc_seglist_insert(&cblk->segs, cblk->segs.tail, seg); if (!cblk->curseg) { cblk->curseg = seg; } seg->passno = passno; seg->type = JPC_SEGTYPE(seg->passno, cblk->firstpassno, (ccp->cblkctx & JPC_COX_LAZY) != 0); seg->maxpasses = maxpasses; } n = JAS_MIN(numnewpasses, maxpasses); mycounter += n; numnewpasses -= n; if ((len = jpc_bitstream_getbits(inb, cblk->numlenbits + jpc_floorlog2(n))) < 0) { return -1; } JAS_DBGLOG(10, ("len=%d ", len)); if (!discard) { seg->lyrno = lyrno; seg->numpasses += n; seg->cnt = len; seg = seg->next; } bodylen += len; } } cblk->numpasses += savenumnewpasses; } } jpc_bitstream_inalign(inb, 0, 0); } else { if (jpc_bitstream_inalign(inb, 0x7f, 0)) { jas_eprintf("alignment failed\n"); return -1; } } jpc_bitstream_close(inb); hdroffend = jas_stream_getrwcount(pkthdrstream); hdrlen = hdroffend - hdroffstart; if (jas_getdbglevel() >= 5) { jas_eprintf("hdrlen=%lu bodylen=%lu \n", (unsigned long) hdrlen, (unsigned long) bodylen); } if (cp->csty & JPC_COD_EPH) { if (jpc_dec_lookahead(pkthdrstream) == JPC_MS_EPH) { if (!(ms = jpc_getms(pkthdrstream, dec->cstate))) { jas_eprintf("cannot get (EPH) marker segment\n"); return -1; } if (jpc_ms_gettype(ms) != JPC_MS_EPH) { jpc_ms_destroy(ms); jas_eprintf("missing EPH marker segment\n"); return -1; } jpc_ms_destroy(ms); } } /* decode the packet body. 
*/ if (jas_getdbglevel() >= 1) { jas_eprintf("packet body offset=%06ld\n", (long) jas_stream_getrwcount(in)); } if (!discard) { tcomp = &tile->tcomps[compno]; rlvl = &tcomp->rlvls[rlvlno]; for (bandno = 0, band = rlvl->bands; bandno < rlvl->numbands; ++bandno, ++band) { if (!band->data) { continue; } prc = &band->prcs[prcno]; if (!prc->cblks) { continue; } for (cblkno = 0, cblk = prc->cblks; cblkno < prc->numcblks; ++cblkno, ++cblk) { seg = cblk->curseg; while (seg) { if (!seg->stream) { if (!(seg->stream = jas_stream_memopen(0, 0))) { return -1; } } #if 0 jas_eprintf("lyrno=%02d, compno=%02d, lvlno=%02d, prcno=%02d, bandno=%02d, cblkno=%02d, passno=%02d numpasses=%02d cnt=%d numbps=%d, numimsbs=%d\n", lyrno, compno, rlvlno, prcno, band - rlvl->bands, cblk - prc->cblks, seg->passno, seg->numpasses, seg->cnt, band->numbps, cblk->numimsbs); #endif if (seg->cnt > 0) { if (jpc_getdata(in, seg->stream, seg->cnt) < 0) { return -1; } seg->cnt = 0; } if (seg->numpasses >= seg->maxpasses) { cblk->curseg = seg->next; } seg = seg->next; } } } } else { if (jas_stream_gobble(in, bodylen) != JAS_CAST(int, bodylen)) { return -1; } } return 0; } /********************************************************************************************/ /********************************************************************************************/ int jpc_dec_decodepkts(jpc_dec_t *dec, jas_stream_t *pkthdrstream, jas_stream_t *in) { jpc_dec_tile_t *tile; jpc_pi_t *pi; int ret; tile = dec->curtile; pi = tile->pi; for (;;) { if (!tile->pkthdrstream || jas_stream_peekc(tile->pkthdrstream) == EOF) { switch (jpc_dec_lookahead(in)) { case JPC_MS_EOC: case JPC_MS_SOT: return 0; break; case JPC_MS_SOP: case JPC_MS_EPH: case 0: break; default: return -1; break; } } if ((ret = jpc_pi_next(pi))) { return ret; } if (dec->maxpkts >= 0 && dec->numpkts >= dec->maxpkts) { jas_eprintf("warning: stopping decode prematurely as requested\n"); return 0; } if (jas_getdbglevel() >= 1) { jas_eprintf("packet offset=%08ld 
prg=%d cmptno=%02d " "rlvlno=%02d prcno=%03d lyrno=%02d\n", (long) jas_stream_getrwcount(in), jpc_pi_prg(pi), jpc_pi_cmptno(pi), jpc_pi_rlvlno(pi), jpc_pi_prcno(pi), jpc_pi_lyrno(pi)); } if (jpc_dec_decodepkt(dec, pkthdrstream, in, jpc_pi_cmptno(pi), jpc_pi_rlvlno(pi), jpc_pi_prcno(pi), jpc_pi_lyrno(pi))) { return -1; } ++dec->numpkts; } return 0; } jpc_pi_t *jpc_dec_pi_create(jpc_dec_t *dec, jpc_dec_tile_t *tile) { jpc_pi_t *pi; int compno; jpc_picomp_t *picomp; jpc_pirlvl_t *pirlvl; jpc_dec_tcomp_t *tcomp; int rlvlno; jpc_dec_rlvl_t *rlvl; int prcno; int *prclyrno; jpc_dec_cmpt_t *cmpt; if (!(pi = jpc_pi_create0())) { return 0; } pi->numcomps = dec->numcomps; if (!(pi->picomps = jas_alloc2(pi->numcomps, sizeof(jpc_picomp_t)))) { jpc_pi_destroy(pi); return 0; } for (compno = 0, picomp = pi->picomps; compno < pi->numcomps; ++compno, ++picomp) { picomp->pirlvls = 0; } for (compno = 0, tcomp = tile->tcomps, picomp = pi->picomps; compno < pi->numcomps; ++compno, ++tcomp, ++picomp) { picomp->numrlvls = tcomp->numrlvls; if (!(picomp->pirlvls = jas_alloc2(picomp->numrlvls, sizeof(jpc_pirlvl_t)))) { jpc_pi_destroy(pi); return 0; } for (rlvlno = 0, pirlvl = picomp->pirlvls; rlvlno < picomp->numrlvls; ++rlvlno, ++pirlvl) { pirlvl->prclyrnos = 0; } for (rlvlno = 0, pirlvl = picomp->pirlvls, rlvl = tcomp->rlvls; rlvlno < picomp->numrlvls; ++rlvlno, ++pirlvl, ++rlvl) { /* XXX sizeof(long) should be sizeof different type */ pirlvl->numprcs = rlvl->numprcs; if (!(pirlvl->prclyrnos = jas_alloc2(pirlvl->numprcs, sizeof(long)))) { jpc_pi_destroy(pi); return 0; } } } pi->maxrlvls = 0; for (compno = 0, tcomp = tile->tcomps, picomp = pi->picomps, cmpt = dec->cmpts; compno < pi->numcomps; ++compno, ++tcomp, ++picomp, ++cmpt) { picomp->hsamp = cmpt->hstep; picomp->vsamp = cmpt->vstep; for (rlvlno = 0, pirlvl = picomp->pirlvls, rlvl = tcomp->rlvls; rlvlno < picomp->numrlvls; ++rlvlno, ++pirlvl, ++rlvl) { pirlvl->prcwidthexpn = rlvl->prcwidthexpn; pirlvl->prcheightexpn = 
rlvl->prcheightexpn; for (prcno = 0, prclyrno = pirlvl->prclyrnos; prcno < pirlvl->numprcs; ++prcno, ++prclyrno) { *prclyrno = 0; } pirlvl->numhprcs = rlvl->numhprcs; } if (pi->maxrlvls < tcomp->numrlvls) { pi->maxrlvls = tcomp->numrlvls; } } pi->numlyrs = tile->cp->numlyrs; pi->xstart = tile->xstart; pi->ystart = tile->ystart; pi->xend = tile->xend; pi->yend = tile->yend; pi->picomp = 0; pi->pirlvl = 0; pi->x = 0; pi->y = 0; pi->compno = 0; pi->rlvlno = 0; pi->prcno = 0; pi->lyrno = 0; pi->xstep = 0; pi->ystep = 0; pi->pchgno = -1; pi->defaultpchg.prgord = tile->cp->prgord; pi->defaultpchg.compnostart = 0; pi->defaultpchg.compnoend = pi->numcomps; pi->defaultpchg.rlvlnostart = 0; pi->defaultpchg.rlvlnoend = pi->maxrlvls; pi->defaultpchg.lyrnoend = pi->numlyrs; pi->pchg = 0; pi->valid = 0; return pi; } long jpc_dec_lookahead(jas_stream_t *in) { uint_fast16_t x; if (jpc_getuint16(in, &x)) { return -1; } if (jas_stream_ungetc(in, x & 0xff) == EOF || jas_stream_ungetc(in, x >> 8) == EOF) { return -1; } if (x >= JPC_MS_INMIN && x <= JPC_MS_INMAX) { return x; } return 0; }
int jpc_dec_decodepkts(jpc_dec_t *dec, jas_stream_t *pkthdrstream, jas_stream_t *in) { jpc_dec_tile_t *tile; jpc_pi_t *pi; int ret; tile = dec->curtile; pi = tile->pi; for (;;) { if (!tile->pkthdrstream || jas_stream_peekc(tile->pkthdrstream) == EOF) { switch (jpc_dec_lookahead(in)) { case JPC_MS_EOC: case JPC_MS_SOT: return 0; break; case JPC_MS_SOP: case JPC_MS_EPH: case 0: break; default: return -1; break; } } if ((ret = jpc_pi_next(pi))) { return ret; } if (dec->maxpkts >= 0 && dec->numpkts >= dec->maxpkts) { jas_eprintf("warning: stopping decode prematurely as requested\n"); return 0; } if (jas_getdbglevel() >= 1) { jas_eprintf("packet offset=%08ld prg=%d cmptno=%02d " "rlvlno=%02d prcno=%03d lyrno=%02d\n", (long) jas_stream_getrwcount(in), jpc_pi_prg(pi), jpc_pi_cmptno(pi), jpc_pi_rlvlno(pi), jpc_pi_prcno(pi), jpc_pi_lyrno(pi)); } if (jpc_dec_decodepkt(dec, pkthdrstream, in, jpc_pi_cmptno(pi), jpc_pi_rlvlno(pi), jpc_pi_prcno(pi), jpc_pi_lyrno(pi))) { return -1; } ++dec->numpkts; } return 0; }
int jpc_dec_decodepkts(jpc_dec_t *dec, jas_stream_t *pkthdrstream, jas_stream_t *in) { jpc_dec_tile_t *tile; jpc_pi_t *pi; int ret; tile = dec->curtile; pi = tile->pi; for (;;) { if (!tile->pkthdrstream || jas_stream_peekc(tile->pkthdrstream) == EOF) { switch (jpc_dec_lookahead(in)) { case JPC_MS_EOC: case JPC_MS_SOT: return 0; break; case JPC_MS_SOP: case JPC_MS_EPH: case 0: break; default: return -1; break; } } if ((ret = jpc_pi_next(pi))) { return ret; } if (dec->maxpkts >= 0 && dec->numpkts >= dec->maxpkts) { jas_eprintf("warning: stopping decode prematurely as requested\n"); return 0; } if (jas_getdbglevel() >= 1) { jas_eprintf("packet offset=%08ld prg=%d cmptno=%02d " "rlvlno=%02d prcno=%03d lyrno=%02d\n", (long) jas_stream_getrwcount(in), jpc_pi_prg(pi), jpc_pi_cmptno(pi), jpc_pi_rlvlno(pi), jpc_pi_prcno(pi), jpc_pi_lyrno(pi)); } if (jpc_dec_decodepkt(dec, pkthdrstream, in, jpc_pi_cmptno(pi), jpc_pi_rlvlno(pi), jpc_pi_prcno(pi), jpc_pi_lyrno(pi))) { return -1; } ++dec->numpkts; } return 0; }
{'added': [(426, '\t\tif (!tile->pkthdrstream || jas_stream_peekc(tile->pkthdrstream) == EOF) {'), (427, '\t\t\tswitch (jpc_dec_lookahead(in)) {'), (428, '\t\t\tcase JPC_MS_EOC:'), (429, '\t\t\tcase JPC_MS_SOT:'), (430, '\t\t\t\treturn 0;'), (431, '\t\t\t\tbreak;'), (432, '\t\t\tcase JPC_MS_SOP:'), (433, '\t\t\tcase JPC_MS_EPH:'), (434, '\t\t\tcase 0:'), (435, '\t\t\t\tbreak;'), (436, '\t\t\tdefault:'), (437, '\t\t\t\treturn -1;'), (438, '\t\t\t\tbreak;'), (439, '\t\t\t}'), (444, '\t\tif (dec->maxpkts >= 0 && dec->numpkts >= dec->maxpkts) {'), (445, '\t\t\tjas_eprintf("warning: stopping decode prematurely as requested\\n");'), (446, '\t\t\treturn 0;'), (447, '\t\t}'), (454, '\t\tif (jpc_dec_decodepkt(dec, pkthdrstream, in, jpc_pi_cmptno(pi),'), (455, '\t\t jpc_pi_rlvlno(pi), jpc_pi_prcno(pi), jpc_pi_lyrno(pi))) {'), (458, '\t\t++dec->numpkts;')], 'deleted': [(426, 'if (!tile->pkthdrstream || jas_stream_peekc(tile->pkthdrstream) == EOF) {'), (427, '\t\tswitch (jpc_dec_lookahead(in)) {'), (428, '\t\tcase JPC_MS_EOC:'), (429, '\t\tcase JPC_MS_SOT:'), (430, '\t\t\treturn 0;'), (431, '\t\t\tbreak;'), (432, '\t\tcase JPC_MS_SOP:'), (433, '\t\tcase JPC_MS_EPH:'), (434, '\t\tcase 0:'), (435, '\t\t\tbreak;'), (436, '\t\tdefault:'), (437, '\t\t\treturn -1;'), (438, '\t\t\tbreak;'), (440, '}'), (444, 'if (dec->maxpkts >= 0 && dec->numpkts >= dec->maxpkts) {'), (445, '\tjas_eprintf("warning: stopping decode prematurely as requested\\n");'), (446, '\treturn 0;'), (447, '}'), (454, '\t\tif (jpc_dec_decodepkt(dec, pkthdrstream, in, jpc_pi_cmptno(pi), jpc_pi_rlvlno(pi),'), (455, '\t\t jpc_pi_prcno(pi), jpc_pi_lyrno(pi))) {'), (458, '++dec->numpkts;')]}
21
21
444
2,741
44
239
14
https://github.com/mdadams/jasper
CVE-2016-9583
CWE-125
1,042
IOBuf.cpp
C++
folly::IOBuf::reserveSlow
/* * Copyright (c) Facebook, Inc. and its affiliates. * * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. * You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. */ #ifndef __STDC_LIMIT_MACROS #define __STDC_LIMIT_MACROS #endif #include <folly/io/IOBuf.h> #include <cassert> #include <cstdint> #include <cstdlib> #include <stdexcept> #include <folly/Conv.h> #include <folly/Likely.h> #include <folly/Memory.h> #include <folly/ScopeGuard.h> #include <folly/hash/SpookyHashV2.h> #include <folly/io/Cursor.h> #include <folly/lang/Align.h> #include <folly/lang/Exception.h> #include <folly/memory/Malloc.h> #include <folly/memory/SanitizeAddress.h> /* * Callbacks that will be invoked when IOBuf allocates or frees memory. * Note that io_buf_alloc_cb() will also be invoked when IOBuf takes ownership * of a malloc-allocated buffer, even if it was allocated earlier by another * part of the code. * * By default these are unimplemented, but programs can define these functions * to perform their own custom logic on memory allocation. This is intended * primarily to help programs track memory usage and possibly take action * when thresholds are hit. Callers should generally avoid performing any * expensive work in these callbacks, since they may be called from arbitrary * locations in the code that use IOBuf, possibly while holding locks. 
*/ #if FOLLY_HAVE_WEAK_SYMBOLS FOLLY_ATTR_WEAK void io_buf_alloc_cb(void* /*ptr*/, size_t /*size*/) noexcept; FOLLY_ATTR_WEAK void io_buf_free_cb(void* /*ptr*/, size_t /*size*/) noexcept; #else static void (*io_buf_alloc_cb)(void* /*ptr*/, size_t /*size*/) noexcept = nullptr; static void (*io_buf_free_cb)(void* /*ptr*/, size_t /*size*/) noexcept = nullptr; #endif using std::unique_ptr; namespace { enum : uint16_t { kHeapMagic = 0xa5a5, // This memory segment contains an IOBuf that is still in use kIOBufInUse = 0x01, // This memory segment contains buffer data that is still in use kDataInUse = 0x02, // This memory segment contains a SharedInfo that is still in use kSharedInfoInUse = 0x04, }; enum : std::size_t { // When create() is called for buffers less than kDefaultCombinedBufSize, // we allocate a single combined memory segment for the IOBuf and the data // together. See the comments for createCombined()/createSeparate() for more // details. // // (The size of 1k is largely just a guess here. We could could probably do // benchmarks of real applications to see if adjusting this number makes a // difference. Callers that know their exact use case can also explicitly // call createCombined() or createSeparate().) kDefaultCombinedBufSize = 1024 }; // Helper function for IOBuf::takeOwnership() // The user's free function is not allowed to throw. // (We are already in the middle of throwing an exception, so // we cannot let this exception go unhandled.) void takeOwnershipError( bool freeOnError, void* buf, folly::IOBuf::FreeFunction freeFn, void* userData) noexcept { if (!freeOnError) { return; } if (!freeFn) { free(buf); return; } freeFn(buf, userData); } } // namespace namespace folly { // use free for size >= 4GB // since we can store only 32 bits in the size var struct IOBuf::HeapPrefix { HeapPrefix(uint16_t flg, size_t sz) : magic(kHeapMagic), flags(flg), size((sz == ((size_t)(uint32_t)sz)) ? 
static_cast<uint32_t>(sz) : 0) {} ~HeapPrefix() { // Reset magic to 0 on destruction. This is solely for debugging purposes // to help catch bugs where someone tries to use HeapStorage after it has // been deleted. magic = 0; } uint16_t magic; std::atomic<uint16_t> flags; uint32_t size; }; struct IOBuf::HeapStorage { HeapPrefix prefix; // The IOBuf is last in the HeapStorage object. // This way operator new will work even if allocating a subclass of IOBuf // that requires more space. folly::IOBuf buf; }; struct IOBuf::HeapFullStorage { // Make sure jemalloc allocates from the 64-byte class. Putting this here // because HeapStorage is private so it can't be at namespace level. static_assert(sizeof(HeapStorage) <= 64, "IOBuf may not grow over 56 bytes!"); HeapStorage hs; SharedInfo shared; folly::max_align_t align; }; IOBuf::SharedInfo::SharedInfo() : freeFn(nullptr), userData(nullptr), useHeapFullStorage(false) { // Use relaxed memory ordering here. Since we are creating a new SharedInfo, // no other threads should be referring to it yet. refcount.store(1, std::memory_order_relaxed); } IOBuf::SharedInfo::SharedInfo(FreeFunction fn, void* arg, bool hfs) : freeFn(fn), userData(arg), useHeapFullStorage(hfs) { // Use relaxed memory ordering here. Since we are creating a new SharedInfo, // no other threads should be referring to it yet. 
refcount.store(1, std::memory_order_relaxed);
}

// Unlink the circular observer list (observerListHead->prev is the tail),
// then invoke `cb` on every entry and delete it.  No-op when either the list
// or the callback is empty.
void IOBuf::SharedInfo::invokeAndDeleteEachObserver(
    SharedInfoObserverEntryBase* observerListHead, ObserverCb cb) noexcept {
  if (observerListHead && cb) {
    // break the chain
    observerListHead->prev->next = nullptr;
    auto entry = observerListHead;
    while (entry) {
      auto tmp = entry->next;
      cb(*entry);
      delete entry;
      entry = tmp;
    }
  }
}

// Release a SharedInfo that lives inside a HeapFullStorage combined
// allocation: recover the enclosing storage via offsetof, run the SharedInfo
// destructor, and clear the kSharedInfoInUse flag on the storage.
void IOBuf::SharedInfo::releaseStorage(SharedInfo* info) noexcept {
  if (info->useHeapFullStorage) {
    auto storageAddr =
        reinterpret_cast<uint8_t*>(info) - offsetof(HeapFullStorage, shared);
    auto storage = reinterpret_cast<HeapFullStorage*>(storageAddr);
    info->~SharedInfo();
    IOBuf::releaseStorage(&storage->hs, kSharedInfoInUse);
  }
}

// Heap-allocate an IOBuf (or subclass — hence offsetof + size) inside a
// HeapStorage so deletion can be tracked through HeapPrefix flags.  Returns a
// pointer to the embedded IOBuf, not to the start of the allocation.
void* IOBuf::operator new(size_t size) {
  size_t fullSize = offsetof(HeapStorage, buf) + size;
  auto storage = static_cast<HeapStorage*>(checkedMalloc(fullSize));

  new (&storage->prefix) HeapPrefix(kIOBufInUse, fullSize);

  if (io_buf_alloc_cb) {
    io_buf_alloc_cb(storage, fullSize);
  }

  return &(storage->buf);
}

// Placement new: nothing to allocate.
void* IOBuf::operator new(size_t /* size */, void* ptr) {
  return ptr;
}

// Matching delete: map the IOBuf pointer back to its HeapStorage and clear
// kIOBufInUse (the block is freed once all flags are clear).
void IOBuf::operator delete(void* ptr) {
  auto storageAddr = static_cast<uint8_t*>(ptr) - offsetof(HeapStorage, buf);
  auto storage = reinterpret_cast<HeapStorage*>(storageAddr);
  releaseStorage(storage, kIOBufInUse);
}

void IOBuf::operator delete(void* /* ptr */, void* /* placement */) {
  // Provide matching operator for `IOBuf::new` to avoid MSVC compilation
  // warning (C4291) about memory leak when exception is thrown in the
  // constructor.
}

// Clear `freeFlags` in storage->prefix.flags; when the last flag is cleared
// the HeapPrefix is destroyed and the whole block is freed (sized free when
// the recorded size is available).
void IOBuf::releaseStorage(HeapStorage* storage, uint16_t freeFlags) noexcept {
  CHECK_EQ(storage->prefix.magic, static_cast<uint16_t>(kHeapMagic));

  // Use relaxed memory order here.  If we are unlucky and happen to get
  // out-of-date data the compare_exchange_weak() call below will catch
  // it and load new data with memory_order_acq_rel.
  auto flags = storage->prefix.flags.load(std::memory_order_acquire);
  DCHECK_EQ((flags & freeFlags), freeFlags);

  while (true) {
    auto newFlags = uint16_t(flags & ~freeFlags);
    if (newFlags == 0) {
      // save the size
      size_t size = storage->prefix.size;
      // The storage space is now unused.  Free it.
      storage->prefix.HeapPrefix::~HeapPrefix();
      if (FOLLY_LIKELY(size)) {
        if (io_buf_free_cb) {
          io_buf_free_cb(storage, size);
        }
        sizedFree(storage, size);
      } else {
        free(storage);
      }
      return;
    }

    // This storage segment still contains portions that are in use.
    // Just clear the flags specified in freeFlags for now.
    auto ret = storage->prefix.flags.compare_exchange_weak(
        flags, newFlags, std::memory_order_acq_rel);
    if (ret) {
      // We successfully updated the flags.
      return;
    }

    // We failed to update the flags.  Some other thread probably updated them
    // and cleared some of the other bits.  Continue around the loop to see if
    // we are the last user now, or if we need to try updating the flags again.
  }
}

// FreeFunction installed for combined allocations: userData is the
// HeapStorage, so releasing the data just clears kDataInUse on it.
void IOBuf::freeInternalBuf(void* /* buf */, void* userData) noexcept {
  auto storage = static_cast<HeapStorage*>(userData);
  releaseStorage(storage, kDataInUse);
}

// Allocate a fresh externally-stored buffer of at least `capacity` bytes; the
// IOBuf starts empty (length 0) with data_ at the front of the buffer.
IOBuf::IOBuf(CreateOp, std::size_t capacity)
    : next_(this),
      prev_(this),
      data_(nullptr),
      length_(0),
      flagsAndSharedInfo_(0) {
  SharedInfo* info;
  allocExtBuffer(capacity, &buf_, &info, &capacity_);
  setSharedInfo(info);
  data_ = buf_;
}

// Allocate headroom + size + minTailroom bytes and copy `buf` in after the
// headroom.  `buf` may be null only when size == 0.
IOBuf::IOBuf(
    CopyBufferOp /* op */,
    const void* buf,
    std::size_t size,
    std::size_t headroom,
    std::size_t minTailroom)
    : IOBuf(CREATE, headroom + size + minTailroom) {
  advance(headroom);
  if (size > 0) {
    assert(buf != nullptr);
    memcpy(writableData(), buf, size);
    append(size);
  }
}

IOBuf::IOBuf(
    CopyBufferOp op,
    ByteRange br,
    std::size_t headroom,
    std::size_t minTailroom)
    : IOBuf(op, br.data(), br.size(), headroom, minTailroom) {}

unique_ptr<IOBuf> IOBuf::create(std::size_t capacity) {
  // For smaller-sized buffers, allocate the IOBuf, SharedInfo, and the buffer
  // all with a single allocation.
//
  // We don't do this for larger buffers since it can be wasteful if the user
  // needs to reallocate the buffer but keeps using the same IOBuf object.
  // In this case we can't free the data space until the IOBuf is also
  // destroyed.  Callers can explicitly call createCombined() or
  // createSeparate() if they know their use case better, and know if they are
  // likely to reallocate the buffer later.
  if (capacity <= kDefaultCombinedBufSize) {
    return createCombined(capacity);
  }

  // if we have nallocx, we want to allocate the capacity and the overhead in
  // a single allocation only if we do not cross into the next allocation class
  // for some buffer sizes, this can use about 25% extra memory
  if (canNallocx()) {
    auto mallocSize = goodMallocSize(capacity);
    // round capacity to a multiple of 8
    size_t minSize = ((capacity + 7) & ~7) + sizeof(SharedInfo);
    // if we do not have space for the overhead, allocate the mem separately
    if (mallocSize < minSize) {
      auto* buf = checkedMalloc(mallocSize);
      return takeOwnership(SIZED_FREE, buf, mallocSize, 0, 0);
    }
  }

  return createSeparate(capacity);
}

unique_ptr<IOBuf> IOBuf::createCombined(std::size_t capacity) {
  // To save a memory allocation, allocate space for the IOBuf object, the
  // SharedInfo struct, and the data itself all with a single call to malloc().
  size_t requiredStorage = offsetof(HeapFullStorage, align) + capacity;
  size_t mallocSize = goodMallocSize(requiredStorage);
  auto storage = static_cast<HeapFullStorage*>(checkedMalloc(mallocSize));

  new (&storage->hs.prefix) HeapPrefix(kIOBufInUse | kDataInUse, mallocSize);
  new (&storage->shared) SharedInfo(freeInternalBuf, storage);

  if (io_buf_alloc_cb) {
    io_buf_alloc_cb(storage, mallocSize);
  }

  // Everything from `align` to the end of the malloc block is usable buffer
  // space, so the actual capacity may exceed the request.
  auto bufAddr = reinterpret_cast<uint8_t*>(&storage->align);
  uint8_t* storageEnd = reinterpret_cast<uint8_t*>(storage) + mallocSize;
  auto actualCapacity = size_t(storageEnd - bufAddr);
  unique_ptr<IOBuf> ret(new (&storage->hs.buf) IOBuf(
      InternalConstructor(),
      packFlagsAndSharedInfo(0, &storage->shared),
      bufAddr,
      actualCapacity,
      bufAddr,
      0));
  return ret;
}

unique_ptr<IOBuf> IOBuf::createSeparate(std::size_t capacity) {
  return std::make_unique<IOBuf>(CREATE, capacity);
}

// Build a chain of IOBufs totalling at least `totalCapacity` bytes, with each
// element's capacity capped at maxBufCapacity.
unique_ptr<IOBuf> IOBuf::createChain(
    size_t totalCapacity, std::size_t maxBufCapacity) {
  unique_ptr<IOBuf> out =
      create(std::min(totalCapacity, size_t(maxBufCapacity)));
  size_t allocatedCapacity = out->capacity();

  while (allocatedCapacity < totalCapacity) {
    unique_ptr<IOBuf> newBuf = create(
        std::min(totalCapacity - allocatedCapacity, size_t(maxBufCapacity)));
    allocatedCapacity += newBuf->capacity();
    out->prependChain(std::move(newBuf));
  }

  return out;
}

// Return the usable capacity a buffer of at least `minCapacity` would end up
// with after allocator size-class rounding, for the given storage strategy.
size_t IOBuf::goodSize(size_t minCapacity, CombinedOption combined) {
  if (combined == CombinedOption::DEFAULT) {
    combined = minCapacity <= kDefaultCombinedBufSize ?
CombinedOption::COMBINED : CombinedOption::SEPARATE;
  }
  size_t overhead;
  if (combined == CombinedOption::COMBINED) {
    overhead = offsetof(HeapFullStorage, align);
  } else {
    // Pad minCapacity to a multiple of 8
    minCapacity = (minCapacity + 7) & ~7;
    overhead = sizeof(SharedInfo);
  }
  size_t goodSize = folly::goodMallocSize(minCapacity + overhead);
  return goodSize - overhead;
}

// Take ownership of a caller-supplied buffer; `freeFn(buf, userData)` will be
// invoked to release it.  The SharedInfo is heap-allocated separately, marked
// by kFlagFreeSharedInfo so decrementRefcount() deletes it.
IOBuf::IOBuf(
    TakeOwnershipOp,
    void* buf,
    std::size_t capacity,
    std::size_t offset,
    std::size_t length,
    FreeFunction freeFn,
    void* userData,
    bool freeOnError)
    : next_(this),
      prev_(this),
      data_(static_cast<uint8_t*>(buf) + offset),
      buf_(static_cast<uint8_t*>(buf)),
      length_(length),
      capacity_(capacity),
      flagsAndSharedInfo_(
          packFlagsAndSharedInfo(kFlagFreeSharedInfo, nullptr)) {
  // do not allow only user data without a freeFn
  // since we use that for folly::sizedFree
  DCHECK(!userData || (userData && freeFn));

  // If the SharedInfo allocation throws, dispose of the caller's buffer
  // according to freeOnError.
  auto rollback = makeGuard([&] { //
    takeOwnershipError(freeOnError, buf, freeFn, userData);
  });
  setSharedInfo(new SharedInfo(freeFn, userData));
  rollback.dismiss();
}

// Take ownership of a malloc()-ed buffer whose allocation size is known; the
// size is stashed in SharedInfo::userData so it can feed sizedFree later.
IOBuf::IOBuf(
    TakeOwnershipOp,
    SizedFree,
    void* buf,
    std::size_t capacity,
    std::size_t offset,
    std::size_t length,
    bool freeOnError)
    : next_(this),
      prev_(this),
      data_(static_cast<uint8_t*>(buf) + offset),
      buf_(static_cast<uint8_t*>(buf)),
      length_(length),
      capacity_(capacity),
      flagsAndSharedInfo_(
          packFlagsAndSharedInfo(kFlagFreeSharedInfo, nullptr)) {
  auto rollback = makeGuard([&] { //
    takeOwnershipError(freeOnError, buf, nullptr, nullptr);
  });
  setSharedInfo(new SharedInfo(nullptr, reinterpret_cast<void*>(capacity)));
  rollback.dismiss();

  if (io_buf_alloc_cb && capacity) {
    io_buf_alloc_cb(buf, capacity);
  }
}

// Heap-allocating variant of takeOwnership(): the IOBuf and SharedInfo are
// created together in a single HeapFullStorage block.
unique_ptr<IOBuf> IOBuf::takeOwnership(
    void* buf,
    std::size_t capacity,
    std::size_t offset,
    std::size_t length,
    FreeFunction freeFn,
    void* userData,
    bool freeOnError,
    TakeOwnershipOption option) {
  // do not allow only user data without a freeFn
  // since we use that for folly::sizedFree
  DCHECK(
      !userData || (userData && freeFn) ||
      (userData && !freeFn && (option == TakeOwnershipOption::STORE_SIZE)));

  HeapFullStorage* storage = nullptr;
  auto rollback = makeGuard([&] {
    if (storage) {
      free(storage);
    }
    takeOwnershipError(freeOnError, buf, freeFn, userData);
  });

  size_t requiredStorage = sizeof(HeapFullStorage);
  size_t mallocSize = goodMallocSize(requiredStorage);
  storage = static_cast<HeapFullStorage*>(checkedMalloc(mallocSize));

  new (&storage->hs.prefix)
      HeapPrefix(kIOBufInUse | kSharedInfoInUse, mallocSize);
  new (&storage->shared)
      SharedInfo(freeFn, userData, true /*useHeapFullStorage*/);

  auto result = unique_ptr<IOBuf>(new (&storage->hs.buf) IOBuf(
      InternalConstructor(),
      packFlagsAndSharedInfo(0, &storage->shared),
      static_cast<uint8_t*>(buf),
      capacity,
      static_cast<uint8_t*>(buf) + offset,
      length));

  rollback.dismiss();

  if (io_buf_alloc_cb) {
    io_buf_alloc_cb(storage, mallocSize);
    if (userData && !freeFn && (option == TakeOwnershipOption::STORE_SIZE)) {
      // Even though we did not allocate the buffer, call io_buf_alloc_cb()
      // since we will call io_buf_free_cb() on destruction, and we want these
      // calls to be 1:1.
      io_buf_alloc_cb(buf, capacity);
    }
  }

  return result;
}

// Wrap an existing buffer without taking ownership (no SharedInfo); the
// buffer is treated as fully filled (length == capacity).
IOBuf::IOBuf(WrapBufferOp, const void* buf, std::size_t capacity) noexcept
    : IOBuf(
          InternalConstructor(),
          0,
          // We cast away the const-ness of the buffer here.
          // This is okay since IOBuf users must use unshare() to create a copy
          // of this buffer before writing to the buffer.
static_cast<uint8_t*>(const_cast<void*>(buf)),
          capacity,
          static_cast<uint8_t*>(const_cast<void*>(buf)),
          capacity) {}

IOBuf::IOBuf(WrapBufferOp op, ByteRange br) noexcept
    : IOBuf(op, br.data(), br.size()) {}

unique_ptr<IOBuf> IOBuf::wrapBuffer(const void* buf, std::size_t capacity) {
  return std::make_unique<IOBuf>(WRAP_BUFFER, buf, capacity);
}

IOBuf IOBuf::wrapBufferAsValue(const void* buf, std::size_t capacity) noexcept {
  return IOBuf(WrapBufferOp::WRAP_BUFFER, buf, capacity);
}

IOBuf::IOBuf() noexcept = default;

// Move constructor: steals other's buffer and, if other headed a chain, the
// rest of its chain as well; other is left empty and unchained.
IOBuf::IOBuf(IOBuf&& other) noexcept
    : data_(other.data_),
      buf_(other.buf_),
      length_(other.length_),
      capacity_(other.capacity_),
      flagsAndSharedInfo_(other.flagsAndSharedInfo_) {
  // Reset other so it is a clean state to be destroyed.
  other.data_ = nullptr;
  other.buf_ = nullptr;
  other.length_ = 0;
  other.capacity_ = 0;
  other.flagsAndSharedInfo_ = 0;

  // If other was part of the chain, assume ownership of the rest of its chain.
  // (It's only valid to perform move assignment on the head of a chain.)
  if (other.next_ != &other) {
    next_ = other.next_;
    next_->prev_ = this;
    other.next_ = &other;

    prev_ = other.prev_;
    prev_->next_ = this;
    other.prev_ = &other;
  }

  // Sanity check to make sure that other is in a valid state to be destroyed.
  DCHECK_EQ(other.prev_, &other);
  DCHECK_EQ(other.next_, &other);
}

// Copy constructor: delegates to cloneAsValue(), which shares the underlying
// buffers via refcounts rather than copying bytes.
IOBuf::IOBuf(const IOBuf& other) {
  *this = other.cloneAsValue();
}

IOBuf::IOBuf(
    InternalConstructor,
    uintptr_t flagsAndSharedInfo,
    uint8_t* buf,
    std::size_t capacity,
    uint8_t* data,
    std::size_t length) noexcept
    : next_(this),
      prev_(this),
      data_(data),
      buf_(buf),
      length_(length),
      capacity_(capacity),
      flagsAndSharedInfo_(flagsAndSharedInfo) {
  assert(data >= buf);
  assert(data + length <= buf + capacity);

  CHECK(!folly::asan_region_is_poisoned(buf, capacity));
}

IOBuf::~IOBuf() {
  // Destroying an IOBuf destroys the entire chain.
  // Users of IOBuf should only explicitly delete the head of any chain.
  // The other elements in the chain will be automatically destroyed.
  while (next_ != this) {
    // Since unlink() returns unique_ptr() and we don't store it,
    // it will automatically delete the unlinked element.
    (void)next_->unlink();
  }

  decrementRefcount();
}

// Move assignment: releases our current chain and buffer, then takes over
// other's buffer and chain exactly like the move constructor.
IOBuf& IOBuf::operator=(IOBuf&& other) noexcept {
  if (this == &other) {
    return *this;
  }

  // If we are part of a chain, delete the rest of the chain.
  while (next_ != this) {
    // Since unlink() returns unique_ptr() and we don't store it,
    // it will automatically delete the unlinked element.
    (void)next_->unlink();
  }

  // Decrement our refcount on the current buffer
  decrementRefcount();

  // Take ownership of the other buffer's data
  data_ = other.data_;
  buf_ = other.buf_;
  length_ = other.length_;
  capacity_ = other.capacity_;
  flagsAndSharedInfo_ = other.flagsAndSharedInfo_;

  // Reset other so it is a clean state to be destroyed.
  other.data_ = nullptr;
  other.buf_ = nullptr;
  other.length_ = 0;
  other.capacity_ = 0;
  other.flagsAndSharedInfo_ = 0;

  // If other was part of the chain, assume ownership of the rest of its chain.
  // (It's only valid to perform move assignment on the head of a chain.)
  if (other.next_ != &other) {
    next_ = other.next_;
    next_->prev_ = this;
    other.next_ = &other;

    prev_ = other.prev_;
    prev_->next_ = this;
    other.prev_ = &other;
  }

  // Sanity check to make sure that other is in a valid state to be destroyed.
  DCHECK_EQ(other.prev_, &other);
  DCHECK_EQ(other.next_, &other);

  return *this;
}

IOBuf& IOBuf::operator=(const IOBuf& other) {
  if (this != &other) {
    *this = IOBuf(other);
  }
  return *this;
}

// True only when every element in the chain has length 0.
bool IOBuf::empty() const {
  const IOBuf* current = this;
  do {
    if (current->length() != 0) {
      return false;
    }
    current = current->next_;
  } while (current != this);
  return true;
}

size_t IOBuf::countChainElements() const {
  size_t numElements = 1;
  for (IOBuf* current = next_; current != this; current = current->next_) {
    ++numElements;
  }
  return numElements;
}

std::size_t IOBuf::computeChainDataLength() const {
  std::size_t fullLength = length_;
  for (IOBuf* current = next_; current != this; current = current->next_) {
    fullLength += current->length_;
  }
  return fullLength;
}

std::size_t IOBuf::computeChainCapacity() const {
  std::size_t fullCapacity = capacity_;
  for (IOBuf* current = next_; current != this; current = current->next_) {
    fullCapacity += current->capacity_;
  }
  return fullCapacity;
}

// Splice the entire chain headed by `iobuf` immediately before this element
// (i.e. at the tail of the chain when `this` is the head).
void IOBuf::prependChain(unique_ptr<IOBuf>&& iobuf) {
  // Take ownership of the specified IOBuf
  IOBuf* other = iobuf.release();

  // Remember the pointer to the tail of the other chain
  IOBuf* otherTail = other->prev_;

  // Hook up prev_->next_ to point at the start of the other chain,
  // and other->prev_ to point at prev_
  prev_->next_ = other;
  other->prev_ = prev_;

  // Hook up otherTail->next_ to point at us,
  // and prev_ to point back at otherTail,
  otherTail->next_ = this;
  prev_ = otherTail;
}

unique_ptr<IOBuf> IOBuf::clone() const {
  auto tmp = cloneOne();

  for (IOBuf* current = next_; current != this; current = current->next_) {
    tmp->prependChain(current->cloneOne());
  }

  return tmp;
}

// Clone this single element (not the chain); shares the underlying buffer by
// bumping the SharedInfo refcount, no data copy.
unique_ptr<IOBuf> IOBuf::cloneOne() const {
  if (SharedInfo* info = sharedInfo()) {
    info->refcount.fetch_add(1, std::memory_order_acq_rel);
  }
  return std::unique_ptr<IOBuf>(new IOBuf(
      InternalConstructor(),
      flagsAndSharedInfo_,
      buf_,
      capacity_,
      data_,
      length_));
}

unique_ptr<IOBuf> IOBuf::cloneCoalesced() const {
  return
std::make_unique<IOBuf>(cloneCoalescedAsValue());
}

unique_ptr<IOBuf> IOBuf::cloneCoalescedWithHeadroomTailroom(
    std::size_t newHeadroom, std::size_t newTailroom) const {
  return std::make_unique<IOBuf>(
      cloneCoalescedAsValueWithHeadroomTailroom(newHeadroom, newTailroom));
}

IOBuf IOBuf::cloneAsValue() const {
  auto tmp = cloneOneAsValue();

  for (IOBuf* current = next_; current != this; current = current->next_) {
    tmp.prependChain(current->cloneOne());
  }

  return tmp;
}

IOBuf IOBuf::cloneOneAsValue() const {
  if (SharedInfo* info = sharedInfo()) {
    info->refcount.fetch_add(1, std::memory_order_acq_rel);
  }
  return IOBuf(
      InternalConstructor(),
      flagsAndSharedInfo_,
      buf_,
      capacity_,
      data_,
      length_);
}

IOBuf IOBuf::cloneCoalescedAsValue() const {
  const std::size_t newHeadroom = headroom();
  const std::size_t newTailroom = prev()->tailroom();
  return cloneCoalescedAsValueWithHeadroomTailroom(newHeadroom, newTailroom);
}

// Return a single unchained IOBuf holding a copy of the whole chain's data
// with the requested head/tailroom.  Falls back to a cheap refcount clone
// when the chain is a single element that already satisfies the request.
IOBuf IOBuf::cloneCoalescedAsValueWithHeadroomTailroom(
    std::size_t newHeadroom, std::size_t newTailroom) const {
  if (!isChained() && newHeadroom <= headroom() && newTailroom <= tailroom()) {
    return cloneOneAsValue();
  }
  // Coalesce into newBuf
  const std::size_t newLength = computeChainDataLength();
  const std::size_t newCapacity = newLength + newHeadroom + newTailroom;
  IOBuf newBuf{CREATE, newCapacity};
  newBuf.advance(newHeadroom);

  auto current = this;
  do {
    if (current->length() > 0) {
      DCHECK_NOTNULL(current->data());
      DCHECK_LE(current->length(), newBuf.tailroom());
      memcpy(newBuf.writableTail(), current->data(), current->length());
      newBuf.append(current->length());
    }
    current = current->next();
  } while (current != this);

  DCHECK_EQ(newLength, newBuf.length());
  DCHECK_EQ(newHeadroom, newBuf.headroom());
  DCHECK_LE(newTailroom, newBuf.tailroom());

  return newBuf;
}

// Replace a shared single buffer with a private copy of its data.
void IOBuf::unshareOneSlow() {
  // Allocate a new buffer for the data
  uint8_t* buf;
  SharedInfo* sharedInfo;
  std::size_t actualCapacity;
  allocExtBuffer(capacity_, &buf, &sharedInfo, &actualCapacity);

  // Copy the data
  // Maintain the same amount of headroom.  Since we maintained the same
  // minimum capacity we also maintain at least the same amount of tailroom.
  std::size_t headlen = headroom();
  if (length_ > 0) {
    assert(data_ != nullptr);
    memcpy(buf + headlen, data_, length_);
  }

  // Release our reference on the old buffer
  decrementRefcount();

  // Make sure flags are all cleared.
  setFlagsAndSharedInfo(0, sharedInfo);

  // Update the buffer pointers to point to the new buffer
  data_ = buf + headlen;
  buf_ = buf;
}

void IOBuf::unshareChained() {
  // unshareChained() should only be called if we are part of a chain of
  // multiple IOBufs.  The caller should have already verified this.
  assert(isChained());

  IOBuf* current = this;
  while (true) {
    if (current->isSharedOne()) {
      // we have to unshare
      break;
    }

    current = current->next_;
    if (current == this) {
      // None of the IOBufs in the chain are shared,
      // so return without doing anything
      return;
    }
  }

  // We have to unshare.  Let coalesceSlow() do the work.
  coalesceSlow();
}

// Mark every element in the chain as externally shared.
void IOBuf::markExternallyShared() {
  IOBuf* current = this;
  do {
    current->markExternallySharedOne();
    current = current->next_;
  } while (current != this);
}

// Make every element in the chain managed (refcounted) storage.
void IOBuf::makeManagedChained() {
  assert(isChained());

  IOBuf* current = this;
  while (true) {
    current->makeManagedOne();
    current = current->next_;
    if (current == this) {
      break;
    }
  }
}

// Collapse the whole chain into this element by copying all data into one
// freshly-allocated buffer.
void IOBuf::coalesceSlow() {
  // coalesceSlow() should only be called if we are part of a chain of multiple
  // IOBufs.  The caller should have already verified this.
  DCHECK(isChained());

  // Compute the length of the entire chain
  std::size_t newLength = 0;
  IOBuf* end = this;
  do {
    newLength += end->length_;
    end = end->next_;
  } while (end != this);

  coalesceAndReallocate(newLength, end);
  // We should be only element left in the chain now
  DCHECK(!isChained());
}

// Coalesce just enough elements so this element holds at least maxLength
// bytes; throws std::overflow_error when the chain is too short.
void IOBuf::coalesceSlow(size_t maxLength) {
  // coalesceSlow() should only be called if we are part of a chain of multiple
  // IOBufs.  The caller should have already verified this.
  DCHECK(isChained());
  DCHECK_LT(length_, maxLength);

  // Compute the length of the entire chain
  std::size_t newLength = 0;
  IOBuf* end = this;
  while (true) {
    newLength += end->length_;
    end = end->next_;
    if (newLength >= maxLength) {
      break;
    }
    if (end == this) {
      throw_exception<std::overflow_error>(
          "attempted to coalesce more data than "
          "available");
    }
  }

  coalesceAndReallocate(newLength, end);
  // We should have the requested length now
  DCHECK_GE(length_, maxLength);
}

// Copy the data of the chain segment [this, end) into one new external
// buffer, point this element at it, and delete the now-redundant segment.
void IOBuf::coalesceAndReallocate(
    size_t newHeadroom, size_t newLength, IOBuf* end, size_t newTailroom) {
  std::size_t newCapacity = newLength + newHeadroom + newTailroom;

  // Allocate space for the coalesced buffer.
  // We always convert to an external buffer, even if we happened to be an
  // internal buffer before.
  uint8_t* newBuf;
  SharedInfo* newInfo;
  std::size_t actualCapacity;
  allocExtBuffer(newCapacity, &newBuf, &newInfo, &actualCapacity);

  // Copy the data into the new buffer
  uint8_t* newData = newBuf + newHeadroom;
  uint8_t* p = newData;
  IOBuf* current = this;
  size_t remaining = newLength;
  do {
    if (current->length_ > 0) {
      assert(current->length_ <= remaining);
      assert(current->data_ != nullptr);
      remaining -= current->length_;
      memcpy(p, current->data_, current->length_);
      p += current->length_;
    }
    current = current->next_;
  } while (current != end);
  assert(remaining == 0);

  // Point at the new buffer
  decrementRefcount();

  // Make sure flags are all cleared.
  setFlagsAndSharedInfo(0, newInfo);
  capacity_ = actualCapacity;
  buf_ = newBuf;
  data_ = newData;
  length_ = newLength;

  // Separate from the rest of our chain.
  // Since we don't store the unique_ptr returned by separateChain(),
  // this will immediately delete the returned subchain.
if (isChained()) {
    (void)separateChain(next_, current->prev_);
  }
}

// Drop this element's reference on its buffer; frees the buffer (and,
// depending on flags, the separately-allocated SharedInfo or the combined
// storage) when this was the last user.
void IOBuf::decrementRefcount() noexcept {
  // Externally owned buffers don't have a SharedInfo object and aren't managed
  // by the reference count
  SharedInfo* info = sharedInfo();
  if (!info) {
    return;
  }

  // Avoid doing atomic decrement if the refcount is 1.
  // This is safe, because it means that we're the last reference and destroying
  // the object.  Anything trying to copy it is already undefined behavior.
  if (info->refcount.load(std::memory_order_acquire) > 1) {
    // Decrement the refcount
    uint32_t newcnt = info->refcount.fetch_sub(1, std::memory_order_acq_rel);
    // Note that fetch_sub() returns the value before we decremented.
    // If it is 1, we were the only remaining user; if it is greater there are
    // still other users.
    if (newcnt > 1) {
      return;
    }
  }

  // save the useHeapFullStorage flag here since
  // freeExtBuffer can delete the sharedInfo()
  bool useHeapFullStorage = info->useHeapFullStorage;

  // We were the last user.  Free the buffer
  freeExtBuffer();

  // Free the SharedInfo if it was allocated separately.
  //
  // This is only used by takeOwnership().
  //
  // To avoid this special case handling in decrementRefcount(), we could have
  // takeOwnership() set a custom freeFn() that calls the user's free function
  // then frees the SharedInfo object.  (This would require that
  // takeOwnership() store the user's free function with its allocated
  // SharedInfo object.)  However, handling this specially with a flag seems
  // like it shouldn't be problematic.
  if (flags() & kFlagFreeSharedInfo) {
    delete info;
  } else {
    if (useHeapFullStorage) {
      SharedInfo::releaseStorage(info);
    }
  }
}

// Grow the buffer (or rearrange data within it) so at least minHeadroom and
// minTailroom are available around the current data.  Must not be called on
// a buffer shared with other IOBufs.
void IOBuf::reserveSlow(std::size_t minHeadroom, std::size_t minTailroom) {
  size_t newCapacity = (size_t)length_ + minHeadroom + minTailroom;
  DCHECK_LT(newCapacity, UINT32_MAX);

  // reserveSlow() is dangerous if anyone else is sharing the buffer, as we may
  // reallocate and free the original buffer.  It should only ever be called if
  // we are the only user of the buffer.
  DCHECK(!isSharedOne());

  // We'll need to reallocate the buffer.
  // There are a few options.
  // - If we have enough total room, move the data around in the buffer
  //   and adjust the data_ pointer.
  // - If we're using an internal buffer, we'll switch to an external
  //   buffer with enough headroom and tailroom.
  // - If we have enough headroom (headroom() >= minHeadroom) but not too much
  //   (so we don't waste memory), we can try one of two things, depending on
  //   whether we use jemalloc or not:
  //   - If using jemalloc, we can try to expand in place, avoiding a memcpy()
  //   - If not using jemalloc and we don't have too much to copy,
  //     we'll use realloc() (note that realloc might have to copy
  //     headroom + data + tailroom, see smartRealloc in folly/memory/Malloc.h)
  // - Otherwise, bite the bullet and reallocate.
  if (headroom() + tailroom() >= minHeadroom + minTailroom) {
    uint8_t* newData = writableBuffer() + minHeadroom;
    memmove(newData, data_, length_);
    data_ = newData;
    return;
  }

  size_t newAllocatedCapacity = 0;
  uint8_t* newBuffer = nullptr;
  std::size_t newHeadroom = 0;
  std::size_t oldHeadroom = headroom();

  // If we have a buffer allocated with malloc and we just need more tailroom,
  // try to use realloc()/xallocx() to grow the buffer in place.
  SharedInfo* info = sharedInfo();
  bool useHeapFullStorage = info && info->useHeapFullStorage;
  if (info && (info->freeFn == nullptr) && length_ != 0 &&
      oldHeadroom >= minHeadroom) {
    size_t headSlack = oldHeadroom - minHeadroom;
    newAllocatedCapacity = goodExtBufferSize(newCapacity + headSlack);
    if (usingJEMalloc()) {
      // We assume that tailroom is more useful and more important than
      // headroom (not least because realloc / xallocx allow us to grow the
      // buffer at the tail, but not at the head)  So, if we have more headroom
      // than we need, we consider that "wasted".  We arbitrarily define "too
      // much" headroom to be 25% of the capacity.
      if (headSlack * 4 <= newCapacity) {
        size_t allocatedCapacity = capacity() + sizeof(SharedInfo);
        void* p = buf_;
        if (allocatedCapacity >= jemallocMinInPlaceExpandable) {
          if (xallocx(p, newAllocatedCapacity, 0, 0) == newAllocatedCapacity) {
            if (io_buf_free_cb) {
              io_buf_free_cb(p, reinterpret_cast<size_t>(info->userData));
            }
            newBuffer = static_cast<uint8_t*>(p);
            newHeadroom = oldHeadroom;
            // update the userData
            info->userData = reinterpret_cast<void*>(newAllocatedCapacity);
            if (io_buf_alloc_cb) {
              io_buf_alloc_cb(newBuffer, newAllocatedCapacity);
            }
          }
          // if xallocx failed, do nothing, fall back to malloc/memcpy/free
        }
      }
    } else { // Not using jemalloc
      size_t copySlack = capacity() - length_;
      if (copySlack * 2 <= length_) {
        void* p = realloc(buf_, newAllocatedCapacity);
        if (UNLIKELY(p == nullptr)) {
          throw_exception<std::bad_alloc>();
        }
        newBuffer = static_cast<uint8_t*>(p);
        newHeadroom = oldHeadroom;
      }
    }
  }

  // None of the previous reallocation strategies worked (or we're using
  // an internal buffer).  malloc/copy/free.
  if (newBuffer == nullptr) {
    newAllocatedCapacity = goodExtBufferSize(newCapacity);
    newBuffer = static_cast<uint8_t*>(checkedMalloc(newAllocatedCapacity));
    if (length_ > 0) {
      assert(data_ != nullptr);
      memcpy(newBuffer + minHeadroom, data_, length_);
    }
    if (sharedInfo()) {
      freeExtBuffer();
    }
    newHeadroom = minHeadroom;
  }

  std::size_t cap;
  initExtBuffer(newBuffer, newAllocatedCapacity, &info, &cap);

  if (flags() & kFlagFreeSharedInfo) {
    delete sharedInfo();
  } else {
    if (useHeapFullStorage) {
      SharedInfo::releaseStorage(sharedInfo());
    }
  }

  setFlagsAndSharedInfo(0, info);
  capacity_ = cap;
  buf_ = newBuffer;
  data_ = newBuffer + newHeadroom;
  // length_ is unchanged
}

// The user's free function should never throw.  Otherwise we might throw from
// the IOBuf destructor.  Other code paths like coalesce() also assume that
// decrementRefcount() cannot throw.
// Release the external data buffer referenced by this IOBuf and notify any
// registered SharedInfo observers.  Must only be called when this IOBuf holds
// the last reference to the buffer (callers guarantee this via
// decrementRefcount() / coalesce paths).
void IOBuf::freeExtBuffer() noexcept {
  SharedInfo* info = sharedInfo();
  DCHECK(info);

  // save the observerListHead
  // since the SharedInfo can be freed
  auto observerListHead = info->observerListHead;
  info->observerListHead = nullptr;

  if (info->freeFn) {
    // User-supplied deleter owns the buffer.
    info->freeFn(buf_, info->userData);
  } else {
    // this will invoke free if info->userData is 0
    // (for internally allocated buffers, userData stores the malloc size)
    size_t size = reinterpret_cast<size_t>(info->userData);
    if (size) {
      if (io_buf_free_cb) {
        io_buf_free_cb(buf_, size);
      }
      folly::sizedFree(buf_, size);
    } else {
      free(buf_);
    }
  }

  SharedInfo::invokeAndDeleteEachObserver(
      observerListHead, [](auto& entry) { entry.afterFreeExtBuffer(); });

  if (kIsMobile) {
    // Null out the dangling pointer on mobile builds to help catch
    // use-after-free bugs.
    buf_ = nullptr;
  }
}

// Allocate a new external buffer of at least minCapacity bytes, with its
// SharedInfo constructed in-place at the tail of the allocation.
// Outputs: *bufReturn = buffer start, *infoReturn = SharedInfo,
// *capacityReturn = usable capacity (allocation size minus SharedInfo).
void IOBuf::allocExtBuffer(
    std::size_t minCapacity,
    uint8_t** bufReturn,
    SharedInfo** infoReturn,
    std::size_t* capacityReturn) {
  size_t mallocSize = goodExtBufferSize(minCapacity);
  auto buf = static_cast<uint8_t*>(checkedMalloc(mallocSize));
  initExtBuffer(buf, mallocSize, infoReturn, capacityReturn);

  // the userData and the freeFn are nullptr here
  // just store the mallocSize in userData
  (*infoReturn)->userData = reinterpret_cast<void*>(mallocSize);
  if (io_buf_alloc_cb) {
    io_buf_alloc_cb(buf, mallocSize);
  }

  *bufReturn = buf;
}

// Compute the size to request from malloc for an external buffer of at least
// minCapacity bytes plus its trailing SharedInfo.
size_t IOBuf::goodExtBufferSize(std::size_t minCapacity) {
  // Determine how much space we should allocate.  We'll store the SharedInfo
  // for the external buffer just after the buffer itself.  (We store it just
  // after the buffer rather than just before so that the code can still just
  // use free(buf_) to free the buffer.)
  size_t minSize = static_cast<size_t>(minCapacity) + sizeof(SharedInfo);
  // Add room for padding so that the SharedInfo will be aligned on an 8-byte
  // boundary.
  minSize = (minSize + 7) & ~7;

  // Use goodMallocSize() to bump up the capacity to a decent size to request
  // from malloc, so we can use all of the space that malloc will probably give
  // us anyway.
  return goodMallocSize(minSize);
}

// Construct a SharedInfo at the tail of an already-allocated buffer and
// report the remaining usable capacity.
void IOBuf::initExtBuffer(
    uint8_t* buf,
    size_t mallocSize,
    SharedInfo** infoReturn,
    std::size_t* capacityReturn) {
  // Find the SharedInfo storage at the end of the buffer
  // and construct the SharedInfo.
  uint8_t* infoStart = (buf + mallocSize) - sizeof(SharedInfo);
  auto sharedInfo = new (infoStart) SharedInfo;

  *capacityReturn = std::size_t(infoStart - buf);
  *infoReturn = sharedInfo;
}

// Convert this IOBuf (and its chain, if any) into an fbstring, transferring
// ownership of the underlying malloc-allocated buffer when possible.
// Leaves this IOBuf in an empty, cleanly-destructible state.
fbstring IOBuf::moveToFbString() {
  // we need to save useHeapFullStorage and the observerListHead since
  // sharedInfo() may not be valid after fbstring str
  bool useHeapFullStorage = false;
  SharedInfoObserverEntryBase* observerListHead = nullptr;
  // malloc-allocated buffers are just fine, everything else needs
  // to be turned into one.
  if (!sharedInfo() || // user owned, not ours to give up
      sharedInfo()->freeFn || // not malloc()-ed
      headroom() != 0 || // malloc()-ed block doesn't start at beginning
      tailroom() == 0 || // no room for NUL terminator
      isShared() || // shared
      isChained()) { // chained
    // We might as well get rid of all head and tailroom if we're going
    // to reallocate; we need 1 byte for NUL terminator.
    coalesceAndReallocate(0, computeChainDataLength(), this, 1);
  } else {
    auto info = sharedInfo();
    if (info) {
      // if we do not call coalesceAndReallocate
      // we might need to call SharedInfo::releaseStorage()
      // and/or SharedInfo::invokeAndDeleteEachObserver()
      useHeapFullStorage = info->useHeapFullStorage;
      // save the observerListHead
      // the coalesceAndReallocate path will call
      // decrementRefcount and freeExtBuffer if needed
      // so the observer list notification is needed here
      observerListHead = info->observerListHead;
      info->observerListHead = nullptr;
    }
  }

  // Ensure NUL terminated
  *writableTail() = 0;
  fbstring str(
      reinterpret_cast<char*>(writableData()),
      length(),
      capacity(),
      AcquireMallocatedString());

  if (io_buf_free_cb && sharedInfo() && sharedInfo()->userData) {
    io_buf_free_cb(
        writableData(), reinterpret_cast<size_t>(sharedInfo()->userData));
  }

  SharedInfo::invokeAndDeleteEachObserver(
      observerListHead, [](auto& entry) { entry.afterReleaseExtBuffer(); });

  if (flags() & kFlagFreeSharedInfo) {
    delete sharedInfo();
  } else {
    if (useHeapFullStorage) {
      SharedInfo::releaseStorage(sharedInfo());
    }
  }

  // Reset to a state where we can be deleted cleanly
  flagsAndSharedInfo_ = 0;
  buf_ = nullptr;
  clear();
  return str;
}

// Iterator over this chain, starting (and wrapping) at this element.
IOBuf::Iterator IOBuf::cbegin() const {
  return Iterator(this, this);
}

// Past-the-end iterator for the chain.
IOBuf::Iterator IOBuf::cend() const {
  return Iterator(nullptr, nullptr);
}

// Build an iovec vector describing every non-empty buffer in the chain.
folly::fbvector<struct iovec> IOBuf::getIov() const {
  folly::fbvector<struct iovec> iov;
  iov.reserve(countChainElements());
  appendToIov(&iov);
  return iov;
}

// Append one iovec per non-empty chain element to *iov.
void IOBuf::appendToIov(folly::fbvector<struct iovec>* iov) const {
  IOBuf const* p = this;
  do {
    // some code can get confused by empty iovs, so skip them
    if (p->length() > 0) {
      iov->push_back({(void*)p->data(), folly::to<size_t>(p->length())});
    }
    p = p->next();
  } while (p != this);
}

// Wrap each non-empty iovec entry as an unowned IOBuf and chain them.
// Returns an empty IOBuf when every entry is empty (never nullptr).
unique_ptr<IOBuf> IOBuf::wrapIov(const iovec* vec, size_t count) {
  unique_ptr<IOBuf> result = nullptr;
  for (size_t i = 0; i < count; ++i) {
    size_t len = vec[i].iov_len;
    void* data = vec[i].iov_base;
    if (len > 0) {
      auto buf = wrapBuffer(data, len);
      if (!result) {
        result = std::move(buf);
      } else {
        result->prependChain(std::move(buf));
      }
    }
  }
  if (UNLIKELY(result == nullptr)) {
    return create(0);
  }
  return result;
}

// Like wrapIov(), but takes ownership of each iovec buffer using freeFn.
// Returns an empty IOBuf when every entry is empty (never nullptr).
std::unique_ptr<IOBuf> IOBuf::takeOwnershipIov(
    const iovec* vec,
    size_t count,
    FreeFunction freeFn,
    void* userData,
    bool freeOnError) {
  unique_ptr<IOBuf> result = nullptr;
  for (size_t i = 0; i < count; ++i) {
    size_t len = vec[i].iov_len;
    void* data = vec[i].iov_base;
    if (len > 0) {
      auto buf = takeOwnership(data, len, freeFn, userData, freeOnError);
      if (!result) {
        result = std::move(buf);
      } else {
        result->prependChain(std::move(buf));
      }
    }
  }
  if (UNLIKELY(result == nullptr)) {
    return create(0);
  }
  return result;
}

// Fill up to len iovec entries from the chain.  Returns {entries, bytes} if
// the whole chain fit, or {0, 0} when iov was too small for the chain.
IOBuf::FillIovResult IOBuf::fillIov(struct iovec* iov, size_t len) const {
  IOBuf const* p = this;
  size_t i = 0;
  size_t totalBytes = 0;
  while (i < len) {
    // some code can get confused by empty iovs, so skip them
    if (p->length() > 0) {
      iov[i].iov_base = const_cast<uint8_t*>(p->data());
      iov[i].iov_len = p->length();
      totalBytes += p->length();
      i++;
    }
    p = p->next();
    if (p == this) {
      return {i, totalBytes};
    }
  }
  return {0, 0};
}

// Snapshot of this element's refcount (1 for unmanaged buffers).  The value
// may be stale by the time the caller uses it — hence "approximate".
uint32_t IOBuf::approximateShareCountOne() const {
  if (UNLIKELY(!sharedInfo())) {
    return 1U;
  }
  return sharedInfo()->refcount.load(std::memory_order_acquire);
}

// Hash the chain's byte content (layout-independent: only the bytes matter,
// not how they are split across chain elements).
size_t IOBufHash::operator()(const IOBuf& buf) const noexcept {
  folly::hash::SpookyHashV2 hasher;
  hasher.Init(0, 0);
  io::Cursor cursor(&buf);
  for (;;) {
    auto b = cursor.peekBytes();
    if (b.empty()) {
      break;
    }
    hasher.Update(b.data(), b.size());
    cursor.skip(b.size());
  }
  uint64_t h1;
  uint64_t h2;
  hasher.Final(&h1, &h2);
  return static_cast<std::size_t>(h1);
}

// Lexicographic comparison of two chains' byte content, again independent of
// how the bytes are distributed across chain elements.
ordering IOBufCompare::impl(const IOBuf& a, const IOBuf& b) const noexcept {
  io::Cursor ca(&a);
  io::Cursor cb(&b);
  for (;;) {
    auto ba = ca.peekBytes();
    auto bb = cb.peekBytes();
    if (ba.empty() || bb.empty()) {
      // Shorter chain orders first; equal if both exhausted.
      return to_ordering(int(bb.empty()) - int(ba.empty()));
    }
    const size_t n = std::min(ba.size(), bb.size());
    DCHECK_GT(n, 0u);
    const ordering r = to_ordering(std::memcmp(ba.data(), bb.data(), n));
    if (r != ordering::eq) {
      return r;
    }
    // Cursor::skip() may throw if n is too large, but n is not too large here
    ca.skip(n);
    cb.skip(n);
  }
}

} // namespace folly
/* * Copyright (c) Facebook, Inc. and its affiliates. * * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. * You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. */ #ifndef __STDC_LIMIT_MACROS #define __STDC_LIMIT_MACROS #endif #include <folly/io/IOBuf.h> #include <cassert> #include <cstdint> #include <cstdlib> #include <limits> #include <stdexcept> #include <folly/Conv.h> #include <folly/Likely.h> #include <folly/Memory.h> #include <folly/ScopeGuard.h> #include <folly/hash/SpookyHashV2.h> #include <folly/io/Cursor.h> #include <folly/lang/Align.h> #include <folly/lang/CheckedMath.h> #include <folly/lang/Exception.h> #include <folly/memory/Malloc.h> #include <folly/memory/SanitizeAddress.h> /* * Callbacks that will be invoked when IOBuf allocates or frees memory. * Note that io_buf_alloc_cb() will also be invoked when IOBuf takes ownership * of a malloc-allocated buffer, even if it was allocated earlier by another * part of the code. * * By default these are unimplemented, but programs can define these functions * to perform their own custom logic on memory allocation. This is intended * primarily to help programs track memory usage and possibly take action * when thresholds are hit. Callers should generally avoid performing any * expensive work in these callbacks, since they may be called from arbitrary * locations in the code that use IOBuf, possibly while holding locks. 
*/
#if FOLLY_HAVE_WEAK_SYMBOLS
FOLLY_ATTR_WEAK void io_buf_alloc_cb(void* /*ptr*/, size_t /*size*/) noexcept;
FOLLY_ATTR_WEAK void io_buf_free_cb(void* /*ptr*/, size_t /*size*/) noexcept;
#else
// No weak-symbol support: fall back to null function pointers so every call
// site can simply test `if (io_buf_alloc_cb)`.
static void (*io_buf_alloc_cb)(void* /*ptr*/, size_t /*size*/) noexcept =
    nullptr;
static void (*io_buf_free_cb)(void* /*ptr*/, size_t /*size*/) noexcept =
    nullptr;
#endif

using std::unique_ptr;

namespace {

// Flags stored in HeapPrefix::flags describing which parts of a combined
// heap allocation are still live.  The storage is freed only when all bits
// have been cleared (see IOBuf::releaseStorage()).
enum : uint16_t {
  kHeapMagic = 0xa5a5,
  // This memory segment contains an IOBuf that is still in use
  kIOBufInUse = 0x01,
  // This memory segment contains buffer data that is still in use
  kDataInUse = 0x02,
  // This memory segment contains a SharedInfo that is still in use
  kSharedInfoInUse = 0x04,
};

enum : std::size_t {
  // When create() is called for buffers less than kDefaultCombinedBufSize,
  // we allocate a single combined memory segment for the IOBuf and the data
  // together.  See the comments for createCombined()/createSeparate() for more
  // details.
  //
  // (The size of 1k is largely just a guess here.  We could could probably do
  // benchmarks of real applications to see if adjusting this number makes a
  // difference.  Callers that know their exact use case can also explicitly
  // call createCombined() or createSeparate().)
  kDefaultCombinedBufSize = 1024,
  // Hard cap on capacities to guard against overflow in size arithmetic.
  kMaxIOBufSize = std::numeric_limits<size_t>::max() >> 1,
};

// Helper function for IOBuf::takeOwnership()
// The user's free function is not allowed to throw.
// (We are already in the middle of throwing an exception, so
// we cannot let this exception go unhandled.)
void takeOwnershipError(
    bool freeOnError,
    void* buf,
    folly::IOBuf::FreeFunction freeFn,
    void* userData) noexcept {
  if (!freeOnError) {
    return;
  }
  if (!freeFn) {
    free(buf);
    return;
  }
  freeFn(buf, userData);
}

} // namespace

namespace folly {

// Header placed at the start of every heap-allocated IOBuf storage segment.
// use free for size >= 4GB
// since we can store only 32 bits in the size var
struct IOBuf::HeapPrefix {
  HeapPrefix(uint16_t flg, size_t sz)
      : magic(kHeapMagic),
        flags(flg),
        size((sz == ((size_t)(uint32_t)sz)) ? static_cast<uint32_t>(sz) : 0) {}
  ~HeapPrefix() {
    // Reset magic to 0 on destruction.  This is solely for debugging purposes
    // to help catch bugs where someone tries to use HeapStorage after it has
    // been deleted.
    magic = 0;
  }

  uint16_t magic;
  std::atomic<uint16_t> flags;
  uint32_t size;
};

// A heap segment holding a prefix plus the IOBuf object itself.
struct IOBuf::HeapStorage {
  HeapPrefix prefix;
  // The IOBuf is last in the HeapStorage object.
  // This way operator new will work even if allocating a subclass of IOBuf
  // that requires more space.
  folly::IOBuf buf;
};

// A heap segment holding prefix + IOBuf + SharedInfo (+ optionally the data,
// which begins at `align` for combined allocations).
struct IOBuf::HeapFullStorage {
  // Make sure jemalloc allocates from the 64-byte class.  Putting this here
  // because HeapStorage is private so it can't be at namespace level.
  static_assert(sizeof(HeapStorage) <= 64, "IOBuf may not grow over 56 bytes!");

  HeapStorage hs;
  SharedInfo shared;
  folly::max_align_t align;
};

IOBuf::SharedInfo::SharedInfo()
    : freeFn(nullptr), userData(nullptr), useHeapFullStorage(false) {
  // Use relaxed memory ordering here.  Since we are creating a new SharedInfo,
  // no other threads should be referring to it yet.
  refcount.store(1, std::memory_order_relaxed);
}

IOBuf::SharedInfo::SharedInfo(FreeFunction fn, void* arg, bool hfs)
    : freeFn(fn), userData(arg), useHeapFullStorage(hfs) {
  // Use relaxed memory ordering here.  Since we are creating a new SharedInfo,
  // no other threads should be referring to it yet.
  refcount.store(1, std::memory_order_relaxed);
}

// Invoke cb on every observer in the circular list headed by observerListHead,
// deleting each entry as we go.  No-op when the list or callback is null.
void IOBuf::SharedInfo::invokeAndDeleteEachObserver(
    SharedInfoObserverEntryBase* observerListHead, ObserverCb cb) noexcept {
  if (observerListHead && cb) {
    // break the chain
    observerListHead->prev->next = nullptr;
    auto entry = observerListHead;
    while (entry) {
      auto tmp = entry->next;
      cb(*entry);
      delete entry;
      entry = tmp;
    }
  }
}

// Destroy a SharedInfo that lives inside a HeapFullStorage segment and clear
// its in-use bit on the surrounding storage.
void IOBuf::SharedInfo::releaseStorage(SharedInfo* info) noexcept {
  if (info->useHeapFullStorage) {
    auto storageAddr =
        reinterpret_cast<uint8_t*>(info) - offsetof(HeapFullStorage, shared);
    auto storage = reinterpret_cast<HeapFullStorage*>(storageAddr);
    info->~SharedInfo();
    IOBuf::releaseStorage(&storage->hs, kSharedInfoInUse);
  }
}

// Allocate an IOBuf (or subclass) inside a HeapStorage segment so that
// operator delete can recover the segment from the IOBuf pointer.
void* IOBuf::operator new(size_t size) {
  if (size > kMaxIOBufSize) {
    throw_exception<std::bad_alloc>();
  }

  size_t fullSize = offsetof(HeapStorage, buf) + size;
  auto storage = static_cast<HeapStorage*>(checkedMalloc(fullSize));

  new (&storage->prefix) HeapPrefix(kIOBufInUse, fullSize);

  if (io_buf_alloc_cb) {
    io_buf_alloc_cb(storage, fullSize);
  }

  return &(storage->buf);
}

void* IOBuf::operator new(size_t /* size */, void* ptr) {
  return ptr;
}

void IOBuf::operator delete(void* ptr) {
  auto storageAddr = static_cast<uint8_t*>(ptr) - offsetof(HeapStorage, buf);
  auto storage = reinterpret_cast<HeapStorage*>(storageAddr);
  releaseStorage(storage, kIOBufInUse);
}

void IOBuf::operator delete(void* /* ptr */, void* /* placement */) {
  // Provide matching operator for `IOBuf::new` to avoid MSVC compilation
  // warning (C4291) about memory leak when exception is thrown in the
  // constructor.
}

// Clear freeFlags on the segment's in-use bitmask; free the segment once all
// bits are clear.  Lock-free: concurrent releasers race via CAS.
void IOBuf::releaseStorage(HeapStorage* storage, uint16_t freeFlags) noexcept {
  CHECK_EQ(storage->prefix.magic, static_cast<uint16_t>(kHeapMagic));

  // Use relaxed memory order here.  If we are unlucky and happen to get
  // out-of-date data the compare_exchange_weak() call below will catch
  // it and load new data with memory_order_acq_rel.
  auto flags = storage->prefix.flags.load(std::memory_order_acquire);
  DCHECK_EQ((flags & freeFlags), freeFlags);

  while (true) {
    auto newFlags = uint16_t(flags & ~freeFlags);
    if (newFlags == 0) {
      // save the size
      size_t size = storage->prefix.size;
      // The storage space is now unused.  Free it.
      storage->prefix.HeapPrefix::~HeapPrefix();
      if (FOLLY_LIKELY(size)) {
        if (io_buf_free_cb) {
          io_buf_free_cb(storage, size);
        }
        sizedFree(storage, size);
      } else {
        free(storage);
      }
      return;
    }

    // This storage segment still contains portions that are in use.
    // Just clear the flags specified in freeFlags for now.
    auto ret = storage->prefix.flags.compare_exchange_weak(
        flags, newFlags, std::memory_order_acq_rel);
    if (ret) {
      // We successfully updated the flags.
      return;
    }

    // We failed to update the flags.  Some other thread probably updated them
    // and cleared some of the other bits.  Continue around the loop to see if
    // we are the last user now, or if we need to try updating the flags again.
  }
}

// SharedInfo freeFn used for combined allocations: the "buffer" lives inside
// the HeapStorage passed back via userData, so just drop the data-in-use bit.
void IOBuf::freeInternalBuf(void* /* buf */, void* userData) noexcept {
  auto storage = static_cast<HeapStorage*>(userData);
  releaseStorage(storage, kDataInUse);
}

// CREATE: allocate a fresh external buffer of the given capacity.
IOBuf::IOBuf(CreateOp, std::size_t capacity)
    : next_(this),
      prev_(this),
      data_(nullptr),
      length_(0),
      flagsAndSharedInfo_(0) {
  SharedInfo* info;
  allocExtBuffer(capacity, &buf_, &info, &capacity_);
  setSharedInfo(info);
  data_ = buf_;
}

// COPY_BUFFER: allocate headroom + size + minTailroom and copy `buf` in.
IOBuf::IOBuf(
    CopyBufferOp /* op */,
    const void* buf,
    std::size_t size,
    std::size_t headroom,
    std::size_t minTailroom)
    : IOBuf(CREATE, headroom + size + minTailroom) {
  advance(headroom);
  if (size > 0) {
    assert(buf != nullptr);
    memcpy(writableData(), buf, size);
    append(size);
  }
}

IOBuf::IOBuf(
    CopyBufferOp op,
    ByteRange br,
    std::size_t headroom,
    std::size_t minTailroom)
    : IOBuf(op, br.data(), br.size(), headroom, minTailroom) {}

// Allocate a new IOBuf, choosing combined vs separate storage heuristically.
unique_ptr<IOBuf> IOBuf::create(std::size_t capacity) {
  if (capacity > kMaxIOBufSize) {
    throw_exception<std::bad_alloc>();
  }

  // For smaller-sized buffers, allocate the IOBuf, SharedInfo, and the buffer
  // all with a single allocation.
  //
  // We don't do this for larger buffers since it can be wasteful if the user
  // needs to reallocate the buffer but keeps using the same IOBuf object.
  // In this case we can't free the data space until the IOBuf is also
  // destroyed.  Callers can explicitly call createCombined() or
  // createSeparate() if they know their use case better, and know if they are
  // likely to reallocate the buffer later.
  if (capacity <= kDefaultCombinedBufSize) {
    return createCombined(capacity);
  }

  // if we have nallocx, we want to allocate the capacity and the overhead in
  // a single allocation only if we do not cross into the next allocation class
  // for some buffer sizes, this can use about 25% extra memory
  if (canNallocx()) {
    auto mallocSize = goodMallocSize(capacity);
    // round capacity to a multiple of 8
    size_t minSize = ((capacity + 7) & ~7) + sizeof(SharedInfo);
    // if we do not have space for the overhead, allocate the mem separately
    if (mallocSize < minSize) {
      auto* buf = checkedMalloc(mallocSize);
      return takeOwnership(SIZED_FREE, buf, mallocSize, 0, 0);
    }
  }

  return createSeparate(capacity);
}

// Allocate IOBuf + SharedInfo + data in one malloc() call.
unique_ptr<IOBuf> IOBuf::createCombined(std::size_t capacity) {
  if (capacity > kMaxIOBufSize) {
    throw_exception<std::bad_alloc>();
  }

  // To save a memory allocation, allocate space for the IOBuf object, the
  // SharedInfo struct, and the data itself all with a single call to malloc().
  size_t requiredStorage = offsetof(HeapFullStorage, align) + capacity;
  size_t mallocSize = goodMallocSize(requiredStorage);
  auto storage = static_cast<HeapFullStorage*>(checkedMalloc(mallocSize));

  new (&storage->hs.prefix) HeapPrefix(kIOBufInUse | kDataInUse, mallocSize);
  new (&storage->shared) SharedInfo(freeInternalBuf, storage);

  if (io_buf_alloc_cb) {
    io_buf_alloc_cb(storage, mallocSize);
  }

  // All bytes from `align` to the end of the segment become buffer capacity.
  auto bufAddr = reinterpret_cast<uint8_t*>(&storage->align);
  uint8_t* storageEnd = reinterpret_cast<uint8_t*>(storage) + mallocSize;
  auto actualCapacity = size_t(storageEnd - bufAddr);
  unique_ptr<IOBuf> ret(new (&storage->hs.buf) IOBuf(
      InternalConstructor(),
      packFlagsAndSharedInfo(0, &storage->shared),
      bufAddr,
      actualCapacity,
      bufAddr,
      0));
  return ret;
}

// Allocate the IOBuf object and the data buffer separately.
unique_ptr<IOBuf> IOBuf::createSeparate(std::size_t capacity) {
  return std::make_unique<IOBuf>(CREATE, capacity);
}

// Build a chain whose total capacity is at least totalCapacity, with each
// element at most maxBufCapacity.
unique_ptr<IOBuf> IOBuf::createChain(
    size_t totalCapacity, std::size_t maxBufCapacity) {
  unique_ptr<IOBuf> out =
      create(std::min(totalCapacity, size_t(maxBufCapacity)));
  size_t allocatedCapacity = out->capacity();

  while (allocatedCapacity < totalCapacity) {
    unique_ptr<IOBuf> newBuf = create(
        std::min(totalCapacity - allocatedCapacity, size_t(maxBufCapacity)));
    allocatedCapacity += newBuf->capacity();
    out->prependChain(std::move(newBuf));
  }

  return out;
}

// Given a desired minimum capacity, return the capacity a real allocation
// would actually provide (accounting for per-buffer overhead).
size_t IOBuf::goodSize(size_t minCapacity, CombinedOption combined) {
  if (combined == CombinedOption::DEFAULT) {
    combined = minCapacity <= kDefaultCombinedBufSize
        ? CombinedOption::COMBINED
        : CombinedOption::SEPARATE;
  }
  size_t overhead;
  if (combined == CombinedOption::COMBINED) {
    overhead = offsetof(HeapFullStorage, align);
  } else {
    // Pad minCapacity to a multiple of 8
    minCapacity = (minCapacity + 7) & ~7;
    overhead = sizeof(SharedInfo);
  }
  size_t goodSize = folly::goodMallocSize(minCapacity + overhead);
  return goodSize - overhead;
}

// TAKE_OWNERSHIP: adopt an existing buffer, freeing it with freeFn on release.
IOBuf::IOBuf(
    TakeOwnershipOp,
    void* buf,
    std::size_t capacity,
    std::size_t offset,
    std::size_t length,
    FreeFunction freeFn,
    void* userData,
    bool freeOnError)
    : next_(this),
      prev_(this),
      data_(static_cast<uint8_t*>(buf) + offset),
      buf_(static_cast<uint8_t*>(buf)),
      length_(length),
      capacity_(capacity),
      flagsAndSharedInfo_(
          packFlagsAndSharedInfo(kFlagFreeSharedInfo, nullptr)) {
  // do not allow only user data without a freeFn
  // since we use that for folly::sizedFree
  DCHECK(!userData || (userData && freeFn));

  // If allocating the SharedInfo throws, optionally free the caller's buffer.
  auto rollback = makeGuard([&] {
    takeOwnershipError(freeOnError, buf, freeFn, userData);
  });
  setSharedInfo(new SharedInfo(freeFn, userData));
  rollback.dismiss();
}

// TAKE_OWNERSHIP + SIZED_FREE: adopt a malloc-allocated buffer of known size
// so it can be released with folly::sizedFree().
IOBuf::IOBuf(
    TakeOwnershipOp,
    SizedFree,
    void* buf,
    std::size_t capacity,
    std::size_t offset,
    std::size_t length,
    bool freeOnError)
    : next_(this),
      prev_(this),
      data_(static_cast<uint8_t*>(buf) + offset),
      buf_(static_cast<uint8_t*>(buf)),
      length_(length),
      capacity_(capacity),
      flagsAndSharedInfo_(
          packFlagsAndSharedInfo(kFlagFreeSharedInfo, nullptr)) {
  auto rollback = makeGuard([&] {
    takeOwnershipError(freeOnError, buf, nullptr, nullptr);
  });
  // userData stores the allocation size (no freeFn => sizedFree path).
  setSharedInfo(new SharedInfo(nullptr, reinterpret_cast<void*>(capacity)));
  rollback.dismiss();

  if (io_buf_alloc_cb && capacity) {
    io_buf_alloc_cb(buf, capacity);
  }
}

// Adopt an existing buffer, placing IOBuf + SharedInfo in one heap segment.
unique_ptr<IOBuf> IOBuf::takeOwnership(
    void* buf,
    std::size_t capacity,
    std::size_t offset,
    std::size_t length,
    FreeFunction freeFn,
    void* userData,
    bool freeOnError,
    TakeOwnershipOption option) {
  if (capacity > kMaxIOBufSize) {
    throw_exception<std::bad_alloc>();
  }

  // do not allow only user data without a freeFn
  // since we use that for folly::sizedFree
  DCHECK(
      !userData || (userData && freeFn) ||
      (userData && !freeFn && (option == TakeOwnershipOption::STORE_SIZE)));

  HeapFullStorage* storage = nullptr;
  auto rollback = makeGuard([&] {
    if (storage) {
      free(storage);
    }
    takeOwnershipError(freeOnError, buf, freeFn, userData);
  });

  size_t requiredStorage = sizeof(HeapFullStorage);
  size_t mallocSize = goodMallocSize(requiredStorage);
  storage = static_cast<HeapFullStorage*>(checkedMalloc(mallocSize));

  new (&storage->hs.prefix)
      HeapPrefix(kIOBufInUse | kSharedInfoInUse, mallocSize);
  new (&storage->shared)
      SharedInfo(freeFn, userData, true /*useHeapFullStorage*/);

  auto result = unique_ptr<IOBuf>(new (&storage->hs.buf) IOBuf(
      InternalConstructor(),
      packFlagsAndSharedInfo(0, &storage->shared),
      static_cast<uint8_t*>(buf),
      capacity,
      static_cast<uint8_t*>(buf) + offset,
      length));

  rollback.dismiss();

  if (io_buf_alloc_cb) {
    io_buf_alloc_cb(storage, mallocSize);
    if (userData && !freeFn && (option == TakeOwnershipOption::STORE_SIZE)) {
      // Even though we did not allocate the buffer, call io_buf_alloc_cb()
      // since we will call io_buf_free_cb() on destruction, and we want these
      // calls to be 1:1.
      io_buf_alloc_cb(buf, capacity);
    }
  }

  return result;
}

// WRAP_BUFFER: reference an externally owned buffer without taking ownership.
IOBuf::IOBuf(WrapBufferOp, const void* buf, std::size_t capacity) noexcept
    : IOBuf(
          InternalConstructor(),
          0,
          // We cast away the const-ness of the buffer here.
          // This is okay since IOBuf users must use unshare() to create a copy
          // of this buffer before writing to the buffer.
          static_cast<uint8_t*>(const_cast<void*>(buf)),
          capacity,
          static_cast<uint8_t*>(const_cast<void*>(buf)),
          capacity) {}

IOBuf::IOBuf(WrapBufferOp op, ByteRange br) noexcept
    : IOBuf(op, br.data(), br.size()) {}

unique_ptr<IOBuf> IOBuf::wrapBuffer(const void* buf, std::size_t capacity) {
  return std::make_unique<IOBuf>(WRAP_BUFFER, buf, capacity);
}

IOBuf IOBuf::wrapBufferAsValue(const void* buf, std::size_t capacity) noexcept {
  return IOBuf(WrapBufferOp::WRAP_BUFFER, buf, capacity);
}

IOBuf::IOBuf() noexcept = default;

// Move constructor: steals other's buffer and (if other was a chain head)
// its entire chain, leaving other empty and unchained.
IOBuf::IOBuf(IOBuf&& other) noexcept
    : data_(other.data_),
      buf_(other.buf_),
      length_(other.length_),
      capacity_(other.capacity_),
      flagsAndSharedInfo_(other.flagsAndSharedInfo_) {
  // Reset other so it is a clean state to be destroyed.
  other.data_ = nullptr;
  other.buf_ = nullptr;
  other.length_ = 0;
  other.capacity_ = 0;
  other.flagsAndSharedInfo_ = 0;

  // If other was part of the chain, assume ownership of the rest of its chain.
  // (It's only valid to perform move assignment on the head of a chain.)
  if (other.next_ != &other) {
    next_ = other.next_;
    next_->prev_ = this;
    other.next_ = &other;

    prev_ = other.prev_;
    prev_->next_ = this;
    other.prev_ = &other;
  }

  // Sanity check to make sure that other is in a valid state to be destroyed.
  DCHECK_EQ(other.prev_, &other);
  DCHECK_EQ(other.next_, &other);
}

// Copy constructor: deep-clone via cloneAsValue() (refcounted, not a memcpy).
IOBuf::IOBuf(const IOBuf& other) {
  *this = other.cloneAsValue();
}

// Internal: wire up all fields directly; used by the factory functions above.
IOBuf::IOBuf(
    InternalConstructor,
    uintptr_t flagsAndSharedInfo,
    uint8_t* buf,
    std::size_t capacity,
    uint8_t* data,
    std::size_t length) noexcept
    : next_(this),
      prev_(this),
      data_(data),
      buf_(buf),
      length_(length),
      capacity_(capacity),
      flagsAndSharedInfo_(flagsAndSharedInfo) {
  assert(data >= buf);
  assert(data + length <= buf + capacity);
  CHECK(!folly::asan_region_is_poisoned(buf, capacity));
}

IOBuf::~IOBuf() {
  // Destroying an IOBuf destroys the entire chain.
  // Users of IOBuf should only explicitly delete the head of any chain.
  // The other elements in the chain will be automatically destroyed.
  while (next_ != this) {
    // Since unlink() returns unique_ptr() and we don't store it,
    // it will automatically delete the unlinked element.
    (void)next_->unlink();
  }

  decrementRefcount();
}

// Move assignment: release our current chain/buffer, then steal other's.
IOBuf& IOBuf::operator=(IOBuf&& other) noexcept {
  if (this == &other) {
    return *this;
  }

  // If we are part of a chain, delete the rest of the chain.
  while (next_ != this) {
    // Since unlink() returns unique_ptr() and we don't store it,
    // it will automatically delete the unlinked element.
    (void)next_->unlink();
  }

  // Decrement our refcount on the current buffer
  decrementRefcount();

  // Take ownership of the other buffer's data
  data_ = other.data_;
  buf_ = other.buf_;
  length_ = other.length_;
  capacity_ = other.capacity_;
  flagsAndSharedInfo_ = other.flagsAndSharedInfo_;
  // Reset other so it is a clean state to be destroyed.
  other.data_ = nullptr;
  other.buf_ = nullptr;
  other.length_ = 0;
  other.capacity_ = 0;
  other.flagsAndSharedInfo_ = 0;

  // If other was part of the chain, assume ownership of the rest of its chain.
  // (It's only valid to perform move assignment on the head of a chain.)
  if (other.next_ != &other) {
    next_ = other.next_;
    next_->prev_ = this;
    other.next_ = &other;

    prev_ = other.prev_;
    prev_->next_ = this;
    other.prev_ = &other;
  }

  // Sanity check to make sure that other is in a valid state to be destroyed.
  DCHECK_EQ(other.prev_, &other);
  DCHECK_EQ(other.next_, &other);

  return *this;
}

IOBuf& IOBuf::operator=(const IOBuf& other) {
  if (this != &other) {
    *this = IOBuf(other);
  }
  return *this;
}

// True if every element in the chain has zero length.
bool IOBuf::empty() const {
  const IOBuf* current = this;
  do {
    if (current->length() != 0) {
      return false;
    }
    current = current->next_;
  } while (current != this);
  return true;
}

// Number of IOBufs in this chain (>= 1).
size_t IOBuf::countChainElements() const {
  size_t numElements = 1;
  for (IOBuf* current = next_; current != this; current = current->next_) {
    ++numElements;
  }
  return numElements;
}

// Sum of length() over the whole chain.
std::size_t IOBuf::computeChainDataLength() const {
  std::size_t fullLength = length_;
  for (IOBuf* current = next_; current != this; current = current->next_) {
    fullLength += current->length_;
  }
  return fullLength;
}

// Sum of capacity() over the whole chain.
std::size_t IOBuf::computeChainCapacity() const {
  std::size_t fullCapacity = capacity_;
  for (IOBuf* current = next_; current != this; current = current->next_) {
    fullCapacity += current->capacity_;
  }
  return fullCapacity;
}

// Splice iobuf's entire chain into this chain, immediately before this
// element (i.e. it becomes the tail when called on the head).
void IOBuf::prependChain(unique_ptr<IOBuf>&& iobuf) {
  // Take ownership of the specified IOBuf
  IOBuf* other = iobuf.release();

  // Remember the pointer to the tail of the other chain
  IOBuf* otherTail = other->prev_;

  // Hook up prev_->next_ to point at the start of the other chain,
  // and other->prev_ to point at prev_
  prev_->next_ = other;
  other->prev_ = prev_;

  // Hook up otherTail->next_ to point at us,
  // and prev_ to point back at otherTail,
  otherTail->next_ = this;
  prev_ = otherTail;
}

// Clone the entire chain (each element shares its buffer with the original).
unique_ptr<IOBuf> IOBuf::clone() const {
  auto tmp = cloneOne();

  for (IOBuf* current = next_; current != this; current = current->next_) {
    tmp->prependChain(current->cloneOne());
  }

  return tmp;
}

// Clone just this element, bumping the buffer refcount.
unique_ptr<IOBuf> IOBuf::cloneOne() const {
  if (SharedInfo* info = sharedInfo()) {
    info->refcount.fetch_add(1, std::memory_order_acq_rel);
  }
  return std::unique_ptr<IOBuf>(new IOBuf(
      InternalConstructor(),
      flagsAndSharedInfo_,
      buf_,
      capacity_,
      data_,
      length_));
}

unique_ptr<IOBuf> IOBuf::cloneCoalesced() const {
  return std::make_unique<IOBuf>(cloneCoalescedAsValue());
}

unique_ptr<IOBuf> IOBuf::cloneCoalescedWithHeadroomTailroom(
    std::size_t newHeadroom, std::size_t newTailroom) const {
  return std::make_unique<IOBuf>(
      cloneCoalescedAsValueWithHeadroomTailroom(newHeadroom, newTailroom));
}

IOBuf IOBuf::cloneAsValue() const {
  auto tmp = cloneOneAsValue();

  for (IOBuf* current = next_; current != this; current = current->next_) {
    tmp.prependChain(current->cloneOne());
  }

  return tmp;
}

IOBuf IOBuf::cloneOneAsValue() const {
  if (SharedInfo* info = sharedInfo()) {
    info->refcount.fetch_add(1, std::memory_order_acq_rel);
  }
  return IOBuf(
      InternalConstructor(),
      flagsAndSharedInfo_,
      buf_,
      capacity_,
      data_,
      length_);
}

IOBuf IOBuf::cloneCoalescedAsValue() const {
  const std::size_t newHeadroom = headroom();
  const std::size_t newTailroom = prev()->tailroom();
  return cloneCoalescedAsValueWithHeadroomTailroom(newHeadroom, newTailroom);
}

// Return a single-element copy of the chain's data with the requested
// head/tailroom; avoids the copy when this is already a suitable single buf.
IOBuf IOBuf::cloneCoalescedAsValueWithHeadroomTailroom(
    std::size_t newHeadroom, std::size_t newTailroom) const {
  if (!isChained() && newHeadroom <= headroom() && newTailroom <= tailroom()) {
    return cloneOneAsValue();
  }
  // Coalesce into newBuf
  const std::size_t newLength = computeChainDataLength();
  const std::size_t newCapacity = newLength + newHeadroom + newTailroom;
  IOBuf newBuf{CREATE, newCapacity};
  newBuf.advance(newHeadroom);

  auto current = this;
  do {
    if (current->length() > 0) {
      DCHECK_NOTNULL(current->data());
      DCHECK_LE(current->length(), newBuf.tailroom());
      memcpy(newBuf.writableTail(), current->data(), current->length());
      newBuf.append(current->length());
    }
    current = current->next();
  } while (current != this);

  DCHECK_EQ(newLength, newBuf.length());
  DCHECK_EQ(newHeadroom, newBuf.headroom());
  DCHECK_LE(newTailroom, newBuf.tailroom());

  return newBuf;
}

// Copy this element's data into a fresh, unshared external buffer, keeping
// the same headroom, and drop our reference on the old buffer.
void IOBuf::unshareOneSlow() {
  // Allocate a new buffer for the data
  uint8_t* buf;
  SharedInfo* sharedInfo;
  std::size_t actualCapacity;
  allocExtBuffer(capacity_, &buf, &sharedInfo, &actualCapacity);

  // Copy the data
  // Maintain the same amount of headroom.  Since we maintained the same
  // minimum capacity we also maintain at least the same amount of tailroom.
  std::size_t headlen = headroom();
  if (length_ > 0) {
    assert(data_ != nullptr);
    memcpy(buf + headlen, data_, length_);
  }

  // Release our reference on the old buffer
  decrementRefcount();
  // Make sure flags are all cleared.
  setFlagsAndSharedInfo(0, sharedInfo);

  // Update the buffer pointers to point to the new buffer
  data_ = buf + headlen;
  buf_ = buf;
}

void IOBuf::unshareChained() {
  // unshareChained() should only be called if we are part of a chain of
  // multiple IOBufs.  The caller should have already verified this.
  assert(isChained());

  IOBuf* current = this;
  while (true) {
    if (current->isSharedOne()) {
      // we have to unshare
      break;
    }

    current = current->next_;
    if (current == this) {
      // None of the IOBufs in the chain are shared,
      // so return without doing anything
      return;
    }
  }

  // We have to unshare.  Let coalesceSlow() do the work.
  coalesceSlow();
}

// Mark every element in the chain as externally shared.
void IOBuf::markExternallyShared() {
  IOBuf* current = this;
  do {
    current->markExternallySharedOne();
    current = current->next_;
  } while (current != this);
}

// Convert every element in the chain to a managed (refcounted) buffer.
void IOBuf::makeManagedChained() {
  assert(isChained());

  IOBuf* current = this;
  while (true) {
    current->makeManagedOne();
    current = current->next_;
    if (current == this) {
      break;
    }
  }
}

// Merge the entire chain into this single element.
void IOBuf::coalesceSlow() {
  // coalesceSlow() should only be called if we are part of a chain of multiple
  // IOBufs.  The caller should have already verified this.
  DCHECK(isChained());

  // Compute the length of the entire chain
  std::size_t newLength = 0;
  IOBuf* end = this;
  do {
    newLength += end->length_;
    end = end->next_;
  } while (end != this);

  coalesceAndReallocate(newLength, end);

  // We should be only element left in the chain now
  DCHECK(!isChained());
}

// Merge just enough of the chain into this element to make at least
// maxLength contiguous bytes available; throws if the chain is too short.
void IOBuf::coalesceSlow(size_t maxLength) {
  // coalesceSlow() should only be called if we are part of a chain of multiple
  // IOBufs.  The caller should have already verified this.
  DCHECK(isChained());
  DCHECK_LT(length_, maxLength);

  // Compute the length of the entire chain
  std::size_t newLength = 0;
  IOBuf* end = this;
  while (true) {
    newLength += end->length_;
    end = end->next_;
    if (newLength >= maxLength) {
      break;
    }
    if (end == this) {
      throw_exception<std::overflow_error>(
          "attempted to coalesce more data than "
          "available");
    }
  }

  coalesceAndReallocate(newLength, end);

  // We should have the requested length now
  DCHECK_GE(length_, maxLength);
}

// Copy the data of the sub-chain [this, end) into one newly allocated
// external buffer with the given head/tailroom, then detach and free the
// consumed chain elements.
void IOBuf::coalesceAndReallocate(
    size_t newHeadroom, size_t newLength, IOBuf* end, size_t newTailroom) {
  std::size_t newCapacity = newLength + newHeadroom + newTailroom;

  // Allocate space for the coalesced buffer.
  // We always convert to an external buffer, even if we happened to be an
  // internal buffer before.
  uint8_t* newBuf;
  SharedInfo* newInfo;
  std::size_t actualCapacity;
  allocExtBuffer(newCapacity, &newBuf, &newInfo, &actualCapacity);

  // Copy the data into the new buffer
  uint8_t* newData = newBuf + newHeadroom;
  uint8_t* p = newData;
  IOBuf* current = this;
  size_t remaining = newLength;
  do {
    if (current->length_ > 0) {
      assert(current->length_ <= remaining);
      assert(current->data_ != nullptr);
      remaining -= current->length_;
      memcpy(p, current->data_, current->length_);
      p += current->length_;
    }
    current = current->next_;
  } while (current != end);
  assert(remaining == 0);

  // Point at the new buffer
  decrementRefcount();

  // Make sure flags are all cleared.
  setFlagsAndSharedInfo(0, newInfo);

  capacity_ = actualCapacity;
  buf_ = newBuf;
  data_ = newData;
  length_ = newLength;

  // Separate from the rest of our chain.
  // Since we don't store the unique_ptr returned by separateChain(),
  // this will immediately delete the returned subchain.
  if (isChained()) {
    (void)separateChain(next_, current->prev_);
  }
}

// Drop one reference on this element's buffer, freeing the buffer (and its
// separately allocated SharedInfo, when applicable) if we were the last user.
void IOBuf::decrementRefcount() noexcept {
  // Externally owned buffers don't have a SharedInfo object and aren't managed
  // by the reference count
  SharedInfo* info = sharedInfo();
  if (!info) {
    return;
  }

  // Avoid doing atomic decrement if the refcount is 1.
  // This is safe, because it means that we're the last reference and destroying
  // the object.  Anything trying to copy it is already undefined behavior.
  if (info->refcount.load(std::memory_order_acquire) > 1) {
    // Decrement the refcount
    uint32_t newcnt = info->refcount.fetch_sub(1, std::memory_order_acq_rel);
    // Note that fetch_sub() returns the value before we decremented.
    // If it is 1, we were the only remaining user; if it is greater there are
    // still other users.
    if (newcnt > 1) {
      return;
    }
  }

  // save the useHeapFullStorage flag here since
  // freeExtBuffer can delete the sharedInfo()
  bool useHeapFullStorage = info->useHeapFullStorage;

  // We were the last user.  Free the buffer
  freeExtBuffer();

  // Free the SharedInfo if it was allocated separately.
  //
  // This is only used by takeOwnership().
  //
  // To avoid this special case handling in decrementRefcount(), we could have
  // takeOwnership() set a custom freeFn() that calls the user's free function
  // then frees the SharedInfo object.  (This would require that
  // takeOwnership() store the user's free function with its allocated
  // SharedInfo object.)  However, handling this specially with a flag seems
  // like it shouldn't be problematic.
if (flags() & kFlagFreeSharedInfo) { delete info; } else { if (useHeapFullStorage) { SharedInfo::releaseStorage(info); } } } void IOBuf::reserveSlow(std::size_t minHeadroom, std::size_t minTailroom) { size_t newCapacity = length_; if (!checked_add(&newCapacity, newCapacity, minHeadroom) || !checked_add(&newCapacity, newCapacity, minTailroom) || newCapacity > kMaxIOBufSize) { // overflow throw_exception<std::bad_alloc>(); } // reserveSlow() is dangerous if anyone else is sharing the buffer, as we may // reallocate and free the original buffer. It should only ever be called if // we are the only user of the buffer. DCHECK(!isSharedOne()); // We'll need to reallocate the buffer. // There are a few options. // - If we have enough total room, move the data around in the buffer // and adjust the data_ pointer. // - If we're using an internal buffer, we'll switch to an external // buffer with enough headroom and tailroom. // - If we have enough headroom (headroom() >= minHeadroom) but not too much // (so we don't waste memory), we can try one of two things, depending on // whether we use jemalloc or not: // - If using jemalloc, we can try to expand in place, avoiding a memcpy() // - If not using jemalloc and we don't have too much to copy, // we'll use realloc() (note that realloc might have to copy // headroom + data + tailroom, see smartRealloc in folly/memory/Malloc.h) // - Otherwise, bite the bullet and reallocate. if (headroom() + tailroom() >= minHeadroom + minTailroom) { uint8_t* newData = writableBuffer() + minHeadroom; memmove(newData, data_, length_); data_ = newData; return; } size_t newAllocatedCapacity = 0; uint8_t* newBuffer = nullptr; std::size_t newHeadroom = 0; std::size_t oldHeadroom = headroom(); // If we have a buffer allocated with malloc and we just need more tailroom, // try to use realloc()/xallocx() to grow the buffer in place. 
SharedInfo* info = sharedInfo(); bool useHeapFullStorage = info && info->useHeapFullStorage; if (info && (info->freeFn == nullptr) && length_ != 0 && oldHeadroom >= minHeadroom) { size_t headSlack = oldHeadroom - minHeadroom; newAllocatedCapacity = goodExtBufferSize(newCapacity + headSlack); if (usingJEMalloc()) { // We assume that tailroom is more useful and more important than // headroom (not least because realloc / xallocx allow us to grow the // buffer at the tail, but not at the head) So, if we have more headroom // than we need, we consider that "wasted". We arbitrarily define "too // much" headroom to be 25% of the capacity. if (headSlack * 4 <= newCapacity) { size_t allocatedCapacity = capacity() + sizeof(SharedInfo); void* p = buf_; if (allocatedCapacity >= jemallocMinInPlaceExpandable) { if (xallocx(p, newAllocatedCapacity, 0, 0) == newAllocatedCapacity) { if (io_buf_free_cb) { io_buf_free_cb(p, reinterpret_cast<size_t>(info->userData)); } newBuffer = static_cast<uint8_t*>(p); newHeadroom = oldHeadroom; // update the userData info->userData = reinterpret_cast<void*>(newAllocatedCapacity); if (io_buf_alloc_cb) { io_buf_alloc_cb(newBuffer, newAllocatedCapacity); } } // if xallocx failed, do nothing, fall back to malloc/memcpy/free } } } else { // Not using jemalloc size_t copySlack = capacity() - length_; if (copySlack * 2 <= length_) { void* p = realloc(buf_, newAllocatedCapacity); if (UNLIKELY(p == nullptr)) { throw_exception<std::bad_alloc>(); } newBuffer = static_cast<uint8_t*>(p); newHeadroom = oldHeadroom; } } } // None of the previous reallocation strategies worked (or we're using // an internal buffer). malloc/copy/free. 
if (newBuffer == nullptr) { newAllocatedCapacity = goodExtBufferSize(newCapacity); newBuffer = static_cast<uint8_t*>(checkedMalloc(newAllocatedCapacity)); if (length_ > 0) { assert(data_ != nullptr); memcpy(newBuffer + minHeadroom, data_, length_); } if (sharedInfo()) { freeExtBuffer(); } newHeadroom = minHeadroom; } std::size_t cap; initExtBuffer(newBuffer, newAllocatedCapacity, &info, &cap); if (flags() & kFlagFreeSharedInfo) { delete sharedInfo(); } else { if (useHeapFullStorage) { SharedInfo::releaseStorage(sharedInfo()); } } setFlagsAndSharedInfo(0, info); capacity_ = cap; buf_ = newBuffer; data_ = newBuffer + newHeadroom; // length_ is unchanged } // The user's free function should never throw. Otherwise we might throw from // the IOBuf destructor. Other code paths like coalesce() also assume that // decrementRefcount() cannot throw. void IOBuf::freeExtBuffer() noexcept { SharedInfo* info = sharedInfo(); DCHECK(info); // save the observerListHead // since the SharedInfo can be freed auto observerListHead = info->observerListHead; info->observerListHead = nullptr; if (info->freeFn) { info->freeFn(buf_, info->userData); } else { // this will invoke free if info->userData is 0 size_t size = reinterpret_cast<size_t>(info->userData); if (size) { if (io_buf_free_cb) { io_buf_free_cb(buf_, size); } folly::sizedFree(buf_, size); } else { free(buf_); } } SharedInfo::invokeAndDeleteEachObserver( observerListHead, [](auto& entry) { entry.afterFreeExtBuffer(); }); if (kIsMobile) { buf_ = nullptr; } } void IOBuf::allocExtBuffer( std::size_t minCapacity, uint8_t** bufReturn, SharedInfo** infoReturn, std::size_t* capacityReturn) { if (minCapacity > kMaxIOBufSize) { throw_exception<std::bad_alloc>(); } size_t mallocSize = goodExtBufferSize(minCapacity); auto buf = static_cast<uint8_t*>(checkedMalloc(mallocSize)); initExtBuffer(buf, mallocSize, infoReturn, capacityReturn); // the userData and the freeFn are nullptr here // just store the mallocSize in userData 
(*infoReturn)->userData = reinterpret_cast<void*>(mallocSize); if (io_buf_alloc_cb) { io_buf_alloc_cb(buf, mallocSize); } *bufReturn = buf; } size_t IOBuf::goodExtBufferSize(std::size_t minCapacity) { if (minCapacity > kMaxIOBufSize) { throw_exception<std::bad_alloc>(); } // Determine how much space we should allocate. We'll store the SharedInfo // for the external buffer just after the buffer itself. (We store it just // after the buffer rather than just before so that the code can still just // use free(buf_) to free the buffer.) size_t minSize = static_cast<size_t>(minCapacity) + sizeof(SharedInfo); // Add room for padding so that the SharedInfo will be aligned on an 8-byte // boundary. minSize = (minSize + 7) & ~7; // Use goodMallocSize() to bump up the capacity to a decent size to request // from malloc, so we can use all of the space that malloc will probably give // us anyway. return goodMallocSize(minSize); } void IOBuf::initExtBuffer( uint8_t* buf, size_t mallocSize, SharedInfo** infoReturn, std::size_t* capacityReturn) { // Find the SharedInfo storage at the end of the buffer // and construct the SharedInfo. uint8_t* infoStart = (buf + mallocSize) - sizeof(SharedInfo); auto sharedInfo = new (infoStart) SharedInfo; *capacityReturn = std::size_t(infoStart - buf); *infoReturn = sharedInfo; } fbstring IOBuf::moveToFbString() { // we need to save useHeapFullStorage and the observerListHead since // sharedInfo() may not be valid after fbstring str bool useHeapFullStorage = false; SharedInfoObserverEntryBase* observerListHead = nullptr; // malloc-allocated buffers are just fine, everything else needs // to be turned into one. 
if (!sharedInfo() || // user owned, not ours to give up sharedInfo()->freeFn || // not malloc()-ed headroom() != 0 || // malloc()-ed block doesn't start at beginning tailroom() == 0 || // no room for NUL terminator isShared() || // shared isChained()) { // chained // We might as well get rid of all head and tailroom if we're going // to reallocate; we need 1 byte for NUL terminator. coalesceAndReallocate(0, computeChainDataLength(), this, 1); } else { auto info = sharedInfo(); if (info) { // if we do not call coalesceAndReallocate // we might need to call SharedInfo::releaseStorage() // and/or SharedInfo::invokeAndDeleteEachObserver() useHeapFullStorage = info->useHeapFullStorage; // save the observerListHead // the coalesceAndReallocate path will call // decrementRefcount and freeExtBuffer if needed // so the observer lis notification is needed here observerListHead = info->observerListHead; info->observerListHead = nullptr; } } // Ensure NUL terminated *writableTail() = 0; fbstring str( reinterpret_cast<char*>(writableData()), length(), capacity(), AcquireMallocatedString()); if (io_buf_free_cb && sharedInfo() && sharedInfo()->userData) { io_buf_free_cb( writableData(), reinterpret_cast<size_t>(sharedInfo()->userData)); } SharedInfo::invokeAndDeleteEachObserver( observerListHead, [](auto& entry) { entry.afterReleaseExtBuffer(); }); if (flags() & kFlagFreeSharedInfo) { delete sharedInfo(); } else { if (useHeapFullStorage) { SharedInfo::releaseStorage(sharedInfo()); } } // Reset to a state where we can be deleted cleanly flagsAndSharedInfo_ = 0; buf_ = nullptr; clear(); return str; } IOBuf::Iterator IOBuf::cbegin() const { return Iterator(this, this); } IOBuf::Iterator IOBuf::cend() const { return Iterator(nullptr, nullptr); } folly::fbvector<struct iovec> IOBuf::getIov() const { folly::fbvector<struct iovec> iov; iov.reserve(countChainElements()); appendToIov(&iov); return iov; } void IOBuf::appendToIov(folly::fbvector<struct iovec>* iov) const { IOBuf const* p = 
this; do { // some code can get confused by empty iovs, so skip them if (p->length() > 0) { iov->push_back({(void*)p->data(), folly::to<size_t>(p->length())}); } p = p->next(); } while (p != this); } unique_ptr<IOBuf> IOBuf::wrapIov(const iovec* vec, size_t count) { unique_ptr<IOBuf> result = nullptr; for (size_t i = 0; i < count; ++i) { size_t len = vec[i].iov_len; void* data = vec[i].iov_base; if (len > 0) { auto buf = wrapBuffer(data, len); if (!result) { result = std::move(buf); } else { result->prependChain(std::move(buf)); } } } if (UNLIKELY(result == nullptr)) { return create(0); } return result; } std::unique_ptr<IOBuf> IOBuf::takeOwnershipIov( const iovec* vec, size_t count, FreeFunction freeFn, void* userData, bool freeOnError) { unique_ptr<IOBuf> result = nullptr; for (size_t i = 0; i < count; ++i) { size_t len = vec[i].iov_len; void* data = vec[i].iov_base; if (len > 0) { auto buf = takeOwnership(data, len, freeFn, userData, freeOnError); if (!result) { result = std::move(buf); } else { result->prependChain(std::move(buf)); } } } if (UNLIKELY(result == nullptr)) { return create(0); } return result; } IOBuf::FillIovResult IOBuf::fillIov(struct iovec* iov, size_t len) const { IOBuf const* p = this; size_t i = 0; size_t totalBytes = 0; while (i < len) { // some code can get confused by empty iovs, so skip them if (p->length() > 0) { iov[i].iov_base = const_cast<uint8_t*>(p->data()); iov[i].iov_len = p->length(); totalBytes += p->length(); i++; } p = p->next(); if (p == this) { return {i, totalBytes}; } } return {0, 0}; } uint32_t IOBuf::approximateShareCountOne() const { if (UNLIKELY(!sharedInfo())) { return 1U; } return sharedInfo()->refcount.load(std::memory_order_acquire); } size_t IOBufHash::operator()(const IOBuf& buf) const noexcept { folly::hash::SpookyHashV2 hasher; hasher.Init(0, 0); io::Cursor cursor(&buf); for (;;) { auto b = cursor.peekBytes(); if (b.empty()) { break; } hasher.Update(b.data(), b.size()); cursor.skip(b.size()); } uint64_t h1; 
uint64_t h2; hasher.Final(&h1, &h2); return static_cast<std::size_t>(h1); } ordering IOBufCompare::impl(const IOBuf& a, const IOBuf& b) const noexcept { io::Cursor ca(&a); io::Cursor cb(&b); for (;;) { auto ba = ca.peekBytes(); auto bb = cb.peekBytes(); if (ba.empty() || bb.empty()) { return to_ordering(int(bb.empty()) - int(ba.empty())); } const size_t n = std::min(ba.size(), bb.size()); DCHECK_GT(n, 0u); const ordering r = to_ordering(std::memcmp(ba.data(), bb.data(), n)); if (r != ordering::eq) { return r; } // Cursor::skip() may throw if n is too large, but n is not too large here ca.skip(n); cb.skip(n); } } } // namespace folly
void IOBuf::reserveSlow(std::size_t minHeadroom, std::size_t minTailroom) { size_t newCapacity = (size_t)length_ + minHeadroom + minTailroom; DCHECK_LT(newCapacity, UINT32_MAX); // reserveSlow() is dangerous if anyone else is sharing the buffer, as we may // reallocate and free the original buffer. It should only ever be called if // we are the only user of the buffer. DCHECK(!isSharedOne()); // We'll need to reallocate the buffer. // There are a few options. // - If we have enough total room, move the data around in the buffer // and adjust the data_ pointer. // - If we're using an internal buffer, we'll switch to an external // buffer with enough headroom and tailroom. // - If we have enough headroom (headroom() >= minHeadroom) but not too much // (so we don't waste memory), we can try one of two things, depending on // whether we use jemalloc or not: // - If using jemalloc, we can try to expand in place, avoiding a memcpy() // - If not using jemalloc and we don't have too much to copy, // we'll use realloc() (note that realloc might have to copy // headroom + data + tailroom, see smartRealloc in folly/memory/Malloc.h) // - Otherwise, bite the bullet and reallocate. if (headroom() + tailroom() >= minHeadroom + minTailroom) { uint8_t* newData = writableBuffer() + minHeadroom; memmove(newData, data_, length_); data_ = newData; return; } size_t newAllocatedCapacity = 0; uint8_t* newBuffer = nullptr; std::size_t newHeadroom = 0; std::size_t oldHeadroom = headroom(); // If we have a buffer allocated with malloc and we just need more tailroom, // try to use realloc()/xallocx() to grow the buffer in place. 
SharedInfo* info = sharedInfo(); bool useHeapFullStorage = info && info->useHeapFullStorage; if (info && (info->freeFn == nullptr) && length_ != 0 && oldHeadroom >= minHeadroom) { size_t headSlack = oldHeadroom - minHeadroom; newAllocatedCapacity = goodExtBufferSize(newCapacity + headSlack); if (usingJEMalloc()) { // We assume that tailroom is more useful and more important than // headroom (not least because realloc / xallocx allow us to grow the // buffer at the tail, but not at the head) So, if we have more headroom // than we need, we consider that "wasted". We arbitrarily define "too // much" headroom to be 25% of the capacity. if (headSlack * 4 <= newCapacity) { size_t allocatedCapacity = capacity() + sizeof(SharedInfo); void* p = buf_; if (allocatedCapacity >= jemallocMinInPlaceExpandable) { if (xallocx(p, newAllocatedCapacity, 0, 0) == newAllocatedCapacity) { if (io_buf_free_cb) { io_buf_free_cb(p, reinterpret_cast<size_t>(info->userData)); } newBuffer = static_cast<uint8_t*>(p); newHeadroom = oldHeadroom; // update the userData info->userData = reinterpret_cast<void*>(newAllocatedCapacity); if (io_buf_alloc_cb) { io_buf_alloc_cb(newBuffer, newAllocatedCapacity); } } // if xallocx failed, do nothing, fall back to malloc/memcpy/free } } } else { // Not using jemalloc size_t copySlack = capacity() - length_; if (copySlack * 2 <= length_) { void* p = realloc(buf_, newAllocatedCapacity); if (UNLIKELY(p == nullptr)) { throw_exception<std::bad_alloc>(); } newBuffer = static_cast<uint8_t*>(p); newHeadroom = oldHeadroom; } } } // None of the previous reallocation strategies worked (or we're using // an internal buffer). malloc/copy/free. 
if (newBuffer == nullptr) { newAllocatedCapacity = goodExtBufferSize(newCapacity); newBuffer = static_cast<uint8_t*>(checkedMalloc(newAllocatedCapacity)); if (length_ > 0) { assert(data_ != nullptr); memcpy(newBuffer + minHeadroom, data_, length_); } if (sharedInfo()) { freeExtBuffer(); } newHeadroom = minHeadroom; } std::size_t cap; initExtBuffer(newBuffer, newAllocatedCapacity, &info, &cap); if (flags() & kFlagFreeSharedInfo) { delete sharedInfo(); } else { if (useHeapFullStorage) { SharedInfo::releaseStorage(sharedInfo()); } } setFlagsAndSharedInfo(0, info); capacity_ = cap; buf_ = newBuffer; data_ = newBuffer + newHeadroom; // length_ is unchanged }
void IOBuf::reserveSlow(std::size_t minHeadroom, std::size_t minTailroom) { size_t newCapacity = length_; if (!checked_add(&newCapacity, newCapacity, minHeadroom) || !checked_add(&newCapacity, newCapacity, minTailroom) || newCapacity > kMaxIOBufSize) { // overflow throw_exception<std::bad_alloc>(); } // reserveSlow() is dangerous if anyone else is sharing the buffer, as we may // reallocate and free the original buffer. It should only ever be called if // we are the only user of the buffer. DCHECK(!isSharedOne()); // We'll need to reallocate the buffer. // There are a few options. // - If we have enough total room, move the data around in the buffer // and adjust the data_ pointer. // - If we're using an internal buffer, we'll switch to an external // buffer with enough headroom and tailroom. // - If we have enough headroom (headroom() >= minHeadroom) but not too much // (so we don't waste memory), we can try one of two things, depending on // whether we use jemalloc or not: // - If using jemalloc, we can try to expand in place, avoiding a memcpy() // - If not using jemalloc and we don't have too much to copy, // we'll use realloc() (note that realloc might have to copy // headroom + data + tailroom, see smartRealloc in folly/memory/Malloc.h) // - Otherwise, bite the bullet and reallocate. if (headroom() + tailroom() >= minHeadroom + minTailroom) { uint8_t* newData = writableBuffer() + minHeadroom; memmove(newData, data_, length_); data_ = newData; return; } size_t newAllocatedCapacity = 0; uint8_t* newBuffer = nullptr; std::size_t newHeadroom = 0; std::size_t oldHeadroom = headroom(); // If we have a buffer allocated with malloc and we just need more tailroom, // try to use realloc()/xallocx() to grow the buffer in place. 
SharedInfo* info = sharedInfo(); bool useHeapFullStorage = info && info->useHeapFullStorage; if (info && (info->freeFn == nullptr) && length_ != 0 && oldHeadroom >= minHeadroom) { size_t headSlack = oldHeadroom - minHeadroom; newAllocatedCapacity = goodExtBufferSize(newCapacity + headSlack); if (usingJEMalloc()) { // We assume that tailroom is more useful and more important than // headroom (not least because realloc / xallocx allow us to grow the // buffer at the tail, but not at the head) So, if we have more headroom // than we need, we consider that "wasted". We arbitrarily define "too // much" headroom to be 25% of the capacity. if (headSlack * 4 <= newCapacity) { size_t allocatedCapacity = capacity() + sizeof(SharedInfo); void* p = buf_; if (allocatedCapacity >= jemallocMinInPlaceExpandable) { if (xallocx(p, newAllocatedCapacity, 0, 0) == newAllocatedCapacity) { if (io_buf_free_cb) { io_buf_free_cb(p, reinterpret_cast<size_t>(info->userData)); } newBuffer = static_cast<uint8_t*>(p); newHeadroom = oldHeadroom; // update the userData info->userData = reinterpret_cast<void*>(newAllocatedCapacity); if (io_buf_alloc_cb) { io_buf_alloc_cb(newBuffer, newAllocatedCapacity); } } // if xallocx failed, do nothing, fall back to malloc/memcpy/free } } } else { // Not using jemalloc size_t copySlack = capacity() - length_; if (copySlack * 2 <= length_) { void* p = realloc(buf_, newAllocatedCapacity); if (UNLIKELY(p == nullptr)) { throw_exception<std::bad_alloc>(); } newBuffer = static_cast<uint8_t*>(p); newHeadroom = oldHeadroom; } } } // None of the previous reallocation strategies worked (or we're using // an internal buffer). malloc/copy/free. 
if (newBuffer == nullptr) { newAllocatedCapacity = goodExtBufferSize(newCapacity); newBuffer = static_cast<uint8_t*>(checkedMalloc(newAllocatedCapacity)); if (length_ > 0) { assert(data_ != nullptr); memcpy(newBuffer + minHeadroom, data_, length_); } if (sharedInfo()) { freeExtBuffer(); } newHeadroom = minHeadroom; } std::size_t cap; initExtBuffer(newBuffer, newAllocatedCapacity, &info, &cap); if (flags() & kFlagFreeSharedInfo) { delete sharedInfo(); } else { if (useHeapFullStorage) { SharedInfo::releaseStorage(sharedInfo()); } } setFlagsAndSharedInfo(0, info); capacity_ = cap; buf_ = newBuffer; data_ = newBuffer + newHeadroom; // length_ is unchanged }
{'added': [(26, '#include <limits>'), (36, '#include <folly/lang/CheckedMath.h>'), (89, ' kDefaultCombinedBufSize = 1024,'), (90, ' kMaxIOBufSize = std::numeric_limits<size_t>::max() >> 1,'), (193, ' if (size > kMaxIOBufSize) {'), (194, ' throw_exception<std::bad_alloc>();'), (195, ' }'), (306, ' if (capacity > kMaxIOBufSize) {'), (307, ' throw_exception<std::bad_alloc>();'), (308, ' }'), (309, ''), (341, ' if (capacity > kMaxIOBufSize) {'), (342, ' throw_exception<std::bad_alloc>();'), (343, ' }'), (344, ''), (473, ' if (capacity > kMaxIOBufSize) {'), (474, ' throw_exception<std::bad_alloc>();'), (475, ' }'), (476, ''), (1027, ' size_t newCapacity = length_;'), (1028, ' if (!checked_add(&newCapacity, newCapacity, minHeadroom) ||'), (1029, ' !checked_add(&newCapacity, newCapacity, minTailroom) ||'), (1030, ' newCapacity > kMaxIOBufSize) {'), (1031, ' // overflow'), (1032, ' throw_exception<std::bad_alloc>();'), (1033, ' }'), (1184, ' if (minCapacity > kMaxIOBufSize) {'), (1185, ' throw_exception<std::bad_alloc>();'), (1186, ' }'), (1187, ''), (1203, ' if (minCapacity > kMaxIOBufSize) {'), (1204, ' throw_exception<std::bad_alloc>();'), (1205, ' }'), (1206, '')], 'deleted': [(87, ' kDefaultCombinedBufSize = 1024'), (1009, ' size_t newCapacity = (size_t)length_ + minHeadroom + minTailroom;'), (1010, ' DCHECK_LT(newCapacity, UINT32_MAX);')]}
34
3
1,006
6,446
76
500
20
https://github.com/facebook/folly
CVE-2021-24036
CWE-787
2,285
gridfs.c
C
gridfile_read
/* gridfs.c */ /* Copyright 2009-2012 10gen Inc. * * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. * You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. */ #include "gridfs.h" #include <stdio.h> #include <stdlib.h> #include <string.h> #include <assert.h> MONGO_EXPORT gridfs* gridfs_create( void ) { return (gridfs*)bson_malloc(sizeof(gridfs)); } MONGO_EXPORT void gridfs_dispose(gridfs* gfs) { free(gfs); } MONGO_EXPORT gridfile* gridfile_create( void ) { return (gridfile*)bson_malloc(sizeof(gridfile)); } MONGO_EXPORT void gridfile_dispose(gridfile* gf) { free(gf); } MONGO_EXPORT void gridfile_get_descriptor(gridfile* gf, bson* out) { *out = *gf->meta; } static bson *chunk_new( bson_oid_t id, int chunkNumber, const char *data, int len ) { bson *b = bson_malloc( sizeof( bson ) ); bson_init( b ); bson_append_oid( b, "files_id", &id ); bson_append_int( b, "n", chunkNumber ); bson_append_binary( b, "data", BSON_BIN_BINARY, data, len ); bson_finish( b ); return b; } static void chunk_free( bson *oChunk ) { bson_destroy( oChunk ); bson_free( oChunk ); } int gridfs_init( mongo *client, const char *dbname, const char *prefix, gridfs *gfs ) { int options; bson b; bson_bool_t success; gfs->client = client; /* Allocate space to own the dbname */ gfs->dbname = ( const char * )bson_malloc( strlen( dbname )+1 ); strcpy( ( char * )gfs->dbname, dbname ); /* Allocate space to own the prefix */ if ( prefix == NULL ) prefix = "fs"; gfs->prefix = ( const char * )bson_malloc( strlen( prefix )+1 ); strcpy( ( char * )gfs->prefix, prefix ); /* Allocate space to 
own files_ns */ gfs->files_ns = ( const char * ) bson_malloc ( strlen( prefix )+strlen( dbname )+strlen( ".files" )+2 ); strcpy( ( char * )gfs->files_ns, dbname ); strcat( ( char * )gfs->files_ns, "." ); strcat( ( char * )gfs->files_ns, prefix ); strcat( ( char * )gfs->files_ns, ".files" ); /* Allocate space to own chunks_ns */ gfs->chunks_ns = ( const char * ) bson_malloc( strlen( prefix ) + strlen( dbname ) + strlen( ".chunks" ) + 2 ); strcpy( ( char * )gfs->chunks_ns, dbname ); strcat( ( char * )gfs->chunks_ns, "." ); strcat( ( char * )gfs->chunks_ns, prefix ); strcat( ( char * )gfs->chunks_ns, ".chunks" ); bson_init( &b ); bson_append_int( &b, "filename", 1 ); bson_finish( &b ); options = 0; success = ( mongo_create_index( gfs->client, gfs->files_ns, &b, options, NULL ) == MONGO_OK ); bson_destroy( &b ); if ( !success ) { bson_free( ( char * )gfs->dbname ); bson_free( ( char * )gfs->prefix ); bson_free( ( char * )gfs->files_ns ); bson_free( ( char * )gfs->chunks_ns ); return MONGO_ERROR; } bson_init( &b ); bson_append_int( &b, "files_id", 1 ); bson_append_int( &b, "n", 1 ); bson_finish( &b ); options = MONGO_INDEX_UNIQUE; success = ( mongo_create_index( gfs->client, gfs->chunks_ns, &b, options, NULL ) == MONGO_OK ); bson_destroy( &b ); if ( !success ) { bson_free( ( char * )gfs->dbname ); bson_free( ( char * )gfs->prefix ); bson_free( ( char * )gfs->files_ns ); bson_free( ( char * )gfs->chunks_ns ); return MONGO_ERROR; } return MONGO_OK; } MONGO_EXPORT void gridfs_destroy( gridfs *gfs ) { if ( gfs == NULL ) return; if ( gfs->dbname ) bson_free( ( char * )gfs->dbname ); if ( gfs->prefix ) bson_free( ( char * )gfs->prefix ); if ( gfs->files_ns ) bson_free( ( char * )gfs->files_ns ); if ( gfs->chunks_ns ) bson_free( ( char * )gfs->chunks_ns ); } static int gridfs_insert_file( gridfs *gfs, const char *name, const bson_oid_t id, gridfs_offset length, const char *contenttype ) { bson command; bson ret; bson res; bson_iterator it; int result; int64_t d; /* Check run 
md5 */ bson_init( &command ); bson_append_oid( &command, "filemd5", &id ); bson_append_string( &command, "root", gfs->prefix ); bson_finish( &command ); result = mongo_run_command( gfs->client, gfs->dbname, &command, &res ); bson_destroy( &command ); if (result != MONGO_OK) return result; /* Create and insert BSON for file metadata */ bson_init( &ret ); bson_append_oid( &ret, "_id", &id ); if ( name != NULL && *name != '\0' ) { bson_append_string( &ret, "filename", name ); } bson_append_long( &ret, "length", length ); bson_append_int( &ret, "chunkSize", DEFAULT_CHUNK_SIZE ); d = ( bson_date_t )1000*time( NULL ); bson_append_date( &ret, "uploadDate", d); bson_find( &it, &res, "md5" ); bson_append_string( &ret, "md5", bson_iterator_string( &it ) ); bson_destroy( &res ); if ( contenttype != NULL && *contenttype != '\0' ) { bson_append_string( &ret, "contentType", contenttype ); } bson_finish( &ret ); result = mongo_insert( gfs->client, gfs->files_ns, &ret, NULL ); bson_destroy( &ret ); return result; } MONGO_EXPORT int gridfs_store_buffer( gridfs *gfs, const char *data, gridfs_offset length, const char *remotename, const char *contenttype ) { char const *end = data + length; const char *data_ptr = data; bson_oid_t id; int chunkNumber = 0; int chunkLen; bson *oChunk; /* Large files Assertion */ /* assert( length <= 0xffffffff ); */ /* Generate and append an oid*/ bson_oid_gen( &id ); /* Insert the file's data chunk by chunk */ while ( data_ptr < end ) { chunkLen = DEFAULT_CHUNK_SIZE < ( unsigned int )( end - data_ptr ) ? 
DEFAULT_CHUNK_SIZE : ( unsigned int )( end - data_ptr ); oChunk = chunk_new( id, chunkNumber, data_ptr, chunkLen ); mongo_insert( gfs->client, gfs->chunks_ns, oChunk, NULL ); chunk_free( oChunk ); chunkNumber++; data_ptr += chunkLen; } /* Inserts file's metadata */ return gridfs_insert_file( gfs, remotename, id, length, contenttype ); } MONGO_EXPORT void gridfile_writer_init( gridfile *gfile, gridfs *gfs, const char *remote_name, const char *content_type ) { gfile->gfs = gfs; bson_oid_gen( &( gfile->id ) ); gfile->chunk_num = 0; gfile->length = 0; gfile->pending_len = 0; gfile->pending_data = NULL; gfile->remote_name = ( char * )bson_malloc( strlen( remote_name ) + 1 ); strcpy( ( char * )gfile->remote_name, remote_name ); gfile->content_type = ( char * )bson_malloc( strlen( content_type ) + 1 ); strcpy( ( char * )gfile->content_type, content_type ); } MONGO_EXPORT void gridfile_write_buffer( gridfile *gfile, const char *data, gridfs_offset length ) { int bytes_left = 0; int data_partial_len = 0; int chunks_to_write = 0; char *buffer; bson *oChunk; gridfs_offset to_write = length + gfile->pending_len; if ( to_write < DEFAULT_CHUNK_SIZE ) { /* Less than one chunk to write */ if( gfile->pending_data ) { gfile->pending_data = ( char * )bson_realloc( ( void * )gfile->pending_data, gfile->pending_len + to_write ); memcpy( gfile->pending_data + gfile->pending_len, data, length ); } else if ( to_write > 0 ) { gfile->pending_data = ( char * )bson_malloc( to_write ); memcpy( gfile->pending_data, data, length ); } gfile->pending_len += length; } else { /* At least one chunk of data to write */ chunks_to_write = to_write / DEFAULT_CHUNK_SIZE; bytes_left = to_write % DEFAULT_CHUNK_SIZE; /* If there's a pending chunk to be written, we need to combine * the buffer provided up to DEFAULT_CHUNK_SIZE. 
*/ if ( gfile->pending_len > 0 ) { data_partial_len = DEFAULT_CHUNK_SIZE - gfile->pending_len; buffer = ( char * )bson_malloc( DEFAULT_CHUNK_SIZE ); memcpy( buffer, gfile->pending_data, gfile->pending_len ); memcpy( buffer + gfile->pending_len, data, data_partial_len ); oChunk = chunk_new( gfile->id, gfile->chunk_num, buffer, DEFAULT_CHUNK_SIZE ); mongo_insert( gfile->gfs->client, gfile->gfs->chunks_ns, oChunk, NULL ); chunk_free( oChunk ); gfile->chunk_num++; gfile->length += DEFAULT_CHUNK_SIZE; data += data_partial_len; chunks_to_write--; bson_free( buffer ); } while( chunks_to_write > 0 ) { oChunk = chunk_new( gfile->id, gfile->chunk_num, data, DEFAULT_CHUNK_SIZE ); mongo_insert( gfile->gfs->client, gfile->gfs->chunks_ns, oChunk, NULL ); chunk_free( oChunk ); gfile->chunk_num++; chunks_to_write--; gfile->length += DEFAULT_CHUNK_SIZE; data += DEFAULT_CHUNK_SIZE; } bson_free( gfile->pending_data ); /* If there are any leftover bytes, store them as pending data. */ if( bytes_left == 0 ) gfile->pending_data = NULL; else { gfile->pending_data = ( char * )bson_malloc( bytes_left ); memcpy( gfile->pending_data, data, bytes_left ); } gfile->pending_len = bytes_left; } } MONGO_EXPORT int gridfile_writer_done( gridfile *gfile ) { /* write any remaining pending chunk data. 
* pending data will always take up less than one chunk */ bson *oChunk; int response; if( gfile->pending_data ) { oChunk = chunk_new( gfile->id, gfile->chunk_num, gfile->pending_data, gfile->pending_len ); mongo_insert( gfile->gfs->client, gfile->gfs->chunks_ns, oChunk, NULL ); chunk_free( oChunk ); bson_free( gfile->pending_data ); gfile->length += gfile->pending_len; } /* insert into files collection */ response = gridfs_insert_file( gfile->gfs, gfile->remote_name, gfile->id, gfile->length, gfile->content_type ); bson_free( gfile->remote_name ); bson_free( gfile->content_type ); return response; } int gridfs_store_file( gridfs *gfs, const char *filename, const char *remotename, const char *contenttype ) { char buffer[DEFAULT_CHUNK_SIZE]; FILE *fd; bson_oid_t id; int chunkNumber = 0; gridfs_offset length = 0; gridfs_offset chunkLen = 0; bson *oChunk; /* Open the file and the correct stream */ if ( strcmp( filename, "-" ) == 0 ) fd = stdin; else { fd = fopen( filename, "rb" ); if (fd == NULL) return MONGO_ERROR; } /* Generate and append an oid*/ bson_oid_gen( &id ); /* Insert the file chunk by chunk */ chunkLen = fread( buffer, 1, DEFAULT_CHUNK_SIZE, fd ); do { oChunk = chunk_new( id, chunkNumber, buffer, chunkLen ); mongo_insert( gfs->client, gfs->chunks_ns, oChunk, NULL ); chunk_free( oChunk ); length += chunkLen; chunkNumber++; chunkLen = fread( buffer, 1, DEFAULT_CHUNK_SIZE, fd ); } while ( chunkLen != 0 ); /* Close the file stream */ if ( fd != stdin ) fclose( fd ); /* Large files Assertion */ /* assert(length <= 0xffffffff); */ /* Optional Remote Name */ if ( remotename == NULL || *remotename == '\0' ) { remotename = filename; } /* Inserts file's metadata */ return gridfs_insert_file( gfs, remotename, id, length, contenttype ); } MONGO_EXPORT void gridfs_remove_filename( gridfs *gfs, const char *filename ) { bson query; mongo_cursor *files; bson file; bson_iterator it; bson_oid_t id; bson b; bson_init( &query ); bson_append_string( &query, "filename", 
filename ); bson_finish( &query ); files = mongo_find( gfs->client, gfs->files_ns, &query, NULL, 0, 0, 0 ); bson_destroy( &query ); /* Remove each file and it's chunks from files named filename */ while ( mongo_cursor_next( files ) == MONGO_OK ) { file = files->current; bson_find( &it, &file, "_id" ); id = *bson_iterator_oid( &it ); /* Remove the file with the specified id */ bson_init( &b ); bson_append_oid( &b, "_id", &id ); bson_finish( &b ); mongo_remove( gfs->client, gfs->files_ns, &b, NULL ); bson_destroy( &b ); /* Remove all chunks from the file with the specified id */ bson_init( &b ); bson_append_oid( &b, "files_id", &id ); bson_finish( &b ); mongo_remove( gfs->client, gfs->chunks_ns, &b, NULL ); bson_destroy( &b ); } mongo_cursor_destroy( files ); } int gridfs_find_query( gridfs *gfs, bson *query, gridfile *gfile ) { bson uploadDate; bson finalQuery; bson out; int i; bson_init( &uploadDate ); bson_append_int( &uploadDate, "uploadDate", -1 ); bson_finish( &uploadDate ); bson_init( &finalQuery ); bson_append_bson( &finalQuery, "query", query ); bson_append_bson( &finalQuery, "orderby", &uploadDate ); bson_finish( &finalQuery ); i = ( mongo_find_one( gfs->client, gfs->files_ns, &finalQuery, NULL, &out ) == MONGO_OK ); bson_destroy( &uploadDate ); bson_destroy( &finalQuery ); if ( !i ) return MONGO_ERROR; else { gridfile_init( gfs, &out, gfile ); bson_destroy( &out ); return MONGO_OK; } } int gridfs_find_filename( gridfs *gfs, const char *filename, gridfile *gfile ) { bson query; int i; bson_init( &query ); bson_append_string( &query, "filename", filename ); bson_finish( &query ); i = gridfs_find_query( gfs, &query, gfile ); bson_destroy( &query ); return i; } int gridfile_init( gridfs *gfs, bson *meta, gridfile *gfile ) { gfile->gfs = gfs; gfile->pos = 0; gfile->meta = ( bson * )bson_malloc( sizeof( bson ) ); if ( gfile->meta == NULL ) return MONGO_ERROR; bson_copy( gfile->meta, meta ); return MONGO_OK; } MONGO_EXPORT void gridfile_destroy( gridfile *gfile ) 
{ bson_destroy( gfile->meta ); bson_free( gfile->meta ); } bson_bool_t gridfile_exists( gridfile *gfile ) { return ( bson_bool_t )( gfile != NULL && gfile->meta != NULL ); } MONGO_EXPORT const char *gridfile_get_filename( gridfile *gfile ) { bson_iterator it; bson_find( &it, gfile->meta, "filename" ); return bson_iterator_string( &it ); } MONGO_EXPORT int gridfile_get_chunksize( gridfile *gfile ) { bson_iterator it; bson_find( &it, gfile->meta, "chunkSize" ); return bson_iterator_int( &it ); } MONGO_EXPORT gridfs_offset gridfile_get_contentlength( gridfile *gfile ) { bson_iterator it; bson_find( &it, gfile->meta, "length" ); if( bson_iterator_type( &it ) == BSON_INT ) return ( gridfs_offset )bson_iterator_int( &it ); else return ( gridfs_offset )bson_iterator_long( &it ); } MONGO_EXPORT const char *gridfile_get_contenttype( gridfile *gfile ) { bson_iterator it; if ( bson_find( &it, gfile->meta, "contentType" ) ) return bson_iterator_string( &it ); else return NULL; } MONGO_EXPORT bson_date_t gridfile_get_uploaddate( gridfile *gfile ) { bson_iterator it; bson_find( &it, gfile->meta, "uploadDate" ); return bson_iterator_date( &it ); } MONGO_EXPORT const char *gridfile_get_md5( gridfile *gfile ) { bson_iterator it; bson_find( &it, gfile->meta, "md5" ); return bson_iterator_string( &it ); } const char *gridfile_get_field( gridfile *gfile, const char *name ) { bson_iterator it; bson_find( &it, gfile->meta, name ); return bson_iterator_value( &it ); } bson_bool_t gridfile_get_boolean( gridfile *gfile, const char *name ) { bson_iterator it; bson_find( &it, gfile->meta, name ); return bson_iterator_bool( &it ); } MONGO_EXPORT void gridfile_get_metadata( gridfile *gfile, bson* out ) { bson_iterator it; if ( bson_find( &it, gfile->meta, "metadata" ) ) bson_iterator_subobject( &it, out ); else bson_empty( out ); } MONGO_EXPORT int gridfile_get_numchunks( gridfile *gfile ) { bson_iterator it; gridfs_offset length; gridfs_offset chunkSize; double numchunks; bson_find( &it, 
gfile->meta, "length" ); if( bson_iterator_type( &it ) == BSON_INT ) length = ( gridfs_offset )bson_iterator_int( &it ); else length = ( gridfs_offset )bson_iterator_long( &it ); bson_find( &it, gfile->meta, "chunkSize" ); chunkSize = bson_iterator_int( &it ); numchunks = ( ( double )length/( double )chunkSize ); return ( numchunks - ( int )numchunks > 0 ) ? ( int )( numchunks+1 ) : ( int )( numchunks ); } MONGO_EXPORT void gridfile_get_chunk( gridfile *gfile, int n, bson* out ) { bson query; bson_iterator it; bson_oid_t id; int result; bson_init( &query ); bson_find( &it, gfile->meta, "_id" ); id = *bson_iterator_oid( &it ); bson_append_oid( &query, "files_id", &id ); bson_append_int( &query, "n", n ); bson_finish( &query ); result = (mongo_find_one(gfile->gfs->client, gfile->gfs->chunks_ns, &query, NULL, out ) == MONGO_OK ); bson_destroy( &query ); if (!result) { bson empty; bson_empty(&empty); bson_copy(out, &empty); } } MONGO_EXPORT mongo_cursor *gridfile_get_chunks( gridfile *gfile, int start, int size ) { bson_iterator it; bson_oid_t id; bson gte; bson query; bson orderby; bson command; mongo_cursor *cursor; bson_find( &it, gfile->meta, "_id" ); id = *bson_iterator_oid( &it ); bson_init( &query ); bson_append_oid( &query, "files_id", &id ); if ( size == 1 ) { bson_append_int( &query, "n", start ); } else { bson_init( &gte ); bson_append_int( &gte, "$gte", start ); bson_finish( &gte ); bson_append_bson( &query, "n", &gte ); bson_destroy( &gte ); } bson_finish( &query ); bson_init( &orderby ); bson_append_int( &orderby, "n", 1 ); bson_finish( &orderby ); bson_init( &command ); bson_append_bson( &command, "query", &query ); bson_append_bson( &command, "orderby", &orderby ); bson_finish( &command ); cursor = mongo_find( gfile->gfs->client, gfile->gfs->chunks_ns, &command, NULL, size, 0, 0 ); bson_destroy( &command ); bson_destroy( &query ); bson_destroy( &orderby ); return cursor; } gridfs_offset gridfile_write_file( gridfile *gfile, FILE *stream ) { int i; 
size_t len; bson chunk; bson_iterator it; const char *data; const int num = gridfile_get_numchunks( gfile ); for ( i=0; i<num; i++ ) { gridfile_get_chunk( gfile, i, &chunk ); bson_find( &it, &chunk, "data" ); len = bson_iterator_bin_len( &it ); data = bson_iterator_bin_data( &it ); fwrite( data, sizeof( char ), len, stream ); bson_destroy( &chunk ); } return gridfile_get_contentlength( gfile ); } MONGO_EXPORT gridfs_offset gridfile_read( gridfile *gfile, gridfs_offset size, char *buf ) { mongo_cursor *chunks; bson chunk; int first_chunk; int last_chunk; int total_chunks; gridfs_offset chunksize; gridfs_offset contentlength; gridfs_offset bytes_left; int i; bson_iterator it; gridfs_offset chunk_len; const char *chunk_data; contentlength = gridfile_get_contentlength( gfile ); chunksize = gridfile_get_chunksize( gfile ); size = ( contentlength - gfile->pos < size ) ? contentlength - gfile->pos : size; bytes_left = size; first_chunk = ( gfile->pos )/chunksize; last_chunk = ( gfile->pos+size-1 )/chunksize; total_chunks = last_chunk - first_chunk + 1; chunks = gridfile_get_chunks( gfile, first_chunk, total_chunks ); for ( i = 0; i < total_chunks; i++ ) { mongo_cursor_next( chunks ); chunk = chunks->current; bson_find( &it, &chunk, "data" ); chunk_len = bson_iterator_bin_len( &it ); chunk_data = bson_iterator_bin_data( &it ); if ( i == 0 ) { chunk_data += ( gfile->pos )%chunksize; chunk_len -= ( gfile->pos )%chunksize; } if ( bytes_left > chunk_len ) { memcpy( buf, chunk_data, chunk_len ); bytes_left -= chunk_len; buf += chunk_len; } else { memcpy( buf, chunk_data, bytes_left ); } } mongo_cursor_destroy( chunks ); gfile->pos = gfile->pos + size; return size; } MONGO_EXPORT gridfs_offset gridfile_seek( gridfile *gfile, gridfs_offset offset ) { gridfs_offset length; length = gridfile_get_contentlength( gfile ); gfile->pos = length < offset ? length : offset; return gfile->pos; }
/* gridfs.c */ /* Copyright 2009-2012 10gen Inc. * * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. * You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. */ #include "gridfs.h" #include <stdio.h> #include <stdlib.h> #include <string.h> #include <assert.h> MONGO_EXPORT gridfs* gridfs_create( void ) { return (gridfs*)bson_malloc(sizeof(gridfs)); } MONGO_EXPORT void gridfs_dispose(gridfs* gfs) { free(gfs); } MONGO_EXPORT gridfile* gridfile_create( void ) { return (gridfile*)bson_malloc(sizeof(gridfile)); } MONGO_EXPORT void gridfile_dispose(gridfile* gf) { free(gf); } MONGO_EXPORT void gridfile_get_descriptor(gridfile* gf, bson* out) { *out = *gf->meta; } static bson *chunk_new( bson_oid_t id, int chunkNumber, const char *data, int len ) { bson *b = bson_malloc( sizeof( bson ) ); bson_init( b ); bson_append_oid( b, "files_id", &id ); bson_append_int( b, "n", chunkNumber ); bson_append_binary( b, "data", BSON_BIN_BINARY, data, len ); bson_finish( b ); return b; } static void chunk_free( bson *oChunk ) { bson_destroy( oChunk ); bson_free( oChunk ); } int gridfs_init( mongo *client, const char *dbname, const char *prefix, gridfs *gfs ) { int options; bson b; bson_bool_t success; gfs->client = client; /* Allocate space to own the dbname */ gfs->dbname = ( const char * )bson_malloc( strlen( dbname )+1 ); strcpy( ( char * )gfs->dbname, dbname ); /* Allocate space to own the prefix */ if ( prefix == NULL ) prefix = "fs"; gfs->prefix = ( const char * )bson_malloc( strlen( prefix )+1 ); strcpy( ( char * )gfs->prefix, prefix ); /* Allocate space to 
own files_ns */ gfs->files_ns = ( const char * ) bson_malloc ( strlen( prefix )+strlen( dbname )+strlen( ".files" )+2 ); strcpy( ( char * )gfs->files_ns, dbname ); strcat( ( char * )gfs->files_ns, "." ); strcat( ( char * )gfs->files_ns, prefix ); strcat( ( char * )gfs->files_ns, ".files" ); /* Allocate space to own chunks_ns */ gfs->chunks_ns = ( const char * ) bson_malloc( strlen( prefix ) + strlen( dbname ) + strlen( ".chunks" ) + 2 ); strcpy( ( char * )gfs->chunks_ns, dbname ); strcat( ( char * )gfs->chunks_ns, "." ); strcat( ( char * )gfs->chunks_ns, prefix ); strcat( ( char * )gfs->chunks_ns, ".chunks" ); bson_init( &b ); bson_append_int( &b, "filename", 1 ); bson_finish( &b ); options = 0; success = ( mongo_create_index( gfs->client, gfs->files_ns, &b, options, NULL ) == MONGO_OK ); bson_destroy( &b ); if ( !success ) { bson_free( ( char * )gfs->dbname ); bson_free( ( char * )gfs->prefix ); bson_free( ( char * )gfs->files_ns ); bson_free( ( char * )gfs->chunks_ns ); return MONGO_ERROR; } bson_init( &b ); bson_append_int( &b, "files_id", 1 ); bson_append_int( &b, "n", 1 ); bson_finish( &b ); options = MONGO_INDEX_UNIQUE; success = ( mongo_create_index( gfs->client, gfs->chunks_ns, &b, options, NULL ) == MONGO_OK ); bson_destroy( &b ); if ( !success ) { bson_free( ( char * )gfs->dbname ); bson_free( ( char * )gfs->prefix ); bson_free( ( char * )gfs->files_ns ); bson_free( ( char * )gfs->chunks_ns ); return MONGO_ERROR; } return MONGO_OK; } MONGO_EXPORT void gridfs_destroy( gridfs *gfs ) { if ( gfs == NULL ) return; if ( gfs->dbname ) bson_free( ( char * )gfs->dbname ); if ( gfs->prefix ) bson_free( ( char * )gfs->prefix ); if ( gfs->files_ns ) bson_free( ( char * )gfs->files_ns ); if ( gfs->chunks_ns ) bson_free( ( char * )gfs->chunks_ns ); } static int gridfs_insert_file( gridfs *gfs, const char *name, const bson_oid_t id, gridfs_offset length, const char *contenttype ) { bson command; bson ret; bson res; bson_iterator it; int result; int64_t d; /* Check run 
md5 */ bson_init( &command ); bson_append_oid( &command, "filemd5", &id ); bson_append_string( &command, "root", gfs->prefix ); bson_finish( &command ); result = mongo_run_command( gfs->client, gfs->dbname, &command, &res ); bson_destroy( &command ); if (result != MONGO_OK) return result; /* Create and insert BSON for file metadata */ bson_init( &ret ); bson_append_oid( &ret, "_id", &id ); if ( name != NULL && *name != '\0' ) { bson_append_string( &ret, "filename", name ); } bson_append_long( &ret, "length", length ); bson_append_int( &ret, "chunkSize", DEFAULT_CHUNK_SIZE ); d = ( bson_date_t )1000*time( NULL ); bson_append_date( &ret, "uploadDate", d); bson_find( &it, &res, "md5" ); bson_append_string( &ret, "md5", bson_iterator_string( &it ) ); bson_destroy( &res ); if ( contenttype != NULL && *contenttype != '\0' ) { bson_append_string( &ret, "contentType", contenttype ); } bson_finish( &ret ); result = mongo_insert( gfs->client, gfs->files_ns, &ret, NULL ); bson_destroy( &ret ); return result; } MONGO_EXPORT int gridfs_store_buffer( gridfs *gfs, const char *data, gridfs_offset length, const char *remotename, const char *contenttype ) { char const *end = data + length; const char *data_ptr = data; bson_oid_t id; int chunkNumber = 0; int chunkLen; bson *oChunk; /* Large files Assertion */ /* assert( length <= 0xffffffff ); */ /* Generate and append an oid*/ bson_oid_gen( &id ); /* Insert the file's data chunk by chunk */ while ( data_ptr < end ) { chunkLen = DEFAULT_CHUNK_SIZE < ( unsigned int )( end - data_ptr ) ? 
DEFAULT_CHUNK_SIZE : ( unsigned int )( end - data_ptr ); oChunk = chunk_new( id, chunkNumber, data_ptr, chunkLen ); mongo_insert( gfs->client, gfs->chunks_ns, oChunk, NULL ); chunk_free( oChunk ); chunkNumber++; data_ptr += chunkLen; } /* Inserts file's metadata */ return gridfs_insert_file( gfs, remotename, id, length, contenttype ); } MONGO_EXPORT void gridfile_writer_init( gridfile *gfile, gridfs *gfs, const char *remote_name, const char *content_type ) { gfile->gfs = gfs; bson_oid_gen( &( gfile->id ) ); gfile->chunk_num = 0; gfile->length = 0; gfile->pending_len = 0; gfile->pending_data = NULL; gfile->remote_name = ( char * )bson_malloc( strlen( remote_name ) + 1 ); strcpy( ( char * )gfile->remote_name, remote_name ); gfile->content_type = ( char * )bson_malloc( strlen( content_type ) + 1 ); strcpy( ( char * )gfile->content_type, content_type ); } MONGO_EXPORT void gridfile_write_buffer( gridfile *gfile, const char *data, gridfs_offset length ) { size_t bytes_left = 0; size_t data_partial_len = 0; size_t chunks_to_write = 0; char *buffer; bson *oChunk; gridfs_offset to_write = length + gfile->pending_len; if ( to_write < DEFAULT_CHUNK_SIZE ) { /* Less than one chunk to write */ if( gfile->pending_data ) { gfile->pending_data = ( char * )bson_realloc( ( void * )gfile->pending_data, gfile->pending_len + to_write ); memcpy( gfile->pending_data + gfile->pending_len, data, length ); } else if ( to_write > 0 ) { gfile->pending_data = ( char * )bson_malloc( to_write ); memcpy( gfile->pending_data, data, length ); } gfile->pending_len += length; } else { /* At least one chunk of data to write */ chunks_to_write = to_write / DEFAULT_CHUNK_SIZE; bytes_left = to_write % DEFAULT_CHUNK_SIZE; /* If there's a pending chunk to be written, we need to combine * the buffer provided up to DEFAULT_CHUNK_SIZE. 
*/ if ( gfile->pending_len > 0 ) { data_partial_len = DEFAULT_CHUNK_SIZE - gfile->pending_len; buffer = ( char * )bson_malloc( DEFAULT_CHUNK_SIZE ); memcpy( buffer, gfile->pending_data, gfile->pending_len ); memcpy( buffer + gfile->pending_len, data, data_partial_len ); oChunk = chunk_new( gfile->id, gfile->chunk_num, buffer, DEFAULT_CHUNK_SIZE ); mongo_insert( gfile->gfs->client, gfile->gfs->chunks_ns, oChunk, NULL ); chunk_free( oChunk ); gfile->chunk_num++; gfile->length += DEFAULT_CHUNK_SIZE; data += data_partial_len; chunks_to_write--; bson_free( buffer ); } while( chunks_to_write > 0 ) { oChunk = chunk_new( gfile->id, gfile->chunk_num, data, DEFAULT_CHUNK_SIZE ); mongo_insert( gfile->gfs->client, gfile->gfs->chunks_ns, oChunk, NULL ); chunk_free( oChunk ); gfile->chunk_num++; chunks_to_write--; gfile->length += DEFAULT_CHUNK_SIZE; data += DEFAULT_CHUNK_SIZE; } bson_free( gfile->pending_data ); /* If there are any leftover bytes, store them as pending data. */ if( bytes_left == 0 ) gfile->pending_data = NULL; else { gfile->pending_data = ( char * )bson_malloc( bytes_left ); memcpy( gfile->pending_data, data, bytes_left ); } gfile->pending_len = bytes_left; } } MONGO_EXPORT int gridfile_writer_done( gridfile *gfile ) { /* write any remaining pending chunk data. 
* pending data will always take up less than one chunk */ bson *oChunk; int response; if( gfile->pending_data ) { oChunk = chunk_new( gfile->id, gfile->chunk_num, gfile->pending_data, gfile->pending_len ); mongo_insert( gfile->gfs->client, gfile->gfs->chunks_ns, oChunk, NULL ); chunk_free( oChunk ); bson_free( gfile->pending_data ); gfile->length += gfile->pending_len; } /* insert into files collection */ response = gridfs_insert_file( gfile->gfs, gfile->remote_name, gfile->id, gfile->length, gfile->content_type ); bson_free( gfile->remote_name ); bson_free( gfile->content_type ); return response; } int gridfs_store_file( gridfs *gfs, const char *filename, const char *remotename, const char *contenttype ) { char buffer[DEFAULT_CHUNK_SIZE]; FILE *fd; bson_oid_t id; int chunkNumber = 0; gridfs_offset length = 0; gridfs_offset chunkLen = 0; bson *oChunk; /* Open the file and the correct stream */ if ( strcmp( filename, "-" ) == 0 ) fd = stdin; else { fd = fopen( filename, "rb" ); if (fd == NULL) return MONGO_ERROR; } /* Generate and append an oid*/ bson_oid_gen( &id ); /* Insert the file chunk by chunk */ chunkLen = fread( buffer, 1, DEFAULT_CHUNK_SIZE, fd ); do { oChunk = chunk_new( id, chunkNumber, buffer, chunkLen ); mongo_insert( gfs->client, gfs->chunks_ns, oChunk, NULL ); chunk_free( oChunk ); length += chunkLen; chunkNumber++; chunkLen = fread( buffer, 1, DEFAULT_CHUNK_SIZE, fd ); } while ( chunkLen != 0 ); /* Close the file stream */ if ( fd != stdin ) fclose( fd ); /* Large files Assertion */ /* assert(length <= 0xffffffff); */ /* Optional Remote Name */ if ( remotename == NULL || *remotename == '\0' ) { remotename = filename; } /* Inserts file's metadata */ return gridfs_insert_file( gfs, remotename, id, length, contenttype ); } MONGO_EXPORT void gridfs_remove_filename( gridfs *gfs, const char *filename ) { bson query; mongo_cursor *files; bson file; bson_iterator it; bson_oid_t id; bson b; bson_init( &query ); bson_append_string( &query, "filename", 
filename ); bson_finish( &query ); files = mongo_find( gfs->client, gfs->files_ns, &query, NULL, 0, 0, 0 ); bson_destroy( &query ); /* Remove each file and it's chunks from files named filename */ while ( mongo_cursor_next( files ) == MONGO_OK ) { file = files->current; bson_find( &it, &file, "_id" ); id = *bson_iterator_oid( &it ); /* Remove the file with the specified id */ bson_init( &b ); bson_append_oid( &b, "_id", &id ); bson_finish( &b ); mongo_remove( gfs->client, gfs->files_ns, &b, NULL ); bson_destroy( &b ); /* Remove all chunks from the file with the specified id */ bson_init( &b ); bson_append_oid( &b, "files_id", &id ); bson_finish( &b ); mongo_remove( gfs->client, gfs->chunks_ns, &b, NULL ); bson_destroy( &b ); } mongo_cursor_destroy( files ); } int gridfs_find_query( gridfs *gfs, bson *query, gridfile *gfile ) { bson uploadDate; bson finalQuery; bson out; int i; bson_init( &uploadDate ); bson_append_int( &uploadDate, "uploadDate", -1 ); bson_finish( &uploadDate ); bson_init( &finalQuery ); bson_append_bson( &finalQuery, "query", query ); bson_append_bson( &finalQuery, "orderby", &uploadDate ); bson_finish( &finalQuery ); i = ( mongo_find_one( gfs->client, gfs->files_ns, &finalQuery, NULL, &out ) == MONGO_OK ); bson_destroy( &uploadDate ); bson_destroy( &finalQuery ); if ( !i ) return MONGO_ERROR; else { gridfile_init( gfs, &out, gfile ); bson_destroy( &out ); return MONGO_OK; } } int gridfs_find_filename( gridfs *gfs, const char *filename, gridfile *gfile ) { bson query; int i; bson_init( &query ); bson_append_string( &query, "filename", filename ); bson_finish( &query ); i = gridfs_find_query( gfs, &query, gfile ); bson_destroy( &query ); return i; } int gridfile_init( gridfs *gfs, bson *meta, gridfile *gfile ) { gfile->gfs = gfs; gfile->pos = 0; gfile->meta = ( bson * )bson_malloc( sizeof( bson ) ); if ( gfile->meta == NULL ) return MONGO_ERROR; bson_copy( gfile->meta, meta ); return MONGO_OK; } MONGO_EXPORT void gridfile_destroy( gridfile *gfile ) 
{ bson_destroy( gfile->meta ); bson_free( gfile->meta ); } bson_bool_t gridfile_exists( gridfile *gfile ) { return ( bson_bool_t )( gfile != NULL && gfile->meta != NULL ); } MONGO_EXPORT const char *gridfile_get_filename( gridfile *gfile ) { bson_iterator it; bson_find( &it, gfile->meta, "filename" ); return bson_iterator_string( &it ); } MONGO_EXPORT int gridfile_get_chunksize( gridfile *gfile ) { bson_iterator it; bson_find( &it, gfile->meta, "chunkSize" ); return bson_iterator_int( &it ); } MONGO_EXPORT gridfs_offset gridfile_get_contentlength( gridfile *gfile ) { bson_iterator it; bson_find( &it, gfile->meta, "length" ); if( bson_iterator_type( &it ) == BSON_INT ) return ( gridfs_offset )bson_iterator_int( &it ); else return ( gridfs_offset )bson_iterator_long( &it ); } MONGO_EXPORT const char *gridfile_get_contenttype( gridfile *gfile ) { bson_iterator it; if ( bson_find( &it, gfile->meta, "contentType" ) ) return bson_iterator_string( &it ); else return NULL; } MONGO_EXPORT bson_date_t gridfile_get_uploaddate( gridfile *gfile ) { bson_iterator it; bson_find( &it, gfile->meta, "uploadDate" ); return bson_iterator_date( &it ); } MONGO_EXPORT const char *gridfile_get_md5( gridfile *gfile ) { bson_iterator it; bson_find( &it, gfile->meta, "md5" ); return bson_iterator_string( &it ); } const char *gridfile_get_field( gridfile *gfile, const char *name ) { bson_iterator it; bson_find( &it, gfile->meta, name ); return bson_iterator_value( &it ); } bson_bool_t gridfile_get_boolean( gridfile *gfile, const char *name ) { bson_iterator it; bson_find( &it, gfile->meta, name ); return bson_iterator_bool( &it ); } MONGO_EXPORT void gridfile_get_metadata( gridfile *gfile, bson* out ) { bson_iterator it; if ( bson_find( &it, gfile->meta, "metadata" ) ) bson_iterator_subobject( &it, out ); else bson_empty( out ); } MONGO_EXPORT int gridfile_get_numchunks( gridfile *gfile ) { bson_iterator it; gridfs_offset length; gridfs_offset chunkSize; double numchunks; bson_find( &it, 
gfile->meta, "length" ); if( bson_iterator_type( &it ) == BSON_INT ) length = ( gridfs_offset )bson_iterator_int( &it ); else length = ( gridfs_offset )bson_iterator_long( &it ); bson_find( &it, gfile->meta, "chunkSize" ); chunkSize = bson_iterator_int( &it ); numchunks = ( ( double )length/( double )chunkSize ); return ( numchunks - ( int )numchunks > 0 ) ? ( int )( numchunks+1 ) : ( int )( numchunks ); } MONGO_EXPORT void gridfile_get_chunk( gridfile *gfile, int n, bson* out ) { bson query; bson_iterator it; bson_oid_t id; int result; bson_init( &query ); bson_find( &it, gfile->meta, "_id" ); id = *bson_iterator_oid( &it ); bson_append_oid( &query, "files_id", &id ); bson_append_int( &query, "n", n ); bson_finish( &query ); result = (mongo_find_one(gfile->gfs->client, gfile->gfs->chunks_ns, &query, NULL, out ) == MONGO_OK ); bson_destroy( &query ); if (!result) { bson empty; bson_empty(&empty); bson_copy(out, &empty); } } MONGO_EXPORT mongo_cursor *gridfile_get_chunks( gridfile *gfile, int start, size_t size ) { bson_iterator it; bson_oid_t id; bson gte; bson query; bson orderby; bson command; mongo_cursor *cursor; bson_find( &it, gfile->meta, "_id" ); id = *bson_iterator_oid( &it ); bson_init( &query ); bson_append_oid( &query, "files_id", &id ); if ( size == 1 ) { bson_append_int( &query, "n", (int)start ); } else { bson_init( &gte ); bson_append_int( &gte, "$gte", (int)start ); bson_finish( &gte ); bson_append_bson( &query, "n", &gte ); bson_destroy( &gte ); } bson_finish( &query ); bson_init( &orderby ); bson_append_int( &orderby, "n", 1 ); bson_finish( &orderby ); bson_init( &command ); bson_append_bson( &command, "query", &query ); bson_append_bson( &command, "orderby", &orderby ); bson_finish( &command ); cursor = mongo_find( gfile->gfs->client, gfile->gfs->chunks_ns, &command, NULL, (int)size, 0, 0 ); bson_destroy( &command ); bson_destroy( &query ); bson_destroy( &orderby ); return cursor; } gridfs_offset gridfile_write_file( gridfile *gfile, FILE 
*stream ) { int i; size_t len; bson chunk; bson_iterator it; const char *data; const int num = gridfile_get_numchunks( gfile ); for ( i=0; i<num; i++ ) { gridfile_get_chunk( gfile, i, &chunk ); bson_find( &it, &chunk, "data" ); len = bson_iterator_bin_len( &it ); data = bson_iterator_bin_data( &it ); fwrite( data, sizeof( char ), len, stream ); bson_destroy( &chunk ); } return gridfile_get_contentlength( gfile ); } MONGO_EXPORT gridfs_offset gridfile_read( gridfile *gfile, gridfs_offset size, char *buf ) { mongo_cursor *chunks; bson chunk; size_t first_chunk; size_t last_chunk; size_t total_chunks; gridfs_offset chunksize; gridfs_offset contentlength; gridfs_offset bytes_left; int i; bson_iterator it; gridfs_offset chunk_len; const char *chunk_data; contentlength = gridfile_get_contentlength( gfile ); chunksize = gridfile_get_chunksize( gfile ); size = ( contentlength - gfile->pos < size ) ? contentlength - gfile->pos : size; bytes_left = size; first_chunk = ( gfile->pos )/chunksize; last_chunk = ( gfile->pos+size-1 )/chunksize; total_chunks = last_chunk - first_chunk + 1; chunks = gridfile_get_chunks( gfile, first_chunk, total_chunks ); for ( i = 0; i < total_chunks; i++ ) { mongo_cursor_next( chunks ); chunk = chunks->current; bson_find( &it, &chunk, "data" ); chunk_len = bson_iterator_bin_len( &it ); chunk_data = bson_iterator_bin_data( &it ); if ( i == 0 ) { chunk_data += ( gfile->pos )%chunksize; chunk_len -= ( gfile->pos )%chunksize; } if ( bytes_left > chunk_len ) { memcpy( buf, chunk_data, chunk_len ); bytes_left -= chunk_len; buf += chunk_len; } else { memcpy( buf, chunk_data, bytes_left ); } } mongo_cursor_destroy( chunks ); gfile->pos = gfile->pos + size; return size; } MONGO_EXPORT gridfs_offset gridfile_seek( gridfile *gfile, gridfs_offset offset ) { gridfs_offset length; length = gridfile_get_contentlength( gfile ); gfile->pos = length < offset ? length : offset; return gfile->pos; }
/* Read up to `size` bytes from the gridfile at its current position into `buf`.
 *
 * Returns the number of bytes actually copied (clamped to the remaining
 * content length) and advances gfile->pos by that amount.
 *
 * Fix (CVE-2020-12135 / CWE-190): the chunk indices were previously declared
 * `int`. For large files or large seek positions, gfile->pos / chunksize can
 * exceed INT_MAX, truncating/overflowing the index arithmetic and leading to
 * a wrong (possibly negative) chunk range and out-of-bounds behavior
 * downstream. They are now `size_t`, matching the unsigned 64-bit
 * gridfs_offset arithmetic they are derived from.
 */
MONGO_EXPORT gridfs_offset gridfile_read( gridfile *gfile, gridfs_offset size, char *buf ) {
    mongo_cursor *chunks;
    bson chunk;

    size_t first_chunk;          /* index of the first chunk covering the read */
    size_t last_chunk;           /* index of the last chunk covering the read */
    size_t total_chunks;         /* number of chunks to fetch */
    gridfs_offset chunksize;
    gridfs_offset contentlength;
    gridfs_offset bytes_left;    /* bytes still to copy into buf */
    size_t i;
    bson_iterator it;
    gridfs_offset chunk_len;
    const char *chunk_data;

    contentlength = gridfile_get_contentlength( gfile );
    chunksize = gridfile_get_chunksize( gfile );

    /* Clamp the request so we never read past the end of the file. */
    size = ( contentlength - gfile->pos < size ) ? contentlength - gfile->pos : size;
    bytes_left = size;

    first_chunk = ( size_t )( ( gfile->pos ) / chunksize );
    last_chunk  = ( size_t )( ( gfile->pos + size - 1 ) / chunksize );
    total_chunks = last_chunk - first_chunk + 1;
    chunks = gridfile_get_chunks( gfile, first_chunk, total_chunks );

    for ( i = 0; i < total_chunks; i++ ) {
        mongo_cursor_next( chunks );
        chunk = chunks->current;
        bson_find( &it, &chunk, "data" );
        chunk_len = bson_iterator_bin_len( &it );
        chunk_data = bson_iterator_bin_data( &it );

        /* The first chunk may start mid-chunk: skip the bytes before pos. */
        if ( i == 0 ) {
            chunk_data += ( gfile->pos ) % chunksize;
            chunk_len  -= ( gfile->pos ) % chunksize;
        }

        if ( bytes_left > chunk_len ) {
            memcpy( buf, chunk_data, chunk_len );
            bytes_left -= chunk_len;
            buf += chunk_len;
        }
        else {
            /* Last (possibly partial) chunk of the request. */
            memcpy( buf, chunk_data, bytes_left );
        }
    }

    mongo_cursor_destroy( chunks );
    gfile->pos = gfile->pos + size;
    return size;
}
/* Read up to `size` bytes from the gridfile at its current position into
 * `buf`, returning the number of bytes actually copied and advancing
 * gfile->pos accordingly.
 *
 * Chunk indices are size_t (not int) so that large offsets divided by the
 * chunk size cannot truncate or overflow — this is the patched form of the
 * CVE-2020-12135 integer-overflow issue.
 */
MONGO_EXPORT gridfs_offset gridfile_read( gridfile *gfile, gridfs_offset size, char *buf ) {
    mongo_cursor *chunks;
    bson chunk;

    size_t first_chunk;          /* index of first chunk covering the read */
    size_t last_chunk;           /* index of last chunk covering the read */
    size_t total_chunks;         /* number of chunks to fetch */
    gridfs_offset chunksize;
    gridfs_offset contentlength;
    gridfs_offset bytes_left;    /* bytes still to copy into buf */
    int i;
    bson_iterator it;
    gridfs_offset chunk_len;
    const char *chunk_data;

    contentlength = gridfile_get_contentlength( gfile );
    chunksize = gridfile_get_chunksize( gfile );

    /* Clamp the request so we never read past the end of the file. */
    size = ( contentlength - gfile->pos < size ) ? contentlength - gfile->pos : size;
    bytes_left = size;

    first_chunk = ( gfile->pos )/chunksize;
    last_chunk = ( gfile->pos+size-1 )/chunksize;
    total_chunks = last_chunk - first_chunk + 1;
    chunks = gridfile_get_chunks( gfile, first_chunk, total_chunks );

    for ( i = 0; i < total_chunks; i++ ) {
        mongo_cursor_next( chunks );
        chunk = chunks->current;
        bson_find( &it, &chunk, "data" );
        chunk_len = bson_iterator_bin_len( &it );
        chunk_data = bson_iterator_bin_data( &it );

        /* The first chunk may start mid-chunk: skip the bytes before pos. */
        if ( i == 0 ) {
            chunk_data += ( gfile->pos )%chunksize;
            chunk_len -= ( gfile->pos )%chunksize;
        }

        if ( bytes_left > chunk_len ) {
            memcpy( buf, chunk_data, chunk_len );
            bytes_left -= chunk_len;
            buf += chunk_len;
        }
        else {
            /* Last (possibly partial) chunk of the request. */
            memcpy( buf, chunk_data, bytes_left );
        }
    }

    mongo_cursor_destroy( chunks );
    gfile->pos = gfile->pos + size;
    return size;
}
{'added': [(231, ' size_t bytes_left = 0;'), (232, ' size_t data_partial_len = 0;'), (233, ' size_t chunks_to_write = 0;'), (593, 'MONGO_EXPORT mongo_cursor *gridfile_get_chunks( gridfile *gfile, int start, size_t size ) {'), (608, ' bson_append_int( &query, "n", (int)start );'), (612, ' bson_append_int( &gte, "$gte", (int)start );'), (629, ' &command, NULL, (int)size, 0, 0 );'), (662, ' size_t first_chunk;'), (663, ' size_t last_chunk;'), (664, ' size_t total_chunks;')], 'deleted': [(231, ' int bytes_left = 0;'), (232, ' int data_partial_len = 0;'), (233, ' int chunks_to_write = 0;'), (593, 'MONGO_EXPORT mongo_cursor *gridfile_get_chunks( gridfile *gfile, int start, int size ) {'), (608, ' bson_append_int( &query, "n", start );'), (612, ' bson_append_int( &gte, "$gte", start );'), (629, ' &command, NULL, size, 0, 0 );'), (662, ' int first_chunk;'), (663, ' int last_chunk;'), (664, ' int total_chunks;')]}
10
10
545
3,958
46
271
5
https://github.com/10gen-archive/mongo-c-driver-legacy
CVE-2020-12135
CWE-190
237
hid-monterey.c
C
mr_report_fixup
/* * HID driver for some monterey "special" devices * * Copyright (c) 1999 Andreas Gal * Copyright (c) 2000-2005 Vojtech Pavlik <vojtech@suse.cz> * Copyright (c) 2005 Michael Haboustak <mike-@cinci.rr.com> for Concept2, Inc * Copyright (c) 2006-2007 Jiri Kosina * Copyright (c) 2008 Jiri Slaby */ /* * This program is free software; you can redistribute it and/or modify it * under the terms of the GNU General Public License as published by the Free * Software Foundation; either version 2 of the License, or (at your option) * any later version. */ #include <linux/device.h> #include <linux/hid.h> #include <linux/module.h> #include "hid-ids.h" static __u8 *mr_report_fixup(struct hid_device *hdev, __u8 *rdesc, unsigned int *rsize) { if (*rsize >= 30 && rdesc[29] == 0x05 && rdesc[30] == 0x09) { hid_info(hdev, "fixing up button/consumer in HID report descriptor\n"); rdesc[30] = 0x0c; } return rdesc; } #define mr_map_key_clear(c) hid_map_usage_clear(hi, usage, bit, max, \ EV_KEY, (c)) static int mr_input_mapping(struct hid_device *hdev, struct hid_input *hi, struct hid_field *field, struct hid_usage *usage, unsigned long **bit, int *max) { if ((usage->hid & HID_USAGE_PAGE) != HID_UP_CONSUMER) return 0; switch (usage->hid & HID_USAGE) { case 0x156: mr_map_key_clear(KEY_WORDPROCESSOR); break; case 0x157: mr_map_key_clear(KEY_SPREADSHEET); break; case 0x158: mr_map_key_clear(KEY_PRESENTATION); break; case 0x15c: mr_map_key_clear(KEY_STOP); break; default: return 0; } return 1; } static const struct hid_device_id mr_devices[] = { { HID_USB_DEVICE(USB_VENDOR_ID_MONTEREY, USB_DEVICE_ID_GENIUS_KB29E) }, { } }; MODULE_DEVICE_TABLE(hid, mr_devices); static struct hid_driver mr_driver = { .name = "monterey", .id_table = mr_devices, .report_fixup = mr_report_fixup, .input_mapping = mr_input_mapping, }; module_hid_driver(mr_driver); MODULE_LICENSE("GPL");
/* * HID driver for some monterey "special" devices * * Copyright (c) 1999 Andreas Gal * Copyright (c) 2000-2005 Vojtech Pavlik <vojtech@suse.cz> * Copyright (c) 2005 Michael Haboustak <mike-@cinci.rr.com> for Concept2, Inc * Copyright (c) 2006-2007 Jiri Kosina * Copyright (c) 2008 Jiri Slaby */ /* * This program is free software; you can redistribute it and/or modify it * under the terms of the GNU General Public License as published by the Free * Software Foundation; either version 2 of the License, or (at your option) * any later version. */ #include <linux/device.h> #include <linux/hid.h> #include <linux/module.h> #include "hid-ids.h" static __u8 *mr_report_fixup(struct hid_device *hdev, __u8 *rdesc, unsigned int *rsize) { if (*rsize >= 31 && rdesc[29] == 0x05 && rdesc[30] == 0x09) { hid_info(hdev, "fixing up button/consumer in HID report descriptor\n"); rdesc[30] = 0x0c; } return rdesc; } #define mr_map_key_clear(c) hid_map_usage_clear(hi, usage, bit, max, \ EV_KEY, (c)) static int mr_input_mapping(struct hid_device *hdev, struct hid_input *hi, struct hid_field *field, struct hid_usage *usage, unsigned long **bit, int *max) { if ((usage->hid & HID_USAGE_PAGE) != HID_UP_CONSUMER) return 0; switch (usage->hid & HID_USAGE) { case 0x156: mr_map_key_clear(KEY_WORDPROCESSOR); break; case 0x157: mr_map_key_clear(KEY_SPREADSHEET); break; case 0x158: mr_map_key_clear(KEY_PRESENTATION); break; case 0x15c: mr_map_key_clear(KEY_STOP); break; default: return 0; } return 1; } static const struct hid_device_id mr_devices[] = { { HID_USB_DEVICE(USB_VENDOR_ID_MONTEREY, USB_DEVICE_ID_GENIUS_KB29E) }, { } }; MODULE_DEVICE_TABLE(hid, mr_devices); static struct hid_driver mr_driver = { .name = "monterey", .id_table = mr_devices, .report_fixup = mr_report_fixup, .input_mapping = mr_input_mapping, }; module_hid_driver(mr_driver); MODULE_LICENSE("GPL");
static __u8 *mr_report_fixup(struct hid_device *hdev, __u8 *rdesc, unsigned int *rsize) { if (*rsize >= 30 && rdesc[29] == 0x05 && rdesc[30] == 0x09) { hid_info(hdev, "fixing up button/consumer in HID report descriptor\n"); rdesc[30] = 0x0c; } return rdesc; }
static __u8 *mr_report_fixup(struct hid_device *hdev, __u8 *rdesc, unsigned int *rsize) { if (*rsize >= 31 && rdesc[29] == 0x05 && rdesc[30] == 0x09) { hid_info(hdev, "fixing up button/consumer in HID report descriptor\n"); rdesc[30] = 0x0c; } return rdesc; }
{'added': [(27, '\tif (*rsize >= 31 && rdesc[29] == 0x05 && rdesc[30] == 0x09) {')], 'deleted': [(27, '\tif (*rsize >= 30 && rdesc[29] == 0x05 && rdesc[30] == 0x09) {')]}
1
1
42
244
9
58
4
https://github.com/torvalds/linux
CVE-2014-3184
CWE-119
1,289
out.c
C
_out_result
/* * jabberd - Jabber Open Source Server * Copyright (c) 2002 Jeremie Miller, Thomas Muldowney, * Ryan Eatmon, Robert Norris * * This program is free software; you can redistribute it and/or modify * it under the terms of the GNU General Public License as published by * the Free Software Foundation; either version 2 of the License, or * (at your option) any later version. * * This program is distributed in the hope that it will be useful, * but WITHOUT ANY WARRANTY; without even the implied warranty of * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.See the * GNU General Public License for more details. * * You should have received a copy of the GNU General Public License * along with this program; if not, write to the Free Software * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA02111-1307USA */ #define _GNU_SOURCE #include <string.h> #include "s2s.h" #include <idna.h> /* * we handle packets going from the router to the world, and stuff * that comes in on connections we initiated. 
* * action points: * * out_packet(s2s, nad) - send this packet out * - extract to domain * - get dbconn for this domain using out_route * - if dbconn not available bounce packet * - DONE * - if conn in progress (tcp) * - add packet to queue for this domain * - DONE * - if dbconn state valid for this domain, or packet is dialback * - send packet * - DONE * - if dbconn state invalid for this domain * - bounce packet (502) * - DONE * - add packet to queue for this domain * - if dbconn state inprogress for this domain * - DONE * - out_dialback(dbconn, from, to) * * out_route(s2s, route, out, allow_bad) * - if dbconn not found * - check internal resolver cache for domain * - if not found * - ask resolver for name * - DONE * - if outgoing ip/port is to be reused * - get dbconn for any valid ip/port * - if dbconn not found * - create new dbconn * - initiate connect to ip/port * - DONE * - create new dbconn * - initiate connect to ip/port * - DONE * * out_dialback(dbconn, from, to) - initiate dialback * - generate dbkey: sha1(secret+remote+stream id) * - send auth request: <result to='them' from='us'>dbkey</result> * - set dbconn state for this domain to inprogress * - DONE * * out_resolve(s2s, query) - responses from resolver * - store ip/port/ttl in resolver cache * - flush domain queue -> out_packet(s2s, domain) * - DONE * * event_STREAM - ip/port open * - get dbconn for this sx * - for each route handled by this conn, out_dialback(dbconn, from, to) * - DONE * * event_PACKET: <result from='them' to='us' type='xxx'/> - response to our auth request * - get dbconn for this sx * - if type valid * - set dbconn state for this domain to valid * - flush dbconn queue for this domain -> out_packet(s2s, pkt) * - DONE * - set dbconn state for this domain to invalid * - bounce dbconn queue for this domain (502) * - DONE * * event_PACKET: <verify from='them' to='us' id='123' type='xxx'/> - incoming stream authenticated * - get dbconn for given id * - if type is valid * - set dbconn 
state for this domain to valid * - send result: <result to='them' from='us' type='xxx'/> * - DONE */ /* forward decls */ static int _out_mio_callback(mio_t m, mio_action_t a, mio_fd_t fd, void *data, void *arg); static int _out_sx_callback(sx_t s, sx_event_t e, void *data, void *arg); static void _out_result(conn_t out, nad_t nad); static void _out_verify(conn_t out, nad_t nad); static void _dns_result_aaaa(struct dns_ctx *ctx, struct dns_rr_a6 *result, void *data); static void _dns_result_a(struct dns_ctx *ctx, struct dns_rr_a4 *result, void *data); /** queue the packet */ static void _out_packet_queue(s2s_t s2s, pkt_t pkt) { char *rkey = s2s_route_key(NULL, pkt->from->domain, pkt->to->domain); jqueue_t q = (jqueue_t) xhash_get(s2s->outq, rkey); if(q == NULL) { log_debug(ZONE, "creating new out packet queue for '%s'", rkey); q = jqueue_new(); q->key = rkey; xhash_put(s2s->outq, q->key, (void *) q); } else { free(rkey); } log_debug(ZONE, "queueing packet for '%s'", q->key); jqueue_push(q, (void *) pkt, 0); } static void _out_dialback(conn_t out, char *rkey, int rkeylen) { char *c, *dbkey, *tmp; nad_t nad; int elem, ns; int from_len, to_len; time_t now; now = time(NULL); c = memchr(rkey, '/', rkeylen); from_len = c - rkey; c++; to_len = rkeylen - (c - rkey); /* kick off the dialback */ tmp = strndup(c, to_len); dbkey = s2s_db_key(NULL, out->s2s->local_secret, tmp, out->s->id); free(tmp); nad = nad_new(); /* request auth */ ns = nad_add_namespace(nad, uri_DIALBACK, "db"); elem = nad_append_elem(nad, ns, "result", 0); nad_set_attr(nad, elem, -1, "from", rkey, from_len); nad_set_attr(nad, elem, -1, "to", c, to_len); nad_append_cdata(nad, dbkey, strlen(dbkey), 1); log_debug(ZONE, "sending auth request for %.*s (key %s)", rkeylen, rkey, dbkey); log_write(out->s2s->log, LOG_NOTICE, "[%d] [%s, port=%d] sending dialback auth request for route '%.*s'", out->fd->fd, out->ip, out->port, rkeylen, rkey); /* off it goes */ sx_nad_write(out->s, nad); free(dbkey); /* we're in 
progress now */ xhash_put(out->states, pstrdupx(xhash_pool(out->states), rkey, rkeylen), (void *) conn_INPROGRESS); /* record the time that we set conn_INPROGRESS state */ xhash_put(out->states_time, pstrdupx(xhash_pool(out->states_time), rkey, rkeylen), (void *) now); } void _out_dns_mark_bad(conn_t out) { if (out->s2s->dns_bad_timeout > 0) { dnsres_t bad; char *ipport; /* mark this host as bad */ ipport = dns_make_ipport(out->ip, out->port); bad = xhash_get(out->s2s->dns_bad, ipport); if (bad == NULL) { bad = (dnsres_t) calloc(1, sizeof(struct dnsres_st)); bad->key = ipport; xhash_put(out->s2s->dns_bad, ipport, bad); } bad->expiry = time(NULL) + out->s2s->dns_bad_timeout; } } int dns_select(s2s_t s2s, char *ip, int *port, time_t now, dnscache_t dns, int allow_bad) { /* list of results */ dnsres_t l_reuse[DNS_MAX_RESULTS]; dnsres_t l_aaaa[DNS_MAX_RESULTS]; dnsres_t l_a[DNS_MAX_RESULTS]; dnsres_t l_bad[DNS_MAX_RESULTS]; /* running weight sums of results */ int rw_reuse[DNS_MAX_RESULTS]; int rw_aaaa[DNS_MAX_RESULTS]; int rw_a[DNS_MAX_RESULTS]; int s_reuse = 0, s_aaaa = 0, s_a = 0, s_bad = 0; /* count */ int p_reuse = 0, p_aaaa = 0, p_a = 0; /* list prio */ int wt_reuse = 0, wt_aaaa = 0, wt_a = 0; /* weight total */ int c_expired_good = 0; union xhashv xhv; dnsres_t res; char *ipport; int ipport_len; char *c; int c_len; char *tmp; /* for all results: * - if not expired * - put highest priority reuseable addrs into list1 * - put highest priority ipv6 addrs into list2 * - put highest priority ipv4 addrs into list3 * - put bad addrs into list4 * - pick weighted random entry from first non-empty list */ if (dns->results == NULL) { log_debug(ZONE, "negative cache entry for '%s'", dns->name); return -1; } log_debug(ZONE, "selecting DNS result for '%s'", dns->name); xhv.dnsres_val = &res; if (xhash_iter_first(dns->results)) { dnsres_t bad = NULL; do { xhash_iter_get(dns->results, (const char **) &ipport, &ipport_len, xhv.val); if (s2s->dns_bad_timeout > 0) bad = 
xhash_getx(s2s->dns_bad, ipport, ipport_len); if (now > res->expiry) { /* good host? */ if (bad == NULL) c_expired_good++; log_debug(ZONE, "host '%s' expired", res->key); continue; } else if (bad != NULL && !(now > bad->expiry)) { /* bad host (connection failure) */ l_bad[s_bad++] = res; log_debug(ZONE, "host '%s' bad", res->key); } else if (s2s->out_reuse && xhash_getx(s2s->out_host, ipport, ipport_len) != NULL) { /* existing connection */ log_debug(ZONE, "host '%s' exists", res->key); if (s_reuse == 0 || p_reuse > res->prio) { p_reuse = res->prio; s_reuse = 0; wt_reuse = 0; log_debug(ZONE, "reset prio list, using prio %d", res->prio); } if (res->prio <= p_reuse) { l_reuse[s_reuse] = res; wt_reuse += res->weight; rw_reuse[s_reuse] = wt_reuse; s_reuse++; log_debug(ZONE, "added host with weight %d (%d), running weight %d", (res->weight >> 8), res->weight, wt_reuse); } else { log_debug(ZONE, "ignored host with prio %d", res->prio); } } else if (memchr(ipport, ':', ipport_len) != NULL) { /* ipv6 */ log_debug(ZONE, "host '%s' IPv6", res->key); if (s_aaaa == 0 || p_aaaa > res->prio) { p_aaaa = res->prio; s_aaaa = 0; wt_aaaa = 0; log_debug(ZONE, "reset prio list, using prio %d", res->prio); } if (res->prio <= p_aaaa) { l_aaaa[s_aaaa] = res; wt_aaaa += res->weight; rw_aaaa[s_aaaa] = wt_aaaa; s_aaaa++; log_debug(ZONE, "added host with weight %d (%d), running weight %d", (res->weight >> 8), res->weight, wt_aaaa); } else { log_debug(ZONE, "ignored host with prio %d", res->prio); } } else { /* ipv4 */ log_debug(ZONE, "host '%s' IPv4", res->key); if (s_a == 0 || p_a > res->prio) { p_a = res->prio; s_a = 0; wt_a = 0; log_debug(ZONE, "reset prio list, using prio %d", res->prio); } if (res->prio <= p_a) { l_a[s_a] = res; wt_a += res->weight; rw_a[s_a] = wt_a; s_a++; log_debug(ZONE, "added host with weight %d (%d), running weight %d", (res->weight >> 8), res->weight, wt_a); } else { log_debug(ZONE, "ignored host with prio %d", res->prio); } } } 
while(xhash_iter_next(dns->results)); } /* pick a result at weighted random (RFC 2782) * all weights are guaranteed to be >= 16 && <= 16776960 * (assuming max 50 hosts, the total/running sums won't exceed 2^31) */ ipport = NULL; if (s_reuse > 0) { int i, r; log_debug(ZONE, "using existing hosts, total weight %d", wt_reuse); assert((wt_reuse + 1) > 0); r = rand() % (wt_reuse + 1); log_debug(ZONE, "random number %d", r); for (i = 0; i < s_reuse; i++) if (rw_reuse[i] >= r) { log_debug(ZONE, "selected host '%s', running weight %d", l_reuse[i]->key, rw_reuse[i]); ipport = l_reuse[i]->key; break; } } else if (s_aaaa > 0 && (s_a == 0 || p_aaaa <= p_a)) { int i, r; log_debug(ZONE, "using IPv6 hosts, total weight %d", wt_aaaa); assert((wt_aaaa + 1) > 0); r = rand() % (wt_aaaa + 1); log_debug(ZONE, "random number %d", r); for (i = 0; i < s_aaaa; i++) if (rw_aaaa[i] >= r) { log_debug(ZONE, "selected host '%s', running weight %d", l_aaaa[i]->key, rw_aaaa[i]); ipport = l_aaaa[i]->key; break; } } else if (s_a > 0) { int i, r; log_debug(ZONE, "using IPv4 hosts, total weight %d", wt_a); assert((wt_a + 1) > 0); r = rand() % (wt_a + 1); log_debug(ZONE, "random number %d", r); for (i = 0; i < s_a; i++) if (rw_a[i] >= r) { log_debug(ZONE, "selected host '%s', running weight %d", l_a[i]->key, rw_a[i]); ipport = l_a[i]->key; break; } } else if (s_bad > 0) { ipport = l_bad[rand() % s_bad]->key; log_debug(ZONE, "using bad hosts, allow_bad=%d", allow_bad); /* there are expired good hosts, expire cache immediately */ if (c_expired_good > 0) { log_debug(ZONE, "expiring this DNS cache entry, %d expired hosts", c_expired_good); dns->expiry = 0; } if (!allow_bad) return -1; } /* results cannot all expire before the collection does */ assert(ipport != NULL); /* copy the ip and port to the packet */ ipport_len = strlen(ipport); c = strchr(ipport, '/'); strncpy(ip, ipport, c-ipport); ip[c-ipport] = '\0'; c++; c_len = ipport_len - (c - ipport); tmp = strndup(c, c_len); *port = atoi(tmp); free(tmp); 
return 0; } /** find/make a connection for a route */ int out_route(s2s_t s2s, char *route, int routelen, conn_t *out, int allow_bad) { dnscache_t dns; char ipport[INET6_ADDRSTRLEN + 16], *dkey, *c; time_t now; int reuse = 0; char ip[INET6_ADDRSTRLEN] = {0}; int port, c_len, from_len; c = memchr(route, '/', routelen); from_len = c - route; c++; c_len = routelen - (c - route); dkey = strndup(c, c_len); log_debug(ZONE, "trying to find connection for '%s'", dkey); *out = (conn_t) xhash_get(s2s->out_dest, dkey); if(*out == NULL) { log_debug(ZONE, "connection for '%s' not found", dkey); /* check resolver cache for ip/port */ dns = xhash_get(s2s->dnscache, dkey); if(dns == NULL) { /* new resolution */ log_debug(ZONE, "no dns for %s, preparing for resolution", dkey); dns = (dnscache_t) calloc(1, sizeof(struct dnscache_st)); strcpy(dns->name, dkey); xhash_put(s2s->dnscache, dns->name, (void *) dns); #if 0 /* this is good for testing */ dns->pending = 0; strcpy(dns->ip, "127.0.0.1"); dns->port = 3000; dns->expiry = time(NULL) + 99999999; #endif } /* resolution in progress */ if(dns->pending) { log_debug(ZONE, "pending resolution"); free(dkey); return 0; } /* has it expired (this is 0 for new cache objects, so they're always expired */ now = time(NULL); /* each entry must be expired no earlier than the collection */ if(now > dns->expiry) { /* resolution required */ log_debug(ZONE, "requesting resolution for %s", dkey); dns->init_time = time(NULL); dns->pending = 1; dns_resolve_domain(s2s, dns); free(dkey); return 0; } /* dns is valid */ if (dns_select(s2s, ip, &port, now, dns, allow_bad)) { /* failed to find anything acceptable */ free(dkey); return -1; } /* re-request resolution if dns_select expired the data */ if (now > dns->expiry) { /* resolution required */ log_debug(ZONE, "requesting resolution for %s", dkey); dns->init_time = time(NULL); dns->pending = 1; dns_resolve_domain(s2s, dns); free(dkey); return 0; } /* generate the ip/port pair, this is the hash key for the 
conn */ snprintf(ipport, INET6_ADDRSTRLEN + 16, "%s/%d", ip, port); /* try to re-use an existing connection */ if (s2s->out_reuse) *out = (conn_t) xhash_get(s2s->out_host, ipport); if (*out != NULL) { log_write(s2s->log, LOG_NOTICE, "[%d] [%s, port=%d] using connection for '%s'", (*out)->fd->fd, (*out)->ip, (*out)->port, dkey); /* associate existing connection with domain */ xhash_put(s2s->out_dest, s2s->out_reuse ? pstrdup(xhash_pool((*out)->routes), dkey) : dkey, (void *) *out); reuse = 1; } else{ /* no conn, create one */ *out = (conn_t) calloc(1, sizeof(struct conn_st)); (*out)->s2s = s2s; (*out)->key = strdup(ipport); if (s2s->out_reuse) (*out)->dkey = NULL; else (*out)->dkey = dkey; strcpy((*out)->ip, ip); (*out)->port = port; (*out)->states = xhash_new(101); (*out)->states_time = xhash_new(101); (*out)->routes = xhash_new(101); (*out)->init_time = time(NULL); if (s2s->out_reuse) xhash_put(s2s->out_host, (*out)->key, (void *) *out); xhash_put(s2s->out_dest, s2s->out_reuse ? pstrdup(xhash_pool((*out)->routes), dkey) : dkey, (void *) *out); xhash_put((*out)->routes, pstrdupx(xhash_pool((*out)->routes), route, routelen), (void *) 1); /* connect */ log_debug(ZONE, "initiating connection to %s", ipport); /* APPLE: multiple origin_ips may be specified; use IPv6 if possible or otherwise IPv4 */ int ip_is_v6 = 0; if (strchr(ip, ':') != NULL) ip_is_v6 = 1; int i; for (i = 0; i < s2s->origin_nips; i++) { // only bother with mio_connect if the src and dst IPs are of the same type if ((ip_is_v6 && (strchr(s2s->origin_ips[i], ':') != NULL)) || // both are IPv6 (! 
ip_is_v6 && (strchr(s2s->origin_ips[i], ':') == NULL))) // both are IPv4 (*out)->fd = mio_connect(s2s->mio, port, ip, s2s->origin_ips[i], _out_mio_callback, (void *) *out); if ((*out)->fd != NULL) break; } if ((*out)->fd == NULL) { log_write(s2s->log, LOG_NOTICE, "[%d] [%s, port=%d] mio_connect error: %s (%d)", -1, (*out)->ip, (*out)->port, MIO_STRERROR(MIO_ERROR), MIO_ERROR); _out_dns_mark_bad(*out); if (s2s->out_reuse) xhash_zap(s2s->out_host, (*out)->key); xhash_zap(s2s->out_dest, dkey); xhash_free((*out)->states); xhash_free((*out)->states_time); xhash_free((*out)->routes); free((*out)->key); free((*out)->dkey); free(*out); *out = NULL; /* try again without allowing bad hosts */ return out_route(s2s, route, routelen, out, 0); } else { log_write(s2s->log, LOG_NOTICE, "[%d] [%s, port=%d] outgoing connection for '%s'", (*out)->fd->fd, (*out)->ip, (*out)->port, dkey); (*out)->s = sx_new(s2s->sx_env, (*out)->fd->fd, _out_sx_callback, (void *) *out); #ifdef HAVE_SSL /* Send a stream version of 1.0 if we can do STARTTLS */ if(s2s->sx_ssl != NULL) { sx_client_init((*out)->s, S2S_DB_HEADER, uri_SERVER, dkey, pstrdupx(xhash_pool((*out)->routes), route, from_len), "1.0"); } else { sx_client_init((*out)->s, S2S_DB_HEADER, uri_SERVER, NULL, NULL, NULL); } #else sx_client_init((*out)->s, S2S_DB_HEADER, uri_SERVER, NULL, NULL, NULL); #endif /* dkey is now used by the hash table */ return 0; } } } else { log_debug(ZONE, "connection for '%s' found (%d %s/%d)", dkey, (*out)->fd->fd, (*out)->ip, (*out)->port); } /* connection in progress, or re-using connection: add to routes list */ if (!(*out)->online || reuse) { if (xhash_getx((*out)->routes, route, routelen) == NULL) xhash_put((*out)->routes, pstrdupx(xhash_pool((*out)->routes), route, routelen), (void *) 1); } free(dkey); return 0; } void out_pkt_free(pkt_t pkt) { nad_free(pkt->nad); jid_free(pkt->from); jid_free(pkt->to); free(pkt); } /** send a packet out */ int out_packet(s2s_t s2s, pkt_t pkt) { char *rkey; int rkeylen; 
conn_t out; conn_state_t state; int ret; /* perform check against whitelist */ if (s2s->enable_whitelist > 0 && (pkt->to->domain != NULL) && (s2s_domain_in_whitelist(s2s, pkt->to->domain) == 0)) { log_write(s2s->log, LOG_NOTICE, "sending a packet to domain not in the whitelist, dropping it"); if (pkt->to != NULL) jid_free(pkt->to); if (pkt->from != NULL) jid_free(pkt->from); if (pkt->nad != NULL) nad_free(pkt->nad); free(pkt); return; } /* new route key */ rkey = s2s_route_key(NULL, pkt->from->domain, pkt->to->domain); rkeylen = strlen(rkey); /* get a connection */ ret = out_route(s2s, rkey, rkeylen, &out, 1); if (out == NULL) { /* connection not available, queue packet */ _out_packet_queue(s2s, pkt); /* check if out_route was successful in attempting a connection */ if (ret) { /* bounce queue */ out_bounce_route_queue(s2s, rkey, rkeylen, stanza_err_SERVICE_UNAVAILABLE); free(rkey); return -1; } free(rkey); return 0; } /* connection in progress */ if(!out->online) { log_debug(ZONE, "connection in progress, queueing packet"); _out_packet_queue(s2s, pkt); free(rkey); return 0; } /* connection state */ state = (conn_state_t) xhash_get(out->states, rkey); /* valid conns or dialback packets */ if(state == conn_VALID || pkt->db) { log_debug(ZONE, "writing packet for %s to outgoing conn %d", rkey, out->fd->fd); /* send it straight out */ if(pkt->db) { /* if this is a db:verify packet, increment counter and set timestamp */ if(NAD_ENAME_L(pkt->nad, 0) == 6 && strncmp("verify", NAD_ENAME(pkt->nad, 0), 6) == 0) { out->verify++; out->last_verify = time(NULL); } /* dialback packet */ sx_nad_write(out->s, pkt->nad); } else { /* if the outgoing stanza has a jabber:client namespace, remove it so that the stream jabber:server namespaces will apply (XMPP 11.2.2) */ int ns = nad_find_namespace(pkt->nad, 1, uri_CLIENT, NULL); if(ns >= 0) { /* clear the namespaces of elem 0 (internal route element) and elem 1 (message|iq|presence) */ pkt->nad->elems[0].ns = -1; 
pkt->nad->elems[0].my_ns = -1; pkt->nad->elems[1].ns = -1; pkt->nad->elems[1].my_ns = -1; } /* send it out */ sx_nad_write_elem(out->s, pkt->nad, 1); } /* update timestamp */ out->last_packet = time(NULL); jid_free(pkt->from); jid_free(pkt->to); free(pkt); free(rkey); return 0; } /* can't be handled yet, queue */ _out_packet_queue(s2s, pkt); /* if dialback is in progress, then we're done for now */ if(state == conn_INPROGRESS) { free(rkey); return 0; } /* this is a new route - send dialback auth request to piggyback on the existing connection */ if (out->s2s->require_tls == 0 || out->s->ssf > 0) { _out_dialback(out, rkey, rkeylen); } free(rkey); return 0; } char *dns_make_ipport(char *host, int port) { char *c; assert(port > 0 && port < 65536); c = (char *) malloc(strlen(host) + 7); sprintf(c, "%s/%d", host, port); return c; } static void _dns_add_result(dnsquery_t query, char *ip, int port, int prio, int weight, unsigned int ttl) { char *ipport = dns_make_ipport(ip, port); dnsres_t res = xhash_get(query->results, ipport); if (res != NULL) { if (prio < res->prio) res->prio = prio; if (prio < res->prio) { /* duplicate host at lower prio - reset weight */ res->weight = weight; } else if (prio == res->prio) { /* duplicate host at same prio - add to weight */ res->weight += weight; if (res->weight > (65535 << 8)) res->weight = (65535 << 8); } if (ttl > res->expiry) res->expiry = ttl; if (ttl > query->expiry) query->expiry = ttl; log_debug(ZONE, "dns result updated for %s@%p: %s (%d/%d/%d)", query->name, query, ipport, res->prio, (res->weight >> 8), res->expiry); } else if (xhash_count(query->results) < DNS_MAX_RESULTS) { res = pmalloc(xhash_pool(query->results), sizeof(struct dnsres_st)); res->key = pstrdup(xhash_pool(query->results), ipport); res->prio = prio; res->weight = weight; res->expiry = ttl; if (ttl > query->expiry) query->expiry = ttl; xhash_put(query->results, res->key, res); log_debug(ZONE, "dns result added for %s@%p: %s (%d/%d/%d)", query->name, query, 
ipport, res->prio, (res->weight >> 8), res->expiry); } else { log_debug(ZONE, "dns result ignored for %s@%p: %s (%d/%d/%d)", query->name, query, ipport, prio, (weight >> 8), ttl); } free(ipport); } static void _dns_add_host(dnsquery_t query, char *ip, int port, int prio, int weight, unsigned int ttl) { char *ipport = dns_make_ipport(ip, port); dnsres_t res = xhash_get(query->hosts, ipport); /* update host weights: * RFC 2482 "In the presence of records containing weights greater * than 0, records with weight 0 should have a very small chance of * being selected." * 0 -> 16 * 1-65535 -> 256-16776960 */ if (weight == 0) weight = 1 << 4; else weight <<= 8; if (res != NULL) { if (prio < res->prio) res->prio = prio; if (prio < res->prio) { /* duplicate host at lower prio - reset weight */ res->weight = weight; } else if (prio == res->prio) { /* duplicate host at same prio - add to weight */ res->weight += weight; if (res->weight > (65535 << 8)) res->weight = (65535 << 8); } if (ttl > res->expiry) res->expiry = ttl; log_debug(ZONE, "dns host updated for %s@%p: %s (%d/%d/%d)", query->name, query, ipport, res->prio, (res->weight >> 8), res->expiry); } else if (xhash_count(query->hosts) < DNS_MAX_RESULTS) { res = pmalloc(xhash_pool(query->hosts), sizeof(struct dnsres_st)); res->key = pstrdup(xhash_pool(query->hosts), ipport); res->prio = prio; res->weight = weight; res->expiry = ttl; xhash_put(query->hosts, res->key, res); log_debug(ZONE, "dns host added for %s@%p: %s (%d/%d/%d)", query->name, query, ipport, res->prio, (res->weight >> 8), res->expiry); } else { log_debug(ZONE, "dns host ignored for %s@%p: %s (%d/%d/%d)", query->name, query, ipport, prio, (weight >> 8), ttl); } free(ipport); } /* this function is called with a NULL ctx to start the SRV process */ static void _dns_result_srv(struct dns_ctx *ctx, struct dns_rr_srv *result, void *data) { dnsquery_t query = data; assert(query != NULL); query->query = NULL; if (ctx != NULL && result == NULL) { log_debug(ZONE, 
"dns failure for %s@%p: SRV %s (%d)", query->name, query, query->s2s->lookup_srv[query->srv_i], dns_status(ctx)); } else if (result != NULL) { int i; log_debug(ZONE, "dns response for %s@%p: SRV %s %d (%d)", query->name, query, result->dnssrv_qname, result->dnssrv_nrr, result->dnssrv_ttl); for (i = 0; i < result->dnssrv_nrr; i++) { if (strlen(result->dnssrv_srv[i].name) > 0 && result->dnssrv_srv[i].port > 0 && result->dnssrv_srv[i].port < 65536) { log_debug(ZONE, "dns response for %s@%p: SRV %s[%d] %s/%d (%d/%d)", query->name, query, result->dnssrv_qname, i, result->dnssrv_srv[i].name, result->dnssrv_srv[i].port, result->dnssrv_srv[i].priority, result->dnssrv_srv[i].weight); _dns_add_host(query, result->dnssrv_srv[i].name, result->dnssrv_srv[i].port, result->dnssrv_srv[i].priority, result->dnssrv_srv[i].weight, result->dnssrv_ttl); } } free(result); } /* check next SRV service name */ query->srv_i++; if (query->srv_i < query->s2s->lookup_nsrv) { log_debug(ZONE, "dns request for %s@%p: SRV %s", query->name, query, query->s2s->lookup_srv[query->srv_i]); query->query = dns_submit_srv(NULL, query->name, query->s2s->lookup_srv[query->srv_i], "tcp", DNS_NOSRCH, _dns_result_srv, query); /* if submit failed, call ourselves with a NULL result */ if (query->query == NULL) _dns_result_srv(ctx, NULL, query); } else { /* no more SRV records to check, resolve hosts */ if (xhash_count(query->hosts) > 0) { _dns_result_a(NULL, NULL, query); /* no SRV records returned, resolve hostname */ } else { query->cur_host = strdup(query->name); query->cur_port = 5269; query->cur_prio = 0; query->cur_weight = 0; query->cur_expiry = 0; if (query->s2s->resolve_aaaa) { log_debug(ZONE, "dns request for %s@%p: AAAA %s", query->name, query, query->name); query->query = dns_submit_a6(NULL, query->name, DNS_NOSRCH, _dns_result_aaaa, query); /* if submit failed, call ourselves with a NULL result */ if (query->query == NULL) _dns_result_aaaa(ctx, NULL, query); } else { log_debug(ZONE, "dns request for 
%s@%p: A %s", query->name, query, query->name); query->query = dns_submit_a4(NULL, query->name, DNS_NOSRCH, _dns_result_a, query); /* if submit failed, call ourselves with a NULL result */ if (query->query == NULL) _dns_result_a(ctx, NULL, query); } } } } static void _dns_result_aaaa(struct dns_ctx *ctx, struct dns_rr_a6 *result, void *data) { dnsquery_t query = data; char ip[INET6_ADDRSTRLEN]; int i; assert(query != NULL); query->query = NULL; if (ctx != NULL && result == NULL) { log_debug(ZONE, "dns failure for %s@%p: AAAA %s (%d)", query->name, query, query->cur_host, dns_status(ctx)); } else if (result != NULL) { log_debug(ZONE, "dns response for %s@%p: AAAA %s %d (%d)", query->name, query, result->dnsa6_qname, result->dnsa6_nrr, result->dnsa6_ttl); if (query->cur_expiry > 0 && result->dnsa6_ttl > query->cur_expiry) result->dnsa6_ttl = query->cur_expiry; for (i = 0; i < result->dnsa6_nrr; i++) { if (inet_ntop(AF_INET6, &result->dnsa6_addr[i], ip, INET6_ADDRSTRLEN) != NULL) { log_debug(ZONE, "dns response for %s@%p: AAAA %s[%d] %s/%d", query->name, query, result->dnsa6_qname, i, ip, query->cur_port); _dns_add_result(query, ip, query->cur_port, query->cur_prio, query->cur_weight, result->dnsa6_ttl); } } } if (query->cur_host != NULL) { /* do ipv4 resolution too */ log_debug(ZONE, "dns request for %s@%p: A %s", query->name, query, query->cur_host); query->query = dns_submit_a4(NULL, query->cur_host, DNS_NOSRCH, _dns_result_a, query); /* if submit failed, call ourselves with a NULL result */ if (query->query == NULL) _dns_result_a(ctx, NULL, query); } else { /* uh-oh */ log_debug(ZONE, "dns result for %s@%p: AAAA host vanished...", query->name, query); _dns_result_a(NULL, NULL, query); } free(result); } /* try /etc/hosts if the A process did not return any results */ static int _etc_hosts_lookup(const char *cszName, char *szIP, const int ciMaxIPLen) { #define EHL_LINE_LEN 260 int iSuccess = 0; size_t iLen; char szLine[EHL_LINE_LEN + 1]; /* one extra for the space 
character (*) */ char *pcStart, *pcEnd; FILE *fHosts; do { /* initialization */ fHosts = NULL; /* sanity checks */ if ((cszName == NULL) || (szIP == NULL) || (ciMaxIPLen <= 0)) break; szIP[0] = 0; /* open the hosts file */ #ifdef _WIN32 pcStart = getenv("WINDIR"); if (pcStart != NULL) { sprintf(szLine, "%s\\system32\\drivers\\etc\\hosts", pcStart); } else { strcpy(szLine, "C:\\WINDOWS\\system32\\drivers\\etc\\hosts"); } #else strcpy(szLine, "/etc/hosts"); #endif fHosts = fopen(szLine, "r"); if (fHosts == NULL) break; /* read line by line ... */ while (fgets(szLine, EHL_LINE_LEN, fHosts) != NULL) { /* remove comments */ pcStart = strchr (szLine, '#'); if (pcStart != NULL) *pcStart = 0; strcat(szLine, " "); /* append a space character for easier parsing (*) */ /* first to appear: IP address */ iLen = strspn(szLine, "1234567890."); if ((iLen < 7) || (iLen > 15)) /* superficial test for anything between x.x.x.x and xxx.xxx.xxx.xxx */ continue; pcEnd = szLine + iLen; *pcEnd = 0; pcEnd++; /* not beyond the end of the line yet (*) */ /* check strings separated by blanks, tabs or newlines */ pcStart = pcEnd + strspn(pcEnd, " \t\n"); while (*pcStart != 0) { pcEnd = pcStart + strcspn(pcStart, " \t\n"); *pcEnd = 0; pcEnd++; /* not beyond the end of the line yet (*) */ if (strcasecmp(pcStart, cszName) == 0) { strncpy(szIP, szLine, ciMaxIPLen - 1); szIP[ciMaxIPLen - 1] = '\0'; iSuccess = 1; break; } pcStart = pcEnd + strspn(pcEnd, " \t\n"); } if (iSuccess) break; } } while (0); if (fHosts != NULL) fclose(fHosts); return (iSuccess); } /* this function is called with a NULL ctx to start the A/AAAA process */ static void _dns_result_a(struct dns_ctx *ctx, struct dns_rr_a4 *result, void *data) { dnsquery_t query = data; assert(query != NULL); query->query = NULL; if (ctx != NULL && result == NULL) { #define DRA_IP_LEN 16 char szIP[DRA_IP_LEN]; if (_etc_hosts_lookup (query->name, szIP, DRA_IP_LEN)) { log_debug(ZONE, "/etc/lookup for %s@%p: %s (%d)", query->name, query, szIP, 
query->s2s->etc_hosts_ttl); _dns_add_result (query, szIP, query->cur_port, query->cur_prio, query->cur_weight, query->s2s->etc_hosts_ttl); } else { log_debug(ZONE, "dns failure for %s@%p: A %s (%d)", query->name, query, query->cur_host, dns_status(ctx)); } } else if (result != NULL) { char ip[INET_ADDRSTRLEN]; int i; log_debug(ZONE, "dns response for %s@%p: A %s %d (%d)", query->name, query, result->dnsa4_qname, result->dnsa4_nrr, result->dnsa4_ttl); if (query->cur_expiry > 0 && result->dnsa4_ttl > query->cur_expiry) result->dnsa4_ttl = query->cur_expiry; for (i = 0; i < result->dnsa4_nrr; i++) { if (inet_ntop(AF_INET, &result->dnsa4_addr[i], ip, INET_ADDRSTRLEN) != NULL) { log_debug(ZONE, "dns response for %s@%p: A %s[%d] %s/%d", query->name, query, result->dnsa4_qname, i, ip, query->cur_port); _dns_add_result(query, ip, query->cur_port, query->cur_prio, query->cur_weight, result->dnsa4_ttl); } } free(result); } /* resolve the next host in the list */ if (xhash_iter_first(query->hosts)) { char *ipport, *c, *tmp; int ipport_len, ip_len, port_len; dnsres_t res; union xhashv xhv; xhv.dnsres_val = &res; /* get the first entry */ xhash_iter_get(query->hosts, (const char **) &ipport, &ipport_len, xhv.val); /* remove the host from the list */ xhash_iter_zap(query->hosts); c = memchr(ipport, '/', ipport_len); ip_len = c - ipport; c++; port_len = ipport_len - (c - ipport); /* resolve hostname */ free(query->cur_host); query->cur_host = strndup(ipport, ip_len); tmp = strndup(c, port_len); query->cur_port = atoi(tmp); free(tmp); query->cur_prio = res->prio; query->cur_weight = res->weight; query->cur_expiry = res->expiry; log_debug(ZONE, "dns ttl for %s@%p limited to %d", query->name, query, query->cur_expiry); if (query->s2s->resolve_aaaa) { log_debug(ZONE, "dns request for %s@%p: AAAA %s", query->name, query, query->cur_host); query->query = dns_submit_a6(NULL, query->cur_host, DNS_NOSRCH, _dns_result_aaaa, query); /* if submit failed, call ourselves with a NULL result */ 
if (query->query == NULL) _dns_result_aaaa(ctx, NULL, query); } else { log_debug(ZONE, "dns request for %s@%p: A %s", query->name, query, query->cur_host); query->query = dns_submit_a4(NULL, query->cur_host, DNS_NOSRCH, _dns_result_a, query); /* if submit failed, call ourselves with a NULL result */ if (query->query == NULL) _dns_result_a(ctx, NULL, query); } /* finished */ } else { time_t now = time(NULL); char *domain; free(query->cur_host); query->cur_host = NULL; log_debug(ZONE, "dns requests for %s@%p complete: %d (%d)", query->name, query, xhash_count(query->results), query->expiry); /* update query TTL */ if (query->expiry > query->s2s->dns_max_ttl) query->expiry = query->s2s->dns_max_ttl; if (query->expiry < query->s2s->dns_min_ttl) query->expiry = query->s2s->dns_min_ttl; query->expiry += now; /* update result TTLs - the query expiry MUST NOT be longer than all result expiries */ if (xhash_iter_first(query->results)) { union xhashv xhv; dnsres_t res; xhv.dnsres_val = &res; do { xhash_iter_get(query->results, NULL, NULL, xhv.val); if (res->expiry > query->s2s->dns_max_ttl) res->expiry = query->s2s->dns_max_ttl; if (res->expiry < query->s2s->dns_min_ttl) res->expiry = query->s2s->dns_min_ttl; res->expiry += now; } while(xhash_iter_next(query->results)); } xhash_free(query->hosts); query->hosts = NULL; if (idna_to_unicode_8z8z(query->name, &domain, 0) != IDNA_SUCCESS) { log_write(query->s2s->log, LOG_ERR, "idna dns decode for %s failed", query->name); /* fake empty results to shortcut resolution failure */ xhash_free(query->results); query->results = xhash_new(71); query->expiry = time(NULL) + 99999999; domain = strdup(query->name); } out_resolve(query->s2s, domain, query->results, query->expiry); free(domain); free(query->name); free(query); } } void dns_resolve_domain(s2s_t s2s, dnscache_t dns) { dnsquery_t query = (dnsquery_t) calloc(1, sizeof(struct dnsquery_st)); query->s2s = s2s; query->results = xhash_new(71); if (idna_to_ascii_8z(dns->name, 
&query->name, 0) != IDNA_SUCCESS) { log_write(s2s->log, LOG_ERR, "idna dns encode for %s failed", dns->name); /* shortcut resolution failure */ query->expiry = time(NULL) + 99999999; out_resolve(query->s2s, dns->name, query->results, query->expiry); return; } query->hosts = xhash_new(71); query->srv_i = -1; query->expiry = 0; query->cur_host = NULL; query->cur_port = 0; query->cur_expiry = 0; query->query = NULL; dns->query = query; log_debug(ZONE, "dns resolve for %s@%p started", query->name, query); /* - resolve all SRV records to host/port * - if no results, include domain/5269 * - resolve all host/port combinations * - return result */ _dns_result_srv(NULL, NULL, query); } /** responses from the resolver */ void out_resolve(s2s_t s2s, char *domain, xht results, time_t expiry) { dnscache_t dns; /* no results, resolve failed */ if(xhash_count(results) == 0) { dns = xhash_get(s2s->dnscache, domain); if (dns != NULL) { /* store negative DNS cache */ xhash_free(dns->results); dns->query = NULL; dns->results = NULL; dns->expiry = expiry; dns->pending = 0; } log_write(s2s->log, LOG_NOTICE, "dns lookup for %s failed", domain); /* bounce queue */ out_bounce_domain_queues(s2s, domain, stanza_err_REMOTE_SERVER_NOT_FOUND); xhash_free(results); return; } log_write(s2s->log, LOG_NOTICE, "dns lookup for %s returned %d result%s (ttl %d)", domain, xhash_count(results), xhash_count(results)!=1?"s":"", expiry - time(NULL)); /* get the cache entry */ dns = xhash_get(s2s->dnscache, domain); if(dns == NULL) { /* retry using punycode */ char *punydomain; if (idna_to_ascii_8z(domain, &punydomain, 0) == IDNA_SUCCESS) { dns = xhash_get(s2s->dnscache, punydomain); free(punydomain); } } if(dns == NULL) { log_write(s2s->log, LOG_ERR, "weird, never requested %s resolution", domain); return; } /* fill it out */ xhash_free(dns->results); dns->query = NULL; dns->results = results; dns->expiry = expiry; dns->pending = 0; out_flush_domain_queues(s2s, domain); /* delete the cache entry if caching 
is disabled */ if (!s2s->dns_cache_enabled && !dns->pending) { xhash_free(dns->results); xhash_zap(s2s->dnscache, domain); free(dns); } } /** mio callback for outgoing conns */ static int _out_mio_callback(mio_t m, mio_action_t a, mio_fd_t fd, void *data, void *arg) { conn_t out = (conn_t) arg; char ipport[INET6_ADDRSTRLEN + 17]; int nbytes; switch(a) { case action_READ: log_debug(ZONE, "read action on fd %d", fd->fd); /* they did something */ out->last_activity = time(NULL); ioctl(fd->fd, FIONREAD, &nbytes); if(nbytes == 0) { sx_kill(out->s); return 0; } return sx_can_read(out->s); case action_WRITE: log_debug(ZONE, "write action on fd %d", fd->fd); /* update activity timestamp */ out->last_activity = time(NULL); return sx_can_write(out->s); case action_CLOSE: log_debug(ZONE, "close action on fd %d", fd->fd); jqueue_push(out->s2s->dead, (void *) out->s, 0); log_write(out->s2s->log, LOG_NOTICE, "[%d] [%s, port=%d] disconnect, packets: %i", fd->fd, out->ip, out->port, out->packet_count); if (out->s2s->out_reuse) { /* generate the ip/port pair */ snprintf(ipport, INET6_ADDRSTRLEN + 16, "%s/%d", out->ip, out->port); xhash_zap(out->s2s->out_host, ipport); } if (xhash_iter_first(out->routes)) { char *rkey; int rkeylen; char *c; int c_len; /* remove all the out_dest entries */ do { xhash_iter_get(out->routes, (const char **) &rkey, &rkeylen, NULL); c = memchr(rkey, '/', rkeylen); c++; c_len = rkeylen - (c - rkey); log_debug(ZONE, "route '%.*s'", rkeylen, rkey); if (xhash_getx(out->s2s->out_dest, c, c_len) != NULL) { log_debug(ZONE, "removing dest entry for '%.*s'", c_len, c); xhash_zapx(out->s2s->out_dest, c, c_len); } } while(xhash_iter_next(out->routes)); } if (xhash_iter_first(out->routes)) { char *rkey; int rkeylen; jqueue_t q; int npkt; /* retry all the routes */ do { xhash_iter_get(out->routes, (const char **) &rkey, &rkeylen, NULL); q = xhash_getx(out->s2s->outq, rkey, rkeylen); if (out->s2s->retry_limit > 0 && q != NULL && jqueue_age(q) > out->s2s->retry_limit) { 
log_write(out->s2s->log, LOG_NOTICE, "[%d] [%s, port=%d] retry limit reached for '%.*s' queue", fd->fd, out->ip, out->port, rkeylen, rkey); q = NULL; } if (q != NULL && (npkt = jqueue_size(q)) > 0 && xhash_get(out->states, rkey) != (void*) conn_INPROGRESS) { conn_t retry; log_debug(ZONE, "retrying connection for '%.*s' queue", rkeylen, rkey); if (!out_route(out->s2s, rkey, rkeylen, &retry, 0)) { log_debug(ZONE, "retry successful"); if (retry != NULL) { /* flush queue */ out_flush_route_queue(out->s2s, rkey, rkeylen); } } else { log_debug(ZONE, "retry failed"); /* bounce queue */ out_bounce_route_queue(out->s2s, rkey, rkeylen, stanza_err_SERVICE_UNAVAILABLE); _out_dns_mark_bad(out); } } else { /* bounce queue */ out_bounce_route_queue(out->s2s, rkey, rkeylen, stanza_err_REMOTE_SERVER_TIMEOUT); _out_dns_mark_bad(out); } } while(xhash_iter_next(out->routes)); } jqueue_push(out->s2s->dead_conn, (void *) out, 0); case action_ACCEPT: break; } return 0; } void send_dialbacks(conn_t out) { char *rkey; int rkeylen; if (out->s2s->dns_bad_timeout > 0) { dnsres_t bad = xhash_get(out->s2s->dns_bad, out->key); if (bad != NULL) { log_debug(ZONE, "removing bad host entry for '%s'", out->key); xhash_zap(out->s2s->dns_bad, out->key); free(bad->key); free(bad); } } if (xhash_iter_first(out->routes)) { log_debug(ZONE, "sending dialback packets for %s", out->key); do { xhash_iter_get(out->routes, (const char **) &rkey, &rkeylen, NULL); _out_dialback(out, rkey, rkeylen); } while(xhash_iter_next(out->routes)); } return; } static int _out_sx_callback(sx_t s, sx_event_t e, void *data, void *arg) { conn_t out = (conn_t) arg; sx_buf_t buf = (sx_buf_t) data; int len, ns, elem, starttls = 0; sx_error_t *sxe; nad_t nad; switch(e) { case event_WANT_READ: log_debug(ZONE, "want read"); mio_read(out->s2s->mio, out->fd); break; case event_WANT_WRITE: log_debug(ZONE, "want write"); mio_write(out->s2s->mio, out->fd); break; case event_READ: log_debug(ZONE, "reading from %d", out->fd->fd); /* do the 
read */ len = recv(out->fd->fd, buf->data, buf->len, 0); if(len < 0) { if(MIO_WOULDBLOCK) { buf->len = 0; return 0; } log_write(out->s2s->log, LOG_NOTICE, "[%d] [%s, port=%d] read error: %s (%d)", out->fd->fd, out->ip, out->port, MIO_STRERROR(MIO_ERROR), MIO_ERROR); if (!out->online) { _out_dns_mark_bad(out); } sx_kill(s); return -1; } else if(len == 0) { /* they went away */ sx_kill(s); return -1; } log_debug(ZONE, "read %d bytes", len); buf->len = len; return len; case event_WRITE: log_debug(ZONE, "writing to %d", out->fd->fd); len = send(out->fd->fd, buf->data, buf->len, 0); if(len >= 0) { log_debug(ZONE, "%d bytes written", len); return len; } if(MIO_WOULDBLOCK) return 0; log_write(out->s2s->log, LOG_NOTICE, "[%d] [%s, port=%d] write error: %s (%d)", out->fd->fd, out->ip, out->port, MIO_STRERROR(MIO_ERROR), MIO_ERROR); if (!out->online) { _out_dns_mark_bad(out); } sx_kill(s); return -1; case event_ERROR: sxe = (sx_error_t *) data; log_write(out->s2s->log, LOG_NOTICE, "[%d] [%s, port=%d] error: %s (%s)", out->fd->fd, out->ip, out->port, sxe->generic, sxe->specific); /* mark as bad if we did not manage to connect or there is unrecoverable stream error */ if (!out->online || (sxe->code == SX_ERR_STREAM && (strstr(sxe->specific, "host-gone") || /* it's not there now */ strstr(sxe->specific, "host-unknown") || /* they do not service the host */ strstr(sxe->specific, "not-authorized") || /* they do not want us there */ strstr(sxe->specific, "see-other-host") || /* we do not support redirections yet */ strstr(sxe->specific, "system-shutdown") || /* they are going down */ strstr(sxe->specific, "policy-violation") || /* they do not want us there */ strstr(sxe->specific, "remote-connection-failed") || /* the required remote entity is gone */ strstr(sxe->specific, "unsupported-encoding") || /* they do not like our encoding */ strstr(sxe->specific, "undefined-condition") || /* something bad happend */ strstr(sxe->specific, "internal-server-error") || /* that server is 
broken */ strstr(sxe->specific, "unsupported-version") /* they do not support our stream version */ ))) { _out_dns_mark_bad(out); } sx_kill(s); return -1; case event_OPEN: log_debug(ZONE, "OPEN event for %s", out->key); break; case event_STREAM: /* check stream version - NULl = pre-xmpp (some jabber1 servers) */ log_debug(ZONE, "STREAM event for %s stream version is %s", out->key, out->s->res_version); /* first time, bring them online */ if(!out->online) { log_debug(ZONE, "outgoing conn to %s is online", out->key); /* if no stream version from either side, kick off dialback for each route, */ /* otherwise wait for stream features */ if (((out->s->res_version==NULL) || (out->s2s->sx_ssl == NULL)) && out->s2s->require_tls == 0) { log_debug(ZONE, "no stream version, sending dialbacks for %s immediately", out->key); out->online = 1; send_dialbacks(out); } else log_debug(ZONE, "outgoing conn to %s - waiting for STREAM features", out->key); } break; case event_PACKET: /* we're counting packets */ out->packet_count++; out->s2s->packet_count++; nad = (nad_t) data; /* watch for the features packet - STARTTLS and/or SASL*/ if ((out->s->res_version!=NULL) && NAD_NURI_L(nad, NAD_ENS(nad, 0)) == strlen(uri_STREAMS) && strncmp(uri_STREAMS, NAD_NURI(nad, NAD_ENS(nad, 0)), strlen(uri_STREAMS)) == 0 && NAD_ENAME_L(nad, 0) == 8 && strncmp("features", NAD_ENAME(nad, 0), 8) == 0) { log_debug(ZONE, "got the stream features packet"); #ifdef HAVE_SSL /* starttls if we can */ if(out->s2s->sx_ssl != NULL && s->ssf == 0) { ns = nad_find_scoped_namespace(nad, uri_TLS, NULL); if(ns >= 0) { elem = nad_find_elem(nad, 0, ns, "starttls", 1); if(elem >= 0) { log_debug(ZONE, "got STARTTLS in stream features"); if(sx_ssl_client_starttls(out->s2s->sx_ssl, s, out->s2s->local_pemfile) == 0) { starttls = 1; nad_free(nad); return 0; } log_write(out->s2s->log, LOG_ERR, "unable to establish encrypted session with peer"); } } } /* If we're not establishing a starttls connection, send dialbacks */ if 
(!starttls) { if (out->s2s->require_tls == 0 || s->ssf > 0) { log_debug(ZONE, "No STARTTLS, sending dialbacks for %s", out->key); out->online = 1; send_dialbacks(out); } else { log_debug(ZONE, "No STARTTLS, dialbacks disabled for non-TLS connections, cannot complete negotiation"); } } #else if (out->s2s->require_tls == 0) { out->online = 1; send_dialbacks(out); } #endif } /* we only accept dialback packets */ if(NAD_ENS(nad, 0) < 0 || NAD_NURI_L(nad, NAD_ENS(nad, 0)) != uri_DIALBACK_L || strncmp(uri_DIALBACK, NAD_NURI(nad, NAD_ENS(nad, 0)), uri_DIALBACK_L) != 0) { log_debug(ZONE, "got a non-dialback packet on an outgoing conn, dropping it"); nad_free(nad); return 0; } /* and then only result and verify */ if(NAD_ENAME_L(nad, 0) == 6) { if(strncmp("result", NAD_ENAME(nad, 0), 6) == 0) { _out_result(out, nad); return 0; } if(strncmp("verify", NAD_ENAME(nad, 0), 6) == 0) { _out_verify(out, nad); return 0; } } log_debug(ZONE, "unknown dialback packet, dropping it"); nad_free(nad); return 0; case event_CLOSED: if (out->fd != NULL) { mio_close(out->s2s->mio, out->fd); out->fd = NULL; } return -1; } return 0; } /** process incoming auth responses */ static void _out_result(conn_t out, nad_t nad) { int attr; jid_t from, to; char *rkey; int rkeylen; attr = nad_find_attr(nad, 0, -1, "from", NULL); if(attr < 0 || (from = jid_new(NAD_AVAL(nad, attr), NAD_AVAL_L(nad, attr))) == NULL) { log_debug(ZONE, "missing or invalid from on db result packet"); nad_free(nad); return; } attr = nad_find_attr(nad, 0, -1, "to", NULL); if(attr < 0 || (to = jid_new(NAD_AVAL(nad, attr), NAD_AVAL_L(nad, attr))) == NULL) { log_debug(ZONE, "missing or invalid to on db result packet"); jid_free(from); nad_free(nad); return; } rkey = s2s_route_key(NULL, to->domain, from->domain); rkeylen = strlen(rkey); /* key is valid */ if(nad_find_attr(nad, 0, -1, "type", "valid") >= 0) { log_write(out->s2s->log, LOG_NOTICE, "[%d] [%s, port=%d] outgoing route '%s' is now valid%s%s", out->fd->fd, out->ip, out->port, 
rkey, (out->s->flags & SX_SSL_WRAPPER) ? ", TLS negotiated" : "", out->s->compressed ? ", ZLIB compression enabled" : ""); xhash_put(out->states, pstrdup(xhash_pool(out->states), rkey), (void *) conn_VALID); /* !!! small leak here */ log_debug(ZONE, "%s valid, flushing queue", rkey); /* flush the queue */ out_flush_route_queue(out->s2s, rkey, rkeylen); free(rkey); jid_free(from); jid_free(to); nad_free(nad); return; } /* invalid */ log_write(out->s2s->log, LOG_NOTICE, "[%d] [%s, port=%d] outgoing route '%s' is now invalid", out->fd->fd, out->ip, out->port, rkey); /* close connection */ log_write(out->s2s->log, LOG_NOTICE, "[%d] [%s, port=%d] closing connection", out->fd->fd, out->ip, out->port); /* report stream error */ sx_error(out->s, stream_err_INVALID_ID, "dialback negotiation failed"); /* close the stream */ sx_close(out->s); /* bounce queue */ out_bounce_route_queue(out->s2s, rkey, rkeylen, stanza_err_SERVICE_UNAVAILABLE); free(rkey); jid_free(from); jid_free(to); nad_free(nad); } /** incoming stream authenticated */ static void _out_verify(conn_t out, nad_t nad) { int attr, ns; jid_t from, to; conn_t in; char *rkey; int valid; attr = nad_find_attr(nad, 0, -1, "from", NULL); if(attr < 0 || (from = jid_new(NAD_AVAL(nad, attr), NAD_AVAL_L(nad, attr))) == NULL) { log_debug(ZONE, "missing or invalid from on db verify packet"); nad_free(nad); return; } attr = nad_find_attr(nad, 0, -1, "to", NULL); if(attr < 0 || (to = jid_new(NAD_AVAL(nad, attr), NAD_AVAL_L(nad, attr))) == NULL) { log_debug(ZONE, "missing or invalid to on db verify packet"); jid_free(from); nad_free(nad); return; } attr = nad_find_attr(nad, 0, -1, "id", NULL); if(attr < 0) { log_debug(ZONE, "missing id on db verify packet"); jid_free(from); jid_free(to); nad_free(nad); return; } /* get the incoming conn */ in = xhash_getx(out->s2s->in, NAD_AVAL(nad, attr), NAD_AVAL_L(nad, attr)); if(in == NULL) { log_debug(ZONE, "got a verify for incoming conn %.*s, but it doesn't exist, dropping the packet", 
NAD_AVAL_L(nad, attr), NAD_AVAL(nad, attr)); jid_free(from); jid_free(to); nad_free(nad); return; } rkey = s2s_route_key(NULL, to->domain, from->domain); attr = nad_find_attr(nad, 0, -1, "type", "valid"); if(attr >= 0) { xhash_put(in->states, pstrdup(xhash_pool(in->states), rkey), (void *) conn_VALID); log_write(in->s2s->log, LOG_NOTICE, "[%d] [%s, port=%d] incoming route '%s' is now valid%s%s", in->fd->fd, in->ip, in->port, rkey, (in->s->flags & SX_SSL_WRAPPER) ? ", TLS negotiated" : "", in->s->compressed ? ", ZLIB compression enabled" : ""); valid = 1; } else { log_write(in->s2s->log, LOG_NOTICE, "[%d] [%s, port=%d] incoming route '%s' is now invalid", in->fd->fd, in->ip, in->port, rkey); valid = 0; } free(rkey); nad_free(nad); /* decrement outstanding verify counter */ --out->verify; /* let them know what happened */ nad = nad_new(); ns = nad_add_namespace(nad, uri_DIALBACK, "db"); nad_append_elem(nad, ns, "result", 0); nad_append_attr(nad, -1, "to", from->domain); nad_append_attr(nad, -1, "from", to->domain); nad_append_attr(nad, -1, "type", valid ? 
"valid" : "invalid"); /* off it goes */ sx_nad_write(in->s, nad); /* if invalid, close the stream */ if (!valid) { /* generate stream error */ sx_error(in->s, stream_err_INVALID_ID, "dialback negotiation failed"); /* close the incoming stream */ sx_close(in->s); } jid_free(from); jid_free(to); } /* bounce all packets in the queues for domain */ int out_bounce_domain_queues(s2s_t s2s, const char *domain, int err) { char *rkey; int rkeylen; int pktcount = 0; if (xhash_iter_first(s2s->outq)) { do { xhash_iter_get(s2s->outq, (const char **) &rkey, &rkeylen, NULL); if(s2s_route_key_match(NULL, (char *) domain, rkey, rkeylen)) pktcount += out_bounce_route_queue(s2s, rkey, rkeylen, err); } while(xhash_iter_next(s2s->outq)); } return pktcount; } /* bounce all packets in the queue for route */ int out_bounce_route_queue(s2s_t s2s, char *rkey, int rkeylen, int err) { jqueue_t q; pkt_t pkt; int pktcount = 0; q = xhash_getx(s2s->outq, rkey, rkeylen); if(q == NULL) return 0; while((pkt = jqueue_pull(q)) != NULL) { /* only packets with content, in namespace jabber:client and not already errors */ if(pkt->nad->ecur > 1 && NAD_NURI_L(pkt->nad, NAD_ENS(pkt->nad, 1)) == strlen(uri_CLIENT) && strncmp(NAD_NURI(pkt->nad, NAD_ENS(pkt->nad, 1)), uri_CLIENT, strlen(uri_CLIENT)) == 0 && nad_find_attr(pkt->nad, 0, -1, "error", NULL) < 0) { sx_nad_write(s2s->router, stanza_tofrom(stanza_tofrom(stanza_error(pkt->nad, 1, err), 1), 0)); pktcount++; } else nad_free(pkt->nad); jid_free(pkt->to); jid_free(pkt->from); free(pkt); } /* delete queue and remove domain from queue hash */ log_debug(ZONE, "deleting out packet queue for %.*s", rkeylen, rkey); rkey = q->key; jqueue_free(q); xhash_zap(s2s->outq, rkey); free(rkey); return pktcount; } int out_bounce_conn_queues(conn_t out, int err) { char *rkey; int rkeylen; int pktcount = 0; /* bounce queues for all domains handled by this connection - iterate through routes */ if (xhash_iter_first(out->routes)) { do { xhash_iter_get(out->routes, (const char 
**) &rkey, &rkeylen, NULL); pktcount += out_bounce_route_queue(out->s2s, rkey, rkeylen, err); } while(xhash_iter_next(out->routes)); } return pktcount; } void out_flush_domain_queues(s2s_t s2s, const char *domain) { char *rkey; int rkeylen; char *c; int c_len; if (xhash_iter_first(s2s->outq)) { do { xhash_iter_get(s2s->outq, (const char **) &rkey, &rkeylen, NULL); c = memchr(rkey, '/', rkeylen); c++; c_len = rkeylen - (c - rkey); if (strncmp(domain, c, c_len) == 0) out_flush_route_queue(s2s, rkey, rkeylen); } while(xhash_iter_next(s2s->outq)); } } void out_flush_route_queue(s2s_t s2s, char *rkey, int rkeylen) { jqueue_t q; pkt_t pkt; int npkt, i, ret; q = xhash_getx(s2s->outq, rkey, rkeylen); if(q == NULL) return; npkt = jqueue_size(q); log_debug(ZONE, "flushing %d packets for '%.*s' to out_packet", npkt, rkeylen, rkey); for(i = 0; i < npkt; i++) { pkt = jqueue_pull(q); if(pkt) { ret = out_packet(s2s, pkt); if (ret) { /* uh-oh. the queue was deleted... q and pkt have been freed if q->key == rkey, rkey has also been freed */ return; } } } /* delete queue for route and remove route from queue hash */ if (jqueue_size(q) == 0) { log_debug(ZONE, "deleting out packet queue for '%.*s'", rkeylen, rkey); rkey = q->key; jqueue_free(q); xhash_zap(s2s->outq, rkey); free(rkey); } else { log_debug(ZONE, "emptied queue gained more packets..."); } }
/*
 * jabberd - Jabber Open Source Server
 * Copyright (c) 2002 Jeremie Miller, Thomas Muldowney,
 *                    Ryan Eatmon, Robert Norris
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation; either version 2 of the License, or
 * (at your option) any later version.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, write to the Free Software
 * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
 */

#define _GNU_SOURCE
#include <string.h>
#include "s2s.h"
#include <idna.h>

/*
 * we handle packets going from the router to the world, and stuff
 * that comes in on connections we initiated.
* * action points: * * out_packet(s2s, nad) - send this packet out * - extract to domain * - get dbconn for this domain using out_route * - if dbconn not available bounce packet * - DONE * - if conn in progress (tcp) * - add packet to queue for this domain * - DONE * - if dbconn state valid for this domain, or packet is dialback * - send packet * - DONE * - if dbconn state invalid for this domain * - bounce packet (502) * - DONE * - add packet to queue for this domain * - if dbconn state inprogress for this domain * - DONE * - out_dialback(dbconn, from, to) * * out_route(s2s, route, out, allow_bad) * - if dbconn not found * - check internal resolver cache for domain * - if not found * - ask resolver for name * - DONE * - if outgoing ip/port is to be reused * - get dbconn for any valid ip/port * - if dbconn not found * - create new dbconn * - initiate connect to ip/port * - DONE * - create new dbconn * - initiate connect to ip/port * - DONE * * out_dialback(dbconn, from, to) - initiate dialback * - generate dbkey: sha1(secret+remote+stream id) * - send auth request: <result to='them' from='us'>dbkey</result> * - set dbconn state for this domain to inprogress * - DONE * * out_resolve(s2s, query) - responses from resolver * - store ip/port/ttl in resolver cache * - flush domain queue -> out_packet(s2s, domain) * - DONE * * event_STREAM - ip/port open * - get dbconn for this sx * - for each route handled by this conn, out_dialback(dbconn, from, to) * - DONE * * event_PACKET: <result from='them' to='us' type='xxx'/> - response to our auth request * - get dbconn for this sx * - if type valid * - set dbconn state for this domain to valid * - flush dbconn queue for this domain -> out_packet(s2s, pkt) * - DONE * - set dbconn state for this domain to invalid * - bounce dbconn queue for this domain (502) * - DONE * * event_PACKET: <verify from='them' to='us' id='123' type='xxx'/> - incoming stream authenticated * - get dbconn for given id * - if type is valid * - set dbconn 
state for this domain to valid * - send result: <result to='them' from='us' type='xxx'/> * - DONE */ /* forward decls */ static int _out_mio_callback(mio_t m, mio_action_t a, mio_fd_t fd, void *data, void *arg); static int _out_sx_callback(sx_t s, sx_event_t e, void *data, void *arg); static void _out_result(conn_t out, nad_t nad); static void _out_verify(conn_t out, nad_t nad); static void _dns_result_aaaa(struct dns_ctx *ctx, struct dns_rr_a6 *result, void *data); static void _dns_result_a(struct dns_ctx *ctx, struct dns_rr_a4 *result, void *data); /** queue the packet */ static void _out_packet_queue(s2s_t s2s, pkt_t pkt) { char *rkey = s2s_route_key(NULL, pkt->from->domain, pkt->to->domain); jqueue_t q = (jqueue_t) xhash_get(s2s->outq, rkey); if(q == NULL) { log_debug(ZONE, "creating new out packet queue for '%s'", rkey); q = jqueue_new(); q->key = rkey; xhash_put(s2s->outq, q->key, (void *) q); } else { free(rkey); } log_debug(ZONE, "queueing packet for '%s'", q->key); jqueue_push(q, (void *) pkt, 0); } static void _out_dialback(conn_t out, char *rkey, int rkeylen) { char *c, *dbkey, *tmp; nad_t nad; int elem, ns; int from_len, to_len; time_t now; now = time(NULL); c = memchr(rkey, '/', rkeylen); from_len = c - rkey; c++; to_len = rkeylen - (c - rkey); /* kick off the dialback */ tmp = strndup(c, to_len); dbkey = s2s_db_key(NULL, out->s2s->local_secret, tmp, out->s->id); free(tmp); nad = nad_new(); /* request auth */ ns = nad_add_namespace(nad, uri_DIALBACK, "db"); elem = nad_append_elem(nad, ns, "result", 0); nad_set_attr(nad, elem, -1, "from", rkey, from_len); nad_set_attr(nad, elem, -1, "to", c, to_len); nad_append_cdata(nad, dbkey, strlen(dbkey), 1); log_debug(ZONE, "sending auth request for %.*s (key %s)", rkeylen, rkey, dbkey); log_write(out->s2s->log, LOG_NOTICE, "[%d] [%s, port=%d] sending dialback auth request for route '%.*s'", out->fd->fd, out->ip, out->port, rkeylen, rkey); /* off it goes */ sx_nad_write(out->s, nad); free(dbkey); /* we're in 
progress now */ xhash_put(out->states, pstrdupx(xhash_pool(out->states), rkey, rkeylen), (void *) conn_INPROGRESS); /* record the time that we set conn_INPROGRESS state */ xhash_put(out->states_time, pstrdupx(xhash_pool(out->states_time), rkey, rkeylen), (void *) now); } void _out_dns_mark_bad(conn_t out) { if (out->s2s->dns_bad_timeout > 0) { dnsres_t bad; char *ipport; /* mark this host as bad */ ipport = dns_make_ipport(out->ip, out->port); bad = xhash_get(out->s2s->dns_bad, ipport); if (bad == NULL) { bad = (dnsres_t) calloc(1, sizeof(struct dnsres_st)); bad->key = ipport; xhash_put(out->s2s->dns_bad, ipport, bad); } bad->expiry = time(NULL) + out->s2s->dns_bad_timeout; } } int dns_select(s2s_t s2s, char *ip, int *port, time_t now, dnscache_t dns, int allow_bad) { /* list of results */ dnsres_t l_reuse[DNS_MAX_RESULTS]; dnsres_t l_aaaa[DNS_MAX_RESULTS]; dnsres_t l_a[DNS_MAX_RESULTS]; dnsres_t l_bad[DNS_MAX_RESULTS]; /* running weight sums of results */ int rw_reuse[DNS_MAX_RESULTS]; int rw_aaaa[DNS_MAX_RESULTS]; int rw_a[DNS_MAX_RESULTS]; int s_reuse = 0, s_aaaa = 0, s_a = 0, s_bad = 0; /* count */ int p_reuse = 0, p_aaaa = 0, p_a = 0; /* list prio */ int wt_reuse = 0, wt_aaaa = 0, wt_a = 0; /* weight total */ int c_expired_good = 0; union xhashv xhv; dnsres_t res; char *ipport; int ipport_len; char *c; int c_len; char *tmp; /* for all results: * - if not expired * - put highest priority reuseable addrs into list1 * - put highest priority ipv6 addrs into list2 * - put highest priority ipv4 addrs into list3 * - put bad addrs into list4 * - pick weighted random entry from first non-empty list */ if (dns->results == NULL) { log_debug(ZONE, "negative cache entry for '%s'", dns->name); return -1; } log_debug(ZONE, "selecting DNS result for '%s'", dns->name); xhv.dnsres_val = &res; if (xhash_iter_first(dns->results)) { dnsres_t bad = NULL; do { xhash_iter_get(dns->results, (const char **) &ipport, &ipport_len, xhv.val); if (s2s->dns_bad_timeout > 0) bad = 
xhash_getx(s2s->dns_bad, ipport, ipport_len); if (now > res->expiry) { /* good host? */ if (bad == NULL) c_expired_good++; log_debug(ZONE, "host '%s' expired", res->key); continue; } else if (bad != NULL && !(now > bad->expiry)) { /* bad host (connection failure) */ l_bad[s_bad++] = res; log_debug(ZONE, "host '%s' bad", res->key); } else if (s2s->out_reuse && xhash_getx(s2s->out_host, ipport, ipport_len) != NULL) { /* existing connection */ log_debug(ZONE, "host '%s' exists", res->key); if (s_reuse == 0 || p_reuse > res->prio) { p_reuse = res->prio; s_reuse = 0; wt_reuse = 0; log_debug(ZONE, "reset prio list, using prio %d", res->prio); } if (res->prio <= p_reuse) { l_reuse[s_reuse] = res; wt_reuse += res->weight; rw_reuse[s_reuse] = wt_reuse; s_reuse++; log_debug(ZONE, "added host with weight %d (%d), running weight %d", (res->weight >> 8), res->weight, wt_reuse); } else { log_debug(ZONE, "ignored host with prio %d", res->prio); } } else if (memchr(ipport, ':', ipport_len) != NULL) { /* ipv6 */ log_debug(ZONE, "host '%s' IPv6", res->key); if (s_aaaa == 0 || p_aaaa > res->prio) { p_aaaa = res->prio; s_aaaa = 0; wt_aaaa = 0; log_debug(ZONE, "reset prio list, using prio %d", res->prio); } if (res->prio <= p_aaaa) { l_aaaa[s_aaaa] = res; wt_aaaa += res->weight; rw_aaaa[s_aaaa] = wt_aaaa; s_aaaa++; log_debug(ZONE, "added host with weight %d (%d), running weight %d", (res->weight >> 8), res->weight, wt_aaaa); } else { log_debug(ZONE, "ignored host with prio %d", res->prio); } } else { /* ipv4 */ log_debug(ZONE, "host '%s' IPv4", res->key); if (s_a == 0 || p_a > res->prio) { p_a = res->prio; s_a = 0; wt_a = 0; log_debug(ZONE, "reset prio list, using prio %d", res->prio); } if (res->prio <= p_a) { l_a[s_a] = res; wt_a += res->weight; rw_a[s_a] = wt_a; s_a++; log_debug(ZONE, "added host with weight %d (%d), running weight %d", (res->weight >> 8), res->weight, wt_a); } else { log_debug(ZONE, "ignored host with prio %d", res->prio); } } } 
while(xhash_iter_next(dns->results)); } /* pick a result at weighted random (RFC 2782) * all weights are guaranteed to be >= 16 && <= 16776960 * (assuming max 50 hosts, the total/running sums won't exceed 2^31) */ ipport = NULL; if (s_reuse > 0) { int i, r; log_debug(ZONE, "using existing hosts, total weight %d", wt_reuse); assert((wt_reuse + 1) > 0); r = rand() % (wt_reuse + 1); log_debug(ZONE, "random number %d", r); for (i = 0; i < s_reuse; i++) if (rw_reuse[i] >= r) { log_debug(ZONE, "selected host '%s', running weight %d", l_reuse[i]->key, rw_reuse[i]); ipport = l_reuse[i]->key; break; } } else if (s_aaaa > 0 && (s_a == 0 || p_aaaa <= p_a)) { int i, r; log_debug(ZONE, "using IPv6 hosts, total weight %d", wt_aaaa); assert((wt_aaaa + 1) > 0); r = rand() % (wt_aaaa + 1); log_debug(ZONE, "random number %d", r); for (i = 0; i < s_aaaa; i++) if (rw_aaaa[i] >= r) { log_debug(ZONE, "selected host '%s', running weight %d", l_aaaa[i]->key, rw_aaaa[i]); ipport = l_aaaa[i]->key; break; } } else if (s_a > 0) { int i, r; log_debug(ZONE, "using IPv4 hosts, total weight %d", wt_a); assert((wt_a + 1) > 0); r = rand() % (wt_a + 1); log_debug(ZONE, "random number %d", r); for (i = 0; i < s_a; i++) if (rw_a[i] >= r) { log_debug(ZONE, "selected host '%s', running weight %d", l_a[i]->key, rw_a[i]); ipport = l_a[i]->key; break; } } else if (s_bad > 0) { ipport = l_bad[rand() % s_bad]->key; log_debug(ZONE, "using bad hosts, allow_bad=%d", allow_bad); /* there are expired good hosts, expire cache immediately */ if (c_expired_good > 0) { log_debug(ZONE, "expiring this DNS cache entry, %d expired hosts", c_expired_good); dns->expiry = 0; } if (!allow_bad) return -1; } /* results cannot all expire before the collection does */ assert(ipport != NULL); /* copy the ip and port to the packet */ ipport_len = strlen(ipport); c = strchr(ipport, '/'); strncpy(ip, ipport, c-ipport); ip[c-ipport] = '\0'; c++; c_len = ipport_len - (c - ipport); tmp = strndup(c, c_len); *port = atoi(tmp); free(tmp); 
    return 0;
}

/** find/make a connection for a route
 *
 *  route is a non-NUL-terminated "from/to" pair of routelen bytes.
 *  *out receives the connection, or NULL when resolution is pending/failed.
 *  allow_bad permits selection of hosts previously marked bad.
 *  Returns 0 on success or pending resolution, -1 when nothing usable. */
int out_route(s2s_t s2s, char *route, int routelen, conn_t *out, int allow_bad) {
    dnscache_t dns;
    char ipport[INET6_ADDRSTRLEN + 16], *dkey, *c;
    time_t now;
    int reuse = 0;
    char ip[INET6_ADDRSTRLEN] = {0};
    int port, c_len, from_len;

    /* split "from/to" — dkey becomes a heap copy of the destination domain */
    c = memchr(route, '/', routelen);
    from_len = c - route;
    c++;
    c_len = routelen - (c - route);
    dkey = strndup(c, c_len);

    log_debug(ZONE, "trying to find connection for '%s'", dkey);
    *out = (conn_t) xhash_get(s2s->out_dest, dkey);
    if(*out == NULL) {
        log_debug(ZONE, "connection for '%s' not found", dkey);

        /* check resolver cache for ip/port */
        dns = xhash_get(s2s->dnscache, dkey);
        if(dns == NULL) {
            /* new resolution */
            log_debug(ZONE, "no dns for %s, preparing for resolution", dkey);
            /* NOTE(review): calloc result unchecked; strcpy assumes dkey fits
             * dns->name — confirm dnscache_st sizing upstream */
            dns = (dnscache_t) calloc(1, sizeof(struct dnscache_st));
            strcpy(dns->name, dkey);
            xhash_put(s2s->dnscache, dns->name, (void *) dns);
#if 0
            /* this is good for testing */
            dns->pending = 0;
            strcpy(dns->ip, "127.0.0.1");
            dns->port = 3000;
            dns->expiry = time(NULL) + 99999999;
#endif
        }

        /* resolution in progress */
        if(dns->pending) {
            log_debug(ZONE, "pending resolution");
            free(dkey);
            return 0;
        }

        /* has it expired (this is 0 for new cache objects, so they're always expired */
        now = time(NULL); /* each entry must be expired no earlier than the collection */
        if(now > dns->expiry) {
            /* resolution required */
            log_debug(ZONE, "requesting resolution for %s", dkey);
            dns->init_time = time(NULL);
            dns->pending = 1;
            dns_resolve_domain(s2s, dns);

            free(dkey);
            return 0;
        }

        /* dns is valid — pick a host/port (weighted random, see dns_select) */
        if (dns_select(s2s, ip, &port, now, dns, allow_bad)) {
            /* failed to find anything acceptable */
            free(dkey);
            return -1;
        }

        /* re-request resolution if dns_select expired the data */
        if (now > dns->expiry) {
            /* resolution required */
            log_debug(ZONE, "requesting resolution for %s", dkey);
            dns->init_time = time(NULL);
            dns->pending = 1;
            dns_resolve_domain(s2s, dns);

            free(dkey);
            return 0;
        }

        /* generate the ip/port pair, this is the hash key for the conn */
        snprintf(ipport, INET6_ADDRSTRLEN + 16, "%s/%d", ip, port);

        /* try to re-use an existing connection */
        if (s2s->out_reuse)
            *out = (conn_t) xhash_get(s2s->out_host, ipport);

        if (*out != NULL) {
            log_write(s2s->log, LOG_NOTICE, "[%d] [%s, port=%d] using connection for '%s'", (*out)->fd->fd, (*out)->ip, (*out)->port, dkey);

            /* associate existing connection with domain */
            xhash_put(s2s->out_dest, s2s->out_reuse ? pstrdup(xhash_pool((*out)->routes), dkey) : dkey, (void *) *out);
            reuse = 1;
        } else {
            /* no conn, create one */
            *out = (conn_t) calloc(1, sizeof(struct conn_st));

            (*out)->s2s = s2s;

            (*out)->key = strdup(ipport);
            /* when out_reuse is off, the conn takes ownership of dkey */
            if (s2s->out_reuse)
                (*out)->dkey = NULL;
            else
                (*out)->dkey = dkey;

            strcpy((*out)->ip, ip);
            (*out)->port = port;

            (*out)->states = xhash_new(101);
            (*out)->states_time = xhash_new(101);

            (*out)->routes = xhash_new(101);

            (*out)->init_time = time(NULL);

            if (s2s->out_reuse)
                xhash_put(s2s->out_host, (*out)->key, (void *) *out);
            xhash_put(s2s->out_dest, s2s->out_reuse ? pstrdup(xhash_pool((*out)->routes), dkey) : dkey, (void *) *out);

            xhash_put((*out)->routes, pstrdupx(xhash_pool((*out)->routes), route, routelen), (void *) 1);

            /* connect */
            log_debug(ZONE, "initiating connection to %s", ipport);

            /* APPLE: multiple origin_ips may be specified; use IPv6 if possible or otherwise IPv4 */
            int ip_is_v6 = 0;
            if (strchr(ip, ':') != NULL)
                ip_is_v6 = 1;
            int i;
            for (i = 0; i < s2s->origin_nips; i++) {
                // only bother with mio_connect if the src and dst IPs are of the same type
                if ((ip_is_v6 && (strchr(s2s->origin_ips[i], ':') != NULL)) ||     // both are IPv6
                    (!ip_is_v6 && (strchr(s2s->origin_ips[i], ':') == NULL)))      // both are IPv4
                    (*out)->fd = mio_connect(s2s->mio, port, ip, s2s->origin_ips[i], _out_mio_callback, (void *) *out);
                if ((*out)->fd != NULL) break;
            }

            if ((*out)->fd == NULL) {
                log_write(s2s->log, LOG_NOTICE, "[%d] [%s, port=%d] mio_connect error: %s (%d)", -1, (*out)->ip, (*out)->port, MIO_STRERROR(MIO_ERROR), MIO_ERROR);

                _out_dns_mark_bad(*out);

                if (s2s->out_reuse)
                    xhash_zap(s2s->out_host, (*out)->key);
                xhash_zap(s2s->out_dest, dkey);

                xhash_free((*out)->states);
                xhash_free((*out)->states_time);
                xhash_free((*out)->routes);
                free((*out)->key);
                free((*out)->dkey);
                free(*out);
                *out = NULL;

                /* try again without allowing bad hosts
                 * NOTE(review): when out_reuse is set, dkey appears to leak on
                 * this path (it was only pstrdup'd into a pool) — confirm */
                return out_route(s2s, route, routelen, out, 0);
            } else {
                log_write(s2s->log, LOG_NOTICE, "[%d] [%s, port=%d] outgoing connection for '%s'", (*out)->fd->fd, (*out)->ip, (*out)->port, dkey);

                (*out)->s = sx_new(s2s->sx_env, (*out)->fd->fd, _out_sx_callback, (void *) *out);

#ifdef HAVE_SSL
                /* Send a stream version of 1.0 if we can do STARTTLS */
                if(s2s->sx_ssl != NULL) {
                    sx_client_init((*out)->s, S2S_DB_HEADER, uri_SERVER, dkey, pstrdupx(xhash_pool((*out)->routes), route, from_len), "1.0");
                } else {
                    sx_client_init((*out)->s, S2S_DB_HEADER, uri_SERVER, NULL, NULL, NULL);
                }
#else
                sx_client_init((*out)->s, S2S_DB_HEADER, uri_SERVER, NULL, NULL, NULL);
#endif
                /* dkey is now used by the hash table */
                return 0;
            }
        }
    } else {
        log_debug(ZONE, "connection for '%s' found (%d %s/%d)", dkey, (*out)->fd->fd, (*out)->ip, (*out)->port);
    }

    /* connection in progress, or re-using connection: add to routes list */
    if (!(*out)->online || reuse) {
        if (xhash_getx((*out)->routes, route, routelen) == NULL)
            xhash_put((*out)->routes, pstrdupx(xhash_pool((*out)->routes), route, routelen), (void *) 1);
    }

    free(dkey);
    return 0;
}

/** release a queued packet and everything it owns */
void out_pkt_free(pkt_t pkt)
{
    nad_free(pkt->nad);
    jid_free(pkt->from);
    jid_free(pkt->to);
    free(pkt);
}

/** send a packet out */
int out_packet(s2s_t s2s, pkt_t pkt) {
    char *rkey;
    int rkeylen;
conn_t out; conn_state_t state; int ret; /* perform check against whitelist */ if (s2s->enable_whitelist > 0 && (pkt->to->domain != NULL) && (s2s_domain_in_whitelist(s2s, pkt->to->domain) == 0)) { log_write(s2s->log, LOG_NOTICE, "sending a packet to domain not in the whitelist, dropping it"); if (pkt->to != NULL) jid_free(pkt->to); if (pkt->from != NULL) jid_free(pkt->from); if (pkt->nad != NULL) nad_free(pkt->nad); free(pkt); return; } /* new route key */ rkey = s2s_route_key(NULL, pkt->from->domain, pkt->to->domain); rkeylen = strlen(rkey); /* get a connection */ ret = out_route(s2s, rkey, rkeylen, &out, 1); if (out == NULL) { /* connection not available, queue packet */ _out_packet_queue(s2s, pkt); /* check if out_route was successful in attempting a connection */ if (ret) { /* bounce queue */ out_bounce_route_queue(s2s, rkey, rkeylen, stanza_err_SERVICE_UNAVAILABLE); free(rkey); return -1; } free(rkey); return 0; } /* connection in progress */ if(!out->online) { log_debug(ZONE, "connection in progress, queueing packet"); _out_packet_queue(s2s, pkt); free(rkey); return 0; } /* connection state */ state = (conn_state_t) xhash_get(out->states, rkey); /* valid conns or dialback packets */ if(state == conn_VALID || pkt->db) { log_debug(ZONE, "writing packet for %s to outgoing conn %d", rkey, out->fd->fd); /* send it straight out */ if(pkt->db) { /* if this is a db:verify packet, increment counter and set timestamp */ if(NAD_ENAME_L(pkt->nad, 0) == 6 && strncmp("verify", NAD_ENAME(pkt->nad, 0), 6) == 0) { out->verify++; out->last_verify = time(NULL); } /* dialback packet */ sx_nad_write(out->s, pkt->nad); } else { /* if the outgoing stanza has a jabber:client namespace, remove it so that the stream jabber:server namespaces will apply (XMPP 11.2.2) */ int ns = nad_find_namespace(pkt->nad, 1, uri_CLIENT, NULL); if(ns >= 0) { /* clear the namespaces of elem 0 (internal route element) and elem 1 (message|iq|presence) */ pkt->nad->elems[0].ns = -1; 
pkt->nad->elems[0].my_ns = -1; pkt->nad->elems[1].ns = -1; pkt->nad->elems[1].my_ns = -1; } /* send it out */ sx_nad_write_elem(out->s, pkt->nad, 1); } /* update timestamp */ out->last_packet = time(NULL); jid_free(pkt->from); jid_free(pkt->to); free(pkt); free(rkey); return 0; } /* can't be handled yet, queue */ _out_packet_queue(s2s, pkt); /* if dialback is in progress, then we're done for now */ if(state == conn_INPROGRESS) { free(rkey); return 0; } /* this is a new route - send dialback auth request to piggyback on the existing connection */ if (out->s2s->require_tls == 0 || out->s->ssf > 0) { _out_dialback(out, rkey, rkeylen); } free(rkey); return 0; } char *dns_make_ipport(char *host, int port) { char *c; assert(port > 0 && port < 65536); c = (char *) malloc(strlen(host) + 7); sprintf(c, "%s/%d", host, port); return c; } static void _dns_add_result(dnsquery_t query, char *ip, int port, int prio, int weight, unsigned int ttl) { char *ipport = dns_make_ipport(ip, port); dnsres_t res = xhash_get(query->results, ipport); if (res != NULL) { if (prio < res->prio) res->prio = prio; if (prio < res->prio) { /* duplicate host at lower prio - reset weight */ res->weight = weight; } else if (prio == res->prio) { /* duplicate host at same prio - add to weight */ res->weight += weight; if (res->weight > (65535 << 8)) res->weight = (65535 << 8); } if (ttl > res->expiry) res->expiry = ttl; if (ttl > query->expiry) query->expiry = ttl; log_debug(ZONE, "dns result updated for %s@%p: %s (%d/%d/%d)", query->name, query, ipport, res->prio, (res->weight >> 8), res->expiry); } else if (xhash_count(query->results) < DNS_MAX_RESULTS) { res = pmalloc(xhash_pool(query->results), sizeof(struct dnsres_st)); res->key = pstrdup(xhash_pool(query->results), ipport); res->prio = prio; res->weight = weight; res->expiry = ttl; if (ttl > query->expiry) query->expiry = ttl; xhash_put(query->results, res->key, res); log_debug(ZONE, "dns result added for %s@%p: %s (%d/%d/%d)", query->name, query, 
ipport, res->prio, (res->weight >> 8), res->expiry); } else { log_debug(ZONE, "dns result ignored for %s@%p: %s (%d/%d/%d)", query->name, query, ipport, prio, (weight >> 8), ttl); } free(ipport); } static void _dns_add_host(dnsquery_t query, char *ip, int port, int prio, int weight, unsigned int ttl) { char *ipport = dns_make_ipport(ip, port); dnsres_t res = xhash_get(query->hosts, ipport); /* update host weights: * RFC 2482 "In the presence of records containing weights greater * than 0, records with weight 0 should have a very small chance of * being selected." * 0 -> 16 * 1-65535 -> 256-16776960 */ if (weight == 0) weight = 1 << 4; else weight <<= 8; if (res != NULL) { if (prio < res->prio) res->prio = prio; if (prio < res->prio) { /* duplicate host at lower prio - reset weight */ res->weight = weight; } else if (prio == res->prio) { /* duplicate host at same prio - add to weight */ res->weight += weight; if (res->weight > (65535 << 8)) res->weight = (65535 << 8); } if (ttl > res->expiry) res->expiry = ttl; log_debug(ZONE, "dns host updated for %s@%p: %s (%d/%d/%d)", query->name, query, ipport, res->prio, (res->weight >> 8), res->expiry); } else if (xhash_count(query->hosts) < DNS_MAX_RESULTS) { res = pmalloc(xhash_pool(query->hosts), sizeof(struct dnsres_st)); res->key = pstrdup(xhash_pool(query->hosts), ipport); res->prio = prio; res->weight = weight; res->expiry = ttl; xhash_put(query->hosts, res->key, res); log_debug(ZONE, "dns host added for %s@%p: %s (%d/%d/%d)", query->name, query, ipport, res->prio, (res->weight >> 8), res->expiry); } else { log_debug(ZONE, "dns host ignored for %s@%p: %s (%d/%d/%d)", query->name, query, ipport, prio, (weight >> 8), ttl); } free(ipport); } /* this function is called with a NULL ctx to start the SRV process */ static void _dns_result_srv(struct dns_ctx *ctx, struct dns_rr_srv *result, void *data) { dnsquery_t query = data; assert(query != NULL); query->query = NULL; if (ctx != NULL && result == NULL) { log_debug(ZONE, 
            "dns failure for %s@%p: SRV %s (%d)", query->name, query,
            query->s2s->lookup_srv[query->srv_i], dns_status(ctx));
    } else if (result != NULL) {
        int i;
        log_debug(ZONE, "dns response for %s@%p: SRV %s %d (%d)", query->name, query,
            result->dnssrv_qname, result->dnssrv_nrr, result->dnssrv_ttl);

        /* queue every sane SRV target (non-empty name, valid port) for A/AAAA lookup */
        for (i = 0; i < result->dnssrv_nrr; i++) {
            if (strlen(result->dnssrv_srv[i].name) > 0 &&
                    result->dnssrv_srv[i].port > 0 && result->dnssrv_srv[i].port < 65536) {
                log_debug(ZONE, "dns response for %s@%p: SRV %s[%d] %s/%d (%d/%d)", query->name, query,
                    result->dnssrv_qname, i,
                    result->dnssrv_srv[i].name, result->dnssrv_srv[i].port,
                    result->dnssrv_srv[i].priority, result->dnssrv_srv[i].weight);

                _dns_add_host(query, result->dnssrv_srv[i].name, result->dnssrv_srv[i].port,
                    result->dnssrv_srv[i].priority, result->dnssrv_srv[i].weight, result->dnssrv_ttl);
            }
        }

        free(result);
    }

    /* check next SRV service name */
    query->srv_i++;
    if (query->srv_i < query->s2s->lookup_nsrv) {
        log_debug(ZONE, "dns request for %s@%p: SRV %s", query->name, query,
            query->s2s->lookup_srv[query->srv_i]);
        query->query = dns_submit_srv(NULL, query->name, query->s2s->lookup_srv[query->srv_i], "tcp",
            DNS_NOSRCH, _dns_result_srv, query);
        /* if submit failed, call ourselves with a NULL result */
        if (query->query == NULL)
            _dns_result_srv(ctx, NULL, query);
    } else {
        /* no more SRV records to check, resolve hosts */
        if (xhash_count(query->hosts) > 0) {
            _dns_result_a(NULL, NULL, query);

        /* no SRV records returned, resolve hostname */
        } else {
            /* fall back to the domain itself on the standard s2s port */
            query->cur_host = strdup(query->name);
            query->cur_port = 5269;
            query->cur_prio = 0;
            query->cur_weight = 0;
            query->cur_expiry = 0;
            if (query->s2s->resolve_aaaa) {
                log_debug(ZONE, "dns request for %s@%p: AAAA %s", query->name, query, query->name);
                query->query = dns_submit_a6(NULL, query->name, DNS_NOSRCH, _dns_result_aaaa, query);
                /* if submit failed, call ourselves with a NULL result */
                if (query->query == NULL)
                    _dns_result_aaaa(ctx, NULL, query);
            } else {
                log_debug(ZONE, "dns request for %s@%p: A %s", query->name, query, query->name);
                query->query = dns_submit_a4(NULL, query->name, DNS_NOSRCH, _dns_result_a, query);
                /* if submit failed, call ourselves with a NULL result */
                if (query->query == NULL)
                    _dns_result_a(ctx, NULL, query);
            }
        }
    }
}

/** AAAA resolution callback; always chains on to A resolution for the same host */
static void _dns_result_aaaa(struct dns_ctx *ctx, struct dns_rr_a6 *result, void *data) {
    dnsquery_t query = data;
    char ip[INET6_ADDRSTRLEN];
    int i;
    assert(query != NULL);
    query->query = NULL;

    if (ctx != NULL && result == NULL) {
        log_debug(ZONE, "dns failure for %s@%p: AAAA %s (%d)", query->name, query,
            query->cur_host, dns_status(ctx));
    } else if (result != NULL) {
        log_debug(ZONE, "dns response for %s@%p: AAAA %s %d (%d)", query->name, query,
            result->dnsa6_qname, result->dnsa6_nrr, result->dnsa6_ttl);

        /* cap the address-record TTL at the SRV TTL, if one was set */
        if (query->cur_expiry > 0 && result->dnsa6_ttl > query->cur_expiry)
            result->dnsa6_ttl = query->cur_expiry;

        for (i = 0; i < result->dnsa6_nrr; i++) {
            if (inet_ntop(AF_INET6, &result->dnsa6_addr[i], ip, INET6_ADDRSTRLEN) != NULL) {
                log_debug(ZONE, "dns response for %s@%p: AAAA %s[%d] %s/%d", query->name, query,
                    result->dnsa6_qname, i, ip, query->cur_port);
                _dns_add_result(query, ip, query->cur_port,
                    query->cur_prio, query->cur_weight, result->dnsa6_ttl);
            }
        }
    }

    if (query->cur_host != NULL) {
        /* do ipv4 resolution too */
        log_debug(ZONE, "dns request for %s@%p: A %s", query->name, query,
            query->cur_host);
        query->query = dns_submit_a4(NULL, query->cur_host, DNS_NOSRCH, _dns_result_a, query);
        /* if submit failed, call ourselves with a NULL result */
        if (query->query == NULL)
            _dns_result_a(ctx, NULL, query);
    } else {
        /* uh-oh */
        log_debug(ZONE, "dns result for %s@%p: AAAA host vanished...", query->name, query);
        _dns_result_a(NULL, NULL, query);
    }

    free(result);
}

/* try /etc/hosts if the A process did not return any results */
static int _etc_hosts_lookup(const char *cszName, char *szIP, const int ciMaxIPLen) {
#define EHL_LINE_LEN 260
    int iSuccess = 0;
    size_t iLen;
    char szLine[EHL_LINE_LEN + 1]; /* one extra for the space
                                      character (*) */
    char *pcStart, *pcEnd;
    FILE *fHosts;

    do {
        /* initialization */
        fHosts = NULL;

        /* sanity checks */
        if ((cszName == NULL) || (szIP == NULL) || (ciMaxIPLen <= 0))
            break;
        szIP[0] = 0;

        /* open the hosts file */
#ifdef _WIN32
        pcStart = getenv("WINDIR");
        if (pcStart != NULL) {
            sprintf(szLine, "%s\\system32\\drivers\\etc\\hosts", pcStart);
        } else {
            strcpy(szLine, "C:\\WINDOWS\\system32\\drivers\\etc\\hosts");
        }
#else
        strcpy(szLine, "/etc/hosts");
#endif
        fHosts = fopen(szLine, "r");
        if (fHosts == NULL)
            break;

        /* read line by line ... */
        while (fgets(szLine, EHL_LINE_LEN, fHosts) != NULL) {
            /* remove comments */
            pcStart = strchr (szLine, '#');
            if (pcStart != NULL)
                *pcStart = 0;
            strcat(szLine, " "); /* append a space character for easier parsing (*) */

            /* first to appear: IP address */
            iLen = strspn(szLine, "1234567890.");
            if ((iLen < 7) || (iLen > 15)) /* superficial test for anything between x.x.x.x and xxx.xxx.xxx.xxx */
                continue;
            pcEnd = szLine + iLen;
            *pcEnd = 0;
            pcEnd++; /* not beyond the end of the line yet (*) */

            /* check strings separated by blanks, tabs or newlines */
            pcStart = pcEnd + strspn(pcEnd, " \t\n");
            while (*pcStart != 0) {
                pcEnd = pcStart + strcspn(pcStart, " \t\n");
                *pcEnd = 0;
                pcEnd++; /* not beyond the end of the line yet (*) */
                if (strcasecmp(pcStart, cszName) == 0) {
                    /* hostname matched — return the IP column, NUL-terminated */
                    strncpy(szIP, szLine, ciMaxIPLen - 1);
                    szIP[ciMaxIPLen - 1] = '\0';
                    iSuccess = 1;
                    break;
                }
                pcStart = pcEnd + strspn(pcEnd, " \t\n");
            }
            if (iSuccess)
                break;
        }
    } while (0);

    if (fHosts != NULL)
        fclose(fHosts);

    return (iSuccess);
}

/* this function is called with a NULL ctx to start the A/AAAA process */
static void _dns_result_a(struct dns_ctx *ctx, struct dns_rr_a4 *result, void *data) {
    dnsquery_t query = data;
    assert(query != NULL);
    query->query = NULL;

    if (ctx != NULL && result == NULL) {
#define DRA_IP_LEN 16
        char szIP[DRA_IP_LEN];
        /* A lookup failed — fall back to /etc/hosts */
        if (_etc_hosts_lookup (query->name, szIP, DRA_IP_LEN)) {
            log_debug(ZONE, "/etc/lookup for %s@%p: %s (%d)", query->name, query,
                szIP, query->s2s->etc_hosts_ttl);
            _dns_add_result (query, szIP,
                query->cur_port, query->cur_prio, query->cur_weight,
                query->s2s->etc_hosts_ttl);
        } else {
            log_debug(ZONE, "dns failure for %s@%p: A %s (%d)", query->name, query,
                query->cur_host, dns_status(ctx));
        }
    } else if (result != NULL) {
        char ip[INET_ADDRSTRLEN];
        int i;
        log_debug(ZONE, "dns response for %s@%p: A %s %d (%d)", query->name, query,
            result->dnsa4_qname, result->dnsa4_nrr, result->dnsa4_ttl);

        /* cap the address-record TTL at the SRV TTL, if one was set */
        if (query->cur_expiry > 0 && result->dnsa4_ttl > query->cur_expiry)
            result->dnsa4_ttl = query->cur_expiry;

        for (i = 0; i < result->dnsa4_nrr; i++) {
            if (inet_ntop(AF_INET, &result->dnsa4_addr[i], ip, INET_ADDRSTRLEN) != NULL) {
                log_debug(ZONE, "dns response for %s@%p: A %s[%d] %s/%d", query->name, query,
                    result->dnsa4_qname, i, ip, query->cur_port);
                _dns_add_result(query, ip, query->cur_port,
                    query->cur_prio, query->cur_weight, result->dnsa4_ttl);
            }
        }

        free(result);
    }

    /* resolve the next host in the list */
    if (xhash_iter_first(query->hosts)) {
        char *ipport, *c, *tmp;
        int ipport_len, ip_len, port_len;
        dnsres_t res;
        union xhashv xhv;

        xhv.dnsres_val = &res;

        /* get the first entry */
        xhash_iter_get(query->hosts, (const char **) &ipport, &ipport_len, xhv.val);

        /* remove the host from the list */
        xhash_iter_zap(query->hosts);

        /* split the "host/port" key */
        c = memchr(ipport, '/', ipport_len);
        ip_len = c - ipport;
        c++;
        port_len = ipport_len - (c - ipport);

        /* resolve hostname */
        free(query->cur_host);
        query->cur_host = strndup(ipport, ip_len);
        tmp = strndup(c, port_len);
        query->cur_port = atoi(tmp);
        free(tmp);
        query->cur_prio = res->prio;
        query->cur_weight = res->weight;
        query->cur_expiry = res->expiry;
        log_debug(ZONE, "dns ttl for %s@%p limited to %d", query->name, query, query->cur_expiry);

        if (query->s2s->resolve_aaaa) {
            log_debug(ZONE, "dns request for %s@%p: AAAA %s", query->name, query, query->cur_host);
            query->query = dns_submit_a6(NULL, query->cur_host, DNS_NOSRCH, _dns_result_aaaa, query);
            /* if submit failed, call ourselves with a NULL result */
            if (query->query == NULL)
                _dns_result_aaaa(ctx, NULL, query);
        } else {
            log_debug(ZONE, "dns request for %s@%p: A %s", query->name, query, query->cur_host);
            query->query = dns_submit_a4(NULL, query->cur_host, DNS_NOSRCH, _dns_result_a, query);
            /* if submit failed, call ourselves with a NULL result */
            if (query->query == NULL)
                _dns_result_a(ctx, NULL, query);
        }

    /* finished */
    } else {
        time_t now = time(NULL);
        char *domain;

        free(query->cur_host);
        query->cur_host = NULL;

        log_debug(ZONE, "dns requests for %s@%p complete: %d (%d)", query->name, query,
            xhash_count(query->results), query->expiry);

        /* update query TTL */
        if (query->expiry > query->s2s->dns_max_ttl)
            query->expiry = query->s2s->dns_max_ttl;
        if (query->expiry < query->s2s->dns_min_ttl)
            query->expiry = query->s2s->dns_min_ttl;
        query->expiry += now;

        /* update result TTLs - the query expiry MUST NOT be longer than all result expiries */
        if (xhash_iter_first(query->results)) {
            union xhashv xhv;
            dnsres_t res;

            xhv.dnsres_val = &res;
            do {
                xhash_iter_get(query->results, NULL, NULL, xhv.val);
                if (res->expiry > query->s2s->dns_max_ttl)
                    res->expiry = query->s2s->dns_max_ttl;
                if (res->expiry < query->s2s->dns_min_ttl)
                    res->expiry = query->s2s->dns_min_ttl;
                res->expiry += now;
            } while(xhash_iter_next(query->results));
        }

        xhash_free(query->hosts);
        query->hosts = NULL;
        if (idna_to_unicode_8z8z(query->name, &domain, 0) != IDNA_SUCCESS) {
            log_write(query->s2s->log, LOG_ERR, "idna dns decode for %s failed", query->name);
            /* fake empty results to shortcut resolution failure */
            xhash_free(query->results);
            query->results = xhash_new(71);
            query->expiry = time(NULL) + 99999999;
            domain = strdup(query->name);
        }
        /* out_resolve takes ownership of query->results */
        out_resolve(query->s2s, domain, query->results, query->expiry);
        free(domain);
        free(query->name);
        free(query);
    }
}

/** kick off the SRV -> host -> A/AAAA resolution chain for a cache entry */
void dns_resolve_domain(s2s_t s2s, dnscache_t dns) {
    dnsquery_t query = (dnsquery_t) calloc(1, sizeof(struct dnsquery_st));
    query->s2s = s2s;
    query->results = xhash_new(71);
    if (idna_to_ascii_8z(dns->name,
&query->name, 0) != IDNA_SUCCESS) { log_write(s2s->log, LOG_ERR, "idna dns encode for %s failed", dns->name); /* shortcut resolution failure */ query->expiry = time(NULL) + 99999999; out_resolve(query->s2s, dns->name, query->results, query->expiry); return; } query->hosts = xhash_new(71); query->srv_i = -1; query->expiry = 0; query->cur_host = NULL; query->cur_port = 0; query->cur_expiry = 0; query->query = NULL; dns->query = query; log_debug(ZONE, "dns resolve for %s@%p started", query->name, query); /* - resolve all SRV records to host/port * - if no results, include domain/5269 * - resolve all host/port combinations * - return result */ _dns_result_srv(NULL, NULL, query); } /** responses from the resolver */ void out_resolve(s2s_t s2s, char *domain, xht results, time_t expiry) { dnscache_t dns; /* no results, resolve failed */ if(xhash_count(results) == 0) { dns = xhash_get(s2s->dnscache, domain); if (dns != NULL) { /* store negative DNS cache */ xhash_free(dns->results); dns->query = NULL; dns->results = NULL; dns->expiry = expiry; dns->pending = 0; } log_write(s2s->log, LOG_NOTICE, "dns lookup for %s failed", domain); /* bounce queue */ out_bounce_domain_queues(s2s, domain, stanza_err_REMOTE_SERVER_NOT_FOUND); xhash_free(results); return; } log_write(s2s->log, LOG_NOTICE, "dns lookup for %s returned %d result%s (ttl %d)", domain, xhash_count(results), xhash_count(results)!=1?"s":"", expiry - time(NULL)); /* get the cache entry */ dns = xhash_get(s2s->dnscache, domain); if(dns == NULL) { /* retry using punycode */ char *punydomain; if (idna_to_ascii_8z(domain, &punydomain, 0) == IDNA_SUCCESS) { dns = xhash_get(s2s->dnscache, punydomain); free(punydomain); } } if(dns == NULL) { log_write(s2s->log, LOG_ERR, "weird, never requested %s resolution", domain); return; } /* fill it out */ xhash_free(dns->results); dns->query = NULL; dns->results = results; dns->expiry = expiry; dns->pending = 0; out_flush_domain_queues(s2s, domain); /* delete the cache entry if caching 
is disabled */ if (!s2s->dns_cache_enabled && !dns->pending) { xhash_free(dns->results); xhash_zap(s2s->dnscache, domain); free(dns); } } /** mio callback for outgoing conns */ static int _out_mio_callback(mio_t m, mio_action_t a, mio_fd_t fd, void *data, void *arg) { conn_t out = (conn_t) arg; char ipport[INET6_ADDRSTRLEN + 17]; int nbytes; switch(a) { case action_READ: log_debug(ZONE, "read action on fd %d", fd->fd); /* they did something */ out->last_activity = time(NULL); ioctl(fd->fd, FIONREAD, &nbytes); if(nbytes == 0) { sx_kill(out->s); return 0; } return sx_can_read(out->s); case action_WRITE: log_debug(ZONE, "write action on fd %d", fd->fd); /* update activity timestamp */ out->last_activity = time(NULL); return sx_can_write(out->s); case action_CLOSE: log_debug(ZONE, "close action on fd %d", fd->fd); jqueue_push(out->s2s->dead, (void *) out->s, 0); log_write(out->s2s->log, LOG_NOTICE, "[%d] [%s, port=%d] disconnect, packets: %i", fd->fd, out->ip, out->port, out->packet_count); if (out->s2s->out_reuse) { /* generate the ip/port pair */ snprintf(ipport, INET6_ADDRSTRLEN + 16, "%s/%d", out->ip, out->port); xhash_zap(out->s2s->out_host, ipport); } if (xhash_iter_first(out->routes)) { char *rkey; int rkeylen; char *c; int c_len; /* remove all the out_dest entries */ do { xhash_iter_get(out->routes, (const char **) &rkey, &rkeylen, NULL); c = memchr(rkey, '/', rkeylen); c++; c_len = rkeylen - (c - rkey); log_debug(ZONE, "route '%.*s'", rkeylen, rkey); if (xhash_getx(out->s2s->out_dest, c, c_len) != NULL) { log_debug(ZONE, "removing dest entry for '%.*s'", c_len, c); xhash_zapx(out->s2s->out_dest, c, c_len); } } while(xhash_iter_next(out->routes)); } if (xhash_iter_first(out->routes)) { char *rkey; int rkeylen; jqueue_t q; int npkt; /* retry all the routes */ do { xhash_iter_get(out->routes, (const char **) &rkey, &rkeylen, NULL); q = xhash_getx(out->s2s->outq, rkey, rkeylen); if (out->s2s->retry_limit > 0 && q != NULL && jqueue_age(q) > out->s2s->retry_limit) { 
                        log_write(out->s2s->log, LOG_NOTICE, "[%d] [%s, port=%d] retry limit reached for '%.*s' queue", fd->fd, out->ip, out->port, rkeylen, rkey);
                        q = NULL;
                    }

                    if (q != NULL && (npkt = jqueue_size(q)) > 0 && xhash_get(out->states, rkey) != (void*) conn_INPROGRESS) {
                        conn_t retry;

                        log_debug(ZONE, "retrying connection for '%.*s' queue", rkeylen, rkey);
                        if (!out_route(out->s2s, rkey, rkeylen, &retry, 0)) {
                            log_debug(ZONE, "retry successful");
                            if (retry != NULL) {
                                /* flush queue */
                                out_flush_route_queue(out->s2s, rkey, rkeylen);
                            }
                        } else {
                            log_debug(ZONE, "retry failed");

                            /* bounce queue */
                            out_bounce_route_queue(out->s2s, rkey, rkeylen, stanza_err_SERVICE_UNAVAILABLE);

                            _out_dns_mark_bad(out);
                        }
                    } else {
                        /* bounce queue */
                        out_bounce_route_queue(out->s2s, rkey, rkeylen, stanza_err_REMOTE_SERVER_TIMEOUT);

                        _out_dns_mark_bad(out);
                    }
                } while(xhash_iter_next(out->routes));
            }

            jqueue_push(out->s2s->dead_conn, (void *) out, 0);

            /* fallthrough */
        case action_ACCEPT:
            break;
    }

    return 0;
}

/** (re)send dialback requests for every route riding on this connection */
void send_dialbacks(conn_t out)
{
    char *rkey;
    int rkeylen;

    if (out->s2s->dns_bad_timeout > 0) {
        /* the connection came up, so this host is no longer "bad" */
        dnsres_t bad = xhash_get(out->s2s->dns_bad, out->key);
        if (bad != NULL) {
            log_debug(ZONE, "removing bad host entry for '%s'", out->key);
            xhash_zap(out->s2s->dns_bad, out->key);
            free(bad->key);
            free(bad);
        }
    }

    if (xhash_iter_first(out->routes)) {
        log_debug(ZONE, "sending dialback packets for %s", out->key);
        do {
            xhash_iter_get(out->routes, (const char **) &rkey, &rkeylen, NULL);
            _out_dialback(out, rkey, rkeylen);
        } while(xhash_iter_next(out->routes));
    }

    return;
}

/** SX event handler driving the outgoing stream state machine */
static int _out_sx_callback(sx_t s, sx_event_t e, void *data, void *arg) {
    conn_t out = (conn_t) arg;
    sx_buf_t buf = (sx_buf_t) data;
    int len, ns, elem, starttls = 0;
    sx_error_t *sxe;
    nad_t nad;

    switch(e) {
        case event_WANT_READ:
            log_debug(ZONE, "want read");
            mio_read(out->s2s->mio, out->fd);
            break;

        case event_WANT_WRITE:
            log_debug(ZONE, "want write");
            mio_write(out->s2s->mio, out->fd);
            break;

        case event_READ:
            log_debug(ZONE, "reading from %d", out->fd->fd);

            /* do the read */
            len = recv(out->fd->fd, buf->data, buf->len, 0);

            if(len < 0) {
                if(MIO_WOULDBLOCK) {
                    buf->len = 0;
                    return 0;
                }

                log_write(out->s2s->log, LOG_NOTICE, "[%d] [%s, port=%d] read error: %s (%d)", out->fd->fd, out->ip, out->port, MIO_STRERROR(MIO_ERROR), MIO_ERROR);

                /* never even got connected — remember the host is bad */
                if (!out->online) {
                    _out_dns_mark_bad(out);
                }

                sx_kill(s);

                return -1;
            }

            else if(len == 0) {
                /* they went away */
                sx_kill(s);

                return -1;
            }

            log_debug(ZONE, "read %d bytes", len);

            buf->len = len;

            return len;

        case event_WRITE:
            log_debug(ZONE, "writing to %d", out->fd->fd);

            len = send(out->fd->fd, buf->data, buf->len, 0);
            if(len >= 0) {
                log_debug(ZONE, "%d bytes written", len);
                return len;
            }

            if(MIO_WOULDBLOCK)
                return 0;

            log_write(out->s2s->log, LOG_NOTICE, "[%d] [%s, port=%d] write error: %s (%d)", out->fd->fd, out->ip, out->port, MIO_STRERROR(MIO_ERROR), MIO_ERROR);

            if (!out->online) {
                _out_dns_mark_bad(out);
            }

            sx_kill(s);

            return -1;

        case event_ERROR:
            sxe = (sx_error_t *) data;
            log_write(out->s2s->log, LOG_NOTICE, "[%d] [%s, port=%d] error: %s (%s)", out->fd->fd, out->ip, out->port, sxe->generic, sxe->specific);

            /* mark as bad if we did not manage to connect or there is unrecoverable stream error */
            if (!out->online ||
                    (sxe->code == SX_ERR_STREAM &&
                        (strstr(sxe->specific, "host-gone") ||        /* it's not there now */
                         strstr(sxe->specific, "host-unknown") ||     /* they do not service the host */
                         strstr(sxe->specific, "not-authorized") ||   /* they do not want us there */
                         strstr(sxe->specific, "see-other-host") ||   /* we do not support redirections yet */
                         strstr(sxe->specific, "system-shutdown") ||  /* they are going down */
                         strstr(sxe->specific, "policy-violation") || /* they do not want us there */
                         strstr(sxe->specific, "remote-connection-failed") ||  /* the required remote entity is gone */
                         strstr(sxe->specific, "unsupported-encoding") ||      /* they do not like our encoding */
                         strstr(sxe->specific, "undefined-condition") ||       /* something bad happend */
                         strstr(sxe->specific, "internal-server-error") ||     /* that server is broken */
                         strstr(sxe->specific, "unsupported-version")          /* they do not support our stream version */
                        ))) {
                _out_dns_mark_bad(out);
            }

            sx_kill(s);

            return -1;

        case event_OPEN:
            log_debug(ZONE, "OPEN event for %s", out->key);
            break;

        case event_STREAM:
            /* check stream version - NULl = pre-xmpp (some jabber1 servers) */
            log_debug(ZONE, "STREAM event for %s stream version is %s", out->key, out->s->res_version);

            /* first time, bring them online */
            if(!out->online) {
                log_debug(ZONE, "outgoing conn to %s is online", out->key);

                /* if no stream version from either side, kick off dialback for each route, */
                /* otherwise wait for stream features */
                if (((out->s->res_version==NULL) || (out->s2s->sx_ssl == NULL)) && out->s2s->require_tls == 0) {
                    log_debug(ZONE, "no stream version, sending dialbacks for %s immediately", out->key);
                    out->online = 1;
                    send_dialbacks(out);
                } else
                    log_debug(ZONE, "outgoing conn to %s - waiting for STREAM features", out->key);
            }

            break;

        case event_PACKET:
            /* we're counting packets */
            out->packet_count++;
            out->s2s->packet_count++;

            nad = (nad_t) data;

            /* watch for the features packet - STARTTLS and/or SASL*/
            if ((out->s->res_version!=NULL)
                 && NAD_NURI_L(nad, NAD_ENS(nad, 0)) == strlen(uri_STREAMS)
                 && strncmp(uri_STREAMS, NAD_NURI(nad, NAD_ENS(nad, 0)), strlen(uri_STREAMS)) == 0
                 && NAD_ENAME_L(nad, 0) == 8 && strncmp("features", NAD_ENAME(nad, 0), 8) == 0) {
                log_debug(ZONE, "got the stream features packet");

#ifdef HAVE_SSL
                /* starttls if we can */
                if(out->s2s->sx_ssl != NULL && s->ssf == 0) {
                    ns = nad_find_scoped_namespace(nad, uri_TLS, NULL);
                    if(ns >= 0) {
                        elem = nad_find_elem(nad, 0, ns, "starttls", 1);
                        if(elem >= 0) {
                            log_debug(ZONE, "got STARTTLS in stream features");
                            if(sx_ssl_client_starttls(out->s2s->sx_ssl, s, out->s2s->local_pemfile) == 0) {
                                starttls = 1;
                                nad_free(nad);
                                return 0;
                            }
                            log_write(out->s2s->log, LOG_ERR, "unable to establish encrypted session with peer");
                        }
                    }
                }

                /* If we're not establishing a starttls connection, send dialbacks */
                if (!starttls) {
                    if (out->s2s->require_tls == 0 || s->ssf > 0) {
                        log_debug(ZONE, "No STARTTLS, sending dialbacks for %s", out->key);
                        out->online = 1;
                        send_dialbacks(out);
                    } else {
                        log_debug(ZONE, "No STARTTLS, dialbacks disabled for non-TLS connections, cannot complete negotiation");
                    }
                }
#else
                if (out->s2s->require_tls == 0) {
                    out->online = 1;
                    send_dialbacks(out);
                }
#endif
            }

            /* we only accept dialback packets */
            if(NAD_ENS(nad, 0) < 0 || NAD_NURI_L(nad, NAD_ENS(nad, 0)) != uri_DIALBACK_L || strncmp(uri_DIALBACK, NAD_NURI(nad, NAD_ENS(nad, 0)), uri_DIALBACK_L) != 0) {
                log_debug(ZONE, "got a non-dialback packet on an outgoing conn, dropping it");
                nad_free(nad);
                return 0;
            }

            /* and then only result and verify */
            if(NAD_ENAME_L(nad, 0) == 6) {
                if(strncmp("result", NAD_ENAME(nad, 0), 6) == 0) {
                    _out_result(out, nad);
                    return 0;
                }

                if(strncmp("verify", NAD_ENAME(nad, 0), 6) == 0) {
                    _out_verify(out, nad);
                    return 0;
                }
            }

            log_debug(ZONE, "unknown dialback packet, dropping it");

            nad_free(nad);

            return 0;

        case event_CLOSED:
            if (out->fd != NULL) {
                mio_close(out->s2s->mio, out->fd);
                out->fd = NULL;
            }
            return -1;
    }

    return 0;
}

/** process incoming auth responses */
static void _out_result(conn_t out, nad_t nad) {
    int attr;
    jid_t from, to;
    char *rkey;
    int rkeylen;

    attr = nad_find_attr(nad, 0, -1, "from", NULL);
    if(attr < 0 || (from = jid_new(NAD_AVAL(nad, attr), NAD_AVAL_L(nad, attr))) == NULL) {
        log_debug(ZONE, "missing or invalid from on db result packet");
        nad_free(nad);
        return;
    }

    attr = nad_find_attr(nad, 0, -1, "to", NULL);
    if(attr < 0 || (to = jid_new(NAD_AVAL(nad, attr), NAD_AVAL_L(nad, attr))) == NULL) {
        log_debug(ZONE, "missing or invalid to on db result packet");
        jid_free(from);
        nad_free(nad);
        return;
    }

    rkey = s2s_route_key(NULL, to->domain, from->domain);
    rkeylen = strlen(rkey);

    /* key is valid */
    if(nad_find_attr(nad, 0, -1, "type", "valid") >= 0 && xhash_get(out->states, rkey) == (void*) conn_INPROGRESS) {
        log_write(out->s2s->log, LOG_NOTICE, "[%d] [%s, port=%d] outgoing route '%s' is now valid%s%s", out->fd->fd, out->ip, out->port, rkey, (out->s->flags & SX_SSL_WRAPPER) ? ", TLS negotiated" : "", out->s->compressed ? ", ZLIB compression enabled" : "");

        xhash_put(out->states, pstrdup(xhash_pool(out->states), rkey), (void *) conn_VALID);    /* !!! small leak here */

        log_debug(ZONE, "%s valid, flushing queue", rkey);

        /* flush the queue */
        out_flush_route_queue(out->s2s, rkey, rkeylen);

        free(rkey);
        jid_free(from);
        jid_free(to);
        nad_free(nad);
        return;
    }

    /* invalid */
    log_write(out->s2s->log, LOG_NOTICE, "[%d] [%s, port=%d] outgoing route '%s' is now invalid", out->fd->fd, out->ip, out->port, rkey);

    /* close connection */
    log_write(out->s2s->log, LOG_NOTICE, "[%d] [%s, port=%d] closing connection", out->fd->fd, out->ip, out->port);

    /* report stream error */
    sx_error(out->s, stream_err_INVALID_ID, "dialback negotiation failed");

    /* close the stream */
    sx_close(out->s);

    /* bounce queue */
    out_bounce_route_queue(out->s2s, rkey, rkeylen, stanza_err_SERVICE_UNAVAILABLE);

    free(rkey);
    jid_free(from);
    jid_free(to);
    nad_free(nad);
}

/** incoming stream authenticated */
static void _out_verify(conn_t out, nad_t nad) {
    int attr, ns;
    jid_t from, to;
    conn_t in;
    char *rkey;
    int valid;

    attr = nad_find_attr(nad, 0, -1, "from", NULL);
    if(attr < 0 || (from = jid_new(NAD_AVAL(nad, attr), NAD_AVAL_L(nad, attr))) == NULL) {
        log_debug(ZONE, "missing or invalid from on db verify packet");
        nad_free(nad);
        return;
    }

    attr = nad_find_attr(nad, 0, -1, "to", NULL);
    if(attr < 0 || (to = jid_new(NAD_AVAL(nad, attr), NAD_AVAL_L(nad, attr))) == NULL) {
        log_debug(ZONE, "missing or invalid to on db verify packet");
        jid_free(from);
        nad_free(nad);
        return;
    }

    attr = nad_find_attr(nad, 0, -1, "id", NULL);
    if(attr < 0) {
        log_debug(ZONE, "missing id on db verify packet");
        jid_free(from);
        jid_free(to);
        nad_free(nad);
        return;
    }

    /* get the incoming conn */
    in = xhash_getx(out->s2s->in, NAD_AVAL(nad, attr), NAD_AVAL_L(nad, attr));
    if(in == NULL) {
        log_debug(ZONE, "got a verify for incoming conn %.*s, but it doesn't exist, dropping the packet", NAD_AVAL_L(nad, attr), NAD_AVAL(nad, attr));
        jid_free(from);
        jid_free(to);
        nad_free(nad);
        return;
    }

    rkey = s2s_route_key(NULL, to->domain, from->domain);

    attr = nad_find_attr(nad, 0, -1, "type", "valid");
    if(attr >= 0 && xhash_get(in->states, rkey) == (void*) conn_INPROGRESS) {
        xhash_put(in->states, pstrdup(xhash_pool(in->states), rkey), (void *) conn_VALID);
        log_write(in->s2s->log, LOG_NOTICE, "[%d] [%s, port=%d] incoming route '%s' is now valid%s%s", in->fd->fd, in->ip, in->port, rkey, (in->s->flags & SX_SSL_WRAPPER) ? ", TLS negotiated" : "", in->s->compressed ? ", ZLIB compression enabled" : "");
        valid = 1;
    } else {
        log_write(in->s2s->log, LOG_NOTICE, "[%d] [%s, port=%d] incoming route '%s' is now invalid", in->fd->fd, in->ip, in->port, rkey);
        valid = 0;
    }

    free(rkey);

    nad_free(nad);

    /* decrement outstanding verify counter */
    --out->verify;

    /* let them know what happened */
    nad = nad_new();

    ns = nad_add_namespace(nad, uri_DIALBACK, "db");

    nad_append_elem(nad, ns, "result", 0);
    nad_append_attr(nad, -1, "to", from->domain);
    nad_append_attr(nad, -1, "from", to->domain);
    nad_append_attr(nad, -1, "type", valid ?
"valid" : "invalid"); /* off it goes */ sx_nad_write(in->s, nad); /* if invalid, close the stream */ if (!valid) { /* generate stream error */ sx_error(in->s, stream_err_INVALID_ID, "dialback negotiation failed"); /* close the incoming stream */ sx_close(in->s); } jid_free(from); jid_free(to); } /* bounce all packets in the queues for domain */ int out_bounce_domain_queues(s2s_t s2s, const char *domain, int err) { char *rkey; int rkeylen; int pktcount = 0; if (xhash_iter_first(s2s->outq)) { do { xhash_iter_get(s2s->outq, (const char **) &rkey, &rkeylen, NULL); if(s2s_route_key_match(NULL, (char *) domain, rkey, rkeylen)) pktcount += out_bounce_route_queue(s2s, rkey, rkeylen, err); } while(xhash_iter_next(s2s->outq)); } return pktcount; } /* bounce all packets in the queue for route */ int out_bounce_route_queue(s2s_t s2s, char *rkey, int rkeylen, int err) { jqueue_t q; pkt_t pkt; int pktcount = 0; q = xhash_getx(s2s->outq, rkey, rkeylen); if(q == NULL) return 0; while((pkt = jqueue_pull(q)) != NULL) { /* only packets with content, in namespace jabber:client and not already errors */ if(pkt->nad->ecur > 1 && NAD_NURI_L(pkt->nad, NAD_ENS(pkt->nad, 1)) == strlen(uri_CLIENT) && strncmp(NAD_NURI(pkt->nad, NAD_ENS(pkt->nad, 1)), uri_CLIENT, strlen(uri_CLIENT)) == 0 && nad_find_attr(pkt->nad, 0, -1, "error", NULL) < 0) { sx_nad_write(s2s->router, stanza_tofrom(stanza_tofrom(stanza_error(pkt->nad, 1, err), 1), 0)); pktcount++; } else nad_free(pkt->nad); jid_free(pkt->to); jid_free(pkt->from); free(pkt); } /* delete queue and remove domain from queue hash */ log_debug(ZONE, "deleting out packet queue for %.*s", rkeylen, rkey); rkey = q->key; jqueue_free(q); xhash_zap(s2s->outq, rkey); free(rkey); return pktcount; } int out_bounce_conn_queues(conn_t out, int err) { char *rkey; int rkeylen; int pktcount = 0; /* bounce queues for all domains handled by this connection - iterate through routes */ if (xhash_iter_first(out->routes)) { do { xhash_iter_get(out->routes, (const char 
**) &rkey, &rkeylen, NULL); pktcount += out_bounce_route_queue(out->s2s, rkey, rkeylen, err); } while(xhash_iter_next(out->routes)); } return pktcount; } void out_flush_domain_queues(s2s_t s2s, const char *domain) { char *rkey; int rkeylen; char *c; int c_len; if (xhash_iter_first(s2s->outq)) { do { xhash_iter_get(s2s->outq, (const char **) &rkey, &rkeylen, NULL); c = memchr(rkey, '/', rkeylen); c++; c_len = rkeylen - (c - rkey); if (strncmp(domain, c, c_len) == 0) out_flush_route_queue(s2s, rkey, rkeylen); } while(xhash_iter_next(s2s->outq)); } } void out_flush_route_queue(s2s_t s2s, char *rkey, int rkeylen) { jqueue_t q; pkt_t pkt; int npkt, i, ret; q = xhash_getx(s2s->outq, rkey, rkeylen); if(q == NULL) return; npkt = jqueue_size(q); log_debug(ZONE, "flushing %d packets for '%.*s' to out_packet", npkt, rkeylen, rkey); for(i = 0; i < npkt; i++) { pkt = jqueue_pull(q); if(pkt) { ret = out_packet(s2s, pkt); if (ret) { /* uh-oh. the queue was deleted... q and pkt have been freed if q->key == rkey, rkey has also been freed */ return; } } } /* delete queue for route and remove route from queue hash */ if (jqueue_size(q) == 0) { log_debug(ZONE, "deleting out packet queue for '%.*s'", rkeylen, rkey); rkey = q->key; jqueue_free(q); xhash_zap(s2s->outq, rkey); free(rkey); } else { log_debug(ZONE, "emptied queue gained more packets..."); } }
static void _out_result(conn_t out, nad_t nad) { int attr; jid_t from, to; char *rkey; int rkeylen; attr = nad_find_attr(nad, 0, -1, "from", NULL); if(attr < 0 || (from = jid_new(NAD_AVAL(nad, attr), NAD_AVAL_L(nad, attr))) == NULL) { log_debug(ZONE, "missing or invalid from on db result packet"); nad_free(nad); return; } attr = nad_find_attr(nad, 0, -1, "to", NULL); if(attr < 0 || (to = jid_new(NAD_AVAL(nad, attr), NAD_AVAL_L(nad, attr))) == NULL) { log_debug(ZONE, "missing or invalid to on db result packet"); jid_free(from); nad_free(nad); return; } rkey = s2s_route_key(NULL, to->domain, from->domain); rkeylen = strlen(rkey); /* key is valid */ if(nad_find_attr(nad, 0, -1, "type", "valid") >= 0) { log_write(out->s2s->log, LOG_NOTICE, "[%d] [%s, port=%d] outgoing route '%s' is now valid%s%s", out->fd->fd, out->ip, out->port, rkey, (out->s->flags & SX_SSL_WRAPPER) ? ", TLS negotiated" : "", out->s->compressed ? ", ZLIB compression enabled" : ""); xhash_put(out->states, pstrdup(xhash_pool(out->states), rkey), (void *) conn_VALID); /* !!! small leak here */ log_debug(ZONE, "%s valid, flushing queue", rkey); /* flush the queue */ out_flush_route_queue(out->s2s, rkey, rkeylen); free(rkey); jid_free(from); jid_free(to); nad_free(nad); return; } /* invalid */ log_write(out->s2s->log, LOG_NOTICE, "[%d] [%s, port=%d] outgoing route '%s' is now invalid", out->fd->fd, out->ip, out->port, rkey); /* close connection */ log_write(out->s2s->log, LOG_NOTICE, "[%d] [%s, port=%d] closing connection", out->fd->fd, out->ip, out->port); /* report stream error */ sx_error(out->s, stream_err_INVALID_ID, "dialback negotiation failed"); /* close the stream */ sx_close(out->s); /* bounce queue */ out_bounce_route_queue(out->s2s, rkey, rkeylen, stanza_err_SERVICE_UNAVAILABLE); free(rkey); jid_free(from); jid_free(to); nad_free(nad); }
static void _out_result(conn_t out, nad_t nad) { int attr; jid_t from, to; char *rkey; int rkeylen; attr = nad_find_attr(nad, 0, -1, "from", NULL); if(attr < 0 || (from = jid_new(NAD_AVAL(nad, attr), NAD_AVAL_L(nad, attr))) == NULL) { log_debug(ZONE, "missing or invalid from on db result packet"); nad_free(nad); return; } attr = nad_find_attr(nad, 0, -1, "to", NULL); if(attr < 0 || (to = jid_new(NAD_AVAL(nad, attr), NAD_AVAL_L(nad, attr))) == NULL) { log_debug(ZONE, "missing or invalid to on db result packet"); jid_free(from); nad_free(nad); return; } rkey = s2s_route_key(NULL, to->domain, from->domain); rkeylen = strlen(rkey); /* key is valid */ if(nad_find_attr(nad, 0, -1, "type", "valid") >= 0 && xhash_get(out->states, rkey) == (void*) conn_INPROGRESS) { log_write(out->s2s->log, LOG_NOTICE, "[%d] [%s, port=%d] outgoing route '%s' is now valid%s%s", out->fd->fd, out->ip, out->port, rkey, (out->s->flags & SX_SSL_WRAPPER) ? ", TLS negotiated" : "", out->s->compressed ? ", ZLIB compression enabled" : ""); xhash_put(out->states, pstrdup(xhash_pool(out->states), rkey), (void *) conn_VALID); /* !!! small leak here */ log_debug(ZONE, "%s valid, flushing queue", rkey); /* flush the queue */ out_flush_route_queue(out->s2s, rkey, rkeylen); free(rkey); jid_free(from); jid_free(to); nad_free(nad); return; } /* invalid */ log_write(out->s2s->log, LOG_NOTICE, "[%d] [%s, port=%d] outgoing route '%s' is now invalid", out->fd->fd, out->ip, out->port, rkey); /* close connection */ log_write(out->s2s->log, LOG_NOTICE, "[%d] [%s, port=%d] closing connection", out->fd->fd, out->ip, out->port); /* report stream error */ sx_error(out->s, stream_err_INVALID_ID, "dialback negotiation failed"); /* close the stream */ sx_close(out->s); /* bounce queue */ out_bounce_route_queue(out->s2s, rkey, rkeylen, stanza_err_SERVICE_UNAVAILABLE); free(rkey); jid_free(from); jid_free(to); nad_free(nad); }
{'added': [(1664, ' if(nad_find_attr(nad, 0, -1, "type", "valid") >= 0 && xhash_get(out->states, rkey) == (void*) conn_INPROGRESS) {'), (1752, ' if(attr >= 0 && xhash_get(in->states, rkey) == (void*) conn_INPROGRESS) {')], 'deleted': [(1664, ' if(nad_find_attr(nad, 0, -1, "type", "valid") >= 0) {'), (1752, ' if(attr >= 0) {')]}
2
2
1,278
10,866
41
421
8
https://github.com/Jabberd2/jabberd2
CVE-2012-3525
CWE-20
1,586
netback.c
C
xen_netbk_tx_build_gops
/* * Back-end of the driver for virtual network devices. This portion of the * driver exports a 'unified' network-device interface that can be accessed * by any operating system that implements a compatible front end. A * reference front-end implementation can be found in: * drivers/net/xen-netfront.c * * Copyright (c) 2002-2005, K A Fraser * * This program is free software; you can redistribute it and/or * modify it under the terms of the GNU General Public License version 2 * as published by the Free Software Foundation; or, when distributed * separately from the Linux kernel or incorporated into other * software packages, subject to the following license: * * Permission is hereby granted, free of charge, to any person obtaining a copy * of this source file (the "Software"), to deal in the Software without * restriction, including without limitation the rights to use, copy, modify, * merge, publish, distribute, sublicense, and/or sell copies of the Software, * and to permit persons to whom the Software is furnished to do so, subject to * the following conditions: * * The above copyright notice and this permission notice shall be included in * all copies or substantial portions of the Software. * * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS * IN THE SOFTWARE. 
*/ #include "common.h" #include <linux/kthread.h> #include <linux/if_vlan.h> #include <linux/udp.h> #include <net/tcp.h> #include <xen/xen.h> #include <xen/events.h> #include <xen/interface/memory.h> #include <asm/xen/hypercall.h> #include <asm/xen/page.h> struct pending_tx_info { struct xen_netif_tx_request req; struct xenvif *vif; }; typedef unsigned int pending_ring_idx_t; struct netbk_rx_meta { int id; int size; int gso_size; }; #define MAX_PENDING_REQS 256 /* Discriminate from any valid pending_idx value. */ #define INVALID_PENDING_IDX 0xFFFF #define MAX_BUFFER_OFFSET PAGE_SIZE /* extra field used in struct page */ union page_ext { struct { #if BITS_PER_LONG < 64 #define IDX_WIDTH 8 #define GROUP_WIDTH (BITS_PER_LONG - IDX_WIDTH) unsigned int group:GROUP_WIDTH; unsigned int idx:IDX_WIDTH; #else unsigned int group, idx; #endif } e; void *mapping; }; struct xen_netbk { wait_queue_head_t wq; struct task_struct *task; struct sk_buff_head rx_queue; struct sk_buff_head tx_queue; struct timer_list net_timer; struct page *mmap_pages[MAX_PENDING_REQS]; pending_ring_idx_t pending_prod; pending_ring_idx_t pending_cons; struct list_head net_schedule_list; /* Protect the net_schedule_list in netif. */ spinlock_t net_schedule_list_lock; atomic_t netfront_count; struct pending_tx_info pending_tx_info[MAX_PENDING_REQS]; struct gnttab_copy tx_copy_ops[MAX_PENDING_REQS]; u16 pending_ring[MAX_PENDING_REQS]; /* * Given MAX_BUFFER_OFFSET of 4096 the worst case is that each * head/fragment page uses 2 copy operations because it * straddles two buffers in the frontend. 
*/ struct gnttab_copy grant_copy_op[2*XEN_NETIF_RX_RING_SIZE]; struct netbk_rx_meta meta[2*XEN_NETIF_RX_RING_SIZE]; }; static struct xen_netbk *xen_netbk; static int xen_netbk_group_nr; void xen_netbk_add_xenvif(struct xenvif *vif) { int i; int min_netfront_count; int min_group = 0; struct xen_netbk *netbk; min_netfront_count = atomic_read(&xen_netbk[0].netfront_count); for (i = 0; i < xen_netbk_group_nr; i++) { int netfront_count = atomic_read(&xen_netbk[i].netfront_count); if (netfront_count < min_netfront_count) { min_group = i; min_netfront_count = netfront_count; } } netbk = &xen_netbk[min_group]; vif->netbk = netbk; atomic_inc(&netbk->netfront_count); } void xen_netbk_remove_xenvif(struct xenvif *vif) { struct xen_netbk *netbk = vif->netbk; vif->netbk = NULL; atomic_dec(&netbk->netfront_count); } static void xen_netbk_idx_release(struct xen_netbk *netbk, u16 pending_idx); static void make_tx_response(struct xenvif *vif, struct xen_netif_tx_request *txp, s8 st); static struct xen_netif_rx_response *make_rx_response(struct xenvif *vif, u16 id, s8 st, u16 offset, u16 size, u16 flags); static inline unsigned long idx_to_pfn(struct xen_netbk *netbk, u16 idx) { return page_to_pfn(netbk->mmap_pages[idx]); } static inline unsigned long idx_to_kaddr(struct xen_netbk *netbk, u16 idx) { return (unsigned long)pfn_to_kaddr(idx_to_pfn(netbk, idx)); } /* extra field used in struct page */ static inline void set_page_ext(struct page *pg, struct xen_netbk *netbk, unsigned int idx) { unsigned int group = netbk - xen_netbk; union page_ext ext = { .e = { .group = group + 1, .idx = idx } }; BUILD_BUG_ON(sizeof(ext) > sizeof(ext.mapping)); pg->mapping = ext.mapping; } static int get_page_ext(struct page *pg, unsigned int *pgroup, unsigned int *pidx) { union page_ext ext = { .mapping = pg->mapping }; struct xen_netbk *netbk; unsigned int group, idx; group = ext.e.group - 1; if (group < 0 || group >= xen_netbk_group_nr) return 0; netbk = &xen_netbk[group]; idx = ext.e.idx; if ((idx 
< 0) || (idx >= MAX_PENDING_REQS)) return 0; if (netbk->mmap_pages[idx] != pg) return 0; *pgroup = group; *pidx = idx; return 1; } /* * This is the amount of packet we copy rather than map, so that the * guest can't fiddle with the contents of the headers while we do * packet processing on them (netfilter, routing, etc). */ #define PKT_PROT_LEN (ETH_HLEN + \ VLAN_HLEN + \ sizeof(struct iphdr) + MAX_IPOPTLEN + \ sizeof(struct tcphdr) + MAX_TCP_OPTION_SPACE) static u16 frag_get_pending_idx(skb_frag_t *frag) { return (u16)frag->page_offset; } static void frag_set_pending_idx(skb_frag_t *frag, u16 pending_idx) { frag->page_offset = pending_idx; } static inline pending_ring_idx_t pending_index(unsigned i) { return i & (MAX_PENDING_REQS-1); } static inline pending_ring_idx_t nr_pending_reqs(struct xen_netbk *netbk) { return MAX_PENDING_REQS - netbk->pending_prod + netbk->pending_cons; } static void xen_netbk_kick_thread(struct xen_netbk *netbk) { wake_up(&netbk->wq); } static int max_required_rx_slots(struct xenvif *vif) { int max = DIV_ROUND_UP(vif->dev->mtu, PAGE_SIZE); if (vif->can_sg || vif->gso || vif->gso_prefix) max += MAX_SKB_FRAGS + 1; /* extra_info + frags */ return max; } int xen_netbk_rx_ring_full(struct xenvif *vif) { RING_IDX peek = vif->rx_req_cons_peek; RING_IDX needed = max_required_rx_slots(vif); return ((vif->rx.sring->req_prod - peek) < needed) || ((vif->rx.rsp_prod_pvt + XEN_NETIF_RX_RING_SIZE - peek) < needed); } int xen_netbk_must_stop_queue(struct xenvif *vif) { if (!xen_netbk_rx_ring_full(vif)) return 0; vif->rx.sring->req_event = vif->rx_req_cons_peek + max_required_rx_slots(vif); mb(); /* request notification /then/ check the queue */ return xen_netbk_rx_ring_full(vif); } /* * Returns true if we should start a new receive buffer instead of * adding 'size' bytes to a buffer which currently contains 'offset' * bytes. 
*/ static bool start_new_rx_buffer(int offset, unsigned long size, int head) { /* simple case: we have completely filled the current buffer. */ if (offset == MAX_BUFFER_OFFSET) return true; /* * complex case: start a fresh buffer if the current frag * would overflow the current buffer but only if: * (i) this frag would fit completely in the next buffer * and (ii) there is already some data in the current buffer * and (iii) this is not the head buffer. * * Where: * - (i) stops us splitting a frag into two copies * unless the frag is too large for a single buffer. * - (ii) stops us from leaving a buffer pointlessly empty. * - (iii) stops us leaving the first buffer * empty. Strictly speaking this is already covered * by (ii) but is explicitly checked because * netfront relies on the first buffer being * non-empty and can crash otherwise. * * This means we will effectively linearise small * frags but do not needlessly split large buffers * into multiple copies tend to give large frags their * own buffers as before. */ if ((offset + size > MAX_BUFFER_OFFSET) && (size <= MAX_BUFFER_OFFSET) && offset && !head) return true; return false; } /* * Figure out how many ring slots we're going to need to send @skb to * the guest. This function is essentially a dry run of * netbk_gop_frag_copy. 
*/ unsigned int xen_netbk_count_skb_slots(struct xenvif *vif, struct sk_buff *skb) { unsigned int count; int i, copy_off; count = DIV_ROUND_UP(skb_headlen(skb), PAGE_SIZE); copy_off = skb_headlen(skb) % PAGE_SIZE; if (skb_shinfo(skb)->gso_size) count++; for (i = 0; i < skb_shinfo(skb)->nr_frags; i++) { unsigned long size = skb_frag_size(&skb_shinfo(skb)->frags[i]); unsigned long offset = skb_shinfo(skb)->frags[i].page_offset; unsigned long bytes; offset &= ~PAGE_MASK; while (size > 0) { BUG_ON(offset >= PAGE_SIZE); BUG_ON(copy_off > MAX_BUFFER_OFFSET); bytes = PAGE_SIZE - offset; if (bytes > size) bytes = size; if (start_new_rx_buffer(copy_off, bytes, 0)) { count++; copy_off = 0; } if (copy_off + bytes > MAX_BUFFER_OFFSET) bytes = MAX_BUFFER_OFFSET - copy_off; copy_off += bytes; offset += bytes; size -= bytes; if (offset == PAGE_SIZE) offset = 0; } } return count; } struct netrx_pending_operations { unsigned copy_prod, copy_cons; unsigned meta_prod, meta_cons; struct gnttab_copy *copy; struct netbk_rx_meta *meta; int copy_off; grant_ref_t copy_gref; }; static struct netbk_rx_meta *get_next_rx_buffer(struct xenvif *vif, struct netrx_pending_operations *npo) { struct netbk_rx_meta *meta; struct xen_netif_rx_request *req; req = RING_GET_REQUEST(&vif->rx, vif->rx.req_cons++); meta = npo->meta + npo->meta_prod++; meta->gso_size = 0; meta->size = 0; meta->id = req->id; npo->copy_off = 0; npo->copy_gref = req->gref; return meta; } /* * Set up the grant operations for this fragment. If it's a flipping * interface, we also set up the unmap request from here. */ static void netbk_gop_frag_copy(struct xenvif *vif, struct sk_buff *skb, struct netrx_pending_operations *npo, struct page *page, unsigned long size, unsigned long offset, int *head) { struct gnttab_copy *copy_gop; struct netbk_rx_meta *meta; /* * These variables are used iff get_page_ext returns true, * in which case they are guaranteed to be initialized. 
*/ unsigned int uninitialized_var(group), uninitialized_var(idx); int foreign = get_page_ext(page, &group, &idx); unsigned long bytes; /* Data must not cross a page boundary. */ BUG_ON(size + offset > PAGE_SIZE<<compound_order(page)); meta = npo->meta + npo->meta_prod - 1; /* Skip unused frames from start of page */ page += offset >> PAGE_SHIFT; offset &= ~PAGE_MASK; while (size > 0) { BUG_ON(offset >= PAGE_SIZE); BUG_ON(npo->copy_off > MAX_BUFFER_OFFSET); bytes = PAGE_SIZE - offset; if (bytes > size) bytes = size; if (start_new_rx_buffer(npo->copy_off, bytes, *head)) { /* * Netfront requires there to be some data in the head * buffer. */ BUG_ON(*head); meta = get_next_rx_buffer(vif, npo); } if (npo->copy_off + bytes > MAX_BUFFER_OFFSET) bytes = MAX_BUFFER_OFFSET - npo->copy_off; copy_gop = npo->copy + npo->copy_prod++; copy_gop->flags = GNTCOPY_dest_gref; if (foreign) { struct xen_netbk *netbk = &xen_netbk[group]; struct pending_tx_info *src_pend; src_pend = &netbk->pending_tx_info[idx]; copy_gop->source.domid = src_pend->vif->domid; copy_gop->source.u.ref = src_pend->req.gref; copy_gop->flags |= GNTCOPY_source_gref; } else { void *vaddr = page_address(page); copy_gop->source.domid = DOMID_SELF; copy_gop->source.u.gmfn = virt_to_mfn(vaddr); } copy_gop->source.offset = offset; copy_gop->dest.domid = vif->domid; copy_gop->dest.offset = npo->copy_off; copy_gop->dest.u.ref = npo->copy_gref; copy_gop->len = bytes; npo->copy_off += bytes; meta->size += bytes; offset += bytes; size -= bytes; /* Next frame */ if (offset == PAGE_SIZE && size) { BUG_ON(!PageCompound(page)); page++; offset = 0; } /* Leave a gap for the GSO descriptor. */ if (*head && skb_shinfo(skb)->gso_size && !vif->gso_prefix) vif->rx.req_cons++; *head = 0; /* There must be something in this buffer now. */ } } /* * Prepare an SKB to be transmitted to the frontend. * * This function is responsible for allocating grant operations, meta * structures, etc. 
* * It returns the number of meta structures consumed. The number of * ring slots used is always equal to the number of meta slots used * plus the number of GSO descriptors used. Currently, we use either * zero GSO descriptors (for non-GSO packets) or one descriptor (for * frontend-side LRO). */ static int netbk_gop_skb(struct sk_buff *skb, struct netrx_pending_operations *npo) { struct xenvif *vif = netdev_priv(skb->dev); int nr_frags = skb_shinfo(skb)->nr_frags; int i; struct xen_netif_rx_request *req; struct netbk_rx_meta *meta; unsigned char *data; int head = 1; int old_meta_prod; old_meta_prod = npo->meta_prod; /* Set up a GSO prefix descriptor, if necessary */ if (skb_shinfo(skb)->gso_size && vif->gso_prefix) { req = RING_GET_REQUEST(&vif->rx, vif->rx.req_cons++); meta = npo->meta + npo->meta_prod++; meta->gso_size = skb_shinfo(skb)->gso_size; meta->size = 0; meta->id = req->id; } req = RING_GET_REQUEST(&vif->rx, vif->rx.req_cons++); meta = npo->meta + npo->meta_prod++; if (!vif->gso_prefix) meta->gso_size = skb_shinfo(skb)->gso_size; else meta->gso_size = 0; meta->size = 0; meta->id = req->id; npo->copy_off = 0; npo->copy_gref = req->gref; data = skb->data; while (data < skb_tail_pointer(skb)) { unsigned int offset = offset_in_page(data); unsigned int len = PAGE_SIZE - offset; if (data + len > skb_tail_pointer(skb)) len = skb_tail_pointer(skb) - data; netbk_gop_frag_copy(vif, skb, npo, virt_to_page(data), len, offset, &head); data += len; } for (i = 0; i < nr_frags; i++) { netbk_gop_frag_copy(vif, skb, npo, skb_frag_page(&skb_shinfo(skb)->frags[i]), skb_frag_size(&skb_shinfo(skb)->frags[i]), skb_shinfo(skb)->frags[i].page_offset, &head); } return npo->meta_prod - old_meta_prod; } /* * This is a twin to netbk_gop_skb. Assume that netbk_gop_skb was * used to set up the operations on the top of * netrx_pending_operations, which have since been done. Check that * they didn't give any errors and advance over them. 
*/ static int netbk_check_gop(struct xenvif *vif, int nr_meta_slots, struct netrx_pending_operations *npo) { struct gnttab_copy *copy_op; int status = XEN_NETIF_RSP_OKAY; int i; for (i = 0; i < nr_meta_slots; i++) { copy_op = npo->copy + npo->copy_cons++; if (copy_op->status != GNTST_okay) { netdev_dbg(vif->dev, "Bad status %d from copy to DOM%d.\n", copy_op->status, vif->domid); status = XEN_NETIF_RSP_ERROR; } } return status; } static void netbk_add_frag_responses(struct xenvif *vif, int status, struct netbk_rx_meta *meta, int nr_meta_slots) { int i; unsigned long offset; /* No fragments used */ if (nr_meta_slots <= 1) return; nr_meta_slots--; for (i = 0; i < nr_meta_slots; i++) { int flags; if (i == nr_meta_slots - 1) flags = 0; else flags = XEN_NETRXF_more_data; offset = 0; make_rx_response(vif, meta[i].id, status, offset, meta[i].size, flags); } } struct skb_cb_overlay { int meta_slots_used; }; static void xen_netbk_rx_action(struct xen_netbk *netbk) { struct xenvif *vif = NULL, *tmp; s8 status; u16 irq, flags; struct xen_netif_rx_response *resp; struct sk_buff_head rxq; struct sk_buff *skb; LIST_HEAD(notify); int ret; int nr_frags; int count; unsigned long offset; struct skb_cb_overlay *sco; struct netrx_pending_operations npo = { .copy = netbk->grant_copy_op, .meta = netbk->meta, }; skb_queue_head_init(&rxq); count = 0; while ((skb = skb_dequeue(&netbk->rx_queue)) != NULL) { vif = netdev_priv(skb->dev); nr_frags = skb_shinfo(skb)->nr_frags; sco = (struct skb_cb_overlay *)skb->cb; sco->meta_slots_used = netbk_gop_skb(skb, &npo); count += nr_frags + 1; __skb_queue_tail(&rxq, skb); /* Filled the batch queue? 
*/ if (count + MAX_SKB_FRAGS >= XEN_NETIF_RX_RING_SIZE) break; } BUG_ON(npo.meta_prod > ARRAY_SIZE(netbk->meta)); if (!npo.copy_prod) return; BUG_ON(npo.copy_prod > ARRAY_SIZE(netbk->grant_copy_op)); gnttab_batch_copy(netbk->grant_copy_op, npo.copy_prod); while ((skb = __skb_dequeue(&rxq)) != NULL) { sco = (struct skb_cb_overlay *)skb->cb; vif = netdev_priv(skb->dev); if (netbk->meta[npo.meta_cons].gso_size && vif->gso_prefix) { resp = RING_GET_RESPONSE(&vif->rx, vif->rx.rsp_prod_pvt++); resp->flags = XEN_NETRXF_gso_prefix | XEN_NETRXF_more_data; resp->offset = netbk->meta[npo.meta_cons].gso_size; resp->id = netbk->meta[npo.meta_cons].id; resp->status = sco->meta_slots_used; npo.meta_cons++; sco->meta_slots_used--; } vif->dev->stats.tx_bytes += skb->len; vif->dev->stats.tx_packets++; status = netbk_check_gop(vif, sco->meta_slots_used, &npo); if (sco->meta_slots_used == 1) flags = 0; else flags = XEN_NETRXF_more_data; if (skb->ip_summed == CHECKSUM_PARTIAL) /* local packet? */ flags |= XEN_NETRXF_csum_blank | XEN_NETRXF_data_validated; else if (skb->ip_summed == CHECKSUM_UNNECESSARY) /* remote but checksummed. 
*/ flags |= XEN_NETRXF_data_validated; offset = 0; resp = make_rx_response(vif, netbk->meta[npo.meta_cons].id, status, offset, netbk->meta[npo.meta_cons].size, flags); if (netbk->meta[npo.meta_cons].gso_size && !vif->gso_prefix) { struct xen_netif_extra_info *gso = (struct xen_netif_extra_info *) RING_GET_RESPONSE(&vif->rx, vif->rx.rsp_prod_pvt++); resp->flags |= XEN_NETRXF_extra_info; gso->u.gso.size = netbk->meta[npo.meta_cons].gso_size; gso->u.gso.type = XEN_NETIF_GSO_TYPE_TCPV4; gso->u.gso.pad = 0; gso->u.gso.features = 0; gso->type = XEN_NETIF_EXTRA_TYPE_GSO; gso->flags = 0; } netbk_add_frag_responses(vif, status, netbk->meta + npo.meta_cons + 1, sco->meta_slots_used); RING_PUSH_RESPONSES_AND_CHECK_NOTIFY(&vif->rx, ret); irq = vif->irq; if (ret && list_empty(&vif->notify_list)) list_add_tail(&vif->notify_list, &notify); xenvif_notify_tx_completion(vif); xenvif_put(vif); npo.meta_cons += sco->meta_slots_used; dev_kfree_skb(skb); } list_for_each_entry_safe(vif, tmp, &notify, notify_list) { notify_remote_via_irq(vif->irq); list_del_init(&vif->notify_list); } /* More work to do? 
*/ if (!skb_queue_empty(&netbk->rx_queue) && !timer_pending(&netbk->net_timer)) xen_netbk_kick_thread(netbk); } void xen_netbk_queue_tx_skb(struct xenvif *vif, struct sk_buff *skb) { struct xen_netbk *netbk = vif->netbk; skb_queue_tail(&netbk->rx_queue, skb); xen_netbk_kick_thread(netbk); } static void xen_netbk_alarm(unsigned long data) { struct xen_netbk *netbk = (struct xen_netbk *)data; xen_netbk_kick_thread(netbk); } static int __on_net_schedule_list(struct xenvif *vif) { return !list_empty(&vif->schedule_list); } /* Must be called with net_schedule_list_lock held */ static void remove_from_net_schedule_list(struct xenvif *vif) { if (likely(__on_net_schedule_list(vif))) { list_del_init(&vif->schedule_list); xenvif_put(vif); } } static struct xenvif *poll_net_schedule_list(struct xen_netbk *netbk) { struct xenvif *vif = NULL; spin_lock_irq(&netbk->net_schedule_list_lock); if (list_empty(&netbk->net_schedule_list)) goto out; vif = list_first_entry(&netbk->net_schedule_list, struct xenvif, schedule_list); if (!vif) goto out; xenvif_get(vif); remove_from_net_schedule_list(vif); out: spin_unlock_irq(&netbk->net_schedule_list_lock); return vif; } void xen_netbk_schedule_xenvif(struct xenvif *vif) { unsigned long flags; struct xen_netbk *netbk = vif->netbk; if (__on_net_schedule_list(vif)) goto kick; spin_lock_irqsave(&netbk->net_schedule_list_lock, flags); if (!__on_net_schedule_list(vif) && likely(xenvif_schedulable(vif))) { list_add_tail(&vif->schedule_list, &netbk->net_schedule_list); xenvif_get(vif); } spin_unlock_irqrestore(&netbk->net_schedule_list_lock, flags); kick: smp_mb(); if ((nr_pending_reqs(netbk) < (MAX_PENDING_REQS/2)) && !list_empty(&netbk->net_schedule_list)) xen_netbk_kick_thread(netbk); } void xen_netbk_deschedule_xenvif(struct xenvif *vif) { struct xen_netbk *netbk = vif->netbk; spin_lock_irq(&netbk->net_schedule_list_lock); remove_from_net_schedule_list(vif); spin_unlock_irq(&netbk->net_schedule_list_lock); } void 
xen_netbk_check_rx_xenvif(struct xenvif *vif) { int more_to_do; RING_FINAL_CHECK_FOR_REQUESTS(&vif->tx, more_to_do); if (more_to_do) xen_netbk_schedule_xenvif(vif); } static void tx_add_credit(struct xenvif *vif) { unsigned long max_burst, max_credit; /* * Allow a burst big enough to transmit a jumbo packet of up to 128kB. * Otherwise the interface can seize up due to insufficient credit. */ max_burst = RING_GET_REQUEST(&vif->tx, vif->tx.req_cons)->size; max_burst = min(max_burst, 131072UL); max_burst = max(max_burst, vif->credit_bytes); /* Take care that adding a new chunk of credit doesn't wrap to zero. */ max_credit = vif->remaining_credit + vif->credit_bytes; if (max_credit < vif->remaining_credit) max_credit = ULONG_MAX; /* wrapped: clamp to ULONG_MAX */ vif->remaining_credit = min(max_credit, max_burst); } static void tx_credit_callback(unsigned long data) { struct xenvif *vif = (struct xenvif *)data; tx_add_credit(vif); xen_netbk_check_rx_xenvif(vif); } static void netbk_tx_err(struct xenvif *vif, struct xen_netif_tx_request *txp, RING_IDX end) { RING_IDX cons = vif->tx.req_cons; do { make_tx_response(vif, txp, XEN_NETIF_RSP_ERROR); if (cons >= end) break; txp = RING_GET_REQUEST(&vif->tx, cons++); } while (1); vif->tx.req_cons = cons; xen_netbk_check_rx_xenvif(vif); xenvif_put(vif); } static int netbk_count_requests(struct xenvif *vif, struct xen_netif_tx_request *first, struct xen_netif_tx_request *txp, int work_to_do) { RING_IDX cons = vif->tx.req_cons; int frags = 0; if (!(first->flags & XEN_NETTXF_more_data)) return 0; do { if (frags >= work_to_do) { netdev_dbg(vif->dev, "Need more frags\n"); return -frags; } if (unlikely(frags >= MAX_SKB_FRAGS)) { netdev_dbg(vif->dev, "Too many frags\n"); return -frags; } memcpy(txp, RING_GET_REQUEST(&vif->tx, cons + frags), sizeof(*txp)); if (txp->size > first->size) { netdev_dbg(vif->dev, "Frags galore\n"); return -frags; } first->size -= txp->size; frags++; if (unlikely((txp->offset + txp->size) > PAGE_SIZE)) { 
netdev_dbg(vif->dev, "txp->offset: %x, size: %u\n", txp->offset, txp->size); return -frags; } } while ((txp++)->flags & XEN_NETTXF_more_data); return frags; } static struct page *xen_netbk_alloc_page(struct xen_netbk *netbk, struct sk_buff *skb, u16 pending_idx) { struct page *page; page = alloc_page(GFP_KERNEL|__GFP_COLD); if (!page) return NULL; set_page_ext(page, netbk, pending_idx); netbk->mmap_pages[pending_idx] = page; return page; } static struct gnttab_copy *xen_netbk_get_requests(struct xen_netbk *netbk, struct xenvif *vif, struct sk_buff *skb, struct xen_netif_tx_request *txp, struct gnttab_copy *gop) { struct skb_shared_info *shinfo = skb_shinfo(skb); skb_frag_t *frags = shinfo->frags; u16 pending_idx = *((u16 *)skb->data); int i, start; /* Skip first skb fragment if it is on same page as header fragment. */ start = (frag_get_pending_idx(&shinfo->frags[0]) == pending_idx); for (i = start; i < shinfo->nr_frags; i++, txp++) { struct page *page; pending_ring_idx_t index; struct pending_tx_info *pending_tx_info = netbk->pending_tx_info; index = pending_index(netbk->pending_cons++); pending_idx = netbk->pending_ring[index]; page = xen_netbk_alloc_page(netbk, skb, pending_idx); if (!page) return NULL; gop->source.u.ref = txp->gref; gop->source.domid = vif->domid; gop->source.offset = txp->offset; gop->dest.u.gmfn = virt_to_mfn(page_address(page)); gop->dest.domid = DOMID_SELF; gop->dest.offset = txp->offset; gop->len = txp->size; gop->flags = GNTCOPY_source_gref; gop++; memcpy(&pending_tx_info[pending_idx].req, txp, sizeof(*txp)); xenvif_get(vif); pending_tx_info[pending_idx].vif = vif; frag_set_pending_idx(&frags[i], pending_idx); } return gop; } static int xen_netbk_tx_check_gop(struct xen_netbk *netbk, struct sk_buff *skb, struct gnttab_copy **gopp) { struct gnttab_copy *gop = *gopp; u16 pending_idx = *((u16 *)skb->data); struct pending_tx_info *pending_tx_info = netbk->pending_tx_info; struct xenvif *vif = pending_tx_info[pending_idx].vif; struct 
xen_netif_tx_request *txp; struct skb_shared_info *shinfo = skb_shinfo(skb); int nr_frags = shinfo->nr_frags; int i, err, start; /* Check status of header. */ err = gop->status; if (unlikely(err)) { pending_ring_idx_t index; index = pending_index(netbk->pending_prod++); txp = &pending_tx_info[pending_idx].req; make_tx_response(vif, txp, XEN_NETIF_RSP_ERROR); netbk->pending_ring[index] = pending_idx; xenvif_put(vif); } /* Skip first skb fragment if it is on same page as header fragment. */ start = (frag_get_pending_idx(&shinfo->frags[0]) == pending_idx); for (i = start; i < nr_frags; i++) { int j, newerr; pending_ring_idx_t index; pending_idx = frag_get_pending_idx(&shinfo->frags[i]); /* Check error status: if okay then remember grant handle. */ newerr = (++gop)->status; if (likely(!newerr)) { /* Had a previous error? Invalidate this fragment. */ if (unlikely(err)) xen_netbk_idx_release(netbk, pending_idx); continue; } /* Error on this fragment: respond to client with an error. */ txp = &netbk->pending_tx_info[pending_idx].req; make_tx_response(vif, txp, XEN_NETIF_RSP_ERROR); index = pending_index(netbk->pending_prod++); netbk->pending_ring[index] = pending_idx; xenvif_put(vif); /* Not the first error? Preceding frags already invalidated. */ if (err) continue; /* First error: invalidate header and preceding fragments. */ pending_idx = *((u16 *)skb->data); xen_netbk_idx_release(netbk, pending_idx); for (j = start; j < i; j++) { pending_idx = frag_get_pending_idx(&shinfo->frags[j]); xen_netbk_idx_release(netbk, pending_idx); } /* Remember the error: invalidate all subsequent fragments. 
*/ err = newerr; } *gopp = gop + 1; return err; } static void xen_netbk_fill_frags(struct xen_netbk *netbk, struct sk_buff *skb) { struct skb_shared_info *shinfo = skb_shinfo(skb); int nr_frags = shinfo->nr_frags; int i; for (i = 0; i < nr_frags; i++) { skb_frag_t *frag = shinfo->frags + i; struct xen_netif_tx_request *txp; struct page *page; u16 pending_idx; pending_idx = frag_get_pending_idx(frag); txp = &netbk->pending_tx_info[pending_idx].req; page = virt_to_page(idx_to_kaddr(netbk, pending_idx)); __skb_fill_page_desc(skb, i, page, txp->offset, txp->size); skb->len += txp->size; skb->data_len += txp->size; skb->truesize += txp->size; /* Take an extra reference to offset xen_netbk_idx_release */ get_page(netbk->mmap_pages[pending_idx]); xen_netbk_idx_release(netbk, pending_idx); } } static int xen_netbk_get_extras(struct xenvif *vif, struct xen_netif_extra_info *extras, int work_to_do) { struct xen_netif_extra_info extra; RING_IDX cons = vif->tx.req_cons; do { if (unlikely(work_to_do-- <= 0)) { netdev_dbg(vif->dev, "Missing extra info\n"); return -EBADR; } memcpy(&extra, RING_GET_REQUEST(&vif->tx, cons), sizeof(extra)); if (unlikely(!extra.type || extra.type >= XEN_NETIF_EXTRA_TYPE_MAX)) { vif->tx.req_cons = ++cons; netdev_dbg(vif->dev, "Invalid extra type: %d\n", extra.type); return -EINVAL; } memcpy(&extras[extra.type - 1], &extra, sizeof(extra)); vif->tx.req_cons = ++cons; } while (extra.flags & XEN_NETIF_EXTRA_FLAG_MORE); return work_to_do; } static int netbk_set_skb_gso(struct xenvif *vif, struct sk_buff *skb, struct xen_netif_extra_info *gso) { if (!gso->u.gso.size) { netdev_dbg(vif->dev, "GSO size must not be zero.\n"); return -EINVAL; } /* Currently only TCPv4 S.O. is supported. */ if (gso->u.gso.type != XEN_NETIF_GSO_TYPE_TCPV4) { netdev_dbg(vif->dev, "Bad GSO type %d.\n", gso->u.gso.type); return -EINVAL; } skb_shinfo(skb)->gso_size = gso->u.gso.size; skb_shinfo(skb)->gso_type = SKB_GSO_TCPV4; /* Header must be checked, and gso_segs computed. 
*/ skb_shinfo(skb)->gso_type |= SKB_GSO_DODGY; skb_shinfo(skb)->gso_segs = 0; return 0; } static int checksum_setup(struct xenvif *vif, struct sk_buff *skb) { struct iphdr *iph; unsigned char *th; int err = -EPROTO; int recalculate_partial_csum = 0; /* * A GSO SKB must be CHECKSUM_PARTIAL. However some buggy * peers can fail to set NETRXF_csum_blank when sending a GSO * frame. In this case force the SKB to CHECKSUM_PARTIAL and * recalculate the partial checksum. */ if (skb->ip_summed != CHECKSUM_PARTIAL && skb_is_gso(skb)) { vif->rx_gso_checksum_fixup++; skb->ip_summed = CHECKSUM_PARTIAL; recalculate_partial_csum = 1; } /* A non-CHECKSUM_PARTIAL SKB does not require setup. */ if (skb->ip_summed != CHECKSUM_PARTIAL) return 0; if (skb->protocol != htons(ETH_P_IP)) goto out; iph = (void *)skb->data; th = skb->data + 4 * iph->ihl; if (th >= skb_tail_pointer(skb)) goto out; skb->csum_start = th - skb->head; switch (iph->protocol) { case IPPROTO_TCP: skb->csum_offset = offsetof(struct tcphdr, check); if (recalculate_partial_csum) { struct tcphdr *tcph = (struct tcphdr *)th; tcph->check = ~csum_tcpudp_magic(iph->saddr, iph->daddr, skb->len - iph->ihl*4, IPPROTO_TCP, 0); } break; case IPPROTO_UDP: skb->csum_offset = offsetof(struct udphdr, check); if (recalculate_partial_csum) { struct udphdr *udph = (struct udphdr *)th; udph->check = ~csum_tcpudp_magic(iph->saddr, iph->daddr, skb->len - iph->ihl*4, IPPROTO_UDP, 0); } break; default: if (net_ratelimit()) netdev_err(vif->dev, "Attempting to checksum a non-TCP/UDP packet, dropping a protocol %d packet\n", iph->protocol); goto out; } if ((th + skb->csum_offset + 2) > skb_tail_pointer(skb)) goto out; err = 0; out: return err; } static bool tx_credit_exceeded(struct xenvif *vif, unsigned size) { unsigned long now = jiffies; unsigned long next_credit = vif->credit_timeout.expires + msecs_to_jiffies(vif->credit_usec / 1000); /* Timer could already be pending in rare cases. 
*/ if (timer_pending(&vif->credit_timeout)) return true; /* Passed the point where we can replenish credit? */ if (time_after_eq(now, next_credit)) { vif->credit_timeout.expires = now; tx_add_credit(vif); } /* Still too big to send right now? Set a callback. */ if (size > vif->remaining_credit) { vif->credit_timeout.data = (unsigned long)vif; vif->credit_timeout.function = tx_credit_callback; mod_timer(&vif->credit_timeout, next_credit); return true; } return false; } static unsigned xen_netbk_tx_build_gops(struct xen_netbk *netbk) { struct gnttab_copy *gop = netbk->tx_copy_ops, *request_gop; struct sk_buff *skb; int ret; while (((nr_pending_reqs(netbk) + MAX_SKB_FRAGS) < MAX_PENDING_REQS) && !list_empty(&netbk->net_schedule_list)) { struct xenvif *vif; struct xen_netif_tx_request txreq; struct xen_netif_tx_request txfrags[MAX_SKB_FRAGS]; struct page *page; struct xen_netif_extra_info extras[XEN_NETIF_EXTRA_TYPE_MAX-1]; u16 pending_idx; RING_IDX idx; int work_to_do; unsigned int data_len; pending_ring_idx_t index; /* Get a netif from the list with work to do. */ vif = poll_net_schedule_list(netbk); if (!vif) continue; RING_FINAL_CHECK_FOR_REQUESTS(&vif->tx, work_to_do); if (!work_to_do) { xenvif_put(vif); continue; } idx = vif->tx.req_cons; rmb(); /* Ensure that we see the request before we copy it. */ memcpy(&txreq, RING_GET_REQUEST(&vif->tx, idx), sizeof(txreq)); /* Credit-based scheduling. 
*/ if (txreq.size > vif->remaining_credit && tx_credit_exceeded(vif, txreq.size)) { xenvif_put(vif); continue; } vif->remaining_credit -= txreq.size; work_to_do--; vif->tx.req_cons = ++idx; memset(extras, 0, sizeof(extras)); if (txreq.flags & XEN_NETTXF_extra_info) { work_to_do = xen_netbk_get_extras(vif, extras, work_to_do); idx = vif->tx.req_cons; if (unlikely(work_to_do < 0)) { netbk_tx_err(vif, &txreq, idx); continue; } } ret = netbk_count_requests(vif, &txreq, txfrags, work_to_do); if (unlikely(ret < 0)) { netbk_tx_err(vif, &txreq, idx - ret); continue; } idx += ret; if (unlikely(txreq.size < ETH_HLEN)) { netdev_dbg(vif->dev, "Bad packet size: %d\n", txreq.size); netbk_tx_err(vif, &txreq, idx); continue; } /* No crossing a page as the payload mustn't fragment. */ if (unlikely((txreq.offset + txreq.size) > PAGE_SIZE)) { netdev_dbg(vif->dev, "txreq.offset: %x, size: %u, end: %lu\n", txreq.offset, txreq.size, (txreq.offset&~PAGE_MASK) + txreq.size); netbk_tx_err(vif, &txreq, idx); continue; } index = pending_index(netbk->pending_cons); pending_idx = netbk->pending_ring[index]; data_len = (txreq.size > PKT_PROT_LEN && ret < MAX_SKB_FRAGS) ? PKT_PROT_LEN : txreq.size; skb = alloc_skb(data_len + NET_SKB_PAD + NET_IP_ALIGN, GFP_ATOMIC | __GFP_NOWARN); if (unlikely(skb == NULL)) { netdev_dbg(vif->dev, "Can't allocate a skb in start_xmit.\n"); netbk_tx_err(vif, &txreq, idx); break; } /* Packets passed to netif_rx() must have some headroom. 
*/ skb_reserve(skb, NET_SKB_PAD + NET_IP_ALIGN); if (extras[XEN_NETIF_EXTRA_TYPE_GSO - 1].type) { struct xen_netif_extra_info *gso; gso = &extras[XEN_NETIF_EXTRA_TYPE_GSO - 1]; if (netbk_set_skb_gso(vif, skb, gso)) { kfree_skb(skb); netbk_tx_err(vif, &txreq, idx); continue; } } /* XXX could copy straight to head */ page = xen_netbk_alloc_page(netbk, skb, pending_idx); if (!page) { kfree_skb(skb); netbk_tx_err(vif, &txreq, idx); continue; } gop->source.u.ref = txreq.gref; gop->source.domid = vif->domid; gop->source.offset = txreq.offset; gop->dest.u.gmfn = virt_to_mfn(page_address(page)); gop->dest.domid = DOMID_SELF; gop->dest.offset = txreq.offset; gop->len = txreq.size; gop->flags = GNTCOPY_source_gref; gop++; memcpy(&netbk->pending_tx_info[pending_idx].req, &txreq, sizeof(txreq)); netbk->pending_tx_info[pending_idx].vif = vif; *((u16 *)skb->data) = pending_idx; __skb_put(skb, data_len); skb_shinfo(skb)->nr_frags = ret; if (data_len < txreq.size) { skb_shinfo(skb)->nr_frags++; frag_set_pending_idx(&skb_shinfo(skb)->frags[0], pending_idx); } else { frag_set_pending_idx(&skb_shinfo(skb)->frags[0], INVALID_PENDING_IDX); } netbk->pending_cons++; request_gop = xen_netbk_get_requests(netbk, vif, skb, txfrags, gop); if (request_gop == NULL) { kfree_skb(skb); netbk_tx_err(vif, &txreq, idx); continue; } gop = request_gop; __skb_queue_tail(&netbk->tx_queue, skb); vif->tx.req_cons = idx; xen_netbk_check_rx_xenvif(vif); if ((gop-netbk->tx_copy_ops) >= ARRAY_SIZE(netbk->tx_copy_ops)) break; } return gop - netbk->tx_copy_ops; } static void xen_netbk_tx_submit(struct xen_netbk *netbk) { struct gnttab_copy *gop = netbk->tx_copy_ops; struct sk_buff *skb; while ((skb = __skb_dequeue(&netbk->tx_queue)) != NULL) { struct xen_netif_tx_request *txp; struct xenvif *vif; u16 pending_idx; unsigned data_len; pending_idx = *((u16 *)skb->data); vif = netbk->pending_tx_info[pending_idx].vif; txp = &netbk->pending_tx_info[pending_idx].req; /* Check the remap error code. 
*/ if (unlikely(xen_netbk_tx_check_gop(netbk, skb, &gop))) { netdev_dbg(vif->dev, "netback grant failed.\n"); skb_shinfo(skb)->nr_frags = 0; kfree_skb(skb); continue; } data_len = skb->len; memcpy(skb->data, (void *)(idx_to_kaddr(netbk, pending_idx)|txp->offset), data_len); if (data_len < txp->size) { /* Append the packet payload as a fragment. */ txp->offset += data_len; txp->size -= data_len; } else { /* Schedule a response immediately. */ xen_netbk_idx_release(netbk, pending_idx); } if (txp->flags & XEN_NETTXF_csum_blank) skb->ip_summed = CHECKSUM_PARTIAL; else if (txp->flags & XEN_NETTXF_data_validated) skb->ip_summed = CHECKSUM_UNNECESSARY; xen_netbk_fill_frags(netbk, skb); /* * If the initial fragment was < PKT_PROT_LEN then * pull through some bytes from the other fragments to * increase the linear region to PKT_PROT_LEN bytes. */ if (skb_headlen(skb) < PKT_PROT_LEN && skb_is_nonlinear(skb)) { int target = min_t(int, skb->len, PKT_PROT_LEN); __pskb_pull_tail(skb, target - skb_headlen(skb)); } skb->dev = vif->dev; skb->protocol = eth_type_trans(skb, skb->dev); if (checksum_setup(vif, skb)) { netdev_dbg(vif->dev, "Can't setup checksum in net_tx_action\n"); kfree_skb(skb); continue; } vif->dev->stats.rx_bytes += skb->len; vif->dev->stats.rx_packets++; xenvif_receive_skb(vif, skb); } } /* Called after netfront has transmitted */ static void xen_netbk_tx_action(struct xen_netbk *netbk) { unsigned nr_gops; nr_gops = xen_netbk_tx_build_gops(netbk); if (nr_gops == 0) return; gnttab_batch_copy(netbk->tx_copy_ops, nr_gops); xen_netbk_tx_submit(netbk); } static void xen_netbk_idx_release(struct xen_netbk *netbk, u16 pending_idx) { struct xenvif *vif; struct pending_tx_info *pending_tx_info; pending_ring_idx_t index; /* Already complete? 
*/ if (netbk->mmap_pages[pending_idx] == NULL) return; pending_tx_info = &netbk->pending_tx_info[pending_idx]; vif = pending_tx_info->vif; make_tx_response(vif, &pending_tx_info->req, XEN_NETIF_RSP_OKAY); index = pending_index(netbk->pending_prod++); netbk->pending_ring[index] = pending_idx; xenvif_put(vif); netbk->mmap_pages[pending_idx]->mapping = 0; put_page(netbk->mmap_pages[pending_idx]); netbk->mmap_pages[pending_idx] = NULL; } static void make_tx_response(struct xenvif *vif, struct xen_netif_tx_request *txp, s8 st) { RING_IDX i = vif->tx.rsp_prod_pvt; struct xen_netif_tx_response *resp; int notify; resp = RING_GET_RESPONSE(&vif->tx, i); resp->id = txp->id; resp->status = st; if (txp->flags & XEN_NETTXF_extra_info) RING_GET_RESPONSE(&vif->tx, ++i)->status = XEN_NETIF_RSP_NULL; vif->tx.rsp_prod_pvt = ++i; RING_PUSH_RESPONSES_AND_CHECK_NOTIFY(&vif->tx, notify); if (notify) notify_remote_via_irq(vif->irq); } static struct xen_netif_rx_response *make_rx_response(struct xenvif *vif, u16 id, s8 st, u16 offset, u16 size, u16 flags) { RING_IDX i = vif->rx.rsp_prod_pvt; struct xen_netif_rx_response *resp; resp = RING_GET_RESPONSE(&vif->rx, i); resp->offset = offset; resp->flags = flags; resp->id = id; resp->status = (s16)size; if (st < 0) resp->status = (s16)st; vif->rx.rsp_prod_pvt = ++i; return resp; } static inline int rx_work_todo(struct xen_netbk *netbk) { return !skb_queue_empty(&netbk->rx_queue); } static inline int tx_work_todo(struct xen_netbk *netbk) { if (((nr_pending_reqs(netbk) + MAX_SKB_FRAGS) < MAX_PENDING_REQS) && !list_empty(&netbk->net_schedule_list)) return 1; return 0; } static int xen_netbk_kthread(void *data) { struct xen_netbk *netbk = data; while (!kthread_should_stop()) { wait_event_interruptible(netbk->wq, rx_work_todo(netbk) || tx_work_todo(netbk) || kthread_should_stop()); cond_resched(); if (kthread_should_stop()) break; if (rx_work_todo(netbk)) xen_netbk_rx_action(netbk); if (tx_work_todo(netbk)) xen_netbk_tx_action(netbk); } return 0; } 
/* Unmap whichever of the two shared rings are currently mapped. */
void xen_netbk_unmap_frontend_rings(struct xenvif *vif)
{
	if (vif->tx.sring)
		xenbus_unmap_ring_vfree(xenvif_to_xenbus_device(vif),
					vif->tx.sring);
	if (vif->rx.sring)
		xenbus_unmap_ring_vfree(xenvif_to_xenbus_device(vif),
					vif->rx.sring);
}

/*
 * Map the frontend's TX and RX shared ring pages (by grant reference)
 * into our address space and initialise the backend ring state.
 * On failure everything mapped so far is unmapped again.
 */
int xen_netbk_map_frontend_rings(struct xenvif *vif,
				 grant_ref_t tx_ring_ref,
				 grant_ref_t rx_ring_ref)
{
	void *addr;
	struct xen_netif_tx_sring *txs;
	struct xen_netif_rx_sring *rxs;

	int err = -ENOMEM;

	err = xenbus_map_ring_valloc(xenvif_to_xenbus_device(vif),
				     tx_ring_ref, &addr);
	if (err)
		goto err;

	txs = (struct xen_netif_tx_sring *)addr;
	BACK_RING_INIT(&vif->tx, txs, PAGE_SIZE);

	err = xenbus_map_ring_valloc(xenvif_to_xenbus_device(vif),
				     rx_ring_ref, &addr);
	if (err)
		goto err;

	rxs = (struct xen_netif_rx_sring *)addr;
	BACK_RING_INIT(&vif->rx, rxs, PAGE_SIZE);

	vif->rx_req_cons_peek = 0;

	return 0;

err:
	xen_netbk_unmap_frontend_rings(vif);
	return err;
}

/*
 * Module init: create one xen_netbk group (worker thread, timer,
 * pending ring) per online CPU, then register with xenbus.  On any
 * failure, tear down the groups created so far.
 */
static int __init netback_init(void)
{
	int i;
	int rc = 0;
	int group;

	if (!xen_domain())
		return -ENODEV;

	xen_netbk_group_nr = num_online_cpus();
	xen_netbk = vzalloc(sizeof(struct xen_netbk) * xen_netbk_group_nr);
	if (!xen_netbk)
		return -ENOMEM;

	for (group = 0; group < xen_netbk_group_nr; group++) {
		struct xen_netbk *netbk = &xen_netbk[group];
		skb_queue_head_init(&netbk->rx_queue);
		skb_queue_head_init(&netbk->tx_queue);

		init_timer(&netbk->net_timer);
		netbk->net_timer.data = (unsigned long)netbk;
		netbk->net_timer.function = xen_netbk_alarm;

		/* All pending slots start out free. */
		netbk->pending_cons = 0;
		netbk->pending_prod = MAX_PENDING_REQS;
		for (i = 0; i < MAX_PENDING_REQS; i++)
			netbk->pending_ring[i] = i;

		init_waitqueue_head(&netbk->wq);
		netbk->task = kthread_create(xen_netbk_kthread,
					     (void *)netbk,
					     "netback/%u", group);

		if (IS_ERR(netbk->task)) {
			printk(KERN_ALERT "kthread_create() fails at netback\n");
			del_timer(&netbk->net_timer);
			rc = PTR_ERR(netbk->task);
			goto failed_init;
		}

		/* Pin each worker to "its" CPU. */
		kthread_bind(netbk->task, group);

		INIT_LIST_HEAD(&netbk->net_schedule_list);

		spin_lock_init(&netbk->net_schedule_list_lock);

		atomic_set(&netbk->netfront_count, 0);

		wake_up_process(netbk->task);
	}

	rc = xenvif_xenbus_init();
	if (rc)
		goto failed_init;

	return 0;

failed_init:
	/* Tear down only the groups that were fully set up. */
	while (--group >= 0) {
		struct xen_netbk *netbk = &xen_netbk[group];
		for (i = 0; i < MAX_PENDING_REQS; i++) {
			if (netbk->mmap_pages[i])
				__free_page(netbk->mmap_pages[i]);
		}
		del_timer(&netbk->net_timer);
		kthread_stop(netbk->task);
	}
	vfree(xen_netbk);
	return rc;

}

module_init(netback_init);

MODULE_LICENSE("Dual BSD/GPL");
MODULE_ALIAS("xen-backend:vif");
/* * Back-end of the driver for virtual network devices. This portion of the * driver exports a 'unified' network-device interface that can be accessed * by any operating system that implements a compatible front end. A * reference front-end implementation can be found in: * drivers/net/xen-netfront.c * * Copyright (c) 2002-2005, K A Fraser * * This program is free software; you can redistribute it and/or * modify it under the terms of the GNU General Public License version 2 * as published by the Free Software Foundation; or, when distributed * separately from the Linux kernel or incorporated into other * software packages, subject to the following license: * * Permission is hereby granted, free of charge, to any person obtaining a copy * of this source file (the "Software"), to deal in the Software without * restriction, including without limitation the rights to use, copy, modify, * merge, publish, distribute, sublicense, and/or sell copies of the Software, * and to permit persons to whom the Software is furnished to do so, subject to * the following conditions: * * The above copyright notice and this permission notice shall be included in * all copies or substantial portions of the Software. * * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS * IN THE SOFTWARE. 
*/ #include "common.h" #include <linux/kthread.h> #include <linux/if_vlan.h> #include <linux/udp.h> #include <net/tcp.h> #include <xen/xen.h> #include <xen/events.h> #include <xen/interface/memory.h> #include <asm/xen/hypercall.h> #include <asm/xen/page.h> struct pending_tx_info { struct xen_netif_tx_request req; struct xenvif *vif; }; typedef unsigned int pending_ring_idx_t; struct netbk_rx_meta { int id; int size; int gso_size; }; #define MAX_PENDING_REQS 256 /* Discriminate from any valid pending_idx value. */ #define INVALID_PENDING_IDX 0xFFFF #define MAX_BUFFER_OFFSET PAGE_SIZE /* extra field used in struct page */ union page_ext { struct { #if BITS_PER_LONG < 64 #define IDX_WIDTH 8 #define GROUP_WIDTH (BITS_PER_LONG - IDX_WIDTH) unsigned int group:GROUP_WIDTH; unsigned int idx:IDX_WIDTH; #else unsigned int group, idx; #endif } e; void *mapping; }; struct xen_netbk { wait_queue_head_t wq; struct task_struct *task; struct sk_buff_head rx_queue; struct sk_buff_head tx_queue; struct timer_list net_timer; struct page *mmap_pages[MAX_PENDING_REQS]; pending_ring_idx_t pending_prod; pending_ring_idx_t pending_cons; struct list_head net_schedule_list; /* Protect the net_schedule_list in netif. */ spinlock_t net_schedule_list_lock; atomic_t netfront_count; struct pending_tx_info pending_tx_info[MAX_PENDING_REQS]; struct gnttab_copy tx_copy_ops[MAX_PENDING_REQS]; u16 pending_ring[MAX_PENDING_REQS]; /* * Given MAX_BUFFER_OFFSET of 4096 the worst case is that each * head/fragment page uses 2 copy operations because it * straddles two buffers in the frontend. 
*/ struct gnttab_copy grant_copy_op[2*XEN_NETIF_RX_RING_SIZE]; struct netbk_rx_meta meta[2*XEN_NETIF_RX_RING_SIZE]; }; static struct xen_netbk *xen_netbk; static int xen_netbk_group_nr; void xen_netbk_add_xenvif(struct xenvif *vif) { int i; int min_netfront_count; int min_group = 0; struct xen_netbk *netbk; min_netfront_count = atomic_read(&xen_netbk[0].netfront_count); for (i = 0; i < xen_netbk_group_nr; i++) { int netfront_count = atomic_read(&xen_netbk[i].netfront_count); if (netfront_count < min_netfront_count) { min_group = i; min_netfront_count = netfront_count; } } netbk = &xen_netbk[min_group]; vif->netbk = netbk; atomic_inc(&netbk->netfront_count); } void xen_netbk_remove_xenvif(struct xenvif *vif) { struct xen_netbk *netbk = vif->netbk; vif->netbk = NULL; atomic_dec(&netbk->netfront_count); } static void xen_netbk_idx_release(struct xen_netbk *netbk, u16 pending_idx); static void make_tx_response(struct xenvif *vif, struct xen_netif_tx_request *txp, s8 st); static struct xen_netif_rx_response *make_rx_response(struct xenvif *vif, u16 id, s8 st, u16 offset, u16 size, u16 flags); static inline unsigned long idx_to_pfn(struct xen_netbk *netbk, u16 idx) { return page_to_pfn(netbk->mmap_pages[idx]); } static inline unsigned long idx_to_kaddr(struct xen_netbk *netbk, u16 idx) { return (unsigned long)pfn_to_kaddr(idx_to_pfn(netbk, idx)); } /* extra field used in struct page */ static inline void set_page_ext(struct page *pg, struct xen_netbk *netbk, unsigned int idx) { unsigned int group = netbk - xen_netbk; union page_ext ext = { .e = { .group = group + 1, .idx = idx } }; BUILD_BUG_ON(sizeof(ext) > sizeof(ext.mapping)); pg->mapping = ext.mapping; } static int get_page_ext(struct page *pg, unsigned int *pgroup, unsigned int *pidx) { union page_ext ext = { .mapping = pg->mapping }; struct xen_netbk *netbk; unsigned int group, idx; group = ext.e.group - 1; if (group < 0 || group >= xen_netbk_group_nr) return 0; netbk = &xen_netbk[group]; idx = ext.e.idx; if ((idx 
< 0) || (idx >= MAX_PENDING_REQS)) return 0; if (netbk->mmap_pages[idx] != pg) return 0; *pgroup = group; *pidx = idx; return 1; } /* * This is the amount of packet we copy rather than map, so that the * guest can't fiddle with the contents of the headers while we do * packet processing on them (netfilter, routing, etc). */ #define PKT_PROT_LEN (ETH_HLEN + \ VLAN_HLEN + \ sizeof(struct iphdr) + MAX_IPOPTLEN + \ sizeof(struct tcphdr) + MAX_TCP_OPTION_SPACE) static u16 frag_get_pending_idx(skb_frag_t *frag) { return (u16)frag->page_offset; } static void frag_set_pending_idx(skb_frag_t *frag, u16 pending_idx) { frag->page_offset = pending_idx; } static inline pending_ring_idx_t pending_index(unsigned i) { return i & (MAX_PENDING_REQS-1); } static inline pending_ring_idx_t nr_pending_reqs(struct xen_netbk *netbk) { return MAX_PENDING_REQS - netbk->pending_prod + netbk->pending_cons; } static void xen_netbk_kick_thread(struct xen_netbk *netbk) { wake_up(&netbk->wq); } static int max_required_rx_slots(struct xenvif *vif) { int max = DIV_ROUND_UP(vif->dev->mtu, PAGE_SIZE); if (vif->can_sg || vif->gso || vif->gso_prefix) max += MAX_SKB_FRAGS + 1; /* extra_info + frags */ return max; } int xen_netbk_rx_ring_full(struct xenvif *vif) { RING_IDX peek = vif->rx_req_cons_peek; RING_IDX needed = max_required_rx_slots(vif); return ((vif->rx.sring->req_prod - peek) < needed) || ((vif->rx.rsp_prod_pvt + XEN_NETIF_RX_RING_SIZE - peek) < needed); } int xen_netbk_must_stop_queue(struct xenvif *vif) { if (!xen_netbk_rx_ring_full(vif)) return 0; vif->rx.sring->req_event = vif->rx_req_cons_peek + max_required_rx_slots(vif); mb(); /* request notification /then/ check the queue */ return xen_netbk_rx_ring_full(vif); } /* * Returns true if we should start a new receive buffer instead of * adding 'size' bytes to a buffer which currently contains 'offset' * bytes. 
*/ static bool start_new_rx_buffer(int offset, unsigned long size, int head) { /* simple case: we have completely filled the current buffer. */ if (offset == MAX_BUFFER_OFFSET) return true; /* * complex case: start a fresh buffer if the current frag * would overflow the current buffer but only if: * (i) this frag would fit completely in the next buffer * and (ii) there is already some data in the current buffer * and (iii) this is not the head buffer. * * Where: * - (i) stops us splitting a frag into two copies * unless the frag is too large for a single buffer. * - (ii) stops us from leaving a buffer pointlessly empty. * - (iii) stops us leaving the first buffer * empty. Strictly speaking this is already covered * by (ii) but is explicitly checked because * netfront relies on the first buffer being * non-empty and can crash otherwise. * * This means we will effectively linearise small * frags but do not needlessly split large buffers * into multiple copies tend to give large frags their * own buffers as before. */ if ((offset + size > MAX_BUFFER_OFFSET) && (size <= MAX_BUFFER_OFFSET) && offset && !head) return true; return false; } /* * Figure out how many ring slots we're going to need to send @skb to * the guest. This function is essentially a dry run of * netbk_gop_frag_copy. 
*/ unsigned int xen_netbk_count_skb_slots(struct xenvif *vif, struct sk_buff *skb) { unsigned int count; int i, copy_off; count = DIV_ROUND_UP(skb_headlen(skb), PAGE_SIZE); copy_off = skb_headlen(skb) % PAGE_SIZE; if (skb_shinfo(skb)->gso_size) count++; for (i = 0; i < skb_shinfo(skb)->nr_frags; i++) { unsigned long size = skb_frag_size(&skb_shinfo(skb)->frags[i]); unsigned long offset = skb_shinfo(skb)->frags[i].page_offset; unsigned long bytes; offset &= ~PAGE_MASK; while (size > 0) { BUG_ON(offset >= PAGE_SIZE); BUG_ON(copy_off > MAX_BUFFER_OFFSET); bytes = PAGE_SIZE - offset; if (bytes > size) bytes = size; if (start_new_rx_buffer(copy_off, bytes, 0)) { count++; copy_off = 0; } if (copy_off + bytes > MAX_BUFFER_OFFSET) bytes = MAX_BUFFER_OFFSET - copy_off; copy_off += bytes; offset += bytes; size -= bytes; if (offset == PAGE_SIZE) offset = 0; } } return count; } struct netrx_pending_operations { unsigned copy_prod, copy_cons; unsigned meta_prod, meta_cons; struct gnttab_copy *copy; struct netbk_rx_meta *meta; int copy_off; grant_ref_t copy_gref; }; static struct netbk_rx_meta *get_next_rx_buffer(struct xenvif *vif, struct netrx_pending_operations *npo) { struct netbk_rx_meta *meta; struct xen_netif_rx_request *req; req = RING_GET_REQUEST(&vif->rx, vif->rx.req_cons++); meta = npo->meta + npo->meta_prod++; meta->gso_size = 0; meta->size = 0; meta->id = req->id; npo->copy_off = 0; npo->copy_gref = req->gref; return meta; } /* * Set up the grant operations for this fragment. If it's a flipping * interface, we also set up the unmap request from here. */ static void netbk_gop_frag_copy(struct xenvif *vif, struct sk_buff *skb, struct netrx_pending_operations *npo, struct page *page, unsigned long size, unsigned long offset, int *head) { struct gnttab_copy *copy_gop; struct netbk_rx_meta *meta; /* * These variables are used iff get_page_ext returns true, * in which case they are guaranteed to be initialized. 
*/ unsigned int uninitialized_var(group), uninitialized_var(idx); int foreign = get_page_ext(page, &group, &idx); unsigned long bytes; /* Data must not cross a page boundary. */ BUG_ON(size + offset > PAGE_SIZE<<compound_order(page)); meta = npo->meta + npo->meta_prod - 1; /* Skip unused frames from start of page */ page += offset >> PAGE_SHIFT; offset &= ~PAGE_MASK; while (size > 0) { BUG_ON(offset >= PAGE_SIZE); BUG_ON(npo->copy_off > MAX_BUFFER_OFFSET); bytes = PAGE_SIZE - offset; if (bytes > size) bytes = size; if (start_new_rx_buffer(npo->copy_off, bytes, *head)) { /* * Netfront requires there to be some data in the head * buffer. */ BUG_ON(*head); meta = get_next_rx_buffer(vif, npo); } if (npo->copy_off + bytes > MAX_BUFFER_OFFSET) bytes = MAX_BUFFER_OFFSET - npo->copy_off; copy_gop = npo->copy + npo->copy_prod++; copy_gop->flags = GNTCOPY_dest_gref; if (foreign) { struct xen_netbk *netbk = &xen_netbk[group]; struct pending_tx_info *src_pend; src_pend = &netbk->pending_tx_info[idx]; copy_gop->source.domid = src_pend->vif->domid; copy_gop->source.u.ref = src_pend->req.gref; copy_gop->flags |= GNTCOPY_source_gref; } else { void *vaddr = page_address(page); copy_gop->source.domid = DOMID_SELF; copy_gop->source.u.gmfn = virt_to_mfn(vaddr); } copy_gop->source.offset = offset; copy_gop->dest.domid = vif->domid; copy_gop->dest.offset = npo->copy_off; copy_gop->dest.u.ref = npo->copy_gref; copy_gop->len = bytes; npo->copy_off += bytes; meta->size += bytes; offset += bytes; size -= bytes; /* Next frame */ if (offset == PAGE_SIZE && size) { BUG_ON(!PageCompound(page)); page++; offset = 0; } /* Leave a gap for the GSO descriptor. */ if (*head && skb_shinfo(skb)->gso_size && !vif->gso_prefix) vif->rx.req_cons++; *head = 0; /* There must be something in this buffer now. */ } } /* * Prepare an SKB to be transmitted to the frontend. * * This function is responsible for allocating grant operations, meta * structures, etc. 
* * It returns the number of meta structures consumed. The number of * ring slots used is always equal to the number of meta slots used * plus the number of GSO descriptors used. Currently, we use either * zero GSO descriptors (for non-GSO packets) or one descriptor (for * frontend-side LRO). */ static int netbk_gop_skb(struct sk_buff *skb, struct netrx_pending_operations *npo) { struct xenvif *vif = netdev_priv(skb->dev); int nr_frags = skb_shinfo(skb)->nr_frags; int i; struct xen_netif_rx_request *req; struct netbk_rx_meta *meta; unsigned char *data; int head = 1; int old_meta_prod; old_meta_prod = npo->meta_prod; /* Set up a GSO prefix descriptor, if necessary */ if (skb_shinfo(skb)->gso_size && vif->gso_prefix) { req = RING_GET_REQUEST(&vif->rx, vif->rx.req_cons++); meta = npo->meta + npo->meta_prod++; meta->gso_size = skb_shinfo(skb)->gso_size; meta->size = 0; meta->id = req->id; } req = RING_GET_REQUEST(&vif->rx, vif->rx.req_cons++); meta = npo->meta + npo->meta_prod++; if (!vif->gso_prefix) meta->gso_size = skb_shinfo(skb)->gso_size; else meta->gso_size = 0; meta->size = 0; meta->id = req->id; npo->copy_off = 0; npo->copy_gref = req->gref; data = skb->data; while (data < skb_tail_pointer(skb)) { unsigned int offset = offset_in_page(data); unsigned int len = PAGE_SIZE - offset; if (data + len > skb_tail_pointer(skb)) len = skb_tail_pointer(skb) - data; netbk_gop_frag_copy(vif, skb, npo, virt_to_page(data), len, offset, &head); data += len; } for (i = 0; i < nr_frags; i++) { netbk_gop_frag_copy(vif, skb, npo, skb_frag_page(&skb_shinfo(skb)->frags[i]), skb_frag_size(&skb_shinfo(skb)->frags[i]), skb_shinfo(skb)->frags[i].page_offset, &head); } return npo->meta_prod - old_meta_prod; } /* * This is a twin to netbk_gop_skb. Assume that netbk_gop_skb was * used to set up the operations on the top of * netrx_pending_operations, which have since been done. Check that * they didn't give any errors and advance over them. 
 */
static int netbk_check_gop(struct xenvif *vif, int nr_meta_slots,
			   struct netrx_pending_operations *npo)
{
	struct gnttab_copy *copy_op;
	int status = XEN_NETIF_RSP_OKAY;
	int i;

	/* Walk the copy ops issued for this skb; any failure poisons the
	 * whole response. */
	for (i = 0; i < nr_meta_slots; i++) {
		copy_op = npo->copy + npo->copy_cons++;
		if (copy_op->status != GNTST_okay) {
			netdev_dbg(vif->dev,
				   "Bad status %d from copy to DOM%d.\n",
				   copy_op->status, vif->domid);
			status = XEN_NETIF_RSP_ERROR;
		}
	}

	return status;
}

/* Emit rx responses for every meta slot after the first (the fragments). */
static void netbk_add_frag_responses(struct xenvif *vif, int status,
				     struct netbk_rx_meta *meta,
				     int nr_meta_slots)
{
	int i;
	unsigned long offset;

	/* No fragments used */
	if (nr_meta_slots <= 1)
		return;

	nr_meta_slots--;

	for (i = 0; i < nr_meta_slots; i++) {
		int flags;
		/* Last fragment carries no "more data" flag. */
		if (i == nr_meta_slots - 1)
			flags = 0;
		else
			flags = XEN_NETRXF_more_data;

		offset = 0;
		make_rx_response(vif, meta[i].id, status, offset,
				 meta[i].size, flags);
	}
}

/* Overlaid on skb->cb to remember how many meta slots an skb consumed. */
struct skb_cb_overlay {
	int meta_slots_used;
};

/*
 * Drain the rx queue: build a batch of grant-copy ops, execute them,
 * then turn the results into responses on each vif's rx ring.
 */
static void xen_netbk_rx_action(struct xen_netbk *netbk)
{
	struct xenvif *vif = NULL, *tmp;
	s8 status;
	u16 irq, flags;
	struct xen_netif_rx_response *resp;
	struct sk_buff_head rxq;
	struct sk_buff *skb;
	LIST_HEAD(notify);
	int ret;
	int nr_frags;
	int count;
	unsigned long offset;
	struct skb_cb_overlay *sco;

	struct netrx_pending_operations npo = {
		.copy = netbk->grant_copy_op,
		.meta = netbk->meta,
	};

	skb_queue_head_init(&rxq);

	count = 0;

	/* Phase 1: queue up grant-copy operations for each pending skb. */
	while ((skb = skb_dequeue(&netbk->rx_queue)) != NULL) {
		vif = netdev_priv(skb->dev);
		nr_frags = skb_shinfo(skb)->nr_frags;

		sco = (struct skb_cb_overlay *)skb->cb;
		sco->meta_slots_used = netbk_gop_skb(skb, &npo);

		count += nr_frags + 1;

		__skb_queue_tail(&rxq, skb);

		/* Filled the batch queue? */
		if (count + MAX_SKB_FRAGS >= XEN_NETIF_RX_RING_SIZE)
			break;
	}

	BUG_ON(npo.meta_prod > ARRAY_SIZE(netbk->meta));

	if (!npo.copy_prod)
		return;

	BUG_ON(npo.copy_prod > ARRAY_SIZE(netbk->grant_copy_op));
	gnttab_batch_copy(netbk->grant_copy_op, npo.copy_prod);

	/* Phase 2: generate responses from the completed copy ops. */
	while ((skb = __skb_dequeue(&rxq)) != NULL) {
		sco = (struct skb_cb_overlay *)skb->cb;

		vif = netdev_priv(skb->dev);

		/* GSO prefix descriptor consumes the first meta slot. */
		if (netbk->meta[npo.meta_cons].gso_size && vif->gso_prefix) {
			resp = RING_GET_RESPONSE(&vif->rx,
						 vif->rx.rsp_prod_pvt++);

			resp->flags = XEN_NETRXF_gso_prefix |
				XEN_NETRXF_more_data;

			resp->offset = netbk->meta[npo.meta_cons].gso_size;
			resp->id = netbk->meta[npo.meta_cons].id;
			resp->status = sco->meta_slots_used;

			npo.meta_cons++;
			sco->meta_slots_used--;
		}


		vif->dev->stats.tx_bytes += skb->len;
		vif->dev->stats.tx_packets++;

		status = netbk_check_gop(vif, sco->meta_slots_used, &npo);

		if (sco->meta_slots_used == 1)
			flags = 0;
		else
			flags = XEN_NETRXF_more_data;

		if (skb->ip_summed == CHECKSUM_PARTIAL) /* local packet? */
			flags |= XEN_NETRXF_csum_blank |
				XEN_NETRXF_data_validated;
		else if (skb->ip_summed == CHECKSUM_UNNECESSARY)
			/* remote but checksummed. */
			flags |= XEN_NETRXF_data_validated;

		offset = 0;
		resp = make_rx_response(vif, netbk->meta[npo.meta_cons].id,
					status, offset,
					netbk->meta[npo.meta_cons].size,
					flags);

		/* Attach a GSO extra-info descriptor if the frontend
		 * negotiated LRO-style GSO (no prefix). */
		if (netbk->meta[npo.meta_cons].gso_size && !vif->gso_prefix) {
			struct xen_netif_extra_info *gso =
				(struct xen_netif_extra_info *)
				RING_GET_RESPONSE(&vif->rx,
						  vif->rx.rsp_prod_pvt++);

			resp->flags |= XEN_NETRXF_extra_info;

			gso->u.gso.size = netbk->meta[npo.meta_cons].gso_size;
			gso->u.gso.type = XEN_NETIF_GSO_TYPE_TCPV4;
			gso->u.gso.pad = 0;
			gso->u.gso.features = 0;

			gso->type = XEN_NETIF_EXTRA_TYPE_GSO;
			gso->flags = 0;
		}

		netbk_add_frag_responses(vif, status,
					 netbk->meta + npo.meta_cons + 1,
					 sco->meta_slots_used);

		RING_PUSH_RESPONSES_AND_CHECK_NOTIFY(&vif->rx, ret);
		irq = vif->irq;
		/* Batch event-channel notifications per vif. */
		if (ret && list_empty(&vif->notify_list))
			list_add_tail(&vif->notify_list, &notify);

		xenvif_notify_tx_completion(vif);

		xenvif_put(vif);
		npo.meta_cons += sco->meta_slots_used;
		dev_kfree_skb(skb);
	}

	list_for_each_entry_safe(vif, tmp, &notify, notify_list) {
		notify_remote_via_irq(vif->irq);
		list_del_init(&vif->notify_list);
	}

	/* More work to do? */
	if (!skb_queue_empty(&netbk->rx_queue) &&
	    !timer_pending(&netbk->net_timer))
		xen_netbk_kick_thread(netbk);
}

/* Queue an skb for delivery to the frontend and wake the worker. */
void xen_netbk_queue_tx_skb(struct xenvif *vif, struct sk_buff *skb)
{
	struct xen_netbk *netbk = vif->netbk;

	skb_queue_tail(&netbk->rx_queue, skb);

	xen_netbk_kick_thread(netbk);
}

/* Timer callback: just re-kick the worker thread. */
static void xen_netbk_alarm(unsigned long data)
{
	struct xen_netbk *netbk = (struct xen_netbk *)data;
	xen_netbk_kick_thread(netbk);
}

static int __on_net_schedule_list(struct xenvif *vif)
{
	return !list_empty(&vif->schedule_list);
}

/* Must be called with net_schedule_list_lock held */
static void remove_from_net_schedule_list(struct xenvif *vif)
{
	if (likely(__on_net_schedule_list(vif))) {
		list_del_init(&vif->schedule_list);
		xenvif_put(vif);
	}
}

/* Pop the first schedulable vif off the list, taking a reference. */
static struct xenvif *poll_net_schedule_list(struct xen_netbk *netbk)
{
	struct xenvif *vif = NULL;

	spin_lock_irq(&netbk->net_schedule_list_lock);
	if (list_empty(&netbk->net_schedule_list))
		goto out;

	vif = list_first_entry(&netbk->net_schedule_list,
			       struct xenvif, schedule_list);
	if (!vif)
		goto out;

	xenvif_get(vif);

	remove_from_net_schedule_list(vif);
out:
	spin_unlock_irq(&netbk->net_schedule_list_lock);
	return vif;
}

/* Put a vif on its group's work list (if not already there) and maybe
 * kick the worker thread. */
void xen_netbk_schedule_xenvif(struct xenvif *vif)
{
	unsigned long flags;
	struct xen_netbk *netbk = vif->netbk;

	/* Unlocked fast path; re-checked under the lock below. */
	if (__on_net_schedule_list(vif))
		goto kick;

	spin_lock_irqsave(&netbk->net_schedule_list_lock, flags);
	if (!__on_net_schedule_list(vif) &&
	    likely(xenvif_schedulable(vif))) {
		list_add_tail(&vif->schedule_list, &netbk->net_schedule_list);
		xenvif_get(vif);
	}
	spin_unlock_irqrestore(&netbk->net_schedule_list_lock, flags);

kick:
	smp_mb();
	if ((nr_pending_reqs(netbk) < (MAX_PENDING_REQS/2)) &&
	    !list_empty(&netbk->net_schedule_list))
		xen_netbk_kick_thread(netbk);
}

void xen_netbk_deschedule_xenvif(struct xenvif *vif)
{
	struct xen_netbk *netbk = vif->netbk;
	spin_lock_irq(&netbk->net_schedule_list_lock);
	remove_from_net_schedule_list(vif);
	spin_unlock_irq(&netbk->net_schedule_list_lock);
}

/* Re-schedule the vif if its tx ring has outstanding requests. */
void xen_netbk_check_rx_xenvif(struct xenvif *vif)
{
	int more_to_do;

	RING_FINAL_CHECK_FOR_REQUESTS(&vif->tx, more_to_do);

	if (more_to_do)
		xen_netbk_schedule_xenvif(vif);
}

/* Replenish the vif's transmit credit, clamped to a sane burst size. */
static void tx_add_credit(struct xenvif *vif)
{
	unsigned long max_burst, max_credit;

	/*
	 * Allow a burst big enough to transmit a jumbo packet of up to 128kB.
	 * Otherwise the interface can seize up due to insufficient credit.
	 */
	max_burst = RING_GET_REQUEST(&vif->tx, vif->tx.req_cons)->size;
	max_burst = min(max_burst, 131072UL);
	max_burst = max(max_burst, vif->credit_bytes);

	/* Take care that adding a new chunk of credit doesn't wrap to zero. */
	max_credit = vif->remaining_credit + vif->credit_bytes;
	if (max_credit < vif->remaining_credit)
		max_credit = ULONG_MAX; /* wrapped: clamp to ULONG_MAX */

	vif->remaining_credit = min(max_credit, max_burst);
}

/* Credit timer callback: top up credit and re-check the tx ring. */
static void tx_credit_callback(unsigned long data)
{
	struct xenvif *vif = (struct xenvif *)data;
	tx_add_credit(vif);
	xen_netbk_check_rx_xenvif(vif);
}

/* Fail the request at *txp and every following request up to 'end',
 * then drop the scheduling reference. */
static void netbk_tx_err(struct xenvif *vif,
			 struct xen_netif_tx_request *txp, RING_IDX end)
{
	RING_IDX cons = vif->tx.req_cons;

	do {
		make_tx_response(vif, txp, XEN_NETIF_RSP_ERROR);
		if (cons >= end)
			break;
		txp = RING_GET_REQUEST(&vif->tx, cons++);
	} while (1);
	vif->tx.req_cons = cons;
	xen_netbk_check_rx_xenvif(vif);
	xenvif_put(vif);
}

/* Frontend sent garbage: take the interface down and drop our ref.
 * NOTE: callers must NOT also call netbk_tx_err/xenvif_put afterwards. */
static void netbk_fatal_tx_err(struct xenvif *vif)
{
	netdev_err(vif->dev, "fatal error; disabling device\n");
	xenvif_carrier_off(vif);
	xenvif_put(vif);
}

/* Copy and validate the chain of fragment requests following 'first'.
 * Returns the fragment count, or -frags after netbk_fatal_tx_err(). */
static int netbk_count_requests(struct xenvif *vif,
				struct xen_netif_tx_request *first,
				struct xen_netif_tx_request *txp,
				int work_to_do)
{
	RING_IDX cons = vif->tx.req_cons;
	int frags = 0;

	if (!(first->flags & XEN_NETTXF_more_data))
		return 0;

	do {
		if (frags >= work_to_do) {
			netdev_err(vif->dev, "Need more frags\n");
			netbk_fatal_tx_err(vif);
			return -frags;
		}

		if (unlikely(frags >= MAX_SKB_FRAGS)) {
			netdev_err(vif->dev, "Too many frags\n");
			netbk_fatal_tx_err(vif);
			return -frags;
		}

		memcpy(txp, RING_GET_REQUEST(&vif->tx, cons + frags),
		       sizeof(*txp));
		if (txp->size > first->size) {
			netdev_err(vif->dev, "Frag is bigger than frame.\n");
			netbk_fatal_tx_err(vif);
			return -frags;
		}

		first->size -= txp->size;
		frags++;

		if (unlikely((txp->offset + txp->size) > PAGE_SIZE)) {
			netdev_err(vif->dev, "txp->offset: %x, size: %u\n",
				   txp->offset, txp->size);
			netbk_fatal_tx_err(vif);
			return -frags;
		}
	} while ((txp++)->flags & XEN_NETTXF_more_data);

	return frags;
}

/* Allocate a backing page for pending slot 'pending_idx' and record it. */
static struct page *xen_netbk_alloc_page(struct xen_netbk *netbk,
					 struct sk_buff *skb,
					 u16 pending_idx)
{
	struct page *page;
	page = alloc_page(GFP_KERNEL|__GFP_COLD);
	if (!page)
		return NULL;
	set_page_ext(page, netbk, pending_idx);
	netbk->mmap_pages[pending_idx] = page;
	return page;
}

/* Build one grant-copy op per fragment request; returns the next free
 * gop slot, or NULL on page allocation failure. */
static struct gnttab_copy *xen_netbk_get_requests(struct xen_netbk *netbk,
						  struct xenvif *vif,
						  struct sk_buff *skb,
						  struct xen_netif_tx_request *txp,
						  struct gnttab_copy *gop)
{
	struct skb_shared_info *shinfo = skb_shinfo(skb);
	skb_frag_t *frags = shinfo->frags;
	u16 pending_idx = *((u16 *)skb->data);
	int i, start;

	/* Skip first skb fragment if it is on same page as header fragment. */
	start = (frag_get_pending_idx(&shinfo->frags[0]) == pending_idx);

	for (i = start; i < shinfo->nr_frags; i++, txp++) {
		struct page *page;
		pending_ring_idx_t index;
		struct pending_tx_info *pending_tx_info =
			netbk->pending_tx_info;

		index = pending_index(netbk->pending_cons++);
		pending_idx = netbk->pending_ring[index];
		page = xen_netbk_alloc_page(netbk, skb, pending_idx);
		if (!page)
			return NULL;

		gop->source.u.ref = txp->gref;
		gop->source.domid = vif->domid;
		gop->source.offset = txp->offset;

		gop->dest.u.gmfn = virt_to_mfn(page_address(page));
		gop->dest.domid = DOMID_SELF;
		gop->dest.offset = txp->offset;

		gop->len = txp->size;
		gop->flags = GNTCOPY_source_gref;

		gop++;

		memcpy(&pending_tx_info[pending_idx].req, txp, sizeof(*txp));
		/* One vif reference per outstanding pending slot. */
		xenvif_get(vif);
		pending_tx_info[pending_idx].vif = vif;
		frag_set_pending_idx(&frags[i], pending_idx);
	}

	return gop;
}

/* Check the status of the grant-copy ops issued for an skb; on error,
 * respond to the frontend and release all affected pending slots. */
static int xen_netbk_tx_check_gop(struct xen_netbk *netbk,
				  struct sk_buff *skb,
				  struct gnttab_copy **gopp)
{
	struct gnttab_copy *gop = *gopp;
	u16 pending_idx = *((u16 *)skb->data);
	struct pending_tx_info *pending_tx_info = netbk->pending_tx_info;
	struct xenvif *vif = pending_tx_info[pending_idx].vif;
	struct xen_netif_tx_request *txp;
	struct skb_shared_info *shinfo = skb_shinfo(skb);
	int nr_frags = shinfo->nr_frags;
	int i, err, start;

	/* Check status of header. */
	err = gop->status;
	if (unlikely(err)) {
		pending_ring_idx_t index;
		index = pending_index(netbk->pending_prod++);
		txp = &pending_tx_info[pending_idx].req;
		make_tx_response(vif, txp, XEN_NETIF_RSP_ERROR);
		netbk->pending_ring[index] = pending_idx;
		xenvif_put(vif);
	}

	/* Skip first skb fragment if it is on same page as header fragment. */
	start = (frag_get_pending_idx(&shinfo->frags[0]) == pending_idx);

	for (i = start; i < nr_frags; i++) {
		int j, newerr;
		pending_ring_idx_t index;

		pending_idx = frag_get_pending_idx(&shinfo->frags[i]);

		/* Check error status: if okay then remember grant handle. */
		newerr = (++gop)->status;
		if (likely(!newerr)) {
			/* Had a previous error? Invalidate this fragment. */
			if (unlikely(err))
				xen_netbk_idx_release(netbk, pending_idx);
			continue;
		}

		/* Error on this fragment: respond to client with an error. */
		txp = &netbk->pending_tx_info[pending_idx].req;
		make_tx_response(vif, txp, XEN_NETIF_RSP_ERROR);
		index = pending_index(netbk->pending_prod++);
		netbk->pending_ring[index] = pending_idx;
		xenvif_put(vif);

		/* Not the first error? Preceding frags already invalidated. */
		if (err)
			continue;

		/* First error: invalidate header and preceding fragments. */
		pending_idx = *((u16 *)skb->data);
		xen_netbk_idx_release(netbk, pending_idx);
		for (j = start; j < i; j++) {
			pending_idx = frag_get_pending_idx(&shinfo->frags[j]);
			xen_netbk_idx_release(netbk, pending_idx);
		}

		/* Remember the error: invalidate all subsequent fragments. */
		err = newerr;
	}

	*gopp = gop + 1;
	return err;
}

/* Attach the copied pages to the skb as real fragments and release the
 * corresponding pending slots. */
static void xen_netbk_fill_frags(struct xen_netbk *netbk, struct sk_buff *skb)
{
	struct skb_shared_info *shinfo = skb_shinfo(skb);
	int nr_frags = shinfo->nr_frags;
	int i;

	for (i = 0; i < nr_frags; i++) {
		skb_frag_t *frag = shinfo->frags + i;
		struct xen_netif_tx_request *txp;
		struct page *page;
		u16 pending_idx;

		pending_idx = frag_get_pending_idx(frag);

		txp = &netbk->pending_tx_info[pending_idx].req;
		page = virt_to_page(idx_to_kaddr(netbk, pending_idx));
		__skb_fill_page_desc(skb, i, page, txp->offset, txp->size);
		skb->len += txp->size;
		skb->data_len += txp->size;
		skb->truesize += txp->size;

		/* Take an extra reference to offset xen_netbk_idx_release */
		get_page(netbk->mmap_pages[pending_idx]);
		xen_netbk_idx_release(netbk, pending_idx);
	}
}

/* Consume the chain of extra-info requests on the tx ring into 'extras'.
 * Returns remaining work_to_do, or a negative errno after a fatal error. */
static int xen_netbk_get_extras(struct xenvif *vif,
				struct xen_netif_extra_info *extras,
				int work_to_do)
{
	struct xen_netif_extra_info extra;
	RING_IDX cons = vif->tx.req_cons;

	do {
		if (unlikely(work_to_do-- <= 0)) {
			netdev_err(vif->dev, "Missing extra info\n");
			netbk_fatal_tx_err(vif);
			return -EBADR;
		}

		memcpy(&extra, RING_GET_REQUEST(&vif->tx, cons),
		       sizeof(extra));
		if (unlikely(!extra.type ||
			     extra.type >= XEN_NETIF_EXTRA_TYPE_MAX)) {
			vif->tx.req_cons = ++cons;
			netdev_err(vif->dev,
				   "Invalid extra type: %d\n", extra.type);
			netbk_fatal_tx_err(vif);
			return -EINVAL;
		}

		memcpy(&extras[extra.type - 1], &extra, sizeof(extra));
		vif->tx.req_cons = ++cons;
	} while (extra.flags & XEN_NETIF_EXTRA_FLAG_MORE);

	return work_to_do;
}

/* Apply the frontend's GSO extra-info to the skb.  Validation failure
 * is fatal (netbk_fatal_tx_err already called on return < 0). */
static int netbk_set_skb_gso(struct xenvif *vif,
			     struct sk_buff *skb,
			     struct xen_netif_extra_info *gso)
{
	if (!gso->u.gso.size) {
		netdev_err(vif->dev, "GSO size must not be zero.\n");
		netbk_fatal_tx_err(vif);
		return -EINVAL;
	}

	/* Currently only TCPv4 S.O. is supported. */
	if (gso->u.gso.type != XEN_NETIF_GSO_TYPE_TCPV4) {
		netdev_err(vif->dev, "Bad GSO type %d.\n", gso->u.gso.type);
		netbk_fatal_tx_err(vif);
		return -EINVAL;
	}

	skb_shinfo(skb)->gso_size = gso->u.gso.size;
	skb_shinfo(skb)->gso_type = SKB_GSO_TCPV4;

	/* Header must be checked, and gso_segs computed. */
	skb_shinfo(skb)->gso_type |= SKB_GSO_DODGY;
	skb_shinfo(skb)->gso_segs = 0;

	return 0;
}

/* Set up csum_start/csum_offset for a CHECKSUM_PARTIAL skb; IPv4
 * TCP/UDP only.  Returns 0 on success, -EPROTO otherwise. */
static int checksum_setup(struct xenvif *vif, struct sk_buff *skb)
{
	struct iphdr *iph;
	unsigned char *th;
	int err = -EPROTO;
	int recalculate_partial_csum = 0;

	/*
	 * A GSO SKB must be CHECKSUM_PARTIAL.  However some buggy
	 * peers can fail to set NETRXF_csum_blank when sending a GSO
	 * frame.  In this case force the SKB to CHECKSUM_PARTIAL and
	 * recalculate the partial checksum.
	 */
	if (skb->ip_summed != CHECKSUM_PARTIAL && skb_is_gso(skb)) {
		vif->rx_gso_checksum_fixup++;
		skb->ip_summed = CHECKSUM_PARTIAL;
		recalculate_partial_csum = 1;
	}

	/* A non-CHECKSUM_PARTIAL SKB does not require setup. */
	if (skb->ip_summed != CHECKSUM_PARTIAL)
		return 0;

	if (skb->protocol != htons(ETH_P_IP))
		goto out;

	iph = (void *)skb->data;
	th = skb->data + 4 * iph->ihl;
	if (th >= skb_tail_pointer(skb))
		goto out;

	skb->csum_start = th - skb->head;
	switch (iph->protocol) {
	case IPPROTO_TCP:
		skb->csum_offset = offsetof(struct tcphdr, check);

		if (recalculate_partial_csum) {
			struct tcphdr *tcph = (struct tcphdr *)th;
			tcph->check = ~csum_tcpudp_magic(iph->saddr, iph->daddr,
							 skb->len - iph->ihl*4,
							 IPPROTO_TCP, 0);
		}
		break;
	case IPPROTO_UDP:
		skb->csum_offset = offsetof(struct udphdr, check);

		if (recalculate_partial_csum) {
			struct udphdr *udph = (struct udphdr *)th;
			udph->check = ~csum_tcpudp_magic(iph->saddr, iph->daddr,
							 skb->len - iph->ihl*4,
							 IPPROTO_UDP, 0);
		}
		break;
	default:
		if (net_ratelimit())
			netdev_err(vif->dev,
				   "Attempting to checksum a non-TCP/UDP packet, dropping a protocol %d packet\n",
				   iph->protocol);
		goto out;
	}

	if ((th + skb->csum_offset + 2) > skb_tail_pointer(skb))
		goto out;

	err = 0;

out:
	return err;
}

/* Rate limiting: returns true if this transmit must wait for credit. */
static bool tx_credit_exceeded(struct xenvif *vif, unsigned size)
{
	unsigned long now = jiffies;
	unsigned long next_credit =
		vif->credit_timeout.expires +
		msecs_to_jiffies(vif->credit_usec / 1000);

	/* Timer could already be pending in rare cases. */
	if (timer_pending(&vif->credit_timeout))
		return true;

	/* Passed the point where we can replenish credit? */
	if (time_after_eq(now, next_credit)) {
		vif->credit_timeout.expires = now;
		tx_add_credit(vif);
	}

	/* Still too big to send right now? Set a callback. */
	if (size > vif->remaining_credit) {
		vif->credit_timeout.data = (unsigned long)vif;
		vif->credit_timeout.function = tx_credit_callback;
		mod_timer(&vif->credit_timeout, next_credit);

		return true;
	}

	return false;
}

/*
 * Pull tx requests off the scheduled vifs and translate them into a
 * batch of grant-copy operations in netbk->tx_copy_ops.  Returns the
 * number of operations queued.
 */
static unsigned xen_netbk_tx_build_gops(struct xen_netbk *netbk)
{
	struct gnttab_copy *gop = netbk->tx_copy_ops, *request_gop;
	struct sk_buff *skb;
	int ret;

	while (((nr_pending_reqs(netbk) + MAX_SKB_FRAGS) < MAX_PENDING_REQS) &&
		!list_empty(&netbk->net_schedule_list)) {
		struct xenvif *vif;
		struct xen_netif_tx_request txreq;
		struct xen_netif_tx_request txfrags[MAX_SKB_FRAGS];
		struct page *page;
		struct xen_netif_extra_info extras[XEN_NETIF_EXTRA_TYPE_MAX-1];
		u16 pending_idx;
		RING_IDX idx;
		int work_to_do;
		unsigned int data_len;
		pending_ring_idx_t index;

		/* Get a netif from the list with work to do. */
		vif = poll_net_schedule_list(netbk);
		/* This can sometimes happen because the test of
		 * list_empty(net_schedule_list) at the top of the
		 * loop is unlocked.  Just go back and have another
		 * look.
		 */
		if (!vif)
			continue;

		/* Sanity-check the producer index before trusting it;
		 * a malicious frontend could otherwise wedge us. */
		if (vif->tx.sring->req_prod - vif->tx.req_cons >
		    XEN_NETIF_TX_RING_SIZE) {
			netdev_err(vif->dev,
				   "Impossible number of requests. "
				   "req_prod %d, req_cons %d, size %ld\n",
				   vif->tx.sring->req_prod, vif->tx.req_cons,
				   XEN_NETIF_TX_RING_SIZE);
			netbk_fatal_tx_err(vif);
			continue;
		}

		RING_FINAL_CHECK_FOR_REQUESTS(&vif->tx, work_to_do);
		if (!work_to_do) {
			xenvif_put(vif);
			continue;
		}

		idx = vif->tx.req_cons;
		rmb(); /* Ensure that we see the request before we copy it. */
		memcpy(&txreq, RING_GET_REQUEST(&vif->tx, idx), sizeof(txreq));

		/* Credit-based scheduling. */
		if (txreq.size > vif->remaining_credit &&
		    tx_credit_exceeded(vif, txreq.size)) {
			xenvif_put(vif);
			continue;
		}

		vif->remaining_credit -= txreq.size;

		work_to_do--;
		vif->tx.req_cons = ++idx;

		memset(extras, 0, sizeof(extras));
		if (txreq.flags & XEN_NETTXF_extra_info) {
			work_to_do = xen_netbk_get_extras(vif, extras,
							  work_to_do);
			idx = vif->tx.req_cons;
			/* Fatal error already handled inside get_extras. */
			if (unlikely(work_to_do < 0))
				continue;
		}

		ret = netbk_count_requests(vif, &txreq, txfrags, work_to_do);
		/* Fatal error already handled inside count_requests. */
		if (unlikely(ret < 0))
			continue;

		idx += ret;

		if (unlikely(txreq.size < ETH_HLEN)) {
			netdev_dbg(vif->dev,
				   "Bad packet size: %d\n", txreq.size);
			netbk_tx_err(vif, &txreq, idx);
			continue;
		}

		/* No crossing a page as the payload mustn't fragment. */
		if (unlikely((txreq.offset + txreq.size) > PAGE_SIZE)) {
			netdev_err(vif->dev,
				   "txreq.offset: %x, size: %u, end: %lu\n",
				   txreq.offset, txreq.size,
				   (txreq.offset&~PAGE_MASK) + txreq.size);
			netbk_fatal_tx_err(vif);
			continue;
		}

		index = pending_index(netbk->pending_cons);
		pending_idx = netbk->pending_ring[index];

		data_len = (txreq.size > PKT_PROT_LEN &&
			    ret < MAX_SKB_FRAGS) ?
			PKT_PROT_LEN : txreq.size;

		skb = alloc_skb(data_len + NET_SKB_PAD + NET_IP_ALIGN,
				GFP_ATOMIC | __GFP_NOWARN);
		if (unlikely(skb == NULL)) {
			netdev_dbg(vif->dev,
				   "Can't allocate a skb in start_xmit.\n");
			netbk_tx_err(vif, &txreq, idx);
			break;
		}

		/* Packets passed to netif_rx() must have some headroom. */
		skb_reserve(skb, NET_SKB_PAD + NET_IP_ALIGN);

		if (extras[XEN_NETIF_EXTRA_TYPE_GSO - 1].type) {
			struct xen_netif_extra_info *gso;
			gso = &extras[XEN_NETIF_EXTRA_TYPE_GSO - 1];

			if (netbk_set_skb_gso(vif, skb, gso)) {
				/* Failure in netbk_set_skb_gso is fatal. */
				kfree_skb(skb);
				continue;
			}
		}

		/* XXX could copy straight to head */
		page = xen_netbk_alloc_page(netbk, skb, pending_idx);
		if (!page) {
			kfree_skb(skb);
			netbk_tx_err(vif, &txreq, idx);
			continue;
		}

		gop->source.u.ref = txreq.gref;
		gop->source.domid = vif->domid;
		gop->source.offset = txreq.offset;

		gop->dest.u.gmfn = virt_to_mfn(page_address(page));
		gop->dest.domid = DOMID_SELF;
		gop->dest.offset = txreq.offset;

		gop->len = txreq.size;
		gop->flags = GNTCOPY_source_gref;

		gop++;

		memcpy(&netbk->pending_tx_info[pending_idx].req,
		       &txreq, sizeof(txreq));
		netbk->pending_tx_info[pending_idx].vif = vif;
		*((u16 *)skb->data) = pending_idx;

		__skb_put(skb, data_len);

		skb_shinfo(skb)->nr_frags = ret;
		if (data_len < txreq.size) {
			skb_shinfo(skb)->nr_frags++;
			frag_set_pending_idx(&skb_shinfo(skb)->frags[0],
					     pending_idx);
		} else {
			frag_set_pending_idx(&skb_shinfo(skb)->frags[0],
					     INVALID_PENDING_IDX);
		}

		netbk->pending_cons++;

		request_gop = xen_netbk_get_requests(netbk, vif,
						     skb, txfrags, gop);
		if (request_gop == NULL) {
			kfree_skb(skb);
			netbk_tx_err(vif, &txreq, idx);
			continue;
		}
		gop = request_gop;

		__skb_queue_tail(&netbk->tx_queue, skb);

		vif->tx.req_cons = idx;
		xen_netbk_check_rx_xenvif(vif);

		if ((gop-netbk->tx_copy_ops) >= ARRAY_SIZE(netbk->tx_copy_ops))
			break;
	}

	return gop - netbk->tx_copy_ops;
}

/* Complete the skbs whose grant copies have been executed and hand
 * them to the network stack. */
static void xen_netbk_tx_submit(struct xen_netbk *netbk)
{
	struct gnttab_copy *gop = netbk->tx_copy_ops;
	struct sk_buff *skb;

	while ((skb = __skb_dequeue(&netbk->tx_queue)) != NULL) {
		struct xen_netif_tx_request *txp;
		struct xenvif *vif;
		u16 pending_idx;
		unsigned data_len;

		pending_idx = *((u16 *)skb->data);
		vif = netbk->pending_tx_info[pending_idx].vif;
		txp = &netbk->pending_tx_info[pending_idx].req;

		/* Check the remap error code. */
		if (unlikely(xen_netbk_tx_check_gop(netbk, skb, &gop))) {
			netdev_dbg(vif->dev, "netback grant failed.\n");
			skb_shinfo(skb)->nr_frags = 0;
			kfree_skb(skb);
			continue;
		}

		data_len = skb->len;
		memcpy(skb->data,
		       (void *)(idx_to_kaddr(netbk, pending_idx)|txp->offset),
		       data_len);
		if (data_len < txp->size) {
			/* Append the packet payload as a fragment. */
			txp->offset += data_len;
			txp->size -= data_len;
		} else {
			/* Schedule a response immediately. */
			xen_netbk_idx_release(netbk, pending_idx);
		}

		if (txp->flags & XEN_NETTXF_csum_blank)
			skb->ip_summed = CHECKSUM_PARTIAL;
		else if (txp->flags & XEN_NETTXF_data_validated)
			skb->ip_summed = CHECKSUM_UNNECESSARY;

		xen_netbk_fill_frags(netbk, skb);

		/*
		 * If the initial fragment was < PKT_PROT_LEN then
		 * pull through some bytes from the other fragments to
		 * increase the linear region to PKT_PROT_LEN bytes.
		 */
		if (skb_headlen(skb) < PKT_PROT_LEN && skb_is_nonlinear(skb)) {
			int target = min_t(int, skb->len, PKT_PROT_LEN);
			__pskb_pull_tail(skb, target - skb_headlen(skb));
		}

		skb->dev      = vif->dev;
		skb->protocol = eth_type_trans(skb, skb->dev);

		if (checksum_setup(vif, skb)) {
			netdev_dbg(vif->dev,
				   "Can't setup checksum in net_tx_action\n");
			kfree_skb(skb);
			continue;
		}

		vif->dev->stats.rx_bytes += skb->len;
		vif->dev->stats.rx_packets++;

		xenvif_receive_skb(vif, skb);
	}
}

/* Called after netfront has transmitted */
static void xen_netbk_tx_action(struct xen_netbk *netbk)
{
	unsigned nr_gops;

	nr_gops = xen_netbk_tx_build_gops(netbk);

	if (nr_gops == 0)
		return;

	gnttab_batch_copy(netbk->tx_copy_ops, nr_gops);

	xen_netbk_tx_submit(netbk);
}

/* Complete a pending tx slot: respond OKAY, recycle the ring slot and
 * release the backing page and vif reference. */
static void xen_netbk_idx_release(struct xen_netbk *netbk, u16 pending_idx)
{
	struct xenvif *vif;
	struct pending_tx_info *pending_tx_info;
	pending_ring_idx_t index;

	/* Already complete? */
	if (netbk->mmap_pages[pending_idx] == NULL)
		return;

	pending_tx_info = &netbk->pending_tx_info[pending_idx];

	vif = pending_tx_info->vif;

	make_tx_response(vif, &pending_tx_info->req, XEN_NETIF_RSP_OKAY);

	index = pending_index(netbk->pending_prod++);
	netbk->pending_ring[index] = pending_idx;

	xenvif_put(vif);

	netbk->mmap_pages[pending_idx]->mapping = 0;
	put_page(netbk->mmap_pages[pending_idx]);
	netbk->mmap_pages[pending_idx] = NULL;
}

/* Push a response for 'txp' onto the tx ring and notify if needed. */
static void make_tx_response(struct xenvif *vif,
			     struct xen_netif_tx_request *txp,
			     s8 st)
{
	RING_IDX i = vif->tx.rsp_prod_pvt;
	struct xen_netif_tx_response *resp;
	int notify;

	resp = RING_GET_RESPONSE(&vif->tx, i);
	resp->id     = txp->id;
	resp->status = st;

	/* Requests with extra info consume an additional ring slot. */
	if (txp->flags & XEN_NETTXF_extra_info)
		RING_GET_RESPONSE(&vif->tx, ++i)->status = XEN_NETIF_RSP_NULL;

	vif->tx.rsp_prod_pvt = ++i;
	RING_PUSH_RESPONSES_AND_CHECK_NOTIFY(&vif->tx, notify);
	if (notify)
		notify_remote_via_irq(vif->irq);
}

/* Push a response onto the rx ring; negative status overrides size. */
static struct xen_netif_rx_response *make_rx_response(struct xenvif *vif,
					     u16      id,
					     s8       st,
					     u16      offset,
					     u16      size,
					     u16      flags)
{
	RING_IDX i = vif->rx.rsp_prod_pvt;
	struct xen_netif_rx_response *resp;

	resp = RING_GET_RESPONSE(&vif->rx, i);
	resp->offset     = offset;
	resp->flags      = flags;
	resp->id         = id;
	resp->status     = (s16)size;
	if (st < 0)
		resp->status = (s16)st;

	vif->rx.rsp_prod_pvt = ++i;

	return resp;
}

static inline int rx_work_todo(struct xen_netbk *netbk)
{
	return !skb_queue_empty(&netbk->rx_queue);
}

static inline int tx_work_todo(struct xen_netbk *netbk)
{
	if (((nr_pending_reqs(netbk) + MAX_SKB_FRAGS) < MAX_PENDING_REQS) &&
	    !list_empty(&netbk->net_schedule_list))
		return 1;

	return 0;
}

/* Per-group worker thread: alternates rx and tx processing. */
static int xen_netbk_kthread(void *data)
{
	struct xen_netbk *netbk = data;

	while (!kthread_should_stop()) {
		wait_event_interruptible(netbk->wq,
				rx_work_todo(netbk) ||
				tx_work_todo(netbk) ||
				kthread_should_stop());
		cond_resched();

		if (kthread_should_stop())
			break;

		if (rx_work_todo(netbk))
			xen_netbk_rx_action(netbk);

		if (tx_work_todo(netbk))
			xen_netbk_tx_action(netbk);
	}

	return 0;
}
/* Unmap both shared rings from the backend's address space. */
void xen_netbk_unmap_frontend_rings(struct xenvif *vif)
{
	if (vif->tx.sring)
		xenbus_unmap_ring_vfree(xenvif_to_xenbus_device(vif),
					vif->tx.sring);
	if (vif->rx.sring)
		xenbus_unmap_ring_vfree(xenvif_to_xenbus_device(vif),
					vif->rx.sring);
}

/* Map the frontend's tx/rx shared rings by grant reference and
 * initialise the backend ring state.  Returns 0 or a xenbus error. */
int xen_netbk_map_frontend_rings(struct xenvif *vif,
				 grant_ref_t tx_ring_ref,
				 grant_ref_t rx_ring_ref)
{
	void *addr;
	struct xen_netif_tx_sring *txs;
	struct xen_netif_rx_sring *rxs;

	int err = -ENOMEM;

	err = xenbus_map_ring_valloc(xenvif_to_xenbus_device(vif),
				     tx_ring_ref, &addr);
	if (err)
		goto err;

	txs = (struct xen_netif_tx_sring *)addr;
	BACK_RING_INIT(&vif->tx, txs, PAGE_SIZE);

	err = xenbus_map_ring_valloc(xenvif_to_xenbus_device(vif),
				     rx_ring_ref, &addr);
	if (err)
		goto err;

	rxs = (struct xen_netif_rx_sring *)addr;
	BACK_RING_INIT(&vif->rx, rxs, PAGE_SIZE);

	vif->rx_req_cons_peek = 0;

	return 0;

err:
	/* Partial failure: tear down whatever was mapped. */
	xen_netbk_unmap_frontend_rings(vif);
	return err;
}

/* Module init: one netback group (worker thread + pending ring) per
 * online CPU, then register the xenbus backend. */
static int __init netback_init(void)
{
	int i;
	int rc = 0;
	int group;

	if (!xen_domain())
		return -ENODEV;

	xen_netbk_group_nr = num_online_cpus();
	xen_netbk = vzalloc(sizeof(struct xen_netbk) * xen_netbk_group_nr);
	if (!xen_netbk)
		return -ENOMEM;

	for (group = 0; group < xen_netbk_group_nr; group++) {
		struct xen_netbk *netbk = &xen_netbk[group];
		skb_queue_head_init(&netbk->rx_queue);
		skb_queue_head_init(&netbk->tx_queue);

		init_timer(&netbk->net_timer);
		netbk->net_timer.data = (unsigned long)netbk;
		netbk->net_timer.function = xen_netbk_alarm;

		/* All pending slots start out free. */
		netbk->pending_cons = 0;
		netbk->pending_prod = MAX_PENDING_REQS;
		for (i = 0; i < MAX_PENDING_REQS; i++)
			netbk->pending_ring[i] = i;

		init_waitqueue_head(&netbk->wq);
		netbk->task = kthread_create(xen_netbk_kthread,
					     (void *)netbk,
					     "netback/%u", group);

		if (IS_ERR(netbk->task)) {
			printk(KERN_ALERT "kthread_create() fails at netback\n");
			del_timer(&netbk->net_timer);
			rc = PTR_ERR(netbk->task);
			goto failed_init;
		}

		/* Pin each worker to its own CPU. */
		kthread_bind(netbk->task, group);

		INIT_LIST_HEAD(&netbk->net_schedule_list);

		spin_lock_init(&netbk->net_schedule_list_lock);

		atomic_set(&netbk->netfront_count, 0);

		wake_up_process(netbk->task);
	}

	rc = xenvif_xenbus_init();
	if (rc)
		goto failed_init;

	return 0;

failed_init:
	/* Unwind only the groups that were fully set up. */
	while (--group >= 0) {
		struct xen_netbk *netbk = &xen_netbk[group];
		for (i = 0; i < MAX_PENDING_REQS; i++) {
			if (netbk->mmap_pages[i])
				__free_page(netbk->mmap_pages[i]);
		}
		del_timer(&netbk->net_timer);
		kthread_stop(netbk->task);
	}
	vfree(xen_netbk);
	return rc;

}

module_init(netback_init);

MODULE_LICENSE("Dual BSD/GPL");
MODULE_ALIAS("xen-backend:vif");
static unsigned xen_netbk_tx_build_gops(struct xen_netbk *netbk) { struct gnttab_copy *gop = netbk->tx_copy_ops, *request_gop; struct sk_buff *skb; int ret; while (((nr_pending_reqs(netbk) + MAX_SKB_FRAGS) < MAX_PENDING_REQS) && !list_empty(&netbk->net_schedule_list)) { struct xenvif *vif; struct xen_netif_tx_request txreq; struct xen_netif_tx_request txfrags[MAX_SKB_FRAGS]; struct page *page; struct xen_netif_extra_info extras[XEN_NETIF_EXTRA_TYPE_MAX-1]; u16 pending_idx; RING_IDX idx; int work_to_do; unsigned int data_len; pending_ring_idx_t index; /* Get a netif from the list with work to do. */ vif = poll_net_schedule_list(netbk); if (!vif) continue; RING_FINAL_CHECK_FOR_REQUESTS(&vif->tx, work_to_do); if (!work_to_do) { xenvif_put(vif); continue; } idx = vif->tx.req_cons; rmb(); /* Ensure that we see the request before we copy it. */ memcpy(&txreq, RING_GET_REQUEST(&vif->tx, idx), sizeof(txreq)); /* Credit-based scheduling. */ if (txreq.size > vif->remaining_credit && tx_credit_exceeded(vif, txreq.size)) { xenvif_put(vif); continue; } vif->remaining_credit -= txreq.size; work_to_do--; vif->tx.req_cons = ++idx; memset(extras, 0, sizeof(extras)); if (txreq.flags & XEN_NETTXF_extra_info) { work_to_do = xen_netbk_get_extras(vif, extras, work_to_do); idx = vif->tx.req_cons; if (unlikely(work_to_do < 0)) { netbk_tx_err(vif, &txreq, idx); continue; } } ret = netbk_count_requests(vif, &txreq, txfrags, work_to_do); if (unlikely(ret < 0)) { netbk_tx_err(vif, &txreq, idx - ret); continue; } idx += ret; if (unlikely(txreq.size < ETH_HLEN)) { netdev_dbg(vif->dev, "Bad packet size: %d\n", txreq.size); netbk_tx_err(vif, &txreq, idx); continue; } /* No crossing a page as the payload mustn't fragment. 
*/ if (unlikely((txreq.offset + txreq.size) > PAGE_SIZE)) { netdev_dbg(vif->dev, "txreq.offset: %x, size: %u, end: %lu\n", txreq.offset, txreq.size, (txreq.offset&~PAGE_MASK) + txreq.size); netbk_tx_err(vif, &txreq, idx); continue; } index = pending_index(netbk->pending_cons); pending_idx = netbk->pending_ring[index]; data_len = (txreq.size > PKT_PROT_LEN && ret < MAX_SKB_FRAGS) ? PKT_PROT_LEN : txreq.size; skb = alloc_skb(data_len + NET_SKB_PAD + NET_IP_ALIGN, GFP_ATOMIC | __GFP_NOWARN); if (unlikely(skb == NULL)) { netdev_dbg(vif->dev, "Can't allocate a skb in start_xmit.\n"); netbk_tx_err(vif, &txreq, idx); break; } /* Packets passed to netif_rx() must have some headroom. */ skb_reserve(skb, NET_SKB_PAD + NET_IP_ALIGN); if (extras[XEN_NETIF_EXTRA_TYPE_GSO - 1].type) { struct xen_netif_extra_info *gso; gso = &extras[XEN_NETIF_EXTRA_TYPE_GSO - 1]; if (netbk_set_skb_gso(vif, skb, gso)) { kfree_skb(skb); netbk_tx_err(vif, &txreq, idx); continue; } } /* XXX could copy straight to head */ page = xen_netbk_alloc_page(netbk, skb, pending_idx); if (!page) { kfree_skb(skb); netbk_tx_err(vif, &txreq, idx); continue; } gop->source.u.ref = txreq.gref; gop->source.domid = vif->domid; gop->source.offset = txreq.offset; gop->dest.u.gmfn = virt_to_mfn(page_address(page)); gop->dest.domid = DOMID_SELF; gop->dest.offset = txreq.offset; gop->len = txreq.size; gop->flags = GNTCOPY_source_gref; gop++; memcpy(&netbk->pending_tx_info[pending_idx].req, &txreq, sizeof(txreq)); netbk->pending_tx_info[pending_idx].vif = vif; *((u16 *)skb->data) = pending_idx; __skb_put(skb, data_len); skb_shinfo(skb)->nr_frags = ret; if (data_len < txreq.size) { skb_shinfo(skb)->nr_frags++; frag_set_pending_idx(&skb_shinfo(skb)->frags[0], pending_idx); } else { frag_set_pending_idx(&skb_shinfo(skb)->frags[0], INVALID_PENDING_IDX); } netbk->pending_cons++; request_gop = xen_netbk_get_requests(netbk, vif, skb, txfrags, gop); if (request_gop == NULL) { kfree_skb(skb); netbk_tx_err(vif, &txreq, idx); continue; 
} gop = request_gop; __skb_queue_tail(&netbk->tx_queue, skb); vif->tx.req_cons = idx; xen_netbk_check_rx_xenvif(vif); if ((gop-netbk->tx_copy_ops) >= ARRAY_SIZE(netbk->tx_copy_ops)) break; } return gop - netbk->tx_copy_ops; }
static unsigned xen_netbk_tx_build_gops(struct xen_netbk *netbk) { struct gnttab_copy *gop = netbk->tx_copy_ops, *request_gop; struct sk_buff *skb; int ret; while (((nr_pending_reqs(netbk) + MAX_SKB_FRAGS) < MAX_PENDING_REQS) && !list_empty(&netbk->net_schedule_list)) { struct xenvif *vif; struct xen_netif_tx_request txreq; struct xen_netif_tx_request txfrags[MAX_SKB_FRAGS]; struct page *page; struct xen_netif_extra_info extras[XEN_NETIF_EXTRA_TYPE_MAX-1]; u16 pending_idx; RING_IDX idx; int work_to_do; unsigned int data_len; pending_ring_idx_t index; /* Get a netif from the list with work to do. */ vif = poll_net_schedule_list(netbk); /* This can sometimes happen because the test of * list_empty(net_schedule_list) at the top of the * loop is unlocked. Just go back and have another * look. */ if (!vif) continue; if (vif->tx.sring->req_prod - vif->tx.req_cons > XEN_NETIF_TX_RING_SIZE) { netdev_err(vif->dev, "Impossible number of requests. " "req_prod %d, req_cons %d, size %ld\n", vif->tx.sring->req_prod, vif->tx.req_cons, XEN_NETIF_TX_RING_SIZE); netbk_fatal_tx_err(vif); continue; } RING_FINAL_CHECK_FOR_REQUESTS(&vif->tx, work_to_do); if (!work_to_do) { xenvif_put(vif); continue; } idx = vif->tx.req_cons; rmb(); /* Ensure that we see the request before we copy it. */ memcpy(&txreq, RING_GET_REQUEST(&vif->tx, idx), sizeof(txreq)); /* Credit-based scheduling. 
*/ if (txreq.size > vif->remaining_credit && tx_credit_exceeded(vif, txreq.size)) { xenvif_put(vif); continue; } vif->remaining_credit -= txreq.size; work_to_do--; vif->tx.req_cons = ++idx; memset(extras, 0, sizeof(extras)); if (txreq.flags & XEN_NETTXF_extra_info) { work_to_do = xen_netbk_get_extras(vif, extras, work_to_do); idx = vif->tx.req_cons; if (unlikely(work_to_do < 0)) continue; } ret = netbk_count_requests(vif, &txreq, txfrags, work_to_do); if (unlikely(ret < 0)) continue; idx += ret; if (unlikely(txreq.size < ETH_HLEN)) { netdev_dbg(vif->dev, "Bad packet size: %d\n", txreq.size); netbk_tx_err(vif, &txreq, idx); continue; } /* No crossing a page as the payload mustn't fragment. */ if (unlikely((txreq.offset + txreq.size) > PAGE_SIZE)) { netdev_err(vif->dev, "txreq.offset: %x, size: %u, end: %lu\n", txreq.offset, txreq.size, (txreq.offset&~PAGE_MASK) + txreq.size); netbk_fatal_tx_err(vif); continue; } index = pending_index(netbk->pending_cons); pending_idx = netbk->pending_ring[index]; data_len = (txreq.size > PKT_PROT_LEN && ret < MAX_SKB_FRAGS) ? PKT_PROT_LEN : txreq.size; skb = alloc_skb(data_len + NET_SKB_PAD + NET_IP_ALIGN, GFP_ATOMIC | __GFP_NOWARN); if (unlikely(skb == NULL)) { netdev_dbg(vif->dev, "Can't allocate a skb in start_xmit.\n"); netbk_tx_err(vif, &txreq, idx); break; } /* Packets passed to netif_rx() must have some headroom. */ skb_reserve(skb, NET_SKB_PAD + NET_IP_ALIGN); if (extras[XEN_NETIF_EXTRA_TYPE_GSO - 1].type) { struct xen_netif_extra_info *gso; gso = &extras[XEN_NETIF_EXTRA_TYPE_GSO - 1]; if (netbk_set_skb_gso(vif, skb, gso)) { /* Failure in netbk_set_skb_gso is fatal. 
*/ kfree_skb(skb); continue; } } /* XXX could copy straight to head */ page = xen_netbk_alloc_page(netbk, skb, pending_idx); if (!page) { kfree_skb(skb); netbk_tx_err(vif, &txreq, idx); continue; } gop->source.u.ref = txreq.gref; gop->source.domid = vif->domid; gop->source.offset = txreq.offset; gop->dest.u.gmfn = virt_to_mfn(page_address(page)); gop->dest.domid = DOMID_SELF; gop->dest.offset = txreq.offset; gop->len = txreq.size; gop->flags = GNTCOPY_source_gref; gop++; memcpy(&netbk->pending_tx_info[pending_idx].req, &txreq, sizeof(txreq)); netbk->pending_tx_info[pending_idx].vif = vif; *((u16 *)skb->data) = pending_idx; __skb_put(skb, data_len); skb_shinfo(skb)->nr_frags = ret; if (data_len < txreq.size) { skb_shinfo(skb)->nr_frags++; frag_set_pending_idx(&skb_shinfo(skb)->frags[0], pending_idx); } else { frag_set_pending_idx(&skb_shinfo(skb)->frags[0], INVALID_PENDING_IDX); } netbk->pending_cons++; request_gop = xen_netbk_get_requests(netbk, vif, skb, txfrags, gop); if (request_gop == NULL) { kfree_skb(skb); netbk_tx_err(vif, &txreq, idx); continue; } gop = request_gop; __skb_queue_tail(&netbk->tx_queue, skb); vif->tx.req_cons = idx; xen_netbk_check_rx_xenvif(vif); if ((gop-netbk->tx_copy_ops) >= ARRAY_SIZE(netbk->tx_copy_ops)) break; } return gop - netbk->tx_copy_ops; }
{'added': [(891, 'static void netbk_fatal_tx_err(struct xenvif *vif)'), (892, '{'), (893, '\tnetdev_err(vif->dev, "fatal error; disabling device\\n");'), (894, '\txenvif_carrier_off(vif);'), (895, '\txenvif_put(vif);'), (896, '}'), (897, ''), (911, '\t\t\tnetdev_err(vif->dev, "Need more frags\\n");'), (912, '\t\t\tnetbk_fatal_tx_err(vif);'), (917, '\t\t\tnetdev_err(vif->dev, "Too many frags\\n");'), (918, '\t\t\tnetbk_fatal_tx_err(vif);'), (925, '\t\t\tnetdev_err(vif->dev, "Frag is bigger than frame.\\n");'), (926, '\t\t\tnetbk_fatal_tx_err(vif);'), (934, '\t\t\tnetdev_err(vif->dev, "txp->offset: %x, size: %u\\n",'), (936, '\t\t\tnetbk_fatal_tx_err(vif);'), (1109, '\t\t\tnetdev_err(vif->dev, "Missing extra info\\n");'), (1110, '\t\t\tnetbk_fatal_tx_err(vif);'), (1119, '\t\t\tnetdev_err(vif->dev,'), (1121, '\t\t\tnetbk_fatal_tx_err(vif);'), (1137, '\t\tnetdev_err(vif->dev, "GSO size must not be zero.\\n");'), (1138, '\t\tnetbk_fatal_tx_err(vif);'), (1144, '\t\tnetdev_err(vif->dev, "Bad GSO type %d.\\n", gso->u.gso.type);'), (1145, '\t\tnetbk_fatal_tx_err(vif);'), (1282, '\t\t/* This can sometimes happen because the test of'), (1283, '\t\t * list_empty(net_schedule_list) at the top of the'), (1284, '\t\t * loop is unlocked. Just go back and have another'), (1285, '\t\t * look.'), (1286, '\t\t */'), (1290, '\t\tif (vif->tx.sring->req_prod - vif->tx.req_cons >'), (1291, '\t\t XEN_NETIF_TX_RING_SIZE) {'), (1292, '\t\t\tnetdev_err(vif->dev,'), (1293, '\t\t\t\t "Impossible number of requests. 
"'), (1294, '\t\t\t\t "req_prod %d, req_cons %d, size %ld\\n",'), (1295, '\t\t\t\t vif->tx.sring->req_prod, vif->tx.req_cons,'), (1296, '\t\t\t\t XEN_NETIF_TX_RING_SIZE);'), (1297, '\t\t\tnetbk_fatal_tx_err(vif);'), (1298, '\t\t\tcontinue;'), (1299, '\t\t}'), (1300, ''), (1328, '\t\t\tif (unlikely(work_to_do < 0))'), (1333, '\t\tif (unlikely(ret < 0))'), (1335, ''), (1347, '\t\t\tnetdev_err(vif->dev,'), (1351, '\t\t\tnetbk_fatal_tx_err(vif);'), (1379, '\t\t\t\t/* Failure in netbk_set_skb_gso is fatal. */')], 'deleted': [(904, '\t\t\tnetdev_dbg(vif->dev, "Need more frags\\n");'), (909, '\t\t\tnetdev_dbg(vif->dev, "Too many frags\\n");'), (916, '\t\t\tnetdev_dbg(vif->dev, "Frags galore\\n");'), (924, '\t\t\tnetdev_dbg(vif->dev, "txp->offset: %x, size: %u\\n",'), (1098, '\t\t\tnetdev_dbg(vif->dev, "Missing extra info\\n");'), (1107, '\t\t\tnetdev_dbg(vif->dev,'), (1124, '\t\tnetdev_dbg(vif->dev, "GSO size must not be zero.\\n");'), (1130, '\t\tnetdev_dbg(vif->dev, "Bad GSO type %d.\\n", gso->u.gso.type);'), (1297, '\t\t\tif (unlikely(work_to_do < 0)) {'), (1298, '\t\t\t\tnetbk_tx_err(vif, &txreq, idx);'), (1300, '\t\t\t}'), (1304, '\t\tif (unlikely(ret < 0)) {'), (1305, '\t\t\tnetbk_tx_err(vif, &txreq, idx - ret);'), (1307, '\t\t}'), (1319, '\t\t\tnetdev_dbg(vif->dev,'), (1323, '\t\t\tnetbk_tx_err(vif, &txreq, idx);'), (1352, '\t\t\t\tnetbk_tx_err(vif, &txreq, idx);')]}
45
17
1,234
7,927
135
907
21
https://github.com/torvalds/linux
CVE-2013-0216
CWE-20
3,267
volumes.c
C
read_one_chunk
// SPDX-License-Identifier: GPL-2.0 /* * Copyright (C) 2007 Oracle. All rights reserved. */ #include <linux/sched.h> #include <linux/bio.h> #include <linux/slab.h> #include <linux/buffer_head.h> #include <linux/blkdev.h> #include <linux/ratelimit.h> #include <linux/kthread.h> #include <linux/raid/pq.h> #include <linux/semaphore.h> #include <linux/uuid.h> #include <linux/list_sort.h> #include "ctree.h" #include "extent_map.h" #include "disk-io.h" #include "transaction.h" #include "print-tree.h" #include "volumes.h" #include "raid56.h" #include "async-thread.h" #include "check-integrity.h" #include "rcu-string.h" #include "math.h" #include "dev-replace.h" #include "sysfs.h" const struct btrfs_raid_attr btrfs_raid_array[BTRFS_NR_RAID_TYPES] = { [BTRFS_RAID_RAID10] = { .sub_stripes = 2, .dev_stripes = 1, .devs_max = 0, /* 0 == as many as possible */ .devs_min = 4, .tolerated_failures = 1, .devs_increment = 2, .ncopies = 2, .nparity = 0, .raid_name = "raid10", .bg_flag = BTRFS_BLOCK_GROUP_RAID10, .mindev_error = BTRFS_ERROR_DEV_RAID10_MIN_NOT_MET, }, [BTRFS_RAID_RAID1] = { .sub_stripes = 1, .dev_stripes = 1, .devs_max = 2, .devs_min = 2, .tolerated_failures = 1, .devs_increment = 2, .ncopies = 2, .nparity = 0, .raid_name = "raid1", .bg_flag = BTRFS_BLOCK_GROUP_RAID1, .mindev_error = BTRFS_ERROR_DEV_RAID1_MIN_NOT_MET, }, [BTRFS_RAID_DUP] = { .sub_stripes = 1, .dev_stripes = 2, .devs_max = 1, .devs_min = 1, .tolerated_failures = 0, .devs_increment = 1, .ncopies = 2, .nparity = 0, .raid_name = "dup", .bg_flag = BTRFS_BLOCK_GROUP_DUP, .mindev_error = 0, }, [BTRFS_RAID_RAID0] = { .sub_stripes = 1, .dev_stripes = 1, .devs_max = 0, .devs_min = 2, .tolerated_failures = 0, .devs_increment = 1, .ncopies = 1, .nparity = 0, .raid_name = "raid0", .bg_flag = BTRFS_BLOCK_GROUP_RAID0, .mindev_error = 0, }, [BTRFS_RAID_SINGLE] = { .sub_stripes = 1, .dev_stripes = 1, .devs_max = 1, .devs_min = 1, .tolerated_failures = 0, .devs_increment = 1, .ncopies = 1, .nparity = 0, .raid_name = 
"single", .bg_flag = 0, .mindev_error = 0, }, [BTRFS_RAID_RAID5] = { .sub_stripes = 1, .dev_stripes = 1, .devs_max = 0, .devs_min = 2, .tolerated_failures = 1, .devs_increment = 1, .ncopies = 1, .nparity = 1, .raid_name = "raid5", .bg_flag = BTRFS_BLOCK_GROUP_RAID5, .mindev_error = BTRFS_ERROR_DEV_RAID5_MIN_NOT_MET, }, [BTRFS_RAID_RAID6] = { .sub_stripes = 1, .dev_stripes = 1, .devs_max = 0, .devs_min = 3, .tolerated_failures = 2, .devs_increment = 1, .ncopies = 1, .nparity = 2, .raid_name = "raid6", .bg_flag = BTRFS_BLOCK_GROUP_RAID6, .mindev_error = BTRFS_ERROR_DEV_RAID6_MIN_NOT_MET, }, }; const char *get_raid_name(enum btrfs_raid_types type) { if (type >= BTRFS_NR_RAID_TYPES) return NULL; return btrfs_raid_array[type].raid_name; } /* * Fill @buf with textual description of @bg_flags, no more than @size_buf * bytes including terminating null byte. */ void btrfs_describe_block_groups(u64 bg_flags, char *buf, u32 size_buf) { int i; int ret; char *bp = buf; u64 flags = bg_flags; u32 size_bp = size_buf; if (!flags) { strcpy(bp, "NONE"); return; } #define DESCRIBE_FLAG(flag, desc) \ do { \ if (flags & (flag)) { \ ret = snprintf(bp, size_bp, "%s|", (desc)); \ if (ret < 0 || ret >= size_bp) \ goto out_overflow; \ size_bp -= ret; \ bp += ret; \ flags &= ~(flag); \ } \ } while (0) DESCRIBE_FLAG(BTRFS_BLOCK_GROUP_DATA, "data"); DESCRIBE_FLAG(BTRFS_BLOCK_GROUP_SYSTEM, "system"); DESCRIBE_FLAG(BTRFS_BLOCK_GROUP_METADATA, "metadata"); DESCRIBE_FLAG(BTRFS_AVAIL_ALLOC_BIT_SINGLE, "single"); for (i = 0; i < BTRFS_NR_RAID_TYPES; i++) DESCRIBE_FLAG(btrfs_raid_array[i].bg_flag, btrfs_raid_array[i].raid_name); #undef DESCRIBE_FLAG if (flags) { ret = snprintf(bp, size_bp, "0x%llx|", flags); size_bp -= ret; } if (size_bp < size_buf) buf[size_buf - size_bp - 1] = '\0'; /* remove last | */ /* * The text is trimmed, it's up to the caller to provide sufficiently * large buffer */ out_overflow:; } static int init_first_rw_device(struct btrfs_trans_handle *trans, struct btrfs_fs_info 
*fs_info); static int btrfs_relocate_sys_chunks(struct btrfs_fs_info *fs_info); static void __btrfs_reset_dev_stats(struct btrfs_device *dev); static void btrfs_dev_stat_print_on_error(struct btrfs_device *dev); static void btrfs_dev_stat_print_on_load(struct btrfs_device *device); static int __btrfs_map_block(struct btrfs_fs_info *fs_info, enum btrfs_map_op op, u64 logical, u64 *length, struct btrfs_bio **bbio_ret, int mirror_num, int need_raid_map); /* * Device locking * ============== * * There are several mutexes that protect manipulation of devices and low-level * structures like chunks but not block groups, extents or files * * uuid_mutex (global lock) * ------------------------ * protects the fs_uuids list that tracks all per-fs fs_devices, resulting from * the SCAN_DEV ioctl registration or from mount either implicitly (the first * device) or requested by the device= mount option * * the mutex can be very coarse and can cover long-running operations * * protects: updates to fs_devices counters like missing devices, rw devices, * seeding, structure cloning, opening/closing devices at mount/umount time * * global::fs_devs - add, remove, updates to the global list * * does not protect: manipulation of the fs_devices::devices list! * * btrfs_device::name - renames (write side), read is RCU * * fs_devices::device_list_mutex (per-fs, with RCU) * ------------------------------------------------ * protects updates to fs_devices::devices, ie. 
adding and deleting * * simple list traversal with read-only actions can be done with RCU protection * * may be used to exclude some operations from running concurrently without any * modifications to the list (see write_all_supers) * * balance_mutex * ------------- * protects balance structures (status, state) and context accessed from * several places (internally, ioctl) * * chunk_mutex * ----------- * protects chunks, adding or removing during allocation, trim or when a new * device is added/removed * * cleaner_mutex * ------------- * a big lock that is held by the cleaner thread and prevents running subvolume * cleaning together with relocation or delayed iputs * * * Lock nesting * ============ * * uuid_mutex * volume_mutex * device_list_mutex * chunk_mutex * balance_mutex * * * Exclusive operations, BTRFS_FS_EXCL_OP * ====================================== * * Maintains the exclusivity of the following operations that apply to the * whole filesystem and cannot run in parallel. * * - Balance (*) * - Device add * - Device remove * - Device replace (*) * - Resize * * The device operations (as above) can be in one of the following states: * * - Running state * - Paused state * - Completed state * * Only device operations marked with (*) can go into the Paused state for the * following reasons: * * - ioctl (only Balance can be Paused through ioctl) * - filesystem remounted as read-only * - filesystem unmounted and mounted as read-only * - system power-cycle and filesystem mounted as read-only * - filesystem or device errors leading to forced read-only * * BTRFS_FS_EXCL_OP flag is set and cleared using atomic operations. * During the course of Paused state, the BTRFS_FS_EXCL_OP remains set. * A device operation in Paused or Running state can be canceled or resumed * either by ioctl (Balance only) or when remounted as read-write. * BTRFS_FS_EXCL_OP flag is cleared when the device operation is canceled or * completed. 
*/ DEFINE_MUTEX(uuid_mutex); static LIST_HEAD(fs_uuids); struct list_head *btrfs_get_fs_uuids(void) { return &fs_uuids; } /* * alloc_fs_devices - allocate struct btrfs_fs_devices * @fsid: if not NULL, copy the UUID to fs_devices::fsid * @metadata_fsid: if not NULL, copy the UUID to fs_devices::metadata_fsid * * Return a pointer to a new struct btrfs_fs_devices on success, or ERR_PTR(). * The returned struct is not linked onto any lists and can be destroyed with * kfree() right away. */ static struct btrfs_fs_devices *alloc_fs_devices(const u8 *fsid, const u8 *metadata_fsid) { struct btrfs_fs_devices *fs_devs; fs_devs = kzalloc(sizeof(*fs_devs), GFP_KERNEL); if (!fs_devs) return ERR_PTR(-ENOMEM); mutex_init(&fs_devs->device_list_mutex); INIT_LIST_HEAD(&fs_devs->devices); INIT_LIST_HEAD(&fs_devs->resized_devices); INIT_LIST_HEAD(&fs_devs->alloc_list); INIT_LIST_HEAD(&fs_devs->fs_list); if (fsid) memcpy(fs_devs->fsid, fsid, BTRFS_FSID_SIZE); if (metadata_fsid) memcpy(fs_devs->metadata_uuid, metadata_fsid, BTRFS_FSID_SIZE); else if (fsid) memcpy(fs_devs->metadata_uuid, fsid, BTRFS_FSID_SIZE); return fs_devs; } void btrfs_free_device(struct btrfs_device *device) { rcu_string_free(device->name); bio_put(device->flush_bio); kfree(device); } static void free_fs_devices(struct btrfs_fs_devices *fs_devices) { struct btrfs_device *device; WARN_ON(fs_devices->opened); while (!list_empty(&fs_devices->devices)) { device = list_entry(fs_devices->devices.next, struct btrfs_device, dev_list); list_del(&device->dev_list); btrfs_free_device(device); } kfree(fs_devices); } static void btrfs_kobject_uevent(struct block_device *bdev, enum kobject_action action) { int ret; ret = kobject_uevent(&disk_to_dev(bdev->bd_disk)->kobj, action); if (ret) pr_warn("BTRFS: Sending event '%d' to kobject: '%s' (%p): failed\n", action, kobject_name(&disk_to_dev(bdev->bd_disk)->kobj), &disk_to_dev(bdev->bd_disk)->kobj); } void __exit btrfs_cleanup_fs_uuids(void) { struct btrfs_fs_devices *fs_devices; 
while (!list_empty(&fs_uuids)) { fs_devices = list_entry(fs_uuids.next, struct btrfs_fs_devices, fs_list); list_del(&fs_devices->fs_list); free_fs_devices(fs_devices); } } /* * Returns a pointer to a new btrfs_device on success; ERR_PTR() on error. * Returned struct is not linked onto any lists and must be destroyed using * btrfs_free_device. */ static struct btrfs_device *__alloc_device(void) { struct btrfs_device *dev; dev = kzalloc(sizeof(*dev), GFP_KERNEL); if (!dev) return ERR_PTR(-ENOMEM); /* * Preallocate a bio that's always going to be used for flushing device * barriers and matches the device lifespan */ dev->flush_bio = bio_alloc_bioset(GFP_KERNEL, 0, NULL); if (!dev->flush_bio) { kfree(dev); return ERR_PTR(-ENOMEM); } INIT_LIST_HEAD(&dev->dev_list); INIT_LIST_HEAD(&dev->dev_alloc_list); INIT_LIST_HEAD(&dev->resized_list); spin_lock_init(&dev->io_lock); atomic_set(&dev->reada_in_flight, 0); atomic_set(&dev->dev_stats_ccnt, 0); btrfs_device_data_ordered_init(dev); INIT_RADIX_TREE(&dev->reada_zones, GFP_NOFS & ~__GFP_DIRECT_RECLAIM); INIT_RADIX_TREE(&dev->reada_extents, GFP_NOFS & ~__GFP_DIRECT_RECLAIM); return dev; } /* * Find a device specified by @devid or @uuid in the list of @fs_devices, or * return NULL. * * If devid and uuid are both specified, the match must be exact, otherwise * only devid is used. 
*/ static struct btrfs_device *find_device(struct btrfs_fs_devices *fs_devices, u64 devid, const u8 *uuid) { struct btrfs_device *dev; list_for_each_entry(dev, &fs_devices->devices, dev_list) { if (dev->devid == devid && (!uuid || !memcmp(dev->uuid, uuid, BTRFS_UUID_SIZE))) { return dev; } } return NULL; } static noinline struct btrfs_fs_devices *find_fsid( const u8 *fsid, const u8 *metadata_fsid) { struct btrfs_fs_devices *fs_devices; ASSERT(fsid); if (metadata_fsid) { /* * Handle scanned device having completed its fsid change but * belonging to a fs_devices that was created by first scanning * a device which didn't have its fsid/metadata_uuid changed * at all and the CHANGING_FSID_V2 flag set. */ list_for_each_entry(fs_devices, &fs_uuids, fs_list) { if (fs_devices->fsid_change && memcmp(metadata_fsid, fs_devices->fsid, BTRFS_FSID_SIZE) == 0 && memcmp(fs_devices->fsid, fs_devices->metadata_uuid, BTRFS_FSID_SIZE) == 0) { return fs_devices; } } /* * Handle scanned device having completed its fsid change but * belonging to a fs_devices that was created by a device that * has an outdated pair of fsid/metadata_uuid and * CHANGING_FSID_V2 flag set. 
*/ list_for_each_entry(fs_devices, &fs_uuids, fs_list) { if (fs_devices->fsid_change && memcmp(fs_devices->metadata_uuid, fs_devices->fsid, BTRFS_FSID_SIZE) != 0 && memcmp(metadata_fsid, fs_devices->metadata_uuid, BTRFS_FSID_SIZE) == 0) { return fs_devices; } } } /* Handle non-split brain cases */ list_for_each_entry(fs_devices, &fs_uuids, fs_list) { if (metadata_fsid) { if (memcmp(fsid, fs_devices->fsid, BTRFS_FSID_SIZE) == 0 && memcmp(metadata_fsid, fs_devices->metadata_uuid, BTRFS_FSID_SIZE) == 0) return fs_devices; } else { if (memcmp(fsid, fs_devices->fsid, BTRFS_FSID_SIZE) == 0) return fs_devices; } } return NULL; } static int btrfs_get_bdev_and_sb(const char *device_path, fmode_t flags, void *holder, int flush, struct block_device **bdev, struct buffer_head **bh) { int ret; *bdev = blkdev_get_by_path(device_path, flags, holder); if (IS_ERR(*bdev)) { ret = PTR_ERR(*bdev); goto error; } if (flush) filemap_write_and_wait((*bdev)->bd_inode->i_mapping); ret = set_blocksize(*bdev, BTRFS_BDEV_BLOCKSIZE); if (ret) { blkdev_put(*bdev, flags); goto error; } invalidate_bdev(*bdev); *bh = btrfs_read_dev_super(*bdev); if (IS_ERR(*bh)) { ret = PTR_ERR(*bh); blkdev_put(*bdev, flags); goto error; } return 0; error: *bdev = NULL; *bh = NULL; return ret; } static void requeue_list(struct btrfs_pending_bios *pending_bios, struct bio *head, struct bio *tail) { struct bio *old_head; old_head = pending_bios->head; pending_bios->head = head; if (pending_bios->tail) tail->bi_next = old_head; else pending_bios->tail = tail; } /* * we try to collect pending bios for a device so we don't get a large * number of procs sending bios down to the same device. This greatly * improves the schedulers ability to collect and merge the bios. * * But, it also turns into a long list of bios to process and that is sure * to eventually make the worker thread block. 
The solution here is to * make some progress and then put this work struct back at the end of * the list if the block device is congested. This way, multiple devices * can make progress from a single worker thread. */ static noinline void run_scheduled_bios(struct btrfs_device *device) { struct btrfs_fs_info *fs_info = device->fs_info; struct bio *pending; struct backing_dev_info *bdi; struct btrfs_pending_bios *pending_bios; struct bio *tail; struct bio *cur; int again = 0; unsigned long num_run; unsigned long batch_run = 0; unsigned long last_waited = 0; int force_reg = 0; int sync_pending = 0; struct blk_plug plug; /* * this function runs all the bios we've collected for * a particular device. We don't want to wander off to * another device without first sending all of these down. * So, setup a plug here and finish it off before we return */ blk_start_plug(&plug); bdi = device->bdev->bd_bdi; loop: spin_lock(&device->io_lock); loop_lock: num_run = 0; /* take all the bios off the list at once and process them * later on (without the lock held). But, remember the * tail and other pointers so the bios can be properly reinserted * into the list if we hit congestion */ if (!force_reg && device->pending_sync_bios.head) { pending_bios = &device->pending_sync_bios; force_reg = 1; } else { pending_bios = &device->pending_bios; force_reg = 0; } pending = pending_bios->head; tail = pending_bios->tail; WARN_ON(pending && !tail); /* * if pending was null this time around, no bios need processing * at all and we can stop. Otherwise it'll loop back up again * and do an additional check so no bios are missed. * * device->running_pending is used to synchronize with the * schedule_bio code. 
*/ if (device->pending_sync_bios.head == NULL && device->pending_bios.head == NULL) { again = 0; device->running_pending = 0; } else { again = 1; device->running_pending = 1; } pending_bios->head = NULL; pending_bios->tail = NULL; spin_unlock(&device->io_lock); while (pending) { rmb(); /* we want to work on both lists, but do more bios on the * sync list than the regular list */ if ((num_run > 32 && pending_bios != &device->pending_sync_bios && device->pending_sync_bios.head) || (num_run > 64 && pending_bios == &device->pending_sync_bios && device->pending_bios.head)) { spin_lock(&device->io_lock); requeue_list(pending_bios, pending, tail); goto loop_lock; } cur = pending; pending = pending->bi_next; cur->bi_next = NULL; BUG_ON(atomic_read(&cur->__bi_cnt) == 0); /* * if we're doing the sync list, record that our * plug has some sync requests on it * * If we're doing the regular list and there are * sync requests sitting around, unplug before * we add more */ if (pending_bios == &device->pending_sync_bios) { sync_pending = 1; } else if (sync_pending) { blk_finish_plug(&plug); blk_start_plug(&plug); sync_pending = 0; } btrfsic_submit_bio(cur); num_run++; batch_run++; cond_resched(); /* * we made progress, there is more work to do and the bdi * is now congested. Back off and let other work structs * run instead */ if (pending && bdi_write_congested(bdi) && batch_run > 8 && fs_info->fs_devices->open_devices > 1) { struct io_context *ioc; ioc = current->io_context; /* * the main goal here is that we don't want to * block if we're going to be able to submit * more requests without blocking. * * This code does two great things, it pokes into * the elevator code from a filesystem _and_ * it makes assumptions about how batching works. */ if (ioc && ioc->nr_batch_requests > 0 && time_before(jiffies, ioc->last_waited + HZ/50UL) && (last_waited == 0 || ioc->last_waited == last_waited)) { /* * we want to go through our batch of * requests and stop. 
So, we copy out * the ioc->last_waited time and test * against it before looping */ last_waited = ioc->last_waited; cond_resched(); continue; } spin_lock(&device->io_lock); requeue_list(pending_bios, pending, tail); device->running_pending = 1; spin_unlock(&device->io_lock); btrfs_queue_work(fs_info->submit_workers, &device->work); goto done; } } cond_resched(); if (again) goto loop; spin_lock(&device->io_lock); if (device->pending_bios.head || device->pending_sync_bios.head) goto loop_lock; spin_unlock(&device->io_lock); done: blk_finish_plug(&plug); } static void pending_bios_fn(struct btrfs_work *work) { struct btrfs_device *device; device = container_of(work, struct btrfs_device, work); run_scheduled_bios(device); } static bool device_path_matched(const char *path, struct btrfs_device *device) { int found; rcu_read_lock(); found = strcmp(rcu_str_deref(device->name), path); rcu_read_unlock(); return found == 0; } /* * Search and remove all stale (devices which are not mounted) devices. * When both inputs are NULL, it will search and release all stale devices. * path: Optional. When provided will it release all unmounted devices * matching this path only. * skip_dev: Optional. Will skip this device when searching for the stale * devices. * Return: 0 for success or if @path is NULL. * -EBUSY if @path is a mounted device. * -ENOENT if @path does not match any device in the list. 
*/ static int btrfs_free_stale_devices(const char *path, struct btrfs_device *skip_device) { struct btrfs_fs_devices *fs_devices, *tmp_fs_devices; struct btrfs_device *device, *tmp_device; int ret = 0; if (path) ret = -ENOENT; list_for_each_entry_safe(fs_devices, tmp_fs_devices, &fs_uuids, fs_list) { mutex_lock(&fs_devices->device_list_mutex); list_for_each_entry_safe(device, tmp_device, &fs_devices->devices, dev_list) { if (skip_device && skip_device == device) continue; if (path && !device->name) continue; if (path && !device_path_matched(path, device)) continue; if (fs_devices->opened) { /* for an already deleted device return 0 */ if (path && ret != 0) ret = -EBUSY; break; } /* delete the stale device */ fs_devices->num_devices--; list_del(&device->dev_list); btrfs_free_device(device); ret = 0; if (fs_devices->num_devices == 0) break; } mutex_unlock(&fs_devices->device_list_mutex); if (fs_devices->num_devices == 0) { btrfs_sysfs_remove_fsid(fs_devices); list_del(&fs_devices->fs_list); free_fs_devices(fs_devices); } } return ret; } static int btrfs_open_one_device(struct btrfs_fs_devices *fs_devices, struct btrfs_device *device, fmode_t flags, void *holder) { struct request_queue *q; struct block_device *bdev; struct buffer_head *bh; struct btrfs_super_block *disk_super; u64 devid; int ret; if (device->bdev) return -EINVAL; if (!device->name) return -EINVAL; ret = btrfs_get_bdev_and_sb(device->name->str, flags, holder, 1, &bdev, &bh); if (ret) return ret; disk_super = (struct btrfs_super_block *)bh->b_data; devid = btrfs_stack_device_id(&disk_super->dev_item); if (devid != device->devid) goto error_brelse; if (memcmp(device->uuid, disk_super->dev_item.uuid, BTRFS_UUID_SIZE)) goto error_brelse; device->generation = btrfs_super_generation(disk_super); if (btrfs_super_flags(disk_super) & BTRFS_SUPER_FLAG_SEEDING) { if (btrfs_super_incompat_flags(disk_super) & BTRFS_FEATURE_INCOMPAT_METADATA_UUID) { pr_err( "BTRFS: Invalid seeding and uuid-changed device 
detected\n"); goto error_brelse; } clear_bit(BTRFS_DEV_STATE_WRITEABLE, &device->dev_state); fs_devices->seeding = 1; } else { if (bdev_read_only(bdev)) clear_bit(BTRFS_DEV_STATE_WRITEABLE, &device->dev_state); else set_bit(BTRFS_DEV_STATE_WRITEABLE, &device->dev_state); } q = bdev_get_queue(bdev); if (!blk_queue_nonrot(q)) fs_devices->rotating = 1; device->bdev = bdev; clear_bit(BTRFS_DEV_STATE_IN_FS_METADATA, &device->dev_state); device->mode = flags; fs_devices->open_devices++; if (test_bit(BTRFS_DEV_STATE_WRITEABLE, &device->dev_state) && device->devid != BTRFS_DEV_REPLACE_DEVID) { fs_devices->rw_devices++; list_add_tail(&device->dev_alloc_list, &fs_devices->alloc_list); } brelse(bh); return 0; error_brelse: brelse(bh); blkdev_put(bdev, flags); return -EINVAL; } /* * Handle scanned device having its CHANGING_FSID_V2 flag set and the fs_devices * being created with a disk that has already completed its fsid change. */ static struct btrfs_fs_devices *find_fsid_inprogress( struct btrfs_super_block *disk_super) { struct btrfs_fs_devices *fs_devices; list_for_each_entry(fs_devices, &fs_uuids, fs_list) { if (memcmp(fs_devices->metadata_uuid, fs_devices->fsid, BTRFS_FSID_SIZE) != 0 && memcmp(fs_devices->metadata_uuid, disk_super->fsid, BTRFS_FSID_SIZE) == 0 && !fs_devices->fsid_change) { return fs_devices; } } return NULL; } static struct btrfs_fs_devices *find_fsid_changed( struct btrfs_super_block *disk_super) { struct btrfs_fs_devices *fs_devices; /* * Handles the case where scanned device is part of an fs that had * multiple successful changes of FSID but curently device didn't * observe it. Meaning our fsid will be different than theirs. 
*/ list_for_each_entry(fs_devices, &fs_uuids, fs_list) { if (memcmp(fs_devices->metadata_uuid, fs_devices->fsid, BTRFS_FSID_SIZE) != 0 && memcmp(fs_devices->metadata_uuid, disk_super->metadata_uuid, BTRFS_FSID_SIZE) == 0 && memcmp(fs_devices->fsid, disk_super->fsid, BTRFS_FSID_SIZE) != 0) { return fs_devices; } } return NULL; } /* * Add new device to list of registered devices * * Returns: * device pointer which was just added or updated when successful * error pointer when failed */ static noinline struct btrfs_device *device_list_add(const char *path, struct btrfs_super_block *disk_super, bool *new_device_added) { struct btrfs_device *device; struct btrfs_fs_devices *fs_devices = NULL; struct rcu_string *name; u64 found_transid = btrfs_super_generation(disk_super); u64 devid = btrfs_stack_device_id(&disk_super->dev_item); bool has_metadata_uuid = (btrfs_super_incompat_flags(disk_super) & BTRFS_FEATURE_INCOMPAT_METADATA_UUID); bool fsid_change_in_progress = (btrfs_super_flags(disk_super) & BTRFS_SUPER_FLAG_CHANGING_FSID_V2); if (fsid_change_in_progress) { if (!has_metadata_uuid) { /* * When we have an image which has CHANGING_FSID_V2 set * it might belong to either a filesystem which has * disks with completed fsid change or it might belong * to fs with no UUID changes in effect, handle both. 
*/ fs_devices = find_fsid_inprogress(disk_super); if (!fs_devices) fs_devices = find_fsid(disk_super->fsid, NULL); } else { fs_devices = find_fsid_changed(disk_super); } } else if (has_metadata_uuid) { fs_devices = find_fsid(disk_super->fsid, disk_super->metadata_uuid); } else { fs_devices = find_fsid(disk_super->fsid, NULL); } if (!fs_devices) { if (has_metadata_uuid) fs_devices = alloc_fs_devices(disk_super->fsid, disk_super->metadata_uuid); else fs_devices = alloc_fs_devices(disk_super->fsid, NULL); if (IS_ERR(fs_devices)) return ERR_CAST(fs_devices); fs_devices->fsid_change = fsid_change_in_progress; mutex_lock(&fs_devices->device_list_mutex); list_add(&fs_devices->fs_list, &fs_uuids); device = NULL; } else { mutex_lock(&fs_devices->device_list_mutex); device = find_device(fs_devices, devid, disk_super->dev_item.uuid); /* * If this disk has been pulled into an fs devices created by * a device which had the CHANGING_FSID_V2 flag then replace the * metadata_uuid/fsid values of the fs_devices. 
*/ if (has_metadata_uuid && fs_devices->fsid_change && found_transid > fs_devices->latest_generation) { memcpy(fs_devices->fsid, disk_super->fsid, BTRFS_FSID_SIZE); memcpy(fs_devices->metadata_uuid, disk_super->metadata_uuid, BTRFS_FSID_SIZE); fs_devices->fsid_change = false; } } if (!device) { if (fs_devices->opened) { mutex_unlock(&fs_devices->device_list_mutex); return ERR_PTR(-EBUSY); } device = btrfs_alloc_device(NULL, &devid, disk_super->dev_item.uuid); if (IS_ERR(device)) { mutex_unlock(&fs_devices->device_list_mutex); /* we can safely leave the fs_devices entry around */ return device; } name = rcu_string_strdup(path, GFP_NOFS); if (!name) { btrfs_free_device(device); mutex_unlock(&fs_devices->device_list_mutex); return ERR_PTR(-ENOMEM); } rcu_assign_pointer(device->name, name); list_add_rcu(&device->dev_list, &fs_devices->devices); fs_devices->num_devices++; device->fs_devices = fs_devices; *new_device_added = true; if (disk_super->label[0]) pr_info("BTRFS: device label %s devid %llu transid %llu %s\n", disk_super->label, devid, found_transid, path); else pr_info("BTRFS: device fsid %pU devid %llu transid %llu %s\n", disk_super->fsid, devid, found_transid, path); } else if (!device->name || strcmp(device->name->str, path)) { /* * When FS is already mounted. * 1. If you are here and if the device->name is NULL that * means this device was missing at time of FS mount. * 2. If you are here and if the device->name is different * from 'path' that means either * a. The same device disappeared and reappeared with * different name. or * b. The missing-disk-which-was-replaced, has * reappeared now. * * We must allow 1 and 2a above. But 2b would be a spurious * and unintentional. * * Further in case of 1 and 2a above, the disk at 'path' * would have missed some transaction when it was away and * in case of 2a the stale bdev has to be updated as well. * 2b must not be allowed at all time. 
*/ /* * For now, we do allow update to btrfs_fs_device through the * btrfs dev scan cli after FS has been mounted. We're still * tracking a problem where systems fail mount by subvolume id * when we reject replacement on a mounted FS. */ if (!fs_devices->opened && found_transid < device->generation) { /* * That is if the FS is _not_ mounted and if you * are here, that means there is more than one * disk with same uuid and devid.We keep the one * with larger generation number or the last-in if * generation are equal. */ mutex_unlock(&fs_devices->device_list_mutex); return ERR_PTR(-EEXIST); } /* * We are going to replace the device path for a given devid, * make sure it's the same device if the device is mounted */ if (device->bdev) { struct block_device *path_bdev; path_bdev = lookup_bdev(path); if (IS_ERR(path_bdev)) { mutex_unlock(&fs_devices->device_list_mutex); return ERR_CAST(path_bdev); } if (device->bdev != path_bdev) { bdput(path_bdev); mutex_unlock(&fs_devices->device_list_mutex); btrfs_warn_in_rcu(device->fs_info, "duplicate device fsid:devid for %pU:%llu old:%s new:%s", disk_super->fsid, devid, rcu_str_deref(device->name), path); return ERR_PTR(-EEXIST); } bdput(path_bdev); btrfs_info_in_rcu(device->fs_info, "device fsid %pU devid %llu moved old:%s new:%s", disk_super->fsid, devid, rcu_str_deref(device->name), path); } name = rcu_string_strdup(path, GFP_NOFS); if (!name) { mutex_unlock(&fs_devices->device_list_mutex); return ERR_PTR(-ENOMEM); } rcu_string_free(device->name); rcu_assign_pointer(device->name, name); if (test_bit(BTRFS_DEV_STATE_MISSING, &device->dev_state)) { fs_devices->missing_devices--; clear_bit(BTRFS_DEV_STATE_MISSING, &device->dev_state); } } /* * Unmount does not free the btrfs_device struct but would zero * generation along with most of the other members. So just update * it back. We need it to pick the disk with largest generation * (as above). 
*/ if (!fs_devices->opened) { device->generation = found_transid; fs_devices->latest_generation = max_t(u64, found_transid, fs_devices->latest_generation); } fs_devices->total_devices = btrfs_super_num_devices(disk_super); mutex_unlock(&fs_devices->device_list_mutex); return device; } static struct btrfs_fs_devices *clone_fs_devices(struct btrfs_fs_devices *orig) { struct btrfs_fs_devices *fs_devices; struct btrfs_device *device; struct btrfs_device *orig_dev; fs_devices = alloc_fs_devices(orig->fsid, NULL); if (IS_ERR(fs_devices)) return fs_devices; mutex_lock(&orig->device_list_mutex); fs_devices->total_devices = orig->total_devices; /* We have held the volume lock, it is safe to get the devices. */ list_for_each_entry(orig_dev, &orig->devices, dev_list) { struct rcu_string *name; device = btrfs_alloc_device(NULL, &orig_dev->devid, orig_dev->uuid); if (IS_ERR(device)) goto error; /* * This is ok to do without rcu read locked because we hold the * uuid mutex so nothing we touch in here is going to disappear. */ if (orig_dev->name) { name = rcu_string_strdup(orig_dev->name->str, GFP_KERNEL); if (!name) { btrfs_free_device(device); goto error; } rcu_assign_pointer(device->name, name); } list_add(&device->dev_list, &fs_devices->devices); device->fs_devices = fs_devices; fs_devices->num_devices++; } mutex_unlock(&orig->device_list_mutex); return fs_devices; error: mutex_unlock(&orig->device_list_mutex); free_fs_devices(fs_devices); return ERR_PTR(-ENOMEM); } /* * After we have read the system tree and know devids belonging to * this filesystem, remove the device which does not belong there. */ void btrfs_free_extra_devids(struct btrfs_fs_devices *fs_devices, int step) { struct btrfs_device *device, *next; struct btrfs_device *latest_dev = NULL; mutex_lock(&uuid_mutex); again: /* This is the initialized path, it is safe to release the devices. 
*/
	list_for_each_entry_safe(device, next, &fs_devices->devices, dev_list) {
		if (test_bit(BTRFS_DEV_STATE_IN_FS_METADATA,
			     &device->dev_state)) {
			/*
			 * Device is referenced by the FS metadata: keep it,
			 * but remember the non-replace-target device with the
			 * newest generation so latest_bdev can be set below.
			 */
			if (!test_bit(BTRFS_DEV_STATE_REPLACE_TGT,
				      &device->dev_state) &&
			    (!latest_dev ||
			     device->generation > latest_dev->generation)) {
				latest_dev = device;
			}
			continue;
		}

		if (device->devid == BTRFS_DEV_REPLACE_DEVID) {
			/*
			 * In the first step, keep the device which has
			 * the correct fsid and the devid that is used
			 * for the dev_replace procedure.
			 * In the second step, the dev_replace state is
			 * read from the device tree and it is known
			 * whether the procedure is really active or
			 * not, which means whether this device is
			 * used or whether it should be removed.
			 */
			if (step == 0 || test_bit(BTRFS_DEV_STATE_REPLACE_TGT,
						  &device->dev_state)) {
				continue;
			}
		}
		/* Not referenced by metadata: release its bdev and drop it */
		if (device->bdev) {
			blkdev_put(device->bdev, device->mode);
			device->bdev = NULL;
			fs_devices->open_devices--;
		}
		if (test_bit(BTRFS_DEV_STATE_WRITEABLE, &device->dev_state)) {
			list_del_init(&device->dev_alloc_list);
			clear_bit(BTRFS_DEV_STATE_WRITEABLE, &device->dev_state);
			if (!test_bit(BTRFS_DEV_STATE_REPLACE_TGT,
				      &device->dev_state))
				fs_devices->rw_devices--;
		}
		list_del_init(&device->dev_list);
		fs_devices->num_devices--;
		btrfs_free_device(device);
	}

	/* Repeat the pruning for each chained seed device list */
	if (fs_devices->seed) {
		fs_devices = fs_devices->seed;
		goto again;
	}

	/*
	 * NOTE(review): latest_dev may still be NULL here if no device had
	 * BTRFS_DEV_STATE_IN_FS_METADATA set -- confirm callers guarantee
	 * at least one such device, otherwise this dereference can oops.
	 */
	fs_devices->latest_bdev = latest_dev->bdev;

	mutex_unlock(&uuid_mutex);
}

/* RCU callback: final free of a btrfs_device once readers are done */
static void free_device_rcu(struct rcu_head *head)
{
	struct btrfs_device *device;

	device = container_of(head, struct btrfs_device, rcu);
	btrfs_free_device(device);
}

/* Flush (when writeable) and release the block device backing @device */
static void btrfs_close_bdev(struct btrfs_device *device)
{
	if (!device->bdev)
		return;

	if (test_bit(BTRFS_DEV_STATE_WRITEABLE, &device->dev_state)) {
		sync_blockdev(device->bdev);
		invalidate_bdev(device->bdev);
	}

	blkdev_put(device->bdev, device->mode);
}

static void btrfs_close_one_device(struct btrfs_device *device)
{
	struct btrfs_fs_devices *fs_devices = device->fs_devices;
	struct btrfs_device *new_device;
	struct rcu_string *name;

	if
(device->bdev)
		fs_devices->open_devices--;

	if (test_bit(BTRFS_DEV_STATE_WRITEABLE, &device->dev_state) &&
	    device->devid != BTRFS_DEV_REPLACE_DEVID) {
		list_del_init(&device->dev_alloc_list);
		fs_devices->rw_devices--;
	}

	if (test_bit(BTRFS_DEV_STATE_MISSING, &device->dev_state))
		fs_devices->missing_devices--;

	btrfs_close_bdev(device);

	/*
	 * Replace the closed device with a fresh stub carrying the same
	 * devid/uuid (and name) so the fs_devices list stays populated
	 * across a close; the old struct is freed after an RCU grace period.
	 */
	new_device = btrfs_alloc_device(NULL, &device->devid,
					device->uuid);
	BUG_ON(IS_ERR(new_device)); /* -ENOMEM */

	/* Safe because we are under uuid_mutex */
	if (device->name) {
		name = rcu_string_strdup(device->name->str, GFP_NOFS);
		BUG_ON(!name); /* -ENOMEM */
		rcu_assign_pointer(new_device->name, name);
	}

	list_replace_rcu(&device->dev_list, &new_device->dev_list);
	new_device->fs_devices = device->fs_devices;

	call_rcu(&device->rcu, free_device_rcu);
}

/*
 * Drop one open reference; when the last reference goes away, close
 * every device on the list and reset the open/seeding state.
 */
static int close_fs_devices(struct btrfs_fs_devices *fs_devices)
{
	struct btrfs_device *device, *tmp;

	if (--fs_devices->opened > 0)
		return 0;

	mutex_lock(&fs_devices->device_list_mutex);
	list_for_each_entry_safe(device, tmp, &fs_devices->devices, dev_list) {
		btrfs_close_one_device(device);
	}
	mutex_unlock(&fs_devices->device_list_mutex);

	WARN_ON(fs_devices->open_devices);
	WARN_ON(fs_devices->rw_devices);
	fs_devices->opened = 0;
	fs_devices->seeding = 0;

	return 0;
}

int btrfs_close_devices(struct btrfs_fs_devices *fs_devices)
{
	struct btrfs_fs_devices *seed_devices = NULL;
	int ret;

	mutex_lock(&uuid_mutex);
	ret = close_fs_devices(fs_devices);
	if (!fs_devices->opened) {
		seed_devices = fs_devices->seed;
		fs_devices->seed = NULL;
	}
	mutex_unlock(&uuid_mutex);

	/* Seed chain was detached above; close and free it outside the lock */
	while (seed_devices) {
		fs_devices = seed_devices;
		seed_devices = fs_devices->seed;
		close_fs_devices(fs_devices);
		free_fs_devices(fs_devices);
	}
	return ret;
}

static int open_fs_devices(struct btrfs_fs_devices *fs_devices,
			   fmode_t flags, void *holder)
{
	struct btrfs_device *device;
	struct btrfs_device *latest_dev = NULL;
	int ret = 0;

	flags |= FMODE_EXCL;

	list_for_each_entry(device, &fs_devices->devices, dev_list) {
		/* Just open everything we can;
ignore failures here */
		if (btrfs_open_one_device(fs_devices, device, flags, holder))
			continue;

		if (!latest_dev ||
		    device->generation > latest_dev->generation)
			latest_dev = device;
	}
	if (fs_devices->open_devices == 0) {
		/* Not a single device could be opened */
		ret = -EINVAL;
		goto out;
	}
	fs_devices->opened = 1;
	fs_devices->latest_bdev = latest_dev->bdev;
	fs_devices->total_rw_bytes = 0;
out:
	return ret;
}

/* list_sort() comparator: order devices by ascending devid */
static int devid_cmp(void *priv, struct list_head *a, struct list_head *b)
{
	struct btrfs_device *dev1, *dev2;

	dev1 = list_entry(a, struct btrfs_device, dev_list);
	dev2 = list_entry(b, struct btrfs_device, dev_list);

	if (dev1->devid < dev2->devid)
		return -1;
	else if (dev1->devid > dev2->devid)
		return 1;
	return 0;
}

int btrfs_open_devices(struct btrfs_fs_devices *fs_devices,
		       fmode_t flags, void *holder)
{
	int ret;

	lockdep_assert_held(&uuid_mutex);

	mutex_lock(&fs_devices->device_list_mutex);
	if (fs_devices->opened) {
		/* Already open: just take another reference */
		fs_devices->opened++;
		ret = 0;
	} else {
		list_sort(NULL, &fs_devices->devices, devid_cmp);
		ret = open_fs_devices(fs_devices, flags, holder);
	}
	mutex_unlock(&fs_devices->device_list_mutex);

	return ret;
}

/* Undo the kmap()/page reference taken by btrfs_read_disk_super() */
static void btrfs_release_disk_super(struct page *page)
{
	kunmap(page);
	put_page(page);
}

static int btrfs_read_disk_super(struct block_device *bdev, u64 bytenr,
				 struct page **page,
				 struct btrfs_super_block **disk_super)
{
	void *p;
	pgoff_t index;

	/* make sure our super fits in the device */
	if (bytenr + PAGE_SIZE >= i_size_read(bdev->bd_inode))
		return 1;

	/* make sure our super fits in the page */
	if (sizeof(**disk_super) > PAGE_SIZE)
		return 1;

	/* make sure our super doesn't straddle pages on disk */
	index = bytenr >> PAGE_SHIFT;
	if ((bytenr + sizeof(**disk_super) - 1) >> PAGE_SHIFT != index)
		return 1;

	/* pull in the page with our super */
	*page = read_cache_page_gfp(bdev->bd_inode->i_mapping,
				    index, GFP_KERNEL);

	if (IS_ERR_OR_NULL(*page))
		return 1;

	p = kmap(*page);

	/* align our pointer to the offset of the super block */
	*disk_super = p + offset_in_page(bytenr);

	if (btrfs_super_bytenr(*disk_super)
!= bytenr ||
	    btrfs_super_magic(*disk_super) != BTRFS_MAGIC) {
		btrfs_release_disk_super(*page);
		return 1;
	}

	/* Force a NUL terminator on a full-length label */
	if ((*disk_super)->label[0] &&
	    (*disk_super)->label[BTRFS_LABEL_SIZE - 1])
		(*disk_super)->label[BTRFS_LABEL_SIZE - 1] = '\0';

	return 0;
}

/*
 * Look for a btrfs signature on a device. This may be called out of the mount path
 * and we are not allowed to call set_blocksize during the scan. The superblock
 * is read via pagecache
 */
struct btrfs_device *btrfs_scan_one_device(const char *path, fmode_t flags,
					   void *holder)
{
	struct btrfs_super_block *disk_super;
	bool new_device_added = false;
	struct btrfs_device *device = NULL;
	struct block_device *bdev;
	struct page *page;
	u64 bytenr;

	lockdep_assert_held(&uuid_mutex);

	/*
	 * we would like to check all the supers, but that would make
	 * a btrfs mount succeed after a mkfs from a different FS.
	 * So, we need to add a special mount option to scan for
	 * later supers, using BTRFS_SUPER_MIRROR_MAX instead
	 */
	bytenr = btrfs_sb_offset(0);
	flags |= FMODE_EXCL;

	bdev = blkdev_get_by_path(path, flags, holder);
	if (IS_ERR(bdev))
		return ERR_CAST(bdev);

	if (btrfs_read_disk_super(bdev, bytenr, &page, &disk_super)) {
		device = ERR_PTR(-EINVAL);
		goto error_bdev_put;
	}

	/* Register (or update) the device in the global fs_uuids lists */
	device = device_list_add(path, disk_super, &new_device_added);
	if (!IS_ERR(device)) {
		if (new_device_added)
			btrfs_free_stale_devices(path, device);
	}

	btrfs_release_disk_super(page);

error_bdev_put:
	blkdev_put(bdev, flags);

	return device;
}

static int contains_pending_extent(struct btrfs_transaction *transaction,
				   struct btrfs_device *device,
				   u64 *start, u64 len)
{
	struct btrfs_fs_info *fs_info = device->fs_info;
	struct extent_map *em;
	struct list_head *search_list = &fs_info->pinned_chunks;
	int ret = 0;
	u64 physical_start = *start;

	if (transaction)
		search_list = &transaction->pending_chunks;
again:
	list_for_each_entry(em, search_list, list) {
		struct map_lookup *map;
		int i;

		map = em->map_lookup;
		for (i = 0; i < map->num_stripes; i++) {
			u64 end;

			if (map->stripes[i].dev != device)
continue; if (map->stripes[i].physical >= physical_start + len || map->stripes[i].physical + em->orig_block_len <= physical_start) continue; /* * Make sure that while processing the pinned list we do * not override our *start with a lower value, because * we can have pinned chunks that fall within this * device hole and that have lower physical addresses * than the pending chunks we processed before. If we * do not take this special care we can end up getting * 2 pending chunks that start at the same physical * device offsets because the end offset of a pinned * chunk can be equal to the start offset of some * pending chunk. */ end = map->stripes[i].physical + em->orig_block_len; if (end > *start) { *start = end; ret = 1; } } } if (search_list != &fs_info->pinned_chunks) { search_list = &fs_info->pinned_chunks; goto again; } return ret; } /* * find_free_dev_extent_start - find free space in the specified device * @device: the device which we search the free space in * @num_bytes: the size of the free space that we need * @search_start: the position from which to begin the search * @start: store the start of the free space. * @len: the size of the free space. that we find, or the size * of the max free space if we don't find suitable free space * * this uses a pretty simple search, the expectation is that it is * called very infrequently and that a given device has a small number * of extents * * @start is used to store the start of the free space if we find. But if we * don't find suitable free space, it will be used to store the start position * of the max free space. * * @len is used to store the size of the free space that we find. * But if we don't find suitable free space, it is used to store the size of * the max free space. 
*/ int find_free_dev_extent_start(struct btrfs_transaction *transaction, struct btrfs_device *device, u64 num_bytes, u64 search_start, u64 *start, u64 *len) { struct btrfs_fs_info *fs_info = device->fs_info; struct btrfs_root *root = fs_info->dev_root; struct btrfs_key key; struct btrfs_dev_extent *dev_extent; struct btrfs_path *path; u64 hole_size; u64 max_hole_start; u64 max_hole_size; u64 extent_end; u64 search_end = device->total_bytes; int ret; int slot; struct extent_buffer *l; /* * We don't want to overwrite the superblock on the drive nor any area * used by the boot loader (grub for example), so we make sure to start * at an offset of at least 1MB. */ search_start = max_t(u64, search_start, SZ_1M); path = btrfs_alloc_path(); if (!path) return -ENOMEM; max_hole_start = search_start; max_hole_size = 0; again: if (search_start >= search_end || test_bit(BTRFS_DEV_STATE_REPLACE_TGT, &device->dev_state)) { ret = -ENOSPC; goto out; } path->reada = READA_FORWARD; path->search_commit_root = 1; path->skip_locking = 1; key.objectid = device->devid; key.offset = search_start; key.type = BTRFS_DEV_EXTENT_KEY; ret = btrfs_search_slot(NULL, root, &key, path, 0, 0); if (ret < 0) goto out; if (ret > 0) { ret = btrfs_previous_item(root, path, key.objectid, key.type); if (ret < 0) goto out; } while (1) { l = path->nodes[0]; slot = path->slots[0]; if (slot >= btrfs_header_nritems(l)) { ret = btrfs_next_leaf(root, path); if (ret == 0) continue; if (ret < 0) goto out; break; } btrfs_item_key_to_cpu(l, &key, slot); if (key.objectid < device->devid) goto next; if (key.objectid > device->devid) break; if (key.type != BTRFS_DEV_EXTENT_KEY) goto next; if (key.offset > search_start) { hole_size = key.offset - search_start; /* * Have to check before we set max_hole_start, otherwise * we could end up sending back this offset anyway. 
*/ if (contains_pending_extent(transaction, device, &search_start, hole_size)) { if (key.offset >= search_start) { hole_size = key.offset - search_start; } else { WARN_ON_ONCE(1); hole_size = 0; } } if (hole_size > max_hole_size) { max_hole_start = search_start; max_hole_size = hole_size; } /* * If this free space is greater than which we need, * it must be the max free space that we have found * until now, so max_hole_start must point to the start * of this free space and the length of this free space * is stored in max_hole_size. Thus, we return * max_hole_start and max_hole_size and go back to the * caller. */ if (hole_size >= num_bytes) { ret = 0; goto out; } } dev_extent = btrfs_item_ptr(l, slot, struct btrfs_dev_extent); extent_end = key.offset + btrfs_dev_extent_length(l, dev_extent); if (extent_end > search_start) search_start = extent_end; next: path->slots[0]++; cond_resched(); } /* * At this point, search_start should be the end of * allocated dev extents, and when shrinking the device, * search_end may be smaller than search_start. */ if (search_end > search_start) { hole_size = search_end - search_start; if (contains_pending_extent(transaction, device, &search_start, hole_size)) { btrfs_release_path(path); goto again; } if (hole_size > max_hole_size) { max_hole_start = search_start; max_hole_size = hole_size; } } /* See above. 
*/ if (max_hole_size < num_bytes) ret = -ENOSPC; else ret = 0; out: btrfs_free_path(path); *start = max_hole_start; if (len) *len = max_hole_size; return ret; } int find_free_dev_extent(struct btrfs_trans_handle *trans, struct btrfs_device *device, u64 num_bytes, u64 *start, u64 *len) { /* FIXME use last free of some kind */ return find_free_dev_extent_start(trans->transaction, device, num_bytes, 0, start, len); } static int btrfs_free_dev_extent(struct btrfs_trans_handle *trans, struct btrfs_device *device, u64 start, u64 *dev_extent_len) { struct btrfs_fs_info *fs_info = device->fs_info; struct btrfs_root *root = fs_info->dev_root; int ret; struct btrfs_path *path; struct btrfs_key key; struct btrfs_key found_key; struct extent_buffer *leaf = NULL; struct btrfs_dev_extent *extent = NULL; path = btrfs_alloc_path(); if (!path) return -ENOMEM; key.objectid = device->devid; key.offset = start; key.type = BTRFS_DEV_EXTENT_KEY; again: ret = btrfs_search_slot(trans, root, &key, path, -1, 1); if (ret > 0) { ret = btrfs_previous_item(root, path, key.objectid, BTRFS_DEV_EXTENT_KEY); if (ret) goto out; leaf = path->nodes[0]; btrfs_item_key_to_cpu(leaf, &found_key, path->slots[0]); extent = btrfs_item_ptr(leaf, path->slots[0], struct btrfs_dev_extent); BUG_ON(found_key.offset > start || found_key.offset + btrfs_dev_extent_length(leaf, extent) < start); key = found_key; btrfs_release_path(path); goto again; } else if (ret == 0) { leaf = path->nodes[0]; extent = btrfs_item_ptr(leaf, path->slots[0], struct btrfs_dev_extent); } else { btrfs_handle_fs_error(fs_info, ret, "Slot search failed"); goto out; } *dev_extent_len = btrfs_dev_extent_length(leaf, extent); ret = btrfs_del_item(trans, root, path); if (ret) { btrfs_handle_fs_error(fs_info, ret, "Failed to remove dev extent item"); } else { set_bit(BTRFS_TRANS_HAVE_FREE_BGS, &trans->transaction->flags); } out: btrfs_free_path(path); return ret; } static int btrfs_alloc_dev_extent(struct btrfs_trans_handle *trans, struct 
btrfs_device *device, u64 chunk_offset, u64 start, u64 num_bytes) { int ret; struct btrfs_path *path; struct btrfs_fs_info *fs_info = device->fs_info; struct btrfs_root *root = fs_info->dev_root; struct btrfs_dev_extent *extent; struct extent_buffer *leaf; struct btrfs_key key; WARN_ON(!test_bit(BTRFS_DEV_STATE_IN_FS_METADATA, &device->dev_state)); WARN_ON(test_bit(BTRFS_DEV_STATE_REPLACE_TGT, &device->dev_state)); path = btrfs_alloc_path(); if (!path) return -ENOMEM; key.objectid = device->devid; key.offset = start; key.type = BTRFS_DEV_EXTENT_KEY; ret = btrfs_insert_empty_item(trans, root, path, &key, sizeof(*extent)); if (ret) goto out; leaf = path->nodes[0]; extent = btrfs_item_ptr(leaf, path->slots[0], struct btrfs_dev_extent); btrfs_set_dev_extent_chunk_tree(leaf, extent, BTRFS_CHUNK_TREE_OBJECTID); btrfs_set_dev_extent_chunk_objectid(leaf, extent, BTRFS_FIRST_CHUNK_TREE_OBJECTID); btrfs_set_dev_extent_chunk_offset(leaf, extent, chunk_offset); btrfs_set_dev_extent_length(leaf, extent, num_bytes); btrfs_mark_buffer_dirty(leaf); out: btrfs_free_path(path); return ret; } static u64 find_next_chunk(struct btrfs_fs_info *fs_info) { struct extent_map_tree *em_tree; struct extent_map *em; struct rb_node *n; u64 ret = 0; em_tree = &fs_info->mapping_tree.map_tree; read_lock(&em_tree->lock); n = rb_last(&em_tree->map.rb_root); if (n) { em = rb_entry(n, struct extent_map, rb_node); ret = em->start + em->len; } read_unlock(&em_tree->lock); return ret; } static noinline int find_next_devid(struct btrfs_fs_info *fs_info, u64 *devid_ret) { int ret; struct btrfs_key key; struct btrfs_key found_key; struct btrfs_path *path; path = btrfs_alloc_path(); if (!path) return -ENOMEM; key.objectid = BTRFS_DEV_ITEMS_OBJECTID; key.type = BTRFS_DEV_ITEM_KEY; key.offset = (u64)-1; ret = btrfs_search_slot(NULL, fs_info->chunk_root, &key, path, 0, 0); if (ret < 0) goto error; BUG_ON(ret == 0); /* Corruption */ ret = btrfs_previous_item(fs_info->chunk_root, path, BTRFS_DEV_ITEMS_OBJECTID, 
BTRFS_DEV_ITEM_KEY); if (ret) { *devid_ret = 1; } else { btrfs_item_key_to_cpu(path->nodes[0], &found_key, path->slots[0]); *devid_ret = found_key.offset + 1; } ret = 0; error: btrfs_free_path(path); return ret; } /* * the device information is stored in the chunk root * the btrfs_device struct should be fully filled in */ static int btrfs_add_dev_item(struct btrfs_trans_handle *trans, struct btrfs_device *device) { int ret; struct btrfs_path *path; struct btrfs_dev_item *dev_item; struct extent_buffer *leaf; struct btrfs_key key; unsigned long ptr; path = btrfs_alloc_path(); if (!path) return -ENOMEM; key.objectid = BTRFS_DEV_ITEMS_OBJECTID; key.type = BTRFS_DEV_ITEM_KEY; key.offset = device->devid; ret = btrfs_insert_empty_item(trans, trans->fs_info->chunk_root, path, &key, sizeof(*dev_item)); if (ret) goto out; leaf = path->nodes[0]; dev_item = btrfs_item_ptr(leaf, path->slots[0], struct btrfs_dev_item); btrfs_set_device_id(leaf, dev_item, device->devid); btrfs_set_device_generation(leaf, dev_item, 0); btrfs_set_device_type(leaf, dev_item, device->type); btrfs_set_device_io_align(leaf, dev_item, device->io_align); btrfs_set_device_io_width(leaf, dev_item, device->io_width); btrfs_set_device_sector_size(leaf, dev_item, device->sector_size); btrfs_set_device_total_bytes(leaf, dev_item, btrfs_device_get_disk_total_bytes(device)); btrfs_set_device_bytes_used(leaf, dev_item, btrfs_device_get_bytes_used(device)); btrfs_set_device_group(leaf, dev_item, 0); btrfs_set_device_seek_speed(leaf, dev_item, 0); btrfs_set_device_bandwidth(leaf, dev_item, 0); btrfs_set_device_start_offset(leaf, dev_item, 0); ptr = btrfs_device_uuid(dev_item); write_extent_buffer(leaf, device->uuid, ptr, BTRFS_UUID_SIZE); ptr = btrfs_device_fsid(dev_item); write_extent_buffer(leaf, trans->fs_info->fs_devices->metadata_uuid, ptr, BTRFS_FSID_SIZE); btrfs_mark_buffer_dirty(leaf); ret = 0; out: btrfs_free_path(path); return ret; } /* * Function to update ctime/mtime for a given device path. 
 * Mainly used for ctime/mtime based probe like libblkid.
 */
static void update_dev_time(const char *path_name)
{
	struct file *filp;

	filp = filp_open(path_name, O_RDWR, 0);
	if (IS_ERR(filp))
		return;
	/* Opening and touching the file refreshes its timestamps */
	file_update_time(filp);
	filp_close(filp, NULL);
}

/*
 * Delete the dev item for @device from the chunk tree, in its own
 * transaction. Returns 0 on success or a negative errno.
 */
static int btrfs_rm_dev_item(struct btrfs_fs_info *fs_info,
			     struct btrfs_device *device)
{
	struct btrfs_root *root = fs_info->chunk_root;
	int ret;
	struct btrfs_path *path;
	struct btrfs_key key;
	struct btrfs_trans_handle *trans;

	path = btrfs_alloc_path();
	if (!path)
		return -ENOMEM;

	trans = btrfs_start_transaction(root, 0);
	if (IS_ERR(trans)) {
		btrfs_free_path(path);
		return PTR_ERR(trans);
	}
	key.objectid = BTRFS_DEV_ITEMS_OBJECTID;
	key.type = BTRFS_DEV_ITEM_KEY;
	key.offset = device->devid;

	ret = btrfs_search_slot(trans, root, &key, path, -1, 1);
	if (ret) {
		/* Item not found (ret > 0) is an error here */
		if (ret > 0)
			ret = -ENOENT;
		btrfs_abort_transaction(trans, ret);
		btrfs_end_transaction(trans);
		goto out;
	}

	ret = btrfs_del_item(trans, root, path);
	if (ret) {
		btrfs_abort_transaction(trans, ret);
		btrfs_end_transaction(trans);
	}

out:
	btrfs_free_path(path);
	/* Only commit when the deletion succeeded */
	if (!ret)
		ret = btrfs_commit_transaction(trans);
	return ret;
}

/*
 * Verify that @num_devices satisfies the RAID profile constraints in the whole
 * filesystem. It's up to the caller to adjust that number regarding eg. device
 * replace.
 */
static int btrfs_check_raid_min_devices(struct btrfs_fs_info *fs_info,
		u64 num_devices)
{
	u64 all_avail;
	unsigned seq;
	int i;

	/* Snapshot the in-use allocation profile bits consistently */
	do {
		seq = read_seqbegin(&fs_info->profiles_lock);

		all_avail = fs_info->avail_data_alloc_bits |
			    fs_info->avail_system_alloc_bits |
			    fs_info->avail_metadata_alloc_bits;
	} while (read_seqretry(&fs_info->profiles_lock, seq));

	for (i = 0; i < BTRFS_NR_RAID_TYPES; i++) {
		if (!(all_avail & btrfs_raid_array[i].bg_flag))
			continue;

		/* Too few devices for this profile: report its error code */
		if (num_devices < btrfs_raid_array[i].devs_min) {
			int ret = btrfs_raid_array[i].mindev_error;

			if (ret)
				return ret;
		}
	}

	return 0;
}

/*
 * Return any device on the list other than @device that is present
 * (not missing) and has an open bdev, or NULL if there is none.
 */
static struct btrfs_device * btrfs_find_next_active_device(
		struct btrfs_fs_devices *fs_devs, struct btrfs_device *device)
{
	struct btrfs_device *next_device;

	list_for_each_entry(next_device, &fs_devs->devices, dev_list) {
		if (next_device != device &&
		    !test_bit(BTRFS_DEV_STATE_MISSING, &next_device->dev_state)
		    && next_device->bdev)
			return next_device;
	}

	return NULL;
}

/*
 * Helper function to check if the given device is part of s_bdev / latest_bdev
 * and replace it with the provided or the next active device, in the context
 * where this function called, there should be always be another device (or
 * this_dev) which is active.
 */
void btrfs_assign_next_active_device(struct btrfs_device *device,
				     struct btrfs_device *this_dev)
{
	struct btrfs_fs_info *fs_info = device->fs_info;
	struct btrfs_device *next_device;

	if (this_dev)
		next_device = this_dev;
	else
		next_device = btrfs_find_next_active_device(fs_info->fs_devices,
							    device);
	ASSERT(next_device);

	if (fs_info->sb->s_bdev &&
	    (fs_info->sb->s_bdev == device->bdev))
		fs_info->sb->s_bdev = next_device->bdev;

	if (fs_info->fs_devices->latest_bdev == device->bdev)
		fs_info->fs_devices->latest_bdev = next_device->bdev;
}

/*
 * Return btrfs_fs_devices::num_devices excluding the device that's being
 * currently replaced.
 */
static u64 btrfs_num_devices(struct btrfs_fs_info *fs_info)
{
	u64 num_devices = fs_info->fs_devices->num_devices;

	down_read(&fs_info->dev_replace.rwsem);
	if (btrfs_dev_replace_is_ongoing(&fs_info->dev_replace)) {
		/* The replace target is counted in num_devices; hide it */
		ASSERT(num_devices > 1);
		num_devices--;
	}
	up_read(&fs_info->dev_replace.rwsem);

	return num_devices;
}

int btrfs_rm_device(struct btrfs_fs_info *fs_info, const char *device_path,
		u64 devid)
{
	struct btrfs_device *device;
	struct btrfs_fs_devices *cur_devices;
	struct btrfs_fs_devices *fs_devices = fs_info->fs_devices;
	u64 num_devices;
	int ret = 0;

	mutex_lock(&uuid_mutex);

	num_devices = btrfs_num_devices(fs_info);

	/* Removing one device must still satisfy the RAID minimums */
	ret = btrfs_check_raid_min_devices(fs_info, num_devices - 1);
	if (ret)
		goto out;

	device = btrfs_find_device_by_devspec(fs_info, devid, device_path);

	if (IS_ERR(device)) {
		if (PTR_ERR(device) == -ENOENT &&
		    strcmp(device_path, "missing") == 0)
			ret = BTRFS_ERROR_DEV_MISSING_NOT_FOUND;
		else
			ret = PTR_ERR(device);
		goto out;
	}

	if (btrfs_pinned_by_swapfile(fs_info, device)) {
		btrfs_warn_in_rcu(fs_info,
	  "cannot remove device %s (devid %llu) due to active swapfile",
				  rcu_str_deref(device->name), device->devid);
		ret = -ETXTBSY;
		goto out;
	}

	if (test_bit(BTRFS_DEV_STATE_REPLACE_TGT, &device->dev_state)) {
		ret = BTRFS_ERROR_DEV_TGT_REPLACE;
		goto out;
	}

	if (test_bit(BTRFS_DEV_STATE_WRITEABLE, &device->dev_state) &&
	    fs_info->fs_devices->rw_devices == 1) {
		ret = BTRFS_ERROR_DEV_ONLY_WRITABLE;
		goto out;
	}

	if (test_bit(BTRFS_DEV_STATE_WRITEABLE, &device->dev_state)) {
		mutex_lock(&fs_info->chunk_mutex);
		list_del_init(&device->dev_alloc_list);
		device->fs_devices->rw_devices--;
		mutex_unlock(&fs_info->chunk_mutex);
	}

	/* uuid_mutex is dropped around the (potentially long) shrink */
	mutex_unlock(&uuid_mutex);
	ret = btrfs_shrink_device(device, 0);
	mutex_lock(&uuid_mutex);
	if (ret)
		goto error_undo;

	/*
	 * TODO: the superblock still includes this device in its num_devices
	 * counter although write_all_supers() is not locked out. This
	 * could give a filesystem state which requires a degraded mount.
*/ ret = btrfs_rm_dev_item(fs_info, device); if (ret) goto error_undo; clear_bit(BTRFS_DEV_STATE_IN_FS_METADATA, &device->dev_state); btrfs_scrub_cancel_dev(fs_info, device); /* * the device list mutex makes sure that we don't change * the device list while someone else is writing out all * the device supers. Whoever is writing all supers, should * lock the device list mutex before getting the number of * devices in the super block (super_copy). Conversely, * whoever updates the number of devices in the super block * (super_copy) should hold the device list mutex. */ /* * In normal cases the cur_devices == fs_devices. But in case * of deleting a seed device, the cur_devices should point to * its own fs_devices listed under the fs_devices->seed. */ cur_devices = device->fs_devices; mutex_lock(&fs_devices->device_list_mutex); list_del_rcu(&device->dev_list); cur_devices->num_devices--; cur_devices->total_devices--; /* Update total_devices of the parent fs_devices if it's seed */ if (cur_devices != fs_devices) fs_devices->total_devices--; if (test_bit(BTRFS_DEV_STATE_MISSING, &device->dev_state)) cur_devices->missing_devices--; btrfs_assign_next_active_device(device, NULL); if (device->bdev) { cur_devices->open_devices--; /* remove sysfs entry */ btrfs_sysfs_rm_device_link(fs_devices, device); } num_devices = btrfs_super_num_devices(fs_info->super_copy) - 1; btrfs_set_super_num_devices(fs_info->super_copy, num_devices); mutex_unlock(&fs_devices->device_list_mutex); /* * at this point, the device is zero sized and detached from * the devices list. All that's left is to zero out the old * supers and free the device. 
*/ if (test_bit(BTRFS_DEV_STATE_WRITEABLE, &device->dev_state)) btrfs_scratch_superblocks(device->bdev, device->name->str); btrfs_close_bdev(device); call_rcu(&device->rcu, free_device_rcu); if (cur_devices->open_devices == 0) { while (fs_devices) { if (fs_devices->seed == cur_devices) { fs_devices->seed = cur_devices->seed; break; } fs_devices = fs_devices->seed; } cur_devices->seed = NULL; close_fs_devices(cur_devices); free_fs_devices(cur_devices); } out: mutex_unlock(&uuid_mutex); return ret; error_undo: if (test_bit(BTRFS_DEV_STATE_WRITEABLE, &device->dev_state)) { mutex_lock(&fs_info->chunk_mutex); list_add(&device->dev_alloc_list, &fs_devices->alloc_list); device->fs_devices->rw_devices++; mutex_unlock(&fs_info->chunk_mutex); } goto out; } void btrfs_rm_dev_replace_remove_srcdev(struct btrfs_device *srcdev) { struct btrfs_fs_devices *fs_devices; lockdep_assert_held(&srcdev->fs_info->fs_devices->device_list_mutex); /* * in case of fs with no seed, srcdev->fs_devices will point * to fs_devices of fs_info. However when the dev being replaced is * a seed dev it will point to the seed's local fs_devices. In short * srcdev will have its correct fs_devices in both the cases. 
*/ fs_devices = srcdev->fs_devices; list_del_rcu(&srcdev->dev_list); list_del(&srcdev->dev_alloc_list); fs_devices->num_devices--; if (test_bit(BTRFS_DEV_STATE_MISSING, &srcdev->dev_state)) fs_devices->missing_devices--; if (test_bit(BTRFS_DEV_STATE_WRITEABLE, &srcdev->dev_state)) fs_devices->rw_devices--; if (srcdev->bdev) fs_devices->open_devices--; } void btrfs_rm_dev_replace_free_srcdev(struct btrfs_fs_info *fs_info, struct btrfs_device *srcdev) { struct btrfs_fs_devices *fs_devices = srcdev->fs_devices; if (test_bit(BTRFS_DEV_STATE_WRITEABLE, &srcdev->dev_state)) { /* zero out the old super if it is writable */ btrfs_scratch_superblocks(srcdev->bdev, srcdev->name->str); } btrfs_close_bdev(srcdev); call_rcu(&srcdev->rcu, free_device_rcu); /* if this is no devs we rather delete the fs_devices */ if (!fs_devices->num_devices) { struct btrfs_fs_devices *tmp_fs_devices; /* * On a mounted FS, num_devices can't be zero unless it's a * seed. In case of a seed device being replaced, the replace * target added to the sprout FS, so there will be no more * device left under the seed FS. 
*/ ASSERT(fs_devices->seeding); tmp_fs_devices = fs_info->fs_devices; while (tmp_fs_devices) { if (tmp_fs_devices->seed == fs_devices) { tmp_fs_devices->seed = fs_devices->seed; break; } tmp_fs_devices = tmp_fs_devices->seed; } fs_devices->seed = NULL; close_fs_devices(fs_devices); free_fs_devices(fs_devices); } } void btrfs_destroy_dev_replace_tgtdev(struct btrfs_device *tgtdev) { struct btrfs_fs_devices *fs_devices = tgtdev->fs_info->fs_devices; WARN_ON(!tgtdev); mutex_lock(&fs_devices->device_list_mutex); btrfs_sysfs_rm_device_link(fs_devices, tgtdev); if (tgtdev->bdev) fs_devices->open_devices--; fs_devices->num_devices--; btrfs_assign_next_active_device(tgtdev, NULL); list_del_rcu(&tgtdev->dev_list); mutex_unlock(&fs_devices->device_list_mutex); /* * The update_dev_time() with in btrfs_scratch_superblocks() * may lead to a call to btrfs_show_devname() which will try * to hold device_list_mutex. And here this device * is already out of device list, so we don't have to hold * the device_list_mutex lock. 
*/ btrfs_scratch_superblocks(tgtdev->bdev, tgtdev->name->str); btrfs_close_bdev(tgtdev); call_rcu(&tgtdev->rcu, free_device_rcu); } static struct btrfs_device *btrfs_find_device_by_path( struct btrfs_fs_info *fs_info, const char *device_path) { int ret = 0; struct btrfs_super_block *disk_super; u64 devid; u8 *dev_uuid; struct block_device *bdev; struct buffer_head *bh; struct btrfs_device *device; ret = btrfs_get_bdev_and_sb(device_path, FMODE_READ, fs_info->bdev_holder, 0, &bdev, &bh); if (ret) return ERR_PTR(ret); disk_super = (struct btrfs_super_block *)bh->b_data; devid = btrfs_stack_device_id(&disk_super->dev_item); dev_uuid = disk_super->dev_item.uuid; if (btrfs_fs_incompat(fs_info, METADATA_UUID)) device = btrfs_find_device(fs_info->fs_devices, devid, dev_uuid, disk_super->metadata_uuid); else device = btrfs_find_device(fs_info->fs_devices, devid, dev_uuid, disk_super->fsid); brelse(bh); if (!device) device = ERR_PTR(-ENOENT); blkdev_put(bdev, FMODE_READ); return device; } /* * Lookup a device given by device id, or the path if the id is 0. */ struct btrfs_device *btrfs_find_device_by_devspec( struct btrfs_fs_info *fs_info, u64 devid, const char *device_path) { struct btrfs_device *device; if (devid) { device = btrfs_find_device(fs_info->fs_devices, devid, NULL, NULL); if (!device) return ERR_PTR(-ENOENT); return device; } if (!device_path || !device_path[0]) return ERR_PTR(-EINVAL); if (strcmp(device_path, "missing") == 0) { /* Find first missing device */ list_for_each_entry(device, &fs_info->fs_devices->devices, dev_list) { if (test_bit(BTRFS_DEV_STATE_IN_FS_METADATA, &device->dev_state) && !device->bdev) return device; } return ERR_PTR(-ENOENT); } return btrfs_find_device_by_path(fs_info, device_path); } /* * does all the dirty work required for changing file system's UUID. 
*/
static int btrfs_prepare_sprout(struct btrfs_fs_info *fs_info)
{
	struct btrfs_fs_devices *fs_devices = fs_info->fs_devices;
	struct btrfs_fs_devices *old_devices;
	struct btrfs_fs_devices *seed_devices;
	struct btrfs_super_block *disk_super = fs_info->super_copy;
	struct btrfs_device *device;
	u64 super_flags;

	lockdep_assert_held(&uuid_mutex);
	/* Sprouting is only valid on a filesystem mounted from seed devices */
	if (!fs_devices->seeding)
		return -EINVAL;

	/* New fs_devices that will represent the old (seed) identity */
	seed_devices = alloc_fs_devices(NULL, NULL);
	if (IS_ERR(seed_devices))
		return PTR_ERR(seed_devices);

	/*
	 * Clone the current device set so the old fsid stays reachable via
	 * the global fs_uuids list.
	 */
	old_devices = clone_fs_devices(fs_devices);
	if (IS_ERR(old_devices)) {
		kfree(seed_devices);
		return PTR_ERR(old_devices);
	}

	list_add(&old_devices->fs_list, &fs_uuids);

	/* Transfer the currently-open device state over to seed_devices */
	memcpy(seed_devices, fs_devices, sizeof(*seed_devices));
	seed_devices->opened = 1;
	INIT_LIST_HEAD(&seed_devices->devices);
	INIT_LIST_HEAD(&seed_devices->alloc_list);
	mutex_init(&seed_devices->device_list_mutex);

	/*
	 * Lock order here is device_list_mutex, then chunk_mutex — the
	 * device list is spliced RCU-safely, the alloc list under
	 * chunk_mutex.
	 */
	mutex_lock(&fs_devices->device_list_mutex);
	list_splice_init_rcu(&fs_devices->devices, &seed_devices->devices,
			     synchronize_rcu);
	list_for_each_entry(device, &seed_devices->devices, dev_list)
		device->fs_devices = seed_devices;

	mutex_lock(&fs_info->chunk_mutex);
	list_splice_init(&fs_devices->alloc_list, &seed_devices->alloc_list);
	mutex_unlock(&fs_info->chunk_mutex);

	/* fs_devices now describes the sprout: emptied, no longer seeding */
	fs_devices->seeding = 0;
	fs_devices->num_devices = 0;
	fs_devices->open_devices = 0;
	fs_devices->missing_devices = 0;
	fs_devices->rotating = 0;
	fs_devices->seed = seed_devices;

	/* Give the sprouted filesystem a brand new fsid */
	generate_random_uuid(fs_devices->fsid);
	memcpy(fs_devices->metadata_uuid, fs_devices->fsid, BTRFS_FSID_SIZE);
	memcpy(disk_super->fsid, fs_devices->fsid, BTRFS_FSID_SIZE);
	mutex_unlock(&fs_devices->device_list_mutex);

	/* Clear the on-disk SEEDING flag of the sprout's superblock */
	super_flags = btrfs_super_flags(disk_super) &
		      ~BTRFS_SUPER_FLAG_SEEDING;
	btrfs_set_super_flags(disk_super, super_flags);

	return 0;
}

/*
 * Store the expected generation for seed devices in device items.
*/ static int btrfs_finish_sprout(struct btrfs_trans_handle *trans, struct btrfs_fs_info *fs_info) { struct btrfs_root *root = fs_info->chunk_root; struct btrfs_path *path; struct extent_buffer *leaf; struct btrfs_dev_item *dev_item; struct btrfs_device *device; struct btrfs_key key; u8 fs_uuid[BTRFS_FSID_SIZE]; u8 dev_uuid[BTRFS_UUID_SIZE]; u64 devid; int ret; path = btrfs_alloc_path(); if (!path) return -ENOMEM; key.objectid = BTRFS_DEV_ITEMS_OBJECTID; key.offset = 0; key.type = BTRFS_DEV_ITEM_KEY; while (1) { ret = btrfs_search_slot(trans, root, &key, path, 0, 1); if (ret < 0) goto error; leaf = path->nodes[0]; next_slot: if (path->slots[0] >= btrfs_header_nritems(leaf)) { ret = btrfs_next_leaf(root, path); if (ret > 0) break; if (ret < 0) goto error; leaf = path->nodes[0]; btrfs_item_key_to_cpu(leaf, &key, path->slots[0]); btrfs_release_path(path); continue; } btrfs_item_key_to_cpu(leaf, &key, path->slots[0]); if (key.objectid != BTRFS_DEV_ITEMS_OBJECTID || key.type != BTRFS_DEV_ITEM_KEY) break; dev_item = btrfs_item_ptr(leaf, path->slots[0], struct btrfs_dev_item); devid = btrfs_device_id(leaf, dev_item); read_extent_buffer(leaf, dev_uuid, btrfs_device_uuid(dev_item), BTRFS_UUID_SIZE); read_extent_buffer(leaf, fs_uuid, btrfs_device_fsid(dev_item), BTRFS_FSID_SIZE); device = btrfs_find_device(fs_info->fs_devices, devid, dev_uuid, fs_uuid); BUG_ON(!device); /* Logic error */ if (device->fs_devices->seeding) { btrfs_set_device_generation(leaf, dev_item, device->generation); btrfs_mark_buffer_dirty(leaf); } path->slots[0]++; goto next_slot; } ret = 0; error: btrfs_free_path(path); return ret; } int btrfs_init_new_device(struct btrfs_fs_info *fs_info, const char *device_path) { struct btrfs_root *root = fs_info->dev_root; struct request_queue *q; struct btrfs_trans_handle *trans; struct btrfs_device *device; struct block_device *bdev; struct super_block *sb = fs_info->sb; struct rcu_string *name; struct btrfs_fs_devices *fs_devices = fs_info->fs_devices; u64 
orig_super_total_bytes; u64 orig_super_num_devices; int seeding_dev = 0; int ret = 0; bool unlocked = false; if (sb_rdonly(sb) && !fs_devices->seeding) return -EROFS; bdev = blkdev_get_by_path(device_path, FMODE_WRITE | FMODE_EXCL, fs_info->bdev_holder); if (IS_ERR(bdev)) return PTR_ERR(bdev); if (fs_devices->seeding) { seeding_dev = 1; down_write(&sb->s_umount); mutex_lock(&uuid_mutex); } filemap_write_and_wait(bdev->bd_inode->i_mapping); mutex_lock(&fs_devices->device_list_mutex); list_for_each_entry(device, &fs_devices->devices, dev_list) { if (device->bdev == bdev) { ret = -EEXIST; mutex_unlock( &fs_devices->device_list_mutex); goto error; } } mutex_unlock(&fs_devices->device_list_mutex); device = btrfs_alloc_device(fs_info, NULL, NULL); if (IS_ERR(device)) { /* we can safely leave the fs_devices entry around */ ret = PTR_ERR(device); goto error; } name = rcu_string_strdup(device_path, GFP_KERNEL); if (!name) { ret = -ENOMEM; goto error_free_device; } rcu_assign_pointer(device->name, name); trans = btrfs_start_transaction(root, 0); if (IS_ERR(trans)) { ret = PTR_ERR(trans); goto error_free_device; } q = bdev_get_queue(bdev); set_bit(BTRFS_DEV_STATE_WRITEABLE, &device->dev_state); device->generation = trans->transid; device->io_width = fs_info->sectorsize; device->io_align = fs_info->sectorsize; device->sector_size = fs_info->sectorsize; device->total_bytes = round_down(i_size_read(bdev->bd_inode), fs_info->sectorsize); device->disk_total_bytes = device->total_bytes; device->commit_total_bytes = device->total_bytes; device->fs_info = fs_info; device->bdev = bdev; set_bit(BTRFS_DEV_STATE_IN_FS_METADATA, &device->dev_state); clear_bit(BTRFS_DEV_STATE_REPLACE_TGT, &device->dev_state); device->mode = FMODE_EXCL; device->dev_stats_valid = 1; set_blocksize(device->bdev, BTRFS_BDEV_BLOCKSIZE); if (seeding_dev) { sb->s_flags &= ~SB_RDONLY; ret = btrfs_prepare_sprout(fs_info); if (ret) { btrfs_abort_transaction(trans, ret); goto error_trans; } } device->fs_devices = 
fs_devices; mutex_lock(&fs_devices->device_list_mutex); mutex_lock(&fs_info->chunk_mutex); list_add_rcu(&device->dev_list, &fs_devices->devices); list_add(&device->dev_alloc_list, &fs_devices->alloc_list); fs_devices->num_devices++; fs_devices->open_devices++; fs_devices->rw_devices++; fs_devices->total_devices++; fs_devices->total_rw_bytes += device->total_bytes; atomic64_add(device->total_bytes, &fs_info->free_chunk_space); if (!blk_queue_nonrot(q)) fs_devices->rotating = 1; orig_super_total_bytes = btrfs_super_total_bytes(fs_info->super_copy); btrfs_set_super_total_bytes(fs_info->super_copy, round_down(orig_super_total_bytes + device->total_bytes, fs_info->sectorsize)); orig_super_num_devices = btrfs_super_num_devices(fs_info->super_copy); btrfs_set_super_num_devices(fs_info->super_copy, orig_super_num_devices + 1); /* add sysfs device entry */ btrfs_sysfs_add_device_link(fs_devices, device); /* * we've got more storage, clear any full flags on the space * infos */ btrfs_clear_space_info_full(fs_info); mutex_unlock(&fs_info->chunk_mutex); mutex_unlock(&fs_devices->device_list_mutex); if (seeding_dev) { mutex_lock(&fs_info->chunk_mutex); ret = init_first_rw_device(trans, fs_info); mutex_unlock(&fs_info->chunk_mutex); if (ret) { btrfs_abort_transaction(trans, ret); goto error_sysfs; } } ret = btrfs_add_dev_item(trans, device); if (ret) { btrfs_abort_transaction(trans, ret); goto error_sysfs; } if (seeding_dev) { char fsid_buf[BTRFS_UUID_UNPARSED_SIZE]; ret = btrfs_finish_sprout(trans, fs_info); if (ret) { btrfs_abort_transaction(trans, ret); goto error_sysfs; } /* Sprouting would change fsid of the mounted root, * so rename the fsid on the sysfs */ snprintf(fsid_buf, BTRFS_UUID_UNPARSED_SIZE, "%pU", fs_info->fs_devices->fsid); if (kobject_rename(&fs_devices->fsid_kobj, fsid_buf)) btrfs_warn(fs_info, "sysfs: failed to create fsid for sprout"); } ret = btrfs_commit_transaction(trans); if (seeding_dev) { mutex_unlock(&uuid_mutex); up_write(&sb->s_umount); unlocked = 
true; if (ret) /* transaction commit */ return ret; ret = btrfs_relocate_sys_chunks(fs_info); if (ret < 0) btrfs_handle_fs_error(fs_info, ret, "Failed to relocate sys chunks after device initialization. This can be fixed using the \"btrfs balance\" command."); trans = btrfs_attach_transaction(root); if (IS_ERR(trans)) { if (PTR_ERR(trans) == -ENOENT) return 0; ret = PTR_ERR(trans); trans = NULL; goto error_sysfs; } ret = btrfs_commit_transaction(trans); } /* Update ctime/mtime for libblkid */ update_dev_time(device_path); return ret; error_sysfs: btrfs_sysfs_rm_device_link(fs_devices, device); mutex_lock(&fs_info->fs_devices->device_list_mutex); mutex_lock(&fs_info->chunk_mutex); list_del_rcu(&device->dev_list); list_del(&device->dev_alloc_list); fs_info->fs_devices->num_devices--; fs_info->fs_devices->open_devices--; fs_info->fs_devices->rw_devices--; fs_info->fs_devices->total_devices--; fs_info->fs_devices->total_rw_bytes -= device->total_bytes; atomic64_sub(device->total_bytes, &fs_info->free_chunk_space); btrfs_set_super_total_bytes(fs_info->super_copy, orig_super_total_bytes); btrfs_set_super_num_devices(fs_info->super_copy, orig_super_num_devices); mutex_unlock(&fs_info->chunk_mutex); mutex_unlock(&fs_info->fs_devices->device_list_mutex); error_trans: if (seeding_dev) sb->s_flags |= SB_RDONLY; if (trans) btrfs_end_transaction(trans); error_free_device: btrfs_free_device(device); error: blkdev_put(bdev, FMODE_EXCL); if (seeding_dev && !unlocked) { mutex_unlock(&uuid_mutex); up_write(&sb->s_umount); } return ret; } static noinline int btrfs_update_device(struct btrfs_trans_handle *trans, struct btrfs_device *device) { int ret; struct btrfs_path *path; struct btrfs_root *root = device->fs_info->chunk_root; struct btrfs_dev_item *dev_item; struct extent_buffer *leaf; struct btrfs_key key; path = btrfs_alloc_path(); if (!path) return -ENOMEM; key.objectid = BTRFS_DEV_ITEMS_OBJECTID; key.type = BTRFS_DEV_ITEM_KEY; key.offset = device->devid; ret = 
btrfs_search_slot(trans, root, &key, path, 0, 1); if (ret < 0) goto out; if (ret > 0) { ret = -ENOENT; goto out; } leaf = path->nodes[0]; dev_item = btrfs_item_ptr(leaf, path->slots[0], struct btrfs_dev_item); btrfs_set_device_id(leaf, dev_item, device->devid); btrfs_set_device_type(leaf, dev_item, device->type); btrfs_set_device_io_align(leaf, dev_item, device->io_align); btrfs_set_device_io_width(leaf, dev_item, device->io_width); btrfs_set_device_sector_size(leaf, dev_item, device->sector_size); btrfs_set_device_total_bytes(leaf, dev_item, btrfs_device_get_disk_total_bytes(device)); btrfs_set_device_bytes_used(leaf, dev_item, btrfs_device_get_bytes_used(device)); btrfs_mark_buffer_dirty(leaf); out: btrfs_free_path(path); return ret; } int btrfs_grow_device(struct btrfs_trans_handle *trans, struct btrfs_device *device, u64 new_size) { struct btrfs_fs_info *fs_info = device->fs_info; struct btrfs_super_block *super_copy = fs_info->super_copy; struct btrfs_fs_devices *fs_devices; u64 old_total; u64 diff; if (!test_bit(BTRFS_DEV_STATE_WRITEABLE, &device->dev_state)) return -EACCES; new_size = round_down(new_size, fs_info->sectorsize); mutex_lock(&fs_info->chunk_mutex); old_total = btrfs_super_total_bytes(super_copy); diff = round_down(new_size - device->total_bytes, fs_info->sectorsize); if (new_size <= device->total_bytes || test_bit(BTRFS_DEV_STATE_REPLACE_TGT, &device->dev_state)) { mutex_unlock(&fs_info->chunk_mutex); return -EINVAL; } fs_devices = fs_info->fs_devices; btrfs_set_super_total_bytes(super_copy, round_down(old_total + diff, fs_info->sectorsize)); device->fs_devices->total_rw_bytes += diff; btrfs_device_set_total_bytes(device, new_size); btrfs_device_set_disk_total_bytes(device, new_size); btrfs_clear_space_info_full(device->fs_info); if (list_empty(&device->resized_list)) list_add_tail(&device->resized_list, &fs_devices->resized_devices); mutex_unlock(&fs_info->chunk_mutex); return btrfs_update_device(trans, device); } static int 
btrfs_free_chunk(struct btrfs_trans_handle *trans, u64 chunk_offset)
{
	struct btrfs_fs_info *fs_info = trans->fs_info;
	struct btrfs_root *root = fs_info->chunk_root;
	int ret;
	struct btrfs_path *path;
	struct btrfs_key key;

	path = btrfs_alloc_path();
	if (!path)
		return -ENOMEM;

	/* Look up the chunk item for @chunk_offset in the chunk tree */
	key.objectid = BTRFS_FIRST_CHUNK_TREE_OBJECTID;
	key.offset = chunk_offset;
	key.type = BTRFS_CHUNK_ITEM_KEY;

	ret = btrfs_search_slot(trans, root, &key, path, -1, 1);
	if (ret < 0)
		goto out;
	else if (ret > 0) { /* Logic error or corruption */
		btrfs_handle_fs_error(fs_info, -ENOENT,
				      "Failed lookup while freeing chunk.");
		ret = -ENOENT;
		goto out;
	}

	ret = btrfs_del_item(trans, root, path);
	if (ret < 0)
		btrfs_handle_fs_error(fs_info, ret,
				      "Failed to delete chunk item.");
out:
	btrfs_free_path(path);
	return ret;
}

/*
 * Remove the chunk at @chunk_offset from the superblock's packed
 * sys_chunk_array (callers invoke this for SYSTEM chunks — see
 * btrfs_remove_chunk).
 */
static int btrfs_del_sys_chunk(struct btrfs_fs_info *fs_info, u64 chunk_offset)
{
	struct btrfs_super_block *super_copy = fs_info->super_copy;
	struct btrfs_disk_key *disk_key;
	struct btrfs_chunk *chunk;
	u8 *ptr;
	int ret = 0;
	u32 num_stripes;
	u32 array_size;
	u32 len = 0;
	u32 cur;
	struct btrfs_key key;

	mutex_lock(&fs_info->chunk_mutex);
	array_size = btrfs_super_sys_array_size(super_copy);

	ptr = super_copy->sys_chunk_array;
	cur = 0;

	/* Walk the packed (disk_key, chunk) records in the array */
	while (cur < array_size) {
		disk_key = (struct btrfs_disk_key *)ptr;
		btrfs_disk_key_to_cpu(&key, disk_key);

		len = sizeof(*disk_key);

		if (key.type == BTRFS_CHUNK_ITEM_KEY) {
			chunk = (struct btrfs_chunk *)(ptr + len);
			num_stripes = btrfs_stack_chunk_num_stripes(chunk);
			len += btrfs_chunk_item_size(num_stripes);
		} else {
			/* Only chunk items are valid in this array */
			ret = -EIO;
			break;
		}
		if (key.objectid == BTRFS_FIRST_CHUNK_TREE_OBJECTID &&
		    key.offset == chunk_offset) {
			/* Found it: close the gap and shrink the array.
			 * Do not advance ptr/cur — the moved data is
			 * re-examined on the next iteration. */
			memmove(ptr, ptr + len, array_size - (cur + len));
			array_size -= len;
			btrfs_set_super_sys_array_size(super_copy, array_size);
		} else {
			ptr += len;
			cur += len;
		}
	}
	mutex_unlock(&fs_info->chunk_mutex);
	return ret;
}

/*
 * btrfs_get_chunk_map() - Find the mapping containing the given logical extent.
 * @logical: Logical block offset in bytes.
* @length: Length of extent in bytes. * * Return: Chunk mapping or ERR_PTR. */ struct extent_map *btrfs_get_chunk_map(struct btrfs_fs_info *fs_info, u64 logical, u64 length) { struct extent_map_tree *em_tree; struct extent_map *em; em_tree = &fs_info->mapping_tree.map_tree; read_lock(&em_tree->lock); em = lookup_extent_mapping(em_tree, logical, length); read_unlock(&em_tree->lock); if (!em) { btrfs_crit(fs_info, "unable to find logical %llu length %llu", logical, length); return ERR_PTR(-EINVAL); } if (em->start > logical || em->start + em->len < logical) { btrfs_crit(fs_info, "found a bad mapping, wanted %llu-%llu, found %llu-%llu", logical, length, em->start, em->start + em->len); free_extent_map(em); return ERR_PTR(-EINVAL); } /* callers are responsible for dropping em's ref. */ return em; } int btrfs_remove_chunk(struct btrfs_trans_handle *trans, u64 chunk_offset) { struct btrfs_fs_info *fs_info = trans->fs_info; struct extent_map *em; struct map_lookup *map; u64 dev_extent_len = 0; int i, ret = 0; struct btrfs_fs_devices *fs_devices = fs_info->fs_devices; em = btrfs_get_chunk_map(fs_info, chunk_offset, 1); if (IS_ERR(em)) { /* * This is a logic error, but we don't want to just rely on the * user having built with ASSERT enabled, so if ASSERT doesn't * do anything we still error out. */ ASSERT(0); return PTR_ERR(em); } map = em->map_lookup; mutex_lock(&fs_info->chunk_mutex); check_system_chunk(trans, map->type); mutex_unlock(&fs_info->chunk_mutex); /* * Take the device list mutex to prevent races with the final phase of * a device replace operation that replaces the device object associated * with map stripes (dev-replace.c:btrfs_dev_replace_finishing()). 
*/ mutex_lock(&fs_devices->device_list_mutex); for (i = 0; i < map->num_stripes; i++) { struct btrfs_device *device = map->stripes[i].dev; ret = btrfs_free_dev_extent(trans, device, map->stripes[i].physical, &dev_extent_len); if (ret) { mutex_unlock(&fs_devices->device_list_mutex); btrfs_abort_transaction(trans, ret); goto out; } if (device->bytes_used > 0) { mutex_lock(&fs_info->chunk_mutex); btrfs_device_set_bytes_used(device, device->bytes_used - dev_extent_len); atomic64_add(dev_extent_len, &fs_info->free_chunk_space); btrfs_clear_space_info_full(fs_info); mutex_unlock(&fs_info->chunk_mutex); } ret = btrfs_update_device(trans, device); if (ret) { mutex_unlock(&fs_devices->device_list_mutex); btrfs_abort_transaction(trans, ret); goto out; } } mutex_unlock(&fs_devices->device_list_mutex); ret = btrfs_free_chunk(trans, chunk_offset); if (ret) { btrfs_abort_transaction(trans, ret); goto out; } trace_btrfs_chunk_free(fs_info, map, chunk_offset, em->len); if (map->type & BTRFS_BLOCK_GROUP_SYSTEM) { ret = btrfs_del_sys_chunk(fs_info, chunk_offset); if (ret) { btrfs_abort_transaction(trans, ret); goto out; } } ret = btrfs_remove_block_group(trans, chunk_offset, em); if (ret) { btrfs_abort_transaction(trans, ret); goto out; } out: /* once for us */ free_extent_map(em); return ret; } static int btrfs_relocate_chunk(struct btrfs_fs_info *fs_info, u64 chunk_offset) { struct btrfs_root *root = fs_info->chunk_root; struct btrfs_trans_handle *trans; int ret; /* * Prevent races with automatic removal of unused block groups. * After we relocate and before we remove the chunk with offset * chunk_offset, automatic removal of the block group can kick in, * resulting in a failure when calling btrfs_remove_chunk() below. * * Make sure to acquire this mutex before doing a tree search (dev * or chunk trees) to find chunks. 
Otherwise the cleaner kthread might * call btrfs_remove_chunk() (through btrfs_delete_unused_bgs()) after * we release the path used to search the chunk/dev tree and before * the current task acquires this mutex and calls us. */ lockdep_assert_held(&fs_info->delete_unused_bgs_mutex); ret = btrfs_can_relocate(fs_info, chunk_offset); if (ret) return -ENOSPC; /* step one, relocate all the extents inside this chunk */ btrfs_scrub_pause(fs_info); ret = btrfs_relocate_block_group(fs_info, chunk_offset); btrfs_scrub_continue(fs_info); if (ret) return ret; /* * We add the kobjects here (and after forcing data chunk creation) * since relocation is the only place we'll create chunks of a new * type at runtime. The only place where we'll remove the last * chunk of a type is the call immediately below this one. Even * so, we're protected against races with the cleaner thread since * we're covered by the delete_unused_bgs_mutex. */ btrfs_add_raid_kobjects(fs_info); trans = btrfs_start_trans_remove_block_group(root->fs_info, chunk_offset); if (IS_ERR(trans)) { ret = PTR_ERR(trans); btrfs_handle_fs_error(root->fs_info, ret, NULL); return ret; } /* * step two, delete the device extents and the * chunk tree entries */ ret = btrfs_remove_chunk(trans, chunk_offset); btrfs_end_transaction(trans); return ret; } static int btrfs_relocate_sys_chunks(struct btrfs_fs_info *fs_info) { struct btrfs_root *chunk_root = fs_info->chunk_root; struct btrfs_path *path; struct extent_buffer *leaf; struct btrfs_chunk *chunk; struct btrfs_key key; struct btrfs_key found_key; u64 chunk_type; bool retried = false; int failed = 0; int ret; path = btrfs_alloc_path(); if (!path) return -ENOMEM; again: key.objectid = BTRFS_FIRST_CHUNK_TREE_OBJECTID; key.offset = (u64)-1; key.type = BTRFS_CHUNK_ITEM_KEY; while (1) { mutex_lock(&fs_info->delete_unused_bgs_mutex); ret = btrfs_search_slot(NULL, chunk_root, &key, path, 0, 0); if (ret < 0) { mutex_unlock(&fs_info->delete_unused_bgs_mutex); goto error; } 
BUG_ON(ret == 0); /* Corruption */ ret = btrfs_previous_item(chunk_root, path, key.objectid, key.type); if (ret) mutex_unlock(&fs_info->delete_unused_bgs_mutex); if (ret < 0) goto error; if (ret > 0) break; leaf = path->nodes[0]; btrfs_item_key_to_cpu(leaf, &found_key, path->slots[0]); chunk = btrfs_item_ptr(leaf, path->slots[0], struct btrfs_chunk); chunk_type = btrfs_chunk_type(leaf, chunk); btrfs_release_path(path); if (chunk_type & BTRFS_BLOCK_GROUP_SYSTEM) { ret = btrfs_relocate_chunk(fs_info, found_key.offset); if (ret == -ENOSPC) failed++; else BUG_ON(ret); } mutex_unlock(&fs_info->delete_unused_bgs_mutex); if (found_key.offset == 0) break; key.offset = found_key.offset - 1; } ret = 0; if (failed && !retried) { failed = 0; retried = true; goto again; } else if (WARN_ON(failed && retried)) { ret = -ENOSPC; } error: btrfs_free_path(path); return ret; } /* * return 1 : allocate a data chunk successfully, * return <0: errors during allocating a data chunk, * return 0 : no need to allocate a data chunk. 
*/ static int btrfs_may_alloc_data_chunk(struct btrfs_fs_info *fs_info, u64 chunk_offset) { struct btrfs_block_group_cache *cache; u64 bytes_used; u64 chunk_type; cache = btrfs_lookup_block_group(fs_info, chunk_offset); ASSERT(cache); chunk_type = cache->flags; btrfs_put_block_group(cache); if (chunk_type & BTRFS_BLOCK_GROUP_DATA) { spin_lock(&fs_info->data_sinfo->lock); bytes_used = fs_info->data_sinfo->bytes_used; spin_unlock(&fs_info->data_sinfo->lock); if (!bytes_used) { struct btrfs_trans_handle *trans; int ret; trans = btrfs_join_transaction(fs_info->tree_root); if (IS_ERR(trans)) return PTR_ERR(trans); ret = btrfs_force_chunk_alloc(trans, BTRFS_BLOCK_GROUP_DATA); btrfs_end_transaction(trans); if (ret < 0) return ret; btrfs_add_raid_kobjects(fs_info); return 1; } } return 0; } static int insert_balance_item(struct btrfs_fs_info *fs_info, struct btrfs_balance_control *bctl) { struct btrfs_root *root = fs_info->tree_root; struct btrfs_trans_handle *trans; struct btrfs_balance_item *item; struct btrfs_disk_balance_args disk_bargs; struct btrfs_path *path; struct extent_buffer *leaf; struct btrfs_key key; int ret, err; path = btrfs_alloc_path(); if (!path) return -ENOMEM; trans = btrfs_start_transaction(root, 0); if (IS_ERR(trans)) { btrfs_free_path(path); return PTR_ERR(trans); } key.objectid = BTRFS_BALANCE_OBJECTID; key.type = BTRFS_TEMPORARY_ITEM_KEY; key.offset = 0; ret = btrfs_insert_empty_item(trans, root, path, &key, sizeof(*item)); if (ret) goto out; leaf = path->nodes[0]; item = btrfs_item_ptr(leaf, path->slots[0], struct btrfs_balance_item); memzero_extent_buffer(leaf, (unsigned long)item, sizeof(*item)); btrfs_cpu_balance_args_to_disk(&disk_bargs, &bctl->data); btrfs_set_balance_data(leaf, item, &disk_bargs); btrfs_cpu_balance_args_to_disk(&disk_bargs, &bctl->meta); btrfs_set_balance_meta(leaf, item, &disk_bargs); btrfs_cpu_balance_args_to_disk(&disk_bargs, &bctl->sys); btrfs_set_balance_sys(leaf, item, &disk_bargs); btrfs_set_balance_flags(leaf, 
item, bctl->flags); btrfs_mark_buffer_dirty(leaf); out: btrfs_free_path(path); err = btrfs_commit_transaction(trans); if (err && !ret) ret = err; return ret; } static int del_balance_item(struct btrfs_fs_info *fs_info) { struct btrfs_root *root = fs_info->tree_root; struct btrfs_trans_handle *trans; struct btrfs_path *path; struct btrfs_key key; int ret, err; path = btrfs_alloc_path(); if (!path) return -ENOMEM; trans = btrfs_start_transaction(root, 0); if (IS_ERR(trans)) { btrfs_free_path(path); return PTR_ERR(trans); } key.objectid = BTRFS_BALANCE_OBJECTID; key.type = BTRFS_TEMPORARY_ITEM_KEY; key.offset = 0; ret = btrfs_search_slot(trans, root, &key, path, -1, 1); if (ret < 0) goto out; if (ret > 0) { ret = -ENOENT; goto out; } ret = btrfs_del_item(trans, root, path); out: btrfs_free_path(path); err = btrfs_commit_transaction(trans); if (err && !ret) ret = err; return ret; } /* * This is a heuristic used to reduce the number of chunks balanced on * resume after balance was interrupted. */ static void update_balance_args(struct btrfs_balance_control *bctl) { /* * Turn on soft mode for chunk types that were being converted. */ if (bctl->data.flags & BTRFS_BALANCE_ARGS_CONVERT) bctl->data.flags |= BTRFS_BALANCE_ARGS_SOFT; if (bctl->sys.flags & BTRFS_BALANCE_ARGS_CONVERT) bctl->sys.flags |= BTRFS_BALANCE_ARGS_SOFT; if (bctl->meta.flags & BTRFS_BALANCE_ARGS_CONVERT) bctl->meta.flags |= BTRFS_BALANCE_ARGS_SOFT; /* * Turn on usage filter if is not already used. The idea is * that chunks that we have already balanced should be * reasonably full. Don't do it for chunks that are being * converted - that will keep us from relocating unconverted * (albeit full) chunks. 
*/
	/* Data: add a usage<90 filter unless usage/convert filters are set */
	if (!(bctl->data.flags & BTRFS_BALANCE_ARGS_USAGE) &&
	    !(bctl->data.flags & BTRFS_BALANCE_ARGS_USAGE_RANGE) &&
	    !(bctl->data.flags & BTRFS_BALANCE_ARGS_CONVERT)) {
		bctl->data.flags |= BTRFS_BALANCE_ARGS_USAGE;
		bctl->data.usage = 90;
	}
	/* Same policy for system chunks */
	if (!(bctl->sys.flags & BTRFS_BALANCE_ARGS_USAGE) &&
	    !(bctl->sys.flags & BTRFS_BALANCE_ARGS_USAGE_RANGE) &&
	    !(bctl->sys.flags & BTRFS_BALANCE_ARGS_CONVERT)) {
		bctl->sys.flags |= BTRFS_BALANCE_ARGS_USAGE;
		bctl->sys.usage = 90;
	}
	/* Same policy for metadata chunks */
	if (!(bctl->meta.flags & BTRFS_BALANCE_ARGS_USAGE) &&
	    !(bctl->meta.flags & BTRFS_BALANCE_ARGS_USAGE_RANGE) &&
	    !(bctl->meta.flags & BTRFS_BALANCE_ARGS_CONVERT)) {
		bctl->meta.flags |= BTRFS_BALANCE_ARGS_USAGE;
		bctl->meta.usage = 90;
	}
}

/*
 * Clear the balance status in fs_info and delete the balance item from disk.
 */
static void reset_balance_state(struct btrfs_fs_info *fs_info)
{
	struct btrfs_balance_control *bctl = fs_info->balance_ctl;
	int ret;

	BUG_ON(!fs_info->balance_ctl);

	/* Detach the control structure under the lock, free it outside */
	spin_lock(&fs_info->balance_lock);
	fs_info->balance_ctl = NULL;
	spin_unlock(&fs_info->balance_lock);

	kfree(bctl);
	/* Best effort: a failure here only flags the fs, it is not returned */
	ret = del_balance_item(fs_info);
	if (ret)
		btrfs_handle_fs_error(fs_info, ret, NULL);
}

/*
 * Balance filters. Return 1 if chunk should be filtered out
 * (should not be balanced).
*/
static int chunk_profiles_filter(u64 chunk_type,
				 struct btrfs_balance_args *bargs)
{
	/* Normalize to the extended profile representation before masking */
	chunk_type = chunk_to_extended(chunk_type) &
				BTRFS_EXTENDED_PROFILE_MASK;

	if (bargs->profiles & chunk_type)
		return 0;

	return 1;
}

/* Keep (return 0) chunks whose used bytes fall in [usage_min, usage_max) */
static int chunk_usage_range_filter(struct btrfs_fs_info *fs_info, u64 chunk_offset,
				    struct btrfs_balance_args *bargs)
{
	struct btrfs_block_group_cache *cache;
	u64 chunk_used;
	u64 user_thresh_min;
	u64 user_thresh_max;
	int ret = 1;

	cache = btrfs_lookup_block_group(fs_info, chunk_offset);
	chunk_used = btrfs_block_group_used(&cache->item);

	/* usage_min == 0 means no lower bound */
	if (bargs->usage_min == 0)
		user_thresh_min = 0;
	else
		user_thresh_min = div_factor_fine(cache->key.offset,
					bargs->usage_min);

	/* usage_max == 0 matches only empty chunks; > 100 caps at full size */
	if (bargs->usage_max == 0)
		user_thresh_max = 1;
	else if (bargs->usage_max > 100)
		user_thresh_max = cache->key.offset;
	else
		user_thresh_max = div_factor_fine(cache->key.offset,
					bargs->usage_max);

	if (user_thresh_min <= chunk_used && chunk_used < user_thresh_max)
		ret = 0;

	btrfs_put_block_group(cache);
	return ret;
}

/* Keep (return 0) chunks used below the single-value 'usage' threshold */
static int chunk_usage_filter(struct btrfs_fs_info *fs_info,
		u64 chunk_offset, struct btrfs_balance_args *bargs)
{
	struct btrfs_block_group_cache *cache;
	u64 chunk_used, user_thresh;
	int ret = 1;

	cache = btrfs_lookup_block_group(fs_info, chunk_offset);
	chunk_used = btrfs_block_group_used(&cache->item);

	/*
	 * NOTE(review): the zero check reads usage_min while scaling uses
	 * bargs->usage — the two fields share storage for the single-value
	 * form, presumably; confirm against the balance-args definition.
	 */
	if (bargs->usage_min == 0)
		user_thresh = 1;
	else if (bargs->usage > 100)
		user_thresh = cache->key.offset;
	else
		user_thresh = div_factor_fine(cache->key.offset,
					      bargs->usage);

	if (chunk_used < user_thresh)
		ret = 0;

	btrfs_put_block_group(cache);
	return ret;
}

/* Keep (return 0) chunks that have at least one stripe on bargs->devid */
static int chunk_devid_filter(struct extent_buffer *leaf,
			      struct btrfs_chunk *chunk,
			      struct btrfs_balance_args *bargs)
{
	struct btrfs_stripe *stripe;
	int num_stripes = btrfs_chunk_num_stripes(leaf, chunk);
	int i;

	for (i = 0; i < num_stripes; i++) {
		stripe = btrfs_stripe_nr(chunk, i);
		if (btrfs_stripe_devid(leaf, stripe) == bargs->devid)
			return 0;
	}

	return 1;
}

/* [pstart, pend) */
static int chunk_drange_filter(struct extent_buffer
*leaf, struct btrfs_chunk *chunk, struct btrfs_balance_args *bargs) { struct btrfs_stripe *stripe; int num_stripes = btrfs_chunk_num_stripes(leaf, chunk); u64 stripe_offset; u64 stripe_length; int factor; int i; if (!(bargs->flags & BTRFS_BALANCE_ARGS_DEVID)) return 0; if (btrfs_chunk_type(leaf, chunk) & (BTRFS_BLOCK_GROUP_DUP | BTRFS_BLOCK_GROUP_RAID1 | BTRFS_BLOCK_GROUP_RAID10)) { factor = num_stripes / 2; } else if (btrfs_chunk_type(leaf, chunk) & BTRFS_BLOCK_GROUP_RAID5) { factor = num_stripes - 1; } else if (btrfs_chunk_type(leaf, chunk) & BTRFS_BLOCK_GROUP_RAID6) { factor = num_stripes - 2; } else { factor = num_stripes; } for (i = 0; i < num_stripes; i++) { stripe = btrfs_stripe_nr(chunk, i); if (btrfs_stripe_devid(leaf, stripe) != bargs->devid) continue; stripe_offset = btrfs_stripe_offset(leaf, stripe); stripe_length = btrfs_chunk_length(leaf, chunk); stripe_length = div_u64(stripe_length, factor); if (stripe_offset < bargs->pend && stripe_offset + stripe_length > bargs->pstart) return 0; } return 1; } /* [vstart, vend) */ static int chunk_vrange_filter(struct extent_buffer *leaf, struct btrfs_chunk *chunk, u64 chunk_offset, struct btrfs_balance_args *bargs) { if (chunk_offset < bargs->vend && chunk_offset + btrfs_chunk_length(leaf, chunk) > bargs->vstart) /* at least part of the chunk is inside this vrange */ return 0; return 1; } static int chunk_stripes_range_filter(struct extent_buffer *leaf, struct btrfs_chunk *chunk, struct btrfs_balance_args *bargs) { int num_stripes = btrfs_chunk_num_stripes(leaf, chunk); if (bargs->stripes_min <= num_stripes && num_stripes <= bargs->stripes_max) return 0; return 1; } static int chunk_soft_convert_filter(u64 chunk_type, struct btrfs_balance_args *bargs) { if (!(bargs->flags & BTRFS_BALANCE_ARGS_CONVERT)) return 0; chunk_type = chunk_to_extended(chunk_type) & BTRFS_EXTENDED_PROFILE_MASK; if (bargs->target == chunk_type) return 1; return 0; } static int should_balance_chunk(struct btrfs_fs_info *fs_info, struct 
extent_buffer *leaf,
				struct btrfs_chunk *chunk, u64 chunk_offset)
{
	struct btrfs_balance_control *bctl = fs_info->balance_ctl;
	struct btrfs_balance_args *bargs = NULL;
	u64 chunk_type = btrfs_chunk_type(leaf, chunk);

	/* type filter */
	if (!((chunk_type & BTRFS_BLOCK_GROUP_TYPE_MASK) &
	      (bctl->flags & BTRFS_BALANCE_TYPE_MASK))) {
		return 0;
	}

	/* pick the per-type balance args matching this chunk */
	if (chunk_type & BTRFS_BLOCK_GROUP_DATA)
		bargs = &bctl->data;
	else if (chunk_type & BTRFS_BLOCK_GROUP_SYSTEM)
		bargs = &bctl->sys;
	else if (chunk_type & BTRFS_BLOCK_GROUP_METADATA)
		bargs = &bctl->meta;

	/* profiles filter */
	if ((bargs->flags & BTRFS_BALANCE_ARGS_PROFILES) &&
	    chunk_profiles_filter(chunk_type, bargs)) {
		return 0;
	}

	/* usage filter */
	if ((bargs->flags & BTRFS_BALANCE_ARGS_USAGE) &&
	    chunk_usage_filter(fs_info, chunk_offset, bargs)) {
		return 0;
	} else if ((bargs->flags & BTRFS_BALANCE_ARGS_USAGE_RANGE) &&
	    chunk_usage_range_filter(fs_info, chunk_offset, bargs)) {
		return 0;
	}

	/* devid filter */
	if ((bargs->flags & BTRFS_BALANCE_ARGS_DEVID) &&
	    chunk_devid_filter(leaf, chunk, bargs)) {
		return 0;
	}

	/* drange filter, makes sense only with devid filter */
	if ((bargs->flags & BTRFS_BALANCE_ARGS_DRANGE) &&
	    chunk_drange_filter(leaf, chunk, bargs)) {
		return 0;
	}

	/* vrange filter */
	if ((bargs->flags & BTRFS_BALANCE_ARGS_VRANGE) &&
	    chunk_vrange_filter(leaf, chunk, chunk_offset, bargs)) {
		return 0;
	}

	/* stripes filter */
	if ((bargs->flags & BTRFS_BALANCE_ARGS_STRIPES_RANGE) &&
	    chunk_stripes_range_filter(leaf, chunk, bargs)) {
		return 0;
	}

	/* soft profile changing mode */
	if ((bargs->flags & BTRFS_BALANCE_ARGS_SOFT) &&
	    chunk_soft_convert_filter(chunk_type, bargs)) {
		return 0;
	}

	/*
	 * limited by count, must be the last filter
	 */
	if ((bargs->flags & BTRFS_BALANCE_ARGS_LIMIT)) {
		if (bargs->limit == 0)
			return 0;
		else
			bargs->limit--;
	} else if ((bargs->flags & BTRFS_BALANCE_ARGS_LIMIT_RANGE)) {
		/*
		 * Same logic as the 'limit' filter; the minimum cannot be
		 * determined here because we do not have the global information
		 * about the count of all chunks that satisfy the filters.
		 */
		if (bargs->limit_max == 0)
			return 0;
		else
			bargs->limit_max--;
	}

	return 1;
}

/* Main balance worker: two passes over the chunk tree, first counting
 * matching chunks, then relocating them; returns 0 or negative errno. */
static int __btrfs_balance(struct btrfs_fs_info *fs_info)
{
	struct btrfs_balance_control *bctl = fs_info->balance_ctl;
	struct btrfs_root *chunk_root = fs_info->chunk_root;
	u64 chunk_type;
	struct btrfs_chunk *chunk;
	struct btrfs_path *path = NULL;
	struct btrfs_key key;
	struct btrfs_key found_key;
	struct extent_buffer *leaf;
	int slot;
	int ret;
	int enospc_errors = 0;
	bool counting = true;
	/* The single value limit and min/max limits use the same bytes in the */
	u64 limit_data = bctl->data.limit;
	u64 limit_meta = bctl->meta.limit;
	u64 limit_sys = bctl->sys.limit;
	u32 count_data = 0;
	u32 count_meta = 0;
	u32 count_sys = 0;
	int chunk_reserved = 0;

	path = btrfs_alloc_path();
	if (!path) {
		ret = -ENOMEM;
		goto error;
	}

	/* zero out stat counters */
	spin_lock(&fs_info->balance_lock);
	memset(&bctl->stat, 0, sizeof(bctl->stat));
	spin_unlock(&fs_info->balance_lock);
again:
	if (!counting) {
		/*
		 * The single value limit and min/max limits use the same bytes
		 * in the
		 */
		bctl->data.limit = limit_data;
		bctl->meta.limit = limit_meta;
		bctl->sys.limit = limit_sys;
	}
	key.objectid = BTRFS_FIRST_CHUNK_TREE_OBJECTID;
	key.offset = (u64)-1;
	key.type = BTRFS_CHUNK_ITEM_KEY;

	/* walk the chunk tree from the highest key downwards */
	while (1) {
		if ((!counting && atomic_read(&fs_info->balance_pause_req)) ||
		    atomic_read(&fs_info->balance_cancel_req)) {
			ret = -ECANCELED;
			goto error;
		}

		mutex_lock(&fs_info->delete_unused_bgs_mutex);
		ret = btrfs_search_slot(NULL, chunk_root, &key, path, 0, 0);
		if (ret < 0) {
			mutex_unlock(&fs_info->delete_unused_bgs_mutex);
			goto error;
		}

		/*
		 * this shouldn't happen, it means the last relocate
		 * failed
		 */
		if (ret == 0)
			BUG(); /* FIXME break ?
*/

		ret = btrfs_previous_item(chunk_root, path, 0,
					  BTRFS_CHUNK_ITEM_KEY);
		if (ret) {
			mutex_unlock(&fs_info->delete_unused_bgs_mutex);
			ret = 0;
			break;
		}

		leaf = path->nodes[0];
		slot = path->slots[0];
		btrfs_item_key_to_cpu(leaf, &found_key, slot);

		if (found_key.objectid != key.objectid) {
			mutex_unlock(&fs_info->delete_unused_bgs_mutex);
			break;
		}

		chunk = btrfs_item_ptr(leaf, slot, struct btrfs_chunk);
		chunk_type = btrfs_chunk_type(leaf, chunk);

		if (!counting) {
			spin_lock(&fs_info->balance_lock);
			bctl->stat.considered++;
			spin_unlock(&fs_info->balance_lock);
		}

		ret = should_balance_chunk(fs_info, leaf, chunk,
					   found_key.offset);

		btrfs_release_path(path);
		if (!ret) {
			mutex_unlock(&fs_info->delete_unused_bgs_mutex);
			goto loop;
		}

		/* first pass: only count matching chunks, don't relocate */
		if (counting) {
			mutex_unlock(&fs_info->delete_unused_bgs_mutex);
			spin_lock(&fs_info->balance_lock);
			bctl->stat.expected++;
			spin_unlock(&fs_info->balance_lock);

			if (chunk_type & BTRFS_BLOCK_GROUP_DATA)
				count_data++;
			else if (chunk_type & BTRFS_BLOCK_GROUP_SYSTEM)
				count_sys++;
			else if (chunk_type & BTRFS_BLOCK_GROUP_METADATA)
				count_meta++;

			goto loop;
		}

		/*
		 * Apply limit_min filter, no need to check if the LIMITS
		 * filter is used, limit_min is 0 by default
		 */
		if (((chunk_type & BTRFS_BLOCK_GROUP_DATA) &&
					count_data < bctl->data.limit_min)
				|| ((chunk_type & BTRFS_BLOCK_GROUP_METADATA) &&
					count_meta < bctl->meta.limit_min)
				|| ((chunk_type & BTRFS_BLOCK_GROUP_SYSTEM) &&
					count_sys < bctl->sys.limit_min)) {
			mutex_unlock(&fs_info->delete_unused_bgs_mutex);
			goto loop;
		}

		if (!chunk_reserved) {
			/*
			 * We may be relocating the only data chunk we have,
			 * which could potentially end up with losing data's
			 * raid profile, so lets allocate an empty one in
			 * advance.
			 */
			ret = btrfs_may_alloc_data_chunk(fs_info,
							 found_key.offset);
			if (ret < 0) {
				mutex_unlock(&fs_info->delete_unused_bgs_mutex);
				goto error;
			} else if (ret == 1) {
				chunk_reserved = 1;
			}
		}

		ret = btrfs_relocate_chunk(fs_info, found_key.offset);
		mutex_unlock(&fs_info->delete_unused_bgs_mutex);
		if (ret == -ENOSPC) {
			enospc_errors++;
		} else if (ret == -ETXTBSY) {
			btrfs_info(fs_info,
	   "skipping relocation of block group %llu due to active swapfile",
				   found_key.offset);
			ret = 0;
		} else if (ret) {
			goto error;
		} else {
			spin_lock(&fs_info->balance_lock);
			bctl->stat.completed++;
			spin_unlock(&fs_info->balance_lock);
		}
loop:
		if (found_key.offset == 0)
			break;
		key.offset = found_key.offset - 1;
	}

	/* counting pass done, restore the limits and do the real pass */
	if (counting) {
		btrfs_release_path(path);
		counting = false;
		goto again;
	}
error:
	btrfs_free_path(path);
	if (enospc_errors) {
		btrfs_info(fs_info, "%d enospc errors during balance",
			   enospc_errors);
		if (!ret)
			ret = -ENOSPC;
	}

	return ret;
}

/**
 * alloc_profile_is_valid - see if a given profile is valid and reduced
 * @flags: profile to validate
 * @extended: if true @flags is treated as an extended profile
 */
static int alloc_profile_is_valid(u64 flags, int extended)
{
	u64 mask = (extended ?
BTRFS_EXTENDED_PROFILE_MASK :
		    BTRFS_BLOCK_GROUP_PROFILE_MASK);

	flags &= ~BTRFS_BLOCK_GROUP_TYPE_MASK;

	/* 1) check that all other bits are zeroed */
	if (flags & ~mask)
		return 0;

	/* 2) see if profile is reduced */
	if (flags == 0)
		return !extended; /* "0" is valid for usual profiles */

	/* true if exactly one bit set */
	return is_power_of_2(flags);
}

/* True when a paused/running balance must be torn down (canceled or
 * finished normally rather than paused). */
static inline int balance_need_close(struct btrfs_fs_info *fs_info)
{
	/* cancel requested || normal exit path */
	return atomic_read(&fs_info->balance_cancel_req) ||
		(atomic_read(&fs_info->balance_pause_req) == 0 &&
		 atomic_read(&fs_info->balance_cancel_req) == 0);
}

/* Non-zero return value signifies invalidity */
static inline int validate_convert_profile(struct btrfs_balance_args *bctl_arg,
		u64 allowed)
{
	return ((bctl_arg->flags & BTRFS_BALANCE_ARGS_CONVERT) &&
		(!alloc_profile_is_valid(bctl_arg->target, 1) ||
		 (bctl_arg->target & ~allowed)));
}

/*
 * Fill @buf with textual description of balance filter flags @bargs, up to
 * @size_buf including the terminating null. The output may be trimmed if it
 * does not fit into the provided buffer.
*/
static void describe_balance_args(struct btrfs_balance_args *bargs, char *buf,
				  u32 size_buf)
{
	int ret;
	u32 size_bp = size_buf;	/* bytes remaining in buf */
	char *bp = buf;		/* write cursor */
	u64 flags = bargs->flags;
	char tmp_buf[128] = {'\0'};

	if (!flags)
		return;

	/* The CHECK_APPEND_* helpers append to bp and bail out to
	 * out_overflow on truncation or snprintf error. */
#define CHECK_APPEND_NOARG(a)						\
	do {								\
		ret = snprintf(bp, size_bp, (a));			\
		if (ret < 0 || ret >= size_bp)				\
			goto out_overflow;				\
		size_bp -= ret;						\
		bp += ret;						\
	} while (0)

#define CHECK_APPEND_1ARG(a, v1)					\
	do {								\
		ret = snprintf(bp, size_bp, (a), (v1));			\
		if (ret < 0 || ret >= size_bp)				\
			goto out_overflow;				\
		size_bp -= ret;						\
		bp += ret;						\
	} while (0)

#define CHECK_APPEND_2ARG(a, v1, v2)					\
	do {								\
		ret = snprintf(bp, size_bp, (a), (v1), (v2));		\
		if (ret < 0 || ret >= size_bp)				\
			goto out_overflow;				\
		size_bp -= ret;						\
		bp += ret;						\
	} while (0)

	if (flags & BTRFS_BALANCE_ARGS_CONVERT) {
		int index = btrfs_bg_flags_to_raid_index(bargs->target);

		CHECK_APPEND_1ARG("convert=%s,", get_raid_name(index));
	}

	if (flags & BTRFS_BALANCE_ARGS_SOFT)
		CHECK_APPEND_NOARG("soft,");

	if (flags & BTRFS_BALANCE_ARGS_PROFILES) {
		btrfs_describe_block_groups(bargs->profiles, tmp_buf,
					    sizeof(tmp_buf));
		CHECK_APPEND_1ARG("profiles=%s,", tmp_buf);
	}

	if (flags & BTRFS_BALANCE_ARGS_USAGE)
		CHECK_APPEND_1ARG("usage=%llu,", bargs->usage);

	if (flags & BTRFS_BALANCE_ARGS_USAGE_RANGE)
		CHECK_APPEND_2ARG("usage=%u..%u,",
				  bargs->usage_min, bargs->usage_max);

	if (flags & BTRFS_BALANCE_ARGS_DEVID)
		CHECK_APPEND_1ARG("devid=%llu,", bargs->devid);

	if (flags & BTRFS_BALANCE_ARGS_DRANGE)
		CHECK_APPEND_2ARG("drange=%llu..%llu,",
				  bargs->pstart, bargs->pend);

	if (flags & BTRFS_BALANCE_ARGS_VRANGE)
		CHECK_APPEND_2ARG("vrange=%llu..%llu,",
				  bargs->vstart, bargs->vend);

	if (flags & BTRFS_BALANCE_ARGS_LIMIT)
		CHECK_APPEND_1ARG("limit=%llu,", bargs->limit);

	if (flags & BTRFS_BALANCE_ARGS_LIMIT_RANGE)
		CHECK_APPEND_2ARG("limit=%u..%u,",
				bargs->limit_min, bargs->limit_max);

	if (flags & BTRFS_BALANCE_ARGS_STRIPES_RANGE)
		CHECK_APPEND_2ARG("stripes=%u..%u,",
				  bargs->stripes_min, bargs->stripes_max);

#undef CHECK_APPEND_2ARG
#undef CHECK_APPEND_1ARG
#undef CHECK_APPEND_NOARG

out_overflow:

	if (size_bp < size_buf)
		buf[size_buf - size_bp - 1] = '\0'; /* remove last , */
	else
		buf[0] = '\0';
}

/* Log a human-readable "balance: start/resume <options>" message built
 * from the current balance control. */
static void describe_balance_start_or_resume(struct btrfs_fs_info *fs_info)
{
	u32 size_buf = 1024;
	char tmp_buf[192] = {'\0'};
	char *buf;
	char *bp;
	u32 size_bp = size_buf;
	int ret;
	struct btrfs_balance_control *bctl = fs_info->balance_ctl;

	buf = kzalloc(size_buf, GFP_KERNEL);
	if (!buf)
		return;

	bp = buf;

#define CHECK_APPEND_1ARG(a, v1)					\
	do {								\
		ret = snprintf(bp, size_bp, (a), (v1));			\
		if (ret < 0 || ret >= size_bp)				\
			goto out_overflow;				\
		size_bp -= ret;						\
		bp += ret;						\
	} while (0)

	if (bctl->flags & BTRFS_BALANCE_FORCE)
		CHECK_APPEND_1ARG("%s", "-f ");

	if (bctl->flags & BTRFS_BALANCE_DATA) {
		describe_balance_args(&bctl->data, tmp_buf, sizeof(tmp_buf));
		CHECK_APPEND_1ARG("-d%s ", tmp_buf);
	}

	if (bctl->flags & BTRFS_BALANCE_METADATA) {
		describe_balance_args(&bctl->meta, tmp_buf, sizeof(tmp_buf));
		CHECK_APPEND_1ARG("-m%s ", tmp_buf);
	}

	if (bctl->flags & BTRFS_BALANCE_SYSTEM) {
		describe_balance_args(&bctl->sys, tmp_buf, sizeof(tmp_buf));
		CHECK_APPEND_1ARG("-s%s ", tmp_buf);
	}

#undef CHECK_APPEND_1ARG

out_overflow:

	if (size_bp < size_buf)
		buf[size_buf - size_bp - 1] = '\0'; /* remove last " " */
	btrfs_info(fs_info, "balance: %s %s",
		   (bctl->flags & BTRFS_BALANCE_RESUME) ?
"resume" : "start", buf);

	kfree(buf);
}

/*
 * Should be called with balance mutex held
 */
int btrfs_balance(struct btrfs_fs_info *fs_info,
		  struct btrfs_balance_control *bctl,
		  struct btrfs_ioctl_balance_args *bargs)
{
	u64 meta_target, data_target;
	u64 allowed;
	int mixed = 0;
	int ret;
	u64 num_devices;
	unsigned seq;
	bool reducing_integrity;

	if (btrfs_fs_closing(fs_info) ||
	    atomic_read(&fs_info->balance_pause_req) ||
	    atomic_read(&fs_info->balance_cancel_req)) {
		ret = -EINVAL;
		goto out;
	}

	allowed = btrfs_super_incompat_flags(fs_info->super_copy);
	if (allowed & BTRFS_FEATURE_INCOMPAT_MIXED_GROUPS)
		mixed = 1;

	/*
	 * In case of mixed groups both data and meta should be picked,
	 * and identical options should be given for both of them.
	 */
	allowed = BTRFS_BALANCE_DATA | BTRFS_BALANCE_METADATA;
	if (mixed && (bctl->flags & allowed)) {
		if (!(bctl->flags & BTRFS_BALANCE_DATA) ||
		    !(bctl->flags & BTRFS_BALANCE_METADATA) ||
		    memcmp(&bctl->data, &bctl->meta, sizeof(bctl->data))) {
			btrfs_err(fs_info,
	  "balance: mixed groups data and metadata options must be the same");
			ret = -EINVAL;
			goto out;
		}
	}

	num_devices = btrfs_num_devices(fs_info);

	/* build the set of profiles permitted for this device count */
	allowed = BTRFS_AVAIL_ALLOC_BIT_SINGLE | BTRFS_BLOCK_GROUP_DUP;
	if (num_devices > 1)
		allowed |= (BTRFS_BLOCK_GROUP_RAID0 | BTRFS_BLOCK_GROUP_RAID1);
	if (num_devices > 2)
		allowed |= BTRFS_BLOCK_GROUP_RAID5;
	if (num_devices > 3)
		allowed |= (BTRFS_BLOCK_GROUP_RAID10 |
			    BTRFS_BLOCK_GROUP_RAID6);
	if (validate_convert_profile(&bctl->data, allowed)) {
		int index = btrfs_bg_flags_to_raid_index(bctl->data.target);

		btrfs_err(fs_info,
			  "balance: invalid convert data profile %s",
			  get_raid_name(index));
		ret = -EINVAL;
		goto out;
	}
	if (validate_convert_profile(&bctl->meta, allowed)) {
		int index = btrfs_bg_flags_to_raid_index(bctl->meta.target);

		btrfs_err(fs_info,
			  "balance: invalid convert metadata profile %s",
			  get_raid_name(index));
		ret = -EINVAL;
		goto out;
	}
	if (validate_convert_profile(&bctl->sys, allowed)) {
		int index = btrfs_bg_flags_to_raid_index(bctl->sys.target);

		btrfs_err(fs_info,
			  "balance: invalid convert system profile %s",
			  get_raid_name(index));
		ret = -EINVAL;
		goto out;
	}

	/* allow to reduce meta or sys integrity only if force set */
	allowed = BTRFS_BLOCK_GROUP_DUP | BTRFS_BLOCK_GROUP_RAID1 |
			BTRFS_BLOCK_GROUP_RAID10 |
			BTRFS_BLOCK_GROUP_RAID5 |
			BTRFS_BLOCK_GROUP_RAID6;
	/* seqlock retry loop: sample the avail_*_alloc_bits consistently */
	do {
		seq = read_seqbegin(&fs_info->profiles_lock);

		if (((bctl->sys.flags & BTRFS_BALANCE_ARGS_CONVERT) &&
		     (fs_info->avail_system_alloc_bits & allowed) &&
		     !(bctl->sys.target & allowed)) ||
		    ((bctl->meta.flags & BTRFS_BALANCE_ARGS_CONVERT) &&
		     (fs_info->avail_metadata_alloc_bits & allowed) &&
		     !(bctl->meta.target & allowed)))
			reducing_integrity = true;
		else
			reducing_integrity = false;

		/* if we're not converting, the target field is uninitialized */
		meta_target = (bctl->meta.flags & BTRFS_BALANCE_ARGS_CONVERT) ?
			bctl->meta.target : fs_info->avail_metadata_alloc_bits;
		data_target = (bctl->data.flags & BTRFS_BALANCE_ARGS_CONVERT) ?
			bctl->data.target : fs_info->avail_data_alloc_bits;
	} while (read_seqretry(&fs_info->profiles_lock, seq));

	if (reducing_integrity) {
		if (bctl->flags & BTRFS_BALANCE_FORCE) {
			btrfs_info(fs_info,
				   "balance: force reducing metadata integrity");
		} else {
			btrfs_err(fs_info,
	  "balance: reduces metadata integrity, use --force if you want this");
			ret = -EINVAL;
			goto out;
		}
	}

	if (btrfs_get_num_tolerated_disk_barrier_failures(meta_target) <
		btrfs_get_num_tolerated_disk_barrier_failures(data_target)) {
		int meta_index = btrfs_bg_flags_to_raid_index(meta_target);
		int data_index = btrfs_bg_flags_to_raid_index(data_target);

		btrfs_warn(fs_info,
	"balance: metadata profile %s has lower redundancy than data profile %s",
			   get_raid_name(meta_index), get_raid_name(data_index));
	}

	ret = insert_balance_item(fs_info, bctl);
	if (ret && ret != -EEXIST)
		goto out;

	if (!(bctl->flags & BTRFS_BALANCE_RESUME)) {
		BUG_ON(ret == -EEXIST);
		BUG_ON(fs_info->balance_ctl);
		spin_lock(&fs_info->balance_lock);
		fs_info->balance_ctl = bctl;
		spin_unlock(&fs_info->balance_lock);
	} else {
		BUG_ON(ret != -EEXIST);
		spin_lock(&fs_info->balance_lock);
		update_balance_args(bctl);
		spin_unlock(&fs_info->balance_lock);
	}

	ASSERT(!test_bit(BTRFS_FS_BALANCE_RUNNING, &fs_info->flags));
	set_bit(BTRFS_FS_BALANCE_RUNNING, &fs_info->flags);
	describe_balance_start_or_resume(fs_info);
	/* drop the balance mutex while the long-running worker runs */
	mutex_unlock(&fs_info->balance_mutex);

	ret = __btrfs_balance(fs_info);

	mutex_lock(&fs_info->balance_mutex);
	if (ret == -ECANCELED && atomic_read(&fs_info->balance_pause_req))
		btrfs_info(fs_info, "balance: paused");
	else if (ret == -ECANCELED && atomic_read(&fs_info->balance_cancel_req))
		btrfs_info(fs_info, "balance: canceled");
	else
		btrfs_info(fs_info, "balance: ended with status: %d", ret);

	clear_bit(BTRFS_FS_BALANCE_RUNNING, &fs_info->flags);

	if (bargs) {
		memset(bargs, 0, sizeof(*bargs));
		btrfs_update_ioctl_balance_args(fs_info, bargs);
	}

	if ((ret && ret != -ECANCELED && ret != -ENOSPC) ||
	    balance_need_close(fs_info)) {
		reset_balance_state(fs_info);
		clear_bit(BTRFS_FS_EXCL_OP, &fs_info->flags);
	}

	wake_up(&fs_info->balance_wait_q);

	return ret;
out:
	if (bctl->flags & BTRFS_BALANCE_RESUME)
		reset_balance_state(fs_info);
	else
		kfree(bctl);
	clear_bit(BTRFS_FS_EXCL_OP, &fs_info->flags);

	return ret;
}

/* Kthread entry point that runs a (resumed) balance. */
static int balance_kthread(void *data)
{
	struct btrfs_fs_info *fs_info = data;
	int ret = 0;

	mutex_lock(&fs_info->balance_mutex);
	if (fs_info->balance_ctl)
		ret = btrfs_balance(fs_info, fs_info->balance_ctl, NULL);
	mutex_unlock(&fs_info->balance_mutex);

	return ret;
}

int btrfs_resume_balance_async(struct btrfs_fs_info *fs_info)
{
	struct task_struct *tsk;

	mutex_lock(&fs_info->balance_mutex);
	if (!fs_info->balance_ctl) {
		mutex_unlock(&fs_info->balance_mutex);
		return 0;
	}
	mutex_unlock(&fs_info->balance_mutex);

	if (btrfs_test_opt(fs_info, SKIP_BALANCE)) {
		btrfs_info(fs_info, "balance: resume skipped");
		return 0;
	}

	/*
	 * A ro->rw remount sequence should continue with the paused balance
	 * regardless of who pauses it, system or the user as of now, so set
	 * the resume flag.
*/
	spin_lock(&fs_info->balance_lock);
	fs_info->balance_ctl->flags |= BTRFS_BALANCE_RESUME;
	spin_unlock(&fs_info->balance_lock);

	tsk = kthread_run(balance_kthread, fs_info, "btrfs-balance");
	return PTR_ERR_OR_ZERO(tsk);
}

/* Read a stored balance item from disk at mount and re-install it as
 * fs_info->balance_ctl in paused/resume state. */
int btrfs_recover_balance(struct btrfs_fs_info *fs_info)
{
	struct btrfs_balance_control *bctl;
	struct btrfs_balance_item *item;
	struct btrfs_disk_balance_args disk_bargs;
	struct btrfs_path *path;
	struct extent_buffer *leaf;
	struct btrfs_key key;
	int ret;

	path = btrfs_alloc_path();
	if (!path)
		return -ENOMEM;

	key.objectid = BTRFS_BALANCE_OBJECTID;
	key.type = BTRFS_TEMPORARY_ITEM_KEY;
	key.offset = 0;

	ret = btrfs_search_slot(NULL, fs_info->tree_root, &key, path, 0, 0);
	if (ret < 0)
		goto out;
	if (ret > 0) { /* ret = -ENOENT; */
		ret = 0;
		goto out;
	}

	bctl = kzalloc(sizeof(*bctl), GFP_NOFS);
	if (!bctl) {
		ret = -ENOMEM;
		goto out;
	}

	leaf = path->nodes[0];
	item = btrfs_item_ptr(leaf, path->slots[0], struct btrfs_balance_item);

	bctl->flags = btrfs_balance_flags(leaf, item);
	bctl->flags |= BTRFS_BALANCE_RESUME;

	btrfs_balance_data(leaf, item, &disk_bargs);
	btrfs_disk_balance_args_to_cpu(&bctl->data, &disk_bargs);
	btrfs_balance_meta(leaf, item, &disk_bargs);
	btrfs_disk_balance_args_to_cpu(&bctl->meta, &disk_bargs);
	btrfs_balance_sys(leaf, item, &disk_bargs);
	btrfs_disk_balance_args_to_cpu(&bctl->sys, &disk_bargs);

	/*
	 * This should never happen, as the paused balance state is recovered
	 * during mount without any chance of other exclusive ops to collide.
	 *
	 * This gives the exclusive op status to balance and keeps in paused
	 * state until user intervention (cancel or umount). If the ownership
	 * cannot be assigned, show a message but do not fail. The balance
	 * is in a paused state and must have fs_info::balance_ctl properly
	 * set up.
	 */
	if (test_and_set_bit(BTRFS_FS_EXCL_OP, &fs_info->flags))
		btrfs_warn(fs_info,
	"balance: cannot set exclusive op status, resume manually");

	mutex_lock(&fs_info->balance_mutex);
	BUG_ON(fs_info->balance_ctl);
	spin_lock(&fs_info->balance_lock);
	fs_info->balance_ctl = bctl;
	spin_unlock(&fs_info->balance_lock);
	mutex_unlock(&fs_info->balance_mutex);
out:
	btrfs_free_path(path);
	return ret;
}

/* Request a pause of a running balance and wait for the worker to stop. */
int btrfs_pause_balance(struct btrfs_fs_info *fs_info)
{
	int ret = 0;

	mutex_lock(&fs_info->balance_mutex);
	if (!fs_info->balance_ctl) {
		mutex_unlock(&fs_info->balance_mutex);
		return -ENOTCONN;
	}

	if (test_bit(BTRFS_FS_BALANCE_RUNNING, &fs_info->flags)) {
		atomic_inc(&fs_info->balance_pause_req);
		mutex_unlock(&fs_info->balance_mutex);

		wait_event(fs_info->balance_wait_q,
			   !test_bit(BTRFS_FS_BALANCE_RUNNING, &fs_info->flags));

		mutex_lock(&fs_info->balance_mutex);
		/* we are good with balance_ctl ripped off from under us */
		BUG_ON(test_bit(BTRFS_FS_BALANCE_RUNNING, &fs_info->flags));
		atomic_dec(&fs_info->balance_pause_req);
	} else {
		ret = -ENOTCONN;
	}

	mutex_unlock(&fs_info->balance_mutex);
	return ret;
}

int btrfs_cancel_balance(struct btrfs_fs_info *fs_info)
{
	mutex_lock(&fs_info->balance_mutex);
	if (!fs_info->balance_ctl) {
		mutex_unlock(&fs_info->balance_mutex);
		return -ENOTCONN;
	}

	/*
	 * A paused balance with the item stored on disk can be resumed at
	 * mount time if the mount is read-write. Otherwise it's still paused
	 * and we must not allow cancelling as it deletes the item.
*/
	if (sb_rdonly(fs_info->sb)) {
		mutex_unlock(&fs_info->balance_mutex);
		return -EROFS;
	}

	atomic_inc(&fs_info->balance_cancel_req);
	/*
	 * if we are running just wait and return, balance item is
	 * deleted in btrfs_balance in this case
	 */
	if (test_bit(BTRFS_FS_BALANCE_RUNNING, &fs_info->flags)) {
		mutex_unlock(&fs_info->balance_mutex);
		wait_event(fs_info->balance_wait_q,
			   !test_bit(BTRFS_FS_BALANCE_RUNNING, &fs_info->flags));
		mutex_lock(&fs_info->balance_mutex);
	} else {
		mutex_unlock(&fs_info->balance_mutex);
		/*
		 * Lock released to allow other waiters to continue, we'll
		 * reexamine the status again.
		 */
		mutex_lock(&fs_info->balance_mutex);

		if (fs_info->balance_ctl) {
			reset_balance_state(fs_info);
			clear_bit(BTRFS_FS_EXCL_OP, &fs_info->flags);
			btrfs_info(fs_info, "balance: canceled");
		}
	}

	BUG_ON(fs_info->balance_ctl ||
		test_bit(BTRFS_FS_BALANCE_RUNNING, &fs_info->flags));
	atomic_dec(&fs_info->balance_cancel_req);
	mutex_unlock(&fs_info->balance_mutex);
	return 0;
}

/* Kthread: walk all root items and add missing subvolume UUID entries
 * to the UUID tree. */
static int btrfs_uuid_scan_kthread(void *data)
{
	struct btrfs_fs_info *fs_info = data;
	struct btrfs_root *root = fs_info->tree_root;
	struct btrfs_key key;
	struct btrfs_path *path = NULL;
	int ret = 0;
	struct extent_buffer *eb;
	int slot;
	struct btrfs_root_item root_item;
	u32 item_size;
	struct btrfs_trans_handle *trans = NULL;

	path = btrfs_alloc_path();
	if (!path) {
		ret = -ENOMEM;
		goto out;
	}

	key.objectid = 0;
	key.type = BTRFS_ROOT_ITEM_KEY;
	key.offset = 0;

	while (1) {
		ret = btrfs_search_forward(root, &key, path,
				BTRFS_OLDEST_GENERATION);
		if (ret) {
			if (ret > 0)
				ret = 0;
			break;
		}

		/* only subvolume root items carry UUIDs of interest */
		if (key.type != BTRFS_ROOT_ITEM_KEY ||
		    (key.objectid < BTRFS_FIRST_FREE_OBJECTID &&
		     key.objectid != BTRFS_FS_TREE_OBJECTID) ||
		    key.objectid > BTRFS_LAST_FREE_OBJECTID)
			goto skip;

		eb = path->nodes[0];
		slot = path->slots[0];
		item_size = btrfs_item_size_nr(eb, slot);
		if (item_size < sizeof(root_item))
			goto skip;

		read_extent_buffer(eb, &root_item,
				   btrfs_item_ptr_offset(eb, slot),
				   (int)sizeof(root_item));
		if (btrfs_root_refs(&root_item) == 0)
			goto skip;

		if (!btrfs_is_empty_uuid(root_item.uuid) ||
		    !btrfs_is_empty_uuid(root_item.received_uuid)) {
			if (trans)
				goto update_tree;

			btrfs_release_path(path);
			/*
			 * 1 - subvol uuid item
			 * 1 - received_subvol uuid item
			 */
			trans = btrfs_start_transaction(fs_info->uuid_root, 2);
			if (IS_ERR(trans)) {
				ret = PTR_ERR(trans);
				break;
			}
			/* re-search with the transaction held */
			continue;
		} else {
			goto skip;
		}
update_tree:
		if (!btrfs_is_empty_uuid(root_item.uuid)) {
			ret = btrfs_uuid_tree_add(trans, root_item.uuid,
						  BTRFS_UUID_KEY_SUBVOL,
						  key.objectid);
			if (ret < 0) {
				btrfs_warn(fs_info, "uuid_tree_add failed %d",
					ret);
				break;
			}
		}

		if (!btrfs_is_empty_uuid(root_item.received_uuid)) {
			ret = btrfs_uuid_tree_add(trans,
						  root_item.received_uuid,
						 BTRFS_UUID_KEY_RECEIVED_SUBVOL,
						  key.objectid);
			if (ret < 0) {
				btrfs_warn(fs_info, "uuid_tree_add failed %d",
					ret);
				break;
			}
		}

skip:
		if (trans) {
			ret = btrfs_end_transaction(trans);
			trans = NULL;
			if (ret)
				break;
		}

		btrfs_release_path(path);
		/* advance the search key past the item we just handled */
		if (key.offset < (u64)-1) {
			key.offset++;
		} else if (key.type < BTRFS_ROOT_ITEM_KEY) {
			key.offset = 0;
			key.type = BTRFS_ROOT_ITEM_KEY;
		} else if (key.objectid < (u64)-1) {
			key.offset = 0;
			key.type = BTRFS_ROOT_ITEM_KEY;
			key.objectid++;
		} else {
			break;
		}
		cond_resched();
	}

out:
	btrfs_free_path(path);
	if (trans && !IS_ERR(trans))
		btrfs_end_transaction(trans);
	if (ret)
		btrfs_warn(fs_info, "btrfs_uuid_scan_kthread failed %d", ret);
	else
		set_bit(BTRFS_FS_UPDATE_UUID_TREE_GEN, &fs_info->flags);
	up(&fs_info->uuid_tree_rescan_sem);
	return 0;
}

/*
 * Callback for btrfs_uuid_tree_iterate().
 * returns:
 * 0	check succeeded, the entry is not outdated.
 * < 0	if an error occurred.
 * > 0	if the check failed, which means the caller shall remove the entry.
*/
static int btrfs_check_uuid_tree_entry(struct btrfs_fs_info *fs_info,
				       u8 *uuid, u8 type, u64 subid)
{
	struct btrfs_key key;
	int ret = 0;
	struct btrfs_root *subvol_root;

	if (type != BTRFS_UUID_KEY_SUBVOL &&
	    type != BTRFS_UUID_KEY_RECEIVED_SUBVOL)
		goto out;

	key.objectid = subid;
	key.type = BTRFS_ROOT_ITEM_KEY;
	key.offset = (u64)-1;
	subvol_root = btrfs_read_fs_root_no_name(fs_info, &key);
	if (IS_ERR(subvol_root)) {
		ret = PTR_ERR(subvol_root);
		if (ret == -ENOENT)
			ret = 1; /* subvolume gone: entry is stale, remove it */
		goto out;
	}

	switch (type) {
	case BTRFS_UUID_KEY_SUBVOL:
		if (memcmp(uuid, subvol_root->root_item.uuid, BTRFS_UUID_SIZE))
			ret = 1;
		break;
	case BTRFS_UUID_KEY_RECEIVED_SUBVOL:
		if (memcmp(uuid, subvol_root->root_item.received_uuid,
			   BTRFS_UUID_SIZE))
			ret = 1;
		break;
	}

out:
	return ret;
}

static int btrfs_uuid_rescan_kthread(void *data)
{
	struct btrfs_fs_info *fs_info = (struct btrfs_fs_info *)data;
	int ret;

	/*
	 * 1st step is to iterate through the existing UUID tree and
	 * to delete all entries that contain outdated data.
	 * 2nd step is to add all missing entries to the UUID tree.
	 */
	ret = btrfs_uuid_tree_iterate(fs_info, btrfs_check_uuid_tree_entry);
	if (ret < 0) {
		btrfs_warn(fs_info, "iterating uuid_tree failed %d", ret);
		up(&fs_info->uuid_tree_rescan_sem);
		return ret;
	}
	return btrfs_uuid_scan_kthread(data);
}

/* Create the UUID tree and kick off the background scan that fills it. */
int btrfs_create_uuid_tree(struct btrfs_fs_info *fs_info)
{
	struct btrfs_trans_handle *trans;
	struct btrfs_root *tree_root = fs_info->tree_root;
	struct btrfs_root *uuid_root;
	struct task_struct *task;
	int ret;

	/*
	 * 1 - root node
	 * 1 - root item
	 */
	trans = btrfs_start_transaction(tree_root, 2);
	if (IS_ERR(trans))
		return PTR_ERR(trans);

	uuid_root = btrfs_create_tree(trans, fs_info,
				      BTRFS_UUID_TREE_OBJECTID);
	if (IS_ERR(uuid_root)) {
		ret = PTR_ERR(uuid_root);
		btrfs_abort_transaction(trans, ret);
		btrfs_end_transaction(trans);
		return ret;
	}

	fs_info->uuid_root = uuid_root;

	ret = btrfs_commit_transaction(trans);
	if (ret)
		return ret;

	down(&fs_info->uuid_tree_rescan_sem);
	task = kthread_run(btrfs_uuid_scan_kthread, fs_info, "btrfs-uuid");
	if (IS_ERR(task)) {
		/* fs_info->update_uuid_tree_gen remains 0 in all error case */
		btrfs_warn(fs_info, "failed to start uuid_scan task");
		up(&fs_info->uuid_tree_rescan_sem);
		return PTR_ERR(task);
	}

	return 0;
}

/* Start the background rescan that validates and refreshes the UUID tree. */
int btrfs_check_uuid_tree(struct btrfs_fs_info *fs_info)
{
	struct task_struct *task;

	down(&fs_info->uuid_tree_rescan_sem);
	task = kthread_run(btrfs_uuid_rescan_kthread, fs_info, "btrfs-uuid");
	if (IS_ERR(task)) {
		/* fs_info->update_uuid_tree_gen remains 0 in all error case */
		btrfs_warn(fs_info, "failed to start uuid_rescan task");
		up(&fs_info->uuid_tree_rescan_sem);
		return PTR_ERR(task);
	}

	return 0;
}

/*
 * shrinking a device means finding all of the device extents past
 * the new size, and then following the back refs to the chunks.
 * The chunk relocation code actually frees the device extent
 */
int btrfs_shrink_device(struct btrfs_device *device, u64 new_size)
{
	struct btrfs_fs_info *fs_info = device->fs_info;
	struct btrfs_root *root = fs_info->dev_root;
	struct btrfs_trans_handle *trans;
	struct btrfs_dev_extent *dev_extent = NULL;
	struct btrfs_path *path;
	u64 length;
	u64 chunk_offset;
	int ret;
	int slot;
	int failed = 0;
	bool retried = false;
	bool checked_pending_chunks = false;
	struct extent_buffer *l;
	struct btrfs_key key;
	struct btrfs_super_block *super_copy = fs_info->super_copy;
	u64 old_total = btrfs_super_total_bytes(super_copy);
	u64 old_size = btrfs_device_get_total_bytes(device);
	u64 diff;

	/* keep sizes aligned to the sector size */
	new_size = round_down(new_size, fs_info->sectorsize);
	diff = round_down(old_size - new_size, fs_info->sectorsize);

	if (test_bit(BTRFS_DEV_STATE_REPLACE_TGT, &device->dev_state))
		return -EINVAL;

	path = btrfs_alloc_path();
	if (!path)
		return -ENOMEM;

	path->reada = READA_BACK;

	mutex_lock(&fs_info->chunk_mutex);

	btrfs_device_set_total_bytes(device, new_size);
	if (test_bit(BTRFS_DEV_STATE_WRITEABLE, &device->dev_state)) {
		device->fs_devices->total_rw_bytes -= diff;
		atomic64_sub(diff, &fs_info->free_chunk_space);
	}
	mutex_unlock(&fs_info->chunk_mutex);

again:
	key.objectid = device->devid;
	key.offset = (u64)-1;
	key.type = BTRFS_DEV_EXTENT_KEY;

	/* walk the device extents from the end of the device downwards and
	 * relocate every chunk that has an extent past new_size */
	do {
		mutex_lock(&fs_info->delete_unused_bgs_mutex);
		ret = btrfs_search_slot(NULL, root, &key, path, 0, 0);
		if (ret < 0) {
			mutex_unlock(&fs_info->delete_unused_bgs_mutex);
			goto done;
		}

		ret = btrfs_previous_item(root, path, 0, key.type);
		if (ret)
			mutex_unlock(&fs_info->delete_unused_bgs_mutex);
		if (ret < 0)
			goto done;
		if (ret) {
			ret = 0;
			btrfs_release_path(path);
			break;
		}

		l = path->nodes[0];
		slot = path->slots[0];
		btrfs_item_key_to_cpu(l, &key, path->slots[0]);

		if (key.objectid != device->devid) {
			mutex_unlock(&fs_info->delete_unused_bgs_mutex);
			btrfs_release_path(path);
			break;
		}

		dev_extent = btrfs_item_ptr(l, slot, struct btrfs_dev_extent);
		length = btrfs_dev_extent_length(l, dev_extent);

		if (key.offset + length <= new_size) {
			mutex_unlock(&fs_info->delete_unused_bgs_mutex);
			btrfs_release_path(path);
			break;
		}

		chunk_offset = btrfs_dev_extent_chunk_offset(l, dev_extent);
		btrfs_release_path(path);

		/*
		 * We may be relocating the only data chunk we have,
		 * which could potentially end up with losing data's
		 * raid profile, so lets allocate an empty one in
		 * advance.
		 */
		ret = btrfs_may_alloc_data_chunk(fs_info, chunk_offset);
		if (ret < 0) {
			mutex_unlock(&fs_info->delete_unused_bgs_mutex);
			goto done;
		}

		ret = btrfs_relocate_chunk(fs_info, chunk_offset);
		mutex_unlock(&fs_info->delete_unused_bgs_mutex);
		if (ret == -ENOSPC) {
			failed++;
		} else if (ret) {
			if (ret == -ETXTBSY) {
				btrfs_warn(fs_info,
		   "could not shrink block group %llu due to active swapfile",
					   chunk_offset);
			}
			goto done;
		}
	} while (key.offset-- > 0);

	if (failed && !retried) {
		failed = 0;
		retried = true;
		goto again;
	} else if (failed && retried) {
		ret = -ENOSPC;
		goto done;
	}

	/* Shrinking succeeded, else we would be at "done". */
	trans = btrfs_start_transaction(root, 0);
	if (IS_ERR(trans)) {
		ret = PTR_ERR(trans);
		goto done;
	}

	mutex_lock(&fs_info->chunk_mutex);

	/*
	 * We checked in the above loop all device extents that were already in
	 * the device tree. However before we have updated the device's
	 * total_bytes to the new size, we might have had chunk allocations that
	 * have not complete yet (new block groups attached to transaction
	 * handles), and therefore their device extents were not yet in the
	 * device tree and we missed them in the loop above. So if we have any
	 * pending chunk using a device extent that overlaps the device range
	 * that we can not use anymore, commit the current transaction and
	 * repeat the search on the device tree - this way we guarantee we will
	 * not have chunks using device extents that end beyond 'new_size'.
	 */
	if (!checked_pending_chunks) {
		u64 start = new_size;
		u64 len = old_size - new_size;

		if (contains_pending_extent(trans->transaction, device,
					    &start, len)) {
			mutex_unlock(&fs_info->chunk_mutex);
			checked_pending_chunks = true;
			failed = 0;
			retried = false;
			ret = btrfs_commit_transaction(trans);
			if (ret)
				goto done;
			goto again;
		}
	}

	btrfs_device_set_disk_total_bytes(device, new_size);
	if (list_empty(&device->resized_list))
		list_add_tail(&device->resized_list,
			      &fs_info->fs_devices->resized_devices);

	WARN_ON(diff > old_total);
	btrfs_set_super_total_bytes(super_copy,
			round_down(old_total - diff, fs_info->sectorsize));
	mutex_unlock(&fs_info->chunk_mutex);

	/* Now btrfs_update_device() will change the on-disk size. */
	ret = btrfs_update_device(trans, device);
	if (ret < 0) {
		btrfs_abort_transaction(trans, ret);
		btrfs_end_transaction(trans);
	} else {
		ret = btrfs_commit_transaction(trans);
	}
done:
	btrfs_free_path(path);
	if (ret) {
		/* undo the in-memory size reduction on failure */
		mutex_lock(&fs_info->chunk_mutex);
		btrfs_device_set_total_bytes(device, old_size);
		if (test_bit(BTRFS_DEV_STATE_WRITEABLE, &device->dev_state))
			device->fs_devices->total_rw_bytes += diff;
		atomic64_add(diff, &fs_info->free_chunk_space);
		mutex_unlock(&fs_info->chunk_mutex);
	}
	return ret;
}

/* Append a chunk item (key + chunk) to the superblock's system chunk
 * array; returns -EFBIG if the array would overflow. */
static int btrfs_add_system_chunk(struct btrfs_fs_info *fs_info,
			   struct btrfs_key *key,
			   struct btrfs_chunk *chunk, int item_size)
{
	struct btrfs_super_block *super_copy = fs_info->super_copy;
	struct btrfs_disk_key disk_key;
	u32 array_size;
	u8 *ptr;

	mutex_lock(&fs_info->chunk_mutex);
	array_size = btrfs_super_sys_array_size(super_copy);
	if (array_size + item_size + sizeof(disk_key)
			> BTRFS_SYSTEM_CHUNK_ARRAY_SIZE) {
		mutex_unlock(&fs_info->chunk_mutex);
		return -EFBIG;
	}

	ptr = super_copy->sys_chunk_array + array_size;
	btrfs_cpu_key_to_disk(&disk_key, key);
	memcpy(ptr, &disk_key, sizeof(disk_key));
	ptr += sizeof(disk_key);
	memcpy(ptr, chunk, item_size);
	item_size += sizeof(disk_key);
	btrfs_set_super_sys_array_size(super_copy, array_size + item_size);
mutex_unlock(&fs_info->chunk_mutex); return 0; } /* * sort the devices in descending order by max_avail, total_avail */ static int btrfs_cmp_device_info(const void *a, const void *b) { const struct btrfs_device_info *di_a = a; const struct btrfs_device_info *di_b = b; if (di_a->max_avail > di_b->max_avail) return -1; if (di_a->max_avail < di_b->max_avail) return 1; if (di_a->total_avail > di_b->total_avail) return -1; if (di_a->total_avail < di_b->total_avail) return 1; return 0; } static void check_raid56_incompat_flag(struct btrfs_fs_info *info, u64 type) { if (!(type & BTRFS_BLOCK_GROUP_RAID56_MASK)) return; btrfs_set_fs_incompat(info, RAID56); } #define BTRFS_MAX_DEVS(info) ((BTRFS_MAX_ITEM_SIZE(info) \ - sizeof(struct btrfs_chunk)) \ / sizeof(struct btrfs_stripe) + 1) #define BTRFS_MAX_DEVS_SYS_CHUNK ((BTRFS_SYSTEM_CHUNK_ARRAY_SIZE \ - 2 * sizeof(struct btrfs_disk_key) \ - 2 * sizeof(struct btrfs_chunk)) \ / sizeof(struct btrfs_stripe) + 1) static int __btrfs_alloc_chunk(struct btrfs_trans_handle *trans, u64 start, u64 type) { struct btrfs_fs_info *info = trans->fs_info; struct btrfs_fs_devices *fs_devices = info->fs_devices; struct btrfs_device *device; struct map_lookup *map = NULL; struct extent_map_tree *em_tree; struct extent_map *em; struct btrfs_device_info *devices_info = NULL; u64 total_avail; int num_stripes; /* total number of stripes to allocate */ int data_stripes; /* number of stripes that count for block group size */ int sub_stripes; /* sub_stripes info for map */ int dev_stripes; /* stripes per dev */ int devs_max; /* max devs to use */ int devs_min; /* min devs needed */ int devs_increment; /* ndevs has to be a multiple of this */ int ncopies; /* how many copies to data has */ int nparity; /* number of stripes worth of bytes to store parity information */ int ret; u64 max_stripe_size; u64 max_chunk_size; u64 stripe_size; u64 chunk_size; int ndevs; int i; int j; int index; BUG_ON(!alloc_profile_is_valid(type, 0)); if 
(list_empty(&fs_devices->alloc_list)) { if (btrfs_test_opt(info, ENOSPC_DEBUG)) btrfs_debug(info, "%s: no writable device", __func__); return -ENOSPC; } index = btrfs_bg_flags_to_raid_index(type); sub_stripes = btrfs_raid_array[index].sub_stripes; dev_stripes = btrfs_raid_array[index].dev_stripes; devs_max = btrfs_raid_array[index].devs_max; devs_min = btrfs_raid_array[index].devs_min; devs_increment = btrfs_raid_array[index].devs_increment; ncopies = btrfs_raid_array[index].ncopies; nparity = btrfs_raid_array[index].nparity; if (type & BTRFS_BLOCK_GROUP_DATA) { max_stripe_size = SZ_1G; max_chunk_size = BTRFS_MAX_DATA_CHUNK_SIZE; if (!devs_max) devs_max = BTRFS_MAX_DEVS(info); } else if (type & BTRFS_BLOCK_GROUP_METADATA) { /* for larger filesystems, use larger metadata chunks */ if (fs_devices->total_rw_bytes > 50ULL * SZ_1G) max_stripe_size = SZ_1G; else max_stripe_size = SZ_256M; max_chunk_size = max_stripe_size; if (!devs_max) devs_max = BTRFS_MAX_DEVS(info); } else if (type & BTRFS_BLOCK_GROUP_SYSTEM) { max_stripe_size = SZ_32M; max_chunk_size = 2 * max_stripe_size; if (!devs_max) devs_max = BTRFS_MAX_DEVS_SYS_CHUNK; } else { btrfs_err(info, "invalid chunk type 0x%llx requested", type); BUG_ON(1); } /* We don't want a chunk larger than 10% of writable space */ max_chunk_size = min(div_factor(fs_devices->total_rw_bytes, 1), max_chunk_size); devices_info = kcalloc(fs_devices->rw_devices, sizeof(*devices_info), GFP_NOFS); if (!devices_info) return -ENOMEM; /* * in the first pass through the devices list, we gather information * about the available holes on each device. 
*/ ndevs = 0; list_for_each_entry(device, &fs_devices->alloc_list, dev_alloc_list) { u64 max_avail; u64 dev_offset; if (!test_bit(BTRFS_DEV_STATE_WRITEABLE, &device->dev_state)) { WARN(1, KERN_ERR "BTRFS: read-only device in alloc_list\n"); continue; } if (!test_bit(BTRFS_DEV_STATE_IN_FS_METADATA, &device->dev_state) || test_bit(BTRFS_DEV_STATE_REPLACE_TGT, &device->dev_state)) continue; if (device->total_bytes > device->bytes_used) total_avail = device->total_bytes - device->bytes_used; else total_avail = 0; /* If there is no space on this device, skip it. */ if (total_avail == 0) continue; ret = find_free_dev_extent(trans, device, max_stripe_size * dev_stripes, &dev_offset, &max_avail); if (ret && ret != -ENOSPC) goto error; if (ret == 0) max_avail = max_stripe_size * dev_stripes; if (max_avail < BTRFS_STRIPE_LEN * dev_stripes) { if (btrfs_test_opt(info, ENOSPC_DEBUG)) btrfs_debug(info, "%s: devid %llu has no free space, have=%llu want=%u", __func__, device->devid, max_avail, BTRFS_STRIPE_LEN * dev_stripes); continue; } if (ndevs == fs_devices->rw_devices) { WARN(1, "%s: found more than %llu devices\n", __func__, fs_devices->rw_devices); break; } devices_info[ndevs].dev_offset = dev_offset; devices_info[ndevs].max_avail = max_avail; devices_info[ndevs].total_avail = total_avail; devices_info[ndevs].dev = device; ++ndevs; } /* * now sort the devices by hole size / available space */ sort(devices_info, ndevs, sizeof(struct btrfs_device_info), btrfs_cmp_device_info, NULL); /* round down to number of usable stripes */ ndevs = round_down(ndevs, devs_increment); if (ndevs < devs_min) { ret = -ENOSPC; if (btrfs_test_opt(info, ENOSPC_DEBUG)) { btrfs_debug(info, "%s: not enough devices with free space: have=%d minimum required=%d", __func__, ndevs, devs_min); } goto error; } ndevs = min(ndevs, devs_max); /* * The primary goal is to maximize the number of stripes, so use as * many devices as possible, even if the stripes are not maximum sized. 
* * The DUP profile stores more than one stripe per device, the * max_avail is the total size so we have to adjust. */ stripe_size = div_u64(devices_info[ndevs - 1].max_avail, dev_stripes); num_stripes = ndevs * dev_stripes; /* * this will have to be fixed for RAID1 and RAID10 over * more drives */ data_stripes = (num_stripes - nparity) / ncopies; /* * Use the number of data stripes to figure out how big this chunk * is really going to be in terms of logical address space, * and compare that answer with the max chunk size. If it's higher, * we try to reduce stripe_size. */ if (stripe_size * data_stripes > max_chunk_size) { /* * Reduce stripe_size, round it up to a 16MB boundary again and * then use it, unless it ends up being even bigger than the * previous value we had already. */ stripe_size = min(round_up(div_u64(max_chunk_size, data_stripes), SZ_16M), stripe_size); } /* align to BTRFS_STRIPE_LEN */ stripe_size = round_down(stripe_size, BTRFS_STRIPE_LEN); map = kmalloc(map_lookup_size(num_stripes), GFP_NOFS); if (!map) { ret = -ENOMEM; goto error; } map->num_stripes = num_stripes; for (i = 0; i < ndevs; ++i) { for (j = 0; j < dev_stripes; ++j) { int s = i * dev_stripes + j; map->stripes[s].dev = devices_info[i].dev; map->stripes[s].physical = devices_info[i].dev_offset + j * stripe_size; } } map->stripe_len = BTRFS_STRIPE_LEN; map->io_align = BTRFS_STRIPE_LEN; map->io_width = BTRFS_STRIPE_LEN; map->type = type; map->sub_stripes = sub_stripes; chunk_size = stripe_size * data_stripes; trace_btrfs_chunk_alloc(info, map, start, chunk_size); em = alloc_extent_map(); if (!em) { kfree(map); ret = -ENOMEM; goto error; } set_bit(EXTENT_FLAG_FS_MAPPING, &em->flags); em->map_lookup = map; em->start = start; em->len = chunk_size; em->block_start = 0; em->block_len = em->len; em->orig_block_len = stripe_size; em_tree = &info->mapping_tree.map_tree; write_lock(&em_tree->lock); ret = add_extent_mapping(em_tree, em, 0); if (ret) { write_unlock(&em_tree->lock); 
free_extent_map(em); goto error; } list_add_tail(&em->list, &trans->transaction->pending_chunks); refcount_inc(&em->refs); write_unlock(&em_tree->lock); ret = btrfs_make_block_group(trans, 0, type, start, chunk_size); if (ret) goto error_del_extent; for (i = 0; i < map->num_stripes; i++) btrfs_device_set_bytes_used(map->stripes[i].dev, map->stripes[i].dev->bytes_used + stripe_size); atomic64_sub(stripe_size * map->num_stripes, &info->free_chunk_space); free_extent_map(em); check_raid56_incompat_flag(info, type); kfree(devices_info); return 0; error_del_extent: write_lock(&em_tree->lock); remove_extent_mapping(em_tree, em); write_unlock(&em_tree->lock); /* One for our allocation */ free_extent_map(em); /* One for the tree reference */ free_extent_map(em); /* One for the pending_chunks list reference */ free_extent_map(em); error: kfree(devices_info); return ret; } int btrfs_finish_chunk_alloc(struct btrfs_trans_handle *trans, u64 chunk_offset, u64 chunk_size) { struct btrfs_fs_info *fs_info = trans->fs_info; struct btrfs_root *extent_root = fs_info->extent_root; struct btrfs_root *chunk_root = fs_info->chunk_root; struct btrfs_key key; struct btrfs_device *device; struct btrfs_chunk *chunk; struct btrfs_stripe *stripe; struct extent_map *em; struct map_lookup *map; size_t item_size; u64 dev_offset; u64 stripe_size; int i = 0; int ret = 0; em = btrfs_get_chunk_map(fs_info, chunk_offset, chunk_size); if (IS_ERR(em)) return PTR_ERR(em); map = em->map_lookup; item_size = btrfs_chunk_item_size(map->num_stripes); stripe_size = em->orig_block_len; chunk = kzalloc(item_size, GFP_NOFS); if (!chunk) { ret = -ENOMEM; goto out; } /* * Take the device list mutex to prevent races with the final phase of * a device replace operation that replaces the device object associated * with the map's stripes, because the device object's id can change * at any time during that final phase of the device replace operation * (dev-replace.c:btrfs_dev_replace_finishing()). 
	 */
	mutex_lock(&fs_info->fs_devices->device_list_mutex);
	for (i = 0; i < map->num_stripes; i++) {
		device = map->stripes[i].dev;
		dev_offset = map->stripes[i].physical;

		ret = btrfs_update_device(trans, device);
		if (ret)
			break;
		ret = btrfs_alloc_dev_extent(trans, device, chunk_offset,
					     dev_offset, stripe_size);
		if (ret)
			break;
	}
	if (ret) {
		mutex_unlock(&fs_info->fs_devices->device_list_mutex);
		goto out;
	}

	/* Fill in the per-stripe records of the chunk item. */
	stripe = &chunk->stripe;
	for (i = 0; i < map->num_stripes; i++) {
		device = map->stripes[i].dev;
		dev_offset = map->stripes[i].physical;

		btrfs_set_stack_stripe_devid(stripe, device->devid);
		btrfs_set_stack_stripe_offset(stripe, dev_offset);
		memcpy(stripe->dev_uuid, device->uuid, BTRFS_UUID_SIZE);
		stripe++;
	}
	mutex_unlock(&fs_info->fs_devices->device_list_mutex);

	btrfs_set_stack_chunk_length(chunk, chunk_size);
	btrfs_set_stack_chunk_owner(chunk, extent_root->root_key.objectid);
	btrfs_set_stack_chunk_stripe_len(chunk, map->stripe_len);
	btrfs_set_stack_chunk_type(chunk, map->type);
	btrfs_set_stack_chunk_num_stripes(chunk, map->num_stripes);
	btrfs_set_stack_chunk_io_align(chunk, map->stripe_len);
	btrfs_set_stack_chunk_io_width(chunk, map->stripe_len);
	btrfs_set_stack_chunk_sector_size(chunk, fs_info->sectorsize);
	btrfs_set_stack_chunk_sub_stripes(chunk, map->sub_stripes);

	key.objectid = BTRFS_FIRST_CHUNK_TREE_OBJECTID;
	key.type = BTRFS_CHUNK_ITEM_KEY;
	key.offset = chunk_offset;

	ret = btrfs_insert_item(trans, chunk_root, &key, chunk, item_size);
	if (ret == 0 && map->type & BTRFS_BLOCK_GROUP_SYSTEM) {
		/*
		 * TODO: Cleanup of inserted chunk root in case of
		 * failure.
		 */
		ret = btrfs_add_system_chunk(fs_info, &key, chunk, item_size);
	}

out:
	kfree(chunk);
	free_extent_map(em);
	return ret;
}

/*
 * Chunk allocation falls into two parts. The first part does work
 * that makes the new allocated chunk usable, but does not do any operation
 * that modifies the chunk tree. The second part does the work that
 * requires modifying the chunk tree. This division is important for the
 * bootstrap process of adding storage to a seed btrfs.
 */
int btrfs_alloc_chunk(struct btrfs_trans_handle *trans, u64 type)
{
	u64 chunk_offset;

	lockdep_assert_held(&trans->fs_info->chunk_mutex);
	chunk_offset = find_next_chunk(trans->fs_info);
	return __btrfs_alloc_chunk(trans, chunk_offset, type);
}

/*
 * Allocate the initial metadata and system chunks for a freshly writable
 * device (e.g. when a seed filesystem gains its first rw device).
 */
static noinline int init_first_rw_device(struct btrfs_trans_handle *trans,
					 struct btrfs_fs_info *fs_info)
{
	u64 chunk_offset;
	u64 sys_chunk_offset;
	u64 alloc_profile;
	int ret;

	chunk_offset = find_next_chunk(fs_info);
	alloc_profile = btrfs_metadata_alloc_profile(fs_info);
	ret = __btrfs_alloc_chunk(trans, chunk_offset, alloc_profile);
	if (ret)
		return ret;

	sys_chunk_offset = find_next_chunk(fs_info);
	alloc_profile = btrfs_system_alloc_profile(fs_info);
	ret = __btrfs_alloc_chunk(trans, sys_chunk_offset, alloc_profile);
	return ret;
}

/*
 * Number of device failures a chunk with this profile can tolerate:
 * 2 for RAID6, 1 for RAID1/RAID10/RAID5/DUP, 0 otherwise.
 */
static inline int btrfs_chunk_max_errors(struct map_lookup *map)
{
	int max_errors;

	if (map->type & (BTRFS_BLOCK_GROUP_RAID1 |
			 BTRFS_BLOCK_GROUP_RAID10 |
			 BTRFS_BLOCK_GROUP_RAID5 |
			 BTRFS_BLOCK_GROUP_DUP)) {
		max_errors = 1;
	} else if (map->type & BTRFS_BLOCK_GROUP_RAID6) {
		max_errors = 2;
	} else {
		max_errors = 0;
	}

	return max_errors;
}

/*
 * Return 1 if the chunk at 'chunk_offset' must be treated as read-only:
 * a stripe lives on a non-writeable device, more devices are missing than
 * the profile can tolerate, or the chunk map cannot be looked up.
 */
int btrfs_chunk_readonly(struct btrfs_fs_info *fs_info, u64 chunk_offset)
{
	struct extent_map *em;
	struct map_lookup *map;
	int readonly = 0;
	int miss_ndevs = 0;
	int i;

	em = btrfs_get_chunk_map(fs_info, chunk_offset, 1);
	if (IS_ERR(em))
		return 1;

	map = em->map_lookup;
	for (i = 0; i < map->num_stripes; i++) {
		if (test_bit(BTRFS_DEV_STATE_MISSING,
					&map->stripes[i].dev->dev_state)) {
			miss_ndevs++;
			continue;
		}
		if (!test_bit(BTRFS_DEV_STATE_WRITEABLE,
					&map->stripes[i].dev->dev_state)) {
			readonly = 1;
			goto end;
		}
	}

	/*
	 * If the number of missing devices is larger than max errors,
	 * we can not write the data into that chunk successfully, so
	 * set it readonly.
	 */
	if (miss_ndevs > btrfs_chunk_max_errors(map))
		readonly = 1;
end:
	free_extent_map(em);
	return readonly;
}

/* Initialize the logical->physical chunk mapping tree. */
void btrfs_mapping_init(struct btrfs_mapping_tree *tree)
{
	extent_map_tree_init(&tree->map_tree);
}

/* Drop every extent mapping from the chunk mapping tree (unmount path). */
void btrfs_mapping_tree_free(struct btrfs_mapping_tree *tree)
{
	struct extent_map *em;

	while (1) {
		write_lock(&tree->map_tree.lock);
		em = lookup_extent_mapping(&tree->map_tree, 0, (u64)-1);
		if (em)
			remove_extent_mapping(&tree->map_tree, em);
		write_unlock(&tree->map_tree.lock);
		if (!em)
			break;
		/* once for us */
		free_extent_map(em);
		/* once for the tree */
		free_extent_map(em);
	}
}

/*
 * Number of copies (mirrors) that can be read for the range starting at
 * 'logical'; one extra is reported while a device replace is running with
 * a target device available.
 */
int btrfs_num_copies(struct btrfs_fs_info *fs_info, u64 logical, u64 len)
{
	struct extent_map *em;
	struct map_lookup *map;
	int ret;

	em = btrfs_get_chunk_map(fs_info, logical, len);
	if (IS_ERR(em))
		/*
		 * We could return errors for these cases, but that could get
		 * ugly and we'd probably do the same thing which is just not do
		 * anything else and exit, so return 1 so the callers don't try
		 * to use other copies.
		 */
		return 1;

	map = em->map_lookup;
	if (map->type & (BTRFS_BLOCK_GROUP_DUP | BTRFS_BLOCK_GROUP_RAID1))
		ret = map->num_stripes;
	else if (map->type & BTRFS_BLOCK_GROUP_RAID10)
		ret = map->sub_stripes;
	else if (map->type & BTRFS_BLOCK_GROUP_RAID5)
		ret = 2;
	else if (map->type & BTRFS_BLOCK_GROUP_RAID6)
		/*
		 * There could be two corrupted data stripes, we need
		 * to loop retry in order to rebuild the correct data.
		 *
		 * Fail a stripe at a time on every retry except the
		 * stripe under reconstruction.
		 */
		ret = map->num_stripes;
	else
		ret = 1;
	free_extent_map(em);

	down_read(&fs_info->dev_replace.rwsem);
	if (btrfs_dev_replace_is_ongoing(&fs_info->dev_replace) &&
	    fs_info->dev_replace.tgtdev)
		ret++;
	up_read(&fs_info->dev_replace.rwsem);

	return ret;
}

/*
 * Length of a full RAID5/6 stripe set at 'logical'; falls back to the
 * sector size for non-RAID56 chunks or failed lookups.
 */
unsigned long btrfs_full_stripe_len(struct btrfs_fs_info *fs_info,
				    u64 logical)
{
	struct extent_map *em;
	struct map_lookup *map;
	unsigned long len = fs_info->sectorsize;

	em = btrfs_get_chunk_map(fs_info, logical, len);

	if (!WARN_ON(IS_ERR(em))) {
		map = em->map_lookup;
		if (map->type & BTRFS_BLOCK_GROUP_RAID56_MASK)
			len = map->stripe_len * nr_data_stripes(map);
		free_extent_map(em);
	}
	return len;
}

/* Return 1 if the range at 'logical' lives in a RAID5/6 chunk. */
int btrfs_is_parity_mirror(struct btrfs_fs_info *fs_info, u64 logical, u64 len)
{
	struct extent_map *em;
	struct map_lookup *map;
	int ret = 0;

	em = btrfs_get_chunk_map(fs_info, logical, len);

	if (!WARN_ON(IS_ERR(em))) {
		map = em->map_lookup;
		if (map->type & BTRFS_BLOCK_GROUP_RAID56_MASK)
			ret = 1;
		free_extent_map(em);
	}
	return ret;
}

/*
 * Pick a live (bdev present) mirror for a RAID1/RAID10 read, spreading
 * load across mirrors via the caller's pid and avoiding the source drive
 * of an ongoing device replace when possible.
 */
static int find_live_mirror(struct btrfs_fs_info *fs_info,
			    struct map_lookup *map, int first,
			    int dev_replace_is_ongoing)
{
	int i;
	int num_stripes;
	int preferred_mirror;
	int tolerance;
	struct btrfs_device *srcdev;

	ASSERT((map->type &
		 (BTRFS_BLOCK_GROUP_RAID1 | BTRFS_BLOCK_GROUP_RAID10)));

	if (map->type & BTRFS_BLOCK_GROUP_RAID10)
		num_stripes = map->sub_stripes;
	else
		num_stripes = map->num_stripes;

	/* pid-based selection spreads reads across the mirrors */
	preferred_mirror = first + current->pid % num_stripes;

	if (dev_replace_is_ongoing &&
	    fs_info->dev_replace.cont_reading_from_srcdev_mode ==
	     BTRFS_DEV_REPLACE_ITEM_CONT_READING_FROM_SRCDEV_MODE_AVOID)
		srcdev = fs_info->dev_replace.srcdev;
	else
		srcdev = NULL;

	/*
	 * try to avoid the drive that is the source drive for a
	 * dev-replace procedure, only choose it if no other non-missing
	 * mirror is available
	 */
	for (tolerance = 0; tolerance < 2; tolerance++) {
		if (map->stripes[preferred_mirror].dev->bdev &&
		    (tolerance || map->stripes[preferred_mirror].dev != srcdev))
			return preferred_mirror;
		for (i = first; i < first + num_stripes; i++) {
			if (map->stripes[i].dev->bdev &&
			    (tolerance || map->stripes[i].dev != srcdev))
				return i;
		}
	}

	/* we couldn't find one that doesn't fail. Just return something
	 * and the io error handling code will clean up eventually
	 */
	return preferred_mirror;
}

/* Comparator used by sort_parity_stripes(): "a sorts after b". */
static inline int parity_smaller(u64 a, u64 b)
{
	return a > b;
}

/* Bubble-sort the stripe set to put the parity/syndrome stripes last */
static void sort_parity_stripes(struct btrfs_bio *bbio, int num_stripes)
{
	struct btrfs_bio_stripe s;
	int i;
	u64 l;
	int again = 1;

	while (again) {
		again = 0;
		for (i = 0; i < num_stripes - 1; i++) {
			/* swap stripes and their raid_map entries together */
			if (parity_smaller(bbio->raid_map[i],
					   bbio->raid_map[i+1])) {
				s = bbio->stripes[i];
				l = bbio->raid_map[i];
				bbio->stripes[i] = bbio->stripes[i+1];
				bbio->raid_map[i] = bbio->raid_map[i+1];
				bbio->stripes[i+1] = s;
				bbio->raid_map[i+1] = l;
				again = 1;
			}
		}
	}
}

/*
 * Allocate a btrfs_bio with room for 'total_stripes' stripe records, a
 * tgtdev map of 'real_stripes' ints and a raid_map, all in one allocation.
 * Cannot fail: uses __GFP_NOFAIL.
 */
static struct btrfs_bio *alloc_btrfs_bio(int total_stripes, int real_stripes)
{
	struct btrfs_bio *bbio = kzalloc(
		 /* the size of the btrfs_bio */
		sizeof(struct btrfs_bio) +
		/* plus the variable array for the stripes */
		sizeof(struct btrfs_bio_stripe) * (total_stripes) +
		/* plus the variable array for the tgt dev */
		sizeof(int) * (real_stripes) +
		/*
		 * plus the raid_map, which includes both the tgt dev
		 * and the stripes
		 */
		sizeof(u64) * (total_stripes),
		GFP_NOFS|__GFP_NOFAIL);

	atomic_set(&bbio->error, 0);
	refcount_set(&bbio->refs, 1);

	return bbio;
}

/* Take an extra reference on a btrfs_bio. */
void btrfs_get_bbio(struct btrfs_bio *bbio)
{
	WARN_ON(!refcount_read(&bbio->refs));
	refcount_inc(&bbio->refs);
}

/* Drop a reference on a btrfs_bio, freeing it on the last put. NULL-safe. */
void btrfs_put_bbio(struct btrfs_bio *bbio)
{
	if (!bbio)
		return;
	if (refcount_dec_and_test(&bbio->refs))
		kfree(bbio);
}

/* can REQ_OP_DISCARD be sent with other REQ like REQ_OP_WRITE? */
/*
 * Please note that, discard won't be sent to target device of device
 * replace.
 */
/*
 * Map a discard request at 'logical' to per-device stripe ranges, returning
 * a btrfs_bio describing which byte ranges to discard on which devices.
 * RAID5/6 chunks are rejected with -EOPNOTSUPP.
 */
static int __btrfs_map_block_for_discard(struct btrfs_fs_info *fs_info,
					 u64 logical, u64 length,
					 struct btrfs_bio **bbio_ret)
{
	struct extent_map *em;
	struct map_lookup *map;
	struct btrfs_bio *bbio;
	u64 offset;
	u64 stripe_nr;
	u64 stripe_nr_end;
	u64 stripe_end_offset;
	u64 stripe_cnt;
	u64 stripe_len;
	u64 stripe_offset;
	u64 num_stripes;
	u32 stripe_index;
	u32 factor = 0;
	u32 sub_stripes = 0;
	u64 stripes_per_dev = 0;
	u32 remaining_stripes = 0;
	u32 last_stripe = 0;
	int ret = 0;
	int i;

	/* discard always return a bbio */
	ASSERT(bbio_ret);

	em = btrfs_get_chunk_map(fs_info, logical, length);
	if (IS_ERR(em))
		return PTR_ERR(em);

	map = em->map_lookup;
	/* we don't discard raid56 yet */
	if (map->type & BTRFS_BLOCK_GROUP_RAID56_MASK) {
		ret = -EOPNOTSUPP;
		goto out;
	}

	offset = logical - em->start;
	/* clamp the discard to the end of this chunk */
	length = min_t(u64, em->len - offset, length);

	stripe_len = map->stripe_len;
	/*
	 * stripe_nr counts the total number of stripes we have to stride
	 * to get to this block
	 */
	stripe_nr = div64_u64(offset, stripe_len);

	/* stripe_offset is the offset of this block in its stripe */
	stripe_offset = offset - stripe_nr * stripe_len;

	stripe_nr_end = round_up(offset + length, map->stripe_len);
	stripe_nr_end = div64_u64(stripe_nr_end, map->stripe_len);
	stripe_cnt = stripe_nr_end - stripe_nr;
	stripe_end_offset = stripe_nr_end * map->stripe_len -
			    (offset + length);
	/*
	 * after this, stripe_nr is the number of stripes on this
	 * device we have to walk to find the data, and stripe_index is
	 * the number of our device in the stripe array
	 */
	num_stripes = 1;
	stripe_index = 0;
	if (map->type & (BTRFS_BLOCK_GROUP_RAID0 |
			 BTRFS_BLOCK_GROUP_RAID10)) {
		if (map->type & BTRFS_BLOCK_GROUP_RAID0)
			sub_stripes = 1;
		else
			sub_stripes = map->sub_stripes;

		factor = map->num_stripes / sub_stripes;
		num_stripes = min_t(u64, map->num_stripes,
				    sub_stripes * stripe_cnt);
		stripe_nr = div_u64_rem(stripe_nr, factor, &stripe_index);
		stripe_index *= sub_stripes;
		stripes_per_dev = div_u64_rem(stripe_cnt, factor,
					      &remaining_stripes);
		div_u64_rem(stripe_nr_end - 1, factor, &last_stripe);
		last_stripe *= sub_stripes;
	} else if (map->type & (BTRFS_BLOCK_GROUP_RAID1 |
				BTRFS_BLOCK_GROUP_DUP)) {
		/* every mirror holds the whole range */
		num_stripes = map->num_stripes;
	} else {
		stripe_nr = div_u64_rem(stripe_nr, map->num_stripes,
					&stripe_index);
	}

	bbio = alloc_btrfs_bio(num_stripes, 0);
	if (!bbio) {
		ret = -ENOMEM;
		goto out;
	}

	for (i = 0; i < num_stripes; i++) {
		bbio->stripes[i].physical =
			map->stripes[stripe_index].physical +
			stripe_offset + stripe_nr * map->stripe_len;
		bbio->stripes[i].dev = map->stripes[stripe_index].dev;

		if (map->type & (BTRFS_BLOCK_GROUP_RAID0 |
				 BTRFS_BLOCK_GROUP_RAID10)) {
			bbio->stripes[i].length = stripes_per_dev *
				map->stripe_len;

			if (i / sub_stripes < remaining_stripes)
				bbio->stripes[i].length +=
					map->stripe_len;

			/*
			 * Special for the first stripe and
			 * the last stripe:
			 *
			 * |-------|...|-------|
			 *     |----------|
			 *    off     end_off
			 */
			if (i < sub_stripes)
				bbio->stripes[i].length -=
					stripe_offset;

			if (stripe_index >= last_stripe &&
			    stripe_index <= (last_stripe +
					     sub_stripes - 1))
				bbio->stripes[i].length -=
					stripe_end_offset;

			if (i == sub_stripes - 1)
				stripe_offset = 0;
		} else {
			bbio->stripes[i].length = length;
		}

		stripe_index++;
		if (stripe_index == map->num_stripes) {
			/* wrapped around: advance to the next stripe row */
			stripe_index = 0;
			stripe_nr++;
		}
	}

	*bbio_ret = bbio;
	bbio->map_type = map->type;
	bbio->num_stripes = num_stripes;
out:
	free_extent_map(em);
	return ret;
}

/*
 * In dev-replace case, for repair case (that's the only case where the mirror
 * is selected explicitly when calling btrfs_map_block), blocks left of the
 * left cursor can also be read from the target drive.
 *
 * For REQ_GET_READ_MIRRORS, the target drive is added as the last one to the
 * array of stripes.
 * For READ, it also needs to be supported using the same mirror number.
 *
 * If the requested block is not left of the left cursor, EIO is returned. This
 * can happen because btrfs_num_copies() returns one more in the dev-replace
 * case.
 */
static int get_extra_mirror_from_replace(struct btrfs_fs_info *fs_info,
					 u64 logical, u64 length,
					 u64 srcdev_devid, int *mirror_num,
					 u64 *physical)
{
	struct btrfs_bio *bbio = NULL;
	int num_stripes;
	int index_srcdev = 0;
	int found = 0;
	u64 physical_of_found = 0;
	int i;
	int ret = 0;

	ret = __btrfs_map_block(fs_info, BTRFS_MAP_GET_READ_MIRRORS,
				logical, &length, &bbio, 0, 0);
	if (ret) {
		ASSERT(bbio == NULL);
		return ret;
	}

	num_stripes = bbio->num_stripes;
	if (*mirror_num > num_stripes) {
		/*
		 * BTRFS_MAP_GET_READ_MIRRORS does not contain this mirror,
		 * that means that the requested area is not left of the left
		 * cursor
		 */
		btrfs_put_bbio(bbio);
		return -EIO;
	}

	/*
	 * process the rest of the function using the mirror_num of the source
	 * drive. Therefore look it up first. At the end, patch the device
	 * pointer to the one of the target drive.
	 */
	for (i = 0; i < num_stripes; i++) {
		if (bbio->stripes[i].dev->devid != srcdev_devid)
			continue;

		/*
		 * In case of DUP, in order to keep it simple, only add the
		 * mirror with the lowest physical address
		 */
		if (found &&
		    physical_of_found <= bbio->stripes[i].physical)
			continue;

		index_srcdev = i;
		found = 1;
		physical_of_found = bbio->stripes[i].physical;
	}

	btrfs_put_bbio(bbio);

	ASSERT(found);
	if (!found)
		return -EIO;

	*mirror_num = index_srcdev + 1;
	*physical = physical_of_found;
	return ret;
}

/*
 * Adjust an already-mapped bbio for an ongoing device replace: duplicate
 * writes to the target device, or expose the target as an extra read
 * mirror for BTRFS_MAP_GET_READ_MIRRORS.  Updates the stripe count and
 * tolerated error count in place.
 */
static void handle_ops_on_dev_replace(enum btrfs_map_op op,
				      struct btrfs_bio **bbio_ret,
				      struct btrfs_dev_replace *dev_replace,
				      int *num_stripes_ret, int *max_errors_ret)
{
	struct btrfs_bio *bbio = *bbio_ret;
	u64 srcdev_devid = dev_replace->srcdev->devid;
	int tgtdev_indexes = 0;
	int num_stripes = *num_stripes_ret;
	int max_errors = *max_errors_ret;
	int i;

	if (op == BTRFS_MAP_WRITE) {
		int index_where_to_add;

		/*
		 * duplicate the write operations while the dev replace
		 * procedure is running. Since the copying of the old disk to
		 * the new disk takes place at run time while the filesystem is
		 * mounted writable, the regular write operations to the old
		 * disk have to be duplicated to go to the new disk as well.
		 *
		 * Note that device->missing is handled by the caller, and that
		 * the write to the old disk is already set up in the stripes
		 * array.
		 */
		index_where_to_add = num_stripes;
		for (i = 0; i < num_stripes; i++) {
			if (bbio->stripes[i].dev->devid == srcdev_devid) {
				/* write to new disk, too */
				struct btrfs_bio_stripe *new =
					bbio->stripes + index_where_to_add;
				struct btrfs_bio_stripe *old =
					bbio->stripes + i;

				new->physical = old->physical;
				new->length = old->length;
				new->dev = dev_replace->tgtdev;
				bbio->tgtdev_map[i] = index_where_to_add;
				index_where_to_add++;
				max_errors++;
				tgtdev_indexes++;
			}
		}
		num_stripes = index_where_to_add;
	} else if (op == BTRFS_MAP_GET_READ_MIRRORS) {
		int index_srcdev = 0;
		int found = 0;
		u64 physical_of_found = 0;

		/*
		 * During the dev-replace procedure, the target drive can also
		 * be used to read data in case it is needed to repair a corrupt
		 * block elsewhere. This is possible if the requested area is
		 * left of the left cursor. In this area, the target drive is a
		 * full copy of the source drive.
		 */
		for (i = 0; i < num_stripes; i++) {
			if (bbio->stripes[i].dev->devid == srcdev_devid) {
				/*
				 * In case of DUP, in order to keep it simple,
				 * only add the mirror with the lowest physical
				 * address
				 */
				if (found &&
				    physical_of_found <=
				     bbio->stripes[i].physical)
					continue;
				index_srcdev = i;
				found = 1;
				physical_of_found = bbio->stripes[i].physical;
			}
		}
		if (found) {
			struct btrfs_bio_stripe *tgtdev_stripe =
				bbio->stripes + num_stripes;

			tgtdev_stripe->physical = physical_of_found;
			tgtdev_stripe->length =
				bbio->stripes[index_srcdev].length;
			tgtdev_stripe->dev = dev_replace->tgtdev;
			bbio->tgtdev_map[index_srcdev] = num_stripes;

			tgtdev_indexes++;
			num_stripes++;
		}
	}

	*num_stripes_ret = num_stripes;
	*max_errors_ret = max_errors;
	bbio->num_tgtdevs = tgtdev_indexes;
	*bbio_ret = bbio;
}

/* True for ops that must address every copy (writes and mirror listing). */
static bool need_full_stripe(enum btrfs_map_op op)
{
	return (op == BTRFS_MAP_WRITE || op == BTRFS_MAP_GET_READ_MIRRORS);
}

/*
 * Core logical->physical mapping: translate ('logical', *length) into a
 * btrfs_bio listing the device/physical extents the operation 'op' must
 * touch, honouring the chunk's RAID profile, mirror selection and any
 * ongoing device replace.  *length is trimmed to what fits the mapping.
 */
static int __btrfs_map_block(struct btrfs_fs_info *fs_info,
			     enum btrfs_map_op op,
			     u64 logical, u64 *length,
			     struct btrfs_bio **bbio_ret,
			     int mirror_num, int need_raid_map)
{
	struct extent_map *em;
	struct map_lookup *map;
	u64 offset;
	u64 stripe_offset;
	u64 stripe_nr;
	u64 stripe_len;
	u32 stripe_index;
	int i;
	int ret = 0;
	int num_stripes;
	int max_errors = 0;
	int tgtdev_indexes = 0;
	struct btrfs_bio *bbio = NULL;
	struct btrfs_dev_replace *dev_replace = &fs_info->dev_replace;
	int dev_replace_is_ongoing = 0;
	int num_alloc_stripes;
	int patch_the_first_stripe_for_dev_replace = 0;
	u64 physical_to_patch_in_first_stripe = 0;
	u64 raid56_full_stripe_start = (u64)-1;

	if (op == BTRFS_MAP_DISCARD)
		return __btrfs_map_block_for_discard(fs_info, logical,
						     *length, bbio_ret);

	em = btrfs_get_chunk_map(fs_info, logical, *length);
	if (IS_ERR(em))
		return PTR_ERR(em);

	map = em->map_lookup;
	offset = logical - em->start;

	stripe_len = map->stripe_len;
	stripe_nr = offset;
	/*
	 * stripe_nr counts the total number of stripes we have to stride
	 * to get to this block
	 */
	stripe_nr = div64_u64(stripe_nr, stripe_len);

	stripe_offset = stripe_nr * stripe_len;
	if (offset < stripe_offset) {
		btrfs_crit(fs_info,
"stripe math has gone wrong, stripe_offset=%llu, offset=%llu, start=%llu, logical=%llu, stripe_len=%llu",
			   stripe_offset, offset, em->start, logical,
			   stripe_len);
		free_extent_map(em);
		return -EINVAL;
	}

	/* stripe_offset is the offset of this block in its stripe*/
	stripe_offset = offset - stripe_offset;

	/* if we're here for raid56, we need to know the stripe aligned start */
	if (map->type & BTRFS_BLOCK_GROUP_RAID56_MASK) {
		unsigned long full_stripe_len = stripe_len * nr_data_stripes(map);
		raid56_full_stripe_start = offset;

		/* allow a write of a full stripe, but make sure we don't
		 * allow straddling of stripes
		 */
		raid56_full_stripe_start = div64_u64(raid56_full_stripe_start,
				full_stripe_len);
		raid56_full_stripe_start *= full_stripe_len;
	}

	if (map->type & BTRFS_BLOCK_GROUP_PROFILE_MASK) {
		u64 max_len;
		/* For writes to RAID[56], allow a full stripeset across all disks.
		   For other RAID types and for RAID[56] reads, just allow a single
		   stripe (on a single disk). */
		if ((map->type & BTRFS_BLOCK_GROUP_RAID56_MASK) &&
		    (op == BTRFS_MAP_WRITE)) {
			max_len = stripe_len * nr_data_stripes(map) -
				(offset - raid56_full_stripe_start);
		} else {
			/* we limit the length of each bio to what fits in a stripe */
			max_len = stripe_len - stripe_offset;
		}
		*length = min_t(u64, em->len - offset, max_len);
	} else {
		*length = em->len - offset;
	}

	/*
	 * This is for when we're called from btrfs_bio_fits_in_stripe and all
	 * it cares about is the length
	 */
	if (!bbio_ret)
		goto out;

	down_read(&dev_replace->rwsem);
	dev_replace_is_ongoing = btrfs_dev_replace_is_ongoing(dev_replace);
	/*
	 * Hold the semaphore for read during the whole operation, write is
	 * requested at commit time but must wait.
	 */
	if (!dev_replace_is_ongoing)
		up_read(&dev_replace->rwsem);

	if (dev_replace_is_ongoing && mirror_num == map->num_stripes + 1 &&
	    !need_full_stripe(op) && dev_replace->tgtdev != NULL) {
		/*
		 * Repair read addressed at the "extra" mirror that only the
		 * replace target provides: translate it to the source drive's
		 * mirror number and remember the physical to patch in later.
		 */
		ret = get_extra_mirror_from_replace(fs_info, logical, *length,
						    dev_replace->srcdev->devid,
						    &mirror_num,
					    &physical_to_patch_in_first_stripe);
		if (ret)
			goto out;
		else
			patch_the_first_stripe_for_dev_replace = 1;
	} else if (mirror_num > map->num_stripes) {
		mirror_num = 0;
	}

	num_stripes = 1;
	stripe_index = 0;
	if (map->type & BTRFS_BLOCK_GROUP_RAID0) {
		stripe_nr = div_u64_rem(stripe_nr, map->num_stripes,
				&stripe_index);
		if (!need_full_stripe(op))
			mirror_num = 1;
	} else if (map->type & BTRFS_BLOCK_GROUP_RAID1) {
		if (need_full_stripe(op))
			num_stripes = map->num_stripes;
		else if (mirror_num)
			stripe_index = mirror_num - 1;
		else {
			stripe_index = find_live_mirror(fs_info, map, 0,
					    dev_replace_is_ongoing);
			mirror_num = stripe_index + 1;
		}

	} else if (map->type & BTRFS_BLOCK_GROUP_DUP) {
		if (need_full_stripe(op)) {
			num_stripes = map->num_stripes;
		} else if (mirror_num) {
			stripe_index = mirror_num - 1;
		} else {
			mirror_num = 1;
		}

	} else if (map->type & BTRFS_BLOCK_GROUP_RAID10) {
		u32 factor = map->num_stripes / map->sub_stripes;

		stripe_nr = div_u64_rem(stripe_nr, factor, &stripe_index);
		stripe_index *= map->sub_stripes;

		if (need_full_stripe(op))
			num_stripes = map->sub_stripes;
		else if (mirror_num)
			stripe_index += mirror_num - 1;
		else {
			int old_stripe_index = stripe_index;
			stripe_index = find_live_mirror(fs_info, map,
					      stripe_index,
					      dev_replace_is_ongoing);
			mirror_num = stripe_index - old_stripe_index + 1;
		}

	} else if (map->type & BTRFS_BLOCK_GROUP_RAID56_MASK) {
		if (need_raid_map && (need_full_stripe(op) || mirror_num > 1)) {
			/* push stripe_nr back to the start of the full stripe */
			stripe_nr = div64_u64(raid56_full_stripe_start,
					stripe_len * nr_data_stripes(map));

			/* RAID[56] write or recovery. Return all stripes */
			num_stripes = map->num_stripes;
			max_errors = nr_parity_stripes(map);

			*length = map->stripe_len;
			stripe_index = 0;
			stripe_offset = 0;
		} else {
			/*
			 * Mirror #0 or #1 means the original data block.
			 * Mirror #2 is RAID5 parity block.
			 * Mirror #3 is RAID6 Q block.
			 */
			stripe_nr = div_u64_rem(stripe_nr,
					nr_data_stripes(map), &stripe_index);
			if (mirror_num > 1)
				stripe_index = nr_data_stripes(map) +
						mirror_num - 2;

			/* We distribute the parity blocks across stripes */
			div_u64_rem(stripe_nr + stripe_index, map->num_stripes,
					&stripe_index);
			if (!need_full_stripe(op) && mirror_num <= 1)
				mirror_num = 1;
		}
	} else {
		/*
		 * after this, stripe_nr is the number of stripes on this
		 * device we have to walk to find the data, and stripe_index is
		 * the number of our device in the stripe array
		 */
		stripe_nr = div_u64_rem(stripe_nr, map->num_stripes,
				&stripe_index);
		mirror_num = stripe_index + 1;
	}
	if (stripe_index >= map->num_stripes) {
		btrfs_crit(fs_info,
			   "stripe index math went horribly wrong, got stripe_index=%u, num_stripes=%u",
			   stripe_index, map->num_stripes);
		ret = -EINVAL;
		goto out;
	}

	num_alloc_stripes = num_stripes;
	if (dev_replace_is_ongoing && dev_replace->tgtdev != NULL) {
		/* reserve room for the duplicated/extra target-dev stripes */
		if (op == BTRFS_MAP_WRITE)
			num_alloc_stripes <<= 1;
		if (op == BTRFS_MAP_GET_READ_MIRRORS)
			num_alloc_stripes++;
		tgtdev_indexes = num_stripes;
	}

	bbio = alloc_btrfs_bio(num_alloc_stripes, tgtdev_indexes);
	if (!bbio) {
		ret = -ENOMEM;
		goto out;
	}
	if (dev_replace_is_ongoing && dev_replace->tgtdev != NULL)
		bbio->tgtdev_map = (int *)(bbio->stripes + num_alloc_stripes);

	/* build raid_map */
	if (map->type & BTRFS_BLOCK_GROUP_RAID56_MASK && need_raid_map &&
	    (need_full_stripe(op) || mirror_num > 1)) {
		u64 tmp;
		unsigned rot;

		/* raid_map lives after the stripes and the tgtdev map */
		bbio->raid_map = (u64 *)((void *)bbio->stripes +
				 sizeof(struct btrfs_bio_stripe) *
				 num_alloc_stripes +
				 sizeof(int) * tgtdev_indexes);

		/* Work out the disk rotation on this stripe-set */
		div_u64_rem(stripe_nr, num_stripes, &rot);

		/* Fill in the logical address of each stripe */
		tmp = stripe_nr * nr_data_stripes(map);
		for (i = 0; i < nr_data_stripes(map); i++)
			bbio->raid_map[(i+rot) % num_stripes] =
				em->start + (tmp + i) * map->stripe_len;

		bbio->raid_map[(i+rot) % map->num_stripes] = RAID5_P_STRIPE;
		if (map->type & BTRFS_BLOCK_GROUP_RAID6)
			bbio->raid_map[(i+rot+1) % num_stripes] =
				RAID6_Q_STRIPE;
	}

	for (i = 0; i < num_stripes; i++) {
		bbio->stripes[i].physical =
			map->stripes[stripe_index].physical +
			stripe_offset +
			stripe_nr * map->stripe_len;
		bbio->stripes[i].dev =
			map->stripes[stripe_index].dev;
		stripe_index++;
	}

	if (need_full_stripe(op))
		max_errors = btrfs_chunk_max_errors(map);

	if (bbio->raid_map)
		sort_parity_stripes(bbio, num_stripes);

	if (dev_replace_is_ongoing && dev_replace->tgtdev != NULL &&
	    need_full_stripe(op)) {
		handle_ops_on_dev_replace(op, &bbio, dev_replace, &num_stripes,
					  &max_errors);
	}

	*bbio_ret = bbio;
	bbio->map_type = map->type;
	bbio->num_stripes = num_stripes;
	bbio->max_errors = max_errors;
	bbio->mirror_num = mirror_num;

	/*
	 * this is the case that REQ_READ && dev_replace_is_ongoing &&
	 * mirror_num == num_stripes + 1 && dev_replace target drive is
	 * available as a mirror
	 */
	if (patch_the_first_stripe_for_dev_replace && num_stripes > 0) {
		WARN_ON(num_stripes > 1);
		bbio->stripes[0].dev = dev_replace->tgtdev;
		bbio->stripes[0].physical = physical_to_patch_in_first_stripe;
		bbio->mirror_num = map->num_stripes + 1;
	}
out:
	if (dev_replace_is_ongoing) {
		lockdep_assert_held(&dev_replace->rwsem);
		/* Unlock and let waiting writers proceed */
		up_read(&dev_replace->rwsem);
	}
	free_extent_map(em);
	return ret;
}

/* Public wrapper around __btrfs_map_block() without a raid map. */
int btrfs_map_block(struct btrfs_fs_info *fs_info, enum btrfs_map_op op,
		      u64 logical, u64 *length,
		      struct btrfs_bio **bbio_ret, int mirror_num)
{
	return __btrfs_map_block(fs_info, op, logical, length, bbio_ret,
				 mirror_num, 0);
}

/* For Scrub/replace */
int btrfs_map_sblock(struct btrfs_fs_info *fs_info, enum btrfs_map_op op,
		     u64 logical, u64 *length,
		     struct btrfs_bio **bbio_ret)
{
	return __btrfs_map_block(fs_info, op, logical, length,
bbio_ret, 0, 1); } int btrfs_rmap_block(struct btrfs_fs_info *fs_info, u64 chunk_start, u64 physical, u64 **logical, int *naddrs, int *stripe_len) { struct extent_map *em; struct map_lookup *map; u64 *buf; u64 bytenr; u64 length; u64 stripe_nr; u64 rmap_len; int i, j, nr = 0; em = btrfs_get_chunk_map(fs_info, chunk_start, 1); if (IS_ERR(em)) return -EIO; map = em->map_lookup; length = em->len; rmap_len = map->stripe_len; if (map->type & BTRFS_BLOCK_GROUP_RAID10) length = div_u64(length, map->num_stripes / map->sub_stripes); else if (map->type & BTRFS_BLOCK_GROUP_RAID0) length = div_u64(length, map->num_stripes); else if (map->type & BTRFS_BLOCK_GROUP_RAID56_MASK) { length = div_u64(length, nr_data_stripes(map)); rmap_len = map->stripe_len * nr_data_stripes(map); } buf = kcalloc(map->num_stripes, sizeof(u64), GFP_NOFS); BUG_ON(!buf); /* -ENOMEM */ for (i = 0; i < map->num_stripes; i++) { if (map->stripes[i].physical > physical || map->stripes[i].physical + length <= physical) continue; stripe_nr = physical - map->stripes[i].physical; stripe_nr = div64_u64(stripe_nr, map->stripe_len); if (map->type & BTRFS_BLOCK_GROUP_RAID10) { stripe_nr = stripe_nr * map->num_stripes + i; stripe_nr = div_u64(stripe_nr, map->sub_stripes); } else if (map->type & BTRFS_BLOCK_GROUP_RAID0) { stripe_nr = stripe_nr * map->num_stripes + i; } /* else if RAID[56], multiply by nr_data_stripes(). 
* Alternatively, just use rmap_len below instead of * map->stripe_len */ bytenr = chunk_start + stripe_nr * rmap_len; WARN_ON(nr >= map->num_stripes); for (j = 0; j < nr; j++) { if (buf[j] == bytenr) break; } if (j == nr) { WARN_ON(nr >= map->num_stripes); buf[nr++] = bytenr; } } *logical = buf; *naddrs = nr; *stripe_len = rmap_len; free_extent_map(em); return 0; } static inline void btrfs_end_bbio(struct btrfs_bio *bbio, struct bio *bio) { bio->bi_private = bbio->private; bio->bi_end_io = bbio->end_io; bio_endio(bio); btrfs_put_bbio(bbio); } static void btrfs_end_bio(struct bio *bio) { struct btrfs_bio *bbio = bio->bi_private; int is_orig_bio = 0; if (bio->bi_status) { atomic_inc(&bbio->error); if (bio->bi_status == BLK_STS_IOERR || bio->bi_status == BLK_STS_TARGET) { unsigned int stripe_index = btrfs_io_bio(bio)->stripe_index; struct btrfs_device *dev; BUG_ON(stripe_index >= bbio->num_stripes); dev = bbio->stripes[stripe_index].dev; if (dev->bdev) { if (bio_op(bio) == REQ_OP_WRITE) btrfs_dev_stat_inc_and_print(dev, BTRFS_DEV_STAT_WRITE_ERRS); else btrfs_dev_stat_inc_and_print(dev, BTRFS_DEV_STAT_READ_ERRS); if (bio->bi_opf & REQ_PREFLUSH) btrfs_dev_stat_inc_and_print(dev, BTRFS_DEV_STAT_FLUSH_ERRS); } } } if (bio == bbio->orig_bio) is_orig_bio = 1; btrfs_bio_counter_dec(bbio->fs_info); if (atomic_dec_and_test(&bbio->stripes_pending)) { if (!is_orig_bio) { bio_put(bio); bio = bbio->orig_bio; } btrfs_io_bio(bio)->mirror_num = bbio->mirror_num; /* only send an error to the higher layers if it is * beyond the tolerance of the btrfs bio */ if (atomic_read(&bbio->error) > bbio->max_errors) { bio->bi_status = BLK_STS_IOERR; } else { /* * this bio is actually up to date, we didn't * go over the max number of errors */ bio->bi_status = BLK_STS_OK; } btrfs_end_bbio(bbio, bio); } else if (!is_orig_bio) { bio_put(bio); } } /* * see run_scheduled_bios for a description of why bios are collected for * async submit. 
* * This will add one bio to the pending list for a device and make sure * the work struct is scheduled. */ static noinline void btrfs_schedule_bio(struct btrfs_device *device, struct bio *bio) { struct btrfs_fs_info *fs_info = device->fs_info; int should_queue = 1; struct btrfs_pending_bios *pending_bios; /* don't bother with additional async steps for reads, right now */ if (bio_op(bio) == REQ_OP_READ) { btrfsic_submit_bio(bio); return; } WARN_ON(bio->bi_next); bio->bi_next = NULL; spin_lock(&device->io_lock); if (op_is_sync(bio->bi_opf)) pending_bios = &device->pending_sync_bios; else pending_bios = &device->pending_bios; if (pending_bios->tail) pending_bios->tail->bi_next = bio; pending_bios->tail = bio; if (!pending_bios->head) pending_bios->head = bio; if (device->running_pending) should_queue = 0; spin_unlock(&device->io_lock); if (should_queue) btrfs_queue_work(fs_info->submit_workers, &device->work); } static void submit_stripe_bio(struct btrfs_bio *bbio, struct bio *bio, u64 physical, int dev_nr, int async) { struct btrfs_device *dev = bbio->stripes[dev_nr].dev; struct btrfs_fs_info *fs_info = bbio->fs_info; bio->bi_private = bbio; btrfs_io_bio(bio)->stripe_index = dev_nr; bio->bi_end_io = btrfs_end_bio; bio->bi_iter.bi_sector = physical >> 9; btrfs_debug_in_rcu(fs_info, "btrfs_map_bio: rw %d 0x%x, sector=%llu, dev=%lu (%s id %llu), size=%u", bio_op(bio), bio->bi_opf, (u64)bio->bi_iter.bi_sector, (u_long)dev->bdev->bd_dev, rcu_str_deref(dev->name), dev->devid, bio->bi_iter.bi_size); bio_set_dev(bio, dev->bdev); btrfs_bio_counter_inc_noblocked(fs_info); if (async) btrfs_schedule_bio(dev, bio); else btrfsic_submit_bio(bio); } static void bbio_error(struct btrfs_bio *bbio, struct bio *bio, u64 logical) { atomic_inc(&bbio->error); if (atomic_dec_and_test(&bbio->stripes_pending)) { /* Should be the original bio. 
*/ WARN_ON(bio != bbio->orig_bio); btrfs_io_bio(bio)->mirror_num = bbio->mirror_num; bio->bi_iter.bi_sector = logical >> 9; if (atomic_read(&bbio->error) > bbio->max_errors) bio->bi_status = BLK_STS_IOERR; else bio->bi_status = BLK_STS_OK; btrfs_end_bbio(bbio, bio); } } blk_status_t btrfs_map_bio(struct btrfs_fs_info *fs_info, struct bio *bio, int mirror_num, int async_submit) { struct btrfs_device *dev; struct bio *first_bio = bio; u64 logical = (u64)bio->bi_iter.bi_sector << 9; u64 length = 0; u64 map_length; int ret; int dev_nr; int total_devs; struct btrfs_bio *bbio = NULL; length = bio->bi_iter.bi_size; map_length = length; btrfs_bio_counter_inc_blocked(fs_info); ret = __btrfs_map_block(fs_info, btrfs_op(bio), logical, &map_length, &bbio, mirror_num, 1); if (ret) { btrfs_bio_counter_dec(fs_info); return errno_to_blk_status(ret); } total_devs = bbio->num_stripes; bbio->orig_bio = first_bio; bbio->private = first_bio->bi_private; bbio->end_io = first_bio->bi_end_io; bbio->fs_info = fs_info; atomic_set(&bbio->stripes_pending, bbio->num_stripes); if ((bbio->map_type & BTRFS_BLOCK_GROUP_RAID56_MASK) && ((bio_op(bio) == REQ_OP_WRITE) || (mirror_num > 1))) { /* In this case, map_length has been set to the length of a single stripe; not the whole write */ if (bio_op(bio) == REQ_OP_WRITE) { ret = raid56_parity_write(fs_info, bio, bbio, map_length); } else { ret = raid56_parity_recover(fs_info, bio, bbio, map_length, mirror_num, 1); } btrfs_bio_counter_dec(fs_info); return errno_to_blk_status(ret); } if (map_length < length) { btrfs_crit(fs_info, "mapping failed logical %llu bio len %llu len %llu", logical, length, map_length); BUG(); } for (dev_nr = 0; dev_nr < total_devs; dev_nr++) { dev = bbio->stripes[dev_nr].dev; if (!dev || !dev->bdev || test_bit(BTRFS_DEV_STATE_MISSING, &dev->dev_state) || (bio_op(first_bio) == REQ_OP_WRITE && !test_bit(BTRFS_DEV_STATE_WRITEABLE, &dev->dev_state))) { bbio_error(bbio, first_bio, logical); continue; } if (dev_nr < total_devs - 1) 
			/* Clone for all but the last stripe; the final
			 * stripe consumes the original bio. */
			bio = btrfs_bio_clone(first_bio);
		else
			bio = first_bio;

		submit_stripe_bio(bbio, bio, bbio->stripes[dev_nr].physical,
				  dev_nr, async_submit);
	}
	btrfs_bio_counter_dec(fs_info);
	return BLK_STS_OK;
}

/*
 * Look up a device by devid/uuid, walking @fs_devices and every chained
 * seed fs_devices list behind it.
 *
 * @fsid:  if non-NULL, only fs_devices whose metadata_uuid matches are
 *         searched; NULL means search every list in the chain.
 *
 * Returns the matching btrfs_device or NULL if none is found.
 */
struct btrfs_device *btrfs_find_device(struct btrfs_fs_devices *fs_devices,
				       u64 devid, u8 *uuid, u8 *fsid)
{
	struct btrfs_device *device;

	while (fs_devices) {
		if (!fsid ||
		    !memcmp(fs_devices->metadata_uuid, fsid, BTRFS_FSID_SIZE)) {
			device = find_device(fs_devices, devid, uuid);
			if (device)
				return device;
		}
		/* Not in this list - descend into the seed list, if any. */
		fs_devices = fs_devices->seed;
	}
	return NULL;
}

/*
 * Allocate a placeholder btrfs_device for a device that the metadata
 * references but that is not currently present, link it into @fs_devices
 * and mark it BTRFS_DEV_STATE_MISSING.
 *
 * Returns the new device or an ERR_PTR() from btrfs_alloc_device().
 */
static struct btrfs_device *add_missing_dev(struct btrfs_fs_devices *fs_devices,
					    u64 devid, u8 *dev_uuid)
{
	struct btrfs_device *device;

	device = btrfs_alloc_device(NULL, &devid, dev_uuid);
	if (IS_ERR(device))
		return device;

	list_add(&device->dev_list, &fs_devices->devices);
	device->fs_devices = fs_devices;
	fs_devices->num_devices++;

	set_bit(BTRFS_DEV_STATE_MISSING, &device->dev_state);
	fs_devices->missing_devices++;

	return device;
}

/**
 * btrfs_alloc_device - allocate struct btrfs_device
 * @fs_info:	used only for generating a new devid, can be NULL if
 *		devid is provided (i.e. @devid != NULL).
 * @devid:	a pointer to devid for this device.  If NULL a new devid
 *		is generated.
 * @uuid:	a pointer to UUID for this device.  If NULL a new UUID
 *		is generated.
 *
 * Return: a pointer to a new &struct btrfs_device on success; ERR_PTR()
 * on error.  Returned struct is not linked onto any lists and must be
 * destroyed with btrfs_free_device.
 */
struct btrfs_device *btrfs_alloc_device(struct btrfs_fs_info *fs_info,
					const u64 *devid,
					const u8 *uuid)
{
	struct btrfs_device *dev;
	u64 tmp;

	/* Need either an explicit devid or an fs_info to generate one. */
	if (WARN_ON(!devid && !fs_info))
		return ERR_PTR(-EINVAL);

	dev = __alloc_device();
	if (IS_ERR(dev))
		return dev;

	if (devid)
		tmp = *devid;
	else {
		int ret;

		ret = find_next_devid(fs_info, &tmp);
		if (ret) {
			btrfs_free_device(dev);
			return ERR_PTR(ret);
		}
	}
	dev->devid = tmp;

	if (uuid)
		memcpy(dev->uuid, uuid, BTRFS_UUID_SIZE);
	else
		generate_random_uuid(dev->uuid);

	btrfs_init_work(&dev->work, btrfs_submit_helper,
			pending_bios_fn, NULL, NULL);

	return dev;
}

/*
 * Validate the fields of an on-disk chunk item (read from @leaf) before it
 * is trusted: stripe counts, alignment, stripe length and type flags.
 *
 * Return -EIO if any error, otherwise return 0.
 */
static int btrfs_check_chunk_valid(struct btrfs_fs_info *fs_info,
				   struct extent_buffer *leaf,
				   struct btrfs_chunk *chunk, u64 logical)
{
	u64 length;
	u64 stripe_len;
	u16 num_stripes;
	u16 sub_stripes;
	u64 type;
	u64 features;
	bool mixed = false;

	length = btrfs_chunk_length(leaf, chunk);
	stripe_len = btrfs_chunk_stripe_len(leaf, chunk);
	num_stripes = btrfs_chunk_num_stripes(leaf, chunk);
	sub_stripes = btrfs_chunk_sub_stripes(leaf, chunk);
	type = btrfs_chunk_type(leaf, chunk);

	if (!num_stripes) {
		btrfs_err(fs_info, "invalid chunk num_stripes: %u",
			  num_stripes);
		return -EIO;
	}
	if (!IS_ALIGNED(logical, fs_info->sectorsize)) {
		btrfs_err(fs_info, "invalid chunk logical %llu", logical);
		return -EIO;
	}
	if (btrfs_chunk_sector_size(leaf, chunk) != fs_info->sectorsize) {
		btrfs_err(fs_info, "invalid chunk sectorsize %u",
			  btrfs_chunk_sector_size(leaf, chunk));
		return -EIO;
	}
	if (!length || !IS_ALIGNED(length, fs_info->sectorsize)) {
		btrfs_err(fs_info, "invalid chunk length %llu", length);
		return -EIO;
	}
	if (!is_power_of_2(stripe_len) || stripe_len != BTRFS_STRIPE_LEN) {
		btrfs_err(fs_info, "invalid chunk stripe length: %llu",
			  stripe_len);
		return -EIO;
	}
	/* Reject any bits outside the known type and profile masks. */
	if (~(BTRFS_BLOCK_GROUP_TYPE_MASK | BTRFS_BLOCK_GROUP_PROFILE_MASK) &
	    type) {
		btrfs_err(fs_info, "unrecognized chunk type: %llu",
			  ~(BTRFS_BLOCK_GROUP_TYPE_MASK |
			    BTRFS_BLOCK_GROUP_PROFILE_MASK) &
btrfs_chunk_type(leaf, chunk)); return -EIO; } if ((type & BTRFS_BLOCK_GROUP_TYPE_MASK) == 0) { btrfs_err(fs_info, "missing chunk type flag: 0x%llx", type); return -EIO; } if ((type & BTRFS_BLOCK_GROUP_SYSTEM) && (type & (BTRFS_BLOCK_GROUP_METADATA | BTRFS_BLOCK_GROUP_DATA))) { btrfs_err(fs_info, "system chunk with data or metadata type: 0x%llx", type); return -EIO; } features = btrfs_super_incompat_flags(fs_info->super_copy); if (features & BTRFS_FEATURE_INCOMPAT_MIXED_GROUPS) mixed = true; if (!mixed) { if ((type & BTRFS_BLOCK_GROUP_METADATA) && (type & BTRFS_BLOCK_GROUP_DATA)) { btrfs_err(fs_info, "mixed chunk type in non-mixed mode: 0x%llx", type); return -EIO; } } if ((type & BTRFS_BLOCK_GROUP_RAID10 && sub_stripes != 2) || (type & BTRFS_BLOCK_GROUP_RAID1 && num_stripes < 1) || (type & BTRFS_BLOCK_GROUP_RAID5 && num_stripes < 2) || (type & BTRFS_BLOCK_GROUP_RAID6 && num_stripes < 3) || (type & BTRFS_BLOCK_GROUP_DUP && num_stripes > 2) || ((type & BTRFS_BLOCK_GROUP_PROFILE_MASK) == 0 && num_stripes != 1)) { btrfs_err(fs_info, "invalid num_stripes:sub_stripes %u:%u for profile %llu", num_stripes, sub_stripes, type & BTRFS_BLOCK_GROUP_PROFILE_MASK); return -EIO; } return 0; } static void btrfs_report_missing_device(struct btrfs_fs_info *fs_info, u64 devid, u8 *uuid, bool error) { if (error) btrfs_err_rl(fs_info, "devid %llu uuid %pU is missing", devid, uuid); else btrfs_warn_rl(fs_info, "devid %llu uuid %pU is missing", devid, uuid); } static int read_one_chunk(struct btrfs_fs_info *fs_info, struct btrfs_key *key, struct extent_buffer *leaf, struct btrfs_chunk *chunk) { struct btrfs_mapping_tree *map_tree = &fs_info->mapping_tree; struct map_lookup *map; struct extent_map *em; u64 logical; u64 length; u64 devid; u8 uuid[BTRFS_UUID_SIZE]; int num_stripes; int ret; int i; logical = key->offset; length = btrfs_chunk_length(leaf, chunk); num_stripes = btrfs_chunk_num_stripes(leaf, chunk); ret = btrfs_check_chunk_valid(fs_info, leaf, chunk, logical); if (ret) return 
ret; read_lock(&map_tree->map_tree.lock); em = lookup_extent_mapping(&map_tree->map_tree, logical, 1); read_unlock(&map_tree->map_tree.lock); /* already mapped? */ if (em && em->start <= logical && em->start + em->len > logical) { free_extent_map(em); return 0; } else if (em) { free_extent_map(em); } em = alloc_extent_map(); if (!em) return -ENOMEM; map = kmalloc(map_lookup_size(num_stripes), GFP_NOFS); if (!map) { free_extent_map(em); return -ENOMEM; } set_bit(EXTENT_FLAG_FS_MAPPING, &em->flags); em->map_lookup = map; em->start = logical; em->len = length; em->orig_start = 0; em->block_start = 0; em->block_len = em->len; map->num_stripes = num_stripes; map->io_width = btrfs_chunk_io_width(leaf, chunk); map->io_align = btrfs_chunk_io_align(leaf, chunk); map->stripe_len = btrfs_chunk_stripe_len(leaf, chunk); map->type = btrfs_chunk_type(leaf, chunk); map->sub_stripes = btrfs_chunk_sub_stripes(leaf, chunk); map->verified_stripes = 0; for (i = 0; i < num_stripes; i++) { map->stripes[i].physical = btrfs_stripe_offset_nr(leaf, chunk, i); devid = btrfs_stripe_devid_nr(leaf, chunk, i); read_extent_buffer(leaf, uuid, (unsigned long) btrfs_stripe_dev_uuid_nr(chunk, i), BTRFS_UUID_SIZE); map->stripes[i].dev = btrfs_find_device(fs_info->fs_devices, devid, uuid, NULL); if (!map->stripes[i].dev && !btrfs_test_opt(fs_info, DEGRADED)) { free_extent_map(em); btrfs_report_missing_device(fs_info, devid, uuid, true); return -ENOENT; } if (!map->stripes[i].dev) { map->stripes[i].dev = add_missing_dev(fs_info->fs_devices, devid, uuid); if (IS_ERR(map->stripes[i].dev)) { free_extent_map(em); btrfs_err(fs_info, "failed to init missing dev %llu: %ld", devid, PTR_ERR(map->stripes[i].dev)); return PTR_ERR(map->stripes[i].dev); } btrfs_report_missing_device(fs_info, devid, uuid, false); } set_bit(BTRFS_DEV_STATE_IN_FS_METADATA, &(map->stripes[i].dev->dev_state)); } write_lock(&map_tree->map_tree.lock); ret = add_extent_mapping(&map_tree->map_tree, em, 0); 
write_unlock(&map_tree->map_tree.lock); if (ret < 0) { btrfs_err(fs_info, "failed to add chunk map, start=%llu len=%llu: %d", em->start, em->len, ret); } free_extent_map(em); return ret; } static void fill_device_from_item(struct extent_buffer *leaf, struct btrfs_dev_item *dev_item, struct btrfs_device *device) { unsigned long ptr; device->devid = btrfs_device_id(leaf, dev_item); device->disk_total_bytes = btrfs_device_total_bytes(leaf, dev_item); device->total_bytes = device->disk_total_bytes; device->commit_total_bytes = device->disk_total_bytes; device->bytes_used = btrfs_device_bytes_used(leaf, dev_item); device->commit_bytes_used = device->bytes_used; device->type = btrfs_device_type(leaf, dev_item); device->io_align = btrfs_device_io_align(leaf, dev_item); device->io_width = btrfs_device_io_width(leaf, dev_item); device->sector_size = btrfs_device_sector_size(leaf, dev_item); WARN_ON(device->devid == BTRFS_DEV_REPLACE_DEVID); clear_bit(BTRFS_DEV_STATE_REPLACE_TGT, &device->dev_state); ptr = btrfs_device_uuid(dev_item); read_extent_buffer(leaf, device->uuid, ptr, BTRFS_UUID_SIZE); } static struct btrfs_fs_devices *open_seed_devices(struct btrfs_fs_info *fs_info, u8 *fsid) { struct btrfs_fs_devices *fs_devices; int ret; lockdep_assert_held(&uuid_mutex); ASSERT(fsid); fs_devices = fs_info->fs_devices->seed; while (fs_devices) { if (!memcmp(fs_devices->fsid, fsid, BTRFS_FSID_SIZE)) return fs_devices; fs_devices = fs_devices->seed; } fs_devices = find_fsid(fsid, NULL); if (!fs_devices) { if (!btrfs_test_opt(fs_info, DEGRADED)) return ERR_PTR(-ENOENT); fs_devices = alloc_fs_devices(fsid, NULL); if (IS_ERR(fs_devices)) return fs_devices; fs_devices->seeding = 1; fs_devices->opened = 1; return fs_devices; } fs_devices = clone_fs_devices(fs_devices); if (IS_ERR(fs_devices)) return fs_devices; ret = open_fs_devices(fs_devices, FMODE_READ, fs_info->bdev_holder); if (ret) { free_fs_devices(fs_devices); fs_devices = ERR_PTR(ret); goto out; } if (!fs_devices->seeding) { 
close_fs_devices(fs_devices); free_fs_devices(fs_devices); fs_devices = ERR_PTR(-EINVAL); goto out; } fs_devices->seed = fs_info->fs_devices->seed; fs_info->fs_devices->seed = fs_devices; out: return fs_devices; } static int read_one_dev(struct btrfs_fs_info *fs_info, struct extent_buffer *leaf, struct btrfs_dev_item *dev_item) { struct btrfs_fs_devices *fs_devices = fs_info->fs_devices; struct btrfs_device *device; u64 devid; int ret; u8 fs_uuid[BTRFS_FSID_SIZE]; u8 dev_uuid[BTRFS_UUID_SIZE]; devid = btrfs_device_id(leaf, dev_item); read_extent_buffer(leaf, dev_uuid, btrfs_device_uuid(dev_item), BTRFS_UUID_SIZE); read_extent_buffer(leaf, fs_uuid, btrfs_device_fsid(dev_item), BTRFS_FSID_SIZE); if (memcmp(fs_uuid, fs_devices->metadata_uuid, BTRFS_FSID_SIZE)) { fs_devices = open_seed_devices(fs_info, fs_uuid); if (IS_ERR(fs_devices)) return PTR_ERR(fs_devices); } device = btrfs_find_device(fs_info->fs_devices, devid, dev_uuid, fs_uuid); if (!device) { if (!btrfs_test_opt(fs_info, DEGRADED)) { btrfs_report_missing_device(fs_info, devid, dev_uuid, true); return -ENOENT; } device = add_missing_dev(fs_devices, devid, dev_uuid); if (IS_ERR(device)) { btrfs_err(fs_info, "failed to add missing dev %llu: %ld", devid, PTR_ERR(device)); return PTR_ERR(device); } btrfs_report_missing_device(fs_info, devid, dev_uuid, false); } else { if (!device->bdev) { if (!btrfs_test_opt(fs_info, DEGRADED)) { btrfs_report_missing_device(fs_info, devid, dev_uuid, true); return -ENOENT; } btrfs_report_missing_device(fs_info, devid, dev_uuid, false); } if (!device->bdev && !test_bit(BTRFS_DEV_STATE_MISSING, &device->dev_state)) { /* * this happens when a device that was properly setup * in the device info lists suddenly goes bad. 
* device->bdev is NULL, and so we have to set * device->missing to one here */ device->fs_devices->missing_devices++; set_bit(BTRFS_DEV_STATE_MISSING, &device->dev_state); } /* Move the device to its own fs_devices */ if (device->fs_devices != fs_devices) { ASSERT(test_bit(BTRFS_DEV_STATE_MISSING, &device->dev_state)); list_move(&device->dev_list, &fs_devices->devices); device->fs_devices->num_devices--; fs_devices->num_devices++; device->fs_devices->missing_devices--; fs_devices->missing_devices++; device->fs_devices = fs_devices; } } if (device->fs_devices != fs_info->fs_devices) { BUG_ON(test_bit(BTRFS_DEV_STATE_WRITEABLE, &device->dev_state)); if (device->generation != btrfs_device_generation(leaf, dev_item)) return -EINVAL; } fill_device_from_item(leaf, dev_item, device); set_bit(BTRFS_DEV_STATE_IN_FS_METADATA, &device->dev_state); if (test_bit(BTRFS_DEV_STATE_WRITEABLE, &device->dev_state) && !test_bit(BTRFS_DEV_STATE_REPLACE_TGT, &device->dev_state)) { device->fs_devices->total_rw_bytes += device->total_bytes; atomic64_add(device->total_bytes - device->bytes_used, &fs_info->free_chunk_space); } ret = 0; return ret; } int btrfs_read_sys_array(struct btrfs_fs_info *fs_info) { struct btrfs_root *root = fs_info->tree_root; struct btrfs_super_block *super_copy = fs_info->super_copy; struct extent_buffer *sb; struct btrfs_disk_key *disk_key; struct btrfs_chunk *chunk; u8 *array_ptr; unsigned long sb_array_offset; int ret = 0; u32 num_stripes; u32 array_size; u32 len = 0; u32 cur_offset; u64 type; struct btrfs_key key; ASSERT(BTRFS_SUPER_INFO_SIZE <= fs_info->nodesize); /* * This will create extent buffer of nodesize, superblock size is * fixed to BTRFS_SUPER_INFO_SIZE. If nodesize > sb size, this will * overallocate but we can keep it as-is, only the first page is used. 
*/ sb = btrfs_find_create_tree_block(fs_info, BTRFS_SUPER_INFO_OFFSET); if (IS_ERR(sb)) return PTR_ERR(sb); set_extent_buffer_uptodate(sb); btrfs_set_buffer_lockdep_class(root->root_key.objectid, sb, 0); /* * The sb extent buffer is artificial and just used to read the system array. * set_extent_buffer_uptodate() call does not properly mark all it's * pages up-to-date when the page is larger: extent does not cover the * whole page and consequently check_page_uptodate does not find all * the page's extents up-to-date (the hole beyond sb), * write_extent_buffer then triggers a WARN_ON. * * Regular short extents go through mark_extent_buffer_dirty/writeback cycle, * but sb spans only this function. Add an explicit SetPageUptodate call * to silence the warning eg. on PowerPC 64. */ if (PAGE_SIZE > BTRFS_SUPER_INFO_SIZE) SetPageUptodate(sb->pages[0]); write_extent_buffer(sb, super_copy, 0, BTRFS_SUPER_INFO_SIZE); array_size = btrfs_super_sys_array_size(super_copy); array_ptr = super_copy->sys_chunk_array; sb_array_offset = offsetof(struct btrfs_super_block, sys_chunk_array); cur_offset = 0; while (cur_offset < array_size) { disk_key = (struct btrfs_disk_key *)array_ptr; len = sizeof(*disk_key); if (cur_offset + len > array_size) goto out_short_read; btrfs_disk_key_to_cpu(&key, disk_key); array_ptr += len; sb_array_offset += len; cur_offset += len; if (key.type == BTRFS_CHUNK_ITEM_KEY) { chunk = (struct btrfs_chunk *)sb_array_offset; /* * At least one btrfs_chunk with one stripe must be * present, exact stripe count check comes afterwards */ len = btrfs_chunk_item_size(1); if (cur_offset + len > array_size) goto out_short_read; num_stripes = btrfs_chunk_num_stripes(sb, chunk); if (!num_stripes) { btrfs_err(fs_info, "invalid number of stripes %u in sys_array at offset %u", num_stripes, cur_offset); ret = -EIO; break; } type = btrfs_chunk_type(sb, chunk); if ((type & BTRFS_BLOCK_GROUP_SYSTEM) == 0) { btrfs_err(fs_info, "invalid chunk type %llu in sys_array at offset %u", 
					  type, cur_offset);
				ret = -EIO;
				break;
			}

			len = btrfs_chunk_item_size(num_stripes);
			if (cur_offset + len > array_size)
				goto out_short_read;

			ret = read_one_chunk(fs_info, &key, sb, chunk);
			if (ret)
				break;
		} else {
			/* Only chunk items are valid in the sys_array. */
			btrfs_err(fs_info,
			    "unexpected item type %u in sys_array at offset %u",
				  (u32)key.type, cur_offset);
			ret = -EIO;
			break;
		}
		array_ptr += len;
		sb_array_offset += len;
		cur_offset += len;
	}
	clear_extent_buffer_uptodate(sb);
	free_extent_buffer_stale(sb);
	return ret;

out_short_read:
	btrfs_err(fs_info, "sys_array too short to read %u bytes at offset %u",
			len, cur_offset);
	clear_extent_buffer_uptodate(sb);
	free_extent_buffer_stale(sb);
	return -EIO;
}

/*
 * Check if all chunks in the fs are OK for read-write degraded mount
 *
 * If the @failing_dev is specified, it's accounted as missing.
 *
 * Return true if all chunks meet the minimal RW mount requirements.
 * Return false if any chunk doesn't meet the minimal RW mount requirements.
 */
bool btrfs_check_rw_degradable(struct btrfs_fs_info *fs_info,
					struct btrfs_device *failing_dev)
{
	struct btrfs_mapping_tree *map_tree = &fs_info->mapping_tree;
	struct extent_map *em;
	u64 next_start = 0;
	bool ret = true;

	read_lock(&map_tree->map_tree.lock);
	em = lookup_extent_mapping(&map_tree->map_tree, 0, (u64)-1);
	read_unlock(&map_tree->map_tree.lock);
	/* No chunk at all? Return false anyway */
	if (!em) {
		ret = false;
		goto out;
	}
	/* Walk every chunk mapping and count unusable stripes per chunk. */
	while (em) {
		struct map_lookup *map;
		int missing = 0;
		int max_tolerated;
		int i;

		map = em->map_lookup;
		max_tolerated =
			btrfs_get_num_tolerated_disk_barrier_failures(
					map->type);
		for (i = 0; i < map->num_stripes; i++) {
			struct btrfs_device *dev = map->stripes[i].dev;

			if (!dev || !dev->bdev ||
			    test_bit(BTRFS_DEV_STATE_MISSING, &dev->dev_state) ||
			    dev->last_flush_error)
				missing++;
			else if (failing_dev && failing_dev == dev)
				missing++;
		}
		if (missing > max_tolerated) {
			if (!failing_dev)
				btrfs_warn(fs_info,
	"chunk %llu missing %d devices, max tolerance is %d for writable mount",
				   em->start, missing, max_tolerated);
			free_extent_map(em);
			ret = false;
			goto out;
		}
		next_start = extent_map_end(em);
		free_extent_map(em);

		read_lock(&map_tree->map_tree.lock);
		em = lookup_extent_mapping(&map_tree->map_tree, next_start,
					   (u64)(-1) - next_start);
		read_unlock(&map_tree->map_tree.lock);
	}
out:
	return ret;
}

/*
 * Read every device item and chunk item from the chunk tree into memory,
 * then cross-check the totals against the superblock.  Called at mount.
 */
int btrfs_read_chunk_tree(struct btrfs_fs_info *fs_info)
{
	struct btrfs_root *root = fs_info->chunk_root;
	struct btrfs_path *path;
	struct extent_buffer *leaf;
	struct btrfs_key key;
	struct btrfs_key found_key;
	int ret;
	int slot;
	u64 total_dev = 0;

	path = btrfs_alloc_path();
	if (!path)
		return -ENOMEM;

	/*
	 * uuid_mutex is needed only if we are mounting a sprout FS
	 * otherwise we don't need it.
	 */
	mutex_lock(&uuid_mutex);
	mutex_lock(&fs_info->chunk_mutex);

	/*
	 * Read all device items, and then all the chunk items. All
	 * device items are found before any chunk item (their object id
	 * is smaller than the lowest possible object id for a chunk
	 * item - BTRFS_FIRST_CHUNK_TREE_OBJECTID).
	 */
	key.objectid = BTRFS_DEV_ITEMS_OBJECTID;
	key.offset = 0;
	key.type = 0;
	ret = btrfs_search_slot(NULL, root, &key, path, 0, 0);
	if (ret < 0)
		goto error;
	while (1) {
		leaf = path->nodes[0];
		slot = path->slots[0];
		if (slot >= btrfs_header_nritems(leaf)) {
			ret = btrfs_next_leaf(root, path);
			if (ret == 0)
				continue;
			if (ret < 0)
				goto error;
			break;
		}
		btrfs_item_key_to_cpu(leaf, &found_key, slot);
		if (found_key.type == BTRFS_DEV_ITEM_KEY) {
			struct btrfs_dev_item *dev_item;

			dev_item = btrfs_item_ptr(leaf, slot,
						  struct btrfs_dev_item);
			ret = read_one_dev(fs_info, leaf, dev_item);
			if (ret)
				goto error;
			total_dev++;
		} else if (found_key.type == BTRFS_CHUNK_ITEM_KEY) {
			struct btrfs_chunk *chunk;

			chunk = btrfs_item_ptr(leaf, slot, struct btrfs_chunk);
			ret = read_one_chunk(fs_info, &found_key, leaf, chunk);
			if (ret)
				goto error;
		}
		path->slots[0]++;
	}

	/*
	 * After loading chunk tree, we've got all device information,
	 * do another round of validation checks.
	 */
	if (total_dev != fs_info->fs_devices->total_devices) {
		btrfs_err(fs_info,
	   "super_num_devices %llu mismatch with num_devices %llu found here",
			  btrfs_super_num_devices(fs_info->super_copy),
			  total_dev);
		ret = -EINVAL;
		goto error;
	}
	if (btrfs_super_total_bytes(fs_info->super_copy) <
	    fs_info->fs_devices->total_rw_bytes) {
		btrfs_err(fs_info,
	"super_total_bytes %llu mismatch with fs_devices total_rw_bytes %llu",
			  btrfs_super_total_bytes(fs_info->super_copy),
			  fs_info->fs_devices->total_rw_bytes);
		ret = -EINVAL;
		goto error;
	}
	ret = 0;
error:
	mutex_unlock(&fs_info->chunk_mutex);
	mutex_unlock(&uuid_mutex);

	btrfs_free_path(path);
	return ret;
}

/*
 * Point every known device (including devices on chained seed lists) at
 * this fs_info; runs late in mount once fs_info is fully set up.
 */
void btrfs_init_devices_late(struct btrfs_fs_info *fs_info)
{
	struct btrfs_fs_devices *fs_devices = fs_info->fs_devices;
	struct btrfs_device *device;

	while (fs_devices) {
		mutex_lock(&fs_devices->device_list_mutex);
		list_for_each_entry(device, &fs_devices->devices, dev_list)
			device->fs_info = fs_info;
		mutex_unlock(&fs_devices->device_list_mutex);

		fs_devices = fs_devices->seed;
	}
}

static void
__btrfs_reset_dev_stats(struct btrfs_device *dev) { int i; for (i = 0; i < BTRFS_DEV_STAT_VALUES_MAX; i++) btrfs_dev_stat_reset(dev, i); } int btrfs_init_dev_stats(struct btrfs_fs_info *fs_info) { struct btrfs_key key; struct btrfs_key found_key; struct btrfs_root *dev_root = fs_info->dev_root; struct btrfs_fs_devices *fs_devices = fs_info->fs_devices; struct extent_buffer *eb; int slot; int ret = 0; struct btrfs_device *device; struct btrfs_path *path = NULL; int i; path = btrfs_alloc_path(); if (!path) { ret = -ENOMEM; goto out; } mutex_lock(&fs_devices->device_list_mutex); list_for_each_entry(device, &fs_devices->devices, dev_list) { int item_size; struct btrfs_dev_stats_item *ptr; key.objectid = BTRFS_DEV_STATS_OBJECTID; key.type = BTRFS_PERSISTENT_ITEM_KEY; key.offset = device->devid; ret = btrfs_search_slot(NULL, dev_root, &key, path, 0, 0); if (ret) { __btrfs_reset_dev_stats(device); device->dev_stats_valid = 1; btrfs_release_path(path); continue; } slot = path->slots[0]; eb = path->nodes[0]; btrfs_item_key_to_cpu(eb, &found_key, slot); item_size = btrfs_item_size_nr(eb, slot); ptr = btrfs_item_ptr(eb, slot, struct btrfs_dev_stats_item); for (i = 0; i < BTRFS_DEV_STAT_VALUES_MAX; i++) { if (item_size >= (1 + i) * sizeof(__le64)) btrfs_dev_stat_set(device, i, btrfs_dev_stats_value(eb, ptr, i)); else btrfs_dev_stat_reset(device, i); } device->dev_stats_valid = 1; btrfs_dev_stat_print_on_load(device); btrfs_release_path(path); } mutex_unlock(&fs_devices->device_list_mutex); out: btrfs_free_path(path); return ret < 0 ? 
ret : 0; } static int update_dev_stat_item(struct btrfs_trans_handle *trans, struct btrfs_device *device) { struct btrfs_fs_info *fs_info = trans->fs_info; struct btrfs_root *dev_root = fs_info->dev_root; struct btrfs_path *path; struct btrfs_key key; struct extent_buffer *eb; struct btrfs_dev_stats_item *ptr; int ret; int i; key.objectid = BTRFS_DEV_STATS_OBJECTID; key.type = BTRFS_PERSISTENT_ITEM_KEY; key.offset = device->devid; path = btrfs_alloc_path(); if (!path) return -ENOMEM; ret = btrfs_search_slot(trans, dev_root, &key, path, -1, 1); if (ret < 0) { btrfs_warn_in_rcu(fs_info, "error %d while searching for dev_stats item for device %s", ret, rcu_str_deref(device->name)); goto out; } if (ret == 0 && btrfs_item_size_nr(path->nodes[0], path->slots[0]) < sizeof(*ptr)) { /* need to delete old one and insert a new one */ ret = btrfs_del_item(trans, dev_root, path); if (ret != 0) { btrfs_warn_in_rcu(fs_info, "delete too small dev_stats item for device %s failed %d", rcu_str_deref(device->name), ret); goto out; } ret = 1; } if (ret == 1) { /* need to insert a new item */ btrfs_release_path(path); ret = btrfs_insert_empty_item(trans, dev_root, path, &key, sizeof(*ptr)); if (ret < 0) { btrfs_warn_in_rcu(fs_info, "insert dev_stats item for device %s failed %d", rcu_str_deref(device->name), ret); goto out; } } eb = path->nodes[0]; ptr = btrfs_item_ptr(eb, path->slots[0], struct btrfs_dev_stats_item); for (i = 0; i < BTRFS_DEV_STAT_VALUES_MAX; i++) btrfs_set_dev_stats_value(eb, ptr, i, btrfs_dev_stat_read(device, i)); btrfs_mark_buffer_dirty(eb); out: btrfs_free_path(path); return ret; } /* * called from commit_transaction. Writes all changed device stats to disk. 
*/
int btrfs_run_dev_stats(struct btrfs_trans_handle *trans,
			struct btrfs_fs_info *fs_info)
{
	struct btrfs_fs_devices *fs_devices = fs_info->fs_devices;
	struct btrfs_device *device;
	int stats_cnt;
	int ret = 0;

	mutex_lock(&fs_devices->device_list_mutex);
	list_for_each_entry(device, &fs_devices->devices, dev_list) {
		stats_cnt = atomic_read(&device->dev_stats_ccnt);
		if (!device->dev_stats_valid || stats_cnt == 0)
			continue;

		/*
		 * There is a LOAD-LOAD control dependency between the value of
		 * dev_stats_ccnt and updating the on-disk values which requires
		 * reading the in-memory counters. Such control dependencies
		 * require explicit read memory barriers.
		 *
		 * This memory barrier pairs with smp_mb__before_atomic in
		 * btrfs_dev_stat_inc/btrfs_dev_stat_set and with the full
		 * barrier implied by atomic_xchg in
		 * btrfs_dev_stats_read_and_reset
		 */
		smp_rmb();

		ret = update_dev_stat_item(trans, device);
		if (!ret)
			atomic_sub(stats_cnt, &device->dev_stats_ccnt);
	}
	mutex_unlock(&fs_devices->device_list_mutex);

	return ret;
}

/* Bump one error counter and log the updated totals (if stats are valid). */
void btrfs_dev_stat_inc_and_print(struct btrfs_device *dev, int index)
{
	btrfs_dev_stat_inc(dev, index);
	btrfs_dev_stat_print_on_error(dev);
}

static void btrfs_dev_stat_print_on_error(struct btrfs_device *dev)
{
	if (!dev->dev_stats_valid)
		return;
	btrfs_err_rl_in_rcu(dev->fs_info,
		"bdev %s errs: wr %u, rd %u, flush %u, corrupt %u, gen %u",
			   rcu_str_deref(dev->name),
			   btrfs_dev_stat_read(dev, BTRFS_DEV_STAT_WRITE_ERRS),
			   btrfs_dev_stat_read(dev, BTRFS_DEV_STAT_READ_ERRS),
			   btrfs_dev_stat_read(dev, BTRFS_DEV_STAT_FLUSH_ERRS),
			   btrfs_dev_stat_read(dev, BTRFS_DEV_STAT_CORRUPTION_ERRS),
			   btrfs_dev_stat_read(dev, BTRFS_DEV_STAT_GENERATION_ERRS));
}

static void btrfs_dev_stat_print_on_load(struct btrfs_device *dev)
{
	int i;

	for (i = 0; i < BTRFS_DEV_STAT_VALUES_MAX; i++)
		if (btrfs_dev_stat_read(dev, i) != 0)
			break;
	if (i == BTRFS_DEV_STAT_VALUES_MAX)
		return; /* all values == 0, suppress message */

	btrfs_info_in_rcu(dev->fs_info,
		"bdev %s errs: wr %u, rd %u, flush %u, corrupt %u, gen %u",
	       rcu_str_deref(dev->name),
	       btrfs_dev_stat_read(dev, BTRFS_DEV_STAT_WRITE_ERRS),
	       btrfs_dev_stat_read(dev, BTRFS_DEV_STAT_READ_ERRS),
	       btrfs_dev_stat_read(dev, BTRFS_DEV_STAT_FLUSH_ERRS),
	       btrfs_dev_stat_read(dev, BTRFS_DEV_STAT_CORRUPTION_ERRS),
	       btrfs_dev_stat_read(dev, BTRFS_DEV_STAT_GENERATION_ERRS));
}

/*
 * Copy (and optionally reset, per BTRFS_DEV_STATS_RESET) the per-device
 * error counters into the ioctl reply.  Returns -ENODEV when the device is
 * unknown or its stats are not yet valid.
 */
int btrfs_get_dev_stats(struct btrfs_fs_info *fs_info,
			struct btrfs_ioctl_get_dev_stats *stats)
{
	struct btrfs_device *dev;
	struct btrfs_fs_devices *fs_devices = fs_info->fs_devices;
	int i;

	mutex_lock(&fs_devices->device_list_mutex);
	dev = btrfs_find_device(fs_info->fs_devices, stats->devid, NULL, NULL);
	mutex_unlock(&fs_devices->device_list_mutex);

	if (!dev) {
		btrfs_warn(fs_info, "get dev_stats failed, device not found");
		return -ENODEV;
	} else if (!dev->dev_stats_valid) {
		btrfs_warn(fs_info, "get dev_stats failed, not yet valid");
		return -ENODEV;
	} else if (stats->flags & BTRFS_DEV_STATS_RESET) {
		for (i = 0; i < BTRFS_DEV_STAT_VALUES_MAX; i++) {
			if (stats->nr_items > i)
				stats->values[i] =
					btrfs_dev_stat_read_and_reset(dev, i);
			else
				btrfs_dev_stat_reset(dev, i);
		}
	} else {
		for (i = 0; i < BTRFS_DEV_STAT_VALUES_MAX; i++)
			if (stats->nr_items > i)
				stats->values[i] = btrfs_dev_stat_read(dev, i);
	}
	if (stats->nr_items > BTRFS_DEV_STAT_VALUES_MAX)
		stats->nr_items = BTRFS_DEV_STAT_VALUES_MAX;
	return 0;
}

/*
 * Wipe the magic of every superblock copy on @bdev so the device is no
 * longer recognized as btrfs, then notify udev and refresh the device
 * node's timestamps for libblkid.
 */
void btrfs_scratch_superblocks(struct block_device *bdev, const char *device_path)
{
	struct buffer_head *bh;
	struct btrfs_super_block *disk_super;
	int copy_num;

	if (!bdev)
		return;

	for (copy_num = 0; copy_num < BTRFS_SUPER_MIRROR_MAX; copy_num++) {
		if (btrfs_read_dev_one_super(bdev, copy_num, &bh))
			continue;

		disk_super = (struct btrfs_super_block *)bh->b_data;

		memset(&disk_super->magic, 0, sizeof(disk_super->magic));
		set_buffer_dirty(bh);
		sync_dirty_buffer(bh);
		brelse(bh);
	}

	/* Notify udev that device has changed */
	btrfs_kobject_uevent(bdev, KOBJ_CHANGE);

	/* Update ctime/mtime for device path for libblkid */
	update_dev_time(device_path);
}

/*
 * Update the size
of all devices, which is used for writing out the * super blocks. */ void btrfs_update_commit_device_size(struct btrfs_fs_info *fs_info) { struct btrfs_fs_devices *fs_devices = fs_info->fs_devices; struct btrfs_device *curr, *next; if (list_empty(&fs_devices->resized_devices)) return; mutex_lock(&fs_devices->device_list_mutex); mutex_lock(&fs_info->chunk_mutex); list_for_each_entry_safe(curr, next, &fs_devices->resized_devices, resized_list) { list_del_init(&curr->resized_list); curr->commit_total_bytes = curr->disk_total_bytes; } mutex_unlock(&fs_info->chunk_mutex); mutex_unlock(&fs_devices->device_list_mutex); } /* Must be invoked during the transaction commit */ void btrfs_update_commit_device_bytes_used(struct btrfs_transaction *trans) { struct btrfs_fs_info *fs_info = trans->fs_info; struct extent_map *em; struct map_lookup *map; struct btrfs_device *dev; int i; if (list_empty(&trans->pending_chunks)) return; /* In order to kick the device replace finish process */ mutex_lock(&fs_info->chunk_mutex); list_for_each_entry(em, &trans->pending_chunks, list) { map = em->map_lookup; for (i = 0; i < map->num_stripes; i++) { dev = map->stripes[i].dev; dev->commit_bytes_used = dev->bytes_used; } } mutex_unlock(&fs_info->chunk_mutex); } void btrfs_set_fs_info_ptr(struct btrfs_fs_info *fs_info) { struct btrfs_fs_devices *fs_devices = fs_info->fs_devices; while (fs_devices) { fs_devices->fs_info = fs_info; fs_devices = fs_devices->seed; } } void btrfs_reset_fs_info_ptr(struct btrfs_fs_info *fs_info) { struct btrfs_fs_devices *fs_devices = fs_info->fs_devices; while (fs_devices) { fs_devices->fs_info = NULL; fs_devices = fs_devices->seed; } } /* * Multiplicity factor for simple profiles: DUP, RAID1-like and RAID10. 
*/
int btrfs_bg_type_to_factor(u64 flags)
{
	if (flags & (BTRFS_BLOCK_GROUP_DUP | BTRFS_BLOCK_GROUP_RAID1 |
		     BTRFS_BLOCK_GROUP_RAID10))
		return 2;
	return 1;
}

/* Length of one stripe, derived from the chunk length and profile layout. */
static u64 calc_stripe_length(u64 type, u64 chunk_len, int num_stripes)
{
	int index = btrfs_bg_flags_to_raid_index(type);
	int ncopies = btrfs_raid_array[index].ncopies;
	int data_stripes;

	switch (type & BTRFS_BLOCK_GROUP_PROFILE_MASK) {
	case BTRFS_BLOCK_GROUP_RAID5:
		data_stripes = num_stripes - 1;
		break;
	case BTRFS_BLOCK_GROUP_RAID6:
		data_stripes = num_stripes - 2;
		break;
	default:
		data_stripes = num_stripes / ncopies;
		break;
	}
	return div_u64(chunk_len, data_stripes);
}

/*
 * Cross-check one dev extent against the chunk mapping: it must belong to a
 * known chunk, have the expected stripe length, match one of the chunk's
 * stripes and lie fully inside its device.  Returns 0 or -EUCLEAN.
 */
static int verify_one_dev_extent(struct btrfs_fs_info *fs_info,
				 u64 chunk_offset, u64 devid,
				 u64 physical_offset, u64 physical_len)
{
	struct extent_map_tree *em_tree = &fs_info->mapping_tree.map_tree;
	struct extent_map *em;
	struct map_lookup *map;
	struct btrfs_device *dev;
	u64 stripe_len;
	bool found = false;
	int ret = 0;
	int i;

	read_lock(&em_tree->lock);
	em = lookup_extent_mapping(em_tree, chunk_offset, 1);
	read_unlock(&em_tree->lock);

	if (!em) {
		btrfs_err(fs_info,
"dev extent physical offset %llu on devid %llu doesn't have corresponding chunk",
			  physical_offset, devid);
		ret = -EUCLEAN;
		goto out;
	}

	map = em->map_lookup;
	stripe_len = calc_stripe_length(map->type, em->len, map->num_stripes);
	if (physical_len != stripe_len) {
		btrfs_err(fs_info,
"dev extent physical offset %llu on devid %llu length doesn't match chunk %llu, have %llu expect %llu",
			  physical_offset, devid, em->start, physical_len,
			  stripe_len);
		ret = -EUCLEAN;
		goto out;
	}

	for (i = 0; i < map->num_stripes; i++) {
		if (map->stripes[i].dev->devid == devid &&
		    map->stripes[i].physical == physical_offset) {
			found = true;
			if (map->verified_stripes >= map->num_stripes) {
				btrfs_err(fs_info,
				"too many dev extents for chunk %llu found",
					  em->start);
				ret = -EUCLEAN;
				goto out;
			}
			map->verified_stripes++;
			break;
		}
	}
	if (!found) {
		btrfs_err(fs_info,
	"dev extent physical offset %llu devid %llu has no corresponding chunk",
			  physical_offset, devid);
		ret = -EUCLEAN;
	}

	/* Make sure no dev extent is beyond device boundary */
	dev = btrfs_find_device(fs_info->fs_devices, devid, NULL, NULL);
	if (!dev) {
		btrfs_err(fs_info, "failed to find devid %llu", devid);
		ret = -EUCLEAN;
		goto out;
	}

	/* It's possible this device is a dummy for seed device */
	if (dev->disk_total_bytes == 0) {
		dev = find_device(fs_info->fs_devices->seed, devid, NULL);
		if (!dev) {
			btrfs_err(fs_info, "failed to find seed devid %llu",
				  devid);
			ret = -EUCLEAN;
			goto out;
		}
	}

	if (physical_offset + physical_len > dev->disk_total_bytes) {
		btrfs_err(fs_info,
"dev extent devid %llu physical offset %llu len %llu is beyond device boundary %llu",
			  devid, physical_offset, physical_len,
			  dev->disk_total_bytes);
		ret = -EUCLEAN;
		goto out;
	}
out:
	free_extent_map(em);
	return ret;
}

/* Ensure every chunk saw exactly num_stripes matching dev extents. */
static int verify_chunk_dev_extent_mapping(struct btrfs_fs_info *fs_info)
{
	struct extent_map_tree *em_tree = &fs_info->mapping_tree.map_tree;
	struct extent_map *em;
	struct rb_node *node;
	int ret = 0;

	read_lock(&em_tree->lock);
	for (node = rb_first_cached(&em_tree->map); node; node = rb_next(node)) {
		em = rb_entry(node, struct extent_map, rb_node);
		if (em->map_lookup->num_stripes !=
		    em->map_lookup->verified_stripes) {
			btrfs_err(fs_info,
			"chunk %llu has missing dev extent, have %d expect %d",
				  em->start, em->map_lookup->verified_stripes,
				  em->map_lookup->num_stripes);
			ret = -EUCLEAN;
			goto out;
		}
	}
out:
	read_unlock(&em_tree->lock);
	return ret;
}

/*
 * Ensure that all dev extents are mapped to correct chunk, otherwise
 * later chunk allocation/free would cause unexpected behavior.
 *
 * NOTE: This will iterate through the whole device tree, which should be of
 * the same size level as the chunk tree.  This slightly increases mount time.
*/ int btrfs_verify_dev_extents(struct btrfs_fs_info *fs_info) { struct btrfs_path *path; struct btrfs_root *root = fs_info->dev_root; struct btrfs_key key; u64 prev_devid = 0; u64 prev_dev_ext_end = 0; int ret = 0; key.objectid = 1; key.type = BTRFS_DEV_EXTENT_KEY; key.offset = 0; path = btrfs_alloc_path(); if (!path) return -ENOMEM; path->reada = READA_FORWARD; ret = btrfs_search_slot(NULL, root, &key, path, 0, 0); if (ret < 0) goto out; if (path->slots[0] >= btrfs_header_nritems(path->nodes[0])) { ret = btrfs_next_item(root, path); if (ret < 0) goto out; /* No dev extents at all? Not good */ if (ret > 0) { ret = -EUCLEAN; goto out; } } while (1) { struct extent_buffer *leaf = path->nodes[0]; struct btrfs_dev_extent *dext; int slot = path->slots[0]; u64 chunk_offset; u64 physical_offset; u64 physical_len; u64 devid; btrfs_item_key_to_cpu(leaf, &key, slot); if (key.type != BTRFS_DEV_EXTENT_KEY) break; devid = key.objectid; physical_offset = key.offset; dext = btrfs_item_ptr(leaf, slot, struct btrfs_dev_extent); chunk_offset = btrfs_dev_extent_chunk_offset(leaf, dext); physical_len = btrfs_dev_extent_length(leaf, dext); /* Check if this dev extent overlaps with the previous one */ if (devid == prev_devid && physical_offset < prev_dev_ext_end) { btrfs_err(fs_info, "dev extent devid %llu physical offset %llu overlap with previous dev extent end %llu", devid, physical_offset, prev_dev_ext_end); ret = -EUCLEAN; goto out; } ret = verify_one_dev_extent(fs_info, chunk_offset, devid, physical_offset, physical_len); if (ret < 0) goto out; prev_devid = devid; prev_dev_ext_end = physical_offset + physical_len; ret = btrfs_next_item(root, path); if (ret < 0) goto out; if (ret > 0) { ret = 0; break; } } /* Ensure all chunks have corresponding dev extents */ ret = verify_chunk_dev_extent_mapping(fs_info); out: btrfs_free_path(path); return ret; } /* * Check whether the given block group or device is pinned by any inode being * used as a swapfile. 
*/ bool btrfs_pinned_by_swapfile(struct btrfs_fs_info *fs_info, void *ptr) { struct btrfs_swapfile_pin *sp; struct rb_node *node; spin_lock(&fs_info->swapfile_pins_lock); node = fs_info->swapfile_pins.rb_node; while (node) { sp = rb_entry(node, struct btrfs_swapfile_pin, node); if (ptr < sp->ptr) node = node->rb_left; else if (ptr > sp->ptr) node = node->rb_right; else break; } spin_unlock(&fs_info->swapfile_pins_lock); return node != NULL; }
// SPDX-License-Identifier: GPL-2.0
/*
 * Copyright (C) 2007 Oracle. All rights reserved.
 */

#include <linux/sched.h>
#include <linux/bio.h>
#include <linux/slab.h>
#include <linux/buffer_head.h>
#include <linux/blkdev.h>
#include <linux/ratelimit.h>
#include <linux/kthread.h>
#include <linux/raid/pq.h>
#include <linux/semaphore.h>
#include <linux/uuid.h>
#include <linux/list_sort.h>
#include "ctree.h"
#include "extent_map.h"
#include "disk-io.h"
#include "transaction.h"
#include "print-tree.h"
#include "volumes.h"
#include "raid56.h"
#include "async-thread.h"
#include "check-integrity.h"
#include "rcu-string.h"
#include "math.h"
#include "dev-replace.h"
#include "sysfs.h"

/* Per-profile layout parameters, indexed by enum btrfs_raid_types. */
const struct btrfs_raid_attr btrfs_raid_array[BTRFS_NR_RAID_TYPES] = {
	[BTRFS_RAID_RAID10] = {
		.sub_stripes	= 2,
		.dev_stripes	= 1,
		.devs_max	= 0,	/* 0 == as many as possible */
		.devs_min	= 4,
		.tolerated_failures = 1,
		.devs_increment	= 2,
		.ncopies	= 2,
		.nparity	= 0,
		.raid_name	= "raid10",
		.bg_flag	= BTRFS_BLOCK_GROUP_RAID10,
		.mindev_error	= BTRFS_ERROR_DEV_RAID10_MIN_NOT_MET,
	},
	[BTRFS_RAID_RAID1] = {
		.sub_stripes	= 1,
		.dev_stripes	= 1,
		.devs_max	= 2,
		.devs_min	= 2,
		.tolerated_failures = 1,
		.devs_increment	= 2,
		.ncopies	= 2,
		.nparity	= 0,
		.raid_name	= "raid1",
		.bg_flag	= BTRFS_BLOCK_GROUP_RAID1,
		.mindev_error	= BTRFS_ERROR_DEV_RAID1_MIN_NOT_MET,
	},
	[BTRFS_RAID_DUP] = {
		.sub_stripes	= 1,
		.dev_stripes	= 2,
		.devs_max	= 1,
		.devs_min	= 1,
		.tolerated_failures = 0,
		.devs_increment	= 1,
		.ncopies	= 2,
		.nparity	= 0,
		.raid_name	= "dup",
		.bg_flag	= BTRFS_BLOCK_GROUP_DUP,
		.mindev_error	= 0,
	},
	[BTRFS_RAID_RAID0] = {
		.sub_stripes	= 1,
		.dev_stripes	= 1,
		.devs_max	= 0,
		.devs_min	= 2,
		.tolerated_failures = 0,
		.devs_increment	= 1,
		.ncopies	= 1,
		.nparity	= 0,
		.raid_name	= "raid0",
		.bg_flag	= BTRFS_BLOCK_GROUP_RAID0,
		.mindev_error	= 0,
	},
	[BTRFS_RAID_SINGLE] = {
		.sub_stripes	= 1,
		.dev_stripes	= 1,
		.devs_max	= 1,
		.devs_min	= 1,
		.tolerated_failures = 0,
		.devs_increment	= 1,
		.ncopies	= 1,
		.nparity	= 0,
		.raid_name	= "single",
		.bg_flag	= 0,
		.mindev_error	= 0,
	},
	[BTRFS_RAID_RAID5] = {
		.sub_stripes	= 1,
		.dev_stripes	= 1,
		.devs_max	= 0,
		.devs_min	= 2,
		.tolerated_failures = 1,
		.devs_increment	= 1,
		.ncopies	= 1,
		.nparity	= 1,
		.raid_name	= "raid5",
		.bg_flag	= BTRFS_BLOCK_GROUP_RAID5,
		.mindev_error	= BTRFS_ERROR_DEV_RAID5_MIN_NOT_MET,
	},
	[BTRFS_RAID_RAID6] = {
		.sub_stripes	= 1,
		.dev_stripes	= 1,
		.devs_max	= 0,
		.devs_min	= 3,
		.tolerated_failures = 2,
		.devs_increment	= 1,
		.ncopies	= 1,
		.nparity	= 2,
		.raid_name	= "raid6",
		.bg_flag	= BTRFS_BLOCK_GROUP_RAID6,
		.mindev_error	= BTRFS_ERROR_DEV_RAID6_MIN_NOT_MET,
	},
};

/* Human-readable name of a RAID profile, or NULL for out-of-range types. */
const char *get_raid_name(enum btrfs_raid_types type)
{
	if (type >= BTRFS_NR_RAID_TYPES)
		return NULL;

	return btrfs_raid_array[type].raid_name;
}

/*
 * Fill @buf with textual description of @bg_flags, no more than @size_buf
 * bytes including terminating null byte.
 */
void btrfs_describe_block_groups(u64 bg_flags, char *buf, u32 size_buf)
{
	int i;
	int ret;
	char *bp = buf;
	u64 flags = bg_flags;
	u32 size_bp = size_buf;

	if (!flags) {
		strcpy(bp, "NONE");
		return;
	}

#define DESCRIBE_FLAG(flag, desc)					\
	do {								\
		if (flags & (flag)) {					\
			ret = snprintf(bp, size_bp, "%s|", (desc));	\
			if (ret < 0 || ret >= size_bp)			\
				goto out_overflow;			\
			size_bp -= ret;					\
			bp += ret;					\
			flags &= ~(flag);				\
		}							\
	} while (0)

	DESCRIBE_FLAG(BTRFS_BLOCK_GROUP_DATA, "data");
	DESCRIBE_FLAG(BTRFS_BLOCK_GROUP_SYSTEM, "system");
	DESCRIBE_FLAG(BTRFS_BLOCK_GROUP_METADATA, "metadata");

	DESCRIBE_FLAG(BTRFS_AVAIL_ALLOC_BIT_SINGLE, "single");
	for (i = 0; i < BTRFS_NR_RAID_TYPES; i++)
		DESCRIBE_FLAG(btrfs_raid_array[i].bg_flag,
			      btrfs_raid_array[i].raid_name);
#undef DESCRIBE_FLAG

	if (flags) {
		ret = snprintf(bp, size_bp, "0x%llx|", flags);
		size_bp -= ret;
	}

	if (size_bp < size_buf)
		buf[size_buf - size_bp - 1] = '\0'; /* remove last | */

	/*
	 * The text is trimmed, it's up to the caller to provide sufficiently
	 * large buffer
	 */
out_overflow:;
}

static int init_first_rw_device(struct btrfs_trans_handle *trans,
				struct btrfs_fs_info
*fs_info); static int btrfs_relocate_sys_chunks(struct btrfs_fs_info *fs_info); static void __btrfs_reset_dev_stats(struct btrfs_device *dev); static void btrfs_dev_stat_print_on_error(struct btrfs_device *dev); static void btrfs_dev_stat_print_on_load(struct btrfs_device *device); static int __btrfs_map_block(struct btrfs_fs_info *fs_info, enum btrfs_map_op op, u64 logical, u64 *length, struct btrfs_bio **bbio_ret, int mirror_num, int need_raid_map); /* * Device locking * ============== * * There are several mutexes that protect manipulation of devices and low-level * structures like chunks but not block groups, extents or files * * uuid_mutex (global lock) * ------------------------ * protects the fs_uuids list that tracks all per-fs fs_devices, resulting from * the SCAN_DEV ioctl registration or from mount either implicitly (the first * device) or requested by the device= mount option * * the mutex can be very coarse and can cover long-running operations * * protects: updates to fs_devices counters like missing devices, rw devices, * seeding, structure cloning, opening/closing devices at mount/umount time * * global::fs_devs - add, remove, updates to the global list * * does not protect: manipulation of the fs_devices::devices list! * * btrfs_device::name - renames (write side), read is RCU * * fs_devices::device_list_mutex (per-fs, with RCU) * ------------------------------------------------ * protects updates to fs_devices::devices, ie. 
adding and deleting * * simple list traversal with read-only actions can be done with RCU protection * * may be used to exclude some operations from running concurrently without any * modifications to the list (see write_all_supers) * * balance_mutex * ------------- * protects balance structures (status, state) and context accessed from * several places (internally, ioctl) * * chunk_mutex * ----------- * protects chunks, adding or removing during allocation, trim or when a new * device is added/removed * * cleaner_mutex * ------------- * a big lock that is held by the cleaner thread and prevents running subvolume * cleaning together with relocation or delayed iputs * * * Lock nesting * ============ * * uuid_mutex * volume_mutex * device_list_mutex * chunk_mutex * balance_mutex * * * Exclusive operations, BTRFS_FS_EXCL_OP * ====================================== * * Maintains the exclusivity of the following operations that apply to the * whole filesystem and cannot run in parallel. * * - Balance (*) * - Device add * - Device remove * - Device replace (*) * - Resize * * The device operations (as above) can be in one of the following states: * * - Running state * - Paused state * - Completed state * * Only device operations marked with (*) can go into the Paused state for the * following reasons: * * - ioctl (only Balance can be Paused through ioctl) * - filesystem remounted as read-only * - filesystem unmounted and mounted as read-only * - system power-cycle and filesystem mounted as read-only * - filesystem or device errors leading to forced read-only * * BTRFS_FS_EXCL_OP flag is set and cleared using atomic operations. * During the course of Paused state, the BTRFS_FS_EXCL_OP remains set. * A device operation in Paused or Running state can be canceled or resumed * either by ioctl (Balance only) or when remounted as read-write. * BTRFS_FS_EXCL_OP flag is cleared when the device operation is canceled or * completed. 
*/
DEFINE_MUTEX(uuid_mutex);
static LIST_HEAD(fs_uuids);

struct list_head *btrfs_get_fs_uuids(void)
{
	return &fs_uuids;
}

/*
 * alloc_fs_devices - allocate struct btrfs_fs_devices
 * @fsid:		if not NULL, copy the UUID to fs_devices::fsid
 * @metadata_fsid:	if not NULL, copy the UUID to fs_devices::metadata_fsid
 *
 * Return a pointer to a new struct btrfs_fs_devices on success, or ERR_PTR().
 * The returned struct is not linked onto any lists and can be destroyed with
 * kfree() right away.
 */
static struct btrfs_fs_devices *alloc_fs_devices(const u8 *fsid,
						 const u8 *metadata_fsid)
{
	struct btrfs_fs_devices *fs_devs;

	fs_devs = kzalloc(sizeof(*fs_devs), GFP_KERNEL);
	if (!fs_devs)
		return ERR_PTR(-ENOMEM);

	mutex_init(&fs_devs->device_list_mutex);

	INIT_LIST_HEAD(&fs_devs->devices);
	INIT_LIST_HEAD(&fs_devs->resized_devices);
	INIT_LIST_HEAD(&fs_devs->alloc_list);
	INIT_LIST_HEAD(&fs_devs->fs_list);
	if (fsid)
		memcpy(fs_devs->fsid, fsid, BTRFS_FSID_SIZE);

	if (metadata_fsid)
		memcpy(fs_devs->metadata_uuid, metadata_fsid, BTRFS_FSID_SIZE);
	else if (fsid)
		memcpy(fs_devs->metadata_uuid, fsid, BTRFS_FSID_SIZE);

	return fs_devs;
}

/* Free a btrfs_device along with its name and preallocated flush bio. */
void btrfs_free_device(struct btrfs_device *device)
{
	rcu_string_free(device->name);
	bio_put(device->flush_bio);
	kfree(device);
}

static void free_fs_devices(struct btrfs_fs_devices *fs_devices)
{
	struct btrfs_device *device;

	/* Callers must have closed the devices first. */
	WARN_ON(fs_devices->opened);
	while (!list_empty(&fs_devices->devices)) {
		device = list_entry(fs_devices->devices.next,
				    struct btrfs_device, dev_list);
		list_del(&device->dev_list);
		btrfs_free_device(device);
	}
	kfree(fs_devices);
}

static void btrfs_kobject_uevent(struct block_device *bdev,
				 enum kobject_action action)
{
	int ret;

	ret = kobject_uevent(&disk_to_dev(bdev->bd_disk)->kobj, action);
	if (ret)
		pr_warn("BTRFS: Sending event '%d' to kobject: '%s' (%p): failed\n",
			action,
			kobject_name(&disk_to_dev(bdev->bd_disk)->kobj),
			&disk_to_dev(bdev->bd_disk)->kobj);
}

void __exit btrfs_cleanup_fs_uuids(void)
{
	struct btrfs_fs_devices *fs_devices;

	while (!list_empty(&fs_uuids)) {
		fs_devices = list_entry(fs_uuids.next,
					struct btrfs_fs_devices, fs_list);
		list_del(&fs_devices->fs_list);
		free_fs_devices(fs_devices);
	}
}

/*
 * Returns a pointer to a new btrfs_device on success; ERR_PTR() on error.
 * Returned struct is not linked onto any lists and must be destroyed using
 * btrfs_free_device.
 */
static struct btrfs_device *__alloc_device(void)
{
	struct btrfs_device *dev;

	dev = kzalloc(sizeof(*dev), GFP_KERNEL);
	if (!dev)
		return ERR_PTR(-ENOMEM);

	/*
	 * Preallocate a bio that's always going to be used for flushing device
	 * barriers and matches the device lifespan
	 */
	dev->flush_bio = bio_alloc_bioset(GFP_KERNEL, 0, NULL);
	if (!dev->flush_bio) {
		kfree(dev);
		return ERR_PTR(-ENOMEM);
	}

	INIT_LIST_HEAD(&dev->dev_list);
	INIT_LIST_HEAD(&dev->dev_alloc_list);
	INIT_LIST_HEAD(&dev->resized_list);

	spin_lock_init(&dev->io_lock);

	atomic_set(&dev->reada_in_flight, 0);
	atomic_set(&dev->dev_stats_ccnt, 0);
	btrfs_device_data_ordered_init(dev);
	INIT_RADIX_TREE(&dev->reada_zones, GFP_NOFS & ~__GFP_DIRECT_RECLAIM);
	INIT_RADIX_TREE(&dev->reada_extents, GFP_NOFS & ~__GFP_DIRECT_RECLAIM);

	return dev;
}

/*
 * Look up the registered fs_devices matching @fsid (and, when given,
 * @metadata_fsid), including the split-brain cases left behind by an
 * interrupted fsid change.  Returns NULL when no match is registered.
 */
static noinline struct btrfs_fs_devices *find_fsid(
		const u8 *fsid, const u8 *metadata_fsid)
{
	struct btrfs_fs_devices *fs_devices;

	ASSERT(fsid);

	if (metadata_fsid) {
		/*
		 * Handle scanned device having completed its fsid change but
		 * belonging to a fs_devices that was created by first scanning
		 * a device which didn't have its fsid/metadata_uuid changed
		 * at all and the CHANGING_FSID_V2 flag set.
		 */
		list_for_each_entry(fs_devices, &fs_uuids, fs_list) {
			if (fs_devices->fsid_change &&
			    memcmp(metadata_fsid, fs_devices->fsid,
				   BTRFS_FSID_SIZE) == 0 &&
			    memcmp(fs_devices->fsid, fs_devices->metadata_uuid,
				   BTRFS_FSID_SIZE) == 0) {
				return fs_devices;
			}
		}
		/*
		 * Handle scanned device having completed its fsid change but
		 * belonging to a fs_devices that was created by a device that
		 * has an outdated pair of fsid/metadata_uuid and
		 * CHANGING_FSID_V2 flag set.
		 */
		list_for_each_entry(fs_devices, &fs_uuids, fs_list) {
			if (fs_devices->fsid_change &&
			    memcmp(fs_devices->metadata_uuid,
				   fs_devices->fsid, BTRFS_FSID_SIZE) != 0 &&
			    memcmp(metadata_fsid, fs_devices->metadata_uuid,
				   BTRFS_FSID_SIZE) == 0) {
				return fs_devices;
			}
		}
	}

	/* Handle non-split brain cases */
	list_for_each_entry(fs_devices, &fs_uuids, fs_list) {
		if (metadata_fsid) {
			if (memcmp(fsid, fs_devices->fsid,
				   BTRFS_FSID_SIZE) == 0 &&
			    memcmp(metadata_fsid, fs_devices->metadata_uuid,
				   BTRFS_FSID_SIZE) == 0)
				return fs_devices;
		} else {
			if (memcmp(fsid, fs_devices->fsid, BTRFS_FSID_SIZE) == 0)
				return fs_devices;
		}
	}
	return NULL;
}

/*
 * Open @device_path, flush it if requested, set the btrfs block size and
 * read the primary superblock.  On failure both out-pointers are NULLed.
 */
static int btrfs_get_bdev_and_sb(const char *device_path, fmode_t flags,
				 void *holder, int flush,
				 struct block_device **bdev,
				 struct buffer_head **bh)
{
	int ret;

	*bdev = blkdev_get_by_path(device_path, flags, holder);

	if (IS_ERR(*bdev)) {
		ret = PTR_ERR(*bdev);
		goto error;
	}

	if (flush)
		filemap_write_and_wait((*bdev)->bd_inode->i_mapping);
	ret = set_blocksize(*bdev, BTRFS_BDEV_BLOCKSIZE);
	if (ret) {
		blkdev_put(*bdev, flags);
		goto error;
	}
	invalidate_bdev(*bdev);
	*bh = btrfs_read_dev_super(*bdev);
	if (IS_ERR(*bh)) {
		ret = PTR_ERR(*bh);
		blkdev_put(*bdev, flags);
		goto error;
	}

	return 0;

error:
	*bdev = NULL;
	*bh = NULL;
	return ret;
}

/* Splice the bio chain head..tail back onto the front of the pending list. */
static void requeue_list(struct btrfs_pending_bios *pending_bios,
			 struct bio *head, struct bio *tail)
{
	struct bio *old_head;

	old_head = pending_bios->head;
	pending_bios->head = head;
	if (pending_bios->tail)
		tail->bi_next = old_head;
	else
		pending_bios->tail = tail;
}

/*
 * we try to collect pending bios for a device so we don't get a large
 * number of procs sending bios down to the same device. This greatly
 * improves the schedulers ability to collect and merge the bios.
 *
 * But, it also turns into a long list of bios to process and that is sure
 * to eventually make the worker thread block.
The solution here is to
 * make some progress and then put this work struct back at the end of
 * the list if the block device is congested.  This way, multiple devices
 * can make progress from a single worker thread.
 */
static noinline void run_scheduled_bios(struct btrfs_device *device)
{
	struct btrfs_fs_info *fs_info = device->fs_info;
	struct bio *pending;
	struct backing_dev_info *bdi;
	struct btrfs_pending_bios *pending_bios;
	struct bio *tail;
	struct bio *cur;
	int again = 0;
	unsigned long num_run;
	unsigned long batch_run = 0;
	unsigned long last_waited = 0;
	int force_reg = 0;
	int sync_pending = 0;
	struct blk_plug plug;

	/*
	 * this function runs all the bios we've collected for
	 * a particular device.  We don't want to wander off to
	 * another device without first sending all of these down.
	 * So, setup a plug here and finish it off before we return
	 */
	blk_start_plug(&plug);

	bdi = device->bdev->bd_bdi;

loop:
	spin_lock(&device->io_lock);

loop_lock:
	num_run = 0;

	/* take all the bios off the list at once and process them
	 * later on (without the lock held).  But, remember the
	 * tail and other pointers so the bios can be properly reinserted
	 * into the list if we hit congestion
	 */
	if (!force_reg && device->pending_sync_bios.head) {
		pending_bios = &device->pending_sync_bios;
		force_reg = 1;
	} else {
		pending_bios = &device->pending_bios;
		force_reg = 0;
	}

	pending = pending_bios->head;
	tail = pending_bios->tail;
	WARN_ON(pending && !tail);

	/*
	 * if pending was null this time around, no bios need processing
	 * at all and we can stop.  Otherwise it'll loop back up again
	 * and do an additional check so no bios are missed.
	 *
	 * device->running_pending is used to synchronize with the
	 * schedule_bio code.
	 */
	if (device->pending_sync_bios.head == NULL &&
	    device->pending_bios.head == NULL) {
		again = 0;
		device->running_pending = 0;
	} else {
		again = 1;
		device->running_pending = 1;
	}

	pending_bios->head = NULL;
	pending_bios->tail = NULL;

	spin_unlock(&device->io_lock);

	while (pending) {
		rmb();
		/* we want to work on both lists, but do more bios on the
		 * sync list than the regular list
		 */
		if ((num_run > 32 &&
		    pending_bios != &device->pending_sync_bios &&
		    device->pending_sync_bios.head) ||
		   (num_run > 64 && pending_bios == &device->pending_sync_bios &&
		    device->pending_bios.head)) {
			spin_lock(&device->io_lock);
			requeue_list(pending_bios, pending, tail);
			goto loop_lock;
		}

		cur = pending;
		pending = pending->bi_next;
		cur->bi_next = NULL;

		BUG_ON(atomic_read(&cur->__bi_cnt) == 0);

		/*
		 * if we're doing the sync list, record that our
		 * plug has some sync requests on it
		 *
		 * If we're doing the regular list and there are
		 * sync requests sitting around, unplug before
		 * we add more
		 */
		if (pending_bios == &device->pending_sync_bios) {
			sync_pending = 1;
		} else if (sync_pending) {
			blk_finish_plug(&plug);
			blk_start_plug(&plug);
			sync_pending = 0;
		}

		btrfsic_submit_bio(cur);
		num_run++;
		batch_run++;

		cond_resched();

		/*
		 * we made progress, there is more work to do and the bdi
		 * is now congested.  Back off and let other work structs
		 * run instead
		 */
		if (pending && bdi_write_congested(bdi) && batch_run > 8 &&
		    fs_info->fs_devices->open_devices > 1) {
			struct io_context *ioc;

			ioc = current->io_context;

			/*
			 * the main goal here is that we don't want to
			 * block if we're going to be able to submit
			 * more requests without blocking.
			 *
			 * This code does two great things, it pokes into
			 * the elevator code from a filesystem _and_
			 * it makes assumptions about how batching works.
			 */
			if (ioc && ioc->nr_batch_requests > 0 &&
			    time_before(jiffies, ioc->last_waited + HZ/50UL) &&
			    (last_waited == 0 ||
			     ioc->last_waited == last_waited)) {
				/*
				 * we want to go through our batch of
				 * requests and stop.  So, we copy out
				 * the ioc->last_waited time and test
				 * against it before looping
				 */
				last_waited = ioc->last_waited;
				cond_resched();
				continue;
			}
			spin_lock(&device->io_lock);
			requeue_list(pending_bios, pending, tail);
			device->running_pending = 1;

			spin_unlock(&device->io_lock);
			btrfs_queue_work(fs_info->submit_workers,
					 &device->work);
			goto done;
		}
	}

	cond_resched();
	if (again)
		goto loop;

	spin_lock(&device->io_lock);
	if (device->pending_bios.head || device->pending_sync_bios.head)
		goto loop_lock;
	spin_unlock(&device->io_lock);

done:
	blk_finish_plug(&plug);
}

/* Work-queue entry point: drain the pending bios of the owning device. */
static void pending_bios_fn(struct btrfs_work *work)
{
	struct btrfs_device *device;

	device = container_of(work, struct btrfs_device, work);
	run_scheduled_bios(device);
}

/* True when @path matches the RCU-protected name of @device. */
static bool device_path_matched(const char *path, struct btrfs_device *device)
{
	int found;

	rcu_read_lock();
	found = strcmp(rcu_str_deref(device->name), path);
	rcu_read_unlock();

	return found == 0;
}

/*
 * Search and remove all stale (devices which are not mounted) devices.
 * When both inputs are NULL, it will search and release all stale devices.
 * path:	Optional. When provided will it release all unmounted devices
 *		matching this path only.
 * skip_dev:	Optional. Will skip this device when searching for the stale
 *		devices.
 * Return:	0 for success or if @path is NULL.
 *		-EBUSY if @path is a mounted device.
 *		-ENOENT if @path does not match any device in the list.
*/
static int btrfs_free_stale_devices(const char *path,
				    struct btrfs_device *skip_device)
{
	struct btrfs_fs_devices *fs_devices, *tmp_fs_devices;
	struct btrfs_device *device, *tmp_device;
	int ret = 0;

	if (path)
		ret = -ENOENT;

	list_for_each_entry_safe(fs_devices, tmp_fs_devices, &fs_uuids, fs_list) {

		mutex_lock(&fs_devices->device_list_mutex);
		list_for_each_entry_safe(device, tmp_device,
					 &fs_devices->devices, dev_list) {
			if (skip_device && skip_device == device)
				continue;
			if (path && !device->name)
				continue;
			if (path && !device_path_matched(path, device))
				continue;
			if (fs_devices->opened) {
				/* for an already deleted device return 0 */
				if (path && ret != 0)
					ret = -EBUSY;
				break;
			}

			/* delete the stale device */
			fs_devices->num_devices--;
			list_del(&device->dev_list);
			btrfs_free_device(device);

			ret = 0;
			if (fs_devices->num_devices == 0)
				break;
		}
		mutex_unlock(&fs_devices->device_list_mutex);

		if (fs_devices->num_devices == 0) {
			btrfs_sysfs_remove_fsid(fs_devices);
			list_del(&fs_devices->fs_list);
			free_fs_devices(fs_devices);
		}
	}

	return ret;
}

/*
 * Open one member device, validate its superblock against the cached devid
 * and uuid, and account it in @fs_devices (seeding, rotational, rw state).
 * Returns 0 on success, -EINVAL on mismatch, or the open/read error.
 */
static int btrfs_open_one_device(struct btrfs_fs_devices *fs_devices,
			struct btrfs_device *device, fmode_t flags,
			void *holder)
{
	struct request_queue *q;
	struct block_device *bdev;
	struct buffer_head *bh;
	struct btrfs_super_block *disk_super;
	u64 devid;
	int ret;

	if (device->bdev)
		return -EINVAL;
	if (!device->name)
		return -EINVAL;

	ret = btrfs_get_bdev_and_sb(device->name->str, flags, holder, 1,
				    &bdev, &bh);
	if (ret)
		return ret;

	disk_super = (struct btrfs_super_block *)bh->b_data;
	devid = btrfs_stack_device_id(&disk_super->dev_item);
	if (devid != device->devid)
		goto error_brelse;

	if (memcmp(device->uuid, disk_super->dev_item.uuid, BTRFS_UUID_SIZE))
		goto error_brelse;

	device->generation = btrfs_super_generation(disk_super);

	if (btrfs_super_flags(disk_super) & BTRFS_SUPER_FLAG_SEEDING) {
		if (btrfs_super_incompat_flags(disk_super) &
		    BTRFS_FEATURE_INCOMPAT_METADATA_UUID) {
			pr_err(
		"BTRFS: Invalid seeding and uuid-changed device detected\n");
			goto error_brelse;
		}

		clear_bit(BTRFS_DEV_STATE_WRITEABLE, &device->dev_state);
		fs_devices->seeding = 1;
	} else {
		if (bdev_read_only(bdev))
			clear_bit(BTRFS_DEV_STATE_WRITEABLE, &device->dev_state);
		else
			set_bit(BTRFS_DEV_STATE_WRITEABLE, &device->dev_state);
	}

	q = bdev_get_queue(bdev);
	if (!blk_queue_nonrot(q))
		fs_devices->rotating = 1;

	device->bdev = bdev;
	clear_bit(BTRFS_DEV_STATE_IN_FS_METADATA, &device->dev_state);
	device->mode = flags;

	fs_devices->open_devices++;
	if (test_bit(BTRFS_DEV_STATE_WRITEABLE, &device->dev_state) &&
	    device->devid != BTRFS_DEV_REPLACE_DEVID) {
		fs_devices->rw_devices++;
		list_add_tail(&device->dev_alloc_list, &fs_devices->alloc_list);
	}
	brelse(bh);

	return 0;

error_brelse:
	brelse(bh);
	blkdev_put(bdev, flags);

	return -EINVAL;
}

/*
 * Handle scanned device having its CHANGING_FSID_V2 flag set and the fs_devices
 * being created with a disk that has already completed its fsid change.
 */
static struct btrfs_fs_devices *find_fsid_inprogress(
					struct btrfs_super_block *disk_super)
{
	struct btrfs_fs_devices *fs_devices;

	list_for_each_entry(fs_devices, &fs_uuids, fs_list) {
		if (memcmp(fs_devices->metadata_uuid, fs_devices->fsid,
			   BTRFS_FSID_SIZE) != 0 &&
		    memcmp(fs_devices->metadata_uuid, disk_super->fsid,
			   BTRFS_FSID_SIZE) == 0 &&
		    !fs_devices->fsid_change) {
			return fs_devices;
		}
	}
	return NULL;
}

static struct btrfs_fs_devices *find_fsid_changed(
					struct btrfs_super_block *disk_super)
{
	struct btrfs_fs_devices *fs_devices;

	/*
	 * Handles the case where scanned device is part of an fs that had
	 * multiple successful changes of FSID but currently device didn't
	 * observe it. Meaning our fsid will be different than theirs.
*/ list_for_each_entry(fs_devices, &fs_uuids, fs_list) { if (memcmp(fs_devices->metadata_uuid, fs_devices->fsid, BTRFS_FSID_SIZE) != 0 && memcmp(fs_devices->metadata_uuid, disk_super->metadata_uuid, BTRFS_FSID_SIZE) == 0 && memcmp(fs_devices->fsid, disk_super->fsid, BTRFS_FSID_SIZE) != 0) { return fs_devices; } } return NULL; } /* * Add new device to list of registered devices * * Returns: * device pointer which was just added or updated when successful * error pointer when failed */ static noinline struct btrfs_device *device_list_add(const char *path, struct btrfs_super_block *disk_super, bool *new_device_added) { struct btrfs_device *device; struct btrfs_fs_devices *fs_devices = NULL; struct rcu_string *name; u64 found_transid = btrfs_super_generation(disk_super); u64 devid = btrfs_stack_device_id(&disk_super->dev_item); bool has_metadata_uuid = (btrfs_super_incompat_flags(disk_super) & BTRFS_FEATURE_INCOMPAT_METADATA_UUID); bool fsid_change_in_progress = (btrfs_super_flags(disk_super) & BTRFS_SUPER_FLAG_CHANGING_FSID_V2); if (fsid_change_in_progress) { if (!has_metadata_uuid) { /* * When we have an image which has CHANGING_FSID_V2 set * it might belong to either a filesystem which has * disks with completed fsid change or it might belong * to fs with no UUID changes in effect, handle both. 
*/ fs_devices = find_fsid_inprogress(disk_super); if (!fs_devices) fs_devices = find_fsid(disk_super->fsid, NULL); } else { fs_devices = find_fsid_changed(disk_super); } } else if (has_metadata_uuid) { fs_devices = find_fsid(disk_super->fsid, disk_super->metadata_uuid); } else { fs_devices = find_fsid(disk_super->fsid, NULL); } if (!fs_devices) { if (has_metadata_uuid) fs_devices = alloc_fs_devices(disk_super->fsid, disk_super->metadata_uuid); else fs_devices = alloc_fs_devices(disk_super->fsid, NULL); if (IS_ERR(fs_devices)) return ERR_CAST(fs_devices); fs_devices->fsid_change = fsid_change_in_progress; mutex_lock(&fs_devices->device_list_mutex); list_add(&fs_devices->fs_list, &fs_uuids); device = NULL; } else { mutex_lock(&fs_devices->device_list_mutex); device = btrfs_find_device(fs_devices, devid, disk_super->dev_item.uuid, NULL, false); /* * If this disk has been pulled into an fs devices created by * a device which had the CHANGING_FSID_V2 flag then replace the * metadata_uuid/fsid values of the fs_devices. 
*/ if (has_metadata_uuid && fs_devices->fsid_change && found_transid > fs_devices->latest_generation) { memcpy(fs_devices->fsid, disk_super->fsid, BTRFS_FSID_SIZE); memcpy(fs_devices->metadata_uuid, disk_super->metadata_uuid, BTRFS_FSID_SIZE); fs_devices->fsid_change = false; } } if (!device) { if (fs_devices->opened) { mutex_unlock(&fs_devices->device_list_mutex); return ERR_PTR(-EBUSY); } device = btrfs_alloc_device(NULL, &devid, disk_super->dev_item.uuid); if (IS_ERR(device)) { mutex_unlock(&fs_devices->device_list_mutex); /* we can safely leave the fs_devices entry around */ return device; } name = rcu_string_strdup(path, GFP_NOFS); if (!name) { btrfs_free_device(device); mutex_unlock(&fs_devices->device_list_mutex); return ERR_PTR(-ENOMEM); } rcu_assign_pointer(device->name, name); list_add_rcu(&device->dev_list, &fs_devices->devices); fs_devices->num_devices++; device->fs_devices = fs_devices; *new_device_added = true; if (disk_super->label[0]) pr_info("BTRFS: device label %s devid %llu transid %llu %s\n", disk_super->label, devid, found_transid, path); else pr_info("BTRFS: device fsid %pU devid %llu transid %llu %s\n", disk_super->fsid, devid, found_transid, path); } else if (!device->name || strcmp(device->name->str, path)) { /* * When FS is already mounted. * 1. If you are here and if the device->name is NULL that * means this device was missing at time of FS mount. * 2. If you are here and if the device->name is different * from 'path' that means either * a. The same device disappeared and reappeared with * different name. or * b. The missing-disk-which-was-replaced, has * reappeared now. * * We must allow 1 and 2a above. But 2b would be a spurious * and unintentional. * * Further in case of 1 and 2a above, the disk at 'path' * would have missed some transaction when it was away and * in case of 2a the stale bdev has to be updated as well. * 2b must not be allowed at all time. 
*/ /* * For now, we do allow update to btrfs_fs_device through the * btrfs dev scan cli after FS has been mounted. We're still * tracking a problem where systems fail mount by subvolume id * when we reject replacement on a mounted FS. */ if (!fs_devices->opened && found_transid < device->generation) { /* * That is if the FS is _not_ mounted and if you * are here, that means there is more than one * disk with same uuid and devid.We keep the one * with larger generation number or the last-in if * generation are equal. */ mutex_unlock(&fs_devices->device_list_mutex); return ERR_PTR(-EEXIST); } /* * We are going to replace the device path for a given devid, * make sure it's the same device if the device is mounted */ if (device->bdev) { struct block_device *path_bdev; path_bdev = lookup_bdev(path); if (IS_ERR(path_bdev)) { mutex_unlock(&fs_devices->device_list_mutex); return ERR_CAST(path_bdev); } if (device->bdev != path_bdev) { bdput(path_bdev); mutex_unlock(&fs_devices->device_list_mutex); btrfs_warn_in_rcu(device->fs_info, "duplicate device fsid:devid for %pU:%llu old:%s new:%s", disk_super->fsid, devid, rcu_str_deref(device->name), path); return ERR_PTR(-EEXIST); } bdput(path_bdev); btrfs_info_in_rcu(device->fs_info, "device fsid %pU devid %llu moved old:%s new:%s", disk_super->fsid, devid, rcu_str_deref(device->name), path); } name = rcu_string_strdup(path, GFP_NOFS); if (!name) { mutex_unlock(&fs_devices->device_list_mutex); return ERR_PTR(-ENOMEM); } rcu_string_free(device->name); rcu_assign_pointer(device->name, name); if (test_bit(BTRFS_DEV_STATE_MISSING, &device->dev_state)) { fs_devices->missing_devices--; clear_bit(BTRFS_DEV_STATE_MISSING, &device->dev_state); } } /* * Unmount does not free the btrfs_device struct but would zero * generation along with most of the other members. So just update * it back. We need it to pick the disk with largest generation * (as above). 
*/ if (!fs_devices->opened) { device->generation = found_transid; fs_devices->latest_generation = max_t(u64, found_transid, fs_devices->latest_generation); } fs_devices->total_devices = btrfs_super_num_devices(disk_super); mutex_unlock(&fs_devices->device_list_mutex); return device; } static struct btrfs_fs_devices *clone_fs_devices(struct btrfs_fs_devices *orig) { struct btrfs_fs_devices *fs_devices; struct btrfs_device *device; struct btrfs_device *orig_dev; fs_devices = alloc_fs_devices(orig->fsid, NULL); if (IS_ERR(fs_devices)) return fs_devices; mutex_lock(&orig->device_list_mutex); fs_devices->total_devices = orig->total_devices; /* We have held the volume lock, it is safe to get the devices. */ list_for_each_entry(orig_dev, &orig->devices, dev_list) { struct rcu_string *name; device = btrfs_alloc_device(NULL, &orig_dev->devid, orig_dev->uuid); if (IS_ERR(device)) goto error; /* * This is ok to do without rcu read locked because we hold the * uuid mutex so nothing we touch in here is going to disappear. */ if (orig_dev->name) { name = rcu_string_strdup(orig_dev->name->str, GFP_KERNEL); if (!name) { btrfs_free_device(device); goto error; } rcu_assign_pointer(device->name, name); } list_add(&device->dev_list, &fs_devices->devices); device->fs_devices = fs_devices; fs_devices->num_devices++; } mutex_unlock(&orig->device_list_mutex); return fs_devices; error: mutex_unlock(&orig->device_list_mutex); free_fs_devices(fs_devices); return ERR_PTR(-ENOMEM); } /* * After we have read the system tree and know devids belonging to * this filesystem, remove the device which does not belong there. */ void btrfs_free_extra_devids(struct btrfs_fs_devices *fs_devices, int step) { struct btrfs_device *device, *next; struct btrfs_device *latest_dev = NULL; mutex_lock(&uuid_mutex); again: /* This is the initialized path, it is safe to release the devices. 
*/ list_for_each_entry_safe(device, next, &fs_devices->devices, dev_list) { if (test_bit(BTRFS_DEV_STATE_IN_FS_METADATA, &device->dev_state)) { if (!test_bit(BTRFS_DEV_STATE_REPLACE_TGT, &device->dev_state) && (!latest_dev || device->generation > latest_dev->generation)) { latest_dev = device; } continue; } if (device->devid == BTRFS_DEV_REPLACE_DEVID) { /* * In the first step, keep the device which has * the correct fsid and the devid that is used * for the dev_replace procedure. * In the second step, the dev_replace state is * read from the device tree and it is known * whether the procedure is really active or * not, which means whether this device is * used or whether it should be removed. */ if (step == 0 || test_bit(BTRFS_DEV_STATE_REPLACE_TGT, &device->dev_state)) { continue; } } if (device->bdev) { blkdev_put(device->bdev, device->mode); device->bdev = NULL; fs_devices->open_devices--; } if (test_bit(BTRFS_DEV_STATE_WRITEABLE, &device->dev_state)) { list_del_init(&device->dev_alloc_list); clear_bit(BTRFS_DEV_STATE_WRITEABLE, &device->dev_state); if (!test_bit(BTRFS_DEV_STATE_REPLACE_TGT, &device->dev_state)) fs_devices->rw_devices--; } list_del_init(&device->dev_list); fs_devices->num_devices--; btrfs_free_device(device); } if (fs_devices->seed) { fs_devices = fs_devices->seed; goto again; } fs_devices->latest_bdev = latest_dev->bdev; mutex_unlock(&uuid_mutex); } static void free_device_rcu(struct rcu_head *head) { struct btrfs_device *device; device = container_of(head, struct btrfs_device, rcu); btrfs_free_device(device); } static void btrfs_close_bdev(struct btrfs_device *device) { if (!device->bdev) return; if (test_bit(BTRFS_DEV_STATE_WRITEABLE, &device->dev_state)) { sync_blockdev(device->bdev); invalidate_bdev(device->bdev); } blkdev_put(device->bdev, device->mode); } static void btrfs_close_one_device(struct btrfs_device *device) { struct btrfs_fs_devices *fs_devices = device->fs_devices; struct btrfs_device *new_device; struct rcu_string *name; if 
(device->bdev) fs_devices->open_devices--; if (test_bit(BTRFS_DEV_STATE_WRITEABLE, &device->dev_state) && device->devid != BTRFS_DEV_REPLACE_DEVID) { list_del_init(&device->dev_alloc_list); fs_devices->rw_devices--; } if (test_bit(BTRFS_DEV_STATE_MISSING, &device->dev_state)) fs_devices->missing_devices--; btrfs_close_bdev(device); new_device = btrfs_alloc_device(NULL, &device->devid, device->uuid); BUG_ON(IS_ERR(new_device)); /* -ENOMEM */ /* Safe because we are under uuid_mutex */ if (device->name) { name = rcu_string_strdup(device->name->str, GFP_NOFS); BUG_ON(!name); /* -ENOMEM */ rcu_assign_pointer(new_device->name, name); } list_replace_rcu(&device->dev_list, &new_device->dev_list); new_device->fs_devices = device->fs_devices; call_rcu(&device->rcu, free_device_rcu); } static int close_fs_devices(struct btrfs_fs_devices *fs_devices) { struct btrfs_device *device, *tmp; if (--fs_devices->opened > 0) return 0; mutex_lock(&fs_devices->device_list_mutex); list_for_each_entry_safe(device, tmp, &fs_devices->devices, dev_list) { btrfs_close_one_device(device); } mutex_unlock(&fs_devices->device_list_mutex); WARN_ON(fs_devices->open_devices); WARN_ON(fs_devices->rw_devices); fs_devices->opened = 0; fs_devices->seeding = 0; return 0; } int btrfs_close_devices(struct btrfs_fs_devices *fs_devices) { struct btrfs_fs_devices *seed_devices = NULL; int ret; mutex_lock(&uuid_mutex); ret = close_fs_devices(fs_devices); if (!fs_devices->opened) { seed_devices = fs_devices->seed; fs_devices->seed = NULL; } mutex_unlock(&uuid_mutex); while (seed_devices) { fs_devices = seed_devices; seed_devices = fs_devices->seed; close_fs_devices(fs_devices); free_fs_devices(fs_devices); } return ret; } static int open_fs_devices(struct btrfs_fs_devices *fs_devices, fmode_t flags, void *holder) { struct btrfs_device *device; struct btrfs_device *latest_dev = NULL; int ret = 0; flags |= FMODE_EXCL; list_for_each_entry(device, &fs_devices->devices, dev_list) { /* Just open everything we can; 
ignore failures here */ if (btrfs_open_one_device(fs_devices, device, flags, holder)) continue; if (!latest_dev || device->generation > latest_dev->generation) latest_dev = device; } if (fs_devices->open_devices == 0) { ret = -EINVAL; goto out; } fs_devices->opened = 1; fs_devices->latest_bdev = latest_dev->bdev; fs_devices->total_rw_bytes = 0; out: return ret; } static int devid_cmp(void *priv, struct list_head *a, struct list_head *b) { struct btrfs_device *dev1, *dev2; dev1 = list_entry(a, struct btrfs_device, dev_list); dev2 = list_entry(b, struct btrfs_device, dev_list); if (dev1->devid < dev2->devid) return -1; else if (dev1->devid > dev2->devid) return 1; return 0; } int btrfs_open_devices(struct btrfs_fs_devices *fs_devices, fmode_t flags, void *holder) { int ret; lockdep_assert_held(&uuid_mutex); mutex_lock(&fs_devices->device_list_mutex); if (fs_devices->opened) { fs_devices->opened++; ret = 0; } else { list_sort(NULL, &fs_devices->devices, devid_cmp); ret = open_fs_devices(fs_devices, flags, holder); } mutex_unlock(&fs_devices->device_list_mutex); return ret; } static void btrfs_release_disk_super(struct page *page) { kunmap(page); put_page(page); } static int btrfs_read_disk_super(struct block_device *bdev, u64 bytenr, struct page **page, struct btrfs_super_block **disk_super) { void *p; pgoff_t index; /* make sure our super fits in the device */ if (bytenr + PAGE_SIZE >= i_size_read(bdev->bd_inode)) return 1; /* make sure our super fits in the page */ if (sizeof(**disk_super) > PAGE_SIZE) return 1; /* make sure our super doesn't straddle pages on disk */ index = bytenr >> PAGE_SHIFT; if ((bytenr + sizeof(**disk_super) - 1) >> PAGE_SHIFT != index) return 1; /* pull in the page with our super */ *page = read_cache_page_gfp(bdev->bd_inode->i_mapping, index, GFP_KERNEL); if (IS_ERR_OR_NULL(*page)) return 1; p = kmap(*page); /* align our pointer to the offset of the super block */ *disk_super = p + offset_in_page(bytenr); if (btrfs_super_bytenr(*disk_super) 
!= bytenr || btrfs_super_magic(*disk_super) != BTRFS_MAGIC) { btrfs_release_disk_super(*page); return 1; } if ((*disk_super)->label[0] && (*disk_super)->label[BTRFS_LABEL_SIZE - 1]) (*disk_super)->label[BTRFS_LABEL_SIZE - 1] = '\0'; return 0; } /* * Look for a btrfs signature on a device. This may be called out of the mount path * and we are not allowed to call set_blocksize during the scan. The superblock * is read via pagecache */ struct btrfs_device *btrfs_scan_one_device(const char *path, fmode_t flags, void *holder) { struct btrfs_super_block *disk_super; bool new_device_added = false; struct btrfs_device *device = NULL; struct block_device *bdev; struct page *page; u64 bytenr; lockdep_assert_held(&uuid_mutex); /* * we would like to check all the supers, but that would make * a btrfs mount succeed after a mkfs from a different FS. * So, we need to add a special mount option to scan for * later supers, using BTRFS_SUPER_MIRROR_MAX instead */ bytenr = btrfs_sb_offset(0); flags |= FMODE_EXCL; bdev = blkdev_get_by_path(path, flags, holder); if (IS_ERR(bdev)) return ERR_CAST(bdev); if (btrfs_read_disk_super(bdev, bytenr, &page, &disk_super)) { device = ERR_PTR(-EINVAL); goto error_bdev_put; } device = device_list_add(path, disk_super, &new_device_added); if (!IS_ERR(device)) { if (new_device_added) btrfs_free_stale_devices(path, device); } btrfs_release_disk_super(page); error_bdev_put: blkdev_put(bdev, flags); return device; } static int contains_pending_extent(struct btrfs_transaction *transaction, struct btrfs_device *device, u64 *start, u64 len) { struct btrfs_fs_info *fs_info = device->fs_info; struct extent_map *em; struct list_head *search_list = &fs_info->pinned_chunks; int ret = 0; u64 physical_start = *start; if (transaction) search_list = &transaction->pending_chunks; again: list_for_each_entry(em, search_list, list) { struct map_lookup *map; int i; map = em->map_lookup; for (i = 0; i < map->num_stripes; i++) { u64 end; if (map->stripes[i].dev != device) 
continue; if (map->stripes[i].physical >= physical_start + len || map->stripes[i].physical + em->orig_block_len <= physical_start) continue; /* * Make sure that while processing the pinned list we do * not override our *start with a lower value, because * we can have pinned chunks that fall within this * device hole and that have lower physical addresses * than the pending chunks we processed before. If we * do not take this special care we can end up getting * 2 pending chunks that start at the same physical * device offsets because the end offset of a pinned * chunk can be equal to the start offset of some * pending chunk. */ end = map->stripes[i].physical + em->orig_block_len; if (end > *start) { *start = end; ret = 1; } } } if (search_list != &fs_info->pinned_chunks) { search_list = &fs_info->pinned_chunks; goto again; } return ret; } /* * find_free_dev_extent_start - find free space in the specified device * @device: the device which we search the free space in * @num_bytes: the size of the free space that we need * @search_start: the position from which to begin the search * @start: store the start of the free space. * @len: the size of the free space. that we find, or the size * of the max free space if we don't find suitable free space * * this uses a pretty simple search, the expectation is that it is * called very infrequently and that a given device has a small number * of extents * * @start is used to store the start of the free space if we find. But if we * don't find suitable free space, it will be used to store the start position * of the max free space. * * @len is used to store the size of the free space that we find. * But if we don't find suitable free space, it is used to store the size of * the max free space. 
*/ int find_free_dev_extent_start(struct btrfs_transaction *transaction, struct btrfs_device *device, u64 num_bytes, u64 search_start, u64 *start, u64 *len) { struct btrfs_fs_info *fs_info = device->fs_info; struct btrfs_root *root = fs_info->dev_root; struct btrfs_key key; struct btrfs_dev_extent *dev_extent; struct btrfs_path *path; u64 hole_size; u64 max_hole_start; u64 max_hole_size; u64 extent_end; u64 search_end = device->total_bytes; int ret; int slot; struct extent_buffer *l; /* * We don't want to overwrite the superblock on the drive nor any area * used by the boot loader (grub for example), so we make sure to start * at an offset of at least 1MB. */ search_start = max_t(u64, search_start, SZ_1M); path = btrfs_alloc_path(); if (!path) return -ENOMEM; max_hole_start = search_start; max_hole_size = 0; again: if (search_start >= search_end || test_bit(BTRFS_DEV_STATE_REPLACE_TGT, &device->dev_state)) { ret = -ENOSPC; goto out; } path->reada = READA_FORWARD; path->search_commit_root = 1; path->skip_locking = 1; key.objectid = device->devid; key.offset = search_start; key.type = BTRFS_DEV_EXTENT_KEY; ret = btrfs_search_slot(NULL, root, &key, path, 0, 0); if (ret < 0) goto out; if (ret > 0) { ret = btrfs_previous_item(root, path, key.objectid, key.type); if (ret < 0) goto out; } while (1) { l = path->nodes[0]; slot = path->slots[0]; if (slot >= btrfs_header_nritems(l)) { ret = btrfs_next_leaf(root, path); if (ret == 0) continue; if (ret < 0) goto out; break; } btrfs_item_key_to_cpu(l, &key, slot); if (key.objectid < device->devid) goto next; if (key.objectid > device->devid) break; if (key.type != BTRFS_DEV_EXTENT_KEY) goto next; if (key.offset > search_start) { hole_size = key.offset - search_start; /* * Have to check before we set max_hole_start, otherwise * we could end up sending back this offset anyway. 
*/ if (contains_pending_extent(transaction, device, &search_start, hole_size)) { if (key.offset >= search_start) { hole_size = key.offset - search_start; } else { WARN_ON_ONCE(1); hole_size = 0; } } if (hole_size > max_hole_size) { max_hole_start = search_start; max_hole_size = hole_size; } /* * If this free space is greater than which we need, * it must be the max free space that we have found * until now, so max_hole_start must point to the start * of this free space and the length of this free space * is stored in max_hole_size. Thus, we return * max_hole_start and max_hole_size and go back to the * caller. */ if (hole_size >= num_bytes) { ret = 0; goto out; } } dev_extent = btrfs_item_ptr(l, slot, struct btrfs_dev_extent); extent_end = key.offset + btrfs_dev_extent_length(l, dev_extent); if (extent_end > search_start) search_start = extent_end; next: path->slots[0]++; cond_resched(); } /* * At this point, search_start should be the end of * allocated dev extents, and when shrinking the device, * search_end may be smaller than search_start. */ if (search_end > search_start) { hole_size = search_end - search_start; if (contains_pending_extent(transaction, device, &search_start, hole_size)) { btrfs_release_path(path); goto again; } if (hole_size > max_hole_size) { max_hole_start = search_start; max_hole_size = hole_size; } } /* See above. 
*/ if (max_hole_size < num_bytes) ret = -ENOSPC; else ret = 0; out: btrfs_free_path(path); *start = max_hole_start; if (len) *len = max_hole_size; return ret; } int find_free_dev_extent(struct btrfs_trans_handle *trans, struct btrfs_device *device, u64 num_bytes, u64 *start, u64 *len) { /* FIXME use last free of some kind */ return find_free_dev_extent_start(trans->transaction, device, num_bytes, 0, start, len); } static int btrfs_free_dev_extent(struct btrfs_trans_handle *trans, struct btrfs_device *device, u64 start, u64 *dev_extent_len) { struct btrfs_fs_info *fs_info = device->fs_info; struct btrfs_root *root = fs_info->dev_root; int ret; struct btrfs_path *path; struct btrfs_key key; struct btrfs_key found_key; struct extent_buffer *leaf = NULL; struct btrfs_dev_extent *extent = NULL; path = btrfs_alloc_path(); if (!path) return -ENOMEM; key.objectid = device->devid; key.offset = start; key.type = BTRFS_DEV_EXTENT_KEY; again: ret = btrfs_search_slot(trans, root, &key, path, -1, 1); if (ret > 0) { ret = btrfs_previous_item(root, path, key.objectid, BTRFS_DEV_EXTENT_KEY); if (ret) goto out; leaf = path->nodes[0]; btrfs_item_key_to_cpu(leaf, &found_key, path->slots[0]); extent = btrfs_item_ptr(leaf, path->slots[0], struct btrfs_dev_extent); BUG_ON(found_key.offset > start || found_key.offset + btrfs_dev_extent_length(leaf, extent) < start); key = found_key; btrfs_release_path(path); goto again; } else if (ret == 0) { leaf = path->nodes[0]; extent = btrfs_item_ptr(leaf, path->slots[0], struct btrfs_dev_extent); } else { btrfs_handle_fs_error(fs_info, ret, "Slot search failed"); goto out; } *dev_extent_len = btrfs_dev_extent_length(leaf, extent); ret = btrfs_del_item(trans, root, path); if (ret) { btrfs_handle_fs_error(fs_info, ret, "Failed to remove dev extent item"); } else { set_bit(BTRFS_TRANS_HAVE_FREE_BGS, &trans->transaction->flags); } out: btrfs_free_path(path); return ret; } static int btrfs_alloc_dev_extent(struct btrfs_trans_handle *trans, struct 
btrfs_device *device, u64 chunk_offset, u64 start, u64 num_bytes) { int ret; struct btrfs_path *path; struct btrfs_fs_info *fs_info = device->fs_info; struct btrfs_root *root = fs_info->dev_root; struct btrfs_dev_extent *extent; struct extent_buffer *leaf; struct btrfs_key key; WARN_ON(!test_bit(BTRFS_DEV_STATE_IN_FS_METADATA, &device->dev_state)); WARN_ON(test_bit(BTRFS_DEV_STATE_REPLACE_TGT, &device->dev_state)); path = btrfs_alloc_path(); if (!path) return -ENOMEM; key.objectid = device->devid; key.offset = start; key.type = BTRFS_DEV_EXTENT_KEY; ret = btrfs_insert_empty_item(trans, root, path, &key, sizeof(*extent)); if (ret) goto out; leaf = path->nodes[0]; extent = btrfs_item_ptr(leaf, path->slots[0], struct btrfs_dev_extent); btrfs_set_dev_extent_chunk_tree(leaf, extent, BTRFS_CHUNK_TREE_OBJECTID); btrfs_set_dev_extent_chunk_objectid(leaf, extent, BTRFS_FIRST_CHUNK_TREE_OBJECTID); btrfs_set_dev_extent_chunk_offset(leaf, extent, chunk_offset); btrfs_set_dev_extent_length(leaf, extent, num_bytes); btrfs_mark_buffer_dirty(leaf); out: btrfs_free_path(path); return ret; } static u64 find_next_chunk(struct btrfs_fs_info *fs_info) { struct extent_map_tree *em_tree; struct extent_map *em; struct rb_node *n; u64 ret = 0; em_tree = &fs_info->mapping_tree.map_tree; read_lock(&em_tree->lock); n = rb_last(&em_tree->map.rb_root); if (n) { em = rb_entry(n, struct extent_map, rb_node); ret = em->start + em->len; } read_unlock(&em_tree->lock); return ret; } static noinline int find_next_devid(struct btrfs_fs_info *fs_info, u64 *devid_ret) { int ret; struct btrfs_key key; struct btrfs_key found_key; struct btrfs_path *path; path = btrfs_alloc_path(); if (!path) return -ENOMEM; key.objectid = BTRFS_DEV_ITEMS_OBJECTID; key.type = BTRFS_DEV_ITEM_KEY; key.offset = (u64)-1; ret = btrfs_search_slot(NULL, fs_info->chunk_root, &key, path, 0, 0); if (ret < 0) goto error; BUG_ON(ret == 0); /* Corruption */ ret = btrfs_previous_item(fs_info->chunk_root, path, BTRFS_DEV_ITEMS_OBJECTID, 
BTRFS_DEV_ITEM_KEY); if (ret) { *devid_ret = 1; } else { btrfs_item_key_to_cpu(path->nodes[0], &found_key, path->slots[0]); *devid_ret = found_key.offset + 1; } ret = 0; error: btrfs_free_path(path); return ret; } /* * the device information is stored in the chunk root * the btrfs_device struct should be fully filled in */ static int btrfs_add_dev_item(struct btrfs_trans_handle *trans, struct btrfs_device *device) { int ret; struct btrfs_path *path; struct btrfs_dev_item *dev_item; struct extent_buffer *leaf; struct btrfs_key key; unsigned long ptr; path = btrfs_alloc_path(); if (!path) return -ENOMEM; key.objectid = BTRFS_DEV_ITEMS_OBJECTID; key.type = BTRFS_DEV_ITEM_KEY; key.offset = device->devid; ret = btrfs_insert_empty_item(trans, trans->fs_info->chunk_root, path, &key, sizeof(*dev_item)); if (ret) goto out; leaf = path->nodes[0]; dev_item = btrfs_item_ptr(leaf, path->slots[0], struct btrfs_dev_item); btrfs_set_device_id(leaf, dev_item, device->devid); btrfs_set_device_generation(leaf, dev_item, 0); btrfs_set_device_type(leaf, dev_item, device->type); btrfs_set_device_io_align(leaf, dev_item, device->io_align); btrfs_set_device_io_width(leaf, dev_item, device->io_width); btrfs_set_device_sector_size(leaf, dev_item, device->sector_size); btrfs_set_device_total_bytes(leaf, dev_item, btrfs_device_get_disk_total_bytes(device)); btrfs_set_device_bytes_used(leaf, dev_item, btrfs_device_get_bytes_used(device)); btrfs_set_device_group(leaf, dev_item, 0); btrfs_set_device_seek_speed(leaf, dev_item, 0); btrfs_set_device_bandwidth(leaf, dev_item, 0); btrfs_set_device_start_offset(leaf, dev_item, 0); ptr = btrfs_device_uuid(dev_item); write_extent_buffer(leaf, device->uuid, ptr, BTRFS_UUID_SIZE); ptr = btrfs_device_fsid(dev_item); write_extent_buffer(leaf, trans->fs_info->fs_devices->metadata_uuid, ptr, BTRFS_FSID_SIZE); btrfs_mark_buffer_dirty(leaf); ret = 0; out: btrfs_free_path(path); return ret; } /* * Function to update ctime/mtime for a given device path. 
 * Mainly used for ctime/mtime based probe like libblkid.
 */
static void update_dev_time(const char *path_name)
{
	struct file *filp;

	filp = filp_open(path_name, O_RDWR, 0);
	/* Best effort only: silently skip paths that cannot be opened. */
	if (IS_ERR(filp))
		return;
	file_update_time(filp);
	filp_close(filp, NULL);
}

/*
 * Delete the DEV_ITEM of @device from the chunk tree, in a transaction
 * started and committed by this function itself.
 */
static int btrfs_rm_dev_item(struct btrfs_fs_info *fs_info,
			     struct btrfs_device *device)
{
	struct btrfs_root *root = fs_info->chunk_root;
	int ret;
	struct btrfs_path *path;
	struct btrfs_key key;
	struct btrfs_trans_handle *trans;

	path = btrfs_alloc_path();
	if (!path)
		return -ENOMEM;

	trans = btrfs_start_transaction(root, 0);
	if (IS_ERR(trans)) {
		btrfs_free_path(path);
		return PTR_ERR(trans);
	}
	key.objectid = BTRFS_DEV_ITEMS_OBJECTID;
	key.type = BTRFS_DEV_ITEM_KEY;
	key.offset = device->devid;

	ret = btrfs_search_slot(trans, root, &key, path, -1, 1);
	if (ret) {
		/* ret > 0 means the item was not found: map to -ENOENT. */
		if (ret > 0)
			ret = -ENOENT;
		btrfs_abort_transaction(trans, ret);
		btrfs_end_transaction(trans);
		goto out;
	}

	ret = btrfs_del_item(trans, root, path);
	if (ret) {
		btrfs_abort_transaction(trans, ret);
		btrfs_end_transaction(trans);
	}

out:
	btrfs_free_path(path);
	/* Only commit when the deletion succeeded. */
	if (!ret)
		ret = btrfs_commit_transaction(trans);
	return ret;
}

/*
 * Verify that @num_devices satisfies the RAID profile constraints in the whole
 * filesystem. It's up to the caller to adjust that number regarding eg. device
 * replace.
 */
static int btrfs_check_raid_min_devices(struct btrfs_fs_info *fs_info,
		u64 num_devices)
{
	u64 all_avail;
	unsigned seq;
	int i;

	/* Take a consistent snapshot of the in-use allocation profiles. */
	do {
		seq = read_seqbegin(&fs_info->profiles_lock);

		all_avail = fs_info->avail_data_alloc_bits |
			    fs_info->avail_system_alloc_bits |
			    fs_info->avail_metadata_alloc_bits;
	} while (read_seqretry(&fs_info->profiles_lock, seq));

	for (i = 0; i < BTRFS_NR_RAID_TYPES; i++) {
		if (!(all_avail & btrfs_raid_array[i].bg_flag))
			continue;

		if (num_devices < btrfs_raid_array[i].devs_min) {
			int ret = btrfs_raid_array[i].mindev_error;

			if (ret)
				return ret;
		}
	}

	return 0;
}

/*
 * Find a device in @fs_devs other than @device that is not missing and
 * has an open bdev; NULL when no such device exists.
 */
static struct btrfs_device * btrfs_find_next_active_device(
		struct btrfs_fs_devices *fs_devs, struct btrfs_device *device)
{
	struct btrfs_device *next_device;

	list_for_each_entry(next_device, &fs_devs->devices, dev_list) {
		if (next_device != device &&
		    !test_bit(BTRFS_DEV_STATE_MISSING, &next_device->dev_state)
		    && next_device->bdev)
			return next_device;
	}

	return NULL;
}

/*
 * Helper function to check if the given device is part of s_bdev / latest_bdev
 * and replace it with the provided or the next active device, in the context
 * where this function called, there should be always be another device (or
 * this_dev) which is active.
 */
void btrfs_assign_next_active_device(struct btrfs_device *device,
				     struct btrfs_device *this_dev)
{
	struct btrfs_fs_info *fs_info = device->fs_info;
	struct btrfs_device *next_device;

	if (this_dev)
		next_device = this_dev;
	else
		next_device = btrfs_find_next_active_device(
				fs_info->fs_devices, device);
	ASSERT(next_device);

	if (fs_info->sb->s_bdev &&
	    (fs_info->sb->s_bdev == device->bdev))
		fs_info->sb->s_bdev = next_device->bdev;

	if (fs_info->fs_devices->latest_bdev == device->bdev)
		fs_info->fs_devices->latest_bdev = next_device->bdev;
}

/*
 * Return btrfs_fs_devices::num_devices excluding the device that's being
 * currently replaced.
 */
static u64 btrfs_num_devices(struct btrfs_fs_info *fs_info)
{
	u64 num_devices = fs_info->fs_devices->num_devices;

	down_read(&fs_info->dev_replace.rwsem);
	if (btrfs_dev_replace_is_ongoing(&fs_info->dev_replace)) {
		/* Replace target is counted but is not a "real" device. */
		ASSERT(num_devices > 1);
		num_devices--;
	}
	up_read(&fs_info->dev_replace.rwsem);

	return num_devices;
}

/*
 * Remove a device from the filesystem, identified either by @devid or by
 * @device_path (the path "missing" selects the first missing device).
 * Shrinks the device to zero, removes its dev item and detaches it from
 * the device lists. Returns 0 or a negative errno / BTRFS_ERROR_DEV_* code.
 */
int btrfs_rm_device(struct btrfs_fs_info *fs_info, const char *device_path,
		u64 devid)
{
	struct btrfs_device *device;
	struct btrfs_fs_devices *cur_devices;
	struct btrfs_fs_devices *fs_devices = fs_info->fs_devices;
	u64 num_devices;
	int ret = 0;

	mutex_lock(&uuid_mutex);

	num_devices = btrfs_num_devices(fs_info);

	/* Refuse removal if it would violate any RAID profile minimum. */
	ret = btrfs_check_raid_min_devices(fs_info, num_devices - 1);
	if (ret)
		goto out;

	device = btrfs_find_device_by_devspec(fs_info, devid, device_path);

	if (IS_ERR(device)) {
		if (PTR_ERR(device) == -ENOENT &&
		    strcmp(device_path, "missing") == 0)
			ret = BTRFS_ERROR_DEV_MISSING_NOT_FOUND;
		else
			ret = PTR_ERR(device);
		goto out;
	}

	if (btrfs_pinned_by_swapfile(fs_info, device)) {
		btrfs_warn_in_rcu(fs_info,
	  "cannot remove device %s (devid %llu) due to active swapfile",
				  rcu_str_deref(device->name), device->devid);
		ret = -ETXTBSY;
		goto out;
	}

	if (test_bit(BTRFS_DEV_STATE_REPLACE_TGT, &device->dev_state)) {
		ret = BTRFS_ERROR_DEV_TGT_REPLACE;
		goto out;
	}

	if (test_bit(BTRFS_DEV_STATE_WRITEABLE, &device->dev_state) &&
	    fs_info->fs_devices->rw_devices == 1) {
		ret = BTRFS_ERROR_DEV_ONLY_WRITABLE;
		goto out;
	}

	if (test_bit(BTRFS_DEV_STATE_WRITEABLE, &device->dev_state)) {
		mutex_lock(&fs_info->chunk_mutex);
		list_del_init(&device->dev_alloc_list);
		device->fs_devices->rw_devices--;
		mutex_unlock(&fs_info->chunk_mutex);
	}

	/* Shrinking may block; drop uuid_mutex around it. */
	mutex_unlock(&uuid_mutex);
	ret = btrfs_shrink_device(device, 0);
	mutex_lock(&uuid_mutex);
	if (ret)
		goto error_undo;

	/*
	 * TODO: the superblock still includes this device in its num_devices
	 * counter although write_all_supers() is not locked out. This
	 * could give a filesystem state which requires a degraded mount.
	 */
	ret = btrfs_rm_dev_item(fs_info, device);
	if (ret)
		goto error_undo;

	clear_bit(BTRFS_DEV_STATE_IN_FS_METADATA, &device->dev_state);
	btrfs_scrub_cancel_dev(fs_info, device);

	/*
	 * the device list mutex makes sure that we don't change
	 * the device list while someone else is writing out all
	 * the device supers. Whoever is writing all supers, should
	 * lock the device list mutex before getting the number of
	 * devices in the super block (super_copy). Conversely,
	 * whoever updates the number of devices in the super block
	 * (super_copy) should hold the device list mutex.
	 */

	/*
	 * In normal cases the cur_devices == fs_devices. But in case
	 * of deleting a seed device, the cur_devices should point to
	 * its own fs_devices listed under the fs_devices->seed.
	 */
	cur_devices = device->fs_devices;
	mutex_lock(&fs_devices->device_list_mutex);
	list_del_rcu(&device->dev_list);

	cur_devices->num_devices--;
	cur_devices->total_devices--;
	/* Update total_devices of the parent fs_devices if it's seed */
	if (cur_devices != fs_devices)
		fs_devices->total_devices--;

	if (test_bit(BTRFS_DEV_STATE_MISSING, &device->dev_state))
		cur_devices->missing_devices--;

	btrfs_assign_next_active_device(device, NULL);

	if (device->bdev) {
		cur_devices->open_devices--;
		/* remove sysfs entry */
		btrfs_sysfs_rm_device_link(fs_devices, device);
	}

	num_devices = btrfs_super_num_devices(fs_info->super_copy) - 1;
	btrfs_set_super_num_devices(fs_info->super_copy, num_devices);
	mutex_unlock(&fs_devices->device_list_mutex);

	/*
	 * at this point, the device is zero sized and detached from
	 * the devices list.  All that's left is to zero out the old
	 * supers and free the device.
	 */
	if (test_bit(BTRFS_DEV_STATE_WRITEABLE, &device->dev_state))
		btrfs_scratch_superblocks(device->bdev, device->name->str);

	btrfs_close_bdev(device);
	/* Device may still be referenced by RCU readers; free via RCU. */
	call_rcu(&device->rcu, free_device_rcu);

	if (cur_devices->open_devices == 0) {
		/* Unlink cur_devices from the seed chain before freeing. */
		while (fs_devices) {
			if (fs_devices->seed == cur_devices) {
				fs_devices->seed = cur_devices->seed;
				break;
			}
			fs_devices = fs_devices->seed;
		}
		cur_devices->seed = NULL;
		close_fs_devices(cur_devices);
		free_fs_devices(cur_devices);
	}

out:
	mutex_unlock(&uuid_mutex);
	return ret;

error_undo:
	/* Re-add the device to the allocation list undone above. */
	if (test_bit(BTRFS_DEV_STATE_WRITEABLE, &device->dev_state)) {
		mutex_lock(&fs_info->chunk_mutex);
		list_add(&device->dev_alloc_list,
			 &fs_devices->alloc_list);
		device->fs_devices->rw_devices++;
		mutex_unlock(&fs_info->chunk_mutex);
	}
	goto out;
}

/*
 * Detach the replace source device @srcdev from its fs_devices lists and
 * update the per-list counters. Caller holds device_list_mutex.
 */
void btrfs_rm_dev_replace_remove_srcdev(struct btrfs_device *srcdev)
{
	struct btrfs_fs_devices *fs_devices;

	lockdep_assert_held(&srcdev->fs_info->fs_devices->device_list_mutex);

	/*
	 * in case of fs with no seed, srcdev->fs_devices will point
	 * to fs_devices of fs_info. However when the dev being replaced is
	 * a seed dev it will point to the seed's local fs_devices. In short
	 * srcdev will have its correct fs_devices in both the cases.
	 */
	fs_devices = srcdev->fs_devices;

	list_del_rcu(&srcdev->dev_list);
	list_del(&srcdev->dev_alloc_list);
	fs_devices->num_devices--;
	if (test_bit(BTRFS_DEV_STATE_MISSING, &srcdev->dev_state))
		fs_devices->missing_devices--;

	if (test_bit(BTRFS_DEV_STATE_WRITEABLE, &srcdev->dev_state))
		fs_devices->rw_devices--;

	if (srcdev->bdev)
		fs_devices->open_devices--;
}

/*
 * Free the already-detached replace source device: scratch its superblocks
 * if writable, close its bdev and release it via RCU. If this emptied a
 * seed fs_devices, unchain and free that fs_devices too.
 */
void btrfs_rm_dev_replace_free_srcdev(struct btrfs_fs_info *fs_info,
				      struct btrfs_device *srcdev)
{
	struct btrfs_fs_devices *fs_devices = srcdev->fs_devices;

	if (test_bit(BTRFS_DEV_STATE_WRITEABLE, &srcdev->dev_state)) {
		/* zero out the old super if it is writable */
		btrfs_scratch_superblocks(srcdev->bdev, srcdev->name->str);
	}

	btrfs_close_bdev(srcdev);
	call_rcu(&srcdev->rcu, free_device_rcu);

	/* if this is no devs we rather delete the fs_devices */
	if (!fs_devices->num_devices) {
		struct btrfs_fs_devices *tmp_fs_devices;

		/*
		 * On a mounted FS, num_devices can't be zero unless it's a
		 * seed. In case of a seed device being replaced, the replace
		 * target added to the sprout FS, so there will be no more
		 * device left under the seed FS.
		 */
		ASSERT(fs_devices->seeding);

		/* Unlink fs_devices from the seed chain. */
		tmp_fs_devices = fs_info->fs_devices;
		while (tmp_fs_devices) {
			if (tmp_fs_devices->seed == fs_devices) {
				tmp_fs_devices->seed = fs_devices->seed;
				break;
			}
			tmp_fs_devices = tmp_fs_devices->seed;
		}
		fs_devices->seed = NULL;
		close_fs_devices(fs_devices);
		free_fs_devices(fs_devices);
	}
}

/*
 * Tear down the replace target device: remove it from sysfs and the device
 * list, pick a new active device, scratch its superblocks and free it.
 */
void btrfs_destroy_dev_replace_tgtdev(struct btrfs_device *tgtdev)
{
	struct btrfs_fs_devices *fs_devices = tgtdev->fs_info->fs_devices;

	WARN_ON(!tgtdev);
	mutex_lock(&fs_devices->device_list_mutex);

	btrfs_sysfs_rm_device_link(fs_devices, tgtdev);

	if (tgtdev->bdev)
		fs_devices->open_devices--;

	fs_devices->num_devices--;

	btrfs_assign_next_active_device(tgtdev, NULL);

	list_del_rcu(&tgtdev->dev_list);

	mutex_unlock(&fs_devices->device_list_mutex);

	/*
	 * The update_dev_time() with in btrfs_scratch_superblocks()
	 * may lead to a call to btrfs_show_devname() which will try
	 * to hold device_list_mutex. And here this device
	 * is already out of device list, so we don't have to hold
	 * the device_list_mutex lock.
	 */
	btrfs_scratch_superblocks(tgtdev->bdev, tgtdev->name->str);

	btrfs_close_bdev(tgtdev);
	call_rcu(&tgtdev->rcu, free_device_rcu);
}

/*
 * Look up a btrfs_device by reading the superblock at @device_path.
 * Returns the matching device or an ERR_PTR (-ENOENT if not found).
 */
static struct btrfs_device *btrfs_find_device_by_path(
		struct btrfs_fs_info *fs_info, const char *device_path)
{
	int ret = 0;
	struct btrfs_super_block *disk_super;
	u64 devid;
	u8 *dev_uuid;
	struct block_device *bdev;
	struct buffer_head *bh;
	struct btrfs_device *device;

	ret = btrfs_get_bdev_and_sb(device_path, FMODE_READ,
				    fs_info->bdev_holder, 0, &bdev, &bh);
	if (ret)
		return ERR_PTR(ret);
	disk_super = (struct btrfs_super_block *)bh->b_data;
	devid = btrfs_stack_device_id(&disk_super->dev_item);
	dev_uuid = disk_super->dev_item.uuid;
	/* With METADATA_UUID the fsid to match lives in metadata_uuid. */
	if (btrfs_fs_incompat(fs_info, METADATA_UUID))
		device = btrfs_find_device(fs_info->fs_devices, devid,
					   dev_uuid,
					   disk_super->metadata_uuid, true);
	else
		device = btrfs_find_device(fs_info->fs_devices, devid,
					   dev_uuid, disk_super->fsid, true);

	brelse(bh);
	if (!device)
		device = ERR_PTR(-ENOENT);
	blkdev_put(bdev, FMODE_READ);
	return device;
}

/*
 * Lookup a device given by device id, or the path if the id is 0.
 */
struct btrfs_device *btrfs_find_device_by_devspec(
		struct btrfs_fs_info *fs_info, u64 devid,
		const char *device_path)
{
	struct btrfs_device *device;

	if (devid) {
		device = btrfs_find_device(fs_info->fs_devices, devid, NULL,
					   NULL, true);
		if (!device)
			return ERR_PTR(-ENOENT);
		return device;
	}

	if (!device_path || !device_path[0])
		return ERR_PTR(-EINVAL);

	if (strcmp(device_path, "missing") == 0) {
		/* Find first missing device */
		list_for_each_entry(device, &fs_info->fs_devices->devices,
				    dev_list) {
			if (test_bit(BTRFS_DEV_STATE_IN_FS_METADATA,
				     &device->dev_state) && !device->bdev)
				return device;
		}
		return ERR_PTR(-ENOENT);
	}

	return btrfs_find_device_by_path(fs_info, device_path);
}

/*
 * does all the dirty work required for changing file system's UUID.
 */
static int btrfs_prepare_sprout(struct btrfs_fs_info *fs_info)
{
	struct btrfs_fs_devices *fs_devices = fs_info->fs_devices;
	struct btrfs_fs_devices *old_devices;
	struct btrfs_fs_devices *seed_devices;
	struct btrfs_super_block *disk_super = fs_info->super_copy;
	struct btrfs_device *device;
	u64 super_flags;

	lockdep_assert_held(&uuid_mutex);
	/* Only a seeding filesystem can be sprouted. */
	if (!fs_devices->seeding)
		return -EINVAL;

	seed_devices = alloc_fs_devices(NULL, NULL);
	if (IS_ERR(seed_devices))
		return PTR_ERR(seed_devices);

	old_devices = clone_fs_devices(fs_devices);
	if (IS_ERR(old_devices)) {
		kfree(seed_devices);
		return PTR_ERR(old_devices);
	}

	list_add(&old_devices->fs_list, &fs_uuids);

	/* seed_devices takes over the current device lists. */
	memcpy(seed_devices, fs_devices, sizeof(*seed_devices));
	seed_devices->opened = 1;
	INIT_LIST_HEAD(&seed_devices->devices);
	INIT_LIST_HEAD(&seed_devices->alloc_list);
	mutex_init(&seed_devices->device_list_mutex);

	mutex_lock(&fs_devices->device_list_mutex);
	list_splice_init_rcu(&fs_devices->devices, &seed_devices->devices,
			      synchronize_rcu);
	list_for_each_entry(device, &seed_devices->devices, dev_list)
		device->fs_devices = seed_devices;

	mutex_lock(&fs_info->chunk_mutex);
	list_splice_init(&fs_devices->alloc_list, &seed_devices->alloc_list);
	mutex_unlock(&fs_info->chunk_mutex);

	/* fs_devices becomes the empty, writable sprout. */
	fs_devices->seeding = 0;
	fs_devices->num_devices = 0;
	fs_devices->open_devices = 0;
	fs_devices->missing_devices = 0;
	fs_devices->rotating = 0;
	fs_devices->seed = seed_devices;

	generate_random_uuid(fs_devices->fsid);
	memcpy(fs_devices->metadata_uuid, fs_devices->fsid, BTRFS_FSID_SIZE);
	memcpy(disk_super->fsid, fs_devices->fsid, BTRFS_FSID_SIZE);
	mutex_unlock(&fs_devices->device_list_mutex);

	super_flags = btrfs_super_flags(disk_super) &
		      ~BTRFS_SUPER_FLAG_SEEDING;
	btrfs_set_super_flags(disk_super, super_flags);

	return 0;
}

/*
 * Store the expected generation for seed devices in device items.
 */
static int btrfs_finish_sprout(struct btrfs_trans_handle *trans,
			       struct btrfs_fs_info *fs_info)
{
	struct btrfs_root *root = fs_info->chunk_root;
	struct btrfs_path *path;
	struct extent_buffer *leaf;
	struct btrfs_dev_item *dev_item;
	struct btrfs_device *device;
	struct btrfs_key key;
	u8 fs_uuid[BTRFS_FSID_SIZE];
	u8 dev_uuid[BTRFS_UUID_SIZE];
	u64 devid;
	int ret;

	path = btrfs_alloc_path();
	if (!path)
		return -ENOMEM;

	key.objectid = BTRFS_DEV_ITEMS_OBJECTID;
	key.offset = 0;
	key.type = BTRFS_DEV_ITEM_KEY;

	/* Walk all dev items and update the generation of seed devices. */
	while (1) {
		ret = btrfs_search_slot(trans, root, &key, path, 0, 1);
		if (ret < 0)
			goto error;

		leaf = path->nodes[0];
next_slot:
		if (path->slots[0] >= btrfs_header_nritems(leaf)) {
			ret = btrfs_next_leaf(root, path);
			if (ret > 0)
				break;
			if (ret < 0)
				goto error;
			leaf = path->nodes[0];
			btrfs_item_key_to_cpu(leaf, &key, path->slots[0]);
			btrfs_release_path(path);
			continue;
		}

		btrfs_item_key_to_cpu(leaf, &key, path->slots[0]);
		if (key.objectid != BTRFS_DEV_ITEMS_OBJECTID ||
		    key.type != BTRFS_DEV_ITEM_KEY)
			break;

		dev_item = btrfs_item_ptr(leaf, path->slots[0],
					  struct btrfs_dev_item);
		devid = btrfs_device_id(leaf, dev_item);
		read_extent_buffer(leaf, dev_uuid,
				   btrfs_device_uuid(dev_item),
				   BTRFS_UUID_SIZE);
		read_extent_buffer(leaf, fs_uuid,
				   btrfs_device_fsid(dev_item),
				   BTRFS_FSID_SIZE);
		device = btrfs_find_device(fs_info->fs_devices, devid,
					   dev_uuid, fs_uuid, true);
		BUG_ON(!device); /* Logic error */

		if (device->fs_devices->seeding) {
			btrfs_set_device_generation(leaf, dev_item,
						    device->generation);
			btrfs_mark_buffer_dirty(leaf);
		}

		path->slots[0]++;
		goto next_slot;
	}
	ret = 0;
error:
	btrfs_free_path(path);
	return ret;
}

/*
 * Add the block device at @device_path as a new device of the mounted
 * filesystem. Sprouts a read-only seed filesystem if necessary. Returns
 * 0 on success or a negative errno.
 */
int btrfs_init_new_device(struct btrfs_fs_info *fs_info,
			  const char *device_path)
{
	struct btrfs_root *root = fs_info->dev_root;
	struct request_queue *q;
	struct btrfs_trans_handle *trans;
	struct btrfs_device *device;
	struct block_device *bdev;
	struct super_block *sb = fs_info->sb;
	struct rcu_string *name;
	struct btrfs_fs_devices *fs_devices = fs_info->fs_devices;
	u64 orig_super_total_bytes;
	u64 orig_super_num_devices;
	int seeding_dev = 0;
	int ret = 0;
	bool unlocked = false;

	if (sb_rdonly(sb) && !fs_devices->seeding)
		return -EROFS;

	bdev = blkdev_get_by_path(device_path, FMODE_WRITE | FMODE_EXCL,
				  fs_info->bdev_holder);
	if (IS_ERR(bdev))
		return PTR_ERR(bdev);

	if (fs_devices->seeding) {
		seeding_dev = 1;
		down_write(&sb->s_umount);
		mutex_lock(&uuid_mutex);
	}

	filemap_write_and_wait(bdev->bd_inode->i_mapping);

	/* Reject a device that is already part of this filesystem. */
	mutex_lock(&fs_devices->device_list_mutex);
	list_for_each_entry(device, &fs_devices->devices, dev_list) {
		if (device->bdev == bdev) {
			ret = -EEXIST;
			mutex_unlock(
				&fs_devices->device_list_mutex);
			goto error;
		}
	}
	mutex_unlock(&fs_devices->device_list_mutex);

	device = btrfs_alloc_device(fs_info, NULL, NULL);
	if (IS_ERR(device)) {
		/* we can safely leave the fs_devices entry around */
		ret = PTR_ERR(device);
		goto error;
	}

	name = rcu_string_strdup(device_path, GFP_KERNEL);
	if (!name) {
		ret = -ENOMEM;
		goto error_free_device;
	}
	rcu_assign_pointer(device->name, name);

	trans = btrfs_start_transaction(root, 0);
	if (IS_ERR(trans)) {
		ret = PTR_ERR(trans);
		goto error_free_device;
	}

	q = bdev_get_queue(bdev);
	set_bit(BTRFS_DEV_STATE_WRITEABLE, &device->dev_state);
	device->generation = trans->transid;
	device->io_width = fs_info->sectorsize;
	device->io_align = fs_info->sectorsize;
	device->sector_size = fs_info->sectorsize;
	device->total_bytes = round_down(i_size_read(bdev->bd_inode),
					 fs_info->sectorsize);
	device->disk_total_bytes = device->total_bytes;
	device->commit_total_bytes = device->total_bytes;
	device->fs_info = fs_info;
	device->bdev = bdev;
	set_bit(BTRFS_DEV_STATE_IN_FS_METADATA, &device->dev_state);
	clear_bit(BTRFS_DEV_STATE_REPLACE_TGT, &device->dev_state);
	device->mode = FMODE_EXCL;
	device->dev_stats_valid = 1;
	set_blocksize(device->bdev, BTRFS_BDEV_BLOCKSIZE);

	if (seeding_dev) {
		sb->s_flags &= ~SB_RDONLY;
		ret = btrfs_prepare_sprout(fs_info);
		if (ret) {
			btrfs_abort_transaction(trans, ret);
			goto error_trans;
		}
	}

	device->fs_devices = fs_devices;

	mutex_lock(&fs_devices->device_list_mutex);
	mutex_lock(&fs_info->chunk_mutex);
	list_add_rcu(&device->dev_list, &fs_devices->devices);
	list_add(&device->dev_alloc_list, &fs_devices->alloc_list);
	fs_devices->num_devices++;
	fs_devices->open_devices++;
	fs_devices->rw_devices++;
	fs_devices->total_devices++;
	fs_devices->total_rw_bytes += device->total_bytes;

	atomic64_add(device->total_bytes, &fs_info->free_chunk_space);

	if (!blk_queue_nonrot(q))
		fs_devices->rotating = 1;

	orig_super_total_bytes = btrfs_super_total_bytes(fs_info->super_copy);
	btrfs_set_super_total_bytes(fs_info->super_copy,
		round_down(orig_super_total_bytes + device->total_bytes,
			   fs_info->sectorsize));

	orig_super_num_devices = btrfs_super_num_devices(fs_info->super_copy);
	btrfs_set_super_num_devices(fs_info->super_copy,
				    orig_super_num_devices + 1);

	/* add sysfs device entry */
	btrfs_sysfs_add_device_link(fs_devices, device);

	/*
	 * we've got more storage, clear any full flags on the space
	 * infos
	 */
	btrfs_clear_space_info_full(fs_info);

	mutex_unlock(&fs_info->chunk_mutex);
	mutex_unlock(&fs_devices->device_list_mutex);

	if (seeding_dev) {
		mutex_lock(&fs_info->chunk_mutex);
		ret = init_first_rw_device(trans, fs_info);
		mutex_unlock(&fs_info->chunk_mutex);
		if (ret) {
			btrfs_abort_transaction(trans, ret);
			goto error_sysfs;
		}
	}

	ret = btrfs_add_dev_item(trans, device);
	if (ret) {
		btrfs_abort_transaction(trans, ret);
		goto error_sysfs;
	}

	if (seeding_dev) {
		char fsid_buf[BTRFS_UUID_UNPARSED_SIZE];

		ret = btrfs_finish_sprout(trans, fs_info);
		if (ret) {
			btrfs_abort_transaction(trans, ret);
			goto error_sysfs;
		}

		/* Sprouting would change fsid of the mounted root,
		 * so rename the fsid on the sysfs
		 */
		snprintf(fsid_buf, BTRFS_UUID_UNPARSED_SIZE, "%pU",
						fs_info->fs_devices->fsid);
		if (kobject_rename(&fs_devices->fsid_kobj, fsid_buf))
			btrfs_warn(fs_info,
				   "sysfs: failed to create fsid for sprout");
	}

	ret = btrfs_commit_transaction(trans);

	if (seeding_dev) {
		mutex_unlock(&uuid_mutex);
		up_write(&sb->s_umount);
		unlocked = true;

		if (ret) /* transaction commit */
			return ret;

		ret = btrfs_relocate_sys_chunks(fs_info);
		if (ret < 0)
			btrfs_handle_fs_error(fs_info, ret,
				    "Failed to relocate sys chunks after device initialization. This can be fixed using the \"btrfs balance\" command.");
		trans = btrfs_attach_transaction(root);
		if (IS_ERR(trans)) {
			if (PTR_ERR(trans) == -ENOENT)
				return 0;
			ret = PTR_ERR(trans);
			trans = NULL;
			goto error_sysfs;
		}
		ret = btrfs_commit_transaction(trans);
	}

	/* Update ctime/mtime for libblkid */
	update_dev_time(device_path);
	return ret;

error_sysfs:
	/* Roll back the in-memory and super_copy accounting done above. */
	btrfs_sysfs_rm_device_link(fs_devices, device);
	mutex_lock(&fs_info->fs_devices->device_list_mutex);
	mutex_lock(&fs_info->chunk_mutex);
	list_del_rcu(&device->dev_list);
	list_del(&device->dev_alloc_list);
	fs_info->fs_devices->num_devices--;
	fs_info->fs_devices->open_devices--;
	fs_info->fs_devices->rw_devices--;
	fs_info->fs_devices->total_devices--;
	fs_info->fs_devices->total_rw_bytes -= device->total_bytes;
	atomic64_sub(device->total_bytes, &fs_info->free_chunk_space);
	btrfs_set_super_total_bytes(fs_info->super_copy,
				    orig_super_total_bytes);
	btrfs_set_super_num_devices(fs_info->super_copy,
				    orig_super_num_devices);
	mutex_unlock(&fs_info->chunk_mutex);
	mutex_unlock(&fs_info->fs_devices->device_list_mutex);
error_trans:
	if (seeding_dev)
		sb->s_flags |= SB_RDONLY;
	if (trans)
		btrfs_end_transaction(trans);
error_free_device:
	btrfs_free_device(device);
error:
	blkdev_put(bdev, FMODE_EXCL);
	if (seeding_dev && !unlocked) {
		mutex_unlock(&uuid_mutex);
		up_write(&sb->s_umount);
	}
	return ret;
}

/*
 * Write the in-memory device fields back into the device item in the
 * chunk tree.
 */
static noinline int btrfs_update_device(struct btrfs_trans_handle *trans,
					struct btrfs_device *device)
{
	int ret;
	struct btrfs_path *path;
	struct btrfs_root *root = device->fs_info->chunk_root;
	struct btrfs_dev_item *dev_item;
	struct extent_buffer *leaf;
	struct btrfs_key key;

	path = btrfs_alloc_path();
	if (!path)
		return -ENOMEM;

	key.objectid = BTRFS_DEV_ITEMS_OBJECTID;
	key.type = BTRFS_DEV_ITEM_KEY;
	key.offset = device->devid;

	ret =
	      btrfs_search_slot(trans, root, &key, path, 0, 1);
	if (ret < 0)
		goto out;

	if (ret > 0) {
		ret = -ENOENT;
		goto out;
	}

	leaf = path->nodes[0];
	dev_item = btrfs_item_ptr(leaf, path->slots[0],
				  struct btrfs_dev_item);

	btrfs_set_device_id(leaf, dev_item, device->devid);
	btrfs_set_device_type(leaf, dev_item, device->type);
	btrfs_set_device_io_align(leaf, dev_item, device->io_align);
	btrfs_set_device_io_width(leaf, dev_item, device->io_width);
	btrfs_set_device_sector_size(leaf, dev_item, device->sector_size);
	btrfs_set_device_total_bytes(leaf, dev_item,
				     btrfs_device_get_disk_total_bytes(device));
	btrfs_set_device_bytes_used(leaf, dev_item,
				    btrfs_device_get_bytes_used(device));
	btrfs_mark_buffer_dirty(leaf);

out:
	btrfs_free_path(path);
	return ret;
}

/*
 * Grow @device to @new_size (rounded down to the sector size), update the
 * super_copy total and persist the new size via btrfs_update_device().
 */
int btrfs_grow_device(struct btrfs_trans_handle *trans,
		      struct btrfs_device *device, u64 new_size)
{
	struct btrfs_fs_info *fs_info = device->fs_info;
	struct btrfs_super_block *super_copy = fs_info->super_copy;
	struct btrfs_fs_devices *fs_devices;
	u64 old_total;
	u64 diff;

	if (!test_bit(BTRFS_DEV_STATE_WRITEABLE, &device->dev_state))
		return -EACCES;

	new_size = round_down(new_size, fs_info->sectorsize);

	mutex_lock(&fs_info->chunk_mutex);
	old_total = btrfs_super_total_bytes(super_copy);
	diff = round_down(new_size - device->total_bytes,
			  fs_info->sectorsize);

	/* Shrinking or resizing a replace target is not allowed here. */
	if (new_size <= device->total_bytes ||
	    test_bit(BTRFS_DEV_STATE_REPLACE_TGT, &device->dev_state)) {
		mutex_unlock(&fs_info->chunk_mutex);
		return -EINVAL;
	}

	fs_devices = fs_info->fs_devices;

	btrfs_set_super_total_bytes(super_copy,
			round_down(old_total + diff, fs_info->sectorsize));
	device->fs_devices->total_rw_bytes += diff;

	btrfs_device_set_total_bytes(device, new_size);
	btrfs_device_set_disk_total_bytes(device, new_size);
	btrfs_clear_space_info_full(device->fs_info);
	if (list_empty(&device->resized_list))
		list_add_tail(&device->resized_list,
			      &fs_devices->resized_devices);
	mutex_unlock(&fs_info->chunk_mutex);

	return btrfs_update_device(trans, device);
}

static int
	   /* Delete the chunk item for @chunk_offset from the chunk tree. */
	   btrfs_free_chunk(struct btrfs_trans_handle *trans, u64 chunk_offset)
{
	struct btrfs_fs_info *fs_info = trans->fs_info;
	struct btrfs_root *root = fs_info->chunk_root;
	int ret;
	struct btrfs_path *path;
	struct btrfs_key key;

	path = btrfs_alloc_path();
	if (!path)
		return -ENOMEM;

	key.objectid = BTRFS_FIRST_CHUNK_TREE_OBJECTID;
	key.offset = chunk_offset;
	key.type = BTRFS_CHUNK_ITEM_KEY;

	ret = btrfs_search_slot(trans, root, &key, path, -1, 1);
	if (ret < 0)
		goto out;
	else if (ret > 0) { /* Logic error or corruption */
		btrfs_handle_fs_error(fs_info, -ENOENT,
				      "Failed lookup while freeing chunk.");
		ret = -ENOENT;
		goto out;
	}

	ret = btrfs_del_item(trans, root, path);
	if (ret < 0)
		btrfs_handle_fs_error(fs_info, ret,
				      "Failed to delete chunk item.");
out:
	btrfs_free_path(path);
	return ret;
}

/*
 * Remove the entry for @chunk_offset from the superblock's in-memory
 * sys_chunk_array, compacting the array in place.
 */
static int btrfs_del_sys_chunk(struct btrfs_fs_info *fs_info,
			       u64 chunk_offset)
{
	struct btrfs_super_block *super_copy = fs_info->super_copy;
	struct btrfs_disk_key *disk_key;
	struct btrfs_chunk *chunk;
	u8 *ptr;
	int ret = 0;
	u32 num_stripes;
	u32 array_size;
	u32 len = 0;
	u32 cur;
	struct btrfs_key key;

	mutex_lock(&fs_info->chunk_mutex);
	array_size = btrfs_super_sys_array_size(super_copy);

	ptr = super_copy->sys_chunk_array;
	cur = 0;

	/* Scan the packed (disk_key, chunk) records in the array. */
	while (cur < array_size) {
		disk_key = (struct btrfs_disk_key *)ptr;
		btrfs_disk_key_to_cpu(&key, disk_key);

		len = sizeof(*disk_key);

		if (key.type == BTRFS_CHUNK_ITEM_KEY) {
			chunk = (struct btrfs_chunk *)(ptr + len);
			num_stripes = btrfs_stack_chunk_num_stripes(chunk);
			len += btrfs_chunk_item_size(num_stripes);
		} else {
			ret = -EIO;
			break;
		}
		if (key.objectid == BTRFS_FIRST_CHUNK_TREE_OBJECTID &&
		    key.offset == chunk_offset) {
			memmove(ptr, ptr + len, array_size - (cur + len));
			array_size -= len;
			btrfs_set_super_sys_array_size(super_copy, array_size);
		} else {
			ptr += len;
			cur += len;
		}
	}
	mutex_unlock(&fs_info->chunk_mutex);
	return ret;
}

/*
 * btrfs_get_chunk_map() - Find the mapping containing the given logical extent.
 * @logical: Logical block offset in bytes.
 * @length: Length of extent in bytes.
 *
 * Return: Chunk mapping or ERR_PTR.
 */
struct extent_map *btrfs_get_chunk_map(struct btrfs_fs_info *fs_info,
				       u64 logical, u64 length)
{
	struct extent_map_tree *em_tree;
	struct extent_map *em;

	em_tree = &fs_info->mapping_tree.map_tree;
	read_lock(&em_tree->lock);
	em = lookup_extent_mapping(em_tree, logical, length);
	read_unlock(&em_tree->lock);

	if (!em) {
		btrfs_crit(fs_info, "unable to find logical %llu length %llu",
			   logical, length);
		return ERR_PTR(-EINVAL);
	}

	if (em->start > logical || em->start + em->len < logical) {
		btrfs_crit(fs_info,
			   "found a bad mapping, wanted %llu-%llu, found %llu-%llu",
			   logical, length, em->start, em->start + em->len);
		free_extent_map(em);
		return ERR_PTR(-EINVAL);
	}

	/* callers are responsible for dropping em's ref. */
	return em;
}

/*
 * Delete the chunk at @chunk_offset: free its dev extents, update the
 * devices, remove the chunk item (and sys chunk array entry for system
 * chunks) and finally the block group.
 */
int btrfs_remove_chunk(struct btrfs_trans_handle *trans, u64 chunk_offset)
{
	struct btrfs_fs_info *fs_info = trans->fs_info;
	struct extent_map *em;
	struct map_lookup *map;
	u64 dev_extent_len = 0;
	int i, ret = 0;
	struct btrfs_fs_devices *fs_devices = fs_info->fs_devices;

	em = btrfs_get_chunk_map(fs_info, chunk_offset, 1);
	if (IS_ERR(em)) {
		/*
		 * This is a logic error, but we don't want to just rely on the
		 * user having built with ASSERT enabled, so if ASSERT doesn't
		 * do anything we still error out.
		 */
		ASSERT(0);
		return PTR_ERR(em);
	}
	map = em->map_lookup;
	mutex_lock(&fs_info->chunk_mutex);
	check_system_chunk(trans, map->type);
	mutex_unlock(&fs_info->chunk_mutex);

	/*
	 * Take the device list mutex to prevent races with the final phase of
	 * a device replace operation that replaces the device object associated
	 * with map stripes (dev-replace.c:btrfs_dev_replace_finishing()).
	 */
	mutex_lock(&fs_devices->device_list_mutex);
	for (i = 0; i < map->num_stripes; i++) {
		struct btrfs_device *device = map->stripes[i].dev;
		ret = btrfs_free_dev_extent(trans, device,
					    map->stripes[i].physical,
					    &dev_extent_len);
		if (ret) {
			mutex_unlock(&fs_devices->device_list_mutex);
			btrfs_abort_transaction(trans, ret);
			goto out;
		}

		if (device->bytes_used > 0) {
			mutex_lock(&fs_info->chunk_mutex);
			btrfs_device_set_bytes_used(device,
					device->bytes_used - dev_extent_len);
			atomic64_add(dev_extent_len,
				     &fs_info->free_chunk_space);
			btrfs_clear_space_info_full(fs_info);
			mutex_unlock(&fs_info->chunk_mutex);
		}

		ret = btrfs_update_device(trans, device);
		if (ret) {
			mutex_unlock(&fs_devices->device_list_mutex);
			btrfs_abort_transaction(trans, ret);
			goto out;
		}
	}
	mutex_unlock(&fs_devices->device_list_mutex);

	ret = btrfs_free_chunk(trans, chunk_offset);
	if (ret) {
		btrfs_abort_transaction(trans, ret);
		goto out;
	}

	trace_btrfs_chunk_free(fs_info, map, chunk_offset, em->len);

	if (map->type & BTRFS_BLOCK_GROUP_SYSTEM) {
		ret = btrfs_del_sys_chunk(fs_info, chunk_offset);
		if (ret) {
			btrfs_abort_transaction(trans, ret);
			goto out;
		}
	}

	ret = btrfs_remove_block_group(trans, chunk_offset, em);
	if (ret) {
		btrfs_abort_transaction(trans, ret);
		goto out;
	}

out:
	/* once for us */
	free_extent_map(em);
	return ret;
}

/*
 * Relocate all extents out of the chunk at @chunk_offset, then delete the
 * chunk. Caller must hold delete_unused_bgs_mutex (see below).
 */
static int btrfs_relocate_chunk(struct btrfs_fs_info *fs_info,
				u64 chunk_offset)
{
	struct btrfs_root *root = fs_info->chunk_root;
	struct btrfs_trans_handle *trans;
	int ret;

	/*
	 * Prevent races with automatic removal of unused block groups.
	 * After we relocate and before we remove the chunk with offset
	 * chunk_offset, automatic removal of the block group can kick in,
	 * resulting in a failure when calling btrfs_remove_chunk() below.
	 *
	 * Make sure to acquire this mutex before doing a tree search (dev
	 * or chunk trees) to find chunks. Otherwise the cleaner kthread might
	 * call btrfs_remove_chunk() (through btrfs_delete_unused_bgs()) after
	 * we release the path used to search the chunk/dev tree and before
	 * the current task acquires this mutex and calls us.
	 */
	lockdep_assert_held(&fs_info->delete_unused_bgs_mutex);

	ret = btrfs_can_relocate(fs_info, chunk_offset);
	if (ret)
		return -ENOSPC;

	/* step one, relocate all the extents inside this chunk */
	btrfs_scrub_pause(fs_info);
	ret = btrfs_relocate_block_group(fs_info, chunk_offset);
	btrfs_scrub_continue(fs_info);
	if (ret)
		return ret;

	/*
	 * We add the kobjects here (and after forcing data chunk creation)
	 * since relocation is the only place we'll create chunks of a new
	 * type at runtime. The only place where we'll remove the last
	 * chunk of a type is the call immediately below this one. Even
	 * so, we're protected against races with the cleaner thread since
	 * we're covered by the delete_unused_bgs_mutex.
	 */
	btrfs_add_raid_kobjects(fs_info);

	trans = btrfs_start_trans_remove_block_group(root->fs_info,
						     chunk_offset);
	if (IS_ERR(trans)) {
		ret = PTR_ERR(trans);
		btrfs_handle_fs_error(root->fs_info, ret, NULL);
		return ret;
	}

	/*
	 * step two, delete the device extents and the
	 * chunk tree entries
	 */
	ret = btrfs_remove_chunk(trans, chunk_offset);
	btrfs_end_transaction(trans);
	return ret;
}

/*
 * Relocate every SYSTEM chunk, retrying once on -ENOSPC failures.
 */
static int btrfs_relocate_sys_chunks(struct btrfs_fs_info *fs_info)
{
	struct btrfs_root *chunk_root = fs_info->chunk_root;
	struct btrfs_path *path;
	struct extent_buffer *leaf;
	struct btrfs_chunk *chunk;
	struct btrfs_key key;
	struct btrfs_key found_key;
	u64 chunk_type;
	bool retried = false;
	int failed = 0;
	int ret;

	path = btrfs_alloc_path();
	if (!path)
		return -ENOMEM;

again:
	/* Walk the chunk tree from the highest offset downwards. */
	key.objectid = BTRFS_FIRST_CHUNK_TREE_OBJECTID;
	key.offset = (u64)-1;
	key.type = BTRFS_CHUNK_ITEM_KEY;

	while (1) {
		mutex_lock(&fs_info->delete_unused_bgs_mutex);
		ret = btrfs_search_slot(NULL, chunk_root, &key, path, 0, 0);
		if (ret < 0) {
			mutex_unlock(&fs_info->delete_unused_bgs_mutex);
			goto error;
		}
		BUG_ON(ret == 0); /* Corruption */

		ret = btrfs_previous_item(chunk_root, path, key.objectid,
					  key.type);
		if (ret)
			mutex_unlock(&fs_info->delete_unused_bgs_mutex);
		if (ret < 0)
			goto error;
		if (ret > 0)
			break;

		leaf = path->nodes[0];
		btrfs_item_key_to_cpu(leaf, &found_key, path->slots[0]);

		chunk = btrfs_item_ptr(leaf, path->slots[0],
				       struct btrfs_chunk);
		chunk_type = btrfs_chunk_type(leaf, chunk);
		btrfs_release_path(path);

		if (chunk_type & BTRFS_BLOCK_GROUP_SYSTEM) {
			ret = btrfs_relocate_chunk(fs_info, found_key.offset);
			if (ret == -ENOSPC)
				failed++;
			else
				BUG_ON(ret);
		}
		mutex_unlock(&fs_info->delete_unused_bgs_mutex);

		if (found_key.offset == 0)
			break;
		key.offset = found_key.offset - 1;
	}
	ret = 0;
	if (failed && !retried) {
		failed = 0;
		retried = true;
		goto again;
	} else if (WARN_ON(failed && retried)) {
		ret = -ENOSPC;
	}
error:
	btrfs_free_path(path);
	return ret;
}

/*
 * return 1 : allocate a data chunk successfully,
 * return <0: errors during allocating a data chunk,
 * return 0 : no need to allocate a data chunk.
 */
static int btrfs_may_alloc_data_chunk(struct btrfs_fs_info *fs_info,
				      u64 chunk_offset)
{
	struct btrfs_block_group_cache *cache;
	u64 bytes_used;
	u64 chunk_type;

	cache = btrfs_lookup_block_group(fs_info, chunk_offset);
	ASSERT(cache);
	chunk_type = cache->flags;
	btrfs_put_block_group(cache);

	if (chunk_type & BTRFS_BLOCK_GROUP_DATA) {
		spin_lock(&fs_info->data_sinfo->lock);
		bytes_used = fs_info->data_sinfo->bytes_used;
		spin_unlock(&fs_info->data_sinfo->lock);

		/* Force-allocate a data chunk only if no data is in use. */
		if (!bytes_used) {
			struct btrfs_trans_handle *trans;
			int ret;

			trans =	btrfs_join_transaction(fs_info->tree_root);
			if (IS_ERR(trans))
				return PTR_ERR(trans);

			ret = btrfs_force_chunk_alloc(trans,
						      BTRFS_BLOCK_GROUP_DATA);
			btrfs_end_transaction(trans);
			if (ret < 0)
				return ret;

			btrfs_add_raid_kobjects(fs_info);

			return 1;
		}
	}
	return 0;
}

/*
 * Persist the balance control settings as a balance item in the tree root,
 * inside its own transaction.
 */
static int insert_balance_item(struct btrfs_fs_info *fs_info,
			       struct btrfs_balance_control *bctl)
{
	struct btrfs_root *root = fs_info->tree_root;
	struct btrfs_trans_handle *trans;
	struct btrfs_balance_item *item;
	struct btrfs_disk_balance_args disk_bargs;
	struct btrfs_path *path;
	struct extent_buffer *leaf;
	struct btrfs_key key;
	int ret, err;

	path = btrfs_alloc_path();
	if (!path)
		return -ENOMEM;

	trans = btrfs_start_transaction(root, 0);
	if (IS_ERR(trans)) {
		btrfs_free_path(path);
		return PTR_ERR(trans);
	}

	key.objectid = BTRFS_BALANCE_OBJECTID;
	key.type = BTRFS_TEMPORARY_ITEM_KEY;
	key.offset = 0;

	ret = btrfs_insert_empty_item(trans, root, path, &key,
				      sizeof(*item));
	if (ret)
		goto out;

	leaf = path->nodes[0];
	item = btrfs_item_ptr(leaf, path->slots[0],
			      struct btrfs_balance_item);

	memzero_extent_buffer(leaf, (unsigned long)item, sizeof(*item));

	btrfs_cpu_balance_args_to_disk(&disk_bargs, &bctl->data);
	btrfs_set_balance_data(leaf, item, &disk_bargs);
	btrfs_cpu_balance_args_to_disk(&disk_bargs, &bctl->meta);
	btrfs_set_balance_meta(leaf, item, &disk_bargs);
	btrfs_cpu_balance_args_to_disk(&disk_bargs, &bctl->sys);
	btrfs_set_balance_sys(leaf, item, &disk_bargs);
	btrfs_set_balance_flags(leaf, item, bctl->flags);

	btrfs_mark_buffer_dirty(leaf);
out:
	btrfs_free_path(path);
	err = btrfs_commit_transaction(trans);
	if (err && !ret)
		ret = err;
	return ret;
}

/*
 * Delete the persisted balance item from the tree root, inside its own
 * transaction.
 */
static int del_balance_item(struct btrfs_fs_info *fs_info)
{
	struct btrfs_root *root = fs_info->tree_root;
	struct btrfs_trans_handle *trans;
	struct btrfs_path *path;
	struct btrfs_key key;
	int ret, err;

	path = btrfs_alloc_path();
	if (!path)
		return -ENOMEM;

	trans = btrfs_start_transaction(root, 0);
	if (IS_ERR(trans)) {
		btrfs_free_path(path);
		return PTR_ERR(trans);
	}

	key.objectid = BTRFS_BALANCE_OBJECTID;
	key.type = BTRFS_TEMPORARY_ITEM_KEY;
	key.offset = 0;

	ret = btrfs_search_slot(trans, root, &key, path, -1, 1);
	if (ret < 0)
		goto out;
	if (ret > 0) {
		ret = -ENOENT;
		goto out;
	}

	ret = btrfs_del_item(trans, root, path);
out:
	btrfs_free_path(path);
	err = btrfs_commit_transaction(trans);
	if (err && !ret)
		ret = err;
	return ret;
}

/*
 * This is a heuristic used to reduce the number of chunks balanced on
 * resume after balance was interrupted.
 */
static void update_balance_args(struct btrfs_balance_control *bctl)
{
	/*
	 * Turn on soft mode for chunk types that were being converted.
	 */
	if (bctl->data.flags & BTRFS_BALANCE_ARGS_CONVERT)
		bctl->data.flags |= BTRFS_BALANCE_ARGS_SOFT;
	if (bctl->sys.flags & BTRFS_BALANCE_ARGS_CONVERT)
		bctl->sys.flags |= BTRFS_BALANCE_ARGS_SOFT;
	if (bctl->meta.flags & BTRFS_BALANCE_ARGS_CONVERT)
		bctl->meta.flags |= BTRFS_BALANCE_ARGS_SOFT;

	/*
	 * Turn on usage filter if is not already used.  The idea is
	 * that chunks that we have already balanced should be
	 * reasonably full.  Don't do it for chunks that are being
	 * converted - that will keep us from relocating unconverted
	 * (albeit full) chunks.
	 */
	if (!(bctl->data.flags & BTRFS_BALANCE_ARGS_USAGE) &&
	    !(bctl->data.flags & BTRFS_BALANCE_ARGS_USAGE_RANGE) &&
	    !(bctl->data.flags & BTRFS_BALANCE_ARGS_CONVERT)) {
		bctl->data.flags |= BTRFS_BALANCE_ARGS_USAGE;
		bctl->data.usage = 90;
	}
	if (!(bctl->sys.flags & BTRFS_BALANCE_ARGS_USAGE) &&
	    !(bctl->sys.flags & BTRFS_BALANCE_ARGS_USAGE_RANGE) &&
	    !(bctl->sys.flags & BTRFS_BALANCE_ARGS_CONVERT)) {
		bctl->sys.flags |= BTRFS_BALANCE_ARGS_USAGE;
		bctl->sys.usage = 90;
	}
	if (!(bctl->meta.flags & BTRFS_BALANCE_ARGS_USAGE) &&
	    !(bctl->meta.flags & BTRFS_BALANCE_ARGS_USAGE_RANGE) &&
	    !(bctl->meta.flags & BTRFS_BALANCE_ARGS_CONVERT)) {
		bctl->meta.flags |= BTRFS_BALANCE_ARGS_USAGE;
		bctl->meta.usage = 90;
	}
}

/*
 * Clear the balance status in fs_info and delete the balance item from disk.
 */
static void reset_balance_state(struct btrfs_fs_info *fs_info)
{
	struct btrfs_balance_control *bctl = fs_info->balance_ctl;
	int ret;

	BUG_ON(!fs_info->balance_ctl);

	spin_lock(&fs_info->balance_lock);
	fs_info->balance_ctl = NULL;
	spin_unlock(&fs_info->balance_lock);

	kfree(bctl);
	ret = del_balance_item(fs_info);
	if (ret)
		btrfs_handle_fs_error(fs_info, ret, NULL);
}

/*
 * Balance filters.  Return 1 if chunk should be filtered out
 * (should not be balanced.
 */
/* Keep (return 0) a chunk whose extended profile is in bargs->profiles. */
static int chunk_profiles_filter(u64 chunk_type,
				 struct btrfs_balance_args *bargs)
{
	chunk_type = chunk_to_extended(chunk_type) &
				BTRFS_EXTENDED_PROFILE_MASK;

	if (bargs->profiles & chunk_type)
		return 0;

	return 1;
}

/*
 * Keep (return 0) a chunk whose used bytes fall in
 * [usage_min%, usage_max%) of the block group size; filter out otherwise.
 * usage_max == 0 means "only completely empty chunks" (threshold 1 byte),
 * usage_max > 100 means "no upper bound".
 * NOTE(review): the block group returned by btrfs_lookup_block_group() is
 * dereferenced without a NULL check — presumably the chunk offset is always
 * backed by a block group here; verify against callers.
 */
static int chunk_usage_range_filter(struct btrfs_fs_info *fs_info,
				    u64 chunk_offset,
				    struct btrfs_balance_args *bargs)
{
	struct btrfs_block_group_cache *cache;
	u64 chunk_used;
	u64 user_thresh_min;
	u64 user_thresh_max;
	int ret = 1;

	cache = btrfs_lookup_block_group(fs_info, chunk_offset);
	chunk_used = btrfs_block_group_used(&cache->item);

	if (bargs->usage_min == 0)
		user_thresh_min = 0;
	else
		user_thresh_min = div_factor_fine(cache->key.offset,
					bargs->usage_min);

	if (bargs->usage_max == 0)
		user_thresh_max = 1;
	else if (bargs->usage_max > 100)
		user_thresh_max = cache->key.offset;
	else
		user_thresh_max = div_factor_fine(cache->key.offset,
					bargs->usage_max);

	if (user_thresh_min <= chunk_used && chunk_used < user_thresh_max)
		ret = 0;

	btrfs_put_block_group(cache);
	return ret;
}

/*
 * Single-value variant of the usage filter: keep (return 0) a chunk whose
 * used bytes are below usage% of the block group size.
 * NOTE(review): the zero test reads usage_min while the computation reads
 * usage — presumably the two fields share storage in btrfs_balance_args;
 * confirm against the struct definition before changing either.
 */
static int chunk_usage_filter(struct btrfs_fs_info *fs_info,
		u64 chunk_offset, struct btrfs_balance_args *bargs)
{
	struct btrfs_block_group_cache *cache;
	u64 chunk_used, user_thresh;
	int ret = 1;

	cache = btrfs_lookup_block_group(fs_info, chunk_offset);
	chunk_used = btrfs_block_group_used(&cache->item);

	if (bargs->usage_min == 0)
		user_thresh = 1;
	else if (bargs->usage > 100)
		user_thresh = cache->key.offset;
	else
		user_thresh = div_factor_fine(cache->key.offset,
					      bargs->usage);

	if (chunk_used < user_thresh)
		ret = 0;

	btrfs_put_block_group(cache);
	return ret;
}

/* Keep (return 0) a chunk that has at least one stripe on bargs->devid. */
static int chunk_devid_filter(struct extent_buffer *leaf,
			      struct btrfs_chunk *chunk,
			      struct btrfs_balance_args *bargs)
{
	struct btrfs_stripe *stripe;
	int num_stripes = btrfs_chunk_num_stripes(leaf, chunk);
	int i;

	for (i = 0; i < num_stripes; i++) {
		stripe = btrfs_stripe_nr(chunk, i);
		if (btrfs_stripe_devid(leaf, stripe) == bargs->devid)
			return 0;
	}

	return 1;
}

/* [pstart, pend) */
static int chunk_drange_filter(struct extent_buffer
			       *leaf,
			       struct btrfs_chunk *chunk,
			       struct btrfs_balance_args *bargs)
{
	struct btrfs_stripe *stripe;
	int num_stripes = btrfs_chunk_num_stripes(leaf, chunk);
	u64 stripe_offset;
	u64 stripe_length;
	int factor;
	int i;

	/* drange only makes sense in combination with a devid filter. */
	if (!(bargs->flags & BTRFS_BALANCE_ARGS_DEVID))
		return 0;

	/*
	 * Per-device stripe length = chunk length / factor, where factor
	 * depends on the redundancy of the profile (mirrors halve it,
	 * parity profiles subtract the parity stripes).
	 */
	if (btrfs_chunk_type(leaf, chunk) & (BTRFS_BLOCK_GROUP_DUP |
	     BTRFS_BLOCK_GROUP_RAID1 | BTRFS_BLOCK_GROUP_RAID10)) {
		factor = num_stripes / 2;
	} else if (btrfs_chunk_type(leaf, chunk) & BTRFS_BLOCK_GROUP_RAID5) {
		factor = num_stripes - 1;
	} else if (btrfs_chunk_type(leaf, chunk) & BTRFS_BLOCK_GROUP_RAID6) {
		factor = num_stripes - 2;
	} else {
		factor = num_stripes;
	}

	for (i = 0; i < num_stripes; i++) {
		stripe = btrfs_stripe_nr(chunk, i);
		if (btrfs_stripe_devid(leaf, stripe) != bargs->devid)
			continue;

		stripe_offset = btrfs_stripe_offset(leaf, stripe);
		stripe_length = btrfs_chunk_length(leaf, chunk);
		stripe_length = div_u64(stripe_length, factor);

		/* Keep the chunk if the stripe overlaps [pstart, pend). */
		if (stripe_offset < bargs->pend &&
		    stripe_offset + stripe_length > bargs->pstart)
			return 0;
	}

	return 1;
}

/* [vstart, vend) */
static int chunk_vrange_filter(struct extent_buffer *leaf,
			       struct btrfs_chunk *chunk,
			       u64 chunk_offset,
			       struct btrfs_balance_args *bargs)
{
	if (chunk_offset < bargs->vend &&
	    chunk_offset + btrfs_chunk_length(leaf, chunk) > bargs->vstart)
		/* at least part of the chunk is inside this vrange */
		return 0;

	return 1;
}

/* Keep (return 0) chunks whose stripe count is within [min, max]. */
static int chunk_stripes_range_filter(struct extent_buffer *leaf,
			       struct btrfs_chunk *chunk,
			       struct btrfs_balance_args *bargs)
{
	int num_stripes = btrfs_chunk_num_stripes(leaf, chunk);

	if (bargs->stripes_min <= num_stripes
			&& num_stripes <= bargs->stripes_max)
		return 0;

	return 1;
}

/*
 * Soft mode: filter out (return 1) chunks already in the conversion
 * target profile, so they are not needlessly relocated.
 */
static int chunk_soft_convert_filter(u64 chunk_type,
				     struct btrfs_balance_args *bargs)
{
	if (!(bargs->flags & BTRFS_BALANCE_ARGS_CONVERT))
		return 0;

	chunk_type = chunk_to_extended(chunk_type) &
				BTRFS_EXTENDED_PROFILE_MASK;

	if (bargs->target == chunk_type)
		return 1;

	return 0;
}

static int should_balance_chunk(struct btrfs_fs_info *fs_info, struct
				extent_buffer *leaf,
				struct btrfs_chunk *chunk, u64 chunk_offset)
{
	/*
	 * Run the chunk through every configured balance filter; return 1
	 * if it should be relocated, 0 if any filter rejects it.  Note the
	 * inverted convention vs the chunk_*_filter helpers, which return
	 * 1 to filter OUT.
	 */
	struct btrfs_balance_control *bctl = fs_info->balance_ctl;
	struct btrfs_balance_args *bargs = NULL;
	u64 chunk_type = btrfs_chunk_type(leaf, chunk);

	/* type filter */
	if (!((chunk_type & BTRFS_BLOCK_GROUP_TYPE_MASK) &
	      (bctl->flags & BTRFS_BALANCE_TYPE_MASK))) {
		return 0;
	}

	/*
	 * Pick the args for this chunk class.  bargs stays NULL only if
	 * chunk_type matches none of the three classes — presumably
	 * impossible after the type filter above; verify if changing.
	 */
	if (chunk_type & BTRFS_BLOCK_GROUP_DATA)
		bargs = &bctl->data;
	else if (chunk_type & BTRFS_BLOCK_GROUP_SYSTEM)
		bargs = &bctl->sys;
	else if (chunk_type & BTRFS_BLOCK_GROUP_METADATA)
		bargs = &bctl->meta;

	/* profiles filter */
	if ((bargs->flags & BTRFS_BALANCE_ARGS_PROFILES) &&
	    chunk_profiles_filter(chunk_type, bargs)) {
		return 0;
	}

	/* usage filter */
	if ((bargs->flags & BTRFS_BALANCE_ARGS_USAGE) &&
	    chunk_usage_filter(fs_info, chunk_offset, bargs)) {
		return 0;
	} else if ((bargs->flags & BTRFS_BALANCE_ARGS_USAGE_RANGE) &&
	    chunk_usage_range_filter(fs_info, chunk_offset, bargs)) {
		return 0;
	}

	/* devid filter */
	if ((bargs->flags & BTRFS_BALANCE_ARGS_DEVID) &&
	    chunk_devid_filter(leaf, chunk, bargs)) {
		return 0;
	}

	/* drange filter, makes sense only with devid filter */
	if ((bargs->flags & BTRFS_BALANCE_ARGS_DRANGE) &&
	    chunk_drange_filter(leaf, chunk, bargs)) {
		return 0;
	}

	/* vrange filter */
	if ((bargs->flags & BTRFS_BALANCE_ARGS_VRANGE) &&
	    chunk_vrange_filter(leaf, chunk, chunk_offset, bargs)) {
		return 0;
	}

	/* stripes filter */
	if ((bargs->flags & BTRFS_BALANCE_ARGS_STRIPES_RANGE) &&
	    chunk_stripes_range_filter(leaf, chunk, bargs)) {
		return 0;
	}

	/* soft profile changing mode */
	if ((bargs->flags & BTRFS_BALANCE_ARGS_SOFT) &&
	    chunk_soft_convert_filter(chunk_type, bargs)) {
		return 0;
	}

	/*
	 * limited by count, must be the last filter
	 */
	if ((bargs->flags & BTRFS_BALANCE_ARGS_LIMIT)) {
		if (bargs->limit == 0)
			return 0;
		else
			bargs->limit--;  /* mutates bargs: counts down per accepted chunk */
	} else if ((bargs->flags & BTRFS_BALANCE_ARGS_LIMIT_RANGE)) {
		/*
		 * Same logic as the 'limit' filter; the minimum cannot be
		 * determined here because we do not have the global information
		 * about the count of
all chunks that satisfy the filters. */ if (bargs->limit_max == 0) return 0; else bargs->limit_max--; } return 1; } static int __btrfs_balance(struct btrfs_fs_info *fs_info) { struct btrfs_balance_control *bctl = fs_info->balance_ctl; struct btrfs_root *chunk_root = fs_info->chunk_root; u64 chunk_type; struct btrfs_chunk *chunk; struct btrfs_path *path = NULL; struct btrfs_key key; struct btrfs_key found_key; struct extent_buffer *leaf; int slot; int ret; int enospc_errors = 0; bool counting = true; /* The single value limit and min/max limits use the same bytes in the */ u64 limit_data = bctl->data.limit; u64 limit_meta = bctl->meta.limit; u64 limit_sys = bctl->sys.limit; u32 count_data = 0; u32 count_meta = 0; u32 count_sys = 0; int chunk_reserved = 0; path = btrfs_alloc_path(); if (!path) { ret = -ENOMEM; goto error; } /* zero out stat counters */ spin_lock(&fs_info->balance_lock); memset(&bctl->stat, 0, sizeof(bctl->stat)); spin_unlock(&fs_info->balance_lock); again: if (!counting) { /* * The single value limit and min/max limits use the same bytes * in the */ bctl->data.limit = limit_data; bctl->meta.limit = limit_meta; bctl->sys.limit = limit_sys; } key.objectid = BTRFS_FIRST_CHUNK_TREE_OBJECTID; key.offset = (u64)-1; key.type = BTRFS_CHUNK_ITEM_KEY; while (1) { if ((!counting && atomic_read(&fs_info->balance_pause_req)) || atomic_read(&fs_info->balance_cancel_req)) { ret = -ECANCELED; goto error; } mutex_lock(&fs_info->delete_unused_bgs_mutex); ret = btrfs_search_slot(NULL, chunk_root, &key, path, 0, 0); if (ret < 0) { mutex_unlock(&fs_info->delete_unused_bgs_mutex); goto error; } /* * this shouldn't happen, it means the last relocate * failed */ if (ret == 0) BUG(); /* FIXME break ? 
*/ ret = btrfs_previous_item(chunk_root, path, 0, BTRFS_CHUNK_ITEM_KEY); if (ret) { mutex_unlock(&fs_info->delete_unused_bgs_mutex); ret = 0; break; } leaf = path->nodes[0]; slot = path->slots[0]; btrfs_item_key_to_cpu(leaf, &found_key, slot); if (found_key.objectid != key.objectid) { mutex_unlock(&fs_info->delete_unused_bgs_mutex); break; } chunk = btrfs_item_ptr(leaf, slot, struct btrfs_chunk); chunk_type = btrfs_chunk_type(leaf, chunk); if (!counting) { spin_lock(&fs_info->balance_lock); bctl->stat.considered++; spin_unlock(&fs_info->balance_lock); } ret = should_balance_chunk(fs_info, leaf, chunk, found_key.offset); btrfs_release_path(path); if (!ret) { mutex_unlock(&fs_info->delete_unused_bgs_mutex); goto loop; } if (counting) { mutex_unlock(&fs_info->delete_unused_bgs_mutex); spin_lock(&fs_info->balance_lock); bctl->stat.expected++; spin_unlock(&fs_info->balance_lock); if (chunk_type & BTRFS_BLOCK_GROUP_DATA) count_data++; else if (chunk_type & BTRFS_BLOCK_GROUP_SYSTEM) count_sys++; else if (chunk_type & BTRFS_BLOCK_GROUP_METADATA) count_meta++; goto loop; } /* * Apply limit_min filter, no need to check if the LIMITS * filter is used, limit_min is 0 by default */ if (((chunk_type & BTRFS_BLOCK_GROUP_DATA) && count_data < bctl->data.limit_min) || ((chunk_type & BTRFS_BLOCK_GROUP_METADATA) && count_meta < bctl->meta.limit_min) || ((chunk_type & BTRFS_BLOCK_GROUP_SYSTEM) && count_sys < bctl->sys.limit_min)) { mutex_unlock(&fs_info->delete_unused_bgs_mutex); goto loop; } if (!chunk_reserved) { /* * We may be relocating the only data chunk we have, * which could potentially end up with losing data's * raid profile, so lets allocate an empty one in * advance. 
*/ ret = btrfs_may_alloc_data_chunk(fs_info, found_key.offset); if (ret < 0) { mutex_unlock(&fs_info->delete_unused_bgs_mutex); goto error; } else if (ret == 1) { chunk_reserved = 1; } } ret = btrfs_relocate_chunk(fs_info, found_key.offset); mutex_unlock(&fs_info->delete_unused_bgs_mutex); if (ret == -ENOSPC) { enospc_errors++; } else if (ret == -ETXTBSY) { btrfs_info(fs_info, "skipping relocation of block group %llu due to active swapfile", found_key.offset); ret = 0; } else if (ret) { goto error; } else { spin_lock(&fs_info->balance_lock); bctl->stat.completed++; spin_unlock(&fs_info->balance_lock); } loop: if (found_key.offset == 0) break; key.offset = found_key.offset - 1; } if (counting) { btrfs_release_path(path); counting = false; goto again; } error: btrfs_free_path(path); if (enospc_errors) { btrfs_info(fs_info, "%d enospc errors during balance", enospc_errors); if (!ret) ret = -ENOSPC; } return ret; } /** * alloc_profile_is_valid - see if a given profile is valid and reduced * @flags: profile to validate * @extended: if true @flags is treated as an extended profile */ static int alloc_profile_is_valid(u64 flags, int extended) { u64 mask = (extended ? 
				   BTRFS_EXTENDED_PROFILE_MASK :
				   BTRFS_BLOCK_GROUP_PROFILE_MASK);

	flags &= ~BTRFS_BLOCK_GROUP_TYPE_MASK;

	/* 1) check that all other bits are zeroed */
	if (flags & ~mask)
		return 0;

	/* 2) see if profile is reduced */
	if (flags == 0)
		return !extended; /* "0" is valid for usual profiles */

	/* true if exactly one bit set */
	return is_power_of_2(flags);
}

/*
 * True when balance should tear down its state: either cancel was
 * requested, or neither pause nor cancel is pending (normal exit).
 * (The expression is equivalent to "cancel_req || !pause_req".)
 */
static inline int balance_need_close(struct btrfs_fs_info *fs_info)
{
	/* cancel requested || normal exit path */
	return atomic_read(&fs_info->balance_cancel_req) ||
		(atomic_read(&fs_info->balance_pause_req) == 0 &&
		 atomic_read(&fs_info->balance_cancel_req) == 0);
}

/*
 * Non-zero return value signifies invalidity: a convert was requested but
 * the target is not a valid reduced extended profile, or is outside the
 * profiles permitted by @allowed.
 */
static inline int validate_convert_profile(struct btrfs_balance_args *bctl_arg,
		u64 allowed)
{
	return ((bctl_arg->flags & BTRFS_BALANCE_ARGS_CONVERT) &&
		(!alloc_profile_is_valid(bctl_arg->target, 1) ||
		 (bctl_arg->target & ~allowed)));
}

/*
 * Fill @buf with textual description of balance filter flags @bargs, up to
 * @size_buf including the terminating null. The output may be trimmed if it
 * does not fit into the provided buffer.
*/ static void describe_balance_args(struct btrfs_balance_args *bargs, char *buf, u32 size_buf) { int ret; u32 size_bp = size_buf; char *bp = buf; u64 flags = bargs->flags; char tmp_buf[128] = {'\0'}; if (!flags) return; #define CHECK_APPEND_NOARG(a) \ do { \ ret = snprintf(bp, size_bp, (a)); \ if (ret < 0 || ret >= size_bp) \ goto out_overflow; \ size_bp -= ret; \ bp += ret; \ } while (0) #define CHECK_APPEND_1ARG(a, v1) \ do { \ ret = snprintf(bp, size_bp, (a), (v1)); \ if (ret < 0 || ret >= size_bp) \ goto out_overflow; \ size_bp -= ret; \ bp += ret; \ } while (0) #define CHECK_APPEND_2ARG(a, v1, v2) \ do { \ ret = snprintf(bp, size_bp, (a), (v1), (v2)); \ if (ret < 0 || ret >= size_bp) \ goto out_overflow; \ size_bp -= ret; \ bp += ret; \ } while (0) if (flags & BTRFS_BALANCE_ARGS_CONVERT) { int index = btrfs_bg_flags_to_raid_index(bargs->target); CHECK_APPEND_1ARG("convert=%s,", get_raid_name(index)); } if (flags & BTRFS_BALANCE_ARGS_SOFT) CHECK_APPEND_NOARG("soft,"); if (flags & BTRFS_BALANCE_ARGS_PROFILES) { btrfs_describe_block_groups(bargs->profiles, tmp_buf, sizeof(tmp_buf)); CHECK_APPEND_1ARG("profiles=%s,", tmp_buf); } if (flags & BTRFS_BALANCE_ARGS_USAGE) CHECK_APPEND_1ARG("usage=%llu,", bargs->usage); if (flags & BTRFS_BALANCE_ARGS_USAGE_RANGE) CHECK_APPEND_2ARG("usage=%u..%u,", bargs->usage_min, bargs->usage_max); if (flags & BTRFS_BALANCE_ARGS_DEVID) CHECK_APPEND_1ARG("devid=%llu,", bargs->devid); if (flags & BTRFS_BALANCE_ARGS_DRANGE) CHECK_APPEND_2ARG("drange=%llu..%llu,", bargs->pstart, bargs->pend); if (flags & BTRFS_BALANCE_ARGS_VRANGE) CHECK_APPEND_2ARG("vrange=%llu..%llu,", bargs->vstart, bargs->vend); if (flags & BTRFS_BALANCE_ARGS_LIMIT) CHECK_APPEND_1ARG("limit=%llu,", bargs->limit); if (flags & BTRFS_BALANCE_ARGS_LIMIT_RANGE) CHECK_APPEND_2ARG("limit=%u..%u,", bargs->limit_min, bargs->limit_max); if (flags & BTRFS_BALANCE_ARGS_STRIPES_RANGE) CHECK_APPEND_2ARG("stripes=%u..%u,", bargs->stripes_min, bargs->stripes_max); #undef 
CHECK_APPEND_2ARG #undef CHECK_APPEND_1ARG #undef CHECK_APPEND_NOARG out_overflow: if (size_bp < size_buf) buf[size_buf - size_bp - 1] = '\0'; /* remove last , */ else buf[0] = '\0'; } static void describe_balance_start_or_resume(struct btrfs_fs_info *fs_info) { u32 size_buf = 1024; char tmp_buf[192] = {'\0'}; char *buf; char *bp; u32 size_bp = size_buf; int ret; struct btrfs_balance_control *bctl = fs_info->balance_ctl; buf = kzalloc(size_buf, GFP_KERNEL); if (!buf) return; bp = buf; #define CHECK_APPEND_1ARG(a, v1) \ do { \ ret = snprintf(bp, size_bp, (a), (v1)); \ if (ret < 0 || ret >= size_bp) \ goto out_overflow; \ size_bp -= ret; \ bp += ret; \ } while (0) if (bctl->flags & BTRFS_BALANCE_FORCE) CHECK_APPEND_1ARG("%s", "-f "); if (bctl->flags & BTRFS_BALANCE_DATA) { describe_balance_args(&bctl->data, tmp_buf, sizeof(tmp_buf)); CHECK_APPEND_1ARG("-d%s ", tmp_buf); } if (bctl->flags & BTRFS_BALANCE_METADATA) { describe_balance_args(&bctl->meta, tmp_buf, sizeof(tmp_buf)); CHECK_APPEND_1ARG("-m%s ", tmp_buf); } if (bctl->flags & BTRFS_BALANCE_SYSTEM) { describe_balance_args(&bctl->sys, tmp_buf, sizeof(tmp_buf)); CHECK_APPEND_1ARG("-s%s ", tmp_buf); } #undef CHECK_APPEND_1ARG out_overflow: if (size_bp < size_buf) buf[size_buf - size_bp - 1] = '\0'; /* remove last " " */ btrfs_info(fs_info, "balance: %s %s", (bctl->flags & BTRFS_BALANCE_RESUME) ? 
"resume" : "start", buf); kfree(buf); } /* * Should be called with balance mutexe held */ int btrfs_balance(struct btrfs_fs_info *fs_info, struct btrfs_balance_control *bctl, struct btrfs_ioctl_balance_args *bargs) { u64 meta_target, data_target; u64 allowed; int mixed = 0; int ret; u64 num_devices; unsigned seq; bool reducing_integrity; if (btrfs_fs_closing(fs_info) || atomic_read(&fs_info->balance_pause_req) || atomic_read(&fs_info->balance_cancel_req)) { ret = -EINVAL; goto out; } allowed = btrfs_super_incompat_flags(fs_info->super_copy); if (allowed & BTRFS_FEATURE_INCOMPAT_MIXED_GROUPS) mixed = 1; /* * In case of mixed groups both data and meta should be picked, * and identical options should be given for both of them. */ allowed = BTRFS_BALANCE_DATA | BTRFS_BALANCE_METADATA; if (mixed && (bctl->flags & allowed)) { if (!(bctl->flags & BTRFS_BALANCE_DATA) || !(bctl->flags & BTRFS_BALANCE_METADATA) || memcmp(&bctl->data, &bctl->meta, sizeof(bctl->data))) { btrfs_err(fs_info, "balance: mixed groups data and metadata options must be the same"); ret = -EINVAL; goto out; } } num_devices = btrfs_num_devices(fs_info); allowed = BTRFS_AVAIL_ALLOC_BIT_SINGLE | BTRFS_BLOCK_GROUP_DUP; if (num_devices > 1) allowed |= (BTRFS_BLOCK_GROUP_RAID0 | BTRFS_BLOCK_GROUP_RAID1); if (num_devices > 2) allowed |= BTRFS_BLOCK_GROUP_RAID5; if (num_devices > 3) allowed |= (BTRFS_BLOCK_GROUP_RAID10 | BTRFS_BLOCK_GROUP_RAID6); if (validate_convert_profile(&bctl->data, allowed)) { int index = btrfs_bg_flags_to_raid_index(bctl->data.target); btrfs_err(fs_info, "balance: invalid convert data profile %s", get_raid_name(index)); ret = -EINVAL; goto out; } if (validate_convert_profile(&bctl->meta, allowed)) { int index = btrfs_bg_flags_to_raid_index(bctl->meta.target); btrfs_err(fs_info, "balance: invalid convert metadata profile %s", get_raid_name(index)); ret = -EINVAL; goto out; } if (validate_convert_profile(&bctl->sys, allowed)) { int index = btrfs_bg_flags_to_raid_index(bctl->sys.target); 
btrfs_err(fs_info, "balance: invalid convert system profile %s", get_raid_name(index)); ret = -EINVAL; goto out; } /* allow to reduce meta or sys integrity only if force set */ allowed = BTRFS_BLOCK_GROUP_DUP | BTRFS_BLOCK_GROUP_RAID1 | BTRFS_BLOCK_GROUP_RAID10 | BTRFS_BLOCK_GROUP_RAID5 | BTRFS_BLOCK_GROUP_RAID6; do { seq = read_seqbegin(&fs_info->profiles_lock); if (((bctl->sys.flags & BTRFS_BALANCE_ARGS_CONVERT) && (fs_info->avail_system_alloc_bits & allowed) && !(bctl->sys.target & allowed)) || ((bctl->meta.flags & BTRFS_BALANCE_ARGS_CONVERT) && (fs_info->avail_metadata_alloc_bits & allowed) && !(bctl->meta.target & allowed))) reducing_integrity = true; else reducing_integrity = false; /* if we're not converting, the target field is uninitialized */ meta_target = (bctl->meta.flags & BTRFS_BALANCE_ARGS_CONVERT) ? bctl->meta.target : fs_info->avail_metadata_alloc_bits; data_target = (bctl->data.flags & BTRFS_BALANCE_ARGS_CONVERT) ? bctl->data.target : fs_info->avail_data_alloc_bits; } while (read_seqretry(&fs_info->profiles_lock, seq)); if (reducing_integrity) { if (bctl->flags & BTRFS_BALANCE_FORCE) { btrfs_info(fs_info, "balance: force reducing metadata integrity"); } else { btrfs_err(fs_info, "balance: reduces metadata integrity, use --force if you want this"); ret = -EINVAL; goto out; } } if (btrfs_get_num_tolerated_disk_barrier_failures(meta_target) < btrfs_get_num_tolerated_disk_barrier_failures(data_target)) { int meta_index = btrfs_bg_flags_to_raid_index(meta_target); int data_index = btrfs_bg_flags_to_raid_index(data_target); btrfs_warn(fs_info, "balance: metadata profile %s has lower redundancy than data profile %s", get_raid_name(meta_index), get_raid_name(data_index)); } ret = insert_balance_item(fs_info, bctl); if (ret && ret != -EEXIST) goto out; if (!(bctl->flags & BTRFS_BALANCE_RESUME)) { BUG_ON(ret == -EEXIST); BUG_ON(fs_info->balance_ctl); spin_lock(&fs_info->balance_lock); fs_info->balance_ctl = bctl; spin_unlock(&fs_info->balance_lock); } else 
{ BUG_ON(ret != -EEXIST); spin_lock(&fs_info->balance_lock); update_balance_args(bctl); spin_unlock(&fs_info->balance_lock); } ASSERT(!test_bit(BTRFS_FS_BALANCE_RUNNING, &fs_info->flags)); set_bit(BTRFS_FS_BALANCE_RUNNING, &fs_info->flags); describe_balance_start_or_resume(fs_info); mutex_unlock(&fs_info->balance_mutex); ret = __btrfs_balance(fs_info); mutex_lock(&fs_info->balance_mutex); if (ret == -ECANCELED && atomic_read(&fs_info->balance_pause_req)) btrfs_info(fs_info, "balance: paused"); else if (ret == -ECANCELED && atomic_read(&fs_info->balance_cancel_req)) btrfs_info(fs_info, "balance: canceled"); else btrfs_info(fs_info, "balance: ended with status: %d", ret); clear_bit(BTRFS_FS_BALANCE_RUNNING, &fs_info->flags); if (bargs) { memset(bargs, 0, sizeof(*bargs)); btrfs_update_ioctl_balance_args(fs_info, bargs); } if ((ret && ret != -ECANCELED && ret != -ENOSPC) || balance_need_close(fs_info)) { reset_balance_state(fs_info); clear_bit(BTRFS_FS_EXCL_OP, &fs_info->flags); } wake_up(&fs_info->balance_wait_q); return ret; out: if (bctl->flags & BTRFS_BALANCE_RESUME) reset_balance_state(fs_info); else kfree(bctl); clear_bit(BTRFS_FS_EXCL_OP, &fs_info->flags); return ret; } static int balance_kthread(void *data) { struct btrfs_fs_info *fs_info = data; int ret = 0; mutex_lock(&fs_info->balance_mutex); if (fs_info->balance_ctl) ret = btrfs_balance(fs_info, fs_info->balance_ctl, NULL); mutex_unlock(&fs_info->balance_mutex); return ret; } int btrfs_resume_balance_async(struct btrfs_fs_info *fs_info) { struct task_struct *tsk; mutex_lock(&fs_info->balance_mutex); if (!fs_info->balance_ctl) { mutex_unlock(&fs_info->balance_mutex); return 0; } mutex_unlock(&fs_info->balance_mutex); if (btrfs_test_opt(fs_info, SKIP_BALANCE)) { btrfs_info(fs_info, "balance: resume skipped"); return 0; } /* * A ro->rw remount sequence should continue with the paused balance * regardless of who pauses it, system or the user as of now, so set * the resume flag. 
*/ spin_lock(&fs_info->balance_lock); fs_info->balance_ctl->flags |= BTRFS_BALANCE_RESUME; spin_unlock(&fs_info->balance_lock); tsk = kthread_run(balance_kthread, fs_info, "btrfs-balance"); return PTR_ERR_OR_ZERO(tsk); } int btrfs_recover_balance(struct btrfs_fs_info *fs_info) { struct btrfs_balance_control *bctl; struct btrfs_balance_item *item; struct btrfs_disk_balance_args disk_bargs; struct btrfs_path *path; struct extent_buffer *leaf; struct btrfs_key key; int ret; path = btrfs_alloc_path(); if (!path) return -ENOMEM; key.objectid = BTRFS_BALANCE_OBJECTID; key.type = BTRFS_TEMPORARY_ITEM_KEY; key.offset = 0; ret = btrfs_search_slot(NULL, fs_info->tree_root, &key, path, 0, 0); if (ret < 0) goto out; if (ret > 0) { /* ret = -ENOENT; */ ret = 0; goto out; } bctl = kzalloc(sizeof(*bctl), GFP_NOFS); if (!bctl) { ret = -ENOMEM; goto out; } leaf = path->nodes[0]; item = btrfs_item_ptr(leaf, path->slots[0], struct btrfs_balance_item); bctl->flags = btrfs_balance_flags(leaf, item); bctl->flags |= BTRFS_BALANCE_RESUME; btrfs_balance_data(leaf, item, &disk_bargs); btrfs_disk_balance_args_to_cpu(&bctl->data, &disk_bargs); btrfs_balance_meta(leaf, item, &disk_bargs); btrfs_disk_balance_args_to_cpu(&bctl->meta, &disk_bargs); btrfs_balance_sys(leaf, item, &disk_bargs); btrfs_disk_balance_args_to_cpu(&bctl->sys, &disk_bargs); /* * This should never happen, as the paused balance state is recovered * during mount without any chance of other exclusive ops to collide. * * This gives the exclusive op status to balance and keeps in paused * state until user intervention (cancel or umount). If the ownership * cannot be assigned, show a message but do not fail. The balance * is in a paused state and must have fs_info::balance_ctl properly * set up. 
	 */
	/* Tail of btrfs_recover_balance(): publish the recovered bctl. */
	if (test_and_set_bit(BTRFS_FS_EXCL_OP, &fs_info->flags))
		btrfs_warn(fs_info,
	"balance: cannot set exclusive op status, resume manually");

	mutex_lock(&fs_info->balance_mutex);
	BUG_ON(fs_info->balance_ctl);
	spin_lock(&fs_info->balance_lock);
	fs_info->balance_ctl = bctl;
	spin_unlock(&fs_info->balance_lock);
	mutex_unlock(&fs_info->balance_mutex);
out:
	btrfs_free_path(path);
	return ret;
}

/*
 * Request a pause of a running balance and wait until it actually stops.
 *
 * Returns 0 when a running balance was paused, -ENOTCONN when no balance
 * is set up or none is currently running.  The balance item stays on disk
 * so the operation can be resumed later.
 */
int btrfs_pause_balance(struct btrfs_fs_info *fs_info)
{
	int ret = 0;

	mutex_lock(&fs_info->balance_mutex);
	if (!fs_info->balance_ctl) {
		mutex_unlock(&fs_info->balance_mutex);
		return -ENOTCONN;
	}

	if (test_bit(BTRFS_FS_BALANCE_RUNNING, &fs_info->flags)) {
		atomic_inc(&fs_info->balance_pause_req);
		/* Drop the mutex so the balance thread can observe the
		 * request and clear BALANCE_RUNNING. */
		mutex_unlock(&fs_info->balance_mutex);

		wait_event(fs_info->balance_wait_q,
			   !test_bit(BTRFS_FS_BALANCE_RUNNING, &fs_info->flags));

		mutex_lock(&fs_info->balance_mutex);
		/* we are good with balance_ctl ripped off from under us */
		BUG_ON(test_bit(BTRFS_FS_BALANCE_RUNNING, &fs_info->flags));
		atomic_dec(&fs_info->balance_pause_req);
	} else {
		ret = -ENOTCONN;
	}

	mutex_unlock(&fs_info->balance_mutex);
	return ret;
}

int btrfs_cancel_balance(struct btrfs_fs_info *fs_info)
{
	mutex_lock(&fs_info->balance_mutex);
	if (!fs_info->balance_ctl) {
		mutex_unlock(&fs_info->balance_mutex);
		return -ENOTCONN;
	}

	/*
	 * A paused balance with the item stored on disk can be resumed at
	 * mount time if the mount is read-write. Otherwise it's still paused
	 * and we must not allow cancelling as it deletes the item.
*/ if (sb_rdonly(fs_info->sb)) { mutex_unlock(&fs_info->balance_mutex); return -EROFS; } atomic_inc(&fs_info->balance_cancel_req); /* * if we are running just wait and return, balance item is * deleted in btrfs_balance in this case */ if (test_bit(BTRFS_FS_BALANCE_RUNNING, &fs_info->flags)) { mutex_unlock(&fs_info->balance_mutex); wait_event(fs_info->balance_wait_q, !test_bit(BTRFS_FS_BALANCE_RUNNING, &fs_info->flags)); mutex_lock(&fs_info->balance_mutex); } else { mutex_unlock(&fs_info->balance_mutex); /* * Lock released to allow other waiters to continue, we'll * reexamine the status again. */ mutex_lock(&fs_info->balance_mutex); if (fs_info->balance_ctl) { reset_balance_state(fs_info); clear_bit(BTRFS_FS_EXCL_OP, &fs_info->flags); btrfs_info(fs_info, "balance: canceled"); } } BUG_ON(fs_info->balance_ctl || test_bit(BTRFS_FS_BALANCE_RUNNING, &fs_info->flags)); atomic_dec(&fs_info->balance_cancel_req); mutex_unlock(&fs_info->balance_mutex); return 0; } static int btrfs_uuid_scan_kthread(void *data) { struct btrfs_fs_info *fs_info = data; struct btrfs_root *root = fs_info->tree_root; struct btrfs_key key; struct btrfs_path *path = NULL; int ret = 0; struct extent_buffer *eb; int slot; struct btrfs_root_item root_item; u32 item_size; struct btrfs_trans_handle *trans = NULL; path = btrfs_alloc_path(); if (!path) { ret = -ENOMEM; goto out; } key.objectid = 0; key.type = BTRFS_ROOT_ITEM_KEY; key.offset = 0; while (1) { ret = btrfs_search_forward(root, &key, path, BTRFS_OLDEST_GENERATION); if (ret) { if (ret > 0) ret = 0; break; } if (key.type != BTRFS_ROOT_ITEM_KEY || (key.objectid < BTRFS_FIRST_FREE_OBJECTID && key.objectid != BTRFS_FS_TREE_OBJECTID) || key.objectid > BTRFS_LAST_FREE_OBJECTID) goto skip; eb = path->nodes[0]; slot = path->slots[0]; item_size = btrfs_item_size_nr(eb, slot); if (item_size < sizeof(root_item)) goto skip; read_extent_buffer(eb, &root_item, btrfs_item_ptr_offset(eb, slot), (int)sizeof(root_item)); if (btrfs_root_refs(&root_item) == 0) goto 
skip; if (!btrfs_is_empty_uuid(root_item.uuid) || !btrfs_is_empty_uuid(root_item.received_uuid)) { if (trans) goto update_tree; btrfs_release_path(path); /* * 1 - subvol uuid item * 1 - received_subvol uuid item */ trans = btrfs_start_transaction(fs_info->uuid_root, 2); if (IS_ERR(trans)) { ret = PTR_ERR(trans); break; } continue; } else { goto skip; } update_tree: if (!btrfs_is_empty_uuid(root_item.uuid)) { ret = btrfs_uuid_tree_add(trans, root_item.uuid, BTRFS_UUID_KEY_SUBVOL, key.objectid); if (ret < 0) { btrfs_warn(fs_info, "uuid_tree_add failed %d", ret); break; } } if (!btrfs_is_empty_uuid(root_item.received_uuid)) { ret = btrfs_uuid_tree_add(trans, root_item.received_uuid, BTRFS_UUID_KEY_RECEIVED_SUBVOL, key.objectid); if (ret < 0) { btrfs_warn(fs_info, "uuid_tree_add failed %d", ret); break; } } skip: if (trans) { ret = btrfs_end_transaction(trans); trans = NULL; if (ret) break; } btrfs_release_path(path); if (key.offset < (u64)-1) { key.offset++; } else if (key.type < BTRFS_ROOT_ITEM_KEY) { key.offset = 0; key.type = BTRFS_ROOT_ITEM_KEY; } else if (key.objectid < (u64)-1) { key.offset = 0; key.type = BTRFS_ROOT_ITEM_KEY; key.objectid++; } else { break; } cond_resched(); } out: btrfs_free_path(path); if (trans && !IS_ERR(trans)) btrfs_end_transaction(trans); if (ret) btrfs_warn(fs_info, "btrfs_uuid_scan_kthread failed %d", ret); else set_bit(BTRFS_FS_UPDATE_UUID_TREE_GEN, &fs_info->flags); up(&fs_info->uuid_tree_rescan_sem); return 0; } /* * Callback for btrfs_uuid_tree_iterate(). * returns: * 0 check succeeded, the entry is not outdated. * < 0 if an error occurred. * > 0 if the check failed, which means the caller shall remove the entry. 
 */
static int btrfs_check_uuid_tree_entry(struct btrfs_fs_info *fs_info,
				       u8 *uuid, u8 type, u64 subid)
{
	/*
	 * Validate one UUID-tree entry against the subvolume it points at.
	 * Returns 0 if the entry is still correct, 1 if it is stale (the
	 * subvolume is gone or its uuid/received_uuid no longer matches)
	 * and should be removed by the caller, or a negative errno.
	 */
	struct btrfs_key key;
	int ret = 0;
	struct btrfs_root *subvol_root;

	/* Only the two subvolume UUID key types are meaningful here. */
	if (type != BTRFS_UUID_KEY_SUBVOL &&
	    type != BTRFS_UUID_KEY_RECEIVED_SUBVOL)
		goto out;

	key.objectid = subid;
	key.type = BTRFS_ROOT_ITEM_KEY;
	key.offset = (u64)-1;
	subvol_root = btrfs_read_fs_root_no_name(fs_info, &key);
	if (IS_ERR(subvol_root)) {
		ret = PTR_ERR(subvol_root);
		if (ret == -ENOENT)
			ret = 1;	/* subvolume is gone: stale entry */
		goto out;
	}

	switch (type) {
	case BTRFS_UUID_KEY_SUBVOL:
		if (memcmp(uuid, subvol_root->root_item.uuid, BTRFS_UUID_SIZE))
			ret = 1;
		break;
	case BTRFS_UUID_KEY_RECEIVED_SUBVOL:
		if (memcmp(uuid, subvol_root->root_item.received_uuid,
			   BTRFS_UUID_SIZE))
			ret = 1;
		break;
	}

out:
	return ret;
}

static int btrfs_uuid_rescan_kthread(void *data)
{
	struct btrfs_fs_info *fs_info = (struct btrfs_fs_info *)data;
	int ret;

	/*
	 * 1st step is to iterate through the existing UUID tree and
	 * to delete all entries that contain outdated data.
	 * 2nd step is to add all missing entries to the UUID tree.
	 */
	ret = btrfs_uuid_tree_iterate(fs_info, btrfs_check_uuid_tree_entry);
	if (ret < 0) {
		btrfs_warn(fs_info, "iterating uuid_tree failed %d", ret);
		/* Release the semaphore taken by the code that started us. */
		up(&fs_info->uuid_tree_rescan_sem);
		return ret;
	}
	/* Step 2: (re)add all missing entries; also releases the semaphore. */
	return btrfs_uuid_scan_kthread(data);
}

/*
 * Create the UUID tree and kick off a background scan that populates it.
 *
 * Returns 0 on success (the scan continues asynchronously) or a negative
 * errno if the tree could not be created/committed or the kthread could
 * not be started.  uuid_tree_rescan_sem is held until the scan finishes.
 */
int btrfs_create_uuid_tree(struct btrfs_fs_info *fs_info)
{
	struct btrfs_trans_handle *trans;
	struct btrfs_root *tree_root = fs_info->tree_root;
	struct btrfs_root *uuid_root;
	struct task_struct *task;
	int ret;

	/*
	 * 1 - root node
	 * 1 - root item
	 */
	trans = btrfs_start_transaction(tree_root, 2);
	if (IS_ERR(trans))
		return PTR_ERR(trans);

	uuid_root = btrfs_create_tree(trans, fs_info,
				      BTRFS_UUID_TREE_OBJECTID);
	if (IS_ERR(uuid_root)) {
		ret = PTR_ERR(uuid_root);
		btrfs_abort_transaction(trans, ret);
		btrfs_end_transaction(trans);
		return ret;
	}

	fs_info->uuid_root = uuid_root;

	ret = btrfs_commit_transaction(trans);
	if (ret)
		return ret;

	down(&fs_info->uuid_tree_rescan_sem);
	task = kthread_run(btrfs_uuid_scan_kthread, fs_info, "btrfs-uuid");
	if (IS_ERR(task)) {
		/* fs_info->update_uuid_tree_gen remains 0 in all error case */
		btrfs_warn(fs_info, "failed to start uuid_scan task");
		up(&fs_info->uuid_tree_rescan_sem);
		return PTR_ERR(task);
	}

	return 0;
}

/*
 * Start a background rescan that validates and then repopulates the
 * existing UUID tree.  Returns 0 on success or the kthread_run() error.
 */
int btrfs_check_uuid_tree(struct btrfs_fs_info *fs_info)
{
	struct task_struct *task;

	down(&fs_info->uuid_tree_rescan_sem);
	task = kthread_run(btrfs_uuid_rescan_kthread, fs_info, "btrfs-uuid");
	if (IS_ERR(task)) {
		/* fs_info->update_uuid_tree_gen remains 0 in all error case */
		btrfs_warn(fs_info, "failed to start uuid_rescan task");
		up(&fs_info->uuid_tree_rescan_sem);
		return PTR_ERR(task);
	}

	return 0;
}

/*
 * shrinking a device means finding all of the device extents past
 * the new size, and then following the back refs to the chunks.
 * The chunk relocation code actually frees the device extent
 */
/*
 * Shrink @device to @new_size (rounded down to the sector size).
 *
 * Walks the device tree backwards from the highest dev extent, relocating
 * every chunk whose dev extent ends beyond @new_size.  The in-memory device
 * size is reduced up front (under chunk_mutex) so the allocator stops using
 * the shrunk range while relocation runs.  On any failure the original
 * total_bytes / total_rw_bytes / free_chunk_space accounting is restored
 * in the "done:" path.
 *
 * Returns 0 on success, -ENOSPC if relocation repeatedly fails, or another
 * negative errno.
 */
int btrfs_shrink_device(struct btrfs_device *device, u64 new_size)
{
	struct btrfs_fs_info *fs_info = device->fs_info;
	struct btrfs_root *root = fs_info->dev_root;
	struct btrfs_trans_handle *trans;
	struct btrfs_dev_extent *dev_extent = NULL;
	struct btrfs_path *path;
	u64 length;
	u64 chunk_offset;
	int ret;
	int slot;
	int failed = 0;
	bool retried = false;
	bool checked_pending_chunks = false;
	struct extent_buffer *l;
	struct btrfs_key key;
	struct btrfs_super_block *super_copy = fs_info->super_copy;
	u64 old_total = btrfs_super_total_bytes(super_copy);
	u64 old_size = btrfs_device_get_total_bytes(device);
	u64 diff;

	new_size = round_down(new_size, fs_info->sectorsize);
	diff = round_down(old_size - new_size, fs_info->sectorsize);

	/* The replace target device is managed by the replace code. */
	if (test_bit(BTRFS_DEV_STATE_REPLACE_TGT, &device->dev_state))
		return -EINVAL;

	path = btrfs_alloc_path();
	if (!path)
		return -ENOMEM;

	path->reada = READA_BACK;

	mutex_lock(&fs_info->chunk_mutex);

	/*
	 * Shrink the in-memory size first so the allocator stops handing out
	 * space from the range being removed.  Undone in "done:" on failure.
	 */
	btrfs_device_set_total_bytes(device, new_size);
	if (test_bit(BTRFS_DEV_STATE_WRITEABLE, &device->dev_state)) {
		device->fs_devices->total_rw_bytes -= diff;
		atomic64_sub(diff, &fs_info->free_chunk_space);
	}
	mutex_unlock(&fs_info->chunk_mutex);

again:
	/* Start from the last dev extent of this device and walk backwards. */
	key.objectid = device->devid;
	key.offset = (u64)-1;
	key.type = BTRFS_DEV_EXTENT_KEY;

	do {
		/* Held across each relocation so unused-bg cleanup can't race. */
		mutex_lock(&fs_info->delete_unused_bgs_mutex);
		ret = btrfs_search_slot(NULL, root, &key, path, 0, 0);
		if (ret < 0) {
			mutex_unlock(&fs_info->delete_unused_bgs_mutex);
			goto done;
		}

		ret = btrfs_previous_item(root, path, 0, key.type);
		if (ret)
			mutex_unlock(&fs_info->delete_unused_bgs_mutex);
		if (ret < 0)
			goto done;
		if (ret) {
			/* No more dev extents for this device. */
			ret = 0;
			btrfs_release_path(path);
			break;
		}

		l = path->nodes[0];
		slot = path->slots[0];
		btrfs_item_key_to_cpu(l, &key, path->slots[0]);

		if (key.objectid != device->devid) {
			mutex_unlock(&fs_info->delete_unused_bgs_mutex);
			btrfs_release_path(path);
			break;
		}

		dev_extent = btrfs_item_ptr(l, slot, struct btrfs_dev_extent);
		length = btrfs_dev_extent_length(l, dev_extent);

		/* Extent fits entirely below the new size: we are finished. */
		if (key.offset + length <= new_size) {
			mutex_unlock(&fs_info->delete_unused_bgs_mutex);
			btrfs_release_path(path);
			break;
		}

		chunk_offset = btrfs_dev_extent_chunk_offset(l, dev_extent);
		btrfs_release_path(path);

		/*
		 * We may be relocating the only data chunk we have,
		 * which could potentially end up with losing data's
		 * raid profile, so lets allocate an empty one in
		 * advance.
		 */
		ret = btrfs_may_alloc_data_chunk(fs_info, chunk_offset);
		if (ret < 0) {
			mutex_unlock(&fs_info->delete_unused_bgs_mutex);
			goto done;
		}

		ret = btrfs_relocate_chunk(fs_info, chunk_offset);
		mutex_unlock(&fs_info->delete_unused_bgs_mutex);
		if (ret == -ENOSPC) {
			/* Remember and retry the whole walk once (see below). */
			failed++;
		} else if (ret) {
			if (ret == -ETXTBSY) {
				btrfs_warn(fs_info,
		   "could not shrink block group %llu due to active swapfile",
					   chunk_offset);
			}
			goto done;
		}
	} while (key.offset-- > 0);

	if (failed && !retried) {
		failed = 0;
		retried = true;
		goto again;
	} else if (failed && retried) {
		ret = -ENOSPC;
		goto done;
	}

	/* Shrinking succeeded, else we would be at "done". */
	trans = btrfs_start_transaction(root, 0);
	if (IS_ERR(trans)) {
		ret = PTR_ERR(trans);
		goto done;
	}

	mutex_lock(&fs_info->chunk_mutex);

	/*
	 * We checked in the above loop all device extents that were already in
	 * the device tree. However before we have updated the device's
	 * total_bytes to the new size, we might have had chunk allocations that
	 * have not complete yet (new block groups attached to transaction
	 * handles), and therefore their device extents were not yet in the
	 * device tree and we missed them in the loop above. So if we have any
	 * pending chunk using a device extent that overlaps the device range
	 * that we can not use anymore, commit the current transaction and
	 * repeat the search on the device tree - this way we guarantee we will
	 * not have chunks using device extents that end beyond 'new_size'.
	 */
	if (!checked_pending_chunks) {
		u64 start = new_size;
		u64 len = old_size - new_size;

		if (contains_pending_extent(trans->transaction, device,
					    &start, len)) {
			mutex_unlock(&fs_info->chunk_mutex);
			checked_pending_chunks = true;
			failed = 0;
			retried = false;
			ret = btrfs_commit_transaction(trans);
			if (ret)
				goto done;
			goto again;
		}
	}

	btrfs_device_set_disk_total_bytes(device, new_size);
	if (list_empty(&device->resized_list))
		list_add_tail(&device->resized_list,
			      &fs_info->fs_devices->resized_devices);

	WARN_ON(diff > old_total);
	btrfs_set_super_total_bytes(super_copy,
			round_down(old_total - diff, fs_info->sectorsize));
	mutex_unlock(&fs_info->chunk_mutex);

	/* Now btrfs_update_device() will change the on-disk size. */
	ret = btrfs_update_device(trans, device);
	if (ret < 0) {
		btrfs_abort_transaction(trans, ret);
		btrfs_end_transaction(trans);
	} else {
		ret = btrfs_commit_transaction(trans);
	}
done:
	btrfs_free_path(path);
	if (ret) {
		/* Roll back the in-memory accounting done at the top. */
		mutex_lock(&fs_info->chunk_mutex);
		btrfs_device_set_total_bytes(device, old_size);
		if (test_bit(BTRFS_DEV_STATE_WRITEABLE, &device->dev_state))
			device->fs_devices->total_rw_bytes += diff;
		atomic64_add(diff, &fs_info->free_chunk_space);
		mutex_unlock(&fs_info->chunk_mutex);
	}
	return ret;
}

/*
 * Append a chunk item (key + chunk) to the superblock's sys_chunk_array,
 * under chunk_mutex.  Returns -EFBIG if the array would overflow
 * BTRFS_SYSTEM_CHUNK_ARRAY_SIZE.
 */
static int btrfs_add_system_chunk(struct btrfs_fs_info *fs_info,
				  struct btrfs_key *key,
				  struct btrfs_chunk *chunk, int item_size)
{
	struct btrfs_super_block *super_copy = fs_info->super_copy;
	struct btrfs_disk_key disk_key;
	u32 array_size;
	u8 *ptr;

	mutex_lock(&fs_info->chunk_mutex);
	array_size = btrfs_super_sys_array_size(super_copy);
	if (array_size + item_size + sizeof(disk_key)
			> BTRFS_SYSTEM_CHUNK_ARRAY_SIZE) {
		mutex_unlock(&fs_info->chunk_mutex);
		return -EFBIG;
	}

	/* Layout in the array is: disk_key immediately followed by the chunk. */
	ptr = super_copy->sys_chunk_array + array_size;
	btrfs_cpu_key_to_disk(&disk_key, key);
	memcpy(ptr, &disk_key, sizeof(disk_key));
	ptr += sizeof(disk_key);
	memcpy(ptr, chunk, item_size);
	item_size += sizeof(disk_key);
	btrfs_set_super_sys_array_size(super_copy, array_size + item_size);
	mutex_unlock(&fs_info->chunk_mutex);

	return 0;
}

/*
 * sort the devices in descending order by max_avail, total_avail
 */
static int btrfs_cmp_device_info(const void *a, const void *b)
{
	const struct btrfs_device_info *di_a = a;
	const struct btrfs_device_info *di_b = b;

	if (di_a->max_avail > di_b->max_avail)
		return -1;
	if (di_a->max_avail < di_b->max_avail)
		return 1;
	if (di_a->total_avail > di_b->total_avail)
		return -1;
	if (di_a->total_avail < di_b->total_avail)
		return 1;
	return 0;
}

/* Set the RAID56 incompat bit once a raid5/6 block group type is used. */
static void check_raid56_incompat_flag(struct btrfs_fs_info *info, u64 type)
{
	if (!(type & BTRFS_BLOCK_GROUP_RAID56_MASK))
		return;

	btrfs_set_fs_incompat(info, RAID56);
}

/* Max number of stripes that fit in one chunk item of a regular tree leaf. */
#define BTRFS_MAX_DEVS(info) ((BTRFS_MAX_ITEM_SIZE(info)	\
			- sizeof(struct btrfs_chunk))		\
			/ sizeof(struct btrfs_stripe) + 1)

/* Same bound for system chunks, limited by the superblock chunk array. */
#define BTRFS_MAX_DEVS_SYS_CHUNK ((BTRFS_SYSTEM_CHUNK_ARRAY_SIZE	\
				- 2 * sizeof(struct btrfs_disk_key)	\
				- 2 * sizeof(struct btrfs_chunk))	\
				/ sizeof(struct btrfs_stripe) + 1)

/*
 * Allocate a new chunk of profile @type at logical address @start: pick
 * devices with the most free space, size the stripes against the profile's
 * limits, build the map_lookup/extent_map, and create the block group.
 * The chunk item itself is inserted later by btrfs_finish_chunk_alloc().
 *
 * Returns 0 on success, -ENOSPC when not enough devices/space, or another
 * negative errno.
 */
static int __btrfs_alloc_chunk(struct btrfs_trans_handle *trans,
			       u64 start, u64 type)
{
	struct btrfs_fs_info *info = trans->fs_info;
	struct btrfs_fs_devices *fs_devices = info->fs_devices;
	struct btrfs_device *device;
	struct map_lookup *map = NULL;
	struct extent_map_tree *em_tree;
	struct extent_map *em;
	struct btrfs_device_info *devices_info = NULL;
	u64 total_avail;
	int num_stripes;	/* total number of stripes to allocate */
	int data_stripes;	/* number of stripes that count for
				   block group size */
	int sub_stripes;	/* sub_stripes info for map */
	int dev_stripes;	/* stripes per dev */
	int devs_max;		/* max devs to use */
	int devs_min;		/* min devs needed */
	int devs_increment;	/* ndevs has to be a multiple of this */
	int ncopies;		/* how many copies to data has */
	int nparity;		/* number of stripes worth of bytes to
				   store parity information */
	int ret;
	u64 max_stripe_size;
	u64 max_chunk_size;
	u64 stripe_size;
	u64 chunk_size;
	int ndevs;
	int i;
	int j;
	int index;

	BUG_ON(!alloc_profile_is_valid(type, 0));

	if (list_empty(&fs_devices->alloc_list)) {
		if (btrfs_test_opt(info, ENOSPC_DEBUG))
			btrfs_debug(info, "%s: no writable device", __func__);
		return -ENOSPC;
	}

	/* Profile parameters come from the global btrfs_raid_array table. */
	index = btrfs_bg_flags_to_raid_index(type);

	sub_stripes = btrfs_raid_array[index].sub_stripes;
	dev_stripes = btrfs_raid_array[index].dev_stripes;
	devs_max = btrfs_raid_array[index].devs_max;
	devs_min = btrfs_raid_array[index].devs_min;
	devs_increment = btrfs_raid_array[index].devs_increment;
	ncopies = btrfs_raid_array[index].ncopies;
	nparity = btrfs_raid_array[index].nparity;

	if (type & BTRFS_BLOCK_GROUP_DATA) {
		max_stripe_size = SZ_1G;
		max_chunk_size = BTRFS_MAX_DATA_CHUNK_SIZE;
		if (!devs_max)
			devs_max = BTRFS_MAX_DEVS(info);
	} else if (type & BTRFS_BLOCK_GROUP_METADATA) {
		/* for larger filesystems, use larger metadata chunks */
		if (fs_devices->total_rw_bytes > 50ULL * SZ_1G)
			max_stripe_size = SZ_1G;
		else
			max_stripe_size = SZ_256M;
		max_chunk_size = max_stripe_size;
		if (!devs_max)
			devs_max = BTRFS_MAX_DEVS(info);
	} else if (type & BTRFS_BLOCK_GROUP_SYSTEM) {
		max_stripe_size = SZ_32M;
		max_chunk_size = 2 * max_stripe_size;
		if (!devs_max)
			devs_max = BTRFS_MAX_DEVS_SYS_CHUNK;
	} else {
		btrfs_err(info, "invalid chunk type 0x%llx requested",
		       type);
		BUG_ON(1);
	}

	/* We don't want a chunk larger than 10% of writable space */
	max_chunk_size = min(div_factor(fs_devices->total_rw_bytes, 1),
			     max_chunk_size);

	devices_info = kcalloc(fs_devices->rw_devices, sizeof(*devices_info),
			       GFP_NOFS);
	if (!devices_info)
		return -ENOMEM;

	/*
	 * in the first pass through the devices list, we gather information
	 * about the available holes on each device.
	 */
	ndevs = 0;
	list_for_each_entry(device, &fs_devices->alloc_list, dev_alloc_list) {
		u64 max_avail;
		u64 dev_offset;

		if (!test_bit(BTRFS_DEV_STATE_WRITEABLE, &device->dev_state)) {
			WARN(1, KERN_ERR
			       "BTRFS: read-only device in alloc_list\n");
			continue;
		}

		if (!test_bit(BTRFS_DEV_STATE_IN_FS_METADATA,
					&device->dev_state) ||
		    test_bit(BTRFS_DEV_STATE_REPLACE_TGT, &device->dev_state))
			continue;

		if (device->total_bytes > device->bytes_used)
			total_avail = device->total_bytes - device->bytes_used;
		else
			total_avail = 0;

		/* If there is no space on this device, skip it. */
		if (total_avail == 0)
			continue;

		ret = find_free_dev_extent(trans, device,
					   max_stripe_size * dev_stripes,
					   &dev_offset, &max_avail);
		if (ret && ret != -ENOSPC)
			goto error;

		if (ret == 0)
			max_avail = max_stripe_size * dev_stripes;

		if (max_avail < BTRFS_STRIPE_LEN * dev_stripes) {
			if (btrfs_test_opt(info, ENOSPC_DEBUG))
				btrfs_debug(info,
			"%s: devid %llu has no free space, have=%llu want=%u",
					    __func__, device->devid, max_avail,
					    BTRFS_STRIPE_LEN * dev_stripes);
			continue;
		}

		if (ndevs == fs_devices->rw_devices) {
			WARN(1, "%s: found more than %llu devices\n",
			     __func__, fs_devices->rw_devices);
			break;
		}
		devices_info[ndevs].dev_offset = dev_offset;
		devices_info[ndevs].max_avail = max_avail;
		devices_info[ndevs].total_avail = total_avail;
		devices_info[ndevs].dev = device;
		++ndevs;
	}

	/*
	 * now sort the devices by hole size / available space
	 */
	sort(devices_info, ndevs, sizeof(struct btrfs_device_info),
	     btrfs_cmp_device_info, NULL);

	/* round down to number of usable stripes */
	ndevs = round_down(ndevs, devs_increment);

	if (ndevs < devs_min) {
		ret = -ENOSPC;
		if (btrfs_test_opt(info, ENOSPC_DEBUG)) {
			btrfs_debug(info,
	"%s: not enough devices with free space: have=%d minimum required=%d",
				    __func__, ndevs, devs_min);
		}
		goto error;
	}

	ndevs = min(ndevs, devs_max);

	/*
	 * The primary goal is to maximize the number of stripes, so use as
	 * many devices as possible, even if the stripes are not maximum sized.
	 *
	 * The DUP profile stores more than one stripe per device, the
	 * max_avail is the total size so we have to adjust.
	 */
	stripe_size = div_u64(devices_info[ndevs - 1].max_avail, dev_stripes);
	num_stripes = ndevs * dev_stripes;

	/*
	 * this will have to be fixed for RAID1 and RAID10 over
	 * more drives
	 */
	data_stripes = (num_stripes - nparity) / ncopies;

	/*
	 * Use the number of data stripes to figure out how big this chunk
	 * is really going to be in terms of logical address space,
	 * and compare that answer with the max chunk size. If it's higher,
	 * we try to reduce stripe_size.
	 */
	if (stripe_size * data_stripes > max_chunk_size) {
		/*
		 * Reduce stripe_size, round it up to a 16MB boundary again and
		 * then use it, unless it ends up being even bigger than the
		 * previous value we had already.
		 */
		stripe_size = min(round_up(div_u64(max_chunk_size,
						   data_stripes), SZ_16M),
				  stripe_size);
	}

	/* align to BTRFS_STRIPE_LEN */
	stripe_size = round_down(stripe_size, BTRFS_STRIPE_LEN);

	map = kmalloc(map_lookup_size(num_stripes), GFP_NOFS);
	if (!map) {
		ret = -ENOMEM;
		goto error;
	}

	map->num_stripes = num_stripes;

	/* DUP places dev_stripes consecutive stripes on the same device. */
	for (i = 0; i < ndevs; ++i) {
		for (j = 0; j < dev_stripes; ++j) {
			int s = i * dev_stripes + j;
			map->stripes[s].dev = devices_info[i].dev;
			map->stripes[s].physical = devices_info[i].dev_offset +
						   j * stripe_size;
		}
	}
	map->stripe_len = BTRFS_STRIPE_LEN;
	map->io_align = BTRFS_STRIPE_LEN;
	map->io_width = BTRFS_STRIPE_LEN;
	map->type = type;
	map->sub_stripes = sub_stripes;

	chunk_size = stripe_size * data_stripes;

	trace_btrfs_chunk_alloc(info, map, start, chunk_size);

	em = alloc_extent_map();
	if (!em) {
		kfree(map);
		ret = -ENOMEM;
		goto error;
	}
	set_bit(EXTENT_FLAG_FS_MAPPING, &em->flags);
	em->map_lookup = map;
	em->start = start;
	em->len = chunk_size;
	em->block_start = 0;
	em->block_len = em->len;
	em->orig_block_len = stripe_size;

	em_tree = &info->mapping_tree.map_tree;
	write_lock(&em_tree->lock);
	ret = add_extent_mapping(em_tree, em, 0);
	if (ret) {
		write_unlock(&em_tree->lock);
		free_extent_map(em);
		goto error;
	}

	/* Extra ref for the transaction's pending_chunks list. */
	list_add_tail(&em->list, &trans->transaction->pending_chunks);
	refcount_inc(&em->refs);
	write_unlock(&em_tree->lock);

	ret = btrfs_make_block_group(trans, 0, type, start, chunk_size);
	if (ret)
		goto error_del_extent;

	for (i = 0; i < map->num_stripes; i++)
		btrfs_device_set_bytes_used(map->stripes[i].dev,
				map->stripes[i].dev->bytes_used + stripe_size);

	atomic64_sub(stripe_size * map->num_stripes, &info->free_chunk_space);

	free_extent_map(em);
	check_raid56_incompat_flag(info, type);

	kfree(devices_info);
	return 0;

error_del_extent:
	write_lock(&em_tree->lock);
	remove_extent_mapping(em_tree, em);
	write_unlock(&em_tree->lock);

	/* One for our allocation */
	free_extent_map(em);
	/* One for the tree reference */
	free_extent_map(em);
	/* One for the pending_chunks list reference */
	free_extent_map(em);
error:
	kfree(devices_info);
	return ret;
}

/*
 * Second phase of chunk allocation: insert the chunk item and its dev
 * extents into the chunk/device trees, and mirror system chunks into the
 * superblock's sys_chunk_array.  The extent map for @chunk_offset must
 * already exist (created by __btrfs_alloc_chunk()).
 */
int btrfs_finish_chunk_alloc(struct btrfs_trans_handle *trans,
			     u64 chunk_offset, u64 chunk_size)
{
	struct btrfs_fs_info *fs_info = trans->fs_info;
	struct btrfs_root *extent_root = fs_info->extent_root;
	struct btrfs_root *chunk_root = fs_info->chunk_root;
	struct btrfs_key key;
	struct btrfs_device *device;
	struct btrfs_chunk *chunk;
	struct btrfs_stripe *stripe;
	struct extent_map *em;
	struct map_lookup *map;
	size_t item_size;
	u64 dev_offset;
	u64 stripe_size;
	int i = 0;
	int ret = 0;

	em = btrfs_get_chunk_map(fs_info, chunk_offset, chunk_size);
	if (IS_ERR(em))
		return PTR_ERR(em);

	map = em->map_lookup;
	item_size = btrfs_chunk_item_size(map->num_stripes);
	stripe_size = em->orig_block_len;

	chunk = kzalloc(item_size, GFP_NOFS);
	if (!chunk) {
		ret = -ENOMEM;
		goto out;
	}

	/*
	 * Take the device list mutex to prevent races with the final phase of
	 * a device replace operation that replaces the device object associated
	 * with the map's stripes, because the device object's id can change
	 * at any time during that final phase of the device replace operation
	 * (dev-replace.c:btrfs_dev_replace_finishing()).
	 */
	mutex_lock(&fs_info->fs_devices->device_list_mutex);
	for (i = 0; i < map->num_stripes; i++) {
		device = map->stripes[i].dev;
		dev_offset = map->stripes[i].physical;

		ret = btrfs_update_device(trans, device);
		if (ret)
			break;
		ret = btrfs_alloc_dev_extent(trans, device, chunk_offset,
					     dev_offset, stripe_size);
		if (ret)
			break;
	}
	if (ret) {
		mutex_unlock(&fs_info->fs_devices->device_list_mutex);
		goto out;
	}

	stripe = &chunk->stripe;
	for (i = 0; i < map->num_stripes; i++) {
		device = map->stripes[i].dev;
		dev_offset = map->stripes[i].physical;

		btrfs_set_stack_stripe_devid(stripe, device->devid);
		btrfs_set_stack_stripe_offset(stripe, dev_offset);
		memcpy(stripe->dev_uuid, device->uuid, BTRFS_UUID_SIZE);
		stripe++;
	}
	mutex_unlock(&fs_info->fs_devices->device_list_mutex);

	btrfs_set_stack_chunk_length(chunk, chunk_size);
	btrfs_set_stack_chunk_owner(chunk, extent_root->root_key.objectid);
	btrfs_set_stack_chunk_stripe_len(chunk, map->stripe_len);
	btrfs_set_stack_chunk_type(chunk, map->type);
	btrfs_set_stack_chunk_num_stripes(chunk, map->num_stripes);
	btrfs_set_stack_chunk_io_align(chunk, map->stripe_len);
	btrfs_set_stack_chunk_io_width(chunk, map->stripe_len);
	btrfs_set_stack_chunk_sector_size(chunk, fs_info->sectorsize);
	btrfs_set_stack_chunk_sub_stripes(chunk, map->sub_stripes);

	key.objectid = BTRFS_FIRST_CHUNK_TREE_OBJECTID;
	key.type = BTRFS_CHUNK_ITEM_KEY;
	key.offset = chunk_offset;

	ret = btrfs_insert_item(trans, chunk_root, &key, chunk, item_size);
	if (ret == 0 && map->type & BTRFS_BLOCK_GROUP_SYSTEM) {
		/*
		 * TODO: Cleanup of inserted chunk root in case of
		 * failure.
		 */
		ret = btrfs_add_system_chunk(fs_info, &key, chunk, item_size);
	}

out:
	kfree(chunk);
	free_extent_map(em);
	return ret;
}

/*
 * Chunk allocation falls into two parts. The first part does work
 * that makes the new allocated chunk usable, but does not do any operation
 * that modifies the chunk tree. The second part does the work that
 * requires modifying the chunk tree.
This division is important for the
 * bootstrap process of adding storage to a seed btrfs.
 */
int btrfs_alloc_chunk(struct btrfs_trans_handle *trans, u64 type)
{
	u64 chunk_offset;

	/* Callers must already hold chunk_mutex. */
	lockdep_assert_held(&trans->fs_info->chunk_mutex);
	chunk_offset = find_next_chunk(trans->fs_info);
	return __btrfs_alloc_chunk(trans, chunk_offset, type);
}

/*
 * Allocate the initial metadata and system chunks on a filesystem that has
 * just gained its first writable device (e.g. after sprouting from a seed).
 */
static noinline int init_first_rw_device(struct btrfs_trans_handle *trans,
					 struct btrfs_fs_info *fs_info)
{
	u64 chunk_offset;
	u64 sys_chunk_offset;
	u64 alloc_profile;
	int ret;

	chunk_offset = find_next_chunk(fs_info);
	alloc_profile = btrfs_metadata_alloc_profile(fs_info);
	ret = __btrfs_alloc_chunk(trans, chunk_offset, alloc_profile);
	if (ret)
		return ret;

	sys_chunk_offset = find_next_chunk(fs_info);
	alloc_profile = btrfs_system_alloc_profile(fs_info);
	ret = __btrfs_alloc_chunk(trans, sys_chunk_offset, alloc_profile);
	return ret;
}

/*
 * Number of device failures the chunk's profile can tolerate:
 * 2 for RAID6, 1 for RAID1/RAID10/RAID5/DUP, 0 otherwise.
 */
static inline int btrfs_chunk_max_errors(struct map_lookup *map)
{
	int max_errors;

	if (map->type & (BTRFS_BLOCK_GROUP_RAID1 |
			 BTRFS_BLOCK_GROUP_RAID10 |
			 BTRFS_BLOCK_GROUP_RAID5 |
			 BTRFS_BLOCK_GROUP_DUP)) {
		max_errors = 1;
	} else if (map->type & BTRFS_BLOCK_GROUP_RAID6) {
		max_errors = 2;
	} else {
		max_errors = 0;
	}

	return max_errors;
}

/*
 * Return 1 if the chunk at @chunk_offset cannot be written (a stripe sits on
 * a non-writable device, or too many devices are missing), 0 otherwise.
 * A failed chunk-map lookup is treated as readonly.
 */
int btrfs_chunk_readonly(struct btrfs_fs_info *fs_info, u64 chunk_offset)
{
	struct extent_map *em;
	struct map_lookup *map;
	int readonly = 0;
	int miss_ndevs = 0;
	int i;

	em = btrfs_get_chunk_map(fs_info, chunk_offset, 1);
	if (IS_ERR(em))
		return 1;

	map = em->map_lookup;
	for (i = 0; i < map->num_stripes; i++) {
		if (test_bit(BTRFS_DEV_STATE_MISSING,
					&map->stripes[i].dev->dev_state)) {
			miss_ndevs++;
			continue;
		}
		if (!test_bit(BTRFS_DEV_STATE_WRITEABLE,
					&map->stripes[i].dev->dev_state)) {
			readonly = 1;
			goto end;
		}
	}

	/*
	 * If the number of missing devices is larger than max errors,
	 * we can not write the data into that chunk successfully, so
	 * set it readonly.
	 */
	if (miss_ndevs > btrfs_chunk_max_errors(map))
		readonly = 1;
end:
	free_extent_map(em);
	return readonly;
}

void btrfs_mapping_init(struct btrfs_mapping_tree *tree)
{
	extent_map_tree_init(&tree->map_tree);
}

/* Drop every extent mapping in the chunk mapping tree (unmount teardown). */
void btrfs_mapping_tree_free(struct btrfs_mapping_tree *tree)
{
	struct extent_map *em;

	while (1) {
		write_lock(&tree->map_tree.lock);
		em = lookup_extent_mapping(&tree->map_tree, 0, (u64)-1);
		if (em)
			remove_extent_mapping(&tree->map_tree, em);
		write_unlock(&tree->map_tree.lock);
		if (!em)
			break;
		/* once for us */
		free_extent_map(em);
		/* once for the tree */
		free_extent_map(em);
	}
}

/*
 * Number of copies that can be read for the chunk containing @logical,
 * based on its RAID profile; one more while a device replace with a target
 * device is ongoing.
 */
int btrfs_num_copies(struct btrfs_fs_info *fs_info, u64 logical, u64 len)
{
	struct extent_map *em;
	struct map_lookup *map;
	int ret;

	em = btrfs_get_chunk_map(fs_info, logical, len);
	if (IS_ERR(em))
		/*
		 * We could return errors for these cases, but that could get
		 * ugly and we'd probably do the same thing which is just not do
		 * anything else and exit, so return 1 so the callers don't try
		 * to use other copies.
		 */
		return 1;

	map = em->map_lookup;
	if (map->type & (BTRFS_BLOCK_GROUP_DUP | BTRFS_BLOCK_GROUP_RAID1))
		ret = map->num_stripes;
	else if (map->type & BTRFS_BLOCK_GROUP_RAID10)
		ret = map->sub_stripes;
	else if (map->type & BTRFS_BLOCK_GROUP_RAID5)
		ret = 2;
	else if (map->type & BTRFS_BLOCK_GROUP_RAID6)
		/*
		 * There could be two corrupted data stripes, we need
		 * to loop retry in order to rebuild the correct data.
		 *
		 * Fail a stripe at a time on every retry except the
		 * stripe under reconstruction.
		 */
		ret = map->num_stripes;
	else
		ret = 1;
	free_extent_map(em);

	down_read(&fs_info->dev_replace.rwsem);
	if (btrfs_dev_replace_is_ongoing(&fs_info->dev_replace) &&
	    fs_info->dev_replace.tgtdev)
		ret++;
	up_read(&fs_info->dev_replace.rwsem);

	return ret;
}

/*
 * Length of a full stripe set for the chunk at @logical: stripe_len times
 * the data stripes for RAID5/6, otherwise one sector.
 */
unsigned long btrfs_full_stripe_len(struct btrfs_fs_info *fs_info,
				    u64 logical)
{
	struct extent_map *em;
	struct map_lookup *map;
	unsigned long len = fs_info->sectorsize;

	em = btrfs_get_chunk_map(fs_info, logical, len);

	if (!WARN_ON(IS_ERR(em))) {
		map = em->map_lookup;
		if (map->type & BTRFS_BLOCK_GROUP_RAID56_MASK)
			len = map->stripe_len * nr_data_stripes(map);
		free_extent_map(em);
	}
	return len;
}

/* Return 1 if the chunk at @logical uses a RAID5/6 profile, else 0. */
int btrfs_is_parity_mirror(struct btrfs_fs_info *fs_info, u64 logical, u64 len)
{
	struct extent_map *em;
	struct map_lookup *map;
	int ret = 0;

	em = btrfs_get_chunk_map(fs_info, logical, len);

	if(!WARN_ON(IS_ERR(em))) {
		map = em->map_lookup;
		if (map->type & BTRFS_BLOCK_GROUP_RAID56_MASK)
			ret = 1;
		free_extent_map(em);
	}
	return ret;
}

/*
 * Pick a live mirror stripe index for a RAID1/RAID10 read, starting the
 * search at @first.  Prefers a pid-based mirror for spread, and avoids the
 * replace source device when the replace mode asks for that (first pass
 * only; the second "tolerance" pass accepts it).
 */
static int find_live_mirror(struct btrfs_fs_info *fs_info,
			    struct map_lookup *map, int first,
			    int dev_replace_is_ongoing)
{
	int i;
	int num_stripes;
	int preferred_mirror;
	int tolerance;
	struct btrfs_device *srcdev;

	ASSERT((map->type &
		 (BTRFS_BLOCK_GROUP_RAID1 | BTRFS_BLOCK_GROUP_RAID10)));

	if (map->type & BTRFS_BLOCK_GROUP_RAID10)
		num_stripes = map->sub_stripes;
	else
		num_stripes = map->num_stripes;

	preferred_mirror = first + current->pid % num_stripes;

	if (dev_replace_is_ongoing &&
	    fs_info->dev_replace.cont_reading_from_srcdev_mode ==
	     BTRFS_DEV_REPLACE_ITEM_CONT_READING_FROM_SRCDEV_MODE_AVOID)
		srcdev = fs_info->dev_replace.srcdev;
	else
		srcdev = NULL;

	/*
	 * try to avoid the drive that is the source drive for a
	 * dev-replace procedure, only choose it if no other non-missing
	 * mirror is available
	 */
	for (tolerance = 0; tolerance < 2; tolerance++) {
		if (map->stripes[preferred_mirror].dev->bdev &&
		    (tolerance || map->stripes[preferred_mirror].dev != srcdev))
			return preferred_mirror;
		for (i = first; i < first + num_stripes; i++) {
			if (map->stripes[i].dev->bdev &&
			    (tolerance || map->stripes[i].dev != srcdev))
				return i;
		}
	}

	/* we couldn't find one that doesn't fail.  Just return something
	 * and the io error handling code will clean up eventually
	 */
	return preferred_mirror;
}

static inline int parity_smaller(u64 a, u64 b)
{
	return a > b;
}

/* Bubble-sort the stripe set to put the parity/syndrome stripes last */
static void sort_parity_stripes(struct btrfs_bio *bbio, int num_stripes)
{
	struct btrfs_bio_stripe s;
	int i;
	u64 l;
	int again = 1;

	while (again) {
		again = 0;
		for (i = 0; i < num_stripes - 1; i++) {
			/* Keep stripes[] and raid_map[] permuted in lockstep. */
			if (parity_smaller(bbio->raid_map[i],
					   bbio->raid_map[i+1])) {
				s = bbio->stripes[i];
				l = bbio->raid_map[i];
				bbio->stripes[i] = bbio->stripes[i+1];
				bbio->raid_map[i] = bbio->raid_map[i+1];
				bbio->stripes[i+1] = s;
				bbio->raid_map[i+1] = l;
				again = 1;
			}
		}
	}
}

/*
 * Allocate a btrfs_bio with room for @total_stripes stripes, the tgtdev
 * index array (@real_stripes) and the raid_map, in one zeroed allocation.
 * __GFP_NOFAIL: this never returns NULL.  Refcount starts at 1.
 */
static struct btrfs_bio *alloc_btrfs_bio(int total_stripes, int real_stripes)
{
	struct btrfs_bio *bbio = kzalloc(
		 /* the size of the btrfs_bio */
		sizeof(struct btrfs_bio) +
		/* plus the variable array for the stripes */
		sizeof(struct btrfs_bio_stripe) * (total_stripes) +
		/* plus the variable array for the tgt dev */
		sizeof(int) * (real_stripes) +
		/*
		 * plus the raid_map, which includes both the tgt dev
		 * and the stripes
		 */
		sizeof(u64) * (total_stripes),
		GFP_NOFS|__GFP_NOFAIL);

	atomic_set(&bbio->error, 0);
	refcount_set(&bbio->refs, 1);

	return bbio;
}

void btrfs_get_bbio(struct btrfs_bio *bbio)
{
	WARN_ON(!refcount_read(&bbio->refs));
	refcount_inc(&bbio->refs);
}

/* Drop a reference; NULL is accepted as a no-op. */
void btrfs_put_bbio(struct btrfs_bio *bbio)
{
	if (!bbio)
		return;
	if (refcount_dec_and_test(&bbio->refs))
		kfree(bbio);
}

/* can REQ_OP_DISCARD be sent with other REQ like REQ_OP_WRITE? */
/*
 * Please note that, discard won't be sent to target device of device
 * replace.
 */
/*
 * Map a discard request at @logical/@length to the per-device stripe ranges
 * it covers, returning them in a freshly allocated *@bbio_ret.  RAID5/6 is
 * rejected with -EOPNOTSUPP.  For RAID0/RAID10 the first and last stripes
 * are trimmed to the requested byte range.
 */
static int __btrfs_map_block_for_discard(struct btrfs_fs_info *fs_info,
					 u64 logical, u64 length,
					 struct btrfs_bio **bbio_ret)
{
	struct extent_map *em;
	struct map_lookup *map;
	struct btrfs_bio *bbio;
	u64 offset;
	u64 stripe_nr;
	u64 stripe_nr_end;
	u64 stripe_end_offset;
	u64 stripe_cnt;
	u64 stripe_len;
	u64 stripe_offset;
	u64 num_stripes;
	u32 stripe_index;
	u32 factor = 0;
	u32 sub_stripes = 0;
	u64 stripes_per_dev = 0;
	u32 remaining_stripes = 0;
	u32 last_stripe = 0;
	int ret = 0;
	int i;

	/* discard always return a bbio */
	ASSERT(bbio_ret);

	em = btrfs_get_chunk_map(fs_info, logical, length);
	if (IS_ERR(em))
		return PTR_ERR(em);

	map = em->map_lookup;
	/* we don't discard raid56 yet */
	if (map->type & BTRFS_BLOCK_GROUP_RAID56_MASK) {
		ret = -EOPNOTSUPP;
		goto out;
	}

	offset = logical - em->start;
	/* Clamp the request to the end of this chunk. */
	length = min_t(u64, em->len - offset, length);

	stripe_len = map->stripe_len;
	/*
	 * stripe_nr counts the total number of stripes we have to stride
	 * to get to this block
	 */
	stripe_nr = div64_u64(offset, stripe_len);

	/* stripe_offset is the offset of this block in its stripe */
	stripe_offset = offset - stripe_nr * stripe_len;

	stripe_nr_end = round_up(offset + length, map->stripe_len);
	stripe_nr_end = div64_u64(stripe_nr_end, map->stripe_len);
	stripe_cnt = stripe_nr_end - stripe_nr;
	/* Bytes of the last logical stripe that lie beyond the request. */
	stripe_end_offset = stripe_nr_end * map->stripe_len -
			    (offset + length);
	/*
	 * after this, stripe_nr is the number of stripes on this
	 * device we have to walk to find the data, and stripe_index is
	 * the number of our device in the stripe array
	 */
	num_stripes = 1;
	stripe_index = 0;
	if (map->type & (BTRFS_BLOCK_GROUP_RAID0 |
			 BTRFS_BLOCK_GROUP_RAID10)) {
		if (map->type & BTRFS_BLOCK_GROUP_RAID0)
			sub_stripes = 1;
		else
			sub_stripes = map->sub_stripes;

		factor = map->num_stripes / sub_stripes;
		num_stripes = min_t(u64, map->num_stripes,
				    sub_stripes * stripe_cnt);
		stripe_nr = div_u64_rem(stripe_nr, factor, &stripe_index);
		stripe_index *= sub_stripes;
		stripes_per_dev = div_u64_rem(stripe_cnt, factor,
					      &remaining_stripes);
		div_u64_rem(stripe_nr_end - 1, factor, &last_stripe);
		last_stripe *= sub_stripes;
	} else if (map->type & (BTRFS_BLOCK_GROUP_RAID1 |
				BTRFS_BLOCK_GROUP_DUP)) {
		/* Mirrored profiles: discard the same range on every copy. */
		num_stripes = map->num_stripes;
	} else {
		stripe_nr = div_u64_rem(stripe_nr, map->num_stripes,
					&stripe_index);
	}

	bbio = alloc_btrfs_bio(num_stripes, 0);
	if (!bbio) {
		ret = -ENOMEM;
		goto out;
	}

	for (i = 0; i < num_stripes; i++) {
		bbio->stripes[i].physical =
			map->stripes[stripe_index].physical +
			stripe_offset + stripe_nr * map->stripe_len;
		bbio->stripes[i].dev = map->stripes[stripe_index].dev;

		if (map->type & (BTRFS_BLOCK_GROUP_RAID0 |
				 BTRFS_BLOCK_GROUP_RAID10)) {
			bbio->stripes[i].length = stripes_per_dev *
				map->stripe_len;

			if (i / sub_stripes < remaining_stripes)
				bbio->stripes[i].length +=
					map->stripe_len;

			/*
			 * Special for the first stripe and
			 * the last stripe:
			 *
			 * |-------|...|-------|
			 *     |----------|
			 *    off     end_off
			 */
			if (i < sub_stripes)
				bbio->stripes[i].length -=
					stripe_offset;

			if (stripe_index >= last_stripe &&
			    stripe_index <= (last_stripe +
					     sub_stripes - 1))
				bbio->stripes[i].length -=
					stripe_end_offset;

			if (i == sub_stripes - 1)
				stripe_offset = 0;
		} else {
			bbio->stripes[i].length = length;
		}

		stripe_index++;
		if (stripe_index == map->num_stripes) {
			/* Wrapped around the stripe set: next device row. */
			stripe_index = 0;
			stripe_nr++;
		}
	}

	*bbio_ret = bbio;
	bbio->map_type = map->type;
	bbio->num_stripes = num_stripes;
out:
	free_extent_map(em);
	return ret;
}

/*
 * In dev-replace case, for repair case (that's the only case where the mirror
 * is selected explicitly when calling btrfs_map_block), blocks left of the
 * left cursor can also be read from the target drive.
 *
 * For REQ_GET_READ_MIRRORS, the target drive is added as the last one to the
 * array of stripes.
 * For READ, it also needs to be supported using the same mirror number.
 *
 * If the requested block is not left of the left cursor, EIO is returned. This
 * can happen because btrfs_num_copies() returns one more in the dev-replace
 * case.
*/ static int get_extra_mirror_from_replace(struct btrfs_fs_info *fs_info, u64 logical, u64 length, u64 srcdev_devid, int *mirror_num, u64 *physical) { struct btrfs_bio *bbio = NULL; int num_stripes; int index_srcdev = 0; int found = 0; u64 physical_of_found = 0; int i; int ret = 0; ret = __btrfs_map_block(fs_info, BTRFS_MAP_GET_READ_MIRRORS, logical, &length, &bbio, 0, 0); if (ret) { ASSERT(bbio == NULL); return ret; } num_stripes = bbio->num_stripes; if (*mirror_num > num_stripes) { /* * BTRFS_MAP_GET_READ_MIRRORS does not contain this mirror, * that means that the requested area is not left of the left * cursor */ btrfs_put_bbio(bbio); return -EIO; } /* * process the rest of the function using the mirror_num of the source * drive. Therefore look it up first. At the end, patch the device * pointer to the one of the target drive. */ for (i = 0; i < num_stripes; i++) { if (bbio->stripes[i].dev->devid != srcdev_devid) continue; /* * In case of DUP, in order to keep it simple, only add the * mirror with the lowest physical address */ if (found && physical_of_found <= bbio->stripes[i].physical) continue; index_srcdev = i; found = 1; physical_of_found = bbio->stripes[i].physical; } btrfs_put_bbio(bbio); ASSERT(found); if (!found) return -EIO; *mirror_num = index_srcdev + 1; *physical = physical_of_found; return ret; } static void handle_ops_on_dev_replace(enum btrfs_map_op op, struct btrfs_bio **bbio_ret, struct btrfs_dev_replace *dev_replace, int *num_stripes_ret, int *max_errors_ret) { struct btrfs_bio *bbio = *bbio_ret; u64 srcdev_devid = dev_replace->srcdev->devid; int tgtdev_indexes = 0; int num_stripes = *num_stripes_ret; int max_errors = *max_errors_ret; int i; if (op == BTRFS_MAP_WRITE) { int index_where_to_add; /* * duplicate the write operations while the dev replace * procedure is running. 
Since the copying of the old disk to * the new disk takes place at run time while the filesystem is * mounted writable, the regular write operations to the old * disk have to be duplicated to go to the new disk as well. * * Note that device->missing is handled by the caller, and that * the write to the old disk is already set up in the stripes * array. */ index_where_to_add = num_stripes; for (i = 0; i < num_stripes; i++) { if (bbio->stripes[i].dev->devid == srcdev_devid) { /* write to new disk, too */ struct btrfs_bio_stripe *new = bbio->stripes + index_where_to_add; struct btrfs_bio_stripe *old = bbio->stripes + i; new->physical = old->physical; new->length = old->length; new->dev = dev_replace->tgtdev; bbio->tgtdev_map[i] = index_where_to_add; index_where_to_add++; max_errors++; tgtdev_indexes++; } } num_stripes = index_where_to_add; } else if (op == BTRFS_MAP_GET_READ_MIRRORS) { int index_srcdev = 0; int found = 0; u64 physical_of_found = 0; /* * During the dev-replace procedure, the target drive can also * be used to read data in case it is needed to repair a corrupt * block elsewhere. This is possible if the requested area is * left of the left cursor. In this area, the target drive is a * full copy of the source drive. 
*/ for (i = 0; i < num_stripes; i++) { if (bbio->stripes[i].dev->devid == srcdev_devid) { /* * In case of DUP, in order to keep it simple, * only add the mirror with the lowest physical * address */ if (found && physical_of_found <= bbio->stripes[i].physical) continue; index_srcdev = i; found = 1; physical_of_found = bbio->stripes[i].physical; } } if (found) { struct btrfs_bio_stripe *tgtdev_stripe = bbio->stripes + num_stripes; tgtdev_stripe->physical = physical_of_found; tgtdev_stripe->length = bbio->stripes[index_srcdev].length; tgtdev_stripe->dev = dev_replace->tgtdev; bbio->tgtdev_map[index_srcdev] = num_stripes; tgtdev_indexes++; num_stripes++; } } *num_stripes_ret = num_stripes; *max_errors_ret = max_errors; bbio->num_tgtdevs = tgtdev_indexes; *bbio_ret = bbio; } static bool need_full_stripe(enum btrfs_map_op op) { return (op == BTRFS_MAP_WRITE || op == BTRFS_MAP_GET_READ_MIRRORS); } static int __btrfs_map_block(struct btrfs_fs_info *fs_info, enum btrfs_map_op op, u64 logical, u64 *length, struct btrfs_bio **bbio_ret, int mirror_num, int need_raid_map) { struct extent_map *em; struct map_lookup *map; u64 offset; u64 stripe_offset; u64 stripe_nr; u64 stripe_len; u32 stripe_index; int i; int ret = 0; int num_stripes; int max_errors = 0; int tgtdev_indexes = 0; struct btrfs_bio *bbio = NULL; struct btrfs_dev_replace *dev_replace = &fs_info->dev_replace; int dev_replace_is_ongoing = 0; int num_alloc_stripes; int patch_the_first_stripe_for_dev_replace = 0; u64 physical_to_patch_in_first_stripe = 0; u64 raid56_full_stripe_start = (u64)-1; if (op == BTRFS_MAP_DISCARD) return __btrfs_map_block_for_discard(fs_info, logical, *length, bbio_ret); em = btrfs_get_chunk_map(fs_info, logical, *length); if (IS_ERR(em)) return PTR_ERR(em); map = em->map_lookup; offset = logical - em->start; stripe_len = map->stripe_len; stripe_nr = offset; /* * stripe_nr counts the total number of stripes we have to stride * to get to this block */ stripe_nr = div64_u64(stripe_nr, 
stripe_len); stripe_offset = stripe_nr * stripe_len; if (offset < stripe_offset) { btrfs_crit(fs_info, "stripe math has gone wrong, stripe_offset=%llu, offset=%llu, start=%llu, logical=%llu, stripe_len=%llu", stripe_offset, offset, em->start, logical, stripe_len); free_extent_map(em); return -EINVAL; } /* stripe_offset is the offset of this block in its stripe*/ stripe_offset = offset - stripe_offset; /* if we're here for raid56, we need to know the stripe aligned start */ if (map->type & BTRFS_BLOCK_GROUP_RAID56_MASK) { unsigned long full_stripe_len = stripe_len * nr_data_stripes(map); raid56_full_stripe_start = offset; /* allow a write of a full stripe, but make sure we don't * allow straddling of stripes */ raid56_full_stripe_start = div64_u64(raid56_full_stripe_start, full_stripe_len); raid56_full_stripe_start *= full_stripe_len; } if (map->type & BTRFS_BLOCK_GROUP_PROFILE_MASK) { u64 max_len; /* For writes to RAID[56], allow a full stripeset across all disks. For other RAID types and for RAID[56] reads, just allow a single stripe (on a single disk). */ if ((map->type & BTRFS_BLOCK_GROUP_RAID56_MASK) && (op == BTRFS_MAP_WRITE)) { max_len = stripe_len * nr_data_stripes(map) - (offset - raid56_full_stripe_start); } else { /* we limit the length of each bio to what fits in a stripe */ max_len = stripe_len - stripe_offset; } *length = min_t(u64, em->len - offset, max_len); } else { *length = em->len - offset; } /* * This is for when we're called from btrfs_bio_fits_in_stripe and all * it cares about is the length */ if (!bbio_ret) goto out; down_read(&dev_replace->rwsem); dev_replace_is_ongoing = btrfs_dev_replace_is_ongoing(dev_replace); /* * Hold the semaphore for read during the whole operation, write is * requested at commit time but must wait. 
*/ if (!dev_replace_is_ongoing) up_read(&dev_replace->rwsem); if (dev_replace_is_ongoing && mirror_num == map->num_stripes + 1 && !need_full_stripe(op) && dev_replace->tgtdev != NULL) { ret = get_extra_mirror_from_replace(fs_info, logical, *length, dev_replace->srcdev->devid, &mirror_num, &physical_to_patch_in_first_stripe); if (ret) goto out; else patch_the_first_stripe_for_dev_replace = 1; } else if (mirror_num > map->num_stripes) { mirror_num = 0; } num_stripes = 1; stripe_index = 0; if (map->type & BTRFS_BLOCK_GROUP_RAID0) { stripe_nr = div_u64_rem(stripe_nr, map->num_stripes, &stripe_index); if (!need_full_stripe(op)) mirror_num = 1; } else if (map->type & BTRFS_BLOCK_GROUP_RAID1) { if (need_full_stripe(op)) num_stripes = map->num_stripes; else if (mirror_num) stripe_index = mirror_num - 1; else { stripe_index = find_live_mirror(fs_info, map, 0, dev_replace_is_ongoing); mirror_num = stripe_index + 1; } } else if (map->type & BTRFS_BLOCK_GROUP_DUP) { if (need_full_stripe(op)) { num_stripes = map->num_stripes; } else if (mirror_num) { stripe_index = mirror_num - 1; } else { mirror_num = 1; } } else if (map->type & BTRFS_BLOCK_GROUP_RAID10) { u32 factor = map->num_stripes / map->sub_stripes; stripe_nr = div_u64_rem(stripe_nr, factor, &stripe_index); stripe_index *= map->sub_stripes; if (need_full_stripe(op)) num_stripes = map->sub_stripes; else if (mirror_num) stripe_index += mirror_num - 1; else { int old_stripe_index = stripe_index; stripe_index = find_live_mirror(fs_info, map, stripe_index, dev_replace_is_ongoing); mirror_num = stripe_index - old_stripe_index + 1; } } else if (map->type & BTRFS_BLOCK_GROUP_RAID56_MASK) { if (need_raid_map && (need_full_stripe(op) || mirror_num > 1)) { /* push stripe_nr back to the start of the full stripe */ stripe_nr = div64_u64(raid56_full_stripe_start, stripe_len * nr_data_stripes(map)); /* RAID[56] write or recovery. 
Return all stripes */ num_stripes = map->num_stripes; max_errors = nr_parity_stripes(map); *length = map->stripe_len; stripe_index = 0; stripe_offset = 0; } else { /* * Mirror #0 or #1 means the original data block. * Mirror #2 is RAID5 parity block. * Mirror #3 is RAID6 Q block. */ stripe_nr = div_u64_rem(stripe_nr, nr_data_stripes(map), &stripe_index); if (mirror_num > 1) stripe_index = nr_data_stripes(map) + mirror_num - 2; /* We distribute the parity blocks across stripes */ div_u64_rem(stripe_nr + stripe_index, map->num_stripes, &stripe_index); if (!need_full_stripe(op) && mirror_num <= 1) mirror_num = 1; } } else { /* * after this, stripe_nr is the number of stripes on this * device we have to walk to find the data, and stripe_index is * the number of our device in the stripe array */ stripe_nr = div_u64_rem(stripe_nr, map->num_stripes, &stripe_index); mirror_num = stripe_index + 1; } if (stripe_index >= map->num_stripes) { btrfs_crit(fs_info, "stripe index math went horribly wrong, got stripe_index=%u, num_stripes=%u", stripe_index, map->num_stripes); ret = -EINVAL; goto out; } num_alloc_stripes = num_stripes; if (dev_replace_is_ongoing && dev_replace->tgtdev != NULL) { if (op == BTRFS_MAP_WRITE) num_alloc_stripes <<= 1; if (op == BTRFS_MAP_GET_READ_MIRRORS) num_alloc_stripes++; tgtdev_indexes = num_stripes; } bbio = alloc_btrfs_bio(num_alloc_stripes, tgtdev_indexes); if (!bbio) { ret = -ENOMEM; goto out; } if (dev_replace_is_ongoing && dev_replace->tgtdev != NULL) bbio->tgtdev_map = (int *)(bbio->stripes + num_alloc_stripes); /* build raid_map */ if (map->type & BTRFS_BLOCK_GROUP_RAID56_MASK && need_raid_map && (need_full_stripe(op) || mirror_num > 1)) { u64 tmp; unsigned rot; bbio->raid_map = (u64 *)((void *)bbio->stripes + sizeof(struct btrfs_bio_stripe) * num_alloc_stripes + sizeof(int) * tgtdev_indexes); /* Work out the disk rotation on this stripe-set */ div_u64_rem(stripe_nr, num_stripes, &rot); /* Fill in the logical address of each stripe */ tmp = 
stripe_nr * nr_data_stripes(map); for (i = 0; i < nr_data_stripes(map); i++) bbio->raid_map[(i+rot) % num_stripes] = em->start + (tmp + i) * map->stripe_len; bbio->raid_map[(i+rot) % map->num_stripes] = RAID5_P_STRIPE; if (map->type & BTRFS_BLOCK_GROUP_RAID6) bbio->raid_map[(i+rot+1) % num_stripes] = RAID6_Q_STRIPE; } for (i = 0; i < num_stripes; i++) { bbio->stripes[i].physical = map->stripes[stripe_index].physical + stripe_offset + stripe_nr * map->stripe_len; bbio->stripes[i].dev = map->stripes[stripe_index].dev; stripe_index++; } if (need_full_stripe(op)) max_errors = btrfs_chunk_max_errors(map); if (bbio->raid_map) sort_parity_stripes(bbio, num_stripes); if (dev_replace_is_ongoing && dev_replace->tgtdev != NULL && need_full_stripe(op)) { handle_ops_on_dev_replace(op, &bbio, dev_replace, &num_stripes, &max_errors); } *bbio_ret = bbio; bbio->map_type = map->type; bbio->num_stripes = num_stripes; bbio->max_errors = max_errors; bbio->mirror_num = mirror_num; /* * this is the case that REQ_READ && dev_replace_is_ongoing && * mirror_num == num_stripes + 1 && dev_replace target drive is * available as a mirror */ if (patch_the_first_stripe_for_dev_replace && num_stripes > 0) { WARN_ON(num_stripes > 1); bbio->stripes[0].dev = dev_replace->tgtdev; bbio->stripes[0].physical = physical_to_patch_in_first_stripe; bbio->mirror_num = map->num_stripes + 1; } out: if (dev_replace_is_ongoing) { lockdep_assert_held(&dev_replace->rwsem); /* Unlock and let waiting writers proceed */ up_read(&dev_replace->rwsem); } free_extent_map(em); return ret; } int btrfs_map_block(struct btrfs_fs_info *fs_info, enum btrfs_map_op op, u64 logical, u64 *length, struct btrfs_bio **bbio_ret, int mirror_num) { return __btrfs_map_block(fs_info, op, logical, length, bbio_ret, mirror_num, 0); } /* For Scrub/replace */ int btrfs_map_sblock(struct btrfs_fs_info *fs_info, enum btrfs_map_op op, u64 logical, u64 *length, struct btrfs_bio **bbio_ret) { return __btrfs_map_block(fs_info, op, logical, length, 
bbio_ret, 0, 1); } int btrfs_rmap_block(struct btrfs_fs_info *fs_info, u64 chunk_start, u64 physical, u64 **logical, int *naddrs, int *stripe_len) { struct extent_map *em; struct map_lookup *map; u64 *buf; u64 bytenr; u64 length; u64 stripe_nr; u64 rmap_len; int i, j, nr = 0; em = btrfs_get_chunk_map(fs_info, chunk_start, 1); if (IS_ERR(em)) return -EIO; map = em->map_lookup; length = em->len; rmap_len = map->stripe_len; if (map->type & BTRFS_BLOCK_GROUP_RAID10) length = div_u64(length, map->num_stripes / map->sub_stripes); else if (map->type & BTRFS_BLOCK_GROUP_RAID0) length = div_u64(length, map->num_stripes); else if (map->type & BTRFS_BLOCK_GROUP_RAID56_MASK) { length = div_u64(length, nr_data_stripes(map)); rmap_len = map->stripe_len * nr_data_stripes(map); } buf = kcalloc(map->num_stripes, sizeof(u64), GFP_NOFS); BUG_ON(!buf); /* -ENOMEM */ for (i = 0; i < map->num_stripes; i++) { if (map->stripes[i].physical > physical || map->stripes[i].physical + length <= physical) continue; stripe_nr = physical - map->stripes[i].physical; stripe_nr = div64_u64(stripe_nr, map->stripe_len); if (map->type & BTRFS_BLOCK_GROUP_RAID10) { stripe_nr = stripe_nr * map->num_stripes + i; stripe_nr = div_u64(stripe_nr, map->sub_stripes); } else if (map->type & BTRFS_BLOCK_GROUP_RAID0) { stripe_nr = stripe_nr * map->num_stripes + i; } /* else if RAID[56], multiply by nr_data_stripes(). 
* Alternatively, just use rmap_len below instead of * map->stripe_len */ bytenr = chunk_start + stripe_nr * rmap_len; WARN_ON(nr >= map->num_stripes); for (j = 0; j < nr; j++) { if (buf[j] == bytenr) break; } if (j == nr) { WARN_ON(nr >= map->num_stripes); buf[nr++] = bytenr; } } *logical = buf; *naddrs = nr; *stripe_len = rmap_len; free_extent_map(em); return 0; } static inline void btrfs_end_bbio(struct btrfs_bio *bbio, struct bio *bio) { bio->bi_private = bbio->private; bio->bi_end_io = bbio->end_io; bio_endio(bio); btrfs_put_bbio(bbio); } static void btrfs_end_bio(struct bio *bio) { struct btrfs_bio *bbio = bio->bi_private; int is_orig_bio = 0; if (bio->bi_status) { atomic_inc(&bbio->error); if (bio->bi_status == BLK_STS_IOERR || bio->bi_status == BLK_STS_TARGET) { unsigned int stripe_index = btrfs_io_bio(bio)->stripe_index; struct btrfs_device *dev; BUG_ON(stripe_index >= bbio->num_stripes); dev = bbio->stripes[stripe_index].dev; if (dev->bdev) { if (bio_op(bio) == REQ_OP_WRITE) btrfs_dev_stat_inc_and_print(dev, BTRFS_DEV_STAT_WRITE_ERRS); else btrfs_dev_stat_inc_and_print(dev, BTRFS_DEV_STAT_READ_ERRS); if (bio->bi_opf & REQ_PREFLUSH) btrfs_dev_stat_inc_and_print(dev, BTRFS_DEV_STAT_FLUSH_ERRS); } } } if (bio == bbio->orig_bio) is_orig_bio = 1; btrfs_bio_counter_dec(bbio->fs_info); if (atomic_dec_and_test(&bbio->stripes_pending)) { if (!is_orig_bio) { bio_put(bio); bio = bbio->orig_bio; } btrfs_io_bio(bio)->mirror_num = bbio->mirror_num; /* only send an error to the higher layers if it is * beyond the tolerance of the btrfs bio */ if (atomic_read(&bbio->error) > bbio->max_errors) { bio->bi_status = BLK_STS_IOERR; } else { /* * this bio is actually up to date, we didn't * go over the max number of errors */ bio->bi_status = BLK_STS_OK; } btrfs_end_bbio(bbio, bio); } else if (!is_orig_bio) { bio_put(bio); } } /* * see run_scheduled_bios for a description of why bios are collected for * async submit. 
* * This will add one bio to the pending list for a device and make sure * the work struct is scheduled. */ static noinline void btrfs_schedule_bio(struct btrfs_device *device, struct bio *bio) { struct btrfs_fs_info *fs_info = device->fs_info; int should_queue = 1; struct btrfs_pending_bios *pending_bios; /* don't bother with additional async steps for reads, right now */ if (bio_op(bio) == REQ_OP_READ) { btrfsic_submit_bio(bio); return; } WARN_ON(bio->bi_next); bio->bi_next = NULL; spin_lock(&device->io_lock); if (op_is_sync(bio->bi_opf)) pending_bios = &device->pending_sync_bios; else pending_bios = &device->pending_bios; if (pending_bios->tail) pending_bios->tail->bi_next = bio; pending_bios->tail = bio; if (!pending_bios->head) pending_bios->head = bio; if (device->running_pending) should_queue = 0; spin_unlock(&device->io_lock); if (should_queue) btrfs_queue_work(fs_info->submit_workers, &device->work); } static void submit_stripe_bio(struct btrfs_bio *bbio, struct bio *bio, u64 physical, int dev_nr, int async) { struct btrfs_device *dev = bbio->stripes[dev_nr].dev; struct btrfs_fs_info *fs_info = bbio->fs_info; bio->bi_private = bbio; btrfs_io_bio(bio)->stripe_index = dev_nr; bio->bi_end_io = btrfs_end_bio; bio->bi_iter.bi_sector = physical >> 9; btrfs_debug_in_rcu(fs_info, "btrfs_map_bio: rw %d 0x%x, sector=%llu, dev=%lu (%s id %llu), size=%u", bio_op(bio), bio->bi_opf, (u64)bio->bi_iter.bi_sector, (u_long)dev->bdev->bd_dev, rcu_str_deref(dev->name), dev->devid, bio->bi_iter.bi_size); bio_set_dev(bio, dev->bdev); btrfs_bio_counter_inc_noblocked(fs_info); if (async) btrfs_schedule_bio(dev, bio); else btrfsic_submit_bio(bio); } static void bbio_error(struct btrfs_bio *bbio, struct bio *bio, u64 logical) { atomic_inc(&bbio->error); if (atomic_dec_and_test(&bbio->stripes_pending)) { /* Should be the original bio. 
*/ WARN_ON(bio != bbio->orig_bio); btrfs_io_bio(bio)->mirror_num = bbio->mirror_num; bio->bi_iter.bi_sector = logical >> 9; if (atomic_read(&bbio->error) > bbio->max_errors) bio->bi_status = BLK_STS_IOERR; else bio->bi_status = BLK_STS_OK; btrfs_end_bbio(bbio, bio); } } blk_status_t btrfs_map_bio(struct btrfs_fs_info *fs_info, struct bio *bio, int mirror_num, int async_submit) { struct btrfs_device *dev; struct bio *first_bio = bio; u64 logical = (u64)bio->bi_iter.bi_sector << 9; u64 length = 0; u64 map_length; int ret; int dev_nr; int total_devs; struct btrfs_bio *bbio = NULL; length = bio->bi_iter.bi_size; map_length = length; btrfs_bio_counter_inc_blocked(fs_info); ret = __btrfs_map_block(fs_info, btrfs_op(bio), logical, &map_length, &bbio, mirror_num, 1); if (ret) { btrfs_bio_counter_dec(fs_info); return errno_to_blk_status(ret); } total_devs = bbio->num_stripes; bbio->orig_bio = first_bio; bbio->private = first_bio->bi_private; bbio->end_io = first_bio->bi_end_io; bbio->fs_info = fs_info; atomic_set(&bbio->stripes_pending, bbio->num_stripes); if ((bbio->map_type & BTRFS_BLOCK_GROUP_RAID56_MASK) && ((bio_op(bio) == REQ_OP_WRITE) || (mirror_num > 1))) { /* In this case, map_length has been set to the length of a single stripe; not the whole write */ if (bio_op(bio) == REQ_OP_WRITE) { ret = raid56_parity_write(fs_info, bio, bbio, map_length); } else { ret = raid56_parity_recover(fs_info, bio, bbio, map_length, mirror_num, 1); } btrfs_bio_counter_dec(fs_info); return errno_to_blk_status(ret); } if (map_length < length) { btrfs_crit(fs_info, "mapping failed logical %llu bio len %llu len %llu", logical, length, map_length); BUG(); } for (dev_nr = 0; dev_nr < total_devs; dev_nr++) { dev = bbio->stripes[dev_nr].dev; if (!dev || !dev->bdev || test_bit(BTRFS_DEV_STATE_MISSING, &dev->dev_state) || (bio_op(first_bio) == REQ_OP_WRITE && !test_bit(BTRFS_DEV_STATE_WRITEABLE, &dev->dev_state))) { bbio_error(bbio, first_bio, logical); continue; } if (dev_nr < total_devs - 1) 
bio = btrfs_bio_clone(first_bio); else bio = first_bio; submit_stripe_bio(bbio, bio, bbio->stripes[dev_nr].physical, dev_nr, async_submit); } btrfs_bio_counter_dec(fs_info); return BLK_STS_OK; } /* * Find a device specified by @devid or @uuid in the list of @fs_devices, or * return NULL. * * If devid and uuid are both specified, the match must be exact, otherwise * only devid is used. * * If @seed is true, traverse through the seed devices. */ struct btrfs_device *btrfs_find_device(struct btrfs_fs_devices *fs_devices, u64 devid, u8 *uuid, u8 *fsid, bool seed) { struct btrfs_device *device; while (fs_devices) { if (!fsid || !memcmp(fs_devices->metadata_uuid, fsid, BTRFS_FSID_SIZE)) { list_for_each_entry(device, &fs_devices->devices, dev_list) { if (device->devid == devid && (!uuid || memcmp(device->uuid, uuid, BTRFS_UUID_SIZE) == 0)) return device; } } if (seed) fs_devices = fs_devices->seed; else return NULL; } return NULL; } static struct btrfs_device *add_missing_dev(struct btrfs_fs_devices *fs_devices, u64 devid, u8 *dev_uuid) { struct btrfs_device *device; device = btrfs_alloc_device(NULL, &devid, dev_uuid); if (IS_ERR(device)) return device; list_add(&device->dev_list, &fs_devices->devices); device->fs_devices = fs_devices; fs_devices->num_devices++; set_bit(BTRFS_DEV_STATE_MISSING, &device->dev_state); fs_devices->missing_devices++; return device; } /** * btrfs_alloc_device - allocate struct btrfs_device * @fs_info: used only for generating a new devid, can be NULL if * devid is provided (i.e. @devid != NULL). * @devid: a pointer to devid for this device. If NULL a new devid * is generated. * @uuid: a pointer to UUID for this device. If NULL a new UUID * is generated. * * Return: a pointer to a new &struct btrfs_device on success; ERR_PTR() * on error. Returned struct is not linked onto any lists and must be * destroyed with btrfs_free_device. 
*/ struct btrfs_device *btrfs_alloc_device(struct btrfs_fs_info *fs_info, const u64 *devid, const u8 *uuid) { struct btrfs_device *dev; u64 tmp; if (WARN_ON(!devid && !fs_info)) return ERR_PTR(-EINVAL); dev = __alloc_device(); if (IS_ERR(dev)) return dev; if (devid) tmp = *devid; else { int ret; ret = find_next_devid(fs_info, &tmp); if (ret) { btrfs_free_device(dev); return ERR_PTR(ret); } } dev->devid = tmp; if (uuid) memcpy(dev->uuid, uuid, BTRFS_UUID_SIZE); else generate_random_uuid(dev->uuid); btrfs_init_work(&dev->work, btrfs_submit_helper, pending_bios_fn, NULL, NULL); return dev; } /* Return -EIO if any error, otherwise return 0. */ static int btrfs_check_chunk_valid(struct btrfs_fs_info *fs_info, struct extent_buffer *leaf, struct btrfs_chunk *chunk, u64 logical) { u64 length; u64 stripe_len; u16 num_stripes; u16 sub_stripes; u64 type; u64 features; bool mixed = false; length = btrfs_chunk_length(leaf, chunk); stripe_len = btrfs_chunk_stripe_len(leaf, chunk); num_stripes = btrfs_chunk_num_stripes(leaf, chunk); sub_stripes = btrfs_chunk_sub_stripes(leaf, chunk); type = btrfs_chunk_type(leaf, chunk); if (!num_stripes) { btrfs_err(fs_info, "invalid chunk num_stripes: %u", num_stripes); return -EIO; } if (!IS_ALIGNED(logical, fs_info->sectorsize)) { btrfs_err(fs_info, "invalid chunk logical %llu", logical); return -EIO; } if (btrfs_chunk_sector_size(leaf, chunk) != fs_info->sectorsize) { btrfs_err(fs_info, "invalid chunk sectorsize %u", btrfs_chunk_sector_size(leaf, chunk)); return -EIO; } if (!length || !IS_ALIGNED(length, fs_info->sectorsize)) { btrfs_err(fs_info, "invalid chunk length %llu", length); return -EIO; } if (!is_power_of_2(stripe_len) || stripe_len != BTRFS_STRIPE_LEN) { btrfs_err(fs_info, "invalid chunk stripe length: %llu", stripe_len); return -EIO; } if (~(BTRFS_BLOCK_GROUP_TYPE_MASK | BTRFS_BLOCK_GROUP_PROFILE_MASK) & type) { btrfs_err(fs_info, "unrecognized chunk type: %llu", ~(BTRFS_BLOCK_GROUP_TYPE_MASK | BTRFS_BLOCK_GROUP_PROFILE_MASK) & 
btrfs_chunk_type(leaf, chunk)); return -EIO; } if ((type & BTRFS_BLOCK_GROUP_TYPE_MASK) == 0) { btrfs_err(fs_info, "missing chunk type flag: 0x%llx", type); return -EIO; } if ((type & BTRFS_BLOCK_GROUP_SYSTEM) && (type & (BTRFS_BLOCK_GROUP_METADATA | BTRFS_BLOCK_GROUP_DATA))) { btrfs_err(fs_info, "system chunk with data or metadata type: 0x%llx", type); return -EIO; } features = btrfs_super_incompat_flags(fs_info->super_copy); if (features & BTRFS_FEATURE_INCOMPAT_MIXED_GROUPS) mixed = true; if (!mixed) { if ((type & BTRFS_BLOCK_GROUP_METADATA) && (type & BTRFS_BLOCK_GROUP_DATA)) { btrfs_err(fs_info, "mixed chunk type in non-mixed mode: 0x%llx", type); return -EIO; } } if ((type & BTRFS_BLOCK_GROUP_RAID10 && sub_stripes != 2) || (type & BTRFS_BLOCK_GROUP_RAID1 && num_stripes < 1) || (type & BTRFS_BLOCK_GROUP_RAID5 && num_stripes < 2) || (type & BTRFS_BLOCK_GROUP_RAID6 && num_stripes < 3) || (type & BTRFS_BLOCK_GROUP_DUP && num_stripes > 2) || ((type & BTRFS_BLOCK_GROUP_PROFILE_MASK) == 0 && num_stripes != 1)) { btrfs_err(fs_info, "invalid num_stripes:sub_stripes %u:%u for profile %llu", num_stripes, sub_stripes, type & BTRFS_BLOCK_GROUP_PROFILE_MASK); return -EIO; } return 0; } static void btrfs_report_missing_device(struct btrfs_fs_info *fs_info, u64 devid, u8 *uuid, bool error) { if (error) btrfs_err_rl(fs_info, "devid %llu uuid %pU is missing", devid, uuid); else btrfs_warn_rl(fs_info, "devid %llu uuid %pU is missing", devid, uuid); } static int read_one_chunk(struct btrfs_fs_info *fs_info, struct btrfs_key *key, struct extent_buffer *leaf, struct btrfs_chunk *chunk) { struct btrfs_mapping_tree *map_tree = &fs_info->mapping_tree; struct map_lookup *map; struct extent_map *em; u64 logical; u64 length; u64 devid; u8 uuid[BTRFS_UUID_SIZE]; int num_stripes; int ret; int i; logical = key->offset; length = btrfs_chunk_length(leaf, chunk); num_stripes = btrfs_chunk_num_stripes(leaf, chunk); ret = btrfs_check_chunk_valid(fs_info, leaf, chunk, logical); if (ret) return 
ret; read_lock(&map_tree->map_tree.lock); em = lookup_extent_mapping(&map_tree->map_tree, logical, 1); read_unlock(&map_tree->map_tree.lock); /* already mapped? */ if (em && em->start <= logical && em->start + em->len > logical) { free_extent_map(em); return 0; } else if (em) { free_extent_map(em); } em = alloc_extent_map(); if (!em) return -ENOMEM; map = kmalloc(map_lookup_size(num_stripes), GFP_NOFS); if (!map) { free_extent_map(em); return -ENOMEM; } set_bit(EXTENT_FLAG_FS_MAPPING, &em->flags); em->map_lookup = map; em->start = logical; em->len = length; em->orig_start = 0; em->block_start = 0; em->block_len = em->len; map->num_stripes = num_stripes; map->io_width = btrfs_chunk_io_width(leaf, chunk); map->io_align = btrfs_chunk_io_align(leaf, chunk); map->stripe_len = btrfs_chunk_stripe_len(leaf, chunk); map->type = btrfs_chunk_type(leaf, chunk); map->sub_stripes = btrfs_chunk_sub_stripes(leaf, chunk); map->verified_stripes = 0; for (i = 0; i < num_stripes; i++) { map->stripes[i].physical = btrfs_stripe_offset_nr(leaf, chunk, i); devid = btrfs_stripe_devid_nr(leaf, chunk, i); read_extent_buffer(leaf, uuid, (unsigned long) btrfs_stripe_dev_uuid_nr(chunk, i), BTRFS_UUID_SIZE); map->stripes[i].dev = btrfs_find_device(fs_info->fs_devices, devid, uuid, NULL, true); if (!map->stripes[i].dev && !btrfs_test_opt(fs_info, DEGRADED)) { free_extent_map(em); btrfs_report_missing_device(fs_info, devid, uuid, true); return -ENOENT; } if (!map->stripes[i].dev) { map->stripes[i].dev = add_missing_dev(fs_info->fs_devices, devid, uuid); if (IS_ERR(map->stripes[i].dev)) { free_extent_map(em); btrfs_err(fs_info, "failed to init missing dev %llu: %ld", devid, PTR_ERR(map->stripes[i].dev)); return PTR_ERR(map->stripes[i].dev); } btrfs_report_missing_device(fs_info, devid, uuid, false); } set_bit(BTRFS_DEV_STATE_IN_FS_METADATA, &(map->stripes[i].dev->dev_state)); } write_lock(&map_tree->map_tree.lock); ret = add_extent_mapping(&map_tree->map_tree, em, 0); 
write_unlock(&map_tree->map_tree.lock); if (ret < 0) { btrfs_err(fs_info, "failed to add chunk map, start=%llu len=%llu: %d", em->start, em->len, ret); } free_extent_map(em); return ret; } static void fill_device_from_item(struct extent_buffer *leaf, struct btrfs_dev_item *dev_item, struct btrfs_device *device) { unsigned long ptr; device->devid = btrfs_device_id(leaf, dev_item); device->disk_total_bytes = btrfs_device_total_bytes(leaf, dev_item); device->total_bytes = device->disk_total_bytes; device->commit_total_bytes = device->disk_total_bytes; device->bytes_used = btrfs_device_bytes_used(leaf, dev_item); device->commit_bytes_used = device->bytes_used; device->type = btrfs_device_type(leaf, dev_item); device->io_align = btrfs_device_io_align(leaf, dev_item); device->io_width = btrfs_device_io_width(leaf, dev_item); device->sector_size = btrfs_device_sector_size(leaf, dev_item); WARN_ON(device->devid == BTRFS_DEV_REPLACE_DEVID); clear_bit(BTRFS_DEV_STATE_REPLACE_TGT, &device->dev_state); ptr = btrfs_device_uuid(dev_item); read_extent_buffer(leaf, device->uuid, ptr, BTRFS_UUID_SIZE); } static struct btrfs_fs_devices *open_seed_devices(struct btrfs_fs_info *fs_info, u8 *fsid) { struct btrfs_fs_devices *fs_devices; int ret; lockdep_assert_held(&uuid_mutex); ASSERT(fsid); fs_devices = fs_info->fs_devices->seed; while (fs_devices) { if (!memcmp(fs_devices->fsid, fsid, BTRFS_FSID_SIZE)) return fs_devices; fs_devices = fs_devices->seed; } fs_devices = find_fsid(fsid, NULL); if (!fs_devices) { if (!btrfs_test_opt(fs_info, DEGRADED)) return ERR_PTR(-ENOENT); fs_devices = alloc_fs_devices(fsid, NULL); if (IS_ERR(fs_devices)) return fs_devices; fs_devices->seeding = 1; fs_devices->opened = 1; return fs_devices; } fs_devices = clone_fs_devices(fs_devices); if (IS_ERR(fs_devices)) return fs_devices; ret = open_fs_devices(fs_devices, FMODE_READ, fs_info->bdev_holder); if (ret) { free_fs_devices(fs_devices); fs_devices = ERR_PTR(ret); goto out; } if (!fs_devices->seeding) { 
close_fs_devices(fs_devices); free_fs_devices(fs_devices); fs_devices = ERR_PTR(-EINVAL); goto out; } fs_devices->seed = fs_info->fs_devices->seed; fs_info->fs_devices->seed = fs_devices; out: return fs_devices; } static int read_one_dev(struct btrfs_fs_info *fs_info, struct extent_buffer *leaf, struct btrfs_dev_item *dev_item) { struct btrfs_fs_devices *fs_devices = fs_info->fs_devices; struct btrfs_device *device; u64 devid; int ret; u8 fs_uuid[BTRFS_FSID_SIZE]; u8 dev_uuid[BTRFS_UUID_SIZE]; devid = btrfs_device_id(leaf, dev_item); read_extent_buffer(leaf, dev_uuid, btrfs_device_uuid(dev_item), BTRFS_UUID_SIZE); read_extent_buffer(leaf, fs_uuid, btrfs_device_fsid(dev_item), BTRFS_FSID_SIZE); if (memcmp(fs_uuid, fs_devices->metadata_uuid, BTRFS_FSID_SIZE)) { fs_devices = open_seed_devices(fs_info, fs_uuid); if (IS_ERR(fs_devices)) return PTR_ERR(fs_devices); } device = btrfs_find_device(fs_info->fs_devices, devid, dev_uuid, fs_uuid, true); if (!device) { if (!btrfs_test_opt(fs_info, DEGRADED)) { btrfs_report_missing_device(fs_info, devid, dev_uuid, true); return -ENOENT; } device = add_missing_dev(fs_devices, devid, dev_uuid); if (IS_ERR(device)) { btrfs_err(fs_info, "failed to add missing dev %llu: %ld", devid, PTR_ERR(device)); return PTR_ERR(device); } btrfs_report_missing_device(fs_info, devid, dev_uuid, false); } else { if (!device->bdev) { if (!btrfs_test_opt(fs_info, DEGRADED)) { btrfs_report_missing_device(fs_info, devid, dev_uuid, true); return -ENOENT; } btrfs_report_missing_device(fs_info, devid, dev_uuid, false); } if (!device->bdev && !test_bit(BTRFS_DEV_STATE_MISSING, &device->dev_state)) { /* * this happens when a device that was properly setup * in the device info lists suddenly goes bad. 
* device->bdev is NULL, and so we have to set * device->missing to one here */ device->fs_devices->missing_devices++; set_bit(BTRFS_DEV_STATE_MISSING, &device->dev_state); } /* Move the device to its own fs_devices */ if (device->fs_devices != fs_devices) { ASSERT(test_bit(BTRFS_DEV_STATE_MISSING, &device->dev_state)); list_move(&device->dev_list, &fs_devices->devices); device->fs_devices->num_devices--; fs_devices->num_devices++; device->fs_devices->missing_devices--; fs_devices->missing_devices++; device->fs_devices = fs_devices; } } if (device->fs_devices != fs_info->fs_devices) { BUG_ON(test_bit(BTRFS_DEV_STATE_WRITEABLE, &device->dev_state)); if (device->generation != btrfs_device_generation(leaf, dev_item)) return -EINVAL; } fill_device_from_item(leaf, dev_item, device); set_bit(BTRFS_DEV_STATE_IN_FS_METADATA, &device->dev_state); if (test_bit(BTRFS_DEV_STATE_WRITEABLE, &device->dev_state) && !test_bit(BTRFS_DEV_STATE_REPLACE_TGT, &device->dev_state)) { device->fs_devices->total_rw_bytes += device->total_bytes; atomic64_add(device->total_bytes - device->bytes_used, &fs_info->free_chunk_space); } ret = 0; return ret; } int btrfs_read_sys_array(struct btrfs_fs_info *fs_info) { struct btrfs_root *root = fs_info->tree_root; struct btrfs_super_block *super_copy = fs_info->super_copy; struct extent_buffer *sb; struct btrfs_disk_key *disk_key; struct btrfs_chunk *chunk; u8 *array_ptr; unsigned long sb_array_offset; int ret = 0; u32 num_stripes; u32 array_size; u32 len = 0; u32 cur_offset; u64 type; struct btrfs_key key; ASSERT(BTRFS_SUPER_INFO_SIZE <= fs_info->nodesize); /* * This will create extent buffer of nodesize, superblock size is * fixed to BTRFS_SUPER_INFO_SIZE. If nodesize > sb size, this will * overallocate but we can keep it as-is, only the first page is used. 
*/ sb = btrfs_find_create_tree_block(fs_info, BTRFS_SUPER_INFO_OFFSET); if (IS_ERR(sb)) return PTR_ERR(sb); set_extent_buffer_uptodate(sb); btrfs_set_buffer_lockdep_class(root->root_key.objectid, sb, 0); /* * The sb extent buffer is artificial and just used to read the system array. * set_extent_buffer_uptodate() call does not properly mark all it's * pages up-to-date when the page is larger: extent does not cover the * whole page and consequently check_page_uptodate does not find all * the page's extents up-to-date (the hole beyond sb), * write_extent_buffer then triggers a WARN_ON. * * Regular short extents go through mark_extent_buffer_dirty/writeback cycle, * but sb spans only this function. Add an explicit SetPageUptodate call * to silence the warning eg. on PowerPC 64. */ if (PAGE_SIZE > BTRFS_SUPER_INFO_SIZE) SetPageUptodate(sb->pages[0]); write_extent_buffer(sb, super_copy, 0, BTRFS_SUPER_INFO_SIZE); array_size = btrfs_super_sys_array_size(super_copy); array_ptr = super_copy->sys_chunk_array; sb_array_offset = offsetof(struct btrfs_super_block, sys_chunk_array); cur_offset = 0; while (cur_offset < array_size) { disk_key = (struct btrfs_disk_key *)array_ptr; len = sizeof(*disk_key); if (cur_offset + len > array_size) goto out_short_read; btrfs_disk_key_to_cpu(&key, disk_key); array_ptr += len; sb_array_offset += len; cur_offset += len; if (key.type == BTRFS_CHUNK_ITEM_KEY) { chunk = (struct btrfs_chunk *)sb_array_offset; /* * At least one btrfs_chunk with one stripe must be * present, exact stripe count check comes afterwards */ len = btrfs_chunk_item_size(1); if (cur_offset + len > array_size) goto out_short_read; num_stripes = btrfs_chunk_num_stripes(sb, chunk); if (!num_stripes) { btrfs_err(fs_info, "invalid number of stripes %u in sys_array at offset %u", num_stripes, cur_offset); ret = -EIO; break; } type = btrfs_chunk_type(sb, chunk); if ((type & BTRFS_BLOCK_GROUP_SYSTEM) == 0) { btrfs_err(fs_info, "invalid chunk type %llu in sys_array at offset %u", 
type, cur_offset); ret = -EIO; break; } len = btrfs_chunk_item_size(num_stripes); if (cur_offset + len > array_size) goto out_short_read; ret = read_one_chunk(fs_info, &key, sb, chunk); if (ret) break; } else { btrfs_err(fs_info, "unexpected item type %u in sys_array at offset %u", (u32)key.type, cur_offset); ret = -EIO; break; } array_ptr += len; sb_array_offset += len; cur_offset += len; } clear_extent_buffer_uptodate(sb); free_extent_buffer_stale(sb); return ret; out_short_read: btrfs_err(fs_info, "sys_array too short to read %u bytes at offset %u", len, cur_offset); clear_extent_buffer_uptodate(sb); free_extent_buffer_stale(sb); return -EIO; } /* * Check if all chunks in the fs are OK for read-write degraded mount * * If the @failing_dev is specified, it's accounted as missing. * * Return true if all chunks meet the minimal RW mount requirements. * Return false if any chunk doesn't meet the minimal RW mount requirements. */ bool btrfs_check_rw_degradable(struct btrfs_fs_info *fs_info, struct btrfs_device *failing_dev) { struct btrfs_mapping_tree *map_tree = &fs_info->mapping_tree; struct extent_map *em; u64 next_start = 0; bool ret = true; read_lock(&map_tree->map_tree.lock); em = lookup_extent_mapping(&map_tree->map_tree, 0, (u64)-1); read_unlock(&map_tree->map_tree.lock); /* No chunk at all? 
Return false anyway */ if (!em) { ret = false; goto out; } while (em) { struct map_lookup *map; int missing = 0; int max_tolerated; int i; map = em->map_lookup; max_tolerated = btrfs_get_num_tolerated_disk_barrier_failures( map->type); for (i = 0; i < map->num_stripes; i++) { struct btrfs_device *dev = map->stripes[i].dev; if (!dev || !dev->bdev || test_bit(BTRFS_DEV_STATE_MISSING, &dev->dev_state) || dev->last_flush_error) missing++; else if (failing_dev && failing_dev == dev) missing++; } if (missing > max_tolerated) { if (!failing_dev) btrfs_warn(fs_info, "chunk %llu missing %d devices, max tolerance is %d for writable mount", em->start, missing, max_tolerated); free_extent_map(em); ret = false; goto out; } next_start = extent_map_end(em); free_extent_map(em); read_lock(&map_tree->map_tree.lock); em = lookup_extent_mapping(&map_tree->map_tree, next_start, (u64)(-1) - next_start); read_unlock(&map_tree->map_tree.lock); } out: return ret; } int btrfs_read_chunk_tree(struct btrfs_fs_info *fs_info) { struct btrfs_root *root = fs_info->chunk_root; struct btrfs_path *path; struct extent_buffer *leaf; struct btrfs_key key; struct btrfs_key found_key; int ret; int slot; u64 total_dev = 0; path = btrfs_alloc_path(); if (!path) return -ENOMEM; /* * uuid_mutex is needed only if we are mounting a sprout FS * otherwise we don't need it. */ mutex_lock(&uuid_mutex); mutex_lock(&fs_info->chunk_mutex); /* * Read all device items, and then all the chunk items. All * device items are found before any chunk item (their object id * is smaller than the lowest possible object id for a chunk * item - BTRFS_FIRST_CHUNK_TREE_OBJECTID). 
*/ key.objectid = BTRFS_DEV_ITEMS_OBJECTID; key.offset = 0; key.type = 0; ret = btrfs_search_slot(NULL, root, &key, path, 0, 0); if (ret < 0) goto error; while (1) { leaf = path->nodes[0]; slot = path->slots[0]; if (slot >= btrfs_header_nritems(leaf)) { ret = btrfs_next_leaf(root, path); if (ret == 0) continue; if (ret < 0) goto error; break; } btrfs_item_key_to_cpu(leaf, &found_key, slot); if (found_key.type == BTRFS_DEV_ITEM_KEY) { struct btrfs_dev_item *dev_item; dev_item = btrfs_item_ptr(leaf, slot, struct btrfs_dev_item); ret = read_one_dev(fs_info, leaf, dev_item); if (ret) goto error; total_dev++; } else if (found_key.type == BTRFS_CHUNK_ITEM_KEY) { struct btrfs_chunk *chunk; chunk = btrfs_item_ptr(leaf, slot, struct btrfs_chunk); ret = read_one_chunk(fs_info, &found_key, leaf, chunk); if (ret) goto error; } path->slots[0]++; } /* * After loading chunk tree, we've got all device information, * do another round of validation checks. */ if (total_dev != fs_info->fs_devices->total_devices) { btrfs_err(fs_info, "super_num_devices %llu mismatch with num_devices %llu found here", btrfs_super_num_devices(fs_info->super_copy), total_dev); ret = -EINVAL; goto error; } if (btrfs_super_total_bytes(fs_info->super_copy) < fs_info->fs_devices->total_rw_bytes) { btrfs_err(fs_info, "super_total_bytes %llu mismatch with fs_devices total_rw_bytes %llu", btrfs_super_total_bytes(fs_info->super_copy), fs_info->fs_devices->total_rw_bytes); ret = -EINVAL; goto error; } ret = 0; error: mutex_unlock(&fs_info->chunk_mutex); mutex_unlock(&uuid_mutex); btrfs_free_path(path); return ret; } void btrfs_init_devices_late(struct btrfs_fs_info *fs_info) { struct btrfs_fs_devices *fs_devices = fs_info->fs_devices; struct btrfs_device *device; while (fs_devices) { mutex_lock(&fs_devices->device_list_mutex); list_for_each_entry(device, &fs_devices->devices, dev_list) device->fs_info = fs_info; mutex_unlock(&fs_devices->device_list_mutex); fs_devices = fs_devices->seed; } } static void 
__btrfs_reset_dev_stats(struct btrfs_device *dev) { int i; for (i = 0; i < BTRFS_DEV_STAT_VALUES_MAX; i++) btrfs_dev_stat_reset(dev, i); } int btrfs_init_dev_stats(struct btrfs_fs_info *fs_info) { struct btrfs_key key; struct btrfs_key found_key; struct btrfs_root *dev_root = fs_info->dev_root; struct btrfs_fs_devices *fs_devices = fs_info->fs_devices; struct extent_buffer *eb; int slot; int ret = 0; struct btrfs_device *device; struct btrfs_path *path = NULL; int i; path = btrfs_alloc_path(); if (!path) { ret = -ENOMEM; goto out; } mutex_lock(&fs_devices->device_list_mutex); list_for_each_entry(device, &fs_devices->devices, dev_list) { int item_size; struct btrfs_dev_stats_item *ptr; key.objectid = BTRFS_DEV_STATS_OBJECTID; key.type = BTRFS_PERSISTENT_ITEM_KEY; key.offset = device->devid; ret = btrfs_search_slot(NULL, dev_root, &key, path, 0, 0); if (ret) { __btrfs_reset_dev_stats(device); device->dev_stats_valid = 1; btrfs_release_path(path); continue; } slot = path->slots[0]; eb = path->nodes[0]; btrfs_item_key_to_cpu(eb, &found_key, slot); item_size = btrfs_item_size_nr(eb, slot); ptr = btrfs_item_ptr(eb, slot, struct btrfs_dev_stats_item); for (i = 0; i < BTRFS_DEV_STAT_VALUES_MAX; i++) { if (item_size >= (1 + i) * sizeof(__le64)) btrfs_dev_stat_set(device, i, btrfs_dev_stats_value(eb, ptr, i)); else btrfs_dev_stat_reset(device, i); } device->dev_stats_valid = 1; btrfs_dev_stat_print_on_load(device); btrfs_release_path(path); } mutex_unlock(&fs_devices->device_list_mutex); out: btrfs_free_path(path); return ret < 0 ? 
ret : 0; } static int update_dev_stat_item(struct btrfs_trans_handle *trans, struct btrfs_device *device) { struct btrfs_fs_info *fs_info = trans->fs_info; struct btrfs_root *dev_root = fs_info->dev_root; struct btrfs_path *path; struct btrfs_key key; struct extent_buffer *eb; struct btrfs_dev_stats_item *ptr; int ret; int i; key.objectid = BTRFS_DEV_STATS_OBJECTID; key.type = BTRFS_PERSISTENT_ITEM_KEY; key.offset = device->devid; path = btrfs_alloc_path(); if (!path) return -ENOMEM; ret = btrfs_search_slot(trans, dev_root, &key, path, -1, 1); if (ret < 0) { btrfs_warn_in_rcu(fs_info, "error %d while searching for dev_stats item for device %s", ret, rcu_str_deref(device->name)); goto out; } if (ret == 0 && btrfs_item_size_nr(path->nodes[0], path->slots[0]) < sizeof(*ptr)) { /* need to delete old one and insert a new one */ ret = btrfs_del_item(trans, dev_root, path); if (ret != 0) { btrfs_warn_in_rcu(fs_info, "delete too small dev_stats item for device %s failed %d", rcu_str_deref(device->name), ret); goto out; } ret = 1; } if (ret == 1) { /* need to insert a new item */ btrfs_release_path(path); ret = btrfs_insert_empty_item(trans, dev_root, path, &key, sizeof(*ptr)); if (ret < 0) { btrfs_warn_in_rcu(fs_info, "insert dev_stats item for device %s failed %d", rcu_str_deref(device->name), ret); goto out; } } eb = path->nodes[0]; ptr = btrfs_item_ptr(eb, path->slots[0], struct btrfs_dev_stats_item); for (i = 0; i < BTRFS_DEV_STAT_VALUES_MAX; i++) btrfs_set_dev_stats_value(eb, ptr, i, btrfs_dev_stat_read(device, i)); btrfs_mark_buffer_dirty(eb); out: btrfs_free_path(path); return ret; } /* * called from commit_transaction. Writes all changed device stats to disk. 
*/ int btrfs_run_dev_stats(struct btrfs_trans_handle *trans, struct btrfs_fs_info *fs_info) { struct btrfs_fs_devices *fs_devices = fs_info->fs_devices; struct btrfs_device *device; int stats_cnt; int ret = 0; mutex_lock(&fs_devices->device_list_mutex); list_for_each_entry(device, &fs_devices->devices, dev_list) { stats_cnt = atomic_read(&device->dev_stats_ccnt); if (!device->dev_stats_valid || stats_cnt == 0) continue; /* * There is a LOAD-LOAD control dependency between the value of * dev_stats_ccnt and updating the on-disk values which requires * reading the in-memory counters. Such control dependencies * require explicit read memory barriers. * * This memory barriers pairs with smp_mb__before_atomic in * btrfs_dev_stat_inc/btrfs_dev_stat_set and with the full * barrier implied by atomic_xchg in * btrfs_dev_stats_read_and_reset */ smp_rmb(); ret = update_dev_stat_item(trans, device); if (!ret) atomic_sub(stats_cnt, &device->dev_stats_ccnt); } mutex_unlock(&fs_devices->device_list_mutex); return ret; } void btrfs_dev_stat_inc_and_print(struct btrfs_device *dev, int index) { btrfs_dev_stat_inc(dev, index); btrfs_dev_stat_print_on_error(dev); } static void btrfs_dev_stat_print_on_error(struct btrfs_device *dev) { if (!dev->dev_stats_valid) return; btrfs_err_rl_in_rcu(dev->fs_info, "bdev %s errs: wr %u, rd %u, flush %u, corrupt %u, gen %u", rcu_str_deref(dev->name), btrfs_dev_stat_read(dev, BTRFS_DEV_STAT_WRITE_ERRS), btrfs_dev_stat_read(dev, BTRFS_DEV_STAT_READ_ERRS), btrfs_dev_stat_read(dev, BTRFS_DEV_STAT_FLUSH_ERRS), btrfs_dev_stat_read(dev, BTRFS_DEV_STAT_CORRUPTION_ERRS), btrfs_dev_stat_read(dev, BTRFS_DEV_STAT_GENERATION_ERRS)); } static void btrfs_dev_stat_print_on_load(struct btrfs_device *dev) { int i; for (i = 0; i < BTRFS_DEV_STAT_VALUES_MAX; i++) if (btrfs_dev_stat_read(dev, i) != 0) break; if (i == BTRFS_DEV_STAT_VALUES_MAX) return; /* all values == 0, suppress message */ btrfs_info_in_rcu(dev->fs_info, "bdev %s errs: wr %u, rd %u, flush %u, corrupt 
%u, gen %u", rcu_str_deref(dev->name), btrfs_dev_stat_read(dev, BTRFS_DEV_STAT_WRITE_ERRS), btrfs_dev_stat_read(dev, BTRFS_DEV_STAT_READ_ERRS), btrfs_dev_stat_read(dev, BTRFS_DEV_STAT_FLUSH_ERRS), btrfs_dev_stat_read(dev, BTRFS_DEV_STAT_CORRUPTION_ERRS), btrfs_dev_stat_read(dev, BTRFS_DEV_STAT_GENERATION_ERRS)); } int btrfs_get_dev_stats(struct btrfs_fs_info *fs_info, struct btrfs_ioctl_get_dev_stats *stats) { struct btrfs_device *dev; struct btrfs_fs_devices *fs_devices = fs_info->fs_devices; int i; mutex_lock(&fs_devices->device_list_mutex); dev = btrfs_find_device(fs_info->fs_devices, stats->devid, NULL, NULL, true); mutex_unlock(&fs_devices->device_list_mutex); if (!dev) { btrfs_warn(fs_info, "get dev_stats failed, device not found"); return -ENODEV; } else if (!dev->dev_stats_valid) { btrfs_warn(fs_info, "get dev_stats failed, not yet valid"); return -ENODEV; } else if (stats->flags & BTRFS_DEV_STATS_RESET) { for (i = 0; i < BTRFS_DEV_STAT_VALUES_MAX; i++) { if (stats->nr_items > i) stats->values[i] = btrfs_dev_stat_read_and_reset(dev, i); else btrfs_dev_stat_reset(dev, i); } } else { for (i = 0; i < BTRFS_DEV_STAT_VALUES_MAX; i++) if (stats->nr_items > i) stats->values[i] = btrfs_dev_stat_read(dev, i); } if (stats->nr_items > BTRFS_DEV_STAT_VALUES_MAX) stats->nr_items = BTRFS_DEV_STAT_VALUES_MAX; return 0; } void btrfs_scratch_superblocks(struct block_device *bdev, const char *device_path) { struct buffer_head *bh; struct btrfs_super_block *disk_super; int copy_num; if (!bdev) return; for (copy_num = 0; copy_num < BTRFS_SUPER_MIRROR_MAX; copy_num++) { if (btrfs_read_dev_one_super(bdev, copy_num, &bh)) continue; disk_super = (struct btrfs_super_block *)bh->b_data; memset(&disk_super->magic, 0, sizeof(disk_super->magic)); set_buffer_dirty(bh); sync_dirty_buffer(bh); brelse(bh); } /* Notify udev that device has changed */ btrfs_kobject_uevent(bdev, KOBJ_CHANGE); /* Update ctime/mtime for device path for libblkid */ update_dev_time(device_path); } /* * Update the 
size of all devices, which is used for writing out the * super blocks. */ void btrfs_update_commit_device_size(struct btrfs_fs_info *fs_info) { struct btrfs_fs_devices *fs_devices = fs_info->fs_devices; struct btrfs_device *curr, *next; if (list_empty(&fs_devices->resized_devices)) return; mutex_lock(&fs_devices->device_list_mutex); mutex_lock(&fs_info->chunk_mutex); list_for_each_entry_safe(curr, next, &fs_devices->resized_devices, resized_list) { list_del_init(&curr->resized_list); curr->commit_total_bytes = curr->disk_total_bytes; } mutex_unlock(&fs_info->chunk_mutex); mutex_unlock(&fs_devices->device_list_mutex); } /* Must be invoked during the transaction commit */ void btrfs_update_commit_device_bytes_used(struct btrfs_transaction *trans) { struct btrfs_fs_info *fs_info = trans->fs_info; struct extent_map *em; struct map_lookup *map; struct btrfs_device *dev; int i; if (list_empty(&trans->pending_chunks)) return; /* In order to kick the device replace finish process */ mutex_lock(&fs_info->chunk_mutex); list_for_each_entry(em, &trans->pending_chunks, list) { map = em->map_lookup; for (i = 0; i < map->num_stripes; i++) { dev = map->stripes[i].dev; dev->commit_bytes_used = dev->bytes_used; } } mutex_unlock(&fs_info->chunk_mutex); } void btrfs_set_fs_info_ptr(struct btrfs_fs_info *fs_info) { struct btrfs_fs_devices *fs_devices = fs_info->fs_devices; while (fs_devices) { fs_devices->fs_info = fs_info; fs_devices = fs_devices->seed; } } void btrfs_reset_fs_info_ptr(struct btrfs_fs_info *fs_info) { struct btrfs_fs_devices *fs_devices = fs_info->fs_devices; while (fs_devices) { fs_devices->fs_info = NULL; fs_devices = fs_devices->seed; } } /* * Multiplicity factor for simple profiles: DUP, RAID1-like and RAID10. 
*/ int btrfs_bg_type_to_factor(u64 flags) { if (flags & (BTRFS_BLOCK_GROUP_DUP | BTRFS_BLOCK_GROUP_RAID1 | BTRFS_BLOCK_GROUP_RAID10)) return 2; return 1; } static u64 calc_stripe_length(u64 type, u64 chunk_len, int num_stripes) { int index = btrfs_bg_flags_to_raid_index(type); int ncopies = btrfs_raid_array[index].ncopies; int data_stripes; switch (type & BTRFS_BLOCK_GROUP_PROFILE_MASK) { case BTRFS_BLOCK_GROUP_RAID5: data_stripes = num_stripes - 1; break; case BTRFS_BLOCK_GROUP_RAID6: data_stripes = num_stripes - 2; break; default: data_stripes = num_stripes / ncopies; break; } return div_u64(chunk_len, data_stripes); } static int verify_one_dev_extent(struct btrfs_fs_info *fs_info, u64 chunk_offset, u64 devid, u64 physical_offset, u64 physical_len) { struct extent_map_tree *em_tree = &fs_info->mapping_tree.map_tree; struct extent_map *em; struct map_lookup *map; struct btrfs_device *dev; u64 stripe_len; bool found = false; int ret = 0; int i; read_lock(&em_tree->lock); em = lookup_extent_mapping(em_tree, chunk_offset, 1); read_unlock(&em_tree->lock); if (!em) { btrfs_err(fs_info, "dev extent physical offset %llu on devid %llu doesn't have corresponding chunk", physical_offset, devid); ret = -EUCLEAN; goto out; } map = em->map_lookup; stripe_len = calc_stripe_length(map->type, em->len, map->num_stripes); if (physical_len != stripe_len) { btrfs_err(fs_info, "dev extent physical offset %llu on devid %llu length doesn't match chunk %llu, have %llu expect %llu", physical_offset, devid, em->start, physical_len, stripe_len); ret = -EUCLEAN; goto out; } for (i = 0; i < map->num_stripes; i++) { if (map->stripes[i].dev->devid == devid && map->stripes[i].physical == physical_offset) { found = true; if (map->verified_stripes >= map->num_stripes) { btrfs_err(fs_info, "too many dev extents for chunk %llu found", em->start); ret = -EUCLEAN; goto out; } map->verified_stripes++; break; } } if (!found) { btrfs_err(fs_info, "dev extent physical offset %llu devid %llu has no 
corresponding chunk", physical_offset, devid); ret = -EUCLEAN; } /* Make sure no dev extent is beyond device bondary */ dev = btrfs_find_device(fs_info->fs_devices, devid, NULL, NULL, true); if (!dev) { btrfs_err(fs_info, "failed to find devid %llu", devid); ret = -EUCLEAN; goto out; } /* It's possible this device is a dummy for seed device */ if (dev->disk_total_bytes == 0) { dev = btrfs_find_device(fs_info->fs_devices->seed, devid, NULL, NULL, false); if (!dev) { btrfs_err(fs_info, "failed to find seed devid %llu", devid); ret = -EUCLEAN; goto out; } } if (physical_offset + physical_len > dev->disk_total_bytes) { btrfs_err(fs_info, "dev extent devid %llu physical offset %llu len %llu is beyond device boundary %llu", devid, physical_offset, physical_len, dev->disk_total_bytes); ret = -EUCLEAN; goto out; } out: free_extent_map(em); return ret; } static int verify_chunk_dev_extent_mapping(struct btrfs_fs_info *fs_info) { struct extent_map_tree *em_tree = &fs_info->mapping_tree.map_tree; struct extent_map *em; struct rb_node *node; int ret = 0; read_lock(&em_tree->lock); for (node = rb_first_cached(&em_tree->map); node; node = rb_next(node)) { em = rb_entry(node, struct extent_map, rb_node); if (em->map_lookup->num_stripes != em->map_lookup->verified_stripes) { btrfs_err(fs_info, "chunk %llu has missing dev extent, have %d expect %d", em->start, em->map_lookup->verified_stripes, em->map_lookup->num_stripes); ret = -EUCLEAN; goto out; } } out: read_unlock(&em_tree->lock); return ret; } /* * Ensure that all dev extents are mapped to correct chunk, otherwise * later chunk allocation/free would cause unexpected behavior. * * NOTE: This will iterate through the whole device tree, which should be of * the same size level as the chunk tree. This slightly increases mount time. 
*/ int btrfs_verify_dev_extents(struct btrfs_fs_info *fs_info) { struct btrfs_path *path; struct btrfs_root *root = fs_info->dev_root; struct btrfs_key key; u64 prev_devid = 0; u64 prev_dev_ext_end = 0; int ret = 0; key.objectid = 1; key.type = BTRFS_DEV_EXTENT_KEY; key.offset = 0; path = btrfs_alloc_path(); if (!path) return -ENOMEM; path->reada = READA_FORWARD; ret = btrfs_search_slot(NULL, root, &key, path, 0, 0); if (ret < 0) goto out; if (path->slots[0] >= btrfs_header_nritems(path->nodes[0])) { ret = btrfs_next_item(root, path); if (ret < 0) goto out; /* No dev extents at all? Not good */ if (ret > 0) { ret = -EUCLEAN; goto out; } } while (1) { struct extent_buffer *leaf = path->nodes[0]; struct btrfs_dev_extent *dext; int slot = path->slots[0]; u64 chunk_offset; u64 physical_offset; u64 physical_len; u64 devid; btrfs_item_key_to_cpu(leaf, &key, slot); if (key.type != BTRFS_DEV_EXTENT_KEY) break; devid = key.objectid; physical_offset = key.offset; dext = btrfs_item_ptr(leaf, slot, struct btrfs_dev_extent); chunk_offset = btrfs_dev_extent_chunk_offset(leaf, dext); physical_len = btrfs_dev_extent_length(leaf, dext); /* Check if this dev extent overlaps with the previous one */ if (devid == prev_devid && physical_offset < prev_dev_ext_end) { btrfs_err(fs_info, "dev extent devid %llu physical offset %llu overlap with previous dev extent end %llu", devid, physical_offset, prev_dev_ext_end); ret = -EUCLEAN; goto out; } ret = verify_one_dev_extent(fs_info, chunk_offset, devid, physical_offset, physical_len); if (ret < 0) goto out; prev_devid = devid; prev_dev_ext_end = physical_offset + physical_len; ret = btrfs_next_item(root, path); if (ret < 0) goto out; if (ret > 0) { ret = 0; break; } } /* Ensure all chunks have corresponding dev extents */ ret = verify_chunk_dev_extent_mapping(fs_info); out: btrfs_free_path(path); return ret; } /* * Check whether the given block group or device is pinned by any inode being * used as a swapfile. 
*/ bool btrfs_pinned_by_swapfile(struct btrfs_fs_info *fs_info, void *ptr) { struct btrfs_swapfile_pin *sp; struct rb_node *node; spin_lock(&fs_info->swapfile_pins_lock); node = fs_info->swapfile_pins.rb_node; while (node) { sp = rb_entry(node, struct btrfs_swapfile_pin, node); if (ptr < sp->ptr) node = node->rb_left; else if (ptr > sp->ptr) node = node->rb_right; else break; } spin_unlock(&fs_info->swapfile_pins_lock); return node != NULL; }
static int read_one_chunk(struct btrfs_fs_info *fs_info, struct btrfs_key *key, struct extent_buffer *leaf, struct btrfs_chunk *chunk) { struct btrfs_mapping_tree *map_tree = &fs_info->mapping_tree; struct map_lookup *map; struct extent_map *em; u64 logical; u64 length; u64 devid; u8 uuid[BTRFS_UUID_SIZE]; int num_stripes; int ret; int i; logical = key->offset; length = btrfs_chunk_length(leaf, chunk); num_stripes = btrfs_chunk_num_stripes(leaf, chunk); ret = btrfs_check_chunk_valid(fs_info, leaf, chunk, logical); if (ret) return ret; read_lock(&map_tree->map_tree.lock); em = lookup_extent_mapping(&map_tree->map_tree, logical, 1); read_unlock(&map_tree->map_tree.lock); /* already mapped? */ if (em && em->start <= logical && em->start + em->len > logical) { free_extent_map(em); return 0; } else if (em) { free_extent_map(em); } em = alloc_extent_map(); if (!em) return -ENOMEM; map = kmalloc(map_lookup_size(num_stripes), GFP_NOFS); if (!map) { free_extent_map(em); return -ENOMEM; } set_bit(EXTENT_FLAG_FS_MAPPING, &em->flags); em->map_lookup = map; em->start = logical; em->len = length; em->orig_start = 0; em->block_start = 0; em->block_len = em->len; map->num_stripes = num_stripes; map->io_width = btrfs_chunk_io_width(leaf, chunk); map->io_align = btrfs_chunk_io_align(leaf, chunk); map->stripe_len = btrfs_chunk_stripe_len(leaf, chunk); map->type = btrfs_chunk_type(leaf, chunk); map->sub_stripes = btrfs_chunk_sub_stripes(leaf, chunk); map->verified_stripes = 0; for (i = 0; i < num_stripes; i++) { map->stripes[i].physical = btrfs_stripe_offset_nr(leaf, chunk, i); devid = btrfs_stripe_devid_nr(leaf, chunk, i); read_extent_buffer(leaf, uuid, (unsigned long) btrfs_stripe_dev_uuid_nr(chunk, i), BTRFS_UUID_SIZE); map->stripes[i].dev = btrfs_find_device(fs_info->fs_devices, devid, uuid, NULL); if (!map->stripes[i].dev && !btrfs_test_opt(fs_info, DEGRADED)) { free_extent_map(em); btrfs_report_missing_device(fs_info, devid, uuid, true); return -ENOENT; } if 
(!map->stripes[i].dev) { map->stripes[i].dev = add_missing_dev(fs_info->fs_devices, devid, uuid); if (IS_ERR(map->stripes[i].dev)) { free_extent_map(em); btrfs_err(fs_info, "failed to init missing dev %llu: %ld", devid, PTR_ERR(map->stripes[i].dev)); return PTR_ERR(map->stripes[i].dev); } btrfs_report_missing_device(fs_info, devid, uuid, false); } set_bit(BTRFS_DEV_STATE_IN_FS_METADATA, &(map->stripes[i].dev->dev_state)); } write_lock(&map_tree->map_tree.lock); ret = add_extent_mapping(&map_tree->map_tree, em, 0); write_unlock(&map_tree->map_tree.lock); if (ret < 0) { btrfs_err(fs_info, "failed to add chunk map, start=%llu len=%llu: %d", em->start, em->len, ret); } free_extent_map(em); return ret; }
static int read_one_chunk(struct btrfs_fs_info *fs_info, struct btrfs_key *key, struct extent_buffer *leaf, struct btrfs_chunk *chunk) { struct btrfs_mapping_tree *map_tree = &fs_info->mapping_tree; struct map_lookup *map; struct extent_map *em; u64 logical; u64 length; u64 devid; u8 uuid[BTRFS_UUID_SIZE]; int num_stripes; int ret; int i; logical = key->offset; length = btrfs_chunk_length(leaf, chunk); num_stripes = btrfs_chunk_num_stripes(leaf, chunk); ret = btrfs_check_chunk_valid(fs_info, leaf, chunk, logical); if (ret) return ret; read_lock(&map_tree->map_tree.lock); em = lookup_extent_mapping(&map_tree->map_tree, logical, 1); read_unlock(&map_tree->map_tree.lock); /* already mapped? */ if (em && em->start <= logical && em->start + em->len > logical) { free_extent_map(em); return 0; } else if (em) { free_extent_map(em); } em = alloc_extent_map(); if (!em) return -ENOMEM; map = kmalloc(map_lookup_size(num_stripes), GFP_NOFS); if (!map) { free_extent_map(em); return -ENOMEM; } set_bit(EXTENT_FLAG_FS_MAPPING, &em->flags); em->map_lookup = map; em->start = logical; em->len = length; em->orig_start = 0; em->block_start = 0; em->block_len = em->len; map->num_stripes = num_stripes; map->io_width = btrfs_chunk_io_width(leaf, chunk); map->io_align = btrfs_chunk_io_align(leaf, chunk); map->stripe_len = btrfs_chunk_stripe_len(leaf, chunk); map->type = btrfs_chunk_type(leaf, chunk); map->sub_stripes = btrfs_chunk_sub_stripes(leaf, chunk); map->verified_stripes = 0; for (i = 0; i < num_stripes; i++) { map->stripes[i].physical = btrfs_stripe_offset_nr(leaf, chunk, i); devid = btrfs_stripe_devid_nr(leaf, chunk, i); read_extent_buffer(leaf, uuid, (unsigned long) btrfs_stripe_dev_uuid_nr(chunk, i), BTRFS_UUID_SIZE); map->stripes[i].dev = btrfs_find_device(fs_info->fs_devices, devid, uuid, NULL, true); if (!map->stripes[i].dev && !btrfs_test_opt(fs_info, DEGRADED)) { free_extent_map(em); btrfs_report_missing_device(fs_info, devid, uuid, true); return -ENOENT; } if 
(!map->stripes[i].dev) { map->stripes[i].dev = add_missing_dev(fs_info->fs_devices, devid, uuid); if (IS_ERR(map->stripes[i].dev)) { free_extent_map(em); btrfs_err(fs_info, "failed to init missing dev %llu: %ld", devid, PTR_ERR(map->stripes[i].dev)); return PTR_ERR(map->stripes[i].dev); } btrfs_report_missing_device(fs_info, devid, uuid, false); } set_bit(BTRFS_DEV_STATE_IN_FS_METADATA, &(map->stripes[i].dev->dev_state)); } write_lock(&map_tree->map_tree.lock); ret = add_extent_mapping(&map_tree->map_tree, em, 0); write_unlock(&map_tree->map_tree.lock); if (ret < 0) { btrfs_err(fs_info, "failed to add chunk map, start=%llu len=%llu: %d", em->start, em->len, ret); } free_extent_map(em); return ret; }
{'added': [(966, '\t\tdevice = btrfs_find_device(fs_devices, devid,'), (967, '\t\t\t\tdisk_super->dev_item.uuid, NULL, false);'), (2384, '\t\t\t\t\t disk_super->metadata_uuid, true);'), (2387, '\t\t\t\t\t disk_super->fsid, true);'), (2407, '\t\t\t\t\t NULL, true);'), (2550, '\t\t\t\t\t fs_uuid, true);'), (6603, '/*'), (6604, ' * Find a device specified by @devid or @uuid in the list of @fs_devices, or'), (6605, ' * return NULL.'), (6606, ' *'), (6607, ' * If devid and uuid are both specified, the match must be exact, otherwise'), (6608, ' * only devid is used.'), (6609, ' *'), (6610, ' * If @seed is true, traverse through the seed devices.'), (6611, ' */'), (6613, '\t\t\t\t u64 devid, u8 *uuid, u8 *fsid,'), (6614, '\t\t\t\t bool seed)'), (6621, '\t\t\tlist_for_each_entry(device, &fs_devices->devices,'), (6622, '\t\t\t\t\t dev_list) {'), (6623, '\t\t\t\tif (device->devid == devid &&'), (6624, '\t\t\t\t (!uuid || memcmp(device->uuid, uuid,'), (6625, '\t\t\t\t\t\t BTRFS_UUID_SIZE) == 0))'), (6626, '\t\t\t\t\treturn device;'), (6627, '\t\t\t}'), (6629, '\t\tif (seed)'), (6630, '\t\t\tfs_devices = fs_devices->seed;'), (6631, '\t\telse'), (6632, '\t\t\treturn NULL;'), (6878, '\t\t\t\t\t\t\tdevid, uuid, NULL, true);'), (7018, '\t\t\t\t fs_uuid, true);'), (7608, '\tdev = btrfs_find_device(fs_info->fs_devices, stats->devid, NULL, NULL,'), (7609, '\t\t\t\ttrue);'), (7823, '\tdev = btrfs_find_device(fs_info->fs_devices, devid, NULL, NULL, true);'), (7832, '\t\tdev = btrfs_find_device(fs_info->fs_devices->seed, devid, NULL,'), (7833, '\t\t\t\t\tNULL, false);')], 'deleted': [(418, '/*'), (419, ' * Find a device specified by @devid or @uuid in the list of @fs_devices, or'), (420, ' * return NULL.'), (421, ' *'), (422, ' * If devid and uuid are both specified, the match must be exact, otherwise'), (423, ' * only devid is used.'), (424, ' */'), (425, 'static struct btrfs_device *find_device(struct btrfs_fs_devices *fs_devices,'), (426, '\t\tu64 devid, const u8 *uuid)'), (427, 
'{'), (428, '\tstruct btrfs_device *dev;'), (429, ''), (430, '\tlist_for_each_entry(dev, &fs_devices->devices, dev_list) {'), (431, '\t\tif (dev->devid == devid &&'), (432, '\t\t (!uuid || !memcmp(dev->uuid, uuid, BTRFS_UUID_SIZE))) {'), (433, '\t\t\treturn dev;'), (434, '\t\t}'), (435, '\t}'), (436, '\treturn NULL;'), (437, '}'), (438, ''), (987, '\t\tdevice = find_device(fs_devices, devid,'), (988, '\t\t\t\tdisk_super->dev_item.uuid);'), (2405, '\t\t\t\t\t disk_super->metadata_uuid);'), (2408, '\t\t\t\t\t disk_super->fsid);'), (2428, '\t\t\t\t\t NULL);'), (2571, '\t\t\t\t\t fs_uuid);'), (6625, '\t\t\t\t u64 devid, u8 *uuid, u8 *fsid)'), (6632, '\t\t\tdevice = find_device(fs_devices, devid, uuid);'), (6633, '\t\t\tif (device)'), (6634, '\t\t\t\treturn device;'), (6636, '\t\tfs_devices = fs_devices->seed;'), (6882, '\t\t\t\t\t\t\tdevid, uuid, NULL);'), (7022, '\t\t\t\t fs_uuid);'), (7612, '\tdev = btrfs_find_device(fs_info->fs_devices, stats->devid, NULL, NULL);'), (7826, '\tdev = btrfs_find_device(fs_info->fs_devices, devid, NULL, NULL);'), (7835, '\t\tdev = find_device(fs_info->fs_devices->seed, devid, NULL);')]}
35
37
5,833
34,668
93
660
14
https://github.com/torvalds/linux
CVE-2019-18885
CWE-476
2,373
jpg_dec.c
C
jpg_decode
/* * Copyright (c) 2001-2003 Michael David Adams. * All rights reserved. */ /* __START_OF_JASPER_LICENSE__ * * JasPer License Version 2.0 * * Copyright (c) 2001-2006 Michael David Adams * Copyright (c) 1999-2000 Image Power, Inc. * Copyright (c) 1999-2000 The University of British Columbia * * All rights reserved. * * Permission is hereby granted, free of charge, to any person (the * "User") obtaining a copy of this software and associated documentation * files (the "Software"), to deal in the Software without restriction, * including without limitation the rights to use, copy, modify, merge, * publish, distribute, and/or sell copies of the Software, and to permit * persons to whom the Software is furnished to do so, subject to the * following conditions: * * 1. The above copyright notices and this permission notice (which * includes the disclaimer below) shall be included in all copies or * substantial portions of the Software. * * 2. The name of a copyright holder shall not be used to endorse or * promote products derived from the Software without specific prior * written permission. * * THIS DISCLAIMER OF WARRANTY CONSTITUTES AN ESSENTIAL PART OF THIS * LICENSE. NO USE OF THE SOFTWARE IS AUTHORIZED HEREUNDER EXCEPT UNDER * THIS DISCLAIMER. THE SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS * "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING * BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A * PARTICULAR PURPOSE AND NONINFRINGEMENT OF THIRD PARTY RIGHTS. IN NO * EVENT SHALL THE COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, OR ANY SPECIAL * INDIRECT OR CONSEQUENTIAL DAMAGES, OR ANY DAMAGES WHATSOEVER RESULTING * FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN ACTION OF CONTRACT, * NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF OR IN CONNECTION * WITH THE USE OR PERFORMANCE OF THIS SOFTWARE. 
NO ASSURANCES ARE * PROVIDED BY THE COPYRIGHT HOLDERS THAT THE SOFTWARE DOES NOT INFRINGE * THE PATENT OR OTHER INTELLECTUAL PROPERTY RIGHTS OF ANY OTHER ENTITY. * EACH COPYRIGHT HOLDER DISCLAIMS ANY LIABILITY TO THE USER FOR CLAIMS * BROUGHT BY ANY OTHER ENTITY BASED ON INFRINGEMENT OF INTELLECTUAL * PROPERTY RIGHTS OR OTHERWISE. AS A CONDITION TO EXERCISING THE RIGHTS * GRANTED HEREUNDER, EACH USER HEREBY ASSUMES SOLE RESPONSIBILITY TO SECURE * ANY OTHER INTELLECTUAL PROPERTY RIGHTS NEEDED, IF ANY. THE SOFTWARE * IS NOT FAULT-TOLERANT AND IS NOT INTENDED FOR USE IN MISSION-CRITICAL * SYSTEMS, SUCH AS THOSE USED IN THE OPERATION OF NUCLEAR FACILITIES, * AIRCRAFT NAVIGATION OR COMMUNICATION SYSTEMS, AIR TRAFFIC CONTROL * SYSTEMS, DIRECT LIFE SUPPORT MACHINES, OR WEAPONS SYSTEMS, IN WHICH * THE FAILURE OF THE SOFTWARE OR SYSTEM COULD LEAD DIRECTLY TO DEATH, * PERSONAL INJURY, OR SEVERE PHYSICAL OR ENVIRONMENTAL DAMAGE ("HIGH * RISK ACTIVITIES"). THE COPYRIGHT HOLDERS SPECIFICALLY DISCLAIM ANY * EXPRESS OR IMPLIED WARRANTY OF FITNESS FOR HIGH RISK ACTIVITIES. * * __END_OF_JASPER_LICENSE__ */ /******************************************************************************\ * Includes. \******************************************************************************/ #include <stdio.h> #include <assert.h> #include <ctype.h> #include "jasper/jas_tvp.h" #include "jasper/jas_stream.h" #include "jasper/jas_image.h" #include "jasper/jas_string.h" #include "jasper/jas_debug.h" #include "jpg_jpeglib.h" #include "jpg_cod.h" /******************************************************************************\ * Types. \******************************************************************************/ /* JPEG decoder data sink type. */ typedef struct jpg_dest_s { /* Initialize output. */ void (*start_output)(j_decompress_ptr cinfo, struct jpg_dest_s *dinfo); /* Output rows of decompressed data. 
*/ void (*put_pixel_rows)(j_decompress_ptr cinfo, struct jpg_dest_s *dinfo, JDIMENSION rows_supplied); /* Cleanup output. */ void (*finish_output)(j_decompress_ptr cinfo, struct jpg_dest_s *dinfo); /* Output buffer. */ JSAMPARRAY buffer; /* Height of output buffer. */ JDIMENSION buffer_height; /* The current row. */ JDIMENSION row; /* The image used to hold the decompressed sample data. */ jas_image_t *image; /* The row buffer. */ jas_matrix_t *data; /* The error indicator. If this is nonzero, something has gone wrong during decompression. */ int error; } jpg_dest_t; /******************************************************************************\ * Local functions. \******************************************************************************/ static void jpg_start_output(j_decompress_ptr cinfo, jpg_dest_t *dinfo); static void jpg_put_pixel_rows(j_decompress_ptr cinfo, jpg_dest_t *dinfo, JDIMENSION rows_supplied); static void jpg_finish_output(j_decompress_ptr cinfo, jpg_dest_t *dinfo); static int jpg_copystreamtofile(FILE *out, jas_stream_t *in); static jas_image_t *jpg_mkimage(j_decompress_ptr cinfo); /******************************************************************************\ * \******************************************************************************/ typedef struct { size_t max_size; } jpg_dec_importopts_t; typedef enum { OPT_MAXSIZE, } optid_t; static jas_taginfo_t decopts[] = { {OPT_MAXSIZE, "max_size"}, {-1, 0} }; static int jpg_dec_parseopts(char *optstr, jpg_dec_importopts_t *opts) { jas_tvparser_t *tvp; opts->max_size = 0; if (!(tvp = jas_tvparser_create(optstr ? 
optstr : ""))) { return -1; } while (!jas_tvparser_next(tvp)) { switch (jas_taginfo_nonull(jas_taginfos_lookup(decopts, jas_tvparser_gettag(tvp)))->id) { case OPT_MAXSIZE: opts->max_size = atoi(jas_tvparser_getval(tvp)); break; default: jas_eprintf("warning: ignoring invalid option %s\n", jas_tvparser_gettag(tvp)); break; } } jas_tvparser_destroy(tvp); return 0; } /******************************************************************************\ * Code for load operation. \******************************************************************************/ /* Load an image from a stream in the JPG format. */ jas_image_t *jpg_decode(jas_stream_t *in, char *optstr) { struct jpeg_decompress_struct cinfo; struct jpeg_error_mgr jerr; FILE *input_file; jpg_dest_t dest_mgr_buf; jpg_dest_t *dest_mgr = &dest_mgr_buf; JDIMENSION num_scanlines; jas_image_t *image; int ret; jpg_dec_importopts_t opts; size_t size; if (jpg_dec_parseopts(optstr, &opts)) { goto error; } // In theory, the two memset calls that follow are not needed. // They are only here to make the code more predictable in the event // that the JPEG library fails to initialize a member. memset(&cinfo, 0, sizeof(struct jpeg_decompress_struct)); memset(dest_mgr, 0, sizeof(jpg_dest_t)); dest_mgr->data = 0; image = 0; input_file = 0; if (!(input_file = tmpfile())) { jas_eprintf("cannot make temporary file\n"); goto error; } if (jpg_copystreamtofile(input_file, in)) { jas_eprintf("cannot copy stream\n"); goto error; } rewind(input_file); /* Allocate and initialize a JPEG decompression object. */ JAS_DBGLOG(10, ("jpeg_std_error(%p)\n", &jerr)); cinfo.err = jpeg_std_error(&jerr); JAS_DBGLOG(10, ("jpeg_create_decompress(%p)\n", &cinfo)); jpeg_create_decompress(&cinfo); /* Specify the data source for decompression. */ JAS_DBGLOG(10, ("jpeg_stdio_src(%p, %p)\n", &cinfo, input_file)); jpeg_stdio_src(&cinfo, input_file); /* Read the file header to obtain the image information. 
*/ JAS_DBGLOG(10, ("jpeg_read_header(%p, TRUE)\n", &cinfo)); ret = jpeg_read_header(&cinfo, TRUE); JAS_DBGLOG(10, ("jpeg_read_header return value %d\n", ret)); if (ret != JPEG_HEADER_OK) { jas_eprintf("jpeg_read_header did not return JPEG_HEADER_OK\n"); } JAS_DBGLOG(10, ( "header: image_width %d; image_height %d; num_components %d\n", cinfo.image_width, cinfo.image_height, cinfo.num_components) ); /* Start the decompressor. */ JAS_DBGLOG(10, ("jpeg_start_decompress(%p)\n", &cinfo)); ret = jpeg_start_decompress(&cinfo); JAS_DBGLOG(10, ("jpeg_start_decompress return value %d\n", ret)); JAS_DBGLOG(10, ( "header: output_width %d; output_height %d; output_components %d\n", cinfo.output_width, cinfo.output_height, cinfo.output_components) ); if (opts.max_size) { if (!jas_safe_size_mul(cinfo.output_width, cinfo.output_height, &size) || !jas_safe_size_mul(size, cinfo.output_components, &size)) { goto error; } if (size > opts.max_size) { jas_eprintf("image is too large\n"); goto error; } } /* Create an image object to hold the decoded data. */ if (!(image = jpg_mkimage(&cinfo))) { jas_eprintf("jpg_mkimage failed\n"); goto error; } /* Initialize the data sink object. */ dest_mgr->image = image; if (!(dest_mgr->data = jas_matrix_create(1, cinfo.output_width))) { jas_eprintf("jas_matrix_create failed\n"); goto error; } dest_mgr->start_output = jpg_start_output; dest_mgr->put_pixel_rows = jpg_put_pixel_rows; dest_mgr->finish_output = jpg_finish_output; dest_mgr->buffer = (*cinfo.mem->alloc_sarray) ((j_common_ptr) &cinfo, JPOOL_IMAGE, cinfo.output_width * cinfo.output_components, (JDIMENSION) 1); dest_mgr->buffer_height = 1; dest_mgr->error = 0; /* Process the compressed data. 
*/ (*dest_mgr->start_output)(&cinfo, dest_mgr); while (cinfo.output_scanline < cinfo.output_height) { JAS_DBGLOG(10, ("jpeg_read_scanlines(%p, %p, %lu)\n", &cinfo, dest_mgr->buffer, JAS_CAST(unsigned long, dest_mgr->buffer_height))); num_scanlines = jpeg_read_scanlines(&cinfo, dest_mgr->buffer, dest_mgr->buffer_height); JAS_DBGLOG(10, ("jpeg_read_scanlines return value %lu\n", JAS_CAST(unsigned long, num_scanlines))); (*dest_mgr->put_pixel_rows)(&cinfo, dest_mgr, num_scanlines); } (*dest_mgr->finish_output)(&cinfo, dest_mgr); /* Complete the decompression process. */ JAS_DBGLOG(10, ("jpeg_finish_decompress(%p)\n", &cinfo)); jpeg_finish_decompress(&cinfo); /* Destroy the JPEG decompression object. */ JAS_DBGLOG(10, ("jpeg_destroy_decompress(%p)\n", &cinfo)); jpeg_destroy_decompress(&cinfo); jas_matrix_destroy(dest_mgr->data); JAS_DBGLOG(10, ("fclose(%p)\n", input_file)); fclose(input_file); input_file = 0; if (dest_mgr->error) { jas_eprintf("error during decoding\n"); goto error; } return image; error: if (dest_mgr->data) { jas_matrix_destroy(dest_mgr->data); } if (image) { jas_image_destroy(image); } if (input_file) { fclose(input_file); } return 0; } /******************************************************************************\ * \******************************************************************************/ static jas_image_t *jpg_mkimage(j_decompress_ptr cinfo) { jas_image_t *image; int cmptno; jas_image_cmptparm_t cmptparm; int numcmpts; JAS_DBGLOG(10, ("jpg_mkimage(%p)\n", cinfo)); image = 0; numcmpts = cinfo->output_components; if (!(image = jas_image_create0())) { goto error; } for (cmptno = 0; cmptno < numcmpts; ++cmptno) { if (cinfo->image_width > JAS_IMAGE_COORD_MAX || cinfo->image_height > JAS_IMAGE_COORD_MAX) { goto error; } cmptparm.tlx = 0; cmptparm.tly = 0; cmptparm.hstep = 1; cmptparm.vstep = 1; cmptparm.width = cinfo->image_width; cmptparm.height = cinfo->image_height; cmptparm.prec = 8; cmptparm.sgnd = false; if (jas_image_addcmpt(image, 
cmptno, &cmptparm)) { goto error; } } if (numcmpts == 3) { jas_image_setclrspc(image, JAS_CLRSPC_SRGB); jas_image_setcmpttype(image, 0, JAS_IMAGE_CT_COLOR(JAS_CLRSPC_CHANIND_RGB_R)); jas_image_setcmpttype(image, 1, JAS_IMAGE_CT_COLOR(JAS_CLRSPC_CHANIND_RGB_G)); jas_image_setcmpttype(image, 2, JAS_IMAGE_CT_COLOR(JAS_CLRSPC_CHANIND_RGB_B)); } else { jas_image_setclrspc(image, JAS_CLRSPC_SGRAY); jas_image_setcmpttype(image, 0, JAS_IMAGE_CT_COLOR(JAS_CLRSPC_CHANIND_GRAY_Y)); } return image; error: if (image) { jas_image_destroy(image); } return 0; } /******************************************************************************\ * Data source code. \******************************************************************************/ static int jpg_copystreamtofile(FILE *out, jas_stream_t *in) { int c; while ((c = jas_stream_getc(in)) != EOF) { if (fputc(c, out) == EOF) { return -1; } } if (jas_stream_error(in)) { return -1; } return 0; } /******************************************************************************\ * Data sink code. \******************************************************************************/ static void jpg_start_output(j_decompress_ptr cinfo, jpg_dest_t *dinfo) { /* Avoid compiler warnings about unused parameters. 
*/ cinfo = 0; JAS_DBGLOG(10, ("jpg_start_output(%p, %p)\n", cinfo, dinfo)); dinfo->row = 0; } static void jpg_put_pixel_rows(j_decompress_ptr cinfo, jpg_dest_t *dinfo, JDIMENSION rows_supplied) { JSAMPLE *bufptr; int cmptno; JDIMENSION x; uint_fast32_t width; JAS_DBGLOG(10, ("jpg_put_pixel_rows(%p, %p)\n", cinfo, dinfo)); if (dinfo->error) { return; } assert(cinfo->output_components == jas_image_numcmpts(dinfo->image)); for (cmptno = 0; cmptno < cinfo->output_components; ++cmptno) { width = jas_image_cmptwidth(dinfo->image, cmptno); bufptr = (dinfo->buffer[0]) + cmptno; for (x = 0; x < width; ++x) { jas_matrix_set(dinfo->data, 0, x, GETJSAMPLE(*bufptr)); bufptr += cinfo->output_components; } JAS_DBGLOG(10, ( "jas_image_writecmpt called for component %d row %lu\n", cmptno, JAS_CAST(unsigned long, dinfo->row))); if (jas_image_writecmpt(dinfo->image, cmptno, 0, dinfo->row, width, 1, dinfo->data)) { dinfo->error = 1; } } dinfo->row += rows_supplied; } static void jpg_finish_output(j_decompress_ptr cinfo, jpg_dest_t *dinfo) { JAS_DBGLOG(10, ("jpg_finish_output(%p, %p)\n", cinfo, dinfo)); /* Avoid compiler warnings about unused parameters. */ cinfo = 0; dinfo = 0; }
/* * Copyright (c) 2001-2003 Michael David Adams. * All rights reserved. */ /* __START_OF_JASPER_LICENSE__ * * JasPer License Version 2.0 * * Copyright (c) 2001-2006 Michael David Adams * Copyright (c) 1999-2000 Image Power, Inc. * Copyright (c) 1999-2000 The University of British Columbia * * All rights reserved. * * Permission is hereby granted, free of charge, to any person (the * "User") obtaining a copy of this software and associated documentation * files (the "Software"), to deal in the Software without restriction, * including without limitation the rights to use, copy, modify, merge, * publish, distribute, and/or sell copies of the Software, and to permit * persons to whom the Software is furnished to do so, subject to the * following conditions: * * 1. The above copyright notices and this permission notice (which * includes the disclaimer below) shall be included in all copies or * substantial portions of the Software. * * 2. The name of a copyright holder shall not be used to endorse or * promote products derived from the Software without specific prior * written permission. * * THIS DISCLAIMER OF WARRANTY CONSTITUTES AN ESSENTIAL PART OF THIS * LICENSE. NO USE OF THE SOFTWARE IS AUTHORIZED HEREUNDER EXCEPT UNDER * THIS DISCLAIMER. THE SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS * "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING * BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A * PARTICULAR PURPOSE AND NONINFRINGEMENT OF THIRD PARTY RIGHTS. IN NO * EVENT SHALL THE COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, OR ANY SPECIAL * INDIRECT OR CONSEQUENTIAL DAMAGES, OR ANY DAMAGES WHATSOEVER RESULTING * FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN ACTION OF CONTRACT, * NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF OR IN CONNECTION * WITH THE USE OR PERFORMANCE OF THIS SOFTWARE. 
NO ASSURANCES ARE * PROVIDED BY THE COPYRIGHT HOLDERS THAT THE SOFTWARE DOES NOT INFRINGE * THE PATENT OR OTHER INTELLECTUAL PROPERTY RIGHTS OF ANY OTHER ENTITY. * EACH COPYRIGHT HOLDER DISCLAIMS ANY LIABILITY TO THE USER FOR CLAIMS * BROUGHT BY ANY OTHER ENTITY BASED ON INFRINGEMENT OF INTELLECTUAL * PROPERTY RIGHTS OR OTHERWISE. AS A CONDITION TO EXERCISING THE RIGHTS * GRANTED HEREUNDER, EACH USER HEREBY ASSUMES SOLE RESPONSIBILITY TO SECURE * ANY OTHER INTELLECTUAL PROPERTY RIGHTS NEEDED, IF ANY. THE SOFTWARE * IS NOT FAULT-TOLERANT AND IS NOT INTENDED FOR USE IN MISSION-CRITICAL * SYSTEMS, SUCH AS THOSE USED IN THE OPERATION OF NUCLEAR FACILITIES, * AIRCRAFT NAVIGATION OR COMMUNICATION SYSTEMS, AIR TRAFFIC CONTROL * SYSTEMS, DIRECT LIFE SUPPORT MACHINES, OR WEAPONS SYSTEMS, IN WHICH * THE FAILURE OF THE SOFTWARE OR SYSTEM COULD LEAD DIRECTLY TO DEATH, * PERSONAL INJURY, OR SEVERE PHYSICAL OR ENVIRONMENTAL DAMAGE ("HIGH * RISK ACTIVITIES"). THE COPYRIGHT HOLDERS SPECIFICALLY DISCLAIM ANY * EXPRESS OR IMPLIED WARRANTY OF FITNESS FOR HIGH RISK ACTIVITIES. * * __END_OF_JASPER_LICENSE__ */ /******************************************************************************\ * Includes. \******************************************************************************/ #include <stdio.h> #include <assert.h> #include <ctype.h> #include "jasper/jas_tvp.h" #include "jasper/jas_stream.h" #include "jasper/jas_image.h" #include "jasper/jas_string.h" #include "jasper/jas_debug.h" #include "jpg_jpeglib.h" #include "jpg_cod.h" /******************************************************************************\ * Types. \******************************************************************************/ typedef struct { size_t max_samples; } jpg_dec_importopts_t; typedef enum { OPT_MAXSIZE, } optid_t; /* JPEG decoder data sink type. */ typedef struct jpg_dest_s { /* Initialize output. 
*/ void (*start_output)(j_decompress_ptr cinfo, struct jpg_dest_s *dinfo); /* Output rows of decompressed data. */ void (*put_pixel_rows)(j_decompress_ptr cinfo, struct jpg_dest_s *dinfo, JDIMENSION rows_supplied); /* Cleanup output. */ void (*finish_output)(j_decompress_ptr cinfo, struct jpg_dest_s *dinfo); /* Output buffer. */ JSAMPARRAY buffer; /* Height of output buffer. */ JDIMENSION buffer_height; /* The current row. */ JDIMENSION row; /* The image used to hold the decompressed sample data. */ jas_image_t *image; /* The row buffer. */ jas_matrix_t *data; /* The error indicator. If this is nonzero, something has gone wrong during decompression. */ int error; } jpg_dest_t; /******************************************************************************\ * Local functions. \******************************************************************************/ static void jpg_start_output(j_decompress_ptr cinfo, jpg_dest_t *dinfo); static void jpg_put_pixel_rows(j_decompress_ptr cinfo, jpg_dest_t *dinfo, JDIMENSION rows_supplied); static void jpg_finish_output(j_decompress_ptr cinfo, jpg_dest_t *dinfo); static int jpg_copystreamtofile(FILE *out, jas_stream_t *in); static jas_image_t *jpg_mkimage(j_decompress_ptr cinfo); /******************************************************************************\ * Option parsing. \******************************************************************************/ static jas_taginfo_t decopts[] = { {OPT_MAXSIZE, "max_samples"}, {-1, 0} }; static int jpg_dec_parseopts(char *optstr, jpg_dec_importopts_t *opts) { jas_tvparser_t *tvp; opts->max_samples = 64 * JAS_MEBI; if (!(tvp = jas_tvparser_create(optstr ? 
optstr : ""))) { return -1; } while (!jas_tvparser_next(tvp)) { switch (jas_taginfo_nonull(jas_taginfos_lookup(decopts, jas_tvparser_gettag(tvp)))->id) { case OPT_MAXSIZE: opts->max_samples = atoi(jas_tvparser_getval(tvp)); break; default: jas_eprintf("warning: ignoring invalid option %s\n", jas_tvparser_gettag(tvp)); break; } } jas_tvparser_destroy(tvp); return 0; } /******************************************************************************\ * Code for load operation. \******************************************************************************/ /* Load an image from a stream in the JPG format. */ jas_image_t *jpg_decode(jas_stream_t *in, char *optstr) { struct jpeg_decompress_struct cinfo; struct jpeg_error_mgr jerr; FILE *input_file; jpg_dest_t dest_mgr_buf; jpg_dest_t *dest_mgr = &dest_mgr_buf; JDIMENSION num_scanlines; jas_image_t *image; int ret; jpg_dec_importopts_t opts; size_t num_samples; JAS_DBGLOG(100, ("jpg_decode(%p, \"%s\")\n", in, optstr)); if (jpg_dec_parseopts(optstr, &opts)) { goto error; } // In theory, the two memset calls that follow are not needed. // They are only here to make the code more predictable in the event // that the JPEG library fails to initialize a member. memset(&cinfo, 0, sizeof(struct jpeg_decompress_struct)); memset(dest_mgr, 0, sizeof(jpg_dest_t)); dest_mgr->data = 0; image = 0; input_file = 0; if (!(input_file = tmpfile())) { jas_eprintf("cannot make temporary file\n"); goto error; } if (jpg_copystreamtofile(input_file, in)) { jas_eprintf("cannot copy stream\n"); goto error; } rewind(input_file); /* Allocate and initialize a JPEG decompression object. */ JAS_DBGLOG(10, ("jpeg_std_error(%p)\n", &jerr)); cinfo.err = jpeg_std_error(&jerr); JAS_DBGLOG(10, ("jpeg_create_decompress(%p)\n", &cinfo)); jpeg_create_decompress(&cinfo); /* Specify the data source for decompression. 
*/ JAS_DBGLOG(10, ("jpeg_stdio_src(%p, %p)\n", &cinfo, input_file)); jpeg_stdio_src(&cinfo, input_file); /* Read the file header to obtain the image information. */ JAS_DBGLOG(10, ("jpeg_read_header(%p, TRUE)\n", &cinfo)); ret = jpeg_read_header(&cinfo, TRUE); JAS_DBGLOG(10, ("jpeg_read_header return value %d\n", ret)); if (ret != JPEG_HEADER_OK) { jas_eprintf("jpeg_read_header did not return JPEG_HEADER_OK\n"); } JAS_DBGLOG(10, ( "header: image_width %d; image_height %d; num_components %d\n", cinfo.image_width, cinfo.image_height, cinfo.num_components) ); if (opts.max_samples > 0) { if (!jas_safe_size_mul3(cinfo.image_width, cinfo.image_height, cinfo.num_components, &num_samples)) { goto error; } if (num_samples > opts.max_samples) { jas_eprintf("image is too large (%zu > %zu)\n", num_samples, opts.max_samples); goto error; } } /* Start the decompressor. */ JAS_DBGLOG(10, ("jpeg_start_decompress(%p)\n", &cinfo)); ret = jpeg_start_decompress(&cinfo); JAS_DBGLOG(10, ("jpeg_start_decompress return value %d\n", ret)); JAS_DBGLOG(10, ( "header: output_width %d; output_height %d; output_components %d\n", cinfo.output_width, cinfo.output_height, cinfo.output_components) ); /* Create an image object to hold the decoded data. */ if (!(image = jpg_mkimage(&cinfo))) { jas_eprintf("jpg_mkimage failed\n"); goto error; } /* Initialize the data sink object. */ dest_mgr->image = image; if (!(dest_mgr->data = jas_matrix_create(1, cinfo.output_width))) { jas_eprintf("jas_matrix_create failed\n"); goto error; } dest_mgr->start_output = jpg_start_output; dest_mgr->put_pixel_rows = jpg_put_pixel_rows; dest_mgr->finish_output = jpg_finish_output; dest_mgr->buffer = (*cinfo.mem->alloc_sarray) ((j_common_ptr) &cinfo, JPOOL_IMAGE, cinfo.output_width * cinfo.output_components, (JDIMENSION) 1); dest_mgr->buffer_height = 1; dest_mgr->error = 0; /* Process the compressed data. 
*/ (*dest_mgr->start_output)(&cinfo, dest_mgr); while (cinfo.output_scanline < cinfo.output_height) { JAS_DBGLOG(10, ("jpeg_read_scanlines(%p, %p, %lu)\n", &cinfo, dest_mgr->buffer, JAS_CAST(unsigned long, dest_mgr->buffer_height))); num_scanlines = jpeg_read_scanlines(&cinfo, dest_mgr->buffer, dest_mgr->buffer_height); JAS_DBGLOG(10, ("jpeg_read_scanlines return value %lu\n", JAS_CAST(unsigned long, num_scanlines))); (*dest_mgr->put_pixel_rows)(&cinfo, dest_mgr, num_scanlines); } (*dest_mgr->finish_output)(&cinfo, dest_mgr); /* Complete the decompression process. */ JAS_DBGLOG(10, ("jpeg_finish_decompress(%p)\n", &cinfo)); jpeg_finish_decompress(&cinfo); /* Destroy the JPEG decompression object. */ JAS_DBGLOG(10, ("jpeg_destroy_decompress(%p)\n", &cinfo)); jpeg_destroy_decompress(&cinfo); jas_matrix_destroy(dest_mgr->data); JAS_DBGLOG(10, ("fclose(%p)\n", input_file)); fclose(input_file); input_file = 0; if (dest_mgr->error) { jas_eprintf("error during decoding\n"); goto error; } return image; error: if (dest_mgr->data) { jas_matrix_destroy(dest_mgr->data); } if (image) { jas_image_destroy(image); } if (input_file) { fclose(input_file); } return 0; } /******************************************************************************\ * \******************************************************************************/ static jas_image_t *jpg_mkimage(j_decompress_ptr cinfo) { jas_image_t *image; int cmptno; jas_image_cmptparm_t cmptparm; int numcmpts; JAS_DBGLOG(10, ("jpg_mkimage(%p)\n", cinfo)); image = 0; numcmpts = cinfo->output_components; if (!(image = jas_image_create0())) { goto error; } for (cmptno = 0; cmptno < numcmpts; ++cmptno) { if (cinfo->image_width > JAS_IMAGE_COORD_MAX || cinfo->image_height > JAS_IMAGE_COORD_MAX) { goto error; } cmptparm.tlx = 0; cmptparm.tly = 0; cmptparm.hstep = 1; cmptparm.vstep = 1; cmptparm.width = cinfo->image_width; cmptparm.height = cinfo->image_height; cmptparm.prec = 8; cmptparm.sgnd = false; if (jas_image_addcmpt(image, 
cmptno, &cmptparm)) { goto error; } } if (numcmpts == 3) { jas_image_setclrspc(image, JAS_CLRSPC_SRGB); jas_image_setcmpttype(image, 0, JAS_IMAGE_CT_COLOR(JAS_CLRSPC_CHANIND_RGB_R)); jas_image_setcmpttype(image, 1, JAS_IMAGE_CT_COLOR(JAS_CLRSPC_CHANIND_RGB_G)); jas_image_setcmpttype(image, 2, JAS_IMAGE_CT_COLOR(JAS_CLRSPC_CHANIND_RGB_B)); } else { jas_image_setclrspc(image, JAS_CLRSPC_SGRAY); jas_image_setcmpttype(image, 0, JAS_IMAGE_CT_COLOR(JAS_CLRSPC_CHANIND_GRAY_Y)); } return image; error: if (image) { jas_image_destroy(image); } return 0; } /******************************************************************************\ * Data source code. \******************************************************************************/ static int jpg_copystreamtofile(FILE *out, jas_stream_t *in) { int c; while ((c = jas_stream_getc(in)) != EOF) { if (fputc(c, out) == EOF) { return -1; } } if (jas_stream_error(in)) { return -1; } return 0; } /******************************************************************************\ * Data sink code. \******************************************************************************/ static void jpg_start_output(j_decompress_ptr cinfo, jpg_dest_t *dinfo) { /* Avoid compiler warnings about unused parameters. 
*/ cinfo = 0; JAS_DBGLOG(10, ("jpg_start_output(%p, %p)\n", cinfo, dinfo)); dinfo->row = 0; } static void jpg_put_pixel_rows(j_decompress_ptr cinfo, jpg_dest_t *dinfo, JDIMENSION rows_supplied) { JSAMPLE *bufptr; int cmptno; JDIMENSION x; uint_fast32_t width; JAS_DBGLOG(10, ("jpg_put_pixel_rows(%p, %p)\n", cinfo, dinfo)); if (dinfo->error) { return; } assert(cinfo->output_components == jas_image_numcmpts(dinfo->image)); for (cmptno = 0; cmptno < cinfo->output_components; ++cmptno) { width = jas_image_cmptwidth(dinfo->image, cmptno); bufptr = (dinfo->buffer[0]) + cmptno; for (x = 0; x < width; ++x) { jas_matrix_set(dinfo->data, 0, x, GETJSAMPLE(*bufptr)); bufptr += cinfo->output_components; } JAS_DBGLOG(10, ( "jas_image_writecmpt called for component %d row %lu\n", cmptno, JAS_CAST(unsigned long, dinfo->row))); if (jas_image_writecmpt(dinfo->image, cmptno, 0, dinfo->row, width, 1, dinfo->data)) { dinfo->error = 1; } } dinfo->row += rows_supplied; } static void jpg_finish_output(j_decompress_ptr cinfo, jpg_dest_t *dinfo) { JAS_DBGLOG(10, ("jpg_finish_output(%p, %p)\n", cinfo, dinfo)); /* Avoid compiler warnings about unused parameters. */ cinfo = 0; dinfo = 0; }
jas_image_t *jpg_decode(jas_stream_t *in, char *optstr) { struct jpeg_decompress_struct cinfo; struct jpeg_error_mgr jerr; FILE *input_file; jpg_dest_t dest_mgr_buf; jpg_dest_t *dest_mgr = &dest_mgr_buf; JDIMENSION num_scanlines; jas_image_t *image; int ret; jpg_dec_importopts_t opts; size_t size; if (jpg_dec_parseopts(optstr, &opts)) { goto error; } // In theory, the two memset calls that follow are not needed. // They are only here to make the code more predictable in the event // that the JPEG library fails to initialize a member. memset(&cinfo, 0, sizeof(struct jpeg_decompress_struct)); memset(dest_mgr, 0, sizeof(jpg_dest_t)); dest_mgr->data = 0; image = 0; input_file = 0; if (!(input_file = tmpfile())) { jas_eprintf("cannot make temporary file\n"); goto error; } if (jpg_copystreamtofile(input_file, in)) { jas_eprintf("cannot copy stream\n"); goto error; } rewind(input_file); /* Allocate and initialize a JPEG decompression object. */ JAS_DBGLOG(10, ("jpeg_std_error(%p)\n", &jerr)); cinfo.err = jpeg_std_error(&jerr); JAS_DBGLOG(10, ("jpeg_create_decompress(%p)\n", &cinfo)); jpeg_create_decompress(&cinfo); /* Specify the data source for decompression. */ JAS_DBGLOG(10, ("jpeg_stdio_src(%p, %p)\n", &cinfo, input_file)); jpeg_stdio_src(&cinfo, input_file); /* Read the file header to obtain the image information. */ JAS_DBGLOG(10, ("jpeg_read_header(%p, TRUE)\n", &cinfo)); ret = jpeg_read_header(&cinfo, TRUE); JAS_DBGLOG(10, ("jpeg_read_header return value %d\n", ret)); if (ret != JPEG_HEADER_OK) { jas_eprintf("jpeg_read_header did not return JPEG_HEADER_OK\n"); } JAS_DBGLOG(10, ( "header: image_width %d; image_height %d; num_components %d\n", cinfo.image_width, cinfo.image_height, cinfo.num_components) ); /* Start the decompressor. 
*/ JAS_DBGLOG(10, ("jpeg_start_decompress(%p)\n", &cinfo)); ret = jpeg_start_decompress(&cinfo); JAS_DBGLOG(10, ("jpeg_start_decompress return value %d\n", ret)); JAS_DBGLOG(10, ( "header: output_width %d; output_height %d; output_components %d\n", cinfo.output_width, cinfo.output_height, cinfo.output_components) ); if (opts.max_size) { if (!jas_safe_size_mul(cinfo.output_width, cinfo.output_height, &size) || !jas_safe_size_mul(size, cinfo.output_components, &size)) { goto error; } if (size > opts.max_size) { jas_eprintf("image is too large\n"); goto error; } } /* Create an image object to hold the decoded data. */ if (!(image = jpg_mkimage(&cinfo))) { jas_eprintf("jpg_mkimage failed\n"); goto error; } /* Initialize the data sink object. */ dest_mgr->image = image; if (!(dest_mgr->data = jas_matrix_create(1, cinfo.output_width))) { jas_eprintf("jas_matrix_create failed\n"); goto error; } dest_mgr->start_output = jpg_start_output; dest_mgr->put_pixel_rows = jpg_put_pixel_rows; dest_mgr->finish_output = jpg_finish_output; dest_mgr->buffer = (*cinfo.mem->alloc_sarray) ((j_common_ptr) &cinfo, JPOOL_IMAGE, cinfo.output_width * cinfo.output_components, (JDIMENSION) 1); dest_mgr->buffer_height = 1; dest_mgr->error = 0; /* Process the compressed data. */ (*dest_mgr->start_output)(&cinfo, dest_mgr); while (cinfo.output_scanline < cinfo.output_height) { JAS_DBGLOG(10, ("jpeg_read_scanlines(%p, %p, %lu)\n", &cinfo, dest_mgr->buffer, JAS_CAST(unsigned long, dest_mgr->buffer_height))); num_scanlines = jpeg_read_scanlines(&cinfo, dest_mgr->buffer, dest_mgr->buffer_height); JAS_DBGLOG(10, ("jpeg_read_scanlines return value %lu\n", JAS_CAST(unsigned long, num_scanlines))); (*dest_mgr->put_pixel_rows)(&cinfo, dest_mgr, num_scanlines); } (*dest_mgr->finish_output)(&cinfo, dest_mgr); /* Complete the decompression process. */ JAS_DBGLOG(10, ("jpeg_finish_decompress(%p)\n", &cinfo)); jpeg_finish_decompress(&cinfo); /* Destroy the JPEG decompression object. 
*/ JAS_DBGLOG(10, ("jpeg_destroy_decompress(%p)\n", &cinfo)); jpeg_destroy_decompress(&cinfo); jas_matrix_destroy(dest_mgr->data); JAS_DBGLOG(10, ("fclose(%p)\n", input_file)); fclose(input_file); input_file = 0; if (dest_mgr->error) { jas_eprintf("error during decoding\n"); goto error; } return image; error: if (dest_mgr->data) { jas_matrix_destroy(dest_mgr->data); } if (image) { jas_image_destroy(image); } if (input_file) { fclose(input_file); } return 0; }
jas_image_t *jpg_decode(jas_stream_t *in, char *optstr) { struct jpeg_decompress_struct cinfo; struct jpeg_error_mgr jerr; FILE *input_file; jpg_dest_t dest_mgr_buf; jpg_dest_t *dest_mgr = &dest_mgr_buf; JDIMENSION num_scanlines; jas_image_t *image; int ret; jpg_dec_importopts_t opts; size_t num_samples; JAS_DBGLOG(100, ("jpg_decode(%p, \"%s\")\n", in, optstr)); if (jpg_dec_parseopts(optstr, &opts)) { goto error; } // In theory, the two memset calls that follow are not needed. // They are only here to make the code more predictable in the event // that the JPEG library fails to initialize a member. memset(&cinfo, 0, sizeof(struct jpeg_decompress_struct)); memset(dest_mgr, 0, sizeof(jpg_dest_t)); dest_mgr->data = 0; image = 0; input_file = 0; if (!(input_file = tmpfile())) { jas_eprintf("cannot make temporary file\n"); goto error; } if (jpg_copystreamtofile(input_file, in)) { jas_eprintf("cannot copy stream\n"); goto error; } rewind(input_file); /* Allocate and initialize a JPEG decompression object. */ JAS_DBGLOG(10, ("jpeg_std_error(%p)\n", &jerr)); cinfo.err = jpeg_std_error(&jerr); JAS_DBGLOG(10, ("jpeg_create_decompress(%p)\n", &cinfo)); jpeg_create_decompress(&cinfo); /* Specify the data source for decompression. */ JAS_DBGLOG(10, ("jpeg_stdio_src(%p, %p)\n", &cinfo, input_file)); jpeg_stdio_src(&cinfo, input_file); /* Read the file header to obtain the image information. 
*/ JAS_DBGLOG(10, ("jpeg_read_header(%p, TRUE)\n", &cinfo)); ret = jpeg_read_header(&cinfo, TRUE); JAS_DBGLOG(10, ("jpeg_read_header return value %d\n", ret)); if (ret != JPEG_HEADER_OK) { jas_eprintf("jpeg_read_header did not return JPEG_HEADER_OK\n"); } JAS_DBGLOG(10, ( "header: image_width %d; image_height %d; num_components %d\n", cinfo.image_width, cinfo.image_height, cinfo.num_components) ); if (opts.max_samples > 0) { if (!jas_safe_size_mul3(cinfo.image_width, cinfo.image_height, cinfo.num_components, &num_samples)) { goto error; } if (num_samples > opts.max_samples) { jas_eprintf("image is too large (%zu > %zu)\n", num_samples, opts.max_samples); goto error; } } /* Start the decompressor. */ JAS_DBGLOG(10, ("jpeg_start_decompress(%p)\n", &cinfo)); ret = jpeg_start_decompress(&cinfo); JAS_DBGLOG(10, ("jpeg_start_decompress return value %d\n", ret)); JAS_DBGLOG(10, ( "header: output_width %d; output_height %d; output_components %d\n", cinfo.output_width, cinfo.output_height, cinfo.output_components) ); /* Create an image object to hold the decoded data. */ if (!(image = jpg_mkimage(&cinfo))) { jas_eprintf("jpg_mkimage failed\n"); goto error; } /* Initialize the data sink object. */ dest_mgr->image = image; if (!(dest_mgr->data = jas_matrix_create(1, cinfo.output_width))) { jas_eprintf("jas_matrix_create failed\n"); goto error; } dest_mgr->start_output = jpg_start_output; dest_mgr->put_pixel_rows = jpg_put_pixel_rows; dest_mgr->finish_output = jpg_finish_output; dest_mgr->buffer = (*cinfo.mem->alloc_sarray) ((j_common_ptr) &cinfo, JPOOL_IMAGE, cinfo.output_width * cinfo.output_components, (JDIMENSION) 1); dest_mgr->buffer_height = 1; dest_mgr->error = 0; /* Process the compressed data. 
*/ (*dest_mgr->start_output)(&cinfo, dest_mgr); while (cinfo.output_scanline < cinfo.output_height) { JAS_DBGLOG(10, ("jpeg_read_scanlines(%p, %p, %lu)\n", &cinfo, dest_mgr->buffer, JAS_CAST(unsigned long, dest_mgr->buffer_height))); num_scanlines = jpeg_read_scanlines(&cinfo, dest_mgr->buffer, dest_mgr->buffer_height); JAS_DBGLOG(10, ("jpeg_read_scanlines return value %lu\n", JAS_CAST(unsigned long, num_scanlines))); (*dest_mgr->put_pixel_rows)(&cinfo, dest_mgr, num_scanlines); } (*dest_mgr->finish_output)(&cinfo, dest_mgr); /* Complete the decompression process. */ JAS_DBGLOG(10, ("jpeg_finish_decompress(%p)\n", &cinfo)); jpeg_finish_decompress(&cinfo); /* Destroy the JPEG decompression object. */ JAS_DBGLOG(10, ("jpeg_destroy_decompress(%p)\n", &cinfo)); jpeg_destroy_decompress(&cinfo); jas_matrix_destroy(dest_mgr->data); JAS_DBGLOG(10, ("fclose(%p)\n", input_file)); fclose(input_file); input_file = 0; if (dest_mgr->error) { jas_eprintf("error during decoding\n"); goto error; } return image; error: if (dest_mgr->data) { jas_matrix_destroy(dest_mgr->data); } if (image) { jas_image_destroy(image); } if (input_file) { fclose(input_file); } return 0; }
{'added': [(83, 'typedef struct {'), (84, '\tsize_t max_samples;'), (85, '} jpg_dec_importopts_t;'), (86, ''), (87, 'typedef enum {'), (88, '\tOPT_MAXSIZE,'), (89, '} optid_t;'), (90, ''), (138, '* Option parsing.'), (142, '\t{OPT_MAXSIZE, "max_samples"},'), (150, '\topts->max_samples = 64 * JAS_MEBI;'), (160, '\t\t\topts->max_samples = atoi(jas_tvparser_getval(tvp));'), (191, '\tsize_t num_samples;'), (192, ''), (193, '\tJAS_DBGLOG(100, ("jpg_decode(%p, \\"%s\\")\\n", in, optstr));'), (241, '\tif (opts.max_samples > 0) {'), (242, '\t\tif (!jas_safe_size_mul3(cinfo.image_width, cinfo.image_height,'), (243, '\t\t cinfo.num_components, &num_samples)) {'), (244, '\t\t\tgoto error;'), (245, '\t\t}'), (246, '\t\tif (num_samples > opts.max_samples) {'), (247, '\t\t\tjas_eprintf("image is too large (%zu > %zu)\\n", num_samples,'), (248, '\t\t\t opts.max_samples);'), (249, '\t\t\tgoto error;'), (250, '\t\t}'), (251, '\t}'), (252, '')], 'deleted': [(130, '*'), (133, 'typedef struct {'), (134, '\tsize_t max_size;'), (135, '} jpg_dec_importopts_t;'), (136, ''), (137, 'typedef enum {'), (138, '\tOPT_MAXSIZE,'), (139, '} optid_t;'), (140, ''), (142, '\t{OPT_MAXSIZE, "max_size"},'), (150, '\topts->max_size = 0;'), (160, '\t\t\topts->max_size = atoi(jas_tvparser_getval(tvp));'), (191, '\tsize_t size;'), (248, '\tif (opts.max_size) {'), (249, '\t\tif (!jas_safe_size_mul(cinfo.output_width, cinfo.output_height,'), (250, '\t\t &size) ||'), (251, '\t\t !jas_safe_size_mul(size, cinfo.output_components, &size)) {'), (252, '\t\t\tgoto error;'), (253, '\t\t}'), (254, '\t\tif (size > opts.max_size) {'), (255, '\t\t\tjas_eprintf("image is too large\\n");'), (256, '\t\t\tgoto error;'), (257, '\t\t}'), (258, '\t}'), (259, '')]}
27
25
280
1,669
116
750
16
https://github.com/mdadams/jasper
CVE-2016-9395
CWE-20
2,703
bson.c
C++
bson_append_estart
/* bson.c */ /* Copyright 2009, 2010 10gen Inc. * * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. * You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. */ #include <stdlib.h> #include <string.h> #include <stdio.h> #include <time.h> #include <limits.h> #include "bson.h" #include "encoding.h" const int initialBufferSize = 128; /* only need one of these */ static const int zero = 0; /* Custom standard function pointers. */ void *( *bson_malloc_func )( size_t ) = malloc; void *( *bson_realloc_func )( void *, size_t ) = realloc; void ( *bson_free_func )( void * ) = free; #ifdef R_SAFETY_NET bson_printf_func bson_printf; #else bson_printf_func bson_printf = printf; #endif bson_fprintf_func bson_fprintf = fprintf; bson_sprintf_func bson_sprintf = sprintf; static int _bson_errprintf( const char *, ... ); bson_printf_func bson_errprintf = _bson_errprintf; /* ObjectId fuzz functions. 
*/ static int ( *oid_fuzz_func )( void ) = NULL; static int ( *oid_inc_func )( void ) = NULL; /* ---------------------------- READING ------------------------------ */ MONGO_EXPORT bson* bson_create( void ) { return (bson*)bson_malloc(sizeof(bson)); } MONGO_EXPORT void bson_dispose(bson* b) { bson_free(b); } MONGO_EXPORT bson *bson_empty( bson *obj ) { static char *data = "\005\0\0\0\0"; bson_init_data( obj, data ); obj->finished = 1; obj->err = 0; obj->errstr = NULL; obj->stackPos = 0; return obj; } MONGO_EXPORT int bson_copy( bson *out, const bson *in ) { if ( !out || !in ) return BSON_ERROR; if ( !in->finished ) return BSON_ERROR; bson_init_size( out, bson_size( in ) ); memcpy( out->data, in->data, bson_size( in ) ); out->finished = 1; return BSON_OK; } int bson_init_data( bson *b, char *data ) { b->data = data; return BSON_OK; } int bson_init_finished_data( bson *b, char *data ) { bson_init_data( b, data ); b->finished = 1; return BSON_OK; } static void _bson_reset( bson *b ) { b->finished = 0; b->stackPos = 0; b->err = 0; b->errstr = NULL; } MONGO_EXPORT int bson_size( const bson *b ) { int i; if ( ! b || ! 
b->data ) return 0; bson_little_endian32( &i, b->data ); return i; } MONGO_EXPORT int bson_buffer_size( const bson *b ) { return (b->cur - b->data + 1); } MONGO_EXPORT const char *bson_data( const bson *b ) { return (const char *)b->data; } static char hexbyte( char hex ) { if (hex >= '0' && hex <= '9') return (hex - '0'); else if (hex >= 'A' && hex <= 'F') return (hex - 'A' + 10); else if (hex >= 'a' && hex <= 'f') return (hex - 'a' + 10); else return 0x0; } MONGO_EXPORT void bson_oid_from_string( bson_oid_t *oid, const char *str ) { int i; for ( i=0; i<12; i++ ) { oid->bytes[i] = ( hexbyte( str[2*i] ) << 4 ) | hexbyte( str[2*i + 1] ); } } MONGO_EXPORT void bson_oid_to_string( const bson_oid_t *oid, char *str ) { static const char hex[16] = {'0','1','2','3','4','5','6','7','8','9','a','b','c','d','e','f'}; int i; for ( i=0; i<12; i++ ) { str[2*i] = hex[( oid->bytes[i] & 0xf0 ) >> 4]; str[2*i + 1] = hex[ oid->bytes[i] & 0x0f ]; } str[24] = '\0'; } MONGO_EXPORT void bson_set_oid_fuzz( int ( *func )( void ) ) { oid_fuzz_func = func; } MONGO_EXPORT void bson_set_oid_inc( int ( *func )( void ) ) { oid_inc_func = func; } MONGO_EXPORT void bson_oid_gen( bson_oid_t *oid ) { static int incr = 0; static int fuzz = 0; int i; time_t t = time( NULL ); if( oid_inc_func ) i = oid_inc_func(); else i = incr++; if ( !fuzz ) { if ( oid_fuzz_func ) fuzz = oid_fuzz_func(); else { srand( ( int )t ); fuzz = rand(); } } bson_big_endian32( &oid->ints[0], &t ); oid->ints[1] = fuzz; bson_big_endian32( &oid->ints[2], &i ); } MONGO_EXPORT time_t bson_oid_generated_time( bson_oid_t *oid ) { time_t out; bson_big_endian32( &out, &oid->ints[0] ); return out; } MONGO_EXPORT void bson_print( const bson *b ) { bson_print_raw( b->data , 0 ); } MONGO_EXPORT void bson_print_raw( const char *data , int depth ) { bson_iterator i; const char *key; int temp; bson_timestamp_t ts; char oidhex[25]; bson scope; bson_iterator_from_buffer( &i, data ); while ( bson_iterator_next( &i ) ) { bson_type t = 
bson_iterator_type( &i ); if ( t == 0 ) break; key = bson_iterator_key( &i ); for ( temp=0; temp<=depth; temp++ ) bson_printf( "\t" ); bson_printf( "%s : %d \t " , key , t ); switch ( t ) { case BSON_DOUBLE: bson_printf( "%f" , bson_iterator_double( &i ) ); break; case BSON_STRING: bson_printf( "%s" , bson_iterator_string( &i ) ); break; case BSON_SYMBOL: bson_printf( "SYMBOL: %s" , bson_iterator_string( &i ) ); break; case BSON_OID: bson_oid_to_string( bson_iterator_oid( &i ), oidhex ); bson_printf( "%s" , oidhex ); break; case BSON_BOOL: bson_printf( "%s" , bson_iterator_bool( &i ) ? "true" : "false" ); break; case BSON_DATE: bson_printf( "%ld" , ( long int )bson_iterator_date( &i ) ); break; case BSON_BINDATA: bson_printf( "BSON_BINDATA" ); break; case BSON_UNDEFINED: bson_printf( "BSON_UNDEFINED" ); break; case BSON_NULL: bson_printf( "BSON_NULL" ); break; case BSON_REGEX: bson_printf( "BSON_REGEX: %s", bson_iterator_regex( &i ) ); break; case BSON_CODE: bson_printf( "BSON_CODE: %s", bson_iterator_code( &i ) ); break; case BSON_CODEWSCOPE: bson_printf( "BSON_CODE_W_SCOPE: %s", bson_iterator_code( &i ) ); /* bson_init( &scope ); */ /* review - stepped on by bson_iterator_code_scope? 
*/ bson_iterator_code_scope( &i, &scope ); bson_printf( "\n\t SCOPE: " ); bson_print( &scope ); /* bson_destroy( &scope ); */ /* review - causes free error */ break; case BSON_INT: bson_printf( "%d" , bson_iterator_int( &i ) ); break; case BSON_LONG: bson_printf( "%lld" , ( uint64_t )bson_iterator_long( &i ) ); break; case BSON_TIMESTAMP: ts = bson_iterator_timestamp( &i ); bson_printf( "i: %d, t: %d", ts.i, ts.t ); break; case BSON_OBJECT: case BSON_ARRAY: bson_printf( "\n" ); bson_print_raw( bson_iterator_value( &i ) , depth + 1 ); break; default: bson_errprintf( "can't print type : %d\n" , t ); } bson_printf( "\n" ); } } /* ---------------------------- ITERATOR ------------------------------ */ MONGO_EXPORT bson_iterator* bson_iterator_create( void ) { return ( bson_iterator* )malloc( sizeof( bson_iterator ) ); } MONGO_EXPORT void bson_iterator_dispose(bson_iterator* i) { free(i); } MONGO_EXPORT void bson_iterator_init( bson_iterator *i, const bson *b ) { i->cur = b->data + 4; i->first = 1; } MONGO_EXPORT void bson_iterator_from_buffer( bson_iterator *i, const char *buffer ) { i->cur = buffer + 4; i->first = 1; } MONGO_EXPORT bson_type bson_find( bson_iterator *it, const bson *obj, const char *name ) { bson_iterator_init( it, (bson *)obj ); while( bson_iterator_next( it ) ) { if ( strcmp( name, bson_iterator_key( it ) ) == 0 ) break; } return bson_iterator_type( it ); } MONGO_EXPORT bson_bool_t bson_iterator_more( const bson_iterator *i ) { return *( i->cur ); } MONGO_EXPORT bson_type bson_iterator_next( bson_iterator *i ) { int ds; if ( i->first ) { i->first = 0; return ( bson_type )( *i->cur ); } switch ( bson_iterator_type( i ) ) { case BSON_EOO: return BSON_EOO; /* don't advance */ case BSON_UNDEFINED: case BSON_NULL: ds = 0; break; case BSON_BOOL: ds = 1; break; case BSON_INT: ds = 4; break; case BSON_LONG: case BSON_DOUBLE: case BSON_TIMESTAMP: case BSON_DATE: ds = 8; break; case BSON_OID: ds = 12; break; case BSON_STRING: case BSON_SYMBOL: case BSON_CODE: 
ds = 4 + bson_iterator_int_raw( i ); break; case BSON_BINDATA: ds = 5 + bson_iterator_int_raw( i ); break; case BSON_OBJECT: case BSON_ARRAY: case BSON_CODEWSCOPE: ds = bson_iterator_int_raw( i ); break; case BSON_DBREF: ds = 4+12 + bson_iterator_int_raw( i ); break; case BSON_REGEX: { const char *s = bson_iterator_value( i ); const char *p = s; p += strlen( p )+1; p += strlen( p )+1; ds = p-s; break; } default: { char msg[] = "unknown type: 000000000000"; bson_numstr( msg+14, ( unsigned )( i->cur[0] ) ); bson_fatal_msg( 0, msg ); return 0; } } i->cur += 1 + strlen( i->cur + 1 ) + 1 + ds; return ( bson_type )( *i->cur ); } MONGO_EXPORT bson_type bson_iterator_type( const bson_iterator *i ) { return ( bson_type )i->cur[0]; } MONGO_EXPORT const char *bson_iterator_key( const bson_iterator *i ) { return i->cur + 1; } MONGO_EXPORT const char *bson_iterator_value( const bson_iterator *i ) { const char *t = i->cur + 1; t += strlen( t ) + 1; return t; } /* types */ int bson_iterator_int_raw( const bson_iterator *i ) { int out; bson_little_endian32( &out, bson_iterator_value( i ) ); return out; } double bson_iterator_double_raw( const bson_iterator *i ) { double out; bson_little_endian64( &out, bson_iterator_value( i ) ); return out; } int64_t bson_iterator_long_raw( const bson_iterator *i ) { int64_t out; bson_little_endian64( &out, bson_iterator_value( i ) ); return out; } bson_bool_t bson_iterator_bool_raw( const bson_iterator *i ) { return bson_iterator_value( i )[0]; } MONGO_EXPORT bson_oid_t *bson_iterator_oid( const bson_iterator *i ) { return ( bson_oid_t * )bson_iterator_value( i ); } MONGO_EXPORT int bson_iterator_int( const bson_iterator *i ) { switch ( bson_iterator_type( i ) ) { case BSON_INT: return bson_iterator_int_raw( i ); case BSON_LONG: return bson_iterator_long_raw( i ); case BSON_DOUBLE: return bson_iterator_double_raw( i ); default: return 0; } } MONGO_EXPORT double bson_iterator_double( const bson_iterator *i ) { switch ( bson_iterator_type( i ) ) { 
case BSON_INT: return bson_iterator_int_raw( i ); case BSON_LONG: return bson_iterator_long_raw( i ); case BSON_DOUBLE: return bson_iterator_double_raw( i ); default: return 0; } } MONGO_EXPORT int64_t bson_iterator_long( const bson_iterator *i ) { switch ( bson_iterator_type( i ) ) { case BSON_INT: return bson_iterator_int_raw( i ); case BSON_LONG: return bson_iterator_long_raw( i ); case BSON_DOUBLE: return bson_iterator_double_raw( i ); default: return 0; } } MONGO_EXPORT bson_timestamp_t bson_iterator_timestamp( const bson_iterator *i ) { bson_timestamp_t ts; bson_little_endian32( &( ts.i ), bson_iterator_value( i ) ); bson_little_endian32( &( ts.t ), bson_iterator_value( i ) + 4 ); return ts; } MONGO_EXPORT int bson_iterator_timestamp_time( const bson_iterator *i ) { int time; bson_little_endian32( &time, bson_iterator_value( i ) + 4 ); return time; } MONGO_EXPORT int bson_iterator_timestamp_increment( const bson_iterator *i ) { int increment; bson_little_endian32( &increment, bson_iterator_value( i ) ); return increment; } MONGO_EXPORT bson_bool_t bson_iterator_bool( const bson_iterator *i ) { switch ( bson_iterator_type( i ) ) { case BSON_BOOL: return bson_iterator_bool_raw( i ); case BSON_INT: return bson_iterator_int_raw( i ) != 0; case BSON_LONG: return bson_iterator_long_raw( i ) != 0; case BSON_DOUBLE: return bson_iterator_double_raw( i ) != 0; case BSON_EOO: case BSON_NULL: return 0; default: return 1; } } MONGO_EXPORT const char *bson_iterator_string( const bson_iterator *i ) { switch ( bson_iterator_type( i ) ) { case BSON_STRING: case BSON_SYMBOL: return bson_iterator_value( i ) + 4; default: return ""; } } int bson_iterator_string_len( const bson_iterator *i ) { return bson_iterator_int_raw( i ); } MONGO_EXPORT const char *bson_iterator_code( const bson_iterator *i ) { switch ( bson_iterator_type( i ) ) { case BSON_STRING: case BSON_CODE: return bson_iterator_value( i ) + 4; case BSON_CODEWSCOPE: return bson_iterator_value( i ) + 8; default: return 
NULL; } } MONGO_EXPORT void bson_iterator_code_scope( const bson_iterator *i, bson *scope ) { if ( bson_iterator_type( i ) == BSON_CODEWSCOPE ) { int code_len; bson_little_endian32( &code_len, bson_iterator_value( i )+4 ); bson_init_data( scope, ( void * )( bson_iterator_value( i )+8+code_len ) ); _bson_reset( scope ); scope->finished = 1; } else { bson_empty( scope ); } } MONGO_EXPORT bson_date_t bson_iterator_date( const bson_iterator *i ) { return bson_iterator_long_raw( i ); } MONGO_EXPORT time_t bson_iterator_time_t( const bson_iterator *i ) { return bson_iterator_date( i ) / 1000; } MONGO_EXPORT int bson_iterator_bin_len( const bson_iterator *i ) { return ( bson_iterator_bin_type( i ) == BSON_BIN_BINARY_OLD ) ? bson_iterator_int_raw( i ) - 4 : bson_iterator_int_raw( i ); } MONGO_EXPORT char bson_iterator_bin_type( const bson_iterator *i ) { return bson_iterator_value( i )[4]; } MONGO_EXPORT const char *bson_iterator_bin_data( const bson_iterator *i ) { return ( bson_iterator_bin_type( i ) == BSON_BIN_BINARY_OLD ) ? 
bson_iterator_value( i ) + 9 : bson_iterator_value( i ) + 5; } MONGO_EXPORT const char *bson_iterator_regex( const bson_iterator *i ) { return bson_iterator_value( i ); } MONGO_EXPORT const char *bson_iterator_regex_opts( const bson_iterator *i ) { const char *p = bson_iterator_value( i ); return p + strlen( p ) + 1; } MONGO_EXPORT void bson_iterator_subobject( const bson_iterator *i, bson *sub ) { bson_init_data( sub, ( char * )bson_iterator_value( i ) ); _bson_reset( sub ); sub->finished = 1; } MONGO_EXPORT void bson_iterator_subiterator( const bson_iterator *i, bson_iterator *sub ) { bson_iterator_from_buffer( sub, bson_iterator_value( i ) ); } /* ---------------------------- BUILDING ------------------------------ */ static void _bson_init_size( bson *b, int size ) { if( size == 0 ) b->data = NULL; else b->data = ( char * )bson_malloc( size ); b->dataSize = size; b->cur = b->data + 4; _bson_reset( b ); } MONGO_EXPORT void bson_init( bson *b ) { _bson_init_size( b, initialBufferSize ); } void bson_init_size( bson *b, int size ) { _bson_init_size( b, size ); } static void bson_append_byte( bson *b, char c ) { b->cur[0] = c; b->cur++; } static void bson_append( bson *b, const void *data, int len ) { memcpy( b->cur , data , len ); b->cur += len; } static void bson_append32( bson *b, const void *data ) { bson_little_endian32( b->cur, data ); b->cur += 4; } static void bson_append64( bson *b, const void *data ) { bson_little_endian64( b->cur, data ); b->cur += 8; } int bson_ensure_space( bson *b, const int bytesNeeded ) { int pos = b->cur - b->data; char *orig = b->data; int new_size; if ( pos + bytesNeeded <= b->dataSize ) return BSON_OK; new_size = 1.5 * ( b->dataSize + bytesNeeded ); if( new_size < b->dataSize ) { if( ( b->dataSize + bytesNeeded ) < INT_MAX ) new_size = INT_MAX; else { b->err = BSON_SIZE_OVERFLOW; return BSON_ERROR; } } b->data = bson_realloc( b->data, new_size ); if ( !b->data ) bson_fatal_msg( !!b->data, "realloc() failed" ); b->dataSize = 
new_size; b->cur += b->data - orig; return BSON_OK; } MONGO_EXPORT int bson_finish( bson *b ) { int i; if( b->err & BSON_NOT_UTF8 ) return BSON_ERROR; if ( ! b->finished ) { if ( bson_ensure_space( b, 1 ) == BSON_ERROR ) return BSON_ERROR; bson_append_byte( b, 0 ); i = b->cur - b->data; bson_little_endian32( b->data, &i ); b->finished = 1; } return BSON_OK; } MONGO_EXPORT void bson_destroy( bson *b ) { if (b) { bson_free( b->data ); b->err = 0; b->data = 0; b->cur = 0; b->finished = 1; } } static int bson_append_estart( bson *b, int type, const char *name, const int dataSize ) { const int len = strlen( name ) + 1; if ( b->finished ) { b->err |= BSON_ALREADY_FINISHED; return BSON_ERROR; } if ( bson_ensure_space( b, 1 + len + dataSize ) == BSON_ERROR ) { return BSON_ERROR; } if( bson_check_field_name( b, ( const char * )name, len - 1 ) == BSON_ERROR ) { bson_builder_error( b ); return BSON_ERROR; } bson_append_byte( b, ( char )type ); bson_append( b, name, len ); return BSON_OK; } /* ---------------------------- BUILDING TYPES ------------------------------ */ MONGO_EXPORT int bson_append_int( bson *b, const char *name, const int i ) { if ( bson_append_estart( b, BSON_INT, name, 4 ) == BSON_ERROR ) return BSON_ERROR; bson_append32( b , &i ); return BSON_OK; } MONGO_EXPORT int bson_append_long( bson *b, const char *name, const int64_t i ) { if ( bson_append_estart( b , BSON_LONG, name, 8 ) == BSON_ERROR ) return BSON_ERROR; bson_append64( b , &i ); return BSON_OK; } MONGO_EXPORT int bson_append_double( bson *b, const char *name, const double d ) { if ( bson_append_estart( b, BSON_DOUBLE, name, 8 ) == BSON_ERROR ) return BSON_ERROR; bson_append64( b , &d ); return BSON_OK; } MONGO_EXPORT int bson_append_bool( bson *b, const char *name, const bson_bool_t i ) { if ( bson_append_estart( b, BSON_BOOL, name, 1 ) == BSON_ERROR ) return BSON_ERROR; bson_append_byte( b , i != 0 ); return BSON_OK; } MONGO_EXPORT int bson_append_null( bson *b, const char *name ) { if ( 
bson_append_estart( b , BSON_NULL, name, 0 ) == BSON_ERROR ) return BSON_ERROR; return BSON_OK; } MONGO_EXPORT int bson_append_undefined( bson *b, const char *name ) { if ( bson_append_estart( b, BSON_UNDEFINED, name, 0 ) == BSON_ERROR ) return BSON_ERROR; return BSON_OK; } static int bson_append_string_base( bson *b, const char *name, const char *value, int len, bson_type type ) { int sl = len + 1; if ( bson_check_string( b, ( const char * )value, sl - 1 ) == BSON_ERROR ) return BSON_ERROR; if ( bson_append_estart( b, type, name, 4 + sl ) == BSON_ERROR ) { return BSON_ERROR; } bson_append32( b , &sl ); bson_append( b , value , sl - 1 ); bson_append( b , "\0" , 1 ); return BSON_OK; } MONGO_EXPORT int bson_append_string( bson *b, const char *name, const char *value ) { return bson_append_string_base( b, name, value, strlen ( value ), BSON_STRING ); } MONGO_EXPORT int bson_append_symbol( bson *b, const char *name, const char *value ) { return bson_append_string_base( b, name, value, strlen ( value ), BSON_SYMBOL ); } MONGO_EXPORT int bson_append_code( bson *b, const char *name, const char *value ) { return bson_append_string_base( b, name, value, strlen ( value ), BSON_CODE ); } MONGO_EXPORT int bson_append_string_n( bson *b, const char *name, const char *value, int len ) { return bson_append_string_base( b, name, value, len, BSON_STRING ); } MONGO_EXPORT int bson_append_symbol_n( bson *b, const char *name, const char *value, int len ) { return bson_append_string_base( b, name, value, len, BSON_SYMBOL ); } MONGO_EXPORT int bson_append_code_n( bson *b, const char *name, const char *value, int len ) { return bson_append_string_base( b, name, value, len, BSON_CODE ); } MONGO_EXPORT int bson_append_code_w_scope_n( bson *b, const char *name, const char *code, int len, const bson *scope ) { int sl, size; if ( !scope ) return BSON_ERROR; sl = len + 1; size = 4 + 4 + sl + bson_size( scope ); if ( bson_append_estart( b, BSON_CODEWSCOPE, name, size ) == BSON_ERROR ) return 
BSON_ERROR; bson_append32( b, &size ); bson_append32( b, &sl ); bson_append( b, code, sl ); bson_append( b, scope->data, bson_size( scope ) ); return BSON_OK; } MONGO_EXPORT int bson_append_code_w_scope( bson *b, const char *name, const char *code, const bson *scope ) { return bson_append_code_w_scope_n( b, name, code, strlen ( code ), scope ); } MONGO_EXPORT int bson_append_binary( bson *b, const char *name, char type, const char *str, int len ) { if ( type == BSON_BIN_BINARY_OLD ) { int subtwolen = len + 4; if ( bson_append_estart( b, BSON_BINDATA, name, 4+1+4+len ) == BSON_ERROR ) return BSON_ERROR; bson_append32( b, &subtwolen ); bson_append_byte( b, type ); bson_append32( b, &len ); bson_append( b, str, len ); } else { if ( bson_append_estart( b, BSON_BINDATA, name, 4+1+len ) == BSON_ERROR ) return BSON_ERROR; bson_append32( b, &len ); bson_append_byte( b, type ); bson_append( b, str, len ); } return BSON_OK; } MONGO_EXPORT int bson_append_oid( bson *b, const char *name, const bson_oid_t *oid ) { if ( bson_append_estart( b, BSON_OID, name, 12 ) == BSON_ERROR ) return BSON_ERROR; bson_append( b , oid , 12 ); return BSON_OK; } MONGO_EXPORT int bson_append_new_oid( bson *b, const char *name ) { bson_oid_t oid; bson_oid_gen( &oid ); return bson_append_oid( b, name, &oid ); } MONGO_EXPORT int bson_append_regex( bson *b, const char *name, const char *pattern, const char *opts ) { const int plen = strlen( pattern )+1; const int olen = strlen( opts )+1; if ( bson_append_estart( b, BSON_REGEX, name, plen + olen ) == BSON_ERROR ) return BSON_ERROR; if ( bson_check_string( b, pattern, plen - 1 ) == BSON_ERROR ) return BSON_ERROR; bson_append( b , pattern , plen ); bson_append( b , opts , olen ); return BSON_OK; } MONGO_EXPORT int bson_append_bson( bson *b, const char *name, const bson *bson ) { if ( !bson ) return BSON_ERROR; if ( bson_append_estart( b, BSON_OBJECT, name, bson_size( bson ) ) == BSON_ERROR ) return BSON_ERROR; bson_append( b , bson->data , bson_size( bson 
) ); return BSON_OK; } MONGO_EXPORT int bson_append_element( bson *b, const char *name_or_null, const bson_iterator *elem ) { bson_iterator next = *elem; int size; bson_iterator_next( &next ); size = next.cur - elem->cur; if ( name_or_null == NULL ) { if( bson_ensure_space( b, size ) == BSON_ERROR ) return BSON_ERROR; bson_append( b, elem->cur, size ); } else { int data_size = size - 2 - strlen( bson_iterator_key( elem ) ); bson_append_estart( b, elem->cur[0], name_or_null, data_size ); bson_append( b, bson_iterator_value( elem ), data_size ); } return BSON_OK; } MONGO_EXPORT int bson_append_timestamp( bson *b, const char *name, bson_timestamp_t *ts ) { if ( bson_append_estart( b, BSON_TIMESTAMP, name, 8 ) == BSON_ERROR ) return BSON_ERROR; bson_append32( b , &( ts->i ) ); bson_append32( b , &( ts->t ) ); return BSON_OK; } MONGO_EXPORT int bson_append_timestamp2( bson *b, const char *name, int time, int increment ) { if ( bson_append_estart( b, BSON_TIMESTAMP, name, 8 ) == BSON_ERROR ) return BSON_ERROR; bson_append32( b , &increment ); bson_append32( b , &time ); return BSON_OK; } MONGO_EXPORT int bson_append_date( bson *b, const char *name, bson_date_t millis ) { if ( bson_append_estart( b, BSON_DATE, name, 8 ) == BSON_ERROR ) return BSON_ERROR; bson_append64( b , &millis ); return BSON_OK; } MONGO_EXPORT int bson_append_time_t( bson *b, const char *name, time_t secs ) { return bson_append_date( b, name, ( bson_date_t )secs * 1000 ); } MONGO_EXPORT int bson_append_start_object( bson *b, const char *name ) { if ( bson_append_estart( b, BSON_OBJECT, name, 5 ) == BSON_ERROR ) return BSON_ERROR; b->stack[ b->stackPos++ ] = b->cur - b->data; bson_append32( b , &zero ); return BSON_OK; } MONGO_EXPORT int bson_append_start_array( bson *b, const char *name ) { if ( bson_append_estart( b, BSON_ARRAY, name, 5 ) == BSON_ERROR ) return BSON_ERROR; b->stack[ b->stackPos++ ] = b->cur - b->data; bson_append32( b , &zero ); return BSON_OK; } MONGO_EXPORT int 
bson_append_finish_object( bson *b ) { char *start; int i; if ( bson_ensure_space( b, 1 ) == BSON_ERROR ) return BSON_ERROR; bson_append_byte( b , 0 ); start = b->data + b->stack[ --b->stackPos ]; i = b->cur - start; bson_little_endian32( start, &i ); return BSON_OK; } MONGO_EXPORT double bson_int64_to_double( int64_t i64 ) { return (double)i64; } MONGO_EXPORT int bson_append_finish_array( bson *b ) { return bson_append_finish_object( b ); } /* Error handling and allocators. */ static bson_err_handler err_handler = NULL; MONGO_EXPORT bson_err_handler set_bson_err_handler( bson_err_handler func ) { bson_err_handler old = err_handler; err_handler = func; return old; } MONGO_EXPORT void bson_free( void *ptr ) { bson_free_func( ptr ); } MONGO_EXPORT void *bson_malloc( int size ) { void *p; p = bson_malloc_func( size ); bson_fatal_msg( !!p, "malloc() failed" ); return p; } void *bson_realloc( void *ptr, int size ) { void *p; p = bson_realloc_func( ptr, size ); bson_fatal_msg( !!p, "realloc() failed" ); return p; } int _bson_errprintf( const char *format, ... ) { va_list ap; int ret = 0; va_start( ap, format ); #ifndef R_SAFETY_NET ret = vfprintf( stderr, format, ap ); #endif va_end( ap ); return ret; } /** * This method is invoked when a non-fatal bson error is encountered. * Calls the error handler if available. * * @param */ void bson_builder_error( bson *b ) { if( err_handler ) err_handler( "BSON error." ); } void bson_fatal( int ok ) { bson_fatal_msg( ok, "" ); } void bson_fatal_msg( int ok , const char *msg ) { if ( ok ) return; if ( err_handler ) { err_handler( msg ); } #ifndef R_SAFETY_NET bson_errprintf( "error: %s\n" , msg ); exit( -5 ); #endif } /* Efficiently copy an integer to a string. 
*/ extern const char bson_numstrs[1000][4]; void bson_numstr( char *str, int i ) { if( i < 1000 ) memcpy( str, bson_numstrs[i], 4 ); else bson_sprintf( str,"%d", i ); } MONGO_EXPORT void bson_swap_endian64( void *outp, const void *inp ) { const char *in = ( const char * )inp; char *out = ( char * )outp; out[0] = in[7]; out[1] = in[6]; out[2] = in[5]; out[3] = in[4]; out[4] = in[3]; out[5] = in[2]; out[6] = in[1]; out[7] = in[0]; } MONGO_EXPORT void bson_swap_endian32( void *outp, const void *inp ) { const char *in = ( const char * )inp; char *out = ( char * )outp; out[0] = in[3]; out[1] = in[2]; out[2] = in[1]; out[3] = in[0]; }
/* bson.c */ /* Copyright 2009, 2010 10gen Inc. * * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. * You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. */ #include <stdlib.h> #include <string.h> #include <stdio.h> #include <time.h> #include <limits.h> #include "bson.h" #include "encoding.h" const int initialBufferSize = 128; /* only need one of these */ static const int zero = 0; /* Custom standard function pointers. */ void *( *bson_malloc_func )( size_t ) = malloc; void *( *bson_realloc_func )( void *, size_t ) = realloc; void ( *bson_free_func )( void * ) = free; #ifdef R_SAFETY_NET bson_printf_func bson_printf; #else bson_printf_func bson_printf = printf; #endif bson_fprintf_func bson_fprintf = fprintf; bson_sprintf_func bson_sprintf = sprintf; static int _bson_errprintf( const char *, ... ); bson_printf_func bson_errprintf = _bson_errprintf; /* ObjectId fuzz functions. 
*/ static int ( *oid_fuzz_func )( void ) = NULL; static int ( *oid_inc_func )( void ) = NULL; /* ---------------------------- READING ------------------------------ */ MONGO_EXPORT bson* bson_create( void ) { return (bson*)bson_malloc(sizeof(bson)); } MONGO_EXPORT void bson_dispose(bson* b) { bson_free(b); } MONGO_EXPORT bson *bson_empty( bson *obj ) { static char *data = "\005\0\0\0\0"; bson_init_data( obj, data ); obj->finished = 1; obj->err = 0; obj->errstr = NULL; obj->stackPos = 0; return obj; } MONGO_EXPORT int bson_copy( bson *out, const bson *in ) { if ( !out || !in ) return BSON_ERROR; if ( !in->finished ) return BSON_ERROR; bson_init_size( out, bson_size( in ) ); memcpy( out->data, in->data, bson_size( in ) ); out->finished = 1; return BSON_OK; } int bson_init_data( bson *b, char *data ) { b->data = data; return BSON_OK; } int bson_init_finished_data( bson *b, char *data ) { bson_init_data( b, data ); b->finished = 1; return BSON_OK; } static void _bson_reset( bson *b ) { b->finished = 0; b->stackPos = 0; b->err = 0; b->errstr = NULL; } MONGO_EXPORT int bson_size( const bson *b ) { int i; if ( ! b || ! 
b->data ) return 0; bson_little_endian32( &i, b->data ); return i; } MONGO_EXPORT size_t bson_buffer_size( const bson *b ) { return (b->cur - b->data + 1); } MONGO_EXPORT const char *bson_data( const bson *b ) { return (const char *)b->data; } static char hexbyte( char hex ) { if (hex >= '0' && hex <= '9') return (hex - '0'); else if (hex >= 'A' && hex <= 'F') return (hex - 'A' + 10); else if (hex >= 'a' && hex <= 'f') return (hex - 'a' + 10); else return 0x0; } MONGO_EXPORT void bson_oid_from_string( bson_oid_t *oid, const char *str ) { int i; for ( i=0; i<12; i++ ) { oid->bytes[i] = ( hexbyte( str[2*i] ) << 4 ) | hexbyte( str[2*i + 1] ); } } MONGO_EXPORT void bson_oid_to_string( const bson_oid_t *oid, char *str ) { static const char hex[16] = {'0','1','2','3','4','5','6','7','8','9','a','b','c','d','e','f'}; int i; for ( i=0; i<12; i++ ) { str[2*i] = hex[( oid->bytes[i] & 0xf0 ) >> 4]; str[2*i + 1] = hex[ oid->bytes[i] & 0x0f ]; } str[24] = '\0'; } MONGO_EXPORT void bson_set_oid_fuzz( int ( *func )( void ) ) { oid_fuzz_func = func; } MONGO_EXPORT void bson_set_oid_inc( int ( *func )( void ) ) { oid_inc_func = func; } MONGO_EXPORT void bson_oid_gen( bson_oid_t *oid ) { static int incr = 0; static int fuzz = 0; int i; time_t t = time( NULL ); if( oid_inc_func ) i = oid_inc_func(); else i = incr++; if ( !fuzz ) { if ( oid_fuzz_func ) fuzz = oid_fuzz_func(); else { srand( ( int )t ); fuzz = rand(); } } bson_big_endian32( &oid->ints[0], &t ); oid->ints[1] = fuzz; bson_big_endian32( &oid->ints[2], &i ); } MONGO_EXPORT time_t bson_oid_generated_time( bson_oid_t *oid ) { time_t out; bson_big_endian32( &out, &oid->ints[0] ); return out; } MONGO_EXPORT void bson_print( const bson *b ) { bson_print_raw( b->data , 0 ); } MONGO_EXPORT void bson_print_raw( const char *data , int depth ) { bson_iterator i; const char *key; int temp; bson_timestamp_t ts; char oidhex[25]; bson scope; bson_iterator_from_buffer( &i, data ); while ( bson_iterator_next( &i ) ) { bson_type t = 
bson_iterator_type( &i ); if ( t == 0 ) break; key = bson_iterator_key( &i ); for ( temp=0; temp<=depth; temp++ ) bson_printf( "\t" ); bson_printf( "%s : %d \t " , key , t ); switch ( t ) { case BSON_DOUBLE: bson_printf( "%f" , bson_iterator_double( &i ) ); break; case BSON_STRING: bson_printf( "%s" , bson_iterator_string( &i ) ); break; case BSON_SYMBOL: bson_printf( "SYMBOL: %s" , bson_iterator_string( &i ) ); break; case BSON_OID: bson_oid_to_string( bson_iterator_oid( &i ), oidhex ); bson_printf( "%s" , oidhex ); break; case BSON_BOOL: bson_printf( "%s" , bson_iterator_bool( &i ) ? "true" : "false" ); break; case BSON_DATE: bson_printf( "%ld" , ( long int )bson_iterator_date( &i ) ); break; case BSON_BINDATA: bson_printf( "BSON_BINDATA" ); break; case BSON_UNDEFINED: bson_printf( "BSON_UNDEFINED" ); break; case BSON_NULL: bson_printf( "BSON_NULL" ); break; case BSON_REGEX: bson_printf( "BSON_REGEX: %s", bson_iterator_regex( &i ) ); break; case BSON_CODE: bson_printf( "BSON_CODE: %s", bson_iterator_code( &i ) ); break; case BSON_CODEWSCOPE: bson_printf( "BSON_CODE_W_SCOPE: %s", bson_iterator_code( &i ) ); /* bson_init( &scope ); */ /* review - stepped on by bson_iterator_code_scope? 
*/ bson_iterator_code_scope( &i, &scope ); bson_printf( "\n\t SCOPE: " ); bson_print( &scope ); /* bson_destroy( &scope ); */ /* review - causes free error */ break; case BSON_INT: bson_printf( "%d" , bson_iterator_int( &i ) ); break; case BSON_LONG: bson_printf( "%lld" , ( uint64_t )bson_iterator_long( &i ) ); break; case BSON_TIMESTAMP: ts = bson_iterator_timestamp( &i ); bson_printf( "i: %d, t: %d", ts.i, ts.t ); break; case BSON_OBJECT: case BSON_ARRAY: bson_printf( "\n" ); bson_print_raw( bson_iterator_value( &i ) , depth + 1 ); break; default: bson_errprintf( "can't print type : %d\n" , t ); } bson_printf( "\n" ); } } /* ---------------------------- ITERATOR ------------------------------ */ MONGO_EXPORT bson_iterator* bson_iterator_create( void ) { return ( bson_iterator* )malloc( sizeof( bson_iterator ) ); } MONGO_EXPORT void bson_iterator_dispose(bson_iterator* i) { free(i); } MONGO_EXPORT void bson_iterator_init( bson_iterator *i, const bson *b ) { i->cur = b->data + 4; i->first = 1; } MONGO_EXPORT void bson_iterator_from_buffer( bson_iterator *i, const char *buffer ) { i->cur = buffer + 4; i->first = 1; } MONGO_EXPORT bson_type bson_find( bson_iterator *it, const bson *obj, const char *name ) { bson_iterator_init( it, (bson *)obj ); while( bson_iterator_next( it ) ) { if ( strcmp( name, bson_iterator_key( it ) ) == 0 ) break; } return bson_iterator_type( it ); } MONGO_EXPORT bson_bool_t bson_iterator_more( const bson_iterator *i ) { return *( i->cur ); } MONGO_EXPORT bson_type bson_iterator_next( bson_iterator *i ) { size_t ds; if ( i->first ) { i->first = 0; return ( bson_type )( *i->cur ); } switch ( bson_iterator_type( i ) ) { case BSON_EOO: return BSON_EOO; /* don't advance */ case BSON_UNDEFINED: case BSON_NULL: ds = 0; break; case BSON_BOOL: ds = 1; break; case BSON_INT: ds = 4; break; case BSON_LONG: case BSON_DOUBLE: case BSON_TIMESTAMP: case BSON_DATE: ds = 8; break; case BSON_OID: ds = 12; break; case BSON_STRING: case BSON_SYMBOL: case 
BSON_CODE: ds = 4 + bson_iterator_int_raw( i ); break; case BSON_BINDATA: ds = 5 + bson_iterator_int_raw( i ); break; case BSON_OBJECT: case BSON_ARRAY: case BSON_CODEWSCOPE: ds = bson_iterator_int_raw( i ); break; case BSON_DBREF: ds = 4+12 + bson_iterator_int_raw( i ); break; case BSON_REGEX: { const char *s = bson_iterator_value( i ); const char *p = s; p += strlen( p )+1; p += strlen( p )+1; ds = p-s; break; } default: { char msg[] = "unknown type: 000000000000"; bson_numstr( msg+14, ( unsigned )( i->cur[0] ) ); bson_fatal_msg( 0, msg ); return 0; } } i->cur += 1 + strlen( i->cur + 1 ) + 1 + ds; return ( bson_type )( *i->cur ); } MONGO_EXPORT bson_type bson_iterator_type( const bson_iterator *i ) { return ( bson_type )i->cur[0]; } MONGO_EXPORT const char *bson_iterator_key( const bson_iterator *i ) { return i->cur + 1; } MONGO_EXPORT const char *bson_iterator_value( const bson_iterator *i ) { const char *t = i->cur + 1; t += strlen( t ) + 1; return t; } /* types */ int bson_iterator_int_raw( const bson_iterator *i ) { int out; bson_little_endian32( &out, bson_iterator_value( i ) ); return out; } double bson_iterator_double_raw( const bson_iterator *i ) { double out; bson_little_endian64( &out, bson_iterator_value( i ) ); return out; } int64_t bson_iterator_long_raw( const bson_iterator *i ) { int64_t out; bson_little_endian64( &out, bson_iterator_value( i ) ); return out; } bson_bool_t bson_iterator_bool_raw( const bson_iterator *i ) { return bson_iterator_value( i )[0]; } MONGO_EXPORT bson_oid_t *bson_iterator_oid( const bson_iterator *i ) { return ( bson_oid_t * )bson_iterator_value( i ); } MONGO_EXPORT int bson_iterator_int( const bson_iterator *i ) { switch ( bson_iterator_type( i ) ) { case BSON_INT: return bson_iterator_int_raw( i ); case BSON_LONG: return ( int )bson_iterator_long_raw( i ); case BSON_DOUBLE: return bson_iterator_double_raw( i ); default: return 0; } } MONGO_EXPORT double bson_iterator_double( const bson_iterator *i ) { switch ( 
bson_iterator_type( i ) ) { case BSON_INT: return bson_iterator_int_raw( i ); case BSON_LONG: return bson_iterator_long_raw( i ); case BSON_DOUBLE: return bson_iterator_double_raw( i ); default: return 0; } } MONGO_EXPORT int64_t bson_iterator_long( const bson_iterator *i ) { switch ( bson_iterator_type( i ) ) { case BSON_INT: return bson_iterator_int_raw( i ); case BSON_LONG: return bson_iterator_long_raw( i ); case BSON_DOUBLE: return bson_iterator_double_raw( i ); default: return 0; } } MONGO_EXPORT bson_timestamp_t bson_iterator_timestamp( const bson_iterator *i ) { bson_timestamp_t ts; bson_little_endian32( &( ts.i ), bson_iterator_value( i ) ); bson_little_endian32( &( ts.t ), bson_iterator_value( i ) + 4 ); return ts; } MONGO_EXPORT int bson_iterator_timestamp_time( const bson_iterator *i ) { int time; bson_little_endian32( &time, bson_iterator_value( i ) + 4 ); return time; } MONGO_EXPORT int bson_iterator_timestamp_increment( const bson_iterator *i ) { int increment; bson_little_endian32( &increment, bson_iterator_value( i ) ); return increment; } MONGO_EXPORT bson_bool_t bson_iterator_bool( const bson_iterator *i ) { switch ( bson_iterator_type( i ) ) { case BSON_BOOL: return bson_iterator_bool_raw( i ); case BSON_INT: return bson_iterator_int_raw( i ) != 0; case BSON_LONG: return bson_iterator_long_raw( i ) != 0; case BSON_DOUBLE: return bson_iterator_double_raw( i ) != 0; case BSON_EOO: case BSON_NULL: return 0; default: return 1; } } MONGO_EXPORT const char *bson_iterator_string( const bson_iterator *i ) { switch ( bson_iterator_type( i ) ) { case BSON_STRING: case BSON_SYMBOL: return bson_iterator_value( i ) + 4; default: return ""; } } int bson_iterator_string_len( const bson_iterator *i ) { return bson_iterator_int_raw( i ); } MONGO_EXPORT const char *bson_iterator_code( const bson_iterator *i ) { switch ( bson_iterator_type( i ) ) { case BSON_STRING: case BSON_CODE: return bson_iterator_value( i ) + 4; case BSON_CODEWSCOPE: return 
bson_iterator_value( i ) + 8; default: return NULL; } } MONGO_EXPORT void bson_iterator_code_scope( const bson_iterator *i, bson *scope ) { if ( bson_iterator_type( i ) == BSON_CODEWSCOPE ) { int code_len; bson_little_endian32( &code_len, bson_iterator_value( i )+4 ); bson_init_data( scope, ( void * )( bson_iterator_value( i )+8+code_len ) ); _bson_reset( scope ); scope->finished = 1; } else { bson_empty( scope ); } } MONGO_EXPORT bson_date_t bson_iterator_date( const bson_iterator *i ) { return bson_iterator_long_raw( i ); } MONGO_EXPORT time_t bson_iterator_time_t( const bson_iterator *i ) { return bson_iterator_date( i ) / 1000; } MONGO_EXPORT int bson_iterator_bin_len( const bson_iterator *i ) { return ( bson_iterator_bin_type( i ) == BSON_BIN_BINARY_OLD ) ? bson_iterator_int_raw( i ) - 4 : bson_iterator_int_raw( i ); } MONGO_EXPORT char bson_iterator_bin_type( const bson_iterator *i ) { return bson_iterator_value( i )[4]; } MONGO_EXPORT const char *bson_iterator_bin_data( const bson_iterator *i ) { return ( bson_iterator_bin_type( i ) == BSON_BIN_BINARY_OLD ) ? 
bson_iterator_value( i ) + 9 : bson_iterator_value( i ) + 5; } MONGO_EXPORT const char *bson_iterator_regex( const bson_iterator *i ) { return bson_iterator_value( i ); } MONGO_EXPORT const char *bson_iterator_regex_opts( const bson_iterator *i ) { const char *p = bson_iterator_value( i ); return p + strlen( p ) + 1; } MONGO_EXPORT void bson_iterator_subobject( const bson_iterator *i, bson *sub ) { bson_init_data( sub, ( char * )bson_iterator_value( i ) ); _bson_reset( sub ); sub->finished = 1; } MONGO_EXPORT void bson_iterator_subiterator( const bson_iterator *i, bson_iterator *sub ) { bson_iterator_from_buffer( sub, bson_iterator_value( i ) ); } /* ---------------------------- BUILDING ------------------------------ */ static void _bson_init_size( bson *b, int size ) { if( size == 0 ) b->data = NULL; else b->data = ( char * )bson_malloc( size ); b->dataSize = size; b->cur = b->data + 4; _bson_reset( b ); } MONGO_EXPORT void bson_init( bson *b ) { _bson_init_size( b, initialBufferSize ); } void bson_init_size( bson *b, int size ) { _bson_init_size( b, size ); } static void bson_append_byte( bson *b, char c ) { b->cur[0] = c; b->cur++; } static void bson_append( bson *b, const void *data, size_t len ) { memcpy( b->cur , data , len ); b->cur += len; } static void bson_append32( bson *b, const void *data ) { bson_little_endian32( b->cur, data ); b->cur += 4; } static void bson_append32_as_int( bson *b, int data ) { bson_little_endian32( b->cur, &data ); b->cur += 4; } static void bson_append64( bson *b, const void *data ) { bson_little_endian64( b->cur, data ); b->cur += 8; } int bson_ensure_space( bson *b, const size_t bytesNeeded ) { int pos = b->cur - b->data; char *orig = b->data; int new_size; if ( pos + bytesNeeded <= b->dataSize ) return BSON_OK; new_size = 1.5 * ( b->dataSize + bytesNeeded ); if( new_size < b->dataSize ) { if( ( b->dataSize + bytesNeeded ) < INT_MAX ) new_size = INT_MAX; else { b->err = BSON_SIZE_OVERFLOW; return BSON_ERROR; } } b->data = 
bson_realloc( b->data, new_size ); if ( !b->data ) bson_fatal_msg( !!b->data, "realloc() failed" ); b->dataSize = new_size; b->cur += b->data - orig; return BSON_OK; } MONGO_EXPORT int bson_finish( bson *b ) { int i; if( b->err & BSON_NOT_UTF8 ) return BSON_ERROR; if ( ! b->finished ) { if ( bson_ensure_space( b, 1 ) == BSON_ERROR ) return BSON_ERROR; bson_append_byte( b, 0 ); i = ( int )( b->cur - b->data ); bson_little_endian32( b->data, &i ); b->finished = 1; } return BSON_OK; } MONGO_EXPORT void bson_destroy( bson *b ) { if (b) { bson_free( b->data ); b->err = 0; b->data = 0; b->cur = 0; b->finished = 1; } } static int bson_append_estart( bson *b, int type, const char *name, const size_t dataSize ) { const int len = strlen( name ) + 1; if ( b->finished ) { b->err |= BSON_ALREADY_FINISHED; return BSON_ERROR; } if ( bson_ensure_space( b, 1 + len + dataSize ) == BSON_ERROR ) { return BSON_ERROR; } if( bson_check_field_name( b, ( const char * )name, len - 1 ) == BSON_ERROR ) { bson_builder_error( b ); return BSON_ERROR; } bson_append_byte( b, ( char )type ); bson_append( b, name, len ); return BSON_OK; } /* ---------------------------- BUILDING TYPES ------------------------------ */ MONGO_EXPORT int bson_append_int( bson *b, const char *name, const int i ) { if ( bson_append_estart( b, BSON_INT, name, 4 ) == BSON_ERROR ) return BSON_ERROR; bson_append32( b , &i ); return BSON_OK; } MONGO_EXPORT int bson_append_long( bson *b, const char *name, const int64_t i ) { if ( bson_append_estart( b , BSON_LONG, name, 8 ) == BSON_ERROR ) return BSON_ERROR; bson_append64( b , &i ); return BSON_OK; } MONGO_EXPORT int bson_append_double( bson *b, const char *name, const double d ) { if ( bson_append_estart( b, BSON_DOUBLE, name, 8 ) == BSON_ERROR ) return BSON_ERROR; bson_append64( b , &d ); return BSON_OK; } MONGO_EXPORT int bson_append_bool( bson *b, const char *name, const bson_bool_t i ) { if ( bson_append_estart( b, BSON_BOOL, name, 1 ) == BSON_ERROR ) return BSON_ERROR; 
bson_append_byte( b , i != 0 ); return BSON_OK; } MONGO_EXPORT int bson_append_null( bson *b, const char *name ) { if ( bson_append_estart( b , BSON_NULL, name, 0 ) == BSON_ERROR ) return BSON_ERROR; return BSON_OK; } MONGO_EXPORT int bson_append_undefined( bson *b, const char *name ) { if ( bson_append_estart( b, BSON_UNDEFINED, name, 0 ) == BSON_ERROR ) return BSON_ERROR; return BSON_OK; } static int bson_append_string_base( bson *b, const char *name, const char *value, size_t len, bson_type type ) { size_t sl = len + 1; if ( bson_check_string( b, ( const char * )value, sl - 1 ) == BSON_ERROR ) return BSON_ERROR; if ( bson_append_estart( b, type, name, 4 + sl ) == BSON_ERROR ) { return BSON_ERROR; } bson_append32_as_int( b , ( int )sl ); bson_append( b , value , sl - 1 ); bson_append( b , "\0" , 1 ); return BSON_OK; } MONGO_EXPORT int bson_append_string( bson *b, const char *name, const char *value ) { return bson_append_string_base( b, name, value, strlen ( value ), BSON_STRING ); } MONGO_EXPORT int bson_append_symbol( bson *b, const char *name, const char *value ) { return bson_append_string_base( b, name, value, strlen ( value ), BSON_SYMBOL ); } MONGO_EXPORT int bson_append_code( bson *b, const char *name, const char *value ) { return bson_append_string_base( b, name, value, strlen ( value ), BSON_CODE ); } MONGO_EXPORT int bson_append_string_n( bson *b, const char *name, const char *value, size_t len ) { return bson_append_string_base( b, name, value, len, BSON_STRING ); } MONGO_EXPORT int bson_append_symbol_n( bson *b, const char *name, const char *value, size_t len ) { return bson_append_string_base( b, name, value, len, BSON_SYMBOL ); } MONGO_EXPORT int bson_append_code_n( bson *b, const char *name, const char *value, size_t len ) { return bson_append_string_base( b, name, value, len, BSON_CODE ); } MONGO_EXPORT int bson_append_code_w_scope_n( bson *b, const char *name, const char *code, size_t len, const bson *scope ) { size_t sl, size; if ( !scope ) 
return BSON_ERROR; sl = len + 1; size = 4 + 4 + sl + bson_size( scope ); if ( bson_append_estart( b, BSON_CODEWSCOPE, name, size ) == BSON_ERROR ) return BSON_ERROR; bson_append32_as_int( b, ( int )size ); bson_append32( b, &sl ); bson_append( b, code, sl ); bson_append( b, scope->data, bson_size( scope ) ); return BSON_OK; } MONGO_EXPORT int bson_append_code_w_scope( bson *b, const char *name, const char *code, const bson *scope ) { return bson_append_code_w_scope_n( b, name, code, strlen ( code ), scope ); } MONGO_EXPORT int bson_append_binary( bson *b, const char *name, char type, const char *str, size_t len ) { if ( type == BSON_BIN_BINARY_OLD ) { int subtwolen = len + 4; if ( bson_append_estart( b, BSON_BINDATA, name, 4+1+4+len ) == BSON_ERROR ) return BSON_ERROR; bson_append32_as_int( b, ( int )subtwolen ); bson_append_byte( b, type ); bson_append32_as_int( b, ( int )len ); bson_append( b, str, len ); } else { if ( bson_append_estart( b, BSON_BINDATA, name, 4+1+len ) == BSON_ERROR ) return BSON_ERROR; bson_append32_as_int( b, ( int )len ); bson_append_byte( b, type ); bson_append( b, str, len ); } return BSON_OK; } MONGO_EXPORT int bson_append_oid( bson *b, const char *name, const bson_oid_t *oid ) { if ( bson_append_estart( b, BSON_OID, name, 12 ) == BSON_ERROR ) return BSON_ERROR; bson_append( b , oid , 12 ); return BSON_OK; } MONGO_EXPORT int bson_append_new_oid( bson *b, const char *name ) { bson_oid_t oid; bson_oid_gen( &oid ); return bson_append_oid( b, name, &oid ); } MONGO_EXPORT int bson_append_regex( bson *b, const char *name, const char *pattern, const char *opts ) { const size_t plen = strlen( pattern )+1; const size_t olen = strlen( opts )+1; if ( bson_append_estart( b, BSON_REGEX, name, plen + olen ) == BSON_ERROR ) return BSON_ERROR; if ( bson_check_string( b, pattern, plen - 1 ) == BSON_ERROR ) return BSON_ERROR; bson_append( b , pattern , plen ); bson_append( b , opts , olen ); return BSON_OK; } MONGO_EXPORT int bson_append_bson( bson *b, 
const char *name, const bson *bson ) { if ( !bson ) return BSON_ERROR; if ( bson_append_estart( b, BSON_OBJECT, name, bson_size( bson ) ) == BSON_ERROR ) return BSON_ERROR; bson_append( b , bson->data , bson_size( bson ) ); return BSON_OK; } MONGO_EXPORT int bson_append_element( bson *b, const char *name_or_null, const bson_iterator *elem ) { bson_iterator next = *elem; size_t size; bson_iterator_next( &next ); size = next.cur - elem->cur; if ( name_or_null == NULL ) { if( bson_ensure_space( b, size ) == BSON_ERROR ) return BSON_ERROR; bson_append( b, elem->cur, size ); } else { size_t data_size = size - 2 - strlen( bson_iterator_key( elem ) ); bson_append_estart( b, elem->cur[0], name_or_null, data_size ); bson_append( b, bson_iterator_value( elem ), data_size ); } return BSON_OK; } MONGO_EXPORT int bson_append_timestamp( bson *b, const char *name, bson_timestamp_t *ts ) { if ( bson_append_estart( b, BSON_TIMESTAMP, name, 8 ) == BSON_ERROR ) return BSON_ERROR; bson_append32( b , &( ts->i ) ); bson_append32( b , &( ts->t ) ); return BSON_OK; } MONGO_EXPORT int bson_append_timestamp2( bson *b, const char *name, int time, int increment ) { if ( bson_append_estart( b, BSON_TIMESTAMP, name, 8 ) == BSON_ERROR ) return BSON_ERROR; bson_append32( b , &increment ); bson_append32( b , &time ); return BSON_OK; } MONGO_EXPORT int bson_append_date( bson *b, const char *name, bson_date_t millis ) { if ( bson_append_estart( b, BSON_DATE, name, 8 ) == BSON_ERROR ) return BSON_ERROR; bson_append64( b , &millis ); return BSON_OK; } MONGO_EXPORT int bson_append_time_t( bson *b, const char *name, time_t secs ) { return bson_append_date( b, name, ( bson_date_t )secs * 1000 ); } MONGO_EXPORT int bson_append_start_object( bson *b, const char *name ) { if ( bson_append_estart( b, BSON_OBJECT, name, 5 ) == BSON_ERROR ) return BSON_ERROR; b->stack[ b->stackPos++ ] = b->cur - b->data; bson_append32( b , &zero ); return BSON_OK; } MONGO_EXPORT int bson_append_start_array( bson *b, const char 
*name ) { if ( bson_append_estart( b, BSON_ARRAY, name, 5 ) == BSON_ERROR ) return BSON_ERROR; b->stack[ b->stackPos++ ] = b->cur - b->data; bson_append32( b , &zero ); return BSON_OK; } MONGO_EXPORT int bson_append_finish_object( bson *b ) { char *start; int i; if ( bson_ensure_space( b, 1 ) == BSON_ERROR ) return BSON_ERROR; bson_append_byte( b , 0 ); start = b->data + b->stack[ --b->stackPos ]; i = ( int )( b->cur - start ); bson_little_endian32( start, &i ); return BSON_OK; } MONGO_EXPORT double bson_int64_to_double( int64_t i64 ) { return (double)i64; } MONGO_EXPORT int bson_append_finish_array( bson *b ) { return bson_append_finish_object( b ); } /* Error handling and allocators. */ static bson_err_handler err_handler = NULL; MONGO_EXPORT bson_err_handler set_bson_err_handler( bson_err_handler func ) { bson_err_handler old = err_handler; err_handler = func; return old; } MONGO_EXPORT void bson_free( void *ptr ) { bson_free_func( ptr ); } MONGO_EXPORT void *bson_malloc( size_t size ) { void *p; p = bson_malloc_func( size ); bson_fatal_msg( !!p, "malloc() failed" ); return p; } void *bson_realloc( void *ptr, size_t size ) { void *p; p = bson_realloc_func( ptr, size ); bson_fatal_msg( !!p, "realloc() failed" ); return p; } int _bson_errprintf( const char *format, ... ) { va_list ap; int ret = 0; va_start( ap, format ); #ifndef R_SAFETY_NET ret = vfprintf( stderr, format, ap ); #endif va_end( ap ); return ret; } /** * This method is invoked when a non-fatal bson error is encountered. * Calls the error handler if available. * * @param */ void bson_builder_error( bson *b ) { if( err_handler ) err_handler( "BSON error." ); } void bson_fatal( int ok ) { bson_fatal_msg( ok, "" ); } void bson_fatal_msg( int ok , const char *msg ) { if ( ok ) return; if ( err_handler ) { err_handler( msg ); } #ifndef R_SAFETY_NET bson_errprintf( "error: %s\n" , msg ); exit( -5 ); #endif } /* Efficiently copy an integer to a string. 
*/ extern const char bson_numstrs[1000][4]; void bson_numstr( char *str, int i ) { if( i < 1000 ) memcpy( str, bson_numstrs[i], 4 ); else bson_sprintf( str,"%d", i ); } MONGO_EXPORT void bson_swap_endian64( void *outp, const void *inp ) { const char *in = ( const char * )inp; char *out = ( char * )outp; out[0] = in[7]; out[1] = in[6]; out[2] = in[5]; out[3] = in[4]; out[4] = in[3]; out[5] = in[2]; out[6] = in[1]; out[7] = in[0]; } MONGO_EXPORT void bson_swap_endian32( void *outp, const void *inp ) { const char *in = ( const char * )inp; char *out = ( char * )outp; out[0] = in[3]; out[1] = in[2]; out[2] = in[1]; out[3] = in[0]; }
static int bson_append_estart( bson *b, int type, const char *name, const int dataSize ) { const int len = strlen( name ) + 1; if ( b->finished ) { b->err |= BSON_ALREADY_FINISHED; return BSON_ERROR; } if ( bson_ensure_space( b, 1 + len + dataSize ) == BSON_ERROR ) { return BSON_ERROR; } if( bson_check_field_name( b, ( const char * )name, len - 1 ) == BSON_ERROR ) { bson_builder_error( b ); return BSON_ERROR; } bson_append_byte( b, ( char )type ); bson_append( b, name, len ); return BSON_OK; }
static int bson_append_estart( bson *b, int type, const char *name, const size_t dataSize ) { const int len = strlen( name ) + 1; if ( b->finished ) { b->err |= BSON_ALREADY_FINISHED; return BSON_ERROR; } if ( bson_ensure_space( b, 1 + len + dataSize ) == BSON_ERROR ) { return BSON_ERROR; } if( bson_check_field_name( b, ( const char * )name, len - 1 ) == BSON_ERROR ) { bson_builder_error( b ); return BSON_ERROR; } bson_append_byte( b, ( char )type ); bson_append( b, name, len ); return BSON_OK; }
{'added': [(109, 'MONGO_EXPORT size_t bson_buffer_size( const bson *b ) {'), (309, ' size_t ds;'), (423, ' return ( int )bson_iterator_long_raw( i );'), (607, 'static void bson_append( bson *b, const void *data, size_t len ) {'), (617, 'static void bson_append32_as_int( bson *b, int data ) {'), (618, ' bson_little_endian32( b->cur, &data );'), (619, ' b->cur += 4;'), (620, '}'), (621, ''), (627, 'int bson_ensure_space( bson *b, const size_t bytesNeeded ) {'), (665, ' i = ( int )( b->cur - b->data );'), (683, 'static int bson_append_estart( bson *b, int type, const char *name, const size_t dataSize ) {'), (750, ' const char *value, size_t len, bson_type type ) {'), (752, ' size_t sl = len + 1;'), (758, ' bson_append32_as_int( b , ( int )sl );'), (776, 'MONGO_EXPORT int bson_append_string_n( bson *b, const char *name, const char *value, size_t len ) {'), (780, 'MONGO_EXPORT int bson_append_symbol_n( bson *b, const char *name, const char *value, size_t len ) {'), (784, 'MONGO_EXPORT int bson_append_code_n( bson *b, const char *name, const char *value, size_t len ) {'), (789, ' const char *code, size_t len, const bson *scope ) {'), (791, ' size_t sl, size;'), (797, ' bson_append32_as_int( b, ( int )size );'), (808, 'MONGO_EXPORT int bson_append_binary( bson *b, const char *name, char type, const char *str, size_t len ) {'), (813, ' bson_append32_as_int( b, ( int )subtwolen );'), (815, ' bson_append32_as_int( b, ( int )len );'), (821, ' bson_append32_as_int( b, ( int )len );'), (842, ' const size_t plen = strlen( pattern )+1;'), (843, ' const size_t olen = strlen( opts )+1;'), (863, ' size_t size;'), (874, ' size_t data_size = size - 2 - strlen( bson_iterator_key( elem ) );'), (930, ' i = ( int )( b->cur - start );'), (958, 'MONGO_EXPORT void *bson_malloc( size_t size ) {'), (965, 'void *bson_realloc( void *ptr, size_t size ) {')], 'deleted': [(109, 'MONGO_EXPORT int bson_buffer_size( const bson *b ) {'), (309, ' int ds;'), (423, ' return bson_iterator_long_raw( i );'), 
(607, 'static void bson_append( bson *b, const void *data, int len ) {'), (622, 'int bson_ensure_space( bson *b, const int bytesNeeded ) {'), (660, ' i = b->cur - b->data;'), (678, 'static int bson_append_estart( bson *b, int type, const char *name, const int dataSize ) {'), (745, ' const char *value, int len, bson_type type ) {'), (747, ' int sl = len + 1;'), (753, ' bson_append32( b , &sl );'), (771, 'MONGO_EXPORT int bson_append_string_n( bson *b, const char *name, const char *value, int len ) {'), (775, 'MONGO_EXPORT int bson_append_symbol_n( bson *b, const char *name, const char *value, int len ) {'), (779, 'MONGO_EXPORT int bson_append_code_n( bson *b, const char *name, const char *value, int len ) {'), (784, ' const char *code, int len, const bson *scope ) {'), (786, ' int sl, size;'), (792, ' bson_append32( b, &size );'), (803, 'MONGO_EXPORT int bson_append_binary( bson *b, const char *name, char type, const char *str, int len ) {'), (808, ' bson_append32( b, &subtwolen );'), (810, ' bson_append32( b, &len );'), (816, ' bson_append32( b, &len );'), (837, ' const int plen = strlen( pattern )+1;'), (838, ' const int olen = strlen( opts )+1;'), (858, ' int size;'), (869, ' int data_size = size - 2 - strlen( bson_iterator_key( elem ) );'), (925, ' i = b->cur - start;'), (953, 'MONGO_EXPORT void *bson_malloc( int size ) {'), (960, 'void *bson_realloc( void *ptr, int size ) {')]}
32
27
829
5,670
17
120
4
https://github.com/10gen-archive/mongo-c-driver-legacy
CVE-2020-12135
CWE-190
2,973
edit_distance_op.cc
C++
tensorflow::EditDistanceOp::Compute
/* Copyright 2015 The TensorFlow Authors. All Rights Reserved. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. You may obtain a copy of the License at http://www.apache.org/licenses/LICENSE-2.0 Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License. ==============================================================================*/ // See docs in ../ops/array_ops.cc. #define EIGEN_USE_THREADS #include <limits> #include <vector> #include "tensorflow/core/common_runtime/device.h" #include "tensorflow/core/framework/op.h" #include "tensorflow/core/framework/op_kernel.h" #include "tensorflow/core/framework/register_types.h" #include "tensorflow/core/framework/types.h" #include "tensorflow/core/lib/core/status.h" #include "tensorflow/core/lib/gtl/edit_distance.h" #include "tensorflow/core/platform/logging.h" #include "tensorflow/core/platform/macros.h" #include "tensorflow/core/util/sparse/sparse_tensor.h" namespace tensorflow { namespace { Status ValidateShapes(OpKernelContext* ctx, const Tensor& hypothesis_indices, const Tensor& hypothesis_values, const Tensor& hypothesis_shape, const Tensor& truth_indices, const Tensor& truth_values, const Tensor& truth_shape) { if (!TensorShapeUtils::IsMatrix(hypothesis_indices.shape())) return errors::InvalidArgument( "hypothesis_indices should be a matrix, but got shape: ", hypothesis_indices.shape().DebugString()); if (!TensorShapeUtils::IsMatrix(truth_indices.shape())) return errors::InvalidArgument( "truth_indices should be a matrix, but got shape: ", truth_indices.shape().DebugString()); if (!TensorShapeUtils::IsVector(hypothesis_values.shape())) return errors::InvalidArgument( "hypothesis_values should 
be a vector, but got shape: ", hypothesis_values.shape().DebugString()); if (!TensorShapeUtils::IsVector(truth_values.shape())) return errors::InvalidArgument( "truth_values should be a vector, but got shape: ", truth_values.shape().DebugString()); if (!TensorShapeUtils::IsVector(hypothesis_shape.shape())) return errors::InvalidArgument( "hypothesis_shape should be a vector, but got shape: ", hypothesis_shape.shape().DebugString()); if (!TensorShapeUtils::IsVector(truth_shape.shape())) return errors::InvalidArgument( "truth_shape should be a vector, but got shape: ", truth_shape.shape().DebugString()); if (hypothesis_values.NumElements() != hypothesis_indices.dim_size(0)) return errors::InvalidArgument( "Expected hypothesis_values.NumElements == " "#rows(hypothesis_indices), their shapes are: ", hypothesis_values.shape().DebugString(), " and ", hypothesis_indices.shape().DebugString()); if (hypothesis_shape.NumElements() != hypothesis_indices.dim_size(1)) return errors::InvalidArgument( "Expected hypothesis_shape.NumElements == " "#cols(hypothesis_indices), their shapes are: ", hypothesis_shape.shape().DebugString(), " and ", hypothesis_indices.shape().DebugString()); if (truth_shape.NumElements() < 2) return errors::InvalidArgument( "Input SparseTensors must have rank at least 2, but truth_shape " "rank is: ", truth_shape.NumElements()); if (truth_values.NumElements() != truth_indices.dim_size(0)) return errors::InvalidArgument( "Expected truth_values.NumElements == " "#rows(truth_indices), their shapes are: ", truth_values.shape().DebugString(), " and ", truth_indices.shape().DebugString()); if (truth_shape.NumElements() != truth_indices.dim_size(1)) return errors::InvalidArgument( "Expected truth_shape.NumElements == " "#cols(truth_indices), their shapes are: ", truth_shape.shape().DebugString(), " and ", truth_indices.shape().DebugString()); if (truth_shape.NumElements() != hypothesis_shape.NumElements()) return errors::InvalidArgument( "Expected truth and 
hypothesis to have matching ranks, but " "their shapes are: ", truth_shape.shape().DebugString(), " and ", hypothesis_shape.shape().DebugString()); return Status::OK(); } } // namespace template <typename T> class EditDistanceOp : public OpKernel { public: explicit EditDistanceOp(OpKernelConstruction* ctx) : OpKernel(ctx) { OP_REQUIRES_OK(ctx, ctx->GetAttr("normalize", &normalize_)); } void Compute(OpKernelContext* ctx) override { const Tensor* hypothesis_indices; const Tensor* hypothesis_values; const Tensor* hypothesis_shape; const Tensor* truth_indices; const Tensor* truth_values; const Tensor* truth_shape; OP_REQUIRES_OK(ctx, ctx->input("hypothesis_indices", &hypothesis_indices)); OP_REQUIRES_OK(ctx, ctx->input("hypothesis_values", &hypothesis_values)); OP_REQUIRES_OK(ctx, ctx->input("hypothesis_shape", &hypothesis_shape)); OP_REQUIRES_OK(ctx, ctx->input("truth_indices", &truth_indices)); OP_REQUIRES_OK(ctx, ctx->input("truth_values", &truth_values)); OP_REQUIRES_OK(ctx, ctx->input("truth_shape", &truth_shape)); OP_REQUIRES_OK( ctx, ValidateShapes(ctx, *hypothesis_indices, *hypothesis_values, *hypothesis_shape, *truth_indices, *truth_values, *truth_shape)); TensorShape hypothesis_st_shape; OP_REQUIRES_OK(ctx, TensorShapeUtils::MakeShape( hypothesis_shape->vec<int64_t>().data(), hypothesis_shape->NumElements(), &hypothesis_st_shape)); TensorShape truth_st_shape; OP_REQUIRES_OK(ctx, TensorShapeUtils::MakeShape( truth_shape->vec<int64_t>().data(), truth_shape->NumElements(), &truth_st_shape)); // Assume indices are sorted in row-major order. 
std::vector<int64_t> sorted_order(truth_st_shape.dims()); std::iota(sorted_order.begin(), sorted_order.end(), 0); sparse::SparseTensor hypothesis; OP_REQUIRES_OK(ctx, sparse::SparseTensor::Create( *hypothesis_indices, *hypothesis_values, hypothesis_st_shape, sorted_order, &hypothesis)); sparse::SparseTensor truth; OP_REQUIRES_OK(ctx, sparse::SparseTensor::Create( *truth_indices, *truth_values, truth_st_shape, sorted_order, &truth)); // Group dims 0, 1, ..., RANK - 1. The very last dim is assumed // to store the variable length sequences. std::vector<int64_t> group_dims(truth_st_shape.dims() - 1); std::iota(group_dims.begin(), group_dims.end(), 0); TensorShape output_shape; for (int d = 0; d < static_cast<int>(group_dims.size()); ++d) { output_shape.AddDim(std::max(hypothesis_st_shape.dim_size(d), truth_st_shape.dim_size(d))); } const auto output_elements = output_shape.num_elements(); OP_REQUIRES( ctx, output_elements > 0, errors::InvalidArgument("Got output shape ", output_shape.DebugString(), " which has 0 elements")); Tensor* output = nullptr; OP_REQUIRES_OK(ctx, ctx->allocate_output("output", output_shape, &output)); auto output_t = output->flat<float>(); output_t.setZero(); std::vector<int64_t> output_strides(output_shape.dims()); output_strides[output_shape.dims() - 1] = 1; for (int d = output_shape.dims() - 2; d >= 0; --d) { output_strides[d] = output_strides[d + 1] * output_shape.dim_size(d + 1); } auto hypothesis_grouper = hypothesis.group(group_dims); auto truth_grouper = truth.group(group_dims); auto hypothesis_iter = hypothesis_grouper.begin(); auto truth_iter = truth_grouper.begin(); auto cmp = std::equal_to<T>(); while (hypothesis_iter != hypothesis_grouper.end() && truth_iter != truth_grouper.end()) { sparse::Group truth_i = *truth_iter; sparse::Group hypothesis_j = *hypothesis_iter; std::vector<int64_t> g_truth = truth_i.group(); std::vector<int64_t> g_hypothesis = hypothesis_j.group(); auto truth_seq = truth_i.values<T>(); auto hypothesis_seq = 
hypothesis_j.values<T>(); if (g_truth == g_hypothesis) { auto loc = std::inner_product(g_truth.begin(), g_truth.end(), output_strides.begin(), int64_t{0}); OP_REQUIRES( ctx, loc < output_elements, errors::Internal("Got an inner product ", loc, " which would require in writing to outside of " "the buffer for the output tensor (max elements ", output_elements, ")")); output_t(loc) = gtl::LevenshteinDistance<T>(truth_seq, hypothesis_seq, cmp); if (normalize_) output_t(loc) /= truth_seq.size(); ++hypothesis_iter; ++truth_iter; } else if (g_truth > g_hypothesis) { // zero-length truth auto loc = std::inner_product(g_hypothesis.begin(), g_hypothesis.end(), output_strides.begin(), int64_t{0}); OP_REQUIRES( ctx, loc < output_elements, errors::Internal("Got an inner product ", loc, " which would require in writing to outside of " "the buffer for the output tensor (max elements ", output_elements, ")")); output_t(loc) = hypothesis_seq.size(); if (normalize_ && output_t(loc) != 0.0f) { output_t(loc) = std::numeric_limits<float>::infinity(); } ++hypothesis_iter; } else { // zero-length hypothesis auto loc = std::inner_product(g_truth.begin(), g_truth.end(), output_strides.begin(), int64_t{0}); OP_REQUIRES( ctx, loc < output_elements, errors::Internal("Got an inner product ", loc, " which would require in writing to outside of " "the buffer for the output tensor (max elements ", output_elements, ")")); output_t(loc) = (normalize_) ? 
1.0 : truth_seq.size(); ++truth_iter; } } while (hypothesis_iter != hypothesis_grouper.end()) { // zero-length truths sparse::Group hypothesis_j = *hypothesis_iter; std::vector<int64_t> g_hypothesis = hypothesis_j.group(); auto hypothesis_seq = hypothesis_j.values<T>(); auto loc = std::inner_product(g_hypothesis.begin(), g_hypothesis.end(), output_strides.begin(), int64_t{0}); OP_REQUIRES( ctx, loc < output_elements, errors::Internal("Got an inner product ", loc, " which would require in writing to outside of the " "buffer for the output tensor (max elements ", output_elements, ")")); output_t(loc) = hypothesis_seq.size(); if (normalize_ && output_t(loc) != 0.0f) { output_t(loc) = std::numeric_limits<float>::infinity(); } ++hypothesis_iter; } while (truth_iter != truth_grouper.end()) { // missing hypotheses sparse::Group truth_i = *truth_iter; std::vector<int64_t> g_truth = truth_i.group(); auto truth_seq = truth_i.values<T>(); auto loc = std::inner_product(g_truth.begin(), g_truth.end(), output_strides.begin(), int64_t{0}); OP_REQUIRES( ctx, loc < output_elements, errors::Internal("Got an inner product ", loc, " which would require in writing to outside of the " "buffer for the output tensor (max elements ", output_elements, ")")); output_t(loc) = (normalize_) ? 1.0 : truth_seq.size(); ++truth_iter; } } private: bool normalize_; TF_DISALLOW_COPY_AND_ASSIGN(EditDistanceOp); }; #define REGISTER_CPU_KERNEL(T) \ REGISTER_KERNEL_BUILDER( \ Name("EditDistance").Device(DEVICE_CPU).TypeConstraint<T>("T"), \ EditDistanceOp<T>); TF_CALL_POD_STRING_TYPES(REGISTER_CPU_KERNEL); #undef REGISTER_CPU_KERNEL } // end namespace tensorflow
/* Copyright 2015 The TensorFlow Authors. All Rights Reserved. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. You may obtain a copy of the License at http://www.apache.org/licenses/LICENSE-2.0 Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License. ==============================================================================*/ // See docs in ../ops/array_ops.cc. #define EIGEN_USE_THREADS #include <limits> #include <vector> #include "tensorflow/core/common_runtime/device.h" #include "tensorflow/core/framework/op.h" #include "tensorflow/core/framework/op_kernel.h" #include "tensorflow/core/framework/register_types.h" #include "tensorflow/core/framework/types.h" #include "tensorflow/core/lib/core/status.h" #include "tensorflow/core/lib/gtl/edit_distance.h" #include "tensorflow/core/platform/logging.h" #include "tensorflow/core/platform/macros.h" #include "tensorflow/core/util/sparse/sparse_tensor.h" namespace tensorflow { namespace { Status ValidateShapes(OpKernelContext* ctx, const Tensor& hypothesis_indices, const Tensor& hypothesis_values, const Tensor& hypothesis_shape, const Tensor& truth_indices, const Tensor& truth_values, const Tensor& truth_shape) { if (!TensorShapeUtils::IsMatrix(hypothesis_indices.shape())) return errors::InvalidArgument( "hypothesis_indices should be a matrix, but got shape: ", hypothesis_indices.shape().DebugString()); if (!TensorShapeUtils::IsMatrix(truth_indices.shape())) return errors::InvalidArgument( "truth_indices should be a matrix, but got shape: ", truth_indices.shape().DebugString()); if (!TensorShapeUtils::IsVector(hypothesis_values.shape())) return errors::InvalidArgument( "hypothesis_values should 
be a vector, but got shape: ", hypothesis_values.shape().DebugString()); if (!TensorShapeUtils::IsVector(truth_values.shape())) return errors::InvalidArgument( "truth_values should be a vector, but got shape: ", truth_values.shape().DebugString()); if (!TensorShapeUtils::IsVector(hypothesis_shape.shape())) return errors::InvalidArgument( "hypothesis_shape should be a vector, but got shape: ", hypothesis_shape.shape().DebugString()); if (!TensorShapeUtils::IsVector(truth_shape.shape())) return errors::InvalidArgument( "truth_shape should be a vector, but got shape: ", truth_shape.shape().DebugString()); if (hypothesis_values.NumElements() != hypothesis_indices.dim_size(0)) return errors::InvalidArgument( "Expected hypothesis_values.NumElements == " "#rows(hypothesis_indices), their shapes are: ", hypothesis_values.shape().DebugString(), " and ", hypothesis_indices.shape().DebugString()); if (hypothesis_shape.NumElements() != hypothesis_indices.dim_size(1)) return errors::InvalidArgument( "Expected hypothesis_shape.NumElements == " "#cols(hypothesis_indices), their shapes are: ", hypothesis_shape.shape().DebugString(), " and ", hypothesis_indices.shape().DebugString()); if (truth_shape.NumElements() < 2) return errors::InvalidArgument( "Input SparseTensors must have rank at least 2, but truth_shape " "rank is: ", truth_shape.NumElements()); if (truth_values.NumElements() != truth_indices.dim_size(0)) return errors::InvalidArgument( "Expected truth_values.NumElements == " "#rows(truth_indices), their shapes are: ", truth_values.shape().DebugString(), " and ", truth_indices.shape().DebugString()); if (truth_shape.NumElements() != truth_indices.dim_size(1)) return errors::InvalidArgument( "Expected truth_shape.NumElements == " "#cols(truth_indices), their shapes are: ", truth_shape.shape().DebugString(), " and ", truth_indices.shape().DebugString()); if (truth_shape.NumElements() != hypothesis_shape.NumElements()) return errors::InvalidArgument( "Expected truth and 
hypothesis to have matching ranks, but " "their shapes are: ", truth_shape.shape().DebugString(), " and ", hypothesis_shape.shape().DebugString()); return Status::OK(); } } // namespace template <typename T> class EditDistanceOp : public OpKernel { public: explicit EditDistanceOp(OpKernelConstruction* ctx) : OpKernel(ctx) { OP_REQUIRES_OK(ctx, ctx->GetAttr("normalize", &normalize_)); } void Compute(OpKernelContext* ctx) override { const Tensor* hypothesis_indices; const Tensor* hypothesis_values; const Tensor* hypothesis_shape; const Tensor* truth_indices; const Tensor* truth_values; const Tensor* truth_shape; OP_REQUIRES_OK(ctx, ctx->input("hypothesis_indices", &hypothesis_indices)); OP_REQUIRES_OK(ctx, ctx->input("hypothesis_values", &hypothesis_values)); OP_REQUIRES_OK(ctx, ctx->input("hypothesis_shape", &hypothesis_shape)); OP_REQUIRES_OK(ctx, ctx->input("truth_indices", &truth_indices)); OP_REQUIRES_OK(ctx, ctx->input("truth_values", &truth_values)); OP_REQUIRES_OK(ctx, ctx->input("truth_shape", &truth_shape)); OP_REQUIRES_OK( ctx, ValidateShapes(ctx, *hypothesis_indices, *hypothesis_values, *hypothesis_shape, *truth_indices, *truth_values, *truth_shape)); TensorShape hypothesis_st_shape; OP_REQUIRES_OK(ctx, TensorShapeUtils::MakeShape( hypothesis_shape->vec<int64_t>().data(), hypothesis_shape->NumElements(), &hypothesis_st_shape)); TensorShape truth_st_shape; OP_REQUIRES_OK(ctx, TensorShapeUtils::MakeShape( truth_shape->vec<int64_t>().data(), truth_shape->NumElements(), &truth_st_shape)); // Assume indices are sorted in row-major order. 
std::vector<int64_t> sorted_order(truth_st_shape.dims()); std::iota(sorted_order.begin(), sorted_order.end(), 0); sparse::SparseTensor hypothesis; OP_REQUIRES_OK(ctx, sparse::SparseTensor::Create( *hypothesis_indices, *hypothesis_values, hypothesis_st_shape, sorted_order, &hypothesis)); sparse::SparseTensor truth; OP_REQUIRES_OK(ctx, sparse::SparseTensor::Create( *truth_indices, *truth_values, truth_st_shape, sorted_order, &truth)); // Group dims 0, 1, ..., RANK - 1. The very last dim is assumed // to store the variable length sequences. std::vector<int64_t> group_dims(truth_st_shape.dims() - 1); std::iota(group_dims.begin(), group_dims.end(), 0); TensorShape output_shape; for (int d = 0; d < static_cast<int>(group_dims.size()); ++d) { output_shape.AddDim(std::max(hypothesis_st_shape.dim_size(d), truth_st_shape.dim_size(d))); } const auto output_elements = output_shape.num_elements(); OP_REQUIRES( ctx, output_elements > 0, errors::InvalidArgument("Got output shape ", output_shape.DebugString(), " which has 0 elements")); Tensor* output = nullptr; OP_REQUIRES_OK(ctx, ctx->allocate_output("output", output_shape, &output)); auto output_t = output->flat<float>(); output_t.setZero(); std::vector<int64_t> output_strides(output_shape.dims()); output_strides[output_shape.dims() - 1] = 1; for (int d = output_shape.dims() - 2; d >= 0; --d) { output_strides[d] = output_strides[d + 1] * output_shape.dim_size(d + 1); } auto hypothesis_grouper = hypothesis.group(group_dims); auto truth_grouper = truth.group(group_dims); auto hypothesis_iter = hypothesis_grouper.begin(); auto truth_iter = truth_grouper.begin(); auto cmp = std::equal_to<T>(); while (hypothesis_iter != hypothesis_grouper.end() && truth_iter != truth_grouper.end()) { sparse::Group truth_i = *truth_iter; sparse::Group hypothesis_j = *hypothesis_iter; std::vector<int64_t> g_truth = truth_i.group(); std::vector<int64_t> g_hypothesis = hypothesis_j.group(); auto truth_seq = truth_i.values<T>(); auto hypothesis_seq = 
hypothesis_j.values<T>(); if (g_truth == g_hypothesis) { auto loc = std::inner_product(g_truth.begin(), g_truth.end(), output_strides.begin(), int64_t{0}); OP_REQUIRES( ctx, 0 <= loc && loc < output_elements, errors::Internal("Got an inner product ", loc, " which would require writing to outside of " "the buffer for the output tensor (max elements ", output_elements, ")")); output_t(loc) = gtl::LevenshteinDistance<T>(truth_seq, hypothesis_seq, cmp); if (normalize_) output_t(loc) /= truth_seq.size(); ++hypothesis_iter; ++truth_iter; } else if (g_truth > g_hypothesis) { // zero-length truth auto loc = std::inner_product(g_hypothesis.begin(), g_hypothesis.end(), output_strides.begin(), int64_t{0}); OP_REQUIRES( ctx, 0 <= loc && loc < output_elements, errors::Internal("Got an inner product ", loc, " which would require writing to outside of " "the buffer for the output tensor (max elements ", output_elements, ")")); output_t(loc) = hypothesis_seq.size(); if (normalize_ && output_t(loc) != 0.0f) { output_t(loc) = std::numeric_limits<float>::infinity(); } ++hypothesis_iter; } else { // zero-length hypothesis auto loc = std::inner_product(g_truth.begin(), g_truth.end(), output_strides.begin(), int64_t{0}); OP_REQUIRES( ctx, 0 <= loc && loc < output_elements, errors::Internal("Got an inner product ", loc, " which would require writing to outside of " "the buffer for the output tensor (max elements ", output_elements, ")")); output_t(loc) = (normalize_) ? 
1.0 : truth_seq.size(); ++truth_iter; } } while (hypothesis_iter != hypothesis_grouper.end()) { // zero-length truths sparse::Group hypothesis_j = *hypothesis_iter; std::vector<int64_t> g_hypothesis = hypothesis_j.group(); auto hypothesis_seq = hypothesis_j.values<T>(); auto loc = std::inner_product(g_hypothesis.begin(), g_hypothesis.end(), output_strides.begin(), int64_t{0}); OP_REQUIRES( ctx, 0 <= loc && loc < output_elements, errors::Internal("Got an inner product ", loc, " which would require writing to outside of the " "buffer for the output tensor (max elements ", output_elements, ")")); output_t(loc) = hypothesis_seq.size(); if (normalize_ && output_t(loc) != 0.0f) { output_t(loc) = std::numeric_limits<float>::infinity(); } ++hypothesis_iter; } while (truth_iter != truth_grouper.end()) { // missing hypotheses sparse::Group truth_i = *truth_iter; std::vector<int64_t> g_truth = truth_i.group(); auto truth_seq = truth_i.values<T>(); auto loc = std::inner_product(g_truth.begin(), g_truth.end(), output_strides.begin(), int64_t{0}); OP_REQUIRES( ctx, 0 <= loc && loc < output_elements, errors::Internal("Got an inner product ", loc, " which would require writing to outside of the " "buffer for the output tensor (max elements ", output_elements, ")")); output_t(loc) = (normalize_) ? 1.0 : truth_seq.size(); ++truth_iter; } } private: bool normalize_; TF_DISALLOW_COPY_AND_ASSIGN(EditDistanceOp); }; #define REGISTER_CPU_KERNEL(T) \ REGISTER_KERNEL_BUILDER( \ Name("EditDistance").Device(DEVICE_CPU).TypeConstraint<T>("T"), \ EditDistanceOp<T>); TF_CALL_POD_STRING_TYPES(REGISTER_CPU_KERNEL); #undef REGISTER_CPU_KERNEL } // end namespace tensorflow
void Compute(OpKernelContext* ctx) override { const Tensor* hypothesis_indices; const Tensor* hypothesis_values; const Tensor* hypothesis_shape; const Tensor* truth_indices; const Tensor* truth_values; const Tensor* truth_shape; OP_REQUIRES_OK(ctx, ctx->input("hypothesis_indices", &hypothesis_indices)); OP_REQUIRES_OK(ctx, ctx->input("hypothesis_values", &hypothesis_values)); OP_REQUIRES_OK(ctx, ctx->input("hypothesis_shape", &hypothesis_shape)); OP_REQUIRES_OK(ctx, ctx->input("truth_indices", &truth_indices)); OP_REQUIRES_OK(ctx, ctx->input("truth_values", &truth_values)); OP_REQUIRES_OK(ctx, ctx->input("truth_shape", &truth_shape)); OP_REQUIRES_OK( ctx, ValidateShapes(ctx, *hypothesis_indices, *hypothesis_values, *hypothesis_shape, *truth_indices, *truth_values, *truth_shape)); TensorShape hypothesis_st_shape; OP_REQUIRES_OK(ctx, TensorShapeUtils::MakeShape( hypothesis_shape->vec<int64_t>().data(), hypothesis_shape->NumElements(), &hypothesis_st_shape)); TensorShape truth_st_shape; OP_REQUIRES_OK(ctx, TensorShapeUtils::MakeShape( truth_shape->vec<int64_t>().data(), truth_shape->NumElements(), &truth_st_shape)); // Assume indices are sorted in row-major order. std::vector<int64_t> sorted_order(truth_st_shape.dims()); std::iota(sorted_order.begin(), sorted_order.end(), 0); sparse::SparseTensor hypothesis; OP_REQUIRES_OK(ctx, sparse::SparseTensor::Create( *hypothesis_indices, *hypothesis_values, hypothesis_st_shape, sorted_order, &hypothesis)); sparse::SparseTensor truth; OP_REQUIRES_OK(ctx, sparse::SparseTensor::Create( *truth_indices, *truth_values, truth_st_shape, sorted_order, &truth)); // Group dims 0, 1, ..., RANK - 1. The very last dim is assumed // to store the variable length sequences. 
std::vector<int64_t> group_dims(truth_st_shape.dims() - 1); std::iota(group_dims.begin(), group_dims.end(), 0); TensorShape output_shape; for (int d = 0; d < static_cast<int>(group_dims.size()); ++d) { output_shape.AddDim(std::max(hypothesis_st_shape.dim_size(d), truth_st_shape.dim_size(d))); } const auto output_elements = output_shape.num_elements(); OP_REQUIRES( ctx, output_elements > 0, errors::InvalidArgument("Got output shape ", output_shape.DebugString(), " which has 0 elements")); Tensor* output = nullptr; OP_REQUIRES_OK(ctx, ctx->allocate_output("output", output_shape, &output)); auto output_t = output->flat<float>(); output_t.setZero(); std::vector<int64_t> output_strides(output_shape.dims()); output_strides[output_shape.dims() - 1] = 1; for (int d = output_shape.dims() - 2; d >= 0; --d) { output_strides[d] = output_strides[d + 1] * output_shape.dim_size(d + 1); } auto hypothesis_grouper = hypothesis.group(group_dims); auto truth_grouper = truth.group(group_dims); auto hypothesis_iter = hypothesis_grouper.begin(); auto truth_iter = truth_grouper.begin(); auto cmp = std::equal_to<T>(); while (hypothesis_iter != hypothesis_grouper.end() && truth_iter != truth_grouper.end()) { sparse::Group truth_i = *truth_iter; sparse::Group hypothesis_j = *hypothesis_iter; std::vector<int64_t> g_truth = truth_i.group(); std::vector<int64_t> g_hypothesis = hypothesis_j.group(); auto truth_seq = truth_i.values<T>(); auto hypothesis_seq = hypothesis_j.values<T>(); if (g_truth == g_hypothesis) { auto loc = std::inner_product(g_truth.begin(), g_truth.end(), output_strides.begin(), int64_t{0}); OP_REQUIRES( ctx, loc < output_elements, errors::Internal("Got an inner product ", loc, " which would require in writing to outside of " "the buffer for the output tensor (max elements ", output_elements, ")")); output_t(loc) = gtl::LevenshteinDistance<T>(truth_seq, hypothesis_seq, cmp); if (normalize_) output_t(loc) /= truth_seq.size(); ++hypothesis_iter; ++truth_iter; } else if (g_truth 
> g_hypothesis) { // zero-length truth auto loc = std::inner_product(g_hypothesis.begin(), g_hypothesis.end(), output_strides.begin(), int64_t{0}); OP_REQUIRES( ctx, loc < output_elements, errors::Internal("Got an inner product ", loc, " which would require in writing to outside of " "the buffer for the output tensor (max elements ", output_elements, ")")); output_t(loc) = hypothesis_seq.size(); if (normalize_ && output_t(loc) != 0.0f) { output_t(loc) = std::numeric_limits<float>::infinity(); } ++hypothesis_iter; } else { // zero-length hypothesis auto loc = std::inner_product(g_truth.begin(), g_truth.end(), output_strides.begin(), int64_t{0}); OP_REQUIRES( ctx, loc < output_elements, errors::Internal("Got an inner product ", loc, " which would require in writing to outside of " "the buffer for the output tensor (max elements ", output_elements, ")")); output_t(loc) = (normalize_) ? 1.0 : truth_seq.size(); ++truth_iter; } } while (hypothesis_iter != hypothesis_grouper.end()) { // zero-length truths sparse::Group hypothesis_j = *hypothesis_iter; std::vector<int64_t> g_hypothesis = hypothesis_j.group(); auto hypothesis_seq = hypothesis_j.values<T>(); auto loc = std::inner_product(g_hypothesis.begin(), g_hypothesis.end(), output_strides.begin(), int64_t{0}); OP_REQUIRES( ctx, loc < output_elements, errors::Internal("Got an inner product ", loc, " which would require in writing to outside of the " "buffer for the output tensor (max elements ", output_elements, ")")); output_t(loc) = hypothesis_seq.size(); if (normalize_ && output_t(loc) != 0.0f) { output_t(loc) = std::numeric_limits<float>::infinity(); } ++hypothesis_iter; } while (truth_iter != truth_grouper.end()) { // missing hypotheses sparse::Group truth_i = *truth_iter; std::vector<int64_t> g_truth = truth_i.group(); auto truth_seq = truth_i.values<T>(); auto loc = std::inner_product(g_truth.begin(), g_truth.end(), output_strides.begin(), int64_t{0}); OP_REQUIRES( ctx, loc < output_elements, errors::Internal("Got 
an inner product ", loc, " which would require in writing to outside of the " "buffer for the output tensor (max elements ", output_elements, ")")); output_t(loc) = (normalize_) ? 1.0 : truth_seq.size(); ++truth_iter; } }
void Compute(OpKernelContext* ctx) override { const Tensor* hypothesis_indices; const Tensor* hypothesis_values; const Tensor* hypothesis_shape; const Tensor* truth_indices; const Tensor* truth_values; const Tensor* truth_shape; OP_REQUIRES_OK(ctx, ctx->input("hypothesis_indices", &hypothesis_indices)); OP_REQUIRES_OK(ctx, ctx->input("hypothesis_values", &hypothesis_values)); OP_REQUIRES_OK(ctx, ctx->input("hypothesis_shape", &hypothesis_shape)); OP_REQUIRES_OK(ctx, ctx->input("truth_indices", &truth_indices)); OP_REQUIRES_OK(ctx, ctx->input("truth_values", &truth_values)); OP_REQUIRES_OK(ctx, ctx->input("truth_shape", &truth_shape)); OP_REQUIRES_OK( ctx, ValidateShapes(ctx, *hypothesis_indices, *hypothesis_values, *hypothesis_shape, *truth_indices, *truth_values, *truth_shape)); TensorShape hypothesis_st_shape; OP_REQUIRES_OK(ctx, TensorShapeUtils::MakeShape( hypothesis_shape->vec<int64_t>().data(), hypothesis_shape->NumElements(), &hypothesis_st_shape)); TensorShape truth_st_shape; OP_REQUIRES_OK(ctx, TensorShapeUtils::MakeShape( truth_shape->vec<int64_t>().data(), truth_shape->NumElements(), &truth_st_shape)); // Assume indices are sorted in row-major order. std::vector<int64_t> sorted_order(truth_st_shape.dims()); std::iota(sorted_order.begin(), sorted_order.end(), 0); sparse::SparseTensor hypothesis; OP_REQUIRES_OK(ctx, sparse::SparseTensor::Create( *hypothesis_indices, *hypothesis_values, hypothesis_st_shape, sorted_order, &hypothesis)); sparse::SparseTensor truth; OP_REQUIRES_OK(ctx, sparse::SparseTensor::Create( *truth_indices, *truth_values, truth_st_shape, sorted_order, &truth)); // Group dims 0, 1, ..., RANK - 1. The very last dim is assumed // to store the variable length sequences. 
std::vector<int64_t> group_dims(truth_st_shape.dims() - 1); std::iota(group_dims.begin(), group_dims.end(), 0); TensorShape output_shape; for (int d = 0; d < static_cast<int>(group_dims.size()); ++d) { output_shape.AddDim(std::max(hypothesis_st_shape.dim_size(d), truth_st_shape.dim_size(d))); } const auto output_elements = output_shape.num_elements(); OP_REQUIRES( ctx, output_elements > 0, errors::InvalidArgument("Got output shape ", output_shape.DebugString(), " which has 0 elements")); Tensor* output = nullptr; OP_REQUIRES_OK(ctx, ctx->allocate_output("output", output_shape, &output)); auto output_t = output->flat<float>(); output_t.setZero(); std::vector<int64_t> output_strides(output_shape.dims()); output_strides[output_shape.dims() - 1] = 1; for (int d = output_shape.dims() - 2; d >= 0; --d) { output_strides[d] = output_strides[d + 1] * output_shape.dim_size(d + 1); } auto hypothesis_grouper = hypothesis.group(group_dims); auto truth_grouper = truth.group(group_dims); auto hypothesis_iter = hypothesis_grouper.begin(); auto truth_iter = truth_grouper.begin(); auto cmp = std::equal_to<T>(); while (hypothesis_iter != hypothesis_grouper.end() && truth_iter != truth_grouper.end()) { sparse::Group truth_i = *truth_iter; sparse::Group hypothesis_j = *hypothesis_iter; std::vector<int64_t> g_truth = truth_i.group(); std::vector<int64_t> g_hypothesis = hypothesis_j.group(); auto truth_seq = truth_i.values<T>(); auto hypothesis_seq = hypothesis_j.values<T>(); if (g_truth == g_hypothesis) { auto loc = std::inner_product(g_truth.begin(), g_truth.end(), output_strides.begin(), int64_t{0}); OP_REQUIRES( ctx, 0 <= loc && loc < output_elements, errors::Internal("Got an inner product ", loc, " which would require writing to outside of " "the buffer for the output tensor (max elements ", output_elements, ")")); output_t(loc) = gtl::LevenshteinDistance<T>(truth_seq, hypothesis_seq, cmp); if (normalize_) output_t(loc) /= truth_seq.size(); ++hypothesis_iter; ++truth_iter; } else if 
(g_truth > g_hypothesis) { // zero-length truth auto loc = std::inner_product(g_hypothesis.begin(), g_hypothesis.end(), output_strides.begin(), int64_t{0}); OP_REQUIRES( ctx, 0 <= loc && loc < output_elements, errors::Internal("Got an inner product ", loc, " which would require writing to outside of " "the buffer for the output tensor (max elements ", output_elements, ")")); output_t(loc) = hypothesis_seq.size(); if (normalize_ && output_t(loc) != 0.0f) { output_t(loc) = std::numeric_limits<float>::infinity(); } ++hypothesis_iter; } else { // zero-length hypothesis auto loc = std::inner_product(g_truth.begin(), g_truth.end(), output_strides.begin(), int64_t{0}); OP_REQUIRES( ctx, 0 <= loc && loc < output_elements, errors::Internal("Got an inner product ", loc, " which would require writing to outside of " "the buffer for the output tensor (max elements ", output_elements, ")")); output_t(loc) = (normalize_) ? 1.0 : truth_seq.size(); ++truth_iter; } } while (hypothesis_iter != hypothesis_grouper.end()) { // zero-length truths sparse::Group hypothesis_j = *hypothesis_iter; std::vector<int64_t> g_hypothesis = hypothesis_j.group(); auto hypothesis_seq = hypothesis_j.values<T>(); auto loc = std::inner_product(g_hypothesis.begin(), g_hypothesis.end(), output_strides.begin(), int64_t{0}); OP_REQUIRES( ctx, 0 <= loc && loc < output_elements, errors::Internal("Got an inner product ", loc, " which would require writing to outside of the " "buffer for the output tensor (max elements ", output_elements, ")")); output_t(loc) = hypothesis_seq.size(); if (normalize_ && output_t(loc) != 0.0f) { output_t(loc) = std::numeric_limits<float>::infinity(); } ++hypothesis_iter; } while (truth_iter != truth_grouper.end()) { // missing hypotheses sparse::Group truth_i = *truth_iter; std::vector<int64_t> g_truth = truth_i.group(); auto truth_seq = truth_i.values<T>(); auto loc = std::inner_product(g_truth.begin(), g_truth.end(), output_strides.begin(), int64_t{0}); OP_REQUIRES( ctx, 0 <= loc 
&& loc < output_elements, errors::Internal("Got an inner product ", loc, " which would require writing to outside of the " "buffer for the output tensor (max elements ", output_elements, ")")); output_t(loc) = (normalize_) ? 1.0 : truth_seq.size(); ++truth_iter; } }
{'added': [(206, ' ctx, 0 <= loc && loc < output_elements,'), (208, ' " which would require writing to outside of "'), (221, ' ctx, 0 <= loc && loc < output_elements,'), (223, ' " which would require writing to outside of "'), (235, ' ctx, 0 <= loc && loc < output_elements,'), (237, ' " which would require writing to outside of "'), (251, ' ctx, 0 <= loc && loc < output_elements,'), (253, ' " which would require writing to outside of the "'), (269, ' ctx, 0 <= loc && loc < output_elements,'), (271, ' " which would require writing to outside of the "')], 'deleted': [(206, ' ctx, loc < output_elements,'), (208, ' " which would require in writing to outside of "'), (221, ' ctx, loc < output_elements,'), (223, ' " which would require in writing to outside of "'), (235, ' ctx, loc < output_elements,'), (237, ' " which would require in writing to outside of "'), (251, ' ctx, loc < output_elements,'), (253, ' " which would require in writing to outside of the "'), (269, ' ctx, loc < output_elements,'), (271, ' " which would require in writing to outside of the "')]}
10
10
238
1,864
145
1,261
16
https://github.com/tensorflow/tensorflow
CVE-2022-29208
CWE-787
2,355
print-pim.c
C
pimv1_join_prune_print
/* * Copyright (c) 1995, 1996 * The Regents of the University of California. All rights reserved. * * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that: (1) source code distributions * retain the above copyright notice and this paragraph in its entirety, (2) * distributions including binary code include the above copyright notice and * this paragraph in its entirety in the documentation or other materials * provided with the distribution, and (3) all advertising materials mentioning * features or use of this software display the following acknowledgement: * ``This product includes software developed by the University of California, * Lawrence Berkeley Laboratory and its contributors.'' Neither the name of * the University nor the names of its contributors may be used to endorse * or promote products derived from this software without specific prior * written permission. * THIS SOFTWARE IS PROVIDED ``AS IS'' AND WITHOUT ANY EXPRESS OR IMPLIED * WARRANTIES, INCLUDING, WITHOUT LIMITATION, THE IMPLIED WARRANTIES OF * MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE. 
*/ /* \summary: Protocol Independent Multicast (PIM) printer */ #ifdef HAVE_CONFIG_H #include "config.h" #endif #include <netdissect-stdinc.h> #include "netdissect.h" #include "addrtoname.h" #include "extract.h" #include "ip.h" #include "ip6.h" #include "ipproto.h" #define PIMV1_TYPE_QUERY 0 #define PIMV1_TYPE_REGISTER 1 #define PIMV1_TYPE_REGISTER_STOP 2 #define PIMV1_TYPE_JOIN_PRUNE 3 #define PIMV1_TYPE_RP_REACHABILITY 4 #define PIMV1_TYPE_ASSERT 5 #define PIMV1_TYPE_GRAFT 6 #define PIMV1_TYPE_GRAFT_ACK 7 static const struct tok pimv1_type_str[] = { { PIMV1_TYPE_QUERY, "Query" }, { PIMV1_TYPE_REGISTER, "Register" }, { PIMV1_TYPE_REGISTER_STOP, "Register-Stop" }, { PIMV1_TYPE_JOIN_PRUNE, "Join/Prune" }, { PIMV1_TYPE_RP_REACHABILITY, "RP-reachable" }, { PIMV1_TYPE_ASSERT, "Assert" }, { PIMV1_TYPE_GRAFT, "Graft" }, { PIMV1_TYPE_GRAFT_ACK, "Graft-ACK" }, { 0, NULL } }; #define PIMV2_TYPE_HELLO 0 #define PIMV2_TYPE_REGISTER 1 #define PIMV2_TYPE_REGISTER_STOP 2 #define PIMV2_TYPE_JOIN_PRUNE 3 #define PIMV2_TYPE_BOOTSTRAP 4 #define PIMV2_TYPE_ASSERT 5 #define PIMV2_TYPE_GRAFT 6 #define PIMV2_TYPE_GRAFT_ACK 7 #define PIMV2_TYPE_CANDIDATE_RP 8 #define PIMV2_TYPE_PRUNE_REFRESH 9 #define PIMV2_TYPE_DF_ELECTION 10 #define PIMV2_TYPE_ECMP_REDIRECT 11 static const struct tok pimv2_type_values[] = { { PIMV2_TYPE_HELLO, "Hello" }, { PIMV2_TYPE_REGISTER, "Register" }, { PIMV2_TYPE_REGISTER_STOP, "Register Stop" }, { PIMV2_TYPE_JOIN_PRUNE, "Join / Prune" }, { PIMV2_TYPE_BOOTSTRAP, "Bootstrap" }, { PIMV2_TYPE_ASSERT, "Assert" }, { PIMV2_TYPE_GRAFT, "Graft" }, { PIMV2_TYPE_GRAFT_ACK, "Graft Acknowledgement" }, { PIMV2_TYPE_CANDIDATE_RP, "Candidate RP Advertisement" }, { PIMV2_TYPE_PRUNE_REFRESH, "Prune Refresh" }, { PIMV2_TYPE_DF_ELECTION, "DF Election" }, { PIMV2_TYPE_ECMP_REDIRECT, "ECMP Redirect" }, { 0, NULL} }; #define PIMV2_HELLO_OPTION_HOLDTIME 1 #define PIMV2_HELLO_OPTION_LANPRUNEDELAY 2 #define PIMV2_HELLO_OPTION_DR_PRIORITY_OLD 18 #define PIMV2_HELLO_OPTION_DR_PRIORITY 19 
#define PIMV2_HELLO_OPTION_GENID 20 #define PIMV2_HELLO_OPTION_REFRESH_CAP 21 #define PIMV2_HELLO_OPTION_BIDIR_CAP 22 #define PIMV2_HELLO_OPTION_ADDRESS_LIST 24 #define PIMV2_HELLO_OPTION_ADDRESS_LIST_OLD 65001 static const struct tok pimv2_hello_option_values[] = { { PIMV2_HELLO_OPTION_HOLDTIME, "Hold Time" }, { PIMV2_HELLO_OPTION_LANPRUNEDELAY, "LAN Prune Delay" }, { PIMV2_HELLO_OPTION_DR_PRIORITY_OLD, "DR Priority (Old)" }, { PIMV2_HELLO_OPTION_DR_PRIORITY, "DR Priority" }, { PIMV2_HELLO_OPTION_GENID, "Generation ID" }, { PIMV2_HELLO_OPTION_REFRESH_CAP, "State Refresh Capability" }, { PIMV2_HELLO_OPTION_BIDIR_CAP, "Bi-Directional Capability" }, { PIMV2_HELLO_OPTION_ADDRESS_LIST, "Address List" }, { PIMV2_HELLO_OPTION_ADDRESS_LIST_OLD, "Address List (Old)" }, { 0, NULL} }; #define PIMV2_REGISTER_FLAG_LEN 4 #define PIMV2_REGISTER_FLAG_BORDER 0x80000000 #define PIMV2_REGISTER_FLAG_NULL 0x40000000 static const struct tok pimv2_register_flag_values[] = { { PIMV2_REGISTER_FLAG_BORDER, "Border" }, { PIMV2_REGISTER_FLAG_NULL, "Null" }, { 0, NULL} }; /* * XXX: We consider a case where IPv6 is not ready yet for portability, * but PIM dependent defintions should be independent of IPv6... */ struct pim { uint8_t pim_typever; /* upper 4bit: PIM version number; 2 for PIMv2 */ /* lower 4bit: the PIM message type, currently they are: * Hello, Register, Register-Stop, Join/Prune, * Bootstrap, Assert, Graft (PIM-DM only), * Graft-Ack (PIM-DM only), C-RP-Adv */ #define PIM_VER(x) (((x) & 0xf0) >> 4) #define PIM_TYPE(x) ((x) & 0x0f) u_char pim_rsv; /* Reserved */ u_short pim_cksum; /* IP style check sum */ }; static void pimv2_print(netdissect_options *, register const u_char *bp, register u_int len, const u_char *); static void pimv1_join_prune_print(netdissect_options *ndo, register const u_char *bp, register u_int len) { int ngroups, njoin, nprune; int njp; /* If it's a single group and a single source, use 1-line output. 
*/ if (ND_TTEST2(bp[0], 30) && bp[11] == 1 && ((njoin = EXTRACT_16BITS(&bp[20])) + EXTRACT_16BITS(&bp[22])) == 1) { int hold; ND_PRINT((ndo, " RPF %s ", ipaddr_string(ndo, bp))); hold = EXTRACT_16BITS(&bp[6]); if (hold != 180) { ND_PRINT((ndo, "Hold ")); unsigned_relts_print(ndo, hold); } ND_PRINT((ndo, "%s (%s/%d, %s", njoin ? "Join" : "Prune", ipaddr_string(ndo, &bp[26]), bp[25] & 0x3f, ipaddr_string(ndo, &bp[12]))); if (EXTRACT_32BITS(&bp[16]) != 0xffffffff) ND_PRINT((ndo, "/%s", ipaddr_string(ndo, &bp[16]))); ND_PRINT((ndo, ") %s%s %s", (bp[24] & 0x01) ? "Sparse" : "Dense", (bp[25] & 0x80) ? " WC" : "", (bp[25] & 0x40) ? "RP" : "SPT")); return; } ND_TCHECK2(bp[0], sizeof(struct in_addr)); if (ndo->ndo_vflag > 1) ND_PRINT((ndo, "\n")); ND_PRINT((ndo, " Upstream Nbr: %s", ipaddr_string(ndo, bp))); ND_TCHECK2(bp[6], 2); if (ndo->ndo_vflag > 1) ND_PRINT((ndo, "\n")); ND_PRINT((ndo, " Hold time: ")); unsigned_relts_print(ndo, EXTRACT_16BITS(&bp[6])); if (ndo->ndo_vflag < 2) return; bp += 8; len -= 8; ND_TCHECK2(bp[0], 4); ngroups = bp[3]; bp += 4; len -= 4; while (ngroups--) { /* * XXX - does the address have length "addrlen" and the * mask length "maddrlen"? */ ND_TCHECK2(bp[0], sizeof(struct in_addr)); ND_PRINT((ndo, "\n\tGroup: %s", ipaddr_string(ndo, bp))); ND_TCHECK2(bp[4], sizeof(struct in_addr)); if (EXTRACT_32BITS(&bp[4]) != 0xffffffff) ND_PRINT((ndo, "/%s", ipaddr_string(ndo, &bp[4]))); ND_TCHECK2(bp[8], 4); njoin = EXTRACT_16BITS(&bp[8]); nprune = EXTRACT_16BITS(&bp[10]); ND_PRINT((ndo, " joined: %d pruned: %d", njoin, nprune)); bp += 12; len -= 12; for (njp = 0; njp < (njoin + nprune); njp++) { const char *type; if (njp < njoin) type = "Join "; else type = "Prune"; ND_TCHECK2(bp[0], 6); ND_PRINT((ndo, "\n\t%s %s%s%s%s/%d", type, (bp[0] & 0x01) ? "Sparse " : "Dense ", (bp[1] & 0x80) ? "WC " : "", (bp[1] & 0x40) ? 
"RP " : "SPT ", ipaddr_string(ndo, &bp[2]), bp[1] & 0x3f)); bp += 6; len -= 6; } } return; trunc: ND_PRINT((ndo, "[|pim]")); return; } void pimv1_print(netdissect_options *ndo, register const u_char *bp, register u_int len) { register const u_char *ep; register u_char type; ep = (const u_char *)ndo->ndo_snapend; if (bp >= ep) return; ND_TCHECK(bp[1]); type = bp[1]; ND_PRINT((ndo, " %s", tok2str(pimv1_type_str, "[type %u]", type))); switch (type) { case PIMV1_TYPE_QUERY: if (ND_TTEST(bp[8])) { switch (bp[8] >> 4) { case 0: ND_PRINT((ndo, " Dense-mode")); break; case 1: ND_PRINT((ndo, " Sparse-mode")); break; case 2: ND_PRINT((ndo, " Sparse-Dense-mode")); break; default: ND_PRINT((ndo, " mode-%d", bp[8] >> 4)); break; } } if (ndo->ndo_vflag) { ND_TCHECK2(bp[10],2); ND_PRINT((ndo, " (Hold-time ")); unsigned_relts_print(ndo, EXTRACT_16BITS(&bp[10])); ND_PRINT((ndo, ")")); } break; case PIMV1_TYPE_REGISTER: ND_TCHECK2(bp[8], 20); /* ip header */ ND_PRINT((ndo, " for %s > %s", ipaddr_string(ndo, &bp[20]), ipaddr_string(ndo, &bp[24]))); break; case PIMV1_TYPE_REGISTER_STOP: ND_TCHECK2(bp[12], sizeof(struct in_addr)); ND_PRINT((ndo, " for %s > %s", ipaddr_string(ndo, &bp[8]), ipaddr_string(ndo, &bp[12]))); break; case PIMV1_TYPE_RP_REACHABILITY: if (ndo->ndo_vflag) { ND_TCHECK2(bp[22], 2); ND_PRINT((ndo, " group %s", ipaddr_string(ndo, &bp[8]))); if (EXTRACT_32BITS(&bp[12]) != 0xffffffff) ND_PRINT((ndo, "/%s", ipaddr_string(ndo, &bp[12]))); ND_PRINT((ndo, " RP %s hold ", ipaddr_string(ndo, &bp[16]))); unsigned_relts_print(ndo, EXTRACT_16BITS(&bp[22])); } break; case PIMV1_TYPE_ASSERT: ND_TCHECK2(bp[16], sizeof(struct in_addr)); ND_PRINT((ndo, " for %s > %s", ipaddr_string(ndo, &bp[16]), ipaddr_string(ndo, &bp[8]))); if (EXTRACT_32BITS(&bp[12]) != 0xffffffff) ND_PRINT((ndo, "/%s", ipaddr_string(ndo, &bp[12]))); ND_TCHECK2(bp[24], 4); ND_PRINT((ndo, " %s pref %d metric %d", (bp[20] & 0x80) ? 
"RP-tree" : "SPT", EXTRACT_32BITS(&bp[20]) & 0x7fffffff, EXTRACT_32BITS(&bp[24]))); break; case PIMV1_TYPE_JOIN_PRUNE: case PIMV1_TYPE_GRAFT: case PIMV1_TYPE_GRAFT_ACK: if (ndo->ndo_vflag) pimv1_join_prune_print(ndo, &bp[8], len - 8); break; } ND_TCHECK(bp[4]); if ((bp[4] >> 4) != 1) ND_PRINT((ndo, " [v%d]", bp[4] >> 4)); return; trunc: ND_PRINT((ndo, "[|pim]")); return; } /* * auto-RP is a cisco protocol, documented at * ftp://ftpeng.cisco.com/ipmulticast/specs/pim-autorp-spec01.txt * * This implements version 1+, dated Sept 9, 1998. */ void cisco_autorp_print(netdissect_options *ndo, register const u_char *bp, register u_int len) { int type; int numrps; int hold; ND_TCHECK(bp[0]); ND_PRINT((ndo, " auto-rp ")); type = bp[0]; switch (type) { case 0x11: ND_PRINT((ndo, "candidate-advert")); break; case 0x12: ND_PRINT((ndo, "mapping")); break; default: ND_PRINT((ndo, "type-0x%02x", type)); break; } ND_TCHECK(bp[1]); numrps = bp[1]; ND_TCHECK2(bp[2], 2); ND_PRINT((ndo, " Hold ")); hold = EXTRACT_16BITS(&bp[2]); if (hold) unsigned_relts_print(ndo, EXTRACT_16BITS(&bp[2])); else ND_PRINT((ndo, "FOREVER")); /* Next 4 bytes are reserved. */ bp += 8; len -= 8; /*XXX skip unless -v? */ /* * Rest of packet: * numrps entries of the form: * 32 bits: RP * 6 bits: reserved * 2 bits: PIM version supported, bit 0 is "supports v1", 1 is "v2". * 8 bits: # of entries for this RP * each entry: 7 bits: reserved, 1 bit: negative, * 8 bits: mask 32 bits: source * lather, rinse, repeat. 
*/ while (numrps--) { int nentries; char s; ND_TCHECK2(bp[0], 4); ND_PRINT((ndo, " RP %s", ipaddr_string(ndo, bp))); ND_TCHECK(bp[4]); switch (bp[4] & 0x3) { case 0: ND_PRINT((ndo, " PIMv?")); break; case 1: ND_PRINT((ndo, " PIMv1")); break; case 2: ND_PRINT((ndo, " PIMv2")); break; case 3: ND_PRINT((ndo, " PIMv1+2")); break; } if (bp[4] & 0xfc) ND_PRINT((ndo, " [rsvd=0x%02x]", bp[4] & 0xfc)); ND_TCHECK(bp[5]); nentries = bp[5]; bp += 6; len -= 6; s = ' '; for (; nentries; nentries--) { ND_TCHECK2(bp[0], 6); ND_PRINT((ndo, "%c%s%s/%d", s, bp[0] & 1 ? "!" : "", ipaddr_string(ndo, &bp[2]), bp[1])); if (bp[0] & 0x02) { ND_PRINT((ndo, " bidir")); } if (bp[0] & 0xfc) { ND_PRINT((ndo, "[rsvd=0x%02x]", bp[0] & 0xfc)); } s = ','; bp += 6; len -= 6; } } return; trunc: ND_PRINT((ndo, "[|autorp]")); return; } void pim_print(netdissect_options *ndo, register const u_char *bp, register u_int len, const u_char *bp2) { register const u_char *ep; register const struct pim *pim = (const struct pim *)bp; ep = (const u_char *)ndo->ndo_snapend; if (bp >= ep) return; #ifdef notyet /* currently we see only version and type */ ND_TCHECK(pim->pim_rsv); #endif switch (PIM_VER(pim->pim_typever)) { case 2: if (!ndo->ndo_vflag) { ND_PRINT((ndo, "PIMv%u, %s, length %u", PIM_VER(pim->pim_typever), tok2str(pimv2_type_values,"Unknown Type",PIM_TYPE(pim->pim_typever)), len)); return; } else { ND_PRINT((ndo, "PIMv%u, length %u\n\t%s", PIM_VER(pim->pim_typever), len, tok2str(pimv2_type_values,"Unknown Type",PIM_TYPE(pim->pim_typever)))); pimv2_print(ndo, bp, len, bp2); } break; default: ND_PRINT((ndo, "PIMv%u, length %u", PIM_VER(pim->pim_typever), len)); break; } return; } /* * PIMv2 uses encoded address representations. * * The last PIM-SM I-D before RFC2117 was published specified the * following representation for unicast addresses. However, RFC2117 * specified no encoding for unicast addresses with the unicast * address length specified in the header. 
Therefore, we have to * guess which encoding is being used (Cisco's PIMv2 implementation * uses the non-RFC encoding). RFC2117 turns a previously "Reserved" * field into a 'unicast-address-length-in-bytes' field. We guess * that it's the draft encoding if this reserved field is zero. * * RFC2362 goes back to the encoded format, and calls the addr length * field "reserved" again. * * The first byte is the address family, from: * * 0 Reserved * 1 IP (IP version 4) * 2 IP6 (IP version 6) * 3 NSAP * 4 HDLC (8-bit multidrop) * 5 BBN 1822 * 6 802 (includes all 802 media plus Ethernet "canonical format") * 7 E.163 * 8 E.164 (SMDS, Frame Relay, ATM) * 9 F.69 (Telex) * 10 X.121 (X.25, Frame Relay) * 11 IPX * 12 Appletalk * 13 Decnet IV * 14 Banyan Vines * 15 E.164 with NSAP format subaddress * * In addition, the second byte is an "Encoding". 0 is the default * encoding for the address family, and no other encodings are currently * specified. * */ static int pimv2_addr_len; enum pimv2_addrtype { pimv2_unicast, pimv2_group, pimv2_source }; /* 0 1 2 3 * 0 1 2 3 4 5 6 7 8 9 0 1 2 3 4 5 6 7 8 9 0 1 2 3 4 5 6 7 8 9 0 1 * +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+ * | Addr Family | Encoding Type | Unicast Address | * +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+++++++ * 0 1 2 3 * 0 1 2 3 4 5 6 7 8 9 0 1 2 3 4 5 6 7 8 9 0 1 2 3 4 5 6 7 8 9 0 1 * +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+ * | Addr Family | Encoding Type | Reserved | Mask Len | * +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+ * | Group multicast Address | * +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+ * 0 1 2 3 * 0 1 2 3 4 5 6 7 8 9 0 1 2 3 4 5 6 7 8 9 0 1 2 3 4 5 6 7 8 9 0 1 * +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+ * | Addr Family | Encoding Type | Rsrvd |S|W|R| Mask Len | * +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+ * | Source Address | * 
+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
 */
/*
 * Print one PIMv2 encoded address (unicast, group, or source form, per
 * "at") starting at "bp".
 *
 * Returns the number of bytes the encoded address occupied (so callers
 * can advance their cursor), or -1 on a malformed or truncated address.
 * When "silent" is nonzero the address is parsed but nothing is printed.
 *
 * NOTE(review): depends on the file-scope "pimv2_addr_len" global set by
 * pimv2_print() from the header's reserved byte — nonzero selects the
 * draft (RFC2117-style) encoding with no per-address family/encoding
 * prefix.  The on-wire message length is NOT consulted here, only the
 * captured length (ND_TCHECK); callers are expected to bound "bp".
 */
static int
pimv2_addr_print(netdissect_options *ndo,
                 const u_char *bp, enum pimv2_addrtype at, int silent)
{
	int af;
	int len, hdrlen;

	ND_TCHECK(bp[0]);

	if (pimv2_addr_len == 0) {
		/* Standard encoding: explicit Addr Family + Encoding Type. */
		ND_TCHECK(bp[1]);
		switch (bp[0]) {
		case 1:
			af = AF_INET;
			len = sizeof(struct in_addr);
			break;
		case 2:
			af = AF_INET6;
			len = sizeof(struct in6_addr);
			break;
		default:
			return -1;
		}
		/* Only encoding type 0 (the default) is defined. */
		if (bp[1] != 0)
			return -1;
		hdrlen = 2;
	} else {
		/* Draft encoding: address length taken from the header,
		 * family inferred from that length, no 2-byte prefix. */
		switch (pimv2_addr_len) {
		case sizeof(struct in_addr):
			af = AF_INET;
			break;
		case sizeof(struct in6_addr):
			af = AF_INET6;
			break;
		default:
			return -1;
			break;
		}
		len = pimv2_addr_len;
		hdrlen = 0;
	}

	bp += hdrlen;
	switch (at) {
	case pimv2_unicast:
		/* Bare address, no flags/mask-length bytes. */
		ND_TCHECK2(bp[0], len);
		if (af == AF_INET) {
			if (!silent)
				ND_PRINT((ndo, "%s", ipaddr_string(ndo, bp)));
		}
		else if (af == AF_INET6) {
			if (!silent)
				ND_PRINT((ndo, "%s", ip6addr_string(ndo, bp)));
		}
		return hdrlen + len;
	case pimv2_group:
	case pimv2_source:
		/* Flags byte + mask-length byte precede the address. */
		ND_TCHECK2(bp[0], len + 2);
		if (af == AF_INET) {
			if (!silent) {
				ND_PRINT((ndo, "%s", ipaddr_string(ndo, bp + 2)));
				/* Only print the prefix when it is not a host mask. */
				if (bp[1] != 32)
					ND_PRINT((ndo, "/%u", bp[1]));
			}
		}
		else if (af == AF_INET6) {
			if (!silent) {
				ND_PRINT((ndo, "%s", ip6addr_string(ndo, bp + 2)));
				if (bp[1] != 128)
					ND_PRINT((ndo, "/%u", bp[1]));
			}
		}
		if (bp[0] && !silent) {
			if (at == pimv2_group) {
				/* Group addresses have no defined flag bits;
				 * dump any that are set. */
				ND_PRINT((ndo, "(0x%02x)", bp[0]));
			} else {
				/* Source flags: Sparse, WildCard, RPT. */
				ND_PRINT((ndo, "(%s%s%s",
					bp[0] & 0x04 ? "S" : "",
					bp[0] & 0x02 ? "W" : "",
					bp[0] & 0x01 ? "R" : ""));
				if (bp[0] & 0xf8) {
					ND_PRINT((ndo, "+0x%02x", bp[0] & 0xf8));
				}
				ND_PRINT((ndo, ")"));
			}
		}
		return hdrlen + 2 + len;
	default:
		return -1;
	}
trunc:
	return -1;
}

/* Result of verifying a PIMv2 checksum against the captured data. */
enum checksum_status {
	CORRECT,
	INCORRECT,
	UNVERIFIED
};

/*
 * Verify the PIMv2 checksum over "len" bytes at "bp"; "bp2" points at
 * the enclosing IP header, whose version selects plain (IPv4) or
 * pseudo-header (IPv6) checksumming.  Returns UNVERIFIED when the
 * capture does not contain the full "len" bytes.
 */
static enum checksum_status
pimv2_check_checksum(netdissect_options *ndo, const u_char *bp,
		     const u_char *bp2, u_int len)
{
	const struct ip *ip;
	u_int cksum;

	if (!ND_TTEST2(bp[0], len)) {
		/* We don't have all the data.
*/ return (UNVERIFIED); } ip = (const struct ip *)bp2; if (IP_V(ip) == 4) { struct cksum_vec vec[1]; vec[0].ptr = bp; vec[0].len = len; cksum = in_cksum(vec, 1); return (cksum ? INCORRECT : CORRECT); } else if (IP_V(ip) == 6) { const struct ip6_hdr *ip6; ip6 = (const struct ip6_hdr *)bp2; cksum = nextproto6_cksum(ndo, ip6, bp, len, len, IPPROTO_PIM); return (cksum ? INCORRECT : CORRECT); } else { return (UNVERIFIED); } } static void pimv2_print(netdissect_options *ndo, register const u_char *bp, register u_int len, const u_char *bp2) { register const u_char *ep; register const struct pim *pim = (const struct pim *)bp; int advance; enum checksum_status cksum_status; ep = (const u_char *)ndo->ndo_snapend; if (bp >= ep) return; if (ep > bp + len) ep = bp + len; ND_TCHECK(pim->pim_rsv); pimv2_addr_len = pim->pim_rsv; if (pimv2_addr_len != 0) ND_PRINT((ndo, ", RFC2117-encoding")); ND_PRINT((ndo, ", cksum 0x%04x ", EXTRACT_16BITS(&pim->pim_cksum))); if (EXTRACT_16BITS(&pim->pim_cksum) == 0) { ND_PRINT((ndo, "(unverified)")); } else { if (PIM_TYPE(pim->pim_typever) == PIMV2_TYPE_REGISTER) { /* * The checksum only covers the packet header, * not the encapsulated packet. */ cksum_status = pimv2_check_checksum(ndo, bp, bp2, 8); if (cksum_status == INCORRECT) { /* * To quote RFC 4601, "For interoperability * reasons, a message carrying a checksum * calculated over the entire PIM Register * message should also be accepted." */ cksum_status = pimv2_check_checksum(ndo, bp, bp2, len); } } else { /* * The checksum covers the entire packet. 
*/ cksum_status = pimv2_check_checksum(ndo, bp, bp2, len); } switch (cksum_status) { case CORRECT: ND_PRINT((ndo, "(correct)")); break; case INCORRECT: ND_PRINT((ndo, "(incorrect)")); break; case UNVERIFIED: ND_PRINT((ndo, "(unverified)")); break; } } switch (PIM_TYPE(pim->pim_typever)) { case PIMV2_TYPE_HELLO: { uint16_t otype, olen; bp += 4; while (bp < ep) { ND_TCHECK2(bp[0], 4); otype = EXTRACT_16BITS(&bp[0]); olen = EXTRACT_16BITS(&bp[2]); ND_TCHECK2(bp[0], 4 + olen); ND_PRINT((ndo, "\n\t %s Option (%u), length %u, Value: ", tok2str(pimv2_hello_option_values, "Unknown", otype), otype, olen)); bp += 4; switch (otype) { case PIMV2_HELLO_OPTION_HOLDTIME: if (olen != 2) { ND_PRINT((ndo, "ERROR: Option Length != 2 Bytes (%u)", olen)); } else { unsigned_relts_print(ndo, EXTRACT_16BITS(bp)); } break; case PIMV2_HELLO_OPTION_LANPRUNEDELAY: if (olen != 4) { ND_PRINT((ndo, "ERROR: Option Length != 4 Bytes (%u)", olen)); } else { char t_bit; uint16_t lan_delay, override_interval; lan_delay = EXTRACT_16BITS(bp); override_interval = EXTRACT_16BITS(bp+2); t_bit = (lan_delay & 0x8000)? 
1 : 0; lan_delay &= ~0x8000; ND_PRINT((ndo, "\n\t T-bit=%d, LAN delay %dms, Override interval %dms", t_bit, lan_delay, override_interval)); } break; case PIMV2_HELLO_OPTION_DR_PRIORITY_OLD: case PIMV2_HELLO_OPTION_DR_PRIORITY: switch (olen) { case 0: ND_PRINT((ndo, "Bi-Directional Capability (Old)")); break; case 4: ND_PRINT((ndo, "%u", EXTRACT_32BITS(bp))); break; default: ND_PRINT((ndo, "ERROR: Option Length != 4 Bytes (%u)", olen)); break; } break; case PIMV2_HELLO_OPTION_GENID: if (olen != 4) { ND_PRINT((ndo, "ERROR: Option Length != 4 Bytes (%u)", olen)); } else { ND_PRINT((ndo, "0x%08x", EXTRACT_32BITS(bp))); } break; case PIMV2_HELLO_OPTION_REFRESH_CAP: if (olen != 4) { ND_PRINT((ndo, "ERROR: Option Length != 4 Bytes (%u)", olen)); } else { ND_PRINT((ndo, "v%d", *bp)); if (*(bp+1) != 0) { ND_PRINT((ndo, ", interval ")); unsigned_relts_print(ndo, *(bp+1)); } if (EXTRACT_16BITS(bp+2) != 0) { ND_PRINT((ndo, " ?0x%04x?", EXTRACT_16BITS(bp+2))); } } break; case PIMV2_HELLO_OPTION_BIDIR_CAP: break; case PIMV2_HELLO_OPTION_ADDRESS_LIST_OLD: case PIMV2_HELLO_OPTION_ADDRESS_LIST: if (ndo->ndo_vflag > 1) { const u_char *ptr = bp; while (ptr < (bp+olen)) { ND_PRINT((ndo, "\n\t ")); advance = pimv2_addr_print(ndo, ptr, pimv2_unicast, 0); if (advance < 0) { ND_PRINT((ndo, "...")); break; } ptr += advance; } } break; default: if (ndo->ndo_vflag <= 1) print_unknown_data(ndo, bp, "\n\t ", olen); break; } /* do we want to see an additionally hexdump ? 
*/ if (ndo->ndo_vflag> 1) print_unknown_data(ndo, bp, "\n\t ", olen); bp += olen; } break; } case PIMV2_TYPE_REGISTER: { const struct ip *ip; ND_TCHECK2(*(bp + 4), PIMV2_REGISTER_FLAG_LEN); ND_PRINT((ndo, ", Flags [ %s ]\n\t", tok2str(pimv2_register_flag_values, "none", EXTRACT_32BITS(bp+4)))); bp += 8; len -= 8; /* encapsulated multicast packet */ ip = (const struct ip *)bp; switch (IP_V(ip)) { case 0: /* Null header */ ND_PRINT((ndo, "IP-Null-header %s > %s", ipaddr_string(ndo, &ip->ip_src), ipaddr_string(ndo, &ip->ip_dst))); break; case 4: /* IPv4 */ ip_print(ndo, bp, len); break; case 6: /* IPv6 */ ip6_print(ndo, bp, len); break; default: ND_PRINT((ndo, "IP ver %d", IP_V(ip))); break; } break; } case PIMV2_TYPE_REGISTER_STOP: bp += 4; len -= 4; if (bp >= ep) break; ND_PRINT((ndo, " group=")); if ((advance = pimv2_addr_print(ndo, bp, pimv2_group, 0)) < 0) { ND_PRINT((ndo, "...")); break; } bp += advance; len -= advance; if (bp >= ep) break; ND_PRINT((ndo, " source=")); if ((advance = pimv2_addr_print(ndo, bp, pimv2_unicast, 0)) < 0) { ND_PRINT((ndo, "...")); break; } bp += advance; len -= advance; break; case PIMV2_TYPE_JOIN_PRUNE: case PIMV2_TYPE_GRAFT: case PIMV2_TYPE_GRAFT_ACK: /* * 0 1 2 3 * 0 1 2 3 4 5 6 7 8 9 0 1 2 3 4 5 6 7 8 9 0 1 2 3 4 5 6 7 8 9 0 1 * +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+ * |PIM Ver| Type | Addr length | Checksum | * +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+ * | Unicast-Upstream Neighbor Address | * +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+ * | Reserved | Num groups | Holdtime | * +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+ * | Encoded-Multicast Group Address-1 | * +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+ * | Number of Joined Sources | Number of Pruned Sources | * +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+ * | Encoded-Joined Source Address-1 | * 
+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+ * | . | * | . | * +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+ * | Encoded-Joined Source Address-n | * +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+ * | Encoded-Pruned Source Address-1 | * +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+ * | . | * | . | * +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+ * | Encoded-Pruned Source Address-n | * +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+ * | . | * | . | * +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+ * | Encoded-Multicast Group Address-n | * +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+ */ { uint8_t ngroup; uint16_t holdtime; uint16_t njoin; uint16_t nprune; int i, j; bp += 4; len -= 4; if (PIM_TYPE(pim->pim_typever) != 7) { /*not for Graft-ACK*/ if (bp >= ep) break; ND_PRINT((ndo, ", upstream-neighbor: ")); if ((advance = pimv2_addr_print(ndo, bp, pimv2_unicast, 0)) < 0) { ND_PRINT((ndo, "...")); break; } bp += advance; len -= advance; } if (bp + 4 > ep) break; ngroup = bp[1]; holdtime = EXTRACT_16BITS(&bp[2]); ND_PRINT((ndo, "\n\t %u group(s)", ngroup)); if (PIM_TYPE(pim->pim_typever) != 7) { /*not for Graft-ACK*/ ND_PRINT((ndo, ", holdtime: ")); if (holdtime == 0xffff) ND_PRINT((ndo, "infinite")); else unsigned_relts_print(ndo, holdtime); } bp += 4; len -= 4; for (i = 0; i < ngroup; i++) { if (bp >= ep) goto jp_done; ND_PRINT((ndo, "\n\t group #%u: ", i+1)); if ((advance = pimv2_addr_print(ndo, bp, pimv2_group, 0)) < 0) { ND_PRINT((ndo, "...)")); goto jp_done; } bp += advance; len -= advance; if (bp + 4 > ep) { ND_PRINT((ndo, "...)")); goto jp_done; } njoin = EXTRACT_16BITS(&bp[0]); nprune = EXTRACT_16BITS(&bp[2]); ND_PRINT((ndo, ", joined sources: %u, pruned sources: %u", njoin, nprune)); bp += 4; len -= 4; for (j = 0; j < njoin; j++) { ND_PRINT((ndo, "\n\t joined source #%u: ", j+1)); if ((advance = 
pimv2_addr_print(ndo, bp, pimv2_source, 0)) < 0) { ND_PRINT((ndo, "...)")); goto jp_done; } bp += advance; len -= advance; } for (j = 0; j < nprune; j++) { ND_PRINT((ndo, "\n\t pruned source #%u: ", j+1)); if ((advance = pimv2_addr_print(ndo, bp, pimv2_source, 0)) < 0) { ND_PRINT((ndo, "...)")); goto jp_done; } bp += advance; len -= advance; } } jp_done: break; } case PIMV2_TYPE_BOOTSTRAP: { int i, j, frpcnt; bp += 4; /* Fragment Tag, Hash Mask len, and BSR-priority */ if (bp + sizeof(uint16_t) >= ep) break; ND_PRINT((ndo, " tag=%x", EXTRACT_16BITS(bp))); bp += sizeof(uint16_t); if (bp >= ep) break; ND_PRINT((ndo, " hashmlen=%d", bp[0])); if (bp + 1 >= ep) break; ND_PRINT((ndo, " BSRprio=%d", bp[1])); bp += 2; /* Encoded-Unicast-BSR-Address */ if (bp >= ep) break; ND_PRINT((ndo, " BSR=")); if ((advance = pimv2_addr_print(ndo, bp, pimv2_unicast, 0)) < 0) { ND_PRINT((ndo, "...")); break; } bp += advance; for (i = 0; bp < ep; i++) { /* Encoded-Group Address */ ND_PRINT((ndo, " (group%d: ", i)); if ((advance = pimv2_addr_print(ndo, bp, pimv2_group, 0)) < 0) { ND_PRINT((ndo, "...)")); goto bs_done; } bp += advance; /* RP-Count, Frag RP-Cnt, and rsvd */ if (bp >= ep) { ND_PRINT((ndo, "...)")); goto bs_done; } ND_PRINT((ndo, " RPcnt=%d", bp[0])); if (bp + 1 >= ep) { ND_PRINT((ndo, "...)")); goto bs_done; } ND_PRINT((ndo, " FRPcnt=%d", frpcnt = bp[1])); bp += 4; for (j = 0; j < frpcnt && bp < ep; j++) { /* each RP info */ ND_PRINT((ndo, " RP%d=", j)); if ((advance = pimv2_addr_print(ndo, bp, pimv2_unicast, 0)) < 0) { ND_PRINT((ndo, "...)")); goto bs_done; } bp += advance; if (bp + 1 >= ep) { ND_PRINT((ndo, "...)")); goto bs_done; } ND_PRINT((ndo, ",holdtime=")); unsigned_relts_print(ndo, EXTRACT_16BITS(bp)); if (bp + 2 >= ep) { ND_PRINT((ndo, "...)")); goto bs_done; } ND_PRINT((ndo, ",prio=%d", bp[2])); bp += 4; } ND_PRINT((ndo, ")")); } bs_done: break; } case PIMV2_TYPE_ASSERT: bp += 4; len -= 4; if (bp >= ep) break; ND_PRINT((ndo, " group=")); if ((advance = 
pimv2_addr_print(ndo, bp, pimv2_group, 0)) < 0) { ND_PRINT((ndo, "...")); break; } bp += advance; len -= advance; if (bp >= ep) break; ND_PRINT((ndo, " src=")); if ((advance = pimv2_addr_print(ndo, bp, pimv2_unicast, 0)) < 0) { ND_PRINT((ndo, "...")); break; } bp += advance; len -= advance; if (bp + 8 > ep) break; if (bp[0] & 0x80) ND_PRINT((ndo, " RPT")); ND_PRINT((ndo, " pref=%u", EXTRACT_32BITS(&bp[0]) & 0x7fffffff)); ND_PRINT((ndo, " metric=%u", EXTRACT_32BITS(&bp[4]))); break; case PIMV2_TYPE_CANDIDATE_RP: { int i, pfxcnt; bp += 4; /* Prefix-Cnt, Priority, and Holdtime */ if (bp >= ep) break; ND_PRINT((ndo, " prefix-cnt=%d", bp[0])); pfxcnt = bp[0]; if (bp + 1 >= ep) break; ND_PRINT((ndo, " prio=%d", bp[1])); if (bp + 3 >= ep) break; ND_PRINT((ndo, " holdtime=")); unsigned_relts_print(ndo, EXTRACT_16BITS(&bp[2])); bp += 4; /* Encoded-Unicast-RP-Address */ if (bp >= ep) break; ND_PRINT((ndo, " RP=")); if ((advance = pimv2_addr_print(ndo, bp, pimv2_unicast, 0)) < 0) { ND_PRINT((ndo, "...")); break; } bp += advance; /* Encoded-Group Addresses */ for (i = 0; i < pfxcnt && bp < ep; i++) { ND_PRINT((ndo, " Group%d=", i)); if ((advance = pimv2_addr_print(ndo, bp, pimv2_group, 0)) < 0) { ND_PRINT((ndo, "...")); break; } bp += advance; } break; } case PIMV2_TYPE_PRUNE_REFRESH: ND_PRINT((ndo, " src=")); if ((advance = pimv2_addr_print(ndo, bp, pimv2_unicast, 0)) < 0) { ND_PRINT((ndo, "...")); break; } bp += advance; ND_PRINT((ndo, " grp=")); if ((advance = pimv2_addr_print(ndo, bp, pimv2_group, 0)) < 0) { ND_PRINT((ndo, "...")); break; } bp += advance; ND_PRINT((ndo, " forwarder=")); if ((advance = pimv2_addr_print(ndo, bp, pimv2_unicast, 0)) < 0) { ND_PRINT((ndo, "...")); break; } bp += advance; ND_TCHECK2(bp[0], 2); ND_PRINT((ndo, " TUNR ")); unsigned_relts_print(ndo, EXTRACT_16BITS(bp)); break; default: ND_PRINT((ndo, " [type %d]", PIM_TYPE(pim->pim_typever))); break; } return; trunc: ND_PRINT((ndo, "[|pim]")); } /* * Local Variables: * c-style: whitesmith * 
c-basic-offset: 8 * End: */
/* * Copyright (c) 1995, 1996 * The Regents of the University of California. All rights reserved. * * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that: (1) source code distributions * retain the above copyright notice and this paragraph in its entirety, (2) * distributions including binary code include the above copyright notice and * this paragraph in its entirety in the documentation or other materials * provided with the distribution, and (3) all advertising materials mentioning * features or use of this software display the following acknowledgement: * ``This product includes software developed by the University of California, * Lawrence Berkeley Laboratory and its contributors.'' Neither the name of * the University nor the names of its contributors may be used to endorse * or promote products derived from this software without specific prior * written permission. * THIS SOFTWARE IS PROVIDED ``AS IS'' AND WITHOUT ANY EXPRESS OR IMPLIED * WARRANTIES, INCLUDING, WITHOUT LIMITATION, THE IMPLIED WARRANTIES OF * MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE. 
*/ /* \summary: Protocol Independent Multicast (PIM) printer */ #ifdef HAVE_CONFIG_H #include "config.h" #endif #include <netdissect-stdinc.h> #include "netdissect.h" #include "addrtoname.h" #include "extract.h" #include "ip.h" #include "ip6.h" #include "ipproto.h" #define PIMV1_TYPE_QUERY 0 #define PIMV1_TYPE_REGISTER 1 #define PIMV1_TYPE_REGISTER_STOP 2 #define PIMV1_TYPE_JOIN_PRUNE 3 #define PIMV1_TYPE_RP_REACHABILITY 4 #define PIMV1_TYPE_ASSERT 5 #define PIMV1_TYPE_GRAFT 6 #define PIMV1_TYPE_GRAFT_ACK 7 static const struct tok pimv1_type_str[] = { { PIMV1_TYPE_QUERY, "Query" }, { PIMV1_TYPE_REGISTER, "Register" }, { PIMV1_TYPE_REGISTER_STOP, "Register-Stop" }, { PIMV1_TYPE_JOIN_PRUNE, "Join/Prune" }, { PIMV1_TYPE_RP_REACHABILITY, "RP-reachable" }, { PIMV1_TYPE_ASSERT, "Assert" }, { PIMV1_TYPE_GRAFT, "Graft" }, { PIMV1_TYPE_GRAFT_ACK, "Graft-ACK" }, { 0, NULL } }; #define PIMV2_TYPE_HELLO 0 #define PIMV2_TYPE_REGISTER 1 #define PIMV2_TYPE_REGISTER_STOP 2 #define PIMV2_TYPE_JOIN_PRUNE 3 #define PIMV2_TYPE_BOOTSTRAP 4 #define PIMV2_TYPE_ASSERT 5 #define PIMV2_TYPE_GRAFT 6 #define PIMV2_TYPE_GRAFT_ACK 7 #define PIMV2_TYPE_CANDIDATE_RP 8 #define PIMV2_TYPE_PRUNE_REFRESH 9 #define PIMV2_TYPE_DF_ELECTION 10 #define PIMV2_TYPE_ECMP_REDIRECT 11 static const struct tok pimv2_type_values[] = { { PIMV2_TYPE_HELLO, "Hello" }, { PIMV2_TYPE_REGISTER, "Register" }, { PIMV2_TYPE_REGISTER_STOP, "Register Stop" }, { PIMV2_TYPE_JOIN_PRUNE, "Join / Prune" }, { PIMV2_TYPE_BOOTSTRAP, "Bootstrap" }, { PIMV2_TYPE_ASSERT, "Assert" }, { PIMV2_TYPE_GRAFT, "Graft" }, { PIMV2_TYPE_GRAFT_ACK, "Graft Acknowledgement" }, { PIMV2_TYPE_CANDIDATE_RP, "Candidate RP Advertisement" }, { PIMV2_TYPE_PRUNE_REFRESH, "Prune Refresh" }, { PIMV2_TYPE_DF_ELECTION, "DF Election" }, { PIMV2_TYPE_ECMP_REDIRECT, "ECMP Redirect" }, { 0, NULL} }; #define PIMV2_HELLO_OPTION_HOLDTIME 1 #define PIMV2_HELLO_OPTION_LANPRUNEDELAY 2 #define PIMV2_HELLO_OPTION_DR_PRIORITY_OLD 18 #define PIMV2_HELLO_OPTION_DR_PRIORITY 19 
#define PIMV2_HELLO_OPTION_GENID 20 #define PIMV2_HELLO_OPTION_REFRESH_CAP 21 #define PIMV2_HELLO_OPTION_BIDIR_CAP 22 #define PIMV2_HELLO_OPTION_ADDRESS_LIST 24 #define PIMV2_HELLO_OPTION_ADDRESS_LIST_OLD 65001 static const struct tok pimv2_hello_option_values[] = { { PIMV2_HELLO_OPTION_HOLDTIME, "Hold Time" }, { PIMV2_HELLO_OPTION_LANPRUNEDELAY, "LAN Prune Delay" }, { PIMV2_HELLO_OPTION_DR_PRIORITY_OLD, "DR Priority (Old)" }, { PIMV2_HELLO_OPTION_DR_PRIORITY, "DR Priority" }, { PIMV2_HELLO_OPTION_GENID, "Generation ID" }, { PIMV2_HELLO_OPTION_REFRESH_CAP, "State Refresh Capability" }, { PIMV2_HELLO_OPTION_BIDIR_CAP, "Bi-Directional Capability" }, { PIMV2_HELLO_OPTION_ADDRESS_LIST, "Address List" }, { PIMV2_HELLO_OPTION_ADDRESS_LIST_OLD, "Address List (Old)" }, { 0, NULL} }; #define PIMV2_REGISTER_FLAG_LEN 4 #define PIMV2_REGISTER_FLAG_BORDER 0x80000000 #define PIMV2_REGISTER_FLAG_NULL 0x40000000 static const struct tok pimv2_register_flag_values[] = { { PIMV2_REGISTER_FLAG_BORDER, "Border" }, { PIMV2_REGISTER_FLAG_NULL, "Null" }, { 0, NULL} }; /* * XXX: We consider a case where IPv6 is not ready yet for portability, * but PIM dependent defintions should be independent of IPv6... */ struct pim { uint8_t pim_typever; /* upper 4bit: PIM version number; 2 for PIMv2 */ /* lower 4bit: the PIM message type, currently they are: * Hello, Register, Register-Stop, Join/Prune, * Bootstrap, Assert, Graft (PIM-DM only), * Graft-Ack (PIM-DM only), C-RP-Adv */ #define PIM_VER(x) (((x) & 0xf0) >> 4) #define PIM_TYPE(x) ((x) & 0x0f) u_char pim_rsv; /* Reserved */ u_short pim_cksum; /* IP style check sum */ }; static void pimv2_print(netdissect_options *, register const u_char *bp, register u_int len, const u_char *); static void pimv1_join_prune_print(netdissect_options *ndo, register const u_char *bp, register u_int len) { int ngroups, njoin, nprune; int njp; /* If it's a single group and a single source, use 1-line output. 
*/ if (ND_TTEST2(bp[0], 30) && bp[11] == 1 && ((njoin = EXTRACT_16BITS(&bp[20])) + EXTRACT_16BITS(&bp[22])) == 1) { int hold; ND_PRINT((ndo, " RPF %s ", ipaddr_string(ndo, bp))); hold = EXTRACT_16BITS(&bp[6]); if (hold != 180) { ND_PRINT((ndo, "Hold ")); unsigned_relts_print(ndo, hold); } ND_PRINT((ndo, "%s (%s/%d, %s", njoin ? "Join" : "Prune", ipaddr_string(ndo, &bp[26]), bp[25] & 0x3f, ipaddr_string(ndo, &bp[12]))); if (EXTRACT_32BITS(&bp[16]) != 0xffffffff) ND_PRINT((ndo, "/%s", ipaddr_string(ndo, &bp[16]))); ND_PRINT((ndo, ") %s%s %s", (bp[24] & 0x01) ? "Sparse" : "Dense", (bp[25] & 0x80) ? " WC" : "", (bp[25] & 0x40) ? "RP" : "SPT")); return; } if (len < sizeof(struct in_addr)) goto trunc; ND_TCHECK2(bp[0], sizeof(struct in_addr)); if (ndo->ndo_vflag > 1) ND_PRINT((ndo, "\n")); ND_PRINT((ndo, " Upstream Nbr: %s", ipaddr_string(ndo, bp))); bp += 4; len -= 4; if (len < 4) goto trunc; ND_TCHECK2(bp[2], 2); if (ndo->ndo_vflag > 1) ND_PRINT((ndo, "\n")); ND_PRINT((ndo, " Hold time: ")); unsigned_relts_print(ndo, EXTRACT_16BITS(&bp[2])); if (ndo->ndo_vflag < 2) return; bp += 4; len -= 4; if (len < 4) goto trunc; ND_TCHECK2(bp[0], 4); ngroups = bp[3]; bp += 4; len -= 4; while (ngroups--) { /* * XXX - does the address have length "addrlen" and the * mask length "maddrlen"? 
*/ if (len < 4) goto trunc; ND_TCHECK2(bp[0], sizeof(struct in_addr)); ND_PRINT((ndo, "\n\tGroup: %s", ipaddr_string(ndo, bp))); bp += 4; len -= 4; if (len < 4) goto trunc; ND_TCHECK2(bp[0], sizeof(struct in_addr)); if (EXTRACT_32BITS(&bp[0]) != 0xffffffff) ND_PRINT((ndo, "/%s", ipaddr_string(ndo, &bp[0]))); bp += 4; len -= 4; if (len < 4) goto trunc; ND_TCHECK2(bp[0], 4); njoin = EXTRACT_16BITS(&bp[0]); nprune = EXTRACT_16BITS(&bp[2]); ND_PRINT((ndo, " joined: %d pruned: %d", njoin, nprune)); bp += 4; len -= 4; for (njp = 0; njp < (njoin + nprune); njp++) { const char *type; if (njp < njoin) type = "Join "; else type = "Prune"; if (len < 6) goto trunc; ND_TCHECK2(bp[0], 6); ND_PRINT((ndo, "\n\t%s %s%s%s%s/%d", type, (bp[0] & 0x01) ? "Sparse " : "Dense ", (bp[1] & 0x80) ? "WC " : "", (bp[1] & 0x40) ? "RP " : "SPT ", ipaddr_string(ndo, &bp[2]), bp[1] & 0x3f)); bp += 6; len -= 6; } } return; trunc: ND_PRINT((ndo, "[|pim]")); return; } void pimv1_print(netdissect_options *ndo, register const u_char *bp, register u_int len) { register u_char type; ND_TCHECK(bp[1]); type = bp[1]; ND_PRINT((ndo, " %s", tok2str(pimv1_type_str, "[type %u]", type))); switch (type) { case PIMV1_TYPE_QUERY: if (ND_TTEST(bp[8])) { switch (bp[8] >> 4) { case 0: ND_PRINT((ndo, " Dense-mode")); break; case 1: ND_PRINT((ndo, " Sparse-mode")); break; case 2: ND_PRINT((ndo, " Sparse-Dense-mode")); break; default: ND_PRINT((ndo, " mode-%d", bp[8] >> 4)); break; } } if (ndo->ndo_vflag) { ND_TCHECK2(bp[10],2); ND_PRINT((ndo, " (Hold-time ")); unsigned_relts_print(ndo, EXTRACT_16BITS(&bp[10])); ND_PRINT((ndo, ")")); } break; case PIMV1_TYPE_REGISTER: ND_TCHECK2(bp[8], 20); /* ip header */ ND_PRINT((ndo, " for %s > %s", ipaddr_string(ndo, &bp[20]), ipaddr_string(ndo, &bp[24]))); break; case PIMV1_TYPE_REGISTER_STOP: ND_TCHECK2(bp[12], sizeof(struct in_addr)); ND_PRINT((ndo, " for %s > %s", ipaddr_string(ndo, &bp[8]), ipaddr_string(ndo, &bp[12]))); break; case PIMV1_TYPE_RP_REACHABILITY: if 
(ndo->ndo_vflag) { ND_TCHECK2(bp[22], 2); ND_PRINT((ndo, " group %s", ipaddr_string(ndo, &bp[8]))); if (EXTRACT_32BITS(&bp[12]) != 0xffffffff) ND_PRINT((ndo, "/%s", ipaddr_string(ndo, &bp[12]))); ND_PRINT((ndo, " RP %s hold ", ipaddr_string(ndo, &bp[16]))); unsigned_relts_print(ndo, EXTRACT_16BITS(&bp[22])); } break; case PIMV1_TYPE_ASSERT: ND_TCHECK2(bp[16], sizeof(struct in_addr)); ND_PRINT((ndo, " for %s > %s", ipaddr_string(ndo, &bp[16]), ipaddr_string(ndo, &bp[8]))); if (EXTRACT_32BITS(&bp[12]) != 0xffffffff) ND_PRINT((ndo, "/%s", ipaddr_string(ndo, &bp[12]))); ND_TCHECK2(bp[24], 4); ND_PRINT((ndo, " %s pref %d metric %d", (bp[20] & 0x80) ? "RP-tree" : "SPT", EXTRACT_32BITS(&bp[20]) & 0x7fffffff, EXTRACT_32BITS(&bp[24]))); break; case PIMV1_TYPE_JOIN_PRUNE: case PIMV1_TYPE_GRAFT: case PIMV1_TYPE_GRAFT_ACK: if (ndo->ndo_vflag) { if (len < 8) goto trunc; pimv1_join_prune_print(ndo, &bp[8], len - 8); } break; } ND_TCHECK(bp[4]); if ((bp[4] >> 4) != 1) ND_PRINT((ndo, " [v%d]", bp[4] >> 4)); return; trunc: ND_PRINT((ndo, "[|pim]")); return; } /* * auto-RP is a cisco protocol, documented at * ftp://ftpeng.cisco.com/ipmulticast/specs/pim-autorp-spec01.txt * * This implements version 1+, dated Sept 9, 1998. */ void cisco_autorp_print(netdissect_options *ndo, register const u_char *bp, register u_int len) { int type; int numrps; int hold; if (len < 8) goto trunc; ND_TCHECK(bp[0]); ND_PRINT((ndo, " auto-rp ")); type = bp[0]; switch (type) { case 0x11: ND_PRINT((ndo, "candidate-advert")); break; case 0x12: ND_PRINT((ndo, "mapping")); break; default: ND_PRINT((ndo, "type-0x%02x", type)); break; } ND_TCHECK(bp[1]); numrps = bp[1]; ND_TCHECK2(bp[2], 2); ND_PRINT((ndo, " Hold ")); hold = EXTRACT_16BITS(&bp[2]); if (hold) unsigned_relts_print(ndo, EXTRACT_16BITS(&bp[2])); else ND_PRINT((ndo, "FOREVER")); /* Next 4 bytes are reserved. */ bp += 8; len -= 8; /*XXX skip unless -v? 
*/ /* * Rest of packet: * numrps entries of the form: * 32 bits: RP * 6 bits: reserved * 2 bits: PIM version supported, bit 0 is "supports v1", 1 is "v2". * 8 bits: # of entries for this RP * each entry: 7 bits: reserved, 1 bit: negative, * 8 bits: mask 32 bits: source * lather, rinse, repeat. */ while (numrps--) { int nentries; char s; if (len < 4) goto trunc; ND_TCHECK2(bp[0], 4); ND_PRINT((ndo, " RP %s", ipaddr_string(ndo, bp))); bp += 4; len -= 4; if (len < 1) goto trunc; ND_TCHECK(bp[0]); switch (bp[0] & 0x3) { case 0: ND_PRINT((ndo, " PIMv?")); break; case 1: ND_PRINT((ndo, " PIMv1")); break; case 2: ND_PRINT((ndo, " PIMv2")); break; case 3: ND_PRINT((ndo, " PIMv1+2")); break; } if (bp[0] & 0xfc) ND_PRINT((ndo, " [rsvd=0x%02x]", bp[0] & 0xfc)); bp += 1; len -= 1; if (len < 1) goto trunc; ND_TCHECK(bp[0]); nentries = bp[0]; bp += 1; len -= 1; s = ' '; for (; nentries; nentries--) { if (len < 6) goto trunc; ND_TCHECK2(bp[0], 6); ND_PRINT((ndo, "%c%s%s/%d", s, bp[0] & 1 ? "!" : "", ipaddr_string(ndo, &bp[2]), bp[1])); if (bp[0] & 0x02) { ND_PRINT((ndo, " bidir")); } if (bp[0] & 0xfc) { ND_PRINT((ndo, "[rsvd=0x%02x]", bp[0] & 0xfc)); } s = ','; bp += 6; len -= 6; } } return; trunc: ND_PRINT((ndo, "[|autorp]")); return; } void pim_print(netdissect_options *ndo, register const u_char *bp, register u_int len, const u_char *bp2) { register const struct pim *pim = (const struct pim *)bp; #ifdef notyet /* currently we see only version and type */ ND_TCHECK(pim->pim_rsv); #endif ND_TCHECK(pim->pim_typever); switch (PIM_VER(pim->pim_typever)) { case 2: if (!ndo->ndo_vflag) { ND_PRINT((ndo, "PIMv%u, %s, length %u", PIM_VER(pim->pim_typever), tok2str(pimv2_type_values,"Unknown Type",PIM_TYPE(pim->pim_typever)), len)); return; } else { ND_PRINT((ndo, "PIMv%u, length %u\n\t%s", PIM_VER(pim->pim_typever), len, tok2str(pimv2_type_values,"Unknown Type",PIM_TYPE(pim->pim_typever)))); pimv2_print(ndo, bp, len, bp2); } break; default: ND_PRINT((ndo, "PIMv%u, length %u", 
PIM_VER(pim->pim_typever), len)); break; } return; trunc: ND_PRINT((ndo, "[|pim]")); return; } /* * PIMv2 uses encoded address representations. * * The last PIM-SM I-D before RFC2117 was published specified the * following representation for unicast addresses. However, RFC2117 * specified no encoding for unicast addresses with the unicast * address length specified in the header. Therefore, we have to * guess which encoding is being used (Cisco's PIMv2 implementation * uses the non-RFC encoding). RFC2117 turns a previously "Reserved" * field into a 'unicast-address-length-in-bytes' field. We guess * that it's the draft encoding if this reserved field is zero. * * RFC2362 goes back to the encoded format, and calls the addr length * field "reserved" again. * * The first byte is the address family, from: * * 0 Reserved * 1 IP (IP version 4) * 2 IP6 (IP version 6) * 3 NSAP * 4 HDLC (8-bit multidrop) * 5 BBN 1822 * 6 802 (includes all 802 media plus Ethernet "canonical format") * 7 E.163 * 8 E.164 (SMDS, Frame Relay, ATM) * 9 F.69 (Telex) * 10 X.121 (X.25, Frame Relay) * 11 IPX * 12 Appletalk * 13 Decnet IV * 14 Banyan Vines * 15 E.164 with NSAP format subaddress * * In addition, the second byte is an "Encoding". 0 is the default * encoding for the address family, and no other encodings are currently * specified. 
* */ enum pimv2_addrtype { pimv2_unicast, pimv2_group, pimv2_source }; /* 0 1 2 3 * 0 1 2 3 4 5 6 7 8 9 0 1 2 3 4 5 6 7 8 9 0 1 2 3 4 5 6 7 8 9 0 1 * +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+ * | Addr Family | Encoding Type | Unicast Address | * +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+++++++ * 0 1 2 3 * 0 1 2 3 4 5 6 7 8 9 0 1 2 3 4 5 6 7 8 9 0 1 2 3 4 5 6 7 8 9 0 1 * +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+ * | Addr Family | Encoding Type | Reserved | Mask Len | * +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+ * | Group multicast Address | * +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+ * 0 1 2 3 * 0 1 2 3 4 5 6 7 8 9 0 1 2 3 4 5 6 7 8 9 0 1 2 3 4 5 6 7 8 9 0 1 * +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+ * | Addr Family | Encoding Type | Rsrvd |S|W|R| Mask Len | * +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+ * | Source Address | * +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+ */ static int pimv2_addr_print(netdissect_options *ndo, const u_char *bp, u_int len, enum pimv2_addrtype at, u_int addr_len, int silent) { int af; int hdrlen; if (addr_len == 0) { if (len < 2) goto trunc; ND_TCHECK(bp[1]); switch (bp[0]) { case 1: af = AF_INET; addr_len = (u_int)sizeof(struct in_addr); break; case 2: af = AF_INET6; addr_len = (u_int)sizeof(struct in6_addr); break; default: return -1; } if (bp[1] != 0) return -1; hdrlen = 2; } else { switch (addr_len) { case sizeof(struct in_addr): af = AF_INET; break; case sizeof(struct in6_addr): af = AF_INET6; break; default: return -1; break; } hdrlen = 0; } bp += hdrlen; len -= hdrlen; switch (at) { case pimv2_unicast: if (len < addr_len) goto trunc; ND_TCHECK2(bp[0], addr_len); if (af == AF_INET) { if (!silent) ND_PRINT((ndo, "%s", ipaddr_string(ndo, bp))); } else if (af == AF_INET6) { if (!silent) ND_PRINT((ndo, "%s", ip6addr_string(ndo, bp))); } return hdrlen + addr_len; 
case pimv2_group: case pimv2_source: if (len < addr_len + 2) goto trunc; ND_TCHECK2(bp[0], addr_len + 2); if (af == AF_INET) { if (!silent) { ND_PRINT((ndo, "%s", ipaddr_string(ndo, bp + 2))); if (bp[1] != 32) ND_PRINT((ndo, "/%u", bp[1])); } } else if (af == AF_INET6) { if (!silent) { ND_PRINT((ndo, "%s", ip6addr_string(ndo, bp + 2))); if (bp[1] != 128) ND_PRINT((ndo, "/%u", bp[1])); } } if (bp[0] && !silent) { if (at == pimv2_group) { ND_PRINT((ndo, "(0x%02x)", bp[0])); } else { ND_PRINT((ndo, "(%s%s%s", bp[0] & 0x04 ? "S" : "", bp[0] & 0x02 ? "W" : "", bp[0] & 0x01 ? "R" : "")); if (bp[0] & 0xf8) { ND_PRINT((ndo, "+0x%02x", bp[0] & 0xf8)); } ND_PRINT((ndo, ")")); } } return hdrlen + 2 + addr_len; default: return -1; } trunc: return -1; } enum checksum_status { CORRECT, INCORRECT, UNVERIFIED }; static enum checksum_status pimv2_check_checksum(netdissect_options *ndo, const u_char *bp, const u_char *bp2, u_int len) { const struct ip *ip; u_int cksum; if (!ND_TTEST2(bp[0], len)) { /* We don't have all the data. */ return (UNVERIFIED); } ip = (const struct ip *)bp2; if (IP_V(ip) == 4) { struct cksum_vec vec[1]; vec[0].ptr = bp; vec[0].len = len; cksum = in_cksum(vec, 1); return (cksum ? INCORRECT : CORRECT); } else if (IP_V(ip) == 6) { const struct ip6_hdr *ip6; ip6 = (const struct ip6_hdr *)bp2; cksum = nextproto6_cksum(ndo, ip6, bp, len, len, IPPROTO_PIM); return (cksum ? 
INCORRECT : CORRECT); } else { return (UNVERIFIED); } } static void pimv2_print(netdissect_options *ndo, register const u_char *bp, register u_int len, const u_char *bp2) { register const u_char *ep; register const struct pim *pim = (const struct pim *)bp; int advance; enum checksum_status cksum_status; int pimv2_addr_len; ep = (const u_char *)ndo->ndo_snapend; if (bp >= ep) return; if (ep > bp + len) ep = bp + len; if (len < 2) goto trunc; ND_TCHECK(pim->pim_rsv); pimv2_addr_len = pim->pim_rsv; if (pimv2_addr_len != 0) ND_PRINT((ndo, ", RFC2117-encoding")); if (len < 4) goto trunc; ND_TCHECK(pim->pim_cksum); ND_PRINT((ndo, ", cksum 0x%04x ", EXTRACT_16BITS(&pim->pim_cksum))); if (EXTRACT_16BITS(&pim->pim_cksum) == 0) { ND_PRINT((ndo, "(unverified)")); } else { if (PIM_TYPE(pim->pim_typever) == PIMV2_TYPE_REGISTER) { /* * The checksum only covers the packet header, * not the encapsulated packet. */ cksum_status = pimv2_check_checksum(ndo, bp, bp2, 8); if (cksum_status == INCORRECT) { /* * To quote RFC 4601, "For interoperability * reasons, a message carrying a checksum * calculated over the entire PIM Register * message should also be accepted." */ cksum_status = pimv2_check_checksum(ndo, bp, bp2, len); } } else { /* * The checksum covers the entire packet. 
*/ cksum_status = pimv2_check_checksum(ndo, bp, bp2, len); } switch (cksum_status) { case CORRECT: ND_PRINT((ndo, "(correct)")); break; case INCORRECT: ND_PRINT((ndo, "(incorrect)")); break; case UNVERIFIED: ND_PRINT((ndo, "(unverified)")); break; } } bp += 4; len -= 4; switch (PIM_TYPE(pim->pim_typever)) { case PIMV2_TYPE_HELLO: { uint16_t otype, olen; while (len > 0) { if (len < 4) goto trunc; ND_TCHECK2(bp[0], 4); otype = EXTRACT_16BITS(&bp[0]); olen = EXTRACT_16BITS(&bp[2]); ND_PRINT((ndo, "\n\t %s Option (%u), length %u, Value: ", tok2str(pimv2_hello_option_values, "Unknown", otype), otype, olen)); bp += 4; len -= 4; if (len < olen) goto trunc; ND_TCHECK2(bp[0], olen); switch (otype) { case PIMV2_HELLO_OPTION_HOLDTIME: if (olen != 2) { ND_PRINT((ndo, "ERROR: Option Length != 2 Bytes (%u)", olen)); } else { unsigned_relts_print(ndo, EXTRACT_16BITS(bp)); } break; case PIMV2_HELLO_OPTION_LANPRUNEDELAY: if (olen != 4) { ND_PRINT((ndo, "ERROR: Option Length != 4 Bytes (%u)", olen)); } else { char t_bit; uint16_t lan_delay, override_interval; lan_delay = EXTRACT_16BITS(bp); override_interval = EXTRACT_16BITS(bp+2); t_bit = (lan_delay & 0x8000)? 
1 : 0; lan_delay &= ~0x8000; ND_PRINT((ndo, "\n\t T-bit=%d, LAN delay %dms, Override interval %dms", t_bit, lan_delay, override_interval)); } break; case PIMV2_HELLO_OPTION_DR_PRIORITY_OLD: case PIMV2_HELLO_OPTION_DR_PRIORITY: switch (olen) { case 0: ND_PRINT((ndo, "Bi-Directional Capability (Old)")); break; case 4: ND_PRINT((ndo, "%u", EXTRACT_32BITS(bp))); break; default: ND_PRINT((ndo, "ERROR: Option Length != 4 Bytes (%u)", olen)); break; } break; case PIMV2_HELLO_OPTION_GENID: if (olen != 4) { ND_PRINT((ndo, "ERROR: Option Length != 4 Bytes (%u)", olen)); } else { ND_PRINT((ndo, "0x%08x", EXTRACT_32BITS(bp))); } break; case PIMV2_HELLO_OPTION_REFRESH_CAP: if (olen != 4) { ND_PRINT((ndo, "ERROR: Option Length != 4 Bytes (%u)", olen)); } else { ND_PRINT((ndo, "v%d", *bp)); if (*(bp+1) != 0) { ND_PRINT((ndo, ", interval ")); unsigned_relts_print(ndo, *(bp+1)); } if (EXTRACT_16BITS(bp+2) != 0) { ND_PRINT((ndo, " ?0x%04x?", EXTRACT_16BITS(bp+2))); } } break; case PIMV2_HELLO_OPTION_BIDIR_CAP: break; case PIMV2_HELLO_OPTION_ADDRESS_LIST_OLD: case PIMV2_HELLO_OPTION_ADDRESS_LIST: if (ndo->ndo_vflag > 1) { const u_char *ptr = bp; u_int plen = len; while (ptr < (bp+olen)) { ND_PRINT((ndo, "\n\t ")); advance = pimv2_addr_print(ndo, ptr, plen, pimv2_unicast, pimv2_addr_len, 0); if (advance < 0) goto trunc; ptr += advance; plen -= advance; } } break; default: if (ndo->ndo_vflag <= 1) print_unknown_data(ndo, bp, "\n\t ", olen); break; } /* do we want to see an additionally hexdump ? 
*/ if (ndo->ndo_vflag> 1) print_unknown_data(ndo, bp, "\n\t ", olen); bp += olen; len -= olen; } break; } case PIMV2_TYPE_REGISTER: { const struct ip *ip; if (len < 4) goto trunc; ND_TCHECK2(*bp, PIMV2_REGISTER_FLAG_LEN); ND_PRINT((ndo, ", Flags [ %s ]\n\t", tok2str(pimv2_register_flag_values, "none", EXTRACT_32BITS(bp)))); bp += 4; len -= 4; /* encapsulated multicast packet */ if (len == 0) goto trunc; ip = (const struct ip *)bp; ND_TCHECK(ip->ip_vhl); switch (IP_V(ip)) { case 0: /* Null header */ ND_TCHECK(ip->ip_dst); ND_PRINT((ndo, "IP-Null-header %s > %s", ipaddr_string(ndo, &ip->ip_src), ipaddr_string(ndo, &ip->ip_dst))); break; case 4: /* IPv4 */ ip_print(ndo, bp, len); break; case 6: /* IPv6 */ ip6_print(ndo, bp, len); break; default: ND_PRINT((ndo, "IP ver %d", IP_V(ip))); break; } break; } case PIMV2_TYPE_REGISTER_STOP: ND_PRINT((ndo, " group=")); if ((advance = pimv2_addr_print(ndo, bp, len, pimv2_group, pimv2_addr_len, 0)) < 0) goto trunc; bp += advance; len -= advance; ND_PRINT((ndo, " source=")); if ((advance = pimv2_addr_print(ndo, bp, len, pimv2_unicast, pimv2_addr_len, 0)) < 0) goto trunc; bp += advance; len -= advance; break; case PIMV2_TYPE_JOIN_PRUNE: case PIMV2_TYPE_GRAFT: case PIMV2_TYPE_GRAFT_ACK: /* * 0 1 2 3 * 0 1 2 3 4 5 6 7 8 9 0 1 2 3 4 5 6 7 8 9 0 1 2 3 4 5 6 7 8 9 0 1 * +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+ * |PIM Ver| Type | Addr length | Checksum | * +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+ * | Unicast-Upstream Neighbor Address | * +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+ * | Reserved | Num groups | Holdtime | * +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+ * | Encoded-Multicast Group Address-1 | * +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+ * | Number of Joined Sources | Number of Pruned Sources | * +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+ * | Encoded-Joined Source Address-1 | * 
+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+ * | . | * | . | * +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+ * | Encoded-Joined Source Address-n | * +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+ * | Encoded-Pruned Source Address-1 | * +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+ * | . | * | . | * +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+ * | Encoded-Pruned Source Address-n | * +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+ * | . | * | . | * +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+ * | Encoded-Multicast Group Address-n | * +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+ */ { uint8_t ngroup; uint16_t holdtime; uint16_t njoin; uint16_t nprune; int i, j; if (PIM_TYPE(pim->pim_typever) != 7) { /*not for Graft-ACK*/ ND_PRINT((ndo, ", upstream-neighbor: ")); if ((advance = pimv2_addr_print(ndo, bp, len, pimv2_unicast, pimv2_addr_len, 0)) < 0) goto trunc; bp += advance; len -= advance; } if (len < 4) goto trunc; ND_TCHECK2(*bp, 4); ngroup = bp[1]; holdtime = EXTRACT_16BITS(&bp[2]); ND_PRINT((ndo, "\n\t %u group(s)", ngroup)); if (PIM_TYPE(pim->pim_typever) != 7) { /*not for Graft-ACK*/ ND_PRINT((ndo, ", holdtime: ")); if (holdtime == 0xffff) ND_PRINT((ndo, "infinite")); else unsigned_relts_print(ndo, holdtime); } bp += 4; len -= 4; for (i = 0; i < ngroup; i++) { ND_PRINT((ndo, "\n\t group #%u: ", i+1)); if ((advance = pimv2_addr_print(ndo, bp, len, pimv2_group, pimv2_addr_len, 0)) < 0) goto trunc; bp += advance; len -= advance; if (len < 4) goto trunc; ND_TCHECK2(*bp, 4); njoin = EXTRACT_16BITS(&bp[0]); nprune = EXTRACT_16BITS(&bp[2]); ND_PRINT((ndo, ", joined sources: %u, pruned sources: %u", njoin, nprune)); bp += 4; len -= 4; for (j = 0; j < njoin; j++) { ND_PRINT((ndo, "\n\t joined source #%u: ", j+1)); if ((advance = pimv2_addr_print(ndo, bp, len, pimv2_source, pimv2_addr_len, 0)) < 0) goto trunc; bp += 
advance; len -= advance; } for (j = 0; j < nprune; j++) { ND_PRINT((ndo, "\n\t pruned source #%u: ", j+1)); if ((advance = pimv2_addr_print(ndo, bp, len, pimv2_source, pimv2_addr_len, 0)) < 0) goto trunc; bp += advance; len -= advance; } } break; } case PIMV2_TYPE_BOOTSTRAP: { int i, j, frpcnt; /* Fragment Tag, Hash Mask len, and BSR-priority */ if (len < 2) goto trunc; ND_TCHECK_16BITS(bp); ND_PRINT((ndo, " tag=%x", EXTRACT_16BITS(bp))); bp += 2; len -= 2; if (len < 1) goto trunc; ND_TCHECK(bp[0]); ND_PRINT((ndo, " hashmlen=%d", bp[0])); if (len < 2) goto trunc; ND_TCHECK(bp[2]); ND_PRINT((ndo, " BSRprio=%d", bp[1])); bp += 2; len -= 2; /* Encoded-Unicast-BSR-Address */ ND_PRINT((ndo, " BSR=")); if ((advance = pimv2_addr_print(ndo, bp, len, pimv2_unicast, pimv2_addr_len, 0)) < 0) goto trunc; bp += advance; len -= advance; for (i = 0; bp < ep; i++) { /* Encoded-Group Address */ ND_PRINT((ndo, " (group%d: ", i)); if ((advance = pimv2_addr_print(ndo, bp, len, pimv2_group, pimv2_addr_len, 0)) < 0) goto trunc; bp += advance; len -= advance; /* RP-Count, Frag RP-Cnt, and rsvd */ if (len < 1) goto trunc; ND_TCHECK(bp[0]); ND_PRINT((ndo, " RPcnt=%d", bp[0])); if (len < 2) goto trunc; ND_TCHECK(bp[1]); ND_PRINT((ndo, " FRPcnt=%d", frpcnt = bp[1])); if (len < 4) goto trunc; bp += 4; len -= 4; for (j = 0; j < frpcnt && bp < ep; j++) { /* each RP info */ ND_PRINT((ndo, " RP%d=", j)); if ((advance = pimv2_addr_print(ndo, bp, len, pimv2_unicast, pimv2_addr_len, 0)) < 0) goto trunc; bp += advance; len -= advance; if (len < 2) goto trunc; ND_TCHECK_16BITS(bp); ND_PRINT((ndo, ",holdtime=")); unsigned_relts_print(ndo, EXTRACT_16BITS(bp)); if (len < 3) goto trunc; ND_TCHECK(bp[2]); ND_PRINT((ndo, ",prio=%d", bp[2])); if (len < 4) goto trunc; bp += 4; len -= 4; } ND_PRINT((ndo, ")")); } break; } case PIMV2_TYPE_ASSERT: ND_PRINT((ndo, " group=")); if ((advance = pimv2_addr_print(ndo, bp, len, pimv2_group, pimv2_addr_len, 0)) < 0) goto trunc; bp += advance; len -= advance; 
ND_PRINT((ndo, " src=")); if ((advance = pimv2_addr_print(ndo, bp, len, pimv2_unicast, pimv2_addr_len, 0)) < 0) goto trunc; bp += advance; len -= advance; if (len < 8) goto trunc; ND_TCHECK2(*bp, 8); if (bp[0] & 0x80) ND_PRINT((ndo, " RPT")); ND_PRINT((ndo, " pref=%u", EXTRACT_32BITS(&bp[0]) & 0x7fffffff)); ND_PRINT((ndo, " metric=%u", EXTRACT_32BITS(&bp[4]))); break; case PIMV2_TYPE_CANDIDATE_RP: { int i, pfxcnt; /* Prefix-Cnt, Priority, and Holdtime */ if (len < 1) goto trunc; ND_TCHECK(bp[0]); ND_PRINT((ndo, " prefix-cnt=%d", bp[0])); pfxcnt = bp[0]; if (len < 2) goto trunc; ND_TCHECK(bp[1]); ND_PRINT((ndo, " prio=%d", bp[1])); if (len < 4) goto trunc; ND_TCHECK_16BITS(&bp[2]); ND_PRINT((ndo, " holdtime=")); unsigned_relts_print(ndo, EXTRACT_16BITS(&bp[2])); bp += 4; len -= 4; /* Encoded-Unicast-RP-Address */ ND_PRINT((ndo, " RP=")); if ((advance = pimv2_addr_print(ndo, bp, len, pimv2_unicast, pimv2_addr_len, 0)) < 0) goto trunc; bp += advance; len -= advance; /* Encoded-Group Addresses */ for (i = 0; i < pfxcnt && bp < ep; i++) { ND_PRINT((ndo, " Group%d=", i)); if ((advance = pimv2_addr_print(ndo, bp, len, pimv2_group, pimv2_addr_len, 0)) < 0) goto trunc; bp += advance; len -= advance; } break; } case PIMV2_TYPE_PRUNE_REFRESH: ND_PRINT((ndo, " src=")); if ((advance = pimv2_addr_print(ndo, bp, len, pimv2_unicast, pimv2_addr_len, 0)) < 0) goto trunc; bp += advance; len -= advance; ND_PRINT((ndo, " grp=")); if ((advance = pimv2_addr_print(ndo, bp, len, pimv2_group, pimv2_addr_len, 0)) < 0) goto trunc; bp += advance; len -= advance; ND_PRINT((ndo, " forwarder=")); if ((advance = pimv2_addr_print(ndo, bp, len, pimv2_unicast, pimv2_addr_len, 0)) < 0) goto trunc; bp += advance; len -= advance; if (len < 2) goto trunc; ND_TCHECK_16BITS(bp); ND_PRINT((ndo, " TUNR ")); unsigned_relts_print(ndo, EXTRACT_16BITS(bp)); break; default: ND_PRINT((ndo, " [type %d]", PIM_TYPE(pim->pim_typever))); break; } return; trunc: ND_PRINT((ndo, "[|pim]")); } /* * Local Variables: * 
c-style: whitesmith * c-basic-offset: 8 * End: */
pimv1_join_prune_print(netdissect_options *ndo, register const u_char *bp, register u_int len) { int ngroups, njoin, nprune; int njp; /* If it's a single group and a single source, use 1-line output. */ if (ND_TTEST2(bp[0], 30) && bp[11] == 1 && ((njoin = EXTRACT_16BITS(&bp[20])) + EXTRACT_16BITS(&bp[22])) == 1) { int hold; ND_PRINT((ndo, " RPF %s ", ipaddr_string(ndo, bp))); hold = EXTRACT_16BITS(&bp[6]); if (hold != 180) { ND_PRINT((ndo, "Hold ")); unsigned_relts_print(ndo, hold); } ND_PRINT((ndo, "%s (%s/%d, %s", njoin ? "Join" : "Prune", ipaddr_string(ndo, &bp[26]), bp[25] & 0x3f, ipaddr_string(ndo, &bp[12]))); if (EXTRACT_32BITS(&bp[16]) != 0xffffffff) ND_PRINT((ndo, "/%s", ipaddr_string(ndo, &bp[16]))); ND_PRINT((ndo, ") %s%s %s", (bp[24] & 0x01) ? "Sparse" : "Dense", (bp[25] & 0x80) ? " WC" : "", (bp[25] & 0x40) ? "RP" : "SPT")); return; } ND_TCHECK2(bp[0], sizeof(struct in_addr)); if (ndo->ndo_vflag > 1) ND_PRINT((ndo, "\n")); ND_PRINT((ndo, " Upstream Nbr: %s", ipaddr_string(ndo, bp))); ND_TCHECK2(bp[6], 2); if (ndo->ndo_vflag > 1) ND_PRINT((ndo, "\n")); ND_PRINT((ndo, " Hold time: ")); unsigned_relts_print(ndo, EXTRACT_16BITS(&bp[6])); if (ndo->ndo_vflag < 2) return; bp += 8; len -= 8; ND_TCHECK2(bp[0], 4); ngroups = bp[3]; bp += 4; len -= 4; while (ngroups--) { /* * XXX - does the address have length "addrlen" and the * mask length "maddrlen"? */ ND_TCHECK2(bp[0], sizeof(struct in_addr)); ND_PRINT((ndo, "\n\tGroup: %s", ipaddr_string(ndo, bp))); ND_TCHECK2(bp[4], sizeof(struct in_addr)); if (EXTRACT_32BITS(&bp[4]) != 0xffffffff) ND_PRINT((ndo, "/%s", ipaddr_string(ndo, &bp[4]))); ND_TCHECK2(bp[8], 4); njoin = EXTRACT_16BITS(&bp[8]); nprune = EXTRACT_16BITS(&bp[10]); ND_PRINT((ndo, " joined: %d pruned: %d", njoin, nprune)); bp += 12; len -= 12; for (njp = 0; njp < (njoin + nprune); njp++) { const char *type; if (njp < njoin) type = "Join "; else type = "Prune"; ND_TCHECK2(bp[0], 6); ND_PRINT((ndo, "\n\t%s %s%s%s%s/%d", type, (bp[0] & 0x01) ? 
"Sparse " : "Dense ", (bp[1] & 0x80) ? "WC " : "", (bp[1] & 0x40) ? "RP " : "SPT ", ipaddr_string(ndo, &bp[2]), bp[1] & 0x3f)); bp += 6; len -= 6; } } return; trunc: ND_PRINT((ndo, "[|pim]")); return; }
pimv1_join_prune_print(netdissect_options *ndo, register const u_char *bp, register u_int len) { int ngroups, njoin, nprune; int njp; /* If it's a single group and a single source, use 1-line output. */ if (ND_TTEST2(bp[0], 30) && bp[11] == 1 && ((njoin = EXTRACT_16BITS(&bp[20])) + EXTRACT_16BITS(&bp[22])) == 1) { int hold; ND_PRINT((ndo, " RPF %s ", ipaddr_string(ndo, bp))); hold = EXTRACT_16BITS(&bp[6]); if (hold != 180) { ND_PRINT((ndo, "Hold ")); unsigned_relts_print(ndo, hold); } ND_PRINT((ndo, "%s (%s/%d, %s", njoin ? "Join" : "Prune", ipaddr_string(ndo, &bp[26]), bp[25] & 0x3f, ipaddr_string(ndo, &bp[12]))); if (EXTRACT_32BITS(&bp[16]) != 0xffffffff) ND_PRINT((ndo, "/%s", ipaddr_string(ndo, &bp[16]))); ND_PRINT((ndo, ") %s%s %s", (bp[24] & 0x01) ? "Sparse" : "Dense", (bp[25] & 0x80) ? " WC" : "", (bp[25] & 0x40) ? "RP" : "SPT")); return; } if (len < sizeof(struct in_addr)) goto trunc; ND_TCHECK2(bp[0], sizeof(struct in_addr)); if (ndo->ndo_vflag > 1) ND_PRINT((ndo, "\n")); ND_PRINT((ndo, " Upstream Nbr: %s", ipaddr_string(ndo, bp))); bp += 4; len -= 4; if (len < 4) goto trunc; ND_TCHECK2(bp[2], 2); if (ndo->ndo_vflag > 1) ND_PRINT((ndo, "\n")); ND_PRINT((ndo, " Hold time: ")); unsigned_relts_print(ndo, EXTRACT_16BITS(&bp[2])); if (ndo->ndo_vflag < 2) return; bp += 4; len -= 4; if (len < 4) goto trunc; ND_TCHECK2(bp[0], 4); ngroups = bp[3]; bp += 4; len -= 4; while (ngroups--) { /* * XXX - does the address have length "addrlen" and the * mask length "maddrlen"? 
*/ if (len < 4) goto trunc; ND_TCHECK2(bp[0], sizeof(struct in_addr)); ND_PRINT((ndo, "\n\tGroup: %s", ipaddr_string(ndo, bp))); bp += 4; len -= 4; if (len < 4) goto trunc; ND_TCHECK2(bp[0], sizeof(struct in_addr)); if (EXTRACT_32BITS(&bp[0]) != 0xffffffff) ND_PRINT((ndo, "/%s", ipaddr_string(ndo, &bp[0]))); bp += 4; len -= 4; if (len < 4) goto trunc; ND_TCHECK2(bp[0], 4); njoin = EXTRACT_16BITS(&bp[0]); nprune = EXTRACT_16BITS(&bp[2]); ND_PRINT((ndo, " joined: %d pruned: %d", njoin, nprune)); bp += 4; len -= 4; for (njp = 0; njp < (njoin + nprune); njp++) { const char *type; if (njp < njoin) type = "Join "; else type = "Prune"; if (len < 6) goto trunc; ND_TCHECK2(bp[0], 6); ND_PRINT((ndo, "\n\t%s %s%s%s%s/%d", type, (bp[0] & 0x01) ? "Sparse " : "Dense ", (bp[1] & 0x80) ? "WC " : "", (bp[1] & 0x40) ? "RP " : "SPT ", ipaddr_string(ndo, &bp[2]), bp[1] & 0x3f)); bp += 6; len -= 6; } } return; trunc: ND_PRINT((ndo, "[|pim]")); return; }
{'added': [(172, '\tif (len < sizeof(struct in_addr))'), (173, '\t\tgoto trunc;'), (178, '\tbp += 4;'), (179, '\tlen -= 4;'), (180, '\tif (len < 4)'), (181, '\t\tgoto trunc;'), (182, '\tND_TCHECK2(bp[2], 2);'), (186, '\tunsigned_relts_print(ndo, EXTRACT_16BITS(&bp[2]));'), (189, '\tbp += 4;'), (190, '\tlen -= 4;'), (192, '\tif (len < 4)'), (193, '\t\tgoto trunc;'), (203, '\t\tif (len < 4)'), (204, '\t\t\tgoto trunc;'), (207, '\t\tbp += 4;'), (208, '\t\tlen -= 4;'), (209, '\t\tif (len < 4)'), (210, '\t\t\tgoto trunc;'), (211, '\t\tND_TCHECK2(bp[0], sizeof(struct in_addr));'), (212, '\t\tif (EXTRACT_32BITS(&bp[0]) != 0xffffffff)'), (213, '\t\t\tND_PRINT((ndo, "/%s", ipaddr_string(ndo, &bp[0])));'), (214, '\t\tbp += 4;'), (215, '\t\tlen -= 4;'), (216, '\t\tif (len < 4)'), (217, '\t\t\tgoto trunc;'), (218, '\t\tND_TCHECK2(bp[0], 4);'), (219, '\t\tnjoin = EXTRACT_16BITS(&bp[0]);'), (220, '\t\tnprune = EXTRACT_16BITS(&bp[2]);'), (222, '\t\tbp += 4;'), (223, '\t\tlen -= 4;'), (231, '\t\t\tif (len < 6)'), (232, '\t\t\t\tgoto trunc;'), (238, '\t\t\t ipaddr_string(ndo, &bp[2]),'), (239, '\t\t\t bp[1] & 0x3f));'), (321, '\t\tif (ndo->ndo_vflag) {'), (322, '\t\t\tif (len < 8)'), (323, '\t\t\t\tgoto trunc;'), (325, '\t\t}'), (352, '\tif (len < 8)'), (353, '\t\tgoto trunc;'), (401, '\t\tif (len < 4)'), (402, '\t\t\tgoto trunc;'), (405, '\t\tbp += 4;'), (406, '\t\tlen -= 4;'), (407, '\t\tif (len < 1)'), (408, '\t\t\tgoto trunc;'), (409, '\t\tND_TCHECK(bp[0]);'), (410, '\t\tswitch (bp[0] & 0x3) {'), (420, '\t\tif (bp[0] & 0xfc)'), (421, '\t\t\tND_PRINT((ndo, " [rsvd=0x%02x]", bp[0] & 0xfc));'), (422, '\t\tbp += 1;'), (423, '\t\tlen -= 1;'), (424, '\t\tif (len < 1)'), (425, '\t\t\tgoto trunc;'), (426, '\t\tND_TCHECK(bp[0]);'), (427, '\t\tnentries = bp[0];'), (428, '\t\tbp += 1;'), (429, '\t\tlen -= 1;'), (432, '\t\t\tif (len < 6)'), (433, '\t\t\t\tgoto trunc;'), (464, '\tND_TCHECK(pim->pim_typever);'), (488, ''), (489, 'trunc:'), (490, '\tND_PRINT((ndo, "[|pim]"));'), (491, 
'\treturn;'), (560, ' const u_char *bp, u_int len, enum pimv2_addrtype at,'), (561, ' u_int addr_len, int silent)'), (564, '\tint hdrlen;'), (566, '\tif (addr_len == 0) {'), (567, '\t\tif (len < 2)'), (568, '\t\t\tgoto trunc;'), (573, '\t\t\taddr_len = (u_int)sizeof(struct in_addr);'), (577, '\t\t\taddr_len = (u_int)sizeof(struct in6_addr);'), (586, '\t\tswitch (addr_len) {'), (601, '\tlen -= hdrlen;'), (604, '\t\tif (len < addr_len)'), (605, '\t\t\tgoto trunc;'), (606, '\t\tND_TCHECK2(bp[0], addr_len);'), (615, '\t\treturn hdrlen + addr_len;'), (618, '\t\tif (len < addr_len + 2)'), (619, '\t\t\tgoto trunc;'), (620, '\t\tND_TCHECK2(bp[0], addr_len + 2);'), (649, '\t\treturn hdrlen + 2 + addr_len;'), (701, '\tint pimv2_addr_len;'), (708, '\tif (len < 2)'), (709, '\t\tgoto trunc;'), (715, '\tif (len < 4)'), (716, '\t\tgoto trunc;'), (717, '\tND_TCHECK(pim->pim_cksum);'), (758, '\tbp += 4;'), (759, '\tlen -= 4;'), (765, '\t\twhile (len > 0) {'), (766, '\t\t\tif (len < 4)'), (767, '\t\t\t\tgoto trunc;'), (776, '\t\t\tlen -= 4;'), (778, '\t\t\tif (len < olen)'), (779, '\t\t\t\tgoto trunc;'), (780, '\t\t\tND_TCHECK2(bp[0], olen);'), (850, '\t\t\t\t\tu_int plen = len;'), (853, '\t\t\t\t\t\tadvance = pimv2_addr_print(ndo, ptr, plen, pimv2_unicast, pimv2_addr_len, 0);'), (854, '\t\t\t\t\t\tif (advance < 0)'), (855, '\t\t\t\t\t\t\tgoto trunc;'), (857, '\t\t\t\t\t\tplen -= advance;'), (870, '\t\t\tlen -= olen;'), (879, '\t\tif (len < 4)'), (880, '\t\t\tgoto trunc;'), (881, '\t\tND_TCHECK2(*bp, PIMV2_REGISTER_FLAG_LEN);'), (886, '\t\t EXTRACT_32BITS(bp))));'), (888, '\t\tbp += 4; len -= 4;'), (890, '\t\tif (len == 0)'), (891, '\t\t\tgoto trunc;'), (893, '\t\tND_TCHECK(ip->ip_vhl);'), (896, '\t\t\tND_TCHECK(ip->ip_dst);'), (919, '\t\tif ((advance = pimv2_addr_print(ndo, bp, len, pimv2_group, pimv2_addr_len, 0)) < 0)'), (920, '\t\t\tgoto trunc;'), (923, '\t\tif ((advance = pimv2_addr_print(ndo, bp, len, pimv2_unicast, pimv2_addr_len, 0)) < 0)'), (924, '\t\t\tgoto trunc;'), (977, 
'\t\t\tif ((advance = pimv2_addr_print(ndo, bp, len, pimv2_unicast, pimv2_addr_len, 0)) < 0)'), (978, '\t\t\t\tgoto trunc;'), (981, '\t\tif (len < 4)'), (982, '\t\t\tgoto trunc;'), (983, '\t\tND_TCHECK2(*bp, 4);'), (997, '\t\t\tif ((advance = pimv2_addr_print(ndo, bp, len, pimv2_group, pimv2_addr_len, 0)) < 0)'), (998, '\t\t\t\tgoto trunc;'), (1000, '\t\t\tif (len < 4)'), (1001, '\t\t\t\tgoto trunc;'), (1002, '\t\t\tND_TCHECK2(*bp, 4);'), (1009, '\t\t\t\tif ((advance = pimv2_addr_print(ndo, bp, len, pimv2_source, pimv2_addr_len, 0)) < 0)'), (1010, '\t\t\t\t\tgoto trunc;'), (1015, '\t\t\t\tif ((advance = pimv2_addr_print(ndo, bp, len, pimv2_source, pimv2_addr_len, 0)) < 0)'), (1016, '\t\t\t\t\tgoto trunc;'), (1028, '\t\tif (len < 2)'), (1029, '\t\t\tgoto trunc;'), (1030, '\t\tND_TCHECK_16BITS(bp);'), (1032, '\t\tbp += 2;'), (1033, '\t\tlen -= 2;'), (1034, '\t\tif (len < 1)'), (1035, '\t\t\tgoto trunc;'), (1036, '\t\tND_TCHECK(bp[0]);'), (1038, '\t\tif (len < 2)'), (1039, '\t\t\tgoto trunc;'), (1040, '\t\tND_TCHECK(bp[2]);'), (1043, '\t\tlen -= 2;'), (1047, '\t\tif ((advance = pimv2_addr_print(ndo, bp, len, pimv2_unicast, pimv2_addr_len, 0)) < 0)'), (1048, '\t\t\tgoto trunc;'), (1050, '\t\tlen -= advance;'), (1055, '\t\t\tif ((advance = pimv2_addr_print(ndo, bp, len, pimv2_group, pimv2_addr_len, 0)) < 0)'), (1056, '\t\t\t\tgoto trunc;'), (1058, '\t\t\tlen -= advance;'), (1061, '\t\t\tif (len < 1)'), (1062, '\t\t\t\tgoto trunc;'), (1063, '\t\t\tND_TCHECK(bp[0]);'), (1065, '\t\t\tif (len < 2)'), (1066, '\t\t\t\tgoto trunc;'), (1067, '\t\t\tND_TCHECK(bp[1]);'), (1069, '\t\t\tif (len < 4)'), (1070, '\t\t\t\tgoto trunc;'), (1072, '\t\t\tlen -= 4;'), (1077, '\t\t\t\tif ((advance = pimv2_addr_print(ndo, bp, len,'), (1079, '\t\t\t\t\t\t\t\tpimv2_addr_len,'), (1080, '\t\t\t\t\t\t\t\t0)) < 0)'), (1081, '\t\t\t\t\tgoto trunc;'), (1083, '\t\t\t\tlen -= advance;'), (1085, '\t\t\t\tif (len < 2)'), (1086, '\t\t\t\t\tgoto trunc;'), (1087, '\t\t\t\tND_TCHECK_16BITS(bp);'), (1090, 
'\t\t\t\tif (len < 3)'), (1091, '\t\t\t\t\tgoto trunc;'), (1092, '\t\t\t\tND_TCHECK(bp[2]);'), (1094, '\t\t\t\tif (len < 4)'), (1095, '\t\t\t\t\tgoto trunc;'), (1097, '\t\t\t\tlen -= 4;'), (1105, '\t\tif ((advance = pimv2_addr_print(ndo, bp, len, pimv2_group, pimv2_addr_len, 0)) < 0)'), (1106, '\t\t\tgoto trunc;'), (1109, '\t\tif ((advance = pimv2_addr_print(ndo, bp, len, pimv2_unicast, pimv2_addr_len, 0)) < 0)'), (1110, '\t\t\tgoto trunc;'), (1112, '\t\tif (len < 8)'), (1113, '\t\t\tgoto trunc;'), (1114, '\t\tND_TCHECK2(*bp, 8);'), (1126, '\t\tif (len < 1)'), (1127, '\t\t\tgoto trunc;'), (1128, '\t\tND_TCHECK(bp[0]);'), (1131, '\t\tif (len < 2)'), (1132, '\t\t\tgoto trunc;'), (1133, '\t\tND_TCHECK(bp[1]);'), (1135, '\t\tif (len < 4)'), (1136, '\t\t\tgoto trunc;'), (1137, '\t\tND_TCHECK_16BITS(&bp[2]);'), (1141, '\t\tlen -= 4;'), (1145, '\t\tif ((advance = pimv2_addr_print(ndo, bp, len, pimv2_unicast, pimv2_addr_len, 0)) < 0)'), (1146, '\t\t\tgoto trunc;'), (1148, '\t\tlen -= advance;'), (1153, '\t\t\tif ((advance = pimv2_addr_print(ndo, bp, len, pimv2_group, pimv2_addr_len, 0)) < 0)'), (1154, '\t\t\t\tgoto trunc;'), (1156, '\t\t\tlen -= advance;'), (1163, '\t\tif ((advance = pimv2_addr_print(ndo, bp, len, pimv2_unicast, pimv2_addr_len, 0)) < 0)'), (1164, '\t\t\tgoto trunc;'), (1166, '\t\tlen -= advance;'), (1168, '\t\tif ((advance = pimv2_addr_print(ndo, bp, len, pimv2_group, pimv2_addr_len, 0)) < 0)'), (1169, '\t\t\tgoto trunc;'), (1171, '\t\tlen -= advance;'), (1173, '\t\tif ((advance = pimv2_addr_print(ndo, bp, len, pimv2_unicast, pimv2_addr_len, 0)) < 0)'), (1174, '\t\t\tgoto trunc;'), (1176, '\t\tlen -= advance;'), (1177, '\t\tif (len < 2)'), (1178, '\t\t\tgoto trunc;'), (1179, '\t\tND_TCHECK_16BITS(bp);')], 'deleted': [(176, '\tND_TCHECK2(bp[6], 2);'), (180, '\tunsigned_relts_print(ndo, EXTRACT_16BITS(&bp[6]));'), (183, '\tbp += 8;'), (184, '\tlen -= 8;'), (197, '\t\tND_TCHECK2(bp[4], sizeof(struct in_addr));'), (198, '\t\tif (EXTRACT_32BITS(&bp[4]) != 
0xffffffff)'), (199, '\t\t\tND_PRINT((ndo, "/%s", ipaddr_string(ndo, &bp[4])));'), (200, '\t\tND_TCHECK2(bp[8], 4);'), (201, '\t\tnjoin = EXTRACT_16BITS(&bp[8]);'), (202, '\t\tnprune = EXTRACT_16BITS(&bp[10]);'), (204, '\t\tbp += 12;'), (205, '\t\tlen -= 12;'), (218, '\t\t\tipaddr_string(ndo, &bp[2]), bp[1] & 0x3f));'), (233, '\tregister const u_char *ep;'), (236, '\tep = (const u_char *)ndo->ndo_snapend;'), (237, '\tif (bp >= ep)'), (238, '\t\treturn;'), (239, ''), (305, '\t\tif (ndo->ndo_vflag)'), (382, '\t\tND_TCHECK(bp[4]);'), (383, '\t\tswitch (bp[4] & 0x3) {'), (393, '\t\tif (bp[4] & 0xfc)'), (394, '\t\t\tND_PRINT((ndo, " [rsvd=0x%02x]", bp[4] & 0xfc));'), (395, '\t\tND_TCHECK(bp[5]);'), (396, '\t\tnentries = bp[5];'), (397, '\t\tbp += 6; len -= 6;'), (424, '\tregister const u_char *ep;'), (427, '\tep = (const u_char *)ndo->ndo_snapend;'), (428, '\tif (bp >= ep)'), (429, '\t\treturn;'), (499, 'static int pimv2_addr_len;'), (500, ''), (527, ' const u_char *bp, enum pimv2_addrtype at, int silent)'), (530, '\tint len, hdrlen;'), (532, '\tND_TCHECK(bp[0]);'), (533, ''), (534, '\tif (pimv2_addr_len == 0) {'), (539, '\t\t\tlen = sizeof(struct in_addr);'), (543, '\t\t\tlen = sizeof(struct in6_addr);'), (552, '\t\tswitch (pimv2_addr_len) {'), (563, '\t\tlen = pimv2_addr_len;'), (570, '\t\tND_TCHECK2(bp[0], len);'), (579, '\t\treturn hdrlen + len;'), (582, '\t\tND_TCHECK2(bp[0], len + 2);'), (611, '\t\treturn hdrlen + 2 + len;'), (719, '\t\tbp += 4;'), (720, '\t\twhile (bp < ep) {'), (724, '\t\t\tND_TCHECK2(bp[0], 4 + olen);'), (802, '\t\t\t\t\t\tadvance = pimv2_addr_print(ndo, ptr, pimv2_unicast, 0);'), (803, '\t\t\t\t\t\tif (advance < 0) {'), (804, '\t\t\t\t\t\t\tND_PRINT((ndo, "..."));'), (805, '\t\t\t\t\t\t\tbreak;'), (806, '\t\t\t\t\t\t}'), (828, '\t\tND_TCHECK2(*(bp + 4), PIMV2_REGISTER_FLAG_LEN);'), (833, '\t\t EXTRACT_32BITS(bp+4))));'), (835, '\t\tbp += 8; len -= 8;'), (861, '\t\tbp += 4; len -= 4;'), (862, '\t\tif (bp >= ep)'), (863, '\t\t\tbreak;'), (865, 
'\t\tif ((advance = pimv2_addr_print(ndo, bp, pimv2_group, 0)) < 0) {'), (866, '\t\t\tND_PRINT((ndo, "..."));'), (867, '\t\t\tbreak;'), (868, '\t\t}'), (870, '\t\tif (bp >= ep)'), (871, '\t\t\tbreak;'), (873, '\t\tif ((advance = pimv2_addr_print(ndo, bp, pimv2_unicast, 0)) < 0) {'), (874, '\t\t\tND_PRINT((ndo, "..."));'), (875, '\t\t\tbreak;'), (876, '\t\t}'), (927, '\t\tbp += 4; len -= 4;'), (929, '\t\t\tif (bp >= ep)'), (930, '\t\t\t\tbreak;'), (932, '\t\t\tif ((advance = pimv2_addr_print(ndo, bp, pimv2_unicast, 0)) < 0) {'), (933, '\t\t\t\tND_PRINT((ndo, "..."));'), (934, '\t\t\t\tbreak;'), (935, '\t\t\t}'), (938, '\t\tif (bp + 4 > ep)'), (939, '\t\t\tbreak;'), (952, '\t\t\tif (bp >= ep)'), (953, '\t\t\t\tgoto jp_done;'), (955, '\t\t\tif ((advance = pimv2_addr_print(ndo, bp, pimv2_group, 0)) < 0) {'), (956, '\t\t\t\tND_PRINT((ndo, "...)"));'), (957, '\t\t\t\tgoto jp_done;'), (958, '\t\t\t}'), (960, '\t\t\tif (bp + 4 > ep) {'), (961, '\t\t\t\tND_PRINT((ndo, "...)"));'), (962, '\t\t\t\tgoto jp_done;'), (963, '\t\t\t}'), (970, '\t\t\t\tif ((advance = pimv2_addr_print(ndo, bp, pimv2_source, 0)) < 0) {'), (971, '\t\t\t\t\tND_PRINT((ndo, "...)"));'), (972, '\t\t\t\t\tgoto jp_done;'), (973, '\t\t\t\t}'), (978, '\t\t\t\tif ((advance = pimv2_addr_print(ndo, bp, pimv2_source, 0)) < 0) {'), (979, '\t\t\t\t\tND_PRINT((ndo, "...)"));'), (980, '\t\t\t\t\tgoto jp_done;'), (981, '\t\t\t\t}'), (985, '\tjp_done:'), (992, '\t\tbp += 4;'), (995, '\t\tif (bp + sizeof(uint16_t) >= ep) break;'), (997, '\t\tbp += sizeof(uint16_t);'), (998, '\t\tif (bp >= ep) break;'), (1000, '\t\tif (bp + 1 >= ep) break;'), (1005, '\t\tif (bp >= ep) break;'), (1007, '\t\tif ((advance = pimv2_addr_print(ndo, bp, pimv2_unicast, 0)) < 0) {'), (1008, '\t\t\tND_PRINT((ndo, "..."));'), (1009, '\t\t\tbreak;'), (1010, '\t\t}'), (1016, '\t\t\tif ((advance = pimv2_addr_print(ndo, bp, pimv2_group, 0))'), (1017, '\t\t\t < 0) {'), (1018, '\t\t\t\tND_PRINT((ndo, "...)"));'), (1019, '\t\t\t\tgoto bs_done;'), (1020, 
'\t\t\t}'), (1024, '\t\t\tif (bp >= ep) {'), (1025, '\t\t\t\tND_PRINT((ndo, "...)"));'), (1026, '\t\t\t\tgoto bs_done;'), (1027, '\t\t\t}'), (1029, '\t\t\tif (bp + 1 >= ep) {'), (1030, '\t\t\t\tND_PRINT((ndo, "...)"));'), (1031, '\t\t\t\tgoto bs_done;'), (1032, '\t\t\t}'), (1039, '\t\t\t\tif ((advance = pimv2_addr_print(ndo, bp,'), (1041, '\t\t\t\t\t\t\t\t0)) < 0) {'), (1042, '\t\t\t\t\tND_PRINT((ndo, "...)"));'), (1043, '\t\t\t\t\tgoto bs_done;'), (1044, '\t\t\t\t}'), (1047, '\t\t\t\tif (bp + 1 >= ep) {'), (1048, '\t\t\t\t\tND_PRINT((ndo, "...)"));'), (1049, '\t\t\t\t\tgoto bs_done;'), (1050, '\t\t\t\t}'), (1053, '\t\t\t\tif (bp + 2 >= ep) {'), (1054, '\t\t\t\t\tND_PRINT((ndo, "...)"));'), (1055, '\t\t\t\t\tgoto bs_done;'), (1056, '\t\t\t\t}'), (1062, '\t bs_done:'), (1066, '\t\tbp += 4; len -= 4;'), (1067, '\t\tif (bp >= ep)'), (1068, '\t\t\tbreak;'), (1070, '\t\tif ((advance = pimv2_addr_print(ndo, bp, pimv2_group, 0)) < 0) {'), (1071, '\t\t\tND_PRINT((ndo, "..."));'), (1072, '\t\t\tbreak;'), (1073, '\t\t}'), (1075, '\t\tif (bp >= ep)'), (1076, '\t\t\tbreak;'), (1078, '\t\tif ((advance = pimv2_addr_print(ndo, bp, pimv2_unicast, 0)) < 0) {'), (1079, '\t\t\tND_PRINT((ndo, "..."));'), (1080, '\t\t\tbreak;'), (1081, '\t\t}'), (1083, '\t\tif (bp + 8 > ep)'), (1084, '\t\t\tbreak;'), (1094, '\t\tbp += 4;'), (1097, '\t\tif (bp >= ep) break;'), (1100, '\t\tif (bp + 1 >= ep) break;'), (1102, '\t\tif (bp + 3 >= ep) break;'), (1108, '\t\tif (bp >= ep) break;'), (1110, '\t\tif ((advance = pimv2_addr_print(ndo, bp, pimv2_unicast, 0)) < 0) {'), (1111, '\t\t\tND_PRINT((ndo, "..."));'), (1112, '\t\t\tbreak;'), (1113, '\t\t}'), (1119, '\t\t\tif ((advance = pimv2_addr_print(ndo, bp, pimv2_group, 0))'), (1120, '\t\t\t < 0) {'), (1121, '\t\t\t\tND_PRINT((ndo, "..."));'), (1122, '\t\t\t\tbreak;'), (1123, '\t\t\t}'), (1131, '\t\tif ((advance = pimv2_addr_print(ndo, bp, pimv2_unicast, 0)) < 0) {'), (1132, '\t\t\tND_PRINT((ndo, "..."));'), (1133, '\t\t\tbreak;'), (1134, '\t\t}'), (1137, 
'\t\tif ((advance = pimv2_addr_print(ndo, bp, pimv2_group, 0)) < 0) {'), (1138, '\t\t\tND_PRINT((ndo, "..."));'), (1139, '\t\t\tbreak;'), (1140, '\t\t}'), (1143, '\t\tif ((advance = pimv2_addr_print(ndo, bp, pimv2_unicast, 0)) < 0) {'), (1144, '\t\t\tND_PRINT((ndo, "..."));'), (1145, '\t\t\tbreak;'), (1146, '\t\t}'), (1148, '\t\tND_TCHECK2(bp[0], 2);')]}
207
176
890
5,856
75
673
19
https://github.com/the-tcpdump-group/tcpdump
CVE-2017-13030
CWE-125
2,966
audio_spectrogram.cc
C++
tflite::ops::custom::audio_spectrogram::Prepare
/* Copyright 2018 The TensorFlow Authors. All Rights Reserved. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. You may obtain a copy of the License at http://www.apache.org/licenses/LICENSE-2.0 Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License. ==============================================================================*/ #include <math.h> #include <stddef.h> #include <stdint.h> #include <vector> #include "flatbuffers/flexbuffers.h" // from @flatbuffers #include "tensorflow/lite/c/common.h" #include "tensorflow/lite/kernels/internal/optimized/optimized_ops.h" #include "tensorflow/lite/kernels/internal/reference/reference_ops.h" #include "tensorflow/lite/kernels/internal/spectrogram.h" #include "tensorflow/lite/kernels/internal/tensor.h" #include "tensorflow/lite/kernels/internal/tensor_ctypes.h" #include "tensorflow/lite/kernels/kernel_util.h" namespace tflite { namespace ops { namespace custom { namespace audio_spectrogram { constexpr int kInputTensor = 0; constexpr int kOutputTensor = 0; enum KernelType { kReference, }; typedef struct { int window_size; int stride; bool magnitude_squared; int output_height; internal::Spectrogram* spectrogram; } TfLiteAudioSpectrogramParams; void* Init(TfLiteContext* context, const char* buffer, size_t length) { auto* data = new TfLiteAudioSpectrogramParams; const uint8_t* buffer_t = reinterpret_cast<const uint8_t*>(buffer); const flexbuffers::Map& m = flexbuffers::GetRoot(buffer_t, length).AsMap(); data->window_size = m["window_size"].AsInt64(); data->stride = m["stride"].AsInt64(); data->magnitude_squared = m["magnitude_squared"].AsBool(); data->spectrogram = new internal::Spectrogram; return 
data; } void Free(TfLiteContext* context, void* buffer) { auto* params = reinterpret_cast<TfLiteAudioSpectrogramParams*>(buffer); delete params->spectrogram; delete params; } TfLiteStatus Prepare(TfLiteContext* context, TfLiteNode* node) { auto* params = reinterpret_cast<TfLiteAudioSpectrogramParams*>(node->user_data); TF_LITE_ENSURE_EQ(context, NumInputs(node), 1); TF_LITE_ENSURE_EQ(context, NumOutputs(node), 1); const TfLiteTensor* input = GetInput(context, node, kInputTensor); TfLiteTensor* output = GetOutput(context, node, kOutputTensor); TF_LITE_ENSURE_EQ(context, NumDimensions(input), 2); TF_LITE_ENSURE_TYPES_EQ(context, output->type, kTfLiteFloat32); TF_LITE_ENSURE_TYPES_EQ(context, input->type, output->type); TF_LITE_ENSURE(context, params->spectrogram->Initialize(params->window_size, params->stride)); const int64_t sample_count = input->dims->data[0]; const int64_t length_minus_window = (sample_count - params->window_size); if (length_minus_window < 0) { params->output_height = 0; } else { params->output_height = 1 + (length_minus_window / params->stride); } TfLiteIntArray* output_size = TfLiteIntArrayCreate(3); output_size->data[0] = input->dims->data[1]; output_size->data[1] = params->output_height; output_size->data[2] = params->spectrogram->output_frequency_channels(); return context->ResizeTensor(context, output, output_size); } template <KernelType kernel_type> TfLiteStatus Eval(TfLiteContext* context, TfLiteNode* node) { auto* params = reinterpret_cast<TfLiteAudioSpectrogramParams*>(node->user_data); const TfLiteTensor* input = GetInput(context, node, kInputTensor); TfLiteTensor* output = GetOutput(context, node, kOutputTensor); TF_LITE_ENSURE(context, params->spectrogram->Initialize(params->window_size, params->stride)); const float* input_data = GetTensorData<float>(input); const int64_t sample_count = input->dims->data[0]; const int64_t channel_count = input->dims->data[1]; const int64_t output_width = 
params->spectrogram->output_frequency_channels(); float* output_flat = GetTensorData<float>(output); std::vector<float> input_for_channel(sample_count); for (int64_t channel = 0; channel < channel_count; ++channel) { float* output_slice = output_flat + (channel * params->output_height * output_width); for (int i = 0; i < sample_count; ++i) { input_for_channel[i] = input_data[i * channel_count + channel]; } std::vector<std::vector<float>> spectrogram_output; TF_LITE_ENSURE(context, params->spectrogram->ComputeSquaredMagnitudeSpectrogram( input_for_channel, &spectrogram_output)); TF_LITE_ENSURE_EQ(context, spectrogram_output.size(), params->output_height); TF_LITE_ENSURE(context, spectrogram_output.empty() || (spectrogram_output[0].size() == output_width)); for (int row_index = 0; row_index < params->output_height; ++row_index) { const std::vector<float>& spectrogram_row = spectrogram_output[row_index]; TF_LITE_ENSURE_EQ(context, spectrogram_row.size(), output_width); float* output_row = output_slice + (row_index * output_width); if (params->magnitude_squared) { for (int i = 0; i < output_width; ++i) { output_row[i] = spectrogram_row[i]; } } else { for (int i = 0; i < output_width; ++i) { output_row[i] = sqrtf(spectrogram_row[i]); } } } } return kTfLiteOk; } } // namespace audio_spectrogram TfLiteRegistration* Register_AUDIO_SPECTROGRAM() { static TfLiteRegistration r = { audio_spectrogram::Init, audio_spectrogram::Free, audio_spectrogram::Prepare, audio_spectrogram::Eval<audio_spectrogram::kReference>}; return &r; } } // namespace custom } // namespace ops } // namespace tflite
/* Copyright 2018 The TensorFlow Authors. All Rights Reserved. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. You may obtain a copy of the License at http://www.apache.org/licenses/LICENSE-2.0 Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License. ==============================================================================*/ #include <math.h> #include <stddef.h> #include <stdint.h> #include <vector> #include "flatbuffers/flexbuffers.h" // from @flatbuffers #include "tensorflow/lite/c/common.h" #include "tensorflow/lite/kernels/internal/optimized/optimized_ops.h" #include "tensorflow/lite/kernels/internal/reference/reference_ops.h" #include "tensorflow/lite/kernels/internal/spectrogram.h" #include "tensorflow/lite/kernels/internal/tensor.h" #include "tensorflow/lite/kernels/internal/tensor_ctypes.h" #include "tensorflow/lite/kernels/kernel_util.h" namespace tflite { namespace ops { namespace custom { namespace audio_spectrogram { constexpr int kInputTensor = 0; constexpr int kOutputTensor = 0; enum KernelType { kReference, }; typedef struct { int window_size; int stride; bool magnitude_squared; int output_height; internal::Spectrogram* spectrogram; } TfLiteAudioSpectrogramParams; void* Init(TfLiteContext* context, const char* buffer, size_t length) { auto* data = new TfLiteAudioSpectrogramParams; const uint8_t* buffer_t = reinterpret_cast<const uint8_t*>(buffer); const flexbuffers::Map& m = flexbuffers::GetRoot(buffer_t, length).AsMap(); data->window_size = m["window_size"].AsInt64(); data->stride = m["stride"].AsInt64(); data->magnitude_squared = m["magnitude_squared"].AsBool(); data->spectrogram = new internal::Spectrogram; return 
data; } void Free(TfLiteContext* context, void* buffer) { auto* params = reinterpret_cast<TfLiteAudioSpectrogramParams*>(buffer); delete params->spectrogram; delete params; } TfLiteStatus Prepare(TfLiteContext* context, TfLiteNode* node) { auto* params = reinterpret_cast<TfLiteAudioSpectrogramParams*>(node->user_data); TF_LITE_ENSURE_EQ(context, NumInputs(node), 1); TF_LITE_ENSURE_EQ(context, NumOutputs(node), 1); const TfLiteTensor* input; TF_LITE_ENSURE_OK(context, GetInputSafe(context, node, kInputTensor, &input)); TfLiteTensor* output; TF_LITE_ENSURE_OK(context, GetOutputSafe(context, node, kOutputTensor, &output)); TF_LITE_ENSURE_EQ(context, NumDimensions(input), 2); TF_LITE_ENSURE_TYPES_EQ(context, output->type, kTfLiteFloat32); TF_LITE_ENSURE_TYPES_EQ(context, input->type, output->type); TF_LITE_ENSURE(context, params->spectrogram->Initialize(params->window_size, params->stride)); const int64_t sample_count = input->dims->data[0]; const int64_t length_minus_window = (sample_count - params->window_size); if (length_minus_window < 0) { params->output_height = 0; } else { params->output_height = 1 + (length_minus_window / params->stride); } TfLiteIntArray* output_size = TfLiteIntArrayCreate(3); output_size->data[0] = input->dims->data[1]; output_size->data[1] = params->output_height; output_size->data[2] = params->spectrogram->output_frequency_channels(); return context->ResizeTensor(context, output, output_size); } template <KernelType kernel_type> TfLiteStatus Eval(TfLiteContext* context, TfLiteNode* node) { auto* params = reinterpret_cast<TfLiteAudioSpectrogramParams*>(node->user_data); const TfLiteTensor* input; TF_LITE_ENSURE_OK(context, GetInputSafe(context, node, kInputTensor, &input)); TfLiteTensor* output; TF_LITE_ENSURE_OK(context, GetOutputSafe(context, node, kOutputTensor, &output)); TF_LITE_ENSURE(context, params->spectrogram->Initialize(params->window_size, params->stride)); const float* input_data = GetTensorData<float>(input); const int64_t 
sample_count = input->dims->data[0]; const int64_t channel_count = input->dims->data[1]; const int64_t output_width = params->spectrogram->output_frequency_channels(); float* output_flat = GetTensorData<float>(output); std::vector<float> input_for_channel(sample_count); for (int64_t channel = 0; channel < channel_count; ++channel) { float* output_slice = output_flat + (channel * params->output_height * output_width); for (int i = 0; i < sample_count; ++i) { input_for_channel[i] = input_data[i * channel_count + channel]; } std::vector<std::vector<float>> spectrogram_output; TF_LITE_ENSURE(context, params->spectrogram->ComputeSquaredMagnitudeSpectrogram( input_for_channel, &spectrogram_output)); TF_LITE_ENSURE_EQ(context, spectrogram_output.size(), params->output_height); TF_LITE_ENSURE(context, spectrogram_output.empty() || (spectrogram_output[0].size() == output_width)); for (int row_index = 0; row_index < params->output_height; ++row_index) { const std::vector<float>& spectrogram_row = spectrogram_output[row_index]; TF_LITE_ENSURE_EQ(context, spectrogram_row.size(), output_width); float* output_row = output_slice + (row_index * output_width); if (params->magnitude_squared) { for (int i = 0; i < output_width; ++i) { output_row[i] = spectrogram_row[i]; } } else { for (int i = 0; i < output_width; ++i) { output_row[i] = sqrtf(spectrogram_row[i]); } } } } return kTfLiteOk; } } // namespace audio_spectrogram TfLiteRegistration* Register_AUDIO_SPECTROGRAM() { static TfLiteRegistration r = { audio_spectrogram::Init, audio_spectrogram::Free, audio_spectrogram::Prepare, audio_spectrogram::Eval<audio_spectrogram::kReference>}; return &r; } } // namespace custom } // namespace ops } // namespace tflite
TfLiteStatus Prepare(TfLiteContext* context, TfLiteNode* node) { auto* params = reinterpret_cast<TfLiteAudioSpectrogramParams*>(node->user_data); TF_LITE_ENSURE_EQ(context, NumInputs(node), 1); TF_LITE_ENSURE_EQ(context, NumOutputs(node), 1); const TfLiteTensor* input = GetInput(context, node, kInputTensor); TfLiteTensor* output = GetOutput(context, node, kOutputTensor); TF_LITE_ENSURE_EQ(context, NumDimensions(input), 2); TF_LITE_ENSURE_TYPES_EQ(context, output->type, kTfLiteFloat32); TF_LITE_ENSURE_TYPES_EQ(context, input->type, output->type); TF_LITE_ENSURE(context, params->spectrogram->Initialize(params->window_size, params->stride)); const int64_t sample_count = input->dims->data[0]; const int64_t length_minus_window = (sample_count - params->window_size); if (length_minus_window < 0) { params->output_height = 0; } else { params->output_height = 1 + (length_minus_window / params->stride); } TfLiteIntArray* output_size = TfLiteIntArrayCreate(3); output_size->data[0] = input->dims->data[1]; output_size->data[1] = params->output_height; output_size->data[2] = params->spectrogram->output_frequency_channels(); return context->ResizeTensor(context, output, output_size); }
TfLiteStatus Prepare(TfLiteContext* context, TfLiteNode* node) { auto* params = reinterpret_cast<TfLiteAudioSpectrogramParams*>(node->user_data); TF_LITE_ENSURE_EQ(context, NumInputs(node), 1); TF_LITE_ENSURE_EQ(context, NumOutputs(node), 1); const TfLiteTensor* input; TF_LITE_ENSURE_OK(context, GetInputSafe(context, node, kInputTensor, &input)); TfLiteTensor* output; TF_LITE_ENSURE_OK(context, GetOutputSafe(context, node, kOutputTensor, &output)); TF_LITE_ENSURE_EQ(context, NumDimensions(input), 2); TF_LITE_ENSURE_TYPES_EQ(context, output->type, kTfLiteFloat32); TF_LITE_ENSURE_TYPES_EQ(context, input->type, output->type); TF_LITE_ENSURE(context, params->spectrogram->Initialize(params->window_size, params->stride)); const int64_t sample_count = input->dims->data[0]; const int64_t length_minus_window = (sample_count - params->window_size); if (length_minus_window < 0) { params->output_height = 0; } else { params->output_height = 1 + (length_minus_window / params->stride); } TfLiteIntArray* output_size = TfLiteIntArrayCreate(3); output_size->data[0] = input->dims->data[1]; output_size->data[1] = params->output_height; output_size->data[2] = params->spectrogram->output_frequency_channels(); return context->ResizeTensor(context, output, output_size); }
{'added': [(79, ' const TfLiteTensor* input;'), (80, ' TF_LITE_ENSURE_OK(context, GetInputSafe(context, node, kInputTensor, &input));'), (81, ' TfLiteTensor* output;'), (82, ' TF_LITE_ENSURE_OK(context,'), (83, ' GetOutputSafe(context, node, kOutputTensor, &output));'), (112, ' const TfLiteTensor* input;'), (113, ' TF_LITE_ENSURE_OK(context, GetInputSafe(context, node, kInputTensor, &input));'), (114, ' TfLiteTensor* output;'), (115, ' TF_LITE_ENSURE_OK(context,'), (116, ' GetOutputSafe(context, node, kOutputTensor, &output));')], 'deleted': [(79, ' const TfLiteTensor* input = GetInput(context, node, kInputTensor);'), (80, ' TfLiteTensor* output = GetOutput(context, node, kOutputTensor);'), (109, ' const TfLiteTensor* input = GetInput(context, node, kInputTensor);'), (110, ' TfLiteTensor* output = GetOutput(context, node, kOutputTensor);')]}
10
4
130
964
25
253
2
https://github.com/tensorflow/tensorflow
CVE-2020-15211
CWE-125
2,332
edge_proxy_common.c
C
edge_sparse_csr_reader_double
/****************************************************************************** ** Copyright (c) 2017-2018, Intel Corporation ** ** All rights reserved. ** ** ** ** Redistribution and use in source and binary forms, with or without ** ** modification, are permitted provided that the following conditions ** ** are met: ** ** 1. Redistributions of source code must retain the above copyright ** ** notice, this list of conditions and the following disclaimer. ** ** 2. Redistributions in binary form must reproduce the above copyright ** ** notice, this list of conditions and the following disclaimer in the ** ** documentation and/or other materials provided with the distribution. ** ** 3. Neither the name of the copyright holder nor the names of its ** ** contributors may be used to endorse or promote products derived ** ** from this software without specific prior written permission. ** ** ** ** THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS ** ** "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT ** ** LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR ** ** A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT ** ** HOLDER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, ** ** SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED ** ** TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR ** ** PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF ** ** LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING ** ** NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS ** ** SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. ** ******************************************************************************/ /* Alexander Heinecke (Intel Corp.) 
******************************************************************************/ #include "edge_proxy_common.h" void edge_sparse_csr_reader_double( const char* i_csr_file_in, unsigned int** o_row_idx, unsigned int** o_column_idx, double** o_values, unsigned int* o_row_count, unsigned int* o_column_count, unsigned int* o_element_count ) { FILE *l_csr_file_handle; const unsigned int l_line_length = 512; char l_line[512/*l_line_length*/+1]; unsigned int l_header_read = 0; unsigned int* l_row_idx_id = NULL; unsigned int l_i = 0; l_csr_file_handle = fopen( i_csr_file_in, "r" ); if ( l_csr_file_handle == NULL ) { fprintf( stderr, "cannot open CSR file!\n" ); return; } while (fgets(l_line, l_line_length, l_csr_file_handle) != NULL) { if ( strlen(l_line) == l_line_length ) { fprintf( stderr, "could not read file length!\n" ); return; } /* check if we are still reading comments header */ if ( l_line[0] == '%' ) { continue; } else { /* if we are the first line after comment header, we allocate our data structures */ if ( l_header_read == 0 ) { if ( sscanf(l_line, "%u %u %u", o_row_count, o_column_count, o_element_count) == 3 ) { /* allocate CSC datastructure matching mtx file */ *o_column_idx = (unsigned int*) malloc(sizeof(unsigned int) * (*o_element_count)); *o_row_idx = (unsigned int*) malloc(sizeof(unsigned int) * (*o_row_count + 1)); *o_values = (double*) malloc(sizeof(double) * (*o_element_count)); l_row_idx_id = (unsigned int*) malloc(sizeof(unsigned int) * (*o_row_count)); /* check if mallocs were successful */ if ( ( *o_row_idx == NULL ) || ( *o_column_idx == NULL ) || ( *o_values == NULL ) || ( l_row_idx_id == NULL ) ) { fprintf( stderr, "could not allocate sp data!\n" ); return; } /* set everything to zero for init */ memset(*o_row_idx, 0, sizeof(unsigned int)*(*o_row_count + 1)); memset(*o_column_idx, 0, sizeof(unsigned int)*(*o_element_count)); memset(*o_values, 0, sizeof(double)*(*o_element_count)); memset(l_row_idx_id, 0, sizeof(unsigned int)*(*o_row_count)); 
/* init column idx */ for ( l_i = 0; l_i < (*o_row_count + 1); l_i++) (*o_row_idx)[l_i] = (*o_element_count); /* init */ (*o_row_idx)[0] = 0; l_i = 0; l_header_read = 1; } else { fprintf( stderr, "could not csr description!\n" ); return; } /* now we read the actual content */ } else { unsigned int l_row, l_column; double l_value; /* read a line of content */ if ( sscanf(l_line, "%u %u %lf", &l_row, &l_column, &l_value) != 3 ) { fprintf( stderr, "could not read element!\n" ); return; } /* adjust numbers to zero termination */ l_row--; l_column--; /* add these values to row and value structure */ (*o_column_idx)[l_i] = l_column; (*o_values)[l_i] = l_value; l_i++; /* handle columns, set id to own for this column, yeah we need to handle empty columns */ l_row_idx_id[l_row] = 1; (*o_row_idx)[l_row+1] = l_i; } } } /* close mtx file */ fclose( l_csr_file_handle ); /* check if we read a file which was consistent */ if ( l_i != (*o_element_count) ) { fprintf( stderr, "we were not able to read all elements!\n" ); return; } /* let's handle empty rows */ for ( l_i = 0; l_i < (*o_row_count); l_i++) { if ( l_row_idx_id[l_i] == 0 ) { (*o_row_idx)[l_i+1] = (*o_row_idx)[l_i]; } } /* free helper data structure */ if ( l_row_idx_id != NULL ) { free( l_row_idx_id ); } } void edge_sparse_csr_reader_float( const char* i_csr_file_in, unsigned int** o_row_idx, unsigned int** o_column_idx, float** o_values, unsigned int* o_row_count, unsigned int* o_column_count, unsigned int* o_element_count ) { double* l_values; unsigned int i; /* read using double */ edge_sparse_csr_reader_double( i_csr_file_in, o_row_idx, o_column_idx, &l_values, o_row_count, o_column_count, o_element_count ); /* converting double values into float */ *o_values = (float*) malloc((*o_element_count)*sizeof(float)); for ( i = 0; i < (*o_element_count); ++i ) { (*o_values)[i] = (float)l_values[i]; } free(l_values); }
/****************************************************************************** ** Copyright (c) 2017-2018, Intel Corporation ** ** All rights reserved. ** ** ** ** Redistribution and use in source and binary forms, with or without ** ** modification, are permitted provided that the following conditions ** ** are met: ** ** 1. Redistributions of source code must retain the above copyright ** ** notice, this list of conditions and the following disclaimer. ** ** 2. Redistributions in binary form must reproduce the above copyright ** ** notice, this list of conditions and the following disclaimer in the ** ** documentation and/or other materials provided with the distribution. ** ** 3. Neither the name of the copyright holder nor the names of its ** ** contributors may be used to endorse or promote products derived ** ** from this software without specific prior written permission. ** ** ** ** THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS ** ** "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT ** ** LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR ** ** A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT ** ** HOLDER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, ** ** SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED ** ** TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR ** ** PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF ** ** LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING ** ** NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS ** ** SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. ** ******************************************************************************/ /* Alexander Heinecke (Intel Corp.) 
******************************************************************************/ #include "edge_proxy_common.h" void edge_sparse_csr_reader_double( const char* i_csr_file_in, unsigned int** o_row_idx, unsigned int** o_column_idx, double** o_values, unsigned int* o_row_count, unsigned int* o_column_count, unsigned int* o_element_count ) { FILE *l_csr_file_handle; const unsigned int l_line_length = 512; char l_line[512/*l_line_length*/+1]; unsigned int l_header_read = 0; unsigned int* l_row_idx_id = NULL; unsigned int l_i = 0; l_csr_file_handle = fopen( i_csr_file_in, "r" ); if ( l_csr_file_handle == NULL ) { fprintf( stderr, "cannot open CSR file!\n" ); return; } while (fgets(l_line, l_line_length, l_csr_file_handle) != NULL) { if ( strlen(l_line) == l_line_length ) { fprintf( stderr, "could not read file length!\n" ); return; } /* check if we are still reading comments header */ if ( l_line[0] == '%' ) { continue; } else { /* if we are the first line after comment header, we allocate our data structures */ if ( l_header_read == 0 ) { if (3 == sscanf(l_line, "%u %u %u", o_row_count, o_column_count, o_element_count) && 0 != *o_row_count && 0 != *o_column_count && 0 != *o_element_count) { /* allocate CSC datastructure matching mtx file */ *o_column_idx = (unsigned int*) malloc(sizeof(unsigned int) * (*o_element_count)); *o_row_idx = (unsigned int*) malloc(sizeof(unsigned int) * (*o_row_count + 1)); *o_values = (double*) malloc(sizeof(double) * (*o_element_count)); l_row_idx_id = (unsigned int*) malloc(sizeof(unsigned int) * (*o_row_count)); /* check if mallocs were successful */ if ( ( *o_row_idx == NULL ) || ( *o_column_idx == NULL ) || ( *o_values == NULL ) || ( l_row_idx_id == NULL ) ) { fprintf( stderr, "could not allocate sp data!\n" ); return; } /* set everything to zero for init */ memset(*o_row_idx, 0, sizeof(unsigned int)*(*o_row_count + 1)); memset(*o_column_idx, 0, sizeof(unsigned int)*(*o_element_count)); memset(*o_values, 0, 
sizeof(double)*(*o_element_count)); memset(l_row_idx_id, 0, sizeof(unsigned int)*(*o_row_count)); /* init column idx */ for ( l_i = 0; l_i < (*o_row_count + 1); l_i++) (*o_row_idx)[l_i] = (*o_element_count); /* init */ (*o_row_idx)[0] = 0; l_i = 0; l_header_read = 1; } else { fprintf( stderr, "could not csr description!\n" ); return; } /* now we read the actual content */ } else { unsigned int l_row, l_column; double l_value; /* read a line of content */ if ( sscanf(l_line, "%u %u %lf", &l_row, &l_column, &l_value) != 3 ) { fprintf( stderr, "could not read element!\n" ); return; } /* adjust numbers to zero termination */ l_row--; l_column--; /* add these values to row and value structure */ (*o_column_idx)[l_i] = l_column; (*o_values)[l_i] = l_value; l_i++; /* handle columns, set id to own for this column, yeah we need to handle empty columns */ l_row_idx_id[l_row] = 1; (*o_row_idx)[l_row+1] = l_i; } } } /* close mtx file */ fclose( l_csr_file_handle ); /* check if we read a file which was consistent */ if ( l_i != (*o_element_count) ) { fprintf( stderr, "we were not able to read all elements!\n" ); return; } /* let's handle empty rows */ for ( l_i = 0; l_i < (*o_row_count); l_i++) { if ( l_row_idx_id[l_i] == 0 ) { (*o_row_idx)[l_i+1] = (*o_row_idx)[l_i]; } } /* free helper data structure */ if ( l_row_idx_id != NULL ) { free( l_row_idx_id ); } } void edge_sparse_csr_reader_float( const char* i_csr_file_in, unsigned int** o_row_idx, unsigned int** o_column_idx, float** o_values, unsigned int* o_row_count, unsigned int* o_column_count, unsigned int* o_element_count ) { double* l_values; unsigned int i; /* read using double */ edge_sparse_csr_reader_double( i_csr_file_in, o_row_idx, o_column_idx, &l_values, o_row_count, o_column_count, o_element_count ); /* converting double values into float */ *o_values = (float*) malloc((*o_element_count)*sizeof(float)); for ( i = 0; i < (*o_element_count); ++i ) { (*o_values)[i] = (float)l_values[i]; } free(l_values); }
void edge_sparse_csr_reader_double( const char* i_csr_file_in, unsigned int** o_row_idx, unsigned int** o_column_idx, double** o_values, unsigned int* o_row_count, unsigned int* o_column_count, unsigned int* o_element_count ) { FILE *l_csr_file_handle; const unsigned int l_line_length = 512; char l_line[512/*l_line_length*/+1]; unsigned int l_header_read = 0; unsigned int* l_row_idx_id = NULL; unsigned int l_i = 0; l_csr_file_handle = fopen( i_csr_file_in, "r" ); if ( l_csr_file_handle == NULL ) { fprintf( stderr, "cannot open CSR file!\n" ); return; } while (fgets(l_line, l_line_length, l_csr_file_handle) != NULL) { if ( strlen(l_line) == l_line_length ) { fprintf( stderr, "could not read file length!\n" ); return; } /* check if we are still reading comments header */ if ( l_line[0] == '%' ) { continue; } else { /* if we are the first line after comment header, we allocate our data structures */ if ( l_header_read == 0 ) { if ( sscanf(l_line, "%u %u %u", o_row_count, o_column_count, o_element_count) == 3 ) { /* allocate CSC datastructure matching mtx file */ *o_column_idx = (unsigned int*) malloc(sizeof(unsigned int) * (*o_element_count)); *o_row_idx = (unsigned int*) malloc(sizeof(unsigned int) * (*o_row_count + 1)); *o_values = (double*) malloc(sizeof(double) * (*o_element_count)); l_row_idx_id = (unsigned int*) malloc(sizeof(unsigned int) * (*o_row_count)); /* check if mallocs were successful */ if ( ( *o_row_idx == NULL ) || ( *o_column_idx == NULL ) || ( *o_values == NULL ) || ( l_row_idx_id == NULL ) ) { fprintf( stderr, "could not allocate sp data!\n" ); return; } /* set everything to zero for init */ memset(*o_row_idx, 0, sizeof(unsigned int)*(*o_row_count + 1)); memset(*o_column_idx, 0, sizeof(unsigned int)*(*o_element_count)); memset(*o_values, 0, sizeof(double)*(*o_element_count)); memset(l_row_idx_id, 0, sizeof(unsigned int)*(*o_row_count)); /* init column idx */ for ( l_i = 0; l_i < (*o_row_count + 1); l_i++) (*o_row_idx)[l_i] = (*o_element_count); /* 
init */ (*o_row_idx)[0] = 0; l_i = 0; l_header_read = 1; } else { fprintf( stderr, "could not csr description!\n" ); return; } /* now we read the actual content */ } else { unsigned int l_row, l_column; double l_value; /* read a line of content */ if ( sscanf(l_line, "%u %u %lf", &l_row, &l_column, &l_value) != 3 ) { fprintf( stderr, "could not read element!\n" ); return; } /* adjust numbers to zero termination */ l_row--; l_column--; /* add these values to row and value structure */ (*o_column_idx)[l_i] = l_column; (*o_values)[l_i] = l_value; l_i++; /* handle columns, set id to own for this column, yeah we need to handle empty columns */ l_row_idx_id[l_row] = 1; (*o_row_idx)[l_row+1] = l_i; } } } /* close mtx file */ fclose( l_csr_file_handle ); /* check if we read a file which was consistent */ if ( l_i != (*o_element_count) ) { fprintf( stderr, "we were not able to read all elements!\n" ); return; } /* let's handle empty rows */ for ( l_i = 0; l_i < (*o_row_count); l_i++) { if ( l_row_idx_id[l_i] == 0 ) { (*o_row_idx)[l_i+1] = (*o_row_idx)[l_i]; } } /* free helper data structure */ if ( l_row_idx_id != NULL ) { free( l_row_idx_id ); } }
void edge_sparse_csr_reader_double( const char* i_csr_file_in, unsigned int** o_row_idx, unsigned int** o_column_idx, double** o_values, unsigned int* o_row_count, unsigned int* o_column_count, unsigned int* o_element_count ) { FILE *l_csr_file_handle; const unsigned int l_line_length = 512; char l_line[512/*l_line_length*/+1]; unsigned int l_header_read = 0; unsigned int* l_row_idx_id = NULL; unsigned int l_i = 0; l_csr_file_handle = fopen( i_csr_file_in, "r" ); if ( l_csr_file_handle == NULL ) { fprintf( stderr, "cannot open CSR file!\n" ); return; } while (fgets(l_line, l_line_length, l_csr_file_handle) != NULL) { if ( strlen(l_line) == l_line_length ) { fprintf( stderr, "could not read file length!\n" ); return; } /* check if we are still reading comments header */ if ( l_line[0] == '%' ) { continue; } else { /* if we are the first line after comment header, we allocate our data structures */ if ( l_header_read == 0 ) { if (3 == sscanf(l_line, "%u %u %u", o_row_count, o_column_count, o_element_count) && 0 != *o_row_count && 0 != *o_column_count && 0 != *o_element_count) { /* allocate CSC datastructure matching mtx file */ *o_column_idx = (unsigned int*) malloc(sizeof(unsigned int) * (*o_element_count)); *o_row_idx = (unsigned int*) malloc(sizeof(unsigned int) * (*o_row_count + 1)); *o_values = (double*) malloc(sizeof(double) * (*o_element_count)); l_row_idx_id = (unsigned int*) malloc(sizeof(unsigned int) * (*o_row_count)); /* check if mallocs were successful */ if ( ( *o_row_idx == NULL ) || ( *o_column_idx == NULL ) || ( *o_values == NULL ) || ( l_row_idx_id == NULL ) ) { fprintf( stderr, "could not allocate sp data!\n" ); return; } /* set everything to zero for init */ memset(*o_row_idx, 0, sizeof(unsigned int)*(*o_row_count + 1)); memset(*o_column_idx, 0, sizeof(unsigned int)*(*o_element_count)); memset(*o_values, 0, sizeof(double)*(*o_element_count)); memset(l_row_idx_id, 0, sizeof(unsigned int)*(*o_row_count)); /* init column idx */ for ( l_i = 0; l_i < 
(*o_row_count + 1); l_i++) (*o_row_idx)[l_i] = (*o_element_count); /* init */ (*o_row_idx)[0] = 0; l_i = 0; l_header_read = 1; } else { fprintf( stderr, "could not csr description!\n" ); return; } /* now we read the actual content */ } else { unsigned int l_row, l_column; double l_value; /* read a line of content */ if ( sscanf(l_line, "%u %u %lf", &l_row, &l_column, &l_value) != 3 ) { fprintf( stderr, "could not read element!\n" ); return; } /* adjust numbers to zero termination */ l_row--; l_column--; /* add these values to row and value structure */ (*o_column_idx)[l_i] = l_column; (*o_values)[l_i] = l_value; l_i++; /* handle columns, set id to own for this column, yeah we need to handle empty columns */ l_row_idx_id[l_row] = 1; (*o_row_idx)[l_row+1] = l_i; } } } /* close mtx file */ fclose( l_csr_file_handle ); /* check if we read a file which was consistent */ if ( l_i != (*o_element_count) ) { fprintf( stderr, "we were not able to read all elements!\n" ); return; } /* let's handle empty rows */ for ( l_i = 0; l_i < (*o_row_count); l_i++) { if ( l_row_idx_id[l_i] == 0 ) { (*o_row_idx)[l_i+1] = (*o_row_idx)[l_i]; } } /* free helper data structure */ if ( l_row_idx_id != NULL ) { free( l_row_idx_id ); } }
{'added': [(65, ' if (3 == sscanf(l_line, "%u %u %u", o_row_count, o_column_count, o_element_count) &&'), (66, ' 0 != *o_row_count && 0 != *o_column_count && 0 != *o_element_count)'), (67, ' {')], 'deleted': [(65, ' if ( sscanf(l_line, "%u %u %u", o_row_count, o_column_count, o_element_count) == 3 ) {')]}
3
1
103
762
83
620
17
https://github.com/hfp/libxsmm
CVE-2018-20541
CWE-787
2,901
reframe_nalu.c
C
naludmx_queue_param_set
/* * GPAC - Multimedia Framework C SDK * * Authors: Jean Le Feuvre * Copyright (c) Telecom ParisTech 2000-2021 * All rights reserved * * This file is part of GPAC / NALU (AVC, HEVC, VVC) reframer filter * * GPAC is free software; you can redistribute it and/or modify * it under the terms of the GNU Lesser General Public License as published by * the Free Software Foundation; either version 2, or (at your option) * any later version. * * GPAC is distributed in the hope that it will be useful, * but WITHOUT ANY WARRANTY; without even the implied warranty of * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the * GNU Lesser General Public License for more details. * * You should have received a copy of the GNU Lesser General Public * License along with this library; see the file COPYING. If not, write to * the Free Software Foundation, 675 Mass Ave, Cambridge, MA 02139, USA. * */ #include <gpac/avparse.h> #include <gpac/constants.h> #include <gpac/filters.h> #include <gpac/internal/media_dev.h> //for oinf stuff #include <gpac/internal/isomedia_dev.h> #ifndef GPAC_DISABLE_AV_PARSERS #define CTS_POC_OFFSET_SAFETY 1000 GF_Err gf_bs_set_logger(GF_BitStream *bs, void (*on_bs_log)(void *udta, const char *field_name, u32 nb_bits, u64 field_val, s32 idx1, s32 idx2, s32 idx3), void *udta); typedef struct { u64 pos; Double duration; } NALUIdx; typedef struct { u32 layer_id_plus_one; u32 min_temporal_id, max_temporal_id; } LHVCLayerInfo; enum { STRICT_POC_OFF = 0, STRICT_POC_ON, STRICT_POC_ERROR, }; typedef struct { //filter args GF_Fraction fps; Double index; Bool explicit, force_sync, nosei, importer, subsamples, nosvc, novpsext, deps, seirw, audelim, analyze; u32 nal_length; u32 strict_poc; u32 bsdbg; GF_Fraction idur; //only one input pid declared GF_FilterPid *ipid; //only one output pid declared GF_FilterPid *opid; //read bitstream for AVC/HEVC parsing GF_BitStream *bs_r; //write bitstream for nalus size length rewrite GF_BitStream *bs_w; //current CTS/DTS of the 
stream, may be overridden by input packet if not file (eg TS PES) u64 cts, dts, prev_dts, prev_cts; u32 pck_duration; //basic config stored here: with, height CRC of base and enh layer decoder config, sample aspect ratio //when changing, a new pid config will be emitted u32 width, height; u32 crc_cfg, crc_cfg_enh; GF_Fraction sar; GF_Fraction cur_fps; //duration of the file if known GF_Fraction64 duration; //playback start range Double start_range; //indicates we are in seek, packets before start range should be marked Bool in_seek; //set once we play something Bool is_playing; //is a file, is a file fully loaded on disk (local or download done) Bool is_file, file_loaded; //initial PLAY command found Bool initial_play_done; //list of RAP entry points NALUIdx *indexes; u32 index_alloc_size, index_size; //timescale of the input pid if any, 0 otherwise u32 timescale; //framing flag of input packet when input pid has timing (eg is not a file) Bool input_is_au_start; GF_FilterPacket *src_pck; Bool full_au_source; //total delay in frames between decode and presentation s32 max_total_delay; //max size codable with our nal_length setting u32 max_nalu_size_allowed; //position in input packet from which we resume parsing u32 resume_from; //prevents message about possible NAL size optimizaion at finalization Bool nal_adjusted; //avc/hevc switch u32 codecid; //name of the logger const char *log_name; //list of packet (in decode order !!) not yet dispatched. 
//Dispatch depends on the mode: //strict_poc=0: we wait after each IDR until we find a stable poc diff between pictures, controled by poc_probe_done //strict_poc>=1: we dispatch only after IDR or at the end (huge delay) GF_List *pck_queue; //dts of the last IDR found u64 dts_last_IDR; //max size of NALUs in the bitstream u32 max_nalu_size; u8 *nal_store; u32 nal_store_size, nal_store_alloc; //list of param sets found GF_List *sps, *pps, *vps, *sps_ext, *pps_svc, *vvc_aps_pre, *vvc_dci; //set to true if one of the PS has been modified, will potentially trigger a PID reconfigure Bool ps_modified; //stats u32 nb_idr, nb_i, nb_p, nb_b, nb_sp, nb_si, nb_sei, nb_nalus, nb_aud; //frame has intra slice Bool has_islice; //AU is rap GF_FilterSAPType au_sap; //frame first slice Bool first_slice_in_au; //paff used - NEED FURTHER CHECKING Bool is_paff; Bool bottom_field_flag; //SEI recovery count - if 0 and I slice only frame, openGOP detection (avc) s32 sei_recovery_frame_count; u32 use_opengop_gdr; //poc compute variables s32 last_poc, max_last_poc, max_last_b_poc, poc_diff, prev_last_poc, min_poc, poc_shift; //set to TRUE once 3 frames with same min poc diff are found, enabling dispatch of the frames Bool poc_probe_done; //pointer to the first packet of the current frame (the one holding timing info) //this packet is in the packet queue GF_FilterPacket *first_pck_in_au; //frame has slices used as reference Bool has_ref_slices; //frame has redundant coding Bool has_redundant; Bool last_frame_is_idr; //buffer to store SEI messages //for AVC: we have to rewrite the SEI to remove some of the messages according to the spec //for HEVC: we store prefix SEI here and dispatch them once the first VCL is found char *sei_buffer; u32 sei_buffer_size, sei_buffer_alloc; //subsample buffer, only used for SVC for now u32 subsamp_buffer_alloc, subsamp_buffer_size, subs_mapped_bytes; char *subsamp_buffer; //AVC specific //avc bitstream state AVCState *avc_state; //SVC specific char 
*svc_prefix_buffer; u32 svc_prefix_buffer_size, svc_prefix_buffer_alloc; u32 svc_nalu_prefix_reserved; u8 svc_nalu_prefix_priority; //HEVC specific HEVCState *hevc_state; //shvc stats u32 nb_e_idr, nb_e_i, nb_e_p, nb_e_b; Bool vvc_no_stats; LHVCLayerInfo linf[64]; u8 max_temporal_id[64]; u8 min_layer_id; //VVC specific VVCState *vvc_state; Bool has_initial_aud; char init_aud[3]; Bool interlaced; Bool is_mvc; u32 bitrate; u32 nb_frames; } GF_NALUDmxCtx; static void naludmx_enqueue_or_dispatch(GF_NALUDmxCtx *ctx, GF_FilterPacket *n_pck, Bool flush_ref); static void naludmx_finalize_au_flags(GF_NALUDmxCtx *ctx); GF_Err naludmx_configure_pid(GF_Filter *filter, GF_FilterPid *pid, Bool is_remove) { const GF_PropertyValue *p; GF_NALUDmxCtx *ctx = gf_filter_get_udta(filter); if (is_remove) { ctx->ipid = NULL; if (ctx->opid) { gf_filter_pid_remove(ctx->opid); ctx->opid = NULL; } return GF_OK; } if (! gf_filter_pid_check_caps(pid)) return GF_NOT_SUPPORTED; ctx->ipid = pid; p = gf_filter_pid_get_property(pid, GF_PROP_PID_TIMESCALE); if (p) { ctx->timescale = p->value.uint; //if we have a FPS prop, use it p = gf_filter_pid_get_property(pid, GF_PROP_PID_FPS); if (p) { ctx->cur_fps = p->value.frac; } else { ctx->cur_fps.den = 0; ctx->cur_fps.num = ctx->timescale; } } p = gf_filter_pid_get_property(pid, GF_PROP_PID_CODECID); if (p) { switch (p->value.uint) { case GF_CODECID_HEVC: case GF_CODECID_LHVC: ctx->codecid = GF_CODECID_HEVC; break; case GF_CODECID_VVC: ctx->codecid = GF_CODECID_VVC; break; case GF_CODECID_AVC: case GF_CODECID_AVC_PS: case GF_CODECID_SVC: case GF_CODECID_MVC: ctx->codecid = GF_CODECID_AVC; break; default: return GF_NOT_SUPPORTED; } } else { p = gf_filter_pid_get_property(pid, GF_PROP_PID_MIME); if (p && p->value.string && ( strstr(p->value.string, "hvc") || strstr(p->value.string, "hevc") || strstr(p->value.string, "265") || strstr(p->value.string, "shvc") || strstr(p->value.string, "mhvc") || strstr(p->value.string, "lhvc") ) ) ctx->codecid = 
GF_CODECID_HEVC; else if (p && p->value.string && ( strstr(p->value.string, "vvc") ) ) ctx->codecid = GF_CODECID_VVC; else { p = gf_filter_pid_get_property(pid, GF_PROP_PID_FILE_EXT); if (p && p->value.string && ( strstr(p->value.string, "hvc") || strstr(p->value.string, "hevc") || strstr(p->value.string, "265") || strstr(p->value.string, "shvc") || strstr(p->value.string, "mhvc") || strstr(p->value.string, "lhvc") ) ) ctx->codecid = GF_CODECID_HEVC; else if (p && p->value.string && ( strstr(p->value.string, "vvc") || strstr(p->value.string, "266") || strstr(p->value.string, "lvvc") ) ) ctx->codecid = GF_CODECID_VVC; else ctx->codecid = GF_CODECID_AVC; } } if (ctx->codecid==GF_CODECID_HEVC) { #ifdef GPAC_DISABLE_HEVC return GF_NOT_SUPPORTED; #else ctx->log_name = "HEVC"; if (ctx->avc_state) gf_free(ctx->avc_state); if (ctx->vvc_state) gf_free(ctx->vvc_state); if (!ctx->hevc_state) GF_SAFEALLOC(ctx->hevc_state, HEVCState); ctx->min_layer_id = 0xFF; #endif } else if (ctx->codecid==GF_CODECID_VVC) { ctx->log_name = "VVC"; if (ctx->hevc_state) gf_free(ctx->hevc_state); if (ctx->avc_state) gf_free(ctx->avc_state); if (!ctx->vvc_state) GF_SAFEALLOC(ctx->vvc_state, VVCState); } else { ctx->log_name = "AVC|H264"; if (ctx->hevc_state) gf_free(ctx->hevc_state); if (ctx->vvc_state) gf_free(ctx->vvc_state); if (!ctx->avc_state) GF_SAFEALLOC(ctx->avc_state, AVCState); } if (ctx->timescale && !ctx->opid) { ctx->opid = gf_filter_pid_new(filter); ctx->first_slice_in_au = GF_TRUE; } ctx->full_au_source = GF_FALSE; p = gf_filter_pid_get_property(pid, GF_PROP_PID_UNFRAMED_FULL_AU); if (p && p->value.boolean) { GF_FilterEvent fevt; //this is a reframer used after an encoder, we want to make sure we have enough frames to compute POC otherwise we might block the chain //by holding input packets - ask 1s by default GF_FEVT_INIT(fevt, GF_FEVT_BUFFER_REQ, ctx->ipid); fevt.buffer_req.pid_only = GF_TRUE; fevt.buffer_req.max_buffer_us = 1000000; gf_filter_pid_send_event(ctx->ipid, &fevt); 
ctx->full_au_source = GF_TRUE; } //copy properties at init or reconfig if (ctx->opid) { if (ctx->poc_probe_done) { //full frame mode, flush everything before signaling discontinuity //for other modes discontinuity we signal disconntinuity before the current AU being reconstructed if (ctx->full_au_source && ctx->first_pck_in_au) naludmx_finalize_au_flags(ctx); naludmx_enqueue_or_dispatch(ctx, NULL, GF_TRUE); } gf_filter_pid_copy_properties(ctx->opid, ctx->ipid); gf_filter_pid_set_property(ctx->opid, GF_PROP_PID_STREAM_TYPE, & PROP_UINT(GF_STREAM_VISUAL)); gf_filter_pid_set_property(ctx->opid, GF_PROP_PID_CODECID, & PROP_UINT(ctx->codecid)); gf_filter_pid_set_property(ctx->opid, GF_PROP_PID_UNFRAMED, NULL); gf_filter_pid_set_property(ctx->opid, GF_PROP_PID_UNFRAMED_FULL_AU, NULL); if (!gf_filter_pid_get_property(ctx->ipid, GF_PROP_PID_ID)) gf_filter_pid_set_property(ctx->opid, GF_PROP_PID_ID, &PROP_UINT(1)); ctx->ps_modified = GF_TRUE; ctx->crc_cfg = ctx->crc_cfg_enh = 0; } return GF_OK; } static void naludmx_check_dur(GF_Filter *filter, GF_NALUDmxCtx *ctx) { FILE *stream; GF_BitStream *bs; u64 duration, cur_dur, nal_start, start_code_pos, rate; AVCState *avc_state = NULL; HEVCState *hevc_state = NULL; VVCState *vvc_state = NULL; Bool first_slice_in_pic = GF_TRUE; const GF_PropertyValue *p; const char *filepath = NULL; if (!ctx->opid || ctx->timescale || ctx->file_loaded) return; p = gf_filter_pid_get_property(ctx->ipid, GF_PROP_PID_FILEPATH); if (!p || !p->value.string || !strncmp(p->value.string, "gmem://", 7)) { ctx->is_file = GF_FALSE; ctx->file_loaded = GF_TRUE; return; } filepath = p->value.string; ctx->is_file = GF_TRUE; if (ctx->index<0) { if (gf_opts_get_bool("temp", "force_indexing")) { ctx->index = 1.0; } else { p = gf_filter_pid_get_property(ctx->ipid, GF_PROP_PID_DOWN_SIZE); if (!p || (p->value.longuint > 20000000)) { GF_LOG(GF_LOG_INFO, GF_LOG_PARSER, ("[%s] Source file larger than 20M, skipping indexing\n", ctx->log_name)); } else { ctx->index = 
-ctx->index; } } } if (ctx->index<=0) { ctx->duration.num = 1; ctx->file_loaded = GF_TRUE; return; } if (ctx->codecid==GF_CODECID_HEVC) { GF_SAFEALLOC(hevc_state, HEVCState); if (!hevc_state) return; } else if (ctx->codecid==GF_CODECID_VVC) { GF_SAFEALLOC(vvc_state, VVCState); if (!vvc_state) return; } else { GF_SAFEALLOC(avc_state, AVCState); if (!avc_state) return; } stream = gf_fopen(filepath, "rb"); if (!stream) { if (hevc_state) gf_free(hevc_state); if (vvc_state) gf_free(vvc_state); if (avc_state) gf_free(avc_state); return; } ctx->index_size = 0; duration = 0; cur_dur = 0; bs = gf_bs_from_file(stream, GF_BITSTREAM_READ); gf_bs_enable_emulation_byte_removal(bs, GF_TRUE); start_code_pos = gf_bs_get_position(bs); if (!gf_media_nalu_is_start_code(bs)) { if (hevc_state) gf_free(hevc_state); if (avc_state) gf_free(avc_state); gf_bs_del(bs); gf_fclose(stream); ctx->duration.num = 1; ctx->file_loaded = GF_TRUE; return; } nal_start = gf_bs_get_position(bs); while (gf_bs_available(bs)) { u32 nal_size; s32 res; Bool is_rap = GF_FALSE; Bool is_slice = GF_FALSE; nal_size = gf_media_nalu_next_start_code_bs(bs); gf_bs_seek(bs, nal_start); if (hevc_state) { #ifndef GPAC_DISABLE_HEVC u8 temporal_id, layer_id, nal_type; res = gf_hevc_parse_nalu_bs(bs, hevc_state, &nal_type, &temporal_id, &layer_id); if (res>0) first_slice_in_pic = GF_TRUE; switch (nal_type) { case GF_HEVC_NALU_SLICE_IDR_N_LP: case GF_HEVC_NALU_SLICE_IDR_W_DLP: case GF_HEVC_NALU_SLICE_CRA: case GF_HEVC_NALU_SLICE_BLA_N_LP: case GF_HEVC_NALU_SLICE_BLA_W_LP: case GF_HEVC_NALU_SLICE_BLA_W_DLP: is_rap = GF_TRUE; is_slice = GF_TRUE; break; case GF_HEVC_NALU_SLICE_STSA_N: case GF_HEVC_NALU_SLICE_STSA_R: case GF_HEVC_NALU_SLICE_RADL_R: case GF_HEVC_NALU_SLICE_RASL_R: case GF_HEVC_NALU_SLICE_RADL_N: case GF_HEVC_NALU_SLICE_RASL_N: case GF_HEVC_NALU_SLICE_TRAIL_N: case GF_HEVC_NALU_SLICE_TRAIL_R: case GF_HEVC_NALU_SLICE_TSA_N: case GF_HEVC_NALU_SLICE_TSA_R: is_slice = GF_TRUE; break; } #endif // GPAC_DISABLE_HEVC } 
else if (vvc_state) { } else { u32 nal_type; u64 pos = gf_bs_get_position(bs); res = gf_avc_parse_nalu(bs, avc_state); if (res>0) first_slice_in_pic = GF_TRUE; nal_type = avc_state->last_nal_type_parsed; switch (nal_type) { case GF_AVC_NALU_SEQ_PARAM: gf_bs_seek(bs, pos); gf_avc_read_sps_bs(bs, avc_state, GF_FALSE, NULL); break; case GF_AVC_NALU_PIC_PARAM: gf_bs_seek(bs, pos); gf_avc_read_pps_bs(bs, avc_state); break; case GF_AVC_NALU_IDR_SLICE: is_rap = GF_TRUE; is_slice = GF_TRUE; break; case GF_AVC_NALU_NON_IDR_SLICE: case GF_AVC_NALU_DP_A_SLICE: case GF_AVC_NALU_DP_B_SLICE: case GF_AVC_NALU_DP_C_SLICE: is_slice = GF_TRUE; break; } } if (is_rap && first_slice_in_pic && (cur_dur >= ctx->index * ctx->cur_fps.num) ) { if (!ctx->index_alloc_size) ctx->index_alloc_size = 10; else if (ctx->index_alloc_size == ctx->index_size) ctx->index_alloc_size *= 2; ctx->indexes = gf_realloc(ctx->indexes, sizeof(NALUIdx)*ctx->index_alloc_size); ctx->indexes[ctx->index_size].pos = start_code_pos; ctx->indexes[ctx->index_size].duration = (Double) duration; ctx->indexes[ctx->index_size].duration /= ctx->cur_fps.num; ctx->index_size ++; cur_dur = 0; } if (is_slice && first_slice_in_pic) { duration += ctx->cur_fps.den; cur_dur += ctx->cur_fps.den; first_slice_in_pic = GF_FALSE; } gf_bs_seek(bs, nal_start + nal_size); /* nal_start = gf_media_nalu_next_start_code_bs(bs); if (nal_start) gf_bs_skip_bytes(bs, nal_start); */ if (gf_bs_available(bs)<4) break; start_code_pos = gf_bs_get_position(bs); nal_start = gf_media_nalu_is_start_code(bs); if (!nal_start) { break; } nal_start = gf_bs_get_position(bs); } rate = gf_bs_get_position(bs); gf_bs_del(bs); gf_fclose(stream); if (hevc_state) gf_free(hevc_state); if (vvc_state) gf_free(vvc_state); if (avc_state) gf_free(avc_state); if (!ctx->duration.num || (ctx->duration.num * ctx->cur_fps.num != duration * ctx->duration.den)) { ctx->duration.num = (s32) duration; ctx->duration.den = ctx->cur_fps.num; gf_filter_pid_set_property(ctx->opid, 
GF_PROP_PID_DURATION, & PROP_FRAC64(ctx->duration)); if (duration && (!gf_sys_is_test_mode() || gf_opts_get_bool("temp", "force_indexing"))) { rate *= 8 * ctx->duration.den; rate /= ctx->duration.num; ctx->bitrate = (u32) rate; } } p = gf_filter_pid_get_property(ctx->ipid, GF_PROP_PID_FILE_CACHED); if (p && p->value.boolean) ctx->file_loaded = GF_TRUE; gf_filter_pid_set_property(ctx->opid, GF_PROP_PID_CAN_DATAREF, & PROP_BOOL(GF_TRUE ) ); } static void naludmx_enqueue_or_dispatch(GF_NALUDmxCtx *ctx, GF_FilterPacket *n_pck, Bool flush_ref) { //TODO: we are dispatching frames in "negctts mode", ie we may have DTS>CTS //need to signal this for consumers using DTS (eg MPEG-2 TS) if (flush_ref && ctx->pck_queue && ctx->poc_diff) { u32 dts_inc=0; s32 last_poc = 0; Bool patch_missing_frame = GF_FALSE; //send all reference packet queued if (ctx->strict_poc==STRICT_POC_ERROR) { u32 i; u32 nb_bframes = 0; for (i=0; i<gf_list_count(ctx->pck_queue); i++) { s32 poc; u64 poc_ts, dts; GF_FilterPacket *q_pck = gf_list_get(ctx->pck_queue, i); if (q_pck == ctx->first_pck_in_au) break; dts = gf_filter_pck_get_dts(q_pck); if (dts == GF_FILTER_NO_TS) continue; poc_ts = gf_filter_pck_get_cts(q_pck); assert(poc_ts != GF_FILTER_NO_TS); poc = (s32) ((s64) poc_ts - CTS_POC_OFFSET_SAFETY); if (i) { if (last_poc>poc) nb_bframes ++; else if (last_poc + ctx->poc_diff<poc) patch_missing_frame = GF_TRUE; } last_poc = poc; } if (nb_bframes>1) patch_missing_frame = GF_FALSE; else if (nb_bframes) patch_missing_frame = GF_TRUE; } last_poc = GF_INT_MIN; while (gf_list_count(ctx->pck_queue) ) { u64 dts; GF_FilterPacket *q_pck = gf_list_get(ctx->pck_queue, 0); if (q_pck == ctx->first_pck_in_au) break; dts = gf_filter_pck_get_dts(q_pck); if (dts != GF_FILTER_NO_TS) { s32 poc; u64 poc_ts, cts; u8 carousel_info = gf_filter_pck_get_carousel_version(q_pck); //we reused timing from source packets if (!carousel_info) { assert(ctx->timescale); gf_list_rem(ctx->pck_queue, 0); gf_filter_pck_send(q_pck); continue; 
} gf_filter_pck_set_carousel_version(q_pck, 0); poc_ts = gf_filter_pck_get_cts(q_pck); assert(poc_ts != GF_FILTER_NO_TS); poc = (s32) ((s64) poc_ts - CTS_POC_OFFSET_SAFETY); if (patch_missing_frame) { if (last_poc!=GF_INT_MIN) { //check if we missed an IDR (poc reset) if (poc && (last_poc > poc) ) { last_poc = 0; dts_inc += ctx->cur_fps.den; ctx->dts_last_IDR = dts; ctx->dts += ctx->cur_fps.den; } //check if we miss a frame while (last_poc + ctx->poc_diff < poc) { last_poc += ctx->poc_diff; dts_inc += ctx->cur_fps.den; ctx->dts += ctx->cur_fps.den; } } last_poc = poc; dts += dts_inc; } //poc is stored as diff since last IDR which has min_poc cts = ( (ctx->min_poc + (s32) poc) * ctx->cur_fps.den ) / ctx->poc_diff + ctx->dts_last_IDR; /*if PAFF, 2 pictures (eg poc) <=> 1 aggregated frame (eg sample), divide by 2*/ if (ctx->is_paff) { cts /= 2; /*in some cases the poc is not on the top field - if that is the case, round up*/ if (cts % ctx->cur_fps.den) { cts = ((cts/ctx->cur_fps.den)+1) * ctx->cur_fps.den; } } gf_filter_pck_set_cts(q_pck, cts); GF_LOG(GF_LOG_DEBUG, GF_LOG_PARSER, ("[%s] Frame timestamps computed dts "LLU" cts "LLU" (poc %d min poc %d poc_diff %d last IDR DTS "LLU")\n", ctx->log_name, dts, cts, poc, ctx->min_poc, ctx->poc_diff, ctx->dts_last_IDR)); if (ctx->importer && ctx->cur_fps.den) { poc = (s32) ( (s64) cts - (s64) dts); if (poc<0) poc = -poc; poc /= ctx->cur_fps.den; if (poc > ctx->max_total_delay) ctx->max_total_delay = poc; } } gf_list_rem(ctx->pck_queue, 0); gf_filter_pck_send(q_pck); } } if (!n_pck) return; if (!ctx->pck_queue) ctx->pck_queue = gf_list_new(); gf_list_add(ctx->pck_queue, n_pck); } static void naludmx_add_param_nalu(GF_List *param_list, GF_NALUFFParam *sl, u8 nal_type) { GF_NALUFFParamArray *pa = NULL; u32 i, count; count = gf_list_count(param_list); for (i=0; i<count; i++) { pa = gf_list_get(param_list, i); if (pa->type == nal_type) break; pa = NULL; } if (!pa) { GF_SAFEALLOC(pa, GF_NALUFFParamArray); if (!pa) return; 
pa->array_completeness = 1;
		pa->type = nal_type;
		pa->nalus = gf_list_new();
		gf_list_add(param_list, pa);
	}
	gf_list_add(pa->nalus, sl);
}

#ifndef GPAC_DISABLE_HEVC
/*derives the hvcC parallelismType from the flags of all stored PPS:
	1 = no PPS uses tiles nor WPP, 2 = every PPS uses tiles, 3 = every PPS uses WPP (entropy coding sync),
	0 = mixed/unknown configurations*/
static void naludmx_hevc_set_parall_type(GF_NALUDmxCtx *ctx, GF_HEVCConfig *hevc_cfg)
{
	u32 use_tiles, use_wpp, nb_pps, i, count;
	HEVCState hevc;

	count = gf_list_count(ctx->pps);

	//local parser state only used to decode the PPS payloads
	memset(&hevc, 0, sizeof(HEVCState));
	hevc.sps_active_idx = -1;

	use_tiles = 0;
	use_wpp = 0;
	nb_pps = 0;

	for (i=0; i<count; i++) {
		GF_NALUFFParam *slc = (GF_NALUFFParam*)gf_list_get(ctx->pps, i);
		s32 idx = gf_hevc_read_pps(slc->data, slc->size, &hevc);

		if (idx>=0) {
			HEVC_PPS *pps;
			nb_pps++;
			pps = &hevc.pps[idx];
			if (!pps->entropy_coding_sync_enabled_flag && pps->tiles_enabled_flag)
				use_tiles++;
			else if (pps->entropy_coding_sync_enabled_flag && !pps->tiles_enabled_flag)
				use_wpp++;
		}
	}
	if (!use_tiles && !use_wpp) hevc_cfg->parallelismType = 1;
	else if (!use_wpp && (use_tiles==nb_pps) ) hevc_cfg->parallelismType = 2;
	else if (!use_tiles && (use_wpp==nb_pps) ) hevc_cfg->parallelismType = 3;
	else hevc_cfg->parallelismType = 0;
}
#endif // GPAC_DISABLE_HEVC

/*builds the L-HEVC operating points information from the first VPS when a VPS extension
with more than one layer is present; no-op (GF_OK) for single-layer streams.
max_temporal_id: optional per-layer max temporal id table (64 entries) - when NULL,
vps->max_sub_layers is used instead*/
GF_Err naludmx_set_hevc_oinf(GF_NALUDmxCtx *ctx, u8 *max_temporal_id)
{
	GF_OperatingPointsInformation *oinf;
	GF_BitStream *bs;
	u8 *data;
	u32 data_size;
	u32 i;
	HEVC_VPS *vps;
	GF_NALUFFParam *vps_sl = gf_list_get(ctx->vps, 0);
	if (!vps_sl) return GF_SERVICE_ERROR;

	vps = &ctx->hevc_state->vps[vps_sl->id];
	//no VPS extension or single layer: nothing to signal
	if (!vps->vps_extension_found) return GF_OK;
	if (vps->max_layers<2) return GF_OK;

	oinf = gf_isom_oinf_new_entry();
	if (!oinf) return GF_OUT_OF_MEM;

	//collapse the per-index scalability flags into a 16-bit mask
	oinf->scalability_mask = 0;
	for (i = 0; i < 16; i++) {
		if (vps->scalability_mask[i])
			oinf->scalability_mask |= 1 << i;
	}
	//index 0 is the base PTL, extension PTLs follow
	for (i = 0; i < vps->num_profile_tier_level; i++) {
		HEVC_ProfileTierLevel ptl = (i == 0) ?
vps->ptl : vps->ext_ptl[i-1]; LHEVC_ProfileTierLevel *lhevc_ptl; GF_SAFEALLOC(lhevc_ptl, LHEVC_ProfileTierLevel); if (!lhevc_ptl) return GF_OUT_OF_MEM; lhevc_ptl->general_profile_space = ptl.profile_space; lhevc_ptl->general_tier_flag = ptl.tier_flag; lhevc_ptl->general_profile_idc = ptl.profile_idc; lhevc_ptl->general_profile_compatibility_flags = ptl.profile_compatibility_flag; lhevc_ptl->general_constraint_indicator_flags = 0; if (ptl.general_progressive_source_flag) lhevc_ptl->general_constraint_indicator_flags |= ((u64)1) << 47; if (ptl.general_interlaced_source_flag) lhevc_ptl->general_constraint_indicator_flags |= ((u64)1) << 46; if (ptl.general_non_packed_constraint_flag) lhevc_ptl->general_constraint_indicator_flags |= ((u64)1) << 45; if (ptl.general_frame_only_constraint_flag) lhevc_ptl->general_constraint_indicator_flags |= ((u64)1) << 44; lhevc_ptl->general_constraint_indicator_flags |= ptl.general_reserved_44bits; lhevc_ptl->general_level_idc = ptl.level_idc; gf_list_add(oinf->profile_tier_levels, lhevc_ptl); } for (i = 0; i < vps->num_output_layer_sets; i++) { LHEVC_OperatingPoint *op; u32 j; u16 minPicWidth, minPicHeight, maxPicWidth, maxPicHeight; u8 maxChromaFormat, maxBitDepth; u8 maxTemporalId; GF_SAFEALLOC(op, LHEVC_OperatingPoint); if (!op) return GF_OUT_OF_MEM; op->output_layer_set_idx = i; op->layer_count = vps->num_necessary_layers[i]; minPicWidth = minPicHeight = maxPicWidth = maxPicHeight = maxTemporalId = 0; maxChromaFormat = maxBitDepth = 0; for (j = 0; j < op->layer_count; j++) { u32 format_idx; u32 bitDepth; op->layers_info[j].ptl_idx = vps->profile_tier_level_idx[i][j]; op->layers_info[j].layer_id = j; op->layers_info[j].is_outputlayer = vps->output_layer_flag[i][j]; //FIXME: we consider that this flag is never set op->layers_info[j].is_alternate_outputlayer = GF_FALSE; if (max_temporal_id) { if (!maxTemporalId || (maxTemporalId < max_temporal_id[op->layers_info[j].layer_id])) maxTemporalId = 
max_temporal_id[op->layers_info[j].layer_id]; } else { maxTemporalId = vps->max_sub_layers; } format_idx = vps->rep_format_idx[op->layers_info[j].layer_id]; if (!minPicWidth || (minPicWidth > vps->rep_formats[format_idx].pic_width_luma_samples)) minPicWidth = vps->rep_formats[format_idx].pic_width_luma_samples; if (!minPicHeight || (minPicHeight > vps->rep_formats[format_idx].pic_height_luma_samples)) minPicHeight = vps->rep_formats[format_idx].pic_height_luma_samples; if (!maxPicWidth || (maxPicWidth < vps->rep_formats[format_idx].pic_width_luma_samples)) maxPicWidth = vps->rep_formats[format_idx].pic_width_luma_samples; if (!maxPicHeight || (maxPicHeight < vps->rep_formats[format_idx].pic_height_luma_samples)) maxPicHeight = vps->rep_formats[format_idx].pic_height_luma_samples; if (!maxChromaFormat || (maxChromaFormat < vps->rep_formats[format_idx].chroma_format_idc)) maxChromaFormat = vps->rep_formats[format_idx].chroma_format_idc; bitDepth = vps->rep_formats[format_idx].bit_depth_chroma > vps->rep_formats[format_idx].bit_depth_luma ? 
vps->rep_formats[format_idx].bit_depth_chroma : vps->rep_formats[format_idx].bit_depth_luma; if (!maxChromaFormat || (maxChromaFormat < bitDepth)) maxChromaFormat = bitDepth; } op->max_temporal_id = maxTemporalId; op->minPicWidth = minPicWidth; op->minPicHeight = minPicHeight; op->maxPicWidth = maxPicWidth; op->maxPicHeight = maxPicHeight; op->maxChromaFormat = maxChromaFormat; op->maxBitDepth = maxBitDepth; op->frame_rate_info_flag = GF_FALSE; //FIXME: should fetch this info from VUI op->bit_rate_info_flag = GF_FALSE; //we don't use it gf_list_add(oinf->operating_points, op); } for (i = 0; i < vps->max_layers; i++) { LHEVC_DependentLayer *dep; u32 j, k; GF_SAFEALLOC(dep, LHEVC_DependentLayer); if (!dep) return GF_OUT_OF_MEM; dep->dependent_layerID = vps->layer_id_in_nuh[i]; for (j = 0; j < vps->max_layers; j++) { if (vps->direct_dependency_flag[dep->dependent_layerID][j]) { dep->dependent_on_layerID[dep->num_layers_dependent_on] = j; dep->num_layers_dependent_on ++; } } k = 0; for (j = 0; j < 16; j++) { if (oinf->scalability_mask & (1 << j)) { dep->dimension_identifier[j] = vps->dimension_id[i][k]; k++; } } gf_list_add(oinf->dependency_layers, dep); } //write Operating Points Information Sample Group bs = gf_bs_new(NULL, 0, GF_BITSTREAM_WRITE); gf_isom_oinf_write_entry(oinf, bs); gf_bs_get_content(bs, &data, &data_size); gf_bs_del(bs); gf_isom_oinf_del_entry(oinf); gf_filter_pid_set_info_str(ctx->opid, "hevc:oinf", &PROP_DATA_NO_COPY(data, data_size) ); return GF_OK; } static void naludmx_set_hevc_linf(GF_NALUDmxCtx *ctx) { u32 i, nb_layers=0, nb_sublayers=0; u8 *data; u32 data_size; GF_BitStream *bs; for (i=0; i<64; i++) { if (ctx->linf[i].layer_id_plus_one) nb_layers++; if (ctx->linf[i].min_temporal_id != ctx->linf[i].max_temporal_id) nb_sublayers++; } if (!nb_layers && !nb_sublayers) return; bs = gf_bs_new(NULL, 0, GF_BITSTREAM_WRITE); gf_bs_write_int(bs, 0, 2); gf_bs_write_int(bs, nb_layers, 6); for (i=0; i<nb_layers; i++) { if (! 
ctx->linf[i].layer_id_plus_one) continue; gf_bs_write_int(bs, 0, 4); gf_bs_write_int(bs, ctx->linf[i].layer_id_plus_one - 1, 6); gf_bs_write_int(bs, ctx->linf[i].min_temporal_id, 3); gf_bs_write_int(bs, ctx->linf[i].max_temporal_id, 3); gf_bs_write_int(bs, 0, 1); gf_bs_write_int(bs, 0xFF, 7); } gf_bs_get_content(bs, &data, &data_size); gf_bs_del(bs); gf_filter_pid_set_info_str(ctx->opid, "hevc:linf", &PROP_DATA_NO_COPY(data, data_size) ); } static void naludmx_create_hevc_decoder_config(GF_NALUDmxCtx *ctx, u8 **dsi, u32 *dsi_size, u8 **dsi_enh, u32 *dsi_enh_size, u32 *max_width, u32 *max_height, u32 *max_enh_width, u32 *max_enh_height, GF_Fraction *sar, Bool *has_hevc_base) { #ifndef GPAC_DISABLE_HEVC u32 i, count; u8 layer_id; Bool first = GF_TRUE; Bool first_lhvc = GF_TRUE; GF_HEVCConfig *cfg; GF_HEVCConfig *hvcc; GF_HEVCConfig *lvcc; u32 max_w, max_h, max_ew, max_eh; *has_hevc_base = GF_FALSE; max_w = max_h = 0; max_ew = max_eh = 0; sar->num = sar->den = 0; hvcc = gf_odf_hevc_cfg_new(); lvcc = gf_odf_hevc_cfg_new(); hvcc->nal_unit_size = ctx->nal_length; lvcc->nal_unit_size = ctx->nal_length; lvcc->is_lhvc = GF_TRUE; //check we have one pps or sps in base layer count = gf_list_count(ctx->sps); for (i=0; i<count; i++) { GF_NALUFFParam *sl = gf_list_get(ctx->sps, i); layer_id = ((sl->data[0] & 0x1) << 5) | (sl->data[1] >> 3); if (!layer_id) { *has_hevc_base = GF_TRUE; break; } } count = gf_list_count(ctx->pps); for (i=0; i<count; i++) { GF_NALUFFParam *sl = gf_list_get(ctx->pps, i); layer_id = ((sl->data[0] & 0x1) << 5) | (sl->data[1] >> 3); if (!layer_id) { *has_hevc_base = GF_TRUE; break; } } //assign vps first so that they are serialized first count = gf_list_count(ctx->vps); for (i=0; i<count; i++) { GF_NALUFFParam *sl = gf_list_get(ctx->vps, i); HEVC_VPS *vps = &ctx->hevc_state->vps[sl->id]; if (!i) { hvcc->avgFrameRate = lvcc->avgFrameRate = vps->rates[0].avg_pic_rate; hvcc->constantFrameRate = lvcc->constantFrameRate = vps->rates[0].constand_pic_rate_idc; 
hvcc->numTemporalLayers = lvcc->numTemporalLayers = vps->max_sub_layers; hvcc->temporalIdNested = lvcc->temporalIdNested = vps->temporal_id_nesting; } //TODO set scalability mask if (!ctx->analyze) naludmx_add_param_nalu((ctx->explicit || ! (*has_hevc_base) ) ? lvcc->param_array : hvcc->param_array, sl, GF_HEVC_NALU_VID_PARAM); } count = gf_list_count(ctx->sps); for (i=0; i<count; i++) { Bool is_lhvc = GF_FALSE; GF_NALUFFParam *sl = gf_list_get(ctx->sps, i); HEVC_SPS *sps = &ctx->hevc_state->sps[sl->id]; layer_id = ((sl->data[0] & 0x1) << 5) | (sl->data[1] >> 3); if (!layer_id) *has_hevc_base = GF_TRUE; if (ctx->explicit || layer_id) { cfg = lvcc; is_lhvc = GF_TRUE; } else { cfg = hvcc; } if (first || (is_lhvc && first_lhvc) ) { cfg->configurationVersion = 1; cfg->profile_space = sps->ptl.profile_space; cfg->tier_flag = sps->ptl.tier_flag; cfg->profile_idc = sps->ptl.profile_idc; cfg->general_profile_compatibility_flags = sps->ptl.profile_compatibility_flag; cfg->progressive_source_flag = sps->ptl.general_progressive_source_flag; cfg->interlaced_source_flag = sps->ptl.general_interlaced_source_flag; cfg->non_packed_constraint_flag = sps->ptl.general_non_packed_constraint_flag; cfg->frame_only_constraint_flag = sps->ptl.general_frame_only_constraint_flag; cfg->constraint_indicator_flags = sps->ptl.general_reserved_44bits; cfg->level_idc = sps->ptl.level_idc; cfg->chromaFormat = sps->chroma_format_idc; cfg->luma_bit_depth = sps->bit_depth_luma; cfg->chroma_bit_depth = sps->bit_depth_chroma; ctx->interlaced = cfg->interlaced_source_flag ? 
GF_TRUE : GF_FALSE; if (sps->aspect_ratio_info_present_flag && sps->sar_width && sps->sar_height) { sar->num = sps->sar_width; sar->den = sps->sar_height; } /*disable frame rate scan, most bitstreams have wrong values there*/ if (!ctx->timescale && first && (!ctx->fps.num || !ctx->fps.den) && sps->has_timing_info /*if detected FPS is greater than 1000, assume wrong timing info*/ && (sps->time_scale <= 1000*sps->num_units_in_tick) ) { ctx->cur_fps.num = sps->time_scale; ctx->cur_fps.den = sps->num_units_in_tick; if (!ctx->fps.num && ctx->dts==ctx->fps.den) ctx->dts = ctx->cur_fps.den; } ctx->fps = ctx->cur_fps; } first = GF_FALSE; if (is_lhvc) { first_lhvc = GF_FALSE; if (sps->width > max_ew) max_ew = sps->width; if (sps->height > max_eh) max_eh = sps->height; } else { if (sps->width > max_w) max_w = sps->width; if (sps->height > max_h) max_h = sps->height; } if (!ctx->analyze) naludmx_add_param_nalu(cfg->param_array, sl, GF_HEVC_NALU_SEQ_PARAM); } cfg = ctx->explicit ? lvcc : hvcc; count = gf_list_count(ctx->pps); for (i=0; i<count; i++) { GF_NALUFFParam *sl = gf_list_get(ctx->pps, i); layer_id = ((sl->data[0] & 0x1) << 5) | (sl->data[1] >> 3); if (!layer_id) *has_hevc_base = GF_TRUE; if (!ctx->analyze) naludmx_add_param_nalu(layer_id ? lvcc->param_array : cfg->param_array, sl, GF_HEVC_NALU_PIC_PARAM); } *dsi = *dsi_enh = NULL; *dsi_size = *dsi_enh_size = 0; if (ctx->explicit || ! 
(*has_hevc_base) ) { naludmx_hevc_set_parall_type(ctx, lvcc); gf_odf_hevc_cfg_write(lvcc, dsi, dsi_size); *max_width = *max_enh_width = max_ew; *max_height = *max_enh_height = max_eh; } else { naludmx_hevc_set_parall_type(ctx, hvcc); gf_odf_hevc_cfg_write(hvcc, dsi, dsi_size); if (gf_list_count(lvcc->param_array) ) { naludmx_hevc_set_parall_type(ctx, lvcc); gf_odf_hevc_cfg_write(lvcc, dsi_enh, dsi_enh_size); } *max_width = max_w; *max_height = max_h; *max_enh_width = max_ew; *max_enh_height = max_eh; } count = gf_list_count(hvcc->param_array); for (i=0; i<count; i++) { GF_NALUFFParamArray *pa = gf_list_get(hvcc->param_array, i); gf_list_reset(pa->nalus); } count = gf_list_count(lvcc->param_array); for (i=0; i<count; i++) { GF_NALUFFParamArray *pa = gf_list_get(lvcc->param_array, i); gf_list_reset(pa->nalus); } gf_odf_hevc_cfg_del(hvcc); gf_odf_hevc_cfg_del(lvcc); #endif // GPAC_DISABLE_HEVC } static void naludmx_create_vvc_decoder_config(GF_NALUDmxCtx *ctx, u8 **dsi, u32 *dsi_size, u8 **dsi_enh, u32 *dsi_enh_size, u32 *max_width, u32 *max_height, u32 *max_enh_width, u32 *max_enh_height, GF_Fraction *sar, Bool *has_vvc_base) { u32 i, count; u8 layer_id; Bool first = GF_TRUE; Bool first_lvvc = GF_TRUE; GF_VVCConfig *cfg; u32 max_w, max_h, max_ew, max_eh; *has_vvc_base = GF_FALSE; max_w = max_h = 0; max_ew = max_eh = 0; sar->num = sar->den = 0; cfg = gf_odf_vvc_cfg_new(); cfg->nal_unit_size = ctx->nal_length; //check we have one pps or sps in base layer count = gf_list_count(ctx->sps); for (i=0; i<count; i++) { GF_NALUFFParam *sl = gf_list_get(ctx->sps, i); layer_id = (sl->data[0] & 0x3f); //todo, base is not always 0 ! if (!layer_id) { *has_vvc_base = GF_TRUE; break; } } count = gf_list_count(ctx->pps); for (i=0; i<count; i++) { GF_NALUFFParam *sl = gf_list_get(ctx->pps, i); layer_id = (sl->data[0] & 0x3f); //todo, base is not always 0 ! 
if (!layer_id) { *has_vvc_base = GF_TRUE; break; } } //assign vps first so that they are serialized first count = gf_list_count(ctx->vps); for (i=0; i<count; i++) { GF_NALUFFParam *sl = gf_list_get(ctx->vps, i); VVC_VPS *vps = &ctx->vvc_state->vps[sl->id]; if (!i) { cfg->avgFrameRate = vps->rates[0].avg_pic_rate; cfg->constantFrameRate = vps->rates[0].constand_pic_rate_idc; cfg->numTemporalLayers = vps->max_sub_layers; } if (!ctx->analyze) naludmx_add_param_nalu(cfg->param_array, sl, GF_VVC_NALU_VID_PARAM); } count = gf_list_count(ctx->sps); for (i=0; i<count; i++) { Bool is_lvvc = GF_FALSE; GF_NALUFFParam *sl = gf_list_get(ctx->sps, i); VVC_SPS *sps = &ctx->vvc_state->sps[sl->id]; layer_id = sl->data[0] & 0x3f; if (!layer_id) *has_vvc_base = GF_TRUE; if (ctx->explicit || layer_id) { is_lvvc = GF_TRUE; } if (first || (is_lvvc && first_lvvc) ) { VVC_VPS *vps = &ctx->vvc_state->vps[sps->vps_id]; cfg->avgFrameRate = 0; cfg->constantFrameRate = 1; cfg->numTemporalLayers = sps->max_sublayers; cfg->nal_unit_size = ctx->nal_length; cfg->ptl_present = vps->num_ptl ? 1 : 0; if (vps->num_ptl) { cfg->num_constraint_info = vps->ptl[0].gci_present ? 
1 : 12; cfg->general_profile_idc = vps->ptl[0].general_profile_idc; cfg->general_tier_flag = vps->ptl[0].general_tier_flag; cfg->general_level_idc = vps->ptl[0].general_level_idc; cfg->ptl_frame_only_constraint = vps->ptl[0].frame_only_constraint; cfg->ptl_multilayer_enabled = vps->ptl[0].multilayer_enabled; cfg->general_constraint_info = gf_malloc(sizeof(u8) * cfg-> num_constraint_info); if (cfg->general_constraint_info) memcpy(cfg->general_constraint_info, vps->ptl[0].gci, cfg->num_constraint_info); //todo set temporal sublayers cfg->ptl_sublayer_present_mask = 0; cfg->num_sub_profiles = 0; cfg->ols_idx = 0; } cfg->chroma_format = sps->chroma_format_idc; cfg->bit_depth = sps->bitdepth; cfg->maxPictureWidth = sps->width; cfg->maxPictureHeight = sps->height; if (sps->aspect_ratio_info_present_flag && sps->sar_width && sps->sar_height) { sar->num = sps->sar_width; sar->den = sps->sar_height; } /*disable frame rate scan, most bitstreams have wrong values there*/ if (!ctx->timescale && first && (!ctx->fps.num || !ctx->fps.den) && sps->has_timing_info /*if detected FPS is greater than 1000, assume wrong timing info*/ && (sps->time_scale <= 1000*sps->num_units_in_tick) ) { ctx->cur_fps.num = sps->time_scale; ctx->cur_fps.den = sps->num_units_in_tick; if (!ctx->fps.num && ctx->dts==ctx->fps.den) ctx->dts = ctx->cur_fps.den; } ctx->fps = ctx->cur_fps; } first = GF_FALSE; if (is_lvvc) { first_lvvc = GF_FALSE; if (sps->width > max_ew) max_ew = sps->width; if (sps->height > max_eh) max_eh = sps->height; } else { if (sps->width > max_w) max_w = sps->width; if (sps->height > max_h) max_h = sps->height; } if (!ctx->analyze) naludmx_add_param_nalu(cfg->param_array, sl, GF_VVC_NALU_SEQ_PARAM); } count = gf_list_count(ctx->pps); for (i=0; i<count; i++) { GF_NALUFFParam *sl = gf_list_get(ctx->pps, i); layer_id = sl->data[0] & 0x3F; if (!layer_id) *has_vvc_base = GF_TRUE; if (!ctx->analyze) naludmx_add_param_nalu(cfg->param_array, sl, GF_VVC_NALU_PIC_PARAM); } count = 
gf_list_count(ctx->vvc_dci); for (i=0; i<count; i++) { GF_NALUFFParam *sl = gf_list_get(ctx->vvc_dci, i); layer_id = sl->data[0] & 0x3F; if (!layer_id) *has_vvc_base = GF_TRUE; if (!ctx->analyze) naludmx_add_param_nalu(cfg->param_array, sl, GF_VVC_NALU_DEC_PARAM); } count = gf_list_count(ctx->vvc_aps_pre); for (i=0; i<count; i++) { GF_NALUFFParam *sl = gf_list_get(ctx->vvc_aps_pre, i); layer_id = sl->data[0] & 0x3F; if (!layer_id) *has_vvc_base = GF_TRUE; if (!ctx->analyze) naludmx_add_param_nalu(cfg->param_array, sl, GF_VVC_NALU_APS_PREFIX); } *dsi = *dsi_enh = NULL; *dsi_size = *dsi_enh_size = 0; gf_odf_vvc_cfg_write(cfg, dsi, dsi_size); *max_width = max_w; *max_height = max_h; *max_enh_width = max_ew; *max_enh_height = max_eh; count = gf_list_count(cfg->param_array); for (i=0; i<count; i++) { GF_NALUFFParamArray *pa = gf_list_get(cfg->param_array, i); gf_list_reset(pa->nalus); } gf_odf_vvc_cfg_del(cfg); } void naludmx_create_avc_decoder_config(GF_NALUDmxCtx *ctx, u8 **dsi, u32 *dsi_size, u8 **dsi_enh, u32 *dsi_enh_size, u32 *max_width, u32 *max_height, u32 *max_enh_width, u32 *max_enh_height, GF_Fraction *sar) { u32 i, count; Bool first = GF_TRUE; Bool first_svc = GF_TRUE; GF_AVCConfig *cfg; GF_AVCConfig *avcc; GF_AVCConfig *svcc; u32 max_w, max_h, max_ew, max_eh; max_w = max_h = max_ew = max_eh = 0; sar->num = sar->den = 0; avcc = gf_odf_avc_cfg_new(); svcc = gf_odf_avc_cfg_new(); avcc->nal_unit_size = ctx->nal_length; svcc->nal_unit_size = ctx->nal_length; ctx->is_mvc = GF_FALSE; count = gf_list_count(ctx->sps); for (i=0; i<count; i++) { Bool is_svc = GF_FALSE; GF_NALUFFParam *sl = gf_list_get(ctx->sps, i); AVC_SPS *sps = &ctx->avc_state->sps[sl->id]; u32 nal_type = sl->data[0] & 0x1F; if ((sps->profile_idc == 118) || (sps->profile_idc == 128)) { ctx->is_mvc = GF_TRUE; } if (ctx->explicit) { cfg = svcc; } else if (nal_type == GF_AVC_NALU_SVC_SUBSEQ_PARAM) { cfg = svcc; is_svc = GF_TRUE; } else { cfg = avcc; } if (first || (is_svc && first_svc) ) { 
cfg->configurationVersion = 1; cfg->profile_compatibility = sps->prof_compat; cfg->AVCProfileIndication = sps->profile_idc; cfg->AVCLevelIndication = sps->level_idc; cfg->chroma_format = sps->chroma_format; cfg->luma_bit_depth = 8 + sps->luma_bit_depth_m8; cfg->chroma_bit_depth = 8 + sps->chroma_bit_depth_m8; /*try to patch ?*/ if (!gf_avc_is_rext_profile(cfg->AVCProfileIndication) && ((cfg->chroma_format>1) || (cfg->luma_bit_depth>8) || (cfg->chroma_bit_depth>8)) ) { if ((cfg->luma_bit_depth>8) || (cfg->chroma_bit_depth>8)) { cfg->AVCProfileIndication = 110; } else { cfg->AVCProfileIndication = (cfg->chroma_format==3) ? 244 : 122; } } if (sps->vui_parameters_present_flag && sps->vui.par_num && sps->vui.par_den) { sar->num = sps->vui.par_num; sar->den = sps->vui.par_den; } ctx->interlaced = sps->frame_mbs_only_flag ? GF_FALSE : GF_TRUE; /*disable frame rate scan, most bitstreams have wrong values there*/ if (first && (!ctx->fps.num || !ctx->fps.den) && sps->vui.timing_info_present_flag /*if detected FPS is greater than 1000, assume wrong timing info*/ && (sps->vui.time_scale <= 1000*sps->vui.num_units_in_tick) ) { /*ISO/IEC 14496-10 n11084 Table E-6*/ /* not used : u8 DeltaTfiDivisorTable[] = {1,1,1,2,2,2,2,3,3,4,6}; */ u8 DeltaTfiDivisorIdx; if (!sps->vui.pic_struct_present_flag) { DeltaTfiDivisorIdx = 1 + (1 - ctx->avc_state->s_info.field_pic_flag); } else { if (!ctx->avc_state->sei.pic_timing.pic_struct) DeltaTfiDivisorIdx = 2; else if (ctx->avc_state->sei.pic_timing.pic_struct == 8) DeltaTfiDivisorIdx = 6; else DeltaTfiDivisorIdx = (ctx->avc_state->sei.pic_timing.pic_struct+1) / 2; } if (!ctx->timescale) { ctx->cur_fps.num = 2 * sps->vui.time_scale; ctx->cur_fps.den = 2 * sps->vui.num_units_in_tick * DeltaTfiDivisorIdx; if (!ctx->fps.num && ctx->dts==ctx->fps.den) ctx->dts = ctx->cur_fps.den; } if (! 
sps->vui.fixed_frame_rate_flag) GF_LOG(GF_LOG_INFO, GF_LOG_PARSER, ("[%s] Possible Variable Frame Rate: VUI \"fixed_frame_rate_flag\" absent\n", ctx->log_name)); } ctx->fps = ctx->cur_fps; } first = GF_FALSE; if (is_svc) { first_svc = GF_FALSE; if (sps->width > max_ew) max_ew = sps->width; if (sps->height > max_eh) max_eh = sps->height; } else { if (sps->width > max_w) max_w = sps->width; if (sps->height > max_h) max_h = sps->height; } if (!ctx->analyze) gf_list_add(cfg->sequenceParameterSets, sl); } cfg = ctx->explicit ? svcc : avcc; count = gf_list_count(ctx->sps_ext); for (i=0; i<count; i++) { GF_NALUFFParam *sl = gf_list_get(ctx->sps_ext, i); if (!cfg->sequenceParameterSetExtensions) cfg->sequenceParameterSetExtensions = gf_list_new(); if (!ctx->analyze) gf_list_add(cfg->sequenceParameterSetExtensions, sl); } cfg = ctx->explicit ? svcc : avcc; count = gf_list_count(ctx->pps); for (i=0; i<count; i++) { GF_NALUFFParam *sl = gf_list_get(ctx->pps, i); if (!ctx->analyze) gf_list_add(cfg->pictureParameterSets, sl); } cfg = svcc; count = gf_list_count(ctx->pps_svc); for (i=0; i<count; i++) { GF_NALUFFParam *sl = gf_list_get(ctx->pps_svc, i); if (!ctx->analyze) gf_list_add(cfg->pictureParameterSets, sl); } *dsi = *dsi_enh = NULL; *dsi_size = *dsi_enh_size = 0; if (ctx->explicit) { gf_odf_avc_cfg_write(svcc, dsi, dsi_size); } else { gf_odf_avc_cfg_write(avcc, dsi, dsi_size); if (gf_list_count(svcc->sequenceParameterSets) || svcc->sequenceParameterSetExtensions) { gf_odf_avc_cfg_write(svcc, dsi_enh, dsi_enh_size); } } gf_list_reset(avcc->sequenceParameterSets); gf_list_reset(avcc->sequenceParameterSetExtensions); gf_list_reset(avcc->pictureParameterSets); gf_list_reset(svcc->sequenceParameterSets); gf_list_reset(svcc->sequenceParameterSetExtensions); gf_list_reset(svcc->pictureParameterSets); gf_odf_avc_cfg_del(avcc); gf_odf_avc_cfg_del(svcc); *max_width = max_w; *max_height = max_h; *max_enh_width = max_ew; *max_enh_height = max_eh; } static void 
naludmx_check_pid(GF_Filter *filter, GF_NALUDmxCtx *ctx) { u32 w, h, ew, eh; u8 *dsi, *dsi_enh; u32 dsi_size, dsi_enh_size; u32 crc_cfg, crc_cfg_enh; GF_Fraction sar; Bool has_hevc_base = GF_TRUE; Bool has_colr_info = GF_FALSE; if (ctx->analyze) { if (ctx->opid && !ctx->ps_modified) return; } else { if (!ctx->ps_modified) return; } ctx->ps_modified = GF_FALSE; dsi = dsi_enh = NULL; if (!ctx->timescale) { ctx->cur_fps = ctx->fps; if (!ctx->cur_fps.num || !ctx->cur_fps.den) { ctx->cur_fps.num = 25000; ctx->cur_fps.den = 1000; } } if (ctx->codecid==GF_CODECID_HEVC) { naludmx_create_hevc_decoder_config(ctx, &dsi, &dsi_size, &dsi_enh, &dsi_enh_size, &w, &h, &ew, &eh, &sar, &has_hevc_base); } else if (ctx->codecid==GF_CODECID_VVC) { naludmx_create_vvc_decoder_config(ctx, &dsi, &dsi_size, &dsi_enh, &dsi_enh_size, &w, &h, &ew, &eh, &sar, &has_hevc_base); } else { naludmx_create_avc_decoder_config(ctx, &dsi, &dsi_size, &dsi_enh, &dsi_enh_size, &w, &h, &ew, &eh, &sar); } crc_cfg = crc_cfg_enh = 0; if (dsi) crc_cfg = gf_crc_32(dsi, dsi_size); if (dsi_enh) crc_cfg_enh = gf_crc_32(dsi_enh, dsi_enh_size); if (!ctx->opid) { ctx->opid = gf_filter_pid_new(filter); naludmx_check_dur(filter, ctx); ctx->first_slice_in_au = GF_TRUE; } if ((ctx->crc_cfg == crc_cfg) && (ctx->crc_cfg_enh == crc_cfg_enh) && (ctx->width==w) && (ctx->height==h) && (ctx->sar.num * sar.den == ctx->sar.den * sar.num) ) { if (dsi) gf_free(dsi); if (dsi_enh) gf_free(dsi_enh); return; } naludmx_enqueue_or_dispatch(ctx, NULL, GF_TRUE); if (!ctx->analyze && (gf_list_count(ctx->pck_queue)>1)) { GF_LOG(dsi_enh ? GF_LOG_DEBUG : GF_LOG_ERROR, GF_LOG_PARSER, ("[%s] xPS changed but could not flush frames before signaling state change %s\n", ctx->log_name, dsi_enh ? 
"- likely scalable xPS update" : "!")); } //copy properties at init or reconfig gf_filter_pid_copy_properties(ctx->opid, ctx->ipid); gf_filter_pid_set_property(ctx->opid, GF_PROP_PID_STREAM_TYPE, & PROP_UINT(GF_STREAM_VISUAL)); gf_filter_pid_set_property(ctx->opid, GF_PROP_PID_UNFRAMED, NULL); if (!gf_filter_pid_get_property(ctx->ipid, GF_PROP_PID_ID)) gf_filter_pid_set_property(ctx->opid, GF_PROP_PID_ID, &PROP_UINT(1)); ctx->width = w; ctx->height = h; ctx->sar = sar; ctx->crc_cfg = crc_cfg; ctx->crc_cfg_enh = crc_cfg_enh; gf_filter_pid_set_property(ctx->opid, GF_PROP_PID_WIDTH, & PROP_UINT( ctx->width)); gf_filter_pid_set_property(ctx->opid, GF_PROP_PID_HEIGHT, & PROP_UINT( ctx->height)); if (ew && eh) { gf_filter_pid_set_property(ctx->opid, GF_PROP_PID_WIDTH_MAX, & PROP_UINT( ew )); gf_filter_pid_set_property(ctx->opid, GF_PROP_PID_HEIGHT_MAX, & PROP_UINT( eh )); } if (ctx->sar.den) gf_filter_pid_set_property(ctx->opid, GF_PROP_PID_SAR, & PROP_FRAC(ctx->sar)); else gf_filter_pid_set_property(ctx->opid, GF_PROP_PID_SAR, NULL); gf_filter_pid_set_property(ctx->opid, GF_PROP_PID_FPS, & PROP_FRAC(ctx->cur_fps)); gf_filter_pid_set_property(ctx->opid, GF_PROP_PID_TIMESCALE, & PROP_UINT(ctx->timescale ? 
ctx->timescale : ctx->cur_fps.num)); if (ctx->explicit || !has_hevc_base) { u32 enh_cid = GF_CODECID_SVC; if (ctx->codecid==GF_CODECID_HEVC) enh_cid = GF_CODECID_LHVC; gf_filter_pid_set_property(ctx->opid, GF_PROP_PID_CODECID, & PROP_UINT(enh_cid)); if (dsi) gf_filter_pid_set_property(ctx->opid, GF_PROP_PID_DECODER_CONFIG, &PROP_DATA_NO_COPY(dsi, dsi_size) ); } else { gf_filter_pid_set_property(ctx->opid, GF_PROP_PID_CODECID, & PROP_UINT(ctx->codecid)); if (dsi) gf_filter_pid_set_property(ctx->opid, GF_PROP_PID_DECODER_CONFIG, &PROP_DATA_NO_COPY(dsi, dsi_size) ); if (dsi_enh) gf_filter_pid_set_property(ctx->opid, GF_PROP_PID_DECODER_CONFIG_ENHANCEMENT, &PROP_DATA_NO_COPY(dsi_enh, dsi_enh_size) ); } if (ctx->bitrate) { gf_filter_pid_set_property(ctx->opid, GF_PROP_PID_BITRATE, & PROP_UINT(ctx->bitrate)); } if ((ctx->codecid==GF_CODECID_HEVC) && gf_list_count(ctx->vps) ) { GF_Err e = naludmx_set_hevc_oinf(ctx, NULL); if (e) { GF_LOG(GF_LOG_WARNING, GF_LOG_PARSER, ("[%s] Failed to create OINF chunk\n", ctx->log_name)); } naludmx_set_hevc_linf(ctx); } if (ctx->duration.num) gf_filter_pid_set_property(ctx->opid, GF_PROP_PID_DURATION, & PROP_FRAC64(ctx->duration)); if (ctx->is_file /* && ctx->index*/) { gf_filter_pid_set_property(ctx->opid, GF_PROP_PID_PLAYBACK_MODE, & PROP_UINT(GF_PLAYBACK_MODE_FASTFORWARD) ); } //set interlaced or remove interlaced property gf_filter_pid_set_property(ctx->opid, GF_PROP_PID_INTERLACED, ctx->interlaced ? 
& PROP_UINT(GF_TRUE) : NULL); if (ctx->codecid==GF_CODECID_HEVC) { HEVC_SPS *sps = &ctx->hevc_state->sps[ctx->hevc_state->sps_active_idx]; if (sps->colour_description_present_flag) { gf_filter_pid_set_property(ctx->opid, GF_PROP_PID_COLR_PRIMARIES, & PROP_UINT(sps->colour_primaries) ); gf_filter_pid_set_property(ctx->opid, GF_PROP_PID_COLR_TRANSFER, & PROP_UINT(sps->transfer_characteristic) ); gf_filter_pid_set_property(ctx->opid, GF_PROP_PID_COLR_MX, & PROP_UINT(sps->matrix_coeffs) ); gf_filter_pid_set_property(ctx->opid, GF_PROP_PID_COLR_RANGE, & PROP_BOOL(sps->video_full_range_flag) ); has_colr_info = GF_TRUE; } } else if (ctx->codecid==GF_CODECID_VVC) { } else { /*use the last active SPS*/ if (ctx->avc_state->sps[ctx->avc_state->sps_active_idx].vui_parameters_present_flag && ctx->avc_state->sps[ctx->avc_state->sps_active_idx].vui.colour_description_present_flag) { AVC_VUI *vui = &ctx->avc_state->sps[ctx->avc_state->sps_active_idx].vui; gf_filter_pid_set_property(ctx->opid, GF_PROP_PID_COLR_PRIMARIES, & PROP_UINT(vui->colour_primaries) ); gf_filter_pid_set_property(ctx->opid, GF_PROP_PID_COLR_TRANSFER, & PROP_UINT(vui->transfer_characteristics) ); gf_filter_pid_set_property(ctx->opid, GF_PROP_PID_COLR_MX, & PROP_UINT(vui->matrix_coefficients) ); gf_filter_pid_set_property(ctx->opid, GF_PROP_PID_COLR_RANGE, & PROP_BOOL(vui->video_full_range_flag) ); has_colr_info = GF_TRUE; } } if (!has_colr_info) { gf_filter_pid_set_property(ctx->opid, GF_PROP_PID_COLR_PRIMARIES, NULL); gf_filter_pid_set_property(ctx->opid, GF_PROP_PID_COLR_TRANSFER, NULL); gf_filter_pid_set_property(ctx->opid, GF_PROP_PID_COLR_MX, NULL); gf_filter_pid_set_property(ctx->opid, GF_PROP_PID_COLR_RANGE, NULL); } } static Bool naludmx_process_event(GF_Filter *filter, const GF_FilterEvent *evt) { u32 i; u64 file_pos = 0; GF_FilterEvent fevt; GF_NALUDmxCtx *ctx = gf_filter_get_udta(filter); switch (evt->base.type) { case GF_FEVT_PLAY: if (!ctx->is_playing) { ctx->is_playing = GF_TRUE; ctx->cts = 
ctx->dts = 0;
		}
		if (! ctx->is_file) {
			//non-file source: a first play request starting near 0 is served in place, otherwise reset the NAL store
			if (!ctx->initial_play_done) {
				ctx->initial_play_done = GF_TRUE;
				if (evt->play.start_range<0.1) return GF_FALSE;
			}
			ctx->resume_from = 0;
			ctx->nal_store_size = 0;
			return GF_FALSE;
		}
		//negative index means "build index on demand": a seeked play triggers the index build now
		if (ctx->start_range && (ctx->index<0)) {
			ctx->index = -ctx->index;
			ctx->file_loaded = GF_FALSE;
			ctx->duration.den = ctx->duration.num = 0;
			GF_LOG(GF_LOG_INFO, GF_LOG_PARSER, ("[%s] Play request from %d, building index\n", ctx->log_name, ctx->start_range));
			naludmx_check_dur(filter, ctx);
		}
		ctx->start_range = evt->play.start_range;
		ctx->in_seek = GF_TRUE;

		if (ctx->start_range) {
			//reset per-stream stats and locate the last index entry before the requested start time
			ctx->nb_nalus = ctx->nb_i = ctx->nb_p = ctx->nb_b = ctx->nb_sp = ctx->nb_si = ctx->nb_sei = ctx->nb_idr = 0;
			for (i=1; i<ctx->index_size; i++) {
				if (ctx->indexes[i].duration>ctx->start_range) {
					ctx->cts = ctx->dts = (u64) (ctx->indexes[i-1].duration * ctx->cur_fps.num);
					file_pos = ctx->indexes[i-1].pos;
					break;
				}
			}
		}
		if (!ctx->initial_play_done) {
			ctx->initial_play_done = GF_TRUE;
			//seek will not change the current source state, don't send a seek
			if (!file_pos) {
				//very short streams, input is done before we get notified for play and everything stored in memory: flush
				if (gf_filter_pid_is_eos(ctx->ipid) && (ctx->nal_store_size)) {
					gf_filter_post_process_task(filter);
				}
				return GF_TRUE;
			}
		}
		ctx->nb_frames = 0;
		ctx->nb_nalus = 0;
		ctx->resume_from = 0;
		ctx->nal_store_size = 0;
		//post a seek
		GF_FEVT_INIT(fevt, GF_FEVT_SOURCE_SEEK, ctx->ipid);
		fevt.seek.start_offset = file_pos;
		gf_filter_pid_send_event(ctx->ipid, &fevt);
		//cancel event
		return GF_TRUE;

	case GF_FEVT_STOP:
		//don't cancel event
		ctx->is_playing = GF_FALSE;
		ctx->nal_store_size = 0;
		ctx->resume_from = 0;
		return GF_FALSE;

	case GF_FEVT_SET_SPEED:
		//cancel event
		return GF_TRUE;
	default:
		break;
	}
	//by default don't cancel event - to rework once we have downloading in place
	return GF_FALSE;
}

/*advance current CTS/DTS by one frame duration - called once per started access unit*/
static GFINLINE void naludmx_update_time(GF_NALUDmxCtx *ctx)
{
	assert(ctx->cur_fps.num);

	if (ctx->timescale) {
		//very first frame, no dts diff, assume 3000/90k. It should only hurt if we have several frames packet in the first packet sent
		u64 dts_inc = ctx->cur_fps.den ? ctx->cur_fps.den : 3000;
		ctx->cts += dts_inc;
		ctx->dts += dts_inc;
	} else {
		assert(ctx->cur_fps.den);
		ctx->cts += ctx->cur_fps.den;
		ctx->dts += ctx->cur_fps.den;
	}
}

/*store a parameter set NALU (SPS/PPS/VPS/DCI/APS prefix/SPS-ext) in the per-codec list keyed by ps_id.
A PS identical to the stored one (same id and same CRC) is ignored; a changed or new PS raises
ctx->ps_modified so the decoder configuration gets rebuilt at the next naludmx_check_pid*/
static void naludmx_queue_param_set(GF_NALUDmxCtx *ctx, char *data, u32 size, u32 ps_type, s32 ps_id)
{
	GF_List *list = NULL, *alt_list = NULL;
	GF_NALUFFParam *sl;
	u32 i, count;
	u32 crc = gf_crc_32(data, size);

	//select the destination list for this PS type, lazily allocating the optional lists
	if (ctx->codecid==GF_CODECID_HEVC) {
		switch (ps_type) {
		case GF_HEVC_NALU_VID_PARAM:
			if (!ctx->vps) ctx->vps = gf_list_new();
			list = ctx->vps;
			break;
		case GF_HEVC_NALU_SEQ_PARAM:
			list = ctx->sps;
			break;
		case GF_HEVC_NALU_PIC_PARAM:
			list = ctx->pps;
			break;
		default:
			assert(0);
			return;
		}
	} else if (ctx->codecid==GF_CODECID_VVC) {
		switch (ps_type) {
		case GF_VVC_NALU_VID_PARAM:
			if (!ctx->vps) ctx->vps = gf_list_new();
			list = ctx->vps;
			break;
		case GF_VVC_NALU_SEQ_PARAM:
			list = ctx->sps;
			break;
		case GF_VVC_NALU_PIC_PARAM:
			list = ctx->pps;
			break;
		case GF_VVC_NALU_DEC_PARAM:
			if (!ctx->vvc_dci) ctx->vvc_dci = gf_list_new();
			list = ctx->vvc_dci;
			break;
		case GF_VVC_NALU_APS_PREFIX:
			if (!ctx->vvc_aps_pre) ctx->vvc_aps_pre = gf_list_new();
			list = ctx->vvc_aps_pre;
			break;
		default:
			assert(0);
			return;
		}
	} else {
		switch (ps_type) {
		case GF_AVC_NALU_SVC_SUBSEQ_PARAM:
		case GF_AVC_NALU_SEQ_PARAM:
			list = ctx->sps;
			break;
		case GF_AVC_NALU_PIC_PARAM:
			list = ctx->pps;
			//AVC PPS may also live in the SVC PPS list, checked below
			alt_list = ctx->pps_svc;
			break;
		case GF_AVC_NALU_SEQ_PARAM_EXT:
			if (!ctx->sps_ext) ctx->sps_ext = gf_list_new();
			list = ctx->sps_ext;
			break;
		default:
			assert(0);
			return;
		}
	}
	//look for an already-stored PS with the same id
	sl = NULL;
	count = gf_list_count(list);
	for (i=0; i<count; i++) {
		sl = gf_list_get(list, i);
		if (sl->id != ps_id) {
			sl = NULL;
			continue;
		}
		//same ID, same CRC, we don't change our state
		if (sl->crc == crc) return;
		break;
	}
	//handle alt PPS list for SVC
	if (!sl && alt_list) {
		count = gf_list_count(alt_list);
		for (i=0; i<count; i++) {
			sl =
gf_list_get(alt_list, i);
			if (sl->id != ps_id) {
				sl = NULL;
				continue;
			}
			//same ID, same CRC, we don't change our state
			if (sl->crc == crc) return;
			break;
		}
	}

	if (sl) {
		//otherwise we keep this new param set
		//NOTE(review): gf_realloc result is not checked; on failure the previous buffer pointer is lost - confirm against project allocation policy
		sl->data = gf_realloc(sl->data, size);
		memcpy(sl->data, data, size);
		sl->size = size;
		sl->crc = crc;
		ctx->ps_modified = GF_TRUE;
		return;
	}
	//no stored PS with this id: append a new entry
	//TODO we might want to purge the list after a while !!
	GF_SAFEALLOC(sl, GF_NALUFFParam);
	if (!sl) return;
	sl->data = gf_malloc(sizeof(char) * size);
	if (!sl->data) {
		gf_free(sl);
		return;
	}
	memcpy(sl->data, data, size);
	sl->size = size;
	sl->id = ps_id;
	sl->crc = crc;
	ctx->ps_modified = GF_TRUE;
	gf_list_add(list, sl);
}

/*finalize the first packet of the just-completed access unit: set its SAP type,
roll/recovery info for GDR/open-GOP, interlacing, the POC-based CTS placeholder and
ISOBMFF dependency flags, then dispatch the AU if timing allows*/
static void naludmx_finalize_au_flags(GF_NALUDmxCtx *ctx)
{
	u64 ts;
	Bool is_rap = GF_FALSE;

	if (!ctx->first_pck_in_au)
		return;
	if (ctx->au_sap) {
		gf_filter_pck_set_sap(ctx->first_pck_in_au, ctx->au_sap);
		if (ctx->au_sap == GF_FILTER_SAP_1) {
			ctx->dts_last_IDR = gf_filter_pck_get_dts(ctx->first_pck_in_au);
			//in PAFF each field was timed at half rate, scale back - TODO confirm
			if (ctx->is_paff) ctx->dts_last_IDR *= 2;
		}
		if (ctx->au_sap <= GF_FILTER_SAP_3) {
			is_rap = GF_TRUE;
		}
	}
	//force_sync mode: promote non-IDR I-slice AUs to sync samples
	else if (ctx->has_islice && ctx->force_sync && (ctx->sei_recovery_frame_count==0)) {
		gf_filter_pck_set_sap(ctx->first_pck_in_au, GF_FILTER_SAP_1);
		if (!ctx->use_opengop_gdr) {
			ctx->use_opengop_gdr = 1;
			GF_LOG(GF_LOG_WARNING, GF_LOG_CODING, ("[%s] Forcing non-IDR samples with I slices to be marked as sync points - resulting file will not be ISOBMFF conformant\n", ctx->log_name));
		}
		is_rap = GF_TRUE;
	}
	/*set roll info sampleGroups info*/
	else if (!ctx->au_sap && ( (ctx->sei_recovery_frame_count >= 0) || ctx->has_islice) ) {
		/*generic GDR*/
		if (ctx->sei_recovery_frame_count > 0) {
			if (!ctx->use_opengop_gdr)
				ctx->use_opengop_gdr = 1;
			gf_filter_pck_set_sap(ctx->first_pck_in_au, GF_FILTER_SAP_4);
			gf_filter_pck_set_roll_info(ctx->first_pck_in_au, ctx->sei_recovery_frame_count);
		}
		/*open-GOP*/
		else if ((ctx->sei_recovery_frame_count == 0) && ctx->has_islice) {
			if (!ctx->use_opengop_gdr)
				ctx->use_opengop_gdr = 2;
			gf_filter_pck_set_sap(ctx->first_pck_in_au, GF_FILTER_SAP_3);
			is_rap = GF_TRUE;
		}
	}
	if (ctx->is_paff) {
		gf_filter_pck_set_interlaced(ctx->first_pck_in_au, ctx->bottom_field_flag ? 2 : 1);
	}

	//if TS is set, the packet was the first in AU in the input timed packet (eg PES), we reuse the input timing
	ts = gf_filter_pck_get_cts(ctx->first_pck_in_au);
	if (ts == GF_FILTER_NO_TS) {
		/*we store the POC (last POC minus the poc shift) as the CTS offset and re-update the CTS when dispatching*/
		assert(ctx->last_poc >= ctx->poc_shift);
		gf_filter_pck_set_cts(ctx->first_pck_in_au, CTS_POC_OFFSET_SAFETY + ctx->last_poc - ctx->poc_shift);
		//we use the carousel flag temporarly to indicate the cts must be recomputed
		gf_filter_pck_set_carousel_version(ctx->first_pck_in_au, 1);
	}

	//attach accumulated subsample entries (built by naludmx_add_subsample) then reset the blob
	if (ctx->subsamp_buffer_size) {
		gf_filter_pck_set_property(ctx->first_pck_in_au, GF_PROP_PCK_SUBS, &PROP_DATA(ctx->subsamp_buffer, ctx->subsamp_buffer_size) );
		ctx->subsamp_buffer_size = 0;
		ctx->subs_mapped_bytes = 0;
	}

	if (ctx->deps) {
		u8 flags = 0;
		//dependsOn
		flags = (is_rap) ? 2 : 1;
		flags <<= 2;
		//dependedOn
		flags |= ctx->has_ref_slices ? 1 : 2;
		flags <<= 2;
		//hasRedundant
		flags |= ctx->has_redundant ? 1 : 2;
		gf_filter_pck_set_dependency_flags(ctx->first_pck_in_au, flags);
	}
	ctx->has_ref_slices = GF_FALSE;
	ctx->has_redundant = GF_FALSE;

	//if we reuse input packets timing, we can dispatch asap.
	//otherwise if poc probe is done (we know the min_poc_diff between images) and we are not in strict mode, dispatch asap
	//otherwise we will need to wait for the next ref frame to make sure we know all pocs ...
if (ctx->timescale || (!ctx->strict_poc && ctx->poc_probe_done) )
		naludmx_enqueue_or_dispatch(ctx, NULL, GF_TRUE);

	ctx->first_pck_in_au = NULL;
}

/*track the largest NAL size seen so far, logging an error once it exceeds the configured limit*/
static void naludmx_update_nalu_maxsize(GF_NALUDmxCtx *ctx, u32 size)
{
	if (ctx->max_nalu_size < size) {
		ctx->max_nalu_size = size;
		if (size > ctx->max_nalu_size_allowed) {
			GF_LOG(GF_LOG_ERROR, GF_LOG_PARSER, ("[%s] nal size %d larger than max allowed size %d - change import settings\n", ctx->log_name, size, ctx->max_nalu_size_allowed ));
		}
	}
}

/*grow the last queued packet by nb_bytes_to_add and patch its leading NALU size field
(and the size of the pending subsample entry when subsamples are produced).
On success *data_ptr points to the start of the newly added region*/
GF_Err naludmx_realloc_last_pck(GF_NALUDmxCtx *ctx, u32 nb_bytes_to_add, u8 **data_ptr)
{
	GF_Err e;
	u8 *pck_data;
	u32 full_size;
	GF_FilterPacket *pck = gf_list_last(ctx->pck_queue);
	*data_ptr = NULL;
	if (!pck) {
		GF_LOG(GF_LOG_ERROR, GF_LOG_PARSER, ("[%s] attempt to reallocate a non-existing packet!\n", ctx->log_name));
		return GF_SERVICE_ERROR;
	}
	e = gf_filter_pck_expand(pck, nb_bytes_to_add, &pck_data, data_ptr, &full_size);
	if (e) {
		GF_LOG(GF_LOG_ERROR, GF_LOG_PARSER, ("[%s] Failed to reallocate packet buffer: %s\n", ctx->log_name, gf_error_to_string(e) ));
		return e;
	}
	assert(ctx->bs_w);
	//rewrite NALU size length
	full_size -= ctx->nal_length;
	gf_bs_reassign_buffer(ctx->bs_w, pck_data, ctx->nal_length);
	gf_bs_write_int(ctx->bs_w, full_size, 8*ctx->nal_length);
	naludmx_update_nalu_maxsize(ctx, full_size);
	//rewrite subsample size
	if (ctx->subsamples) {
		assert(ctx->subsamp_buffer_size>=14);
		//reassign to beginning of size field (after first u32 flags)
		gf_bs_reassign_buffer(ctx->bs_w, ctx->subsamp_buffer + ctx->subsamp_buffer_size-14 + 4, 14 - 4);
		gf_bs_write_u32(ctx->bs_w, full_size + ctx->nal_length);
	}
	return GF_OK;
}

/*allocate an output packet for one NALU, writing the nal_length-byte size header unless
skip_nal_field is set. When *au_start is set this packet becomes the AU's first packet,
gets the AU timing/framing, and *au_start is cleared for the following NALUs of the AU*/
GF_FilterPacket *naludmx_start_nalu(GF_NALUDmxCtx *ctx, u32 nal_size, Bool skip_nal_field, Bool *au_start, u8 **pck_data)
{
	GF_FilterPacket *dst_pck = gf_filter_pck_new_alloc(ctx->opid, nal_size + (skip_nal_field ? 0 : ctx->nal_length), pck_data);
	if (!dst_pck) return NULL;

	if (!skip_nal_field) {
		if (!ctx->bs_w) ctx->bs_w = gf_bs_new(*pck_data, ctx->nal_length, GF_BITSTREAM_WRITE);
		else gf_bs_reassign_buffer(ctx->bs_w, *pck_data, ctx->nal_length);
		gf_bs_write_int(ctx->bs_w, nal_size, 8*ctx->nal_length);
	}
	if (*au_start) {
		ctx->first_pck_in_au = dst_pck;
		if (ctx->src_pck) gf_filter_pck_merge_properties(ctx->src_pck, dst_pck);
		gf_filter_pck_set_framing(dst_pck, GF_TRUE, GF_FALSE);

		//we reuse the timing of the input packet for the first nal of the first frame starting in this packet
		if (ctx->input_is_au_start) {
			ctx->input_is_au_start = GF_FALSE;
			gf_filter_pck_set_dts(dst_pck, ctx->dts);
			gf_filter_pck_set_cts(dst_pck, ctx->cts);
		} else {
			//we don't set the CTS, it will be set once we detect frame end
			gf_filter_pck_set_dts(dst_pck, ctx->dts);
		}

		//we use the carousel flag temporarly to indicate the cts must be recomputed
		gf_filter_pck_set_carousel_version(dst_pck, ctx->timescale ? 0 : 1);

		gf_filter_pck_set_duration(dst_pck, ctx->pck_duration ? ctx->pck_duration : ctx->cur_fps.den);
		if (ctx->in_seek) gf_filter_pck_set_seek_flag(dst_pck, GF_TRUE);

		naludmx_update_time(ctx);
		*au_start = GF_FALSE;
		ctx->nb_frames++;
	} else {
		gf_filter_pck_set_framing(dst_pck, GF_FALSE, GF_FALSE);
	}
	naludmx_update_nalu_maxsize(ctx, nal_size);

	naludmx_enqueue_or_dispatch(ctx, dst_pck, GF_FALSE);
	return dst_pck;
}

/*append one 14-byte subsample entry (u32 flags, u32 size, u32 reserved, u8 priority, u8 discardable)
covering a NALU of subs_size bytes to the pending subsample blob attached at AU end*/
void naludmx_add_subsample(GF_NALUDmxCtx *ctx, u32 subs_size, u8 subs_priority, u32 subs_reserved)
{
	if (ctx->subsamp_buffer_alloc < ctx->subsamp_buffer_size+14 ) {
		ctx->subsamp_buffer_alloc = ctx->subsamp_buffer_size+14;
		//NOTE(review): gf_realloc result only checked by the assert below - confirm allocation policy
		ctx->subsamp_buffer = gf_realloc(ctx->subsamp_buffer, ctx->subsamp_buffer_alloc);
	}
	assert(ctx->subsamp_buffer);
	gf_bs_reassign_buffer(ctx->bs_w, ctx->subsamp_buffer + ctx->subsamp_buffer_size, 14);
	gf_bs_write_u32(ctx->bs_w, 0); //flags
	gf_bs_write_u32(ctx->bs_w, subs_size + ctx->nal_length);
	gf_bs_write_u32(ctx->bs_w, subs_reserved); //reserved
	gf_bs_write_u8(ctx->bs_w, subs_priority); //priority
	gf_bs_write_u8(ctx->bs_w, 0); //discardable - todo
	ctx->subsamp_buffer_size += 14;
	ctx->subs_mapped_bytes += subs_size + ctx->nal_length;
}

/*parse one HEVC NALU: parameter sets are captured via naludmx_queue_param_set and skipped,
prefix SEIs are buffered for reinsertion at AU start, slices update the I/P/B stats and
layer/temporal-id bookkeeping. Returns the gf_hevc_parse_nalu_bs result; *skip_nal is
raised for NALUs that must not be forwarded*/
static s32 naludmx_parse_nal_hevc(GF_NALUDmxCtx *ctx, char *data, u32 size, Bool *skip_nal, Bool *is_slice, Bool *is_islice)
{
#ifdef GPAC_DISABLE_HEVC
	return -1;
#else
	s32 ps_idx = 0;
	s32 res;
	u8 nal_unit_type, temporal_id, layer_id;
	*skip_nal = GF_FALSE;
	gf_bs_reassign_buffer(ctx->bs_r, data, size);
	res = gf_hevc_parse_nalu_bs(ctx->bs_r, ctx->hevc_state, &nal_unit_type, &temporal_id, &layer_id);
	ctx->nb_nalus++;
	if (res < 0) {
		if (res == -1) {
			GF_LOG(GF_LOG_ERROR, GF_LOG_PARSER, ("[%s] Warning: Error parsing NAL unit\n", ctx->log_name));
		}
		*skip_nal = GF_TRUE;
	}
	if (layer_id && ctx->nosvc) {
		*skip_nal = GF_TRUE;
		return 0;
	}
	switch (nal_unit_type) {
	case GF_HEVC_NALU_VID_PARAM:
		if (ctx->novpsext) {
			//this may modify nal_size, but we don't use it for bitstream reading
			ps_idx = gf_hevc_read_vps_ex(data, &size, ctx->hevc_state, GF_TRUE);
		} else {
			ps_idx =
ctx->hevc_state->last_parsed_vps_id; } if (ps_idx<0) { GF_LOG(GF_LOG_ERROR, GF_LOG_PARSER, ("[%s] Error parsing Video Param Set\n", ctx->log_name)); } else { naludmx_queue_param_set(ctx, data, size, GF_HEVC_NALU_VID_PARAM, ps_idx); } *skip_nal = GF_TRUE; break; case GF_HEVC_NALU_SEQ_PARAM: ps_idx = ctx->hevc_state->last_parsed_sps_id; if (ps_idx<0) { GF_LOG(GF_LOG_ERROR, GF_LOG_PARSER, ("[%s] Error parsing Sequence Param Set\n", ctx->log_name)); } else { naludmx_queue_param_set(ctx, data, size, GF_HEVC_NALU_SEQ_PARAM, ps_idx); } *skip_nal = GF_TRUE; break; case GF_HEVC_NALU_PIC_PARAM: ps_idx = ctx->hevc_state->last_parsed_pps_id; if (ps_idx<0) { GF_LOG(GF_LOG_ERROR, GF_LOG_PARSER, ("[%s] Error parsing Picture Param Set\n", ctx->log_name)); } else { naludmx_queue_param_set(ctx, data, size, GF_HEVC_NALU_PIC_PARAM, ps_idx); } *skip_nal = GF_TRUE; break; case GF_HEVC_NALU_SEI_PREFIX: gf_hevc_parse_sei(data, size, ctx->hevc_state); if (!ctx->nosei) { ctx->nb_sei++; if (ctx->sei_buffer_alloc < ctx->sei_buffer_size + size + ctx->nal_length) { ctx->sei_buffer_alloc = ctx->sei_buffer_size + size + ctx->nal_length; ctx->sei_buffer = gf_realloc(ctx->sei_buffer, ctx->sei_buffer_alloc); } if (!ctx->bs_w) ctx->bs_w = gf_bs_new(ctx->sei_buffer + ctx->sei_buffer_size, ctx->nal_length + size, GF_BITSTREAM_WRITE); else gf_bs_reassign_buffer(ctx->bs_w, ctx->sei_buffer + ctx->sei_buffer_size, ctx->nal_length + size); gf_bs_write_int(ctx->bs_w, size, 8*ctx->nal_length); memcpy(ctx->sei_buffer + ctx->sei_buffer_size + ctx->nal_length, data, size); ctx->sei_buffer_size += size + ctx->nal_length; } else { ctx->nb_nalus--; } *skip_nal = GF_TRUE; break; case GF_HEVC_NALU_SEI_SUFFIX: if (! 
ctx->is_playing) return 0; if (ctx->nosei) { *skip_nal = GF_TRUE; ctx->nb_nalus--; } else { ctx->nb_sei++; } break; /*slice_segment_layer_rbsp*/ case GF_HEVC_NALU_SLICE_STSA_N: case GF_HEVC_NALU_SLICE_STSA_R: case GF_HEVC_NALU_SLICE_RADL_R: case GF_HEVC_NALU_SLICE_RASL_R: case GF_HEVC_NALU_SLICE_RADL_N: case GF_HEVC_NALU_SLICE_RASL_N: case GF_HEVC_NALU_SLICE_TRAIL_N: case GF_HEVC_NALU_SLICE_TRAIL_R: case GF_HEVC_NALU_SLICE_TSA_N: case GF_HEVC_NALU_SLICE_TSA_R: case GF_HEVC_NALU_SLICE_BLA_W_LP: case GF_HEVC_NALU_SLICE_BLA_W_DLP: case GF_HEVC_NALU_SLICE_BLA_N_LP: case GF_HEVC_NALU_SLICE_IDR_W_DLP: case GF_HEVC_NALU_SLICE_IDR_N_LP: case GF_HEVC_NALU_SLICE_CRA: if (! ctx->is_playing) return 0; *is_slice = GF_TRUE; if (! *skip_nal) { switch (ctx->hevc_state->s_info.slice_type) { case GF_HEVC_SLICE_TYPE_P: if (layer_id) ctx->nb_e_p++; else ctx->nb_p++; break; case GF_HEVC_SLICE_TYPE_I: if (layer_id) ctx->nb_e_i++; else ctx->nb_i++; *is_islice = GF_TRUE; break; case GF_HEVC_SLICE_TYPE_B: if (layer_id) ctx->nb_e_b++; else ctx->nb_b++; break; } } break; case GF_HEVC_NALU_ACCESS_UNIT: ctx->nb_aud++; if (!ctx->audelim) { *skip_nal = GF_TRUE; } else if (!ctx->opid) { ctx->has_initial_aud = GF_TRUE; memcpy(ctx->init_aud, data, 3); } break; /*remove*/ case GF_HEVC_NALU_FILLER_DATA: case GF_HEVC_NALU_END_OF_SEQ: case GF_HEVC_NALU_END_OF_STREAM: *skip_nal = GF_TRUE; break; //parsing is partial, see https://github.com/DolbyLaboratories/dlb_mp4base/blob/70a2e1d4d99a8439b7b8087bf50dd503eeea2291/src/esparser/parser_hevc.c#L1233 case GF_HEVC_NALU_DV_RPU: ctx->hevc_state->dv_rpu = GF_TRUE; break; case GF_HEVC_NALU_DV_EL: ctx->hevc_state->dv_el = GF_TRUE; break; default: if (! ctx->is_playing) return 0; GF_LOG(GF_LOG_WARNING, GF_LOG_PARSER, ("[%s] NAL Unit type %d not handled - adding\n", ctx->log_name, nal_unit_type)); break; } if (*skip_nal) return res; ctx->linf[layer_id].layer_id_plus_one = layer_id + 1; if (! 
ctx->linf[layer_id].max_temporal_id ) ctx->linf[layer_id].max_temporal_id = temporal_id;
	else if (ctx->linf[layer_id].max_temporal_id < temporal_id) ctx->linf[layer_id].max_temporal_id = temporal_id;
	//track the lowest temporal id seen for this layer as well
	if (! ctx->linf[layer_id].min_temporal_id ) ctx->linf[layer_id].min_temporal_id = temporal_id;
	else if (ctx->linf[layer_id].min_temporal_id > temporal_id) ctx->linf[layer_id].min_temporal_id = temporal_id;

	if (ctx->max_temporal_id[layer_id] < temporal_id)
		ctx->max_temporal_id[layer_id] = temporal_id;
	if (ctx->min_layer_id > layer_id) ctx->min_layer_id = layer_id;
	return res;
#endif // GPAC_DISABLE_HEVC
}

/* Parses a single VVC NAL unit (without start code / size field).
 * Updates the demuxer stats, queues parameter sets, buffers prefix SEI and
 * decides whether the NAL should be skipped from the output.
 * \param ctx demuxer context
 * \param data NAL unit payload (starting at the NAL header)
 * \param size NAL unit size in bytes
 * \param skip_nal set to GF_TRUE when the NAL must not be forwarded
 * \param is_slice set to GF_TRUE when the NAL is a VCL slice
 * \param is_islice set to GF_TRUE when the slice is intra-coded
 * \return result of gf_media_vvc_parse_nalu_bs (negative on parse error)
 */
static s32 naludmx_parse_nal_vvc(GF_NALUDmxCtx *ctx, char *data, u32 size, Bool *skip_nal, Bool *is_slice, Bool *is_islice)
{
	s32 ps_idx = 0;
	s32 res;
	u8 nal_unit_type, temporal_id, layer_id;
	*skip_nal = GF_FALSE;
	gf_bs_reassign_buffer(ctx->bs_r, data, size);
	res = gf_media_vvc_parse_nalu_bs(ctx->bs_r, ctx->vvc_state, &nal_unit_type, &temporal_id, &layer_id);
	ctx->nb_nalus++;
	if (res < 0) {
		if (res == -1) {
			GF_LOG(GF_LOG_ERROR, GF_LOG_PARSER, ("[%s] Warning: Error parsing NAL unit\n", ctx->log_name));
		}
		*skip_nal = GF_TRUE;
	}
	//drop enhancement layers when SVC/L-HEVC-like layering is disabled
	if (layer_id && ctx->nosvc) {
		*skip_nal = GF_TRUE;
		return 0;
	}
	switch (nal_unit_type) {
	case GF_VVC_NALU_VID_PARAM:
		if (ctx->novpsext) {
			//this may modify nal_size, but we don't use it for bitstream reading
//			ps_idx = gf_hevc_read_vps_ex(data, &size, ctx->hevc_state, GF_TRUE);
			ps_idx = ctx->vvc_state->last_parsed_vps_id;
		} else {
			ps_idx = ctx->vvc_state->last_parsed_vps_id;
		}
		if (ps_idx<0) {
			GF_LOG(GF_LOG_ERROR, GF_LOG_PARSER, ("[%s] Error parsing Video Param Set\n", ctx->log_name));
		} else {
			naludmx_queue_param_set(ctx, data, size, GF_VVC_NALU_VID_PARAM, ps_idx);
		}
		*skip_nal = GF_TRUE;
		break;
	case GF_VVC_NALU_SEQ_PARAM:
		ps_idx = ctx->vvc_state->last_parsed_sps_id;
		if (ps_idx<0) {
			GF_LOG(GF_LOG_ERROR, GF_LOG_PARSER, ("[%s] Error parsing Sequence Param Set\n", ctx->log_name));
		} else {
			naludmx_queue_param_set(ctx, data, size, GF_VVC_NALU_SEQ_PARAM, ps_idx);
		}
		*skip_nal = GF_TRUE;
		break;
	case GF_VVC_NALU_PIC_PARAM:
		ps_idx = ctx->vvc_state->last_parsed_pps_id;
		if (ps_idx<0) {
			GF_LOG(GF_LOG_ERROR, GF_LOG_PARSER, ("[%s] Error parsing Picture Param Set\n", ctx->log_name));
		} else {
			naludmx_queue_param_set(ctx, data, size, GF_VVC_NALU_PIC_PARAM, ps_idx);
		}
		*skip_nal = GF_TRUE;
		break;
	case GF_VVC_NALU_DEC_PARAM:
		//single DCI per stream, always queued at index 0
		ps_idx = 0;
		naludmx_queue_param_set(ctx, data, size, GF_VVC_NALU_DEC_PARAM, ps_idx);
		*skip_nal = GF_TRUE;
		break;
	case GF_VVC_NALU_APS_PREFIX:
		//for now we keep APS in the stream
#if 0
		ps_idx = ctx->vvc_state->last_parsed_aps_id;
		if (ps_idx<0) {
			GF_LOG(GF_LOG_ERROR, GF_LOG_PARSER, ("[%s] Error parsing Decoder Param Set\n", ctx->log_name));
		} else {
			naludmx_queue_param_set(ctx, data, size, GF_VVC_NALU_APS_PREFIX, ps_idx);
		}
		*skip_nal = GF_TRUE;
#endif
		break;
	case GF_VVC_NALU_SEI_PREFIX:
		gf_media_vvc_parse_sei(data, size, ctx->vvc_state);
		if (!ctx->nosei) {
			ctx->nb_sei++;
			//accumulate the SEI (size-field prefixed) in the pending SEI buffer, flushed with the next dispatched NAL
			if (ctx->sei_buffer_alloc < ctx->sei_buffer_size + size + ctx->nal_length) {
				ctx->sei_buffer_alloc = ctx->sei_buffer_size + size + ctx->nal_length;
				ctx->sei_buffer = gf_realloc(ctx->sei_buffer, ctx->sei_buffer_alloc);
			}

			if (!ctx->bs_w) ctx->bs_w = gf_bs_new(ctx->sei_buffer + ctx->sei_buffer_size, ctx->nal_length + size, GF_BITSTREAM_WRITE);
			else gf_bs_reassign_buffer(ctx->bs_w, ctx->sei_buffer + ctx->sei_buffer_size, ctx->nal_length + size);
			gf_bs_write_int(ctx->bs_w, size, 8*ctx->nal_length);
			memcpy(ctx->sei_buffer + ctx->sei_buffer_size + ctx->nal_length, data, size);
			ctx->sei_buffer_size += size + ctx->nal_length;
		} else {
			ctx->nb_nalus--;
		}
		*skip_nal = GF_TRUE;
		break;
	case GF_VVC_NALU_SEI_SUFFIX:
		if (! ctx->is_playing) return 0;
		if (ctx->nosei) {
			*skip_nal = GF_TRUE;
			ctx->nb_nalus--;
		} else {
			ctx->nb_sei++;
		}
		break;
	case GF_VVC_NALU_PIC_HEADER:
		if (! ctx->is_playing) return 0;
		break;

	/*slice_segment_layer_rbsp*/
	case GF_VVC_NALU_SLICE_TRAIL:
	case GF_VVC_NALU_SLICE_STSA:
	case GF_VVC_NALU_SLICE_RADL:
	case GF_VVC_NALU_SLICE_RASL:
	case GF_VVC_NALU_SLICE_IDR_W_RADL:
	case GF_VVC_NALU_SLICE_IDR_N_LP:
	case GF_VVC_NALU_SLICE_CRA:
	case GF_VVC_NALU_SLICE_GDR:
		if (! ctx->is_playing) return 0;
		*is_slice = GF_TRUE;
		if (! *skip_nal) {
			//layer_id != 0 counts as enhancement-layer slice in the stats
			switch (ctx->vvc_state->s_info.slice_type) {
			case GF_VVC_SLICE_TYPE_P:
				if (layer_id) ctx->nb_e_p++;
				else ctx->nb_p++;
				break;
			case GF_VVC_SLICE_TYPE_I:
				if (layer_id) ctx->nb_e_i++;
				else ctx->nb_i++;
				*is_islice = GF_TRUE;
				break;
			case GF_VVC_SLICE_TYPE_B:
				if (layer_id) ctx->nb_e_b++;
				else ctx->nb_b++;
				break;
			case GF_VVC_SLICE_TYPE_UNKNOWN:
				ctx->vvc_no_stats = GF_TRUE;
				break;
			}
		}
		break;

	case GF_VVC_NALU_ACCESS_UNIT:
		ctx->nb_aud++;
		//no skip AUD in VVC
		if (!ctx->opid) {
			ctx->has_initial_aud = GF_TRUE;
			memcpy(ctx->init_aud, data, 3);
		}
		break;
	/*remove*/
	case GF_VVC_NALU_FILLER_DATA:
	case GF_VVC_NALU_END_OF_SEQ:
	case GF_VVC_NALU_END_OF_STREAM:
		*skip_nal = GF_TRUE;
		break;
	case GF_VVC_NALU_OPI:
		if (! ctx->is_playing) return 0;
		break;
	default:
		if (! ctx->is_playing) return 0;
		GF_LOG(GF_LOG_WARNING, GF_LOG_PARSER, ("[%s] NAL Unit type %d not handled - adding\n", ctx->log_name, nal_unit_type));
		break;
	}
	if (*skip_nal) return res;

	//record per-layer min/max temporal ids for layer info (linf) signaling
	ctx->linf[layer_id].layer_id_plus_one = layer_id + 1;
	if (! ctx->linf[layer_id].max_temporal_id ) ctx->linf[layer_id].max_temporal_id = temporal_id;
	else if (ctx->linf[layer_id].max_temporal_id < temporal_id) ctx->linf[layer_id].max_temporal_id = temporal_id;

	if (! ctx->linf[layer_id].min_temporal_id ) ctx->linf[layer_id].min_temporal_id = temporal_id;
	else if (ctx->linf[layer_id].min_temporal_id > temporal_id) ctx->linf[layer_id].min_temporal_id = temporal_id;

	if (ctx->max_temporal_id[layer_id] < temporal_id)
		ctx->max_temporal_id[layer_id] = temporal_id;
	if (ctx->min_layer_id > layer_id) ctx->min_layer_id = layer_id;
	return res;
}

/* Parses a single AVC|H264 NAL unit (without start code / size field).
 * Queues parameter sets, reformats/buffers SEI, updates slice statistics and
 * decides whether the NAL should be skipped from the output.
 * \param ctx demuxer context
 * \param data NAL unit payload (starting at the NAL header)
 * \param size NAL unit size in bytes
 * \param nal_type NAL unit type, already extracted by the caller
 * \param skip_nal set to GF_TRUE when the NAL must not be forwarded
 * \param is_slice set to GF_TRUE when the NAL is a VCL slice
 * \param is_islice set to GF_TRUE when the slice is intra-coded
 * \return result of gf_avc_parse_nalu (negative on parse error), 1 for AU delimiters
 */
static s32 naludmx_parse_nal_avc(GF_NALUDmxCtx *ctx, char *data, u32 size, u32 nal_type, Bool *skip_nal, Bool *is_slice, Bool *is_islice)
{
	s32 ps_idx = 0;
	s32 res = 0;

	gf_bs_reassign_buffer(ctx->bs_r, data, size);
	*skip_nal = GF_FALSE;
	res = gf_avc_parse_nalu(ctx->bs_r, ctx->avc_state);
	if (res < 0) {
		if (res == -1) {
			GF_LOG(GF_LOG_ERROR, GF_LOG_PARSER, ("[%s] Warning: Error parsing NAL unit\n", ctx->log_name));
		}
		*skip_nal = GF_TRUE;
	}
	ctx->nb_nalus++;

	switch (nal_type) {
	case GF_AVC_NALU_SVC_SUBSEQ_PARAM:
	case GF_AVC_NALU_SEQ_PARAM:
		ps_idx = ctx->avc_state->last_ps_idx;
		if (ps_idx<0) {
			//NOTE(review): the outer check makes the ternary below always pick the first branch - confirm intent
			if (ctx->avc_state->sps[0].profile_idc) {
				GF_LOG(ctx->avc_state->sps[0].profile_idc ?
GF_LOG_WARNING : GF_LOG_ERROR, GF_LOG_PARSER, ("[%s] Error parsing Sequence Param Set\n", ctx->log_name)); } } else { naludmx_queue_param_set(ctx, data, size, GF_AVC_NALU_SEQ_PARAM, ps_idx); } *skip_nal = GF_TRUE; return 0; case GF_AVC_NALU_PIC_PARAM: ps_idx = ctx->avc_state->last_ps_idx; if (ps_idx<0) { GF_LOG(GF_LOG_ERROR, GF_LOG_PARSER, ("[%s] Error parsing Picture Param Set\n", ctx->log_name)); } else { naludmx_queue_param_set(ctx, data, size, GF_AVC_NALU_PIC_PARAM, ps_idx); } *skip_nal = GF_TRUE; return 0; case GF_AVC_NALU_SEQ_PARAM_EXT: ps_idx = ctx->avc_state->last_ps_idx; if (ps_idx<0) { GF_LOG(GF_LOG_ERROR, GF_LOG_PARSER, ("[%s] Error parsing Sequence Param Set Extension\n", ctx->log_name)); } else { naludmx_queue_param_set(ctx, data, size, GF_AVC_NALU_SEQ_PARAM_EXT, ps_idx); } *skip_nal = GF_TRUE; return 0; case GF_AVC_NALU_SEI: if (ctx->avc_state->sps_active_idx != -1) { u32 rw_sei_size, sei_size = size; if (ctx->sei_buffer_alloc < ctx->sei_buffer_size + sei_size + ctx->nal_length) { ctx->sei_buffer_alloc = ctx->sei_buffer_size + sei_size + ctx->nal_length; ctx->sei_buffer = gf_realloc(ctx->sei_buffer, ctx->sei_buffer_alloc); } if (!ctx->bs_w) ctx->bs_w = gf_bs_new(ctx->sei_buffer + ctx->sei_buffer_size, ctx->nal_length + sei_size, GF_BITSTREAM_WRITE); else gf_bs_reassign_buffer(ctx->bs_w, ctx->sei_buffer + ctx->sei_buffer_size, ctx->nal_length + sei_size); gf_bs_write_int(ctx->bs_w, sei_size, 8*ctx->nal_length); memcpy(ctx->sei_buffer + ctx->sei_buffer_size + ctx->nal_length, data, sei_size); rw_sei_size = gf_media_avc_reformat_sei(ctx->sei_buffer + ctx->sei_buffer_size + ctx->nal_length, sei_size, ctx->seirw, ctx->avc_state); if (rw_sei_size < sei_size) { gf_bs_seek(ctx->bs_w, 0); gf_bs_write_int(ctx->bs_w, rw_sei_size, 8*ctx->nal_length); } *skip_nal = GF_TRUE; ctx->sei_buffer_size += rw_sei_size + ctx->nal_length; if (ctx->nosei) { ctx->sei_buffer_size = 0; } else { ctx->nb_sei++; } } return 0; case GF_AVC_NALU_ACCESS_UNIT: ctx->nb_aud++; if 
(!ctx->audelim) { *skip_nal = GF_TRUE; } else if (!ctx->opid) { ctx->has_initial_aud = GF_TRUE; memcpy(ctx->init_aud, data, 2); } return 1; /*remove*/ case GF_AVC_NALU_FILLER_DATA: case GF_AVC_NALU_END_OF_SEQ: case GF_AVC_NALU_END_OF_STREAM: *skip_nal = GF_TRUE; return 0; //update stats case GF_AVC_NALU_NON_IDR_SLICE: case GF_AVC_NALU_DP_A_SLICE: case GF_AVC_NALU_DP_B_SLICE: case GF_AVC_NALU_DP_C_SLICE: case GF_AVC_NALU_IDR_SLICE: *is_slice = GF_TRUE; switch (ctx->avc_state->s_info.slice_type) { case GF_AVC_TYPE_P: case GF_AVC_TYPE2_P: ctx->nb_p++; break; case GF_AVC_TYPE_I: case GF_AVC_TYPE2_I: ctx->nb_i++; *is_islice = GF_TRUE; break; case GF_AVC_TYPE_B: case GF_AVC_TYPE2_B: ctx->nb_b++; break; case GF_AVC_TYPE_SP: case GF_AVC_TYPE2_SP: ctx->nb_sp++; break; case GF_AVC_TYPE_SI: case GF_AVC_TYPE2_SI: ctx->nb_si++; break; } break; case GF_AVC_NALU_SVC_SLICE: if (!ctx->explicit) { u32 i; for (i = 0; i < gf_list_count(ctx->pps); i ++) { GF_NALUFFParam *slc = (GF_NALUFFParam*)gf_list_get(ctx->pps, i); if (ctx->avc_state->s_info.pps && ctx->avc_state->s_info.pps->id == slc->id) { /* This PPS is used by an SVC NAL unit, it should be moved to the SVC Config Record) */ gf_list_rem(ctx->pps, i); i--; if (!ctx->pps_svc) ctx->pps_svc = gf_list_new(ctx->pps_svc); gf_list_add(ctx->pps_svc, slc); ctx->ps_modified = GF_TRUE; } } } *is_slice = GF_TRUE; //we disable temporal scalability when parsing mvc - never used and many encoders screw up POC in enhancemen if (ctx->is_mvc && (res>=0)) { res=0; ctx->avc_state->s_info.poc = ctx->last_poc; } if (ctx->avc_state->s_info.sps) { switch (ctx->avc_state->s_info.slice_type) { case GF_AVC_TYPE_P: case GF_AVC_TYPE2_P: ctx->avc_state->s_info.sps->nb_ep++; break; case GF_AVC_TYPE_I: case GF_AVC_TYPE2_I: ctx->avc_state->s_info.sps->nb_ei++; break; case GF_AVC_TYPE_B: case GF_AVC_TYPE2_B: ctx->avc_state->s_info.sps->nb_eb++; break; } } break; case GF_AVC_NALU_SLICE_AUX: *is_slice = GF_TRUE; break; } return res; } static void 
naldmx_switch_timestamps(GF_NALUDmxCtx *ctx, GF_FilterPacket *pck)
{
	/* Adopts the timing (CTS/DTS/duration/framing) of the given input packet.
	 * Only does something when the input PID declares a timescale; otherwise
	 * timestamps are recomputed internally from POCs. */
	//input pid sets some timescale - we flushed pending data , update cts
	if (ctx->timescale) {
		u64 ts = gf_filter_pck_get_cts(pck);
		if (ts != GF_FILTER_NO_TS) {
			ctx->prev_cts = ctx->cts;
			ctx->cts = ts;
		}
		ts = gf_filter_pck_get_dts(pck);
		if (ts != GF_FILTER_NO_TS) {
			if (ctx->full_au_source) {
				ctx->prev_dts = ctx->dts;
				ctx->dts = ts;
			} else {
				//on PCR discontinuity, resync DTS; otherwise keep DTS monotonic
				GF_FilterClockType ck_type = gf_filter_pid_get_clock_info(ctx->ipid, NULL, NULL);
				if (ck_type==GF_FILTER_CLOCK_PCR_DISC)
					ctx->dts = ts;
				else if (ctx->dts<ts)
					ctx->dts=ts;

				//track the smallest DTS delta seen, used as frame duration estimate (cur_fps.den)
				if (!ctx->prev_dts) ctx->prev_dts = ts;
				else if (ctx->prev_dts != ts) {
					u64 diff = ts;
					diff -= ctx->prev_dts;
					if (!ctx->cur_fps.den)
						ctx->cur_fps.den = (u32) diff;
					else if (ctx->cur_fps.den > diff)
						ctx->cur_fps.den = (u32) diff;

					ctx->prev_dts = ts;
				}
			}
		}
		ctx->pck_duration = gf_filter_pck_get_duration(pck);
		//keep a props-only reference to the source packet so its properties can be forwarded
		if (ctx->src_pck) gf_filter_pck_unref(ctx->src_pck);
		ctx->src_pck = pck;
		gf_filter_pck_ref_props(&ctx->src_pck);
		//store framing flags. If input_is_au_start, the first NAL of the first frame beginning in this packet will
		//use the DTS/CTS of the input packet, otherwise we will use our internal POC recompute
		gf_filter_pck_get_framing(pck, &ctx->input_is_au_start, NULL);
	}
}

/* Consumes bytes_drop bytes of leftover data from the previous input packet.
 * Once all leftover bytes are consumed, switches timing to the current packet
 * and drops the previous input packet if still held. */
static void naldmx_check_timestamp_switch(GF_NALUDmxCtx *ctx, u32 *nalu_store_before, u32 bytes_drop, Bool *drop_packet, GF_FilterPacket *pck)
{
	if (*nalu_store_before) {
		if (*nalu_store_before > bytes_drop) {
			*nalu_store_before -= bytes_drop;
		} else {
			//all data from previous frame consumed, update timestamps with info from current packet
			*nalu_store_before = 0;
			naldmx_switch_timestamps(ctx, pck);
			if (*drop_packet) {
				gf_filter_pid_drop_packet(ctx->ipid);
				*drop_packet = GF_FALSE;
			}
		}
	}
}

/* Bitstream field logger callback (debug only): prints field name, optional
 * indices, value and - in verbose mode (bsdbg==2) - the bit count. */
static void naldmx_bs_log(void *udta, const char *field_name, u32 nb_bits, u64 field_val, s32 idx1, s32 idx2, s32 idx3)
{
	GF_NALUDmxCtx *ctx = (GF_NALUDmxCtx *) udta;
	GF_LOG(GF_LOG_DEBUG, GF_LOG_PARSER, (" %s", field_name));
	if (idx1>=0) {
		GF_LOG(GF_LOG_DEBUG, GF_LOG_PARSER, ("_%d", idx1));
		if (idx2>=0) {
			GF_LOG(GF_LOG_DEBUG, GF_LOG_PARSER, ("_%d", idx2));
			if (idx3>=0) {
				GF_LOG(GF_LOG_DEBUG, GF_LOG_PARSER, ("_%d", idx3));
			}
		}
	}
	GF_LOG(GF_LOG_DEBUG, GF_LOG_PARSER, ("=\""LLD, field_val));
	if ((ctx->bsdbg==2) && ((s32) nb_bits > 1) )
		GF_LOG(GF_LOG_DEBUG, GF_LOG_PARSER, ("(%u)", nb_bits));
	GF_LOG(GF_LOG_DEBUG, GF_LOG_PARSER, ("\" "));
}

/* Main process callback: accumulates input bytes in nal_store, splits them on
 * start codes, parses each NAL and dispatches reframed packets. */
GF_Err naludmx_process(GF_Filter *filter)
{
	GF_NALUDmxCtx *ctx = gf_filter_get_udta(filter);
	GF_FilterPacket *pck;
	GF_Err e;
	u8 *start;
	u32 nalu_before = ctx->nb_nalus;
	u32 nalu_store_before = 0;
	s32 remain;
	Bool is_eos = GF_FALSE;
	Bool drop_packet = GF_FALSE;
	u64 byte_offset = GF_FILTER_NO_BO;

	//always reparse duration
	if (!ctx->file_loaded)
		naludmx_check_dur(filter, ctx);

	pck = gf_filter_pid_get_packet(ctx->ipid);

	if (!ctx->resume_from && !pck) {
		if (gf_filter_pid_is_eos(ctx->ipid)) {
			//flush any bytes still pending in the store before signaling EOS
			if (ctx->nal_store_size) {
				if (!ctx->is_playing)
					return GF_OK;
				start = ctx->nal_store;
				remain = ctx->nal_store_size;
				is_eos =
GF_TRUE; goto naldmx_flush; } if (ctx->first_pck_in_au) { naludmx_finalize_au_flags(ctx); } //single-frame stream if (!ctx->poc_diff) ctx->poc_diff = 1; ctx->strict_poc = STRICT_POC_OFF; naludmx_enqueue_or_dispatch(ctx, NULL, GF_TRUE); if (ctx->src_pck) gf_filter_pck_unref(ctx->src_pck); ctx->src_pck = NULL; if (!ctx->opid) return GF_EOS; gf_filter_pid_set_info(ctx->opid, GF_PROP_PID_MAX_NALU_SIZE, &PROP_UINT(ctx->max_nalu_size) ); if (ctx->codecid==GF_CODECID_HEVC) { naludmx_set_hevc_oinf(ctx, ctx->max_temporal_id); naludmx_set_hevc_linf(ctx); gf_filter_pid_set_info_str(ctx->opid, "hevc:min_lid", &PROP_UINT(ctx->min_layer_id) ); } if (ctx->opid) gf_filter_pid_set_eos(ctx->opid); return GF_EOS; } return GF_OK; } if (!ctx->is_playing && ctx->opid) return GF_OK; //if we have bytes from previous packet in the header, we cannot switch timing until we know what these bytes are if (!ctx->nal_store_size) naldmx_switch_timestamps(ctx, pck); nalu_store_before = ctx->nal_store_size; if (!ctx->resume_from && pck) { u32 pck_size; const u8 *data = gf_filter_pck_get_data(pck, &pck_size); if (ctx->nal_store_alloc < ctx->nal_store_size + pck_size) { ctx->nal_store_alloc = ctx->nal_store_size + pck_size; ctx->nal_store = gf_realloc(ctx->nal_store, sizeof(char)*ctx->nal_store_alloc); if (!ctx->nal_store) { ctx->nal_store_alloc = 0; return GF_OUT_OF_MEM; } } byte_offset = gf_filter_pck_get_byte_offset(pck); if (byte_offset != GF_FILTER_NO_BO) byte_offset -= ctx->nal_store_size; memcpy(ctx->nal_store + ctx->nal_store_size, data, sizeof(char)*pck_size); ctx->nal_store_size += pck_size; drop_packet = GF_TRUE; } start = ctx->nal_store; remain = ctx->nal_store_size; if (ctx->resume_from) { if (ctx->opid && gf_filter_pid_would_block(ctx->opid)) return GF_OK; assert(ctx->resume_from < ctx->nal_store_size); start += ctx->resume_from; remain -= ctx->resume_from; ctx->resume_from = 0; if (!pck && gf_filter_pid_is_eos(ctx->ipid)) is_eos = GF_TRUE; } naldmx_flush: if (!ctx->bs_r) { ctx->bs_r = 
gf_bs_new(start, remain, GF_BITSTREAM_READ); #ifndef GPAC_DISABLE_LOG if (ctx->bsdbg && gf_log_tool_level_on(GF_LOG_PARSER, GF_LOG_DEBUG)) gf_bs_set_logger(ctx->bs_r, naldmx_bs_log, ctx); #endif } else { gf_bs_reassign_buffer(ctx->bs_r, start, remain); } assert(remain>=0); while (remain) { u8 *pck_data; u8 *nal_data; u32 nal_size; s32 current; Bool skip_nal = GF_FALSE; u32 sc_size=0; u32 nal_type = 0; u32 nal_ref_idc = 0; s32 next=0; u32 next_sc_size=0; s32 nal_parse_result; Bool slice_is_ref, slice_force_ref; Bool is_slice = GF_FALSE; Bool is_islice = GF_FALSE; Bool bottom_field_flag = GF_FALSE; Bool au_start; u32 avc_svc_subs_reserved = 0; u8 avc_svc_subs_priority = 0; Bool recovery_point_valid = GF_FALSE; u32 recovery_point_frame_cnt = 0; Bool bIntraSlice = GF_FALSE; GF_FilterSAPType au_sap_type = GF_FILTER_SAP_NONE; Bool slice_is_b = GF_FALSE; Bool check_dep = GF_FALSE; s32 slice_poc = 0; //not enough bytes to parse start code + nal hdr if (!is_eos && (remain<6)) { break; } //locate next start code current = gf_media_nalu_next_start_code(start, remain, &sc_size); if (current == remain) current = -1; //no start code: if eos or full AU dispatch mode, send remaining otherwise gather if (current<0) { if (!is_eos && !ctx->full_au_source) { break; } e = naludmx_realloc_last_pck(ctx, (u32) remain, &pck_data); if (e==GF_OK) memcpy(pck_data, start, (size_t) remain); remain = 0; break; } assert(current>=0); //skip if no output pid if (!ctx->opid && current) { assert(remain>=current); assert((s32) current >= 0); start += current; remain -= current; current = 0; } //dispatch remaining bytes if (current>0) { //flush remaining bytes in NAL if (gf_list_count(ctx->pck_queue)) { e = naludmx_realloc_last_pck(ctx, current, &pck_data); if (e==GF_OK) { memcpy(pck_data, start, current); } } assert(remain>=current); start += current; remain -= current; naldmx_check_timestamp_switch(ctx, &nalu_store_before, current, &drop_packet, pck); } if (!remain) break; //not enough bytes to parse 
start code + nal hdr if (!is_eos && (remain<6)) { break; } nal_data = start + sc_size; nal_size = remain - sc_size; //figure out which nal we need to completely load if (ctx->codecid==GF_CODECID_HEVC) { nal_type = nal_data[0]; nal_type = (nal_type & 0x7E) >> 1; switch (nal_type) { case GF_HEVC_NALU_VID_PARAM: case GF_HEVC_NALU_SEQ_PARAM: case GF_HEVC_NALU_PIC_PARAM: case GF_HEVC_NALU_SEI_PREFIX: case GF_HEVC_NALU_SEI_SUFFIX: break; case GF_HEVC_NALU_SLICE_TRAIL_N: case GF_HEVC_NALU_SLICE_TSA_N: case GF_HEVC_NALU_SLICE_STSA_N: case GF_HEVC_NALU_SLICE_RADL_N: case GF_HEVC_NALU_SLICE_RASL_N: case GF_HEVC_NALU_SLICE_RSV_VCL_N10: case GF_HEVC_NALU_SLICE_RSV_VCL_N12: case GF_HEVC_NALU_SLICE_RSV_VCL_N14: check_dep = GF_TRUE; break; default: if (nal_type<GF_HEVC_NALU_VID_PARAM) nal_ref_idc = GF_TRUE; break; } } else if (ctx->codecid==GF_CODECID_VVC) { nal_type = nal_data[1]>>3; switch (nal_type) { case GF_VVC_NALU_OPI: case GF_VVC_NALU_DEC_PARAM: case GF_VVC_NALU_VID_PARAM: case GF_VVC_NALU_SEQ_PARAM: case GF_VVC_NALU_PIC_PARAM: case GF_VVC_NALU_SEI_PREFIX: case GF_VVC_NALU_SEI_SUFFIX: case GF_VVC_NALU_APS_PREFIX: case GF_VVC_NALU_APS_SUFFIX: case GF_VVC_NALU_PIC_HEADER: break; case GF_VVC_NALU_SLICE_TRAIL: case GF_VVC_NALU_SLICE_STSA: case GF_VVC_NALU_SLICE_RADL: case GF_VVC_NALU_SLICE_RASL: case GF_VVC_NALU_SLICE_IDR_W_RADL: case GF_VVC_NALU_SLICE_IDR_N_LP: case GF_VVC_NALU_SLICE_CRA: case GF_VVC_NALU_SLICE_GDR: if (ctx->deps) { check_dep = GF_TRUE; } break; default: if (nal_type<GF_HEVC_NALU_VID_PARAM) nal_ref_idc = GF_TRUE; break; } } else { nal_type = nal_data[0] & 0x1F; nal_ref_idc = (nal_data[0] & 0x60) >> 5; } //locate next NAL start next = gf_media_nalu_next_start_code(nal_data, nal_size, &next_sc_size); if (!is_eos && (next == nal_size) && !ctx->full_au_source) { next = -1; } //next nal start not found, wait if (next<0) { break; } //this is our exact NAL size, without start code nal_size = next; if (ctx->codecid==GF_CODECID_HEVC) { nal_parse_result = 
naludmx_parse_nal_hevc(ctx, nal_data, nal_size, &skip_nal, &is_slice, &is_islice); } else if (ctx->codecid==GF_CODECID_VVC) { nal_parse_result = naludmx_parse_nal_vvc(ctx, nal_data, nal_size, &skip_nal, &is_slice, &is_islice); } else { nal_parse_result = naludmx_parse_nal_avc(ctx, nal_data, nal_size, nal_type, &skip_nal, &is_slice, &is_islice); } //dispatch right away if analyze if (ctx->analyze) { skip_nal = GF_FALSE; ctx->sei_buffer_size = 0; } //new frame - if no slices, we detected the new frame on AU delimiter, don't flush new frame ! if ((nal_parse_result>0) && !ctx->first_slice_in_au) { //new frame - we flush later on naludmx_finalize_au_flags(ctx); ctx->has_islice = GF_FALSE; ctx->first_slice_in_au = GF_TRUE; ctx->sei_recovery_frame_count = -1; ctx->au_sap = GF_FILTER_SAP_NONE; ctx->bottom_field_flag = GF_FALSE; } naludmx_check_pid(filter, ctx); if (!ctx->opid) skip_nal = GF_TRUE; if (skip_nal) { nal_size += sc_size; assert((u32) remain >= nal_size); start += nal_size; remain -= nal_size; naldmx_check_timestamp_switch(ctx, &nalu_store_before, nal_size, &drop_packet, pck); continue; } if (!ctx->is_playing) { ctx->resume_from = (u32) (start - ctx->nal_store); assert(ctx->resume_from<=ctx->nal_store_size); GF_LOG(GF_LOG_DEBUG, GF_LOG_PARSER, ("[%s] not yet playing\n", ctx->log_name)); if (drop_packet) gf_filter_pid_drop_packet(ctx->ipid); return GF_OK; } if (ctx->in_seek) { u64 nb_frames_at_seek = (u64) (ctx->start_range * ctx->cur_fps.num); if (ctx->cts + ctx->cur_fps.den >= nb_frames_at_seek) { //u32 samples_to_discard = (ctx->cts + ctx->dts_inc) - nb_samples_at_seek; ctx->in_seek = GF_FALSE; } } if (nal_parse_result<0) { if (byte_offset != GF_FILTER_NO_BO) { u64 bo = byte_offset; bo += (start - ctx->nal_store); GF_LOG(GF_LOG_ERROR, GF_LOG_PARSER, ("[%s] Error parsing NAL Unit %d (byte offset "LLU" size %d type %d frame %d last POC %d) - skipping\n", ctx->log_name, ctx->nb_nalus, bo, nal_size, nal_type, ctx->nb_frames, ctx->last_poc)); } else { 
GF_LOG(GF_LOG_ERROR, GF_LOG_PARSER, ("[%s] Error parsing NAL Unit %d (size %d type %d frame %d last POC %d) - skipping\n", ctx->log_name, ctx->nb_nalus, nal_size, nal_type, ctx->nb_frames, ctx->last_poc)); } nal_size += sc_size; assert((u32) remain >= nal_size); start += nal_size; remain -= nal_size; naldmx_check_timestamp_switch(ctx, &nalu_store_before, nal_size, &drop_packet, pck); continue; } if (check_dep) { if ((ctx->codecid==GF_CODECID_HEVC) && ctx->hevc_state->s_info.sps) { HEVC_VPS *vps; u32 temporal_id = nal_data[1] & 0x7; vps = & ctx->hevc_state->vps[ctx->hevc_state->s_info.sps->vps_id]; if (temporal_id + 1 < vps->max_sub_layers) { nal_ref_idc = GF_TRUE; } } else if (ctx->codecid==GF_CODECID_VVC) { if (ctx->vvc_state->s_info.non_ref_pic) { nal_ref_idc = GF_FALSE; } else { //todo nal_ref_idc = GF_TRUE; } } } if (is_islice) ctx->has_islice = GF_TRUE; //store all variables needed to compute POC/CTS and sample SAP and recovery info if (ctx->codecid==GF_CODECID_HEVC) { #ifndef GPAC_DISABLE_HEVC slice_is_ref = gf_hevc_slice_is_IDR(ctx->hevc_state); recovery_point_valid = ctx->hevc_state->sei.recovery_point.valid; recovery_point_frame_cnt = ctx->hevc_state->sei.recovery_point.frame_cnt; bIntraSlice = gf_hevc_slice_is_intra(ctx->hevc_state); au_sap_type = GF_FILTER_SAP_NONE; if (gf_hevc_slice_is_IDR(ctx->hevc_state)) { au_sap_type = GF_FILTER_SAP_1; } else { switch (ctx->hevc_state->s_info.nal_unit_type) { case GF_HEVC_NALU_SLICE_BLA_W_LP: case GF_HEVC_NALU_SLICE_BLA_W_DLP: au_sap_type = GF_FILTER_SAP_3; break; case GF_HEVC_NALU_SLICE_BLA_N_LP: au_sap_type = GF_FILTER_SAP_1; break; case GF_HEVC_NALU_SLICE_CRA: au_sap_type = GF_FILTER_SAP_3; break; } } slice_poc = ctx->hevc_state->s_info.poc; /*need to store TS offsets*/ switch (ctx->hevc_state->s_info.slice_type) { case GF_AVC_TYPE_B: case GF_AVC_TYPE2_B: slice_is_b = GF_TRUE; break; } #endif // GPAC_DISABLE_HEVC } else if (ctx->codecid==GF_CODECID_VVC) { slice_is_ref = gf_media_vvc_slice_is_ref(ctx->vvc_state); 
recovery_point_valid = ctx->vvc_state->s_info.recovery_point_valid; recovery_point_frame_cnt = ctx->vvc_state->s_info.gdr_recovery_count; // commented, set below // if (ctx->vvc_state->s_info.irap_or_gdr_pic && !ctx->vvc_state->s_info.gdr_pic) // bIntraSlice = GF_TRUE; //gf_hevc_slice_is_intra(ctx->hevc_state); au_sap_type = GF_FILTER_SAP_NONE; if (ctx->vvc_state->s_info.irap_or_gdr_pic && !ctx->vvc_state->s_info.gdr_pic) { au_sap_type = GF_FILTER_SAP_1; bIntraSlice = GF_TRUE; slice_is_ref = 1; } else { switch (ctx->vvc_state->s_info.nal_unit_type) { case GF_VVC_NALU_SLICE_IDR_N_LP: au_sap_type = GF_FILTER_SAP_1; slice_is_ref = 1; bIntraSlice = GF_TRUE; break; case GF_VVC_NALU_SLICE_CRA: au_sap_type = GF_FILTER_SAP_3; bIntraSlice = GF_TRUE; break; case GF_VVC_NALU_SLICE_IDR_W_RADL: bIntraSlice = GF_TRUE; if (ctx->vvc_state->s_info.gdr_pic) { au_sap_type = GF_FILTER_SAP_3; } else { au_sap_type = GF_FILTER_SAP_1; slice_is_ref = 1; } break; } } slice_poc = ctx->vvc_state->s_info.poc; /*need to store TS offsets*/ switch (ctx->vvc_state->s_info.slice_type) { case GF_AVC_TYPE_B: case GF_AVC_TYPE2_B: slice_is_b = GF_TRUE; break; } } else { /*fixme - we need finer grain for priority*/ if ((nal_type==GF_AVC_NALU_SVC_PREFIX_NALU) || (nal_type==GF_AVC_NALU_SVC_SLICE)) { if (!ctx->is_mvc) { unsigned char *p = (unsigned char *) start; // RefPicFlag avc_svc_subs_reserved |= (p[0] & 0x60) ? 0x80000000 : 0; // RedPicFlag TODO: not supported, would require to parse NAL unit payload avc_svc_subs_reserved |= (0) ? 0x40000000 : 0; // VclNALUnitFlag avc_svc_subs_reserved |= (1<=nal_type && nal_type<=5) || (nal_type==GF_AVC_NALU_SVC_PREFIX_NALU) || (nal_type==GF_AVC_NALU_SVC_SLICE) ? 
0x20000000 : 0; // use values of IdrFlag and PriorityId directly from SVC extension header avc_svc_subs_reserved |= p[1] << 16; // use values of DependencyId and QualityId directly from SVC extension header avc_svc_subs_reserved |= p[2] << 8; // use values of TemporalId and UseRefBasePicFlag directly from SVC extension header avc_svc_subs_reserved |= p[3] & 0xFC; // StoreBaseRepFlag TODO: SVC FF mentions a store_base_rep_flag which cannot be found in SVC spec avc_svc_subs_reserved |= (0) ? 0x00000002 : 0; // priority_id (6 bits) in SVC has inverse meaning -> lower value means higher priority - invert it and scale it to 8 bits avc_svc_subs_priority = (63 - (p[1] & 0x3F)) << 2; } if (nal_type==GF_AVC_NALU_SVC_PREFIX_NALU) { if (ctx->svc_prefix_buffer_size) { GF_LOG(GF_LOG_WARNING, GF_LOG_CODING, ("[%s] broken bitstream, two consecutive SVC prefix NALU without SVC slice in-between\n", ctx->log_name)); ctx->svc_prefix_buffer_size = 0; } /* remember reserved and priority value */ ctx->svc_nalu_prefix_reserved = avc_svc_subs_reserved; ctx->svc_nalu_prefix_priority = avc_svc_subs_priority; ctx->svc_prefix_buffer_size = nal_size; if (ctx->svc_prefix_buffer_size > ctx->svc_prefix_buffer_alloc) { ctx->svc_prefix_buffer_alloc = ctx->svc_prefix_buffer_size; ctx->svc_prefix_buffer = gf_realloc(ctx->svc_prefix_buffer, ctx->svc_prefix_buffer_size); } memcpy(ctx->svc_prefix_buffer, start+sc_size, ctx->svc_prefix_buffer_size); assert( (u32) remain >= sc_size + nal_size); start += sc_size + nal_size; remain -= sc_size + nal_size; continue; } } else if (is_slice) { // RefPicFlag avc_svc_subs_reserved |= (start[0] & 0x60) ? 0x80000000 : 0; // VclNALUnitFlag avc_svc_subs_reserved |= (1<=nal_type && nal_type<=5) || (nal_type==GF_AVC_NALU_SVC_PREFIX_NALU) || (nal_type==GF_AVC_NALU_SVC_SLICE) ? 
0x20000000 : 0; avc_svc_subs_priority = 0; } if (is_slice && ctx->avc_state->s_info.field_pic_flag) { ctx->is_paff = GF_TRUE; bottom_field_flag = ctx->avc_state->s_info.bottom_field_flag; } slice_is_ref = (ctx->avc_state->s_info.nal_unit_type==GF_AVC_NALU_IDR_SLICE) ? GF_TRUE : GF_FALSE; recovery_point_valid = ctx->avc_state->sei.recovery_point.valid; recovery_point_frame_cnt = ctx->avc_state->sei.recovery_point.frame_cnt; bIntraSlice = gf_media_avc_slice_is_intra(ctx->avc_state); au_sap_type = GF_FILTER_SAP_NONE; if (ctx->avc_state->s_info.nal_unit_type == GF_AVC_NALU_IDR_SLICE) au_sap_type = GF_FILTER_SAP_1; slice_poc = ctx->avc_state->s_info.poc; /*need to store TS offsets*/ switch (ctx->avc_state->s_info.slice_type) { case GF_AVC_TYPE_B: case GF_AVC_TYPE2_B: slice_is_b = GF_TRUE; break; } } if (is_slice) { Bool first_in_au = ctx->first_slice_in_au; if (slice_is_ref) ctx->nb_idr++; slice_force_ref = GF_FALSE; /*we only indicate TRUE IDRs for sync samples (cf AVC file format spec). SEI recovery should be used to build sampleToGroup & RollRecovery tables*/ if (ctx->first_slice_in_au) { ctx->first_slice_in_au = GF_FALSE; if (recovery_point_valid) { ctx->sei_recovery_frame_count = recovery_point_frame_cnt; /*we allow to mark I-frames as sync on open-GOPs (with sei_recovery_frame_count=0) when forcing sync even when the SEI RP is not available*/ if (!recovery_point_frame_cnt && bIntraSlice) { ctx->has_islice = 1; if (ctx->use_opengop_gdr == 1) { ctx->use_opengop_gdr = 2; /*avoid message flooding*/ GF_LOG(GF_LOG_WARNING, GF_LOG_CODING, ("[%s] No valid SEI Recovery Point found although needed - forcing\n", ctx->log_name)); } } if (ctx->codecid==GF_CODECID_HEVC) { ctx->hevc_state->sei.recovery_point.valid = 0; } else if (ctx->codecid==GF_CODECID_VVC) { ctx->vvc_state->s_info.recovery_point_valid = 0; } else { ctx->avc_state->sei.recovery_point.valid = 0; } if (bIntraSlice && ctx->force_sync && (ctx->sei_recovery_frame_count==0)) slice_force_ref = GF_TRUE; } ctx->au_sap 
= au_sap_type; ctx->bottom_field_flag = bottom_field_flag; } if (slice_poc < ctx->poc_shift) { u32 i, count = gf_list_count(ctx->pck_queue); for (i=0; i<count; i++) { u64 dts, cts; GF_FilterPacket *q_pck = gf_list_get(ctx->pck_queue, i); assert(q_pck); dts = gf_filter_pck_get_dts(q_pck); if (dts == GF_FILTER_NO_TS) continue; cts = gf_filter_pck_get_cts(q_pck); cts += ctx->poc_shift; cts -= slice_poc; gf_filter_pck_set_cts(q_pck, cts); } ctx->poc_shift = slice_poc; } /*if #pics, compute smallest POC increase*/ if (slice_poc != ctx->last_poc) { s32 pdiff = ABS(ctx->last_poc - slice_poc); if ((slice_poc < 0) && !ctx->last_poc) ctx->poc_diff = 0; if (!ctx->poc_diff || (ctx->poc_diff > (s32) pdiff ) ) { ctx->poc_diff = pdiff; ctx->poc_probe_done = GF_FALSE; } else if (first_in_au) { //second frame with the same poc diff, we should be able to properly recompute CTSs ctx->poc_probe_done = GF_TRUE; } ctx->last_poc = slice_poc; } GF_LOG(GF_LOG_DEBUG, GF_LOG_PARSER, ("[%s] POC is %d - min poc diff %d - slice is ref %d\n", ctx->log_name, slice_poc, ctx->poc_diff, slice_is_ref)); /*ref slice, reset poc*/ if (slice_is_ref) { if (first_in_au) { Bool temp_poc_diff = GF_FALSE; //two consecutive IDRs, force poc_diff to 1 if 0 (when we have intra-only) to force frame dispatch if (ctx->last_frame_is_idr && !ctx->poc_diff) { temp_poc_diff = GF_TRUE; ctx->poc_diff = 1; } //new ref frame, dispatch all pending packets naludmx_enqueue_or_dispatch(ctx, NULL, GF_TRUE); ctx->max_last_poc = ctx->last_poc = ctx->max_last_b_poc = 0; ctx->poc_shift = 0; //force probing of POC diff, this will prevent dispatching frames with wrong CTS until we have a clue of min poc_diff used ctx->poc_probe_done = 0; ctx->last_frame_is_idr = GF_TRUE; if (temp_poc_diff) ctx->poc_diff = 0; } } /*forced ref slice*/ else if (slice_force_ref) { ctx->last_frame_is_idr = GF_FALSE; if (first_in_au) { //new ref frame, dispatch all pending packets naludmx_enqueue_or_dispatch(ctx, NULL, GF_TRUE); /*adjust POC shift as sample 
will now be marked as sync, so we must store poc as if IDR (eg POC=0) for our CTS offset computing to be correct*/ ctx->poc_shift = slice_poc; //force probing of POC diff, this will prevent dispatching frames with wrong CTS until we have a clue of min poc_diff used ctx->poc_probe_done = 0; } } /*strictly less - this is a new P slice*/ else if (ctx->max_last_poc < ctx->last_poc) { ctx->max_last_b_poc = 0; ctx->max_last_poc = ctx->last_poc; ctx->last_frame_is_idr = GF_FALSE; } /*stricly greater*/ else if (slice_is_b && (ctx->max_last_poc > ctx->last_poc)) { ctx->last_frame_is_idr = GF_FALSE; if (!ctx->max_last_b_poc) { ctx->max_last_b_poc = ctx->last_poc; } /*if same poc than last max, this is a B-slice*/ else if (ctx->last_poc > ctx->max_last_b_poc) { ctx->max_last_b_poc = ctx->last_poc; } /*otherwise we had a B-slice reference: do nothing*/ } else { ctx->last_frame_is_idr = GF_FALSE; } if (ctx->deps) { if (nal_ref_idc) { ctx->has_ref_slices = GF_TRUE; } if ((ctx->codecid==GF_CODECID_AVC) && (ctx->avc_state->s_info.redundant_pic_cnt) ) { ctx->has_redundant = GF_TRUE; } } } au_start = ctx->first_pck_in_au ? GF_FALSE : GF_TRUE; if (ctx->has_initial_aud) { u32 audelim_size = (ctx->codecid!=GF_CODECID_AVC) ? 
3 : 2; /*dst_pck = */naludmx_start_nalu(ctx, audelim_size, GF_FALSE, &au_start, &pck_data); memcpy(pck_data + ctx->nal_length , ctx->init_aud, audelim_size); ctx->has_initial_aud = GF_FALSE; if (ctx->subsamples) { naludmx_add_subsample(ctx, audelim_size, avc_svc_subs_priority, avc_svc_subs_reserved); } } if (ctx->sei_buffer_size) { //sei buffer is already nal size prefixed /*dst_pck = */naludmx_start_nalu(ctx, ctx->sei_buffer_size, GF_TRUE, &au_start, &pck_data); memcpy(pck_data, ctx->sei_buffer, ctx->sei_buffer_size); if (ctx->subsamples) { naludmx_add_subsample(ctx, ctx->sei_buffer_size - ctx->nal_length, avc_svc_subs_priority, avc_svc_subs_reserved); } ctx->sei_buffer_size = 0; } if (ctx->svc_prefix_buffer_size) { /*dst_pck = */naludmx_start_nalu(ctx, ctx->svc_prefix_buffer_size, GF_FALSE, &au_start, &pck_data); memcpy(pck_data + ctx->nal_length, ctx->svc_prefix_buffer, ctx->svc_prefix_buffer_size); if (ctx->subsamples) { naludmx_add_subsample(ctx, ctx->svc_prefix_buffer_size, ctx->svc_nalu_prefix_priority, ctx->svc_nalu_prefix_reserved); } ctx->svc_prefix_buffer_size = 0; } //nalu size field /*dst_pck = */naludmx_start_nalu(ctx, (u32) nal_size, GF_FALSE, &au_start, &pck_data); pck_data += ctx->nal_length; //add subsample info before touching the size if (ctx->subsamples) { naludmx_add_subsample(ctx, (u32) nal_size, avc_svc_subs_priority, avc_svc_subs_reserved); } //bytes only come from the data packet memcpy(pck_data, nal_data, (size_t) nal_size); nal_size += sc_size; start += nal_size; remain -= nal_size; naldmx_check_timestamp_switch(ctx, &nalu_store_before, nal_size, &drop_packet, pck); //don't demux too much of input, abort when we would block. 
This avoid dispatching //a huge number of frames in a single call if (remain && gf_filter_pid_would_block(ctx->opid)) { ctx->resume_from = (u32) (start - ctx->nal_store); assert(ctx->resume_from <= ctx->nal_store_size); assert(ctx->resume_from == ctx->nal_store_size - remain); if (drop_packet) gf_filter_pid_drop_packet(ctx->ipid); return GF_OK; } } if (remain) { if (is_eos && (remain == ctx->nal_store_size)) { GF_LOG(GF_LOG_WARNING, GF_LOG_PARSER, ("[%s] Incomplete last NAL and eos, discarding\n", ctx->log_name)); remain = 0; } else { assert((u32) remain<=ctx->nal_store_size); memmove(ctx->nal_store, start, remain); } } ctx->nal_store_size = remain; if (drop_packet) gf_filter_pid_drop_packet(ctx->ipid); if (is_eos) return naludmx_process(filter); if ((ctx->nb_nalus>nalu_before) && gf_filter_reporting_enabled(filter)) { char szStatus[1024]; sprintf(szStatus, "%s %dx%d % 10d NALU % 8d I % 8d P % 8d B % 8d SEI", ctx->log_name, ctx->width, ctx->height, ctx->nb_nalus, ctx->nb_i, ctx->nb_p, ctx->nb_b, ctx->nb_sei); gf_filter_update_status(filter, -1, szStatus); } if (ctx->full_au_source && ctx->poc_probe_done) { if (ctx->first_pck_in_au) naludmx_finalize_au_flags(ctx); naludmx_enqueue_or_dispatch(ctx, NULL, GF_TRUE); } return GF_OK; } static GF_Err naludmx_initialize(GF_Filter *filter) { GF_NALUDmxCtx *ctx = gf_filter_get_udta(filter); ctx->sps = gf_list_new(); ctx->pps = gf_list_new(); switch (ctx->nal_length) { case 1: ctx->max_nalu_size_allowed = 0xFF; break; case 2: ctx->max_nalu_size_allowed = 0xFFFF; break; case 4: ctx->max_nalu_size_allowed = 0xFFFFFFFF; break; case 0: ctx->max_nalu_size_allowed = 0xFFFFFFFF; ctx->nal_length = 4; ctx->nal_adjusted = GF_TRUE; break; default: GF_LOG(GF_LOG_WARNING, GF_LOG_CODING, ("[%s] NAL size length %d is not allowed, defaulting to 4 bytes\n", ctx->log_name)); ctx->max_nalu_size_allowed = 0xFFFFFFFF; ctx->nal_length = 4; break; } return GF_OK; } static void naludmx_del_param_list(GF_List *ps) { if (!ps) return; while 
(gf_list_count(ps)) {
		GF_NALUFFParam *sl = gf_list_pop_back(ps);
		if (sl->data) gf_free(sl->data);
		gf_free(sl);
	}
	gf_list_del(ps);
}

/* Logs import/parse statistics (frame counts, slice type breakdown, SEI and
 * IDR counts, plus SVC/L-HEVC enhancement-layer stats when present). */
static void naludmx_log_stats(GF_NALUDmxCtx *ctx)
{
	u32 i, count;
	const char *msg_import;
	u32 nb_frames = 0;
	//estimate frame count from total DTS and the detected frame duration
	if (ctx->cur_fps.den)
		nb_frames = (u32) (ctx->dts / ctx->cur_fps.den);

	if (ctx->idur.den && ctx->idur.num) {
		GF_LOG(GF_LOG_INFO, GF_LOG_AUTHOR, ("%s duration specified at import time, may have parsed more frames than imported\n", ctx->log_name));
		msg_import = "parsed";
	} else {
		msg_import = "Import results:";
	}

	if (ctx->nb_si || ctx->nb_sp) {
		GF_LOG(GF_LOG_INFO, GF_LOG_AUTHOR, ("%s %s %d frames (%d NALUs) - Slices: %d I %d P %d B %d SP %d SI - %d SEI - %d IDR\n", ctx->log_name, msg_import, nb_frames, ctx->nb_nalus, ctx->nb_i, ctx->nb_p, ctx->nb_b, ctx->nb_sp, ctx->nb_si, ctx->nb_sei, ctx->nb_idr ));
	} else if (ctx->vvc_no_stats) {
		//slice type breakdown unavailable (VVC stream with unknown slice types)
		GF_LOG(GF_LOG_INFO, GF_LOG_AUTHOR, ("%s %s %d samples (%d NALUs) - %d SEI - %d IDR\n", ctx->log_name, msg_import, nb_frames, ctx->nb_nalus, ctx->nb_sei, ctx->nb_idr));
	} else {
		GF_LOG(GF_LOG_INFO, GF_LOG_AUTHOR, ("%s %s %d samples (%d NALUs) - Slices: %d I %d P %d B - %d SEI - %d IDR\n", ctx->log_name, msg_import, nb_frames, ctx->nb_nalus, ctx->nb_i, ctx->nb_p, ctx->nb_b, ctx->nb_sei, ctx->nb_idr));
	}
	if (ctx->codecid==GF_CODECID_AVC) {
		//per-SSPS SVC enhancement-layer slice stats
		count = gf_list_count(ctx->sps);
		for (i=0; i<count; i++) {
			AVC_SPS *sps;
			GF_NALUFFParam *svcc = (GF_NALUFFParam*)gf_list_get(ctx->sps, i);
			sps = & ctx->avc_state->sps[svcc->id];
			if (sps->nb_ei || sps->nb_ep) {
				GF_LOG(GF_LOG_INFO, GF_LOG_AUTHOR, ("%s SVC (SSPS ID %d, %dx%d) %s Slices: %d I %d P %d B\n", ctx->log_name, svcc->id - GF_SVC_SSPS_ID_SHIFT, sps->width, sps->height, msg_import, sps->nb_ei, sps->nb_ep, sps->nb_eb ));
			}
		}
	} else if (ctx->nb_e_i || ctx->nb_e_p || ctx->nb_e_b) {
		GF_LOG(GF_LOG_INFO, GF_LOG_AUTHOR, ("%s L-HEVC %s Slices: %d I %d P %d B\n", ctx->log_name, msg_import, ctx->nb_e_i, ctx->nb_e_p, ctx->nb_e_b ));
	}

	if (ctx->max_total_delay>1) {
		GF_LOG(GF_LOG_INFO, GF_LOG_AUTHOR, ("%s Stream uses forward prediction - stream CTS offset: %d frames\n", ctx->log_name, ctx->max_total_delay));
	}

	//suggest a smaller NAL size-field length when the observed max NALU size allows it
	if (!ctx->nal_adjusted) {
		if ((ctx->max_nalu_size < 0xFF) && (ctx->nal_length>1) ){
			GF_LOG(GF_LOG_INFO, GF_LOG_AUTHOR, ("%s Max NALU size is %d - stream could be optimized by setting nal_length=1\n", ctx->log_name, ctx->max_nalu_size));
		} else if ((ctx->max_nalu_size < 0xFFFF) && (ctx->nal_length>2) ){
			GF_LOG(GF_LOG_INFO, GF_LOG_AUTHOR, ("%s Max NALU size is %d - stream could be optimized by setting nal_length=2\n", ctx->log_name, ctx->max_nalu_size));
		}
	}
}

/* Filter destructor: logs stats (importer mode), releases bitstreams, buffers,
 * queued packets, parameter-set lists and codec parser states. */
static void naludmx_finalize(GF_Filter *filter)
{
	GF_NALUDmxCtx *ctx = gf_filter_get_udta(filter);

	if (ctx->importer) naludmx_log_stats(ctx);

	if (ctx->bs_r) gf_bs_del(ctx->bs_r);
	if (ctx->bs_w) gf_bs_del(ctx->bs_w);
	if (ctx->indexes) gf_free(ctx->indexes);
	if (ctx->nal_store) gf_free(ctx->nal_store);
	if (ctx->pck_queue) {
		//discard (not dispatch) any packet still pending
		while (gf_list_count(ctx->pck_queue)) {
			GF_FilterPacket *pck = gf_list_pop_back(ctx->pck_queue);
			gf_filter_pck_discard(pck);
		}
		gf_list_del(ctx->pck_queue);
	}
	if (ctx->sei_buffer) gf_free(ctx->sei_buffer);
	if (ctx->svc_prefix_buffer) gf_free(ctx->svc_prefix_buffer);
	if (ctx->subsamp_buffer) gf_free(ctx->subsamp_buffer);
	if (ctx->src_pck) gf_filter_pck_unref(ctx->src_pck);
	ctx->src_pck = NULL;

	naludmx_del_param_list(ctx->sps);
	naludmx_del_param_list(ctx->pps);
	naludmx_del_param_list(ctx->vps);
	naludmx_del_param_list(ctx->sps_ext);
	naludmx_del_param_list(ctx->pps_svc);
	naludmx_del_param_list(ctx->vvc_aps_pre);
	naludmx_del_param_list(ctx->vvc_dci);

	if (ctx->avc_state) gf_free(ctx->avc_state);
	if (ctx->hevc_state) gf_free(ctx->hevc_state);
	if (ctx->vvc_state) gf_free(ctx->vvc_state);
}

/* Probes raw data for an AnnexB AVC/HEVC/VVC stream by counting plausible
 * NAL units per codec (continues beyond this chunk). */
static const char *naludmx_probe_data(const u8 *data, u32 size, GF_FilterProbeScore *score)
{
	u32 sc, sc_size;
	u32 not_hevc=0;
	u32 not_avc=0;
	u32 not_vvc=0;
	u32 nb_hevc=0;
	u32 nb_avc=0;
	u32 nb_vvc=0;
	u32 nb_nalus=0;
	u32 nb_hevc_zero=0;
	u32 nb_avc_zero=0;
	u32 nb_vvc_zero=0;
u32 nb_sps_hevc=0,nb_pps_hevc=0,nb_vps_hevc=0; u32 nb_sps_avc=0,nb_pps_avc=0; u32 nb_sps_vvc=0,nb_pps_vvc=0,nb_vps_vvc=0; while (size>3) { u32 nal_type=0; sc = gf_media_nalu_next_start_code(data, size, &sc_size); if (!sc_size) break; data += sc + sc_size; if (size <= sc + sc_size) break; size -= sc + sc_size; if (data[0] & 0x80) { not_avc++; not_hevc++; not_vvc++; continue; } nb_nalus++; nal_type = (data[0] & 0x7E) >> 1; if (nal_type<=40) { nb_hevc++; switch (nal_type) { case GF_HEVC_NALU_PIC_PARAM: if (nb_sps_hevc) nb_pps_hevc++; break; case GF_HEVC_NALU_SEQ_PARAM: nb_sps_hevc++; break; case GF_HEVC_NALU_VID_PARAM: nb_vps_hevc++; break; case 0: nb_hevc_zero++; break; } } else { not_hevc++; } nal_type = data[0] & 0x1F; if (nal_type && nal_type<=24) { nb_avc++; switch (nal_type) { case GF_AVC_NALU_PIC_PARAM: if (nb_sps_avc) nb_pps_avc++; break; case GF_AVC_NALU_SEQ_PARAM: nb_sps_avc++; break; case 0: nb_avc_zero++; break; } } else { not_avc++; } //check vvc - 2nd bit reserved to 0 if (data[0] & 0x40) { not_vvc++; continue; } nal_type = data[1] >> 3; if (nal_type>31) { not_vvc++; continue; } nb_vvc++; switch (nal_type) { case GF_VVC_NALU_PIC_PARAM: if (nb_sps_vvc) nb_pps_vvc++; break; case GF_VVC_NALU_SEQ_PARAM: nb_sps_vvc++; break; case GF_VVC_NALU_VID_PARAM: nb_vps_vvc++; break; case 0: nb_vvc_zero++; break; } } if (!nb_sps_avc || !nb_pps_avc) nb_avc=0; if (!nb_sps_hevc || !nb_pps_hevc || !nb_vps_hevc) nb_hevc=0; if (!nb_sps_vvc || !nb_pps_vvc || !nb_vps_vvc) nb_vvc=0; if (not_avc) nb_avc=0; if (not_hevc) nb_hevc=0; if (not_vvc) nb_vvc=0; if (not_avc && not_hevc && not_vvc) return NULL; if (nb_avc==nb_avc_zero) nb_avc=0; if (nb_hevc==nb_hevc_zero) nb_hevc=0; if (nb_vvc==nb_vvc_zero) nb_vvc=0; if (!nb_hevc && !nb_avc && !nb_vvc) return NULL; *score = GF_FPROBE_SUPPORTED; if (!nb_hevc) return (nb_vvc>nb_avc) ? "video/vvc" : "video/avc"; if (!nb_avc) return (nb_vvc>nb_hevc) ? "video/vvc" : "video/hevc"; if (!nb_vvc) return (nb_avc>nb_hevc) ? 
"video/avc" : "video/hevc"; if ((nb_hevc>nb_avc) && (nb_hevc>nb_vvc)) return "video/hevc"; if ((nb_vvc>nb_avc) && (nb_vvc>nb_hevc)) return "video/vvc"; return "video/avc"; } static const GF_FilterCapability NALUDmxCaps[] = { CAP_UINT(GF_CAPS_INPUT, GF_PROP_PID_STREAM_TYPE, GF_STREAM_FILE), CAP_STRING(GF_CAPS_INPUT, GF_PROP_PID_FILE_EXT, "264|h264|26L|h26L|h26l|avc|svc|mvc|hevc|hvc|265|h265|shvc|lvhc|mhvc|266|vvc|lvvc"), CAP_STRING(GF_CAPS_INPUT, GF_PROP_PID_MIME, "video/avc|video/h264|video/svc|video/mvc|video/hevc|video/lhvc|video/shvc|video/mhvc|video/vvc"), CAP_UINT(GF_CAPS_OUTPUT_STATIC, GF_PROP_PID_STREAM_TYPE, GF_STREAM_VISUAL), CAP_UINT(GF_CAPS_OUTPUT_STATIC, GF_PROP_PID_CODECID, GF_CODECID_AVC), CAP_UINT(GF_CAPS_OUTPUT_STATIC, GF_PROP_PID_CODECID, GF_CODECID_AVC_PS), CAP_UINT(GF_CAPS_OUTPUT_STATIC, GF_PROP_PID_CODECID, GF_CODECID_SVC), CAP_UINT(GF_CAPS_OUTPUT_STATIC, GF_PROP_PID_CODECID, GF_CODECID_MVC), CAP_UINT(GF_CAPS_OUTPUT_STATIC, GF_PROP_PID_CODECID, GF_CODECID_HEVC), CAP_UINT(GF_CAPS_OUTPUT_STATIC, GF_PROP_PID_CODECID, GF_CODECID_LHVC), CAP_UINT(GF_CAPS_OUTPUT_STATIC, GF_PROP_PID_CODECID, GF_CODECID_VVC), CAP_BOOL(GF_CAPS_OUTPUT_STATIC_EXCLUDED, GF_PROP_PID_UNFRAMED, GF_TRUE), CAP_BOOL(GF_CAPS_OUTPUT_STATIC_EXCLUDED, GF_PROP_PID_TILE_BASE, GF_TRUE), {0}, CAP_UINT(GF_CAPS_INPUT,GF_PROP_PID_STREAM_TYPE, GF_STREAM_VISUAL), CAP_UINT(GF_CAPS_INPUT,GF_PROP_PID_CODECID, GF_CODECID_AVC), CAP_UINT(GF_CAPS_INPUT,GF_PROP_PID_CODECID, GF_CODECID_AVC_PS), CAP_UINT(GF_CAPS_INPUT,GF_PROP_PID_CODECID, GF_CODECID_SVC), CAP_UINT(GF_CAPS_INPUT,GF_PROP_PID_CODECID, GF_CODECID_MVC), CAP_UINT(GF_CAPS_INPUT,GF_PROP_PID_CODECID, GF_CODECID_HEVC), CAP_UINT(GF_CAPS_INPUT,GF_PROP_PID_CODECID, GF_CODECID_LHVC), CAP_UINT(GF_CAPS_INPUT,GF_PROP_PID_CODECID, GF_CODECID_VVC), CAP_BOOL(GF_CAPS_INPUT,GF_PROP_PID_UNFRAMED, GF_TRUE), CAP_BOOL(GF_CAPS_INPUT_EXCLUDED, GF_PROP_PID_TILE_BASE, GF_TRUE), }; #define OFFS(_n) #_n, offsetof(GF_NALUDmxCtx, _n) static const GF_FilterArgs 
NALUDmxArgs[] = { { OFFS(fps), "import frame rate (0 default to FPS from bitstream or 25 Hz)", GF_PROP_FRACTION, "0/1000", NULL, 0}, { OFFS(index), "indexing window length. If 0, bitstream is not probed for duration. A negative value skips the indexing if the source file is larger than 100M (slows down importers) unless a play with start range > 0 is issued, otherwise uses the positive value", GF_PROP_DOUBLE, "-1.0", NULL, 0}, { OFFS(explicit), "use explicit layered (SVC/LHVC) import", GF_PROP_BOOL, "false", NULL, GF_FS_ARG_HINT_ADVANCED}, { OFFS(strict_poc), "delay frame output of an entire GOP to ensure CTS info is correct when POC suddenly changes\n" "- off: disable GOP buffering\n" "- on: enable GOP buffering, assuming no error in POC\n" "- error: enable GOP buffering and try to detect lost frames", GF_PROP_UINT, "off", "off|on|error", GF_FS_ARG_HINT_ADVANCED}, { OFFS(nosei), "remove all sei messages", GF_PROP_BOOL, "false", NULL, GF_FS_ARG_HINT_ADVANCED}, { OFFS(nosvc), "remove all SVC/MVC/LHVC data", GF_PROP_BOOL, "false", NULL, GF_FS_ARG_HINT_ADVANCED}, { OFFS(novpsext), "remove all VPS extensions", GF_PROP_BOOL, "false", NULL, GF_FS_ARG_HINT_ADVANCED}, { OFFS(importer), "compatibility with old importer, displays import results", GF_PROP_BOOL, "false", NULL, GF_FS_ARG_HINT_ADVANCED}, { OFFS(idur), "compatibility with old importer to log imported frames only", GF_PROP_FRACTION, "0", NULL, GF_FS_ARG_HINT_HIDE}, { OFFS(nal_length), "set number of bytes used to code length field: 1, 2 or 4", GF_PROP_UINT, "4", NULL, GF_FS_ARG_HINT_EXPERT}, { OFFS(subsamples), "import subsamples information", GF_PROP_BOOL, "false", NULL, GF_FS_ARG_HINT_EXPERT}, { OFFS(deps), "import samples dependencies information", GF_PROP_BOOL, "false", NULL, GF_FS_ARG_HINT_EXPERT}, { OFFS(seirw), "rewrite AVC sei messages for ISOBMFF constraints", GF_PROP_BOOL, "true", NULL, GF_FS_ARG_HINT_EXPERT}, { OFFS(audelim), "keep Access Unit delimiter in payload", GF_PROP_BOOL, "false", NULL, 
GF_FS_ARG_HINT_EXPERT}, { OFFS(analyze), "skip reformat of decoder config and SEI and dispatch all NAL in input order - shall only be used with inspect filter analyze mode!", GF_PROP_UINT, "off", "off|on|bs|full", GF_FS_ARG_HINT_HIDE}, { OFFS(bsdbg), "debug NAL parsing in parser@debug logs\n" "- off: not enabled\n" "- on: enabled\n" "- full: enable with number of bits dumped", GF_PROP_UINT, "off", "off|on|full", GF_FS_ARG_HINT_EXPERT}, {0} }; GF_FilterRegister NALUDmxRegister = { .name = "rfnalu", GF_FS_SET_DESCRIPTION("AVC/HEVC reframer") GF_FS_SET_HELP("This filter parses AVC|H264 and HEVC files/data and outputs corresponding video PID and frames.\n" "This demuxer only produces ISOBMFF-compatible output: start codes are removed, NALU length field added and avcC/hvcC config created.\nNote: The demux uses negative CTS offsets: CTS is corrrect, but some frames may have DTS greater than CTS.") .private_size = sizeof(GF_NALUDmxCtx), .args = NALUDmxArgs, .initialize = naludmx_initialize, .finalize = naludmx_finalize, SETCAPS(NALUDmxCaps), .configure_pid = naludmx_configure_pid, .process = naludmx_process, .process_event = naludmx_process_event, .probe_data = naludmx_probe_data, }; const GF_FilterRegister *naludmx_register(GF_FilterSession *session) { return &NALUDmxRegister; } #else const GF_FilterRegister *naludmx_register(GF_FilterSession *session) { return NULL; } #endif //GPAC_DISABLE_AV_PARSERS
/* * GPAC - Multimedia Framework C SDK * * Authors: Jean Le Feuvre * Copyright (c) Telecom ParisTech 2000-2021 * All rights reserved * * This file is part of GPAC / NALU (AVC, HEVC, VVC) reframer filter * * GPAC is free software; you can redistribute it and/or modify * it under the terms of the GNU Lesser General Public License as published by * the Free Software Foundation; either version 2, or (at your option) * any later version. * * GPAC is distributed in the hope that it will be useful, * but WITHOUT ANY WARRANTY; without even the implied warranty of * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the * GNU Lesser General Public License for more details. * * You should have received a copy of the GNU Lesser General Public * License along with this library; see the file COPYING. If not, write to * the Free Software Foundation, 675 Mass Ave, Cambridge, MA 02139, USA. * */ #include <gpac/avparse.h> #include <gpac/constants.h> #include <gpac/filters.h> #include <gpac/internal/media_dev.h> //for oinf stuff #include <gpac/internal/isomedia_dev.h> #ifndef GPAC_DISABLE_AV_PARSERS #define CTS_POC_OFFSET_SAFETY 1000 GF_Err gf_bs_set_logger(GF_BitStream *bs, void (*on_bs_log)(void *udta, const char *field_name, u32 nb_bits, u64 field_val, s32 idx1, s32 idx2, s32 idx3), void *udta); typedef struct { u64 pos; Double duration; } NALUIdx; typedef struct { u32 layer_id_plus_one; u32 min_temporal_id, max_temporal_id; } LHVCLayerInfo; enum { STRICT_POC_OFF = 0, STRICT_POC_ON, STRICT_POC_ERROR, }; typedef struct { //filter args GF_Fraction fps; Double index; Bool explicit, force_sync, nosei, importer, subsamples, nosvc, novpsext, deps, seirw, audelim, analyze; u32 nal_length; u32 strict_poc; u32 bsdbg; GF_Fraction idur; //only one input pid declared GF_FilterPid *ipid; //only one output pid declared GF_FilterPid *opid; //read bitstream for AVC/HEVC parsing GF_BitStream *bs_r; //write bitstream for nalus size length rewrite GF_BitStream *bs_w; //current CTS/DTS of the 
stream, may be overridden by input packet if not file (eg TS PES) u64 cts, dts, prev_dts, prev_cts; u32 pck_duration; //basic config stored here: with, height CRC of base and enh layer decoder config, sample aspect ratio //when changing, a new pid config will be emitted u32 width, height; u32 crc_cfg, crc_cfg_enh; GF_Fraction sar; GF_Fraction cur_fps; //duration of the file if known GF_Fraction64 duration; //playback start range Double start_range; //indicates we are in seek, packets before start range should be marked Bool in_seek; //set once we play something Bool is_playing; //is a file, is a file fully loaded on disk (local or download done) Bool is_file, file_loaded; //initial PLAY command found Bool initial_play_done; //list of RAP entry points NALUIdx *indexes; u32 index_alloc_size, index_size; //timescale of the input pid if any, 0 otherwise u32 timescale; //framing flag of input packet when input pid has timing (eg is not a file) Bool input_is_au_start; GF_FilterPacket *src_pck; Bool full_au_source; //total delay in frames between decode and presentation s32 max_total_delay; //max size codable with our nal_length setting u32 max_nalu_size_allowed; //position in input packet from which we resume parsing u32 resume_from; //prevents message about possible NAL size optimizaion at finalization Bool nal_adjusted; //avc/hevc switch u32 codecid; //name of the logger const char *log_name; //list of packet (in decode order !!) not yet dispatched. 
//Dispatch depends on the mode: //strict_poc=0: we wait after each IDR until we find a stable poc diff between pictures, controled by poc_probe_done //strict_poc>=1: we dispatch only after IDR or at the end (huge delay) GF_List *pck_queue; //dts of the last IDR found u64 dts_last_IDR; //max size of NALUs in the bitstream u32 max_nalu_size; u8 *nal_store; u32 nal_store_size, nal_store_alloc; //list of param sets found GF_List *sps, *pps, *vps, *sps_ext, *pps_svc, *vvc_aps_pre, *vvc_dci; //set to true if one of the PS has been modified, will potentially trigger a PID reconfigure Bool ps_modified; //stats u32 nb_idr, nb_i, nb_p, nb_b, nb_sp, nb_si, nb_sei, nb_nalus, nb_aud; //frame has intra slice Bool has_islice; //AU is rap GF_FilterSAPType au_sap; //frame first slice Bool first_slice_in_au; //paff used - NEED FURTHER CHECKING Bool is_paff; Bool bottom_field_flag; //SEI recovery count - if 0 and I slice only frame, openGOP detection (avc) s32 sei_recovery_frame_count; u32 use_opengop_gdr; //poc compute variables s32 last_poc, max_last_poc, max_last_b_poc, poc_diff, prev_last_poc, min_poc, poc_shift; //set to TRUE once 3 frames with same min poc diff are found, enabling dispatch of the frames Bool poc_probe_done; //pointer to the first packet of the current frame (the one holding timing info) //this packet is in the packet queue GF_FilterPacket *first_pck_in_au; //frame has slices used as reference Bool has_ref_slices; //frame has redundant coding Bool has_redundant; Bool last_frame_is_idr; //buffer to store SEI messages //for AVC: we have to rewrite the SEI to remove some of the messages according to the spec //for HEVC: we store prefix SEI here and dispatch them once the first VCL is found char *sei_buffer; u32 sei_buffer_size, sei_buffer_alloc; //subsample buffer, only used for SVC for now u32 subsamp_buffer_alloc, subsamp_buffer_size, subs_mapped_bytes; char *subsamp_buffer; //AVC specific //avc bitstream state AVCState *avc_state; //SVC specific char 
*svc_prefix_buffer; u32 svc_prefix_buffer_size, svc_prefix_buffer_alloc; u32 svc_nalu_prefix_reserved; u8 svc_nalu_prefix_priority; //HEVC specific HEVCState *hevc_state; //shvc stats u32 nb_e_idr, nb_e_i, nb_e_p, nb_e_b; Bool vvc_no_stats; LHVCLayerInfo linf[64]; u8 max_temporal_id[64]; u8 min_layer_id; //VVC specific VVCState *vvc_state; Bool has_initial_aud; char init_aud[3]; Bool interlaced; Bool is_mvc; u32 bitrate; u32 nb_frames; } GF_NALUDmxCtx; static void naludmx_enqueue_or_dispatch(GF_NALUDmxCtx *ctx, GF_FilterPacket *n_pck, Bool flush_ref); static void naludmx_finalize_au_flags(GF_NALUDmxCtx *ctx); GF_Err naludmx_configure_pid(GF_Filter *filter, GF_FilterPid *pid, Bool is_remove) { const GF_PropertyValue *p; GF_NALUDmxCtx *ctx = gf_filter_get_udta(filter); if (is_remove) { ctx->ipid = NULL; if (ctx->opid) { gf_filter_pid_remove(ctx->opid); ctx->opid = NULL; } return GF_OK; } if (! gf_filter_pid_check_caps(pid)) return GF_NOT_SUPPORTED; ctx->ipid = pid; p = gf_filter_pid_get_property(pid, GF_PROP_PID_TIMESCALE); if (p) { ctx->timescale = p->value.uint; //if we have a FPS prop, use it p = gf_filter_pid_get_property(pid, GF_PROP_PID_FPS); if (p) { ctx->cur_fps = p->value.frac; } else { ctx->cur_fps.den = 0; ctx->cur_fps.num = ctx->timescale; } } p = gf_filter_pid_get_property(pid, GF_PROP_PID_CODECID); if (p) { switch (p->value.uint) { case GF_CODECID_HEVC: case GF_CODECID_LHVC: ctx->codecid = GF_CODECID_HEVC; break; case GF_CODECID_VVC: ctx->codecid = GF_CODECID_VVC; break; case GF_CODECID_AVC: case GF_CODECID_AVC_PS: case GF_CODECID_SVC: case GF_CODECID_MVC: ctx->codecid = GF_CODECID_AVC; break; default: return GF_NOT_SUPPORTED; } } else { p = gf_filter_pid_get_property(pid, GF_PROP_PID_MIME); if (p && p->value.string && ( strstr(p->value.string, "hvc") || strstr(p->value.string, "hevc") || strstr(p->value.string, "265") || strstr(p->value.string, "shvc") || strstr(p->value.string, "mhvc") || strstr(p->value.string, "lhvc") ) ) ctx->codecid = 
GF_CODECID_HEVC; else if (p && p->value.string && ( strstr(p->value.string, "vvc") ) ) ctx->codecid = GF_CODECID_VVC; else { p = gf_filter_pid_get_property(pid, GF_PROP_PID_FILE_EXT); if (p && p->value.string && ( strstr(p->value.string, "hvc") || strstr(p->value.string, "hevc") || strstr(p->value.string, "265") || strstr(p->value.string, "shvc") || strstr(p->value.string, "mhvc") || strstr(p->value.string, "lhvc") ) ) ctx->codecid = GF_CODECID_HEVC; else if (p && p->value.string && ( strstr(p->value.string, "vvc") || strstr(p->value.string, "266") || strstr(p->value.string, "lvvc") ) ) ctx->codecid = GF_CODECID_VVC; else ctx->codecid = GF_CODECID_AVC; } } if (ctx->codecid==GF_CODECID_HEVC) { #ifdef GPAC_DISABLE_HEVC return GF_NOT_SUPPORTED; #else ctx->log_name = "HEVC"; if (ctx->avc_state) gf_free(ctx->avc_state); if (ctx->vvc_state) gf_free(ctx->vvc_state); if (!ctx->hevc_state) GF_SAFEALLOC(ctx->hevc_state, HEVCState); ctx->min_layer_id = 0xFF; #endif } else if (ctx->codecid==GF_CODECID_VVC) { ctx->log_name = "VVC"; if (ctx->hevc_state) gf_free(ctx->hevc_state); if (ctx->avc_state) gf_free(ctx->avc_state); if (!ctx->vvc_state) GF_SAFEALLOC(ctx->vvc_state, VVCState); } else { ctx->log_name = "AVC|H264"; if (ctx->hevc_state) gf_free(ctx->hevc_state); if (ctx->vvc_state) gf_free(ctx->vvc_state); if (!ctx->avc_state) GF_SAFEALLOC(ctx->avc_state, AVCState); } if (ctx->timescale && !ctx->opid) { ctx->opid = gf_filter_pid_new(filter); ctx->first_slice_in_au = GF_TRUE; } ctx->full_au_source = GF_FALSE; p = gf_filter_pid_get_property(pid, GF_PROP_PID_UNFRAMED_FULL_AU); if (p && p->value.boolean) { GF_FilterEvent fevt; //this is a reframer used after an encoder, we want to make sure we have enough frames to compute POC otherwise we might block the chain //by holding input packets - ask 1s by default GF_FEVT_INIT(fevt, GF_FEVT_BUFFER_REQ, ctx->ipid); fevt.buffer_req.pid_only = GF_TRUE; fevt.buffer_req.max_buffer_us = 1000000; gf_filter_pid_send_event(ctx->ipid, &fevt); 
ctx->full_au_source = GF_TRUE; } //copy properties at init or reconfig if (ctx->opid) { if (ctx->poc_probe_done) { //full frame mode, flush everything before signaling discontinuity //for other modes discontinuity we signal disconntinuity before the current AU being reconstructed if (ctx->full_au_source && ctx->first_pck_in_au) naludmx_finalize_au_flags(ctx); naludmx_enqueue_or_dispatch(ctx, NULL, GF_TRUE); } gf_filter_pid_copy_properties(ctx->opid, ctx->ipid); gf_filter_pid_set_property(ctx->opid, GF_PROP_PID_STREAM_TYPE, & PROP_UINT(GF_STREAM_VISUAL)); gf_filter_pid_set_property(ctx->opid, GF_PROP_PID_CODECID, & PROP_UINT(ctx->codecid)); gf_filter_pid_set_property(ctx->opid, GF_PROP_PID_UNFRAMED, NULL); gf_filter_pid_set_property(ctx->opid, GF_PROP_PID_UNFRAMED_FULL_AU, NULL); if (!gf_filter_pid_get_property(ctx->ipid, GF_PROP_PID_ID)) gf_filter_pid_set_property(ctx->opid, GF_PROP_PID_ID, &PROP_UINT(1)); ctx->ps_modified = GF_TRUE; ctx->crc_cfg = ctx->crc_cfg_enh = 0; } return GF_OK; } static void naludmx_check_dur(GF_Filter *filter, GF_NALUDmxCtx *ctx) { FILE *stream; GF_BitStream *bs; u64 duration, cur_dur, nal_start, start_code_pos, rate; AVCState *avc_state = NULL; HEVCState *hevc_state = NULL; VVCState *vvc_state = NULL; Bool first_slice_in_pic = GF_TRUE; const GF_PropertyValue *p; const char *filepath = NULL; if (!ctx->opid || ctx->timescale || ctx->file_loaded) return; p = gf_filter_pid_get_property(ctx->ipid, GF_PROP_PID_FILEPATH); if (!p || !p->value.string || !strncmp(p->value.string, "gmem://", 7)) { ctx->is_file = GF_FALSE; ctx->file_loaded = GF_TRUE; return; } filepath = p->value.string; ctx->is_file = GF_TRUE; if (ctx->index<0) { if (gf_opts_get_bool("temp", "force_indexing")) { ctx->index = 1.0; } else { p = gf_filter_pid_get_property(ctx->ipid, GF_PROP_PID_DOWN_SIZE); if (!p || (p->value.longuint > 20000000)) { GF_LOG(GF_LOG_INFO, GF_LOG_PARSER, ("[%s] Source file larger than 20M, skipping indexing\n", ctx->log_name)); } else { ctx->index = 
-ctx->index; } } } if (ctx->index<=0) { ctx->duration.num = 1; ctx->file_loaded = GF_TRUE; return; } if (ctx->codecid==GF_CODECID_HEVC) { GF_SAFEALLOC(hevc_state, HEVCState); if (!hevc_state) return; } else if (ctx->codecid==GF_CODECID_VVC) { GF_SAFEALLOC(vvc_state, VVCState); if (!vvc_state) return; } else { GF_SAFEALLOC(avc_state, AVCState); if (!avc_state) return; } stream = gf_fopen(filepath, "rb"); if (!stream) { if (hevc_state) gf_free(hevc_state); if (vvc_state) gf_free(vvc_state); if (avc_state) gf_free(avc_state); return; } ctx->index_size = 0; duration = 0; cur_dur = 0; bs = gf_bs_from_file(stream, GF_BITSTREAM_READ); gf_bs_enable_emulation_byte_removal(bs, GF_TRUE); start_code_pos = gf_bs_get_position(bs); if (!gf_media_nalu_is_start_code(bs)) { if (hevc_state) gf_free(hevc_state); if (avc_state) gf_free(avc_state); gf_bs_del(bs); gf_fclose(stream); ctx->duration.num = 1; ctx->file_loaded = GF_TRUE; return; } nal_start = gf_bs_get_position(bs); while (gf_bs_available(bs)) { u32 nal_size; s32 res; Bool is_rap = GF_FALSE; Bool is_slice = GF_FALSE; nal_size = gf_media_nalu_next_start_code_bs(bs); gf_bs_seek(bs, nal_start); if (hevc_state) { #ifndef GPAC_DISABLE_HEVC u8 temporal_id, layer_id, nal_type; res = gf_hevc_parse_nalu_bs(bs, hevc_state, &nal_type, &temporal_id, &layer_id); if (res>0) first_slice_in_pic = GF_TRUE; switch (nal_type) { case GF_HEVC_NALU_SLICE_IDR_N_LP: case GF_HEVC_NALU_SLICE_IDR_W_DLP: case GF_HEVC_NALU_SLICE_CRA: case GF_HEVC_NALU_SLICE_BLA_N_LP: case GF_HEVC_NALU_SLICE_BLA_W_LP: case GF_HEVC_NALU_SLICE_BLA_W_DLP: is_rap = GF_TRUE; is_slice = GF_TRUE; break; case GF_HEVC_NALU_SLICE_STSA_N: case GF_HEVC_NALU_SLICE_STSA_R: case GF_HEVC_NALU_SLICE_RADL_R: case GF_HEVC_NALU_SLICE_RASL_R: case GF_HEVC_NALU_SLICE_RADL_N: case GF_HEVC_NALU_SLICE_RASL_N: case GF_HEVC_NALU_SLICE_TRAIL_N: case GF_HEVC_NALU_SLICE_TRAIL_R: case GF_HEVC_NALU_SLICE_TSA_N: case GF_HEVC_NALU_SLICE_TSA_R: is_slice = GF_TRUE; break; } #endif // GPAC_DISABLE_HEVC } 
else if (vvc_state) { } else { u32 nal_type; u64 pos = gf_bs_get_position(bs); res = gf_avc_parse_nalu(bs, avc_state); if (res>0) first_slice_in_pic = GF_TRUE; nal_type = avc_state->last_nal_type_parsed; switch (nal_type) { case GF_AVC_NALU_SEQ_PARAM: gf_bs_seek(bs, pos); gf_avc_read_sps_bs(bs, avc_state, GF_FALSE, NULL); break; case GF_AVC_NALU_PIC_PARAM: gf_bs_seek(bs, pos); gf_avc_read_pps_bs(bs, avc_state); break; case GF_AVC_NALU_IDR_SLICE: is_rap = GF_TRUE; is_slice = GF_TRUE; break; case GF_AVC_NALU_NON_IDR_SLICE: case GF_AVC_NALU_DP_A_SLICE: case GF_AVC_NALU_DP_B_SLICE: case GF_AVC_NALU_DP_C_SLICE: is_slice = GF_TRUE; break; } } if (is_rap && first_slice_in_pic && (cur_dur >= ctx->index * ctx->cur_fps.num) ) { if (!ctx->index_alloc_size) ctx->index_alloc_size = 10; else if (ctx->index_alloc_size == ctx->index_size) ctx->index_alloc_size *= 2; ctx->indexes = gf_realloc(ctx->indexes, sizeof(NALUIdx)*ctx->index_alloc_size); ctx->indexes[ctx->index_size].pos = start_code_pos; ctx->indexes[ctx->index_size].duration = (Double) duration; ctx->indexes[ctx->index_size].duration /= ctx->cur_fps.num; ctx->index_size ++; cur_dur = 0; } if (is_slice && first_slice_in_pic) { duration += ctx->cur_fps.den; cur_dur += ctx->cur_fps.den; first_slice_in_pic = GF_FALSE; } gf_bs_seek(bs, nal_start + nal_size); /* nal_start = gf_media_nalu_next_start_code_bs(bs); if (nal_start) gf_bs_skip_bytes(bs, nal_start); */ if (gf_bs_available(bs)<4) break; start_code_pos = gf_bs_get_position(bs); nal_start = gf_media_nalu_is_start_code(bs); if (!nal_start) { break; } nal_start = gf_bs_get_position(bs); } rate = gf_bs_get_position(bs); gf_bs_del(bs); gf_fclose(stream); if (hevc_state) gf_free(hevc_state); if (vvc_state) gf_free(vvc_state); if (avc_state) gf_free(avc_state); if (!ctx->duration.num || (ctx->duration.num * ctx->cur_fps.num != duration * ctx->duration.den)) { ctx->duration.num = (s32) duration; ctx->duration.den = ctx->cur_fps.num; gf_filter_pid_set_property(ctx->opid, 
GF_PROP_PID_DURATION, & PROP_FRAC64(ctx->duration)); if (duration && (!gf_sys_is_test_mode() || gf_opts_get_bool("temp", "force_indexing"))) { rate *= 8 * ctx->duration.den; rate /= ctx->duration.num; ctx->bitrate = (u32) rate; } } p = gf_filter_pid_get_property(ctx->ipid, GF_PROP_PID_FILE_CACHED); if (p && p->value.boolean) ctx->file_loaded = GF_TRUE; gf_filter_pid_set_property(ctx->opid, GF_PROP_PID_CAN_DATAREF, & PROP_BOOL(GF_TRUE ) ); } static void naludmx_enqueue_or_dispatch(GF_NALUDmxCtx *ctx, GF_FilterPacket *n_pck, Bool flush_ref) { //TODO: we are dispatching frames in "negctts mode", ie we may have DTS>CTS //need to signal this for consumers using DTS (eg MPEG-2 TS) if (flush_ref && ctx->pck_queue && ctx->poc_diff) { u32 dts_inc=0; s32 last_poc = 0; Bool patch_missing_frame = GF_FALSE; //send all reference packet queued if (ctx->strict_poc==STRICT_POC_ERROR) { u32 i; u32 nb_bframes = 0; for (i=0; i<gf_list_count(ctx->pck_queue); i++) { s32 poc; u64 poc_ts, dts; GF_FilterPacket *q_pck = gf_list_get(ctx->pck_queue, i); if (q_pck == ctx->first_pck_in_au) break; dts = gf_filter_pck_get_dts(q_pck); if (dts == GF_FILTER_NO_TS) continue; poc_ts = gf_filter_pck_get_cts(q_pck); assert(poc_ts != GF_FILTER_NO_TS); poc = (s32) ((s64) poc_ts - CTS_POC_OFFSET_SAFETY); if (i) { if (last_poc>poc) nb_bframes ++; else if (last_poc + ctx->poc_diff<poc) patch_missing_frame = GF_TRUE; } last_poc = poc; } if (nb_bframes>1) patch_missing_frame = GF_FALSE; else if (nb_bframes) patch_missing_frame = GF_TRUE; } last_poc = GF_INT_MIN; while (gf_list_count(ctx->pck_queue) ) { u64 dts; GF_FilterPacket *q_pck = gf_list_get(ctx->pck_queue, 0); if (q_pck == ctx->first_pck_in_au) break; dts = gf_filter_pck_get_dts(q_pck); if (dts != GF_FILTER_NO_TS) { s32 poc; u64 poc_ts, cts; u8 carousel_info = gf_filter_pck_get_carousel_version(q_pck); //we reused timing from source packets if (!carousel_info) { assert(ctx->timescale); gf_list_rem(ctx->pck_queue, 0); gf_filter_pck_send(q_pck); continue; 
} gf_filter_pck_set_carousel_version(q_pck, 0); poc_ts = gf_filter_pck_get_cts(q_pck); assert(poc_ts != GF_FILTER_NO_TS); poc = (s32) ((s64) poc_ts - CTS_POC_OFFSET_SAFETY); if (patch_missing_frame) { if (last_poc!=GF_INT_MIN) { //check if we missed an IDR (poc reset) if (poc && (last_poc > poc) ) { last_poc = 0; dts_inc += ctx->cur_fps.den; ctx->dts_last_IDR = dts; ctx->dts += ctx->cur_fps.den; } //check if we miss a frame while (last_poc + ctx->poc_diff < poc) { last_poc += ctx->poc_diff; dts_inc += ctx->cur_fps.den; ctx->dts += ctx->cur_fps.den; } } last_poc = poc; dts += dts_inc; } //poc is stored as diff since last IDR which has min_poc cts = ( (ctx->min_poc + (s32) poc) * ctx->cur_fps.den ) / ctx->poc_diff + ctx->dts_last_IDR; /*if PAFF, 2 pictures (eg poc) <=> 1 aggregated frame (eg sample), divide by 2*/ if (ctx->is_paff) { cts /= 2; /*in some cases the poc is not on the top field - if that is the case, round up*/ if (cts % ctx->cur_fps.den) { cts = ((cts/ctx->cur_fps.den)+1) * ctx->cur_fps.den; } } gf_filter_pck_set_cts(q_pck, cts); GF_LOG(GF_LOG_DEBUG, GF_LOG_PARSER, ("[%s] Frame timestamps computed dts "LLU" cts "LLU" (poc %d min poc %d poc_diff %d last IDR DTS "LLU")\n", ctx->log_name, dts, cts, poc, ctx->min_poc, ctx->poc_diff, ctx->dts_last_IDR)); if (ctx->importer && ctx->cur_fps.den) { poc = (s32) ( (s64) cts - (s64) dts); if (poc<0) poc = -poc; poc /= ctx->cur_fps.den; if (poc > ctx->max_total_delay) ctx->max_total_delay = poc; } } gf_list_rem(ctx->pck_queue, 0); gf_filter_pck_send(q_pck); } } if (!n_pck) return; if (!ctx->pck_queue) ctx->pck_queue = gf_list_new(); gf_list_add(ctx->pck_queue, n_pck); } static void naludmx_add_param_nalu(GF_List *param_list, GF_NALUFFParam *sl, u8 nal_type) { GF_NALUFFParamArray *pa = NULL; u32 i, count; count = gf_list_count(param_list); for (i=0; i<count; i++) { pa = gf_list_get(param_list, i); if (pa->type == nal_type) break; pa = NULL; } if (!pa) { GF_SAFEALLOC(pa, GF_NALUFFParamArray); if (!pa) return; 
pa->array_completeness = 1; pa->type = nal_type; pa->nalus = gf_list_new(); gf_list_add(param_list, pa); } gf_list_add(pa->nalus, sl); } #ifndef GPAC_DISABLE_HEVC static void naludmx_hevc_set_parall_type(GF_NALUDmxCtx *ctx, GF_HEVCConfig *hevc_cfg) { u32 use_tiles, use_wpp, nb_pps, i, count; HEVCState hevc; count = gf_list_count(ctx->pps); memset(&hevc, 0, sizeof(HEVCState)); hevc.sps_active_idx = -1; use_tiles = 0; use_wpp = 0; nb_pps = 0; for (i=0; i<count; i++) { GF_NALUFFParam *slc = (GF_NALUFFParam*)gf_list_get(ctx->pps, i); s32 idx = gf_hevc_read_pps(slc->data, slc->size, &hevc); if (idx>=0) { HEVC_PPS *pps; nb_pps++; pps = &hevc.pps[idx]; if (!pps->entropy_coding_sync_enabled_flag && pps->tiles_enabled_flag) use_tiles++; else if (pps->entropy_coding_sync_enabled_flag && !pps->tiles_enabled_flag) use_wpp++; } } if (!use_tiles && !use_wpp) hevc_cfg->parallelismType = 1; else if (!use_wpp && (use_tiles==nb_pps) ) hevc_cfg->parallelismType = 2; else if (!use_tiles && (use_wpp==nb_pps) ) hevc_cfg->parallelismType = 3; else hevc_cfg->parallelismType = 0; } #endif // GPAC_DISABLE_HEVC GF_Err naludmx_set_hevc_oinf(GF_NALUDmxCtx *ctx, u8 *max_temporal_id) { GF_OperatingPointsInformation *oinf; GF_BitStream *bs; u8 *data; u32 data_size; u32 i; HEVC_VPS *vps; GF_NALUFFParam *vps_sl = gf_list_get(ctx->vps, 0); if (!vps_sl) return GF_SERVICE_ERROR; vps = &ctx->hevc_state->vps[vps_sl->id]; if (!vps->vps_extension_found) return GF_OK; if (vps->max_layers<2) return GF_OK; oinf = gf_isom_oinf_new_entry(); if (!oinf) return GF_OUT_OF_MEM; oinf->scalability_mask = 0; for (i = 0; i < 16; i++) { if (vps->scalability_mask[i]) oinf->scalability_mask |= 1 << i; } for (i = 0; i < vps->num_profile_tier_level; i++) { HEVC_ProfileTierLevel ptl = (i == 0) ? 
vps->ptl : vps->ext_ptl[i-1];
		LHEVC_ProfileTierLevel *lhevc_ptl;
		GF_SAFEALLOC(lhevc_ptl, LHEVC_ProfileTierLevel);
		if (!lhevc_ptl) return GF_OUT_OF_MEM;
		lhevc_ptl->general_profile_space = ptl.profile_space;
		lhevc_ptl->general_tier_flag = ptl.tier_flag;
		lhevc_ptl->general_profile_idc = ptl.profile_idc;
		lhevc_ptl->general_profile_compatibility_flags = ptl.profile_compatibility_flag;
		//pack the individual constraint flags into the 48-bit indicator field (bits 47..44 + reserved 44 bits)
		lhevc_ptl->general_constraint_indicator_flags = 0;
		if (ptl.general_progressive_source_flag) lhevc_ptl->general_constraint_indicator_flags |= ((u64)1) << 47;
		if (ptl.general_interlaced_source_flag) lhevc_ptl->general_constraint_indicator_flags |= ((u64)1) << 46;
		if (ptl.general_non_packed_constraint_flag) lhevc_ptl->general_constraint_indicator_flags |= ((u64)1) << 45;
		if (ptl.general_frame_only_constraint_flag) lhevc_ptl->general_constraint_indicator_flags |= ((u64)1) << 44;
		lhevc_ptl->general_constraint_indicator_flags |= ptl.general_reserved_44bits;
		lhevc_ptl->general_level_idc = ptl.level_idc;
		gf_list_add(oinf->profile_tier_levels, lhevc_ptl);
	}

	//one operating point per output layer set declared in the VPS extension
	for (i = 0; i < vps->num_output_layer_sets; i++) {
		LHEVC_OperatingPoint *op;
		u32 j;
		u16 minPicWidth, minPicHeight, maxPicWidth, maxPicHeight;
		u8 maxChromaFormat, maxBitDepth;
		u8 maxTemporalId;
		GF_SAFEALLOC(op, LHEVC_OperatingPoint);
		if (!op) return GF_OUT_OF_MEM;
		op->output_layer_set_idx = i;
		op->layer_count = vps->num_necessary_layers[i];
		minPicWidth = minPicHeight = maxPicWidth = maxPicHeight = maxTemporalId = 0;
		maxChromaFormat = maxBitDepth = 0;
		//aggregate min/max picture characteristics over all layers of this operating point
		for (j = 0; j < op->layer_count; j++) {
			u32 format_idx;
			u32 bitDepth;
			op->layers_info[j].ptl_idx = vps->profile_tier_level_idx[i][j];
			//NOTE(review): layer_id is set to the loop index j, not to vps->layer_id_in_nuh[j] - confirm intended
			op->layers_info[j].layer_id = j;
			op->layers_info[j].is_outputlayer = vps->output_layer_flag[i][j];
			//FIXME: we consider that this flag is never set
			op->layers_info[j].is_alternate_outputlayer = GF_FALSE;
			if (max_temporal_id) {
				if (!maxTemporalId || (maxTemporalId < max_temporal_id[op->layers_info[j].layer_id]))
					maxTemporalId = max_temporal_id[op->layers_info[j].layer_id];
			} else {
				maxTemporalId = vps->max_sub_layers;
			}

			format_idx = vps->rep_format_idx[op->layers_info[j].layer_id];
			if (!minPicWidth || (minPicWidth > vps->rep_formats[format_idx].pic_width_luma_samples)) minPicWidth = vps->rep_formats[format_idx].pic_width_luma_samples;
			if (!minPicHeight || (minPicHeight > vps->rep_formats[format_idx].pic_height_luma_samples)) minPicHeight = vps->rep_formats[format_idx].pic_height_luma_samples;
			if (!maxPicWidth || (maxPicWidth < vps->rep_formats[format_idx].pic_width_luma_samples)) maxPicWidth = vps->rep_formats[format_idx].pic_width_luma_samples;
			if (!maxPicHeight || (maxPicHeight < vps->rep_formats[format_idx].pic_height_luma_samples)) maxPicHeight = vps->rep_formats[format_idx].pic_height_luma_samples;
			if (!maxChromaFormat || (maxChromaFormat < vps->rep_formats[format_idx].chroma_format_idc)) maxChromaFormat = vps->rep_formats[format_idx].chroma_format_idc;
			//bit depth of this layer = max of luma and chroma depths
			bitDepth = vps->rep_formats[format_idx].bit_depth_chroma > vps->rep_formats[format_idx].bit_depth_luma ?
vps->rep_formats[format_idx].bit_depth_chroma : vps->rep_formats[format_idx].bit_depth_luma; if (!maxChromaFormat || (maxChromaFormat < bitDepth)) maxChromaFormat = bitDepth; } op->max_temporal_id = maxTemporalId; op->minPicWidth = minPicWidth; op->minPicHeight = minPicHeight; op->maxPicWidth = maxPicWidth; op->maxPicHeight = maxPicHeight; op->maxChromaFormat = maxChromaFormat; op->maxBitDepth = maxBitDepth; op->frame_rate_info_flag = GF_FALSE; //FIXME: should fetch this info from VUI op->bit_rate_info_flag = GF_FALSE; //we don't use it gf_list_add(oinf->operating_points, op); } for (i = 0; i < vps->max_layers; i++) { LHEVC_DependentLayer *dep; u32 j, k; GF_SAFEALLOC(dep, LHEVC_DependentLayer); if (!dep) return GF_OUT_OF_MEM; dep->dependent_layerID = vps->layer_id_in_nuh[i]; for (j = 0; j < vps->max_layers; j++) { if (vps->direct_dependency_flag[dep->dependent_layerID][j]) { dep->dependent_on_layerID[dep->num_layers_dependent_on] = j; dep->num_layers_dependent_on ++; } } k = 0; for (j = 0; j < 16; j++) { if (oinf->scalability_mask & (1 << j)) { dep->dimension_identifier[j] = vps->dimension_id[i][k]; k++; } } gf_list_add(oinf->dependency_layers, dep); } //write Operating Points Information Sample Group bs = gf_bs_new(NULL, 0, GF_BITSTREAM_WRITE); gf_isom_oinf_write_entry(oinf, bs); gf_bs_get_content(bs, &data, &data_size); gf_bs_del(bs); gf_isom_oinf_del_entry(oinf); gf_filter_pid_set_info_str(ctx->opid, "hevc:oinf", &PROP_DATA_NO_COPY(data, data_size) ); return GF_OK; } static void naludmx_set_hevc_linf(GF_NALUDmxCtx *ctx) { u32 i, nb_layers=0, nb_sublayers=0; u8 *data; u32 data_size; GF_BitStream *bs; for (i=0; i<64; i++) { if (ctx->linf[i].layer_id_plus_one) nb_layers++; if (ctx->linf[i].min_temporal_id != ctx->linf[i].max_temporal_id) nb_sublayers++; } if (!nb_layers && !nb_sublayers) return; bs = gf_bs_new(NULL, 0, GF_BITSTREAM_WRITE); gf_bs_write_int(bs, 0, 2); gf_bs_write_int(bs, nb_layers, 6); for (i=0; i<nb_layers; i++) { if (! 
ctx->linf[i].layer_id_plus_one) continue;
		gf_bs_write_int(bs, 0, 4);
		gf_bs_write_int(bs, ctx->linf[i].layer_id_plus_one - 1, 6);
		gf_bs_write_int(bs, ctx->linf[i].min_temporal_id, 3);
		gf_bs_write_int(bs, ctx->linf[i].max_temporal_id, 3);
		gf_bs_write_int(bs, 0, 1);
		gf_bs_write_int(bs, 0xFF, 7);
	}
	gf_bs_get_content(bs, &data, &data_size);
	gf_bs_del(bs);
	//PID info takes ownership of data (no-copy)
	gf_filter_pid_set_info_str(ctx->opid, "hevc:linf", &PROP_DATA_NO_COPY(data, data_size) );
}

/* Builds the HEVC (hvcC) and, for layered content, L-HEVC (lhvC) decoder configurations from the
   stored VPS/SPS/PPS lists. Outputs serialized configs in *dsi / *dsi_enh (caller owns the buffers),
   max base and enhancement dimensions, sample aspect ratio, and whether an HEVC base layer
   (layer_id 0 parameter set) is present. Also updates ctx->fps/cur_fps/interlaced from the first SPS. */
static void naludmx_create_hevc_decoder_config(GF_NALUDmxCtx *ctx, u8 **dsi, u32 *dsi_size, u8 **dsi_enh, u32 *dsi_enh_size, u32 *max_width, u32 *max_height, u32 *max_enh_width, u32 *max_enh_height, GF_Fraction *sar, Bool *has_hevc_base)
{
#ifndef GPAC_DISABLE_HEVC
	u32 i, count;
	u8 layer_id;
	Bool first = GF_TRUE;
	Bool first_lhvc = GF_TRUE;
	GF_HEVCConfig *cfg;
	GF_HEVCConfig *hvcc;
	GF_HEVCConfig *lvcc;
	u32 max_w, max_h, max_ew, max_eh;

	*has_hevc_base = GF_FALSE;
	max_w = max_h = 0;
	max_ew = max_eh = 0;
	sar->num = sar->den = 0;
	hvcc = gf_odf_hevc_cfg_new();
	lvcc = gf_odf_hevc_cfg_new();
	hvcc->nal_unit_size = ctx->nal_length;
	lvcc->nal_unit_size = ctx->nal_length;
	lvcc->is_lhvc = GF_TRUE;
	//check we have one pps or sps in base layer
	//layer_id is the 6-bit nuh_layer_id straddling the first two NAL header bytes
	count = gf_list_count(ctx->sps);
	for (i=0; i<count; i++) {
		GF_NALUFFParam *sl = gf_list_get(ctx->sps, i);
		layer_id = ((sl->data[0] & 0x1) << 5) | (sl->data[1] >> 3);
		if (!layer_id) {
			*has_hevc_base = GF_TRUE;
			break;
		}
	}
	count = gf_list_count(ctx->pps);
	for (i=0; i<count; i++) {
		GF_NALUFFParam *sl = gf_list_get(ctx->pps, i);
		layer_id = ((sl->data[0] & 0x1) << 5) | (sl->data[1] >> 3);
		if (!layer_id) {
			*has_hevc_base = GF_TRUE;
			break;
		}
	}
	//assign vps first so that they are serialized first
	count = gf_list_count(ctx->vps);
	for (i=0; i<count; i++) {
		GF_NALUFFParam *sl = gf_list_get(ctx->vps, i);
		HEVC_VPS *vps = &ctx->hevc_state->vps[sl->id];
		//frame-rate / temporal info is taken from the first VPS only
		if (!i) {
			hvcc->avgFrameRate = lvcc->avgFrameRate = vps->rates[0].avg_pic_rate;
			hvcc->constantFrameRate = lvcc->constantFrameRate = vps->rates[0].constand_pic_rate_idc;
hvcc->numTemporalLayers = lvcc->numTemporalLayers = vps->max_sub_layers; hvcc->temporalIdNested = lvcc->temporalIdNested = vps->temporal_id_nesting; } //TODO set scalability mask if (!ctx->analyze) naludmx_add_param_nalu((ctx->explicit || ! (*has_hevc_base) ) ? lvcc->param_array : hvcc->param_array, sl, GF_HEVC_NALU_VID_PARAM); } count = gf_list_count(ctx->sps); for (i=0; i<count; i++) { Bool is_lhvc = GF_FALSE; GF_NALUFFParam *sl = gf_list_get(ctx->sps, i); HEVC_SPS *sps = &ctx->hevc_state->sps[sl->id]; layer_id = ((sl->data[0] & 0x1) << 5) | (sl->data[1] >> 3); if (!layer_id) *has_hevc_base = GF_TRUE; if (ctx->explicit || layer_id) { cfg = lvcc; is_lhvc = GF_TRUE; } else { cfg = hvcc; } if (first || (is_lhvc && first_lhvc) ) { cfg->configurationVersion = 1; cfg->profile_space = sps->ptl.profile_space; cfg->tier_flag = sps->ptl.tier_flag; cfg->profile_idc = sps->ptl.profile_idc; cfg->general_profile_compatibility_flags = sps->ptl.profile_compatibility_flag; cfg->progressive_source_flag = sps->ptl.general_progressive_source_flag; cfg->interlaced_source_flag = sps->ptl.general_interlaced_source_flag; cfg->non_packed_constraint_flag = sps->ptl.general_non_packed_constraint_flag; cfg->frame_only_constraint_flag = sps->ptl.general_frame_only_constraint_flag; cfg->constraint_indicator_flags = sps->ptl.general_reserved_44bits; cfg->level_idc = sps->ptl.level_idc; cfg->chromaFormat = sps->chroma_format_idc; cfg->luma_bit_depth = sps->bit_depth_luma; cfg->chroma_bit_depth = sps->bit_depth_chroma; ctx->interlaced = cfg->interlaced_source_flag ? 
GF_TRUE : GF_FALSE; if (sps->aspect_ratio_info_present_flag && sps->sar_width && sps->sar_height) { sar->num = sps->sar_width; sar->den = sps->sar_height; } /*disable frame rate scan, most bitstreams have wrong values there*/ if (!ctx->timescale && first && (!ctx->fps.num || !ctx->fps.den) && sps->has_timing_info /*if detected FPS is greater than 1000, assume wrong timing info*/ && (sps->time_scale <= 1000*sps->num_units_in_tick) ) { ctx->cur_fps.num = sps->time_scale; ctx->cur_fps.den = sps->num_units_in_tick; if (!ctx->fps.num && ctx->dts==ctx->fps.den) ctx->dts = ctx->cur_fps.den; } ctx->fps = ctx->cur_fps; } first = GF_FALSE; if (is_lhvc) { first_lhvc = GF_FALSE; if (sps->width > max_ew) max_ew = sps->width; if (sps->height > max_eh) max_eh = sps->height; } else { if (sps->width > max_w) max_w = sps->width; if (sps->height > max_h) max_h = sps->height; } if (!ctx->analyze) naludmx_add_param_nalu(cfg->param_array, sl, GF_HEVC_NALU_SEQ_PARAM); } cfg = ctx->explicit ? lvcc : hvcc; count = gf_list_count(ctx->pps); for (i=0; i<count; i++) { GF_NALUFFParam *sl = gf_list_get(ctx->pps, i); layer_id = ((sl->data[0] & 0x1) << 5) | (sl->data[1] >> 3); if (!layer_id) *has_hevc_base = GF_TRUE; if (!ctx->analyze) naludmx_add_param_nalu(layer_id ? lvcc->param_array : cfg->param_array, sl, GF_HEVC_NALU_PIC_PARAM); } *dsi = *dsi_enh = NULL; *dsi_size = *dsi_enh_size = 0; if (ctx->explicit || ! 
(*has_hevc_base) ) { naludmx_hevc_set_parall_type(ctx, lvcc); gf_odf_hevc_cfg_write(lvcc, dsi, dsi_size); *max_width = *max_enh_width = max_ew; *max_height = *max_enh_height = max_eh; } else { naludmx_hevc_set_parall_type(ctx, hvcc); gf_odf_hevc_cfg_write(hvcc, dsi, dsi_size); if (gf_list_count(lvcc->param_array) ) { naludmx_hevc_set_parall_type(ctx, lvcc); gf_odf_hevc_cfg_write(lvcc, dsi_enh, dsi_enh_size); } *max_width = max_w; *max_height = max_h; *max_enh_width = max_ew; *max_enh_height = max_eh; } count = gf_list_count(hvcc->param_array); for (i=0; i<count; i++) { GF_NALUFFParamArray *pa = gf_list_get(hvcc->param_array, i); gf_list_reset(pa->nalus); } count = gf_list_count(lvcc->param_array); for (i=0; i<count; i++) { GF_NALUFFParamArray *pa = gf_list_get(lvcc->param_array, i); gf_list_reset(pa->nalus); } gf_odf_hevc_cfg_del(hvcc); gf_odf_hevc_cfg_del(lvcc); #endif // GPAC_DISABLE_HEVC } static void naludmx_create_vvc_decoder_config(GF_NALUDmxCtx *ctx, u8 **dsi, u32 *dsi_size, u8 **dsi_enh, u32 *dsi_enh_size, u32 *max_width, u32 *max_height, u32 *max_enh_width, u32 *max_enh_height, GF_Fraction *sar, Bool *has_vvc_base) { u32 i, count; u8 layer_id; Bool first = GF_TRUE; Bool first_lvvc = GF_TRUE; GF_VVCConfig *cfg; u32 max_w, max_h, max_ew, max_eh; *has_vvc_base = GF_FALSE; max_w = max_h = 0; max_ew = max_eh = 0; sar->num = sar->den = 0; cfg = gf_odf_vvc_cfg_new(); cfg->nal_unit_size = ctx->nal_length; //check we have one pps or sps in base layer count = gf_list_count(ctx->sps); for (i=0; i<count; i++) { GF_NALUFFParam *sl = gf_list_get(ctx->sps, i); layer_id = (sl->data[0] & 0x3f); //todo, base is not always 0 ! if (!layer_id) { *has_vvc_base = GF_TRUE; break; } } count = gf_list_count(ctx->pps); for (i=0; i<count; i++) { GF_NALUFFParam *sl = gf_list_get(ctx->pps, i); layer_id = (sl->data[0] & 0x3f); //todo, base is not always 0 ! 
if (!layer_id) { *has_vvc_base = GF_TRUE; break; } } //assign vps first so that they are serialized first count = gf_list_count(ctx->vps); for (i=0; i<count; i++) { GF_NALUFFParam *sl = gf_list_get(ctx->vps, i); VVC_VPS *vps = &ctx->vvc_state->vps[sl->id]; if (!i) { cfg->avgFrameRate = vps->rates[0].avg_pic_rate; cfg->constantFrameRate = vps->rates[0].constand_pic_rate_idc; cfg->numTemporalLayers = vps->max_sub_layers; } if (!ctx->analyze) naludmx_add_param_nalu(cfg->param_array, sl, GF_VVC_NALU_VID_PARAM); } count = gf_list_count(ctx->sps); for (i=0; i<count; i++) { Bool is_lvvc = GF_FALSE; GF_NALUFFParam *sl = gf_list_get(ctx->sps, i); VVC_SPS *sps = &ctx->vvc_state->sps[sl->id]; layer_id = sl->data[0] & 0x3f; if (!layer_id) *has_vvc_base = GF_TRUE; if (ctx->explicit || layer_id) { is_lvvc = GF_TRUE; } if (first || (is_lvvc && first_lvvc) ) { VVC_VPS *vps = &ctx->vvc_state->vps[sps->vps_id]; cfg->avgFrameRate = 0; cfg->constantFrameRate = 1; cfg->numTemporalLayers = sps->max_sublayers; cfg->nal_unit_size = ctx->nal_length; cfg->ptl_present = vps->num_ptl ? 1 : 0; if (vps->num_ptl) { cfg->num_constraint_info = vps->ptl[0].gci_present ? 
1 : 12; cfg->general_profile_idc = vps->ptl[0].general_profile_idc; cfg->general_tier_flag = vps->ptl[0].general_tier_flag; cfg->general_level_idc = vps->ptl[0].general_level_idc; cfg->ptl_frame_only_constraint = vps->ptl[0].frame_only_constraint; cfg->ptl_multilayer_enabled = vps->ptl[0].multilayer_enabled; cfg->general_constraint_info = gf_malloc(sizeof(u8) * cfg-> num_constraint_info); if (cfg->general_constraint_info) memcpy(cfg->general_constraint_info, vps->ptl[0].gci, cfg->num_constraint_info); //todo set temporal sublayers cfg->ptl_sublayer_present_mask = 0; cfg->num_sub_profiles = 0; cfg->ols_idx = 0; } cfg->chroma_format = sps->chroma_format_idc; cfg->bit_depth = sps->bitdepth; cfg->maxPictureWidth = sps->width; cfg->maxPictureHeight = sps->height; if (sps->aspect_ratio_info_present_flag && sps->sar_width && sps->sar_height) { sar->num = sps->sar_width; sar->den = sps->sar_height; } /*disable frame rate scan, most bitstreams have wrong values there*/ if (!ctx->timescale && first && (!ctx->fps.num || !ctx->fps.den) && sps->has_timing_info /*if detected FPS is greater than 1000, assume wrong timing info*/ && (sps->time_scale <= 1000*sps->num_units_in_tick) ) { ctx->cur_fps.num = sps->time_scale; ctx->cur_fps.den = sps->num_units_in_tick; if (!ctx->fps.num && ctx->dts==ctx->fps.den) ctx->dts = ctx->cur_fps.den; } ctx->fps = ctx->cur_fps; } first = GF_FALSE; if (is_lvvc) { first_lvvc = GF_FALSE; if (sps->width > max_ew) max_ew = sps->width; if (sps->height > max_eh) max_eh = sps->height; } else { if (sps->width > max_w) max_w = sps->width; if (sps->height > max_h) max_h = sps->height; } if (!ctx->analyze) naludmx_add_param_nalu(cfg->param_array, sl, GF_VVC_NALU_SEQ_PARAM); } count = gf_list_count(ctx->pps); for (i=0; i<count; i++) { GF_NALUFFParam *sl = gf_list_get(ctx->pps, i); layer_id = sl->data[0] & 0x3F; if (!layer_id) *has_vvc_base = GF_TRUE; if (!ctx->analyze) naludmx_add_param_nalu(cfg->param_array, sl, GF_VVC_NALU_PIC_PARAM); } count = 
gf_list_count(ctx->vvc_dci); for (i=0; i<count; i++) { GF_NALUFFParam *sl = gf_list_get(ctx->vvc_dci, i); layer_id = sl->data[0] & 0x3F; if (!layer_id) *has_vvc_base = GF_TRUE; if (!ctx->analyze) naludmx_add_param_nalu(cfg->param_array, sl, GF_VVC_NALU_DEC_PARAM); } count = gf_list_count(ctx->vvc_aps_pre); for (i=0; i<count; i++) { GF_NALUFFParam *sl = gf_list_get(ctx->vvc_aps_pre, i); layer_id = sl->data[0] & 0x3F; if (!layer_id) *has_vvc_base = GF_TRUE; if (!ctx->analyze) naludmx_add_param_nalu(cfg->param_array, sl, GF_VVC_NALU_APS_PREFIX); } *dsi = *dsi_enh = NULL; *dsi_size = *dsi_enh_size = 0; gf_odf_vvc_cfg_write(cfg, dsi, dsi_size); *max_width = max_w; *max_height = max_h; *max_enh_width = max_ew; *max_enh_height = max_eh; count = gf_list_count(cfg->param_array); for (i=0; i<count; i++) { GF_NALUFFParamArray *pa = gf_list_get(cfg->param_array, i); gf_list_reset(pa->nalus); } gf_odf_vvc_cfg_del(cfg); } void naludmx_create_avc_decoder_config(GF_NALUDmxCtx *ctx, u8 **dsi, u32 *dsi_size, u8 **dsi_enh, u32 *dsi_enh_size, u32 *max_width, u32 *max_height, u32 *max_enh_width, u32 *max_enh_height, GF_Fraction *sar) { u32 i, count; Bool first = GF_TRUE; Bool first_svc = GF_TRUE; GF_AVCConfig *cfg; GF_AVCConfig *avcc; GF_AVCConfig *svcc; u32 max_w, max_h, max_ew, max_eh; max_w = max_h = max_ew = max_eh = 0; sar->num = sar->den = 0; avcc = gf_odf_avc_cfg_new(); svcc = gf_odf_avc_cfg_new(); avcc->nal_unit_size = ctx->nal_length; svcc->nal_unit_size = ctx->nal_length; ctx->is_mvc = GF_FALSE; count = gf_list_count(ctx->sps); for (i=0; i<count; i++) { Bool is_svc = GF_FALSE; GF_NALUFFParam *sl = gf_list_get(ctx->sps, i); AVC_SPS *sps = &ctx->avc_state->sps[sl->id]; u32 nal_type = sl->data[0] & 0x1F; if ((sps->profile_idc == 118) || (sps->profile_idc == 128)) { ctx->is_mvc = GF_TRUE; } if (ctx->explicit) { cfg = svcc; } else if (nal_type == GF_AVC_NALU_SVC_SUBSEQ_PARAM) { cfg = svcc; is_svc = GF_TRUE; } else { cfg = avcc; } if (first || (is_svc && first_svc) ) { 
cfg->configurationVersion = 1; cfg->profile_compatibility = sps->prof_compat; cfg->AVCProfileIndication = sps->profile_idc; cfg->AVCLevelIndication = sps->level_idc; cfg->chroma_format = sps->chroma_format; cfg->luma_bit_depth = 8 + sps->luma_bit_depth_m8; cfg->chroma_bit_depth = 8 + sps->chroma_bit_depth_m8; /*try to patch ?*/ if (!gf_avc_is_rext_profile(cfg->AVCProfileIndication) && ((cfg->chroma_format>1) || (cfg->luma_bit_depth>8) || (cfg->chroma_bit_depth>8)) ) { if ((cfg->luma_bit_depth>8) || (cfg->chroma_bit_depth>8)) { cfg->AVCProfileIndication = 110; } else { cfg->AVCProfileIndication = (cfg->chroma_format==3) ? 244 : 122; } } if (sps->vui_parameters_present_flag && sps->vui.par_num && sps->vui.par_den) { sar->num = sps->vui.par_num; sar->den = sps->vui.par_den; } ctx->interlaced = sps->frame_mbs_only_flag ? GF_FALSE : GF_TRUE; /*disable frame rate scan, most bitstreams have wrong values there*/ if (first && (!ctx->fps.num || !ctx->fps.den) && sps->vui.timing_info_present_flag /*if detected FPS is greater than 1000, assume wrong timing info*/ && (sps->vui.time_scale <= 1000*sps->vui.num_units_in_tick) ) { /*ISO/IEC 14496-10 n11084 Table E-6*/ /* not used : u8 DeltaTfiDivisorTable[] = {1,1,1,2,2,2,2,3,3,4,6}; */ u8 DeltaTfiDivisorIdx; if (!sps->vui.pic_struct_present_flag) { DeltaTfiDivisorIdx = 1 + (1 - ctx->avc_state->s_info.field_pic_flag); } else { if (!ctx->avc_state->sei.pic_timing.pic_struct) DeltaTfiDivisorIdx = 2; else if (ctx->avc_state->sei.pic_timing.pic_struct == 8) DeltaTfiDivisorIdx = 6; else DeltaTfiDivisorIdx = (ctx->avc_state->sei.pic_timing.pic_struct+1) / 2; } if (!ctx->timescale) { ctx->cur_fps.num = 2 * sps->vui.time_scale; ctx->cur_fps.den = 2 * sps->vui.num_units_in_tick * DeltaTfiDivisorIdx; if (!ctx->fps.num && ctx->dts==ctx->fps.den) ctx->dts = ctx->cur_fps.den; } if (! 
sps->vui.fixed_frame_rate_flag) GF_LOG(GF_LOG_INFO, GF_LOG_PARSER, ("[%s] Possible Variable Frame Rate: VUI \"fixed_frame_rate_flag\" absent\n", ctx->log_name)); } ctx->fps = ctx->cur_fps; } first = GF_FALSE; if (is_svc) { first_svc = GF_FALSE; if (sps->width > max_ew) max_ew = sps->width; if (sps->height > max_eh) max_eh = sps->height; } else { if (sps->width > max_w) max_w = sps->width; if (sps->height > max_h) max_h = sps->height; } if (!ctx->analyze) gf_list_add(cfg->sequenceParameterSets, sl); } cfg = ctx->explicit ? svcc : avcc; count = gf_list_count(ctx->sps_ext); for (i=0; i<count; i++) { GF_NALUFFParam *sl = gf_list_get(ctx->sps_ext, i); if (!cfg->sequenceParameterSetExtensions) cfg->sequenceParameterSetExtensions = gf_list_new(); if (!ctx->analyze) gf_list_add(cfg->sequenceParameterSetExtensions, sl); } cfg = ctx->explicit ? svcc : avcc; count = gf_list_count(ctx->pps); for (i=0; i<count; i++) { GF_NALUFFParam *sl = gf_list_get(ctx->pps, i); if (!ctx->analyze) gf_list_add(cfg->pictureParameterSets, sl); } cfg = svcc; count = gf_list_count(ctx->pps_svc); for (i=0; i<count; i++) { GF_NALUFFParam *sl = gf_list_get(ctx->pps_svc, i); if (!ctx->analyze) gf_list_add(cfg->pictureParameterSets, sl); } *dsi = *dsi_enh = NULL; *dsi_size = *dsi_enh_size = 0; if (ctx->explicit) { gf_odf_avc_cfg_write(svcc, dsi, dsi_size); } else { gf_odf_avc_cfg_write(avcc, dsi, dsi_size); if (gf_list_count(svcc->sequenceParameterSets) || svcc->sequenceParameterSetExtensions) { gf_odf_avc_cfg_write(svcc, dsi_enh, dsi_enh_size); } } gf_list_reset(avcc->sequenceParameterSets); gf_list_reset(avcc->sequenceParameterSetExtensions); gf_list_reset(avcc->pictureParameterSets); gf_list_reset(svcc->sequenceParameterSets); gf_list_reset(svcc->sequenceParameterSetExtensions); gf_list_reset(svcc->pictureParameterSets); gf_odf_avc_cfg_del(avcc); gf_odf_avc_cfg_del(svcc); *max_width = max_w; *max_height = max_h; *max_enh_width = max_ew; *max_enh_height = max_eh; } static void 
naludmx_check_pid(GF_Filter *filter, GF_NALUDmxCtx *ctx) { u32 w, h, ew, eh; u8 *dsi, *dsi_enh; u32 dsi_size, dsi_enh_size; u32 crc_cfg, crc_cfg_enh; GF_Fraction sar; Bool has_hevc_base = GF_TRUE; Bool has_colr_info = GF_FALSE; if (ctx->analyze) { if (ctx->opid && !ctx->ps_modified) return; } else { if (!ctx->ps_modified) return; } ctx->ps_modified = GF_FALSE; dsi = dsi_enh = NULL; if (!ctx->timescale) { ctx->cur_fps = ctx->fps; if (!ctx->cur_fps.num || !ctx->cur_fps.den) { ctx->cur_fps.num = 25000; ctx->cur_fps.den = 1000; } } if (ctx->codecid==GF_CODECID_HEVC) { naludmx_create_hevc_decoder_config(ctx, &dsi, &dsi_size, &dsi_enh, &dsi_enh_size, &w, &h, &ew, &eh, &sar, &has_hevc_base); } else if (ctx->codecid==GF_CODECID_VVC) { naludmx_create_vvc_decoder_config(ctx, &dsi, &dsi_size, &dsi_enh, &dsi_enh_size, &w, &h, &ew, &eh, &sar, &has_hevc_base); } else { naludmx_create_avc_decoder_config(ctx, &dsi, &dsi_size, &dsi_enh, &dsi_enh_size, &w, &h, &ew, &eh, &sar); } crc_cfg = crc_cfg_enh = 0; if (dsi) crc_cfg = gf_crc_32(dsi, dsi_size); if (dsi_enh) crc_cfg_enh = gf_crc_32(dsi_enh, dsi_enh_size); if (!ctx->opid) { ctx->opid = gf_filter_pid_new(filter); naludmx_check_dur(filter, ctx); ctx->first_slice_in_au = GF_TRUE; } if ((ctx->crc_cfg == crc_cfg) && (ctx->crc_cfg_enh == crc_cfg_enh) && (ctx->width==w) && (ctx->height==h) && (ctx->sar.num * sar.den == ctx->sar.den * sar.num) ) { if (dsi) gf_free(dsi); if (dsi_enh) gf_free(dsi_enh); return; } naludmx_enqueue_or_dispatch(ctx, NULL, GF_TRUE); if (!ctx->analyze && (gf_list_count(ctx->pck_queue)>1)) { GF_LOG(dsi_enh ? GF_LOG_DEBUG : GF_LOG_ERROR, GF_LOG_PARSER, ("[%s] xPS changed but could not flush frames before signaling state change %s\n", ctx->log_name, dsi_enh ? 
"- likely scalable xPS update" : "!")); } //copy properties at init or reconfig gf_filter_pid_copy_properties(ctx->opid, ctx->ipid); gf_filter_pid_set_property(ctx->opid, GF_PROP_PID_STREAM_TYPE, & PROP_UINT(GF_STREAM_VISUAL)); gf_filter_pid_set_property(ctx->opid, GF_PROP_PID_UNFRAMED, NULL); if (!gf_filter_pid_get_property(ctx->ipid, GF_PROP_PID_ID)) gf_filter_pid_set_property(ctx->opid, GF_PROP_PID_ID, &PROP_UINT(1)); ctx->width = w; ctx->height = h; ctx->sar = sar; ctx->crc_cfg = crc_cfg; ctx->crc_cfg_enh = crc_cfg_enh; gf_filter_pid_set_property(ctx->opid, GF_PROP_PID_WIDTH, & PROP_UINT( ctx->width)); gf_filter_pid_set_property(ctx->opid, GF_PROP_PID_HEIGHT, & PROP_UINT( ctx->height)); if (ew && eh) { gf_filter_pid_set_property(ctx->opid, GF_PROP_PID_WIDTH_MAX, & PROP_UINT( ew )); gf_filter_pid_set_property(ctx->opid, GF_PROP_PID_HEIGHT_MAX, & PROP_UINT( eh )); } if (ctx->sar.den) gf_filter_pid_set_property(ctx->opid, GF_PROP_PID_SAR, & PROP_FRAC(ctx->sar)); else gf_filter_pid_set_property(ctx->opid, GF_PROP_PID_SAR, NULL); gf_filter_pid_set_property(ctx->opid, GF_PROP_PID_FPS, & PROP_FRAC(ctx->cur_fps)); gf_filter_pid_set_property(ctx->opid, GF_PROP_PID_TIMESCALE, & PROP_UINT(ctx->timescale ? 
ctx->timescale : ctx->cur_fps.num)); if (ctx->explicit || !has_hevc_base) { u32 enh_cid = GF_CODECID_SVC; if (ctx->codecid==GF_CODECID_HEVC) enh_cid = GF_CODECID_LHVC; gf_filter_pid_set_property(ctx->opid, GF_PROP_PID_CODECID, & PROP_UINT(enh_cid)); if (dsi) gf_filter_pid_set_property(ctx->opid, GF_PROP_PID_DECODER_CONFIG, &PROP_DATA_NO_COPY(dsi, dsi_size) ); } else { gf_filter_pid_set_property(ctx->opid, GF_PROP_PID_CODECID, & PROP_UINT(ctx->codecid)); if (dsi) gf_filter_pid_set_property(ctx->opid, GF_PROP_PID_DECODER_CONFIG, &PROP_DATA_NO_COPY(dsi, dsi_size) ); if (dsi_enh) gf_filter_pid_set_property(ctx->opid, GF_PROP_PID_DECODER_CONFIG_ENHANCEMENT, &PROP_DATA_NO_COPY(dsi_enh, dsi_enh_size) ); } if (ctx->bitrate) { gf_filter_pid_set_property(ctx->opid, GF_PROP_PID_BITRATE, & PROP_UINT(ctx->bitrate)); } if ((ctx->codecid==GF_CODECID_HEVC) && gf_list_count(ctx->vps) ) { GF_Err e = naludmx_set_hevc_oinf(ctx, NULL); if (e) { GF_LOG(GF_LOG_WARNING, GF_LOG_PARSER, ("[%s] Failed to create OINF chunk\n", ctx->log_name)); } naludmx_set_hevc_linf(ctx); } if (ctx->duration.num) gf_filter_pid_set_property(ctx->opid, GF_PROP_PID_DURATION, & PROP_FRAC64(ctx->duration)); if (ctx->is_file /* && ctx->index*/) { gf_filter_pid_set_property(ctx->opid, GF_PROP_PID_PLAYBACK_MODE, & PROP_UINT(GF_PLAYBACK_MODE_FASTFORWARD) ); } //set interlaced or remove interlaced property gf_filter_pid_set_property(ctx->opid, GF_PROP_PID_INTERLACED, ctx->interlaced ? 
& PROP_UINT(GF_TRUE) : NULL); if (ctx->codecid==GF_CODECID_HEVC) { HEVC_SPS *sps = &ctx->hevc_state->sps[ctx->hevc_state->sps_active_idx]; if (sps->colour_description_present_flag) { gf_filter_pid_set_property(ctx->opid, GF_PROP_PID_COLR_PRIMARIES, & PROP_UINT(sps->colour_primaries) ); gf_filter_pid_set_property(ctx->opid, GF_PROP_PID_COLR_TRANSFER, & PROP_UINT(sps->transfer_characteristic) ); gf_filter_pid_set_property(ctx->opid, GF_PROP_PID_COLR_MX, & PROP_UINT(sps->matrix_coeffs) ); gf_filter_pid_set_property(ctx->opid, GF_PROP_PID_COLR_RANGE, & PROP_BOOL(sps->video_full_range_flag) ); has_colr_info = GF_TRUE; } } else if (ctx->codecid==GF_CODECID_VVC) { } else { /*use the last active SPS*/ if (ctx->avc_state->sps[ctx->avc_state->sps_active_idx].vui_parameters_present_flag && ctx->avc_state->sps[ctx->avc_state->sps_active_idx].vui.colour_description_present_flag) { AVC_VUI *vui = &ctx->avc_state->sps[ctx->avc_state->sps_active_idx].vui; gf_filter_pid_set_property(ctx->opid, GF_PROP_PID_COLR_PRIMARIES, & PROP_UINT(vui->colour_primaries) ); gf_filter_pid_set_property(ctx->opid, GF_PROP_PID_COLR_TRANSFER, & PROP_UINT(vui->transfer_characteristics) ); gf_filter_pid_set_property(ctx->opid, GF_PROP_PID_COLR_MX, & PROP_UINT(vui->matrix_coefficients) ); gf_filter_pid_set_property(ctx->opid, GF_PROP_PID_COLR_RANGE, & PROP_BOOL(vui->video_full_range_flag) ); has_colr_info = GF_TRUE; } } if (!has_colr_info) { gf_filter_pid_set_property(ctx->opid, GF_PROP_PID_COLR_PRIMARIES, NULL); gf_filter_pid_set_property(ctx->opid, GF_PROP_PID_COLR_TRANSFER, NULL); gf_filter_pid_set_property(ctx->opid, GF_PROP_PID_COLR_MX, NULL); gf_filter_pid_set_property(ctx->opid, GF_PROP_PID_COLR_RANGE, NULL); } } static Bool naludmx_process_event(GF_Filter *filter, const GF_FilterEvent *evt) { u32 i; u64 file_pos = 0; GF_FilterEvent fevt; GF_NALUDmxCtx *ctx = gf_filter_get_udta(filter); switch (evt->base.type) { case GF_FEVT_PLAY: if (!ctx->is_playing) { ctx->is_playing = GF_TRUE; ctx->cts = 
ctx->dts = 0;
		}
		//non-file sources: no byte seeking possible, just reset parsing state
		if (! ctx->is_file) {
			if (!ctx->initial_play_done) {
				ctx->initial_play_done = GF_TRUE;
				if (evt->play.start_range<0.1) return GF_FALSE;
			}
			ctx->resume_from = 0;
			ctx->nal_store_size = 0;
			return GF_FALSE;
		}
		//negative index means "build index on first seeking play request"
		if (ctx->start_range && (ctx->index<0)) {
			ctx->index = -ctx->index;
			ctx->file_loaded = GF_FALSE;
			ctx->duration.den = ctx->duration.num = 0;
			//NOTE(review): "%d" with ctx->start_range looks type-mismatched if start_range is a floating-point range - confirm
			GF_LOG(GF_LOG_INFO, GF_LOG_PARSER, ("[%s] Play request from %d, building index\n", ctx->log_name, ctx->start_range));
			naludmx_check_dur(filter, ctx);
		}
		ctx->start_range = evt->play.start_range;
		ctx->in_seek = GF_TRUE;
		if (ctx->start_range) {
			//reset per-play statistics, then locate the index entry covering start_range
			ctx->nb_nalus = ctx->nb_i = ctx->nb_p = ctx->nb_b = ctx->nb_sp = ctx->nb_si = ctx->nb_sei = ctx->nb_idr = 0;
			for (i=1; i<ctx->index_size; i++) {
				if (ctx->indexes[i].duration>ctx->start_range) {
					ctx->cts = ctx->dts = (u64) (ctx->indexes[i-1].duration * ctx->cur_fps.num);
					file_pos = ctx->indexes[i-1].pos;
					break;
				}
			}
		}
		if (!ctx->initial_play_done) {
			ctx->initial_play_done = GF_TRUE;
			//seek will not change the current source state, don't send a seek
			if (!file_pos) {
				//very short streams, input is done before we get notified for play and everything stored in memory: flush
				if (gf_filter_pid_is_eos(ctx->ipid) && (ctx->nal_store_size)) {
					gf_filter_post_process_task(filter);
				}
				return GF_TRUE;
			}
		}
		ctx->nb_frames = 0;
		ctx->nb_nalus = 0;
		ctx->resume_from = 0;
		ctx->nal_store_size = 0;
		//post a seek
		GF_FEVT_INIT(fevt, GF_FEVT_SOURCE_SEEK, ctx->ipid);
		fevt.seek.start_offset = file_pos;
		gf_filter_pid_send_event(ctx->ipid, &fevt);
		//cancel event
		return GF_TRUE;

	case GF_FEVT_STOP:
		//don't cancel event
		ctx->is_playing = GF_FALSE;
		ctx->nal_store_size = 0;
		ctx->resume_from = 0;
		return GF_FALSE;

	case GF_FEVT_SET_SPEED:
		//cancel event
		return GF_TRUE;
	default:
		break;
	}
	//by default don't cancel event - to rework once we have downloading in place
	return GF_FALSE;
}

/* Advances the running cts/dts by one frame duration. */
static GFINLINE void naludmx_update_time(GF_NALUDmxCtx *ctx)
{
	assert(ctx->cur_fps.num);

	if (ctx->timescale) {
		//very first frame, no dts diff, assume 3000/90k. It should only hurt if we have several frames packet in the first packet sent
		u64 dts_inc = ctx->cur_fps.den ? ctx->cur_fps.den : 3000;
		ctx->cts += dts_inc;
		ctx->dts += dts_inc;
	} else {
		assert(ctx->cur_fps.den);
		ctx->cts += ctx->cur_fps.den;
		ctx->dts += ctx->cur_fps.den;
	}
}

/* Stores a parameter set (xPS) NAL in the list matching (codec, ps_type), keyed by ps_id.
   Deduplicates by CRC; an existing entry with same id but different CRC is replaced in place.
   Sets ctx->ps_modified whenever the stored state actually changes. */
static void naludmx_queue_param_set(GF_NALUDmxCtx *ctx, char *data, u32 size, u32 ps_type, s32 ps_id)
{
	GF_List *list = NULL, *alt_list = NULL;
	GF_NALUFFParam *sl;
	u32 i, count, crc;

	if (!size) return;
	crc = gf_crc_32(data, size);

	//select the destination list for this codec / NAL type, lazily creating optional lists
	if (ctx->codecid==GF_CODECID_HEVC) {
		switch (ps_type) {
		case GF_HEVC_NALU_VID_PARAM:
			if (!ctx->vps) ctx->vps = gf_list_new();
			list = ctx->vps;
			break;
		case GF_HEVC_NALU_SEQ_PARAM:
			list = ctx->sps;
			break;
		case GF_HEVC_NALU_PIC_PARAM:
			list = ctx->pps;
			break;
		default:
			assert(0);
			return;
		}
	} else if (ctx->codecid==GF_CODECID_VVC) {
		switch (ps_type) {
		case GF_VVC_NALU_VID_PARAM:
			if (!ctx->vps) ctx->vps = gf_list_new();
			list = ctx->vps;
			break;
		case GF_VVC_NALU_SEQ_PARAM:
			list = ctx->sps;
			break;
		case GF_VVC_NALU_PIC_PARAM:
			list = ctx->pps;
			break;
		case GF_VVC_NALU_DEC_PARAM:
			if (!ctx->vvc_dci) ctx->vvc_dci = gf_list_new();
			list = ctx->vvc_dci;
			break;
		case GF_VVC_NALU_APS_PREFIX:
			if (!ctx->vvc_aps_pre) ctx->vvc_aps_pre = gf_list_new();
			list = ctx->vvc_aps_pre;
			break;
		default:
			assert(0);
			return;
		}
	} else {
		switch (ps_type) {
		case GF_AVC_NALU_SVC_SUBSEQ_PARAM:
		case GF_AVC_NALU_SEQ_PARAM:
			list = ctx->sps;
			break;
		case GF_AVC_NALU_PIC_PARAM:
			list = ctx->pps;
			alt_list = ctx->pps_svc;
			break;
		case GF_AVC_NALU_SEQ_PARAM_EXT:
			if (!ctx->sps_ext) ctx->sps_ext = gf_list_new();
			list = ctx->sps_ext;
			break;
		default:
			assert(0);
			return;
		}
	}
	//look for an existing entry with the same parameter-set id
	sl = NULL;
	count = gf_list_count(list);
	for (i=0; i<count; i++) {
		sl = gf_list_get(list, i);
		if (sl->id != ps_id) {
			sl = NULL;
			continue;
		}
		//same ID, same CRC, we don't change our state
		if (sl->crc == crc) return;
		break;
	}
	//handle alt PPS list for SVC
	if (!sl && alt_list) {
		count = gf_list_count(alt_list);
		for (i=0; i<count; i++) {
			sl
= gf_list_get(alt_list, i);
			if (sl->id != ps_id) {
				sl = NULL;
				continue;
			}
			//same ID, same CRC, we don't change our state
			if (sl->crc == crc) return;
			break;
		}
	}
	if (sl) {
		//otherwise we keep this new param set
		sl->data = gf_realloc(sl->data, size);
		memcpy(sl->data, data, size);
		sl->size = size;
		sl->crc = crc;
		ctx->ps_modified = GF_TRUE;
		return;
	}
	//TODO we might want to purge the list after a while !!
	GF_SAFEALLOC(sl, GF_NALUFFParam);
	if (!sl) return;
	sl->data = gf_malloc(sizeof(char) * size);
	if (!sl->data) {
		gf_free(sl);
		return;
	}
	memcpy(sl->data, data, size);
	sl->size = size;
	sl->id = ps_id;
	sl->crc = crc;
	ctx->ps_modified = GF_TRUE;
	gf_list_add(list, sl);
}

/*Finalizes the first packet of the current access unit: sets its SAP type
(honoring forced-sync and SEI recovery-point / GDR roll info), interlacing,
a placeholder CTS when the input had no timestamp, pending subsample info
and dependency flags, then dispatches or enqueues queued packets.*/
static void naludmx_finalize_au_flags(GF_NALUDmxCtx *ctx)
{
	u64 ts;
	Bool is_rap = GF_FALSE;

	if (!ctx->first_pck_in_au)
		return;
	if (ctx->au_sap) {
		gf_filter_pck_set_sap(ctx->first_pck_in_au, ctx->au_sap);
		if (ctx->au_sap == GF_FILTER_SAP_1) {
			ctx->dts_last_IDR = gf_filter_pck_get_dts(ctx->first_pck_in_au);
			//PAFF: two fields per frame, DTS granularity is doubled
			if (ctx->is_paff) ctx->dts_last_IDR *= 2;
		}
		if (ctx->au_sap <= GF_FILTER_SAP_3) {
			is_rap = GF_TRUE;
		}
	}
	else if (ctx->has_islice && ctx->force_sync && (ctx->sei_recovery_frame_count==0)) {
		gf_filter_pck_set_sap(ctx->first_pck_in_au, GF_FILTER_SAP_1);
		if (!ctx->use_opengop_gdr) {
			ctx->use_opengop_gdr = 1;
			GF_LOG(GF_LOG_WARNING, GF_LOG_CODING, ("[%s] Forcing non-IDR samples with I slices to be marked as sync points - resulting file will not be ISOBMFF conformant\n", ctx->log_name));
		}
		is_rap = GF_TRUE;
	}
	/*set roll info sampleGroups info*/
	else if (!ctx->au_sap && ( (ctx->sei_recovery_frame_count >= 0) || ctx->has_islice) ) {
		/*generic GDR*/
		if (ctx->sei_recovery_frame_count > 0) {
			if (!ctx->use_opengop_gdr)
				ctx->use_opengop_gdr = 1;
			gf_filter_pck_set_sap(ctx->first_pck_in_au, GF_FILTER_SAP_4);
			gf_filter_pck_set_roll_info(ctx->first_pck_in_au, ctx->sei_recovery_frame_count);
		}
		/*open-GOP*/
		else if ((ctx->sei_recovery_frame_count == 0) && ctx->has_islice) {
			if (!ctx->use_opengop_gdr)
				ctx->use_opengop_gdr = 2;
			gf_filter_pck_set_sap(ctx->first_pck_in_au, GF_FILTER_SAP_3);
			is_rap = GF_TRUE;
		}
	}
	if (ctx->is_paff) {
		gf_filter_pck_set_interlaced(ctx->first_pck_in_au, ctx->bottom_field_flag ? 2 : 1);
	}

	//if TS is set, the packet was the first in AU in the input timed packet (eg PES), we reuse the input timing
	ts = gf_filter_pck_get_cts(ctx->first_pck_in_au);
	if (ts == GF_FILTER_NO_TS) {
		/*we store the POC (last POC minus the poc shift) as the CTS offset and re-update the CTS when dispatching*/
		assert(ctx->last_poc >= ctx->poc_shift);
		gf_filter_pck_set_cts(ctx->first_pck_in_au, CTS_POC_OFFSET_SAFETY + ctx->last_poc - ctx->poc_shift);
		//we use the carousel flag temporarly to indicate the cts must be recomputed
		gf_filter_pck_set_carousel_version(ctx->first_pck_in_au, 1);
	}

	if (ctx->subsamp_buffer_size) {
		gf_filter_pck_set_property(ctx->first_pck_in_au, GF_PROP_PCK_SUBS, &PROP_DATA(ctx->subsamp_buffer, ctx->subsamp_buffer_size) );
		ctx->subsamp_buffer_size = 0;
		ctx->subs_mapped_bytes = 0;
	}

	if (ctx->deps) {
		u8 flags = 0;
		//dependsOn
		flags = (is_rap) ? 2 : 1;
		flags <<= 2;
		//dependedOn
		flags |= ctx->has_ref_slices ? 1 : 2;
		flags <<= 2;
		//hasRedundant
		flags |= ctx->has_redundant ? 1 : 2;
		gf_filter_pck_set_dependency_flags(ctx->first_pck_in_au, flags);
	}
	ctx->has_ref_slices = GF_FALSE;
	ctx->has_redundant = GF_FALSE;

	//if we reuse input packets timing, we can dispatch asap.
	//otherwise if poc probe is done (we know the min_poc_diff between images) and we are not in strict mode, dispatch asap
	//otherwise we will need to wait for the next ref frame to make sure we know all pocs ...
	if (ctx->timescale || (!ctx->strict_poc && ctx->poc_probe_done) )
		naludmx_enqueue_or_dispatch(ctx, NULL, GF_TRUE);

	ctx->first_pck_in_au = NULL;
}

/*Tracks the largest NALU size seen so far; logs an error once a NALU
exceeds ctx->max_nalu_size_allowed (import settings must be changed).*/
static void naludmx_update_nalu_maxsize(GF_NALUDmxCtx *ctx, u32 size)
{
	if (ctx->max_nalu_size < size) {
		ctx->max_nalu_size = size;
		if (size > ctx->max_nalu_size_allowed) {
			GF_LOG(GF_LOG_ERROR, GF_LOG_PARSER, ("[%s] nal size %d larger than max allowed size %d - change import settings\n", ctx->log_name, size, ctx->max_nalu_size_allowed ));
		}
	}
}

/*Grows the last queued output packet by nb_bytes_to_add, rewriting the leading
NALU size field (and the pending subsample size entry when subsamples are on).
Returns GF_SERVICE_ERROR when no packet is queued, the expand error, or GF_OK.*/
GF_Err naludmx_realloc_last_pck(GF_NALUDmxCtx *ctx, u32 nb_bytes_to_add, u8 **data_ptr)
{
	GF_Err e;
	u8 *pck_data;
	u32 full_size;
	GF_FilterPacket *pck = gf_list_last(ctx->pck_queue);
	*data_ptr = NULL;
	if (!pck) {
		GF_LOG(GF_LOG_ERROR, GF_LOG_PARSER, ("[%s] attempt to reallocate a non-existing packet!\n", ctx->log_name));
		return GF_SERVICE_ERROR;
	}
	e = gf_filter_pck_expand(pck, nb_bytes_to_add, &pck_data, data_ptr, &full_size);
	if (e) {
		GF_LOG(GF_LOG_ERROR, GF_LOG_PARSER, ("[%s] Failed to reallocate packet buffer: %s\n", ctx->log_name, gf_error_to_string(e) ));
		return e;
	}
	assert(ctx->bs_w);
	//rewrite NALU size length
	full_size -= ctx->nal_length;
	gf_bs_reassign_buffer(ctx->bs_w, pck_data, ctx->nal_length);
	gf_bs_write_int(ctx->bs_w, full_size, 8*ctx->nal_length);
	naludmx_update_nalu_maxsize(ctx, full_size);
	//rewrite subsample size
	if (ctx->subsamples) {
		assert(ctx->subsamp_buffer_size>=14);
		//reassign to beginning of size field (after first u32 flags)
		gf_bs_reassign_buffer(ctx->bs_w, ctx->subsamp_buffer + ctx->subsamp_buffer_size-14 + 4, 14 - 4);
		gf_bs_write_u32(ctx->bs_w, full_size + ctx->nal_length);
	}
	return GF_OK;
}

/*Allocates a new output packet for one NALU; writes the length-prefix size field
unless skip_nal_field, and applies AU-start framing/timing when *au_start is set.*/
GF_FilterPacket *naludmx_start_nalu(GF_NALUDmxCtx *ctx, u32 nal_size, Bool skip_nal_field, Bool *au_start, u8 **pck_data)
{
	GF_FilterPacket *dst_pck = gf_filter_pck_new_alloc(ctx->opid, nal_size + (skip_nal_field ?
0 : ctx->nal_length), pck_data);
	if (!dst_pck) return NULL;

	if (!skip_nal_field) {
		if (!ctx->bs_w) ctx->bs_w = gf_bs_new(*pck_data, ctx->nal_length, GF_BITSTREAM_WRITE);
		else gf_bs_reassign_buffer(ctx->bs_w, *pck_data, ctx->nal_length);
		gf_bs_write_int(ctx->bs_w, nal_size, 8*ctx->nal_length);
	}

	if (*au_start) {
		ctx->first_pck_in_au = dst_pck;
		if (ctx->src_pck) gf_filter_pck_merge_properties(ctx->src_pck, dst_pck);
		gf_filter_pck_set_framing(dst_pck, GF_TRUE, GF_FALSE);
		//we reuse the timing of the input packet for the first nal of the first frame starting in this packet
		if (ctx->input_is_au_start) {
			ctx->input_is_au_start = GF_FALSE;
			gf_filter_pck_set_dts(dst_pck, ctx->dts);
			gf_filter_pck_set_cts(dst_pck, ctx->cts);
		} else {
			//we don't set the CTS, it will be set once we detect frame end
			gf_filter_pck_set_dts(dst_pck, ctx->dts);
		}
		//we use the carousel flag temporarly to indicate the cts must be recomputed
		gf_filter_pck_set_carousel_version(dst_pck, ctx->timescale ? 0 : 1);

		gf_filter_pck_set_duration(dst_pck, ctx->pck_duration ? ctx->pck_duration : ctx->cur_fps.den);
		if (ctx->in_seek) gf_filter_pck_set_seek_flag(dst_pck, GF_TRUE);

		naludmx_update_time(ctx);
		*au_start = GF_FALSE;
		ctx->nb_frames++;
	} else {
		gf_filter_pck_set_framing(dst_pck, GF_FALSE, GF_FALSE);
	}
	naludmx_update_nalu_maxsize(ctx, nal_size);

	naludmx_enqueue_or_dispatch(ctx, dst_pck, GF_FALSE);
	return dst_pck;
}

/*Appends one 14-byte subsample record (flags, size incl. length field, reserved,
priority, discardable) to the pending subsample buffer of the current AU.*/
void naludmx_add_subsample(GF_NALUDmxCtx *ctx, u32 subs_size, u8 subs_priority, u32 subs_reserved)
{
	if (ctx->subsamp_buffer_alloc < ctx->subsamp_buffer_size+14 ) {
		ctx->subsamp_buffer_alloc = ctx->subsamp_buffer_size+14;
		ctx->subsamp_buffer = gf_realloc(ctx->subsamp_buffer, ctx->subsamp_buffer_alloc);
	}
	assert(ctx->subsamp_buffer);
	gf_bs_reassign_buffer(ctx->bs_w, ctx->subsamp_buffer + ctx->subsamp_buffer_size, 14);
	gf_bs_write_u32(ctx->bs_w, 0); //flags
	gf_bs_write_u32(ctx->bs_w, subs_size + ctx->nal_length);
	gf_bs_write_u32(ctx->bs_w, subs_reserved); //reserved
	gf_bs_write_u8(ctx->bs_w, subs_priority); //priority
	gf_bs_write_u8(ctx->bs_w, 0); //discardable - todo
	ctx->subsamp_buffer_size += 14;
	ctx->subs_mapped_bytes += subs_size + ctx->nal_length;
}

/*Parses one HEVC NALU: queues VPS/SPS/PPS, buffers prefix SEI, updates slice
statistics and per-layer temporal-id info. Sets *skip_nal when the NALU must not
be forwarded as-is, *is_slice / *is_islice for slice NALUs. Returns the parser result.*/
static s32 naludmx_parse_nal_hevc(GF_NALUDmxCtx *ctx, char *data, u32 size, Bool *skip_nal, Bool *is_slice, Bool *is_islice)
{
#ifdef GPAC_DISABLE_HEVC
	return -1;
#else
	s32 ps_idx = 0;
	s32 res;
	u8 nal_unit_type, temporal_id, layer_id;
	*skip_nal = GF_FALSE;
	gf_bs_reassign_buffer(ctx->bs_r, data, size);
	res = gf_hevc_parse_nalu_bs(ctx->bs_r, ctx->hevc_state, &nal_unit_type, &temporal_id, &layer_id);
	ctx->nb_nalus++;
	if (res < 0) {
		if (res == -1) {
			GF_LOG(GF_LOG_ERROR, GF_LOG_PARSER, ("[%s] Warning: Error parsing NAL unit\n", ctx->log_name));
		}
		*skip_nal = GF_TRUE;
	}

	if (layer_id && ctx->nosvc) {
		*skip_nal = GF_TRUE;
		return 0;
	}

	switch (nal_unit_type) {
	case GF_HEVC_NALU_VID_PARAM:
		if (ctx->novpsext) {
			//this may modify nal_size, but we don't use it for bitstream reading
			ps_idx = gf_hevc_read_vps_ex(data, &size, ctx->hevc_state, GF_TRUE);
		} else {
			ps_idx =
ctx->hevc_state->last_parsed_vps_id;
		}
		if (ps_idx<0) {
			GF_LOG(GF_LOG_ERROR, GF_LOG_PARSER, ("[%s] Error parsing Video Param Set\n", ctx->log_name));
		} else {
			naludmx_queue_param_set(ctx, data, size, GF_HEVC_NALU_VID_PARAM, ps_idx);
		}
		*skip_nal = GF_TRUE;
		break;
	case GF_HEVC_NALU_SEQ_PARAM:
		ps_idx = ctx->hevc_state->last_parsed_sps_id;
		if (ps_idx<0) {
			GF_LOG(GF_LOG_ERROR, GF_LOG_PARSER, ("[%s] Error parsing Sequence Param Set\n", ctx->log_name));
		} else {
			naludmx_queue_param_set(ctx, data, size, GF_HEVC_NALU_SEQ_PARAM, ps_idx);
		}
		*skip_nal = GF_TRUE;
		break;
	case GF_HEVC_NALU_PIC_PARAM:
		ps_idx = ctx->hevc_state->last_parsed_pps_id;
		if (ps_idx<0) {
			GF_LOG(GF_LOG_ERROR, GF_LOG_PARSER, ("[%s] Error parsing Picture Param Set\n", ctx->log_name));
		} else {
			naludmx_queue_param_set(ctx, data, size, GF_HEVC_NALU_PIC_PARAM, ps_idx);
		}
		*skip_nal = GF_TRUE;
		break;
	case GF_HEVC_NALU_SEI_PREFIX:
		gf_hevc_parse_sei(data, size, ctx->hevc_state);
		if (!ctx->nosei) {
			ctx->nb_sei++;
			//prefix SEI is buffered (length-prefixed) and re-emitted with the next AU
			if (ctx->sei_buffer_alloc < ctx->sei_buffer_size + size + ctx->nal_length) {
				ctx->sei_buffer_alloc = ctx->sei_buffer_size + size + ctx->nal_length;
				ctx->sei_buffer = gf_realloc(ctx->sei_buffer, ctx->sei_buffer_alloc);
			}
			if (!ctx->bs_w) ctx->bs_w = gf_bs_new(ctx->sei_buffer + ctx->sei_buffer_size, ctx->nal_length + size, GF_BITSTREAM_WRITE);
			else gf_bs_reassign_buffer(ctx->bs_w, ctx->sei_buffer + ctx->sei_buffer_size, ctx->nal_length + size);
			gf_bs_write_int(ctx->bs_w, size, 8*ctx->nal_length);
			memcpy(ctx->sei_buffer + ctx->sei_buffer_size + ctx->nal_length, data, size);
			ctx->sei_buffer_size += size + ctx->nal_length;
		} else {
			ctx->nb_nalus--;
		}
		*skip_nal = GF_TRUE;
		break;
	case GF_HEVC_NALU_SEI_SUFFIX:
		if (! ctx->is_playing) return 0;
		if (ctx->nosei) {
			*skip_nal = GF_TRUE;
			ctx->nb_nalus--;
		} else {
			ctx->nb_sei++;
		}
		break;

	/*slice_segment_layer_rbsp*/
	case GF_HEVC_NALU_SLICE_STSA_N:
	case GF_HEVC_NALU_SLICE_STSA_R:
	case GF_HEVC_NALU_SLICE_RADL_R:
	case GF_HEVC_NALU_SLICE_RASL_R:
	case GF_HEVC_NALU_SLICE_RADL_N:
	case GF_HEVC_NALU_SLICE_RASL_N:
	case GF_HEVC_NALU_SLICE_TRAIL_N:
	case GF_HEVC_NALU_SLICE_TRAIL_R:
	case GF_HEVC_NALU_SLICE_TSA_N:
	case GF_HEVC_NALU_SLICE_TSA_R:
	case GF_HEVC_NALU_SLICE_BLA_W_LP:
	case GF_HEVC_NALU_SLICE_BLA_W_DLP:
	case GF_HEVC_NALU_SLICE_BLA_N_LP:
	case GF_HEVC_NALU_SLICE_IDR_W_DLP:
	case GF_HEVC_NALU_SLICE_IDR_N_LP:
	case GF_HEVC_NALU_SLICE_CRA:
		if (! ctx->is_playing) return 0;
		*is_slice = GF_TRUE;
		if (! *skip_nal) {
			//per-slice-type statistics, split between base (layer 0) and enhancement layers
			switch (ctx->hevc_state->s_info.slice_type) {
			case GF_HEVC_SLICE_TYPE_P:
				if (layer_id) ctx->nb_e_p++;
				else ctx->nb_p++;
				break;
			case GF_HEVC_SLICE_TYPE_I:
				if (layer_id) ctx->nb_e_i++;
				else ctx->nb_i++;
				*is_islice = GF_TRUE;
				break;
			case GF_HEVC_SLICE_TYPE_B:
				if (layer_id) ctx->nb_e_b++;
				else ctx->nb_b++;
				break;
			}
		}
		break;

	case GF_HEVC_NALU_ACCESS_UNIT:
		ctx->nb_aud++;
		if (!ctx->audelim) {
			*skip_nal = GF_TRUE;
		} else if (!ctx->opid) {
			ctx->has_initial_aud = GF_TRUE;
			//NOTE(review): copies 3 bytes of the AUD payload — assumes the NALU is at least 3 bytes; verify upstream size checks
			memcpy(ctx->init_aud, data, 3);
		}
		break;
	/*remove*/
	case GF_HEVC_NALU_FILLER_DATA:
	case GF_HEVC_NALU_END_OF_SEQ:
	case GF_HEVC_NALU_END_OF_STREAM:
		*skip_nal = GF_TRUE;
		break;

	//parsing is partial, see https://github.com/DolbyLaboratories/dlb_mp4base/blob/70a2e1d4d99a8439b7b8087bf50dd503eeea2291/src/esparser/parser_hevc.c#L1233
	case GF_HEVC_NALU_DV_RPU:
		ctx->hevc_state->dv_rpu = GF_TRUE;
		break;
	case GF_HEVC_NALU_DV_EL:
		ctx->hevc_state->dv_el = GF_TRUE;
		break;

	default:
		if (! ctx->is_playing) return 0;
		GF_LOG(GF_LOG_WARNING, GF_LOG_PARSER, ("[%s] NAL Unit type %d not handled - adding\n", ctx->log_name, nal_unit_type));
		break;
	}
	if (*skip_nal) return res;

	//record min/max temporal-id per layer for HEVC layer info (linf) signaling
	ctx->linf[layer_id].layer_id_plus_one = layer_id + 1;
	if (! ctx->linf[layer_id].max_temporal_id ) ctx->linf[layer_id].max_temporal_id = temporal_id;
	else if (ctx->linf[layer_id].max_temporal_id < temporal_id) ctx->linf[layer_id].max_temporal_id = temporal_id;
	if (! ctx->linf[layer_id].min_temporal_id ) ctx->linf[layer_id].min_temporal_id = temporal_id;
	else if (ctx->linf[layer_id].min_temporal_id > temporal_id) ctx->linf[layer_id].min_temporal_id = temporal_id;
	if (ctx->max_temporal_id[layer_id] < temporal_id)
		ctx->max_temporal_id[layer_id] = temporal_id;
	if (ctx->min_layer_id > layer_id) ctx->min_layer_id = layer_id;
	return res;
#endif // GPAC_DISABLE_HEVC
}

/*Parses one VVC NALU: queues VPS/SPS/PPS/DCI, buffers prefix SEI, updates slice
statistics and per-layer temporal-id info. Sets *skip_nal when the NALU must not
be forwarded as-is, *is_slice / *is_islice for slice NALUs. Returns the parser result.*/
static s32 naludmx_parse_nal_vvc(GF_NALUDmxCtx *ctx, char *data, u32 size, Bool *skip_nal, Bool *is_slice, Bool *is_islice)
{
	s32 ps_idx = 0;
	s32 res;
	u8 nal_unit_type, temporal_id, layer_id;
	*skip_nal = GF_FALSE;
	gf_bs_reassign_buffer(ctx->bs_r, data, size);
	res = gf_media_vvc_parse_nalu_bs(ctx->bs_r, ctx->vvc_state, &nal_unit_type, &temporal_id, &layer_id);
	ctx->nb_nalus++;
	if (res < 0) {
		if (res == -1) {
			GF_LOG(GF_LOG_ERROR, GF_LOG_PARSER, ("[%s] Warning: Error parsing NAL unit\n", ctx->log_name));
		}
		*skip_nal = GF_TRUE;
	}

	if (layer_id && ctx->nosvc) {
		*skip_nal = GF_TRUE;
		return 0;
	}

	switch (nal_unit_type) {
	case GF_VVC_NALU_VID_PARAM:
		if (ctx->novpsext) {
			//this may modify nal_size, but we don't use it for bitstream reading
//			ps_idx = gf_hevc_read_vps_ex(data, &size, ctx->hevc_state, GF_TRUE);
			ps_idx = ctx->vvc_state->last_parsed_vps_id;
		} else {
			ps_idx = ctx->vvc_state->last_parsed_vps_id;
		}
		if (ps_idx<0) {
			GF_LOG(GF_LOG_ERROR, GF_LOG_PARSER, ("[%s] Error parsing Video Param Set\n", ctx->log_name));
		} else {
			naludmx_queue_param_set(ctx, data, size, GF_VVC_NALU_VID_PARAM, ps_idx);
		}
		*skip_nal = GF_TRUE;
		break;
	case GF_VVC_NALU_SEQ_PARAM:
		ps_idx = ctx->vvc_state->last_parsed_sps_id;
		if (ps_idx<0) {
			GF_LOG(GF_LOG_ERROR, GF_LOG_PARSER, ("[%s] Error parsing Sequence Param Set\n", ctx->log_name));
		} else {
			naludmx_queue_param_set(ctx, data, size,
GF_VVC_NALU_SEQ_PARAM, ps_idx);
		}
		*skip_nal = GF_TRUE;
		break;
	case GF_VVC_NALU_PIC_PARAM:
		ps_idx = ctx->vvc_state->last_parsed_pps_id;
		if (ps_idx<0) {
			GF_LOG(GF_LOG_ERROR, GF_LOG_PARSER, ("[%s] Error parsing Picture Param Set\n", ctx->log_name));
		} else {
			naludmx_queue_param_set(ctx, data, size, GF_VVC_NALU_PIC_PARAM, ps_idx);
		}
		*skip_nal = GF_TRUE;
		break;
	case GF_VVC_NALU_DEC_PARAM:
		ps_idx = 0;
		naludmx_queue_param_set(ctx, data, size, GF_VVC_NALU_DEC_PARAM, ps_idx);
		*skip_nal = GF_TRUE;
		break;
	case GF_VVC_NALU_APS_PREFIX:
		//for now we keep APS in the stream
#if 0
		ps_idx = ctx->vvc_state->last_parsed_aps_id;
		if (ps_idx<0) {
			GF_LOG(GF_LOG_ERROR, GF_LOG_PARSER, ("[%s] Error parsing Decoder Param Set\n", ctx->log_name));
		} else {
			naludmx_queue_param_set(ctx, data, size, GF_VVC_NALU_APS_PREFIX, ps_idx);
		}
		*skip_nal = GF_TRUE;
#endif
		break;
	case GF_VVC_NALU_SEI_PREFIX:
		gf_media_vvc_parse_sei(data, size, ctx->vvc_state);
		if (!ctx->nosei) {
			ctx->nb_sei++;
			//prefix SEI is buffered (length-prefixed) and re-emitted with the next AU
			if (ctx->sei_buffer_alloc < ctx->sei_buffer_size + size + ctx->nal_length) {
				ctx->sei_buffer_alloc = ctx->sei_buffer_size + size + ctx->nal_length;
				ctx->sei_buffer = gf_realloc(ctx->sei_buffer, ctx->sei_buffer_alloc);
			}
			if (!ctx->bs_w) ctx->bs_w = gf_bs_new(ctx->sei_buffer + ctx->sei_buffer_size, ctx->nal_length + size, GF_BITSTREAM_WRITE);
			else gf_bs_reassign_buffer(ctx->bs_w, ctx->sei_buffer + ctx->sei_buffer_size, ctx->nal_length + size);
			gf_bs_write_int(ctx->bs_w, size, 8*ctx->nal_length);
			memcpy(ctx->sei_buffer + ctx->sei_buffer_size + ctx->nal_length, data, size);
			ctx->sei_buffer_size += size + ctx->nal_length;
		} else {
			ctx->nb_nalus--;
		}
		*skip_nal = GF_TRUE;
		break;
	case GF_VVC_NALU_SEI_SUFFIX:
		if (! ctx->is_playing) return 0;
		if (ctx->nosei) {
			*skip_nal = GF_TRUE;
			ctx->nb_nalus--;
		} else {
			ctx->nb_sei++;
		}
		break;
	case GF_VVC_NALU_PIC_HEADER:
		if (! ctx->is_playing) return 0;
		break;

	/*slice_segment_layer_rbsp*/
	case GF_VVC_NALU_SLICE_TRAIL:
	case GF_VVC_NALU_SLICE_STSA:
	case GF_VVC_NALU_SLICE_RADL:
	case GF_VVC_NALU_SLICE_RASL:
	case GF_VVC_NALU_SLICE_IDR_W_RADL:
	case GF_VVC_NALU_SLICE_IDR_N_LP:
	case GF_VVC_NALU_SLICE_CRA:
	case GF_VVC_NALU_SLICE_GDR:
		if (! ctx->is_playing) return 0;
		*is_slice = GF_TRUE;
		if (! *skip_nal) {
			//per-slice-type statistics, split between base (layer 0) and enhancement layers
			switch (ctx->vvc_state->s_info.slice_type) {
			case GF_VVC_SLICE_TYPE_P:
				if (layer_id) ctx->nb_e_p++;
				else ctx->nb_p++;
				break;
			case GF_VVC_SLICE_TYPE_I:
				if (layer_id) ctx->nb_e_i++;
				else ctx->nb_i++;
				*is_islice = GF_TRUE;
				break;
			case GF_VVC_SLICE_TYPE_B:
				if (layer_id) ctx->nb_e_b++;
				else ctx->nb_b++;
				break;
			case GF_VVC_SLICE_TYPE_UNKNOWN:
				ctx->vvc_no_stats = GF_TRUE;
				break;
			}
		}
		break;

	case GF_VVC_NALU_ACCESS_UNIT:
		ctx->nb_aud++;
		//no skip AUD in VVC
		if (!ctx->opid) {
			ctx->has_initial_aud = GF_TRUE;
			//NOTE(review): copies 3 bytes of the AUD payload — assumes the NALU is at least 3 bytes; verify upstream size checks
			memcpy(ctx->init_aud, data, 3);
		}
		break;
	/*remove*/
	case GF_VVC_NALU_FILLER_DATA:
	case GF_VVC_NALU_END_OF_SEQ:
	case GF_VVC_NALU_END_OF_STREAM:
		*skip_nal = GF_TRUE;
		break;

	case GF_VVC_NALU_OPI:
		if (! ctx->is_playing) return 0;
		break;
	default:
		if (! ctx->is_playing) return 0;
		GF_LOG(GF_LOG_WARNING, GF_LOG_PARSER, ("[%s] NAL Unit type %d not handled - adding\n", ctx->log_name, nal_unit_type));
		break;
	}
	if (*skip_nal) return res;

	//record min/max temporal-id per layer for layer info signaling
	ctx->linf[layer_id].layer_id_plus_one = layer_id + 1;
	if (! ctx->linf[layer_id].max_temporal_id ) ctx->linf[layer_id].max_temporal_id = temporal_id;
	else if (ctx->linf[layer_id].max_temporal_id < temporal_id) ctx->linf[layer_id].max_temporal_id = temporal_id;
	if (! ctx->linf[layer_id].min_temporal_id ) ctx->linf[layer_id].min_temporal_id = temporal_id;
	else if (ctx->linf[layer_id].min_temporal_id > temporal_id) ctx->linf[layer_id].min_temporal_id = temporal_id;
	if (ctx->max_temporal_id[layer_id] < temporal_id)
		ctx->max_temporal_id[layer_id] = temporal_id;
	if (ctx->min_layer_id > layer_id) ctx->min_layer_id = layer_id;
	return res;
}

/*Parses one AVC/H.264 NALU: queues SPS/PPS/SPS-ext, buffers and reformats SEI,
updates slice statistics and moves PPS used by SVC slices to the SVC list.
Sets *skip_nal when the NALU must not be forwarded as-is. Returns the parser
result (1 for access unit delimiters kept in stream).*/
static s32 naludmx_parse_nal_avc(GF_NALUDmxCtx *ctx, char *data, u32 size, u32 nal_type, Bool *skip_nal, Bool *is_slice, Bool *is_islice)
{
	s32 ps_idx = 0;
	s32 res = 0;
	gf_bs_reassign_buffer(ctx->bs_r, data, size);
	*skip_nal = GF_FALSE;
	res = gf_avc_parse_nalu(ctx->bs_r, ctx->avc_state);
	if (res < 0) {
		if (res == -1) {
			GF_LOG(GF_LOG_ERROR, GF_LOG_PARSER, ("[%s] Warning: Error parsing NAL unit\n", ctx->log_name));
		}
		*skip_nal = GF_TRUE;
	}
	ctx->nb_nalus++;

	switch (nal_type) {
	case GF_AVC_NALU_SVC_SUBSEQ_PARAM:
	case GF_AVC_NALU_SEQ_PARAM:
		ps_idx = ctx->avc_state->last_ps_idx;
		if (ps_idx<0) {
			if (ctx->avc_state->sps[0].profile_idc) {
				GF_LOG(ctx->avc_state->sps[0].profile_idc ?
GF_LOG_WARNING : GF_LOG_ERROR, GF_LOG_PARSER, ("[%s] Error parsing Sequence Param Set\n", ctx->log_name));
			}
		} else {
			naludmx_queue_param_set(ctx, data, size, GF_AVC_NALU_SEQ_PARAM, ps_idx);
		}
		*skip_nal = GF_TRUE;
		return 0;
	case GF_AVC_NALU_PIC_PARAM:
		ps_idx = ctx->avc_state->last_ps_idx;
		if (ps_idx<0) {
			GF_LOG(GF_LOG_ERROR, GF_LOG_PARSER, ("[%s] Error parsing Picture Param Set\n", ctx->log_name));
		} else {
			naludmx_queue_param_set(ctx, data, size, GF_AVC_NALU_PIC_PARAM, ps_idx);
		}
		*skip_nal = GF_TRUE;
		return 0;
	case GF_AVC_NALU_SEQ_PARAM_EXT:
		ps_idx = ctx->avc_state->last_ps_idx;
		if (ps_idx<0) {
			GF_LOG(GF_LOG_ERROR, GF_LOG_PARSER, ("[%s] Error parsing Sequence Param Set Extension\n", ctx->log_name));
		} else {
			naludmx_queue_param_set(ctx, data, size, GF_AVC_NALU_SEQ_PARAM_EXT, ps_idx);
		}
		*skip_nal = GF_TRUE;
		return 0;
	case GF_AVC_NALU_SEI:
		//SEI only buffered once an SPS is active; payload may be rewritten (seirw)
		if (ctx->avc_state->sps_active_idx != -1) {
			u32 rw_sei_size, sei_size = size;
			if (ctx->sei_buffer_alloc < ctx->sei_buffer_size + sei_size + ctx->nal_length) {
				ctx->sei_buffer_alloc = ctx->sei_buffer_size + sei_size + ctx->nal_length;
				ctx->sei_buffer = gf_realloc(ctx->sei_buffer, ctx->sei_buffer_alloc);
			}
			if (!ctx->bs_w) ctx->bs_w = gf_bs_new(ctx->sei_buffer + ctx->sei_buffer_size, ctx->nal_length + sei_size, GF_BITSTREAM_WRITE);
			else gf_bs_reassign_buffer(ctx->bs_w, ctx->sei_buffer + ctx->sei_buffer_size, ctx->nal_length + sei_size);
			gf_bs_write_int(ctx->bs_w, sei_size, 8*ctx->nal_length);
			memcpy(ctx->sei_buffer + ctx->sei_buffer_size + ctx->nal_length, data, sei_size);
			rw_sei_size = gf_media_avc_reformat_sei(ctx->sei_buffer + ctx->sei_buffer_size + ctx->nal_length, sei_size, ctx->seirw, ctx->avc_state);
			if (rw_sei_size < sei_size) {
				//SEI shrank after reformat: patch the stored length prefix
				gf_bs_seek(ctx->bs_w, 0);
				gf_bs_write_int(ctx->bs_w, rw_sei_size, 8*ctx->nal_length);
			}
			*skip_nal = GF_TRUE;
			ctx->sei_buffer_size += rw_sei_size + ctx->nal_length;

			if (ctx->nosei) {
				ctx->sei_buffer_size = 0;
			} else {
				ctx->nb_sei++;
			}
		}
		return 0;

	case GF_AVC_NALU_ACCESS_UNIT:
		ctx->nb_aud++;
		if (!ctx->audelim) {
			*skip_nal = GF_TRUE;
		} else if (!ctx->opid) {
			ctx->has_initial_aud = GF_TRUE;
			//NOTE(review): copies 2 bytes of the AUD payload — assumes the NALU is at least 2 bytes; verify upstream size checks
			memcpy(ctx->init_aud, data, 2);
		}
		return 1;
	/*remove*/
	case GF_AVC_NALU_FILLER_DATA:
	case GF_AVC_NALU_END_OF_SEQ:
	case GF_AVC_NALU_END_OF_STREAM:
		*skip_nal = GF_TRUE;
		return 0;

	//update stats
	case GF_AVC_NALU_NON_IDR_SLICE:
	case GF_AVC_NALU_DP_A_SLICE:
	case GF_AVC_NALU_DP_B_SLICE:
	case GF_AVC_NALU_DP_C_SLICE:
	case GF_AVC_NALU_IDR_SLICE:
		*is_slice = GF_TRUE;
		switch (ctx->avc_state->s_info.slice_type) {
		case GF_AVC_TYPE_P:
		case GF_AVC_TYPE2_P:
			ctx->nb_p++;
			break;
		case GF_AVC_TYPE_I:
		case GF_AVC_TYPE2_I:
			ctx->nb_i++;
			*is_islice = GF_TRUE;
			break;
		case GF_AVC_TYPE_B:
		case GF_AVC_TYPE2_B:
			ctx->nb_b++;
			break;
		case GF_AVC_TYPE_SP:
		case GF_AVC_TYPE2_SP:
			ctx->nb_sp++;
			break;
		case GF_AVC_TYPE_SI:
		case GF_AVC_TYPE2_SI:
			ctx->nb_si++;
			break;
		}
		break;

	case GF_AVC_NALU_SVC_SLICE:
		if (!ctx->explicit) {
			u32 i;
			for (i = 0; i < gf_list_count(ctx->pps); i ++) {
				GF_NALUFFParam *slc = (GF_NALUFFParam*)gf_list_get(ctx->pps, i);
				if (ctx->avc_state->s_info.pps && ctx->avc_state->s_info.pps->id == slc->id) {
					/* This PPS is used by an SVC NAL unit, it should be moved to the SVC Config Record) */
					gf_list_rem(ctx->pps, i);
					i--;
					if (!ctx->pps_svc) ctx->pps_svc = gf_list_new(ctx->pps_svc);
					gf_list_add(ctx->pps_svc, slc);
					ctx->ps_modified = GF_TRUE;
				}
			}
		}
		*is_slice = GF_TRUE;
		//we disable temporal scalability when parsing mvc - never used and many encoders screw up POC in enhancemen
		if (ctx->is_mvc && (res>=0)) {
			res=0;
			ctx->avc_state->s_info.poc = ctx->last_poc;
		}
		if (ctx->avc_state->s_info.sps) {
			//enhancement-layer slice statistics, stored on the active SPS
			switch (ctx->avc_state->s_info.slice_type) {
			case GF_AVC_TYPE_P:
			case GF_AVC_TYPE2_P:
				ctx->avc_state->s_info.sps->nb_ep++;
				break;
			case GF_AVC_TYPE_I:
			case GF_AVC_TYPE2_I:
				ctx->avc_state->s_info.sps->nb_ei++;
				break;
			case GF_AVC_TYPE_B:
			case GF_AVC_TYPE2_B:
				ctx->avc_state->s_info.sps->nb_eb++;
				break;
			}
		}
		break;
	case GF_AVC_NALU_SLICE_AUX:
		*is_slice = GF_TRUE;
		break;
	}
	return res;
}

static void
naldmx_switch_timestamps(GF_NALUDmxCtx *ctx, GF_FilterPacket *pck)
{
	//input pid sets some timescale - we flushed pending data , update cts
	if (ctx->timescale) {
		u64 ts = gf_filter_pck_get_cts(pck);
		if (ts != GF_FILTER_NO_TS) {
			ctx->prev_cts = ctx->cts;
			ctx->cts = ts;
		}
		ts = gf_filter_pck_get_dts(pck);
		if (ts != GF_FILTER_NO_TS) {
			if (ctx->full_au_source) {
				ctx->prev_dts = ctx->dts;
				ctx->dts = ts;
			} else {
				GF_FilterClockType ck_type = gf_filter_pid_get_clock_info(ctx->ipid, NULL, NULL);
				//on PCR discontinuity, accept the new DTS; otherwise only move DTS forward
				if (ck_type==GF_FILTER_CLOCK_PCR_DISC) ctx->dts = ts;
				else if (ctx->dts<ts) ctx->dts=ts;

				if (!ctx->prev_dts) ctx->prev_dts = ts;
				else if (ctx->prev_dts != ts) {
					//track the smallest DTS delta seen as the frame duration estimate
					u64 diff = ts;
					diff -= ctx->prev_dts;
					if (!ctx->cur_fps.den)
						ctx->cur_fps.den = (u32) diff;
					else if (ctx->cur_fps.den > diff)
						ctx->cur_fps.den = (u32) diff;

					ctx->prev_dts = ts;
				}
			}
		}
		ctx->pck_duration = gf_filter_pck_get_duration(pck);
		if (ctx->src_pck) gf_filter_pck_unref(ctx->src_pck);
		ctx->src_pck = pck;
		gf_filter_pck_ref_props(&ctx->src_pck);
		//store framing flags. If input_is_au_start, the first NAL of the first frame beginning in this packet will
		//use the DTS/CTS of the input packet, otherwise we will use our internal POC recompute
		gf_filter_pck_get_framing(pck, &ctx->input_is_au_start, NULL);
	}
}

/*Consumes bytes_drop bytes from the carried-over byte count of the previous input
packet; once all carried bytes are consumed, switches timing to the current packet
and drops the input packet if requested.*/
static void naldmx_check_timestamp_switch(GF_NALUDmxCtx *ctx, u32 *nalu_store_before, u32 bytes_drop, Bool *drop_packet, GF_FilterPacket *pck)
{
	if (*nalu_store_before) {
		if (*nalu_store_before > bytes_drop) {
			*nalu_store_before -= bytes_drop;
		} else {
			//all data from previous frame consumed, update timestamps with info from current packet
			*nalu_store_before = 0;
			naldmx_switch_timestamps(ctx, pck);
			if (*drop_packet) {
				gf_filter_pid_drop_packet(ctx->ipid);
				*drop_packet = GF_FALSE;
			}
		}
	}
}

/*Bitstream-parser logging callback: emits field name, optional indices,
value and (in verbose mode) bit count at debug level.*/
static void naldmx_bs_log(void *udta, const char *field_name, u32 nb_bits, u64 field_val, s32 idx1, s32 idx2, s32 idx3)
{
	GF_NALUDmxCtx *ctx = (GF_NALUDmxCtx *) udta;
	GF_LOG(GF_LOG_DEBUG, GF_LOG_PARSER, (" %s", field_name));
	if (idx1>=0) {
		GF_LOG(GF_LOG_DEBUG, GF_LOG_PARSER, ("_%d", idx1));
		if (idx2>=0) {
			GF_LOG(GF_LOG_DEBUG, GF_LOG_PARSER, ("_%d", idx2));
			if (idx3>=0) {
				GF_LOG(GF_LOG_DEBUG, GF_LOG_PARSER, ("_%d", idx3));
			}
		}
	}
	GF_LOG(GF_LOG_DEBUG, GF_LOG_PARSER, ("=\""LLD, field_val));
	if ((ctx->bsdbg==2) && ((s32) nb_bits > 1) )
		GF_LOG(GF_LOG_DEBUG, GF_LOG_PARSER, ("(%u)", nb_bits));
	GF_LOG(GF_LOG_DEBUG, GF_LOG_PARSER, ("\" "));
}

/*Main filter processing entry: accumulates input bytes and demuxes AnnexB NALUs.*/
GF_Err naludmx_process(GF_Filter *filter)
{
	GF_NALUDmxCtx *ctx = gf_filter_get_udta(filter);
	GF_FilterPacket *pck;
	GF_Err e;
	u8 *start;
	u32 nalu_before = ctx->nb_nalus;
	u32 nalu_store_before = 0;
	s32 remain;
	Bool is_eos = GF_FALSE;
	Bool drop_packet = GF_FALSE;
	u64 byte_offset = GF_FILTER_NO_BO;

	//always reparse duration
	if (!ctx->file_loaded)
		naludmx_check_dur(filter, ctx);

	pck = gf_filter_pid_get_packet(ctx->ipid);
	if (!ctx->resume_from && !pck) {
		if (gf_filter_pid_is_eos(ctx->ipid)) {
			if (ctx->nal_store_size) {
				if (!ctx->is_playing)
					return GF_OK;
				start = ctx->nal_store;
				remain = ctx->nal_store_size;
				is_eos =
GF_TRUE; goto naldmx_flush; } if (ctx->first_pck_in_au) { naludmx_finalize_au_flags(ctx); } //single-frame stream if (!ctx->poc_diff) ctx->poc_diff = 1; ctx->strict_poc = STRICT_POC_OFF; naludmx_enqueue_or_dispatch(ctx, NULL, GF_TRUE); if (ctx->src_pck) gf_filter_pck_unref(ctx->src_pck); ctx->src_pck = NULL; if (!ctx->opid) return GF_EOS; gf_filter_pid_set_info(ctx->opid, GF_PROP_PID_MAX_NALU_SIZE, &PROP_UINT(ctx->max_nalu_size) ); if (ctx->codecid==GF_CODECID_HEVC) { naludmx_set_hevc_oinf(ctx, ctx->max_temporal_id); naludmx_set_hevc_linf(ctx); gf_filter_pid_set_info_str(ctx->opid, "hevc:min_lid", &PROP_UINT(ctx->min_layer_id) ); } if (ctx->opid) gf_filter_pid_set_eos(ctx->opid); return GF_EOS; } return GF_OK; } if (!ctx->is_playing && ctx->opid) return GF_OK; //if we have bytes from previous packet in the header, we cannot switch timing until we know what these bytes are if (!ctx->nal_store_size) naldmx_switch_timestamps(ctx, pck); nalu_store_before = ctx->nal_store_size; if (!ctx->resume_from && pck) { u32 pck_size; const u8 *data = gf_filter_pck_get_data(pck, &pck_size); if (ctx->nal_store_alloc < ctx->nal_store_size + pck_size) { ctx->nal_store_alloc = ctx->nal_store_size + pck_size; ctx->nal_store = gf_realloc(ctx->nal_store, sizeof(char)*ctx->nal_store_alloc); if (!ctx->nal_store) { ctx->nal_store_alloc = 0; return GF_OUT_OF_MEM; } } byte_offset = gf_filter_pck_get_byte_offset(pck); if (byte_offset != GF_FILTER_NO_BO) byte_offset -= ctx->nal_store_size; memcpy(ctx->nal_store + ctx->nal_store_size, data, sizeof(char)*pck_size); ctx->nal_store_size += pck_size; drop_packet = GF_TRUE; } start = ctx->nal_store; remain = ctx->nal_store_size; if (ctx->resume_from) { if (ctx->opid && gf_filter_pid_would_block(ctx->opid)) return GF_OK; assert(ctx->resume_from < ctx->nal_store_size); start += ctx->resume_from; remain -= ctx->resume_from; ctx->resume_from = 0; if (!pck && gf_filter_pid_is_eos(ctx->ipid)) is_eos = GF_TRUE; } naldmx_flush: if (!ctx->bs_r) { ctx->bs_r = 
gf_bs_new(start, remain, GF_BITSTREAM_READ); #ifndef GPAC_DISABLE_LOG if (ctx->bsdbg && gf_log_tool_level_on(GF_LOG_PARSER, GF_LOG_DEBUG)) gf_bs_set_logger(ctx->bs_r, naldmx_bs_log, ctx); #endif } else { gf_bs_reassign_buffer(ctx->bs_r, start, remain); } assert(remain>=0); while (remain) { u8 *pck_data; u8 *nal_data; u32 nal_size; s32 current; Bool skip_nal = GF_FALSE; u32 sc_size=0; u32 nal_type = 0; u32 nal_ref_idc = 0; s32 next=0; u32 next_sc_size=0; s32 nal_parse_result; Bool slice_is_ref, slice_force_ref; Bool is_slice = GF_FALSE; Bool is_islice = GF_FALSE; Bool bottom_field_flag = GF_FALSE; Bool au_start; u32 avc_svc_subs_reserved = 0; u8 avc_svc_subs_priority = 0; Bool recovery_point_valid = GF_FALSE; u32 recovery_point_frame_cnt = 0; Bool bIntraSlice = GF_FALSE; GF_FilterSAPType au_sap_type = GF_FILTER_SAP_NONE; Bool slice_is_b = GF_FALSE; Bool check_dep = GF_FALSE; s32 slice_poc = 0; //not enough bytes to parse start code + nal hdr if (!is_eos && (remain<6)) { break; } //locate next start code current = gf_media_nalu_next_start_code(start, remain, &sc_size); if (current == remain) current = -1; //no start code: if eos or full AU dispatch mode, send remaining otherwise gather if (current<0) { if (!is_eos && !ctx->full_au_source) { break; } e = naludmx_realloc_last_pck(ctx, (u32) remain, &pck_data); if (e==GF_OK) memcpy(pck_data, start, (size_t) remain); remain = 0; break; } assert(current>=0); //skip if no output pid if (!ctx->opid && current) { assert(remain>=current); assert((s32) current >= 0); start += current; remain -= current; current = 0; } //dispatch remaining bytes if (current>0) { //flush remaining bytes in NAL if (gf_list_count(ctx->pck_queue)) { e = naludmx_realloc_last_pck(ctx, current, &pck_data); if (e==GF_OK) { memcpy(pck_data, start, current); } } assert(remain>=current); start += current; remain -= current; naldmx_check_timestamp_switch(ctx, &nalu_store_before, current, &drop_packet, pck); } if (!remain) break; //not enough bytes to parse 
start code + nal hdr if (!is_eos && (remain<6)) { break; } nal_data = start + sc_size; nal_size = remain - sc_size; //figure out which nal we need to completely load if (ctx->codecid==GF_CODECID_HEVC) { nal_type = nal_data[0]; nal_type = (nal_type & 0x7E) >> 1; switch (nal_type) { case GF_HEVC_NALU_VID_PARAM: case GF_HEVC_NALU_SEQ_PARAM: case GF_HEVC_NALU_PIC_PARAM: case GF_HEVC_NALU_SEI_PREFIX: case GF_HEVC_NALU_SEI_SUFFIX: break; case GF_HEVC_NALU_SLICE_TRAIL_N: case GF_HEVC_NALU_SLICE_TSA_N: case GF_HEVC_NALU_SLICE_STSA_N: case GF_HEVC_NALU_SLICE_RADL_N: case GF_HEVC_NALU_SLICE_RASL_N: case GF_HEVC_NALU_SLICE_RSV_VCL_N10: case GF_HEVC_NALU_SLICE_RSV_VCL_N12: case GF_HEVC_NALU_SLICE_RSV_VCL_N14: check_dep = GF_TRUE; break; default: if (nal_type<GF_HEVC_NALU_VID_PARAM) nal_ref_idc = GF_TRUE; break; } } else if (ctx->codecid==GF_CODECID_VVC) { nal_type = nal_data[1]>>3; switch (nal_type) { case GF_VVC_NALU_OPI: case GF_VVC_NALU_DEC_PARAM: case GF_VVC_NALU_VID_PARAM: case GF_VVC_NALU_SEQ_PARAM: case GF_VVC_NALU_PIC_PARAM: case GF_VVC_NALU_SEI_PREFIX: case GF_VVC_NALU_SEI_SUFFIX: case GF_VVC_NALU_APS_PREFIX: case GF_VVC_NALU_APS_SUFFIX: case GF_VVC_NALU_PIC_HEADER: break; case GF_VVC_NALU_SLICE_TRAIL: case GF_VVC_NALU_SLICE_STSA: case GF_VVC_NALU_SLICE_RADL: case GF_VVC_NALU_SLICE_RASL: case GF_VVC_NALU_SLICE_IDR_W_RADL: case GF_VVC_NALU_SLICE_IDR_N_LP: case GF_VVC_NALU_SLICE_CRA: case GF_VVC_NALU_SLICE_GDR: if (ctx->deps) { check_dep = GF_TRUE; } break; default: if (nal_type<GF_HEVC_NALU_VID_PARAM) nal_ref_idc = GF_TRUE; break; } } else { nal_type = nal_data[0] & 0x1F; nal_ref_idc = (nal_data[0] & 0x60) >> 5; } //locate next NAL start next = gf_media_nalu_next_start_code(nal_data, nal_size, &next_sc_size); if (!is_eos && (next == nal_size) && !ctx->full_au_source) { next = -1; } //next nal start not found, wait if (next<0) { break; } //this is our exact NAL size, without start code nal_size = next; if (ctx->codecid==GF_CODECID_HEVC) { nal_parse_result = 
naludmx_parse_nal_hevc(ctx, nal_data, nal_size, &skip_nal, &is_slice, &is_islice); } else if (ctx->codecid==GF_CODECID_VVC) { nal_parse_result = naludmx_parse_nal_vvc(ctx, nal_data, nal_size, &skip_nal, &is_slice, &is_islice); } else { nal_parse_result = naludmx_parse_nal_avc(ctx, nal_data, nal_size, nal_type, &skip_nal, &is_slice, &is_islice); } //dispatch right away if analyze if (ctx->analyze) { skip_nal = GF_FALSE; ctx->sei_buffer_size = 0; } //new frame - if no slices, we detected the new frame on AU delimiter, don't flush new frame ! if ((nal_parse_result>0) && !ctx->first_slice_in_au) { //new frame - we flush later on naludmx_finalize_au_flags(ctx); ctx->has_islice = GF_FALSE; ctx->first_slice_in_au = GF_TRUE; ctx->sei_recovery_frame_count = -1; ctx->au_sap = GF_FILTER_SAP_NONE; ctx->bottom_field_flag = GF_FALSE; } naludmx_check_pid(filter, ctx); if (!ctx->opid) skip_nal = GF_TRUE; if (skip_nal) { nal_size += sc_size; assert((u32) remain >= nal_size); start += nal_size; remain -= nal_size; naldmx_check_timestamp_switch(ctx, &nalu_store_before, nal_size, &drop_packet, pck); continue; } if (!ctx->is_playing) { ctx->resume_from = (u32) (start - ctx->nal_store); assert(ctx->resume_from<=ctx->nal_store_size); GF_LOG(GF_LOG_DEBUG, GF_LOG_PARSER, ("[%s] not yet playing\n", ctx->log_name)); if (drop_packet) gf_filter_pid_drop_packet(ctx->ipid); return GF_OK; } if (ctx->in_seek) { u64 nb_frames_at_seek = (u64) (ctx->start_range * ctx->cur_fps.num); if (ctx->cts + ctx->cur_fps.den >= nb_frames_at_seek) { //u32 samples_to_discard = (ctx->cts + ctx->dts_inc) - nb_samples_at_seek; ctx->in_seek = GF_FALSE; } } if (nal_parse_result<0) { if (byte_offset != GF_FILTER_NO_BO) { u64 bo = byte_offset; bo += (start - ctx->nal_store); GF_LOG(GF_LOG_ERROR, GF_LOG_PARSER, ("[%s] Error parsing NAL Unit %d (byte offset "LLU" size %d type %d frame %d last POC %d) - skipping\n", ctx->log_name, ctx->nb_nalus, bo, nal_size, nal_type, ctx->nb_frames, ctx->last_poc)); } else { 
GF_LOG(GF_LOG_ERROR, GF_LOG_PARSER, ("[%s] Error parsing NAL Unit %d (size %d type %d frame %d last POC %d) - skipping\n", ctx->log_name, ctx->nb_nalus, nal_size, nal_type, ctx->nb_frames, ctx->last_poc)); } nal_size += sc_size; assert((u32) remain >= nal_size); start += nal_size; remain -= nal_size; naldmx_check_timestamp_switch(ctx, &nalu_store_before, nal_size, &drop_packet, pck); continue; } if (check_dep) { if ((ctx->codecid==GF_CODECID_HEVC) && ctx->hevc_state->s_info.sps) { HEVC_VPS *vps; u32 temporal_id = nal_data[1] & 0x7; vps = & ctx->hevc_state->vps[ctx->hevc_state->s_info.sps->vps_id]; if (temporal_id + 1 < vps->max_sub_layers) { nal_ref_idc = GF_TRUE; } } else if (ctx->codecid==GF_CODECID_VVC) { if (ctx->vvc_state->s_info.non_ref_pic) { nal_ref_idc = GF_FALSE; } else { //todo nal_ref_idc = GF_TRUE; } } } if (is_islice) ctx->has_islice = GF_TRUE; //store all variables needed to compute POC/CTS and sample SAP and recovery info if (ctx->codecid==GF_CODECID_HEVC) { #ifndef GPAC_DISABLE_HEVC slice_is_ref = gf_hevc_slice_is_IDR(ctx->hevc_state); recovery_point_valid = ctx->hevc_state->sei.recovery_point.valid; recovery_point_frame_cnt = ctx->hevc_state->sei.recovery_point.frame_cnt; bIntraSlice = gf_hevc_slice_is_intra(ctx->hevc_state); au_sap_type = GF_FILTER_SAP_NONE; if (gf_hevc_slice_is_IDR(ctx->hevc_state)) { au_sap_type = GF_FILTER_SAP_1; } else { switch (ctx->hevc_state->s_info.nal_unit_type) { case GF_HEVC_NALU_SLICE_BLA_W_LP: case GF_HEVC_NALU_SLICE_BLA_W_DLP: au_sap_type = GF_FILTER_SAP_3; break; case GF_HEVC_NALU_SLICE_BLA_N_LP: au_sap_type = GF_FILTER_SAP_1; break; case GF_HEVC_NALU_SLICE_CRA: au_sap_type = GF_FILTER_SAP_3; break; } } slice_poc = ctx->hevc_state->s_info.poc; /*need to store TS offsets*/ switch (ctx->hevc_state->s_info.slice_type) { case GF_AVC_TYPE_B: case GF_AVC_TYPE2_B: slice_is_b = GF_TRUE; break; } #endif // GPAC_DISABLE_HEVC } else if (ctx->codecid==GF_CODECID_VVC) { slice_is_ref = gf_media_vvc_slice_is_ref(ctx->vvc_state); 
recovery_point_valid = ctx->vvc_state->s_info.recovery_point_valid; recovery_point_frame_cnt = ctx->vvc_state->s_info.gdr_recovery_count; // commented, set below // if (ctx->vvc_state->s_info.irap_or_gdr_pic && !ctx->vvc_state->s_info.gdr_pic) // bIntraSlice = GF_TRUE; //gf_hevc_slice_is_intra(ctx->hevc_state); au_sap_type = GF_FILTER_SAP_NONE; if (ctx->vvc_state->s_info.irap_or_gdr_pic && !ctx->vvc_state->s_info.gdr_pic) { au_sap_type = GF_FILTER_SAP_1; bIntraSlice = GF_TRUE; slice_is_ref = 1; } else { switch (ctx->vvc_state->s_info.nal_unit_type) { case GF_VVC_NALU_SLICE_IDR_N_LP: au_sap_type = GF_FILTER_SAP_1; slice_is_ref = 1; bIntraSlice = GF_TRUE; break; case GF_VVC_NALU_SLICE_CRA: au_sap_type = GF_FILTER_SAP_3; bIntraSlice = GF_TRUE; break; case GF_VVC_NALU_SLICE_IDR_W_RADL: bIntraSlice = GF_TRUE; if (ctx->vvc_state->s_info.gdr_pic) { au_sap_type = GF_FILTER_SAP_3; } else { au_sap_type = GF_FILTER_SAP_1; slice_is_ref = 1; } break; } } slice_poc = ctx->vvc_state->s_info.poc; /*need to store TS offsets*/ switch (ctx->vvc_state->s_info.slice_type) { case GF_AVC_TYPE_B: case GF_AVC_TYPE2_B: slice_is_b = GF_TRUE; break; } } else { /*fixme - we need finer grain for priority*/ if ((nal_type==GF_AVC_NALU_SVC_PREFIX_NALU) || (nal_type==GF_AVC_NALU_SVC_SLICE)) { if (!ctx->is_mvc) { unsigned char *p = (unsigned char *) start; // RefPicFlag avc_svc_subs_reserved |= (p[0] & 0x60) ? 0x80000000 : 0; // RedPicFlag TODO: not supported, would require to parse NAL unit payload avc_svc_subs_reserved |= (0) ? 0x40000000 : 0; // VclNALUnitFlag avc_svc_subs_reserved |= (1<=nal_type && nal_type<=5) || (nal_type==GF_AVC_NALU_SVC_PREFIX_NALU) || (nal_type==GF_AVC_NALU_SVC_SLICE) ? 
0x20000000 : 0; // use values of IdrFlag and PriorityId directly from SVC extension header avc_svc_subs_reserved |= p[1] << 16; // use values of DependencyId and QualityId directly from SVC extension header avc_svc_subs_reserved |= p[2] << 8; // use values of TemporalId and UseRefBasePicFlag directly from SVC extension header avc_svc_subs_reserved |= p[3] & 0xFC; // StoreBaseRepFlag TODO: SVC FF mentions a store_base_rep_flag which cannot be found in SVC spec avc_svc_subs_reserved |= (0) ? 0x00000002 : 0; // priority_id (6 bits) in SVC has inverse meaning -> lower value means higher priority - invert it and scale it to 8 bits avc_svc_subs_priority = (63 - (p[1] & 0x3F)) << 2; } if (nal_type==GF_AVC_NALU_SVC_PREFIX_NALU) { if (ctx->svc_prefix_buffer_size) { GF_LOG(GF_LOG_WARNING, GF_LOG_CODING, ("[%s] broken bitstream, two consecutive SVC prefix NALU without SVC slice in-between\n", ctx->log_name)); ctx->svc_prefix_buffer_size = 0; } /* remember reserved and priority value */ ctx->svc_nalu_prefix_reserved = avc_svc_subs_reserved; ctx->svc_nalu_prefix_priority = avc_svc_subs_priority; ctx->svc_prefix_buffer_size = nal_size; if (ctx->svc_prefix_buffer_size > ctx->svc_prefix_buffer_alloc) { ctx->svc_prefix_buffer_alloc = ctx->svc_prefix_buffer_size; ctx->svc_prefix_buffer = gf_realloc(ctx->svc_prefix_buffer, ctx->svc_prefix_buffer_size); } memcpy(ctx->svc_prefix_buffer, start+sc_size, ctx->svc_prefix_buffer_size); assert( (u32) remain >= sc_size + nal_size); start += sc_size + nal_size; remain -= sc_size + nal_size; continue; } } else if (is_slice) { // RefPicFlag avc_svc_subs_reserved |= (start[0] & 0x60) ? 0x80000000 : 0; // VclNALUnitFlag avc_svc_subs_reserved |= (1<=nal_type && nal_type<=5) || (nal_type==GF_AVC_NALU_SVC_PREFIX_NALU) || (nal_type==GF_AVC_NALU_SVC_SLICE) ? 
0x20000000 : 0; avc_svc_subs_priority = 0; } if (is_slice && ctx->avc_state->s_info.field_pic_flag) { ctx->is_paff = GF_TRUE; bottom_field_flag = ctx->avc_state->s_info.bottom_field_flag; } slice_is_ref = (ctx->avc_state->s_info.nal_unit_type==GF_AVC_NALU_IDR_SLICE) ? GF_TRUE : GF_FALSE; recovery_point_valid = ctx->avc_state->sei.recovery_point.valid; recovery_point_frame_cnt = ctx->avc_state->sei.recovery_point.frame_cnt; bIntraSlice = gf_media_avc_slice_is_intra(ctx->avc_state); au_sap_type = GF_FILTER_SAP_NONE; if (ctx->avc_state->s_info.nal_unit_type == GF_AVC_NALU_IDR_SLICE) au_sap_type = GF_FILTER_SAP_1; slice_poc = ctx->avc_state->s_info.poc; /*need to store TS offsets*/ switch (ctx->avc_state->s_info.slice_type) { case GF_AVC_TYPE_B: case GF_AVC_TYPE2_B: slice_is_b = GF_TRUE; break; } } if (is_slice) { Bool first_in_au = ctx->first_slice_in_au; if (slice_is_ref) ctx->nb_idr++; slice_force_ref = GF_FALSE; /*we only indicate TRUE IDRs for sync samples (cf AVC file format spec). SEI recovery should be used to build sampleToGroup & RollRecovery tables*/ if (ctx->first_slice_in_au) { ctx->first_slice_in_au = GF_FALSE; if (recovery_point_valid) { ctx->sei_recovery_frame_count = recovery_point_frame_cnt; /*we allow to mark I-frames as sync on open-GOPs (with sei_recovery_frame_count=0) when forcing sync even when the SEI RP is not available*/ if (!recovery_point_frame_cnt && bIntraSlice) { ctx->has_islice = 1; if (ctx->use_opengop_gdr == 1) { ctx->use_opengop_gdr = 2; /*avoid message flooding*/ GF_LOG(GF_LOG_WARNING, GF_LOG_CODING, ("[%s] No valid SEI Recovery Point found although needed - forcing\n", ctx->log_name)); } } if (ctx->codecid==GF_CODECID_HEVC) { ctx->hevc_state->sei.recovery_point.valid = 0; } else if (ctx->codecid==GF_CODECID_VVC) { ctx->vvc_state->s_info.recovery_point_valid = 0; } else { ctx->avc_state->sei.recovery_point.valid = 0; } if (bIntraSlice && ctx->force_sync && (ctx->sei_recovery_frame_count==0)) slice_force_ref = GF_TRUE; } ctx->au_sap 
= au_sap_type; ctx->bottom_field_flag = bottom_field_flag; } if (slice_poc < ctx->poc_shift) { u32 i, count = gf_list_count(ctx->pck_queue); for (i=0; i<count; i++) { u64 dts, cts; GF_FilterPacket *q_pck = gf_list_get(ctx->pck_queue, i); assert(q_pck); dts = gf_filter_pck_get_dts(q_pck); if (dts == GF_FILTER_NO_TS) continue; cts = gf_filter_pck_get_cts(q_pck); cts += ctx->poc_shift; cts -= slice_poc; gf_filter_pck_set_cts(q_pck, cts); } ctx->poc_shift = slice_poc; } /*if #pics, compute smallest POC increase*/ if (slice_poc != ctx->last_poc) { s32 pdiff = ABS(ctx->last_poc - slice_poc); if ((slice_poc < 0) && !ctx->last_poc) ctx->poc_diff = 0; if (!ctx->poc_diff || (ctx->poc_diff > (s32) pdiff ) ) { ctx->poc_diff = pdiff; ctx->poc_probe_done = GF_FALSE; } else if (first_in_au) { //second frame with the same poc diff, we should be able to properly recompute CTSs ctx->poc_probe_done = GF_TRUE; } ctx->last_poc = slice_poc; } GF_LOG(GF_LOG_DEBUG, GF_LOG_PARSER, ("[%s] POC is %d - min poc diff %d - slice is ref %d\n", ctx->log_name, slice_poc, ctx->poc_diff, slice_is_ref)); /*ref slice, reset poc*/ if (slice_is_ref) { if (first_in_au) { Bool temp_poc_diff = GF_FALSE; //two consecutive IDRs, force poc_diff to 1 if 0 (when we have intra-only) to force frame dispatch if (ctx->last_frame_is_idr && !ctx->poc_diff) { temp_poc_diff = GF_TRUE; ctx->poc_diff = 1; } //new ref frame, dispatch all pending packets naludmx_enqueue_or_dispatch(ctx, NULL, GF_TRUE); ctx->max_last_poc = ctx->last_poc = ctx->max_last_b_poc = 0; ctx->poc_shift = 0; //force probing of POC diff, this will prevent dispatching frames with wrong CTS until we have a clue of min poc_diff used ctx->poc_probe_done = 0; ctx->last_frame_is_idr = GF_TRUE; if (temp_poc_diff) ctx->poc_diff = 0; } } /*forced ref slice*/ else if (slice_force_ref) { ctx->last_frame_is_idr = GF_FALSE; if (first_in_au) { //new ref frame, dispatch all pending packets naludmx_enqueue_or_dispatch(ctx, NULL, GF_TRUE); /*adjust POC shift as sample 
will now be marked as sync, so we must store poc as if IDR (eg POC=0) for our CTS offset computing to be correct*/ ctx->poc_shift = slice_poc; //force probing of POC diff, this will prevent dispatching frames with wrong CTS until we have a clue of min poc_diff used ctx->poc_probe_done = 0; } } /*strictly less - this is a new P slice*/ else if (ctx->max_last_poc < ctx->last_poc) { ctx->max_last_b_poc = 0; ctx->max_last_poc = ctx->last_poc; ctx->last_frame_is_idr = GF_FALSE; } /*stricly greater*/ else if (slice_is_b && (ctx->max_last_poc > ctx->last_poc)) { ctx->last_frame_is_idr = GF_FALSE; if (!ctx->max_last_b_poc) { ctx->max_last_b_poc = ctx->last_poc; } /*if same poc than last max, this is a B-slice*/ else if (ctx->last_poc > ctx->max_last_b_poc) { ctx->max_last_b_poc = ctx->last_poc; } /*otherwise we had a B-slice reference: do nothing*/ } else { ctx->last_frame_is_idr = GF_FALSE; } if (ctx->deps) { if (nal_ref_idc) { ctx->has_ref_slices = GF_TRUE; } if ((ctx->codecid==GF_CODECID_AVC) && (ctx->avc_state->s_info.redundant_pic_cnt) ) { ctx->has_redundant = GF_TRUE; } } } au_start = ctx->first_pck_in_au ? GF_FALSE : GF_TRUE; if (ctx->has_initial_aud) { u32 audelim_size = (ctx->codecid!=GF_CODECID_AVC) ? 
3 : 2; /*dst_pck = */naludmx_start_nalu(ctx, audelim_size, GF_FALSE, &au_start, &pck_data); memcpy(pck_data + ctx->nal_length , ctx->init_aud, audelim_size); ctx->has_initial_aud = GF_FALSE; if (ctx->subsamples) { naludmx_add_subsample(ctx, audelim_size, avc_svc_subs_priority, avc_svc_subs_reserved); } } if (ctx->sei_buffer_size) { //sei buffer is already nal size prefixed /*dst_pck = */naludmx_start_nalu(ctx, ctx->sei_buffer_size, GF_TRUE, &au_start, &pck_data); memcpy(pck_data, ctx->sei_buffer, ctx->sei_buffer_size); if (ctx->subsamples) { naludmx_add_subsample(ctx, ctx->sei_buffer_size - ctx->nal_length, avc_svc_subs_priority, avc_svc_subs_reserved); } ctx->sei_buffer_size = 0; } if (ctx->svc_prefix_buffer_size) { /*dst_pck = */naludmx_start_nalu(ctx, ctx->svc_prefix_buffer_size, GF_FALSE, &au_start, &pck_data); memcpy(pck_data + ctx->nal_length, ctx->svc_prefix_buffer, ctx->svc_prefix_buffer_size); if (ctx->subsamples) { naludmx_add_subsample(ctx, ctx->svc_prefix_buffer_size, ctx->svc_nalu_prefix_priority, ctx->svc_nalu_prefix_reserved); } ctx->svc_prefix_buffer_size = 0; } //nalu size field /*dst_pck = */naludmx_start_nalu(ctx, (u32) nal_size, GF_FALSE, &au_start, &pck_data); pck_data += ctx->nal_length; //add subsample info before touching the size if (ctx->subsamples) { naludmx_add_subsample(ctx, (u32) nal_size, avc_svc_subs_priority, avc_svc_subs_reserved); } //bytes only come from the data packet memcpy(pck_data, nal_data, (size_t) nal_size); nal_size += sc_size; start += nal_size; remain -= nal_size; naldmx_check_timestamp_switch(ctx, &nalu_store_before, nal_size, &drop_packet, pck); //don't demux too much of input, abort when we would block. 
This avoid dispatching //a huge number of frames in a single call if (remain && gf_filter_pid_would_block(ctx->opid)) { ctx->resume_from = (u32) (start - ctx->nal_store); assert(ctx->resume_from <= ctx->nal_store_size); assert(ctx->resume_from == ctx->nal_store_size - remain); if (drop_packet) gf_filter_pid_drop_packet(ctx->ipid); return GF_OK; } } if (remain) { if (is_eos && (remain == ctx->nal_store_size)) { GF_LOG(GF_LOG_WARNING, GF_LOG_PARSER, ("[%s] Incomplete last NAL and eos, discarding\n", ctx->log_name)); remain = 0; } else { assert((u32) remain<=ctx->nal_store_size); memmove(ctx->nal_store, start, remain); } } ctx->nal_store_size = remain; if (drop_packet) gf_filter_pid_drop_packet(ctx->ipid); if (is_eos) return naludmx_process(filter); if ((ctx->nb_nalus>nalu_before) && gf_filter_reporting_enabled(filter)) { char szStatus[1024]; sprintf(szStatus, "%s %dx%d % 10d NALU % 8d I % 8d P % 8d B % 8d SEI", ctx->log_name, ctx->width, ctx->height, ctx->nb_nalus, ctx->nb_i, ctx->nb_p, ctx->nb_b, ctx->nb_sei); gf_filter_update_status(filter, -1, szStatus); } if (ctx->full_au_source && ctx->poc_probe_done) { if (ctx->first_pck_in_au) naludmx_finalize_au_flags(ctx); naludmx_enqueue_or_dispatch(ctx, NULL, GF_TRUE); } return GF_OK; } static GF_Err naludmx_initialize(GF_Filter *filter) { GF_NALUDmxCtx *ctx = gf_filter_get_udta(filter); ctx->sps = gf_list_new(); ctx->pps = gf_list_new(); switch (ctx->nal_length) { case 1: ctx->max_nalu_size_allowed = 0xFF; break; case 2: ctx->max_nalu_size_allowed = 0xFFFF; break; case 4: ctx->max_nalu_size_allowed = 0xFFFFFFFF; break; case 0: ctx->max_nalu_size_allowed = 0xFFFFFFFF; ctx->nal_length = 4; ctx->nal_adjusted = GF_TRUE; break; default: GF_LOG(GF_LOG_WARNING, GF_LOG_CODING, ("[%s] NAL size length %d is not allowed, defaulting to 4 bytes\n", ctx->log_name)); ctx->max_nalu_size_allowed = 0xFFFFFFFF; ctx->nal_length = 4; break; } return GF_OK; } static void naludmx_del_param_list(GF_List *ps) { if (!ps) return; while 
(gf_list_count(ps)) { GF_NALUFFParam *sl = gf_list_pop_back(ps); if (sl->data) gf_free(sl->data); gf_free(sl); } gf_list_del(ps); } static void naludmx_log_stats(GF_NALUDmxCtx *ctx) { u32 i, count; const char *msg_import; u32 nb_frames = 0; if (ctx->cur_fps.den) nb_frames = (u32) (ctx->dts / ctx->cur_fps.den); if (ctx->idur.den && ctx->idur.num) { GF_LOG(GF_LOG_INFO, GF_LOG_AUTHOR, ("%s duration specified at import time, may have parsed more frames than imported\n", ctx->log_name)); msg_import = "parsed"; } else { msg_import = "Import results:"; } if (ctx->nb_si || ctx->nb_sp) { GF_LOG(GF_LOG_INFO, GF_LOG_AUTHOR, ("%s %s %d frames (%d NALUs) - Slices: %d I %d P %d B %d SP %d SI - %d SEI - %d IDR\n", ctx->log_name, msg_import, nb_frames, ctx->nb_nalus, ctx->nb_i, ctx->nb_p, ctx->nb_b, ctx->nb_sp, ctx->nb_si, ctx->nb_sei, ctx->nb_idr )); } else if (ctx->vvc_no_stats) { GF_LOG(GF_LOG_INFO, GF_LOG_AUTHOR, ("%s %s %d samples (%d NALUs) - %d SEI - %d IDR\n", ctx->log_name, msg_import, nb_frames, ctx->nb_nalus, ctx->nb_sei, ctx->nb_idr)); } else { GF_LOG(GF_LOG_INFO, GF_LOG_AUTHOR, ("%s %s %d samples (%d NALUs) - Slices: %d I %d P %d B - %d SEI - %d IDR\n", ctx->log_name, msg_import, nb_frames, ctx->nb_nalus, ctx->nb_i, ctx->nb_p, ctx->nb_b, ctx->nb_sei, ctx->nb_idr)); } if (ctx->codecid==GF_CODECID_AVC) { count = gf_list_count(ctx->sps); for (i=0; i<count; i++) { AVC_SPS *sps; GF_NALUFFParam *svcc = (GF_NALUFFParam*)gf_list_get(ctx->sps, i); sps = & ctx->avc_state->sps[svcc->id]; if (sps->nb_ei || sps->nb_ep) { GF_LOG(GF_LOG_INFO, GF_LOG_AUTHOR, ("%s SVC (SSPS ID %d, %dx%d) %s Slices: %d I %d P %d B\n", ctx->log_name, svcc->id - GF_SVC_SSPS_ID_SHIFT, sps->width, sps->height, msg_import, sps->nb_ei, sps->nb_ep, sps->nb_eb )); } } } else if (ctx->nb_e_i || ctx->nb_e_p || ctx->nb_e_b) { GF_LOG(GF_LOG_INFO, GF_LOG_AUTHOR, ("%s L-HEVC %s Slices: %d I %d P %d B\n", ctx->log_name, msg_import, ctx->nb_e_i, ctx->nb_e_p, ctx->nb_e_b )); } if (ctx->max_total_delay>1) { 
GF_LOG(GF_LOG_INFO, GF_LOG_AUTHOR, ("%s Stream uses forward prediction - stream CTS offset: %d frames\n", ctx->log_name, ctx->max_total_delay)); } if (!ctx->nal_adjusted) { if ((ctx->max_nalu_size < 0xFF) && (ctx->nal_length>1) ){ GF_LOG(GF_LOG_INFO, GF_LOG_AUTHOR, ("%s Max NALU size is %d - stream could be optimized by setting nal_length=1\n", ctx->log_name, ctx->max_nalu_size)); } else if ((ctx->max_nalu_size < 0xFFFF) && (ctx->nal_length>2) ){ GF_LOG(GF_LOG_INFO, GF_LOG_AUTHOR, ("%s Max NALU size is %d - stream could be optimized by setting nal_length=2\n", ctx->log_name, ctx->max_nalu_size)); } } } static void naludmx_finalize(GF_Filter *filter) { GF_NALUDmxCtx *ctx = gf_filter_get_udta(filter); if (ctx->importer) naludmx_log_stats(ctx); if (ctx->bs_r) gf_bs_del(ctx->bs_r); if (ctx->bs_w) gf_bs_del(ctx->bs_w); if (ctx->indexes) gf_free(ctx->indexes); if (ctx->nal_store) gf_free(ctx->nal_store); if (ctx->pck_queue) { while (gf_list_count(ctx->pck_queue)) { GF_FilterPacket *pck = gf_list_pop_back(ctx->pck_queue); gf_filter_pck_discard(pck); } gf_list_del(ctx->pck_queue); } if (ctx->sei_buffer) gf_free(ctx->sei_buffer); if (ctx->svc_prefix_buffer) gf_free(ctx->svc_prefix_buffer); if (ctx->subsamp_buffer) gf_free(ctx->subsamp_buffer); if (ctx->src_pck) gf_filter_pck_unref(ctx->src_pck); ctx->src_pck = NULL; naludmx_del_param_list(ctx->sps); naludmx_del_param_list(ctx->pps); naludmx_del_param_list(ctx->vps); naludmx_del_param_list(ctx->sps_ext); naludmx_del_param_list(ctx->pps_svc); naludmx_del_param_list(ctx->vvc_aps_pre); naludmx_del_param_list(ctx->vvc_dci); if (ctx->avc_state) gf_free(ctx->avc_state); if (ctx->hevc_state) gf_free(ctx->hevc_state); if (ctx->vvc_state) gf_free(ctx->vvc_state); } static const char *naludmx_probe_data(const u8 *data, u32 size, GF_FilterProbeScore *score) { u32 sc, sc_size; u32 not_hevc=0; u32 not_avc=0; u32 not_vvc=0; u32 nb_hevc=0; u32 nb_avc=0; u32 nb_vvc=0; u32 nb_nalus=0; u32 nb_hevc_zero=0; u32 nb_avc_zero=0; u32 nb_vvc_zero=0; 
u32 nb_sps_hevc=0,nb_pps_hevc=0,nb_vps_hevc=0; u32 nb_sps_avc=0,nb_pps_avc=0; u32 nb_sps_vvc=0,nb_pps_vvc=0,nb_vps_vvc=0; while (size>3) { u32 nal_type=0; sc = gf_media_nalu_next_start_code(data, size, &sc_size); if (!sc_size) break; data += sc + sc_size; if (size <= sc + sc_size) break; size -= sc + sc_size; if (data[0] & 0x80) { not_avc++; not_hevc++; not_vvc++; continue; } nb_nalus++; nal_type = (data[0] & 0x7E) >> 1; if (nal_type<=40) { nb_hevc++; switch (nal_type) { case GF_HEVC_NALU_PIC_PARAM: if (nb_sps_hevc) nb_pps_hevc++; break; case GF_HEVC_NALU_SEQ_PARAM: nb_sps_hevc++; break; case GF_HEVC_NALU_VID_PARAM: nb_vps_hevc++; break; case 0: nb_hevc_zero++; break; } } else { not_hevc++; } nal_type = data[0] & 0x1F; if (nal_type && nal_type<=24) { nb_avc++; switch (nal_type) { case GF_AVC_NALU_PIC_PARAM: if (nb_sps_avc) nb_pps_avc++; break; case GF_AVC_NALU_SEQ_PARAM: nb_sps_avc++; break; case 0: nb_avc_zero++; break; } } else { not_avc++; } //check vvc - 2nd bit reserved to 0 if (data[0] & 0x40) { not_vvc++; continue; } nal_type = data[1] >> 3; if (nal_type>31) { not_vvc++; continue; } nb_vvc++; switch (nal_type) { case GF_VVC_NALU_PIC_PARAM: if (nb_sps_vvc) nb_pps_vvc++; break; case GF_VVC_NALU_SEQ_PARAM: nb_sps_vvc++; break; case GF_VVC_NALU_VID_PARAM: nb_vps_vvc++; break; case 0: nb_vvc_zero++; break; } } if (!nb_sps_avc || !nb_pps_avc) nb_avc=0; if (!nb_sps_hevc || !nb_pps_hevc || !nb_vps_hevc) nb_hevc=0; if (!nb_sps_vvc || !nb_pps_vvc || !nb_vps_vvc) nb_vvc=0; if (not_avc) nb_avc=0; if (not_hevc) nb_hevc=0; if (not_vvc) nb_vvc=0; if (not_avc && not_hevc && not_vvc) return NULL; if (nb_avc==nb_avc_zero) nb_avc=0; if (nb_hevc==nb_hevc_zero) nb_hevc=0; if (nb_vvc==nb_vvc_zero) nb_vvc=0; if (!nb_hevc && !nb_avc && !nb_vvc) return NULL; *score = GF_FPROBE_SUPPORTED; if (!nb_hevc) return (nb_vvc>nb_avc) ? "video/vvc" : "video/avc"; if (!nb_avc) return (nb_vvc>nb_hevc) ? "video/vvc" : "video/hevc"; if (!nb_vvc) return (nb_avc>nb_hevc) ? 
"video/avc" : "video/hevc"; if ((nb_hevc>nb_avc) && (nb_hevc>nb_vvc)) return "video/hevc"; if ((nb_vvc>nb_avc) && (nb_vvc>nb_hevc)) return "video/vvc"; return "video/avc"; } static const GF_FilterCapability NALUDmxCaps[] = { CAP_UINT(GF_CAPS_INPUT, GF_PROP_PID_STREAM_TYPE, GF_STREAM_FILE), CAP_STRING(GF_CAPS_INPUT, GF_PROP_PID_FILE_EXT, "264|h264|26L|h26L|h26l|avc|svc|mvc|hevc|hvc|265|h265|shvc|lvhc|mhvc|266|vvc|lvvc"), CAP_STRING(GF_CAPS_INPUT, GF_PROP_PID_MIME, "video/avc|video/h264|video/svc|video/mvc|video/hevc|video/lhvc|video/shvc|video/mhvc|video/vvc"), CAP_UINT(GF_CAPS_OUTPUT_STATIC, GF_PROP_PID_STREAM_TYPE, GF_STREAM_VISUAL), CAP_UINT(GF_CAPS_OUTPUT_STATIC, GF_PROP_PID_CODECID, GF_CODECID_AVC), CAP_UINT(GF_CAPS_OUTPUT_STATIC, GF_PROP_PID_CODECID, GF_CODECID_AVC_PS), CAP_UINT(GF_CAPS_OUTPUT_STATIC, GF_PROP_PID_CODECID, GF_CODECID_SVC), CAP_UINT(GF_CAPS_OUTPUT_STATIC, GF_PROP_PID_CODECID, GF_CODECID_MVC), CAP_UINT(GF_CAPS_OUTPUT_STATIC, GF_PROP_PID_CODECID, GF_CODECID_HEVC), CAP_UINT(GF_CAPS_OUTPUT_STATIC, GF_PROP_PID_CODECID, GF_CODECID_LHVC), CAP_UINT(GF_CAPS_OUTPUT_STATIC, GF_PROP_PID_CODECID, GF_CODECID_VVC), CAP_BOOL(GF_CAPS_OUTPUT_STATIC_EXCLUDED, GF_PROP_PID_UNFRAMED, GF_TRUE), CAP_BOOL(GF_CAPS_OUTPUT_STATIC_EXCLUDED, GF_PROP_PID_TILE_BASE, GF_TRUE), {0}, CAP_UINT(GF_CAPS_INPUT,GF_PROP_PID_STREAM_TYPE, GF_STREAM_VISUAL), CAP_UINT(GF_CAPS_INPUT,GF_PROP_PID_CODECID, GF_CODECID_AVC), CAP_UINT(GF_CAPS_INPUT,GF_PROP_PID_CODECID, GF_CODECID_AVC_PS), CAP_UINT(GF_CAPS_INPUT,GF_PROP_PID_CODECID, GF_CODECID_SVC), CAP_UINT(GF_CAPS_INPUT,GF_PROP_PID_CODECID, GF_CODECID_MVC), CAP_UINT(GF_CAPS_INPUT,GF_PROP_PID_CODECID, GF_CODECID_HEVC), CAP_UINT(GF_CAPS_INPUT,GF_PROP_PID_CODECID, GF_CODECID_LHVC), CAP_UINT(GF_CAPS_INPUT,GF_PROP_PID_CODECID, GF_CODECID_VVC), CAP_BOOL(GF_CAPS_INPUT,GF_PROP_PID_UNFRAMED, GF_TRUE), CAP_BOOL(GF_CAPS_INPUT_EXCLUDED, GF_PROP_PID_TILE_BASE, GF_TRUE), }; #define OFFS(_n) #_n, offsetof(GF_NALUDmxCtx, _n) static const GF_FilterArgs 
NALUDmxArgs[] = { { OFFS(fps), "import frame rate (0 default to FPS from bitstream or 25 Hz)", GF_PROP_FRACTION, "0/1000", NULL, 0}, { OFFS(index), "indexing window length. If 0, bitstream is not probed for duration. A negative value skips the indexing if the source file is larger than 100M (slows down importers) unless a play with start range > 0 is issued, otherwise uses the positive value", GF_PROP_DOUBLE, "-1.0", NULL, 0}, { OFFS(explicit), "use explicit layered (SVC/LHVC) import", GF_PROP_BOOL, "false", NULL, GF_FS_ARG_HINT_ADVANCED}, { OFFS(strict_poc), "delay frame output of an entire GOP to ensure CTS info is correct when POC suddenly changes\n" "- off: disable GOP buffering\n" "- on: enable GOP buffering, assuming no error in POC\n" "- error: enable GOP buffering and try to detect lost frames", GF_PROP_UINT, "off", "off|on|error", GF_FS_ARG_HINT_ADVANCED}, { OFFS(nosei), "remove all sei messages", GF_PROP_BOOL, "false", NULL, GF_FS_ARG_HINT_ADVANCED}, { OFFS(nosvc), "remove all SVC/MVC/LHVC data", GF_PROP_BOOL, "false", NULL, GF_FS_ARG_HINT_ADVANCED}, { OFFS(novpsext), "remove all VPS extensions", GF_PROP_BOOL, "false", NULL, GF_FS_ARG_HINT_ADVANCED}, { OFFS(importer), "compatibility with old importer, displays import results", GF_PROP_BOOL, "false", NULL, GF_FS_ARG_HINT_ADVANCED}, { OFFS(idur), "compatibility with old importer to log imported frames only", GF_PROP_FRACTION, "0", NULL, GF_FS_ARG_HINT_HIDE}, { OFFS(nal_length), "set number of bytes used to code length field: 1, 2 or 4", GF_PROP_UINT, "4", NULL, GF_FS_ARG_HINT_EXPERT}, { OFFS(subsamples), "import subsamples information", GF_PROP_BOOL, "false", NULL, GF_FS_ARG_HINT_EXPERT}, { OFFS(deps), "import samples dependencies information", GF_PROP_BOOL, "false", NULL, GF_FS_ARG_HINT_EXPERT}, { OFFS(seirw), "rewrite AVC sei messages for ISOBMFF constraints", GF_PROP_BOOL, "true", NULL, GF_FS_ARG_HINT_EXPERT}, { OFFS(audelim), "keep Access Unit delimiter in payload", GF_PROP_BOOL, "false", NULL, 
GF_FS_ARG_HINT_EXPERT}, { OFFS(analyze), "skip reformat of decoder config and SEI and dispatch all NAL in input order - shall only be used with inspect filter analyze mode!", GF_PROP_UINT, "off", "off|on|bs|full", GF_FS_ARG_HINT_HIDE}, { OFFS(bsdbg), "debug NAL parsing in parser@debug logs\n" "- off: not enabled\n" "- on: enabled\n" "- full: enable with number of bits dumped", GF_PROP_UINT, "off", "off|on|full", GF_FS_ARG_HINT_EXPERT}, {0} }; GF_FilterRegister NALUDmxRegister = { .name = "rfnalu", GF_FS_SET_DESCRIPTION("AVC/HEVC reframer") GF_FS_SET_HELP("This filter parses AVC|H264 and HEVC files/data and outputs corresponding video PID and frames.\n" "This demuxer only produces ISOBMFF-compatible output: start codes are removed, NALU length field added and avcC/hvcC config created.\nNote: The demux uses negative CTS offsets: CTS is corrrect, but some frames may have DTS greater than CTS.") .private_size = sizeof(GF_NALUDmxCtx), .args = NALUDmxArgs, .initialize = naludmx_initialize, .finalize = naludmx_finalize, SETCAPS(NALUDmxCaps), .configure_pid = naludmx_configure_pid, .process = naludmx_process, .process_event = naludmx_process_event, .probe_data = naludmx_probe_data, }; const GF_FilterRegister *naludmx_register(GF_FilterSession *session) { return &NALUDmxRegister; } #else const GF_FilterRegister *naludmx_register(GF_FilterSession *session) { return NULL; } #endif //GPAC_DISABLE_AV_PARSERS
static void naludmx_queue_param_set(GF_NALUDmxCtx *ctx, char *data, u32 size, u32 ps_type, s32 ps_id) { GF_List *list = NULL, *alt_list = NULL; GF_NALUFFParam *sl; u32 i, count; u32 crc = gf_crc_32(data, size); if (ctx->codecid==GF_CODECID_HEVC) { switch (ps_type) { case GF_HEVC_NALU_VID_PARAM: if (!ctx->vps) ctx->vps = gf_list_new(); list = ctx->vps; break; case GF_HEVC_NALU_SEQ_PARAM: list = ctx->sps; break; case GF_HEVC_NALU_PIC_PARAM: list = ctx->pps; break; default: assert(0); return; } } else if (ctx->codecid==GF_CODECID_VVC) { switch (ps_type) { case GF_VVC_NALU_VID_PARAM: if (!ctx->vps) ctx->vps = gf_list_new(); list = ctx->vps; break; case GF_VVC_NALU_SEQ_PARAM: list = ctx->sps; break; case GF_VVC_NALU_PIC_PARAM: list = ctx->pps; break; case GF_VVC_NALU_DEC_PARAM: if (!ctx->vvc_dci) ctx->vvc_dci = gf_list_new(); list = ctx->vvc_dci; break; case GF_VVC_NALU_APS_PREFIX: if (!ctx->vvc_aps_pre) ctx->vvc_aps_pre = gf_list_new(); list = ctx->vvc_aps_pre; break; default: assert(0); return; } } else { switch (ps_type) { case GF_AVC_NALU_SVC_SUBSEQ_PARAM: case GF_AVC_NALU_SEQ_PARAM: list = ctx->sps; break; case GF_AVC_NALU_PIC_PARAM: list = ctx->pps; alt_list = ctx->pps_svc; break; case GF_AVC_NALU_SEQ_PARAM_EXT: if (!ctx->sps_ext) ctx->sps_ext = gf_list_new(); list = ctx->sps_ext; break; default: assert(0); return; } } sl = NULL; count = gf_list_count(list); for (i=0; i<count; i++) { sl = gf_list_get(list, i); if (sl->id != ps_id) { sl = NULL; continue; } //same ID, same CRC, we don't change our state if (sl->crc == crc) return; break; } //handle alt PPS list for SVC if (!sl && alt_list) { count = gf_list_count(alt_list); for (i=0; i<count; i++) { sl = gf_list_get(alt_list, i); if (sl->id != ps_id) { sl = NULL; continue; } //same ID, same CRC, we don't change our state if (sl->crc == crc) return; break; } } if (sl) { //otherwise we keep this new param set sl->data = gf_realloc(sl->data, size); memcpy(sl->data, data, size); sl->size = size; sl->crc = crc; 
ctx->ps_modified = GF_TRUE; return; } //TODO we might want to purge the list after a while !! GF_SAFEALLOC(sl, GF_NALUFFParam); if (!sl) return; sl->data = gf_malloc(sizeof(char) * size); if (!sl->data) { gf_free(sl); return; } memcpy(sl->data, data, size); sl->size = size; sl->id = ps_id; sl->crc = crc; ctx->ps_modified = GF_TRUE; gf_list_add(list, sl); }
static void naludmx_queue_param_set(GF_NALUDmxCtx *ctx, char *data, u32 size, u32 ps_type, s32 ps_id) { GF_List *list = NULL, *alt_list = NULL; GF_NALUFFParam *sl; u32 i, count, crc; if (!size) return; crc = gf_crc_32(data, size); if (ctx->codecid==GF_CODECID_HEVC) { switch (ps_type) { case GF_HEVC_NALU_VID_PARAM: if (!ctx->vps) ctx->vps = gf_list_new(); list = ctx->vps; break; case GF_HEVC_NALU_SEQ_PARAM: list = ctx->sps; break; case GF_HEVC_NALU_PIC_PARAM: list = ctx->pps; break; default: assert(0); return; } } else if (ctx->codecid==GF_CODECID_VVC) { switch (ps_type) { case GF_VVC_NALU_VID_PARAM: if (!ctx->vps) ctx->vps = gf_list_new(); list = ctx->vps; break; case GF_VVC_NALU_SEQ_PARAM: list = ctx->sps; break; case GF_VVC_NALU_PIC_PARAM: list = ctx->pps; break; case GF_VVC_NALU_DEC_PARAM: if (!ctx->vvc_dci) ctx->vvc_dci = gf_list_new(); list = ctx->vvc_dci; break; case GF_VVC_NALU_APS_PREFIX: if (!ctx->vvc_aps_pre) ctx->vvc_aps_pre = gf_list_new(); list = ctx->vvc_aps_pre; break; default: assert(0); return; } } else { switch (ps_type) { case GF_AVC_NALU_SVC_SUBSEQ_PARAM: case GF_AVC_NALU_SEQ_PARAM: list = ctx->sps; break; case GF_AVC_NALU_PIC_PARAM: list = ctx->pps; alt_list = ctx->pps_svc; break; case GF_AVC_NALU_SEQ_PARAM_EXT: if (!ctx->sps_ext) ctx->sps_ext = gf_list_new(); list = ctx->sps_ext; break; default: assert(0); return; } } sl = NULL; count = gf_list_count(list); for (i=0; i<count; i++) { sl = gf_list_get(list, i); if (sl->id != ps_id) { sl = NULL; continue; } //same ID, same CRC, we don't change our state if (sl->crc == crc) return; break; } //handle alt PPS list for SVC if (!sl && alt_list) { count = gf_list_count(alt_list); for (i=0; i<count; i++) { sl = gf_list_get(alt_list, i); if (sl->id != ps_id) { sl = NULL; continue; } //same ID, same CRC, we don't change our state if (sl->crc == crc) return; break; } } if (sl) { //otherwise we keep this new param set sl->data = gf_realloc(sl->data, size); memcpy(sl->data, data, size); sl->size = size; 
sl->crc = crc; ctx->ps_modified = GF_TRUE; return; } //TODO we might want to purge the list after a while !! GF_SAFEALLOC(sl, GF_NALUFFParam); if (!sl) return; sl->data = gf_malloc(sizeof(char) * size); if (!sl->data) { gf_free(sl); return; } memcpy(sl->data, data, size); sl->size = size; sl->id = ps_id; sl->crc = crc; ctx->ps_modified = GF_TRUE; gf_list_add(list, sl); }
{'added': [(1683, '\tu32 i, count, crc;'), (1684, ''), (1685, '\tif (!size) return;'), (1686, '\tcrc = gf_crc_32(data, size);')], 'deleted': [(1683, '\tu32 i, count;'), (1684, '\tu32 crc = gf_crc_32(data, size);')]}
4
2
3,044
22,034
110
592
31
https://github.com/gpac/gpac
CVE-2021-40563
CWE-476
888
hci_request.c
C
hci_req_sync
/* BlueZ - Bluetooth protocol stack for Linux Copyright (C) 2014 Intel Corporation This program is free software; you can redistribute it and/or modify it under the terms of the GNU General Public License version 2 as published by the Free Software Foundation; THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT OF THIRD PARTY RIGHTS. IN NO EVENT SHALL THE COPYRIGHT HOLDER(S) AND AUTHOR(S) BE LIABLE FOR ANY CLAIM, OR ANY SPECIAL INDIRECT OR CONSEQUENTIAL DAMAGES, OR ANY DAMAGES WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE. ALL LIABILITY, INCLUDING LIABILITY FOR INFRINGEMENT OF ANY PATENTS, COPYRIGHTS, TRADEMARKS OR OTHER RIGHTS, RELATING TO USE OF THIS SOFTWARE IS DISCLAIMED. */ #include <linux/sched/signal.h> #include <net/bluetooth/bluetooth.h> #include <net/bluetooth/hci_core.h> #include <net/bluetooth/mgmt.h> #include "smp.h" #include "hci_request.h" #include "msft.h" #define HCI_REQ_DONE 0 #define HCI_REQ_PEND 1 #define HCI_REQ_CANCELED 2 void hci_req_init(struct hci_request *req, struct hci_dev *hdev) { skb_queue_head_init(&req->cmd_q); req->hdev = hdev; req->err = 0; } void hci_req_purge(struct hci_request *req) { skb_queue_purge(&req->cmd_q); } bool hci_req_status_pend(struct hci_dev *hdev) { return hdev->req_status == HCI_REQ_PEND; } static int req_run(struct hci_request *req, hci_req_complete_t complete, hci_req_complete_skb_t complete_skb) { struct hci_dev *hdev = req->hdev; struct sk_buff *skb; unsigned long flags; bt_dev_dbg(hdev, "length %u", skb_queue_len(&req->cmd_q)); /* If an error occurred during request building, remove all HCI * commands queued on the HCI request queue. 
 */
	if (req->err) {
		skb_queue_purge(&req->cmd_q);
		return req->err;
	}

	/* Do not allow empty requests */
	if (skb_queue_empty(&req->cmd_q))
		return -ENODATA;

	/* The completion callback is attached to the last command in the
	 * queue; it fires once the whole request has been processed.
	 */
	skb = skb_peek_tail(&req->cmd_q);
	if (complete) {
		bt_cb(skb)->hci.req_complete = complete;
	} else if (complete_skb) {
		bt_cb(skb)->hci.req_complete_skb = complete_skb;
		bt_cb(skb)->hci.req_flags |= HCI_REQ_SKB;
	}

	spin_lock_irqsave(&hdev->cmd_q.lock, flags);
	skb_queue_splice_tail(&req->cmd_q, &hdev->cmd_q);
	spin_unlock_irqrestore(&hdev->cmd_q.lock, flags);

	queue_work(hdev->workqueue, &hdev->cmd_work);

	return 0;
}

/* Submit the request; @complete is invoked with only status/opcode. */
int hci_req_run(struct hci_request *req, hci_req_complete_t complete)
{
	return req_run(req, complete, NULL);
}

/* Submit the request; @complete receives the command-complete skb. */
int hci_req_run_skb(struct hci_request *req, hci_req_complete_skb_t complete)
{
	return req_run(req, NULL, complete);
}

/* Completion handler for synchronous requests: records the result (and a
 * reference to the response skb, if any) and wakes the waiter.
 */
static void hci_req_sync_complete(struct hci_dev *hdev, u8 result, u16 opcode,
				  struct sk_buff *skb)
{
	bt_dev_dbg(hdev, "result 0x%2.2x", result);

	if (hdev->req_status == HCI_REQ_PEND) {
		hdev->req_result = result;
		hdev->req_status = HCI_REQ_DONE;
		if (skb)
			hdev->req_skb = skb_get(skb);
		wake_up_interruptible(&hdev->req_wait_q);
	}
}

/* Abort a pending synchronous request with @err and wake the waiter. */
void hci_req_sync_cancel(struct hci_dev *hdev, int err)
{
	bt_dev_dbg(hdev, "err 0x%2.2x", err);

	if (hdev->req_status == HCI_REQ_PEND) {
		hdev->req_result = err;
		hdev->req_status = HCI_REQ_CANCELED;
		wake_up_interruptible(&hdev->req_wait_q);
	}
}

/* Send a single HCI command and wait (up to @timeout jiffies) for the
 * matching @event. Returns the response skb on success or an ERR_PTR.
 * NOTE(review): callers are expected to hold the request sync lock —
 * confirm against call sites.
 */
struct sk_buff *__hci_cmd_sync_ev(struct hci_dev *hdev, u16 opcode, u32 plen,
				  const void *param, u8 event, u32 timeout)
{
	struct hci_request req;
	struct sk_buff *skb;
	int err = 0;

	bt_dev_dbg(hdev, "");

	hci_req_init(&req, hdev);

	hci_req_add_ev(&req, opcode, plen, param, event);

	hdev->req_status = HCI_REQ_PEND;

	err = hci_req_run_skb(&req, hci_req_sync_complete);
	if (err < 0)
		return ERR_PTR(err);

	err = wait_event_interruptible_timeout(hdev->req_wait_q,
			hdev->req_status != HCI_REQ_PEND, timeout);

	if (err == -ERESTARTSYS)
		return ERR_PTR(-EINTR);

	switch (hdev->req_status) {
	case HCI_REQ_DONE:
		err = -bt_to_errno(hdev->req_result);
		break;

	case HCI_REQ_CANCELED:
		err = -hdev->req_result;
		break;

	default:
		err = -ETIMEDOUT;
		break;
	}

	hdev->req_status = hdev->req_result = 0;
	skb = hdev->req_skb;
	hdev->req_skb = NULL;

	bt_dev_dbg(hdev, "end: err %d", err);

	if (err < 0) {
		/* Drop the response skb on failure so it cannot leak. */
		kfree_skb(skb);
		return ERR_PTR(err);
	}

	if (!skb)
		return ERR_PTR(-ENODATA);

	return skb;
}
EXPORT_SYMBOL(__hci_cmd_sync_ev);

/* Convenience wrapper: wait for the default Command Complete event. */
struct sk_buff *__hci_cmd_sync(struct hci_dev *hdev, u16 opcode, u32 plen,
			       const void *param, u32 timeout)
{
	return __hci_cmd_sync_ev(hdev, opcode, plen, param, 0, timeout);
}
EXPORT_SYMBOL(__hci_cmd_sync);

/* Execute request and wait for completion. */
int __hci_req_sync(struct hci_dev *hdev, int (*func)(struct hci_request *req,
						     unsigned long opt),
		   unsigned long opt, u32 timeout, u8 *hci_status)
{
	struct hci_request req;
	int err = 0;

	bt_dev_dbg(hdev, "start");

	hci_req_init(&req, hdev);

	hdev->req_status = HCI_REQ_PEND;

	/* @func builds the request; a non-zero return aborts the whole
	 * operation before anything is queued.
	 */
	err = func(&req, opt);
	if (err) {
		if (hci_status)
			*hci_status = HCI_ERROR_UNSPECIFIED;
		return err;
	}

	err = hci_req_run_skb(&req, hci_req_sync_complete);
	if (err < 0) {
		hdev->req_status = 0;

		/* ENODATA means the HCI request command queue is empty.
		 * This can happen when a request with conditionals doesn't
		 * trigger any commands to be sent. This is normal behavior
		 * and should not trigger an error return.
		 */
		if (err == -ENODATA) {
			if (hci_status)
				*hci_status = 0;
			return 0;
		}

		if (hci_status)
			*hci_status = HCI_ERROR_UNSPECIFIED;

		return err;
	}

	err = wait_event_interruptible_timeout(hdev->req_wait_q,
			hdev->req_status != HCI_REQ_PEND, timeout);

	if (err == -ERESTARTSYS)
		return -EINTR;

	switch (hdev->req_status) {
	case HCI_REQ_DONE:
		err = -bt_to_errno(hdev->req_result);
		if (hci_status)
			*hci_status = hdev->req_result;
		break;

	case HCI_REQ_CANCELED:
		err = -hdev->req_result;
		if (hci_status)
			*hci_status = HCI_ERROR_UNSPECIFIED;
		break;

	default:
		err = -ETIMEDOUT;
		if (hci_status)
			*hci_status = HCI_ERROR_UNSPECIFIED;
		break;
	}

	/* Release any response skb stashed by hci_req_sync_complete(). */
	kfree_skb(hdev->req_skb);
	hdev->req_skb = NULL;

	hdev->req_status = hdev->req_result = 0;

	bt_dev_dbg(hdev, "end: err %d", err);

	return err;
}

/* Locked wrapper around __hci_req_sync(); fails fast if the device is
 * not up.
 */
int hci_req_sync(struct hci_dev *hdev, int (*req)(struct hci_request *req,
						  unsigned long opt),
		 unsigned long opt, u32 timeout, u8 *hci_status)
{
	int ret;

	if (!test_bit(HCI_UP, &hdev->flags))
		return -ENETDOWN;

	/* Serialize all requests */
	hci_req_sync_lock(hdev);
	ret = __hci_req_sync(hdev, req, opt, timeout, hci_status);
	hci_req_sync_unlock(hdev);

	return ret;
}

/* Allocate an skb carrying one HCI command (header + @plen parameter
 * bytes). Returns NULL on allocation failure.
 */
struct sk_buff *hci_prepare_cmd(struct hci_dev *hdev, u16 opcode, u32 plen,
				const void *param)
{
	int len = HCI_COMMAND_HDR_SIZE + plen;
	struct hci_command_hdr *hdr;
	struct sk_buff *skb;

	skb = bt_skb_alloc(len, GFP_ATOMIC);
	if (!skb)
		return NULL;

	hdr = skb_put(skb, HCI_COMMAND_HDR_SIZE);
	hdr->opcode = cpu_to_le16(opcode);
	hdr->plen = plen;

	if (plen)
		skb_put_data(skb, param, plen);

	bt_dev_dbg(hdev, "skb len %d", skb->len);

	hci_skb_pkt_type(skb) = HCI_COMMAND_PKT;
	hci_skb_opcode(skb) = opcode;

	return skb;
}

/* Queue a command to an asynchronous HCI request */
void hci_req_add_ev(struct hci_request *req, u16 opcode, u32 plen,
		    const void *param, u8 event)
{
	struct hci_dev *hdev = req->hdev;
	struct sk_buff *skb;

	bt_dev_dbg(hdev, "opcode 0x%4.4x plen %d", opcode, plen);

	/* If an error occurred during request building, there is no point in
	 * queueing the HCI command.
	 * We can simply return.
	 */
	if (req->err)
		return;

	skb = hci_prepare_cmd(hdev, opcode, plen, param);
	if (!skb) {
		bt_dev_err(hdev, "no memory for command (opcode 0x%4.4x)",
			   opcode);
		/* Remember the failure so req_run() aborts the request. */
		req->err = -ENOMEM;
		return;
	}

	/* The first command in the queue marks the start of the request. */
	if (skb_queue_empty(&req->cmd_q))
		bt_cb(skb)->hci.req_flags |= HCI_REQ_START;

	bt_cb(skb)->hci.req_event = event;

	skb_queue_tail(&req->cmd_q, skb);
}

/* Queue a command with no specific completion event. */
void hci_req_add(struct hci_request *req, u16 opcode, u32 plen,
		 const void *param)
{
	hci_req_add_ev(req, opcode, plen, param, 0);
}

/* Configure page scan activity/type for fast-connectable mode (interlaced
 * scan with a short interval) or restore the controller defaults.
 */
void __hci_req_write_fast_connectable(struct hci_request *req, bool enable)
{
	struct hci_dev *hdev = req->hdev;
	struct hci_cp_write_page_scan_activity acp;
	u8 type;

	if (!hci_dev_test_flag(hdev, HCI_BREDR_ENABLED))
		return;

	if (hdev->hci_ver < BLUETOOTH_VER_1_2)
		return;

	if (enable) {
		type = PAGE_SCAN_TYPE_INTERLACED;

		/* 160 msec page scan interval */
		acp.interval = cpu_to_le16(0x0100);
	} else {
		type = hdev->def_page_scan_type;
		acp.interval = cpu_to_le16(hdev->def_page_scan_int);
	}

	acp.window = cpu_to_le16(hdev->def_page_scan_window);

	/* Only issue the commands when the values actually change. */
	if (__cpu_to_le16(hdev->page_scan_interval) != acp.interval ||
	    __cpu_to_le16(hdev->page_scan_window) != acp.window)
		hci_req_add(req, HCI_OP_WRITE_PAGE_SCAN_ACTIVITY,
			    sizeof(acp), &acp);

	if (hdev->page_scan_type != type)
		hci_req_add(req, HCI_OP_WRITE_PAGE_SCAN_TYPE, 1, &type);
}

/* Kick off the interleave-scan work item in the no-filter state. */
static void start_interleave_scan(struct hci_dev *hdev)
{
	hdev->interleave_scan_state = INTERLEAVE_SCAN_NO_FILTER;
	queue_delayed_work(hdev->req_workqueue,
			   &hdev->interleave_scan, 0);
}

/* True while the interleave-scan state machine is active. */
static bool is_interleave_scanning(struct hci_dev *hdev)
{
	return hdev->interleave_scan_state != INTERLEAVE_SCAN_NONE;
}

/* Stop the interleave-scan work (synchronously) and reset its state. */
static void cancel_interleave_scan(struct hci_dev *hdev)
{
	bt_dev_dbg(hdev, "cancelling interleave scan");

	cancel_delayed_work_sync(&hdev->interleave_scan);

	hdev->interleave_scan_state = INTERLEAVE_SCAN_NONE;
}

/* Return true if interleave_scan wasn't started until exiting this function,
 * otherwise, return false */
static bool __hci_update_interleaved_scan(struct hci_dev *hdev)
{
	/* Do interleaved scan only if all of the following are true:
	 * - There is at least one ADV monitor
	 * - At least one pending LE connection or one device to be scanned for
	 * - Monitor offloading is not supported
	 * If so, we should alternate between allowlist scan and one without
	 * any filters to save power.
	 */
	bool use_interleaving = hci_is_adv_monitoring(hdev) &&
				!(list_empty(&hdev->pend_le_conns) &&
				  list_empty(&hdev->pend_le_reports)) &&
				hci_get_adv_monitor_offload_ext(hdev) ==
				    HCI_ADV_MONITOR_EXT_NONE;
	bool is_interleaving = is_interleave_scanning(hdev);

	if (use_interleaving && !is_interleaving) {
		start_interleave_scan(hdev);
		bt_dev_dbg(hdev, "starting interleave scan");
		return true;
	}

	if (!use_interleaving && is_interleaving)
		cancel_interleave_scan(hdev);

	return false;
}

/* This function controls the background scanning based on hdev->pend_le_conns
 * list. If there are pending LE connection we start the background scanning,
 * otherwise we stop it.
 *
 * This function requires the caller holds hdev->lock.
 */
static void __hci_update_background_scan(struct hci_request *req)
{
	struct hci_dev *hdev = req->hdev;

	if (!test_bit(HCI_UP, &hdev->flags) ||
	    test_bit(HCI_INIT, &hdev->flags) ||
	    hci_dev_test_flag(hdev, HCI_SETUP) ||
	    hci_dev_test_flag(hdev, HCI_CONFIG) ||
	    hci_dev_test_flag(hdev, HCI_AUTO_OFF) ||
	    hci_dev_test_flag(hdev, HCI_UNREGISTER))
		return;

	/* No point in doing scanning if LE support hasn't been enabled */
	if (!hci_dev_test_flag(hdev, HCI_LE_ENABLED))
		return;

	/* If discovery is active don't interfere with it */
	if (hdev->discovery.state != DISCOVERY_STOPPED)
		return;

	/* Reset RSSI and UUID filters when starting background scanning
	 * since these filters are meant for service discovery only.
	 *
	 * The Start Discovery and Start Service Discovery operations
	 * ensure to set proper values for RSSI threshold and UUID
	 * filter list. So it is safe to just reset them here.
	 */
	hci_discovery_filter_clear(hdev);

	bt_dev_dbg(hdev, "ADV monitoring is %s",
		   hci_is_adv_monitoring(hdev) ? "on" : "off");

	if (list_empty(&hdev->pend_le_conns) &&
	    list_empty(&hdev->pend_le_reports) &&
	    !hci_is_adv_monitoring(hdev)) {
		/* If there is no pending LE connections or devices
		 * to be scanned for or no ADV monitors, we should stop the
		 * background scanning.
		 */

		/* If controller is not scanning we are done. */
		if (!hci_dev_test_flag(hdev, HCI_LE_SCAN))
			return;

		hci_req_add_le_scan_disable(req, false);

		bt_dev_dbg(hdev, "stopping background scanning");
	} else {
		/* If there is at least one pending LE connection, we should
		 * keep the background scan running.
		 */

		/* If controller is connecting, we should not start scanning
		 * since some controllers are not able to scan and connect at
		 * the same time.
		 */
		if (hci_lookup_le_connect(hdev))
			return;

		/* If controller is currently scanning, we stop it to ensure we
		 * don't miss any advertising (due to duplicates filter).
		 */
		if (hci_dev_test_flag(hdev, HCI_LE_SCAN))
			hci_req_add_le_scan_disable(req, false);

		hci_req_add_le_passive_scan(req);

		bt_dev_dbg(hdev, "starting background scanning");
	}
}

/* Queue a Write Local Name command carrying the current device name. */
void __hci_req_update_name(struct hci_request *req)
{
	struct hci_dev *hdev = req->hdev;
	struct hci_cp_write_local_name cp;

	memcpy(cp.name, hdev->dev_name, sizeof(cp.name));

	hci_req_add(req, HCI_OP_WRITE_LOCAL_NAME, sizeof(cp), &cp);
}

#define PNP_INFO_SVCLASS_ID 0x1200

/* Append the registered 16-bit service UUIDs as an EIR field at @data,
 * never writing more than @len bytes. Returns the advanced write pointer.
 * The PnP Information service class is deliberately excluded.
 */
static u8 *create_uuid16_list(struct hci_dev *hdev, u8 *data, ptrdiff_t len)
{
	u8 *ptr = data, *uuids_start = NULL;
	struct bt_uuid *uuid;

	/* Need room for at least the 2-byte field header plus one UUID. */
	if (len < 4)
		return ptr;

	list_for_each_entry(uuid, &hdev->uuids, list) {
		u16 uuid16;

		if (uuid->size != 16)
			continue;

		uuid16 = get_unaligned_le16(&uuid->uuid[12]);
		if (uuid16 < 0x1100)
			continue;

		if (uuid16 == PNP_INFO_SVCLASS_ID)
			continue;

		if (!uuids_start) {
			uuids_start = ptr;
			uuids_start[0] = 1;
			uuids_start[1] = EIR_UUID16_ALL;
			ptr += 2;
		}

		/* Stop if not enough space to put next UUID */
		if ((ptr - data) + sizeof(u16) > len) {
			/* Truncated list: mark it as incomplete. */
			uuids_start[1] = EIR_UUID16_SOME;
			break;
		}

		*ptr++ = (uuid16 & 0x00ff);
		*ptr++ = (uuid16 & 0xff00) >> 8;
		uuids_start[0] += sizeof(uuid16);
	}

	return ptr;
}

/* Append the registered 32-bit service UUIDs as an EIR field at @data,
 * never writing more than @len bytes. Returns the advanced write pointer.
 */
static u8 *create_uuid32_list(struct hci_dev *hdev, u8 *data, ptrdiff_t len)
{
	u8 *ptr = data, *uuids_start = NULL;
	struct bt_uuid *uuid;

	if (len < 6)
		return ptr;

	list_for_each_entry(uuid, &hdev->uuids, list) {
		if (uuid->size != 32)
			continue;

		if (!uuids_start) {
			uuids_start = ptr;
			uuids_start[0] = 1;
			uuids_start[1] = EIR_UUID32_ALL;
			ptr += 2;
		}

		/* Stop if not enough space to put next UUID */
		if ((ptr - data) + sizeof(u32) > len) {
			uuids_start[1] = EIR_UUID32_SOME;
			break;
		}

		memcpy(ptr, &uuid->uuid[12], sizeof(u32));
		ptr += sizeof(u32);
		uuids_start[0] += sizeof(u32);
	}

	return ptr;
}

/* Append the registered 128-bit service UUIDs as an EIR field at @data,
 * never writing more than @len bytes. Returns the advanced write pointer.
 */
static u8 *create_uuid128_list(struct hci_dev *hdev, u8 *data, ptrdiff_t len)
{
	u8 *ptr = data, *uuids_start = NULL;
	struct bt_uuid *uuid;

	if (len < 18)
		return ptr;

	list_for_each_entry(uuid, &hdev->uuids, list) {
		if (uuid->size != 128)
			continue;

		if (!uuids_start) {
			uuids_start = ptr;
			uuids_start[0] = 1;
			uuids_start[1] = EIR_UUID128_ALL;
			ptr += 2;
		}

		/* Stop if not enough space to put next UUID */
		if ((ptr - data) + 16 > len) {
			uuids_start[1] = EIR_UUID128_SOME;
			break;
		}

		memcpy(ptr, uuid->uuid, 16);
		ptr += 16;
		uuids_start[0] += 16;
	}

	return ptr;
}

/* Build the Extended Inquiry Response payload into @data: local name
 * (shortened if over 48 bytes), TX power, device ID, and UUID lists.
 * NOTE(review): @data is presumably HCI_MAX_EIR_LENGTH bytes (see the
 * UUID helpers' length arguments) — confirm against the caller.
 */
static void create_eir(struct hci_dev *hdev, u8 *data)
{
	u8 *ptr = data;
	size_t name_len;

	name_len = strlen(hdev->dev_name);

	if (name_len > 0) {
		/* EIR Data type */
		if (name_len > 48) {
			name_len = 48;
			ptr[1] = EIR_NAME_SHORT;
		} else
			ptr[1] = EIR_NAME_COMPLETE;

		/* EIR Data length */
		ptr[0] = name_len + 1;

		memcpy(ptr + 2, hdev->dev_name, name_len);

		ptr += (name_len + 2);
	}

	if (hdev->inq_tx_power != HCI_TX_POWER_INVALID) {
		ptr[0] = 2;
		ptr[1] = EIR_TX_POWER;
		ptr[2] = (u8) hdev->inq_tx_power;

		ptr += 3;
	}

	if (hdev->devid_source > 0) {
		ptr[0] = 9;
		ptr[1] = EIR_DEVICE_ID;

		put_unaligned_le16(hdev->devid_source, ptr + 2);
		put_unaligned_le16(hdev->devid_vendor, ptr + 4);
		put_unaligned_le16(hdev->devid_product, ptr + 6);
		put_unaligned_le16(hdev->devid_version, ptr + 8);

		ptr += 10;
	}

	ptr = create_uuid16_list(hdev, ptr, HCI_MAX_EIR_LENGTH - (ptr - data));
	ptr = create_uuid32_list(hdev, ptr, HCI_MAX_EIR_LENGTH - (ptr - data));
	ptr = create_uuid128_list(hdev, ptr, HCI_MAX_EIR_LENGTH - (ptr - data));
}

/* Rebuild the EIR data and queue a Write EIR command if it changed. */
void __hci_req_update_eir(struct hci_request *req)
{
	struct hci_dev *hdev = req->hdev;
	struct hci_cp_write_eir cp;

	if (!hdev_is_powered(hdev))
		return;

	if (!lmp_ext_inq_capable(hdev))
		return;

	if (!hci_dev_test_flag(hdev, HCI_SSP_ENABLED))
		return;

	if (hci_dev_test_flag(hdev, HCI_SERVICE_CACHE))
		return;

	memset(&cp, 0, sizeof(cp));

	create_eir(hdev, cp.data);

	/* Skip the command when the EIR payload is unchanged. */
	if (memcmp(cp.data, hdev->eir, sizeof(cp.data)) == 0)
		return;

	memcpy(hdev->eir, cp.data, sizeof(cp.data));

	hci_req_add(req, HCI_OP_WRITE_EIR, sizeof(cp), &cp);
}

/* Queue the commands that disable LE scanning (extended or legacy form)
 * and, when LL privacy is in use and no RPA connection is pending,
 * disable controller-based address resolution as well.
 */
void hci_req_add_le_scan_disable(struct hci_request *req, bool rpa_le_conn)
{
	struct hci_dev *hdev = req->hdev;

	if (hdev->scanning_paused) {
		bt_dev_dbg(hdev, "Scanning is paused for suspend");
		return;
	}

	if (hdev->suspended)
		set_bit(SUSPEND_SCAN_DISABLE, hdev->suspend_tasks);

	if (use_ext_scan(hdev)) {
		struct hci_cp_le_set_ext_scan_enable cp;

		memset(&cp, 0, sizeof(cp));
		cp.enable = LE_SCAN_DISABLE;
		hci_req_add(req, HCI_OP_LE_SET_EXT_SCAN_ENABLE, sizeof(cp),
			    &cp);
	} else {
		struct hci_cp_le_set_scan_enable cp;

		memset(&cp, 0, sizeof(cp));
		cp.enable = LE_SCAN_DISABLE;
		hci_req_add(req, HCI_OP_LE_SET_SCAN_ENABLE, sizeof(cp), &cp);
	}

	/* Disable address resolution */
	if (use_ll_privacy(hdev) &&
	    hci_dev_test_flag(hdev, HCI_ENABLE_LL_PRIVACY) &&
	    hci_dev_test_flag(hdev, HCI_LL_RPA_RESOLUTION) && !rpa_le_conn) {
		__u8 enable = 0x00;

		hci_req_add(req, HCI_OP_LE_SET_ADDR_RESOLV_ENABLE, 1, &enable);
	}
}

/* Queue removal of @bdaddr from the controller white list (and from the
 * resolving list when LL privacy is enabled and an IRK is known).
 */
static void del_from_white_list(struct hci_request *req, bdaddr_t *bdaddr,
				u8 bdaddr_type)
{
	struct hci_cp_le_del_from_white_list cp;

	cp.bdaddr_type = bdaddr_type;
	bacpy(&cp.bdaddr, bdaddr);

	bt_dev_dbg(req->hdev, "Remove %pMR (0x%x) from whitelist",
		   &cp.bdaddr, cp.bdaddr_type);
	hci_req_add(req, HCI_OP_LE_DEL_FROM_WHITE_LIST, sizeof(cp), &cp);

	if (use_ll_privacy(req->hdev) &&
	    hci_dev_test_flag(req->hdev, HCI_ENABLE_LL_PRIVACY)) {
		struct smp_irk *irk;

		irk = hci_find_irk_by_addr(req->hdev, bdaddr, bdaddr_type);
		if (irk) {
			struct hci_cp_le_del_from_resolv_list cp;

			cp.bdaddr_type = bdaddr_type;
			bacpy(&cp.bdaddr, bdaddr);
			hci_req_add(req, HCI_OP_LE_DEL_FROM_RESOLV_LIST,
				    sizeof(cp), &cp);
		}
	}
}

/* Adds connection to white list if needed. On error, returns -1. */
static int add_to_white_list(struct hci_request *req,
			     struct hci_conn_params *params, u8 *num_entries,
			     bool allow_rpa)
{
	struct hci_cp_le_add_to_white_list cp;
	struct hci_dev *hdev = req->hdev;

	/* Already in white list */
	if (hci_bdaddr_list_lookup(&hdev->le_white_list, &params->addr,
				   params->addr_type))
		return 0;

	/* Select filter policy to accept all advertising */
	if (*num_entries >= hdev->le_white_list_size)
		return -1;

	/* White list can not be used with RPAs */
	if (!allow_rpa &&
	    !hci_dev_test_flag(hdev, HCI_ENABLE_LL_PRIVACY) &&
	    hci_find_irk_by_addr(hdev, &params->addr, params->addr_type)) {
		return -1;
	}

	/* During suspend, only wakeable devices can be in whitelist */
	if (hdev->suspended &&
	    !hci_conn_test_flag(HCI_CONN_FLAG_REMOTE_WAKEUP,
				params->current_flags))
		return 0;

	*num_entries += 1;
	cp.bdaddr_type = params->addr_type;
	bacpy(&cp.bdaddr, &params->addr);

	bt_dev_dbg(hdev, "Add %pMR (0x%x) to whitelist", &cp.bdaddr,
		   cp.bdaddr_type);

	hci_req_add(req, HCI_OP_LE_ADD_TO_WHITE_LIST, sizeof(cp), &cp);

	if (use_ll_privacy(hdev) &&
	    hci_dev_test_flag(hdev, HCI_ENABLE_LL_PRIVACY)) {
		struct smp_irk *irk;

		irk = hci_find_irk_by_addr(hdev, &params->addr,
					   params->addr_type);
		if (irk) {
			struct hci_cp_le_add_to_resolv_list cp;

			cp.bdaddr_type = params->addr_type;
			bacpy(&cp.bdaddr, &params->addr);
			memcpy(cp.peer_irk, irk->val, 16);

			if (hci_dev_test_flag(hdev, HCI_PRIVACY))
				memcpy(cp.local_irk, hdev->irk, 16);
			else
				memset(cp.local_irk, 0, 16);

			hci_req_add(req,
				    HCI_OP_LE_ADD_TO_RESOLV_LIST,
				    sizeof(cp), &cp);
		}
	}

	return 0;
}

/* Synchronize the controller white list with the pending-connection and
 * pending-report lists. Returns the scan filter policy to use: 0x01 when
 * the white list can be used, 0x00 when it cannot (e.g. it overflowed or
 * contains RPAs the controller cannot match).
 */
static u8 update_white_list(struct hci_request *req)
{
	struct hci_dev *hdev = req->hdev;
	struct hci_conn_params *params;
	struct bdaddr_list *b;
	u8 num_entries = 0;
	bool pend_conn, pend_report;
	/* We allow whitelisting even with RPAs in suspend. In the worst case,
	 * we won't be able to wake from devices that use the privacy1.2
	 * features. Additionally, once we support privacy1.2 and IRK
	 * offloading, we can update this to also check for those conditions.
	 */
	bool allow_rpa = hdev->suspended;

	/* Go through the current white list programmed into the
	 * controller one by one and check if that address is still
	 * in the list of pending connections or list of devices to
	 * report. If not present in either list, then queue the
	 * command to remove it from the controller.
	 */
	list_for_each_entry(b, &hdev->le_white_list, list) {
		pend_conn = hci_pend_le_action_lookup(&hdev->pend_le_conns,
						      &b->bdaddr,
						      b->bdaddr_type);
		pend_report = hci_pend_le_action_lookup(&hdev->pend_le_reports,
							&b->bdaddr,
							b->bdaddr_type);

		/* If the device is not likely to connect or report,
		 * remove it from the whitelist.
		 */
		if (!pend_conn && !pend_report) {
			del_from_white_list(req, &b->bdaddr, b->bdaddr_type);
			continue;
		}

		/* White list can not be used with RPAs */
		if (!allow_rpa &&
		    !hci_dev_test_flag(hdev, HCI_ENABLE_LL_PRIVACY) &&
		    hci_find_irk_by_addr(hdev, &b->bdaddr, b->bdaddr_type)) {
			return 0x00;
		}

		num_entries++;
	}

	/* Since all no longer valid white list entries have been
	 * removed, walk through the list of pending connections
	 * and ensure that any new device gets programmed into
	 * the controller.
	 *
	 * If the list of the devices is larger than the list of
	 * available white list entries in the controller, then
	 * just abort and return filer policy value to not use the
	 * white list.
	 */
	list_for_each_entry(params, &hdev->pend_le_conns, action) {
		if (add_to_white_list(req, params, &num_entries, allow_rpa))
			return 0x00;
	}

	/* After adding all new pending connections, walk through
	 * the list of pending reports and also add these to the
	 * white list if there is still space. Abort if space runs out.
	 */
	list_for_each_entry(params, &hdev->pend_le_reports, action) {
		if (add_to_white_list(req, params, &num_entries, allow_rpa))
			return 0x00;
	}

	/* Use the allowlist unless the following conditions are all true:
	 * - We are not currently suspending
	 * - There are 1 or more ADV monitors registered and it's not offloaded
	 * - Interleaved scanning is not currently using the allowlist
	 */
	if (!idr_is_empty(&hdev->adv_monitors_idr) && !hdev->suspended &&
	    hci_get_adv_monitor_offload_ext(hdev) == HCI_ADV_MONITOR_EXT_NONE &&
	    hdev->interleave_scan_state != INTERLEAVE_SCAN_ALLOWLIST)
		return 0x00;

	/* Select filter policy to use white list */
	return 0x01;
}

/* Whether scanning should use a resolvable private address. */
static bool scan_use_rpa(struct hci_dev *hdev)
{
	return hci_dev_test_flag(hdev, HCI_PRIVACY);
}

/* Queue the scan parameter and scan enable commands, using the extended
 * variants when the controller supports them (one PHY parameter block per
 * supported PHY). @addr_resolv additionally enables controller address
 * resolution when LL privacy is in use.
 */
static void hci_req_start_scan(struct hci_request *req, u8 type, u16 interval,
			       u16 window, u8 own_addr_type, u8 filter_policy,
			       bool addr_resolv)
{
	struct hci_dev *hdev = req->hdev;

	if (hdev->scanning_paused) {
		bt_dev_dbg(hdev, "Scanning is paused for suspend");
		return;
	}

	if (use_ll_privacy(hdev) &&
	    hci_dev_test_flag(hdev, HCI_ENABLE_LL_PRIVACY) &&
	    addr_resolv) {
		u8 enable = 0x01;

		hci_req_add(req, HCI_OP_LE_SET_ADDR_RESOLV_ENABLE, 1, &enable);
	}

	/* Use ext scanning if set ext scan param and ext scan enable is
	 * supported
	 */
	if (use_ext_scan(hdev)) {
		struct hci_cp_le_set_ext_scan_params *ext_param_cp;
		struct hci_cp_le_set_ext_scan_enable ext_enable_cp;
		struct hci_cp_le_scan_phy_params *phy_params;
		/* Room for the command plus up to two PHY parameter blocks. */
		u8 data[sizeof(*ext_param_cp) + sizeof(*phy_params) * 2];
		u32 plen;

		ext_param_cp = (void *)data;
		phy_params = (void *)ext_param_cp->data;

		memset(ext_param_cp, 0, sizeof(*ext_param_cp));
		ext_param_cp->own_addr_type = own_addr_type;
		ext_param_cp->filter_policy = filter_policy;

		plen = sizeof(*ext_param_cp);

		if (scan_1m(hdev) || scan_2m(hdev)) {
			ext_param_cp->scanning_phys |= LE_SCAN_PHY_1M;

			memset(phy_params, 0, sizeof(*phy_params));
			phy_params->type = type;
			phy_params->interval = cpu_to_le16(interval);
			phy_params->window = cpu_to_le16(window);

			plen += sizeof(*phy_params);
			phy_params++;
		}

		if (scan_coded(hdev)) {
			ext_param_cp->scanning_phys |= LE_SCAN_PHY_CODED;

			memset(phy_params, 0, sizeof(*phy_params));
			phy_params->type = type;
			phy_params->interval = cpu_to_le16(interval);
			phy_params->window = cpu_to_le16(window);

			plen += sizeof(*phy_params);
			phy_params++;
		}

		hci_req_add(req, HCI_OP_LE_SET_EXT_SCAN_PARAMS,
			    plen, ext_param_cp);

		memset(&ext_enable_cp, 0, sizeof(ext_enable_cp));
		ext_enable_cp.enable = LE_SCAN_ENABLE;
		ext_enable_cp.filter_dup = LE_SCAN_FILTER_DUP_ENABLE;

		hci_req_add(req, HCI_OP_LE_SET_EXT_SCAN_ENABLE,
			    sizeof(ext_enable_cp), &ext_enable_cp);
	} else {
		struct hci_cp_le_set_scan_param param_cp;
		struct hci_cp_le_set_scan_enable enable_cp;

		memset(&param_cp, 0, sizeof(param_cp));
		param_cp.type = type;
		param_cp.interval = cpu_to_le16(interval);
		param_cp.window = cpu_to_le16(window);
		param_cp.own_address_type = own_addr_type;
		param_cp.filter_policy = filter_policy;
		hci_req_add(req, HCI_OP_LE_SET_SCAN_PARAM, sizeof(param_cp),
			    &param_cp);

		memset(&enable_cp, 0, sizeof(enable_cp));
		enable_cp.enable = LE_SCAN_ENABLE;
		enable_cp.filter_dup = LE_SCAN_FILTER_DUP_ENABLE;
		hci_req_add(req, HCI_OP_LE_SET_SCAN_ENABLE, sizeof(enable_cp),
			    &enable_cp);
	}
}

/* Returns true if an le connection is in the scanning state */
static inline bool hci_is_le_conn_scanning(struct hci_dev *hdev)
{
	struct hci_conn_hash *h = &hdev->conn_hash;
	struct hci_conn *c;

	rcu_read_lock();

	list_for_each_entry_rcu(c, &h->list, list) {
		if (c->type == LE_LINK && c->state == BT_CONNECT &&
		    test_bit(HCI_CONN_SCANNING, &c->flags)) {
			rcu_read_unlock();
			return true;
		}
	}

	rcu_read_unlock();

	return false;
}

/* Ensure to call
 * hci_req_add_le_scan_disable() first to disable the
 * controller based address resolution to be able to reconfigure
 * resolving list.
 */
void hci_req_add_le_passive_scan(struct hci_request *req)
{
	struct hci_dev *hdev = req->hdev;
	u8 own_addr_type;
	u8 filter_policy;
	u16 window, interval;
	/* Background scanning should run with address resolution */
	bool addr_resolv = true;

	if (hdev->scanning_paused) {
		bt_dev_dbg(hdev, "Scanning is paused for suspend");
		return;
	}

	/* Set require_privacy to false since no SCAN_REQ are send
	 * during passive scanning. Not using an non-resolvable address
	 * here is important so that peer devices using direct
	 * advertising with our address will be correctly reported
	 * by the controller.
	 */
	if (hci_update_random_address(req, false, scan_use_rpa(hdev),
				      &own_addr_type))
		return;

	if (hdev->enable_advmon_interleave_scan &&
	    __hci_update_interleaved_scan(hdev))
		return;

	bt_dev_dbg(hdev, "interleave state %d", hdev->interleave_scan_state);
	/* Adding or removing entries from the white list must
	 * happen before enabling scanning. The controller does
	 * not allow white list modification while scanning.
	 */
	filter_policy = update_white_list(req);

	/* When the controller is using random resolvable addresses and
	 * with that having LE privacy enabled, then controllers with
	 * Extended Scanner Filter Policies support can now enable support
	 * for handling directed advertising.
	 *
	 * So instead of using filter polices 0x00 (no whitelist)
	 * and 0x01 (whitelist enabled) use the new filter policies
	 * 0x02 (no whitelist) and 0x03 (whitelist enabled).
	 */
	if (hci_dev_test_flag(hdev, HCI_PRIVACY) &&
	    (hdev->le_features[0] & HCI_LE_EXT_SCAN_POLICY))
		filter_policy |= 0x02;

	/* Pick scan window/interval for the current situation: suspend,
	 * pending LE connection, active ADV monitoring, or the defaults.
	 */
	if (hdev->suspended) {
		window = hdev->le_scan_window_suspend;
		interval = hdev->le_scan_int_suspend;

		set_bit(SUSPEND_SCAN_ENABLE, hdev->suspend_tasks);
	} else if (hci_is_le_conn_scanning(hdev)) {
		window = hdev->le_scan_window_connect;
		interval = hdev->le_scan_int_connect;
	} else if (hci_is_adv_monitoring(hdev)) {
		window = hdev->le_scan_window_adv_monitor;
		interval = hdev->le_scan_int_adv_monitor;
	} else {
		window = hdev->le_scan_window;
		interval = hdev->le_scan_interval;
	}

	bt_dev_dbg(hdev, "LE passive scan with whitelist = %d", filter_policy);
	hci_req_start_scan(req, LE_SCAN_PASSIVE, interval, window,
			   own_addr_type, filter_policy, addr_resolv);
}

/* Whether advertising instance @instance produces scan responses (and
 * therefore needs a scannable advertising type).
 */
static bool adv_instance_is_scannable(struct hci_dev *hdev, u8 instance)
{
	struct adv_info *adv_instance;

	/* Instance 0x00 always set local name */
	if (instance == 0x00)
		return true;

	adv_instance = hci_find_adv_instance(hdev, instance);
	if (!adv_instance)
		return false;

	if (adv_instance->flags & MGMT_ADV_FLAG_APPEARANCE ||
	    adv_instance->flags & MGMT_ADV_FLAG_LOCAL_NAME)
		return true;

	return adv_instance->scan_rsp_len ? true : false;
}

/* Queue a command that clears all controller event filters, then refresh
 * the page scan state.
 */
static void hci_req_clear_event_filter(struct hci_request *req)
{
	struct hci_cp_set_event_filter f;

	memset(&f, 0, sizeof(f));
	f.flt_type = HCI_FLT_CLEAR_ALL;
	hci_req_add(req, HCI_OP_SET_EVENT_FLT, 1, &f);

	/* Update page scan state (since we may have modified it when setting
	 * the event filter).
	 */
	__hci_req_update_scan(req);
}

/* Program connection-setup event filters for every wakeable device on the
 * whitelist and enable page scan if any filter was added (used on the
 * suspend path to allow remote wake-up).
 */
static void hci_req_set_event_filter(struct hci_request *req)
{
	struct bdaddr_list_with_flags *b;
	struct hci_cp_set_event_filter f;
	struct hci_dev *hdev = req->hdev;
	u8 scan = SCAN_DISABLED;

	/* Always clear event filter when starting */
	hci_req_clear_event_filter(req);

	list_for_each_entry(b, &hdev->whitelist, list) {
		if (!hci_conn_test_flag(HCI_CONN_FLAG_REMOTE_WAKEUP,
					b->current_flags))
			continue;

		memset(&f, 0, sizeof(f));
		bacpy(&f.addr_conn_flt.bdaddr, &b->bdaddr);
		f.flt_type = HCI_FLT_CONN_SETUP;
		f.cond_type = HCI_CONN_SETUP_ALLOW_BDADDR;
		f.addr_conn_flt.auto_accept = HCI_CONN_SETUP_AUTO_ON;

		bt_dev_dbg(hdev, "Adding event filters for %pMR", &b->bdaddr);
		hci_req_add(req, HCI_OP_SET_EVENT_FLT, sizeof(f), &f);
		scan = SCAN_PAGE;
	}

	if (scan)
		set_bit(SUSPEND_SCAN_ENABLE, hdev->suspend_tasks);
	else
		set_bit(SUSPEND_SCAN_DISABLE, hdev->suspend_tasks);

	hci_req_add(req, HCI_OP_WRITE_SCAN_ENABLE, 1, &scan);
}

/* Cancel the advertising-instance expiry timer, if armed. */
static void cancel_adv_timeout(struct hci_dev *hdev)
{
	if (hdev->adv_instance_timeout) {
		hdev->adv_instance_timeout = 0;
		cancel_delayed_work(&hdev->adv_instance_expire);
	}
}

/* This function requires the caller holds hdev->lock */
void __hci_req_pause_adv_instances(struct hci_request *req)
{
	bt_dev_dbg(req->hdev, "Pausing advertising instances");

	/* Call to disable any advertisements active on the controller.
	 * This will succeed even if no advertisements are configured.
	 */
	__hci_req_disable_advertising(req);

	/* If we are using software rotation, pause the loop */
	if (!ext_adv_capable(req->hdev))
		cancel_adv_timeout(req->hdev);
}

/* This function requires the caller holds hdev->lock */
static void __hci_req_resume_adv_instances(struct hci_request *req)
{
	struct adv_info *adv;

	bt_dev_dbg(req->hdev, "Resuming advertising instances");

	if (ext_adv_capable(req->hdev)) {
		/* Call for each tracked instance to be re-enabled */
		list_for_each_entry(adv, &req->hdev->adv_instances, list) {
			__hci_req_enable_ext_advertising(req,
							 adv->instance);
		}

	} else {
		/* Schedule for most recent instance to be restarted and begin
		 * the software rotation loop
		 */
		__hci_req_schedule_adv_instance(req,
						req->hdev->cur_adv_instance,
						true);
	}
}

/* This function requires the caller holds hdev->lock */
int hci_req_resume_adv_instances(struct hci_dev *hdev)
{
	struct hci_request req;

	hci_req_init(&req, hdev);
	__hci_req_resume_adv_instances(&req);

	return hci_req_run(&req, NULL);
}

/* Completion callback for the suspend/resume requests: clears the
 * corresponding suspend tasks and wakes the suspend waiter.
 */
static void suspend_req_complete(struct hci_dev *hdev, u8 status, u16 opcode)
{
	bt_dev_dbg(hdev, "Request complete opcode=0x%x, status=0x%x", opcode,
		   status);

	if (test_bit(SUSPEND_SCAN_ENABLE, hdev->suspend_tasks) ||
	    test_bit(SUSPEND_SCAN_DISABLE, hdev->suspend_tasks)) {
		clear_bit(SUSPEND_SCAN_ENABLE, hdev->suspend_tasks);
		clear_bit(SUSPEND_SCAN_DISABLE, hdev->suspend_tasks);
		wake_up(&hdev->suspend_wait_q);
	}

	if (test_bit(SUSPEND_SET_ADV_FILTER, hdev->suspend_tasks)) {
		clear_bit(SUSPEND_SET_ADV_FILTER, hdev->suspend_tasks);
		wake_up(&hdev->suspend_wait_q);
	}
}

/* Queue enable/disable of offloaded advertisement filtering (currently
 * only the MSFT extension); no-op for other offload types.
 */
static void hci_req_add_set_adv_filter_enable(struct hci_request *req,
					      bool enable)
{
	struct hci_dev *hdev = req->hdev;

	switch (hci_get_adv_monitor_offload_ext(hdev)) {
	case HCI_ADV_MONITOR_EXT_MSFT:
		msft_req_add_set_filter_enable(req, enable);
		break;
	default:
		return;
	}

	/* No need to block when enabling since it's on resume path */
	if (hdev->suspended && !enable)
		set_bit(SUSPEND_SET_ADV_FILTER, hdev->suspend_tasks);
}

/* Call with
 hci_dev_lock */
void hci_req_prepare_suspend(struct hci_dev *hdev, enum suspended_state next)
{
	int old_state;
	struct hci_conn *conn;
	struct hci_request req;
	u8 page_scan;
	int disconnect_counter;

	if (next == hdev->suspend_state) {
		bt_dev_dbg(hdev, "Same state before and after: %d", next);
		goto done;
	}

	hdev->suspend_state = next;
	hci_req_init(&req, hdev);

	if (next == BT_SUSPEND_DISCONNECT) {
		/* Mark device as suspended */
		hdev->suspended = true;

		/* Pause discovery if not already stopped */
		old_state = hdev->discovery.state;
		if (old_state != DISCOVERY_STOPPED) {
			set_bit(SUSPEND_PAUSE_DISCOVERY, hdev->suspend_tasks);
			hci_discovery_set_state(hdev, DISCOVERY_STOPPING);
			queue_work(hdev->req_workqueue, &hdev->discov_update);
		}

		hdev->discovery_paused = true;
		hdev->discovery_old_state = old_state;

		/* Stop directed advertising */
		old_state = hci_dev_test_flag(hdev, HCI_ADVERTISING);
		if (old_state) {
			set_bit(SUSPEND_PAUSE_ADVERTISING,
				hdev->suspend_tasks);
			cancel_delayed_work(&hdev->discov_off);
			queue_delayed_work(hdev->req_workqueue,
					   &hdev->discov_off, 0);
		}

		/* Pause other advertisements */
		if (hdev->adv_instance_cnt)
			__hci_req_pause_adv_instances(&req);

		hdev->advertising_paused = true;
		hdev->advertising_old_state = old_state;
		/* Disable page scan */
		page_scan = SCAN_DISABLED;
		hci_req_add(&req, HCI_OP_WRITE_SCAN_ENABLE, 1, &page_scan);

		/* Disable LE passive scan if enabled */
		if (hci_dev_test_flag(hdev, HCI_LE_SCAN)) {
			cancel_interleave_scan(hdev);
			hci_req_add_le_scan_disable(&req, false);
		}

		/* Disable advertisement filters */
		hci_req_add_set_adv_filter_enable(&req, false);

		/* Mark task needing completion */
		set_bit(SUSPEND_SCAN_DISABLE, hdev->suspend_tasks);

		/* Prevent disconnects from causing scanning to be re-enabled */
		hdev->scanning_paused = true;

		/* Run commands before disconnecting */
		hci_req_run(&req, suspend_req_complete);

		disconnect_counter = 0;
		/* Soft disconnect everything (power off) */
		list_for_each_entry(conn, &hdev->conn_hash.list, list) {
			hci_disconnect(conn, HCI_ERROR_REMOTE_POWER_OFF);
			disconnect_counter++;
		}

		if (disconnect_counter > 0) {
			bt_dev_dbg(hdev,
				   "Had %d disconnects. Will wait on them",
				   disconnect_counter);
			set_bit(SUSPEND_DISCONNECTING, hdev->suspend_tasks);
		}
	} else if (next == BT_SUSPEND_CONFIGURE_WAKE) {
		/* Unpause to take care of updating scanning params */
		hdev->scanning_paused = false;
		/* Enable event filter for paired devices */
		hci_req_set_event_filter(&req);
		/* Enable passive scan at lower duty cycle */
		__hci_update_background_scan(&req);
		/* Pause scan changes again. */
		hdev->scanning_paused = true;
		hci_req_run(&req, suspend_req_complete);
	} else {
		/* Resume path: undo everything the suspend path set up. */
		hdev->suspended = false;
		hdev->scanning_paused = false;

		hci_req_clear_event_filter(&req);
		/* Reset passive/background scanning to normal */
		__hci_update_background_scan(&req);
		/* Enable all of the advertisement filters */
		hci_req_add_set_adv_filter_enable(&req, true);

		/* Unpause directed advertising */
		hdev->advertising_paused = false;
		if (hdev->advertising_old_state) {
			set_bit(SUSPEND_UNPAUSE_ADVERTISING,
				hdev->suspend_tasks);
			hci_dev_set_flag(hdev, HCI_ADVERTISING);
			queue_work(hdev->req_workqueue,
				   &hdev->discoverable_update);
			hdev->advertising_old_state = 0;
		}

		/* Resume other advertisements */
		if (hdev->adv_instance_cnt)
			__hci_req_resume_adv_instances(&req);

		/* Unpause discovery */
		hdev->discovery_paused = false;
		if (hdev->discovery_old_state != DISCOVERY_STOPPED &&
		    hdev->discovery_old_state != DISCOVERY_STOPPING) {
			set_bit(SUSPEND_UNPAUSE_DISCOVERY,
				hdev->suspend_tasks);
			hci_discovery_set_state(hdev, DISCOVERY_STARTING);
			queue_work(hdev->req_workqueue, &hdev->discov_update);
		}

		hci_req_run(&req, suspend_req_complete);
	}

	hdev->suspend_state = next;

done:
	clear_bit(SUSPEND_PREPARE_NOTIFIER, hdev->suspend_tasks);
	wake_up(&hdev->suspend_wait_q);
}

/* Whether the currently selected advertising instance is scannable. */
static bool adv_cur_instance_is_scannable(struct hci_dev *hdev)
{
	return adv_instance_is_scannable(hdev, hdev->cur_adv_instance);
}

/* Queue the commands that turn off advertising (extended instance 0x00
 * or the legacy enable flag).
 */
void __hci_req_disable_advertising(struct hci_request *req)
{
	if (ext_adv_capable(req->hdev)) {
		__hci_req_disable_ext_adv_instance(req, 0x00);
	} else {
		u8 enable = 0x00;

		hci_req_add(req, HCI_OP_LE_SET_ADV_ENABLE, sizeof(enable),
			    &enable);
	}
}

/* Return the MGMT advertising flags for @instance; instance 0 is derived
 * from the global HCI flags, others come from the stored adv_info.
 */
static u32 get_adv_instance_flags(struct hci_dev *hdev, u8 instance)
{
	u32 flags;
	struct adv_info *adv_instance;

	if (instance == 0x00) {
		/* Instance 0 always manages the "Tx Power" and "Flags"
		 * fields
		 */
		flags = MGMT_ADV_FLAG_TX_POWER | MGMT_ADV_FLAG_MANAGED_FLAGS;

		/* For instance 0, the HCI_ADVERTISING_CONNECTABLE setting
		 * corresponds to the "connectable" instance flag.
		 */
		if (hci_dev_test_flag(hdev, HCI_ADVERTISING_CONNECTABLE))
			flags |= MGMT_ADV_FLAG_CONNECTABLE;

		if (hci_dev_test_flag(hdev, HCI_LIMITED_DISCOVERABLE))
			flags |= MGMT_ADV_FLAG_LIMITED_DISCOV;
		else if (hci_dev_test_flag(hdev, HCI_DISCOVERABLE))
			flags |= MGMT_ADV_FLAG_DISCOV;

		return flags;
	}

	adv_instance = hci_find_adv_instance(hdev, instance);

	/* Return 0 when we got an invalid instance identifier. */
	if (!adv_instance)
		return 0;

	return adv_instance->flags;
}

/* Decide whether advertising should use a resolvable private address,
 * based on the privacy mode and the instance's discoverable/bondable
 * combination.
 */
static bool adv_use_rpa(struct hci_dev *hdev, uint32_t flags)
{
	/* If privacy is not enabled don't use RPA */
	if (!hci_dev_test_flag(hdev, HCI_PRIVACY))
		return false;

	/* If basic privacy mode is enabled use RPA */
	if (!hci_dev_test_flag(hdev, HCI_LIMITED_PRIVACY))
		return true;

	/* If limited privacy mode is enabled don't use RPA if we're
	 * both discoverable and bondable.
	 */
	if ((flags & MGMT_ADV_FLAG_DISCOV) &&
	    hci_dev_test_flag(hdev, HCI_BONDABLE))
		return false;

	/* We're neither bondable nor discoverable in the limited
	 * privacy mode, therefore use RPA.
	 */
	return true;
}

/* Check the controller's supported-states bitmap (le_states) to decide
 * whether advertising of the requested kind is possible alongside the
 * current LE connections.
 */
static bool is_advertising_allowed(struct hci_dev *hdev, bool connectable)
{
	/* If there is no connection we are OK to advertise. */
	if (hci_conn_num(hdev, LE_LINK) == 0)
		return true;

	/* Check le_states if there is any connection in slave role. */
	if (hdev->conn_hash.le_num_slave > 0) {
		/* Slave connection state and non connectable mode bit 20.
*/ if (!connectable && !(hdev->le_states[2] & 0x10)) return false; /* Slave connection state and connectable mode bit 38 * and scannable bit 21. */ if (connectable && (!(hdev->le_states[4] & 0x40) || !(hdev->le_states[2] & 0x20))) return false; } /* Check le_states if there is any connection in master role. */ if (hci_conn_num(hdev, LE_LINK) != hdev->conn_hash.le_num_slave) { /* Master connection state and non connectable mode bit 18. */ if (!connectable && !(hdev->le_states[2] & 0x02)) return false; /* Master connection state and connectable mode bit 35 and * scannable 19. */ if (connectable && (!(hdev->le_states[4] & 0x08) || !(hdev->le_states[2] & 0x08))) return false; } return true; }
/* Build and queue legacy LE advertising parameters + enable for the current
 * instance. Silently returns if the LE-states table forbids advertising or the
 * random-address update fails; interval falls back to the device defaults when
 * no instance is selected.
 */
void __hci_req_enable_advertising(struct hci_request *req) { struct hci_dev *hdev = req->hdev; struct adv_info *adv_instance; struct hci_cp_le_set_adv_param cp; u8 own_addr_type, enable = 0x01; bool connectable; u16 adv_min_interval, adv_max_interval; u32 flags; flags = get_adv_instance_flags(hdev, hdev->cur_adv_instance); adv_instance = hci_find_adv_instance(hdev, hdev->cur_adv_instance); /* If the "connectable" instance flag was not set, then choose between * ADV_IND and ADV_NONCONN_IND based on the global connectable setting. */ connectable = (flags & MGMT_ADV_FLAG_CONNECTABLE) || mgmt_get_connectable(hdev); if (!is_advertising_allowed(hdev, connectable)) return; if (hci_dev_test_flag(hdev, HCI_LE_ADV)) __hci_req_disable_advertising(req); /* Clear the HCI_LE_ADV bit temporarily so that the * hci_update_random_address knows that it's safe to go ahead * and write a new random address. The flag will be set back on * as soon as the SET_ADV_ENABLE HCI command completes. */ hci_dev_clear_flag(hdev, HCI_LE_ADV); /* Set require_privacy to true only when non-connectable * advertising is used. In that case it is fine to use a * non-resolvable private address.
*/ if (hci_update_random_address(req, !connectable, adv_use_rpa(hdev, flags), &own_addr_type) < 0) return; memset(&cp, 0, sizeof(cp)); if (adv_instance) { adv_min_interval = adv_instance->min_interval; adv_max_interval = adv_instance->max_interval; } else { adv_min_interval = hdev->le_adv_min_interval; adv_max_interval = hdev->le_adv_max_interval; } if (connectable) { cp.type = LE_ADV_IND; } else { if (adv_cur_instance_is_scannable(hdev)) cp.type = LE_ADV_SCAN_IND; else cp.type = LE_ADV_NONCONN_IND; if (!hci_dev_test_flag(hdev, HCI_DISCOVERABLE) || hci_dev_test_flag(hdev, HCI_LIMITED_DISCOVERABLE)) { adv_min_interval = DISCOV_LE_FAST_ADV_INT_MIN; adv_max_interval = DISCOV_LE_FAST_ADV_INT_MAX; } } cp.min_interval = cpu_to_le16(adv_min_interval); cp.max_interval = cpu_to_le16(adv_max_interval); cp.own_address_type = own_addr_type; cp.channel_map = hdev->le_adv_channel_map; hci_req_add(req, HCI_OP_LE_SET_ADV_PARAM, sizeof(cp), &cp); hci_req_add(req, HCI_OP_LE_SET_ADV_ENABLE, sizeof(enable), &enable); }
/* Append the local device name to an EIR/AD buffer: prefer the complete name
 * when it fits, then the configured short name, then a truncated complete
 * name. Returns the updated total AD length (unchanged if no room/name).
 */
u8 append_local_name(struct hci_dev *hdev, u8 *ptr, u8 ad_len) { size_t short_len; size_t complete_len; /* no space left for name (+ NULL + type + len) */ if ((HCI_MAX_AD_LENGTH - ad_len) < HCI_MAX_SHORT_NAME_LENGTH + 3) return ad_len; /* use complete name if present and fits */ complete_len = strlen(hdev->dev_name); if (complete_len && complete_len <= HCI_MAX_SHORT_NAME_LENGTH) return eir_append_data(ptr, ad_len, EIR_NAME_COMPLETE, hdev->dev_name, complete_len + 1); /* use short name if present */ short_len = strlen(hdev->short_name); if (short_len) return eir_append_data(ptr, ad_len, EIR_NAME_SHORT, hdev->short_name, short_len + 1); /* use shortened full name if present, we already know that name * is longer than HCI_MAX_SHORT_NAME_LENGTH */ if (complete_len) { u8 name[HCI_MAX_SHORT_NAME_LENGTH + 1]; memcpy(name, hdev->dev_name, HCI_MAX_SHORT_NAME_LENGTH); name[HCI_MAX_SHORT_NAME_LENGTH] = '\0'; return eir_append_data(ptr, ad_len, EIR_NAME_SHORT, name, sizeof(name));
create_default_scan_rsp_data(hdev, cp.data); if (hdev->scan_rsp_data_len == len && !memcmp(cp.data, hdev->scan_rsp_data, len)) return; memcpy(hdev->scan_rsp_data, cp.data, sizeof(cp.data)); hdev->scan_rsp_data_len = len; cp.length = len; hci_req_add(req, HCI_OP_LE_SET_SCAN_RSP_DATA, sizeof(cp), &cp); } }
/* Assemble the advertising-data payload for @instance into @ptr: optional
 * Flags AD field (skipped when the instance data already carries one), the
 * instance's raw adv data, and an optional TX-power field. Returns the total
 * length written, or 0 for an unknown non-zero instance.
 * NOTE(review): no bounds check against the destination here — callers must
 * guarantee the instance data fits the cp.data buffer; verify at call sites.
 */
static u8 create_instance_adv_data(struct hci_dev *hdev, u8 instance, u8 *ptr) { struct adv_info *adv_instance = NULL; u8 ad_len = 0, flags = 0; u32 instance_flags; /* Return 0 when the current instance identifier is invalid. */ if (instance) { adv_instance = hci_find_adv_instance(hdev, instance); if (!adv_instance) return 0; } instance_flags = get_adv_instance_flags(hdev, instance); /* If instance already has the flags set skip adding it once * again. */ if (adv_instance && eir_get_data(adv_instance->adv_data, adv_instance->adv_data_len, EIR_FLAGS, NULL)) goto skip_flags; /* The Add Advertising command allows userspace to set both the general * and limited discoverable flags. */ if (instance_flags & MGMT_ADV_FLAG_DISCOV) flags |= LE_AD_GENERAL; if (instance_flags & MGMT_ADV_FLAG_LIMITED_DISCOV) flags |= LE_AD_LIMITED; if (!hci_dev_test_flag(hdev, HCI_BREDR_ENABLED)) flags |= LE_AD_NO_BREDR; if (flags || (instance_flags & MGMT_ADV_FLAG_MANAGED_FLAGS)) { /* If a discovery flag wasn't provided, simply use the global * settings. */ if (!flags) flags |= mgmt_get_adv_discov_flags(hdev); /* If flags would still be empty, then there is no need to * include the "Flags" AD field".
*/ if (flags) { ptr[0] = 0x02; ptr[1] = EIR_FLAGS; ptr[2] = flags; ad_len += 3; ptr += 3; } } skip_flags: if (adv_instance) { memcpy(ptr, adv_instance->adv_data, adv_instance->adv_data_len); ad_len += adv_instance->adv_data_len; ptr += adv_instance->adv_data_len; } if (instance_flags & MGMT_ADV_FLAG_TX_POWER) { s8 adv_tx_power; if (ext_adv_capable(hdev)) { if (adv_instance) adv_tx_power = adv_instance->tx_power; else adv_tx_power = hdev->adv_tx_power; } else { adv_tx_power = hdev->adv_tx_power; } /* Provide Tx Power only if we can provide a valid value for it */ if (adv_tx_power != HCI_TX_POWER_INVALID) { ptr[0] = 0x02; ptr[1] = EIR_TX_POWER; ptr[2] = (u8)adv_tx_power; ad_len += 3; ptr += 3; } } return ad_len; }
/* Queue an update of the controller's advertising data for @instance, using
 * the extended or legacy HCI command as supported. Skips the command entirely
 * if the newly assembled data matches what was last written (cached in hdev).
 */
void __hci_req_update_adv_data(struct hci_request *req, u8 instance) { struct hci_dev *hdev = req->hdev; u8 len; if (!hci_dev_test_flag(hdev, HCI_LE_ENABLED)) return; if (ext_adv_capable(hdev)) { struct hci_cp_le_set_ext_adv_data cp; memset(&cp, 0, sizeof(cp)); len = create_instance_adv_data(hdev, instance, cp.data); /* There's nothing to do if the data hasn't changed */ if (hdev->adv_data_len == len && memcmp(cp.data, hdev->adv_data, len) == 0) return; memcpy(hdev->adv_data, cp.data, sizeof(cp.data)); hdev->adv_data_len = len; cp.length = len; cp.handle = instance; cp.operation = LE_SET_ADV_DATA_OP_COMPLETE; cp.frag_pref = LE_SET_ADV_DATA_NO_FRAG; hci_req_add(req, HCI_OP_LE_SET_EXT_ADV_DATA, sizeof(cp), &cp); } else { struct hci_cp_le_set_adv_data cp; memset(&cp, 0, sizeof(cp)); len = create_instance_adv_data(hdev, instance, cp.data); /* There's nothing to do if the data hasn't changed */ if (hdev->adv_data_len == len && memcmp(cp.data, hdev->adv_data, len) == 0) return; memcpy(hdev->adv_data, cp.data, sizeof(cp.data)); hdev->adv_data_len = len; cp.length = len; hci_req_add(req, HCI_OP_LE_SET_ADV_DATA, sizeof(cp), &cp); } }
/* Synchronous wrapper: build a request that refreshes adv data for @instance
 * and run it.
 */
int hci_req_update_adv_data(struct hci_dev *hdev, u8 instance) { struct hci_request req; hci_req_init(&req, hdev);
__hci_req_update_adv_data(&req, instance); return hci_req_run(&req, NULL); } static void enable_addr_resolution_complete(struct hci_dev *hdev, u8 status, u16 opcode) { BT_DBG("%s status %u", hdev->name, status); }
/* Turn off LL address resolution on controllers that support/enabled it. */
void hci_req_disable_address_resolution(struct hci_dev *hdev) { struct hci_request req; __u8 enable = 0x00; if (!use_ll_privacy(hdev) && !hci_dev_test_flag(hdev, HCI_LL_RPA_RESOLUTION)) return; hci_req_init(&req, hdev); hci_req_add(&req, HCI_OP_LE_SET_ADDR_RESOLV_ENABLE, 1, &enable); hci_req_run(&req, enable_addr_resolution_complete); } static void adv_enable_complete(struct hci_dev *hdev, u8 status, u16 opcode) { bt_dev_dbg(hdev, "status %u", status); }
/* Restart advertising after it was implicitly stopped (e.g. by a connection):
 * reschedule the current instance, or re-enable instance 0 with fresh data.
 */
void hci_req_reenable_advertising(struct hci_dev *hdev) { struct hci_request req; if (!hci_dev_test_flag(hdev, HCI_ADVERTISING) && list_empty(&hdev->adv_instances)) return; hci_req_init(&req, hdev); if (hdev->cur_adv_instance) { __hci_req_schedule_adv_instance(&req, hdev->cur_adv_instance, true); } else { if (ext_adv_capable(hdev)) { __hci_req_start_ext_adv(&req, 0x00); } else { __hci_req_update_adv_data(&req, 0x00); __hci_req_update_scan_rsp_data(&req, 0x00); __hci_req_enable_advertising(&req); } } hci_req_run(&req, adv_enable_complete); }
/* Delayed-work handler: the active advertising instance's lifetime expired, so
 * remove it (and disable advertising if it was the last one).
 */
static void adv_timeout_expire(struct work_struct *work) { struct hci_dev *hdev = container_of(work, struct hci_dev, adv_instance_expire.work); struct hci_request req; u8 instance; bt_dev_dbg(hdev, ""); hci_dev_lock(hdev); hdev->adv_instance_timeout = 0; instance = hdev->cur_adv_instance; if (instance == 0x00) goto unlock; hci_req_init(&req, hdev); hci_req_clear_adv_instance(hdev, NULL, &req, instance, false); if (list_empty(&hdev->adv_instances)) __hci_req_disable_advertising(&req); hci_req_run(&req, NULL); unlock: hci_dev_unlock(hdev); }
/* Flip the interleaved-scan state machine (allowlist <-> no-filter) and
 * restart passive scanning with the new filter policy.
 */
static int hci_req_add_le_interleaved_scan(struct hci_request *req, unsigned long opt) { struct hci_dev *hdev = req->hdev; int ret = 0; hci_dev_lock(hdev); if (hci_dev_test_flag(hdev, HCI_LE_SCAN)) hci_req_add_le_scan_disable(req,
false); hci_req_add_le_passive_scan(req); switch (hdev->interleave_scan_state) { case INTERLEAVE_SCAN_ALLOWLIST: bt_dev_dbg(hdev, "next state: allowlist"); hdev->interleave_scan_state = INTERLEAVE_SCAN_NO_FILTER; break; case INTERLEAVE_SCAN_NO_FILTER: bt_dev_dbg(hdev, "next state: no filter"); hdev->interleave_scan_state = INTERLEAVE_SCAN_ALLOWLIST; break; case INTERLEAVE_SCAN_NONE: BT_ERR("unexpected error"); ret = -1; } hci_dev_unlock(hdev); return ret; }
/* Delayed-work handler driving the interleaved-scan cycle: picks the duration
 * for the current phase, toggles the phase via hci_req_sync(), and re-queues
 * itself while interleaving remains active.
 */
static void interleave_scan_work(struct work_struct *work) { struct hci_dev *hdev = container_of(work, struct hci_dev, interleave_scan.work); u8 status; unsigned long timeout; if (hdev->interleave_scan_state == INTERLEAVE_SCAN_ALLOWLIST) { timeout = msecs_to_jiffies(hdev->advmon_allowlist_duration); } else if (hdev->interleave_scan_state == INTERLEAVE_SCAN_NO_FILTER) { timeout = msecs_to_jiffies(hdev->advmon_no_filter_duration); } else { bt_dev_err(hdev, "unexpected error"); return; } hci_req_sync(hdev, hci_req_add_le_interleaved_scan, 0, HCI_CMD_TIMEOUT, &status); /* Don't continue interleaving if it was canceled */ if (is_interleave_scanning(hdev)) queue_delayed_work(hdev->req_workqueue, &hdev->interleave_scan, timeout); }
/* Select the own-address type and (when needed) a fresh random address for
 * advertising: an RPA when privacy demands it (regenerating on expiry and
 * arming the expiry timer), an NRPA for private non-connectable use, or the
 * public address. @rand_addr receives BDADDR_ANY when no random address is
 * required. Returns 0 on success or the smp_generate_rpa() error.
 */
int hci_get_random_address(struct hci_dev *hdev, bool require_privacy, bool use_rpa, struct adv_info *adv_instance, u8 *own_addr_type, bdaddr_t *rand_addr) { int err; bacpy(rand_addr, BDADDR_ANY); /* If privacy is enabled use a resolvable private address. If * current RPA has expired then generate a new one.
*/ if (use_rpa) { int to; /* If Controller supports LL Privacy use own address type is * 0x03 */ if (use_ll_privacy(hdev)) *own_addr_type = ADDR_LE_DEV_RANDOM_RESOLVED; else *own_addr_type = ADDR_LE_DEV_RANDOM; if (adv_instance) { if (!adv_instance->rpa_expired && !bacmp(&adv_instance->random_addr, &hdev->rpa)) return 0; adv_instance->rpa_expired = false; } else { if (!hci_dev_test_and_clear_flag(hdev, HCI_RPA_EXPIRED) && !bacmp(&hdev->random_addr, &hdev->rpa)) return 0; } err = smp_generate_rpa(hdev, hdev->irk, &hdev->rpa); if (err < 0) { bt_dev_err(hdev, "failed to generate new RPA"); return err; } bacpy(rand_addr, &hdev->rpa); to = msecs_to_jiffies(hdev->rpa_timeout * 1000); if (adv_instance) queue_delayed_work(hdev->workqueue, &adv_instance->rpa_expired_cb, to); else queue_delayed_work(hdev->workqueue, &hdev->rpa_expired, to); return 0; } /* In case of required privacy without resolvable private address, * use an non-resolvable private address. This is useful for * non-connectable advertising. */ if (require_privacy) { bdaddr_t nrpa; while (true) { /* The non-resolvable private address is generated * from random six bytes with the two most significant * bits cleared. */ get_random_bytes(&nrpa, 6); nrpa.b[5] &= 0x3f; /* The non-resolvable private address shall not be * equal to the public address. */ if (bacmp(&hdev->bdaddr, &nrpa)) break; } *own_addr_type = ADDR_LE_DEV_RANDOM; bacpy(rand_addr, &nrpa); return 0; } /* No privacy so use a public address.
*/ *own_addr_type = ADDR_LE_DEV_PUBLIC; return 0; }
/* Queue HCI LE Clear Advertising Sets (removes all ext-adv sets). */
void __hci_req_clear_ext_adv_sets(struct hci_request *req) { hci_req_add(req, HCI_OP_LE_CLEAR_ADV_SETS, 0, NULL); }
/* Build the LE Set Extended Advertising Parameters command for @instance
 * (event properties, intervals, PHYs, own-address handling), plus a
 * Set Adv Set Random Address command when a new random address is needed.
 * Returns 0 or a negative errno (-EINVAL unknown instance, -EPERM not
 * allowed by le_states, or the address-selection error).
 */
int __hci_req_setup_ext_adv_instance(struct hci_request *req, u8 instance) { struct hci_cp_le_set_ext_adv_params cp; struct hci_dev *hdev = req->hdev; bool connectable; u32 flags; bdaddr_t random_addr; u8 own_addr_type; int err; struct adv_info *adv_instance; bool secondary_adv; if (instance > 0) { adv_instance = hci_find_adv_instance(hdev, instance); if (!adv_instance) return -EINVAL; } else { adv_instance = NULL; } flags = get_adv_instance_flags(hdev, instance); /* If the "connectable" instance flag was not set, then choose between * ADV_IND and ADV_NONCONN_IND based on the global connectable setting. */ connectable = (flags & MGMT_ADV_FLAG_CONNECTABLE) || mgmt_get_connectable(hdev); if (!is_advertising_allowed(hdev, connectable)) return -EPERM; /* Set require_privacy to true only when non-connectable * advertising is used. In that case it is fine to use a * non-resolvable private address.
*/ err = hci_get_random_address(hdev, !connectable, adv_use_rpa(hdev, flags), adv_instance, &own_addr_type, &random_addr); if (err < 0) return err; memset(&cp, 0, sizeof(cp)); if (adv_instance) { hci_cpu_to_le24(adv_instance->min_interval, cp.min_interval); hci_cpu_to_le24(adv_instance->max_interval, cp.max_interval); cp.tx_power = adv_instance->tx_power; } else { hci_cpu_to_le24(hdev->le_adv_min_interval, cp.min_interval); hci_cpu_to_le24(hdev->le_adv_max_interval, cp.max_interval); cp.tx_power = HCI_ADV_TX_POWER_NO_PREFERENCE; } secondary_adv = (flags & MGMT_ADV_FLAG_SEC_MASK); if (connectable) { if (secondary_adv) cp.evt_properties = cpu_to_le16(LE_EXT_ADV_CONN_IND); else cp.evt_properties = cpu_to_le16(LE_LEGACY_ADV_IND); } else if (adv_instance_is_scannable(hdev, instance)) { if (secondary_adv) cp.evt_properties = cpu_to_le16(LE_EXT_ADV_SCAN_IND); else cp.evt_properties = cpu_to_le16(LE_LEGACY_ADV_SCAN_IND); } else { if (secondary_adv) cp.evt_properties = cpu_to_le16(LE_EXT_ADV_NON_CONN_IND); else cp.evt_properties = cpu_to_le16(LE_LEGACY_NONCONN_IND); } cp.own_addr_type = own_addr_type; cp.channel_map = hdev->le_adv_channel_map; cp.handle = instance; if (flags & MGMT_ADV_FLAG_SEC_2M) { cp.primary_phy = HCI_ADV_PHY_1M; cp.secondary_phy = HCI_ADV_PHY_2M; } else if (flags & MGMT_ADV_FLAG_SEC_CODED) { cp.primary_phy = HCI_ADV_PHY_CODED; cp.secondary_phy = HCI_ADV_PHY_CODED; } else { /* In all other cases use 1M */ cp.primary_phy = HCI_ADV_PHY_1M; cp.secondary_phy = HCI_ADV_PHY_1M; } hci_req_add(req, HCI_OP_LE_SET_EXT_ADV_PARAMS, sizeof(cp), &cp); if (own_addr_type == ADDR_LE_DEV_RANDOM && bacmp(&random_addr, BDADDR_ANY)) { struct hci_cp_le_set_adv_set_rand_addr cp; /* Check if random address need to be updated */ if (adv_instance) { if (!bacmp(&random_addr, &adv_instance->random_addr)) return 0; } else { if (!bacmp(&random_addr, &hdev->random_addr)) return 0; } memset(&cp, 0, sizeof(cp)); cp.handle = instance; bacpy(&cp.bdaddr, &random_addr); hci_req_add(req,
HCI_OP_LE_SET_ADV_SET_RAND_ADDR, sizeof(cp), &cp); } return 0; }
/* Queue LE Set Extended Advertising Enable for one set (@instance), letting
 * the controller enforce the per-set duration (units of 10 ms).
 * NOTE(review): the guard tests adv_instance->duration but the value fed to
 * the controller is adv_instance->timeout * MSEC_PER_SEC — these look like
 * different fields (duration in ms vs. timeout in s); confirm against the
 * Core spec / upstream history before relying on the duration behaviour.
 */
int __hci_req_enable_ext_advertising(struct hci_request *req, u8 instance) { struct hci_dev *hdev = req->hdev; struct hci_cp_le_set_ext_adv_enable *cp; struct hci_cp_ext_adv_set *adv_set; u8 data[sizeof(*cp) + sizeof(*adv_set) * 1]; struct adv_info *adv_instance; if (instance > 0) { adv_instance = hci_find_adv_instance(hdev, instance); if (!adv_instance) return -EINVAL; } else { adv_instance = NULL; } cp = (void *) data; adv_set = (void *) cp->data; memset(cp, 0, sizeof(*cp)); cp->enable = 0x01; cp->num_of_sets = 0x01; memset(adv_set, 0, sizeof(*adv_set)); adv_set->handle = instance; /* Set duration per instance since controller is responsible for * scheduling it. */ if (adv_instance && adv_instance->duration) { u16 duration = adv_instance->timeout * MSEC_PER_SEC; /* Time = N * 10 ms */ adv_set->duration = cpu_to_le16(duration / 10); } hci_req_add(req, HCI_OP_LE_SET_EXT_ADV_ENABLE, sizeof(*cp) + sizeof(*adv_set) * cp->num_of_sets, data); return 0; }
/* Queue LE Set Extended Advertising Enable with enable=0. Instance 0x00
 * means "disable all sets" (num_of_sets = 0 per the spec encoding).
 */
int __hci_req_disable_ext_adv_instance(struct hci_request *req, u8 instance) { struct hci_dev *hdev = req->hdev; struct hci_cp_le_set_ext_adv_enable *cp; struct hci_cp_ext_adv_set *adv_set; u8 data[sizeof(*cp) + sizeof(*adv_set) * 1]; u8 req_size; /* If request specifies an instance that doesn't exist, fail */ if (instance > 0 && !hci_find_adv_instance(hdev, instance)) return -EINVAL; memset(data, 0, sizeof(data)); cp = (void *)data; adv_set = (void *)cp->data; /* Instance 0x00 indicates all advertising instances will be disabled */ cp->num_of_sets = !!instance; cp->enable = 0x00; adv_set->handle = instance; req_size = sizeof(*cp) + sizeof(*adv_set) * cp->num_of_sets; hci_req_add(req, HCI_OP_LE_SET_EXT_ADV_ENABLE, req_size, data); return 0; }
/* Queue LE Remove Advertising Set for @instance (must exist if non-zero). */
int __hci_req_remove_ext_adv_instance(struct hci_request *req, u8 instance) { struct hci_dev *hdev = req->hdev; /* If request specifies an instance that doesn't exist, fail */ if (instance > 0 &&
!hci_find_adv_instance(hdev, instance)) return -EINVAL; hci_req_add(req, HCI_OP_LE_REMOVE_ADV_SET, sizeof(instance), &instance); return 0; }
/* Full ext-adv (re)start for @instance: disable if already known to the
 * controller, set parameters, refresh scan-response data, then enable.
 */
int __hci_req_start_ext_adv(struct hci_request *req, u8 instance) { struct hci_dev *hdev = req->hdev; struct adv_info *adv_instance = hci_find_adv_instance(hdev, instance); int err; /* If instance isn't pending, the chip knows about it, and it's safe to * disable */ if (adv_instance && !adv_instance->pending) __hci_req_disable_ext_adv_instance(req, instance); err = __hci_req_setup_ext_adv_instance(req, instance); if (err < 0) return err; __hci_req_update_scan_rsp_data(req, instance); __hci_req_enable_ext_advertising(req, instance); return 0; }
/* Make @instance the active advertising instance, accounting its remaining
 * lifetime and arming the legacy expiry timer. Returns 0 or -EPERM/-EBUSY/
 * -ENOENT when scheduling is not currently possible.
 */
int __hci_req_schedule_adv_instance(struct hci_request *req, u8 instance, bool force) { struct hci_dev *hdev = req->hdev; struct adv_info *adv_instance = NULL; u16 timeout; if (hci_dev_test_flag(hdev, HCI_ADVERTISING) || list_empty(&hdev->adv_instances)) return -EPERM; if (hdev->adv_instance_timeout) return -EBUSY; adv_instance = hci_find_adv_instance(hdev, instance); if (!adv_instance) return -ENOENT; /* A zero timeout means unlimited advertising. As long as there is * only one instance, duration should be ignored. We still set a timeout * in case further instances are being added later on. * * If the remaining lifetime of the instance is more than the duration * then the timeout corresponds to the duration, otherwise it will be * reduced to the remaining instance lifetime. */ if (adv_instance->timeout == 0 || adv_instance->duration <= adv_instance->remaining_time) timeout = adv_instance->duration; else timeout = adv_instance->remaining_time; /* The remaining time is being reduced unless the instance is being * advertised without time limit.
*/ if (adv_instance->timeout) adv_instance->remaining_time = adv_instance->remaining_time - timeout; /* Only use work for scheduling instances with legacy advertising */ if (!ext_adv_capable(hdev)) { hdev->adv_instance_timeout = timeout; queue_delayed_work(hdev->req_workqueue, &hdev->adv_instance_expire, msecs_to_jiffies(timeout * 1000)); } /* If we're just re-scheduling the same instance again then do not * execute any HCI commands. This happens when a single instance is * being advertised. */ if (!force && hdev->cur_adv_instance == instance && hci_dev_test_flag(hdev, HCI_LE_ADV)) return 0; hdev->cur_adv_instance = instance; if (ext_adv_capable(hdev)) { __hci_req_start_ext_adv(req, instance); } else { __hci_req_update_adv_data(req, instance); __hci_req_update_scan_rsp_data(req, instance); __hci_req_enable_advertising(req); } return 0; } /* For a single instance: * - force == true: The instance will be removed even when its remaining * lifetime is not zero. * - force == false: the instance will be deactivated but kept stored unless * the remaining lifetime is zero. * * For instance == 0x00: * - force == true: All instances will be removed regardless of their timeout * setting. * - force == false: Only instances that have a timeout will be removed. */ void hci_req_clear_adv_instance(struct hci_dev *hdev, struct sock *sk, struct hci_request *req, u8 instance, bool force) { struct adv_info *adv_instance, *n, *next_instance = NULL; int err; u8 rem_inst; /* Cancel any timeout concerning the removed instance(s). */ if (!instance || hdev->cur_adv_instance == instance) cancel_adv_timeout(hdev); /* Get the next instance to advertise BEFORE we remove * the current one. This can be the same instance again * if there is only one instance.
*/ if (instance && hdev->cur_adv_instance == instance) next_instance = hci_get_next_instance(hdev, instance); if (instance == 0x00) { list_for_each_entry_safe(adv_instance, n, &hdev->adv_instances, list) { if (!(force || adv_instance->timeout)) continue; rem_inst = adv_instance->instance; err = hci_remove_adv_instance(hdev, rem_inst); if (!err) mgmt_advertising_removed(sk, hdev, rem_inst); } } else { adv_instance = hci_find_adv_instance(hdev, instance); if (force || (adv_instance && adv_instance->timeout && !adv_instance->remaining_time)) { /* Don't advertise a removed instance. */ if (next_instance && next_instance->instance == instance) next_instance = NULL; err = hci_remove_adv_instance(hdev, instance); if (!err) mgmt_advertising_removed(sk, hdev, instance); } } if (!req || !hdev_is_powered(hdev) || hci_dev_test_flag(hdev, HCI_ADVERTISING)) return; if (next_instance && !ext_adv_capable(hdev)) __hci_req_schedule_adv_instance(req, next_instance->instance, false); }
/* Queue a random-address write, unless advertising / an LE connect attempt is
 * in flight — in that case defer by flagging the RPA as expired so the next
 * cycle retries.
 */
static void set_random_addr(struct hci_request *req, bdaddr_t *rpa) { struct hci_dev *hdev = req->hdev; /* If we're advertising or initiating an LE connection we can't * go ahead and change the random address at this time. This is * because the eventual initiator address used for the * subsequently created connection will be undefined (some * controllers use the new address and others the one we had * when the operation started). * * In this kind of scenario skip the update and let the random * address be updated at the next cycle. */ if (hci_dev_test_flag(hdev, HCI_LE_ADV) || hci_lookup_le_connect(hdev)) { bt_dev_dbg(hdev, "Deferring random address update"); hci_dev_set_flag(hdev, HCI_RPA_EXPIRED); return; } hci_req_add(req, HCI_OP_LE_SET_RANDOM_ADDR, 6, rpa); }
/* Pick the own-address type for the device and queue any random-address
 * update required: RPA under privacy, NRPA when privacy is required without
 * RPA, static address when forced/needed, otherwise the public address.
 * Returns 0 or the smp_generate_rpa() error.
 */
int hci_update_random_address(struct hci_request *req, bool require_privacy, bool use_rpa, u8 *own_addr_type) { struct hci_dev *hdev = req->hdev; int err; /* If privacy is enabled use a resolvable private address.
If * current RPA has expired or there is something else than * the current RPA in use, then generate a new one. */ if (use_rpa) { int to; /* If Controller supports LL Privacy use own address type is * 0x03 */ if (use_ll_privacy(hdev)) *own_addr_type = ADDR_LE_DEV_RANDOM_RESOLVED; else *own_addr_type = ADDR_LE_DEV_RANDOM; if (!hci_dev_test_and_clear_flag(hdev, HCI_RPA_EXPIRED) && !bacmp(&hdev->random_addr, &hdev->rpa)) return 0; err = smp_generate_rpa(hdev, hdev->irk, &hdev->rpa); if (err < 0) { bt_dev_err(hdev, "failed to generate new RPA"); return err; } set_random_addr(req, &hdev->rpa); to = msecs_to_jiffies(hdev->rpa_timeout * 1000); queue_delayed_work(hdev->workqueue, &hdev->rpa_expired, to); return 0; } /* In case of required privacy without resolvable private address, * use an non-resolvable private address. This is useful for active * scanning and non-connectable advertising. */ if (require_privacy) { bdaddr_t nrpa; while (true) { /* The non-resolvable private address is generated * from random six bytes with the two most significant * bits cleared. */ get_random_bytes(&nrpa, 6); nrpa.b[5] &= 0x3f; /* The non-resolvable private address shall not be * equal to the public address. */ if (bacmp(&hdev->bdaddr, &nrpa)) break; } *own_addr_type = ADDR_LE_DEV_RANDOM; set_random_addr(req, &nrpa); return 0; } /* If forcing static address is in use or there is no public * address use the static address as random address (but skip * the HCI command if the current random address is already the * static one. * * In case BR/EDR has been disabled on a dual-mode controller * and a static address has been configured, then use that * address instead of the public BR/EDR address.
*/ if (hci_dev_test_flag(hdev, HCI_FORCE_STATIC_ADDR) || !bacmp(&hdev->bdaddr, BDADDR_ANY) || (!hci_dev_test_flag(hdev, HCI_BREDR_ENABLED) && bacmp(&hdev->static_addr, BDADDR_ANY))) { *own_addr_type = ADDR_LE_DEV_RANDOM; if (bacmp(&hdev->static_addr, &hdev->random_addr)) hci_req_add(req, HCI_OP_LE_SET_RANDOM_ADDR, 6, &hdev->static_addr); return 0; } /* Neither privacy nor static address is being used so use a * public address. */ *own_addr_type = ADDR_LE_DEV_PUBLIC; return 0; }
/* True if any whitelisted BR/EDR device currently has no (fully established)
 * ACL connection — page scan must then stay on so it can reconnect.
 */
static bool disconnected_whitelist_entries(struct hci_dev *hdev) { struct bdaddr_list *b; list_for_each_entry(b, &hdev->whitelist, list) { struct hci_conn *conn; conn = hci_conn_hash_lookup_ba(hdev, ACL_LINK, &b->bdaddr); if (!conn) return true; if (conn->state != BT_CONNECTED && conn->state != BT_CONFIG) return true; } return false; }
/* Recompute the BR/EDR page/inquiry scan enable bits from the current device
 * state and queue a Write Scan Enable only when they actually change.
 */
void __hci_req_update_scan(struct hci_request *req) { struct hci_dev *hdev = req->hdev; u8 scan; if (!hci_dev_test_flag(hdev, HCI_BREDR_ENABLED)) return; if (!hdev_is_powered(hdev)) return; if (mgmt_powering_down(hdev)) return; if (hdev->scanning_paused) return; if (hci_dev_test_flag(hdev, HCI_CONNECTABLE) || disconnected_whitelist_entries(hdev)) scan = SCAN_PAGE; else scan = SCAN_DISABLED; if (hci_dev_test_flag(hdev, HCI_DISCOVERABLE)) scan |= SCAN_INQUIRY; if (test_bit(HCI_PSCAN, &hdev->flags) == !!(scan & SCAN_PAGE) && test_bit(HCI_ISCAN, &hdev->flags) == !!(scan & SCAN_INQUIRY)) return; hci_req_add(req, HCI_OP_WRITE_SCAN_ENABLE, 1, &scan); } static int update_scan(struct hci_request *req, unsigned long opt) { hci_dev_lock(req->hdev); __hci_req_update_scan(req); hci_dev_unlock(req->hdev); return 0; }
/* Workqueue entry point for scan-enable refresh. */
static void scan_update_work(struct work_struct *work) { struct hci_dev *hdev = container_of(work, struct hci_dev, scan_update); hci_req_sync(hdev, update_scan, 0, HCI_CMD_TIMEOUT, NULL); }
/* Apply a connectable-setting change: refresh scan enable, adv data/params
 * and background scanning as needed.
 */
static int connectable_update(struct hci_request *req, unsigned long opt) { struct hci_dev *hdev = req->hdev; hci_dev_lock(hdev); __hci_req_update_scan(req); /* If BR/EDR is
not enabled and we disable advertising as a * by-product of disabling connectable, we need to update the * advertising flags. */ if (!hci_dev_test_flag(hdev, HCI_BREDR_ENABLED)) __hci_req_update_adv_data(req, hdev->cur_adv_instance); /* Update the advertising parameters if necessary */ if (hci_dev_test_flag(hdev, HCI_ADVERTISING) || !list_empty(&hdev->adv_instances)) { if (ext_adv_capable(hdev)) __hci_req_start_ext_adv(req, hdev->cur_adv_instance); else __hci_req_enable_advertising(req); } __hci_update_background_scan(req); hci_dev_unlock(hdev); return 0; } static void connectable_update_work(struct work_struct *work) { struct hci_dev *hdev = container_of(work, struct hci_dev, connectable_update); u8 status; hci_req_sync(hdev, connectable_update, 0, HCI_CMD_TIMEOUT, &status); mgmt_set_connectable_complete(hdev, status); } static u8 get_service_classes(struct hci_dev *hdev) { struct bt_uuid *uuid; u8 val = 0; list_for_each_entry(uuid, &hdev->uuids, list) val |= uuid->svc_hint; return val; } void __hci_req_update_class(struct hci_request *req) { struct hci_dev *hdev = req->hdev; u8 cod[3]; bt_dev_dbg(hdev, ""); if (!hdev_is_powered(hdev)) return; if (!hci_dev_test_flag(hdev, HCI_BREDR_ENABLED)) return; if (hci_dev_test_flag(hdev, HCI_SERVICE_CACHE)) return; cod[0] = hdev->minor_class; cod[1] = hdev->major_class; cod[2] = get_service_classes(hdev); if (hci_dev_test_flag(hdev, HCI_LIMITED_DISCOVERABLE)) cod[1] |= 0x20; if (memcmp(cod, hdev->dev_class, 3) == 0) return; hci_req_add(req, HCI_OP_WRITE_CLASS_OF_DEV, sizeof(cod), cod); } static void write_iac(struct hci_request *req) { struct hci_dev *hdev = req->hdev; struct hci_cp_write_current_iac_lap cp; if (!hci_dev_test_flag(hdev, HCI_DISCOVERABLE)) return; if (hci_dev_test_flag(hdev, HCI_LIMITED_DISCOVERABLE)) { /* Limited discoverable mode */ cp.num_iac = min_t(u8, hdev->num_iac, 2); cp.iac_lap[0] = 0x00; /* LIAC */ cp.iac_lap[1] = 0x8b; cp.iac_lap[2] = 0x9e; cp.iac_lap[3] = 0x33; /* GIAC */ cp.iac_lap[4] = 0x8b; 
cp.iac_lap[5] = 0x9e; } else { /* General discoverable mode */ cp.num_iac = 1; cp.iac_lap[0] = 0x33; /* GIAC */ cp.iac_lap[1] = 0x8b; cp.iac_lap[2] = 0x9e; } hci_req_add(req, HCI_OP_WRITE_CURRENT_IAC_LAP, (cp.num_iac * 3) + 1, &cp); } static int discoverable_update(struct hci_request *req, unsigned long opt) { struct hci_dev *hdev = req->hdev; hci_dev_lock(hdev); if (hci_dev_test_flag(hdev, HCI_BREDR_ENABLED)) { write_iac(req); __hci_req_update_scan(req); __hci_req_update_class(req); } /* Advertising instances don't use the global discoverable setting, so * only update AD if advertising was enabled using Set Advertising. */ if (hci_dev_test_flag(hdev, HCI_ADVERTISING)) { __hci_req_update_adv_data(req, 0x00); /* Discoverable mode affects the local advertising * address in limited privacy mode. */ if (hci_dev_test_flag(hdev, HCI_LIMITED_PRIVACY)) { if (ext_adv_capable(hdev)) __hci_req_start_ext_adv(req, 0x00); else __hci_req_enable_advertising(req); } } hci_dev_unlock(hdev); return 0; } static void discoverable_update_work(struct work_struct *work) { struct hci_dev *hdev = container_of(work, struct hci_dev, discoverable_update); u8 status; hci_req_sync(hdev, discoverable_update, 0, HCI_CMD_TIMEOUT, &status); mgmt_set_discoverable_complete(hdev, status); } void __hci_abort_conn(struct hci_request *req, struct hci_conn *conn, u8 reason) { switch (conn->state) { case BT_CONNECTED: case BT_CONFIG: if (conn->type == AMP_LINK) { struct hci_cp_disconn_phy_link cp; cp.phy_handle = HCI_PHY_HANDLE(conn->handle); cp.reason = reason; hci_req_add(req, HCI_OP_DISCONN_PHY_LINK, sizeof(cp), &cp); } else { struct hci_cp_disconnect dc; dc.handle = cpu_to_le16(conn->handle); dc.reason = reason; hci_req_add(req, HCI_OP_DISCONNECT, sizeof(dc), &dc); } conn->state = BT_DISCONN; break; case BT_CONNECT: if (conn->type == LE_LINK) { if (test_bit(HCI_CONN_SCANNING, &conn->flags)) break; hci_req_add(req, HCI_OP_LE_CREATE_CONN_CANCEL, 0, NULL); } else if (conn->type == ACL_LINK) { if 
(req->hdev->hci_ver < BLUETOOTH_VER_1_2) break; hci_req_add(req, HCI_OP_CREATE_CONN_CANCEL, 6, &conn->dst); } break; case BT_CONNECT2: if (conn->type == ACL_LINK) { struct hci_cp_reject_conn_req rej; bacpy(&rej.bdaddr, &conn->dst); rej.reason = reason; hci_req_add(req, HCI_OP_REJECT_CONN_REQ, sizeof(rej), &rej); } else if (conn->type == SCO_LINK || conn->type == ESCO_LINK) { struct hci_cp_reject_sync_conn_req rej; bacpy(&rej.bdaddr, &conn->dst); /* SCO rejection has its own limited set of * allowed error values (0x0D-0x0F) which isn't * compatible with most values passed to this * function. To be safe hard-code one of the * values that's suitable for SCO. */ rej.reason = HCI_ERROR_REJ_LIMITED_RESOURCES; hci_req_add(req, HCI_OP_REJECT_SYNC_CONN_REQ, sizeof(rej), &rej); } break; default: conn->state = BT_CLOSED; break; } } static void abort_conn_complete(struct hci_dev *hdev, u8 status, u16 opcode) { if (status) bt_dev_dbg(hdev, "Failed to abort connection: status 0x%2.2x", status); } int hci_abort_conn(struct hci_conn *conn, u8 reason) { struct hci_request req; int err; hci_req_init(&req, conn->hdev); __hci_abort_conn(&req, conn, reason); err = hci_req_run(&req, abort_conn_complete); if (err && err != -ENODATA) { bt_dev_err(conn->hdev, "failed to run HCI request: err %d", err); return err; } return 0; } static int update_bg_scan(struct hci_request *req, unsigned long opt) { hci_dev_lock(req->hdev); __hci_update_background_scan(req); hci_dev_unlock(req->hdev); return 0; } static void bg_scan_update(struct work_struct *work) { struct hci_dev *hdev = container_of(work, struct hci_dev, bg_scan_update); struct hci_conn *conn; u8 status; int err; err = hci_req_sync(hdev, update_bg_scan, 0, HCI_CMD_TIMEOUT, &status); if (!err) return; hci_dev_lock(hdev); conn = hci_conn_hash_lookup_state(hdev, LE_LINK, BT_CONNECT); if (conn) hci_le_conn_failed(conn, status); hci_dev_unlock(hdev); } static int le_scan_disable(struct hci_request *req, unsigned long opt) { 
	hci_req_add_le_scan_disable(req, false);

	return 0;
}

/* Flush the inquiry cache and queue an HCI Inquiry command, using the
 * limited (LIAC) or general (GIAC) inquiry access code depending on
 * hdev->discovery.limited. @opt carries the inquiry length.
 */
static int bredr_inquiry(struct hci_request *req, unsigned long opt)
{
	u8 length = opt;
	const u8 giac[3] = { 0x33, 0x8b, 0x9e };
	const u8 liac[3] = { 0x00, 0x8b, 0x9e };
	struct hci_cp_inquiry cp;

	bt_dev_dbg(req->hdev, "");

	hci_dev_lock(req->hdev);
	hci_inquiry_cache_flush(req->hdev);
	hci_dev_unlock(req->hdev);

	memset(&cp, 0, sizeof(cp));

	if (req->hdev->discovery.limited)
		memcpy(&cp.lap, liac, sizeof(cp.lap));
	else
		memcpy(&cp.lap, giac, sizeof(cp.lap));

	cp.length = length;

	hci_req_add(req, HCI_OP_INQUIRY, sizeof(cp), &cp);

	return 0;
}

/* Delayed work: disable the running LE scan (cancelling any pending
 * scan restart first) and then continue or stop discovery as needed.
 */
static void le_scan_disable_work(struct work_struct *work)
{
	struct hci_dev *hdev = container_of(work, struct hci_dev,
					    le_scan_disable.work);
	u8 status;

	bt_dev_dbg(hdev, "");

	if (!hci_dev_test_flag(hdev, HCI_LE_SCAN))
		return;

	cancel_delayed_work(&hdev->le_scan_restart);

	hci_req_sync(hdev, le_scan_disable, 0, HCI_CMD_TIMEOUT, &status);
	if (status) {
		bt_dev_err(hdev, "failed to disable LE scan: status 0x%02x",
			   status);
		return;
	}

	hdev->discovery.scan_start = 0;

	/* If we were running LE only scan, change discovery state. If
	 * we were running both LE and BR/EDR inquiry simultaneously,
	 * and BR/EDR inquiry is already finished, stop discovery,
	 * otherwise BR/EDR inquiry will stop discovery when finished.
	 * If we will resolve remote device name, do not change
	 * discovery state.
*/ if (hdev->discovery.type == DISCOV_TYPE_LE) goto discov_stopped; if (hdev->discovery.type != DISCOV_TYPE_INTERLEAVED) return; if (test_bit(HCI_QUIRK_SIMULTANEOUS_DISCOVERY, &hdev->quirks)) { if (!test_bit(HCI_INQUIRY, &hdev->flags) && hdev->discovery.state != DISCOVERY_RESOLVING) goto discov_stopped; return; } hci_req_sync(hdev, bredr_inquiry, DISCOV_INTERLEAVED_INQUIRY_LEN, HCI_CMD_TIMEOUT, &status); if (status) { bt_dev_err(hdev, "inquiry failed: status 0x%02x", status); goto discov_stopped; } return; discov_stopped: hci_dev_lock(hdev); hci_discovery_set_state(hdev, DISCOVERY_STOPPED); hci_dev_unlock(hdev); } static int le_scan_restart(struct hci_request *req, unsigned long opt) { struct hci_dev *hdev = req->hdev; /* If controller is not scanning we are done. */ if (!hci_dev_test_flag(hdev, HCI_LE_SCAN)) return 0; if (hdev->scanning_paused) { bt_dev_dbg(hdev, "Scanning is paused for suspend"); return 0; } hci_req_add_le_scan_disable(req, false); if (use_ext_scan(hdev)) { struct hci_cp_le_set_ext_scan_enable ext_enable_cp; memset(&ext_enable_cp, 0, sizeof(ext_enable_cp)); ext_enable_cp.enable = LE_SCAN_ENABLE; ext_enable_cp.filter_dup = LE_SCAN_FILTER_DUP_ENABLE; hci_req_add(req, HCI_OP_LE_SET_EXT_SCAN_ENABLE, sizeof(ext_enable_cp), &ext_enable_cp); } else { struct hci_cp_le_set_scan_enable cp; memset(&cp, 0, sizeof(cp)); cp.enable = LE_SCAN_ENABLE; cp.filter_dup = LE_SCAN_FILTER_DUP_ENABLE; hci_req_add(req, HCI_OP_LE_SET_SCAN_ENABLE, sizeof(cp), &cp); } return 0; } static void le_scan_restart_work(struct work_struct *work) { struct hci_dev *hdev = container_of(work, struct hci_dev, le_scan_restart.work); unsigned long timeout, duration, scan_start, now; u8 status; bt_dev_dbg(hdev, ""); hci_req_sync(hdev, le_scan_restart, 0, HCI_CMD_TIMEOUT, &status); if (status) { bt_dev_err(hdev, "failed to restart LE scan: status %d", status); return; } hci_dev_lock(hdev); if (!test_bit(HCI_QUIRK_STRICT_DUPLICATE_FILTER, &hdev->quirks) || !hdev->discovery.scan_start) goto 
unlock; /* When the scan was started, hdev->le_scan_disable has been queued * after duration from scan_start. During scan restart this job * has been canceled, and we need to queue it again after proper * timeout, to make sure that scan does not run indefinitely. */ duration = hdev->discovery.scan_duration; scan_start = hdev->discovery.scan_start; now = jiffies; if (now - scan_start <= duration) { int elapsed; if (now >= scan_start) elapsed = now - scan_start; else elapsed = ULONG_MAX - scan_start + now; timeout = duration - elapsed; } else { timeout = 0; } queue_delayed_work(hdev->req_workqueue, &hdev->le_scan_disable, timeout); unlock: hci_dev_unlock(hdev); } static int active_scan(struct hci_request *req, unsigned long opt) { uint16_t interval = opt; struct hci_dev *hdev = req->hdev; u8 own_addr_type; /* White list is not used for discovery */ u8 filter_policy = 0x00; /* Discovery doesn't require controller address resolution */ bool addr_resolv = false; int err; bt_dev_dbg(hdev, ""); /* If controller is scanning, it means the background scanning is * running. Thus, we should temporarily stop it in order to set the * discovery scanning parameters. */ if (hci_dev_test_flag(hdev, HCI_LE_SCAN)) { hci_req_add_le_scan_disable(req, false); cancel_interleave_scan(hdev); } /* All active scans will be done with either a resolvable private * address (when privacy feature has been enabled) or non-resolvable * private address. 
*/ err = hci_update_random_address(req, true, scan_use_rpa(hdev), &own_addr_type); if (err < 0) own_addr_type = ADDR_LE_DEV_PUBLIC; hci_req_start_scan(req, LE_SCAN_ACTIVE, interval, hdev->le_scan_window_discovery, own_addr_type, filter_policy, addr_resolv); return 0; } static int interleaved_discov(struct hci_request *req, unsigned long opt) { int err; bt_dev_dbg(req->hdev, ""); err = active_scan(req, opt); if (err) return err; return bredr_inquiry(req, DISCOV_BREDR_INQUIRY_LEN); } static void start_discovery(struct hci_dev *hdev, u8 *status) { unsigned long timeout; bt_dev_dbg(hdev, "type %u", hdev->discovery.type); switch (hdev->discovery.type) { case DISCOV_TYPE_BREDR: if (!hci_dev_test_flag(hdev, HCI_INQUIRY)) hci_req_sync(hdev, bredr_inquiry, DISCOV_BREDR_INQUIRY_LEN, HCI_CMD_TIMEOUT, status); return; case DISCOV_TYPE_INTERLEAVED: /* When running simultaneous discovery, the LE scanning time * should occupy the whole discovery time sine BR/EDR inquiry * and LE scanning are scheduled by the controller. * * For interleaving discovery in comparison, BR/EDR inquiry * and LE scanning are done sequentially with separate * timeouts. */ if (test_bit(HCI_QUIRK_SIMULTANEOUS_DISCOVERY, &hdev->quirks)) { timeout = msecs_to_jiffies(DISCOV_LE_TIMEOUT); /* During simultaneous discovery, we double LE scan * interval. We must leave some time for the controller * to do BR/EDR inquiry. 
*/ hci_req_sync(hdev, interleaved_discov, hdev->le_scan_int_discovery * 2, HCI_CMD_TIMEOUT, status); break; } timeout = msecs_to_jiffies(hdev->discov_interleaved_timeout); hci_req_sync(hdev, active_scan, hdev->le_scan_int_discovery, HCI_CMD_TIMEOUT, status); break; case DISCOV_TYPE_LE: timeout = msecs_to_jiffies(DISCOV_LE_TIMEOUT); hci_req_sync(hdev, active_scan, hdev->le_scan_int_discovery, HCI_CMD_TIMEOUT, status); break; default: *status = HCI_ERROR_UNSPECIFIED; return; } if (*status) return; bt_dev_dbg(hdev, "timeout %u ms", jiffies_to_msecs(timeout)); /* When service discovery is used and the controller has a * strict duplicate filter, it is important to remember the * start and duration of the scan. This is required for * restarting scanning during the discovery phase. */ if (test_bit(HCI_QUIRK_STRICT_DUPLICATE_FILTER, &hdev->quirks) && hdev->discovery.result_filtering) { hdev->discovery.scan_start = jiffies; hdev->discovery.scan_duration = timeout; } queue_delayed_work(hdev->req_workqueue, &hdev->le_scan_disable, timeout); } bool hci_req_stop_discovery(struct hci_request *req) { struct hci_dev *hdev = req->hdev; struct discovery_state *d = &hdev->discovery; struct hci_cp_remote_name_req_cancel cp; struct inquiry_entry *e; bool ret = false; bt_dev_dbg(hdev, "state %u", hdev->discovery.state); if (d->state == DISCOVERY_FINDING || d->state == DISCOVERY_STOPPING) { if (test_bit(HCI_INQUIRY, &hdev->flags)) hci_req_add(req, HCI_OP_INQUIRY_CANCEL, 0, NULL); if (hci_dev_test_flag(hdev, HCI_LE_SCAN)) { cancel_delayed_work(&hdev->le_scan_disable); hci_req_add_le_scan_disable(req, false); } ret = true; } else { /* Passive scanning */ if (hci_dev_test_flag(hdev, HCI_LE_SCAN)) { hci_req_add_le_scan_disable(req, false); ret = true; } } /* No further actions needed for LE-only discovery */ if (d->type == DISCOV_TYPE_LE) return ret; if (d->state == DISCOVERY_RESOLVING || d->state == DISCOVERY_STOPPING) { e = hci_inquiry_cache_lookup_resolve(hdev, BDADDR_ANY, NAME_PENDING); 
		if (!e)
			return ret;

		bacpy(&cp.bdaddr, &e->data.bdaddr);
		hci_req_add(req, HCI_OP_REMOTE_NAME_REQ_CANCEL, sizeof(cp),
			    &cp);
		ret = true;
	}

	return ret;
}

/* Request callback: stop any ongoing discovery while holding hdev->lock. */
static int stop_discovery(struct hci_request *req, unsigned long opt)
{
	hci_dev_lock(req->hdev);
	hci_req_stop_discovery(req);
	hci_dev_unlock(req->hdev);

	return 0;
}

/* Work item driving the discovery state machine: start or stop discovery
 * based on hdev->discovery.state, report completion to mgmt and update
 * the discovery state accordingly.
 */
static void discov_update(struct work_struct *work)
{
	struct hci_dev *hdev = container_of(work, struct hci_dev,
					    discov_update);
	u8 status = 0;

	switch (hdev->discovery.state) {
	case DISCOVERY_STARTING:
		start_discovery(hdev, &status);
		mgmt_start_discovery_complete(hdev, status);
		if (status)
			hci_discovery_set_state(hdev, DISCOVERY_STOPPED);
		else
			hci_discovery_set_state(hdev, DISCOVERY_FINDING);
		break;
	case DISCOVERY_STOPPING:
		hci_req_sync(hdev, stop_discovery, 0, HCI_CMD_TIMEOUT,
			     &status);
		mgmt_stop_discovery_complete(hdev, status);
		if (!status)
			hci_discovery_set_state(hdev, DISCOVERY_STOPPED);
		break;
	case DISCOVERY_STOPPED:
	default:
		return;
	}
}

/* Delayed work: clear the discoverable flags once the discoverable
 * timeout expires and push the new settings to the controller.
 */
static void discov_off(struct work_struct *work)
{
	struct hci_dev *hdev = container_of(work, struct hci_dev,
					    discov_off.work);

	bt_dev_dbg(hdev, "");

	hci_dev_lock(hdev);

	/* When discoverable timeout triggers, then just make sure
	 * the limited discoverable flag is cleared. Even in the case
	 * of a timeout triggered from general discoverable, it is
	 * safe to unconditionally clear the flag.
*/ hci_dev_clear_flag(hdev, HCI_LIMITED_DISCOVERABLE); hci_dev_clear_flag(hdev, HCI_DISCOVERABLE); hdev->discov_timeout = 0; hci_dev_unlock(hdev); hci_req_sync(hdev, discoverable_update, 0, HCI_CMD_TIMEOUT, NULL); mgmt_new_settings(hdev); } static int powered_update_hci(struct hci_request *req, unsigned long opt) { struct hci_dev *hdev = req->hdev; u8 link_sec; hci_dev_lock(hdev); if (hci_dev_test_flag(hdev, HCI_SSP_ENABLED) && !lmp_host_ssp_capable(hdev)) { u8 mode = 0x01; hci_req_add(req, HCI_OP_WRITE_SSP_MODE, sizeof(mode), &mode); if (bredr_sc_enabled(hdev) && !lmp_host_sc_capable(hdev)) { u8 support = 0x01; hci_req_add(req, HCI_OP_WRITE_SC_SUPPORT, sizeof(support), &support); } } if (hci_dev_test_flag(hdev, HCI_LE_ENABLED) && lmp_bredr_capable(hdev)) { struct hci_cp_write_le_host_supported cp; cp.le = 0x01; cp.simul = 0x00; /* Check first if we already have the right * host state (host features set) */ if (cp.le != lmp_host_le_capable(hdev) || cp.simul != lmp_host_le_br_capable(hdev)) hci_req_add(req, HCI_OP_WRITE_LE_HOST_SUPPORTED, sizeof(cp), &cp); } if (hci_dev_test_flag(hdev, HCI_LE_ENABLED)) { /* Make sure the controller has a good default for * advertising data. This also applies to the case * where BR/EDR was toggled during the AUTO_OFF phase. 
*/ if (hci_dev_test_flag(hdev, HCI_ADVERTISING) || list_empty(&hdev->adv_instances)) { int err; if (ext_adv_capable(hdev)) { err = __hci_req_setup_ext_adv_instance(req, 0x00); if (!err) __hci_req_update_scan_rsp_data(req, 0x00); } else { err = 0; __hci_req_update_adv_data(req, 0x00); __hci_req_update_scan_rsp_data(req, 0x00); } if (hci_dev_test_flag(hdev, HCI_ADVERTISING)) { if (!ext_adv_capable(hdev)) __hci_req_enable_advertising(req); else if (!err) __hci_req_enable_ext_advertising(req, 0x00); } } else if (!list_empty(&hdev->adv_instances)) { struct adv_info *adv_instance; adv_instance = list_first_entry(&hdev->adv_instances, struct adv_info, list); __hci_req_schedule_adv_instance(req, adv_instance->instance, true); } } link_sec = hci_dev_test_flag(hdev, HCI_LINK_SECURITY); if (link_sec != test_bit(HCI_AUTH, &hdev->flags)) hci_req_add(req, HCI_OP_WRITE_AUTH_ENABLE, sizeof(link_sec), &link_sec); if (lmp_bredr_capable(hdev)) { if (hci_dev_test_flag(hdev, HCI_FAST_CONNECTABLE)) __hci_req_write_fast_connectable(req, true); else __hci_req_write_fast_connectable(req, false); __hci_req_update_scan(req); __hci_req_update_class(req); __hci_req_update_name(req); __hci_req_update_eir(req); } hci_dev_unlock(hdev); return 0; } int __hci_req_hci_power_on(struct hci_dev *hdev) { /* Register the available SMP channels (BR/EDR and LE) only when * successfully powering on the controller. This late * registration is required so that LE SMP can clearly decide if * the public address or static address is used. 
	 */
	smp_register(hdev);

	return __hci_req_sync(hdev, powered_update_hci, 0, HCI_CMD_TIMEOUT,
			      NULL);
}

/* Initialize all request-related work items for a newly set up hdev. */
void hci_request_setup(struct hci_dev *hdev)
{
	INIT_WORK(&hdev->discov_update, discov_update);
	INIT_WORK(&hdev->bg_scan_update, bg_scan_update);
	INIT_WORK(&hdev->scan_update, scan_update_work);
	INIT_WORK(&hdev->connectable_update, connectable_update_work);
	INIT_WORK(&hdev->discoverable_update, discoverable_update_work);
	INIT_DELAYED_WORK(&hdev->discov_off, discov_off);
	INIT_DELAYED_WORK(&hdev->le_scan_disable, le_scan_disable_work);
	INIT_DELAYED_WORK(&hdev->le_scan_restart, le_scan_restart_work);
	INIT_DELAYED_WORK(&hdev->adv_instance_expire, adv_timeout_expire);
	INIT_DELAYED_WORK(&hdev->interleave_scan, interleave_scan_work);
}

/* Cancel any pending synchronous request and flush all request-related
 * work items; used when the hdev is going away.
 */
void hci_request_cancel_all(struct hci_dev *hdev)
{
	hci_req_sync_cancel(hdev, ENODEV);

	cancel_work_sync(&hdev->discov_update);
	cancel_work_sync(&hdev->bg_scan_update);
	cancel_work_sync(&hdev->scan_update);
	cancel_work_sync(&hdev->connectable_update);
	cancel_work_sync(&hdev->discoverable_update);
	cancel_delayed_work_sync(&hdev->discov_off);
	cancel_delayed_work_sync(&hdev->le_scan_disable);
	cancel_delayed_work_sync(&hdev->le_scan_restart);

	/* Only cancel the expire work if an advertising instance timeout
	 * is actually armed; clear the timeout so it is not re-used.
	 */
	if (hdev->adv_instance_timeout) {
		cancel_delayed_work_sync(&hdev->adv_instance_expire);
		hdev->adv_instance_timeout = 0;
	}

	cancel_interleave_scan(hdev);
}
/* BlueZ - Bluetooth protocol stack for Linux Copyright (C) 2014 Intel Corporation This program is free software; you can redistribute it and/or modify it under the terms of the GNU General Public License version 2 as published by the Free Software Foundation; THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT OF THIRD PARTY RIGHTS. IN NO EVENT SHALL THE COPYRIGHT HOLDER(S) AND AUTHOR(S) BE LIABLE FOR ANY CLAIM, OR ANY SPECIAL INDIRECT OR CONSEQUENTIAL DAMAGES, OR ANY DAMAGES WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE. ALL LIABILITY, INCLUDING LIABILITY FOR INFRINGEMENT OF ANY PATENTS, COPYRIGHTS, TRADEMARKS OR OTHER RIGHTS, RELATING TO USE OF THIS SOFTWARE IS DISCLAIMED. */ #include <linux/sched/signal.h> #include <net/bluetooth/bluetooth.h> #include <net/bluetooth/hci_core.h> #include <net/bluetooth/mgmt.h> #include "smp.h" #include "hci_request.h" #include "msft.h" #define HCI_REQ_DONE 0 #define HCI_REQ_PEND 1 #define HCI_REQ_CANCELED 2 void hci_req_init(struct hci_request *req, struct hci_dev *hdev) { skb_queue_head_init(&req->cmd_q); req->hdev = hdev; req->err = 0; } void hci_req_purge(struct hci_request *req) { skb_queue_purge(&req->cmd_q); } bool hci_req_status_pend(struct hci_dev *hdev) { return hdev->req_status == HCI_REQ_PEND; } static int req_run(struct hci_request *req, hci_req_complete_t complete, hci_req_complete_skb_t complete_skb) { struct hci_dev *hdev = req->hdev; struct sk_buff *skb; unsigned long flags; bt_dev_dbg(hdev, "length %u", skb_queue_len(&req->cmd_q)); /* If an error occurred during request building, remove all HCI * commands queued on the HCI request queue. 
	 */
	if (req->err) {
		skb_queue_purge(&req->cmd_q);
		return req->err;
	}

	/* Do not allow empty requests */
	if (skb_queue_empty(&req->cmd_q))
		return -ENODATA;

	skb = skb_peek_tail(&req->cmd_q);
	if (complete) {
		bt_cb(skb)->hci.req_complete = complete;
	} else if (complete_skb) {
		bt_cb(skb)->hci.req_complete_skb = complete_skb;
		bt_cb(skb)->hci.req_flags |= HCI_REQ_SKB;
	}

	/* Splice the built command queue onto the device command queue
	 * and kick the command worker.
	 */
	spin_lock_irqsave(&hdev->cmd_q.lock, flags);
	skb_queue_splice_tail(&req->cmd_q, &hdev->cmd_q);
	spin_unlock_irqrestore(&hdev->cmd_q.lock, flags);

	queue_work(hdev->workqueue, &hdev->cmd_work);

	return 0;
}

/* Run the request with a plain completion callback. */
int hci_req_run(struct hci_request *req, hci_req_complete_t complete)
{
	return req_run(req, complete, NULL);
}

/* Run the request with a callback that also receives the response skb. */
int hci_req_run_skb(struct hci_request *req, hci_req_complete_skb_t complete)
{
	return req_run(req, NULL, complete);
}

/* Completion callback for synchronous requests: store the result (and
 * response skb, if any) and wake up the waiter.
 */
static void hci_req_sync_complete(struct hci_dev *hdev, u8 result, u16 opcode,
				  struct sk_buff *skb)
{
	bt_dev_dbg(hdev, "result 0x%2.2x", result);

	if (hdev->req_status == HCI_REQ_PEND) {
		hdev->req_result = result;
		hdev->req_status = HCI_REQ_DONE;
		if (skb)
			hdev->req_skb = skb_get(skb);
		wake_up_interruptible(&hdev->req_wait_q);
	}
}

/* Abort a pending synchronous request with @err and wake the waiter. */
void hci_req_sync_cancel(struct hci_dev *hdev, int err)
{
	bt_dev_dbg(hdev, "err 0x%2.2x", err);

	if (hdev->req_status == HCI_REQ_PEND) {
		hdev->req_result = err;
		hdev->req_status = HCI_REQ_CANCELED;
		wake_up_interruptible(&hdev->req_wait_q);
	}
}

/* Send a single HCI command (optionally waiting for a specific @event)
 * and block until it completes, is cancelled, or @timeout expires.
 * Returns the response skb or an ERR_PTR on failure.
 */
struct sk_buff *__hci_cmd_sync_ev(struct hci_dev *hdev, u16 opcode, u32 plen,
				  const void *param, u8 event, u32 timeout)
{
	struct hci_request req;
	struct sk_buff *skb;
	int err = 0;

	bt_dev_dbg(hdev, "");

	hci_req_init(&req, hdev);

	hci_req_add_ev(&req, opcode, plen, param, event);

	hdev->req_status = HCI_REQ_PEND;

	err = hci_req_run_skb(&req, hci_req_sync_complete);
	if (err < 0)
		return ERR_PTR(err);

	err = wait_event_interruptible_timeout(hdev->req_wait_q,
					       hdev->req_status != HCI_REQ_PEND,
					       timeout);

	if (err == -ERESTARTSYS)
		return ERR_PTR(-EINTR);

	switch (hdev->req_status) {
	case HCI_REQ_DONE:
		err =
-bt_to_errno(hdev->req_result); break; case HCI_REQ_CANCELED: err = -hdev->req_result; break; default: err = -ETIMEDOUT; break; } hdev->req_status = hdev->req_result = 0; skb = hdev->req_skb; hdev->req_skb = NULL; bt_dev_dbg(hdev, "end: err %d", err); if (err < 0) { kfree_skb(skb); return ERR_PTR(err); } if (!skb) return ERR_PTR(-ENODATA); return skb; } EXPORT_SYMBOL(__hci_cmd_sync_ev); struct sk_buff *__hci_cmd_sync(struct hci_dev *hdev, u16 opcode, u32 plen, const void *param, u32 timeout) { return __hci_cmd_sync_ev(hdev, opcode, plen, param, 0, timeout); } EXPORT_SYMBOL(__hci_cmd_sync); /* Execute request and wait for completion. */ int __hci_req_sync(struct hci_dev *hdev, int (*func)(struct hci_request *req, unsigned long opt), unsigned long opt, u32 timeout, u8 *hci_status) { struct hci_request req; int err = 0; bt_dev_dbg(hdev, "start"); hci_req_init(&req, hdev); hdev->req_status = HCI_REQ_PEND; err = func(&req, opt); if (err) { if (hci_status) *hci_status = HCI_ERROR_UNSPECIFIED; return err; } err = hci_req_run_skb(&req, hci_req_sync_complete); if (err < 0) { hdev->req_status = 0; /* ENODATA means the HCI request command queue is empty. * This can happen when a request with conditionals doesn't * trigger any commands to be sent. This is normal behavior * and should not trigger an error return. 
*/ if (err == -ENODATA) { if (hci_status) *hci_status = 0; return 0; } if (hci_status) *hci_status = HCI_ERROR_UNSPECIFIED; return err; } err = wait_event_interruptible_timeout(hdev->req_wait_q, hdev->req_status != HCI_REQ_PEND, timeout); if (err == -ERESTARTSYS) return -EINTR; switch (hdev->req_status) { case HCI_REQ_DONE: err = -bt_to_errno(hdev->req_result); if (hci_status) *hci_status = hdev->req_result; break; case HCI_REQ_CANCELED: err = -hdev->req_result; if (hci_status) *hci_status = HCI_ERROR_UNSPECIFIED; break; default: err = -ETIMEDOUT; if (hci_status) *hci_status = HCI_ERROR_UNSPECIFIED; break; } kfree_skb(hdev->req_skb); hdev->req_skb = NULL; hdev->req_status = hdev->req_result = 0; bt_dev_dbg(hdev, "end: err %d", err); return err; } int hci_req_sync(struct hci_dev *hdev, int (*req)(struct hci_request *req, unsigned long opt), unsigned long opt, u32 timeout, u8 *hci_status) { int ret; /* Serialize all requests */ hci_req_sync_lock(hdev); /* check the state after obtaing the lock to protect the HCI_UP * against any races from hci_dev_do_close when the controller * gets removed. 
*/ if (test_bit(HCI_UP, &hdev->flags)) ret = __hci_req_sync(hdev, req, opt, timeout, hci_status); else ret = -ENETDOWN; hci_req_sync_unlock(hdev); return ret; } struct sk_buff *hci_prepare_cmd(struct hci_dev *hdev, u16 opcode, u32 plen, const void *param) { int len = HCI_COMMAND_HDR_SIZE + plen; struct hci_command_hdr *hdr; struct sk_buff *skb; skb = bt_skb_alloc(len, GFP_ATOMIC); if (!skb) return NULL; hdr = skb_put(skb, HCI_COMMAND_HDR_SIZE); hdr->opcode = cpu_to_le16(opcode); hdr->plen = plen; if (plen) skb_put_data(skb, param, plen); bt_dev_dbg(hdev, "skb len %d", skb->len); hci_skb_pkt_type(skb) = HCI_COMMAND_PKT; hci_skb_opcode(skb) = opcode; return skb; } /* Queue a command to an asynchronous HCI request */ void hci_req_add_ev(struct hci_request *req, u16 opcode, u32 plen, const void *param, u8 event) { struct hci_dev *hdev = req->hdev; struct sk_buff *skb; bt_dev_dbg(hdev, "opcode 0x%4.4x plen %d", opcode, plen); /* If an error occurred during request building, there is no point in * queueing the HCI command. We can simply return. 
*/ if (req->err) return; skb = hci_prepare_cmd(hdev, opcode, plen, param); if (!skb) { bt_dev_err(hdev, "no memory for command (opcode 0x%4.4x)", opcode); req->err = -ENOMEM; return; } if (skb_queue_empty(&req->cmd_q)) bt_cb(skb)->hci.req_flags |= HCI_REQ_START; bt_cb(skb)->hci.req_event = event; skb_queue_tail(&req->cmd_q, skb); } void hci_req_add(struct hci_request *req, u16 opcode, u32 plen, const void *param) { hci_req_add_ev(req, opcode, plen, param, 0); } void __hci_req_write_fast_connectable(struct hci_request *req, bool enable) { struct hci_dev *hdev = req->hdev; struct hci_cp_write_page_scan_activity acp; u8 type; if (!hci_dev_test_flag(hdev, HCI_BREDR_ENABLED)) return; if (hdev->hci_ver < BLUETOOTH_VER_1_2) return; if (enable) { type = PAGE_SCAN_TYPE_INTERLACED; /* 160 msec page scan interval */ acp.interval = cpu_to_le16(0x0100); } else { type = hdev->def_page_scan_type; acp.interval = cpu_to_le16(hdev->def_page_scan_int); } acp.window = cpu_to_le16(hdev->def_page_scan_window); if (__cpu_to_le16(hdev->page_scan_interval) != acp.interval || __cpu_to_le16(hdev->page_scan_window) != acp.window) hci_req_add(req, HCI_OP_WRITE_PAGE_SCAN_ACTIVITY, sizeof(acp), &acp); if (hdev->page_scan_type != type) hci_req_add(req, HCI_OP_WRITE_PAGE_SCAN_TYPE, 1, &type); } static void start_interleave_scan(struct hci_dev *hdev) { hdev->interleave_scan_state = INTERLEAVE_SCAN_NO_FILTER; queue_delayed_work(hdev->req_workqueue, &hdev->interleave_scan, 0); } static bool is_interleave_scanning(struct hci_dev *hdev) { return hdev->interleave_scan_state != INTERLEAVE_SCAN_NONE; } static void cancel_interleave_scan(struct hci_dev *hdev) { bt_dev_dbg(hdev, "cancelling interleave scan"); cancel_delayed_work_sync(&hdev->interleave_scan); hdev->interleave_scan_state = INTERLEAVE_SCAN_NONE; } /* Return true if interleave_scan wasn't started until exiting this function, * otherwise, return false */ static bool __hci_update_interleaved_scan(struct hci_dev *hdev) { /* Do interleaved scan 
only if all of the following are true: * - There is at least one ADV monitor * - At least one pending LE connection or one device to be scanned for * - Monitor offloading is not supported * If so, we should alternate between allowlist scan and one without * any filters to save power. */ bool use_interleaving = hci_is_adv_monitoring(hdev) && !(list_empty(&hdev->pend_le_conns) && list_empty(&hdev->pend_le_reports)) && hci_get_adv_monitor_offload_ext(hdev) == HCI_ADV_MONITOR_EXT_NONE; bool is_interleaving = is_interleave_scanning(hdev); if (use_interleaving && !is_interleaving) { start_interleave_scan(hdev); bt_dev_dbg(hdev, "starting interleave scan"); return true; } if (!use_interleaving && is_interleaving) cancel_interleave_scan(hdev); return false; } /* This function controls the background scanning based on hdev->pend_le_conns * list. If there are pending LE connection we start the background scanning, * otherwise we stop it. * * This function requires the caller holds hdev->lock. */ static void __hci_update_background_scan(struct hci_request *req) { struct hci_dev *hdev = req->hdev; if (!test_bit(HCI_UP, &hdev->flags) || test_bit(HCI_INIT, &hdev->flags) || hci_dev_test_flag(hdev, HCI_SETUP) || hci_dev_test_flag(hdev, HCI_CONFIG) || hci_dev_test_flag(hdev, HCI_AUTO_OFF) || hci_dev_test_flag(hdev, HCI_UNREGISTER)) return; /* No point in doing scanning if LE support hasn't been enabled */ if (!hci_dev_test_flag(hdev, HCI_LE_ENABLED)) return; /* If discovery is active don't interfere with it */ if (hdev->discovery.state != DISCOVERY_STOPPED) return; /* Reset RSSI and UUID filters when starting background scanning * since these filters are meant for service discovery only. * * The Start Discovery and Start Service Discovery operations * ensure to set proper values for RSSI threshold and UUID * filter list. So it is safe to just reset them here. */ hci_discovery_filter_clear(hdev); bt_dev_dbg(hdev, "ADV monitoring is %s", hci_is_adv_monitoring(hdev) ? 
"on" : "off"); if (list_empty(&hdev->pend_le_conns) && list_empty(&hdev->pend_le_reports) && !hci_is_adv_monitoring(hdev)) { /* If there is no pending LE connections or devices * to be scanned for or no ADV monitors, we should stop the * background scanning. */ /* If controller is not scanning we are done. */ if (!hci_dev_test_flag(hdev, HCI_LE_SCAN)) return; hci_req_add_le_scan_disable(req, false); bt_dev_dbg(hdev, "stopping background scanning"); } else { /* If there is at least one pending LE connection, we should * keep the background scan running. */ /* If controller is connecting, we should not start scanning * since some controllers are not able to scan and connect at * the same time. */ if (hci_lookup_le_connect(hdev)) return; /* If controller is currently scanning, we stop it to ensure we * don't miss any advertising (due to duplicates filter). */ if (hci_dev_test_flag(hdev, HCI_LE_SCAN)) hci_req_add_le_scan_disable(req, false); hci_req_add_le_passive_scan(req); bt_dev_dbg(hdev, "starting background scanning"); } } void __hci_req_update_name(struct hci_request *req) { struct hci_dev *hdev = req->hdev; struct hci_cp_write_local_name cp; memcpy(cp.name, hdev->dev_name, sizeof(cp.name)); hci_req_add(req, HCI_OP_WRITE_LOCAL_NAME, sizeof(cp), &cp); } #define PNP_INFO_SVCLASS_ID 0x1200 static u8 *create_uuid16_list(struct hci_dev *hdev, u8 *data, ptrdiff_t len) { u8 *ptr = data, *uuids_start = NULL; struct bt_uuid *uuid; if (len < 4) return ptr; list_for_each_entry(uuid, &hdev->uuids, list) { u16 uuid16; if (uuid->size != 16) continue; uuid16 = get_unaligned_le16(&uuid->uuid[12]); if (uuid16 < 0x1100) continue; if (uuid16 == PNP_INFO_SVCLASS_ID) continue; if (!uuids_start) { uuids_start = ptr; uuids_start[0] = 1; uuids_start[1] = EIR_UUID16_ALL; ptr += 2; } /* Stop if not enough space to put next UUID */ if ((ptr - data) + sizeof(u16) > len) { uuids_start[1] = EIR_UUID16_SOME; break; } *ptr++ = (uuid16 & 0x00ff); *ptr++ = (uuid16 & 0xff00) >> 8; uuids_start[0] 
+= sizeof(uuid16); } return ptr; } static u8 *create_uuid32_list(struct hci_dev *hdev, u8 *data, ptrdiff_t len) { u8 *ptr = data, *uuids_start = NULL; struct bt_uuid *uuid; if (len < 6) return ptr; list_for_each_entry(uuid, &hdev->uuids, list) { if (uuid->size != 32) continue; if (!uuids_start) { uuids_start = ptr; uuids_start[0] = 1; uuids_start[1] = EIR_UUID32_ALL; ptr += 2; } /* Stop if not enough space to put next UUID */ if ((ptr - data) + sizeof(u32) > len) { uuids_start[1] = EIR_UUID32_SOME; break; } memcpy(ptr, &uuid->uuid[12], sizeof(u32)); ptr += sizeof(u32); uuids_start[0] += sizeof(u32); } return ptr; } static u8 *create_uuid128_list(struct hci_dev *hdev, u8 *data, ptrdiff_t len) { u8 *ptr = data, *uuids_start = NULL; struct bt_uuid *uuid; if (len < 18) return ptr; list_for_each_entry(uuid, &hdev->uuids, list) { if (uuid->size != 128) continue; if (!uuids_start) { uuids_start = ptr; uuids_start[0] = 1; uuids_start[1] = EIR_UUID128_ALL; ptr += 2; } /* Stop if not enough space to put next UUID */ if ((ptr - data) + 16 > len) { uuids_start[1] = EIR_UUID128_SOME; break; } memcpy(ptr, uuid->uuid, 16); ptr += 16; uuids_start[0] += 16; } return ptr; } static void create_eir(struct hci_dev *hdev, u8 *data) { u8 *ptr = data; size_t name_len; name_len = strlen(hdev->dev_name); if (name_len > 0) { /* EIR Data type */ if (name_len > 48) { name_len = 48; ptr[1] = EIR_NAME_SHORT; } else ptr[1] = EIR_NAME_COMPLETE; /* EIR Data length */ ptr[0] = name_len + 1; memcpy(ptr + 2, hdev->dev_name, name_len); ptr += (name_len + 2); } if (hdev->inq_tx_power != HCI_TX_POWER_INVALID) { ptr[0] = 2; ptr[1] = EIR_TX_POWER; ptr[2] = (u8) hdev->inq_tx_power; ptr += 3; } if (hdev->devid_source > 0) { ptr[0] = 9; ptr[1] = EIR_DEVICE_ID; put_unaligned_le16(hdev->devid_source, ptr + 2); put_unaligned_le16(hdev->devid_vendor, ptr + 4); put_unaligned_le16(hdev->devid_product, ptr + 6); put_unaligned_le16(hdev->devid_version, ptr + 8); ptr += 10; } ptr = create_uuid16_list(hdev, ptr, 
HCI_MAX_EIR_LENGTH - (ptr - data)); ptr = create_uuid32_list(hdev, ptr, HCI_MAX_EIR_LENGTH - (ptr - data)); ptr = create_uuid128_list(hdev, ptr, HCI_MAX_EIR_LENGTH - (ptr - data)); } void __hci_req_update_eir(struct hci_request *req) { struct hci_dev *hdev = req->hdev; struct hci_cp_write_eir cp; if (!hdev_is_powered(hdev)) return; if (!lmp_ext_inq_capable(hdev)) return; if (!hci_dev_test_flag(hdev, HCI_SSP_ENABLED)) return; if (hci_dev_test_flag(hdev, HCI_SERVICE_CACHE)) return; memset(&cp, 0, sizeof(cp)); create_eir(hdev, cp.data); if (memcmp(cp.data, hdev->eir, sizeof(cp.data)) == 0) return; memcpy(hdev->eir, cp.data, sizeof(cp.data)); hci_req_add(req, HCI_OP_WRITE_EIR, sizeof(cp), &cp); } void hci_req_add_le_scan_disable(struct hci_request *req, bool rpa_le_conn) { struct hci_dev *hdev = req->hdev; if (hdev->scanning_paused) { bt_dev_dbg(hdev, "Scanning is paused for suspend"); return; } if (hdev->suspended) set_bit(SUSPEND_SCAN_DISABLE, hdev->suspend_tasks); if (use_ext_scan(hdev)) { struct hci_cp_le_set_ext_scan_enable cp; memset(&cp, 0, sizeof(cp)); cp.enable = LE_SCAN_DISABLE; hci_req_add(req, HCI_OP_LE_SET_EXT_SCAN_ENABLE, sizeof(cp), &cp); } else { struct hci_cp_le_set_scan_enable cp; memset(&cp, 0, sizeof(cp)); cp.enable = LE_SCAN_DISABLE; hci_req_add(req, HCI_OP_LE_SET_SCAN_ENABLE, sizeof(cp), &cp); } /* Disable address resolution */ if (use_ll_privacy(hdev) && hci_dev_test_flag(hdev, HCI_ENABLE_LL_PRIVACY) && hci_dev_test_flag(hdev, HCI_LL_RPA_RESOLUTION) && !rpa_le_conn) { __u8 enable = 0x00; hci_req_add(req, HCI_OP_LE_SET_ADDR_RESOLV_ENABLE, 1, &enable); } } static void del_from_white_list(struct hci_request *req, bdaddr_t *bdaddr, u8 bdaddr_type) { struct hci_cp_le_del_from_white_list cp; cp.bdaddr_type = bdaddr_type; bacpy(&cp.bdaddr, bdaddr); bt_dev_dbg(req->hdev, "Remove %pMR (0x%x) from whitelist", &cp.bdaddr, cp.bdaddr_type); hci_req_add(req, HCI_OP_LE_DEL_FROM_WHITE_LIST, sizeof(cp), &cp); if (use_ll_privacy(req->hdev) && 
hci_dev_test_flag(req->hdev, HCI_ENABLE_LL_PRIVACY)) {
		struct smp_irk *irk;

		irk = hci_find_irk_by_addr(req->hdev, bdaddr, bdaddr_type);
		if (irk) {
			struct hci_cp_le_del_from_resolv_list cp;

			cp.bdaddr_type = bdaddr_type;
			bacpy(&cp.bdaddr, bdaddr);

			hci_req_add(req, HCI_OP_LE_DEL_FROM_RESOLV_LIST,
				    sizeof(cp), &cp);
		}
	}
}

/* Adds connection to white list if needed. On error, returns -1. */
static int add_to_white_list(struct hci_request *req,
			     struct hci_conn_params *params, u8 *num_entries,
			     bool allow_rpa)
{
	struct hci_cp_le_add_to_white_list cp;
	struct hci_dev *hdev = req->hdev;

	/* Already in white list */
	if (hci_bdaddr_list_lookup(&hdev->le_white_list, &params->addr,
				   params->addr_type))
		return 0;

	/* Select filter policy to accept all advertising */
	if (*num_entries >= hdev->le_white_list_size)
		return -1;

	/* White list can not be used with RPAs */
	if (!allow_rpa &&
	    !hci_dev_test_flag(hdev, HCI_ENABLE_LL_PRIVACY) &&
	    hci_find_irk_by_addr(hdev, &params->addr, params->addr_type)) {
		return -1;
	}

	/* During suspend, only wakeable devices can be in whitelist */
	if (hdev->suspended &&
	    !hci_conn_test_flag(HCI_CONN_FLAG_REMOTE_WAKEUP,
				params->current_flags))
		return 0;

	*num_entries += 1;
	cp.bdaddr_type = params->addr_type;
	bacpy(&cp.bdaddr, &params->addr);

	bt_dev_dbg(hdev, "Add %pMR (0x%x) to whitelist", &cp.bdaddr,
		   cp.bdaddr_type);
	hci_req_add(req, HCI_OP_LE_ADD_TO_WHITE_LIST, sizeof(cp), &cp);

	/* Mirror the entry into the controller resolving list when LL
	 * privacy is enabled and we know the peer's IRK.
	 */
	if (use_ll_privacy(hdev) &&
	    hci_dev_test_flag(hdev, HCI_ENABLE_LL_PRIVACY)) {
		struct smp_irk *irk;

		irk = hci_find_irk_by_addr(hdev, &params->addr,
					   params->addr_type);
		if (irk) {
			struct hci_cp_le_add_to_resolv_list cp;

			cp.bdaddr_type = params->addr_type;
			bacpy(&cp.bdaddr, &params->addr);
			memcpy(cp.peer_irk, irk->val, 16);

			if (hci_dev_test_flag(hdev, HCI_PRIVACY))
				memcpy(cp.local_irk, hdev->irk, 16);
			else
				memset(cp.local_irk, 0, 16);

			hci_req_add(req, HCI_OP_LE_ADD_TO_RESOLV_LIST,
				    sizeof(cp), &cp);
		}
	}

	return 0;
}

/* Re-synchronize the controller white list with the pending-connection
 * and pending-report lists.  Returns the LE scan filter policy to use:
 * 0x01 (use white list) on success, 0x00 (accept all) when the white
 * list cannot represent the current device set.
 */
static u8 update_white_list(struct hci_request *req)
{
	struct hci_dev *hdev =
req->hdev;
	struct hci_conn_params *params;
	struct bdaddr_list *b;
	u8 num_entries = 0;
	bool pend_conn, pend_report;
	/* We allow whitelisting even with RPAs in suspend. In the worst case,
	 * we won't be able to wake from devices that use the privacy1.2
	 * features. Additionally, once we support privacy1.2 and IRK
	 * offloading, we can update this to also check for those conditions.
	 */
	bool allow_rpa = hdev->suspended;

	/* Go through the current white list programmed into the
	 * controller one by one and check if that address is still
	 * in the list of pending connections or list of devices to
	 * report. If not present in either list, then queue the
	 * command to remove it from the controller.
	 */
	list_for_each_entry(b, &hdev->le_white_list, list) {
		pend_conn = hci_pend_le_action_lookup(&hdev->pend_le_conns,
						      &b->bdaddr,
						      b->bdaddr_type);
		pend_report = hci_pend_le_action_lookup(&hdev->pend_le_reports,
							&b->bdaddr,
							b->bdaddr_type);

		/* If the device is not likely to connect or report,
		 * remove it from the whitelist.
		 */
		if (!pend_conn && !pend_report) {
			del_from_white_list(req, &b->bdaddr, b->bdaddr_type);
			continue;
		}

		/* White list can not be used with RPAs.
		 * NOTE(review): bailing out here leaves the RPA entry
		 * programmed in the controller white list; consider
		 * removing the entry before falling back to filter
		 * policy 0x00 — confirm against current upstream.
		 */
		if (!allow_rpa &&
		    !hci_dev_test_flag(hdev, HCI_ENABLE_LL_PRIVACY) &&
		    hci_find_irk_by_addr(hdev, &b->bdaddr, b->bdaddr_type)) {
			return 0x00;
		}

		num_entries++;
	}

	/* Since all no longer valid white list entries have been
	 * removed, walk through the list of pending connections
	 * and ensure that any new device gets programmed into
	 * the controller.
	 *
	 * If the list of the devices is larger than the list of
	 * available white list entries in the controller, then
	 * just abort and return filer policy value to not use the
	 * white list.
	 */
	list_for_each_entry(params, &hdev->pend_le_conns, action) {
		if (add_to_white_list(req, params, &num_entries, allow_rpa))
			return 0x00;
	}

	/* After adding all new pending connections, walk through
	 * the list of pending reports and also add these to the
	 * white list if there is still space.
Abort if space runs out.
	 */
	list_for_each_entry(params, &hdev->pend_le_reports, action) {
		if (add_to_white_list(req, params, &num_entries, allow_rpa))
			return 0x00;
	}

	/* Use the allowlist unless the following conditions are all true:
	 * - We are not currently suspending
	 * - There are 1 or more ADV monitors registered and it's not offloaded
	 * - Interleaved scanning is not currently using the allowlist
	 */
	if (!idr_is_empty(&hdev->adv_monitors_idr) && !hdev->suspended &&
	    hci_get_adv_monitor_offload_ext(hdev) == HCI_ADV_MONITOR_EXT_NONE &&
	    hdev->interleave_scan_state != INTERLEAVE_SCAN_ALLOWLIST)
		return 0x00;

	/* Select filter policy to use white list */
	return 0x01;
}

/* Scanning should use an RPA only when the Privacy setting is enabled */
static bool scan_use_rpa(struct hci_dev *hdev)
{
	return hci_dev_test_flag(hdev, HCI_PRIVACY);
}

/* Queue the LE scan parameter + enable commands, choosing the extended
 * or legacy command set based on controller support.
 * @addr_resolv: enable controller address resolution before scanning
 * (only acted on when LL privacy is in use).
 */
static void hci_req_start_scan(struct hci_request *req, u8 type, u16 interval,
			       u16 window, u8 own_addr_type, u8 filter_policy,
			       bool addr_resolv)
{
	struct hci_dev *hdev = req->hdev;

	if (hdev->scanning_paused) {
		bt_dev_dbg(hdev, "Scanning is paused for suspend");
		return;
	}

	if (use_ll_privacy(hdev) &&
	    hci_dev_test_flag(hdev, HCI_ENABLE_LL_PRIVACY) &&
	    addr_resolv) {
		u8 enable = 0x01;

		hci_req_add(req, HCI_OP_LE_SET_ADDR_RESOLV_ENABLE, 1, &enable);
	}

	/* Use ext scanning if set ext scan param and ext scan enable is
	 * supported
	 */
	if (use_ext_scan(hdev)) {
		struct hci_cp_le_set_ext_scan_params *ext_param_cp;
		struct hci_cp_le_set_ext_scan_enable ext_enable_cp;
		struct hci_cp_le_scan_phy_params *phy_params;
		/* Room for the base params plus one PHY block each for the
		 * 1M/2M and Coded PHYs.
		 */
		u8 data[sizeof(*ext_param_cp) + sizeof(*phy_params) * 2];
		u32 plen;

		ext_param_cp = (void *)data;
		phy_params = (void *)ext_param_cp->data;

		memset(ext_param_cp, 0, sizeof(*ext_param_cp));
		ext_param_cp->own_addr_type = own_addr_type;
		ext_param_cp->filter_policy = filter_policy;

		plen = sizeof(*ext_param_cp);

		if (scan_1m(hdev) || scan_2m(hdev)) {
			ext_param_cp->scanning_phys |= LE_SCAN_PHY_1M;

			memset(phy_params, 0, sizeof(*phy_params));
			phy_params->type = type;
			phy_params->interval = cpu_to_le16(interval);
			phy_params->window =
cpu_to_le16(window); plen += sizeof(*phy_params); phy_params++; } if (scan_coded(hdev)) { ext_param_cp->scanning_phys |= LE_SCAN_PHY_CODED; memset(phy_params, 0, sizeof(*phy_params)); phy_params->type = type; phy_params->interval = cpu_to_le16(interval); phy_params->window = cpu_to_le16(window); plen += sizeof(*phy_params); phy_params++; } hci_req_add(req, HCI_OP_LE_SET_EXT_SCAN_PARAMS, plen, ext_param_cp); memset(&ext_enable_cp, 0, sizeof(ext_enable_cp)); ext_enable_cp.enable = LE_SCAN_ENABLE; ext_enable_cp.filter_dup = LE_SCAN_FILTER_DUP_ENABLE; hci_req_add(req, HCI_OP_LE_SET_EXT_SCAN_ENABLE, sizeof(ext_enable_cp), &ext_enable_cp); } else { struct hci_cp_le_set_scan_param param_cp; struct hci_cp_le_set_scan_enable enable_cp; memset(&param_cp, 0, sizeof(param_cp)); param_cp.type = type; param_cp.interval = cpu_to_le16(interval); param_cp.window = cpu_to_le16(window); param_cp.own_address_type = own_addr_type; param_cp.filter_policy = filter_policy; hci_req_add(req, HCI_OP_LE_SET_SCAN_PARAM, sizeof(param_cp), &param_cp); memset(&enable_cp, 0, sizeof(enable_cp)); enable_cp.enable = LE_SCAN_ENABLE; enable_cp.filter_dup = LE_SCAN_FILTER_DUP_ENABLE; hci_req_add(req, HCI_OP_LE_SET_SCAN_ENABLE, sizeof(enable_cp), &enable_cp); } } /* Returns true if an le connection is in the scanning state */ static inline bool hci_is_le_conn_scanning(struct hci_dev *hdev) { struct hci_conn_hash *h = &hdev->conn_hash; struct hci_conn *c; rcu_read_lock(); list_for_each_entry_rcu(c, &h->list, list) { if (c->type == LE_LINK && c->state == BT_CONNECT && test_bit(HCI_CONN_SCANNING, &c->flags)) { rcu_read_unlock(); return true; } } rcu_read_unlock(); return false; } /* Ensure to call hci_req_add_le_scan_disable() first to disable the * controller based address resolution to be able to reconfigure * resolving list. 
*/ void hci_req_add_le_passive_scan(struct hci_request *req) { struct hci_dev *hdev = req->hdev; u8 own_addr_type; u8 filter_policy; u16 window, interval; /* Background scanning should run with address resolution */ bool addr_resolv = true; if (hdev->scanning_paused) { bt_dev_dbg(hdev, "Scanning is paused for suspend"); return; } /* Set require_privacy to false since no SCAN_REQ are send * during passive scanning. Not using an non-resolvable address * here is important so that peer devices using direct * advertising with our address will be correctly reported * by the controller. */ if (hci_update_random_address(req, false, scan_use_rpa(hdev), &own_addr_type)) return; if (hdev->enable_advmon_interleave_scan && __hci_update_interleaved_scan(hdev)) return; bt_dev_dbg(hdev, "interleave state %d", hdev->interleave_scan_state); /* Adding or removing entries from the white list must * happen before enabling scanning. The controller does * not allow white list modification while scanning. */ filter_policy = update_white_list(req); /* When the controller is using random resolvable addresses and * with that having LE privacy enabled, then controllers with * Extended Scanner Filter Policies support can now enable support * for handling directed advertising. * * So instead of using filter polices 0x00 (no whitelist) * and 0x01 (whitelist enabled) use the new filter policies * 0x02 (no whitelist) and 0x03 (whitelist enabled). 
*/ if (hci_dev_test_flag(hdev, HCI_PRIVACY) && (hdev->le_features[0] & HCI_LE_EXT_SCAN_POLICY)) filter_policy |= 0x02; if (hdev->suspended) { window = hdev->le_scan_window_suspend; interval = hdev->le_scan_int_suspend; set_bit(SUSPEND_SCAN_ENABLE, hdev->suspend_tasks); } else if (hci_is_le_conn_scanning(hdev)) { window = hdev->le_scan_window_connect; interval = hdev->le_scan_int_connect; } else if (hci_is_adv_monitoring(hdev)) { window = hdev->le_scan_window_adv_monitor; interval = hdev->le_scan_int_adv_monitor; } else { window = hdev->le_scan_window; interval = hdev->le_scan_interval; } bt_dev_dbg(hdev, "LE passive scan with whitelist = %d", filter_policy); hci_req_start_scan(req, LE_SCAN_PASSIVE, interval, window, own_addr_type, filter_policy, addr_resolv); } static bool adv_instance_is_scannable(struct hci_dev *hdev, u8 instance) { struct adv_info *adv_instance; /* Instance 0x00 always set local name */ if (instance == 0x00) return true; adv_instance = hci_find_adv_instance(hdev, instance); if (!adv_instance) return false; if (adv_instance->flags & MGMT_ADV_FLAG_APPEARANCE || adv_instance->flags & MGMT_ADV_FLAG_LOCAL_NAME) return true; return adv_instance->scan_rsp_len ? true : false; } static void hci_req_clear_event_filter(struct hci_request *req) { struct hci_cp_set_event_filter f; memset(&f, 0, sizeof(f)); f.flt_type = HCI_FLT_CLEAR_ALL; hci_req_add(req, HCI_OP_SET_EVENT_FLT, 1, &f); /* Update page scan state (since we may have modified it when setting * the event filter). 
*/ __hci_req_update_scan(req); } static void hci_req_set_event_filter(struct hci_request *req) { struct bdaddr_list_with_flags *b; struct hci_cp_set_event_filter f; struct hci_dev *hdev = req->hdev; u8 scan = SCAN_DISABLED; /* Always clear event filter when starting */ hci_req_clear_event_filter(req); list_for_each_entry(b, &hdev->whitelist, list) { if (!hci_conn_test_flag(HCI_CONN_FLAG_REMOTE_WAKEUP, b->current_flags)) continue; memset(&f, 0, sizeof(f)); bacpy(&f.addr_conn_flt.bdaddr, &b->bdaddr); f.flt_type = HCI_FLT_CONN_SETUP; f.cond_type = HCI_CONN_SETUP_ALLOW_BDADDR; f.addr_conn_flt.auto_accept = HCI_CONN_SETUP_AUTO_ON; bt_dev_dbg(hdev, "Adding event filters for %pMR", &b->bdaddr); hci_req_add(req, HCI_OP_SET_EVENT_FLT, sizeof(f), &f); scan = SCAN_PAGE; } if (scan) set_bit(SUSPEND_SCAN_ENABLE, hdev->suspend_tasks); else set_bit(SUSPEND_SCAN_DISABLE, hdev->suspend_tasks); hci_req_add(req, HCI_OP_WRITE_SCAN_ENABLE, 1, &scan); } static void cancel_adv_timeout(struct hci_dev *hdev) { if (hdev->adv_instance_timeout) { hdev->adv_instance_timeout = 0; cancel_delayed_work(&hdev->adv_instance_expire); } } /* This function requires the caller holds hdev->lock */ void __hci_req_pause_adv_instances(struct hci_request *req) { bt_dev_dbg(req->hdev, "Pausing advertising instances"); /* Call to disable any advertisements active on the controller. * This will succeed even if no advertisements are configured. 
*/ __hci_req_disable_advertising(req); /* If we are using software rotation, pause the loop */ if (!ext_adv_capable(req->hdev)) cancel_adv_timeout(req->hdev); } /* This function requires the caller holds hdev->lock */ static void __hci_req_resume_adv_instances(struct hci_request *req) { struct adv_info *adv; bt_dev_dbg(req->hdev, "Resuming advertising instances"); if (ext_adv_capable(req->hdev)) { /* Call for each tracked instance to be re-enabled */ list_for_each_entry(adv, &req->hdev->adv_instances, list) { __hci_req_enable_ext_advertising(req, adv->instance); } } else { /* Schedule for most recent instance to be restarted and begin * the software rotation loop */ __hci_req_schedule_adv_instance(req, req->hdev->cur_adv_instance, true); } } /* This function requires the caller holds hdev->lock */ int hci_req_resume_adv_instances(struct hci_dev *hdev) { struct hci_request req; hci_req_init(&req, hdev); __hci_req_resume_adv_instances(&req); return hci_req_run(&req, NULL); } static void suspend_req_complete(struct hci_dev *hdev, u8 status, u16 opcode) { bt_dev_dbg(hdev, "Request complete opcode=0x%x, status=0x%x", opcode, status); if (test_bit(SUSPEND_SCAN_ENABLE, hdev->suspend_tasks) || test_bit(SUSPEND_SCAN_DISABLE, hdev->suspend_tasks)) { clear_bit(SUSPEND_SCAN_ENABLE, hdev->suspend_tasks); clear_bit(SUSPEND_SCAN_DISABLE, hdev->suspend_tasks); wake_up(&hdev->suspend_wait_q); } if (test_bit(SUSPEND_SET_ADV_FILTER, hdev->suspend_tasks)) { clear_bit(SUSPEND_SET_ADV_FILTER, hdev->suspend_tasks); wake_up(&hdev->suspend_wait_q); } } static void hci_req_add_set_adv_filter_enable(struct hci_request *req, bool enable) { struct hci_dev *hdev = req->hdev; switch (hci_get_adv_monitor_offload_ext(hdev)) { case HCI_ADV_MONITOR_EXT_MSFT: msft_req_add_set_filter_enable(req, enable); break; default: return; } /* No need to block when enabling since it's on resume path */ if (hdev->suspended && !enable) set_bit(SUSPEND_SET_ADV_FILTER, hdev->suspend_tasks); } /* Call with 
hci_dev_lock */ void hci_req_prepare_suspend(struct hci_dev *hdev, enum suspended_state next) { int old_state; struct hci_conn *conn; struct hci_request req; u8 page_scan; int disconnect_counter; if (next == hdev->suspend_state) { bt_dev_dbg(hdev, "Same state before and after: %d", next); goto done; } hdev->suspend_state = next; hci_req_init(&req, hdev); if (next == BT_SUSPEND_DISCONNECT) { /* Mark device as suspended */ hdev->suspended = true; /* Pause discovery if not already stopped */ old_state = hdev->discovery.state; if (old_state != DISCOVERY_STOPPED) { set_bit(SUSPEND_PAUSE_DISCOVERY, hdev->suspend_tasks); hci_discovery_set_state(hdev, DISCOVERY_STOPPING); queue_work(hdev->req_workqueue, &hdev->discov_update); } hdev->discovery_paused = true; hdev->discovery_old_state = old_state; /* Stop directed advertising */ old_state = hci_dev_test_flag(hdev, HCI_ADVERTISING); if (old_state) { set_bit(SUSPEND_PAUSE_ADVERTISING, hdev->suspend_tasks); cancel_delayed_work(&hdev->discov_off); queue_delayed_work(hdev->req_workqueue, &hdev->discov_off, 0); } /* Pause other advertisements */ if (hdev->adv_instance_cnt) __hci_req_pause_adv_instances(&req); hdev->advertising_paused = true; hdev->advertising_old_state = old_state; /* Disable page scan */ page_scan = SCAN_DISABLED; hci_req_add(&req, HCI_OP_WRITE_SCAN_ENABLE, 1, &page_scan); /* Disable LE passive scan if enabled */ if (hci_dev_test_flag(hdev, HCI_LE_SCAN)) { cancel_interleave_scan(hdev); hci_req_add_le_scan_disable(&req, false); } /* Disable advertisement filters */ hci_req_add_set_adv_filter_enable(&req, false); /* Mark task needing completion */ set_bit(SUSPEND_SCAN_DISABLE, hdev->suspend_tasks); /* Prevent disconnects from causing scanning to be re-enabled */ hdev->scanning_paused = true; /* Run commands before disconnecting */ hci_req_run(&req, suspend_req_complete); disconnect_counter = 0; /* Soft disconnect everything (power off) */ list_for_each_entry(conn, &hdev->conn_hash.list, list) { 
hci_disconnect(conn, HCI_ERROR_REMOTE_POWER_OFF); disconnect_counter++; } if (disconnect_counter > 0) { bt_dev_dbg(hdev, "Had %d disconnects. Will wait on them", disconnect_counter); set_bit(SUSPEND_DISCONNECTING, hdev->suspend_tasks); } } else if (next == BT_SUSPEND_CONFIGURE_WAKE) { /* Unpause to take care of updating scanning params */ hdev->scanning_paused = false; /* Enable event filter for paired devices */ hci_req_set_event_filter(&req); /* Enable passive scan at lower duty cycle */ __hci_update_background_scan(&req); /* Pause scan changes again. */ hdev->scanning_paused = true; hci_req_run(&req, suspend_req_complete); } else { hdev->suspended = false; hdev->scanning_paused = false; hci_req_clear_event_filter(&req); /* Reset passive/background scanning to normal */ __hci_update_background_scan(&req); /* Enable all of the advertisement filters */ hci_req_add_set_adv_filter_enable(&req, true); /* Unpause directed advertising */ hdev->advertising_paused = false; if (hdev->advertising_old_state) { set_bit(SUSPEND_UNPAUSE_ADVERTISING, hdev->suspend_tasks); hci_dev_set_flag(hdev, HCI_ADVERTISING); queue_work(hdev->req_workqueue, &hdev->discoverable_update); hdev->advertising_old_state = 0; } /* Resume other advertisements */ if (hdev->adv_instance_cnt) __hci_req_resume_adv_instances(&req); /* Unpause discovery */ hdev->discovery_paused = false; if (hdev->discovery_old_state != DISCOVERY_STOPPED && hdev->discovery_old_state != DISCOVERY_STOPPING) { set_bit(SUSPEND_UNPAUSE_DISCOVERY, hdev->suspend_tasks); hci_discovery_set_state(hdev, DISCOVERY_STARTING); queue_work(hdev->req_workqueue, &hdev->discov_update); } hci_req_run(&req, suspend_req_complete); } hdev->suspend_state = next; done: clear_bit(SUSPEND_PREPARE_NOTIFIER, hdev->suspend_tasks); wake_up(&hdev->suspend_wait_q); } static bool adv_cur_instance_is_scannable(struct hci_dev *hdev) { return adv_instance_is_scannable(hdev, hdev->cur_adv_instance); } void __hci_req_disable_advertising(struct hci_request *req) 
{ if (ext_adv_capable(req->hdev)) { __hci_req_disable_ext_adv_instance(req, 0x00); } else { u8 enable = 0x00; hci_req_add(req, HCI_OP_LE_SET_ADV_ENABLE, sizeof(enable), &enable); } } static u32 get_adv_instance_flags(struct hci_dev *hdev, u8 instance) { u32 flags; struct adv_info *adv_instance; if (instance == 0x00) { /* Instance 0 always manages the "Tx Power" and "Flags" * fields */ flags = MGMT_ADV_FLAG_TX_POWER | MGMT_ADV_FLAG_MANAGED_FLAGS; /* For instance 0, the HCI_ADVERTISING_CONNECTABLE setting * corresponds to the "connectable" instance flag. */ if (hci_dev_test_flag(hdev, HCI_ADVERTISING_CONNECTABLE)) flags |= MGMT_ADV_FLAG_CONNECTABLE; if (hci_dev_test_flag(hdev, HCI_LIMITED_DISCOVERABLE)) flags |= MGMT_ADV_FLAG_LIMITED_DISCOV; else if (hci_dev_test_flag(hdev, HCI_DISCOVERABLE)) flags |= MGMT_ADV_FLAG_DISCOV; return flags; } adv_instance = hci_find_adv_instance(hdev, instance); /* Return 0 when we got an invalid instance identifier. */ if (!adv_instance) return 0; return adv_instance->flags; } static bool adv_use_rpa(struct hci_dev *hdev, uint32_t flags) { /* If privacy is not enabled don't use RPA */ if (!hci_dev_test_flag(hdev, HCI_PRIVACY)) return false; /* If basic privacy mode is enabled use RPA */ if (!hci_dev_test_flag(hdev, HCI_LIMITED_PRIVACY)) return true; /* If limited privacy mode is enabled don't use RPA if we're * both discoverable and bondable. */ if ((flags & MGMT_ADV_FLAG_DISCOV) && hci_dev_test_flag(hdev, HCI_BONDABLE)) return false; /* We're neither bondable nor discoverable in the limited * privacy mode, therefore use RPA. */ return true; } static bool is_advertising_allowed(struct hci_dev *hdev, bool connectable) { /* If there is no connection we are OK to advertise. */ if (hci_conn_num(hdev, LE_LINK) == 0) return true; /* Check le_states if there is any connection in slave role. */ if (hdev->conn_hash.le_num_slave > 0) { /* Slave connection state and non connectable mode bit 20. 
*/ if (!connectable && !(hdev->le_states[2] & 0x10)) return false; /* Slave connection state and connectable mode bit 38 * and scannable bit 21. */ if (connectable && (!(hdev->le_states[4] & 0x40) || !(hdev->le_states[2] & 0x20))) return false; } /* Check le_states if there is any connection in master role. */ if (hci_conn_num(hdev, LE_LINK) != hdev->conn_hash.le_num_slave) { /* Master connection state and non connectable mode bit 18. */ if (!connectable && !(hdev->le_states[2] & 0x02)) return false; /* Master connection state and connectable mode bit 35 and * scannable 19. */ if (connectable && (!(hdev->le_states[4] & 0x08) || !(hdev->le_states[2] & 0x08))) return false; } return true; } void __hci_req_enable_advertising(struct hci_request *req) { struct hci_dev *hdev = req->hdev; struct adv_info *adv_instance; struct hci_cp_le_set_adv_param cp; u8 own_addr_type, enable = 0x01; bool connectable; u16 adv_min_interval, adv_max_interval; u32 flags; flags = get_adv_instance_flags(hdev, hdev->cur_adv_instance); adv_instance = hci_find_adv_instance(hdev, hdev->cur_adv_instance); /* If the "connectable" instance flag was not set, then choose between * ADV_IND and ADV_NONCONN_IND based on the global connectable setting. */ connectable = (flags & MGMT_ADV_FLAG_CONNECTABLE) || mgmt_get_connectable(hdev); if (!is_advertising_allowed(hdev, connectable)) return; if (hci_dev_test_flag(hdev, HCI_LE_ADV)) __hci_req_disable_advertising(req); /* Clear the HCI_LE_ADV bit temporarily so that the * hci_update_random_address knows that it's safe to go ahead * and write a new random address. The flag will be set back on * as soon as the SET_ADV_ENABLE HCI command completes. */ hci_dev_clear_flag(hdev, HCI_LE_ADV); /* Set require_privacy to true only when non-connectable * advertising is used. In that case it is fine to use a * non-resolvable private address. 
*/ if (hci_update_random_address(req, !connectable, adv_use_rpa(hdev, flags), &own_addr_type) < 0) return; memset(&cp, 0, sizeof(cp)); if (adv_instance) { adv_min_interval = adv_instance->min_interval; adv_max_interval = adv_instance->max_interval; } else { adv_min_interval = hdev->le_adv_min_interval; adv_max_interval = hdev->le_adv_max_interval; } if (connectable) { cp.type = LE_ADV_IND; } else { if (adv_cur_instance_is_scannable(hdev)) cp.type = LE_ADV_SCAN_IND; else cp.type = LE_ADV_NONCONN_IND; if (!hci_dev_test_flag(hdev, HCI_DISCOVERABLE) || hci_dev_test_flag(hdev, HCI_LIMITED_DISCOVERABLE)) { adv_min_interval = DISCOV_LE_FAST_ADV_INT_MIN; adv_max_interval = DISCOV_LE_FAST_ADV_INT_MAX; } } cp.min_interval = cpu_to_le16(adv_min_interval); cp.max_interval = cpu_to_le16(adv_max_interval); cp.own_address_type = own_addr_type; cp.channel_map = hdev->le_adv_channel_map; hci_req_add(req, HCI_OP_LE_SET_ADV_PARAM, sizeof(cp), &cp); hci_req_add(req, HCI_OP_LE_SET_ADV_ENABLE, sizeof(enable), &enable); } u8 append_local_name(struct hci_dev *hdev, u8 *ptr, u8 ad_len) { size_t short_len; size_t complete_len; /* no space left for name (+ NULL + type + len) */ if ((HCI_MAX_AD_LENGTH - ad_len) < HCI_MAX_SHORT_NAME_LENGTH + 3) return ad_len; /* use complete name if present and fits */ complete_len = strlen(hdev->dev_name); if (complete_len && complete_len <= HCI_MAX_SHORT_NAME_LENGTH) return eir_append_data(ptr, ad_len, EIR_NAME_COMPLETE, hdev->dev_name, complete_len + 1); /* use short name if present */ short_len = strlen(hdev->short_name); if (short_len) return eir_append_data(ptr, ad_len, EIR_NAME_SHORT, hdev->short_name, short_len + 1); /* use shortened full name if present, we already know that name * is longer then HCI_MAX_SHORT_NAME_LENGTH */ if (complete_len) { u8 name[HCI_MAX_SHORT_NAME_LENGTH + 1]; memcpy(name, hdev->dev_name, HCI_MAX_SHORT_NAME_LENGTH); name[HCI_MAX_SHORT_NAME_LENGTH] = '\0'; return eir_append_data(ptr, ad_len, EIR_NAME_SHORT, name, sizeof(name)); 
} return ad_len; } static u8 append_appearance(struct hci_dev *hdev, u8 *ptr, u8 ad_len) { return eir_append_le16(ptr, ad_len, EIR_APPEARANCE, hdev->appearance); } static u8 create_default_scan_rsp_data(struct hci_dev *hdev, u8 *ptr) { u8 scan_rsp_len = 0; if (hdev->appearance) { scan_rsp_len = append_appearance(hdev, ptr, scan_rsp_len); } return append_local_name(hdev, ptr, scan_rsp_len); } static u8 create_instance_scan_rsp_data(struct hci_dev *hdev, u8 instance, u8 *ptr) { struct adv_info *adv_instance; u32 instance_flags; u8 scan_rsp_len = 0; adv_instance = hci_find_adv_instance(hdev, instance); if (!adv_instance) return 0; instance_flags = adv_instance->flags; if ((instance_flags & MGMT_ADV_FLAG_APPEARANCE) && hdev->appearance) { scan_rsp_len = append_appearance(hdev, ptr, scan_rsp_len); } memcpy(&ptr[scan_rsp_len], adv_instance->scan_rsp_data, adv_instance->scan_rsp_len); scan_rsp_len += adv_instance->scan_rsp_len; if (instance_flags & MGMT_ADV_FLAG_LOCAL_NAME) scan_rsp_len = append_local_name(hdev, ptr, scan_rsp_len); return scan_rsp_len; } void __hci_req_update_scan_rsp_data(struct hci_request *req, u8 instance) { struct hci_dev *hdev = req->hdev; u8 len; if (!hci_dev_test_flag(hdev, HCI_LE_ENABLED)) return; if (ext_adv_capable(hdev)) { struct hci_cp_le_set_ext_scan_rsp_data cp; memset(&cp, 0, sizeof(cp)); if (instance) len = create_instance_scan_rsp_data(hdev, instance, cp.data); else len = create_default_scan_rsp_data(hdev, cp.data); if (hdev->scan_rsp_data_len == len && !memcmp(cp.data, hdev->scan_rsp_data, len)) return; memcpy(hdev->scan_rsp_data, cp.data, sizeof(cp.data)); hdev->scan_rsp_data_len = len; cp.handle = instance; cp.length = len; cp.operation = LE_SET_ADV_DATA_OP_COMPLETE; cp.frag_pref = LE_SET_ADV_DATA_NO_FRAG; hci_req_add(req, HCI_OP_LE_SET_EXT_SCAN_RSP_DATA, sizeof(cp), &cp); } else { struct hci_cp_le_set_scan_rsp_data cp; memset(&cp, 0, sizeof(cp)); if (instance) len = create_instance_scan_rsp_data(hdev, instance, cp.data); else len = 
create_default_scan_rsp_data(hdev, cp.data); if (hdev->scan_rsp_data_len == len && !memcmp(cp.data, hdev->scan_rsp_data, len)) return; memcpy(hdev->scan_rsp_data, cp.data, sizeof(cp.data)); hdev->scan_rsp_data_len = len; cp.length = len; hci_req_add(req, HCI_OP_LE_SET_SCAN_RSP_DATA, sizeof(cp), &cp); } } static u8 create_instance_adv_data(struct hci_dev *hdev, u8 instance, u8 *ptr) { struct adv_info *adv_instance = NULL; u8 ad_len = 0, flags = 0; u32 instance_flags; /* Return 0 when the current instance identifier is invalid. */ if (instance) { adv_instance = hci_find_adv_instance(hdev, instance); if (!adv_instance) return 0; } instance_flags = get_adv_instance_flags(hdev, instance); /* If instance already has the flags set skip adding it once * again. */ if (adv_instance && eir_get_data(adv_instance->adv_data, adv_instance->adv_data_len, EIR_FLAGS, NULL)) goto skip_flags; /* The Add Advertising command allows userspace to set both the general * and limited discoverable flags. */ if (instance_flags & MGMT_ADV_FLAG_DISCOV) flags |= LE_AD_GENERAL; if (instance_flags & MGMT_ADV_FLAG_LIMITED_DISCOV) flags |= LE_AD_LIMITED; if (!hci_dev_test_flag(hdev, HCI_BREDR_ENABLED)) flags |= LE_AD_NO_BREDR; if (flags || (instance_flags & MGMT_ADV_FLAG_MANAGED_FLAGS)) { /* If a discovery flag wasn't provided, simply use the global * settings. */ if (!flags) flags |= mgmt_get_adv_discov_flags(hdev); /* If flags would still be empty, then there is no need to * include the "Flags" AD field". 
*/ if (flags) { ptr[0] = 0x02; ptr[1] = EIR_FLAGS; ptr[2] = flags; ad_len += 3; ptr += 3; } } skip_flags: if (adv_instance) { memcpy(ptr, adv_instance->adv_data, adv_instance->adv_data_len); ad_len += adv_instance->adv_data_len; ptr += adv_instance->adv_data_len; } if (instance_flags & MGMT_ADV_FLAG_TX_POWER) { s8 adv_tx_power; if (ext_adv_capable(hdev)) { if (adv_instance) adv_tx_power = adv_instance->tx_power; else adv_tx_power = hdev->adv_tx_power; } else { adv_tx_power = hdev->adv_tx_power; } /* Provide Tx Power only if we can provide a valid value for it */ if (adv_tx_power != HCI_TX_POWER_INVALID) { ptr[0] = 0x02; ptr[1] = EIR_TX_POWER; ptr[2] = (u8)adv_tx_power; ad_len += 3; ptr += 3; } } return ad_len; } void __hci_req_update_adv_data(struct hci_request *req, u8 instance) { struct hci_dev *hdev = req->hdev; u8 len; if (!hci_dev_test_flag(hdev, HCI_LE_ENABLED)) return; if (ext_adv_capable(hdev)) { struct hci_cp_le_set_ext_adv_data cp; memset(&cp, 0, sizeof(cp)); len = create_instance_adv_data(hdev, instance, cp.data); /* There's nothing to do if the data hasn't changed */ if (hdev->adv_data_len == len && memcmp(cp.data, hdev->adv_data, len) == 0) return; memcpy(hdev->adv_data, cp.data, sizeof(cp.data)); hdev->adv_data_len = len; cp.length = len; cp.handle = instance; cp.operation = LE_SET_ADV_DATA_OP_COMPLETE; cp.frag_pref = LE_SET_ADV_DATA_NO_FRAG; hci_req_add(req, HCI_OP_LE_SET_EXT_ADV_DATA, sizeof(cp), &cp); } else { struct hci_cp_le_set_adv_data cp; memset(&cp, 0, sizeof(cp)); len = create_instance_adv_data(hdev, instance, cp.data); /* There's nothing to do if the data hasn't changed */ if (hdev->adv_data_len == len && memcmp(cp.data, hdev->adv_data, len) == 0) return; memcpy(hdev->adv_data, cp.data, sizeof(cp.data)); hdev->adv_data_len = len; cp.length = len; hci_req_add(req, HCI_OP_LE_SET_ADV_DATA, sizeof(cp), &cp); } } int hci_req_update_adv_data(struct hci_dev *hdev, u8 instance) { struct hci_request req; hci_req_init(&req, hdev); 
__hci_req_update_adv_data(&req, instance); return hci_req_run(&req, NULL); } static void enable_addr_resolution_complete(struct hci_dev *hdev, u8 status, u16 opcode) { BT_DBG("%s status %u", hdev->name, status); } void hci_req_disable_address_resolution(struct hci_dev *hdev) { struct hci_request req; __u8 enable = 0x00; if (!use_ll_privacy(hdev) && !hci_dev_test_flag(hdev, HCI_LL_RPA_RESOLUTION)) return; hci_req_init(&req, hdev); hci_req_add(&req, HCI_OP_LE_SET_ADDR_RESOLV_ENABLE, 1, &enable); hci_req_run(&req, enable_addr_resolution_complete); } static void adv_enable_complete(struct hci_dev *hdev, u8 status, u16 opcode) { bt_dev_dbg(hdev, "status %u", status); } void hci_req_reenable_advertising(struct hci_dev *hdev) { struct hci_request req; if (!hci_dev_test_flag(hdev, HCI_ADVERTISING) && list_empty(&hdev->adv_instances)) return; hci_req_init(&req, hdev); if (hdev->cur_adv_instance) { __hci_req_schedule_adv_instance(&req, hdev->cur_adv_instance, true); } else { if (ext_adv_capable(hdev)) { __hci_req_start_ext_adv(&req, 0x00); } else { __hci_req_update_adv_data(&req, 0x00); __hci_req_update_scan_rsp_data(&req, 0x00); __hci_req_enable_advertising(&req); } } hci_req_run(&req, adv_enable_complete); } static void adv_timeout_expire(struct work_struct *work) { struct hci_dev *hdev = container_of(work, struct hci_dev, adv_instance_expire.work); struct hci_request req; u8 instance; bt_dev_dbg(hdev, ""); hci_dev_lock(hdev); hdev->adv_instance_timeout = 0; instance = hdev->cur_adv_instance; if (instance == 0x00) goto unlock; hci_req_init(&req, hdev); hci_req_clear_adv_instance(hdev, NULL, &req, instance, false); if (list_empty(&hdev->adv_instances)) __hci_req_disable_advertising(&req); hci_req_run(&req, NULL); unlock: hci_dev_unlock(hdev); } static int hci_req_add_le_interleaved_scan(struct hci_request *req, unsigned long opt) { struct hci_dev *hdev = req->hdev; int ret = 0; hci_dev_lock(hdev); if (hci_dev_test_flag(hdev, HCI_LE_SCAN)) hci_req_add_le_scan_disable(req, 
false); hci_req_add_le_passive_scan(req); switch (hdev->interleave_scan_state) { case INTERLEAVE_SCAN_ALLOWLIST: bt_dev_dbg(hdev, "next state: allowlist"); hdev->interleave_scan_state = INTERLEAVE_SCAN_NO_FILTER; break; case INTERLEAVE_SCAN_NO_FILTER: bt_dev_dbg(hdev, "next state: no filter"); hdev->interleave_scan_state = INTERLEAVE_SCAN_ALLOWLIST; break; case INTERLEAVE_SCAN_NONE: BT_ERR("unexpected error"); ret = -1; } hci_dev_unlock(hdev); return ret; } static void interleave_scan_work(struct work_struct *work) { struct hci_dev *hdev = container_of(work, struct hci_dev, interleave_scan.work); u8 status; unsigned long timeout; if (hdev->interleave_scan_state == INTERLEAVE_SCAN_ALLOWLIST) { timeout = msecs_to_jiffies(hdev->advmon_allowlist_duration); } else if (hdev->interleave_scan_state == INTERLEAVE_SCAN_NO_FILTER) { timeout = msecs_to_jiffies(hdev->advmon_no_filter_duration); } else { bt_dev_err(hdev, "unexpected error"); return; } hci_req_sync(hdev, hci_req_add_le_interleaved_scan, 0, HCI_CMD_TIMEOUT, &status); /* Don't continue interleaving if it was canceled */ if (is_interleave_scanning(hdev)) queue_delayed_work(hdev->req_workqueue, &hdev->interleave_scan, timeout); } int hci_get_random_address(struct hci_dev *hdev, bool require_privacy, bool use_rpa, struct adv_info *adv_instance, u8 *own_addr_type, bdaddr_t *rand_addr) { int err; bacpy(rand_addr, BDADDR_ANY); /* If privacy is enabled use a resolvable private address. If * current RPA has expired then generate a new one. 
*/ if (use_rpa) { int to; /* If Controller supports LL Privacy use own address type is * 0x03 */ if (use_ll_privacy(hdev)) *own_addr_type = ADDR_LE_DEV_RANDOM_RESOLVED; else *own_addr_type = ADDR_LE_DEV_RANDOM; if (adv_instance) { if (!adv_instance->rpa_expired && !bacmp(&adv_instance->random_addr, &hdev->rpa)) return 0; adv_instance->rpa_expired = false; } else { if (!hci_dev_test_and_clear_flag(hdev, HCI_RPA_EXPIRED) && !bacmp(&hdev->random_addr, &hdev->rpa)) return 0; } err = smp_generate_rpa(hdev, hdev->irk, &hdev->rpa); if (err < 0) { bt_dev_err(hdev, "failed to generate new RPA"); return err; } bacpy(rand_addr, &hdev->rpa); to = msecs_to_jiffies(hdev->rpa_timeout * 1000); if (adv_instance) queue_delayed_work(hdev->workqueue, &adv_instance->rpa_expired_cb, to); else queue_delayed_work(hdev->workqueue, &hdev->rpa_expired, to); return 0; } /* In case of required privacy without resolvable private address, * use an non-resolvable private address. This is useful for * non-connectable advertising. */ if (require_privacy) { bdaddr_t nrpa; while (true) { /* The non-resolvable private address is generated * from random six bytes with the two most significant * bits cleared. */ get_random_bytes(&nrpa, 6); nrpa.b[5] &= 0x3f; /* The non-resolvable private address shall not be * equal to the public address. */ if (bacmp(&hdev->bdaddr, &nrpa)) break; } *own_addr_type = ADDR_LE_DEV_RANDOM; bacpy(rand_addr, &nrpa); return 0; } /* No privacy so use a public address. 
*/ *own_addr_type = ADDR_LE_DEV_PUBLIC; return 0; } void __hci_req_clear_ext_adv_sets(struct hci_request *req) { hci_req_add(req, HCI_OP_LE_CLEAR_ADV_SETS, 0, NULL); } int __hci_req_setup_ext_adv_instance(struct hci_request *req, u8 instance) { struct hci_cp_le_set_ext_adv_params cp; struct hci_dev *hdev = req->hdev; bool connectable; u32 flags; bdaddr_t random_addr; u8 own_addr_type; int err; struct adv_info *adv_instance; bool secondary_adv; if (instance > 0) { adv_instance = hci_find_adv_instance(hdev, instance); if (!adv_instance) return -EINVAL; } else { adv_instance = NULL; } flags = get_adv_instance_flags(hdev, instance); /* If the "connectable" instance flag was not set, then choose between * ADV_IND and ADV_NONCONN_IND based on the global connectable setting. */ connectable = (flags & MGMT_ADV_FLAG_CONNECTABLE) || mgmt_get_connectable(hdev); if (!is_advertising_allowed(hdev, connectable)) return -EPERM; /* Set require_privacy to true only when non-connectable * advertising is used. In that case it is fine to use a * non-resolvable private address. 
*/ err = hci_get_random_address(hdev, !connectable, adv_use_rpa(hdev, flags), adv_instance, &own_addr_type, &random_addr); if (err < 0) return err; memset(&cp, 0, sizeof(cp)); if (adv_instance) { hci_cpu_to_le24(adv_instance->min_interval, cp.min_interval); hci_cpu_to_le24(adv_instance->max_interval, cp.max_interval); cp.tx_power = adv_instance->tx_power; } else { hci_cpu_to_le24(hdev->le_adv_min_interval, cp.min_interval); hci_cpu_to_le24(hdev->le_adv_max_interval, cp.max_interval); cp.tx_power = HCI_ADV_TX_POWER_NO_PREFERENCE; } secondary_adv = (flags & MGMT_ADV_FLAG_SEC_MASK); if (connectable) { if (secondary_adv) cp.evt_properties = cpu_to_le16(LE_EXT_ADV_CONN_IND); else cp.evt_properties = cpu_to_le16(LE_LEGACY_ADV_IND); } else if (adv_instance_is_scannable(hdev, instance)) { if (secondary_adv) cp.evt_properties = cpu_to_le16(LE_EXT_ADV_SCAN_IND); else cp.evt_properties = cpu_to_le16(LE_LEGACY_ADV_SCAN_IND); } else { if (secondary_adv) cp.evt_properties = cpu_to_le16(LE_EXT_ADV_NON_CONN_IND); else cp.evt_properties = cpu_to_le16(LE_LEGACY_NONCONN_IND); } cp.own_addr_type = own_addr_type; cp.channel_map = hdev->le_adv_channel_map; cp.handle = instance; if (flags & MGMT_ADV_FLAG_SEC_2M) { cp.primary_phy = HCI_ADV_PHY_1M; cp.secondary_phy = HCI_ADV_PHY_2M; } else if (flags & MGMT_ADV_FLAG_SEC_CODED) { cp.primary_phy = HCI_ADV_PHY_CODED; cp.secondary_phy = HCI_ADV_PHY_CODED; } else { /* In all other cases use 1M */ cp.primary_phy = HCI_ADV_PHY_1M; cp.secondary_phy = HCI_ADV_PHY_1M; } hci_req_add(req, HCI_OP_LE_SET_EXT_ADV_PARAMS, sizeof(cp), &cp); if (own_addr_type == ADDR_LE_DEV_RANDOM && bacmp(&random_addr, BDADDR_ANY)) { struct hci_cp_le_set_adv_set_rand_addr cp; /* Check if random address need to be updated */ if (adv_instance) { if (!bacmp(&random_addr, &adv_instance->random_addr)) return 0; } else { if (!bacmp(&random_addr, &hdev->random_addr)) return 0; } memset(&cp, 0, sizeof(cp)); cp.handle = instance; bacpy(&cp.bdaddr, &random_addr); hci_req_add(req, 
HCI_OP_LE_SET_ADV_SET_RAND_ADDR, sizeof(cp), &cp); } return 0; } int __hci_req_enable_ext_advertising(struct hci_request *req, u8 instance) { struct hci_dev *hdev = req->hdev; struct hci_cp_le_set_ext_adv_enable *cp; struct hci_cp_ext_adv_set *adv_set; u8 data[sizeof(*cp) + sizeof(*adv_set) * 1]; struct adv_info *adv_instance; if (instance > 0) { adv_instance = hci_find_adv_instance(hdev, instance); if (!adv_instance) return -EINVAL; } else { adv_instance = NULL; } cp = (void *) data; adv_set = (void *) cp->data; memset(cp, 0, sizeof(*cp)); cp->enable = 0x01; cp->num_of_sets = 0x01; memset(adv_set, 0, sizeof(*adv_set)); adv_set->handle = instance; /* Set duration per instance since controller is responsible for * scheduling it. */ if (adv_instance && adv_instance->duration) { u16 duration = adv_instance->timeout * MSEC_PER_SEC; /* Time = N * 10 ms */ adv_set->duration = cpu_to_le16(duration / 10); } hci_req_add(req, HCI_OP_LE_SET_EXT_ADV_ENABLE, sizeof(*cp) + sizeof(*adv_set) * cp->num_of_sets, data); return 0; } int __hci_req_disable_ext_adv_instance(struct hci_request *req, u8 instance) { struct hci_dev *hdev = req->hdev; struct hci_cp_le_set_ext_adv_enable *cp; struct hci_cp_ext_adv_set *adv_set; u8 data[sizeof(*cp) + sizeof(*adv_set) * 1]; u8 req_size; /* If request specifies an instance that doesn't exist, fail */ if (instance > 0 && !hci_find_adv_instance(hdev, instance)) return -EINVAL; memset(data, 0, sizeof(data)); cp = (void *)data; adv_set = (void *)cp->data; /* Instance 0x00 indicates all advertising instances will be disabled */ cp->num_of_sets = !!instance; cp->enable = 0x00; adv_set->handle = instance; req_size = sizeof(*cp) + sizeof(*adv_set) * cp->num_of_sets; hci_req_add(req, HCI_OP_LE_SET_EXT_ADV_ENABLE, req_size, data); return 0; } int __hci_req_remove_ext_adv_instance(struct hci_request *req, u8 instance) { struct hci_dev *hdev = req->hdev; /* If request specifies an instance that doesn't exist, fail */ if (instance > 0 && 
!hci_find_adv_instance(hdev, instance)) return -EINVAL; hci_req_add(req, HCI_OP_LE_REMOVE_ADV_SET, sizeof(instance), &instance); return 0; } int __hci_req_start_ext_adv(struct hci_request *req, u8 instance) { struct hci_dev *hdev = req->hdev; struct adv_info *adv_instance = hci_find_adv_instance(hdev, instance); int err; /* If instance isn't pending, the chip knows about it, and it's safe to * disable */ if (adv_instance && !adv_instance->pending) __hci_req_disable_ext_adv_instance(req, instance); err = __hci_req_setup_ext_adv_instance(req, instance); if (err < 0) return err; __hci_req_update_scan_rsp_data(req, instance); __hci_req_enable_ext_advertising(req, instance); return 0; } int __hci_req_schedule_adv_instance(struct hci_request *req, u8 instance, bool force) { struct hci_dev *hdev = req->hdev; struct adv_info *adv_instance = NULL; u16 timeout; if (hci_dev_test_flag(hdev, HCI_ADVERTISING) || list_empty(&hdev->adv_instances)) return -EPERM; if (hdev->adv_instance_timeout) return -EBUSY; adv_instance = hci_find_adv_instance(hdev, instance); if (!adv_instance) return -ENOENT; /* A zero timeout means unlimited advertising. As long as there is * only one instance, duration should be ignored. We still set a timeout * in case further instances are being added later on. * * If the remaining lifetime of the instance is more than the duration * then the timeout corresponds to the duration, otherwise it will be * reduced to the remaining instance lifetime. */ if (adv_instance->timeout == 0 || adv_instance->duration <= adv_instance->remaining_time) timeout = adv_instance->duration; else timeout = adv_instance->remaining_time; /* The remaining time is being reduced unless the instance is being * advertised without time limit. 
*/ if (adv_instance->timeout) adv_instance->remaining_time = adv_instance->remaining_time - timeout; /* Only use work for scheduling instances with legacy advertising */ if (!ext_adv_capable(hdev)) { hdev->adv_instance_timeout = timeout; queue_delayed_work(hdev->req_workqueue, &hdev->adv_instance_expire, msecs_to_jiffies(timeout * 1000)); } /* If we're just re-scheduling the same instance again then do not * execute any HCI commands. This happens when a single instance is * being advertised. */ if (!force && hdev->cur_adv_instance == instance && hci_dev_test_flag(hdev, HCI_LE_ADV)) return 0; hdev->cur_adv_instance = instance; if (ext_adv_capable(hdev)) { __hci_req_start_ext_adv(req, instance); } else { __hci_req_update_adv_data(req, instance); __hci_req_update_scan_rsp_data(req, instance); __hci_req_enable_advertising(req); } return 0; } /* For a single instance: * - force == true: The instance will be removed even when its remaining * lifetime is not zero. * - force == false: the instance will be deactivated but kept stored unless * the remaining lifetime is zero. * * For instance == 0x00: * - force == true: All instances will be removed regardless of their timeout * setting. * - force == false: Only instances that have a timeout will be removed. */ void hci_req_clear_adv_instance(struct hci_dev *hdev, struct sock *sk, struct hci_request *req, u8 instance, bool force) { struct adv_info *adv_instance, *n, *next_instance = NULL; int err; u8 rem_inst; /* Cancel any timeout concerning the removed instance(s). */ if (!instance || hdev->cur_adv_instance == instance) cancel_adv_timeout(hdev); /* Get the next instance to advertise BEFORE we remove * the current one. This can be the same instance again * if there is only one instance. 
*/ if (instance && hdev->cur_adv_instance == instance) next_instance = hci_get_next_instance(hdev, instance); if (instance == 0x00) { list_for_each_entry_safe(adv_instance, n, &hdev->adv_instances, list) { if (!(force || adv_instance->timeout)) continue; rem_inst = adv_instance->instance; err = hci_remove_adv_instance(hdev, rem_inst); if (!err) mgmt_advertising_removed(sk, hdev, rem_inst); } } else { adv_instance = hci_find_adv_instance(hdev, instance); if (force || (adv_instance && adv_instance->timeout && !adv_instance->remaining_time)) { /* Don't advertise a removed instance. */ if (next_instance && next_instance->instance == instance) next_instance = NULL; err = hci_remove_adv_instance(hdev, instance); if (!err) mgmt_advertising_removed(sk, hdev, instance); } } if (!req || !hdev_is_powered(hdev) || hci_dev_test_flag(hdev, HCI_ADVERTISING)) return; if (next_instance && !ext_adv_capable(hdev)) __hci_req_schedule_adv_instance(req, next_instance->instance, false); } static void set_random_addr(struct hci_request *req, bdaddr_t *rpa) { struct hci_dev *hdev = req->hdev; /* If we're advertising or initiating an LE connection we can't * go ahead and change the random address at this time. This is * because the eventual initiator address used for the * subsequently created connection will be undefined (some * controllers use the new address and others the one we had * when the operation started). * * In this kind of scenario skip the update and let the random * address be updated at the next cycle. */ if (hci_dev_test_flag(hdev, HCI_LE_ADV) || hci_lookup_le_connect(hdev)) { bt_dev_dbg(hdev, "Deferring random address update"); hci_dev_set_flag(hdev, HCI_RPA_EXPIRED); return; } hci_req_add(req, HCI_OP_LE_SET_RANDOM_ADDR, 6, rpa); } int hci_update_random_address(struct hci_request *req, bool require_privacy, bool use_rpa, u8 *own_addr_type) { struct hci_dev *hdev = req->hdev; int err; /* If privacy is enabled use a resolvable private address. 
If * current RPA has expired or there is something else than * the current RPA in use, then generate a new one. */ if (use_rpa) { int to; /* If Controller supports LL Privacy use own address type is * 0x03 */ if (use_ll_privacy(hdev)) *own_addr_type = ADDR_LE_DEV_RANDOM_RESOLVED; else *own_addr_type = ADDR_LE_DEV_RANDOM; if (!hci_dev_test_and_clear_flag(hdev, HCI_RPA_EXPIRED) && !bacmp(&hdev->random_addr, &hdev->rpa)) return 0; err = smp_generate_rpa(hdev, hdev->irk, &hdev->rpa); if (err < 0) { bt_dev_err(hdev, "failed to generate new RPA"); return err; } set_random_addr(req, &hdev->rpa); to = msecs_to_jiffies(hdev->rpa_timeout * 1000); queue_delayed_work(hdev->workqueue, &hdev->rpa_expired, to); return 0; } /* In case of required privacy without resolvable private address, * use an non-resolvable private address. This is useful for active * scanning and non-connectable advertising. */ if (require_privacy) { bdaddr_t nrpa; while (true) { /* The non-resolvable private address is generated * from random six bytes with the two most significant * bits cleared. */ get_random_bytes(&nrpa, 6); nrpa.b[5] &= 0x3f; /* The non-resolvable private address shall not be * equal to the public address. */ if (bacmp(&hdev->bdaddr, &nrpa)) break; } *own_addr_type = ADDR_LE_DEV_RANDOM; set_random_addr(req, &nrpa); return 0; } /* If forcing static address is in use or there is no public * address use the static address as random address (but skip * the HCI command if the current random address is already the * static one. * * In case BR/EDR has been disabled on a dual-mode controller * and a static address has been configured, then use that * address instead of the public BR/EDR address. 
*/ if (hci_dev_test_flag(hdev, HCI_FORCE_STATIC_ADDR) || !bacmp(&hdev->bdaddr, BDADDR_ANY) || (!hci_dev_test_flag(hdev, HCI_BREDR_ENABLED) && bacmp(&hdev->static_addr, BDADDR_ANY))) { *own_addr_type = ADDR_LE_DEV_RANDOM; if (bacmp(&hdev->static_addr, &hdev->random_addr)) hci_req_add(req, HCI_OP_LE_SET_RANDOM_ADDR, 6, &hdev->static_addr); return 0; } /* Neither privacy nor static address is being used so use a * public address. */ *own_addr_type = ADDR_LE_DEV_PUBLIC; return 0; } static bool disconnected_whitelist_entries(struct hci_dev *hdev) { struct bdaddr_list *b; list_for_each_entry(b, &hdev->whitelist, list) { struct hci_conn *conn; conn = hci_conn_hash_lookup_ba(hdev, ACL_LINK, &b->bdaddr); if (!conn) return true; if (conn->state != BT_CONNECTED && conn->state != BT_CONFIG) return true; } return false; } void __hci_req_update_scan(struct hci_request *req) { struct hci_dev *hdev = req->hdev; u8 scan; if (!hci_dev_test_flag(hdev, HCI_BREDR_ENABLED)) return; if (!hdev_is_powered(hdev)) return; if (mgmt_powering_down(hdev)) return; if (hdev->scanning_paused) return; if (hci_dev_test_flag(hdev, HCI_CONNECTABLE) || disconnected_whitelist_entries(hdev)) scan = SCAN_PAGE; else scan = SCAN_DISABLED; if (hci_dev_test_flag(hdev, HCI_DISCOVERABLE)) scan |= SCAN_INQUIRY; if (test_bit(HCI_PSCAN, &hdev->flags) == !!(scan & SCAN_PAGE) && test_bit(HCI_ISCAN, &hdev->flags) == !!(scan & SCAN_INQUIRY)) return; hci_req_add(req, HCI_OP_WRITE_SCAN_ENABLE, 1, &scan); } static int update_scan(struct hci_request *req, unsigned long opt) { hci_dev_lock(req->hdev); __hci_req_update_scan(req); hci_dev_unlock(req->hdev); return 0; } static void scan_update_work(struct work_struct *work) { struct hci_dev *hdev = container_of(work, struct hci_dev, scan_update); hci_req_sync(hdev, update_scan, 0, HCI_CMD_TIMEOUT, NULL); } static int connectable_update(struct hci_request *req, unsigned long opt) { struct hci_dev *hdev = req->hdev; hci_dev_lock(hdev); __hci_req_update_scan(req); /* If BR/EDR is 
not enabled and we disable advertising as a * by-product of disabling connectable, we need to update the * advertising flags. */ if (!hci_dev_test_flag(hdev, HCI_BREDR_ENABLED)) __hci_req_update_adv_data(req, hdev->cur_adv_instance); /* Update the advertising parameters if necessary */ if (hci_dev_test_flag(hdev, HCI_ADVERTISING) || !list_empty(&hdev->adv_instances)) { if (ext_adv_capable(hdev)) __hci_req_start_ext_adv(req, hdev->cur_adv_instance); else __hci_req_enable_advertising(req); } __hci_update_background_scan(req); hci_dev_unlock(hdev); return 0; } static void connectable_update_work(struct work_struct *work) { struct hci_dev *hdev = container_of(work, struct hci_dev, connectable_update); u8 status; hci_req_sync(hdev, connectable_update, 0, HCI_CMD_TIMEOUT, &status); mgmt_set_connectable_complete(hdev, status); } static u8 get_service_classes(struct hci_dev *hdev) { struct bt_uuid *uuid; u8 val = 0; list_for_each_entry(uuid, &hdev->uuids, list) val |= uuid->svc_hint; return val; } void __hci_req_update_class(struct hci_request *req) { struct hci_dev *hdev = req->hdev; u8 cod[3]; bt_dev_dbg(hdev, ""); if (!hdev_is_powered(hdev)) return; if (!hci_dev_test_flag(hdev, HCI_BREDR_ENABLED)) return; if (hci_dev_test_flag(hdev, HCI_SERVICE_CACHE)) return; cod[0] = hdev->minor_class; cod[1] = hdev->major_class; cod[2] = get_service_classes(hdev); if (hci_dev_test_flag(hdev, HCI_LIMITED_DISCOVERABLE)) cod[1] |= 0x20; if (memcmp(cod, hdev->dev_class, 3) == 0) return; hci_req_add(req, HCI_OP_WRITE_CLASS_OF_DEV, sizeof(cod), cod); } static void write_iac(struct hci_request *req) { struct hci_dev *hdev = req->hdev; struct hci_cp_write_current_iac_lap cp; if (!hci_dev_test_flag(hdev, HCI_DISCOVERABLE)) return; if (hci_dev_test_flag(hdev, HCI_LIMITED_DISCOVERABLE)) { /* Limited discoverable mode */ cp.num_iac = min_t(u8, hdev->num_iac, 2); cp.iac_lap[0] = 0x00; /* LIAC */ cp.iac_lap[1] = 0x8b; cp.iac_lap[2] = 0x9e; cp.iac_lap[3] = 0x33; /* GIAC */ cp.iac_lap[4] = 0x8b; 
cp.iac_lap[5] = 0x9e; } else { /* General discoverable mode */ cp.num_iac = 1; cp.iac_lap[0] = 0x33; /* GIAC */ cp.iac_lap[1] = 0x8b; cp.iac_lap[2] = 0x9e; } hci_req_add(req, HCI_OP_WRITE_CURRENT_IAC_LAP, (cp.num_iac * 3) + 1, &cp); } static int discoverable_update(struct hci_request *req, unsigned long opt) { struct hci_dev *hdev = req->hdev; hci_dev_lock(hdev); if (hci_dev_test_flag(hdev, HCI_BREDR_ENABLED)) { write_iac(req); __hci_req_update_scan(req); __hci_req_update_class(req); } /* Advertising instances don't use the global discoverable setting, so * only update AD if advertising was enabled using Set Advertising. */ if (hci_dev_test_flag(hdev, HCI_ADVERTISING)) { __hci_req_update_adv_data(req, 0x00); /* Discoverable mode affects the local advertising * address in limited privacy mode. */ if (hci_dev_test_flag(hdev, HCI_LIMITED_PRIVACY)) { if (ext_adv_capable(hdev)) __hci_req_start_ext_adv(req, 0x00); else __hci_req_enable_advertising(req); } } hci_dev_unlock(hdev); return 0; } static void discoverable_update_work(struct work_struct *work) { struct hci_dev *hdev = container_of(work, struct hci_dev, discoverable_update); u8 status; hci_req_sync(hdev, discoverable_update, 0, HCI_CMD_TIMEOUT, &status); mgmt_set_discoverable_complete(hdev, status); } void __hci_abort_conn(struct hci_request *req, struct hci_conn *conn, u8 reason) { switch (conn->state) { case BT_CONNECTED: case BT_CONFIG: if (conn->type == AMP_LINK) { struct hci_cp_disconn_phy_link cp; cp.phy_handle = HCI_PHY_HANDLE(conn->handle); cp.reason = reason; hci_req_add(req, HCI_OP_DISCONN_PHY_LINK, sizeof(cp), &cp); } else { struct hci_cp_disconnect dc; dc.handle = cpu_to_le16(conn->handle); dc.reason = reason; hci_req_add(req, HCI_OP_DISCONNECT, sizeof(dc), &dc); } conn->state = BT_DISCONN; break; case BT_CONNECT: if (conn->type == LE_LINK) { if (test_bit(HCI_CONN_SCANNING, &conn->flags)) break; hci_req_add(req, HCI_OP_LE_CREATE_CONN_CANCEL, 0, NULL); } else if (conn->type == ACL_LINK) { if 
(req->hdev->hci_ver < BLUETOOTH_VER_1_2) break; hci_req_add(req, HCI_OP_CREATE_CONN_CANCEL, 6, &conn->dst); } break; case BT_CONNECT2: if (conn->type == ACL_LINK) { struct hci_cp_reject_conn_req rej; bacpy(&rej.bdaddr, &conn->dst); rej.reason = reason; hci_req_add(req, HCI_OP_REJECT_CONN_REQ, sizeof(rej), &rej); } else if (conn->type == SCO_LINK || conn->type == ESCO_LINK) { struct hci_cp_reject_sync_conn_req rej; bacpy(&rej.bdaddr, &conn->dst); /* SCO rejection has its own limited set of * allowed error values (0x0D-0x0F) which isn't * compatible with most values passed to this * function. To be safe hard-code one of the * values that's suitable for SCO. */ rej.reason = HCI_ERROR_REJ_LIMITED_RESOURCES; hci_req_add(req, HCI_OP_REJECT_SYNC_CONN_REQ, sizeof(rej), &rej); } break; default: conn->state = BT_CLOSED; break; } } static void abort_conn_complete(struct hci_dev *hdev, u8 status, u16 opcode) { if (status) bt_dev_dbg(hdev, "Failed to abort connection: status 0x%2.2x", status); } int hci_abort_conn(struct hci_conn *conn, u8 reason) { struct hci_request req; int err; hci_req_init(&req, conn->hdev); __hci_abort_conn(&req, conn, reason); err = hci_req_run(&req, abort_conn_complete); if (err && err != -ENODATA) { bt_dev_err(conn->hdev, "failed to run HCI request: err %d", err); return err; } return 0; } static int update_bg_scan(struct hci_request *req, unsigned long opt) { hci_dev_lock(req->hdev); __hci_update_background_scan(req); hci_dev_unlock(req->hdev); return 0; } static void bg_scan_update(struct work_struct *work) { struct hci_dev *hdev = container_of(work, struct hci_dev, bg_scan_update); struct hci_conn *conn; u8 status; int err; err = hci_req_sync(hdev, update_bg_scan, 0, HCI_CMD_TIMEOUT, &status); if (!err) return; hci_dev_lock(hdev); conn = hci_conn_hash_lookup_state(hdev, LE_LINK, BT_CONNECT); if (conn) hci_le_conn_failed(conn, status); hci_dev_unlock(hdev); } static int le_scan_disable(struct hci_request *req, unsigned long opt) { 
hci_req_add_le_scan_disable(req, false); return 0; } static int bredr_inquiry(struct hci_request *req, unsigned long opt) { u8 length = opt; const u8 giac[3] = { 0x33, 0x8b, 0x9e }; const u8 liac[3] = { 0x00, 0x8b, 0x9e }; struct hci_cp_inquiry cp; bt_dev_dbg(req->hdev, ""); hci_dev_lock(req->hdev); hci_inquiry_cache_flush(req->hdev); hci_dev_unlock(req->hdev); memset(&cp, 0, sizeof(cp)); if (req->hdev->discovery.limited) memcpy(&cp.lap, liac, sizeof(cp.lap)); else memcpy(&cp.lap, giac, sizeof(cp.lap)); cp.length = length; hci_req_add(req, HCI_OP_INQUIRY, sizeof(cp), &cp); return 0; } static void le_scan_disable_work(struct work_struct *work) { struct hci_dev *hdev = container_of(work, struct hci_dev, le_scan_disable.work); u8 status; bt_dev_dbg(hdev, ""); if (!hci_dev_test_flag(hdev, HCI_LE_SCAN)) return; cancel_delayed_work(&hdev->le_scan_restart); hci_req_sync(hdev, le_scan_disable, 0, HCI_CMD_TIMEOUT, &status); if (status) { bt_dev_err(hdev, "failed to disable LE scan: status 0x%02x", status); return; } hdev->discovery.scan_start = 0; /* If we were running LE only scan, change discovery state. If * we were running both LE and BR/EDR inquiry simultaneously, * and BR/EDR inquiry is already finished, stop discovery, * otherwise BR/EDR inquiry will stop discovery when finished. * If we will resolve remote device name, do not change * discovery state. 
*/ if (hdev->discovery.type == DISCOV_TYPE_LE) goto discov_stopped; if (hdev->discovery.type != DISCOV_TYPE_INTERLEAVED) return; if (test_bit(HCI_QUIRK_SIMULTANEOUS_DISCOVERY, &hdev->quirks)) { if (!test_bit(HCI_INQUIRY, &hdev->flags) && hdev->discovery.state != DISCOVERY_RESOLVING) goto discov_stopped; return; } hci_req_sync(hdev, bredr_inquiry, DISCOV_INTERLEAVED_INQUIRY_LEN, HCI_CMD_TIMEOUT, &status); if (status) { bt_dev_err(hdev, "inquiry failed: status 0x%02x", status); goto discov_stopped; } return; discov_stopped: hci_dev_lock(hdev); hci_discovery_set_state(hdev, DISCOVERY_STOPPED); hci_dev_unlock(hdev); } static int le_scan_restart(struct hci_request *req, unsigned long opt) { struct hci_dev *hdev = req->hdev; /* If controller is not scanning we are done. */ if (!hci_dev_test_flag(hdev, HCI_LE_SCAN)) return 0; if (hdev->scanning_paused) { bt_dev_dbg(hdev, "Scanning is paused for suspend"); return 0; } hci_req_add_le_scan_disable(req, false); if (use_ext_scan(hdev)) { struct hci_cp_le_set_ext_scan_enable ext_enable_cp; memset(&ext_enable_cp, 0, sizeof(ext_enable_cp)); ext_enable_cp.enable = LE_SCAN_ENABLE; ext_enable_cp.filter_dup = LE_SCAN_FILTER_DUP_ENABLE; hci_req_add(req, HCI_OP_LE_SET_EXT_SCAN_ENABLE, sizeof(ext_enable_cp), &ext_enable_cp); } else { struct hci_cp_le_set_scan_enable cp; memset(&cp, 0, sizeof(cp)); cp.enable = LE_SCAN_ENABLE; cp.filter_dup = LE_SCAN_FILTER_DUP_ENABLE; hci_req_add(req, HCI_OP_LE_SET_SCAN_ENABLE, sizeof(cp), &cp); } return 0; } static void le_scan_restart_work(struct work_struct *work) { struct hci_dev *hdev = container_of(work, struct hci_dev, le_scan_restart.work); unsigned long timeout, duration, scan_start, now; u8 status; bt_dev_dbg(hdev, ""); hci_req_sync(hdev, le_scan_restart, 0, HCI_CMD_TIMEOUT, &status); if (status) { bt_dev_err(hdev, "failed to restart LE scan: status %d", status); return; } hci_dev_lock(hdev); if (!test_bit(HCI_QUIRK_STRICT_DUPLICATE_FILTER, &hdev->quirks) || !hdev->discovery.scan_start) goto 
unlock; /* When the scan was started, hdev->le_scan_disable has been queued * after duration from scan_start. During scan restart this job * has been canceled, and we need to queue it again after proper * timeout, to make sure that scan does not run indefinitely. */ duration = hdev->discovery.scan_duration; scan_start = hdev->discovery.scan_start; now = jiffies; if (now - scan_start <= duration) { int elapsed; if (now >= scan_start) elapsed = now - scan_start; else elapsed = ULONG_MAX - scan_start + now; timeout = duration - elapsed; } else { timeout = 0; } queue_delayed_work(hdev->req_workqueue, &hdev->le_scan_disable, timeout); unlock: hci_dev_unlock(hdev); } static int active_scan(struct hci_request *req, unsigned long opt) { uint16_t interval = opt; struct hci_dev *hdev = req->hdev; u8 own_addr_type; /* White list is not used for discovery */ u8 filter_policy = 0x00; /* Discovery doesn't require controller address resolution */ bool addr_resolv = false; int err; bt_dev_dbg(hdev, ""); /* If controller is scanning, it means the background scanning is * running. Thus, we should temporarily stop it in order to set the * discovery scanning parameters. */ if (hci_dev_test_flag(hdev, HCI_LE_SCAN)) { hci_req_add_le_scan_disable(req, false); cancel_interleave_scan(hdev); } /* All active scans will be done with either a resolvable private * address (when privacy feature has been enabled) or non-resolvable * private address. 
*/ err = hci_update_random_address(req, true, scan_use_rpa(hdev), &own_addr_type); if (err < 0) own_addr_type = ADDR_LE_DEV_PUBLIC; hci_req_start_scan(req, LE_SCAN_ACTIVE, interval, hdev->le_scan_window_discovery, own_addr_type, filter_policy, addr_resolv); return 0; } static int interleaved_discov(struct hci_request *req, unsigned long opt) { int err; bt_dev_dbg(req->hdev, ""); err = active_scan(req, opt); if (err) return err; return bredr_inquiry(req, DISCOV_BREDR_INQUIRY_LEN); } static void start_discovery(struct hci_dev *hdev, u8 *status) { unsigned long timeout; bt_dev_dbg(hdev, "type %u", hdev->discovery.type); switch (hdev->discovery.type) { case DISCOV_TYPE_BREDR: if (!hci_dev_test_flag(hdev, HCI_INQUIRY)) hci_req_sync(hdev, bredr_inquiry, DISCOV_BREDR_INQUIRY_LEN, HCI_CMD_TIMEOUT, status); return; case DISCOV_TYPE_INTERLEAVED: /* When running simultaneous discovery, the LE scanning time * should occupy the whole discovery time sine BR/EDR inquiry * and LE scanning are scheduled by the controller. * * For interleaving discovery in comparison, BR/EDR inquiry * and LE scanning are done sequentially with separate * timeouts. */ if (test_bit(HCI_QUIRK_SIMULTANEOUS_DISCOVERY, &hdev->quirks)) { timeout = msecs_to_jiffies(DISCOV_LE_TIMEOUT); /* During simultaneous discovery, we double LE scan * interval. We must leave some time for the controller * to do BR/EDR inquiry. 
*/ hci_req_sync(hdev, interleaved_discov, hdev->le_scan_int_discovery * 2, HCI_CMD_TIMEOUT, status); break; } timeout = msecs_to_jiffies(hdev->discov_interleaved_timeout); hci_req_sync(hdev, active_scan, hdev->le_scan_int_discovery, HCI_CMD_TIMEOUT, status); break; case DISCOV_TYPE_LE: timeout = msecs_to_jiffies(DISCOV_LE_TIMEOUT); hci_req_sync(hdev, active_scan, hdev->le_scan_int_discovery, HCI_CMD_TIMEOUT, status); break; default: *status = HCI_ERROR_UNSPECIFIED; return; } if (*status) return; bt_dev_dbg(hdev, "timeout %u ms", jiffies_to_msecs(timeout)); /* When service discovery is used and the controller has a * strict duplicate filter, it is important to remember the * start and duration of the scan. This is required for * restarting scanning during the discovery phase. */ if (test_bit(HCI_QUIRK_STRICT_DUPLICATE_FILTER, &hdev->quirks) && hdev->discovery.result_filtering) { hdev->discovery.scan_start = jiffies; hdev->discovery.scan_duration = timeout; } queue_delayed_work(hdev->req_workqueue, &hdev->le_scan_disable, timeout); } bool hci_req_stop_discovery(struct hci_request *req) { struct hci_dev *hdev = req->hdev; struct discovery_state *d = &hdev->discovery; struct hci_cp_remote_name_req_cancel cp; struct inquiry_entry *e; bool ret = false; bt_dev_dbg(hdev, "state %u", hdev->discovery.state); if (d->state == DISCOVERY_FINDING || d->state == DISCOVERY_STOPPING) { if (test_bit(HCI_INQUIRY, &hdev->flags)) hci_req_add(req, HCI_OP_INQUIRY_CANCEL, 0, NULL); if (hci_dev_test_flag(hdev, HCI_LE_SCAN)) { cancel_delayed_work(&hdev->le_scan_disable); hci_req_add_le_scan_disable(req, false); } ret = true; } else { /* Passive scanning */ if (hci_dev_test_flag(hdev, HCI_LE_SCAN)) { hci_req_add_le_scan_disable(req, false); ret = true; } } /* No further actions needed for LE-only discovery */ if (d->type == DISCOV_TYPE_LE) return ret; if (d->state == DISCOVERY_RESOLVING || d->state == DISCOVERY_STOPPING) { e = hci_inquiry_cache_lookup_resolve(hdev, BDADDR_ANY, NAME_PENDING); 
if (!e) return ret; bacpy(&cp.bdaddr, &e->data.bdaddr); hci_req_add(req, HCI_OP_REMOTE_NAME_REQ_CANCEL, sizeof(cp), &cp); ret = true; } return ret; } static int stop_discovery(struct hci_request *req, unsigned long opt) { hci_dev_lock(req->hdev); hci_req_stop_discovery(req); hci_dev_unlock(req->hdev); return 0; } static void discov_update(struct work_struct *work) { struct hci_dev *hdev = container_of(work, struct hci_dev, discov_update); u8 status = 0; switch (hdev->discovery.state) { case DISCOVERY_STARTING: start_discovery(hdev, &status); mgmt_start_discovery_complete(hdev, status); if (status) hci_discovery_set_state(hdev, DISCOVERY_STOPPED); else hci_discovery_set_state(hdev, DISCOVERY_FINDING); break; case DISCOVERY_STOPPING: hci_req_sync(hdev, stop_discovery, 0, HCI_CMD_TIMEOUT, &status); mgmt_stop_discovery_complete(hdev, status); if (!status) hci_discovery_set_state(hdev, DISCOVERY_STOPPED); break; case DISCOVERY_STOPPED: default: return; } } static void discov_off(struct work_struct *work) { struct hci_dev *hdev = container_of(work, struct hci_dev, discov_off.work); bt_dev_dbg(hdev, ""); hci_dev_lock(hdev); /* When discoverable timeout triggers, then just make sure * the limited discoverable flag is cleared. Even in the case * of a timeout triggered from general discoverable, it is * safe to unconditionally clear the flag. 
*/ hci_dev_clear_flag(hdev, HCI_LIMITED_DISCOVERABLE); hci_dev_clear_flag(hdev, HCI_DISCOVERABLE); hdev->discov_timeout = 0; hci_dev_unlock(hdev); hci_req_sync(hdev, discoverable_update, 0, HCI_CMD_TIMEOUT, NULL); mgmt_new_settings(hdev); } static int powered_update_hci(struct hci_request *req, unsigned long opt) { struct hci_dev *hdev = req->hdev; u8 link_sec; hci_dev_lock(hdev); if (hci_dev_test_flag(hdev, HCI_SSP_ENABLED) && !lmp_host_ssp_capable(hdev)) { u8 mode = 0x01; hci_req_add(req, HCI_OP_WRITE_SSP_MODE, sizeof(mode), &mode); if (bredr_sc_enabled(hdev) && !lmp_host_sc_capable(hdev)) { u8 support = 0x01; hci_req_add(req, HCI_OP_WRITE_SC_SUPPORT, sizeof(support), &support); } } if (hci_dev_test_flag(hdev, HCI_LE_ENABLED) && lmp_bredr_capable(hdev)) { struct hci_cp_write_le_host_supported cp; cp.le = 0x01; cp.simul = 0x00; /* Check first if we already have the right * host state (host features set) */ if (cp.le != lmp_host_le_capable(hdev) || cp.simul != lmp_host_le_br_capable(hdev)) hci_req_add(req, HCI_OP_WRITE_LE_HOST_SUPPORTED, sizeof(cp), &cp); } if (hci_dev_test_flag(hdev, HCI_LE_ENABLED)) { /* Make sure the controller has a good default for * advertising data. This also applies to the case * where BR/EDR was toggled during the AUTO_OFF phase. 
*/ if (hci_dev_test_flag(hdev, HCI_ADVERTISING) || list_empty(&hdev->adv_instances)) { int err; if (ext_adv_capable(hdev)) { err = __hci_req_setup_ext_adv_instance(req, 0x00); if (!err) __hci_req_update_scan_rsp_data(req, 0x00); } else { err = 0; __hci_req_update_adv_data(req, 0x00); __hci_req_update_scan_rsp_data(req, 0x00); } if (hci_dev_test_flag(hdev, HCI_ADVERTISING)) { if (!ext_adv_capable(hdev)) __hci_req_enable_advertising(req); else if (!err) __hci_req_enable_ext_advertising(req, 0x00); } } else if (!list_empty(&hdev->adv_instances)) { struct adv_info *adv_instance; adv_instance = list_first_entry(&hdev->adv_instances, struct adv_info, list); __hci_req_schedule_adv_instance(req, adv_instance->instance, true); } } link_sec = hci_dev_test_flag(hdev, HCI_LINK_SECURITY); if (link_sec != test_bit(HCI_AUTH, &hdev->flags)) hci_req_add(req, HCI_OP_WRITE_AUTH_ENABLE, sizeof(link_sec), &link_sec); if (lmp_bredr_capable(hdev)) { if (hci_dev_test_flag(hdev, HCI_FAST_CONNECTABLE)) __hci_req_write_fast_connectable(req, true); else __hci_req_write_fast_connectable(req, false); __hci_req_update_scan(req); __hci_req_update_class(req); __hci_req_update_name(req); __hci_req_update_eir(req); } hci_dev_unlock(hdev); return 0; } int __hci_req_hci_power_on(struct hci_dev *hdev) { /* Register the available SMP channels (BR/EDR and LE) only when * successfully powering on the controller. This late * registration is required so that LE SMP can clearly decide if * the public address or static address is used. 
*/ smp_register(hdev); return __hci_req_sync(hdev, powered_update_hci, 0, HCI_CMD_TIMEOUT, NULL); } void hci_request_setup(struct hci_dev *hdev) { INIT_WORK(&hdev->discov_update, discov_update); INIT_WORK(&hdev->bg_scan_update, bg_scan_update); INIT_WORK(&hdev->scan_update, scan_update_work); INIT_WORK(&hdev->connectable_update, connectable_update_work); INIT_WORK(&hdev->discoverable_update, discoverable_update_work); INIT_DELAYED_WORK(&hdev->discov_off, discov_off); INIT_DELAYED_WORK(&hdev->le_scan_disable, le_scan_disable_work); INIT_DELAYED_WORK(&hdev->le_scan_restart, le_scan_restart_work); INIT_DELAYED_WORK(&hdev->adv_instance_expire, adv_timeout_expire); INIT_DELAYED_WORK(&hdev->interleave_scan, interleave_scan_work); } void hci_request_cancel_all(struct hci_dev *hdev) { hci_req_sync_cancel(hdev, ENODEV); cancel_work_sync(&hdev->discov_update); cancel_work_sync(&hdev->bg_scan_update); cancel_work_sync(&hdev->scan_update); cancel_work_sync(&hdev->connectable_update); cancel_work_sync(&hdev->discoverable_update); cancel_delayed_work_sync(&hdev->discov_off); cancel_delayed_work_sync(&hdev->le_scan_disable); cancel_delayed_work_sync(&hdev->le_scan_restart); if (hdev->adv_instance_timeout) { cancel_delayed_work_sync(&hdev->adv_instance_expire); hdev->adv_instance_timeout = 0; } cancel_interleave_scan(hdev); }
int hci_req_sync(struct hci_dev *hdev, int (*req)(struct hci_request *req, unsigned long opt), unsigned long opt, u32 timeout, u8 *hci_status) { int ret; if (!test_bit(HCI_UP, &hdev->flags)) return -ENETDOWN; /* Serialize all requests */ hci_req_sync_lock(hdev); ret = __hci_req_sync(hdev, req, opt, timeout, hci_status); hci_req_sync_unlock(hdev); return ret; }
int hci_req_sync(struct hci_dev *hdev, int (*req)(struct hci_request *req, unsigned long opt), unsigned long opt, u32 timeout, u8 *hci_status) { int ret; /* Serialize all requests */ hci_req_sync_lock(hdev); /* check the state after obtaing the lock to protect the HCI_UP * against any races from hci_dev_do_close when the controller * gets removed. */ if (test_bit(HCI_UP, &hdev->flags)) ret = __hci_req_sync(hdev, req, opt, timeout, hci_status); else ret = -ENETDOWN; hci_req_sync_unlock(hdev); return ret; }
{'added': [(277, '\t/* check the state after obtaing the lock to protect the HCI_UP'), (278, '\t * against any races from hci_dev_do_close when the controller'), (279, '\t * gets removed.'), (280, '\t */'), (281, '\tif (test_bit(HCI_UP, &hdev->flags))'), (282, '\t\tret = __hci_req_sync(hdev, req, opt, timeout, hci_status);'), (283, '\telse'), (284, '\t\tret = -ENETDOWN;')], 'deleted': [(275, '\tif (!test_bit(HCI_UP, &hdev->flags))'), (276, '\t\treturn -ENETDOWN;'), (277, ''), (280, '\tret = __hci_req_sync(hdev, req, opt, timeout, hci_status);')]}
8
4
2,341
14,517
12
84
2
https://github.com/torvalds/linux
CVE-2021-32399
CWE-362
1,710
netscreen.c
C
parse_netscreen_packet
/* netscreen.c * * Juniper NetScreen snoop output parser * Created by re-using a lot of code from cosine.c * Copyright (c) 2007 by Sake Blok <sake@euronet.nl> * * Wiretap Library * Copyright (c) 1998 by Gilbert Ramirez <gram@alumni.rice.edu> * * This program is free software; you can redistribute it and/or * modify it under the terms of the GNU General Public License * as published by the Free Software Foundation; either version 2 * of the License, or (at your option) any later version. * * This program is distributed in the hope that it will be useful, * but WITHOUT ANY WARRANTY; without even the implied warranty of * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the * GNU General Public License for more details. * * You should have received a copy of the GNU General Public License * along with this program; if not, write to the Free Software * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA. */ #include "config.h" #include "wtap-int.h" #include "netscreen.h" #include "file_wrappers.h" #include <stdlib.h> #include <string.h> /* XXX TODO: * * o Construct a list of interfaces, with interface names, give * them link-layer types based on the interface name and packet * data, and supply interface IDs with each packet (i.e., make * this supply a pcap-ng-style set of interfaces and associate * packets with interfaces). This is probably the right way * to "Pass the interface names and the traffic direction to either * the frame-structure, a pseudo-header or use PPI." See the * message at * * http://www.wireshark.org/lists/wireshark-dev/200708/msg00029.html * * to see whether any further discussion is still needed. I suspect * it doesn't; pcap-NG existed at the time, as per the final * message in that thread: * * http://www.wireshark.org/lists/wireshark-dev/200708/msg00039.html * * but I don't think we fully *supported* it at that point. 
Now * that we do, we have the infrastructure to support this, except * that we currently have no way to translate interface IDs to * interface names in the "frame" dissector or to supply interface * information as part of the packet metadata from Wiretap modules. * That should be fixed so that we can show interface information, * such as the interface name, in packet dissections from, for example, * pcap-NG captures. */ static gboolean info_line(const gchar *line); static gint64 netscreen_seek_next_packet(wtap *wth, int *err, gchar **err_info, char *hdr); static gboolean netscreen_check_file_type(wtap *wth, int *err, gchar **err_info); static gboolean netscreen_read(wtap *wth, int *err, gchar **err_info, gint64 *data_offset); static gboolean netscreen_seek_read(wtap *wth, gint64 seek_off, struct wtap_pkthdr *phdr, Buffer *buf, int *err, gchar **err_info); static gboolean parse_netscreen_packet(FILE_T fh, struct wtap_pkthdr *phdr, Buffer* buf, char *line, int *err, gchar **err_info); static int parse_single_hex_dump_line(char* rec, guint8 *buf, guint byte_offset); /* Returns TRUE if the line appears to be a line with protocol info. Otherwise it returns FALSE. */ static gboolean info_line(const gchar *line) { int i=NETSCREEN_SPACES_ON_INFO_LINE; while (i-- > 0) { if (g_ascii_isspace(*line)) { line++; continue; } else { return FALSE; } } return TRUE; } /* Seeks to the beginning of the next packet, and returns the byte offset. Copy the header line to hdr. Returns -1 on failure, and sets "*err" to the error and sets "*err_info" to null or an additional error string. */ static gint64 netscreen_seek_next_packet(wtap *wth, int *err, gchar **err_info, char *hdr) { gint64 cur_off; char buf[NETSCREEN_LINE_LENGTH]; while (1) { cur_off = file_tell(wth->fh); if (cur_off == -1) { /* Error */ *err = file_error(wth->fh, err_info); return -1; } if (file_gets(buf, sizeof(buf), wth->fh) == NULL) { /* EOF or error. 
*/ *err = file_error(wth->fh, err_info); break; } if (strstr(buf, NETSCREEN_REC_MAGIC_STR1) || strstr(buf, NETSCREEN_REC_MAGIC_STR2)) { g_strlcpy(hdr, buf, NETSCREEN_LINE_LENGTH); return cur_off; } } return -1; } /* Look through the first part of a file to see if this is * NetScreen snoop output. * * Returns TRUE if it is, FALSE if it isn't or if we get an I/O error; * if we get an I/O error, "*err" will be set to a non-zero value and * "*err_info" is set to null or an additional error string. */ static gboolean netscreen_check_file_type(wtap *wth, int *err, gchar **err_info) { char buf[NETSCREEN_LINE_LENGTH]; guint reclen, line; buf[NETSCREEN_LINE_LENGTH-1] = '\0'; for (line = 0; line < NETSCREEN_HEADER_LINES_TO_CHECK; line++) { if (file_gets(buf, NETSCREEN_LINE_LENGTH, wth->fh) == NULL) { /* EOF or error. */ *err = file_error(wth->fh, err_info); return FALSE; } reclen = (guint) strlen(buf); if (reclen < strlen(NETSCREEN_HDR_MAGIC_STR1) || reclen < strlen(NETSCREEN_HDR_MAGIC_STR2)) { continue; } if (strstr(buf, NETSCREEN_HDR_MAGIC_STR1) || strstr(buf, NETSCREEN_HDR_MAGIC_STR2)) { return TRUE; } } *err = 0; return FALSE; } wtap_open_return_val netscreen_open(wtap *wth, int *err, gchar **err_info) { /* Look for a NetScreen snoop header line */ if (!netscreen_check_file_type(wth, err, err_info)) { if (*err != 0 && *err != WTAP_ERR_SHORT_READ) return WTAP_OPEN_ERROR; return WTAP_OPEN_NOT_MINE; } if (file_seek(wth->fh, 0L, SEEK_SET, err) == -1) /* rewind */ return WTAP_OPEN_ERROR; wth->file_encap = WTAP_ENCAP_UNKNOWN; wth->file_type_subtype = WTAP_FILE_TYPE_SUBTYPE_NETSCREEN; wth->snapshot_length = 0; /* not known */ wth->subtype_read = netscreen_read; wth->subtype_seek_read = netscreen_seek_read; wth->file_tsprec = WTAP_TSPREC_DSEC; return WTAP_OPEN_MINE; } /* Find the next packet and parse it; called from wtap_read(). 
*/ static gboolean netscreen_read(wtap *wth, int *err, gchar **err_info, gint64 *data_offset) { gint64 offset; char line[NETSCREEN_LINE_LENGTH]; /* Find the next packet */ offset = netscreen_seek_next_packet(wth, err, err_info, line); if (offset < 0) return FALSE; /* Parse the header and convert the ASCII hex dump to binary data */ if (!parse_netscreen_packet(wth->fh, &wth->phdr, wth->frame_buffer, line, err, err_info)) return FALSE; /* * If the per-file encapsulation isn't known, set it to this * packet's encapsulation. * * If it *is* known, and it isn't this packet's encapsulation, * set it to WTAP_ENCAP_PER_PACKET, as this file doesn't * have a single encapsulation for all packets in the file. */ if (wth->file_encap == WTAP_ENCAP_UNKNOWN) wth->file_encap = wth->phdr.pkt_encap; else { if (wth->file_encap != wth->phdr.pkt_encap) wth->file_encap = WTAP_ENCAP_PER_PACKET; } *data_offset = offset; return TRUE; } /* Used to read packets in random-access fashion */ static gboolean netscreen_seek_read(wtap *wth, gint64 seek_off, struct wtap_pkthdr *phdr, Buffer *buf, int *err, gchar **err_info) { char line[NETSCREEN_LINE_LENGTH]; if (file_seek(wth->random_fh, seek_off, SEEK_SET, err) == -1) { return FALSE; } if (file_gets(line, NETSCREEN_LINE_LENGTH, wth->random_fh) == NULL) { *err = file_error(wth->random_fh, err_info); if (*err == 0) { *err = WTAP_ERR_SHORT_READ; } return FALSE; } return parse_netscreen_packet(wth->random_fh, phdr, buf, line, err, err_info); } /* Parses a packet record header. There are a few possible formats: * * XXX list extra formats here! 6843828.0: trust(o) len=98:00121ebbd132->00600868d659/0800 192.168.1.1 -> 192.168.1.10/6 vhl=45, tos=00, id=37739, frag=0000, ttl=64 tlen=84 tcp:ports 2222->2333, seq=3452113890, ack=1540618280, flag=5018/ACK 00 60 08 68 d6 59 00 12 1e bb d1 32 08 00 45 00 .`.h.Y.....2..E. 00 54 93 6b 00 00 40 06 63 dd c0 a8 01 01 c0 a8 .T.k..@.c....... 01 0a 08 ae 09 1d cd c3 13 e2 5b d3 f8 28 50 18 ..........[..(P. 
1f d4 79 21 00 00 e7 76 89 64 16 e2 19 0a 80 09 ..y!...v.d...... 31 e7 04 28 04 58 f3 d9 b1 9f 3d 65 1a db d8 61 1..(.X....=e...a 2c 21 b6 d3 20 60 0c 8c 35 98 88 cf 20 91 0e a9 ,!...`..5....... 1d 0b .. */ static gboolean parse_netscreen_packet(FILE_T fh, struct wtap_pkthdr *phdr, Buffer* buf, char *line, int *err, gchar **err_info) { int sec; int dsec; char cap_int[NETSCREEN_MAX_INT_NAME_LENGTH]; char direction[2]; guint pkt_len; char cap_src[13]; char cap_dst[13]; guint8 *pd; gchar *p; int n, i = 0; guint offset = 0; gchar dststr[13]; phdr->rec_type = REC_TYPE_PACKET; phdr->presence_flags = WTAP_HAS_TS|WTAP_HAS_CAP_LEN; if (sscanf(line, "%9d.%9d: %15[a-z0-9/:.-](%1[io]) len=%9u:%12s->%12s/", &sec, &dsec, cap_int, direction, &pkt_len, cap_src, cap_dst) < 5) { *err = WTAP_ERR_BAD_FILE; *err_info = g_strdup("netscreen: Can't parse packet-header"); return -1; } if (pkt_len > WTAP_MAX_PACKET_SIZE) { /* * Probably a corrupt capture file; don't blow up trying * to allocate space for an immensely-large packet. */ *err = WTAP_ERR_BAD_FILE; *err_info = g_strdup_printf("netscreen: File has %u-byte packet, bigger than maximum of %u", pkt_len, WTAP_MAX_PACKET_SIZE); return FALSE; } /* * If direction[0] is 'o', the direction is NETSCREEN_EGRESS, * otherwise it's NETSCREEN_INGRESS. */ phdr->ts.secs = sec; phdr->ts.nsecs = dsec * 100000000; phdr->len = pkt_len; /* Make sure we have enough room for the packet */ ws_buffer_assure_space(buf, pkt_len); pd = ws_buffer_start_ptr(buf); while(1) { /* The last packet is not delimited by an empty line, but by EOF * So accept EOF as a valid delimiter too */ if (file_gets(line, NETSCREEN_LINE_LENGTH, fh) == NULL) { break; } /* * Skip blanks. * The number of blanks is not fixed - for wireless * interfaces, there may be 14 extra spaces before * the hex data. 
*/ for (p = &line[0]; g_ascii_isspace(*p); p++) ; /* packets are delimited with empty lines */ if (*p == '\0') { break; } n = parse_single_hex_dump_line(p, pd, offset); /* the smallest packet has a length of 6 bytes, if * the first hex-data is less then check whether * it is a info-line and act accordingly */ if (offset == 0 && n < 6) { if (info_line(line)) { if (++i <= NETSCREEN_MAX_INFOLINES) { continue; } } else { *err = WTAP_ERR_BAD_FILE; *err_info = g_strdup("netscreen: cannot parse hex-data"); return FALSE; } } /* If there is no more data and the line was not empty, * then there must be an error in the file */ if (n == -1) { *err = WTAP_ERR_BAD_FILE; *err_info = g_strdup("netscreen: cannot parse hex-data"); return FALSE; } /* Adjust the offset to the data that was just added to the buffer */ offset += n; /* If there was more hex-data than was announced in the len=x * header, then then there must be an error in the file */ if (offset > pkt_len) { *err = WTAP_ERR_BAD_FILE; *err_info = g_strdup("netscreen: too much hex-data"); return FALSE; } } /* * Determine the encapsulation type, based on the * first 4 characters of the interface name * * XXX convert this to a 'case' structure when adding more * (non-ethernet) interfacetypes */ if (strncmp(cap_int, "adsl", 4) == 0) { /* The ADSL interface can be bridged with or without * PPP encapsulation. Check whether the first six bytes * of the hex data are the same as the destination mac * address in the header. 
If they are, assume ethernet * LinkLayer or else PPP */ g_snprintf(dststr, 13, "%02x%02x%02x%02x%02x%02x", pd[0], pd[1], pd[2], pd[3], pd[4], pd[5]); if (strncmp(dststr, cap_dst, 12) == 0) phdr->pkt_encap = WTAP_ENCAP_ETHERNET; else phdr->pkt_encap = WTAP_ENCAP_PPP; } else if (strncmp(cap_int, "seri", 4) == 0) phdr->pkt_encap = WTAP_ENCAP_PPP; else phdr->pkt_encap = WTAP_ENCAP_ETHERNET; phdr->caplen = offset; return TRUE; } /* Take a string representing one line from a hex dump, with leading white * space removed, and converts the text to binary data. We place the bytes * in the buffer at the specified offset. * * Returns number of bytes successfully read, -1 if bad. */ static int parse_single_hex_dump_line(char* rec, guint8 *buf, guint byte_offset) { int num_items_scanned; guint8 character; guint8 byte; for (num_items_scanned = 0; num_items_scanned < 16; num_items_scanned++) { character = *rec++; if (character >= '0' && character <= '9') byte = character - '0' + 0; else if (character >= 'A' && character <= 'F') byte = character - 'A' + 0xA; else if (character >= 'a' && character <= 'f') byte = character - 'a' + 0xa; else if (character == ' ' || character == '\r' || character == '\n' || character == '\0') { /* Nothing more to parse */ break; } else return -1; /* not a hex digit, space before ASCII dump, or EOL */ byte <<= 4; character = *rec++ & 0xFF; if (character >= '0' && character <= '9') byte += character - '0' + 0; else if (character >= 'A' && character <= 'F') byte += character - 'A' + 0xA; else if (character >= 'a' && character <= 'f') byte += character - 'a' + 0xa; else return -1; /* not a hex digit */ buf[byte_offset + num_items_scanned] = byte; character = *rec++ & 0xFF; if (character == '\0' || character == '\r' || character == '\n') { /* Nothing more to parse */ break; } else if (character != ' ') { /* not space before ASCII dump */ return -1; } } if (num_items_scanned == 0) return -1; return num_items_scanned; } /* * Editor modelines - 
http://www.wireshark.org/tools/modelines.html * * Local variables: * c-basic-offset: 8 * tab-width: 8 * indent-tabs-mode: t * End: * * vi: set shiftwidth=8 tabstop=8 noexpandtab: * :indentSize=8:tabSize=8:noTabs=false: */
/* netscreen.c * * Juniper NetScreen snoop output parser * Created by re-using a lot of code from cosine.c * Copyright (c) 2007 by Sake Blok <sake@euronet.nl> * * Wiretap Library * Copyright (c) 1998 by Gilbert Ramirez <gram@alumni.rice.edu> * * This program is free software; you can redistribute it and/or * modify it under the terms of the GNU General Public License * as published by the Free Software Foundation; either version 2 * of the License, or (at your option) any later version. * * This program is distributed in the hope that it will be useful, * but WITHOUT ANY WARRANTY; without even the implied warranty of * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the * GNU General Public License for more details. * * You should have received a copy of the GNU General Public License * along with this program; if not, write to the Free Software * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA. */ #include "config.h" #include "wtap-int.h" #include "netscreen.h" #include "file_wrappers.h" #include <stdlib.h> #include <string.h> /* XXX TODO: * * o Construct a list of interfaces, with interface names, give * them link-layer types based on the interface name and packet * data, and supply interface IDs with each packet (i.e., make * this supply a pcap-ng-style set of interfaces and associate * packets with interfaces). This is probably the right way * to "Pass the interface names and the traffic direction to either * the frame-structure, a pseudo-header or use PPI." See the * message at * * http://www.wireshark.org/lists/wireshark-dev/200708/msg00029.html * * to see whether any further discussion is still needed. I suspect * it doesn't; pcap-NG existed at the time, as per the final * message in that thread: * * http://www.wireshark.org/lists/wireshark-dev/200708/msg00039.html * * but I don't think we fully *supported* it at that point. 
Now * that we do, we have the infrastructure to support this, except * that we currently have no way to translate interface IDs to * interface names in the "frame" dissector or to supply interface * information as part of the packet metadata from Wiretap modules. * That should be fixed so that we can show interface information, * such as the interface name, in packet dissections from, for example, * pcap-NG captures. */ static gboolean info_line(const gchar *line); static gint64 netscreen_seek_next_packet(wtap *wth, int *err, gchar **err_info, char *hdr); static gboolean netscreen_check_file_type(wtap *wth, int *err, gchar **err_info); static gboolean netscreen_read(wtap *wth, int *err, gchar **err_info, gint64 *data_offset); static gboolean netscreen_seek_read(wtap *wth, gint64 seek_off, struct wtap_pkthdr *phdr, Buffer *buf, int *err, gchar **err_info); static gboolean parse_netscreen_packet(FILE_T fh, struct wtap_pkthdr *phdr, Buffer* buf, char *line, int *err, gchar **err_info); static int parse_single_hex_dump_line(char* rec, guint8 *buf, guint byte_offset); /* Returns TRUE if the line appears to be a line with protocol info. Otherwise it returns FALSE. */ static gboolean info_line(const gchar *line) { int i=NETSCREEN_SPACES_ON_INFO_LINE; while (i-- > 0) { if (g_ascii_isspace(*line)) { line++; continue; } else { return FALSE; } } return TRUE; } /* Seeks to the beginning of the next packet, and returns the byte offset. Copy the header line to hdr. Returns -1 on failure, and sets "*err" to the error and sets "*err_info" to null or an additional error string. */ static gint64 netscreen_seek_next_packet(wtap *wth, int *err, gchar **err_info, char *hdr) { gint64 cur_off; char buf[NETSCREEN_LINE_LENGTH]; while (1) { cur_off = file_tell(wth->fh); if (cur_off == -1) { /* Error */ *err = file_error(wth->fh, err_info); return -1; } if (file_gets(buf, sizeof(buf), wth->fh) == NULL) { /* EOF or error. 
*/ *err = file_error(wth->fh, err_info); break; } if (strstr(buf, NETSCREEN_REC_MAGIC_STR1) || strstr(buf, NETSCREEN_REC_MAGIC_STR2)) { g_strlcpy(hdr, buf, NETSCREEN_LINE_LENGTH); return cur_off; } } return -1; } /* Look through the first part of a file to see if this is * NetScreen snoop output. * * Returns TRUE if it is, FALSE if it isn't or if we get an I/O error; * if we get an I/O error, "*err" will be set to a non-zero value and * "*err_info" is set to null or an additional error string. */ static gboolean netscreen_check_file_type(wtap *wth, int *err, gchar **err_info) { char buf[NETSCREEN_LINE_LENGTH]; guint reclen, line; buf[NETSCREEN_LINE_LENGTH-1] = '\0'; for (line = 0; line < NETSCREEN_HEADER_LINES_TO_CHECK; line++) { if (file_gets(buf, NETSCREEN_LINE_LENGTH, wth->fh) == NULL) { /* EOF or error. */ *err = file_error(wth->fh, err_info); return FALSE; } reclen = (guint) strlen(buf); if (reclen < strlen(NETSCREEN_HDR_MAGIC_STR1) || reclen < strlen(NETSCREEN_HDR_MAGIC_STR2)) { continue; } if (strstr(buf, NETSCREEN_HDR_MAGIC_STR1) || strstr(buf, NETSCREEN_HDR_MAGIC_STR2)) { return TRUE; } } *err = 0; return FALSE; } wtap_open_return_val netscreen_open(wtap *wth, int *err, gchar **err_info) { /* Look for a NetScreen snoop header line */ if (!netscreen_check_file_type(wth, err, err_info)) { if (*err != 0 && *err != WTAP_ERR_SHORT_READ) return WTAP_OPEN_ERROR; return WTAP_OPEN_NOT_MINE; } if (file_seek(wth->fh, 0L, SEEK_SET, err) == -1) /* rewind */ return WTAP_OPEN_ERROR; wth->file_encap = WTAP_ENCAP_UNKNOWN; wth->file_type_subtype = WTAP_FILE_TYPE_SUBTYPE_NETSCREEN; wth->snapshot_length = 0; /* not known */ wth->subtype_read = netscreen_read; wth->subtype_seek_read = netscreen_seek_read; wth->file_tsprec = WTAP_TSPREC_DSEC; return WTAP_OPEN_MINE; } /* Find the next packet and parse it; called from wtap_read(). 
*/ static gboolean netscreen_read(wtap *wth, int *err, gchar **err_info, gint64 *data_offset) { gint64 offset; char line[NETSCREEN_LINE_LENGTH]; /* Find the next packet */ offset = netscreen_seek_next_packet(wth, err, err_info, line); if (offset < 0) return FALSE; /* Parse the header and convert the ASCII hex dump to binary data */ if (!parse_netscreen_packet(wth->fh, &wth->phdr, wth->frame_buffer, line, err, err_info)) return FALSE; /* * If the per-file encapsulation isn't known, set it to this * packet's encapsulation. * * If it *is* known, and it isn't this packet's encapsulation, * set it to WTAP_ENCAP_PER_PACKET, as this file doesn't * have a single encapsulation for all packets in the file. */ if (wth->file_encap == WTAP_ENCAP_UNKNOWN) wth->file_encap = wth->phdr.pkt_encap; else { if (wth->file_encap != wth->phdr.pkt_encap) wth->file_encap = WTAP_ENCAP_PER_PACKET; } *data_offset = offset; return TRUE; } /* Used to read packets in random-access fashion */ static gboolean netscreen_seek_read(wtap *wth, gint64 seek_off, struct wtap_pkthdr *phdr, Buffer *buf, int *err, gchar **err_info) { char line[NETSCREEN_LINE_LENGTH]; if (file_seek(wth->random_fh, seek_off, SEEK_SET, err) == -1) { return FALSE; } if (file_gets(line, NETSCREEN_LINE_LENGTH, wth->random_fh) == NULL) { *err = file_error(wth->random_fh, err_info); if (*err == 0) { *err = WTAP_ERR_SHORT_READ; } return FALSE; } return parse_netscreen_packet(wth->random_fh, phdr, buf, line, err, err_info); } /* Parses a packet record header. There are a few possible formats: * * XXX list extra formats here! 6843828.0: trust(o) len=98:00121ebbd132->00600868d659/0800 192.168.1.1 -> 192.168.1.10/6 vhl=45, tos=00, id=37739, frag=0000, ttl=64 tlen=84 tcp:ports 2222->2333, seq=3452113890, ack=1540618280, flag=5018/ACK 00 60 08 68 d6 59 00 12 1e bb d1 32 08 00 45 00 .`.h.Y.....2..E. 00 54 93 6b 00 00 40 06 63 dd c0 a8 01 01 c0 a8 .T.k..@.c....... 01 0a 08 ae 09 1d cd c3 13 e2 5b d3 f8 28 50 18 ..........[..(P. 
1f d4 79 21 00 00 e7 76 89 64 16 e2 19 0a 80 09 ..y!...v.d...... 31 e7 04 28 04 58 f3 d9 b1 9f 3d 65 1a db d8 61 1..(.X....=e...a 2c 21 b6 d3 20 60 0c 8c 35 98 88 cf 20 91 0e a9 ,!...`..5....... 1d 0b .. */ static gboolean parse_netscreen_packet(FILE_T fh, struct wtap_pkthdr *phdr, Buffer* buf, char *line, int *err, gchar **err_info) { int pkt_len; int sec; int dsec; char cap_int[NETSCREEN_MAX_INT_NAME_LENGTH]; char direction[2]; char cap_src[13]; char cap_dst[13]; guint8 *pd; gchar *p; int n, i = 0; int offset = 0; gchar dststr[13]; phdr->rec_type = REC_TYPE_PACKET; phdr->presence_flags = WTAP_HAS_TS|WTAP_HAS_CAP_LEN; if (sscanf(line, "%9d.%9d: %15[a-z0-9/:.-](%1[io]) len=%9d:%12s->%12s/", &sec, &dsec, cap_int, direction, &pkt_len, cap_src, cap_dst) < 5) { *err = WTAP_ERR_BAD_FILE; *err_info = g_strdup("netscreen: Can't parse packet-header"); return -1; } if (pkt_len < 0) { *err = WTAP_ERR_BAD_FILE; *err_info = g_strdup("netscreen: packet header has a negative packet length"); return FALSE; } if (pkt_len > WTAP_MAX_PACKET_SIZE) { /* * Probably a corrupt capture file; don't blow up trying * to allocate space for an immensely-large packet. */ *err = WTAP_ERR_BAD_FILE; *err_info = g_strdup_printf("netscreen: File has %u-byte packet, bigger than maximum of %u", pkt_len, WTAP_MAX_PACKET_SIZE); return FALSE; } /* * If direction[0] is 'o', the direction is NETSCREEN_EGRESS, * otherwise it's NETSCREEN_INGRESS. */ phdr->ts.secs = sec; phdr->ts.nsecs = dsec * 100000000; phdr->len = pkt_len; /* Make sure we have enough room for the packet */ ws_buffer_assure_space(buf, pkt_len); pd = ws_buffer_start_ptr(buf); while(1) { /* The last packet is not delimited by an empty line, but by EOF * So accept EOF as a valid delimiter too */ if (file_gets(line, NETSCREEN_LINE_LENGTH, fh) == NULL) { break; } /* * Skip blanks. * The number of blanks is not fixed - for wireless * interfaces, there may be 14 extra spaces before * the hex data. 
*/ for (p = &line[0]; g_ascii_isspace(*p); p++) ; /* packets are delimited with empty lines */ if (*p == '\0') { break; } n = parse_single_hex_dump_line(p, pd, offset); /* the smallest packet has a length of 6 bytes, if * the first hex-data is less then check whether * it is a info-line and act accordingly */ if (offset == 0 && n < 6) { if (info_line(line)) { if (++i <= NETSCREEN_MAX_INFOLINES) { continue; } } else { *err = WTAP_ERR_BAD_FILE; *err_info = g_strdup("netscreen: cannot parse hex-data"); return FALSE; } } /* If there is no more data and the line was not empty, * then there must be an error in the file */ if (n == -1) { *err = WTAP_ERR_BAD_FILE; *err_info = g_strdup("netscreen: cannot parse hex-data"); return FALSE; } /* Adjust the offset to the data that was just added to the buffer */ offset += n; /* If there was more hex-data than was announced in the len=x * header, then then there must be an error in the file */ if (offset > pkt_len) { *err = WTAP_ERR_BAD_FILE; *err_info = g_strdup("netscreen: too much hex-data"); return FALSE; } } /* * Determine the encapsulation type, based on the * first 4 characters of the interface name * * XXX convert this to a 'case' structure when adding more * (non-ethernet) interfacetypes */ if (strncmp(cap_int, "adsl", 4) == 0) { /* The ADSL interface can be bridged with or without * PPP encapsulation. Check whether the first six bytes * of the hex data are the same as the destination mac * address in the header. 
If they are, assume ethernet * LinkLayer or else PPP */ g_snprintf(dststr, 13, "%02x%02x%02x%02x%02x%02x", pd[0], pd[1], pd[2], pd[3], pd[4], pd[5]); if (strncmp(dststr, cap_dst, 12) == 0) phdr->pkt_encap = WTAP_ENCAP_ETHERNET; else phdr->pkt_encap = WTAP_ENCAP_PPP; } else if (strncmp(cap_int, "seri", 4) == 0) phdr->pkt_encap = WTAP_ENCAP_PPP; else phdr->pkt_encap = WTAP_ENCAP_ETHERNET; phdr->caplen = offset; return TRUE; } /* Take a string representing one line from a hex dump, with leading white * space removed, and converts the text to binary data. We place the bytes * in the buffer at the specified offset. * * Returns number of bytes successfully read, -1 if bad. */ static int parse_single_hex_dump_line(char* rec, guint8 *buf, guint byte_offset) { int num_items_scanned; guint8 character; guint8 byte; for (num_items_scanned = 0; num_items_scanned < 16; num_items_scanned++) { character = *rec++; if (character >= '0' && character <= '9') byte = character - '0' + 0; else if (character >= 'A' && character <= 'F') byte = character - 'A' + 0xA; else if (character >= 'a' && character <= 'f') byte = character - 'a' + 0xa; else if (character == ' ' || character == '\r' || character == '\n' || character == '\0') { /* Nothing more to parse */ break; } else return -1; /* not a hex digit, space before ASCII dump, or EOL */ byte <<= 4; character = *rec++ & 0xFF; if (character >= '0' && character <= '9') byte += character - '0' + 0; else if (character >= 'A' && character <= 'F') byte += character - 'A' + 0xA; else if (character >= 'a' && character <= 'f') byte += character - 'a' + 0xa; else return -1; /* not a hex digit */ buf[byte_offset + num_items_scanned] = byte; character = *rec++ & 0xFF; if (character == '\0' || character == '\r' || character == '\n') { /* Nothing more to parse */ break; } else if (character != ' ') { /* not space before ASCII dump */ return -1; } } if (num_items_scanned == 0) return -1; return num_items_scanned; } /* * Editor modelines - 
http://www.wireshark.org/tools/modelines.html * * Local variables: * c-basic-offset: 8 * tab-width: 8 * indent-tabs-mode: t * End: * * vi: set shiftwidth=8 tabstop=8 noexpandtab: * :indentSize=8:tabSize=8:noTabs=false: */
parse_netscreen_packet(FILE_T fh, struct wtap_pkthdr *phdr, Buffer* buf, char *line, int *err, gchar **err_info) { int sec; int dsec; char cap_int[NETSCREEN_MAX_INT_NAME_LENGTH]; char direction[2]; guint pkt_len; char cap_src[13]; char cap_dst[13]; guint8 *pd; gchar *p; int n, i = 0; guint offset = 0; gchar dststr[13]; phdr->rec_type = REC_TYPE_PACKET; phdr->presence_flags = WTAP_HAS_TS|WTAP_HAS_CAP_LEN; if (sscanf(line, "%9d.%9d: %15[a-z0-9/:.-](%1[io]) len=%9u:%12s->%12s/", &sec, &dsec, cap_int, direction, &pkt_len, cap_src, cap_dst) < 5) { *err = WTAP_ERR_BAD_FILE; *err_info = g_strdup("netscreen: Can't parse packet-header"); return -1; } if (pkt_len > WTAP_MAX_PACKET_SIZE) { /* * Probably a corrupt capture file; don't blow up trying * to allocate space for an immensely-large packet. */ *err = WTAP_ERR_BAD_FILE; *err_info = g_strdup_printf("netscreen: File has %u-byte packet, bigger than maximum of %u", pkt_len, WTAP_MAX_PACKET_SIZE); return FALSE; } /* * If direction[0] is 'o', the direction is NETSCREEN_EGRESS, * otherwise it's NETSCREEN_INGRESS. */ phdr->ts.secs = sec; phdr->ts.nsecs = dsec * 100000000; phdr->len = pkt_len; /* Make sure we have enough room for the packet */ ws_buffer_assure_space(buf, pkt_len); pd = ws_buffer_start_ptr(buf); while(1) { /* The last packet is not delimited by an empty line, but by EOF * So accept EOF as a valid delimiter too */ if (file_gets(line, NETSCREEN_LINE_LENGTH, fh) == NULL) { break; } /* * Skip blanks. * The number of blanks is not fixed - for wireless * interfaces, there may be 14 extra spaces before * the hex data. 
*/ for (p = &line[0]; g_ascii_isspace(*p); p++) ; /* packets are delimited with empty lines */ if (*p == '\0') { break; } n = parse_single_hex_dump_line(p, pd, offset); /* the smallest packet has a length of 6 bytes, if * the first hex-data is less then check whether * it is a info-line and act accordingly */ if (offset == 0 && n < 6) { if (info_line(line)) { if (++i <= NETSCREEN_MAX_INFOLINES) { continue; } } else { *err = WTAP_ERR_BAD_FILE; *err_info = g_strdup("netscreen: cannot parse hex-data"); return FALSE; } } /* If there is no more data and the line was not empty, * then there must be an error in the file */ if (n == -1) { *err = WTAP_ERR_BAD_FILE; *err_info = g_strdup("netscreen: cannot parse hex-data"); return FALSE; } /* Adjust the offset to the data that was just added to the buffer */ offset += n; /* If there was more hex-data than was announced in the len=x * header, then then there must be an error in the file */ if (offset > pkt_len) { *err = WTAP_ERR_BAD_FILE; *err_info = g_strdup("netscreen: too much hex-data"); return FALSE; } } /* * Determine the encapsulation type, based on the * first 4 characters of the interface name * * XXX convert this to a 'case' structure when adding more * (non-ethernet) interfacetypes */ if (strncmp(cap_int, "adsl", 4) == 0) { /* The ADSL interface can be bridged with or without * PPP encapsulation. Check whether the first six bytes * of the hex data are the same as the destination mac * address in the header. If they are, assume ethernet * LinkLayer or else PPP */ g_snprintf(dststr, 13, "%02x%02x%02x%02x%02x%02x", pd[0], pd[1], pd[2], pd[3], pd[4], pd[5]); if (strncmp(dststr, cap_dst, 12) == 0) phdr->pkt_encap = WTAP_ENCAP_ETHERNET; else phdr->pkt_encap = WTAP_ENCAP_PPP; } else if (strncmp(cap_int, "seri", 4) == 0) phdr->pkt_encap = WTAP_ENCAP_PPP; else phdr->pkt_encap = WTAP_ENCAP_ETHERNET; phdr->caplen = offset; return TRUE; }
parse_netscreen_packet(FILE_T fh, struct wtap_pkthdr *phdr, Buffer* buf, char *line, int *err, gchar **err_info) { int pkt_len; int sec; int dsec; char cap_int[NETSCREEN_MAX_INT_NAME_LENGTH]; char direction[2]; char cap_src[13]; char cap_dst[13]; guint8 *pd; gchar *p; int n, i = 0; int offset = 0; gchar dststr[13]; phdr->rec_type = REC_TYPE_PACKET; phdr->presence_flags = WTAP_HAS_TS|WTAP_HAS_CAP_LEN; if (sscanf(line, "%9d.%9d: %15[a-z0-9/:.-](%1[io]) len=%9d:%12s->%12s/", &sec, &dsec, cap_int, direction, &pkt_len, cap_src, cap_dst) < 5) { *err = WTAP_ERR_BAD_FILE; *err_info = g_strdup("netscreen: Can't parse packet-header"); return -1; } if (pkt_len < 0) { *err = WTAP_ERR_BAD_FILE; *err_info = g_strdup("netscreen: packet header has a negative packet length"); return FALSE; } if (pkt_len > WTAP_MAX_PACKET_SIZE) { /* * Probably a corrupt capture file; don't blow up trying * to allocate space for an immensely-large packet. */ *err = WTAP_ERR_BAD_FILE; *err_info = g_strdup_printf("netscreen: File has %u-byte packet, bigger than maximum of %u", pkt_len, WTAP_MAX_PACKET_SIZE); return FALSE; } /* * If direction[0] is 'o', the direction is NETSCREEN_EGRESS, * otherwise it's NETSCREEN_INGRESS. */ phdr->ts.secs = sec; phdr->ts.nsecs = dsec * 100000000; phdr->len = pkt_len; /* Make sure we have enough room for the packet */ ws_buffer_assure_space(buf, pkt_len); pd = ws_buffer_start_ptr(buf); while(1) { /* The last packet is not delimited by an empty line, but by EOF * So accept EOF as a valid delimiter too */ if (file_gets(line, NETSCREEN_LINE_LENGTH, fh) == NULL) { break; } /* * Skip blanks. * The number of blanks is not fixed - for wireless * interfaces, there may be 14 extra spaces before * the hex data. 
*/ for (p = &line[0]; g_ascii_isspace(*p); p++) ; /* packets are delimited with empty lines */ if (*p == '\0') { break; } n = parse_single_hex_dump_line(p, pd, offset); /* the smallest packet has a length of 6 bytes, if * the first hex-data is less then check whether * it is a info-line and act accordingly */ if (offset == 0 && n < 6) { if (info_line(line)) { if (++i <= NETSCREEN_MAX_INFOLINES) { continue; } } else { *err = WTAP_ERR_BAD_FILE; *err_info = g_strdup("netscreen: cannot parse hex-data"); return FALSE; } } /* If there is no more data and the line was not empty, * then there must be an error in the file */ if (n == -1) { *err = WTAP_ERR_BAD_FILE; *err_info = g_strdup("netscreen: cannot parse hex-data"); return FALSE; } /* Adjust the offset to the data that was just added to the buffer */ offset += n; /* If there was more hex-data than was announced in the len=x * header, then then there must be an error in the file */ if (offset > pkt_len) { *err = WTAP_ERR_BAD_FILE; *err_info = g_strdup("netscreen: too much hex-data"); return FALSE; } } /* * Determine the encapsulation type, based on the * first 4 characters of the interface name * * XXX convert this to a 'case' structure when adding more * (non-ethernet) interfacetypes */ if (strncmp(cap_int, "adsl", 4) == 0) { /* The ADSL interface can be bridged with or without * PPP encapsulation. Check whether the first six bytes * of the hex data are the same as the destination mac * address in the header. If they are, assume ethernet * LinkLayer or else PPP */ g_snprintf(dststr, 13, "%02x%02x%02x%02x%02x%02x", pd[0], pd[1], pd[2], pd[3], pd[4], pd[5]); if (strncmp(dststr, cap_dst, 12) == 0) phdr->pkt_encap = WTAP_ENCAP_ETHERNET; else phdr->pkt_encap = WTAP_ENCAP_PPP; } else if (strncmp(cap_int, "seri", 4) == 0) phdr->pkt_encap = WTAP_ENCAP_PPP; else phdr->pkt_encap = WTAP_ENCAP_ETHERNET; phdr->caplen = offset; return TRUE; }
{'added': [(266, '\tint\t\tpkt_len;'), (276, '\tint\t\toffset = 0;'), (282, '\tif (sscanf(line, "%9d.%9d: %15[a-z0-9/:.-](%1[io]) len=%9d:%12s->%12s/",'), (288, '\tif (pkt_len < 0) {'), (289, '\t\t*err = WTAP_ERR_BAD_FILE;'), (290, '\t\t*err_info = g_strdup("netscreen: packet header has a negative packet length");'), (291, '\t\treturn FALSE;'), (292, '\t}')], 'deleted': [(270, '\tguint\t\tpkt_len;'), (276, '\tguint\t\toffset = 0;'), (282, '\tif (sscanf(line, "%9d.%9d: %15[a-z0-9/:.-](%1[io]) len=%9u:%12s->%12s/",')]}
8
3
263
1,637
82
499
16
https://github.com/wireshark/wireshark
CVE-2016-5357
CWE-20
1,001
sco.c
C
sco_sock_sendmsg
/* BlueZ - Bluetooth protocol stack for Linux Copyright (C) 2000-2001 Qualcomm Incorporated Written 2000,2001 by Maxim Krasnyansky <maxk@qualcomm.com> This program is free software; you can redistribute it and/or modify it under the terms of the GNU General Public License version 2 as published by the Free Software Foundation; THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT OF THIRD PARTY RIGHTS. IN NO EVENT SHALL THE COPYRIGHT HOLDER(S) AND AUTHOR(S) BE LIABLE FOR ANY CLAIM, OR ANY SPECIAL INDIRECT OR CONSEQUENTIAL DAMAGES, OR ANY DAMAGES WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE. ALL LIABILITY, INCLUDING LIABILITY FOR INFRINGEMENT OF ANY PATENTS, COPYRIGHTS, TRADEMARKS OR OTHER RIGHTS, RELATING TO USE OF THIS SOFTWARE IS DISCLAIMED. */ /* Bluetooth SCO sockets. 
*/ #include <linux/module.h> #include <linux/debugfs.h> #include <linux/seq_file.h> #include <linux/sched/signal.h> #include <net/bluetooth/bluetooth.h> #include <net/bluetooth/hci_core.h> #include <net/bluetooth/sco.h> static bool disable_esco; static const struct proto_ops sco_sock_ops; static struct bt_sock_list sco_sk_list = { .lock = __RW_LOCK_UNLOCKED(sco_sk_list.lock) }; /* ---- SCO connections ---- */ struct sco_conn { struct hci_conn *hcon; spinlock_t lock; struct sock *sk; struct delayed_work timeout_work; unsigned int mtu; }; #define sco_conn_lock(c) spin_lock(&c->lock) #define sco_conn_unlock(c) spin_unlock(&c->lock) static void sco_sock_close(struct sock *sk); static void sco_sock_kill(struct sock *sk); /* ----- SCO socket info ----- */ #define sco_pi(sk) ((struct sco_pinfo *) sk) struct sco_pinfo { struct bt_sock bt; bdaddr_t src; bdaddr_t dst; __u32 flags; __u16 setting; __u8 cmsg_mask; struct sco_conn *conn; }; /* ---- SCO timers ---- */ #define SCO_CONN_TIMEOUT (HZ * 40) #define SCO_DISCONN_TIMEOUT (HZ * 2) static void sco_sock_timeout(struct work_struct *work) { struct sco_conn *conn = container_of(work, struct sco_conn, timeout_work.work); struct sock *sk; sco_conn_lock(conn); sk = conn->sk; if (sk) sock_hold(sk); sco_conn_unlock(conn); if (!sk) return; BT_DBG("sock %p state %d", sk, sk->sk_state); lock_sock(sk); sk->sk_err = ETIMEDOUT; sk->sk_state_change(sk); release_sock(sk); sock_put(sk); } static void sco_sock_set_timer(struct sock *sk, long timeout) { if (!sco_pi(sk)->conn) return; BT_DBG("sock %p state %d timeout %ld", sk, sk->sk_state, timeout); cancel_delayed_work(&sco_pi(sk)->conn->timeout_work); schedule_delayed_work(&sco_pi(sk)->conn->timeout_work, timeout); } static void sco_sock_clear_timer(struct sock *sk) { if (!sco_pi(sk)->conn) return; BT_DBG("sock %p state %d", sk, sk->sk_state); cancel_delayed_work(&sco_pi(sk)->conn->timeout_work); } /* ---- SCO connections ---- */ static struct sco_conn *sco_conn_add(struct hci_conn *hcon) { 
struct hci_dev *hdev = hcon->hdev; struct sco_conn *conn = hcon->sco_data; if (conn) return conn; conn = kzalloc(sizeof(struct sco_conn), GFP_KERNEL); if (!conn) return NULL; spin_lock_init(&conn->lock); hcon->sco_data = conn; conn->hcon = hcon; if (hdev->sco_mtu > 0) conn->mtu = hdev->sco_mtu; else conn->mtu = 60; BT_DBG("hcon %p conn %p", hcon, conn); return conn; } /* Delete channel. * Must be called on the locked socket. */ static void sco_chan_del(struct sock *sk, int err) { struct sco_conn *conn; conn = sco_pi(sk)->conn; BT_DBG("sk %p, conn %p, err %d", sk, conn, err); if (conn) { sco_conn_lock(conn); conn->sk = NULL; sco_pi(sk)->conn = NULL; sco_conn_unlock(conn); if (conn->hcon) hci_conn_drop(conn->hcon); } sk->sk_state = BT_CLOSED; sk->sk_err = err; sk->sk_state_change(sk); sock_set_flag(sk, SOCK_ZAPPED); } static void sco_conn_del(struct hci_conn *hcon, int err) { struct sco_conn *conn = hcon->sco_data; struct sock *sk; if (!conn) return; BT_DBG("hcon %p conn %p, err %d", hcon, conn, err); /* Kill socket */ sco_conn_lock(conn); sk = conn->sk; sco_conn_unlock(conn); if (sk) { sock_hold(sk); lock_sock(sk); sco_sock_clear_timer(sk); sco_chan_del(sk, err); release_sock(sk); sock_put(sk); /* Ensure no more work items will run before freeing conn. 
*/ cancel_delayed_work_sync(&conn->timeout_work); } hcon->sco_data = NULL; kfree(conn); } static void __sco_chan_add(struct sco_conn *conn, struct sock *sk, struct sock *parent) { BT_DBG("conn %p", conn); sco_pi(sk)->conn = conn; conn->sk = sk; INIT_DELAYED_WORK(&conn->timeout_work, sco_sock_timeout); if (parent) bt_accept_enqueue(parent, sk, true); } static int sco_chan_add(struct sco_conn *conn, struct sock *sk, struct sock *parent) { int err = 0; sco_conn_lock(conn); if (conn->sk) err = -EBUSY; else __sco_chan_add(conn, sk, parent); sco_conn_unlock(conn); return err; } static int sco_connect(struct hci_dev *hdev, struct sock *sk) { struct sco_conn *conn; struct hci_conn *hcon; int err, type; BT_DBG("%pMR -> %pMR", &sco_pi(sk)->src, &sco_pi(sk)->dst); if (lmp_esco_capable(hdev) && !disable_esco) type = ESCO_LINK; else type = SCO_LINK; if (sco_pi(sk)->setting == BT_VOICE_TRANSPARENT && (!lmp_transp_capable(hdev) || !lmp_esco_capable(hdev))) return -EOPNOTSUPP; hcon = hci_connect_sco(hdev, type, &sco_pi(sk)->dst, sco_pi(sk)->setting); if (IS_ERR(hcon)) return PTR_ERR(hcon); conn = sco_conn_add(hcon); if (!conn) { hci_conn_drop(hcon); return -ENOMEM; } /* Update source addr of the socket */ bacpy(&sco_pi(sk)->src, &hcon->src); err = sco_chan_add(conn, sk, NULL); if (err) return err; if (hcon->state == BT_CONNECTED) { sco_sock_clear_timer(sk); sk->sk_state = BT_CONNECTED; } else { sk->sk_state = BT_CONNECT; sco_sock_set_timer(sk, sk->sk_sndtimeo); } return err; } static int sco_send_frame(struct sock *sk, struct msghdr *msg, int len) { struct sco_conn *conn = sco_pi(sk)->conn; struct sk_buff *skb; int err; /* Check outgoing MTU */ if (len > conn->mtu) return -EINVAL; BT_DBG("sk %p len %d", sk, len); skb = bt_skb_send_alloc(sk, len, msg->msg_flags & MSG_DONTWAIT, &err); if (!skb) return err; if (memcpy_from_msg(skb_put(skb, len), msg, len)) { kfree_skb(skb); return -EFAULT; } hci_send_sco(conn->hcon, skb); return len; } static void sco_recv_frame(struct sco_conn 
*conn, struct sk_buff *skb) { struct sock *sk; sco_conn_lock(conn); sk = conn->sk; sco_conn_unlock(conn); if (!sk) goto drop; BT_DBG("sk %p len %u", sk, skb->len); if (sk->sk_state != BT_CONNECTED) goto drop; if (!sock_queue_rcv_skb(sk, skb)) return; drop: kfree_skb(skb); } /* -------- Socket interface ---------- */ static struct sock *__sco_get_sock_listen_by_addr(bdaddr_t *ba) { struct sock *sk; sk_for_each(sk, &sco_sk_list.head) { if (sk->sk_state != BT_LISTEN) continue; if (!bacmp(&sco_pi(sk)->src, ba)) return sk; } return NULL; } /* Find socket listening on source bdaddr. * Returns closest match. */ static struct sock *sco_get_sock_listen(bdaddr_t *src) { struct sock *sk = NULL, *sk1 = NULL; read_lock(&sco_sk_list.lock); sk_for_each(sk, &sco_sk_list.head) { if (sk->sk_state != BT_LISTEN) continue; /* Exact match. */ if (!bacmp(&sco_pi(sk)->src, src)) break; /* Closest match */ if (!bacmp(&sco_pi(sk)->src, BDADDR_ANY)) sk1 = sk; } read_unlock(&sco_sk_list.lock); return sk ? sk : sk1; } static void sco_sock_destruct(struct sock *sk) { BT_DBG("sk %p", sk); skb_queue_purge(&sk->sk_receive_queue); skb_queue_purge(&sk->sk_write_queue); } static void sco_sock_cleanup_listen(struct sock *parent) { struct sock *sk; BT_DBG("parent %p", parent); /* Close not yet accepted channels */ while ((sk = bt_accept_dequeue(parent, NULL))) { sco_sock_close(sk); sco_sock_kill(sk); } parent->sk_state = BT_CLOSED; sock_set_flag(parent, SOCK_ZAPPED); } /* Kill socket (only if zapped and orphan) * Must be called on unlocked socket. 
*/ static void sco_sock_kill(struct sock *sk) { if (!sock_flag(sk, SOCK_ZAPPED) || sk->sk_socket) return; BT_DBG("sk %p state %d", sk, sk->sk_state); /* Kill poor orphan */ bt_sock_unlink(&sco_sk_list, sk); sock_set_flag(sk, SOCK_DEAD); sock_put(sk); } static void __sco_sock_close(struct sock *sk) { BT_DBG("sk %p state %d socket %p", sk, sk->sk_state, sk->sk_socket); switch (sk->sk_state) { case BT_LISTEN: sco_sock_cleanup_listen(sk); break; case BT_CONNECTED: case BT_CONFIG: if (sco_pi(sk)->conn->hcon) { sk->sk_state = BT_DISCONN; sco_sock_set_timer(sk, SCO_DISCONN_TIMEOUT); sco_conn_lock(sco_pi(sk)->conn); hci_conn_drop(sco_pi(sk)->conn->hcon); sco_pi(sk)->conn->hcon = NULL; sco_conn_unlock(sco_pi(sk)->conn); } else sco_chan_del(sk, ECONNRESET); break; case BT_CONNECT2: case BT_CONNECT: case BT_DISCONN: sco_chan_del(sk, ECONNRESET); break; default: sock_set_flag(sk, SOCK_ZAPPED); break; } } /* Must be called on unlocked socket. */ static void sco_sock_close(struct sock *sk) { lock_sock(sk); sco_sock_clear_timer(sk); __sco_sock_close(sk); release_sock(sk); } static void sco_skb_put_cmsg(struct sk_buff *skb, struct msghdr *msg, struct sock *sk) { if (sco_pi(sk)->cmsg_mask & SCO_CMSG_PKT_STATUS) put_cmsg(msg, SOL_BLUETOOTH, BT_SCM_PKT_STATUS, sizeof(bt_cb(skb)->sco.pkt_status), &bt_cb(skb)->sco.pkt_status); } static void sco_sock_init(struct sock *sk, struct sock *parent) { BT_DBG("sk %p", sk); if (parent) { sk->sk_type = parent->sk_type; bt_sk(sk)->flags = bt_sk(parent)->flags; security_sk_clone(parent, sk); } else { bt_sk(sk)->skb_put_cmsg = sco_skb_put_cmsg; } } static struct proto sco_proto = { .name = "SCO", .owner = THIS_MODULE, .obj_size = sizeof(struct sco_pinfo) }; static struct sock *sco_sock_alloc(struct net *net, struct socket *sock, int proto, gfp_t prio, int kern) { struct sock *sk; sk = sk_alloc(net, PF_BLUETOOTH, prio, &sco_proto, kern); if (!sk) return NULL; sock_init_data(sock, sk); INIT_LIST_HEAD(&bt_sk(sk)->accept_q); sk->sk_destruct = 
sco_sock_destruct; sk->sk_sndtimeo = SCO_CONN_TIMEOUT; sock_reset_flag(sk, SOCK_ZAPPED); sk->sk_protocol = proto; sk->sk_state = BT_OPEN; sco_pi(sk)->setting = BT_VOICE_CVSD_16BIT; bt_sock_link(&sco_sk_list, sk); return sk; } static int sco_sock_create(struct net *net, struct socket *sock, int protocol, int kern) { struct sock *sk; BT_DBG("sock %p", sock); sock->state = SS_UNCONNECTED; if (sock->type != SOCK_SEQPACKET) return -ESOCKTNOSUPPORT; sock->ops = &sco_sock_ops; sk = sco_sock_alloc(net, sock, protocol, GFP_ATOMIC, kern); if (!sk) return -ENOMEM; sco_sock_init(sk, NULL); return 0; } static int sco_sock_bind(struct socket *sock, struct sockaddr *addr, int addr_len) { struct sockaddr_sco *sa = (struct sockaddr_sco *) addr; struct sock *sk = sock->sk; int err = 0; if (!addr || addr_len < sizeof(struct sockaddr_sco) || addr->sa_family != AF_BLUETOOTH) return -EINVAL; BT_DBG("sk %p %pMR", sk, &sa->sco_bdaddr); lock_sock(sk); if (sk->sk_state != BT_OPEN) { err = -EBADFD; goto done; } if (sk->sk_type != SOCK_SEQPACKET) { err = -EINVAL; goto done; } bacpy(&sco_pi(sk)->src, &sa->sco_bdaddr); sk->sk_state = BT_BOUND; done: release_sock(sk); return err; } static int sco_sock_connect(struct socket *sock, struct sockaddr *addr, int alen, int flags) { struct sockaddr_sco *sa = (struct sockaddr_sco *) addr; struct sock *sk = sock->sk; struct hci_dev *hdev; int err; BT_DBG("sk %p", sk); if (alen < sizeof(struct sockaddr_sco) || addr->sa_family != AF_BLUETOOTH) return -EINVAL; if (sk->sk_state != BT_OPEN && sk->sk_state != BT_BOUND) return -EBADFD; if (sk->sk_type != SOCK_SEQPACKET) return -EINVAL; hdev = hci_get_route(&sa->sco_bdaddr, &sco_pi(sk)->src, BDADDR_BREDR); if (!hdev) return -EHOSTUNREACH; hci_dev_lock(hdev); lock_sock(sk); /* Set destination address and psm */ bacpy(&sco_pi(sk)->dst, &sa->sco_bdaddr); err = sco_connect(hdev, sk); hci_dev_unlock(hdev); hci_dev_put(hdev); if (err) goto done; err = bt_sock_wait_state(sk, BT_CONNECTED, sock_sndtimeo(sk, flags & 
O_NONBLOCK)); done: release_sock(sk); return err; } static int sco_sock_listen(struct socket *sock, int backlog) { struct sock *sk = sock->sk; bdaddr_t *src = &sco_pi(sk)->src; int err = 0; BT_DBG("sk %p backlog %d", sk, backlog); lock_sock(sk); if (sk->sk_state != BT_BOUND) { err = -EBADFD; goto done; } if (sk->sk_type != SOCK_SEQPACKET) { err = -EINVAL; goto done; } write_lock(&sco_sk_list.lock); if (__sco_get_sock_listen_by_addr(src)) { err = -EADDRINUSE; goto unlock; } sk->sk_max_ack_backlog = backlog; sk->sk_ack_backlog = 0; sk->sk_state = BT_LISTEN; unlock: write_unlock(&sco_sk_list.lock); done: release_sock(sk); return err; } static int sco_sock_accept(struct socket *sock, struct socket *newsock, int flags, bool kern) { DEFINE_WAIT_FUNC(wait, woken_wake_function); struct sock *sk = sock->sk, *ch; long timeo; int err = 0; lock_sock(sk); timeo = sock_rcvtimeo(sk, flags & O_NONBLOCK); BT_DBG("sk %p timeo %ld", sk, timeo); /* Wait for an incoming connection. (wake-one). */ add_wait_queue_exclusive(sk_sleep(sk), &wait); while (1) { if (sk->sk_state != BT_LISTEN) { err = -EBADFD; break; } ch = bt_accept_dequeue(sk, newsock); if (ch) break; if (!timeo) { err = -EAGAIN; break; } if (signal_pending(current)) { err = sock_intr_errno(timeo); break; } release_sock(sk); timeo = wait_woken(&wait, TASK_INTERRUPTIBLE, timeo); lock_sock(sk); } remove_wait_queue(sk_sleep(sk), &wait); if (err) goto done; newsock->state = SS_CONNECTED; BT_DBG("new socket %p", ch); done: release_sock(sk); return err; } static int sco_sock_getname(struct socket *sock, struct sockaddr *addr, int peer) { struct sockaddr_sco *sa = (struct sockaddr_sco *) addr; struct sock *sk = sock->sk; BT_DBG("sock %p, sk %p", sock, sk); addr->sa_family = AF_BLUETOOTH; if (peer) bacpy(&sa->sco_bdaddr, &sco_pi(sk)->dst); else bacpy(&sa->sco_bdaddr, &sco_pi(sk)->src); return sizeof(struct sockaddr_sco); } static int sco_sock_sendmsg(struct socket *sock, struct msghdr *msg, size_t len) { struct sock *sk = sock->sk; 
int err; BT_DBG("sock %p, sk %p", sock, sk); err = sock_error(sk); if (err) return err; if (msg->msg_flags & MSG_OOB) return -EOPNOTSUPP; lock_sock(sk); if (sk->sk_state == BT_CONNECTED) err = sco_send_frame(sk, msg, len); else err = -ENOTCONN; release_sock(sk); return err; } static void sco_conn_defer_accept(struct hci_conn *conn, u16 setting) { struct hci_dev *hdev = conn->hdev; BT_DBG("conn %p", conn); conn->state = BT_CONFIG; if (!lmp_esco_capable(hdev)) { struct hci_cp_accept_conn_req cp; bacpy(&cp.bdaddr, &conn->dst); cp.role = 0x00; /* Ignored */ hci_send_cmd(hdev, HCI_OP_ACCEPT_CONN_REQ, sizeof(cp), &cp); } else { struct hci_cp_accept_sync_conn_req cp; bacpy(&cp.bdaddr, &conn->dst); cp.pkt_type = cpu_to_le16(conn->pkt_type); cp.tx_bandwidth = cpu_to_le32(0x00001f40); cp.rx_bandwidth = cpu_to_le32(0x00001f40); cp.content_format = cpu_to_le16(setting); switch (setting & SCO_AIRMODE_MASK) { case SCO_AIRMODE_TRANSP: if (conn->pkt_type & ESCO_2EV3) cp.max_latency = cpu_to_le16(0x0008); else cp.max_latency = cpu_to_le16(0x000D); cp.retrans_effort = 0x02; break; case SCO_AIRMODE_CVSD: cp.max_latency = cpu_to_le16(0xffff); cp.retrans_effort = 0xff; break; default: /* use CVSD settings as fallback */ cp.max_latency = cpu_to_le16(0xffff); cp.retrans_effort = 0xff; break; } hci_send_cmd(hdev, HCI_OP_ACCEPT_SYNC_CONN_REQ, sizeof(cp), &cp); } } static int sco_sock_recvmsg(struct socket *sock, struct msghdr *msg, size_t len, int flags) { struct sock *sk = sock->sk; struct sco_pinfo *pi = sco_pi(sk); lock_sock(sk); if (sk->sk_state == BT_CONNECT2 && test_bit(BT_SK_DEFER_SETUP, &bt_sk(sk)->flags)) { sco_conn_defer_accept(pi->conn->hcon, pi->setting); sk->sk_state = BT_CONFIG; release_sock(sk); return 0; } release_sock(sk); return bt_sock_recvmsg(sock, msg, len, flags); } static int sco_sock_setsockopt(struct socket *sock, int level, int optname, sockptr_t optval, unsigned int optlen) { struct sock *sk = sock->sk; int len, err = 0; struct bt_voice voice; u32 opt; BT_DBG("sk 
%p", sk); lock_sock(sk); switch (optname) { case BT_DEFER_SETUP: if (sk->sk_state != BT_BOUND && sk->sk_state != BT_LISTEN) { err = -EINVAL; break; } if (copy_from_sockptr(&opt, optval, sizeof(u32))) { err = -EFAULT; break; } if (opt) set_bit(BT_SK_DEFER_SETUP, &bt_sk(sk)->flags); else clear_bit(BT_SK_DEFER_SETUP, &bt_sk(sk)->flags); break; case BT_VOICE: if (sk->sk_state != BT_OPEN && sk->sk_state != BT_BOUND && sk->sk_state != BT_CONNECT2) { err = -EINVAL; break; } voice.setting = sco_pi(sk)->setting; len = min_t(unsigned int, sizeof(voice), optlen); if (copy_from_sockptr(&voice, optval, len)) { err = -EFAULT; break; } /* Explicitly check for these values */ if (voice.setting != BT_VOICE_TRANSPARENT && voice.setting != BT_VOICE_CVSD_16BIT) { err = -EINVAL; break; } sco_pi(sk)->setting = voice.setting; break; case BT_PKT_STATUS: if (copy_from_sockptr(&opt, optval, sizeof(u32))) { err = -EFAULT; break; } if (opt) sco_pi(sk)->cmsg_mask |= SCO_CMSG_PKT_STATUS; else sco_pi(sk)->cmsg_mask &= SCO_CMSG_PKT_STATUS; break; default: err = -ENOPROTOOPT; break; } release_sock(sk); return err; } static int sco_sock_getsockopt_old(struct socket *sock, int optname, char __user *optval, int __user *optlen) { struct sock *sk = sock->sk; struct sco_options opts; struct sco_conninfo cinfo; int len, err = 0; BT_DBG("sk %p", sk); if (get_user(len, optlen)) return -EFAULT; lock_sock(sk); switch (optname) { case SCO_OPTIONS: if (sk->sk_state != BT_CONNECTED && !(sk->sk_state == BT_CONNECT2 && test_bit(BT_SK_DEFER_SETUP, &bt_sk(sk)->flags))) { err = -ENOTCONN; break; } opts.mtu = sco_pi(sk)->conn->mtu; BT_DBG("mtu %u", opts.mtu); len = min_t(unsigned int, len, sizeof(opts)); if (copy_to_user(optval, (char *)&opts, len)) err = -EFAULT; break; case SCO_CONNINFO: if (sk->sk_state != BT_CONNECTED && !(sk->sk_state == BT_CONNECT2 && test_bit(BT_SK_DEFER_SETUP, &bt_sk(sk)->flags))) { err = -ENOTCONN; break; } memset(&cinfo, 0, sizeof(cinfo)); cinfo.hci_handle = sco_pi(sk)->conn->hcon->handle; 
memcpy(cinfo.dev_class, sco_pi(sk)->conn->hcon->dev_class, 3); len = min_t(unsigned int, len, sizeof(cinfo)); if (copy_to_user(optval, (char *)&cinfo, len)) err = -EFAULT; break; default: err = -ENOPROTOOPT; break; } release_sock(sk); return err; } static int sco_sock_getsockopt(struct socket *sock, int level, int optname, char __user *optval, int __user *optlen) { struct sock *sk = sock->sk; int len, err = 0; struct bt_voice voice; u32 phys; int pkt_status; BT_DBG("sk %p", sk); if (level == SOL_SCO) return sco_sock_getsockopt_old(sock, optname, optval, optlen); if (get_user(len, optlen)) return -EFAULT; lock_sock(sk); switch (optname) { case BT_DEFER_SETUP: if (sk->sk_state != BT_BOUND && sk->sk_state != BT_LISTEN) { err = -EINVAL; break; } if (put_user(test_bit(BT_SK_DEFER_SETUP, &bt_sk(sk)->flags), (u32 __user *)optval)) err = -EFAULT; break; case BT_VOICE: voice.setting = sco_pi(sk)->setting; len = min_t(unsigned int, len, sizeof(voice)); if (copy_to_user(optval, (char *)&voice, len)) err = -EFAULT; break; case BT_PHY: if (sk->sk_state != BT_CONNECTED) { err = -ENOTCONN; break; } phys = hci_conn_get_phy(sco_pi(sk)->conn->hcon); if (put_user(phys, (u32 __user *) optval)) err = -EFAULT; break; case BT_PKT_STATUS: pkt_status = (sco_pi(sk)->cmsg_mask & SCO_CMSG_PKT_STATUS); if (put_user(pkt_status, (int __user *)optval)) err = -EFAULT; break; case BT_SNDMTU: case BT_RCVMTU: if (sk->sk_state != BT_CONNECTED) { err = -ENOTCONN; break; } if (put_user(sco_pi(sk)->conn->mtu, (u32 __user *)optval)) err = -EFAULT; break; default: err = -ENOPROTOOPT; break; } release_sock(sk); return err; } static int sco_sock_shutdown(struct socket *sock, int how) { struct sock *sk = sock->sk; int err = 0; BT_DBG("sock %p, sk %p", sock, sk); if (!sk) return 0; sock_hold(sk); lock_sock(sk); if (!sk->sk_shutdown) { sk->sk_shutdown = SHUTDOWN_MASK; sco_sock_clear_timer(sk); __sco_sock_close(sk); if (sock_flag(sk, SOCK_LINGER) && sk->sk_lingertime && !(current->flags & PF_EXITING)) err = 
bt_sock_wait_state(sk, BT_CLOSED, sk->sk_lingertime); } release_sock(sk); sock_put(sk); return err; } static int sco_sock_release(struct socket *sock) { struct sock *sk = sock->sk; int err = 0; BT_DBG("sock %p, sk %p", sock, sk); if (!sk) return 0; sco_sock_close(sk); if (sock_flag(sk, SOCK_LINGER) && sk->sk_lingertime && !(current->flags & PF_EXITING)) { lock_sock(sk); err = bt_sock_wait_state(sk, BT_CLOSED, sk->sk_lingertime); release_sock(sk); } sock_orphan(sk); sco_sock_kill(sk); return err; } static void sco_conn_ready(struct sco_conn *conn) { struct sock *parent; struct sock *sk = conn->sk; BT_DBG("conn %p", conn); if (sk) { lock_sock(sk); sco_sock_clear_timer(sk); sk->sk_state = BT_CONNECTED; sk->sk_state_change(sk); release_sock(sk); } else { sco_conn_lock(conn); if (!conn->hcon) { sco_conn_unlock(conn); return; } parent = sco_get_sock_listen(&conn->hcon->src); if (!parent) { sco_conn_unlock(conn); return; } lock_sock(parent); sk = sco_sock_alloc(sock_net(parent), NULL, BTPROTO_SCO, GFP_ATOMIC, 0); if (!sk) { release_sock(parent); sco_conn_unlock(conn); return; } sco_sock_init(sk, parent); bacpy(&sco_pi(sk)->src, &conn->hcon->src); bacpy(&sco_pi(sk)->dst, &conn->hcon->dst); hci_conn_hold(conn->hcon); __sco_chan_add(conn, sk, parent); if (test_bit(BT_SK_DEFER_SETUP, &bt_sk(parent)->flags)) sk->sk_state = BT_CONNECT2; else sk->sk_state = BT_CONNECTED; /* Wake up parent */ parent->sk_data_ready(parent); release_sock(parent); sco_conn_unlock(conn); } } /* ----- SCO interface with lower layer (HCI) ----- */ int sco_connect_ind(struct hci_dev *hdev, bdaddr_t *bdaddr, __u8 *flags) { struct sock *sk; int lm = 0; BT_DBG("hdev %s, bdaddr %pMR", hdev->name, bdaddr); /* Find listening sockets */ read_lock(&sco_sk_list.lock); sk_for_each(sk, &sco_sk_list.head) { if (sk->sk_state != BT_LISTEN) continue; if (!bacmp(&sco_pi(sk)->src, &hdev->bdaddr) || !bacmp(&sco_pi(sk)->src, BDADDR_ANY)) { lm |= HCI_LM_ACCEPT; if (test_bit(BT_SK_DEFER_SETUP, &bt_sk(sk)->flags)) *flags |= 
HCI_PROTO_DEFER; break; } } read_unlock(&sco_sk_list.lock); return lm; } static void sco_connect_cfm(struct hci_conn *hcon, __u8 status) { if (hcon->type != SCO_LINK && hcon->type != ESCO_LINK) return; BT_DBG("hcon %p bdaddr %pMR status %u", hcon, &hcon->dst, status); if (!status) { struct sco_conn *conn; conn = sco_conn_add(hcon); if (conn) sco_conn_ready(conn); } else sco_conn_del(hcon, bt_to_errno(status)); } static void sco_disconn_cfm(struct hci_conn *hcon, __u8 reason) { if (hcon->type != SCO_LINK && hcon->type != ESCO_LINK) return; BT_DBG("hcon %p reason %d", hcon, reason); sco_conn_del(hcon, bt_to_errno(reason)); } void sco_recv_scodata(struct hci_conn *hcon, struct sk_buff *skb) { struct sco_conn *conn = hcon->sco_data; if (!conn) goto drop; BT_DBG("conn %p len %u", conn, skb->len); if (skb->len) { sco_recv_frame(conn, skb); return; } drop: kfree_skb(skb); } static struct hci_cb sco_cb = { .name = "SCO", .connect_cfm = sco_connect_cfm, .disconn_cfm = sco_disconn_cfm, }; static int sco_debugfs_show(struct seq_file *f, void *p) { struct sock *sk; read_lock(&sco_sk_list.lock); sk_for_each(sk, &sco_sk_list.head) { seq_printf(f, "%pMR %pMR %d\n", &sco_pi(sk)->src, &sco_pi(sk)->dst, sk->sk_state); } read_unlock(&sco_sk_list.lock); return 0; } DEFINE_SHOW_ATTRIBUTE(sco_debugfs); static struct dentry *sco_debugfs; static const struct proto_ops sco_sock_ops = { .family = PF_BLUETOOTH, .owner = THIS_MODULE, .release = sco_sock_release, .bind = sco_sock_bind, .connect = sco_sock_connect, .listen = sco_sock_listen, .accept = sco_sock_accept, .getname = sco_sock_getname, .sendmsg = sco_sock_sendmsg, .recvmsg = sco_sock_recvmsg, .poll = bt_sock_poll, .ioctl = bt_sock_ioctl, .gettstamp = sock_gettstamp, .mmap = sock_no_mmap, .socketpair = sock_no_socketpair, .shutdown = sco_sock_shutdown, .setsockopt = sco_sock_setsockopt, .getsockopt = sco_sock_getsockopt }; static const struct net_proto_family sco_sock_family_ops = { .family = PF_BLUETOOTH, .owner = THIS_MODULE, 
.create = sco_sock_create, }; int __init sco_init(void) { int err; BUILD_BUG_ON(sizeof(struct sockaddr_sco) > sizeof(struct sockaddr)); err = proto_register(&sco_proto, 0); if (err < 0) return err; err = bt_sock_register(BTPROTO_SCO, &sco_sock_family_ops); if (err < 0) { BT_ERR("SCO socket registration failed"); goto error; } err = bt_procfs_init(&init_net, "sco", &sco_sk_list, NULL); if (err < 0) { BT_ERR("Failed to create SCO proc file"); bt_sock_unregister(BTPROTO_SCO); goto error; } BT_INFO("SCO socket layer initialized"); hci_register_cb(&sco_cb); if (IS_ERR_OR_NULL(bt_debugfs)) return 0; sco_debugfs = debugfs_create_file("sco", 0444, bt_debugfs, NULL, &sco_debugfs_fops); return 0; error: proto_unregister(&sco_proto); return err; } void sco_exit(void) { bt_procfs_cleanup(&init_net, "sco"); debugfs_remove(sco_debugfs); hci_unregister_cb(&sco_cb); bt_sock_unregister(BTPROTO_SCO); proto_unregister(&sco_proto); } module_param(disable_esco, bool, 0644); MODULE_PARM_DESC(disable_esco, "Disable eSCO connection creation");
/* BlueZ - Bluetooth protocol stack for Linux Copyright (C) 2000-2001 Qualcomm Incorporated Written 2000,2001 by Maxim Krasnyansky <maxk@qualcomm.com> This program is free software; you can redistribute it and/or modify it under the terms of the GNU General Public License version 2 as published by the Free Software Foundation; THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT OF THIRD PARTY RIGHTS. IN NO EVENT SHALL THE COPYRIGHT HOLDER(S) AND AUTHOR(S) BE LIABLE FOR ANY CLAIM, OR ANY SPECIAL INDIRECT OR CONSEQUENTIAL DAMAGES, OR ANY DAMAGES WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE. ALL LIABILITY, INCLUDING LIABILITY FOR INFRINGEMENT OF ANY PATENTS, COPYRIGHTS, TRADEMARKS OR OTHER RIGHTS, RELATING TO USE OF THIS SOFTWARE IS DISCLAIMED. */ /* Bluetooth SCO sockets. 
*/ #include <linux/module.h> #include <linux/debugfs.h> #include <linux/seq_file.h> #include <linux/sched/signal.h> #include <net/bluetooth/bluetooth.h> #include <net/bluetooth/hci_core.h> #include <net/bluetooth/sco.h> static bool disable_esco; static const struct proto_ops sco_sock_ops; static struct bt_sock_list sco_sk_list = { .lock = __RW_LOCK_UNLOCKED(sco_sk_list.lock) }; /* ---- SCO connections ---- */ struct sco_conn { struct hci_conn *hcon; spinlock_t lock; struct sock *sk; struct delayed_work timeout_work; unsigned int mtu; }; #define sco_conn_lock(c) spin_lock(&c->lock) #define sco_conn_unlock(c) spin_unlock(&c->lock) static void sco_sock_close(struct sock *sk); static void sco_sock_kill(struct sock *sk); /* ----- SCO socket info ----- */ #define sco_pi(sk) ((struct sco_pinfo *) sk) struct sco_pinfo { struct bt_sock bt; bdaddr_t src; bdaddr_t dst; __u32 flags; __u16 setting; __u8 cmsg_mask; struct sco_conn *conn; }; /* ---- SCO timers ---- */ #define SCO_CONN_TIMEOUT (HZ * 40) #define SCO_DISCONN_TIMEOUT (HZ * 2) static void sco_sock_timeout(struct work_struct *work) { struct sco_conn *conn = container_of(work, struct sco_conn, timeout_work.work); struct sock *sk; sco_conn_lock(conn); sk = conn->sk; if (sk) sock_hold(sk); sco_conn_unlock(conn); if (!sk) return; BT_DBG("sock %p state %d", sk, sk->sk_state); lock_sock(sk); sk->sk_err = ETIMEDOUT; sk->sk_state_change(sk); release_sock(sk); sock_put(sk); } static void sco_sock_set_timer(struct sock *sk, long timeout) { if (!sco_pi(sk)->conn) return; BT_DBG("sock %p state %d timeout %ld", sk, sk->sk_state, timeout); cancel_delayed_work(&sco_pi(sk)->conn->timeout_work); schedule_delayed_work(&sco_pi(sk)->conn->timeout_work, timeout); } static void sco_sock_clear_timer(struct sock *sk) { if (!sco_pi(sk)->conn) return; BT_DBG("sock %p state %d", sk, sk->sk_state); cancel_delayed_work(&sco_pi(sk)->conn->timeout_work); } /* ---- SCO connections ---- */ static struct sco_conn *sco_conn_add(struct hci_conn *hcon) { 
struct hci_dev *hdev = hcon->hdev; struct sco_conn *conn = hcon->sco_data; if (conn) return conn; conn = kzalloc(sizeof(struct sco_conn), GFP_KERNEL); if (!conn) return NULL; spin_lock_init(&conn->lock); hcon->sco_data = conn; conn->hcon = hcon; if (hdev->sco_mtu > 0) conn->mtu = hdev->sco_mtu; else conn->mtu = 60; BT_DBG("hcon %p conn %p", hcon, conn); return conn; } /* Delete channel. * Must be called on the locked socket. */ static void sco_chan_del(struct sock *sk, int err) { struct sco_conn *conn; conn = sco_pi(sk)->conn; BT_DBG("sk %p, conn %p, err %d", sk, conn, err); if (conn) { sco_conn_lock(conn); conn->sk = NULL; sco_pi(sk)->conn = NULL; sco_conn_unlock(conn); if (conn->hcon) hci_conn_drop(conn->hcon); } sk->sk_state = BT_CLOSED; sk->sk_err = err; sk->sk_state_change(sk); sock_set_flag(sk, SOCK_ZAPPED); } static void sco_conn_del(struct hci_conn *hcon, int err) { struct sco_conn *conn = hcon->sco_data; struct sock *sk; if (!conn) return; BT_DBG("hcon %p conn %p, err %d", hcon, conn, err); /* Kill socket */ sco_conn_lock(conn); sk = conn->sk; sco_conn_unlock(conn); if (sk) { sock_hold(sk); lock_sock(sk); sco_sock_clear_timer(sk); sco_chan_del(sk, err); release_sock(sk); sock_put(sk); /* Ensure no more work items will run before freeing conn. 
*/ cancel_delayed_work_sync(&conn->timeout_work); } hcon->sco_data = NULL; kfree(conn); } static void __sco_chan_add(struct sco_conn *conn, struct sock *sk, struct sock *parent) { BT_DBG("conn %p", conn); sco_pi(sk)->conn = conn; conn->sk = sk; INIT_DELAYED_WORK(&conn->timeout_work, sco_sock_timeout); if (parent) bt_accept_enqueue(parent, sk, true); } static int sco_chan_add(struct sco_conn *conn, struct sock *sk, struct sock *parent) { int err = 0; sco_conn_lock(conn); if (conn->sk) err = -EBUSY; else __sco_chan_add(conn, sk, parent); sco_conn_unlock(conn); return err; } static int sco_connect(struct hci_dev *hdev, struct sock *sk) { struct sco_conn *conn; struct hci_conn *hcon; int err, type; BT_DBG("%pMR -> %pMR", &sco_pi(sk)->src, &sco_pi(sk)->dst); if (lmp_esco_capable(hdev) && !disable_esco) type = ESCO_LINK; else type = SCO_LINK; if (sco_pi(sk)->setting == BT_VOICE_TRANSPARENT && (!lmp_transp_capable(hdev) || !lmp_esco_capable(hdev))) return -EOPNOTSUPP; hcon = hci_connect_sco(hdev, type, &sco_pi(sk)->dst, sco_pi(sk)->setting); if (IS_ERR(hcon)) return PTR_ERR(hcon); conn = sco_conn_add(hcon); if (!conn) { hci_conn_drop(hcon); return -ENOMEM; } /* Update source addr of the socket */ bacpy(&sco_pi(sk)->src, &hcon->src); err = sco_chan_add(conn, sk, NULL); if (err) return err; if (hcon->state == BT_CONNECTED) { sco_sock_clear_timer(sk); sk->sk_state = BT_CONNECTED; } else { sk->sk_state = BT_CONNECT; sco_sock_set_timer(sk, sk->sk_sndtimeo); } return err; } static int sco_send_frame(struct sock *sk, void *buf, int len, unsigned int msg_flags) { struct sco_conn *conn = sco_pi(sk)->conn; struct sk_buff *skb; int err; /* Check outgoing MTU */ if (len > conn->mtu) return -EINVAL; BT_DBG("sk %p len %d", sk, len); skb = bt_skb_send_alloc(sk, len, msg_flags & MSG_DONTWAIT, &err); if (!skb) return err; memcpy(skb_put(skb, len), buf, len); hci_send_sco(conn->hcon, skb); return len; } static void sco_recv_frame(struct sco_conn *conn, struct sk_buff *skb) { struct sock 
*sk; sco_conn_lock(conn); sk = conn->sk; sco_conn_unlock(conn); if (!sk) goto drop; BT_DBG("sk %p len %u", sk, skb->len); if (sk->sk_state != BT_CONNECTED) goto drop; if (!sock_queue_rcv_skb(sk, skb)) return; drop: kfree_skb(skb); } /* -------- Socket interface ---------- */ static struct sock *__sco_get_sock_listen_by_addr(bdaddr_t *ba) { struct sock *sk; sk_for_each(sk, &sco_sk_list.head) { if (sk->sk_state != BT_LISTEN) continue; if (!bacmp(&sco_pi(sk)->src, ba)) return sk; } return NULL; } /* Find socket listening on source bdaddr. * Returns closest match. */ static struct sock *sco_get_sock_listen(bdaddr_t *src) { struct sock *sk = NULL, *sk1 = NULL; read_lock(&sco_sk_list.lock); sk_for_each(sk, &sco_sk_list.head) { if (sk->sk_state != BT_LISTEN) continue; /* Exact match. */ if (!bacmp(&sco_pi(sk)->src, src)) break; /* Closest match */ if (!bacmp(&sco_pi(sk)->src, BDADDR_ANY)) sk1 = sk; } read_unlock(&sco_sk_list.lock); return sk ? sk : sk1; } static void sco_sock_destruct(struct sock *sk) { BT_DBG("sk %p", sk); skb_queue_purge(&sk->sk_receive_queue); skb_queue_purge(&sk->sk_write_queue); } static void sco_sock_cleanup_listen(struct sock *parent) { struct sock *sk; BT_DBG("parent %p", parent); /* Close not yet accepted channels */ while ((sk = bt_accept_dequeue(parent, NULL))) { sco_sock_close(sk); sco_sock_kill(sk); } parent->sk_state = BT_CLOSED; sock_set_flag(parent, SOCK_ZAPPED); } /* Kill socket (only if zapped and orphan) * Must be called on unlocked socket. 
*/ static void sco_sock_kill(struct sock *sk) { if (!sock_flag(sk, SOCK_ZAPPED) || sk->sk_socket) return; BT_DBG("sk %p state %d", sk, sk->sk_state); /* Kill poor orphan */ bt_sock_unlink(&sco_sk_list, sk); sock_set_flag(sk, SOCK_DEAD); sock_put(sk); } static void __sco_sock_close(struct sock *sk) { BT_DBG("sk %p state %d socket %p", sk, sk->sk_state, sk->sk_socket); switch (sk->sk_state) { case BT_LISTEN: sco_sock_cleanup_listen(sk); break; case BT_CONNECTED: case BT_CONFIG: if (sco_pi(sk)->conn->hcon) { sk->sk_state = BT_DISCONN; sco_sock_set_timer(sk, SCO_DISCONN_TIMEOUT); sco_conn_lock(sco_pi(sk)->conn); hci_conn_drop(sco_pi(sk)->conn->hcon); sco_pi(sk)->conn->hcon = NULL; sco_conn_unlock(sco_pi(sk)->conn); } else sco_chan_del(sk, ECONNRESET); break; case BT_CONNECT2: case BT_CONNECT: case BT_DISCONN: sco_chan_del(sk, ECONNRESET); break; default: sock_set_flag(sk, SOCK_ZAPPED); break; } } /* Must be called on unlocked socket. */ static void sco_sock_close(struct sock *sk) { lock_sock(sk); sco_sock_clear_timer(sk); __sco_sock_close(sk); release_sock(sk); } static void sco_skb_put_cmsg(struct sk_buff *skb, struct msghdr *msg, struct sock *sk) { if (sco_pi(sk)->cmsg_mask & SCO_CMSG_PKT_STATUS) put_cmsg(msg, SOL_BLUETOOTH, BT_SCM_PKT_STATUS, sizeof(bt_cb(skb)->sco.pkt_status), &bt_cb(skb)->sco.pkt_status); } static void sco_sock_init(struct sock *sk, struct sock *parent) { BT_DBG("sk %p", sk); if (parent) { sk->sk_type = parent->sk_type; bt_sk(sk)->flags = bt_sk(parent)->flags; security_sk_clone(parent, sk); } else { bt_sk(sk)->skb_put_cmsg = sco_skb_put_cmsg; } } static struct proto sco_proto = { .name = "SCO", .owner = THIS_MODULE, .obj_size = sizeof(struct sco_pinfo) }; static struct sock *sco_sock_alloc(struct net *net, struct socket *sock, int proto, gfp_t prio, int kern) { struct sock *sk; sk = sk_alloc(net, PF_BLUETOOTH, prio, &sco_proto, kern); if (!sk) return NULL; sock_init_data(sock, sk); INIT_LIST_HEAD(&bt_sk(sk)->accept_q); sk->sk_destruct = 
sco_sock_destruct; sk->sk_sndtimeo = SCO_CONN_TIMEOUT; sock_reset_flag(sk, SOCK_ZAPPED); sk->sk_protocol = proto; sk->sk_state = BT_OPEN; sco_pi(sk)->setting = BT_VOICE_CVSD_16BIT; bt_sock_link(&sco_sk_list, sk); return sk; } static int sco_sock_create(struct net *net, struct socket *sock, int protocol, int kern) { struct sock *sk; BT_DBG("sock %p", sock); sock->state = SS_UNCONNECTED; if (sock->type != SOCK_SEQPACKET) return -ESOCKTNOSUPPORT; sock->ops = &sco_sock_ops; sk = sco_sock_alloc(net, sock, protocol, GFP_ATOMIC, kern); if (!sk) return -ENOMEM; sco_sock_init(sk, NULL); return 0; } static int sco_sock_bind(struct socket *sock, struct sockaddr *addr, int addr_len) { struct sockaddr_sco *sa = (struct sockaddr_sco *) addr; struct sock *sk = sock->sk; int err = 0; if (!addr || addr_len < sizeof(struct sockaddr_sco) || addr->sa_family != AF_BLUETOOTH) return -EINVAL; BT_DBG("sk %p %pMR", sk, &sa->sco_bdaddr); lock_sock(sk); if (sk->sk_state != BT_OPEN) { err = -EBADFD; goto done; } if (sk->sk_type != SOCK_SEQPACKET) { err = -EINVAL; goto done; } bacpy(&sco_pi(sk)->src, &sa->sco_bdaddr); sk->sk_state = BT_BOUND; done: release_sock(sk); return err; } static int sco_sock_connect(struct socket *sock, struct sockaddr *addr, int alen, int flags) { struct sockaddr_sco *sa = (struct sockaddr_sco *) addr; struct sock *sk = sock->sk; struct hci_dev *hdev; int err; BT_DBG("sk %p", sk); if (alen < sizeof(struct sockaddr_sco) || addr->sa_family != AF_BLUETOOTH) return -EINVAL; if (sk->sk_state != BT_OPEN && sk->sk_state != BT_BOUND) return -EBADFD; if (sk->sk_type != SOCK_SEQPACKET) return -EINVAL; hdev = hci_get_route(&sa->sco_bdaddr, &sco_pi(sk)->src, BDADDR_BREDR); if (!hdev) return -EHOSTUNREACH; hci_dev_lock(hdev); lock_sock(sk); /* Set destination address and psm */ bacpy(&sco_pi(sk)->dst, &sa->sco_bdaddr); err = sco_connect(hdev, sk); hci_dev_unlock(hdev); hci_dev_put(hdev); if (err) goto done; err = bt_sock_wait_state(sk, BT_CONNECTED, sock_sndtimeo(sk, flags & 
O_NONBLOCK)); done: release_sock(sk); return err; } static int sco_sock_listen(struct socket *sock, int backlog) { struct sock *sk = sock->sk; bdaddr_t *src = &sco_pi(sk)->src; int err = 0; BT_DBG("sk %p backlog %d", sk, backlog); lock_sock(sk); if (sk->sk_state != BT_BOUND) { err = -EBADFD; goto done; } if (sk->sk_type != SOCK_SEQPACKET) { err = -EINVAL; goto done; } write_lock(&sco_sk_list.lock); if (__sco_get_sock_listen_by_addr(src)) { err = -EADDRINUSE; goto unlock; } sk->sk_max_ack_backlog = backlog; sk->sk_ack_backlog = 0; sk->sk_state = BT_LISTEN; unlock: write_unlock(&sco_sk_list.lock); done: release_sock(sk); return err; } static int sco_sock_accept(struct socket *sock, struct socket *newsock, int flags, bool kern) { DEFINE_WAIT_FUNC(wait, woken_wake_function); struct sock *sk = sock->sk, *ch; long timeo; int err = 0; lock_sock(sk); timeo = sock_rcvtimeo(sk, flags & O_NONBLOCK); BT_DBG("sk %p timeo %ld", sk, timeo); /* Wait for an incoming connection. (wake-one). */ add_wait_queue_exclusive(sk_sleep(sk), &wait); while (1) { if (sk->sk_state != BT_LISTEN) { err = -EBADFD; break; } ch = bt_accept_dequeue(sk, newsock); if (ch) break; if (!timeo) { err = -EAGAIN; break; } if (signal_pending(current)) { err = sock_intr_errno(timeo); break; } release_sock(sk); timeo = wait_woken(&wait, TASK_INTERRUPTIBLE, timeo); lock_sock(sk); } remove_wait_queue(sk_sleep(sk), &wait); if (err) goto done; newsock->state = SS_CONNECTED; BT_DBG("new socket %p", ch); done: release_sock(sk); return err; } static int sco_sock_getname(struct socket *sock, struct sockaddr *addr, int peer) { struct sockaddr_sco *sa = (struct sockaddr_sco *) addr; struct sock *sk = sock->sk; BT_DBG("sock %p, sk %p", sock, sk); addr->sa_family = AF_BLUETOOTH; if (peer) bacpy(&sa->sco_bdaddr, &sco_pi(sk)->dst); else bacpy(&sa->sco_bdaddr, &sco_pi(sk)->src); return sizeof(struct sockaddr_sco); } static int sco_sock_sendmsg(struct socket *sock, struct msghdr *msg, size_t len) { struct sock *sk = sock->sk; 
void *buf; int err; BT_DBG("sock %p, sk %p", sock, sk); err = sock_error(sk); if (err) return err; if (msg->msg_flags & MSG_OOB) return -EOPNOTSUPP; buf = kmalloc(len, GFP_KERNEL); if (!buf) return -ENOMEM; if (memcpy_from_msg(buf, msg, len)) { kfree(buf); return -EFAULT; } lock_sock(sk); if (sk->sk_state == BT_CONNECTED) err = sco_send_frame(sk, buf, len, msg->msg_flags); else err = -ENOTCONN; release_sock(sk); kfree(buf); return err; } static void sco_conn_defer_accept(struct hci_conn *conn, u16 setting) { struct hci_dev *hdev = conn->hdev; BT_DBG("conn %p", conn); conn->state = BT_CONFIG; if (!lmp_esco_capable(hdev)) { struct hci_cp_accept_conn_req cp; bacpy(&cp.bdaddr, &conn->dst); cp.role = 0x00; /* Ignored */ hci_send_cmd(hdev, HCI_OP_ACCEPT_CONN_REQ, sizeof(cp), &cp); } else { struct hci_cp_accept_sync_conn_req cp; bacpy(&cp.bdaddr, &conn->dst); cp.pkt_type = cpu_to_le16(conn->pkt_type); cp.tx_bandwidth = cpu_to_le32(0x00001f40); cp.rx_bandwidth = cpu_to_le32(0x00001f40); cp.content_format = cpu_to_le16(setting); switch (setting & SCO_AIRMODE_MASK) { case SCO_AIRMODE_TRANSP: if (conn->pkt_type & ESCO_2EV3) cp.max_latency = cpu_to_le16(0x0008); else cp.max_latency = cpu_to_le16(0x000D); cp.retrans_effort = 0x02; break; case SCO_AIRMODE_CVSD: cp.max_latency = cpu_to_le16(0xffff); cp.retrans_effort = 0xff; break; default: /* use CVSD settings as fallback */ cp.max_latency = cpu_to_le16(0xffff); cp.retrans_effort = 0xff; break; } hci_send_cmd(hdev, HCI_OP_ACCEPT_SYNC_CONN_REQ, sizeof(cp), &cp); } } static int sco_sock_recvmsg(struct socket *sock, struct msghdr *msg, size_t len, int flags) { struct sock *sk = sock->sk; struct sco_pinfo *pi = sco_pi(sk); lock_sock(sk); if (sk->sk_state == BT_CONNECT2 && test_bit(BT_SK_DEFER_SETUP, &bt_sk(sk)->flags)) { sco_conn_defer_accept(pi->conn->hcon, pi->setting); sk->sk_state = BT_CONFIG; release_sock(sk); return 0; } release_sock(sk); return bt_sock_recvmsg(sock, msg, len, flags); } static int sco_sock_setsockopt(struct 
socket *sock, int level, int optname, sockptr_t optval, unsigned int optlen) { struct sock *sk = sock->sk; int len, err = 0; struct bt_voice voice; u32 opt; BT_DBG("sk %p", sk); lock_sock(sk); switch (optname) { case BT_DEFER_SETUP: if (sk->sk_state != BT_BOUND && sk->sk_state != BT_LISTEN) { err = -EINVAL; break; } if (copy_from_sockptr(&opt, optval, sizeof(u32))) { err = -EFAULT; break; } if (opt) set_bit(BT_SK_DEFER_SETUP, &bt_sk(sk)->flags); else clear_bit(BT_SK_DEFER_SETUP, &bt_sk(sk)->flags); break; case BT_VOICE: if (sk->sk_state != BT_OPEN && sk->sk_state != BT_BOUND && sk->sk_state != BT_CONNECT2) { err = -EINVAL; break; } voice.setting = sco_pi(sk)->setting; len = min_t(unsigned int, sizeof(voice), optlen); if (copy_from_sockptr(&voice, optval, len)) { err = -EFAULT; break; } /* Explicitly check for these values */ if (voice.setting != BT_VOICE_TRANSPARENT && voice.setting != BT_VOICE_CVSD_16BIT) { err = -EINVAL; break; } sco_pi(sk)->setting = voice.setting; break; case BT_PKT_STATUS: if (copy_from_sockptr(&opt, optval, sizeof(u32))) { err = -EFAULT; break; } if (opt) sco_pi(sk)->cmsg_mask |= SCO_CMSG_PKT_STATUS; else sco_pi(sk)->cmsg_mask &= SCO_CMSG_PKT_STATUS; break; default: err = -ENOPROTOOPT; break; } release_sock(sk); return err; } static int sco_sock_getsockopt_old(struct socket *sock, int optname, char __user *optval, int __user *optlen) { struct sock *sk = sock->sk; struct sco_options opts; struct sco_conninfo cinfo; int len, err = 0; BT_DBG("sk %p", sk); if (get_user(len, optlen)) return -EFAULT; lock_sock(sk); switch (optname) { case SCO_OPTIONS: if (sk->sk_state != BT_CONNECTED && !(sk->sk_state == BT_CONNECT2 && test_bit(BT_SK_DEFER_SETUP, &bt_sk(sk)->flags))) { err = -ENOTCONN; break; } opts.mtu = sco_pi(sk)->conn->mtu; BT_DBG("mtu %u", opts.mtu); len = min_t(unsigned int, len, sizeof(opts)); if (copy_to_user(optval, (char *)&opts, len)) err = -EFAULT; break; case SCO_CONNINFO: if (sk->sk_state != BT_CONNECTED && !(sk->sk_state == 
BT_CONNECT2 && test_bit(BT_SK_DEFER_SETUP, &bt_sk(sk)->flags))) { err = -ENOTCONN; break; } memset(&cinfo, 0, sizeof(cinfo)); cinfo.hci_handle = sco_pi(sk)->conn->hcon->handle; memcpy(cinfo.dev_class, sco_pi(sk)->conn->hcon->dev_class, 3); len = min_t(unsigned int, len, sizeof(cinfo)); if (copy_to_user(optval, (char *)&cinfo, len)) err = -EFAULT; break; default: err = -ENOPROTOOPT; break; } release_sock(sk); return err; } static int sco_sock_getsockopt(struct socket *sock, int level, int optname, char __user *optval, int __user *optlen) { struct sock *sk = sock->sk; int len, err = 0; struct bt_voice voice; u32 phys; int pkt_status; BT_DBG("sk %p", sk); if (level == SOL_SCO) return sco_sock_getsockopt_old(sock, optname, optval, optlen); if (get_user(len, optlen)) return -EFAULT; lock_sock(sk); switch (optname) { case BT_DEFER_SETUP: if (sk->sk_state != BT_BOUND && sk->sk_state != BT_LISTEN) { err = -EINVAL; break; } if (put_user(test_bit(BT_SK_DEFER_SETUP, &bt_sk(sk)->flags), (u32 __user *)optval)) err = -EFAULT; break; case BT_VOICE: voice.setting = sco_pi(sk)->setting; len = min_t(unsigned int, len, sizeof(voice)); if (copy_to_user(optval, (char *)&voice, len)) err = -EFAULT; break; case BT_PHY: if (sk->sk_state != BT_CONNECTED) { err = -ENOTCONN; break; } phys = hci_conn_get_phy(sco_pi(sk)->conn->hcon); if (put_user(phys, (u32 __user *) optval)) err = -EFAULT; break; case BT_PKT_STATUS: pkt_status = (sco_pi(sk)->cmsg_mask & SCO_CMSG_PKT_STATUS); if (put_user(pkt_status, (int __user *)optval)) err = -EFAULT; break; case BT_SNDMTU: case BT_RCVMTU: if (sk->sk_state != BT_CONNECTED) { err = -ENOTCONN; break; } if (put_user(sco_pi(sk)->conn->mtu, (u32 __user *)optval)) err = -EFAULT; break; default: err = -ENOPROTOOPT; break; } release_sock(sk); return err; } static int sco_sock_shutdown(struct socket *sock, int how) { struct sock *sk = sock->sk; int err = 0; BT_DBG("sock %p, sk %p", sock, sk); if (!sk) return 0; sock_hold(sk); lock_sock(sk); if (!sk->sk_shutdown) { 
sk->sk_shutdown = SHUTDOWN_MASK; sco_sock_clear_timer(sk); __sco_sock_close(sk); if (sock_flag(sk, SOCK_LINGER) && sk->sk_lingertime && !(current->flags & PF_EXITING)) err = bt_sock_wait_state(sk, BT_CLOSED, sk->sk_lingertime); } release_sock(sk); sock_put(sk); return err; } static int sco_sock_release(struct socket *sock) { struct sock *sk = sock->sk; int err = 0; BT_DBG("sock %p, sk %p", sock, sk); if (!sk) return 0; sco_sock_close(sk); if (sock_flag(sk, SOCK_LINGER) && sk->sk_lingertime && !(current->flags & PF_EXITING)) { lock_sock(sk); err = bt_sock_wait_state(sk, BT_CLOSED, sk->sk_lingertime); release_sock(sk); } sock_orphan(sk); sco_sock_kill(sk); return err; } static void sco_conn_ready(struct sco_conn *conn) { struct sock *parent; struct sock *sk = conn->sk; BT_DBG("conn %p", conn); if (sk) { lock_sock(sk); sco_sock_clear_timer(sk); sk->sk_state = BT_CONNECTED; sk->sk_state_change(sk); release_sock(sk); } else { sco_conn_lock(conn); if (!conn->hcon) { sco_conn_unlock(conn); return; } parent = sco_get_sock_listen(&conn->hcon->src); if (!parent) { sco_conn_unlock(conn); return; } lock_sock(parent); sk = sco_sock_alloc(sock_net(parent), NULL, BTPROTO_SCO, GFP_ATOMIC, 0); if (!sk) { release_sock(parent); sco_conn_unlock(conn); return; } sco_sock_init(sk, parent); bacpy(&sco_pi(sk)->src, &conn->hcon->src); bacpy(&sco_pi(sk)->dst, &conn->hcon->dst); hci_conn_hold(conn->hcon); __sco_chan_add(conn, sk, parent); if (test_bit(BT_SK_DEFER_SETUP, &bt_sk(parent)->flags)) sk->sk_state = BT_CONNECT2; else sk->sk_state = BT_CONNECTED; /* Wake up parent */ parent->sk_data_ready(parent); release_sock(parent); sco_conn_unlock(conn); } } /* ----- SCO interface with lower layer (HCI) ----- */ int sco_connect_ind(struct hci_dev *hdev, bdaddr_t *bdaddr, __u8 *flags) { struct sock *sk; int lm = 0; BT_DBG("hdev %s, bdaddr %pMR", hdev->name, bdaddr); /* Find listening sockets */ read_lock(&sco_sk_list.lock); sk_for_each(sk, &sco_sk_list.head) { if (sk->sk_state != BT_LISTEN) 
continue; if (!bacmp(&sco_pi(sk)->src, &hdev->bdaddr) || !bacmp(&sco_pi(sk)->src, BDADDR_ANY)) { lm |= HCI_LM_ACCEPT; if (test_bit(BT_SK_DEFER_SETUP, &bt_sk(sk)->flags)) *flags |= HCI_PROTO_DEFER; break; } } read_unlock(&sco_sk_list.lock); return lm; } static void sco_connect_cfm(struct hci_conn *hcon, __u8 status) { if (hcon->type != SCO_LINK && hcon->type != ESCO_LINK) return; BT_DBG("hcon %p bdaddr %pMR status %u", hcon, &hcon->dst, status); if (!status) { struct sco_conn *conn; conn = sco_conn_add(hcon); if (conn) sco_conn_ready(conn); } else sco_conn_del(hcon, bt_to_errno(status)); } static void sco_disconn_cfm(struct hci_conn *hcon, __u8 reason) { if (hcon->type != SCO_LINK && hcon->type != ESCO_LINK) return; BT_DBG("hcon %p reason %d", hcon, reason); sco_conn_del(hcon, bt_to_errno(reason)); } void sco_recv_scodata(struct hci_conn *hcon, struct sk_buff *skb) { struct sco_conn *conn = hcon->sco_data; if (!conn) goto drop; BT_DBG("conn %p len %u", conn, skb->len); if (skb->len) { sco_recv_frame(conn, skb); return; } drop: kfree_skb(skb); } static struct hci_cb sco_cb = { .name = "SCO", .connect_cfm = sco_connect_cfm, .disconn_cfm = sco_disconn_cfm, }; static int sco_debugfs_show(struct seq_file *f, void *p) { struct sock *sk; read_lock(&sco_sk_list.lock); sk_for_each(sk, &sco_sk_list.head) { seq_printf(f, "%pMR %pMR %d\n", &sco_pi(sk)->src, &sco_pi(sk)->dst, sk->sk_state); } read_unlock(&sco_sk_list.lock); return 0; } DEFINE_SHOW_ATTRIBUTE(sco_debugfs); static struct dentry *sco_debugfs; static const struct proto_ops sco_sock_ops = { .family = PF_BLUETOOTH, .owner = THIS_MODULE, .release = sco_sock_release, .bind = sco_sock_bind, .connect = sco_sock_connect, .listen = sco_sock_listen, .accept = sco_sock_accept, .getname = sco_sock_getname, .sendmsg = sco_sock_sendmsg, .recvmsg = sco_sock_recvmsg, .poll = bt_sock_poll, .ioctl = bt_sock_ioctl, .gettstamp = sock_gettstamp, .mmap = sock_no_mmap, .socketpair = sock_no_socketpair, .shutdown = sco_sock_shutdown, 
.setsockopt = sco_sock_setsockopt, .getsockopt = sco_sock_getsockopt }; static const struct net_proto_family sco_sock_family_ops = { .family = PF_BLUETOOTH, .owner = THIS_MODULE, .create = sco_sock_create, }; int __init sco_init(void) { int err; BUILD_BUG_ON(sizeof(struct sockaddr_sco) > sizeof(struct sockaddr)); err = proto_register(&sco_proto, 0); if (err < 0) return err; err = bt_sock_register(BTPROTO_SCO, &sco_sock_family_ops); if (err < 0) { BT_ERR("SCO socket registration failed"); goto error; } err = bt_procfs_init(&init_net, "sco", &sco_sk_list, NULL); if (err < 0) { BT_ERR("Failed to create SCO proc file"); bt_sock_unregister(BTPROTO_SCO); goto error; } BT_INFO("SCO socket layer initialized"); hci_register_cb(&sco_cb); if (IS_ERR_OR_NULL(bt_debugfs)) return 0; sco_debugfs = debugfs_create_file("sco", 0444, bt_debugfs, NULL, &sco_debugfs_fops); return 0; error: proto_unregister(&sco_proto); return err; } void sco_exit(void) { bt_procfs_cleanup(&init_net, "sco"); debugfs_remove(sco_debugfs); hci_unregister_cb(&sco_cb); bt_sock_unregister(BTPROTO_SCO); proto_unregister(&sco_proto); } module_param(disable_esco, bool, 0644); MODULE_PARM_DESC(disable_esco, "Disable eSCO connection creation");
static int sco_sock_sendmsg(struct socket *sock, struct msghdr *msg, size_t len) { struct sock *sk = sock->sk; int err; BT_DBG("sock %p, sk %p", sock, sk); err = sock_error(sk); if (err) return err; if (msg->msg_flags & MSG_OOB) return -EOPNOTSUPP; lock_sock(sk); if (sk->sk_state == BT_CONNECTED) err = sco_send_frame(sk, msg, len); else err = -ENOTCONN; release_sock(sk); return err; }
static int sco_sock_sendmsg(struct socket *sock, struct msghdr *msg, size_t len) { struct sock *sk = sock->sk; void *buf; int err; BT_DBG("sock %p, sk %p", sock, sk); err = sock_error(sk); if (err) return err; if (msg->msg_flags & MSG_OOB) return -EOPNOTSUPP; buf = kmalloc(len, GFP_KERNEL); if (!buf) return -ENOMEM; if (memcpy_from_msg(buf, msg, len)) { kfree(buf); return -EFAULT; } lock_sock(sk); if (sk->sk_state == BT_CONNECTED) err = sco_send_frame(sk, buf, len, msg->msg_flags); else err = -ENOTCONN; release_sock(sk); kfree(buf); return err; }
{'added': [(283, 'static int sco_send_frame(struct sock *sk, void *buf, int len,'), (284, '\t\t\t unsigned int msg_flags)'), (296, '\tskb = bt_skb_send_alloc(sk, len, msg_flags & MSG_DONTWAIT, &err);'), (300, '\tmemcpy(skb_put(skb, len), buf, len);'), (725, '\tvoid *buf;'), (737, '\tbuf = kmalloc(len, GFP_KERNEL);'), (738, '\tif (!buf)'), (739, '\t\treturn -ENOMEM;'), (740, ''), (741, '\tif (memcpy_from_msg(buf, msg, len)) {'), (742, '\t\tkfree(buf);'), (743, '\t\treturn -EFAULT;'), (744, '\t}'), (745, ''), (749, '\t\terr = sco_send_frame(sk, buf, len, msg->msg_flags);'), (754, '\tkfree(buf);')], 'deleted': [(283, 'static int sco_send_frame(struct sock *sk, struct msghdr *msg, int len)'), (295, '\tskb = bt_skb_send_alloc(sk, len, msg->msg_flags & MSG_DONTWAIT, &err);'), (299, '\tif (memcpy_from_msg(skb_put(skb, len), msg, len)) {'), (300, '\t\tkfree_skb(skb);'), (301, '\t\treturn -EFAULT;'), (302, '\t}'), (303, ''), (742, '\t\terr = sco_send_frame(sk, msg, len);')]}
16
8
962
5,784
19
102
4
https://github.com/torvalds/linux
CVE-2021-3640
CWE-416
55
StructuredHeadersUtilities.cpp
C++
proxygen::StructuredHeaders::encodeBase64
/* * Copyright (c) 2015-present, Facebook, Inc. * All rights reserved. * * This source code is licensed under the BSD-style license found in the * LICENSE file in the root directory of this source tree. An additional grant * of patent rights can be found in the PATENTS file in the same directory. * */ #include "StructuredHeadersUtilities.h" #include <boost/archive/iterators/binary_from_base64.hpp> #include <boost/archive/iterators/base64_from_binary.hpp> #include <boost/archive/iterators/transform_width.hpp> #include "StructuredHeadersConstants.h" namespace proxygen { namespace StructuredHeaders { bool isLcAlpha(char c) { return c >= 0x61 && c <= 0x7A; } bool isValidIdentifierChar(char c) { return isLcAlpha(c) || std::isdigit(c) || c == '_' || c == '-' || c == '*' || c == '/'; } bool isValidEncodedBinaryContentChar( char c) { return std::isalpha(c) || std::isdigit(c) || c == '+' || c == '/' || c == '='; } bool isValidStringChar(char c) { /* * The difference between the character restriction here and that mentioned * in section 3.7 of version 6 of the Structured Headers draft is that this * function accepts \ and DQUOTE characters. These characters are allowed * as long as they are present as a part of an escape sequence, which is * checked for in the parseString() function in the StructuredHeadersBuffer. 
*/ return c >= 0x20 && c <= 0x7E; } bool isValidIdentifier(const std::string& s) { if (s.size() == 0 || !isLcAlpha(s[0])) { return false; } for (char c : s) { if (!isValidIdentifierChar(c)) { return false; } } return true; } bool isValidString(const std::string& s) { for (char c : s) { if (!isValidStringChar(c)) { return false; } } return true; } bool isValidEncodedBinaryContent( const std::string& s) { if (s.size() % 4 != 0) { return false; } bool equalSeen = false; for (auto it = s.begin(); it != s.end(); it++) { if (*it == '=') { equalSeen = true; } else if (equalSeen || !isValidEncodedBinaryContentChar(*it)) { return false; } } return true; } bool itemTypeMatchesContent( const StructuredHeaderItem& input) { switch (input.tag) { case StructuredHeaderItem::Type::BINARYCONTENT: case StructuredHeaderItem::Type::IDENTIFIER: case StructuredHeaderItem::Type::STRING: return input.value.type() == typeid(std::string); case StructuredHeaderItem::Type::INT64: return input.value.type() == typeid(int64_t); case StructuredHeaderItem::Type::DOUBLE: return input.value.type() == typeid(double); case StructuredHeaderItem::Type::NONE: return true; } return false; } std::string decodeBase64( const std::string& encoded) { if (encoded.size() == 0) { // special case, to prevent an integer overflow down below. 
return ""; } using namespace boost::archive::iterators; using b64it = transform_width<binary_from_base64<std::string::const_iterator>, 8, 6>; std::string decoded = std::string(b64it(std::begin(encoded)), b64it(std::end(encoded))); uint32_t numPadding = std::count(encoded.begin(), encoded.end(), '='); decoded.erase(decoded.end() - numPadding, decoded.end()); return decoded; } std::string encodeBase64(const std::string& input) { using namespace boost::archive::iterators; using b64it = base64_from_binary<transform_width<const char*, 6, 8>>; auto data = input.data(); std::string encoded(b64it(data), b64it(data + (input.length()))); encoded.append((3 - (input.length() % 3)) % 3, '='); return encoded; } } }
/* * Copyright (c) 2015-present, Facebook, Inc. * All rights reserved. * * This source code is licensed under the BSD-style license found in the * LICENSE file in the root directory of this source tree. An additional grant * of patent rights can be found in the PATENTS file in the same directory. * */ #include "StructuredHeadersUtilities.h" #include "StructuredHeadersConstants.h" #include "proxygen/lib/utils/Base64.h" namespace proxygen { namespace StructuredHeaders { bool isLcAlpha(char c) { return c >= 0x61 && c <= 0x7A; } bool isValidIdentifierChar(char c) { return isLcAlpha(c) || std::isdigit(c) || c == '_' || c == '-' || c == '*' || c == '/'; } bool isValidEncodedBinaryContentChar( char c) { return std::isalpha(c) || std::isdigit(c) || c == '+' || c == '/' || c == '='; } bool isValidStringChar(char c) { /* * The difference between the character restriction here and that mentioned * in section 3.7 of version 6 of the Structured Headers draft is that this * function accepts \ and DQUOTE characters. These characters are allowed * as long as they are present as a part of an escape sequence, which is * checked for in the parseString() function in the StructuredHeadersBuffer. 
*/ return c >= 0x20 && c <= 0x7E; } bool isValidIdentifier(const std::string& s) { if (s.size() == 0 || !isLcAlpha(s[0])) { return false; } for (char c : s) { if (!isValidIdentifierChar(c)) { return false; } } return true; } bool isValidString(const std::string& s) { for (char c : s) { if (!isValidStringChar(c)) { return false; } } return true; } bool isValidEncodedBinaryContent( const std::string& s) { if (s.size() % 4 != 0) { return false; } bool equalSeen = false; for (auto it = s.begin(); it != s.end(); it++) { if (*it == '=') { equalSeen = true; } else if (equalSeen || !isValidEncodedBinaryContentChar(*it)) { return false; } } return true; } bool itemTypeMatchesContent( const StructuredHeaderItem& input) { switch (input.tag) { case StructuredHeaderItem::Type::BINARYCONTENT: case StructuredHeaderItem::Type::IDENTIFIER: case StructuredHeaderItem::Type::STRING: return input.value.type() == typeid(std::string); case StructuredHeaderItem::Type::INT64: return input.value.type() == typeid(int64_t); case StructuredHeaderItem::Type::DOUBLE: return input.value.type() == typeid(double); case StructuredHeaderItem::Type::NONE: return true; } return false; } std::string decodeBase64( const std::string& encoded) { if (encoded.size() == 0) { // special case, to prevent an integer overflow down below. return std::string(); } int padding = 0; for (auto it = encoded.rbegin(); padding < 2 && it != encoded.rend() && *it == '='; ++it) { ++padding; } return Base64::decode(encoded, padding); } std::string encodeBase64(const std::string& input) { return Base64::encode(folly::ByteRange( reinterpret_cast<const uint8_t*>(input.c_str()), input.length())); } } }
std::string encodeBase64(const std::string& input) { using namespace boost::archive::iterators; using b64it = base64_from_binary<transform_width<const char*, 6, 8>>; auto data = input.data(); std::string encoded(b64it(data), b64it(data + (input.length()))); encoded.append((3 - (input.length() % 3)) % 3, '='); return encoded; }
std::string encodeBase64(const std::string& input) { return Base64::encode(folly::ByteRange( reinterpret_cast<const uint8_t*>(input.c_str()), input.length())); }
{'added': [(14, '#include "proxygen/lib/utils/Base64.h"'), (15, ''), (109, ' return std::string();'), (112, ' int padding = 0;'), (113, ' for (auto it = encoded.rbegin();'), (114, " padding < 2 && it != encoded.rend() && *it == '=';"), (115, ' ++it) {'), (116, ' ++padding;'), (117, ' }'), (119, ' return Base64::decode(encoded, padding);'), (123, ' return Base64::encode(folly::ByteRange('), (124, ' reinterpret_cast<const uint8_t*>(input.c_str()),'), (125, ' input.length()));')], 'deleted': [(12, '#include <boost/archive/iterators/binary_from_base64.hpp>'), (13, '#include <boost/archive/iterators/base64_from_binary.hpp>'), (14, '#include <boost/archive/iterators/transform_width.hpp>'), (110, ' return "";'), (113, ' using namespace boost::archive::iterators;'), (114, ' using b64it ='), (115, ' transform_width<binary_from_base64<std::string::const_iterator>, 8, 6>;'), (116, ''), (117, ' std::string decoded = std::string(b64it(std::begin(encoded)),'), (118, ' b64it(std::end(encoded)));'), (119, ''), (120, " uint32_t numPadding = std::count(encoded.begin(), encoded.end(), '=');"), (121, ' decoded.erase(decoded.end() - numPadding, decoded.end());'), (123, ' return decoded;'), (127, ' using namespace boost::archive::iterators;'), (128, ' using b64it = base64_from_binary<transform_width<const char*, 6, 8>>;'), (129, ''), (130, ' auto data = input.data();'), (131, ' std::string encoded(b64it(data), b64it(data + (input.length())));'), (132, " encoded.append((3 - (input.length() % 3)) % 3, '=');"), (133, ''), (134, ' return encoded;')]}
13
22
89
548
8
95
1
https://github.com/facebook/proxygen
CVE-2019-11921
CWE-787
1,558
control.c
C
snd_ctl_elem_add
/* * Routines for driver control interface * Copyright (c) by Jaroslav Kysela <perex@perex.cz> * * * This program is free software; you can redistribute it and/or modify * it under the terms of the GNU General Public License as published by * the Free Software Foundation; either version 2 of the License, or * (at your option) any later version. * * This program is distributed in the hope that it will be useful, * but WITHOUT ANY WARRANTY; without even the implied warranty of * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the * GNU General Public License for more details. * * You should have received a copy of the GNU General Public License * along with this program; if not, write to the Free Software * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA * */ #include <linux/threads.h> #include <linux/interrupt.h> #include <linux/module.h> #include <linux/slab.h> #include <linux/vmalloc.h> #include <linux/time.h> #include <sound/core.h> #include <sound/minors.h> #include <sound/info.h> #include <sound/control.h> /* max number of user-defined controls */ #define MAX_USER_CONTROLS 32 #define MAX_CONTROL_COUNT 1028 struct snd_kctl_ioctl { struct list_head list; /* list of all ioctls */ snd_kctl_ioctl_func_t fioctl; }; static DECLARE_RWSEM(snd_ioctl_rwsem); static LIST_HEAD(snd_control_ioctls); #ifdef CONFIG_COMPAT static LIST_HEAD(snd_control_compat_ioctls); #endif static int snd_ctl_open(struct inode *inode, struct file *file) { unsigned long flags; struct snd_card *card; struct snd_ctl_file *ctl; int err; err = nonseekable_open(inode, file); if (err < 0) return err; card = snd_lookup_minor_data(iminor(inode), SNDRV_DEVICE_TYPE_CONTROL); if (!card) { err = -ENODEV; goto __error1; } err = snd_card_file_add(card, file); if (err < 0) { err = -ENODEV; goto __error1; } if (!try_module_get(card->module)) { err = -EFAULT; goto __error2; } ctl = kzalloc(sizeof(*ctl), GFP_KERNEL); if (ctl == NULL) { err = -ENOMEM; goto __error; } 
INIT_LIST_HEAD(&ctl->events); init_waitqueue_head(&ctl->change_sleep); spin_lock_init(&ctl->read_lock); ctl->card = card; ctl->prefer_pcm_subdevice = -1; ctl->prefer_rawmidi_subdevice = -1; ctl->pid = get_pid(task_pid(current)); file->private_data = ctl; write_lock_irqsave(&card->ctl_files_rwlock, flags); list_add_tail(&ctl->list, &card->ctl_files); write_unlock_irqrestore(&card->ctl_files_rwlock, flags); snd_card_unref(card); return 0; __error: module_put(card->module); __error2: snd_card_file_remove(card, file); __error1: if (card) snd_card_unref(card); return err; } static void snd_ctl_empty_read_queue(struct snd_ctl_file * ctl) { unsigned long flags; struct snd_kctl_event *cread; spin_lock_irqsave(&ctl->read_lock, flags); while (!list_empty(&ctl->events)) { cread = snd_kctl_event(ctl->events.next); list_del(&cread->list); kfree(cread); } spin_unlock_irqrestore(&ctl->read_lock, flags); } static int snd_ctl_release(struct inode *inode, struct file *file) { unsigned long flags; struct snd_card *card; struct snd_ctl_file *ctl; struct snd_kcontrol *control; unsigned int idx; ctl = file->private_data; file->private_data = NULL; card = ctl->card; write_lock_irqsave(&card->ctl_files_rwlock, flags); list_del(&ctl->list); write_unlock_irqrestore(&card->ctl_files_rwlock, flags); down_write(&card->controls_rwsem); list_for_each_entry(control, &card->controls, list) for (idx = 0; idx < control->count; idx++) if (control->vd[idx].owner == ctl) control->vd[idx].owner = NULL; up_write(&card->controls_rwsem); snd_ctl_empty_read_queue(ctl); put_pid(ctl->pid); kfree(ctl); module_put(card->module); snd_card_file_remove(card, file); return 0; } void snd_ctl_notify(struct snd_card *card, unsigned int mask, struct snd_ctl_elem_id *id) { unsigned long flags; struct snd_ctl_file *ctl; struct snd_kctl_event *ev; if (snd_BUG_ON(!card || !id)) return; read_lock(&card->ctl_files_rwlock); #if IS_ENABLED(CONFIG_SND_MIXER_OSS) card->mixer_oss_change_count++; #endif list_for_each_entry(ctl, 
&card->ctl_files, list) { if (!ctl->subscribed) continue; spin_lock_irqsave(&ctl->read_lock, flags); list_for_each_entry(ev, &ctl->events, list) { if (ev->id.numid == id->numid) { ev->mask |= mask; goto _found; } } ev = kzalloc(sizeof(*ev), GFP_ATOMIC); if (ev) { ev->id = *id; ev->mask = mask; list_add_tail(&ev->list, &ctl->events); } else { dev_err(card->dev, "No memory available to allocate event\n"); } _found: wake_up(&ctl->change_sleep); spin_unlock_irqrestore(&ctl->read_lock, flags); kill_fasync(&ctl->fasync, SIGIO, POLL_IN); } read_unlock(&card->ctl_files_rwlock); } EXPORT_SYMBOL(snd_ctl_notify); /** * snd_ctl_new - create a control instance from the template * @control: the control template * @access: the default control access * * Allocates a new struct snd_kcontrol instance and copies the given template * to the new instance. It does not copy volatile data (access). * * Return: The pointer of the new instance, or %NULL on failure. */ static struct snd_kcontrol *snd_ctl_new(struct snd_kcontrol *control, unsigned int access) { struct snd_kcontrol *kctl; unsigned int idx; if (snd_BUG_ON(!control || !control->count)) return NULL; if (control->count > MAX_CONTROL_COUNT) return NULL; kctl = kzalloc(sizeof(*kctl) + sizeof(struct snd_kcontrol_volatile) * control->count, GFP_KERNEL); if (kctl == NULL) { pr_err("ALSA: Cannot allocate control instance\n"); return NULL; } *kctl = *control; for (idx = 0; idx < kctl->count; idx++) kctl->vd[idx].access = access; return kctl; } /** * snd_ctl_new1 - create a control instance from the template * @ncontrol: the initialization record * @private_data: the private data to set * * Allocates a new struct snd_kcontrol instance and initialize from the given * template. When the access field of ncontrol is 0, it's assumed as * READWRITE access. When the count field is 0, it's assumes as one. * * Return: The pointer of the newly generated instance, or %NULL on failure. 
*/ struct snd_kcontrol *snd_ctl_new1(const struct snd_kcontrol_new *ncontrol, void *private_data) { struct snd_kcontrol kctl; unsigned int access; if (snd_BUG_ON(!ncontrol || !ncontrol->info)) return NULL; memset(&kctl, 0, sizeof(kctl)); kctl.id.iface = ncontrol->iface; kctl.id.device = ncontrol->device; kctl.id.subdevice = ncontrol->subdevice; if (ncontrol->name) { strlcpy(kctl.id.name, ncontrol->name, sizeof(kctl.id.name)); if (strcmp(ncontrol->name, kctl.id.name) != 0) pr_warn("ALSA: Control name '%s' truncated to '%s'\n", ncontrol->name, kctl.id.name); } kctl.id.index = ncontrol->index; kctl.count = ncontrol->count ? ncontrol->count : 1; access = ncontrol->access == 0 ? SNDRV_CTL_ELEM_ACCESS_READWRITE : (ncontrol->access & (SNDRV_CTL_ELEM_ACCESS_READWRITE| SNDRV_CTL_ELEM_ACCESS_VOLATILE| SNDRV_CTL_ELEM_ACCESS_INACTIVE| SNDRV_CTL_ELEM_ACCESS_TLV_READWRITE| SNDRV_CTL_ELEM_ACCESS_TLV_COMMAND| SNDRV_CTL_ELEM_ACCESS_TLV_CALLBACK)); kctl.info = ncontrol->info; kctl.get = ncontrol->get; kctl.put = ncontrol->put; kctl.tlv.p = ncontrol->tlv.p; kctl.private_value = ncontrol->private_value; kctl.private_data = private_data; return snd_ctl_new(&kctl, access); } EXPORT_SYMBOL(snd_ctl_new1); /** * snd_ctl_free_one - release the control instance * @kcontrol: the control instance * * Releases the control instance created via snd_ctl_new() * or snd_ctl_new1(). * Don't call this after the control was added to the card. 
*/ void snd_ctl_free_one(struct snd_kcontrol *kcontrol) { if (kcontrol) { if (kcontrol->private_free) kcontrol->private_free(kcontrol); kfree(kcontrol); } } EXPORT_SYMBOL(snd_ctl_free_one); static bool snd_ctl_remove_numid_conflict(struct snd_card *card, unsigned int count) { struct snd_kcontrol *kctl; list_for_each_entry(kctl, &card->controls, list) { if (kctl->id.numid < card->last_numid + 1 + count && kctl->id.numid + kctl->count > card->last_numid + 1) { card->last_numid = kctl->id.numid + kctl->count - 1; return true; } } return false; } static int snd_ctl_find_hole(struct snd_card *card, unsigned int count) { unsigned int iter = 100000; while (snd_ctl_remove_numid_conflict(card, count)) { if (--iter == 0) { /* this situation is very unlikely */ dev_err(card->dev, "unable to allocate new control numid\n"); return -ENOMEM; } } return 0; } /** * snd_ctl_add - add the control instance to the card * @card: the card instance * @kcontrol: the control instance to add * * Adds the control instance created via snd_ctl_new() or * snd_ctl_new1() to the given card. Assigns also an unique * numid used for fast search. * * It frees automatically the control which cannot be added. * * Return: Zero if successful, or a negative error code on failure. * */ int snd_ctl_add(struct snd_card *card, struct snd_kcontrol *kcontrol) { struct snd_ctl_elem_id id; unsigned int idx; int err = -EINVAL; if (! 
kcontrol) return err; if (snd_BUG_ON(!card || !kcontrol->info)) goto error; id = kcontrol->id; down_write(&card->controls_rwsem); if (snd_ctl_find_id(card, &id)) { up_write(&card->controls_rwsem); dev_err(card->dev, "control %i:%i:%i:%s:%i is already present\n", id.iface, id.device, id.subdevice, id.name, id.index); err = -EBUSY; goto error; } if (snd_ctl_find_hole(card, kcontrol->count) < 0) { up_write(&card->controls_rwsem); err = -ENOMEM; goto error; } list_add_tail(&kcontrol->list, &card->controls); card->controls_count += kcontrol->count; kcontrol->id.numid = card->last_numid + 1; card->last_numid += kcontrol->count; up_write(&card->controls_rwsem); for (idx = 0; idx < kcontrol->count; idx++, id.index++, id.numid++) snd_ctl_notify(card, SNDRV_CTL_EVENT_MASK_ADD, &id); return 0; error: snd_ctl_free_one(kcontrol); return err; } EXPORT_SYMBOL(snd_ctl_add); /** * snd_ctl_replace - replace the control instance of the card * @card: the card instance * @kcontrol: the control instance to replace * @add_on_replace: add the control if not already added * * Replaces the given control. If the given control does not exist * and the add_on_replace flag is set, the control is added. If the * control exists, it is destroyed first. * * It frees automatically the control which cannot be added or replaced. * * Return: Zero if successful, or a negative error code on failure. 
*/ int snd_ctl_replace(struct snd_card *card, struct snd_kcontrol *kcontrol, bool add_on_replace) { struct snd_ctl_elem_id id; unsigned int idx; struct snd_kcontrol *old; int ret; if (!kcontrol) return -EINVAL; if (snd_BUG_ON(!card || !kcontrol->info)) { ret = -EINVAL; goto error; } id = kcontrol->id; down_write(&card->controls_rwsem); old = snd_ctl_find_id(card, &id); if (!old) { if (add_on_replace) goto add; up_write(&card->controls_rwsem); ret = -EINVAL; goto error; } ret = snd_ctl_remove(card, old); if (ret < 0) { up_write(&card->controls_rwsem); goto error; } add: if (snd_ctl_find_hole(card, kcontrol->count) < 0) { up_write(&card->controls_rwsem); ret = -ENOMEM; goto error; } list_add_tail(&kcontrol->list, &card->controls); card->controls_count += kcontrol->count; kcontrol->id.numid = card->last_numid + 1; card->last_numid += kcontrol->count; up_write(&card->controls_rwsem); for (idx = 0; idx < kcontrol->count; idx++, id.index++, id.numid++) snd_ctl_notify(card, SNDRV_CTL_EVENT_MASK_ADD, &id); return 0; error: snd_ctl_free_one(kcontrol); return ret; } EXPORT_SYMBOL(snd_ctl_replace); /** * snd_ctl_remove - remove the control from the card and release it * @card: the card instance * @kcontrol: the control instance to remove * * Removes the control from the card and then releases the instance. * You don't need to call snd_ctl_free_one(). You must be in * the write lock - down_write(&card->controls_rwsem). * * Return: 0 if successful, or a negative error code on failure. 
*/ int snd_ctl_remove(struct snd_card *card, struct snd_kcontrol *kcontrol) { struct snd_ctl_elem_id id; unsigned int idx; if (snd_BUG_ON(!card || !kcontrol)) return -EINVAL; list_del(&kcontrol->list); card->controls_count -= kcontrol->count; id = kcontrol->id; for (idx = 0; idx < kcontrol->count; idx++, id.index++, id.numid++) snd_ctl_notify(card, SNDRV_CTL_EVENT_MASK_REMOVE, &id); snd_ctl_free_one(kcontrol); return 0; } EXPORT_SYMBOL(snd_ctl_remove); /** * snd_ctl_remove_id - remove the control of the given id and release it * @card: the card instance * @id: the control id to remove * * Finds the control instance with the given id, removes it from the * card list and releases it. * * Return: 0 if successful, or a negative error code on failure. */ int snd_ctl_remove_id(struct snd_card *card, struct snd_ctl_elem_id *id) { struct snd_kcontrol *kctl; int ret; down_write(&card->controls_rwsem); kctl = snd_ctl_find_id(card, id); if (kctl == NULL) { up_write(&card->controls_rwsem); return -ENOENT; } ret = snd_ctl_remove(card, kctl); up_write(&card->controls_rwsem); return ret; } EXPORT_SYMBOL(snd_ctl_remove_id); /** * snd_ctl_remove_user_ctl - remove and release the unlocked user control * @file: active control handle * @id: the control id to remove * * Finds the control instance with the given id, removes it from the * card list and releases it. * * Return: 0 if successful, or a negative error code on failure. 
*/ static int snd_ctl_remove_user_ctl(struct snd_ctl_file * file, struct snd_ctl_elem_id *id) { struct snd_card *card = file->card; struct snd_kcontrol *kctl; int idx, ret; down_write(&card->controls_rwsem); kctl = snd_ctl_find_id(card, id); if (kctl == NULL) { ret = -ENOENT; goto error; } if (!(kctl->vd[0].access & SNDRV_CTL_ELEM_ACCESS_USER)) { ret = -EINVAL; goto error; } for (idx = 0; idx < kctl->count; idx++) if (kctl->vd[idx].owner != NULL && kctl->vd[idx].owner != file) { ret = -EBUSY; goto error; } ret = snd_ctl_remove(card, kctl); if (ret < 0) goto error; card->user_ctl_count--; error: up_write(&card->controls_rwsem); return ret; } /** * snd_ctl_activate_id - activate/inactivate the control of the given id * @card: the card instance * @id: the control id to activate/inactivate * @active: non-zero to activate * * Finds the control instance with the given id, and activate or * inactivate the control together with notification, if changed. * * Return: 0 if unchanged, 1 if changed, or a negative error code on failure. 
*/ int snd_ctl_activate_id(struct snd_card *card, struct snd_ctl_elem_id *id, int active) { struct snd_kcontrol *kctl; struct snd_kcontrol_volatile *vd; unsigned int index_offset; int ret; down_write(&card->controls_rwsem); kctl = snd_ctl_find_id(card, id); if (kctl == NULL) { ret = -ENOENT; goto unlock; } index_offset = snd_ctl_get_ioff(kctl, &kctl->id); vd = &kctl->vd[index_offset]; ret = 0; if (active) { if (!(vd->access & SNDRV_CTL_ELEM_ACCESS_INACTIVE)) goto unlock; vd->access &= ~SNDRV_CTL_ELEM_ACCESS_INACTIVE; } else { if (vd->access & SNDRV_CTL_ELEM_ACCESS_INACTIVE) goto unlock; vd->access |= SNDRV_CTL_ELEM_ACCESS_INACTIVE; } ret = 1; unlock: up_write(&card->controls_rwsem); if (ret > 0) snd_ctl_notify(card, SNDRV_CTL_EVENT_MASK_INFO, id); return ret; } EXPORT_SYMBOL_GPL(snd_ctl_activate_id); /** * snd_ctl_rename_id - replace the id of a control on the card * @card: the card instance * @src_id: the old id * @dst_id: the new id * * Finds the control with the old id from the card, and replaces the * id with the new one. * * Return: Zero if successful, or a negative error code on failure. */ int snd_ctl_rename_id(struct snd_card *card, struct snd_ctl_elem_id *src_id, struct snd_ctl_elem_id *dst_id) { struct snd_kcontrol *kctl; down_write(&card->controls_rwsem); kctl = snd_ctl_find_id(card, src_id); if (kctl == NULL) { up_write(&card->controls_rwsem); return -ENOENT; } kctl->id = *dst_id; kctl->id.numid = card->last_numid + 1; card->last_numid += kctl->count; up_write(&card->controls_rwsem); return 0; } EXPORT_SYMBOL(snd_ctl_rename_id); /** * snd_ctl_find_numid - find the control instance with the given number-id * @card: the card instance * @numid: the number-id to search * * Finds the control instance with the given number-id from the card. * * The caller must down card->controls_rwsem before calling this function * (if the race condition can happen). * * Return: The pointer of the instance if found, or %NULL if not. 
* */ struct snd_kcontrol *snd_ctl_find_numid(struct snd_card *card, unsigned int numid) { struct snd_kcontrol *kctl; if (snd_BUG_ON(!card || !numid)) return NULL; list_for_each_entry(kctl, &card->controls, list) { if (kctl->id.numid <= numid && kctl->id.numid + kctl->count > numid) return kctl; } return NULL; } EXPORT_SYMBOL(snd_ctl_find_numid); /** * snd_ctl_find_id - find the control instance with the given id * @card: the card instance * @id: the id to search * * Finds the control instance with the given id from the card. * * The caller must down card->controls_rwsem before calling this function * (if the race condition can happen). * * Return: The pointer of the instance if found, or %NULL if not. * */ struct snd_kcontrol *snd_ctl_find_id(struct snd_card *card, struct snd_ctl_elem_id *id) { struct snd_kcontrol *kctl; if (snd_BUG_ON(!card || !id)) return NULL; if (id->numid != 0) return snd_ctl_find_numid(card, id->numid); list_for_each_entry(kctl, &card->controls, list) { if (kctl->id.iface != id->iface) continue; if (kctl->id.device != id->device) continue; if (kctl->id.subdevice != id->subdevice) continue; if (strncmp(kctl->id.name, id->name, sizeof(kctl->id.name))) continue; if (kctl->id.index > id->index) continue; if (kctl->id.index + kctl->count <= id->index) continue; return kctl; } return NULL; } EXPORT_SYMBOL(snd_ctl_find_id); static int snd_ctl_card_info(struct snd_card *card, struct snd_ctl_file * ctl, unsigned int cmd, void __user *arg) { struct snd_ctl_card_info *info; info = kzalloc(sizeof(*info), GFP_KERNEL); if (! 
info) return -ENOMEM; down_read(&snd_ioctl_rwsem); info->card = card->number; strlcpy(info->id, card->id, sizeof(info->id)); strlcpy(info->driver, card->driver, sizeof(info->driver)); strlcpy(info->name, card->shortname, sizeof(info->name)); strlcpy(info->longname, card->longname, sizeof(info->longname)); strlcpy(info->mixername, card->mixername, sizeof(info->mixername)); strlcpy(info->components, card->components, sizeof(info->components)); up_read(&snd_ioctl_rwsem); if (copy_to_user(arg, info, sizeof(struct snd_ctl_card_info))) { kfree(info); return -EFAULT; } kfree(info); return 0; } static int snd_ctl_elem_list(struct snd_card *card, struct snd_ctl_elem_list __user *_list) { struct list_head *plist; struct snd_ctl_elem_list list; struct snd_kcontrol *kctl; struct snd_ctl_elem_id *dst, *id; unsigned int offset, space, jidx; if (copy_from_user(&list, _list, sizeof(list))) return -EFAULT; offset = list.offset; space = list.space; /* try limit maximum space */ if (space > 16384) return -ENOMEM; if (space > 0) { /* allocate temporary buffer for atomic operation */ dst = vmalloc(space * sizeof(struct snd_ctl_elem_id)); if (dst == NULL) return -ENOMEM; down_read(&card->controls_rwsem); list.count = card->controls_count; plist = card->controls.next; while (plist != &card->controls) { if (offset == 0) break; kctl = snd_kcontrol(plist); if (offset < kctl->count) break; offset -= kctl->count; plist = plist->next; } list.used = 0; id = dst; while (space > 0 && plist != &card->controls) { kctl = snd_kcontrol(plist); for (jidx = offset; space > 0 && jidx < kctl->count; jidx++) { snd_ctl_build_ioff(id, kctl, jidx); id++; space--; list.used++; } plist = plist->next; offset = 0; } up_read(&card->controls_rwsem); if (list.used > 0 && copy_to_user(list.pids, dst, list.used * sizeof(struct snd_ctl_elem_id))) { vfree(dst); return -EFAULT; } vfree(dst); } else { down_read(&card->controls_rwsem); list.count = card->controls_count; up_read(&card->controls_rwsem); } if 
(copy_to_user(_list, &list, sizeof(list))) return -EFAULT; return 0; } static int snd_ctl_elem_info(struct snd_ctl_file *ctl, struct snd_ctl_elem_info *info) { struct snd_card *card = ctl->card; struct snd_kcontrol *kctl; struct snd_kcontrol_volatile *vd; unsigned int index_offset; int result; down_read(&card->controls_rwsem); kctl = snd_ctl_find_id(card, &info->id); if (kctl == NULL) { up_read(&card->controls_rwsem); return -ENOENT; } #ifdef CONFIG_SND_DEBUG info->access = 0; #endif result = kctl->info(kctl, info); if (result >= 0) { snd_BUG_ON(info->access); index_offset = snd_ctl_get_ioff(kctl, &info->id); vd = &kctl->vd[index_offset]; snd_ctl_build_ioff(&info->id, kctl, index_offset); info->access = vd->access; if (vd->owner) { info->access |= SNDRV_CTL_ELEM_ACCESS_LOCK; if (vd->owner == ctl) info->access |= SNDRV_CTL_ELEM_ACCESS_OWNER; info->owner = pid_vnr(vd->owner->pid); } else { info->owner = -1; } } up_read(&card->controls_rwsem); return result; } static int snd_ctl_elem_info_user(struct snd_ctl_file *ctl, struct snd_ctl_elem_info __user *_info) { struct snd_ctl_elem_info info; int result; if (copy_from_user(&info, _info, sizeof(info))) return -EFAULT; snd_power_lock(ctl->card); result = snd_power_wait(ctl->card, SNDRV_CTL_POWER_D0); if (result >= 0) result = snd_ctl_elem_info(ctl, &info); snd_power_unlock(ctl->card); if (result >= 0) if (copy_to_user(_info, &info, sizeof(info))) return -EFAULT; return result; } static int snd_ctl_elem_read(struct snd_card *card, struct snd_ctl_elem_value *control) { struct snd_kcontrol *kctl; struct snd_kcontrol_volatile *vd; unsigned int index_offset; int result; down_read(&card->controls_rwsem); kctl = snd_ctl_find_id(card, &control->id); if (kctl == NULL) { result = -ENOENT; } else { index_offset = snd_ctl_get_ioff(kctl, &control->id); vd = &kctl->vd[index_offset]; if ((vd->access & SNDRV_CTL_ELEM_ACCESS_READ) && kctl->get != NULL) { snd_ctl_build_ioff(&control->id, kctl, index_offset); result = kctl->get(kctl, 
control); } else result = -EPERM; } up_read(&card->controls_rwsem); return result; } static int snd_ctl_elem_read_user(struct snd_card *card, struct snd_ctl_elem_value __user *_control) { struct snd_ctl_elem_value *control; int result; control = memdup_user(_control, sizeof(*control)); if (IS_ERR(control)) return PTR_ERR(control); snd_power_lock(card); result = snd_power_wait(card, SNDRV_CTL_POWER_D0); if (result >= 0) result = snd_ctl_elem_read(card, control); snd_power_unlock(card); if (result >= 0) if (copy_to_user(_control, control, sizeof(*control))) result = -EFAULT; kfree(control); return result; } static int snd_ctl_elem_write(struct snd_card *card, struct snd_ctl_file *file, struct snd_ctl_elem_value *control) { struct snd_kcontrol *kctl; struct snd_kcontrol_volatile *vd; unsigned int index_offset; int result; down_read(&card->controls_rwsem); kctl = snd_ctl_find_id(card, &control->id); if (kctl == NULL) { result = -ENOENT; } else { index_offset = snd_ctl_get_ioff(kctl, &control->id); vd = &kctl->vd[index_offset]; if (!(vd->access & SNDRV_CTL_ELEM_ACCESS_WRITE) || kctl->put == NULL || (file && vd->owner && vd->owner != file)) { result = -EPERM; } else { snd_ctl_build_ioff(&control->id, kctl, index_offset); result = kctl->put(kctl, control); } if (result > 0) { up_read(&card->controls_rwsem); snd_ctl_notify(card, SNDRV_CTL_EVENT_MASK_VALUE, &control->id); return 0; } } up_read(&card->controls_rwsem); return result; } static int snd_ctl_elem_write_user(struct snd_ctl_file *file, struct snd_ctl_elem_value __user *_control) { struct snd_ctl_elem_value *control; struct snd_card *card; int result; control = memdup_user(_control, sizeof(*control)); if (IS_ERR(control)) return PTR_ERR(control); card = file->card; snd_power_lock(card); result = snd_power_wait(card, SNDRV_CTL_POWER_D0); if (result >= 0) result = snd_ctl_elem_write(card, file, control); snd_power_unlock(card); if (result >= 0) if (copy_to_user(_control, control, sizeof(*control))) result = -EFAULT; 
kfree(control); return result; } static int snd_ctl_elem_lock(struct snd_ctl_file *file, struct snd_ctl_elem_id __user *_id) { struct snd_card *card = file->card; struct snd_ctl_elem_id id; struct snd_kcontrol *kctl; struct snd_kcontrol_volatile *vd; int result; if (copy_from_user(&id, _id, sizeof(id))) return -EFAULT; down_write(&card->controls_rwsem); kctl = snd_ctl_find_id(card, &id); if (kctl == NULL) { result = -ENOENT; } else { vd = &kctl->vd[snd_ctl_get_ioff(kctl, &id)]; if (vd->owner != NULL) result = -EBUSY; else { vd->owner = file; result = 0; } } up_write(&card->controls_rwsem); return result; } static int snd_ctl_elem_unlock(struct snd_ctl_file *file, struct snd_ctl_elem_id __user *_id) { struct snd_card *card = file->card; struct snd_ctl_elem_id id; struct snd_kcontrol *kctl; struct snd_kcontrol_volatile *vd; int result; if (copy_from_user(&id, _id, sizeof(id))) return -EFAULT; down_write(&card->controls_rwsem); kctl = snd_ctl_find_id(card, &id); if (kctl == NULL) { result = -ENOENT; } else { vd = &kctl->vd[snd_ctl_get_ioff(kctl, &id)]; if (vd->owner == NULL) result = -EINVAL; else if (vd->owner != file) result = -EPERM; else { vd->owner = NULL; result = 0; } } up_write(&card->controls_rwsem); return result; } struct user_element { struct snd_ctl_elem_info info; struct snd_card *card; void *elem_data; /* element data */ unsigned long elem_data_size; /* size of element data in bytes */ void *tlv_data; /* TLV data */ unsigned long tlv_data_size; /* TLV data size */ void *priv_data; /* private data (like strings for enumerated type) */ }; static int snd_ctl_elem_user_info(struct snd_kcontrol *kcontrol, struct snd_ctl_elem_info *uinfo) { struct user_element *ue = kcontrol->private_data; *uinfo = ue->info; return 0; } static int snd_ctl_elem_user_enum_info(struct snd_kcontrol *kcontrol, struct snd_ctl_elem_info *uinfo) { struct user_element *ue = kcontrol->private_data; const char *names; unsigned int item; item = uinfo->value.enumerated.item; *uinfo = 
ue->info; item = min(item, uinfo->value.enumerated.items - 1); uinfo->value.enumerated.item = item; names = ue->priv_data; for (; item > 0; --item) names += strlen(names) + 1; strcpy(uinfo->value.enumerated.name, names); return 0; } static int snd_ctl_elem_user_get(struct snd_kcontrol *kcontrol, struct snd_ctl_elem_value *ucontrol) { struct user_element *ue = kcontrol->private_data; mutex_lock(&ue->card->user_ctl_lock); memcpy(&ucontrol->value, ue->elem_data, ue->elem_data_size); mutex_unlock(&ue->card->user_ctl_lock); return 0; } static int snd_ctl_elem_user_put(struct snd_kcontrol *kcontrol, struct snd_ctl_elem_value *ucontrol) { int change; struct user_element *ue = kcontrol->private_data; mutex_lock(&ue->card->user_ctl_lock); change = memcmp(&ucontrol->value, ue->elem_data, ue->elem_data_size) != 0; if (change) memcpy(ue->elem_data, &ucontrol->value, ue->elem_data_size); mutex_unlock(&ue->card->user_ctl_lock); return change; } static int snd_ctl_elem_user_tlv(struct snd_kcontrol *kcontrol, int op_flag, unsigned int size, unsigned int __user *tlv) { struct user_element *ue = kcontrol->private_data; int change = 0; void *new_data; if (op_flag > 0) { if (size > 1024 * 128) /* sane value */ return -EINVAL; new_data = memdup_user(tlv, size); if (IS_ERR(new_data)) return PTR_ERR(new_data); mutex_lock(&ue->card->user_ctl_lock); change = ue->tlv_data_size != size; if (!change) change = memcmp(ue->tlv_data, new_data, size); kfree(ue->tlv_data); ue->tlv_data = new_data; ue->tlv_data_size = size; mutex_unlock(&ue->card->user_ctl_lock); } else { int ret = 0; mutex_lock(&ue->card->user_ctl_lock); if (!ue->tlv_data_size || !ue->tlv_data) { ret = -ENXIO; goto err_unlock; } if (size < ue->tlv_data_size) { ret = -ENOSPC; goto err_unlock; } if (copy_to_user(tlv, ue->tlv_data, ue->tlv_data_size)) ret = -EFAULT; err_unlock: mutex_unlock(&ue->card->user_ctl_lock); if (ret) return ret; } return change; } static int snd_ctl_elem_init_enum_names(struct user_element *ue) { char *names, 
*p; size_t buf_len, name_len; unsigned int i; const uintptr_t user_ptrval = ue->info.value.enumerated.names_ptr; if (ue->info.value.enumerated.names_length > 64 * 1024) return -EINVAL; names = memdup_user((const void __user *)user_ptrval, ue->info.value.enumerated.names_length); if (IS_ERR(names)) return PTR_ERR(names); /* check that there are enough valid names */ buf_len = ue->info.value.enumerated.names_length; p = names; for (i = 0; i < ue->info.value.enumerated.items; ++i) { name_len = strnlen(p, buf_len); if (name_len == 0 || name_len >= 64 || name_len == buf_len) { kfree(names); return -EINVAL; } p += name_len + 1; buf_len -= name_len + 1; } ue->priv_data = names; ue->info.value.enumerated.names_ptr = 0; return 0; } static void snd_ctl_elem_user_free(struct snd_kcontrol *kcontrol) { struct user_element *ue = kcontrol->private_data; kfree(ue->tlv_data); kfree(ue->priv_data); kfree(ue); } static int snd_ctl_elem_add(struct snd_ctl_file *file, struct snd_ctl_elem_info *info, int replace) { struct snd_card *card = file->card; struct snd_kcontrol kctl, *_kctl; unsigned int access; long private_size; struct user_element *ue; int idx, err; if (!replace && card->user_ctl_count >= MAX_USER_CONTROLS) return -ENOMEM; if (info->count < 1) return -EINVAL; access = info->access == 0 ? SNDRV_CTL_ELEM_ACCESS_READWRITE : (info->access & (SNDRV_CTL_ELEM_ACCESS_READWRITE| SNDRV_CTL_ELEM_ACCESS_INACTIVE| SNDRV_CTL_ELEM_ACCESS_TLV_READWRITE)); info->id.numid = 0; memset(&kctl, 0, sizeof(kctl)); down_write(&card->controls_rwsem); _kctl = snd_ctl_find_id(card, &info->id); err = 0; if (_kctl) { if (replace) err = snd_ctl_remove(card, _kctl); else err = -EBUSY; } else { if (replace) err = -ENOENT; } up_write(&card->controls_rwsem); if (err < 0) return err; memcpy(&kctl.id, &info->id, sizeof(info->id)); kctl.count = info->owner ? 
info->owner : 1; access |= SNDRV_CTL_ELEM_ACCESS_USER; if (info->type == SNDRV_CTL_ELEM_TYPE_ENUMERATED) kctl.info = snd_ctl_elem_user_enum_info; else kctl.info = snd_ctl_elem_user_info; if (access & SNDRV_CTL_ELEM_ACCESS_READ) kctl.get = snd_ctl_elem_user_get; if (access & SNDRV_CTL_ELEM_ACCESS_WRITE) kctl.put = snd_ctl_elem_user_put; if (access & SNDRV_CTL_ELEM_ACCESS_TLV_READWRITE) { kctl.tlv.c = snd_ctl_elem_user_tlv; access |= SNDRV_CTL_ELEM_ACCESS_TLV_CALLBACK; } switch (info->type) { case SNDRV_CTL_ELEM_TYPE_BOOLEAN: case SNDRV_CTL_ELEM_TYPE_INTEGER: private_size = sizeof(long); if (info->count > 128) return -EINVAL; break; case SNDRV_CTL_ELEM_TYPE_INTEGER64: private_size = sizeof(long long); if (info->count > 64) return -EINVAL; break; case SNDRV_CTL_ELEM_TYPE_ENUMERATED: private_size = sizeof(unsigned int); if (info->count > 128 || info->value.enumerated.items == 0) return -EINVAL; break; case SNDRV_CTL_ELEM_TYPE_BYTES: private_size = sizeof(unsigned char); if (info->count > 512) return -EINVAL; break; case SNDRV_CTL_ELEM_TYPE_IEC958: private_size = sizeof(struct snd_aes_iec958); if (info->count != 1) return -EINVAL; break; default: return -EINVAL; } private_size *= info->count; ue = kzalloc(sizeof(struct user_element) + private_size, GFP_KERNEL); if (ue == NULL) return -ENOMEM; ue->card = card; ue->info = *info; ue->info.access = 0; ue->elem_data = (char *)ue + sizeof(*ue); ue->elem_data_size = private_size; if (ue->info.type == SNDRV_CTL_ELEM_TYPE_ENUMERATED) { err = snd_ctl_elem_init_enum_names(ue); if (err < 0) { kfree(ue); return err; } } kctl.private_free = snd_ctl_elem_user_free; _kctl = snd_ctl_new(&kctl, access); if (_kctl == NULL) { kfree(ue->priv_data); kfree(ue); return -ENOMEM; } _kctl->private_data = ue; for (idx = 0; idx < _kctl->count; idx++) _kctl->vd[idx].owner = file; err = snd_ctl_add(card, _kctl); if (err < 0) return err; down_write(&card->controls_rwsem); card->user_ctl_count++; up_write(&card->controls_rwsem); return 0; } static int 
snd_ctl_elem_add_user(struct snd_ctl_file *file, struct snd_ctl_elem_info __user *_info, int replace) { struct snd_ctl_elem_info info; if (copy_from_user(&info, _info, sizeof(info))) return -EFAULT; return snd_ctl_elem_add(file, &info, replace); } static int snd_ctl_elem_remove(struct snd_ctl_file *file, struct snd_ctl_elem_id __user *_id) { struct snd_ctl_elem_id id; if (copy_from_user(&id, _id, sizeof(id))) return -EFAULT; return snd_ctl_remove_user_ctl(file, &id); } static int snd_ctl_subscribe_events(struct snd_ctl_file *file, int __user *ptr) { int subscribe; if (get_user(subscribe, ptr)) return -EFAULT; if (subscribe < 0) { subscribe = file->subscribed; if (put_user(subscribe, ptr)) return -EFAULT; return 0; } if (subscribe) { file->subscribed = 1; return 0; } else if (file->subscribed) { snd_ctl_empty_read_queue(file); file->subscribed = 0; } return 0; } static int snd_ctl_tlv_ioctl(struct snd_ctl_file *file, struct snd_ctl_tlv __user *_tlv, int op_flag) { struct snd_card *card = file->card; struct snd_ctl_tlv tlv; struct snd_kcontrol *kctl; struct snd_kcontrol_volatile *vd; unsigned int len; int err = 0; if (copy_from_user(&tlv, _tlv, sizeof(tlv))) return -EFAULT; if (tlv.length < sizeof(unsigned int) * 2) return -EINVAL; down_read(&card->controls_rwsem); kctl = snd_ctl_find_numid(card, tlv.numid); if (kctl == NULL) { err = -ENOENT; goto __kctl_end; } if (kctl->tlv.p == NULL) { err = -ENXIO; goto __kctl_end; } vd = &kctl->vd[tlv.numid - kctl->id.numid]; if ((op_flag == 0 && (vd->access & SNDRV_CTL_ELEM_ACCESS_TLV_READ) == 0) || (op_flag > 0 && (vd->access & SNDRV_CTL_ELEM_ACCESS_TLV_WRITE) == 0) || (op_flag < 0 && (vd->access & SNDRV_CTL_ELEM_ACCESS_TLV_COMMAND) == 0)) { err = -ENXIO; goto __kctl_end; } if (vd->access & SNDRV_CTL_ELEM_ACCESS_TLV_CALLBACK) { if (vd->owner != NULL && vd->owner != file) { err = -EPERM; goto __kctl_end; } err = kctl->tlv.c(kctl, op_flag, tlv.length, _tlv->tlv); if (err > 0) { up_read(&card->controls_rwsem); snd_ctl_notify(card, 
SNDRV_CTL_EVENT_MASK_TLV, &kctl->id); return 0; } } else { if (op_flag) { err = -ENXIO; goto __kctl_end; } len = kctl->tlv.p[1] + 2 * sizeof(unsigned int); if (tlv.length < len) { err = -ENOMEM; goto __kctl_end; } if (copy_to_user(_tlv->tlv, kctl->tlv.p, len)) err = -EFAULT; } __kctl_end: up_read(&card->controls_rwsem); return err; } static long snd_ctl_ioctl(struct file *file, unsigned int cmd, unsigned long arg) { struct snd_ctl_file *ctl; struct snd_card *card; struct snd_kctl_ioctl *p; void __user *argp = (void __user *)arg; int __user *ip = argp; int err; ctl = file->private_data; card = ctl->card; if (snd_BUG_ON(!card)) return -ENXIO; switch (cmd) { case SNDRV_CTL_IOCTL_PVERSION: return put_user(SNDRV_CTL_VERSION, ip) ? -EFAULT : 0; case SNDRV_CTL_IOCTL_CARD_INFO: return snd_ctl_card_info(card, ctl, cmd, argp); case SNDRV_CTL_IOCTL_ELEM_LIST: return snd_ctl_elem_list(card, argp); case SNDRV_CTL_IOCTL_ELEM_INFO: return snd_ctl_elem_info_user(ctl, argp); case SNDRV_CTL_IOCTL_ELEM_READ: return snd_ctl_elem_read_user(card, argp); case SNDRV_CTL_IOCTL_ELEM_WRITE: return snd_ctl_elem_write_user(ctl, argp); case SNDRV_CTL_IOCTL_ELEM_LOCK: return snd_ctl_elem_lock(ctl, argp); case SNDRV_CTL_IOCTL_ELEM_UNLOCK: return snd_ctl_elem_unlock(ctl, argp); case SNDRV_CTL_IOCTL_ELEM_ADD: return snd_ctl_elem_add_user(ctl, argp, 0); case SNDRV_CTL_IOCTL_ELEM_REPLACE: return snd_ctl_elem_add_user(ctl, argp, 1); case SNDRV_CTL_IOCTL_ELEM_REMOVE: return snd_ctl_elem_remove(ctl, argp); case SNDRV_CTL_IOCTL_SUBSCRIBE_EVENTS: return snd_ctl_subscribe_events(ctl, ip); case SNDRV_CTL_IOCTL_TLV_READ: return snd_ctl_tlv_ioctl(ctl, argp, 0); case SNDRV_CTL_IOCTL_TLV_WRITE: return snd_ctl_tlv_ioctl(ctl, argp, 1); case SNDRV_CTL_IOCTL_TLV_COMMAND: return snd_ctl_tlv_ioctl(ctl, argp, -1); case SNDRV_CTL_IOCTL_POWER: return -ENOPROTOOPT; case SNDRV_CTL_IOCTL_POWER_STATE: #ifdef CONFIG_PM return put_user(card->power_state, ip) ? -EFAULT : 0; #else return put_user(SNDRV_CTL_POWER_D0, ip) ? 
-EFAULT : 0; #endif } down_read(&snd_ioctl_rwsem); list_for_each_entry(p, &snd_control_ioctls, list) { err = p->fioctl(card, ctl, cmd, arg); if (err != -ENOIOCTLCMD) { up_read(&snd_ioctl_rwsem); return err; } } up_read(&snd_ioctl_rwsem); dev_dbg(card->dev, "unknown ioctl = 0x%x\n", cmd); return -ENOTTY; } static ssize_t snd_ctl_read(struct file *file, char __user *buffer, size_t count, loff_t * offset) { struct snd_ctl_file *ctl; int err = 0; ssize_t result = 0; ctl = file->private_data; if (snd_BUG_ON(!ctl || !ctl->card)) return -ENXIO; if (!ctl->subscribed) return -EBADFD; if (count < sizeof(struct snd_ctl_event)) return -EINVAL; spin_lock_irq(&ctl->read_lock); while (count >= sizeof(struct snd_ctl_event)) { struct snd_ctl_event ev; struct snd_kctl_event *kev; while (list_empty(&ctl->events)) { wait_queue_t wait; if ((file->f_flags & O_NONBLOCK) != 0 || result > 0) { err = -EAGAIN; goto __end_lock; } init_waitqueue_entry(&wait, current); add_wait_queue(&ctl->change_sleep, &wait); set_current_state(TASK_INTERRUPTIBLE); spin_unlock_irq(&ctl->read_lock); schedule(); remove_wait_queue(&ctl->change_sleep, &wait); if (ctl->card->shutdown) return -ENODEV; if (signal_pending(current)) return -ERESTARTSYS; spin_lock_irq(&ctl->read_lock); } kev = snd_kctl_event(ctl->events.next); ev.type = SNDRV_CTL_EVENT_ELEM; ev.data.elem.mask = kev->mask; ev.data.elem.id = kev->id; list_del(&kev->list); spin_unlock_irq(&ctl->read_lock); kfree(kev); if (copy_to_user(buffer, &ev, sizeof(struct snd_ctl_event))) { err = -EFAULT; goto __end; } spin_lock_irq(&ctl->read_lock); buffer += sizeof(struct snd_ctl_event); count -= sizeof(struct snd_ctl_event); result += sizeof(struct snd_ctl_event); } __end_lock: spin_unlock_irq(&ctl->read_lock); __end: return result > 0 ? 
result : err; } static unsigned int snd_ctl_poll(struct file *file, poll_table * wait) { unsigned int mask; struct snd_ctl_file *ctl; ctl = file->private_data; if (!ctl->subscribed) return 0; poll_wait(file, &ctl->change_sleep, wait); mask = 0; if (!list_empty(&ctl->events)) mask |= POLLIN | POLLRDNORM; return mask; } /* * register the device-specific control-ioctls. * called from each device manager like pcm.c, hwdep.c, etc. */ static int _snd_ctl_register_ioctl(snd_kctl_ioctl_func_t fcn, struct list_head *lists) { struct snd_kctl_ioctl *pn; pn = kzalloc(sizeof(struct snd_kctl_ioctl), GFP_KERNEL); if (pn == NULL) return -ENOMEM; pn->fioctl = fcn; down_write(&snd_ioctl_rwsem); list_add_tail(&pn->list, lists); up_write(&snd_ioctl_rwsem); return 0; } int snd_ctl_register_ioctl(snd_kctl_ioctl_func_t fcn) { return _snd_ctl_register_ioctl(fcn, &snd_control_ioctls); } EXPORT_SYMBOL(snd_ctl_register_ioctl); #ifdef CONFIG_COMPAT int snd_ctl_register_ioctl_compat(snd_kctl_ioctl_func_t fcn) { return _snd_ctl_register_ioctl(fcn, &snd_control_compat_ioctls); } EXPORT_SYMBOL(snd_ctl_register_ioctl_compat); #endif /* * de-register the device-specific control-ioctls. 
*/ static int _snd_ctl_unregister_ioctl(snd_kctl_ioctl_func_t fcn, struct list_head *lists) { struct snd_kctl_ioctl *p; if (snd_BUG_ON(!fcn)) return -EINVAL; down_write(&snd_ioctl_rwsem); list_for_each_entry(p, lists, list) { if (p->fioctl == fcn) { list_del(&p->list); up_write(&snd_ioctl_rwsem); kfree(p); return 0; } } up_write(&snd_ioctl_rwsem); snd_BUG(); return -EINVAL; } int snd_ctl_unregister_ioctl(snd_kctl_ioctl_func_t fcn) { return _snd_ctl_unregister_ioctl(fcn, &snd_control_ioctls); } EXPORT_SYMBOL(snd_ctl_unregister_ioctl); #ifdef CONFIG_COMPAT int snd_ctl_unregister_ioctl_compat(snd_kctl_ioctl_func_t fcn) { return _snd_ctl_unregister_ioctl(fcn, &snd_control_compat_ioctls); } EXPORT_SYMBOL(snd_ctl_unregister_ioctl_compat); #endif static int snd_ctl_fasync(int fd, struct file * file, int on) { struct snd_ctl_file *ctl; ctl = file->private_data; return fasync_helper(fd, file, on, &ctl->fasync); } /* * ioctl32 compat */ #ifdef CONFIG_COMPAT #include "control_compat.c" #else #define snd_ctl_ioctl_compat NULL #endif /* * INIT PART */ static const struct file_operations snd_ctl_f_ops = { .owner = THIS_MODULE, .read = snd_ctl_read, .open = snd_ctl_open, .release = snd_ctl_release, .llseek = no_llseek, .poll = snd_ctl_poll, .unlocked_ioctl = snd_ctl_ioctl, .compat_ioctl = snd_ctl_ioctl_compat, .fasync = snd_ctl_fasync, }; /* * registration of the control device */ static int snd_ctl_dev_register(struct snd_device *device) { struct snd_card *card = device->device_data; int err, cardnum; char name[16]; if (snd_BUG_ON(!card)) return -ENXIO; cardnum = card->number; if (snd_BUG_ON(cardnum < 0 || cardnum >= SNDRV_CARDS)) return -ENXIO; sprintf(name, "controlC%i", cardnum); if ((err = snd_register_device(SNDRV_DEVICE_TYPE_CONTROL, card, -1, &snd_ctl_f_ops, card, name)) < 0) return err; return 0; } /* * disconnection of the control device */ static int snd_ctl_dev_disconnect(struct snd_device *device) { struct snd_card *card = device->device_data; struct snd_ctl_file 
*ctl; int err, cardnum; if (snd_BUG_ON(!card)) return -ENXIO; cardnum = card->number; if (snd_BUG_ON(cardnum < 0 || cardnum >= SNDRV_CARDS)) return -ENXIO; read_lock(&card->ctl_files_rwlock); list_for_each_entry(ctl, &card->ctl_files, list) { wake_up(&ctl->change_sleep); kill_fasync(&ctl->fasync, SIGIO, POLL_ERR); } read_unlock(&card->ctl_files_rwlock); if ((err = snd_unregister_device(SNDRV_DEVICE_TYPE_CONTROL, card, -1)) < 0) return err; return 0; } /* * free all controls */ static int snd_ctl_dev_free(struct snd_device *device) { struct snd_card *card = device->device_data; struct snd_kcontrol *control; down_write(&card->controls_rwsem); while (!list_empty(&card->controls)) { control = snd_kcontrol(card->controls.next); snd_ctl_remove(card, control); } up_write(&card->controls_rwsem); return 0; } /* * create control core: * called from init.c */ int snd_ctl_create(struct snd_card *card) { static struct snd_device_ops ops = { .dev_free = snd_ctl_dev_free, .dev_register = snd_ctl_dev_register, .dev_disconnect = snd_ctl_dev_disconnect, }; if (snd_BUG_ON(!card)) return -ENXIO; return snd_device_new(card, SNDRV_DEV_CONTROL, card, &ops); } /* * Frequently used control callbacks/helpers */ int snd_ctl_boolean_mono_info(struct snd_kcontrol *kcontrol, struct snd_ctl_elem_info *uinfo) { uinfo->type = SNDRV_CTL_ELEM_TYPE_BOOLEAN; uinfo->count = 1; uinfo->value.integer.min = 0; uinfo->value.integer.max = 1; return 0; } EXPORT_SYMBOL(snd_ctl_boolean_mono_info); int snd_ctl_boolean_stereo_info(struct snd_kcontrol *kcontrol, struct snd_ctl_elem_info *uinfo) { uinfo->type = SNDRV_CTL_ELEM_TYPE_BOOLEAN; uinfo->count = 2; uinfo->value.integer.min = 0; uinfo->value.integer.max = 1; return 0; } EXPORT_SYMBOL(snd_ctl_boolean_stereo_info); /** * snd_ctl_enum_info - fills the info structure for an enumerated control * @info: the structure to be filled * @channels: the number of the control's channels; often one * @items: the number of control values; also the size of @names * @names: 
an array containing the names of all control values
 *
 * Sets all required fields in @info to their appropriate values.
 * If the control's accessibility is not the default (readable and writable),
 * the caller has to fill @info->access.
 *
 * Return: Zero.
 */
int snd_ctl_enum_info(struct snd_ctl_elem_info *info, unsigned int channels,
		      unsigned int items, const char *const names[])
{
	unsigned int item;

	info->type = SNDRV_CTL_ELEM_TYPE_ENUMERATED;
	info->count = channels;
	info->value.enumerated.items = items;

	/* clamp the requested item index into the valid range */
	item = info->value.enumerated.item;
	if (item >= items)
		item = items - 1;
	info->value.enumerated.item = item;

	strlcpy(info->value.enumerated.name, names[item],
		sizeof(info->value.enumerated.name));
	return 0;
}
EXPORT_SYMBOL(snd_ctl_enum_info);
/* * Routines for driver control interface * Copyright (c) by Jaroslav Kysela <perex@perex.cz> * * * This program is free software; you can redistribute it and/or modify * it under the terms of the GNU General Public License as published by * the Free Software Foundation; either version 2 of the License, or * (at your option) any later version. * * This program is distributed in the hope that it will be useful, * but WITHOUT ANY WARRANTY; without even the implied warranty of * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the * GNU General Public License for more details. * * You should have received a copy of the GNU General Public License * along with this program; if not, write to the Free Software * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA * */ #include <linux/threads.h> #include <linux/interrupt.h> #include <linux/module.h> #include <linux/slab.h> #include <linux/vmalloc.h> #include <linux/time.h> #include <sound/core.h> #include <sound/minors.h> #include <sound/info.h> #include <sound/control.h> /* max number of user-defined controls */ #define MAX_USER_CONTROLS 32 #define MAX_CONTROL_COUNT 1028 struct snd_kctl_ioctl { struct list_head list; /* list of all ioctls */ snd_kctl_ioctl_func_t fioctl; }; static DECLARE_RWSEM(snd_ioctl_rwsem); static LIST_HEAD(snd_control_ioctls); #ifdef CONFIG_COMPAT static LIST_HEAD(snd_control_compat_ioctls); #endif static int snd_ctl_open(struct inode *inode, struct file *file) { unsigned long flags; struct snd_card *card; struct snd_ctl_file *ctl; int err; err = nonseekable_open(inode, file); if (err < 0) return err; card = snd_lookup_minor_data(iminor(inode), SNDRV_DEVICE_TYPE_CONTROL); if (!card) { err = -ENODEV; goto __error1; } err = snd_card_file_add(card, file); if (err < 0) { err = -ENODEV; goto __error1; } if (!try_module_get(card->module)) { err = -EFAULT; goto __error2; } ctl = kzalloc(sizeof(*ctl), GFP_KERNEL); if (ctl == NULL) { err = -ENOMEM; goto __error; } 
INIT_LIST_HEAD(&ctl->events); init_waitqueue_head(&ctl->change_sleep); spin_lock_init(&ctl->read_lock); ctl->card = card; ctl->prefer_pcm_subdevice = -1; ctl->prefer_rawmidi_subdevice = -1; ctl->pid = get_pid(task_pid(current)); file->private_data = ctl; write_lock_irqsave(&card->ctl_files_rwlock, flags); list_add_tail(&ctl->list, &card->ctl_files); write_unlock_irqrestore(&card->ctl_files_rwlock, flags); snd_card_unref(card); return 0; __error: module_put(card->module); __error2: snd_card_file_remove(card, file); __error1: if (card) snd_card_unref(card); return err; } static void snd_ctl_empty_read_queue(struct snd_ctl_file * ctl) { unsigned long flags; struct snd_kctl_event *cread; spin_lock_irqsave(&ctl->read_lock, flags); while (!list_empty(&ctl->events)) { cread = snd_kctl_event(ctl->events.next); list_del(&cread->list); kfree(cread); } spin_unlock_irqrestore(&ctl->read_lock, flags); } static int snd_ctl_release(struct inode *inode, struct file *file) { unsigned long flags; struct snd_card *card; struct snd_ctl_file *ctl; struct snd_kcontrol *control; unsigned int idx; ctl = file->private_data; file->private_data = NULL; card = ctl->card; write_lock_irqsave(&card->ctl_files_rwlock, flags); list_del(&ctl->list); write_unlock_irqrestore(&card->ctl_files_rwlock, flags); down_write(&card->controls_rwsem); list_for_each_entry(control, &card->controls, list) for (idx = 0; idx < control->count; idx++) if (control->vd[idx].owner == ctl) control->vd[idx].owner = NULL; up_write(&card->controls_rwsem); snd_ctl_empty_read_queue(ctl); put_pid(ctl->pid); kfree(ctl); module_put(card->module); snd_card_file_remove(card, file); return 0; } void snd_ctl_notify(struct snd_card *card, unsigned int mask, struct snd_ctl_elem_id *id) { unsigned long flags; struct snd_ctl_file *ctl; struct snd_kctl_event *ev; if (snd_BUG_ON(!card || !id)) return; read_lock(&card->ctl_files_rwlock); #if IS_ENABLED(CONFIG_SND_MIXER_OSS) card->mixer_oss_change_count++; #endif list_for_each_entry(ctl, 
&card->ctl_files, list) { if (!ctl->subscribed) continue; spin_lock_irqsave(&ctl->read_lock, flags); list_for_each_entry(ev, &ctl->events, list) { if (ev->id.numid == id->numid) { ev->mask |= mask; goto _found; } } ev = kzalloc(sizeof(*ev), GFP_ATOMIC); if (ev) { ev->id = *id; ev->mask = mask; list_add_tail(&ev->list, &ctl->events); } else { dev_err(card->dev, "No memory available to allocate event\n"); } _found: wake_up(&ctl->change_sleep); spin_unlock_irqrestore(&ctl->read_lock, flags); kill_fasync(&ctl->fasync, SIGIO, POLL_IN); } read_unlock(&card->ctl_files_rwlock); } EXPORT_SYMBOL(snd_ctl_notify); /** * snd_ctl_new - create a control instance from the template * @control: the control template * @access: the default control access * * Allocates a new struct snd_kcontrol instance and copies the given template * to the new instance. It does not copy volatile data (access). * * Return: The pointer of the new instance, or %NULL on failure. */ static struct snd_kcontrol *snd_ctl_new(struct snd_kcontrol *control, unsigned int access) { struct snd_kcontrol *kctl; unsigned int idx; if (snd_BUG_ON(!control || !control->count)) return NULL; if (control->count > MAX_CONTROL_COUNT) return NULL; kctl = kzalloc(sizeof(*kctl) + sizeof(struct snd_kcontrol_volatile) * control->count, GFP_KERNEL); if (kctl == NULL) { pr_err("ALSA: Cannot allocate control instance\n"); return NULL; } *kctl = *control; for (idx = 0; idx < kctl->count; idx++) kctl->vd[idx].access = access; return kctl; } /** * snd_ctl_new1 - create a control instance from the template * @ncontrol: the initialization record * @private_data: the private data to set * * Allocates a new struct snd_kcontrol instance and initialize from the given * template. When the access field of ncontrol is 0, it's assumed as * READWRITE access. When the count field is 0, it's assumes as one. * * Return: The pointer of the newly generated instance, or %NULL on failure. 
*/ struct snd_kcontrol *snd_ctl_new1(const struct snd_kcontrol_new *ncontrol, void *private_data) { struct snd_kcontrol kctl; unsigned int access; if (snd_BUG_ON(!ncontrol || !ncontrol->info)) return NULL; memset(&kctl, 0, sizeof(kctl)); kctl.id.iface = ncontrol->iface; kctl.id.device = ncontrol->device; kctl.id.subdevice = ncontrol->subdevice; if (ncontrol->name) { strlcpy(kctl.id.name, ncontrol->name, sizeof(kctl.id.name)); if (strcmp(ncontrol->name, kctl.id.name) != 0) pr_warn("ALSA: Control name '%s' truncated to '%s'\n", ncontrol->name, kctl.id.name); } kctl.id.index = ncontrol->index; kctl.count = ncontrol->count ? ncontrol->count : 1; access = ncontrol->access == 0 ? SNDRV_CTL_ELEM_ACCESS_READWRITE : (ncontrol->access & (SNDRV_CTL_ELEM_ACCESS_READWRITE| SNDRV_CTL_ELEM_ACCESS_VOLATILE| SNDRV_CTL_ELEM_ACCESS_INACTIVE| SNDRV_CTL_ELEM_ACCESS_TLV_READWRITE| SNDRV_CTL_ELEM_ACCESS_TLV_COMMAND| SNDRV_CTL_ELEM_ACCESS_TLV_CALLBACK)); kctl.info = ncontrol->info; kctl.get = ncontrol->get; kctl.put = ncontrol->put; kctl.tlv.p = ncontrol->tlv.p; kctl.private_value = ncontrol->private_value; kctl.private_data = private_data; return snd_ctl_new(&kctl, access); } EXPORT_SYMBOL(snd_ctl_new1); /** * snd_ctl_free_one - release the control instance * @kcontrol: the control instance * * Releases the control instance created via snd_ctl_new() * or snd_ctl_new1(). * Don't call this after the control was added to the card. 
*/
void snd_ctl_free_one(struct snd_kcontrol *kcontrol)
{
	if (kcontrol) {
		/* give the owner a chance to release private data first */
		if (kcontrol->private_free)
			kcontrol->private_free(kcontrol);
		kfree(kcontrol);
	}
}
EXPORT_SYMBOL(snd_ctl_free_one);

/*
 * Check whether the candidate numid range
 * [last_numid + 1, last_numid + count] collides with an existing
 * control.  On collision, advance last_numid past the conflicting
 * control and report true so the caller retries.  Called from
 * snd_ctl_add()/snd_ctl_replace() with card->controls_rwsem held
 * for writing.
 */
static bool snd_ctl_remove_numid_conflict(struct snd_card *card,
					  unsigned int count)
{
	struct snd_kcontrol *kctl;
	list_for_each_entry(kctl, &card->controls, list) {
		if (kctl->id.numid < card->last_numid + 1 + count &&
		    kctl->id.numid + kctl->count > card->last_numid + 1) {
			card->last_numid = kctl->id.numid + kctl->count - 1;
			return true;
		}
	}
	return false;
}

/*
 * Advance card->last_numid until `count` consecutive free numids are
 * available.  The retry count bounds the search so a pathological id
 * space cannot loop forever.
 */
static int snd_ctl_find_hole(struct snd_card *card, unsigned int count)
{
	unsigned int iter = 100000;
	while (snd_ctl_remove_numid_conflict(card, count)) {
		if (--iter == 0) {
			/* this situation is very unlikely */
			dev_err(card->dev, "unable to allocate new control numid\n");
			return -ENOMEM;
		}
	}
	return 0;
}

/**
 * snd_ctl_add - add the control instance to the card
 * @card: the card instance
 * @kcontrol: the control instance to add
 *
 * Adds the control instance created via snd_ctl_new() or
 * snd_ctl_new1() to the given card. Assigns also an unique
 * numid used for fast search.
 *
 * It frees automatically the control which cannot be added.
 *
 * Return: Zero if successful, or a negative error code on failure.
 *
 */
int snd_ctl_add(struct snd_card *card, struct snd_kcontrol *kcontrol)
{
	struct snd_ctl_elem_id id;
	unsigned int idx;
	int err = -EINVAL;
	if (!
kcontrol) return err; if (snd_BUG_ON(!card || !kcontrol->info)) goto error; id = kcontrol->id; down_write(&card->controls_rwsem); if (snd_ctl_find_id(card, &id)) { up_write(&card->controls_rwsem); dev_err(card->dev, "control %i:%i:%i:%s:%i is already present\n", id.iface, id.device, id.subdevice, id.name, id.index); err = -EBUSY; goto error; } if (snd_ctl_find_hole(card, kcontrol->count) < 0) { up_write(&card->controls_rwsem); err = -ENOMEM; goto error; } list_add_tail(&kcontrol->list, &card->controls); card->controls_count += kcontrol->count; kcontrol->id.numid = card->last_numid + 1; card->last_numid += kcontrol->count; up_write(&card->controls_rwsem); for (idx = 0; idx < kcontrol->count; idx++, id.index++, id.numid++) snd_ctl_notify(card, SNDRV_CTL_EVENT_MASK_ADD, &id); return 0; error: snd_ctl_free_one(kcontrol); return err; } EXPORT_SYMBOL(snd_ctl_add); /** * snd_ctl_replace - replace the control instance of the card * @card: the card instance * @kcontrol: the control instance to replace * @add_on_replace: add the control if not already added * * Replaces the given control. If the given control does not exist * and the add_on_replace flag is set, the control is added. If the * control exists, it is destroyed first. * * It frees automatically the control which cannot be added or replaced. * * Return: Zero if successful, or a negative error code on failure. 
*/ int snd_ctl_replace(struct snd_card *card, struct snd_kcontrol *kcontrol, bool add_on_replace) { struct snd_ctl_elem_id id; unsigned int idx; struct snd_kcontrol *old; int ret; if (!kcontrol) return -EINVAL; if (snd_BUG_ON(!card || !kcontrol->info)) { ret = -EINVAL; goto error; } id = kcontrol->id; down_write(&card->controls_rwsem); old = snd_ctl_find_id(card, &id); if (!old) { if (add_on_replace) goto add; up_write(&card->controls_rwsem); ret = -EINVAL; goto error; } ret = snd_ctl_remove(card, old); if (ret < 0) { up_write(&card->controls_rwsem); goto error; } add: if (snd_ctl_find_hole(card, kcontrol->count) < 0) { up_write(&card->controls_rwsem); ret = -ENOMEM; goto error; } list_add_tail(&kcontrol->list, &card->controls); card->controls_count += kcontrol->count; kcontrol->id.numid = card->last_numid + 1; card->last_numid += kcontrol->count; up_write(&card->controls_rwsem); for (idx = 0; idx < kcontrol->count; idx++, id.index++, id.numid++) snd_ctl_notify(card, SNDRV_CTL_EVENT_MASK_ADD, &id); return 0; error: snd_ctl_free_one(kcontrol); return ret; } EXPORT_SYMBOL(snd_ctl_replace); /** * snd_ctl_remove - remove the control from the card and release it * @card: the card instance * @kcontrol: the control instance to remove * * Removes the control from the card and then releases the instance. * You don't need to call snd_ctl_free_one(). You must be in * the write lock - down_write(&card->controls_rwsem). * * Return: 0 if successful, or a negative error code on failure. 
*/ int snd_ctl_remove(struct snd_card *card, struct snd_kcontrol *kcontrol) { struct snd_ctl_elem_id id; unsigned int idx; if (snd_BUG_ON(!card || !kcontrol)) return -EINVAL; list_del(&kcontrol->list); card->controls_count -= kcontrol->count; id = kcontrol->id; for (idx = 0; idx < kcontrol->count; idx++, id.index++, id.numid++) snd_ctl_notify(card, SNDRV_CTL_EVENT_MASK_REMOVE, &id); snd_ctl_free_one(kcontrol); return 0; } EXPORT_SYMBOL(snd_ctl_remove); /** * snd_ctl_remove_id - remove the control of the given id and release it * @card: the card instance * @id: the control id to remove * * Finds the control instance with the given id, removes it from the * card list and releases it. * * Return: 0 if successful, or a negative error code on failure. */ int snd_ctl_remove_id(struct snd_card *card, struct snd_ctl_elem_id *id) { struct snd_kcontrol *kctl; int ret; down_write(&card->controls_rwsem); kctl = snd_ctl_find_id(card, id); if (kctl == NULL) { up_write(&card->controls_rwsem); return -ENOENT; } ret = snd_ctl_remove(card, kctl); up_write(&card->controls_rwsem); return ret; } EXPORT_SYMBOL(snd_ctl_remove_id); /** * snd_ctl_remove_user_ctl - remove and release the unlocked user control * @file: active control handle * @id: the control id to remove * * Finds the control instance with the given id, removes it from the * card list and releases it. * * Return: 0 if successful, or a negative error code on failure. 
*/
static int snd_ctl_remove_user_ctl(struct snd_ctl_file * file,
				   struct snd_ctl_elem_id *id)
{
	struct snd_card *card = file->card;
	struct snd_kcontrol *kctl;
	int idx, ret;

	down_write(&card->controls_rwsem);
	kctl = snd_ctl_find_id(card, id);
	if (kctl == NULL) {
		ret = -ENOENT;
		goto error;
	}
	/* only user-space created controls may be removed this way */
	if (!(kctl->vd[0].access & SNDRV_CTL_ELEM_ACCESS_USER)) {
		ret = -EINVAL;
		goto error;
	}
	/* refuse if any member is locked by another control file */
	for (idx = 0; idx < kctl->count; idx++)
		if (kctl->vd[idx].owner != NULL && kctl->vd[idx].owner != file) {
			ret = -EBUSY;
			goto error;
		}
	ret = snd_ctl_remove(card, kctl);
	if (ret < 0)
		goto error;
	/* bookkeeping for the per-card user-control quota */
	card->user_ctl_count--;
error:
	up_write(&card->controls_rwsem);
	return ret;
}

/**
 * snd_ctl_activate_id - activate/inactivate the control of the given id
 * @card: the card instance
 * @id: the control id to activate/inactivate
 * @active: non-zero to activate
 *
 * Finds the control instance with the given id, and activate or
 * inactivate the control together with notification, if changed.
 *
 * Return: 0 if unchanged, 1 if changed, or a negative error code on failure.
*/ int snd_ctl_activate_id(struct snd_card *card, struct snd_ctl_elem_id *id, int active) { struct snd_kcontrol *kctl; struct snd_kcontrol_volatile *vd; unsigned int index_offset; int ret; down_write(&card->controls_rwsem); kctl = snd_ctl_find_id(card, id); if (kctl == NULL) { ret = -ENOENT; goto unlock; } index_offset = snd_ctl_get_ioff(kctl, &kctl->id); vd = &kctl->vd[index_offset]; ret = 0; if (active) { if (!(vd->access & SNDRV_CTL_ELEM_ACCESS_INACTIVE)) goto unlock; vd->access &= ~SNDRV_CTL_ELEM_ACCESS_INACTIVE; } else { if (vd->access & SNDRV_CTL_ELEM_ACCESS_INACTIVE) goto unlock; vd->access |= SNDRV_CTL_ELEM_ACCESS_INACTIVE; } ret = 1; unlock: up_write(&card->controls_rwsem); if (ret > 0) snd_ctl_notify(card, SNDRV_CTL_EVENT_MASK_INFO, id); return ret; } EXPORT_SYMBOL_GPL(snd_ctl_activate_id); /** * snd_ctl_rename_id - replace the id of a control on the card * @card: the card instance * @src_id: the old id * @dst_id: the new id * * Finds the control with the old id from the card, and replaces the * id with the new one. * * Return: Zero if successful, or a negative error code on failure. */ int snd_ctl_rename_id(struct snd_card *card, struct snd_ctl_elem_id *src_id, struct snd_ctl_elem_id *dst_id) { struct snd_kcontrol *kctl; down_write(&card->controls_rwsem); kctl = snd_ctl_find_id(card, src_id); if (kctl == NULL) { up_write(&card->controls_rwsem); return -ENOENT; } kctl->id = *dst_id; kctl->id.numid = card->last_numid + 1; card->last_numid += kctl->count; up_write(&card->controls_rwsem); return 0; } EXPORT_SYMBOL(snd_ctl_rename_id); /** * snd_ctl_find_numid - find the control instance with the given number-id * @card: the card instance * @numid: the number-id to search * * Finds the control instance with the given number-id from the card. * * The caller must down card->controls_rwsem before calling this function * (if the race condition can happen). * * Return: The pointer of the instance if found, or %NULL if not. 
* */ struct snd_kcontrol *snd_ctl_find_numid(struct snd_card *card, unsigned int numid) { struct snd_kcontrol *kctl; if (snd_BUG_ON(!card || !numid)) return NULL; list_for_each_entry(kctl, &card->controls, list) { if (kctl->id.numid <= numid && kctl->id.numid + kctl->count > numid) return kctl; } return NULL; } EXPORT_SYMBOL(snd_ctl_find_numid); /** * snd_ctl_find_id - find the control instance with the given id * @card: the card instance * @id: the id to search * * Finds the control instance with the given id from the card. * * The caller must down card->controls_rwsem before calling this function * (if the race condition can happen). * * Return: The pointer of the instance if found, or %NULL if not. * */ struct snd_kcontrol *snd_ctl_find_id(struct snd_card *card, struct snd_ctl_elem_id *id) { struct snd_kcontrol *kctl; if (snd_BUG_ON(!card || !id)) return NULL; if (id->numid != 0) return snd_ctl_find_numid(card, id->numid); list_for_each_entry(kctl, &card->controls, list) { if (kctl->id.iface != id->iface) continue; if (kctl->id.device != id->device) continue; if (kctl->id.subdevice != id->subdevice) continue; if (strncmp(kctl->id.name, id->name, sizeof(kctl->id.name))) continue; if (kctl->id.index > id->index) continue; if (kctl->id.index + kctl->count <= id->index) continue; return kctl; } return NULL; } EXPORT_SYMBOL(snd_ctl_find_id); static int snd_ctl_card_info(struct snd_card *card, struct snd_ctl_file * ctl, unsigned int cmd, void __user *arg) { struct snd_ctl_card_info *info; info = kzalloc(sizeof(*info), GFP_KERNEL); if (! 
info) return -ENOMEM; down_read(&snd_ioctl_rwsem); info->card = card->number; strlcpy(info->id, card->id, sizeof(info->id)); strlcpy(info->driver, card->driver, sizeof(info->driver)); strlcpy(info->name, card->shortname, sizeof(info->name)); strlcpy(info->longname, card->longname, sizeof(info->longname)); strlcpy(info->mixername, card->mixername, sizeof(info->mixername)); strlcpy(info->components, card->components, sizeof(info->components)); up_read(&snd_ioctl_rwsem); if (copy_to_user(arg, info, sizeof(struct snd_ctl_card_info))) { kfree(info); return -EFAULT; } kfree(info); return 0; } static int snd_ctl_elem_list(struct snd_card *card, struct snd_ctl_elem_list __user *_list) { struct list_head *plist; struct snd_ctl_elem_list list; struct snd_kcontrol *kctl; struct snd_ctl_elem_id *dst, *id; unsigned int offset, space, jidx; if (copy_from_user(&list, _list, sizeof(list))) return -EFAULT; offset = list.offset; space = list.space; /* try limit maximum space */ if (space > 16384) return -ENOMEM; if (space > 0) { /* allocate temporary buffer for atomic operation */ dst = vmalloc(space * sizeof(struct snd_ctl_elem_id)); if (dst == NULL) return -ENOMEM; down_read(&card->controls_rwsem); list.count = card->controls_count; plist = card->controls.next; while (plist != &card->controls) { if (offset == 0) break; kctl = snd_kcontrol(plist); if (offset < kctl->count) break; offset -= kctl->count; plist = plist->next; } list.used = 0; id = dst; while (space > 0 && plist != &card->controls) { kctl = snd_kcontrol(plist); for (jidx = offset; space > 0 && jidx < kctl->count; jidx++) { snd_ctl_build_ioff(id, kctl, jidx); id++; space--; list.used++; } plist = plist->next; offset = 0; } up_read(&card->controls_rwsem); if (list.used > 0 && copy_to_user(list.pids, dst, list.used * sizeof(struct snd_ctl_elem_id))) { vfree(dst); return -EFAULT; } vfree(dst); } else { down_read(&card->controls_rwsem); list.count = card->controls_count; up_read(&card->controls_rwsem); } if 
(copy_to_user(_list, &list, sizeof(list))) return -EFAULT; return 0; } static int snd_ctl_elem_info(struct snd_ctl_file *ctl, struct snd_ctl_elem_info *info) { struct snd_card *card = ctl->card; struct snd_kcontrol *kctl; struct snd_kcontrol_volatile *vd; unsigned int index_offset; int result; down_read(&card->controls_rwsem); kctl = snd_ctl_find_id(card, &info->id); if (kctl == NULL) { up_read(&card->controls_rwsem); return -ENOENT; } #ifdef CONFIG_SND_DEBUG info->access = 0; #endif result = kctl->info(kctl, info); if (result >= 0) { snd_BUG_ON(info->access); index_offset = snd_ctl_get_ioff(kctl, &info->id); vd = &kctl->vd[index_offset]; snd_ctl_build_ioff(&info->id, kctl, index_offset); info->access = vd->access; if (vd->owner) { info->access |= SNDRV_CTL_ELEM_ACCESS_LOCK; if (vd->owner == ctl) info->access |= SNDRV_CTL_ELEM_ACCESS_OWNER; info->owner = pid_vnr(vd->owner->pid); } else { info->owner = -1; } } up_read(&card->controls_rwsem); return result; } static int snd_ctl_elem_info_user(struct snd_ctl_file *ctl, struct snd_ctl_elem_info __user *_info) { struct snd_ctl_elem_info info; int result; if (copy_from_user(&info, _info, sizeof(info))) return -EFAULT; snd_power_lock(ctl->card); result = snd_power_wait(ctl->card, SNDRV_CTL_POWER_D0); if (result >= 0) result = snd_ctl_elem_info(ctl, &info); snd_power_unlock(ctl->card); if (result >= 0) if (copy_to_user(_info, &info, sizeof(info))) return -EFAULT; return result; } static int snd_ctl_elem_read(struct snd_card *card, struct snd_ctl_elem_value *control) { struct snd_kcontrol *kctl; struct snd_kcontrol_volatile *vd; unsigned int index_offset; int result; down_read(&card->controls_rwsem); kctl = snd_ctl_find_id(card, &control->id); if (kctl == NULL) { result = -ENOENT; } else { index_offset = snd_ctl_get_ioff(kctl, &control->id); vd = &kctl->vd[index_offset]; if ((vd->access & SNDRV_CTL_ELEM_ACCESS_READ) && kctl->get != NULL) { snd_ctl_build_ioff(&control->id, kctl, index_offset); result = kctl->get(kctl, 
control); } else result = -EPERM; } up_read(&card->controls_rwsem); return result; } static int snd_ctl_elem_read_user(struct snd_card *card, struct snd_ctl_elem_value __user *_control) { struct snd_ctl_elem_value *control; int result; control = memdup_user(_control, sizeof(*control)); if (IS_ERR(control)) return PTR_ERR(control); snd_power_lock(card); result = snd_power_wait(card, SNDRV_CTL_POWER_D0); if (result >= 0) result = snd_ctl_elem_read(card, control); snd_power_unlock(card); if (result >= 0) if (copy_to_user(_control, control, sizeof(*control))) result = -EFAULT; kfree(control); return result; } static int snd_ctl_elem_write(struct snd_card *card, struct snd_ctl_file *file, struct snd_ctl_elem_value *control) { struct snd_kcontrol *kctl; struct snd_kcontrol_volatile *vd; unsigned int index_offset; int result; down_read(&card->controls_rwsem); kctl = snd_ctl_find_id(card, &control->id); if (kctl == NULL) { result = -ENOENT; } else { index_offset = snd_ctl_get_ioff(kctl, &control->id); vd = &kctl->vd[index_offset]; if (!(vd->access & SNDRV_CTL_ELEM_ACCESS_WRITE) || kctl->put == NULL || (file && vd->owner && vd->owner != file)) { result = -EPERM; } else { snd_ctl_build_ioff(&control->id, kctl, index_offset); result = kctl->put(kctl, control); } if (result > 0) { up_read(&card->controls_rwsem); snd_ctl_notify(card, SNDRV_CTL_EVENT_MASK_VALUE, &control->id); return 0; } } up_read(&card->controls_rwsem); return result; } static int snd_ctl_elem_write_user(struct snd_ctl_file *file, struct snd_ctl_elem_value __user *_control) { struct snd_ctl_elem_value *control; struct snd_card *card; int result; control = memdup_user(_control, sizeof(*control)); if (IS_ERR(control)) return PTR_ERR(control); card = file->card; snd_power_lock(card); result = snd_power_wait(card, SNDRV_CTL_POWER_D0); if (result >= 0) result = snd_ctl_elem_write(card, file, control); snd_power_unlock(card); if (result >= 0) if (copy_to_user(_control, control, sizeof(*control))) result = -EFAULT; 
kfree(control); return result; } static int snd_ctl_elem_lock(struct snd_ctl_file *file, struct snd_ctl_elem_id __user *_id) { struct snd_card *card = file->card; struct snd_ctl_elem_id id; struct snd_kcontrol *kctl; struct snd_kcontrol_volatile *vd; int result; if (copy_from_user(&id, _id, sizeof(id))) return -EFAULT; down_write(&card->controls_rwsem); kctl = snd_ctl_find_id(card, &id); if (kctl == NULL) { result = -ENOENT; } else { vd = &kctl->vd[snd_ctl_get_ioff(kctl, &id)]; if (vd->owner != NULL) result = -EBUSY; else { vd->owner = file; result = 0; } } up_write(&card->controls_rwsem); return result; } static int snd_ctl_elem_unlock(struct snd_ctl_file *file, struct snd_ctl_elem_id __user *_id) { struct snd_card *card = file->card; struct snd_ctl_elem_id id; struct snd_kcontrol *kctl; struct snd_kcontrol_volatile *vd; int result; if (copy_from_user(&id, _id, sizeof(id))) return -EFAULT; down_write(&card->controls_rwsem); kctl = snd_ctl_find_id(card, &id); if (kctl == NULL) { result = -ENOENT; } else { vd = &kctl->vd[snd_ctl_get_ioff(kctl, &id)]; if (vd->owner == NULL) result = -EINVAL; else if (vd->owner != file) result = -EPERM; else { vd->owner = NULL; result = 0; } } up_write(&card->controls_rwsem); return result; } struct user_element { struct snd_ctl_elem_info info; struct snd_card *card; void *elem_data; /* element data */ unsigned long elem_data_size; /* size of element data in bytes */ void *tlv_data; /* TLV data */ unsigned long tlv_data_size; /* TLV data size */ void *priv_data; /* private data (like strings for enumerated type) */ }; static int snd_ctl_elem_user_info(struct snd_kcontrol *kcontrol, struct snd_ctl_elem_info *uinfo) { struct user_element *ue = kcontrol->private_data; *uinfo = ue->info; return 0; } static int snd_ctl_elem_user_enum_info(struct snd_kcontrol *kcontrol, struct snd_ctl_elem_info *uinfo) { struct user_element *ue = kcontrol->private_data; const char *names; unsigned int item; item = uinfo->value.enumerated.item; *uinfo = 
ue->info; item = min(item, uinfo->value.enumerated.items - 1); uinfo->value.enumerated.item = item; names = ue->priv_data; for (; item > 0; --item) names += strlen(names) + 1; strcpy(uinfo->value.enumerated.name, names); return 0; } static int snd_ctl_elem_user_get(struct snd_kcontrol *kcontrol, struct snd_ctl_elem_value *ucontrol) { struct user_element *ue = kcontrol->private_data; mutex_lock(&ue->card->user_ctl_lock); memcpy(&ucontrol->value, ue->elem_data, ue->elem_data_size); mutex_unlock(&ue->card->user_ctl_lock); return 0; } static int snd_ctl_elem_user_put(struct snd_kcontrol *kcontrol, struct snd_ctl_elem_value *ucontrol) { int change; struct user_element *ue = kcontrol->private_data; mutex_lock(&ue->card->user_ctl_lock); change = memcmp(&ucontrol->value, ue->elem_data, ue->elem_data_size) != 0; if (change) memcpy(ue->elem_data, &ucontrol->value, ue->elem_data_size); mutex_unlock(&ue->card->user_ctl_lock); return change; } static int snd_ctl_elem_user_tlv(struct snd_kcontrol *kcontrol, int op_flag, unsigned int size, unsigned int __user *tlv) { struct user_element *ue = kcontrol->private_data; int change = 0; void *new_data; if (op_flag > 0) { if (size > 1024 * 128) /* sane value */ return -EINVAL; new_data = memdup_user(tlv, size); if (IS_ERR(new_data)) return PTR_ERR(new_data); mutex_lock(&ue->card->user_ctl_lock); change = ue->tlv_data_size != size; if (!change) change = memcmp(ue->tlv_data, new_data, size); kfree(ue->tlv_data); ue->tlv_data = new_data; ue->tlv_data_size = size; mutex_unlock(&ue->card->user_ctl_lock); } else { int ret = 0; mutex_lock(&ue->card->user_ctl_lock); if (!ue->tlv_data_size || !ue->tlv_data) { ret = -ENXIO; goto err_unlock; } if (size < ue->tlv_data_size) { ret = -ENOSPC; goto err_unlock; } if (copy_to_user(tlv, ue->tlv_data, ue->tlv_data_size)) ret = -EFAULT; err_unlock: mutex_unlock(&ue->card->user_ctl_lock); if (ret) return ret; } return change; } static int snd_ctl_elem_init_enum_names(struct user_element *ue) { char *names, 
*p; size_t buf_len, name_len; unsigned int i; const uintptr_t user_ptrval = ue->info.value.enumerated.names_ptr; if (ue->info.value.enumerated.names_length > 64 * 1024) return -EINVAL; names = memdup_user((const void __user *)user_ptrval, ue->info.value.enumerated.names_length); if (IS_ERR(names)) return PTR_ERR(names); /* check that there are enough valid names */ buf_len = ue->info.value.enumerated.names_length; p = names; for (i = 0; i < ue->info.value.enumerated.items; ++i) { name_len = strnlen(p, buf_len); if (name_len == 0 || name_len >= 64 || name_len == buf_len) { kfree(names); return -EINVAL; } p += name_len + 1; buf_len -= name_len + 1; } ue->priv_data = names; ue->info.value.enumerated.names_ptr = 0; return 0; } static void snd_ctl_elem_user_free(struct snd_kcontrol *kcontrol) { struct user_element *ue = kcontrol->private_data; kfree(ue->tlv_data); kfree(ue->priv_data); kfree(ue); } static int snd_ctl_elem_add(struct snd_ctl_file *file, struct snd_ctl_elem_info *info, int replace) { struct snd_card *card = file->card; struct snd_kcontrol kctl, *_kctl; unsigned int access; long private_size; struct user_element *ue; int idx, err; if (info->count < 1) return -EINVAL; access = info->access == 0 ? SNDRV_CTL_ELEM_ACCESS_READWRITE : (info->access & (SNDRV_CTL_ELEM_ACCESS_READWRITE| SNDRV_CTL_ELEM_ACCESS_INACTIVE| SNDRV_CTL_ELEM_ACCESS_TLV_READWRITE)); info->id.numid = 0; memset(&kctl, 0, sizeof(kctl)); if (replace) { err = snd_ctl_remove_user_ctl(file, &info->id); if (err) return err; } if (card->user_ctl_count >= MAX_USER_CONTROLS) return -ENOMEM; memcpy(&kctl.id, &info->id, sizeof(info->id)); kctl.count = info->owner ? 
info->owner : 1; access |= SNDRV_CTL_ELEM_ACCESS_USER; if (info->type == SNDRV_CTL_ELEM_TYPE_ENUMERATED) kctl.info = snd_ctl_elem_user_enum_info; else kctl.info = snd_ctl_elem_user_info; if (access & SNDRV_CTL_ELEM_ACCESS_READ) kctl.get = snd_ctl_elem_user_get; if (access & SNDRV_CTL_ELEM_ACCESS_WRITE) kctl.put = snd_ctl_elem_user_put; if (access & SNDRV_CTL_ELEM_ACCESS_TLV_READWRITE) { kctl.tlv.c = snd_ctl_elem_user_tlv; access |= SNDRV_CTL_ELEM_ACCESS_TLV_CALLBACK; } switch (info->type) { case SNDRV_CTL_ELEM_TYPE_BOOLEAN: case SNDRV_CTL_ELEM_TYPE_INTEGER: private_size = sizeof(long); if (info->count > 128) return -EINVAL; break; case SNDRV_CTL_ELEM_TYPE_INTEGER64: private_size = sizeof(long long); if (info->count > 64) return -EINVAL; break; case SNDRV_CTL_ELEM_TYPE_ENUMERATED: private_size = sizeof(unsigned int); if (info->count > 128 || info->value.enumerated.items == 0) return -EINVAL; break; case SNDRV_CTL_ELEM_TYPE_BYTES: private_size = sizeof(unsigned char); if (info->count > 512) return -EINVAL; break; case SNDRV_CTL_ELEM_TYPE_IEC958: private_size = sizeof(struct snd_aes_iec958); if (info->count != 1) return -EINVAL; break; default: return -EINVAL; } private_size *= info->count; ue = kzalloc(sizeof(struct user_element) + private_size, GFP_KERNEL); if (ue == NULL) return -ENOMEM; ue->card = card; ue->info = *info; ue->info.access = 0; ue->elem_data = (char *)ue + sizeof(*ue); ue->elem_data_size = private_size; if (ue->info.type == SNDRV_CTL_ELEM_TYPE_ENUMERATED) { err = snd_ctl_elem_init_enum_names(ue); if (err < 0) { kfree(ue); return err; } } kctl.private_free = snd_ctl_elem_user_free; _kctl = snd_ctl_new(&kctl, access); if (_kctl == NULL) { kfree(ue->priv_data); kfree(ue); return -ENOMEM; } _kctl->private_data = ue; for (idx = 0; idx < _kctl->count; idx++) _kctl->vd[idx].owner = file; err = snd_ctl_add(card, _kctl); if (err < 0) return err; down_write(&card->controls_rwsem); card->user_ctl_count++; up_write(&card->controls_rwsem); return 0; } static int 
snd_ctl_elem_add_user(struct snd_ctl_file *file, struct snd_ctl_elem_info __user *_info, int replace) { struct snd_ctl_elem_info info; if (copy_from_user(&info, _info, sizeof(info))) return -EFAULT; return snd_ctl_elem_add(file, &info, replace); } static int snd_ctl_elem_remove(struct snd_ctl_file *file, struct snd_ctl_elem_id __user *_id) { struct snd_ctl_elem_id id; if (copy_from_user(&id, _id, sizeof(id))) return -EFAULT; return snd_ctl_remove_user_ctl(file, &id); } static int snd_ctl_subscribe_events(struct snd_ctl_file *file, int __user *ptr) { int subscribe; if (get_user(subscribe, ptr)) return -EFAULT; if (subscribe < 0) { subscribe = file->subscribed; if (put_user(subscribe, ptr)) return -EFAULT; return 0; } if (subscribe) { file->subscribed = 1; return 0; } else if (file->subscribed) { snd_ctl_empty_read_queue(file); file->subscribed = 0; } return 0; } static int snd_ctl_tlv_ioctl(struct snd_ctl_file *file, struct snd_ctl_tlv __user *_tlv, int op_flag) { struct snd_card *card = file->card; struct snd_ctl_tlv tlv; struct snd_kcontrol *kctl; struct snd_kcontrol_volatile *vd; unsigned int len; int err = 0; if (copy_from_user(&tlv, _tlv, sizeof(tlv))) return -EFAULT; if (tlv.length < sizeof(unsigned int) * 2) return -EINVAL; down_read(&card->controls_rwsem); kctl = snd_ctl_find_numid(card, tlv.numid); if (kctl == NULL) { err = -ENOENT; goto __kctl_end; } if (kctl->tlv.p == NULL) { err = -ENXIO; goto __kctl_end; } vd = &kctl->vd[tlv.numid - kctl->id.numid]; if ((op_flag == 0 && (vd->access & SNDRV_CTL_ELEM_ACCESS_TLV_READ) == 0) || (op_flag > 0 && (vd->access & SNDRV_CTL_ELEM_ACCESS_TLV_WRITE) == 0) || (op_flag < 0 && (vd->access & SNDRV_CTL_ELEM_ACCESS_TLV_COMMAND) == 0)) { err = -ENXIO; goto __kctl_end; } if (vd->access & SNDRV_CTL_ELEM_ACCESS_TLV_CALLBACK) { if (vd->owner != NULL && vd->owner != file) { err = -EPERM; goto __kctl_end; } err = kctl->tlv.c(kctl, op_flag, tlv.length, _tlv->tlv); if (err > 0) { up_read(&card->controls_rwsem); snd_ctl_notify(card, 
SNDRV_CTL_EVENT_MASK_TLV, &kctl->id); return 0; } } else { if (op_flag) { err = -ENXIO; goto __kctl_end; } len = kctl->tlv.p[1] + 2 * sizeof(unsigned int); if (tlv.length < len) { err = -ENOMEM; goto __kctl_end; } if (copy_to_user(_tlv->tlv, kctl->tlv.p, len)) err = -EFAULT; } __kctl_end: up_read(&card->controls_rwsem); return err; } static long snd_ctl_ioctl(struct file *file, unsigned int cmd, unsigned long arg) { struct snd_ctl_file *ctl; struct snd_card *card; struct snd_kctl_ioctl *p; void __user *argp = (void __user *)arg; int __user *ip = argp; int err; ctl = file->private_data; card = ctl->card; if (snd_BUG_ON(!card)) return -ENXIO; switch (cmd) { case SNDRV_CTL_IOCTL_PVERSION: return put_user(SNDRV_CTL_VERSION, ip) ? -EFAULT : 0; case SNDRV_CTL_IOCTL_CARD_INFO: return snd_ctl_card_info(card, ctl, cmd, argp); case SNDRV_CTL_IOCTL_ELEM_LIST: return snd_ctl_elem_list(card, argp); case SNDRV_CTL_IOCTL_ELEM_INFO: return snd_ctl_elem_info_user(ctl, argp); case SNDRV_CTL_IOCTL_ELEM_READ: return snd_ctl_elem_read_user(card, argp); case SNDRV_CTL_IOCTL_ELEM_WRITE: return snd_ctl_elem_write_user(ctl, argp); case SNDRV_CTL_IOCTL_ELEM_LOCK: return snd_ctl_elem_lock(ctl, argp); case SNDRV_CTL_IOCTL_ELEM_UNLOCK: return snd_ctl_elem_unlock(ctl, argp); case SNDRV_CTL_IOCTL_ELEM_ADD: return snd_ctl_elem_add_user(ctl, argp, 0); case SNDRV_CTL_IOCTL_ELEM_REPLACE: return snd_ctl_elem_add_user(ctl, argp, 1); case SNDRV_CTL_IOCTL_ELEM_REMOVE: return snd_ctl_elem_remove(ctl, argp); case SNDRV_CTL_IOCTL_SUBSCRIBE_EVENTS: return snd_ctl_subscribe_events(ctl, ip); case SNDRV_CTL_IOCTL_TLV_READ: return snd_ctl_tlv_ioctl(ctl, argp, 0); case SNDRV_CTL_IOCTL_TLV_WRITE: return snd_ctl_tlv_ioctl(ctl, argp, 1); case SNDRV_CTL_IOCTL_TLV_COMMAND: return snd_ctl_tlv_ioctl(ctl, argp, -1); case SNDRV_CTL_IOCTL_POWER: return -ENOPROTOOPT; case SNDRV_CTL_IOCTL_POWER_STATE: #ifdef CONFIG_PM return put_user(card->power_state, ip) ? -EFAULT : 0; #else return put_user(SNDRV_CTL_POWER_D0, ip) ? 
-EFAULT : 0; #endif } down_read(&snd_ioctl_rwsem); list_for_each_entry(p, &snd_control_ioctls, list) { err = p->fioctl(card, ctl, cmd, arg); if (err != -ENOIOCTLCMD) { up_read(&snd_ioctl_rwsem); return err; } } up_read(&snd_ioctl_rwsem); dev_dbg(card->dev, "unknown ioctl = 0x%x\n", cmd); return -ENOTTY; } static ssize_t snd_ctl_read(struct file *file, char __user *buffer, size_t count, loff_t * offset) { struct snd_ctl_file *ctl; int err = 0; ssize_t result = 0; ctl = file->private_data; if (snd_BUG_ON(!ctl || !ctl->card)) return -ENXIO; if (!ctl->subscribed) return -EBADFD; if (count < sizeof(struct snd_ctl_event)) return -EINVAL; spin_lock_irq(&ctl->read_lock); while (count >= sizeof(struct snd_ctl_event)) { struct snd_ctl_event ev; struct snd_kctl_event *kev; while (list_empty(&ctl->events)) { wait_queue_t wait; if ((file->f_flags & O_NONBLOCK) != 0 || result > 0) { err = -EAGAIN; goto __end_lock; } init_waitqueue_entry(&wait, current); add_wait_queue(&ctl->change_sleep, &wait); set_current_state(TASK_INTERRUPTIBLE); spin_unlock_irq(&ctl->read_lock); schedule(); remove_wait_queue(&ctl->change_sleep, &wait); if (ctl->card->shutdown) return -ENODEV; if (signal_pending(current)) return -ERESTARTSYS; spin_lock_irq(&ctl->read_lock); } kev = snd_kctl_event(ctl->events.next); ev.type = SNDRV_CTL_EVENT_ELEM; ev.data.elem.mask = kev->mask; ev.data.elem.id = kev->id; list_del(&kev->list); spin_unlock_irq(&ctl->read_lock); kfree(kev); if (copy_to_user(buffer, &ev, sizeof(struct snd_ctl_event))) { err = -EFAULT; goto __end; } spin_lock_irq(&ctl->read_lock); buffer += sizeof(struct snd_ctl_event); count -= sizeof(struct snd_ctl_event); result += sizeof(struct snd_ctl_event); } __end_lock: spin_unlock_irq(&ctl->read_lock); __end: return result > 0 ? 
result : err; } static unsigned int snd_ctl_poll(struct file *file, poll_table * wait) { unsigned int mask; struct snd_ctl_file *ctl; ctl = file->private_data; if (!ctl->subscribed) return 0; poll_wait(file, &ctl->change_sleep, wait); mask = 0; if (!list_empty(&ctl->events)) mask |= POLLIN | POLLRDNORM; return mask; } /* * register the device-specific control-ioctls. * called from each device manager like pcm.c, hwdep.c, etc. */ static int _snd_ctl_register_ioctl(snd_kctl_ioctl_func_t fcn, struct list_head *lists) { struct snd_kctl_ioctl *pn; pn = kzalloc(sizeof(struct snd_kctl_ioctl), GFP_KERNEL); if (pn == NULL) return -ENOMEM; pn->fioctl = fcn; down_write(&snd_ioctl_rwsem); list_add_tail(&pn->list, lists); up_write(&snd_ioctl_rwsem); return 0; } int snd_ctl_register_ioctl(snd_kctl_ioctl_func_t fcn) { return _snd_ctl_register_ioctl(fcn, &snd_control_ioctls); } EXPORT_SYMBOL(snd_ctl_register_ioctl); #ifdef CONFIG_COMPAT int snd_ctl_register_ioctl_compat(snd_kctl_ioctl_func_t fcn) { return _snd_ctl_register_ioctl(fcn, &snd_control_compat_ioctls); } EXPORT_SYMBOL(snd_ctl_register_ioctl_compat); #endif /* * de-register the device-specific control-ioctls. 
*/ static int _snd_ctl_unregister_ioctl(snd_kctl_ioctl_func_t fcn, struct list_head *lists) { struct snd_kctl_ioctl *p; if (snd_BUG_ON(!fcn)) return -EINVAL; down_write(&snd_ioctl_rwsem); list_for_each_entry(p, lists, list) { if (p->fioctl == fcn) { list_del(&p->list); up_write(&snd_ioctl_rwsem); kfree(p); return 0; } } up_write(&snd_ioctl_rwsem); snd_BUG(); return -EINVAL; } int snd_ctl_unregister_ioctl(snd_kctl_ioctl_func_t fcn) { return _snd_ctl_unregister_ioctl(fcn, &snd_control_ioctls); } EXPORT_SYMBOL(snd_ctl_unregister_ioctl); #ifdef CONFIG_COMPAT int snd_ctl_unregister_ioctl_compat(snd_kctl_ioctl_func_t fcn) { return _snd_ctl_unregister_ioctl(fcn, &snd_control_compat_ioctls); } EXPORT_SYMBOL(snd_ctl_unregister_ioctl_compat); #endif static int snd_ctl_fasync(int fd, struct file * file, int on) { struct snd_ctl_file *ctl; ctl = file->private_data; return fasync_helper(fd, file, on, &ctl->fasync); } /* * ioctl32 compat */ #ifdef CONFIG_COMPAT #include "control_compat.c" #else #define snd_ctl_ioctl_compat NULL #endif /* * INIT PART */ static const struct file_operations snd_ctl_f_ops = { .owner = THIS_MODULE, .read = snd_ctl_read, .open = snd_ctl_open, .release = snd_ctl_release, .llseek = no_llseek, .poll = snd_ctl_poll, .unlocked_ioctl = snd_ctl_ioctl, .compat_ioctl = snd_ctl_ioctl_compat, .fasync = snd_ctl_fasync, }; /* * registration of the control device */ static int snd_ctl_dev_register(struct snd_device *device) { struct snd_card *card = device->device_data; int err, cardnum; char name[16]; if (snd_BUG_ON(!card)) return -ENXIO; cardnum = card->number; if (snd_BUG_ON(cardnum < 0 || cardnum >= SNDRV_CARDS)) return -ENXIO; sprintf(name, "controlC%i", cardnum); if ((err = snd_register_device(SNDRV_DEVICE_TYPE_CONTROL, card, -1, &snd_ctl_f_ops, card, name)) < 0) return err; return 0; } /* * disconnection of the control device */ static int snd_ctl_dev_disconnect(struct snd_device *device) { struct snd_card *card = device->device_data; struct snd_ctl_file 
*ctl; int err, cardnum; if (snd_BUG_ON(!card)) return -ENXIO; cardnum = card->number; if (snd_BUG_ON(cardnum < 0 || cardnum >= SNDRV_CARDS)) return -ENXIO; read_lock(&card->ctl_files_rwlock); list_for_each_entry(ctl, &card->ctl_files, list) { wake_up(&ctl->change_sleep); kill_fasync(&ctl->fasync, SIGIO, POLL_ERR); } read_unlock(&card->ctl_files_rwlock); if ((err = snd_unregister_device(SNDRV_DEVICE_TYPE_CONTROL, card, -1)) < 0) return err; return 0; } /* * free all controls */ static int snd_ctl_dev_free(struct snd_device *device) { struct snd_card *card = device->device_data; struct snd_kcontrol *control; down_write(&card->controls_rwsem); while (!list_empty(&card->controls)) { control = snd_kcontrol(card->controls.next); snd_ctl_remove(card, control); } up_write(&card->controls_rwsem); return 0; } /* * create control core: * called from init.c */ int snd_ctl_create(struct snd_card *card) { static struct snd_device_ops ops = { .dev_free = snd_ctl_dev_free, .dev_register = snd_ctl_dev_register, .dev_disconnect = snd_ctl_dev_disconnect, }; if (snd_BUG_ON(!card)) return -ENXIO; return snd_device_new(card, SNDRV_DEV_CONTROL, card, &ops); } /* * Frequently used control callbacks/helpers */ int snd_ctl_boolean_mono_info(struct snd_kcontrol *kcontrol, struct snd_ctl_elem_info *uinfo) { uinfo->type = SNDRV_CTL_ELEM_TYPE_BOOLEAN; uinfo->count = 1; uinfo->value.integer.min = 0; uinfo->value.integer.max = 1; return 0; } EXPORT_SYMBOL(snd_ctl_boolean_mono_info); int snd_ctl_boolean_stereo_info(struct snd_kcontrol *kcontrol, struct snd_ctl_elem_info *uinfo) { uinfo->type = SNDRV_CTL_ELEM_TYPE_BOOLEAN; uinfo->count = 2; uinfo->value.integer.min = 0; uinfo->value.integer.max = 1; return 0; } EXPORT_SYMBOL(snd_ctl_boolean_stereo_info); /** * snd_ctl_enum_info - fills the info structure for an enumerated control * @info: the structure to be filled * @channels: the number of the control's channels; often one * @items: the number of control values; also the size of @names * @names: 
an array containing the names of all control values * * Sets all required fields in @info to their appropriate values. * If the control's accessibility is not the default (readable and writable), * the caller has to fill @info->access. * * Return: Zero. */ int snd_ctl_enum_info(struct snd_ctl_elem_info *info, unsigned int channels, unsigned int items, const char *const names[]) { info->type = SNDRV_CTL_ELEM_TYPE_ENUMERATED; info->count = channels; info->value.enumerated.items = items; if (info->value.enumerated.item >= items) info->value.enumerated.item = items - 1; strlcpy(info->value.enumerated.name, names[info->value.enumerated.item], sizeof(info->value.enumerated.name)); return 0; } EXPORT_SYMBOL(snd_ctl_enum_info);
static int snd_ctl_elem_add(struct snd_ctl_file *file, struct snd_ctl_elem_info *info, int replace) { struct snd_card *card = file->card; struct snd_kcontrol kctl, *_kctl; unsigned int access; long private_size; struct user_element *ue; int idx, err; if (!replace && card->user_ctl_count >= MAX_USER_CONTROLS) return -ENOMEM; if (info->count < 1) return -EINVAL; access = info->access == 0 ? SNDRV_CTL_ELEM_ACCESS_READWRITE : (info->access & (SNDRV_CTL_ELEM_ACCESS_READWRITE| SNDRV_CTL_ELEM_ACCESS_INACTIVE| SNDRV_CTL_ELEM_ACCESS_TLV_READWRITE)); info->id.numid = 0; memset(&kctl, 0, sizeof(kctl)); down_write(&card->controls_rwsem); _kctl = snd_ctl_find_id(card, &info->id); err = 0; if (_kctl) { if (replace) err = snd_ctl_remove(card, _kctl); else err = -EBUSY; } else { if (replace) err = -ENOENT; } up_write(&card->controls_rwsem); if (err < 0) return err; memcpy(&kctl.id, &info->id, sizeof(info->id)); kctl.count = info->owner ? info->owner : 1; access |= SNDRV_CTL_ELEM_ACCESS_USER; if (info->type == SNDRV_CTL_ELEM_TYPE_ENUMERATED) kctl.info = snd_ctl_elem_user_enum_info; else kctl.info = snd_ctl_elem_user_info; if (access & SNDRV_CTL_ELEM_ACCESS_READ) kctl.get = snd_ctl_elem_user_get; if (access & SNDRV_CTL_ELEM_ACCESS_WRITE) kctl.put = snd_ctl_elem_user_put; if (access & SNDRV_CTL_ELEM_ACCESS_TLV_READWRITE) { kctl.tlv.c = snd_ctl_elem_user_tlv; access |= SNDRV_CTL_ELEM_ACCESS_TLV_CALLBACK; } switch (info->type) { case SNDRV_CTL_ELEM_TYPE_BOOLEAN: case SNDRV_CTL_ELEM_TYPE_INTEGER: private_size = sizeof(long); if (info->count > 128) return -EINVAL; break; case SNDRV_CTL_ELEM_TYPE_INTEGER64: private_size = sizeof(long long); if (info->count > 64) return -EINVAL; break; case SNDRV_CTL_ELEM_TYPE_ENUMERATED: private_size = sizeof(unsigned int); if (info->count > 128 || info->value.enumerated.items == 0) return -EINVAL; break; case SNDRV_CTL_ELEM_TYPE_BYTES: private_size = sizeof(unsigned char); if (info->count > 512) return -EINVAL; break; case SNDRV_CTL_ELEM_TYPE_IEC958: 
private_size = sizeof(struct snd_aes_iec958); if (info->count != 1) return -EINVAL; break; default: return -EINVAL; } private_size *= info->count; ue = kzalloc(sizeof(struct user_element) + private_size, GFP_KERNEL); if (ue == NULL) return -ENOMEM; ue->card = card; ue->info = *info; ue->info.access = 0; ue->elem_data = (char *)ue + sizeof(*ue); ue->elem_data_size = private_size; if (ue->info.type == SNDRV_CTL_ELEM_TYPE_ENUMERATED) { err = snd_ctl_elem_init_enum_names(ue); if (err < 0) { kfree(ue); return err; } } kctl.private_free = snd_ctl_elem_user_free; _kctl = snd_ctl_new(&kctl, access); if (_kctl == NULL) { kfree(ue->priv_data); kfree(ue); return -ENOMEM; } _kctl->private_data = ue; for (idx = 0; idx < _kctl->count; idx++) _kctl->vd[idx].owner = file; err = snd_ctl_add(card, _kctl); if (err < 0) return err; down_write(&card->controls_rwsem); card->user_ctl_count++; up_write(&card->controls_rwsem); return 0; }
static int snd_ctl_elem_add(struct snd_ctl_file *file, struct snd_ctl_elem_info *info, int replace) { struct snd_card *card = file->card; struct snd_kcontrol kctl, *_kctl; unsigned int access; long private_size; struct user_element *ue; int idx, err; if (info->count < 1) return -EINVAL; access = info->access == 0 ? SNDRV_CTL_ELEM_ACCESS_READWRITE : (info->access & (SNDRV_CTL_ELEM_ACCESS_READWRITE| SNDRV_CTL_ELEM_ACCESS_INACTIVE| SNDRV_CTL_ELEM_ACCESS_TLV_READWRITE)); info->id.numid = 0; memset(&kctl, 0, sizeof(kctl)); if (replace) { err = snd_ctl_remove_user_ctl(file, &info->id); if (err) return err; } if (card->user_ctl_count >= MAX_USER_CONTROLS) return -ENOMEM; memcpy(&kctl.id, &info->id, sizeof(info->id)); kctl.count = info->owner ? info->owner : 1; access |= SNDRV_CTL_ELEM_ACCESS_USER; if (info->type == SNDRV_CTL_ELEM_TYPE_ENUMERATED) kctl.info = snd_ctl_elem_user_enum_info; else kctl.info = snd_ctl_elem_user_info; if (access & SNDRV_CTL_ELEM_ACCESS_READ) kctl.get = snd_ctl_elem_user_get; if (access & SNDRV_CTL_ELEM_ACCESS_WRITE) kctl.put = snd_ctl_elem_user_put; if (access & SNDRV_CTL_ELEM_ACCESS_TLV_READWRITE) { kctl.tlv.c = snd_ctl_elem_user_tlv; access |= SNDRV_CTL_ELEM_ACCESS_TLV_CALLBACK; } switch (info->type) { case SNDRV_CTL_ELEM_TYPE_BOOLEAN: case SNDRV_CTL_ELEM_TYPE_INTEGER: private_size = sizeof(long); if (info->count > 128) return -EINVAL; break; case SNDRV_CTL_ELEM_TYPE_INTEGER64: private_size = sizeof(long long); if (info->count > 64) return -EINVAL; break; case SNDRV_CTL_ELEM_TYPE_ENUMERATED: private_size = sizeof(unsigned int); if (info->count > 128 || info->value.enumerated.items == 0) return -EINVAL; break; case SNDRV_CTL_ELEM_TYPE_BYTES: private_size = sizeof(unsigned char); if (info->count > 512) return -EINVAL; break; case SNDRV_CTL_ELEM_TYPE_IEC958: private_size = sizeof(struct snd_aes_iec958); if (info->count != 1) return -EINVAL; break; default: return -EINVAL; } private_size *= info->count; ue = kzalloc(sizeof(struct user_element) + 
private_size, GFP_KERNEL); if (ue == NULL) return -ENOMEM; ue->card = card; ue->info = *info; ue->info.access = 0; ue->elem_data = (char *)ue + sizeof(*ue); ue->elem_data_size = private_size; if (ue->info.type == SNDRV_CTL_ELEM_TYPE_ENUMERATED) { err = snd_ctl_elem_init_enum_names(ue); if (err < 0) { kfree(ue); return err; } } kctl.private_free = snd_ctl_elem_user_free; _kctl = snd_ctl_new(&kctl, access); if (_kctl == NULL) { kfree(ue->priv_data); kfree(ue); return -ENOMEM; } _kctl->private_data = ue; for (idx = 0; idx < _kctl->count; idx++) _kctl->vd[idx].owner = file; err = snd_ctl_add(card, _kctl); if (err < 0) return err; down_write(&card->controls_rwsem); card->user_ctl_count++; up_write(&card->controls_rwsem); return 0; }
{'added': [(1165, ''), (1166, '\tif (replace) {'), (1167, '\t\terr = snd_ctl_remove_user_ctl(file, &info->id);'), (1168, '\t\tif (err)'), (1169, '\t\t\treturn err;'), (1171, ''), (1172, '\tif (card->user_ctl_count >= MAX_USER_CONTROLS)'), (1173, '\t\treturn -ENOMEM;'), (1174, '')], 'deleted': [(1157, '\tif (!replace && card->user_ctl_count >= MAX_USER_CONTROLS)'), (1158, '\t\treturn -ENOMEM;'), (1167, '\tdown_write(&card->controls_rwsem);'), (1168, '\t_kctl = snd_ctl_find_id(card, &info->id);'), (1169, '\terr = 0;'), (1170, '\tif (_kctl) {'), (1171, '\t\tif (replace)'), (1172, '\t\t\terr = snd_ctl_remove(card, _kctl);'), (1173, '\t\telse'), (1174, '\t\t\terr = -EBUSY;'), (1175, '\t} else {'), (1176, '\t\tif (replace)'), (1177, '\t\t\terr = -ENOENT;'), (1179, '\tup_write(&card->controls_rwsem);'), (1180, '\tif (err < 0)'), (1181, '\t\treturn err;')]}
9
16
1,367
8,532
113
678
32
https://github.com/torvalds/linux
CVE-2014-4654
CWE-416
1,956
enc28j60_driver.c
C
enc28j60SelectBank
/** * @file enc28j60_driver.c * @brief ENC28J60 Ethernet controller * * @section License * * SPDX-License-Identifier: GPL-2.0-or-later * * Copyright (C) 2010-2020 Oryx Embedded SARL. All rights reserved. * * This file is part of CycloneTCP Open. * * This program is free software; you can redistribute it and/or * modify it under the terms of the GNU General Public License * as published by the Free Software Foundation; either version 2 * of the License, or (at your option) any later version. * * This program is distributed in the hope that it will be useful, * but WITHOUT ANY WARRANTY; without even the implied warranty of * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the * GNU General Public License for more details. * * You should have received a copy of the GNU General Public License * along with this program; if not, write to the Free Software Foundation, * Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA. * * @author Oryx Embedded SARL (www.oryx-embedded.com) * @version 2.0.0 **/ //Switch to the appropriate trace level #define TRACE_LEVEL NIC_TRACE_LEVEL //Dependencies #include <limits.h> #include "core/net.h" #include "drivers/eth/enc28j60_driver.h" #include "debug.h" /** * @brief ENC28J60 driver **/ const NicDriver enc28j60Driver = { NIC_TYPE_ETHERNET, ETH_MTU, enc28j60Init, enc28j60Tick, enc28j60EnableIrq, enc28j60DisableIrq, enc28j60EventHandler, enc28j60SendPacket, enc28j60UpdateMacAddrFilter, NULL, NULL, NULL, TRUE, TRUE, TRUE, FALSE }; /** * @brief ENC28J60 controller initialization * @param[in] interface Underlying network interface * @return Error code **/ error_t enc28j60Init(NetInterface *interface) { uint8_t revisionId; Enc28j60Context *context; //Debug message TRACE_INFO("Initializing ENC28J60 Ethernet controller...\r\n"); //Initialize SPI interface->spiDriver->init(); //Initialize external interrupt line interface->extIntDriver->init(); //Issue a system reset enc28j60SoftReset(interface); //After issuing the reset 
command, wait at least 1ms in firmware //for the device to be ready sleep(10); //Point to the driver context context = (Enc28j60Context *) interface->nicContext; //Initialize driver specific variables context->currentBank = UINT16_MAX; context->nextPacket = ENC28J60_RX_BUFFER_START; //Allocate RX buffer context->rxBuffer = memPoolAlloc(ETH_MAX_FRAME_SIZE); //Failed to allocate memory? if(context->rxBuffer == NULL) { return ERROR_OUT_OF_MEMORY; } //Read silicon revision ID revisionId = enc28j60ReadReg(interface, ENC28J60_REG_EREVID); //Debug message TRACE_INFO("ENC28J60 revision ID: 0x%02X\r\n", revisionId); //Disable CLKOUT output enc28j60WriteReg(interface, ENC28J60_REG_ECOCON, 0x00); //Set the MAC address of the station enc28j60WriteReg(interface, ENC28J60_REG_MAADR1, interface->macAddr.b[0]); enc28j60WriteReg(interface, ENC28J60_REG_MAADR2, interface->macAddr.b[1]); enc28j60WriteReg(interface, ENC28J60_REG_MAADR3, interface->macAddr.b[2]); enc28j60WriteReg(interface, ENC28J60_REG_MAADR4, interface->macAddr.b[3]); enc28j60WriteReg(interface, ENC28J60_REG_MAADR5, interface->macAddr.b[4]); enc28j60WriteReg(interface, ENC28J60_REG_MAADR6, interface->macAddr.b[5]); //Set receive buffer location enc28j60WriteReg(interface, ENC28J60_REG_ERXSTL, LSB(ENC28J60_RX_BUFFER_START)); enc28j60WriteReg(interface, ENC28J60_REG_ERXSTH, MSB(ENC28J60_RX_BUFFER_START)); enc28j60WriteReg(interface, ENC28J60_REG_ERXNDL, LSB(ENC28J60_RX_BUFFER_STOP)); enc28j60WriteReg(interface, ENC28J60_REG_ERXNDH, MSB(ENC28J60_RX_BUFFER_STOP)); //The ERXRDPT register defines a location within the FIFO //where the receive hardware is forbidden to write to enc28j60WriteReg(interface, ENC28J60_REG_ERXRDPTL, LSB(ENC28J60_RX_BUFFER_STOP)); enc28j60WriteReg(interface, ENC28J60_REG_ERXRDPTH, MSB(ENC28J60_RX_BUFFER_STOP)); //Configure the receive filters enc28j60WriteReg(interface, ENC28J60_REG_ERXFCON, ERXFCON_UCEN | ERXFCON_CRCEN | ERXFCON_HTEN | ERXFCON_BCEN); //Initialize the hash table 
enc28j60WriteReg(interface, ENC28J60_REG_EHT0, 0x00); enc28j60WriteReg(interface, ENC28J60_REG_EHT1, 0x00); enc28j60WriteReg(interface, ENC28J60_REG_EHT2, 0x00); enc28j60WriteReg(interface, ENC28J60_REG_EHT3, 0x00); enc28j60WriteReg(interface, ENC28J60_REG_EHT4, 0x00); enc28j60WriteReg(interface, ENC28J60_REG_EHT5, 0x00); enc28j60WriteReg(interface, ENC28J60_REG_EHT6, 0x00); enc28j60WriteReg(interface, ENC28J60_REG_EHT7, 0x00); //Pull the MAC out of reset enc28j60WriteReg(interface, ENC28J60_REG_MACON2, 0x00); //Enable the MAC to receive frames enc28j60WriteReg(interface, ENC28J60_REG_MACON1, MACON1_TXPAUS | MACON1_RXPAUS | MACON1_MARXEN); //Enable automatic padding to at least 60 bytes, always append a valid CRC //and check frame length. MAC can operate in half-duplex or full-duplex mode #if (ENC28J60_FULL_DUPLEX_SUPPORT == ENABLED) enc28j60WriteReg(interface, ENC28J60_REG_MACON3, MACON3_PADCFG(1) | MACON3_TXCRCEN | MACON3_FRMLNEN | MACON3_FULDPX); #else enc28j60WriteReg(interface, ENC28J60_REG_MACON3, MACON3_PADCFG(1) | MACON3_TXCRCEN | MACON3_FRMLNEN); #endif //When the medium is occupied, the MAC will wait indefinitely for it to //become free when attempting to transmit enc28j60WriteReg(interface, ENC28J60_REG_MACON4, MACON4_DEFER); //Maximum frame length that can be received or transmitted enc28j60WriteReg(interface, ENC28J60_REG_MAMXFLL, LSB(ETH_MAX_FRAME_SIZE)); enc28j60WriteReg(interface, ENC28J60_REG_MAMXFLH, MSB(ETH_MAX_FRAME_SIZE)); //Configure the back-to-back inter-packet gap register #if (ENC28J60_FULL_DUPLEX_SUPPORT == ENABLED) enc28j60WriteReg(interface, ENC28J60_REG_MABBIPG, 0x15); #else enc28j60WriteReg(interface, ENC28J60_REG_MABBIPG, 0x12); #endif //Configure the non-back-to-back inter-packet gap register enc28j60WriteReg(interface, ENC28J60_REG_MAIPGL, 0x12); enc28j60WriteReg(interface, ENC28J60_REG_MAIPGH, 0x0C); //Collision window register enc28j60WriteReg(interface, ENC28J60_REG_MACLCON2, 63); //Set the PHY to the proper duplex mode #if 
(ENC28J60_FULL_DUPLEX_SUPPORT == ENABLED) enc28j60WritePhyReg(interface, ENC28J60_PHY_REG_PHCON1, PHCON1_PDPXMD); #else enc28j60WritePhyReg(interface, ENC28J60_PHY_REG_PHCON1, 0x0000); #endif //Disable half-duplex loopback in PHY enc28j60WritePhyReg(interface, ENC28J60_PHY_REG_PHCON2, PHCON2_HDLDIS); //LEDA displays link status and LEDB displays TX/RX activity enc28j60WritePhyReg(interface, ENC28J60_PHY_REG_PHLCON, PHLCON_LACFG(4) | PHLCON_LBCFG(7) | PHLCON_LFRQ(0) | PHLCON_STRCH); //Clear interrupt flags enc28j60WriteReg(interface, ENC28J60_REG_EIR, 0x00); //Configure interrupts as desired enc28j60WriteReg(interface, ENC28J60_REG_EIE, EIE_INTIE | EIE_PKTIE | EIE_LINKIE | EIE_TXIE | EIE_TXERIE); //Configure PHY interrupts as desired enc28j60WritePhyReg(interface, ENC28J60_PHY_REG_PHIE, PHIE_PLNKIE | PHIE_PGEIE); //Set RXEN to enable reception enc28j60SetBit(interface, ENC28J60_REG_ECON1, ECON1_RXEN); //Dump registers for debugging purpose enc28j60DumpReg(interface); enc28j60DumpPhyReg(interface); //Accept any packets from the upper layer osSetEvent(&interface->nicTxEvent); //Force the TCP/IP stack to poll the link state at startup interface->nicEvent = TRUE; //Notify the TCP/IP stack of the event osSetEvent(&netEvent); //Successful initialization return NO_ERROR; } /** * @brief ENC28J60 timer handler * @param[in] interface Underlying network interface **/ void enc28j60Tick(NetInterface *interface) { } /** * @brief Enable interrupts * @param[in] interface Underlying network interface **/ void enc28j60EnableIrq(NetInterface *interface) { //Enable interrupts interface->extIntDriver->enableIrq(); } /** * @brief Disable interrupts * @param[in] interface Underlying network interface **/ void enc28j60DisableIrq(NetInterface *interface) { //Disable interrupts interface->extIntDriver->disableIrq(); } /** * @brief ENC28J60 interrupt service routine * @param[in] interface Underlying network interface * @return TRUE if a higher priority task must be woken. 
Else FALSE is returned **/ bool_t enc28j60IrqHandler(NetInterface *interface) { bool_t flag; uint8_t status; //This flag will be set if a higher priority task must be woken flag = FALSE; //Clear the INTIE bit, immediately after an interrupt event enc28j60ClearBit(interface, ENC28J60_REG_EIE, EIE_INTIE); //Read interrupt status register status = enc28j60ReadReg(interface, ENC28J60_REG_EIR); //Link status change? if((status & EIR_LINKIF) != 0) { //Disable LINKIE interrupt enc28j60ClearBit(interface, ENC28J60_REG_EIE, EIE_LINKIE); //Set event flag interface->nicEvent = TRUE; //Notify the TCP/IP stack of the event flag |= osSetEventFromIsr(&netEvent); } //Packet received? if((status & EIR_PKTIF) != 0) { //Disable PKTIE interrupt enc28j60ClearBit(interface, ENC28J60_REG_EIE, EIE_PKTIE); //Set event flag interface->nicEvent = TRUE; //Notify the TCP/IP stack of the event flag |= osSetEventFromIsr(&netEvent); } //Packet transmission complete? if((status & (EIR_TXIF | EIE_TXERIE)) != 0) { //Clear interrupt flags enc28j60ClearBit(interface, ENC28J60_REG_EIR, EIR_TXIF | EIE_TXERIE); //Notify the TCP/IP stack that the transmitter is ready to send flag |= osSetEventFromIsr(&interface->nicTxEvent); } //Once the interrupt has been serviced, the INTIE bit //is set again to re-enable interrupts enc28j60SetBit(interface, ENC28J60_REG_EIE, EIE_INTIE); //A higher priority task must be woken? 
return flag; } /** * @brief ENC28J60 event handler * @param[in] interface Underlying network interface **/ void enc28j60EventHandler(NetInterface *interface) { error_t error; uint16_t status; uint16_t value; //Read interrupt status register status = enc28j60ReadReg(interface, ENC28J60_REG_EIR); //Check whether the link state has changed if((status & EIR_LINKIF) != 0) { //Clear PHY interrupts flags enc28j60ReadPhyReg(interface, ENC28J60_PHY_REG_PHIR); //Clear interrupt flag enc28j60ClearBit(interface, ENC28J60_REG_EIR, EIR_LINKIF); //Read PHY status register value = enc28j60ReadPhyReg(interface, ENC28J60_PHY_REG_PHSTAT2); //Check link state if((value & PHSTAT2_LSTAT) != 0) { //Link speed interface->linkSpeed = NIC_LINK_SPEED_10MBPS; #if (ENC28J60_FULL_DUPLEX_SUPPORT == ENABLED) //Full-duplex mode interface->duplexMode = NIC_FULL_DUPLEX_MODE; #else //Half-duplex mode interface->duplexMode = NIC_HALF_DUPLEX_MODE; #endif //Link is up interface->linkState = TRUE; } else { //Link is down interface->linkState = FALSE; } //Process link state change event nicNotifyLinkChange(interface); } //Check whether a packet has been received? if((status & EIR_PKTIF) != 0) { //Clear interrupt flag enc28j60ClearBit(interface, ENC28J60_REG_EIR, EIR_PKTIF); //Process all pending packets do { //Read incoming packet error = enc28j60ReceivePacket(interface); //No more data in the receive buffer? 
} while(error != ERROR_BUFFER_EMPTY); } //Re-enable LINKIE and PKTIE interrupts enc28j60SetBit(interface, ENC28J60_REG_EIE, EIE_LINKIE | EIE_PKTIE); } /** * @brief Send a packet * @param[in] interface Underlying network interface * @param[in] buffer Multi-part buffer containing the data to send * @param[in] offset Offset to the first data byte * @param[in] ancillary Additional options passed to the stack along with * the packet * @return Error code **/ error_t enc28j60SendPacket(NetInterface *interface, const NetBuffer *buffer, size_t offset, NetTxAncillary *ancillary) { size_t length; //Retrieve the length of the packet length = netBufferGetLength(buffer) - offset; //Check the frame length if(length > 1536) { //The transmitter can accept another packet osSetEvent(&interface->nicTxEvent); //Report an error return ERROR_INVALID_LENGTH; } //Make sure the link is up before transmitting the frame if(!interface->linkState) { //The transmitter can accept another packet osSetEvent(&interface->nicTxEvent); //Drop current packet return NO_ERROR; } //It is recommended to reset the transmit logic before //attempting to transmit a packet enc28j60SetBit(interface, ENC28J60_REG_ECON1, ECON1_TXRST); enc28j60ClearBit(interface, ENC28J60_REG_ECON1, ECON1_TXRST); //Interrupt flags should be cleared after the reset is completed enc28j60ClearBit(interface, ENC28J60_REG_EIR, EIR_TXIF | EIR_TXERIF); //Set transmit buffer location enc28j60WriteReg(interface, ENC28J60_REG_ETXSTL, LSB(ENC28J60_TX_BUFFER_START)); enc28j60WriteReg(interface, ENC28J60_REG_ETXSTH, MSB(ENC28J60_TX_BUFFER_START)); //Point to start of transmit buffer enc28j60WriteReg(interface, ENC28J60_REG_EWRPTL, LSB(ENC28J60_TX_BUFFER_START)); enc28j60WriteReg(interface, ENC28J60_REG_EWRPTH, MSB(ENC28J60_TX_BUFFER_START)); //Copy the data to the transmit buffer enc28j60WriteBuffer(interface, buffer, offset); //ETXND should point to the last byte in the data payload enc28j60WriteReg(interface, ENC28J60_REG_ETXNDL, 
LSB(ENC28J60_TX_BUFFER_START + length)); enc28j60WriteReg(interface, ENC28J60_REG_ETXNDH, MSB(ENC28J60_TX_BUFFER_START + length)); //Start transmission enc28j60SetBit(interface, ENC28J60_REG_ECON1, ECON1_TXRTS); //Successful processing return NO_ERROR; } /** * @brief Receive a packet * @param[in] interface Underlying network interface * @return Error code **/ error_t enc28j60ReceivePacket(NetInterface *interface) { error_t error; uint16_t n; uint16_t status; Enc28j60Context *context; //Point to the driver context context = (Enc28j60Context *) interface->nicContext; //Any packet pending in the receive buffer? if(enc28j60ReadReg(interface, ENC28J60_REG_EPKTCNT)) { //Point to the start of the received packet enc28j60WriteReg(interface, ENC28J60_REG_ERDPTL, LSB(context->nextPacket)); enc28j60WriteReg(interface, ENC28J60_REG_ERDPTH, MSB(context->nextPacket)); //Read the first two bytes, which are the address of the next packet enc28j60ReadBuffer(interface, (uint8_t *) &context->nextPacket, sizeof(uint16_t)); //Get the length of the received frame in bytes enc28j60ReadBuffer(interface, (uint8_t *) &n, sizeof(uint16_t)); //Read the receive status vector (RSV) enc28j60ReadBuffer(interface, (uint8_t *) &status, sizeof(uint16_t)); //Make sure no error occurred if((status & RSV_RECEIVED_OK) != 0) { //Limit the number of data to read n = MIN(n, ETH_MAX_FRAME_SIZE); //Read the Ethernet frame enc28j60ReadBuffer(interface, context->rxBuffer, n); //Valid packet received error = NO_ERROR; } else { //The received packet contains an error error = ERROR_INVALID_PACKET; } //Advance the ERXRDPT pointer, taking care to wrap back at the //end of the received memory buffer if(context->nextPacket == ENC28J60_RX_BUFFER_START) { enc28j60WriteReg(interface, ENC28J60_REG_ERXRDPTL, LSB(ENC28J60_RX_BUFFER_STOP)); enc28j60WriteReg(interface, ENC28J60_REG_ERXRDPTH, MSB(ENC28J60_RX_BUFFER_STOP)); } else { enc28j60WriteReg(interface, ENC28J60_REG_ERXRDPTL, LSB(context->nextPacket - 1)); 
enc28j60WriteReg(interface, ENC28J60_REG_ERXRDPTH, MSB(context->nextPacket - 1)); } //Decrement the packet counter enc28j60SetBit(interface, ENC28J60_REG_ECON2, ECON2_PKTDEC); } else { //No more data in the receive buffer error = ERROR_BUFFER_EMPTY; } //Check whether a valid packet has been received if(!error) { NetRxAncillary ancillary; //Additional options can be passed to the stack along with the packet ancillary = NET_DEFAULT_RX_ANCILLARY; //Pass the packet to the upper layer nicProcessPacket(interface, context->rxBuffer, n, &ancillary); } //Return status code return error; } /** * @brief Configure MAC address filtering * @param[in] interface Underlying network interface * @return Error code **/ error_t enc28j60UpdateMacAddrFilter(NetInterface *interface) { uint_t i; uint_t k; uint32_t crc; uint8_t hashTable[8]; MacFilterEntry *entry; //Debug message TRACE_DEBUG("Updating MAC filter...\r\n"); //Clear hash table osMemset(hashTable, 0, sizeof(hashTable)); //The MAC address filter contains the list of MAC addresses to accept //when receiving an Ethernet frame for(i = 0; i < MAC_ADDR_FILTER_SIZE; i++) { //Point to the current entry entry = &interface->macAddrFilter[i]; //Valid entry? 
if(entry->refCount > 0) { //Compute CRC over the current MAC address crc = enc28j60CalcCrc(&entry->addr, sizeof(MacAddr)); //Calculate the corresponding index in the table k = (crc >> 23) & 0x3F; //Update hash table contents hashTable[k / 8] |= (1 << (k % 8)); } } //Write the hash table to the ENC28J60 controller enc28j60WriteReg(interface, ENC28J60_REG_EHT0, hashTable[0]); enc28j60WriteReg(interface, ENC28J60_REG_EHT1, hashTable[1]); enc28j60WriteReg(interface, ENC28J60_REG_EHT2, hashTable[2]); enc28j60WriteReg(interface, ENC28J60_REG_EHT3, hashTable[3]); enc28j60WriteReg(interface, ENC28J60_REG_EHT4, hashTable[4]); enc28j60WriteReg(interface, ENC28J60_REG_EHT5, hashTable[5]); enc28j60WriteReg(interface, ENC28J60_REG_EHT6, hashTable[6]); enc28j60WriteReg(interface, ENC28J60_REG_EHT7, hashTable[7]); //Debug message TRACE_DEBUG(" EHT0 = %02" PRIX8 "\r\n", enc28j60ReadReg(interface, ENC28J60_REG_EHT0)); TRACE_DEBUG(" EHT1 = %02" PRIX8 "\r\n", enc28j60ReadReg(interface, ENC28J60_REG_EHT1)); TRACE_DEBUG(" EHT2 = %02" PRIX8 "\r\n", enc28j60ReadReg(interface, ENC28J60_REG_EHT2)); TRACE_DEBUG(" EHT3 = %02" PRIX8 "\r\n", enc28j60ReadReg(interface, ENC28J60_REG_EHT3)); TRACE_DEBUG(" EHT0 = %02" PRIX8 "\r\n", enc28j60ReadReg(interface, ENC28J60_REG_EHT4)); TRACE_DEBUG(" EHT1 = %02" PRIX8 "\r\n", enc28j60ReadReg(interface, ENC28J60_REG_EHT5)); TRACE_DEBUG(" EHT2 = %02" PRIX8 "\r\n", enc28j60ReadReg(interface, ENC28J60_REG_EHT6)); TRACE_DEBUG(" EHT3 = %02" PRIX8 "\r\n", enc28j60ReadReg(interface, ENC28J60_REG_EHT7)); //Successful processing return NO_ERROR; } /** * @brief ENC28J60 controller reset * @param[in] interface Underlying network interface **/ void enc28j60SoftReset(NetInterface *interface) { //Pull the CS pin low interface->spiDriver->assertCs(); //Write opcode interface->spiDriver->transfer(ENC28J60_CMD_SRC); //Terminate the operation by raising the CS pin interface->spiDriver->deassertCs(); } /** * @brief Bank selection * @param[in] interface Underlying network 
interface * @param[in] address Register address **/ void enc28j60SelectBank(NetInterface *interface, uint16_t address) { uint16_t bank; Enc28j60Context *context; //Point to the driver context context = (Enc28j60Context *) interface->nicContext; //Get the bank number from the specified address bank = address & REG_BANK_MASK; //Rewrite the bank number only if a change is detected if(bank != context->currentBank) { //Select specified bank switch(bank) { case BANK_0: //Select bank 0 enc28j60ClearBit(interface, ENC28J60_REG_ECON1, ECON1_BSEL1 | ECON1_BSEL0); break; case BANK_1: //Select bank 1 enc28j60SetBit(interface, ENC28J60_REG_ECON1, ECON1_BSEL0); enc28j60ClearBit(interface, ENC28J60_REG_ECON1, ECON1_BSEL1); break; case BANK_2: //Select bank 2 enc28j60ClearBit(interface, ENC28J60_REG_ECON1, ECON1_BSEL0); enc28j60SetBit(interface, ENC28J60_REG_ECON1, ECON1_BSEL1); break; case BANK_3: //Select bank 3 enc28j60SetBit(interface, ENC28J60_REG_ECON1, ECON1_BSEL1 | ECON1_BSEL0); break; default: //Invalid bank break; } //Save bank number context->currentBank = bank; } } /** * @brief Write ENC28J60 register * @param[in] interface Underlying network interface * @param[in] address Register address * @param[in] data Register value **/ void enc28j60WriteReg(NetInterface *interface, uint16_t address, uint8_t data) { //Make sure the corresponding bank is selected enc28j60SelectBank(interface, address); //Pull the CS pin low interface->spiDriver->assertCs(); //Write opcode and register address interface->spiDriver->transfer(ENC28J60_CMD_WCR | (address & REG_ADDR_MASK)); //Write register value interface->spiDriver->transfer(data); //Terminate the operation by raising the CS pin interface->spiDriver->deassertCs(); } /** * @brief Read ENC28J60 register * @param[in] interface Underlying network interface * @param[in] address Register address * @return Register value **/ uint8_t enc28j60ReadReg(NetInterface *interface, uint16_t address) { uint16_t data; //Make sure the corresponding 
bank is selected enc28j60SelectBank(interface, address); //Pull the CS pin low interface->spiDriver->assertCs(); //Write opcode and register address interface->spiDriver->transfer(ENC28J60_CMD_RCR | (address & REG_ADDR_MASK)); //When reading MAC or MII registers, a dummy byte is first shifted out if((address & REG_TYPE_MASK) != ETH_REG_TYPE) { interface->spiDriver->transfer(0x00); } //Read register contents data = interface->spiDriver->transfer(0x00); //Terminate the operation by raising the CS pin interface->spiDriver->deassertCs(); //Return register contents return data; } /** * @brief Write PHY register * @param[in] interface Underlying network interface * @param[in] address PHY register address * @param[in] data Register value **/ void enc28j60WritePhyReg(NetInterface *interface, uint16_t address, uint16_t data) { //Write register address enc28j60WriteReg(interface, ENC28J60_REG_MIREGADR, address & REG_ADDR_MASK); //Write the lower 8 bits enc28j60WriteReg(interface, ENC28J60_REG_MIWRL, LSB(data)); //Write the upper 8 bits enc28j60WriteReg(interface, ENC28J60_REG_MIWRH, MSB(data)); //Wait until the PHY register has been written while((enc28j60ReadReg(interface, ENC28J60_REG_MISTAT) & MISTAT_BUSY) != 0) { } } /** * @brief Read PHY register * @param[in] interface Underlying network interface * @param[in] address PHY register address * @return Register value **/ uint16_t enc28j60ReadPhyReg(NetInterface *interface, uint16_t address) { uint16_t data; //Write register address enc28j60WriteReg(interface, ENC28J60_REG_MIREGADR, address & REG_ADDR_MASK); //Start read operation enc28j60WriteReg(interface, ENC28J60_REG_MICMD, MICMD_MIIRD); //Wait for the read operation to complete while((enc28j60ReadReg(interface, ENC28J60_REG_MISTAT) & MISTAT_BUSY) != 0) { } //Clear command register enc28j60WriteReg(interface, ENC28J60_REG_MICMD, 0); //Read the lower 8 bits data = enc28j60ReadReg(interface, ENC28J60_REG_MIRDL); //Read the upper 8 bits data |= enc28j60ReadReg(interface, 
ENC28J60_REG_MIRDH) << 8; //Return register contents return data; } /** * @brief Write SRAM buffer * @param[in] interface Underlying network interface * @param[in] buffer Multi-part buffer containing the data to be written * @param[in] offset Offset to the first data byte **/ void enc28j60WriteBuffer(NetInterface *interface, const NetBuffer *buffer, size_t offset) { uint_t i; size_t j; size_t n; uint8_t *p; //Pull the CS pin low interface->spiDriver->assertCs(); //Write opcode interface->spiDriver->transfer(ENC28J60_CMD_WBM); //Write per-packet control byte interface->spiDriver->transfer(0x00); //Loop through data chunks for(i = 0; i < buffer->chunkCount; i++) { //Is there any data to copy from the current chunk? if(offset < buffer->chunk[i].length) { //Point to the first byte to be read p = (uint8_t *) buffer->chunk[i].address + offset; //Compute the number of bytes to copy at a time n = buffer->chunk[i].length - offset; //Copy data to SRAM buffer for(j = 0; j < n; j++) { interface->spiDriver->transfer(p[j]); } //Process the next block from the start offset = 0; } else { //Skip the current chunk offset -= buffer->chunk[i].length; } } //Terminate the operation by raising the CS pin interface->spiDriver->deassertCs(); } /** * @brief Read SRAM buffer * @param[in] interface Underlying network interface * @param[in] data Buffer where to store the incoming data * @param[in] length Number of data to read **/ void enc28j60ReadBuffer(NetInterface *interface, uint8_t *data, size_t length) { size_t i; //Pull the CS pin low interface->spiDriver->assertCs(); //Write opcode interface->spiDriver->transfer(ENC28J60_CMD_RBM); //Copy data from SRAM buffer for(i = 0; i < length; i++) { data[i] = interface->spiDriver->transfer(0x00); } //Terminate the operation by raising the CS pin interface->spiDriver->deassertCs(); } /** * @brief Set bit field * @param[in] interface Underlying network interface * @param[in] address Register address * @param[in] mask Bits to set in the target 
register **/ void enc28j60SetBit(NetInterface *interface, uint16_t address, uint16_t mask) { //Pull the CS pin low interface->spiDriver->assertCs(); //Write opcode and register address interface->spiDriver->transfer(ENC28J60_CMD_BFS | (address & REG_ADDR_MASK)); //Write bit mask interface->spiDriver->transfer(mask); //Terminate the operation by raising the CS pin interface->spiDriver->deassertCs(); } /** * @brief Clear bit field * @param[in] interface Underlying network interface * @param[in] address Register address * @param[in] mask Bits to clear in the target register **/ void enc28j60ClearBit(NetInterface *interface, uint16_t address, uint16_t mask) { //Pull the CS pin low interface->spiDriver->assertCs(); //Write opcode and register address interface->spiDriver->transfer(ENC28J60_CMD_BFC | (address & REG_ADDR_MASK)); //Write bit mask interface->spiDriver->transfer(mask); //Terminate the operation by raising the CS pin interface->spiDriver->deassertCs(); } /** * @brief CRC calculation using the polynomial 0x4C11DB7 * @param[in] data Pointer to the data over which to calculate the CRC * @param[in] length Number of bytes to process * @return Resulting CRC value **/ uint32_t enc28j60CalcCrc(const void *data, size_t length) { uint_t i; uint_t j; uint32_t crc; const uint8_t *p; //Point to the data over which to calculate the CRC p = (uint8_t *) data; //CRC preset value crc = 0xFFFFFFFF; //Loop through data for(i = 0; i < length; i++) { //The message is processed bit by bit for(j = 0; j < 8; j++) { //Update CRC value if((((crc >> 31) ^ (p[i] >> j)) & 0x01) != 0) { crc = (crc << 1) ^ 0x04C11DB7; } else { crc = crc << 1; } } } //Return CRC value return crc; } /** * @brief Dump registers for debugging purpose * @param[in] interface Underlying network interface **/ void enc28j60DumpReg(NetInterface *interface) { #if (TRACE_LEVEL >= TRACE_LEVEL_DEBUG) uint8_t i; uint8_t bank; uint16_t address; //Display header TRACE_DEBUG(" Bank 0 Bank 1 Bank 2 Bank 3\r\n"); //Loop 
through register addresses for(i = 0; i < 32; i++) { //Display register address TRACE_DEBUG("%02" PRIX8 ": ", i); //Loop through bank numbers for(bank = 0; bank < 4; bank++) { //Format register address address = (bank << 8) | i; //MAC and MII registers require a specific read sequence if(address >= 0x200 && address <= 0x219) { address |= MAC_REG_TYPE; } else if(address >= 0x300 && address <= 0x305) { address |= MAC_REG_TYPE; } else if(address == 0x30A) { address |= MAC_REG_TYPE; } //Display register contents TRACE_DEBUG("0x%02" PRIX8 " ", enc28j60ReadReg(interface, address)); } //Jump to the following line TRACE_DEBUG("\r\n"); } //Terminate with a line feed TRACE_DEBUG("\r\n"); #endif } /** * @brief Dump PHY registers for debugging purpose * @param[in] interface Underlying network interface **/ void enc28j60DumpPhyReg(NetInterface *interface) { #if (TRACE_LEVEL >= TRACE_LEVEL_DEBUG) uint8_t i; //Loop through PHY registers for(i = 0; i < 32; i++) { //Display current PHY register TRACE_DEBUG("%02" PRIX8 ": 0x%04" PRIX16 "\r\n", i, enc28j60ReadPhyReg(interface, i)); } //Terminate with a line feed TRACE_DEBUG("\r\n"); #endif }
/**
 * @file enc28j60_driver.c
 * @brief ENC28J60 Ethernet controller
 *
 * @section License
 *
 * SPDX-License-Identifier: GPL-2.0-or-later
 *
 * Copyright (C) 2010-2021 Oryx Embedded SARL. All rights reserved.
 *
 * This file is part of CycloneTCP Open.
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public License
 * as published by the Free Software Foundation; either version 2
 * of the License, or (at your option) any later version.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, write to the Free Software Foundation,
 * Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA.
 *
 * @author Oryx Embedded SARL (www.oryx-embedded.com)
 * @version 2.0.2
 **/

//Switch to the appropriate trace level
#define TRACE_LEVEL NIC_TRACE_LEVEL

//Dependencies
#include "core/net.h"
#include "drivers/eth/enc28j60_driver.h"
#include "debug.h"


/**
 * @brief ENC28J60 driver
 * Function table consumed by the generic NIC layer
 **/

const NicDriver enc28j60Driver =
{
   NIC_TYPE_ETHERNET,
   ETH_MTU,
   enc28j60Init,
   enc28j60Tick,
   enc28j60EnableIrq,
   enc28j60DisableIrq,
   enc28j60EventHandler,
   enc28j60SendPacket,
   enc28j60UpdateMacAddrFilter,
   NULL,
   NULL,
   NULL,
   TRUE,
   TRUE,
   TRUE,
   FALSE
};


/**
 * @brief ENC28J60 controller initialization
 * @param[in] interface Underlying network interface
 * @return Error code
 **/

error_t enc28j60Init(NetInterface *interface)
{
   uint8_t revisionId;
   Enc28j60Context *context;

   //Debug message
   TRACE_INFO("Initializing ENC28J60 Ethernet controller...\r\n");

   //Initialize SPI
   interface->spiDriver->init();
   //Initialize external interrupt line
   interface->extIntDriver->init();

   //Issue a system reset
   enc28j60SoftReset(interface);
   //After issuing the reset command, wait at least 1ms in firmware
   //for the device to be ready
   sleep(10);

   //Point to the driver context
   context = (Enc28j60Context *) interface->nicContext;

   //Initialize driver specific variables
   //(UINT16_MAX forces the first register access to select a bank)
   context->currentBank = UINT16_MAX;
   context->nextPacket = ENC28J60_RX_BUFFER_START;

   //Allocate RX buffer
   context->rxBuffer = memPoolAlloc(ETH_MAX_FRAME_SIZE);
   //Failed to allocate memory?
   if(context->rxBuffer == NULL)
   {
      return ERROR_OUT_OF_MEMORY;
   }

   //Read silicon revision ID
   revisionId = enc28j60ReadReg(interface, ENC28J60_EREVID);
   //Debug message
   TRACE_INFO("ENC28J60 revision ID: 0x%02X\r\n", revisionId);

   //Disable CLKOUT output
   enc28j60WriteReg(interface, ENC28J60_ECOCON,
      ENC28J60_ECOCON_COCON_DISABLED);

   //Set the MAC address of the station
   //(MAADR5 holds the first byte of the address, MAADR0 the last)
   enc28j60WriteReg(interface, ENC28J60_MAADR5, interface->macAddr.b[0]);
   enc28j60WriteReg(interface, ENC28J60_MAADR4, interface->macAddr.b[1]);
   enc28j60WriteReg(interface, ENC28J60_MAADR3, interface->macAddr.b[2]);
   enc28j60WriteReg(interface, ENC28J60_MAADR2, interface->macAddr.b[3]);
   enc28j60WriteReg(interface, ENC28J60_MAADR1, interface->macAddr.b[4]);
   enc28j60WriteReg(interface, ENC28J60_MAADR0, interface->macAddr.b[5]);

   //Set receive buffer location
   enc28j60WriteReg(interface, ENC28J60_ERXSTL, LSB(ENC28J60_RX_BUFFER_START));
   enc28j60WriteReg(interface, ENC28J60_ERXSTH, MSB(ENC28J60_RX_BUFFER_START));
   enc28j60WriteReg(interface, ENC28J60_ERXNDL, LSB(ENC28J60_RX_BUFFER_STOP));
   enc28j60WriteReg(interface, ENC28J60_ERXNDH, MSB(ENC28J60_RX_BUFFER_STOP));

   //The ERXRDPT register defines a location within the FIFO where the receive
   //hardware is forbidden to write to
   enc28j60WriteReg(interface, ENC28J60_ERXRDPTL,
      LSB(ENC28J60_RX_BUFFER_STOP));
   enc28j60WriteReg(interface, ENC28J60_ERXRDPTH,
      MSB(ENC28J60_RX_BUFFER_STOP));

   //Configure the receive filters
   enc28j60WriteReg(interface, ENC28J60_ERXFCON, ENC28J60_ERXFCON_UCEN |
      ENC28J60_ERXFCON_CRCEN | ENC28J60_ERXFCON_HTEN | ENC28J60_ERXFCON_BCEN);

   //Initialize the hash table
   enc28j60WriteReg(interface, ENC28J60_EHT0, 0x00);
   enc28j60WriteReg(interface, ENC28J60_EHT1, 0x00);
   enc28j60WriteReg(interface, ENC28J60_EHT2, 0x00);
   enc28j60WriteReg(interface, ENC28J60_EHT3, 0x00);
   enc28j60WriteReg(interface, ENC28J60_EHT4, 0x00);
   enc28j60WriteReg(interface, ENC28J60_EHT5, 0x00);
   enc28j60WriteReg(interface, ENC28J60_EHT6, 0x00);
   enc28j60WriteReg(interface, ENC28J60_EHT7, 0x00);

   //Pull the MAC out of reset
   enc28j60WriteReg(interface, ENC28J60_MACON2, 0x00);

   //Enable the MAC to receive frames
   enc28j60WriteReg(interface, ENC28J60_MACON1, ENC28J60_MACON1_TXPAUS |
      ENC28J60_MACON1_RXPAUS | ENC28J60_MACON1_MARXEN);

   //Enable automatic padding, always append a valid CRC and check frame
   //length. MAC can operate in half-duplex or full-duplex mode
#if (ENC28J60_FULL_DUPLEX_SUPPORT == ENABLED)
   enc28j60WriteReg(interface, ENC28J60_MACON3, ENC28J60_MACON3_PADCFG_AUTO |
      ENC28J60_MACON3_TXCRCEN | ENC28J60_MACON3_FRMLNEN |
      ENC28J60_MACON3_FULDPX);
#else
   enc28j60WriteReg(interface, ENC28J60_MACON3, ENC28J60_MACON3_PADCFG_AUTO |
      ENC28J60_MACON3_TXCRCEN | ENC28J60_MACON3_FRMLNEN);
#endif

   //When the medium is occupied, the MAC will wait indefinitely for it to
   //become free when attempting to transmit
   enc28j60WriteReg(interface, ENC28J60_MACON4, ENC28J60_MACON4_DEFER);

   //Maximum frame length that can be received or transmitted
   enc28j60WriteReg(interface, ENC28J60_MAMXFLL, LSB(ETH_MAX_FRAME_SIZE));
   enc28j60WriteReg(interface, ENC28J60_MAMXFLH, MSB(ETH_MAX_FRAME_SIZE));

   //Configure the back-to-back inter-packet gap register
#if (ENC28J60_FULL_DUPLEX_SUPPORT == ENABLED)
   enc28j60WriteReg(interface, ENC28J60_MABBIPG, ENC28J60_MABBIPG_DEFAULT_FD);
#else
   enc28j60WriteReg(interface, ENC28J60_MABBIPG, ENC28J60_MABBIPG_DEFAULT_HD);
#endif

   //Configure the non-back-to-back inter-packet gap register
   enc28j60WriteReg(interface, ENC28J60_MAIPGL, ENC28J60_MAIPGL_DEFAULT);
   enc28j60WriteReg(interface, ENC28J60_MAIPGH, ENC28J60_MAIPGH_DEFAULT);

   //Collision window register
   enc28j60WriteReg(interface, ENC28J60_MACLCON2,
      ENC28J60_MACLCON2_COLWIN_DEFAULT);

   //Set the PHY to the proper duplex mode
#if (ENC28J60_FULL_DUPLEX_SUPPORT == ENABLED)
   enc28j60WritePhyReg(interface, ENC28J60_PHCON1, ENC28J60_PHCON1_PDPXMD);
#else
   enc28j60WritePhyReg(interface, ENC28J60_PHCON1, 0x0000);
#endif

   //Disable half-duplex loopback in PHY
   enc28j60WritePhyReg(interface, ENC28J60_PHCON2, ENC28J60_PHCON2_HDLDIS);

   //LEDA displays link status and LEDB displays TX/RX activity
   enc28j60WritePhyReg(interface, ENC28J60_PHLCON, ENC28J60_PHLCON_LACFG_LINK |
      ENC28J60_PHLCON_LBCFG_TX_RX | ENC28J60_PHLCON_LFRQ_40_MS |
      ENC28J60_PHLCON_STRCH);

   //Clear interrupt flags
   enc28j60WriteReg(interface, ENC28J60_EIR, 0x00);

   //Configure interrupts as desired
   enc28j60WriteReg(interface, ENC28J60_EIE, ENC28J60_EIE_INTIE |
      ENC28J60_EIE_PKTIE | ENC28J60_EIE_LINKIE | ENC28J60_EIE_TXIE |
      ENC28J60_EIE_TXERIE);

   //Configure PHY interrupts as desired
   enc28j60WritePhyReg(interface, ENC28J60_PHIE, ENC28J60_PHIE_PLNKIE |
      ENC28J60_PHIE_PGEIE);

   //Set RXEN to enable reception
   enc28j60SetBit(interface, ENC28J60_ECON1, ENC28J60_ECON1_RXEN);

   //Dump registers for debugging purpose
   enc28j60DumpReg(interface);
   enc28j60DumpPhyReg(interface);

   //Accept any packets from the upper layer
   osSetEvent(&interface->nicTxEvent);

   //Force the TCP/IP stack to poll the link state at startup
   interface->nicEvent = TRUE;
   //Notify the TCP/IP stack of the event
   osSetEvent(&netEvent);

   //Successful initialization
   return NO_ERROR;
}


/**
 * @brief ENC28J60 timer handler
 * @param[in] interface Underlying network interface
 **/

void enc28j60Tick(NetInterface *interface)
{
}


/**
 * @brief Enable interrupts
 * @param[in] interface Underlying network interface
 **/

void enc28j60EnableIrq(NetInterface *interface)
{
   //Enable interrupts
   interface->extIntDriver->enableIrq();
}


/**
 * @brief Disable interrupts
 * @param[in] interface Underlying network interface
 **/

void enc28j60DisableIrq(NetInterface *interface)
{
   //Disable interrupts
   interface->extIntDriver->disableIrq();
}


/**
 * @brief ENC28J60 interrupt service routine
 * @param[in] interface Underlying network interface
 * @return TRUE if a higher priority task must be woken. Else FALSE is returned
 **/

bool_t enc28j60IrqHandler(NetInterface *interface)
{
   bool_t flag;
   uint8_t status;

   //This flag will be set if a higher priority task must be woken
   flag = FALSE;

   //Clear the INTIE bit, immediately after an interrupt event
   enc28j60ClearBit(interface, ENC28J60_EIE, ENC28J60_EIE_INTIE);

   //Read interrupt status register
   status = enc28j60ReadReg(interface, ENC28J60_EIR);

   //Link status change?
   if((status & ENC28J60_EIR_LINKIF) != 0)
   {
      //Disable LINKIE interrupt
      enc28j60ClearBit(interface, ENC28J60_EIE, ENC28J60_EIE_LINKIE);

      //Set event flag
      interface->nicEvent = TRUE;
      //Notify the TCP/IP stack of the event
      flag |= osSetEventFromIsr(&netEvent);
   }

   //Packet received? Note the packet counter is polled here rather than
   //testing the EIR.PKTIF flag
   if(enc28j60ReadReg(interface, ENC28J60_EPKTCNT) != 0)
   {
      //Disable PKTIE interrupt
      enc28j60ClearBit(interface, ENC28J60_EIE, ENC28J60_EIE_PKTIE);

      //Set event flag
      interface->nicEvent = TRUE;
      //Notify the TCP/IP stack of the event
      flag |= osSetEventFromIsr(&netEvent);
   }

   //Packet transmission complete?
   if((status & (ENC28J60_EIR_TXIF | ENC28J60_EIE_TXERIE)) != 0)
   {
      //Clear interrupt flags
      enc28j60ClearBit(interface, ENC28J60_EIR, ENC28J60_EIR_TXIF |
         ENC28J60_EIE_TXERIE);

      //Notify the TCP/IP stack that the transmitter is ready to send
      flag |= osSetEventFromIsr(&interface->nicTxEvent);
   }

   //Once the interrupt has been serviced, the INTIE bit
   //is set again to re-enable interrupts
   enc28j60SetBit(interface, ENC28J60_EIE, ENC28J60_EIE_INTIE);

   //A higher priority task must be woken?
   return flag;
}


/**
 * @brief ENC28J60 event handler
 * @param[in] interface Underlying network interface
 **/

void enc28j60EventHandler(NetInterface *interface)
{
   error_t error;
   uint16_t status;
   uint16_t value;

   //Read interrupt status register
   status = enc28j60ReadReg(interface, ENC28J60_EIR);

   //Check whether the link state has changed
   if((status & ENC28J60_EIR_LINKIF) != 0)
   {
      //Clear PHY interrupts flags
      enc28j60ReadPhyReg(interface, ENC28J60_PHIR);
      //Clear interrupt flag
      enc28j60ClearBit(interface, ENC28J60_EIR, ENC28J60_EIR_LINKIF);
      //Read PHY status register
      value = enc28j60ReadPhyReg(interface, ENC28J60_PHSTAT2);

      //Check link state
      if((value & ENC28J60_PHSTAT2_LSTAT) != 0)
      {
         //Link speed
         interface->linkSpeed = NIC_LINK_SPEED_10MBPS;

#if (ENC28J60_FULL_DUPLEX_SUPPORT == ENABLED)
         //Full-duplex mode
         interface->duplexMode = NIC_FULL_DUPLEX_MODE;
#else
         //Half-duplex mode
         interface->duplexMode = NIC_HALF_DUPLEX_MODE;
#endif
         //Link is up
         interface->linkState = TRUE;
      }
      else
      {
         //Link is down
         interface->linkState = FALSE;
      }

      //Process link state change event
      nicNotifyLinkChange(interface);
   }

   //Check whether a packet has been received? The packet counter is
   //polled rather than the EIR.PKTIF flag
   if(enc28j60ReadReg(interface, ENC28J60_EPKTCNT) != 0)
   {
      //Clear interrupt flag
      enc28j60ClearBit(interface, ENC28J60_EIR, ENC28J60_EIR_PKTIF);

      //Process all pending packets
      do
      {
         //Read incoming packet
         error = enc28j60ReceivePacket(interface);

         //No more data in the receive buffer?
      } while(error != ERROR_BUFFER_EMPTY);
   }

   //Re-enable LINKIE and PKTIE interrupts
   enc28j60SetBit(interface, ENC28J60_EIE, ENC28J60_EIE_LINKIE |
      ENC28J60_EIE_PKTIE);
}


/**
 * @brief Send a packet
 * @param[in] interface Underlying network interface
 * @param[in] buffer Multi-part buffer containing the data to send
 * @param[in] offset Offset to the first data byte
 * @param[in] ancillary Additional options passed to the stack along with
 *   the packet
 * @return Error code
 **/

error_t enc28j60SendPacket(NetInterface *interface,
   const NetBuffer *buffer, size_t offset, NetTxAncillary *ancillary)
{
   size_t length;

   //Retrieve the length of the packet
   length = netBufferGetLength(buffer) - offset;

   //Check the frame length
   if(length > 1536)
   {
      //The transmitter can accept another packet
      osSetEvent(&interface->nicTxEvent);
      //Report an error
      return ERROR_INVALID_LENGTH;
   }

   //Make sure the link is up before transmitting the frame
   if(!interface->linkState)
   {
      //The transmitter can accept another packet
      osSetEvent(&interface->nicTxEvent);
      //Drop current packet
      return NO_ERROR;
   }

   //It is recommended to reset the transmit logic before
   //attempting to transmit a packet
   enc28j60SetBit(interface, ENC28J60_ECON1, ENC28J60_ECON1_TXRST);
   enc28j60ClearBit(interface, ENC28J60_ECON1, ENC28J60_ECON1_TXRST);

   //Interrupt flags should be cleared after the reset is completed
   enc28j60ClearBit(interface, ENC28J60_EIR, ENC28J60_EIR_TXIF |
      ENC28J60_EIR_TXERIF);

   //Set transmit buffer location
   enc28j60WriteReg(interface, ENC28J60_ETXSTL, LSB(ENC28J60_TX_BUFFER_START));
   enc28j60WriteReg(interface, ENC28J60_ETXSTH, MSB(ENC28J60_TX_BUFFER_START));

   //Point to start of transmit buffer
   enc28j60WriteReg(interface, ENC28J60_EWRPTL, LSB(ENC28J60_TX_BUFFER_START));
   enc28j60WriteReg(interface, ENC28J60_EWRPTH, MSB(ENC28J60_TX_BUFFER_START));

   //Copy the data to the transmit buffer
   enc28j60WriteBuffer(interface, buffer, offset);

   //ETXND should point to the last byte in the data payload
   enc28j60WriteReg(interface,
ENC28J60_ETXNDL, LSB(ENC28J60_TX_BUFFER_START + length)); enc28j60WriteReg(interface, ENC28J60_ETXNDH, MSB(ENC28J60_TX_BUFFER_START + length)); //Start transmission enc28j60SetBit(interface, ENC28J60_ECON1, ENC28J60_ECON1_TXRTS); //Successful processing return NO_ERROR; } /** * @brief Receive a packet * @param[in] interface Underlying network interface * @return Error code **/ error_t enc28j60ReceivePacket(NetInterface *interface) { error_t error; uint16_t length; uint16_t status; uint8_t header[6]; Enc28j60Context *context; //Point to the driver context context = (Enc28j60Context *) interface->nicContext; //Any packet pending in the receive buffer? if(enc28j60ReadReg(interface, ENC28J60_EPKTCNT) != 0) { //Point to the start of the received packet enc28j60WriteReg(interface, ENC28J60_ERDPTL, LSB(context->nextPacket)); enc28j60WriteReg(interface, ENC28J60_ERDPTH, MSB(context->nextPacket)); //The packet is preceded by a 6-byte header enc28j60ReadBuffer(interface, header, sizeof(header)); //The first two bytes are the address of the next packet context->nextPacket = LOAD16LE(header); //Get the length of the received packet length = LOAD16LE(header + 2); //Get the receive status vector (RSV) status = LOAD16LE(header + 4); //Make sure no error occurred if((status & ENC28J60_RSV_RECEIVED_OK) != 0) { //Limit the number of data to read length = MIN(length, ETH_MAX_FRAME_SIZE); //Read the Ethernet frame enc28j60ReadBuffer(interface, context->rxBuffer, length); //Valid packet received error = NO_ERROR; } else { //The received packet contains an error error = ERROR_INVALID_PACKET; } //Advance the ERXRDPT pointer, taking care to wrap back at the //end of the received memory buffer if(context->nextPacket == ENC28J60_RX_BUFFER_START) { enc28j60WriteReg(interface, ENC28J60_ERXRDPTL, LSB(ENC28J60_RX_BUFFER_STOP)); enc28j60WriteReg(interface, ENC28J60_ERXRDPTH, MSB(ENC28J60_RX_BUFFER_STOP)); } else { enc28j60WriteReg(interface, ENC28J60_ERXRDPTL, LSB(context->nextPacket - 1)); 
enc28j60WriteReg(interface, ENC28J60_ERXRDPTH, MSB(context->nextPacket - 1)); } //Decrement the packet counter enc28j60SetBit(interface, ENC28J60_ECON2, ENC28J60_ECON2_PKTDEC); } else { //No more data in the receive buffer error = ERROR_BUFFER_EMPTY; } //Check whether a valid packet has been received if(!error) { NetRxAncillary ancillary; //Additional options can be passed to the stack along with the packet ancillary = NET_DEFAULT_RX_ANCILLARY; //Pass the packet to the upper layer nicProcessPacket(interface, context->rxBuffer, length, &ancillary); } //Return status code return error; } /** * @brief Configure MAC address filtering * @param[in] interface Underlying network interface * @return Error code **/ error_t enc28j60UpdateMacAddrFilter(NetInterface *interface) { uint_t i; uint_t k; uint32_t crc; uint8_t hashTable[8]; MacFilterEntry *entry; //Debug message TRACE_DEBUG("Updating MAC filter...\r\n"); //Clear hash table osMemset(hashTable, 0, sizeof(hashTable)); //The MAC address filter contains the list of MAC addresses to accept //when receiving an Ethernet frame for(i = 0; i < MAC_ADDR_FILTER_SIZE; i++) { //Point to the current entry entry = &interface->macAddrFilter[i]; //Valid entry? 
if(entry->refCount > 0) { //Compute CRC over the current MAC address crc = enc28j60CalcCrc(&entry->addr, sizeof(MacAddr)); //Calculate the corresponding index in the table k = (crc >> 23) & 0x3F; //Update hash table contents hashTable[k / 8] |= (1 << (k % 8)); } } //Write the hash table to the ENC28J60 controller enc28j60WriteReg(interface, ENC28J60_EHT0, hashTable[0]); enc28j60WriteReg(interface, ENC28J60_EHT1, hashTable[1]); enc28j60WriteReg(interface, ENC28J60_EHT2, hashTable[2]); enc28j60WriteReg(interface, ENC28J60_EHT3, hashTable[3]); enc28j60WriteReg(interface, ENC28J60_EHT4, hashTable[4]); enc28j60WriteReg(interface, ENC28J60_EHT5, hashTable[5]); enc28j60WriteReg(interface, ENC28J60_EHT6, hashTable[6]); enc28j60WriteReg(interface, ENC28J60_EHT7, hashTable[7]); //Debug message TRACE_DEBUG(" EHT0 = %02" PRIX8 "\r\n", enc28j60ReadReg(interface, ENC28J60_EHT0)); TRACE_DEBUG(" EHT1 = %02" PRIX8 "\r\n", enc28j60ReadReg(interface, ENC28J60_EHT1)); TRACE_DEBUG(" EHT2 = %02" PRIX8 "\r\n", enc28j60ReadReg(interface, ENC28J60_EHT2)); TRACE_DEBUG(" EHT3 = %02" PRIX8 "\r\n", enc28j60ReadReg(interface, ENC28J60_EHT3)); TRACE_DEBUG(" EHT0 = %02" PRIX8 "\r\n", enc28j60ReadReg(interface, ENC28J60_EHT4)); TRACE_DEBUG(" EHT1 = %02" PRIX8 "\r\n", enc28j60ReadReg(interface, ENC28J60_EHT5)); TRACE_DEBUG(" EHT2 = %02" PRIX8 "\r\n", enc28j60ReadReg(interface, ENC28J60_EHT6)); TRACE_DEBUG(" EHT3 = %02" PRIX8 "\r\n", enc28j60ReadReg(interface, ENC28J60_EHT7)); //Successful processing return NO_ERROR; } /** * @brief ENC28J60 controller reset * @param[in] interface Underlying network interface **/ void enc28j60SoftReset(NetInterface *interface) { //Pull the CS pin low interface->spiDriver->assertCs(); //Write opcode interface->spiDriver->transfer(ENC28J60_CMD_SRC); //Terminate the operation by raising the CS pin interface->spiDriver->deassertCs(); } /** * @brief Bank selection * @param[in] interface Underlying network interface * @param[in] address Register address **/ void 
enc28j60SelectBank(NetInterface *interface, uint16_t address) { uint16_t bank; Enc28j60Context *context; //Point to the driver context context = (Enc28j60Context *) interface->nicContext; //Get the bank number from the specified address bank = address & REG_BANK_MASK; //Rewrite the bank number only if a change is detected if(bank != context->currentBank) { //Select the relevant bank if(bank == BANK_0) { //Select bank 0 enc28j60ClearBit(interface, ENC28J60_ECON1, ENC28J60_ECON1_BSEL1 | ENC28J60_ECON1_BSEL0); } else if(bank == BANK_1) { //Select bank 1 enc28j60SetBit(interface, ENC28J60_ECON1, ENC28J60_ECON1_BSEL0); enc28j60ClearBit(interface, ENC28J60_ECON1, ENC28J60_ECON1_BSEL1); } else if(bank == BANK_2) { //Select bank 2 enc28j60ClearBit(interface, ENC28J60_ECON1, ENC28J60_ECON1_BSEL0); enc28j60SetBit(interface, ENC28J60_ECON1, ENC28J60_ECON1_BSEL1); } else { //Select bank 3 enc28j60SetBit(interface, ENC28J60_ECON1, ENC28J60_ECON1_BSEL1 | ENC28J60_ECON1_BSEL0); } //Save bank number context->currentBank = bank; } } /** * @brief Write ENC28J60 register * @param[in] interface Underlying network interface * @param[in] address Register address * @param[in] data Register value **/ void enc28j60WriteReg(NetInterface *interface, uint16_t address, uint8_t data) { //Make sure the corresponding bank is selected enc28j60SelectBank(interface, address); //Pull the CS pin low interface->spiDriver->assertCs(); //Write opcode and register address interface->spiDriver->transfer(ENC28J60_CMD_WCR | (address & REG_ADDR_MASK)); //Write register value interface->spiDriver->transfer(data); //Terminate the operation by raising the CS pin interface->spiDriver->deassertCs(); } /** * @brief Read ENC28J60 register * @param[in] interface Underlying network interface * @param[in] address Register address * @return Register value **/ uint8_t enc28j60ReadReg(NetInterface *interface, uint16_t address) { uint16_t data; //Make sure the corresponding bank is selected enc28j60SelectBank(interface, 
address); //Pull the CS pin low interface->spiDriver->assertCs(); //Write opcode and register address interface->spiDriver->transfer(ENC28J60_CMD_RCR | (address & REG_ADDR_MASK)); //When reading MAC or MII registers, a dummy byte is first shifted out if((address & REG_TYPE_MASK) != ETH_REG_TYPE) { interface->spiDriver->transfer(0x00); } //Read register contents data = interface->spiDriver->transfer(0x00); //Terminate the operation by raising the CS pin interface->spiDriver->deassertCs(); //Return register contents return data; } /** * @brief Write PHY register * @param[in] interface Underlying network interface * @param[in] address PHY register address * @param[in] data Register value **/ void enc28j60WritePhyReg(NetInterface *interface, uint16_t address, uint16_t data) { //Write register address enc28j60WriteReg(interface, ENC28J60_MIREGADR, address & REG_ADDR_MASK); //Write the lower 8 bits enc28j60WriteReg(interface, ENC28J60_MIWRL, LSB(data)); //Write the upper 8 bits enc28j60WriteReg(interface, ENC28J60_MIWRH, MSB(data)); //Wait until the PHY register has been written while((enc28j60ReadReg(interface, ENC28J60_MISTAT) & ENC28J60_MISTAT_BUSY) != 0) { } } /** * @brief Read PHY register * @param[in] interface Underlying network interface * @param[in] address PHY register address * @return Register value **/ uint16_t enc28j60ReadPhyReg(NetInterface *interface, uint16_t address) { uint16_t data; //Write register address enc28j60WriteReg(interface, ENC28J60_MIREGADR, address & REG_ADDR_MASK); //Start read operation enc28j60WriteReg(interface, ENC28J60_MICMD, ENC28J60_MICMD_MIIRD); //Wait for the read operation to complete while((enc28j60ReadReg(interface, ENC28J60_MISTAT) & ENC28J60_MISTAT_BUSY) != 0) { } //Clear command register enc28j60WriteReg(interface, ENC28J60_MICMD, 0); //Read the lower 8 bits data = enc28j60ReadReg(interface, ENC28J60_MIRDL); //Read the upper 8 bits data |= enc28j60ReadReg(interface, ENC28J60_MIRDH) << 8; //Return register contents return 
data; } /** * @brief Write SRAM buffer * @param[in] interface Underlying network interface * @param[in] buffer Multi-part buffer containing the data to be written * @param[in] offset Offset to the first data byte **/ void enc28j60WriteBuffer(NetInterface *interface, const NetBuffer *buffer, size_t offset) { uint_t i; size_t j; size_t n; uint8_t *p; //Pull the CS pin low interface->spiDriver->assertCs(); //Write opcode interface->spiDriver->transfer(ENC28J60_CMD_WBM); //Write per-packet control byte interface->spiDriver->transfer(0x00); //Loop through data chunks for(i = 0; i < buffer->chunkCount; i++) { //Is there any data to copy from the current chunk? if(offset < buffer->chunk[i].length) { //Point to the first byte to be read p = (uint8_t *) buffer->chunk[i].address + offset; //Compute the number of bytes to copy at a time n = buffer->chunk[i].length - offset; //Copy data to SRAM buffer for(j = 0; j < n; j++) { interface->spiDriver->transfer(p[j]); } //Process the next block from the start offset = 0; } else { //Skip the current chunk offset -= buffer->chunk[i].length; } } //Terminate the operation by raising the CS pin interface->spiDriver->deassertCs(); } /** * @brief Read SRAM buffer * @param[in] interface Underlying network interface * @param[in] data Buffer where to store the incoming data * @param[in] length Number of data to read **/ void enc28j60ReadBuffer(NetInterface *interface, uint8_t *data, size_t length) { size_t i; //Pull the CS pin low interface->spiDriver->assertCs(); //Write opcode interface->spiDriver->transfer(ENC28J60_CMD_RBM); //Copy data from SRAM buffer for(i = 0; i < length; i++) { data[i] = interface->spiDriver->transfer(0x00); } //Terminate the operation by raising the CS pin interface->spiDriver->deassertCs(); } /** * @brief Set bit field * @param[in] interface Underlying network interface * @param[in] address Register address * @param[in] mask Bits to set in the target register **/ void enc28j60SetBit(NetInterface *interface, 
uint16_t address, uint16_t mask) { //Pull the CS pin low interface->spiDriver->assertCs(); //Write opcode and register address interface->spiDriver->transfer(ENC28J60_CMD_BFS | (address & REG_ADDR_MASK)); //Write bit mask interface->spiDriver->transfer(mask); //Terminate the operation by raising the CS pin interface->spiDriver->deassertCs(); } /** * @brief Clear bit field * @param[in] interface Underlying network interface * @param[in] address Register address * @param[in] mask Bits to clear in the target register **/ void enc28j60ClearBit(NetInterface *interface, uint16_t address, uint16_t mask) { //Pull the CS pin low interface->spiDriver->assertCs(); //Write opcode and register address interface->spiDriver->transfer(ENC28J60_CMD_BFC | (address & REG_ADDR_MASK)); //Write bit mask interface->spiDriver->transfer(mask); //Terminate the operation by raising the CS pin interface->spiDriver->deassertCs(); } /** * @brief CRC calculation using the polynomial 0x4C11DB7 * @param[in] data Pointer to the data over which to calculate the CRC * @param[in] length Number of bytes to process * @return Resulting CRC value **/ uint32_t enc28j60CalcCrc(const void *data, size_t length) { uint_t i; uint_t j; uint32_t crc; const uint8_t *p; //Point to the data over which to calculate the CRC p = (uint8_t *) data; //CRC preset value crc = 0xFFFFFFFF; //Loop through data for(i = 0; i < length; i++) { //The message is processed bit by bit for(j = 0; j < 8; j++) { //Update CRC value if((((crc >> 31) ^ (p[i] >> j)) & 0x01) != 0) { crc = (crc << 1) ^ 0x04C11DB7; } else { crc = crc << 1; } } } //Return CRC value return crc; } /** * @brief Dump registers for debugging purpose * @param[in] interface Underlying network interface **/ void enc28j60DumpReg(NetInterface *interface) { #if (TRACE_LEVEL >= TRACE_LEVEL_DEBUG) uint8_t i; uint8_t bank; uint16_t address; //Display header TRACE_DEBUG(" Bank 0 Bank 1 Bank 2 Bank 3\r\n"); //Loop through register addresses for(i = 0; i < 32; i++) { //Display 
register address TRACE_DEBUG("%02" PRIX8 ": ", i); //Loop through bank numbers for(bank = 0; bank < 4; bank++) { //Format register address address = (bank << 8) | i; //MAC and MII registers require a specific read sequence if(address >= 0x200 && address <= 0x219) { address |= MAC_REG_TYPE; } else if(address >= 0x300 && address <= 0x305) { address |= MAC_REG_TYPE; } else if(address == 0x30A) { address |= MAC_REG_TYPE; } //Display register contents TRACE_DEBUG("0x%02" PRIX8 " ", enc28j60ReadReg(interface, address)); } //Jump to the following line TRACE_DEBUG("\r\n"); } //Terminate with a line feed TRACE_DEBUG("\r\n"); #endif } /** * @brief Dump PHY registers for debugging purpose * @param[in] interface Underlying network interface **/ void enc28j60DumpPhyReg(NetInterface *interface) { #if (TRACE_LEVEL >= TRACE_LEVEL_DEBUG) uint8_t i; //Loop through PHY registers for(i = 0; i < 32; i++) { //Display current PHY register TRACE_DEBUG("%02" PRIX8 ": 0x%04" PRIX16 "\r\n", i, enc28j60ReadPhyReg(interface, i)); } //Terminate with a line feed TRACE_DEBUG("\r\n"); #endif }
/**
 * @brief Bank selection
 * @param[in] interface Underlying network interface
 * @param[in] address Register address
 **/

void enc28j60SelectBank(NetInterface *interface, uint16_t address)
{
   uint16_t newBank;
   Enc28j60Context *ctx;

   //Retrieve the driver context
   ctx = (Enc28j60Context *) interface->nicContext;

   //The bank number is encoded in the register address
   newBank = address & REG_BANK_MASK;

   //Nothing to do when the requested bank is already selected
   if(newBank == ctx->currentBank)
      return;

   //Program the BSEL bits of ECON1 accordingly
   if(newBank == BANK_0)
   {
      //Bank 0: clear both bank select bits
      enc28j60ClearBit(interface, ENC28J60_REG_ECON1, ECON1_BSEL1 | ECON1_BSEL0);
   }
   else if(newBank == BANK_1)
   {
      //Bank 1: set BSEL0, clear BSEL1
      enc28j60SetBit(interface, ENC28J60_REG_ECON1, ECON1_BSEL0);
      enc28j60ClearBit(interface, ENC28J60_REG_ECON1, ECON1_BSEL1);
   }
   else if(newBank == BANK_2)
   {
      //Bank 2: clear BSEL0, set BSEL1
      enc28j60ClearBit(interface, ENC28J60_REG_ECON1, ECON1_BSEL0);
      enc28j60SetBit(interface, ENC28J60_REG_ECON1, ECON1_BSEL1);
   }
   else if(newBank == BANK_3)
   {
      //Bank 3: set both bank select bits
      enc28j60SetBit(interface, ENC28J60_REG_ECON1, ECON1_BSEL1 | ECON1_BSEL0);
   }
   else
   {
      //Invalid bank: leave the BSEL bits untouched (matches the original
      //switch's do-nothing default)
   }

   //Remember which bank is now active
   ctx->currentBank = newBank;
}
/**
 * @brief Bank selection
 * @param[in] interface Underlying network interface
 * @param[in] address Register address
 **/

void enc28j60SelectBank(NetInterface *interface, uint16_t address)
{
   uint16_t requestedBank;
   Enc28j60Context *ctx;

   //Get the driver context
   ctx = (Enc28j60Context *) interface->nicContext;
   //The bank number is encoded in the register address
   requestedBank = address & REG_BANK_MASK;

   //Update the BSEL bits only when a bank change is required
   if(requestedBank != ctx->currentBank)
   {
      switch(requestedBank)
      {
      case BANK_0:
         //Bank 0: clear both bank select bits
         enc28j60ClearBit(interface, ENC28J60_ECON1, ENC28J60_ECON1_BSEL1 |
            ENC28J60_ECON1_BSEL0);
         break;
      case BANK_1:
         //Bank 1: set BSEL0, clear BSEL1
         enc28j60SetBit(interface, ENC28J60_ECON1, ENC28J60_ECON1_BSEL0);
         enc28j60ClearBit(interface, ENC28J60_ECON1, ENC28J60_ECON1_BSEL1);
         break;
      case BANK_2:
         //Bank 2: clear BSEL0, set BSEL1
         enc28j60ClearBit(interface, ENC28J60_ECON1, ENC28J60_ECON1_BSEL0);
         enc28j60SetBit(interface, ENC28J60_ECON1, ENC28J60_ECON1_BSEL1);
         break;
      default:
         //Bank 3: set both bank select bits (the original's catch-all else
         //treats any remaining value as bank 3)
         enc28j60SetBit(interface, ENC28J60_ECON1, ENC28J60_ECON1_BSEL1 |
            ENC28J60_ECON1_BSEL0);
         break;
      }

      //Keep track of the active bank
      ctx->currentBank = requestedBank;
   }
}
{'added': [(9, ' * Copyright (C) 2010-2021 Oryx Embedded SARL. All rights reserved.'), (28, ' * @version 2.0.2'), (107, ' revisionId = enc28j60ReadReg(interface, ENC28J60_EREVID);'), (113, ' enc28j60WriteReg(interface, ENC28J60_ECOCON, ENC28J60_ECOCON_COCON_DISABLED);'), (116, ' enc28j60WriteReg(interface, ENC28J60_MAADR5, interface->macAddr.b[0]);'), (117, ' enc28j60WriteReg(interface, ENC28J60_MAADR4, interface->macAddr.b[1]);'), (118, ' enc28j60WriteReg(interface, ENC28J60_MAADR3, interface->macAddr.b[2]);'), (119, ' enc28j60WriteReg(interface, ENC28J60_MAADR2, interface->macAddr.b[3]);'), (120, ' enc28j60WriteReg(interface, ENC28J60_MAADR1, interface->macAddr.b[4]);'), (121, ' enc28j60WriteReg(interface, ENC28J60_MAADR0, interface->macAddr.b[5]);'), (124, ' enc28j60WriteReg(interface, ENC28J60_ERXSTL, LSB(ENC28J60_RX_BUFFER_START));'), (125, ' enc28j60WriteReg(interface, ENC28J60_ERXSTH, MSB(ENC28J60_RX_BUFFER_START));'), (126, ' enc28j60WriteReg(interface, ENC28J60_ERXNDL, LSB(ENC28J60_RX_BUFFER_STOP));'), (127, ' enc28j60WriteReg(interface, ENC28J60_ERXNDH, MSB(ENC28J60_RX_BUFFER_STOP));'), (129, ' //The ERXRDPT register defines a location within the FIFO where the receive'), (130, ' //hardware is forbidden to write to'), (131, ' enc28j60WriteReg(interface, ENC28J60_ERXRDPTL, LSB(ENC28J60_RX_BUFFER_STOP));'), (132, ' enc28j60WriteReg(interface, ENC28J60_ERXRDPTH, MSB(ENC28J60_RX_BUFFER_STOP));'), (135, ' enc28j60WriteReg(interface, ENC28J60_ERXFCON, ENC28J60_ERXFCON_UCEN |'), (136, ' ENC28J60_ERXFCON_CRCEN | ENC28J60_ERXFCON_HTEN | ENC28J60_ERXFCON_BCEN);'), (139, ' enc28j60WriteReg(interface, ENC28J60_EHT0, 0x00);'), (140, ' enc28j60WriteReg(interface, ENC28J60_EHT1, 0x00);'), (141, ' enc28j60WriteReg(interface, ENC28J60_EHT2, 0x00);'), (142, ' enc28j60WriteReg(interface, ENC28J60_EHT3, 0x00);'), (143, ' enc28j60WriteReg(interface, ENC28J60_EHT4, 0x00);'), (144, ' enc28j60WriteReg(interface, ENC28J60_EHT5, 0x00);'), (145, ' enc28j60WriteReg(interface, 
ENC28J60_EHT6, 0x00);'), (146, ' enc28j60WriteReg(interface, ENC28J60_EHT7, 0x00);'), (149, ' enc28j60WriteReg(interface, ENC28J60_MACON2, 0x00);'), (152, ' enc28j60WriteReg(interface, ENC28J60_MACON1, ENC28J60_MACON1_TXPAUS |'), (153, ' ENC28J60_MACON1_RXPAUS | ENC28J60_MACON1_MARXEN);'), (155, ' //Enable automatic padding, always append a valid CRC and check frame'), (156, ' //length. MAC can operate in half-duplex or full-duplex mode'), (158, ' enc28j60WriteReg(interface, ENC28J60_MACON3, ENC28J60_MACON3_PADCFG_AUTO |'), (159, ' ENC28J60_MACON3_TXCRCEN | ENC28J60_MACON3_FRMLNEN |'), (160, ' ENC28J60_MACON3_FULDPX);'), (162, ' enc28j60WriteReg(interface, ENC28J60_MACON3, ENC28J60_MACON3_PADCFG_AUTO |'), (163, ' ENC28J60_MACON3_TXCRCEN | ENC28J60_MACON3_FRMLNEN);'), (168, ' enc28j60WriteReg(interface, ENC28J60_MACON4, ENC28J60_MACON4_DEFER);'), (171, ' enc28j60WriteReg(interface, ENC28J60_MAMXFLL, LSB(ETH_MAX_FRAME_SIZE));'), (172, ' enc28j60WriteReg(interface, ENC28J60_MAMXFLH, MSB(ETH_MAX_FRAME_SIZE));'), (176, ' enc28j60WriteReg(interface, ENC28J60_MABBIPG, ENC28J60_MABBIPG_DEFAULT_FD);'), (178, ' enc28j60WriteReg(interface, ENC28J60_MABBIPG, ENC28J60_MABBIPG_DEFAULT_HD);'), (182, ' enc28j60WriteReg(interface, ENC28J60_MAIPGL, ENC28J60_MAIPGL_DEFAULT);'), (183, ' enc28j60WriteReg(interface, ENC28J60_MAIPGH, ENC28J60_MAIPGH_DEFAULT);'), (186, ' enc28j60WriteReg(interface, ENC28J60_MACLCON2,'), (187, ' ENC28J60_MACLCON2_COLWIN_DEFAULT);'), (191, ' enc28j60WritePhyReg(interface, ENC28J60_PHCON1, ENC28J60_PHCON1_PDPXMD);'), (193, ' enc28j60WritePhyReg(interface, ENC28J60_PHCON1, 0x0000);'), (197, ' enc28j60WritePhyReg(interface, ENC28J60_PHCON2, ENC28J60_PHCON2_HDLDIS);'), (200, ' enc28j60WritePhyReg(interface, ENC28J60_PHLCON,'), (201, ' ENC28J60_PHLCON_LACFG_LINK | ENC28J60_PHLCON_LBCFG_TX_RX |'), (202, ' ENC28J60_PHLCON_LFRQ_40_MS | ENC28J60_PHLCON_STRCH);'), (205, ' enc28j60WriteReg(interface, ENC28J60_EIR, 0x00);'), (208, ' enc28j60WriteReg(interface, 
ENC28J60_EIE, ENC28J60_EIE_INTIE |'), (209, ' ENC28J60_EIE_PKTIE | ENC28J60_EIE_LINKIE | ENC28J60_EIE_TXIE |'), (210, ' ENC28J60_EIE_TXERIE);'), (213, ' enc28j60WritePhyReg(interface, ENC28J60_PHIE, ENC28J60_PHIE_PLNKIE |'), (214, ' ENC28J60_PHIE_PGEIE);'), (217, ' enc28j60SetBit(interface, ENC28J60_ECON1, ENC28J60_ECON1_RXEN);'), (285, ' enc28j60ClearBit(interface, ENC28J60_EIE, ENC28J60_EIE_INTIE);'), (288, ' status = enc28j60ReadReg(interface, ENC28J60_EIR);'), (291, ' if((status & ENC28J60_EIR_LINKIF) != 0)'), (294, ' enc28j60ClearBit(interface, ENC28J60_EIE, ENC28J60_EIE_LINKIE);'), (303, ' if(enc28j60ReadReg(interface, ENC28J60_EPKTCNT) != 0)'), (306, ' enc28j60ClearBit(interface, ENC28J60_EIE, ENC28J60_EIE_PKTIE);'), (315, ' if((status & (ENC28J60_EIR_TXIF | ENC28J60_EIE_TXERIE)) != 0)'), (318, ' enc28j60ClearBit(interface, ENC28J60_EIR, ENC28J60_EIR_TXIF |'), (319, ' ENC28J60_EIE_TXERIE);'), (327, ' enc28j60SetBit(interface, ENC28J60_EIE, ENC28J60_EIE_INTIE);'), (346, ' status = enc28j60ReadReg(interface, ENC28J60_EIR);'), (349, ' if((status & ENC28J60_EIR_LINKIF) != 0)'), (352, ' enc28j60ReadPhyReg(interface, ENC28J60_PHIR);'), (354, ' enc28j60ClearBit(interface, ENC28J60_EIR, ENC28J60_EIR_LINKIF);'), (356, ' value = enc28j60ReadPhyReg(interface, ENC28J60_PHSTAT2);'), (359, ' if((value & ENC28J60_PHSTAT2_LSTAT) != 0)'), (385, ' if(enc28j60ReadReg(interface, ENC28J60_EPKTCNT) != 0)'), (388, ' enc28j60ClearBit(interface, ENC28J60_EIR, ENC28J60_EIR_PKTIF);'), (401, ' enc28j60SetBit(interface, ENC28J60_EIE, ENC28J60_EIE_LINKIE |'), (402, ' ENC28J60_EIE_PKTIE);'), (444, ' enc28j60SetBit(interface, ENC28J60_ECON1, ENC28J60_ECON1_TXRST);'), (445, ' enc28j60ClearBit(interface, ENC28J60_ECON1, ENC28J60_ECON1_TXRST);'), (448, ' enc28j60ClearBit(interface, ENC28J60_EIR, ENC28J60_EIR_TXIF |'), (449, ' ENC28J60_EIR_TXERIF);'), (452, ' enc28j60WriteReg(interface, ENC28J60_ETXSTL, LSB(ENC28J60_TX_BUFFER_START));'), (453, ' enc28j60WriteReg(interface, ENC28J60_ETXSTH, 
MSB(ENC28J60_TX_BUFFER_START));'), (456, ' enc28j60WriteReg(interface, ENC28J60_EWRPTL, LSB(ENC28J60_TX_BUFFER_START));'), (457, ' enc28j60WriteReg(interface, ENC28J60_EWRPTH, MSB(ENC28J60_TX_BUFFER_START));'), (463, ' enc28j60WriteReg(interface, ENC28J60_ETXNDL, LSB(ENC28J60_TX_BUFFER_START + length));'), (464, ' enc28j60WriteReg(interface, ENC28J60_ETXNDH, MSB(ENC28J60_TX_BUFFER_START + length));'), (467, ' enc28j60SetBit(interface, ENC28J60_ECON1, ENC28J60_ECON1_TXRTS);'), (483, ' uint16_t length;'), (485, ' uint8_t header[6];'), (492, ' if(enc28j60ReadReg(interface, ENC28J60_EPKTCNT) != 0)'), (495, ' enc28j60WriteReg(interface, ENC28J60_ERDPTL, LSB(context->nextPacket));'), (496, ' enc28j60WriteReg(interface, ENC28J60_ERDPTH, MSB(context->nextPacket));'), (498, ' //The packet is preceded by a 6-byte header'), (499, ' enc28j60ReadBuffer(interface, header, sizeof(header));'), (500, ''), (501, ' //The first two bytes are the address of the next packet'), (502, ' context->nextPacket = LOAD16LE(header);'), (503, ' //Get the length of the received packet'), (504, ' length = LOAD16LE(header + 2);'), (505, ' //Get the receive status vector (RSV)'), (506, ' status = LOAD16LE(header + 4);'), (509, ' if((status & ENC28J60_RSV_RECEIVED_OK) != 0)'), (512, ' length = MIN(length, ETH_MAX_FRAME_SIZE);'), (514, ' enc28j60ReadBuffer(interface, context->rxBuffer, length);'), (528, ' enc28j60WriteReg(interface, ENC28J60_ERXRDPTL, LSB(ENC28J60_RX_BUFFER_STOP));'), (529, ' enc28j60WriteReg(interface, ENC28J60_ERXRDPTH, MSB(ENC28J60_RX_BUFFER_STOP));'), (533, ' enc28j60WriteReg(interface, ENC28J60_ERXRDPTL, LSB(context->nextPacket - 1));'), (534, ' enc28j60WriteReg(interface, ENC28J60_ERXRDPTH, MSB(context->nextPacket - 1));'), (538, ' enc28j60SetBit(interface, ENC28J60_ECON2, ENC28J60_ECON2_PKTDEC);'), (555, ' nicProcessPacket(interface, context->rxBuffer, length, &ancillary);'), (603, ' enc28j60WriteReg(interface, ENC28J60_EHT0, hashTable[0]);'), (604, ' enc28j60WriteReg(interface, 
ENC28J60_EHT1, hashTable[1]);'), (605, ' enc28j60WriteReg(interface, ENC28J60_EHT2, hashTable[2]);'), (606, ' enc28j60WriteReg(interface, ENC28J60_EHT3, hashTable[3]);'), (607, ' enc28j60WriteReg(interface, ENC28J60_EHT4, hashTable[4]);'), (608, ' enc28j60WriteReg(interface, ENC28J60_EHT5, hashTable[5]);'), (609, ' enc28j60WriteReg(interface, ENC28J60_EHT6, hashTable[6]);'), (610, ' enc28j60WriteReg(interface, ENC28J60_EHT7, hashTable[7]);'), (613, ' TRACE_DEBUG(" EHT0 = %02" PRIX8 "\\r\\n", enc28j60ReadReg(interface, ENC28J60_EHT0));'), (614, ' TRACE_DEBUG(" EHT1 = %02" PRIX8 "\\r\\n", enc28j60ReadReg(interface, ENC28J60_EHT1));'), (615, ' TRACE_DEBUG(" EHT2 = %02" PRIX8 "\\r\\n", enc28j60ReadReg(interface, ENC28J60_EHT2));'), (616, ' TRACE_DEBUG(" EHT3 = %02" PRIX8 "\\r\\n", enc28j60ReadReg(interface, ENC28J60_EHT3));'), (617, ' TRACE_DEBUG(" EHT0 = %02" PRIX8 "\\r\\n", enc28j60ReadReg(interface, ENC28J60_EHT4));'), (618, ' TRACE_DEBUG(" EHT1 = %02" PRIX8 "\\r\\n", enc28j60ReadReg(interface, ENC28J60_EHT5));'), (619, ' TRACE_DEBUG(" EHT2 = %02" PRIX8 "\\r\\n", enc28j60ReadReg(interface, ENC28J60_EHT6));'), (620, ' TRACE_DEBUG(" EHT3 = %02" PRIX8 "\\r\\n", enc28j60ReadReg(interface, ENC28J60_EHT7));'), (665, ' //Select the relevant bank'), (666, ' if(bank == BANK_0)'), (669, ' enc28j60ClearBit(interface, ENC28J60_ECON1, ENC28J60_ECON1_BSEL1 |'), (670, ' ENC28J60_ECON1_BSEL0);'), (671, ' }'), (672, ' else if(bank == BANK_1)'), (673, ' {'), (675, ' enc28j60SetBit(interface, ENC28J60_ECON1, ENC28J60_ECON1_BSEL0);'), (676, ' enc28j60ClearBit(interface, ENC28J60_ECON1, ENC28J60_ECON1_BSEL1);'), (677, ' }'), (678, ' else if(bank == BANK_2)'), (679, ' {'), (681, ' enc28j60ClearBit(interface, ENC28J60_ECON1, ENC28J60_ECON1_BSEL0);'), (682, ' enc28j60SetBit(interface, ENC28J60_ECON1, ENC28J60_ECON1_BSEL1);'), (683, ' }'), (684, ' else'), (685, ' {'), (687, ' enc28j60SetBit(interface, ENC28J60_ECON1, ENC28J60_ECON1_BSEL1 |'), (688, ' ENC28J60_ECON1_BSEL0);'), (770, ' 
enc28j60WriteReg(interface, ENC28J60_MIREGADR, address & REG_ADDR_MASK);'), (773, ' enc28j60WriteReg(interface, ENC28J60_MIWRL, LSB(data));'), (775, ' enc28j60WriteReg(interface, ENC28J60_MIWRH, MSB(data));'), (778, ' while((enc28j60ReadReg(interface, ENC28J60_MISTAT) & ENC28J60_MISTAT_BUSY) != 0)'), (796, ' enc28j60WriteReg(interface, ENC28J60_MIREGADR, address & REG_ADDR_MASK);'), (799, ' enc28j60WriteReg(interface, ENC28J60_MICMD, ENC28J60_MICMD_MIIRD);'), (801, ' while((enc28j60ReadReg(interface, ENC28J60_MISTAT) & ENC28J60_MISTAT_BUSY) != 0)'), (806, ' enc28j60WriteReg(interface, ENC28J60_MICMD, 0);'), (809, ' data = enc28j60ReadReg(interface, ENC28J60_MIRDL);'), (811, ' data |= enc28j60ReadReg(interface, ENC28J60_MIRDH) << 8;')], 'deleted': [(9, ' * Copyright (C) 2010-2020 Oryx Embedded SARL. All rights reserved.'), (28, ' * @version 2.0.0'), (35, '#include <limits.h>'), (108, ' revisionId = enc28j60ReadReg(interface, ENC28J60_REG_EREVID);'), (114, ' enc28j60WriteReg(interface, ENC28J60_REG_ECOCON, 0x00);'), (117, ' enc28j60WriteReg(interface, ENC28J60_REG_MAADR1, interface->macAddr.b[0]);'), (118, ' enc28j60WriteReg(interface, ENC28J60_REG_MAADR2, interface->macAddr.b[1]);'), (119, ' enc28j60WriteReg(interface, ENC28J60_REG_MAADR3, interface->macAddr.b[2]);'), (120, ' enc28j60WriteReg(interface, ENC28J60_REG_MAADR4, interface->macAddr.b[3]);'), (121, ' enc28j60WriteReg(interface, ENC28J60_REG_MAADR5, interface->macAddr.b[4]);'), (122, ' enc28j60WriteReg(interface, ENC28J60_REG_MAADR6, interface->macAddr.b[5]);'), (125, ' enc28j60WriteReg(interface, ENC28J60_REG_ERXSTL, LSB(ENC28J60_RX_BUFFER_START));'), (126, ' enc28j60WriteReg(interface, ENC28J60_REG_ERXSTH, MSB(ENC28J60_RX_BUFFER_START));'), (127, ' enc28j60WriteReg(interface, ENC28J60_REG_ERXNDL, LSB(ENC28J60_RX_BUFFER_STOP));'), (128, ' enc28j60WriteReg(interface, ENC28J60_REG_ERXNDH, MSB(ENC28J60_RX_BUFFER_STOP));'), (130, ' //The ERXRDPT register defines a location within the FIFO'), (131, ' //where 
the receive hardware is forbidden to write to'), (132, ' enc28j60WriteReg(interface, ENC28J60_REG_ERXRDPTL, LSB(ENC28J60_RX_BUFFER_STOP));'), (133, ' enc28j60WriteReg(interface, ENC28J60_REG_ERXRDPTH, MSB(ENC28J60_RX_BUFFER_STOP));'), (136, ' enc28j60WriteReg(interface, ENC28J60_REG_ERXFCON, ERXFCON_UCEN |'), (137, ' ERXFCON_CRCEN | ERXFCON_HTEN | ERXFCON_BCEN);'), (140, ' enc28j60WriteReg(interface, ENC28J60_REG_EHT0, 0x00);'), (141, ' enc28j60WriteReg(interface, ENC28J60_REG_EHT1, 0x00);'), (142, ' enc28j60WriteReg(interface, ENC28J60_REG_EHT2, 0x00);'), (143, ' enc28j60WriteReg(interface, ENC28J60_REG_EHT3, 0x00);'), (144, ' enc28j60WriteReg(interface, ENC28J60_REG_EHT4, 0x00);'), (145, ' enc28j60WriteReg(interface, ENC28J60_REG_EHT5, 0x00);'), (146, ' enc28j60WriteReg(interface, ENC28J60_REG_EHT6, 0x00);'), (147, ' enc28j60WriteReg(interface, ENC28J60_REG_EHT7, 0x00);'), (150, ' enc28j60WriteReg(interface, ENC28J60_REG_MACON2, 0x00);'), (153, ' enc28j60WriteReg(interface, ENC28J60_REG_MACON1,'), (154, ' MACON1_TXPAUS | MACON1_RXPAUS | MACON1_MARXEN);'), (156, ' //Enable automatic padding to at least 60 bytes, always append a valid CRC'), (157, ' //and check frame length. 
MAC can operate in half-duplex or full-duplex mode'), (159, ' enc28j60WriteReg(interface, ENC28J60_REG_MACON3, MACON3_PADCFG(1) |'), (160, ' MACON3_TXCRCEN | MACON3_FRMLNEN | MACON3_FULDPX);'), (162, ' enc28j60WriteReg(interface, ENC28J60_REG_MACON3, MACON3_PADCFG(1) |'), (163, ' MACON3_TXCRCEN | MACON3_FRMLNEN);'), (168, ' enc28j60WriteReg(interface, ENC28J60_REG_MACON4, MACON4_DEFER);'), (171, ' enc28j60WriteReg(interface, ENC28J60_REG_MAMXFLL, LSB(ETH_MAX_FRAME_SIZE));'), (172, ' enc28j60WriteReg(interface, ENC28J60_REG_MAMXFLH, MSB(ETH_MAX_FRAME_SIZE));'), (176, ' enc28j60WriteReg(interface, ENC28J60_REG_MABBIPG, 0x15);'), (178, ' enc28j60WriteReg(interface, ENC28J60_REG_MABBIPG, 0x12);'), (182, ' enc28j60WriteReg(interface, ENC28J60_REG_MAIPGL, 0x12);'), (183, ' enc28j60WriteReg(interface, ENC28J60_REG_MAIPGH, 0x0C);'), (186, ' enc28j60WriteReg(interface, ENC28J60_REG_MACLCON2, 63);'), (190, ' enc28j60WritePhyReg(interface, ENC28J60_PHY_REG_PHCON1, PHCON1_PDPXMD);'), (192, ' enc28j60WritePhyReg(interface, ENC28J60_PHY_REG_PHCON1, 0x0000);'), (196, ' enc28j60WritePhyReg(interface, ENC28J60_PHY_REG_PHCON2, PHCON2_HDLDIS);'), (199, ' enc28j60WritePhyReg(interface, ENC28J60_PHY_REG_PHLCON,'), (200, ' PHLCON_LACFG(4) | PHLCON_LBCFG(7) | PHLCON_LFRQ(0) | PHLCON_STRCH);'), (203, ' enc28j60WriteReg(interface, ENC28J60_REG_EIR, 0x00);'), (206, ' enc28j60WriteReg(interface, ENC28J60_REG_EIE, EIE_INTIE |'), (207, ' EIE_PKTIE | EIE_LINKIE | EIE_TXIE | EIE_TXERIE);'), (210, ' enc28j60WritePhyReg(interface, ENC28J60_PHY_REG_PHIE,'), (211, ' PHIE_PLNKIE | PHIE_PGEIE);'), (214, ' enc28j60SetBit(interface, ENC28J60_REG_ECON1, ECON1_RXEN);'), (282, ' enc28j60ClearBit(interface, ENC28J60_REG_EIE, EIE_INTIE);'), (285, ' status = enc28j60ReadReg(interface, ENC28J60_REG_EIR);'), (288, ' if((status & EIR_LINKIF) != 0)'), (291, ' enc28j60ClearBit(interface, ENC28J60_REG_EIE, EIE_LINKIE);'), (300, ' if((status & EIR_PKTIF) != 0)'), (303, ' enc28j60ClearBit(interface, ENC28J60_REG_EIE, 
EIE_PKTIE);'), (312, ' if((status & (EIR_TXIF | EIE_TXERIE)) != 0)'), (315, ' enc28j60ClearBit(interface, ENC28J60_REG_EIR, EIR_TXIF | EIE_TXERIE);'), (323, ' enc28j60SetBit(interface, ENC28J60_REG_EIE, EIE_INTIE);'), (342, ' status = enc28j60ReadReg(interface, ENC28J60_REG_EIR);'), (345, ' if((status & EIR_LINKIF) != 0)'), (348, ' enc28j60ReadPhyReg(interface, ENC28J60_PHY_REG_PHIR);'), (350, ' enc28j60ClearBit(interface, ENC28J60_REG_EIR, EIR_LINKIF);'), (352, ' value = enc28j60ReadPhyReg(interface, ENC28J60_PHY_REG_PHSTAT2);'), (355, ' if((value & PHSTAT2_LSTAT) != 0)'), (381, ' if((status & EIR_PKTIF) != 0)'), (384, ' enc28j60ClearBit(interface, ENC28J60_REG_EIR, EIR_PKTIF);'), (397, ' enc28j60SetBit(interface, ENC28J60_REG_EIE, EIE_LINKIE | EIE_PKTIE);'), (439, ' enc28j60SetBit(interface, ENC28J60_REG_ECON1, ECON1_TXRST);'), (440, ' enc28j60ClearBit(interface, ENC28J60_REG_ECON1, ECON1_TXRST);'), (443, ' enc28j60ClearBit(interface, ENC28J60_REG_EIR, EIR_TXIF | EIR_TXERIF);'), (446, ' enc28j60WriteReg(interface, ENC28J60_REG_ETXSTL, LSB(ENC28J60_TX_BUFFER_START));'), (447, ' enc28j60WriteReg(interface, ENC28J60_REG_ETXSTH, MSB(ENC28J60_TX_BUFFER_START));'), (450, ' enc28j60WriteReg(interface, ENC28J60_REG_EWRPTL, LSB(ENC28J60_TX_BUFFER_START));'), (451, ' enc28j60WriteReg(interface, ENC28J60_REG_EWRPTH, MSB(ENC28J60_TX_BUFFER_START));'), (457, ' enc28j60WriteReg(interface, ENC28J60_REG_ETXNDL, LSB(ENC28J60_TX_BUFFER_START + length));'), (458, ' enc28j60WriteReg(interface, ENC28J60_REG_ETXNDH, MSB(ENC28J60_TX_BUFFER_START + length));'), (461, ' enc28j60SetBit(interface, ENC28J60_REG_ECON1, ECON1_TXRTS);'), (477, ' uint16_t n;'), (485, ' if(enc28j60ReadReg(interface, ENC28J60_REG_EPKTCNT))'), (488, ' enc28j60WriteReg(interface, ENC28J60_REG_ERDPTL, LSB(context->nextPacket));'), (489, ' enc28j60WriteReg(interface, ENC28J60_REG_ERDPTH, MSB(context->nextPacket));'), (491, ' //Read the first two bytes, which are the address of the next packet'), (492, ' 
enc28j60ReadBuffer(interface, (uint8_t *) &context->nextPacket, sizeof(uint16_t));'), (493, ' //Get the length of the received frame in bytes'), (494, ' enc28j60ReadBuffer(interface, (uint8_t *) &n, sizeof(uint16_t));'), (495, ' //Read the receive status vector (RSV)'), (496, ' enc28j60ReadBuffer(interface, (uint8_t *) &status, sizeof(uint16_t));'), (499, ' if((status & RSV_RECEIVED_OK) != 0)'), (502, ' n = MIN(n, ETH_MAX_FRAME_SIZE);'), (504, ' enc28j60ReadBuffer(interface, context->rxBuffer, n);'), (518, ' enc28j60WriteReg(interface, ENC28J60_REG_ERXRDPTL, LSB(ENC28J60_RX_BUFFER_STOP));'), (519, ' enc28j60WriteReg(interface, ENC28J60_REG_ERXRDPTH, MSB(ENC28J60_RX_BUFFER_STOP));'), (523, ' enc28j60WriteReg(interface, ENC28J60_REG_ERXRDPTL, LSB(context->nextPacket - 1));'), (524, ' enc28j60WriteReg(interface, ENC28J60_REG_ERXRDPTH, MSB(context->nextPacket - 1));'), (528, ' enc28j60SetBit(interface, ENC28J60_REG_ECON2, ECON2_PKTDEC);'), (545, ' nicProcessPacket(interface, context->rxBuffer, n, &ancillary);'), (593, ' enc28j60WriteReg(interface, ENC28J60_REG_EHT0, hashTable[0]);'), (594, ' enc28j60WriteReg(interface, ENC28J60_REG_EHT1, hashTable[1]);'), (595, ' enc28j60WriteReg(interface, ENC28J60_REG_EHT2, hashTable[2]);'), (596, ' enc28j60WriteReg(interface, ENC28J60_REG_EHT3, hashTable[3]);'), (597, ' enc28j60WriteReg(interface, ENC28J60_REG_EHT4, hashTable[4]);'), (598, ' enc28j60WriteReg(interface, ENC28J60_REG_EHT5, hashTable[5]);'), (599, ' enc28j60WriteReg(interface, ENC28J60_REG_EHT6, hashTable[6]);'), (600, ' enc28j60WriteReg(interface, ENC28J60_REG_EHT7, hashTable[7]);'), (603, ' TRACE_DEBUG(" EHT0 = %02" PRIX8 "\\r\\n", enc28j60ReadReg(interface, ENC28J60_REG_EHT0));'), (604, ' TRACE_DEBUG(" EHT1 = %02" PRIX8 "\\r\\n", enc28j60ReadReg(interface, ENC28J60_REG_EHT1));'), (605, ' TRACE_DEBUG(" EHT2 = %02" PRIX8 "\\r\\n", enc28j60ReadReg(interface, ENC28J60_REG_EHT2));'), (606, ' TRACE_DEBUG(" EHT3 = %02" PRIX8 "\\r\\n", enc28j60ReadReg(interface, 
ENC28J60_REG_EHT3));'), (607, ' TRACE_DEBUG(" EHT0 = %02" PRIX8 "\\r\\n", enc28j60ReadReg(interface, ENC28J60_REG_EHT4));'), (608, ' TRACE_DEBUG(" EHT1 = %02" PRIX8 "\\r\\n", enc28j60ReadReg(interface, ENC28J60_REG_EHT5));'), (609, ' TRACE_DEBUG(" EHT2 = %02" PRIX8 "\\r\\n", enc28j60ReadReg(interface, ENC28J60_REG_EHT6));'), (610, ' TRACE_DEBUG(" EHT3 = %02" PRIX8 "\\r\\n", enc28j60ReadReg(interface, ENC28J60_REG_EHT7));'), (655, ' //Select specified bank'), (656, ' switch(bank)'), (658, ' case BANK_0:'), (660, ' enc28j60ClearBit(interface, ENC28J60_REG_ECON1, ECON1_BSEL1 | ECON1_BSEL0);'), (661, ' break;'), (662, ' case BANK_1:'), (664, ' enc28j60SetBit(interface, ENC28J60_REG_ECON1, ECON1_BSEL0);'), (665, ' enc28j60ClearBit(interface, ENC28J60_REG_ECON1, ECON1_BSEL1);'), (666, ' break;'), (667, ' case BANK_2:'), (669, ' enc28j60ClearBit(interface, ENC28J60_REG_ECON1, ECON1_BSEL0);'), (670, ' enc28j60SetBit(interface, ENC28J60_REG_ECON1, ECON1_BSEL1);'), (671, ' break;'), (672, ' case BANK_3:'), (674, ' enc28j60SetBit(interface, ENC28J60_REG_ECON1, ECON1_BSEL1 | ECON1_BSEL0);'), (675, ' break;'), (676, ' default:'), (677, ' //Invalid bank'), (678, ' break;'), (760, ' enc28j60WriteReg(interface, ENC28J60_REG_MIREGADR, address & REG_ADDR_MASK);'), (763, ' enc28j60WriteReg(interface, ENC28J60_REG_MIWRL, LSB(data));'), (765, ' enc28j60WriteReg(interface, ENC28J60_REG_MIWRH, MSB(data));'), (768, ' while((enc28j60ReadReg(interface, ENC28J60_REG_MISTAT) & MISTAT_BUSY) != 0)'), (786, ' enc28j60WriteReg(interface, ENC28J60_REG_MIREGADR, address & REG_ADDR_MASK);'), (789, ' enc28j60WriteReg(interface, ENC28J60_REG_MICMD, MICMD_MIIRD);'), (791, ' while((enc28j60ReadReg(interface, ENC28J60_REG_MISTAT) & MISTAT_BUSY) != 0)'), (796, ' enc28j60WriteReg(interface, ENC28J60_REG_MICMD, 0);'), (799, ' data = enc28j60ReadReg(interface, ENC28J60_REG_MIRDL);'), (801, ' data |= enc28j60ReadReg(interface, ENC28J60_REG_MIRDH) << 8;')]}
159
149
492
2,981
30
138
6
https://github.com/Oryx-Embedded/CycloneTCP
CVE-2021-26788
CWE-20
3,273
g2meet.c
C
kempf_decode_tile
/* * Go2Webinar decoder * Copyright (c) 2012 Konstantin Shishkov * * This file is part of FFmpeg. * * FFmpeg is free software; you can redistribute it and/or * modify it under the terms of the GNU Lesser General Public * License as published by the Free Software Foundation; either * version 2.1 of the License, or (at your option) any later version. * * FFmpeg is distributed in the hope that it will be useful, * but WITHOUT ANY WARRANTY; without even the implied warranty of * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU * Lesser General Public License for more details. * * You should have received a copy of the GNU Lesser General Public * License along with FFmpeg; if not, write to the Free Software * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA */ /** * @file * Go2Webinar decoder */ #include <zlib.h> #include "libavutil/intreadwrite.h" #include "avcodec.h" #include "bytestream.h" #include "dsputil.h" #include "get_bits.h" #include "internal.h" #include "mjpeg.h" enum ChunkType { FRAME_INFO = 0xC8, TILE_DATA, CURSOR_POS, CURSOR_SHAPE, CHUNK_CC, CHUNK_CD }; enum Compression { COMPR_EPIC_J_B = 2, COMPR_KEMPF_J_B, }; static const uint8_t luma_quant[64] = { 8, 6, 5, 8, 12, 20, 26, 31, 6, 6, 7, 10, 13, 29, 30, 28, 7, 7, 8, 12, 20, 29, 35, 28, 7, 9, 11, 15, 26, 44, 40, 31, 9, 11, 19, 28, 34, 55, 52, 39, 12, 18, 28, 32, 41, 52, 57, 46, 25, 32, 39, 44, 52, 61, 60, 51, 36, 46, 48, 49, 56, 50, 52, 50 }; static const uint8_t chroma_quant[64] = { 9, 9, 12, 24, 50, 50, 50, 50, 9, 11, 13, 33, 50, 50, 50, 50, 12, 13, 28, 50, 50, 50, 50, 50, 24, 33, 50, 50, 50, 50, 50, 50, 50, 50, 50, 50, 50, 50, 50, 50, 50, 50, 50, 50, 50, 50, 50, 50, 50, 50, 50, 50, 50, 50, 50, 50, 50, 50, 50, 50, 50, 50, 50, 50, }; typedef struct JPGContext { DSPContext dsp; ScanTable scantable; VLC dc_vlc[2], ac_vlc[2]; int prev_dc[3]; DECLARE_ALIGNED(16, int16_t, block)[6][64]; uint8_t *buf; } JPGContext; typedef struct G2MContext { JPGContext jc; int 
version; int compression; int width, height, bpp; int tile_width, tile_height; int tiles_x, tiles_y, tile_x, tile_y; int got_header; uint8_t *framebuf; int framebuf_stride, old_width, old_height; uint8_t *synth_tile, *jpeg_tile; int tile_stride, old_tile_w, old_tile_h; uint8_t *kempf_buf, *kempf_flags; uint8_t *cursor; int cursor_stride; int cursor_fmt; int cursor_w, cursor_h, cursor_x, cursor_y; int cursor_hot_x, cursor_hot_y; } G2MContext; static av_cold int build_vlc(VLC *vlc, const uint8_t *bits_table, const uint8_t *val_table, int nb_codes, int is_ac) { uint8_t huff_size[256] = { 0 }; uint16_t huff_code[256]; uint16_t huff_sym[256]; int i; ff_mjpeg_build_huffman_codes(huff_size, huff_code, bits_table, val_table); for (i = 0; i < 256; i++) huff_sym[i] = i + 16 * is_ac; if (is_ac) huff_sym[0] = 16 * 256; return ff_init_vlc_sparse(vlc, 9, nb_codes, huff_size, 1, 1, huff_code, 2, 2, huff_sym, 2, 2, 0); } static av_cold int jpg_init(AVCodecContext *avctx, JPGContext *c) { int ret; ret = build_vlc(&c->dc_vlc[0], avpriv_mjpeg_bits_dc_luminance, avpriv_mjpeg_val_dc, 12, 0); if (ret) return ret; ret = build_vlc(&c->dc_vlc[1], avpriv_mjpeg_bits_dc_chrominance, avpriv_mjpeg_val_dc, 12, 0); if (ret) return ret; ret = build_vlc(&c->ac_vlc[0], avpriv_mjpeg_bits_ac_luminance, avpriv_mjpeg_val_ac_luminance, 251, 1); if (ret) return ret; ret = build_vlc(&c->ac_vlc[1], avpriv_mjpeg_bits_ac_chrominance, avpriv_mjpeg_val_ac_chrominance, 251, 1); if (ret) return ret; ff_dsputil_init(&c->dsp, avctx); ff_init_scantable(c->dsp.idct_permutation, &c->scantable, ff_zigzag_direct); return 0; } static av_cold void jpg_free_context(JPGContext *ctx) { int i; for (i = 0; i < 2; i++) { ff_free_vlc(&ctx->dc_vlc[i]); ff_free_vlc(&ctx->ac_vlc[i]); } av_freep(&ctx->buf); } static void jpg_unescape(const uint8_t *src, int src_size, uint8_t *dst, int *dst_size) { const uint8_t *src_end = src + src_size; uint8_t *dst_start = dst; while (src < src_end) { uint8_t x = *src++; *dst++ = x; if (x == 0xFF 
&& !*src) src++; } *dst_size = dst - dst_start; } static int jpg_decode_block(JPGContext *c, GetBitContext *gb, int plane, int16_t *block) { int dc, val, pos; const int is_chroma = !!plane; const uint8_t *qmat = is_chroma ? chroma_quant : luma_quant; c->dsp.clear_block(block); dc = get_vlc2(gb, c->dc_vlc[is_chroma].table, 9, 3); if (dc < 0) return AVERROR_INVALIDDATA; if (dc) dc = get_xbits(gb, dc); dc = dc * qmat[0] + c->prev_dc[plane]; block[0] = dc; c->prev_dc[plane] = dc; pos = 0; while (pos < 63) { val = get_vlc2(gb, c->ac_vlc[is_chroma].table, 9, 3); if (val < 0) return AVERROR_INVALIDDATA; pos += val >> 4; val &= 0xF; if (pos > 63) return val ? AVERROR_INVALIDDATA : 0; if (val) { int nbits = val; val = get_xbits(gb, nbits); val *= qmat[ff_zigzag_direct[pos]]; block[c->scantable.permutated[pos]] = val; } } return 0; } static inline void yuv2rgb(uint8_t *out, int Y, int U, int V) { out[0] = av_clip_uint8(Y + ( 91881 * V + 32768 >> 16)); out[1] = av_clip_uint8(Y + (-22554 * U - 46802 * V + 32768 >> 16)); out[2] = av_clip_uint8(Y + (116130 * U + 32768 >> 16)); } static int jpg_decode_data(JPGContext *c, int width, int height, const uint8_t *src, int src_size, uint8_t *dst, int dst_stride, const uint8_t *mask, int mask_stride, int num_mbs, int swapuv) { GetBitContext gb; uint8_t *tmp; int mb_w, mb_h, mb_x, mb_y, i, j; int bx, by; int unesc_size; int ret; tmp = av_realloc(c->buf, src_size + FF_INPUT_BUFFER_PADDING_SIZE); if (!tmp) return AVERROR(ENOMEM); c->buf = tmp; jpg_unescape(src, src_size, c->buf, &unesc_size); memset(c->buf + unesc_size, 0, FF_INPUT_BUFFER_PADDING_SIZE); init_get_bits(&gb, c->buf, unesc_size * 8); width = FFALIGN(width, 16); mb_w = width >> 4; mb_h = (height + 15) >> 4; if (!num_mbs) num_mbs = mb_w * mb_h; for (i = 0; i < 3; i++) c->prev_dc[i] = 1024; bx = by = 0; for (mb_y = 0; mb_y < mb_h; mb_y++) { for (mb_x = 0; mb_x < mb_w; mb_x++) { if (mask && !mask[mb_x]) { bx += 16; continue; } for (j = 0; j < 2; j++) { for (i = 0; i < 2; i++) { if 
((ret = jpg_decode_block(c, &gb, 0, c->block[i + j * 2])) != 0) return ret; c->dsp.idct(c->block[i + j * 2]); } } for (i = 1; i < 3; i++) { if ((ret = jpg_decode_block(c, &gb, i, c->block[i + 3])) != 0) return ret; c->dsp.idct(c->block[i + 3]); } for (j = 0; j < 16; j++) { uint8_t *out = dst + bx * 3 + (by + j) * dst_stride; for (i = 0; i < 16; i++) { int Y, U, V; Y = c->block[(j >> 3) * 2 + (i >> 3)][(i & 7) + (j & 7) * 8]; U = c->block[4 ^ swapuv][(i >> 1) + (j >> 1) * 8] - 128; V = c->block[5 ^ swapuv][(i >> 1) + (j >> 1) * 8] - 128; yuv2rgb(out + i * 3, Y, U, V); } } if (!--num_mbs) return 0; bx += 16; } bx = 0; by += 16; if (mask) mask += mask_stride; } return 0; } static void kempf_restore_buf(const uint8_t *src, int len, uint8_t *dst, int stride, const uint8_t *jpeg_tile, int tile_stride, int width, int height, const uint8_t *pal, int npal, int tidx) { GetBitContext gb; int i, j, nb, col; init_get_bits(&gb, src, len * 8); if (npal <= 2) nb = 1; else if (npal <= 4) nb = 2; else if (npal <= 16) nb = 4; else nb = 8; for (j = 0; j < height; j++, dst += stride, jpeg_tile += tile_stride) { if (get_bits(&gb, 8)) continue; for (i = 0; i < width; i++) { col = get_bits(&gb, nb); if (col != tidx) memcpy(dst + i * 3, pal + col * 3, 3); else memcpy(dst + i * 3, jpeg_tile + i * 3, 3); } } } static int kempf_decode_tile(G2MContext *c, int tile_x, int tile_y, const uint8_t *src, int src_size) { int width, height; int hdr, zsize, npal, tidx = -1, ret; int i, j; const uint8_t *src_end = src + src_size; uint8_t pal[768], transp[3]; uLongf dlen = (c->tile_width + 1) * c->tile_height; int sub_type; int nblocks, cblocks, bstride; int bits, bitbuf, coded; uint8_t *dst = c->framebuf + tile_x * c->tile_width * 3 + tile_y * c->tile_height * c->framebuf_stride; if (src_size < 2) return AVERROR_INVALIDDATA; width = FFMIN(c->width - tile_x * c->tile_width, c->tile_width); height = FFMIN(c->height - tile_y * c->tile_height, c->tile_height); hdr = *src++; sub_type = hdr >> 5; if (sub_type 
== 0) { int j; memcpy(transp, src, 3); src += 3; for (j = 0; j < height; j++, dst += c->framebuf_stride) for (i = 0; i < width; i++) memcpy(dst + i * 3, transp, 3); return 0; } else if (sub_type == 1) { return jpg_decode_data(&c->jc, width, height, src, src_end - src, dst, c->framebuf_stride, NULL, 0, 0, 0); } if (sub_type != 2) { memcpy(transp, src, 3); src += 3; } npal = *src++ + 1; memcpy(pal, src, npal * 3); src += npal * 3; if (sub_type != 2) { for (i = 0; i < npal; i++) { if (!memcmp(pal + i * 3, transp, 3)) { tidx = i; break; } } } if (src_end - src < 2) return 0; zsize = (src[0] << 8) | src[1]; src += 2; if (src_end - src < zsize) return AVERROR_INVALIDDATA; ret = uncompress(c->kempf_buf, &dlen, src, zsize); if (ret) return AVERROR_INVALIDDATA; src += zsize; if (sub_type == 2) { kempf_restore_buf(c->kempf_buf, dlen, dst, c->framebuf_stride, NULL, 0, width, height, pal, npal, tidx); return 0; } nblocks = *src++ + 1; cblocks = 0; bstride = FFALIGN(width, 16) >> 4; // blocks are coded LSB and we need normal bitreader for JPEG data bits = 0; for (i = 0; i < (FFALIGN(height, 16) >> 4); i++) { for (j = 0; j < (FFALIGN(width, 16) >> 4); j++) { if (!bits) { bitbuf = *src++; bits = 8; } coded = bitbuf & 1; bits--; bitbuf >>= 1; cblocks += coded; if (cblocks > nblocks) return AVERROR_INVALIDDATA; c->kempf_flags[j + i * bstride] = coded; } } memset(c->jpeg_tile, 0, c->tile_stride * height); jpg_decode_data(&c->jc, width, height, src, src_end - src, c->jpeg_tile, c->tile_stride, c->kempf_flags, bstride, nblocks, 0); kempf_restore_buf(c->kempf_buf, dlen, dst, c->framebuf_stride, c->jpeg_tile, c->tile_stride, width, height, pal, npal, tidx); return 0; } static int g2m_init_buffers(G2MContext *c) { int aligned_height; if (!c->framebuf || c->old_width < c->width || c->old_height < c->height) { c->framebuf_stride = FFALIGN(c->width * 3, 16); aligned_height = FFALIGN(c->height, 16); av_free(c->framebuf); c->framebuf = av_mallocz(c->framebuf_stride * aligned_height); if 
(!c->framebuf) return AVERROR(ENOMEM); } if (!c->synth_tile || !c->jpeg_tile || c->old_tile_w < c->tile_width || c->old_tile_h < c->tile_height) { c->tile_stride = FFALIGN(c->tile_width * 3, 16); aligned_height = FFALIGN(c->tile_height, 16); av_free(c->synth_tile); av_free(c->jpeg_tile); av_free(c->kempf_buf); av_free(c->kempf_flags); c->synth_tile = av_mallocz(c->tile_stride * aligned_height); c->jpeg_tile = av_mallocz(c->tile_stride * aligned_height); c->kempf_buf = av_mallocz((c->tile_width + 1) * aligned_height + FF_INPUT_BUFFER_PADDING_SIZE); c->kempf_flags = av_mallocz( c->tile_width * aligned_height); if (!c->synth_tile || !c->jpeg_tile || !c->kempf_buf || !c->kempf_flags) return AVERROR(ENOMEM); } return 0; } static int g2m_load_cursor(AVCodecContext *avctx, G2MContext *c, GetByteContext *gb) { int i, j, k; uint8_t *dst; uint32_t bits; uint32_t cur_size, cursor_w, cursor_h, cursor_stride; uint32_t cursor_hot_x, cursor_hot_y; int cursor_fmt; uint8_t *tmp; cur_size = bytestream2_get_be32(gb); cursor_w = bytestream2_get_byte(gb); cursor_h = bytestream2_get_byte(gb); cursor_hot_x = bytestream2_get_byte(gb); cursor_hot_y = bytestream2_get_byte(gb); cursor_fmt = bytestream2_get_byte(gb); cursor_stride = cursor_w * 4; if (cursor_w < 1 || cursor_w > 256 || cursor_h < 1 || cursor_h > 256) { av_log(avctx, AV_LOG_ERROR, "Invalid cursor dimensions %dx%d\n", cursor_w, cursor_h); return AVERROR_INVALIDDATA; } if (cursor_hot_x > cursor_w || cursor_hot_y > cursor_h) { av_log(avctx, AV_LOG_WARNING, "Invalid hotspot position %d,%d\n", cursor_hot_x, cursor_hot_y); cursor_hot_x = FFMIN(cursor_hot_x, cursor_w - 1); cursor_hot_y = FFMIN(cursor_hot_y, cursor_h - 1); } if (cur_size - 9 > bytestream2_get_bytes_left(gb) || c->cursor_w * c->cursor_h / 4 > cur_size) { av_log(avctx, AV_LOG_ERROR, "Invalid cursor data size %d/%d\n", cur_size, bytestream2_get_bytes_left(gb)); return AVERROR_INVALIDDATA; } if (cursor_fmt != 1 && cursor_fmt != 32) { avpriv_report_missing_feature(avctx, 
"Cursor format %d", cursor_fmt); return AVERROR_PATCHWELCOME; } if (cursor_fmt == 1 && cursor_w % 32) { avpriv_report_missing_feature(avctx, "odd monochrome cursor width %d", cursor_w); return AVERROR_PATCHWELCOME; } tmp = av_realloc(c->cursor, cursor_stride * cursor_h); if (!tmp) { av_log(avctx, AV_LOG_ERROR, "Cannot allocate cursor buffer\n"); return AVERROR(ENOMEM); } c->cursor = tmp; c->cursor_w = cursor_w; c->cursor_h = cursor_h; c->cursor_hot_x = cursor_hot_x; c->cursor_hot_y = cursor_hot_y; c->cursor_fmt = cursor_fmt; c->cursor_stride = cursor_stride; dst = c->cursor; switch (c->cursor_fmt) { case 1: // old monochrome for (j = 0; j < c->cursor_h; j++) { for (i = 0; i < c->cursor_w; i += 32) { bits = bytestream2_get_be32(gb); for (k = 0; k < 32; k++) { dst[0] = !!(bits & 0x80000000); dst += 4; bits <<= 1; } } } dst = c->cursor; for (j = 0; j < c->cursor_h; j++) { for (i = 0; i < c->cursor_w; i += 32) { bits = bytestream2_get_be32(gb); for (k = 0; k < 32; k++) { int mask_bit = !!(bits & 0x80000000); switch (dst[0] * 2 + mask_bit) { case 0: dst[0] = 0xFF; dst[1] = 0x00; dst[2] = 0x00; dst[3] = 0x00; break; case 1: dst[0] = 0xFF; dst[1] = 0xFF; dst[2] = 0xFF; dst[3] = 0xFF; break; default: dst[0] = 0x00; dst[1] = 0x00; dst[2] = 0x00; dst[3] = 0x00; } dst += 4; bits <<= 1; } } } break; case 32: // full colour /* skip monochrome version of the cursor and decode RGBA instead */ bytestream2_skip(gb, c->cursor_h * (FFALIGN(c->cursor_w, 32) >> 3)); for (j = 0; j < c->cursor_h; j++) { for (i = 0; i < c->cursor_w; i++) { int val = bytestream2_get_be32(gb); *dst++ = val >> 0; *dst++ = val >> 8; *dst++ = val >> 16; *dst++ = val >> 24; } } break; default: return AVERROR_PATCHWELCOME; } return 0; } #define APPLY_ALPHA(src, new, alpha) \ src = (src * (256 - alpha) + new * alpha) >> 8 static void g2m_paint_cursor(G2MContext *c, uint8_t *dst, int stride) { int i, j; int x, y, w, h; const uint8_t *cursor; if (!c->cursor) return; x = c->cursor_x - c->cursor_hot_x; y = 
c->cursor_y - c->cursor_hot_y; cursor = c->cursor; w = c->cursor_w; h = c->cursor_h; if (x + w > c->width) w = c->width - x; if (y + h > c->height) h = c->height - y; if (x < 0) { w += x; cursor += -x * 4; } else { dst += x * 3; } if (y < 0) { h += y; cursor += -y * c->cursor_stride; } else { dst += y * stride; } if (w < 0 || h < 0) return; for (j = 0; j < h; j++) { for (i = 0; i < w; i++) { uint8_t alpha = cursor[i * 4]; APPLY_ALPHA(dst[i * 3 + 0], cursor[i * 4 + 1], alpha); APPLY_ALPHA(dst[i * 3 + 1], cursor[i * 4 + 2], alpha); APPLY_ALPHA(dst[i * 3 + 2], cursor[i * 4 + 3], alpha); } dst += stride; cursor += c->cursor_stride; } } static int g2m_decode_frame(AVCodecContext *avctx, void *data, int *got_picture_ptr, AVPacket *avpkt) { const uint8_t *buf = avpkt->data; int buf_size = avpkt->size; G2MContext *c = avctx->priv_data; AVFrame *pic = data; GetByteContext bc, tbc; int magic; int got_header = 0; uint32_t chunk_size; int chunk_type; int i; int ret; if (buf_size < 12) { av_log(avctx, AV_LOG_ERROR, "Frame should have at least 12 bytes, got %d instead\n", buf_size); return AVERROR_INVALIDDATA; } bytestream2_init(&bc, buf, buf_size); magic = bytestream2_get_be32(&bc); if ((magic & ~0xF) != MKBETAG('G', '2', 'M', '0') || (magic & 0xF) < 2 || (magic & 0xF) > 4) { av_log(avctx, AV_LOG_ERROR, "Wrong magic %08X\n", magic); return AVERROR_INVALIDDATA; } if ((magic & 0xF) != 4) { av_log(avctx, AV_LOG_ERROR, "G2M2 and G2M3 are not yet supported\n"); return AVERROR(ENOSYS); } while (bytestream2_get_bytes_left(&bc) > 5) { chunk_size = bytestream2_get_le32(&bc) - 1; chunk_type = bytestream2_get_byte(&bc); if (chunk_size > bytestream2_get_bytes_left(&bc)) { av_log(avctx, AV_LOG_ERROR, "Invalid chunk size %d type %02X\n", chunk_size, chunk_type); break; } switch (chunk_type) { case FRAME_INFO: c->got_header = 0; if (chunk_size < 21) { av_log(avctx, AV_LOG_ERROR, "Invalid frame info size %d\n", chunk_size); break; } c->width = bytestream2_get_be32(&bc); c->height = 
bytestream2_get_be32(&bc); if (c->width < 16 || c->width > avctx->width || c->height < 16 || c->height > avctx->height) { av_log(avctx, AV_LOG_ERROR, "Invalid frame dimensions %dx%d\n", c->width, c->height); ret = AVERROR_INVALIDDATA; goto header_fail; } if (c->width != avctx->width || c->height != avctx->height) avcodec_set_dimensions(avctx, c->width, c->height); c->compression = bytestream2_get_be32(&bc); if (c->compression != 2 && c->compression != 3) { av_log(avctx, AV_LOG_ERROR, "Unknown compression method %d\n", c->compression); return AVERROR_PATCHWELCOME; } c->tile_width = bytestream2_get_be32(&bc); c->tile_height = bytestream2_get_be32(&bc); if (!c->tile_width || !c->tile_height) { av_log(avctx, AV_LOG_ERROR, "Invalid tile dimensions %dx%d\n", c->tile_width, c->tile_height); ret = AVERROR_INVALIDDATA; goto header_fail; } c->tiles_x = (c->width + c->tile_width - 1) / c->tile_width; c->tiles_y = (c->height + c->tile_height - 1) / c->tile_height; c->bpp = bytestream2_get_byte(&bc); chunk_size -= 21; bytestream2_skip(&bc, chunk_size); if (g2m_init_buffers(c)) { ret = AVERROR(ENOMEM); goto header_fail; } got_header = 1; break; case TILE_DATA: if (!c->tiles_x || !c->tiles_y) { av_log(avctx, AV_LOG_WARNING, "No frame header - skipping tile\n"); bytestream2_skip(&bc, bytestream2_get_bytes_left(&bc)); break; } if (chunk_size < 2) { av_log(avctx, AV_LOG_ERROR, "Invalid tile data size %d\n", chunk_size); break; } c->tile_x = bytestream2_get_byte(&bc); c->tile_y = bytestream2_get_byte(&bc); if (c->tile_x >= c->tiles_x || c->tile_y >= c->tiles_y) { av_log(avctx, AV_LOG_ERROR, "Invalid tile pos %d,%d (in %dx%d grid)\n", c->tile_x, c->tile_y, c->tiles_x, c->tiles_y); break; } chunk_size -= 2; ret = 0; switch (c->compression) { case COMPR_EPIC_J_B: av_log(avctx, AV_LOG_ERROR, "ePIC j-b compression is not implemented yet\n"); return AVERROR(ENOSYS); case COMPR_KEMPF_J_B: ret = kempf_decode_tile(c, c->tile_x, c->tile_y, buf + bytestream2_tell(&bc), chunk_size); break; } if 
(ret && c->framebuf) av_log(avctx, AV_LOG_ERROR, "Error decoding tile %d,%d\n", c->tile_x, c->tile_y); bytestream2_skip(&bc, chunk_size); break; case CURSOR_POS: if (chunk_size < 5) { av_log(avctx, AV_LOG_ERROR, "Invalid cursor pos size %d\n", chunk_size); break; } c->cursor_x = bytestream2_get_be16(&bc); c->cursor_y = bytestream2_get_be16(&bc); bytestream2_skip(&bc, chunk_size - 4); break; case CURSOR_SHAPE: if (chunk_size < 8) { av_log(avctx, AV_LOG_ERROR, "Invalid cursor data size %d\n", chunk_size); break; } bytestream2_init(&tbc, buf + bytestream2_tell(&bc), chunk_size - 4); g2m_load_cursor(avctx, c, &tbc); bytestream2_skip(&bc, chunk_size); break; case CHUNK_CC: case CHUNK_CD: bytestream2_skip(&bc, chunk_size); break; default: av_log(avctx, AV_LOG_WARNING, "Skipping chunk type %02X\n", chunk_type); bytestream2_skip(&bc, chunk_size); } } if (got_header) c->got_header = 1; if (c->width && c->height && c->framebuf) { if ((ret = ff_get_buffer(avctx, pic, 0)) < 0) { av_log(avctx, AV_LOG_ERROR, "get_buffer() failed\n"); return ret; } pic->key_frame = got_header; pic->pict_type = got_header ? 
AV_PICTURE_TYPE_I : AV_PICTURE_TYPE_P; for (i = 0; i < avctx->height; i++) memcpy(pic->data[0] + i * pic->linesize[0], c->framebuf + i * c->framebuf_stride, c->width * 3); g2m_paint_cursor(c, pic->data[0], pic->linesize[0]); *got_picture_ptr = 1; } return buf_size; header_fail: c->width = c->height = 0; c->tiles_x = c->tiles_y = 0; return ret; } static av_cold int g2m_decode_init(AVCodecContext *avctx) { G2MContext * const c = avctx->priv_data; int ret; if ((ret = jpg_init(avctx, &c->jc)) != 0) { av_log(avctx, AV_LOG_ERROR, "Cannot initialise VLCs\n"); jpg_free_context(&c->jc); return AVERROR(ENOMEM); } avctx->pix_fmt = AV_PIX_FMT_RGB24; return 0; } static av_cold int g2m_decode_end(AVCodecContext *avctx) { G2MContext * const c = avctx->priv_data; jpg_free_context(&c->jc); av_freep(&c->kempf_buf); av_freep(&c->kempf_flags); av_freep(&c->synth_tile); av_freep(&c->jpeg_tile); av_freep(&c->cursor); av_freep(&c->framebuf); return 0; } AVCodec ff_g2m_decoder = { .name = "g2m", .long_name = NULL_IF_CONFIG_SMALL("Go2Meeting"), .type = AVMEDIA_TYPE_VIDEO, .id = AV_CODEC_ID_G2M, .priv_data_size = sizeof(G2MContext), .init = g2m_decode_init, .close = g2m_decode_end, .decode = g2m_decode_frame, .capabilities = CODEC_CAP_DR1, };
/* * Go2Webinar decoder * Copyright (c) 2012 Konstantin Shishkov * * This file is part of FFmpeg. * * FFmpeg is free software; you can redistribute it and/or * modify it under the terms of the GNU Lesser General Public * License as published by the Free Software Foundation; either * version 2.1 of the License, or (at your option) any later version. * * FFmpeg is distributed in the hope that it will be useful, * but WITHOUT ANY WARRANTY; without even the implied warranty of * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU * Lesser General Public License for more details. * * You should have received a copy of the GNU Lesser General Public * License along with FFmpeg; if not, write to the Free Software * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA */ /** * @file * Go2Webinar decoder */ #include <zlib.h> #include "libavutil/intreadwrite.h" #include "avcodec.h" #include "bytestream.h" #include "dsputil.h" #include "get_bits.h" #include "internal.h" #include "mjpeg.h" enum ChunkType { FRAME_INFO = 0xC8, TILE_DATA, CURSOR_POS, CURSOR_SHAPE, CHUNK_CC, CHUNK_CD }; enum Compression { COMPR_EPIC_J_B = 2, COMPR_KEMPF_J_B, }; static const uint8_t luma_quant[64] = { 8, 6, 5, 8, 12, 20, 26, 31, 6, 6, 7, 10, 13, 29, 30, 28, 7, 7, 8, 12, 20, 29, 35, 28, 7, 9, 11, 15, 26, 44, 40, 31, 9, 11, 19, 28, 34, 55, 52, 39, 12, 18, 28, 32, 41, 52, 57, 46, 25, 32, 39, 44, 52, 61, 60, 51, 36, 46, 48, 49, 56, 50, 52, 50 }; static const uint8_t chroma_quant[64] = { 9, 9, 12, 24, 50, 50, 50, 50, 9, 11, 13, 33, 50, 50, 50, 50, 12, 13, 28, 50, 50, 50, 50, 50, 24, 33, 50, 50, 50, 50, 50, 50, 50, 50, 50, 50, 50, 50, 50, 50, 50, 50, 50, 50, 50, 50, 50, 50, 50, 50, 50, 50, 50, 50, 50, 50, 50, 50, 50, 50, 50, 50, 50, 50, }; typedef struct JPGContext { DSPContext dsp; ScanTable scantable; VLC dc_vlc[2], ac_vlc[2]; int prev_dc[3]; DECLARE_ALIGNED(16, int16_t, block)[6][64]; uint8_t *buf; } JPGContext; typedef struct G2MContext { JPGContext jc; int 
version; int compression; int width, height, bpp; int tile_width, tile_height; int tiles_x, tiles_y, tile_x, tile_y; int got_header; uint8_t *framebuf; int framebuf_stride, old_width, old_height; uint8_t *synth_tile, *jpeg_tile; int tile_stride, old_tile_w, old_tile_h; uint8_t *kempf_buf, *kempf_flags; uint8_t *cursor; int cursor_stride; int cursor_fmt; int cursor_w, cursor_h, cursor_x, cursor_y; int cursor_hot_x, cursor_hot_y; } G2MContext; static av_cold int build_vlc(VLC *vlc, const uint8_t *bits_table, const uint8_t *val_table, int nb_codes, int is_ac) { uint8_t huff_size[256] = { 0 }; uint16_t huff_code[256]; uint16_t huff_sym[256]; int i; ff_mjpeg_build_huffman_codes(huff_size, huff_code, bits_table, val_table); for (i = 0; i < 256; i++) huff_sym[i] = i + 16 * is_ac; if (is_ac) huff_sym[0] = 16 * 256; return ff_init_vlc_sparse(vlc, 9, nb_codes, huff_size, 1, 1, huff_code, 2, 2, huff_sym, 2, 2, 0); } static av_cold int jpg_init(AVCodecContext *avctx, JPGContext *c) { int ret; ret = build_vlc(&c->dc_vlc[0], avpriv_mjpeg_bits_dc_luminance, avpriv_mjpeg_val_dc, 12, 0); if (ret) return ret; ret = build_vlc(&c->dc_vlc[1], avpriv_mjpeg_bits_dc_chrominance, avpriv_mjpeg_val_dc, 12, 0); if (ret) return ret; ret = build_vlc(&c->ac_vlc[0], avpriv_mjpeg_bits_ac_luminance, avpriv_mjpeg_val_ac_luminance, 251, 1); if (ret) return ret; ret = build_vlc(&c->ac_vlc[1], avpriv_mjpeg_bits_ac_chrominance, avpriv_mjpeg_val_ac_chrominance, 251, 1); if (ret) return ret; ff_dsputil_init(&c->dsp, avctx); ff_init_scantable(c->dsp.idct_permutation, &c->scantable, ff_zigzag_direct); return 0; } static av_cold void jpg_free_context(JPGContext *ctx) { int i; for (i = 0; i < 2; i++) { ff_free_vlc(&ctx->dc_vlc[i]); ff_free_vlc(&ctx->ac_vlc[i]); } av_freep(&ctx->buf); } static void jpg_unescape(const uint8_t *src, int src_size, uint8_t *dst, int *dst_size) { const uint8_t *src_end = src + src_size; uint8_t *dst_start = dst; while (src < src_end) { uint8_t x = *src++; *dst++ = x; if (x == 0xFF 
&& !*src) src++; } *dst_size = dst - dst_start; } static int jpg_decode_block(JPGContext *c, GetBitContext *gb, int plane, int16_t *block) { int dc, val, pos; const int is_chroma = !!plane; const uint8_t *qmat = is_chroma ? chroma_quant : luma_quant; c->dsp.clear_block(block); dc = get_vlc2(gb, c->dc_vlc[is_chroma].table, 9, 3); if (dc < 0) return AVERROR_INVALIDDATA; if (dc) dc = get_xbits(gb, dc); dc = dc * qmat[0] + c->prev_dc[plane]; block[0] = dc; c->prev_dc[plane] = dc; pos = 0; while (pos < 63) { val = get_vlc2(gb, c->ac_vlc[is_chroma].table, 9, 3); if (val < 0) return AVERROR_INVALIDDATA; pos += val >> 4; val &= 0xF; if (pos > 63) return val ? AVERROR_INVALIDDATA : 0; if (val) { int nbits = val; val = get_xbits(gb, nbits); val *= qmat[ff_zigzag_direct[pos]]; block[c->scantable.permutated[pos]] = val; } } return 0; } static inline void yuv2rgb(uint8_t *out, int Y, int U, int V) { out[0] = av_clip_uint8(Y + ( 91881 * V + 32768 >> 16)); out[1] = av_clip_uint8(Y + (-22554 * U - 46802 * V + 32768 >> 16)); out[2] = av_clip_uint8(Y + (116130 * U + 32768 >> 16)); } static int jpg_decode_data(JPGContext *c, int width, int height, const uint8_t *src, int src_size, uint8_t *dst, int dst_stride, const uint8_t *mask, int mask_stride, int num_mbs, int swapuv) { GetBitContext gb; uint8_t *tmp; int mb_w, mb_h, mb_x, mb_y, i, j; int bx, by; int unesc_size; int ret; tmp = av_realloc(c->buf, src_size + FF_INPUT_BUFFER_PADDING_SIZE); if (!tmp) return AVERROR(ENOMEM); c->buf = tmp; jpg_unescape(src, src_size, c->buf, &unesc_size); memset(c->buf + unesc_size, 0, FF_INPUT_BUFFER_PADDING_SIZE); init_get_bits(&gb, c->buf, unesc_size * 8); width = FFALIGN(width, 16); mb_w = width >> 4; mb_h = (height + 15) >> 4; if (!num_mbs) num_mbs = mb_w * mb_h; for (i = 0; i < 3; i++) c->prev_dc[i] = 1024; bx = by = 0; for (mb_y = 0; mb_y < mb_h; mb_y++) { for (mb_x = 0; mb_x < mb_w; mb_x++) { if (mask && !mask[mb_x]) { bx += 16; continue; } for (j = 0; j < 2; j++) { for (i = 0; i < 2; i++) { if 
((ret = jpg_decode_block(c, &gb, 0, c->block[i + j * 2])) != 0) return ret; c->dsp.idct(c->block[i + j * 2]); } } for (i = 1; i < 3; i++) { if ((ret = jpg_decode_block(c, &gb, i, c->block[i + 3])) != 0) return ret; c->dsp.idct(c->block[i + 3]); } for (j = 0; j < 16; j++) { uint8_t *out = dst + bx * 3 + (by + j) * dst_stride; for (i = 0; i < 16; i++) { int Y, U, V; Y = c->block[(j >> 3) * 2 + (i >> 3)][(i & 7) + (j & 7) * 8]; U = c->block[4 ^ swapuv][(i >> 1) + (j >> 1) * 8] - 128; V = c->block[5 ^ swapuv][(i >> 1) + (j >> 1) * 8] - 128; yuv2rgb(out + i * 3, Y, U, V); } } if (!--num_mbs) return 0; bx += 16; } bx = 0; by += 16; if (mask) mask += mask_stride; } return 0; } static void kempf_restore_buf(const uint8_t *src, int len, uint8_t *dst, int stride, const uint8_t *jpeg_tile, int tile_stride, int width, int height, const uint8_t *pal, int npal, int tidx) { GetBitContext gb; int i, j, nb, col; init_get_bits(&gb, src, len * 8); if (npal <= 2) nb = 1; else if (npal <= 4) nb = 2; else if (npal <= 16) nb = 4; else nb = 8; for (j = 0; j < height; j++, dst += stride, jpeg_tile += tile_stride) { if (get_bits(&gb, 8)) continue; for (i = 0; i < width; i++) { col = get_bits(&gb, nb); if (col != tidx) memcpy(dst + i * 3, pal + col * 3, 3); else memcpy(dst + i * 3, jpeg_tile + i * 3, 3); } } } static int kempf_decode_tile(G2MContext *c, int tile_x, int tile_y, const uint8_t *src, int src_size) { int width, height; int hdr, zsize, npal, tidx = -1, ret; int i, j; const uint8_t *src_end = src + src_size; uint8_t pal[768], transp[3]; uLongf dlen = (c->tile_width + 1) * c->tile_height; int sub_type; int nblocks, cblocks, bstride; int bits, bitbuf, coded; uint8_t *dst = c->framebuf + tile_x * c->tile_width * 3 + tile_y * c->tile_height * c->framebuf_stride; if (src_size < 2) return AVERROR_INVALIDDATA; width = FFMIN(c->width - tile_x * c->tile_width, c->tile_width); height = FFMIN(c->height - tile_y * c->tile_height, c->tile_height); hdr = *src++; sub_type = hdr >> 5; if (sub_type 
== 0) { int j; memcpy(transp, src, 3); src += 3; for (j = 0; j < height; j++, dst += c->framebuf_stride) for (i = 0; i < width; i++) memcpy(dst + i * 3, transp, 3); return 0; } else if (sub_type == 1) { return jpg_decode_data(&c->jc, width, height, src, src_end - src, dst, c->framebuf_stride, NULL, 0, 0, 0); } if (sub_type != 2) { memcpy(transp, src, 3); src += 3; } npal = *src++ + 1; memcpy(pal, src, npal * 3); src += npal * 3; if (sub_type != 2) { for (i = 0; i < npal; i++) { if (!memcmp(pal + i * 3, transp, 3)) { tidx = i; break; } } } if (src_end - src < 2) return 0; zsize = (src[0] << 8) | src[1]; src += 2; if (src_end - src < zsize + (sub_type != 2)) return AVERROR_INVALIDDATA; ret = uncompress(c->kempf_buf, &dlen, src, zsize); if (ret) return AVERROR_INVALIDDATA; src += zsize; if (sub_type == 2) { kempf_restore_buf(c->kempf_buf, dlen, dst, c->framebuf_stride, NULL, 0, width, height, pal, npal, tidx); return 0; } nblocks = *src++ + 1; cblocks = 0; bstride = FFALIGN(width, 16) >> 4; // blocks are coded LSB and we need normal bitreader for JPEG data bits = 0; for (i = 0; i < (FFALIGN(height, 16) >> 4); i++) { for (j = 0; j < (FFALIGN(width, 16) >> 4); j++) { if (!bits) { if (src >= src_end) return AVERROR_INVALIDDATA; bitbuf = *src++; bits = 8; } coded = bitbuf & 1; bits--; bitbuf >>= 1; cblocks += coded; if (cblocks > nblocks) return AVERROR_INVALIDDATA; c->kempf_flags[j + i * bstride] = coded; } } memset(c->jpeg_tile, 0, c->tile_stride * height); jpg_decode_data(&c->jc, width, height, src, src_end - src, c->jpeg_tile, c->tile_stride, c->kempf_flags, bstride, nblocks, 0); kempf_restore_buf(c->kempf_buf, dlen, dst, c->framebuf_stride, c->jpeg_tile, c->tile_stride, width, height, pal, npal, tidx); return 0; } static int g2m_init_buffers(G2MContext *c) { int aligned_height; if (!c->framebuf || c->old_width < c->width || c->old_height < c->height) { c->framebuf_stride = FFALIGN(c->width * 3, 16); aligned_height = FFALIGN(c->height, 16); av_free(c->framebuf); 
c->framebuf = av_mallocz(c->framebuf_stride * aligned_height); if (!c->framebuf) return AVERROR(ENOMEM); } if (!c->synth_tile || !c->jpeg_tile || c->old_tile_w < c->tile_width || c->old_tile_h < c->tile_height) { c->tile_stride = FFALIGN(c->tile_width * 3, 16); aligned_height = FFALIGN(c->tile_height, 16); av_free(c->synth_tile); av_free(c->jpeg_tile); av_free(c->kempf_buf); av_free(c->kempf_flags); c->synth_tile = av_mallocz(c->tile_stride * aligned_height); c->jpeg_tile = av_mallocz(c->tile_stride * aligned_height); c->kempf_buf = av_mallocz((c->tile_width + 1) * aligned_height + FF_INPUT_BUFFER_PADDING_SIZE); c->kempf_flags = av_mallocz( c->tile_width * aligned_height); if (!c->synth_tile || !c->jpeg_tile || !c->kempf_buf || !c->kempf_flags) return AVERROR(ENOMEM); } return 0; } static int g2m_load_cursor(AVCodecContext *avctx, G2MContext *c, GetByteContext *gb) { int i, j, k; uint8_t *dst; uint32_t bits; uint32_t cur_size, cursor_w, cursor_h, cursor_stride; uint32_t cursor_hot_x, cursor_hot_y; int cursor_fmt; uint8_t *tmp; cur_size = bytestream2_get_be32(gb); cursor_w = bytestream2_get_byte(gb); cursor_h = bytestream2_get_byte(gb); cursor_hot_x = bytestream2_get_byte(gb); cursor_hot_y = bytestream2_get_byte(gb); cursor_fmt = bytestream2_get_byte(gb); cursor_stride = cursor_w * 4; if (cursor_w < 1 || cursor_w > 256 || cursor_h < 1 || cursor_h > 256) { av_log(avctx, AV_LOG_ERROR, "Invalid cursor dimensions %dx%d\n", cursor_w, cursor_h); return AVERROR_INVALIDDATA; } if (cursor_hot_x > cursor_w || cursor_hot_y > cursor_h) { av_log(avctx, AV_LOG_WARNING, "Invalid hotspot position %d,%d\n", cursor_hot_x, cursor_hot_y); cursor_hot_x = FFMIN(cursor_hot_x, cursor_w - 1); cursor_hot_y = FFMIN(cursor_hot_y, cursor_h - 1); } if (cur_size - 9 > bytestream2_get_bytes_left(gb) || c->cursor_w * c->cursor_h / 4 > cur_size) { av_log(avctx, AV_LOG_ERROR, "Invalid cursor data size %d/%d\n", cur_size, bytestream2_get_bytes_left(gb)); return AVERROR_INVALIDDATA; } if (cursor_fmt != 
1 && cursor_fmt != 32) { avpriv_report_missing_feature(avctx, "Cursor format %d", cursor_fmt); return AVERROR_PATCHWELCOME; } if (cursor_fmt == 1 && cursor_w % 32) { avpriv_report_missing_feature(avctx, "odd monochrome cursor width %d", cursor_w); return AVERROR_PATCHWELCOME; } tmp = av_realloc(c->cursor, cursor_stride * cursor_h); if (!tmp) { av_log(avctx, AV_LOG_ERROR, "Cannot allocate cursor buffer\n"); return AVERROR(ENOMEM); } c->cursor = tmp; c->cursor_w = cursor_w; c->cursor_h = cursor_h; c->cursor_hot_x = cursor_hot_x; c->cursor_hot_y = cursor_hot_y; c->cursor_fmt = cursor_fmt; c->cursor_stride = cursor_stride; dst = c->cursor; switch (c->cursor_fmt) { case 1: // old monochrome for (j = 0; j < c->cursor_h; j++) { for (i = 0; i < c->cursor_w; i += 32) { bits = bytestream2_get_be32(gb); for (k = 0; k < 32; k++) { dst[0] = !!(bits & 0x80000000); dst += 4; bits <<= 1; } } } dst = c->cursor; for (j = 0; j < c->cursor_h; j++) { for (i = 0; i < c->cursor_w; i += 32) { bits = bytestream2_get_be32(gb); for (k = 0; k < 32; k++) { int mask_bit = !!(bits & 0x80000000); switch (dst[0] * 2 + mask_bit) { case 0: dst[0] = 0xFF; dst[1] = 0x00; dst[2] = 0x00; dst[3] = 0x00; break; case 1: dst[0] = 0xFF; dst[1] = 0xFF; dst[2] = 0xFF; dst[3] = 0xFF; break; default: dst[0] = 0x00; dst[1] = 0x00; dst[2] = 0x00; dst[3] = 0x00; } dst += 4; bits <<= 1; } } } break; case 32: // full colour /* skip monochrome version of the cursor and decode RGBA instead */ bytestream2_skip(gb, c->cursor_h * (FFALIGN(c->cursor_w, 32) >> 3)); for (j = 0; j < c->cursor_h; j++) { for (i = 0; i < c->cursor_w; i++) { int val = bytestream2_get_be32(gb); *dst++ = val >> 0; *dst++ = val >> 8; *dst++ = val >> 16; *dst++ = val >> 24; } } break; default: return AVERROR_PATCHWELCOME; } return 0; } #define APPLY_ALPHA(src, new, alpha) \ src = (src * (256 - alpha) + new * alpha) >> 8 static void g2m_paint_cursor(G2MContext *c, uint8_t *dst, int stride) { int i, j; int x, y, w, h; const uint8_t *cursor; if 
(!c->cursor) return; x = c->cursor_x - c->cursor_hot_x; y = c->cursor_y - c->cursor_hot_y; cursor = c->cursor; w = c->cursor_w; h = c->cursor_h; if (x + w > c->width) w = c->width - x; if (y + h > c->height) h = c->height - y; if (x < 0) { w += x; cursor += -x * 4; } else { dst += x * 3; } if (y < 0) { h += y; cursor += -y * c->cursor_stride; } else { dst += y * stride; } if (w < 0 || h < 0) return; for (j = 0; j < h; j++) { for (i = 0; i < w; i++) { uint8_t alpha = cursor[i * 4]; APPLY_ALPHA(dst[i * 3 + 0], cursor[i * 4 + 1], alpha); APPLY_ALPHA(dst[i * 3 + 1], cursor[i * 4 + 2], alpha); APPLY_ALPHA(dst[i * 3 + 2], cursor[i * 4 + 3], alpha); } dst += stride; cursor += c->cursor_stride; } } static int g2m_decode_frame(AVCodecContext *avctx, void *data, int *got_picture_ptr, AVPacket *avpkt) { const uint8_t *buf = avpkt->data; int buf_size = avpkt->size; G2MContext *c = avctx->priv_data; AVFrame *pic = data; GetByteContext bc, tbc; int magic; int got_header = 0; uint32_t chunk_size; int chunk_type; int i; int ret; if (buf_size < 12) { av_log(avctx, AV_LOG_ERROR, "Frame should have at least 12 bytes, got %d instead\n", buf_size); return AVERROR_INVALIDDATA; } bytestream2_init(&bc, buf, buf_size); magic = bytestream2_get_be32(&bc); if ((magic & ~0xF) != MKBETAG('G', '2', 'M', '0') || (magic & 0xF) < 2 || (magic & 0xF) > 4) { av_log(avctx, AV_LOG_ERROR, "Wrong magic %08X\n", magic); return AVERROR_INVALIDDATA; } if ((magic & 0xF) != 4) { av_log(avctx, AV_LOG_ERROR, "G2M2 and G2M3 are not yet supported\n"); return AVERROR(ENOSYS); } while (bytestream2_get_bytes_left(&bc) > 5) { chunk_size = bytestream2_get_le32(&bc) - 1; chunk_type = bytestream2_get_byte(&bc); if (chunk_size > bytestream2_get_bytes_left(&bc)) { av_log(avctx, AV_LOG_ERROR, "Invalid chunk size %d type %02X\n", chunk_size, chunk_type); break; } switch (chunk_type) { case FRAME_INFO: c->got_header = 0; if (chunk_size < 21) { av_log(avctx, AV_LOG_ERROR, "Invalid frame info size %d\n", chunk_size); break; } 
c->width = bytestream2_get_be32(&bc); c->height = bytestream2_get_be32(&bc); if (c->width < 16 || c->width > avctx->width || c->height < 16 || c->height > avctx->height) { av_log(avctx, AV_LOG_ERROR, "Invalid frame dimensions %dx%d\n", c->width, c->height); ret = AVERROR_INVALIDDATA; goto header_fail; } if (c->width != avctx->width || c->height != avctx->height) avcodec_set_dimensions(avctx, c->width, c->height); c->compression = bytestream2_get_be32(&bc); if (c->compression != 2 && c->compression != 3) { av_log(avctx, AV_LOG_ERROR, "Unknown compression method %d\n", c->compression); return AVERROR_PATCHWELCOME; } c->tile_width = bytestream2_get_be32(&bc); c->tile_height = bytestream2_get_be32(&bc); if (!c->tile_width || !c->tile_height) { av_log(avctx, AV_LOG_ERROR, "Invalid tile dimensions %dx%d\n", c->tile_width, c->tile_height); ret = AVERROR_INVALIDDATA; goto header_fail; } c->tiles_x = (c->width + c->tile_width - 1) / c->tile_width; c->tiles_y = (c->height + c->tile_height - 1) / c->tile_height; c->bpp = bytestream2_get_byte(&bc); chunk_size -= 21; bytestream2_skip(&bc, chunk_size); if (g2m_init_buffers(c)) { ret = AVERROR(ENOMEM); goto header_fail; } got_header = 1; break; case TILE_DATA: if (!c->tiles_x || !c->tiles_y) { av_log(avctx, AV_LOG_WARNING, "No frame header - skipping tile\n"); bytestream2_skip(&bc, bytestream2_get_bytes_left(&bc)); break; } if (chunk_size < 2) { av_log(avctx, AV_LOG_ERROR, "Invalid tile data size %d\n", chunk_size); break; } c->tile_x = bytestream2_get_byte(&bc); c->tile_y = bytestream2_get_byte(&bc); if (c->tile_x >= c->tiles_x || c->tile_y >= c->tiles_y) { av_log(avctx, AV_LOG_ERROR, "Invalid tile pos %d,%d (in %dx%d grid)\n", c->tile_x, c->tile_y, c->tiles_x, c->tiles_y); break; } chunk_size -= 2; ret = 0; switch (c->compression) { case COMPR_EPIC_J_B: av_log(avctx, AV_LOG_ERROR, "ePIC j-b compression is not implemented yet\n"); return AVERROR(ENOSYS); case COMPR_KEMPF_J_B: ret = kempf_decode_tile(c, c->tile_x, c->tile_y, buf 
+ bytestream2_tell(&bc), chunk_size); break; } if (ret && c->framebuf) av_log(avctx, AV_LOG_ERROR, "Error decoding tile %d,%d\n", c->tile_x, c->tile_y); bytestream2_skip(&bc, chunk_size); break; case CURSOR_POS: if (chunk_size < 5) { av_log(avctx, AV_LOG_ERROR, "Invalid cursor pos size %d\n", chunk_size); break; } c->cursor_x = bytestream2_get_be16(&bc); c->cursor_y = bytestream2_get_be16(&bc); bytestream2_skip(&bc, chunk_size - 4); break; case CURSOR_SHAPE: if (chunk_size < 8) { av_log(avctx, AV_LOG_ERROR, "Invalid cursor data size %d\n", chunk_size); break; } bytestream2_init(&tbc, buf + bytestream2_tell(&bc), chunk_size - 4); g2m_load_cursor(avctx, c, &tbc); bytestream2_skip(&bc, chunk_size); break; case CHUNK_CC: case CHUNK_CD: bytestream2_skip(&bc, chunk_size); break; default: av_log(avctx, AV_LOG_WARNING, "Skipping chunk type %02X\n", chunk_type); bytestream2_skip(&bc, chunk_size); } } if (got_header) c->got_header = 1; if (c->width && c->height && c->framebuf) { if ((ret = ff_get_buffer(avctx, pic, 0)) < 0) { av_log(avctx, AV_LOG_ERROR, "get_buffer() failed\n"); return ret; } pic->key_frame = got_header; pic->pict_type = got_header ? 
AV_PICTURE_TYPE_I : AV_PICTURE_TYPE_P; for (i = 0; i < avctx->height; i++) memcpy(pic->data[0] + i * pic->linesize[0], c->framebuf + i * c->framebuf_stride, c->width * 3); g2m_paint_cursor(c, pic->data[0], pic->linesize[0]); *got_picture_ptr = 1; } return buf_size; header_fail: c->width = c->height = 0; c->tiles_x = c->tiles_y = 0; return ret; } static av_cold int g2m_decode_init(AVCodecContext *avctx) { G2MContext * const c = avctx->priv_data; int ret; if ((ret = jpg_init(avctx, &c->jc)) != 0) { av_log(avctx, AV_LOG_ERROR, "Cannot initialise VLCs\n"); jpg_free_context(&c->jc); return AVERROR(ENOMEM); } avctx->pix_fmt = AV_PIX_FMT_RGB24; return 0; } static av_cold int g2m_decode_end(AVCodecContext *avctx) { G2MContext * const c = avctx->priv_data; jpg_free_context(&c->jc); av_freep(&c->kempf_buf); av_freep(&c->kempf_flags); av_freep(&c->synth_tile); av_freep(&c->jpeg_tile); av_freep(&c->cursor); av_freep(&c->framebuf); return 0; } AVCodec ff_g2m_decoder = { .name = "g2m", .long_name = NULL_IF_CONFIG_SMALL("Go2Meeting"), .type = AVMEDIA_TYPE_VIDEO, .id = AV_CODEC_ID_G2M, .priv_data_size = sizeof(G2MContext), .init = g2m_decode_init, .close = g2m_decode_end, .decode = g2m_decode_frame, .capabilities = CODEC_CAP_DR1, };
/*
 * Decode one "Kempf" compressed tile into the frame buffer.
 *
 * Tile layout (after the 1-byte header whose top 3 bits select sub_type):
 *   sub_type 0: 3-byte fill colour, whole tile painted with it.
 *   sub_type 1: raw JPEG data for the whole tile.
 *   sub_type 2: palette + zlib-compressed pixel indices, no JPEG underlay.
 *   otherwise : 3-byte transparent colour, palette, zlib indices, a block
 *               bitmap and JPEG data for the blocks marked as coded.
 *
 * Returns 0 on success or a negative AVERROR code on malformed input.
 *
 * Fixes vs. the previous revision (CVE-2013-4264 / CWE-119):
 *  - the size check before uncompress() now accounts for the extra
 *    nblocks byte consumed when sub_type != 2;
 *  - the block-flag bit reader validates src against src_end before
 *    every refill, preventing a heap over-read on truncated input.
 */
static int kempf_decode_tile(G2MContext *c, int tile_x, int tile_y,
                             const uint8_t *src, int src_size)
{
    int width, height;
    int hdr, zsize, npal, tidx = -1, ret;
    int i, j;
    const uint8_t *src_end = src + src_size;
    uint8_t pal[768], transp[3];
    uLongf dlen = (c->tile_width + 1) * c->tile_height;
    int sub_type;
    int nblocks, cblocks, bstride;
    int bits, bitbuf, coded;
    uint8_t *dst = c->framebuf + tile_x * c->tile_width * 3 +
                   tile_y * c->tile_height * c->framebuf_stride;

    if (src_size < 2)
        return AVERROR_INVALIDDATA;

    /* Edge tiles may be smaller than the nominal tile size. */
    width  = FFMIN(c->width  - tile_x * c->tile_width,  c->tile_width);
    height = FFMIN(c->height - tile_y * c->tile_height, c->tile_height);

    hdr      = *src++;
    sub_type = hdr >> 5;
    if (sub_type == 0) {
        /* Solid-colour tile: fill every pixel with the 3-byte colour. */
        int j;
        memcpy(transp, src, 3);
        src += 3;
        for (j = 0; j < height; j++, dst += c->framebuf_stride)
            for (i = 0; i < width; i++)
                memcpy(dst + i * 3, transp, 3);
        return 0;
    } else if (sub_type == 1) {
        /* Pure JPEG tile: decode straight into the frame buffer. */
        return jpg_decode_data(&c->jc, width, height, src, src_end - src,
                               dst, c->framebuf_stride, NULL, 0, 0, 0);
    }

    if (sub_type != 2) {
        /* Transparent colour marking pixels taken from the JPEG layer. */
        memcpy(transp, src, 3);
        src += 3;
    }
    npal = *src++ + 1;
    memcpy(pal, src, npal * 3);
    src += npal * 3;
    if (sub_type != 2) {
        /* Find the palette index of the transparent colour, if present. */
        for (i = 0; i < npal; i++) {
            if (!memcmp(pal + i * 3, transp, 3)) {
                tidx = i;
                break;
            }
        }
    }

    if (src_end - src < 2)
        return 0;
    zsize = (src[0] << 8) | src[1];
    src  += 2;
    /* Need zsize compressed bytes plus, for JPEG-backed tiles
     * (sub_type != 2), one more byte for the nblocks count read below. */
    if (src_end - src < zsize + (sub_type != 2))
        return AVERROR_INVALIDDATA;
    ret = uncompress(c->kempf_buf, &dlen, src, zsize);
    if (ret)
        return AVERROR_INVALIDDATA;
    src += zsize;

    if (sub_type == 2) {
        /* Palette-only tile: no JPEG underlay. */
        kempf_restore_buf(c->kempf_buf, dlen, dst, c->framebuf_stride,
                          NULL, 0, width, height, pal, npal, tidx);
        return 0;
    }

    nblocks = *src++ + 1;
    cblocks = 0;
    bstride = FFALIGN(width, 16) >> 4;
    // blocks are coded LSB and we need normal bitreader for JPEG data
    bits = 0;
    for (i = 0; i < (FFALIGN(height, 16) >> 4); i++) {
        for (j = 0; j < (FFALIGN(width, 16) >> 4); j++) {
            if (!bits) {
                /* Bounds-check the bitstream refill (CVE-2013-4264 fix). */
                if (src >= src_end)
                    return AVERROR_INVALIDDATA;
                bitbuf = *src++;
                bits   = 8;
            }
            coded = bitbuf & 1;
            bits--;
            bitbuf >>= 1;
            cblocks += coded;
            if (cblocks > nblocks)
                return AVERROR_INVALIDDATA;
            c->kempf_flags[j + i * bstride] = coded;
        }
    }

    memset(c->jpeg_tile, 0, c->tile_stride * height);
    jpg_decode_data(&c->jc, width, height, src, src_end - src,
                    c->jpeg_tile, c->tile_stride,
                    c->kempf_flags, bstride, nblocks, 0);

    kempf_restore_buf(c->kempf_buf, dlen, dst, c->framebuf_stride,
                      c->jpeg_tile, c->tile_stride,
                      width, height, pal, npal, tidx);

    return 0;
}
/*
 * Decode one "Kempf" compressed tile into the frame buffer.
 * sub_type 0 = solid fill, 1 = plain JPEG, 2 = palette-only,
 * other     = palette over a JPEG underlay with a coded-block bitmap.
 * Returns 0 on success or a negative AVERROR code on malformed input.
 */
static int kempf_decode_tile(G2MContext *c, int tile_x, int tile_y,
                             const uint8_t *src, int src_size)
{
    int width, height;
    int hdr, zsize, npal, tidx = -1, ret;
    int i, j;
    const uint8_t *src_end = src + src_size;
    uint8_t pal[768], transp[3];
    uLongf dlen = (c->tile_width + 1) * c->tile_height;
    int sub_type;
    int nblocks, cblocks, bstride;
    int bits, bitbuf, coded;
    uint8_t *dst = c->framebuf + tile_x * c->tile_width * 3 +
                   tile_y * c->tile_height * c->framebuf_stride;

    if (src_size < 2)
        return AVERROR_INVALIDDATA;

    /* Edge tiles may be smaller than the nominal tile size. */
    width  = FFMIN(c->width  - tile_x * c->tile_width,  c->tile_width);
    height = FFMIN(c->height - tile_y * c->tile_height, c->tile_height);

    hdr      = *src++;
    sub_type = hdr >> 5;
    if (sub_type == 0) {
        /* Solid-colour tile: fill with the 3-byte colour that follows. */
        int j;
        memcpy(transp, src, 3);
        src += 3;
        for (j = 0; j < height; j++, dst += c->framebuf_stride)
            for (i = 0; i < width; i++)
                memcpy(dst + i * 3, transp, 3);
        return 0;
    } else if (sub_type == 1) {
        /* Pure JPEG tile: decode straight into the frame buffer. */
        return jpg_decode_data(&c->jc, width, height, src, src_end - src,
                               dst, c->framebuf_stride, NULL, 0, 0, 0);
    }

    if (sub_type != 2) {
        /* Transparent colour: pixels of this colour come from the JPEG. */
        memcpy(transp, src, 3);
        src += 3;
    }
    npal = *src++ + 1;
    memcpy(pal, src, npal * 3);
    src += npal * 3;
    if (sub_type != 2) {
        /* Locate the transparent colour's palette index, if any. */
        for (i = 0; i < npal; i++) {
            if (!memcmp(pal + i * 3, transp, 3)) {
                tidx = i;
                break;
            }
        }
    }

    if (src_end - src < 2)
        return 0;
    zsize = (src[0] << 8) | src[1];
    src  += 2;
    /* (sub_type != 2) adds one byte for the nblocks count read below. */
    if (src_end - src < zsize + (sub_type != 2))
        return AVERROR_INVALIDDATA;
    ret = uncompress(c->kempf_buf, &dlen, src, zsize);
    if (ret)
        return AVERROR_INVALIDDATA;
    src += zsize;

    if (sub_type == 2) {
        /* Palette-only tile: no JPEG underlay to merge. */
        kempf_restore_buf(c->kempf_buf, dlen, dst, c->framebuf_stride,
                          NULL, 0, width, height, pal, npal, tidx);
        return 0;
    }

    nblocks = *src++ + 1;
    cblocks = 0;
    bstride = FFALIGN(width, 16) >> 4;
    // blocks are coded LSB and we need normal bitreader for JPEG data
    bits = 0;
    for (i = 0; i < (FFALIGN(height, 16) >> 4); i++) {
        for (j = 0; j < (FFALIGN(width, 16) >> 4); j++) {
            if (!bits) {
                /* Refill only while input remains (over-read guard). */
                if (src >= src_end)
                    return AVERROR_INVALIDDATA;
                bitbuf = *src++;
                bits = 8;
            }
            coded = bitbuf & 1;
            bits--;
            bitbuf >>= 1;
            cblocks += coded;
            if (cblocks > nblocks)
                return AVERROR_INVALIDDATA;
            c->kempf_flags[j + i * bstride] = coded;
        }
    }

    memset(c->jpeg_tile, 0, c->tile_stride * height);
    jpg_decode_data(&c->jc, width, height, src, src_end - src,
                    c->jpeg_tile, c->tile_stride,
                    c->kempf_flags, bstride, nblocks, 0);

    kempf_restore_buf(c->kempf_buf, dlen, dst, c->framebuf_stride,
                      c->jpeg_tile, c->tile_stride,
                      width, height, pal, npal, tidx);

    return 0;
}
{'added': [(392, ' if (src_end - src < zsize + (sub_type != 2))'), (414, ' if (src >= src_end)'), (415, ' return AVERROR_INVALIDDATA;')], 'deleted': [(392, ' if (src_end - src < zsize)')]}
3
1
745
5,456
88
722
18
https://github.com/FFmpeg/FFmpeg
CVE-2013-4264
CWE-119
2,057
dma.c
C
dma_rx
/* Broadcom B43 wireless driver DMA ringbuffer and descriptor allocation/management Copyright (c) 2005, 2006 Michael Buesch <mb@bu3sch.de> Some code in this file is derived from the b44.c driver Copyright (C) 2002 David S. Miller Copyright (C) Pekka Pietikainen This program is free software; you can redistribute it and/or modify it under the terms of the GNU General Public License as published by the Free Software Foundation; either version 2 of the License, or (at your option) any later version. This program is distributed in the hope that it will be useful, but WITHOUT ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for more details. You should have received a copy of the GNU General Public License along with this program; see the file COPYING. If not, write to the Free Software Foundation, Inc., 51 Franklin Steet, Fifth Floor, Boston, MA 02110-1301, USA. */ #include "b43.h" #include "dma.h" #include "main.h" #include "debugfs.h" #include "xmit.h" #include <linux/dma-mapping.h> #include <linux/pci.h> #include <linux/delay.h> #include <linux/skbuff.h> #include <linux/etherdevice.h> #include <linux/slab.h> #include <asm/div64.h> /* Required number of TX DMA slots per TX frame. * This currently is 2, because we put the header and the ieee80211 frame * into separate slots. */ #define TX_SLOTS_PER_FRAME 2 /* 32bit DMA ops. 
*/ static struct b43_dmadesc_generic *op32_idx2desc(struct b43_dmaring *ring, int slot, struct b43_dmadesc_meta **meta) { struct b43_dmadesc32 *desc; *meta = &(ring->meta[slot]); desc = ring->descbase; desc = &(desc[slot]); return (struct b43_dmadesc_generic *)desc; } static void op32_fill_descriptor(struct b43_dmaring *ring, struct b43_dmadesc_generic *desc, dma_addr_t dmaaddr, u16 bufsize, int start, int end, int irq) { struct b43_dmadesc32 *descbase = ring->descbase; int slot; u32 ctl; u32 addr; u32 addrext; slot = (int)(&(desc->dma32) - descbase); B43_WARN_ON(!(slot >= 0 && slot < ring->nr_slots)); addr = (u32) (dmaaddr & ~SSB_DMA_TRANSLATION_MASK); addrext = (u32) (dmaaddr & SSB_DMA_TRANSLATION_MASK) >> SSB_DMA_TRANSLATION_SHIFT; addr |= ssb_dma_translation(ring->dev->dev); ctl = bufsize & B43_DMA32_DCTL_BYTECNT; if (slot == ring->nr_slots - 1) ctl |= B43_DMA32_DCTL_DTABLEEND; if (start) ctl |= B43_DMA32_DCTL_FRAMESTART; if (end) ctl |= B43_DMA32_DCTL_FRAMEEND; if (irq) ctl |= B43_DMA32_DCTL_IRQ; ctl |= (addrext << B43_DMA32_DCTL_ADDREXT_SHIFT) & B43_DMA32_DCTL_ADDREXT_MASK; desc->dma32.control = cpu_to_le32(ctl); desc->dma32.address = cpu_to_le32(addr); } static void op32_poke_tx(struct b43_dmaring *ring, int slot) { b43_dma_write(ring, B43_DMA32_TXINDEX, (u32) (slot * sizeof(struct b43_dmadesc32))); } static void op32_tx_suspend(struct b43_dmaring *ring) { b43_dma_write(ring, B43_DMA32_TXCTL, b43_dma_read(ring, B43_DMA32_TXCTL) | B43_DMA32_TXSUSPEND); } static void op32_tx_resume(struct b43_dmaring *ring) { b43_dma_write(ring, B43_DMA32_TXCTL, b43_dma_read(ring, B43_DMA32_TXCTL) & ~B43_DMA32_TXSUSPEND); } static int op32_get_current_rxslot(struct b43_dmaring *ring) { u32 val; val = b43_dma_read(ring, B43_DMA32_RXSTATUS); val &= B43_DMA32_RXDPTR; return (val / sizeof(struct b43_dmadesc32)); } static void op32_set_current_rxslot(struct b43_dmaring *ring, int slot) { b43_dma_write(ring, B43_DMA32_RXINDEX, (u32) (slot * sizeof(struct b43_dmadesc32))); } static 
const struct b43_dma_ops dma32_ops = { .idx2desc = op32_idx2desc, .fill_descriptor = op32_fill_descriptor, .poke_tx = op32_poke_tx, .tx_suspend = op32_tx_suspend, .tx_resume = op32_tx_resume, .get_current_rxslot = op32_get_current_rxslot, .set_current_rxslot = op32_set_current_rxslot, }; /* 64bit DMA ops. */ static struct b43_dmadesc_generic *op64_idx2desc(struct b43_dmaring *ring, int slot, struct b43_dmadesc_meta **meta) { struct b43_dmadesc64 *desc; *meta = &(ring->meta[slot]); desc = ring->descbase; desc = &(desc[slot]); return (struct b43_dmadesc_generic *)desc; } static void op64_fill_descriptor(struct b43_dmaring *ring, struct b43_dmadesc_generic *desc, dma_addr_t dmaaddr, u16 bufsize, int start, int end, int irq) { struct b43_dmadesc64 *descbase = ring->descbase; int slot; u32 ctl0 = 0, ctl1 = 0; u32 addrlo, addrhi; u32 addrext; slot = (int)(&(desc->dma64) - descbase); B43_WARN_ON(!(slot >= 0 && slot < ring->nr_slots)); addrlo = (u32) (dmaaddr & 0xFFFFFFFF); addrhi = (((u64) dmaaddr >> 32) & ~SSB_DMA_TRANSLATION_MASK); addrext = (((u64) dmaaddr >> 32) & SSB_DMA_TRANSLATION_MASK) >> SSB_DMA_TRANSLATION_SHIFT; addrhi |= (ssb_dma_translation(ring->dev->dev) << 1); if (slot == ring->nr_slots - 1) ctl0 |= B43_DMA64_DCTL0_DTABLEEND; if (start) ctl0 |= B43_DMA64_DCTL0_FRAMESTART; if (end) ctl0 |= B43_DMA64_DCTL0_FRAMEEND; if (irq) ctl0 |= B43_DMA64_DCTL0_IRQ; ctl1 |= bufsize & B43_DMA64_DCTL1_BYTECNT; ctl1 |= (addrext << B43_DMA64_DCTL1_ADDREXT_SHIFT) & B43_DMA64_DCTL1_ADDREXT_MASK; desc->dma64.control0 = cpu_to_le32(ctl0); desc->dma64.control1 = cpu_to_le32(ctl1); desc->dma64.address_low = cpu_to_le32(addrlo); desc->dma64.address_high = cpu_to_le32(addrhi); } static void op64_poke_tx(struct b43_dmaring *ring, int slot) { b43_dma_write(ring, B43_DMA64_TXINDEX, (u32) (slot * sizeof(struct b43_dmadesc64))); } static void op64_tx_suspend(struct b43_dmaring *ring) { b43_dma_write(ring, B43_DMA64_TXCTL, b43_dma_read(ring, B43_DMA64_TXCTL) | B43_DMA64_TXSUSPEND); } 
static void op64_tx_resume(struct b43_dmaring *ring) { b43_dma_write(ring, B43_DMA64_TXCTL, b43_dma_read(ring, B43_DMA64_TXCTL) & ~B43_DMA64_TXSUSPEND); } static int op64_get_current_rxslot(struct b43_dmaring *ring) { u32 val; val = b43_dma_read(ring, B43_DMA64_RXSTATUS); val &= B43_DMA64_RXSTATDPTR; return (val / sizeof(struct b43_dmadesc64)); } static void op64_set_current_rxslot(struct b43_dmaring *ring, int slot) { b43_dma_write(ring, B43_DMA64_RXINDEX, (u32) (slot * sizeof(struct b43_dmadesc64))); } static const struct b43_dma_ops dma64_ops = { .idx2desc = op64_idx2desc, .fill_descriptor = op64_fill_descriptor, .poke_tx = op64_poke_tx, .tx_suspend = op64_tx_suspend, .tx_resume = op64_tx_resume, .get_current_rxslot = op64_get_current_rxslot, .set_current_rxslot = op64_set_current_rxslot, }; static inline int free_slots(struct b43_dmaring *ring) { return (ring->nr_slots - ring->used_slots); } static inline int next_slot(struct b43_dmaring *ring, int slot) { B43_WARN_ON(!(slot >= -1 && slot <= ring->nr_slots - 1)); if (slot == ring->nr_slots - 1) return 0; return slot + 1; } static inline int prev_slot(struct b43_dmaring *ring, int slot) { B43_WARN_ON(!(slot >= 0 && slot <= ring->nr_slots - 1)); if (slot == 0) return ring->nr_slots - 1; return slot - 1; } #ifdef CONFIG_B43_DEBUG static void update_max_used_slots(struct b43_dmaring *ring, int current_used_slots) { if (current_used_slots <= ring->max_used_slots) return; ring->max_used_slots = current_used_slots; if (b43_debug(ring->dev, B43_DBG_DMAVERBOSE)) { b43dbg(ring->dev->wl, "max_used_slots increased to %d on %s ring %d\n", ring->max_used_slots, ring->tx ? "TX" : "RX", ring->index); } } #else static inline void update_max_used_slots(struct b43_dmaring *ring, int current_used_slots) { } #endif /* DEBUG */ /* Request a slot for usage. 
*/ static inline int request_slot(struct b43_dmaring *ring) { int slot; B43_WARN_ON(!ring->tx); B43_WARN_ON(ring->stopped); B43_WARN_ON(free_slots(ring) == 0); slot = next_slot(ring, ring->current_slot); ring->current_slot = slot; ring->used_slots++; update_max_used_slots(ring, ring->used_slots); return slot; } static u16 b43_dmacontroller_base(enum b43_dmatype type, int controller_idx) { static const u16 map64[] = { B43_MMIO_DMA64_BASE0, B43_MMIO_DMA64_BASE1, B43_MMIO_DMA64_BASE2, B43_MMIO_DMA64_BASE3, B43_MMIO_DMA64_BASE4, B43_MMIO_DMA64_BASE5, }; static const u16 map32[] = { B43_MMIO_DMA32_BASE0, B43_MMIO_DMA32_BASE1, B43_MMIO_DMA32_BASE2, B43_MMIO_DMA32_BASE3, B43_MMIO_DMA32_BASE4, B43_MMIO_DMA32_BASE5, }; if (type == B43_DMA_64BIT) { B43_WARN_ON(!(controller_idx >= 0 && controller_idx < ARRAY_SIZE(map64))); return map64[controller_idx]; } B43_WARN_ON(!(controller_idx >= 0 && controller_idx < ARRAY_SIZE(map32))); return map32[controller_idx]; } static inline dma_addr_t map_descbuffer(struct b43_dmaring *ring, unsigned char *buf, size_t len, int tx) { dma_addr_t dmaaddr; if (tx) { dmaaddr = dma_map_single(ring->dev->dev->dma_dev, buf, len, DMA_TO_DEVICE); } else { dmaaddr = dma_map_single(ring->dev->dev->dma_dev, buf, len, DMA_FROM_DEVICE); } return dmaaddr; } static inline void unmap_descbuffer(struct b43_dmaring *ring, dma_addr_t addr, size_t len, int tx) { if (tx) { dma_unmap_single(ring->dev->dev->dma_dev, addr, len, DMA_TO_DEVICE); } else { dma_unmap_single(ring->dev->dev->dma_dev, addr, len, DMA_FROM_DEVICE); } } static inline void sync_descbuffer_for_cpu(struct b43_dmaring *ring, dma_addr_t addr, size_t len) { B43_WARN_ON(ring->tx); dma_sync_single_for_cpu(ring->dev->dev->dma_dev, addr, len, DMA_FROM_DEVICE); } static inline void sync_descbuffer_for_device(struct b43_dmaring *ring, dma_addr_t addr, size_t len) { B43_WARN_ON(ring->tx); dma_sync_single_for_device(ring->dev->dev->dma_dev, addr, len, DMA_FROM_DEVICE); } static inline void 
free_descriptor_buffer(struct b43_dmaring *ring, struct b43_dmadesc_meta *meta) { if (meta->skb) { dev_kfree_skb_any(meta->skb); meta->skb = NULL; } } static int alloc_ringmemory(struct b43_dmaring *ring) { gfp_t flags = GFP_KERNEL; /* The specs call for 4K buffers for 30- and 32-bit DMA with 4K * alignment and 8K buffers for 64-bit DMA with 8K alignment. Testing * has shown that 4K is sufficient for the latter as long as the buffer * does not cross an 8K boundary. * * For unknown reasons - possibly a hardware error - the BCM4311 rev * 02, which uses 64-bit DMA, needs the ring buffer in very low memory, * which accounts for the GFP_DMA flag below. * * The flags here must match the flags in free_ringmemory below! */ if (ring->type == B43_DMA_64BIT) flags |= GFP_DMA; ring->descbase = dma_alloc_coherent(ring->dev->dev->dma_dev, B43_DMA_RINGMEMSIZE, &(ring->dmabase), flags); if (!ring->descbase) { b43err(ring->dev->wl, "DMA ringmemory allocation failed\n"); return -ENOMEM; } memset(ring->descbase, 0, B43_DMA_RINGMEMSIZE); return 0; } static void free_ringmemory(struct b43_dmaring *ring) { dma_free_coherent(ring->dev->dev->dma_dev, B43_DMA_RINGMEMSIZE, ring->descbase, ring->dmabase); } /* Reset the RX DMA channel */ static int b43_dmacontroller_rx_reset(struct b43_wldev *dev, u16 mmio_base, enum b43_dmatype type) { int i; u32 value; u16 offset; might_sleep(); offset = (type == B43_DMA_64BIT) ? B43_DMA64_RXCTL : B43_DMA32_RXCTL; b43_write32(dev, mmio_base + offset, 0); for (i = 0; i < 10; i++) { offset = (type == B43_DMA_64BIT) ? 
B43_DMA64_RXSTATUS : B43_DMA32_RXSTATUS; value = b43_read32(dev, mmio_base + offset); if (type == B43_DMA_64BIT) { value &= B43_DMA64_RXSTAT; if (value == B43_DMA64_RXSTAT_DISABLED) { i = -1; break; } } else { value &= B43_DMA32_RXSTATE; if (value == B43_DMA32_RXSTAT_DISABLED) { i = -1; break; } } msleep(1); } if (i != -1) { b43err(dev->wl, "DMA RX reset timed out\n"); return -ENODEV; } return 0; } /* Reset the TX DMA channel */ static int b43_dmacontroller_tx_reset(struct b43_wldev *dev, u16 mmio_base, enum b43_dmatype type) { int i; u32 value; u16 offset; might_sleep(); for (i = 0; i < 10; i++) { offset = (type == B43_DMA_64BIT) ? B43_DMA64_TXSTATUS : B43_DMA32_TXSTATUS; value = b43_read32(dev, mmio_base + offset); if (type == B43_DMA_64BIT) { value &= B43_DMA64_TXSTAT; if (value == B43_DMA64_TXSTAT_DISABLED || value == B43_DMA64_TXSTAT_IDLEWAIT || value == B43_DMA64_TXSTAT_STOPPED) break; } else { value &= B43_DMA32_TXSTATE; if (value == B43_DMA32_TXSTAT_DISABLED || value == B43_DMA32_TXSTAT_IDLEWAIT || value == B43_DMA32_TXSTAT_STOPPED) break; } msleep(1); } offset = (type == B43_DMA_64BIT) ? B43_DMA64_TXCTL : B43_DMA32_TXCTL; b43_write32(dev, mmio_base + offset, 0); for (i = 0; i < 10; i++) { offset = (type == B43_DMA_64BIT) ? B43_DMA64_TXSTATUS : B43_DMA32_TXSTATUS; value = b43_read32(dev, mmio_base + offset); if (type == B43_DMA_64BIT) { value &= B43_DMA64_TXSTAT; if (value == B43_DMA64_TXSTAT_DISABLED) { i = -1; break; } } else { value &= B43_DMA32_TXSTATE; if (value == B43_DMA32_TXSTAT_DISABLED) { i = -1; break; } } msleep(1); } if (i != -1) { b43err(dev->wl, "DMA TX reset timed out\n"); return -ENODEV; } /* ensure the reset is completed. */ msleep(1); return 0; } /* Check if a DMA mapping address is invalid. 
*/ static bool b43_dma_mapping_error(struct b43_dmaring *ring, dma_addr_t addr, size_t buffersize, bool dma_to_device) { if (unlikely(dma_mapping_error(ring->dev->dev->dma_dev, addr))) return 1; switch (ring->type) { case B43_DMA_30BIT: if ((u64)addr + buffersize > (1ULL << 30)) goto address_error; break; case B43_DMA_32BIT: if ((u64)addr + buffersize > (1ULL << 32)) goto address_error; break; case B43_DMA_64BIT: /* Currently we can't have addresses beyond * 64bit in the kernel. */ break; } /* The address is OK. */ return 0; address_error: /* We can't support this address. Unmap it again. */ unmap_descbuffer(ring, addr, buffersize, dma_to_device); return 1; } static bool b43_rx_buffer_is_poisoned(struct b43_dmaring *ring, struct sk_buff *skb) { unsigned char *f = skb->data + ring->frameoffset; return ((f[0] & f[1] & f[2] & f[3] & f[4] & f[5] & f[6] & f[7]) == 0xFF); } static void b43_poison_rx_buffer(struct b43_dmaring *ring, struct sk_buff *skb) { struct b43_rxhdr_fw4 *rxhdr; unsigned char *frame; /* This poisons the RX buffer to detect DMA failures. */ rxhdr = (struct b43_rxhdr_fw4 *)(skb->data); rxhdr->frame_len = 0; B43_WARN_ON(ring->rx_buffersize < ring->frameoffset + sizeof(struct b43_plcp_hdr6) + 2); frame = skb->data + ring->frameoffset; memset(frame, 0xFF, sizeof(struct b43_plcp_hdr6) + 2 /* padding */); } static int setup_rx_descbuffer(struct b43_dmaring *ring, struct b43_dmadesc_generic *desc, struct b43_dmadesc_meta *meta, gfp_t gfp_flags) { dma_addr_t dmaaddr; struct sk_buff *skb; B43_WARN_ON(ring->tx); skb = __dev_alloc_skb(ring->rx_buffersize, gfp_flags); if (unlikely(!skb)) return -ENOMEM; b43_poison_rx_buffer(ring, skb); dmaaddr = map_descbuffer(ring, skb->data, ring->rx_buffersize, 0); if (b43_dma_mapping_error(ring, dmaaddr, ring->rx_buffersize, 0)) { /* ugh. 
try to realloc in zone_dma */ gfp_flags |= GFP_DMA; dev_kfree_skb_any(skb); skb = __dev_alloc_skb(ring->rx_buffersize, gfp_flags); if (unlikely(!skb)) return -ENOMEM; b43_poison_rx_buffer(ring, skb); dmaaddr = map_descbuffer(ring, skb->data, ring->rx_buffersize, 0); if (b43_dma_mapping_error(ring, dmaaddr, ring->rx_buffersize, 0)) { b43err(ring->dev->wl, "RX DMA buffer allocation failed\n"); dev_kfree_skb_any(skb); return -EIO; } } meta->skb = skb; meta->dmaaddr = dmaaddr; ring->ops->fill_descriptor(ring, desc, dmaaddr, ring->rx_buffersize, 0, 0, 0); return 0; } /* Allocate the initial descbuffers. * This is used for an RX ring only. */ static int alloc_initial_descbuffers(struct b43_dmaring *ring) { int i, err = -ENOMEM; struct b43_dmadesc_generic *desc; struct b43_dmadesc_meta *meta; for (i = 0; i < ring->nr_slots; i++) { desc = ring->ops->idx2desc(ring, i, &meta); err = setup_rx_descbuffer(ring, desc, meta, GFP_KERNEL); if (err) { b43err(ring->dev->wl, "Failed to allocate initial descbuffers\n"); goto err_unwind; } } mb(); ring->used_slots = ring->nr_slots; err = 0; out: return err; err_unwind: for (i--; i >= 0; i--) { desc = ring->ops->idx2desc(ring, i, &meta); unmap_descbuffer(ring, meta->dmaaddr, ring->rx_buffersize, 0); dev_kfree_skb(meta->skb); } goto out; } /* Do initial setup of the DMA controller. * Reset the controller, write the ring busaddress * and switch the "enable" bit on. 
*/ static int dmacontroller_setup(struct b43_dmaring *ring) { int err = 0; u32 value; u32 addrext; u32 trans = ssb_dma_translation(ring->dev->dev); if (ring->tx) { if (ring->type == B43_DMA_64BIT) { u64 ringbase = (u64) (ring->dmabase); addrext = ((ringbase >> 32) & SSB_DMA_TRANSLATION_MASK) >> SSB_DMA_TRANSLATION_SHIFT; value = B43_DMA64_TXENABLE; value |= (addrext << B43_DMA64_TXADDREXT_SHIFT) & B43_DMA64_TXADDREXT_MASK; b43_dma_write(ring, B43_DMA64_TXCTL, value); b43_dma_write(ring, B43_DMA64_TXRINGLO, (ringbase & 0xFFFFFFFF)); b43_dma_write(ring, B43_DMA64_TXRINGHI, ((ringbase >> 32) & ~SSB_DMA_TRANSLATION_MASK) | (trans << 1)); } else { u32 ringbase = (u32) (ring->dmabase); addrext = (ringbase & SSB_DMA_TRANSLATION_MASK) >> SSB_DMA_TRANSLATION_SHIFT; value = B43_DMA32_TXENABLE; value |= (addrext << B43_DMA32_TXADDREXT_SHIFT) & B43_DMA32_TXADDREXT_MASK; b43_dma_write(ring, B43_DMA32_TXCTL, value); b43_dma_write(ring, B43_DMA32_TXRING, (ringbase & ~SSB_DMA_TRANSLATION_MASK) | trans); } } else { err = alloc_initial_descbuffers(ring); if (err) goto out; if (ring->type == B43_DMA_64BIT) { u64 ringbase = (u64) (ring->dmabase); addrext = ((ringbase >> 32) & SSB_DMA_TRANSLATION_MASK) >> SSB_DMA_TRANSLATION_SHIFT; value = (ring->frameoffset << B43_DMA64_RXFROFF_SHIFT); value |= B43_DMA64_RXENABLE; value |= (addrext << B43_DMA64_RXADDREXT_SHIFT) & B43_DMA64_RXADDREXT_MASK; b43_dma_write(ring, B43_DMA64_RXCTL, value); b43_dma_write(ring, B43_DMA64_RXRINGLO, (ringbase & 0xFFFFFFFF)); b43_dma_write(ring, B43_DMA64_RXRINGHI, ((ringbase >> 32) & ~SSB_DMA_TRANSLATION_MASK) | (trans << 1)); b43_dma_write(ring, B43_DMA64_RXINDEX, ring->nr_slots * sizeof(struct b43_dmadesc64)); } else { u32 ringbase = (u32) (ring->dmabase); addrext = (ringbase & SSB_DMA_TRANSLATION_MASK) >> SSB_DMA_TRANSLATION_SHIFT; value = (ring->frameoffset << B43_DMA32_RXFROFF_SHIFT); value |= B43_DMA32_RXENABLE; value |= (addrext << B43_DMA32_RXADDREXT_SHIFT) & B43_DMA32_RXADDREXT_MASK; b43_dma_write(ring, 
B43_DMA32_RXCTL, value); b43_dma_write(ring, B43_DMA32_RXRING, (ringbase & ~SSB_DMA_TRANSLATION_MASK) | trans); b43_dma_write(ring, B43_DMA32_RXINDEX, ring->nr_slots * sizeof(struct b43_dmadesc32)); } } out: return err; } /* Shutdown the DMA controller. */ static void dmacontroller_cleanup(struct b43_dmaring *ring) { if (ring->tx) { b43_dmacontroller_tx_reset(ring->dev, ring->mmio_base, ring->type); if (ring->type == B43_DMA_64BIT) { b43_dma_write(ring, B43_DMA64_TXRINGLO, 0); b43_dma_write(ring, B43_DMA64_TXRINGHI, 0); } else b43_dma_write(ring, B43_DMA32_TXRING, 0); } else { b43_dmacontroller_rx_reset(ring->dev, ring->mmio_base, ring->type); if (ring->type == B43_DMA_64BIT) { b43_dma_write(ring, B43_DMA64_RXRINGLO, 0); b43_dma_write(ring, B43_DMA64_RXRINGHI, 0); } else b43_dma_write(ring, B43_DMA32_RXRING, 0); } } static void free_all_descbuffers(struct b43_dmaring *ring) { struct b43_dmadesc_generic *desc; struct b43_dmadesc_meta *meta; int i; if (!ring->used_slots) return; for (i = 0; i < ring->nr_slots; i++) { desc = ring->ops->idx2desc(ring, i, &meta); if (!meta->skb || b43_dma_ptr_is_poisoned(meta->skb)) { B43_WARN_ON(!ring->tx); continue; } if (ring->tx) { unmap_descbuffer(ring, meta->dmaaddr, meta->skb->len, 1); } else { unmap_descbuffer(ring, meta->dmaaddr, ring->rx_buffersize, 0); } free_descriptor_buffer(ring, meta); } } static u64 supported_dma_mask(struct b43_wldev *dev) { u32 tmp; u16 mmio_base; tmp = b43_read32(dev, SSB_TMSHIGH); if (tmp & SSB_TMSHIGH_DMA64) return DMA_BIT_MASK(64); mmio_base = b43_dmacontroller_base(0, 0); b43_write32(dev, mmio_base + B43_DMA32_TXCTL, B43_DMA32_TXADDREXT_MASK); tmp = b43_read32(dev, mmio_base + B43_DMA32_TXCTL); if (tmp & B43_DMA32_TXADDREXT_MASK) return DMA_BIT_MASK(32); return DMA_BIT_MASK(30); } static enum b43_dmatype dma_mask_to_engine_type(u64 dmamask) { if (dmamask == DMA_BIT_MASK(30)) return B43_DMA_30BIT; if (dmamask == DMA_BIT_MASK(32)) return B43_DMA_32BIT; if (dmamask == DMA_BIT_MASK(64)) return 
B43_DMA_64BIT; B43_WARN_ON(1); return B43_DMA_30BIT; } /* Main initialization function. */ static struct b43_dmaring *b43_setup_dmaring(struct b43_wldev *dev, int controller_index, int for_tx, enum b43_dmatype type) { struct b43_dmaring *ring; int i, err; dma_addr_t dma_test; ring = kzalloc(sizeof(*ring), GFP_KERNEL); if (!ring) goto out; ring->nr_slots = B43_RXRING_SLOTS; if (for_tx) ring->nr_slots = B43_TXRING_SLOTS; ring->meta = kcalloc(ring->nr_slots, sizeof(struct b43_dmadesc_meta), GFP_KERNEL); if (!ring->meta) goto err_kfree_ring; for (i = 0; i < ring->nr_slots; i++) ring->meta->skb = B43_DMA_PTR_POISON; ring->type = type; ring->dev = dev; ring->mmio_base = b43_dmacontroller_base(type, controller_index); ring->index = controller_index; if (type == B43_DMA_64BIT) ring->ops = &dma64_ops; else ring->ops = &dma32_ops; if (for_tx) { ring->tx = 1; ring->current_slot = -1; } else { if (ring->index == 0) { ring->rx_buffersize = B43_DMA0_RX_BUFFERSIZE; ring->frameoffset = B43_DMA0_RX_FRAMEOFFSET; } else B43_WARN_ON(1); } #ifdef CONFIG_B43_DEBUG ring->last_injected_overflow = jiffies; #endif if (for_tx) { /* Assumption: B43_TXRING_SLOTS can be divided by TX_SLOTS_PER_FRAME */ BUILD_BUG_ON(B43_TXRING_SLOTS % TX_SLOTS_PER_FRAME != 0); ring->txhdr_cache = kcalloc(ring->nr_slots / TX_SLOTS_PER_FRAME, b43_txhdr_size(dev), GFP_KERNEL); if (!ring->txhdr_cache) goto err_kfree_meta; /* test for ability to dma to txhdr_cache */ dma_test = dma_map_single(dev->dev->dma_dev, ring->txhdr_cache, b43_txhdr_size(dev), DMA_TO_DEVICE); if (b43_dma_mapping_error(ring, dma_test, b43_txhdr_size(dev), 1)) { /* ugh realloc */ kfree(ring->txhdr_cache); ring->txhdr_cache = kcalloc(ring->nr_slots / TX_SLOTS_PER_FRAME, b43_txhdr_size(dev), GFP_KERNEL | GFP_DMA); if (!ring->txhdr_cache) goto err_kfree_meta; dma_test = dma_map_single(dev->dev->dma_dev, ring->txhdr_cache, b43_txhdr_size(dev), DMA_TO_DEVICE); if (b43_dma_mapping_error(ring, dma_test, b43_txhdr_size(dev), 1)) { b43err(dev->wl, "TXHDR 
DMA allocation failed\n"); goto err_kfree_txhdr_cache; } } dma_unmap_single(dev->dev->dma_dev, dma_test, b43_txhdr_size(dev), DMA_TO_DEVICE); } err = alloc_ringmemory(ring); if (err) goto err_kfree_txhdr_cache; err = dmacontroller_setup(ring); if (err) goto err_free_ringmemory; out: return ring; err_free_ringmemory: free_ringmemory(ring); err_kfree_txhdr_cache: kfree(ring->txhdr_cache); err_kfree_meta: kfree(ring->meta); err_kfree_ring: kfree(ring); ring = NULL; goto out; } #define divide(a, b) ({ \ typeof(a) __a = a; \ do_div(__a, b); \ __a; \ }) #define modulo(a, b) ({ \ typeof(a) __a = a; \ do_div(__a, b); \ }) /* Main cleanup function. */ static void b43_destroy_dmaring(struct b43_dmaring *ring, const char *ringname) { if (!ring) return; #ifdef CONFIG_B43_DEBUG { /* Print some statistics. */ u64 failed_packets = ring->nr_failed_tx_packets; u64 succeed_packets = ring->nr_succeed_tx_packets; u64 nr_packets = failed_packets + succeed_packets; u64 permille_failed = 0, average_tries = 0; if (nr_packets) permille_failed = divide(failed_packets * 1000, nr_packets); if (nr_packets) average_tries = divide(ring->nr_total_packet_tries * 100, nr_packets); b43dbg(ring->dev->wl, "DMA-%u %s: " "Used slots %d/%d, Failed frames %llu/%llu = %llu.%01llu%%, " "Average tries %llu.%02llu\n", (unsigned int)(ring->type), ringname, ring->max_used_slots, ring->nr_slots, (unsigned long long)failed_packets, (unsigned long long)nr_packets, (unsigned long long)divide(permille_failed, 10), (unsigned long long)modulo(permille_failed, 10), (unsigned long long)divide(average_tries, 100), (unsigned long long)modulo(average_tries, 100)); } #endif /* DEBUG */ /* Device IRQs are disabled prior entering this function, * so no need to take care of concurrency with rx handler stuff. 
*/ dmacontroller_cleanup(ring); free_all_descbuffers(ring); free_ringmemory(ring); kfree(ring->txhdr_cache); kfree(ring->meta); kfree(ring); } #define destroy_ring(dma, ring) do { \ b43_destroy_dmaring((dma)->ring, __stringify(ring)); \ (dma)->ring = NULL; \ } while (0) void b43_dma_free(struct b43_wldev *dev) { struct b43_dma *dma; if (b43_using_pio_transfers(dev)) return; dma = &dev->dma; destroy_ring(dma, rx_ring); destroy_ring(dma, tx_ring_AC_BK); destroy_ring(dma, tx_ring_AC_BE); destroy_ring(dma, tx_ring_AC_VI); destroy_ring(dma, tx_ring_AC_VO); destroy_ring(dma, tx_ring_mcast); } static int b43_dma_set_mask(struct b43_wldev *dev, u64 mask) { u64 orig_mask = mask; bool fallback = 0; int err; /* Try to set the DMA mask. If it fails, try falling back to a * lower mask, as we can always also support a lower one. */ while (1) { err = dma_set_mask(dev->dev->dma_dev, mask); if (!err) { err = dma_set_coherent_mask(dev->dev->dma_dev, mask); if (!err) break; } if (mask == DMA_BIT_MASK(64)) { mask = DMA_BIT_MASK(32); fallback = 1; continue; } if (mask == DMA_BIT_MASK(32)) { mask = DMA_BIT_MASK(30); fallback = 1; continue; } b43err(dev->wl, "The machine/kernel does not support " "the required %u-bit DMA mask\n", (unsigned int)dma_mask_to_engine_type(orig_mask)); return -EOPNOTSUPP; } if (fallback) { b43info(dev->wl, "DMA mask fallback from %u-bit to %u-bit\n", (unsigned int)dma_mask_to_engine_type(orig_mask), (unsigned int)dma_mask_to_engine_type(mask)); } return 0; } int b43_dma_init(struct b43_wldev *dev) { struct b43_dma *dma = &dev->dma; int err; u64 dmamask; enum b43_dmatype type; dmamask = supported_dma_mask(dev); type = dma_mask_to_engine_type(dmamask); err = b43_dma_set_mask(dev, dmamask); if (err) return err; err = -ENOMEM; /* setup TX DMA channels. 
*/ dma->tx_ring_AC_BK = b43_setup_dmaring(dev, 0, 1, type); if (!dma->tx_ring_AC_BK) goto out; dma->tx_ring_AC_BE = b43_setup_dmaring(dev, 1, 1, type); if (!dma->tx_ring_AC_BE) goto err_destroy_bk; dma->tx_ring_AC_VI = b43_setup_dmaring(dev, 2, 1, type); if (!dma->tx_ring_AC_VI) goto err_destroy_be; dma->tx_ring_AC_VO = b43_setup_dmaring(dev, 3, 1, type); if (!dma->tx_ring_AC_VO) goto err_destroy_vi; dma->tx_ring_mcast = b43_setup_dmaring(dev, 4, 1, type); if (!dma->tx_ring_mcast) goto err_destroy_vo; /* setup RX DMA channel. */ dma->rx_ring = b43_setup_dmaring(dev, 0, 0, type); if (!dma->rx_ring) goto err_destroy_mcast; /* No support for the TX status DMA ring. */ B43_WARN_ON(dev->dev->id.revision < 5); b43dbg(dev->wl, "%u-bit DMA initialized\n", (unsigned int)type); err = 0; out: return err; err_destroy_mcast: destroy_ring(dma, tx_ring_mcast); err_destroy_vo: destroy_ring(dma, tx_ring_AC_VO); err_destroy_vi: destroy_ring(dma, tx_ring_AC_VI); err_destroy_be: destroy_ring(dma, tx_ring_AC_BE); err_destroy_bk: destroy_ring(dma, tx_ring_AC_BK); return err; } /* Generate a cookie for the TX header. */ static u16 generate_cookie(struct b43_dmaring *ring, int slot) { u16 cookie; /* Use the upper 4 bits of the cookie as * DMA controller ID and store the slot number * in the lower 12 bits. * Note that the cookie must never be 0, as this * is a special value used in RX path. * It can also not be 0xFFFF because that is special * for multicast frames. */ cookie = (((u16)ring->index + 1) << 12); B43_WARN_ON(slot & ~0x0FFF); cookie |= (u16)slot; return cookie; } /* Inspect a cookie and find out to which controller/slot it belongs. 
 */
static
struct b43_dmaring *parse_cookie(struct b43_wldev *dev, u16 cookie, int *slot)
{
	struct b43_dma *dma = &dev->dma;
	struct b43_dmaring *ring = NULL;

	/* Upper nibble selects the ring (see generate_cookie()). */
	switch (cookie & 0xF000) {
	case 0x1000:
		ring = dma->tx_ring_AC_BK;
		break;
	case 0x2000:
		ring = dma->tx_ring_AC_BE;
		break;
	case 0x3000:
		ring = dma->tx_ring_AC_VI;
		break;
	case 0x4000:
		ring = dma->tx_ring_AC_VO;
		break;
	case 0x5000:
		ring = dma->tx_ring_mcast;
		break;
	}
	*slot = (cookie & 0x0FFF);
	if (unlikely(!ring || *slot < 0 || *slot >= ring->nr_slots)) {
		b43dbg(dev->wl, "TX-status contains "
		       "invalid cookie: 0x%04X\n", cookie);
		return NULL;
	}

	return ring;
}

/* Queue one frame on a TX ring: one slot for the generated TX header,
 * one slot for the 802.11 frame payload.  On any failure the ring's
 * slot accounting is rolled back to its previous state. */
static int dma_tx_fragment(struct b43_dmaring *ring,
			   struct sk_buff *skb)
{
	const struct b43_dma_ops *ops = ring->ops;
	struct ieee80211_tx_info *info = IEEE80211_SKB_CB(skb);
	struct b43_private_tx_info *priv_info = b43_get_priv_tx_info(info);
	u8 *header;
	int slot, old_top_slot, old_used_slots;
	int err;
	struct b43_dmadesc_generic *desc;
	struct b43_dmadesc_meta *meta;
	struct b43_dmadesc_meta *meta_hdr;
	u16 cookie;
	size_t hdrsize = b43_txhdr_size(ring->dev);

	/* Important note: If the number of used DMA slots per TX frame
	 * is changed here, the TX_SLOTS_PER_FRAME definition at the top of
	 * the file has to be updated, too!
	 */

	old_top_slot = ring->current_slot;
	old_used_slots = ring->used_slots;

	/* Get a slot for the header. */
	slot = request_slot(ring);
	desc = ops->idx2desc(ring, slot, &meta_hdr);
	memset(meta_hdr, 0, sizeof(*meta_hdr));

	header = &(ring->txhdr_cache[(slot / TX_SLOTS_PER_FRAME) * hdrsize]);
	cookie = generate_cookie(ring, slot);
	err = b43_generate_txhdr(ring->dev, header,
				 skb, info, cookie);
	if (unlikely(err)) {
		ring->current_slot = old_top_slot;
		ring->used_slots = old_used_slots;
		return err;
	}

	meta_hdr->dmaaddr = map_descbuffer(ring, (unsigned char *)header,
					   hdrsize, 1);
	if (b43_dma_mapping_error(ring, meta_hdr->dmaaddr, hdrsize, 1)) {
		ring->current_slot = old_top_slot;
		ring->used_slots = old_used_slots;
		return -EIO;
	}
	ops->fill_descriptor(ring, desc, meta_hdr->dmaaddr,
			     hdrsize, 1, 0, 0);

	/* Get a slot for the payload. */
	slot = request_slot(ring);
	desc = ops->idx2desc(ring, slot, &meta);
	memset(meta, 0, sizeof(*meta));

	meta->skb = skb;
	meta->is_last_fragment = 1;
	priv_info->bouncebuffer = NULL;

	meta->dmaaddr = map_descbuffer(ring, skb->data, skb->len, 1);
	/* create a bounce buffer in zone_dma on mapping failure. */
	if (b43_dma_mapping_error(ring, meta->dmaaddr, skb->len, 1)) {
		priv_info->bouncebuffer = kmemdup(skb->data, skb->len,
						  GFP_ATOMIC | GFP_DMA);
		if (!priv_info->bouncebuffer) {
			ring->current_slot = old_top_slot;
			ring->used_slots = old_used_slots;
			err = -ENOMEM;
			goto out_unmap_hdr;
		}

		meta->dmaaddr = map_descbuffer(ring, priv_info->bouncebuffer, skb->len, 1);
		if (b43_dma_mapping_error(ring, meta->dmaaddr, skb->len, 1)) {
			kfree(priv_info->bouncebuffer);
			priv_info->bouncebuffer = NULL;
			ring->current_slot = old_top_slot;
			ring->used_slots = old_used_slots;
			err = -EIO;
			goto out_unmap_hdr;
		}
	}

	ops->fill_descriptor(ring, desc, meta->dmaaddr, skb->len, 0, 1, 1);

	if (info->flags & IEEE80211_TX_CTL_SEND_AFTER_DTIM) {
		/* Tell the firmware about the cookie of the last
		 * mcast frame, so it can clear the more-data bit in it. */
		b43_shm_write16(ring->dev, B43_SHM_SHARED,
				B43_SHM_SH_MCASTCOOKIE, cookie);
	}
	/* Now transfer the whole frame. */
	wmb();
	ops->poke_tx(ring, next_slot(ring, slot));
	return 0;

out_unmap_hdr:
	unmap_descbuffer(ring, meta_hdr->dmaaddr,
			 hdrsize, 1);
	return err;
}

/* Debug-only: decide whether to fake a ring overflow (at most once per
 * second) to exercise the stack's overflow handling. */
static inline int should_inject_overflow(struct b43_dmaring *ring)
{
#ifdef CONFIG_B43_DEBUG
	if (unlikely(b43_debug(ring->dev, B43_DBG_DMAOVERFLOW))) {
		/* Check if we should inject another ringbuffer overflow
		 * to test handling of this situation in the stack. */
		unsigned long next_overflow;

		next_overflow = ring->last_injected_overflow + HZ;
		if (time_after(jiffies, next_overflow)) {
			ring->last_injected_overflow = jiffies;
			b43dbg(ring->dev->wl,
			       "Injecting TX ring overflow on "
			       "DMA controller %d\n", ring->index);
			return 1;
		}
	}
#endif /* CONFIG_B43_DEBUG */
	return 0;
}

/* Static mapping of mac80211's queues (priorities) to b43 DMA rings. */
static struct b43_dmaring *select_ring_by_priority(struct b43_wldev *dev,
						   u8 queue_prio)
{
	struct b43_dmaring *ring;

	if (dev->qos_enabled) {
		/* 0 = highest priority */
		switch (queue_prio) {
		default:
			B43_WARN_ON(1);
			/* fallthrough */
		case 0:
			ring = dev->dma.tx_ring_AC_VO;
			break;
		case 1:
			ring = dev->dma.tx_ring_AC_VI;
			break;
		case 2:
			ring = dev->dma.tx_ring_AC_BE;
			break;
		case 3:
			ring = dev->dma.tx_ring_AC_BK;
			break;
		}
	} else
		ring = dev->dma.tx_ring_AC_BE;

	return ring;
}

/* mac80211 TX entry point for DMA mode.  Picks the target ring,
 * queues the frame, and stops the mac80211 queue when the ring is
 * (or is forced to appear) full. */
int b43_dma_tx(struct b43_wldev *dev, struct sk_buff *skb)
{
	struct b43_dmaring *ring;
	struct ieee80211_hdr *hdr;
	int err = 0;
	struct ieee80211_tx_info *info = IEEE80211_SKB_CB(skb);

	hdr = (struct ieee80211_hdr *)skb->data;
	if (info->flags & IEEE80211_TX_CTL_SEND_AFTER_DTIM) {
		/* The multicast ring will be sent after the DTIM */
		ring = dev->dma.tx_ring_mcast;
		/* Set the more-data bit. Ucode will clear it on
		 * the last frame for us. */
		hdr->frame_control |= cpu_to_le16(IEEE80211_FCTL_MOREDATA);
	} else {
		/* Decide by priority where to put this frame. */
		ring = select_ring_by_priority(
			dev, skb_get_queue_mapping(skb));
	}

	B43_WARN_ON(!ring->tx);

	if (unlikely(ring->stopped)) {
		/* We get here only because of a bug in mac80211.
		 * Because of a race, one packet may be queued after
		 * the queue is stopped, thus we got called when we shouldn't.
		 * For now, just refuse the transmit. */
		if (b43_debug(dev, B43_DBG_DMAVERBOSE))
			b43err(dev->wl, "Packet after queue stopped\n");
		err = -ENOSPC;
		goto out;
	}

	if (unlikely(WARN_ON(free_slots(ring) < TX_SLOTS_PER_FRAME))) {
		/* If we get here, we have a real error with the queue
		 * full, but queues not stopped. */
		b43err(dev->wl, "DMA queue overflow\n");
		err = -ENOSPC;
		goto out;
	}

	/* Assign the queue number to the ring (if not already done before)
	 * so TX status handling can use it. The queue to ring mapping is
	 * static, so we don't need to store it per frame. */
	ring->queue_prio = skb_get_queue_mapping(skb);

	err = dma_tx_fragment(ring, skb);
	if (unlikely(err == -ENOKEY)) {
		/* Drop this packet, as we don't have the encryption key
		 * anymore and must not transmit it unencrypted. */
		dev_kfree_skb_any(skb);
		err = 0;
		goto out;
	}
	if (unlikely(err)) {
		b43err(dev->wl, "DMA tx mapping failure\n");
		goto out;
	}
	if ((free_slots(ring) < TX_SLOTS_PER_FRAME) ||
	    should_inject_overflow(ring)) {
		/* This TX ring is full. */
		ieee80211_stop_queue(dev->wl->hw, skb_get_queue_mapping(skb));
		ring->stopped = 1;
		if (b43_debug(dev, B43_DBG_DMAVERBOSE)) {
			b43dbg(dev->wl, "Stopped TX ring %d\n", ring->index);
		}
	}
out:
	return err;
}

/* Process one TX status report from the firmware: unmap the frame's
 * slots, report status to mac80211, release the slots, and wake the
 * queue if it had been stopped. */
void b43_dma_handle_txstatus(struct b43_wldev *dev,
			     const struct b43_txstatus *status)
{
	const struct b43_dma_ops *ops;
	struct b43_dmaring *ring;
	struct b43_dmadesc_generic *desc;
	struct b43_dmadesc_meta *meta;
	int slot, firstused;
	bool frame_succeed;

	ring = parse_cookie(dev, status->cookie, &slot);
	if (unlikely(!ring))
		return;
	B43_WARN_ON(!ring->tx);

	/* Sanity check: TX packets are processed in-order on one ring.
	 * Check if the slot deduced from the cookie really is the first
	 * used slot. */
	firstused = ring->current_slot - ring->used_slots + 1;
	if (firstused < 0)
		firstused = ring->nr_slots + firstused;
	if (unlikely(slot != firstused)) {
		/* This possibly is a firmware bug and will result in
		 * malfunction, memory leaks and/or stall of DMA functionality. */
		b43dbg(dev->wl, "Out of order TX status report on DMA ring %d. "
		       "Expected %d, but got %d\n",
		       ring->index, firstused, slot);
		return;
	}

	ops = ring->ops;
	while (1) {
		B43_WARN_ON(slot < 0 || slot >= ring->nr_slots);
		desc = ops->idx2desc(ring, slot, &meta);

		if (b43_dma_ptr_is_poisoned(meta->skb)) {
			b43dbg(dev->wl, "Poisoned TX slot %d (first=%d) "
			       "on ring %d\n",
			       slot, firstused, ring->index);
			break;
		}
		if (meta->skb) {
			struct b43_private_tx_info *priv_info =
				b43_get_priv_tx_info(IEEE80211_SKB_CB(meta->skb));

			unmap_descbuffer(ring, meta->dmaaddr, meta->skb->len, 1);
			kfree(priv_info->bouncebuffer);
			priv_info->bouncebuffer = NULL;
		} else {
			/* Header-only slot: only the txhdr was mapped here. */
			unmap_descbuffer(ring, meta->dmaaddr,
					 b43_txhdr_size(dev), 1);
		}

		if (meta->is_last_fragment) {
			struct ieee80211_tx_info *info;

			if (unlikely(!meta->skb)) {
				/* This is a scatter-gather fragment of a frame, so
				 * the skb pointer must not be NULL. */
				b43dbg(dev->wl, "TX status unexpected NULL skb "
				       "at slot %d (first=%d) on ring %d\n",
				       slot, firstused, ring->index);
				break;
			}

			info = IEEE80211_SKB_CB(meta->skb);

			/*
			 * Call back to inform the ieee80211 subsystem about
			 * the status of the transmission.
			 */
			frame_succeed = b43_fill_txstatus_report(dev, info, status);
#ifdef CONFIG_B43_DEBUG
			if (frame_succeed)
				ring->nr_succeed_tx_packets++;
			else
				ring->nr_failed_tx_packets++;
			ring->nr_total_packet_tries += status->frame_count;
#endif /* DEBUG */
			ieee80211_tx_status(dev->wl->hw, meta->skb);

			/* skb will be freed by ieee80211_tx_status().
			 * Poison our pointer. */
			meta->skb = B43_DMA_PTR_POISON;
		} else {
			/* No need to call free_descriptor_buffer here, as
			 * this is only the txhdr, which is not allocated.
			 */
			if (unlikely(meta->skb)) {
				b43dbg(dev->wl, "TX status unexpected non-NULL skb "
				       "at slot %d (first=%d) on ring %d\n",
				       slot, firstused, ring->index);
				break;
			}
		}

		/* Everything unmapped and free'd. So it's not used anymore. */
		ring->used_slots--;

		if (meta->is_last_fragment) {
			/* This is the last scatter-gather
			 * fragment of the frame. We are done. */
			break;
		}
		slot = next_slot(ring, slot);
	}
	if (ring->stopped) {
		B43_WARN_ON(free_slots(ring) < TX_SLOTS_PER_FRAME);
		ieee80211_wake_queue(dev->wl->hw, ring->queue_prio);
		ring->stopped = 0;
		if (b43_debug(dev, B43_DBG_DMAVERBOSE)) {
			b43dbg(dev->wl, "Woke up TX ring %d\n", ring->index);
		}
	}
}

/* Receive one frame from the given RX slot: validate the poison, the
 * firmware-written frame length and the buffer size, hand the skb to
 * b43_rx(), and attach a fresh buffer to the descriptor.  On any
 * problem the existing buffer is re-poisoned and recycled instead. */
static void dma_rx(struct b43_dmaring *ring, int *slot)
{
	const struct b43_dma_ops *ops = ring->ops;
	struct b43_dmadesc_generic *desc;
	struct b43_dmadesc_meta *meta;
	struct b43_rxhdr_fw4 *rxhdr;
	struct sk_buff *skb;
	u16 len;
	int err;
	dma_addr_t dmaaddr;

	desc = ops->idx2desc(ring, *slot, &meta);

	sync_descbuffer_for_cpu(ring, meta->dmaaddr, ring->rx_buffersize);
	skb = meta->skb;

	rxhdr = (struct b43_rxhdr_fw4 *)skb->data;
	len = le16_to_cpu(rxhdr->frame_len);
	if (len == 0) {
		/* Firmware may not have written the length yet; poll
		 * briefly before giving up on this buffer. */
		int i = 0;

		do {
			udelay(2);
			barrier();
			len = le16_to_cpu(rxhdr->frame_len);
		} while (len == 0 && i++ < 5);
		if (unlikely(len == 0)) {
			dmaaddr = meta->dmaaddr;
			goto drop_recycle_buffer;
		}
	}
	if (unlikely(b43_rx_buffer_is_poisoned(ring, skb))) {
		/* Something went wrong with the DMA.
		 * The device did not touch the buffer and did not overwrite the poison. */
		b43dbg(ring->dev->wl, "DMA RX: Dropping poisoned buffer.\n");
		dmaaddr = meta->dmaaddr;
		goto drop_recycle_buffer;
	}
	if (unlikely(len > ring->rx_buffersize)) {
		/* The data did not fit into one descriptor buffer
		 * and is split over multiple buffers.
		 * This should never happen, as we try to allocate buffers
		 * big enough. So simply ignore this packet. */
		int cnt = 0;
		s32 tmp = len;

		while (1) {
			desc = ops->idx2desc(ring, *slot, &meta);
			/* recycle the descriptor buffer. */
			b43_poison_rx_buffer(ring, meta->skb);
			sync_descbuffer_for_device(ring, meta->dmaaddr,
						   ring->rx_buffersize);
			*slot = next_slot(ring, *slot);
			cnt++;
			tmp -= ring->rx_buffersize;
			if (tmp <= 0)
				break;
		}
		b43err(ring->dev->wl, "DMA RX buffer too small "
		       "(len: %u, buffer: %u, nr-dropped: %d)\n",
		       len, ring->rx_buffersize, cnt);
		goto drop;
	}

	dmaaddr = meta->dmaaddr;
	err = setup_rx_descbuffer(ring, desc, meta, GFP_ATOMIC);
	if (unlikely(err)) {
		b43dbg(ring->dev->wl, "DMA RX: setup_rx_descbuffer() failed\n");
		goto drop_recycle_buffer;
	}

	unmap_descbuffer(ring, dmaaddr, ring->rx_buffersize, 0);
	skb_put(skb, len + ring->frameoffset);
	skb_pull(skb, ring->frameoffset);

	b43_rx(ring->dev, skb, rxhdr);
drop:
	return;

drop_recycle_buffer:
	/* Poison and recycle the RX buffer. */
	b43_poison_rx_buffer(ring, skb);
	sync_descbuffer_for_device(ring, dmaaddr, ring->rx_buffersize);
}

/* Drain all pending frames from the RX ring up to the hardware's
 * current slot, then report the new ring position back to hardware. */
void b43_dma_rx(struct b43_dmaring *ring)
{
	const struct b43_dma_ops *ops = ring->ops;
	int slot, current_slot;
	int used_slots = 0;

	B43_WARN_ON(ring->tx);
	current_slot = ops->get_current_rxslot(ring);
	B43_WARN_ON(!(current_slot >= 0 && current_slot < ring->nr_slots));

	slot = ring->current_slot;
	for (; slot != current_slot; slot = next_slot(ring, slot)) {
		dma_rx(ring, &slot);
		update_max_used_slots(ring, ++used_slots);
	}
	ops->set_current_rxslot(ring, slot);
	ring->current_slot = slot;
}

static void b43_dma_tx_suspend_ring(struct b43_dmaring *ring)
{
	B43_WARN_ON(!ring->tx);
	ring->ops->tx_suspend(ring);
}

static void b43_dma_tx_resume_ring(struct b43_dmaring *ring)
{
	B43_WARN_ON(!ring->tx);
	ring->ops->tx_resume(ring);
}

/* Suspend all TX rings (device is kept awake while suspended). */
void b43_dma_tx_suspend(struct b43_wldev *dev)
{
	b43_power_saving_ctl_bits(dev, B43_PS_AWAKE);
	b43_dma_tx_suspend_ring(dev->dma.tx_ring_AC_BK);
	b43_dma_tx_suspend_ring(dev->dma.tx_ring_AC_BE);
	b43_dma_tx_suspend_ring(dev->dma.tx_ring_AC_VI);
	b43_dma_tx_suspend_ring(dev->dma.tx_ring_AC_VO);
	b43_dma_tx_suspend_ring(dev->dma.tx_ring_mcast);
}

/* Resume all TX rings (reverse order of suspend) and drop the
 * power-saving override. */
void b43_dma_tx_resume(struct b43_wldev *dev)
{
	b43_dma_tx_resume_ring(dev->dma.tx_ring_mcast);
	b43_dma_tx_resume_ring(dev->dma.tx_ring_AC_VO);
	b43_dma_tx_resume_ring(dev->dma.tx_ring_AC_VI);
	b43_dma_tx_resume_ring(dev->dma.tx_ring_AC_BE);
	b43_dma_tx_resume_ring(dev->dma.tx_ring_AC_BK);
	b43_power_saving_ctl_bits(dev, 0);
}

/* Toggle the direct-FIFO RX bit in the engine's RX control register. */
static void direct_fifo_rx(struct b43_wldev *dev, enum b43_dmatype type,
			   u16 mmio_base, bool enable)
{
	u32 ctl;

	if (type == B43_DMA_64BIT) {
		ctl = b43_read32(dev, mmio_base + B43_DMA64_RXCTL);
		ctl &= ~B43_DMA64_RXDIRECTFIFO;
		if (enable)
			ctl |= B43_DMA64_RXDIRECTFIFO;
		b43_write32(dev, mmio_base + B43_DMA64_RXCTL, ctl);
	} else {
		ctl = b43_read32(dev, mmio_base + B43_DMA32_RXCTL);
		ctl &= ~B43_DMA32_RXDIRECTFIFO;
		if (enable)
			ctl |= B43_DMA32_RXDIRECTFIFO;
		b43_write32(dev, mmio_base + B43_DMA32_RXCTL, ctl);
	}
}

/* Enable/Disable Direct FIFO Receive Mode (PIO) on a RX engine.
 * This is called from PIO code, so DMA structures are not available. */
void b43_dma_direct_fifo_rx(struct b43_wldev *dev,
			    unsigned int engine_index, bool enable)
{
	enum b43_dmatype type;
	u16 mmio_base;

	type = dma_mask_to_engine_type(supported_dma_mask(dev));

	mmio_base = b43_dmacontroller_base(type, engine_index);
	direct_fifo_rx(dev, type, mmio_base, enable);
}
/* Broadcom B43 wireless driver DMA ringbuffer and descriptor allocation/management Copyright (c) 2005, 2006 Michael Buesch <mb@bu3sch.de> Some code in this file is derived from the b44.c driver Copyright (C) 2002 David S. Miller Copyright (C) Pekka Pietikainen This program is free software; you can redistribute it and/or modify it under the terms of the GNU General Public License as published by the Free Software Foundation; either version 2 of the License, or (at your option) any later version. This program is distributed in the hope that it will be useful, but WITHOUT ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for more details. You should have received a copy of the GNU General Public License along with this program; see the file COPYING. If not, write to the Free Software Foundation, Inc., 51 Franklin Steet, Fifth Floor, Boston, MA 02110-1301, USA. */ #include "b43.h" #include "dma.h" #include "main.h" #include "debugfs.h" #include "xmit.h" #include <linux/dma-mapping.h> #include <linux/pci.h> #include <linux/delay.h> #include <linux/skbuff.h> #include <linux/etherdevice.h> #include <linux/slab.h> #include <asm/div64.h> /* Required number of TX DMA slots per TX frame. * This currently is 2, because we put the header and the ieee80211 frame * into separate slots. */ #define TX_SLOTS_PER_FRAME 2 /* 32bit DMA ops. 
 */
/* Map a slot index to its 32bit descriptor and meta structure. */
static struct b43_dmadesc_generic *op32_idx2desc(struct b43_dmaring *ring,
						 int slot,
						 struct b43_dmadesc_meta **meta)
{
	struct b43_dmadesc32 *desc;

	*meta = &(ring->meta[slot]);
	desc = ring->descbase;
	desc = &(desc[slot]);

	return (struct b43_dmadesc_generic *)desc;
}

/* Fill one 32bit hardware descriptor: buffer address (with SSB address
 * extension/translation applied) plus control flags. */
static void op32_fill_descriptor(struct b43_dmaring *ring,
				 struct b43_dmadesc_generic *desc,
				 dma_addr_t dmaaddr, u16 bufsize,
				 int start, int end, int irq)
{
	struct b43_dmadesc32 *descbase = ring->descbase;
	int slot;
	u32 ctl;
	u32 addr;
	u32 addrext;

	slot = (int)(&(desc->dma32) - descbase);
	B43_WARN_ON(!(slot >= 0 && slot < ring->nr_slots));

	addr = (u32) (dmaaddr & ~SSB_DMA_TRANSLATION_MASK);
	addrext = (u32) (dmaaddr & SSB_DMA_TRANSLATION_MASK)
	    >> SSB_DMA_TRANSLATION_SHIFT;
	addr |= ssb_dma_translation(ring->dev->dev);
	ctl = bufsize & B43_DMA32_DCTL_BYTECNT;
	if (slot == ring->nr_slots - 1)
		ctl |= B43_DMA32_DCTL_DTABLEEND;
	if (start)
		ctl |= B43_DMA32_DCTL_FRAMESTART;
	if (end)
		ctl |= B43_DMA32_DCTL_FRAMEEND;
	if (irq)
		ctl |= B43_DMA32_DCTL_IRQ;
	ctl |= (addrext << B43_DMA32_DCTL_ADDREXT_SHIFT)
	    & B43_DMA32_DCTL_ADDREXT_MASK;

	desc->dma32.control = cpu_to_le32(ctl);
	desc->dma32.address = cpu_to_le32(addr);
}

/* Hand the descriptor at `slot` to the 32bit TX engine. */
static void op32_poke_tx(struct b43_dmaring *ring, int slot)
{
	b43_dma_write(ring, B43_DMA32_TXINDEX,
		      (u32) (slot * sizeof(struct b43_dmadesc32)));
}

static void op32_tx_suspend(struct b43_dmaring *ring)
{
	b43_dma_write(ring, B43_DMA32_TXCTL, b43_dma_read(ring, B43_DMA32_TXCTL)
		      | B43_DMA32_TXSUSPEND);
}

static void op32_tx_resume(struct b43_dmaring *ring)
{
	b43_dma_write(ring, B43_DMA32_TXCTL, b43_dma_read(ring, B43_DMA32_TXCTL)
		      & ~B43_DMA32_TXSUSPEND);
}

/* Read the slot the 32bit RX engine is currently at. */
static int op32_get_current_rxslot(struct b43_dmaring *ring)
{
	u32 val;

	val = b43_dma_read(ring, B43_DMA32_RXSTATUS);
	val &= B43_DMA32_RXDPTR;

	return (val / sizeof(struct b43_dmadesc32));
}

static void op32_set_current_rxslot(struct b43_dmaring *ring, int slot)
{
	b43_dma_write(ring, B43_DMA32_RXINDEX,
		      (u32) (slot * sizeof(struct b43_dmadesc32)));
}

static const struct b43_dma_ops dma32_ops = {
	.idx2desc = op32_idx2desc,
	.fill_descriptor = op32_fill_descriptor,
	.poke_tx = op32_poke_tx,
	.tx_suspend = op32_tx_suspend,
	.tx_resume = op32_tx_resume,
	.get_current_rxslot = op32_get_current_rxslot,
	.set_current_rxslot = op32_set_current_rxslot,
};

/* 64bit DMA ops. */
static struct b43_dmadesc_generic *op64_idx2desc(struct b43_dmaring *ring,
						 int slot,
						 struct b43_dmadesc_meta **meta)
{
	struct b43_dmadesc64 *desc;

	*meta = &(ring->meta[slot]);
	desc = ring->descbase;
	desc = &(desc[slot]);

	return (struct b43_dmadesc_generic *)desc;
}

/* Fill one 64bit hardware descriptor (split address into lo/hi words,
 * SSB translation goes into the high word). */
static void op64_fill_descriptor(struct b43_dmaring *ring,
				 struct b43_dmadesc_generic *desc,
				 dma_addr_t dmaaddr, u16 bufsize,
				 int start, int end, int irq)
{
	struct b43_dmadesc64 *descbase = ring->descbase;
	int slot;
	u32 ctl0 = 0, ctl1 = 0;
	u32 addrlo, addrhi;
	u32 addrext;

	slot = (int)(&(desc->dma64) - descbase);
	B43_WARN_ON(!(slot >= 0 && slot < ring->nr_slots));

	addrlo = (u32) (dmaaddr & 0xFFFFFFFF);
	addrhi = (((u64) dmaaddr >> 32) & ~SSB_DMA_TRANSLATION_MASK);
	addrext = (((u64) dmaaddr >> 32) & SSB_DMA_TRANSLATION_MASK)
	    >> SSB_DMA_TRANSLATION_SHIFT;
	addrhi |= (ssb_dma_translation(ring->dev->dev) << 1);
	if (slot == ring->nr_slots - 1)
		ctl0 |= B43_DMA64_DCTL0_DTABLEEND;
	if (start)
		ctl0 |= B43_DMA64_DCTL0_FRAMESTART;
	if (end)
		ctl0 |= B43_DMA64_DCTL0_FRAMEEND;
	if (irq)
		ctl0 |= B43_DMA64_DCTL0_IRQ;
	ctl1 |= bufsize & B43_DMA64_DCTL1_BYTECNT;
	ctl1 |= (addrext << B43_DMA64_DCTL1_ADDREXT_SHIFT)
	    & B43_DMA64_DCTL1_ADDREXT_MASK;

	desc->dma64.control0 = cpu_to_le32(ctl0);
	desc->dma64.control1 = cpu_to_le32(ctl1);
	desc->dma64.address_low = cpu_to_le32(addrlo);
	desc->dma64.address_high = cpu_to_le32(addrhi);
}

static void op64_poke_tx(struct b43_dmaring *ring, int slot)
{
	b43_dma_write(ring, B43_DMA64_TXINDEX,
		      (u32) (slot * sizeof(struct b43_dmadesc64)));
}

static void op64_tx_suspend(struct b43_dmaring *ring)
{
	b43_dma_write(ring, B43_DMA64_TXCTL, b43_dma_read(ring, B43_DMA64_TXCTL)
		      | B43_DMA64_TXSUSPEND);
}

static void op64_tx_resume(struct b43_dmaring *ring)
{
	b43_dma_write(ring, B43_DMA64_TXCTL, b43_dma_read(ring, B43_DMA64_TXCTL)
		      & ~B43_DMA64_TXSUSPEND);
}

static int op64_get_current_rxslot(struct b43_dmaring *ring)
{
	u32 val;

	val = b43_dma_read(ring, B43_DMA64_RXSTATUS);
	val &= B43_DMA64_RXSTATDPTR;

	return (val / sizeof(struct b43_dmadesc64));
}

static void op64_set_current_rxslot(struct b43_dmaring *ring, int slot)
{
	b43_dma_write(ring, B43_DMA64_RXINDEX,
		      (u32) (slot * sizeof(struct b43_dmadesc64)));
}

static const struct b43_dma_ops dma64_ops = {
	.idx2desc = op64_idx2desc,
	.fill_descriptor = op64_fill_descriptor,
	.poke_tx = op64_poke_tx,
	.tx_suspend = op64_tx_suspend,
	.tx_resume = op64_tx_resume,
	.get_current_rxslot = op64_get_current_rxslot,
	.set_current_rxslot = op64_set_current_rxslot,
};

/* Number of unused descriptor slots on a ring. */
static inline int free_slots(struct b43_dmaring *ring)
{
	return (ring->nr_slots - ring->used_slots);
}

/* Next slot index, wrapping around the ring. */
static inline int next_slot(struct b43_dmaring *ring, int slot)
{
	B43_WARN_ON(!(slot >= -1 && slot <= ring->nr_slots - 1));
	if (slot == ring->nr_slots - 1)
		return 0;
	return slot + 1;
}

/* Previous slot index, wrapping around the ring. */
static inline int prev_slot(struct b43_dmaring *ring, int slot)
{
	B43_WARN_ON(!(slot >= 0 && slot <= ring->nr_slots - 1));
	if (slot == 0)
		return ring->nr_slots - 1;
	return slot - 1;
}

#ifdef CONFIG_B43_DEBUG
/* Track the high-water mark of used slots (debug builds only). */
static void update_max_used_slots(struct b43_dmaring *ring,
				  int current_used_slots)
{
	if (current_used_slots <= ring->max_used_slots)
		return;
	ring->max_used_slots = current_used_slots;
	if (b43_debug(ring->dev, B43_DBG_DMAVERBOSE)) {
		b43dbg(ring->dev->wl,
		       "max_used_slots increased to %d on %s ring %d\n",
		       ring->max_used_slots,
		       ring->tx ? "TX" : "RX", ring->index);
	}
}
#else
static inline
    void update_max_used_slots(struct b43_dmaring *ring, int current_used_slots)
{
}
#endif /* DEBUG */

/* Request a slot for usage. */
static inline int request_slot(struct b43_dmaring *ring)
{
	int slot;

	B43_WARN_ON(!ring->tx);
	B43_WARN_ON(ring->stopped);
	B43_WARN_ON(free_slots(ring) == 0);

	slot = next_slot(ring, ring->current_slot);
	ring->current_slot = slot;
	ring->used_slots++;

	update_max_used_slots(ring, ring->used_slots);

	return slot;
}

/* Map (engine type, controller index) to the MMIO base of that DMA
 * controller. */
static u16 b43_dmacontroller_base(enum b43_dmatype type, int controller_idx)
{
	static const u16 map64[] = {
		B43_MMIO_DMA64_BASE0,
		B43_MMIO_DMA64_BASE1,
		B43_MMIO_DMA64_BASE2,
		B43_MMIO_DMA64_BASE3,
		B43_MMIO_DMA64_BASE4,
		B43_MMIO_DMA64_BASE5,
	};
	static const u16 map32[] = {
		B43_MMIO_DMA32_BASE0,
		B43_MMIO_DMA32_BASE1,
		B43_MMIO_DMA32_BASE2,
		B43_MMIO_DMA32_BASE3,
		B43_MMIO_DMA32_BASE4,
		B43_MMIO_DMA32_BASE5,
	};

	if (type == B43_DMA_64BIT) {
		B43_WARN_ON(!(controller_idx >= 0 &&
			      controller_idx < ARRAY_SIZE(map64)));
		return map64[controller_idx];
	}
	B43_WARN_ON(!(controller_idx >= 0 &&
		      controller_idx < ARRAY_SIZE(map32)));
	return map32[controller_idx];
}

/* DMA-map a descriptor buffer; direction depends on tx flag. */
static inline
    dma_addr_t map_descbuffer(struct b43_dmaring *ring,
			      unsigned char *buf, size_t len, int tx)
{
	dma_addr_t dmaaddr;

	if (tx) {
		dmaaddr = dma_map_single(ring->dev->dev->dma_dev,
					 buf, len, DMA_TO_DEVICE);
	} else {
		dmaaddr = dma_map_single(ring->dev->dev->dma_dev,
					 buf, len, DMA_FROM_DEVICE);
	}

	return dmaaddr;
}

/* Undo map_descbuffer(); direction must match the original mapping. */
static inline
    void unmap_descbuffer(struct b43_dmaring *ring,
			  dma_addr_t addr, size_t len, int tx)
{
	if (tx) {
		dma_unmap_single(ring->dev->dev->dma_dev,
				 addr, len, DMA_TO_DEVICE);
	} else {
		dma_unmap_single(ring->dev->dev->dma_dev,
				 addr, len, DMA_FROM_DEVICE);
	}
}

/* Sync an RX buffer for CPU access before reading the received data. */
static inline
    void sync_descbuffer_for_cpu(struct b43_dmaring *ring,
				 dma_addr_t addr, size_t len)
{
	B43_WARN_ON(ring->tx);
	dma_sync_single_for_cpu(ring->dev->dev->dma_dev,
				addr, len, DMA_FROM_DEVICE);
}

/* Give an RX buffer back to the device after CPU access. */
static inline
    void sync_descbuffer_for_device(struct b43_dmaring *ring,
				    dma_addr_t addr, size_t len)
{
	B43_WARN_ON(ring->tx);
	dma_sync_single_for_device(ring->dev->dev->dma_dev,
				   addr, len, DMA_FROM_DEVICE);
}

/* NOTE(review): definition continues past this chunk boundary. */
static inline void
free_descriptor_buffer(struct b43_dmaring *ring, struct b43_dmadesc_meta *meta) { if (meta->skb) { dev_kfree_skb_any(meta->skb); meta->skb = NULL; } } static int alloc_ringmemory(struct b43_dmaring *ring) { gfp_t flags = GFP_KERNEL; /* The specs call for 4K buffers for 30- and 32-bit DMA with 4K * alignment and 8K buffers for 64-bit DMA with 8K alignment. Testing * has shown that 4K is sufficient for the latter as long as the buffer * does not cross an 8K boundary. * * For unknown reasons - possibly a hardware error - the BCM4311 rev * 02, which uses 64-bit DMA, needs the ring buffer in very low memory, * which accounts for the GFP_DMA flag below. * * The flags here must match the flags in free_ringmemory below! */ if (ring->type == B43_DMA_64BIT) flags |= GFP_DMA; ring->descbase = dma_alloc_coherent(ring->dev->dev->dma_dev, B43_DMA_RINGMEMSIZE, &(ring->dmabase), flags); if (!ring->descbase) { b43err(ring->dev->wl, "DMA ringmemory allocation failed\n"); return -ENOMEM; } memset(ring->descbase, 0, B43_DMA_RINGMEMSIZE); return 0; } static void free_ringmemory(struct b43_dmaring *ring) { dma_free_coherent(ring->dev->dev->dma_dev, B43_DMA_RINGMEMSIZE, ring->descbase, ring->dmabase); } /* Reset the RX DMA channel */ static int b43_dmacontroller_rx_reset(struct b43_wldev *dev, u16 mmio_base, enum b43_dmatype type) { int i; u32 value; u16 offset; might_sleep(); offset = (type == B43_DMA_64BIT) ? B43_DMA64_RXCTL : B43_DMA32_RXCTL; b43_write32(dev, mmio_base + offset, 0); for (i = 0; i < 10; i++) { offset = (type == B43_DMA_64BIT) ? 
B43_DMA64_RXSTATUS : B43_DMA32_RXSTATUS; value = b43_read32(dev, mmio_base + offset); if (type == B43_DMA_64BIT) { value &= B43_DMA64_RXSTAT; if (value == B43_DMA64_RXSTAT_DISABLED) { i = -1; break; } } else { value &= B43_DMA32_RXSTATE; if (value == B43_DMA32_RXSTAT_DISABLED) { i = -1; break; } } msleep(1); } if (i != -1) { b43err(dev->wl, "DMA RX reset timed out\n"); return -ENODEV; } return 0; } /* Reset the TX DMA channel */ static int b43_dmacontroller_tx_reset(struct b43_wldev *dev, u16 mmio_base, enum b43_dmatype type) { int i; u32 value; u16 offset; might_sleep(); for (i = 0; i < 10; i++) { offset = (type == B43_DMA_64BIT) ? B43_DMA64_TXSTATUS : B43_DMA32_TXSTATUS; value = b43_read32(dev, mmio_base + offset); if (type == B43_DMA_64BIT) { value &= B43_DMA64_TXSTAT; if (value == B43_DMA64_TXSTAT_DISABLED || value == B43_DMA64_TXSTAT_IDLEWAIT || value == B43_DMA64_TXSTAT_STOPPED) break; } else { value &= B43_DMA32_TXSTATE; if (value == B43_DMA32_TXSTAT_DISABLED || value == B43_DMA32_TXSTAT_IDLEWAIT || value == B43_DMA32_TXSTAT_STOPPED) break; } msleep(1); } offset = (type == B43_DMA_64BIT) ? B43_DMA64_TXCTL : B43_DMA32_TXCTL; b43_write32(dev, mmio_base + offset, 0); for (i = 0; i < 10; i++) { offset = (type == B43_DMA_64BIT) ? B43_DMA64_TXSTATUS : B43_DMA32_TXSTATUS; value = b43_read32(dev, mmio_base + offset); if (type == B43_DMA_64BIT) { value &= B43_DMA64_TXSTAT; if (value == B43_DMA64_TXSTAT_DISABLED) { i = -1; break; } } else { value &= B43_DMA32_TXSTATE; if (value == B43_DMA32_TXSTAT_DISABLED) { i = -1; break; } } msleep(1); } if (i != -1) { b43err(dev->wl, "DMA TX reset timed out\n"); return -ENODEV; } /* ensure the reset is completed. */ msleep(1); return 0; } /* Check if a DMA mapping address is invalid. 
*/ static bool b43_dma_mapping_error(struct b43_dmaring *ring, dma_addr_t addr, size_t buffersize, bool dma_to_device) { if (unlikely(dma_mapping_error(ring->dev->dev->dma_dev, addr))) return 1; switch (ring->type) { case B43_DMA_30BIT: if ((u64)addr + buffersize > (1ULL << 30)) goto address_error; break; case B43_DMA_32BIT: if ((u64)addr + buffersize > (1ULL << 32)) goto address_error; break; case B43_DMA_64BIT: /* Currently we can't have addresses beyond * 64bit in the kernel. */ break; } /* The address is OK. */ return 0; address_error: /* We can't support this address. Unmap it again. */ unmap_descbuffer(ring, addr, buffersize, dma_to_device); return 1; } static bool b43_rx_buffer_is_poisoned(struct b43_dmaring *ring, struct sk_buff *skb) { unsigned char *f = skb->data + ring->frameoffset; return ((f[0] & f[1] & f[2] & f[3] & f[4] & f[5] & f[6] & f[7]) == 0xFF); } static void b43_poison_rx_buffer(struct b43_dmaring *ring, struct sk_buff *skb) { struct b43_rxhdr_fw4 *rxhdr; unsigned char *frame; /* This poisons the RX buffer to detect DMA failures. */ rxhdr = (struct b43_rxhdr_fw4 *)(skb->data); rxhdr->frame_len = 0; B43_WARN_ON(ring->rx_buffersize < ring->frameoffset + sizeof(struct b43_plcp_hdr6) + 2); frame = skb->data + ring->frameoffset; memset(frame, 0xFF, sizeof(struct b43_plcp_hdr6) + 2 /* padding */); } static int setup_rx_descbuffer(struct b43_dmaring *ring, struct b43_dmadesc_generic *desc, struct b43_dmadesc_meta *meta, gfp_t gfp_flags) { dma_addr_t dmaaddr; struct sk_buff *skb; B43_WARN_ON(ring->tx); skb = __dev_alloc_skb(ring->rx_buffersize, gfp_flags); if (unlikely(!skb)) return -ENOMEM; b43_poison_rx_buffer(ring, skb); dmaaddr = map_descbuffer(ring, skb->data, ring->rx_buffersize, 0); if (b43_dma_mapping_error(ring, dmaaddr, ring->rx_buffersize, 0)) { /* ugh. 
try to realloc in zone_dma */ gfp_flags |= GFP_DMA; dev_kfree_skb_any(skb); skb = __dev_alloc_skb(ring->rx_buffersize, gfp_flags); if (unlikely(!skb)) return -ENOMEM; b43_poison_rx_buffer(ring, skb); dmaaddr = map_descbuffer(ring, skb->data, ring->rx_buffersize, 0); if (b43_dma_mapping_error(ring, dmaaddr, ring->rx_buffersize, 0)) { b43err(ring->dev->wl, "RX DMA buffer allocation failed\n"); dev_kfree_skb_any(skb); return -EIO; } } meta->skb = skb; meta->dmaaddr = dmaaddr; ring->ops->fill_descriptor(ring, desc, dmaaddr, ring->rx_buffersize, 0, 0, 0); return 0; } /* Allocate the initial descbuffers. * This is used for an RX ring only. */ static int alloc_initial_descbuffers(struct b43_dmaring *ring) { int i, err = -ENOMEM; struct b43_dmadesc_generic *desc; struct b43_dmadesc_meta *meta; for (i = 0; i < ring->nr_slots; i++) { desc = ring->ops->idx2desc(ring, i, &meta); err = setup_rx_descbuffer(ring, desc, meta, GFP_KERNEL); if (err) { b43err(ring->dev->wl, "Failed to allocate initial descbuffers\n"); goto err_unwind; } } mb(); ring->used_slots = ring->nr_slots; err = 0; out: return err; err_unwind: for (i--; i >= 0; i--) { desc = ring->ops->idx2desc(ring, i, &meta); unmap_descbuffer(ring, meta->dmaaddr, ring->rx_buffersize, 0); dev_kfree_skb(meta->skb); } goto out; } /* Do initial setup of the DMA controller. * Reset the controller, write the ring busaddress * and switch the "enable" bit on. 
*/ static int dmacontroller_setup(struct b43_dmaring *ring) { int err = 0; u32 value; u32 addrext; u32 trans = ssb_dma_translation(ring->dev->dev); if (ring->tx) { if (ring->type == B43_DMA_64BIT) { u64 ringbase = (u64) (ring->dmabase); addrext = ((ringbase >> 32) & SSB_DMA_TRANSLATION_MASK) >> SSB_DMA_TRANSLATION_SHIFT; value = B43_DMA64_TXENABLE; value |= (addrext << B43_DMA64_TXADDREXT_SHIFT) & B43_DMA64_TXADDREXT_MASK; b43_dma_write(ring, B43_DMA64_TXCTL, value); b43_dma_write(ring, B43_DMA64_TXRINGLO, (ringbase & 0xFFFFFFFF)); b43_dma_write(ring, B43_DMA64_TXRINGHI, ((ringbase >> 32) & ~SSB_DMA_TRANSLATION_MASK) | (trans << 1)); } else { u32 ringbase = (u32) (ring->dmabase); addrext = (ringbase & SSB_DMA_TRANSLATION_MASK) >> SSB_DMA_TRANSLATION_SHIFT; value = B43_DMA32_TXENABLE; value |= (addrext << B43_DMA32_TXADDREXT_SHIFT) & B43_DMA32_TXADDREXT_MASK; b43_dma_write(ring, B43_DMA32_TXCTL, value); b43_dma_write(ring, B43_DMA32_TXRING, (ringbase & ~SSB_DMA_TRANSLATION_MASK) | trans); } } else { err = alloc_initial_descbuffers(ring); if (err) goto out; if (ring->type == B43_DMA_64BIT) { u64 ringbase = (u64) (ring->dmabase); addrext = ((ringbase >> 32) & SSB_DMA_TRANSLATION_MASK) >> SSB_DMA_TRANSLATION_SHIFT; value = (ring->frameoffset << B43_DMA64_RXFROFF_SHIFT); value |= B43_DMA64_RXENABLE; value |= (addrext << B43_DMA64_RXADDREXT_SHIFT) & B43_DMA64_RXADDREXT_MASK; b43_dma_write(ring, B43_DMA64_RXCTL, value); b43_dma_write(ring, B43_DMA64_RXRINGLO, (ringbase & 0xFFFFFFFF)); b43_dma_write(ring, B43_DMA64_RXRINGHI, ((ringbase >> 32) & ~SSB_DMA_TRANSLATION_MASK) | (trans << 1)); b43_dma_write(ring, B43_DMA64_RXINDEX, ring->nr_slots * sizeof(struct b43_dmadesc64)); } else { u32 ringbase = (u32) (ring->dmabase); addrext = (ringbase & SSB_DMA_TRANSLATION_MASK) >> SSB_DMA_TRANSLATION_SHIFT; value = (ring->frameoffset << B43_DMA32_RXFROFF_SHIFT); value |= B43_DMA32_RXENABLE; value |= (addrext << B43_DMA32_RXADDREXT_SHIFT) & B43_DMA32_RXADDREXT_MASK; b43_dma_write(ring, 
B43_DMA32_RXCTL, value); b43_dma_write(ring, B43_DMA32_RXRING, (ringbase & ~SSB_DMA_TRANSLATION_MASK) | trans); b43_dma_write(ring, B43_DMA32_RXINDEX, ring->nr_slots * sizeof(struct b43_dmadesc32)); } } out: return err; } /* Shutdown the DMA controller. */ static void dmacontroller_cleanup(struct b43_dmaring *ring) { if (ring->tx) { b43_dmacontroller_tx_reset(ring->dev, ring->mmio_base, ring->type); if (ring->type == B43_DMA_64BIT) { b43_dma_write(ring, B43_DMA64_TXRINGLO, 0); b43_dma_write(ring, B43_DMA64_TXRINGHI, 0); } else b43_dma_write(ring, B43_DMA32_TXRING, 0); } else { b43_dmacontroller_rx_reset(ring->dev, ring->mmio_base, ring->type); if (ring->type == B43_DMA_64BIT) { b43_dma_write(ring, B43_DMA64_RXRINGLO, 0); b43_dma_write(ring, B43_DMA64_RXRINGHI, 0); } else b43_dma_write(ring, B43_DMA32_RXRING, 0); } } static void free_all_descbuffers(struct b43_dmaring *ring) { struct b43_dmadesc_generic *desc; struct b43_dmadesc_meta *meta; int i; if (!ring->used_slots) return; for (i = 0; i < ring->nr_slots; i++) { desc = ring->ops->idx2desc(ring, i, &meta); if (!meta->skb || b43_dma_ptr_is_poisoned(meta->skb)) { B43_WARN_ON(!ring->tx); continue; } if (ring->tx) { unmap_descbuffer(ring, meta->dmaaddr, meta->skb->len, 1); } else { unmap_descbuffer(ring, meta->dmaaddr, ring->rx_buffersize, 0); } free_descriptor_buffer(ring, meta); } } static u64 supported_dma_mask(struct b43_wldev *dev) { u32 tmp; u16 mmio_base; tmp = b43_read32(dev, SSB_TMSHIGH); if (tmp & SSB_TMSHIGH_DMA64) return DMA_BIT_MASK(64); mmio_base = b43_dmacontroller_base(0, 0); b43_write32(dev, mmio_base + B43_DMA32_TXCTL, B43_DMA32_TXADDREXT_MASK); tmp = b43_read32(dev, mmio_base + B43_DMA32_TXCTL); if (tmp & B43_DMA32_TXADDREXT_MASK) return DMA_BIT_MASK(32); return DMA_BIT_MASK(30); } static enum b43_dmatype dma_mask_to_engine_type(u64 dmamask) { if (dmamask == DMA_BIT_MASK(30)) return B43_DMA_30BIT; if (dmamask == DMA_BIT_MASK(32)) return B43_DMA_32BIT; if (dmamask == DMA_BIT_MASK(64)) return 
B43_DMA_64BIT; B43_WARN_ON(1); return B43_DMA_30BIT; } /* Main initialization function. */ static struct b43_dmaring *b43_setup_dmaring(struct b43_wldev *dev, int controller_index, int for_tx, enum b43_dmatype type) { struct b43_dmaring *ring; int i, err; dma_addr_t dma_test; ring = kzalloc(sizeof(*ring), GFP_KERNEL); if (!ring) goto out; ring->nr_slots = B43_RXRING_SLOTS; if (for_tx) ring->nr_slots = B43_TXRING_SLOTS; ring->meta = kcalloc(ring->nr_slots, sizeof(struct b43_dmadesc_meta), GFP_KERNEL); if (!ring->meta) goto err_kfree_ring; for (i = 0; i < ring->nr_slots; i++) ring->meta->skb = B43_DMA_PTR_POISON; ring->type = type; ring->dev = dev; ring->mmio_base = b43_dmacontroller_base(type, controller_index); ring->index = controller_index; if (type == B43_DMA_64BIT) ring->ops = &dma64_ops; else ring->ops = &dma32_ops; if (for_tx) { ring->tx = 1; ring->current_slot = -1; } else { if (ring->index == 0) { ring->rx_buffersize = B43_DMA0_RX_BUFFERSIZE; ring->frameoffset = B43_DMA0_RX_FRAMEOFFSET; } else B43_WARN_ON(1); } #ifdef CONFIG_B43_DEBUG ring->last_injected_overflow = jiffies; #endif if (for_tx) { /* Assumption: B43_TXRING_SLOTS can be divided by TX_SLOTS_PER_FRAME */ BUILD_BUG_ON(B43_TXRING_SLOTS % TX_SLOTS_PER_FRAME != 0); ring->txhdr_cache = kcalloc(ring->nr_slots / TX_SLOTS_PER_FRAME, b43_txhdr_size(dev), GFP_KERNEL); if (!ring->txhdr_cache) goto err_kfree_meta; /* test for ability to dma to txhdr_cache */ dma_test = dma_map_single(dev->dev->dma_dev, ring->txhdr_cache, b43_txhdr_size(dev), DMA_TO_DEVICE); if (b43_dma_mapping_error(ring, dma_test, b43_txhdr_size(dev), 1)) { /* ugh realloc */ kfree(ring->txhdr_cache); ring->txhdr_cache = kcalloc(ring->nr_slots / TX_SLOTS_PER_FRAME, b43_txhdr_size(dev), GFP_KERNEL | GFP_DMA); if (!ring->txhdr_cache) goto err_kfree_meta; dma_test = dma_map_single(dev->dev->dma_dev, ring->txhdr_cache, b43_txhdr_size(dev), DMA_TO_DEVICE); if (b43_dma_mapping_error(ring, dma_test, b43_txhdr_size(dev), 1)) { b43err(dev->wl, "TXHDR 
DMA allocation failed\n"); goto err_kfree_txhdr_cache; } } dma_unmap_single(dev->dev->dma_dev, dma_test, b43_txhdr_size(dev), DMA_TO_DEVICE); } err = alloc_ringmemory(ring); if (err) goto err_kfree_txhdr_cache; err = dmacontroller_setup(ring); if (err) goto err_free_ringmemory; out: return ring; err_free_ringmemory: free_ringmemory(ring); err_kfree_txhdr_cache: kfree(ring->txhdr_cache); err_kfree_meta: kfree(ring->meta); err_kfree_ring: kfree(ring); ring = NULL; goto out; } #define divide(a, b) ({ \ typeof(a) __a = a; \ do_div(__a, b); \ __a; \ }) #define modulo(a, b) ({ \ typeof(a) __a = a; \ do_div(__a, b); \ }) /* Main cleanup function. */ static void b43_destroy_dmaring(struct b43_dmaring *ring, const char *ringname) { if (!ring) return; #ifdef CONFIG_B43_DEBUG { /* Print some statistics. */ u64 failed_packets = ring->nr_failed_tx_packets; u64 succeed_packets = ring->nr_succeed_tx_packets; u64 nr_packets = failed_packets + succeed_packets; u64 permille_failed = 0, average_tries = 0; if (nr_packets) permille_failed = divide(failed_packets * 1000, nr_packets); if (nr_packets) average_tries = divide(ring->nr_total_packet_tries * 100, nr_packets); b43dbg(ring->dev->wl, "DMA-%u %s: " "Used slots %d/%d, Failed frames %llu/%llu = %llu.%01llu%%, " "Average tries %llu.%02llu\n", (unsigned int)(ring->type), ringname, ring->max_used_slots, ring->nr_slots, (unsigned long long)failed_packets, (unsigned long long)nr_packets, (unsigned long long)divide(permille_failed, 10), (unsigned long long)modulo(permille_failed, 10), (unsigned long long)divide(average_tries, 100), (unsigned long long)modulo(average_tries, 100)); } #endif /* DEBUG */ /* Device IRQs are disabled prior entering this function, * so no need to take care of concurrency with rx handler stuff. 
*/ dmacontroller_cleanup(ring); free_all_descbuffers(ring); free_ringmemory(ring); kfree(ring->txhdr_cache); kfree(ring->meta); kfree(ring); } #define destroy_ring(dma, ring) do { \ b43_destroy_dmaring((dma)->ring, __stringify(ring)); \ (dma)->ring = NULL; \ } while (0) void b43_dma_free(struct b43_wldev *dev) { struct b43_dma *dma; if (b43_using_pio_transfers(dev)) return; dma = &dev->dma; destroy_ring(dma, rx_ring); destroy_ring(dma, tx_ring_AC_BK); destroy_ring(dma, tx_ring_AC_BE); destroy_ring(dma, tx_ring_AC_VI); destroy_ring(dma, tx_ring_AC_VO); destroy_ring(dma, tx_ring_mcast); } static int b43_dma_set_mask(struct b43_wldev *dev, u64 mask) { u64 orig_mask = mask; bool fallback = 0; int err; /* Try to set the DMA mask. If it fails, try falling back to a * lower mask, as we can always also support a lower one. */ while (1) { err = dma_set_mask(dev->dev->dma_dev, mask); if (!err) { err = dma_set_coherent_mask(dev->dev->dma_dev, mask); if (!err) break; } if (mask == DMA_BIT_MASK(64)) { mask = DMA_BIT_MASK(32); fallback = 1; continue; } if (mask == DMA_BIT_MASK(32)) { mask = DMA_BIT_MASK(30); fallback = 1; continue; } b43err(dev->wl, "The machine/kernel does not support " "the required %u-bit DMA mask\n", (unsigned int)dma_mask_to_engine_type(orig_mask)); return -EOPNOTSUPP; } if (fallback) { b43info(dev->wl, "DMA mask fallback from %u-bit to %u-bit\n", (unsigned int)dma_mask_to_engine_type(orig_mask), (unsigned int)dma_mask_to_engine_type(mask)); } return 0; } int b43_dma_init(struct b43_wldev *dev) { struct b43_dma *dma = &dev->dma; int err; u64 dmamask; enum b43_dmatype type; dmamask = supported_dma_mask(dev); type = dma_mask_to_engine_type(dmamask); err = b43_dma_set_mask(dev, dmamask); if (err) return err; err = -ENOMEM; /* setup TX DMA channels. 
*/ dma->tx_ring_AC_BK = b43_setup_dmaring(dev, 0, 1, type); if (!dma->tx_ring_AC_BK) goto out; dma->tx_ring_AC_BE = b43_setup_dmaring(dev, 1, 1, type); if (!dma->tx_ring_AC_BE) goto err_destroy_bk; dma->tx_ring_AC_VI = b43_setup_dmaring(dev, 2, 1, type); if (!dma->tx_ring_AC_VI) goto err_destroy_be; dma->tx_ring_AC_VO = b43_setup_dmaring(dev, 3, 1, type); if (!dma->tx_ring_AC_VO) goto err_destroy_vi; dma->tx_ring_mcast = b43_setup_dmaring(dev, 4, 1, type); if (!dma->tx_ring_mcast) goto err_destroy_vo; /* setup RX DMA channel. */ dma->rx_ring = b43_setup_dmaring(dev, 0, 0, type); if (!dma->rx_ring) goto err_destroy_mcast; /* No support for the TX status DMA ring. */ B43_WARN_ON(dev->dev->id.revision < 5); b43dbg(dev->wl, "%u-bit DMA initialized\n", (unsigned int)type); err = 0; out: return err; err_destroy_mcast: destroy_ring(dma, tx_ring_mcast); err_destroy_vo: destroy_ring(dma, tx_ring_AC_VO); err_destroy_vi: destroy_ring(dma, tx_ring_AC_VI); err_destroy_be: destroy_ring(dma, tx_ring_AC_BE); err_destroy_bk: destroy_ring(dma, tx_ring_AC_BK); return err; } /* Generate a cookie for the TX header. */ static u16 generate_cookie(struct b43_dmaring *ring, int slot) { u16 cookie; /* Use the upper 4 bits of the cookie as * DMA controller ID and store the slot number * in the lower 12 bits. * Note that the cookie must never be 0, as this * is a special value used in RX path. * It can also not be 0xFFFF because that is special * for multicast frames. */ cookie = (((u16)ring->index + 1) << 12); B43_WARN_ON(slot & ~0x0FFF); cookie |= (u16)slot; return cookie; } /* Inspect a cookie and find out to which controller/slot it belongs. 
*/ static struct b43_dmaring *parse_cookie(struct b43_wldev *dev, u16 cookie, int *slot) { struct b43_dma *dma = &dev->dma; struct b43_dmaring *ring = NULL; switch (cookie & 0xF000) { case 0x1000: ring = dma->tx_ring_AC_BK; break; case 0x2000: ring = dma->tx_ring_AC_BE; break; case 0x3000: ring = dma->tx_ring_AC_VI; break; case 0x4000: ring = dma->tx_ring_AC_VO; break; case 0x5000: ring = dma->tx_ring_mcast; break; } *slot = (cookie & 0x0FFF); if (unlikely(!ring || *slot < 0 || *slot >= ring->nr_slots)) { b43dbg(dev->wl, "TX-status contains " "invalid cookie: 0x%04X\n", cookie); return NULL; } return ring; } static int dma_tx_fragment(struct b43_dmaring *ring, struct sk_buff *skb) { const struct b43_dma_ops *ops = ring->ops; struct ieee80211_tx_info *info = IEEE80211_SKB_CB(skb); struct b43_private_tx_info *priv_info = b43_get_priv_tx_info(info); u8 *header; int slot, old_top_slot, old_used_slots; int err; struct b43_dmadesc_generic *desc; struct b43_dmadesc_meta *meta; struct b43_dmadesc_meta *meta_hdr; u16 cookie; size_t hdrsize = b43_txhdr_size(ring->dev); /* Important note: If the number of used DMA slots per TX frame * is changed here, the TX_SLOTS_PER_FRAME definition at the top of * the file has to be updated, too! */ old_top_slot = ring->current_slot; old_used_slots = ring->used_slots; /* Get a slot for the header. 
*/ slot = request_slot(ring); desc = ops->idx2desc(ring, slot, &meta_hdr); memset(meta_hdr, 0, sizeof(*meta_hdr)); header = &(ring->txhdr_cache[(slot / TX_SLOTS_PER_FRAME) * hdrsize]); cookie = generate_cookie(ring, slot); err = b43_generate_txhdr(ring->dev, header, skb, info, cookie); if (unlikely(err)) { ring->current_slot = old_top_slot; ring->used_slots = old_used_slots; return err; } meta_hdr->dmaaddr = map_descbuffer(ring, (unsigned char *)header, hdrsize, 1); if (b43_dma_mapping_error(ring, meta_hdr->dmaaddr, hdrsize, 1)) { ring->current_slot = old_top_slot; ring->used_slots = old_used_slots; return -EIO; } ops->fill_descriptor(ring, desc, meta_hdr->dmaaddr, hdrsize, 1, 0, 0); /* Get a slot for the payload. */ slot = request_slot(ring); desc = ops->idx2desc(ring, slot, &meta); memset(meta, 0, sizeof(*meta)); meta->skb = skb; meta->is_last_fragment = 1; priv_info->bouncebuffer = NULL; meta->dmaaddr = map_descbuffer(ring, skb->data, skb->len, 1); /* create a bounce buffer in zone_dma on mapping failure. */ if (b43_dma_mapping_error(ring, meta->dmaaddr, skb->len, 1)) { priv_info->bouncebuffer = kmemdup(skb->data, skb->len, GFP_ATOMIC | GFP_DMA); if (!priv_info->bouncebuffer) { ring->current_slot = old_top_slot; ring->used_slots = old_used_slots; err = -ENOMEM; goto out_unmap_hdr; } meta->dmaaddr = map_descbuffer(ring, priv_info->bouncebuffer, skb->len, 1); if (b43_dma_mapping_error(ring, meta->dmaaddr, skb->len, 1)) { kfree(priv_info->bouncebuffer); priv_info->bouncebuffer = NULL; ring->current_slot = old_top_slot; ring->used_slots = old_used_slots; err = -EIO; goto out_unmap_hdr; } } ops->fill_descriptor(ring, desc, meta->dmaaddr, skb->len, 0, 1, 1); if (info->flags & IEEE80211_TX_CTL_SEND_AFTER_DTIM) { /* Tell the firmware about the cookie of the last * mcast frame, so it can clear the more-data bit in it. */ b43_shm_write16(ring->dev, B43_SHM_SHARED, B43_SHM_SH_MCASTCOOKIE, cookie); } /* Now transfer the whole frame. 
*/ wmb(); ops->poke_tx(ring, next_slot(ring, slot)); return 0; out_unmap_hdr: unmap_descbuffer(ring, meta_hdr->dmaaddr, hdrsize, 1); return err; } static inline int should_inject_overflow(struct b43_dmaring *ring) { #ifdef CONFIG_B43_DEBUG if (unlikely(b43_debug(ring->dev, B43_DBG_DMAOVERFLOW))) { /* Check if we should inject another ringbuffer overflow * to test handling of this situation in the stack. */ unsigned long next_overflow; next_overflow = ring->last_injected_overflow + HZ; if (time_after(jiffies, next_overflow)) { ring->last_injected_overflow = jiffies; b43dbg(ring->dev->wl, "Injecting TX ring overflow on " "DMA controller %d\n", ring->index); return 1; } } #endif /* CONFIG_B43_DEBUG */ return 0; } /* Static mapping of mac80211's queues (priorities) to b43 DMA rings. */ static struct b43_dmaring *select_ring_by_priority(struct b43_wldev *dev, u8 queue_prio) { struct b43_dmaring *ring; if (dev->qos_enabled) { /* 0 = highest priority */ switch (queue_prio) { default: B43_WARN_ON(1); /* fallthrough */ case 0: ring = dev->dma.tx_ring_AC_VO; break; case 1: ring = dev->dma.tx_ring_AC_VI; break; case 2: ring = dev->dma.tx_ring_AC_BE; break; case 3: ring = dev->dma.tx_ring_AC_BK; break; } } else ring = dev->dma.tx_ring_AC_BE; return ring; } int b43_dma_tx(struct b43_wldev *dev, struct sk_buff *skb) { struct b43_dmaring *ring; struct ieee80211_hdr *hdr; int err = 0; struct ieee80211_tx_info *info = IEEE80211_SKB_CB(skb); hdr = (struct ieee80211_hdr *)skb->data; if (info->flags & IEEE80211_TX_CTL_SEND_AFTER_DTIM) { /* The multicast ring will be sent after the DTIM */ ring = dev->dma.tx_ring_mcast; /* Set the more-data bit. Ucode will clear it on * the last frame for us. */ hdr->frame_control |= cpu_to_le16(IEEE80211_FCTL_MOREDATA); } else { /* Decide by priority where to put this frame. */ ring = select_ring_by_priority( dev, skb_get_queue_mapping(skb)); } B43_WARN_ON(!ring->tx); if (unlikely(ring->stopped)) { /* We get here only because of a bug in mac80211. 
* Because of a race, one packet may be queued after * the queue is stopped, thus we got called when we shouldn't. * For now, just refuse the transmit. */ if (b43_debug(dev, B43_DBG_DMAVERBOSE)) b43err(dev->wl, "Packet after queue stopped\n"); err = -ENOSPC; goto out; } if (unlikely(WARN_ON(free_slots(ring) < TX_SLOTS_PER_FRAME))) { /* If we get here, we have a real error with the queue * full, but queues not stopped. */ b43err(dev->wl, "DMA queue overflow\n"); err = -ENOSPC; goto out; } /* Assign the queue number to the ring (if not already done before) * so TX status handling can use it. The queue to ring mapping is * static, so we don't need to store it per frame. */ ring->queue_prio = skb_get_queue_mapping(skb); err = dma_tx_fragment(ring, skb); if (unlikely(err == -ENOKEY)) { /* Drop this packet, as we don't have the encryption key * anymore and must not transmit it unencrypted. */ dev_kfree_skb_any(skb); err = 0; goto out; } if (unlikely(err)) { b43err(dev->wl, "DMA tx mapping failure\n"); goto out; } if ((free_slots(ring) < TX_SLOTS_PER_FRAME) || should_inject_overflow(ring)) { /* This TX ring is full. */ ieee80211_stop_queue(dev->wl->hw, skb_get_queue_mapping(skb)); ring->stopped = 1; if (b43_debug(dev, B43_DBG_DMAVERBOSE)) { b43dbg(dev->wl, "Stopped TX ring %d\n", ring->index); } } out: return err; } void b43_dma_handle_txstatus(struct b43_wldev *dev, const struct b43_txstatus *status) { const struct b43_dma_ops *ops; struct b43_dmaring *ring; struct b43_dmadesc_generic *desc; struct b43_dmadesc_meta *meta; int slot, firstused; bool frame_succeed; ring = parse_cookie(dev, status->cookie, &slot); if (unlikely(!ring)) return; B43_WARN_ON(!ring->tx); /* Sanity check: TX packets are processed in-order on one ring. * Check if the slot deduced from the cookie really is the first * used slot. 
*/ firstused = ring->current_slot - ring->used_slots + 1; if (firstused < 0) firstused = ring->nr_slots + firstused; if (unlikely(slot != firstused)) { /* This possibly is a firmware bug and will result in * malfunction, memory leaks and/or stall of DMA functionality. */ b43dbg(dev->wl, "Out of order TX status report on DMA ring %d. " "Expected %d, but got %d\n", ring->index, firstused, slot); return; } ops = ring->ops; while (1) { B43_WARN_ON(slot < 0 || slot >= ring->nr_slots); desc = ops->idx2desc(ring, slot, &meta); if (b43_dma_ptr_is_poisoned(meta->skb)) { b43dbg(dev->wl, "Poisoned TX slot %d (first=%d) " "on ring %d\n", slot, firstused, ring->index); break; } if (meta->skb) { struct b43_private_tx_info *priv_info = b43_get_priv_tx_info(IEEE80211_SKB_CB(meta->skb)); unmap_descbuffer(ring, meta->dmaaddr, meta->skb->len, 1); kfree(priv_info->bouncebuffer); priv_info->bouncebuffer = NULL; } else { unmap_descbuffer(ring, meta->dmaaddr, b43_txhdr_size(dev), 1); } if (meta->is_last_fragment) { struct ieee80211_tx_info *info; if (unlikely(!meta->skb)) { /* This is a scatter-gather fragment of a frame, so * the skb pointer must not be NULL. */ b43dbg(dev->wl, "TX status unexpected NULL skb " "at slot %d (first=%d) on ring %d\n", slot, firstused, ring->index); break; } info = IEEE80211_SKB_CB(meta->skb); /* * Call back to inform the ieee80211 subsystem about * the status of the transmission. */ frame_succeed = b43_fill_txstatus_report(dev, info, status); #ifdef CONFIG_B43_DEBUG if (frame_succeed) ring->nr_succeed_tx_packets++; else ring->nr_failed_tx_packets++; ring->nr_total_packet_tries += status->frame_count; #endif /* DEBUG */ ieee80211_tx_status(dev->wl->hw, meta->skb); /* skb will be freed by ieee80211_tx_status(). * Poison our pointer. */ meta->skb = B43_DMA_PTR_POISON; } else { /* No need to call free_descriptor_buffer here, as * this is only the txhdr, which is not allocated. 
*/ if (unlikely(meta->skb)) { b43dbg(dev->wl, "TX status unexpected non-NULL skb " "at slot %d (first=%d) on ring %d\n", slot, firstused, ring->index); break; } } /* Everything unmapped and free'd. So it's not used anymore. */ ring->used_slots--; if (meta->is_last_fragment) { /* This is the last scatter-gather * fragment of the frame. We are done. */ break; } slot = next_slot(ring, slot); } if (ring->stopped) { B43_WARN_ON(free_slots(ring) < TX_SLOTS_PER_FRAME); ieee80211_wake_queue(dev->wl->hw, ring->queue_prio); ring->stopped = 0; if (b43_debug(dev, B43_DBG_DMAVERBOSE)) { b43dbg(dev->wl, "Woke up TX ring %d\n", ring->index); } } } static void dma_rx(struct b43_dmaring *ring, int *slot) { const struct b43_dma_ops *ops = ring->ops; struct b43_dmadesc_generic *desc; struct b43_dmadesc_meta *meta; struct b43_rxhdr_fw4 *rxhdr; struct sk_buff *skb; u16 len; int err; dma_addr_t dmaaddr; desc = ops->idx2desc(ring, *slot, &meta); sync_descbuffer_for_cpu(ring, meta->dmaaddr, ring->rx_buffersize); skb = meta->skb; rxhdr = (struct b43_rxhdr_fw4 *)skb->data; len = le16_to_cpu(rxhdr->frame_len); if (len == 0) { int i = 0; do { udelay(2); barrier(); len = le16_to_cpu(rxhdr->frame_len); } while (len == 0 && i++ < 5); if (unlikely(len == 0)) { dmaaddr = meta->dmaaddr; goto drop_recycle_buffer; } } if (unlikely(b43_rx_buffer_is_poisoned(ring, skb))) { /* Something went wrong with the DMA. * The device did not touch the buffer and did not overwrite the poison. */ b43dbg(ring->dev->wl, "DMA RX: Dropping poisoned buffer.\n"); dmaaddr = meta->dmaaddr; goto drop_recycle_buffer; } if (unlikely(len + ring->frameoffset > ring->rx_buffersize)) { /* The data did not fit into one descriptor buffer * and is split over multiple buffers. * This should never happen, as we try to allocate buffers * big enough. So simply ignore this packet. */ int cnt = 0; s32 tmp = len; while (1) { desc = ops->idx2desc(ring, *slot, &meta); /* recycle the descriptor buffer. 
*/ b43_poison_rx_buffer(ring, meta->skb); sync_descbuffer_for_device(ring, meta->dmaaddr, ring->rx_buffersize); *slot = next_slot(ring, *slot); cnt++; tmp -= ring->rx_buffersize; if (tmp <= 0) break; } b43err(ring->dev->wl, "DMA RX buffer too small " "(len: %u, buffer: %u, nr-dropped: %d)\n", len, ring->rx_buffersize, cnt); goto drop; } dmaaddr = meta->dmaaddr; err = setup_rx_descbuffer(ring, desc, meta, GFP_ATOMIC); if (unlikely(err)) { b43dbg(ring->dev->wl, "DMA RX: setup_rx_descbuffer() failed\n"); goto drop_recycle_buffer; } unmap_descbuffer(ring, dmaaddr, ring->rx_buffersize, 0); skb_put(skb, len + ring->frameoffset); skb_pull(skb, ring->frameoffset); b43_rx(ring->dev, skb, rxhdr); drop: return; drop_recycle_buffer: /* Poison and recycle the RX buffer. */ b43_poison_rx_buffer(ring, skb); sync_descbuffer_for_device(ring, dmaaddr, ring->rx_buffersize); } void b43_dma_rx(struct b43_dmaring *ring) { const struct b43_dma_ops *ops = ring->ops; int slot, current_slot; int used_slots = 0; B43_WARN_ON(ring->tx); current_slot = ops->get_current_rxslot(ring); B43_WARN_ON(!(current_slot >= 0 && current_slot < ring->nr_slots)); slot = ring->current_slot; for (; slot != current_slot; slot = next_slot(ring, slot)) { dma_rx(ring, &slot); update_max_used_slots(ring, ++used_slots); } ops->set_current_rxslot(ring, slot); ring->current_slot = slot; } static void b43_dma_tx_suspend_ring(struct b43_dmaring *ring) { B43_WARN_ON(!ring->tx); ring->ops->tx_suspend(ring); } static void b43_dma_tx_resume_ring(struct b43_dmaring *ring) { B43_WARN_ON(!ring->tx); ring->ops->tx_resume(ring); } void b43_dma_tx_suspend(struct b43_wldev *dev) { b43_power_saving_ctl_bits(dev, B43_PS_AWAKE); b43_dma_tx_suspend_ring(dev->dma.tx_ring_AC_BK); b43_dma_tx_suspend_ring(dev->dma.tx_ring_AC_BE); b43_dma_tx_suspend_ring(dev->dma.tx_ring_AC_VI); b43_dma_tx_suspend_ring(dev->dma.tx_ring_AC_VO); b43_dma_tx_suspend_ring(dev->dma.tx_ring_mcast); } void b43_dma_tx_resume(struct b43_wldev *dev) { 
b43_dma_tx_resume_ring(dev->dma.tx_ring_mcast); b43_dma_tx_resume_ring(dev->dma.tx_ring_AC_VO); b43_dma_tx_resume_ring(dev->dma.tx_ring_AC_VI); b43_dma_tx_resume_ring(dev->dma.tx_ring_AC_BE); b43_dma_tx_resume_ring(dev->dma.tx_ring_AC_BK); b43_power_saving_ctl_bits(dev, 0); } static void direct_fifo_rx(struct b43_wldev *dev, enum b43_dmatype type, u16 mmio_base, bool enable) { u32 ctl; if (type == B43_DMA_64BIT) { ctl = b43_read32(dev, mmio_base + B43_DMA64_RXCTL); ctl &= ~B43_DMA64_RXDIRECTFIFO; if (enable) ctl |= B43_DMA64_RXDIRECTFIFO; b43_write32(dev, mmio_base + B43_DMA64_RXCTL, ctl); } else { ctl = b43_read32(dev, mmio_base + B43_DMA32_RXCTL); ctl &= ~B43_DMA32_RXDIRECTFIFO; if (enable) ctl |= B43_DMA32_RXDIRECTFIFO; b43_write32(dev, mmio_base + B43_DMA32_RXCTL, ctl); } } /* Enable/Disable Direct FIFO Receive Mode (PIO) on a RX engine. * This is called from PIO code, so DMA structures are not available. */ void b43_dma_direct_fifo_rx(struct b43_wldev *dev, unsigned int engine_index, bool enable) { enum b43_dmatype type; u16 mmio_base; type = dma_mask_to_engine_type(supported_dma_mask(dev)); mmio_base = b43_dmacontroller_base(type, engine_index); direct_fifo_rx(dev, type, mmio_base, enable); }
static void dma_rx(struct b43_dmaring *ring, int *slot) { const struct b43_dma_ops *ops = ring->ops; struct b43_dmadesc_generic *desc; struct b43_dmadesc_meta *meta; struct b43_rxhdr_fw4 *rxhdr; struct sk_buff *skb; u16 len; int err; dma_addr_t dmaaddr; desc = ops->idx2desc(ring, *slot, &meta); sync_descbuffer_for_cpu(ring, meta->dmaaddr, ring->rx_buffersize); skb = meta->skb; rxhdr = (struct b43_rxhdr_fw4 *)skb->data; len = le16_to_cpu(rxhdr->frame_len); if (len == 0) { int i = 0; do { udelay(2); barrier(); len = le16_to_cpu(rxhdr->frame_len); } while (len == 0 && i++ < 5); if (unlikely(len == 0)) { dmaaddr = meta->dmaaddr; goto drop_recycle_buffer; } } if (unlikely(b43_rx_buffer_is_poisoned(ring, skb))) { /* Something went wrong with the DMA. * The device did not touch the buffer and did not overwrite the poison. */ b43dbg(ring->dev->wl, "DMA RX: Dropping poisoned buffer.\n"); dmaaddr = meta->dmaaddr; goto drop_recycle_buffer; } if (unlikely(len > ring->rx_buffersize)) { /* The data did not fit into one descriptor buffer * and is split over multiple buffers. * This should never happen, as we try to allocate buffers * big enough. So simply ignore this packet. */ int cnt = 0; s32 tmp = len; while (1) { desc = ops->idx2desc(ring, *slot, &meta); /* recycle the descriptor buffer. 
*/ b43_poison_rx_buffer(ring, meta->skb); sync_descbuffer_for_device(ring, meta->dmaaddr, ring->rx_buffersize); *slot = next_slot(ring, *slot); cnt++; tmp -= ring->rx_buffersize; if (tmp <= 0) break; } b43err(ring->dev->wl, "DMA RX buffer too small " "(len: %u, buffer: %u, nr-dropped: %d)\n", len, ring->rx_buffersize, cnt); goto drop; } dmaaddr = meta->dmaaddr; err = setup_rx_descbuffer(ring, desc, meta, GFP_ATOMIC); if (unlikely(err)) { b43dbg(ring->dev->wl, "DMA RX: setup_rx_descbuffer() failed\n"); goto drop_recycle_buffer; } unmap_descbuffer(ring, dmaaddr, ring->rx_buffersize, 0); skb_put(skb, len + ring->frameoffset); skb_pull(skb, ring->frameoffset); b43_rx(ring->dev, skb, rxhdr); drop: return; drop_recycle_buffer: /* Poison and recycle the RX buffer. */ b43_poison_rx_buffer(ring, skb); sync_descbuffer_for_device(ring, dmaaddr, ring->rx_buffersize); }
static void dma_rx(struct b43_dmaring *ring, int *slot) { const struct b43_dma_ops *ops = ring->ops; struct b43_dmadesc_generic *desc; struct b43_dmadesc_meta *meta; struct b43_rxhdr_fw4 *rxhdr; struct sk_buff *skb; u16 len; int err; dma_addr_t dmaaddr; desc = ops->idx2desc(ring, *slot, &meta); sync_descbuffer_for_cpu(ring, meta->dmaaddr, ring->rx_buffersize); skb = meta->skb; rxhdr = (struct b43_rxhdr_fw4 *)skb->data; len = le16_to_cpu(rxhdr->frame_len); if (len == 0) { int i = 0; do { udelay(2); barrier(); len = le16_to_cpu(rxhdr->frame_len); } while (len == 0 && i++ < 5); if (unlikely(len == 0)) { dmaaddr = meta->dmaaddr; goto drop_recycle_buffer; } } if (unlikely(b43_rx_buffer_is_poisoned(ring, skb))) { /* Something went wrong with the DMA. * The device did not touch the buffer and did not overwrite the poison. */ b43dbg(ring->dev->wl, "DMA RX: Dropping poisoned buffer.\n"); dmaaddr = meta->dmaaddr; goto drop_recycle_buffer; } if (unlikely(len + ring->frameoffset > ring->rx_buffersize)) { /* The data did not fit into one descriptor buffer * and is split over multiple buffers. * This should never happen, as we try to allocate buffers * big enough. So simply ignore this packet. */ int cnt = 0; s32 tmp = len; while (1) { desc = ops->idx2desc(ring, *slot, &meta); /* recycle the descriptor buffer. 
*/ b43_poison_rx_buffer(ring, meta->skb); sync_descbuffer_for_device(ring, meta->dmaaddr, ring->rx_buffersize); *slot = next_slot(ring, *slot); cnt++; tmp -= ring->rx_buffersize; if (tmp <= 0) break; } b43err(ring->dev->wl, "DMA RX buffer too small " "(len: %u, buffer: %u, nr-dropped: %d)\n", len, ring->rx_buffersize, cnt); goto drop; } dmaaddr = meta->dmaaddr; err = setup_rx_descbuffer(ring, desc, meta, GFP_ATOMIC); if (unlikely(err)) { b43dbg(ring->dev->wl, "DMA RX: setup_rx_descbuffer() failed\n"); goto drop_recycle_buffer; } unmap_descbuffer(ring, dmaaddr, ring->rx_buffersize, 0); skb_put(skb, len + ring->frameoffset); skb_pull(skb, ring->frameoffset); b43_rx(ring->dev, skb, rxhdr); drop: return; drop_recycle_buffer: /* Poison and recycle the RX buffer. */ b43_poison_rx_buffer(ring, skb); sync_descbuffer_for_device(ring, dmaaddr, ring->rx_buffersize); }
{'added': [(1539, '\tif (unlikely(len + ring->frameoffset > ring->rx_buffersize)) {')], 'deleted': [(1539, '\tif (unlikely(len > ring->rx_buffersize)) {')]}
1
1
1,285
7,704
67
433
10
https://github.com/torvalds/linux
CVE-2011-3359
CWE-119
877
elf.c
C
store_versioninfo_gnu_verdef
/* radare - LGPL - Copyright 2008-2017 - nibble, pancake, alvaro_fe */ #include <stdio.h> #include <stdlib.h> #include <string.h> #include <assert.h> #include <r_types.h> #include <r_util.h> #include "elf.h" #ifdef IFDBG #undef IFDBG #endif #define DO_THE_DBG 0 #define IFDBG if (DO_THE_DBG) #define IFINT if (0) #define ELF_PAGE_MASK 0xFFFFFFFFFFFFF000LL #define ELF_PAGE_SIZE 12 #define R_ELF_NO_RELRO 0 #define R_ELF_PART_RELRO 1 #define R_ELF_FULL_RELRO 2 #define bprintf if(bin->verbose)eprintf #define READ8(x, i) r_read_ble8(x + i); i += 1; #define READ16(x, i) r_read_ble16(x + i, bin->endian); i += 2; #define READ32(x, i) r_read_ble32(x + i, bin->endian); i += 4; #define READ64(x, i) r_read_ble64(x + i, bin->endian); i += 8; #define GROWTH_FACTOR (1.5) static inline int __strnlen(const char *str, int len) { int l = 0; while (IS_PRINTABLE (*str) && --len) { if (((ut8)*str) == 0xff) { break; } str++; l++; } return l + 1; } static int handle_e_ident(ELFOBJ *bin) { return !strncmp ((char *)bin->ehdr.e_ident, ELFMAG, SELFMAG) || !strncmp ((char *)bin->ehdr.e_ident, CGCMAG, SCGCMAG); } static int init_ehdr(ELFOBJ *bin) { ut8 e_ident[EI_NIDENT]; ut8 ehdr[sizeof (Elf_(Ehdr))] = {0}; int i, len; if (r_buf_read_at (bin->b, 0, e_ident, EI_NIDENT) == -1) { bprintf ("Warning: read (magic)\n"); return false; } sdb_set (bin->kv, "elf_type.cparse", "enum elf_type { ET_NONE=0, ET_REL=1," " ET_EXEC=2, ET_DYN=3, ET_CORE=4, ET_LOOS=0xfe00, ET_HIOS=0xfeff," " ET_LOPROC=0xff00, ET_HIPROC=0xffff };", 0); sdb_set (bin->kv, "elf_machine.cparse", "enum elf_machine{EM_NONE=0, EM_M32=1," " EM_SPARC=2, EM_386=3, EM_68K=4, EM_88K=5, EM_486=6, " " EM_860=7, EM_MIPS=8, EM_S370=9, EM_MIPS_RS3_LE=10, EM_RS6000=11," " EM_UNKNOWN12=12, EM_UNKNOWN13=13, EM_UNKNOWN14=14, " " EM_PA_RISC=15, EM_PARISC=EM_PA_RISC, EM_nCUBE=16, EM_VPP500=17," " EM_SPARC32PLUS=18, EM_960=19, EM_PPC=20, EM_PPC64=21, " " EM_S390=22, EM_UNKNOWN22=EM_S390, EM_UNKNOWN23=23, EM_UNKNOWN24=24," " EM_UNKNOWN25=25, EM_UNKNOWN26=26, 
EM_UNKNOWN27=27, EM_UNKNOWN28=28," " EM_UNKNOWN29=29, EM_UNKNOWN30=30, EM_UNKNOWN31=31, EM_UNKNOWN32=32," " EM_UNKNOWN33=33, EM_UNKNOWN34=34, EM_UNKNOWN35=35, EM_V800=36," " EM_FR20=37, EM_RH32=38, EM_RCE=39, EM_ARM=40, EM_ALPHA=41, EM_SH=42," " EM_SPARCV9=43, EM_TRICORE=44, EM_ARC=45, EM_H8_300=46, EM_H8_300H=47," " EM_H8S=48, EM_H8_500=49, EM_IA_64=50, EM_MIPS_X=51, EM_COLDFIRE=52," " EM_68HC12=53, EM_MMA=54, EM_PCP=55, EM_NCPU=56, EM_NDR1=57," " EM_STARCORE=58, EM_ME16=59, EM_ST100=60, EM_TINYJ=61, EM_AMD64=62," " EM_X86_64=EM_AMD64, EM_PDSP=63, EM_UNKNOWN64=64, EM_UNKNOWN65=65," " EM_FX66=66, EM_ST9PLUS=67, EM_ST7=68, EM_68HC16=69, EM_68HC11=70," " EM_68HC08=71, EM_68HC05=72, EM_SVX=73, EM_ST19=74, EM_VAX=75, " " EM_CRIS=76, EM_JAVELIN=77, EM_FIREPATH=78, EM_ZSP=79, EM_MMIX=80," " EM_HUANY=81, EM_PRISM=82, EM_AVR=83, EM_FR30=84, EM_D10V=85, EM_D30V=86," " EM_V850=87, EM_M32R=88, EM_MN10300=89, EM_MN10200=90, EM_PJ=91," " EM_OPENRISC=92, EM_ARC_A5=93, EM_XTENSA=94, EM_NUM=95};", 0); sdb_num_set (bin->kv, "elf_header.offset", 0, 0); sdb_num_set (bin->kv, "elf_header.size", sizeof (Elf_(Ehdr)), 0); #if R_BIN_ELF64 sdb_set (bin->kv, "elf_header.format", "[16]z[2]E[2]Exqqqxwwwwww" " ident (elf_type)type (elf_machine)machine version entry phoff shoff flags ehsize" " phentsize phnum shentsize shnum shstrndx", 0); #else sdb_set (bin->kv, "elf_header.format", "[16]z[2]E[2]Exxxxxwwwwww" " ident (elf_type)type (elf_machine)machine version entry phoff shoff flags ehsize" " phentsize phnum shentsize shnum shstrndx", 0); #endif bin->endian = (e_ident[EI_DATA] == ELFDATA2MSB)? 
1: 0; memset (&bin->ehdr, 0, sizeof (Elf_(Ehdr))); len = r_buf_read_at (bin->b, 0, ehdr, sizeof (Elf_(Ehdr))); if (len < 1) { bprintf ("Warning: read (ehdr)\n"); return false; } memcpy (&bin->ehdr.e_ident, ehdr, 16); i = 16; bin->ehdr.e_type = READ16 (ehdr, i) bin->ehdr.e_machine = READ16 (ehdr, i) bin->ehdr.e_version = READ32 (ehdr, i) #if R_BIN_ELF64 bin->ehdr.e_entry = READ64 (ehdr, i) bin->ehdr.e_phoff = READ64 (ehdr, i) bin->ehdr.e_shoff = READ64 (ehdr, i) #else bin->ehdr.e_entry = READ32 (ehdr, i) bin->ehdr.e_phoff = READ32 (ehdr, i) bin->ehdr.e_shoff = READ32 (ehdr, i) #endif bin->ehdr.e_flags = READ32 (ehdr, i) bin->ehdr.e_ehsize = READ16 (ehdr, i) bin->ehdr.e_phentsize = READ16 (ehdr, i) bin->ehdr.e_phnum = READ16 (ehdr, i) bin->ehdr.e_shentsize = READ16 (ehdr, i) bin->ehdr.e_shnum = READ16 (ehdr, i) bin->ehdr.e_shstrndx = READ16 (ehdr, i) return handle_e_ident (bin); // Usage example: // > td `k bin/cur/info/elf_type.cparse`; td `k bin/cur/info/elf_machine.cparse` // > pf `k bin/cur/info/elf_header.format` @ `k bin/cur/info/elf_header.offset` } static int init_phdr(ELFOBJ *bin) { ut32 phdr_size; ut8 phdr[sizeof (Elf_(Phdr))] = {0}; int i, j, len; if (!bin->ehdr.e_phnum) { return false; } if (bin->phdr) { return true; } if (!UT32_MUL (&phdr_size, (ut32)bin->ehdr.e_phnum, sizeof (Elf_(Phdr)))) { return false; } if (!phdr_size) { return false; } if (phdr_size > bin->size) { return false; } if (phdr_size > (ut32)bin->size) { return false; } if (bin->ehdr.e_phoff > bin->size) { return false; } if (bin->ehdr.e_phoff + phdr_size > bin->size) { return false; } if (!(bin->phdr = calloc (phdr_size, 1))) { perror ("malloc (phdr)"); return false; } for (i = 0; i < bin->ehdr.e_phnum; i++) { j = 0; len = r_buf_read_at (bin->b, bin->ehdr.e_phoff + i * sizeof (Elf_(Phdr)), phdr, sizeof (Elf_(Phdr))); if (len < 1) { bprintf ("Warning: read (phdr)\n"); R_FREE (bin->phdr); return false; } bin->phdr[i].p_type = READ32 (phdr, j) #if R_BIN_ELF64 bin->phdr[i].p_flags = READ32 
(phdr, j) bin->phdr[i].p_offset = READ64 (phdr, j) bin->phdr[i].p_vaddr = READ64 (phdr, j) bin->phdr[i].p_paddr = READ64 (phdr, j) bin->phdr[i].p_filesz = READ64 (phdr, j) bin->phdr[i].p_memsz = READ64 (phdr, j) bin->phdr[i].p_align = READ64 (phdr, j) #else bin->phdr[i].p_offset = READ32 (phdr, j) bin->phdr[i].p_vaddr = READ32 (phdr, j) bin->phdr[i].p_paddr = READ32 (phdr, j) bin->phdr[i].p_filesz = READ32 (phdr, j) bin->phdr[i].p_memsz = READ32 (phdr, j) bin->phdr[i].p_flags = READ32 (phdr, j) bin->phdr[i].p_align = READ32 (phdr, j) #endif } sdb_num_set (bin->kv, "elf_phdr.offset", bin->ehdr.e_phoff, 0); sdb_num_set (bin->kv, "elf_phdr.size", sizeof (Elf_(Phdr)), 0); sdb_set (bin->kv, "elf_p_type.cparse", "enum elf_p_type {PT_NULL=0,PT_LOAD=1,PT_DYNAMIC=2," "PT_INTERP=3,PT_NOTE=4,PT_SHLIB=5,PT_PHDR=6,PT_LOOS=0x60000000," "PT_HIOS=0x6fffffff,PT_LOPROC=0x70000000,PT_HIPROC=0x7fffffff};", 0); sdb_set (bin->kv, "elf_p_flags.cparse", "enum elf_p_flags {PF_None=0,PF_Exec=1," "PF_Write=2,PF_Write_Exec=3,PF_Read=4,PF_Read_Exec=5,PF_Read_Write=6," "PF_Read_Write_Exec=7};", 0); #if R_BIN_ELF64 sdb_set (bin->kv, "elf_phdr.format", "[4]E[4]Eqqqqqq (elf_p_type)type (elf_p_flags)flags" " offset vaddr paddr filesz memsz align", 0); #else sdb_set (bin->kv, "elf_phdr.format", "[4]Exxxxx[4]Ex (elf_p_type)type offset vaddr paddr" " filesz memsz (elf_p_flags)flags align", 0); #endif return true; // Usage example: // > td `k bin/cur/info/elf_p_type.cparse`; td `k bin/cur/info/elf_p_flags.cparse` // > pf `k bin/cur/info/elf_phdr.format` @ `k bin/cur/info/elf_phdr.offset` } static int init_shdr(ELFOBJ *bin) { ut32 shdr_size; ut8 shdr[sizeof (Elf_(Shdr))] = {0}; int i, j, len; if (!bin || bin->shdr) { return true; } if (!UT32_MUL (&shdr_size, bin->ehdr.e_shnum, sizeof (Elf_(Shdr)))) { return false; } if (shdr_size < 1) { return false; } if (shdr_size > bin->size) { return false; } if (bin->ehdr.e_shoff > bin->size) { return false; } if (bin->ehdr.e_shoff + shdr_size > bin->size) { return 
false; } if (!(bin->shdr = calloc (1, shdr_size + 1))) { perror ("malloc (shdr)"); return false; } sdb_num_set (bin->kv, "elf_shdr.offset", bin->ehdr.e_shoff, 0); sdb_num_set (bin->kv, "elf_shdr.size", sizeof (Elf_(Shdr)), 0); sdb_set (bin->kv, "elf_s_type.cparse", "enum elf_s_type {SHT_NULL=0,SHT_PROGBITS=1," "SHT_SYMTAB=2,SHT_STRTAB=3,SHT_RELA=4,SHT_HASH=5,SHT_DYNAMIC=6,SHT_NOTE=7," "SHT_NOBITS=8,SHT_REL=9,SHT_SHLIB=10,SHT_DYNSYM=11,SHT_LOOS=0x60000000," "SHT_HIOS=0x6fffffff,SHT_LOPROC=0x70000000,SHT_HIPROC=0x7fffffff};", 0); for (i = 0; i < bin->ehdr.e_shnum; i++) { j = 0; len = r_buf_read_at (bin->b, bin->ehdr.e_shoff + i * sizeof (Elf_(Shdr)), shdr, sizeof (Elf_(Shdr))); if (len < 1) { bprintf ("Warning: read (shdr) at 0x%"PFMT64x"\n", (ut64) bin->ehdr.e_shoff); R_FREE (bin->shdr); return false; } bin->shdr[i].sh_name = READ32 (shdr, j) bin->shdr[i].sh_type = READ32 (shdr, j) #if R_BIN_ELF64 bin->shdr[i].sh_flags = READ64 (shdr, j) bin->shdr[i].sh_addr = READ64 (shdr, j) bin->shdr[i].sh_offset = READ64 (shdr, j) bin->shdr[i].sh_size = READ64 (shdr, j) bin->shdr[i].sh_link = READ32 (shdr, j) bin->shdr[i].sh_info = READ32 (shdr, j) bin->shdr[i].sh_addralign = READ64 (shdr, j) bin->shdr[i].sh_entsize = READ64 (shdr, j) #else bin->shdr[i].sh_flags = READ32 (shdr, j) bin->shdr[i].sh_addr = READ32 (shdr, j) bin->shdr[i].sh_offset = READ32 (shdr, j) bin->shdr[i].sh_size = READ32 (shdr, j) bin->shdr[i].sh_link = READ32 (shdr, j) bin->shdr[i].sh_info = READ32 (shdr, j) bin->shdr[i].sh_addralign = READ32 (shdr, j) bin->shdr[i].sh_entsize = READ32 (shdr, j) #endif } #if R_BIN_ELF64 sdb_set (bin->kv, "elf_s_flags_64.cparse", "enum elf_s_flags_64 {SF64_None=0,SF64_Exec=1," "SF64_Alloc=2,SF64_Alloc_Exec=3,SF64_Write=4,SF64_Write_Exec=5," "SF64_Write_Alloc=6,SF64_Write_Alloc_Exec=7};", 0); sdb_set (bin->kv, "elf_shdr.format", "x[4]E[8]Eqqqxxqq name (elf_s_type)type" " (elf_s_flags_64)flags addr offset size link info addralign entsize", 0); #else sdb_set (bin->kv, 
"elf_s_flags_32.cparse", "enum elf_s_flags_32 {SF32_None=0,SF32_Exec=1," "SF32_Alloc=2,SF32_Alloc_Exec=3,SF32_Write=4,SF32_Write_Exec=5," "SF32_Write_Alloc=6,SF32_Write_Alloc_Exec=7};", 0); sdb_set (bin->kv, "elf_shdr.format", "x[4]E[4]Exxxxxxx name (elf_s_type)type" " (elf_s_flags_32)flags addr offset size link info addralign entsize", 0); #endif return true; // Usage example: // > td `k bin/cur/info/elf_s_type.cparse`; td `k bin/cur/info/elf_s_flags_64.cparse` // > pf `k bin/cur/info/elf_shdr.format` @ `k bin/cur/info/elf_shdr.offset` } static int init_strtab(ELFOBJ *bin) { if (bin->strtab || !bin->shdr) { return false; } if (bin->ehdr.e_shstrndx != SHN_UNDEF && (bin->ehdr.e_shstrndx >= bin->ehdr.e_shnum || (bin->ehdr.e_shstrndx >= SHN_LORESERVE && bin->ehdr.e_shstrndx < SHN_HIRESERVE))) return false; /* sh_size must be lower than UT32_MAX and not equal to zero, to avoid bugs on malloc() */ if (bin->shdr[bin->ehdr.e_shstrndx].sh_size > UT32_MAX) { return false; } if (!bin->shdr[bin->ehdr.e_shstrndx].sh_size) { return false; } bin->shstrtab_section = bin->strtab_section = &bin->shdr[bin->ehdr.e_shstrndx]; bin->shstrtab_size = bin->strtab_section->sh_size; if (bin->shstrtab_size > bin->size) { return false; } if (!(bin->shstrtab = calloc (1, bin->shstrtab_size + 1))) { perror ("malloc"); bin->shstrtab = NULL; return false; } if (bin->shstrtab_section->sh_offset > bin->size) { R_FREE (bin->shstrtab); return false; } if (bin->shstrtab_section->sh_offset + bin->shstrtab_section->sh_size > bin->size) { R_FREE (bin->shstrtab); return false; } if (r_buf_read_at (bin->b, bin->shstrtab_section->sh_offset, (ut8*)bin->shstrtab, bin->shstrtab_section->sh_size + 1) < 1) { bprintf ("Warning: read (shstrtab) at 0x%"PFMT64x"\n", (ut64) bin->shstrtab_section->sh_offset); R_FREE (bin->shstrtab); return false; } bin->shstrtab[bin->shstrtab_section->sh_size] = '\0'; sdb_num_set (bin->kv, "elf_shstrtab.offset", bin->shstrtab_section->sh_offset, 0); sdb_num_set (bin->kv, 
"elf_shstrtab.size", bin->shstrtab_section->sh_size, 0); return true; } static int init_dynamic_section(struct Elf_(r_bin_elf_obj_t) *bin) { Elf_(Dyn) *dyn = NULL; Elf_(Dyn) d = {0}; Elf_(Addr) strtabaddr = 0; ut64 offset = 0; char *strtab = NULL; size_t relentry = 0, strsize = 0; int entries; int i, j, len, r; ut8 sdyn[sizeof (Elf_(Dyn))] = {0}; ut32 dyn_size = 0; if (!bin || !bin->phdr || !bin->ehdr.e_phnum) { return false; } for (i = 0; i < bin->ehdr.e_phnum ; i++) { if (bin->phdr[i].p_type == PT_DYNAMIC) { dyn_size = bin->phdr[i].p_filesz; break; } } if (i == bin->ehdr.e_phnum) { return false; } if (bin->phdr[i].p_filesz > bin->size) { return false; } if (bin->phdr[i].p_offset > bin->size) { return false; } if (bin->phdr[i].p_offset + sizeof(Elf_(Dyn)) > bin->size) { return false; } for (entries = 0; entries < (dyn_size / sizeof (Elf_(Dyn))); entries++) { j = 0; len = r_buf_read_at (bin->b, bin->phdr[i].p_offset + entries * sizeof (Elf_(Dyn)), sdyn, sizeof (Elf_(Dyn))); if (len < 1) { goto beach; } #if R_BIN_ELF64 d.d_tag = READ64 (sdyn, j) #else d.d_tag = READ32 (sdyn, j) #endif if (d.d_tag == DT_NULL) { break; } } if (entries < 1) { return false; } dyn = (Elf_(Dyn)*)calloc (entries, sizeof (Elf_(Dyn))); if (!dyn) { return false; } if (!UT32_MUL (&dyn_size, entries, sizeof (Elf_(Dyn)))) { goto beach; } if (!dyn_size) { goto beach; } offset = Elf_(r_bin_elf_v2p) (bin, bin->phdr[i].p_vaddr); if (offset > bin->size || offset + dyn_size > bin->size) { goto beach; } for (i = 0; i < entries; i++) { j = 0; r_buf_read_at (bin->b, offset + i * sizeof (Elf_(Dyn)), sdyn, sizeof (Elf_(Dyn))); if (len < 1) { bprintf("Warning: read (dyn)\n"); } #if R_BIN_ELF64 dyn[i].d_tag = READ64 (sdyn, j) dyn[i].d_un.d_ptr = READ64 (sdyn, j) #else dyn[i].d_tag = READ32 (sdyn, j) dyn[i].d_un.d_ptr = READ32 (sdyn, j) #endif switch (dyn[i].d_tag) { case DT_STRTAB: strtabaddr = Elf_(r_bin_elf_v2p) (bin, dyn[i].d_un.d_ptr); break; case DT_STRSZ: strsize = dyn[i].d_un.d_val; break; case 
DT_PLTREL: bin->is_rela = dyn[i].d_un.d_val; break; case DT_RELAENT: relentry = dyn[i].d_un.d_val; break; default: if ((dyn[i].d_tag >= DT_VERSYM) && (dyn[i].d_tag <= DT_VERNEEDNUM)) { bin->version_info[DT_VERSIONTAGIDX (dyn[i].d_tag)] = dyn[i].d_un.d_val; } break; } } if (!bin->is_rela) { bin->is_rela = sizeof (Elf_(Rela)) == relentry? DT_RELA : DT_REL; } if (!strtabaddr || strtabaddr > bin->size || strsize > ST32_MAX || !strsize || strsize > bin->size) { if (!strtabaddr) { bprintf ("Warning: section.shstrtab not found or invalid\n"); } goto beach; } strtab = (char *)calloc (1, strsize + 1); if (!strtab) { goto beach; } if (strtabaddr + strsize > bin->size) { free (strtab); goto beach; } r = r_buf_read_at (bin->b, strtabaddr, (ut8 *)strtab, strsize); if (r < 1) { free (strtab); goto beach; } bin->dyn_buf = dyn; bin->dyn_entries = entries; bin->strtab = strtab; bin->strtab_size = strsize; r = Elf_(r_bin_elf_has_relro)(bin); switch (r) { case R_ELF_FULL_RELRO: sdb_set (bin->kv, "elf.relro", "full", 0); break; case R_ELF_PART_RELRO: sdb_set (bin->kv, "elf.relro", "partial", 0); break; default: sdb_set (bin->kv, "elf.relro", "no", 0); break; } sdb_num_set (bin->kv, "elf_strtab.offset", strtabaddr, 0); sdb_num_set (bin->kv, "elf_strtab.size", strsize, 0); return true; beach: free (dyn); return false; } static RBinElfSection* get_section_by_name(ELFOBJ *bin, const char *section_name) { int i; if (!bin->g_sections) { return NULL; } for (i = 0; !bin->g_sections[i].last; i++) { if (!strncmp (bin->g_sections[i].name, section_name, ELF_STRING_LENGTH-1)) { return &bin->g_sections[i]; } } return NULL; } static char *get_ver_flags(ut32 flags) { static char buff[32]; buff[0] = 0; if (!flags) { return "none"; } if (flags & VER_FLG_BASE) { strcpy (buff, "BASE "); } if (flags & VER_FLG_WEAK) { if (flags & VER_FLG_BASE) { strcat (buff, "| "); } strcat (buff, "WEAK "); } if (flags & ~(VER_FLG_BASE | VER_FLG_WEAK)) { strcat (buff, "| <unknown>"); } return buff; } static Sdb 
*store_versioninfo_gnu_versym(ELFOBJ *bin, Elf_(Shdr) *shdr, int sz) { int i; const ut64 num_entries = sz / sizeof (Elf_(Versym)); const char *section_name = ""; const char *link_section_name = ""; Elf_(Shdr) *link_shdr = NULL; Sdb *sdb = sdb_new0(); if (!sdb) { return NULL; } if (!bin->version_info[DT_VERSIONTAGIDX (DT_VERSYM)]) { sdb_free (sdb); return NULL; } if (shdr->sh_link > bin->ehdr.e_shnum) { sdb_free (sdb); return NULL; } link_shdr = &bin->shdr[shdr->sh_link]; ut8 *edata = (ut8*) calloc (R_MAX (1, num_entries), sizeof (ut16)); if (!edata) { sdb_free (sdb); return NULL; } ut16 *data = (ut16*) calloc (R_MAX (1, num_entries), sizeof (ut16)); if (!data) { free (edata); sdb_free (sdb); return NULL; } ut64 off = Elf_(r_bin_elf_v2p) (bin, bin->version_info[DT_VERSIONTAGIDX (DT_VERSYM)]); if (bin->shstrtab && shdr->sh_name < bin->shstrtab_size) { section_name = &bin->shstrtab[shdr->sh_name]; } if (bin->shstrtab && link_shdr->sh_name < bin->shstrtab_size) { link_section_name = &bin->shstrtab[link_shdr->sh_name]; } r_buf_read_at (bin->b, off, edata, sizeof (ut16) * num_entries); sdb_set (sdb, "section_name", section_name, 0); sdb_num_set (sdb, "num_entries", num_entries, 0); sdb_num_set (sdb, "addr", shdr->sh_addr, 0); sdb_num_set (sdb, "offset", shdr->sh_offset, 0); sdb_num_set (sdb, "link", shdr->sh_link, 0); sdb_set (sdb, "link_section_name", link_section_name, 0); for (i = num_entries; i--;) { data[i] = r_read_ble16 (&edata[i * sizeof (ut16)], bin->endian); } R_FREE (edata); for (i = 0; i < num_entries; i += 4) { int j; int check_def; char key[32] = {0}; Sdb *sdb_entry = sdb_new0 (); snprintf (key, sizeof (key), "entry%d", i / 4); sdb_ns_set (sdb, key, sdb_entry); sdb_num_set (sdb_entry, "idx", i, 0); for (j = 0; (j < 4) && (i + j) < num_entries; ++j) { int k; char *tmp_val = NULL; snprintf (key, sizeof (key), "value%d", j); switch (data[i + j]) { case 0: sdb_set (sdb_entry, key, "0 (*local*)", 0); break; case 1: sdb_set (sdb_entry, key, "1 (*global*)", 0); 
break; default: tmp_val = sdb_fmt (0, "%x ", data[i+j] & 0x7FFF); check_def = true; if (bin->version_info[DT_VERSIONTAGIDX (DT_VERNEED)]) { Elf_(Verneed) vn; ut8 svn[sizeof (Elf_(Verneed))] = {0}; ut64 offset = Elf_(r_bin_elf_v2p) (bin, bin->version_info[DT_VERSIONTAGIDX (DT_VERNEED)]); do { Elf_(Vernaux) vna; ut8 svna[sizeof (Elf_(Vernaux))] = {0}; ut64 a_off; if (offset > bin->size || offset + sizeof (vn) > bin->size) { goto beach; } if (r_buf_read_at (bin->b, offset, svn, sizeof (svn)) < 0) { bprintf ("Warning: Cannot read Verneed for Versym\n"); goto beach; } k = 0; vn.vn_version = READ16 (svn, k) vn.vn_cnt = READ16 (svn, k) vn.vn_file = READ32 (svn, k) vn.vn_aux = READ32 (svn, k) vn.vn_next = READ32 (svn, k) a_off = offset + vn.vn_aux; do { if (a_off > bin->size || a_off + sizeof (vna) > bin->size) { goto beach; } if (r_buf_read_at (bin->b, a_off, svna, sizeof (svna)) < 0) { bprintf ("Warning: Cannot read Vernaux for Versym\n"); goto beach; } k = 0; vna.vna_hash = READ32 (svna, k) vna.vna_flags = READ16 (svna, k) vna.vna_other = READ16 (svna, k) vna.vna_name = READ32 (svna, k) vna.vna_next = READ32 (svna, k) a_off += vna.vna_next; } while (vna.vna_other != data[i + j] && vna.vna_next != 0); if (vna.vna_other == data[i + j]) { if (vna.vna_name > bin->strtab_size) { goto beach; } sdb_set (sdb_entry, key, sdb_fmt (0, "%s(%s)", tmp_val, bin->strtab + vna.vna_name), 0); check_def = false; break; } offset += vn.vn_next; } while (vn.vn_next); } ut64 vinfoaddr = bin->version_info[DT_VERSIONTAGIDX (DT_VERDEF)]; if (check_def && data[i + j] != 0x8001 && vinfoaddr) { Elf_(Verdef) vd; ut8 svd[sizeof (Elf_(Verdef))] = {0}; ut64 offset = Elf_(r_bin_elf_v2p) (bin, vinfoaddr); if (offset > bin->size || offset + sizeof (vd) > bin->size) { goto beach; } do { if (r_buf_read_at (bin->b, offset, svd, sizeof (svd)) < 0) { bprintf ("Warning: Cannot read Verdef for Versym\n"); goto beach; } k = 0; vd.vd_version = READ16 (svd, k) vd.vd_flags = READ16 (svd, k) vd.vd_ndx = READ16 (svd, 
k) vd.vd_cnt = READ16 (svd, k) vd.vd_hash = READ32 (svd, k) vd.vd_aux = READ32 (svd, k) vd.vd_next = READ32 (svd, k) offset += vd.vd_next; } while (vd.vd_ndx != (data[i + j] & 0x7FFF) && vd.vd_next != 0); if (vd.vd_ndx == (data[i + j] & 0x7FFF)) { Elf_(Verdaux) vda; ut8 svda[sizeof (Elf_(Verdaux))] = {0}; ut64 off_vda = offset - vd.vd_next + vd.vd_aux; if (off_vda > bin->size || off_vda + sizeof (vda) > bin->size) { goto beach; } if (r_buf_read_at (bin->b, off_vda, svda, sizeof (svda)) < 0) { bprintf ("Warning: Cannot read Verdaux for Versym\n"); goto beach; } k = 0; vda.vda_name = READ32 (svda, k) vda.vda_next = READ32 (svda, k) if (vda.vda_name > bin->strtab_size) { goto beach; } const char *name = bin->strtab + vda.vda_name; sdb_set (sdb_entry, key, sdb_fmt (0,"%s(%s%-*s)", tmp_val, name, (int)(12 - strlen (name)),")") , 0); } } } } } beach: free (data); return sdb; } static Sdb *store_versioninfo_gnu_verdef(ELFOBJ *bin, Elf_(Shdr) *shdr, int sz) { const char *section_name = ""; const char *link_section_name = ""; char *end = NULL; Elf_(Shdr) *link_shdr = NULL; ut8 dfs[sizeof (Elf_(Verdef))] = {0}; Sdb *sdb; int cnt, i; if (shdr->sh_link > bin->ehdr.e_shnum) { return false; } link_shdr = &bin->shdr[shdr->sh_link]; if (shdr->sh_size < 1) { return false; } Elf_(Verdef) *defs = calloc (shdr->sh_size, sizeof (char)); if (!defs) { return false; } if (bin->shstrtab && shdr->sh_name < bin->shstrtab_size) { section_name = &bin->shstrtab[shdr->sh_name]; } if (link_shdr && bin->shstrtab && link_shdr->sh_name < bin->shstrtab_size) { link_section_name = &bin->shstrtab[link_shdr->sh_name]; } if (!defs) { bprintf ("Warning: Cannot allocate memory (Check Elf_(Verdef))\n"); return NULL; } sdb = sdb_new0 (); end = (char *)defs + shdr->sh_size; sdb_set (sdb, "section_name", section_name, 0); sdb_num_set (sdb, "entries", shdr->sh_info, 0); sdb_num_set (sdb, "addr", shdr->sh_addr, 0); sdb_num_set (sdb, "offset", shdr->sh_offset, 0); sdb_num_set (sdb, "link", shdr->sh_link, 0); 
sdb_set (sdb, "link_section_name", link_section_name, 0); for (cnt = 0, i = 0; i >= 0 && cnt < shdr->sh_info && ((char *)defs + i < end); ++cnt) { Sdb *sdb_verdef = sdb_new0 (); char *vstart = ((char*)defs) + i; char key[32] = {0}; Elf_(Verdef) *verdef = (Elf_(Verdef)*)vstart; Elf_(Verdaux) aux = {0}; int j = 0; int isum = 0; r_buf_read_at (bin->b, shdr->sh_offset + i, dfs, sizeof (Elf_(Verdef))); verdef->vd_version = READ16 (dfs, j) verdef->vd_flags = READ16 (dfs, j) verdef->vd_ndx = READ16 (dfs, j) verdef->vd_cnt = READ16 (dfs, j) verdef->vd_hash = READ32 (dfs, j) verdef->vd_aux = READ32 (dfs, j) verdef->vd_next = READ32 (dfs, j) int vdaux = verdef->vd_aux; if (vdaux < 1) { sdb_free (sdb_verdef); goto out_error; } vstart += vdaux; if (vstart > end || vstart + sizeof (Elf_(Verdaux)) > end) { sdb_free (sdb_verdef); goto out_error; } j = 0; aux.vda_name = READ32 (vstart, j) aux.vda_next = READ32 (vstart, j) isum = i + verdef->vd_aux; if (aux.vda_name > bin->dynstr_size) { sdb_free (sdb_verdef); goto out_error; } sdb_num_set (sdb_verdef, "idx", i, 0); sdb_num_set (sdb_verdef, "vd_version", verdef->vd_version, 0); sdb_num_set (sdb_verdef, "vd_ndx", verdef->vd_ndx, 0); sdb_num_set (sdb_verdef, "vd_cnt", verdef->vd_cnt, 0); sdb_set (sdb_verdef, "vda_name", &bin->dynstr[aux.vda_name], 0); sdb_set (sdb_verdef, "flags", get_ver_flags (verdef->vd_flags), 0); for (j = 1; j < verdef->vd_cnt; ++j) { int k; Sdb *sdb_parent = sdb_new0 (); isum += aux.vda_next; vstart += aux.vda_next; if (vstart > end || vstart + sizeof(Elf_(Verdaux)) > end) { sdb_free (sdb_verdef); sdb_free (sdb_parent); goto out_error; } k = 0; aux.vda_name = READ32 (vstart, k) aux.vda_next = READ32 (vstart, k) if (aux.vda_name > bin->dynstr_size) { sdb_free (sdb_verdef); sdb_free (sdb_parent); goto out_error; } sdb_num_set (sdb_parent, "idx", isum, 0); sdb_num_set (sdb_parent, "parent", j, 0); sdb_set (sdb_parent, "vda_name", &bin->dynstr[aux.vda_name], 0); snprintf (key, sizeof (key), "parent%d", j - 1); 
sdb_ns_set (sdb_verdef, key, sdb_parent); } snprintf (key, sizeof (key), "verdef%d", cnt); sdb_ns_set (sdb, key, sdb_verdef); if (!verdef->vd_next) { sdb_free (sdb_verdef); goto out_error; } if ((st32)verdef->vd_next < 1) { eprintf ("Warning: Invalid vd_next in the ELF version\n"); break; } i += verdef->vd_next; } free (defs); return sdb; out_error: free (defs); sdb_free (sdb); return NULL; } static Sdb *store_versioninfo_gnu_verneed(ELFOBJ *bin, Elf_(Shdr) *shdr, int sz) { ut8 *end, *need = NULL; const char *section_name = ""; Elf_(Shdr) *link_shdr = NULL; const char *link_section_name = ""; Sdb *sdb_vernaux = NULL; Sdb *sdb_version = NULL; Sdb *sdb = NULL; int i, cnt; if (!bin || !bin->dynstr) { return NULL; } if (shdr->sh_link > bin->ehdr.e_shnum) { return NULL; } if (shdr->sh_size < 1) { return NULL; } sdb = sdb_new0 (); if (!sdb) { return NULL; } link_shdr = &bin->shdr[shdr->sh_link]; if (bin->shstrtab && shdr->sh_name < bin->shstrtab_size) { section_name = &bin->shstrtab[shdr->sh_name]; } if (bin->shstrtab && link_shdr->sh_name < bin->shstrtab_size) { link_section_name = &bin->shstrtab[link_shdr->sh_name]; } if (!(need = (ut8*) calloc (R_MAX (1, shdr->sh_size), sizeof (ut8)))) { bprintf ("Warning: Cannot allocate memory for Elf_(Verneed)\n"); goto beach; } end = need + shdr->sh_size; sdb_set (sdb, "section_name", section_name, 0); sdb_num_set (sdb, "num_entries", shdr->sh_info, 0); sdb_num_set (sdb, "addr", shdr->sh_addr, 0); sdb_num_set (sdb, "offset", shdr->sh_offset, 0); sdb_num_set (sdb, "link", shdr->sh_link, 0); sdb_set (sdb, "link_section_name", link_section_name, 0); if (shdr->sh_offset > bin->size || shdr->sh_offset + shdr->sh_size > bin->size) { goto beach; } if (shdr->sh_offset + shdr->sh_size < shdr->sh_size) { goto beach; } i = r_buf_read_at (bin->b, shdr->sh_offset, need, shdr->sh_size); if (i < 0) goto beach; //XXX we should use DT_VERNEEDNUM instead of sh_info //TODO https://sourceware.org/ml/binutils/2014-11/msg00353.html for (i = 0, cnt = 0; 
cnt < shdr->sh_info; ++cnt) { int j, isum; ut8 *vstart = need + i; Elf_(Verneed) vvn = {0}; if (vstart + sizeof (Elf_(Verneed)) > end) { goto beach; } Elf_(Verneed) *entry = &vvn; char key[32] = {0}; sdb_version = sdb_new0 (); if (!sdb_version) { goto beach; } j = 0; vvn.vn_version = READ16 (vstart, j) vvn.vn_cnt = READ16 (vstart, j) vvn.vn_file = READ32 (vstart, j) vvn.vn_aux = READ32 (vstart, j) vvn.vn_next = READ32 (vstart, j) sdb_num_set (sdb_version, "vn_version", entry->vn_version, 0); sdb_num_set (sdb_version, "idx", i, 0); if (entry->vn_file > bin->dynstr_size) { goto beach; } { char *s = r_str_ndup (&bin->dynstr[entry->vn_file], 16); sdb_set (sdb_version, "file_name", s, 0); free (s); } sdb_num_set (sdb_version, "cnt", entry->vn_cnt, 0); st32 vnaux = entry->vn_aux; if (vnaux < 1) { goto beach; } vstart += vnaux; for (j = 0, isum = i + entry->vn_aux; j < entry->vn_cnt && vstart + sizeof (Elf_(Vernaux)) <= end; ++j) { int k; Elf_(Vernaux) * aux = NULL; Elf_(Vernaux) vaux = {0}; sdb_vernaux = sdb_new0 (); if (!sdb_vernaux) { goto beach; } aux = (Elf_(Vernaux)*)&vaux; k = 0; vaux.vna_hash = READ32 (vstart, k) vaux.vna_flags = READ16 (vstart, k) vaux.vna_other = READ16 (vstart, k) vaux.vna_name = READ32 (vstart, k) vaux.vna_next = READ32 (vstart, k) if (aux->vna_name > bin->dynstr_size) { goto beach; } sdb_num_set (sdb_vernaux, "idx", isum, 0); if (aux->vna_name > 0 && aux->vna_name + 8 < bin->dynstr_size) { char name [16]; strncpy (name, &bin->dynstr[aux->vna_name], sizeof (name)-1); name[sizeof(name)-1] = 0; sdb_set (sdb_vernaux, "name", name, 0); } sdb_set (sdb_vernaux, "flags", get_ver_flags (aux->vna_flags), 0); sdb_num_set (sdb_vernaux, "version", aux->vna_other, 0); isum += aux->vna_next; vstart += aux->vna_next; snprintf (key, sizeof (key), "vernaux%d", j); sdb_ns_set (sdb_version, key, sdb_vernaux); } if ((int)entry->vn_next < 0) { bprintf ("Invalid vn_next\n"); break; } i += entry->vn_next; snprintf (key, sizeof (key), "version%d", cnt ); sdb_ns_set 
(sdb, key, sdb_version); //if entry->vn_next is 0 it iterate infinitely if (!entry->vn_next) { break; } } free (need); return sdb; beach: free (need); sdb_free (sdb_vernaux); sdb_free (sdb_version); sdb_free (sdb); return NULL; } static Sdb *store_versioninfo(ELFOBJ *bin) { Sdb *sdb_versioninfo = NULL; int num_verdef = 0; int num_verneed = 0; int num_versym = 0; int i; if (!bin || !bin->shdr) { return NULL; } if (!(sdb_versioninfo = sdb_new0 ())) { return NULL; } for (i = 0; i < bin->ehdr.e_shnum; i++) { Sdb *sdb = NULL; char key[32] = {0}; int size = bin->shdr[i].sh_size; if (size - (i*sizeof(Elf_(Shdr)) > bin->size)) { size = bin->size - (i*sizeof(Elf_(Shdr))); } int left = size - (i * sizeof (Elf_(Shdr))); left = R_MIN (left, bin->shdr[i].sh_size); if (left < 0) { break; } switch (bin->shdr[i].sh_type) { case SHT_GNU_verdef: sdb = store_versioninfo_gnu_verdef (bin, &bin->shdr[i], left); snprintf (key, sizeof (key), "verdef%d", num_verdef++); sdb_ns_set (sdb_versioninfo, key, sdb); break; case SHT_GNU_verneed: sdb = store_versioninfo_gnu_verneed (bin, &bin->shdr[i], left); snprintf (key, sizeof (key), "verneed%d", num_verneed++); sdb_ns_set (sdb_versioninfo, key, sdb); break; case SHT_GNU_versym: sdb = store_versioninfo_gnu_versym (bin, &bin->shdr[i], left); snprintf (key, sizeof (key), "versym%d", num_versym++); sdb_ns_set (sdb_versioninfo, key, sdb); break; } } return sdb_versioninfo; } static bool init_dynstr(ELFOBJ *bin) { int i, r; const char *section_name = NULL; if (!bin || !bin->shdr) { return false; } if (!bin->shstrtab) { return false; } for (i = 0; i < bin->ehdr.e_shnum; ++i) { if (bin->shdr[i].sh_name > bin->shstrtab_size) { return false; } section_name = &bin->shstrtab[bin->shdr[i].sh_name]; if (bin->shdr[i].sh_type == SHT_STRTAB && !strcmp (section_name, ".dynstr")) { if (!(bin->dynstr = (char*) calloc (bin->shdr[i].sh_size + 1, sizeof (char)))) { bprintf("Warning: Cannot allocate memory for dynamic strings\n"); return false; } if 
(bin->shdr[i].sh_offset > bin->size) { return false; } if (bin->shdr[i].sh_offset + bin->shdr[i].sh_size > bin->size) { return false; } if (bin->shdr[i].sh_offset + bin->shdr[i].sh_size < bin->shdr[i].sh_size) { return false; } r = r_buf_read_at (bin->b, bin->shdr[i].sh_offset, (ut8*)bin->dynstr, bin->shdr[i].sh_size); if (r < 1) { R_FREE (bin->dynstr); bin->dynstr_size = 0; return false; } bin->dynstr_size = bin->shdr[i].sh_size; return true; } } return false; } static int elf_init(ELFOBJ *bin) { bin->phdr = NULL; bin->shdr = NULL; bin->strtab = NULL; bin->shstrtab = NULL; bin->strtab_size = 0; bin->strtab_section = NULL; bin->dyn_buf = NULL; bin->dynstr = NULL; ZERO_FILL (bin->version_info); bin->g_sections = NULL; bin->g_symbols = NULL; bin->g_imports = NULL; /* bin is not an ELF */ if (!init_ehdr (bin)) { return false; } if (!init_phdr (bin)) { bprintf ("Warning: Cannot initialize program headers\n"); } if (!init_shdr (bin)) { bprintf ("Warning: Cannot initialize section headers\n"); } if (!init_strtab (bin)) { bprintf ("Warning: Cannot initialize strings table\n"); } if (!init_dynstr (bin)) { bprintf ("Warning: Cannot initialize dynamic strings\n"); } bin->baddr = Elf_(r_bin_elf_get_baddr) (bin); if (!init_dynamic_section (bin) && !Elf_(r_bin_elf_get_static)(bin)) bprintf ("Warning: Cannot initialize dynamic section\n"); bin->imports_by_ord_size = 0; bin->imports_by_ord = NULL; bin->symbols_by_ord_size = 0; bin->symbols_by_ord = NULL; bin->g_sections = Elf_(r_bin_elf_get_sections) (bin); bin->boffset = Elf_(r_bin_elf_get_boffset) (bin); sdb_ns_set (bin->kv, "versioninfo", store_versioninfo (bin)); return true; } ut64 Elf_(r_bin_elf_get_section_offset)(ELFOBJ *bin, const char *section_name) { RBinElfSection *section = get_section_by_name (bin, section_name); if (!section) return UT64_MAX; return section->offset; } ut64 Elf_(r_bin_elf_get_section_addr)(ELFOBJ *bin, const char *section_name) { RBinElfSection *section = get_section_by_name (bin, section_name); 
return section? section->rva: UT64_MAX; } ut64 Elf_(r_bin_elf_get_section_addr_end)(ELFOBJ *bin, const char *section_name) { RBinElfSection *section = get_section_by_name (bin, section_name); return section? section->rva + section->size: UT64_MAX; } #define REL (is_rela ? (void*)rela : (void*)rel) #define REL_BUF is_rela ? (ut8*)(&rela[k]) : (ut8*)(&rel[k]) #define REL_OFFSET is_rela ? rela[k].r_offset : rel[k].r_offset #define REL_TYPE is_rela ? rela[k].r_info : rel[k].r_info static ut64 get_import_addr(ELFOBJ *bin, int sym) { Elf_(Rel) *rel = NULL; Elf_(Rela) *rela = NULL; ut8 rl[sizeof (Elf_(Rel))] = {0}; ut8 rla[sizeof (Elf_(Rela))] = {0}; RBinElfSection *rel_sec = NULL; Elf_(Addr) plt_sym_addr = -1; ut64 got_addr, got_offset; ut64 plt_addr; int j, k, tsize, len, nrel; bool is_rela = false; const char *rel_sect[] = { ".rel.plt", ".rela.plt", ".rel.dyn", ".rela.dyn", NULL }; const char *rela_sect[] = { ".rela.plt", ".rel.plt", ".rela.dyn", ".rel.dyn", NULL }; if ((!bin->shdr || !bin->strtab) && !bin->phdr) { return -1; } if ((got_offset = Elf_(r_bin_elf_get_section_offset) (bin, ".got")) == -1 && (got_offset = Elf_(r_bin_elf_get_section_offset) (bin, ".got.plt")) == -1) { return -1; } if ((got_addr = Elf_(r_bin_elf_get_section_addr) (bin, ".got")) == -1 && (got_addr = Elf_(r_bin_elf_get_section_addr) (bin, ".got.plt")) == -1) { return -1; } if (bin->is_rela == DT_REL) { j = 0; while (!rel_sec && rel_sect[j]) { rel_sec = get_section_by_name (bin, rel_sect[j++]); } tsize = sizeof (Elf_(Rel)); } else if (bin->is_rela == DT_RELA) { j = 0; while (!rel_sec && rela_sect[j]) { rel_sec = get_section_by_name (bin, rela_sect[j++]); } is_rela = true; tsize = sizeof (Elf_(Rela)); } if (!rel_sec) { return -1; } if (rel_sec->size < 1) { return -1; } nrel = (ut32)((int)rel_sec->size / (int)tsize); if (nrel < 1) { return -1; } if (is_rela) { rela = calloc (nrel, tsize); if (!rela) { return -1; } } else { rel = calloc (nrel, tsize); if (!rel) { return -1; } } for (j = k = 0; j < 
rel_sec->size && k < nrel; j += tsize, k++) { int l = 0; if (rel_sec->offset + j > bin->size) { goto out; } if (rel_sec->offset + j + tsize > bin->size) { goto out; } len = r_buf_read_at ( bin->b, rel_sec->offset + j, is_rela ? rla : rl, is_rela ? sizeof (Elf_ (Rela)) : sizeof (Elf_ (Rel))); if (len < 1) { goto out; } #if R_BIN_ELF64 if (is_rela) { rela[k].r_offset = READ64 (rla, l) rela[k].r_info = READ64 (rla, l) rela[k].r_addend = READ64 (rla, l) } else { rel[k].r_offset = READ64 (rl, l) rel[k].r_info = READ64 (rl, l) } #else if (is_rela) { rela[k].r_offset = READ32 (rla, l) rela[k].r_info = READ32 (rla, l) rela[k].r_addend = READ32 (rla, l) } else { rel[k].r_offset = READ32 (rl, l) rel[k].r_info = READ32 (rl, l) } #endif int reloc_type = ELF_R_TYPE (REL_TYPE); int reloc_sym = ELF_R_SYM (REL_TYPE); if (reloc_sym == sym) { int of = REL_OFFSET; of = of - got_addr + got_offset; switch (bin->ehdr.e_machine) { case EM_PPC: case EM_PPC64: { RBinElfSection *s = get_section_by_name (bin, ".plt"); if (s) { ut8 buf[4]; ut64 base; len = r_buf_read_at (bin->b, s->offset, buf, sizeof (buf)); if (len < 4) { goto out; } base = r_read_be32 (buf); base -= (nrel * 16); base += (k * 16); plt_addr = base; free (REL); return plt_addr; } } break; case EM_SPARC: case EM_SPARCV9: case EM_SPARC32PLUS: plt_addr = Elf_(r_bin_elf_get_section_addr) (bin, ".plt"); if (plt_addr == -1) { free (rela); return -1; } if (reloc_type == R_386_PC16) { plt_addr += k * 12 + 20; // thumb symbol if (plt_addr & 1) { plt_addr--; } free (REL); return plt_addr; } else { bprintf ("Unknown sparc reloc type %d\n", reloc_type); } /* SPARC */ break; case EM_ARM: case EM_AARCH64: plt_addr = Elf_(r_bin_elf_get_section_addr) (bin, ".plt"); if (plt_addr == -1) { free (rela); return UT32_MAX; } switch (reloc_type) { case R_386_8: { plt_addr += k * 12 + 20; // thumb symbol if (plt_addr & 1) { plt_addr--; } free (REL); return plt_addr; } break; case 1026: // arm64 aarch64 plt_sym_addr = plt_addr + k * 16 + 32; goto 
done; default: bprintf ("Unsupported relocation type for imports %d\n", reloc_type); break; } break; case EM_386: case EM_X86_64: switch (reloc_type) { case 1: // unknown relocs found in voidlinux for x86-64 // break; case R_386_GLOB_DAT: case R_386_JMP_SLOT: { ut8 buf[8]; if (of + sizeof(Elf_(Addr)) < bin->size) { // ONLY FOR X86 if (of > bin->size || of + sizeof (Elf_(Addr)) > bin->size) { goto out; } len = r_buf_read_at (bin->b, of, buf, sizeof (Elf_(Addr))); if (len < -1) { goto out; } plt_sym_addr = sizeof (Elf_(Addr)) == 4 ? r_read_le32 (buf) : r_read_le64 (buf); if (!plt_sym_addr) { //XXX HACK ALERT!!!! full relro?? try to fix it //will there always be .plt.got, what would happen if is .got.plt? RBinElfSection *s = get_section_by_name (bin, ".plt.got"); if (Elf_(r_bin_elf_has_relro)(bin) < R_ELF_PART_RELRO || !s) { goto done; } plt_addr = s->offset; of = of + got_addr - got_offset; while (plt_addr + 2 + 4 < s->offset + s->size) { /*we try to locate the plt entry that correspond with the relocation since got does not point back to .plt. In this case it has the following form ff253a152000 JMP QWORD [RIP + 0x20153A] 6690 NOP ---- ff25ec9f0408 JMP DWORD [reloc.puts_236] plt_addr + 2 to remove jmp opcode and get the imm reading 4 and if RIP (plt_addr + 6) + imm == rel->offset return plt_addr, that will be our sym addr perhaps this hack doesn't work on 32 bits */ len = r_buf_read_at (bin->b, plt_addr + 2, buf, 4); if (len < -1) { goto out; } plt_sym_addr = sizeof (Elf_(Addr)) == 4 ? 
r_read_le32 (buf) : r_read_le64 (buf); //relative address if ((plt_addr + 6 + Elf_(r_bin_elf_v2p) (bin, plt_sym_addr)) == of) { plt_sym_addr = plt_addr; goto done; } else if (plt_sym_addr == of) { plt_sym_addr = plt_addr; goto done; } plt_addr += 8; } } else { plt_sym_addr -= 6; } goto done; } break; } default: bprintf ("Unsupported relocation type for imports %d\n", reloc_type); free (REL); return of; break; } break; case 8: // MIPS32 BIG ENDIAN relocs { RBinElfSection *s = get_section_by_name (bin, ".rela.plt"); if (s) { ut8 buf[1024]; const ut8 *base; plt_addr = s->rva + s->size; len = r_buf_read_at (bin->b, s->offset + s->size, buf, sizeof (buf)); if (len != sizeof (buf)) { // oops } base = r_mem_mem_aligned (buf, sizeof (buf), (const ut8*)"\x3c\x0f\x00", 3, 4); if (base) { plt_addr += (int)(size_t)(base - buf); } else { plt_addr += 108 + 8; // HARDCODED HACK } plt_addr += k * 16; free (REL); return plt_addr; } } break; default: bprintf ("Unsupported relocs type %d for arch %d\n", reloc_type, bin->ehdr.e_machine); break; } } } done: free (REL); return plt_sym_addr; out: free (REL); return -1; } int Elf_(r_bin_elf_has_nx)(ELFOBJ *bin) { int i; if (bin && bin->phdr) { for (i = 0; i < bin->ehdr.e_phnum; i++) { if (bin->phdr[i].p_type == PT_GNU_STACK) { return (!(bin->phdr[i].p_flags & 1))? 
1: 0; } } } return 0; }

/* Classify the RELRO hardening of the binary:
 * R_ELF_FULL_RELRO when a PT_GNU_RELRO segment AND bind-now semantics
 * (DT_BIND_NOW, or DT_FLAGS_1 with DF_1_NOW) are both present,
 * R_ELF_PART_RELRO when only the segment exists, R_ELF_NO_RELRO otherwise. */
int Elf_(r_bin_elf_has_relro)(ELFOBJ *bin) {
	int i;
	bool haveBindNow = false;
	bool haveGnuRelro = false;
	if (bin && bin->dyn_buf) {
		for (i = 0; i < bin->dyn_entries; i++) {
			switch (bin->dyn_buf[i].d_tag) {
			case DT_BIND_NOW:
				haveBindNow = true;
				break;
			case DT_FLAGS:
				/* NOTE(review): this inner scan advances the shared index i
				 * over the remaining dynamic entries looking for DT_FLAGS_1,
				 * so every entry after the first DT_FLAGS (until a 0 tag) is
				 * consumed here and never re-examined by the outer switch --
				 * confirm that skipping them is intended. */
				for (i++; i < bin->dyn_entries ; i++) {
					ut32 dTag = bin->dyn_buf[i].d_tag;
					if (!dTag) {
						break;
					}
					switch (dTag) {
					case DT_FLAGS_1:
						if (bin->dyn_buf[i].d_un.d_val & DF_1_NOW) {
							haveBindNow = true;
							break;
						}
					}
				}
				break;
			}
		}
	}
	if (bin && bin->phdr) {
		for (i = 0; i < bin->ehdr.e_phnum; i++) {
			if (bin->phdr[i].p_type == PT_GNU_RELRO) {
				haveGnuRelro = true;
				break;
			}
		}
	}
	if (haveGnuRelro) {
		if (haveBindNow) {
			return R_ELF_FULL_RELRO;
		}
		return R_ELF_PART_RELRO;
	}
	return R_ELF_NO_RELRO;
}

/* To compute the base address, one determines the memory address associated
with the lowest p_vaddr value for a PT_LOAD segment. One then obtains the
base address by truncating the memory address to the nearest multiple of
the maximum page size */
ut64 Elf_(r_bin_elf_get_baddr)(ELFOBJ *bin) {
	int i;
	ut64 tmp, base = UT64_MAX;
	if (!bin) {
		return 0;
	}
	if (bin->phdr) {
		for (i = 0; i < bin->ehdr.e_phnum; i++) {
			if (bin->phdr[i].p_type == PT_LOAD) {
				// round the segment's vaddr down to a page boundary
				tmp = (ut64)bin->phdr[i].p_vaddr & ELF_PAGE_MASK;
				tmp = tmp - (tmp % (1 << ELF_PAGE_SIZE));
				if (tmp < base) {
					base = tmp;
				}
			}
		}
	}
	if (base == UT64_MAX && bin->ehdr.e_type == ET_REL) {
		//we return our own base address for ET_REL type
		//we act as a loader for ELF
		return 0x08000000;
	}
	return base == UT64_MAX ? 0 : base;
}

/* Like get_baddr but over the file offsets (p_offset) of PT_LOAD segments:
 * the lowest page-aligned load offset, or 0 when none is found. */
ut64 Elf_(r_bin_elf_get_boffset)(ELFOBJ *bin) {
	int i;
	ut64 tmp, base = UT64_MAX;
	if (bin && bin->phdr) {
		for (i = 0; i < bin->ehdr.e_phnum; i++) {
			if (bin->phdr[i].p_type == PT_LOAD) {
				tmp = (ut64)bin->phdr[i].p_offset & ELF_PAGE_MASK;
				tmp = tmp - (tmp % (1 << ELF_PAGE_SIZE));
				if (tmp < base) {
					base = tmp;
				}
			}
		}
	}
	return base == UT64_MAX ?
0 : base; }

/* Heuristic: look for an x86 "push imm32" shortly after the entry point
 * (the argument pushed before __libc_start_main) and translate the pushed
 * address to a physical offset. Returns 0 when the pattern is not found
 * or the read fails. x86 only. */
ut64 Elf_(r_bin_elf_get_init_offset)(ELFOBJ *bin) {
	ut64 entry = Elf_(r_bin_elf_get_entry_offset) (bin);
	ut8 buf[512];
	if (!bin) {
		return 0LL;
	}
	if (r_buf_read_at (bin->b, entry + 16, buf, sizeof (buf)) < 1) {
		bprintf ("Warning: read (init_offset)\n");
		return 0;
	}
	if (buf[0] == 0x68) { // push imm32 // x86 only
		ut64 addr;
		memmove (buf, buf + 1, 4);
		addr = (ut64)r_read_le32 (buf);
		return Elf_(r_bin_elf_v2p) (bin, addr);
	}
	return 0;
}

/* Same push-imm32 heuristic as get_init_offset, probing at entry+11.
 * Returns 0 when not found or the read fails. x86/32 only. */
ut64 Elf_(r_bin_elf_get_fini_offset)(ELFOBJ *bin) {
	ut64 entry = Elf_(r_bin_elf_get_entry_offset) (bin);
	ut8 buf[512];
	if (!bin) {
		return 0LL;
	}
	/* BUGFIX: r_buf_read_at yields the byte count (or -1); the original
	 * only caught == -1, so a 0-byte read fell through and *buf was
	 * inspected uninitialized. Use < 1, consistent with get_init_offset. */
	if (r_buf_read_at (bin->b, entry + 11, buf, sizeof (buf)) < 1) {
		bprintf ("Warning: read (get_fini)\n");
		return 0;
	}
	if (*buf == 0x68) { // push imm32 // x86/32 only
		ut64 addr;
		memmove (buf, buf + 1, 4);
		addr = (ut64)r_read_le32 (buf);
		return Elf_(r_bin_elf_v2p) (bin, addr);
	}
	return 0;
}

/* Physical offset of the program entry point. When e_entry is 0 (e.g. some
 * relocatable objects), fall back to the offset of .init.text, .text or
 * .init, in that order. */
ut64 Elf_(r_bin_elf_get_entry_offset)(ELFOBJ *bin) {
	ut64 entry;
	if (!bin) {
		return 0LL;
	}
	entry = bin->ehdr.e_entry;
	if (!entry) {
		entry = Elf_(r_bin_elf_get_section_offset)(bin, ".init.text");
		if (entry != UT64_MAX) {
			return entry;
		}
		entry = Elf_(r_bin_elf_get_section_offset)(bin, ".text");
		if (entry != UT64_MAX) {
			return entry;
		}
		entry = Elf_(r_bin_elf_get_section_offset)(bin, ".init");
		if (entry != UT64_MAX) {
			return entry;
		}
		if (entry == UT64_MAX) {
			return 0;
		}
	}
	return Elf_(r_bin_elf_v2p) (bin, entry);
}

/* Virtual address of the symbol named "main", or UT64_MAX when the binary
 * has no such symbol. */
static ut64 getmainsymbol(ELFOBJ *bin) {
	struct r_bin_elf_symbol_t *symbol;
	int i;
	if (!(symbol = Elf_(r_bin_elf_get_symbols) (bin))) {
		return UT64_MAX;
	}
	for (i = 0; !symbol[i].last; i++) {
		if (!strcmp (symbol[i].name, "main")) {
			ut64 paddr = symbol[i].offset;
			return Elf_(r_bin_elf_p2v) (bin, paddr);
		}
	}
	return UT64_MAX;
}

ut64 Elf_(r_bin_elf_get_main_offset)(ELFOBJ *bin) {
	ut64 entry = Elf_(r_bin_elf_get_entry_offset) (bin);
	ut8 buf[512];
	if (!bin) {
		return 0LL;
	}
	if (entry > bin->size || (entry + sizeof (buf)) > bin->size) {
		return 0;
	}
	if (r_buf_read_at (bin->b, entry, buf, sizeof (buf)) < 1) {
		bprintf
("Warning: read (main)\n"); return 0; } // ARM64 if (buf[0x18+3] == 0x58 && buf[0x2f] == 0x00) { ut32 entry_vaddr = Elf_(r_bin_elf_p2v) (bin, entry); ut32 main_addr = r_read_le32 (&buf[0x30]); if ((main_addr >> 16) == (entry_vaddr >> 16)) { return Elf_(r_bin_elf_v2p) (bin, main_addr); } } // TODO: Use arch to identify arch before memcmp's // ARM ut64 text = Elf_(r_bin_elf_get_section_offset)(bin, ".text"); ut64 text_end = text + bin->size; // ARM-Thumb-Linux if (entry & 1 && !memcmp (buf, "\xf0\x00\x0b\x4f\xf0\x00", 6)) { ut32 * ptr = (ut32*)(buf+40-1); if (*ptr &1) { return Elf_(r_bin_elf_v2p) (bin, *ptr -1); } } if (!memcmp (buf, "\x00\xb0\xa0\xe3\x00\xe0\xa0\xe3", 8)) { // endian stuff here ut32 *addr = (ut32*)(buf+0x34); /* 0x00012000 00b0a0e3 mov fp, 0 0x00012004 00e0a0e3 mov lr, 0 */ if (*addr > text && *addr < (text_end)) { return Elf_(r_bin_elf_v2p) (bin, *addr); } } // MIPS /* get .got, calculate offset of main symbol */ if (!memcmp (buf, "\x21\x00\xe0\x03\x01\x00\x11\x04", 8)) { /* assuming the startup code looks like got = gp-0x7ff0 got[index__libc_start_main] ( got[index_main] ); looking for the instruction generating the first argument to find main lw a0, offset(gp) */ ut64 got_offset; if ((got_offset = Elf_(r_bin_elf_get_section_offset) (bin, ".got")) != -1 || (got_offset = Elf_(r_bin_elf_get_section_offset) (bin, ".got.plt")) != -1) { const ut64 gp = got_offset + 0x7ff0; unsigned i; for (i = 0; i < sizeof(buf) / sizeof(buf[0]); i += 4) { const ut32 instr = r_read_le32 (&buf[i]); if ((instr & 0xffff0000) == 0x8f840000) { // lw a0, offset(gp) const short delta = instr & 0x0000ffff; r_buf_read_at (bin->b, /* got_entry_offset = */ gp + delta, buf, 4); return Elf_(r_bin_elf_v2p) (bin, r_read_le32 (&buf[0])); } } } return 0; } // ARM if (!memcmp (buf, "\x24\xc0\x9f\xe5\x00\xb0\xa0\xe3", 8)) { ut64 addr = r_read_le32 (&buf[48]); return Elf_(r_bin_elf_v2p) (bin, addr); } // X86-CGC if (buf[0] == 0xe8 && !memcmp (buf + 5, 
"\x50\xe8\x00\x00\x00\x00\xb8\x01\x00\x00\x00\x53", 12)) { size_t SIZEOF_CALL = 5; ut64 rel_addr = (ut64)((int)(buf[1] + (buf[2] << 8) + (buf[3] << 16) + (buf[4] << 24))); ut64 addr = Elf_(r_bin_elf_p2v)(bin, entry + SIZEOF_CALL); addr += rel_addr; return Elf_(r_bin_elf_v2p) (bin, addr); } // X86-PIE if (buf[0x00] == 0x48 && buf[0x1e] == 0x8d && buf[0x11] == 0xe8) { ut32 *pmain = (ut32*)(buf + 0x30); ut64 vmain = Elf_(r_bin_elf_p2v) (bin, (ut64)*pmain); ut64 ventry = Elf_(r_bin_elf_p2v) (bin, entry); if (vmain >> 16 == ventry >> 16) { return (ut64)vmain; } } // X86-PIE if (buf[0x1d] == 0x48 && buf[0x1e] == 0x8b) { if (!memcmp (buf, "\x31\xed\x49\x89", 4)) {// linux ut64 maddr, baddr; ut8 n32s[sizeof (ut32)] = {0}; maddr = entry + 0x24 + r_read_le32 (buf + 0x20); if (r_buf_read_at (bin->b, maddr, n32s, sizeof (ut32)) == -1) { bprintf ("Warning: read (maddr) 2\n"); return 0; } maddr = (ut64)r_read_le32 (&n32s[0]); baddr = (bin->ehdr.e_entry >> 16) << 16; if (bin->phdr) { baddr = Elf_(r_bin_elf_get_baddr) (bin); } maddr += baddr; return maddr; } } // X86-NONPIE #if R_BIN_ELF64 if (!memcmp (buf, "\x49\x89\xd9", 3) && buf[156] == 0xe8) { // openbsd return r_read_le32 (&buf[157]) + entry + 156 + 5; } if (!memcmp (buf+29, "\x48\xc7\xc7", 3)) { // linux ut64 addr = (ut64)r_read_le32 (&buf[29 + 3]); return Elf_(r_bin_elf_v2p) (bin, addr); } #else if (buf[23] == '\x68') { ut64 addr = (ut64)r_read_le32 (&buf[23 + 1]); return Elf_(r_bin_elf_v2p) (bin, addr); } #endif /* linux64 pie main -- probably buggy in some cases */ if (buf[29] == 0x48 && buf[30] == 0x8d) { // lea rdi, qword [rip-0x21c4] ut8 *p = buf + 32; st32 maindelta = (st32)r_read_le32 (p); ut64 vmain = (ut64)(entry + 29 + maindelta) + 7; ut64 ventry = Elf_(r_bin_elf_p2v) (bin, entry); if (vmain>>16 == ventry>>16) { return (ut64)vmain; } } /* find sym.main if possible */ { ut64 m = getmainsymbol (bin); if (m != UT64_MAX) return m; } return UT64_MAX; } int Elf_(r_bin_elf_get_stripped)(ELFOBJ *bin) { int i; if 
(!bin->shdr) { return false; } for (i = 0; i < bin->ehdr.e_shnum; i++) { if (bin->shdr[i].sh_type == SHT_SYMTAB) { return false; } } return true; } char *Elf_(r_bin_elf_intrp)(ELFOBJ *bin) { int i; if (!bin || !bin->phdr) { return NULL; } for (i = 0; i < bin->ehdr.e_phnum; i++) { if (bin->phdr[i].p_type == PT_INTERP) { char *str = NULL; ut64 addr = bin->phdr[i].p_offset; int sz = bin->phdr[i].p_memsz; sdb_num_set (bin->kv, "elf_header.intrp_addr", addr, 0); sdb_num_set (bin->kv, "elf_header.intrp_size", sz, 0); if (sz < 1) { return NULL; } str = malloc (sz + 1); if (!str) { return NULL; } if (r_buf_read_at (bin->b, addr, (ut8*)str, sz) < 1) { bprintf ("Warning: read (main)\n"); return 0; } str[sz] = 0; sdb_set (bin->kv, "elf_header.intrp", str, 0); return str; } } return NULL; } int Elf_(r_bin_elf_get_static)(ELFOBJ *bin) { int i; if (!bin->phdr) { return false; } for (i = 0; i < bin->ehdr.e_phnum; i++) { if (bin->phdr[i].p_type == PT_INTERP) { return false; } } return true; } char* Elf_(r_bin_elf_get_data_encoding)(ELFOBJ *bin) { switch (bin->ehdr.e_ident[EI_DATA]) { case ELFDATANONE: return strdup ("none"); case ELFDATA2LSB: return strdup ("2's complement, little endian"); case ELFDATA2MSB: return strdup ("2's complement, big endian"); default: return r_str_newf ("<unknown: %x>", bin->ehdr.e_ident[EI_DATA]); } } int Elf_(r_bin_elf_has_va)(ELFOBJ *bin) { return true; } char* Elf_(r_bin_elf_get_arch)(ELFOBJ *bin) { switch (bin->ehdr.e_machine) { case EM_ARC: case EM_ARC_A5: return strdup ("arc"); case EM_AVR: return strdup ("avr"); case EM_CRIS: return strdup ("cris"); case EM_68K: return strdup ("m68k"); case EM_MIPS: case EM_MIPS_RS3_LE: case EM_MIPS_X: return strdup ("mips"); case EM_MCST_ELBRUS: return strdup ("elbrus"); case EM_TRICORE: return strdup ("tricore"); case EM_ARM: case EM_AARCH64: return strdup ("arm"); case EM_HEXAGON: return strdup ("hexagon"); case EM_BLACKFIN: return strdup ("blackfin"); case EM_SPARC: case EM_SPARC32PLUS: case EM_SPARCV9: 
return strdup ("sparc"); case EM_PPC: case EM_PPC64: return strdup ("ppc"); case EM_PARISC: return strdup ("hppa"); case EM_PROPELLER: return strdup ("propeller"); case EM_MICROBLAZE: return strdup ("microblaze.gnu"); case EM_RISCV: return strdup ("riscv"); case EM_VAX: return strdup ("vax"); case EM_XTENSA: return strdup ("xtensa"); case EM_LANAI: return strdup ("lanai"); case EM_VIDEOCORE3: case EM_VIDEOCORE4: return strdup ("vc4"); case EM_SH: return strdup ("sh"); case EM_V850: return strdup ("v850"); case EM_IA_64: return strdup("ia64"); default: return strdup ("x86"); } } char* Elf_(r_bin_elf_get_machine_name)(ELFOBJ *bin) { switch (bin->ehdr.e_machine) { case EM_NONE: return strdup ("No machine"); case EM_M32: return strdup ("AT&T WE 32100"); case EM_SPARC: return strdup ("SUN SPARC"); case EM_386: return strdup ("Intel 80386"); case EM_68K: return strdup ("Motorola m68k family"); case EM_88K: return strdup ("Motorola m88k family"); case EM_860: return strdup ("Intel 80860"); case EM_MIPS: return strdup ("MIPS R3000"); case EM_S370: return strdup ("IBM System/370"); case EM_MIPS_RS3_LE: return strdup ("MIPS R3000 little-endian"); case EM_PARISC: return strdup ("HPPA"); case EM_VPP500: return strdup ("Fujitsu VPP500"); case EM_SPARC32PLUS: return strdup ("Sun's \"v8plus\""); case EM_960: return strdup ("Intel 80960"); case EM_PPC: return strdup ("PowerPC"); case EM_PPC64: return strdup ("PowerPC 64-bit"); case EM_S390: return strdup ("IBM S390"); case EM_V800: return strdup ("NEC V800 series"); case EM_FR20: return strdup ("Fujitsu FR20"); case EM_RH32: return strdup ("TRW RH-32"); case EM_RCE: return strdup ("Motorola RCE"); case EM_ARM: return strdup ("ARM"); case EM_BLACKFIN: return strdup ("Analog Devices Blackfin"); case EM_FAKE_ALPHA: return strdup ("Digital Alpha"); case EM_SH: return strdup ("Hitachi SH"); case EM_SPARCV9: return strdup ("SPARC v9 64-bit"); case EM_TRICORE: return strdup ("Siemens Tricore"); case EM_ARC: return strdup ("Argonaut RISC 
Core"); case EM_H8_300: return strdup ("Hitachi H8/300"); case EM_H8_300H: return strdup ("Hitachi H8/300H"); case EM_H8S: return strdup ("Hitachi H8S"); case EM_H8_500: return strdup ("Hitachi H8/500"); case EM_IA_64: return strdup ("Intel Merced"); case EM_MIPS_X: return strdup ("Stanford MIPS-X"); case EM_COLDFIRE: return strdup ("Motorola Coldfire"); case EM_68HC12: return strdup ("Motorola M68HC12"); case EM_MMA: return strdup ("Fujitsu MMA Multimedia Accelerator"); case EM_PCP: return strdup ("Siemens PCP"); case EM_NCPU: return strdup ("Sony nCPU embeeded RISC"); case EM_NDR1: return strdup ("Denso NDR1 microprocessor"); case EM_STARCORE: return strdup ("Motorola Start*Core processor"); case EM_ME16: return strdup ("Toyota ME16 processor"); case EM_ST100: return strdup ("STMicroelectronic ST100 processor"); case EM_TINYJ: return strdup ("Advanced Logic Corp. Tinyj emb.fam"); case EM_X86_64: return strdup ("AMD x86-64 architecture"); case EM_LANAI: return strdup ("32bit LANAI architecture"); case EM_PDSP: return strdup ("Sony DSP Processor"); case EM_FX66: return strdup ("Siemens FX66 microcontroller"); case EM_ST9PLUS: return strdup ("STMicroelectronics ST9+ 8/16 mc"); case EM_ST7: return strdup ("STmicroelectronics ST7 8 bit mc"); case EM_68HC16: return strdup ("Motorola MC68HC16 microcontroller"); case EM_68HC11: return strdup ("Motorola MC68HC11 microcontroller"); case EM_68HC08: return strdup ("Motorola MC68HC08 microcontroller"); case EM_68HC05: return strdup ("Motorola MC68HC05 microcontroller"); case EM_SVX: return strdup ("Silicon Graphics SVx"); case EM_ST19: return strdup ("STMicroelectronics ST19 8 bit mc"); case EM_VAX: return strdup ("Digital VAX"); case EM_CRIS: return strdup ("Axis Communications 32-bit embedded processor"); case EM_JAVELIN: return strdup ("Infineon Technologies 32-bit embedded processor"); case EM_FIREPATH: return strdup ("Element 14 64-bit DSP Processor"); case EM_ZSP: return strdup ("LSI Logic 16-bit DSP Processor"); case 
EM_MMIX: return strdup ("Donald Knuth's educational 64-bit processor"); case EM_HUANY: return strdup ("Harvard University machine-independent object files"); case EM_PRISM: return strdup ("SiTera Prism"); case EM_AVR: return strdup ("Atmel AVR 8-bit microcontroller"); case EM_FR30: return strdup ("Fujitsu FR30"); case EM_D10V: return strdup ("Mitsubishi D10V"); case EM_D30V: return strdup ("Mitsubishi D30V"); case EM_V850: return strdup ("NEC v850"); case EM_M32R: return strdup ("Mitsubishi M32R"); case EM_MN10300: return strdup ("Matsushita MN10300"); case EM_MN10200: return strdup ("Matsushita MN10200"); case EM_PJ: return strdup ("picoJava"); case EM_OPENRISC: return strdup ("OpenRISC 32-bit embedded processor"); case EM_ARC_A5: return strdup ("ARC Cores Tangent-A5"); case EM_XTENSA: return strdup ("Tensilica Xtensa Architecture"); case EM_AARCH64: return strdup ("ARM aarch64"); case EM_PROPELLER: return strdup ("Parallax Propeller"); case EM_MICROBLAZE: return strdup ("Xilinx MicroBlaze"); case EM_RISCV: return strdup ("RISC V"); case EM_VIDEOCORE3: return strdup ("VideoCore III"); case EM_VIDEOCORE4: return strdup ("VideoCore IV"); default: return r_str_newf ("<unknown>: 0x%x", bin->ehdr.e_machine); } } char* Elf_(r_bin_elf_get_file_type)(ELFOBJ *bin) { ut32 e_type; if (!bin) { return NULL; } e_type = (ut32)bin->ehdr.e_type; // cast to avoid warn in iphone-gcc, must be ut16 switch (e_type) { case ET_NONE: return strdup ("NONE (None)"); case ET_REL: return strdup ("REL (Relocatable file)"); case ET_EXEC: return strdup ("EXEC (Executable file)"); case ET_DYN: return strdup ("DYN (Shared object file)"); case ET_CORE: return strdup ("CORE (Core file)"); } if ((e_type >= ET_LOPROC) && (e_type <= ET_HIPROC)) { return r_str_newf ("Processor Specific: %x", e_type); } if ((e_type >= ET_LOOS) && (e_type <= ET_HIOS)) { return r_str_newf ("OS Specific: %x", e_type); } return r_str_newf ("<unknown>: %x", e_type); } char* Elf_(r_bin_elf_get_elf_class)(ELFOBJ *bin) { switch 
(bin->ehdr.e_ident[EI_CLASS]) { case ELFCLASSNONE: return strdup ("none"); case ELFCLASS32: return strdup ("ELF32"); case ELFCLASS64: return strdup ("ELF64"); default: return r_str_newf ("<unknown: %x>", bin->ehdr.e_ident[EI_CLASS]); } } int Elf_(r_bin_elf_get_bits)(ELFOBJ *bin) { /* Hack for ARCompact */ if (bin->ehdr.e_machine == EM_ARC_A5) { return 16; } /* Hack for Ps2 */ if (bin->phdr && bin->ehdr.e_machine == EM_MIPS) { const ut32 mipsType = bin->ehdr.e_flags & EF_MIPS_ARCH; if (bin->ehdr.e_type == ET_EXEC) { int i; bool haveInterp = false; for (i = 0; i < bin->ehdr.e_phnum; i++) { if (bin->phdr[i].p_type == PT_INTERP) { haveInterp = true; } } if (!haveInterp && mipsType == EF_MIPS_ARCH_3) { // Playstation2 Hack return 64; } } // TODO: show this specific asm.cpu somewhere in bininfo (mips1, mips2, mips3, mips32r2, ...) switch (mipsType) { case EF_MIPS_ARCH_1: case EF_MIPS_ARCH_2: case EF_MIPS_ARCH_3: case EF_MIPS_ARCH_4: case EF_MIPS_ARCH_5: case EF_MIPS_ARCH_32: return 32; case EF_MIPS_ARCH_64: return 64; case EF_MIPS_ARCH_32R2: return 32; case EF_MIPS_ARCH_64R2: return 64; break; } return 32; } /* Hack for Thumb */ if (bin->ehdr.e_machine == EM_ARM) { if (bin->ehdr.e_type != ET_EXEC) { struct r_bin_elf_symbol_t *symbol; if ((symbol = Elf_(r_bin_elf_get_symbols) (bin))) { int i = 0; for (i = 0; !symbol[i].last; i++) { ut64 paddr = symbol[i].offset; if (paddr & 1) { return 16; } } } } { ut64 entry = Elf_(r_bin_elf_get_entry_offset) (bin); if (entry & 1) { return 16; } } } switch (bin->ehdr.e_ident[EI_CLASS]) { case ELFCLASS32: return 32; case ELFCLASS64: return 64; case ELFCLASSNONE: default: return 32; // defaults } } static inline int noodle(ELFOBJ *bin, const char *s) { const ut8 *p = bin->b->buf; if (bin->b->length > 64) { p += bin->b->length - 64; } else { return 0; } return r_mem_mem (p, 64, (const ut8 *)s, strlen (s)) != NULL; } static inline int needle(ELFOBJ *bin, const char *s) { if (bin->shstrtab) { ut32 len = bin->shstrtab_size; if (len > 4096) { 
len = 4096; // avoid slow loading .. can be buggy? } return r_mem_mem ((const ut8*)bin->shstrtab, len, (const ut8*)s, strlen (s)) != NULL; } return 0; } // TODO: must return const char * all those strings must be const char os[LINUX] or so char* Elf_(r_bin_elf_get_osabi_name)(ELFOBJ *bin) { switch (bin->ehdr.e_ident[EI_OSABI]) { case ELFOSABI_LINUX: return strdup("linux"); case ELFOSABI_SOLARIS: return strdup("solaris"); case ELFOSABI_FREEBSD: return strdup("freebsd"); case ELFOSABI_HPUX: return strdup("hpux"); } /* Hack to identify OS */ if (needle (bin, "openbsd")) return strdup ("openbsd"); if (needle (bin, "netbsd")) return strdup ("netbsd"); if (needle (bin, "freebsd")) return strdup ("freebsd"); if (noodle (bin, "BEOS:APP_VERSION")) return strdup ("beos"); if (needle (bin, "GNU")) return strdup ("linux"); return strdup ("linux"); } ut8 *Elf_(r_bin_elf_grab_regstate)(ELFOBJ *bin, int *len) { if (bin->phdr) { int i; int num = bin->ehdr.e_phnum; for (i = 0; i < num; i++) { if (bin->phdr[i].p_type != PT_NOTE) { continue; } int bits = Elf_(r_bin_elf_get_bits)(bin); int regdelta = (bits == 64)? 0x84: 0x40; // x64 vs x32 int regsize = 160; // for x86-64 ut8 *buf = malloc (regsize); if (r_buf_read_at (bin->b, bin->phdr[i].p_offset + regdelta, buf, regsize) != regsize) { free (buf); bprintf ("Cannot read register state from CORE file\n"); return NULL; } if (len) { *len = regsize; } return buf; } } bprintf ("Cannot find NOTE section\n"); return NULL; } int Elf_(r_bin_elf_is_big_endian)(ELFOBJ *bin) { return (bin->ehdr.e_ident[EI_DATA] == ELFDATA2MSB); } /* XXX Init dt_strtab? 
*/ char *Elf_(r_bin_elf_get_rpath)(ELFOBJ *bin) { char *ret = NULL; int j; if (!bin || !bin->phdr || !bin->dyn_buf || !bin->strtab) { return NULL; } for (j = 0; j< bin->dyn_entries; j++) { if (bin->dyn_buf[j].d_tag == DT_RPATH || bin->dyn_buf[j].d_tag == DT_RUNPATH) { if (!(ret = calloc (1, ELF_STRING_LENGTH))) { perror ("malloc (rpath)"); return NULL; } if (bin->dyn_buf[j].d_un.d_val > bin->strtab_size) { free (ret); return NULL; } strncpy (ret, bin->strtab + bin->dyn_buf[j].d_un.d_val, ELF_STRING_LENGTH); ret[ELF_STRING_LENGTH - 1] = '\0'; break; } } return ret; } static size_t get_relocs_num(ELFOBJ *bin) { size_t i, size, ret = 0; /* we need to be careful here, in malformed files the section size might * not be a multiple of a Rel/Rela size; round up so we allocate enough * space. */ #define NUMENTRIES_ROUNDUP(sectionsize, entrysize) (((sectionsize)+(entrysize)-1)/(entrysize)) if (!bin->g_sections) { return 0; } size = bin->is_rela == DT_REL ? sizeof (Elf_(Rel)) : sizeof (Elf_(Rela)); for (i = 0; !bin->g_sections[i].last; i++) { if (!strncmp (bin->g_sections[i].name, ".rela.", strlen (".rela."))) { if (!bin->is_rela) { size = sizeof (Elf_(Rela)); } ret += NUMENTRIES_ROUNDUP (bin->g_sections[i].size, size); } else if (!strncmp (bin->g_sections[i].name, ".rel.", strlen (".rel."))){ if (!bin->is_rela) { size = sizeof (Elf_(Rel)); } ret += NUMENTRIES_ROUNDUP (bin->g_sections[i].size, size); } } return ret; #undef NUMENTRIES_ROUNDUP } static int read_reloc(ELFOBJ *bin, RBinElfReloc *r, int is_rela, ut64 offset) { ut8 *buf = bin->b->buf; int j = 0; if (offset + sizeof (Elf_ (Rela)) > bin->size || offset + sizeof (Elf_(Rela)) < offset) { return -1; } if (is_rela == DT_RELA) { Elf_(Rela) rela; #if R_BIN_ELF64 rela.r_offset = READ64 (buf + offset, j) rela.r_info = READ64 (buf + offset, j) rela.r_addend = READ64 (buf + offset, j) #else rela.r_offset = READ32 (buf + offset, j) rela.r_info = READ32 (buf + offset, j) rela.r_addend = READ32 (buf + offset, j) #endif r->is_rela 
= is_rela;
		r->offset = rela.r_offset;
		r->type = ELF_R_TYPE (rela.r_info);
		r->sym = ELF_R_SYM (rela.r_info);
		r->last = 0;
		r->addend = rela.r_addend;
		return sizeof (Elf_(Rela));
	} else {
		/* plain Rel record: no addend field */
		Elf_(Rel) rel;
#if R_BIN_ELF64
		rel.r_offset = READ64 (buf + offset, j)
		rel.r_info = READ64 (buf + offset, j)
#else
		rel.r_offset = READ32 (buf + offset, j)
		rel.r_info = READ32 (buf + offset, j)
#endif
		r->is_rela = is_rela;
		r->offset = rel.r_offset;
		r->type = ELF_R_TYPE (rel.r_info);
		r->sym = ELF_R_SYM (rel.r_info);
		r->last = 0;
		return sizeof (Elf_(Rel));
	}
}

/* Parse all .rel.* / .rela.* sections into a calloc'ed array of
 * RBinElfReloc terminated by an entry with .last == 1 (caller frees). */
RBinElfReloc* Elf_(r_bin_elf_get_relocs)(ELFOBJ *bin) {
	int res, rel, rela, i, j;
	size_t reloc_num = 0;
	RBinElfReloc *ret = NULL;
	if (!bin || !bin->g_sections) {
		return NULL;
	}
	reloc_num = get_relocs_num (bin);
	if (!reloc_num) {
		return NULL;
	}
	bin->reloc_num = reloc_num;
	/* +1 slot reserved for the .last terminator entry */
	ret = (RBinElfReloc*)calloc ((size_t)reloc_num + 1, sizeof(RBinElfReloc));
	if (!ret) {
		return NULL;
	}
#if DEAD_CODE
	ut64 section_text_offset = Elf_(r_bin_elf_get_section_offset) (bin, ".text");
	if (section_text_offset == -1) {
		section_text_offset = 0;
	}
#endif
	for (i = 0, rel = 0; !bin->g_sections[i].last && rel < reloc_num ; i++) {
		bool is_rela = 0 == strncmp (bin->g_sections[i].name, ".rela.", strlen (".rela."));
		bool is_rel = 0 == strncmp (bin->g_sections[i].name, ".rel.", strlen (".rel."));
		if (!is_rela && !is_rel) {
			continue;
		}
		/* walk the section entry by entry; `res` is set by read_reloc()
		 * below before the first `j += res` increment is evaluated */
		for (j = 0; j < bin->g_sections[i].size; j += res) {
			/* skip sections whose claimed size/offset exceed the file */
			if (bin->g_sections[i].size > bin->size) {
				break;
			}
			if (bin->g_sections[i].offset > bin->size) {
				break;
			}
			if (rel >= reloc_num) {
				bprintf ("Internal error: ELF relocation buffer too small,"
					"please file a bug report.");
				break;
			}
			if (!bin->is_rela) {
				rela = is_rela?
DT_RELA : DT_REL;
			} else {
				rela = bin->is_rela;
			}
			res = read_reloc (bin, &ret[rel], rela, bin->g_sections[i].offset + j);
			if (j + res > bin->g_sections[i].size) {
				bprintf ("Warning: malformed file, relocation entry #%u is partially beyond the end of section %u.\n", rel, i);
			}
			if (bin->ehdr.e_type == ET_REL) {
				/* relocatable object: rva is relative to the target section */
				if (bin->g_sections[i].info < bin->ehdr.e_shnum && bin->shdr) {
					ret[rel].rva = bin->shdr[bin->g_sections[i].info].sh_offset + ret[rel].offset;
					ret[rel].rva = Elf_(r_bin_elf_p2v) (bin, ret[rel].rva);
				} else {
					ret[rel].rva = ret[rel].offset;
				}
			} else {
				/* executable/shared object: r_offset is a virtual address */
				ret[rel].rva = ret[rel].offset;
				ret[rel].offset = Elf_(r_bin_elf_v2p) (bin, ret[rel].offset);
			}
			ret[rel].last = 0;
			if (res < 0) {
				break;
			}
			rel++;
		}
	}
	ret[reloc_num].last = 1; /* terminator */
	return ret;
}

/* Collect DT_NEEDED library names from the dynamic table into a
 * realloc'ed array terminated by an entry with .last == 1. */
RBinElfLib* Elf_(r_bin_elf_get_libs)(ELFOBJ *bin) {
	RBinElfLib *ret = NULL;
	int j, k;
	/* NOTE(review): the *(bin->strtab+1) == '0' test compares against the
	 * character '0', not the NUL terminator — looks intentional upstream
	 * but worth confirming; kept byte-identical here. */
	if (!bin || !bin->phdr || !bin->dyn_buf || !bin->strtab || *(bin->strtab+1) == '0') {
		return NULL;
	}
	for (j = 0, k = 0; j < bin->dyn_entries; j++)
		if (bin->dyn_buf[j].d_tag == DT_NEEDED) {
			/* grow the array one slot at a time */
			RBinElfLib *r = realloc (ret, (k + 1) * sizeof (RBinElfLib));
			if (!r) {
				perror ("realloc (libs)");
				free (ret);
				return NULL;
			}
			ret = r;
			/* validate strtab offset before dereferencing */
			if (bin->dyn_buf[j].d_un.d_val > bin->strtab_size) {
				free (ret);
				return NULL;
			}
			strncpy (ret[k].name, bin->strtab + bin->dyn_buf[j].d_un.d_val, ELF_STRING_LENGTH);
			ret[k].name[ELF_STRING_LENGTH - 1] = '\0';
			ret[k].last = 0;
			if (ret[k].name[0]) {
				k++; /* keep only non-empty names */
			}
		}
	/* final slot for the terminator */
	RBinElfLib *r = realloc (ret, (k + 1) * sizeof (RBinElfLib));
	if (!r) {
		perror ("realloc (libs)");
		free (ret);
		return NULL;
	}
	ret = r;
	ret[k].last = 1;
	return ret;
}

/* Reconstruct pseudo-sections (.rel.dyn, .rela.plt, .rel.plt, .got.plt)
 * from the dynamic table when no section headers are available. */
static RBinElfSection* get_sections_from_phdr(ELFOBJ *bin) {
	RBinElfSection *ret;
	int i, num_sections = 0;
	ut64 reldyn = 0, relava = 0, pltgotva = 0, relva = 0;
	ut64 reldynsz = 0, relasz = 0, pltgotsz = 0;
	if (!bin || !bin->phdr || !bin->ehdr.e_phnum)
		return NULL;
	/* first pass: harvest addresses/sizes from the dynamic entries */
	for (i = 0; i < bin->dyn_entries; i++) {
		switch (bin->dyn_buf[i].d_tag) {
		case DT_REL:
			reldyn = bin->dyn_buf[i].d_un.d_ptr;
			num_sections++;
			break;
		case
DT_RELA: relva = bin->dyn_buf[i].d_un.d_ptr; num_sections++; break; case DT_RELSZ: reldynsz = bin->dyn_buf[i].d_un.d_val; break; case DT_RELASZ: relasz = bin->dyn_buf[i].d_un.d_val; break; case DT_PLTGOT: pltgotva = bin->dyn_buf[i].d_un.d_ptr; num_sections++; break; case DT_PLTRELSZ: pltgotsz = bin->dyn_buf[i].d_un.d_val; break; case DT_JMPREL: relava = bin->dyn_buf[i].d_un.d_ptr; num_sections++; break; default: break; } } ret = calloc (num_sections + 1, sizeof(RBinElfSection)); if (!ret) { return NULL; } i = 0; if (reldyn) { ret[i].offset = Elf_(r_bin_elf_v2p) (bin, reldyn); ret[i].rva = reldyn; ret[i].size = reldynsz; strcpy (ret[i].name, ".rel.dyn"); ret[i].last = 0; i++; } if (relava) { ret[i].offset = Elf_(r_bin_elf_v2p) (bin, relava); ret[i].rva = relava; ret[i].size = pltgotsz; strcpy (ret[i].name, ".rela.plt"); ret[i].last = 0; i++; } if (relva) { ret[i].offset = Elf_(r_bin_elf_v2p) (bin, relva); ret[i].rva = relva; ret[i].size = relasz; strcpy (ret[i].name, ".rel.plt"); ret[i].last = 0; i++; } if (pltgotva) { ret[i].offset = Elf_(r_bin_elf_v2p) (bin, pltgotva); ret[i].rva = pltgotva; ret[i].size = pltgotsz; strcpy (ret[i].name, ".got.plt"); ret[i].last = 0; i++; } ret[i].last = 1; return ret; } RBinElfSection* Elf_(r_bin_elf_get_sections)(ELFOBJ *bin) { RBinElfSection *ret = NULL; char unknown_s[20], invalid_s[20]; int i, nidx, unknown_c=0, invalid_c=0; if (!bin) { return NULL; } if (bin->g_sections) { return bin->g_sections; } if (!bin->shdr) { //we don't give up search in phdr section return get_sections_from_phdr (bin); } if (!(ret = calloc ((bin->ehdr.e_shnum + 1), sizeof (RBinElfSection)))) { return NULL; } for (i = 0; i < bin->ehdr.e_shnum; i++) { ret[i].offset = bin->shdr[i].sh_offset; ret[i].size = bin->shdr[i].sh_size; ret[i].align = bin->shdr[i].sh_addralign; ret[i].flags = bin->shdr[i].sh_flags; ret[i].link = bin->shdr[i].sh_link; ret[i].info = bin->shdr[i].sh_info; ret[i].type = bin->shdr[i].sh_type; if (bin->ehdr.e_type == ET_REL) { ret[i].rva 
= bin->baddr + bin->shdr[i].sh_offset; } else { ret[i].rva = bin->shdr[i].sh_addr; } nidx = bin->shdr[i].sh_name; #define SHNAME (int)bin->shdr[i].sh_name #define SHNLEN ELF_STRING_LENGTH - 4 #define SHSIZE (int)bin->shstrtab_size if (nidx < 0 || !bin->shstrtab_section || !bin->shstrtab_size || nidx > bin->shstrtab_size) { snprintf (invalid_s, sizeof (invalid_s) - 4, "invalid%d", invalid_c); strncpy (ret[i].name, invalid_s, SHNLEN); invalid_c++; } else { if (bin->shstrtab && (SHNAME > 0) && (SHNAME < SHSIZE)) { strncpy (ret[i].name, &bin->shstrtab[SHNAME], SHNLEN); } else { if (bin->shdr[i].sh_type == SHT_NULL) { //to follow the same behaviour as readelf strncpy (ret[i].name, "", sizeof (ret[i].name) - 4); } else { snprintf (unknown_s, sizeof (unknown_s)-4, "unknown%d", unknown_c); strncpy (ret[i].name, unknown_s, sizeof (ret[i].name)-4); unknown_c++; } } } ret[i].name[ELF_STRING_LENGTH-2] = '\0'; ret[i].last = 0; } ret[i].last = 1; return ret; } static void fill_symbol_bind_and_type (struct r_bin_elf_symbol_t *ret, Elf_(Sym) *sym) { #define s_bind(x) ret->bind = x #define s_type(x) ret->type = x switch (ELF_ST_BIND(sym->st_info)) { case STB_LOCAL: s_bind ("LOCAL"); break; case STB_GLOBAL: s_bind ("GLOBAL"); break; case STB_WEAK: s_bind ("WEAK"); break; case STB_NUM: s_bind ("NUM"); break; case STB_LOOS: s_bind ("LOOS"); break; case STB_HIOS: s_bind ("HIOS"); break; case STB_LOPROC: s_bind ("LOPROC"); break; case STB_HIPROC: s_bind ("HIPROC"); break; default: s_bind ("UNKNOWN"); } switch (ELF_ST_TYPE (sym->st_info)) { case STT_NOTYPE: s_type ("NOTYPE"); break; case STT_OBJECT: s_type ("OBJECT"); break; case STT_FUNC: s_type ("FUNC"); break; case STT_SECTION: s_type ("SECTION"); break; case STT_FILE: s_type ("FILE"); break; case STT_COMMON: s_type ("COMMON"); break; case STT_TLS: s_type ("TLS"); break; case STT_NUM: s_type ("NUM"); break; case STT_LOOS: s_type ("LOOS"); break; case STT_HIOS: s_type ("HIOS"); break; case STT_LOPROC: s_type ("LOPROC"); break; case 
STT_HIPROC: s_type ("HIPROC"); break; default: s_type ("UNKNOWN"); } } static RBinElfSymbol* get_symbols_from_phdr(ELFOBJ *bin, int type) { Elf_(Sym) *sym = NULL; Elf_(Addr) addr_sym_table = 0; ut8 s[sizeof (Elf_(Sym))] = {0}; RBinElfSymbol *ret = NULL; int i, j, r, tsize, nsym, ret_ctr; ut64 toffset = 0, tmp_offset; ut32 size, sym_size = 0; if (!bin || !bin->phdr || !bin->ehdr.e_phnum) { return NULL; } for (j = 0; j < bin->dyn_entries; j++) { switch (bin->dyn_buf[j].d_tag) { case (DT_SYMTAB): addr_sym_table = Elf_(r_bin_elf_v2p) (bin, bin->dyn_buf[j].d_un.d_ptr); break; case (DT_SYMENT): sym_size = bin->dyn_buf[j].d_un.d_val; break; default: break; } } if (!addr_sym_table) { return NULL; } if (!sym_size) { return NULL; } //since ELF doesn't specify the symbol table size we may read until the end of the buffer nsym = (bin->size - addr_sym_table) / sym_size; if (!UT32_MUL (&size, nsym, sizeof (Elf_ (Sym)))) { goto beach; } if (size < 1) { goto beach; } if (addr_sym_table > bin->size || addr_sym_table + size > bin->size) { goto beach; } if (nsym < 1) { return NULL; } // we reserve room for 4096 and grow as needed. size_t capacity1 = 4096; size_t capacity2 = 4096; sym = (Elf_(Sym)*) calloc (capacity1, sym_size); ret = (RBinElfSymbol *) calloc (capacity2, sizeof (struct r_bin_elf_symbol_t)); if (!sym || !ret) { goto beach; } for (i = 1, ret_ctr = 0; i < nsym; i++) { if (i >= capacity1) { // maybe grow // You take what you want, but you eat what you take. 
Elf_(Sym)* temp_sym = (Elf_(Sym)*) realloc(sym, (capacity1 * GROWTH_FACTOR) * sym_size); if (!temp_sym) { goto beach; } sym = temp_sym; capacity1 *= GROWTH_FACTOR; } if (ret_ctr >= capacity2) { // maybe grow RBinElfSymbol *temp_ret = realloc (ret, capacity2 * GROWTH_FACTOR * sizeof (struct r_bin_elf_symbol_t)); if (!temp_ret) { goto beach; } ret = temp_ret; capacity2 *= GROWTH_FACTOR; } // read in one entry r = r_buf_read_at (bin->b, addr_sym_table + i * sizeof (Elf_ (Sym)), s, sizeof (Elf_ (Sym))); if (r < 1) { goto beach; } int j = 0; #if R_BIN_ELF64 sym[i].st_name = READ32 (s, j); sym[i].st_info = READ8 (s, j); sym[i].st_other = READ8 (s, j); sym[i].st_shndx = READ16 (s, j); sym[i].st_value = READ64 (s, j); sym[i].st_size = READ64 (s, j); #else sym[i].st_name = READ32 (s, j); sym[i].st_value = READ32 (s, j); sym[i].st_size = READ32 (s, j); sym[i].st_info = READ8 (s, j); sym[i].st_other = READ8 (s, j); sym[i].st_shndx = READ16 (s, j); #endif // zero symbol is always empty // Examine entry and maybe store if (type == R_BIN_ELF_IMPORTS && sym[i].st_shndx == STN_UNDEF) { if (sym[i].st_value) { toffset = sym[i].st_value; } else if ((toffset = get_import_addr (bin, i)) == -1){ toffset = 0; } tsize = 16; } else if (type == R_BIN_ELF_SYMBOLS && sym[i].st_shndx != STN_UNDEF && ELF_ST_TYPE (sym[i].st_info) != STT_SECTION && ELF_ST_TYPE (sym[i].st_info) != STT_FILE) { tsize = sym[i].st_size; toffset = (ut64) sym[i].st_value; } else { continue; } tmp_offset = Elf_(r_bin_elf_v2p) (bin, toffset); if (tmp_offset > bin->size) { goto done; } if (sym[i].st_name + 2 > bin->strtab_size) { // Since we are reading beyond the symbol table what's happening // is that some entry is trying to dereference the strtab beyond its capacity // is not a symbol so is the end goto done; } ret[ret_ctr].offset = tmp_offset; ret[ret_ctr].size = tsize; { int rest = ELF_STRING_LENGTH - 1; int st_name = sym[i].st_name; int maxsize = R_MIN (bin->size, bin->strtab_size); if (st_name < 0 || st_name >= 
maxsize) { ret[ret_ctr].name[0] = 0; } else { const int len = __strnlen (bin->strtab + st_name, rest); memcpy (ret[ret_ctr].name, &bin->strtab[st_name], len); } } ret[ret_ctr].ordinal = i; ret[ret_ctr].in_shdr = false; ret[ret_ctr].name[ELF_STRING_LENGTH - 2] = '\0'; fill_symbol_bind_and_type (&ret[ret_ctr], &sym[i]); ret[ret_ctr].last = 0; ret_ctr++; } done: ret[ret_ctr].last = 1; // Size everything down to only what is used { nsym = i > 0 ? i : 1; Elf_ (Sym) * temp_sym = (Elf_ (Sym)*) realloc (sym, (nsym * GROWTH_FACTOR) * sym_size); if (!temp_sym) { goto beach; } sym = temp_sym; } { ret_ctr = ret_ctr > 0 ? ret_ctr : 1; RBinElfSymbol *p = (RBinElfSymbol *) realloc (ret, (ret_ctr + 1) * sizeof (RBinElfSymbol)); if (!p) { goto beach; } ret = p; } if (type == R_BIN_ELF_IMPORTS && !bin->imports_by_ord_size) { bin->imports_by_ord_size = ret_ctr + 1; if (ret_ctr > 0) { bin->imports_by_ord = (RBinImport * *) calloc (ret_ctr + 1, sizeof (RBinImport*)); } else { bin->imports_by_ord = NULL; } } else if (type == R_BIN_ELF_SYMBOLS && !bin->symbols_by_ord_size && ret_ctr) { bin->symbols_by_ord_size = ret_ctr + 1; if (ret_ctr > 0) { bin->symbols_by_ord = (RBinSymbol * *) calloc (ret_ctr + 1, sizeof (RBinSymbol*)); }else { bin->symbols_by_ord = NULL; } } free (sym); return ret; beach: free (sym); free (ret); return NULL; } static RBinElfSymbol *Elf_(r_bin_elf_get_phdr_symbols)(ELFOBJ *bin) { if (!bin) { return NULL; } if (bin->phdr_symbols) { return bin->phdr_symbols; } bin->phdr_symbols = get_symbols_from_phdr (bin, R_BIN_ELF_SYMBOLS); return bin->phdr_symbols; } static RBinElfSymbol *Elf_(r_bin_elf_get_phdr_imports)(ELFOBJ *bin) { if (!bin) { return NULL; } if (bin->phdr_imports) { return bin->phdr_imports; } bin->phdr_imports = get_symbols_from_phdr (bin, R_BIN_ELF_IMPORTS); return bin->phdr_imports; } static int Elf_(fix_symbols)(ELFOBJ *bin, int nsym, int type, RBinElfSymbol **sym) { int count = 0; RBinElfSymbol *ret = *sym; RBinElfSymbol *phdr_symbols = (type == 
R_BIN_ELF_SYMBOLS) ? Elf_(r_bin_elf_get_phdr_symbols) (bin) : Elf_(r_bin_elf_get_phdr_imports) (bin); RBinElfSymbol *tmp, *p; if (phdr_symbols) { RBinElfSymbol *d = ret; while (!d->last) { /* find match in phdr */ p = phdr_symbols; while (!p->last) { if (p->offset && d->offset == p->offset) { p->in_shdr = true; if (*p->name && strcmp (d->name, p->name)) { strcpy (d->name, p->name); } } p++; } d++; } p = phdr_symbols; while (!p->last) { if (!p->in_shdr) { count++; } p++; } /*Take those symbols that are not present in the shdr but yes in phdr*/ /*This should only should happen with fucked up binaries*/ if (count > 0) { /*what happens if a shdr says it has only one symbol? we should look anyway into phdr*/ tmp = (RBinElfSymbol*)realloc (ret, (nsym + count + 1) * sizeof (RBinElfSymbol)); if (!tmp) { return -1; } ret = tmp; ret[nsym--].last = 0; p = phdr_symbols; while (!p->last) { if (!p->in_shdr) { memcpy (&ret[++nsym], p, sizeof (RBinElfSymbol)); } p++; } ret[nsym + 1].last = 1; } *sym = ret; return nsym + 1; } return nsym; } static RBinElfSymbol* Elf_(_r_bin_elf_get_symbols_imports)(ELFOBJ *bin, int type) { ut32 shdr_size; int tsize, nsym, ret_ctr = 0, i, j, r, k, newsize; ut64 toffset; ut32 size = 0; RBinElfSymbol *ret = NULL; Elf_(Shdr) *strtab_section = NULL; Elf_(Sym) *sym = NULL; ut8 s[sizeof (Elf_(Sym))] = { 0 }; char *strtab = NULL; if (!bin || !bin->shdr || !bin->ehdr.e_shnum || bin->ehdr.e_shnum == 0xffff) { return (type == R_BIN_ELF_SYMBOLS) ? Elf_(r_bin_elf_get_phdr_symbols) (bin) : Elf_(r_bin_elf_get_phdr_imports) (bin); } if (!UT32_MUL (&shdr_size, bin->ehdr.e_shnum, sizeof (Elf_(Shdr)))) { return false; } if (shdr_size + 8 > bin->size) { return false; } for (i = 0; i < bin->ehdr.e_shnum; i++) { if ((type == R_BIN_ELF_IMPORTS && bin->shdr[i].sh_type == (bin->ehdr.e_type == ET_REL ? SHT_SYMTAB : SHT_DYNSYM)) || (type == R_BIN_ELF_SYMBOLS && bin->shdr[i].sh_type == (Elf_(r_bin_elf_get_stripped) (bin) ? 
SHT_DYNSYM : SHT_SYMTAB))) { if (bin->shdr[i].sh_link < 1) { /* oops. fix out of range pointers */ continue; } // hack to avoid asan cry if ((bin->shdr[i].sh_link * sizeof(Elf_(Shdr))) >= shdr_size) { /* oops. fix out of range pointers */ continue; } strtab_section = &bin->shdr[bin->shdr[i].sh_link]; if (strtab_section->sh_size > ST32_MAX || strtab_section->sh_size+8 > bin->size) { bprintf ("size (syms strtab)"); free (ret); free (strtab); return NULL; } if (!strtab) { if (!(strtab = (char *)calloc (1, 8 + strtab_section->sh_size))) { bprintf ("malloc (syms strtab)"); goto beach; } if (strtab_section->sh_offset > bin->size || strtab_section->sh_offset + strtab_section->sh_size > bin->size) { goto beach; } if (r_buf_read_at (bin->b, strtab_section->sh_offset, (ut8*)strtab, strtab_section->sh_size) == -1) { bprintf ("Warning: read (syms strtab)\n"); goto beach; } } newsize = 1 + bin->shdr[i].sh_size; if (newsize < 0 || newsize > bin->size) { bprintf ("invalid shdr %d size\n", i); goto beach; } nsym = (int)(bin->shdr[i].sh_size / sizeof (Elf_(Sym))); if (nsym < 0) { goto beach; } if (!(sym = (Elf_(Sym) *)calloc (nsym, sizeof (Elf_(Sym))))) { bprintf ("calloc (syms)"); goto beach; } if (!UT32_MUL (&size, nsym, sizeof (Elf_(Sym)))) { goto beach; } if (size < 1 || size > bin->size) { goto beach; } if (bin->shdr[i].sh_offset > bin->size) { goto beach; } if (bin->shdr[i].sh_offset + size > bin->size) { goto beach; } for (j = 0; j < nsym; j++) { int k = 0; r = r_buf_read_at (bin->b, bin->shdr[i].sh_offset + j * sizeof (Elf_(Sym)), s, sizeof (Elf_(Sym))); if (r < 1) { bprintf ("Warning: read (sym)\n"); goto beach; } #if R_BIN_ELF64 sym[j].st_name = READ32 (s, k) sym[j].st_info = READ8 (s, k) sym[j].st_other = READ8 (s, k) sym[j].st_shndx = READ16 (s, k) sym[j].st_value = READ64 (s, k) sym[j].st_size = READ64 (s, k) #else sym[j].st_name = READ32 (s, k) sym[j].st_value = READ32 (s, k) sym[j].st_size = READ32 (s, k) sym[j].st_info = READ8 (s, k) sym[j].st_other = READ8 (s, k) 
sym[j].st_shndx = READ16 (s, k) #endif } free (ret); ret = calloc (nsym, sizeof (RBinElfSymbol)); if (!ret) { bprintf ("Cannot allocate %d symbols\n", nsym); goto beach; } for (k = 1, ret_ctr = 0; k < nsym; k++) { if (type == R_BIN_ELF_IMPORTS && sym[k].st_shndx == STN_UNDEF) { if (sym[k].st_value) { toffset = sym[k].st_value; } else if ((toffset = get_import_addr (bin, k)) == -1){ toffset = 0; } tsize = 16; } else if (type == R_BIN_ELF_SYMBOLS && sym[k].st_shndx != STN_UNDEF && ELF_ST_TYPE (sym[k].st_info) != STT_SECTION && ELF_ST_TYPE (sym[k].st_info) != STT_FILE) { //int idx = sym[k].st_shndx; tsize = sym[k].st_size; toffset = (ut64)sym[k].st_value; } else { continue; } if (bin->ehdr.e_type == ET_REL) { if (sym[k].st_shndx < bin->ehdr.e_shnum) ret[ret_ctr].offset = sym[k].st_value + bin->shdr[sym[k].st_shndx].sh_offset; } else { ret[ret_ctr].offset = Elf_(r_bin_elf_v2p) (bin, toffset); } ret[ret_ctr].size = tsize; if (sym[k].st_name + 2 > strtab_section->sh_size) { bprintf ("Warning: index out of strtab range\n"); goto beach; } { int rest = ELF_STRING_LENGTH - 1; int st_name = sym[k].st_name; int maxsize = R_MIN (bin->b->length, strtab_section->sh_size); if (st_name < 0 || st_name >= maxsize) { ret[ret_ctr].name[0] = 0; } else { const size_t len = __strnlen (strtab + sym[k].st_name, rest); memcpy (ret[ret_ctr].name, &strtab[sym[k].st_name], len); } } ret[ret_ctr].ordinal = k; ret[ret_ctr].name[ELF_STRING_LENGTH - 2] = '\0'; fill_symbol_bind_and_type (&ret[ret_ctr], &sym[k]); ret[ret_ctr].last = 0; ret_ctr++; } ret[ret_ctr].last = 1; // ugly dirty hack :D R_FREE (strtab); R_FREE (sym); } } if (!ret) { return (type == R_BIN_ELF_SYMBOLS) ? 
Elf_(r_bin_elf_get_phdr_symbols) (bin) : Elf_(r_bin_elf_get_phdr_imports) (bin); } int max = -1; RBinElfSymbol *aux = NULL; nsym = Elf_(fix_symbols) (bin, ret_ctr, type, &ret); if (nsym == -1) { goto beach; } aux = ret; while (!aux->last) { if ((int)aux->ordinal > max) { max = aux->ordinal; } aux++; } nsym = max; if (type == R_BIN_ELF_IMPORTS) { R_FREE (bin->imports_by_ord); bin->imports_by_ord_size = nsym + 1; bin->imports_by_ord = (RBinImport**)calloc (R_MAX (1, nsym + 1), sizeof (RBinImport*)); } else if (type == R_BIN_ELF_SYMBOLS) { R_FREE (bin->symbols_by_ord); bin->symbols_by_ord_size = nsym + 1; bin->symbols_by_ord = (RBinSymbol**)calloc (R_MAX (1, nsym + 1), sizeof (RBinSymbol*)); } return ret; beach: free (ret); free (sym); free (strtab); return NULL; } RBinElfSymbol *Elf_(r_bin_elf_get_symbols)(ELFOBJ *bin) { if (!bin->g_symbols) { bin->g_symbols = Elf_(_r_bin_elf_get_symbols_imports) (bin, R_BIN_ELF_SYMBOLS); } return bin->g_symbols; } RBinElfSymbol *Elf_(r_bin_elf_get_imports)(ELFOBJ *bin) { if (!bin->g_imports) { bin->g_imports = Elf_(_r_bin_elf_get_symbols_imports) (bin, R_BIN_ELF_IMPORTS); } return bin->g_imports; } RBinElfField* Elf_(r_bin_elf_get_fields)(ELFOBJ *bin) { RBinElfField *ret = NULL; int i = 0, j; if (!bin || !(ret = calloc ((bin->ehdr.e_phnum + 3 + 1), sizeof (RBinElfField)))) { return NULL; } strncpy (ret[i].name, "ehdr", ELF_STRING_LENGTH); ret[i].offset = 0; ret[i++].last = 0; strncpy (ret[i].name, "shoff", ELF_STRING_LENGTH); ret[i].offset = bin->ehdr.e_shoff; ret[i++].last = 0; strncpy (ret[i].name, "phoff", ELF_STRING_LENGTH); ret[i].offset = bin->ehdr.e_phoff; ret[i++].last = 0; for (j = 0; bin->phdr && j < bin->ehdr.e_phnum; i++, j++) { snprintf (ret[i].name, ELF_STRING_LENGTH, "phdr_%i", j); ret[i].offset = bin->phdr[j].p_offset; ret[i].last = 0; } ret[i].last = 1; return ret; } void* Elf_(r_bin_elf_free)(ELFOBJ* bin) { int i; if (!bin) { return NULL; } free (bin->phdr); free (bin->shdr); free (bin->strtab); free 
(bin->dyn_buf); free (bin->shstrtab); free (bin->dynstr); //free (bin->strtab_section); if (bin->imports_by_ord) { for (i = 0; i<bin->imports_by_ord_size; i++) { free (bin->imports_by_ord[i]); } free (bin->imports_by_ord); } if (bin->symbols_by_ord) { for (i = 0; i<bin->symbols_by_ord_size; i++) { free (bin->symbols_by_ord[i]); } free (bin->symbols_by_ord); } r_buf_free (bin->b); if (bin->g_symbols != bin->phdr_symbols) { R_FREE (bin->phdr_symbols); } if (bin->g_imports != bin->phdr_imports) { R_FREE (bin->phdr_imports); } R_FREE (bin->g_sections); R_FREE (bin->g_symbols); R_FREE (bin->g_imports); free (bin); return NULL; } ELFOBJ* Elf_(r_bin_elf_new)(const char* file, bool verbose) { ut8 *buf; int size; ELFOBJ *bin = R_NEW0 (ELFOBJ); if (!bin) { return NULL; } memset (bin, 0, sizeof (ELFOBJ)); bin->file = file; if (!(buf = (ut8*)r_file_slurp (file, &size))) { return Elf_(r_bin_elf_free) (bin); } bin->size = size; bin->verbose = verbose; bin->b = r_buf_new (); if (!r_buf_set_bytes (bin->b, buf, bin->size)) { free (buf); return Elf_(r_bin_elf_free) (bin); } if (!elf_init (bin)) { free (buf); return Elf_(r_bin_elf_free) (bin); } free (buf); return bin; } ELFOBJ* Elf_(r_bin_elf_new_buf)(RBuffer *buf, bool verbose) { ELFOBJ *bin = R_NEW0 (ELFOBJ); bin->kv = sdb_new0 (); bin->b = r_buf_new (); bin->size = (ut32)buf->length; bin->verbose = verbose; if (!r_buf_set_bytes (bin->b, buf->buf, buf->length)) { return Elf_(r_bin_elf_free) (bin); } if (!elf_init (bin)) { return Elf_(r_bin_elf_free) (bin); } return bin; } static int is_in_pphdr (Elf_(Phdr) *p, ut64 addr) { return addr >= p->p_offset && addr < p->p_offset + p->p_memsz; } static int is_in_vphdr (Elf_(Phdr) *p, ut64 addr) { return addr >= p->p_vaddr && addr < p->p_vaddr + p->p_memsz; } /* converts a physical address to the virtual address, looking * at the program headers in the binary bin */ ut64 Elf_(r_bin_elf_p2v) (ELFOBJ *bin, ut64 paddr) { int i; if (!bin) return 0; if (!bin->phdr) { if (bin->ehdr.e_type == 
ET_REL) { return bin->baddr + paddr; } return paddr; } for (i = 0; i < bin->ehdr.e_phnum; ++i) { Elf_(Phdr) *p = &bin->phdr[i]; if (!p) { break; } if (p->p_type == PT_LOAD && is_in_pphdr (p, paddr)) { if (!p->p_vaddr && !p->p_offset) { continue; } return p->p_vaddr + paddr - p->p_offset; } } return paddr; } /* converts a virtual address to the relative physical address, looking * at the program headers in the binary bin */ ut64 Elf_(r_bin_elf_v2p) (ELFOBJ *bin, ut64 vaddr) { int i; if (!bin) { return 0; } if (!bin->phdr) { if (bin->ehdr.e_type == ET_REL) { return vaddr - bin->baddr; } return vaddr; } for (i = 0; i < bin->ehdr.e_phnum; ++i) { Elf_(Phdr) *p = &bin->phdr[i]; if (!p) { break; } if (p->p_type == PT_LOAD && is_in_vphdr (p, vaddr)) { if (!p->p_offset && !p->p_vaddr) { continue; } return p->p_offset + vaddr - p->p_vaddr; } } return vaddr; }
/* radare - LGPL - Copyright 2008-2017 - nibble, pancake, alvaro_fe */ #include <stdio.h> #include <stdlib.h> #include <string.h> #include <assert.h> #include <r_types.h> #include <r_util.h> #include "elf.h" #ifdef IFDBG #undef IFDBG #endif #define DO_THE_DBG 0 #define IFDBG if (DO_THE_DBG) #define IFINT if (0) #define ELF_PAGE_MASK 0xFFFFFFFFFFFFF000LL #define ELF_PAGE_SIZE 12 #define R_ELF_NO_RELRO 0 #define R_ELF_PART_RELRO 1 #define R_ELF_FULL_RELRO 2 #define bprintf if(bin->verbose)eprintf #define READ8(x, i) r_read_ble8(x + i); i += 1; #define READ16(x, i) r_read_ble16(x + i, bin->endian); i += 2; #define READ32(x, i) r_read_ble32(x + i, bin->endian); i += 4; #define READ64(x, i) r_read_ble64(x + i, bin->endian); i += 8; #define GROWTH_FACTOR (1.5) static inline int __strnlen(const char *str, int len) { int l = 0; while (IS_PRINTABLE (*str) && --len) { if (((ut8)*str) == 0xff) { break; } str++; l++; } return l + 1; } static int handle_e_ident(ELFOBJ *bin) { return !strncmp ((char *)bin->ehdr.e_ident, ELFMAG, SELFMAG) || !strncmp ((char *)bin->ehdr.e_ident, CGCMAG, SCGCMAG); } static int init_ehdr(ELFOBJ *bin) { ut8 e_ident[EI_NIDENT]; ut8 ehdr[sizeof (Elf_(Ehdr))] = {0}; int i, len; if (r_buf_read_at (bin->b, 0, e_ident, EI_NIDENT) == -1) { bprintf ("Warning: read (magic)\n"); return false; } sdb_set (bin->kv, "elf_type.cparse", "enum elf_type { ET_NONE=0, ET_REL=1," " ET_EXEC=2, ET_DYN=3, ET_CORE=4, ET_LOOS=0xfe00, ET_HIOS=0xfeff," " ET_LOPROC=0xff00, ET_HIPROC=0xffff };", 0); sdb_set (bin->kv, "elf_machine.cparse", "enum elf_machine{EM_NONE=0, EM_M32=1," " EM_SPARC=2, EM_386=3, EM_68K=4, EM_88K=5, EM_486=6, " " EM_860=7, EM_MIPS=8, EM_S370=9, EM_MIPS_RS3_LE=10, EM_RS6000=11," " EM_UNKNOWN12=12, EM_UNKNOWN13=13, EM_UNKNOWN14=14, " " EM_PA_RISC=15, EM_PARISC=EM_PA_RISC, EM_nCUBE=16, EM_VPP500=17," " EM_SPARC32PLUS=18, EM_960=19, EM_PPC=20, EM_PPC64=21, " " EM_S390=22, EM_UNKNOWN22=EM_S390, EM_UNKNOWN23=23, EM_UNKNOWN24=24," " EM_UNKNOWN25=25, EM_UNKNOWN26=26, 
EM_UNKNOWN27=27, EM_UNKNOWN28=28," " EM_UNKNOWN29=29, EM_UNKNOWN30=30, EM_UNKNOWN31=31, EM_UNKNOWN32=32," " EM_UNKNOWN33=33, EM_UNKNOWN34=34, EM_UNKNOWN35=35, EM_V800=36," " EM_FR20=37, EM_RH32=38, EM_RCE=39, EM_ARM=40, EM_ALPHA=41, EM_SH=42," " EM_SPARCV9=43, EM_TRICORE=44, EM_ARC=45, EM_H8_300=46, EM_H8_300H=47," " EM_H8S=48, EM_H8_500=49, EM_IA_64=50, EM_MIPS_X=51, EM_COLDFIRE=52," " EM_68HC12=53, EM_MMA=54, EM_PCP=55, EM_NCPU=56, EM_NDR1=57," " EM_STARCORE=58, EM_ME16=59, EM_ST100=60, EM_TINYJ=61, EM_AMD64=62," " EM_X86_64=EM_AMD64, EM_PDSP=63, EM_UNKNOWN64=64, EM_UNKNOWN65=65," " EM_FX66=66, EM_ST9PLUS=67, EM_ST7=68, EM_68HC16=69, EM_68HC11=70," " EM_68HC08=71, EM_68HC05=72, EM_SVX=73, EM_ST19=74, EM_VAX=75, " " EM_CRIS=76, EM_JAVELIN=77, EM_FIREPATH=78, EM_ZSP=79, EM_MMIX=80," " EM_HUANY=81, EM_PRISM=82, EM_AVR=83, EM_FR30=84, EM_D10V=85, EM_D30V=86," " EM_V850=87, EM_M32R=88, EM_MN10300=89, EM_MN10200=90, EM_PJ=91," " EM_OPENRISC=92, EM_ARC_A5=93, EM_XTENSA=94, EM_NUM=95};", 0); sdb_num_set (bin->kv, "elf_header.offset", 0, 0); sdb_num_set (bin->kv, "elf_header.size", sizeof (Elf_(Ehdr)), 0); #if R_BIN_ELF64 sdb_set (bin->kv, "elf_header.format", "[16]z[2]E[2]Exqqqxwwwwww" " ident (elf_type)type (elf_machine)machine version entry phoff shoff flags ehsize" " phentsize phnum shentsize shnum shstrndx", 0); #else sdb_set (bin->kv, "elf_header.format", "[16]z[2]E[2]Exxxxxwwwwww" " ident (elf_type)type (elf_machine)machine version entry phoff shoff flags ehsize" " phentsize phnum shentsize shnum shstrndx", 0); #endif bin->endian = (e_ident[EI_DATA] == ELFDATA2MSB)? 
1: 0; memset (&bin->ehdr, 0, sizeof (Elf_(Ehdr))); len = r_buf_read_at (bin->b, 0, ehdr, sizeof (Elf_(Ehdr))); if (len < 1) { bprintf ("Warning: read (ehdr)\n"); return false; } memcpy (&bin->ehdr.e_ident, ehdr, 16); i = 16; bin->ehdr.e_type = READ16 (ehdr, i) bin->ehdr.e_machine = READ16 (ehdr, i) bin->ehdr.e_version = READ32 (ehdr, i) #if R_BIN_ELF64 bin->ehdr.e_entry = READ64 (ehdr, i) bin->ehdr.e_phoff = READ64 (ehdr, i) bin->ehdr.e_shoff = READ64 (ehdr, i) #else bin->ehdr.e_entry = READ32 (ehdr, i) bin->ehdr.e_phoff = READ32 (ehdr, i) bin->ehdr.e_shoff = READ32 (ehdr, i) #endif bin->ehdr.e_flags = READ32 (ehdr, i) bin->ehdr.e_ehsize = READ16 (ehdr, i) bin->ehdr.e_phentsize = READ16 (ehdr, i) bin->ehdr.e_phnum = READ16 (ehdr, i) bin->ehdr.e_shentsize = READ16 (ehdr, i) bin->ehdr.e_shnum = READ16 (ehdr, i) bin->ehdr.e_shstrndx = READ16 (ehdr, i) return handle_e_ident (bin); // Usage example: // > td `k bin/cur/info/elf_type.cparse`; td `k bin/cur/info/elf_machine.cparse` // > pf `k bin/cur/info/elf_header.format` @ `k bin/cur/info/elf_header.offset` } static int init_phdr(ELFOBJ *bin) { ut32 phdr_size; ut8 phdr[sizeof (Elf_(Phdr))] = {0}; int i, j, len; if (!bin->ehdr.e_phnum) { return false; } if (bin->phdr) { return true; } if (!UT32_MUL (&phdr_size, (ut32)bin->ehdr.e_phnum, sizeof (Elf_(Phdr)))) { return false; } if (!phdr_size) { return false; } if (phdr_size > bin->size) { return false; } if (phdr_size > (ut32)bin->size) { return false; } if (bin->ehdr.e_phoff > bin->size) { return false; } if (bin->ehdr.e_phoff + phdr_size > bin->size) { return false; } if (!(bin->phdr = calloc (phdr_size, 1))) { perror ("malloc (phdr)"); return false; } for (i = 0; i < bin->ehdr.e_phnum; i++) { j = 0; len = r_buf_read_at (bin->b, bin->ehdr.e_phoff + i * sizeof (Elf_(Phdr)), phdr, sizeof (Elf_(Phdr))); if (len < 1) { bprintf ("Warning: read (phdr)\n"); R_FREE (bin->phdr); return false; } bin->phdr[i].p_type = READ32 (phdr, j) #if R_BIN_ELF64 bin->phdr[i].p_flags = READ32 
(phdr, j) bin->phdr[i].p_offset = READ64 (phdr, j) bin->phdr[i].p_vaddr = READ64 (phdr, j) bin->phdr[i].p_paddr = READ64 (phdr, j) bin->phdr[i].p_filesz = READ64 (phdr, j) bin->phdr[i].p_memsz = READ64 (phdr, j) bin->phdr[i].p_align = READ64 (phdr, j) #else bin->phdr[i].p_offset = READ32 (phdr, j) bin->phdr[i].p_vaddr = READ32 (phdr, j) bin->phdr[i].p_paddr = READ32 (phdr, j) bin->phdr[i].p_filesz = READ32 (phdr, j) bin->phdr[i].p_memsz = READ32 (phdr, j) bin->phdr[i].p_flags = READ32 (phdr, j) bin->phdr[i].p_align = READ32 (phdr, j) #endif } sdb_num_set (bin->kv, "elf_phdr.offset", bin->ehdr.e_phoff, 0); sdb_num_set (bin->kv, "elf_phdr.size", sizeof (Elf_(Phdr)), 0); sdb_set (bin->kv, "elf_p_type.cparse", "enum elf_p_type {PT_NULL=0,PT_LOAD=1,PT_DYNAMIC=2," "PT_INTERP=3,PT_NOTE=4,PT_SHLIB=5,PT_PHDR=6,PT_LOOS=0x60000000," "PT_HIOS=0x6fffffff,PT_LOPROC=0x70000000,PT_HIPROC=0x7fffffff};", 0); sdb_set (bin->kv, "elf_p_flags.cparse", "enum elf_p_flags {PF_None=0,PF_Exec=1," "PF_Write=2,PF_Write_Exec=3,PF_Read=4,PF_Read_Exec=5,PF_Read_Write=6," "PF_Read_Write_Exec=7};", 0); #if R_BIN_ELF64 sdb_set (bin->kv, "elf_phdr.format", "[4]E[4]Eqqqqqq (elf_p_type)type (elf_p_flags)flags" " offset vaddr paddr filesz memsz align", 0); #else sdb_set (bin->kv, "elf_phdr.format", "[4]Exxxxx[4]Ex (elf_p_type)type offset vaddr paddr" " filesz memsz (elf_p_flags)flags align", 0); #endif return true; // Usage example: // > td `k bin/cur/info/elf_p_type.cparse`; td `k bin/cur/info/elf_p_flags.cparse` // > pf `k bin/cur/info/elf_phdr.format` @ `k bin/cur/info/elf_phdr.offset` } static int init_shdr(ELFOBJ *bin) { ut32 shdr_size; ut8 shdr[sizeof (Elf_(Shdr))] = {0}; int i, j, len; if (!bin || bin->shdr) { return true; } if (!UT32_MUL (&shdr_size, bin->ehdr.e_shnum, sizeof (Elf_(Shdr)))) { return false; } if (shdr_size < 1) { return false; } if (shdr_size > bin->size) { return false; } if (bin->ehdr.e_shoff > bin->size) { return false; } if (bin->ehdr.e_shoff + shdr_size > bin->size) { return 
false; } if (!(bin->shdr = calloc (1, shdr_size + 1))) { perror ("malloc (shdr)"); return false; } sdb_num_set (bin->kv, "elf_shdr.offset", bin->ehdr.e_shoff, 0); sdb_num_set (bin->kv, "elf_shdr.size", sizeof (Elf_(Shdr)), 0); sdb_set (bin->kv, "elf_s_type.cparse", "enum elf_s_type {SHT_NULL=0,SHT_PROGBITS=1," "SHT_SYMTAB=2,SHT_STRTAB=3,SHT_RELA=4,SHT_HASH=5,SHT_DYNAMIC=6,SHT_NOTE=7," "SHT_NOBITS=8,SHT_REL=9,SHT_SHLIB=10,SHT_DYNSYM=11,SHT_LOOS=0x60000000," "SHT_HIOS=0x6fffffff,SHT_LOPROC=0x70000000,SHT_HIPROC=0x7fffffff};", 0); for (i = 0; i < bin->ehdr.e_shnum; i++) { j = 0; len = r_buf_read_at (bin->b, bin->ehdr.e_shoff + i * sizeof (Elf_(Shdr)), shdr, sizeof (Elf_(Shdr))); if (len < 1) { bprintf ("Warning: read (shdr) at 0x%"PFMT64x"\n", (ut64) bin->ehdr.e_shoff); R_FREE (bin->shdr); return false; } bin->shdr[i].sh_name = READ32 (shdr, j) bin->shdr[i].sh_type = READ32 (shdr, j) #if R_BIN_ELF64 bin->shdr[i].sh_flags = READ64 (shdr, j) bin->shdr[i].sh_addr = READ64 (shdr, j) bin->shdr[i].sh_offset = READ64 (shdr, j) bin->shdr[i].sh_size = READ64 (shdr, j) bin->shdr[i].sh_link = READ32 (shdr, j) bin->shdr[i].sh_info = READ32 (shdr, j) bin->shdr[i].sh_addralign = READ64 (shdr, j) bin->shdr[i].sh_entsize = READ64 (shdr, j) #else bin->shdr[i].sh_flags = READ32 (shdr, j) bin->shdr[i].sh_addr = READ32 (shdr, j) bin->shdr[i].sh_offset = READ32 (shdr, j) bin->shdr[i].sh_size = READ32 (shdr, j) bin->shdr[i].sh_link = READ32 (shdr, j) bin->shdr[i].sh_info = READ32 (shdr, j) bin->shdr[i].sh_addralign = READ32 (shdr, j) bin->shdr[i].sh_entsize = READ32 (shdr, j) #endif } #if R_BIN_ELF64 sdb_set (bin->kv, "elf_s_flags_64.cparse", "enum elf_s_flags_64 {SF64_None=0,SF64_Exec=1," "SF64_Alloc=2,SF64_Alloc_Exec=3,SF64_Write=4,SF64_Write_Exec=5," "SF64_Write_Alloc=6,SF64_Write_Alloc_Exec=7};", 0); sdb_set (bin->kv, "elf_shdr.format", "x[4]E[8]Eqqqxxqq name (elf_s_type)type" " (elf_s_flags_64)flags addr offset size link info addralign entsize", 0); #else sdb_set (bin->kv, 
"elf_s_flags_32.cparse", "enum elf_s_flags_32 {SF32_None=0,SF32_Exec=1," "SF32_Alloc=2,SF32_Alloc_Exec=3,SF32_Write=4,SF32_Write_Exec=5," "SF32_Write_Alloc=6,SF32_Write_Alloc_Exec=7};", 0); sdb_set (bin->kv, "elf_shdr.format", "x[4]E[4]Exxxxxxx name (elf_s_type)type" " (elf_s_flags_32)flags addr offset size link info addralign entsize", 0); #endif return true; // Usage example: // > td `k bin/cur/info/elf_s_type.cparse`; td `k bin/cur/info/elf_s_flags_64.cparse` // > pf `k bin/cur/info/elf_shdr.format` @ `k bin/cur/info/elf_shdr.offset` } static int init_strtab(ELFOBJ *bin) { if (bin->strtab || !bin->shdr) { return false; } if (bin->ehdr.e_shstrndx != SHN_UNDEF && (bin->ehdr.e_shstrndx >= bin->ehdr.e_shnum || (bin->ehdr.e_shstrndx >= SHN_LORESERVE && bin->ehdr.e_shstrndx < SHN_HIRESERVE))) return false; /* sh_size must be lower than UT32_MAX and not equal to zero, to avoid bugs on malloc() */ if (bin->shdr[bin->ehdr.e_shstrndx].sh_size > UT32_MAX) { return false; } if (!bin->shdr[bin->ehdr.e_shstrndx].sh_size) { return false; } bin->shstrtab_section = bin->strtab_section = &bin->shdr[bin->ehdr.e_shstrndx]; bin->shstrtab_size = bin->strtab_section->sh_size; if (bin->shstrtab_size > bin->size) { return false; } if (!(bin->shstrtab = calloc (1, bin->shstrtab_size + 1))) { perror ("malloc"); bin->shstrtab = NULL; return false; } if (bin->shstrtab_section->sh_offset > bin->size) { R_FREE (bin->shstrtab); return false; } if (bin->shstrtab_section->sh_offset + bin->shstrtab_section->sh_size > bin->size) { R_FREE (bin->shstrtab); return false; } if (r_buf_read_at (bin->b, bin->shstrtab_section->sh_offset, (ut8*)bin->shstrtab, bin->shstrtab_section->sh_size + 1) < 1) { bprintf ("Warning: read (shstrtab) at 0x%"PFMT64x"\n", (ut64) bin->shstrtab_section->sh_offset); R_FREE (bin->shstrtab); return false; } bin->shstrtab[bin->shstrtab_section->sh_size] = '\0'; sdb_num_set (bin->kv, "elf_shstrtab.offset", bin->shstrtab_section->sh_offset, 0); sdb_num_set (bin->kv, 
"elf_shstrtab.size", bin->shstrtab_section->sh_size, 0); return true; } static int init_dynamic_section(struct Elf_(r_bin_elf_obj_t) *bin) { Elf_(Dyn) *dyn = NULL; Elf_(Dyn) d = {0}; Elf_(Addr) strtabaddr = 0; ut64 offset = 0; char *strtab = NULL; size_t relentry = 0, strsize = 0; int entries; int i, j, len, r; ut8 sdyn[sizeof (Elf_(Dyn))] = {0}; ut32 dyn_size = 0; if (!bin || !bin->phdr || !bin->ehdr.e_phnum) { return false; } for (i = 0; i < bin->ehdr.e_phnum ; i++) { if (bin->phdr[i].p_type == PT_DYNAMIC) { dyn_size = bin->phdr[i].p_filesz; break; } } if (i == bin->ehdr.e_phnum) { return false; } if (bin->phdr[i].p_filesz > bin->size) { return false; } if (bin->phdr[i].p_offset > bin->size) { return false; } if (bin->phdr[i].p_offset + sizeof(Elf_(Dyn)) > bin->size) { return false; } for (entries = 0; entries < (dyn_size / sizeof (Elf_(Dyn))); entries++) { j = 0; len = r_buf_read_at (bin->b, bin->phdr[i].p_offset + entries * sizeof (Elf_(Dyn)), sdyn, sizeof (Elf_(Dyn))); if (len < 1) { goto beach; } #if R_BIN_ELF64 d.d_tag = READ64 (sdyn, j) #else d.d_tag = READ32 (sdyn, j) #endif if (d.d_tag == DT_NULL) { break; } } if (entries < 1) { return false; } dyn = (Elf_(Dyn)*)calloc (entries, sizeof (Elf_(Dyn))); if (!dyn) { return false; } if (!UT32_MUL (&dyn_size, entries, sizeof (Elf_(Dyn)))) { goto beach; } if (!dyn_size) { goto beach; } offset = Elf_(r_bin_elf_v2p) (bin, bin->phdr[i].p_vaddr); if (offset > bin->size || offset + dyn_size > bin->size) { goto beach; } for (i = 0; i < entries; i++) { j = 0; r_buf_read_at (bin->b, offset + i * sizeof (Elf_(Dyn)), sdyn, sizeof (Elf_(Dyn))); if (len < 1) { bprintf("Warning: read (dyn)\n"); } #if R_BIN_ELF64 dyn[i].d_tag = READ64 (sdyn, j) dyn[i].d_un.d_ptr = READ64 (sdyn, j) #else dyn[i].d_tag = READ32 (sdyn, j) dyn[i].d_un.d_ptr = READ32 (sdyn, j) #endif switch (dyn[i].d_tag) { case DT_STRTAB: strtabaddr = Elf_(r_bin_elf_v2p) (bin, dyn[i].d_un.d_ptr); break; case DT_STRSZ: strsize = dyn[i].d_un.d_val; break; case 
DT_PLTREL: bin->is_rela = dyn[i].d_un.d_val; break; case DT_RELAENT: relentry = dyn[i].d_un.d_val; break; default: if ((dyn[i].d_tag >= DT_VERSYM) && (dyn[i].d_tag <= DT_VERNEEDNUM)) { bin->version_info[DT_VERSIONTAGIDX (dyn[i].d_tag)] = dyn[i].d_un.d_val; } break; } } if (!bin->is_rela) { bin->is_rela = sizeof (Elf_(Rela)) == relentry? DT_RELA : DT_REL; } if (!strtabaddr || strtabaddr > bin->size || strsize > ST32_MAX || !strsize || strsize > bin->size) { if (!strtabaddr) { bprintf ("Warning: section.shstrtab not found or invalid\n"); } goto beach; } strtab = (char *)calloc (1, strsize + 1); if (!strtab) { goto beach; } if (strtabaddr + strsize > bin->size) { free (strtab); goto beach; } r = r_buf_read_at (bin->b, strtabaddr, (ut8 *)strtab, strsize); if (r < 1) { free (strtab); goto beach; } bin->dyn_buf = dyn; bin->dyn_entries = entries; bin->strtab = strtab; bin->strtab_size = strsize; r = Elf_(r_bin_elf_has_relro)(bin); switch (r) { case R_ELF_FULL_RELRO: sdb_set (bin->kv, "elf.relro", "full", 0); break; case R_ELF_PART_RELRO: sdb_set (bin->kv, "elf.relro", "partial", 0); break; default: sdb_set (bin->kv, "elf.relro", "no", 0); break; } sdb_num_set (bin->kv, "elf_strtab.offset", strtabaddr, 0); sdb_num_set (bin->kv, "elf_strtab.size", strsize, 0); return true; beach: free (dyn); return false; } static RBinElfSection* get_section_by_name(ELFOBJ *bin, const char *section_name) { int i; if (!bin->g_sections) { return NULL; } for (i = 0; !bin->g_sections[i].last; i++) { if (!strncmp (bin->g_sections[i].name, section_name, ELF_STRING_LENGTH-1)) { return &bin->g_sections[i]; } } return NULL; } static char *get_ver_flags(ut32 flags) { static char buff[32]; buff[0] = 0; if (!flags) { return "none"; } if (flags & VER_FLG_BASE) { strcpy (buff, "BASE "); } if (flags & VER_FLG_WEAK) { if (flags & VER_FLG_BASE) { strcat (buff, "| "); } strcat (buff, "WEAK "); } if (flags & ~(VER_FLG_BASE | VER_FLG_WEAK)) { strcat (buff, "| <unknown>"); } return buff; } static Sdb 
*store_versioninfo_gnu_versym(ELFOBJ *bin, Elf_(Shdr) *shdr, int sz) { int i; const ut64 num_entries = sz / sizeof (Elf_(Versym)); const char *section_name = ""; const char *link_section_name = ""; Elf_(Shdr) *link_shdr = NULL; Sdb *sdb = sdb_new0(); if (!sdb) { return NULL; } if (!bin->version_info[DT_VERSIONTAGIDX (DT_VERSYM)]) { sdb_free (sdb); return NULL; } if (shdr->sh_link > bin->ehdr.e_shnum) { sdb_free (sdb); return NULL; } link_shdr = &bin->shdr[shdr->sh_link]; ut8 *edata = (ut8*) calloc (R_MAX (1, num_entries), sizeof (ut16)); if (!edata) { sdb_free (sdb); return NULL; } ut16 *data = (ut16*) calloc (R_MAX (1, num_entries), sizeof (ut16)); if (!data) { free (edata); sdb_free (sdb); return NULL; } ut64 off = Elf_(r_bin_elf_v2p) (bin, bin->version_info[DT_VERSIONTAGIDX (DT_VERSYM)]); if (bin->shstrtab && shdr->sh_name < bin->shstrtab_size) { section_name = &bin->shstrtab[shdr->sh_name]; } if (bin->shstrtab && link_shdr->sh_name < bin->shstrtab_size) { link_section_name = &bin->shstrtab[link_shdr->sh_name]; } r_buf_read_at (bin->b, off, edata, sizeof (ut16) * num_entries); sdb_set (sdb, "section_name", section_name, 0); sdb_num_set (sdb, "num_entries", num_entries, 0); sdb_num_set (sdb, "addr", shdr->sh_addr, 0); sdb_num_set (sdb, "offset", shdr->sh_offset, 0); sdb_num_set (sdb, "link", shdr->sh_link, 0); sdb_set (sdb, "link_section_name", link_section_name, 0); for (i = num_entries; i--;) { data[i] = r_read_ble16 (&edata[i * sizeof (ut16)], bin->endian); } R_FREE (edata); for (i = 0; i < num_entries; i += 4) { int j; int check_def; char key[32] = {0}; Sdb *sdb_entry = sdb_new0 (); snprintf (key, sizeof (key), "entry%d", i / 4); sdb_ns_set (sdb, key, sdb_entry); sdb_num_set (sdb_entry, "idx", i, 0); for (j = 0; (j < 4) && (i + j) < num_entries; ++j) { int k; char *tmp_val = NULL; snprintf (key, sizeof (key), "value%d", j); switch (data[i + j]) { case 0: sdb_set (sdb_entry, key, "0 (*local*)", 0); break; case 1: sdb_set (sdb_entry, key, "1 (*global*)", 0); 
break; default: tmp_val = sdb_fmt (0, "%x ", data[i+j] & 0x7FFF); check_def = true; if (bin->version_info[DT_VERSIONTAGIDX (DT_VERNEED)]) { Elf_(Verneed) vn; ut8 svn[sizeof (Elf_(Verneed))] = {0}; ut64 offset = Elf_(r_bin_elf_v2p) (bin, bin->version_info[DT_VERSIONTAGIDX (DT_VERNEED)]); do { Elf_(Vernaux) vna; ut8 svna[sizeof (Elf_(Vernaux))] = {0}; ut64 a_off; if (offset > bin->size || offset + sizeof (vn) > bin->size) { goto beach; } if (r_buf_read_at (bin->b, offset, svn, sizeof (svn)) < 0) { bprintf ("Warning: Cannot read Verneed for Versym\n"); goto beach; } k = 0; vn.vn_version = READ16 (svn, k) vn.vn_cnt = READ16 (svn, k) vn.vn_file = READ32 (svn, k) vn.vn_aux = READ32 (svn, k) vn.vn_next = READ32 (svn, k) a_off = offset + vn.vn_aux; do { if (a_off > bin->size || a_off + sizeof (vna) > bin->size) { goto beach; } if (r_buf_read_at (bin->b, a_off, svna, sizeof (svna)) < 0) { bprintf ("Warning: Cannot read Vernaux for Versym\n"); goto beach; } k = 0; vna.vna_hash = READ32 (svna, k) vna.vna_flags = READ16 (svna, k) vna.vna_other = READ16 (svna, k) vna.vna_name = READ32 (svna, k) vna.vna_next = READ32 (svna, k) a_off += vna.vna_next; } while (vna.vna_other != data[i + j] && vna.vna_next != 0); if (vna.vna_other == data[i + j]) { if (vna.vna_name > bin->strtab_size) { goto beach; } sdb_set (sdb_entry, key, sdb_fmt (0, "%s(%s)", tmp_val, bin->strtab + vna.vna_name), 0); check_def = false; break; } offset += vn.vn_next; } while (vn.vn_next); } ut64 vinfoaddr = bin->version_info[DT_VERSIONTAGIDX (DT_VERDEF)]; if (check_def && data[i + j] != 0x8001 && vinfoaddr) { Elf_(Verdef) vd; ut8 svd[sizeof (Elf_(Verdef))] = {0}; ut64 offset = Elf_(r_bin_elf_v2p) (bin, vinfoaddr); if (offset > bin->size || offset + sizeof (vd) > bin->size) { goto beach; } do { if (r_buf_read_at (bin->b, offset, svd, sizeof (svd)) < 0) { bprintf ("Warning: Cannot read Verdef for Versym\n"); goto beach; } k = 0; vd.vd_version = READ16 (svd, k) vd.vd_flags = READ16 (svd, k) vd.vd_ndx = READ16 (svd, 
k) vd.vd_cnt = READ16 (svd, k) vd.vd_hash = READ32 (svd, k) vd.vd_aux = READ32 (svd, k) vd.vd_next = READ32 (svd, k) offset += vd.vd_next; } while (vd.vd_ndx != (data[i + j] & 0x7FFF) && vd.vd_next != 0); if (vd.vd_ndx == (data[i + j] & 0x7FFF)) { Elf_(Verdaux) vda; ut8 svda[sizeof (Elf_(Verdaux))] = {0}; ut64 off_vda = offset - vd.vd_next + vd.vd_aux; if (off_vda > bin->size || off_vda + sizeof (vda) > bin->size) { goto beach; } if (r_buf_read_at (bin->b, off_vda, svda, sizeof (svda)) < 0) { bprintf ("Warning: Cannot read Verdaux for Versym\n"); goto beach; } k = 0; vda.vda_name = READ32 (svda, k) vda.vda_next = READ32 (svda, k) if (vda.vda_name > bin->strtab_size) { goto beach; } const char *name = bin->strtab + vda.vda_name; sdb_set (sdb_entry, key, sdb_fmt (0,"%s(%s%-*s)", tmp_val, name, (int)(12 - strlen (name)),")") , 0); } } } } } beach: free (data); return sdb; } static Sdb *store_versioninfo_gnu_verdef(ELFOBJ *bin, Elf_(Shdr) *shdr, int sz) { const char *section_name = ""; const char *link_section_name = ""; char *end = NULL; Elf_(Shdr) *link_shdr = NULL; ut8 dfs[sizeof (Elf_(Verdef))] = {0}; Sdb *sdb; int cnt, i; if (shdr->sh_link > bin->ehdr.e_shnum) { return false; } link_shdr = &bin->shdr[shdr->sh_link]; if (shdr->sh_size < 1 || shdr->sh_size > SIZE_MAX) { return false; } Elf_(Verdef) *defs = calloc (shdr->sh_size, sizeof (char)); if (!defs) { return false; } if (bin->shstrtab && shdr->sh_name < bin->shstrtab_size) { section_name = &bin->shstrtab[shdr->sh_name]; } if (link_shdr && bin->shstrtab && link_shdr->sh_name < bin->shstrtab_size) { link_section_name = &bin->shstrtab[link_shdr->sh_name]; } if (!defs) { bprintf ("Warning: Cannot allocate memory (Check Elf_(Verdef))\n"); return NULL; } sdb = sdb_new0 (); end = (char *)defs + shdr->sh_size; sdb_set (sdb, "section_name", section_name, 0); sdb_num_set (sdb, "entries", shdr->sh_info, 0); sdb_num_set (sdb, "addr", shdr->sh_addr, 0); sdb_num_set (sdb, "offset", shdr->sh_offset, 0); sdb_num_set (sdb, 
"link", shdr->sh_link, 0); sdb_set (sdb, "link_section_name", link_section_name, 0); for (cnt = 0, i = 0; i >= 0 && cnt < shdr->sh_info && ((char *)defs + i < end); ++cnt) { Sdb *sdb_verdef = sdb_new0 (); char *vstart = ((char*)defs) + i; char key[32] = {0}; Elf_(Verdef) *verdef = (Elf_(Verdef)*)vstart; Elf_(Verdaux) aux = {0}; int j = 0; int isum = 0; r_buf_read_at (bin->b, shdr->sh_offset + i, dfs, sizeof (Elf_(Verdef))); verdef->vd_version = READ16 (dfs, j) verdef->vd_flags = READ16 (dfs, j) verdef->vd_ndx = READ16 (dfs, j) verdef->vd_cnt = READ16 (dfs, j) verdef->vd_hash = READ32 (dfs, j) verdef->vd_aux = READ32 (dfs, j) verdef->vd_next = READ32 (dfs, j) int vdaux = verdef->vd_aux; if (vdaux < 1) { sdb_free (sdb_verdef); goto out_error; } vstart += vdaux; if (vstart > end || vstart + sizeof (Elf_(Verdaux)) > end) { sdb_free (sdb_verdef); goto out_error; } j = 0; aux.vda_name = READ32 (vstart, j) aux.vda_next = READ32 (vstart, j) isum = i + verdef->vd_aux; if (aux.vda_name > bin->dynstr_size) { sdb_free (sdb_verdef); goto out_error; } sdb_num_set (sdb_verdef, "idx", i, 0); sdb_num_set (sdb_verdef, "vd_version", verdef->vd_version, 0); sdb_num_set (sdb_verdef, "vd_ndx", verdef->vd_ndx, 0); sdb_num_set (sdb_verdef, "vd_cnt", verdef->vd_cnt, 0); sdb_set (sdb_verdef, "vda_name", &bin->dynstr[aux.vda_name], 0); sdb_set (sdb_verdef, "flags", get_ver_flags (verdef->vd_flags), 0); for (j = 1; j < verdef->vd_cnt; ++j) { int k; Sdb *sdb_parent = sdb_new0 (); isum += aux.vda_next; vstart += aux.vda_next; if (vstart > end || vstart + sizeof(Elf_(Verdaux)) > end) { sdb_free (sdb_verdef); sdb_free (sdb_parent); goto out_error; } k = 0; aux.vda_name = READ32 (vstart, k) aux.vda_next = READ32 (vstart, k) if (aux.vda_name > bin->dynstr_size) { sdb_free (sdb_verdef); sdb_free (sdb_parent); goto out_error; } sdb_num_set (sdb_parent, "idx", isum, 0); sdb_num_set (sdb_parent, "parent", j, 0); sdb_set (sdb_parent, "vda_name", &bin->dynstr[aux.vda_name], 0); snprintf (key, sizeof 
(key), "parent%d", j - 1); sdb_ns_set (sdb_verdef, key, sdb_parent); } snprintf (key, sizeof (key), "verdef%d", cnt); sdb_ns_set (sdb, key, sdb_verdef); if (!verdef->vd_next) { sdb_free (sdb_verdef); goto out_error; } if ((st32)verdef->vd_next < 1) { eprintf ("Warning: Invalid vd_next in the ELF version\n"); break; } i += verdef->vd_next; } free (defs); return sdb; out_error: free (defs); sdb_free (sdb); return NULL; } static Sdb *store_versioninfo_gnu_verneed(ELFOBJ *bin, Elf_(Shdr) *shdr, int sz) { ut8 *end, *need = NULL; const char *section_name = ""; Elf_(Shdr) *link_shdr = NULL; const char *link_section_name = ""; Sdb *sdb_vernaux = NULL; Sdb *sdb_version = NULL; Sdb *sdb = NULL; int i, cnt; if (!bin || !bin->dynstr) { return NULL; } if (shdr->sh_link > bin->ehdr.e_shnum) { return NULL; } if (shdr->sh_size < 1 || shdr->sh_size > SIZE_MAX) { return NULL; } sdb = sdb_new0 (); if (!sdb) { return NULL; } link_shdr = &bin->shdr[shdr->sh_link]; if (bin->shstrtab && shdr->sh_name < bin->shstrtab_size) { section_name = &bin->shstrtab[shdr->sh_name]; } if (bin->shstrtab && link_shdr->sh_name < bin->shstrtab_size) { link_section_name = &bin->shstrtab[link_shdr->sh_name]; } if (!(need = (ut8*) calloc (R_MAX (1, shdr->sh_size), sizeof (ut8)))) { bprintf ("Warning: Cannot allocate memory for Elf_(Verneed)\n"); goto beach; } end = need + shdr->sh_size; sdb_set (sdb, "section_name", section_name, 0); sdb_num_set (sdb, "num_entries", shdr->sh_info, 0); sdb_num_set (sdb, "addr", shdr->sh_addr, 0); sdb_num_set (sdb, "offset", shdr->sh_offset, 0); sdb_num_set (sdb, "link", shdr->sh_link, 0); sdb_set (sdb, "link_section_name", link_section_name, 0); if (shdr->sh_offset > bin->size || shdr->sh_offset + shdr->sh_size > bin->size) { goto beach; } if (shdr->sh_offset + shdr->sh_size < shdr->sh_size) { goto beach; } i = r_buf_read_at (bin->b, shdr->sh_offset, need, shdr->sh_size); if (i < 0) goto beach; //XXX we should use DT_VERNEEDNUM instead of sh_info //TODO 
https://sourceware.org/ml/binutils/2014-11/msg00353.html for (i = 0, cnt = 0; cnt < shdr->sh_info; ++cnt) { int j, isum; ut8 *vstart = need + i; Elf_(Verneed) vvn = {0}; if (vstart + sizeof (Elf_(Verneed)) > end) { goto beach; } Elf_(Verneed) *entry = &vvn; char key[32] = {0}; sdb_version = sdb_new0 (); if (!sdb_version) { goto beach; } j = 0; vvn.vn_version = READ16 (vstart, j) vvn.vn_cnt = READ16 (vstart, j) vvn.vn_file = READ32 (vstart, j) vvn.vn_aux = READ32 (vstart, j) vvn.vn_next = READ32 (vstart, j) sdb_num_set (sdb_version, "vn_version", entry->vn_version, 0); sdb_num_set (sdb_version, "idx", i, 0); if (entry->vn_file > bin->dynstr_size) { goto beach; } { char *s = r_str_ndup (&bin->dynstr[entry->vn_file], 16); sdb_set (sdb_version, "file_name", s, 0); free (s); } sdb_num_set (sdb_version, "cnt", entry->vn_cnt, 0); st32 vnaux = entry->vn_aux; if (vnaux < 1) { goto beach; } vstart += vnaux; for (j = 0, isum = i + entry->vn_aux; j < entry->vn_cnt && vstart + sizeof (Elf_(Vernaux)) <= end; ++j) { int k; Elf_(Vernaux) * aux = NULL; Elf_(Vernaux) vaux = {0}; sdb_vernaux = sdb_new0 (); if (!sdb_vernaux) { goto beach; } aux = (Elf_(Vernaux)*)&vaux; k = 0; vaux.vna_hash = READ32 (vstart, k) vaux.vna_flags = READ16 (vstart, k) vaux.vna_other = READ16 (vstart, k) vaux.vna_name = READ32 (vstart, k) vaux.vna_next = READ32 (vstart, k) if (aux->vna_name > bin->dynstr_size) { goto beach; } sdb_num_set (sdb_vernaux, "idx", isum, 0); if (aux->vna_name > 0 && aux->vna_name + 8 < bin->dynstr_size) { char name [16]; strncpy (name, &bin->dynstr[aux->vna_name], sizeof (name)-1); name[sizeof(name)-1] = 0; sdb_set (sdb_vernaux, "name", name, 0); } sdb_set (sdb_vernaux, "flags", get_ver_flags (aux->vna_flags), 0); sdb_num_set (sdb_vernaux, "version", aux->vna_other, 0); isum += aux->vna_next; vstart += aux->vna_next; snprintf (key, sizeof (key), "vernaux%d", j); sdb_ns_set (sdb_version, key, sdb_vernaux); } if ((int)entry->vn_next < 0) { bprintf ("Invalid vn_next\n"); break; } i += 
entry->vn_next; snprintf (key, sizeof (key), "version%d", cnt ); sdb_ns_set (sdb, key, sdb_version); //if entry->vn_next is 0 it iterate infinitely if (!entry->vn_next) { break; } } free (need); return sdb; beach: free (need); sdb_free (sdb_vernaux); sdb_free (sdb_version); sdb_free (sdb); return NULL; } static Sdb *store_versioninfo(ELFOBJ *bin) { Sdb *sdb_versioninfo = NULL; int num_verdef = 0; int num_verneed = 0; int num_versym = 0; int i; if (!bin || !bin->shdr) { return NULL; } if (!(sdb_versioninfo = sdb_new0 ())) { return NULL; } for (i = 0; i < bin->ehdr.e_shnum; i++) { Sdb *sdb = NULL; char key[32] = {0}; int size = bin->shdr[i].sh_size; if (size - (i*sizeof(Elf_(Shdr)) > bin->size)) { size = bin->size - (i*sizeof(Elf_(Shdr))); } int left = size - (i * sizeof (Elf_(Shdr))); left = R_MIN (left, bin->shdr[i].sh_size); if (left < 0) { break; } switch (bin->shdr[i].sh_type) { case SHT_GNU_verdef: sdb = store_versioninfo_gnu_verdef (bin, &bin->shdr[i], left); snprintf (key, sizeof (key), "verdef%d", num_verdef++); sdb_ns_set (sdb_versioninfo, key, sdb); break; case SHT_GNU_verneed: sdb = store_versioninfo_gnu_verneed (bin, &bin->shdr[i], left); snprintf (key, sizeof (key), "verneed%d", num_verneed++); sdb_ns_set (sdb_versioninfo, key, sdb); break; case SHT_GNU_versym: sdb = store_versioninfo_gnu_versym (bin, &bin->shdr[i], left); snprintf (key, sizeof (key), "versym%d", num_versym++); sdb_ns_set (sdb_versioninfo, key, sdb); break; } } return sdb_versioninfo; } static bool init_dynstr(ELFOBJ *bin) { int i, r; const char *section_name = NULL; if (!bin || !bin->shdr) { return false; } if (!bin->shstrtab) { return false; } for (i = 0; i < bin->ehdr.e_shnum; ++i) { if (bin->shdr[i].sh_name > bin->shstrtab_size) { return false; } section_name = &bin->shstrtab[bin->shdr[i].sh_name]; if (bin->shdr[i].sh_type == SHT_STRTAB && !strcmp (section_name, ".dynstr")) { if (!(bin->dynstr = (char*) calloc (bin->shdr[i].sh_size + 1, sizeof (char)))) { bprintf("Warning: Cannot 
allocate memory for dynamic strings\n"); return false; } if (bin->shdr[i].sh_offset > bin->size) { return false; } if (bin->shdr[i].sh_offset + bin->shdr[i].sh_size > bin->size) { return false; } if (bin->shdr[i].sh_offset + bin->shdr[i].sh_size < bin->shdr[i].sh_size) { return false; } r = r_buf_read_at (bin->b, bin->shdr[i].sh_offset, (ut8*)bin->dynstr, bin->shdr[i].sh_size); if (r < 1) { R_FREE (bin->dynstr); bin->dynstr_size = 0; return false; } bin->dynstr_size = bin->shdr[i].sh_size; return true; } } return false; } static int elf_init(ELFOBJ *bin) { bin->phdr = NULL; bin->shdr = NULL; bin->strtab = NULL; bin->shstrtab = NULL; bin->strtab_size = 0; bin->strtab_section = NULL; bin->dyn_buf = NULL; bin->dynstr = NULL; ZERO_FILL (bin->version_info); bin->g_sections = NULL; bin->g_symbols = NULL; bin->g_imports = NULL; /* bin is not an ELF */ if (!init_ehdr (bin)) { return false; } if (!init_phdr (bin)) { bprintf ("Warning: Cannot initialize program headers\n"); } if (!init_shdr (bin)) { bprintf ("Warning: Cannot initialize section headers\n"); } if (!init_strtab (bin)) { bprintf ("Warning: Cannot initialize strings table\n"); } if (!init_dynstr (bin)) { bprintf ("Warning: Cannot initialize dynamic strings\n"); } bin->baddr = Elf_(r_bin_elf_get_baddr) (bin); if (!init_dynamic_section (bin) && !Elf_(r_bin_elf_get_static)(bin)) bprintf ("Warning: Cannot initialize dynamic section\n"); bin->imports_by_ord_size = 0; bin->imports_by_ord = NULL; bin->symbols_by_ord_size = 0; bin->symbols_by_ord = NULL; bin->g_sections = Elf_(r_bin_elf_get_sections) (bin); bin->boffset = Elf_(r_bin_elf_get_boffset) (bin); sdb_ns_set (bin->kv, "versioninfo", store_versioninfo (bin)); return true; } ut64 Elf_(r_bin_elf_get_section_offset)(ELFOBJ *bin, const char *section_name) { RBinElfSection *section = get_section_by_name (bin, section_name); if (!section) return UT64_MAX; return section->offset; } ut64 Elf_(r_bin_elf_get_section_addr)(ELFOBJ *bin, const char *section_name) { 
RBinElfSection *section = get_section_by_name (bin, section_name); return section? section->rva: UT64_MAX; } ut64 Elf_(r_bin_elf_get_section_addr_end)(ELFOBJ *bin, const char *section_name) { RBinElfSection *section = get_section_by_name (bin, section_name); return section? section->rva + section->size: UT64_MAX; } #define REL (is_rela ? (void*)rela : (void*)rel) #define REL_BUF is_rela ? (ut8*)(&rela[k]) : (ut8*)(&rel[k]) #define REL_OFFSET is_rela ? rela[k].r_offset : rel[k].r_offset #define REL_TYPE is_rela ? rela[k].r_info : rel[k].r_info static ut64 get_import_addr(ELFOBJ *bin, int sym) { Elf_(Rel) *rel = NULL; Elf_(Rela) *rela = NULL; ut8 rl[sizeof (Elf_(Rel))] = {0}; ut8 rla[sizeof (Elf_(Rela))] = {0}; RBinElfSection *rel_sec = NULL; Elf_(Addr) plt_sym_addr = -1; ut64 got_addr, got_offset; ut64 plt_addr; int j, k, tsize, len, nrel; bool is_rela = false; const char *rel_sect[] = { ".rel.plt", ".rela.plt", ".rel.dyn", ".rela.dyn", NULL }; const char *rela_sect[] = { ".rela.plt", ".rel.plt", ".rela.dyn", ".rel.dyn", NULL }; if ((!bin->shdr || !bin->strtab) && !bin->phdr) { return -1; } if ((got_offset = Elf_(r_bin_elf_get_section_offset) (bin, ".got")) == -1 && (got_offset = Elf_(r_bin_elf_get_section_offset) (bin, ".got.plt")) == -1) { return -1; } if ((got_addr = Elf_(r_bin_elf_get_section_addr) (bin, ".got")) == -1 && (got_addr = Elf_(r_bin_elf_get_section_addr) (bin, ".got.plt")) == -1) { return -1; } if (bin->is_rela == DT_REL) { j = 0; while (!rel_sec && rel_sect[j]) { rel_sec = get_section_by_name (bin, rel_sect[j++]); } tsize = sizeof (Elf_(Rel)); } else if (bin->is_rela == DT_RELA) { j = 0; while (!rel_sec && rela_sect[j]) { rel_sec = get_section_by_name (bin, rela_sect[j++]); } is_rela = true; tsize = sizeof (Elf_(Rela)); } if (!rel_sec) { return -1; } if (rel_sec->size < 1) { return -1; } nrel = (ut32)((int)rel_sec->size / (int)tsize); if (nrel < 1) { return -1; } if (is_rela) { rela = calloc (nrel, tsize); if (!rela) { return -1; } } else { rel = 
calloc (nrel, tsize); if (!rel) { return -1; } } for (j = k = 0; j < rel_sec->size && k < nrel; j += tsize, k++) { int l = 0; if (rel_sec->offset + j > bin->size) { goto out; } if (rel_sec->offset + j + tsize > bin->size) { goto out; } len = r_buf_read_at ( bin->b, rel_sec->offset + j, is_rela ? rla : rl, is_rela ? sizeof (Elf_ (Rela)) : sizeof (Elf_ (Rel))); if (len < 1) { goto out; } #if R_BIN_ELF64 if (is_rela) { rela[k].r_offset = READ64 (rla, l) rela[k].r_info = READ64 (rla, l) rela[k].r_addend = READ64 (rla, l) } else { rel[k].r_offset = READ64 (rl, l) rel[k].r_info = READ64 (rl, l) } #else if (is_rela) { rela[k].r_offset = READ32 (rla, l) rela[k].r_info = READ32 (rla, l) rela[k].r_addend = READ32 (rla, l) } else { rel[k].r_offset = READ32 (rl, l) rel[k].r_info = READ32 (rl, l) } #endif int reloc_type = ELF_R_TYPE (REL_TYPE); int reloc_sym = ELF_R_SYM (REL_TYPE); if (reloc_sym == sym) { int of = REL_OFFSET; of = of - got_addr + got_offset; switch (bin->ehdr.e_machine) { case EM_PPC: case EM_PPC64: { RBinElfSection *s = get_section_by_name (bin, ".plt"); if (s) { ut8 buf[4]; ut64 base; len = r_buf_read_at (bin->b, s->offset, buf, sizeof (buf)); if (len < 4) { goto out; } base = r_read_be32 (buf); base -= (nrel * 16); base += (k * 16); plt_addr = base; free (REL); return plt_addr; } } break; case EM_SPARC: case EM_SPARCV9: case EM_SPARC32PLUS: plt_addr = Elf_(r_bin_elf_get_section_addr) (bin, ".plt"); if (plt_addr == -1) { free (rela); return -1; } if (reloc_type == R_386_PC16) { plt_addr += k * 12 + 20; // thumb symbol if (plt_addr & 1) { plt_addr--; } free (REL); return plt_addr; } else { bprintf ("Unknown sparc reloc type %d\n", reloc_type); } /* SPARC */ break; case EM_ARM: case EM_AARCH64: plt_addr = Elf_(r_bin_elf_get_section_addr) (bin, ".plt"); if (plt_addr == -1) { free (rela); return UT32_MAX; } switch (reloc_type) { case R_386_8: { plt_addr += k * 12 + 20; // thumb symbol if (plt_addr & 1) { plt_addr--; } free (REL); return plt_addr; } break; case 
1026: // arm64 aarch64 plt_sym_addr = plt_addr + k * 16 + 32; goto done; default: bprintf ("Unsupported relocation type for imports %d\n", reloc_type); break; } break; case EM_386: case EM_X86_64: switch (reloc_type) { case 1: // unknown relocs found in voidlinux for x86-64 // break; case R_386_GLOB_DAT: case R_386_JMP_SLOT: { ut8 buf[8]; if (of + sizeof(Elf_(Addr)) < bin->size) { // ONLY FOR X86 if (of > bin->size || of + sizeof (Elf_(Addr)) > bin->size) { goto out; } len = r_buf_read_at (bin->b, of, buf, sizeof (Elf_(Addr))); if (len < -1) { goto out; } plt_sym_addr = sizeof (Elf_(Addr)) == 4 ? r_read_le32 (buf) : r_read_le64 (buf); if (!plt_sym_addr) { //XXX HACK ALERT!!!! full relro?? try to fix it //will there always be .plt.got, what would happen if is .got.plt? RBinElfSection *s = get_section_by_name (bin, ".plt.got"); if (Elf_(r_bin_elf_has_relro)(bin) < R_ELF_PART_RELRO || !s) { goto done; } plt_addr = s->offset; of = of + got_addr - got_offset; while (plt_addr + 2 + 4 < s->offset + s->size) { /*we try to locate the plt entry that correspond with the relocation since got does not point back to .plt. In this case it has the following form ff253a152000 JMP QWORD [RIP + 0x20153A] 6690 NOP ---- ff25ec9f0408 JMP DWORD [reloc.puts_236] plt_addr + 2 to remove jmp opcode and get the imm reading 4 and if RIP (plt_addr + 6) + imm == rel->offset return plt_addr, that will be our sym addr perhaps this hack doesn't work on 32 bits */ len = r_buf_read_at (bin->b, plt_addr + 2, buf, 4); if (len < -1) { goto out; } plt_sym_addr = sizeof (Elf_(Addr)) == 4 ? 
r_read_le32 (buf) : r_read_le64 (buf); //relative address if ((plt_addr + 6 + Elf_(r_bin_elf_v2p) (bin, plt_sym_addr)) == of) { plt_sym_addr = plt_addr; goto done; } else if (plt_sym_addr == of) { plt_sym_addr = plt_addr; goto done; } plt_addr += 8; } } else { plt_sym_addr -= 6; } goto done; } break; } default: bprintf ("Unsupported relocation type for imports %d\n", reloc_type); free (REL); return of; break; } break; case 8: // MIPS32 BIG ENDIAN relocs { RBinElfSection *s = get_section_by_name (bin, ".rela.plt"); if (s) { ut8 buf[1024]; const ut8 *base; plt_addr = s->rva + s->size; len = r_buf_read_at (bin->b, s->offset + s->size, buf, sizeof (buf)); if (len != sizeof (buf)) { // oops } base = r_mem_mem_aligned (buf, sizeof (buf), (const ut8*)"\x3c\x0f\x00", 3, 4); if (base) { plt_addr += (int)(size_t)(base - buf); } else { plt_addr += 108 + 8; // HARDCODED HACK } plt_addr += k * 16; free (REL); return plt_addr; } } break; default: bprintf ("Unsupported relocs type %d for arch %d\n", reloc_type, bin->ehdr.e_machine); break; } } } done: free (REL); return plt_sym_addr; out: free (REL); return -1; } int Elf_(r_bin_elf_has_nx)(ELFOBJ *bin) { int i; if (bin && bin->phdr) { for (i = 0; i < bin->ehdr.e_phnum; i++) { if (bin->phdr[i].p_type == PT_GNU_STACK) { return (!(bin->phdr[i].p_flags & 1))? 
1: 0; } } } return 0; } int Elf_(r_bin_elf_has_relro)(ELFOBJ *bin) { int i; bool haveBindNow = false; bool haveGnuRelro = false; if (bin && bin->dyn_buf) { for (i = 0; i < bin->dyn_entries; i++) { switch (bin->dyn_buf[i].d_tag) { case DT_BIND_NOW: haveBindNow = true; break; case DT_FLAGS: for (i++; i < bin->dyn_entries ; i++) { ut32 dTag = bin->dyn_buf[i].d_tag; if (!dTag) { break; } switch (dTag) { case DT_FLAGS_1: if (bin->dyn_buf[i].d_un.d_val & DF_1_NOW) { haveBindNow = true; break; } } } break; } } } if (bin && bin->phdr) { for (i = 0; i < bin->ehdr.e_phnum; i++) { if (bin->phdr[i].p_type == PT_GNU_RELRO) { haveGnuRelro = true; break; } } } if (haveGnuRelro) { if (haveBindNow) { return R_ELF_FULL_RELRO; } return R_ELF_PART_RELRO; } return R_ELF_NO_RELRO; } /* To compute the base address, one determines the memory address associated with the lowest p_vaddr value for a PT_LOAD segment. One then obtains the base address by truncating the memory address to the nearest multiple of the maximum page size */ ut64 Elf_(r_bin_elf_get_baddr)(ELFOBJ *bin) { int i; ut64 tmp, base = UT64_MAX; if (!bin) { return 0; } if (bin->phdr) { for (i = 0; i < bin->ehdr.e_phnum; i++) { if (bin->phdr[i].p_type == PT_LOAD) { tmp = (ut64)bin->phdr[i].p_vaddr & ELF_PAGE_MASK; tmp = tmp - (tmp % (1 << ELF_PAGE_SIZE)); if (tmp < base) { base = tmp; } } } } if (base == UT64_MAX && bin->ehdr.e_type == ET_REL) { //we return our own base address for ET_REL type //we act as a loader for ELF return 0x08000000; } return base == UT64_MAX ? 0 : base; } ut64 Elf_(r_bin_elf_get_boffset)(ELFOBJ *bin) { int i; ut64 tmp, base = UT64_MAX; if (bin && bin->phdr) { for (i = 0; i < bin->ehdr.e_phnum; i++) { if (bin->phdr[i].p_type == PT_LOAD) { tmp = (ut64)bin->phdr[i].p_offset & ELF_PAGE_MASK; tmp = tmp - (tmp % (1 << ELF_PAGE_SIZE)); if (tmp < base) { base = tmp; } } } } return base == UT64_MAX ? 
0 : base; } ut64 Elf_(r_bin_elf_get_init_offset)(ELFOBJ *bin) { ut64 entry = Elf_(r_bin_elf_get_entry_offset) (bin); ut8 buf[512]; if (!bin) { return 0LL; } if (r_buf_read_at (bin->b, entry + 16, buf, sizeof (buf)) < 1) { bprintf ("Warning: read (init_offset)\n"); return 0; } if (buf[0] == 0x68) { // push // x86 only ut64 addr; memmove (buf, buf+1, 4); addr = (ut64)r_read_le32 (buf); return Elf_(r_bin_elf_v2p) (bin, addr); } return 0; } ut64 Elf_(r_bin_elf_get_fini_offset)(ELFOBJ *bin) { ut64 entry = Elf_(r_bin_elf_get_entry_offset) (bin); ut8 buf[512]; if (!bin) { return 0LL; } if (r_buf_read_at (bin->b, entry+11, buf, sizeof (buf)) == -1) { bprintf ("Warning: read (get_fini)\n"); return 0; } if (*buf == 0x68) { // push // x86/32 only ut64 addr; memmove (buf, buf+1, 4); addr = (ut64)r_read_le32 (buf); return Elf_(r_bin_elf_v2p) (bin, addr); } return 0; } ut64 Elf_(r_bin_elf_get_entry_offset)(ELFOBJ *bin) { ut64 entry; if (!bin) { return 0LL; } entry = bin->ehdr.e_entry; if (!entry) { entry = Elf_(r_bin_elf_get_section_offset)(bin, ".init.text"); if (entry != UT64_MAX) { return entry; } entry = Elf_(r_bin_elf_get_section_offset)(bin, ".text"); if (entry != UT64_MAX) { return entry; } entry = Elf_(r_bin_elf_get_section_offset)(bin, ".init"); if (entry != UT64_MAX) { return entry; } if (entry == UT64_MAX) { return 0; } } return Elf_(r_bin_elf_v2p) (bin, entry); } static ut64 getmainsymbol(ELFOBJ *bin) { struct r_bin_elf_symbol_t *symbol; int i; if (!(symbol = Elf_(r_bin_elf_get_symbols) (bin))) { return UT64_MAX; } for (i = 0; !symbol[i].last; i++) { if (!strcmp (symbol[i].name, "main")) { ut64 paddr = symbol[i].offset; return Elf_(r_bin_elf_p2v) (bin, paddr); } } return UT64_MAX; } ut64 Elf_(r_bin_elf_get_main_offset)(ELFOBJ *bin) { ut64 entry = Elf_(r_bin_elf_get_entry_offset) (bin); ut8 buf[512]; if (!bin) { return 0LL; } if (entry > bin->size || (entry + sizeof (buf)) > bin->size) { return 0; } if (r_buf_read_at (bin->b, entry, buf, sizeof (buf)) < 1) { bprintf 
("Warning: read (main)\n"); return 0; }
	// ARM64
	if (buf[0x18+3] == 0x58 && buf[0x2f] == 0x00) { ut32 entry_vaddr = Elf_(r_bin_elf_p2v) (bin, entry); ut32 main_addr = r_read_le32 (&buf[0x30]); if ((main_addr >> 16) == (entry_vaddr >> 16)) { return Elf_(r_bin_elf_v2p) (bin, main_addr); } }
	// TODO: Use arch to identify arch before memcmp's
	// ARM
	ut64 text = Elf_(r_bin_elf_get_section_offset)(bin, ".text"); ut64 text_end = text + bin->size;
	// ARM-Thumb-Linux
	/* NOTE(review): (ut32*)(buf+40-1) and (ut32*)(buf+0x34) below are
	 * misaligned pointer casts into a byte buffer — UB on strict-alignment
	 * targets; r_read_le32 would be the safe form. */
	if (entry & 1 && !memcmp (buf, "\xf0\x00\x0b\x4f\xf0\x00", 6)) { ut32 * ptr = (ut32*)(buf+40-1); if (*ptr &1) { return Elf_(r_bin_elf_v2p) (bin, *ptr -1); } }
	if (!memcmp (buf, "\x00\xb0\xa0\xe3\x00\xe0\xa0\xe3", 8)) {
		// endian stuff here
		ut32 *addr = (ut32*)(buf+0x34);
		/*
		0x00012000    00b0a0e3     mov fp, 0
		0x00012004    00e0a0e3     mov lr, 0
		*/
		if (*addr > text && *addr < (text_end)) { return Elf_(r_bin_elf_v2p) (bin, *addr); } }
	// MIPS
	/* get .got, calculate offset of main symbol */
	if (!memcmp (buf, "\x21\x00\xe0\x03\x01\x00\x11\x04", 8)) {
		/*
		assuming the startup code looks like
			got = gp-0x7ff0
			got[index__libc_start_main] ( got[index_main] );
		looking for the instruction generating the first argument to find main
			lw a0, offset(gp)
		*/
		ut64 got_offset; if ((got_offset = Elf_(r_bin_elf_get_section_offset) (bin, ".got")) != -1 || (got_offset = Elf_(r_bin_elf_get_section_offset) (bin, ".got.plt")) != -1) { const ut64 gp = got_offset + 0x7ff0; unsigned i; for (i = 0; i < sizeof(buf) / sizeof(buf[0]); i += 4) { const ut32 instr = r_read_le32 (&buf[i]); if ((instr & 0xffff0000) == 0x8f840000) { // lw a0, offset(gp)
					const short delta = instr & 0x0000ffff; r_buf_read_at (bin->b, /* got_entry_offset = */ gp + delta, buf, 4); return Elf_(r_bin_elf_v2p) (bin, r_read_le32 (&buf[0])); } } } return 0; }
	// ARM
	if (!memcmp (buf, "\x24\xc0\x9f\xe5\x00\xb0\xa0\xe3", 8)) { ut64 addr = r_read_le32 (&buf[48]); return Elf_(r_bin_elf_v2p) (bin, addr); }
	// X86-CGC
	if (buf[0] == 0xe8 && !memcmp (buf + 5, "\x50\xe8\x00\x00\x00\x00\xb8\x01\x00\x00\x00\x53", 12)) { size_t SIZEOF_CALL = 5; ut64 rel_addr = (ut64)((int)(buf[1] + (buf[2] << 8) + (buf[3] << 16) + (buf[4] << 24))); ut64 addr = Elf_(r_bin_elf_p2v)(bin, entry + SIZEOF_CALL); addr += rel_addr; return Elf_(r_bin_elf_v2p) (bin, addr); }
	// X86-PIE
	if (buf[0x00] == 0x48 && buf[0x1e] == 0x8d && buf[0x11] == 0xe8) { ut32 *pmain = (ut32*)(buf + 0x30); ut64 vmain = Elf_(r_bin_elf_p2v) (bin, (ut64)*pmain); ut64 ventry = Elf_(r_bin_elf_p2v) (bin, entry); if (vmain >> 16 == ventry >> 16) { return (ut64)vmain; } }
	// X86-PIE
	if (buf[0x1d] == 0x48 && buf[0x1e] == 0x8b) { if (!memcmp (buf, "\x31\xed\x49\x89", 4)) {// linux
			ut64 maddr, baddr; ut8 n32s[sizeof (ut32)] = {0}; maddr = entry + 0x24 + r_read_le32 (buf + 0x20); if (r_buf_read_at (bin->b, maddr, n32s, sizeof (ut32)) == -1) { bprintf ("Warning: read (maddr) 2\n"); return 0; } maddr = (ut64)r_read_le32 (&n32s[0]); baddr = (bin->ehdr.e_entry >> 16) << 16; if (bin->phdr) { baddr = Elf_(r_bin_elf_get_baddr) (bin); } maddr += baddr; return maddr; } }
	// X86-NONPIE
#if R_BIN_ELF64
	if (!memcmp (buf, "\x49\x89\xd9", 3) && buf[156] == 0xe8) { // openbsd
		return r_read_le32 (&buf[157]) + entry + 156 + 5; }
	if (!memcmp (buf+29, "\x48\xc7\xc7", 3)) { // linux
		ut64 addr = (ut64)r_read_le32 (&buf[29 + 3]); return Elf_(r_bin_elf_v2p) (bin, addr); }
#else
	if (buf[23] == '\x68') { ut64 addr = (ut64)r_read_le32 (&buf[23 + 1]); return Elf_(r_bin_elf_v2p) (bin, addr); }
#endif
	/* linux64 pie main -- probably buggy in some cases */
	if (buf[29] == 0x48 && buf[30] == 0x8d) { // lea rdi, qword [rip-0x21c4]
		ut8 *p = buf + 32; st32 maindelta = (st32)r_read_le32 (p); ut64 vmain = (ut64)(entry + 29 + maindelta) + 7; ut64 ventry = Elf_(r_bin_elf_p2v) (bin, entry); if (vmain>>16 == ventry>>16) { return (ut64)vmain; } }
	/* find sym.main if possible */
	{ ut64 m = getmainsymbol (bin); if (m != UT64_MAX) return m; }
	return UT64_MAX; }
/* True when no SHT_SYMTAB section exists (binary looks stripped). */
int Elf_(r_bin_elf_get_stripped)(ELFOBJ *bin) { int i; if
(!bin->shdr) { return false; } for (i = 0; i < bin->ehdr.e_shnum; i++) { if (bin->shdr[i].sh_type == SHT_SYMTAB) { return false; } } return true; } char *Elf_(r_bin_elf_intrp)(ELFOBJ *bin) { int i; if (!bin || !bin->phdr) { return NULL; } for (i = 0; i < bin->ehdr.e_phnum; i++) { if (bin->phdr[i].p_type == PT_INTERP) { char *str = NULL; ut64 addr = bin->phdr[i].p_offset; int sz = bin->phdr[i].p_memsz; sdb_num_set (bin->kv, "elf_header.intrp_addr", addr, 0); sdb_num_set (bin->kv, "elf_header.intrp_size", sz, 0); if (sz < 1) { return NULL; } str = malloc (sz + 1); if (!str) { return NULL; } if (r_buf_read_at (bin->b, addr, (ut8*)str, sz) < 1) { bprintf ("Warning: read (main)\n"); return 0; } str[sz] = 0; sdb_set (bin->kv, "elf_header.intrp", str, 0); return str; } } return NULL; } int Elf_(r_bin_elf_get_static)(ELFOBJ *bin) { int i; if (!bin->phdr) { return false; } for (i = 0; i < bin->ehdr.e_phnum; i++) { if (bin->phdr[i].p_type == PT_INTERP) { return false; } } return true; } char* Elf_(r_bin_elf_get_data_encoding)(ELFOBJ *bin) { switch (bin->ehdr.e_ident[EI_DATA]) { case ELFDATANONE: return strdup ("none"); case ELFDATA2LSB: return strdup ("2's complement, little endian"); case ELFDATA2MSB: return strdup ("2's complement, big endian"); default: return r_str_newf ("<unknown: %x>", bin->ehdr.e_ident[EI_DATA]); } } int Elf_(r_bin_elf_has_va)(ELFOBJ *bin) { return true; } char* Elf_(r_bin_elf_get_arch)(ELFOBJ *bin) { switch (bin->ehdr.e_machine) { case EM_ARC: case EM_ARC_A5: return strdup ("arc"); case EM_AVR: return strdup ("avr"); case EM_CRIS: return strdup ("cris"); case EM_68K: return strdup ("m68k"); case EM_MIPS: case EM_MIPS_RS3_LE: case EM_MIPS_X: return strdup ("mips"); case EM_MCST_ELBRUS: return strdup ("elbrus"); case EM_TRICORE: return strdup ("tricore"); case EM_ARM: case EM_AARCH64: return strdup ("arm"); case EM_HEXAGON: return strdup ("hexagon"); case EM_BLACKFIN: return strdup ("blackfin"); case EM_SPARC: case EM_SPARC32PLUS: case EM_SPARCV9: 
return strdup ("sparc"); case EM_PPC: case EM_PPC64: return strdup ("ppc"); case EM_PARISC: return strdup ("hppa"); case EM_PROPELLER: return strdup ("propeller"); case EM_MICROBLAZE: return strdup ("microblaze.gnu"); case EM_RISCV: return strdup ("riscv"); case EM_VAX: return strdup ("vax"); case EM_XTENSA: return strdup ("xtensa"); case EM_LANAI: return strdup ("lanai"); case EM_VIDEOCORE3: case EM_VIDEOCORE4: return strdup ("vc4"); case EM_SH: return strdup ("sh"); case EM_V850: return strdup ("v850"); case EM_IA_64: return strdup("ia64"); default: return strdup ("x86"); } }
/* Map e_machine to a descriptive machine name; caller frees the result. */
char* Elf_(r_bin_elf_get_machine_name)(ELFOBJ *bin) { switch (bin->ehdr.e_machine) {
	case EM_NONE: return strdup ("No machine"); case EM_M32: return strdup ("AT&T WE 32100"); case EM_SPARC: return strdup ("SUN SPARC"); case EM_386: return strdup ("Intel 80386"); case EM_68K: return strdup ("Motorola m68k family"); case EM_88K: return strdup ("Motorola m88k family"); case EM_860: return strdup ("Intel 80860"); case EM_MIPS: return strdup ("MIPS R3000"); case EM_S370: return strdup ("IBM System/370"); case EM_MIPS_RS3_LE: return strdup ("MIPS R3000 little-endian"); case EM_PARISC: return strdup ("HPPA"); case EM_VPP500: return strdup ("Fujitsu VPP500"); case EM_SPARC32PLUS: return strdup ("Sun's \"v8plus\""); case EM_960: return strdup ("Intel 80960"); case EM_PPC: return strdup ("PowerPC"); case EM_PPC64: return strdup ("PowerPC 64-bit"); case EM_S390: return strdup ("IBM S390"); case EM_V800: return strdup ("NEC V800 series"); case EM_FR20: return strdup ("Fujitsu FR20"); case EM_RH32: return strdup ("TRW RH-32"); case EM_RCE: return strdup ("Motorola RCE"); case EM_ARM: return strdup ("ARM"); case EM_BLACKFIN: return strdup ("Analog Devices Blackfin"); case EM_FAKE_ALPHA: return strdup ("Digital Alpha"); case EM_SH: return strdup ("Hitachi SH"); case EM_SPARCV9: return strdup ("SPARC v9 64-bit"); case EM_TRICORE: return strdup ("Siemens Tricore"); case EM_ARC: return strdup ("Argonaut RISC Core"); case EM_H8_300: return strdup ("Hitachi H8/300"); case EM_H8_300H: return strdup ("Hitachi H8/300H"); case EM_H8S: return strdup ("Hitachi H8S"); case EM_H8_500: return strdup ("Hitachi H8/500"); case EM_IA_64: return strdup ("Intel Merced"); case EM_MIPS_X: return strdup ("Stanford MIPS-X"); case EM_COLDFIRE: return strdup ("Motorola Coldfire"); case EM_68HC12: return strdup ("Motorola M68HC12"); case EM_MMA: return strdup ("Fujitsu MMA Multimedia Accelerator"); case EM_PCP: return strdup ("Siemens PCP"); case EM_NCPU: return strdup ("Sony nCPU embeeded RISC"); case EM_NDR1: return strdup ("Denso NDR1 microprocessor"); case EM_STARCORE: return strdup ("Motorola Start*Core processor"); case EM_ME16: return strdup ("Toyota ME16 processor"); case EM_ST100: return strdup ("STMicroelectronic ST100 processor"); case EM_TINYJ: return strdup ("Advanced Logic Corp. Tinyj emb.fam"); case EM_X86_64: return strdup ("AMD x86-64 architecture"); case EM_LANAI: return strdup ("32bit LANAI architecture"); case EM_PDSP: return strdup ("Sony DSP Processor"); case EM_FX66: return strdup ("Siemens FX66 microcontroller"); case EM_ST9PLUS: return strdup ("STMicroelectronics ST9+ 8/16 mc"); case EM_ST7: return strdup ("STmicroelectronics ST7 8 bit mc"); case EM_68HC16: return strdup ("Motorola MC68HC16 microcontroller"); case EM_68HC11: return strdup ("Motorola MC68HC11 microcontroller"); case EM_68HC08: return strdup ("Motorola MC68HC08 microcontroller"); case EM_68HC05: return strdup ("Motorola MC68HC05 microcontroller"); case EM_SVX: return strdup ("Silicon Graphics SVx"); case EM_ST19: return strdup ("STMicroelectronics ST19 8 bit mc"); case EM_VAX: return strdup ("Digital VAX"); case EM_CRIS: return strdup ("Axis Communications 32-bit embedded processor"); case EM_JAVELIN: return strdup ("Infineon Technologies 32-bit embedded processor"); case EM_FIREPATH: return strdup ("Element 14 64-bit DSP Processor"); case EM_ZSP: return strdup ("LSI Logic 16-bit DSP Processor"); case EM_MMIX: return strdup ("Donald Knuth's educational 64-bit processor"); case EM_HUANY: return strdup ("Harvard University machine-independent object files"); case EM_PRISM: return strdup ("SiTera Prism"); case EM_AVR: return strdup ("Atmel AVR 8-bit microcontroller"); case EM_FR30: return strdup ("Fujitsu FR30"); case EM_D10V: return strdup ("Mitsubishi D10V"); case EM_D30V: return strdup ("Mitsubishi D30V"); case EM_V850: return strdup ("NEC v850"); case EM_M32R: return strdup ("Mitsubishi M32R"); case EM_MN10300: return strdup ("Matsushita MN10300"); case EM_MN10200: return strdup ("Matsushita MN10200"); case EM_PJ: return strdup ("picoJava"); case EM_OPENRISC: return strdup ("OpenRISC 32-bit embedded processor"); case EM_ARC_A5: return strdup ("ARC Cores Tangent-A5"); case EM_XTENSA: return strdup ("Tensilica Xtensa Architecture"); case EM_AARCH64: return strdup ("ARM aarch64"); case EM_PROPELLER: return strdup ("Parallax Propeller"); case EM_MICROBLAZE: return strdup ("Xilinx MicroBlaze"); case EM_RISCV: return strdup ("RISC V"); case EM_VIDEOCORE3: return strdup ("VideoCore III"); case EM_VIDEOCORE4: return strdup ("VideoCore IV"); default: return r_str_newf ("<unknown>: 0x%x", bin->ehdr.e_machine); } }
/* Human-readable e_type (readelf-style), including the processor/OS-specific
 * ranges; caller frees the result. */
char* Elf_(r_bin_elf_get_file_type)(ELFOBJ *bin) { ut32 e_type; if (!bin) { return NULL; } e_type = (ut32)bin->ehdr.e_type; // cast to avoid warn in iphone-gcc, must be ut16
	switch (e_type) { case ET_NONE: return strdup ("NONE (None)"); case ET_REL: return strdup ("REL (Relocatable file)"); case ET_EXEC: return strdup ("EXEC (Executable file)"); case ET_DYN: return strdup ("DYN (Shared object file)"); case ET_CORE: return strdup ("CORE (Core file)"); } if ((e_type >= ET_LOPROC) && (e_type <= ET_HIPROC)) { return r_str_newf ("Processor Specific: %x", e_type); } if ((e_type >= ET_LOOS) && (e_type <= ET_HIOS)) { return r_str_newf ("OS Specific: %x", e_type); } return r_str_newf ("<unknown>: %x", e_type); }
/* Human-readable EI_CLASS (ELF32/ELF64); caller frees the result. */
char* Elf_(r_bin_elf_get_elf_class)(ELFOBJ *bin) { switch
(bin->ehdr.e_ident[EI_CLASS]) { case ELFCLASSNONE: return strdup ("none"); case ELFCLASS32: return strdup ("ELF32"); case ELFCLASS64: return strdup ("ELF64"); default: return r_str_newf ("<unknown: %x>", bin->ehdr.e_ident[EI_CLASS]); } }
/* Word size in bits, with target-specific overrides: 16 for ARCompact,
 * MIPS width derived from EF_MIPS_ARCH (incl. a PS2 no-interp hack),
 * 16 for Thumb when a symbol or the entrypoint has the low bit set,
 * otherwise by EI_CLASS (default 32). */
int Elf_(r_bin_elf_get_bits)(ELFOBJ *bin) {
	/* Hack for ARCompact */
	if (bin->ehdr.e_machine == EM_ARC_A5) { return 16; }
	/* Hack for Ps2 */
	if (bin->phdr && bin->ehdr.e_machine == EM_MIPS) { const ut32 mipsType = bin->ehdr.e_flags & EF_MIPS_ARCH; if (bin->ehdr.e_type == ET_EXEC) { int i; bool haveInterp = false; for (i = 0; i < bin->ehdr.e_phnum; i++) { if (bin->phdr[i].p_type == PT_INTERP) { haveInterp = true; } } if (!haveInterp && mipsType == EF_MIPS_ARCH_3) { // Playstation2 Hack
				return 64; } }
		// TODO: show this specific asm.cpu somewhere in bininfo (mips1, mips2, mips3, mips32r2, ...)
		switch (mipsType) { case EF_MIPS_ARCH_1: case EF_MIPS_ARCH_2: case EF_MIPS_ARCH_3: case EF_MIPS_ARCH_4: case EF_MIPS_ARCH_5: case EF_MIPS_ARCH_32: return 32; case EF_MIPS_ARCH_64: return 64; case EF_MIPS_ARCH_32R2: return 32; case EF_MIPS_ARCH_64R2: return 64; break; } return 32; }
	/* Hack for Thumb */
	if (bin->ehdr.e_machine == EM_ARM) { if (bin->ehdr.e_type != ET_EXEC) { struct r_bin_elf_symbol_t *symbol; if ((symbol = Elf_(r_bin_elf_get_symbols) (bin))) { int i = 0; for (i = 0; !symbol[i].last; i++) { ut64 paddr = symbol[i].offset; if (paddr & 1) { return 16; } } } } { ut64 entry = Elf_(r_bin_elf_get_entry_offset) (bin); if (entry & 1) { return 16; } } } switch (bin->ehdr.e_ident[EI_CLASS]) { case ELFCLASS32: return 32; case ELFCLASS64: return 64; case ELFCLASSNONE: default: return 32; // defaults
	} }
/* Search for string s in the last 64 bytes of the file buffer. */
static inline int noodle(ELFOBJ *bin, const char *s) { const ut8 *p = bin->b->buf; if (bin->b->length > 64) { p += bin->b->length - 64; } else { return 0; } return r_mem_mem (p, 64, (const ut8 *)s, strlen (s)) != NULL; }
/* Search for string s in the section-header string table (capped at 4 KiB). */
static inline int needle(ELFOBJ *bin, const char *s) { if (bin->shstrtab) { ut32 len = bin->shstrtab_size; if (len > 4096) {
len = 4096; // avoid slow loading .. can be buggy? } return r_mem_mem ((const ut8*)bin->shstrtab, len, (const ut8*)s, strlen (s)) != NULL; } return 0; } // TODO: must return const char * all those strings must be const char os[LINUX] or so char* Elf_(r_bin_elf_get_osabi_name)(ELFOBJ *bin) { switch (bin->ehdr.e_ident[EI_OSABI]) { case ELFOSABI_LINUX: return strdup("linux"); case ELFOSABI_SOLARIS: return strdup("solaris"); case ELFOSABI_FREEBSD: return strdup("freebsd"); case ELFOSABI_HPUX: return strdup("hpux"); } /* Hack to identify OS */ if (needle (bin, "openbsd")) return strdup ("openbsd"); if (needle (bin, "netbsd")) return strdup ("netbsd"); if (needle (bin, "freebsd")) return strdup ("freebsd"); if (noodle (bin, "BEOS:APP_VERSION")) return strdup ("beos"); if (needle (bin, "GNU")) return strdup ("linux"); return strdup ("linux"); } ut8 *Elf_(r_bin_elf_grab_regstate)(ELFOBJ *bin, int *len) { if (bin->phdr) { int i; int num = bin->ehdr.e_phnum; for (i = 0; i < num; i++) { if (bin->phdr[i].p_type != PT_NOTE) { continue; } int bits = Elf_(r_bin_elf_get_bits)(bin); int regdelta = (bits == 64)? 0x84: 0x40; // x64 vs x32 int regsize = 160; // for x86-64 ut8 *buf = malloc (regsize); if (r_buf_read_at (bin->b, bin->phdr[i].p_offset + regdelta, buf, regsize) != regsize) { free (buf); bprintf ("Cannot read register state from CORE file\n"); return NULL; } if (len) { *len = regsize; } return buf; } } bprintf ("Cannot find NOTE section\n"); return NULL; } int Elf_(r_bin_elf_is_big_endian)(ELFOBJ *bin) { return (bin->ehdr.e_ident[EI_DATA] == ELFDATA2MSB); } /* XXX Init dt_strtab? 
*/
/* Copy of the DT_RPATH/DT_RUNPATH string (first match wins), truncated to
 * ELF_STRING_LENGTH; NULL when absent, on bad strtab offset, or on OOM.
 * Caller frees the result. */
char *Elf_(r_bin_elf_get_rpath)(ELFOBJ *bin) { char *ret = NULL; int j; if (!bin || !bin->phdr || !bin->dyn_buf || !bin->strtab) { return NULL; } for (j = 0; j< bin->dyn_entries; j++) { if (bin->dyn_buf[j].d_tag == DT_RPATH || bin->dyn_buf[j].d_tag == DT_RUNPATH) { if (!(ret = calloc (1, ELF_STRING_LENGTH))) { perror ("malloc (rpath)"); return NULL; } if (bin->dyn_buf[j].d_un.d_val > bin->strtab_size) { free (ret); return NULL; } strncpy (ret, bin->strtab + bin->dyn_buf[j].d_un.d_val, ELF_STRING_LENGTH); ret[ELF_STRING_LENGTH - 1] = '\0'; break; } } return ret; }
/* Upper bound on the number of relocation entries across all .rel.* /
 * .rela.* sections (rounding section sizes up to whole entries). */
static size_t get_relocs_num(ELFOBJ *bin) { size_t i, size, ret = 0;
	/* we need to be careful here, in malformed files the section size might
	 * not be a multiple of a Rel/Rela size; round up so we allocate enough
	 * space. */
#define NUMENTRIES_ROUNDUP(sectionsize, entrysize) (((sectionsize)+(entrysize)-1)/(entrysize))
	if (!bin->g_sections) { return 0; } size = bin->is_rela == DT_REL ? sizeof (Elf_(Rel)) : sizeof (Elf_(Rela)); for (i = 0; !bin->g_sections[i].last; i++) { if (!strncmp (bin->g_sections[i].name, ".rela.", strlen (".rela."))) { if (!bin->is_rela) { size = sizeof (Elf_(Rela)); } ret += NUMENTRIES_ROUNDUP (bin->g_sections[i].size, size); } else if (!strncmp (bin->g_sections[i].name, ".rel.", strlen (".rel."))){ if (!bin->is_rela) { size = sizeof (Elf_(Rel)); } ret += NUMENTRIES_ROUNDUP (bin->g_sections[i].size, size); } } return ret;
#undef NUMENTRIES_ROUNDUP
}
/* Decode one Rel/Rela entry at the given file offset into r; returns the
 * entry size consumed or -1 on out-of-bounds/overflowing offsets. */
static int read_reloc(ELFOBJ *bin, RBinElfReloc *r, int is_rela, ut64 offset) { ut8 *buf = bin->b->buf; int j = 0; if (offset + sizeof (Elf_ (Rela)) > bin->size || offset + sizeof (Elf_(Rela)) < offset) { return -1; } if (is_rela == DT_RELA) { Elf_(Rela) rela;
#if R_BIN_ELF64
		rela.r_offset = READ64 (buf + offset, j)
		rela.r_info = READ64 (buf + offset, j)
		rela.r_addend = READ64 (buf + offset, j)
#else
		rela.r_offset = READ32 (buf + offset, j)
		rela.r_info = READ32 (buf + offset, j)
		rela.r_addend = READ32 (buf + offset, j)
#endif
		r->is_rela
= is_rela; r->offset = rela.r_offset; r->type = ELF_R_TYPE (rela.r_info); r->sym = ELF_R_SYM (rela.r_info); r->last = 0; r->addend = rela.r_addend; return sizeof (Elf_(Rela)); } else { Elf_(Rel) rel;
#if R_BIN_ELF64
		rel.r_offset = READ64 (buf + offset, j)
		rel.r_info = READ64 (buf + offset, j)
#else
		rel.r_offset = READ32 (buf + offset, j)
		rel.r_info = READ32 (buf + offset, j)
#endif
		r->is_rela = is_rela; r->offset = rel.r_offset; r->type = ELF_R_TYPE (rel.r_info); r->sym = ELF_R_SYM (rel.r_info); r->last = 0; return sizeof (Elf_(Rel)); } }
/* Build a last-terminated array of all relocations found in .rel.* /
 * .rela.* sections (continued on the next chunk). Caller frees the array. */
RBinElfReloc* Elf_(r_bin_elf_get_relocs)(ELFOBJ *bin) { int res, rel, rela, i, j; size_t reloc_num = 0; RBinElfReloc *ret = NULL; if (!bin || !bin->g_sections) { return NULL; } reloc_num = get_relocs_num (bin); if (!reloc_num) { return NULL; } bin->reloc_num = reloc_num; ret = (RBinElfReloc*)calloc ((size_t)reloc_num + 1, sizeof(RBinElfReloc)); if (!ret) { return NULL; }
#if DEAD_CODE
	ut64 section_text_offset = Elf_(r_bin_elf_get_section_offset) (bin, ".text"); if (section_text_offset == -1) { section_text_offset = 0; }
#endif
	for (i = 0, rel = 0; !bin->g_sections[i].last && rel < reloc_num ; i++) { bool is_rela = 0 == strncmp (bin->g_sections[i].name, ".rela.", strlen (".rela.")); bool is_rel = 0 == strncmp (bin->g_sections[i].name, ".rel.", strlen (".rel.")); if (!is_rela && !is_rel) { continue; } for (j = 0; j < bin->g_sections[i].size; j += res) { if (bin->g_sections[i].size > bin->size) { break; } if (bin->g_sections[i].offset > bin->size) { break; } if (rel >= reloc_num) { bprintf ("Internal error: ELF relocation buffer too small," "please file a bug report."); break; } if (!bin->is_rela) { rela = is_rela?
DT_RELA : DT_REL; } else { rela = bin->is_rela; } res = read_reloc (bin, &ret[rel], rela, bin->g_sections[i].offset + j); if (j + res > bin->g_sections[i].size) { bprintf ("Warning: malformed file, relocation entry #%u is partially beyond the end of section %u.\n", rel, i); } if (bin->ehdr.e_type == ET_REL) { if (bin->g_sections[i].info < bin->ehdr.e_shnum && bin->shdr) { ret[rel].rva = bin->shdr[bin->g_sections[i].info].sh_offset + ret[rel].offset; ret[rel].rva = Elf_(r_bin_elf_p2v) (bin, ret[rel].rva); } else { ret[rel].rva = ret[rel].offset; } } else { ret[rel].rva = ret[rel].offset; ret[rel].offset = Elf_(r_bin_elf_v2p) (bin, ret[rel].offset); } ret[rel].last = 0; if (res < 0) { break; } rel++; } } ret[reloc_num].last = 1; return ret; }
/* Build a last-terminated array of DT_NEEDED library names from the dynamic
 * section. NOTE(review): the guard `*(bin->strtab+1) == '0'` compares against
 * the character '0', not NUL — verify this is intentional. Caller frees. */
RBinElfLib* Elf_(r_bin_elf_get_libs)(ELFOBJ *bin) { RBinElfLib *ret = NULL; int j, k; if (!bin || !bin->phdr || !bin->dyn_buf || !bin->strtab || *(bin->strtab+1) == '0') { return NULL; } for (j = 0, k = 0; j < bin->dyn_entries; j++) if (bin->dyn_buf[j].d_tag == DT_NEEDED) { RBinElfLib *r = realloc (ret, (k + 1) * sizeof (RBinElfLib)); if (!r) { perror ("realloc (libs)"); free (ret); return NULL; } ret = r; if (bin->dyn_buf[j].d_un.d_val > bin->strtab_size) { free (ret); return NULL; } strncpy (ret[k].name, bin->strtab + bin->dyn_buf[j].d_un.d_val, ELF_STRING_LENGTH); ret[k].name[ELF_STRING_LENGTH - 1] = '\0'; ret[k].last = 0; if (ret[k].name[0]) { k++; } } RBinElfLib *r = realloc (ret, (k + 1) * sizeof (RBinElfLib)); if (!r) { perror ("realloc (libs)"); free (ret); return NULL; } ret = r; ret[k].last = 1; return ret; }
/* Synthesize pseudo-sections (.rel.dyn/.rela.plt/.rel.plt/.got.plt) from
 * dynamic entries when no section headers exist (continued below). */
static RBinElfSection* get_sections_from_phdr(ELFOBJ *bin) { RBinElfSection *ret; int i, num_sections = 0; ut64 reldyn = 0, relava = 0, pltgotva = 0, relva = 0; ut64 reldynsz = 0, relasz = 0, pltgotsz = 0; if (!bin || !bin->phdr || !bin->ehdr.e_phnum) return NULL; for (i = 0; i < bin->dyn_entries; i++) { switch (bin->dyn_buf[i].d_tag) { case DT_REL: reldyn = bin->dyn_buf[i].d_un.d_ptr; num_sections++; break; case
DT_RELA: relva = bin->dyn_buf[i].d_un.d_ptr; num_sections++; break; case DT_RELSZ: reldynsz = bin->dyn_buf[i].d_un.d_val; break; case DT_RELASZ: relasz = bin->dyn_buf[i].d_un.d_val; break; case DT_PLTGOT: pltgotva = bin->dyn_buf[i].d_un.d_ptr; num_sections++; break; case DT_PLTRELSZ: pltgotsz = bin->dyn_buf[i].d_un.d_val; break; case DT_JMPREL: relava = bin->dyn_buf[i].d_un.d_ptr; num_sections++; break; default: break; } } ret = calloc (num_sections + 1, sizeof(RBinElfSection)); if (!ret) { return NULL; }
	/* NOTE(review): the DT_RELA address is paired with DT_RELASZ under the
	 * name ".rel.plt" while DT_JMPREL gets DT_PLTRELSZ as ".rela.plt" —
	 * the size/name pairing looks swapped; confirm against consumers. */
	i = 0; if (reldyn) { ret[i].offset = Elf_(r_bin_elf_v2p) (bin, reldyn); ret[i].rva = reldyn; ret[i].size = reldynsz; strcpy (ret[i].name, ".rel.dyn"); ret[i].last = 0; i++; } if (relava) { ret[i].offset = Elf_(r_bin_elf_v2p) (bin, relava); ret[i].rva = relava; ret[i].size = pltgotsz; strcpy (ret[i].name, ".rela.plt"); ret[i].last = 0; i++; } if (relva) { ret[i].offset = Elf_(r_bin_elf_v2p) (bin, relva); ret[i].rva = relva; ret[i].size = relasz; strcpy (ret[i].name, ".rel.plt"); ret[i].last = 0; i++; } if (pltgotva) { ret[i].offset = Elf_(r_bin_elf_v2p) (bin, pltgotva); ret[i].rva = pltgotva; ret[i].size = pltgotsz; strcpy (ret[i].name, ".got.plt"); ret[i].last = 0; i++; } ret[i].last = 1; return ret; }
/* Build (and cache via bin->g_sections) the last-terminated section array
 * from the section headers, falling back to get_sections_from_phdr when the
 * binary has no shdr (continued on the next chunk). */
RBinElfSection* Elf_(r_bin_elf_get_sections)(ELFOBJ *bin) { RBinElfSection *ret = NULL; char unknown_s[20], invalid_s[20]; int i, nidx, unknown_c=0, invalid_c=0; if (!bin) { return NULL; } if (bin->g_sections) { return bin->g_sections; } if (!bin->shdr) {
		//we don't give up search in phdr section
		return get_sections_from_phdr (bin); } if (!(ret = calloc ((bin->ehdr.e_shnum + 1), sizeof (RBinElfSection)))) { return NULL; } for (i = 0; i < bin->ehdr.e_shnum; i++) { ret[i].offset = bin->shdr[i].sh_offset; ret[i].size = bin->shdr[i].sh_size; ret[i].align = bin->shdr[i].sh_addralign; ret[i].flags = bin->shdr[i].sh_flags; ret[i].link = bin->shdr[i].sh_link; ret[i].info = bin->shdr[i].sh_info; ret[i].type = bin->shdr[i].sh_type; if (bin->ehdr.e_type == ET_REL) { ret[i].rva
= bin->baddr + bin->shdr[i].sh_offset; } else { ret[i].rva = bin->shdr[i].sh_addr; } nidx = bin->shdr[i].sh_name;
/* Helpers for resolving a section name out of the shstrtab with bounds
 * checking; invalid/unknown names are synthesized as "invalid%d"/"unknown%d". */
#define SHNAME (int)bin->shdr[i].sh_name
#define SHNLEN ELF_STRING_LENGTH - 4
#define SHSIZE (int)bin->shstrtab_size
		if (nidx < 0 || !bin->shstrtab_section || !bin->shstrtab_size || nidx > bin->shstrtab_size) { snprintf (invalid_s, sizeof (invalid_s) - 4, "invalid%d", invalid_c); strncpy (ret[i].name, invalid_s, SHNLEN); invalid_c++; } else { if (bin->shstrtab && (SHNAME > 0) && (SHNAME < SHSIZE)) { strncpy (ret[i].name, &bin->shstrtab[SHNAME], SHNLEN); } else { if (bin->shdr[i].sh_type == SHT_NULL) {
					//to follow the same behaviour as readelf
					strncpy (ret[i].name, "", sizeof (ret[i].name) - 4); } else { snprintf (unknown_s, sizeof (unknown_s)-4, "unknown%d", unknown_c); strncpy (ret[i].name, unknown_s, sizeof (ret[i].name)-4); unknown_c++; } } } ret[i].name[ELF_STRING_LENGTH-2] = '\0'; ret[i].last = 0; } ret[i].last = 1; return ret; }
/* Translate st_info into the textual bind ("LOCAL"/"GLOBAL"/...) and type
 * ("FUNC"/"OBJECT"/...) fields of a symbol entry. */
static void fill_symbol_bind_and_type (struct r_bin_elf_symbol_t *ret, Elf_(Sym) *sym) {
#define s_bind(x) ret->bind = x
#define s_type(x) ret->type = x
	switch (ELF_ST_BIND(sym->st_info)) { case STB_LOCAL: s_bind ("LOCAL"); break; case STB_GLOBAL: s_bind ("GLOBAL"); break; case STB_WEAK: s_bind ("WEAK"); break; case STB_NUM: s_bind ("NUM"); break; case STB_LOOS: s_bind ("LOOS"); break; case STB_HIOS: s_bind ("HIOS"); break; case STB_LOPROC: s_bind ("LOPROC"); break; case STB_HIPROC: s_bind ("HIPROC"); break; default: s_bind ("UNKNOWN"); } switch (ELF_ST_TYPE (sym->st_info)) { case STT_NOTYPE: s_type ("NOTYPE"); break; case STT_OBJECT: s_type ("OBJECT"); break; case STT_FUNC: s_type ("FUNC"); break; case STT_SECTION: s_type ("SECTION"); break; case STT_FILE: s_type ("FILE"); break; case STT_COMMON: s_type ("COMMON"); break; case STT_TLS: s_type ("TLS"); break; case STT_NUM: s_type ("NUM"); break; case STT_LOOS: s_type ("LOOS"); break; case STT_HIOS: s_type ("HIOS"); break; case STT_LOPROC: s_type ("LOPROC"); break; case
STT_HIPROC: s_type ("HIPROC"); break; default: s_type ("UNKNOWN"); } }
/* Parse the dynamic symbol table located via DT_SYMTAB/DT_SYMENT (used when
 * there are no usable section headers). Since the table size is unknown,
 * entries are read until the end of the file buffer, growing both arrays on
 * demand; type selects symbols vs imports. Returns a last-terminated array
 * owned by the caller (cached by the wrappers below), or NULL on failure. */
static RBinElfSymbol* get_symbols_from_phdr(ELFOBJ *bin, int type) { Elf_(Sym) *sym = NULL; Elf_(Addr) addr_sym_table = 0; ut8 s[sizeof (Elf_(Sym))] = {0}; RBinElfSymbol *ret = NULL; int i, j, r, tsize, nsym, ret_ctr; ut64 toffset = 0, tmp_offset; ut32 size, sym_size = 0; if (!bin || !bin->phdr || !bin->ehdr.e_phnum) { return NULL; } for (j = 0; j < bin->dyn_entries; j++) { switch (bin->dyn_buf[j].d_tag) { case (DT_SYMTAB): addr_sym_table = Elf_(r_bin_elf_v2p) (bin, bin->dyn_buf[j].d_un.d_ptr); break; case (DT_SYMENT): sym_size = bin->dyn_buf[j].d_un.d_val; break; default: break; } } if (!addr_sym_table) { return NULL; } if (!sym_size) { return NULL; }
	//since ELF doesn't specify the symbol table size we may read until the end of the buffer
	nsym = (bin->size - addr_sym_table) / sym_size; if (!UT32_MUL (&size, nsym, sizeof (Elf_ (Sym)))) { goto beach; } if (size < 1) { goto beach; } if (addr_sym_table > bin->size || addr_sym_table + size > bin->size) { goto beach; } if (nsym < 1) { return NULL; }
	// we reserve room for 4096 and grow as needed.
	size_t capacity1 = 4096; size_t capacity2 = 4096; sym = (Elf_(Sym)*) calloc (capacity1, sym_size); ret = (RBinElfSymbol *) calloc (capacity2, sizeof (struct r_bin_elf_symbol_t)); if (!sym || !ret) { goto beach; } for (i = 1, ret_ctr = 0; i < nsym; i++) { if (i >= capacity1) { // maybe grow
			// You take what you want, but you eat what you take.
			Elf_(Sym)* temp_sym = (Elf_(Sym)*) realloc(sym, (capacity1 * GROWTH_FACTOR) * sym_size); if (!temp_sym) { goto beach; } sym = temp_sym; capacity1 *= GROWTH_FACTOR; } if (ret_ctr >= capacity2) { // maybe grow
			RBinElfSymbol *temp_ret = realloc (ret, capacity2 * GROWTH_FACTOR * sizeof (struct r_bin_elf_symbol_t)); if (!temp_ret) { goto beach; } ret = temp_ret; capacity2 *= GROWTH_FACTOR; }
		// read in one entry
		r = r_buf_read_at (bin->b, addr_sym_table + i * sizeof (Elf_ (Sym)), s, sizeof (Elf_ (Sym))); if (r < 1) { goto beach; } int j = 0;
#if R_BIN_ELF64
		sym[i].st_name = READ32 (s, j); sym[i].st_info = READ8 (s, j); sym[i].st_other = READ8 (s, j); sym[i].st_shndx = READ16 (s, j); sym[i].st_value = READ64 (s, j); sym[i].st_size = READ64 (s, j);
#else
		sym[i].st_name = READ32 (s, j); sym[i].st_value = READ32 (s, j); sym[i].st_size = READ32 (s, j); sym[i].st_info = READ8 (s, j); sym[i].st_other = READ8 (s, j); sym[i].st_shndx = READ16 (s, j);
#endif
		// zero symbol is always empty
		// Examine entry and maybe store
		if (type == R_BIN_ELF_IMPORTS && sym[i].st_shndx == STN_UNDEF) { if (sym[i].st_value) { toffset = sym[i].st_value; } else if ((toffset = get_import_addr (bin, i)) == -1){ toffset = 0; } tsize = 16; } else if (type == R_BIN_ELF_SYMBOLS && sym[i].st_shndx != STN_UNDEF && ELF_ST_TYPE (sym[i].st_info) != STT_SECTION && ELF_ST_TYPE (sym[i].st_info) != STT_FILE) { tsize = sym[i].st_size; toffset = (ut64) sym[i].st_value; } else { continue; } tmp_offset = Elf_(r_bin_elf_v2p) (bin, toffset); if (tmp_offset > bin->size) { goto done; } if (sym[i].st_name + 2 > bin->strtab_size) {
			// Since we are reading beyond the symbol table what's happening
			// is that some entry is trying to dereference the strtab beyond its capacity
			// is not a symbol so is the end
			goto done; } ret[ret_ctr].offset = tmp_offset; ret[ret_ctr].size = tsize; { int rest = ELF_STRING_LENGTH - 1; int st_name = sym[i].st_name; int maxsize = R_MIN (bin->size, bin->strtab_size); if (st_name < 0 || st_name >= maxsize) { ret[ret_ctr].name[0] = 0; } else { const int len = __strnlen (bin->strtab + st_name, rest); memcpy (ret[ret_ctr].name, &bin->strtab[st_name], len); } } ret[ret_ctr].ordinal = i; ret[ret_ctr].in_shdr = false; ret[ret_ctr].name[ELF_STRING_LENGTH - 2] = '\0'; fill_symbol_bind_and_type (&ret[ret_ctr], &sym[i]); ret[ret_ctr].last = 0; ret_ctr++; }
done:
	ret[ret_ctr].last = 1;
	// Size everything down to only what is used
	{ nsym = i > 0 ? i : 1; Elf_ (Sym) * temp_sym = (Elf_ (Sym)*) realloc (sym, (nsym * GROWTH_FACTOR) * sym_size); if (!temp_sym) { goto beach; } sym = temp_sym; } { ret_ctr = ret_ctr > 0 ? ret_ctr : 1; RBinElfSymbol *p = (RBinElfSymbol *) realloc (ret, (ret_ctr + 1) * sizeof (RBinElfSymbol)); if (!p) { goto beach; } ret = p; } if (type == R_BIN_ELF_IMPORTS && !bin->imports_by_ord_size) { bin->imports_by_ord_size = ret_ctr + 1; if (ret_ctr > 0) { bin->imports_by_ord = (RBinImport * *) calloc (ret_ctr + 1, sizeof (RBinImport*)); } else { bin->imports_by_ord = NULL; } } else if (type == R_BIN_ELF_SYMBOLS && !bin->symbols_by_ord_size && ret_ctr) { bin->symbols_by_ord_size = ret_ctr + 1; if (ret_ctr > 0) { bin->symbols_by_ord = (RBinSymbol * *) calloc (ret_ctr + 1, sizeof (RBinSymbol*)); }else { bin->symbols_by_ord = NULL; } } free (sym); return ret;
beach:
	free (sym); free (ret); return NULL; }
/* Memoized accessor: phdr-derived symbol list, parsed once per bin. */
static RBinElfSymbol *Elf_(r_bin_elf_get_phdr_symbols)(ELFOBJ *bin) { if (!bin) { return NULL; } if (bin->phdr_symbols) { return bin->phdr_symbols; } bin->phdr_symbols = get_symbols_from_phdr (bin, R_BIN_ELF_SYMBOLS); return bin->phdr_symbols; }
/* Memoized accessor: phdr-derived import list, parsed once per bin. */
static RBinElfSymbol *Elf_(r_bin_elf_get_phdr_imports)(ELFOBJ *bin) { if (!bin) { return NULL; } if (bin->phdr_imports) { return bin->phdr_imports; } bin->phdr_imports = get_symbols_from_phdr (bin, R_BIN_ELF_IMPORTS); return bin->phdr_imports; }
/* Merge phdr-only symbols into a shdr-derived symbol array (continued on
 * the next chunk): names are reconciled by offset and entries missing from
 * the shdr view are appended; returns the new entry count or -1 on OOM. */
static int Elf_(fix_symbols)(ELFOBJ *bin, int nsym, int type, RBinElfSymbol **sym) { int count = 0; RBinElfSymbol *ret = *sym; RBinElfSymbol *phdr_symbols = (type ==
R_BIN_ELF_SYMBOLS) ? Elf_(r_bin_elf_get_phdr_symbols) (bin) : Elf_(r_bin_elf_get_phdr_imports) (bin); RBinElfSymbol *tmp, *p; if (phdr_symbols) { RBinElfSymbol *d = ret; while (!d->last) {
			/* find match in phdr */
			/* NOTE(review): strcpy of p->name into d->name assumes both name
			 * fields share the same capacity — confirm against the struct. */
			p = phdr_symbols; while (!p->last) { if (p->offset && d->offset == p->offset) { p->in_shdr = true; if (*p->name && strcmp (d->name, p->name)) { strcpy (d->name, p->name); } } p++; } d++; } p = phdr_symbols; while (!p->last) { if (!p->in_shdr) { count++; } p++; }
		/*Take those symbols that are not present in the shdr but yes in phdr*/
		/*This should only should happen with fucked up binaries*/
		if (count > 0) {
			/*what happens if a shdr says it has only one symbol? we should look anyway into phdr*/
			tmp = (RBinElfSymbol*)realloc (ret, (nsym + count + 1) * sizeof (RBinElfSymbol)); if (!tmp) { return -1; } ret = tmp; ret[nsym--].last = 0; p = phdr_symbols; while (!p->last) { if (!p->in_shdr) { memcpy (&ret[++nsym], p, sizeof (RBinElfSymbol)); } p++; } ret[nsym + 1].last = 1; } *sym = ret; return nsym + 1; } return nsym; }
/* Section-header based symbol/import extraction: walks SHT_SYMTAB/SHT_DYNSYM
 * sections, loads the linked string table with bounds checks, and decodes
 * each Elf_(Sym). Falls back to the phdr-based parsers when there are no
 * usable section headers. (Function continues past the end of this chunk.) */
static RBinElfSymbol* Elf_(_r_bin_elf_get_symbols_imports)(ELFOBJ *bin, int type) { ut32 shdr_size; int tsize, nsym, ret_ctr = 0, i, j, r, k, newsize; ut64 toffset; ut32 size = 0; RBinElfSymbol *ret = NULL; Elf_(Shdr) *strtab_section = NULL; Elf_(Sym) *sym = NULL; ut8 s[sizeof (Elf_(Sym))] = { 0 }; char *strtab = NULL; if (!bin || !bin->shdr || !bin->ehdr.e_shnum || bin->ehdr.e_shnum == 0xffff) { return (type == R_BIN_ELF_SYMBOLS) ? Elf_(r_bin_elf_get_phdr_symbols) (bin) : Elf_(r_bin_elf_get_phdr_imports) (bin); } if (!UT32_MUL (&shdr_size, bin->ehdr.e_shnum, sizeof (Elf_(Shdr)))) { return false; } if (shdr_size + 8 > bin->size) { return false; } for (i = 0; i < bin->ehdr.e_shnum; i++) { if ((type == R_BIN_ELF_IMPORTS && bin->shdr[i].sh_type == (bin->ehdr.e_type == ET_REL ? SHT_SYMTAB : SHT_DYNSYM)) || (type == R_BIN_ELF_SYMBOLS && bin->shdr[i].sh_type == (Elf_(r_bin_elf_get_stripped) (bin) ? SHT_DYNSYM : SHT_SYMTAB))) { if (bin->shdr[i].sh_link < 1) {
				/* oops. fix out of range pointers */
				continue; }
			// hack to avoid asan cry
			if ((bin->shdr[i].sh_link * sizeof(Elf_(Shdr))) >= shdr_size) {
				/* oops. fix out of range pointers */
				continue; } strtab_section = &bin->shdr[bin->shdr[i].sh_link]; if (strtab_section->sh_size > ST32_MAX || strtab_section->sh_size+8 > bin->size) { bprintf ("size (syms strtab)"); free (ret); free (strtab); return NULL; } if (!strtab) { if (!(strtab = (char *)calloc (1, 8 + strtab_section->sh_size))) { bprintf ("malloc (syms strtab)"); goto beach; } if (strtab_section->sh_offset > bin->size || strtab_section->sh_offset + strtab_section->sh_size > bin->size) { goto beach; } if (r_buf_read_at (bin->b, strtab_section->sh_offset, (ut8*)strtab, strtab_section->sh_size) == -1) { bprintf ("Warning: read (syms strtab)\n"); goto beach; } } newsize = 1 + bin->shdr[i].sh_size; if (newsize < 0 || newsize > bin->size) { bprintf ("invalid shdr %d size\n", i); goto beach; } nsym = (int)(bin->shdr[i].sh_size / sizeof (Elf_(Sym))); if (nsym < 0) { goto beach; } if (!(sym = (Elf_(Sym) *)calloc (nsym, sizeof (Elf_(Sym))))) { bprintf ("calloc (syms)"); goto beach; } if (!UT32_MUL (&size, nsym, sizeof (Elf_(Sym)))) { goto beach; } if (size < 1 || size > bin->size) { goto beach; } if (bin->shdr[i].sh_offset > bin->size) { goto beach; } if (bin->shdr[i].sh_offset + size > bin->size) { goto beach; } for (j = 0; j < nsym; j++) { int k = 0; r = r_buf_read_at (bin->b, bin->shdr[i].sh_offset + j * sizeof (Elf_(Sym)), s, sizeof (Elf_(Sym))); if (r < 1) { bprintf ("Warning: read (sym)\n"); goto beach; }
#if R_BIN_ELF64
				sym[j].st_name = READ32 (s, k)
				sym[j].st_info = READ8 (s, k)
				sym[j].st_other = READ8 (s, k)
				sym[j].st_shndx = READ16 (s, k)
				sym[j].st_value = READ64 (s, k)
				sym[j].st_size = READ64 (s, k)
#else
				sym[j].st_name = READ32 (s, k)
				sym[j].st_value = READ32 (s, k)
				sym[j].st_size = READ32 (s, k)
				sym[j].st_info = READ8 (s, k)
				sym[j].st_other = READ8 (s, k)
				sym[j].st_shndx = READ16 (s, k)
#endif
			} free (ret); ret = calloc (nsym, sizeof (RBinElfSymbol)); if (!ret) { bprintf ("Cannot allocate %d symbols\n", nsym); goto beach; } for (k = 1, ret_ctr = 0; k < nsym; k++) { if (type == R_BIN_ELF_IMPORTS && sym[k].st_shndx == STN_UNDEF) { if (sym[k].st_value) { toffset = sym[k].st_value; } else if ((toffset = get_import_addr (bin, k)) == -1){ toffset = 0; } tsize = 16; } else if (type == R_BIN_ELF_SYMBOLS && sym[k].st_shndx != STN_UNDEF && ELF_ST_TYPE (sym[k].st_info) != STT_SECTION && ELF_ST_TYPE (sym[k].st_info) != STT_FILE) {
					//int idx = sym[k].st_shndx;
					tsize = sym[k].st_size; toffset = (ut64)sym[k].st_value; } else { continue; } if (bin->ehdr.e_type == ET_REL) { if (sym[k].st_shndx < bin->ehdr.e_shnum) ret[ret_ctr].offset = sym[k].st_value + bin->shdr[sym[k].st_shndx].sh_offset; } else { ret[ret_ctr].offset = Elf_(r_bin_elf_v2p) (bin, toffset); } ret[ret_ctr].size = tsize; if (sym[k].st_name + 2 > strtab_section->sh_size) { bprintf ("Warning: index out of strtab range\n"); goto beach; } { int rest = ELF_STRING_LENGTH - 1; int st_name = sym[k].st_name; int maxsize = R_MIN (bin->b->length, strtab_section->sh_size); if (st_name < 0 || st_name >= maxsize) { ret[ret_ctr].name[0] = 0; } else { const size_t len = __strnlen (strtab + sym[k].st_name, rest); memcpy (ret[ret_ctr].name, &strtab[sym[k].st_name], len); } } ret[ret_ctr].ordinal = k; ret[ret_ctr].name[ELF_STRING_LENGTH - 2] = '\0'; fill_symbol_bind_and_type (&ret[ret_ctr], &sym[k]); ret[ret_ctr].last = 0; ret_ctr++; } ret[ret_ctr].last = 1;
			// ugly dirty hack :D
			R_FREE (strtab); R_FREE (sym); } } if (!ret) { return (type == R_BIN_ELF_SYMBOLS) ?
Elf_(r_bin_elf_get_phdr_symbols) (bin) : Elf_(r_bin_elf_get_phdr_imports) (bin); } int max = -1; RBinElfSymbol *aux = NULL; nsym = Elf_(fix_symbols) (bin, ret_ctr, type, &ret); if (nsym == -1) { goto beach; } aux = ret; while (!aux->last) { if ((int)aux->ordinal > max) { max = aux->ordinal; } aux++; } nsym = max; if (type == R_BIN_ELF_IMPORTS) { R_FREE (bin->imports_by_ord); bin->imports_by_ord_size = nsym + 1; bin->imports_by_ord = (RBinImport**)calloc (R_MAX (1, nsym + 1), sizeof (RBinImport*)); } else if (type == R_BIN_ELF_SYMBOLS) { R_FREE (bin->symbols_by_ord); bin->symbols_by_ord_size = nsym + 1; bin->symbols_by_ord = (RBinSymbol**)calloc (R_MAX (1, nsym + 1), sizeof (RBinSymbol*)); } return ret; beach: free (ret); free (sym); free (strtab); return NULL; } RBinElfSymbol *Elf_(r_bin_elf_get_symbols)(ELFOBJ *bin) { if (!bin->g_symbols) { bin->g_symbols = Elf_(_r_bin_elf_get_symbols_imports) (bin, R_BIN_ELF_SYMBOLS); } return bin->g_symbols; } RBinElfSymbol *Elf_(r_bin_elf_get_imports)(ELFOBJ *bin) { if (!bin->g_imports) { bin->g_imports = Elf_(_r_bin_elf_get_symbols_imports) (bin, R_BIN_ELF_IMPORTS); } return bin->g_imports; } RBinElfField* Elf_(r_bin_elf_get_fields)(ELFOBJ *bin) { RBinElfField *ret = NULL; int i = 0, j; if (!bin || !(ret = calloc ((bin->ehdr.e_phnum + 3 + 1), sizeof (RBinElfField)))) { return NULL; } strncpy (ret[i].name, "ehdr", ELF_STRING_LENGTH); ret[i].offset = 0; ret[i++].last = 0; strncpy (ret[i].name, "shoff", ELF_STRING_LENGTH); ret[i].offset = bin->ehdr.e_shoff; ret[i++].last = 0; strncpy (ret[i].name, "phoff", ELF_STRING_LENGTH); ret[i].offset = bin->ehdr.e_phoff; ret[i++].last = 0; for (j = 0; bin->phdr && j < bin->ehdr.e_phnum; i++, j++) { snprintf (ret[i].name, ELF_STRING_LENGTH, "phdr_%i", j); ret[i].offset = bin->phdr[j].p_offset; ret[i].last = 0; } ret[i].last = 1; return ret; } void* Elf_(r_bin_elf_free)(ELFOBJ* bin) { int i; if (!bin) { return NULL; } free (bin->phdr); free (bin->shdr); free (bin->strtab); free 
(bin->dyn_buf); free (bin->shstrtab); free (bin->dynstr); //free (bin->strtab_section); if (bin->imports_by_ord) { for (i = 0; i<bin->imports_by_ord_size; i++) { free (bin->imports_by_ord[i]); } free (bin->imports_by_ord); } if (bin->symbols_by_ord) { for (i = 0; i<bin->symbols_by_ord_size; i++) { free (bin->symbols_by_ord[i]); } free (bin->symbols_by_ord); } r_buf_free (bin->b); if (bin->g_symbols != bin->phdr_symbols) { R_FREE (bin->phdr_symbols); } if (bin->g_imports != bin->phdr_imports) { R_FREE (bin->phdr_imports); } R_FREE (bin->g_sections); R_FREE (bin->g_symbols); R_FREE (bin->g_imports); free (bin); return NULL; } ELFOBJ* Elf_(r_bin_elf_new)(const char* file, bool verbose) { ut8 *buf; int size; ELFOBJ *bin = R_NEW0 (ELFOBJ); if (!bin) { return NULL; } memset (bin, 0, sizeof (ELFOBJ)); bin->file = file; if (!(buf = (ut8*)r_file_slurp (file, &size))) { return Elf_(r_bin_elf_free) (bin); } bin->size = size; bin->verbose = verbose; bin->b = r_buf_new (); if (!r_buf_set_bytes (bin->b, buf, bin->size)) { free (buf); return Elf_(r_bin_elf_free) (bin); } if (!elf_init (bin)) { free (buf); return Elf_(r_bin_elf_free) (bin); } free (buf); return bin; } ELFOBJ* Elf_(r_bin_elf_new_buf)(RBuffer *buf, bool verbose) { ELFOBJ *bin = R_NEW0 (ELFOBJ); bin->kv = sdb_new0 (); bin->b = r_buf_new (); bin->size = (ut32)buf->length; bin->verbose = verbose; if (!r_buf_set_bytes (bin->b, buf->buf, buf->length)) { return Elf_(r_bin_elf_free) (bin); } if (!elf_init (bin)) { return Elf_(r_bin_elf_free) (bin); } return bin; } static int is_in_pphdr (Elf_(Phdr) *p, ut64 addr) { return addr >= p->p_offset && addr < p->p_offset + p->p_memsz; } static int is_in_vphdr (Elf_(Phdr) *p, ut64 addr) { return addr >= p->p_vaddr && addr < p->p_vaddr + p->p_memsz; } /* converts a physical address to the virtual address, looking * at the program headers in the binary bin */ ut64 Elf_(r_bin_elf_p2v) (ELFOBJ *bin, ut64 paddr) { int i; if (!bin) return 0; if (!bin->phdr) { if (bin->ehdr.e_type == 
ET_REL) { return bin->baddr + paddr; } return paddr; } for (i = 0; i < bin->ehdr.e_phnum; ++i) { Elf_(Phdr) *p = &bin->phdr[i]; if (!p) { break; } if (p->p_type == PT_LOAD && is_in_pphdr (p, paddr)) { if (!p->p_vaddr && !p->p_offset) { continue; } return p->p_vaddr + paddr - p->p_offset; } } return paddr; } /* converts a virtual address to the relative physical address, looking * at the program headers in the binary bin */ ut64 Elf_(r_bin_elf_v2p) (ELFOBJ *bin, ut64 vaddr) { int i; if (!bin) { return 0; } if (!bin->phdr) { if (bin->ehdr.e_type == ET_REL) { return vaddr - bin->baddr; } return vaddr; } for (i = 0; i < bin->ehdr.e_phnum; ++i) { Elf_(Phdr) *p = &bin->phdr[i]; if (!p) { break; } if (p->p_type == PT_LOAD && is_in_vphdr (p, vaddr)) { if (!p->p_offset && !p->p_vaddr) { continue; } return p->p_offset + vaddr - p->p_vaddr; } } return vaddr; }
static Sdb *store_versioninfo_gnu_verdef(ELFOBJ *bin, Elf_(Shdr) *shdr, int sz) { const char *section_name = ""; const char *link_section_name = ""; char *end = NULL; Elf_(Shdr) *link_shdr = NULL; ut8 dfs[sizeof (Elf_(Verdef))] = {0}; Sdb *sdb; int cnt, i; if (shdr->sh_link > bin->ehdr.e_shnum) { return false; } link_shdr = &bin->shdr[shdr->sh_link]; if (shdr->sh_size < 1) { return false; } Elf_(Verdef) *defs = calloc (shdr->sh_size, sizeof (char)); if (!defs) { return false; } if (bin->shstrtab && shdr->sh_name < bin->shstrtab_size) { section_name = &bin->shstrtab[shdr->sh_name]; } if (link_shdr && bin->shstrtab && link_shdr->sh_name < bin->shstrtab_size) { link_section_name = &bin->shstrtab[link_shdr->sh_name]; } if (!defs) { bprintf ("Warning: Cannot allocate memory (Check Elf_(Verdef))\n"); return NULL; } sdb = sdb_new0 (); end = (char *)defs + shdr->sh_size; sdb_set (sdb, "section_name", section_name, 0); sdb_num_set (sdb, "entries", shdr->sh_info, 0); sdb_num_set (sdb, "addr", shdr->sh_addr, 0); sdb_num_set (sdb, "offset", shdr->sh_offset, 0); sdb_num_set (sdb, "link", shdr->sh_link, 0); sdb_set (sdb, "link_section_name", link_section_name, 0); for (cnt = 0, i = 0; i >= 0 && cnt < shdr->sh_info && ((char *)defs + i < end); ++cnt) { Sdb *sdb_verdef = sdb_new0 (); char *vstart = ((char*)defs) + i; char key[32] = {0}; Elf_(Verdef) *verdef = (Elf_(Verdef)*)vstart; Elf_(Verdaux) aux = {0}; int j = 0; int isum = 0; r_buf_read_at (bin->b, shdr->sh_offset + i, dfs, sizeof (Elf_(Verdef))); verdef->vd_version = READ16 (dfs, j) verdef->vd_flags = READ16 (dfs, j) verdef->vd_ndx = READ16 (dfs, j) verdef->vd_cnt = READ16 (dfs, j) verdef->vd_hash = READ32 (dfs, j) verdef->vd_aux = READ32 (dfs, j) verdef->vd_next = READ32 (dfs, j) int vdaux = verdef->vd_aux; if (vdaux < 1) { sdb_free (sdb_verdef); goto out_error; } vstart += vdaux; if (vstart > end || vstart + sizeof (Elf_(Verdaux)) > end) { sdb_free (sdb_verdef); goto out_error; } j = 0; aux.vda_name = READ32 (vstart, j) 
aux.vda_next = READ32 (vstart, j) isum = i + verdef->vd_aux; if (aux.vda_name > bin->dynstr_size) { sdb_free (sdb_verdef); goto out_error; } sdb_num_set (sdb_verdef, "idx", i, 0); sdb_num_set (sdb_verdef, "vd_version", verdef->vd_version, 0); sdb_num_set (sdb_verdef, "vd_ndx", verdef->vd_ndx, 0); sdb_num_set (sdb_verdef, "vd_cnt", verdef->vd_cnt, 0); sdb_set (sdb_verdef, "vda_name", &bin->dynstr[aux.vda_name], 0); sdb_set (sdb_verdef, "flags", get_ver_flags (verdef->vd_flags), 0); for (j = 1; j < verdef->vd_cnt; ++j) { int k; Sdb *sdb_parent = sdb_new0 (); isum += aux.vda_next; vstart += aux.vda_next; if (vstart > end || vstart + sizeof(Elf_(Verdaux)) > end) { sdb_free (sdb_verdef); sdb_free (sdb_parent); goto out_error; } k = 0; aux.vda_name = READ32 (vstart, k) aux.vda_next = READ32 (vstart, k) if (aux.vda_name > bin->dynstr_size) { sdb_free (sdb_verdef); sdb_free (sdb_parent); goto out_error; } sdb_num_set (sdb_parent, "idx", isum, 0); sdb_num_set (sdb_parent, "parent", j, 0); sdb_set (sdb_parent, "vda_name", &bin->dynstr[aux.vda_name], 0); snprintf (key, sizeof (key), "parent%d", j - 1); sdb_ns_set (sdb_verdef, key, sdb_parent); } snprintf (key, sizeof (key), "verdef%d", cnt); sdb_ns_set (sdb, key, sdb_verdef); if (!verdef->vd_next) { sdb_free (sdb_verdef); goto out_error; } if ((st32)verdef->vd_next < 1) { eprintf ("Warning: Invalid vd_next in the ELF version\n"); break; } i += verdef->vd_next; } free (defs); return sdb; out_error: free (defs); sdb_free (sdb); return NULL; }
static Sdb *store_versioninfo_gnu_verdef(ELFOBJ *bin, Elf_(Shdr) *shdr, int sz) { const char *section_name = ""; const char *link_section_name = ""; char *end = NULL; Elf_(Shdr) *link_shdr = NULL; ut8 dfs[sizeof (Elf_(Verdef))] = {0}; Sdb *sdb; int cnt, i; if (shdr->sh_link > bin->ehdr.e_shnum) { return false; } link_shdr = &bin->shdr[shdr->sh_link]; if (shdr->sh_size < 1 || shdr->sh_size > SIZE_MAX) { return false; } Elf_(Verdef) *defs = calloc (shdr->sh_size, sizeof (char)); if (!defs) { return false; } if (bin->shstrtab && shdr->sh_name < bin->shstrtab_size) { section_name = &bin->shstrtab[shdr->sh_name]; } if (link_shdr && bin->shstrtab && link_shdr->sh_name < bin->shstrtab_size) { link_section_name = &bin->shstrtab[link_shdr->sh_name]; } if (!defs) { bprintf ("Warning: Cannot allocate memory (Check Elf_(Verdef))\n"); return NULL; } sdb = sdb_new0 (); end = (char *)defs + shdr->sh_size; sdb_set (sdb, "section_name", section_name, 0); sdb_num_set (sdb, "entries", shdr->sh_info, 0); sdb_num_set (sdb, "addr", shdr->sh_addr, 0); sdb_num_set (sdb, "offset", shdr->sh_offset, 0); sdb_num_set (sdb, "link", shdr->sh_link, 0); sdb_set (sdb, "link_section_name", link_section_name, 0); for (cnt = 0, i = 0; i >= 0 && cnt < shdr->sh_info && ((char *)defs + i < end); ++cnt) { Sdb *sdb_verdef = sdb_new0 (); char *vstart = ((char*)defs) + i; char key[32] = {0}; Elf_(Verdef) *verdef = (Elf_(Verdef)*)vstart; Elf_(Verdaux) aux = {0}; int j = 0; int isum = 0; r_buf_read_at (bin->b, shdr->sh_offset + i, dfs, sizeof (Elf_(Verdef))); verdef->vd_version = READ16 (dfs, j) verdef->vd_flags = READ16 (dfs, j) verdef->vd_ndx = READ16 (dfs, j) verdef->vd_cnt = READ16 (dfs, j) verdef->vd_hash = READ32 (dfs, j) verdef->vd_aux = READ32 (dfs, j) verdef->vd_next = READ32 (dfs, j) int vdaux = verdef->vd_aux; if (vdaux < 1) { sdb_free (sdb_verdef); goto out_error; } vstart += vdaux; if (vstart > end || vstart + sizeof (Elf_(Verdaux)) > end) { sdb_free (sdb_verdef); goto out_error; } j = 0; 
aux.vda_name = READ32 (vstart, j) aux.vda_next = READ32 (vstart, j) isum = i + verdef->vd_aux; if (aux.vda_name > bin->dynstr_size) { sdb_free (sdb_verdef); goto out_error; } sdb_num_set (sdb_verdef, "idx", i, 0); sdb_num_set (sdb_verdef, "vd_version", verdef->vd_version, 0); sdb_num_set (sdb_verdef, "vd_ndx", verdef->vd_ndx, 0); sdb_num_set (sdb_verdef, "vd_cnt", verdef->vd_cnt, 0); sdb_set (sdb_verdef, "vda_name", &bin->dynstr[aux.vda_name], 0); sdb_set (sdb_verdef, "flags", get_ver_flags (verdef->vd_flags), 0); for (j = 1; j < verdef->vd_cnt; ++j) { int k; Sdb *sdb_parent = sdb_new0 (); isum += aux.vda_next; vstart += aux.vda_next; if (vstart > end || vstart + sizeof(Elf_(Verdaux)) > end) { sdb_free (sdb_verdef); sdb_free (sdb_parent); goto out_error; } k = 0; aux.vda_name = READ32 (vstart, k) aux.vda_next = READ32 (vstart, k) if (aux.vda_name > bin->dynstr_size) { sdb_free (sdb_verdef); sdb_free (sdb_parent); goto out_error; } sdb_num_set (sdb_parent, "idx", isum, 0); sdb_num_set (sdb_parent, "parent", j, 0); sdb_set (sdb_parent, "vda_name", &bin->dynstr[aux.vda_name], 0); snprintf (key, sizeof (key), "parent%d", j - 1); sdb_ns_set (sdb_verdef, key, sdb_parent); } snprintf (key, sizeof (key), "verdef%d", cnt); sdb_ns_set (sdb, key, sdb_verdef); if (!verdef->vd_next) { sdb_free (sdb_verdef); goto out_error; } if ((st32)verdef->vd_next < 1) { eprintf ("Warning: Invalid vd_next in the ELF version\n"); break; } i += verdef->vd_next; } free (defs); return sdb; out_error: free (defs); sdb_free (sdb); return NULL; }
{'added': [(708, '\tif (shdr->sh_size < 1 || shdr->sh_size > SIZE_MAX) {'), (840, '\tif (shdr->sh_size < 1 || shdr->sh_size > SIZE_MAX) {')], 'deleted': [(708, '\tif (shdr->sh_size < 1) {'), (840, '\tif (shdr->sh_size < 1) {')]}
2
2
2,830
21,469
120
975
23
https://github.com/radare/radare2
CVE-2017-16357
CWE-119
1,668
arp_tables.c
C
check_compat_entry_size_and_hooks
/* * Packet matching code for ARP packets. * * Based heavily, if not almost entirely, upon ip_tables.c framework. * * Some ARP specific bits are: * * Copyright (C) 2002 David S. Miller (davem@redhat.com) * Copyright (C) 2006-2009 Patrick McHardy <kaber@trash.net> * */ #define pr_fmt(fmt) KBUILD_MODNAME ": " fmt #include <linux/kernel.h> #include <linux/skbuff.h> #include <linux/netdevice.h> #include <linux/capability.h> #include <linux/if_arp.h> #include <linux/kmod.h> #include <linux/vmalloc.h> #include <linux/proc_fs.h> #include <linux/module.h> #include <linux/init.h> #include <linux/mutex.h> #include <linux/err.h> #include <net/compat.h> #include <net/sock.h> #include <asm/uaccess.h> #include <linux/netfilter/x_tables.h> #include <linux/netfilter_arp/arp_tables.h> #include "../../netfilter/xt_repldata.h" MODULE_LICENSE("GPL"); MODULE_AUTHOR("David S. Miller <davem@redhat.com>"); MODULE_DESCRIPTION("arptables core"); /*#define DEBUG_ARP_TABLES*/ /*#define DEBUG_ARP_TABLES_USER*/ #ifdef DEBUG_ARP_TABLES #define dprintf(format, args...) pr_debug(format, ## args) #else #define dprintf(format, args...) #endif #ifdef DEBUG_ARP_TABLES_USER #define duprintf(format, args...) pr_debug(format, ## args) #else #define duprintf(format, args...) #endif #ifdef CONFIG_NETFILTER_DEBUG #define ARP_NF_ASSERT(x) WARN_ON(!(x)) #else #define ARP_NF_ASSERT(x) #endif void *arpt_alloc_initial_table(const struct xt_table *info) { return xt_alloc_initial_table(arpt, ARPT); } EXPORT_SYMBOL_GPL(arpt_alloc_initial_table); static inline int arp_devaddr_compare(const struct arpt_devaddr_info *ap, const char *hdr_addr, int len) { int i, ret; if (len > ARPT_DEV_ADDR_LEN_MAX) len = ARPT_DEV_ADDR_LEN_MAX; ret = 0; for (i = 0; i < len; i++) ret |= (hdr_addr[i] ^ ap->addr[i]) & ap->mask[i]; return ret != 0; } /* * Unfortunately, _b and _mask are not aligned to an int (or long int) * Some arches dont care, unrolling the loop is a win on them. * For other arches, we only have a 16bit alignement. 
*/ static unsigned long ifname_compare(const char *_a, const char *_b, const char *_mask) { #ifdef CONFIG_HAVE_EFFICIENT_UNALIGNED_ACCESS unsigned long ret = ifname_compare_aligned(_a, _b, _mask); #else unsigned long ret = 0; const u16 *a = (const u16 *)_a; const u16 *b = (const u16 *)_b; const u16 *mask = (const u16 *)_mask; int i; for (i = 0; i < IFNAMSIZ/sizeof(u16); i++) ret |= (a[i] ^ b[i]) & mask[i]; #endif return ret; } /* Returns whether packet matches rule or not. */ static inline int arp_packet_match(const struct arphdr *arphdr, struct net_device *dev, const char *indev, const char *outdev, const struct arpt_arp *arpinfo) { const char *arpptr = (char *)(arphdr + 1); const char *src_devaddr, *tgt_devaddr; __be32 src_ipaddr, tgt_ipaddr; long ret; #define FWINV(bool, invflg) ((bool) ^ !!(arpinfo->invflags & (invflg))) if (FWINV((arphdr->ar_op & arpinfo->arpop_mask) != arpinfo->arpop, ARPT_INV_ARPOP)) { dprintf("ARP operation field mismatch.\n"); dprintf("ar_op: %04x info->arpop: %04x info->arpop_mask: %04x\n", arphdr->ar_op, arpinfo->arpop, arpinfo->arpop_mask); return 0; } if (FWINV((arphdr->ar_hrd & arpinfo->arhrd_mask) != arpinfo->arhrd, ARPT_INV_ARPHRD)) { dprintf("ARP hardware address format mismatch.\n"); dprintf("ar_hrd: %04x info->arhrd: %04x info->arhrd_mask: %04x\n", arphdr->ar_hrd, arpinfo->arhrd, arpinfo->arhrd_mask); return 0; } if (FWINV((arphdr->ar_pro & arpinfo->arpro_mask) != arpinfo->arpro, ARPT_INV_ARPPRO)) { dprintf("ARP protocol address format mismatch.\n"); dprintf("ar_pro: %04x info->arpro: %04x info->arpro_mask: %04x\n", arphdr->ar_pro, arpinfo->arpro, arpinfo->arpro_mask); return 0; } if (FWINV((arphdr->ar_hln & arpinfo->arhln_mask) != arpinfo->arhln, ARPT_INV_ARPHLN)) { dprintf("ARP hardware address length mismatch.\n"); dprintf("ar_hln: %02x info->arhln: %02x info->arhln_mask: %02x\n", arphdr->ar_hln, arpinfo->arhln, arpinfo->arhln_mask); return 0; } src_devaddr = arpptr; arpptr += dev->addr_len; memcpy(&src_ipaddr, arpptr, 
sizeof(u32)); arpptr += sizeof(u32); tgt_devaddr = arpptr; arpptr += dev->addr_len; memcpy(&tgt_ipaddr, arpptr, sizeof(u32)); if (FWINV(arp_devaddr_compare(&arpinfo->src_devaddr, src_devaddr, dev->addr_len), ARPT_INV_SRCDEVADDR) || FWINV(arp_devaddr_compare(&arpinfo->tgt_devaddr, tgt_devaddr, dev->addr_len), ARPT_INV_TGTDEVADDR)) { dprintf("Source or target device address mismatch.\n"); return 0; } if (FWINV((src_ipaddr & arpinfo->smsk.s_addr) != arpinfo->src.s_addr, ARPT_INV_SRCIP) || FWINV(((tgt_ipaddr & arpinfo->tmsk.s_addr) != arpinfo->tgt.s_addr), ARPT_INV_TGTIP)) { dprintf("Source or target IP address mismatch.\n"); dprintf("SRC: %pI4. Mask: %pI4. Target: %pI4.%s\n", &src_ipaddr, &arpinfo->smsk.s_addr, &arpinfo->src.s_addr, arpinfo->invflags & ARPT_INV_SRCIP ? " (INV)" : ""); dprintf("TGT: %pI4 Mask: %pI4 Target: %pI4.%s\n", &tgt_ipaddr, &arpinfo->tmsk.s_addr, &arpinfo->tgt.s_addr, arpinfo->invflags & ARPT_INV_TGTIP ? " (INV)" : ""); return 0; } /* Look for ifname matches. */ ret = ifname_compare(indev, arpinfo->iniface, arpinfo->iniface_mask); if (FWINV(ret != 0, ARPT_INV_VIA_IN)) { dprintf("VIA in mismatch (%s vs %s).%s\n", indev, arpinfo->iniface, arpinfo->invflags & ARPT_INV_VIA_IN ? " (INV)" : ""); return 0; } ret = ifname_compare(outdev, arpinfo->outiface, arpinfo->outiface_mask); if (FWINV(ret != 0, ARPT_INV_VIA_OUT)) { dprintf("VIA out mismatch (%s vs %s).%s\n", outdev, arpinfo->outiface, arpinfo->invflags & ARPT_INV_VIA_OUT ? 
" (INV)" : ""); return 0; } return 1; #undef FWINV } static inline int arp_checkentry(const struct arpt_arp *arp) { if (arp->flags & ~ARPT_F_MASK) { duprintf("Unknown flag bits set: %08X\n", arp->flags & ~ARPT_F_MASK); return 0; } if (arp->invflags & ~ARPT_INV_MASK) { duprintf("Unknown invflag bits set: %08X\n", arp->invflags & ~ARPT_INV_MASK); return 0; } return 1; } static unsigned int arpt_error(struct sk_buff *skb, const struct xt_action_param *par) { net_err_ratelimited("arp_tables: error: '%s'\n", (const char *)par->targinfo); return NF_DROP; } static inline const struct xt_entry_target * arpt_get_target_c(const struct arpt_entry *e) { return arpt_get_target((struct arpt_entry *)e); } static inline struct arpt_entry * get_entry(const void *base, unsigned int offset) { return (struct arpt_entry *)(base + offset); } static inline struct arpt_entry *arpt_next_entry(const struct arpt_entry *entry) { return (void *)entry + entry->next_offset; } unsigned int arpt_do_table(struct sk_buff *skb, const struct nf_hook_state *state, struct xt_table *table) { unsigned int hook = state->hook; static const char nulldevname[IFNAMSIZ] __attribute__((aligned(sizeof(long)))); unsigned int verdict = NF_DROP; const struct arphdr *arp; struct arpt_entry *e, **jumpstack; const char *indev, *outdev; const void *table_base; unsigned int cpu, stackidx = 0; const struct xt_table_info *private; struct xt_action_param acpar; unsigned int addend; if (!pskb_may_pull(skb, arp_hdr_len(skb->dev))) return NF_DROP; indev = state->in ? state->in->name : nulldevname; outdev = state->out ? state->out->name : nulldevname; local_bh_disable(); addend = xt_write_recseq_begin(); private = table->private; cpu = smp_processor_id(); /* * Ensure we load private-> members after we've fetched the base * pointer. */ smp_read_barrier_depends(); table_base = private->entries; jumpstack = (struct arpt_entry **)private->jumpstack[cpu]; /* No TEE support for arptables, so no need to switch to alternate * stack. 
All targets that reenter must return absolute verdicts. */ e = get_entry(table_base, private->hook_entry[hook]); acpar.net = state->net; acpar.in = state->in; acpar.out = state->out; acpar.hooknum = hook; acpar.family = NFPROTO_ARP; acpar.hotdrop = false; arp = arp_hdr(skb); do { const struct xt_entry_target *t; struct xt_counters *counter; if (!arp_packet_match(arp, skb->dev, indev, outdev, &e->arp)) { e = arpt_next_entry(e); continue; } counter = xt_get_this_cpu_counter(&e->counters); ADD_COUNTER(*counter, arp_hdr_len(skb->dev), 1); t = arpt_get_target_c(e); /* Standard target? */ if (!t->u.kernel.target->target) { int v; v = ((struct xt_standard_target *)t)->verdict; if (v < 0) { /* Pop from stack? */ if (v != XT_RETURN) { verdict = (unsigned int)(-v) - 1; break; } if (stackidx == 0) { e = get_entry(table_base, private->underflow[hook]); } else { e = jumpstack[--stackidx]; e = arpt_next_entry(e); } continue; } if (table_base + v != arpt_next_entry(e)) { jumpstack[stackidx++] = e; } e = get_entry(table_base, v); continue; } acpar.target = t->u.kernel.target; acpar.targinfo = t->data; verdict = t->u.kernel.target->target(skb, &acpar); /* Target might have changed stuff. */ arp = arp_hdr(skb); if (verdict == XT_CONTINUE) e = arpt_next_entry(e); else /* Verdict */ break; } while (!acpar.hotdrop); xt_write_recseq_end(addend); local_bh_enable(); if (acpar.hotdrop) return NF_DROP; else return verdict; } /* All zeroes == unconditional rule. */ static inline bool unconditional(const struct arpt_arp *arp) { static const struct arpt_arp uncond; return memcmp(arp, &uncond, sizeof(uncond)) == 0; } /* Figures out from what hook each rule can be called: returns 0 if * there are loops. Puts hook bitmask in comefrom. */ static int mark_source_chains(const struct xt_table_info *newinfo, unsigned int valid_hooks, void *entry0) { unsigned int hook; /* No recursion; use packet counter to save back ptrs (reset * to 0 as we leave), and comefrom to save source hook bitmask. 
*/ for (hook = 0; hook < NF_ARP_NUMHOOKS; hook++) { unsigned int pos = newinfo->hook_entry[hook]; struct arpt_entry *e = (struct arpt_entry *)(entry0 + pos); if (!(valid_hooks & (1 << hook))) continue; /* Set initial back pointer. */ e->counters.pcnt = pos; for (;;) { const struct xt_standard_target *t = (void *)arpt_get_target_c(e); int visited = e->comefrom & (1 << hook); if (e->comefrom & (1 << NF_ARP_NUMHOOKS)) { pr_notice("arptables: loop hook %u pos %u %08X.\n", hook, pos, e->comefrom); return 0; } e->comefrom |= ((1 << hook) | (1 << NF_ARP_NUMHOOKS)); /* Unconditional return/END. */ if ((e->target_offset == sizeof(struct arpt_entry) && (strcmp(t->target.u.user.name, XT_STANDARD_TARGET) == 0) && t->verdict < 0 && unconditional(&e->arp)) || visited) { unsigned int oldpos, size; if ((strcmp(t->target.u.user.name, XT_STANDARD_TARGET) == 0) && t->verdict < -NF_MAX_VERDICT - 1) { duprintf("mark_source_chains: bad " "negative verdict (%i)\n", t->verdict); return 0; } /* Return: backtrack through the last * big jump. */ do { e->comefrom ^= (1<<NF_ARP_NUMHOOKS); oldpos = pos; pos = e->counters.pcnt; e->counters.pcnt = 0; /* We're at the start. */ if (pos == oldpos) goto next; e = (struct arpt_entry *) (entry0 + pos); } while (oldpos == pos + e->next_offset); /* Move along one */ size = e->next_offset; e = (struct arpt_entry *) (entry0 + pos + size); e->counters.pcnt = pos; pos += size; } else { int newpos = t->verdict; if (strcmp(t->target.u.user.name, XT_STANDARD_TARGET) == 0 && newpos >= 0) { if (newpos > newinfo->size - sizeof(struct arpt_entry)) { duprintf("mark_source_chains: " "bad verdict (%i)\n", newpos); return 0; } /* This a jump; chase it. */ duprintf("Jump rule %u -> %u\n", pos, newpos); } else { /* ... 
this is a fallthru */ newpos = pos + e->next_offset; } e = (struct arpt_entry *) (entry0 + newpos); e->counters.pcnt = pos; pos = newpos; } } next: duprintf("Finished chain %u\n", hook); } return 1; } static inline int check_entry(const struct arpt_entry *e) { const struct xt_entry_target *t; if (!arp_checkentry(&e->arp)) return -EINVAL; if (e->target_offset + sizeof(struct xt_entry_target) > e->next_offset) return -EINVAL; t = arpt_get_target_c(e); if (e->target_offset + t->u.target_size > e->next_offset) return -EINVAL; return 0; } static inline int check_target(struct arpt_entry *e, const char *name) { struct xt_entry_target *t = arpt_get_target(e); int ret; struct xt_tgchk_param par = { .table = name, .entryinfo = e, .target = t->u.kernel.target, .targinfo = t->data, .hook_mask = e->comefrom, .family = NFPROTO_ARP, }; ret = xt_check_target(&par, t->u.target_size - sizeof(*t), 0, false); if (ret < 0) { duprintf("arp_tables: check failed for `%s'.\n", t->u.kernel.target->name); return ret; } return 0; } static inline int find_check_entry(struct arpt_entry *e, const char *name, unsigned int size) { struct xt_entry_target *t; struct xt_target *target; int ret; e->counters.pcnt = xt_percpu_counter_alloc(); if (IS_ERR_VALUE(e->counters.pcnt)) return -ENOMEM; t = arpt_get_target(e); target = xt_request_find_target(NFPROTO_ARP, t->u.user.name, t->u.user.revision); if (IS_ERR(target)) { duprintf("find_check_entry: `%s' not found\n", t->u.user.name); ret = PTR_ERR(target); goto out; } t->u.kernel.target = target; ret = check_target(e, name); if (ret) goto err; return 0; err: module_put(t->u.kernel.target->me); out: xt_percpu_counter_free(e->counters.pcnt); return ret; } static bool check_underflow(const struct arpt_entry *e) { const struct xt_entry_target *t; unsigned int verdict; if (!unconditional(&e->arp)) return false; t = arpt_get_target_c(e); if (strcmp(t->u.user.name, XT_STANDARD_TARGET) != 0) return false; verdict = ((struct xt_standard_target *)t)->verdict; 
	/* Tail of check_underflow() — definition starts before this chunk.
	 * Decodes the standard-target verdict and accepts only unconditional
	 * ACCEPT/DROP underflow rules. */
	verdict = -verdict - 1;
	return verdict == NF_DROP || verdict == NF_ACCEPT;
}

/* Validate one rule during table translation: alignment, minimum size,
 * generic entry sanity, and record hook entry/underflow offsets in newinfo.
 * NOTE(review): only a lower bound on e->next_offset is checked here; the
 * upper bound against 'limit' is not validated at this point — presumably
 * relied upon elsewhere, but later upstream x_tables hardening added an
 * explicit check. Confirm against current upstream. */
static inline int check_entry_size_and_hooks(struct arpt_entry *e,
					     struct xt_table_info *newinfo,
					     const unsigned char *base,
					     const unsigned char *limit,
					     const unsigned int *hook_entries,
					     const unsigned int *underflows,
					     unsigned int valid_hooks)
{
	unsigned int h;
	int err;

	/* Entry must be properly aligned and must fit below 'limit'. */
	if ((unsigned long)e % __alignof__(struct arpt_entry) != 0 ||
	    (unsigned char *)e + sizeof(struct arpt_entry) >= limit) {
		duprintf("Bad offset %p\n", e);
		return -EINVAL;
	}

	/* next_offset must at least cover the entry plus a target header. */
	if (e->next_offset
	    < sizeof(struct arpt_entry) + sizeof(struct xt_entry_target)) {
		duprintf("checking: element %p size %u\n",
			 e, e->next_offset);
		return -EINVAL;
	}

	err = check_entry(e);
	if (err)
		return err;

	/* Check hooks & underflows */
	for (h = 0; h < NF_ARP_NUMHOOKS; h++) {
		if (!(valid_hooks & (1 << h)))
			continue;
		if ((unsigned char *)e - base == hook_entries[h])
			newinfo->hook_entry[h] = hook_entries[h];
		if ((unsigned char *)e - base == underflows[h]) {
			/* Underflow (chain end) rules must be unconditional
			 * standard-target ACCEPT/DROP. */
			if (!check_underflow(e)) {
				pr_err("Underflows must be unconditional and "
				       "use the STANDARD target with "
				       "ACCEPT/DROP\n");
				return -EINVAL;
			}
			newinfo->underflow[h] = underflows[h];
		}
	}

	/* Clear counters and comefrom */
	e->counters = ((struct xt_counters) { 0, 0 });
	e->comefrom = 0;
	return 0;
}

/* Release all resources held by one rule: run the target destructor,
 * drop the target module reference, free the per-cpu counter. */
static inline void cleanup_entry(struct arpt_entry *e)
{
	struct xt_tgdtor_param par;
	struct xt_entry_target *t;

	t = arpt_get_target(e);
	par.target   = t->u.kernel.target;
	par.targinfo = t->data;
	par.family   = NFPROTO_ARP;
	if (par.target->destroy != NULL)
		par.target->destroy(&par);
	module_put(par.target->me);
	xt_percpu_counter_free(e->counters.pcnt);
}

/* Checks and translates the user-supplied table segment (held in
 * newinfo).
 */
static int translate_table(struct xt_table_info *newinfo, void *entry0,
			   const struct arpt_replace *repl)
{
	struct arpt_entry *iter;
	unsigned int i;
	int ret = 0;

	newinfo->size = repl->size;
	newinfo->number = repl->num_entries;

	/* Init all hooks to impossible value.
	 */
	for (i = 0; i < NF_ARP_NUMHOOKS; i++) {
		newinfo->hook_entry[i] = 0xFFFFFFFF;
		newinfo->underflow[i] = 0xFFFFFFFF;
	}

	duprintf("translate_table: size %u\n", newinfo->size);
	i = 0;

	/* Walk through entries, checking offsets. */
	xt_entry_foreach(iter, entry0, newinfo->size) {
		ret = check_entry_size_and_hooks(iter, newinfo, entry0,
						 entry0 + repl->size,
						 repl->hook_entry,
						 repl->underflow,
						 repl->valid_hooks);
		if (ret != 0)
			break;
		++i;
		/* ERROR targets mark user-defined chain heads; each one needs
		 * a jumpstack slot. */
		if (strcmp(arpt_get_target(iter)->u.user.name,
		    XT_ERROR_TARGET) == 0)
			++newinfo->stacksize;
	}
	duprintf("translate_table: ARPT_ENTRY_ITERATE gives %d\n", ret);
	if (ret != 0)
		return ret;

	if (i != repl->num_entries) {
		duprintf("translate_table: %u not %u entries\n",
			 i, repl->num_entries);
		return -EINVAL;
	}

	/* Check hooks all assigned */
	for (i = 0; i < NF_ARP_NUMHOOKS; i++) {
		/* Only hooks which are valid */
		if (!(repl->valid_hooks & (1 << i)))
			continue;
		if (newinfo->hook_entry[i] == 0xFFFFFFFF) {
			duprintf("Invalid hook entry %u %u\n",
				 i, repl->hook_entry[i]);
			return -EINVAL;
		}
		if (newinfo->underflow[i] == 0xFFFFFFFF) {
			duprintf("Invalid underflow %u %u\n",
				 i, repl->underflow[i]);
			return -EINVAL;
		}
	}

	/* Detect chain loops; reject the table if any exist. */
	if (!mark_source_chains(newinfo, repl->valid_hooks, entry0)) {
		duprintf("Looping hook\n");
		return -ELOOP;
	}

	/* Finally, each sanity check must pass */
	i = 0;
	xt_entry_foreach(iter, entry0, newinfo->size) {
		ret = find_check_entry(iter, repl->name, repl->size);
		if (ret != 0)
			break;
		++i;
	}

	if (ret != 0) {
		/* Undo the first i successful find_check_entry() calls. */
		xt_entry_foreach(iter, entry0, newinfo->size) {
			if (i-- == 0)
				break;
			cleanup_entry(iter);
		}
		return ret;
	}

	return ret;
}

/* Sum the per-cpu packet/byte counters of every rule into counters[],
 * using the per-cpu seqcount to get a consistent 64-bit snapshot. */
static void get_counters(const struct xt_table_info *t,
			 struct xt_counters counters[])
{
	struct arpt_entry *iter;
	unsigned int cpu;
	unsigned int i;

	for_each_possible_cpu(cpu) {
		seqcount_t *s = &per_cpu(xt_recseq, cpu);

		i = 0;
		xt_entry_foreach(iter, t->entries, t->size) {
			struct xt_counters *tmp;
			u64 bcnt, pcnt;
			unsigned int start;

			tmp = xt_get_per_cpu_counter(&iter->counters, cpu);
			do {
				/* Retry if a writer updated the counters
				 * while we were reading. */
				start = read_seqcount_begin(s);
				bcnt = tmp->bcnt;
				pcnt = tmp->pcnt;
			} while (read_seqcount_retry(s, start));

			ADD_COUNTER(counters[i], bcnt, pcnt);
			++i;
		}
	}
}

/* Allocate a zeroed counter array sized for the table and fill it with an
 * atomic snapshot of the live counters. Caller must vfree() the result. */
static struct xt_counters *alloc_counters(const struct xt_table *table)
{
	unsigned int countersize;
	struct xt_counters *counters;
	const struct xt_table_info *private = table->private;

	/* We need atomic snapshot of counters: rest doesn't change
	 * (other than comefrom, which userspace doesn't care
	 * about).
	 */
	countersize = sizeof(struct xt_counters) * private->number;
	counters = vzalloc(countersize);

	if (counters == NULL)
		return ERR_PTR(-ENOMEM);

	get_counters(private, counters);

	return counters;
}

/* Copy the whole ruleset to userspace, then patch in the counter snapshot
 * and the kernel-resolved target names entry by entry. */
static int copy_entries_to_user(unsigned int total_size,
				const struct xt_table *table,
				void __user *userptr)
{
	unsigned int off, num;
	const struct arpt_entry *e;
	struct xt_counters *counters;
	struct xt_table_info *private = table->private;
	int ret = 0;
	void *loc_cpu_entry;

	counters = alloc_counters(table);
	if (IS_ERR(counters))
		return PTR_ERR(counters);

	loc_cpu_entry = private->entries;
	/* ... then copy entire thing ... */
	if (copy_to_user(userptr, loc_cpu_entry, total_size) != 0) {
		ret = -EFAULT;
		goto free_counters;
	}

	/* FIXME: use iterator macros --RR */
	/* ... then go back and fix counters and names */
	for (off = 0, num = 0; off < total_size; off += e->next_offset, num++){
		const struct xt_entry_target *t;

		e = (struct arpt_entry *)(loc_cpu_entry + off);
		if (copy_to_user(userptr + off
				 + offsetof(struct arpt_entry, counters),
				 &counters[num],
				 sizeof(counters[num])) != 0) {
			ret = -EFAULT;
			goto free_counters;
		}

		t = arpt_get_target_c(e);
		if (copy_to_user(userptr + off + e->target_offset
				 + offsetof(struct xt_entry_target,
					    u.user.name),
				 t->u.kernel.target->name,
				 strlen(t->u.kernel.target->name)+1) != 0) {
			ret = -EFAULT;
			goto free_counters;
		}
	}

 free_counters:
	vfree(counters);
	return ret;
}

#ifdef CONFIG_COMPAT
/* Convert a compat standard-target verdict from userspace: positive
 * verdicts are jump offsets and must be adjusted for size deltas. */
static void compat_standard_from_user(void *dst, const void *src)
{
	int v = *(compat_int_t *)src;

	if (v > 0)
		v += xt_compat_calc_jump(NFPROTO_ARP, v);
	memcpy(dst, &v, sizeof(v));
}

/* Reverse of compat_standard_from_user: rebase a jump offset for the
 * compat layout before copying it out. */
static int compat_standard_to_user(void __user *dst, const void *src)
{
	compat_int_t cv = *(int *)src;

	if (cv > 0)
		cv -= xt_compat_calc_jump(NFPROTO_ARP, cv);
	return copy_to_user(dst, &cv, sizeof(cv)) ? -EFAULT : 0;
}

/* Account one entry's native-vs-compat size delta: shrink newinfo->size,
 * record the offset delta, and rebase hook/underflow offsets that lie
 * beyond this entry. */
static int compat_calc_entry(const struct arpt_entry *e,
			     const struct xt_table_info *info,
			     const void *base, struct xt_table_info *newinfo)
{
	const struct xt_entry_target *t;
	unsigned int entry_offset;
	int off, i, ret;

	off = sizeof(struct arpt_entry) - sizeof(struct compat_arpt_entry);
	entry_offset = (void *)e - base;

	t = arpt_get_target_c(e);
	off += xt_compat_target_offset(t->u.kernel.target);
	newinfo->size -= off;
	ret = xt_compat_add_offset(NFPROTO_ARP, entry_offset, off);
	if (ret)
		return ret;

	for (i = 0; i < NF_ARP_NUMHOOKS; i++) {
		if (info->hook_entry[i] &&
		    (e < (struct arpt_entry *)(base + info->hook_entry[i])))
			newinfo->hook_entry[i] -= off;
		if (info->underflow[i] &&
		    (e < (struct arpt_entry *)(base + info->underflow[i])))
			newinfo->underflow[i] -= off;
	}
	return 0;
}

/* Build compat-layout table metadata (sizes/offsets) from native info. */
static int compat_table_info(const struct xt_table_info *info,
			     struct xt_table_info *newinfo)
{
	struct arpt_entry *iter;
	const void *loc_cpu_entry;
	int ret;

	if (!newinfo || !info)
		return -EINVAL;

	/* we dont care about newinfo->entries */
	memcpy(newinfo, info, offsetof(struct xt_table_info, entries));
	newinfo->initial_entries = 0;
	loc_cpu_entry = info->entries;
	xt_compat_init_offsets(NFPROTO_ARP, info->number);
	xt_entry_foreach(iter, loc_cpu_entry, info->size) {
		ret = compat_calc_entry(iter, info, loc_cpu_entry, newinfo);
		if (ret != 0)
			return ret;
	}
	return 0;
}
#endif

/* ARPT_SO_GET_INFO handler: copy table metadata (hooks, sizes, counts)
 * to userspace, translating offsets for compat callers. */
static int get_info(struct net *net, void __user *user,
		    const int *len, int compat)
{
	char name[XT_TABLE_MAXNAMELEN];
	struct xt_table *t;
	int ret;

	if (*len != sizeof(struct arpt_getinfo)) {
		duprintf("length %u != %Zu\n", *len,
			 sizeof(struct arpt_getinfo));
		return -EINVAL;
	}

	if (copy_from_user(name, user, sizeof(name)) != 0)
		return -EFAULT;

	/* Defensive NUL-termination of the user-supplied table name. */
	name[XT_TABLE_MAXNAMELEN-1] = '\0';
#ifdef CONFIG_COMPAT
	if (compat)
		xt_compat_lock(NFPROTO_ARP);
#endif
	t = try_then_request_module(xt_find_table_lock(net, NFPROTO_ARP, name),
				    "arptable_%s", name);
	if (!IS_ERR_OR_NULL(t)) {
		struct arpt_getinfo info;
		const struct xt_table_info *private = t->private;
#ifdef CONFIG_COMPAT
		struct xt_table_info tmp;

		if (compat) {
			ret = compat_table_info(private, &tmp);
			xt_compat_flush_offsets(NFPROTO_ARP);
			private = &tmp;
		}
#endif
		memset(&info, 0, sizeof(info));
		info.valid_hooks = t->valid_hooks;
		memcpy(info.hook_entry, private->hook_entry,
		       sizeof(info.hook_entry));
		memcpy(info.underflow, private->underflow,
		       sizeof(info.underflow));
		info.num_entries = private->number;
		info.size = private->size;
		strcpy(info.name, name);

		if (copy_to_user(user, &info, *len) != 0)
			ret = -EFAULT;
		else
			ret = 0;
		xt_table_unlock(t);
		module_put(t->me);
	} else
		ret = t ? PTR_ERR(t) : -ENOENT;
#ifdef CONFIG_COMPAT
	if (compat)
		xt_compat_unlock(NFPROTO_ARP);
#endif
	return ret;
}

/* ARPT_SO_GET_ENTRIES handler: validate the request size then dump the
 * whole ruleset via copy_entries_to_user(). */
static int get_entries(struct net *net, struct arpt_get_entries __user *uptr,
		       const int *len)
{
	int ret;
	struct arpt_get_entries get;
	struct xt_table *t;

	if (*len < sizeof(get)) {
		duprintf("get_entries: %u < %Zu\n", *len, sizeof(get));
		return -EINVAL;
	}
	if (copy_from_user(&get, uptr, sizeof(get)) != 0)
		return -EFAULT;
	if (*len != sizeof(struct arpt_get_entries) + get.size) {
		duprintf("get_entries: %u != %Zu\n", *len,
			 sizeof(struct arpt_get_entries) + get.size);
		return -EINVAL;
	}

	t = xt_find_table_lock(net, NFPROTO_ARP, get.name);
	if (!IS_ERR_OR_NULL(t)) {
		const struct xt_table_info *private = t->private;

		duprintf("t->private->number = %u\n",
			 private->number);
		if (get.size == private->size)
			ret = copy_entries_to_user(private->size,
						   t, uptr->entrytable);
		else {
			duprintf("get_entries: I've got %u not %u!\n",
				 private->size, get.size);
			ret = -EAGAIN;
		}
		module_put(t->me);
		xt_table_unlock(t);
	} else
		ret = t ? PTR_ERR(t) : -ENOENT;

	return ret;
}

/* Swap the live table for newinfo: snapshot old counters, clean up old
 * rules, copy counters to userspace, and adjust module refcounts. */
static int __do_replace(struct net *net, const char *name,
			unsigned int valid_hooks,
			struct xt_table_info *newinfo,
			unsigned int num_counters,
			void __user *counters_ptr)
{
	int ret;
	struct xt_table *t;
	struct xt_table_info *oldinfo;
	struct xt_counters *counters;
	void *loc_cpu_old_entry;
	struct arpt_entry *iter;

	ret = 0;
	counters = vzalloc(num_counters * sizeof(struct xt_counters));
	if (!counters) {
		ret = -ENOMEM;
		goto out;
	}

	t = try_then_request_module(xt_find_table_lock(net, NFPROTO_ARP, name),
				    "arptable_%s", name);
	if (IS_ERR_OR_NULL(t)) {
		ret = t ? PTR_ERR(t) : -ENOENT;
		goto free_newinfo_counters_untrans;
	}

	/* You lied! */
	if (valid_hooks != t->valid_hooks) {
		duprintf("Valid hook crap: %08X vs %08X\n",
			 valid_hooks, t->valid_hooks);
		ret = -EINVAL;
		goto put_module;
	}

	oldinfo = xt_replace_table(t, num_counters, newinfo, &ret);
	if (!oldinfo)
		goto put_module;

	/* Update module usage count based on number of rules */
	duprintf("do_replace: oldnum=%u, initnum=%u, newnum=%u\n",
		oldinfo->number, oldinfo->initial_entries, newinfo->number);
	if ((oldinfo->number > oldinfo->initial_entries) ||
	    (newinfo->number <= oldinfo->initial_entries))
		module_put(t->me);
	if ((oldinfo->number > oldinfo->initial_entries) &&
	    (newinfo->number <= oldinfo->initial_entries))
		module_put(t->me);

	/* Get the old counters, and synchronize with replace */
	get_counters(oldinfo, counters);

	/* Decrease module usage counts and free resource */
	loc_cpu_old_entry = oldinfo->entries;
	xt_entry_foreach(iter, loc_cpu_old_entry, oldinfo->size)
		cleanup_entry(iter);

	xt_free_table_info(oldinfo);
	if (copy_to_user(counters_ptr, counters,
			 sizeof(struct xt_counters) * num_counters) != 0) {
		/* Silent error, can't fail, new table is already in place */
		net_warn_ratelimited("arptables: counters copy to user failed while replacing table\n");
	}
	vfree(counters);
	xt_table_unlock(t);
	return ret;

 put_module:
	module_put(t->me);
	xt_table_unlock(t);
 free_newinfo_counters_untrans:
	vfree(counters);
 out:
	return ret;
}

/* ARPT_SO_SET_REPLACE handler (native ABI): copy, translate and install
 * a userspace-supplied replacement table. */
static int do_replace(struct net *net, const void __user *user,
		      unsigned int len)
{
	int ret;
	struct arpt_replace tmp;
	struct xt_table_info *newinfo;
	void *loc_cpu_entry;
	struct arpt_entry *iter;

	if (copy_from_user(&tmp, user, sizeof(tmp)) != 0)
		return -EFAULT;

	/* overflow check */
	if (tmp.num_counters >= INT_MAX / sizeof(struct xt_counters))
		return -ENOMEM;
	if (tmp.num_counters == 0)
		return -EINVAL;

	tmp.name[sizeof(tmp.name)-1] = 0;

	newinfo = xt_alloc_table_info(tmp.size);
	if (!newinfo)
		return -ENOMEM;

	loc_cpu_entry = newinfo->entries;
	if (copy_from_user(loc_cpu_entry, user + sizeof(tmp),
			   tmp.size) != 0) {
		ret = -EFAULT;
		goto free_newinfo;
	}

	ret = translate_table(newinfo, loc_cpu_entry, &tmp);
	if (ret != 0)
		goto free_newinfo;

	duprintf("arp_tables: Translated table\n");

	ret = __do_replace(net, tmp.name, tmp.valid_hooks, newinfo,
			   tmp.num_counters, tmp.counters);
	if (ret)
		goto free_newinfo_untrans;
	return 0;

 free_newinfo_untrans:
	xt_entry_foreach(iter, loc_cpu_entry, newinfo->size)
		cleanup_entry(iter);
 free_newinfo:
	xt_free_table_info(newinfo);
	return ret;
}

/* ARPT_SO_SET_ADD_COUNTERS handler (native and compat): add the
 * user-supplied per-rule deltas to the live per-cpu counters. */
static int do_add_counters(struct net *net, const void __user *user,
			   unsigned int len, int compat)
{
	unsigned int i;
	struct xt_counters_info tmp;
	struct xt_counters *paddc;
	unsigned int num_counters;
	const char *name;
	int size;
	void *ptmp;
	struct xt_table *t;
	const struct xt_table_info *private;
	int ret = 0;
	struct arpt_entry *iter;
	unsigned int addend;
#ifdef CONFIG_COMPAT
	struct compat_xt_counters_info compat_tmp;

	if (compat) {
		ptmp = &compat_tmp;
		size = sizeof(struct compat_xt_counters_info);
	} else
#endif
	{
		ptmp = &tmp;
		size = sizeof(struct xt_counters_info);
	}

	if (copy_from_user(ptmp, user, size) != 0)
		return -EFAULT;

#ifdef CONFIG_COMPAT
	if (compat) {
		num_counters = compat_tmp.num_counters;
		name = compat_tmp.name;
	} else
#endif
	{
		num_counters = tmp.num_counters;
		name = tmp.name;
	}

	/* Reject length mismatches before allocating. */
	if (len != size + num_counters * sizeof(struct xt_counters))
		return -EINVAL;

	paddc = vmalloc(len - size);
	if (!paddc)
		return -ENOMEM;

	if (copy_from_user(paddc, user + size, len - size) != 0) {
		ret = -EFAULT;
		goto free;
	}

	t = xt_find_table_lock(net, NFPROTO_ARP, name);
	if (IS_ERR_OR_NULL(t)) {
		ret = t ? PTR_ERR(t) : -ENOENT;
		goto free;
	}

	local_bh_disable();
	private = t->private;
	if (private->number != num_counters) {
		ret = -EINVAL;
		goto unlock_up_free;
	}

	i = 0;

	/* Writer side of the xt_recseq seqcount, paired with get_counters(). */
	addend = xt_write_recseq_begin();
	xt_entry_foreach(iter,  private->entries, private->size) {
		struct xt_counters *tmp;

		tmp = xt_get_this_cpu_counter(&iter->counters);
		ADD_COUNTER(*tmp, paddc[i].bcnt, paddc[i].pcnt);
		++i;
	}
	xt_write_recseq_end(addend);
 unlock_up_free:
	local_bh_enable();
	xt_table_unlock(t);
	module_put(t->me);
 free:
	vfree(paddc);

	return ret;
}

#ifdef CONFIG_COMPAT
/* Drop the target-module reference taken for a compat entry. */
static inline void compat_release_entry(struct compat_arpt_entry *e)
{
	struct xt_entry_target *t;

	t = compat_arpt_get_target(e);
	module_put(t->u.kernel.target->me);
}

/* Compat counterpart of check_entry_size_and_hooks(): validates one
 * compat-layout rule, resolves its target module, and records the
 * native-vs-compat size delta.
 * NOTE(review): as in the native path, e->next_offset's upper bound is
 * not checked against 'limit' here — verify against upstream hardening. */
static inline int
check_compat_entry_size_and_hooks(struct compat_arpt_entry *e,
				  struct xt_table_info *newinfo,
				  unsigned int *size,
				  const unsigned char *base,
				  const unsigned char *limit,
				  const unsigned int *hook_entries,
				  const unsigned int *underflows,
				  const char *name)
{
	struct xt_entry_target *t;
	struct xt_target *target;
	unsigned int entry_offset;
	int ret, off, h;

	duprintf("check_compat_entry_size_and_hooks %p\n", e);
	if ((unsigned long)e % __alignof__(struct compat_arpt_entry) != 0 ||
	    (unsigned char *)e + sizeof(struct compat_arpt_entry) >= limit) {
		duprintf("Bad offset %p, limit = %p\n", e, limit);
		return -EINVAL;
	}

	if (e->next_offset < sizeof(struct compat_arpt_entry) +
			     sizeof(struct compat_xt_entry_target)) {
		duprintf("checking: element %p size %u\n",
			 e, e->next_offset);
		return -EINVAL;
	}

	/* For purposes of check_entry casting the compat entry is fine */
	ret = check_entry((struct arpt_entry *)e);
	if (ret)
		return ret;

	off = sizeof(struct arpt_entry) - sizeof(struct compat_arpt_entry);
	entry_offset = (void *)e - (void *)base;

	t = compat_arpt_get_target(e);
	target = xt_request_find_target(NFPROTO_ARP, t->u.user.name,
					t->u.user.revision);
	if (IS_ERR(target)) {
		duprintf("check_compat_entry_size_and_hooks: `%s' not found\n",
			 t->u.user.name);
		ret = PTR_ERR(target);
		goto out;
	}
	t->u.kernel.target = target;

	off += xt_compat_target_offset(target);
	*size += off;
	ret = xt_compat_add_offset(NFPROTO_ARP, entry_offset, off);
	if (ret)
		goto release_target;

	/* Check hooks & underflows */
	for (h = 0; h < NF_ARP_NUMHOOKS; h++) {
		if ((unsigned char *)e - base == hook_entries[h])
			newinfo->hook_entry[h] = hook_entries[h];
		if ((unsigned char *)e - base == underflows[h])
			newinfo->underflow[h] = underflows[h];
	}

	/* Clear counters and comefrom */
	memset(&e->counters, 0, sizeof(e->counters));
	e->comefrom = 0;
	return 0;

release_target:
	module_put(t->u.kernel.target->me);
out:
	return ret;
}

/* Expand one compat entry into native layout at *dstptr, adjusting
 * target/next offsets and any hook/underflow offsets past this entry. */
static int
compat_copy_entry_from_user(struct compat_arpt_entry *e, void **dstptr,
			    unsigned int *size, const char *name,
			    struct xt_table_info *newinfo, unsigned char *base)
{
	struct xt_entry_target *t;
	struct xt_target *target;
	struct arpt_entry *de;
	unsigned int origsize;
	int ret, h;

	ret = 0;
	origsize = *size;
	de = (struct arpt_entry *)*dstptr;
	memcpy(de, e, sizeof(struct arpt_entry));
	memcpy(&de->counters, &e->counters, sizeof(e->counters));

	*dstptr += sizeof(struct arpt_entry);
	*size += sizeof(struct arpt_entry) - sizeof(struct compat_arpt_entry);

	de->target_offset = e->target_offset - (origsize - *size);
	t = compat_arpt_get_target(e);
	target = t->u.kernel.target;
	xt_compat_target_from_user(t, dstptr, size);

	de->next_offset = e->next_offset - (origsize - *size);
	for (h = 0; h < NF_ARP_NUMHOOKS; h++) {
		if ((unsigned char *)de - base < newinfo->hook_entry[h])
			newinfo->hook_entry[h] -= origsize - *size;
		if ((unsigned char *)de - base < newinfo->underflow[h])
			newinfo->underflow[h] -= origsize - *size;
	}
	return ret;
}

/* Full compat translation: validate all compat entries, convert them to
 * native layout in a fresh xt_table_info, then re-run the native checks. */
static int translate_compat_table(const char *name,
				  unsigned int valid_hooks,
				  struct xt_table_info **pinfo,
				  void **pentry0,
				  unsigned int total_size,
				  unsigned int number,
				  unsigned int *hook_entries,
				  unsigned int *underflows)
{
	unsigned int i, j;
	struct xt_table_info *newinfo, *info;
	void *pos, *entry0, *entry1;
	struct compat_arpt_entry *iter0;
	struct arpt_entry *iter1;
	unsigned int size;
	int ret = 0;

	info = *pinfo;
	entry0 = *pentry0;
	size = total_size;
	info->number = number;

	/* Init all hooks to impossible value. */
	for (i = 0; i < NF_ARP_NUMHOOKS; i++) {
		info->hook_entry[i] = 0xFFFFFFFF;
		info->underflow[i] = 0xFFFFFFFF;
	}

	duprintf("translate_compat_table: size %u\n", info->size);
	j = 0;
	xt_compat_lock(NFPROTO_ARP);
	xt_compat_init_offsets(NFPROTO_ARP, number);
	/* Walk through entries, checking offsets. */
	xt_entry_foreach(iter0, entry0, total_size) {
		ret = check_compat_entry_size_and_hooks(iter0, info, &size,
							entry0,
							entry0 + total_size,
							hook_entries,
							underflows,
							name);
		if (ret != 0)
			goto out_unlock;
		++j;
	}

	ret = -EINVAL;
	if (j != number) {
		duprintf("translate_compat_table: %u not %u entries\n",
			 j, number);
		goto out_unlock;
	}

	/* Check hooks all assigned */
	for (i = 0; i < NF_ARP_NUMHOOKS; i++) {
		/* Only hooks which are valid */
		if (!(valid_hooks & (1 << i)))
			continue;
		if (info->hook_entry[i] == 0xFFFFFFFF) {
			duprintf("Invalid hook entry %u %u\n",
				 i, hook_entries[i]);
			goto out_unlock;
		}
		if (info->underflow[i] == 0xFFFFFFFF) {
			duprintf("Invalid underflow %u %u\n",
				 i, underflows[i]);
			goto out_unlock;
		}
	}

	ret = -ENOMEM;
	newinfo = xt_alloc_table_info(size);
	if (!newinfo)
		goto out_unlock;

	newinfo->number = number;
	for (i = 0; i < NF_ARP_NUMHOOKS; i++) {
		newinfo->hook_entry[i] = info->hook_entry[i];
		newinfo->underflow[i] = info->underflow[i];
	}
	entry1 = newinfo->entries;
	pos = entry1;
	size = total_size;
	xt_entry_foreach(iter0, entry0, total_size) {
		ret = compat_copy_entry_from_user(iter0, &pos, &size,
						  name, newinfo, entry1);
		if (ret != 0)
			break;
	}
	xt_compat_flush_offsets(NFPROTO_ARP);
	xt_compat_unlock(NFPROTO_ARP);
	if (ret)
		goto free_newinfo;

	ret = -ELOOP;
	if (!mark_source_chains(newinfo, valid_hooks, entry1))
		goto free_newinfo;

	i = 0;
	xt_entry_foreach(iter1, entry1, newinfo->size) {
		iter1->counters.pcnt = xt_percpu_counter_alloc();
		if (IS_ERR_VALUE(iter1->counters.pcnt)) {
			ret = -ENOMEM;
			break;
		}
		ret = check_target(iter1, name);
		if (ret != 0) {
			xt_percpu_counter_free(iter1->counters.pcnt);
			break;
		}
		++i;
		if (strcmp(arpt_get_target(iter1)->u.user.name,
		    XT_ERROR_TARGET) == 0)
			++newinfo->stacksize;
	}
	if (ret) {
		/*
		 * The first i matches need cleanup_entry (calls ->destroy)
		 * because they had called ->check already. The other j-i
		 * entries need only release.
		 */
		int skip = i;
		j -= i;
		xt_entry_foreach(iter0, entry0, newinfo->size) {
			if (skip-- > 0)
				continue;
			if (j-- == 0)
				break;
			compat_release_entry(iter0);
		}
		xt_entry_foreach(iter1, entry1, newinfo->size) {
			if (i-- == 0)
				break;
			cleanup_entry(iter1);
		}
		xt_free_table_info(newinfo);
		return ret;
	}

	*pinfo = newinfo;
	*pentry0 = entry1;
	xt_free_table_info(info);
	return 0;

free_newinfo:
	xt_free_table_info(newinfo);
out:
	/* Release target modules for the j entries validated so far. */
	xt_entry_foreach(iter0, entry0, total_size) {
		if (j-- == 0)
			break;
		compat_release_entry(iter0);
	}
	return ret;
out_unlock:
	xt_compat_flush_offsets(NFPROTO_ARP);
	xt_compat_unlock(NFPROTO_ARP);
	goto out;
}

/* 32-bit userspace layout of struct arpt_replace. */
struct compat_arpt_replace {
	char				name[XT_TABLE_MAXNAMELEN];
	u32				valid_hooks;
	u32				num_entries;
	u32				size;
	u32				hook_entry[NF_ARP_NUMHOOKS];
	u32				underflow[NF_ARP_NUMHOOKS];
	u32				num_counters;
	compat_uptr_t			counters;
	struct compat_arpt_entry	entries[0];
};

/* Compat ARPT_SO_SET_REPLACE handler: like do_replace() but translates
 * the table from the 32-bit layout first. */
static int compat_do_replace(struct net *net, void __user *user,
			     unsigned int len)
{
	int ret;
	struct compat_arpt_replace tmp;
	struct xt_table_info *newinfo;
	void *loc_cpu_entry;
	struct arpt_entry *iter;

	if (copy_from_user(&tmp, user, sizeof(tmp)) != 0)
		return -EFAULT;

	/* overflow check */
	if (tmp.size >= INT_MAX / num_possible_cpus())
		return -ENOMEM;
	if (tmp.num_counters >= INT_MAX / sizeof(struct xt_counters))
		return -ENOMEM;
	if (tmp.num_counters == 0)
		return -EINVAL;

	tmp.name[sizeof(tmp.name)-1] = 0;

	newinfo = xt_alloc_table_info(tmp.size);
	if (!newinfo)
		return -ENOMEM;

	loc_cpu_entry = newinfo->entries;
	if (copy_from_user(loc_cpu_entry, user + sizeof(tmp), tmp.size) != 0) {
		ret = -EFAULT;
		goto free_newinfo;
	}

	ret = translate_compat_table(tmp.name, tmp.valid_hooks,
				     &newinfo, &loc_cpu_entry, tmp.size,
				     tmp.num_entries, tmp.hook_entry,
				     tmp.underflow);
	if (ret != 0)
		goto free_newinfo;

	duprintf("compat_do_replace: Translated table\n");

	ret = __do_replace(net, tmp.name, tmp.valid_hooks, newinfo,
			   tmp.num_counters, compat_ptr(tmp.counters));
	if (ret)
		goto free_newinfo_untrans;
	return 0;

 free_newinfo_untrans:
	xt_entry_foreach(iter, loc_cpu_entry, newinfo->size)
		cleanup_entry(iter);
 free_newinfo:
	xt_free_table_info(newinfo);
	return ret;
}

/* Compat setsockopt dispatcher; requires CAP_NET_ADMIN in the socket's
 * user namespace. */
static int compat_do_arpt_set_ctl(struct sock *sk, int cmd, void __user *user,
				  unsigned int len)
{
	int ret;

	if (!ns_capable(sock_net(sk)->user_ns, CAP_NET_ADMIN))
		return -EPERM;

	switch (cmd) {
	case ARPT_SO_SET_REPLACE:
		ret = compat_do_replace(sock_net(sk), user, len);
		break;

	case ARPT_SO_SET_ADD_COUNTERS:
		ret = do_add_counters(sock_net(sk), user, len, 1);
		break;

	default:
		duprintf("do_arpt_set_ctl:  unknown request %i\n", cmd);
		ret = -EINVAL;
	}

	return ret;
}

/* Shrink one native entry into the compat layout at *dstptr, fixing up
 * target/next offsets and patching in the counter snapshot. */
static int compat_copy_entry_to_user(struct arpt_entry *e, void __user **dstptr,
				     compat_uint_t *size,
				     struct xt_counters *counters,
				     unsigned int i)
{
	struct xt_entry_target *t;
	struct compat_arpt_entry __user *ce;
	u_int16_t target_offset, next_offset;
	compat_uint_t origsize;
	int ret;

	origsize = *size;
	ce = (struct compat_arpt_entry __user *)*dstptr;
	if (copy_to_user(ce, e, sizeof(struct arpt_entry)) != 0 ||
	    copy_to_user(&ce->counters, &counters[i],
			 sizeof(counters[i])) != 0)
		return -EFAULT;

	*dstptr += sizeof(struct compat_arpt_entry);
	*size -= sizeof(struct arpt_entry) - sizeof(struct compat_arpt_entry);

	target_offset = e->target_offset - (origsize - *size);

	t = arpt_get_target(e);
	ret = xt_compat_target_to_user(t, dstptr, size);
	if (ret)
		return ret;
	next_offset = e->next_offset - (origsize - *size);
	if (put_user(target_offset, &ce->target_offset) != 0 ||
	    put_user(next_offset, &ce->next_offset) != 0)
		return -EFAULT;
	return 0;
}

/* Dump the whole ruleset to a compat caller, entry by entry. */
static int compat_copy_entries_to_user(unsigned int total_size,
				       struct xt_table *table,
				       void __user *userptr)
{
	struct xt_counters *counters;
	const struct xt_table_info *private = table->private;
	void __user *pos;
	unsigned int size;
	int ret = 0;
	unsigned int i = 0;
	struct arpt_entry *iter;

	counters = alloc_counters(table);
	if (IS_ERR(counters))
		return PTR_ERR(counters);

	pos = userptr;
	size = total_size;
	xt_entry_foreach(iter, private->entries, total_size) {
		ret = compat_copy_entry_to_user(iter, &pos,
						&size, counters, i++);
		if (ret != 0)
			break;
	}
	vfree(counters);
	return ret;
}

/* 32-bit userspace layout of struct arpt_get_entries. */
struct compat_arpt_get_entries {
	char name[XT_TABLE_MAXNAMELEN];
	compat_uint_t size;
	struct compat_arpt_entry entrytable[0];
};

/* Compat ARPT_SO_GET_ENTRIES handler. */
static int compat_get_entries(struct net *net,
			      struct compat_arpt_get_entries __user *uptr,
			      int *len)
{
	int ret;
	struct compat_arpt_get_entries get;
	struct xt_table *t;

	if (*len < sizeof(get)) {
		duprintf("compat_get_entries: %u < %zu\n", *len, sizeof(get));
		return -EINVAL;
	}
	if (copy_from_user(&get, uptr, sizeof(get)) != 0)
		return -EFAULT;
	if (*len != sizeof(struct compat_arpt_get_entries) + get.size) {
		duprintf("compat_get_entries: %u != %zu\n",
			 *len, sizeof(get) + get.size);
		return -EINVAL;
	}

	xt_compat_lock(NFPROTO_ARP);
	t = xt_find_table_lock(net, NFPROTO_ARP, get.name);
	if (!IS_ERR_OR_NULL(t)) {
		const struct xt_table_info *private = t->private;
		struct xt_table_info info;

		duprintf("t->private->number = %u\n", private->number);
		ret = compat_table_info(private, &info);
		if (!ret && get.size == info.size) {
			ret = compat_copy_entries_to_user(private->size,
							  t, uptr->entrytable);
		} else if (!ret) {
			duprintf("compat_get_entries: I've got %u not %u!\n",
				 private->size, get.size);
			ret = -EAGAIN;
		}
		xt_compat_flush_offsets(NFPROTO_ARP);
		module_put(t->me);
		xt_table_unlock(t);
	} else
		ret = t ? PTR_ERR(t) : -ENOENT;

	xt_compat_unlock(NFPROTO_ARP);
	return ret;
}

static int do_arpt_get_ctl(struct sock *, int, void __user *, int *);

/* Compat getsockopt dispatcher; falls through to the native handler for
 * commands with no compat-specific path. */
static int compat_do_arpt_get_ctl(struct sock *sk, int cmd, void __user *user,
				  int *len)
{
	int ret;

	if (!ns_capable(sock_net(sk)->user_ns, CAP_NET_ADMIN))
		return -EPERM;

	switch (cmd) {
	case ARPT_SO_GET_INFO:
		ret = get_info(sock_net(sk), user, len, 1);
		break;
	case ARPT_SO_GET_ENTRIES:
		ret = compat_get_entries(sock_net(sk), user, len);
		break;
	default:
		ret = do_arpt_get_ctl(sk, cmd, user, len);
	}
	return ret;
}
#endif

/* Native setsockopt dispatcher; requires CAP_NET_ADMIN. */
static int do_arpt_set_ctl(struct sock *sk, int cmd, void __user *user,
			   unsigned int len)
{
	int ret;

	if (!ns_capable(sock_net(sk)->user_ns, CAP_NET_ADMIN))
		return -EPERM;

	switch (cmd) {
	case ARPT_SO_SET_REPLACE:
		ret = do_replace(sock_net(sk), user, len);
		break;

	case ARPT_SO_SET_ADD_COUNTERS:
		ret = do_add_counters(sock_net(sk), user, len, 0);
		break;

	default:
		duprintf("do_arpt_set_ctl:  unknown request %i\n", cmd);
		ret = -EINVAL;
	}

	return ret;
}

/* Native getsockopt dispatcher; requires CAP_NET_ADMIN. */
static int do_arpt_get_ctl(struct sock *sk, int cmd, void __user *user,
			   int *len)
{
	int ret;

	if (!ns_capable(sock_net(sk)->user_ns, CAP_NET_ADMIN))
		return -EPERM;

	switch (cmd) {
	case ARPT_SO_GET_INFO:
		ret = get_info(sock_net(sk), user, len, 0);
		break;

	case ARPT_SO_GET_ENTRIES:
		ret = get_entries(sock_net(sk), user, len);
		break;

	case ARPT_SO_GET_REVISION_TARGET: {
		struct xt_get_revision rev;

		if (*len != sizeof(rev)) {
			ret = -EINVAL;
			break;
		}
		if (copy_from_user(&rev, user, sizeof(rev)) != 0) {
			ret = -EFAULT;
			break;
		}
		rev.name[sizeof(rev.name)-1] = 0;

		try_then_request_module(xt_find_revision(NFPROTO_ARP, rev.name,
							 rev.revision, 1, &ret),
					"arpt_%s", rev.name);
		break;
	}

	default:
		duprintf("do_arpt_get_ctl: unknown request %i\n", cmd);
		ret = -EINVAL;
	}

	return ret;
}

/* Unregister a table and free everything it owned: rules, table info,
 * and the extra module reference held for non-initial rules. */
static void __arpt_unregister_table(struct xt_table *table)
{
	struct xt_table_info *private;
	void *loc_cpu_entry;
	struct module *table_owner = table->me;
	struct arpt_entry *iter;

	private = xt_unregister_table(table);

	/* Decrease module usage counts and free resources */
	loc_cpu_entry = private->entries;
	xt_entry_foreach(iter, loc_cpu_entry, private->size)
		cleanup_entry(iter);
	if (private->number > private->initial_entries)
		module_put(table_owner);
	xt_free_table_info(private);
}

/* Register an arptables table with its initial ruleset and hook ops.
 * On success *res points at the live table. */
int arpt_register_table(struct net *net,
			const struct xt_table *table,
			const struct arpt_replace *repl,
			const struct nf_hook_ops *ops,
			struct xt_table **res)
{
	int ret;
	struct xt_table_info *newinfo;
	struct xt_table_info bootstrap = {0};
	void *loc_cpu_entry;
	struct xt_table *new_table;

	newinfo = xt_alloc_table_info(repl->size);
	if (!newinfo)
		return -ENOMEM;

	loc_cpu_entry = newinfo->entries;
	memcpy(loc_cpu_entry, repl->entries, repl->size);

	ret = translate_table(newinfo, loc_cpu_entry, repl);
	duprintf("arpt_register_table: translate table gives %d\n", ret);
	if (ret != 0)
		goto out_free;

	new_table = xt_register_table(net, table, &bootstrap, newinfo);
	if (IS_ERR(new_table)) {
		ret = PTR_ERR(new_table);
		goto out_free;
	}

	/* set res now, will see skbs right after nf_register_net_hooks */
	WRITE_ONCE(*res, new_table);

	ret = nf_register_net_hooks(net, ops, hweight32(table->valid_hooks));
	if (ret != 0) {
		__arpt_unregister_table(new_table);
		*res = NULL;
	}

	return ret;

out_free:
	xt_free_table_info(newinfo);
	return ret;
}

/* Public unregister: detach the hooks first, then tear the table down. */
void arpt_unregister_table(struct net *net, struct xt_table *table,
			   const struct nf_hook_ops *ops)
{
	nf_unregister_net_hooks(net, ops, hweight32(table->valid_hooks));
	__arpt_unregister_table(table);
}

/* The built-in targets: standard (NULL) and error.
 */
static struct xt_target arpt_builtin_tg[] __read_mostly = {
	{
		.name             = XT_STANDARD_TARGET,
		.targetsize       = sizeof(int),
		.family           = NFPROTO_ARP,
#ifdef CONFIG_COMPAT
		.compatsize       = sizeof(compat_int_t),
		.compat_from_user = compat_standard_from_user,
		.compat_to_user   = compat_standard_to_user,
#endif
	},
	{
		.name             = XT_ERROR_TARGET,
		.target           = arpt_error,
		.targetsize       = XT_FUNCTION_MAXNAMELEN,
		.family           = NFPROTO_ARP,
	},
};

/* setsockopt/getsockopt registration for the arptables control interface. */
static struct nf_sockopt_ops arpt_sockopts = {
	.pf		= PF_INET,
	.set_optmin	= ARPT_BASE_CTL,
	.set_optmax	= ARPT_SO_SET_MAX+1,
	.set		= do_arpt_set_ctl,
#ifdef CONFIG_COMPAT
	.compat_set	= compat_do_arpt_set_ctl,
#endif
	.get_optmin	= ARPT_BASE_CTL,
	.get_optmax	= ARPT_SO_GET_MAX+1,
	.get		= do_arpt_get_ctl,
#ifdef CONFIG_COMPAT
	.compat_get	= compat_do_arpt_get_ctl,
#endif
	.owner		= THIS_MODULE,
};

/* Per-netns init/exit: set up the NFPROTO_ARP xt state. */
static int __net_init arp_tables_net_init(struct net *net)
{
	return xt_proto_init(net, NFPROTO_ARP);
}

static void __net_exit arp_tables_net_exit(struct net *net)
{
	xt_proto_fini(net, NFPROTO_ARP);
}

static struct pernet_operations arp_tables_net_ops = {
	.init = arp_tables_net_init,
	.exit = arp_tables_net_exit,
};

/* Module init: pernet state, built-in targets, then the sockopt hooks,
 * with goto-based unwinding on failure. */
static int __init arp_tables_init(void)
{
	int ret;

	ret = register_pernet_subsys(&arp_tables_net_ops);
	if (ret < 0)
		goto err1;

	/* No one else will be downing sem now, so we won't sleep */
	ret = xt_register_targets(arpt_builtin_tg, ARRAY_SIZE(arpt_builtin_tg));
	if (ret < 0)
		goto err2;

	/* Register setsockopt */
	ret = nf_register_sockopt(&arpt_sockopts);
	if (ret < 0)
		goto err4;

	pr_info("arp_tables: (C) 2002 David S. Miller\n");
	return 0;

err4:
	xt_unregister_targets(arpt_builtin_tg, ARRAY_SIZE(arpt_builtin_tg));
err2:
	unregister_pernet_subsys(&arp_tables_net_ops);
err1:
	return ret;
}

/* Module exit: reverse of arp_tables_init(). */
static void __exit arp_tables_fini(void)
{
	nf_unregister_sockopt(&arpt_sockopts);
	xt_unregister_targets(arpt_builtin_tg, ARRAY_SIZE(arpt_builtin_tg));
	unregister_pernet_subsys(&arp_tables_net_ops);
}

EXPORT_SYMBOL(arpt_register_table);
EXPORT_SYMBOL(arpt_unregister_table);
EXPORT_SYMBOL(arpt_do_table);

module_init(arp_tables_init);
module_exit(arp_tables_fini);
/*
 * Packet matching code for ARP packets.
 *
 * Based heavily, if not almost entirely, upon ip_tables.c framework.
 *
 * Some ARP specific bits are:
 *
 * Copyright (C) 2002 David S. Miller (davem@redhat.com)
 * Copyright (C) 2006-2009 Patrick McHardy <kaber@trash.net>
 *
 */
#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
#include <linux/kernel.h>
#include <linux/skbuff.h>
#include <linux/netdevice.h>
#include <linux/capability.h>
#include <linux/if_arp.h>
#include <linux/kmod.h>
#include <linux/vmalloc.h>
#include <linux/proc_fs.h>
#include <linux/module.h>
#include <linux/init.h>
#include <linux/mutex.h>
#include <linux/err.h>
#include <net/compat.h>
#include <net/sock.h>
#include <asm/uaccess.h>

#include <linux/netfilter/x_tables.h>
#include <linux/netfilter_arp/arp_tables.h>
#include "../../netfilter/xt_repldata.h"

MODULE_LICENSE("GPL");
MODULE_AUTHOR("David S. Miller <davem@redhat.com>");
MODULE_DESCRIPTION("arptables core");

/*#define DEBUG_ARP_TABLES*/
/*#define DEBUG_ARP_TABLES_USER*/

#ifdef DEBUG_ARP_TABLES
#define dprintf(format, args...)  pr_debug(format, ## args)
#else
#define dprintf(format, args...)
#endif

#ifdef DEBUG_ARP_TABLES_USER
#define duprintf(format, args...) pr_debug(format, ## args)
#else
#define duprintf(format, args...)
#endif

#ifdef CONFIG_NETFILTER_DEBUG
#define ARP_NF_ASSERT(x)	WARN_ON(!(x))
#else
#define ARP_NF_ASSERT(x)
#endif

/* Build the boilerplate initial (empty) table for registration. */
void *arpt_alloc_initial_table(const struct xt_table *info)
{
	return xt_alloc_initial_table(arpt, ARPT);
}
EXPORT_SYMBOL_GPL(arpt_alloc_initial_table);

/* Masked compare of a hardware address against a rule's addr/mask pair;
 * returns non-zero on mismatch. */
static inline int arp_devaddr_compare(const struct arpt_devaddr_info *ap,
				      const char *hdr_addr, int len)
{
	int i, ret;

	if (len > ARPT_DEV_ADDR_LEN_MAX)
		len = ARPT_DEV_ADDR_LEN_MAX;

	ret = 0;
	for (i = 0; i < len; i++)
		ret |= (hdr_addr[i] ^ ap->addr[i]) & ap->mask[i];

	return ret != 0;
}

/*
 * Unfortunately, _b and _mask are not aligned to an int (or long int)
 * Some arches dont care, unrolling the loop is a win on them.
 * For other arches, we only have a 16bit alignement.
 */
static unsigned long ifname_compare(const char *_a, const char *_b, const char *_mask)
{
#ifdef CONFIG_HAVE_EFFICIENT_UNALIGNED_ACCESS
	unsigned long ret = ifname_compare_aligned(_a, _b, _mask);
#else
	unsigned long ret = 0;
	const u16 *a = (const u16 *)_a;
	const u16 *b = (const u16 *)_b;
	const u16 *mask = (const u16 *)_mask;
	int i;

	for (i = 0; i < IFNAMSIZ/sizeof(u16); i++)
		ret |= (a[i] ^ b[i]) & mask[i];
#endif
	return ret;
}

/* Returns whether packet matches rule or not. */
static inline int arp_packet_match(const struct arphdr *arphdr,
				   struct net_device *dev,
				   const char *indev,
				   const char *outdev,
				   const struct arpt_arp *arpinfo)
{
	const char *arpptr = (char *)(arphdr + 1);
	const char *src_devaddr, *tgt_devaddr;
	__be32 src_ipaddr, tgt_ipaddr;
	long ret;

/* FWINV: apply the per-field invert flag to a match result. */
#define FWINV(bool, invflg) ((bool) ^ !!(arpinfo->invflags & (invflg)))

	if (FWINV((arphdr->ar_op & arpinfo->arpop_mask) != arpinfo->arpop,
		  ARPT_INV_ARPOP)) {
		dprintf("ARP operation field mismatch.\n");
		dprintf("ar_op: %04x info->arpop: %04x info->arpop_mask: %04x\n",
			arphdr->ar_op, arpinfo->arpop, arpinfo->arpop_mask);
		return 0;
	}

	if (FWINV((arphdr->ar_hrd & arpinfo->arhrd_mask) != arpinfo->arhrd,
		  ARPT_INV_ARPHRD)) {
		dprintf("ARP hardware address format mismatch.\n");
		dprintf("ar_hrd: %04x info->arhrd: %04x info->arhrd_mask: %04x\n",
			arphdr->ar_hrd, arpinfo->arhrd, arpinfo->arhrd_mask);
		return 0;
	}

	if (FWINV((arphdr->ar_pro & arpinfo->arpro_mask) != arpinfo->arpro,
		  ARPT_INV_ARPPRO)) {
		dprintf("ARP protocol address format mismatch.\n");
		dprintf("ar_pro: %04x info->arpro: %04x info->arpro_mask: %04x\n",
			arphdr->ar_pro, arpinfo->arpro, arpinfo->arpro_mask);
		return 0;
	}

	if (FWINV((arphdr->ar_hln & arpinfo->arhln_mask) != arpinfo->arhln,
		  ARPT_INV_ARPHLN)) {
		dprintf("ARP hardware address length mismatch.\n");
		dprintf("ar_hln: %02x info->arhln: %02x info->arhln_mask: %02x\n",
			arphdr->ar_hln, arpinfo->arhln, arpinfo->arhln_mask);
		return 0;
	}

	/* Walk the variable-length ARP payload: sender hw addr, sender IP,
	 * target hw addr, target IP. memcpy because the IPs are unaligned. */
	src_devaddr = arpptr;
	arpptr += dev->addr_len;
	memcpy(&src_ipaddr, arpptr, sizeof(u32));
	arpptr += sizeof(u32);
	tgt_devaddr = arpptr;
	arpptr += dev->addr_len;
	memcpy(&tgt_ipaddr, arpptr, sizeof(u32));

	if (FWINV(arp_devaddr_compare(&arpinfo->src_devaddr, src_devaddr, dev->addr_len),
		  ARPT_INV_SRCDEVADDR) ||
	    FWINV(arp_devaddr_compare(&arpinfo->tgt_devaddr, tgt_devaddr, dev->addr_len),
		  ARPT_INV_TGTDEVADDR)) {
		dprintf("Source or target device address mismatch.\n");

		return 0;
	}

	if (FWINV((src_ipaddr & arpinfo->smsk.s_addr) != arpinfo->src.s_addr,
		  ARPT_INV_SRCIP) ||
	    FWINV(((tgt_ipaddr & arpinfo->tmsk.s_addr) != arpinfo->tgt.s_addr),
		  ARPT_INV_TGTIP)) {
		dprintf("Source or target IP address mismatch.\n");

		dprintf("SRC: %pI4. Mask: %pI4. Target: %pI4.%s\n",
			&src_ipaddr,
			&arpinfo->smsk.s_addr,
			&arpinfo->src.s_addr,
			arpinfo->invflags & ARPT_INV_SRCIP ? " (INV)" : "");
		dprintf("TGT: %pI4 Mask: %pI4 Target: %pI4.%s\n",
			&tgt_ipaddr,
			&arpinfo->tmsk.s_addr,
			&arpinfo->tgt.s_addr,
			arpinfo->invflags & ARPT_INV_TGTIP ? " (INV)" : "");
		return 0;
	}

	/* Look for ifname matches.  */
	ret = ifname_compare(indev, arpinfo->iniface, arpinfo->iniface_mask);

	if (FWINV(ret != 0, ARPT_INV_VIA_IN)) {
		dprintf("VIA in mismatch (%s vs %s).%s\n",
			indev, arpinfo->iniface,
			arpinfo->invflags & ARPT_INV_VIA_IN ? " (INV)" : "");
		return 0;
	}

	ret = ifname_compare(outdev, arpinfo->outiface, arpinfo->outiface_mask);

	if (FWINV(ret != 0, ARPT_INV_VIA_OUT)) {
		dprintf("VIA out mismatch (%s vs %s).%s\n",
			outdev, arpinfo->outiface,
			arpinfo->invflags & ARPT_INV_VIA_OUT ? " (INV)" : "");
		return 0;
	}

	return 1;
#undef FWINV
}

/* Reject rules with flag or invert-flag bits outside the known masks. */
static inline int arp_checkentry(const struct arpt_arp *arp)
{
	if (arp->flags & ~ARPT_F_MASK) {
		duprintf("Unknown flag bits set: %08X\n",
			 arp->flags & ~ARPT_F_MASK);
		return 0;
	}
	if (arp->invflags & ~ARPT_INV_MASK) {
		duprintf("Unknown invflag bits set: %08X\n",
			 arp->invflags & ~ARPT_INV_MASK);
		return 0;
	}

	return 1;
}

/* ERROR target handler: hitting it at runtime means a corrupt table. */
static unsigned int
arpt_error(struct sk_buff *skb, const struct xt_action_param *par)
{
	net_err_ratelimited("arp_tables: error: '%s'\n",
			    (const char *)par->targinfo);

	return NF_DROP;
}

/* Const-correct wrapper around arpt_get_target(). */
static inline const struct xt_entry_target *
arpt_get_target_c(const struct arpt_entry *e)
{
	return arpt_get_target((struct arpt_entry *)e);
}

/* Resolve a byte offset within the rule blob to an entry pointer. */
static inline struct arpt_entry *
get_entry(const void *base, unsigned int offset)
{
	return (struct arpt_entry *)(base + offset);
}

/* Advance to the next rule using this entry's next_offset. */
static inline
struct arpt_entry *arpt_next_entry(const struct arpt_entry *entry)
{
	return (void *)entry + entry->next_offset;
}

/* Main hook entry point: walk the table's rules for one ARP packet and
 * return the verdict. Runs with BH disabled under the xt_recseq writer
 * sequence so counter readers can detect concurrent updates. */
unsigned int arpt_do_table(struct sk_buff *skb,
			   const struct nf_hook_state *state,
			   struct xt_table *table)
{
	unsigned int hook = state->hook;
	static const char nulldevname[IFNAMSIZ] __attribute__((aligned(sizeof(long))));
	unsigned int verdict = NF_DROP;
	const struct arphdr *arp;
	struct arpt_entry *e, **jumpstack;
	const char *indev, *outdev;
	const void *table_base;
	unsigned int cpu, stackidx = 0;
	const struct xt_table_info *private;
	struct xt_action_param acpar;
	unsigned int addend;

	if (!pskb_may_pull(skb, arp_hdr_len(skb->dev)))
		return NF_DROP;

	indev = state->in ? state->in->name : nulldevname;
	outdev = state->out ? state->out->name : nulldevname;

	local_bh_disable();
	addend = xt_write_recseq_begin();
	private = table->private;
	cpu     = smp_processor_id();
	/*
	 * Ensure we load private-> members after we've fetched the base
	 * pointer.
	 */
	smp_read_barrier_depends();
	table_base = private->entries;
	jumpstack  = (struct arpt_entry **)private->jumpstack[cpu];

	/* No TEE support for arptables, so no need to switch to alternate
	 * stack.  All targets that reenter must return
	 * absolute verdicts.
	 */
	e = get_entry(table_base, private->hook_entry[hook]);

	acpar.net     = state->net;
	acpar.in      = state->in;
	acpar.out     = state->out;
	acpar.hooknum = hook;
	acpar.family  = NFPROTO_ARP;
	acpar.hotdrop = false;

	arp = arp_hdr(skb);
	do {
		const struct xt_entry_target *t;
		struct xt_counters *counter;

		if (!arp_packet_match(arp, skb->dev, indev, outdev, &e->arp)) {
			e = arpt_next_entry(e);
			continue;
		}

		counter = xt_get_this_cpu_counter(&e->counters);
		ADD_COUNTER(*counter, arp_hdr_len(skb->dev), 1);

		t = arpt_get_target_c(e);

		/* Standard target? */
		if (!t->u.kernel.target->target) {
			int v;

			v = ((struct xt_standard_target *)t)->verdict;
			if (v < 0) {
				/* Pop from stack? */
				if (v != XT_RETURN) {
					/* Negative verdicts encode NF_*
					 * results as -(verdict) - 1. */
					verdict = (unsigned int)(-v) - 1;
					break;
				}
				if (stackidx == 0) {
					e = get_entry(table_base,
						      private->underflow[hook]);
				} else {
					e = jumpstack[--stackidx];
					e = arpt_next_entry(e);
				}
				continue;
			}
			if (table_base + v
			    != arpt_next_entry(e)) {
				/* Real jump: remember the return point. */
				jumpstack[stackidx++] = e;
			}

			e = get_entry(table_base, v);
			continue;
		}

		acpar.target   = t->u.kernel.target;
		acpar.targinfo = t->data;
		verdict = t->u.kernel.target->target(skb, &acpar);

		/* Target might have changed stuff. */
		arp = arp_hdr(skb);

		if (verdict == XT_CONTINUE)
			e = arpt_next_entry(e);
		else
			/* Verdict */
			break;
	} while (!acpar.hotdrop);
	xt_write_recseq_end(addend);
	local_bh_enable();

	if (acpar.hotdrop)
		return NF_DROP;
	else
		return verdict;
}

/* All zeroes == unconditional rule. */
static inline bool unconditional(const struct arpt_arp *arp)
{
	static const struct arpt_arp uncond;

	return memcmp(arp, &uncond, sizeof(uncond)) == 0;
}

/* Figures out from what hook each rule can be called: returns 0 if
 * there are loops.  Puts hook bitmask in comefrom.
 */
static int mark_source_chains(const struct xt_table_info *newinfo,
			      unsigned int valid_hooks, void *entry0)
{
	unsigned int hook;

	/* No recursion; use packet counter to save back ptrs (reset
	 * to 0 as we leave), and comefrom to save source hook bitmask.
*/ for (hook = 0; hook < NF_ARP_NUMHOOKS; hook++) { unsigned int pos = newinfo->hook_entry[hook]; struct arpt_entry *e = (struct arpt_entry *)(entry0 + pos); if (!(valid_hooks & (1 << hook))) continue; /* Set initial back pointer. */ e->counters.pcnt = pos; for (;;) { const struct xt_standard_target *t = (void *)arpt_get_target_c(e); int visited = e->comefrom & (1 << hook); if (e->comefrom & (1 << NF_ARP_NUMHOOKS)) { pr_notice("arptables: loop hook %u pos %u %08X.\n", hook, pos, e->comefrom); return 0; } e->comefrom |= ((1 << hook) | (1 << NF_ARP_NUMHOOKS)); /* Unconditional return/END. */ if ((e->target_offset == sizeof(struct arpt_entry) && (strcmp(t->target.u.user.name, XT_STANDARD_TARGET) == 0) && t->verdict < 0 && unconditional(&e->arp)) || visited) { unsigned int oldpos, size; if ((strcmp(t->target.u.user.name, XT_STANDARD_TARGET) == 0) && t->verdict < -NF_MAX_VERDICT - 1) { duprintf("mark_source_chains: bad " "negative verdict (%i)\n", t->verdict); return 0; } /* Return: backtrack through the last * big jump. */ do { e->comefrom ^= (1<<NF_ARP_NUMHOOKS); oldpos = pos; pos = e->counters.pcnt; e->counters.pcnt = 0; /* We're at the start. */ if (pos == oldpos) goto next; e = (struct arpt_entry *) (entry0 + pos); } while (oldpos == pos + e->next_offset); /* Move along one */ size = e->next_offset; e = (struct arpt_entry *) (entry0 + pos + size); e->counters.pcnt = pos; pos += size; } else { int newpos = t->verdict; if (strcmp(t->target.u.user.name, XT_STANDARD_TARGET) == 0 && newpos >= 0) { if (newpos > newinfo->size - sizeof(struct arpt_entry)) { duprintf("mark_source_chains: " "bad verdict (%i)\n", newpos); return 0; } /* This a jump; chase it. */ duprintf("Jump rule %u -> %u\n", pos, newpos); } else { /* ... 
this is a fallthru */ newpos = pos + e->next_offset; } e = (struct arpt_entry *) (entry0 + newpos); e->counters.pcnt = pos; pos = newpos; } } next: duprintf("Finished chain %u\n", hook); } return 1; } static inline int check_entry(const struct arpt_entry *e) { const struct xt_entry_target *t; if (!arp_checkentry(&e->arp)) return -EINVAL; if (e->target_offset + sizeof(struct xt_entry_target) > e->next_offset) return -EINVAL; t = arpt_get_target_c(e); if (e->target_offset + t->u.target_size > e->next_offset) return -EINVAL; return 0; } static inline int check_target(struct arpt_entry *e, const char *name) { struct xt_entry_target *t = arpt_get_target(e); int ret; struct xt_tgchk_param par = { .table = name, .entryinfo = e, .target = t->u.kernel.target, .targinfo = t->data, .hook_mask = e->comefrom, .family = NFPROTO_ARP, }; ret = xt_check_target(&par, t->u.target_size - sizeof(*t), 0, false); if (ret < 0) { duprintf("arp_tables: check failed for `%s'.\n", t->u.kernel.target->name); return ret; } return 0; } static inline int find_check_entry(struct arpt_entry *e, const char *name, unsigned int size) { struct xt_entry_target *t; struct xt_target *target; int ret; e->counters.pcnt = xt_percpu_counter_alloc(); if (IS_ERR_VALUE(e->counters.pcnt)) return -ENOMEM; t = arpt_get_target(e); target = xt_request_find_target(NFPROTO_ARP, t->u.user.name, t->u.user.revision); if (IS_ERR(target)) { duprintf("find_check_entry: `%s' not found\n", t->u.user.name); ret = PTR_ERR(target); goto out; } t->u.kernel.target = target; ret = check_target(e, name); if (ret) goto err; return 0; err: module_put(t->u.kernel.target->me); out: xt_percpu_counter_free(e->counters.pcnt); return ret; } static bool check_underflow(const struct arpt_entry *e) { const struct xt_entry_target *t; unsigned int verdict; if (!unconditional(&e->arp)) return false; t = arpt_get_target_c(e); if (strcmp(t->u.user.name, XT_STANDARD_TARGET) != 0) return false; verdict = ((struct xt_standard_target *)t)->verdict; 
verdict = -verdict - 1; return verdict == NF_DROP || verdict == NF_ACCEPT; } static inline int check_entry_size_and_hooks(struct arpt_entry *e, struct xt_table_info *newinfo, const unsigned char *base, const unsigned char *limit, const unsigned int *hook_entries, const unsigned int *underflows, unsigned int valid_hooks) { unsigned int h; int err; if ((unsigned long)e % __alignof__(struct arpt_entry) != 0 || (unsigned char *)e + sizeof(struct arpt_entry) >= limit || (unsigned char *)e + e->next_offset > limit) { duprintf("Bad offset %p\n", e); return -EINVAL; } if (e->next_offset < sizeof(struct arpt_entry) + sizeof(struct xt_entry_target)) { duprintf("checking: element %p size %u\n", e, e->next_offset); return -EINVAL; } err = check_entry(e); if (err) return err; /* Check hooks & underflows */ for (h = 0; h < NF_ARP_NUMHOOKS; h++) { if (!(valid_hooks & (1 << h))) continue; if ((unsigned char *)e - base == hook_entries[h]) newinfo->hook_entry[h] = hook_entries[h]; if ((unsigned char *)e - base == underflows[h]) { if (!check_underflow(e)) { pr_err("Underflows must be unconditional and " "use the STANDARD target with " "ACCEPT/DROP\n"); return -EINVAL; } newinfo->underflow[h] = underflows[h]; } } /* Clear counters and comefrom */ e->counters = ((struct xt_counters) { 0, 0 }); e->comefrom = 0; return 0; } static inline void cleanup_entry(struct arpt_entry *e) { struct xt_tgdtor_param par; struct xt_entry_target *t; t = arpt_get_target(e); par.target = t->u.kernel.target; par.targinfo = t->data; par.family = NFPROTO_ARP; if (par.target->destroy != NULL) par.target->destroy(&par); module_put(par.target->me); xt_percpu_counter_free(e->counters.pcnt); } /* Checks and translates the user-supplied table segment (held in * newinfo). 
*/ static int translate_table(struct xt_table_info *newinfo, void *entry0, const struct arpt_replace *repl) { struct arpt_entry *iter; unsigned int i; int ret = 0; newinfo->size = repl->size; newinfo->number = repl->num_entries; /* Init all hooks to impossible value. */ for (i = 0; i < NF_ARP_NUMHOOKS; i++) { newinfo->hook_entry[i] = 0xFFFFFFFF; newinfo->underflow[i] = 0xFFFFFFFF; } duprintf("translate_table: size %u\n", newinfo->size); i = 0; /* Walk through entries, checking offsets. */ xt_entry_foreach(iter, entry0, newinfo->size) { ret = check_entry_size_and_hooks(iter, newinfo, entry0, entry0 + repl->size, repl->hook_entry, repl->underflow, repl->valid_hooks); if (ret != 0) break; ++i; if (strcmp(arpt_get_target(iter)->u.user.name, XT_ERROR_TARGET) == 0) ++newinfo->stacksize; } duprintf("translate_table: ARPT_ENTRY_ITERATE gives %d\n", ret); if (ret != 0) return ret; if (i != repl->num_entries) { duprintf("translate_table: %u not %u entries\n", i, repl->num_entries); return -EINVAL; } /* Check hooks all assigned */ for (i = 0; i < NF_ARP_NUMHOOKS; i++) { /* Only hooks which are valid */ if (!(repl->valid_hooks & (1 << i))) continue; if (newinfo->hook_entry[i] == 0xFFFFFFFF) { duprintf("Invalid hook entry %u %u\n", i, repl->hook_entry[i]); return -EINVAL; } if (newinfo->underflow[i] == 0xFFFFFFFF) { duprintf("Invalid underflow %u %u\n", i, repl->underflow[i]); return -EINVAL; } } if (!mark_source_chains(newinfo, repl->valid_hooks, entry0)) { duprintf("Looping hook\n"); return -ELOOP; } /* Finally, each sanity check must pass */ i = 0; xt_entry_foreach(iter, entry0, newinfo->size) { ret = find_check_entry(iter, repl->name, repl->size); if (ret != 0) break; ++i; } if (ret != 0) { xt_entry_foreach(iter, entry0, newinfo->size) { if (i-- == 0) break; cleanup_entry(iter); } return ret; } return ret; } static void get_counters(const struct xt_table_info *t, struct xt_counters counters[]) { struct arpt_entry *iter; unsigned int cpu; unsigned int i; 
for_each_possible_cpu(cpu) { seqcount_t *s = &per_cpu(xt_recseq, cpu); i = 0; xt_entry_foreach(iter, t->entries, t->size) { struct xt_counters *tmp; u64 bcnt, pcnt; unsigned int start; tmp = xt_get_per_cpu_counter(&iter->counters, cpu); do { start = read_seqcount_begin(s); bcnt = tmp->bcnt; pcnt = tmp->pcnt; } while (read_seqcount_retry(s, start)); ADD_COUNTER(counters[i], bcnt, pcnt); ++i; } } } static struct xt_counters *alloc_counters(const struct xt_table *table) { unsigned int countersize; struct xt_counters *counters; const struct xt_table_info *private = table->private; /* We need atomic snapshot of counters: rest doesn't change * (other than comefrom, which userspace doesn't care * about). */ countersize = sizeof(struct xt_counters) * private->number; counters = vzalloc(countersize); if (counters == NULL) return ERR_PTR(-ENOMEM); get_counters(private, counters); return counters; } static int copy_entries_to_user(unsigned int total_size, const struct xt_table *table, void __user *userptr) { unsigned int off, num; const struct arpt_entry *e; struct xt_counters *counters; struct xt_table_info *private = table->private; int ret = 0; void *loc_cpu_entry; counters = alloc_counters(table); if (IS_ERR(counters)) return PTR_ERR(counters); loc_cpu_entry = private->entries; /* ... then copy entire thing ... */ if (copy_to_user(userptr, loc_cpu_entry, total_size) != 0) { ret = -EFAULT; goto free_counters; } /* FIXME: use iterator macros --RR */ /* ... 
then go back and fix counters and names */ for (off = 0, num = 0; off < total_size; off += e->next_offset, num++){ const struct xt_entry_target *t; e = (struct arpt_entry *)(loc_cpu_entry + off); if (copy_to_user(userptr + off + offsetof(struct arpt_entry, counters), &counters[num], sizeof(counters[num])) != 0) { ret = -EFAULT; goto free_counters; } t = arpt_get_target_c(e); if (copy_to_user(userptr + off + e->target_offset + offsetof(struct xt_entry_target, u.user.name), t->u.kernel.target->name, strlen(t->u.kernel.target->name)+1) != 0) { ret = -EFAULT; goto free_counters; } } free_counters: vfree(counters); return ret; } #ifdef CONFIG_COMPAT static void compat_standard_from_user(void *dst, const void *src) { int v = *(compat_int_t *)src; if (v > 0) v += xt_compat_calc_jump(NFPROTO_ARP, v); memcpy(dst, &v, sizeof(v)); } static int compat_standard_to_user(void __user *dst, const void *src) { compat_int_t cv = *(int *)src; if (cv > 0) cv -= xt_compat_calc_jump(NFPROTO_ARP, cv); return copy_to_user(dst, &cv, sizeof(cv)) ? 
-EFAULT : 0; } static int compat_calc_entry(const struct arpt_entry *e, const struct xt_table_info *info, const void *base, struct xt_table_info *newinfo) { const struct xt_entry_target *t; unsigned int entry_offset; int off, i, ret; off = sizeof(struct arpt_entry) - sizeof(struct compat_arpt_entry); entry_offset = (void *)e - base; t = arpt_get_target_c(e); off += xt_compat_target_offset(t->u.kernel.target); newinfo->size -= off; ret = xt_compat_add_offset(NFPROTO_ARP, entry_offset, off); if (ret) return ret; for (i = 0; i < NF_ARP_NUMHOOKS; i++) { if (info->hook_entry[i] && (e < (struct arpt_entry *)(base + info->hook_entry[i]))) newinfo->hook_entry[i] -= off; if (info->underflow[i] && (e < (struct arpt_entry *)(base + info->underflow[i]))) newinfo->underflow[i] -= off; } return 0; } static int compat_table_info(const struct xt_table_info *info, struct xt_table_info *newinfo) { struct arpt_entry *iter; const void *loc_cpu_entry; int ret; if (!newinfo || !info) return -EINVAL; /* we dont care about newinfo->entries */ memcpy(newinfo, info, offsetof(struct xt_table_info, entries)); newinfo->initial_entries = 0; loc_cpu_entry = info->entries; xt_compat_init_offsets(NFPROTO_ARP, info->number); xt_entry_foreach(iter, loc_cpu_entry, info->size) { ret = compat_calc_entry(iter, info, loc_cpu_entry, newinfo); if (ret != 0) return ret; } return 0; } #endif static int get_info(struct net *net, void __user *user, const int *len, int compat) { char name[XT_TABLE_MAXNAMELEN]; struct xt_table *t; int ret; if (*len != sizeof(struct arpt_getinfo)) { duprintf("length %u != %Zu\n", *len, sizeof(struct arpt_getinfo)); return -EINVAL; } if (copy_from_user(name, user, sizeof(name)) != 0) return -EFAULT; name[XT_TABLE_MAXNAMELEN-1] = '\0'; #ifdef CONFIG_COMPAT if (compat) xt_compat_lock(NFPROTO_ARP); #endif t = try_then_request_module(xt_find_table_lock(net, NFPROTO_ARP, name), "arptable_%s", name); if (!IS_ERR_OR_NULL(t)) { struct arpt_getinfo info; const struct xt_table_info *private 
= t->private; #ifdef CONFIG_COMPAT struct xt_table_info tmp; if (compat) { ret = compat_table_info(private, &tmp); xt_compat_flush_offsets(NFPROTO_ARP); private = &tmp; } #endif memset(&info, 0, sizeof(info)); info.valid_hooks = t->valid_hooks; memcpy(info.hook_entry, private->hook_entry, sizeof(info.hook_entry)); memcpy(info.underflow, private->underflow, sizeof(info.underflow)); info.num_entries = private->number; info.size = private->size; strcpy(info.name, name); if (copy_to_user(user, &info, *len) != 0) ret = -EFAULT; else ret = 0; xt_table_unlock(t); module_put(t->me); } else ret = t ? PTR_ERR(t) : -ENOENT; #ifdef CONFIG_COMPAT if (compat) xt_compat_unlock(NFPROTO_ARP); #endif return ret; } static int get_entries(struct net *net, struct arpt_get_entries __user *uptr, const int *len) { int ret; struct arpt_get_entries get; struct xt_table *t; if (*len < sizeof(get)) { duprintf("get_entries: %u < %Zu\n", *len, sizeof(get)); return -EINVAL; } if (copy_from_user(&get, uptr, sizeof(get)) != 0) return -EFAULT; if (*len != sizeof(struct arpt_get_entries) + get.size) { duprintf("get_entries: %u != %Zu\n", *len, sizeof(struct arpt_get_entries) + get.size); return -EINVAL; } t = xt_find_table_lock(net, NFPROTO_ARP, get.name); if (!IS_ERR_OR_NULL(t)) { const struct xt_table_info *private = t->private; duprintf("t->private->number = %u\n", private->number); if (get.size == private->size) ret = copy_entries_to_user(private->size, t, uptr->entrytable); else { duprintf("get_entries: I've got %u not %u!\n", private->size, get.size); ret = -EAGAIN; } module_put(t->me); xt_table_unlock(t); } else ret = t ? 
PTR_ERR(t) : -ENOENT; return ret; } static int __do_replace(struct net *net, const char *name, unsigned int valid_hooks, struct xt_table_info *newinfo, unsigned int num_counters, void __user *counters_ptr) { int ret; struct xt_table *t; struct xt_table_info *oldinfo; struct xt_counters *counters; void *loc_cpu_old_entry; struct arpt_entry *iter; ret = 0; counters = vzalloc(num_counters * sizeof(struct xt_counters)); if (!counters) { ret = -ENOMEM; goto out; } t = try_then_request_module(xt_find_table_lock(net, NFPROTO_ARP, name), "arptable_%s", name); if (IS_ERR_OR_NULL(t)) { ret = t ? PTR_ERR(t) : -ENOENT; goto free_newinfo_counters_untrans; } /* You lied! */ if (valid_hooks != t->valid_hooks) { duprintf("Valid hook crap: %08X vs %08X\n", valid_hooks, t->valid_hooks); ret = -EINVAL; goto put_module; } oldinfo = xt_replace_table(t, num_counters, newinfo, &ret); if (!oldinfo) goto put_module; /* Update module usage count based on number of rules */ duprintf("do_replace: oldnum=%u, initnum=%u, newnum=%u\n", oldinfo->number, oldinfo->initial_entries, newinfo->number); if ((oldinfo->number > oldinfo->initial_entries) || (newinfo->number <= oldinfo->initial_entries)) module_put(t->me); if ((oldinfo->number > oldinfo->initial_entries) && (newinfo->number <= oldinfo->initial_entries)) module_put(t->me); /* Get the old counters, and synchronize with replace */ get_counters(oldinfo, counters); /* Decrease module usage counts and free resource */ loc_cpu_old_entry = oldinfo->entries; xt_entry_foreach(iter, loc_cpu_old_entry, oldinfo->size) cleanup_entry(iter); xt_free_table_info(oldinfo); if (copy_to_user(counters_ptr, counters, sizeof(struct xt_counters) * num_counters) != 0) { /* Silent error, can't fail, new table is already in place */ net_warn_ratelimited("arptables: counters copy to user failed while replacing table\n"); } vfree(counters); xt_table_unlock(t); return ret; put_module: module_put(t->me); xt_table_unlock(t); free_newinfo_counters_untrans: vfree(counters); 
out: return ret; } static int do_replace(struct net *net, const void __user *user, unsigned int len) { int ret; struct arpt_replace tmp; struct xt_table_info *newinfo; void *loc_cpu_entry; struct arpt_entry *iter; if (copy_from_user(&tmp, user, sizeof(tmp)) != 0) return -EFAULT; /* overflow check */ if (tmp.num_counters >= INT_MAX / sizeof(struct xt_counters)) return -ENOMEM; if (tmp.num_counters == 0) return -EINVAL; tmp.name[sizeof(tmp.name)-1] = 0; newinfo = xt_alloc_table_info(tmp.size); if (!newinfo) return -ENOMEM; loc_cpu_entry = newinfo->entries; if (copy_from_user(loc_cpu_entry, user + sizeof(tmp), tmp.size) != 0) { ret = -EFAULT; goto free_newinfo; } ret = translate_table(newinfo, loc_cpu_entry, &tmp); if (ret != 0) goto free_newinfo; duprintf("arp_tables: Translated table\n"); ret = __do_replace(net, tmp.name, tmp.valid_hooks, newinfo, tmp.num_counters, tmp.counters); if (ret) goto free_newinfo_untrans; return 0; free_newinfo_untrans: xt_entry_foreach(iter, loc_cpu_entry, newinfo->size) cleanup_entry(iter); free_newinfo: xt_free_table_info(newinfo); return ret; } static int do_add_counters(struct net *net, const void __user *user, unsigned int len, int compat) { unsigned int i; struct xt_counters_info tmp; struct xt_counters *paddc; unsigned int num_counters; const char *name; int size; void *ptmp; struct xt_table *t; const struct xt_table_info *private; int ret = 0; struct arpt_entry *iter; unsigned int addend; #ifdef CONFIG_COMPAT struct compat_xt_counters_info compat_tmp; if (compat) { ptmp = &compat_tmp; size = sizeof(struct compat_xt_counters_info); } else #endif { ptmp = &tmp; size = sizeof(struct xt_counters_info); } if (copy_from_user(ptmp, user, size) != 0) return -EFAULT; #ifdef CONFIG_COMPAT if (compat) { num_counters = compat_tmp.num_counters; name = compat_tmp.name; } else #endif { num_counters = tmp.num_counters; name = tmp.name; } if (len != size + num_counters * sizeof(struct xt_counters)) return -EINVAL; paddc = vmalloc(len - size); if 
(!paddc) return -ENOMEM; if (copy_from_user(paddc, user + size, len - size) != 0) { ret = -EFAULT; goto free; } t = xt_find_table_lock(net, NFPROTO_ARP, name); if (IS_ERR_OR_NULL(t)) { ret = t ? PTR_ERR(t) : -ENOENT; goto free; } local_bh_disable(); private = t->private; if (private->number != num_counters) { ret = -EINVAL; goto unlock_up_free; } i = 0; addend = xt_write_recseq_begin(); xt_entry_foreach(iter, private->entries, private->size) { struct xt_counters *tmp; tmp = xt_get_this_cpu_counter(&iter->counters); ADD_COUNTER(*tmp, paddc[i].bcnt, paddc[i].pcnt); ++i; } xt_write_recseq_end(addend); unlock_up_free: local_bh_enable(); xt_table_unlock(t); module_put(t->me); free: vfree(paddc); return ret; } #ifdef CONFIG_COMPAT static inline void compat_release_entry(struct compat_arpt_entry *e) { struct xt_entry_target *t; t = compat_arpt_get_target(e); module_put(t->u.kernel.target->me); } static inline int check_compat_entry_size_and_hooks(struct compat_arpt_entry *e, struct xt_table_info *newinfo, unsigned int *size, const unsigned char *base, const unsigned char *limit, const unsigned int *hook_entries, const unsigned int *underflows, const char *name) { struct xt_entry_target *t; struct xt_target *target; unsigned int entry_offset; int ret, off, h; duprintf("check_compat_entry_size_and_hooks %p\n", e); if ((unsigned long)e % __alignof__(struct compat_arpt_entry) != 0 || (unsigned char *)e + sizeof(struct compat_arpt_entry) >= limit || (unsigned char *)e + e->next_offset > limit) { duprintf("Bad offset %p, limit = %p\n", e, limit); return -EINVAL; } if (e->next_offset < sizeof(struct compat_arpt_entry) + sizeof(struct compat_xt_entry_target)) { duprintf("checking: element %p size %u\n", e, e->next_offset); return -EINVAL; } /* For purposes of check_entry casting the compat entry is fine */ ret = check_entry((struct arpt_entry *)e); if (ret) return ret; off = sizeof(struct arpt_entry) - sizeof(struct compat_arpt_entry); entry_offset = (void *)e - (void *)base; t = 
compat_arpt_get_target(e); target = xt_request_find_target(NFPROTO_ARP, t->u.user.name, t->u.user.revision); if (IS_ERR(target)) { duprintf("check_compat_entry_size_and_hooks: `%s' not found\n", t->u.user.name); ret = PTR_ERR(target); goto out; } t->u.kernel.target = target; off += xt_compat_target_offset(target); *size += off; ret = xt_compat_add_offset(NFPROTO_ARP, entry_offset, off); if (ret) goto release_target; /* Check hooks & underflows */ for (h = 0; h < NF_ARP_NUMHOOKS; h++) { if ((unsigned char *)e - base == hook_entries[h]) newinfo->hook_entry[h] = hook_entries[h]; if ((unsigned char *)e - base == underflows[h]) newinfo->underflow[h] = underflows[h]; } /* Clear counters and comefrom */ memset(&e->counters, 0, sizeof(e->counters)); e->comefrom = 0; return 0; release_target: module_put(t->u.kernel.target->me); out: return ret; } static int compat_copy_entry_from_user(struct compat_arpt_entry *e, void **dstptr, unsigned int *size, const char *name, struct xt_table_info *newinfo, unsigned char *base) { struct xt_entry_target *t; struct xt_target *target; struct arpt_entry *de; unsigned int origsize; int ret, h; ret = 0; origsize = *size; de = (struct arpt_entry *)*dstptr; memcpy(de, e, sizeof(struct arpt_entry)); memcpy(&de->counters, &e->counters, sizeof(e->counters)); *dstptr += sizeof(struct arpt_entry); *size += sizeof(struct arpt_entry) - sizeof(struct compat_arpt_entry); de->target_offset = e->target_offset - (origsize - *size); t = compat_arpt_get_target(e); target = t->u.kernel.target; xt_compat_target_from_user(t, dstptr, size); de->next_offset = e->next_offset - (origsize - *size); for (h = 0; h < NF_ARP_NUMHOOKS; h++) { if ((unsigned char *)de - base < newinfo->hook_entry[h]) newinfo->hook_entry[h] -= origsize - *size; if ((unsigned char *)de - base < newinfo->underflow[h]) newinfo->underflow[h] -= origsize - *size; } return ret; } static int translate_compat_table(const char *name, unsigned int valid_hooks, struct xt_table_info **pinfo, void 
**pentry0, unsigned int total_size, unsigned int number, unsigned int *hook_entries, unsigned int *underflows) { unsigned int i, j; struct xt_table_info *newinfo, *info; void *pos, *entry0, *entry1; struct compat_arpt_entry *iter0; struct arpt_entry *iter1; unsigned int size; int ret = 0; info = *pinfo; entry0 = *pentry0; size = total_size; info->number = number; /* Init all hooks to impossible value. */ for (i = 0; i < NF_ARP_NUMHOOKS; i++) { info->hook_entry[i] = 0xFFFFFFFF; info->underflow[i] = 0xFFFFFFFF; } duprintf("translate_compat_table: size %u\n", info->size); j = 0; xt_compat_lock(NFPROTO_ARP); xt_compat_init_offsets(NFPROTO_ARP, number); /* Walk through entries, checking offsets. */ xt_entry_foreach(iter0, entry0, total_size) { ret = check_compat_entry_size_and_hooks(iter0, info, &size, entry0, entry0 + total_size, hook_entries, underflows, name); if (ret != 0) goto out_unlock; ++j; } ret = -EINVAL; if (j != number) { duprintf("translate_compat_table: %u not %u entries\n", j, number); goto out_unlock; } /* Check hooks all assigned */ for (i = 0; i < NF_ARP_NUMHOOKS; i++) { /* Only hooks which are valid */ if (!(valid_hooks & (1 << i))) continue; if (info->hook_entry[i] == 0xFFFFFFFF) { duprintf("Invalid hook entry %u %u\n", i, hook_entries[i]); goto out_unlock; } if (info->underflow[i] == 0xFFFFFFFF) { duprintf("Invalid underflow %u %u\n", i, underflows[i]); goto out_unlock; } } ret = -ENOMEM; newinfo = xt_alloc_table_info(size); if (!newinfo) goto out_unlock; newinfo->number = number; for (i = 0; i < NF_ARP_NUMHOOKS; i++) { newinfo->hook_entry[i] = info->hook_entry[i]; newinfo->underflow[i] = info->underflow[i]; } entry1 = newinfo->entries; pos = entry1; size = total_size; xt_entry_foreach(iter0, entry0, total_size) { ret = compat_copy_entry_from_user(iter0, &pos, &size, name, newinfo, entry1); if (ret != 0) break; } xt_compat_flush_offsets(NFPROTO_ARP); xt_compat_unlock(NFPROTO_ARP); if (ret) goto free_newinfo; ret = -ELOOP; if 
(!mark_source_chains(newinfo, valid_hooks, entry1)) goto free_newinfo; i = 0; xt_entry_foreach(iter1, entry1, newinfo->size) { iter1->counters.pcnt = xt_percpu_counter_alloc(); if (IS_ERR_VALUE(iter1->counters.pcnt)) { ret = -ENOMEM; break; } ret = check_target(iter1, name); if (ret != 0) { xt_percpu_counter_free(iter1->counters.pcnt); break; } ++i; if (strcmp(arpt_get_target(iter1)->u.user.name, XT_ERROR_TARGET) == 0) ++newinfo->stacksize; } if (ret) { /* * The first i matches need cleanup_entry (calls ->destroy) * because they had called ->check already. The other j-i * entries need only release. */ int skip = i; j -= i; xt_entry_foreach(iter0, entry0, newinfo->size) { if (skip-- > 0) continue; if (j-- == 0) break; compat_release_entry(iter0); } xt_entry_foreach(iter1, entry1, newinfo->size) { if (i-- == 0) break; cleanup_entry(iter1); } xt_free_table_info(newinfo); return ret; } *pinfo = newinfo; *pentry0 = entry1; xt_free_table_info(info); return 0; free_newinfo: xt_free_table_info(newinfo); out: xt_entry_foreach(iter0, entry0, total_size) { if (j-- == 0) break; compat_release_entry(iter0); } return ret; out_unlock: xt_compat_flush_offsets(NFPROTO_ARP); xt_compat_unlock(NFPROTO_ARP); goto out; } struct compat_arpt_replace { char name[XT_TABLE_MAXNAMELEN]; u32 valid_hooks; u32 num_entries; u32 size; u32 hook_entry[NF_ARP_NUMHOOKS]; u32 underflow[NF_ARP_NUMHOOKS]; u32 num_counters; compat_uptr_t counters; struct compat_arpt_entry entries[0]; }; static int compat_do_replace(struct net *net, void __user *user, unsigned int len) { int ret; struct compat_arpt_replace tmp; struct xt_table_info *newinfo; void *loc_cpu_entry; struct arpt_entry *iter; if (copy_from_user(&tmp, user, sizeof(tmp)) != 0) return -EFAULT; /* overflow check */ if (tmp.size >= INT_MAX / num_possible_cpus()) return -ENOMEM; if (tmp.num_counters >= INT_MAX / sizeof(struct xt_counters)) return -ENOMEM; if (tmp.num_counters == 0) return -EINVAL; tmp.name[sizeof(tmp.name)-1] = 0; newinfo = 
xt_alloc_table_info(tmp.size); if (!newinfo) return -ENOMEM; loc_cpu_entry = newinfo->entries; if (copy_from_user(loc_cpu_entry, user + sizeof(tmp), tmp.size) != 0) { ret = -EFAULT; goto free_newinfo; } ret = translate_compat_table(tmp.name, tmp.valid_hooks, &newinfo, &loc_cpu_entry, tmp.size, tmp.num_entries, tmp.hook_entry, tmp.underflow); if (ret != 0) goto free_newinfo; duprintf("compat_do_replace: Translated table\n"); ret = __do_replace(net, tmp.name, tmp.valid_hooks, newinfo, tmp.num_counters, compat_ptr(tmp.counters)); if (ret) goto free_newinfo_untrans; return 0; free_newinfo_untrans: xt_entry_foreach(iter, loc_cpu_entry, newinfo->size) cleanup_entry(iter); free_newinfo: xt_free_table_info(newinfo); return ret; } static int compat_do_arpt_set_ctl(struct sock *sk, int cmd, void __user *user, unsigned int len) { int ret; if (!ns_capable(sock_net(sk)->user_ns, CAP_NET_ADMIN)) return -EPERM; switch (cmd) { case ARPT_SO_SET_REPLACE: ret = compat_do_replace(sock_net(sk), user, len); break; case ARPT_SO_SET_ADD_COUNTERS: ret = do_add_counters(sock_net(sk), user, len, 1); break; default: duprintf("do_arpt_set_ctl: unknown request %i\n", cmd); ret = -EINVAL; } return ret; } static int compat_copy_entry_to_user(struct arpt_entry *e, void __user **dstptr, compat_uint_t *size, struct xt_counters *counters, unsigned int i) { struct xt_entry_target *t; struct compat_arpt_entry __user *ce; u_int16_t target_offset, next_offset; compat_uint_t origsize; int ret; origsize = *size; ce = (struct compat_arpt_entry __user *)*dstptr; if (copy_to_user(ce, e, sizeof(struct arpt_entry)) != 0 || copy_to_user(&ce->counters, &counters[i], sizeof(counters[i])) != 0) return -EFAULT; *dstptr += sizeof(struct compat_arpt_entry); *size -= sizeof(struct arpt_entry) - sizeof(struct compat_arpt_entry); target_offset = e->target_offset - (origsize - *size); t = arpt_get_target(e); ret = xt_compat_target_to_user(t, dstptr, size); if (ret) return ret; next_offset = e->next_offset - (origsize - 
*size); if (put_user(target_offset, &ce->target_offset) != 0 || put_user(next_offset, &ce->next_offset) != 0) return -EFAULT; return 0; } static int compat_copy_entries_to_user(unsigned int total_size, struct xt_table *table, void __user *userptr) { struct xt_counters *counters; const struct xt_table_info *private = table->private; void __user *pos; unsigned int size; int ret = 0; unsigned int i = 0; struct arpt_entry *iter; counters = alloc_counters(table); if (IS_ERR(counters)) return PTR_ERR(counters); pos = userptr; size = total_size; xt_entry_foreach(iter, private->entries, total_size) { ret = compat_copy_entry_to_user(iter, &pos, &size, counters, i++); if (ret != 0) break; } vfree(counters); return ret; } struct compat_arpt_get_entries { char name[XT_TABLE_MAXNAMELEN]; compat_uint_t size; struct compat_arpt_entry entrytable[0]; }; static int compat_get_entries(struct net *net, struct compat_arpt_get_entries __user *uptr, int *len) { int ret; struct compat_arpt_get_entries get; struct xt_table *t; if (*len < sizeof(get)) { duprintf("compat_get_entries: %u < %zu\n", *len, sizeof(get)); return -EINVAL; } if (copy_from_user(&get, uptr, sizeof(get)) != 0) return -EFAULT; if (*len != sizeof(struct compat_arpt_get_entries) + get.size) { duprintf("compat_get_entries: %u != %zu\n", *len, sizeof(get) + get.size); return -EINVAL; } xt_compat_lock(NFPROTO_ARP); t = xt_find_table_lock(net, NFPROTO_ARP, get.name); if (!IS_ERR_OR_NULL(t)) { const struct xt_table_info *private = t->private; struct xt_table_info info; duprintf("t->private->number = %u\n", private->number); ret = compat_table_info(private, &info); if (!ret && get.size == info.size) { ret = compat_copy_entries_to_user(private->size, t, uptr->entrytable); } else if (!ret) { duprintf("compat_get_entries: I've got %u not %u!\n", private->size, get.size); ret = -EAGAIN; } xt_compat_flush_offsets(NFPROTO_ARP); module_put(t->me); xt_table_unlock(t); } else ret = t ? 
PTR_ERR(t) : -ENOENT; xt_compat_unlock(NFPROTO_ARP); return ret; } static int do_arpt_get_ctl(struct sock *, int, void __user *, int *); static int compat_do_arpt_get_ctl(struct sock *sk, int cmd, void __user *user, int *len) { int ret; if (!ns_capable(sock_net(sk)->user_ns, CAP_NET_ADMIN)) return -EPERM; switch (cmd) { case ARPT_SO_GET_INFO: ret = get_info(sock_net(sk), user, len, 1); break; case ARPT_SO_GET_ENTRIES: ret = compat_get_entries(sock_net(sk), user, len); break; default: ret = do_arpt_get_ctl(sk, cmd, user, len); } return ret; } #endif static int do_arpt_set_ctl(struct sock *sk, int cmd, void __user *user, unsigned int len) { int ret; if (!ns_capable(sock_net(sk)->user_ns, CAP_NET_ADMIN)) return -EPERM; switch (cmd) { case ARPT_SO_SET_REPLACE: ret = do_replace(sock_net(sk), user, len); break; case ARPT_SO_SET_ADD_COUNTERS: ret = do_add_counters(sock_net(sk), user, len, 0); break; default: duprintf("do_arpt_set_ctl: unknown request %i\n", cmd); ret = -EINVAL; } return ret; } static int do_arpt_get_ctl(struct sock *sk, int cmd, void __user *user, int *len) { int ret; if (!ns_capable(sock_net(sk)->user_ns, CAP_NET_ADMIN)) return -EPERM; switch (cmd) { case ARPT_SO_GET_INFO: ret = get_info(sock_net(sk), user, len, 0); break; case ARPT_SO_GET_ENTRIES: ret = get_entries(sock_net(sk), user, len); break; case ARPT_SO_GET_REVISION_TARGET: { struct xt_get_revision rev; if (*len != sizeof(rev)) { ret = -EINVAL; break; } if (copy_from_user(&rev, user, sizeof(rev)) != 0) { ret = -EFAULT; break; } rev.name[sizeof(rev.name)-1] = 0; try_then_request_module(xt_find_revision(NFPROTO_ARP, rev.name, rev.revision, 1, &ret), "arpt_%s", rev.name); break; } default: duprintf("do_arpt_get_ctl: unknown request %i\n", cmd); ret = -EINVAL; } return ret; } static void __arpt_unregister_table(struct xt_table *table) { struct xt_table_info *private; void *loc_cpu_entry; struct module *table_owner = table->me; struct arpt_entry *iter; private = xt_unregister_table(table); /* Decrease 
module usage counts and free resources */ loc_cpu_entry = private->entries; xt_entry_foreach(iter, loc_cpu_entry, private->size) cleanup_entry(iter); if (private->number > private->initial_entries) module_put(table_owner); xt_free_table_info(private); } int arpt_register_table(struct net *net, const struct xt_table *table, const struct arpt_replace *repl, const struct nf_hook_ops *ops, struct xt_table **res) { int ret; struct xt_table_info *newinfo; struct xt_table_info bootstrap = {0}; void *loc_cpu_entry; struct xt_table *new_table; newinfo = xt_alloc_table_info(repl->size); if (!newinfo) return -ENOMEM; loc_cpu_entry = newinfo->entries; memcpy(loc_cpu_entry, repl->entries, repl->size); ret = translate_table(newinfo, loc_cpu_entry, repl); duprintf("arpt_register_table: translate table gives %d\n", ret); if (ret != 0) goto out_free; new_table = xt_register_table(net, table, &bootstrap, newinfo); if (IS_ERR(new_table)) { ret = PTR_ERR(new_table); goto out_free; } /* set res now, will see skbs right after nf_register_net_hooks */ WRITE_ONCE(*res, new_table); ret = nf_register_net_hooks(net, ops, hweight32(table->valid_hooks)); if (ret != 0) { __arpt_unregister_table(new_table); *res = NULL; } return ret; out_free: xt_free_table_info(newinfo); return ret; } void arpt_unregister_table(struct net *net, struct xt_table *table, const struct nf_hook_ops *ops) { nf_unregister_net_hooks(net, ops, hweight32(table->valid_hooks)); __arpt_unregister_table(table); } /* The built-in targets: standard (NULL) and error. 
*/ static struct xt_target arpt_builtin_tg[] __read_mostly = { { .name = XT_STANDARD_TARGET, .targetsize = sizeof(int), .family = NFPROTO_ARP, #ifdef CONFIG_COMPAT .compatsize = sizeof(compat_int_t), .compat_from_user = compat_standard_from_user, .compat_to_user = compat_standard_to_user, #endif }, { .name = XT_ERROR_TARGET, .target = arpt_error, .targetsize = XT_FUNCTION_MAXNAMELEN, .family = NFPROTO_ARP, }, }; static struct nf_sockopt_ops arpt_sockopts = { .pf = PF_INET, .set_optmin = ARPT_BASE_CTL, .set_optmax = ARPT_SO_SET_MAX+1, .set = do_arpt_set_ctl, #ifdef CONFIG_COMPAT .compat_set = compat_do_arpt_set_ctl, #endif .get_optmin = ARPT_BASE_CTL, .get_optmax = ARPT_SO_GET_MAX+1, .get = do_arpt_get_ctl, #ifdef CONFIG_COMPAT .compat_get = compat_do_arpt_get_ctl, #endif .owner = THIS_MODULE, }; static int __net_init arp_tables_net_init(struct net *net) { return xt_proto_init(net, NFPROTO_ARP); } static void __net_exit arp_tables_net_exit(struct net *net) { xt_proto_fini(net, NFPROTO_ARP); } static struct pernet_operations arp_tables_net_ops = { .init = arp_tables_net_init, .exit = arp_tables_net_exit, }; static int __init arp_tables_init(void) { int ret; ret = register_pernet_subsys(&arp_tables_net_ops); if (ret < 0) goto err1; /* No one else will be downing sem now, so we won't sleep */ ret = xt_register_targets(arpt_builtin_tg, ARRAY_SIZE(arpt_builtin_tg)); if (ret < 0) goto err2; /* Register setsockopt */ ret = nf_register_sockopt(&arpt_sockopts); if (ret < 0) goto err4; pr_info("arp_tables: (C) 2002 David S. 
Miller\n"); return 0; err4: xt_unregister_targets(arpt_builtin_tg, ARRAY_SIZE(arpt_builtin_tg)); err2: unregister_pernet_subsys(&arp_tables_net_ops); err1: return ret; } static void __exit arp_tables_fini(void) { nf_unregister_sockopt(&arpt_sockopts); xt_unregister_targets(arpt_builtin_tg, ARRAY_SIZE(arpt_builtin_tg)); unregister_pernet_subsys(&arp_tables_net_ops); } EXPORT_SYMBOL(arpt_register_table); EXPORT_SYMBOL(arpt_unregister_table); EXPORT_SYMBOL(arpt_do_table); module_init(arp_tables_init); module_exit(arp_tables_fini);
check_compat_entry_size_and_hooks(struct compat_arpt_entry *e, struct xt_table_info *newinfo, unsigned int *size, const unsigned char *base, const unsigned char *limit, const unsigned int *hook_entries, const unsigned int *underflows, const char *name) { struct xt_entry_target *t; struct xt_target *target; unsigned int entry_offset; int ret, off, h; duprintf("check_compat_entry_size_and_hooks %p\n", e); if ((unsigned long)e % __alignof__(struct compat_arpt_entry) != 0 || (unsigned char *)e + sizeof(struct compat_arpt_entry) >= limit) { duprintf("Bad offset %p, limit = %p\n", e, limit); return -EINVAL; } if (e->next_offset < sizeof(struct compat_arpt_entry) + sizeof(struct compat_xt_entry_target)) { duprintf("checking: element %p size %u\n", e, e->next_offset); return -EINVAL; } /* For purposes of check_entry casting the compat entry is fine */ ret = check_entry((struct arpt_entry *)e); if (ret) return ret; off = sizeof(struct arpt_entry) - sizeof(struct compat_arpt_entry); entry_offset = (void *)e - (void *)base; t = compat_arpt_get_target(e); target = xt_request_find_target(NFPROTO_ARP, t->u.user.name, t->u.user.revision); if (IS_ERR(target)) { duprintf("check_compat_entry_size_and_hooks: `%s' not found\n", t->u.user.name); ret = PTR_ERR(target); goto out; } t->u.kernel.target = target; off += xt_compat_target_offset(target); *size += off; ret = xt_compat_add_offset(NFPROTO_ARP, entry_offset, off); if (ret) goto release_target; /* Check hooks & underflows */ for (h = 0; h < NF_ARP_NUMHOOKS; h++) { if ((unsigned char *)e - base == hook_entries[h]) newinfo->hook_entry[h] = hook_entries[h]; if ((unsigned char *)e - base == underflows[h]) newinfo->underflow[h] = underflows[h]; } /* Clear counters and comefrom */ memset(&e->counters, 0, sizeof(e->counters)); e->comefrom = 0; return 0; release_target: module_put(t->u.kernel.target->me); out: return ret; }
check_compat_entry_size_and_hooks(struct compat_arpt_entry *e, struct xt_table_info *newinfo, unsigned int *size, const unsigned char *base, const unsigned char *limit, const unsigned int *hook_entries, const unsigned int *underflows, const char *name) { struct xt_entry_target *t; struct xt_target *target; unsigned int entry_offset; int ret, off, h; duprintf("check_compat_entry_size_and_hooks %p\n", e); if ((unsigned long)e % __alignof__(struct compat_arpt_entry) != 0 || (unsigned char *)e + sizeof(struct compat_arpt_entry) >= limit || (unsigned char *)e + e->next_offset > limit) { duprintf("Bad offset %p, limit = %p\n", e, limit); return -EINVAL; } if (e->next_offset < sizeof(struct compat_arpt_entry) + sizeof(struct compat_xt_entry_target)) { duprintf("checking: element %p size %u\n", e, e->next_offset); return -EINVAL; } /* For purposes of check_entry casting the compat entry is fine */ ret = check_entry((struct arpt_entry *)e); if (ret) return ret; off = sizeof(struct arpt_entry) - sizeof(struct compat_arpt_entry); entry_offset = (void *)e - (void *)base; t = compat_arpt_get_target(e); target = xt_request_find_target(NFPROTO_ARP, t->u.user.name, t->u.user.revision); if (IS_ERR(target)) { duprintf("check_compat_entry_size_and_hooks: `%s' not found\n", t->u.user.name); ret = PTR_ERR(target); goto out; } t->u.kernel.target = target; off += xt_compat_target_offset(target); *size += off; ret = xt_compat_add_offset(NFPROTO_ARP, entry_offset, off); if (ret) goto release_target; /* Check hooks & underflows */ for (h = 0; h < NF_ARP_NUMHOOKS; h++) { if ((unsigned char *)e - base == hook_entries[h]) newinfo->hook_entry[h] = hook_entries[h]; if ((unsigned char *)e - base == underflows[h]) newinfo->underflow[h] = underflows[h]; } /* Clear counters and comefrom */ memset(&e->counters, 0, sizeof(e->counters)); e->comefrom = 0; return 0; release_target: module_put(t->u.kernel.target->me); out: return ret; }
{'added': [(576, '\t (unsigned char *)e + sizeof(struct arpt_entry) >= limit ||'), (577, '\t (unsigned char *)e + e->next_offset > limit) {'), (1236, '\t (unsigned char *)e + sizeof(struct compat_arpt_entry) >= limit ||'), (1237, '\t (unsigned char *)e + e->next_offset > limit) {')], 'deleted': [(576, '\t (unsigned char *)e + sizeof(struct arpt_entry) >= limit) {'), (1235, '\t (unsigned char *)e + sizeof(struct compat_arpt_entry) >= limit) {')]}
4
2
1,537
9,608
59
423
10
https://github.com/torvalds/linux
CVE-2016-4998
CWE-119
508
ip_options.c
C
ip_options_get_alloc
/* * INET An implementation of the TCP/IP protocol suite for the LINUX * operating system. INET is implemented using the BSD Socket * interface as the means of communication with the user level. * * The options processing module for ip.c * * Authors: A.N.Kuznetsov * */ #include <linux/capability.h> #include <linux/module.h> #include <linux/slab.h> #include <linux/types.h> #include <asm/uaccess.h> #include <linux/skbuff.h> #include <linux/ip.h> #include <linux/icmp.h> #include <linux/netdevice.h> #include <linux/rtnetlink.h> #include <net/sock.h> #include <net/ip.h> #include <net/icmp.h> #include <net/route.h> #include <net/cipso_ipv4.h> /* * Write options to IP header, record destination address to * source route option, address of outgoing interface * (we should already know it, so that this function is allowed be * called only after routing decision) and timestamp, * if we originate this datagram. * * daddr is real destination address, next hop is recorded in IP header. * saddr is address of outgoing interface. 
*/ void ip_options_build(struct sk_buff * skb, struct ip_options * opt, __be32 daddr, struct rtable *rt, int is_frag) { unsigned char *iph = skb_network_header(skb); memcpy(&(IPCB(skb)->opt), opt, sizeof(struct ip_options)); memcpy(iph+sizeof(struct iphdr), opt->__data, opt->optlen); opt = &(IPCB(skb)->opt); if (opt->srr) memcpy(iph+opt->srr+iph[opt->srr+1]-4, &daddr, 4); if (!is_frag) { if (opt->rr_needaddr) ip_rt_get_source(iph+opt->rr+iph[opt->rr+2]-5, rt); if (opt->ts_needaddr) ip_rt_get_source(iph+opt->ts+iph[opt->ts+2]-9, rt); if (opt->ts_needtime) { struct timespec tv; __be32 midtime; getnstimeofday(&tv); midtime = htonl((tv.tv_sec % 86400) * MSEC_PER_SEC + tv.tv_nsec / NSEC_PER_MSEC); memcpy(iph+opt->ts+iph[opt->ts+2]-5, &midtime, 4); } return; } if (opt->rr) { memset(iph+opt->rr, IPOPT_NOP, iph[opt->rr+1]); opt->rr = 0; opt->rr_needaddr = 0; } if (opt->ts) { memset(iph+opt->ts, IPOPT_NOP, iph[opt->ts+1]); opt->ts = 0; opt->ts_needaddr = opt->ts_needtime = 0; } } /* * Provided (sopt, skb) points to received options, * build in dopt compiled option set appropriate for answering. * i.e. invert SRR option, copy anothers, * and grab room in RR/TS options. * * NOTE: dopt cannot point to skb. 
*/ int ip_options_echo(struct ip_options * dopt, struct sk_buff * skb) { struct ip_options *sopt; unsigned char *sptr, *dptr; int soffset, doffset; int optlen; __be32 daddr; memset(dopt, 0, sizeof(struct ip_options)); sopt = &(IPCB(skb)->opt); if (sopt->optlen == 0) { dopt->optlen = 0; return 0; } sptr = skb_network_header(skb); dptr = dopt->__data; daddr = skb_rtable(skb)->rt_spec_dst; if (sopt->rr) { optlen = sptr[sopt->rr+1]; soffset = sptr[sopt->rr+2]; dopt->rr = dopt->optlen + sizeof(struct iphdr); memcpy(dptr, sptr+sopt->rr, optlen); if (sopt->rr_needaddr && soffset <= optlen) { if (soffset + 3 > optlen) return -EINVAL; dptr[2] = soffset + 4; dopt->rr_needaddr = 1; } dptr += optlen; dopt->optlen += optlen; } if (sopt->ts) { optlen = sptr[sopt->ts+1]; soffset = sptr[sopt->ts+2]; dopt->ts = dopt->optlen + sizeof(struct iphdr); memcpy(dptr, sptr+sopt->ts, optlen); if (soffset <= optlen) { if (sopt->ts_needaddr) { if (soffset + 3 > optlen) return -EINVAL; dopt->ts_needaddr = 1; soffset += 4; } if (sopt->ts_needtime) { if (soffset + 3 > optlen) return -EINVAL; if ((dptr[3]&0xF) != IPOPT_TS_PRESPEC) { dopt->ts_needtime = 1; soffset += 4; } else { dopt->ts_needtime = 0; if (soffset + 7 <= optlen) { __be32 addr; memcpy(&addr, dptr+soffset-1, 4); if (inet_addr_type(dev_net(skb_dst(skb)->dev), addr) != RTN_UNICAST) { dopt->ts_needtime = 1; soffset += 8; } } } } dptr[2] = soffset; } dptr += optlen; dopt->optlen += optlen; } if (sopt->srr) { unsigned char * start = sptr+sopt->srr; __be32 faddr; optlen = start[1]; soffset = start[2]; doffset = 0; if (soffset > optlen) soffset = optlen + 1; soffset -= 4; if (soffset > 3) { memcpy(&faddr, &start[soffset-1], 4); for (soffset-=4, doffset=4; soffset > 3; soffset-=4, doffset+=4) memcpy(&dptr[doffset-1], &start[soffset-1], 4); /* * RFC1812 requires to fix illegal source routes. 
*/ if (memcmp(&ip_hdr(skb)->saddr, &start[soffset + 3], 4) == 0) doffset -= 4; } if (doffset > 3) { memcpy(&start[doffset-1], &daddr, 4); dopt->faddr = faddr; dptr[0] = start[0]; dptr[1] = doffset+3; dptr[2] = 4; dptr += doffset+3; dopt->srr = dopt->optlen + sizeof(struct iphdr); dopt->optlen += doffset+3; dopt->is_strictroute = sopt->is_strictroute; } } if (sopt->cipso) { optlen = sptr[sopt->cipso+1]; dopt->cipso = dopt->optlen+sizeof(struct iphdr); memcpy(dptr, sptr+sopt->cipso, optlen); dptr += optlen; dopt->optlen += optlen; } while (dopt->optlen & 3) { *dptr++ = IPOPT_END; dopt->optlen++; } return 0; } /* * Options "fragmenting", just fill options not * allowed in fragments with NOOPs. * Simple and stupid 8), but the most efficient way. */ void ip_options_fragment(struct sk_buff * skb) { unsigned char *optptr = skb_network_header(skb) + sizeof(struct iphdr); struct ip_options * opt = &(IPCB(skb)->opt); int l = opt->optlen; int optlen; while (l > 0) { switch (*optptr) { case IPOPT_END: return; case IPOPT_NOOP: l--; optptr++; continue; } optlen = optptr[1]; if (optlen<2 || optlen>l) return; if (!IPOPT_COPIED(*optptr)) memset(optptr, IPOPT_NOOP, optlen); l -= optlen; optptr += optlen; } opt->ts = 0; opt->rr = 0; opt->rr_needaddr = 0; opt->ts_needaddr = 0; opt->ts_needtime = 0; } /* * Verify options and fill pointers in struct options. * Caller should clear *opt, and set opt->data. * If opt == NULL, then skb->data should point to IP header. 
*/ int ip_options_compile(struct net *net, struct ip_options * opt, struct sk_buff * skb) { int l; unsigned char * iph; unsigned char * optptr; int optlen; unsigned char * pp_ptr = NULL; struct rtable *rt = NULL; if (skb != NULL) { rt = skb_rtable(skb); optptr = (unsigned char *)&(ip_hdr(skb)[1]); } else optptr = opt->__data; iph = optptr - sizeof(struct iphdr); for (l = opt->optlen; l > 0; ) { switch (*optptr) { case IPOPT_END: for (optptr++, l--; l>0; optptr++, l--) { if (*optptr != IPOPT_END) { *optptr = IPOPT_END; opt->is_changed = 1; } } goto eol; case IPOPT_NOOP: l--; optptr++; continue; } optlen = optptr[1]; if (optlen<2 || optlen>l) { pp_ptr = optptr; goto error; } switch (*optptr) { case IPOPT_SSRR: case IPOPT_LSRR: if (optlen < 3) { pp_ptr = optptr + 1; goto error; } if (optptr[2] < 4) { pp_ptr = optptr + 2; goto error; } /* NB: cf RFC-1812 5.2.4.1 */ if (opt->srr) { pp_ptr = optptr; goto error; } if (!skb) { if (optptr[2] != 4 || optlen < 7 || ((optlen-3) & 3)) { pp_ptr = optptr + 1; goto error; } memcpy(&opt->faddr, &optptr[3], 4); if (optlen > 7) memmove(&optptr[3], &optptr[7], optlen-7); } opt->is_strictroute = (optptr[0] == IPOPT_SSRR); opt->srr = optptr - iph; break; case IPOPT_RR: if (opt->rr) { pp_ptr = optptr; goto error; } if (optlen < 3) { pp_ptr = optptr + 1; goto error; } if (optptr[2] < 4) { pp_ptr = optptr + 2; goto error; } if (optptr[2] <= optlen) { if (optptr[2]+3 > optlen) { pp_ptr = optptr + 2; goto error; } if (rt) { memcpy(&optptr[optptr[2]-1], &rt->rt_spec_dst, 4); opt->is_changed = 1; } optptr[2] += 4; opt->rr_needaddr = 1; } opt->rr = optptr - iph; break; case IPOPT_TIMESTAMP: if (opt->ts) { pp_ptr = optptr; goto error; } if (optlen < 4) { pp_ptr = optptr + 1; goto error; } if (optptr[2] < 5) { pp_ptr = optptr + 2; goto error; } if (optptr[2] <= optlen) { __be32 *timeptr = NULL; if (optptr[2]+3 > optptr[1]) { pp_ptr = optptr + 2; goto error; } switch (optptr[3]&0xF) { case IPOPT_TS_TSONLY: opt->ts = optptr - iph; if (skb) timeptr 
= (__be32*)&optptr[optptr[2]-1]; opt->ts_needtime = 1; optptr[2] += 4; break; case IPOPT_TS_TSANDADDR: if (optptr[2]+7 > optptr[1]) { pp_ptr = optptr + 2; goto error; } opt->ts = optptr - iph; if (rt) { memcpy(&optptr[optptr[2]-1], &rt->rt_spec_dst, 4); timeptr = (__be32*)&optptr[optptr[2]+3]; } opt->ts_needaddr = 1; opt->ts_needtime = 1; optptr[2] += 8; break; case IPOPT_TS_PRESPEC: if (optptr[2]+7 > optptr[1]) { pp_ptr = optptr + 2; goto error; } opt->ts = optptr - iph; { __be32 addr; memcpy(&addr, &optptr[optptr[2]-1], 4); if (inet_addr_type(net, addr) == RTN_UNICAST) break; if (skb) timeptr = (__be32*)&optptr[optptr[2]+3]; } opt->ts_needtime = 1; optptr[2] += 8; break; default: if (!skb && !capable(CAP_NET_RAW)) { pp_ptr = optptr + 3; goto error; } break; } if (timeptr) { struct timespec tv; __be32 midtime; getnstimeofday(&tv); midtime = htonl((tv.tv_sec % 86400) * MSEC_PER_SEC + tv.tv_nsec / NSEC_PER_MSEC); memcpy(timeptr, &midtime, sizeof(__be32)); opt->is_changed = 1; } } else { unsigned overflow = optptr[3]>>4; if (overflow == 15) { pp_ptr = optptr + 3; goto error; } opt->ts = optptr - iph; if (skb) { optptr[3] = (optptr[3]&0xF)|((overflow+1)<<4); opt->is_changed = 1; } } break; case IPOPT_RA: if (optlen < 4) { pp_ptr = optptr + 1; goto error; } if (optptr[2] == 0 && optptr[3] == 0) opt->router_alert = optptr - iph; break; case IPOPT_CIPSO: if ((!skb && !capable(CAP_NET_RAW)) || opt->cipso) { pp_ptr = optptr; goto error; } opt->cipso = optptr - iph; if (cipso_v4_validate(skb, &optptr)) { pp_ptr = optptr; goto error; } break; case IPOPT_SEC: case IPOPT_SID: default: if (!skb && !capable(CAP_NET_RAW)) { pp_ptr = optptr; goto error; } break; } l -= optlen; optptr += optlen; } eol: if (!pp_ptr) return 0; error: if (skb) { icmp_send(skb, ICMP_PARAMETERPROB, 0, htonl((pp_ptr-iph)<<24)); } return -EINVAL; } EXPORT_SYMBOL(ip_options_compile); /* * Undo all the changes done by ip_options_compile(). 
*/ void ip_options_undo(struct ip_options * opt) { if (opt->srr) { unsigned char * optptr = opt->__data+opt->srr-sizeof(struct iphdr); memmove(optptr+7, optptr+3, optptr[1]-7); memcpy(optptr+3, &opt->faddr, 4); } if (opt->rr_needaddr) { unsigned char * optptr = opt->__data+opt->rr-sizeof(struct iphdr); optptr[2] -= 4; memset(&optptr[optptr[2]-1], 0, 4); } if (opt->ts) { unsigned char * optptr = opt->__data+opt->ts-sizeof(struct iphdr); if (opt->ts_needtime) { optptr[2] -= 4; memset(&optptr[optptr[2]-1], 0, 4); if ((optptr[3]&0xF) == IPOPT_TS_PRESPEC) optptr[2] -= 4; } if (opt->ts_needaddr) { optptr[2] -= 4; memset(&optptr[optptr[2]-1], 0, 4); } } } static struct ip_options *ip_options_get_alloc(const int optlen) { return kzalloc(sizeof(struct ip_options) + ((optlen + 3) & ~3), GFP_KERNEL); } static int ip_options_get_finish(struct net *net, struct ip_options **optp, struct ip_options *opt, int optlen) { while (optlen & 3) opt->__data[optlen++] = IPOPT_END; opt->optlen = optlen; if (optlen && ip_options_compile(net, opt, NULL)) { kfree(opt); return -EINVAL; } kfree(*optp); *optp = opt; return 0; } int ip_options_get_from_user(struct net *net, struct ip_options **optp, unsigned char __user *data, int optlen) { struct ip_options *opt = ip_options_get_alloc(optlen); if (!opt) return -ENOMEM; if (optlen && copy_from_user(opt->__data, data, optlen)) { kfree(opt); return -EFAULT; } return ip_options_get_finish(net, optp, opt, optlen); } int ip_options_get(struct net *net, struct ip_options **optp, unsigned char *data, int optlen) { struct ip_options *opt = ip_options_get_alloc(optlen); if (!opt) return -ENOMEM; if (optlen) memcpy(opt->__data, data, optlen); return ip_options_get_finish(net, optp, opt, optlen); } void ip_forward_options(struct sk_buff *skb) { struct ip_options * opt = &(IPCB(skb)->opt); unsigned char * optptr; struct rtable *rt = skb_rtable(skb); unsigned char *raw = skb_network_header(skb); if (opt->rr_needaddr) { optptr = (unsigned char *)raw + opt->rr; 
ip_rt_get_source(&optptr[optptr[2]-5], rt); opt->is_changed = 1; } if (opt->srr_is_hit) { int srrptr, srrspace; optptr = raw + opt->srr; for ( srrptr=optptr[2], srrspace = optptr[1]; srrptr <= srrspace; srrptr += 4 ) { if (srrptr + 3 > srrspace) break; if (memcmp(&rt->rt_dst, &optptr[srrptr-1], 4) == 0) break; } if (srrptr + 3 <= srrspace) { opt->is_changed = 1; ip_rt_get_source(&optptr[srrptr-1], rt); ip_hdr(skb)->daddr = rt->rt_dst; optptr[2] = srrptr+4; } else if (net_ratelimit()) printk(KERN_CRIT "ip_forward(): Argh! Destination lost!\n"); if (opt->ts_needaddr) { optptr = raw + opt->ts; ip_rt_get_source(&optptr[optptr[2]-9], rt); opt->is_changed = 1; } } if (opt->is_changed) { opt->is_changed = 0; ip_send_check(ip_hdr(skb)); } } int ip_options_rcv_srr(struct sk_buff *skb) { struct ip_options *opt = &(IPCB(skb)->opt); int srrspace, srrptr; __be32 nexthop; struct iphdr *iph = ip_hdr(skb); unsigned char *optptr = skb_network_header(skb) + opt->srr; struct rtable *rt = skb_rtable(skb); struct rtable *rt2; unsigned long orefdst; int err; if (!opt->srr || !rt) return 0; if (skb->pkt_type != PACKET_HOST) return -EINVAL; if (rt->rt_type == RTN_UNICAST) { if (!opt->is_strictroute) return 0; icmp_send(skb, ICMP_PARAMETERPROB, 0, htonl(16<<24)); return -EINVAL; } if (rt->rt_type != RTN_LOCAL) return -EINVAL; for (srrptr=optptr[2], srrspace = optptr[1]; srrptr <= srrspace; srrptr += 4) { if (srrptr + 3 > srrspace) { icmp_send(skb, ICMP_PARAMETERPROB, 0, htonl((opt->srr+2)<<24)); return -EINVAL; } memcpy(&nexthop, &optptr[srrptr-1], 4); orefdst = skb->_skb_refdst; skb_dst_set(skb, NULL); err = ip_route_input(skb, nexthop, iph->saddr, iph->tos, skb->dev); rt2 = skb_rtable(skb); if (err || (rt2->rt_type != RTN_UNICAST && rt2->rt_type != RTN_LOCAL)) { skb_dst_drop(skb); skb->_skb_refdst = orefdst; return -EINVAL; } refdst_drop(orefdst); if (rt2->rt_type != RTN_LOCAL) break; /* Superfast 8) loopback forward */ memcpy(&iph->daddr, &optptr[srrptr-1], 4); opt->is_changed = 1; } if 
(srrptr <= srrspace) { opt->srr_is_hit = 1; opt->is_changed = 1; } return 0; } EXPORT_SYMBOL(ip_options_rcv_srr);
/* * INET An implementation of the TCP/IP protocol suite for the LINUX * operating system. INET is implemented using the BSD Socket * interface as the means of communication with the user level. * * The options processing module for ip.c * * Authors: A.N.Kuznetsov * */ #include <linux/capability.h> #include <linux/module.h> #include <linux/slab.h> #include <linux/types.h> #include <asm/uaccess.h> #include <linux/skbuff.h> #include <linux/ip.h> #include <linux/icmp.h> #include <linux/netdevice.h> #include <linux/rtnetlink.h> #include <net/sock.h> #include <net/ip.h> #include <net/icmp.h> #include <net/route.h> #include <net/cipso_ipv4.h> /* * Write options to IP header, record destination address to * source route option, address of outgoing interface * (we should already know it, so that this function is allowed be * called only after routing decision) and timestamp, * if we originate this datagram. * * daddr is real destination address, next hop is recorded in IP header. * saddr is address of outgoing interface. 
*/ void ip_options_build(struct sk_buff *skb, struct ip_options *opt, __be32 daddr, struct rtable *rt, int is_frag) { unsigned char *iph = skb_network_header(skb); memcpy(&(IPCB(skb)->opt), opt, sizeof(struct ip_options)); memcpy(iph+sizeof(struct iphdr), opt->__data, opt->optlen); opt = &(IPCB(skb)->opt); if (opt->srr) memcpy(iph+opt->srr+iph[opt->srr+1]-4, &daddr, 4); if (!is_frag) { if (opt->rr_needaddr) ip_rt_get_source(iph+opt->rr+iph[opt->rr+2]-5, rt); if (opt->ts_needaddr) ip_rt_get_source(iph+opt->ts+iph[opt->ts+2]-9, rt); if (opt->ts_needtime) { struct timespec tv; __be32 midtime; getnstimeofday(&tv); midtime = htonl((tv.tv_sec % 86400) * MSEC_PER_SEC + tv.tv_nsec / NSEC_PER_MSEC); memcpy(iph+opt->ts+iph[opt->ts+2]-5, &midtime, 4); } return; } if (opt->rr) { memset(iph+opt->rr, IPOPT_NOP, iph[opt->rr+1]); opt->rr = 0; opt->rr_needaddr = 0; } if (opt->ts) { memset(iph+opt->ts, IPOPT_NOP, iph[opt->ts+1]); opt->ts = 0; opt->ts_needaddr = opt->ts_needtime = 0; } } /* * Provided (sopt, skb) points to received options, * build in dopt compiled option set appropriate for answering. * i.e. invert SRR option, copy anothers, * and grab room in RR/TS options. * * NOTE: dopt cannot point to skb. 
*/ int ip_options_echo(struct ip_options *dopt, struct sk_buff *skb) { const struct ip_options *sopt; unsigned char *sptr, *dptr; int soffset, doffset; int optlen; __be32 daddr; memset(dopt, 0, sizeof(struct ip_options)); sopt = &(IPCB(skb)->opt); if (sopt->optlen == 0) return 0; sptr = skb_network_header(skb); dptr = dopt->__data; daddr = skb_rtable(skb)->rt_spec_dst; if (sopt->rr) { optlen = sptr[sopt->rr+1]; soffset = sptr[sopt->rr+2]; dopt->rr = dopt->optlen + sizeof(struct iphdr); memcpy(dptr, sptr+sopt->rr, optlen); if (sopt->rr_needaddr && soffset <= optlen) { if (soffset + 3 > optlen) return -EINVAL; dptr[2] = soffset + 4; dopt->rr_needaddr = 1; } dptr += optlen; dopt->optlen += optlen; } if (sopt->ts) { optlen = sptr[sopt->ts+1]; soffset = sptr[sopt->ts+2]; dopt->ts = dopt->optlen + sizeof(struct iphdr); memcpy(dptr, sptr+sopt->ts, optlen); if (soffset <= optlen) { if (sopt->ts_needaddr) { if (soffset + 3 > optlen) return -EINVAL; dopt->ts_needaddr = 1; soffset += 4; } if (sopt->ts_needtime) { if (soffset + 3 > optlen) return -EINVAL; if ((dptr[3]&0xF) != IPOPT_TS_PRESPEC) { dopt->ts_needtime = 1; soffset += 4; } else { dopt->ts_needtime = 0; if (soffset + 7 <= optlen) { __be32 addr; memcpy(&addr, dptr+soffset-1, 4); if (inet_addr_type(dev_net(skb_dst(skb)->dev), addr) != RTN_UNICAST) { dopt->ts_needtime = 1; soffset += 8; } } } } dptr[2] = soffset; } dptr += optlen; dopt->optlen += optlen; } if (sopt->srr) { unsigned char *start = sptr+sopt->srr; __be32 faddr; optlen = start[1]; soffset = start[2]; doffset = 0; if (soffset > optlen) soffset = optlen + 1; soffset -= 4; if (soffset > 3) { memcpy(&faddr, &start[soffset-1], 4); for (soffset-=4, doffset=4; soffset > 3; soffset-=4, doffset+=4) memcpy(&dptr[doffset-1], &start[soffset-1], 4); /* * RFC1812 requires to fix illegal source routes. 
*/ if (memcmp(&ip_hdr(skb)->saddr, &start[soffset + 3], 4) == 0) doffset -= 4; } if (doffset > 3) { memcpy(&start[doffset-1], &daddr, 4); dopt->faddr = faddr; dptr[0] = start[0]; dptr[1] = doffset+3; dptr[2] = 4; dptr += doffset+3; dopt->srr = dopt->optlen + sizeof(struct iphdr); dopt->optlen += doffset+3; dopt->is_strictroute = sopt->is_strictroute; } } if (sopt->cipso) { optlen = sptr[sopt->cipso+1]; dopt->cipso = dopt->optlen+sizeof(struct iphdr); memcpy(dptr, sptr+sopt->cipso, optlen); dptr += optlen; dopt->optlen += optlen; } while (dopt->optlen & 3) { *dptr++ = IPOPT_END; dopt->optlen++; } return 0; } /* * Options "fragmenting", just fill options not * allowed in fragments with NOOPs. * Simple and stupid 8), but the most efficient way. */ void ip_options_fragment(struct sk_buff * skb) { unsigned char *optptr = skb_network_header(skb) + sizeof(struct iphdr); struct ip_options * opt = &(IPCB(skb)->opt); int l = opt->optlen; int optlen; while (l > 0) { switch (*optptr) { case IPOPT_END: return; case IPOPT_NOOP: l--; optptr++; continue; } optlen = optptr[1]; if (optlen<2 || optlen>l) return; if (!IPOPT_COPIED(*optptr)) memset(optptr, IPOPT_NOOP, optlen); l -= optlen; optptr += optlen; } opt->ts = 0; opt->rr = 0; opt->rr_needaddr = 0; opt->ts_needaddr = 0; opt->ts_needtime = 0; } /* * Verify options and fill pointers in struct options. * Caller should clear *opt, and set opt->data. * If opt == NULL, then skb->data should point to IP header. 
*/ int ip_options_compile(struct net *net, struct ip_options * opt, struct sk_buff * skb) { int l; unsigned char * iph; unsigned char * optptr; int optlen; unsigned char * pp_ptr = NULL; struct rtable *rt = NULL; if (skb != NULL) { rt = skb_rtable(skb); optptr = (unsigned char *)&(ip_hdr(skb)[1]); } else optptr = opt->__data; iph = optptr - sizeof(struct iphdr); for (l = opt->optlen; l > 0; ) { switch (*optptr) { case IPOPT_END: for (optptr++, l--; l>0; optptr++, l--) { if (*optptr != IPOPT_END) { *optptr = IPOPT_END; opt->is_changed = 1; } } goto eol; case IPOPT_NOOP: l--; optptr++; continue; } optlen = optptr[1]; if (optlen<2 || optlen>l) { pp_ptr = optptr; goto error; } switch (*optptr) { case IPOPT_SSRR: case IPOPT_LSRR: if (optlen < 3) { pp_ptr = optptr + 1; goto error; } if (optptr[2] < 4) { pp_ptr = optptr + 2; goto error; } /* NB: cf RFC-1812 5.2.4.1 */ if (opt->srr) { pp_ptr = optptr; goto error; } if (!skb) { if (optptr[2] != 4 || optlen < 7 || ((optlen-3) & 3)) { pp_ptr = optptr + 1; goto error; } memcpy(&opt->faddr, &optptr[3], 4); if (optlen > 7) memmove(&optptr[3], &optptr[7], optlen-7); } opt->is_strictroute = (optptr[0] == IPOPT_SSRR); opt->srr = optptr - iph; break; case IPOPT_RR: if (opt->rr) { pp_ptr = optptr; goto error; } if (optlen < 3) { pp_ptr = optptr + 1; goto error; } if (optptr[2] < 4) { pp_ptr = optptr + 2; goto error; } if (optptr[2] <= optlen) { if (optptr[2]+3 > optlen) { pp_ptr = optptr + 2; goto error; } if (rt) { memcpy(&optptr[optptr[2]-1], &rt->rt_spec_dst, 4); opt->is_changed = 1; } optptr[2] += 4; opt->rr_needaddr = 1; } opt->rr = optptr - iph; break; case IPOPT_TIMESTAMP: if (opt->ts) { pp_ptr = optptr; goto error; } if (optlen < 4) { pp_ptr = optptr + 1; goto error; } if (optptr[2] < 5) { pp_ptr = optptr + 2; goto error; } if (optptr[2] <= optlen) { __be32 *timeptr = NULL; if (optptr[2]+3 > optptr[1]) { pp_ptr = optptr + 2; goto error; } switch (optptr[3]&0xF) { case IPOPT_TS_TSONLY: opt->ts = optptr - iph; if (skb) timeptr 
= (__be32*)&optptr[optptr[2]-1]; opt->ts_needtime = 1; optptr[2] += 4; break; case IPOPT_TS_TSANDADDR: if (optptr[2]+7 > optptr[1]) { pp_ptr = optptr + 2; goto error; } opt->ts = optptr - iph; if (rt) { memcpy(&optptr[optptr[2]-1], &rt->rt_spec_dst, 4); timeptr = (__be32*)&optptr[optptr[2]+3]; } opt->ts_needaddr = 1; opt->ts_needtime = 1; optptr[2] += 8; break; case IPOPT_TS_PRESPEC: if (optptr[2]+7 > optptr[1]) { pp_ptr = optptr + 2; goto error; } opt->ts = optptr - iph; { __be32 addr; memcpy(&addr, &optptr[optptr[2]-1], 4); if (inet_addr_type(net, addr) == RTN_UNICAST) break; if (skb) timeptr = (__be32*)&optptr[optptr[2]+3]; } opt->ts_needtime = 1; optptr[2] += 8; break; default: if (!skb && !capable(CAP_NET_RAW)) { pp_ptr = optptr + 3; goto error; } break; } if (timeptr) { struct timespec tv; __be32 midtime; getnstimeofday(&tv); midtime = htonl((tv.tv_sec % 86400) * MSEC_PER_SEC + tv.tv_nsec / NSEC_PER_MSEC); memcpy(timeptr, &midtime, sizeof(__be32)); opt->is_changed = 1; } } else { unsigned overflow = optptr[3]>>4; if (overflow == 15) { pp_ptr = optptr + 3; goto error; } opt->ts = optptr - iph; if (skb) { optptr[3] = (optptr[3]&0xF)|((overflow+1)<<4); opt->is_changed = 1; } } break; case IPOPT_RA: if (optlen < 4) { pp_ptr = optptr + 1; goto error; } if (optptr[2] == 0 && optptr[3] == 0) opt->router_alert = optptr - iph; break; case IPOPT_CIPSO: if ((!skb && !capable(CAP_NET_RAW)) || opt->cipso) { pp_ptr = optptr; goto error; } opt->cipso = optptr - iph; if (cipso_v4_validate(skb, &optptr)) { pp_ptr = optptr; goto error; } break; case IPOPT_SEC: case IPOPT_SID: default: if (!skb && !capable(CAP_NET_RAW)) { pp_ptr = optptr; goto error; } break; } l -= optlen; optptr += optlen; } eol: if (!pp_ptr) return 0; error: if (skb) { icmp_send(skb, ICMP_PARAMETERPROB, 0, htonl((pp_ptr-iph)<<24)); } return -EINVAL; } EXPORT_SYMBOL(ip_options_compile); /* * Undo all the changes done by ip_options_compile(). 
*/ void ip_options_undo(struct ip_options * opt) { if (opt->srr) { unsigned char * optptr = opt->__data+opt->srr-sizeof(struct iphdr); memmove(optptr+7, optptr+3, optptr[1]-7); memcpy(optptr+3, &opt->faddr, 4); } if (opt->rr_needaddr) { unsigned char * optptr = opt->__data+opt->rr-sizeof(struct iphdr); optptr[2] -= 4; memset(&optptr[optptr[2]-1], 0, 4); } if (opt->ts) { unsigned char * optptr = opt->__data+opt->ts-sizeof(struct iphdr); if (opt->ts_needtime) { optptr[2] -= 4; memset(&optptr[optptr[2]-1], 0, 4); if ((optptr[3]&0xF) == IPOPT_TS_PRESPEC) optptr[2] -= 4; } if (opt->ts_needaddr) { optptr[2] -= 4; memset(&optptr[optptr[2]-1], 0, 4); } } } static struct ip_options_rcu *ip_options_get_alloc(const int optlen) { return kzalloc(sizeof(struct ip_options_rcu) + ((optlen + 3) & ~3), GFP_KERNEL); } static int ip_options_get_finish(struct net *net, struct ip_options_rcu **optp, struct ip_options_rcu *opt, int optlen) { while (optlen & 3) opt->opt.__data[optlen++] = IPOPT_END; opt->opt.optlen = optlen; if (optlen && ip_options_compile(net, &opt->opt, NULL)) { kfree(opt); return -EINVAL; } kfree(*optp); *optp = opt; return 0; } int ip_options_get_from_user(struct net *net, struct ip_options_rcu **optp, unsigned char __user *data, int optlen) { struct ip_options_rcu *opt = ip_options_get_alloc(optlen); if (!opt) return -ENOMEM; if (optlen && copy_from_user(opt->opt.__data, data, optlen)) { kfree(opt); return -EFAULT; } return ip_options_get_finish(net, optp, opt, optlen); } int ip_options_get(struct net *net, struct ip_options_rcu **optp, unsigned char *data, int optlen) { struct ip_options_rcu *opt = ip_options_get_alloc(optlen); if (!opt) return -ENOMEM; if (optlen) memcpy(opt->opt.__data, data, optlen); return ip_options_get_finish(net, optp, opt, optlen); } void ip_forward_options(struct sk_buff *skb) { struct ip_options * opt = &(IPCB(skb)->opt); unsigned char * optptr; struct rtable *rt = skb_rtable(skb); unsigned char *raw = skb_network_header(skb); if 
(opt->rr_needaddr) { optptr = (unsigned char *)raw + opt->rr; ip_rt_get_source(&optptr[optptr[2]-5], rt); opt->is_changed = 1; } if (opt->srr_is_hit) { int srrptr, srrspace; optptr = raw + opt->srr; for ( srrptr=optptr[2], srrspace = optptr[1]; srrptr <= srrspace; srrptr += 4 ) { if (srrptr + 3 > srrspace) break; if (memcmp(&rt->rt_dst, &optptr[srrptr-1], 4) == 0) break; } if (srrptr + 3 <= srrspace) { opt->is_changed = 1; ip_rt_get_source(&optptr[srrptr-1], rt); ip_hdr(skb)->daddr = rt->rt_dst; optptr[2] = srrptr+4; } else if (net_ratelimit()) printk(KERN_CRIT "ip_forward(): Argh! Destination lost!\n"); if (opt->ts_needaddr) { optptr = raw + opt->ts; ip_rt_get_source(&optptr[optptr[2]-9], rt); opt->is_changed = 1; } } if (opt->is_changed) { opt->is_changed = 0; ip_send_check(ip_hdr(skb)); } } int ip_options_rcv_srr(struct sk_buff *skb) { struct ip_options *opt = &(IPCB(skb)->opt); int srrspace, srrptr; __be32 nexthop; struct iphdr *iph = ip_hdr(skb); unsigned char *optptr = skb_network_header(skb) + opt->srr; struct rtable *rt = skb_rtable(skb); struct rtable *rt2; unsigned long orefdst; int err; if (!opt->srr || !rt) return 0; if (skb->pkt_type != PACKET_HOST) return -EINVAL; if (rt->rt_type == RTN_UNICAST) { if (!opt->is_strictroute) return 0; icmp_send(skb, ICMP_PARAMETERPROB, 0, htonl(16<<24)); return -EINVAL; } if (rt->rt_type != RTN_LOCAL) return -EINVAL; for (srrptr=optptr[2], srrspace = optptr[1]; srrptr <= srrspace; srrptr += 4) { if (srrptr + 3 > srrspace) { icmp_send(skb, ICMP_PARAMETERPROB, 0, htonl((opt->srr+2)<<24)); return -EINVAL; } memcpy(&nexthop, &optptr[srrptr-1], 4); orefdst = skb->_skb_refdst; skb_dst_set(skb, NULL); err = ip_route_input(skb, nexthop, iph->saddr, iph->tos, skb->dev); rt2 = skb_rtable(skb); if (err || (rt2->rt_type != RTN_UNICAST && rt2->rt_type != RTN_LOCAL)) { skb_dst_drop(skb); skb->_skb_refdst = orefdst; return -EINVAL; } refdst_drop(orefdst); if (rt2->rt_type != RTN_LOCAL) break; /* Superfast 8) loopback forward */ 
memcpy(&iph->daddr, &optptr[srrptr-1], 4); opt->is_changed = 1; } if (srrptr <= srrspace) { opt->srr_is_hit = 1; opt->is_changed = 1; } return 0; } EXPORT_SYMBOL(ip_options_rcv_srr);
static struct ip_options *ip_options_get_alloc(const int optlen) { return kzalloc(sizeof(struct ip_options) + ((optlen + 3) & ~3), GFP_KERNEL); }
static struct ip_options_rcu *ip_options_get_alloc(const int optlen) { return kzalloc(sizeof(struct ip_options_rcu) + ((optlen + 3) & ~3), GFP_KERNEL); }
{'added': [(39, 'void ip_options_build(struct sk_buff *skb, struct ip_options *opt,'), (86, 'int ip_options_echo(struct ip_options *dopt, struct sk_buff *skb)'), (88, '\tconst struct ip_options *sopt;'), (98, '\tif (sopt->optlen == 0)'), (158, '\t\tunsigned char *start = sptr+sopt->srr;'), (500, 'static struct ip_options_rcu *ip_options_get_alloc(const int optlen)'), (502, '\treturn kzalloc(sizeof(struct ip_options_rcu) + ((optlen + 3) & ~3),'), (506, 'static int ip_options_get_finish(struct net *net, struct ip_options_rcu **optp,'), (507, '\t\t\t\t struct ip_options_rcu *opt, int optlen)'), (510, '\t\topt->opt.__data[optlen++] = IPOPT_END;'), (511, '\topt->opt.optlen = optlen;'), (512, '\tif (optlen && ip_options_compile(net, &opt->opt, NULL)) {'), (521, 'int ip_options_get_from_user(struct net *net, struct ip_options_rcu **optp,'), (524, '\tstruct ip_options_rcu *opt = ip_options_get_alloc(optlen);'), (528, '\tif (optlen && copy_from_user(opt->opt.__data, data, optlen)) {'), (535, 'int ip_options_get(struct net *net, struct ip_options_rcu **optp,'), (538, '\tstruct ip_options_rcu *opt = ip_options_get_alloc(optlen);'), (543, '\t\tmemcpy(opt->opt.__data, data, optlen);')], 'deleted': [(39, 'void ip_options_build(struct sk_buff * skb, struct ip_options * opt,'), (86, 'int ip_options_echo(struct ip_options * dopt, struct sk_buff * skb)'), (88, '\tstruct ip_options *sopt;'), (98, '\tif (sopt->optlen == 0) {'), (99, '\t\tdopt->optlen = 0;'), (101, '\t}'), (160, '\t\tunsigned char * start = sptr+sopt->srr;'), (502, 'static struct ip_options *ip_options_get_alloc(const int optlen)'), (504, '\treturn kzalloc(sizeof(struct ip_options) + ((optlen + 3) & ~3),'), (508, 'static int ip_options_get_finish(struct net *net, struct ip_options **optp,'), (509, '\t\t\t\t struct ip_options *opt, int optlen)'), (512, '\t\topt->__data[optlen++] = IPOPT_END;'), (513, '\topt->optlen = optlen;'), (514, '\tif (optlen && ip_options_compile(net, opt, NULL)) {'), (523, 'int 
ip_options_get_from_user(struct net *net, struct ip_options **optp,'), (526, '\tstruct ip_options *opt = ip_options_get_alloc(optlen);'), (530, '\tif (optlen && copy_from_user(opt->__data, data, optlen)) {'), (537, 'int ip_options_get(struct net *net, struct ip_options **optp,'), (540, '\tstruct ip_options *opt = ip_options_get_alloc(optlen);'), (545, '\t\tmemcpy(opt->__data, data, optlen);')]}
18
20
558
3,699
5
30
1
https://github.com/torvalds/linux
CVE-2012-3552
CWE-362
2,059
MemInStream.h
C++
rdr::MemInStream::MemInStream
/* Copyright (C) 2002-2005 RealVNC Ltd. All Rights Reserved. * * This is free software; you can redistribute it and/or modify * it under the terms of the GNU General Public License as published by * the Free Software Foundation; either version 2 of the License, or * (at your option) any later version. * * This software is distributed in the hope that it will be useful, * but WITHOUT ANY WARRANTY; without even the implied warranty of * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the * GNU General Public License for more details. * * You should have received a copy of the GNU General Public License * along with this software; if not, write to the Free Software * Foundation, Inc., 59 Temple Place - Suite 330, Boston, MA 02111-1307, * USA. */ // // rdr::MemInStream is an InStream which streams from a given memory buffer. // If the deleteWhenDone parameter is true then the buffer will be delete[]d in // the destructor. Note that it is delete[]d as a U8* - strictly speaking this // means it ought to be new[]ed as a U8* as well, but on most platforms this // doesn't matter. // #ifndef __RDR_MEMINSTREAM_H__ #define __RDR_MEMINSTREAM_H__ #include <rdr/InStream.h> #include <rdr/Exception.h> namespace rdr { class MemInStream : public InStream { public: MemInStream(const void* data, int len, bool deleteWhenDone_=false) : start((const U8*)data), deleteWhenDone(deleteWhenDone_) { ptr = start; end = start + len; } virtual ~MemInStream() { if (deleteWhenDone) delete [] start; } int pos() { return ptr - start; } void reposition(int pos) { ptr = start + pos; } private: int overrun(int itemSize, int nItems, bool wait) { throw EndOfStream(); } const U8* start; bool deleteWhenDone; }; } #endif
/* Copyright (C) 2002-2005 RealVNC Ltd. All Rights Reserved. * * This is free software; you can redistribute it and/or modify * it under the terms of the GNU General Public License as published by * the Free Software Foundation; either version 2 of the License, or * (at your option) any later version. * * This software is distributed in the hope that it will be useful, * but WITHOUT ANY WARRANTY; without even the implied warranty of * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the * GNU General Public License for more details. * * You should have received a copy of the GNU General Public License * along with this software; if not, write to the Free Software * Foundation, Inc., 59 Temple Place - Suite 330, Boston, MA 02111-1307, * USA. */ // // rdr::MemInStream is an InStream which streams from a given memory buffer. // If the deleteWhenDone parameter is true then the buffer will be delete[]d in // the destructor. Note that it is delete[]d as a U8* - strictly speaking this // means it ought to be new[]ed as a U8* as well, but on most platforms this // doesn't matter. // #ifndef __RDR_MEMINSTREAM_H__ #define __RDR_MEMINSTREAM_H__ #include <rdr/InStream.h> #include <rdr/Exception.h> namespace rdr { class MemInStream : public InStream { public: MemInStream(const void* data, size_t len, bool deleteWhenDone_=false) : start((const U8*)data), deleteWhenDone(deleteWhenDone_) { ptr = start; end = start + len; } virtual ~MemInStream() { if (deleteWhenDone) delete [] start; } size_t pos() { return ptr - start; } void reposition(size_t pos) { ptr = start + pos; } private: size_t overrun(size_t itemSize, size_t nItems, bool wait) { throw EndOfStream(); } const U8* start; bool deleteWhenDone; }; } #endif
MemInStream(const void* data, int len, bool deleteWhenDone_=false) : start((const U8*)data), deleteWhenDone(deleteWhenDone_) { ptr = start; end = start + len; }
MemInStream(const void* data, size_t len, bool deleteWhenDone_=false) : start((const U8*)data), deleteWhenDone(deleteWhenDone_) { ptr = start; end = start + len; }
{'added': [(39, ' MemInStream(const void* data, size_t len, bool deleteWhenDone_=false)'), (51, ' size_t pos() { return ptr - start; }'), (52, ' void reposition(size_t pos) { ptr = start + pos; }'), (56, ' size_t overrun(size_t itemSize, size_t nItems, bool wait) { throw EndOfStream(); }')], 'deleted': [(39, ' MemInStream(const void* data, int len, bool deleteWhenDone_=false)'), (51, ' int pos() { return ptr - start; }'), (52, ' void reposition(int pos) { ptr = start + pos; }'), (56, ' int overrun(int itemSize, int nItems, bool wait) { throw EndOfStream(); }')]}
4
4
23
129
6
42
1
https://github.com/CendioOssman/tigervnc
CVE-2019-15694
CWE-787
1,148
nego.c
C++
nego_process_negotiation_request
/** * FreeRDP: A Remote Desktop Protocol Implementation * RDP Protocol Security Negotiation * * Copyright 2011 Marc-Andre Moreau <marcandre.moreau@gmail.com> * Copyright 2014 Norbert Federa <norbert.federa@thincast.com> * Copyright 2015 Thincast Technologies GmbH * Copyright 2015 DI (FH) Martin Haimberger <martin.haimberger@thincast.com> * * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. * You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. */ #ifdef HAVE_CONFIG_H #include "config.h" #endif #include <winpr/crt.h> #include <freerdp/log.h> #include "tpkt.h" #include "nego.h" #include "transport.h" #define TAG FREERDP_TAG("core.nego") struct rdp_nego { UINT16 port; UINT32 flags; const char* hostname; char* cookie; BYTE* RoutingToken; DWORD RoutingTokenLength; BOOL SendPreconnectionPdu; UINT32 PreconnectionId; char* PreconnectionBlob; NEGO_STATE state; BOOL TcpConnected; BOOL SecurityConnected; UINT32 CookieMaxLength; BOOL sendNegoData; UINT32 SelectedProtocol; UINT32 RequestedProtocols; BOOL NegotiateSecurityLayer; BOOL EnabledProtocols[16]; BOOL RestrictedAdminModeRequired; BOOL GatewayEnabled; BOOL GatewayBypassLocal; rdpTransport* transport; }; static const char* nego_state_string(NEGO_STATE state) { static const char* const NEGO_STATE_STRINGS[] = { "NEGO_STATE_INITIAL", "NEGO_STATE_EXT", "NEGO_STATE_NLA", "NEGO_STATE_TLS", "NEGO_STATE_RDP", "NEGO_STATE_FAIL", "NEGO_STATE_FINAL", "NEGO_STATE_INVALID" }; if (state >= ARRAYSIZE(NEGO_STATE_STRINGS)) return NEGO_STATE_STRINGS[ARRAYSIZE(NEGO_STATE_STRINGS) - 1]; return 
NEGO_STATE_STRINGS[state]; } static const char* protocol_security_string(UINT32 security) { static const char* PROTOCOL_SECURITY_STRINGS[] = { "RDP", "TLS", "NLA", "UNK", "UNK", "UNK", "UNK", "UNK", "EXT", "UNK" }; if (security >= ARRAYSIZE(PROTOCOL_SECURITY_STRINGS)) return PROTOCOL_SECURITY_STRINGS[ARRAYSIZE(PROTOCOL_SECURITY_STRINGS) - 1]; return PROTOCOL_SECURITY_STRINGS[security]; } static BOOL nego_transport_connect(rdpNego* nego); static BOOL nego_transport_disconnect(rdpNego* nego); static BOOL nego_security_connect(rdpNego* nego); static BOOL nego_send_preconnection_pdu(rdpNego* nego); static BOOL nego_recv_response(rdpNego* nego); static void nego_send(rdpNego* nego); static void nego_process_negotiation_request(rdpNego* nego, wStream* s); static void nego_process_negotiation_response(rdpNego* nego, wStream* s); static void nego_process_negotiation_failure(rdpNego* nego, wStream* s); /** * Negotiate protocol security and connect. * @param nego * @return */ BOOL nego_connect(rdpNego* nego) { rdpSettings* settings = nego->transport->settings; if (nego->state == NEGO_STATE_INITIAL) { if (nego->EnabledProtocols[PROTOCOL_HYBRID_EX]) { nego->state = NEGO_STATE_EXT; } else if (nego->EnabledProtocols[PROTOCOL_HYBRID]) { nego->state = NEGO_STATE_NLA; } else if (nego->EnabledProtocols[PROTOCOL_SSL]) { nego->state = NEGO_STATE_TLS; } else if (nego->EnabledProtocols[PROTOCOL_RDP]) { nego->state = NEGO_STATE_RDP; } else { WLog_ERR(TAG, "No security protocol is enabled"); nego->state = NEGO_STATE_FAIL; return FALSE; } if (!nego->NegotiateSecurityLayer) { WLog_DBG(TAG, "Security Layer Negotiation is disabled"); /* attempt only the highest enabled protocol (see nego_attempt_*) */ nego->EnabledProtocols[PROTOCOL_HYBRID] = FALSE; nego->EnabledProtocols[PROTOCOL_SSL] = FALSE; nego->EnabledProtocols[PROTOCOL_RDP] = FALSE; nego->EnabledProtocols[PROTOCOL_HYBRID_EX] = FALSE; if (nego->state == NEGO_STATE_EXT) { nego->EnabledProtocols[PROTOCOL_HYBRID_EX] = TRUE; 
nego->EnabledProtocols[PROTOCOL_HYBRID] = TRUE; nego->SelectedProtocol = PROTOCOL_HYBRID_EX; } else if (nego->state == NEGO_STATE_NLA) { nego->EnabledProtocols[PROTOCOL_HYBRID] = TRUE; nego->SelectedProtocol = PROTOCOL_HYBRID; } else if (nego->state == NEGO_STATE_TLS) { nego->EnabledProtocols[PROTOCOL_SSL] = TRUE; nego->SelectedProtocol = PROTOCOL_SSL; } else if (nego->state == NEGO_STATE_RDP) { nego->EnabledProtocols[PROTOCOL_RDP] = TRUE; nego->SelectedProtocol = PROTOCOL_RDP; } } if (nego->SendPreconnectionPdu) { if (!nego_send_preconnection_pdu(nego)) { WLog_ERR(TAG, "Failed to send preconnection pdu"); nego->state = NEGO_STATE_FINAL; return FALSE; } } } if (!nego->NegotiateSecurityLayer) { nego->state = NEGO_STATE_FINAL; } else { do { WLog_DBG(TAG, "state: %s", nego_state_string(nego->state)); nego_send(nego); if (nego->state == NEGO_STATE_FAIL) { if (freerdp_get_last_error(nego->transport->context) == FREERDP_ERROR_SUCCESS) WLog_ERR(TAG, "Protocol Security Negotiation Failure"); nego->state = NEGO_STATE_FINAL; return FALSE; } } while (nego->state != NEGO_STATE_FINAL); } WLog_DBG(TAG, "Negotiated %s security", protocol_security_string(nego->SelectedProtocol)); /* update settings with negotiated protocol security */ settings->RequestedProtocols = nego->RequestedProtocols; settings->SelectedProtocol = nego->SelectedProtocol; settings->NegotiationFlags = nego->flags; if (nego->SelectedProtocol == PROTOCOL_RDP) { settings->UseRdpSecurityLayer = TRUE; if (!settings->EncryptionMethods) { /** * Advertise all supported encryption methods if the client * implementation did not set any security methods */ settings->EncryptionMethods = ENCRYPTION_METHOD_40BIT | ENCRYPTION_METHOD_56BIT | ENCRYPTION_METHOD_128BIT | ENCRYPTION_METHOD_FIPS; } } /* finally connect security layer (if not already done) */ if (!nego_security_connect(nego)) { WLog_DBG(TAG, "Failed to connect with %s security", protocol_security_string(nego->SelectedProtocol)); return FALSE; } return TRUE; } BOOL 
nego_disconnect(rdpNego* nego) { nego->state = NEGO_STATE_INITIAL; return nego_transport_disconnect(nego); } /* connect to selected security layer */ BOOL nego_security_connect(rdpNego* nego) { if (!nego->TcpConnected) { nego->SecurityConnected = FALSE; } else if (!nego->SecurityConnected) { if (nego->SelectedProtocol == PROTOCOL_HYBRID) { WLog_DBG(TAG, "nego_security_connect with PROTOCOL_HYBRID"); nego->SecurityConnected = transport_connect_nla(nego->transport); } else if (nego->SelectedProtocol == PROTOCOL_SSL) { WLog_DBG(TAG, "nego_security_connect with PROTOCOL_SSL"); nego->SecurityConnected = transport_connect_tls(nego->transport); } else if (nego->SelectedProtocol == PROTOCOL_RDP) { WLog_DBG(TAG, "nego_security_connect with PROTOCOL_RDP"); nego->SecurityConnected = transport_connect_rdp(nego->transport); } else { WLog_ERR(TAG, "cannot connect security layer because no protocol has been selected yet."); } } return nego->SecurityConnected; } /** * Connect TCP layer. * @param nego * @return */ static BOOL nego_tcp_connect(rdpNego* nego) { if (!nego->TcpConnected) { if (nego->GatewayEnabled) { if (nego->GatewayBypassLocal) { /* Attempt a direct connection first, and then fallback to using the gateway */ WLog_INFO(TAG, "Detecting if host can be reached locally. - This might take some time."); WLog_INFO(TAG, "To disable auto detection use /gateway-usage-method:direct"); transport_set_gateway_enabled(nego->transport, FALSE); nego->TcpConnected = transport_connect(nego->transport, nego->hostname, nego->port, 1); } if (!nego->TcpConnected) { transport_set_gateway_enabled(nego->transport, TRUE); nego->TcpConnected = transport_connect(nego->transport, nego->hostname, nego->port, 15); } } else { nego->TcpConnected = transport_connect(nego->transport, nego->hostname, nego->port, 15); } } return nego->TcpConnected; } /** * Connect TCP layer. For direct approach, connect security layer as well. 
* @param nego * @return */ BOOL nego_transport_connect(rdpNego* nego) { if (!nego_tcp_connect(nego)) return FALSE; if (nego->TcpConnected && !nego->NegotiateSecurityLayer) return nego_security_connect(nego); return nego->TcpConnected; } /** * Disconnect TCP layer. * @param nego * @return */ BOOL nego_transport_disconnect(rdpNego* nego) { if (nego->TcpConnected) transport_disconnect(nego->transport); nego->TcpConnected = FALSE; nego->SecurityConnected = FALSE; return TRUE; } /** * Send preconnection information if enabled. * @param nego * @return */ BOOL nego_send_preconnection_pdu(rdpNego* nego) { wStream* s; UINT32 cbSize; UINT16 cchPCB = 0; WCHAR* wszPCB = NULL; WLog_DBG(TAG, "Sending preconnection PDU"); if (!nego_tcp_connect(nego)) return FALSE; /* it's easier to always send the version 2 PDU, and it's just 2 bytes overhead */ cbSize = PRECONNECTION_PDU_V2_MIN_SIZE; if (nego->PreconnectionBlob) { cchPCB = (UINT16)ConvertToUnicode(CP_UTF8, 0, nego->PreconnectionBlob, -1, &wszPCB, 0); cchPCB += 1; /* zero-termination */ cbSize += cchPCB * 2; } s = Stream_New(NULL, cbSize); if (!s) { free(wszPCB); WLog_ERR(TAG, "Stream_New failed!"); return FALSE; } Stream_Write_UINT32(s, cbSize); /* cbSize */ Stream_Write_UINT32(s, 0); /* Flags */ Stream_Write_UINT32(s, PRECONNECTION_PDU_V2); /* Version */ Stream_Write_UINT32(s, nego->PreconnectionId); /* Id */ Stream_Write_UINT16(s, cchPCB); /* cchPCB */ if (wszPCB) { Stream_Write(s, wszPCB, cchPCB * 2); /* wszPCB */ free(wszPCB); } Stream_SealLength(s); if (transport_write(nego->transport, s) < 0) { Stream_Free(s, TRUE); return FALSE; } Stream_Free(s, TRUE); return TRUE; } /** * Attempt negotiating NLA + TLS extended security. 
* @param nego */ static void nego_attempt_ext(rdpNego* nego) { nego->RequestedProtocols = PROTOCOL_HYBRID | PROTOCOL_SSL | PROTOCOL_HYBRID_EX; WLog_DBG(TAG, "Attempting NLA extended security"); if (!nego_transport_connect(nego)) { nego->state = NEGO_STATE_FAIL; return; } if (!nego_send_negotiation_request(nego)) { nego->state = NEGO_STATE_FAIL; return; } if (!nego_recv_response(nego)) { nego->state = NEGO_STATE_FAIL; return; } WLog_DBG(TAG, "state: %s", nego_state_string(nego->state)); if (nego->state != NEGO_STATE_FINAL) { nego_transport_disconnect(nego); if (nego->EnabledProtocols[PROTOCOL_HYBRID]) nego->state = NEGO_STATE_NLA; else if (nego->EnabledProtocols[PROTOCOL_SSL]) nego->state = NEGO_STATE_TLS; else if (nego->EnabledProtocols[PROTOCOL_RDP]) nego->state = NEGO_STATE_RDP; else nego->state = NEGO_STATE_FAIL; } } /** * Attempt negotiating NLA + TLS security. * @param nego */ static void nego_attempt_nla(rdpNego* nego) { nego->RequestedProtocols = PROTOCOL_HYBRID | PROTOCOL_SSL; WLog_DBG(TAG, "Attempting NLA security"); if (!nego_transport_connect(nego)) { nego->state = NEGO_STATE_FAIL; return; } if (!nego_send_negotiation_request(nego)) { nego->state = NEGO_STATE_FAIL; return; } if (!nego_recv_response(nego)) { nego->state = NEGO_STATE_FAIL; return; } WLog_DBG(TAG, "state: %s", nego_state_string(nego->state)); if (nego->state != NEGO_STATE_FINAL) { nego_transport_disconnect(nego); if (nego->EnabledProtocols[PROTOCOL_SSL]) nego->state = NEGO_STATE_TLS; else if (nego->EnabledProtocols[PROTOCOL_RDP]) nego->state = NEGO_STATE_RDP; else nego->state = NEGO_STATE_FAIL; } } /** * Attempt negotiating TLS security. 
* @param nego */ static void nego_attempt_tls(rdpNego* nego) { nego->RequestedProtocols = PROTOCOL_SSL; WLog_DBG(TAG, "Attempting TLS security"); if (!nego_transport_connect(nego)) { nego->state = NEGO_STATE_FAIL; return; } if (!nego_send_negotiation_request(nego)) { nego->state = NEGO_STATE_FAIL; return; } if (!nego_recv_response(nego)) { nego->state = NEGO_STATE_FAIL; return; } if (nego->state != NEGO_STATE_FINAL) { nego_transport_disconnect(nego); if (nego->EnabledProtocols[PROTOCOL_RDP]) nego->state = NEGO_STATE_RDP; else nego->state = NEGO_STATE_FAIL; } } /** * Attempt negotiating standard RDP security. * @param nego */ static void nego_attempt_rdp(rdpNego* nego) { nego->RequestedProtocols = PROTOCOL_RDP; WLog_DBG(TAG, "Attempting RDP security"); if (!nego_transport_connect(nego)) { nego->state = NEGO_STATE_FAIL; return; } if (!nego_send_negotiation_request(nego)) { nego->state = NEGO_STATE_FAIL; return; } if (!nego_recv_response(nego)) { nego->state = NEGO_STATE_FAIL; return; } } /** * Wait to receive a negotiation response * @param nego */ BOOL nego_recv_response(rdpNego* nego) { int status; wStream* s; s = Stream_New(NULL, 1024); if (!s) { WLog_ERR(TAG, "Stream_New failed!"); return FALSE; } status = transport_read_pdu(nego->transport, s); if (status < 0) { Stream_Free(s, TRUE); return FALSE; } status = nego_recv(nego->transport, s, nego); Stream_Free(s, TRUE); if (status < 0) return FALSE; return TRUE; } /** * Receive protocol security negotiation message.\n * @msdn{cc240501} * @param transport transport * @param s stream * @param extra nego pointer */ int nego_recv(rdpTransport* transport, wStream* s, void* extra) { BYTE li; BYTE type; UINT16 length; rdpNego* nego = (rdpNego*)extra; if (!tpkt_read_header(s, &length)) return -1; if (!tpdu_read_connection_confirm(s, &li, length)) return -1; if (li > 6) { /* rdpNegData (optional) */ Stream_Read_UINT8(s, type); /* Type */ switch (type) { case TYPE_RDP_NEG_RSP: nego_process_negotiation_response(nego, s); 
WLog_DBG(TAG, "selected_protocol: %" PRIu32 "", nego->SelectedProtocol); /* enhanced security selected ? */ if (nego->SelectedProtocol) { if ((nego->SelectedProtocol == PROTOCOL_HYBRID) && (!nego->EnabledProtocols[PROTOCOL_HYBRID])) { nego->state = NEGO_STATE_FAIL; } if ((nego->SelectedProtocol == PROTOCOL_SSL) && (!nego->EnabledProtocols[PROTOCOL_SSL])) { nego->state = NEGO_STATE_FAIL; } } else if (!nego->EnabledProtocols[PROTOCOL_RDP]) { nego->state = NEGO_STATE_FAIL; } break; case TYPE_RDP_NEG_FAILURE: nego_process_negotiation_failure(nego, s); break; } } else if (li == 6) { WLog_DBG(TAG, "no rdpNegData"); if (!nego->EnabledProtocols[PROTOCOL_RDP]) nego->state = NEGO_STATE_FAIL; else nego->state = NEGO_STATE_FINAL; } else { WLog_ERR(TAG, "invalid negotiation response"); nego->state = NEGO_STATE_FAIL; } if (!tpkt_ensure_stream_consumed(s, length)) return -1; return 0; } /** * Read optional routing token or cookie of X.224 Connection Request PDU. * @msdn{cc240470} * @param nego * @param s stream */ static BOOL nego_read_request_token_or_cookie(rdpNego* nego, wStream* s) { /* routingToken and cookie are optional and mutually exclusive! * * routingToken (variable): An optional and variable-length routing * token (used for load balancing) terminated by a 0x0D0A two-byte * sequence: (check [MSFT-SDLBTS] for details!) 
* Cookie:[space]msts=[ip address].[port].[reserved][\x0D\x0A] * * cookie (variable): An optional and variable-length ANSI character * string terminated by a 0x0D0A two-byte sequence: * Cookie:[space]mstshash=[ANSISTRING][\x0D\x0A] */ BYTE* str = NULL; UINT16 crlf = 0; size_t pos, len; BOOL result = FALSE; BOOL isToken = FALSE; size_t remain = Stream_GetRemainingLength(s); str = Stream_Pointer(s); pos = Stream_GetPosition(s); /* minimum length for token is 15 */ if (remain < 15) return TRUE; if (memcmp(Stream_Pointer(s), "Cookie: mstshash=", 17) != 0) { isToken = TRUE; } else { /* not a token, minimum length for cookie is 19 */ if (remain < 19) return TRUE; Stream_Seek(s, 17); } while ((remain = Stream_GetRemainingLength(s)) >= 2) { Stream_Read_UINT16(s, crlf); if (crlf == 0x0A0D) break; Stream_Rewind(s, 1); } if (crlf == 0x0A0D) { Stream_Rewind(s, 2); len = Stream_GetPosition(s) - pos; remain = Stream_GetRemainingLength(s); Stream_Write_UINT16(s, 0); if (strnlen((char*)str, len) == len) { if (isToken) result = nego_set_routing_token(nego, str, len); else result = nego_set_cookie(nego, (char*)str); } } if (!result) { Stream_SetPosition(s, pos); WLog_ERR(TAG, "invalid %s received", isToken ? "routing token" : "cookie"); } else { WLog_DBG(TAG, "received %s [%s]", isToken ? 
"routing token" : "cookie", str); } return result; } /** * Read protocol security negotiation request message.\n * @param nego * @param s stream */ BOOL nego_read_request(rdpNego* nego, wStream* s) { BYTE li; BYTE type; UINT16 length; if (!tpkt_read_header(s, &length)) return FALSE; if (!tpdu_read_connection_request(s, &li, length)) return FALSE; if (li != Stream_GetRemainingLength(s) + 6) { WLog_ERR(TAG, "Incorrect TPDU length indicator."); return FALSE; } if (!nego_read_request_token_or_cookie(nego, s)) { WLog_ERR(TAG, "Failed to parse routing token or cookie."); return FALSE; } if (Stream_GetRemainingLength(s) >= 8) { /* rdpNegData (optional) */ Stream_Read_UINT8(s, type); /* Type */ if (type != TYPE_RDP_NEG_REQ) { WLog_ERR(TAG, "Incorrect negotiation request type %" PRIu8 "", type); return FALSE; } nego_process_negotiation_request(nego, s); } return tpkt_ensure_stream_consumed(s, length); } /** * Send protocol security negotiation message. * @param nego */ void nego_send(rdpNego* nego) { if (nego->state == NEGO_STATE_EXT) nego_attempt_ext(nego); else if (nego->state == NEGO_STATE_NLA) nego_attempt_nla(nego); else if (nego->state == NEGO_STATE_TLS) nego_attempt_tls(nego); else if (nego->state == NEGO_STATE_RDP) nego_attempt_rdp(nego); else WLog_ERR(TAG, "invalid negotiation state for sending"); } /** * Send RDP Negotiation Request (RDP_NEG_REQ).\n * @msdn{cc240500}\n * @msdn{cc240470} * @param nego */ BOOL nego_send_negotiation_request(rdpNego* nego) { BOOL rc = FALSE; wStream* s; size_t length; size_t bm, em; BYTE flags = 0; size_t cookie_length; s = Stream_New(NULL, 512); if (!s) { WLog_ERR(TAG, "Stream_New failed!"); return FALSE; } length = TPDU_CONNECTION_REQUEST_LENGTH; bm = Stream_GetPosition(s); Stream_Seek(s, length); if (nego->RoutingToken) { Stream_Write(s, nego->RoutingToken, nego->RoutingTokenLength); /* Ensure Routing Token is correctly terminated - may already be present in string */ if ((nego->RoutingTokenLength > 2) && 
(nego->RoutingToken[nego->RoutingTokenLength - 2] == 0x0D) && (nego->RoutingToken[nego->RoutingTokenLength - 1] == 0x0A)) { WLog_DBG(TAG, "Routing token looks correctly terminated - use verbatim"); length += nego->RoutingTokenLength; } else { WLog_DBG(TAG, "Adding terminating CRLF to routing token"); Stream_Write_UINT8(s, 0x0D); /* CR */ Stream_Write_UINT8(s, 0x0A); /* LF */ length += nego->RoutingTokenLength + 2; } } else if (nego->cookie) { cookie_length = strlen(nego->cookie); if (cookie_length > nego->CookieMaxLength) cookie_length = nego->CookieMaxLength; Stream_Write(s, "Cookie: mstshash=", 17); Stream_Write(s, (BYTE*)nego->cookie, cookie_length); Stream_Write_UINT8(s, 0x0D); /* CR */ Stream_Write_UINT8(s, 0x0A); /* LF */ length += cookie_length + 19; } WLog_DBG(TAG, "RequestedProtocols: %" PRIu32 "", nego->RequestedProtocols); if ((nego->RequestedProtocols > PROTOCOL_RDP) || (nego->sendNegoData)) { /* RDP_NEG_DATA must be present for TLS and NLA */ if (nego->RestrictedAdminModeRequired) flags |= RESTRICTED_ADMIN_MODE_REQUIRED; Stream_Write_UINT8(s, TYPE_RDP_NEG_REQ); Stream_Write_UINT8(s, flags); Stream_Write_UINT16(s, 8); /* RDP_NEG_DATA length (8) */ Stream_Write_UINT32(s, nego->RequestedProtocols); /* requestedProtocols */ length += 8; } if (length > UINT16_MAX) goto fail; em = Stream_GetPosition(s); Stream_SetPosition(s, bm); tpkt_write_header(s, (UINT16)length); tpdu_write_connection_request(s, (UINT16)length - 5); Stream_SetPosition(s, em); Stream_SealLength(s); rc = (transport_write(nego->transport, s) >= 0); fail: Stream_Free(s, TRUE); return rc; } /** * Process Negotiation Request from Connection Request message. 
* @param nego * @param s */ void nego_process_negotiation_request(rdpNego* nego, wStream* s) { BYTE flags; UINT16 length; Stream_Read_UINT8(s, flags); Stream_Read_UINT16(s, length); Stream_Read_UINT32(s, nego->RequestedProtocols); WLog_DBG(TAG, "RDP_NEG_REQ: RequestedProtocol: 0x%08" PRIX32 "", nego->RequestedProtocols); nego->state = NEGO_STATE_FINAL; } /** * Process Negotiation Response from Connection Confirm message. * @param nego * @param s */ void nego_process_negotiation_response(rdpNego* nego, wStream* s) { UINT16 length; WLog_DBG(TAG, "RDP_NEG_RSP"); if (Stream_GetRemainingLength(s) < 7) { WLog_ERR(TAG, "Invalid RDP_NEG_RSP"); nego->state = NEGO_STATE_FAIL; return; } Stream_Read_UINT8(s, nego->flags); Stream_Read_UINT16(s, length); Stream_Read_UINT32(s, nego->SelectedProtocol); nego->state = NEGO_STATE_FINAL; } /** * Process Negotiation Failure from Connection Confirm message. * @param nego * @param s */ void nego_process_negotiation_failure(rdpNego* nego, wStream* s) { BYTE flags; UINT16 length; UINT32 failureCode; WLog_DBG(TAG, "RDP_NEG_FAILURE"); Stream_Read_UINT8(s, flags); Stream_Read_UINT16(s, length); Stream_Read_UINT32(s, failureCode); switch (failureCode) { case SSL_REQUIRED_BY_SERVER: WLog_WARN(TAG, "Error: SSL_REQUIRED_BY_SERVER"); break; case SSL_NOT_ALLOWED_BY_SERVER: WLog_WARN(TAG, "Error: SSL_NOT_ALLOWED_BY_SERVER"); nego->sendNegoData = TRUE; break; case SSL_CERT_NOT_ON_SERVER: WLog_ERR(TAG, "Error: SSL_CERT_NOT_ON_SERVER"); nego->sendNegoData = TRUE; break; case INCONSISTENT_FLAGS: WLog_ERR(TAG, "Error: INCONSISTENT_FLAGS"); break; case HYBRID_REQUIRED_BY_SERVER: WLog_WARN(TAG, "Error: HYBRID_REQUIRED_BY_SERVER"); break; default: WLog_ERR(TAG, "Error: Unknown protocol security error %" PRIu32 "", failureCode); break; } nego->state = NEGO_STATE_FAIL; } /** * Send RDP Negotiation Response (RDP_NEG_RSP).\n * @param nego */ BOOL nego_send_negotiation_response(rdpNego* nego) { UINT16 length; size_t bm, em; BOOL status; wStream* s; BYTE flags; 
rdpSettings* settings; status = TRUE; settings = nego->transport->settings; s = Stream_New(NULL, 512); if (!s) { WLog_ERR(TAG, "Stream_New failed!"); return FALSE; } length = TPDU_CONNECTION_CONFIRM_LENGTH; bm = Stream_GetPosition(s); Stream_Seek(s, length); if (nego->SelectedProtocol & PROTOCOL_FAILED_NEGO) { UINT32 errorCode = (nego->SelectedProtocol & ~PROTOCOL_FAILED_NEGO); flags = 0; Stream_Write_UINT8(s, TYPE_RDP_NEG_FAILURE); Stream_Write_UINT8(s, flags); /* flags */ Stream_Write_UINT16(s, 8); /* RDP_NEG_DATA length (8) */ Stream_Write_UINT32(s, errorCode); length += 8; status = FALSE; } else { flags = EXTENDED_CLIENT_DATA_SUPPORTED; if (settings->SupportGraphicsPipeline) flags |= DYNVC_GFX_PROTOCOL_SUPPORTED; /* RDP_NEG_DATA must be present for TLS, NLA, and RDP */ Stream_Write_UINT8(s, TYPE_RDP_NEG_RSP); Stream_Write_UINT8(s, flags); /* flags */ Stream_Write_UINT16(s, 8); /* RDP_NEG_DATA length (8) */ Stream_Write_UINT32(s, nego->SelectedProtocol); /* selectedProtocol */ length += 8; } em = Stream_GetPosition(s); Stream_SetPosition(s, bm); tpkt_write_header(s, length); tpdu_write_connection_confirm(s, length - 5); Stream_SetPosition(s, em); Stream_SealLength(s); if (transport_write(nego->transport, s) < 0) { Stream_Free(s, TRUE); return FALSE; } Stream_Free(s, TRUE); if (status) { /* update settings with negotiated protocol security */ settings->RequestedProtocols = nego->RequestedProtocols; settings->SelectedProtocol = nego->SelectedProtocol; if (settings->SelectedProtocol == PROTOCOL_RDP) { settings->TlsSecurity = FALSE; settings->NlaSecurity = FALSE; settings->RdpSecurity = TRUE; settings->UseRdpSecurityLayer = TRUE; if (settings->EncryptionLevel == ENCRYPTION_LEVEL_NONE) { /** * If the server implementation did not explicitely set a * encryption level we default to client compatible */ settings->EncryptionLevel = ENCRYPTION_LEVEL_CLIENT_COMPATIBLE; } if (settings->LocalConnection) { /** * Note: This hack was firstly introduced in commit 95f5e115 to * 
disable the unnecessary encryption with peers connecting to * 127.0.0.1 or local unix sockets. * This also affects connections via port tunnels! (e.g. ssh -L) */ WLog_INFO(TAG, "Turning off encryption for local peer with standard rdp security"); settings->UseRdpSecurityLayer = FALSE; settings->EncryptionLevel = ENCRYPTION_LEVEL_NONE; } if (!settings->RdpServerRsaKey && !settings->RdpKeyFile && !settings->RdpKeyContent) { WLog_ERR(TAG, "Missing server certificate"); return FALSE; } } else if (settings->SelectedProtocol == PROTOCOL_SSL) { settings->TlsSecurity = TRUE; settings->NlaSecurity = FALSE; settings->RdpSecurity = FALSE; settings->UseRdpSecurityLayer = FALSE; settings->EncryptionLevel = ENCRYPTION_LEVEL_NONE; } else if (settings->SelectedProtocol == PROTOCOL_HYBRID) { settings->TlsSecurity = TRUE; settings->NlaSecurity = TRUE; settings->RdpSecurity = FALSE; settings->UseRdpSecurityLayer = FALSE; settings->EncryptionLevel = ENCRYPTION_LEVEL_NONE; } } return status; } /** * Initialize NEGO state machine. * @param nego */ void nego_init(rdpNego* nego) { nego->state = NEGO_STATE_INITIAL; nego->RequestedProtocols = PROTOCOL_RDP; nego->CookieMaxLength = DEFAULT_COOKIE_MAX_LENGTH; nego->sendNegoData = FALSE; nego->flags = 0; } /** * Create a new NEGO state machine instance. * @param transport * @return */ rdpNego* nego_new(rdpTransport* transport) { rdpNego* nego = (rdpNego*)calloc(1, sizeof(rdpNego)); if (!nego) return NULL; nego->transport = transport; nego_init(nego); return nego; } /** * Free NEGO state machine. * @param nego */ void nego_free(rdpNego* nego) { if (nego) { free(nego->RoutingToken); free(nego->cookie); free(nego); } } /** * Set target hostname and port. * @param nego * @param hostname * @param port */ BOOL nego_set_target(rdpNego* nego, const char* hostname, UINT16 port) { if (!nego || !hostname) return FALSE; nego->hostname = hostname; nego->port = port; return TRUE; } /** * Enable security layer negotiation. 
* @param nego pointer to the negotiation structure * @param enable_rdp whether to enable security layer negotiation (TRUE for enabled, FALSE for * disabled) */ void nego_set_negotiation_enabled(rdpNego* nego, BOOL NegotiateSecurityLayer) { WLog_DBG(TAG, "Enabling security layer negotiation: %s", NegotiateSecurityLayer ? "TRUE" : "FALSE"); nego->NegotiateSecurityLayer = NegotiateSecurityLayer; } /** * Enable restricted admin mode. * @param nego pointer to the negotiation structure * @param enable_restricted whether to enable security layer negotiation (TRUE for enabled, FALSE * for disabled) */ void nego_set_restricted_admin_mode_required(rdpNego* nego, BOOL RestrictedAdminModeRequired) { WLog_DBG(TAG, "Enabling restricted admin mode: %s", RestrictedAdminModeRequired ? "TRUE" : "FALSE"); nego->RestrictedAdminModeRequired = RestrictedAdminModeRequired; } void nego_set_gateway_enabled(rdpNego* nego, BOOL GatewayEnabled) { nego->GatewayEnabled = GatewayEnabled; } void nego_set_gateway_bypass_local(rdpNego* nego, BOOL GatewayBypassLocal) { nego->GatewayBypassLocal = GatewayBypassLocal; } /** * Enable RDP security protocol. * @param nego pointer to the negotiation structure * @param enable_rdp whether to enable normal RDP protocol (TRUE for enabled, FALSE for disabled) */ void nego_enable_rdp(rdpNego* nego, BOOL enable_rdp) { WLog_DBG(TAG, "Enabling RDP security: %s", enable_rdp ? "TRUE" : "FALSE"); nego->EnabledProtocols[PROTOCOL_RDP] = enable_rdp; } /** * Enable TLS security protocol. * @param nego pointer to the negotiation structure * @param enable_tls whether to enable TLS + RDP protocol (TRUE for enabled, FALSE for disabled) */ void nego_enable_tls(rdpNego* nego, BOOL enable_tls) { WLog_DBG(TAG, "Enabling TLS security: %s", enable_tls ? "TRUE" : "FALSE"); nego->EnabledProtocols[PROTOCOL_SSL] = enable_tls; } /** * Enable NLA security protocol. 
* @param nego pointer to the negotiation structure * @param enable_nla whether to enable network level authentication protocol (TRUE for enabled, * FALSE for disabled) */ void nego_enable_nla(rdpNego* nego, BOOL enable_nla) { WLog_DBG(TAG, "Enabling NLA security: %s", enable_nla ? "TRUE" : "FALSE"); nego->EnabledProtocols[PROTOCOL_HYBRID] = enable_nla; } /** * Enable NLA extended security protocol. * @param nego pointer to the negotiation structure * @param enable_ext whether to enable network level authentication extended protocol (TRUE for * enabled, FALSE for disabled) */ void nego_enable_ext(rdpNego* nego, BOOL enable_ext) { WLog_DBG(TAG, "Enabling NLA extended security: %s", enable_ext ? "TRUE" : "FALSE"); nego->EnabledProtocols[PROTOCOL_HYBRID_EX] = enable_ext; } /** * Set routing token. * @param nego * @param RoutingToken * @param RoutingTokenLength */ BOOL nego_set_routing_token(rdpNego* nego, BYTE* RoutingToken, DWORD RoutingTokenLength) { if (RoutingTokenLength == 0) return FALSE; free(nego->RoutingToken); nego->RoutingTokenLength = RoutingTokenLength; nego->RoutingToken = (BYTE*)malloc(nego->RoutingTokenLength); if (!nego->RoutingToken) return FALSE; CopyMemory(nego->RoutingToken, RoutingToken, nego->RoutingTokenLength); return TRUE; } /** * Set cookie. * @param nego * @param cookie */ BOOL nego_set_cookie(rdpNego* nego, char* cookie) { if (nego->cookie) { free(nego->cookie); nego->cookie = NULL; } if (!cookie) return TRUE; nego->cookie = _strdup(cookie); if (!nego->cookie) return FALSE; return TRUE; } /** * Set cookie maximum length * @param nego * @param CookieMaxLength */ void nego_set_cookie_max_length(rdpNego* nego, UINT32 CookieMaxLength) { nego->CookieMaxLength = CookieMaxLength; } /** * Enable / disable preconnection PDU. * @param nego * @param send_pcpdu */ void nego_set_send_preconnection_pdu(rdpNego* nego, BOOL SendPreconnectionPdu) { nego->SendPreconnectionPdu = SendPreconnectionPdu; } /** * Set preconnection id. 
* @param nego * @param id */ void nego_set_preconnection_id(rdpNego* nego, UINT32 PreconnectionId) { nego->PreconnectionId = PreconnectionId; } /** * Set preconnection blob. * @param nego * @param blob */ void nego_set_preconnection_blob(rdpNego* nego, char* PreconnectionBlob) { nego->PreconnectionBlob = PreconnectionBlob; } UINT32 nego_get_selected_protocol(rdpNego* nego) { if (!nego) return 0; return nego->SelectedProtocol; } BOOL nego_set_selected_protocol(rdpNego* nego, UINT32 SelectedProtocol) { if (!nego) return FALSE; nego->SelectedProtocol = SelectedProtocol; return TRUE; } UINT32 nego_get_requested_protocols(rdpNego* nego) { if (!nego) return 0; return nego->RequestedProtocols; } BOOL nego_set_requested_protocols(rdpNego* nego, UINT32 RequestedProtocols) { if (!nego) return FALSE; nego->RequestedProtocols = RequestedProtocols; return TRUE; } NEGO_STATE nego_get_state(rdpNego* nego) { if (!nego) return NEGO_STATE_FAIL; return nego->state; } BOOL nego_set_state(rdpNego* nego, NEGO_STATE state) { if (!nego) return FALSE; nego->state = state; return TRUE; } SEC_WINNT_AUTH_IDENTITY* nego_get_identity(rdpNego* nego) { if (!nego) return NULL; return nla_get_identity(nego->transport->nla); } void nego_free_nla(rdpNego* nego) { if (!nego || !nego->transport) return; nla_free(nego->transport->nla); nego->transport->nla = NULL; } const BYTE* nego_get_routing_token(rdpNego* nego, DWORD* RoutingTokenLength) { if (!nego) return NULL; if (RoutingTokenLength) *RoutingTokenLength = nego->RoutingTokenLength; return nego->RoutingToken; }
/**
 * FreeRDP: A Remote Desktop Protocol Implementation
 * RDP Protocol Security Negotiation
 *
 * Copyright 2011 Marc-Andre Moreau <marcandre.moreau@gmail.com>
 * Copyright 2014 Norbert Federa <norbert.federa@thincast.com>
 * Copyright 2015 Thincast Technologies GmbH
 * Copyright 2015 DI (FH) Martin Haimberger <martin.haimberger@thincast.com>
 *
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 * http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */

#ifdef HAVE_CONFIG_H
#include "config.h"
#endif

#include <winpr/crt.h>

#include <freerdp/log.h>

#include "tpkt.h"
#include "nego.h"
#include "transport.h"

#define TAG FREERDP_TAG("core.nego")

/* Negotiation state machine instance; opaque outside this translation unit. */
struct rdp_nego
{
	UINT16 port;                       /* target TCP port */
	UINT32 flags;                      /* flags received in RDP_NEG_RSP */
	const char* hostname;              /* target host (not owned) */
	char* cookie;                      /* mstshash cookie (owned) */
	BYTE* RoutingToken;                /* load-balancing token (owned) */
	DWORD RoutingTokenLength;
	BOOL SendPreconnectionPdu;
	UINT32 PreconnectionId;
	char* PreconnectionBlob;           /* not owned */
	NEGO_STATE state;
	BOOL TcpConnected;
	BOOL SecurityConnected;
	UINT32 CookieMaxLength;
	BOOL sendNegoData;                 /* force RDP_NEG_DATA on retry after failure */
	UINT32 SelectedProtocol;
	UINT32 RequestedProtocols;
	BOOL NegotiateSecurityLayer;
	BOOL EnabledProtocols[16];         /* indexed by PROTOCOL_* value */
	BOOL RestrictedAdminModeRequired;
	BOOL GatewayEnabled;
	BOOL GatewayBypassLocal;
	rdpTransport* transport;           /* not owned */
};

/* Map a NEGO_STATE to a printable name (out-of-range -> "NEGO_STATE_INVALID"). */
static const char* nego_state_string(NEGO_STATE state)
{
	static const char* const NEGO_STATE_STRINGS[] = { "NEGO_STATE_INITIAL", "NEGO_STATE_EXT",
		                                              "NEGO_STATE_NLA",     "NEGO_STATE_TLS",
		                                              "NEGO_STATE_RDP",     "NEGO_STATE_FAIL",
		                                              "NEGO_STATE_FINAL",   "NEGO_STATE_INVALID" };

	if (state >= ARRAYSIZE(NEGO_STATE_STRINGS))
		return NEGO_STATE_STRINGS[ARRAYSIZE(NEGO_STATE_STRINGS) - 1];

	return NEGO_STATE_STRINGS[state];
}

/* Map a PROTOCOL_* value to a printable name (out-of-range -> "UNK"). */
static const char* protocol_security_string(UINT32 security)
{
	static const char* PROTOCOL_SECURITY_STRINGS[] = { "RDP", "TLS", "NLA", "UNK", "UNK",
		                                               "UNK", "UNK", "UNK", "EXT", "UNK" };

	if (security >= ARRAYSIZE(PROTOCOL_SECURITY_STRINGS))
		return PROTOCOL_SECURITY_STRINGS[ARRAYSIZE(PROTOCOL_SECURITY_STRINGS) - 1];

	return PROTOCOL_SECURITY_STRINGS[security];
}

static BOOL nego_transport_connect(rdpNego* nego);
static BOOL nego_transport_disconnect(rdpNego* nego);
static BOOL nego_security_connect(rdpNego* nego);
static BOOL nego_send_preconnection_pdu(rdpNego* nego);
static BOOL nego_recv_response(rdpNego* nego);
static void nego_send(rdpNego* nego);
static BOOL nego_process_negotiation_request(rdpNego* nego, wStream* s);
static BOOL nego_process_negotiation_response(rdpNego* nego, wStream* s);
static BOOL nego_process_negotiation_failure(rdpNego* nego, wStream* s);

/**
 * Negotiate protocol security and connect.
 * Drives the state machine from the strongest enabled protocol downwards,
 * then copies the negotiated result into the transport settings and brings
 * up the selected security layer.
 * @param nego
 * @return TRUE on success, FALSE on negotiation or connection failure
 */
BOOL nego_connect(rdpNego* nego)
{
	rdpSettings* settings = nego->transport->settings;

	if (nego->state == NEGO_STATE_INITIAL)
	{
		/* start with the strongest enabled protocol */
		if (nego->EnabledProtocols[PROTOCOL_HYBRID_EX])
		{
			nego->state = NEGO_STATE_EXT;
		}
		else if (nego->EnabledProtocols[PROTOCOL_HYBRID])
		{
			nego->state = NEGO_STATE_NLA;
		}
		else if (nego->EnabledProtocols[PROTOCOL_SSL])
		{
			nego->state = NEGO_STATE_TLS;
		}
		else if (nego->EnabledProtocols[PROTOCOL_RDP])
		{
			nego->state = NEGO_STATE_RDP;
		}
		else
		{
			WLog_ERR(TAG, "No security protocol is enabled");
			nego->state = NEGO_STATE_FAIL;
			return FALSE;
		}

		if (!nego->NegotiateSecurityLayer)
		{
			WLog_DBG(TAG, "Security Layer Negotiation is disabled");
			/* attempt only the highest enabled protocol (see nego_attempt_*) */
			nego->EnabledProtocols[PROTOCOL_HYBRID] = FALSE;
			nego->EnabledProtocols[PROTOCOL_SSL] = FALSE;
			nego->EnabledProtocols[PROTOCOL_RDP] = FALSE;
			nego->EnabledProtocols[PROTOCOL_HYBRID_EX] = FALSE;

			if (nego->state == NEGO_STATE_EXT)
			{
				nego->EnabledProtocols[PROTOCOL_HYBRID_EX] = TRUE;
				nego->EnabledProtocols[PROTOCOL_HYBRID] = TRUE;
				nego->SelectedProtocol = PROTOCOL_HYBRID_EX;
			}
			else if (nego->state == NEGO_STATE_NLA)
			{
				nego->EnabledProtocols[PROTOCOL_HYBRID] = TRUE;
				nego->SelectedProtocol = PROTOCOL_HYBRID;
			}
			else if (nego->state == NEGO_STATE_TLS)
			{
				nego->EnabledProtocols[PROTOCOL_SSL] = TRUE;
				nego->SelectedProtocol = PROTOCOL_SSL;
			}
			else if (nego->state == NEGO_STATE_RDP)
			{
				nego->EnabledProtocols[PROTOCOL_RDP] = TRUE;
				nego->SelectedProtocol = PROTOCOL_RDP;
			}
		}

		if (nego->SendPreconnectionPdu)
		{
			if (!nego_send_preconnection_pdu(nego))
			{
				WLog_ERR(TAG, "Failed to send preconnection pdu");
				nego->state = NEGO_STATE_FINAL;
				return FALSE;
			}
		}
	}

	if (!nego->NegotiateSecurityLayer)
	{
		nego->state = NEGO_STATE_FINAL;
	}
	else
	{
		/* run the attempt/fallback loop until a protocol is agreed or all fail */
		do
		{
			WLog_DBG(TAG, "state: %s", nego_state_string(nego->state));
			nego_send(nego);

			if (nego->state == NEGO_STATE_FAIL)
			{
				if (freerdp_get_last_error(nego->transport->context) == FREERDP_ERROR_SUCCESS)
					WLog_ERR(TAG, "Protocol Security Negotiation Failure");

				nego->state = NEGO_STATE_FINAL;
				return FALSE;
			}
		} while (nego->state != NEGO_STATE_FINAL);
	}

	WLog_DBG(TAG, "Negotiated %s security", protocol_security_string(nego->SelectedProtocol));
	/* update settings with negotiated protocol security */
	settings->RequestedProtocols = nego->RequestedProtocols;
	settings->SelectedProtocol = nego->SelectedProtocol;
	settings->NegotiationFlags = nego->flags;

	if (nego->SelectedProtocol == PROTOCOL_RDP)
	{
		settings->UseRdpSecurityLayer = TRUE;

		if (!settings->EncryptionMethods)
		{
			/**
			 * Advertise all supported encryption methods if the client
			 * implementation did not set any security methods
			 */
			settings->EncryptionMethods = ENCRYPTION_METHOD_40BIT | ENCRYPTION_METHOD_56BIT |
			                              ENCRYPTION_METHOD_128BIT | ENCRYPTION_METHOD_FIPS;
		}
	}

	/* finally connect security layer (if not already done) */
	if (!nego_security_connect(nego))
	{
		WLog_DBG(TAG, "Failed to connect with %s security",
		         protocol_security_string(nego->SelectedProtocol));
		return FALSE;
	}

	return TRUE;
}

/* Reset the state machine and tear down the TCP connection. */
BOOL nego_disconnect(rdpNego* nego)
{
	nego->state = NEGO_STATE_INITIAL;
	return nego_transport_disconnect(nego);
}

/* connect to selected security layer */
BOOL nego_security_connect(rdpNego* nego)
{
	if (!nego->TcpConnected)
	{
		nego->SecurityConnected = FALSE;
	}
	else if (!nego->SecurityConnected)
	{
		if (nego->SelectedProtocol == PROTOCOL_HYBRID)
		{
			WLog_DBG(TAG, "nego_security_connect with PROTOCOL_HYBRID");
			nego->SecurityConnected = transport_connect_nla(nego->transport);
		}
		else if (nego->SelectedProtocol == PROTOCOL_SSL)
		{
			WLog_DBG(TAG, "nego_security_connect with PROTOCOL_SSL");
			nego->SecurityConnected = transport_connect_tls(nego->transport);
		}
		else if (nego->SelectedProtocol == PROTOCOL_RDP)
		{
			WLog_DBG(TAG, "nego_security_connect with PROTOCOL_RDP");
			nego->SecurityConnected = transport_connect_rdp(nego->transport);
		}
		else
		{
			WLog_ERR(TAG,
			         "cannot connect security layer because no protocol has been selected yet.");
		}
	}

	return nego->SecurityConnected;
}

/**
 * Connect TCP layer.
 * With gateway bypass enabled, a short (1s) direct attempt is made before
 * falling back to the gateway with the normal (15s) timeout.
 * @param nego
 * @return TRUE if the TCP layer is connected
 */
static BOOL nego_tcp_connect(rdpNego* nego)
{
	if (!nego->TcpConnected)
	{
		if (nego->GatewayEnabled)
		{
			if (nego->GatewayBypassLocal)
			{
				/* Attempt a direct connection first, and then fallback to using the gateway */
				WLog_INFO(TAG,
				          "Detecting if host can be reached locally. - This might take some time.");
				WLog_INFO(TAG, "To disable auto detection use /gateway-usage-method:direct");
				transport_set_gateway_enabled(nego->transport, FALSE);
				nego->TcpConnected = transport_connect(nego->transport, nego->hostname, nego->port, 1);
			}

			if (!nego->TcpConnected)
			{
				transport_set_gateway_enabled(nego->transport, TRUE);
				nego->TcpConnected = transport_connect(nego->transport, nego->hostname, nego->port, 15);
			}
		}
		else
		{
			nego->TcpConnected = transport_connect(nego->transport, nego->hostname, nego->port, 15);
		}
	}

	return nego->TcpConnected;
}

/**
 * Connect TCP layer. For direct approach, connect security layer as well.
 * @param nego
 * @return TRUE if connected (and, when negotiation is disabled, the security
 *         layer handshake succeeded as well)
 */
BOOL nego_transport_connect(rdpNego* nego)
{
	if (!nego_tcp_connect(nego))
		return FALSE;

	/* without security layer negotiation, handshake immediately on the fresh
	 * TCP connection */
	if (nego->TcpConnected && !nego->NegotiateSecurityLayer)
		return nego_security_connect(nego);

	return nego->TcpConnected;
}

/**
 * Disconnect TCP layer.
 * @param nego
 * @return TRUE (always)
 */
BOOL nego_transport_disconnect(rdpNego* nego)
{
	if (nego->TcpConnected)
		transport_disconnect(nego->transport);

	nego->TcpConnected = FALSE;
	nego->SecurityConnected = FALSE;
	return TRUE;
}

/**
 * Send preconnection information if enabled.
 * Builds a version 2 preconnection PDU carrying the configured id and the
 * optional UTF-16 converted blob, and writes it to the transport.
 * @param nego
 * @return TRUE on success, FALSE on connect/allocation/write failure
 */
BOOL nego_send_preconnection_pdu(rdpNego* nego)
{
	wStream* s;
	UINT32 cbSize;
	UINT16 cchPCB = 0;
	WCHAR* wszPCB = NULL;
	WLog_DBG(TAG, "Sending preconnection PDU");

	if (!nego_tcp_connect(nego))
		return FALSE;

	/* it's easier to always send the version 2 PDU, and it's just 2 bytes overhead */
	cbSize = PRECONNECTION_PDU_V2_MIN_SIZE;

	if (nego->PreconnectionBlob)
	{
		/* NOTE(review): the return value of ConvertToUnicode is used unchecked;
		 * with cbMultiByte == -1 the result presumably already counts the
		 * terminating NUL, making the extra += 1 look one too many — confirm
		 * against the WinPR ConvertToUnicode contract. */
		cchPCB = (UINT16)ConvertToUnicode(CP_UTF8, 0, nego->PreconnectionBlob, -1, &wszPCB, 0);
		cchPCB += 1; /* zero-termination */
		cbSize += cchPCB * 2;
	}

	s = Stream_New(NULL, cbSize);

	if (!s)
	{
		free(wszPCB);
		WLog_ERR(TAG, "Stream_New failed!");
		return FALSE;
	}

	Stream_Write_UINT32(s, cbSize);                /* cbSize */
	Stream_Write_UINT32(s, 0);                     /* Flags */
	Stream_Write_UINT32(s, PRECONNECTION_PDU_V2);  /* Version */
	Stream_Write_UINT32(s, nego->PreconnectionId); /* Id */
	Stream_Write_UINT16(s, cchPCB);                /* cchPCB */

	if (wszPCB)
	{
		Stream_Write(s, wszPCB, cchPCB * 2); /* wszPCB */
		free(wszPCB);
	}

	Stream_SealLength(s);

	if (transport_write(nego->transport, s) < 0)
	{
		Stream_Free(s, TRUE);
		return FALSE;
	}

	Stream_Free(s, TRUE);
	return TRUE;
}

/**
 * Attempt negotiating NLA + TLS extended security.
 * On rejection, falls back to the next strongest enabled protocol.
 * @param nego
 */
static void nego_attempt_ext(rdpNego* nego)
{
	nego->RequestedProtocols = PROTOCOL_HYBRID | PROTOCOL_SSL | PROTOCOL_HYBRID_EX;
	WLog_DBG(TAG, "Attempting NLA extended security");

	if (!nego_transport_connect(nego))
	{
		nego->state = NEGO_STATE_FAIL;
		return;
	}

	if (!nego_send_negotiation_request(nego))
	{
		nego->state = NEGO_STATE_FAIL;
		return;
	}

	if (!nego_recv_response(nego))
	{
		nego->state = NEGO_STATE_FAIL;
		return;
	}

	WLog_DBG(TAG, "state: %s", nego_state_string(nego->state));

	if (nego->state != NEGO_STATE_FINAL)
	{
		/* fall back: NLA, then TLS, then plain RDP */
		nego_transport_disconnect(nego);

		if (nego->EnabledProtocols[PROTOCOL_HYBRID])
			nego->state = NEGO_STATE_NLA;
		else if (nego->EnabledProtocols[PROTOCOL_SSL])
			nego->state = NEGO_STATE_TLS;
		else if (nego->EnabledProtocols[PROTOCOL_RDP])
			nego->state = NEGO_STATE_RDP;
		else
			nego->state = NEGO_STATE_FAIL;
	}
}

/**
 * Attempt negotiating NLA + TLS security.
 * On rejection, falls back to TLS, then plain RDP.
 * @param nego
 */
static void nego_attempt_nla(rdpNego* nego)
{
	nego->RequestedProtocols = PROTOCOL_HYBRID | PROTOCOL_SSL;
	WLog_DBG(TAG, "Attempting NLA security");

	if (!nego_transport_connect(nego))
	{
		nego->state = NEGO_STATE_FAIL;
		return;
	}

	if (!nego_send_negotiation_request(nego))
	{
		nego->state = NEGO_STATE_FAIL;
		return;
	}

	if (!nego_recv_response(nego))
	{
		nego->state = NEGO_STATE_FAIL;
		return;
	}

	WLog_DBG(TAG, "state: %s", nego_state_string(nego->state));

	if (nego->state != NEGO_STATE_FINAL)
	{
		nego_transport_disconnect(nego);

		if (nego->EnabledProtocols[PROTOCOL_SSL])
			nego->state = NEGO_STATE_TLS;
		else if (nego->EnabledProtocols[PROTOCOL_RDP])
			nego->state = NEGO_STATE_RDP;
		else
			nego->state = NEGO_STATE_FAIL;
	}
}

/**
 * Attempt negotiating TLS security.
* @param nego */ static void nego_attempt_tls(rdpNego* nego) { nego->RequestedProtocols = PROTOCOL_SSL; WLog_DBG(TAG, "Attempting TLS security"); if (!nego_transport_connect(nego)) { nego->state = NEGO_STATE_FAIL; return; } if (!nego_send_negotiation_request(nego)) { nego->state = NEGO_STATE_FAIL; return; } if (!nego_recv_response(nego)) { nego->state = NEGO_STATE_FAIL; return; } if (nego->state != NEGO_STATE_FINAL) { nego_transport_disconnect(nego); if (nego->EnabledProtocols[PROTOCOL_RDP]) nego->state = NEGO_STATE_RDP; else nego->state = NEGO_STATE_FAIL; } } /** * Attempt negotiating standard RDP security. * @param nego */ static void nego_attempt_rdp(rdpNego* nego) { nego->RequestedProtocols = PROTOCOL_RDP; WLog_DBG(TAG, "Attempting RDP security"); if (!nego_transport_connect(nego)) { nego->state = NEGO_STATE_FAIL; return; } if (!nego_send_negotiation_request(nego)) { nego->state = NEGO_STATE_FAIL; return; } if (!nego_recv_response(nego)) { nego->state = NEGO_STATE_FAIL; return; } } /** * Wait to receive a negotiation response * @param nego */ BOOL nego_recv_response(rdpNego* nego) { int status; wStream* s; s = Stream_New(NULL, 1024); if (!s) { WLog_ERR(TAG, "Stream_New failed!"); return FALSE; } status = transport_read_pdu(nego->transport, s); if (status < 0) { Stream_Free(s, TRUE); return FALSE; } status = nego_recv(nego->transport, s, nego); Stream_Free(s, TRUE); if (status < 0) return FALSE; return TRUE; } /** * Receive protocol security negotiation message.\n * @msdn{cc240501} * @param transport transport * @param s stream * @param extra nego pointer */ int nego_recv(rdpTransport* transport, wStream* s, void* extra) { BYTE li; BYTE type; UINT16 length; rdpNego* nego = (rdpNego*)extra; if (!tpkt_read_header(s, &length)) return -1; if (!tpdu_read_connection_confirm(s, &li, length)) return -1; if (li > 6) { /* rdpNegData (optional) */ Stream_Read_UINT8(s, type); /* Type */ switch (type) { case TYPE_RDP_NEG_RSP: if (!nego_process_negotiation_response(nego, s)) 
return -1; WLog_DBG(TAG, "selected_protocol: %" PRIu32 "", nego->SelectedProtocol); /* enhanced security selected ? */ if (nego->SelectedProtocol) { if ((nego->SelectedProtocol == PROTOCOL_HYBRID) && (!nego->EnabledProtocols[PROTOCOL_HYBRID])) { nego->state = NEGO_STATE_FAIL; } if ((nego->SelectedProtocol == PROTOCOL_SSL) && (!nego->EnabledProtocols[PROTOCOL_SSL])) { nego->state = NEGO_STATE_FAIL; } } else if (!nego->EnabledProtocols[PROTOCOL_RDP]) { nego->state = NEGO_STATE_FAIL; } break; case TYPE_RDP_NEG_FAILURE: if (!nego_process_negotiation_failure(nego, s)) return -1; break; } } else if (li == 6) { WLog_DBG(TAG, "no rdpNegData"); if (!nego->EnabledProtocols[PROTOCOL_RDP]) nego->state = NEGO_STATE_FAIL; else nego->state = NEGO_STATE_FINAL; } else { WLog_ERR(TAG, "invalid negotiation response"); nego->state = NEGO_STATE_FAIL; } if (!tpkt_ensure_stream_consumed(s, length)) return -1; return 0; } /** * Read optional routing token or cookie of X.224 Connection Request PDU. * @msdn{cc240470} * @param nego * @param s stream */ static BOOL nego_read_request_token_or_cookie(rdpNego* nego, wStream* s) { /* routingToken and cookie are optional and mutually exclusive! * * routingToken (variable): An optional and variable-length routing * token (used for load balancing) terminated by a 0x0D0A two-byte * sequence: (check [MSFT-SDLBTS] for details!) 
* Cookie:[space]msts=[ip address].[port].[reserved][\x0D\x0A] * * cookie (variable): An optional and variable-length ANSI character * string terminated by a 0x0D0A two-byte sequence: * Cookie:[space]mstshash=[ANSISTRING][\x0D\x0A] */ BYTE* str = NULL; UINT16 crlf = 0; size_t pos, len; BOOL result = FALSE; BOOL isToken = FALSE; size_t remain = Stream_GetRemainingLength(s); str = Stream_Pointer(s); pos = Stream_GetPosition(s); /* minimum length for token is 15 */ if (remain < 15) return TRUE; if (memcmp(Stream_Pointer(s), "Cookie: mstshash=", 17) != 0) { isToken = TRUE; } else { /* not a token, minimum length for cookie is 19 */ if (remain < 19) return TRUE; Stream_Seek(s, 17); } while ((remain = Stream_GetRemainingLength(s)) >= 2) { Stream_Read_UINT16(s, crlf); if (crlf == 0x0A0D) break; Stream_Rewind(s, 1); } if (crlf == 0x0A0D) { Stream_Rewind(s, 2); len = Stream_GetPosition(s) - pos; remain = Stream_GetRemainingLength(s); Stream_Write_UINT16(s, 0); if (strnlen((char*)str, len) == len) { if (isToken) result = nego_set_routing_token(nego, str, len); else result = nego_set_cookie(nego, (char*)str); } } if (!result) { Stream_SetPosition(s, pos); WLog_ERR(TAG, "invalid %s received", isToken ? "routing token" : "cookie"); } else { WLog_DBG(TAG, "received %s [%s]", isToken ? 
"routing token" : "cookie", str); } return result; } /** * Read protocol security negotiation request message.\n * @param nego * @param s stream */ BOOL nego_read_request(rdpNego* nego, wStream* s) { BYTE li; BYTE type; UINT16 length; if (!tpkt_read_header(s, &length)) return FALSE; if (!tpdu_read_connection_request(s, &li, length)) return FALSE; if (li != Stream_GetRemainingLength(s) + 6) { WLog_ERR(TAG, "Incorrect TPDU length indicator."); return FALSE; } if (!nego_read_request_token_or_cookie(nego, s)) { WLog_ERR(TAG, "Failed to parse routing token or cookie."); return FALSE; } if (Stream_GetRemainingLength(s) >= 8) { /* rdpNegData (optional) */ Stream_Read_UINT8(s, type); /* Type */ if (type != TYPE_RDP_NEG_REQ) { WLog_ERR(TAG, "Incorrect negotiation request type %" PRIu8 "", type); return FALSE; } if (!nego_process_negotiation_request(nego, s)) return FALSE; } return tpkt_ensure_stream_consumed(s, length); } /** * Send protocol security negotiation message. * @param nego */ void nego_send(rdpNego* nego) { if (nego->state == NEGO_STATE_EXT) nego_attempt_ext(nego); else if (nego->state == NEGO_STATE_NLA) nego_attempt_nla(nego); else if (nego->state == NEGO_STATE_TLS) nego_attempt_tls(nego); else if (nego->state == NEGO_STATE_RDP) nego_attempt_rdp(nego); else WLog_ERR(TAG, "invalid negotiation state for sending"); } /** * Send RDP Negotiation Request (RDP_NEG_REQ).\n * @msdn{cc240500}\n * @msdn{cc240470} * @param nego */ BOOL nego_send_negotiation_request(rdpNego* nego) { BOOL rc = FALSE; wStream* s; size_t length; size_t bm, em; BYTE flags = 0; size_t cookie_length; s = Stream_New(NULL, 512); if (!s) { WLog_ERR(TAG, "Stream_New failed!"); return FALSE; } length = TPDU_CONNECTION_REQUEST_LENGTH; bm = Stream_GetPosition(s); Stream_Seek(s, length); if (nego->RoutingToken) { Stream_Write(s, nego->RoutingToken, nego->RoutingTokenLength); /* Ensure Routing Token is correctly terminated - may already be present in string */ if ((nego->RoutingTokenLength > 2) && 
(nego->RoutingToken[nego->RoutingTokenLength - 2] == 0x0D) && (nego->RoutingToken[nego->RoutingTokenLength - 1] == 0x0A)) { WLog_DBG(TAG, "Routing token looks correctly terminated - use verbatim"); length += nego->RoutingTokenLength; } else { WLog_DBG(TAG, "Adding terminating CRLF to routing token"); Stream_Write_UINT8(s, 0x0D); /* CR */ Stream_Write_UINT8(s, 0x0A); /* LF */ length += nego->RoutingTokenLength + 2; } } else if (nego->cookie) { cookie_length = strlen(nego->cookie); if (cookie_length > nego->CookieMaxLength) cookie_length = nego->CookieMaxLength; Stream_Write(s, "Cookie: mstshash=", 17); Stream_Write(s, (BYTE*)nego->cookie, cookie_length); Stream_Write_UINT8(s, 0x0D); /* CR */ Stream_Write_UINT8(s, 0x0A); /* LF */ length += cookie_length + 19; } WLog_DBG(TAG, "RequestedProtocols: %" PRIu32 "", nego->RequestedProtocols); if ((nego->RequestedProtocols > PROTOCOL_RDP) || (nego->sendNegoData)) { /* RDP_NEG_DATA must be present for TLS and NLA */ if (nego->RestrictedAdminModeRequired) flags |= RESTRICTED_ADMIN_MODE_REQUIRED; Stream_Write_UINT8(s, TYPE_RDP_NEG_REQ); Stream_Write_UINT8(s, flags); Stream_Write_UINT16(s, 8); /* RDP_NEG_DATA length (8) */ Stream_Write_UINT32(s, nego->RequestedProtocols); /* requestedProtocols */ length += 8; } if (length > UINT16_MAX) goto fail; em = Stream_GetPosition(s); Stream_SetPosition(s, bm); tpkt_write_header(s, (UINT16)length); tpdu_write_connection_request(s, (UINT16)length - 5); Stream_SetPosition(s, em); Stream_SealLength(s); rc = (transport_write(nego->transport, s) >= 0); fail: Stream_Free(s, TRUE); return rc; } /** * Process Negotiation Request from Connection Request message. 
 * @param nego
 * @param s stream positioned just after the RDP_NEG_REQ type octet
 */
BOOL nego_process_negotiation_request(rdpNego* nego, wStream* s)
{
	BYTE flags;
	UINT16 length;

	/* RDP_NEG_REQ payload: flags (1) + length (2) + requestedProtocols (4) */
	if (Stream_GetRemainingLength(s) < 7)
		return FALSE;

	Stream_Read_UINT8(s, flags);
	/* NOTE(review): length is read but not validated against the fixed value 8
	 * mandated for RDP_NEG_REQ — confirm against MS-RDPBCGR 2.2.1.1.1 */
	Stream_Read_UINT16(s, length);
	Stream_Read_UINT32(s, nego->RequestedProtocols);
	WLog_DBG(TAG, "RDP_NEG_REQ: RequestedProtocol: 0x%08" PRIX32 "", nego->RequestedProtocols);
	nego->state = NEGO_STATE_FINAL;
	return TRUE;
}

/**
 * Process Negotiation Response from Connection Confirm message.
 * Stores the server's flags and selected protocol on the nego instance.
 * @param nego
 * @param s stream positioned just after the RDP_NEG_RSP type octet
 */
BOOL nego_process_negotiation_response(rdpNego* nego, wStream* s)
{
	UINT16 length;
	WLog_DBG(TAG, "RDP_NEG_RSP");

	/* RDP_NEG_RSP payload: flags (1) + length (2) + selectedProtocol (4) */
	if (Stream_GetRemainingLength(s) < 7)
	{
		WLog_ERR(TAG, "Invalid RDP_NEG_RSP");
		nego->state = NEGO_STATE_FAIL;
		return FALSE;
	}

	Stream_Read_UINT8(s, nego->flags);
	Stream_Read_UINT16(s, length);
	Stream_Read_UINT32(s, nego->SelectedProtocol);
	nego->state = NEGO_STATE_FINAL;
	return TRUE;
}

/**
 * Process Negotiation Failure from Connection Confirm message.
 * Logs the failure code; SSL_NOT_ALLOWED_BY_SERVER and SSL_CERT_NOT_ON_SERVER
 * additionally arm sendNegoData so a retry includes RDP_NEG_DATA.
 * Always leaves the state machine in NEGO_STATE_FAIL.
 * @param nego
 * @param s stream positioned just after the RDP_NEG_FAILURE type octet
 */
BOOL nego_process_negotiation_failure(rdpNego* nego, wStream* s)
{
	BYTE flags;
	UINT16 length;
	UINT32 failureCode;
	WLog_DBG(TAG, "RDP_NEG_FAILURE");

	/* RDP_NEG_FAILURE payload: flags (1) + length (2) + failureCode (4) */
	if (Stream_GetRemainingLength(s) < 7)
		return FALSE;

	Stream_Read_UINT8(s, flags);
	Stream_Read_UINT16(s, length);
	Stream_Read_UINT32(s, failureCode);

	switch (failureCode)
	{
		case SSL_REQUIRED_BY_SERVER:
			WLog_WARN(TAG, "Error: SSL_REQUIRED_BY_SERVER");
			break;

		case SSL_NOT_ALLOWED_BY_SERVER:
			WLog_WARN(TAG, "Error: SSL_NOT_ALLOWED_BY_SERVER");
			nego->sendNegoData = TRUE;
			break;

		case SSL_CERT_NOT_ON_SERVER:
			WLog_ERR(TAG, "Error: SSL_CERT_NOT_ON_SERVER");
			nego->sendNegoData = TRUE;
			break;

		case INCONSISTENT_FLAGS:
			WLog_ERR(TAG, "Error: INCONSISTENT_FLAGS");
			break;

		case HYBRID_REQUIRED_BY_SERVER:
			WLog_WARN(TAG, "Error: HYBRID_REQUIRED_BY_SERVER");
			break;

		default:
			WLog_ERR(TAG, "Error: Unknown protocol security error %" PRIu32 "", failureCode);
			break;
	}

	nego->state = NEGO_STATE_FAIL;
	return TRUE;
}

/**
 * Send RDP Negotiation Response
(RDP_NEG_RSP).
 * Server side: answers the client's connection request with either an
 * RDP_NEG_RSP or (when PROTOCOL_FAILED_NEGO is set on SelectedProtocol) an
 * RDP_NEG_FAILURE, then mirrors the negotiated security into the settings.
 * @param nego
 * @return TRUE when a success response was sent, FALSE when a failure PDU was
 *         sent or an allocation/write/configuration error occurred
 */
BOOL nego_send_negotiation_response(rdpNego* nego)
{
	UINT16 length;
	size_t bm, em;
	BOOL status;
	wStream* s;
	BYTE flags;
	rdpSettings* settings;
	status = TRUE;
	settings = nego->transport->settings;
	s = Stream_New(NULL, 512);

	if (!s)
	{
		WLog_ERR(TAG, "Stream_New failed!");
		return FALSE;
	}

	length = TPDU_CONNECTION_CONFIRM_LENGTH;
	bm = Stream_GetPosition(s);
	Stream_Seek(s, length); /* reserve space for the TPKT/TPDU headers */

	if (nego->SelectedProtocol & PROTOCOL_FAILED_NEGO)
	{
		/* the lower bits carry the failure code to report to the client */
		UINT32 errorCode = (nego->SelectedProtocol & ~PROTOCOL_FAILED_NEGO);
		flags = 0;
		Stream_Write_UINT8(s, TYPE_RDP_NEG_FAILURE);
		Stream_Write_UINT8(s, flags); /* flags */
		Stream_Write_UINT16(s, 8);    /* RDP_NEG_DATA length (8) */
		Stream_Write_UINT32(s, errorCode);
		length += 8;
		status = FALSE;
	}
	else
	{
		flags = EXTENDED_CLIENT_DATA_SUPPORTED;

		if (settings->SupportGraphicsPipeline)
			flags |= DYNVC_GFX_PROTOCOL_SUPPORTED;

		/* RDP_NEG_DATA must be present for TLS, NLA, and RDP */
		Stream_Write_UINT8(s, TYPE_RDP_NEG_RSP);
		Stream_Write_UINT8(s, flags);                   /* flags */
		Stream_Write_UINT16(s, 8);                      /* RDP_NEG_DATA length (8) */
		Stream_Write_UINT32(s, nego->SelectedProtocol); /* selectedProtocol */
		length += 8;
	}

	/* go back and fill in the headers now that the total length is known */
	em = Stream_GetPosition(s);
	Stream_SetPosition(s, bm);
	tpkt_write_header(s, length);
	tpdu_write_connection_confirm(s, length - 5);
	Stream_SetPosition(s, em);
	Stream_SealLength(s);

	if (transport_write(nego->transport, s) < 0)
	{
		Stream_Free(s, TRUE);
		return FALSE;
	}

	Stream_Free(s, TRUE);

	if (status)
	{
		/* update settings with negotiated protocol security */
		settings->RequestedProtocols = nego->RequestedProtocols;
		settings->SelectedProtocol = nego->SelectedProtocol;

		if (settings->SelectedProtocol == PROTOCOL_RDP)
		{
			settings->TlsSecurity = FALSE;
			settings->NlaSecurity = FALSE;
			settings->RdpSecurity = TRUE;
			settings->UseRdpSecurityLayer = TRUE;

			if (settings->EncryptionLevel == ENCRYPTION_LEVEL_NONE)
			{
				/**
				 * If the server implementation did not explicitely set a
				 * encryption level we default to client compatible
				 */
				settings->EncryptionLevel = ENCRYPTION_LEVEL_CLIENT_COMPATIBLE;
			}

			if (settings->LocalConnection)
			{
				/**
				 * Note: This hack was firstly introduced in commit 95f5e115 to
				 * disable the unnecessary encryption with peers connecting to
				 * 127.0.0.1 or local unix sockets.
				 * This also affects connections via port tunnels! (e.g. ssh -L)
				 */
				WLog_INFO(TAG, "Turning off encryption for local peer with standard rdp security");
				settings->UseRdpSecurityLayer = FALSE;
				settings->EncryptionLevel = ENCRYPTION_LEVEL_NONE;
			}

			/* standard RDP security needs a server RSA key from some source */
			if (!settings->RdpServerRsaKey && !settings->RdpKeyFile && !settings->RdpKeyContent)
			{
				WLog_ERR(TAG, "Missing server certificate");
				return FALSE;
			}
		}
		else if (settings->SelectedProtocol == PROTOCOL_SSL)
		{
			settings->TlsSecurity = TRUE;
			settings->NlaSecurity = FALSE;
			settings->RdpSecurity = FALSE;
			settings->UseRdpSecurityLayer = FALSE;
			settings->EncryptionLevel = ENCRYPTION_LEVEL_NONE;
		}
		else if (settings->SelectedProtocol == PROTOCOL_HYBRID)
		{
			settings->TlsSecurity = TRUE;
			settings->NlaSecurity = TRUE;
			settings->RdpSecurity = FALSE;
			settings->UseRdpSecurityLayer = FALSE;
			settings->EncryptionLevel = ENCRYPTION_LEVEL_NONE;
		}
	}

	return status;
}

/**
 * Initialize NEGO state machine.
 * Resets negotiation state to defaults (plain RDP requested).
 * @param nego
 */
void nego_init(rdpNego* nego)
{
	nego->state = NEGO_STATE_INITIAL;
	nego->RequestedProtocols = PROTOCOL_RDP;
	nego->CookieMaxLength = DEFAULT_COOKIE_MAX_LENGTH;
	nego->sendNegoData = FALSE;
	nego->flags = 0;
}

/**
 * Create a new NEGO state machine instance.
 * @param transport transport the negotiation runs over (not owned)
 * @return new instance, or NULL on allocation failure
 */
rdpNego* nego_new(rdpTransport* transport)
{
	rdpNego* nego = (rdpNego*)calloc(1, sizeof(rdpNego));

	if (!nego)
		return NULL;

	nego->transport = transport;
	nego_init(nego);
	return nego;
}

/**
 * Free NEGO state machine.
 * Releases the owned routing token and cookie; the transport is not owned.
 * @param nego
 */
void nego_free(rdpNego* nego)
{
	if (nego)
	{
		free(nego->RoutingToken);
		free(nego->cookie);
		free(nego);
	}
}

/**
 * Set target hostname and port.
* @param nego * @param hostname * @param port */ BOOL nego_set_target(rdpNego* nego, const char* hostname, UINT16 port) { if (!nego || !hostname) return FALSE; nego->hostname = hostname; nego->port = port; return TRUE; } /** * Enable security layer negotiation. * @param nego pointer to the negotiation structure * @param enable_rdp whether to enable security layer negotiation (TRUE for enabled, FALSE for * disabled) */ void nego_set_negotiation_enabled(rdpNego* nego, BOOL NegotiateSecurityLayer) { WLog_DBG(TAG, "Enabling security layer negotiation: %s", NegotiateSecurityLayer ? "TRUE" : "FALSE"); nego->NegotiateSecurityLayer = NegotiateSecurityLayer; } /** * Enable restricted admin mode. * @param nego pointer to the negotiation structure * @param enable_restricted whether to enable security layer negotiation (TRUE for enabled, FALSE * for disabled) */ void nego_set_restricted_admin_mode_required(rdpNego* nego, BOOL RestrictedAdminModeRequired) { WLog_DBG(TAG, "Enabling restricted admin mode: %s", RestrictedAdminModeRequired ? "TRUE" : "FALSE"); nego->RestrictedAdminModeRequired = RestrictedAdminModeRequired; } void nego_set_gateway_enabled(rdpNego* nego, BOOL GatewayEnabled) { nego->GatewayEnabled = GatewayEnabled; } void nego_set_gateway_bypass_local(rdpNego* nego, BOOL GatewayBypassLocal) { nego->GatewayBypassLocal = GatewayBypassLocal; } /** * Enable RDP security protocol. * @param nego pointer to the negotiation structure * @param enable_rdp whether to enable normal RDP protocol (TRUE for enabled, FALSE for disabled) */ void nego_enable_rdp(rdpNego* nego, BOOL enable_rdp) { WLog_DBG(TAG, "Enabling RDP security: %s", enable_rdp ? "TRUE" : "FALSE"); nego->EnabledProtocols[PROTOCOL_RDP] = enable_rdp; } /** * Enable TLS security protocol. 
* @param nego pointer to the negotiation structure * @param enable_tls whether to enable TLS + RDP protocol (TRUE for enabled, FALSE for disabled) */ void nego_enable_tls(rdpNego* nego, BOOL enable_tls) { WLog_DBG(TAG, "Enabling TLS security: %s", enable_tls ? "TRUE" : "FALSE"); nego->EnabledProtocols[PROTOCOL_SSL] = enable_tls; } /** * Enable NLA security protocol. * @param nego pointer to the negotiation structure * @param enable_nla whether to enable network level authentication protocol (TRUE for enabled, * FALSE for disabled) */ void nego_enable_nla(rdpNego* nego, BOOL enable_nla) { WLog_DBG(TAG, "Enabling NLA security: %s", enable_nla ? "TRUE" : "FALSE"); nego->EnabledProtocols[PROTOCOL_HYBRID] = enable_nla; } /** * Enable NLA extended security protocol. * @param nego pointer to the negotiation structure * @param enable_ext whether to enable network level authentication extended protocol (TRUE for * enabled, FALSE for disabled) */ void nego_enable_ext(rdpNego* nego, BOOL enable_ext) { WLog_DBG(TAG, "Enabling NLA extended security: %s", enable_ext ? "TRUE" : "FALSE"); nego->EnabledProtocols[PROTOCOL_HYBRID_EX] = enable_ext; } /** * Set routing token. * @param nego * @param RoutingToken * @param RoutingTokenLength */ BOOL nego_set_routing_token(rdpNego* nego, BYTE* RoutingToken, DWORD RoutingTokenLength) { if (RoutingTokenLength == 0) return FALSE; free(nego->RoutingToken); nego->RoutingTokenLength = RoutingTokenLength; nego->RoutingToken = (BYTE*)malloc(nego->RoutingTokenLength); if (!nego->RoutingToken) return FALSE; CopyMemory(nego->RoutingToken, RoutingToken, nego->RoutingTokenLength); return TRUE; } /** * Set cookie. 
* @param nego * @param cookie */ BOOL nego_set_cookie(rdpNego* nego, char* cookie) { if (nego->cookie) { free(nego->cookie); nego->cookie = NULL; } if (!cookie) return TRUE; nego->cookie = _strdup(cookie); if (!nego->cookie) return FALSE; return TRUE; } /** * Set cookie maximum length * @param nego * @param CookieMaxLength */ void nego_set_cookie_max_length(rdpNego* nego, UINT32 CookieMaxLength) { nego->CookieMaxLength = CookieMaxLength; } /** * Enable / disable preconnection PDU. * @param nego * @param send_pcpdu */ void nego_set_send_preconnection_pdu(rdpNego* nego, BOOL SendPreconnectionPdu) { nego->SendPreconnectionPdu = SendPreconnectionPdu; } /** * Set preconnection id. * @param nego * @param id */ void nego_set_preconnection_id(rdpNego* nego, UINT32 PreconnectionId) { nego->PreconnectionId = PreconnectionId; } /** * Set preconnection blob. * @param nego * @param blob */ void nego_set_preconnection_blob(rdpNego* nego, char* PreconnectionBlob) { nego->PreconnectionBlob = PreconnectionBlob; } UINT32 nego_get_selected_protocol(rdpNego* nego) { if (!nego) return 0; return nego->SelectedProtocol; } BOOL nego_set_selected_protocol(rdpNego* nego, UINT32 SelectedProtocol) { if (!nego) return FALSE; nego->SelectedProtocol = SelectedProtocol; return TRUE; } UINT32 nego_get_requested_protocols(rdpNego* nego) { if (!nego) return 0; return nego->RequestedProtocols; } BOOL nego_set_requested_protocols(rdpNego* nego, UINT32 RequestedProtocols) { if (!nego) return FALSE; nego->RequestedProtocols = RequestedProtocols; return TRUE; } NEGO_STATE nego_get_state(rdpNego* nego) { if (!nego) return NEGO_STATE_FAIL; return nego->state; } BOOL nego_set_state(rdpNego* nego, NEGO_STATE state) { if (!nego) return FALSE; nego->state = state; return TRUE; } SEC_WINNT_AUTH_IDENTITY* nego_get_identity(rdpNego* nego) { if (!nego) return NULL; return nla_get_identity(nego->transport->nla); } void nego_free_nla(rdpNego* nego) { if (!nego || !nego->transport) return; 
nla_free(nego->transport->nla); nego->transport->nla = NULL; } const BYTE* nego_get_routing_token(rdpNego* nego, DWORD* RoutingTokenLength) { if (!nego) return NULL; if (RoutingTokenLength) *RoutingTokenLength = nego->RoutingTokenLength; return nego->RoutingToken; }
void nego_process_negotiation_request(rdpNego* nego, wStream* s) { BYTE flags; UINT16 length; Stream_Read_UINT8(s, flags); Stream_Read_UINT16(s, length); Stream_Read_UINT32(s, nego->RequestedProtocols); WLog_DBG(TAG, "RDP_NEG_REQ: RequestedProtocol: 0x%08" PRIX32 "", nego->RequestedProtocols); nego->state = NEGO_STATE_FINAL; }
BOOL nego_process_negotiation_request(rdpNego* nego, wStream* s) { BYTE flags; UINT16 length; if (Stream_GetRemainingLength(s) < 7) return FALSE; Stream_Read_UINT8(s, flags); Stream_Read_UINT16(s, length); Stream_Read_UINT32(s, nego->RequestedProtocols); WLog_DBG(TAG, "RDP_NEG_REQ: RequestedProtocol: 0x%08" PRIX32 "", nego->RequestedProtocols); nego->state = NEGO_STATE_FINAL; return TRUE; }
{'added': [(94, 'static BOOL nego_process_negotiation_request(rdpNego* nego, wStream* s);'), (95, 'static BOOL nego_process_negotiation_response(rdpNego* nego, wStream* s);'), (96, 'static BOOL nego_process_negotiation_failure(rdpNego* nego, wStream* s);'), (621, '\t\t\t\tif (!nego_process_negotiation_response(nego, s))'), (622, '\t\t\t\t\treturn -1;'), (649, '\t\t\t\tif (!nego_process_negotiation_failure(nego, s))'), (650, '\t\t\t\t\treturn -1;'), (800, '\t\tif (!nego_process_negotiation_request(nego, s))'), (801, '\t\t\treturn FALSE;'), (924, 'BOOL nego_process_negotiation_request(rdpNego* nego, wStream* s)'), (928, ''), (929, '\tif (Stream_GetRemainingLength(s) < 7)'), (930, '\t\treturn FALSE;'), (936, '\treturn TRUE;'), (945, 'BOOL nego_process_negotiation_response(rdpNego* nego, wStream* s)'), (954, '\t\treturn FALSE;'), (961, '\treturn TRUE;'), (970, 'BOOL nego_process_negotiation_failure(rdpNego* nego, wStream* s)'), (976, '\tif (Stream_GetRemainingLength(s) < 7)'), (977, '\t\treturn FALSE;'), (1012, '\treturn TRUE;')], 'deleted': [(94, 'static void nego_process_negotiation_request(rdpNego* nego, wStream* s);'), (95, 'static void nego_process_negotiation_response(rdpNego* nego, wStream* s);'), (96, 'static void nego_process_negotiation_failure(rdpNego* nego, wStream* s);'), (621, '\t\t\t\tnego_process_negotiation_response(nego, s);'), (648, '\t\t\t\tnego_process_negotiation_failure(nego, s);'), (798, '\t\tnego_process_negotiation_request(nego, s);'), (921, 'void nego_process_negotiation_request(rdpNego* nego, wStream* s)'), (938, 'void nego_process_negotiation_response(rdpNego* nego, wStream* s)'), (947, '\t\treturn;'), (962, 'void nego_process_negotiation_failure(rdpNego* nego, wStream* s)')]}
21
10
990
4,944
10
60
1
https://github.com/FreeRDP/FreeRDP
CVE-2020-11089
CWE-125
2,964
Ap4MetaData.cpp
C++
AP4_DataAtom::AP4_DataAtom
/***************************************************************** | | AP4 - MetaData | | Copyright 2002-2008 Axiomatic Systems, LLC | | | This file is part of Bento4/AP4 (MP4 Atom Processing Library). | | Unless you have obtained Bento4 under a difference license, | this version of Bento4 is Bento4|GPL. | Bento4|GPL is free software; you can redistribute it and/or modify | it under the terms of the GNU General Public License as published by | the Free Software Foundation; either version 2, or (at your option) | any later version. | | Bento4|GPL is distributed in the hope that it will be useful, | but WITHOUT ANY WARRANTY; without even the implied warranty of | MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the | GNU General Public License for more details. | | You should have received a copy of the GNU General Public License | along with Bento4|GPL; see the file COPYING. If not, write to the | Free Software Foundation, 59 Temple Place - Suite 330, Boston, MA | 02111-1307, USA. 
| ****************************************************************/ /*---------------------------------------------------------------------- | includes +---------------------------------------------------------------------*/ #include "Ap4File.h" #include "Ap4Movie.h" #include "Ap4MetaData.h" #include "Ap4ContainerAtom.h" #include "Ap4MoovAtom.h" #include "Ap4HdlrAtom.h" #include "Ap4DataBuffer.h" #include "Ap4Utils.h" #include "Ap4String.h" /*---------------------------------------------------------------------- | dynamic cast support +---------------------------------------------------------------------*/ AP4_DEFINE_DYNAMIC_CAST_ANCHOR(AP4_3GppLocalizedStringAtom) AP4_DEFINE_DYNAMIC_CAST_ANCHOR(AP4_DcfdAtom) AP4_DEFINE_DYNAMIC_CAST_ANCHOR(AP4_DcfStringAtom) AP4_DEFINE_DYNAMIC_CAST_ANCHOR(AP4_DataAtom) AP4_DEFINE_DYNAMIC_CAST_ANCHOR(AP4_MetaDataStringAtom) /*---------------------------------------------------------------------- | metadata keys +---------------------------------------------------------------------*/ static const AP4_MetaData::KeyInfo AP4_MetaData_KeyInfos [] = { {"Name", "Name", AP4_ATOM_TYPE_cNAM, AP4_MetaData::Value::TYPE_STRING_UTF_8}, {"Artist", "Artist", AP4_ATOM_TYPE_cART, AP4_MetaData::Value::TYPE_STRING_UTF_8}, {"AlbumArtist", "Album Artist", AP4_ATOM_TYPE_aART, AP4_MetaData::Value::TYPE_STRING_UTF_8}, {"Composer", "Composer", AP4_ATOM_TYPE_cCOM, AP4_MetaData::Value::TYPE_STRING_UTF_8}, {"Writer", "Writer", AP4_ATOM_TYPE_cWRT, AP4_MetaData::Value::TYPE_STRING_UTF_8}, {"Album", "Album", AP4_ATOM_TYPE_cALB, AP4_MetaData::Value::TYPE_STRING_UTF_8}, {"GenreCode", "Genre", AP4_ATOM_TYPE_GNRE, AP4_MetaData::Value::TYPE_BINARY}, {"GenreName", "Genre", AP4_ATOM_TYPE_cGEN, AP4_MetaData::Value::TYPE_STRING_UTF_8}, {"Grouping", "Grouping", AP4_ATOM_TYPE_cGRP, AP4_MetaData::Value::TYPE_STRING_UTF_8}, {"Date", "Date", AP4_ATOM_TYPE_cDAY, AP4_MetaData::Value::TYPE_STRING_UTF_8}, {"Tool", "Encoding Tool", AP4_ATOM_TYPE_cTOO, 
AP4_MetaData::Value::TYPE_STRING_UTF_8}, {"Comment", "Comment", AP4_ATOM_TYPE_cCMT, AP4_MetaData::Value::TYPE_STRING_UTF_8}, {"Lyrics", "Lyrics", AP4_ATOM_TYPE_cLYR, AP4_MetaData::Value::TYPE_STRING_UTF_8}, {"Copyright", "Copyright", AP4_ATOM_TYPE_CPRT, AP4_MetaData::Value::TYPE_STRING_UTF_8}, {"Track", "Track Number", AP4_ATOM_TYPE_TRKN, AP4_MetaData::Value::TYPE_BINARY}, {"Disc", "Disc Number", AP4_ATOM_TYPE_DISK, AP4_MetaData::Value::TYPE_BINARY}, {"Cover", "Cover Art", AP4_ATOM_TYPE_COVR, AP4_MetaData::Value::TYPE_BINARY}, {"Description", "Description", AP4_ATOM_TYPE_DESC, AP4_MetaData::Value::TYPE_STRING_UTF_8}, {"Rating", "Rating", AP4_ATOM_TYPE_RTNG, AP4_MetaData::Value::TYPE_INT_08_BE}, {"Tempo", "Tempo", AP4_ATOM_TYPE_TMPO, AP4_MetaData::Value::TYPE_INT_16_BE}, {"Compilation", "Compilation", AP4_ATOM_TYPE_CPIL, AP4_MetaData::Value::TYPE_INT_08_BE}, {"IsGapless", "Is Gapless", AP4_ATOM_TYPE_PGAP, AP4_MetaData::Value::TYPE_INT_08_BE}, {"Title", "Title", AP4_ATOM_TYPE_TITL, AP4_MetaData::Value::TYPE_STRING_UTF_8}, {"Description", "Description", AP4_ATOM_TYPE_DSCP, AP4_MetaData::Value::TYPE_STRING_UTF_8}, {"StoreFrontID", "Store Front ID", AP4_ATOM_TYPE_sfID, AP4_MetaData::Value::TYPE_INT_32_BE}, {"FileKind", "File Kind", AP4_ATOM_TYPE_STIK, AP4_MetaData::Value::TYPE_INT_08_BE}, {"ShowName", "Show Name", AP4_ATOM_TYPE_TVSH, AP4_MetaData::Value::TYPE_STRING_UTF_8}, {"ShowSeason", "Show Season Number", AP4_ATOM_TYPE_TVSN, AP4_MetaData::Value::TYPE_INT_32_BE}, {"ShowEpisodeNumber", "Show Episode Number", AP4_ATOM_TYPE_TVES, AP4_MetaData::Value::TYPE_INT_32_BE}, {"ShowEpisodeName", "Show Episode Name", AP4_ATOM_TYPE_TVEN, AP4_MetaData::Value::TYPE_STRING_UTF_8}, {"TVNetworkName", "TV Network Name", AP4_ATOM_TYPE_TVNN, AP4_MetaData::Value::TYPE_STRING_UTF_8}, {"IsPodcast", "Is a Podcast", AP4_ATOM_TYPE_PCST, AP4_MetaData::Value::TYPE_INT_08_BE}, {"PodcastUrl", "Podcast URL", AP4_ATOM_TYPE_PURL, AP4_MetaData::Value::TYPE_BINARY}, {"PodcastGuid", "Podcast GUID", 
AP4_ATOM_TYPE_EGID, AP4_MetaData::Value::TYPE_BINARY}, {"PodcastCategory", "Podcast Category", AP4_ATOM_TYPE_CATG, AP4_MetaData::Value::TYPE_STRING_UTF_8}, {"Keywords", "Keywords", AP4_ATOM_TYPE_KEYW, AP4_MetaData::Value::TYPE_STRING_UTF_8}, {"PurchaseDate", "Purchase Date", AP4_ATOM_TYPE_PURD, AP4_MetaData::Value::TYPE_STRING_UTF_8}, {"IconUri", "Icon URI", AP4_ATOM_TYPE_ICNU, AP4_MetaData::Value::TYPE_STRING_UTF_8}, {"InfoUrl", "Info URL", AP4_ATOM_TYPE_INFU, AP4_MetaData::Value::TYPE_STRING_UTF_8}, {"CoverUri", "Cover Art URI", AP4_ATOM_TYPE_CVRU, AP4_MetaData::Value::TYPE_STRING_UTF_8}, {"LyricsUri", "Lyrics URI", AP4_ATOM_TYPE_LRCU, AP4_MetaData::Value::TYPE_STRING_UTF_8}, {"Duration", "Duration", AP4_ATOM_TYPE_DCFD, AP4_MetaData::Value::TYPE_INT_32_BE}, {"Performer", "Performer", AP4_ATOM_TYPE_PERF, AP4_MetaData::Value::TYPE_STRING_UTF_8}, {"Author", "Author", AP4_ATOM_TYPE_AUTH, AP4_MetaData::Value::TYPE_STRING_UTF_8}, }; AP4_Array<AP4_MetaData::KeyInfo> AP4_MetaData::KeyInfos( AP4_MetaData_KeyInfos, sizeof(AP4_MetaData_KeyInfos)/sizeof(KeyInfo)); AP4_Result AP4_MetaData::Initialized() { return AP4_MetaData::KeyInfos.ItemCount() != 0; } AP4_Result AP4_MetaData::Initialize() { unsigned int item_count = sizeof(AP4_MetaData_KeyInfos)/sizeof(KeyInfo); KeyInfos.SetItemCount(item_count); for (unsigned int i=0; i<item_count; i++) { KeyInfos[i] = AP4_MetaData_KeyInfos[i]; } return AP4_SUCCESS; } AP4_Result AP4_MetaData::UnInitialize() { return AP4_MetaData::KeyInfos.Clear(); } /*---------------------------------------------------------------------- | genre IDs +---------------------------------------------------------------------*/ static const char* const Ap4Id3Genres[] = { "Blues", "Classic Rock", "Country", "Dance", "Disco", "Funk", "Grunge", "Hip-Hop", "Jazz", "Metal", "New Age", "Oldies", "Other", "Pop", "R&B", "Rap", "Reggae", "Rock", "Techno", "Industrial", "Alternative", "Ska", "Death Metal", "Pranks", "Soundtrack", "Euro-Techno", "Ambient", "Trip-Hop", 
"Vocal", "Jazz+Funk", "Fusion", "Trance", "Classical", "Instrumental", "Acid", "House", "Game", "Sound Clip", "Gospel", "Noise", "AlternRock", "Bass", "Soul", "Punk", "Space", "Meditative", "Instrumental Pop", "Instrumental Rock", "Ethnic", "Gothic", "Darkwave", "Techno-Industrial", "Electronic", "Pop-Folk", "Eurodance", "Dream", "Southern Rock", "Comedy", "Cult", "Gangsta", "Top 40", "Christian Rap", "Pop/Funk", "Jungle", "Native American", "Cabaret", "New Wave", "Psychadelic", "Rave", "Showtunes", "Trailer", "Lo-Fi", "Tribal", "Acid Punk", "Acid Jazz", "Polka", "Retro", "Musical", "Rock & Roll", "Hard Rock", "Folk", "Folk-Rock", "National Folk", "Swing", "Fast Fusion", "Bebob", "Latin", "Revival", "Celtic", "Bluegrass", "Avantgarde", "Gothic Rock", "Progressive Rock", "Psychedelic Rock", "Symphonic Rock", "Slow Rock", "Big Band", "Chorus", "Easy Listening", "Acoustic", "Humour", "Speech", "Chanson", "Opera", "Chamber Music", "Sonata", "Symphony", "Booty Bass", "Primus", "Porn Groove", "Satire", "Slow Jam", "Club", "Tango", "Samba", "Folklore", "Ballad", "Power Ballad", "Rhythmic Soul", "Freestyle", "Duet", "Punk Rock", "Drum Solo", "Acapella", "Euro-House", "Dance Hall" }; static const char* Ap4StikNames[] = { "Movie", // 0 "Normal", // 1 "Audiobook", // 2 "?", // 3 "?", // 4 "Whacked Bookmark", // 5 "Music Video", // 6 "?", // 7 "?", // 8 "Short Film", // 9 "TV Show", // 10 "Booklet", // 11 "?", // 12 "?", // 13 "Ring Tone" // 14 }; /* sfID Store Front country Australia => 143460, Austria => 143445, Belgium => 143446, Canada => 143455, Denmark => 143458, Finland => 143447, France => 143442, Germany => 143443, Greece => 143448, Ireland => 143449, Italy => 143450, Japan => 143462, Luxembourg => 143451, Netherlands => 143452, Norway => 143457, Portugal => 143453, Spain => 143454, Sweden => 143456, Switzerland => 143459, UK => 143444, USA => 143441, */ /*---------------------------------------------------------------------- | constants 
+---------------------------------------------------------------------*/ const AP4_Size AP4_DATA_ATOM_MAX_SIZE = 0x40000000; /*---------------------------------------------------------------------- | 3GPP localized string atoms +---------------------------------------------------------------------*/ const AP4_Atom::Type AP4_MetaDataAtomTypeHandler::_3gppLocalizedStringTypes[] = { AP4_ATOM_TYPE_TITL, AP4_ATOM_TYPE_DSCP, AP4_ATOM_TYPE_CPRT, AP4_ATOM_TYPE_PERF, AP4_ATOM_TYPE_AUTH, AP4_ATOM_TYPE_GNRE }; const AP4_MetaDataAtomTypeHandler::TypeList AP4_MetaDataAtomTypeHandler::_3gppLocalizedStringTypeList = { _3gppLocalizedStringTypes, sizeof(_3gppLocalizedStringTypes)/sizeof(_3gppLocalizedStringTypes[0]) }; /*---------------------------------------------------------------------- | other 3GPP atoms +---------------------------------------------------------------------*/ const AP4_Atom::Type AP4_MetaDataAtomTypeHandler::_3gppOtherTypes[] = { AP4_ATOM_TYPE_RTNG, AP4_ATOM_TYPE_CLSF, AP4_ATOM_TYPE_KYWD, AP4_ATOM_TYPE_LOCI, AP4_ATOM_TYPE_ALBM, AP4_ATOM_TYPE_YRRC, }; const AP4_MetaDataAtomTypeHandler::TypeList AP4_MetaDataAtomTypeHandler::_3gppOtherTypeList = { _3gppOtherTypes, sizeof(_3gppOtherTypes)/sizeof(_3gppOtherTypes[0]) }; /*---------------------------------------------------------------------- | DCF string atoms +---------------------------------------------------------------------*/ const AP4_Atom::Type AP4_MetaDataAtomTypeHandler::DcfStringTypes[] = { AP4_ATOM_TYPE_ICNU, AP4_ATOM_TYPE_INFU, AP4_ATOM_TYPE_CVRU, AP4_ATOM_TYPE_LRCU }; const AP4_MetaDataAtomTypeHandler::TypeList AP4_MetaDataAtomTypeHandler::DcfStringTypeList = { DcfStringTypes, sizeof(DcfStringTypes)/sizeof(DcfStringTypes[0]) }; /*---------------------------------------------------------------------- | atom type lists +---------------------------------------------------------------------*/ const AP4_Atom::Type AP4_MetaDataAtomTypeHandler::IlstTypes[] = { AP4_ATOM_TYPE_dddd, AP4_ATOM_TYPE_cNAM, 
AP4_ATOM_TYPE_cART, AP4_ATOM_TYPE_cCOM, AP4_ATOM_TYPE_cWRT, AP4_ATOM_TYPE_cALB, AP4_ATOM_TYPE_cGEN, AP4_ATOM_TYPE_cGRP, AP4_ATOM_TYPE_cDAY, AP4_ATOM_TYPE_cTOO, AP4_ATOM_TYPE_cCMT, AP4_ATOM_TYPE_CPRT, AP4_ATOM_TYPE_TRKN, AP4_ATOM_TYPE_DISK, AP4_ATOM_TYPE_COVR, AP4_ATOM_TYPE_DESC, AP4_ATOM_TYPE_GNRE, AP4_ATOM_TYPE_CPIL, AP4_ATOM_TYPE_TMPO, AP4_ATOM_TYPE_RTNG, AP4_ATOM_TYPE_apID, AP4_ATOM_TYPE_cnID, AP4_ATOM_TYPE_cmID, AP4_ATOM_TYPE_atID, AP4_ATOM_TYPE_plID, AP4_ATOM_TYPE_geID, AP4_ATOM_TYPE_sfID, AP4_ATOM_TYPE_akID, AP4_ATOM_TYPE_aART, AP4_ATOM_TYPE_TVNN, AP4_ATOM_TYPE_TVSH, AP4_ATOM_TYPE_TVEN, AP4_ATOM_TYPE_TVSN, AP4_ATOM_TYPE_TVES, AP4_ATOM_TYPE_STIK, AP4_ATOM_TYPE_PGAP, AP4_ATOM_TYPE_PCST, AP4_ATOM_TYPE_PURD, AP4_ATOM_TYPE_PURL, AP4_ATOM_TYPE_EGID, AP4_ATOM_TYPE_SONM, AP4_ATOM_TYPE_SOAL, AP4_ATOM_TYPE_SOAR, AP4_ATOM_TYPE_SOAA, AP4_ATOM_TYPE_SOCO, AP4_ATOM_TYPE_SOSN }; const AP4_MetaDataAtomTypeHandler::TypeList AP4_MetaDataAtomTypeHandler::IlstTypeList = { IlstTypes, sizeof(IlstTypes)/sizeof(IlstTypes[0]) }; /*---------------------------------------------------------------------- | AP4_MetaDataAtomTypeHandler::CreateAtom +---------------------------------------------------------------------*/ AP4_Result AP4_MetaDataAtomTypeHandler::CreateAtom(AP4_Atom::Type type, AP4_UI32 size, AP4_ByteStream& stream, AP4_Atom::Type context, AP4_Atom*& atom) { atom = NULL; if (context == AP4_ATOM_TYPE_ILST) { if (IsTypeInList(type, IlstTypeList)) { m_AtomFactory->PushContext(type); atom = AP4_ContainerAtom::Create(type, size, false, false, stream, *m_AtomFactory); m_AtomFactory->PopContext(); } } else if (type == AP4_ATOM_TYPE_DATA) { if (IsTypeInList(context, IlstTypeList)) { atom = new AP4_DataAtom(size, stream); } } else if (context == AP4_ATOM_TYPE_dddd) { if (type == AP4_ATOM_TYPE_MEAN || type == AP4_ATOM_TYPE_NAME) { atom = new AP4_MetaDataStringAtom(type, size, stream); } } else if (context == AP4_ATOM_TYPE_UDTA) { if (IsTypeInList(type, _3gppLocalizedStringTypeList)) { 
atom = AP4_3GppLocalizedStringAtom::Create(type, size, stream); } else if (IsTypeInList(type, DcfStringTypeList)) { atom = AP4_DcfStringAtom::Create(type, size, stream); } else if (type == AP4_ATOM_TYPE_DCFD) { atom = AP4_DcfdAtom::Create(size, stream); } } return atom?AP4_SUCCESS:AP4_FAILURE; } /*---------------------------------------------------------------------- | AP4_MetaDataAtomTypeHandler::IsTypeInList +---------------------------------------------------------------------*/ bool AP4_MetaDataAtomTypeHandler::IsTypeInList(AP4_Atom::Type type, const AP4_MetaDataAtomTypeHandler::TypeList& list) { for (unsigned int i=0; i<list.m_Size; i++) { if (type == list.m_Types[i]) return true; } return false; } /*---------------------------------------------------------------------- | AP4_MetaData::AP4_MetaData +---------------------------------------------------------------------*/ AP4_MetaData::AP4_MetaData(AP4_File* file) { // get the file's movie AP4_Movie* movie = file->GetMovie(); // handle the movie's metadata if there is a movie in the file if (movie) { AP4_MoovAtom* moov = movie->GetMoovAtom(); if (moov == NULL) return; ParseMoov(moov); AP4_Atom* udta = moov->GetChild(AP4_ATOM_TYPE_UDTA); if (udta) { AP4_ContainerAtom* udta_container = AP4_DYNAMIC_CAST(AP4_ContainerAtom, udta); if (udta_container) { ParseUdta(udta_container, "3gpp"); } } } else { // if we don't have a movie, try to show metadata from a udta atom AP4_List<AP4_Atom>& top_level_atoms = file->GetTopLevelAtoms(); AP4_List<AP4_Atom>::Item* atom_item = top_level_atoms.FirstItem(); while (atom_item) { AP4_ContainerAtom* container = AP4_DYNAMIC_CAST(AP4_ContainerAtom, atom_item->GetData()); if (container) { // look for a udta in a DCF layout AP4_Atom* udta = container->FindChild("odhe/udta"); if (udta) { AP4_ContainerAtom* udta_container = AP4_DYNAMIC_CAST(AP4_ContainerAtom, udta); if (udta_container) { ParseUdta(udta_container, "dcf"); } } } atom_item = atom_item->GetNext(); } } } 
/*---------------------------------------------------------------------- | AP4_MetaData::ParseMoov +---------------------------------------------------------------------*/ AP4_Result AP4_MetaData::ParseMoov(AP4_MoovAtom* moov) { // look for a 'meta' atom with 'hdlr' type 'mdir' AP4_HdlrAtom* hdlr = AP4_DYNAMIC_CAST(AP4_HdlrAtom, moov->FindChild("udta/meta/hdlr")); if (hdlr == NULL || hdlr->GetHandlerType() != AP4_HANDLER_TYPE_MDIR) return AP4_ERROR_NO_SUCH_ITEM; // get the list of entries AP4_ContainerAtom* ilst = AP4_DYNAMIC_CAST(AP4_ContainerAtom, moov->FindChild("udta/meta/ilst")); if (ilst == NULL) return AP4_ERROR_NO_SUCH_ITEM; AP4_List<AP4_Atom>::Item* ilst_item = ilst->GetChildren().FirstItem(); while (ilst_item) { AP4_ContainerAtom* entry_atom = AP4_DYNAMIC_CAST(AP4_ContainerAtom, ilst_item->GetData()); if (entry_atom) { AddIlstEntries(entry_atom, "meta"); } ilst_item = ilst_item->GetNext(); } return AP4_SUCCESS; } /*---------------------------------------------------------------------- | AP4_MetaData::ParseUdta +---------------------------------------------------------------------*/ AP4_Result AP4_MetaData::ParseUdta(AP4_ContainerAtom* udta, const char* namespc) { // check that the atom is indeed a 'udta' atom if (udta->GetType() != AP4_ATOM_TYPE_UDTA) { return AP4_ERROR_INVALID_PARAMETERS; } AP4_List<AP4_Atom>::Item* udta_item = udta->GetChildren().FirstItem(); for (; udta_item; udta_item = udta_item->GetNext()) { AP4_3GppLocalizedStringAtom* _3gpp_atom = AP4_DYNAMIC_CAST(AP4_3GppLocalizedStringAtom, udta_item->GetData()); if (_3gpp_atom) { Add3GppEntry(_3gpp_atom, namespc); continue; } AP4_DcfStringAtom* dcfs_atom = AP4_DYNAMIC_CAST(AP4_DcfStringAtom, udta_item->GetData()); if (dcfs_atom) { AddDcfStringEntry(dcfs_atom, namespc); continue; } AP4_DcfdAtom* dcfd_atom = AP4_DYNAMIC_CAST(AP4_DcfdAtom, udta_item->GetData()); if (dcfd_atom) { AddDcfdEntry(dcfd_atom, namespc); } } return AP4_SUCCESS; } 
/*---------------------------------------------------------------------- | AP4_MetaData::~AP4_MetaData +---------------------------------------------------------------------*/ AP4_MetaData::~AP4_MetaData() { m_Entries.DeleteReferences(); } /*---------------------------------------------------------------------- | AP4_MetaData::ResolveKeyName +---------------------------------------------------------------------*/ AP4_Result AP4_MetaData::ResolveKeyName(AP4_Atom::Type atom_type, AP4_String& value) { const char* key_name = NULL; char four_cc[5]; // look for a match in the key infos for (unsigned int i=0; i<sizeof(AP4_MetaData_KeyInfos)/sizeof(AP4_MetaData_KeyInfos[0]); i++) { if (AP4_MetaData_KeyInfos[i].four_cc == atom_type) { key_name = AP4_MetaData_KeyInfos[i].name; break; } } if (key_name == NULL) { // this key was not found in the key infos, create a name for it AP4_FormatFourChars(four_cc, (AP4_UI32)atom_type); key_name = four_cc; } value = key_name; return AP4_SUCCESS; } /*---------------------------------------------------------------------- | AP4_MetaData::AddIlstEntries +---------------------------------------------------------------------*/ AP4_Result AP4_MetaData::AddIlstEntries(AP4_ContainerAtom* atom, const char* namespc) { AP4_MetaData::Value* value = NULL; if (atom->GetType() == AP4_ATOM_TYPE_dddd) { // look for the namespace AP4_MetaDataStringAtom* mean = static_cast<AP4_MetaDataStringAtom*>(atom->GetChild(AP4_ATOM_TYPE_MEAN)); if (mean == NULL) return AP4_ERROR_INVALID_FORMAT; // look for the name AP4_MetaDataStringAtom* name = static_cast<AP4_MetaDataStringAtom*>(atom->GetChild(AP4_ATOM_TYPE_NAME)); if (name == NULL) return AP4_ERROR_INVALID_FORMAT; // get the value AP4_DataAtom* data_atom = static_cast<AP4_DataAtom*>(atom->GetChild(AP4_ATOM_TYPE_DATA)); if (data_atom == NULL) return AP4_ERROR_INVALID_FORMAT; value = new AP4_AtomMetaDataValue(data_atom, atom->GetType()); return m_Entries.Add(new Entry(name->GetValue().GetChars(), 
mean->GetValue().GetChars(), value)); } else { const char* key_name = NULL; char four_cc[5]; // convert the atom type to a name AP4_FormatFourChars(four_cc, (AP4_UI32)atom->GetType()); key_name = four_cc; // add one entry for each data atom AP4_List<AP4_Atom>::Item* data_item = atom->GetChildren().FirstItem(); while (data_item) { AP4_Atom* item_atom = data_item->GetData(); if (item_atom->GetType() == AP4_ATOM_TYPE_DATA) { AP4_DataAtom* data_atom = static_cast<AP4_DataAtom*>(item_atom); value = new AP4_AtomMetaDataValue(data_atom, atom->GetType()); m_Entries.Add(new Entry(key_name, namespc, value)); } data_item = data_item->GetNext(); } return AP4_SUCCESS; } } /*---------------------------------------------------------------------- | AP4_MetaData::Add3GppEntry +---------------------------------------------------------------------*/ AP4_Result AP4_MetaData::Add3GppEntry(AP4_3GppLocalizedStringAtom* atom, const char* namespc) { AP4_String key_name; ResolveKeyName(atom->GetType(), key_name); const char* language = NULL; if (atom->GetLanguage()[0]) { language = atom->GetLanguage(); } AP4_MetaData::Value* value = new AP4_StringMetaDataValue(atom->GetValue().GetChars(), language); m_Entries.Add(new Entry(key_name.GetChars(), namespc, value)); return AP4_SUCCESS; } /*---------------------------------------------------------------------- | AP4_MetaData::AddDcfStringEntry +---------------------------------------------------------------------*/ AP4_Result AP4_MetaData::AddDcfStringEntry(AP4_DcfStringAtom* atom, const char* namespc) { AP4_String key_name; ResolveKeyName(atom->GetType(), key_name); AP4_MetaData::Value* value = new AP4_StringMetaDataValue(atom->GetValue().GetChars()); m_Entries.Add(new Entry(key_name.GetChars(), namespc, value)); return AP4_SUCCESS; } /*---------------------------------------------------------------------- | AP4_MetaData::AddDcfdEntry +---------------------------------------------------------------------*/ AP4_Result 
AP4_MetaData::AddDcfdEntry(AP4_DcfdAtom* atom, const char* namespc) { AP4_String key_name; ResolveKeyName(atom->GetType(), key_name); AP4_MetaData::Value* value = new AP4_IntegerMetaDataValue(AP4_MetaData::Value::TYPE_INT_32_BE, atom->GetDuration()); m_Entries.Add(new Entry(key_name.GetChars(), namespc, value)); return AP4_SUCCESS; } /*---------------------------------------------------------------------- | AP4_MetaData::Value::MapDataTypeToCategory +---------------------------------------------------------------------*/ AP4_MetaData::Value::TypeCategory AP4_MetaData::Value::MapTypeToCategory(Type type) { switch (type) { case AP4_MetaData::Value::TYPE_INT_08_BE: case AP4_MetaData::Value::TYPE_INT_16_BE: case AP4_MetaData::Value::TYPE_INT_32_BE: return AP4_MetaData::Value::TYPE_CATEGORY_INTEGER; case AP4_MetaData::Value::TYPE_STRING_UTF_8: case AP4_MetaData::Value::TYPE_STRING_UTF_16: case AP4_MetaData::Value::TYPE_STRING_PASCAL: return AP4_MetaData::Value::TYPE_CATEGORY_STRING; case AP4_MetaData::Value::TYPE_FLOAT_32_BE: case AP4_MetaData::Value::TYPE_FLOAT_64_BE: return AP4_MetaData::Value::TYPE_CATEGORY_FLOAT; default: return AP4_MetaData::Value::TYPE_CATEGORY_BINARY; } } /*---------------------------------------------------------------------- | AP4_MetaData::Value::GetTypeCategory +---------------------------------------------------------------------*/ AP4_MetaData::Value::TypeCategory AP4_MetaData::Value::GetTypeCategory() const { return MapTypeToCategory(m_Type); } /*---------------------------------------------------------------------- | AP4_MetaData::Entry::ToAtom +---------------------------------------------------------------------*/ AP4_Result AP4_MetaData::Entry::ToAtom(AP4_Atom*& atom) const { atom = NULL; if (m_Value == NULL) { return AP4_ERROR_INVALID_PARAMETERS; } if (m_Key.GetNamespace() == "meta") { // convert the name into an atom type if (m_Key.GetName().GetLength() != 4) { // the name is not in the right format return 
AP4_ERROR_INVALID_PARAMETERS; } AP4_Atom::Type atom_type = AP4_Atom::TypeFromString(m_Key.GetName().GetChars()); // create a container atom for the data AP4_ContainerAtom* container = new AP4_ContainerAtom(atom_type); // add the data atom AP4_DataAtom* data = new AP4_DataAtom(*m_Value); container->AddChild(data); atom = container; return AP4_SUCCESS; } else if (m_Key.GetNamespace() == "dcf") { // convert the name into an atom type if (m_Key.GetName().GetLength() != 4) { // the name is not in the right format return AP4_ERROR_INVALID_PARAMETERS; } AP4_Atom::Type atom_type = AP4_Atom::TypeFromString(m_Key.GetName().GetChars()); if (AP4_MetaDataAtomTypeHandler::IsTypeInList(atom_type, AP4_MetaDataAtomTypeHandler::DcfStringTypeList)) { AP4_String atom_value = m_Value->ToString(); atom = new AP4_DcfStringAtom(atom_type, atom_value.GetChars()); return AP4_SUCCESS; } else if (AP4_MetaDataAtomTypeHandler::IsTypeInList(atom_type, AP4_MetaDataAtomTypeHandler::_3gppLocalizedStringTypeList)) { AP4_String atom_value = m_Value->ToString(); const char* language = "eng"; // default if (m_Value->GetLanguage().GetLength() != 0) { language = m_Value->GetLanguage().GetChars(); } atom = new AP4_3GppLocalizedStringAtom(atom_type, language, atom_value.GetChars()); return AP4_SUCCESS; } else if (atom_type == AP4_ATOM_TYPE_DCFD) { atom = new AP4_DcfdAtom((AP4_UI32)m_Value->ToInteger()); return AP4_SUCCESS; } // not supported return AP4_ERROR_NOT_SUPPORTED; } else { // create a '----' atom AP4_ContainerAtom* container = new AP4_ContainerAtom(AP4_ATOM_TYPE_dddd); // add a 'mean' string container->AddChild(new AP4_MetaDataStringAtom(AP4_ATOM_TYPE_MEAN, m_Key.GetNamespace().GetChars())); // add a 'name' string container->AddChild(new AP4_MetaDataStringAtom(AP4_ATOM_TYPE_NAME, m_Key.GetName().GetChars())); // add the data atom AP4_DataAtom* data = new AP4_DataAtom(*m_Value); container->AddChild(data); atom = container; return AP4_SUCCESS; } // unreachable - return AP4_ERROR_NOT_SUPPORTED; } 
/*---------------------------------------------------------------------- | AP4_MetaData::Entry::FindInIlst +---------------------------------------------------------------------*/ AP4_ContainerAtom* AP4_MetaData::Entry::FindInIlst(AP4_ContainerAtom* ilst) const { if (m_Key.GetNamespace() == "meta") { AP4_Atom::Type atom_type = AP4_Atom::TypeFromString(m_Key.GetName().GetChars()); return AP4_DYNAMIC_CAST(AP4_ContainerAtom, ilst->GetChild(atom_type)); } else { AP4_List<AP4_Atom>::Item* ilst_item = ilst->GetChildren().FirstItem(); while (ilst_item) { AP4_ContainerAtom* entry_atom = AP4_DYNAMIC_CAST(AP4_ContainerAtom, ilst_item->GetData()); if (entry_atom) { AP4_MetaDataStringAtom* mean = static_cast<AP4_MetaDataStringAtom*>(entry_atom->GetChild(AP4_ATOM_TYPE_MEAN)); AP4_MetaDataStringAtom* name = static_cast<AP4_MetaDataStringAtom*>(entry_atom->GetChild(AP4_ATOM_TYPE_NAME)); if (mean && name && mean->GetValue() == m_Key.GetNamespace() && name->GetValue() == m_Key.GetName()) { return entry_atom; } } ilst_item = ilst_item->GetNext(); } } // not found return NULL; } /*---------------------------------------------------------------------- | AP4_MetaData::Entry::AddToFileIlst +---------------------------------------------------------------------*/ AP4_Result AP4_MetaData::Entry::AddToFileIlst(AP4_File& file, AP4_Ordinal index) { // check that we have a correct entry if (m_Value == NULL) return AP4_ERROR_INVALID_STATE; // convert the entry into an atom AP4_Atom* atom; AP4_Result result = ToAtom(atom); if (AP4_FAILED(result)) return result; AP4_ContainerAtom* entry_atom = AP4_DYNAMIC_CAST(AP4_ContainerAtom, atom); if (entry_atom == NULL) { return AP4_ERROR_INVALID_FORMAT; } // look for the 'moov' AP4_Movie* movie = file.GetMovie(); if (movie == NULL) return AP4_ERROR_INVALID_FORMAT; AP4_MoovAtom* moov = movie->GetMoovAtom(); if (moov == NULL) return AP4_ERROR_INVALID_FORMAT; // look for 'udta', and create if it does not exist AP4_ContainerAtom* udta = 
AP4_DYNAMIC_CAST(AP4_ContainerAtom, moov->FindChild("udta", true)); if (udta == NULL) return AP4_ERROR_INTERNAL; // look for 'meta', and create if it does not exist ('meta' is a FULL atom) AP4_ContainerAtom* meta = AP4_DYNAMIC_CAST(AP4_ContainerAtom, udta->FindChild("meta", true, true)); if (meta == NULL) return AP4_ERROR_INTERNAL; // look for a 'hdlr' atom type 'mdir' AP4_HdlrAtom* hdlr = AP4_DYNAMIC_CAST(AP4_HdlrAtom, meta->FindChild("hdlr")); if (hdlr == NULL) { hdlr = new AP4_HdlrAtom(AP4_HANDLER_TYPE_MDIR, ""); meta->AddChild(hdlr); } else { if (hdlr->GetHandlerType() != AP4_HANDLER_TYPE_MDIR) { return AP4_ERROR_INVALID_FORMAT; } } // get/create the list of entries AP4_ContainerAtom* ilst = AP4_DYNAMIC_CAST(AP4_ContainerAtom, meta->FindChild("ilst", true)); if (ilst == NULL) return AP4_ERROR_INTERNAL; // look if there is already a container for this entry AP4_ContainerAtom* existing = FindInIlst(ilst); if (existing == NULL) { // just add the one we have ilst->AddChild(entry_atom); } else { // add the entry's data to the existing entry AP4_DataAtom* data_atom = AP4_DYNAMIC_CAST(AP4_DataAtom, entry_atom->GetChild(AP4_ATOM_TYPE_DATA)); if (data_atom == NULL) return AP4_ERROR_INTERNAL; entry_atom->RemoveChild(data_atom); existing->AddChild(data_atom, index); delete entry_atom; } return AP4_SUCCESS; } /*---------------------------------------------------------------------- | AP4_MetaData::Entry::AddToFileDcf +---------------------------------------------------------------------*/ AP4_Result AP4_MetaData::Entry::AddToFileDcf(AP4_File& file, AP4_Ordinal index) { // check that we have a correct entry if (m_Value == NULL) return AP4_ERROR_INVALID_STATE; // look for 'odrm/odhe' AP4_ContainerAtom* odhe = AP4_DYNAMIC_CAST(AP4_ContainerAtom, file.FindChild("odrm/odhe")); if (odhe == NULL) return AP4_ERROR_NO_SUCH_ITEM; // get/create the list of entries AP4_ContainerAtom* udta = AP4_DYNAMIC_CAST(AP4_ContainerAtom, odhe->FindChild("udta", true)); if (udta == NULL) return 
AP4_ERROR_INTERNAL; // convert the entry into an atom AP4_Atom* data_atom; AP4_Result result = ToAtom(data_atom); if (AP4_FAILED(result)) return result; // add the entry's data to the container return udta->AddChild(data_atom, index); } /*---------------------------------------------------------------------- | AP4_MetaData::Entry::AddToFile +---------------------------------------------------------------------*/ AP4_Result AP4_MetaData::Entry::AddToFile(AP4_File& file, AP4_Ordinal index) { // check that we have a correct entry if (m_Value == NULL) return AP4_ERROR_INVALID_STATE; // check the namespace of the key to know where to add the atom if (m_Key.GetNamespace() == "meta") { return AddToFileIlst(file, index); } else if (m_Key.GetNamespace() == "dcf") { return AddToFileDcf(file, index); } else { // custom namespace return AddToFileIlst(file, index); } } /*---------------------------------------------------------------------- | AP4_MetaData::Entry::RemoveFromFileIlst +---------------------------------------------------------------------*/ AP4_Result AP4_MetaData::Entry::RemoveFromFileIlst(AP4_File& file, AP4_Ordinal index) { // look for the 'moov' AP4_Movie* movie = file.GetMovie(); if (movie == NULL) return AP4_ERROR_INVALID_FORMAT; AP4_MoovAtom* moov = movie->GetMoovAtom(); if (moov == NULL) return AP4_ERROR_INVALID_FORMAT; // look for 'udta/meta/ilst' AP4_ContainerAtom* ilst = AP4_DYNAMIC_CAST(AP4_ContainerAtom, moov->FindChild("udta/meta/ilst")); if (ilst == NULL) return AP4_ERROR_NO_SUCH_ITEM; // look if there is already a container for this entry AP4_ContainerAtom* existing = FindInIlst(ilst); if (existing == NULL) return AP4_ERROR_NO_SUCH_ITEM; // remove the data atom in the entry AP4_Result result = existing->DeleteChild(AP4_ATOM_TYPE_DATA, index); if (AP4_FAILED(result)) return result; // cleanup if (existing->GetType() == AP4_ATOM_TYPE_dddd) { // custom entry: if there are no more 'data' children, remove the entry if 
(existing->GetChild(AP4_ATOM_TYPE_DATA) == NULL) { ilst->RemoveChild(existing); delete existing; } } else { // normal entry: if the entry is empty, remove it if (existing->GetChildren().ItemCount() == 0) { ilst->RemoveChild(existing); delete existing; } } return AP4_SUCCESS; } /*---------------------------------------------------------------------- | AP4_MetaData::Entry::RemoveFromFileDcf +---------------------------------------------------------------------*/ AP4_Result AP4_MetaData::Entry::RemoveFromFileDcf(AP4_File& file, AP4_Ordinal index) { // look for 'odrm/odhe/udta' AP4_ContainerAtom* udta = AP4_DYNAMIC_CAST(AP4_ContainerAtom, file.FindChild("odrm/odhe/udta")); if (udta == NULL) return AP4_ERROR_NO_SUCH_ITEM; // remove the data atom in the entry AP4_UI32 type = AP4_BytesToUInt32BE((const unsigned char*)m_Key.GetName().GetChars()); AP4_Result result = udta->DeleteChild(type, index); if (AP4_FAILED(result)) return result; return AP4_SUCCESS; } /*---------------------------------------------------------------------- | AP4_MetaData::Entry::RemoveFromFile +---------------------------------------------------------------------*/ AP4_Result AP4_MetaData::Entry::RemoveFromFile(AP4_File& file, AP4_Ordinal index) { // check the namespace of the key to know where to add the atom if (m_Key.GetNamespace() == "meta") { return RemoveFromFileIlst(file, index); } else if (m_Key.GetNamespace() == "dcf") { return RemoveFromFileDcf(file, index); } else { // custom namespace return RemoveFromFileIlst(file, index); } } /*---------------------------------------------------------------------- | AP4_StringMetaDataValue::ToString +---------------------------------------------------------------------*/ AP4_String AP4_StringMetaDataValue::ToString() const { return m_Value; } /*---------------------------------------------------------------------- | AP4_StringMetaDataValue::ToBytes +---------------------------------------------------------------------*/ AP4_Result 
AP4_StringMetaDataValue::ToBytes(AP4_DataBuffer& /* bytes */) const { return AP4_ERROR_NOT_SUPPORTED; } /*---------------------------------------------------------------------- | AP4_StringMetaDataValue::ToInteger +---------------------------------------------------------------------*/ long AP4_StringMetaDataValue::ToInteger() const { return 0; } /*---------------------------------------------------------------------- | AP4_IntegerMetaDataValue::ToString +---------------------------------------------------------------------*/ AP4_String AP4_IntegerMetaDataValue::ToString() const { char value[16]; AP4_FormatString(value, sizeof(value), "%ld", m_Value); return AP4_String(value); } /*---------------------------------------------------------------------- | AP4_IntegerMetaDataValue::ToBytes +---------------------------------------------------------------------*/ AP4_Result AP4_IntegerMetaDataValue::ToBytes(AP4_DataBuffer& /* bytes */) const { return AP4_ERROR_NOT_SUPPORTED; } /*---------------------------------------------------------------------- | AP4_IntegerMetaDataValue::ToInteger +---------------------------------------------------------------------*/ long AP4_IntegerMetaDataValue::ToInteger() const { return m_Value; } /*---------------------------------------------------------------------- | AP4_BinaryMetaDataValue::ToString +---------------------------------------------------------------------*/ AP4_String AP4_BinaryMetaDataValue::ToString() const { return AP4_String(); // not supported } /*---------------------------------------------------------------------- | AP4_BinaryMetaDataValue::ToBytes +---------------------------------------------------------------------*/ AP4_Result AP4_BinaryMetaDataValue::ToBytes(AP4_DataBuffer& bytes) const { bytes.SetDataSize(m_Value.GetDataSize()); AP4_CopyMemory(bytes.UseData(), m_Value.GetData(), m_Value.GetDataSize()); return AP4_SUCCESS; } /*---------------------------------------------------------------------- | 
AP4_BinaryMetaDataValue::ToInteger +---------------------------------------------------------------------*/ long AP4_BinaryMetaDataValue::ToInteger() const { return 0; // NOT SUPPORTED } /*---------------------------------------------------------------------- | AP4_AtomMetaDataValue::AP4_AtomMetaDataValue +---------------------------------------------------------------------*/ AP4_AtomMetaDataValue::AP4_AtomMetaDataValue(AP4_DataAtom* atom, AP4_UI32 parent_type) : Value(atom->GetValueType()), m_DataAtom(atom) { switch (parent_type) { case AP4_ATOM_TYPE_GNRE: m_Meaning = MEANING_ID3_GENRE; break; case AP4_ATOM_TYPE_CPIL: m_Meaning = MEANING_BOOLEAN; break; case AP4_ATOM_TYPE_PGAP: case AP4_ATOM_TYPE_PCST: m_Meaning = MEANING_BOOLEAN; break; case AP4_ATOM_TYPE_STIK: m_Meaning = MEANING_FILE_KIND; break; case AP4_ATOM_TYPE_PURL: case AP4_ATOM_TYPE_EGID: m_Meaning = MEANING_BINARY_ENCODED_CHARS; break; default: break; } } /*---------------------------------------------------------------------- | AP4_AtomMetaDataValue::ToString +---------------------------------------------------------------------*/ AP4_String AP4_AtomMetaDataValue::ToString() const { char string[256] = ""; AP4_MetaData::Value::Type value_type = m_DataAtom->GetValueType(); switch (AP4_MetaData::Value::MapTypeToCategory(value_type)) { case AP4_MetaData::Value::TYPE_CATEGORY_INTEGER: { long value; if (AP4_SUCCEEDED(m_DataAtom->LoadInteger(value))) { if (m_Meaning == MEANING_BOOLEAN) { if (value) { return "True"; } else { return "False"; } } else if (m_Meaning == MEANING_FILE_KIND) { if (value >= 0 && ((unsigned int)value) <= sizeof(Ap4StikNames)/sizeof(Ap4StikNames[0])) { AP4_FormatString(string, sizeof(string), "(%ld) %s", value, Ap4StikNames[value]); } else { return "Unknown"; } } else { AP4_FormatString(string, sizeof(string), "%ld", value); } } return AP4_String((const char*)string); break; } case AP4_MetaData::Value::TYPE_CATEGORY_STRING: { AP4_String* category_string; if 
(AP4_SUCCEEDED(m_DataAtom->LoadString(category_string))) { AP4_String result(*category_string); delete category_string; return result; } break; } case AP4_MetaData::Value::TYPE_CATEGORY_BINARY: { AP4_DataBuffer data; if (AP4_SUCCEEDED(m_DataAtom->LoadBytes(data))) { if (m_Meaning == MEANING_ID3_GENRE && data.GetDataSize() == 2) { unsigned int genre = (data.GetData()[0])*256+data.GetData()[1]; if (genre >= 1 && genre <= sizeof(Ap4Id3Genres)/sizeof(Ap4Id3Genres[0])) { AP4_FormatString(string, sizeof(string), "(%d) %s", genre, Ap4Id3Genres[genre-1]); return AP4_String((const char*)string); } else { return "Unknown"; } } else if (m_Meaning == MEANING_BINARY_ENCODED_CHARS) { AP4_String result; result.Assign((const char*)data.GetData(), data.GetDataSize()); return result; } else { unsigned int dump_length = data.GetDataSize(); bool truncate = false; if (dump_length > 16) { dump_length = 16; truncate = true; } char* out = string; for (unsigned int i=0; i<dump_length; i++) { AP4_FormatString(out, sizeof(string)-(out-string), "%02x ", data.GetData()[i]); out += 3; } if (truncate) { *out++='.'; *out++='.'; *out++='.'; *out++=' '; } AP4_FormatString(out, sizeof(string)-(out-string), "[%d bytes]", (int)data.GetDataSize()); } } return AP4_String(string); } default: return AP4_String(); } return AP4_String(); } /*---------------------------------------------------------------------- | AP4_AtomMetaDataValue::ToBytes +---------------------------------------------------------------------*/ AP4_Result AP4_AtomMetaDataValue::ToBytes(AP4_DataBuffer& bytes) const { return m_DataAtom->LoadBytes(bytes); } /*---------------------------------------------------------------------- | AP4_AtomMetaDataValue::ToInteger +---------------------------------------------------------------------*/ long AP4_AtomMetaDataValue::ToInteger() const { long value; if (AP4_SUCCEEDED(m_DataAtom->LoadInteger(value))) { return value; } else { return 0; } } 
/*---------------------------------------------------------------------- | AP4_DataAtom::AP4_DataAtom +---------------------------------------------------------------------*/ AP4_DataAtom::AP4_DataAtom(const AP4_MetaData::Value& value) : AP4_Atom(AP4_ATOM_TYPE_DATA, AP4_ATOM_HEADER_SIZE), m_DataType(DATA_TYPE_BINARY) { AP4_MemoryByteStream* memory = new AP4_MemoryByteStream(); AP4_Size payload_size = 8; m_Source = memory; switch (value.GetType()) { case AP4_MetaData::Value::TYPE_STRING_UTF_8: { m_DataType = DATA_TYPE_STRING_UTF_8; AP4_String string_value = value.ToString(); if (string_value.GetLength()) { memory->Write(string_value.GetChars(), string_value.GetLength()); } payload_size += string_value.GetLength(); break; } case AP4_MetaData::Value::TYPE_INT_08_BE: { m_DataType = DATA_TYPE_SIGNED_INT_BE; AP4_UI08 int_value = (AP4_UI08)value.ToInteger(); memory->Write(&int_value, 1); payload_size += 1; break; } case AP4_MetaData::Value::TYPE_INT_16_BE: { m_DataType = DATA_TYPE_SIGNED_INT_BE; AP4_UI16 int_value = (AP4_UI16)value.ToInteger(); memory->Write(&int_value, 2); payload_size += 2; break; } case AP4_MetaData::Value::TYPE_INT_32_BE: { m_DataType = DATA_TYPE_SIGNED_INT_BE; AP4_UI32 int_value = (AP4_UI32)value.ToInteger(); memory->Write(&int_value, 4); payload_size += 4; break; } case AP4_MetaData::Value::TYPE_JPEG: m_DataType = DATA_TYPE_JPEG; // FALLTHROUGH case AP4_MetaData::Value::TYPE_GIF: if (m_DataType == DATA_TYPE_BINARY) m_DataType = DATA_TYPE_GIF; // FALLTHROUGH case AP4_MetaData::Value::TYPE_BINARY: { AP4_DataBuffer buffer; value.ToBytes(buffer); if (buffer.GetDataSize()) { memory->Write(buffer.GetData(), buffer.GetDataSize()); } payload_size += buffer.GetDataSize(); break; } default: break; } const AP4_String& language = value.GetLanguage(); if (language == "en") { m_DataLang = LANGUAGE_ENGLISH; } else { // default m_DataLang = LANGUAGE_ENGLISH; } m_Size32 += payload_size; } /*---------------------------------------------------------------------- | 
AP4_DataAtom::AP4_DataAtom +---------------------------------------------------------------------*/ AP4_DataAtom::AP4_DataAtom(AP4_UI32 size, AP4_ByteStream& stream) : AP4_Atom(AP4_ATOM_TYPE_DATA, size) { if (size < AP4_ATOM_HEADER_SIZE+8) return; AP4_UI32 i; stream.ReadUI32(i); m_DataType = (DataType)i; stream.ReadUI32(i); m_DataLang = (DataLang)i; // the stream for the data is a substream of this source AP4_Position data_offset; stream.Tell(data_offset); AP4_Size data_size = size-AP4_ATOM_HEADER_SIZE-8; m_Source = new AP4_SubStream(stream, data_offset, data_size); } /*---------------------------------------------------------------------- | AP4_DataAtom::~AP4_DataAtom +---------------------------------------------------------------------*/ AP4_DataAtom::~AP4_DataAtom() { delete(m_Source); } /*---------------------------------------------------------------------- | AP4_DataAtom::GetValueType +---------------------------------------------------------------------*/ AP4_MetaData::Value::Type AP4_DataAtom::GetValueType() { switch (m_DataType) { case DATA_TYPE_BINARY: return AP4_MetaData::Value::TYPE_BINARY; case DATA_TYPE_SIGNED_INT_BE: switch (m_Size32-16) { case 1: return AP4_MetaData::Value::TYPE_INT_08_BE; case 2: return AP4_MetaData::Value::TYPE_INT_16_BE; case 4: return AP4_MetaData::Value::TYPE_INT_32_BE; default: return AP4_MetaData::Value::TYPE_BINARY; } break; case DATA_TYPE_STRING_UTF_8: return AP4_MetaData::Value::TYPE_STRING_UTF_8; case DATA_TYPE_STRING_UTF_16: return AP4_MetaData::Value::TYPE_STRING_UTF_16; case DATA_TYPE_STRING_PASCAL: return AP4_MetaData::Value::TYPE_STRING_PASCAL; case DATA_TYPE_GIF: return AP4_MetaData::Value::TYPE_GIF; case DATA_TYPE_JPEG: return AP4_MetaData::Value::TYPE_JPEG; default: return AP4_MetaData::Value::TYPE_BINARY; } // unreachable - return AP4_MetaData::Value::TYPE_BINARY; } /*---------------------------------------------------------------------- | AP4_DataAtom::WriteFields 
+---------------------------------------------------------------------*/ AP4_Result AP4_DataAtom::WriteFields(AP4_ByteStream& stream) { stream.WriteUI32(m_DataType); stream.WriteUI32(m_DataLang); if (m_Source) { AP4_LargeSize size = 0; m_Source->GetSize(size); m_Source->Seek(0); m_Source->CopyTo(stream, size); } return AP4_SUCCESS; } /*---------------------------------------------------------------------- | AP4_DataAtom::InspectFields +---------------------------------------------------------------------*/ AP4_Result AP4_DataAtom::InspectFields(AP4_AtomInspector& inspector) { inspector.AddField("type", m_DataType); inspector.AddField("lang", m_DataLang); if (m_DataType == DATA_TYPE_STRING_UTF_8) { AP4_String* str; if (AP4_SUCCEEDED(LoadString(str))) { inspector.AddField("value", str->GetChars()); delete str; } } else if (m_DataType == DATA_TYPE_SIGNED_INT_BE) { long value; if (AP4_SUCCEEDED(LoadInteger(value))) { inspector.AddField("value", value); } } return AP4_SUCCESS; } /*---------------------------------------------------------------------- | AP4_DataAtom::LoadString +---------------------------------------------------------------------*/ AP4_Result AP4_DataAtom::LoadString(AP4_String*& string) { if (m_Source == NULL) { string = new AP4_String(); return AP4_SUCCESS; } else { // create a string with enough capactiy for the data AP4_LargeSize size = 0; m_Source->GetSize(size); if (size > AP4_DATA_ATOM_MAX_SIZE) return AP4_ERROR_OUT_OF_RANGE; string = new AP4_String((AP4_Size)size); // read from the start of the stream m_Source->Seek(0); AP4_Result result = m_Source->Read(string->UseChars(), (AP4_Size)size); if (AP4_FAILED(result)) { delete string; string = NULL; } return result; } } /*---------------------------------------------------------------------- | AP4_DataAtom::LoadBytes +---------------------------------------------------------------------*/ AP4_Result AP4_DataAtom::LoadBytes(AP4_DataBuffer& bytes) { if (m_Source == NULL) { bytes.SetDataSize(0); return 
AP4_SUCCESS; } AP4_LargeSize size = 0; m_Source->GetSize(size); if (size > AP4_DATA_ATOM_MAX_SIZE) return AP4_ERROR_OUT_OF_RANGE; bytes.SetDataSize((AP4_Size)size); m_Source->Seek(0); AP4_Result result = m_Source->Read(bytes.UseData(), (AP4_Size)size); if (AP4_FAILED(result)) { bytes.SetDataSize(0); } return result; } /*---------------------------------------------------------------------- | AP4_DataAtom::LoadInteger +---------------------------------------------------------------------*/ AP4_Result AP4_DataAtom::LoadInteger(long& value) { AP4_Result result = AP4_FAILURE; value = 0; if (m_Source == NULL) return AP4_SUCCESS; AP4_LargeSize size = 0; m_Source->GetSize(size); if (size > 4) { return AP4_ERROR_OUT_OF_RANGE; } unsigned char bytes[4]; m_Source->Seek(0); m_Source->Read(bytes, (AP4_Size)size); result = AP4_SUCCESS; switch (size) { case 1: value = bytes[0]; break; case 2: value = AP4_BytesToInt16BE(bytes); break; case 4: value = AP4_BytesToInt32BE(bytes); break; default: value = 0; result = AP4_ERROR_INVALID_FORMAT; break; } return result; } /*---------------------------------------------------------------------- | AP4_MetaDataStringAtom::AP4_MetaDataStringAtom +---------------------------------------------------------------------*/ AP4_MetaDataStringAtom::AP4_MetaDataStringAtom(Type type, const char* value) : AP4_Atom(type, AP4_ATOM_HEADER_SIZE), m_Reserved(0), m_Value(value) { m_Size32 += 4+m_Value.GetLength(); } /*---------------------------------------------------------------------- | AP4_MetaDataStringAtom::AP4_MetaDataStringAtom +---------------------------------------------------------------------*/ AP4_MetaDataStringAtom::AP4_MetaDataStringAtom(Type type, AP4_UI32 size, AP4_ByteStream& stream) : AP4_Atom(type, size), m_Reserved(0), m_Value((AP4_Size)(size-AP4_ATOM_HEADER_SIZE-4)) { stream.ReadUI32(m_Reserved); stream.Read(m_Value.UseChars(), m_Value.GetLength()); } /*---------------------------------------------------------------------- | 
AP4_MetaDataStringAtom::WriteFields +---------------------------------------------------------------------*/ AP4_Result AP4_MetaDataStringAtom::WriteFields(AP4_ByteStream& stream) { stream.WriteUI32(m_Reserved); return stream.Write(m_Value.GetChars(), m_Value.GetLength()); } /*---------------------------------------------------------------------- | AP4_MetaDataStringAtom::InspectFields +---------------------------------------------------------------------*/ AP4_Result AP4_MetaDataStringAtom::InspectFields(AP4_AtomInspector& inspector) { inspector.AddField("value", m_Value.GetChars()); return AP4_SUCCESS; } /*---------------------------------------------------------------------- | AP4_3GppLocalizedStringAtom::Create +---------------------------------------------------------------------*/ AP4_3GppLocalizedStringAtom* AP4_3GppLocalizedStringAtom::Create(Type type, AP4_UI32 size, AP4_ByteStream& stream) { AP4_UI08 version; AP4_UI32 flags; if (AP4_FAILED(AP4_Atom::ReadFullHeader(stream, version, flags))) return NULL; if (version != 0) return NULL; return new AP4_3GppLocalizedStringAtom(type, size, version, flags, stream); } /*---------------------------------------------------------------------- | AP4_3GppLocalizedStringAtom::AP4_3GppLocalizedStringAtom +---------------------------------------------------------------------*/ AP4_3GppLocalizedStringAtom::AP4_3GppLocalizedStringAtom(Type type, const char* language, const char* value) : AP4_Atom(type, AP4_FULL_ATOM_HEADER_SIZE+2, 0, 0), m_Value(value) { m_Language[0] = language[0]; m_Language[1] = language[1]; m_Language[2] = language[2]; m_Language[3] = language[3]; m_Size32 += m_Value.GetLength()+1; } /*---------------------------------------------------------------------- | AP4_3GppLocalizedStringAtom::AP4_3GppLocalizedStringAtom +---------------------------------------------------------------------*/ AP4_3GppLocalizedStringAtom::AP4_3GppLocalizedStringAtom(Type type, AP4_UI32 size, AP4_UI08 version, AP4_UI32 flags, 
AP4_ByteStream& stream) : AP4_Atom(type, size, version, flags) { // read the language code AP4_UI16 packed_language; stream.ReadUI16(packed_language); m_Language[0] = 0x60+((packed_language>>10)&0x1F); m_Language[1] = 0x60+((packed_language>> 5)&0x1F); m_Language[2] = 0x60+((packed_language )&0x1F); m_Language[3] = '\0'; // read the value (should be a NULL-terminated string, but we'll // allow for strings that are not terminated) if (size > AP4_FULL_ATOM_HEADER_SIZE+2) { AP4_UI32 value_size = size-(AP4_FULL_ATOM_HEADER_SIZE+2); char* value = new char[value_size]; stream.Read(value, value_size); m_Value.Assign(value, value_size); delete[] value; } } /*---------------------------------------------------------------------- | AP4_3GppLocalizedStringAtom::WriteFields +---------------------------------------------------------------------*/ AP4_Result AP4_3GppLocalizedStringAtom::WriteFields(AP4_ByteStream& stream) { AP4_UI16 packed_language = ((m_Language[0]-0x60)<<10) | ((m_Language[1]-0x60)<< 5) | ((m_Language[2]-0x60)); stream.WriteUI16(packed_language); AP4_Size payload_size = (AP4_UI32)GetSize()-GetHeaderSize(); if (payload_size < 2) return AP4_ERROR_INVALID_FORMAT; AP4_Size value_size = m_Value.GetLength()+1; if (value_size > payload_size-2) { value_size = payload_size-2; } stream.Write(m_Value.GetChars(), value_size); for (unsigned int i=value_size; i<payload_size-2; i++) { stream.WriteUI08(0); } return AP4_SUCCESS; } /*---------------------------------------------------------------------- | AP4_3GppLocalizedStringAtom::InspectFields +---------------------------------------------------------------------*/ AP4_Result AP4_3GppLocalizedStringAtom::InspectFields(AP4_AtomInspector& inspector) { inspector.AddField("language", GetLanguage()); inspector.AddField("value", m_Value.GetChars()); return AP4_SUCCESS; } /*---------------------------------------------------------------------- | AP4_DcfStringAtom::Create 
+---------------------------------------------------------------------*/ AP4_DcfStringAtom* AP4_DcfStringAtom::Create(Type type, AP4_UI32 size, AP4_ByteStream& stream) { AP4_UI08 version; AP4_UI32 flags; if (AP4_FAILED(AP4_Atom::ReadFullHeader(stream, version, flags))) return NULL; if (version != 0) return NULL; return new AP4_DcfStringAtom(type, size, version, flags, stream); } /*---------------------------------------------------------------------- | AP4_DcfStringAtom::AP4_DcfStringAtom +---------------------------------------------------------------------*/ AP4_DcfStringAtom::AP4_DcfStringAtom(Type type, const char* value) : AP4_Atom(type, AP4_FULL_ATOM_HEADER_SIZE, 0, 0), m_Value(value) { m_Size32 += m_Value.GetLength(); } /*---------------------------------------------------------------------- | AP4_DcfStringAtom::AP4_DcfStringAtom +---------------------------------------------------------------------*/ AP4_DcfStringAtom::AP4_DcfStringAtom(Type type, AP4_UI32 size, AP4_UI08 version, AP4_UI32 flags, AP4_ByteStream& stream) : AP4_Atom(type, size, version, flags) { if (size > AP4_FULL_ATOM_HEADER_SIZE) { AP4_UI32 value_size = size-(AP4_FULL_ATOM_HEADER_SIZE); char* value = new char[value_size]; stream.Read(value, value_size); m_Value.Assign(value, value_size); delete[] value; } } /*---------------------------------------------------------------------- | AP4_DcfStringAtom::WriteFields +---------------------------------------------------------------------*/ AP4_Result AP4_DcfStringAtom::WriteFields(AP4_ByteStream& stream) { if (m_Value.GetLength()) stream.Write(m_Value.GetChars(), m_Value.GetLength()); return AP4_SUCCESS; } /*---------------------------------------------------------------------- | AP4_DcfStringAtom::InspectFields +---------------------------------------------------------------------*/ AP4_Result AP4_DcfStringAtom::InspectFields(AP4_AtomInspector& inspector) { inspector.AddField("value", m_Value.GetChars()); return AP4_SUCCESS; } 
/*---------------------------------------------------------------------- | AP4_DcfdAtom::Create +---------------------------------------------------------------------*/ AP4_DcfdAtom* AP4_DcfdAtom::Create(AP4_UI32 size, AP4_ByteStream& stream) { AP4_UI08 version; AP4_UI32 flags; if (AP4_FAILED(AP4_Atom::ReadFullHeader(stream, version, flags))) return NULL; if (version != 0) return NULL; if (size != AP4_FULL_ATOM_HEADER_SIZE+4) return NULL; return new AP4_DcfdAtom(version, flags, stream); } /*---------------------------------------------------------------------- | AP4_DcfdAtom::AP4_DcfdAtom +---------------------------------------------------------------------*/ AP4_DcfdAtom::AP4_DcfdAtom(AP4_UI08 version, AP4_UI32 flags, AP4_ByteStream& stream) : AP4_Atom(AP4_ATOM_TYPE_DCFD, AP4_FULL_ATOM_HEADER_SIZE+4, version, flags), m_Duration(0) { stream.ReadUI32(m_Duration); } /*---------------------------------------------------------------------- | AP4_DcfdAtom::AP4_DcfdAtom +---------------------------------------------------------------------*/ AP4_DcfdAtom::AP4_DcfdAtom(AP4_UI32 duration) : AP4_Atom(AP4_ATOM_TYPE_DCFD, AP4_FULL_ATOM_HEADER_SIZE+4, 0, 0), m_Duration(duration) { } /*---------------------------------------------------------------------- | AP4_DcfdAtom::WriteFields +---------------------------------------------------------------------*/ AP4_Result AP4_DcfdAtom::WriteFields(AP4_ByteStream& stream) { stream.WriteUI32(m_Duration); return AP4_SUCCESS; } /*---------------------------------------------------------------------- | AP4_DcfdAtom::InspectFields +---------------------------------------------------------------------*/ AP4_Result AP4_DcfdAtom::InspectFields(AP4_AtomInspector& inspector) { inspector.AddField("duration", m_Duration); return AP4_SUCCESS; }
/*****************************************************************
|
|    AP4 - MetaData
|
|    Copyright 2002-2008 Axiomatic Systems, LLC
|
|
|    This file is part of Bento4/AP4 (MP4 Atom Processing Library).
|
|    Unless you have obtained Bento4 under a different license,
|    this version of Bento4 is Bento4|GPL.
|    Bento4|GPL is free software; you can redistribute it and/or modify
|    it under the terms of the GNU General Public License as published by
|    the Free Software Foundation; either version 2, or (at your option)
|    any later version.
|
|    Bento4|GPL is distributed in the hope that it will be useful,
|    but WITHOUT ANY WARRANTY; without even the implied warranty of
|    MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
|    GNU General Public License for more details.
|
|    You should have received a copy of the GNU General Public License
|    along with Bento4|GPL; see the file COPYING.  If not, write to the
|    Free Software Foundation, 59 Temple Place - Suite 330, Boston, MA
|    02111-1307, USA.
| ****************************************************************/ /*---------------------------------------------------------------------- | includes +---------------------------------------------------------------------*/ #include "Ap4File.h" #include "Ap4Movie.h" #include "Ap4MetaData.h" #include "Ap4ContainerAtom.h" #include "Ap4MoovAtom.h" #include "Ap4HdlrAtom.h" #include "Ap4DataBuffer.h" #include "Ap4Utils.h" #include "Ap4String.h" /*---------------------------------------------------------------------- | dynamic cast support +---------------------------------------------------------------------*/ AP4_DEFINE_DYNAMIC_CAST_ANCHOR(AP4_3GppLocalizedStringAtom) AP4_DEFINE_DYNAMIC_CAST_ANCHOR(AP4_DcfdAtom) AP4_DEFINE_DYNAMIC_CAST_ANCHOR(AP4_DcfStringAtom) AP4_DEFINE_DYNAMIC_CAST_ANCHOR(AP4_DataAtom) AP4_DEFINE_DYNAMIC_CAST_ANCHOR(AP4_MetaDataStringAtom) /*---------------------------------------------------------------------- | metadata keys +---------------------------------------------------------------------*/ static const AP4_MetaData::KeyInfo AP4_MetaData_KeyInfos [] = { {"Name", "Name", AP4_ATOM_TYPE_cNAM, AP4_MetaData::Value::TYPE_STRING_UTF_8}, {"Artist", "Artist", AP4_ATOM_TYPE_cART, AP4_MetaData::Value::TYPE_STRING_UTF_8}, {"AlbumArtist", "Album Artist", AP4_ATOM_TYPE_aART, AP4_MetaData::Value::TYPE_STRING_UTF_8}, {"Composer", "Composer", AP4_ATOM_TYPE_cCOM, AP4_MetaData::Value::TYPE_STRING_UTF_8}, {"Writer", "Writer", AP4_ATOM_TYPE_cWRT, AP4_MetaData::Value::TYPE_STRING_UTF_8}, {"Album", "Album", AP4_ATOM_TYPE_cALB, AP4_MetaData::Value::TYPE_STRING_UTF_8}, {"GenreCode", "Genre", AP4_ATOM_TYPE_GNRE, AP4_MetaData::Value::TYPE_BINARY}, {"GenreName", "Genre", AP4_ATOM_TYPE_cGEN, AP4_MetaData::Value::TYPE_STRING_UTF_8}, {"Grouping", "Grouping", AP4_ATOM_TYPE_cGRP, AP4_MetaData::Value::TYPE_STRING_UTF_8}, {"Date", "Date", AP4_ATOM_TYPE_cDAY, AP4_MetaData::Value::TYPE_STRING_UTF_8}, {"Tool", "Encoding Tool", AP4_ATOM_TYPE_cTOO, 
AP4_MetaData::Value::TYPE_STRING_UTF_8}, {"Comment", "Comment", AP4_ATOM_TYPE_cCMT, AP4_MetaData::Value::TYPE_STRING_UTF_8}, {"Lyrics", "Lyrics", AP4_ATOM_TYPE_cLYR, AP4_MetaData::Value::TYPE_STRING_UTF_8}, {"Copyright", "Copyright", AP4_ATOM_TYPE_CPRT, AP4_MetaData::Value::TYPE_STRING_UTF_8}, {"Track", "Track Number", AP4_ATOM_TYPE_TRKN, AP4_MetaData::Value::TYPE_BINARY}, {"Disc", "Disc Number", AP4_ATOM_TYPE_DISK, AP4_MetaData::Value::TYPE_BINARY}, {"Cover", "Cover Art", AP4_ATOM_TYPE_COVR, AP4_MetaData::Value::TYPE_BINARY}, {"Description", "Description", AP4_ATOM_TYPE_DESC, AP4_MetaData::Value::TYPE_STRING_UTF_8}, {"Rating", "Rating", AP4_ATOM_TYPE_RTNG, AP4_MetaData::Value::TYPE_INT_08_BE}, {"Tempo", "Tempo", AP4_ATOM_TYPE_TMPO, AP4_MetaData::Value::TYPE_INT_16_BE}, {"Compilation", "Compilation", AP4_ATOM_TYPE_CPIL, AP4_MetaData::Value::TYPE_INT_08_BE}, {"IsGapless", "Is Gapless", AP4_ATOM_TYPE_PGAP, AP4_MetaData::Value::TYPE_INT_08_BE}, {"Title", "Title", AP4_ATOM_TYPE_TITL, AP4_MetaData::Value::TYPE_STRING_UTF_8}, {"Description", "Description", AP4_ATOM_TYPE_DSCP, AP4_MetaData::Value::TYPE_STRING_UTF_8}, {"StoreFrontID", "Store Front ID", AP4_ATOM_TYPE_sfID, AP4_MetaData::Value::TYPE_INT_32_BE}, {"FileKind", "File Kind", AP4_ATOM_TYPE_STIK, AP4_MetaData::Value::TYPE_INT_08_BE}, {"ShowName", "Show Name", AP4_ATOM_TYPE_TVSH, AP4_MetaData::Value::TYPE_STRING_UTF_8}, {"ShowSeason", "Show Season Number", AP4_ATOM_TYPE_TVSN, AP4_MetaData::Value::TYPE_INT_32_BE}, {"ShowEpisodeNumber", "Show Episode Number", AP4_ATOM_TYPE_TVES, AP4_MetaData::Value::TYPE_INT_32_BE}, {"ShowEpisodeName", "Show Episode Name", AP4_ATOM_TYPE_TVEN, AP4_MetaData::Value::TYPE_STRING_UTF_8}, {"TVNetworkName", "TV Network Name", AP4_ATOM_TYPE_TVNN, AP4_MetaData::Value::TYPE_STRING_UTF_8}, {"IsPodcast", "Is a Podcast", AP4_ATOM_TYPE_PCST, AP4_MetaData::Value::TYPE_INT_08_BE}, {"PodcastUrl", "Podcast URL", AP4_ATOM_TYPE_PURL, AP4_MetaData::Value::TYPE_BINARY}, {"PodcastGuid", "Podcast GUID", 
AP4_ATOM_TYPE_EGID, AP4_MetaData::Value::TYPE_BINARY}, {"PodcastCategory", "Podcast Category", AP4_ATOM_TYPE_CATG, AP4_MetaData::Value::TYPE_STRING_UTF_8}, {"Keywords", "Keywords", AP4_ATOM_TYPE_KEYW, AP4_MetaData::Value::TYPE_STRING_UTF_8}, {"PurchaseDate", "Purchase Date", AP4_ATOM_TYPE_PURD, AP4_MetaData::Value::TYPE_STRING_UTF_8}, {"IconUri", "Icon URI", AP4_ATOM_TYPE_ICNU, AP4_MetaData::Value::TYPE_STRING_UTF_8}, {"InfoUrl", "Info URL", AP4_ATOM_TYPE_INFU, AP4_MetaData::Value::TYPE_STRING_UTF_8}, {"CoverUri", "Cover Art URI", AP4_ATOM_TYPE_CVRU, AP4_MetaData::Value::TYPE_STRING_UTF_8}, {"LyricsUri", "Lyrics URI", AP4_ATOM_TYPE_LRCU, AP4_MetaData::Value::TYPE_STRING_UTF_8}, {"Duration", "Duration", AP4_ATOM_TYPE_DCFD, AP4_MetaData::Value::TYPE_INT_32_BE}, {"Performer", "Performer", AP4_ATOM_TYPE_PERF, AP4_MetaData::Value::TYPE_STRING_UTF_8}, {"Author", "Author", AP4_ATOM_TYPE_AUTH, AP4_MetaData::Value::TYPE_STRING_UTF_8}, }; AP4_Array<AP4_MetaData::KeyInfo> AP4_MetaData::KeyInfos( AP4_MetaData_KeyInfos, sizeof(AP4_MetaData_KeyInfos)/sizeof(KeyInfo)); AP4_Result AP4_MetaData::Initialized() { return AP4_MetaData::KeyInfos.ItemCount() != 0; } AP4_Result AP4_MetaData::Initialize() { unsigned int item_count = sizeof(AP4_MetaData_KeyInfos)/sizeof(KeyInfo); KeyInfos.SetItemCount(item_count); for (unsigned int i=0; i<item_count; i++) { KeyInfos[i] = AP4_MetaData_KeyInfos[i]; } return AP4_SUCCESS; } AP4_Result AP4_MetaData::UnInitialize() { return AP4_MetaData::KeyInfos.Clear(); } /*---------------------------------------------------------------------- | genre IDs +---------------------------------------------------------------------*/ static const char* const Ap4Id3Genres[] = { "Blues", "Classic Rock", "Country", "Dance", "Disco", "Funk", "Grunge", "Hip-Hop", "Jazz", "Metal", "New Age", "Oldies", "Other", "Pop", "R&B", "Rap", "Reggae", "Rock", "Techno", "Industrial", "Alternative", "Ska", "Death Metal", "Pranks", "Soundtrack", "Euro-Techno", "Ambient", "Trip-Hop", 
"Vocal", "Jazz+Funk", "Fusion", "Trance", "Classical", "Instrumental", "Acid", "House", "Game", "Sound Clip", "Gospel", "Noise", "AlternRock", "Bass", "Soul", "Punk", "Space", "Meditative", "Instrumental Pop", "Instrumental Rock", "Ethnic", "Gothic", "Darkwave", "Techno-Industrial", "Electronic", "Pop-Folk", "Eurodance", "Dream", "Southern Rock", "Comedy", "Cult", "Gangsta", "Top 40", "Christian Rap", "Pop/Funk", "Jungle", "Native American", "Cabaret", "New Wave", "Psychadelic", "Rave", "Showtunes", "Trailer", "Lo-Fi", "Tribal", "Acid Punk", "Acid Jazz", "Polka", "Retro", "Musical", "Rock & Roll", "Hard Rock", "Folk", "Folk-Rock", "National Folk", "Swing", "Fast Fusion", "Bebob", "Latin", "Revival", "Celtic", "Bluegrass", "Avantgarde", "Gothic Rock", "Progressive Rock", "Psychedelic Rock", "Symphonic Rock", "Slow Rock", "Big Band", "Chorus", "Easy Listening", "Acoustic", "Humour", "Speech", "Chanson", "Opera", "Chamber Music", "Sonata", "Symphony", "Booty Bass", "Primus", "Porn Groove", "Satire", "Slow Jam", "Club", "Tango", "Samba", "Folklore", "Ballad", "Power Ballad", "Rhythmic Soul", "Freestyle", "Duet", "Punk Rock", "Drum Solo", "Acapella", "Euro-House", "Dance Hall" }; static const char* Ap4StikNames[] = { "Movie", // 0 "Normal", // 1 "Audiobook", // 2 "?", // 3 "?", // 4 "Whacked Bookmark", // 5 "Music Video", // 6 "?", // 7 "?", // 8 "Short Film", // 9 "TV Show", // 10 "Booklet", // 11 "?", // 12 "?", // 13 "Ring Tone" // 14 }; /* sfID Store Front country Australia => 143460, Austria => 143445, Belgium => 143446, Canada => 143455, Denmark => 143458, Finland => 143447, France => 143442, Germany => 143443, Greece => 143448, Ireland => 143449, Italy => 143450, Japan => 143462, Luxembourg => 143451, Netherlands => 143452, Norway => 143457, Portugal => 143453, Spain => 143454, Sweden => 143456, Switzerland => 143459, UK => 143444, USA => 143441, */ /*---------------------------------------------------------------------- | constants 
+---------------------------------------------------------------------*/ const AP4_Size AP4_DATA_ATOM_MAX_SIZE = 0x40000000; /*---------------------------------------------------------------------- | 3GPP localized string atoms +---------------------------------------------------------------------*/ const AP4_Atom::Type AP4_MetaDataAtomTypeHandler::_3gppLocalizedStringTypes[] = { AP4_ATOM_TYPE_TITL, AP4_ATOM_TYPE_DSCP, AP4_ATOM_TYPE_CPRT, AP4_ATOM_TYPE_PERF, AP4_ATOM_TYPE_AUTH, AP4_ATOM_TYPE_GNRE }; const AP4_MetaDataAtomTypeHandler::TypeList AP4_MetaDataAtomTypeHandler::_3gppLocalizedStringTypeList = { _3gppLocalizedStringTypes, sizeof(_3gppLocalizedStringTypes)/sizeof(_3gppLocalizedStringTypes[0]) }; /*---------------------------------------------------------------------- | other 3GPP atoms +---------------------------------------------------------------------*/ const AP4_Atom::Type AP4_MetaDataAtomTypeHandler::_3gppOtherTypes[] = { AP4_ATOM_TYPE_RTNG, AP4_ATOM_TYPE_CLSF, AP4_ATOM_TYPE_KYWD, AP4_ATOM_TYPE_LOCI, AP4_ATOM_TYPE_ALBM, AP4_ATOM_TYPE_YRRC, }; const AP4_MetaDataAtomTypeHandler::TypeList AP4_MetaDataAtomTypeHandler::_3gppOtherTypeList = { _3gppOtherTypes, sizeof(_3gppOtherTypes)/sizeof(_3gppOtherTypes[0]) }; /*---------------------------------------------------------------------- | DCF string atoms +---------------------------------------------------------------------*/ const AP4_Atom::Type AP4_MetaDataAtomTypeHandler::DcfStringTypes[] = { AP4_ATOM_TYPE_ICNU, AP4_ATOM_TYPE_INFU, AP4_ATOM_TYPE_CVRU, AP4_ATOM_TYPE_LRCU }; const AP4_MetaDataAtomTypeHandler::TypeList AP4_MetaDataAtomTypeHandler::DcfStringTypeList = { DcfStringTypes, sizeof(DcfStringTypes)/sizeof(DcfStringTypes[0]) }; /*---------------------------------------------------------------------- | atom type lists +---------------------------------------------------------------------*/ const AP4_Atom::Type AP4_MetaDataAtomTypeHandler::IlstTypes[] = { AP4_ATOM_TYPE_dddd, AP4_ATOM_TYPE_cNAM, 
AP4_ATOM_TYPE_cART, AP4_ATOM_TYPE_cCOM, AP4_ATOM_TYPE_cWRT, AP4_ATOM_TYPE_cALB, AP4_ATOM_TYPE_cGEN, AP4_ATOM_TYPE_cGRP, AP4_ATOM_TYPE_cDAY, AP4_ATOM_TYPE_cTOO, AP4_ATOM_TYPE_cCMT, AP4_ATOM_TYPE_CPRT, AP4_ATOM_TYPE_TRKN, AP4_ATOM_TYPE_DISK, AP4_ATOM_TYPE_COVR, AP4_ATOM_TYPE_DESC, AP4_ATOM_TYPE_GNRE, AP4_ATOM_TYPE_CPIL, AP4_ATOM_TYPE_TMPO, AP4_ATOM_TYPE_RTNG, AP4_ATOM_TYPE_apID, AP4_ATOM_TYPE_cnID, AP4_ATOM_TYPE_cmID, AP4_ATOM_TYPE_atID, AP4_ATOM_TYPE_plID, AP4_ATOM_TYPE_geID, AP4_ATOM_TYPE_sfID, AP4_ATOM_TYPE_akID, AP4_ATOM_TYPE_aART, AP4_ATOM_TYPE_TVNN, AP4_ATOM_TYPE_TVSH, AP4_ATOM_TYPE_TVEN, AP4_ATOM_TYPE_TVSN, AP4_ATOM_TYPE_TVES, AP4_ATOM_TYPE_STIK, AP4_ATOM_TYPE_PGAP, AP4_ATOM_TYPE_PCST, AP4_ATOM_TYPE_PURD, AP4_ATOM_TYPE_PURL, AP4_ATOM_TYPE_EGID, AP4_ATOM_TYPE_SONM, AP4_ATOM_TYPE_SOAL, AP4_ATOM_TYPE_SOAR, AP4_ATOM_TYPE_SOAA, AP4_ATOM_TYPE_SOCO, AP4_ATOM_TYPE_SOSN }; const AP4_MetaDataAtomTypeHandler::TypeList AP4_MetaDataAtomTypeHandler::IlstTypeList = { IlstTypes, sizeof(IlstTypes)/sizeof(IlstTypes[0]) }; /*---------------------------------------------------------------------- | AP4_MetaDataAtomTypeHandler::CreateAtom +---------------------------------------------------------------------*/ AP4_Result AP4_MetaDataAtomTypeHandler::CreateAtom(AP4_Atom::Type type, AP4_UI32 size, AP4_ByteStream& stream, AP4_Atom::Type context, AP4_Atom*& atom) { atom = NULL; if (context == AP4_ATOM_TYPE_ILST) { if (IsTypeInList(type, IlstTypeList)) { m_AtomFactory->PushContext(type); atom = AP4_ContainerAtom::Create(type, size, false, false, stream, *m_AtomFactory); m_AtomFactory->PopContext(); } } else if (type == AP4_ATOM_TYPE_DATA) { if (IsTypeInList(context, IlstTypeList)) { atom = new AP4_DataAtom(size, stream); } } else if (context == AP4_ATOM_TYPE_dddd) { if (type == AP4_ATOM_TYPE_MEAN || type == AP4_ATOM_TYPE_NAME) { atom = new AP4_MetaDataStringAtom(type, size, stream); } } else if (context == AP4_ATOM_TYPE_UDTA) { if (IsTypeInList(type, _3gppLocalizedStringTypeList)) { 
atom = AP4_3GppLocalizedStringAtom::Create(type, size, stream); } else if (IsTypeInList(type, DcfStringTypeList)) { atom = AP4_DcfStringAtom::Create(type, size, stream); } else if (type == AP4_ATOM_TYPE_DCFD) { atom = AP4_DcfdAtom::Create(size, stream); } } return atom?AP4_SUCCESS:AP4_FAILURE; } /*---------------------------------------------------------------------- | AP4_MetaDataAtomTypeHandler::IsTypeInList +---------------------------------------------------------------------*/ bool AP4_MetaDataAtomTypeHandler::IsTypeInList(AP4_Atom::Type type, const AP4_MetaDataAtomTypeHandler::TypeList& list) { for (unsigned int i=0; i<list.m_Size; i++) { if (type == list.m_Types[i]) return true; } return false; } /*---------------------------------------------------------------------- | AP4_MetaData::AP4_MetaData +---------------------------------------------------------------------*/ AP4_MetaData::AP4_MetaData(AP4_File* file) { // get the file's movie AP4_Movie* movie = file->GetMovie(); // handle the movie's metadata if there is a movie in the file if (movie) { AP4_MoovAtom* moov = movie->GetMoovAtom(); if (moov == NULL) return; ParseMoov(moov); AP4_Atom* udta = moov->GetChild(AP4_ATOM_TYPE_UDTA); if (udta) { AP4_ContainerAtom* udta_container = AP4_DYNAMIC_CAST(AP4_ContainerAtom, udta); if (udta_container) { ParseUdta(udta_container, "3gpp"); } } } else { // if we don't have a movie, try to show metadata from a udta atom AP4_List<AP4_Atom>& top_level_atoms = file->GetTopLevelAtoms(); AP4_List<AP4_Atom>::Item* atom_item = top_level_atoms.FirstItem(); while (atom_item) { AP4_ContainerAtom* container = AP4_DYNAMIC_CAST(AP4_ContainerAtom, atom_item->GetData()); if (container) { // look for a udta in a DCF layout AP4_Atom* udta = container->FindChild("odhe/udta"); if (udta) { AP4_ContainerAtom* udta_container = AP4_DYNAMIC_CAST(AP4_ContainerAtom, udta); if (udta_container) { ParseUdta(udta_container, "dcf"); } } } atom_item = atom_item->GetNext(); } } } 
/*----------------------------------------------------------------------
|   AP4_MetaData::ParseMoov
|
|   Harvests iTunes-style entries from 'udta/meta/ilst', but only when
|   the 'meta' atom's handler ('hdlr') is of type 'mdir'.
+---------------------------------------------------------------------*/
AP4_Result
AP4_MetaData::ParseMoov(AP4_MoovAtom* moov)
{
    // require a 'meta' atom whose 'hdlr' type is 'mdir'
    AP4_HdlrAtom* hdlr = AP4_DYNAMIC_CAST(AP4_HdlrAtom, moov->FindChild("udta/meta/hdlr"));
    if (hdlr == NULL || hdlr->GetHandlerType() != AP4_HANDLER_TYPE_MDIR) {
        return AP4_ERROR_NO_SUCH_ITEM;
    }

    // get the list of entries
    AP4_ContainerAtom* ilst = AP4_DYNAMIC_CAST(AP4_ContainerAtom, moov->FindChild("udta/meta/ilst"));
    if (ilst == NULL) return AP4_ERROR_NO_SUCH_ITEM;

    // each container child of 'ilst' yields one or more entries
    for (AP4_List<AP4_Atom>::Item* child = ilst->GetChildren().FirstItem();
         child;
         child = child->GetNext()) {
        AP4_ContainerAtom* entry_atom = AP4_DYNAMIC_CAST(AP4_ContainerAtom, child->GetData());
        if (entry_atom) {
            AddIlstEntries(entry_atom, "meta");
        }
    }

    return AP4_SUCCESS;
}

/*----------------------------------------------------------------------
|   AP4_MetaData::ParseUdta
|
|   Harvests 3GPP localized strings, DCF strings and DCF duration
|   atoms found directly under a 'udta' container, tagging each entry
|   with 'namespc'.
+---------------------------------------------------------------------*/
AP4_Result
AP4_MetaData::ParseUdta(AP4_ContainerAtom* udta, const char* namespc)
{
    // check that the atom is indeed a 'udta' atom
    if (udta->GetType() != AP4_ATOM_TYPE_UDTA) {
        return AP4_ERROR_INVALID_PARAMETERS;
    }

    AP4_List<AP4_Atom>::Item* child = udta->GetChildren().FirstItem();
    while (child) {
        AP4_Atom* child_atom = child->GetData();
        child = child->GetNext();

        AP4_3GppLocalizedStringAtom* _3gpp_atom =
            AP4_DYNAMIC_CAST(AP4_3GppLocalizedStringAtom, child_atom);
        if (_3gpp_atom) {
            Add3GppEntry(_3gpp_atom, namespc);
            continue;
        }

        AP4_DcfStringAtom* dcfs_atom = AP4_DYNAMIC_CAST(AP4_DcfStringAtom, child_atom);
        if (dcfs_atom) {
            AddDcfStringEntry(dcfs_atom, namespc);
            continue;
        }

        AP4_DcfdAtom* dcfd_atom = AP4_DYNAMIC_CAST(AP4_DcfdAtom, child_atom);
        if (dcfd_atom) {
            AddDcfdEntry(dcfd_atom, namespc);
        }
    }

    return AP4_SUCCESS;
}
/*----------------------------------------------------------------------
|   AP4_MetaData::~AP4_MetaData
+---------------------------------------------------------------------*/
AP4_MetaData::~AP4_MetaData()
{
    // entries are owned by this object
    m_Entries.DeleteReferences();
}

/*----------------------------------------------------------------------
|   AP4_MetaData::ResolveKeyName
|
|   Maps an atom 4CC to a symbolic key name from the key info table;
|   falls back to the raw four-character code when unknown.
+---------------------------------------------------------------------*/
AP4_Result
AP4_MetaData::ResolveKeyName(AP4_Atom::Type atom_type, AP4_String& value)
{
    const char* key_name = NULL;
    char        four_cc[5];

    // look for a match in the key infos
    const unsigned int key_count =
        sizeof(AP4_MetaData_KeyInfos)/sizeof(AP4_MetaData_KeyInfos[0]);
    for (unsigned int key = 0; key < key_count; key++) {
        if (AP4_MetaData_KeyInfos[key].four_cc == atom_type) {
            key_name = AP4_MetaData_KeyInfos[key].name;
            break;
        }
    }
    if (key_name == NULL) {
        // no match: use the printable four-character code as the name
        AP4_FormatFourChars(four_cc, (AP4_UI32)atom_type);
        key_name = four_cc;
    }

    value = key_name;
    return AP4_SUCCESS;
}

/*----------------------------------------------------------------------
|   AP4_MetaData::AddIlstEntries
|
|   Converts one 'ilst' child container into metadata entries.
|   A '----' container yields a single custom entry (namespace from its
|   'mean' child, name from its 'name' child); any other container
|   yields one entry per 'data' child, named after the container's 4CC.
+---------------------------------------------------------------------*/
AP4_Result
AP4_MetaData::AddIlstEntries(AP4_ContainerAtom* atom, const char* namespc)
{
    AP4_MetaData::Value* value = NULL;

    if (atom->GetType() == AP4_ATOM_TYPE_dddd) {
        // custom entry: needs 'mean', 'name' and 'data' children
        AP4_MetaDataStringAtom* mean =
            static_cast<AP4_MetaDataStringAtom*>(atom->GetChild(AP4_ATOM_TYPE_MEAN));
        if (mean == NULL) return AP4_ERROR_INVALID_FORMAT;

        AP4_MetaDataStringAtom* name =
            static_cast<AP4_MetaDataStringAtom*>(atom->GetChild(AP4_ATOM_TYPE_NAME));
        if (name == NULL) return AP4_ERROR_INVALID_FORMAT;

        AP4_DataAtom* data_atom =
            static_cast<AP4_DataAtom*>(atom->GetChild(AP4_ATOM_TYPE_DATA));
        if (data_atom == NULL) return AP4_ERROR_INVALID_FORMAT;

        value = new AP4_AtomMetaDataValue(data_atom, atom->GetType());
        return m_Entries.Add(new Entry(name->GetValue().GetChars(),
                                       mean->GetValue().GetChars(),
                                       value));
    }

    // regular entry: the container's 4CC is the key name
    char four_cc[5];
    AP4_FormatFourChars(four_cc, (AP4_UI32)atom->GetType());
    const char* key_name = four_cc;

    // add one entry for each 'data' child
    for (AP4_List<AP4_Atom>::Item* child = atom->GetChildren().FirstItem();
         child;
         child = child->GetNext()) {
        AP4_Atom* child_atom = child->GetData();
        if (child_atom->GetType() != AP4_ATOM_TYPE_DATA) continue;
        AP4_DataAtom* data_atom = static_cast<AP4_DataAtom*>(child_atom);
        value = new AP4_AtomMetaDataValue(data_atom, atom->GetType());
        m_Entries.Add(new Entry(key_name, namespc, value));
    }
    return AP4_SUCCESS;
}

/*----------------------------------------------------------------------
|   AP4_MetaData::Add3GppEntry
|
|   Adds one entry for a 3GPP localized string atom, keeping its
|   language code when present.
+---------------------------------------------------------------------*/
AP4_Result
AP4_MetaData::Add3GppEntry(AP4_3GppLocalizedStringAtom* atom, const char* namespc)
{
    AP4_String key_name;
    ResolveKeyName(atom->GetType(), key_name);

    const char* language = NULL;
    if (atom->GetLanguage()[0]) {
        language = atom->GetLanguage();
    }

    AP4_MetaData::Value* value =
        new AP4_StringMetaDataValue(atom->GetValue().GetChars(), language);
    m_Entries.Add(new Entry(key_name.GetChars(), namespc, value));

    return AP4_SUCCESS;
}

/*----------------------------------------------------------------------
|   AP4_MetaData::AddDcfStringEntry
|
|   Adds one entry for a DCF string atom.
+---------------------------------------------------------------------*/
AP4_Result
AP4_MetaData::AddDcfStringEntry(AP4_DcfStringAtom* atom, const char* namespc)
{
    AP4_String key_name;
    ResolveKeyName(atom->GetType(), key_name);

    AP4_MetaData::Value* value =
        new AP4_StringMetaDataValue(atom->GetValue().GetChars());
    m_Entries.Add(new Entry(key_name.GetChars(), namespc, value));

    return AP4_SUCCESS;
}

/*----------------------------------------------------------------------
|   AP4_MetaData::AddDcfdEntry
|
|   Adds one integer (duration) entry for a 'dcfD' atom.
+---------------------------------------------------------------------*/
AP4_Result
AP4_MetaData::AddDcfdEntry(AP4_DcfdAtom* atom, const char* namespc)
{
    AP4_String key_name;
    ResolveKeyName(atom->GetType(), key_name);

    AP4_MetaData::Value* value =
        new AP4_IntegerMetaDataValue(AP4_MetaData::Value::TYPE_INT_32_BE,
                                     atom->GetDuration());
    m_Entries.Add(new Entry(key_name.GetChars(), namespc, value));

    return AP4_SUCCESS;
}

/*----------------------------------------------------------------------
|   AP4_MetaData::Value::MapTypeToCategory
|
|   Collapses a concrete value type into its broad category
|   (integer / string / float / binary).
+---------------------------------------------------------------------*/
AP4_MetaData::Value::TypeCategory
AP4_MetaData::Value::MapTypeToCategory(Type type)
{
    switch (type) {
        case AP4_MetaData::Value::TYPE_INT_08_BE:
        case AP4_MetaData::Value::TYPE_INT_16_BE:
        case AP4_MetaData::Value::TYPE_INT_32_BE:
            return AP4_MetaData::Value::TYPE_CATEGORY_INTEGER;

        case AP4_MetaData::Value::TYPE_STRING_UTF_8:
        case AP4_MetaData::Value::TYPE_STRING_UTF_16:
        case AP4_MetaData::Value::TYPE_STRING_PASCAL:
            return AP4_MetaData::Value::TYPE_CATEGORY_STRING;

        case AP4_MetaData::Value::TYPE_FLOAT_32_BE:
        case AP4_MetaData::Value::TYPE_FLOAT_64_BE:
            return AP4_MetaData::Value::TYPE_CATEGORY_FLOAT;

        default:
            return AP4_MetaData::Value::TYPE_CATEGORY_BINARY;
    }
}

/*----------------------------------------------------------------------
|   AP4_MetaData::Value::GetTypeCategory
+---------------------------------------------------------------------*/
AP4_MetaData::Value::TypeCategory
AP4_MetaData::Value::GetTypeCategory() const
{
    return MapTypeToCategory(m_Type);
}

/*----------------------------------------------------------------------
|   AP4_MetaData::Entry::ToAtom
|
|   Serializes this entry as an atom, picking the representation from
|   the key's namespace: 'meta' -> 4CC container with a 'data' child,
|   'dcf' -> DCF string / 3GPP localized string / 'dcfD' atom,
|   anything else -> custom '----' container.
|   On success the caller owns the returned atom.
+---------------------------------------------------------------------*/
AP4_Result
AP4_MetaData::Entry::ToAtom(AP4_Atom*& atom) const
{
    atom = NULL;
    if (m_Value == NULL) return AP4_ERROR_INVALID_PARAMETERS;

    if (m_Key.GetNamespace() == "meta") {
        // the name must be a four-character code
        if (m_Key.GetName().GetLength() != 4) {
            return AP4_ERROR_INVALID_PARAMETERS;
        }
        AP4_Atom::Type atom_type = AP4_Atom::TypeFromString(m_Key.GetName().GetChars());

        // wrap a 'data' atom in a container of that type
        AP4_ContainerAtom* container = new AP4_ContainerAtom(atom_type);
        container->AddChild(new AP4_DataAtom(*m_Value));
        atom = container;
        return AP4_SUCCESS;
    }

    if (m_Key.GetNamespace() == "dcf") {
        // the name must be a four-character code
        if (m_Key.GetName().GetLength() != 4) {
            return AP4_ERROR_INVALID_PARAMETERS;
        }
        AP4_Atom::Type atom_type = AP4_Atom::TypeFromString(m_Key.GetName().GetChars());

        if (AP4_MetaDataAtomTypeHandler::IsTypeInList(atom_type,
                AP4_MetaDataAtomTypeHandler::DcfStringTypeList)) {
            AP4_String atom_value = m_Value->ToString();
            atom = new AP4_DcfStringAtom(atom_type, atom_value.GetChars());
            return AP4_SUCCESS;
        }
        if (AP4_MetaDataAtomTypeHandler::IsTypeInList(atom_type,
                AP4_MetaDataAtomTypeHandler::_3gppLocalizedStringTypeList)) {
            AP4_String atom_value = m_Value->ToString();
            const char* language = "eng"; // default
            if (m_Value->GetLanguage().GetLength() != 0) {
                language = m_Value->GetLanguage().GetChars();
            }
            atom = new AP4_3GppLocalizedStringAtom(atom_type, language, atom_value.GetChars());
            return AP4_SUCCESS;
        }
        if (atom_type == AP4_ATOM_TYPE_DCFD) {
            atom = new AP4_DcfdAtom((AP4_UI32)m_Value->ToInteger());
            return AP4_SUCCESS;
        }

        // not a supported DCF atom type
        return AP4_ERROR_NOT_SUPPORTED;
    }

    // custom namespace: create a '----' atom
    AP4_ContainerAtom* container = new AP4_ContainerAtom(AP4_ATOM_TYPE_dddd);

    // add a 'mean' string
    container->AddChild(new AP4_MetaDataStringAtom(AP4_ATOM_TYPE_MEAN,
                                                   m_Key.GetNamespace().GetChars()));
    // add a 'name' string
    container->AddChild(new AP4_MetaDataStringAtom(AP4_ATOM_TYPE_NAME,
                                                   m_Key.GetName().GetChars()));
    // add the data atom
    container->AddChild(new AP4_DataAtom(*m_Value));

    atom = container;
    return AP4_SUCCESS;

    // unreachable - return AP4_ERROR_NOT_SUPPORTED;
}
/*---------------------------------------------------------------------- | AP4_MetaData::Entry::FindInIlst +---------------------------------------------------------------------*/ AP4_ContainerAtom* AP4_MetaData::Entry::FindInIlst(AP4_ContainerAtom* ilst) const { if (m_Key.GetNamespace() == "meta") { AP4_Atom::Type atom_type = AP4_Atom::TypeFromString(m_Key.GetName().GetChars()); return AP4_DYNAMIC_CAST(AP4_ContainerAtom, ilst->GetChild(atom_type)); } else { AP4_List<AP4_Atom>::Item* ilst_item = ilst->GetChildren().FirstItem(); while (ilst_item) { AP4_ContainerAtom* entry_atom = AP4_DYNAMIC_CAST(AP4_ContainerAtom, ilst_item->GetData()); if (entry_atom) { AP4_MetaDataStringAtom* mean = static_cast<AP4_MetaDataStringAtom*>(entry_atom->GetChild(AP4_ATOM_TYPE_MEAN)); AP4_MetaDataStringAtom* name = static_cast<AP4_MetaDataStringAtom*>(entry_atom->GetChild(AP4_ATOM_TYPE_NAME)); if (mean && name && mean->GetValue() == m_Key.GetNamespace() && name->GetValue() == m_Key.GetName()) { return entry_atom; } } ilst_item = ilst_item->GetNext(); } } // not found return NULL; } /*---------------------------------------------------------------------- | AP4_MetaData::Entry::AddToFileIlst +---------------------------------------------------------------------*/ AP4_Result AP4_MetaData::Entry::AddToFileIlst(AP4_File& file, AP4_Ordinal index) { // check that we have a correct entry if (m_Value == NULL) return AP4_ERROR_INVALID_STATE; // convert the entry into an atom AP4_Atom* atom; AP4_Result result = ToAtom(atom); if (AP4_FAILED(result)) return result; AP4_ContainerAtom* entry_atom = AP4_DYNAMIC_CAST(AP4_ContainerAtom, atom); if (entry_atom == NULL) { return AP4_ERROR_INVALID_FORMAT; } // look for the 'moov' AP4_Movie* movie = file.GetMovie(); if (movie == NULL) return AP4_ERROR_INVALID_FORMAT; AP4_MoovAtom* moov = movie->GetMoovAtom(); if (moov == NULL) return AP4_ERROR_INVALID_FORMAT; // look for 'udta', and create if it does not exist AP4_ContainerAtom* udta = 
AP4_DYNAMIC_CAST(AP4_ContainerAtom, moov->FindChild("udta", true)); if (udta == NULL) return AP4_ERROR_INTERNAL; // look for 'meta', and create if it does not exist ('meta' is a FULL atom) AP4_ContainerAtom* meta = AP4_DYNAMIC_CAST(AP4_ContainerAtom, udta->FindChild("meta", true, true)); if (meta == NULL) return AP4_ERROR_INTERNAL; // look for a 'hdlr' atom type 'mdir' AP4_HdlrAtom* hdlr = AP4_DYNAMIC_CAST(AP4_HdlrAtom, meta->FindChild("hdlr")); if (hdlr == NULL) { hdlr = new AP4_HdlrAtom(AP4_HANDLER_TYPE_MDIR, ""); meta->AddChild(hdlr); } else { if (hdlr->GetHandlerType() != AP4_HANDLER_TYPE_MDIR) { return AP4_ERROR_INVALID_FORMAT; } } // get/create the list of entries AP4_ContainerAtom* ilst = AP4_DYNAMIC_CAST(AP4_ContainerAtom, meta->FindChild("ilst", true)); if (ilst == NULL) return AP4_ERROR_INTERNAL; // look if there is already a container for this entry AP4_ContainerAtom* existing = FindInIlst(ilst); if (existing == NULL) { // just add the one we have ilst->AddChild(entry_atom); } else { // add the entry's data to the existing entry AP4_DataAtom* data_atom = AP4_DYNAMIC_CAST(AP4_DataAtom, entry_atom->GetChild(AP4_ATOM_TYPE_DATA)); if (data_atom == NULL) return AP4_ERROR_INTERNAL; entry_atom->RemoveChild(data_atom); existing->AddChild(data_atom, index); delete entry_atom; } return AP4_SUCCESS; } /*---------------------------------------------------------------------- | AP4_MetaData::Entry::AddToFileDcf +---------------------------------------------------------------------*/ AP4_Result AP4_MetaData::Entry::AddToFileDcf(AP4_File& file, AP4_Ordinal index) { // check that we have a correct entry if (m_Value == NULL) return AP4_ERROR_INVALID_STATE; // look for 'odrm/odhe' AP4_ContainerAtom* odhe = AP4_DYNAMIC_CAST(AP4_ContainerAtom, file.FindChild("odrm/odhe")); if (odhe == NULL) return AP4_ERROR_NO_SUCH_ITEM; // get/create the list of entries AP4_ContainerAtom* udta = AP4_DYNAMIC_CAST(AP4_ContainerAtom, odhe->FindChild("udta", true)); if (udta == NULL) return 
AP4_ERROR_INTERNAL; // convert the entry into an atom AP4_Atom* data_atom; AP4_Result result = ToAtom(data_atom); if (AP4_FAILED(result)) return result; // add the entry's data to the container return udta->AddChild(data_atom, index); } /*---------------------------------------------------------------------- | AP4_MetaData::Entry::AddToFile +---------------------------------------------------------------------*/ AP4_Result AP4_MetaData::Entry::AddToFile(AP4_File& file, AP4_Ordinal index) { // check that we have a correct entry if (m_Value == NULL) return AP4_ERROR_INVALID_STATE; // check the namespace of the key to know where to add the atom if (m_Key.GetNamespace() == "meta") { return AddToFileIlst(file, index); } else if (m_Key.GetNamespace() == "dcf") { return AddToFileDcf(file, index); } else { // custom namespace return AddToFileIlst(file, index); } } /*---------------------------------------------------------------------- | AP4_MetaData::Entry::RemoveFromFileIlst +---------------------------------------------------------------------*/ AP4_Result AP4_MetaData::Entry::RemoveFromFileIlst(AP4_File& file, AP4_Ordinal index) { // look for the 'moov' AP4_Movie* movie = file.GetMovie(); if (movie == NULL) return AP4_ERROR_INVALID_FORMAT; AP4_MoovAtom* moov = movie->GetMoovAtom(); if (moov == NULL) return AP4_ERROR_INVALID_FORMAT; // look for 'udta/meta/ilst' AP4_ContainerAtom* ilst = AP4_DYNAMIC_CAST(AP4_ContainerAtom, moov->FindChild("udta/meta/ilst")); if (ilst == NULL) return AP4_ERROR_NO_SUCH_ITEM; // look if there is already a container for this entry AP4_ContainerAtom* existing = FindInIlst(ilst); if (existing == NULL) return AP4_ERROR_NO_SUCH_ITEM; // remove the data atom in the entry AP4_Result result = existing->DeleteChild(AP4_ATOM_TYPE_DATA, index); if (AP4_FAILED(result)) return result; // cleanup if (existing->GetType() == AP4_ATOM_TYPE_dddd) { // custom entry: if there are no more 'data' children, remove the entry if 
(existing->GetChild(AP4_ATOM_TYPE_DATA) == NULL) { ilst->RemoveChild(existing); delete existing; } } else { // normal entry: if the entry is empty, remove it if (existing->GetChildren().ItemCount() == 0) { ilst->RemoveChild(existing); delete existing; } } return AP4_SUCCESS; } /*---------------------------------------------------------------------- | AP4_MetaData::Entry::RemoveFromFileDcf +---------------------------------------------------------------------*/ AP4_Result AP4_MetaData::Entry::RemoveFromFileDcf(AP4_File& file, AP4_Ordinal index) { // look for 'odrm/odhe/udta' AP4_ContainerAtom* udta = AP4_DYNAMIC_CAST(AP4_ContainerAtom, file.FindChild("odrm/odhe/udta")); if (udta == NULL) return AP4_ERROR_NO_SUCH_ITEM; // remove the data atom in the entry AP4_UI32 type = AP4_BytesToUInt32BE((const unsigned char*)m_Key.GetName().GetChars()); AP4_Result result = udta->DeleteChild(type, index); if (AP4_FAILED(result)) return result; return AP4_SUCCESS; } /*---------------------------------------------------------------------- | AP4_MetaData::Entry::RemoveFromFile +---------------------------------------------------------------------*/ AP4_Result AP4_MetaData::Entry::RemoveFromFile(AP4_File& file, AP4_Ordinal index) { // check the namespace of the key to know where to add the atom if (m_Key.GetNamespace() == "meta") { return RemoveFromFileIlst(file, index); } else if (m_Key.GetNamespace() == "dcf") { return RemoveFromFileDcf(file, index); } else { // custom namespace return RemoveFromFileIlst(file, index); } } /*---------------------------------------------------------------------- | AP4_StringMetaDataValue::ToString +---------------------------------------------------------------------*/ AP4_String AP4_StringMetaDataValue::ToString() const { return m_Value; } /*---------------------------------------------------------------------- | AP4_StringMetaDataValue::ToBytes +---------------------------------------------------------------------*/ AP4_Result 
AP4_StringMetaDataValue::ToBytes(AP4_DataBuffer& /* bytes */) const { return AP4_ERROR_NOT_SUPPORTED; } /*---------------------------------------------------------------------- | AP4_StringMetaDataValue::ToInteger +---------------------------------------------------------------------*/ long AP4_StringMetaDataValue::ToInteger() const { return 0; } /*---------------------------------------------------------------------- | AP4_IntegerMetaDataValue::ToString +---------------------------------------------------------------------*/ AP4_String AP4_IntegerMetaDataValue::ToString() const { char value[16]; AP4_FormatString(value, sizeof(value), "%ld", m_Value); return AP4_String(value); } /*---------------------------------------------------------------------- | AP4_IntegerMetaDataValue::ToBytes +---------------------------------------------------------------------*/ AP4_Result AP4_IntegerMetaDataValue::ToBytes(AP4_DataBuffer& /* bytes */) const { return AP4_ERROR_NOT_SUPPORTED; } /*---------------------------------------------------------------------- | AP4_IntegerMetaDataValue::ToInteger +---------------------------------------------------------------------*/ long AP4_IntegerMetaDataValue::ToInteger() const { return m_Value; } /*---------------------------------------------------------------------- | AP4_BinaryMetaDataValue::ToString +---------------------------------------------------------------------*/ AP4_String AP4_BinaryMetaDataValue::ToString() const { return AP4_String(); // not supported } /*---------------------------------------------------------------------- | AP4_BinaryMetaDataValue::ToBytes +---------------------------------------------------------------------*/ AP4_Result AP4_BinaryMetaDataValue::ToBytes(AP4_DataBuffer& bytes) const { bytes.SetDataSize(m_Value.GetDataSize()); AP4_CopyMemory(bytes.UseData(), m_Value.GetData(), m_Value.GetDataSize()); return AP4_SUCCESS; } /*---------------------------------------------------------------------- | 
AP4_BinaryMetaDataValue::ToInteger +---------------------------------------------------------------------*/ long AP4_BinaryMetaDataValue::ToInteger() const { return 0; // NOT SUPPORTED } /*---------------------------------------------------------------------- | AP4_AtomMetaDataValue::AP4_AtomMetaDataValue +---------------------------------------------------------------------*/ AP4_AtomMetaDataValue::AP4_AtomMetaDataValue(AP4_DataAtom* atom, AP4_UI32 parent_type) : Value(atom->GetValueType()), m_DataAtom(atom) { switch (parent_type) { case AP4_ATOM_TYPE_GNRE: m_Meaning = MEANING_ID3_GENRE; break; case AP4_ATOM_TYPE_CPIL: m_Meaning = MEANING_BOOLEAN; break; case AP4_ATOM_TYPE_PGAP: case AP4_ATOM_TYPE_PCST: m_Meaning = MEANING_BOOLEAN; break; case AP4_ATOM_TYPE_STIK: m_Meaning = MEANING_FILE_KIND; break; case AP4_ATOM_TYPE_PURL: case AP4_ATOM_TYPE_EGID: m_Meaning = MEANING_BINARY_ENCODED_CHARS; break; default: break; } } /*---------------------------------------------------------------------- | AP4_AtomMetaDataValue::ToString +---------------------------------------------------------------------*/ AP4_String AP4_AtomMetaDataValue::ToString() const { char string[256] = ""; AP4_MetaData::Value::Type value_type = m_DataAtom->GetValueType(); switch (AP4_MetaData::Value::MapTypeToCategory(value_type)) { case AP4_MetaData::Value::TYPE_CATEGORY_INTEGER: { long value; if (AP4_SUCCEEDED(m_DataAtom->LoadInteger(value))) { if (m_Meaning == MEANING_BOOLEAN) { if (value) { return "True"; } else { return "False"; } } else if (m_Meaning == MEANING_FILE_KIND) { if (value >= 0 && ((unsigned int)value) <= sizeof(Ap4StikNames)/sizeof(Ap4StikNames[0])) { AP4_FormatString(string, sizeof(string), "(%ld) %s", value, Ap4StikNames[value]); } else { return "Unknown"; } } else { AP4_FormatString(string, sizeof(string), "%ld", value); } } return AP4_String((const char*)string); break; } case AP4_MetaData::Value::TYPE_CATEGORY_STRING: { AP4_String* category_string; if 
(AP4_SUCCEEDED(m_DataAtom->LoadString(category_string))) { AP4_String result(*category_string); delete category_string; return result; } break; } case AP4_MetaData::Value::TYPE_CATEGORY_BINARY: { AP4_DataBuffer data; if (AP4_SUCCEEDED(m_DataAtom->LoadBytes(data))) { if (m_Meaning == MEANING_ID3_GENRE && data.GetDataSize() == 2) { unsigned int genre = (data.GetData()[0])*256+data.GetData()[1]; if (genre >= 1 && genre <= sizeof(Ap4Id3Genres)/sizeof(Ap4Id3Genres[0])) { AP4_FormatString(string, sizeof(string), "(%d) %s", genre, Ap4Id3Genres[genre-1]); return AP4_String((const char*)string); } else { return "Unknown"; } } else if (m_Meaning == MEANING_BINARY_ENCODED_CHARS) { AP4_String result; result.Assign((const char*)data.GetData(), data.GetDataSize()); return result; } else { unsigned int dump_length = data.GetDataSize(); bool truncate = false; if (dump_length > 16) { dump_length = 16; truncate = true; } char* out = string; for (unsigned int i=0; i<dump_length; i++) { AP4_FormatString(out, sizeof(string)-(out-string), "%02x ", data.GetData()[i]); out += 3; } if (truncate) { *out++='.'; *out++='.'; *out++='.'; *out++=' '; } AP4_FormatString(out, sizeof(string)-(out-string), "[%d bytes]", (int)data.GetDataSize()); } } return AP4_String(string); } default: return AP4_String(); } return AP4_String(); } /*---------------------------------------------------------------------- | AP4_AtomMetaDataValue::ToBytes +---------------------------------------------------------------------*/ AP4_Result AP4_AtomMetaDataValue::ToBytes(AP4_DataBuffer& bytes) const { return m_DataAtom->LoadBytes(bytes); } /*---------------------------------------------------------------------- | AP4_AtomMetaDataValue::ToInteger +---------------------------------------------------------------------*/ long AP4_AtomMetaDataValue::ToInteger() const { long value; if (AP4_SUCCEEDED(m_DataAtom->LoadInteger(value))) { return value; } else { return 0; } } 
/*---------------------------------------------------------------------- | AP4_DataAtom::AP4_DataAtom +---------------------------------------------------------------------*/ AP4_DataAtom::AP4_DataAtom(const AP4_MetaData::Value& value) : AP4_Atom(AP4_ATOM_TYPE_DATA, AP4_ATOM_HEADER_SIZE), m_DataType(DATA_TYPE_BINARY), m_Source(NULL) { AP4_MemoryByteStream* memory = new AP4_MemoryByteStream(); AP4_Size payload_size = 8; m_Source = memory; switch (value.GetType()) { case AP4_MetaData::Value::TYPE_STRING_UTF_8: { m_DataType = DATA_TYPE_STRING_UTF_8; AP4_String string_value = value.ToString(); if (string_value.GetLength()) { memory->Write(string_value.GetChars(), string_value.GetLength()); } payload_size += string_value.GetLength(); break; } case AP4_MetaData::Value::TYPE_INT_08_BE: { m_DataType = DATA_TYPE_SIGNED_INT_BE; AP4_UI08 int_value = (AP4_UI08)value.ToInteger(); memory->Write(&int_value, 1); payload_size += 1; break; } case AP4_MetaData::Value::TYPE_INT_16_BE: { m_DataType = DATA_TYPE_SIGNED_INT_BE; AP4_UI16 int_value = (AP4_UI16)value.ToInteger(); memory->Write(&int_value, 2); payload_size += 2; break; } case AP4_MetaData::Value::TYPE_INT_32_BE: { m_DataType = DATA_TYPE_SIGNED_INT_BE; AP4_UI32 int_value = (AP4_UI32)value.ToInteger(); memory->Write(&int_value, 4); payload_size += 4; break; } case AP4_MetaData::Value::TYPE_JPEG: m_DataType = DATA_TYPE_JPEG; // FALLTHROUGH case AP4_MetaData::Value::TYPE_GIF: if (m_DataType == DATA_TYPE_BINARY) m_DataType = DATA_TYPE_GIF; // FALLTHROUGH case AP4_MetaData::Value::TYPE_BINARY: { AP4_DataBuffer buffer; value.ToBytes(buffer); if (buffer.GetDataSize()) { memory->Write(buffer.GetData(), buffer.GetDataSize()); } payload_size += buffer.GetDataSize(); break; } default: break; } const AP4_String& language = value.GetLanguage(); if (language == "en") { m_DataLang = LANGUAGE_ENGLISH; } else { // default m_DataLang = LANGUAGE_ENGLISH; } m_Size32 += payload_size; } 
/*---------------------------------------------------------------------- | AP4_DataAtom::AP4_DataAtom +---------------------------------------------------------------------*/ AP4_DataAtom::AP4_DataAtom(AP4_UI32 size, AP4_ByteStream& stream) : AP4_Atom(AP4_ATOM_TYPE_DATA, size), m_Source(NULL) { if (size < AP4_ATOM_HEADER_SIZE+8) return; AP4_UI32 i; stream.ReadUI32(i); m_DataType = (DataType)i; stream.ReadUI32(i); m_DataLang = (DataLang)i; // the stream for the data is a substream of this source AP4_Position data_offset; stream.Tell(data_offset); AP4_Size data_size = size-AP4_ATOM_HEADER_SIZE-8; m_Source = new AP4_SubStream(stream, data_offset, data_size); } /*---------------------------------------------------------------------- | AP4_DataAtom::~AP4_DataAtom +---------------------------------------------------------------------*/ AP4_DataAtom::~AP4_DataAtom() { delete(m_Source); } /*---------------------------------------------------------------------- | AP4_DataAtom::GetValueType +---------------------------------------------------------------------*/ AP4_MetaData::Value::Type AP4_DataAtom::GetValueType() { switch (m_DataType) { case DATA_TYPE_BINARY: return AP4_MetaData::Value::TYPE_BINARY; case DATA_TYPE_SIGNED_INT_BE: switch (m_Size32-16) { case 1: return AP4_MetaData::Value::TYPE_INT_08_BE; case 2: return AP4_MetaData::Value::TYPE_INT_16_BE; case 4: return AP4_MetaData::Value::TYPE_INT_32_BE; default: return AP4_MetaData::Value::TYPE_BINARY; } break; case DATA_TYPE_STRING_UTF_8: return AP4_MetaData::Value::TYPE_STRING_UTF_8; case DATA_TYPE_STRING_UTF_16: return AP4_MetaData::Value::TYPE_STRING_UTF_16; case DATA_TYPE_STRING_PASCAL: return AP4_MetaData::Value::TYPE_STRING_PASCAL; case DATA_TYPE_GIF: return AP4_MetaData::Value::TYPE_GIF; case DATA_TYPE_JPEG: return AP4_MetaData::Value::TYPE_JPEG; default: return AP4_MetaData::Value::TYPE_BINARY; } // unreachable - return AP4_MetaData::Value::TYPE_BINARY; } 
/*---------------------------------------------------------------------- | AP4_DataAtom::WriteFields +---------------------------------------------------------------------*/ AP4_Result AP4_DataAtom::WriteFields(AP4_ByteStream& stream) { stream.WriteUI32(m_DataType); stream.WriteUI32(m_DataLang); if (m_Source) { AP4_LargeSize size = 0; m_Source->GetSize(size); m_Source->Seek(0); m_Source->CopyTo(stream, size); } return AP4_SUCCESS; } /*---------------------------------------------------------------------- | AP4_DataAtom::InspectFields +---------------------------------------------------------------------*/ AP4_Result AP4_DataAtom::InspectFields(AP4_AtomInspector& inspector) { inspector.AddField("type", m_DataType); inspector.AddField("lang", m_DataLang); if (m_DataType == DATA_TYPE_STRING_UTF_8) { AP4_String* str; if (AP4_SUCCEEDED(LoadString(str))) { inspector.AddField("value", str->GetChars()); delete str; } } else if (m_DataType == DATA_TYPE_SIGNED_INT_BE) { long value; if (AP4_SUCCEEDED(LoadInteger(value))) { inspector.AddField("value", value); } } return AP4_SUCCESS; } /*---------------------------------------------------------------------- | AP4_DataAtom::LoadString +---------------------------------------------------------------------*/ AP4_Result AP4_DataAtom::LoadString(AP4_String*& string) { if (m_Source == NULL) { string = new AP4_String(); return AP4_SUCCESS; } else { // create a string with enough capactiy for the data AP4_LargeSize size = 0; m_Source->GetSize(size); if (size > AP4_DATA_ATOM_MAX_SIZE) return AP4_ERROR_OUT_OF_RANGE; string = new AP4_String((AP4_Size)size); // read from the start of the stream m_Source->Seek(0); AP4_Result result = m_Source->Read(string->UseChars(), (AP4_Size)size); if (AP4_FAILED(result)) { delete string; string = NULL; } return result; } } /*---------------------------------------------------------------------- | AP4_DataAtom::LoadBytes +---------------------------------------------------------------------*/ AP4_Result 
AP4_DataAtom::LoadBytes(AP4_DataBuffer& bytes) { if (m_Source == NULL) { bytes.SetDataSize(0); return AP4_SUCCESS; } AP4_LargeSize size = 0; m_Source->GetSize(size); if (size > AP4_DATA_ATOM_MAX_SIZE) return AP4_ERROR_OUT_OF_RANGE; bytes.SetDataSize((AP4_Size)size); m_Source->Seek(0); AP4_Result result = m_Source->Read(bytes.UseData(), (AP4_Size)size); if (AP4_FAILED(result)) { bytes.SetDataSize(0); } return result; } /*---------------------------------------------------------------------- | AP4_DataAtom::LoadInteger +---------------------------------------------------------------------*/ AP4_Result AP4_DataAtom::LoadInteger(long& value) { AP4_Result result = AP4_FAILURE; value = 0; if (m_Source == NULL) return AP4_SUCCESS; AP4_LargeSize size = 0; m_Source->GetSize(size); if (size > 4) { return AP4_ERROR_OUT_OF_RANGE; } unsigned char bytes[4]; m_Source->Seek(0); m_Source->Read(bytes, (AP4_Size)size); result = AP4_SUCCESS; switch (size) { case 1: value = bytes[0]; break; case 2: value = AP4_BytesToInt16BE(bytes); break; case 4: value = AP4_BytesToInt32BE(bytes); break; default: value = 0; result = AP4_ERROR_INVALID_FORMAT; break; } return result; } /*---------------------------------------------------------------------- | AP4_MetaDataStringAtom::AP4_MetaDataStringAtom +---------------------------------------------------------------------*/ AP4_MetaDataStringAtom::AP4_MetaDataStringAtom(Type type, const char* value) : AP4_Atom(type, AP4_ATOM_HEADER_SIZE), m_Reserved(0), m_Value(value) { m_Size32 += 4+m_Value.GetLength(); } /*---------------------------------------------------------------------- | AP4_MetaDataStringAtom::AP4_MetaDataStringAtom +---------------------------------------------------------------------*/ AP4_MetaDataStringAtom::AP4_MetaDataStringAtom(Type type, AP4_UI32 size, AP4_ByteStream& stream) : AP4_Atom(type, size), m_Reserved(0), m_Value((AP4_Size)(size-AP4_ATOM_HEADER_SIZE-4)) { stream.ReadUI32(m_Reserved); stream.Read(m_Value.UseChars(), 
m_Value.GetLength()); } /*---------------------------------------------------------------------- | AP4_MetaDataStringAtom::WriteFields +---------------------------------------------------------------------*/ AP4_Result AP4_MetaDataStringAtom::WriteFields(AP4_ByteStream& stream) { stream.WriteUI32(m_Reserved); return stream.Write(m_Value.GetChars(), m_Value.GetLength()); } /*---------------------------------------------------------------------- | AP4_MetaDataStringAtom::InspectFields +---------------------------------------------------------------------*/ AP4_Result AP4_MetaDataStringAtom::InspectFields(AP4_AtomInspector& inspector) { inspector.AddField("value", m_Value.GetChars()); return AP4_SUCCESS; } /*---------------------------------------------------------------------- | AP4_3GppLocalizedStringAtom::Create +---------------------------------------------------------------------*/ AP4_3GppLocalizedStringAtom* AP4_3GppLocalizedStringAtom::Create(Type type, AP4_UI32 size, AP4_ByteStream& stream) { AP4_UI08 version; AP4_UI32 flags; if (AP4_FAILED(AP4_Atom::ReadFullHeader(stream, version, flags))) return NULL; if (version != 0) return NULL; return new AP4_3GppLocalizedStringAtom(type, size, version, flags, stream); } /*---------------------------------------------------------------------- | AP4_3GppLocalizedStringAtom::AP4_3GppLocalizedStringAtom +---------------------------------------------------------------------*/ AP4_3GppLocalizedStringAtom::AP4_3GppLocalizedStringAtom(Type type, const char* language, const char* value) : AP4_Atom(type, AP4_FULL_ATOM_HEADER_SIZE+2, 0, 0), m_Value(value) { m_Language[0] = language[0]; m_Language[1] = language[1]; m_Language[2] = language[2]; m_Language[3] = language[3]; m_Size32 += m_Value.GetLength()+1; } /*---------------------------------------------------------------------- | AP4_3GppLocalizedStringAtom::AP4_3GppLocalizedStringAtom +---------------------------------------------------------------------*/ 
AP4_3GppLocalizedStringAtom::AP4_3GppLocalizedStringAtom(Type type, AP4_UI32 size, AP4_UI08 version, AP4_UI32 flags, AP4_ByteStream& stream) : AP4_Atom(type, size, version, flags) { // read the language code AP4_UI16 packed_language; stream.ReadUI16(packed_language); m_Language[0] = 0x60+((packed_language>>10)&0x1F); m_Language[1] = 0x60+((packed_language>> 5)&0x1F); m_Language[2] = 0x60+((packed_language )&0x1F); m_Language[3] = '\0'; // read the value (should be a NULL-terminated string, but we'll // allow for strings that are not terminated) if (size > AP4_FULL_ATOM_HEADER_SIZE+2) { AP4_UI32 value_size = size-(AP4_FULL_ATOM_HEADER_SIZE+2); char* value = new char[value_size]; stream.Read(value, value_size); m_Value.Assign(value, value_size); delete[] value; } } /*---------------------------------------------------------------------- | AP4_3GppLocalizedStringAtom::WriteFields +---------------------------------------------------------------------*/ AP4_Result AP4_3GppLocalizedStringAtom::WriteFields(AP4_ByteStream& stream) { AP4_UI16 packed_language = ((m_Language[0]-0x60)<<10) | ((m_Language[1]-0x60)<< 5) | ((m_Language[2]-0x60)); stream.WriteUI16(packed_language); AP4_Size payload_size = (AP4_UI32)GetSize()-GetHeaderSize(); if (payload_size < 2) return AP4_ERROR_INVALID_FORMAT; AP4_Size value_size = m_Value.GetLength()+1; if (value_size > payload_size-2) { value_size = payload_size-2; } stream.Write(m_Value.GetChars(), value_size); for (unsigned int i=value_size; i<payload_size-2; i++) { stream.WriteUI08(0); } return AP4_SUCCESS; } /*---------------------------------------------------------------------- | AP4_3GppLocalizedStringAtom::InspectFields +---------------------------------------------------------------------*/ AP4_Result AP4_3GppLocalizedStringAtom::InspectFields(AP4_AtomInspector& inspector) { inspector.AddField("language", GetLanguage()); inspector.AddField("value", m_Value.GetChars()); return AP4_SUCCESS; } 
/*---------------------------------------------------------------------- | AP4_DcfStringAtom::Create +---------------------------------------------------------------------*/ AP4_DcfStringAtom* AP4_DcfStringAtom::Create(Type type, AP4_UI32 size, AP4_ByteStream& stream) { AP4_UI08 version; AP4_UI32 flags; if (AP4_FAILED(AP4_Atom::ReadFullHeader(stream, version, flags))) return NULL; if (version != 0) return NULL; return new AP4_DcfStringAtom(type, size, version, flags, stream); } /*---------------------------------------------------------------------- | AP4_DcfStringAtom::AP4_DcfStringAtom +---------------------------------------------------------------------*/ AP4_DcfStringAtom::AP4_DcfStringAtom(Type type, const char* value) : AP4_Atom(type, AP4_FULL_ATOM_HEADER_SIZE, 0, 0), m_Value(value) { m_Size32 += m_Value.GetLength(); } /*---------------------------------------------------------------------- | AP4_DcfStringAtom::AP4_DcfStringAtom +---------------------------------------------------------------------*/ AP4_DcfStringAtom::AP4_DcfStringAtom(Type type, AP4_UI32 size, AP4_UI08 version, AP4_UI32 flags, AP4_ByteStream& stream) : AP4_Atom(type, size, version, flags) { if (size > AP4_FULL_ATOM_HEADER_SIZE) { AP4_UI32 value_size = size-(AP4_FULL_ATOM_HEADER_SIZE); char* value = new char[value_size]; stream.Read(value, value_size); m_Value.Assign(value, value_size); delete[] value; } } /*---------------------------------------------------------------------- | AP4_DcfStringAtom::WriteFields +---------------------------------------------------------------------*/ AP4_Result AP4_DcfStringAtom::WriteFields(AP4_ByteStream& stream) { if (m_Value.GetLength()) stream.Write(m_Value.GetChars(), m_Value.GetLength()); return AP4_SUCCESS; } /*---------------------------------------------------------------------- | AP4_DcfStringAtom::InspectFields +---------------------------------------------------------------------*/ AP4_Result AP4_DcfStringAtom::InspectFields(AP4_AtomInspector& 
inspector) { inspector.AddField("value", m_Value.GetChars()); return AP4_SUCCESS; } /*---------------------------------------------------------------------- | AP4_DcfdAtom::Create +---------------------------------------------------------------------*/ AP4_DcfdAtom* AP4_DcfdAtom::Create(AP4_UI32 size, AP4_ByteStream& stream) { AP4_UI08 version; AP4_UI32 flags; if (AP4_FAILED(AP4_Atom::ReadFullHeader(stream, version, flags))) return NULL; if (version != 0) return NULL; if (size != AP4_FULL_ATOM_HEADER_SIZE+4) return NULL; return new AP4_DcfdAtom(version, flags, stream); } /*---------------------------------------------------------------------- | AP4_DcfdAtom::AP4_DcfdAtom +---------------------------------------------------------------------*/ AP4_DcfdAtom::AP4_DcfdAtom(AP4_UI08 version, AP4_UI32 flags, AP4_ByteStream& stream) : AP4_Atom(AP4_ATOM_TYPE_DCFD, AP4_FULL_ATOM_HEADER_SIZE+4, version, flags), m_Duration(0) { stream.ReadUI32(m_Duration); } /*---------------------------------------------------------------------- | AP4_DcfdAtom::AP4_DcfdAtom +---------------------------------------------------------------------*/ AP4_DcfdAtom::AP4_DcfdAtom(AP4_UI32 duration) : AP4_Atom(AP4_ATOM_TYPE_DCFD, AP4_FULL_ATOM_HEADER_SIZE+4, 0, 0), m_Duration(duration) { } /*---------------------------------------------------------------------- | AP4_DcfdAtom::WriteFields +---------------------------------------------------------------------*/ AP4_Result AP4_DcfdAtom::WriteFields(AP4_ByteStream& stream) { stream.WriteUI32(m_Duration); return AP4_SUCCESS; } /*---------------------------------------------------------------------- | AP4_DcfdAtom::InspectFields +---------------------------------------------------------------------*/ AP4_Result AP4_DcfdAtom::InspectFields(AP4_AtomInspector& inspector) { inspector.AddField("duration", m_Duration); return AP4_SUCCESS; }
AP4_DataAtom::AP4_DataAtom(AP4_UI32 size, AP4_ByteStream& stream) : AP4_Atom(AP4_ATOM_TYPE_DATA, size) { if (size < AP4_ATOM_HEADER_SIZE+8) return; AP4_UI32 i; stream.ReadUI32(i); m_DataType = (DataType)i; stream.ReadUI32(i); m_DataLang = (DataLang)i; // the stream for the data is a substream of this source AP4_Position data_offset; stream.Tell(data_offset); AP4_Size data_size = size-AP4_ATOM_HEADER_SIZE-8; m_Source = new AP4_SubStream(stream, data_offset, data_size); }
AP4_DataAtom::AP4_DataAtom(AP4_UI32 size, AP4_ByteStream& stream) : AP4_Atom(AP4_ATOM_TYPE_DATA, size), m_Source(NULL) { if (size < AP4_ATOM_HEADER_SIZE+8) return; AP4_UI32 i; stream.ReadUI32(i); m_DataType = (DataType)i; stream.ReadUI32(i); m_DataLang = (DataLang)i; // the stream for the data is a substream of this source AP4_Position data_offset; stream.Tell(data_offset); AP4_Size data_size = size-AP4_ATOM_HEADER_SIZE-8; m_Source = new AP4_SubStream(stream, data_offset, data_size); }
{'added': [(1261, ' m_DataType(DATA_TYPE_BINARY),'), (1262, ' m_Source(NULL)'), (1338, ' AP4_Atom(AP4_ATOM_TYPE_DATA, size),'), (1339, ' m_Source(NULL)')], 'deleted': [(1261, ' m_DataType(DATA_TYPE_BINARY)'), (1337, ' AP4_Atom(AP4_ATOM_TYPE_DATA, size)')]}
4
2
1,275
7,485
12
92
2
https://github.com/axiomatic-systems/Bento4
CVE-2017-14641
CWE-476
2,233
lgc.c
C
youngcollection
/* ** $Id: lgc.c $ ** Garbage Collector ** See Copyright Notice in lua.h */ #define lgc_c #define LUA_CORE #include "lprefix.h" #include <stdio.h> #include <string.h> #include "lua.h" #include "ldebug.h" #include "ldo.h" #include "lfunc.h" #include "lgc.h" #include "lmem.h" #include "lobject.h" #include "lstate.h" #include "lstring.h" #include "ltable.h" #include "ltm.h" /* ** Maximum number of elements to sweep in each single step. ** (Large enough to dissipate fixed overheads but small enough ** to allow small steps for the collector.) */ #define GCSWEEPMAX 100 /* ** Maximum number of finalizers to call in each single step. */ #define GCFINMAX 10 /* ** Cost of calling one finalizer. */ #define GCFINALIZECOST 50 /* ** The equivalent, in bytes, of one unit of "work" (visiting a slot, ** sweeping an object, etc.) */ #define WORK2MEM sizeof(TValue) /* ** macro to adjust 'pause': 'pause' is actually used like ** 'pause / PAUSEADJ' (value chosen by tests) */ #define PAUSEADJ 100 /* mask to erase all color bits (plus gen. related stuff) */ #define maskcolors (~(bitmask(BLACKBIT) | WHITEBITS | AGEBITS)) /* macro to erase all color bits then sets only the current white bit */ #define makewhite(g,x) \ (x->marked = cast_byte((x->marked & maskcolors) | luaC_white(g))) #define white2gray(x) resetbits(x->marked, WHITEBITS) #define black2gray(x) resetbit(x->marked, BLACKBIT) #define valiswhite(x) (iscollectable(x) && iswhite(gcvalue(x))) #define keyiswhite(n) (keyiscollectable(n) && iswhite(gckey(n))) #define checkconsistency(obj) \ lua_longassert(!iscollectable(obj) || righttt(obj)) /* ** Protected access to objects in values */ #define gcvalueN(o) (iscollectable(o) ? 
gcvalue(o) : NULL) #define markvalue(g,o) { checkconsistency(o); \ if (valiswhite(o)) reallymarkobject(g,gcvalue(o)); } #define markkey(g, n) { if keyiswhite(n) reallymarkobject(g,gckey(n)); } #define markobject(g,t) { if (iswhite(t)) reallymarkobject(g, obj2gco(t)); } /* ** mark an object that can be NULL (either because it is really optional, ** or it was stripped as debug info, or inside an uncompleted structure) */ #define markobjectN(g,t) { if (t) markobject(g,t); } static void reallymarkobject (global_State *g, GCObject *o); static lu_mem atomic (lua_State *L); static void entersweep (lua_State *L); /* ** {====================================================== ** Generic functions ** ======================================================= */ /* ** one after last element in a hash array */ #define gnodelast(h) gnode(h, cast_sizet(sizenode(h))) static GCObject **getgclist (GCObject *o) { switch (o->tt) { case LUA_VTABLE: return &gco2t(o)->gclist; case LUA_VLCL: return &gco2lcl(o)->gclist; case LUA_VCCL: return &gco2ccl(o)->gclist; case LUA_VTHREAD: return &gco2th(o)->gclist; case LUA_VPROTO: return &gco2p(o)->gclist; case LUA_VUSERDATA: { Udata *u = gco2u(o); lua_assert(u->nuvalue > 0); return &u->gclist; } default: lua_assert(0); return 0; } } /* ** Link a collectable object 'o' with a known type into list pointed by 'p'. */ #define linkgclist(o,p) ((o)->gclist = (p), (p) = obj2gco(o)) /* ** Link a generic collectable object 'o' into list pointed by 'p'. */ #define linkobjgclist(o,p) (*getgclist(o) = (p), (p) = obj2gco(o)) /* ** Clear keys for empty entries in tables. If entry is empty ** and its key is not marked, mark its entry as dead. This allows the ** collection of the key, but keeps its entry in the table (its removal ** could break a chain). The main feature of a dead key is that it must ** be different from any other value, to do not disturb searches. 
** Other places never manipulate dead keys, because its associated empty ** value is enough to signal that the entry is logically empty. */ static void clearkey (Node *n) { lua_assert(isempty(gval(n))); if (keyiswhite(n)) setdeadkey(n); /* unused and unmarked key; remove it */ } /* ** tells whether a key or value can be cleared from a weak ** table. Non-collectable objects are never removed from weak ** tables. Strings behave as 'values', so are never removed too. for ** other objects: if really collected, cannot keep them; for objects ** being finalized, keep them in keys, but not in values */ static int iscleared (global_State *g, const GCObject *o) { if (o == NULL) return 0; /* non-collectable value */ else if (novariant(o->tt) == LUA_TSTRING) { markobject(g, o); /* strings are 'values', so are never weak */ return 0; } else return iswhite(o); } /* ** barrier that moves collector forward, that is, mark the white object ** 'v' being pointed by the black object 'o'. (If in sweep phase, clear ** the black object to white [sweep it] to avoid other barrier calls for ** this same object.) In the generational mode, 'v' must also become ** old, if 'o' is old; however, it cannot be changed directly to OLD, ** because it may still point to non-old objects. So, it is marked as ** OLD0. In the next cycle it will become OLD1, and in the next it ** will finally become OLD (regular old). */ void luaC_barrier_ (lua_State *L, GCObject *o, GCObject *v) { global_State *g = G(L); lua_assert(isblack(o) && iswhite(v) && !isdead(g, v) && !isdead(g, o)); if (keepinvariant(g)) { /* must keep invariant? */ reallymarkobject(g, v); /* restore invariant */ if (isold(o)) { lua_assert(!isold(v)); /* white object could not be old */ setage(v, G_OLD0); /* restore generational invariant */ } } else { /* sweep phase */ lua_assert(issweepphase(g)); makewhite(g, o); /* mark main obj. 
as white to avoid other barriers */ } } /* ** barrier that moves collector backward, that is, mark the black object ** pointing to a white object as gray again. */ void luaC_barrierback_ (lua_State *L, GCObject *o) { global_State *g = G(L); lua_assert(isblack(o) && !isdead(g, o)); lua_assert(g->gckind != KGC_GEN || (isold(o) && getage(o) != G_TOUCHED1)); if (getage(o) != G_TOUCHED2) /* not already in gray list? */ linkobjgclist(o, g->grayagain); /* link it in 'grayagain' */ black2gray(o); /* make object gray (again) */ setage(o, G_TOUCHED1); /* touched in current cycle */ } void luaC_fix (lua_State *L, GCObject *o) { global_State *g = G(L); lua_assert(g->allgc == o); /* object must be 1st in 'allgc' list! */ white2gray(o); /* they will be gray forever */ setage(o, G_OLD); /* and old forever */ g->allgc = o->next; /* remove object from 'allgc' list */ o->next = g->fixedgc; /* link it to 'fixedgc' list */ g->fixedgc = o; } /* ** create a new collectable object (with given type and size) and link ** it to 'allgc' list. */ GCObject *luaC_newobj (lua_State *L, int tt, size_t sz) { global_State *g = G(L); GCObject *o = cast(GCObject *, luaM_newobject(L, novariant(tt), sz)); o->marked = luaC_white(g); o->tt = tt; o->next = g->allgc; g->allgc = o; return o; } /* }====================================================== */ /* ** {====================================================== ** Mark functions ** ======================================================= */ /* ** Mark an object. Userdata, strings, and closed upvalues are visited ** and turned black here. Other objects are marked gray and added ** to appropriate list to be visited (and turned black) later. (Open ** upvalues are already linked in 'headuv' list. They are kept gray ** to avoid barriers, as their values will be revisited by the thread.) 
*/ static void reallymarkobject (global_State *g, GCObject *o) { white2gray(o); switch (o->tt) { case LUA_VSHRSTR: case LUA_VLNGSTR: { gray2black(o); break; } case LUA_VUPVAL: { UpVal *uv = gco2upv(o); if (!upisopen(uv)) /* open upvalues are kept gray */ gray2black(o); markvalue(g, uv->v); /* mark its content */ break; } case LUA_VUSERDATA: { Udata *u = gco2u(o); if (u->nuvalue == 0) { /* no user values? */ markobjectN(g, u->metatable); /* mark its metatable */ gray2black(o); /* nothing else to mark */ break; } /* else... */ } /* FALLTHROUGH */ case LUA_VLCL: case LUA_VCCL: case LUA_VTABLE: case LUA_VTHREAD: case LUA_VPROTO: { linkobjgclist(o, g->gray); break; } default: lua_assert(0); break; } } /* ** mark metamethods for basic types */ static void markmt (global_State *g) { int i; for (i=0; i < LUA_NUMTAGS; i++) markobjectN(g, g->mt[i]); } /* ** mark all objects in list of being-finalized */ static lu_mem markbeingfnz (global_State *g) { GCObject *o; lu_mem count = 0; for (o = g->tobefnz; o != NULL; o = o->next) { count++; markobject(g, o); } return count; } /* ** Mark all values stored in marked open upvalues from non-marked threads. ** (Values from marked threads were already marked when traversing the ** thread.) Remove from the list threads that no longer have upvalues and ** not-marked threads. */ static int remarkupvals (global_State *g) { lua_State *thread; lua_State **p = &g->twups; int work = 0; while ((thread = *p) != NULL) { work++; lua_assert(!isblack(thread)); /* threads are never black */ if (isgray(thread) && thread->openupval != NULL) p = &thread->twups; /* keep marked thread with upvalues in the list */ else { /* thread is not marked or without upvalues */ UpVal *uv; *p = thread->twups; /* remove thread from the list */ thread->twups = thread; /* mark that it is out of list */ for (uv = thread->openupval; uv != NULL; uv = uv->u.open.next) { work++; if (!iswhite(uv)) /* upvalue already visited? 
*/ markvalue(g, uv->v); /* mark its value */ } } } return work; } /* ** mark root set and reset all gray lists, to start a new collection */ static void restartcollection (global_State *g) { g->gray = g->grayagain = NULL; g->weak = g->allweak = g->ephemeron = NULL; markobject(g, g->mainthread); markvalue(g, &g->l_registry); markmt(g); markbeingfnz(g); /* mark any finalizing object left from previous cycle */ } /* }====================================================== */ /* ** {====================================================== ** Traverse functions ** ======================================================= */ /* ** Traverse a table with weak values and link it to proper list. During ** propagate phase, keep it in 'grayagain' list, to be revisited in the ** atomic phase. In the atomic phase, if table has any white value, ** put it in 'weak' list, to be cleared. */ static void traverseweakvalue (global_State *g, Table *h) { Node *n, *limit = gnodelast(h); /* if there is array part, assume it may have white values (it is not worth traversing it now just to check) */ int hasclears = (h->alimit > 0); for (n = gnode(h, 0); n < limit; n++) { /* traverse hash part */ if (isempty(gval(n))) /* entry is empty? */ clearkey(n); /* clear its key */ else { lua_assert(!keyisnil(n)); markkey(g, n); if (!hasclears && iscleared(g, gcvalueN(gval(n)))) /* a white value? */ hasclears = 1; /* table will have to be cleared */ } } if (g->gcstate == GCSatomic && hasclears) linkgclist(h, g->weak); /* has to be cleared later */ else linkgclist(h, g->grayagain); /* must retraverse it in atomic phase */ } /* ** Traverse an ephemeron table and link it to proper list. Returns true ** iff any object was marked during this traversal (which implies that ** convergence has to continue). During propagation phase, keep table ** in 'grayagain' list, to be visited again in the atomic phase. 
In ** the atomic phase, if table has any white->white entry, it has to ** be revisited during ephemeron convergence (as that key may turn ** black). Otherwise, if it has any white key, table has to be cleared ** (in the atomic phase). In generational mode, it (like all visited ** tables) must be kept in some gray list for post-processing. */ static int traverseephemeron (global_State *g, Table *h, int inv) { int marked = 0; /* true if an object is marked in this traversal */ int hasclears = 0; /* true if table has white keys */ int hasww = 0; /* true if table has entry "white-key -> white-value" */ unsigned int i; unsigned int asize = luaH_realasize(h); unsigned int nsize = sizenode(h); /* traverse array part */ for (i = 0; i < asize; i++) { if (valiswhite(&h->array[i])) { marked = 1; reallymarkobject(g, gcvalue(&h->array[i])); } } /* traverse hash part; if 'inv', traverse descending (see 'convergeephemerons') */ for (i = 0; i < nsize; i++) { Node *n = inv ? gnode(h, nsize - 1 - i) : gnode(h, i); if (isempty(gval(n))) /* entry is empty? */ clearkey(n); /* clear its key */ else if (iscleared(g, gckeyN(n))) { /* key is not marked (yet)? */ hasclears = 1; /* table must be cleared */ if (valiswhite(gval(n))) /* value not marked yet? */ hasww = 1; /* white-white entry */ } else if (valiswhite(gval(n))) { /* value not marked yet? */ marked = 1; reallymarkobject(g, gcvalue(gval(n))); /* mark it now */ } } /* link table into proper list */ if (g->gcstate == GCSpropagate) linkgclist(h, g->grayagain); /* must retraverse it in atomic phase */ else if (hasww) /* table has white->white entries? */ linkgclist(h, g->ephemeron); /* have to propagate again */ else if (hasclears) /* table has white keys? 
*/ linkgclist(h, g->allweak); /* may have to clean white keys */ else if (g->gckind == KGC_GEN) linkgclist(h, g->grayagain); /* keep it in some list */ else gray2black(h); return marked; } static void traversestrongtable (global_State *g, Table *h) { Node *n, *limit = gnodelast(h); unsigned int i; unsigned int asize = luaH_realasize(h); for (i = 0; i < asize; i++) /* traverse array part */ markvalue(g, &h->array[i]); for (n = gnode(h, 0); n < limit; n++) { /* traverse hash part */ if (isempty(gval(n))) /* entry is empty? */ clearkey(n); /* clear its key */ else { lua_assert(!keyisnil(n)); markkey(g, n); markvalue(g, gval(n)); } } if (g->gckind == KGC_GEN) { linkgclist(h, g->grayagain); /* keep it in some gray list */ black2gray(h); } } static lu_mem traversetable (global_State *g, Table *h) { const char *weakkey, *weakvalue; const TValue *mode = gfasttm(g, h->metatable, TM_MODE); markobjectN(g, h->metatable); if (mode && ttisstring(mode) && /* is there a weak mode? */ (cast_void(weakkey = strchr(svalue(mode), 'k')), cast_void(weakvalue = strchr(svalue(mode), 'v')), (weakkey || weakvalue))) { /* is really weak? */ black2gray(h); /* keep table gray */ if (!weakkey) /* strong keys? */ traverseweakvalue(g, h); else if (!weakvalue) /* strong values? */ traverseephemeron(g, h, 0); else /* all weak */ linkgclist(h, g->allweak); /* nothing to traverse now */ } else /* not weak */ traversestrongtable(g, h); return 1 + h->alimit + 2 * allocsizenode(h); } static int traverseudata (global_State *g, Udata *u) { int i; markobjectN(g, u->metatable); /* mark its metatable */ for (i = 0; i < u->nuvalue; i++) markvalue(g, &u->uv[i].uv); if (g->gckind == KGC_GEN) { linkgclist(u, g->grayagain); /* keep it in some gray list */ black2gray(u); } return 1 + u->nuvalue; } /* ** Traverse a prototype. 
(While a prototype is being build, its ** arrays can be larger than needed; the extra slots are filled with ** NULL, so the use of 'markobjectN') */ static int traverseproto (global_State *g, Proto *f) { int i; markobjectN(g, f->source); for (i = 0; i < f->sizek; i++) /* mark literals */ markvalue(g, &f->k[i]); for (i = 0; i < f->sizeupvalues; i++) /* mark upvalue names */ markobjectN(g, f->upvalues[i].name); for (i = 0; i < f->sizep; i++) /* mark nested protos */ markobjectN(g, f->p[i]); for (i = 0; i < f->sizelocvars; i++) /* mark local-variable names */ markobjectN(g, f->locvars[i].varname); return 1 + f->sizek + f->sizeupvalues + f->sizep + f->sizelocvars; } static int traverseCclosure (global_State *g, CClosure *cl) { int i; for (i = 0; i < cl->nupvalues; i++) /* mark its upvalues */ markvalue(g, &cl->upvalue[i]); return 1 + cl->nupvalues; } /* ** Traverse a Lua closure, marking its prototype and its upvalues. ** (Both can be NULL while closure is being created.) */ static int traverseLclosure (global_State *g, LClosure *cl) { int i; markobjectN(g, cl->p); /* mark its prototype */ for (i = 0; i < cl->nupvalues; i++) { /* visit its upvalues */ UpVal *uv = cl->upvals[i]; markobjectN(g, uv); /* mark upvalue */ } return 1 + cl->nupvalues; } /* ** Traverse a thread, marking the elements in the stack up to its top ** and cleaning the rest of the stack in the final traversal. ** That ensures that the entire stack have valid (non-dead) objects. */ static int traversethread (global_State *g, lua_State *th) { UpVal *uv; StkId o = th->stack; if (o == NULL) return 1; /* stack not completely built yet */ lua_assert(g->gcstate == GCSatomic || th->openupval == NULL || isintwups(th)); for (; o < th->top; o++) /* mark live elements in the stack */ markvalue(g, s2v(o)); for (uv = th->openupval; uv != NULL; uv = uv->u.open.next) markobject(g, uv); /* open upvalues cannot be collected */ if (g->gcstate == GCSatomic) { /* final traversal? 
*/
    StkId lim = th->stack + th->stacksize;  /* real end of stack */
    for (; o < lim; o++)  /* clear not-marked stack slice */
      setnilvalue(s2v(o));
    /* 'remarkupvals' may have removed thread from 'twups' list */
    if (!isintwups(th) && th->openupval != NULL) {
      th->twups = g->twups;  /* link it back to the list */
      g->twups = th;
    }
  }
  else if (!g->gcemergency)
    luaD_shrinkstack(th);  /* do not change stack in emergency cycle */
  return 1 + th->stacksize;
}


/*
** traverse one gray object, turning it to black (except for threads,
** which are always gray).  Returns the amount of work done while
** traversing the object (used by the caller to account GC "debt").
*/
static lu_mem propagatemark (global_State *g) {
  GCObject *o = g->gray;
  gray2black(o);
  g->gray = *getgclist(o);  /* remove from 'gray' list */
  switch (o->tt) {
    case LUA_VTABLE: return traversetable(g, gco2t(o));
    case LUA_VUSERDATA: return traverseudata(g, gco2u(o));
    case LUA_VLCL: return traverseLclosure(g, gco2lcl(o));
    case LUA_VCCL: return traverseCclosure(g, gco2ccl(o));
    case LUA_VPROTO: return traverseproto(g, gco2p(o));
    case LUA_VTHREAD: {  /* threads stay gray: re-link and revisit in atomic */
      lua_State *th = gco2th(o);
      linkgclist(th, g->grayagain);  /* insert into 'grayagain' list */
      black2gray(o);
      return traversethread(g, th);
    }
    default: lua_assert(0); return 0;
  }
}


/*
** Traverse every object in the 'gray' list until the list is empty.
** Returns the total work done, as accumulated from 'propagatemark'.
*/
static lu_mem propagateall (global_State *g) {
  lu_mem tot = 0;
  while (g->gray)
    tot += propagatemark(g);
  return tot;
}


/*
** Traverse all ephemeron tables propagating marks from keys to values.
** Repeat until it converges, that is, nothing new is marked. 'dir'
** inverts the direction of the traversals, trying to speed up
** convergence on chains in the same table.
**
*/
static void convergeephemerons (global_State *g) {
  int changed;
  int dir = 0;
  do {
    GCObject *w;
    GCObject *next = g->ephemeron;  /* get ephemeron list */
    g->ephemeron = NULL;  /* tables may return to this list when traversed */
    changed = 0;
    while ((w = next) != NULL) {  /* for each ephemeron table */
      next = gco2t(w)->gclist;  /* list is rebuilt during loop */
      if (traverseephemeron(g, gco2t(w), dir)) {  /* marked some value?
*/ propagateall(g); /* propagate changes */ changed = 1; /* will have to revisit all ephemeron tables */ } } dir = !dir; /* invert direction next time */ } while (changed); /* repeat until no more changes */ } /* }====================================================== */ /* ** {====================================================== ** Sweep Functions ** ======================================================= */ /* ** clear entries with unmarked keys from all weaktables in list 'l' */ static void clearbykeys (global_State *g, GCObject *l) { for (; l; l = gco2t(l)->gclist) { Table *h = gco2t(l); Node *limit = gnodelast(h); Node *n; for (n = gnode(h, 0); n < limit; n++) { if (iscleared(g, gckeyN(n))) /* unmarked key? */ setempty(gval(n)); /* remove entry */ if (isempty(gval(n))) /* is entry empty? */ clearkey(n); /* clear its key */ } } } /* ** clear entries with unmarked values from all weaktables in list 'l' up ** to element 'f' */ static void clearbyvalues (global_State *g, GCObject *l, GCObject *f) { for (; l != f; l = gco2t(l)->gclist) { Table *h = gco2t(l); Node *n, *limit = gnodelast(h); unsigned int i; unsigned int asize = luaH_realasize(h); for (i = 0; i < asize; i++) { TValue *o = &h->array[i]; if (iscleared(g, gcvalueN(o))) /* value was collected? */ setempty(o); /* remove entry */ } for (n = gnode(h, 0); n < limit; n++) { if (iscleared(g, gcvalueN(gval(n)))) /* unmarked value? */ setempty(gval(n)); /* remove entry */ if (isempty(gval(n))) /* is entry empty? 
*/
        clearkey(n);  /* clear its key */
    }
  }
}


/*
** Free an upvalue.  If it is still open, unlink it from its thread's
** open-upvalue list ('luaF_unlinkupval') before releasing the memory.
*/
static void freeupval (lua_State *L, UpVal *uv) {
  if (upisopen(uv))
    luaF_unlinkupval(uv);
  luaM_free(L, uv);
}


/*
** Free object 'o', dispatching on its type tag.  Short strings are
** also removed from the global string table before being freed.
*/
static void freeobj (lua_State *L, GCObject *o) {
  switch (o->tt) {
    case LUA_VPROTO:
      luaF_freeproto(L, gco2p(o));
      break;
    case LUA_VUPVAL:
      freeupval(L, gco2upv(o));
      break;
    case LUA_VLCL:
      luaM_freemem(L, o, sizeLclosure(gco2lcl(o)->nupvalues));
      break;
    case LUA_VCCL:
      luaM_freemem(L, o, sizeCclosure(gco2ccl(o)->nupvalues));
      break;
    case LUA_VTABLE:
      luaH_free(L, gco2t(o));
      break;
    case LUA_VTHREAD:
      luaE_freethread(L, gco2th(o));
      break;
    case LUA_VUSERDATA: {
      Udata *u = gco2u(o);
      luaM_freemem(L, o, sizeudata(u->nuvalue, u->len));
      break;
    }
    case LUA_VSHRSTR:
      luaS_remove(L, gco2ts(o));  /* remove it from hash table */
      luaM_freemem(L, o, sizelstring(gco2ts(o)->shrlen));
      break;
    case LUA_VLNGSTR:
      luaM_freemem(L, o, sizelstring(gco2ts(o)->u.lnglen));
      break;
    default: lua_assert(0);
  }
}


/*
** sweep at most 'countin' elements from a list of GCObjects erasing dead
** objects, where a dead object is one marked with the old (non current)
** white; change all non-dead objects back to white, preparing for next
** collection cycle. Return where to continue the traversal or NULL if
** list is finished. ('*countout' gets the number of elements traversed.)
*/
static GCObject **sweeplist (lua_State *L, GCObject **p, int countin,
                             int *countout) {
  global_State *g = G(L);
  int ow = otherwhite(g);
  int i;
  int white = luaC_white(g);  /* current white */
  for (i = 0; *p != NULL && i < countin; i++) {
    GCObject *curr = *p;
    int marked = curr->marked;
    if (isdeadm(ow, marked)) {  /* is 'curr' dead? */
      *p = curr->next;  /* remove 'curr' from list */
      freeobj(L, curr);  /* erase 'curr' */
    }
    else {  /* change mark to 'white' */
      curr->marked = cast_byte((marked & maskcolors) | white);
      p = &curr->next;  /* go to next element */
    }
  }
  if (countout)
    *countout = i;  /* number of elements traversed */
  return (*p == NULL) ?
NULL : p; } /* ** sweep a list until a live object (or end of list) */ static GCObject **sweeptolive (lua_State *L, GCObject **p) { GCObject **old = p; do { p = sweeplist(L, p, 1, NULL); } while (p == old); return p; } /* }====================================================== */ /* ** {====================================================== ** Finalization ** ======================================================= */ /* ** If possible, shrink string table. */ static void checkSizes (lua_State *L, global_State *g) { if (!g->gcemergency) { if (g->strt.nuse < g->strt.size / 4) { /* string table too big? */ l_mem olddebt = g->GCdebt; luaS_resize(L, g->strt.size / 2); g->GCestimate += g->GCdebt - olddebt; /* correct estimate */ } } } /* ** Get the next udata to be finalized from the 'tobefnz' list, and ** link it back into the 'allgc' list. */ static GCObject *udata2finalize (global_State *g) { GCObject *o = g->tobefnz; /* get first element */ lua_assert(tofinalize(o)); g->tobefnz = o->next; /* remove it from 'tobefnz' list */ o->next = g->allgc; /* return it to 'allgc' list */ g->allgc = o; resetbit(o->marked, FINALIZEDBIT); /* object is "normal" again */ if (issweepphase(g)) makewhite(g, o); /* "sweep" object */ return o; } static void dothecall (lua_State *L, void *ud) { UNUSED(ud); luaD_callnoyield(L, L->top - 2, 0); } static void GCTM (lua_State *L) { global_State *g = G(L); const TValue *tm; TValue v; lua_assert(!g->gcemergency); setgcovalue(L, &v, udata2finalize(g)); tm = luaT_gettmbyobj(L, &v, TM_GC); if (!notm(tm)) { /* is there a finalizer? */ int status; lu_byte oldah = L->allowhook; int running = g->gcrunning; L->allowhook = 0; /* stop debug hooks during GC metamethod */ g->gcrunning = 0; /* avoid GC steps */ setobj2s(L, L->top++, tm); /* push finalizer... */ setobj2s(L, L->top++, &v); /* ... 
and its argument */
    L->ci->callstatus |= CIST_FIN;  /* will run a finalizer */
    status = luaD_pcall(L, dothecall, NULL, savestack(L, L->top - 2), 0);
    L->ci->callstatus &= ~CIST_FIN;  /* not running a finalizer anymore */
    L->allowhook = oldah;  /* restore hooks */
    g->gcrunning = running;  /* restore state */
    if (unlikely(status != LUA_OK)) {  /* error while running __gc? */
      luaE_warnerror(L, "__gc metamethod");  /* report it as a warning */
      L->top--;  /* pops error object */
    }
  }
}


/*
** Call up to 'n' pending finalizers; returns the number of finalizers
** actually called (can be less than 'n' if 'tobefnz' runs out).
*/
static int runafewfinalizers (lua_State *L, int n) {
  global_State *g = G(L);
  int i;
  for (i = 0; i < n && g->tobefnz; i++)
    GCTM(L);  /* call one finalizer */
  return i;
}


/*
** call all pending finalizers
*/
static void callallpendingfinalizers (lua_State *L) {
  global_State *g = G(L);
  while (g->tobefnz)
    GCTM(L);
}


/*
** find last 'next' field in list 'p' list (to add elements in its end)
*/
static GCObject **findlast (GCObject **p) {
  while (*p != NULL)
    p = &(*p)->next;
  return p;
}


/*
** Move all unreachable objects (or 'all' objects) that need
** finalization from list 'finobj' to list 'tobefnz' (to be finalized).
** (Note that objects after 'finobjold' cannot be white, so they
** don't need to be traversed. In incremental mode, 'finobjold' is NULL,
** so the whole list is traversed.)
*/
static void separatetobefnz (global_State *g, int all) {
  GCObject *curr;
  GCObject **p = &g->finobj;
  GCObject **lastnext = findlast(&g->tobefnz);
  while ((curr = *p) != g->finobjold) {  /* traverse all finalizable objects */
    lua_assert(tofinalize(curr));
    if (!(iswhite(curr) || all))  /* not being collected? */
      p = &curr->next;  /* don't bother with it */
    else {
      if (curr == g->finobjsur)  /* removing 'finobjsur'?
*/ g->finobjsur = curr->next; /* correct it */ *p = curr->next; /* remove 'curr' from 'finobj' list */ curr->next = *lastnext; /* link at the end of 'tobefnz' list */ *lastnext = curr; lastnext = &curr->next; } } } /* ** if object 'o' has a finalizer, remove it from 'allgc' list (must ** search the list to find it) and link it in 'finobj' list. */ void luaC_checkfinalizer (lua_State *L, GCObject *o, Table *mt) { global_State *g = G(L); if (tofinalize(o) || /* obj. is already marked... */ gfasttm(g, mt, TM_GC) == NULL) /* or has no finalizer? */ return; /* nothing to be done */ else { /* move 'o' to 'finobj' list */ GCObject **p; if (issweepphase(g)) { makewhite(g, o); /* "sweep" object 'o' */ if (g->sweepgc == &o->next) /* should not remove 'sweepgc' object */ g->sweepgc = sweeptolive(L, g->sweepgc); /* change 'sweepgc' */ } else { /* correct pointers into 'allgc' list, if needed */ if (o == g->survival) g->survival = o->next; if (o == g->old) g->old = o->next; if (o == g->reallyold) g->reallyold = o->next; } /* search for pointer pointing to 'o' */ for (p = &g->allgc; *p != o; p = &(*p)->next) { /* empty */ } *p = o->next; /* remove 'o' from 'allgc' list */ o->next = g->finobj; /* link it in 'finobj' list */ g->finobj = o; l_setbit(o->marked, FINALIZEDBIT); /* mark it as such */ } } /* }====================================================== */ /* ** {====================================================== ** Generational Collector ** ======================================================= */ static void setpause (global_State *g); /* mask to erase all color bits, not changing gen-related stuff */ #define maskgencolors (~(bitmask(BLACKBIT) | WHITEBITS)) /* ** Sweep a list of objects, deleting dead ones and turning ** the non dead to old (without changing their colors). */ static void sweep2old (lua_State *L, GCObject **p) { GCObject *curr; while ((curr = *p) != NULL) { if (iswhite(curr)) { /* is 'curr' dead? 
*/ lua_assert(isdead(G(L), curr)); *p = curr->next; /* remove 'curr' from list */ freeobj(L, curr); /* erase 'curr' */ } else { /* all surviving objects become old */ setage(curr, G_OLD); p = &curr->next; /* go to next element */ } } } /* ** Sweep for generational mode. Delete dead objects. (Because the ** collection is not incremental, there are no "new white" objects ** during the sweep. So, any white object must be dead.) For ** non-dead objects, advance their ages and clear the color of ** new objects. (Old objects keep their colors.) */ static GCObject **sweepgen (lua_State *L, global_State *g, GCObject **p, GCObject *limit) { static const lu_byte nextage[] = { G_SURVIVAL, /* from G_NEW */ G_OLD1, /* from G_SURVIVAL */ G_OLD1, /* from G_OLD0 */ G_OLD, /* from G_OLD1 */ G_OLD, /* from G_OLD (do not change) */ G_TOUCHED1, /* from G_TOUCHED1 (do not change) */ G_TOUCHED2 /* from G_TOUCHED2 (do not change) */ }; int white = luaC_white(g); GCObject *curr; while ((curr = *p) != limit) { if (iswhite(curr)) { /* is 'curr' dead? */ lua_assert(!isold(curr) && isdead(g, curr)); *p = curr->next; /* remove 'curr' from list */ freeobj(L, curr); /* erase 'curr' */ } else { /* correct mark and age */ if (getage(curr) == G_NEW) curr->marked = cast_byte((curr->marked & maskgencolors) | white); setage(curr, nextage[getage(curr)]); p = &curr->next; /* go to next element */ } } return p; } /* ** Traverse a list making all its elements white and clearing their ** age. */ static void whitelist (global_State *g, GCObject *p) { int white = luaC_white(g); for (; p != NULL; p = p->next) p->marked = cast_byte((p->marked & maskcolors) | white); } /* ** Correct a list of gray objects. ** Because this correction is done after sweeping, young objects might ** be turned white and still be in the list. They are only removed. ** For tables and userdata, advance 'touched1' to 'touched2'; 'touched2' ** objects become regular old and are removed from the list. 
** For threads, just remove white ones from the list. */ static GCObject **correctgraylist (GCObject **p) { GCObject *curr; while ((curr = *p) != NULL) { switch (curr->tt) { case LUA_VTABLE: case LUA_VUSERDATA: { GCObject **next = getgclist(curr); if (getage(curr) == G_TOUCHED1) { /* touched in this cycle? */ lua_assert(isgray(curr)); gray2black(curr); /* make it black, for next barrier */ changeage(curr, G_TOUCHED1, G_TOUCHED2); p = next; /* go to next element */ } else { /* not touched in this cycle */ if (!iswhite(curr)) { /* not white? */ lua_assert(isold(curr)); if (getage(curr) == G_TOUCHED2) /* advance from G_TOUCHED2... */ changeage(curr, G_TOUCHED2, G_OLD); /* ... to G_OLD */ gray2black(curr); /* make it black */ } /* else, object is white: just remove it from this list */ *p = *next; /* remove 'curr' from gray list */ } break; } case LUA_VTHREAD: { lua_State *th = gco2th(curr); lua_assert(!isblack(th)); if (iswhite(th)) /* new object? */ *p = th->gclist; /* remove from gray list */ else /* old threads remain gray */ p = &th->gclist; /* go to next element */ break; } default: lua_assert(0); /* nothing more could be gray here */ } } return p; } /* ** Correct all gray lists, coalescing them into 'grayagain'. */ static void correctgraylists (global_State *g) { GCObject **list = correctgraylist(&g->grayagain); *list = g->weak; g->weak = NULL; list = correctgraylist(list); *list = g->allweak; g->allweak = NULL; list = correctgraylist(list); *list = g->ephemeron; g->ephemeron = NULL; correctgraylist(list); } /* ** Mark 'OLD1' objects when starting a new young collection. ** Gray objects are already in some gray list, and so will be visited ** in the atomic step. 
*/ static void markold (global_State *g, GCObject *from, GCObject *to) { GCObject *p; for (p = from; p != to; p = p->next) { if (getage(p) == G_OLD1) { lua_assert(!iswhite(p)); if (isblack(p)) { black2gray(p); /* should be '2white', but gray works too */ reallymarkobject(g, p); } } } } /* ** Finish a young-generation collection. */ static void finishgencycle (lua_State *L, global_State *g) { correctgraylists(g); checkSizes(L, g); g->gcstate = GCSpropagate; /* skip restart */ if (!g->gcemergency) callallpendingfinalizers(L); } /* ** Does a young collection. First, mark 'OLD1' objects. (Only survival ** and "recent old" lists can contain 'OLD1' objects. New lists cannot ** contain 'OLD1' objects, at most 'OLD0' objects that were already ** visited when marked old.) Then does the atomic step. Then, ** sweep all lists and advance pointers. Finally, finish the collection. */ static void youngcollection (lua_State *L, global_State *g) { GCObject **psurvival; /* to point to first non-dead survival object */ lua_assert(g->gcstate == GCSpropagate); markold(g, g->survival, g->reallyold); markold(g, g->finobj, g->finobjrold); atomic(L); /* sweep nursery and get a pointer to its last live element */ psurvival = sweepgen(L, g, &g->allgc, g->survival); /* sweep 'survival' and 'old' */ sweepgen(L, g, psurvival, g->reallyold); g->reallyold = g->old; g->old = *psurvival; /* 'survival' survivals are old now */ g->survival = g->allgc; /* all news are survivals */ /* repeat for 'finobj' lists */ psurvival = sweepgen(L, g, &g->finobj, g->finobjsur); /* sweep 'survival' and 'old' */ sweepgen(L, g, psurvival, g->finobjrold); g->finobjrold = g->finobjold; g->finobjold = *psurvival; /* 'survival' survivals are old now */ g->finobjsur = g->finobj; /* all news are survivals */ sweepgen(L, g, &g->tobefnz, NULL); finishgencycle(L, g); } static void atomic2gen (lua_State *L, global_State *g) { /* sweep all elements making them old */ sweep2old(L, &g->allgc); /* everything alive now is old */ 
g->reallyold = g->old = g->survival = g->allgc; /* repeat for 'finobj' lists */ sweep2old(L, &g->finobj); g->finobjrold = g->finobjold = g->finobjsur = g->finobj; sweep2old(L, &g->tobefnz); g->gckind = KGC_GEN; g->lastatomic = 0; g->GCestimate = gettotalbytes(g); /* base for memory control */ finishgencycle(L, g); } /* ** Enter generational mode. Must go until the end of an atomic cycle ** to ensure that all threads and weak tables are in the gray lists. ** Then, turn all objects into old and finishes the collection. */ static lu_mem entergen (lua_State *L, global_State *g) { lu_mem numobjs; luaC_runtilstate(L, bitmask(GCSpause)); /* prepare to start a new cycle */ luaC_runtilstate(L, bitmask(GCSpropagate)); /* start new cycle */ numobjs = atomic(L); /* propagates all and then do the atomic stuff */ atomic2gen(L, g); return numobjs; } /* ** Enter incremental mode. Turn all objects white, make all ** intermediate lists point to NULL (to avoid invalid pointers), ** and go to the pause state. */ static void enterinc (global_State *g) { whitelist(g, g->allgc); g->reallyold = g->old = g->survival = NULL; whitelist(g, g->finobj); whitelist(g, g->tobefnz); g->finobjrold = g->finobjold = g->finobjsur = NULL; g->gcstate = GCSpause; g->gckind = KGC_INC; g->lastatomic = 0; } /* ** Change collector mode to 'newmode'. */ void luaC_changemode (lua_State *L, int newmode) { global_State *g = G(L); if (newmode != g->gckind) { if (newmode == KGC_GEN) /* entering generational mode? */ entergen(L, g); else enterinc(g); /* entering incremental mode */ } g->lastatomic = 0; } /* ** Does a full collection in generational mode. */ static lu_mem fullgen (lua_State *L, global_State *g) { enterinc(g); return entergen(L, g); } /* ** Set debt for the next minor collection, which will happen when ** memory grows 'genminormul'%. 
*/ static void setminordebt (global_State *g) { luaE_setdebt(g, -(cast(l_mem, (gettotalbytes(g) / 100)) * g->genminormul)); } /* ** Does a major collection after last collection was a "bad collection". ** ** When the program is building a big structure, it allocates lots of ** memory but generates very little garbage. In those scenarios, ** the generational mode just wastes time doing small collections, and ** major collections are frequently what we call a "bad collection", a ** collection that frees too few objects. To avoid the cost of switching ** between generational mode and the incremental mode needed for full ** (major) collections, the collector tries to stay in incremental mode ** after a bad collection, and to switch back to generational mode only ** after a "good" collection (one that traverses less than 9/8 objects ** of the previous one). ** The collector must choose whether to stay in incremental mode or to ** switch back to generational mode before sweeping. At this point, it ** does not know the real memory in use, so it cannot use memory to ** decide whether to return to generational mode. Instead, it uses the ** number of objects traversed (returned by 'atomic') as a proxy. The ** field 'g->lastatomic' keeps this count from the last collection. ** ('g->lastatomic != 0' also means that the last collection was bad.) */ static void stepgenfull (lua_State *L, global_State *g) { lu_mem newatomic; /* count of traversed objects */ lu_mem lastatomic = g->lastatomic; /* count from last collection */ if (g->gckind == KGC_GEN) /* still in generational mode? */ enterinc(g); /* enter incremental mode */ luaC_runtilstate(L, bitmask(GCSpropagate)); /* start new cycle */ newatomic = atomic(L); /* mark everybody */ if (newatomic < lastatomic + (lastatomic >> 3)) { /* good collection? 
*/ atomic2gen(L, g); /* return to generational mode */ setminordebt(g); } else { /* another bad collection; stay in incremental mode */ g->GCestimate = gettotalbytes(g); /* first estimate */; entersweep(L); luaC_runtilstate(L, bitmask(GCSpause)); /* finish collection */ setpause(g); g->lastatomic = newatomic; } } /* ** Does a generational "step". ** Usually, this means doing a minor collection and setting the debt to ** make another collection when memory grows 'genminormul'% larger. ** ** However, there are exceptions. If memory grows 'genmajormul'% ** larger than it was at the end of the last major collection (kept ** in 'g->GCestimate'), the function does a major collection. At the ** end, it checks whether the major collection was able to free a ** decent amount of memory (at least half the growth in memory since ** previous major collection). If so, the collector keeps its state, ** and the next collection will probably be minor again. Otherwise, ** we have what we call a "bad collection". In that case, set the field ** 'g->lastatomic' to signal that fact, so that the next collection will ** go to 'stepgenfull'. ** ** 'GCdebt <= 0' means an explicit call to GC step with "size" zero; ** in that case, do a minor collection. */ static void genstep (lua_State *L, global_State *g) { if (g->lastatomic != 0) /* last collection was a bad one? 
*/ stepgenfull(L, g); /* do a full step */ else { lu_mem majorbase = g->GCestimate; /* memory after last major collection */ lu_mem majorinc = (majorbase / 100) * getgcparam(g->genmajormul); if (g->GCdebt > 0 && gettotalbytes(g) > majorbase + majorinc) { lu_mem numobjs = fullgen(L, g); /* do a major collection */ if (gettotalbytes(g) < majorbase + (majorinc / 2)) { /* collected at least half of memory growth since last major collection; keep doing minor collections */ setminordebt(g); } else { /* bad collection */ g->lastatomic = numobjs; /* signal that last collection was bad */ setpause(g); /* do a long wait for next (major) collection */ } } else { /* regular case; do a minor collection */ youngcollection(L, g); setminordebt(g); g->GCestimate = majorbase; /* preserve base value */ } } lua_assert(isdecGCmodegen(g)); } /* }====================================================== */ /* ** {====================================================== ** GC control ** ======================================================= */ /* ** Set the "time" to wait before starting a new GC cycle; cycle will ** start when memory use hits the threshold of ('estimate' * pause / ** PAUSEADJ). (Division by 'estimate' should be OK: it cannot be zero, ** because Lua cannot even start with less than PAUSEADJ bytes). */ static void setpause (global_State *g) { l_mem threshold, debt; int pause = getgcparam(g->gcpause); l_mem estimate = g->GCestimate / PAUSEADJ; /* adjust 'estimate' */ lua_assert(estimate > 0); threshold = (pause < MAX_LMEM / estimate) /* overflow? */ ? estimate * pause /* no overflow */ : MAX_LMEM; /* overflow; truncate to maximum */ debt = gettotalbytes(g) - threshold; if (debt > 0) debt = 0; luaE_setdebt(g, debt); } /* ** Enter first sweep phase. ** The call to 'sweeptolive' makes the pointer point to an object ** inside the list (instead of to the header), so that the real sweep do ** not need to skip objects created between "now" and the start of the ** real sweep. 
*/
static void entersweep (lua_State *L) {
  global_State *g = G(L);
  g->gcstate = GCSswpallgc;
  lua_assert(g->sweepgc == NULL);
  g->sweepgc = sweeptolive(L, &g->allgc);
}


/*
** Delete all objects in list 'p' until (but not including) object
** 'limit'.
*/
static void deletelist (lua_State *L, GCObject *p, GCObject *limit) {
  while (p != limit) {
    GCObject *next = p->next;
    freeobj(L, p);
    p = next;
  }
}


/*
** Call all finalizers of the objects in the given Lua state, and
** then free all objects, except for the main thread.
*/
void luaC_freeallobjects (lua_State *L) {
  global_State *g = G(L);
  luaC_changemode(L, KGC_INC);  /* lists must be in incremental layout */
  separatetobefnz(g, 1);  /* separate all objects with finalizers */
  lua_assert(g->finobj == NULL);
  callallpendingfinalizers(L);
  deletelist(L, g->allgc, obj2gco(g->mainthread));
  deletelist(L, g->finobj, NULL);
  deletelist(L, g->fixedgc, NULL);  /* collect fixed objects */
  lua_assert(g->strt.nuse == 0);
}


/*
** Atomic (non-interruptible) part of a collection cycle: remark the
** running thread, the registry and the global metatables, re-traverse
** the saved 'grayagain' list and occasional upvalues of dead threads,
** converge ephemeron tables, and then clear weak tables and separate
** objects to be finalized.  Returns an estimate of the work done.
*/
static lu_mem atomic (lua_State *L) {
  global_State *g = G(L);
  lu_mem work = 0;
  GCObject *origweak, *origall;
  GCObject *grayagain = g->grayagain;  /* save original list */
  g->grayagain = NULL;
  lua_assert(g->ephemeron == NULL && g->weak == NULL);
  lua_assert(!iswhite(g->mainthread));
  g->gcstate = GCSatomic;
  markobject(g, L);  /* mark running thread */
  /* registry and global metatables may be changed by API */
  markvalue(g, &g->l_registry);
  markmt(g);  /* mark global metatables */
  work += propagateall(g);  /* empties 'gray' list */
  /* remark occasional upvalues of (maybe) dead threads */
  work += remarkupvals(g);
  work += propagateall(g);  /* propagate changes */
  g->gray = grayagain;
  work += propagateall(g);  /* traverse 'grayagain' list */
  convergeephemerons(g);
  /* at this point, all strongly accessible objects are marked.
*/ /* Clear values from weak tables, before checking finalizers */ clearbyvalues(g, g->weak, NULL); clearbyvalues(g, g->allweak, NULL); origweak = g->weak; origall = g->allweak; separatetobefnz(g, 0); /* separate objects to be finalized */ work += markbeingfnz(g); /* mark objects that will be finalized */ work += propagateall(g); /* remark, to propagate 'resurrection' */ convergeephemerons(g); /* at this point, all resurrected objects are marked. */ /* remove dead objects from weak tables */ clearbykeys(g, g->ephemeron); /* clear keys from all ephemeron tables */ clearbykeys(g, g->allweak); /* clear keys from all 'allweak' tables */ /* clear values from resurrected weak tables */ clearbyvalues(g, g->weak, origweak); clearbyvalues(g, g->allweak, origall); luaS_clearcache(g); g->currentwhite = cast_byte(otherwhite(g)); /* flip current white */ lua_assert(g->gray == NULL); return work; /* estimate of slots marked by 'atomic' */ } static int sweepstep (lua_State *L, global_State *g, int nextstate, GCObject **nextlist) { if (g->sweepgc) { l_mem olddebt = g->GCdebt; int count; g->sweepgc = sweeplist(L, g->sweepgc, GCSWEEPMAX, &count); g->GCestimate += g->GCdebt - olddebt; /* update estimate */ return count; } else { /* enter next state */ g->gcstate = nextstate; g->sweepgc = nextlist; return 0; /* no work done */ } } static lu_mem singlestep (lua_State *L) { global_State *g = G(L); switch (g->gcstate) { case GCSpause: { restartcollection(g); g->gcstate = GCSpropagate; return 1; } case GCSpropagate: { if (g->gray == NULL) { /* no more gray objects? 
*/ g->gcstate = GCSenteratomic; /* finish propagate phase */ return 0; } else return propagatemark(g); /* traverse one gray object */ } case GCSenteratomic: { lu_mem work = atomic(L); /* work is what was traversed by 'atomic' */ entersweep(L); g->GCestimate = gettotalbytes(g); /* first estimate */; return work; } case GCSswpallgc: { /* sweep "regular" objects */ return sweepstep(L, g, GCSswpfinobj, &g->finobj); } case GCSswpfinobj: { /* sweep objects with finalizers */ return sweepstep(L, g, GCSswptobefnz, &g->tobefnz); } case GCSswptobefnz: { /* sweep objects to be finalized */ return sweepstep(L, g, GCSswpend, NULL); } case GCSswpend: { /* finish sweeps */ checkSizes(L, g); g->gcstate = GCScallfin; return 0; } case GCScallfin: { /* call remaining finalizers */ if (g->tobefnz && !g->gcemergency) { int n = runafewfinalizers(L, GCFINMAX); return n * GCFINALIZECOST; } else { /* emergency mode or no more finalizers */ g->gcstate = GCSpause; /* finish collection */ return 0; } } default: lua_assert(0); return 0; } } /* ** advances the garbage collector until it reaches a state allowed ** by 'statemask' */ void luaC_runtilstate (lua_State *L, int statesmask) { global_State *g = G(L); while (!testbit(statesmask, g->gcstate)) singlestep(L); } /* ** Performs a basic incremental step. The debt and step size are ** converted from bytes to "units of work"; then the function loops ** running single steps until adding that many units of work or ** finishing a cycle (pause state). Finally, it sets the debt that ** controls when next step will be performed. */ static void incstep (lua_State *L, global_State *g) { int stepmul = (getgcparam(g->gcstepmul) | 1); /* avoid division by 0 */ l_mem debt = (g->GCdebt / WORK2MEM) * stepmul; l_mem stepsize = (g->gcstepsize <= log2maxs(l_mem)) ? 
((cast(l_mem, 1) << g->gcstepsize) / WORK2MEM) * stepmul : MAX_LMEM; /* overflow; keep maximum value */ do { /* repeat until pause or enough "credit" (negative debt) */ lu_mem work = singlestep(L); /* perform one single step */ debt -= work; } while (debt > -stepsize && g->gcstate != GCSpause); if (g->gcstate == GCSpause) setpause(g); /* pause until next cycle */ else { debt = (debt / stepmul) * WORK2MEM; /* convert 'work units' to bytes */ luaE_setdebt(g, debt); } } /* ** performs a basic GC step if collector is running */ void luaC_step (lua_State *L) { global_State *g = G(L); lua_assert(!g->gcemergency); if (g->gcrunning) { /* running? */ if(isdecGCmodegen(g)) genstep(L, g); else incstep(L, g); } } /* ** Perform a full collection in incremental mode. ** Before running the collection, check 'keepinvariant'; if it is true, ** there may be some objects marked as black, so the collector has ** to sweep all objects to turn them back to white (as white has not ** changed, nothing will be collected). */ static void fullinc (lua_State *L, global_State *g) { if (keepinvariant(g)) /* black objects? */ entersweep(L); /* sweep everything to turn them back to white */ /* finish any pending sweep phase to start a new cycle */ luaC_runtilstate(L, bitmask(GCSpause)); luaC_runtilstate(L, bitmask(GCScallfin)); /* run up to finalizers */ /* estimate must be correct after a full GC cycle */ lua_assert(g->GCestimate == gettotalbytes(g)); luaC_runtilstate(L, bitmask(GCSpause)); /* finish collection */ setpause(g); } /* ** Performs a full GC cycle; if 'isemergency', set a flag to avoid ** some operations which could change the interpreter state in some ** unexpected ways (running finalizers and shrinking some structures). 
*/ void luaC_fullgc (lua_State *L, int isemergency) { global_State *g = G(L); lua_assert(!g->gcemergency); g->gcemergency = isemergency; /* set flag */ if (g->gckind == KGC_INC) fullinc(L, g); else fullgen(L, g); g->gcemergency = 0; } /* }====================================================== */
/* ** $Id: lgc.c $ ** Garbage Collector ** See Copyright Notice in lua.h */ #define lgc_c #define LUA_CORE #include "lprefix.h" #include <stdio.h> #include <string.h> #include "lua.h" #include "ldebug.h" #include "ldo.h" #include "lfunc.h" #include "lgc.h" #include "lmem.h" #include "lobject.h" #include "lstate.h" #include "lstring.h" #include "ltable.h" #include "ltm.h" /* ** Maximum number of elements to sweep in each single step. ** (Large enough to dissipate fixed overheads but small enough ** to allow small steps for the collector.) */ #define GCSWEEPMAX 100 /* ** Maximum number of finalizers to call in each single step. */ #define GCFINMAX 10 /* ** Cost of calling one finalizer. */ #define GCFINALIZECOST 50 /* ** The equivalent, in bytes, of one unit of "work" (visiting a slot, ** sweeping an object, etc.) */ #define WORK2MEM sizeof(TValue) /* ** macro to adjust 'pause': 'pause' is actually used like ** 'pause / PAUSEADJ' (value chosen by tests) */ #define PAUSEADJ 100 /* mask to erase all color bits (plus gen. related stuff) */ #define maskcolors (~(bitmask(BLACKBIT) | WHITEBITS | AGEBITS)) /* macro to erase all color bits then sets only the current white bit */ #define makewhite(g,x) \ (x->marked = cast_byte((x->marked & maskcolors) | luaC_white(g))) #define white2gray(x) resetbits(x->marked, WHITEBITS) #define black2gray(x) resetbit(x->marked, BLACKBIT) #define valiswhite(x) (iscollectable(x) && iswhite(gcvalue(x))) #define keyiswhite(n) (keyiscollectable(n) && iswhite(gckey(n))) #define checkconsistency(obj) \ lua_longassert(!iscollectable(obj) || righttt(obj)) /* ** Protected access to objects in values */ #define gcvalueN(o) (iscollectable(o) ? 
gcvalue(o) : NULL) #define markvalue(g,o) { checkconsistency(o); \ if (valiswhite(o)) reallymarkobject(g,gcvalue(o)); } #define markkey(g, n) { if keyiswhite(n) reallymarkobject(g,gckey(n)); } #define markobject(g,t) { if (iswhite(t)) reallymarkobject(g, obj2gco(t)); } /* ** mark an object that can be NULL (either because it is really optional, ** or it was stripped as debug info, or inside an uncompleted structure) */ #define markobjectN(g,t) { if (t) markobject(g,t); } static void reallymarkobject (global_State *g, GCObject *o); static lu_mem atomic (lua_State *L); static void entersweep (lua_State *L); /* ** {====================================================== ** Generic functions ** ======================================================= */ /* ** one after last element in a hash array */ #define gnodelast(h) gnode(h, cast_sizet(sizenode(h))) static GCObject **getgclist (GCObject *o) { switch (o->tt) { case LUA_VTABLE: return &gco2t(o)->gclist; case LUA_VLCL: return &gco2lcl(o)->gclist; case LUA_VCCL: return &gco2ccl(o)->gclist; case LUA_VTHREAD: return &gco2th(o)->gclist; case LUA_VPROTO: return &gco2p(o)->gclist; case LUA_VUSERDATA: { Udata *u = gco2u(o); lua_assert(u->nuvalue > 0); return &u->gclist; } default: lua_assert(0); return 0; } } /* ** Link a collectable object 'o' with a known type into list pointed by 'p'. */ #define linkgclist(o,p) ((o)->gclist = (p), (p) = obj2gco(o)) /* ** Link a generic collectable object 'o' into list pointed by 'p'. */ #define linkobjgclist(o,p) (*getgclist(o) = (p), (p) = obj2gco(o)) /* ** Clear keys for empty entries in tables. If entry is empty ** and its key is not marked, mark its entry as dead. This allows the ** collection of the key, but keeps its entry in the table (its removal ** could break a chain). The main feature of a dead key is that it must ** be different from any other value, to do not disturb searches. 
** Other places never manipulate dead keys, because its associated empty
** value is enough to signal that the entry is logically empty.
*/
static void clearkey (Node *n) {
  lua_assert(isempty(gval(n)));
  if (keyiswhite(n))
    setdeadkey(n);  /* unused and unmarked key; remove it */
}


/*
** tells whether a key or value can be cleared from a weak
** table. Non-collectable objects are never removed from weak
** tables. Strings behave as 'values', so are never removed too. for
** other objects: if really collected, cannot keep them; for objects
** being finalized, keep them in keys, but not in values
*/
static int iscleared (global_State *g, const GCObject *o) {
  if (o == NULL) return 0;  /* non-collectable value */
  else if (novariant(o->tt) == LUA_TSTRING) {
    /* marking here keeps the string alive for the current cycle */
    markobject(g, o);  /* strings are 'values', so are never weak */
    return 0;
  }
  else return iswhite(o);  /* white means not reached by marking */
}


/*
** barrier that moves collector forward, that is, mark the white object
** 'v' being pointed by the black object 'o'. (If in sweep phase, clear
** the black object to white [sweep it] to avoid other barrier calls for
** this same object.) In the generational mode, 'v' must also become
** old, if 'o' is old; however, it cannot be changed directly to OLD,
** because it may still point to non-old objects. So, it is marked as
** OLD0. In the next cycle it will become OLD1, and in the next it
** will finally become OLD (regular old).
*/
void luaC_barrier_ (lua_State *L, GCObject *o, GCObject *v) {
  global_State *g = G(L);
  lua_assert(isblack(o) && iswhite(v) && !isdead(g, v) && !isdead(g, o));
  if (keepinvariant(g)) {  /* must keep invariant? */
    reallymarkobject(g, v);  /* restore invariant */
    if (isold(o)) {
      lua_assert(!isold(v));  /* white object could not be old */
      setage(v, G_OLD0);  /* restore generational invariant */
    }
  }
  else {  /* sweep phase */
    lua_assert(issweepphase(g));
    makewhite(g, o);  /* mark main obj.
as white to avoid other barriers */
  }
}


/*
** barrier that moves collector backward, that is, mark the black object
** pointing to a white object as gray again.
*/
void luaC_barrierback_ (lua_State *L, GCObject *o) {
  global_State *g = G(L);
  lua_assert(isblack(o) && !isdead(g, o));
  lua_assert(g->gckind != KGC_GEN || (isold(o) && getage(o) != G_TOUCHED1));
  if (getage(o) != G_TOUCHED2)  /* not already in gray list? */
    linkobjgclist(o, g->grayagain);  /* link it in 'grayagain' */
  black2gray(o);  /* make object gray (again) */
  setage(o, G_TOUCHED1);  /* touched in current cycle */
}


/*
** Make object 'o' permanently alive: gray forever (never collected)
** and old forever.  It is moved from 'allgc' to the 'fixedgc' list,
** which the sweep phases never traverse.
*/
void luaC_fix (lua_State *L, GCObject *o) {
  global_State *g = G(L);
  lua_assert(g->allgc == o);  /* object must be 1st in 'allgc' list! */
  white2gray(o);  /* they will be gray forever */
  setage(o, G_OLD);  /* and old forever */
  g->allgc = o->next;  /* remove object from 'allgc' list */
  o->next = g->fixedgc;  /* link it to 'fixedgc' list */
  g->fixedgc = o;
}


/*
** create a new collectable object (with given type and size) and link
** it to 'allgc' list.
*/
GCObject *luaC_newobj (lua_State *L, int tt, size_t sz) {
  global_State *g = G(L);
  GCObject *o = cast(GCObject *, luaM_newobject(L, novariant(tt), sz));
  o->marked = luaC_white(g);  /* new objects start with the current white */
  o->tt = tt;
  o->next = g->allgc;
  g->allgc = o;
  return o;
}

/* }====================================================== */



/*
** {======================================================
** Mark functions
** =======================================================
*/


/*
** Mark an object. Userdata, strings, and closed upvalues are visited
** and turned black here. Other objects are marked gray and added
** to appropriate list to be visited (and turned black) later. (Open
** upvalues are already linked in 'headuv' list. They are kept gray
** to avoid barriers, as their values will be revisited by the thread.)
*/
static void reallymarkobject (global_State *g, GCObject *o) {
  white2gray(o);  /* object leaves the white state first */
  switch (o->tt) {
    case LUA_VSHRSTR:
    case LUA_VLNGSTR: {
      gray2black(o);  /* strings have no references to traverse */
      break;
    }
    case LUA_VUPVAL: {
      UpVal *uv = gco2upv(o);
      if (!upisopen(uv))  /* open upvalues are kept gray */
        gray2black(o);
      markvalue(g, uv->v);  /* mark its content */
      break;
    }
    case LUA_VUSERDATA: {
      Udata *u = gco2u(o);
      if (u->nuvalue == 0) {  /* no user values? */
        markobjectN(g, u->metatable);  /* mark its metatable */
        gray2black(o);  /* nothing else to mark */
        break;
      }
      /* else... */
    }  /* FALLTHROUGH */
    case LUA_VLCL: case LUA_VCCL: case LUA_VTABLE:
    case LUA_VTHREAD: case LUA_VPROTO: {
      /* these have sub-references; traverse them later via 'gray' list */
      linkobjgclist(o, g->gray);
      break;
    }
    default: lua_assert(0); break;
  }
}


/*
** mark metamethods for basic types
*/
static void markmt (global_State *g) {
  int i;
  for (i=0; i < LUA_NUMTAGS; i++)
    markobjectN(g, g->mt[i]);  /* entries may be NULL, hence 'N' variant */
}


/*
** mark all objects in list of being-finalized
*/
static lu_mem markbeingfnz (global_State *g) {
  GCObject *o;
  lu_mem count = 0;  /* number of objects marked (work done) */
  for (o = g->tobefnz; o != NULL; o = o->next) {
    count++;
    markobject(g, o);
  }
  return count;
}


/*
** Mark all values stored in marked open upvalues from non-marked threads.
** (Values from marked threads were already marked when traversing the
** thread.) Remove from the list threads that no longer have upvalues and
** not-marked threads.
*/
static int remarkupvals (global_State *g) {
  lua_State *thread;
  lua_State **p = &g->twups;
  int work = 0;  /* estimate of how much work was done here */
  while ((thread = *p) != NULL) {
    work++;
    lua_assert(!isblack(thread));  /* threads are never black */
    if (isgray(thread) && thread->openupval != NULL)
      p = &thread->twups;  /* keep marked thread with upvalues in the list */
    else {  /* thread is not marked or without upvalues */
      UpVal *uv;
      *p = thread->twups;  /* remove thread from the list */
      thread->twups = thread;  /* mark that it is out of list */
      for (uv = thread->openupval; uv != NULL; uv = uv->u.open.next) {
        work++;
        if (!iswhite(uv))  /* upvalue already visited?
*/
          markvalue(g, uv->v);  /* mark its value */
      }
    }
  }
  return work;
}


/*
** mark root set and reset all gray lists, to start a new collection
*/
static void restartcollection (global_State *g) {
  g->gray = g->grayagain = NULL;
  g->weak = g->allweak = g->ephemeron = NULL;
  markobject(g, g->mainthread);  /* roots: main thread... */
  markvalue(g, &g->l_registry);  /* ...the registry... */
  markmt(g);  /* ...and basic-type metatables */
  markbeingfnz(g);  /* mark any finalizing object left from previous cycle */
}

/* }====================================================== */


/*
** {======================================================
** Traverse functions
** =======================================================
*/

/*
** Traverse a table with weak values and link it to proper list. During
** propagate phase, keep it in 'grayagain' list, to be revisited in the
** atomic phase. In the atomic phase, if table has any white value,
** put it in 'weak' list, to be cleared.
*/
static void traverseweakvalue (global_State *g, Table *h) {
  Node *n, *limit = gnodelast(h);
  /* if there is array part, assume it may have white values (it is not
     worth traversing it now just to check) */
  int hasclears = (h->alimit > 0);
  for (n = gnode(h, 0); n < limit; n++) {  /* traverse hash part */
    if (isempty(gval(n)))  /* entry is empty? */
      clearkey(n);  /* clear its key */
    else {
      lua_assert(!keyisnil(n));
      markkey(g, n);  /* keys are strong in a weak-value table */
      if (!hasclears && iscleared(g, gcvalueN(gval(n))))  /* a white value? */
        hasclears = 1;  /* table will have to be cleared */
    }
  }
  if (g->gcstate == GCSatomic && hasclears)
    linkgclist(h, g->weak);  /* has to be cleared later */
  else
    linkgclist(h, g->grayagain);  /* must retraverse it in atomic phase */
}


/*
** Traverse an ephemeron table and link it to proper list. Returns true
** iff any object was marked during this traversal (which implies that
** convergence has to continue). During propagation phase, keep table
** in 'grayagain' list, to be visited again in the atomic phase.
In ** the atomic phase, if table has any white->white entry, it has to ** be revisited during ephemeron convergence (as that key may turn ** black). Otherwise, if it has any white key, table has to be cleared ** (in the atomic phase). In generational mode, it (like all visited ** tables) must be kept in some gray list for post-processing. */ static int traverseephemeron (global_State *g, Table *h, int inv) { int marked = 0; /* true if an object is marked in this traversal */ int hasclears = 0; /* true if table has white keys */ int hasww = 0; /* true if table has entry "white-key -> white-value" */ unsigned int i; unsigned int asize = luaH_realasize(h); unsigned int nsize = sizenode(h); /* traverse array part */ for (i = 0; i < asize; i++) { if (valiswhite(&h->array[i])) { marked = 1; reallymarkobject(g, gcvalue(&h->array[i])); } } /* traverse hash part; if 'inv', traverse descending (see 'convergeephemerons') */ for (i = 0; i < nsize; i++) { Node *n = inv ? gnode(h, nsize - 1 - i) : gnode(h, i); if (isempty(gval(n))) /* entry is empty? */ clearkey(n); /* clear its key */ else if (iscleared(g, gckeyN(n))) { /* key is not marked (yet)? */ hasclears = 1; /* table must be cleared */ if (valiswhite(gval(n))) /* value not marked yet? */ hasww = 1; /* white-white entry */ } else if (valiswhite(gval(n))) { /* value not marked yet? */ marked = 1; reallymarkobject(g, gcvalue(gval(n))); /* mark it now */ } } /* link table into proper list */ if (g->gcstate == GCSpropagate) linkgclist(h, g->grayagain); /* must retraverse it in atomic phase */ else if (hasww) /* table has white->white entries? */ linkgclist(h, g->ephemeron); /* have to propagate again */ else if (hasclears) /* table has white keys? 
*/ linkgclist(h, g->allweak); /* may have to clean white keys */ else if (g->gckind == KGC_GEN) linkgclist(h, g->grayagain); /* keep it in some list */ else gray2black(h); return marked; } static void traversestrongtable (global_State *g, Table *h) { Node *n, *limit = gnodelast(h); unsigned int i; unsigned int asize = luaH_realasize(h); for (i = 0; i < asize; i++) /* traverse array part */ markvalue(g, &h->array[i]); for (n = gnode(h, 0); n < limit; n++) { /* traverse hash part */ if (isempty(gval(n))) /* entry is empty? */ clearkey(n); /* clear its key */ else { lua_assert(!keyisnil(n)); markkey(g, n); markvalue(g, gval(n)); } } if (g->gckind == KGC_GEN) { linkgclist(h, g->grayagain); /* keep it in some gray list */ black2gray(h); } } static lu_mem traversetable (global_State *g, Table *h) { const char *weakkey, *weakvalue; const TValue *mode = gfasttm(g, h->metatable, TM_MODE); markobjectN(g, h->metatable); if (mode && ttisstring(mode) && /* is there a weak mode? */ (cast_void(weakkey = strchr(svalue(mode), 'k')), cast_void(weakvalue = strchr(svalue(mode), 'v')), (weakkey || weakvalue))) { /* is really weak? */ black2gray(h); /* keep table gray */ if (!weakkey) /* strong keys? */ traverseweakvalue(g, h); else if (!weakvalue) /* strong values? */ traverseephemeron(g, h, 0); else /* all weak */ linkgclist(h, g->allweak); /* nothing to traverse now */ } else /* not weak */ traversestrongtable(g, h); return 1 + h->alimit + 2 * allocsizenode(h); } static int traverseudata (global_State *g, Udata *u) { int i; markobjectN(g, u->metatable); /* mark its metatable */ for (i = 0; i < u->nuvalue; i++) markvalue(g, &u->uv[i].uv); if (g->gckind == KGC_GEN) { linkgclist(u, g->grayagain); /* keep it in some gray list */ black2gray(u); } return 1 + u->nuvalue; } /* ** Traverse a prototype. 
(While a prototype is being build, its ** arrays can be larger than needed; the extra slots are filled with ** NULL, so the use of 'markobjectN') */ static int traverseproto (global_State *g, Proto *f) { int i; markobjectN(g, f->source); for (i = 0; i < f->sizek; i++) /* mark literals */ markvalue(g, &f->k[i]); for (i = 0; i < f->sizeupvalues; i++) /* mark upvalue names */ markobjectN(g, f->upvalues[i].name); for (i = 0; i < f->sizep; i++) /* mark nested protos */ markobjectN(g, f->p[i]); for (i = 0; i < f->sizelocvars; i++) /* mark local-variable names */ markobjectN(g, f->locvars[i].varname); return 1 + f->sizek + f->sizeupvalues + f->sizep + f->sizelocvars; } static int traverseCclosure (global_State *g, CClosure *cl) { int i; for (i = 0; i < cl->nupvalues; i++) /* mark its upvalues */ markvalue(g, &cl->upvalue[i]); return 1 + cl->nupvalues; } /* ** Traverse a Lua closure, marking its prototype and its upvalues. ** (Both can be NULL while closure is being created.) */ static int traverseLclosure (global_State *g, LClosure *cl) { int i; markobjectN(g, cl->p); /* mark its prototype */ for (i = 0; i < cl->nupvalues; i++) { /* visit its upvalues */ UpVal *uv = cl->upvals[i]; markobjectN(g, uv); /* mark upvalue */ } return 1 + cl->nupvalues; } /* ** Traverse a thread, marking the elements in the stack up to its top ** and cleaning the rest of the stack in the final traversal. ** That ensures that the entire stack have valid (non-dead) objects. */ static int traversethread (global_State *g, lua_State *th) { UpVal *uv; StkId o = th->stack; if (o == NULL) return 1; /* stack not completely built yet */ lua_assert(g->gcstate == GCSatomic || th->openupval == NULL || isintwups(th)); for (; o < th->top; o++) /* mark live elements in the stack */ markvalue(g, s2v(o)); for (uv = th->openupval; uv != NULL; uv = uv->u.open.next) markobject(g, uv); /* open upvalues cannot be collected */ if (g->gcstate == GCSatomic) { /* final traversal? 
*/ StkId lim = th->stack + th->stacksize; /* real end of stack */ for (; o < lim; o++) /* clear not-marked stack slice */ setnilvalue(s2v(o)); /* 'remarkupvals' may have removed thread from 'twups' list */ if (!isintwups(th) && th->openupval != NULL) { th->twups = g->twups; /* link it back to the list */ g->twups = th; } } else if (!g->gcemergency) luaD_shrinkstack(th); /* do not change stack in emergency cycle */ return 1 + th->stacksize; } /* ** traverse one gray object, turning it to black (except for threads, ** which are always gray). */ static lu_mem propagatemark (global_State *g) { GCObject *o = g->gray; gray2black(o); g->gray = *getgclist(o); /* remove from 'gray' list */ switch (o->tt) { case LUA_VTABLE: return traversetable(g, gco2t(o)); case LUA_VUSERDATA: return traverseudata(g, gco2u(o)); case LUA_VLCL: return traverseLclosure(g, gco2lcl(o)); case LUA_VCCL: return traverseCclosure(g, gco2ccl(o)); case LUA_VPROTO: return traverseproto(g, gco2p(o)); case LUA_VTHREAD: { lua_State *th = gco2th(o); linkgclist(th, g->grayagain); /* insert into 'grayagain' list */ black2gray(o); return traversethread(g, th); } default: lua_assert(0); return 0; } } static lu_mem propagateall (global_State *g) { lu_mem tot = 0; while (g->gray) tot += propagatemark(g); return tot; } /* ** Traverse all ephemeron tables propagating marks from keys to values. ** Repeat until it converges, that is, nothing new is marked. 'dir' ** inverts the direction of the traversals, trying to speed up ** convergence on chains in the same table. ** */ static void convergeephemerons (global_State *g) { int changed; int dir = 0; do { GCObject *w; GCObject *next = g->ephemeron; /* get ephemeron list */ g->ephemeron = NULL; /* tables may return to this list when traversed */ changed = 0; while ((w = next) != NULL) { /* for each ephemeron table */ next = gco2t(w)->gclist; /* list is rebuilt during loop */ if (traverseephemeron(g, gco2t(w), dir)) { /* marked some value? 
*/ propagateall(g); /* propagate changes */ changed = 1; /* will have to revisit all ephemeron tables */ } } dir = !dir; /* invert direction next time */ } while (changed); /* repeat until no more changes */ } /* }====================================================== */ /* ** {====================================================== ** Sweep Functions ** ======================================================= */ /* ** clear entries with unmarked keys from all weaktables in list 'l' */ static void clearbykeys (global_State *g, GCObject *l) { for (; l; l = gco2t(l)->gclist) { Table *h = gco2t(l); Node *limit = gnodelast(h); Node *n; for (n = gnode(h, 0); n < limit; n++) { if (iscleared(g, gckeyN(n))) /* unmarked key? */ setempty(gval(n)); /* remove entry */ if (isempty(gval(n))) /* is entry empty? */ clearkey(n); /* clear its key */ } } } /* ** clear entries with unmarked values from all weaktables in list 'l' up ** to element 'f' */ static void clearbyvalues (global_State *g, GCObject *l, GCObject *f) { for (; l != f; l = gco2t(l)->gclist) { Table *h = gco2t(l); Node *n, *limit = gnodelast(h); unsigned int i; unsigned int asize = luaH_realasize(h); for (i = 0; i < asize; i++) { TValue *o = &h->array[i]; if (iscleared(g, gcvalueN(o))) /* value was collected? */ setempty(o); /* remove entry */ } for (n = gnode(h, 0); n < limit; n++) { if (iscleared(g, gcvalueN(gval(n)))) /* unmarked value? */ setempty(gval(n)); /* remove entry */ if (isempty(gval(n))) /* is entry empty? 
*/
      clearkey(n);  /* clear its key */
  }
 }
}


/*
** Free an upvalue; open upvalues must first be unlinked from their
** thread's open-upvalue list.
*/
static void freeupval (lua_State *L, UpVal *uv) {
  if (upisopen(uv))
    luaF_unlinkupval(uv);
  luaM_free(L, uv);
}


/*
** Release the memory of object 'o' according to its (variant) type.
*/
static void freeobj (lua_State *L, GCObject *o) {
  switch (o->tt) {
    case LUA_VPROTO:
      luaF_freeproto(L, gco2p(o));
      break;
    case LUA_VUPVAL:
      freeupval(L, gco2upv(o));
      break;
    case LUA_VLCL:
      luaM_freemem(L, o, sizeLclosure(gco2lcl(o)->nupvalues));
      break;
    case LUA_VCCL:
      luaM_freemem(L, o, sizeCclosure(gco2ccl(o)->nupvalues));
      break;
    case LUA_VTABLE:
      luaH_free(L, gco2t(o));
      break;
    case LUA_VTHREAD:
      luaE_freethread(L, gco2th(o));
      break;
    case LUA_VUSERDATA: {
      Udata *u = gco2u(o);
      luaM_freemem(L, o, sizeudata(u->nuvalue, u->len));
      break;
    }
    case LUA_VSHRSTR:
      luaS_remove(L, gco2ts(o));  /* remove it from hash table */
      luaM_freemem(L, o, sizelstring(gco2ts(o)->shrlen));
      break;
    case LUA_VLNGSTR:
      luaM_freemem(L, o, sizelstring(gco2ts(o)->u.lnglen));
      break;
    default: lua_assert(0);
  }
}


/*
** sweep at most 'countin' elements from a list of GCObjects erasing dead
** objects, where a dead object is one marked with the old (non current)
** white; change all non-dead objects back to white, preparing for next
** collection cycle. Return where to continue the traversal or NULL if
** list is finished. ('*countout' gets the number of elements traversed.)
*/
static GCObject **sweeplist (lua_State *L, GCObject **p, int countin,
                             int *countout) {
  global_State *g = G(L);
  int ow = otherwhite(g);  /* the "dead" white of the previous cycle */
  int i;
  int white = luaC_white(g);  /* current white */
  for (i = 0; *p != NULL && i < countin; i++) {
    GCObject *curr = *p;
    int marked = curr->marked;
    if (isdeadm(ow, marked)) {  /* is 'curr' dead? */
      *p = curr->next;  /* remove 'curr' from list */
      freeobj(L, curr);  /* erase 'curr' */
    }
    else {  /* change mark to 'white' */
      curr->marked = cast_byte((marked & maskcolors) | white);
      p = &curr->next;  /* go to next element */
    }
  }
  if (countout)
    *countout = i;  /* number of elements traversed */
  return (*p == NULL) ?
NULL : p;
}


/*
** sweep a list until a live object (or end of list)
*/
static GCObject **sweeptolive (lua_State *L, GCObject **p) {
  GCObject **old = p;
  do {
    /* sweep one element at a time until 'p' advances past a live object */
    p = sweeplist(L, p, 1, NULL);
  } while (p == old);
  return p;
}

/* }====================================================== */


/*
** {======================================================
** Finalization
** =======================================================
*/

/*
** If possible, shrink string table.
*/
static void checkSizes (lua_State *L, global_State *g) {
  if (!g->gcemergency) {  /* never resize during an emergency collection */
    if (g->strt.nuse < g->strt.size / 4) {  /* string table too big? */
      l_mem olddebt = g->GCdebt;
      luaS_resize(L, g->strt.size / 2);
      g->GCestimate += g->GCdebt - olddebt;  /* correct estimate */
    }
  }
}


/*
** Get the next udata to be finalized from the 'tobefnz' list, and
** link it back into the 'allgc' list.
*/
static GCObject *udata2finalize (global_State *g) {
  GCObject *o = g->tobefnz;  /* get first element */
  lua_assert(tofinalize(o));
  g->tobefnz = o->next;  /* remove it from 'tobefnz' list */
  o->next = g->allgc;  /* return it to 'allgc' list */
  g->allgc = o;
  resetbit(o->marked, FINALIZEDBIT);  /* object is "normal" again */
  if (issweepphase(g))
    makewhite(g, o);  /* "sweep" object */
  return o;
}


/* protected-call wrapper: runs the finalizer already pushed on the stack */
static void dothecall (lua_State *L, void *ud) {
  UNUSED(ud);
  luaD_callnoyield(L, L->top - 2, 0);
}


/*
** Pop one object from 'tobefnz' and call its __gc metamethod (if any)
** in protected mode, with hooks and GC steps disabled.
*/
static void GCTM (lua_State *L) {
  global_State *g = G(L);
  const TValue *tm;
  TValue v;
  lua_assert(!g->gcemergency);
  setgcovalue(L, &v, udata2finalize(g));
  tm = luaT_gettmbyobj(L, &v, TM_GC);
  if (!notm(tm)) {  /* is there a finalizer? */
    int status;
    lu_byte oldah = L->allowhook;
    int running  = g->gcrunning;
    L->allowhook = 0;  /* stop debug hooks during GC metamethod */
    g->gcrunning = 0;  /* avoid GC steps */
    setobj2s(L, L->top++, tm);  /* push finalizer... */
    setobj2s(L, L->top++, &v);  /* ...
and its argument */ L->ci->callstatus |= CIST_FIN; /* will run a finalizer */ status = luaD_pcall(L, dothecall, NULL, savestack(L, L->top - 2), 0); L->ci->callstatus &= ~CIST_FIN; /* not running a finalizer anymore */ L->allowhook = oldah; /* restore hooks */ g->gcrunning = running; /* restore state */ if (unlikely(status != LUA_OK)) { /* error while running __gc? */ luaE_warnerror(L, "__gc metamethod"); L->top--; /* pops error object */ } } } /* ** Call a few finalizers */ static int runafewfinalizers (lua_State *L, int n) { global_State *g = G(L); int i; for (i = 0; i < n && g->tobefnz; i++) GCTM(L); /* call one finalizer */ return i; } /* ** call all pending finalizers */ static void callallpendingfinalizers (lua_State *L) { global_State *g = G(L); while (g->tobefnz) GCTM(L); } /* ** find last 'next' field in list 'p' list (to add elements in its end) */ static GCObject **findlast (GCObject **p) { while (*p != NULL) p = &(*p)->next; return p; } /* ** Move all unreachable objects (or 'all' objects) that need ** finalization from list 'finobj' to list 'tobefnz' (to be finalized). ** (Note that objects after 'finobjold' cannot be white, so they ** don't need to be traversed. In incremental mode, 'finobjold' is NULL, ** so the whole list is traversed.) */ static void separatetobefnz (global_State *g, int all) { GCObject *curr; GCObject **p = &g->finobj; GCObject **lastnext = findlast(&g->tobefnz); while ((curr = *p) != g->finobjold) { /* traverse all finalizable objects */ lua_assert(tofinalize(curr)); if (!(iswhite(curr) || all)) /* not being collected? */ p = &curr->next; /* don't bother with it */ else { if (curr == g->finobjsur) /* removing 'finobjsur'? 
*/ g->finobjsur = curr->next; /* correct it */ *p = curr->next; /* remove 'curr' from 'finobj' list */ curr->next = *lastnext; /* link at the end of 'tobefnz' list */ *lastnext = curr; lastnext = &curr->next; } } } /* ** if object 'o' has a finalizer, remove it from 'allgc' list (must ** search the list to find it) and link it in 'finobj' list. */ void luaC_checkfinalizer (lua_State *L, GCObject *o, Table *mt) { global_State *g = G(L); if (tofinalize(o) || /* obj. is already marked... */ gfasttm(g, mt, TM_GC) == NULL) /* or has no finalizer? */ return; /* nothing to be done */ else { /* move 'o' to 'finobj' list */ GCObject **p; if (issweepphase(g)) { makewhite(g, o); /* "sweep" object 'o' */ if (g->sweepgc == &o->next) /* should not remove 'sweepgc' object */ g->sweepgc = sweeptolive(L, g->sweepgc); /* change 'sweepgc' */ } else { /* correct pointers into 'allgc' list, if needed */ if (o == g->survival) g->survival = o->next; if (o == g->old) g->old = o->next; if (o == g->reallyold) g->reallyold = o->next; } /* search for pointer pointing to 'o' */ for (p = &g->allgc; *p != o; p = &(*p)->next) { /* empty */ } *p = o->next; /* remove 'o' from 'allgc' list */ o->next = g->finobj; /* link it in 'finobj' list */ g->finobj = o; l_setbit(o->marked, FINALIZEDBIT); /* mark it as such */ } } /* }====================================================== */ /* ** {====================================================== ** Generational Collector ** ======================================================= */ static void setpause (global_State *g); /* mask to erase all color bits, not changing gen-related stuff */ #define maskgencolors (~(bitmask(BLACKBIT) | WHITEBITS)) /* ** Sweep a list of objects, deleting dead ones and turning ** the non dead to old (without changing their colors). */ static void sweep2old (lua_State *L, GCObject **p) { GCObject *curr; while ((curr = *p) != NULL) { if (iswhite(curr)) { /* is 'curr' dead? 
*/ lua_assert(isdead(G(L), curr)); *p = curr->next; /* remove 'curr' from list */ freeobj(L, curr); /* erase 'curr' */ } else { /* all surviving objects become old */ setage(curr, G_OLD); p = &curr->next; /* go to next element */ } } } /* ** Sweep for generational mode. Delete dead objects. (Because the ** collection is not incremental, there are no "new white" objects ** during the sweep. So, any white object must be dead.) For ** non-dead objects, advance their ages and clear the color of ** new objects. (Old objects keep their colors.) */ static GCObject **sweepgen (lua_State *L, global_State *g, GCObject **p, GCObject *limit) { static const lu_byte nextage[] = { G_SURVIVAL, /* from G_NEW */ G_OLD1, /* from G_SURVIVAL */ G_OLD1, /* from G_OLD0 */ G_OLD, /* from G_OLD1 */ G_OLD, /* from G_OLD (do not change) */ G_TOUCHED1, /* from G_TOUCHED1 (do not change) */ G_TOUCHED2 /* from G_TOUCHED2 (do not change) */ }; int white = luaC_white(g); GCObject *curr; while ((curr = *p) != limit) { if (iswhite(curr)) { /* is 'curr' dead? */ lua_assert(!isold(curr) && isdead(g, curr)); *p = curr->next; /* remove 'curr' from list */ freeobj(L, curr); /* erase 'curr' */ } else { /* correct mark and age */ if (getage(curr) == G_NEW) curr->marked = cast_byte((curr->marked & maskgencolors) | white); setage(curr, nextage[getage(curr)]); p = &curr->next; /* go to next element */ } } return p; } /* ** Traverse a list making all its elements white and clearing their ** age. */ static void whitelist (global_State *g, GCObject *p) { int white = luaC_white(g); for (; p != NULL; p = p->next) p->marked = cast_byte((p->marked & maskcolors) | white); } /* ** Correct a list of gray objects. ** Because this correction is done after sweeping, young objects might ** be turned white and still be in the list. They are only removed. ** For tables and userdata, advance 'touched1' to 'touched2'; 'touched2' ** objects become regular old and are removed from the list. 
** For threads, just remove white ones from the list. */ static GCObject **correctgraylist (GCObject **p) { GCObject *curr; while ((curr = *p) != NULL) { switch (curr->tt) { case LUA_VTABLE: case LUA_VUSERDATA: { GCObject **next = getgclist(curr); if (getage(curr) == G_TOUCHED1) { /* touched in this cycle? */ lua_assert(isgray(curr)); gray2black(curr); /* make it black, for next barrier */ changeage(curr, G_TOUCHED1, G_TOUCHED2); p = next; /* go to next element */ } else { /* not touched in this cycle */ if (!iswhite(curr)) { /* not white? */ lua_assert(isold(curr)); if (getage(curr) == G_TOUCHED2) /* advance from G_TOUCHED2... */ changeage(curr, G_TOUCHED2, G_OLD); /* ... to G_OLD */ gray2black(curr); /* make it black */ } /* else, object is white: just remove it from this list */ *p = *next; /* remove 'curr' from gray list */ } break; } case LUA_VTHREAD: { lua_State *th = gco2th(curr); lua_assert(!isblack(th)); if (iswhite(th)) /* new object? */ *p = th->gclist; /* remove from gray list */ else /* old threads remain gray */ p = &th->gclist; /* go to next element */ break; } default: lua_assert(0); /* nothing more could be gray here */ } } return p; } /* ** Correct all gray lists, coalescing them into 'grayagain'. */ static void correctgraylists (global_State *g) { GCObject **list = correctgraylist(&g->grayagain); *list = g->weak; g->weak = NULL; list = correctgraylist(list); *list = g->allweak; g->allweak = NULL; list = correctgraylist(list); *list = g->ephemeron; g->ephemeron = NULL; correctgraylist(list); } /* ** Mark 'OLD1' objects when starting a new young collection. ** Gray objects are already in some gray list, and so will be visited ** in the atomic step. 
*/ static void markold (global_State *g, GCObject *from, GCObject *to) { GCObject *p; for (p = from; p != to; p = p->next) { if (getage(p) == G_OLD1) { lua_assert(!iswhite(p)); if (isblack(p)) { black2gray(p); /* should be '2white', but gray works too */ reallymarkobject(g, p); } } } } /* ** Finish a young-generation collection. */ static void finishgencycle (lua_State *L, global_State *g) { correctgraylists(g); checkSizes(L, g); g->gcstate = GCSpropagate; /* skip restart */ if (!g->gcemergency) callallpendingfinalizers(L); } /* ** Does a young collection. First, mark 'OLD1' objects. Then does the ** atomic step. Then, sweep all lists and advance pointers. Finally, ** finish the collection. */ static void youngcollection (lua_State *L, global_State *g) { GCObject **psurvival; /* to point to first non-dead survival object */ lua_assert(g->gcstate == GCSpropagate); markold(g, g->allgc, g->reallyold); markold(g, g->finobj, g->finobjrold); atomic(L); /* sweep nursery and get a pointer to its last live element */ psurvival = sweepgen(L, g, &g->allgc, g->survival); /* sweep 'survival' and 'old' */ sweepgen(L, g, psurvival, g->reallyold); g->reallyold = g->old; g->old = *psurvival; /* 'survival' survivals are old now */ g->survival = g->allgc; /* all news are survivals */ /* repeat for 'finobj' lists */ psurvival = sweepgen(L, g, &g->finobj, g->finobjsur); /* sweep 'survival' and 'old' */ sweepgen(L, g, psurvival, g->finobjrold); g->finobjrold = g->finobjold; g->finobjold = *psurvival; /* 'survival' survivals are old now */ g->finobjsur = g->finobj; /* all news are survivals */ sweepgen(L, g, &g->tobefnz, NULL); finishgencycle(L, g); } static void atomic2gen (lua_State *L, global_State *g) { /* sweep all elements making them old */ sweep2old(L, &g->allgc); /* everything alive now is old */ g->reallyold = g->old = g->survival = g->allgc; /* repeat for 'finobj' lists */ sweep2old(L, &g->finobj); g->finobjrold = g->finobjold = g->finobjsur = g->finobj; sweep2old(L, 
&g->tobefnz);
  g->gckind = KGC_GEN;
  g->lastatomic = 0;
  g->GCestimate = gettotalbytes(g);  /* base for memory control */
  finishgencycle(L, g);
}


/*
** Enter generational mode. Must go until the end of an atomic cycle
** to ensure that all threads and weak tables are in the gray lists.
** Then, turn all objects into old and finishes the collection.
*/
static lu_mem entergen (lua_State *L, global_State *g) {
  lu_mem numobjs;
  luaC_runtilstate(L, bitmask(GCSpause));  /* prepare to start a new cycle */
  luaC_runtilstate(L, bitmask(GCSpropagate));  /* start new cycle */
  numobjs = atomic(L);  /* propagates all and then do the atomic stuff */
  atomic2gen(L, g);
  return numobjs;
}


/*
** Enter incremental mode. Turn all objects white, make all
** intermediate lists point to NULL (to avoid invalid pointers),
** and go to the pause state.
*/
static void enterinc (global_State *g) {
  whitelist(g, g->allgc);
  g->reallyold = g->old = g->survival = NULL;
  whitelist(g, g->finobj);
  whitelist(g, g->tobefnz);
  g->finobjrold = g->finobjold = g->finobjsur = NULL;
  g->gcstate = GCSpause;
  g->gckind = KGC_INC;
  g->lastatomic = 0;
}


/*
** Change collector mode to 'newmode'.
*/
void luaC_changemode (lua_State *L, int newmode) {
  global_State *g = G(L);
  if (newmode != g->gckind) {  /* otherwise nothing to do */
    if (newmode == KGC_GEN)  /* entering generational mode? */
      entergen(L, g);
    else
      enterinc(g);  /* entering incremental mode */
  }
  g->lastatomic = 0;
}


/*
** Does a full collection in generational mode.
*/
static lu_mem fullgen (lua_State *L, global_State *g) {
  enterinc(g);
  return entergen(L, g);
}


/*
** Set debt for the next minor collection, which will happen when
** memory grows 'genminormul'%.
*/
static void setminordebt (global_State *g) {
  luaE_setdebt(g, -(cast(l_mem, (gettotalbytes(g) / 100)) * g->genminormul));
}


/*
** Does a major collection after last collection was a "bad collection".
**
** When the program is building a big structure, it allocates lots of
** memory but generates very little garbage.
In those scenarios, ** the generational mode just wastes time doing small collections, and ** major collections are frequently what we call a "bad collection", a ** collection that frees too few objects. To avoid the cost of switching ** between generational mode and the incremental mode needed for full ** (major) collections, the collector tries to stay in incremental mode ** after a bad collection, and to switch back to generational mode only ** after a "good" collection (one that traverses less than 9/8 objects ** of the previous one). ** The collector must choose whether to stay in incremental mode or to ** switch back to generational mode before sweeping. At this point, it ** does not know the real memory in use, so it cannot use memory to ** decide whether to return to generational mode. Instead, it uses the ** number of objects traversed (returned by 'atomic') as a proxy. The ** field 'g->lastatomic' keeps this count from the last collection. ** ('g->lastatomic != 0' also means that the last collection was bad.) */ static void stepgenfull (lua_State *L, global_State *g) { lu_mem newatomic; /* count of traversed objects */ lu_mem lastatomic = g->lastatomic; /* count from last collection */ if (g->gckind == KGC_GEN) /* still in generational mode? */ enterinc(g); /* enter incremental mode */ luaC_runtilstate(L, bitmask(GCSpropagate)); /* start new cycle */ newatomic = atomic(L); /* mark everybody */ if (newatomic < lastatomic + (lastatomic >> 3)) { /* good collection? */ atomic2gen(L, g); /* return to generational mode */ setminordebt(g); } else { /* another bad collection; stay in incremental mode */ g->GCestimate = gettotalbytes(g); /* first estimate */; entersweep(L); luaC_runtilstate(L, bitmask(GCSpause)); /* finish collection */ setpause(g); g->lastatomic = newatomic; } } /* ** Does a generational "step". ** Usually, this means doing a minor collection and setting the debt to ** make another collection when memory grows 'genminormul'% larger. 
** ** However, there are exceptions. If memory grows 'genmajormul'% ** larger than it was at the end of the last major collection (kept ** in 'g->GCestimate'), the function does a major collection. At the ** end, it checks whether the major collection was able to free a ** decent amount of memory (at least half the growth in memory since ** previous major collection). If so, the collector keeps its state, ** and the next collection will probably be minor again. Otherwise, ** we have what we call a "bad collection". In that case, set the field ** 'g->lastatomic' to signal that fact, so that the next collection will ** go to 'stepgenfull'. ** ** 'GCdebt <= 0' means an explicit call to GC step with "size" zero; ** in that case, do a minor collection. */ static void genstep (lua_State *L, global_State *g) { if (g->lastatomic != 0) /* last collection was a bad one? */ stepgenfull(L, g); /* do a full step */ else { lu_mem majorbase = g->GCestimate; /* memory after last major collection */ lu_mem majorinc = (majorbase / 100) * getgcparam(g->genmajormul); if (g->GCdebt > 0 && gettotalbytes(g) > majorbase + majorinc) { lu_mem numobjs = fullgen(L, g); /* do a major collection */ if (gettotalbytes(g) < majorbase + (majorinc / 2)) { /* collected at least half of memory growth since last major collection; keep doing minor collections */ setminordebt(g); } else { /* bad collection */ g->lastatomic = numobjs; /* signal that last collection was bad */ setpause(g); /* do a long wait for next (major) collection */ } } else { /* regular case; do a minor collection */ youngcollection(L, g); setminordebt(g); g->GCestimate = majorbase; /* preserve base value */ } } lua_assert(isdecGCmodegen(g)); } /* }====================================================== */ /* ** {====================================================== ** GC control ** ======================================================= */ /* ** Set the "time" to wait before starting a new GC cycle; cycle will ** start when memory 
use hits the threshold of ('estimate' * pause / ** PAUSEADJ). (Division by 'estimate' should be OK: it cannot be zero, ** because Lua cannot even start with less than PAUSEADJ bytes). */ static void setpause (global_State *g) { l_mem threshold, debt; int pause = getgcparam(g->gcpause); l_mem estimate = g->GCestimate / PAUSEADJ; /* adjust 'estimate' */ lua_assert(estimate > 0); threshold = (pause < MAX_LMEM / estimate) /* overflow? */ ? estimate * pause /* no overflow */ : MAX_LMEM; /* overflow; truncate to maximum */ debt = gettotalbytes(g) - threshold; if (debt > 0) debt = 0; luaE_setdebt(g, debt); } /* ** Enter first sweep phase. ** The call to 'sweeptolive' makes the pointer point to an object ** inside the list (instead of to the header), so that the real sweep do ** not need to skip objects created between "now" and the start of the ** real sweep. */ static void entersweep (lua_State *L) { global_State *g = G(L); g->gcstate = GCSswpallgc; lua_assert(g->sweepgc == NULL); g->sweepgc = sweeptolive(L, &g->allgc); } /* ** Delete all objects in list 'p' until (but not including) object ** 'limit'. */ static void deletelist (lua_State *L, GCObject *p, GCObject *limit) { while (p != limit) { GCObject *next = p->next; freeobj(L, p); p = next; } } /* ** Call all finalizers of the objects in the given Lua state, and ** then free all objects, except for the main thread. 
*/ void luaC_freeallobjects (lua_State *L) { global_State *g = G(L); luaC_changemode(L, KGC_INC); separatetobefnz(g, 1); /* separate all objects with finalizers */ lua_assert(g->finobj == NULL); callallpendingfinalizers(L); deletelist(L, g->allgc, obj2gco(g->mainthread)); deletelist(L, g->finobj, NULL); deletelist(L, g->fixedgc, NULL); /* collect fixed objects */ lua_assert(g->strt.nuse == 0); } static lu_mem atomic (lua_State *L) { global_State *g = G(L); lu_mem work = 0; GCObject *origweak, *origall; GCObject *grayagain = g->grayagain; /* save original list */ g->grayagain = NULL; lua_assert(g->ephemeron == NULL && g->weak == NULL); lua_assert(!iswhite(g->mainthread)); g->gcstate = GCSatomic; markobject(g, L); /* mark running thread */ /* registry and global metatables may be changed by API */ markvalue(g, &g->l_registry); markmt(g); /* mark global metatables */ work += propagateall(g); /* empties 'gray' list */ /* remark occasional upvalues of (maybe) dead threads */ work += remarkupvals(g); work += propagateall(g); /* propagate changes */ g->gray = grayagain; work += propagateall(g); /* traverse 'grayagain' list */ convergeephemerons(g); /* at this point, all strongly accessible objects are marked. */ /* Clear values from weak tables, before checking finalizers */ clearbyvalues(g, g->weak, NULL); clearbyvalues(g, g->allweak, NULL); origweak = g->weak; origall = g->allweak; separatetobefnz(g, 0); /* separate objects to be finalized */ work += markbeingfnz(g); /* mark objects that will be finalized */ work += propagateall(g); /* remark, to propagate 'resurrection' */ convergeephemerons(g); /* at this point, all resurrected objects are marked. 
*/ /* remove dead objects from weak tables */ clearbykeys(g, g->ephemeron); /* clear keys from all ephemeron tables */ clearbykeys(g, g->allweak); /* clear keys from all 'allweak' tables */ /* clear values from resurrected weak tables */ clearbyvalues(g, g->weak, origweak); clearbyvalues(g, g->allweak, origall); luaS_clearcache(g); g->currentwhite = cast_byte(otherwhite(g)); /* flip current white */ lua_assert(g->gray == NULL); return work; /* estimate of slots marked by 'atomic' */ } static int sweepstep (lua_State *L, global_State *g, int nextstate, GCObject **nextlist) { if (g->sweepgc) { l_mem olddebt = g->GCdebt; int count; g->sweepgc = sweeplist(L, g->sweepgc, GCSWEEPMAX, &count); g->GCestimate += g->GCdebt - olddebt; /* update estimate */ return count; } else { /* enter next state */ g->gcstate = nextstate; g->sweepgc = nextlist; return 0; /* no work done */ } } static lu_mem singlestep (lua_State *L) { global_State *g = G(L); switch (g->gcstate) { case GCSpause: { restartcollection(g); g->gcstate = GCSpropagate; return 1; } case GCSpropagate: { if (g->gray == NULL) { /* no more gray objects? 
*/ g->gcstate = GCSenteratomic; /* finish propagate phase */ return 0; } else return propagatemark(g); /* traverse one gray object */ } case GCSenteratomic: { lu_mem work = atomic(L); /* work is what was traversed by 'atomic' */ entersweep(L); g->GCestimate = gettotalbytes(g); /* first estimate */; return work; } case GCSswpallgc: { /* sweep "regular" objects */ return sweepstep(L, g, GCSswpfinobj, &g->finobj); } case GCSswpfinobj: { /* sweep objects with finalizers */ return sweepstep(L, g, GCSswptobefnz, &g->tobefnz); } case GCSswptobefnz: { /* sweep objects to be finalized */ return sweepstep(L, g, GCSswpend, NULL); } case GCSswpend: { /* finish sweeps */ checkSizes(L, g); g->gcstate = GCScallfin; return 0; } case GCScallfin: { /* call remaining finalizers */ if (g->tobefnz && !g->gcemergency) { int n = runafewfinalizers(L, GCFINMAX); return n * GCFINALIZECOST; } else { /* emergency mode or no more finalizers */ g->gcstate = GCSpause; /* finish collection */ return 0; } } default: lua_assert(0); return 0; } } /* ** advances the garbage collector until it reaches a state allowed ** by 'statemask' */ void luaC_runtilstate (lua_State *L, int statesmask) { global_State *g = G(L); while (!testbit(statesmask, g->gcstate)) singlestep(L); } /* ** Performs a basic incremental step. The debt and step size are ** converted from bytes to "units of work"; then the function loops ** running single steps until adding that many units of work or ** finishing a cycle (pause state). Finally, it sets the debt that ** controls when next step will be performed. */ static void incstep (lua_State *L, global_State *g) { int stepmul = (getgcparam(g->gcstepmul) | 1); /* avoid division by 0 */ l_mem debt = (g->GCdebt / WORK2MEM) * stepmul; l_mem stepsize = (g->gcstepsize <= log2maxs(l_mem)) ? 
((cast(l_mem, 1) << g->gcstepsize) / WORK2MEM) * stepmul : MAX_LMEM; /* overflow; keep maximum value */ do { /* repeat until pause or enough "credit" (negative debt) */ lu_mem work = singlestep(L); /* perform one single step */ debt -= work; } while (debt > -stepsize && g->gcstate != GCSpause); if (g->gcstate == GCSpause) setpause(g); /* pause until next cycle */ else { debt = (debt / stepmul) * WORK2MEM; /* convert 'work units' to bytes */ luaE_setdebt(g, debt); } } /* ** performs a basic GC step if collector is running */ void luaC_step (lua_State *L) { global_State *g = G(L); lua_assert(!g->gcemergency); if (g->gcrunning) { /* running? */ if(isdecGCmodegen(g)) genstep(L, g); else incstep(L, g); } } /* ** Perform a full collection in incremental mode. ** Before running the collection, check 'keepinvariant'; if it is true, ** there may be some objects marked as black, so the collector has ** to sweep all objects to turn them back to white (as white has not ** changed, nothing will be collected). */ static void fullinc (lua_State *L, global_State *g) { if (keepinvariant(g)) /* black objects? */ entersweep(L); /* sweep everything to turn them back to white */ /* finish any pending sweep phase to start a new cycle */ luaC_runtilstate(L, bitmask(GCSpause)); luaC_runtilstate(L, bitmask(GCScallfin)); /* run up to finalizers */ /* estimate must be correct after a full GC cycle */ lua_assert(g->GCestimate == gettotalbytes(g)); luaC_runtilstate(L, bitmask(GCSpause)); /* finish collection */ setpause(g); } /* ** Performs a full GC cycle; if 'isemergency', set a flag to avoid ** some operations which could change the interpreter state in some ** unexpected ways (running finalizers and shrinking some structures). 
*/ void luaC_fullgc (lua_State *L, int isemergency) { global_State *g = G(L); lua_assert(!g->gcemergency); g->gcemergency = isemergency; /* set flag */ if (g->gckind == KGC_INC) fullinc(L, g); else fullgen(L, g); g->gcemergency = 0; } /* }====================================================== */
static void youngcollection (lua_State *L, global_State *g) { GCObject **psurvival; /* to point to first non-dead survival object */ lua_assert(g->gcstate == GCSpropagate); markold(g, g->survival, g->reallyold); markold(g, g->finobj, g->finobjrold); atomic(L); /* sweep nursery and get a pointer to its last live element */ psurvival = sweepgen(L, g, &g->allgc, g->survival); /* sweep 'survival' and 'old' */ sweepgen(L, g, psurvival, g->reallyold); g->reallyold = g->old; g->old = *psurvival; /* 'survival' survivals are old now */ g->survival = g->allgc; /* all news are survivals */ /* repeat for 'finobj' lists */ psurvival = sweepgen(L, g, &g->finobj, g->finobjsur); /* sweep 'survival' and 'old' */ sweepgen(L, g, psurvival, g->finobjrold); g->finobjrold = g->finobjold; g->finobjold = *psurvival; /* 'survival' survivals are old now */ g->finobjsur = g->finobj; /* all news are survivals */ sweepgen(L, g, &g->tobefnz, NULL); finishgencycle(L, g); }
static void youngcollection (lua_State *L, global_State *g) { GCObject **psurvival; /* to point to first non-dead survival object */ lua_assert(g->gcstate == GCSpropagate); markold(g, g->allgc, g->reallyold); markold(g, g->finobj, g->finobjrold); atomic(L); /* sweep nursery and get a pointer to its last live element */ psurvival = sweepgen(L, g, &g->allgc, g->survival); /* sweep 'survival' and 'old' */ sweepgen(L, g, psurvival, g->reallyold); g->reallyold = g->old; g->old = *psurvival; /* 'survival' survivals are old now */ g->survival = g->allgc; /* all news are survivals */ /* repeat for 'finobj' lists */ psurvival = sweepgen(L, g, &g->finobj, g->finobjsur); /* sweep 'survival' and 'old' */ sweepgen(L, g, psurvival, g->finobjrold); g->finobjrold = g->finobjold; g->finobjold = *psurvival; /* 'survival' survivals are old now */ g->finobjsur = g->finobj; /* all news are survivals */ sweepgen(L, g, &g->tobefnz, NULL); finishgencycle(L, g); }
{'added': [(1134, "** Does a young collection. First, mark 'OLD1' objects. Then does the"), (1135, '** atomic step. Then, sweep all lists and advance pointers. Finally,'), (1136, '** finish the collection.'), (1141, ' markold(g, g->allgc, g->reallyold);')], 'deleted': [(1134, "** Does a young collection. First, mark 'OLD1' objects. (Only survival"), (1135, '** and "recent old" lists can contain \'OLD1\' objects. New lists cannot'), (1136, "** contain 'OLD1' objects, at most 'OLD0' objects that were already"), (1137, '** visited when marked old.) Then does the atomic step. Then,'), (1138, '** sweep all lists and advance pointers. Finally, finish the collection.'), (1143, ' markold(g, g->survival, g->reallyold);')]}
4
6
984
6,972
19
185
1
https://github.com/lua/lua
CVE-2020-15889
CWE-125
687
marshal.c
C
get_tuple_object
/* radare - LGPL3 - Copyright 2016-2021 - Matthieu (c0riolis) Tardy - l0stb1t*/ #include <r_io.h> #include <r_bin.h> #include "marshal.h" #include "pyc_magic.h" // avoiding using r2 internals asserts #define if_true_return(cond,ret) if(cond){return(ret);} // TODO: kill globals static ut32 magic_int; static ut32 symbols_ordinal = 0; static RList *refs = NULL; // If you don't have a good reason, do not change this. And also checkout !refs in get_code_object() /* interned_table is used to handle TYPE_INTERNED object */ extern RList *interned_table; static pyc_object *get_object(RBuffer *buffer); static pyc_object *copy_object(pyc_object *object); static void free_object(pyc_object *object); static ut8 get_ut8(RBuffer *buffer, bool *error) { ut8 ret = 0; int size = r_buf_read (buffer, &ret, sizeof (ret)); if (size < sizeof (ret)) { *error = true; } return ret; } static ut16 get_ut16(RBuffer *buffer, bool *error) { ut16 ret = 0; int size = r_buf_read (buffer, (ut8 *)&ret, sizeof (ret)); if (size != sizeof (ret)) { *error = true; } return ret; } static ut32 get_ut32(RBuffer *buffer, bool *error) { ut32 ret = 0; int size = r_buf_read (buffer, (ut8 *)&ret, sizeof (ret)); if (size != sizeof (ret)) { *error = true; } return ret; } static st32 get_st32(RBuffer *buffer, bool *error) { st32 ret = 0; int size = r_buf_read (buffer, (ut8 *)&ret, sizeof (ret)); if (size < sizeof (ret)) { *error = true; } return ret; } static st64 get_st64(RBuffer *buffer, bool *error) { st64 ret = 0; int size = r_buf_read (buffer, (ut8 *)&ret, sizeof (ret)); if (size < sizeof (ret)) { *error = true; } return ret; } static double get_float64(RBuffer *buffer, bool *error) { double ret = 0; int size = r_buf_read (buffer, (ut8 *)&ret, sizeof (ret)); if (size < sizeof (ret)) { *error = true; } return ret; } static ut8 *get_bytes(RBuffer *buffer, ut32 size) { ut8 *ret = R_NEWS0 (ut8, size + 1); if (!ret) { return NULL; } if (r_buf_read (buffer, ret, size) < size) { free (ret); return NULL; } return ret; 
} static pyc_object *get_none_object(void) { pyc_object *ret = R_NEW0 (pyc_object); if (ret) { ret->type = TYPE_NONE; ret->data = strdup ("None"); if (!ret->data) { R_FREE (ret); } } return ret; } static pyc_object *get_false_object(void) { pyc_object *ret = R_NEW0 (pyc_object); if (!ret) { return NULL; } ret->type = TYPE_FALSE; ret->data = strdup ("False"); if (!ret->data) { R_FREE (ret); } return ret; } static pyc_object *get_true_object(void) { pyc_object *ret = R_NEW0 (pyc_object); if (!ret) { return NULL; } ret->type = TYPE_TRUE; ret->data = strdup ("True"); if (!ret->data) { R_FREE (ret); } return ret; } static pyc_object *get_int_object(RBuffer *buffer) { bool error = false; st32 i = get_st32 (buffer, &error); if (error) { return NULL; } pyc_object *ret = R_NEW0 (pyc_object); if (!ret) { return NULL; } ret->type = TYPE_INT; ret->data = r_str_newf ("%d", i); if (!ret->data) { R_FREE (ret); } return ret; } static pyc_object *get_int64_object(RBuffer *buffer) { bool error = false; st64 i = get_st64 (buffer, &error); if (error) { return NULL; } pyc_object *ret = R_NEW0 (pyc_object); if (ret) { ret->type = TYPE_INT64; ret->data = r_str_newf ("%"PFMT64d, (st64)i); if (!ret->data) { R_FREE (ret); } } return ret; } /* long is used when the number is > MAX_INT64 */ static pyc_object *get_long_object(RBuffer *buffer) { bool error = false; bool neg = false; ut32 tmp = 0; size_t size; size_t i, j = 0, left = 0; ut16 n; char *hexstr; char digist2hex[] = "0123456789abcdef"; st32 ndigits = get_st32 (buffer, &error); if (error) { return NULL; } pyc_object *ret = R_NEW0 (pyc_object); if (!ret) { return NULL; } ret->type = TYPE_LONG; if (ndigits < 0) { ndigits = -ndigits; neg = true; } if (ndigits == 0) { ret->data = strdup ("0x0"); } else { if (ndigits > 10) { free (ret); return NULL; } size = ndigits * 15; if (size < 0) { return NULL; } size = (size - 1) / 4 + 1; if (size < 1) { free (ret); return NULL; } size += 3 + (neg? 
1: 0); j = size - 1; hexstr = calloc (size, sizeof (char)); if (!hexstr) { free (ret); return NULL; } for (i = 0; i < ndigits; i++) { n = get_ut16 (buffer, &error); tmp |= n << left; left += 15; while (left >= 4 && j >= 0) { hexstr[--j] = digist2hex[tmp & 0xf]; tmp >>= 4; left -= 4; } } if (tmp) { hexstr[--j] = digist2hex[tmp & 0xf]; } if (j > 0) { hexstr[--j] = 'x'; } if (j > 0) { hexstr[--j] = '0'; } if (neg && j > 0) { hexstr[--j] = '-'; } ret->data = &hexstr[j]; } return ret; } static pyc_object *get_stringref_object(RBuffer *buffer) { pyc_object *ret = NULL; bool error = false; ut32 n = get_st32 (buffer, &error); if (n >= r_list_length (interned_table)) { eprintf ("bad marshal data (string ref out of range)"); return NULL; } if (error) { return NULL; } ret = R_NEW0 (pyc_object); if (!ret) { return NULL; } ret->type = TYPE_STRINGREF; ret->data = r_list_get_n (interned_table, n); if (!ret->data) { R_FREE (ret); } return ret; } static pyc_object *get_float_object(RBuffer *buffer) { pyc_object *ret = NULL; bool error = false; ut32 size = 0; ut8 n = get_ut8 (buffer, &error); if (error) { return NULL; } ret = R_NEW0 (pyc_object); if (!ret) { return NULL; } ut8 *s = malloc (n + 1); if (!s) { free (ret); return NULL; } /* object contain string representation of the number */ size = r_buf_read (buffer, s, n); if (size != n) { R_FREE (s); R_FREE (ret); return NULL; } s[n] = '\0'; ret->type = TYPE_FLOAT; ret->data = s; return ret; } static pyc_object *get_binary_float_object(RBuffer *buffer) { pyc_object *ret = NULL; bool error = false; double f; f = get_float64 (buffer, &error); if (error) { return NULL; } ret = R_NEW0 (pyc_object); if (!ret) { return NULL; } ret->type = TYPE_FLOAT; ret->data = r_str_newf ("%.15g", f); if (!ret->data) { R_FREE (ret); return NULL; } return ret; } static pyc_object *get_complex_object(RBuffer *buffer) { pyc_object *ret = NULL; bool error = false; ut32 size = 0; st32 n1 = 0; st32 n2 = 0; ret = R_NEW0 (pyc_object); if (!ret) { return NULL; 
} if ((magic_int & 0xffff) <= 62061) { n1 = get_ut8 (buffer, &error); } else { n1 = get_st32 (buffer, &error); } if (error || n1 < 1) { free (ret); return NULL; } ut8 *s1 = malloc (n1 + 1); if (!s1) { free (ret); return NULL; } /* object contain string representation of the number */ size = r_buf_read (buffer, s1, n1); if (size != n1) { R_FREE (s1); R_FREE (ret); return NULL; } s1[n1] = '\0'; if ((magic_int & 0xffff) <= 62061) { n2 = get_ut8 (buffer, &error); } else n2 = get_st32 (buffer, &error); if (error) { return NULL; } ut8 *s2 = malloc (n2 + 1); if (!s2) { return NULL; } /* object contain string representation of the number */ size = r_buf_read (buffer, s2, n2); if (size != n2) { R_FREE (s1); R_FREE (s2); R_FREE (ret); return NULL; } s2[n2] = '\0'; ret->type = TYPE_COMPLEX; ret->data = r_str_newf ("%s+%sj", s1, s2); R_FREE (s1); R_FREE (s2); if (!ret->data) { R_FREE (ret); return NULL; } return ret; } static pyc_object *get_binary_complex_object(RBuffer *buffer) { pyc_object *ret = NULL; bool error = false; double a, b; //a + bj a = get_float64 (buffer, &error); b = get_float64 (buffer, &error); if (error) { return NULL; } ret = R_NEW0 (pyc_object); if (!ret) { return NULL; } ret->type = TYPE_BINARY_COMPLEX; ret->data = r_str_newf ("%.15g+%.15gj", a, b); if (!ret->data) { R_FREE (ret); return NULL; } return ret; } static pyc_object *get_string_object(RBuffer *buffer) { pyc_object *ret = NULL; bool error = false; ut32 n = 0; n = get_ut32 (buffer, &error); if (n > ST32_MAX) { eprintf ("bad marshal data (string size out of range)"); return NULL; } if (error) { return NULL; } ret = R_NEW0 (pyc_object); if (!ret) { return NULL; } ret->type = TYPE_STRING; ret->data = get_bytes (buffer, n); if (!ret->data) { R_FREE (ret); return NULL; } return ret; } static pyc_object *get_unicode_object(RBuffer *buffer) { pyc_object *ret = NULL; bool error = false; ut32 n = 0; n = get_ut32 (buffer, &error); if (n > ST32_MAX) { eprintf ("bad marshal data (unicode size out of 
range)"); return NULL; } if (error) { return NULL; } ret = R_NEW0 (pyc_object); ret->type = TYPE_UNICODE; ret->data = get_bytes (buffer, n); if (!ret->data) { R_FREE (ret); return NULL; } return ret; } static pyc_object *get_interned_object(RBuffer *buffer) { pyc_object *ret = NULL; bool error = false; ut32 n = 0; n = get_ut32 (buffer, &error); if (n > ST32_MAX) { eprintf ("bad marshal data (string size out of range)"); return NULL; } if (error) { return NULL; } ret = R_NEW0 (pyc_object); if (!ret) { return NULL; } ret->type = TYPE_INTERNED; ret->data = get_bytes (buffer, n); /* add data pointer to interned table */ r_list_append (interned_table, ret->data); if (!ret->data) { R_FREE (ret); } return ret; } static pyc_object *get_array_object_generic(RBuffer *buffer, ut32 size) { pyc_object *tmp = NULL; pyc_object *ret = NULL; ut32 i = 0; ret = R_NEW0 (pyc_object); if (!ret) { return NULL; } ret->data = r_list_newf ((RListFree)free_object); if (!ret->data) { free (ret); return NULL; } for (i = 0; i < size; i++) { tmp = get_object (buffer); if (!tmp) { r_list_free (ret->data); R_FREE (ret); return NULL; } if (!r_list_append (ret->data, tmp)) { free_object (tmp); r_list_free (ret->data); free (ret); return NULL; } } return ret; } /* small TYPE_SMALL_TUPLE doesn't exist in python2 */ /* */ static pyc_object *get_small_tuple_object(RBuffer *buffer) { pyc_object *ret = NULL; bool error = false; ut8 n = 0; n = get_ut8 (buffer, &error); if (error) { return NULL; } ret = get_array_object_generic (buffer, n); if (ret) { ret->type = TYPE_SMALL_TUPLE; return ret; } return NULL; } static pyc_object *get_tuple_object(RBuffer *buffer) { pyc_object *ret = NULL; bool error = false; ut32 n = 0; n = get_ut32 (buffer, &error); if (n > ST32_MAX) { eprintf ("bad marshal data (tuple size out of range)\n"); return NULL; } if (error) { return NULL; } ret = get_array_object_generic (buffer, n); if (ret) { ret->type = TYPE_TUPLE; return ret; } return NULL; } static pyc_object 
*get_list_object(RBuffer *buffer) { pyc_object *ret = NULL; bool error = false; ut32 n = 0; n = get_ut32 (buffer, &error); if (n > ST32_MAX) { eprintf ("bad marshal data (list size out of range)\n"); return NULL; } if (error) { return NULL; } ret = get_array_object_generic (buffer, n); if (ret) { ret->type = TYPE_LIST; return ret; } return NULL; } static pyc_object *get_dict_object(RBuffer *buffer) { pyc_object *key = NULL, *val = NULL; pyc_object *ret = R_NEW0 (pyc_object); if (!ret) { return NULL; } ret->data = r_list_newf ((RListFree)free_object); if (!ret->data) { R_FREE (ret); return NULL; } for (;;) { key = get_object (buffer); if (!key) { break; } if (!r_list_append (ret->data, key)) { r_list_free (ret->data); R_FREE (ret); free_object (key); return NULL; } val = get_object (buffer); if (!val) { break; } if (!r_list_append (ret->data, val)) { free_object (val); r_list_free (ret->data); R_FREE (ret); return NULL; } } ret->type = TYPE_DICT; return ret; } static pyc_object *get_set_object(RBuffer *buffer) { pyc_object *ret = NULL; bool error = false; ut32 n = get_ut32 (buffer, &error); if (n > ST32_MAX) { eprintf ("bad marshal data (set size out of range)\n"); return NULL; } if (error) { return NULL; } ret = get_array_object_generic (buffer, n); if (!ret) { return NULL; } ret->type = TYPE_SET; return ret; } static pyc_object *get_ascii_object_generic(RBuffer *buffer, ut32 size, bool interned) { pyc_object *ret = R_NEW0 (pyc_object); if (!ret) { return NULL; } ret->type = TYPE_ASCII; ret->data = get_bytes (buffer, size); if (!ret->data) { R_FREE (ret); } return ret; } static pyc_object *get_ascii_object(RBuffer *buffer) { bool error = false; ut32 n = get_ut32 (buffer, &error); if (error) { return NULL; } return get_ascii_object_generic (buffer, n, true); } static pyc_object *get_ascii_interned_object(RBuffer *buffer) { bool error = false; ut32 n = get_ut32 (buffer, &error); if (error) { return NULL; } return get_ascii_object_generic (buffer, n, true); } static 
pyc_object *get_short_ascii_object(RBuffer *buffer) { bool error = false; ut8 n = get_ut8 (buffer, &error); if (error) { return NULL; } return get_ascii_object_generic (buffer, n, false); } static pyc_object *get_short_ascii_interned_object(RBuffer *buffer) { bool error = false; ut8 n = get_ut8 (buffer, &error); return error? NULL: get_ascii_object_generic (buffer, n, true); } static pyc_object *get_ref_object(RBuffer *buffer) { bool error = false; ut32 index = get_ut32 (buffer, &error); if (error) { return NULL; } if (index >= r_list_length (refs)) { return NULL; } pyc_object *obj = r_list_get_n (refs, index); return obj? copy_object (obj): NULL; } static void free_object(pyc_object *object) { if (!object) { return; } if ((int)object->type == 0) { return; } switch (object->type) { case TYPE_SMALL_TUPLE: case TYPE_TUPLE: r_list_free (object->data); break; case TYPE_STRING: case TYPE_TRUE: case TYPE_FALSE: case TYPE_INT: case TYPE_NONE: case TYPE_NULL: case TYPE_ASCII_INTERNED: case TYPE_SHORT_ASCII: case TYPE_ASCII: case TYPE_SHORT_ASCII_INTERNED: free (object->data); break; case TYPE_CODE_v0: case TYPE_CODE_v1: { pyc_code_object *cobj = object->data; free_object (cobj->code); free_object (cobj->consts); free_object (cobj->names); free_object (cobj->varnames); free_object (cobj->freevars); free_object (cobj->cellvars); free_object (cobj->filename); free_object (cobj->name); free_object (cobj->lnotab); free (object->data); break; } case TYPE_REF: free_object (object->data); break; case TYPE_SET: case TYPE_FROZENSET: case TYPE_ELLIPSIS: case TYPE_STOPITER: case TYPE_BINARY_COMPLEX: case TYPE_BINARY_FLOAT: case TYPE_COMPLEX: case TYPE_STRINGREF: case TYPE_DICT: case TYPE_FLOAT: case TYPE_INT64: case TYPE_INTERNED: case TYPE_LIST: case TYPE_LONG: case TYPE_UNICODE: case TYPE_UNKNOWN: eprintf ("Free not implemented for type %x\n", object->type); break; default: eprintf ("Undefined type in free_object (%x)\n", object->type); break; } free (object); } static pyc_object 
*copy_object(pyc_object *object) { pyc_object *copy = R_NEW0 (pyc_object); if (!copy || !object) { free (copy); return NULL; } copy->type = object->type; if ((int)object->type == 0) { // do nothing } else switch (object->type) { case TYPE_NULL: break; case TYPE_TUPLE: case TYPE_SMALL_TUPLE: copy->data = r_list_clone (object->data); break; case TYPE_INT: case TYPE_INT64: case TYPE_NONE: case TYPE_TRUE: case TYPE_FALSE: case TYPE_STRING: case TYPE_ASCII: case TYPE_SHORT_ASCII: case TYPE_ASCII_INTERNED: case TYPE_SHORT_ASCII_INTERNED: copy->data = strdup (object->data); break; case TYPE_CODE_v0: case TYPE_CODE_v1: { pyc_code_object *src = object->data; pyc_code_object *dst = R_NEW0 (pyc_code_object); if (!dst) { break; } memcpy (dst, src, sizeof (*dst)); dst->code = copy_object (src->code); dst->consts = copy_object (src->consts); dst->names = copy_object (src->names); dst->varnames = copy_object (src->varnames); dst->freevars = copy_object (src->freevars); dst->cellvars = copy_object (src->cellvars); dst->filename = copy_object (src->filename); dst->name = copy_object (src->name); dst->lnotab = copy_object (src->lnotab); copy->data = dst; break; } case TYPE_REF: copy->data = copy_object (object->data); break; case TYPE_ELLIPSIS: case TYPE_STOPITER: case TYPE_BINARY_COMPLEX: case TYPE_BINARY_FLOAT: case TYPE_COMPLEX: case TYPE_STRINGREF: case TYPE_DICT: case TYPE_FLOAT: case TYPE_FROZENSET: case TYPE_INTERNED: case TYPE_LIST: case TYPE_LONG: case TYPE_SET: case TYPE_UNICODE: case TYPE_UNKNOWN: eprintf ("Copy not implemented for type %x\n", object->type); break; default: eprintf ("Undefined type in copy_object (%x)\n", object->type); break; } if (!copy->data) { R_FREE (copy); } return copy; } static pyc_object *get_code_object(RBuffer *buffer) { bool error = false; pyc_object *ret = R_NEW0 (pyc_object); pyc_code_object *cobj = R_NEW0 (pyc_code_object); if (!ret || !cobj) { free (ret); free (cobj); return NULL; } //ret->type = TYPE_CODE_v1; // support start from v1.0 
ret->data = cobj; bool v10_to_12 = magic_int_within (magic_int, 39170, 16679, &error); // 1.0.1 - 1.2 bool v13_to_22 = magic_int_within (magic_int, 11913, 60718, &error); // 1.3b1 - 2.2a1 bool v11_to_14 = magic_int_within (magic_int, 39170, 20117, &error); // 1.0.1 - 1.4 bool v15_to_22 = magic_int_within (magic_int, 20121, 60718, &error); // 1.5a1 - 2.2a1 bool v13_to_20 = magic_int_within (magic_int, 11913, 50824, &error); // 1.3b1 - 2.0b1 //bool v21_to_27 = (!v13_to_20) && magic_int_within (magic_int, 60124, 62212, &error); bool has_posonlyargcount = magic_int_within (magic_int, 3410, 3424, &error); // v3.8.0a4 - latest if (error) { free (ret); free (cobj); return NULL; } if (v13_to_22) { cobj->argcount = get_ut16 (buffer, &error); } else if (v10_to_12) { cobj->argcount = 0; } else { cobj->argcount = get_ut32 (buffer, &error); } if (has_posonlyargcount) { cobj->posonlyargcount = get_ut32 (buffer, &error); // Included in argcount } else { cobj->posonlyargcount = 0; // None } if (((3020 < (magic_int & 0xffff)) && ((magic_int & 0xffff) < 20121)) && (!v11_to_14)) { cobj->kwonlyargcount = get_ut32 (buffer, &error); // Not included in argcount } else { cobj->kwonlyargcount = 0; } if (v13_to_22) { cobj->nlocals = get_ut16 (buffer, &error); } else if (v10_to_12) { cobj->nlocals = 0; } else { cobj->nlocals = get_ut32 (buffer, &error); } if (v15_to_22) { cobj->stacksize = get_ut16 (buffer, &error); } else if (v11_to_14 || v10_to_12) { cobj->stacksize = 0; } else { cobj->stacksize = get_ut32 (buffer, &error); } if (v13_to_22) { cobj->flags = get_ut16 (buffer, &error); } else if (v10_to_12) { cobj->flags = 0; } else { cobj->flags = get_ut32 (buffer, &error); } //to help disassemble the code cobj->start_offset = r_buf_tell (buffer) + 5; // 1 from get_object() and 4 from get_string_object() if (!refs) { return ret; //return for entried part to get the root object of this file } cobj->code = get_object (buffer); cobj->end_offset = r_buf_tell (buffer); cobj->consts = get_object 
(buffer); cobj->names = get_object (buffer); if (v10_to_12) { cobj->varnames = NULL; } else { cobj->varnames = get_object (buffer); } if (!(v10_to_12 || v13_to_20)) { cobj->freevars = get_object (buffer); cobj->cellvars = get_object (buffer); } else { cobj->freevars = NULL; cobj->cellvars = NULL; } cobj->filename = get_object (buffer); cobj->name = get_object (buffer); if (v15_to_22) { cobj->firstlineno = get_ut16 (buffer, &error); } else if (v11_to_14) { cobj->firstlineno = 0; } else { cobj->firstlineno = get_ut32 (buffer, &error); } if (v11_to_14) { cobj->lnotab = NULL; } else { cobj->lnotab = get_object (buffer); } if (error) { free_object (cobj->code); free_object (cobj->consts); free_object (cobj->names); free_object (cobj->varnames); free_object (cobj->freevars); free_object (cobj->cellvars); free_object (cobj->filename); free_object (cobj->name); free_object (cobj->lnotab); free (cobj); R_FREE (ret); return NULL; } return ret; } ut64 get_code_object_addr(RBuffer *buffer, ut32 magic) { magic_int = magic; pyc_object *co = get_code_object (buffer); ut64 result = 0; if (!co) { return 0; } pyc_code_object *cobj = co->data; result = cobj->start_offset; free_object (co); return result; } static pyc_object *get_object(RBuffer *buffer) { bool error = false; pyc_object *ret = NULL; ut8 code = get_ut8 (buffer, &error); bool flag = (code & FLAG_REF); RListIter *ref_idx = NULL; ut8 type = (code & ~FLAG_REF); if (error) { return NULL; } if (flag) { pyc_object *noneret = get_none_object (); if (noneret) { ref_idx = r_list_append (refs, noneret); } } switch (type) { case TYPE_NULL: free_object (ret); return NULL; case TYPE_TRUE: return get_true_object (); case TYPE_FALSE: free_object (ret); return get_false_object (); case TYPE_NONE: free_object (ret); return get_none_object (); case TYPE_REF: free_object (ret); return get_ref_object (buffer); case TYPE_SMALL_TUPLE: ret = get_small_tuple_object (buffer); break; case TYPE_TUPLE: ret = get_tuple_object (buffer); break; case 
TYPE_STRING: ret = get_string_object (buffer); break; case TYPE_CODE_v0: ret = get_code_object (buffer); if (ret) { ret->type = TYPE_CODE_v0; } break; case TYPE_CODE_v1: ret = get_code_object (buffer); if (ret) { ret->type = TYPE_CODE_v1; } break; case TYPE_INT: ret = get_int_object (buffer); break; case TYPE_ASCII_INTERNED: ret = get_ascii_interned_object (buffer); break; case TYPE_SHORT_ASCII: ret = get_short_ascii_object (buffer); break; case TYPE_ASCII: ret = get_ascii_object (buffer); break; case TYPE_SHORT_ASCII_INTERNED: ret = get_short_ascii_interned_object (buffer); break; case TYPE_INT64: ret = get_int64_object (buffer); break; case TYPE_INTERNED: ret = get_interned_object (buffer); break; case TYPE_STRINGREF: ret = get_stringref_object (buffer); break; case TYPE_FLOAT: ret = get_float_object (buffer); break; case TYPE_BINARY_FLOAT: ret = get_binary_float_object (buffer); break; case TYPE_COMPLEX: ret = get_complex_object (buffer); // behaviour depends on Python version break; case TYPE_BINARY_COMPLEX: ret = get_binary_complex_object (buffer); break; case TYPE_LIST: ret = get_list_object (buffer); break; case TYPE_LONG: ret = get_long_object (buffer); break; case TYPE_UNICODE: ret = get_unicode_object (buffer); break; case TYPE_DICT: ret = get_dict_object (buffer); break; case TYPE_FROZENSET: case TYPE_SET: ret = get_set_object (buffer); break; case TYPE_STOPITER: case TYPE_ELLIPSIS: ret = R_NEW0 (pyc_object); break; case TYPE_UNKNOWN: eprintf ("Get not implemented for type 0x%x\n", type); // r_list_pop (refs); free_object (ret); return NULL; case 0: // nop break; default: eprintf ("Undefined type in get_object (0x%x)\n", type); // r_list_pop (refs); return NULL; } if (ret && flag && ref_idx) { if (ref_idx->data != ret) { free_object (ref_idx->data); } ref_idx->data = copy_object (ret); } if (ret) { return ret; } ret = get_none_object (); if (!ret) { return NULL; } r_list_append (refs, ret); return ret; } static bool extract_sections_symbols(pyc_object 
*obj, RList *sections, RList *symbols, RList *cobjs, char *prefix) { pyc_code_object *cobj = NULL; RBinSection *section = NULL; RBinSymbol *symbol = NULL; RListIter *i = NULL; //each code object is a section if_true_return (!obj || (obj->type != TYPE_CODE_v1 && obj->type != TYPE_CODE_v0), false); cobj = obj->data; if_true_return (!cobj || !cobj->name, false); if_true_return (cobj->name->type != TYPE_ASCII && cobj->name->type != TYPE_STRING && cobj->name->type != TYPE_INTERNED, false); if_true_return (!cobj->name->data, false); if_true_return (!cobj->consts, false); //add the cobj to objs list if (!r_list_append (cobjs, cobj)) { goto fail; } section = R_NEW0 (RBinSection); symbol = R_NEW0 (RBinSymbol); prefix = r_str_newf ("%s%s%s", r_str_get (prefix), prefix? ".": "", (const char *)cobj->name->data); if (!prefix || !section || !symbol) { goto fail; } section->name = strdup (prefix); if (!section->name) { goto fail; } section->paddr = cobj->start_offset; section->vaddr = cobj->start_offset; section->size = cobj->end_offset - cobj->start_offset; section->vsize = cobj->end_offset - cobj->start_offset; if (!r_list_append (sections, section)) { goto fail; } // start building symbol symbol->name = strdup (prefix); //symbol->bind; symbol->type = R_BIN_TYPE_FUNC_STR; symbol->size = cobj->end_offset - cobj->start_offset; symbol->vaddr = cobj->start_offset; symbol->paddr = cobj->start_offset; symbol->ordinal = symbols_ordinal++; if (cobj->consts->type != TYPE_TUPLE && cobj->consts->type != TYPE_SMALL_TUPLE) { goto fail2; } if (!r_list_append (symbols, symbol)) { goto fail2; } r_list_foreach (((RList *)(cobj->consts->data)), i, obj) { extract_sections_symbols (obj, sections, symbols, cobjs, prefix); } free (prefix); return true; fail: free (section); free (prefix); free (symbol); return false; fail2: free (prefix); free (symbol); return false; } bool get_sections_symbols_from_code_objects(RBuffer *buffer, RList *sections, RList *symbols, RList *cobjs, ut32 magic) { bool ret; 
magic_int = magic; refs = r_list_newf (NULL); // (RListFree)free_object); if (!refs) { return false; } ret = extract_sections_symbols (get_object (buffer), sections, symbols, cobjs, NULL); r_list_free (refs); refs = NULL; return ret; }
/* radare - LGPL3 - Copyright 2016-2022 - Matthieu (c0riolis) Tardy - l0stb1t */ #include <r_io.h> #include <r_bin.h> #include "marshal.h" #include "pyc_magic.h" // avoiding using r2 internals asserts #define if_true_return(cond,ret) if(cond){return(ret);} // TODO: kill globals static R_TH_LOCAL ut32 magic_int; static R_TH_LOCAL ut32 symbols_ordinal = 0; static R_TH_LOCAL RList *refs = NULL; // If you don't have a good reason, do not change this. And also checkout !refs in get_code_object() /* interned_table is used to handle TYPE_INTERNED object */ extern RList *interned_table; static pyc_object *get_object(RBuffer *buffer); static pyc_object *copy_object(pyc_object *object); static void free_object(pyc_object *object); static ut8 get_ut8(RBuffer *buffer, bool *error) { ut8 ret = 0; int size = r_buf_read (buffer, &ret, sizeof (ret)); if (size < sizeof (ret)) { *error = true; } return ret; } static ut16 get_ut16(RBuffer *buffer, bool *error) { ut16 ret = 0; int size = r_buf_read (buffer, (ut8 *)&ret, sizeof (ret)); if (size != sizeof (ret)) { *error = true; } return ret; } static ut32 get_ut32(RBuffer *buffer, bool *error) { ut32 ret = 0; int size = r_buf_read (buffer, (ut8 *)&ret, sizeof (ret)); if (size != sizeof (ret)) { *error = true; } return ret; } static st32 get_st32(RBuffer *buffer, bool *error) { st32 ret = 0; int size = r_buf_read (buffer, (ut8 *)&ret, sizeof (ret)); if (size < sizeof (ret)) { *error = true; } return ret; } static st64 get_st64(RBuffer *buffer, bool *error) { st64 ret = 0; int size = r_buf_read (buffer, (ut8 *)&ret, sizeof (ret)); if (size < sizeof (ret)) { *error = true; } return ret; } static double get_float64(RBuffer *buffer, bool *error) { double ret = 0; int size = r_buf_read (buffer, (ut8 *)&ret, sizeof (ret)); if (size < sizeof (ret)) { *error = true; } return ret; } static ut8 *get_bytes(RBuffer *buffer, ut32 size) { ut8 *ret = R_NEWS0 (ut8, size + 1); if (!ret) { return NULL; } if (r_buf_read (buffer, ret, size) < size) { free 
(ret); return NULL; } return ret; } static pyc_object *get_none_object(void) { pyc_object *ret = R_NEW0 (pyc_object); if (ret) { ret->type = TYPE_NONE; ret->data = strdup ("None"); if (!ret->data) { R_FREE (ret); } } return ret; } static pyc_object *get_false_object(void) { pyc_object *ret = R_NEW0 (pyc_object); if (!ret) { return NULL; } ret->type = TYPE_FALSE; ret->data = strdup ("False"); if (!ret->data) { R_FREE (ret); } return ret; } static pyc_object *get_true_object(void) { pyc_object *ret = R_NEW0 (pyc_object); if (!ret) { return NULL; } ret->type = TYPE_TRUE; ret->data = strdup ("True"); if (!ret->data) { R_FREE (ret); } return ret; } static pyc_object *get_int_object(RBuffer *buffer) { bool error = false; st32 i = get_st32 (buffer, &error); if (error) { return NULL; } pyc_object *ret = R_NEW0 (pyc_object); if (!ret) { return NULL; } ret->type = TYPE_INT; ret->data = r_str_newf ("%d", i); if (!ret->data) { R_FREE (ret); } return ret; } static pyc_object *get_int64_object(RBuffer *buffer) { bool error = false; st64 i = get_st64 (buffer, &error); if (error) { return NULL; } pyc_object *ret = R_NEW0 (pyc_object); if (ret) { ret->type = TYPE_INT64; ret->data = r_str_newf ("%"PFMT64d, (st64)i); if (!ret->data) { R_FREE (ret); } } return ret; } /* long is used when the number is > MAX_INT64 */ static pyc_object *get_long_object(RBuffer *buffer) { bool error = false; bool neg = false; ut32 tmp = 0; size_t size; size_t i, j = 0, left = 0; ut16 n; char *hexstr; char digist2hex[] = "0123456789abcdef"; st32 ndigits = get_st32 (buffer, &error); if (error) { return NULL; } pyc_object *ret = R_NEW0 (pyc_object); if (!ret) { return NULL; } ret->type = TYPE_LONG; if (ndigits < 0) { ndigits = -ndigits; neg = true; } if (ndigits == 0) { ret->data = strdup ("0x0"); } else { if (ndigits > 10) { free (ret); return NULL; } size = ndigits * 15; if (size < 0) { return NULL; } size = (size - 1) / 4 + 1; if (size < 1) { free (ret); return NULL; } size += 3 + (neg? 
1: 0); j = size - 1; hexstr = calloc (size, sizeof (char)); if (!hexstr) { free (ret); return NULL; } for (i = 0; i < ndigits; i++) { n = get_ut16 (buffer, &error); tmp |= n << left; left += 15; while (left >= 4 && j >= 0) { hexstr[--j] = digist2hex[tmp & 0xf]; tmp >>= 4; left -= 4; } } if (tmp) { hexstr[--j] = digist2hex[tmp & 0xf]; } if (j > 0) { hexstr[--j] = 'x'; } if (j > 0) { hexstr[--j] = '0'; } if (neg && j > 0) { hexstr[--j] = '-'; } ret->data = &hexstr[j]; } return ret; } static pyc_object *get_stringref_object(RBuffer *buffer) { pyc_object *ret = NULL; bool error = false; ut32 n = get_st32 (buffer, &error); if (n >= r_list_length (interned_table)) { eprintf ("bad marshal data (string ref out of range)"); return NULL; } if (error) { return NULL; } ret = R_NEW0 (pyc_object); if (!ret) { return NULL; } ret->type = TYPE_STRINGREF; ret->data = r_list_get_n (interned_table, n); if (!ret->data) { R_FREE (ret); } return ret; } static pyc_object *get_float_object(RBuffer *buffer) { pyc_object *ret = NULL; bool error = false; ut32 size = 0; ut8 n = get_ut8 (buffer, &error); if (error) { return NULL; } ret = R_NEW0 (pyc_object); if (!ret) { return NULL; } ut8 *s = malloc (n + 1); if (!s) { free (ret); return NULL; } /* object contain string representation of the number */ size = r_buf_read (buffer, s, n); if (size != n) { R_FREE (s); R_FREE (ret); return NULL; } s[n] = '\0'; ret->type = TYPE_FLOAT; ret->data = s; return ret; } static pyc_object *get_binary_float_object(RBuffer *buffer) { pyc_object *ret = NULL; bool error = false; double f; f = get_float64 (buffer, &error); if (error) { return NULL; } ret = R_NEW0 (pyc_object); if (!ret) { return NULL; } ret->type = TYPE_FLOAT; ret->data = r_str_newf ("%.15g", f); if (!ret->data) { R_FREE (ret); return NULL; } return ret; } static pyc_object *get_complex_object(RBuffer *buffer) { pyc_object *ret = NULL; bool error = false; ut32 size = 0; st32 n1 = 0; st32 n2 = 0; ret = R_NEW0 (pyc_object); if (!ret) { return NULL; 
} if ((magic_int & 0xffff) <= 62061) { n1 = get_ut8 (buffer, &error); } else { n1 = get_st32 (buffer, &error); } if (error || n1 < 1) { free (ret); return NULL; } ut8 *s1 = malloc (n1 + 1); if (!s1) { free (ret); return NULL; } /* object contain string representation of the number */ size = r_buf_read (buffer, s1, n1); if (size != n1) { R_FREE (s1); R_FREE (ret); return NULL; } s1[n1] = '\0'; if ((magic_int & 0xffff) <= 62061) { n2 = get_ut8 (buffer, &error); } else n2 = get_st32 (buffer, &error); if (error) { return NULL; } ut8 *s2 = malloc (n2 + 1); if (!s2) { return NULL; } /* object contain string representation of the number */ size = r_buf_read (buffer, s2, n2); if (size != n2) { R_FREE (s1); R_FREE (s2); R_FREE (ret); return NULL; } s2[n2] = '\0'; ret->type = TYPE_COMPLEX; ret->data = r_str_newf ("%s+%sj", s1, s2); R_FREE (s1); R_FREE (s2); if (!ret->data) { R_FREE (ret); return NULL; } return ret; } static pyc_object *get_binary_complex_object(RBuffer *buffer) { pyc_object *ret = NULL; bool error = false; double a, b; //a + bj a = get_float64 (buffer, &error); b = get_float64 (buffer, &error); if (error) { return NULL; } ret = R_NEW0 (pyc_object); if (!ret) { return NULL; } ret->type = TYPE_BINARY_COMPLEX; ret->data = r_str_newf ("%.15g+%.15gj", a, b); if (!ret->data) { R_FREE (ret); return NULL; } return ret; } static pyc_object *get_string_object(RBuffer *buffer) { pyc_object *ret = NULL; bool error = false; ut32 n = 0; n = get_ut32 (buffer, &error); if (n > ST32_MAX) { eprintf ("bad marshal data (string size out of range)"); return NULL; } if (error) { return NULL; } ret = R_NEW0 (pyc_object); if (!ret) { return NULL; } ret->type = TYPE_STRING; ret->data = get_bytes (buffer, n); if (!ret->data) { R_FREE (ret); return NULL; } return ret; } static pyc_object *get_unicode_object(RBuffer *buffer) { pyc_object *ret = NULL; bool error = false; ut32 n = 0; n = get_ut32 (buffer, &error); if (n > ST32_MAX) { eprintf ("bad marshal data (unicode size out of 
range)"); return NULL; } if (error) { return NULL; } ret = R_NEW0 (pyc_object); ret->type = TYPE_UNICODE; ret->data = get_bytes (buffer, n); if (!ret->data) { R_FREE (ret); return NULL; } return ret; } static pyc_object *get_interned_object(RBuffer *buffer) { pyc_object *ret = NULL; bool error = false; ut32 n = 0; n = get_ut32 (buffer, &error); if (n > ST32_MAX) { eprintf ("bad marshal data (string size out of range)"); return NULL; } if (error) { return NULL; } ret = R_NEW0 (pyc_object); if (!ret) { return NULL; } ret->type = TYPE_INTERNED; ret->data = get_bytes (buffer, n); /* add data pointer to interned table */ r_list_append (interned_table, ret->data); if (!ret->data) { R_FREE (ret); } return ret; } static pyc_object *get_array_object_generic(RBuffer *buffer, ut32 size) { pyc_object *tmp = NULL; pyc_object *ret = NULL; ut32 i = 0; ret = R_NEW0 (pyc_object); if (!ret) { return NULL; } ret->data = r_list_newf ((RListFree)free_object); if (!ret->data) { free (ret); return NULL; } for (i = 0; i < size; i++) { tmp = get_object (buffer); if (!tmp || !r_list_append (ret->data, tmp)) { free_object (tmp); ((RList*)ret->data)->free = NULL; r_list_free (ret->data); free (ret); return NULL; } } return ret; } /* small TYPE_SMALL_TUPLE doesn't exist in python2 */ static pyc_object *get_small_tuple_object(RBuffer *buffer) { pyc_object *ret = NULL; bool error = false; ut8 n = 0; n = get_ut8 (buffer, &error); if (error) { return NULL; } ret = get_array_object_generic (buffer, n); if (ret) { ret->type = TYPE_SMALL_TUPLE; return ret; } return NULL; } static pyc_object *get_tuple_object(RBuffer *buffer) { bool error = false; ut32 n = get_ut32 (buffer, &error); if (n > ST32_MAX) { eprintf ("bad marshal data (tuple size out of range)\n"); return NULL; } if (error) { return NULL; } pyc_object *ret = get_array_object_generic (buffer, n); if (ret) { ret->type = TYPE_TUPLE; } return ret; } static pyc_object *get_list_object(RBuffer *buffer) { pyc_object *ret = NULL; bool error = 
false; ut32 n = get_ut32 (buffer, &error); if (n > ST32_MAX) { eprintf ("bad marshal data (list size out of range)\n"); return NULL; } if (error) { return NULL; } ret = get_array_object_generic (buffer, n); if (ret) { ret->type = TYPE_LIST; return ret; } return NULL; } static pyc_object *get_dict_object(RBuffer *buffer) { pyc_object *key = NULL, *val = NULL; pyc_object *ret = R_NEW0 (pyc_object); if (!ret) { return NULL; } ret->data = r_list_newf ((RListFree)free_object); if (!ret->data) { R_FREE (ret); return NULL; } for (;;) { key = get_object (buffer); if (!key) { break; } if (!r_list_append (ret->data, key)) { r_list_free (ret->data); R_FREE (ret); free_object (key); return NULL; } val = get_object (buffer); if (!val) { break; } if (!r_list_append (ret->data, val)) { free_object (val); r_list_free (ret->data); R_FREE (ret); return NULL; } } ret->type = TYPE_DICT; return ret; } static pyc_object *get_set_object(RBuffer *buffer) { bool error = false; ut32 n = get_ut32 (buffer, &error); if (n > ST32_MAX) { eprintf ("bad marshal data (set size out of range)\n"); return NULL; } if (error) { return NULL; } pyc_object *ret = get_array_object_generic (buffer, n); if (ret) { ret->type = TYPE_SET; } return ret; } static pyc_object *get_ascii_object_generic(RBuffer *buffer, ut32 size, bool interned) { pyc_object *ret = R_NEW0 (pyc_object); if (!ret) { return NULL; } ret->type = TYPE_ASCII; ret->data = get_bytes (buffer, size); if (!ret->data) { R_FREE (ret); } return ret; } static pyc_object *get_ascii_object(RBuffer *buffer) { bool error = false; ut32 n = get_ut32 (buffer, &error); if (error) { return NULL; } return get_ascii_object_generic (buffer, n, true); } static pyc_object *get_ascii_interned_object(RBuffer *buffer) { bool error = false; ut32 n = get_ut32 (buffer, &error); if (error) { return NULL; } return get_ascii_object_generic (buffer, n, true); } static pyc_object *get_short_ascii_object(RBuffer *buffer) { bool error = false; ut8 n = get_ut8 (buffer, &error); 
if (error) { return NULL; } return get_ascii_object_generic (buffer, n, false); } static pyc_object *get_short_ascii_interned_object(RBuffer *buffer) { bool error = false; ut8 n = get_ut8 (buffer, &error); return error? NULL: get_ascii_object_generic (buffer, n, true); } static pyc_object *get_ref_object(RBuffer *buffer) { bool error = false; ut32 index = get_ut32 (buffer, &error); if (error) { return NULL; } if (index >= r_list_length (refs)) { return NULL; } pyc_object *obj = r_list_get_n (refs, index); return obj? copy_object (obj): NULL; } static void free_object(pyc_object *object) { if (!object) { return; } if ((int)object->type == 0) { return; } switch (object->type) { case TYPE_SMALL_TUPLE: case TYPE_TUPLE: r_list_free (object->data); break; case TYPE_STRING: case TYPE_TRUE: case TYPE_FALSE: case TYPE_INT: case TYPE_NONE: case TYPE_NULL: case TYPE_ASCII_INTERNED: case TYPE_SHORT_ASCII: case TYPE_ASCII: case TYPE_SHORT_ASCII_INTERNED: free (object->data); break; case TYPE_CODE_v0: case TYPE_CODE_v1: { pyc_code_object *cobj = object->data; free_object (cobj->code); free_object (cobj->consts); free_object (cobj->names); free_object (cobj->varnames); free_object (cobj->freevars); free_object (cobj->cellvars); free_object (cobj->filename); free_object (cobj->name); free_object (cobj->lnotab); free (object->data); break; } case TYPE_REF: free_object (object->data); break; case TYPE_SET: case TYPE_FROZENSET: case TYPE_ELLIPSIS: case TYPE_STOPITER: case TYPE_BINARY_COMPLEX: case TYPE_BINARY_FLOAT: case TYPE_COMPLEX: case TYPE_STRINGREF: case TYPE_DICT: case TYPE_FLOAT: case TYPE_INT64: case TYPE_INTERNED: case TYPE_LIST: case TYPE_LONG: case TYPE_UNICODE: case TYPE_UNKNOWN: eprintf ("Free not implemented for type %x\n", object->type); break; default: eprintf ("Undefined type in free_object (%x)\n", object->type); break; } free (object); } static pyc_object *copy_object(pyc_object *object) { pyc_object *copy = R_NEW0 (pyc_object); if (!copy || !object) { free 
(copy); return NULL; } copy->type = object->type; if ((int)object->type == 0) { // do nothing } else switch (object->type) { case TYPE_NULL: break; case TYPE_TUPLE: case TYPE_SMALL_TUPLE: copy->data = r_list_clone (object->data); break; case TYPE_INT: case TYPE_INT64: case TYPE_NONE: case TYPE_TRUE: case TYPE_FALSE: case TYPE_STRING: case TYPE_ASCII: case TYPE_SHORT_ASCII: case TYPE_ASCII_INTERNED: case TYPE_SHORT_ASCII_INTERNED: copy->data = strdup (object->data); break; case TYPE_CODE_v0: case TYPE_CODE_v1: { pyc_code_object *src = object->data; pyc_code_object *dst = R_NEW0 (pyc_code_object); if (!dst) { break; } memcpy (dst, src, sizeof (*dst)); dst->code = copy_object (src->code); dst->consts = copy_object (src->consts); dst->names = copy_object (src->names); dst->varnames = copy_object (src->varnames); dst->freevars = copy_object (src->freevars); dst->cellvars = copy_object (src->cellvars); dst->filename = copy_object (src->filename); dst->name = copy_object (src->name); dst->lnotab = copy_object (src->lnotab); copy->data = dst; break; } case TYPE_REF: copy->data = copy_object (object->data); break; case TYPE_ELLIPSIS: case TYPE_STOPITER: case TYPE_BINARY_COMPLEX: case TYPE_BINARY_FLOAT: case TYPE_COMPLEX: case TYPE_STRINGREF: case TYPE_DICT: case TYPE_FLOAT: case TYPE_FROZENSET: case TYPE_INTERNED: case TYPE_LIST: case TYPE_LONG: case TYPE_SET: case TYPE_UNICODE: case TYPE_UNKNOWN: eprintf ("Copy not implemented for type %x\n", object->type); break; default: eprintf ("Undefined type in copy_object (%x)\n", object->type); break; } if (!copy->data) { R_FREE (copy); } return copy; } static pyc_object *get_code_object(RBuffer *buffer) { bool error = false; pyc_object *ret = R_NEW0 (pyc_object); pyc_code_object *cobj = R_NEW0 (pyc_code_object); if (!ret || !cobj) { free (ret); free (cobj); return NULL; } //ret->type = TYPE_CODE_v1; // support start from v1.0 ret->data = cobj; bool v10_to_12 = magic_int_within (magic_int, 39170, 16679, &error); // 1.0.1 - 1.2 bool 
v13_to_22 = magic_int_within (magic_int, 11913, 60718, &error); // 1.3b1 - 2.2a1 bool v11_to_14 = magic_int_within (magic_int, 39170, 20117, &error); // 1.0.1 - 1.4 bool v15_to_22 = magic_int_within (magic_int, 20121, 60718, &error); // 1.5a1 - 2.2a1 bool v13_to_20 = magic_int_within (magic_int, 11913, 50824, &error); // 1.3b1 - 2.0b1 //bool v21_to_27 = (!v13_to_20) && magic_int_within (magic_int, 60124, 62212, &error); bool has_posonlyargcount = magic_int_within (magic_int, 3410, 3424, &error); // v3.8.0a4 - latest if (error) { free (ret); free (cobj); return NULL; } if (v13_to_22) { cobj->argcount = get_ut16 (buffer, &error); } else if (v10_to_12) { cobj->argcount = 0; } else { cobj->argcount = get_ut32 (buffer, &error); } if (has_posonlyargcount) { cobj->posonlyargcount = get_ut32 (buffer, &error); // Included in argcount } else { cobj->posonlyargcount = 0; // None } if (((3020 < (magic_int & 0xffff)) && ((magic_int & 0xffff) < 20121)) && (!v11_to_14)) { cobj->kwonlyargcount = get_ut32 (buffer, &error); // Not included in argcount } else { cobj->kwonlyargcount = 0; } if (v13_to_22) { cobj->nlocals = get_ut16 (buffer, &error); } else if (v10_to_12) { cobj->nlocals = 0; } else { cobj->nlocals = get_ut32 (buffer, &error); } if (v15_to_22) { cobj->stacksize = get_ut16 (buffer, &error); } else if (v11_to_14 || v10_to_12) { cobj->stacksize = 0; } else { cobj->stacksize = get_ut32 (buffer, &error); } if (v13_to_22) { cobj->flags = get_ut16 (buffer, &error); } else if (v10_to_12) { cobj->flags = 0; } else { cobj->flags = get_ut32 (buffer, &error); } //to help disassemble the code cobj->start_offset = r_buf_tell (buffer) + 5; // 1 from get_object() and 4 from get_string_object() if (!refs) { return ret; //return for entried part to get the root object of this file } cobj->code = get_object (buffer); cobj->end_offset = r_buf_tell (buffer); cobj->consts = get_object (buffer); cobj->names = get_object (buffer); if (v10_to_12) { cobj->varnames = NULL; } else { cobj->varnames 
= get_object (buffer); } if (!(v10_to_12 || v13_to_20)) { cobj->freevars = get_object (buffer); cobj->cellvars = get_object (buffer); } else { cobj->freevars = NULL; cobj->cellvars = NULL; } cobj->filename = get_object (buffer); cobj->name = get_object (buffer); if (v15_to_22) { cobj->firstlineno = get_ut16 (buffer, &error); } else if (v11_to_14) { cobj->firstlineno = 0; } else { cobj->firstlineno = get_ut32 (buffer, &error); } if (v11_to_14) { cobj->lnotab = NULL; } else { cobj->lnotab = get_object (buffer); } if (error) { free_object (cobj->code); free_object (cobj->consts); free_object (cobj->names); free_object (cobj->varnames); free_object (cobj->freevars); free_object (cobj->cellvars); free_object (cobj->filename); free_object (cobj->name); free_object (cobj->lnotab); free (cobj); R_FREE (ret); return NULL; } return ret; } ut64 get_code_object_addr(RBuffer *buffer, ut32 magic) { magic_int = magic; pyc_object *co = get_code_object (buffer); ut64 result = 0; if (!co) { return 0; } pyc_code_object *cobj = co->data; result = cobj->start_offset; free_object (co); return result; } static pyc_object *get_object(RBuffer *buffer) { bool error = false; pyc_object *ret = NULL; ut8 code = get_ut8 (buffer, &error); bool flag = (code & FLAG_REF); RListIter *ref_idx = NULL; ut8 type = (code & ~FLAG_REF); if (error) { return NULL; } if (flag) { pyc_object *noneret = get_none_object (); if (noneret) { ref_idx = r_list_append (refs, noneret); } } switch (type) { case TYPE_NULL: free_object (ret); return NULL; case TYPE_TRUE: return get_true_object (); case TYPE_FALSE: free_object (ret); return get_false_object (); case TYPE_NONE: free_object (ret); return get_none_object (); case TYPE_REF: free_object (ret); return get_ref_object (buffer); case TYPE_SMALL_TUPLE: ret = get_small_tuple_object (buffer); break; case TYPE_TUPLE: ret = get_tuple_object (buffer); break; case TYPE_STRING: ret = get_string_object (buffer); break; case TYPE_CODE_v0: ret = get_code_object (buffer); if 
(ret) { ret->type = TYPE_CODE_v0; } break; case TYPE_CODE_v1: ret = get_code_object (buffer); if (ret) { ret->type = TYPE_CODE_v1; } break; case TYPE_INT: ret = get_int_object (buffer); break; case TYPE_ASCII_INTERNED: ret = get_ascii_interned_object (buffer); break; case TYPE_SHORT_ASCII: ret = get_short_ascii_object (buffer); break; case TYPE_ASCII: ret = get_ascii_object (buffer); break; case TYPE_SHORT_ASCII_INTERNED: ret = get_short_ascii_interned_object (buffer); break; case TYPE_INT64: ret = get_int64_object (buffer); break; case TYPE_INTERNED: ret = get_interned_object (buffer); break; case TYPE_STRINGREF: ret = get_stringref_object (buffer); break; case TYPE_FLOAT: ret = get_float_object (buffer); break; case TYPE_BINARY_FLOAT: ret = get_binary_float_object (buffer); break; case TYPE_COMPLEX: ret = get_complex_object (buffer); // behaviour depends on Python version break; case TYPE_BINARY_COMPLEX: ret = get_binary_complex_object (buffer); break; case TYPE_LIST: ret = get_list_object (buffer); break; case TYPE_LONG: ret = get_long_object (buffer); break; case TYPE_UNICODE: ret = get_unicode_object (buffer); break; case TYPE_DICT: ret = get_dict_object (buffer); break; case TYPE_FROZENSET: case TYPE_SET: ret = get_set_object (buffer); break; case TYPE_STOPITER: case TYPE_ELLIPSIS: ret = R_NEW0 (pyc_object); break; case TYPE_UNKNOWN: eprintf ("Get not implemented for type 0x%x\n", type); // r_list_pop (refs); free_object (ret); return NULL; case 0: // nop break; default: eprintf ("Undefined type in get_object (0x%x)\n", type); // r_list_pop (refs); return NULL; } if (ret && flag && ref_idx) { if (ref_idx->data != ret) { free_object (ref_idx->data); } ref_idx->data = copy_object (ret); } if (ret) { return ret; } ret = get_none_object (); if (!ret) { return NULL; } r_list_append (refs, ret); return ret; } static bool extract_sections_symbols(pyc_object *obj, RList *sections, RList *symbols, RList *cobjs, char *prefix) { pyc_code_object *cobj = NULL; RBinSection 
*section = NULL; RBinSymbol *symbol = NULL; RListIter *i = NULL; //each code object is a section if_true_return (!obj || (obj->type != TYPE_CODE_v1 && obj->type != TYPE_CODE_v0), false); cobj = obj->data; if_true_return (!cobj || !cobj->name, false); if_true_return (cobj->name->type != TYPE_ASCII && cobj->name->type != TYPE_STRING && cobj->name->type != TYPE_INTERNED, false); if_true_return (!cobj->name->data, false); if_true_return (!cobj->consts, false); //add the cobj to objs list if (!r_list_append (cobjs, cobj)) { goto fail; } section = R_NEW0 (RBinSection); symbol = R_NEW0 (RBinSymbol); prefix = r_str_newf ("%s%s%s", r_str_get (prefix), prefix? ".": "", (const char *)cobj->name->data); if (!prefix || !section || !symbol) { goto fail; } section->name = strdup (prefix); if (!section->name) { goto fail; } section->paddr = cobj->start_offset; section->vaddr = cobj->start_offset; section->size = cobj->end_offset - cobj->start_offset; section->vsize = cobj->end_offset - cobj->start_offset; if (!r_list_append (sections, section)) { goto fail; } // start building symbol symbol->name = strdup (prefix); //symbol->bind; symbol->type = R_BIN_TYPE_FUNC_STR; symbol->size = cobj->end_offset - cobj->start_offset; symbol->vaddr = cobj->start_offset; symbol->paddr = cobj->start_offset; symbol->ordinal = symbols_ordinal++; if (cobj->consts->type != TYPE_TUPLE && cobj->consts->type != TYPE_SMALL_TUPLE) { goto fail2; } if (!r_list_append (symbols, symbol)) { goto fail2; } r_list_foreach (((RList *)(cobj->consts->data)), i, obj) { extract_sections_symbols (obj, sections, symbols, cobjs, prefix); } free (prefix); return true; fail: free (section); free (prefix); free (symbol); return false; fail2: free (prefix); free (symbol); return false; } bool get_sections_symbols_from_code_objects(RBuffer *buffer, RList *sections, RList *symbols, RList *cobjs, ut32 magic) { bool ret; magic_int = magic; refs = r_list_newf (NULL); // (RListFree)free_object); if (!refs) { return false; } ret = 
extract_sections_symbols (get_object (buffer), sections, symbols, cobjs, NULL); r_list_free (refs); refs = NULL; return ret; }
/* Read a marshalled TYPE_TUPLE: a 32-bit element count followed by that
 * many nested objects. Returns NULL on oversized count or read error;
 * otherwise an object of type TYPE_TUPLE owning the element list. */
static pyc_object *get_tuple_object(RBuffer *buffer) {
	bool err = false;
	const ut32 count = get_ut32 (buffer, &err);
	/* reject sizes that cannot be a valid non-negative st32 length;
	 * report before the generic error check, matching original ordering */
	if (count > ST32_MAX) {
		eprintf ("bad marshal data (tuple size out of range)\n");
		return NULL;
	}
	if (err) {
		return NULL;
	}
	pyc_object *tuple = get_array_object_generic (buffer, count);
	if (tuple) {
		tuple->type = TYPE_TUPLE;
	}
	return tuple;
}
static pyc_object *get_tuple_object(RBuffer *buffer) { bool error = false; ut32 n = get_ut32 (buffer, &error); if (n > ST32_MAX) { eprintf ("bad marshal data (tuple size out of range)\n"); return NULL; } if (error) { return NULL; } pyc_object *ret = get_array_object_generic (buffer, n); if (ret) { ret->type = TYPE_TUPLE; } return ret; }
{'added': [(1, '/* radare - LGPL3 - Copyright 2016-2022 - Matthieu (c0riolis) Tardy - l0stb1t */'), (12, 'static R_TH_LOCAL ut32 magic_int;'), (13, 'static R_TH_LOCAL ut32 symbols_ordinal = 0;'), (14, "static R_TH_LOCAL RList *refs = NULL; // If you don't have a good reason, do not change this. And also checkout !refs in get_code_object()"), (503, '\t\tif (!tmp || !r_list_append (ret->data, tmp)) {'), (505, '\t\t\t((RList*)ret->data)->free = NULL;'), (534, '\tut32 n = get_ut32 (buffer, &error);'), (542, '\tpyc_object *ret = get_array_object_generic (buffer, n);'), (546, '\treturn ret;'), (552, '\tut32 n = get_ut32 (buffer, &error);'), (617, '\tpyc_object *ret = get_array_object_generic (buffer, n);'), (618, '\tif (ret) {'), (619, '\t\tret->type = TYPE_SET;')], 'deleted': [(1, '/* radare - LGPL3 - Copyright 2016-2021 - Matthieu (c0riolis) Tardy - l0stb1t*/'), (12, 'static ut32 magic_int;'), (13, 'static ut32 symbols_ordinal = 0;'), (14, "static RList *refs = NULL; // If you don't have a good reason, do not change this. And also checkout !refs in get_code_object()"), (503, '\t\tif (!tmp) {'), (504, '\t\t\tr_list_free (ret->data);'), (505, '\t\t\tR_FREE (ret);'), (506, '\t\t\treturn NULL;'), (507, '\t\t}'), (508, '\t\tif (!r_list_append (ret->data, tmp)) {'), (519, '/* */'), (538, '\tpyc_object *ret = NULL;'), (540, '\tut32 n = 0;'), (541, ''), (542, '\tn = get_ut32 (buffer, &error);'), (550, '\tret = get_array_object_generic (buffer, n);'), (553, '\t\treturn ret;'), (555, '\treturn NULL;'), (561, '\tut32 n = 0;'), (562, ''), (563, '\tn = get_ut32 (buffer, &error);'), (619, '\tpyc_object *ret = NULL;'), (629, '\tret = get_array_object_generic (buffer, n);'), (630, '\tif (!ret) {'), (631, '\t\treturn NULL;'), (633, '\tret->type = TYPE_SET;')]}
13
26
1,078
5,896
19
86
4
https://github.com/radareorg/radare2
CVE-2022-0523
CWE-119
1,526
rose_timer.c
C
rose_start_hbtimer
// SPDX-License-Identifier: GPL-2.0-or-later /* * * Copyright (C) Jonathan Naylor G4KLX (g4klx@g4klx.demon.co.uk) * Copyright (C) 2002 Ralf Baechle DO1GRB (ralf@gnu.org) */ #include <linux/errno.h> #include <linux/types.h> #include <linux/socket.h> #include <linux/in.h> #include <linux/kernel.h> #include <linux/jiffies.h> #include <linux/timer.h> #include <linux/string.h> #include <linux/sockios.h> #include <linux/net.h> #include <net/ax25.h> #include <linux/inet.h> #include <linux/netdevice.h> #include <linux/skbuff.h> #include <net/sock.h> #include <net/tcp_states.h> #include <linux/fcntl.h> #include <linux/mm.h> #include <linux/interrupt.h> #include <net/rose.h> static void rose_heartbeat_expiry(struct timer_list *t); static void rose_timer_expiry(struct timer_list *); static void rose_idletimer_expiry(struct timer_list *); void rose_start_heartbeat(struct sock *sk) { del_timer(&sk->sk_timer); sk->sk_timer.function = rose_heartbeat_expiry; sk->sk_timer.expires = jiffies + 5 * HZ; add_timer(&sk->sk_timer); } void rose_start_t1timer(struct sock *sk) { struct rose_sock *rose = rose_sk(sk); del_timer(&rose->timer); rose->timer.function = rose_timer_expiry; rose->timer.expires = jiffies + rose->t1; add_timer(&rose->timer); } void rose_start_t2timer(struct sock *sk) { struct rose_sock *rose = rose_sk(sk); del_timer(&rose->timer); rose->timer.function = rose_timer_expiry; rose->timer.expires = jiffies + rose->t2; add_timer(&rose->timer); } void rose_start_t3timer(struct sock *sk) { struct rose_sock *rose = rose_sk(sk); del_timer(&rose->timer); rose->timer.function = rose_timer_expiry; rose->timer.expires = jiffies + rose->t3; add_timer(&rose->timer); } void rose_start_hbtimer(struct sock *sk) { struct rose_sock *rose = rose_sk(sk); del_timer(&rose->timer); rose->timer.function = rose_timer_expiry; rose->timer.expires = jiffies + rose->hb; add_timer(&rose->timer); } void rose_start_idletimer(struct sock *sk) { struct rose_sock *rose = rose_sk(sk); 
del_timer(&rose->idletimer); if (rose->idle > 0) { rose->idletimer.function = rose_idletimer_expiry; rose->idletimer.expires = jiffies + rose->idle; add_timer(&rose->idletimer); } } void rose_stop_heartbeat(struct sock *sk) { del_timer(&sk->sk_timer); } void rose_stop_timer(struct sock *sk) { del_timer(&rose_sk(sk)->timer); } void rose_stop_idletimer(struct sock *sk) { del_timer(&rose_sk(sk)->idletimer); } static void rose_heartbeat_expiry(struct timer_list *t) { struct sock *sk = from_timer(sk, t, sk_timer); struct rose_sock *rose = rose_sk(sk); bh_lock_sock(sk); switch (rose->state) { case ROSE_STATE_0: /* Magic here: If we listen() and a new link dies before it is accepted() it isn't 'dead' so doesn't get removed. */ if (sock_flag(sk, SOCK_DESTROY) || (sk->sk_state == TCP_LISTEN && sock_flag(sk, SOCK_DEAD))) { bh_unlock_sock(sk); rose_destroy_socket(sk); return; } break; case ROSE_STATE_3: /* * Check for the state of the receive buffer. */ if (atomic_read(&sk->sk_rmem_alloc) < (sk->sk_rcvbuf / 2) && (rose->condition & ROSE_COND_OWN_RX_BUSY)) { rose->condition &= ~ROSE_COND_OWN_RX_BUSY; rose->condition &= ~ROSE_COND_ACK_PENDING; rose->vl = rose->vr; rose_write_internal(sk, ROSE_RR); rose_stop_timer(sk); /* HB */ break; } break; } rose_start_heartbeat(sk); bh_unlock_sock(sk); } static void rose_timer_expiry(struct timer_list *t) { struct rose_sock *rose = from_timer(rose, t, timer); struct sock *sk = &rose->sock; bh_lock_sock(sk); switch (rose->state) { case ROSE_STATE_1: /* T1 */ case ROSE_STATE_4: /* T2 */ rose_write_internal(sk, ROSE_CLEAR_REQUEST); rose->state = ROSE_STATE_2; rose_start_t3timer(sk); break; case ROSE_STATE_2: /* T3 */ rose->neighbour->use--; rose_disconnect(sk, ETIMEDOUT, -1, -1); break; case ROSE_STATE_3: /* HB */ if (rose->condition & ROSE_COND_ACK_PENDING) { rose->condition &= ~ROSE_COND_ACK_PENDING; rose_enquiry_response(sk); } break; } bh_unlock_sock(sk); } static void rose_idletimer_expiry(struct timer_list *t) { struct rose_sock *rose = 
from_timer(rose, t, idletimer); struct sock *sk = &rose->sock; bh_lock_sock(sk); rose_clear_queues(sk); rose_write_internal(sk, ROSE_CLEAR_REQUEST); rose_sk(sk)->state = ROSE_STATE_2; rose_start_t3timer(sk); sk->sk_state = TCP_CLOSE; sk->sk_err = 0; sk->sk_shutdown |= SEND_SHUTDOWN; if (!sock_flag(sk, SOCK_DEAD)) { sk->sk_state_change(sk); sock_set_flag(sk, SOCK_DEAD); } bh_unlock_sock(sk); }
// SPDX-License-Identifier: GPL-2.0-or-later /* * * Copyright (C) Jonathan Naylor G4KLX (g4klx@g4klx.demon.co.uk) * Copyright (C) 2002 Ralf Baechle DO1GRB (ralf@gnu.org) */ #include <linux/errno.h> #include <linux/types.h> #include <linux/socket.h> #include <linux/in.h> #include <linux/kernel.h> #include <linux/jiffies.h> #include <linux/timer.h> #include <linux/string.h> #include <linux/sockios.h> #include <linux/net.h> #include <net/ax25.h> #include <linux/inet.h> #include <linux/netdevice.h> #include <linux/skbuff.h> #include <net/sock.h> #include <net/tcp_states.h> #include <linux/fcntl.h> #include <linux/mm.h> #include <linux/interrupt.h> #include <net/rose.h> static void rose_heartbeat_expiry(struct timer_list *t); static void rose_timer_expiry(struct timer_list *); static void rose_idletimer_expiry(struct timer_list *); void rose_start_heartbeat(struct sock *sk) { sk_stop_timer(sk, &sk->sk_timer); sk->sk_timer.function = rose_heartbeat_expiry; sk->sk_timer.expires = jiffies + 5 * HZ; sk_reset_timer(sk, &sk->sk_timer, sk->sk_timer.expires); } void rose_start_t1timer(struct sock *sk) { struct rose_sock *rose = rose_sk(sk); sk_stop_timer(sk, &rose->timer); rose->timer.function = rose_timer_expiry; rose->timer.expires = jiffies + rose->t1; sk_reset_timer(sk, &rose->timer, rose->timer.expires); } void rose_start_t2timer(struct sock *sk) { struct rose_sock *rose = rose_sk(sk); sk_stop_timer(sk, &rose->timer); rose->timer.function = rose_timer_expiry; rose->timer.expires = jiffies + rose->t2; sk_reset_timer(sk, &rose->timer, rose->timer.expires); } void rose_start_t3timer(struct sock *sk) { struct rose_sock *rose = rose_sk(sk); sk_stop_timer(sk, &rose->timer); rose->timer.function = rose_timer_expiry; rose->timer.expires = jiffies + rose->t3; sk_reset_timer(sk, &rose->timer, rose->timer.expires); } void rose_start_hbtimer(struct sock *sk) { struct rose_sock *rose = rose_sk(sk); sk_stop_timer(sk, &rose->timer); rose->timer.function = rose_timer_expiry; 
rose->timer.expires = jiffies + rose->hb; sk_reset_timer(sk, &rose->timer, rose->timer.expires); } void rose_start_idletimer(struct sock *sk) { struct rose_sock *rose = rose_sk(sk); sk_stop_timer(sk, &rose->idletimer); if (rose->idle > 0) { rose->idletimer.function = rose_idletimer_expiry; rose->idletimer.expires = jiffies + rose->idle; sk_reset_timer(sk, &rose->idletimer, rose->idletimer.expires); } } void rose_stop_heartbeat(struct sock *sk) { sk_stop_timer(sk, &sk->sk_timer); } void rose_stop_timer(struct sock *sk) { sk_stop_timer(sk, &rose_sk(sk)->timer); } void rose_stop_idletimer(struct sock *sk) { sk_stop_timer(sk, &rose_sk(sk)->idletimer); } static void rose_heartbeat_expiry(struct timer_list *t) { struct sock *sk = from_timer(sk, t, sk_timer); struct rose_sock *rose = rose_sk(sk); bh_lock_sock(sk); switch (rose->state) { case ROSE_STATE_0: /* Magic here: If we listen() and a new link dies before it is accepted() it isn't 'dead' so doesn't get removed. */ if (sock_flag(sk, SOCK_DESTROY) || (sk->sk_state == TCP_LISTEN && sock_flag(sk, SOCK_DEAD))) { bh_unlock_sock(sk); rose_destroy_socket(sk); sock_put(sk); return; } break; case ROSE_STATE_3: /* * Check for the state of the receive buffer. 
*/ if (atomic_read(&sk->sk_rmem_alloc) < (sk->sk_rcvbuf / 2) && (rose->condition & ROSE_COND_OWN_RX_BUSY)) { rose->condition &= ~ROSE_COND_OWN_RX_BUSY; rose->condition &= ~ROSE_COND_ACK_PENDING; rose->vl = rose->vr; rose_write_internal(sk, ROSE_RR); rose_stop_timer(sk); /* HB */ break; } break; } rose_start_heartbeat(sk); bh_unlock_sock(sk); sock_put(sk); } static void rose_timer_expiry(struct timer_list *t) { struct rose_sock *rose = from_timer(rose, t, timer); struct sock *sk = &rose->sock; bh_lock_sock(sk); switch (rose->state) { case ROSE_STATE_1: /* T1 */ case ROSE_STATE_4: /* T2 */ rose_write_internal(sk, ROSE_CLEAR_REQUEST); rose->state = ROSE_STATE_2; rose_start_t3timer(sk); break; case ROSE_STATE_2: /* T3 */ rose->neighbour->use--; rose_disconnect(sk, ETIMEDOUT, -1, -1); break; case ROSE_STATE_3: /* HB */ if (rose->condition & ROSE_COND_ACK_PENDING) { rose->condition &= ~ROSE_COND_ACK_PENDING; rose_enquiry_response(sk); } break; } bh_unlock_sock(sk); sock_put(sk); } static void rose_idletimer_expiry(struct timer_list *t) { struct rose_sock *rose = from_timer(rose, t, idletimer); struct sock *sk = &rose->sock; bh_lock_sock(sk); rose_clear_queues(sk); rose_write_internal(sk, ROSE_CLEAR_REQUEST); rose_sk(sk)->state = ROSE_STATE_2; rose_start_t3timer(sk); sk->sk_state = TCP_CLOSE; sk->sk_err = 0; sk->sk_shutdown |= SEND_SHUTDOWN; if (!sock_flag(sk, SOCK_DEAD)) { sk->sk_state_change(sk); sock_set_flag(sk, SOCK_DEAD); } bh_unlock_sock(sk); sock_put(sk); }
void rose_start_hbtimer(struct sock *sk) { struct rose_sock *rose = rose_sk(sk); del_timer(&rose->timer); rose->timer.function = rose_timer_expiry; rose->timer.expires = jiffies + rose->hb; add_timer(&rose->timer); }
void rose_start_hbtimer(struct sock *sk) { struct rose_sock *rose = rose_sk(sk); sk_stop_timer(sk, &rose->timer); rose->timer.function = rose_timer_expiry; rose->timer.expires = jiffies + rose->hb; sk_reset_timer(sk, &rose->timer, rose->timer.expires); }
{'added': [(34, '\tsk_stop_timer(sk, &sk->sk_timer);'), (39, '\tsk_reset_timer(sk, &sk->sk_timer, sk->sk_timer.expires);'), (46, '\tsk_stop_timer(sk, &rose->timer);'), (51, '\tsk_reset_timer(sk, &rose->timer, rose->timer.expires);'), (58, '\tsk_stop_timer(sk, &rose->timer);'), (63, '\tsk_reset_timer(sk, &rose->timer, rose->timer.expires);'), (70, '\tsk_stop_timer(sk, &rose->timer);'), (75, '\tsk_reset_timer(sk, &rose->timer, rose->timer.expires);'), (82, '\tsk_stop_timer(sk, &rose->timer);'), (87, '\tsk_reset_timer(sk, &rose->timer, rose->timer.expires);'), (94, '\tsk_stop_timer(sk, &rose->idletimer);'), (100, '\t\tsk_reset_timer(sk, &rose->idletimer, rose->idletimer.expires);'), (106, '\tsk_stop_timer(sk, &sk->sk_timer);'), (111, '\tsk_stop_timer(sk, &rose_sk(sk)->timer);'), (116, '\tsk_stop_timer(sk, &rose_sk(sk)->idletimer);'), (133, '\t\t\tsock_put(sk);'), (156, '\tsock_put(sk);'), (186, '\tsock_put(sk);'), (211, '\tsock_put(sk);')], 'deleted': [(34, '\tdel_timer(&sk->sk_timer);'), (39, '\tadd_timer(&sk->sk_timer);'), (46, '\tdel_timer(&rose->timer);'), (51, '\tadd_timer(&rose->timer);'), (58, '\tdel_timer(&rose->timer);'), (63, '\tadd_timer(&rose->timer);'), (70, '\tdel_timer(&rose->timer);'), (75, '\tadd_timer(&rose->timer);'), (82, '\tdel_timer(&rose->timer);'), (87, '\tadd_timer(&rose->timer);'), (94, '\tdel_timer(&rose->idletimer);'), (100, '\t\tadd_timer(&rose->idletimer);'), (106, '\tdel_timer(&sk->sk_timer);'), (111, '\tdel_timer(&rose_sk(sk)->timer);'), (116, '\tdel_timer(&rose_sk(sk)->idletimer);')]}
19
15
160
963
8
55
1
https://github.com/torvalds/linux
CVE-2022-2318
CWE-416
200
spl_directory.c
C
SPL_METHOD
/* +----------------------------------------------------------------------+ | PHP Version 5 | +----------------------------------------------------------------------+ | Copyright (c) 1997-2015 The PHP Group | +----------------------------------------------------------------------+ | This source file is subject to version 3.01 of the PHP license, | | that is bundled with this package in the file LICENSE, and is | | available through the world-wide-web at the following url: | | http://www.php.net/license/3_01.txt | | If you did not receive a copy of the PHP license and are unable to | | obtain it through the world-wide-web, please send a note to | | license@php.net so we can mail you a copy immediately. | +----------------------------------------------------------------------+ | Author: Marcus Boerger <helly@php.net> | +----------------------------------------------------------------------+ */ /* $Id$ */ #ifdef HAVE_CONFIG_H # include "config.h" #endif #include "php.h" #include "php_ini.h" #include "ext/standard/info.h" #include "ext/standard/file.h" #include "ext/standard/php_string.h" #include "zend_compile.h" #include "zend_exceptions.h" #include "zend_interfaces.h" #include "php_spl.h" #include "spl_functions.h" #include "spl_engine.h" #include "spl_iterators.h" #include "spl_directory.h" #include "spl_exceptions.h" #include "php.h" #include "fopen_wrappers.h" #include "ext/standard/basic_functions.h" #include "ext/standard/php_filestat.h" #define SPL_HAS_FLAG(flags, test_flag) ((flags & test_flag) ? 
1 : 0) /* declare the class handlers */ static zend_object_handlers spl_filesystem_object_handlers; /* includes handler to validate object state when retrieving methods */ static zend_object_handlers spl_filesystem_object_check_handlers; /* decalre the class entry */ PHPAPI zend_class_entry *spl_ce_SplFileInfo; PHPAPI zend_class_entry *spl_ce_DirectoryIterator; PHPAPI zend_class_entry *spl_ce_FilesystemIterator; PHPAPI zend_class_entry *spl_ce_RecursiveDirectoryIterator; PHPAPI zend_class_entry *spl_ce_GlobIterator; PHPAPI zend_class_entry *spl_ce_SplFileObject; PHPAPI zend_class_entry *spl_ce_SplTempFileObject; static void spl_filesystem_file_free_line(spl_filesystem_object *intern TSRMLS_DC) /* {{{ */ { if (intern->u.file.current_line) { efree(intern->u.file.current_line); intern->u.file.current_line = NULL; } if (intern->u.file.current_zval) { zval_ptr_dtor(&intern->u.file.current_zval); intern->u.file.current_zval = NULL; } } /* }}} */ static void spl_filesystem_object_free_storage(void *object TSRMLS_DC) /* {{{ */ { spl_filesystem_object *intern = (spl_filesystem_object*)object; if (intern->oth_handler && intern->oth_handler->dtor) { intern->oth_handler->dtor(intern TSRMLS_CC); } zend_object_std_dtor(&intern->std TSRMLS_CC); if (intern->_path) { efree(intern->_path); } if (intern->file_name) { efree(intern->file_name); } switch(intern->type) { case SPL_FS_INFO: break; case SPL_FS_DIR: if (intern->u.dir.dirp) { php_stream_close(intern->u.dir.dirp); intern->u.dir.dirp = NULL; } if (intern->u.dir.sub_path) { efree(intern->u.dir.sub_path); } break; case SPL_FS_FILE: if (intern->u.file.stream) { if (intern->u.file.zcontext) { /* zend_list_delref(Z_RESVAL_P(intern->zcontext));*/ } if (!intern->u.file.stream->is_persistent) { php_stream_free(intern->u.file.stream, PHP_STREAM_FREE_CLOSE); } else { php_stream_free(intern->u.file.stream, PHP_STREAM_FREE_CLOSE_PERSISTENT); } if (intern->u.file.open_mode) { efree(intern->u.file.open_mode); } if (intern->orig_path) { 
efree(intern->orig_path); } } spl_filesystem_file_free_line(intern TSRMLS_CC); break; } { zend_object_iterator *iterator; iterator = (zend_object_iterator*) spl_filesystem_object_to_iterator(intern); if (iterator->data != NULL) { iterator->data = NULL; iterator->funcs->dtor(iterator TSRMLS_CC); } } efree(object); } /* }}} */ /* {{{ spl_ce_dir_object_new */ /* creates the object by - allocating memory - initializing the object members - storing the object - setting it's handlers called from - clone - new */ static zend_object_value spl_filesystem_object_new_ex(zend_class_entry *class_type, spl_filesystem_object **obj TSRMLS_DC) { zend_object_value retval; spl_filesystem_object *intern; intern = emalloc(sizeof(spl_filesystem_object)); memset(intern, 0, sizeof(spl_filesystem_object)); /* intern->type = SPL_FS_INFO; done by set 0 */ intern->file_class = spl_ce_SplFileObject; intern->info_class = spl_ce_SplFileInfo; if (obj) *obj = intern; zend_object_std_init(&intern->std, class_type TSRMLS_CC); object_properties_init(&intern->std, class_type); retval.handle = zend_objects_store_put(intern, (zend_objects_store_dtor_t) zend_objects_destroy_object, (zend_objects_free_object_storage_t) spl_filesystem_object_free_storage, NULL TSRMLS_CC); retval.handlers = &spl_filesystem_object_handlers; return retval; } /* }}} */ /* {{{ spl_filesystem_object_new */ /* See spl_filesystem_object_new_ex */ static zend_object_value spl_filesystem_object_new(zend_class_entry *class_type TSRMLS_DC) { return spl_filesystem_object_new_ex(class_type, NULL TSRMLS_CC); } /* }}} */ /* {{{ spl_filesystem_object_new_ex */ static zend_object_value spl_filesystem_object_new_check(zend_class_entry *class_type TSRMLS_DC) { zend_object_value ret = spl_filesystem_object_new_ex(class_type, NULL TSRMLS_CC); ret.handlers = &spl_filesystem_object_check_handlers; return ret; } /* }}} */ PHPAPI char* spl_filesystem_object_get_path(spl_filesystem_object *intern, int *len TSRMLS_DC) /* {{{ */ { #ifdef HAVE_GLOB if 
(intern->type == SPL_FS_DIR) { if (php_stream_is(intern->u.dir.dirp ,&php_glob_stream_ops)) { return php_glob_stream_get_path(intern->u.dir.dirp, 0, len); } } #endif if (len) { *len = intern->_path_len; } return intern->_path; } /* }}} */ static inline void spl_filesystem_object_get_file_name(spl_filesystem_object *intern TSRMLS_DC) /* {{{ */ { char slash = SPL_HAS_FLAG(intern->flags, SPL_FILE_DIR_UNIXPATHS) ? '/' : DEFAULT_SLASH; switch (intern->type) { case SPL_FS_INFO: case SPL_FS_FILE: if (!intern->file_name) { php_error_docref(NULL TSRMLS_CC, E_ERROR, "Object not initialized"); } break; case SPL_FS_DIR: if (intern->file_name) { efree(intern->file_name); } intern->file_name_len = spprintf(&intern->file_name, 0, "%s%c%s", spl_filesystem_object_get_path(intern, NULL TSRMLS_CC), slash, intern->u.dir.entry.d_name); break; } } /* }}} */ static int spl_filesystem_dir_read(spl_filesystem_object *intern TSRMLS_DC) /* {{{ */ { if (!intern->u.dir.dirp || !php_stream_readdir(intern->u.dir.dirp, &intern->u.dir.entry)) { intern->u.dir.entry.d_name[0] = '\0'; return 0; } else { return 1; } } /* }}} */ #define IS_SLASH_AT(zs, pos) (IS_SLASH(zs[pos])) static inline int spl_filesystem_is_dot(const char * d_name) /* {{{ */ { return !strcmp(d_name, ".") || !strcmp(d_name, ".."); } /* }}} */ /* {{{ spl_filesystem_dir_open */ /* open a directory resource */ static void spl_filesystem_dir_open(spl_filesystem_object* intern, char *path TSRMLS_DC) { int skip_dots = SPL_HAS_FLAG(intern->flags, SPL_FILE_DIR_SKIPDOTS); intern->type = SPL_FS_DIR; intern->_path_len = strlen(path); intern->u.dir.dirp = php_stream_opendir(path, REPORT_ERRORS, FG(default_context)); if (intern->_path_len > 1 && IS_SLASH_AT(path, intern->_path_len-1)) { intern->_path = estrndup(path, --intern->_path_len); } else { intern->_path = estrndup(path, intern->_path_len); } intern->u.dir.index = 0; if (EG(exception) || intern->u.dir.dirp == NULL) { intern->u.dir.entry.d_name[0] = '\0'; if (!EG(exception)) { /* open 
failed w/out notice (turned to exception due to EH_THROW) */ zend_throw_exception_ex(spl_ce_UnexpectedValueException, 0 TSRMLS_CC, "Failed to open directory \"%s\"", path); } } else { do { spl_filesystem_dir_read(intern TSRMLS_CC); } while (skip_dots && spl_filesystem_is_dot(intern->u.dir.entry.d_name)); } } /* }}} */ static int spl_filesystem_file_open(spl_filesystem_object *intern, int use_include_path, int silent TSRMLS_DC) /* {{{ */ { zval tmp; intern->type = SPL_FS_FILE; php_stat(intern->file_name, intern->file_name_len, FS_IS_DIR, &tmp TSRMLS_CC); if (Z_LVAL(tmp)) { intern->u.file.open_mode = NULL; intern->file_name = NULL; zend_throw_exception_ex(spl_ce_LogicException, 0 TSRMLS_CC, "Cannot use SplFileObject with directories"); return FAILURE; } intern->u.file.context = php_stream_context_from_zval(intern->u.file.zcontext, 0); intern->u.file.stream = php_stream_open_wrapper_ex(intern->file_name, intern->u.file.open_mode, (use_include_path ? USE_PATH : 0) | REPORT_ERRORS, NULL, intern->u.file.context); if (!intern->file_name_len || !intern->u.file.stream) { if (!EG(exception)) { zend_throw_exception_ex(spl_ce_RuntimeException, 0 TSRMLS_CC, "Cannot open file '%s'", intern->file_name_len ? 
intern->file_name : ""); } intern->file_name = NULL; /* until here it is not a copy */ intern->u.file.open_mode = NULL; return FAILURE; } if (intern->u.file.zcontext) { zend_list_addref(Z_RESVAL_P(intern->u.file.zcontext)); } if (intern->file_name_len > 1 && IS_SLASH_AT(intern->file_name, intern->file_name_len-1)) { intern->file_name_len--; } intern->orig_path = estrndup(intern->u.file.stream->orig_path, strlen(intern->u.file.stream->orig_path)); intern->file_name = estrndup(intern->file_name, intern->file_name_len); intern->u.file.open_mode = estrndup(intern->u.file.open_mode, intern->u.file.open_mode_len); /* avoid reference counting in debug mode, thus do it manually */ ZVAL_RESOURCE(&intern->u.file.zresource, php_stream_get_resource_id(intern->u.file.stream)); Z_SET_REFCOUNT(intern->u.file.zresource, 1); intern->u.file.delimiter = ','; intern->u.file.enclosure = '"'; intern->u.file.escape = '\\'; zend_hash_find(&intern->std.ce->function_table, "getcurrentline", sizeof("getcurrentline"), (void **) &intern->u.file.func_getCurr); return SUCCESS; } /* }}} */ /* {{{ spl_filesystem_object_clone */ /* Local zend_object_value creation (on stack) Load the 'other' object Create a new empty object (See spl_filesystem_object_new_ex) Open the directory Clone other members (properties) */ static zend_object_value spl_filesystem_object_clone(zval *zobject TSRMLS_DC) { zend_object_value new_obj_val; zend_object *old_object; zend_object *new_object; zend_object_handle handle = Z_OBJ_HANDLE_P(zobject); spl_filesystem_object *intern; spl_filesystem_object *source; int index, skip_dots; old_object = zend_objects_get_address(zobject TSRMLS_CC); source = (spl_filesystem_object*)old_object; new_obj_val = spl_filesystem_object_new_ex(old_object->ce, &intern TSRMLS_CC); new_object = &intern->std; intern->flags = source->flags; switch (source->type) { case SPL_FS_INFO: intern->_path_len = source->_path_len; intern->_path = estrndup(source->_path, source->_path_len); 
intern->file_name_len = source->file_name_len; intern->file_name = estrndup(source->file_name, intern->file_name_len); break; case SPL_FS_DIR: spl_filesystem_dir_open(intern, source->_path TSRMLS_CC); /* read until we hit the position in which we were before */ skip_dots = SPL_HAS_FLAG(source->flags, SPL_FILE_DIR_SKIPDOTS); for(index = 0; index < source->u.dir.index; ++index) { do { spl_filesystem_dir_read(intern TSRMLS_CC); } while (skip_dots && spl_filesystem_is_dot(intern->u.dir.entry.d_name)); } intern->u.dir.index = index; break; case SPL_FS_FILE: php_error_docref(NULL TSRMLS_CC, E_ERROR, "An object of class %s cannot be cloned", old_object->ce->name); break; } intern->file_class = source->file_class; intern->info_class = source->info_class; intern->oth = source->oth; intern->oth_handler = source->oth_handler; zend_objects_clone_members(new_object, new_obj_val, old_object, handle TSRMLS_CC); if (intern->oth_handler && intern->oth_handler->clone) { intern->oth_handler->clone(source, intern TSRMLS_CC); } return new_obj_val; } /* }}} */ void spl_filesystem_info_set_filename(spl_filesystem_object *intern, char *path, int len, int use_copy TSRMLS_DC) /* {{{ */ { char *p1, *p2; if (intern->file_name) { efree(intern->file_name); } intern->file_name = use_copy ? estrndup(path, len) : path; intern->file_name_len = len; while(IS_SLASH_AT(intern->file_name, intern->file_name_len-1) && intern->file_name_len > 1) { intern->file_name[intern->file_name_len-1] = 0; intern->file_name_len--; } p1 = strrchr(intern->file_name, '/'); #if defined(PHP_WIN32) || defined(NETWARE) p2 = strrchr(intern->file_name, '\\'); #else p2 = 0; #endif if (p1 || p2) { intern->_path_len = (p1 > p2 ? 
p1 : p2) - intern->file_name; } else { intern->_path_len = 0; } if (intern->_path) { efree(intern->_path); } intern->_path = estrndup(path, intern->_path_len); } /* }}} */ static spl_filesystem_object * spl_filesystem_object_create_info(spl_filesystem_object *source, char *file_path, int file_path_len, int use_copy, zend_class_entry *ce, zval *return_value TSRMLS_DC) /* {{{ */ { spl_filesystem_object *intern; zval *arg1; zend_error_handling error_handling; if (!file_path || !file_path_len) { #if defined(PHP_WIN32) zend_throw_exception_ex(spl_ce_RuntimeException, 0 TSRMLS_CC, "Cannot create SplFileInfo for empty path"); if (file_path && !use_copy) { efree(file_path); } #else if (file_path && !use_copy) { efree(file_path); } file_path_len = 1; file_path = "/"; #endif return NULL; } zend_replace_error_handling(EH_THROW, spl_ce_RuntimeException, &error_handling TSRMLS_CC); ce = ce ? ce : source->info_class; zend_update_class_constants(ce TSRMLS_CC); return_value->value.obj = spl_filesystem_object_new_ex(ce, &intern TSRMLS_CC); Z_TYPE_P(return_value) = IS_OBJECT; if (ce->constructor->common.scope != spl_ce_SplFileInfo) { MAKE_STD_ZVAL(arg1); ZVAL_STRINGL(arg1, file_path, file_path_len, use_copy); zend_call_method_with_1_params(&return_value, ce, &ce->constructor, "__construct", NULL, arg1); zval_ptr_dtor(&arg1); } else { spl_filesystem_info_set_filename(intern, file_path, file_path_len, use_copy TSRMLS_CC); } zend_restore_error_handling(&error_handling TSRMLS_CC); return intern; } /* }}} */ static spl_filesystem_object * spl_filesystem_object_create_type(int ht, spl_filesystem_object *source, int type, zend_class_entry *ce, zval *return_value TSRMLS_DC) /* {{{ */ { spl_filesystem_object *intern; zend_bool use_include_path = 0; zval *arg1, *arg2; zend_error_handling error_handling; zend_replace_error_handling(EH_THROW, spl_ce_RuntimeException, &error_handling TSRMLS_CC); switch (source->type) { case SPL_FS_INFO: case SPL_FS_FILE: break; case SPL_FS_DIR: if 
(!source->u.dir.entry.d_name[0]) { zend_throw_exception_ex(spl_ce_RuntimeException, 0 TSRMLS_CC, "Could not open file"); zend_restore_error_handling(&error_handling TSRMLS_CC); return NULL; } } switch (type) { case SPL_FS_INFO: ce = ce ? ce : source->info_class; zend_update_class_constants(ce TSRMLS_CC); return_value->value.obj = spl_filesystem_object_new_ex(ce, &intern TSRMLS_CC); Z_TYPE_P(return_value) = IS_OBJECT; spl_filesystem_object_get_file_name(source TSRMLS_CC); if (ce->constructor->common.scope != spl_ce_SplFileInfo) { MAKE_STD_ZVAL(arg1); ZVAL_STRINGL(arg1, source->file_name, source->file_name_len, 1); zend_call_method_with_1_params(&return_value, ce, &ce->constructor, "__construct", NULL, arg1); zval_ptr_dtor(&arg1); } else { intern->file_name = estrndup(source->file_name, source->file_name_len); intern->file_name_len = source->file_name_len; intern->_path = spl_filesystem_object_get_path(source, &intern->_path_len TSRMLS_CC); intern->_path = estrndup(intern->_path, intern->_path_len); } break; case SPL_FS_FILE: ce = ce ? 
ce : source->file_class; zend_update_class_constants(ce TSRMLS_CC); return_value->value.obj = spl_filesystem_object_new_ex(ce, &intern TSRMLS_CC); Z_TYPE_P(return_value) = IS_OBJECT; spl_filesystem_object_get_file_name(source TSRMLS_CC); if (ce->constructor->common.scope != spl_ce_SplFileObject) { MAKE_STD_ZVAL(arg1); MAKE_STD_ZVAL(arg2); ZVAL_STRINGL(arg1, source->file_name, source->file_name_len, 1); ZVAL_STRINGL(arg2, "r", 1, 1); zend_call_method_with_2_params(&return_value, ce, &ce->constructor, "__construct", NULL, arg1, arg2); zval_ptr_dtor(&arg1); zval_ptr_dtor(&arg2); } else { intern->file_name = source->file_name; intern->file_name_len = source->file_name_len; intern->_path = spl_filesystem_object_get_path(source, &intern->_path_len TSRMLS_CC); intern->_path = estrndup(intern->_path, intern->_path_len); intern->u.file.open_mode = "r"; intern->u.file.open_mode_len = 1; if (ht && zend_parse_parameters(ZEND_NUM_ARGS() TSRMLS_CC, "|sbr", &intern->u.file.open_mode, &intern->u.file.open_mode_len, &use_include_path, &intern->u.file.zcontext) == FAILURE) { zend_restore_error_handling(&error_handling TSRMLS_CC); intern->u.file.open_mode = NULL; intern->file_name = NULL; zval_dtor(return_value); Z_TYPE_P(return_value) = IS_NULL; return NULL; } if (spl_filesystem_file_open(intern, use_include_path, 0 TSRMLS_CC) == FAILURE) { zend_restore_error_handling(&error_handling TSRMLS_CC); zval_dtor(return_value); Z_TYPE_P(return_value) = IS_NULL; return NULL; } } break; case SPL_FS_DIR: zend_restore_error_handling(&error_handling TSRMLS_CC); zend_throw_exception_ex(spl_ce_RuntimeException, 0 TSRMLS_CC, "Operation not supported"); return NULL; } zend_restore_error_handling(&error_handling TSRMLS_CC); return NULL; } /* }}} */ static int spl_filesystem_is_invalid_or_dot(const char * d_name) /* {{{ */ { return d_name[0] == '\0' || spl_filesystem_is_dot(d_name); } /* }}} */ static char *spl_filesystem_object_get_pathname(spl_filesystem_object *intern, int *len TSRMLS_DC) { /* {{{ 
*/ switch (intern->type) { case SPL_FS_INFO: case SPL_FS_FILE: *len = intern->file_name_len; return intern->file_name; case SPL_FS_DIR: if (intern->u.dir.entry.d_name[0]) { spl_filesystem_object_get_file_name(intern TSRMLS_CC); *len = intern->file_name_len; return intern->file_name; } } *len = 0; return NULL; } /* }}} */ static HashTable* spl_filesystem_object_get_debug_info(zval *obj, int *is_temp TSRMLS_DC) /* {{{ */ { spl_filesystem_object *intern = (spl_filesystem_object*)zend_object_store_get_object(obj TSRMLS_CC); HashTable *rv; zval *tmp, zrv; char *pnstr, *path; int pnlen, path_len; char stmp[2]; *is_temp = 1; if (!intern->std.properties) { rebuild_object_properties(&intern->std); } ALLOC_HASHTABLE(rv); ZEND_INIT_SYMTABLE_EX(rv, zend_hash_num_elements(intern->std.properties) + 3, 0); INIT_PZVAL(&zrv); Z_ARRVAL(zrv) = rv; zend_hash_copy(rv, intern->std.properties, (copy_ctor_func_t) zval_add_ref, (void *) &tmp, sizeof(zval *)); pnstr = spl_gen_private_prop_name(spl_ce_SplFileInfo, "pathName", sizeof("pathName")-1, &pnlen TSRMLS_CC); path = spl_filesystem_object_get_pathname(intern, &path_len TSRMLS_CC); add_assoc_stringl_ex(&zrv, pnstr, pnlen+1, path, path_len, 1); efree(pnstr); if (intern->file_name) { pnstr = spl_gen_private_prop_name(spl_ce_SplFileInfo, "fileName", sizeof("fileName")-1, &pnlen TSRMLS_CC); spl_filesystem_object_get_path(intern, &path_len TSRMLS_CC); if (path_len && path_len < intern->file_name_len) { add_assoc_stringl_ex(&zrv, pnstr, pnlen+1, intern->file_name + path_len + 1, intern->file_name_len - (path_len + 1), 1); } else { add_assoc_stringl_ex(&zrv, pnstr, pnlen+1, intern->file_name, intern->file_name_len, 1); } efree(pnstr); } if (intern->type == SPL_FS_DIR) { #ifdef HAVE_GLOB pnstr = spl_gen_private_prop_name(spl_ce_DirectoryIterator, "glob", sizeof("glob")-1, &pnlen TSRMLS_CC); if (php_stream_is(intern->u.dir.dirp ,&php_glob_stream_ops)) { add_assoc_stringl_ex(&zrv, pnstr, pnlen+1, intern->_path, intern->_path_len, 1); } else { 
add_assoc_bool_ex(&zrv, pnstr, pnlen+1, 0); } efree(pnstr); #endif pnstr = spl_gen_private_prop_name(spl_ce_RecursiveDirectoryIterator, "subPathName", sizeof("subPathName")-1, &pnlen TSRMLS_CC); if (intern->u.dir.sub_path) { add_assoc_stringl_ex(&zrv, pnstr, pnlen+1, intern->u.dir.sub_path, intern->u.dir.sub_path_len, 1); } else { add_assoc_stringl_ex(&zrv, pnstr, pnlen+1, "", 0, 1); } efree(pnstr); } if (intern->type == SPL_FS_FILE) { pnstr = spl_gen_private_prop_name(spl_ce_SplFileObject, "openMode", sizeof("openMode")-1, &pnlen TSRMLS_CC); add_assoc_stringl_ex(&zrv, pnstr, pnlen+1, intern->u.file.open_mode, intern->u.file.open_mode_len, 1); efree(pnstr); stmp[1] = '\0'; stmp[0] = intern->u.file.delimiter; pnstr = spl_gen_private_prop_name(spl_ce_SplFileObject, "delimiter", sizeof("delimiter")-1, &pnlen TSRMLS_CC); add_assoc_stringl_ex(&zrv, pnstr, pnlen+1, stmp, 1, 1); efree(pnstr); stmp[0] = intern->u.file.enclosure; pnstr = spl_gen_private_prop_name(spl_ce_SplFileObject, "enclosure", sizeof("enclosure")-1, &pnlen TSRMLS_CC); add_assoc_stringl_ex(&zrv, pnstr, pnlen+1, stmp, 1, 1); efree(pnstr); } return rv; } /* }}} */ zend_function *spl_filesystem_object_get_method_check(zval **object_ptr, char *method, int method_len, const struct _zend_literal *key TSRMLS_DC) /* {{{ */ { spl_filesystem_object *fsobj = zend_object_store_get_object(*object_ptr TSRMLS_CC); if (fsobj->u.dir.entry.d_name[0] == '\0' && fsobj->orig_path == NULL) { method = "_bad_state_ex"; method_len = sizeof("_bad_state_ex") - 1; key = NULL; } return zend_get_std_object_handlers()->get_method(object_ptr, method, method_len, key TSRMLS_CC); } /* }}} */ #define DIT_CTOR_FLAGS 0x00000001 #define DIT_CTOR_GLOB 0x00000002 void spl_filesystem_object_construct(INTERNAL_FUNCTION_PARAMETERS, long ctor_flags) /* {{{ */ { spl_filesystem_object *intern; char *path; int parsed, len; long flags; zend_error_handling error_handling; zend_replace_error_handling(EH_THROW, spl_ce_UnexpectedValueException, 
&error_handling TSRMLS_CC); if (SPL_HAS_FLAG(ctor_flags, DIT_CTOR_FLAGS)) { flags = SPL_FILE_DIR_KEY_AS_PATHNAME|SPL_FILE_DIR_CURRENT_AS_FILEINFO; parsed = zend_parse_parameters(ZEND_NUM_ARGS() TSRMLS_CC, "s|l", &path, &len, &flags); } else { flags = SPL_FILE_DIR_KEY_AS_PATHNAME|SPL_FILE_DIR_CURRENT_AS_SELF; parsed = zend_parse_parameters(ZEND_NUM_ARGS() TSRMLS_CC, "s", &path, &len); } if (SPL_HAS_FLAG(ctor_flags, SPL_FILE_DIR_SKIPDOTS)) { flags |= SPL_FILE_DIR_SKIPDOTS; } if (SPL_HAS_FLAG(ctor_flags, SPL_FILE_DIR_UNIXPATHS)) { flags |= SPL_FILE_DIR_UNIXPATHS; } if (parsed == FAILURE) { zend_restore_error_handling(&error_handling TSRMLS_CC); return; } if (!len) { zend_throw_exception_ex(spl_ce_RuntimeException, 0 TSRMLS_CC, "Directory name must not be empty."); zend_restore_error_handling(&error_handling TSRMLS_CC); return; } intern = (spl_filesystem_object*)zend_object_store_get_object(getThis() TSRMLS_CC); if (intern->_path) { /* object is alreay initialized */ zend_restore_error_handling(&error_handling TSRMLS_CC); php_error_docref(NULL TSRMLS_CC, E_WARNING, "Directory object is already initialized"); return; } intern->flags = flags; #ifdef HAVE_GLOB if (SPL_HAS_FLAG(ctor_flags, DIT_CTOR_GLOB) && strstr(path, "glob://") != path) { spprintf(&path, 0, "glob://%s", path); spl_filesystem_dir_open(intern, path TSRMLS_CC); efree(path); } else #endif { spl_filesystem_dir_open(intern, path TSRMLS_CC); } intern->u.dir.is_recursive = instanceof_function(intern->std.ce, spl_ce_RecursiveDirectoryIterator TSRMLS_CC) ? 1 : 0; zend_restore_error_handling(&error_handling TSRMLS_CC); } /* }}} */ /* {{{ proto void DirectoryIterator::__construct(string path) Cronstructs a new dir iterator from a path. 
*/ SPL_METHOD(DirectoryIterator, __construct) { spl_filesystem_object_construct(INTERNAL_FUNCTION_PARAM_PASSTHRU, 0); } /* }}} */ /* {{{ proto void DirectoryIterator::rewind() Rewind dir back to the start */ SPL_METHOD(DirectoryIterator, rewind) { spl_filesystem_object *intern = (spl_filesystem_object*)zend_object_store_get_object(getThis() TSRMLS_CC); if (zend_parse_parameters_none() == FAILURE) { return; } intern->u.dir.index = 0; if (intern->u.dir.dirp) { php_stream_rewinddir(intern->u.dir.dirp); } spl_filesystem_dir_read(intern TSRMLS_CC); } /* }}} */ /* {{{ proto string DirectoryIterator::key() Return current dir entry */ SPL_METHOD(DirectoryIterator, key) { spl_filesystem_object *intern = (spl_filesystem_object*)zend_object_store_get_object(getThis() TSRMLS_CC); if (zend_parse_parameters_none() == FAILURE) { return; } if (intern->u.dir.dirp) { RETURN_LONG(intern->u.dir.index); } else { RETURN_FALSE; } } /* }}} */ /* {{{ proto DirectoryIterator DirectoryIterator::current() Return this (needed for Iterator interface) */ SPL_METHOD(DirectoryIterator, current) { if (zend_parse_parameters_none() == FAILURE) { return; } RETURN_ZVAL(getThis(), 1, 0); } /* }}} */ /* {{{ proto void DirectoryIterator::next() Move to next entry */ SPL_METHOD(DirectoryIterator, next) { spl_filesystem_object *intern = (spl_filesystem_object*)zend_object_store_get_object(getThis() TSRMLS_CC); int skip_dots = SPL_HAS_FLAG(intern->flags, SPL_FILE_DIR_SKIPDOTS); if (zend_parse_parameters_none() == FAILURE) { return; } intern->u.dir.index++; do { spl_filesystem_dir_read(intern TSRMLS_CC); } while (skip_dots && spl_filesystem_is_dot(intern->u.dir.entry.d_name)); if (intern->file_name) { efree(intern->file_name); intern->file_name = NULL; } } /* }}} */ /* {{{ proto void DirectoryIterator::seek(int position) Seek to the given position */ SPL_METHOD(DirectoryIterator, seek) { spl_filesystem_object *intern = (spl_filesystem_object*)zend_object_store_get_object(getThis() TSRMLS_CC); zval *retval = 
NULL; long pos; if (zend_parse_parameters(ZEND_NUM_ARGS() TSRMLS_CC, "l", &pos) == FAILURE) { return; } if (intern->u.dir.index > pos) { /* we first rewind */ zend_call_method_with_0_params(&this_ptr, Z_OBJCE_P(getThis()), &intern->u.dir.func_rewind, "rewind", &retval); if (retval) { zval_ptr_dtor(&retval); retval = NULL; } } while (intern->u.dir.index < pos) { int valid = 0; zend_call_method_with_0_params(&this_ptr, Z_OBJCE_P(getThis()), &intern->u.dir.func_valid, "valid", &retval); if (retval) { valid = zend_is_true(retval); zval_ptr_dtor(&retval); retval = NULL; } if (!valid) { break; } zend_call_method_with_0_params(&this_ptr, Z_OBJCE_P(getThis()), &intern->u.dir.func_next, "next", &retval); if (retval) { zval_ptr_dtor(&retval); } } } /* }}} */ /* {{{ proto string DirectoryIterator::valid() Check whether dir contains more entries */ SPL_METHOD(DirectoryIterator, valid) { spl_filesystem_object *intern = (spl_filesystem_object*)zend_object_store_get_object(getThis() TSRMLS_CC); if (zend_parse_parameters_none() == FAILURE) { return; } RETURN_BOOL(intern->u.dir.entry.d_name[0] != '\0'); } /* }}} */ /* {{{ proto string SplFileInfo::getPath() Return the path */ SPL_METHOD(SplFileInfo, getPath) { spl_filesystem_object *intern = (spl_filesystem_object*)zend_object_store_get_object(getThis() TSRMLS_CC); char *path; int path_len; if (zend_parse_parameters_none() == FAILURE) { return; } path = spl_filesystem_object_get_path(intern, &path_len TSRMLS_CC); RETURN_STRINGL(path, path_len, 1); } /* }}} */ /* {{{ proto string SplFileInfo::getFilename() Return filename only */ SPL_METHOD(SplFileInfo, getFilename) { spl_filesystem_object *intern = (spl_filesystem_object*)zend_object_store_get_object(getThis() TSRMLS_CC); int path_len; if (zend_parse_parameters_none() == FAILURE) { return; } spl_filesystem_object_get_path(intern, &path_len TSRMLS_CC); if (path_len && path_len < intern->file_name_len) { RETURN_STRINGL(intern->file_name + path_len + 1, intern->file_name_len - 
(path_len + 1), 1); } else { RETURN_STRINGL(intern->file_name, intern->file_name_len, 1); } } /* }}} */ /* {{{ proto string DirectoryIterator::getFilename() Return filename of current dir entry */ SPL_METHOD(DirectoryIterator, getFilename) { spl_filesystem_object *intern = (spl_filesystem_object*)zend_object_store_get_object(getThis() TSRMLS_CC); if (zend_parse_parameters_none() == FAILURE) { return; } RETURN_STRING(intern->u.dir.entry.d_name, 1); } /* }}} */ /* {{{ proto string SplFileInfo::getExtension() Returns file extension component of path */ SPL_METHOD(SplFileInfo, getExtension) { spl_filesystem_object *intern = (spl_filesystem_object*)zend_object_store_get_object(getThis() TSRMLS_CC); char *fname = NULL; const char *p; size_t flen; int path_len, idx; if (zend_parse_parameters_none() == FAILURE) { return; } spl_filesystem_object_get_path(intern, &path_len TSRMLS_CC); if (path_len && path_len < intern->file_name_len) { fname = intern->file_name + path_len + 1; flen = intern->file_name_len - (path_len + 1); } else { fname = intern->file_name; flen = intern->file_name_len; } php_basename(fname, flen, NULL, 0, &fname, &flen TSRMLS_CC); p = zend_memrchr(fname, '.', flen); if (p) { idx = p - fname; RETVAL_STRINGL(fname + idx + 1, flen - idx - 1, 1); efree(fname); return; } else { if (fname) { efree(fname); } RETURN_EMPTY_STRING(); } } /* }}}*/ /* {{{ proto string DirectoryIterator::getExtension() Returns the file extension component of path */ SPL_METHOD(DirectoryIterator, getExtension) { spl_filesystem_object *intern = (spl_filesystem_object*)zend_object_store_get_object(getThis() TSRMLS_CC); char *fname = NULL; const char *p; size_t flen; int idx; if (zend_parse_parameters_none() == FAILURE) { return; } php_basename(intern->u.dir.entry.d_name, strlen(intern->u.dir.entry.d_name), NULL, 0, &fname, &flen TSRMLS_CC); p = zend_memrchr(fname, '.', flen); if (p) { idx = p - fname; RETVAL_STRINGL(fname + idx + 1, flen - idx - 1, 1); efree(fname); return; } else { if 
(fname) { efree(fname); } RETURN_EMPTY_STRING(); } } /* }}} */ /* {{{ proto string SplFileInfo::getBasename([string $suffix]) U Returns filename component of path */ SPL_METHOD(SplFileInfo, getBasename) { spl_filesystem_object *intern = (spl_filesystem_object*)zend_object_store_get_object(getThis() TSRMLS_CC); char *fname, *suffix = 0; size_t flen; int slen = 0, path_len; if (zend_parse_parameters(ZEND_NUM_ARGS() TSRMLS_CC, "|s", &suffix, &slen) == FAILURE) { return; } spl_filesystem_object_get_path(intern, &path_len TSRMLS_CC); if (path_len && path_len < intern->file_name_len) { fname = intern->file_name + path_len + 1; flen = intern->file_name_len - (path_len + 1); } else { fname = intern->file_name; flen = intern->file_name_len; } php_basename(fname, flen, suffix, slen, &fname, &flen TSRMLS_CC); RETURN_STRINGL(fname, flen, 0); } /* }}}*/ /* {{{ proto string DirectoryIterator::getBasename([string $suffix]) U Returns filename component of current dir entry */ SPL_METHOD(DirectoryIterator, getBasename) { spl_filesystem_object *intern = (spl_filesystem_object*)zend_object_store_get_object(getThis() TSRMLS_CC); char *suffix = 0, *fname; int slen = 0; size_t flen; if (zend_parse_parameters(ZEND_NUM_ARGS() TSRMLS_CC, "|s", &suffix, &slen) == FAILURE) { return; } php_basename(intern->u.dir.entry.d_name, strlen(intern->u.dir.entry.d_name), suffix, slen, &fname, &flen TSRMLS_CC); RETURN_STRINGL(fname, flen, 0); } /* }}} */ /* {{{ proto string SplFileInfo::getPathname() Return path and filename */ SPL_METHOD(SplFileInfo, getPathname) { spl_filesystem_object *intern = (spl_filesystem_object*)zend_object_store_get_object(getThis() TSRMLS_CC); char *path; int path_len; if (zend_parse_parameters_none() == FAILURE) { return; } path = spl_filesystem_object_get_pathname(intern, &path_len TSRMLS_CC); if (path != NULL) { RETURN_STRINGL(path, path_len, 1); } else { RETURN_FALSE; } } /* }}} */ /* {{{ proto string FilesystemIterator::key() Return getPathname() or getFilename() 
depending on flags */ SPL_METHOD(FilesystemIterator, key) { spl_filesystem_object *intern = (spl_filesystem_object*)zend_object_store_get_object(getThis() TSRMLS_CC); if (zend_parse_parameters_none() == FAILURE) { return; } if (SPL_FILE_DIR_KEY(intern, SPL_FILE_DIR_KEY_AS_FILENAME)) { RETURN_STRING(intern->u.dir.entry.d_name, 1); } else { spl_filesystem_object_get_file_name(intern TSRMLS_CC); RETURN_STRINGL(intern->file_name, intern->file_name_len, 1); } } /* }}} */ /* {{{ proto string FilesystemIterator::current() Return getFilename(), getFileInfo() or $this depending on flags */ SPL_METHOD(FilesystemIterator, current) { spl_filesystem_object *intern = (spl_filesystem_object*)zend_object_store_get_object(getThis() TSRMLS_CC); if (zend_parse_parameters_none() == FAILURE) { return; } if (SPL_FILE_DIR_CURRENT(intern, SPL_FILE_DIR_CURRENT_AS_PATHNAME)) { spl_filesystem_object_get_file_name(intern TSRMLS_CC); RETURN_STRINGL(intern->file_name, intern->file_name_len, 1); } else if (SPL_FILE_DIR_CURRENT(intern, SPL_FILE_DIR_CURRENT_AS_FILEINFO)) { spl_filesystem_object_get_file_name(intern TSRMLS_CC); spl_filesystem_object_create_type(0, intern, SPL_FS_INFO, NULL, return_value TSRMLS_CC); } else { RETURN_ZVAL(getThis(), 1, 0); /*RETURN_STRING(intern->u.dir.entry.d_name, 1);*/ } } /* }}} */ /* {{{ proto bool DirectoryIterator::isDot() Returns true if current entry is '.' or '..' */ SPL_METHOD(DirectoryIterator, isDot) { spl_filesystem_object *intern = (spl_filesystem_object*)zend_object_store_get_object(getThis() TSRMLS_CC); if (zend_parse_parameters_none() == FAILURE) { return; } RETURN_BOOL(spl_filesystem_is_dot(intern->u.dir.entry.d_name)); } /* }}} */ /* {{{ proto void SplFileInfo::__construct(string file_name) Cronstructs a new SplFileInfo from a path. */ /* zend_replace_error_handling() is used to throw exceptions in case the constructor fails. Here we use this to ensure the object has a valid directory resource. 
When the constructor gets called the object is already created by the engine, so we must only call 'additional' initializations. */ SPL_METHOD(SplFileInfo, __construct) { spl_filesystem_object *intern; char *path; int len; zend_error_handling error_handling; zend_replace_error_handling(EH_THROW, spl_ce_RuntimeException, &error_handling TSRMLS_CC); if (zend_parse_parameters(ZEND_NUM_ARGS() TSRMLS_CC, "s", &path, &len) == FAILURE) { zend_restore_error_handling(&error_handling TSRMLS_CC); return; } intern = (spl_filesystem_object*)zend_object_store_get_object(getThis() TSRMLS_CC); spl_filesystem_info_set_filename(intern, path, len, 1 TSRMLS_CC); zend_restore_error_handling(&error_handling TSRMLS_CC); /* intern->type = SPL_FS_INFO; already set */ } /* }}} */ /* {{{ FileInfoFunction */ #define FileInfoFunction(func_name, func_num) \ SPL_METHOD(SplFileInfo, func_name) \ { \ spl_filesystem_object *intern = (spl_filesystem_object*)zend_object_store_get_object(getThis() TSRMLS_CC); \ zend_error_handling error_handling; \ if (zend_parse_parameters_none() == FAILURE) { \ return; \ } \ \ zend_replace_error_handling(EH_THROW, spl_ce_RuntimeException, &error_handling TSRMLS_CC);\ spl_filesystem_object_get_file_name(intern TSRMLS_CC); \ php_stat(intern->file_name, intern->file_name_len, func_num, return_value TSRMLS_CC); \ zend_restore_error_handling(&error_handling TSRMLS_CC); \ } /* }}} */ /* {{{ proto int SplFileInfo::getPerms() Get file permissions */ FileInfoFunction(getPerms, FS_PERMS) /* }}} */ /* {{{ proto int SplFileInfo::getInode() Get file inode */ FileInfoFunction(getInode, FS_INODE) /* }}} */ /* {{{ proto int SplFileInfo::getSize() Get file size */ FileInfoFunction(getSize, FS_SIZE) /* }}} */ /* {{{ proto int SplFileInfo::getOwner() Get file owner */ FileInfoFunction(getOwner, FS_OWNER) /* }}} */ /* {{{ proto int SplFileInfo::getGroup() Get file group */ FileInfoFunction(getGroup, FS_GROUP) /* }}} */ /* {{{ proto int SplFileInfo::getATime() Get last access time of 
file */ FileInfoFunction(getATime, FS_ATIME) /* }}} */ /* {{{ proto int SplFileInfo::getMTime() Get last modification time of file */ FileInfoFunction(getMTime, FS_MTIME) /* }}} */ /* {{{ proto int SplFileInfo::getCTime() Get inode modification time of file */ FileInfoFunction(getCTime, FS_CTIME) /* }}} */ /* {{{ proto string SplFileInfo::getType() Get file type */ FileInfoFunction(getType, FS_TYPE) /* }}} */ /* {{{ proto bool SplFileInfo::isWritable() Returns true if file can be written */ FileInfoFunction(isWritable, FS_IS_W) /* }}} */ /* {{{ proto bool SplFileInfo::isReadable() Returns true if file can be read */ FileInfoFunction(isReadable, FS_IS_R) /* }}} */ /* {{{ proto bool SplFileInfo::isExecutable() Returns true if file is executable */ FileInfoFunction(isExecutable, FS_IS_X) /* }}} */ /* {{{ proto bool SplFileInfo::isFile() Returns true if file is a regular file */ FileInfoFunction(isFile, FS_IS_FILE) /* }}} */ /* {{{ proto bool SplFileInfo::isDir() Returns true if file is directory */ FileInfoFunction(isDir, FS_IS_DIR) /* }}} */ /* {{{ proto bool SplFileInfo::isLink() Returns true if file is symbolic link */ FileInfoFunction(isLink, FS_IS_LINK) /* }}} */ /* {{{ proto string SplFileInfo::getLinkTarget() U Return the target of a symbolic link */ SPL_METHOD(SplFileInfo, getLinkTarget) { spl_filesystem_object *intern = (spl_filesystem_object*)zend_object_store_get_object(getThis() TSRMLS_CC); int ret; char buff[MAXPATHLEN]; zend_error_handling error_handling; if (zend_parse_parameters_none() == FAILURE) { return; } zend_replace_error_handling(EH_THROW, spl_ce_RuntimeException, &error_handling TSRMLS_CC); #if defined(PHP_WIN32) || HAVE_SYMLINK if (intern->file_name == NULL) { php_error_docref(NULL TSRMLS_CC, E_WARNING, "Empty filename"); RETURN_FALSE; } else if (!IS_ABSOLUTE_PATH(intern->file_name, intern->file_name_len)) { char expanded_path[MAXPATHLEN]; if (!expand_filepath_with_mode(intern->file_name, expanded_path, NULL, 0, CWD_EXPAND TSRMLS_CC)) { 
php_error_docref(NULL TSRMLS_CC, E_WARNING, "No such file or directory"); RETURN_FALSE; } ret = php_sys_readlink(expanded_path, buff, MAXPATHLEN - 1); } else { ret = php_sys_readlink(intern->file_name, buff, MAXPATHLEN-1); } #else ret = -1; /* always fail if not implemented */ #endif if (ret == -1) { zend_throw_exception_ex(spl_ce_RuntimeException, 0 TSRMLS_CC, "Unable to read link %s, error: %s", intern->file_name, strerror(errno)); RETVAL_FALSE; } else { /* Append NULL to the end of the string */ buff[ret] = '\0'; RETVAL_STRINGL(buff, ret, 1); } zend_restore_error_handling(&error_handling TSRMLS_CC); } /* }}} */ #if (!defined(__BEOS__) && !defined(NETWARE) && HAVE_REALPATH) || defined(ZTS) /* {{{ proto string SplFileInfo::getRealPath() Return the resolved path */ SPL_METHOD(SplFileInfo, getRealPath) { spl_filesystem_object *intern = (spl_filesystem_object*)zend_object_store_get_object(getThis() TSRMLS_CC); char buff[MAXPATHLEN]; char *filename; zend_error_handling error_handling; if (zend_parse_parameters_none() == FAILURE) { return; } zend_replace_error_handling(EH_THROW, spl_ce_RuntimeException, &error_handling TSRMLS_CC); if (intern->type == SPL_FS_DIR && !intern->file_name && intern->u.dir.entry.d_name[0]) { spl_filesystem_object_get_file_name(intern TSRMLS_CC); } if (intern->orig_path) { filename = intern->orig_path; } else { filename = intern->file_name; } if (filename && VCWD_REALPATH(filename, buff)) { #ifdef ZTS if (VCWD_ACCESS(buff, F_OK)) { RETVAL_FALSE; } else #endif RETVAL_STRING(buff, 1); } else { RETVAL_FALSE; } zend_restore_error_handling(&error_handling TSRMLS_CC); } /* }}} */ #endif /* {{{ proto SplFileObject SplFileInfo::openFile([string mode = 'r' [, bool use_include_path [, resource context]]]) Open the current file */ SPL_METHOD(SplFileInfo, openFile) { spl_filesystem_object *intern = (spl_filesystem_object*)zend_object_store_get_object(getThis() TSRMLS_CC); spl_filesystem_object_create_type(ht, intern, SPL_FS_FILE, NULL, return_value 
TSRMLS_CC); } /* }}} */ /* {{{ proto void SplFileInfo::setFileClass([string class_name]) Class to use in openFile() */ SPL_METHOD(SplFileInfo, setFileClass) { spl_filesystem_object *intern = (spl_filesystem_object*)zend_object_store_get_object(getThis() TSRMLS_CC); zend_class_entry *ce = spl_ce_SplFileObject; zend_error_handling error_handling; zend_replace_error_handling(EH_THROW, spl_ce_UnexpectedValueException, &error_handling TSRMLS_CC); if (zend_parse_parameters(ZEND_NUM_ARGS() TSRMLS_CC, "|C", &ce) == SUCCESS) { intern->file_class = ce; } zend_restore_error_handling(&error_handling TSRMLS_CC); } /* }}} */ /* {{{ proto void SplFileInfo::setInfoClass([string class_name]) Class to use in getFileInfo(), getPathInfo() */ SPL_METHOD(SplFileInfo, setInfoClass) { spl_filesystem_object *intern = (spl_filesystem_object*)zend_object_store_get_object(getThis() TSRMLS_CC); zend_class_entry *ce = spl_ce_SplFileInfo; zend_error_handling error_handling; zend_replace_error_handling(EH_THROW, spl_ce_UnexpectedValueException, &error_handling TSRMLS_CC); if (zend_parse_parameters(ZEND_NUM_ARGS() TSRMLS_CC, "|C", &ce) == SUCCESS) { intern->info_class = ce; } zend_restore_error_handling(&error_handling TSRMLS_CC); } /* }}} */ /* {{{ proto SplFileInfo SplFileInfo::getFileInfo([string $class_name]) Get/copy file info */ SPL_METHOD(SplFileInfo, getFileInfo) { spl_filesystem_object *intern = (spl_filesystem_object*)zend_object_store_get_object(getThis() TSRMLS_CC); zend_class_entry *ce = intern->info_class; zend_error_handling error_handling; zend_replace_error_handling(EH_THROW, spl_ce_UnexpectedValueException, &error_handling TSRMLS_CC); if (zend_parse_parameters(ZEND_NUM_ARGS() TSRMLS_CC, "|C", &ce) == SUCCESS) { spl_filesystem_object_create_type(ht, intern, SPL_FS_INFO, ce, return_value TSRMLS_CC); } zend_restore_error_handling(&error_handling TSRMLS_CC); } /* }}} */ /* {{{ proto SplFileInfo SplFileInfo::getPathInfo([string $class_name]) Get/copy file info */ 
SPL_METHOD(SplFileInfo, getPathInfo) { spl_filesystem_object *intern = (spl_filesystem_object*)zend_object_store_get_object(getThis() TSRMLS_CC); zend_class_entry *ce = intern->info_class; zend_error_handling error_handling; zend_replace_error_handling(EH_THROW, spl_ce_UnexpectedValueException, &error_handling TSRMLS_CC); if (zend_parse_parameters(ZEND_NUM_ARGS() TSRMLS_CC, "|C", &ce) == SUCCESS) { int path_len; char *path = spl_filesystem_object_get_pathname(intern, &path_len TSRMLS_CC); if (path) { char *dpath = estrndup(path, path_len); path_len = php_dirname(dpath, path_len); spl_filesystem_object_create_info(intern, dpath, path_len, 1, ce, return_value TSRMLS_CC); efree(dpath); } } zend_restore_error_handling(&error_handling TSRMLS_CC); } /* }}} */ /* {{{ */ SPL_METHOD(SplFileInfo, _bad_state_ex) { zend_throw_exception_ex(spl_ce_LogicException, 0 TSRMLS_CC, "The parent constructor was not called: the object is in an " "invalid state "); } /* }}} */ /* {{{ proto void FilesystemIterator::__construct(string path [, int flags]) Cronstructs a new dir iterator from a path. 
*/ SPL_METHOD(FilesystemIterator, __construct) { spl_filesystem_object_construct(INTERNAL_FUNCTION_PARAM_PASSTHRU, DIT_CTOR_FLAGS | SPL_FILE_DIR_SKIPDOTS); } /* }}} */ /* {{{ proto void FilesystemIterator::rewind() Rewind dir back to the start */ SPL_METHOD(FilesystemIterator, rewind) { spl_filesystem_object *intern = (spl_filesystem_object*)zend_object_store_get_object(getThis() TSRMLS_CC); int skip_dots = SPL_HAS_FLAG(intern->flags, SPL_FILE_DIR_SKIPDOTS); if (zend_parse_parameters_none() == FAILURE) { return; } intern->u.dir.index = 0; if (intern->u.dir.dirp) { php_stream_rewinddir(intern->u.dir.dirp); } do { spl_filesystem_dir_read(intern TSRMLS_CC); } while (skip_dots && spl_filesystem_is_dot(intern->u.dir.entry.d_name)); } /* }}} */ /* {{{ proto int FilesystemIterator::getFlags() Get handling flags */ SPL_METHOD(FilesystemIterator, getFlags) { spl_filesystem_object *intern = (spl_filesystem_object*)zend_object_store_get_object(getThis() TSRMLS_CC); if (zend_parse_parameters_none() == FAILURE) { return; } RETURN_LONG(intern->flags & (SPL_FILE_DIR_KEY_MODE_MASK | SPL_FILE_DIR_CURRENT_MODE_MASK | SPL_FILE_DIR_OTHERS_MASK)); } /* }}} */ /* {{{ proto void FilesystemIterator::setFlags(long $flags) Set handling flags */ SPL_METHOD(FilesystemIterator, setFlags) { spl_filesystem_object *intern = (spl_filesystem_object*)zend_object_store_get_object(getThis() TSRMLS_CC); long flags; if (zend_parse_parameters(ZEND_NUM_ARGS() TSRMLS_CC, "l", &flags) == FAILURE) { return; } intern->flags &= ~(SPL_FILE_DIR_KEY_MODE_MASK|SPL_FILE_DIR_CURRENT_MODE_MASK|SPL_FILE_DIR_OTHERS_MASK); intern->flags |= ((SPL_FILE_DIR_KEY_MODE_MASK|SPL_FILE_DIR_CURRENT_MODE_MASK|SPL_FILE_DIR_OTHERS_MASK) & flags); } /* }}} */ /* {{{ proto bool RecursiveDirectoryIterator::hasChildren([bool $allow_links = false]) Returns whether current entry is a directory and not '.' or '..' 
*/ SPL_METHOD(RecursiveDirectoryIterator, hasChildren) { zend_bool allow_links = 0; spl_filesystem_object *intern = (spl_filesystem_object*)zend_object_store_get_object(getThis() TSRMLS_CC); if (zend_parse_parameters(ZEND_NUM_ARGS() TSRMLS_CC, "|b", &allow_links) == FAILURE) { return; } if (spl_filesystem_is_invalid_or_dot(intern->u.dir.entry.d_name)) { RETURN_FALSE; } else { spl_filesystem_object_get_file_name(intern TSRMLS_CC); if (!allow_links && !(intern->flags & SPL_FILE_DIR_FOLLOW_SYMLINKS)) { php_stat(intern->file_name, intern->file_name_len, FS_IS_LINK, return_value TSRMLS_CC); if (zend_is_true(return_value)) { RETURN_FALSE; } } php_stat(intern->file_name, intern->file_name_len, FS_IS_DIR, return_value TSRMLS_CC); } } /* }}} */ /* {{{ proto RecursiveDirectoryIterator DirectoryIterator::getChildren() Returns an iterator for the current entry if it is a directory */ SPL_METHOD(RecursiveDirectoryIterator, getChildren) { zval *zpath, *zflags; spl_filesystem_object *intern = (spl_filesystem_object*)zend_object_store_get_object(getThis() TSRMLS_CC); spl_filesystem_object *subdir; char slash = SPL_HAS_FLAG(intern->flags, SPL_FILE_DIR_UNIXPATHS) ? 
'/' : DEFAULT_SLASH; if (zend_parse_parameters_none() == FAILURE) { return; } spl_filesystem_object_get_file_name(intern TSRMLS_CC); MAKE_STD_ZVAL(zflags); MAKE_STD_ZVAL(zpath); ZVAL_LONG(zflags, intern->flags); ZVAL_STRINGL(zpath, intern->file_name, intern->file_name_len, 1); spl_instantiate_arg_ex2(Z_OBJCE_P(getThis()), &return_value, 0, zpath, zflags TSRMLS_CC); zval_ptr_dtor(&zpath); zval_ptr_dtor(&zflags); subdir = (spl_filesystem_object*)zend_object_store_get_object(return_value TSRMLS_CC); if (subdir) { if (intern->u.dir.sub_path && intern->u.dir.sub_path[0]) { subdir->u.dir.sub_path_len = spprintf(&subdir->u.dir.sub_path, 0, "%s%c%s", intern->u.dir.sub_path, slash, intern->u.dir.entry.d_name); } else { subdir->u.dir.sub_path_len = strlen(intern->u.dir.entry.d_name); subdir->u.dir.sub_path = estrndup(intern->u.dir.entry.d_name, subdir->u.dir.sub_path_len); } subdir->info_class = intern->info_class; subdir->file_class = intern->file_class; subdir->oth = intern->oth; } } /* }}} */ /* {{{ proto void RecursiveDirectoryIterator::getSubPath() Get sub path */ SPL_METHOD(RecursiveDirectoryIterator, getSubPath) { spl_filesystem_object *intern = (spl_filesystem_object*)zend_object_store_get_object(getThis() TSRMLS_CC); if (zend_parse_parameters_none() == FAILURE) { return; } if (intern->u.dir.sub_path) { RETURN_STRINGL(intern->u.dir.sub_path, intern->u.dir.sub_path_len, 1); } else { RETURN_STRINGL("", 0, 1); } } /* }}} */ /* {{{ proto void RecursiveDirectoryIterator::getSubPathname() Get sub path and file name */ SPL_METHOD(RecursiveDirectoryIterator, getSubPathname) { spl_filesystem_object *intern = (spl_filesystem_object*)zend_object_store_get_object(getThis() TSRMLS_CC); char *sub_name; int len; char slash = SPL_HAS_FLAG(intern->flags, SPL_FILE_DIR_UNIXPATHS) ? 
'/' : DEFAULT_SLASH; if (zend_parse_parameters_none() == FAILURE) { return; } if (intern->u.dir.sub_path) { len = spprintf(&sub_name, 0, "%s%c%s", intern->u.dir.sub_path, slash, intern->u.dir.entry.d_name); RETURN_STRINGL(sub_name, len, 0); } else { RETURN_STRING(intern->u.dir.entry.d_name, 1); } } /* }}} */ /* {{{ proto int RecursiveDirectoryIterator::__construct(string path [, int flags]) Cronstructs a new dir iterator from a path. */ SPL_METHOD(RecursiveDirectoryIterator, __construct) { spl_filesystem_object_construct(INTERNAL_FUNCTION_PARAM_PASSTHRU, DIT_CTOR_FLAGS); } /* }}} */ #ifdef HAVE_GLOB /* {{{ proto int GlobIterator::__construct(string path [, int flags]) Cronstructs a new dir iterator from a glob expression (no glob:// needed). */ SPL_METHOD(GlobIterator, __construct) { spl_filesystem_object_construct(INTERNAL_FUNCTION_PARAM_PASSTHRU, DIT_CTOR_FLAGS|DIT_CTOR_GLOB); } /* }}} */ /* {{{ proto int GlobIterator::cont() Return the number of directories and files found by globbing */ SPL_METHOD(GlobIterator, count) { spl_filesystem_object *intern = (spl_filesystem_object*)zend_object_store_get_object(getThis() TSRMLS_CC); if (zend_parse_parameters_none() == FAILURE) { return; } if (php_stream_is(intern->u.dir.dirp ,&php_glob_stream_ops)) { RETURN_LONG(php_glob_stream_get_count(intern->u.dir.dirp, NULL)); } else { /* should not happen */ php_error_docref(NULL TSRMLS_CC, E_ERROR, "GlobIterator lost glob state"); } } /* }}} */ #endif /* HAVE_GLOB */ /* {{{ forward declarations to the iterator handlers */ static void spl_filesystem_dir_it_dtor(zend_object_iterator *iter TSRMLS_DC); static int spl_filesystem_dir_it_valid(zend_object_iterator *iter TSRMLS_DC); static void spl_filesystem_dir_it_current_data(zend_object_iterator *iter, zval ***data TSRMLS_DC); static void spl_filesystem_dir_it_current_key(zend_object_iterator *iter, zval *key TSRMLS_DC); static void spl_filesystem_dir_it_move_forward(zend_object_iterator *iter TSRMLS_DC); static void 
spl_filesystem_dir_it_rewind(zend_object_iterator *iter TSRMLS_DC); /* iterator handler table */ zend_object_iterator_funcs spl_filesystem_dir_it_funcs = { spl_filesystem_dir_it_dtor, spl_filesystem_dir_it_valid, spl_filesystem_dir_it_current_data, spl_filesystem_dir_it_current_key, spl_filesystem_dir_it_move_forward, spl_filesystem_dir_it_rewind }; /* }}} */ /* {{{ spl_ce_dir_get_iterator */ zend_object_iterator *spl_filesystem_dir_get_iterator(zend_class_entry *ce, zval *object, int by_ref TSRMLS_DC) { spl_filesystem_iterator *iterator; spl_filesystem_object *dir_object; if (by_ref) { zend_error(E_ERROR, "An iterator cannot be used with foreach by reference"); } dir_object = (spl_filesystem_object*)zend_object_store_get_object(object TSRMLS_CC); iterator = spl_filesystem_object_to_iterator(dir_object); /* initialize iterator if it wasn't gotten before */ if (iterator->intern.data == NULL) { iterator->intern.data = object; iterator->intern.funcs = &spl_filesystem_dir_it_funcs; /* ->current must be initialized; rewind doesn't set it and valid * doesn't check whether it's set */ iterator->current = object; } zval_add_ref(&object); return (zend_object_iterator*)iterator; } /* }}} */ /* {{{ spl_filesystem_dir_it_dtor */ static void spl_filesystem_dir_it_dtor(zend_object_iterator *iter TSRMLS_DC) { spl_filesystem_iterator *iterator = (spl_filesystem_iterator *)iter; if (iterator->intern.data) { zval *object = iterator->intern.data; zval_ptr_dtor(&object); } /* Otherwise we were called from the owning object free storage handler as * it sets * iterator->intern.data to NULL. * We don't even need to destroy iterator->current as we didn't add a * reference to it in move_forward or get_iterator */ } /* }}} */ /* {{{ spl_filesystem_dir_it_valid */ static int spl_filesystem_dir_it_valid(zend_object_iterator *iter TSRMLS_DC) { spl_filesystem_object *object = spl_filesystem_iterator_to_object((spl_filesystem_iterator *)iter); return object->u.dir.entry.d_name[0] != '\0' ? 
SUCCESS : FAILURE; } /* }}} */ /* {{{ spl_filesystem_dir_it_current_data */ static void spl_filesystem_dir_it_current_data(zend_object_iterator *iter, zval ***data TSRMLS_DC) { spl_filesystem_iterator *iterator = (spl_filesystem_iterator *)iter; *data = &iterator->current; } /* }}} */ /* {{{ spl_filesystem_dir_it_current_key */ static void spl_filesystem_dir_it_current_key(zend_object_iterator *iter, zval *key TSRMLS_DC) { spl_filesystem_object *object = spl_filesystem_iterator_to_object((spl_filesystem_iterator *)iter); ZVAL_LONG(key, object->u.dir.index); } /* }}} */ /* {{{ spl_filesystem_dir_it_move_forward */ static void spl_filesystem_dir_it_move_forward(zend_object_iterator *iter TSRMLS_DC) { spl_filesystem_object *object = spl_filesystem_iterator_to_object((spl_filesystem_iterator *)iter); object->u.dir.index++; spl_filesystem_dir_read(object TSRMLS_CC); if (object->file_name) { efree(object->file_name); object->file_name = NULL; } } /* }}} */ /* {{{ spl_filesystem_dir_it_rewind */ static void spl_filesystem_dir_it_rewind(zend_object_iterator *iter TSRMLS_DC) { spl_filesystem_object *object = spl_filesystem_iterator_to_object((spl_filesystem_iterator *)iter); object->u.dir.index = 0; if (object->u.dir.dirp) { php_stream_rewinddir(object->u.dir.dirp); } spl_filesystem_dir_read(object TSRMLS_CC); } /* }}} */ /* {{{ spl_filesystem_tree_it_dtor */ static void spl_filesystem_tree_it_dtor(zend_object_iterator *iter TSRMLS_DC) { spl_filesystem_iterator *iterator = (spl_filesystem_iterator *)iter; if (iterator->intern.data) { zval *object = iterator->intern.data; zval_ptr_dtor(&object); } else { if (iterator->current) { zval_ptr_dtor(&iterator->current); } } } /* }}} */ /* {{{ spl_filesystem_tree_it_current_data */ static void spl_filesystem_tree_it_current_data(zend_object_iterator *iter, zval ***data TSRMLS_DC) { spl_filesystem_iterator *iterator = (spl_filesystem_iterator *)iter; spl_filesystem_object *object = spl_filesystem_iterator_to_object(iterator); if 
(SPL_FILE_DIR_CURRENT(object, SPL_FILE_DIR_CURRENT_AS_PATHNAME)) { if (!iterator->current) { ALLOC_INIT_ZVAL(iterator->current); spl_filesystem_object_get_file_name(object TSRMLS_CC); ZVAL_STRINGL(iterator->current, object->file_name, object->file_name_len, 1); } *data = &iterator->current; } else if (SPL_FILE_DIR_CURRENT(object, SPL_FILE_DIR_CURRENT_AS_FILEINFO)) { if (!iterator->current) { ALLOC_INIT_ZVAL(iterator->current); spl_filesystem_object_get_file_name(object TSRMLS_CC); spl_filesystem_object_create_type(0, object, SPL_FS_INFO, NULL, iterator->current TSRMLS_CC); } *data = &iterator->current; } else { *data = (zval**)&iterator->intern.data; } } /* }}} */ /* {{{ spl_filesystem_tree_it_current_key */ static void spl_filesystem_tree_it_current_key(zend_object_iterator *iter, zval *key TSRMLS_DC) { spl_filesystem_object *object = spl_filesystem_iterator_to_object((spl_filesystem_iterator *)iter); if (SPL_FILE_DIR_KEY(object, SPL_FILE_DIR_KEY_AS_FILENAME)) { ZVAL_STRING(key, object->u.dir.entry.d_name, 1); } else { spl_filesystem_object_get_file_name(object TSRMLS_CC); ZVAL_STRINGL(key, object->file_name, object->file_name_len, 1); } } /* }}} */ /* {{{ spl_filesystem_tree_it_move_forward */ static void spl_filesystem_tree_it_move_forward(zend_object_iterator *iter TSRMLS_DC) { spl_filesystem_iterator *iterator = (spl_filesystem_iterator *)iter; spl_filesystem_object *object = spl_filesystem_iterator_to_object(iterator); object->u.dir.index++; do { spl_filesystem_dir_read(object TSRMLS_CC); } while (spl_filesystem_is_dot(object->u.dir.entry.d_name)); if (object->file_name) { efree(object->file_name); object->file_name = NULL; } if (iterator->current) { zval_ptr_dtor(&iterator->current); iterator->current = NULL; } } /* }}} */ /* {{{ spl_filesystem_tree_it_rewind */ static void spl_filesystem_tree_it_rewind(zend_object_iterator *iter TSRMLS_DC) { spl_filesystem_iterator *iterator = (spl_filesystem_iterator *)iter; spl_filesystem_object *object = 
spl_filesystem_iterator_to_object(iterator); object->u.dir.index = 0; if (object->u.dir.dirp) { php_stream_rewinddir(object->u.dir.dirp); } do { spl_filesystem_dir_read(object TSRMLS_CC); } while (spl_filesystem_is_dot(object->u.dir.entry.d_name)); if (iterator->current) { zval_ptr_dtor(&iterator->current); iterator->current = NULL; } } /* }}} */ /* {{{ iterator handler table */ zend_object_iterator_funcs spl_filesystem_tree_it_funcs = { spl_filesystem_tree_it_dtor, spl_filesystem_dir_it_valid, spl_filesystem_tree_it_current_data, spl_filesystem_tree_it_current_key, spl_filesystem_tree_it_move_forward, spl_filesystem_tree_it_rewind }; /* }}} */ /* {{{ spl_ce_dir_get_iterator */ zend_object_iterator *spl_filesystem_tree_get_iterator(zend_class_entry *ce, zval *object, int by_ref TSRMLS_DC) { spl_filesystem_iterator *iterator; spl_filesystem_object *dir_object; if (by_ref) { zend_error(E_ERROR, "An iterator cannot be used with foreach by reference"); } dir_object = (spl_filesystem_object*)zend_object_store_get_object(object TSRMLS_CC); iterator = spl_filesystem_object_to_iterator(dir_object); /* initialize iterator if wasn't gotten before */ if (iterator->intern.data == NULL) { iterator->intern.data = object; iterator->intern.funcs = &spl_filesystem_tree_it_funcs; } zval_add_ref(&object); return (zend_object_iterator*)iterator; } /* }}} */ /* {{{ spl_filesystem_object_cast */ static int spl_filesystem_object_cast(zval *readobj, zval *writeobj, int type TSRMLS_DC) { spl_filesystem_object *intern = (spl_filesystem_object*)zend_object_store_get_object(readobj TSRMLS_CC); if (type == IS_STRING) { if (Z_OBJCE_P(readobj)->__tostring) { return std_object_handlers.cast_object(readobj, writeobj, type TSRMLS_CC); } switch (intern->type) { case SPL_FS_INFO: case SPL_FS_FILE: if (readobj == writeobj) { zval retval; zval *retval_ptr = &retval; ZVAL_STRINGL(retval_ptr, intern->file_name, intern->file_name_len, 1); zval_dtor(readobj); ZVAL_ZVAL(writeobj, retval_ptr, 0, 0); } else { 
ZVAL_STRINGL(writeobj, intern->file_name, intern->file_name_len, 1); } return SUCCESS; case SPL_FS_DIR: if (readobj == writeobj) { zval retval; zval *retval_ptr = &retval; ZVAL_STRING(retval_ptr, intern->u.dir.entry.d_name, 1); zval_dtor(readobj); ZVAL_ZVAL(writeobj, retval_ptr, 0, 0); } else { ZVAL_STRING(writeobj, intern->u.dir.entry.d_name, 1); } return SUCCESS; } } else if (type == IS_BOOL) { ZVAL_BOOL(writeobj, 1); return SUCCESS; } if (readobj == writeobj) { zval_dtor(readobj); } ZVAL_NULL(writeobj); return FAILURE; } /* }}} */ /* {{{ declare method parameters */ /* supply a name and default to call by parameter */ ZEND_BEGIN_ARG_INFO(arginfo_info___construct, 0) ZEND_ARG_INFO(0, file_name) ZEND_END_ARG_INFO() ZEND_BEGIN_ARG_INFO_EX(arginfo_info_openFile, 0, 0, 0) ZEND_ARG_INFO(0, open_mode) ZEND_ARG_INFO(0, use_include_path) ZEND_ARG_INFO(0, context) ZEND_END_ARG_INFO() ZEND_BEGIN_ARG_INFO_EX(arginfo_info_optinalFileClass, 0, 0, 0) ZEND_ARG_INFO(0, class_name) ZEND_END_ARG_INFO() ZEND_BEGIN_ARG_INFO_EX(arginfo_optinalSuffix, 0, 0, 0) ZEND_ARG_INFO(0, suffix) ZEND_END_ARG_INFO() ZEND_BEGIN_ARG_INFO(arginfo_splfileinfo_void, 0) ZEND_END_ARG_INFO() /* the method table */ /* each method can have its own parameters and visibility */ static const zend_function_entry spl_SplFileInfo_functions[] = { SPL_ME(SplFileInfo, __construct, arginfo_info___construct, ZEND_ACC_PUBLIC) SPL_ME(SplFileInfo, getPath, arginfo_splfileinfo_void, ZEND_ACC_PUBLIC) SPL_ME(SplFileInfo, getFilename, arginfo_splfileinfo_void, ZEND_ACC_PUBLIC) SPL_ME(SplFileInfo, getExtension, arginfo_splfileinfo_void, ZEND_ACC_PUBLIC) SPL_ME(SplFileInfo, getBasename, arginfo_optinalSuffix, ZEND_ACC_PUBLIC) SPL_ME(SplFileInfo, getPathname, arginfo_splfileinfo_void, ZEND_ACC_PUBLIC) SPL_ME(SplFileInfo, getPerms, arginfo_splfileinfo_void, ZEND_ACC_PUBLIC) SPL_ME(SplFileInfo, getInode, arginfo_splfileinfo_void, ZEND_ACC_PUBLIC) SPL_ME(SplFileInfo, getSize, arginfo_splfileinfo_void, ZEND_ACC_PUBLIC) 
SPL_ME(SplFileInfo, getOwner, arginfo_splfileinfo_void, ZEND_ACC_PUBLIC) SPL_ME(SplFileInfo, getGroup, arginfo_splfileinfo_void, ZEND_ACC_PUBLIC) SPL_ME(SplFileInfo, getATime, arginfo_splfileinfo_void, ZEND_ACC_PUBLIC) SPL_ME(SplFileInfo, getMTime, arginfo_splfileinfo_void, ZEND_ACC_PUBLIC) SPL_ME(SplFileInfo, getCTime, arginfo_splfileinfo_void, ZEND_ACC_PUBLIC) SPL_ME(SplFileInfo, getType, arginfo_splfileinfo_void, ZEND_ACC_PUBLIC) SPL_ME(SplFileInfo, isWritable, arginfo_splfileinfo_void, ZEND_ACC_PUBLIC) SPL_ME(SplFileInfo, isReadable, arginfo_splfileinfo_void, ZEND_ACC_PUBLIC) SPL_ME(SplFileInfo, isExecutable, arginfo_splfileinfo_void, ZEND_ACC_PUBLIC) SPL_ME(SplFileInfo, isFile, arginfo_splfileinfo_void, ZEND_ACC_PUBLIC) SPL_ME(SplFileInfo, isDir, arginfo_splfileinfo_void, ZEND_ACC_PUBLIC) SPL_ME(SplFileInfo, isLink, arginfo_splfileinfo_void, ZEND_ACC_PUBLIC) SPL_ME(SplFileInfo, getLinkTarget, arginfo_splfileinfo_void, ZEND_ACC_PUBLIC) #if (!defined(__BEOS__) && !defined(NETWARE) && HAVE_REALPATH) || defined(ZTS) SPL_ME(SplFileInfo, getRealPath, arginfo_splfileinfo_void, ZEND_ACC_PUBLIC) #endif SPL_ME(SplFileInfo, getFileInfo, arginfo_info_optinalFileClass, ZEND_ACC_PUBLIC) SPL_ME(SplFileInfo, getPathInfo, arginfo_info_optinalFileClass, ZEND_ACC_PUBLIC) SPL_ME(SplFileInfo, openFile, arginfo_info_openFile, ZEND_ACC_PUBLIC) SPL_ME(SplFileInfo, setFileClass, arginfo_info_optinalFileClass, ZEND_ACC_PUBLIC) SPL_ME(SplFileInfo, setInfoClass, arginfo_info_optinalFileClass, ZEND_ACC_PUBLIC) SPL_ME(SplFileInfo, _bad_state_ex, NULL, ZEND_ACC_PUBLIC|ZEND_ACC_FINAL) SPL_MA(SplFileInfo, __toString, SplFileInfo, getPathname, arginfo_splfileinfo_void, ZEND_ACC_PUBLIC) PHP_FE_END }; ZEND_BEGIN_ARG_INFO(arginfo_dir___construct, 0) ZEND_ARG_INFO(0, path) ZEND_END_ARG_INFO() ZEND_BEGIN_ARG_INFO(arginfo_dir_it_seek, 0) ZEND_ARG_INFO(0, position) ZEND_END_ARG_INFO(); /* the method table */ /* each method can have its own parameters and visibility */ static const zend_function_entry 
spl_DirectoryIterator_functions[] = { SPL_ME(DirectoryIterator, __construct, arginfo_dir___construct, ZEND_ACC_PUBLIC) SPL_ME(DirectoryIterator, getFilename, arginfo_splfileinfo_void, ZEND_ACC_PUBLIC) SPL_ME(DirectoryIterator, getExtension, arginfo_splfileinfo_void, ZEND_ACC_PUBLIC) SPL_ME(DirectoryIterator, getBasename, arginfo_optinalSuffix, ZEND_ACC_PUBLIC) SPL_ME(DirectoryIterator, isDot, arginfo_splfileinfo_void, ZEND_ACC_PUBLIC) SPL_ME(DirectoryIterator, rewind, arginfo_splfileinfo_void, ZEND_ACC_PUBLIC) SPL_ME(DirectoryIterator, valid, arginfo_splfileinfo_void, ZEND_ACC_PUBLIC) SPL_ME(DirectoryIterator, key, arginfo_splfileinfo_void, ZEND_ACC_PUBLIC) SPL_ME(DirectoryIterator, current, arginfo_splfileinfo_void, ZEND_ACC_PUBLIC) SPL_ME(DirectoryIterator, next, arginfo_splfileinfo_void, ZEND_ACC_PUBLIC) SPL_ME(DirectoryIterator, seek, arginfo_dir_it_seek, ZEND_ACC_PUBLIC) SPL_MA(DirectoryIterator, __toString, DirectoryIterator, getFilename, arginfo_splfileinfo_void, ZEND_ACC_PUBLIC) PHP_FE_END }; ZEND_BEGIN_ARG_INFO_EX(arginfo_r_dir___construct, 0, 0, 1) ZEND_ARG_INFO(0, path) ZEND_ARG_INFO(0, flags) ZEND_END_ARG_INFO() ZEND_BEGIN_ARG_INFO_EX(arginfo_r_dir_hasChildren, 0, 0, 0) ZEND_ARG_INFO(0, allow_links) ZEND_END_ARG_INFO() ZEND_BEGIN_ARG_INFO_EX(arginfo_r_dir_setFlags, 0, 0, 0) ZEND_ARG_INFO(0, flags) ZEND_END_ARG_INFO() static const zend_function_entry spl_FilesystemIterator_functions[] = { SPL_ME(FilesystemIterator, __construct, arginfo_r_dir___construct, ZEND_ACC_PUBLIC) SPL_ME(FilesystemIterator, rewind, arginfo_splfileinfo_void, ZEND_ACC_PUBLIC) SPL_ME(DirectoryIterator, next, arginfo_splfileinfo_void, ZEND_ACC_PUBLIC) SPL_ME(FilesystemIterator, key, arginfo_splfileinfo_void, ZEND_ACC_PUBLIC) SPL_ME(FilesystemIterator, current, arginfo_splfileinfo_void, ZEND_ACC_PUBLIC) SPL_ME(FilesystemIterator, getFlags, arginfo_splfileinfo_void, ZEND_ACC_PUBLIC) SPL_ME(FilesystemIterator, setFlags, arginfo_r_dir_setFlags, ZEND_ACC_PUBLIC) PHP_FE_END }; static const 
zend_function_entry spl_RecursiveDirectoryIterator_functions[] = { SPL_ME(RecursiveDirectoryIterator, __construct, arginfo_r_dir___construct, ZEND_ACC_PUBLIC) SPL_ME(RecursiveDirectoryIterator, hasChildren, arginfo_r_dir_hasChildren, ZEND_ACC_PUBLIC) SPL_ME(RecursiveDirectoryIterator, getChildren, arginfo_splfileinfo_void, ZEND_ACC_PUBLIC) SPL_ME(RecursiveDirectoryIterator, getSubPath, arginfo_splfileinfo_void, ZEND_ACC_PUBLIC) SPL_ME(RecursiveDirectoryIterator, getSubPathname,arginfo_splfileinfo_void, ZEND_ACC_PUBLIC) PHP_FE_END }; #ifdef HAVE_GLOB static const zend_function_entry spl_GlobIterator_functions[] = { SPL_ME(GlobIterator, __construct, arginfo_r_dir___construct, ZEND_ACC_PUBLIC) SPL_ME(GlobIterator, count, arginfo_splfileinfo_void, ZEND_ACC_PUBLIC) PHP_FE_END }; #endif /* }}} */ static int spl_filesystem_file_read(spl_filesystem_object *intern, int silent TSRMLS_DC) /* {{{ */ { char *buf; size_t line_len = 0; long line_add = (intern->u.file.current_line || intern->u.file.current_zval) ? 
1 : 0; spl_filesystem_file_free_line(intern TSRMLS_CC); if (php_stream_eof(intern->u.file.stream)) { if (!silent) { zend_throw_exception_ex(spl_ce_RuntimeException, 0 TSRMLS_CC, "Cannot read from file %s", intern->file_name); } return FAILURE; } if (intern->u.file.max_line_len > 0) { buf = safe_emalloc((intern->u.file.max_line_len + 1), sizeof(char), 0); if (php_stream_get_line(intern->u.file.stream, buf, intern->u.file.max_line_len + 1, &line_len) == NULL) { efree(buf); buf = NULL; } else { buf[line_len] = '\0'; } } else { buf = php_stream_get_line(intern->u.file.stream, NULL, 0, &line_len); } if (!buf) { intern->u.file.current_line = estrdup(""); intern->u.file.current_line_len = 0; } else { if (SPL_HAS_FLAG(intern->flags, SPL_FILE_OBJECT_DROP_NEW_LINE)) { line_len = strcspn(buf, "\r\n"); buf[line_len] = '\0'; } intern->u.file.current_line = buf; intern->u.file.current_line_len = line_len; } intern->u.file.current_line_num += line_add; return SUCCESS; } /* }}} */ static int spl_filesystem_file_call(spl_filesystem_object *intern, zend_function *func_ptr, int pass_num_args, zval *return_value, zval *arg2 TSRMLS_DC) /* {{{ */ { zend_fcall_info fci; zend_fcall_info_cache fcic; zval z_fname; zval * zresource_ptr = &intern->u.file.zresource, *retval; int result; int num_args = pass_num_args + (arg2 ? 2 : 1); zval ***params = (zval***)safe_emalloc(num_args, sizeof(zval**), 0); params[0] = &zresource_ptr; if (arg2) { params[1] = &arg2; } zend_get_parameters_array_ex(pass_num_args, params+(arg2 ? 
2 : 1)); ZVAL_STRING(&z_fname, func_ptr->common.function_name, 0); fci.size = sizeof(fci); fci.function_table = EG(function_table); fci.object_ptr = NULL; fci.function_name = &z_fname; fci.retval_ptr_ptr = &retval; fci.param_count = num_args; fci.params = params; fci.no_separation = 1; fci.symbol_table = NULL; fcic.initialized = 1; fcic.function_handler = func_ptr; fcic.calling_scope = NULL; fcic.called_scope = NULL; fcic.object_ptr = NULL; result = zend_call_function(&fci, &fcic TSRMLS_CC); if (result == FAILURE) { RETVAL_FALSE; } else { ZVAL_ZVAL(return_value, retval, 1, 1); } efree(params); return result; } /* }}} */ #define FileFunctionCall(func_name, pass_num_args, arg2) /* {{{ */ \ { \ zend_function *func_ptr; \ int ret; \ ret = zend_hash_find(EG(function_table), #func_name, sizeof(#func_name), (void **) &func_ptr); \ if (ret != SUCCESS) { \ zend_throw_exception_ex(spl_ce_RuntimeException, 0 TSRMLS_CC, "Internal error, function '%s' not found. Please report", #func_name); \ return; \ } \ spl_filesystem_file_call(intern, func_ptr, pass_num_args, return_value, arg2 TSRMLS_CC); \ } /* }}} */ static int spl_filesystem_file_read_csv(spl_filesystem_object *intern, char delimiter, char enclosure, char escape, zval *return_value TSRMLS_DC) /* {{{ */ { int ret = SUCCESS; do { ret = spl_filesystem_file_read(intern, 1 TSRMLS_CC); } while (ret == SUCCESS && !intern->u.file.current_line_len && SPL_HAS_FLAG(intern->flags, SPL_FILE_OBJECT_SKIP_EMPTY)); if (ret == SUCCESS) { size_t buf_len = intern->u.file.current_line_len; char *buf = estrndup(intern->u.file.current_line, buf_len); if (intern->u.file.current_zval) { zval_ptr_dtor(&intern->u.file.current_zval); } ALLOC_INIT_ZVAL(intern->u.file.current_zval); php_fgetcsv(intern->u.file.stream, delimiter, enclosure, escape, buf_len, buf, intern->u.file.current_zval TSRMLS_CC); if (return_value) { if (Z_TYPE_P(return_value) != IS_NULL) { zval_dtor(return_value); ZVAL_NULL(return_value); } ZVAL_ZVAL(return_value, 
intern->u.file.current_zval, 1, 0); } } return ret; } /* }}} */ static int spl_filesystem_file_read_line_ex(zval * this_ptr, spl_filesystem_object *intern, int silent TSRMLS_DC) /* {{{ */ { zval *retval = NULL; /* 1) use fgetcsv? 2) overloaded call the function, 3) do it directly */ if (SPL_HAS_FLAG(intern->flags, SPL_FILE_OBJECT_READ_CSV) || intern->u.file.func_getCurr->common.scope != spl_ce_SplFileObject) { if (php_stream_eof(intern->u.file.stream)) { if (!silent) { zend_throw_exception_ex(spl_ce_RuntimeException, 0 TSRMLS_CC, "Cannot read from file %s", intern->file_name); } return FAILURE; } if (SPL_HAS_FLAG(intern->flags, SPL_FILE_OBJECT_READ_CSV)) { return spl_filesystem_file_read_csv(intern, intern->u.file.delimiter, intern->u.file.enclosure, intern->u.file.escape, NULL TSRMLS_CC); } else { zend_call_method_with_0_params(&this_ptr, Z_OBJCE_P(getThis()), &intern->u.file.func_getCurr, "getCurrentLine", &retval); } if (retval) { if (intern->u.file.current_line || intern->u.file.current_zval) { intern->u.file.current_line_num++; } spl_filesystem_file_free_line(intern TSRMLS_CC); if (Z_TYPE_P(retval) == IS_STRING) { intern->u.file.current_line = estrndup(Z_STRVAL_P(retval), Z_STRLEN_P(retval)); intern->u.file.current_line_len = Z_STRLEN_P(retval); } else { MAKE_STD_ZVAL(intern->u.file.current_zval); ZVAL_ZVAL(intern->u.file.current_zval, retval, 1, 0); } zval_ptr_dtor(&retval); return SUCCESS; } else { return FAILURE; } } else { return spl_filesystem_file_read(intern, silent TSRMLS_CC); } } /* }}} */ static int spl_filesystem_file_is_empty_line(spl_filesystem_object *intern TSRMLS_DC) /* {{{ */ { if (intern->u.file.current_line) { return intern->u.file.current_line_len == 0; } else if (intern->u.file.current_zval) { switch(Z_TYPE_P(intern->u.file.current_zval)) { case IS_STRING: return Z_STRLEN_P(intern->u.file.current_zval) == 0; case IS_ARRAY: if (SPL_HAS_FLAG(intern->flags, SPL_FILE_OBJECT_READ_CSV) && 
zend_hash_num_elements(Z_ARRVAL_P(intern->u.file.current_zval)) == 1) { zval ** first = Z_ARRVAL_P(intern->u.file.current_zval)->pListHead->pData; return Z_TYPE_PP(first) == IS_STRING && Z_STRLEN_PP(first) == 0; } return zend_hash_num_elements(Z_ARRVAL_P(intern->u.file.current_zval)) == 0; case IS_NULL: return 1; default: return 0; } } else { return 1; } } /* }}} */ static int spl_filesystem_file_read_line(zval * this_ptr, spl_filesystem_object *intern, int silent TSRMLS_DC) /* {{{ */ { int ret = spl_filesystem_file_read_line_ex(this_ptr, intern, silent TSRMLS_CC); while (SPL_HAS_FLAG(intern->flags, SPL_FILE_OBJECT_SKIP_EMPTY) && ret == SUCCESS && spl_filesystem_file_is_empty_line(intern TSRMLS_CC)) { spl_filesystem_file_free_line(intern TSRMLS_CC); ret = spl_filesystem_file_read_line_ex(this_ptr, intern, silent TSRMLS_CC); } return ret; } /* }}} */ static void spl_filesystem_file_rewind(zval * this_ptr, spl_filesystem_object *intern TSRMLS_DC) /* {{{ */ { if (-1 == php_stream_rewind(intern->u.file.stream)) { zend_throw_exception_ex(spl_ce_RuntimeException, 0 TSRMLS_CC, "Cannot rewind file %s", intern->file_name); } else { spl_filesystem_file_free_line(intern TSRMLS_CC); intern->u.file.current_line_num = 0; } if (SPL_HAS_FLAG(intern->flags, SPL_FILE_OBJECT_READ_AHEAD)) { spl_filesystem_file_read_line(this_ptr, intern, 1 TSRMLS_CC); } } /* }}} */ /* {{{ proto void SplFileObject::__construct(string filename [, string mode = 'r' [, bool use_include_path [, resource context]]]]) Construct a new file object */ SPL_METHOD(SplFileObject, __construct) { spl_filesystem_object *intern = (spl_filesystem_object*)zend_object_store_get_object(getThis() TSRMLS_CC); zend_bool use_include_path = 0; char *p1, *p2; char *tmp_path; int tmp_path_len; zend_error_handling error_handling; zend_replace_error_handling(EH_THROW, spl_ce_RuntimeException, &error_handling TSRMLS_CC); intern->u.file.open_mode = NULL; intern->u.file.open_mode_len = 0; if (zend_parse_parameters(ZEND_NUM_ARGS() 
TSRMLS_CC, "p|sbr!", &intern->file_name, &intern->file_name_len, &intern->u.file.open_mode, &intern->u.file.open_mode_len, &use_include_path, &intern->u.file.zcontext) == FAILURE) { intern->u.file.open_mode = NULL; intern->file_name = NULL; zend_restore_error_handling(&error_handling TSRMLS_CC); return; } if (intern->u.file.open_mode == NULL) { intern->u.file.open_mode = "r"; intern->u.file.open_mode_len = 1; } if (spl_filesystem_file_open(intern, use_include_path, 0 TSRMLS_CC) == SUCCESS) { tmp_path_len = strlen(intern->u.file.stream->orig_path); if (tmp_path_len > 1 && IS_SLASH_AT(intern->u.file.stream->orig_path, tmp_path_len-1)) { tmp_path_len--; } tmp_path = estrndup(intern->u.file.stream->orig_path, tmp_path_len); p1 = strrchr(tmp_path, '/'); #if defined(PHP_WIN32) || defined(NETWARE) p2 = strrchr(tmp_path, '\\'); #else p2 = 0; #endif if (p1 || p2) { intern->_path_len = (p1 > p2 ? p1 : p2) - tmp_path; } else { intern->_path_len = 0; } efree(tmp_path); intern->_path = estrndup(intern->u.file.stream->orig_path, intern->_path_len); } zend_restore_error_handling(&error_handling TSRMLS_CC); } /* }}} */ /* {{{ proto void SplTempFileObject::__construct([int max_memory]) Construct a new temp file object */ SPL_METHOD(SplTempFileObject, __construct) { long max_memory = PHP_STREAM_MAX_MEM; char tmp_fname[48]; spl_filesystem_object *intern = (spl_filesystem_object*)zend_object_store_get_object(getThis() TSRMLS_CC); zend_error_handling error_handling; zend_replace_error_handling(EH_THROW, spl_ce_RuntimeException, &error_handling TSRMLS_CC); if (zend_parse_parameters(ZEND_NUM_ARGS() TSRMLS_CC, "|l", &max_memory) == FAILURE) { zend_restore_error_handling(&error_handling TSRMLS_CC); return; } if (max_memory < 0) { intern->file_name = "php://memory"; intern->file_name_len = 12; } else if (ZEND_NUM_ARGS()) { intern->file_name_len = slprintf(tmp_fname, sizeof(tmp_fname), "php://temp/maxmemory:%ld", max_memory); intern->file_name = tmp_fname; } else { intern->file_name = 
"php://temp"; intern->file_name_len = 10; } intern->u.file.open_mode = "wb"; intern->u.file.open_mode_len = 1; intern->u.file.zcontext = NULL; if (spl_filesystem_file_open(intern, 0, 0 TSRMLS_CC) == SUCCESS) { intern->_path_len = 0; intern->_path = estrndup("", 0); } zend_restore_error_handling(&error_handling TSRMLS_CC); } /* }}} */ /* {{{ proto void SplFileObject::rewind() Rewind the file and read the first line */ SPL_METHOD(SplFileObject, rewind) { spl_filesystem_object *intern = (spl_filesystem_object*)zend_object_store_get_object(getThis() TSRMLS_CC); if (zend_parse_parameters_none() == FAILURE) { return; } spl_filesystem_file_rewind(getThis(), intern TSRMLS_CC); } /* }}} */ /* {{{ proto void SplFileObject::eof() Return whether end of file is reached */ SPL_METHOD(SplFileObject, eof) { spl_filesystem_object *intern = (spl_filesystem_object*)zend_object_store_get_object(getThis() TSRMLS_CC); if (zend_parse_parameters_none() == FAILURE) { return; } RETURN_BOOL(php_stream_eof(intern->u.file.stream)); } /* }}} */ /* {{{ proto void SplFileObject::valid() Return !eof() */ SPL_METHOD(SplFileObject, valid) { spl_filesystem_object *intern = (spl_filesystem_object*)zend_object_store_get_object(getThis() TSRMLS_CC); if (zend_parse_parameters_none() == FAILURE) { return; } if (SPL_HAS_FLAG(intern->flags, SPL_FILE_OBJECT_READ_AHEAD)) { RETURN_BOOL(intern->u.file.current_line || intern->u.file.current_zval); } else { RETVAL_BOOL(!php_stream_eof(intern->u.file.stream)); } } /* }}} */ /* {{{ proto string SplFileObject::fgets() Rturn next line from file */ SPL_METHOD(SplFileObject, fgets) { spl_filesystem_object *intern = (spl_filesystem_object*)zend_object_store_get_object(getThis() TSRMLS_CC); if (zend_parse_parameters_none() == FAILURE) { return; } if (spl_filesystem_file_read(intern, 0 TSRMLS_CC) == FAILURE) { RETURN_FALSE; } RETURN_STRINGL(intern->u.file.current_line, intern->u.file.current_line_len, 1); } /* }}} */ /* {{{ proto string SplFileObject::current() Return 
current line from file */
SPL_METHOD(SplFileObject, current)
{
	spl_filesystem_object *intern = (spl_filesystem_object*)zend_object_store_get_object(getThis() TSRMLS_CC);

	if (zend_parse_parameters_none() == FAILURE) {
		return;
	}

	/* lazily fetch a line if nothing is buffered yet */
	if (!intern->u.file.current_line && !intern->u.file.current_zval) {
		spl_filesystem_file_read_line(getThis(), intern, 1 TSRMLS_CC);
	}
	if (intern->u.file.current_line && (!SPL_HAS_FLAG(intern->flags, SPL_FILE_OBJECT_READ_CSV) || !intern->u.file.current_zval)) {
		RETURN_STRINGL(intern->u.file.current_line, intern->u.file.current_line_len, 1);
	} else if (intern->u.file.current_zval) {
		/* READ_CSV mode buffers the parsed line as a zval instead */
		RETURN_ZVAL(intern->u.file.current_zval, 1, 0);
	}
	RETURN_FALSE;
} /* }}} */

/* {{{ proto int SplFileObject::key() Return line number */
SPL_METHOD(SplFileObject, key)
{
	spl_filesystem_object *intern = (spl_filesystem_object*)zend_object_store_get_object(getThis() TSRMLS_CC);

	if (zend_parse_parameters_none() == FAILURE) {
		return;
	}

	/* Do not read the next line to support correct counting with fgetc()
	if (!intern->current_line) {
		spl_filesystem_file_read_line(getThis(), intern, 1 TSRMLS_CC);
	} */
	RETURN_LONG(intern->u.file.current_line_num);
} /* }}} */

/* {{{ proto void SplFileObject::next() Read next line */
SPL_METHOD(SplFileObject, next)
{
	spl_filesystem_object *intern = (spl_filesystem_object*)zend_object_store_get_object(getThis() TSRMLS_CC);

	if (zend_parse_parameters_none() == FAILURE) {
		return;
	}

	spl_filesystem_file_free_line(intern TSRMLS_CC);
	if (SPL_HAS_FLAG(intern->flags, SPL_FILE_OBJECT_READ_AHEAD)) {
		/* read-ahead mode keeps the next line buffered for valid()/current() */
		spl_filesystem_file_read_line(getThis(), intern, 1 TSRMLS_CC);
	}
	intern->u.file.current_line_num++;
} /* }}} */

/* {{{ proto void SplFileObject::setFlags(int flags) Set file handling flags */
SPL_METHOD(SplFileObject, setFlags)
{
	spl_filesystem_object *intern = (spl_filesystem_object*)zend_object_store_get_object(getThis() TSRMLS_CC);

	/* parses the long straight into intern->flags; no range validation */
	if (zend_parse_parameters(ZEND_NUM_ARGS() TSRMLS_CC, "l", &intern->flags) == FAILURE) {
		return;
	}
} /* }}} */

/* {{{ proto int SplFileObject::getFlags() Get file handling flags */
SPL_METHOD(SplFileObject, getFlags)
{
	spl_filesystem_object *intern = (spl_filesystem_object*)zend_object_store_get_object(getThis() TSRMLS_CC);

	if (zend_parse_parameters_none() == FAILURE) {
		return;
	}

	/* only expose the public flag bits */
	RETURN_LONG(intern->flags & SPL_FILE_OBJECT_MASK);
} /* }}} */

/* {{{ proto void SplFileObject::setMaxLineLen(int max_len) Set maximum line length */
SPL_METHOD(SplFileObject, setMaxLineLen)
{
	long max_len;

	spl_filesystem_object *intern = (spl_filesystem_object*)zend_object_store_get_object(getThis() TSRMLS_CC);

	if (zend_parse_parameters(ZEND_NUM_ARGS() TSRMLS_CC, "l", &max_len) == FAILURE) {
		return;
	}

	if (max_len < 0) {
		zend_throw_exception_ex(spl_ce_DomainException, 0 TSRMLS_CC, "Maximum line length must be greater than or equal zero");
		return;
	}

	/* 0 means "unlimited" in spl_filesystem_file_read() */
	intern->u.file.max_line_len = max_len;
} /* }}} */

/* {{{ proto int SplFileObject::getMaxLineLen() Get maximum line length */
SPL_METHOD(SplFileObject, getMaxLineLen)
{
	spl_filesystem_object *intern = (spl_filesystem_object*)zend_object_store_get_object(getThis() TSRMLS_CC);

	if (zend_parse_parameters_none() == FAILURE) {
		return;
	}

	RETURN_LONG((long)intern->u.file.max_line_len);
} /* }}} */

/* {{{ proto bool SplFileObject::hasChildren() Return false */
SPL_METHOD(SplFileObject, hasChildren)
{
	if (zend_parse_parameters_none() == FAILURE) {
		return;
	}

	/* a file is always a leaf in the RecursiveIterator sense */
	RETURN_FALSE;
} /* }}} */

/* {{{ proto bool SplFileObject::getChildren() Read NULL */
SPL_METHOD(SplFileObject, getChildren)
{
	if (zend_parse_parameters_none() == FAILURE) {
		return;
	}

	/* return NULL */
} /* }}} */

/* {{{ FileFunction */
/* Generates a method body that forwards to the like-named userland file
 * function (via FileFunctionCall), passing the wrapped stream resource as
 * the first argument and the method's own arguments after it. */
#define FileFunction(func_name) \
SPL_METHOD(SplFileObject, func_name) \
{ \
	spl_filesystem_object *intern = (spl_filesystem_object*)zend_object_store_get_object(getThis() TSRMLS_CC); \
	FileFunctionCall(func_name, ZEND_NUM_ARGS(), NULL); \
} /* }}} */

/* {{{ proto array SplFileObject::fgetcsv([string delimiter [, string enclosure [, escape = '\\']]]) Return current line as csv */
SPL_METHOD(SplFileObject, fgetcsv) { spl_filesystem_object *intern = (spl_filesystem_object*)zend_object_store_get_object(getThis() TSRMLS_CC); char delimiter = intern->u.file.delimiter, enclosure = intern->u.file.enclosure, escape = intern->u.file.escape; char *delim = NULL, *enclo = NULL, *esc = NULL; int d_len = 0, e_len = 0, esc_len = 0; if (zend_parse_parameters(ZEND_NUM_ARGS() TSRMLS_CC, "|sss", &delim, &d_len, &enclo, &e_len, &esc, &esc_len) == SUCCESS) { switch(ZEND_NUM_ARGS()) { case 3: if (esc_len != 1) { php_error_docref(NULL TSRMLS_CC, E_WARNING, "escape must be a character"); RETURN_FALSE; } escape = esc[0]; /* no break */ case 2: if (e_len != 1) { php_error_docref(NULL TSRMLS_CC, E_WARNING, "enclosure must be a character"); RETURN_FALSE; } enclosure = enclo[0]; /* no break */ case 1: if (d_len != 1) { php_error_docref(NULL TSRMLS_CC, E_WARNING, "delimiter must be a character"); RETURN_FALSE; } delimiter = delim[0]; /* no break */ case 0: break; } spl_filesystem_file_read_csv(intern, delimiter, enclosure, escape, return_value TSRMLS_CC); } } /* }}} */ /* {{{ proto int SplFileObject::fputcsv(array fields, [string delimiter [, string enclosure [, string escape]]]) Output a field array as a CSV line */ SPL_METHOD(SplFileObject, fputcsv) { spl_filesystem_object *intern = (spl_filesystem_object*)zend_object_store_get_object(getThis() TSRMLS_CC); char delimiter = intern->u.file.delimiter, enclosure = intern->u.file.enclosure, escape = intern->u.file.escape; char *delim = NULL, *enclo = NULL, *esc = NULL; int d_len = 0, e_len = 0, esc_len = 0, ret; zval *fields = NULL; if (zend_parse_parameters(ZEND_NUM_ARGS() TSRMLS_CC, "a|sss", &fields, &delim, &d_len, &enclo, &e_len, &esc, &esc_len) == SUCCESS) { switch(ZEND_NUM_ARGS()) { case 4: if (esc_len != 1) { php_error_docref(NULL TSRMLS_CC, E_WARNING, "escape must be a character"); RETURN_FALSE; } escape = esc[0]; /* no break */ case 3: if (e_len != 1) { php_error_docref(NULL TSRMLS_CC, E_WARNING, "enclosure must 
be a character"); RETURN_FALSE; } enclosure = enclo[0]; /* no break */ case 2: if (d_len != 1) { php_error_docref(NULL TSRMLS_CC, E_WARNING, "delimiter must be a character"); RETURN_FALSE; } delimiter = delim[0]; /* no break */ case 1: case 0: break; } ret = php_fputcsv(intern->u.file.stream, fields, delimiter, enclosure, escape TSRMLS_CC); RETURN_LONG(ret); } } /* }}} */ /* {{{ proto void SplFileObject::setCsvControl([string delimiter = ',' [, string enclosure = '"' [, string escape = '\\']]]) Set the delimiter and enclosure character used in fgetcsv */ SPL_METHOD(SplFileObject, setCsvControl) { spl_filesystem_object *intern = (spl_filesystem_object*)zend_object_store_get_object(getThis() TSRMLS_CC); char delimiter = ',', enclosure = '"', escape='\\'; char *delim = NULL, *enclo = NULL, *esc = NULL; int d_len = 0, e_len = 0, esc_len = 0; if (zend_parse_parameters(ZEND_NUM_ARGS() TSRMLS_CC, "|sss", &delim, &d_len, &enclo, &e_len, &esc, &esc_len) == SUCCESS) { switch(ZEND_NUM_ARGS()) { case 3: if (esc_len != 1) { php_error_docref(NULL TSRMLS_CC, E_WARNING, "escape must be a character"); RETURN_FALSE; } escape = esc[0]; /* no break */ case 2: if (e_len != 1) { php_error_docref(NULL TSRMLS_CC, E_WARNING, "enclosure must be a character"); RETURN_FALSE; } enclosure = enclo[0]; /* no break */ case 1: if (d_len != 1) { php_error_docref(NULL TSRMLS_CC, E_WARNING, "delimiter must be a character"); RETURN_FALSE; } delimiter = delim[0]; /* no break */ case 0: break; } intern->u.file.delimiter = delimiter; intern->u.file.enclosure = enclosure; intern->u.file.escape = escape; } } /* }}} */ /* {{{ proto array SplFileObject::getCsvControl() Get the delimiter and enclosure character used in fgetcsv */ SPL_METHOD(SplFileObject, getCsvControl) { spl_filesystem_object *intern = (spl_filesystem_object*)zend_object_store_get_object(getThis() TSRMLS_CC); char delimiter[2], enclosure[2]; array_init(return_value); delimiter[0] = intern->u.file.delimiter; delimiter[1] = '\0'; enclosure[0] = 
intern->u.file.enclosure; enclosure[1] = '\0'; add_next_index_string(return_value, delimiter, 1); add_next_index_string(return_value, enclosure, 1); } /* }}} */ /* {{{ proto bool SplFileObject::flock(int operation [, int &wouldblock]) Portable file locking */ FileFunction(flock) /* }}} */ /* {{{ proto bool SplFileObject::fflush() Flush the file */ SPL_METHOD(SplFileObject, fflush) { spl_filesystem_object *intern = (spl_filesystem_object*)zend_object_store_get_object(getThis() TSRMLS_CC); RETURN_BOOL(!php_stream_flush(intern->u.file.stream)); } /* }}} */ /* {{{ proto int SplFileObject::ftell() Return current file position */ SPL_METHOD(SplFileObject, ftell) { spl_filesystem_object *intern = (spl_filesystem_object*)zend_object_store_get_object(getThis() TSRMLS_CC); long ret = php_stream_tell(intern->u.file.stream); if (ret == -1) { RETURN_FALSE; } else { RETURN_LONG(ret); } } /* }}} */ /* {{{ proto int SplFileObject::fseek(int pos [, int whence = SEEK_SET]) Return current file position */ SPL_METHOD(SplFileObject, fseek) { spl_filesystem_object *intern = (spl_filesystem_object*)zend_object_store_get_object(getThis() TSRMLS_CC); long pos, whence = SEEK_SET; if (zend_parse_parameters(ZEND_NUM_ARGS() TSRMLS_CC, "l|l", &pos, &whence) == FAILURE) { return; } spl_filesystem_file_free_line(intern TSRMLS_CC); RETURN_LONG(php_stream_seek(intern->u.file.stream, pos, whence)); } /* }}} */ /* {{{ proto int SplFileObject::fgetc() Get a character form the file */ SPL_METHOD(SplFileObject, fgetc) { spl_filesystem_object *intern = (spl_filesystem_object*)zend_object_store_get_object(getThis() TSRMLS_CC); char buf[2]; int result; spl_filesystem_file_free_line(intern TSRMLS_CC); result = php_stream_getc(intern->u.file.stream); if (result == EOF) { RETVAL_FALSE; } else { if (result == '\n') { intern->u.file.current_line_num++; } buf[0] = result; buf[1] = '\0'; RETURN_STRINGL(buf, 1, 1); } } /* }}} */ /* {{{ proto string SplFileObject::fgetss([string allowable_tags]) Get a line from 
file pointer and strip HTML tags */ SPL_METHOD(SplFileObject, fgetss) { spl_filesystem_object *intern = (spl_filesystem_object*)zend_object_store_get_object(getThis() TSRMLS_CC); zval *arg2 = NULL; MAKE_STD_ZVAL(arg2); if (intern->u.file.max_line_len > 0) { ZVAL_LONG(arg2, intern->u.file.max_line_len); } else { ZVAL_LONG(arg2, 1024); } spl_filesystem_file_free_line(intern TSRMLS_CC); intern->u.file.current_line_num++; FileFunctionCall(fgetss, ZEND_NUM_ARGS(), arg2); zval_ptr_dtor(&arg2); } /* }}} */ /* {{{ proto int SplFileObject::fpassthru() Output all remaining data from a file pointer */ SPL_METHOD(SplFileObject, fpassthru) { spl_filesystem_object *intern = (spl_filesystem_object*)zend_object_store_get_object(getThis() TSRMLS_CC); RETURN_LONG(php_stream_passthru(intern->u.file.stream)); } /* }}} */ /* {{{ proto bool SplFileObject::fscanf(string format [, string ...]) Implements a mostly ANSI compatible fscanf() */ SPL_METHOD(SplFileObject, fscanf) { spl_filesystem_object *intern = (spl_filesystem_object*)zend_object_store_get_object(getThis() TSRMLS_CC); spl_filesystem_file_free_line(intern TSRMLS_CC); intern->u.file.current_line_num++; FileFunctionCall(fscanf, ZEND_NUM_ARGS(), NULL); } /* }}} */ /* {{{ proto mixed SplFileObject::fwrite(string str [, int length]) Binary-safe file write */ SPL_METHOD(SplFileObject, fwrite) { spl_filesystem_object *intern = (spl_filesystem_object*)zend_object_store_get_object(getThis() TSRMLS_CC); char *str; int str_len; long length = 0; if (zend_parse_parameters(ZEND_NUM_ARGS() TSRMLS_CC, "s|l", &str, &str_len, &length) == FAILURE) { return; } if (ZEND_NUM_ARGS() > 1) { str_len = MAX(0, MIN(length, str_len)); } if (!str_len) { RETURN_LONG(0); } RETURN_LONG(php_stream_write(intern->u.file.stream, str, str_len)); } /* }}} */ SPL_METHOD(SplFileObject, fread) { spl_filesystem_object *intern = (spl_filesystem_object*)zend_object_store_get_object(getThis() TSRMLS_CC); long length = 0; if (zend_parse_parameters(ZEND_NUM_ARGS() 
TSRMLS_CC, "l", &length) == FAILURE) { return; } if (length <= 0) { php_error_docref(NULL TSRMLS_CC, E_WARNING, "Length parameter must be greater than 0"); RETURN_FALSE; } Z_STRVAL_P(return_value) = emalloc(length + 1); Z_STRLEN_P(return_value) = php_stream_read(intern->u.file.stream, Z_STRVAL_P(return_value), length); /* needed because recv/read/gzread doesnt put a null at the end*/ Z_STRVAL_P(return_value)[Z_STRLEN_P(return_value)] = 0; Z_TYPE_P(return_value) = IS_STRING; } /* {{{ proto bool SplFileObject::fstat() Stat() on a filehandle */ FileFunction(fstat) /* }}} */ /* {{{ proto bool SplFileObject::ftruncate(int size) Truncate file to 'size' length */ SPL_METHOD(SplFileObject, ftruncate) { spl_filesystem_object *intern = (spl_filesystem_object*)zend_object_store_get_object(getThis() TSRMLS_CC); long size; if (zend_parse_parameters(ZEND_NUM_ARGS() TSRMLS_CC, "l", &size) == FAILURE) { return; } if (!php_stream_truncate_supported(intern->u.file.stream)) { zend_throw_exception_ex(spl_ce_LogicException, 0 TSRMLS_CC, "Can't truncate file %s", intern->file_name); RETURN_FALSE; } RETURN_BOOL(0 == php_stream_truncate_set_size(intern->u.file.stream, size)); } /* }}} */ /* {{{ proto void SplFileObject::seek(int line_pos) Seek to specified line */ SPL_METHOD(SplFileObject, seek) { spl_filesystem_object *intern = (spl_filesystem_object*)zend_object_store_get_object(getThis() TSRMLS_CC); long line_pos; if (zend_parse_parameters(ZEND_NUM_ARGS() TSRMLS_CC, "l", &line_pos) == FAILURE) { return; } if (line_pos < 0) { zend_throw_exception_ex(spl_ce_LogicException, 0 TSRMLS_CC, "Can't seek file %s to negative line %ld", intern->file_name, line_pos); RETURN_FALSE; } spl_filesystem_file_rewind(getThis(), intern TSRMLS_CC); while(intern->u.file.current_line_num < line_pos) { if (spl_filesystem_file_read_line(getThis(), intern, 1 TSRMLS_CC) == FAILURE) { break; } } } /* }}} */ /* {{{ Function/Class/Method definitions */ ZEND_BEGIN_ARG_INFO_EX(arginfo_file_object___construct, 0, 0, 1) 
ZEND_ARG_INFO(0, file_name)
	ZEND_ARG_INFO(0, open_mode)
	ZEND_ARG_INFO(0, use_include_path)
	ZEND_ARG_INFO(0, context)
ZEND_END_ARG_INFO()

ZEND_BEGIN_ARG_INFO(arginfo_file_object_setFlags, 0)
	ZEND_ARG_INFO(0, flags)
ZEND_END_ARG_INFO()

ZEND_BEGIN_ARG_INFO(arginfo_file_object_setMaxLineLen, 0)
	ZEND_ARG_INFO(0, max_len)
ZEND_END_ARG_INFO()

ZEND_BEGIN_ARG_INFO_EX(arginfo_file_object_fgetcsv, 0, 0, 0)
	ZEND_ARG_INFO(0, delimiter)
	ZEND_ARG_INFO(0, enclosure)
	ZEND_ARG_INFO(0, escape)
ZEND_END_ARG_INFO()

ZEND_BEGIN_ARG_INFO_EX(arginfo_file_object_fputcsv, 0, 0, 1)
	ZEND_ARG_INFO(0, fields)
	ZEND_ARG_INFO(0, delimiter)
	ZEND_ARG_INFO(0, enclosure)
	ZEND_ARG_INFO(0, escape)
ZEND_END_ARG_INFO()

ZEND_BEGIN_ARG_INFO_EX(arginfo_file_object_flock, 0, 0, 1)
	ZEND_ARG_INFO(0, operation)
	ZEND_ARG_INFO(1, wouldblock)
ZEND_END_ARG_INFO()

ZEND_BEGIN_ARG_INFO_EX(arginfo_file_object_fseek, 0, 0, 1)
	ZEND_ARG_INFO(0, pos)
	ZEND_ARG_INFO(0, whence)
ZEND_END_ARG_INFO()

ZEND_BEGIN_ARG_INFO_EX(arginfo_file_object_fgetss, 0, 0, 0)
	ZEND_ARG_INFO(0, allowable_tags)
ZEND_END_ARG_INFO()

ZEND_BEGIN_ARG_INFO_EX(arginfo_file_object_fscanf, 1, 0, 1)
	ZEND_ARG_INFO(0, format)
ZEND_END_ARG_INFO()

ZEND_BEGIN_ARG_INFO_EX(arginfo_file_object_fwrite, 0, 0, 1)
	ZEND_ARG_INFO(0, str)
	ZEND_ARG_INFO(0, length)
ZEND_END_ARG_INFO()

ZEND_BEGIN_ARG_INFO_EX(arginfo_file_object_fread, 0, 0, 1)
	ZEND_ARG_INFO(0, length)
ZEND_END_ARG_INFO()

ZEND_BEGIN_ARG_INFO_EX(arginfo_file_object_ftruncate, 0, 0, 1)
	ZEND_ARG_INFO(0, size)
ZEND_END_ARG_INFO()

ZEND_BEGIN_ARG_INFO_EX(arginfo_file_object_seek, 0, 0, 1)
	ZEND_ARG_INFO(0, line_pos)
ZEND_END_ARG_INFO()

/* SplFileObject method table */
static const zend_function_entry spl_SplFileObject_functions[] = {
	SPL_ME(SplFileObject, __construct,   arginfo_file_object___construct,   ZEND_ACC_PUBLIC)
	SPL_ME(SplFileObject, rewind,        arginfo_splfileinfo_void,          ZEND_ACC_PUBLIC)
	SPL_ME(SplFileObject, eof,           arginfo_splfileinfo_void,          ZEND_ACC_PUBLIC)
	SPL_ME(SplFileObject, valid,         arginfo_splfileinfo_void,          ZEND_ACC_PUBLIC)
	SPL_ME(SplFileObject, fgets,         arginfo_splfileinfo_void,          ZEND_ACC_PUBLIC)
	SPL_ME(SplFileObject, fgetcsv,       arginfo_file_object_fgetcsv,       ZEND_ACC_PUBLIC)
	SPL_ME(SplFileObject, fputcsv,       arginfo_file_object_fputcsv,       ZEND_ACC_PUBLIC)
	SPL_ME(SplFileObject, setCsvControl, arginfo_file_object_fgetcsv,       ZEND_ACC_PUBLIC)
	SPL_ME(SplFileObject, getCsvControl, arginfo_splfileinfo_void,          ZEND_ACC_PUBLIC)
	SPL_ME(SplFileObject, flock,         arginfo_file_object_flock,         ZEND_ACC_PUBLIC)
	SPL_ME(SplFileObject, fflush,        arginfo_splfileinfo_void,          ZEND_ACC_PUBLIC)
	SPL_ME(SplFileObject, ftell,         arginfo_splfileinfo_void,          ZEND_ACC_PUBLIC)
	SPL_ME(SplFileObject, fseek,         arginfo_file_object_fseek,         ZEND_ACC_PUBLIC)
	SPL_ME(SplFileObject, fgetc,         arginfo_splfileinfo_void,          ZEND_ACC_PUBLIC)
	SPL_ME(SplFileObject, fpassthru,     arginfo_splfileinfo_void,          ZEND_ACC_PUBLIC)
	SPL_ME(SplFileObject, fgetss,        arginfo_file_object_fgetss,        ZEND_ACC_PUBLIC)
	SPL_ME(SplFileObject, fscanf,        arginfo_file_object_fscanf,        ZEND_ACC_PUBLIC)
	SPL_ME(SplFileObject, fwrite,        arginfo_file_object_fwrite,        ZEND_ACC_PUBLIC)
	SPL_ME(SplFileObject, fread,         arginfo_file_object_fread,         ZEND_ACC_PUBLIC)
	SPL_ME(SplFileObject, fstat,         arginfo_splfileinfo_void,          ZEND_ACC_PUBLIC)
	SPL_ME(SplFileObject, ftruncate,     arginfo_file_object_ftruncate,     ZEND_ACC_PUBLIC)
	SPL_ME(SplFileObject, current,       arginfo_splfileinfo_void,          ZEND_ACC_PUBLIC)
	SPL_ME(SplFileObject, key,           arginfo_splfileinfo_void,          ZEND_ACC_PUBLIC)
	SPL_ME(SplFileObject, next,          arginfo_splfileinfo_void,          ZEND_ACC_PUBLIC)
	SPL_ME(SplFileObject, setFlags,      arginfo_file_object_setFlags,      ZEND_ACC_PUBLIC)
	SPL_ME(SplFileObject, getFlags,      arginfo_splfileinfo_void,          ZEND_ACC_PUBLIC)
	SPL_ME(SplFileObject, setMaxLineLen, arginfo_file_object_setMaxLineLen, ZEND_ACC_PUBLIC)
	SPL_ME(SplFileObject, getMaxLineLen, arginfo_splfileinfo_void,          ZEND_ACC_PUBLIC)
	SPL_ME(SplFileObject, hasChildren,   arginfo_splfileinfo_void,          ZEND_ACC_PUBLIC)
	SPL_ME(SplFileObject, getChildren,   arginfo_splfileinfo_void,          ZEND_ACC_PUBLIC)
	SPL_ME(SplFileObject, seek,          arginfo_file_object_seek,          ZEND_ACC_PUBLIC)
	/* mappings */
	SPL_MA(SplFileObject, getCurrentLine, SplFileObject, fgets,   arginfo_splfileinfo_void, ZEND_ACC_PUBLIC)
	SPL_MA(SplFileObject, __toString,     SplFileObject, current, arginfo_splfileinfo_void, ZEND_ACC_PUBLIC)
	PHP_FE_END
};

ZEND_BEGIN_ARG_INFO_EX(arginfo_temp_file_object___construct, 0, 0, 0)
	ZEND_ARG_INFO(0, max_memory)
ZEND_END_ARG_INFO()

/* SplTempFileObject only overrides the constructor */
static const zend_function_entry spl_SplTempFileObject_functions[] = {
	SPL_ME(SplTempFileObject, __construct, arginfo_temp_file_object___construct, ZEND_ACC_PUBLIC)
	PHP_FE_END
};
/* }}} */

/* {{{ PHP_MINIT_FUNCTION(spl_directory)
   Registers the SplFileInfo class hierarchy, its object handlers and
   the class constants at module startup. */
PHP_MINIT_FUNCTION(spl_directory)
{
	REGISTER_SPL_STD_CLASS_EX(SplFileInfo, spl_filesystem_object_new, spl_SplFileInfo_functions);
	/* start from the standard handlers, then hook clone/cast/debug-info */
	memcpy(&spl_filesystem_object_handlers, zend_get_std_object_handlers(), sizeof(zend_object_handlers));
	spl_filesystem_object_handlers.clone_obj = spl_filesystem_object_clone;
	spl_filesystem_object_handlers.cast_object = spl_filesystem_object_cast;
	spl_filesystem_object_handlers.get_debug_info = spl_filesystem_object_get_debug_info;
	spl_ce_SplFileInfo->serialize = zend_class_serialize_deny;
	spl_ce_SplFileInfo->unserialize = zend_class_unserialize_deny;

	REGISTER_SPL_SUB_CLASS_EX(DirectoryIterator, SplFileInfo, spl_filesystem_object_new, spl_DirectoryIterator_functions);
	zend_class_implements(spl_ce_DirectoryIterator TSRMLS_CC, 1, zend_ce_iterator);
	REGISTER_SPL_IMPLEMENTS(DirectoryIterator, SeekableIterator);

	spl_ce_DirectoryIterator->get_iterator = spl_filesystem_dir_get_iterator;

	REGISTER_SPL_SUB_CLASS_EX(FilesystemIterator, DirectoryIterator, spl_filesystem_object_new, spl_FilesystemIterator_functions);

	REGISTER_SPL_CLASS_CONST_LONG(FilesystemIterator, "CURRENT_MODE_MASK",   SPL_FILE_DIR_CURRENT_MODE_MASK);
	REGISTER_SPL_CLASS_CONST_LONG(FilesystemIterator, "CURRENT_AS_PATHNAME", SPL_FILE_DIR_CURRENT_AS_PATHNAME);
	REGISTER_SPL_CLASS_CONST_LONG(FilesystemIterator, "CURRENT_AS_FILEINFO", SPL_FILE_DIR_CURRENT_AS_FILEINFO);
	REGISTER_SPL_CLASS_CONST_LONG(FilesystemIterator, "CURRENT_AS_SELF",     SPL_FILE_DIR_CURRENT_AS_SELF);
	REGISTER_SPL_CLASS_CONST_LONG(FilesystemIterator, "KEY_MODE_MASK",       SPL_FILE_DIR_KEY_MODE_MASK);
	REGISTER_SPL_CLASS_CONST_LONG(FilesystemIterator, "KEY_AS_PATHNAME",     SPL_FILE_DIR_KEY_AS_PATHNAME);
	REGISTER_SPL_CLASS_CONST_LONG(FilesystemIterator, "FOLLOW_SYMLINKS",     SPL_FILE_DIR_FOLLOW_SYMLINKS);
	REGISTER_SPL_CLASS_CONST_LONG(FilesystemIterator, "KEY_AS_FILENAME",     SPL_FILE_DIR_KEY_AS_FILENAME);
	REGISTER_SPL_CLASS_CONST_LONG(FilesystemIterator, "NEW_CURRENT_AND_KEY", SPL_FILE_DIR_KEY_AS_FILENAME|SPL_FILE_DIR_CURRENT_AS_FILEINFO);
	REGISTER_SPL_CLASS_CONST_LONG(FilesystemIterator, "OTHER_MODE_MASK",     SPL_FILE_DIR_OTHERS_MASK);
	REGISTER_SPL_CLASS_CONST_LONG(FilesystemIterator, "SKIP_DOTS",           SPL_FILE_DIR_SKIPDOTS);
	REGISTER_SPL_CLASS_CONST_LONG(FilesystemIterator, "UNIX_PATHS",          SPL_FILE_DIR_UNIXPATHS);

	spl_ce_FilesystemIterator->get_iterator = spl_filesystem_tree_get_iterator;

	REGISTER_SPL_SUB_CLASS_EX(RecursiveDirectoryIterator, FilesystemIterator, spl_filesystem_object_new, spl_RecursiveDirectoryIterator_functions);
	REGISTER_SPL_IMPLEMENTS(RecursiveDirectoryIterator, RecursiveIterator);

	/* check-handlers additionally validate object state on method lookup */
	memcpy(&spl_filesystem_object_check_handlers, &spl_filesystem_object_handlers, sizeof(zend_object_handlers));
	spl_filesystem_object_check_handlers.get_method = spl_filesystem_object_get_method_check;

#ifdef HAVE_GLOB
	REGISTER_SPL_SUB_CLASS_EX(GlobIterator, FilesystemIterator, spl_filesystem_object_new_check, spl_GlobIterator_functions);
	REGISTER_SPL_IMPLEMENTS(GlobIterator, Countable);
#endif

	REGISTER_SPL_SUB_CLASS_EX(SplFileObject, SplFileInfo, spl_filesystem_object_new_check, spl_SplFileObject_functions);
	REGISTER_SPL_IMPLEMENTS(SplFileObject, RecursiveIterator);
	REGISTER_SPL_IMPLEMENTS(SplFileObject, SeekableIterator);

	REGISTER_SPL_CLASS_CONST_LONG(SplFileObject, "DROP_NEW_LINE", SPL_FILE_OBJECT_DROP_NEW_LINE);
	REGISTER_SPL_CLASS_CONST_LONG(SplFileObject, "READ_AHEAD",    SPL_FILE_OBJECT_READ_AHEAD);
	REGISTER_SPL_CLASS_CONST_LONG(SplFileObject, "SKIP_EMPTY",    SPL_FILE_OBJECT_SKIP_EMPTY);
	REGISTER_SPL_CLASS_CONST_LONG(SplFileObject, "READ_CSV",      SPL_FILE_OBJECT_READ_CSV);

	REGISTER_SPL_SUB_CLASS_EX(SplTempFileObject, SplFileObject, spl_filesystem_object_new_check, spl_SplTempFileObject_functions);
	return SUCCESS;
}
/* }}} */

/*
 * Local variables:
 * tab-width: 4
 * c-basic-offset: 4
 * End:
 * vim600: noet sw=4 ts=4 fdm=marker
 * vim<600: noet sw=4 ts=4
 */
/* +----------------------------------------------------------------------+ | PHP Version 5 | +----------------------------------------------------------------------+ | Copyright (c) 1997-2015 The PHP Group | +----------------------------------------------------------------------+ | This source file is subject to version 3.01 of the PHP license, | | that is bundled with this package in the file LICENSE, and is | | available through the world-wide-web at the following url: | | http://www.php.net/license/3_01.txt | | If you did not receive a copy of the PHP license and are unable to | | obtain it through the world-wide-web, please send a note to | | license@php.net so we can mail you a copy immediately. | +----------------------------------------------------------------------+ | Author: Marcus Boerger <helly@php.net> | +----------------------------------------------------------------------+ */ /* $Id$ */ #ifdef HAVE_CONFIG_H # include "config.h" #endif #include "php.h" #include "php_ini.h" #include "ext/standard/info.h" #include "ext/standard/file.h" #include "ext/standard/php_string.h" #include "zend_compile.h" #include "zend_exceptions.h" #include "zend_interfaces.h" #include "php_spl.h" #include "spl_functions.h" #include "spl_engine.h" #include "spl_iterators.h" #include "spl_directory.h" #include "spl_exceptions.h" #include "php.h" #include "fopen_wrappers.h" #include "ext/standard/basic_functions.h" #include "ext/standard/php_filestat.h" #define SPL_HAS_FLAG(flags, test_flag) ((flags & test_flag) ? 
1 : 0)

/* declare the class handlers */
static zend_object_handlers spl_filesystem_object_handlers;
/* includes handler to validate object state when retrieving methods */
static zend_object_handlers spl_filesystem_object_check_handlers;

/* declare the class entry */
PHPAPI zend_class_entry *spl_ce_SplFileInfo;
PHPAPI zend_class_entry *spl_ce_DirectoryIterator;
PHPAPI zend_class_entry *spl_ce_FilesystemIterator;
PHPAPI zend_class_entry *spl_ce_RecursiveDirectoryIterator;
PHPAPI zend_class_entry *spl_ce_GlobIterator;
PHPAPI zend_class_entry *spl_ce_SplFileObject;
PHPAPI zend_class_entry *spl_ce_SplTempFileObject;

/* Release the buffered current line (raw string and/or zval form). */
static void spl_filesystem_file_free_line(spl_filesystem_object *intern TSRMLS_DC) /* {{{ */
{
	if (intern->u.file.current_line) {
		efree(intern->u.file.current_line);
		intern->u.file.current_line = NULL;
	}
	if (intern->u.file.current_zval) {
		zval_ptr_dtor(&intern->u.file.current_zval);
		intern->u.file.current_zval = NULL;
	}
} /* }}} */

/* Object storage destructor: releases per-type resources (dir handle,
   stream, buffered line) and the common path/name strings. */
static void spl_filesystem_object_free_storage(void *object TSRMLS_DC) /* {{{ */
{
	spl_filesystem_object *intern = (spl_filesystem_object*)object;

	if (intern->oth_handler && intern->oth_handler->dtor) {
		intern->oth_handler->dtor(intern TSRMLS_CC);
	}

	zend_object_std_dtor(&intern->std TSRMLS_CC);

	if (intern->_path) {
		efree(intern->_path);
	}
	if (intern->file_name) {
		efree(intern->file_name);
	}
	/* type selects which union member is live */
	switch(intern->type) {
	case SPL_FS_INFO:
		break;
	case SPL_FS_DIR:
		if (intern->u.dir.dirp) {
			php_stream_close(intern->u.dir.dirp);
			intern->u.dir.dirp = NULL;
		}
		if (intern->u.dir.sub_path) {
			efree(intern->u.dir.sub_path);
		}
		break;
	case SPL_FS_FILE:
		if (intern->u.file.stream) {
			if (intern->u.file.zcontext) {
				/* zend_list_delref(Z_RESVAL_P(intern->zcontext));*/
			}
			if (!intern->u.file.stream->is_persistent) {
				php_stream_free(intern->u.file.stream, PHP_STREAM_FREE_CLOSE);
			} else {
				php_stream_free(intern->u.file.stream, PHP_STREAM_FREE_CLOSE_PERSISTENT);
			}
			if (intern->u.file.open_mode) {
				efree(intern->u.file.open_mode);
			}
			if (intern->orig_path) {
				efree(intern->orig_path);
			}
		}
		spl_filesystem_file_free_line(intern TSRMLS_CC);
		break;
	}

	/* detach any live iterator so it does not reference freed storage */
	{
		zend_object_iterator *iterator;
		iterator = (zend_object_iterator*) spl_filesystem_object_to_iterator(intern);
		if (iterator->data != NULL) {
			iterator->data = NULL;
			iterator->funcs->dtor(iterator TSRMLS_CC);
		}
	}
	efree(object);
} /* }}} */

/* {{{ spl_ce_dir_object_new */
/* creates the object by
   - allocating memory
   - initializing the object members
   - storing the object
   - setting it's handlers

   called from
   - clone
   - new
 */
static zend_object_value spl_filesystem_object_new_ex(zend_class_entry *class_type, spl_filesystem_object **obj TSRMLS_DC)
{
	zend_object_value retval;
	spl_filesystem_object *intern;

	intern = emalloc(sizeof(spl_filesystem_object));
	memset(intern, 0, sizeof(spl_filesystem_object));
	/* intern->type = SPL_FS_INFO; done by set 0 */
	intern->file_class = spl_ce_SplFileObject;
	intern->info_class = spl_ce_SplFileInfo;
	if (obj) *obj = intern;

	zend_object_std_init(&intern->std, class_type TSRMLS_CC);
	object_properties_init(&intern->std, class_type);
	retval.handle = zend_objects_store_put(intern, (zend_objects_store_dtor_t) zend_objects_destroy_object, (zend_objects_free_object_storage_t) spl_filesystem_object_free_storage, NULL TSRMLS_CC);
	retval.handlers = &spl_filesystem_object_handlers;
	return retval;
}
/* }}} */

/* {{{ spl_filesystem_object_new */
/* See spl_filesystem_object_new_ex */
static zend_object_value spl_filesystem_object_new(zend_class_entry *class_type TSRMLS_DC)
{
	return spl_filesystem_object_new_ex(class_type, NULL TSRMLS_CC);
}
/* }}} */

/* {{{ spl_filesystem_object_new_ex */
/* Variant that installs the state-checking handlers (get_method hook). */
static zend_object_value spl_filesystem_object_new_check(zend_class_entry *class_type TSRMLS_DC)
{
	zend_object_value ret = spl_filesystem_object_new_ex(class_type, NULL TSRMLS_CC);
	ret.handlers = &spl_filesystem_object_check_handlers;
	return ret;
}
/* }}} */

/* Return the object's base path (and optionally its length). */
PHPAPI char* spl_filesystem_object_get_path(spl_filesystem_object *intern, int *len TSRMLS_DC) /* {{{ */
{
#ifdef HAVE_GLOB
	if
(intern->type == SPL_FS_DIR) {
		/* glob:// streams compute their path from the glob state */
		if (php_stream_is(intern->u.dir.dirp ,&php_glob_stream_ops)) {
			return php_glob_stream_get_path(intern->u.dir.dirp, 0, len);
		}
	}
#endif
	if (len) {
		*len = intern->_path_len;
	}
	return intern->_path;
} /* }}} */

/* For SPL_FS_DIR objects, (re)build file_name as "<path><slash><entry>";
   for info/file objects just verify the name is initialized. */
static inline void spl_filesystem_object_get_file_name(spl_filesystem_object *intern TSRMLS_DC) /* {{{ */
{
	char slash = SPL_HAS_FLAG(intern->flags, SPL_FILE_DIR_UNIXPATHS) ? '/' : DEFAULT_SLASH;

	switch (intern->type) {
	case SPL_FS_INFO:
	case SPL_FS_FILE:
		if (!intern->file_name) {
			php_error_docref(NULL TSRMLS_CC, E_ERROR, "Object not initialized");
		}
		break;
	case SPL_FS_DIR:
		if (intern->file_name) {
			efree(intern->file_name);
		}
		intern->file_name_len = spprintf(&intern->file_name, 0, "%s%c%s", spl_filesystem_object_get_path(intern, NULL TSRMLS_CC), slash, intern->u.dir.entry.d_name);
		break;
	}
} /* }}} */

/* Advance to the next directory entry; clears d_name and returns 0 at end. */
static int spl_filesystem_dir_read(spl_filesystem_object *intern TSRMLS_DC) /* {{{ */
{
	if (!intern->u.dir.dirp || !php_stream_readdir(intern->u.dir.dirp, &intern->u.dir.entry)) {
		intern->u.dir.entry.d_name[0] = '\0';
		return 0;
	} else {
		return 1;
	}
} /* }}} */

#define IS_SLASH_AT(zs, pos) (IS_SLASH(zs[pos]))

/* True for the "." and ".." pseudo entries. */
static inline int spl_filesystem_is_dot(const char * d_name) /* {{{ */
{
	return !strcmp(d_name, ".") || !strcmp(d_name, "..");
}
/* }}} */

/* {{{ spl_filesystem_dir_open */
/* open a directory resource */
static void spl_filesystem_dir_open(spl_filesystem_object* intern, char *path TSRMLS_DC)
{
	int skip_dots = SPL_HAS_FLAG(intern->flags, SPL_FILE_DIR_SKIPDOTS);

	intern->type = SPL_FS_DIR;
	intern->_path_len = strlen(path);
	intern->u.dir.dirp = php_stream_opendir(path, REPORT_ERRORS, FG(default_context));

	/* strip a single trailing slash (but keep "/" itself intact) */
	if (intern->_path_len > 1 && IS_SLASH_AT(path, intern->_path_len-1)) {
		intern->_path = estrndup(path, --intern->_path_len);
	} else {
		intern->_path = estrndup(path, intern->_path_len);
	}
	intern->u.dir.index = 0;

	if (EG(exception) || intern->u.dir.dirp == NULL) {
		intern->u.dir.entry.d_name[0] = '\0';
		if (!EG(exception)) {
			/* open failed w/out notice (turned to exception due to EH_THROW) */
			zend_throw_exception_ex(spl_ce_UnexpectedValueException, 0 TSRMLS_CC, "Failed to open directory \"%s\"", path);
		}
	} else {
		/* position on the first (optionally non-dot) entry */
		do {
			spl_filesystem_dir_read(intern TSRMLS_CC);
		} while (skip_dots && spl_filesystem_is_dot(intern->u.dir.entry.d_name));
	}
}
/* }}} */

/* Open intern->file_name with intern->u.file.open_mode; on success takes
   private copies of name/mode and initializes the CSV control characters. */
static int spl_filesystem_file_open(spl_filesystem_object *intern, int use_include_path, int silent TSRMLS_DC) /* {{{ */
{
	zval tmp;

	intern->type = SPL_FS_FILE;

	/* directories cannot be wrapped by SplFileObject */
	php_stat(intern->file_name, intern->file_name_len, FS_IS_DIR, &tmp TSRMLS_CC);
	if (Z_LVAL(tmp)) {
		intern->u.file.open_mode = NULL;
		intern->file_name = NULL;
		zend_throw_exception_ex(spl_ce_LogicException, 0 TSRMLS_CC, "Cannot use SplFileObject with directories");
		return FAILURE;
	}

	intern->u.file.context = php_stream_context_from_zval(intern->u.file.zcontext, 0);
	intern->u.file.stream = php_stream_open_wrapper_ex(intern->file_name, intern->u.file.open_mode, (use_include_path ? USE_PATH : 0) | REPORT_ERRORS, NULL, intern->u.file.context);

	if (!intern->file_name_len || !intern->u.file.stream) {
		if (!EG(exception)) {
			zend_throw_exception_ex(spl_ce_RuntimeException, 0 TSRMLS_CC, "Cannot open file '%s'", intern->file_name_len ?
intern->file_name : "");
		}
		intern->file_name = NULL; /* until here it is not a copy */
		intern->u.file.open_mode = NULL;
		return FAILURE;
	}

	if (intern->u.file.zcontext) {
		zend_list_addref(Z_RESVAL_P(intern->u.file.zcontext));
	}

	/* drop a trailing slash before duplicating the name */
	if (intern->file_name_len > 1 && IS_SLASH_AT(intern->file_name, intern->file_name_len-1)) {
		intern->file_name_len--;
	}

	intern->orig_path = estrndup(intern->u.file.stream->orig_path, strlen(intern->u.file.stream->orig_path));

	/* from here on file_name and open_mode are owned copies */
	intern->file_name = estrndup(intern->file_name, intern->file_name_len);
	intern->u.file.open_mode = estrndup(intern->u.file.open_mode, intern->u.file.open_mode_len);

	/* avoid reference counting in debug mode, thus do it manually */
	ZVAL_RESOURCE(&intern->u.file.zresource, php_stream_get_resource_id(intern->u.file.stream));
	Z_SET_REFCOUNT(intern->u.file.zresource, 1);

	intern->u.file.delimiter = ',';
	intern->u.file.enclosure = '"';
	intern->u.file.escape = '\\';

	zend_hash_find(&intern->std.ce->function_table, "getcurrentline", sizeof("getcurrentline"), (void **) &intern->u.file.func_getCurr);

	return SUCCESS;
} /* }}} */

/* {{{ spl_filesystem_object_clone */
/* Local zend_object_value creation (on stack)
   Load the 'other' object
   Create a new empty object (See spl_filesystem_object_new_ex)
   Open the directory
   Clone other members (properties)
 */
static zend_object_value spl_filesystem_object_clone(zval *zobject TSRMLS_DC)
{
	zend_object_value new_obj_val;
	zend_object *old_object;
	zend_object *new_object;
	zend_object_handle handle = Z_OBJ_HANDLE_P(zobject);
	spl_filesystem_object *intern;
	spl_filesystem_object *source;
	int index, skip_dots;

	old_object = zend_objects_get_address(zobject TSRMLS_CC);
	source = (spl_filesystem_object*)old_object;

	new_obj_val = spl_filesystem_object_new_ex(old_object->ce, &intern TSRMLS_CC);
	new_object = &intern->std;

	intern->flags = source->flags;

	switch (source->type) {
	case SPL_FS_INFO:
		intern->_path_len = source->_path_len;
		intern->_path = estrndup(source->_path, source->_path_len);
		intern->file_name_len = source->file_name_len;
		intern->file_name = estrndup(source->file_name, intern->file_name_len);
		break;
	case SPL_FS_DIR:
		spl_filesystem_dir_open(intern, source->_path TSRMLS_CC);
		/* read until we hit the position in which we were before */
		skip_dots = SPL_HAS_FLAG(source->flags, SPL_FILE_DIR_SKIPDOTS);
		for(index = 0; index < source->u.dir.index; ++index) {
			do {
				spl_filesystem_dir_read(intern TSRMLS_CC);
			} while (skip_dots && spl_filesystem_is_dot(intern->u.dir.entry.d_name));
		}
		intern->u.dir.index = index;
		break;
	case SPL_FS_FILE:
		/* open file handles cannot be duplicated safely */
		php_error_docref(NULL TSRMLS_CC, E_ERROR, "An object of class %s cannot be cloned", old_object->ce->name);
		break;
	}

	intern->file_class = source->file_class;
	intern->info_class = source->info_class;
	intern->oth = source->oth;
	intern->oth_handler = source->oth_handler;

	zend_objects_clone_members(new_object, new_obj_val, old_object, handle TSRMLS_CC);

	if (intern->oth_handler && intern->oth_handler->clone) {
		intern->oth_handler->clone(source, intern TSRMLS_CC);
	}

	return new_obj_val;
}
/* }}} */

/* Store a file name on the object (copying if use_copy), strip trailing
   slashes and derive _path as the directory component. */
void spl_filesystem_info_set_filename(spl_filesystem_object *intern, char *path, int len, int use_copy TSRMLS_DC) /* {{{ */
{
	char *p1, *p2;

	if (intern->file_name) {
		efree(intern->file_name);
	}

	intern->file_name = use_copy ? estrndup(path, len) : path;
	intern->file_name_len = len;

	while(IS_SLASH_AT(intern->file_name, intern->file_name_len-1) && intern->file_name_len > 1) {
		intern->file_name[intern->file_name_len-1] = 0;
		intern->file_name_len--;
	}

	/* locate the last separator; on Windows/NetWare either slash counts */
	p1 = strrchr(intern->file_name, '/');
#if defined(PHP_WIN32) || defined(NETWARE)
	p2 = strrchr(intern->file_name, '\\');
#else
	p2 = 0;
#endif
	if (p1 || p2) {
		intern->_path_len = (p1 > p2 ?
p1 : p2) - intern->file_name;
	} else {
		intern->_path_len = 0;
	}

	if (intern->_path) {
		efree(intern->_path);
	}
	intern->_path = estrndup(path, intern->_path_len);
} /* }}} */

/* Build a new SplFileInfo-family object for file_path into return_value,
   either via the subclass constructor or directly when ce is SplFileInfo. */
static spl_filesystem_object * spl_filesystem_object_create_info(spl_filesystem_object *source, char *file_path, int file_path_len, int use_copy, zend_class_entry *ce, zval *return_value TSRMLS_DC) /* {{{ */
{
	spl_filesystem_object *intern;
	zval *arg1;
	zend_error_handling error_handling;

	if (!file_path || !file_path_len) {
#if defined(PHP_WIN32)
		zend_throw_exception_ex(spl_ce_RuntimeException, 0 TSRMLS_CC, "Cannot create SplFileInfo for empty path");
		if (file_path && !use_copy) {
			efree(file_path);
		}
#else
		/* on non-Windows an empty path degrades to the filesystem root */
		if (file_path && !use_copy) {
			efree(file_path);
		}
		file_path_len = 1;
		file_path = "/";
#endif
		return NULL;
	}

	zend_replace_error_handling(EH_THROW, spl_ce_RuntimeException, &error_handling TSRMLS_CC);

	ce = ce ? ce : source->info_class;

	zend_update_class_constants(ce TSRMLS_CC);

	return_value->value.obj = spl_filesystem_object_new_ex(ce, &intern TSRMLS_CC);
	Z_TYPE_P(return_value) = IS_OBJECT;

	/* overridden constructors must be invoked; the stock one is bypassed */
	if (ce->constructor->common.scope != spl_ce_SplFileInfo) {
		MAKE_STD_ZVAL(arg1);
		ZVAL_STRINGL(arg1, file_path, file_path_len, use_copy);
		zend_call_method_with_1_params(&return_value, ce, &ce->constructor, "__construct", NULL, arg1);
		zval_ptr_dtor(&arg1);
	} else {
		spl_filesystem_info_set_filename(intern, file_path, file_path_len, use_copy TSRMLS_CC);
	}

	zend_restore_error_handling(&error_handling TSRMLS_CC);
	return intern;
} /* }}} */

/* Materialize 'source' as a new object of the requested SPL_FS_* type
   (info or file) into return_value; SPL_FS_DIR targets are unsupported. */
static spl_filesystem_object * spl_filesystem_object_create_type(int ht, spl_filesystem_object *source, int type, zend_class_entry *ce, zval *return_value TSRMLS_DC) /* {{{ */
{
	spl_filesystem_object *intern;
	zend_bool use_include_path = 0;
	zval *arg1, *arg2;
	zend_error_handling error_handling;

	zend_replace_error_handling(EH_THROW, spl_ce_RuntimeException, &error_handling TSRMLS_CC);

	switch (source->type) {
	case SPL_FS_INFO:
	case SPL_FS_FILE:
		break;
	case SPL_FS_DIR:
		if (!source->u.dir.entry.d_name[0]) {
			zend_throw_exception_ex(spl_ce_RuntimeException, 0 TSRMLS_CC, "Could not open file");
			zend_restore_error_handling(&error_handling TSRMLS_CC);
			return NULL;
		}
	}

	switch (type) {
	case SPL_FS_INFO:
		ce = ce ? ce : source->info_class;

		zend_update_class_constants(ce TSRMLS_CC);

		return_value->value.obj = spl_filesystem_object_new_ex(ce, &intern TSRMLS_CC);
		Z_TYPE_P(return_value) = IS_OBJECT;

		spl_filesystem_object_get_file_name(source TSRMLS_CC);

		if (ce->constructor->common.scope != spl_ce_SplFileInfo) {
			MAKE_STD_ZVAL(arg1);
			ZVAL_STRINGL(arg1, source->file_name, source->file_name_len, 1);
			zend_call_method_with_1_params(&return_value, ce, &ce->constructor, "__construct", NULL, arg1);
			zval_ptr_dtor(&arg1);
		} else {
			intern->file_name = estrndup(source->file_name, source->file_name_len);
			intern->file_name_len = source->file_name_len;
			intern->_path = spl_filesystem_object_get_path(source, &intern->_path_len TSRMLS_CC);
			intern->_path = estrndup(intern->_path, intern->_path_len);
		}
		break;
	case SPL_FS_FILE:
		ce = ce ? ce : source->file_class;

		zend_update_class_constants(ce TSRMLS_CC);

		return_value->value.obj = spl_filesystem_object_new_ex(ce, &intern TSRMLS_CC);
		Z_TYPE_P(return_value) = IS_OBJECT;

		spl_filesystem_object_get_file_name(source TSRMLS_CC);

		if (ce->constructor->common.scope != spl_ce_SplFileObject) {
			MAKE_STD_ZVAL(arg1);
			MAKE_STD_ZVAL(arg2);
			ZVAL_STRINGL(arg1, source->file_name, source->file_name_len, 1);
			ZVAL_STRINGL(arg2, "r", 1, 1);
			zend_call_method_with_2_params(&return_value, ce, &ce->constructor, "__construct", NULL, arg1, arg2);
			zval_ptr_dtor(&arg1);
			zval_ptr_dtor(&arg2);
		} else {
			intern->file_name = source->file_name;
			intern->file_name_len = source->file_name_len;
			intern->_path = spl_filesystem_object_get_path(source, &intern->_path_len TSRMLS_CC);
			intern->_path = estrndup(intern->_path, intern->_path_len);

			intern->u.file.open_mode = "r";
			intern->u.file.open_mode_len = 1;

			/* optional openFile() style args: mode, use_include_path, context */
			if (ht && zend_parse_parameters(ZEND_NUM_ARGS() TSRMLS_CC, "|sbr", &intern->u.file.open_mode, &intern->u.file.open_mode_len, &use_include_path, &intern->u.file.zcontext) == FAILURE) {
				zend_restore_error_handling(&error_handling TSRMLS_CC);
				intern->u.file.open_mode = NULL;
				intern->file_name = NULL;
				zval_dtor(return_value);
				Z_TYPE_P(return_value) = IS_NULL;
				return NULL;
			}

			if (spl_filesystem_file_open(intern, use_include_path, 0 TSRMLS_CC) == FAILURE) {
				zend_restore_error_handling(&error_handling TSRMLS_CC);
				zval_dtor(return_value);
				Z_TYPE_P(return_value) = IS_NULL;
				return NULL;
			}
		}
		break;
	case SPL_FS_DIR:
		zend_restore_error_handling(&error_handling TSRMLS_CC);
		zend_throw_exception_ex(spl_ce_RuntimeException, 0 TSRMLS_CC, "Operation not supported");
		return NULL;
	}
	zend_restore_error_handling(&error_handling TSRMLS_CC);
	/* NOTE(review): success paths also return NULL here; callers appear to
	   consume return_value rather than the pointer — verify before relying
	   on the return value of this helper. */
	return NULL;
} /* }}} */

/* True when the entry name is empty (exhausted dir) or "."/"..". */
static int spl_filesystem_is_invalid_or_dot(const char * d_name) /* {{{ */
{
	return d_name[0] == '\0' || spl_filesystem_is_dot(d_name);
}
/* }}} */

/* Return the object's full pathname and its length (0/NULL when absent). */
static char *spl_filesystem_object_get_pathname(spl_filesystem_object *intern, int *len TSRMLS_DC) { /* {{{
 */
	switch (intern->type) {
	case SPL_FS_INFO:
	case SPL_FS_FILE:
		*len = intern->file_name_len;
		return intern->file_name;
	case SPL_FS_DIR:
		if (intern->u.dir.entry.d_name[0]) {
			spl_filesystem_object_get_file_name(intern TSRMLS_CC);
			*len = intern->file_name_len;
			return intern->file_name;
		}
	}
	*len = 0;
	return NULL;
}
/* }}} */

/* get_debug_info handler: builds a temporary property table exposing
   pathName/fileName plus type-specific extras (glob, subPathName, CSV
   controls) for var_dump() and debuggers. */
static HashTable* spl_filesystem_object_get_debug_info(zval *obj, int *is_temp TSRMLS_DC) /* {{{ */
{
	spl_filesystem_object *intern = (spl_filesystem_object*)zend_object_store_get_object(obj TSRMLS_CC);
	HashTable *rv;
	zval *tmp, zrv;
	char *pnstr, *path;
	int pnlen, path_len;
	char stmp[2];

	*is_temp = 1;

	if (!intern->std.properties) {
		rebuild_object_properties(&intern->std);
	}

	ALLOC_HASHTABLE(rv);
	ZEND_INIT_SYMTABLE_EX(rv, zend_hash_num_elements(intern->std.properties) + 3, 0);

	INIT_PZVAL(&zrv);
	Z_ARRVAL(zrv) = rv;

	zend_hash_copy(rv, intern->std.properties, (copy_ctor_func_t) zval_add_ref, (void *) &tmp, sizeof(zval *));

	pnstr = spl_gen_private_prop_name(spl_ce_SplFileInfo, "pathName", sizeof("pathName")-1, &pnlen TSRMLS_CC);
	path = spl_filesystem_object_get_pathname(intern, &path_len TSRMLS_CC);
	add_assoc_stringl_ex(&zrv, pnstr, pnlen+1, path, path_len, 1);
	efree(pnstr);

	if (intern->file_name) {
		pnstr = spl_gen_private_prop_name(spl_ce_SplFileInfo, "fileName", sizeof("fileName")-1, &pnlen TSRMLS_CC);
		spl_filesystem_object_get_path(intern, &path_len TSRMLS_CC);

		/* show only the basename when file_name embeds the path prefix */
		if (path_len && path_len < intern->file_name_len) {
			add_assoc_stringl_ex(&zrv, pnstr, pnlen+1, intern->file_name + path_len + 1, intern->file_name_len - (path_len + 1), 1);
		} else {
			add_assoc_stringl_ex(&zrv, pnstr, pnlen+1, intern->file_name, intern->file_name_len, 1);
		}
		efree(pnstr);
	}
	if (intern->type == SPL_FS_DIR) {
#ifdef HAVE_GLOB
		pnstr = spl_gen_private_prop_name(spl_ce_DirectoryIterator, "glob", sizeof("glob")-1, &pnlen TSRMLS_CC);
		if (php_stream_is(intern->u.dir.dirp ,&php_glob_stream_ops)) {
			add_assoc_stringl_ex(&zrv, pnstr, pnlen+1, intern->_path, intern->_path_len, 1);
		} else {
			add_assoc_bool_ex(&zrv, pnstr, pnlen+1, 0);
		}
		efree(pnstr);
#endif
		pnstr = spl_gen_private_prop_name(spl_ce_RecursiveDirectoryIterator, "subPathName", sizeof("subPathName")-1, &pnlen TSRMLS_CC);
		if (intern->u.dir.sub_path) {
			add_assoc_stringl_ex(&zrv, pnstr, pnlen+1, intern->u.dir.sub_path, intern->u.dir.sub_path_len, 1);
		} else {
			add_assoc_stringl_ex(&zrv, pnstr, pnlen+1, "", 0, 1);
		}
		efree(pnstr);
	}
	if (intern->type == SPL_FS_FILE) {
		pnstr = spl_gen_private_prop_name(spl_ce_SplFileObject, "openMode", sizeof("openMode")-1, &pnlen TSRMLS_CC);
		add_assoc_stringl_ex(&zrv, pnstr, pnlen+1, intern->u.file.open_mode, intern->u.file.open_mode_len, 1);
		efree(pnstr);
		stmp[1] = '\0';
		stmp[0] = intern->u.file.delimiter;
		pnstr = spl_gen_private_prop_name(spl_ce_SplFileObject, "delimiter", sizeof("delimiter")-1, &pnlen TSRMLS_CC);
		add_assoc_stringl_ex(&zrv, pnstr, pnlen+1, stmp, 1, 1);
		efree(pnstr);
		stmp[0] = intern->u.file.enclosure;
		pnstr = spl_gen_private_prop_name(spl_ce_SplFileObject, "enclosure", sizeof("enclosure")-1, &pnlen TSRMLS_CC);
		add_assoc_stringl_ex(&zrv, pnstr, pnlen+1, stmp, 1, 1);
		efree(pnstr);
	}

	return rv;
}
/* }}} */

/* get_method handler for "check" objects: redirect method lookup to the
   _bad_state_ex stub when the object was never properly initialized.
   NOTE(review): this reads u.dir.entry.d_name regardless of intern->type;
   for SPL_FS_FILE objects that inspects a different union member — verify
   the intended invariant before changing this check. */
zend_function *spl_filesystem_object_get_method_check(zval **object_ptr, char *method, int method_len, const struct _zend_literal *key TSRMLS_DC) /* {{{ */
{
	spl_filesystem_object *fsobj = zend_object_store_get_object(*object_ptr TSRMLS_CC);

	if (fsobj->u.dir.entry.d_name[0] == '\0' && fsobj->orig_path == NULL) {
		method = "_bad_state_ex";
		method_len = sizeof("_bad_state_ex") - 1;
		key = NULL;
	}

	return zend_get_std_object_handlers()->get_method(object_ptr, method, method_len, key TSRMLS_CC);
}
/* }}} */

#define DIT_CTOR_FLAGS  0x00000001
#define DIT_CTOR_GLOB   0x00000002

/* Shared constructor body for the DirectoryIterator family; ctor_flags
   select flag parsing (DIT_CTOR_FLAGS) and glob:// wrapping (DIT_CTOR_GLOB). */
void spl_filesystem_object_construct(INTERNAL_FUNCTION_PARAMETERS, long ctor_flags) /* {{{ */
{
	spl_filesystem_object *intern;
	char *path;
	int parsed, len;
	long flags;
	zend_error_handling error_handling;

	zend_replace_error_handling(EH_THROW, spl_ce_UnexpectedValueException,
&error_handling TSRMLS_CC); if (SPL_HAS_FLAG(ctor_flags, DIT_CTOR_FLAGS)) { flags = SPL_FILE_DIR_KEY_AS_PATHNAME|SPL_FILE_DIR_CURRENT_AS_FILEINFO; parsed = zend_parse_parameters(ZEND_NUM_ARGS() TSRMLS_CC, "s|l", &path, &len, &flags); } else { flags = SPL_FILE_DIR_KEY_AS_PATHNAME|SPL_FILE_DIR_CURRENT_AS_SELF; parsed = zend_parse_parameters(ZEND_NUM_ARGS() TSRMLS_CC, "s", &path, &len); } if (SPL_HAS_FLAG(ctor_flags, SPL_FILE_DIR_SKIPDOTS)) { flags |= SPL_FILE_DIR_SKIPDOTS; } if (SPL_HAS_FLAG(ctor_flags, SPL_FILE_DIR_UNIXPATHS)) { flags |= SPL_FILE_DIR_UNIXPATHS; } if (parsed == FAILURE) { zend_restore_error_handling(&error_handling TSRMLS_CC); return; } if (!len) { zend_throw_exception_ex(spl_ce_RuntimeException, 0 TSRMLS_CC, "Directory name must not be empty."); zend_restore_error_handling(&error_handling TSRMLS_CC); return; } intern = (spl_filesystem_object*)zend_object_store_get_object(getThis() TSRMLS_CC); if (intern->_path) { /* object is alreay initialized */ zend_restore_error_handling(&error_handling TSRMLS_CC); php_error_docref(NULL TSRMLS_CC, E_WARNING, "Directory object is already initialized"); return; } intern->flags = flags; #ifdef HAVE_GLOB if (SPL_HAS_FLAG(ctor_flags, DIT_CTOR_GLOB) && strstr(path, "glob://") != path) { spprintf(&path, 0, "glob://%s", path); spl_filesystem_dir_open(intern, path TSRMLS_CC); efree(path); } else #endif { spl_filesystem_dir_open(intern, path TSRMLS_CC); } intern->u.dir.is_recursive = instanceof_function(intern->std.ce, spl_ce_RecursiveDirectoryIterator TSRMLS_CC) ? 1 : 0; zend_restore_error_handling(&error_handling TSRMLS_CC); } /* }}} */ /* {{{ proto void DirectoryIterator::__construct(string path) Cronstructs a new dir iterator from a path. 
*/
SPL_METHOD(DirectoryIterator, __construct)
{
	spl_filesystem_object_construct(INTERNAL_FUNCTION_PARAM_PASSTHRU, 0);
}
/* }}} */

/* {{{ proto void DirectoryIterator::rewind()
   Rewind dir back to the start */
SPL_METHOD(DirectoryIterator, rewind)
{
	spl_filesystem_object *intern = (spl_filesystem_object*)zend_object_store_get_object(getThis() TSRMLS_CC);

	if (zend_parse_parameters_none() == FAILURE) {
		return;
	}
	intern->u.dir.index = 0;
	if (intern->u.dir.dirp) {
		php_stream_rewinddir(intern->u.dir.dirp);
	}
	/* Pre-read the first entry so current()/key() are immediately valid. */
	spl_filesystem_dir_read(intern TSRMLS_CC);
}
/* }}} */

/* {{{ proto string DirectoryIterator::key()
   Return current dir entry */
SPL_METHOD(DirectoryIterator, key)
{
	spl_filesystem_object *intern = (spl_filesystem_object*)zend_object_store_get_object(getThis() TSRMLS_CC);

	if (zend_parse_parameters_none() == FAILURE) {
		return;
	}
	/* Returns the numeric position, or FALSE if no directory is open. */
	if (intern->u.dir.dirp) {
		RETURN_LONG(intern->u.dir.index);
	} else {
		RETURN_FALSE;
	}
}
/* }}} */

/* {{{ proto DirectoryIterator DirectoryIterator::current()
   Return this (needed for Iterator interface) */
SPL_METHOD(DirectoryIterator, current)
{
	if (zend_parse_parameters_none() == FAILURE) {
		return;
	}
	RETURN_ZVAL(getThis(), 1, 0);
}
/* }}} */

/* {{{ proto void DirectoryIterator::next()
   Move to next entry */
SPL_METHOD(DirectoryIterator, next)
{
	spl_filesystem_object *intern = (spl_filesystem_object*)zend_object_store_get_object(getThis() TSRMLS_CC);
	int skip_dots = SPL_HAS_FLAG(intern->flags, SPL_FILE_DIR_SKIPDOTS);

	if (zend_parse_parameters_none() == FAILURE) {
		return;
	}
	intern->u.dir.index++;
	do {
		spl_filesystem_dir_read(intern TSRMLS_CC);
	} while (skip_dots && spl_filesystem_is_dot(intern->u.dir.entry.d_name));
	/* The cached full path belongs to the previous entry; drop it so it is
	 * recomputed lazily for the new one. */
	if (intern->file_name) {
		efree(intern->file_name);
		intern->file_name = NULL;
	}
}
/* }}} */

/* {{{ proto void DirectoryIterator::seek(int position)
   Seek to the given position */
SPL_METHOD(DirectoryIterator, seek)
{
	spl_filesystem_object *intern = (spl_filesystem_object*)zend_object_store_get_object(getThis() TSRMLS_CC);
	zval *retval =
NULL;
	long pos;

	if (zend_parse_parameters(ZEND_NUM_ARGS() TSRMLS_CC, "l", &pos) == FAILURE) {
		return;
	}

	if (intern->u.dir.index > pos) {
		/* we first rewind */
		zend_call_method_with_0_params(&this_ptr, Z_OBJCE_P(getThis()), &intern->u.dir.func_rewind, "rewind", &retval);
		if (retval) {
			zval_ptr_dtor(&retval);
			retval = NULL;
		}
	}

	/* Step forward via the (possibly overridden) valid()/next() methods until
	 * the requested index is reached or the iterator runs out of entries. */
	while (intern->u.dir.index < pos) {
		int valid = 0;
		zend_call_method_with_0_params(&this_ptr, Z_OBJCE_P(getThis()), &intern->u.dir.func_valid, "valid", &retval);
		if (retval) {
			valid = zend_is_true(retval);
			zval_ptr_dtor(&retval);
			retval = NULL;
		}
		if (!valid) {
			break;
		}
		zend_call_method_with_0_params(&this_ptr, Z_OBJCE_P(getThis()), &intern->u.dir.func_next, "next", &retval);
		if (retval) {
			zval_ptr_dtor(&retval);
		}
	}
}
/* }}} */

/* {{{ proto string DirectoryIterator::valid()
   Check whether dir contains more entries */
SPL_METHOD(DirectoryIterator, valid)
{
	spl_filesystem_object *intern = (spl_filesystem_object*)zend_object_store_get_object(getThis() TSRMLS_CC);

	if (zend_parse_parameters_none() == FAILURE) {
		return;
	}
	/* An empty d_name marks "past the last entry". */
	RETURN_BOOL(intern->u.dir.entry.d_name[0] != '\0');
}
/* }}} */

/* {{{ proto string SplFileInfo::getPath()
   Return the path */
SPL_METHOD(SplFileInfo, getPath)
{
	spl_filesystem_object *intern = (spl_filesystem_object*)zend_object_store_get_object(getThis() TSRMLS_CC);
	char *path;
	int path_len;

	if (zend_parse_parameters_none() == FAILURE) {
		return;
	}

	path = spl_filesystem_object_get_path(intern, &path_len TSRMLS_CC);
	RETURN_STRINGL(path, path_len, 1);
}
/* }}} */

/* {{{ proto string SplFileInfo::getFilename()
   Return filename only */
SPL_METHOD(SplFileInfo, getFilename)
{
	spl_filesystem_object *intern = (spl_filesystem_object*)zend_object_store_get_object(getThis() TSRMLS_CC);
	int path_len;

	if (zend_parse_parameters_none() == FAILURE) {
		return;
	}

	spl_filesystem_object_get_path(intern, &path_len TSRMLS_CC);

	/* If file_name starts with the directory prefix, return only the part
	 * after the separator; otherwise the whole stored name. */
	if (path_len && path_len < intern->file_name_len) {
		RETURN_STRINGL(intern->file_name + path_len + 1, intern->file_name_len -
(path_len + 1), 1);
	} else {
		RETURN_STRINGL(intern->file_name, intern->file_name_len, 1);
	}
}
/* }}} */

/* {{{ proto string DirectoryIterator::getFilename()
   Return filename of current dir entry */
SPL_METHOD(DirectoryIterator, getFilename)
{
	spl_filesystem_object *intern = (spl_filesystem_object*)zend_object_store_get_object(getThis() TSRMLS_CC);

	if (zend_parse_parameters_none() == FAILURE) {
		return;
	}
	RETURN_STRING(intern->u.dir.entry.d_name, 1);
}
/* }}} */

/* {{{ proto string SplFileInfo::getExtension()
   Returns file extension component of path */
SPL_METHOD(SplFileInfo, getExtension)
{
	spl_filesystem_object *intern = (spl_filesystem_object*)zend_object_store_get_object(getThis() TSRMLS_CC);
	char *fname = NULL;
	const char *p;
	size_t flen;
	int path_len, idx;

	if (zend_parse_parameters_none() == FAILURE) {
		return;
	}

	spl_filesystem_object_get_path(intern, &path_len TSRMLS_CC);

	/* Reduce to the filename portion before searching for the last '.'. */
	if (path_len && path_len < intern->file_name_len) {
		fname = intern->file_name + path_len + 1;
		flen = intern->file_name_len - (path_len + 1);
	} else {
		fname = intern->file_name;
		flen = intern->file_name_len;
	}

	/* php_basename() allocates a new fname that must be efree'd below. */
	php_basename(fname, flen, NULL, 0, &fname, &flen TSRMLS_CC);

	p = zend_memrchr(fname, '.', flen);
	if (p) {
		idx = p - fname;
		RETVAL_STRINGL(fname + idx + 1, flen - idx - 1, 1);
		efree(fname);
		return;
	} else {
		if (fname) {
			efree(fname);
		}
		RETURN_EMPTY_STRING();
	}
}
/* }}}*/

/* {{{ proto string DirectoryIterator::getExtension()
   Returns the file extension component of path */
SPL_METHOD(DirectoryIterator, getExtension)
{
	spl_filesystem_object *intern = (spl_filesystem_object*)zend_object_store_get_object(getThis() TSRMLS_CC);
	char *fname = NULL;
	const char *p;
	size_t flen;
	int idx;

	if (zend_parse_parameters_none() == FAILURE) {
		return;
	}

	php_basename(intern->u.dir.entry.d_name, strlen(intern->u.dir.entry.d_name), NULL, 0, &fname, &flen TSRMLS_CC);

	p = zend_memrchr(fname, '.', flen);
	if (p) {
		idx = p - fname;
		RETVAL_STRINGL(fname + idx + 1, flen - idx - 1, 1);
		efree(fname);
		return;
	} else {
		if
(fname) {
			efree(fname);
		}
		RETURN_EMPTY_STRING();
	}
}
/* }}} */

/* {{{ proto string SplFileInfo::getBasename([string $suffix]) U
   Returns filename component of path */
SPL_METHOD(SplFileInfo, getBasename)
{
	spl_filesystem_object *intern = (spl_filesystem_object*)zend_object_store_get_object(getThis() TSRMLS_CC);
	char *fname, *suffix = 0;
	size_t flen;
	int slen = 0, path_len;

	if (zend_parse_parameters(ZEND_NUM_ARGS() TSRMLS_CC, "|s", &suffix, &slen) == FAILURE) {
		return;
	}

	spl_filesystem_object_get_path(intern, &path_len TSRMLS_CC);

	/* Skip the directory prefix if file_name contains one. */
	if (path_len && path_len < intern->file_name_len) {
		fname = intern->file_name + path_len + 1;
		flen = intern->file_name_len - (path_len + 1);
	} else {
		fname = intern->file_name;
		flen = intern->file_name_len;
	}

	php_basename(fname, flen, suffix, slen, &fname, &flen TSRMLS_CC);

	/* php_basename() allocated fname; ownership passes to the return value. */
	RETURN_STRINGL(fname, flen, 0);
}
/* }}}*/

/* {{{ proto string DirectoryIterator::getBasename([string $suffix]) U
   Returns filename component of current dir entry */
SPL_METHOD(DirectoryIterator, getBasename)
{
	spl_filesystem_object *intern = (spl_filesystem_object*)zend_object_store_get_object(getThis() TSRMLS_CC);
	char *suffix = 0, *fname;
	int slen = 0;
	size_t flen;

	if (zend_parse_parameters(ZEND_NUM_ARGS() TSRMLS_CC, "|s", &suffix, &slen) == FAILURE) {
		return;
	}

	php_basename(intern->u.dir.entry.d_name, strlen(intern->u.dir.entry.d_name), suffix, slen, &fname, &flen TSRMLS_CC);

	RETURN_STRINGL(fname, flen, 0);
}
/* }}} */

/* {{{ proto string SplFileInfo::getPathname()
   Return path and filename */
SPL_METHOD(SplFileInfo, getPathname)
{
	spl_filesystem_object *intern = (spl_filesystem_object*)zend_object_store_get_object(getThis() TSRMLS_CC);
	char *path;
	int path_len;

	if (zend_parse_parameters_none() == FAILURE) {
		return;
	}
	path = spl_filesystem_object_get_pathname(intern, &path_len TSRMLS_CC);
	if (path != NULL) {
		RETURN_STRINGL(path, path_len, 1);
	} else {
		RETURN_FALSE;
	}
}
/* }}} */

/* {{{ proto string FilesystemIterator::key()
   Return getPathname() or getFilename()
depending on flags */
SPL_METHOD(FilesystemIterator, key)
{
	spl_filesystem_object *intern = (spl_filesystem_object*)zend_object_store_get_object(getThis() TSRMLS_CC);

	if (zend_parse_parameters_none() == FAILURE) {
		return;
	}

	/* KEY_AS_FILENAME yields just the entry name, otherwise the full path. */
	if (SPL_FILE_DIR_KEY(intern, SPL_FILE_DIR_KEY_AS_FILENAME)) {
		RETURN_STRING(intern->u.dir.entry.d_name, 1);
	} else {
		spl_filesystem_object_get_file_name(intern TSRMLS_CC);
		RETURN_STRINGL(intern->file_name, intern->file_name_len, 1);
	}
}
/* }}} */

/* {{{ proto string FilesystemIterator::current()
   Return getFilename(), getFileInfo() or $this depending on flags */
SPL_METHOD(FilesystemIterator, current)
{
	spl_filesystem_object *intern = (spl_filesystem_object*)zend_object_store_get_object(getThis() TSRMLS_CC);

	if (zend_parse_parameters_none() == FAILURE) {
		return;
	}

	if (SPL_FILE_DIR_CURRENT(intern, SPL_FILE_DIR_CURRENT_AS_PATHNAME)) {
		spl_filesystem_object_get_file_name(intern TSRMLS_CC);
		RETURN_STRINGL(intern->file_name, intern->file_name_len, 1);
	} else if (SPL_FILE_DIR_CURRENT(intern, SPL_FILE_DIR_CURRENT_AS_FILEINFO)) {
		/* Build a fresh SplFileInfo (or intern->info_class) for the entry. */
		spl_filesystem_object_get_file_name(intern TSRMLS_CC);
		spl_filesystem_object_create_type(0, intern, SPL_FS_INFO, NULL, return_value TSRMLS_CC);
	} else {
		RETURN_ZVAL(getThis(), 1, 0);
		/*RETURN_STRING(intern->u.dir.entry.d_name, 1);*/
	}
}
/* }}} */

/* {{{ proto bool DirectoryIterator::isDot()
   Returns true if current entry is '.' or '..' */
SPL_METHOD(DirectoryIterator, isDot)
{
	spl_filesystem_object *intern = (spl_filesystem_object*)zend_object_store_get_object(getThis() TSRMLS_CC);

	if (zend_parse_parameters_none() == FAILURE) {
		return;
	}
	RETURN_BOOL(spl_filesystem_is_dot(intern->u.dir.entry.d_name));
}
/* }}} */

/* {{{ proto void SplFileInfo::__construct(string file_name)
 Constructs a new SplFileInfo from a path. */

/* zend_replace_error_handling() is used to throw exceptions in case
   the constructor fails. Here we use this to ensure the object
   has a valid directory resource.
When the constructor gets called the object is already created
   by the engine, so we must only call 'additional' initializations.
 */
SPL_METHOD(SplFileInfo, __construct)
{
	spl_filesystem_object *intern;
	char *path;
	int len;
	zend_error_handling error_handling;

	zend_replace_error_handling(EH_THROW, spl_ce_RuntimeException, &error_handling TSRMLS_CC);

	if (zend_parse_parameters(ZEND_NUM_ARGS() TSRMLS_CC, "s", &path, &len) == FAILURE) {
		zend_restore_error_handling(&error_handling TSRMLS_CC);
		return;
	}

	intern = (spl_filesystem_object*)zend_object_store_get_object(getThis() TSRMLS_CC);

	spl_filesystem_info_set_filename(intern, path, len, 1 TSRMLS_CC);

	zend_restore_error_handling(&error_handling TSRMLS_CC);

	/* intern->type = SPL_FS_INFO; already set */
}
/* }}} */

/* {{{ FileInfoFunction
 * Expands to an SplFileInfo method that stat()s the file and returns the
 * field selected by func_num (an FS_* constant) via php_stat(); stat
 * failures surface as RuntimeException thanks to the EH_THROW handler. */
#define FileInfoFunction(func_name, func_num) \
SPL_METHOD(SplFileInfo, func_name) \
{ \
	spl_filesystem_object *intern = (spl_filesystem_object*)zend_object_store_get_object(getThis() TSRMLS_CC); \
	zend_error_handling error_handling; \
	if (zend_parse_parameters_none() == FAILURE) { \
		return; \
	} \
\
	zend_replace_error_handling(EH_THROW, spl_ce_RuntimeException, &error_handling TSRMLS_CC);\
	spl_filesystem_object_get_file_name(intern TSRMLS_CC); \
	php_stat(intern->file_name, intern->file_name_len, func_num, return_value TSRMLS_CC); \
	zend_restore_error_handling(&error_handling TSRMLS_CC); \
}
/* }}} */

/* {{{ proto int SplFileInfo::getPerms()
   Get file permissions */
FileInfoFunction(getPerms, FS_PERMS)
/* }}} */

/* {{{ proto int SplFileInfo::getInode()
   Get file inode */
FileInfoFunction(getInode, FS_INODE)
/* }}} */

/* {{{ proto int SplFileInfo::getSize()
   Get file size */
FileInfoFunction(getSize, FS_SIZE)
/* }}} */

/* {{{ proto int SplFileInfo::getOwner()
   Get file owner */
FileInfoFunction(getOwner, FS_OWNER)
/* }}} */

/* {{{ proto int SplFileInfo::getGroup()
   Get file group */
FileInfoFunction(getGroup, FS_GROUP)
/* }}} */

/* {{{ proto int SplFileInfo::getATime()
   Get last access time of
file */
FileInfoFunction(getATime, FS_ATIME)
/* }}} */

/* {{{ proto int SplFileInfo::getMTime()
   Get last modification time of file */
FileInfoFunction(getMTime, FS_MTIME)
/* }}} */

/* {{{ proto int SplFileInfo::getCTime()
   Get inode modification time of file */
FileInfoFunction(getCTime, FS_CTIME)
/* }}} */

/* {{{ proto string SplFileInfo::getType()
   Get file type */
FileInfoFunction(getType, FS_TYPE)
/* }}} */

/* {{{ proto bool SplFileInfo::isWritable()
   Returns true if file can be written */
FileInfoFunction(isWritable, FS_IS_W)
/* }}} */

/* {{{ proto bool SplFileInfo::isReadable()
   Returns true if file can be read */
FileInfoFunction(isReadable, FS_IS_R)
/* }}} */

/* {{{ proto bool SplFileInfo::isExecutable()
   Returns true if file is executable */
FileInfoFunction(isExecutable, FS_IS_X)
/* }}} */

/* {{{ proto bool SplFileInfo::isFile()
   Returns true if file is a regular file */
FileInfoFunction(isFile, FS_IS_FILE)
/* }}} */

/* {{{ proto bool SplFileInfo::isDir()
   Returns true if file is directory */
FileInfoFunction(isDir, FS_IS_DIR)
/* }}} */

/* {{{ proto bool SplFileInfo::isLink()
   Returns true if file is symbolic link */
FileInfoFunction(isLink, FS_IS_LINK)
/* }}} */

/* {{{ proto string SplFileInfo::getLinkTarget() U
   Return the target of a symbolic link */
SPL_METHOD(SplFileInfo, getLinkTarget)
{
	spl_filesystem_object *intern = (spl_filesystem_object*)zend_object_store_get_object(getThis() TSRMLS_CC);
	int ret;
	char buff[MAXPATHLEN];
	zend_error_handling error_handling;

	if (zend_parse_parameters_none() == FAILURE) {
		return;
	}

	zend_replace_error_handling(EH_THROW, spl_ce_RuntimeException, &error_handling TSRMLS_CC);

#if defined(PHP_WIN32) || HAVE_SYMLINK
	if (intern->file_name == NULL) {
		/* NOTE(review): RETURN_FALSE exits the function here without reaching
		 * zend_restore_error_handling() below, leaving the replaced error
		 * handler installed — confirm whether that is intended. */
		php_error_docref(NULL TSRMLS_CC, E_WARNING, "Empty filename");
		RETURN_FALSE;
	} else if (!IS_ABSOLUTE_PATH(intern->file_name, intern->file_name_len)) {
		/* Relative paths must be expanded against the CWD before readlink. */
		char expanded_path[MAXPATHLEN];
		if (!expand_filepath_with_mode(intern->file_name, expanded_path, NULL, 0, CWD_EXPAND TSRMLS_CC)) {
php_error_docref(NULL TSRMLS_CC, E_WARNING, "No such file or directory");
			/* NOTE(review): this early return also skips the
			 * zend_restore_error_handling() at the end — confirm. */
			RETURN_FALSE;
		}

		ret = php_sys_readlink(expanded_path, buff, MAXPATHLEN - 1);
	} else {
		ret = php_sys_readlink(intern->file_name, buff, MAXPATHLEN-1);
	}
#else
	ret = -1; /* always fail if not implemented */
#endif

	if (ret == -1) {
		zend_throw_exception_ex(spl_ce_RuntimeException, 0 TSRMLS_CC, "Unable to read link %s, error: %s", intern->file_name, strerror(errno));
		RETVAL_FALSE;
	} else {
		/* Append NULL to the end of the string */
		buff[ret] = '\0';

		RETVAL_STRINGL(buff, ret, 1);
	}

	zend_restore_error_handling(&error_handling TSRMLS_CC);
}
/* }}} */

#if (!defined(__BEOS__) && !defined(NETWARE) && HAVE_REALPATH) || defined(ZTS)
/* {{{ proto string SplFileInfo::getRealPath()
   Return the resolved path */
SPL_METHOD(SplFileInfo, getRealPath)
{
	spl_filesystem_object *intern = (spl_filesystem_object*)zend_object_store_get_object(getThis() TSRMLS_CC);
	char buff[MAXPATHLEN];
	char *filename;
	zend_error_handling error_handling;

	if (zend_parse_parameters_none() == FAILURE) {
		return;
	}

	zend_replace_error_handling(EH_THROW, spl_ce_RuntimeException, &error_handling TSRMLS_CC);

	if (intern->type == SPL_FS_DIR && !intern->file_name && intern->u.dir.entry.d_name[0]) {
		spl_filesystem_object_get_file_name(intern TSRMLS_CC);
	}

	/* Prefer the path originally supplied by the user over the derived one. */
	if (intern->orig_path) {
		filename = intern->orig_path;
	} else {
		filename = intern->file_name;
	}

	if (filename && VCWD_REALPATH(filename, buff)) {
#ifdef ZTS
		if (VCWD_ACCESS(buff, F_OK)) {
			RETVAL_FALSE;
		} else
#endif
		RETVAL_STRING(buff, 1);
	} else {
		RETVAL_FALSE;
	}

	zend_restore_error_handling(&error_handling TSRMLS_CC);
}
/* }}} */
#endif

/* {{{ proto SplFileObject SplFileInfo::openFile([string mode = 'r' [, bool use_include_path [, resource context]]])
   Open the current file */
SPL_METHOD(SplFileInfo, openFile)
{
	spl_filesystem_object *intern = (spl_filesystem_object*)zend_object_store_get_object(getThis() TSRMLS_CC);

	/* Arguments are forwarded via ht to the SplFileObject constructor. */
	spl_filesystem_object_create_type(ht, intern, SPL_FS_FILE, NULL, return_value
TSRMLS_CC);
}
/* }}} */

/* {{{ proto void SplFileInfo::setFileClass([string class_name])
   Class to use in openFile() */
SPL_METHOD(SplFileInfo, setFileClass)
{
	spl_filesystem_object *intern = (spl_filesystem_object*)zend_object_store_get_object(getThis() TSRMLS_CC);
	zend_class_entry *ce = spl_ce_SplFileObject;
	zend_error_handling error_handling;

	zend_replace_error_handling(EH_THROW, spl_ce_UnexpectedValueException, &error_handling TSRMLS_CC);

	/* "C" restricts the argument to the default class or a subclass of it. */
	if (zend_parse_parameters(ZEND_NUM_ARGS() TSRMLS_CC, "|C", &ce) == SUCCESS) {
		intern->file_class = ce;
	}

	zend_restore_error_handling(&error_handling TSRMLS_CC);
}
/* }}} */

/* {{{ proto void SplFileInfo::setInfoClass([string class_name])
   Class to use in getFileInfo(), getPathInfo() */
SPL_METHOD(SplFileInfo, setInfoClass)
{
	spl_filesystem_object *intern = (spl_filesystem_object*)zend_object_store_get_object(getThis() TSRMLS_CC);
	zend_class_entry *ce = spl_ce_SplFileInfo;
	zend_error_handling error_handling;

	zend_replace_error_handling(EH_THROW, spl_ce_UnexpectedValueException, &error_handling TSRMLS_CC);

	if (zend_parse_parameters(ZEND_NUM_ARGS() TSRMLS_CC, "|C", &ce) == SUCCESS) {
		intern->info_class = ce;
	}

	zend_restore_error_handling(&error_handling TSRMLS_CC);
}
/* }}} */

/* {{{ proto SplFileInfo SplFileInfo::getFileInfo([string $class_name])
   Get/copy file info */
SPL_METHOD(SplFileInfo, getFileInfo)
{
	spl_filesystem_object *intern = (spl_filesystem_object*)zend_object_store_get_object(getThis() TSRMLS_CC);
	zend_class_entry *ce = intern->info_class;
	zend_error_handling error_handling;

	zend_replace_error_handling(EH_THROW, spl_ce_UnexpectedValueException, &error_handling TSRMLS_CC);

	if (zend_parse_parameters(ZEND_NUM_ARGS() TSRMLS_CC, "|C", &ce) == SUCCESS) {
		spl_filesystem_object_create_type(ht, intern, SPL_FS_INFO, ce, return_value TSRMLS_CC);
	}

	zend_restore_error_handling(&error_handling TSRMLS_CC);
}
/* }}} */

/* {{{ proto SplFileInfo SplFileInfo::getPathInfo([string $class_name])
   Get/copy file info */
SPL_METHOD(SplFileInfo, getPathInfo)
{
	spl_filesystem_object *intern = (spl_filesystem_object*)zend_object_store_get_object(getThis() TSRMLS_CC);
	zend_class_entry *ce = intern->info_class;
	zend_error_handling error_handling;

	zend_replace_error_handling(EH_THROW, spl_ce_UnexpectedValueException, &error_handling TSRMLS_CC);

	if (zend_parse_parameters(ZEND_NUM_ARGS() TSRMLS_CC, "|C", &ce) == SUCCESS) {
		int path_len;
		char *path = spl_filesystem_object_get_pathname(intern, &path_len TSRMLS_CC);
		if (path) {
			/* Build an info object for the directory component of the path. */
			char *dpath = estrndup(path, path_len);
			path_len = php_dirname(dpath, path_len);
			spl_filesystem_object_create_info(intern, dpath, path_len, 1, ce, return_value TSRMLS_CC);
			efree(dpath);
		}
	}

	zend_restore_error_handling(&error_handling TSRMLS_CC);
}
/* }}} */

/* {{{ Fallback method installed by spl_filesystem_object_get_method_check()
 * for objects whose parent constructor never ran: always throws. */
SPL_METHOD(SplFileInfo, _bad_state_ex)
{
	zend_throw_exception_ex(spl_ce_LogicException, 0 TSRMLS_CC,
		"The parent constructor was not called: the object is in an "
		"invalid state ");
}
/* }}} */

/* {{{ proto void FilesystemIterator::__construct(string path [, int flags])
 Constructs a new dir iterator from a path.
*/
SPL_METHOD(FilesystemIterator, __construct)
{
	spl_filesystem_object_construct(INTERNAL_FUNCTION_PARAM_PASSTHRU, DIT_CTOR_FLAGS | SPL_FILE_DIR_SKIPDOTS);
}
/* }}} */

/* {{{ proto void FilesystemIterator::rewind()
   Rewind dir back to the start */
SPL_METHOD(FilesystemIterator, rewind)
{
	spl_filesystem_object *intern = (spl_filesystem_object*)zend_object_store_get_object(getThis() TSRMLS_CC);
	int skip_dots = SPL_HAS_FLAG(intern->flags, SPL_FILE_DIR_SKIPDOTS);

	if (zend_parse_parameters_none() == FAILURE) {
		return;
	}

	intern->u.dir.index = 0;
	if (intern->u.dir.dirp) {
		php_stream_rewinddir(intern->u.dir.dirp);
	}
	/* Skip '.'/'..' immediately when SKIPDOTS is active. */
	do {
		spl_filesystem_dir_read(intern TSRMLS_CC);
	} while (skip_dots && spl_filesystem_is_dot(intern->u.dir.entry.d_name));
}
/* }}} */

/* {{{ proto int FilesystemIterator::getFlags()
   Get handling flags */
SPL_METHOD(FilesystemIterator, getFlags)
{
	spl_filesystem_object *intern = (spl_filesystem_object*)zend_object_store_get_object(getThis() TSRMLS_CC);

	if (zend_parse_parameters_none() == FAILURE) {
		return;
	}

	RETURN_LONG(intern->flags & (SPL_FILE_DIR_KEY_MODE_MASK | SPL_FILE_DIR_CURRENT_MODE_MASK | SPL_FILE_DIR_OTHERS_MASK));
}
/* }}} */

/* {{{ proto void FilesystemIterator::setFlags(long $flags)
   Set handling flags */
SPL_METHOD(FilesystemIterator, setFlags)
{
	spl_filesystem_object *intern = (spl_filesystem_object*)zend_object_store_get_object(getThis() TSRMLS_CC);
	long flags;

	if (zend_parse_parameters(ZEND_NUM_ARGS() TSRMLS_CC, "l", &flags) == FAILURE) {
		return;
	}

	/* Only the key/current/others mode bits are user-settable; internal
	 * flags (e.g. UNIXPATHS set at construction) are preserved. */
	intern->flags &= ~(SPL_FILE_DIR_KEY_MODE_MASK|SPL_FILE_DIR_CURRENT_MODE_MASK|SPL_FILE_DIR_OTHERS_MASK);
	intern->flags |= ((SPL_FILE_DIR_KEY_MODE_MASK|SPL_FILE_DIR_CURRENT_MODE_MASK|SPL_FILE_DIR_OTHERS_MASK) & flags);
}
/* }}} */

/* {{{ proto bool RecursiveDirectoryIterator::hasChildren([bool $allow_links = false])
   Returns whether current entry is a directory and not '.' or '..'
*/
SPL_METHOD(RecursiveDirectoryIterator, hasChildren)
{
	zend_bool allow_links = 0;
	spl_filesystem_object *intern = (spl_filesystem_object*)zend_object_store_get_object(getThis() TSRMLS_CC);

	if (zend_parse_parameters(ZEND_NUM_ARGS() TSRMLS_CC, "|b", &allow_links) == FAILURE) {
		return;
	}
	if (spl_filesystem_is_invalid_or_dot(intern->u.dir.entry.d_name)) {
		RETURN_FALSE;
	} else {
		spl_filesystem_object_get_file_name(intern TSRMLS_CC);
		if (!allow_links && !(intern->flags & SPL_FILE_DIR_FOLLOW_SYMLINKS)) {
			/* Symlinked directories are not descended into unless explicitly
			 * allowed via the argument or the FOLLOW_SYMLINKS flag. */
			php_stat(intern->file_name, intern->file_name_len, FS_IS_LINK, return_value TSRMLS_CC);
			if (zend_is_true(return_value)) {
				RETURN_FALSE;
			}
		}
		php_stat(intern->file_name, intern->file_name_len, FS_IS_DIR, return_value TSRMLS_CC);
	}
}
/* }}} */

/* {{{ proto RecursiveDirectoryIterator DirectoryIterator::getChildren()
   Returns an iterator for the current entry if it is a directory */
SPL_METHOD(RecursiveDirectoryIterator, getChildren)
{
	zval *zpath, *zflags;
	spl_filesystem_object *intern = (spl_filesystem_object*)zend_object_store_get_object(getThis() TSRMLS_CC);
	spl_filesystem_object *subdir;
	char slash = SPL_HAS_FLAG(intern->flags, SPL_FILE_DIR_UNIXPATHS) ?
'/' : DEFAULT_SLASH;

	if (zend_parse_parameters_none() == FAILURE) {
		return;
	}

	spl_filesystem_object_get_file_name(intern TSRMLS_CC);

	/* Instantiate the same class with (path, flags) as constructor args. */
	MAKE_STD_ZVAL(zflags);
	MAKE_STD_ZVAL(zpath);
	ZVAL_LONG(zflags, intern->flags);
	ZVAL_STRINGL(zpath, intern->file_name, intern->file_name_len, 1);
	spl_instantiate_arg_ex2(Z_OBJCE_P(getThis()), &return_value, 0, zpath, zflags TSRMLS_CC);
	zval_ptr_dtor(&zpath);
	zval_ptr_dtor(&zflags);

	subdir = (spl_filesystem_object*)zend_object_store_get_object(return_value TSRMLS_CC);
	if (subdir) {
		/* Extend the sub path with the current entry name and propagate the
		 * configured classes/state so the child behaves like its parent. */
		if (intern->u.dir.sub_path && intern->u.dir.sub_path[0]) {
			subdir->u.dir.sub_path_len = spprintf(&subdir->u.dir.sub_path, 0, "%s%c%s", intern->u.dir.sub_path, slash, intern->u.dir.entry.d_name);
		} else {
			subdir->u.dir.sub_path_len = strlen(intern->u.dir.entry.d_name);
			subdir->u.dir.sub_path = estrndup(intern->u.dir.entry.d_name, subdir->u.dir.sub_path_len);
		}
		subdir->info_class = intern->info_class;
		subdir->file_class = intern->file_class;
		subdir->oth = intern->oth;
	}
}
/* }}} */

/* {{{ proto void RecursiveDirectoryIterator::getSubPath()
   Get sub path */
SPL_METHOD(RecursiveDirectoryIterator, getSubPath)
{
	spl_filesystem_object *intern = (spl_filesystem_object*)zend_object_store_get_object(getThis() TSRMLS_CC);

	if (zend_parse_parameters_none() == FAILURE) {
		return;
	}

	if (intern->u.dir.sub_path) {
		RETURN_STRINGL(intern->u.dir.sub_path, intern->u.dir.sub_path_len, 1);
	} else {
		RETURN_STRINGL("", 0, 1);
	}
}
/* }}} */

/* {{{ proto void RecursiveDirectoryIterator::getSubPathname()
   Get sub path and file name */
SPL_METHOD(RecursiveDirectoryIterator, getSubPathname)
{
	spl_filesystem_object *intern = (spl_filesystem_object*)zend_object_store_get_object(getThis() TSRMLS_CC);
	char *sub_name;
	int len;
	char slash = SPL_HAS_FLAG(intern->flags, SPL_FILE_DIR_UNIXPATHS) ?
'/' : DEFAULT_SLASH;

	if (zend_parse_parameters_none() == FAILURE) {
		return;
	}

	if (intern->u.dir.sub_path) {
		/* spprintf allocates; ownership is handed to the return value (dup=0). */
		len = spprintf(&sub_name, 0, "%s%c%s", intern->u.dir.sub_path, slash, intern->u.dir.entry.d_name);
		RETURN_STRINGL(sub_name, len, 0);
	} else {
		RETURN_STRING(intern->u.dir.entry.d_name, 1);
	}
}
/* }}} */

/* {{{ proto int RecursiveDirectoryIterator::__construct(string path [, int flags])
 Constructs a new dir iterator from a path. */
SPL_METHOD(RecursiveDirectoryIterator, __construct)
{
	spl_filesystem_object_construct(INTERNAL_FUNCTION_PARAM_PASSTHRU, DIT_CTOR_FLAGS);
}
/* }}} */

#ifdef HAVE_GLOB
/* {{{ proto int GlobIterator::__construct(string path [, int flags])
 Constructs a new dir iterator from a glob expression (no glob:// needed). */
SPL_METHOD(GlobIterator, __construct)
{
	spl_filesystem_object_construct(INTERNAL_FUNCTION_PARAM_PASSTHRU, DIT_CTOR_FLAGS|DIT_CTOR_GLOB);
}
/* }}} */

/* {{{ proto int GlobIterator::count()
   Return the number of directories and files found by globbing */
SPL_METHOD(GlobIterator, count)
{
	spl_filesystem_object *intern = (spl_filesystem_object*)zend_object_store_get_object(getThis() TSRMLS_CC);

	if (zend_parse_parameters_none() == FAILURE) {
		return;
	}

	if (php_stream_is(intern->u.dir.dirp ,&php_glob_stream_ops)) {
		RETURN_LONG(php_glob_stream_get_count(intern->u.dir.dirp, NULL));
	} else {
		/* should not happen */
		php_error_docref(NULL TSRMLS_CC, E_ERROR, "GlobIterator lost glob state");
	}
}
/* }}} */
#endif /* HAVE_GLOB */

/* {{{ forward declarations to the iterator handlers */
static void spl_filesystem_dir_it_dtor(zend_object_iterator *iter TSRMLS_DC);
static int spl_filesystem_dir_it_valid(zend_object_iterator *iter TSRMLS_DC);
static void spl_filesystem_dir_it_current_data(zend_object_iterator *iter, zval ***data TSRMLS_DC);
static void spl_filesystem_dir_it_current_key(zend_object_iterator *iter, zval *key TSRMLS_DC);
static void spl_filesystem_dir_it_move_forward(zend_object_iterator *iter TSRMLS_DC);
static void
spl_filesystem_dir_it_rewind(zend_object_iterator *iter TSRMLS_DC);

/* iterator handler table */
zend_object_iterator_funcs spl_filesystem_dir_it_funcs = {
	spl_filesystem_dir_it_dtor,
	spl_filesystem_dir_it_valid,
	spl_filesystem_dir_it_current_data,
	spl_filesystem_dir_it_current_key,
	spl_filesystem_dir_it_move_forward,
	spl_filesystem_dir_it_rewind
};
/* }}} */

/* {{{ spl_ce_dir_get_iterator
 * get_iterator handler for DirectoryIterator: returns the per-object
 * iterator embedded in the SPL object, initializing it on first use. */
zend_object_iterator *spl_filesystem_dir_get_iterator(zend_class_entry *ce, zval *object, int by_ref TSRMLS_DC)
{
	spl_filesystem_iterator *iterator;
	spl_filesystem_object *dir_object;

	if (by_ref) {
		zend_error(E_ERROR, "An iterator cannot be used with foreach by reference");
	}
	dir_object = (spl_filesystem_object*)zend_object_store_get_object(object TSRMLS_CC);
	iterator = spl_filesystem_object_to_iterator(dir_object);

	/* initialize iterator if it wasn't gotten before */
	if (iterator->intern.data == NULL) {
		iterator->intern.data = object;
		iterator->intern.funcs = &spl_filesystem_dir_it_funcs;
		/* ->current must be initialized; rewind doesn't set it and valid
		 * doesn't check whether it's set */
		iterator->current = object;
	}
	zval_add_ref(&object);

	return (zend_object_iterator*)iterator;
}
/* }}} */

/* {{{ spl_filesystem_dir_it_dtor */
static void spl_filesystem_dir_it_dtor(zend_object_iterator *iter TSRMLS_DC)
{
	spl_filesystem_iterator *iterator = (spl_filesystem_iterator *)iter;

	if (iterator->intern.data) {
		zval *object = iterator->intern.data;
		zval_ptr_dtor(&object);
	}
	/* Otherwise we were called from the owning object free storage handler as
	 * it sets
	 * iterator->intern.data to NULL.
	 * We don't even need to destroy iterator->current as we didn't add a
	 * reference to it in move_forward or get_iterator */
}
/* }}} */

/* {{{ spl_filesystem_dir_it_valid */
static int spl_filesystem_dir_it_valid(zend_object_iterator *iter TSRMLS_DC)
{
	spl_filesystem_object *object = spl_filesystem_iterator_to_object((spl_filesystem_iterator *)iter);

	/* Empty d_name means the directory stream is exhausted. */
	return object->u.dir.entry.d_name[0] != '\0' ?
SUCCESS : FAILURE;
}
/* }}} */

/* {{{ spl_filesystem_dir_it_current_data */
static void spl_filesystem_dir_it_current_data(zend_object_iterator *iter, zval ***data TSRMLS_DC)
{
	spl_filesystem_iterator *iterator = (spl_filesystem_iterator *)iter;

	/* DirectoryIterator yields the iterator object itself as current. */
	*data = &iterator->current;
}
/* }}} */

/* {{{ spl_filesystem_dir_it_current_key */
static void spl_filesystem_dir_it_current_key(zend_object_iterator *iter, zval *key TSRMLS_DC)
{
	spl_filesystem_object *object = spl_filesystem_iterator_to_object((spl_filesystem_iterator *)iter);

	ZVAL_LONG(key, object->u.dir.index);
}
/* }}} */

/* {{{ spl_filesystem_dir_it_move_forward */
static void spl_filesystem_dir_it_move_forward(zend_object_iterator *iter TSRMLS_DC)
{
	spl_filesystem_object *object = spl_filesystem_iterator_to_object((spl_filesystem_iterator *)iter);

	object->u.dir.index++;
	spl_filesystem_dir_read(object TSRMLS_CC);
	/* Invalidate the cached full path of the previous entry. */
	if (object->file_name) {
		efree(object->file_name);
		object->file_name = NULL;
	}
}
/* }}} */

/* {{{ spl_filesystem_dir_it_rewind */
static void spl_filesystem_dir_it_rewind(zend_object_iterator *iter TSRMLS_DC)
{
	spl_filesystem_object *object = spl_filesystem_iterator_to_object((spl_filesystem_iterator *)iter);

	object->u.dir.index = 0;
	if (object->u.dir.dirp) {
		php_stream_rewinddir(object->u.dir.dirp);
	}
	spl_filesystem_dir_read(object TSRMLS_CC);
}
/* }}} */

/* {{{ spl_filesystem_tree_it_dtor */
static void spl_filesystem_tree_it_dtor(zend_object_iterator *iter TSRMLS_DC)
{
	spl_filesystem_iterator *iterator = (spl_filesystem_iterator *)iter;

	if (iterator->intern.data) {
		zval *object = iterator->intern.data;
		zval_ptr_dtor(&object);
	} else {
		/* Tree iterators own their cached current zval; release it here. */
		if (iterator->current) {
			zval_ptr_dtor(&iterator->current);
		}
	}
}
/* }}} */

/* {{{ spl_filesystem_tree_it_current_data */
static void spl_filesystem_tree_it_current_data(zend_object_iterator *iter, zval ***data TSRMLS_DC)
{
	spl_filesystem_iterator *iterator = (spl_filesystem_iterator *)iter;
	spl_filesystem_object *object = spl_filesystem_iterator_to_object(iterator);

	if
(SPL_FILE_DIR_CURRENT(object, SPL_FILE_DIR_CURRENT_AS_PATHNAME)) {
		/* Lazily build and cache a string zval holding the full pathname. */
		if (!iterator->current) {
			ALLOC_INIT_ZVAL(iterator->current);
			spl_filesystem_object_get_file_name(object TSRMLS_CC);
			ZVAL_STRINGL(iterator->current, object->file_name, object->file_name_len, 1);
		}
		*data = &iterator->current;
	} else if (SPL_FILE_DIR_CURRENT(object, SPL_FILE_DIR_CURRENT_AS_FILEINFO)) {
		/* Lazily build and cache an SplFileInfo for the current entry. */
		if (!iterator->current) {
			ALLOC_INIT_ZVAL(iterator->current);
			spl_filesystem_object_get_file_name(object TSRMLS_CC);
			spl_filesystem_object_create_type(0, object, SPL_FS_INFO, NULL, iterator->current TSRMLS_CC);
		}
		*data = &iterator->current;
	} else {
		/* CURRENT_AS_SELF: hand back the iterator object itself. */
		*data = (zval**)&iterator->intern.data;
	}
}
/* }}} */

/* {{{ spl_filesystem_tree_it_current_key */
static void spl_filesystem_tree_it_current_key(zend_object_iterator *iter, zval *key TSRMLS_DC)
{
	spl_filesystem_object *object = spl_filesystem_iterator_to_object((spl_filesystem_iterator *)iter);

	if (SPL_FILE_DIR_KEY(object, SPL_FILE_DIR_KEY_AS_FILENAME)) {
		ZVAL_STRING(key, object->u.dir.entry.d_name, 1);
	} else {
		spl_filesystem_object_get_file_name(object TSRMLS_CC);
		ZVAL_STRINGL(key, object->file_name, object->file_name_len, 1);
	}
}
/* }}} */

/* {{{ spl_filesystem_tree_it_move_forward */
static void spl_filesystem_tree_it_move_forward(zend_object_iterator *iter TSRMLS_DC)
{
	spl_filesystem_iterator *iterator = (spl_filesystem_iterator *)iter;
	spl_filesystem_object *object = spl_filesystem_iterator_to_object(iterator);

	object->u.dir.index++;
	do {
		spl_filesystem_dir_read(object TSRMLS_CC);
	} while (spl_filesystem_is_dot(object->u.dir.entry.d_name));
	if (object->file_name) {
		efree(object->file_name);
		object->file_name = NULL;
	}
	/* Drop the cached current zval so it is rebuilt for the new entry. */
	if (iterator->current) {
		zval_ptr_dtor(&iterator->current);
		iterator->current = NULL;
	}
}
/* }}} */

/* {{{ spl_filesystem_tree_it_rewind */
static void spl_filesystem_tree_it_rewind(zend_object_iterator *iter TSRMLS_DC)
{
	spl_filesystem_iterator *iterator = (spl_filesystem_iterator *)iter;
	spl_filesystem_object *object =
spl_filesystem_iterator_to_object(iterator); object->u.dir.index = 0; if (object->u.dir.dirp) { php_stream_rewinddir(object->u.dir.dirp); } do { spl_filesystem_dir_read(object TSRMLS_CC); } while (spl_filesystem_is_dot(object->u.dir.entry.d_name)); if (iterator->current) { zval_ptr_dtor(&iterator->current); iterator->current = NULL; } } /* }}} */ /* {{{ iterator handler table */ zend_object_iterator_funcs spl_filesystem_tree_it_funcs = { spl_filesystem_tree_it_dtor, spl_filesystem_dir_it_valid, spl_filesystem_tree_it_current_data, spl_filesystem_tree_it_current_key, spl_filesystem_tree_it_move_forward, spl_filesystem_tree_it_rewind }; /* }}} */ /* {{{ spl_ce_dir_get_iterator */ zend_object_iterator *spl_filesystem_tree_get_iterator(zend_class_entry *ce, zval *object, int by_ref TSRMLS_DC) { spl_filesystem_iterator *iterator; spl_filesystem_object *dir_object; if (by_ref) { zend_error(E_ERROR, "An iterator cannot be used with foreach by reference"); } dir_object = (spl_filesystem_object*)zend_object_store_get_object(object TSRMLS_CC); iterator = spl_filesystem_object_to_iterator(dir_object); /* initialize iterator if wasn't gotten before */ if (iterator->intern.data == NULL) { iterator->intern.data = object; iterator->intern.funcs = &spl_filesystem_tree_it_funcs; } zval_add_ref(&object); return (zend_object_iterator*)iterator; } /* }}} */ /* {{{ spl_filesystem_object_cast */ static int spl_filesystem_object_cast(zval *readobj, zval *writeobj, int type TSRMLS_DC) { spl_filesystem_object *intern = (spl_filesystem_object*)zend_object_store_get_object(readobj TSRMLS_CC); if (type == IS_STRING) { if (Z_OBJCE_P(readobj)->__tostring) { return std_object_handlers.cast_object(readobj, writeobj, type TSRMLS_CC); } switch (intern->type) { case SPL_FS_INFO: case SPL_FS_FILE: if (readobj == writeobj) { zval retval; zval *retval_ptr = &retval; ZVAL_STRINGL(retval_ptr, intern->file_name, intern->file_name_len, 1); zval_dtor(readobj); ZVAL_ZVAL(writeobj, retval_ptr, 0, 0); } else { 
ZVAL_STRINGL(writeobj, intern->file_name, intern->file_name_len, 1); } return SUCCESS; case SPL_FS_DIR: if (readobj == writeobj) { zval retval; zval *retval_ptr = &retval; ZVAL_STRING(retval_ptr, intern->u.dir.entry.d_name, 1); zval_dtor(readobj); ZVAL_ZVAL(writeobj, retval_ptr, 0, 0); } else { ZVAL_STRING(writeobj, intern->u.dir.entry.d_name, 1); } return SUCCESS; } } else if (type == IS_BOOL) { ZVAL_BOOL(writeobj, 1); return SUCCESS; } if (readobj == writeobj) { zval_dtor(readobj); } ZVAL_NULL(writeobj); return FAILURE; } /* }}} */ /* {{{ declare method parameters */ /* supply a name and default to call by parameter */ ZEND_BEGIN_ARG_INFO(arginfo_info___construct, 0) ZEND_ARG_INFO(0, file_name) ZEND_END_ARG_INFO() ZEND_BEGIN_ARG_INFO_EX(arginfo_info_openFile, 0, 0, 0) ZEND_ARG_INFO(0, open_mode) ZEND_ARG_INFO(0, use_include_path) ZEND_ARG_INFO(0, context) ZEND_END_ARG_INFO() ZEND_BEGIN_ARG_INFO_EX(arginfo_info_optinalFileClass, 0, 0, 0) ZEND_ARG_INFO(0, class_name) ZEND_END_ARG_INFO() ZEND_BEGIN_ARG_INFO_EX(arginfo_optinalSuffix, 0, 0, 0) ZEND_ARG_INFO(0, suffix) ZEND_END_ARG_INFO() ZEND_BEGIN_ARG_INFO(arginfo_splfileinfo_void, 0) ZEND_END_ARG_INFO() /* the method table */ /* each method can have its own parameters and visibility */ static const zend_function_entry spl_SplFileInfo_functions[] = { SPL_ME(SplFileInfo, __construct, arginfo_info___construct, ZEND_ACC_PUBLIC) SPL_ME(SplFileInfo, getPath, arginfo_splfileinfo_void, ZEND_ACC_PUBLIC) SPL_ME(SplFileInfo, getFilename, arginfo_splfileinfo_void, ZEND_ACC_PUBLIC) SPL_ME(SplFileInfo, getExtension, arginfo_splfileinfo_void, ZEND_ACC_PUBLIC) SPL_ME(SplFileInfo, getBasename, arginfo_optinalSuffix, ZEND_ACC_PUBLIC) SPL_ME(SplFileInfo, getPathname, arginfo_splfileinfo_void, ZEND_ACC_PUBLIC) SPL_ME(SplFileInfo, getPerms, arginfo_splfileinfo_void, ZEND_ACC_PUBLIC) SPL_ME(SplFileInfo, getInode, arginfo_splfileinfo_void, ZEND_ACC_PUBLIC) SPL_ME(SplFileInfo, getSize, arginfo_splfileinfo_void, ZEND_ACC_PUBLIC) 
SPL_ME(SplFileInfo, getOwner, arginfo_splfileinfo_void, ZEND_ACC_PUBLIC) SPL_ME(SplFileInfo, getGroup, arginfo_splfileinfo_void, ZEND_ACC_PUBLIC) SPL_ME(SplFileInfo, getATime, arginfo_splfileinfo_void, ZEND_ACC_PUBLIC) SPL_ME(SplFileInfo, getMTime, arginfo_splfileinfo_void, ZEND_ACC_PUBLIC) SPL_ME(SplFileInfo, getCTime, arginfo_splfileinfo_void, ZEND_ACC_PUBLIC) SPL_ME(SplFileInfo, getType, arginfo_splfileinfo_void, ZEND_ACC_PUBLIC) SPL_ME(SplFileInfo, isWritable, arginfo_splfileinfo_void, ZEND_ACC_PUBLIC) SPL_ME(SplFileInfo, isReadable, arginfo_splfileinfo_void, ZEND_ACC_PUBLIC) SPL_ME(SplFileInfo, isExecutable, arginfo_splfileinfo_void, ZEND_ACC_PUBLIC) SPL_ME(SplFileInfo, isFile, arginfo_splfileinfo_void, ZEND_ACC_PUBLIC) SPL_ME(SplFileInfo, isDir, arginfo_splfileinfo_void, ZEND_ACC_PUBLIC) SPL_ME(SplFileInfo, isLink, arginfo_splfileinfo_void, ZEND_ACC_PUBLIC) SPL_ME(SplFileInfo, getLinkTarget, arginfo_splfileinfo_void, ZEND_ACC_PUBLIC) #if (!defined(__BEOS__) && !defined(NETWARE) && HAVE_REALPATH) || defined(ZTS) SPL_ME(SplFileInfo, getRealPath, arginfo_splfileinfo_void, ZEND_ACC_PUBLIC) #endif SPL_ME(SplFileInfo, getFileInfo, arginfo_info_optinalFileClass, ZEND_ACC_PUBLIC) SPL_ME(SplFileInfo, getPathInfo, arginfo_info_optinalFileClass, ZEND_ACC_PUBLIC) SPL_ME(SplFileInfo, openFile, arginfo_info_openFile, ZEND_ACC_PUBLIC) SPL_ME(SplFileInfo, setFileClass, arginfo_info_optinalFileClass, ZEND_ACC_PUBLIC) SPL_ME(SplFileInfo, setInfoClass, arginfo_info_optinalFileClass, ZEND_ACC_PUBLIC) SPL_ME(SplFileInfo, _bad_state_ex, NULL, ZEND_ACC_PUBLIC|ZEND_ACC_FINAL) SPL_MA(SplFileInfo, __toString, SplFileInfo, getPathname, arginfo_splfileinfo_void, ZEND_ACC_PUBLIC) PHP_FE_END }; ZEND_BEGIN_ARG_INFO(arginfo_dir___construct, 0) ZEND_ARG_INFO(0, path) ZEND_END_ARG_INFO() ZEND_BEGIN_ARG_INFO(arginfo_dir_it_seek, 0) ZEND_ARG_INFO(0, position) ZEND_END_ARG_INFO(); /* the method table */ /* each method can have its own parameters and visibility */ static const zend_function_entry 
spl_DirectoryIterator_functions[] = { SPL_ME(DirectoryIterator, __construct, arginfo_dir___construct, ZEND_ACC_PUBLIC) SPL_ME(DirectoryIterator, getFilename, arginfo_splfileinfo_void, ZEND_ACC_PUBLIC) SPL_ME(DirectoryIterator, getExtension, arginfo_splfileinfo_void, ZEND_ACC_PUBLIC) SPL_ME(DirectoryIterator, getBasename, arginfo_optinalSuffix, ZEND_ACC_PUBLIC) SPL_ME(DirectoryIterator, isDot, arginfo_splfileinfo_void, ZEND_ACC_PUBLIC) SPL_ME(DirectoryIterator, rewind, arginfo_splfileinfo_void, ZEND_ACC_PUBLIC) SPL_ME(DirectoryIterator, valid, arginfo_splfileinfo_void, ZEND_ACC_PUBLIC) SPL_ME(DirectoryIterator, key, arginfo_splfileinfo_void, ZEND_ACC_PUBLIC) SPL_ME(DirectoryIterator, current, arginfo_splfileinfo_void, ZEND_ACC_PUBLIC) SPL_ME(DirectoryIterator, next, arginfo_splfileinfo_void, ZEND_ACC_PUBLIC) SPL_ME(DirectoryIterator, seek, arginfo_dir_it_seek, ZEND_ACC_PUBLIC) SPL_MA(DirectoryIterator, __toString, DirectoryIterator, getFilename, arginfo_splfileinfo_void, ZEND_ACC_PUBLIC) PHP_FE_END }; ZEND_BEGIN_ARG_INFO_EX(arginfo_r_dir___construct, 0, 0, 1) ZEND_ARG_INFO(0, path) ZEND_ARG_INFO(0, flags) ZEND_END_ARG_INFO() ZEND_BEGIN_ARG_INFO_EX(arginfo_r_dir_hasChildren, 0, 0, 0) ZEND_ARG_INFO(0, allow_links) ZEND_END_ARG_INFO() ZEND_BEGIN_ARG_INFO_EX(arginfo_r_dir_setFlags, 0, 0, 0) ZEND_ARG_INFO(0, flags) ZEND_END_ARG_INFO() static const zend_function_entry spl_FilesystemIterator_functions[] = { SPL_ME(FilesystemIterator, __construct, arginfo_r_dir___construct, ZEND_ACC_PUBLIC) SPL_ME(FilesystemIterator, rewind, arginfo_splfileinfo_void, ZEND_ACC_PUBLIC) SPL_ME(DirectoryIterator, next, arginfo_splfileinfo_void, ZEND_ACC_PUBLIC) SPL_ME(FilesystemIterator, key, arginfo_splfileinfo_void, ZEND_ACC_PUBLIC) SPL_ME(FilesystemIterator, current, arginfo_splfileinfo_void, ZEND_ACC_PUBLIC) SPL_ME(FilesystemIterator, getFlags, arginfo_splfileinfo_void, ZEND_ACC_PUBLIC) SPL_ME(FilesystemIterator, setFlags, arginfo_r_dir_setFlags, ZEND_ACC_PUBLIC) PHP_FE_END }; static const 
zend_function_entry spl_RecursiveDirectoryIterator_functions[] = { SPL_ME(RecursiveDirectoryIterator, __construct, arginfo_r_dir___construct, ZEND_ACC_PUBLIC) SPL_ME(RecursiveDirectoryIterator, hasChildren, arginfo_r_dir_hasChildren, ZEND_ACC_PUBLIC) SPL_ME(RecursiveDirectoryIterator, getChildren, arginfo_splfileinfo_void, ZEND_ACC_PUBLIC) SPL_ME(RecursiveDirectoryIterator, getSubPath, arginfo_splfileinfo_void, ZEND_ACC_PUBLIC) SPL_ME(RecursiveDirectoryIterator, getSubPathname,arginfo_splfileinfo_void, ZEND_ACC_PUBLIC) PHP_FE_END }; #ifdef HAVE_GLOB static const zend_function_entry spl_GlobIterator_functions[] = { SPL_ME(GlobIterator, __construct, arginfo_r_dir___construct, ZEND_ACC_PUBLIC) SPL_ME(GlobIterator, count, arginfo_splfileinfo_void, ZEND_ACC_PUBLIC) PHP_FE_END }; #endif /* }}} */ static int spl_filesystem_file_read(spl_filesystem_object *intern, int silent TSRMLS_DC) /* {{{ */ { char *buf; size_t line_len = 0; long line_add = (intern->u.file.current_line || intern->u.file.current_zval) ? 
1 : 0; spl_filesystem_file_free_line(intern TSRMLS_CC); if (php_stream_eof(intern->u.file.stream)) { if (!silent) { zend_throw_exception_ex(spl_ce_RuntimeException, 0 TSRMLS_CC, "Cannot read from file %s", intern->file_name); } return FAILURE; } if (intern->u.file.max_line_len > 0) { buf = safe_emalloc((intern->u.file.max_line_len + 1), sizeof(char), 0); if (php_stream_get_line(intern->u.file.stream, buf, intern->u.file.max_line_len + 1, &line_len) == NULL) { efree(buf); buf = NULL; } else { buf[line_len] = '\0'; } } else { buf = php_stream_get_line(intern->u.file.stream, NULL, 0, &line_len); } if (!buf) { intern->u.file.current_line = estrdup(""); intern->u.file.current_line_len = 0; } else { if (SPL_HAS_FLAG(intern->flags, SPL_FILE_OBJECT_DROP_NEW_LINE)) { line_len = strcspn(buf, "\r\n"); buf[line_len] = '\0'; } intern->u.file.current_line = buf; intern->u.file.current_line_len = line_len; } intern->u.file.current_line_num += line_add; return SUCCESS; } /* }}} */ static int spl_filesystem_file_call(spl_filesystem_object *intern, zend_function *func_ptr, int pass_num_args, zval *return_value, zval *arg2 TSRMLS_DC) /* {{{ */ { zend_fcall_info fci; zend_fcall_info_cache fcic; zval z_fname; zval * zresource_ptr = &intern->u.file.zresource, *retval; int result; int num_args = pass_num_args + (arg2 ? 2 : 1); zval ***params = (zval***)safe_emalloc(num_args, sizeof(zval**), 0); params[0] = &zresource_ptr; if (arg2) { params[1] = &arg2; } zend_get_parameters_array_ex(pass_num_args, params+(arg2 ? 
2 : 1)); ZVAL_STRING(&z_fname, func_ptr->common.function_name, 0); fci.size = sizeof(fci); fci.function_table = EG(function_table); fci.object_ptr = NULL; fci.function_name = &z_fname; fci.retval_ptr_ptr = &retval; fci.param_count = num_args; fci.params = params; fci.no_separation = 1; fci.symbol_table = NULL; fcic.initialized = 1; fcic.function_handler = func_ptr; fcic.calling_scope = NULL; fcic.called_scope = NULL; fcic.object_ptr = NULL; result = zend_call_function(&fci, &fcic TSRMLS_CC); if (result == FAILURE) { RETVAL_FALSE; } else { ZVAL_ZVAL(return_value, retval, 1, 1); } efree(params); return result; } /* }}} */ #define FileFunctionCall(func_name, pass_num_args, arg2) /* {{{ */ \ { \ zend_function *func_ptr; \ int ret; \ ret = zend_hash_find(EG(function_table), #func_name, sizeof(#func_name), (void **) &func_ptr); \ if (ret != SUCCESS) { \ zend_throw_exception_ex(spl_ce_RuntimeException, 0 TSRMLS_CC, "Internal error, function '%s' not found. Please report", #func_name); \ return; \ } \ spl_filesystem_file_call(intern, func_ptr, pass_num_args, return_value, arg2 TSRMLS_CC); \ } /* }}} */ static int spl_filesystem_file_read_csv(spl_filesystem_object *intern, char delimiter, char enclosure, char escape, zval *return_value TSRMLS_DC) /* {{{ */ { int ret = SUCCESS; do { ret = spl_filesystem_file_read(intern, 1 TSRMLS_CC); } while (ret == SUCCESS && !intern->u.file.current_line_len && SPL_HAS_FLAG(intern->flags, SPL_FILE_OBJECT_SKIP_EMPTY)); if (ret == SUCCESS) { size_t buf_len = intern->u.file.current_line_len; char *buf = estrndup(intern->u.file.current_line, buf_len); if (intern->u.file.current_zval) { zval_ptr_dtor(&intern->u.file.current_zval); } ALLOC_INIT_ZVAL(intern->u.file.current_zval); php_fgetcsv(intern->u.file.stream, delimiter, enclosure, escape, buf_len, buf, intern->u.file.current_zval TSRMLS_CC); if (return_value) { if (Z_TYPE_P(return_value) != IS_NULL) { zval_dtor(return_value); ZVAL_NULL(return_value); } ZVAL_ZVAL(return_value, 
intern->u.file.current_zval, 1, 0); } } return ret; } /* }}} */ static int spl_filesystem_file_read_line_ex(zval * this_ptr, spl_filesystem_object *intern, int silent TSRMLS_DC) /* {{{ */ { zval *retval = NULL; /* 1) use fgetcsv? 2) overloaded call the function, 3) do it directly */ if (SPL_HAS_FLAG(intern->flags, SPL_FILE_OBJECT_READ_CSV) || intern->u.file.func_getCurr->common.scope != spl_ce_SplFileObject) { if (php_stream_eof(intern->u.file.stream)) { if (!silent) { zend_throw_exception_ex(spl_ce_RuntimeException, 0 TSRMLS_CC, "Cannot read from file %s", intern->file_name); } return FAILURE; } if (SPL_HAS_FLAG(intern->flags, SPL_FILE_OBJECT_READ_CSV)) { return spl_filesystem_file_read_csv(intern, intern->u.file.delimiter, intern->u.file.enclosure, intern->u.file.escape, NULL TSRMLS_CC); } else { zend_call_method_with_0_params(&this_ptr, Z_OBJCE_P(getThis()), &intern->u.file.func_getCurr, "getCurrentLine", &retval); } if (retval) { if (intern->u.file.current_line || intern->u.file.current_zval) { intern->u.file.current_line_num++; } spl_filesystem_file_free_line(intern TSRMLS_CC); if (Z_TYPE_P(retval) == IS_STRING) { intern->u.file.current_line = estrndup(Z_STRVAL_P(retval), Z_STRLEN_P(retval)); intern->u.file.current_line_len = Z_STRLEN_P(retval); } else { MAKE_STD_ZVAL(intern->u.file.current_zval); ZVAL_ZVAL(intern->u.file.current_zval, retval, 1, 0); } zval_ptr_dtor(&retval); return SUCCESS; } else { return FAILURE; } } else { return spl_filesystem_file_read(intern, silent TSRMLS_CC); } } /* }}} */ static int spl_filesystem_file_is_empty_line(spl_filesystem_object *intern TSRMLS_DC) /* {{{ */ { if (intern->u.file.current_line) { return intern->u.file.current_line_len == 0; } else if (intern->u.file.current_zval) { switch(Z_TYPE_P(intern->u.file.current_zval)) { case IS_STRING: return Z_STRLEN_P(intern->u.file.current_zval) == 0; case IS_ARRAY: if (SPL_HAS_FLAG(intern->flags, SPL_FILE_OBJECT_READ_CSV) && 
zend_hash_num_elements(Z_ARRVAL_P(intern->u.file.current_zval)) == 1) { zval ** first = Z_ARRVAL_P(intern->u.file.current_zval)->pListHead->pData; return Z_TYPE_PP(first) == IS_STRING && Z_STRLEN_PP(first) == 0; } return zend_hash_num_elements(Z_ARRVAL_P(intern->u.file.current_zval)) == 0; case IS_NULL: return 1; default: return 0; } } else { return 1; } } /* }}} */ static int spl_filesystem_file_read_line(zval * this_ptr, spl_filesystem_object *intern, int silent TSRMLS_DC) /* {{{ */ { int ret = spl_filesystem_file_read_line_ex(this_ptr, intern, silent TSRMLS_CC); while (SPL_HAS_FLAG(intern->flags, SPL_FILE_OBJECT_SKIP_EMPTY) && ret == SUCCESS && spl_filesystem_file_is_empty_line(intern TSRMLS_CC)) { spl_filesystem_file_free_line(intern TSRMLS_CC); ret = spl_filesystem_file_read_line_ex(this_ptr, intern, silent TSRMLS_CC); } return ret; } /* }}} */ static void spl_filesystem_file_rewind(zval * this_ptr, spl_filesystem_object *intern TSRMLS_DC) /* {{{ */ { if (-1 == php_stream_rewind(intern->u.file.stream)) { zend_throw_exception_ex(spl_ce_RuntimeException, 0 TSRMLS_CC, "Cannot rewind file %s", intern->file_name); } else { spl_filesystem_file_free_line(intern TSRMLS_CC); intern->u.file.current_line_num = 0; } if (SPL_HAS_FLAG(intern->flags, SPL_FILE_OBJECT_READ_AHEAD)) { spl_filesystem_file_read_line(this_ptr, intern, 1 TSRMLS_CC); } } /* }}} */ /* {{{ proto void SplFileObject::__construct(string filename [, string mode = 'r' [, bool use_include_path [, resource context]]]]) Construct a new file object */ SPL_METHOD(SplFileObject, __construct) { spl_filesystem_object *intern = (spl_filesystem_object*)zend_object_store_get_object(getThis() TSRMLS_CC); zend_bool use_include_path = 0; char *p1, *p2; char *tmp_path; int tmp_path_len; zend_error_handling error_handling; zend_replace_error_handling(EH_THROW, spl_ce_RuntimeException, &error_handling TSRMLS_CC); intern->u.file.open_mode = NULL; intern->u.file.open_mode_len = 0; if (zend_parse_parameters(ZEND_NUM_ARGS() 
TSRMLS_CC, "p|sbr!", &intern->file_name, &intern->file_name_len, &intern->u.file.open_mode, &intern->u.file.open_mode_len, &use_include_path, &intern->u.file.zcontext) == FAILURE) { intern->u.file.open_mode = NULL; intern->file_name = NULL; zend_restore_error_handling(&error_handling TSRMLS_CC); return; } if (intern->u.file.open_mode == NULL) { intern->u.file.open_mode = "r"; intern->u.file.open_mode_len = 1; } if (spl_filesystem_file_open(intern, use_include_path, 0 TSRMLS_CC) == SUCCESS) { tmp_path_len = strlen(intern->u.file.stream->orig_path); if (tmp_path_len > 1 && IS_SLASH_AT(intern->u.file.stream->orig_path, tmp_path_len-1)) { tmp_path_len--; } tmp_path = estrndup(intern->u.file.stream->orig_path, tmp_path_len); p1 = strrchr(tmp_path, '/'); #if defined(PHP_WIN32) || defined(NETWARE) p2 = strrchr(tmp_path, '\\'); #else p2 = 0; #endif if (p1 || p2) { intern->_path_len = (p1 > p2 ? p1 : p2) - tmp_path; } else { intern->_path_len = 0; } efree(tmp_path); intern->_path = estrndup(intern->u.file.stream->orig_path, intern->_path_len); } zend_restore_error_handling(&error_handling TSRMLS_CC); } /* }}} */ /* {{{ proto void SplTempFileObject::__construct([int max_memory]) Construct a new temp file object */ SPL_METHOD(SplTempFileObject, __construct) { long max_memory = PHP_STREAM_MAX_MEM; char tmp_fname[48]; spl_filesystem_object *intern = (spl_filesystem_object*)zend_object_store_get_object(getThis() TSRMLS_CC); zend_error_handling error_handling; zend_replace_error_handling(EH_THROW, spl_ce_RuntimeException, &error_handling TSRMLS_CC); if (zend_parse_parameters(ZEND_NUM_ARGS() TSRMLS_CC, "|l", &max_memory) == FAILURE) { zend_restore_error_handling(&error_handling TSRMLS_CC); return; } if (max_memory < 0) { intern->file_name = "php://memory"; intern->file_name_len = 12; } else if (ZEND_NUM_ARGS()) { intern->file_name_len = slprintf(tmp_fname, sizeof(tmp_fname), "php://temp/maxmemory:%ld", max_memory); intern->file_name = tmp_fname; } else { intern->file_name = 
"php://temp"; intern->file_name_len = 10; } intern->u.file.open_mode = "wb"; intern->u.file.open_mode_len = 1; intern->u.file.zcontext = NULL; if (spl_filesystem_file_open(intern, 0, 0 TSRMLS_CC) == SUCCESS) { intern->_path_len = 0; intern->_path = estrndup("", 0); } zend_restore_error_handling(&error_handling TSRMLS_CC); } /* }}} */ /* {{{ proto void SplFileObject::rewind() Rewind the file and read the first line */ SPL_METHOD(SplFileObject, rewind) { spl_filesystem_object *intern = (spl_filesystem_object*)zend_object_store_get_object(getThis() TSRMLS_CC); if (zend_parse_parameters_none() == FAILURE) { return; } spl_filesystem_file_rewind(getThis(), intern TSRMLS_CC); } /* }}} */ /* {{{ proto void SplFileObject::eof() Return whether end of file is reached */ SPL_METHOD(SplFileObject, eof) { spl_filesystem_object *intern = (spl_filesystem_object*)zend_object_store_get_object(getThis() TSRMLS_CC); if (zend_parse_parameters_none() == FAILURE) { return; } RETURN_BOOL(php_stream_eof(intern->u.file.stream)); } /* }}} */ /* {{{ proto void SplFileObject::valid() Return !eof() */ SPL_METHOD(SplFileObject, valid) { spl_filesystem_object *intern = (spl_filesystem_object*)zend_object_store_get_object(getThis() TSRMLS_CC); if (zend_parse_parameters_none() == FAILURE) { return; } if (SPL_HAS_FLAG(intern->flags, SPL_FILE_OBJECT_READ_AHEAD)) { RETURN_BOOL(intern->u.file.current_line || intern->u.file.current_zval); } else { RETVAL_BOOL(!php_stream_eof(intern->u.file.stream)); } } /* }}} */ /* {{{ proto string SplFileObject::fgets() Rturn next line from file */ SPL_METHOD(SplFileObject, fgets) { spl_filesystem_object *intern = (spl_filesystem_object*)zend_object_store_get_object(getThis() TSRMLS_CC); if (zend_parse_parameters_none() == FAILURE) { return; } if (spl_filesystem_file_read(intern, 0 TSRMLS_CC) == FAILURE) { RETURN_FALSE; } RETURN_STRINGL(intern->u.file.current_line, intern->u.file.current_line_len, 1); } /* }}} */ /* {{{ proto string SplFileObject::current() Return 
current line from file */ SPL_METHOD(SplFileObject, current) { spl_filesystem_object *intern = (spl_filesystem_object*)zend_object_store_get_object(getThis() TSRMLS_CC); if (zend_parse_parameters_none() == FAILURE) { return; } if (!intern->u.file.current_line && !intern->u.file.current_zval) { spl_filesystem_file_read_line(getThis(), intern, 1 TSRMLS_CC); } if (intern->u.file.current_line && (!SPL_HAS_FLAG(intern->flags, SPL_FILE_OBJECT_READ_CSV) || !intern->u.file.current_zval)) { RETURN_STRINGL(intern->u.file.current_line, intern->u.file.current_line_len, 1); } else if (intern->u.file.current_zval) { RETURN_ZVAL(intern->u.file.current_zval, 1, 0); } RETURN_FALSE; } /* }}} */ /* {{{ proto int SplFileObject::key() Return line number */ SPL_METHOD(SplFileObject, key) { spl_filesystem_object *intern = (spl_filesystem_object*)zend_object_store_get_object(getThis() TSRMLS_CC); if (zend_parse_parameters_none() == FAILURE) { return; } /* Do not read the next line to support correct counting with fgetc() if (!intern->current_line) { spl_filesystem_file_read_line(getThis(), intern, 1 TSRMLS_CC); } */ RETURN_LONG(intern->u.file.current_line_num); } /* }}} */ /* {{{ proto void SplFileObject::next() Read next line */ SPL_METHOD(SplFileObject, next) { spl_filesystem_object *intern = (spl_filesystem_object*)zend_object_store_get_object(getThis() TSRMLS_CC); if (zend_parse_parameters_none() == FAILURE) { return; } spl_filesystem_file_free_line(intern TSRMLS_CC); if (SPL_HAS_FLAG(intern->flags, SPL_FILE_OBJECT_READ_AHEAD)) { spl_filesystem_file_read_line(getThis(), intern, 1 TSRMLS_CC); } intern->u.file.current_line_num++; } /* }}} */ /* {{{ proto void SplFileObject::setFlags(int flags) Set file handling flags */ SPL_METHOD(SplFileObject, setFlags) { spl_filesystem_object *intern = (spl_filesystem_object*)zend_object_store_get_object(getThis() TSRMLS_CC); if (zend_parse_parameters(ZEND_NUM_ARGS() TSRMLS_CC, "l", &intern->flags) == FAILURE) { return; } } /* }}} */ /* {{{ proto int 
SplFileObject::getFlags() Get file handling flags */ SPL_METHOD(SplFileObject, getFlags) { spl_filesystem_object *intern = (spl_filesystem_object*)zend_object_store_get_object(getThis() TSRMLS_CC); if (zend_parse_parameters_none() == FAILURE) { return; } RETURN_LONG(intern->flags & SPL_FILE_OBJECT_MASK); } /* }}} */ /* {{{ proto void SplFileObject::setMaxLineLen(int max_len) Set maximum line length */ SPL_METHOD(SplFileObject, setMaxLineLen) { long max_len; spl_filesystem_object *intern = (spl_filesystem_object*)zend_object_store_get_object(getThis() TSRMLS_CC); if (zend_parse_parameters(ZEND_NUM_ARGS() TSRMLS_CC, "l", &max_len) == FAILURE) { return; } if (max_len < 0) { zend_throw_exception_ex(spl_ce_DomainException, 0 TSRMLS_CC, "Maximum line length must be greater than or equal zero"); return; } intern->u.file.max_line_len = max_len; } /* }}} */ /* {{{ proto int SplFileObject::getMaxLineLen() Get maximum line length */ SPL_METHOD(SplFileObject, getMaxLineLen) { spl_filesystem_object *intern = (spl_filesystem_object*)zend_object_store_get_object(getThis() TSRMLS_CC); if (zend_parse_parameters_none() == FAILURE) { return; } RETURN_LONG((long)intern->u.file.max_line_len); } /* }}} */ /* {{{ proto bool SplFileObject::hasChildren() Return false */ SPL_METHOD(SplFileObject, hasChildren) { if (zend_parse_parameters_none() == FAILURE) { return; } RETURN_FALSE; } /* }}} */ /* {{{ proto bool SplFileObject::getChildren() Read NULL */ SPL_METHOD(SplFileObject, getChildren) { if (zend_parse_parameters_none() == FAILURE) { return; } /* return NULL */ } /* }}} */ /* {{{ FileFunction */ #define FileFunction(func_name) \ SPL_METHOD(SplFileObject, func_name) \ { \ spl_filesystem_object *intern = (spl_filesystem_object*)zend_object_store_get_object(getThis() TSRMLS_CC); \ FileFunctionCall(func_name, ZEND_NUM_ARGS(), NULL); \ } /* }}} */ /* {{{ proto array SplFileObject::fgetcsv([string delimiter [, string enclosure [, escape = '\\']]]) Return current line as csv */ 
SPL_METHOD(SplFileObject, fgetcsv) { spl_filesystem_object *intern = (spl_filesystem_object*)zend_object_store_get_object(getThis() TSRMLS_CC); char delimiter = intern->u.file.delimiter, enclosure = intern->u.file.enclosure, escape = intern->u.file.escape; char *delim = NULL, *enclo = NULL, *esc = NULL; int d_len = 0, e_len = 0, esc_len = 0; if (zend_parse_parameters(ZEND_NUM_ARGS() TSRMLS_CC, "|sss", &delim, &d_len, &enclo, &e_len, &esc, &esc_len) == SUCCESS) { switch(ZEND_NUM_ARGS()) { case 3: if (esc_len != 1) { php_error_docref(NULL TSRMLS_CC, E_WARNING, "escape must be a character"); RETURN_FALSE; } escape = esc[0]; /* no break */ case 2: if (e_len != 1) { php_error_docref(NULL TSRMLS_CC, E_WARNING, "enclosure must be a character"); RETURN_FALSE; } enclosure = enclo[0]; /* no break */ case 1: if (d_len != 1) { php_error_docref(NULL TSRMLS_CC, E_WARNING, "delimiter must be a character"); RETURN_FALSE; } delimiter = delim[0]; /* no break */ case 0: break; } spl_filesystem_file_read_csv(intern, delimiter, enclosure, escape, return_value TSRMLS_CC); } } /* }}} */ /* {{{ proto int SplFileObject::fputcsv(array fields, [string delimiter [, string enclosure [, string escape]]]) Output a field array as a CSV line */ SPL_METHOD(SplFileObject, fputcsv) { spl_filesystem_object *intern = (spl_filesystem_object*)zend_object_store_get_object(getThis() TSRMLS_CC); char delimiter = intern->u.file.delimiter, enclosure = intern->u.file.enclosure, escape = intern->u.file.escape; char *delim = NULL, *enclo = NULL, *esc = NULL; int d_len = 0, e_len = 0, esc_len = 0, ret; zval *fields = NULL; if (zend_parse_parameters(ZEND_NUM_ARGS() TSRMLS_CC, "a|sss", &fields, &delim, &d_len, &enclo, &e_len, &esc, &esc_len) == SUCCESS) { switch(ZEND_NUM_ARGS()) { case 4: if (esc_len != 1) { php_error_docref(NULL TSRMLS_CC, E_WARNING, "escape must be a character"); RETURN_FALSE; } escape = esc[0]; /* no break */ case 3: if (e_len != 1) { php_error_docref(NULL TSRMLS_CC, E_WARNING, "enclosure must 
be a character"); RETURN_FALSE; } enclosure = enclo[0]; /* no break */ case 2: if (d_len != 1) { php_error_docref(NULL TSRMLS_CC, E_WARNING, "delimiter must be a character"); RETURN_FALSE; } delimiter = delim[0]; /* no break */ case 1: case 0: break; } ret = php_fputcsv(intern->u.file.stream, fields, delimiter, enclosure, escape TSRMLS_CC); RETURN_LONG(ret); } } /* }}} */ /* {{{ proto void SplFileObject::setCsvControl([string delimiter = ',' [, string enclosure = '"' [, string escape = '\\']]]) Set the delimiter and enclosure character used in fgetcsv */ SPL_METHOD(SplFileObject, setCsvControl) { spl_filesystem_object *intern = (spl_filesystem_object*)zend_object_store_get_object(getThis() TSRMLS_CC); char delimiter = ',', enclosure = '"', escape='\\'; char *delim = NULL, *enclo = NULL, *esc = NULL; int d_len = 0, e_len = 0, esc_len = 0; if (zend_parse_parameters(ZEND_NUM_ARGS() TSRMLS_CC, "|sss", &delim, &d_len, &enclo, &e_len, &esc, &esc_len) == SUCCESS) { switch(ZEND_NUM_ARGS()) { case 3: if (esc_len != 1) { php_error_docref(NULL TSRMLS_CC, E_WARNING, "escape must be a character"); RETURN_FALSE; } escape = esc[0]; /* no break */ case 2: if (e_len != 1) { php_error_docref(NULL TSRMLS_CC, E_WARNING, "enclosure must be a character"); RETURN_FALSE; } enclosure = enclo[0]; /* no break */ case 1: if (d_len != 1) { php_error_docref(NULL TSRMLS_CC, E_WARNING, "delimiter must be a character"); RETURN_FALSE; } delimiter = delim[0]; /* no break */ case 0: break; } intern->u.file.delimiter = delimiter; intern->u.file.enclosure = enclosure; intern->u.file.escape = escape; } } /* }}} */ /* {{{ proto array SplFileObject::getCsvControl() Get the delimiter and enclosure character used in fgetcsv */ SPL_METHOD(SplFileObject, getCsvControl) { spl_filesystem_object *intern = (spl_filesystem_object*)zend_object_store_get_object(getThis() TSRMLS_CC); char delimiter[2], enclosure[2]; array_init(return_value); delimiter[0] = intern->u.file.delimiter; delimiter[1] = '\0'; enclosure[0] = 
intern->u.file.enclosure;
	enclosure[1] = '\0';

	/* Expose the single-character csv controls as one-char strings.
	 * NOTE(review): only delimiter and enclosure are reported here; the
	 * escape character is not included in the returned array. */
	add_next_index_string(return_value, delimiter, 1);
	add_next_index_string(return_value, enclosure, 1);
} /* }}} */

/* {{{ proto bool SplFileObject::flock(int operation [, int &wouldblock])
   Portable file locking */
FileFunction(flock)
/* }}} */

/* {{{ proto bool SplFileObject::fflush()
   Flush the file */
SPL_METHOD(SplFileObject, fflush)
{
	spl_filesystem_object *intern = (spl_filesystem_object*)zend_object_store_get_object(getThis() TSRMLS_CC);

	/* php_stream_flush() returns 0 on success, hence the negation. */
	RETURN_BOOL(!php_stream_flush(intern->u.file.stream));
} /* }}} */

/* {{{ proto int SplFileObject::ftell()
   Return current file position */
SPL_METHOD(SplFileObject, ftell)
{
	spl_filesystem_object *intern = (spl_filesystem_object*)zend_object_store_get_object(getThis() TSRMLS_CC);
	long ret = php_stream_tell(intern->u.file.stream);

	if (ret == -1) {
		RETURN_FALSE;
	} else {
		RETURN_LONG(ret);
	}
} /* }}} */

/* {{{ proto int SplFileObject::fseek(int pos [, int whence = SEEK_SET])
   Seek to a position in the file */
SPL_METHOD(SplFileObject, fseek)
{
	spl_filesystem_object *intern = (spl_filesystem_object*)zend_object_store_get_object(getThis() TSRMLS_CC);
	long pos, whence = SEEK_SET;

	if (zend_parse_parameters(ZEND_NUM_ARGS() TSRMLS_CC, "l|l", &pos, &whence) == FAILURE) {
		return;
	}

	/* Any buffered current line is stale after an explicit seek. */
	spl_filesystem_file_free_line(intern TSRMLS_CC);
	RETURN_LONG(php_stream_seek(intern->u.file.stream, pos, whence));
} /* }}} */

/* {{{ proto int SplFileObject::fgetc()
   Get a character from the file */
SPL_METHOD(SplFileObject, fgetc)
{
	spl_filesystem_object *intern = (spl_filesystem_object*)zend_object_store_get_object(getThis() TSRMLS_CC);
	char buf[2];
	int result;

	spl_filesystem_file_free_line(intern TSRMLS_CC);

	result = php_stream_getc(intern->u.file.stream);

	if (result == EOF) {
		RETVAL_FALSE;
	} else {
		/* Keep the line counter in sync when the consumed char is a newline. */
		if (result == '\n') {
			intern->u.file.current_line_num++;
		}
		buf[0] = result;
		buf[1] = '\0';

		RETURN_STRINGL(buf, 1, 1);
	}
} /* }}} */

/* {{{ proto string SplFileObject::fgetss([string allowable_tags]) Get a line from
file pointer and strip HTML tags */ SPL_METHOD(SplFileObject, fgetss) { spl_filesystem_object *intern = (spl_filesystem_object*)zend_object_store_get_object(getThis() TSRMLS_CC); zval *arg2 = NULL; MAKE_STD_ZVAL(arg2); if (intern->u.file.max_line_len > 0) { ZVAL_LONG(arg2, intern->u.file.max_line_len); } else { ZVAL_LONG(arg2, 1024); } spl_filesystem_file_free_line(intern TSRMLS_CC); intern->u.file.current_line_num++; FileFunctionCall(fgetss, ZEND_NUM_ARGS(), arg2); zval_ptr_dtor(&arg2); } /* }}} */ /* {{{ proto int SplFileObject::fpassthru() Output all remaining data from a file pointer */ SPL_METHOD(SplFileObject, fpassthru) { spl_filesystem_object *intern = (spl_filesystem_object*)zend_object_store_get_object(getThis() TSRMLS_CC); RETURN_LONG(php_stream_passthru(intern->u.file.stream)); } /* }}} */ /* {{{ proto bool SplFileObject::fscanf(string format [, string ...]) Implements a mostly ANSI compatible fscanf() */ SPL_METHOD(SplFileObject, fscanf) { spl_filesystem_object *intern = (spl_filesystem_object*)zend_object_store_get_object(getThis() TSRMLS_CC); spl_filesystem_file_free_line(intern TSRMLS_CC); intern->u.file.current_line_num++; FileFunctionCall(fscanf, ZEND_NUM_ARGS(), NULL); } /* }}} */ /* {{{ proto mixed SplFileObject::fwrite(string str [, int length]) Binary-safe file write */ SPL_METHOD(SplFileObject, fwrite) { spl_filesystem_object *intern = (spl_filesystem_object*)zend_object_store_get_object(getThis() TSRMLS_CC); char *str; int str_len; long length = 0; if (zend_parse_parameters(ZEND_NUM_ARGS() TSRMLS_CC, "s|l", &str, &str_len, &length) == FAILURE) { return; } if (ZEND_NUM_ARGS() > 1) { str_len = MAX(0, MIN(length, str_len)); } if (!str_len) { RETURN_LONG(0); } RETURN_LONG(php_stream_write(intern->u.file.stream, str, str_len)); } /* }}} */ SPL_METHOD(SplFileObject, fread) { spl_filesystem_object *intern = (spl_filesystem_object*)zend_object_store_get_object(getThis() TSRMLS_CC); long length = 0; if (zend_parse_parameters(ZEND_NUM_ARGS() 
TSRMLS_CC, "l", &length) == FAILURE) { return; } if (length <= 0) { php_error_docref(NULL TSRMLS_CC, E_WARNING, "Length parameter must be greater than 0"); RETURN_FALSE; } if (length > INT_MAX) { php_error_docref(NULL TSRMLS_CC, E_WARNING, "Length parameter must be no more than %d", INT_MAX); RETURN_FALSE; } Z_STRVAL_P(return_value) = emalloc(length + 1); Z_STRLEN_P(return_value) = php_stream_read(intern->u.file.stream, Z_STRVAL_P(return_value), length); /* needed because recv/read/gzread doesnt put a null at the end*/ Z_STRVAL_P(return_value)[Z_STRLEN_P(return_value)] = 0; Z_TYPE_P(return_value) = IS_STRING; } /* {{{ proto bool SplFileObject::fstat() Stat() on a filehandle */ FileFunction(fstat) /* }}} */ /* {{{ proto bool SplFileObject::ftruncate(int size) Truncate file to 'size' length */ SPL_METHOD(SplFileObject, ftruncate) { spl_filesystem_object *intern = (spl_filesystem_object*)zend_object_store_get_object(getThis() TSRMLS_CC); long size; if (zend_parse_parameters(ZEND_NUM_ARGS() TSRMLS_CC, "l", &size) == FAILURE) { return; } if (!php_stream_truncate_supported(intern->u.file.stream)) { zend_throw_exception_ex(spl_ce_LogicException, 0 TSRMLS_CC, "Can't truncate file %s", intern->file_name); RETURN_FALSE; } RETURN_BOOL(0 == php_stream_truncate_set_size(intern->u.file.stream, size)); } /* }}} */ /* {{{ proto void SplFileObject::seek(int line_pos) Seek to specified line */ SPL_METHOD(SplFileObject, seek) { spl_filesystem_object *intern = (spl_filesystem_object*)zend_object_store_get_object(getThis() TSRMLS_CC); long line_pos; if (zend_parse_parameters(ZEND_NUM_ARGS() TSRMLS_CC, "l", &line_pos) == FAILURE) { return; } if (line_pos < 0) { zend_throw_exception_ex(spl_ce_LogicException, 0 TSRMLS_CC, "Can't seek file %s to negative line %ld", intern->file_name, line_pos); RETURN_FALSE; } spl_filesystem_file_rewind(getThis(), intern TSRMLS_CC); while(intern->u.file.current_line_num < line_pos) { if (spl_filesystem_file_read_line(getThis(), intern, 1 TSRMLS_CC) == 
FAILURE) { break; } } } /* }}} */ /* {{{ Function/Class/Method definitions */ ZEND_BEGIN_ARG_INFO_EX(arginfo_file_object___construct, 0, 0, 1) ZEND_ARG_INFO(0, file_name) ZEND_ARG_INFO(0, open_mode) ZEND_ARG_INFO(0, use_include_path) ZEND_ARG_INFO(0, context) ZEND_END_ARG_INFO() ZEND_BEGIN_ARG_INFO(arginfo_file_object_setFlags, 0) ZEND_ARG_INFO(0, flags) ZEND_END_ARG_INFO() ZEND_BEGIN_ARG_INFO(arginfo_file_object_setMaxLineLen, 0) ZEND_ARG_INFO(0, max_len) ZEND_END_ARG_INFO() ZEND_BEGIN_ARG_INFO_EX(arginfo_file_object_fgetcsv, 0, 0, 0) ZEND_ARG_INFO(0, delimiter) ZEND_ARG_INFO(0, enclosure) ZEND_ARG_INFO(0, escape) ZEND_END_ARG_INFO() ZEND_BEGIN_ARG_INFO_EX(arginfo_file_object_fputcsv, 0, 0, 1) ZEND_ARG_INFO(0, fields) ZEND_ARG_INFO(0, delimiter) ZEND_ARG_INFO(0, enclosure) ZEND_ARG_INFO(0, escape) ZEND_END_ARG_INFO() ZEND_BEGIN_ARG_INFO_EX(arginfo_file_object_flock, 0, 0, 1) ZEND_ARG_INFO(0, operation) ZEND_ARG_INFO(1, wouldblock) ZEND_END_ARG_INFO() ZEND_BEGIN_ARG_INFO_EX(arginfo_file_object_fseek, 0, 0, 1) ZEND_ARG_INFO(0, pos) ZEND_ARG_INFO(0, whence) ZEND_END_ARG_INFO() ZEND_BEGIN_ARG_INFO_EX(arginfo_file_object_fgetss, 0, 0, 0) ZEND_ARG_INFO(0, allowable_tags) ZEND_END_ARG_INFO() ZEND_BEGIN_ARG_INFO_EX(arginfo_file_object_fscanf, 1, 0, 1) ZEND_ARG_INFO(0, format) ZEND_END_ARG_INFO() ZEND_BEGIN_ARG_INFO_EX(arginfo_file_object_fwrite, 0, 0, 1) ZEND_ARG_INFO(0, str) ZEND_ARG_INFO(0, length) ZEND_END_ARG_INFO() ZEND_BEGIN_ARG_INFO_EX(arginfo_file_object_fread, 0, 0, 1) ZEND_ARG_INFO(0, length) ZEND_END_ARG_INFO() ZEND_BEGIN_ARG_INFO_EX(arginfo_file_object_ftruncate, 0, 0, 1) ZEND_ARG_INFO(0, size) ZEND_END_ARG_INFO() ZEND_BEGIN_ARG_INFO_EX(arginfo_file_object_seek, 0, 0, 1) ZEND_ARG_INFO(0, line_pos) ZEND_END_ARG_INFO() static const zend_function_entry spl_SplFileObject_functions[] = { SPL_ME(SplFileObject, __construct, arginfo_file_object___construct, ZEND_ACC_PUBLIC) SPL_ME(SplFileObject, rewind, arginfo_splfileinfo_void, ZEND_ACC_PUBLIC) SPL_ME(SplFileObject, 
eof, arginfo_splfileinfo_void, ZEND_ACC_PUBLIC) SPL_ME(SplFileObject, valid, arginfo_splfileinfo_void, ZEND_ACC_PUBLIC) SPL_ME(SplFileObject, fgets, arginfo_splfileinfo_void, ZEND_ACC_PUBLIC) SPL_ME(SplFileObject, fgetcsv, arginfo_file_object_fgetcsv, ZEND_ACC_PUBLIC) SPL_ME(SplFileObject, fputcsv, arginfo_file_object_fputcsv, ZEND_ACC_PUBLIC) SPL_ME(SplFileObject, setCsvControl, arginfo_file_object_fgetcsv, ZEND_ACC_PUBLIC) SPL_ME(SplFileObject, getCsvControl, arginfo_splfileinfo_void, ZEND_ACC_PUBLIC) SPL_ME(SplFileObject, flock, arginfo_file_object_flock, ZEND_ACC_PUBLIC) SPL_ME(SplFileObject, fflush, arginfo_splfileinfo_void, ZEND_ACC_PUBLIC) SPL_ME(SplFileObject, ftell, arginfo_splfileinfo_void, ZEND_ACC_PUBLIC) SPL_ME(SplFileObject, fseek, arginfo_file_object_fseek, ZEND_ACC_PUBLIC) SPL_ME(SplFileObject, fgetc, arginfo_splfileinfo_void, ZEND_ACC_PUBLIC) SPL_ME(SplFileObject, fpassthru, arginfo_splfileinfo_void, ZEND_ACC_PUBLIC) SPL_ME(SplFileObject, fgetss, arginfo_file_object_fgetss, ZEND_ACC_PUBLIC) SPL_ME(SplFileObject, fscanf, arginfo_file_object_fscanf, ZEND_ACC_PUBLIC) SPL_ME(SplFileObject, fwrite, arginfo_file_object_fwrite, ZEND_ACC_PUBLIC) SPL_ME(SplFileObject, fread, arginfo_file_object_fread, ZEND_ACC_PUBLIC) SPL_ME(SplFileObject, fstat, arginfo_splfileinfo_void, ZEND_ACC_PUBLIC) SPL_ME(SplFileObject, ftruncate, arginfo_file_object_ftruncate, ZEND_ACC_PUBLIC) SPL_ME(SplFileObject, current, arginfo_splfileinfo_void, ZEND_ACC_PUBLIC) SPL_ME(SplFileObject, key, arginfo_splfileinfo_void, ZEND_ACC_PUBLIC) SPL_ME(SplFileObject, next, arginfo_splfileinfo_void, ZEND_ACC_PUBLIC) SPL_ME(SplFileObject, setFlags, arginfo_file_object_setFlags, ZEND_ACC_PUBLIC) SPL_ME(SplFileObject, getFlags, arginfo_splfileinfo_void, ZEND_ACC_PUBLIC) SPL_ME(SplFileObject, setMaxLineLen, arginfo_file_object_setMaxLineLen, ZEND_ACC_PUBLIC) SPL_ME(SplFileObject, getMaxLineLen, arginfo_splfileinfo_void, ZEND_ACC_PUBLIC) SPL_ME(SplFileObject, hasChildren, arginfo_splfileinfo_void, 
ZEND_ACC_PUBLIC) SPL_ME(SplFileObject, getChildren, arginfo_splfileinfo_void, ZEND_ACC_PUBLIC) SPL_ME(SplFileObject, seek, arginfo_file_object_seek, ZEND_ACC_PUBLIC) /* mappings */ SPL_MA(SplFileObject, getCurrentLine, SplFileObject, fgets, arginfo_splfileinfo_void, ZEND_ACC_PUBLIC) SPL_MA(SplFileObject, __toString, SplFileObject, current, arginfo_splfileinfo_void, ZEND_ACC_PUBLIC) PHP_FE_END }; ZEND_BEGIN_ARG_INFO_EX(arginfo_temp_file_object___construct, 0, 0, 0) ZEND_ARG_INFO(0, max_memory) ZEND_END_ARG_INFO() static const zend_function_entry spl_SplTempFileObject_functions[] = { SPL_ME(SplTempFileObject, __construct, arginfo_temp_file_object___construct, ZEND_ACC_PUBLIC) PHP_FE_END }; /* }}} */ /* {{{ PHP_MINIT_FUNCTION(spl_directory) */ PHP_MINIT_FUNCTION(spl_directory) { REGISTER_SPL_STD_CLASS_EX(SplFileInfo, spl_filesystem_object_new, spl_SplFileInfo_functions); memcpy(&spl_filesystem_object_handlers, zend_get_std_object_handlers(), sizeof(zend_object_handlers)); spl_filesystem_object_handlers.clone_obj = spl_filesystem_object_clone; spl_filesystem_object_handlers.cast_object = spl_filesystem_object_cast; spl_filesystem_object_handlers.get_debug_info = spl_filesystem_object_get_debug_info; spl_ce_SplFileInfo->serialize = zend_class_serialize_deny; spl_ce_SplFileInfo->unserialize = zend_class_unserialize_deny; REGISTER_SPL_SUB_CLASS_EX(DirectoryIterator, SplFileInfo, spl_filesystem_object_new, spl_DirectoryIterator_functions); zend_class_implements(spl_ce_DirectoryIterator TSRMLS_CC, 1, zend_ce_iterator); REGISTER_SPL_IMPLEMENTS(DirectoryIterator, SeekableIterator); spl_ce_DirectoryIterator->get_iterator = spl_filesystem_dir_get_iterator; REGISTER_SPL_SUB_CLASS_EX(FilesystemIterator, DirectoryIterator, spl_filesystem_object_new, spl_FilesystemIterator_functions); REGISTER_SPL_CLASS_CONST_LONG(FilesystemIterator, "CURRENT_MODE_MASK", SPL_FILE_DIR_CURRENT_MODE_MASK); REGISTER_SPL_CLASS_CONST_LONG(FilesystemIterator, "CURRENT_AS_PATHNAME", 
SPL_FILE_DIR_CURRENT_AS_PATHNAME); REGISTER_SPL_CLASS_CONST_LONG(FilesystemIterator, "CURRENT_AS_FILEINFO", SPL_FILE_DIR_CURRENT_AS_FILEINFO); REGISTER_SPL_CLASS_CONST_LONG(FilesystemIterator, "CURRENT_AS_SELF", SPL_FILE_DIR_CURRENT_AS_SELF); REGISTER_SPL_CLASS_CONST_LONG(FilesystemIterator, "KEY_MODE_MASK", SPL_FILE_DIR_KEY_MODE_MASK); REGISTER_SPL_CLASS_CONST_LONG(FilesystemIterator, "KEY_AS_PATHNAME", SPL_FILE_DIR_KEY_AS_PATHNAME); REGISTER_SPL_CLASS_CONST_LONG(FilesystemIterator, "FOLLOW_SYMLINKS", SPL_FILE_DIR_FOLLOW_SYMLINKS); REGISTER_SPL_CLASS_CONST_LONG(FilesystemIterator, "KEY_AS_FILENAME", SPL_FILE_DIR_KEY_AS_FILENAME); REGISTER_SPL_CLASS_CONST_LONG(FilesystemIterator, "NEW_CURRENT_AND_KEY", SPL_FILE_DIR_KEY_AS_FILENAME|SPL_FILE_DIR_CURRENT_AS_FILEINFO); REGISTER_SPL_CLASS_CONST_LONG(FilesystemIterator, "OTHER_MODE_MASK", SPL_FILE_DIR_OTHERS_MASK); REGISTER_SPL_CLASS_CONST_LONG(FilesystemIterator, "SKIP_DOTS", SPL_FILE_DIR_SKIPDOTS); REGISTER_SPL_CLASS_CONST_LONG(FilesystemIterator, "UNIX_PATHS", SPL_FILE_DIR_UNIXPATHS); spl_ce_FilesystemIterator->get_iterator = spl_filesystem_tree_get_iterator; REGISTER_SPL_SUB_CLASS_EX(RecursiveDirectoryIterator, FilesystemIterator, spl_filesystem_object_new, spl_RecursiveDirectoryIterator_functions); REGISTER_SPL_IMPLEMENTS(RecursiveDirectoryIterator, RecursiveIterator); memcpy(&spl_filesystem_object_check_handlers, &spl_filesystem_object_handlers, sizeof(zend_object_handlers)); spl_filesystem_object_check_handlers.get_method = spl_filesystem_object_get_method_check; #ifdef HAVE_GLOB REGISTER_SPL_SUB_CLASS_EX(GlobIterator, FilesystemIterator, spl_filesystem_object_new_check, spl_GlobIterator_functions); REGISTER_SPL_IMPLEMENTS(GlobIterator, Countable); #endif REGISTER_SPL_SUB_CLASS_EX(SplFileObject, SplFileInfo, spl_filesystem_object_new_check, spl_SplFileObject_functions); REGISTER_SPL_IMPLEMENTS(SplFileObject, RecursiveIterator); REGISTER_SPL_IMPLEMENTS(SplFileObject, SeekableIterator); 
REGISTER_SPL_CLASS_CONST_LONG(SplFileObject, "DROP_NEW_LINE", SPL_FILE_OBJECT_DROP_NEW_LINE); REGISTER_SPL_CLASS_CONST_LONG(SplFileObject, "READ_AHEAD", SPL_FILE_OBJECT_READ_AHEAD); REGISTER_SPL_CLASS_CONST_LONG(SplFileObject, "SKIP_EMPTY", SPL_FILE_OBJECT_SKIP_EMPTY); REGISTER_SPL_CLASS_CONST_LONG(SplFileObject, "READ_CSV", SPL_FILE_OBJECT_READ_CSV); REGISTER_SPL_SUB_CLASS_EX(SplTempFileObject, SplFileObject, spl_filesystem_object_new_check, spl_SplTempFileObject_functions); return SUCCESS; } /* }}} */ /* * Local variables: * tab-width: 4 * c-basic-offset: 4 * End: * vim600: noet sw=4 ts=4 fdm=marker * vim<600: noet sw=4 ts=4 */
SPL_METHOD(SplFileInfo, setInfoClass) { spl_filesystem_object *intern = (spl_filesystem_object*)zend_object_store_get_object(getThis() TSRMLS_CC); zend_class_entry *ce = spl_ce_SplFileInfo; zend_error_handling error_handling; zend_replace_error_handling(EH_THROW, spl_ce_UnexpectedValueException, &error_handling TSRMLS_CC); if (zend_parse_parameters(ZEND_NUM_ARGS() TSRMLS_CC, "|C", &ce) == SUCCESS) { intern->info_class = ce; } zend_restore_error_handling(&error_handling TSRMLS_CC); }
SPL_METHOD(SplFileInfo, setInfoClass) { spl_filesystem_object *intern = (spl_filesystem_object*)zend_object_store_get_object(getThis() TSRMLS_CC); zend_class_entry *ce = spl_ce_SplFileInfo; zend_error_handling error_handling; zend_replace_error_handling(EH_THROW, spl_ce_UnexpectedValueException, &error_handling TSRMLS_CC); if (zend_parse_parameters(ZEND_NUM_ARGS() TSRMLS_CC, "|C", &ce) == SUCCESS) { intern->info_class = ce; } zend_restore_error_handling(&error_handling TSRMLS_CC); }
{'added': [(82, ''), (84, ''), (101, '\t\t}'), (137, '/* creates the object by'), (138, ' - allocating memory'), (143, ' called from'), (316, ''), (328, " Load the 'other' object"), (373, ''), (392, ''), (416, ''), (462, ''), (517, ''), (533, ''), (536, ''), (537, '\t\t\tif (ht && zend_parse_parameters(ZEND_NUM_ARGS() TSRMLS_CC, "|sbr",'), (538, '\t\t\t\t\t&intern->u.file.open_mode, &intern->u.file.open_mode_len,'), (547, ''), (556, '\tcase SPL_FS_DIR:'), (620, ''), (668, ''), (674, ''), (754, ''), (772, ''), (802, ''), (862, ''), (878, ''), (894, ''), (900, ''), (914, ''), (1022, '/* }}}*/'), (1032, ''), (1068, ''), (1087, ''), (1110, ''), (1124, ''), (1125, ' When the constructor gets called the object is already created'), (1143, ''), (1147, ''), (1252, ''), (1300, ''), (1310, ''), (1313, '\t} else {'), (1351, ''), (1369, ''), (1387, ''), (1405, ''), (1466, ''), (1522, ''), (1526, ''), (1557, ''), (1578, ''), (1614, ''), (1669, ''), (1704, ''), (1722, ''), (1736, ''), (1806, ''), (1827, ''), (1871, ''), (1927, 'ZEND_BEGIN_ARG_INFO(arginfo_info___construct, 0)'), (1986, 'ZEND_BEGIN_ARG_INFO(arginfo_dir___construct, 0)'), (1990, 'ZEND_BEGIN_ARG_INFO(arginfo_dir_it_seek, 0)'), (2012, 'ZEND_BEGIN_ARG_INFO_EX(arginfo_r_dir___construct, 0, 0, 1)'), (2061, ''), (2089, ''), (2110, ''), (2136, ''), (2162, ''), (2166, ''), (2240, ''), (2263, ''), (2297, '\tif (zend_parse_parameters(ZEND_NUM_ARGS() TSRMLS_CC, "p|sbr!",'), (2299, '\t\t\t&intern->u.file.open_mode, &intern->u.file.open_mode_len,'), (2300, '\t\t\t&use_include_path, &intern->u.file.zcontext) == FAILURE) {'), (2306, ''), (2371, ''), (2384, ''), (2397, ''), (2410, ''), (2427, ''), (2443, ''), (2464, ''), (2481, ''), (2533, ''), (2542, ''), (2557, ''), (2588, ''), (2630, ''), (2673, ''), (2716, ''), (2745, '\tspl_filesystem_object *intern = (spl_filesystem_object*)zend_object_store_get_object(getThis() TSRMLS_CC);'), (2875, '\tif (length > INT_MAX) {'), (2876, '\t\tphp_error_docref(NULL TSRMLS_CC, E_WARNING, 
"Length parameter must be no more than %d", INT_MAX);'), (2877, '\t\tRETURN_FALSE;'), (2878, '\t}'), (2899, ''), (2908, ''), (2918, ''), (2924, '\t\tRETURN_FALSE;'), (2926, ''), (2928, ''), (2965, 'ZEND_BEGIN_ARG_INFO_EX(arginfo_file_object_flock, 0, 0, 1)'), (2970, 'ZEND_BEGIN_ARG_INFO_EX(arginfo_file_object_fseek, 0, 0, 1)'), (2975, 'ZEND_BEGIN_ARG_INFO_EX(arginfo_file_object_fgetss, 0, 0, 0)'), (2979, 'ZEND_BEGIN_ARG_INFO_EX(arginfo_file_object_fscanf, 1, 0, 1)'), (2983, 'ZEND_BEGIN_ARG_INFO_EX(arginfo_file_object_fwrite, 0, 0, 1)'), (2992, 'ZEND_BEGIN_ARG_INFO_EX(arginfo_file_object_ftruncate, 0, 0, 1)'), (2996, 'ZEND_BEGIN_ARG_INFO_EX(arginfo_file_object_seek, 0, 0, 1)'), (3085, ''), (3102, '')], 'deleted': [(82, ''), (84, ''), (101, '\t\t}'), (137, '/* creates the object by'), (138, ' - allocating memory'), (143, ' called from'), (316, ''), (328, " Load the 'other' object"), (373, ''), (392, ''), (416, ''), (462, ''), (517, ''), (533, ''), (536, ''), (537, '\t\t\tif (ht && zend_parse_parameters(ZEND_NUM_ARGS() TSRMLS_CC, "|sbr",'), (538, '\t\t\t\t\t&intern->u.file.open_mode, &intern->u.file.open_mode_len,'), (547, ''), (556, '\tcase SPL_FS_DIR:'), (620, ''), (668, ''), (674, ''), (754, ''), (772, ''), (802, ''), (862, ''), (878, ''), (894, ''), (900, ''), (914, ''), (1022, '/* }}}*/'), (1032, ''), (1068, ''), (1087, ''), (1110, ''), (1124, ''), (1125, ' When the constructor gets called the object is already created'), (1143, ''), (1147, ''), (1252, ''), (1300, ''), (1310, ''), (1313, '\t} else {'), (1351, ''), (1369, ''), (1387, ''), (1405, ''), (1466, ''), (1522, ''), (1526, ''), (1557, ''), (1578, ''), (1614, ''), (1669, ''), (1704, ''), (1722, ''), (1736, ''), (1806, ''), (1827, ''), (1871, ''), (1927, 'ZEND_BEGIN_ARG_INFO(arginfo_info___construct, 0)'), (1986, 'ZEND_BEGIN_ARG_INFO(arginfo_dir___construct, 0)'), (1990, 'ZEND_BEGIN_ARG_INFO(arginfo_dir_it_seek, 0)'), (2012, 'ZEND_BEGIN_ARG_INFO_EX(arginfo_r_dir___construct, 0, 0, 1)'), (2061, ''), (2089, 
''), (2110, ''), (2136, ''), (2162, ''), (2166, ''), (2240, ''), (2263, ''), (2297, '\tif (zend_parse_parameters(ZEND_NUM_ARGS() TSRMLS_CC, "p|sbr!",'), (2299, '\t\t\t&intern->u.file.open_mode, &intern->u.file.open_mode_len,'), (2300, '\t\t\t&use_include_path, &intern->u.file.zcontext) == FAILURE) {'), (2306, ''), (2371, ''), (2384, ''), (2397, ''), (2410, ''), (2427, ''), (2443, ''), (2464, ''), (2481, ''), (2533, ''), (2542, ''), (2557, ''), (2588, ''), (2630, ''), (2673, ''), (2716, ''), (2745, '\tspl_filesystem_object *intern = (spl_filesystem_object*)zend_object_store_get_object(getThis() TSRMLS_CC);'), (2895, ''), (2904, ''), (2914, ''), (2920, '\t\tRETURN_FALSE;'), (2922, ''), (2924, ''), (2961, 'ZEND_BEGIN_ARG_INFO_EX(arginfo_file_object_flock, 0, 0, 1)'), (2966, 'ZEND_BEGIN_ARG_INFO_EX(arginfo_file_object_fseek, 0, 0, 1)'), (2971, 'ZEND_BEGIN_ARG_INFO_EX(arginfo_file_object_fgetss, 0, 0, 0)'), (2975, 'ZEND_BEGIN_ARG_INFO_EX(arginfo_file_object_fscanf, 1, 0, 1)'), (2979, 'ZEND_BEGIN_ARG_INFO_EX(arginfo_file_object_fwrite, 0, 0, 1)'), (2988, 'ZEND_BEGIN_ARG_INFO_EX(arginfo_file_object_ftruncate, 0, 0, 1)'), (2992, 'ZEND_BEGIN_ARG_INFO_EX(arginfo_file_object_seek, 0, 0, 1)'), (3081, ''), (3098, '')]}
111
107
2,194
15,773
11
76
2
https://github.com/php/php-src
CVE-2016-5770
CWE-190
486
kernel_util.cc
C++
tflite::GetMutableInput
/* Copyright 2017 The TensorFlow Authors. All Rights Reserved. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. You may obtain a copy of the License at http://www.apache.org/licenses/LICENSE-2.0 Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License. ==============================================================================*/ #include "tensorflow/lite/kernels/kernel_util.h" #include <stdint.h> #include <stdlib.h> #include <algorithm> #include <limits> #include <memory> #include "tensorflow/lite/c/builtin_op_data.h" #include "tensorflow/lite/c/common.h" #include "tensorflow/lite/kernels/internal/cppmath.h" #include "tensorflow/lite/kernels/internal/quantization_util.h" namespace tflite { namespace { inline TfLiteTensor* GetMutableInput(const TfLiteContext* context, const TfLiteNode* node, int index) { if (context->tensors != nullptr) { return &context->tensors[node->inputs->data[index]]; } else { return context->GetTensor(context, node->inputs->data[index]); } } } // anonymous namespace. const TfLiteTensor* GetInput(const TfLiteContext* context, const TfLiteNode* node, int index) { return GetMutableInput(context, node, index); } TfLiteTensor* GetVariableInput(TfLiteContext* context, const TfLiteNode* node, int index) { TfLiteTensor* tensor = GetMutableInput(context, node, index); return tensor->is_variable ? 
tensor : nullptr; } TfLiteTensor* GetOutput(TfLiteContext* context, const TfLiteNode* node, int index) { if (context->tensors != nullptr) { return &context->tensors[node->outputs->data[index]]; } else { return context->GetTensor(context, node->outputs->data[index]); } } const TfLiteTensor* GetOptionalInputTensor(const TfLiteContext* context, const TfLiteNode* node, int index) { const bool use_tensor = index < node->inputs->size && node->inputs->data[index] != kTfLiteOptionalTensor; if (use_tensor) { return GetMutableInput(context, node, index); } return nullptr; } // Per-axis TfLiteStatus PopulateConvolutionQuantizationParams( TfLiteContext* context, const TfLiteTensor* input, const TfLiteTensor* filter, const TfLiteTensor* bias, TfLiteTensor* output, const TfLiteFusedActivation& activation, int32_t* multiplier, int* shift, int32_t* output_activation_min, int32_t* output_activation_max, int32_t* per_channel_multiplier, int* per_channel_shift) { const auto* affine_quantization = reinterpret_cast<TfLiteAffineQuantization*>(filter->quantization.params); return PopulateConvolutionQuantizationParams( context, input, filter, bias, output, activation, multiplier, shift, output_activation_min, output_activation_max, per_channel_multiplier, per_channel_shift, affine_quantization->scale->size); } // Per-axis & per-tensor TfLiteStatus PopulateConvolutionQuantizationParams( TfLiteContext* context, const TfLiteTensor* input, const TfLiteTensor* filter, const TfLiteTensor* bias, TfLiteTensor* output, const TfLiteFusedActivation& activation, int32_t* multiplier, int* shift, int32_t* output_activation_min, int32_t* output_activation_max, int32_t* per_channel_multiplier, int* per_channel_shift, int num_channels) { TF_LITE_ENSURE_EQ(context, input->quantization.type, kTfLiteAffineQuantization); TF_LITE_ENSURE_EQ(context, filter->quantization.type, kTfLiteAffineQuantization); // TODO(jianlijianli): Enable bias type check and bias scale == input scale // * filter scale for each 
channel in affine quantization once bias // quantization is properly populated. // TF_LITE_ENSURE_EQ(context, bias->quantization.type, // kTfLiteAffineQuantization); // Check data type. const auto* affine_quantization = reinterpret_cast<TfLiteAffineQuantization*>(filter->quantization.params); TF_LITE_ENSURE(context, affine_quantization); TF_LITE_ENSURE(context, affine_quantization->scale); const bool is_per_channel = affine_quantization->scale->size > 1; if (is_per_channel) { // Currently only Int8/Int16 is supported for per channel quantization. TF_LITE_ENSURE(context, input->type == kTfLiteInt8 || input->type == kTfLiteInt16); TF_LITE_ENSURE_EQ(context, filter->type, kTfLiteInt8); TF_LITE_ENSURE_EQ(context, affine_quantization->scale->size, num_channels); TF_LITE_ENSURE_EQ( context, num_channels, filter->dims->data[affine_quantization->quantized_dimension]); } // Populate multiplier and shift using affine quantization. const float input_scale = input->params.scale; const float output_scale = output->params.scale; const float* filter_scales = affine_quantization->scale->data; for (int i = 0; i < num_channels; ++i) { // If per-tensor quantization parameter is specified, broadcast it along the // quantization dimension (channels_out). const float scale = is_per_channel ? filter_scales[i] : filter_scales[0]; const double filter_scale = static_cast<double>(scale); const double effective_output_scale = static_cast<double>(input_scale) * filter_scale / static_cast<double>(output_scale); int32_t significand; int channel_shift; QuantizeMultiplier(effective_output_scale, &significand, &channel_shift); per_channel_multiplier[i] = significand; per_channel_shift[i] = channel_shift; } // Populate scalar quantization parameters. // This check on legacy quantization parameters is kept only for backward // compatibility. if (input->type == kTfLiteUInt8) { // Check bias scale == input scale * filter scale. 
double real_multiplier = 0.0; TF_LITE_ENSURE_STATUS(GetQuantizedConvolutionMultipler( context, input, filter, bias, output, &real_multiplier)); int exponent; // Populate quantization parameters with multiplier and shift. QuantizeMultiplier(real_multiplier, multiplier, &exponent); *shift = -exponent; } if (input->type == kTfLiteInt8 || input->type == kTfLiteUInt8 || input->type == kTfLiteInt16) { TF_LITE_ENSURE_STATUS(CalculateActivationRangeQuantized( context, activation, output, output_activation_min, output_activation_max)); } return kTfLiteOk; } TfLiteStatus GetQuantizedConvolutionMultipler(TfLiteContext* context, const TfLiteTensor* input, const TfLiteTensor* filter, const TfLiteTensor* bias, TfLiteTensor* output, double* multiplier) { const double input_product_scale = static_cast<double>(input->params.scale) * static_cast<double>(filter->params.scale); // TODO(ahentz): The following conditions must be guaranteed by the training // pipeline. if (bias) { const double bias_scale = static_cast<double>(bias->params.scale); // Here we're making sure the input_product_scale & bias_scale are about the // same. Since we have: // (output - output_zp) * output_scale = // input_product_scale * input_product + bias * bias_scale ---- (0) // // (0) equals: // (input_product + bias) * input_product_scale ----- (1) // + // bias * (bias_scale - input_product_scale) ------ (2) // // For the real kernel computation, we're doing (1), so we really need to // make sure (2) has minimum impact on the output, so: // bias * (bias_scale - input_product_scale) / output_scale should be // a small number for an integer. // Since normally bias should be within a small range. // We should expect (bias_scale - input_product_scale) / output_scale to // be a small number like 0.02. 
const double scale_diff = std::abs(input_product_scale - bias_scale); const double output_scale = static_cast<double>(output->params.scale); TF_LITE_ENSURE(context, scale_diff / output_scale <= 0.02); } return GetQuantizedConvolutionMultipler(context, input, filter, output, multiplier); } TfLiteStatus GetQuantizedConvolutionMultipler(TfLiteContext* context, const TfLiteTensor* input, const TfLiteTensor* filter, TfLiteTensor* output, double* multiplier) { const double input_product_scale = static_cast<double>(input->params.scale * filter->params.scale); TF_LITE_ENSURE(context, input_product_scale >= 0); *multiplier = input_product_scale / static_cast<double>(output->params.scale); return kTfLiteOk; } namespace { void CalculateActivationRangeQuantizedImpl(TfLiteFusedActivation activation, int32_t qmin, int32_t qmax, TfLiteTensor* output, int32_t* act_min, int32_t* act_max) { const auto scale = output->params.scale; const auto zero_point = output->params.zero_point; auto quantize = [scale, zero_point](float f) { return zero_point + static_cast<int32_t>(TfLiteRound(f / scale)); }; if (activation == kTfLiteActRelu) { *act_min = std::max(qmin, quantize(0.0)); *act_max = qmax; } else if (activation == kTfLiteActRelu6) { *act_min = std::max(qmin, quantize(0.0)); *act_max = std::min(qmax, quantize(6.0)); } else if (activation == kTfLiteActReluN1To1) { *act_min = std::max(qmin, quantize(-1.0)); *act_max = std::min(qmax, quantize(1.0)); } else { *act_min = qmin; *act_max = qmax; } } } // namespace TfLiteStatus CalculateActivationRangeQuantized(TfLiteContext* context, TfLiteFusedActivation activation, TfLiteTensor* output, int32_t* act_min, int32_t* act_max) { int32_t qmin = 0; int32_t qmax = 0; if (output->type == kTfLiteUInt8) { qmin = std::numeric_limits<uint8_t>::min(); qmax = std::numeric_limits<uint8_t>::max(); } else if (output->type == kTfLiteInt8) { qmin = std::numeric_limits<int8_t>::min(); qmax = std::numeric_limits<int8_t>::max(); } else if (output->type == 
kTfLiteInt16) { qmin = std::numeric_limits<int16_t>::min(); qmax = std::numeric_limits<int16_t>::max(); } else { TF_LITE_ENSURE(context, false); } CalculateActivationRangeQuantizedImpl(activation, qmin, qmax, output, act_min, act_max); return kTfLiteOk; } bool HaveSameShapes(const TfLiteTensor* input1, const TfLiteTensor* input2) { return TfLiteIntArrayEqual(input1->dims, input2->dims); } // TODO(petewarden): Having macros around this is ugly, look at other strategies // before replicating this approach elsewhere. #ifndef TF_LITE_STATIC_MEMORY TfLiteStatus CalculateShapeForBroadcast(TfLiteContext* context, const TfLiteTensor* input1, const TfLiteTensor* input2, TfLiteIntArray** output_shape) { int dims1 = NumDimensions(input1); int dims2 = NumDimensions(input2); int out_dims = std::max(dims1, dims2); if (NumElements(input1) == 0) { *output_shape = TfLiteIntArrayCopy(input1->dims); return kTfLiteOk; } std::unique_ptr<TfLiteIntArray, void (*)(TfLiteIntArray*)> shape( TfLiteIntArrayCreate(out_dims), TfLiteIntArrayFree); for (int i = 0; i < out_dims; ++i) { int d1 = i >= dims1 ? 1 : SizeOfDimension(input1, dims1 - i - 1); int d2 = i >= dims2 ? 1 : SizeOfDimension(input2, dims2 - i - 1); TF_LITE_ENSURE(context, d1 == d2 || d1 == 1 || d2 == 1); shape->data[out_dims - i - 1] = std::max(d1, d2); } *output_shape = shape.release(); return kTfLiteOk; } TfLiteStatus CalculateShapeForBroadcast(TfLiteContext* context, const TfLiteTensor* input1, const TfLiteTensor* input2, const TfLiteTensor* input3, TfLiteIntArray** output_shape) { int dims1 = NumDimensions(input1); int dims2 = NumDimensions(input2); int dims3 = NumDimensions(input3); int out_dims = std::max(std::max(dims1, dims2), dims3); std::unique_ptr<TfLiteIntArray, void (*)(TfLiteIntArray*)> shape( TfLiteIntArrayCreate(out_dims), TfLiteIntArrayFree); for (int i = 0; i < out_dims; ++i) { int d1 = i >= dims1 ? 1 : SizeOfDimension(input1, dims1 - i - 1); int d2 = i >= dims2 ? 
1 : SizeOfDimension(input2, dims2 - i - 1); int d3 = i >= dims3 ? 1 : SizeOfDimension(input3, dims3 - i - 1); int max_value = std::max(std::max(d1, d2), d3); TF_LITE_ENSURE(context, d1 == 1 || d1 == max_value); TF_LITE_ENSURE(context, d2 == 1 || d2 == max_value); TF_LITE_ENSURE(context, d3 == 1 || d3 == max_value); shape->data[out_dims - i - 1] = max_value; } *output_shape = shape.release(); return kTfLiteOk; } #endif // TF_LITE_STATIC_MEMORY } // namespace tflite
/* Copyright 2017 The TensorFlow Authors. All Rights Reserved. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. You may obtain a copy of the License at http://www.apache.org/licenses/LICENSE-2.0 Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License. ==============================================================================*/ #include "tensorflow/lite/kernels/kernel_util.h" #include <stdint.h> #include <stdlib.h> #include <algorithm> #include <limits> #include <memory> #include "tensorflow/lite/c/builtin_op_data.h" #include "tensorflow/lite/c/common.h" #include "tensorflow/lite/kernels/internal/cppmath.h" #include "tensorflow/lite/kernels/internal/quantization_util.h" namespace tflite { namespace { inline TfLiteTensor* GetMutableInput(const TfLiteContext* context, const TfLiteNode* node, int index) { if (index >= 0 && index < node->inputs->size) { const int tensor_index = node->inputs->data[index]; if (tensor_index != kTfLiteOptionalTensor) { if (context->tensors != nullptr) { return &context->tensors[tensor_index]; } else { return context->GetTensor(context, tensor_index); } } } return nullptr; } } // anonymous namespace. const TfLiteTensor* GetInput(const TfLiteContext* context, const TfLiteNode* node, int index) { return GetMutableInput(context, node, index); } TfLiteTensor* GetVariableInput(TfLiteContext* context, const TfLiteNode* node, int index) { TfLiteTensor* tensor = GetMutableInput(context, node, index); return tensor->is_variable ? 
tensor : nullptr; } TfLiteTensor* GetOutput(TfLiteContext* context, const TfLiteNode* node, int index) { if (index >= 0 && index < node->outputs->size) { const int tensor_index = node->outputs->data[index]; if (tensor_index != kTfLiteOptionalTensor) { if (context->tensors != nullptr) { return &context->tensors[tensor_index]; } else { return context->GetTensor(context, tensor_index); } } } return nullptr; } const TfLiteTensor* GetOptionalInputTensor(const TfLiteContext* context, const TfLiteNode* node, int index) { const bool use_tensor = index < node->inputs->size && node->inputs->data[index] != kTfLiteOptionalTensor; if (use_tensor) { return GetMutableInput(context, node, index); } return nullptr; } // Per-axis TfLiteStatus PopulateConvolutionQuantizationParams( TfLiteContext* context, const TfLiteTensor* input, const TfLiteTensor* filter, const TfLiteTensor* bias, TfLiteTensor* output, const TfLiteFusedActivation& activation, int32_t* multiplier, int* shift, int32_t* output_activation_min, int32_t* output_activation_max, int32_t* per_channel_multiplier, int* per_channel_shift) { const auto* affine_quantization = reinterpret_cast<TfLiteAffineQuantization*>(filter->quantization.params); return PopulateConvolutionQuantizationParams( context, input, filter, bias, output, activation, multiplier, shift, output_activation_min, output_activation_max, per_channel_multiplier, per_channel_shift, affine_quantization->scale->size); } // Per-axis & per-tensor TfLiteStatus PopulateConvolutionQuantizationParams( TfLiteContext* context, const TfLiteTensor* input, const TfLiteTensor* filter, const TfLiteTensor* bias, TfLiteTensor* output, const TfLiteFusedActivation& activation, int32_t* multiplier, int* shift, int32_t* output_activation_min, int32_t* output_activation_max, int32_t* per_channel_multiplier, int* per_channel_shift, int num_channels) { TF_LITE_ENSURE_EQ(context, input->quantization.type, kTfLiteAffineQuantization); TF_LITE_ENSURE_EQ(context, 
filter->quantization.type, kTfLiteAffineQuantization); // TODO(jianlijianli): Enable bias type check and bias scale == input scale // * filter scale for each channel in affine quantization once bias // quantization is properly populated. // TF_LITE_ENSURE_EQ(context, bias->quantization.type, // kTfLiteAffineQuantization); // Check data type. const auto* affine_quantization = reinterpret_cast<TfLiteAffineQuantization*>(filter->quantization.params); TF_LITE_ENSURE(context, affine_quantization); TF_LITE_ENSURE(context, affine_quantization->scale); const bool is_per_channel = affine_quantization->scale->size > 1; if (is_per_channel) { // Currently only Int8/Int16 is supported for per channel quantization. TF_LITE_ENSURE(context, input->type == kTfLiteInt8 || input->type == kTfLiteInt16); TF_LITE_ENSURE_EQ(context, filter->type, kTfLiteInt8); TF_LITE_ENSURE_EQ(context, affine_quantization->scale->size, num_channels); TF_LITE_ENSURE_EQ( context, num_channels, filter->dims->data[affine_quantization->quantized_dimension]); } // Populate multiplier and shift using affine quantization. const float input_scale = input->params.scale; const float output_scale = output->params.scale; const float* filter_scales = affine_quantization->scale->data; for (int i = 0; i < num_channels; ++i) { // If per-tensor quantization parameter is specified, broadcast it along the // quantization dimension (channels_out). const float scale = is_per_channel ? filter_scales[i] : filter_scales[0]; const double filter_scale = static_cast<double>(scale); const double effective_output_scale = static_cast<double>(input_scale) * filter_scale / static_cast<double>(output_scale); int32_t significand; int channel_shift; QuantizeMultiplier(effective_output_scale, &significand, &channel_shift); per_channel_multiplier[i] = significand; per_channel_shift[i] = channel_shift; } // Populate scalar quantization parameters. // This check on legacy quantization parameters is kept only for backward // compatibility. 
if (input->type == kTfLiteUInt8) { // Check bias scale == input scale * filter scale. double real_multiplier = 0.0; TF_LITE_ENSURE_STATUS(GetQuantizedConvolutionMultipler( context, input, filter, bias, output, &real_multiplier)); int exponent; // Populate quantization parameters with multiplier and shift. QuantizeMultiplier(real_multiplier, multiplier, &exponent); *shift = -exponent; } if (input->type == kTfLiteInt8 || input->type == kTfLiteUInt8 || input->type == kTfLiteInt16) { TF_LITE_ENSURE_STATUS(CalculateActivationRangeQuantized( context, activation, output, output_activation_min, output_activation_max)); } return kTfLiteOk; } TfLiteStatus GetQuantizedConvolutionMultipler(TfLiteContext* context, const TfLiteTensor* input, const TfLiteTensor* filter, const TfLiteTensor* bias, TfLiteTensor* output, double* multiplier) { const double input_product_scale = static_cast<double>(input->params.scale) * static_cast<double>(filter->params.scale); // TODO(ahentz): The following conditions must be guaranteed by the training // pipeline. if (bias) { const double bias_scale = static_cast<double>(bias->params.scale); // Here we're making sure the input_product_scale & bias_scale are about the // same. Since we have: // (output - output_zp) * output_scale = // input_product_scale * input_product + bias * bias_scale ---- (0) // // (0) equals: // (input_product + bias) * input_product_scale ----- (1) // + // bias * (bias_scale - input_product_scale) ------ (2) // // For the real kernel computation, we're doing (1), so we really need to // make sure (2) has minimum impact on the output, so: // bias * (bias_scale - input_product_scale) / output_scale should be // a small number for an integer. // Since normally bias should be within a small range. // We should expect (bias_scale - input_product_scale) / output_scale to // be a small number like 0.02. 
const double scale_diff = std::abs(input_product_scale - bias_scale); const double output_scale = static_cast<double>(output->params.scale); TF_LITE_ENSURE(context, scale_diff / output_scale <= 0.02); } return GetQuantizedConvolutionMultipler(context, input, filter, output, multiplier); } TfLiteStatus GetQuantizedConvolutionMultipler(TfLiteContext* context, const TfLiteTensor* input, const TfLiteTensor* filter, TfLiteTensor* output, double* multiplier) { const double input_product_scale = static_cast<double>(input->params.scale * filter->params.scale); TF_LITE_ENSURE(context, input_product_scale >= 0); *multiplier = input_product_scale / static_cast<double>(output->params.scale); return kTfLiteOk; } namespace { void CalculateActivationRangeQuantizedImpl(TfLiteFusedActivation activation, int32_t qmin, int32_t qmax, TfLiteTensor* output, int32_t* act_min, int32_t* act_max) { const auto scale = output->params.scale; const auto zero_point = output->params.zero_point; auto quantize = [scale, zero_point](float f) { return zero_point + static_cast<int32_t>(TfLiteRound(f / scale)); }; if (activation == kTfLiteActRelu) { *act_min = std::max(qmin, quantize(0.0)); *act_max = qmax; } else if (activation == kTfLiteActRelu6) { *act_min = std::max(qmin, quantize(0.0)); *act_max = std::min(qmax, quantize(6.0)); } else if (activation == kTfLiteActReluN1To1) { *act_min = std::max(qmin, quantize(-1.0)); *act_max = std::min(qmax, quantize(1.0)); } else { *act_min = qmin; *act_max = qmax; } } } // namespace TfLiteStatus CalculateActivationRangeQuantized(TfLiteContext* context, TfLiteFusedActivation activation, TfLiteTensor* output, int32_t* act_min, int32_t* act_max) { int32_t qmin = 0; int32_t qmax = 0; if (output->type == kTfLiteUInt8) { qmin = std::numeric_limits<uint8_t>::min(); qmax = std::numeric_limits<uint8_t>::max(); } else if (output->type == kTfLiteInt8) { qmin = std::numeric_limits<int8_t>::min(); qmax = std::numeric_limits<int8_t>::max(); } else if (output->type == 
kTfLiteInt16) { qmin = std::numeric_limits<int16_t>::min(); qmax = std::numeric_limits<int16_t>::max(); } else { TF_LITE_ENSURE(context, false); } CalculateActivationRangeQuantizedImpl(activation, qmin, qmax, output, act_min, act_max); return kTfLiteOk; } bool HaveSameShapes(const TfLiteTensor* input1, const TfLiteTensor* input2) { return TfLiteIntArrayEqual(input1->dims, input2->dims); } // TODO(petewarden): Having macros around this is ugly, look at other strategies // before replicating this approach elsewhere. #ifndef TF_LITE_STATIC_MEMORY TfLiteStatus CalculateShapeForBroadcast(TfLiteContext* context, const TfLiteTensor* input1, const TfLiteTensor* input2, TfLiteIntArray** output_shape) { int dims1 = NumDimensions(input1); int dims2 = NumDimensions(input2); int out_dims = std::max(dims1, dims2); if (NumElements(input1) == 0) { *output_shape = TfLiteIntArrayCopy(input1->dims); return kTfLiteOk; } std::unique_ptr<TfLiteIntArray, void (*)(TfLiteIntArray*)> shape( TfLiteIntArrayCreate(out_dims), TfLiteIntArrayFree); for (int i = 0; i < out_dims; ++i) { int d1 = i >= dims1 ? 1 : SizeOfDimension(input1, dims1 - i - 1); int d2 = i >= dims2 ? 1 : SizeOfDimension(input2, dims2 - i - 1); TF_LITE_ENSURE(context, d1 == d2 || d1 == 1 || d2 == 1); shape->data[out_dims - i - 1] = std::max(d1, d2); } *output_shape = shape.release(); return kTfLiteOk; } TfLiteStatus CalculateShapeForBroadcast(TfLiteContext* context, const TfLiteTensor* input1, const TfLiteTensor* input2, const TfLiteTensor* input3, TfLiteIntArray** output_shape) { int dims1 = NumDimensions(input1); int dims2 = NumDimensions(input2); int dims3 = NumDimensions(input3); int out_dims = std::max(std::max(dims1, dims2), dims3); std::unique_ptr<TfLiteIntArray, void (*)(TfLiteIntArray*)> shape( TfLiteIntArrayCreate(out_dims), TfLiteIntArrayFree); for (int i = 0; i < out_dims; ++i) { int d1 = i >= dims1 ? 1 : SizeOfDimension(input1, dims1 - i - 1); int d2 = i >= dims2 ? 
1 : SizeOfDimension(input2, dims2 - i - 1); int d3 = i >= dims3 ? 1 : SizeOfDimension(input3, dims3 - i - 1); int max_value = std::max(std::max(d1, d2), d3); TF_LITE_ENSURE(context, d1 == 1 || d1 == max_value); TF_LITE_ENSURE(context, d2 == 1 || d2 == max_value); TF_LITE_ENSURE(context, d3 == 1 || d3 == max_value); shape->data[out_dims - i - 1] = max_value; } *output_shape = shape.release(); return kTfLiteOk; } #endif // TF_LITE_STATIC_MEMORY } // namespace tflite
inline TfLiteTensor* GetMutableInput(const TfLiteContext* context, const TfLiteNode* node, int index) { if (context->tensors != nullptr) { return &context->tensors[node->inputs->data[index]]; } else { return context->GetTensor(context, node->inputs->data[index]); } }
inline TfLiteTensor* GetMutableInput(const TfLiteContext* context, const TfLiteNode* node, int index) { if (index >= 0 && index < node->inputs->size) { const int tensor_index = node->inputs->data[index]; if (tensor_index != kTfLiteOptionalTensor) { if (context->tensors != nullptr) { return &context->tensors[tensor_index]; } else { return context->GetTensor(context, tensor_index); } } } return nullptr; }
{'added': [(35, ' if (index >= 0 && index < node->inputs->size) {'), (36, ' const int tensor_index = node->inputs->data[index];'), (37, ' if (tensor_index != kTfLiteOptionalTensor) {'), (38, ' if (context->tensors != nullptr) {'), (39, ' return &context->tensors[tensor_index];'), (40, ' } else {'), (41, ' return context->GetTensor(context, tensor_index);'), (42, ' }'), (43, ' }'), (45, ' return nullptr;'), (63, ' if (index >= 0 && index < node->outputs->size) {'), (64, ' const int tensor_index = node->outputs->data[index];'), (65, ' if (tensor_index != kTfLiteOptionalTensor) {'), (66, ' if (context->tensors != nullptr) {'), (67, ' return &context->tensors[tensor_index];'), (68, ' } else {'), (69, ' return context->GetTensor(context, tensor_index);'), (70, ' }'), (71, ' }'), (73, ' return nullptr;')], 'deleted': [(35, ' if (context->tensors != nullptr) {'), (36, ' return &context->tensors[node->inputs->data[index]];'), (37, ' } else {'), (38, ' return context->GetTensor(context, node->inputs->data[index]);'), (57, ' if (context->tensors != nullptr) {'), (58, ' return &context->tensors[node->outputs->data[index]];'), (59, ' } else {'), (60, ' return context->GetTensor(context, node->outputs->data[index]);')]}
20
8
253
1,956
8
63
2
https://github.com/tensorflow/tensorflow
CVE-2020-15211
CWE-125
2,315
quantized_resize_bilinear_op.cc
C++
tensorflow::QuantizedResizeBilinearOp::Compute
/* Copyright 2017 The TensorFlow Authors. All Rights Reserved. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. You may obtain a copy of the License at http://www.apache.org/licenses/LICENSE-2.0 Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License. ==============================================================================*/ // Implements a quantized version of the resize bilinear op. #define EIGEN_USE_THREADS #if defined(__ARM_NEON__) || defined(__ARM_NEON) #define USE_NEON #define QUANTIZED_RESIZE_BILINEAR_USE_NEON #include <arm_neon.h> #endif #include "tensorflow/core/framework/op_kernel.h" #include "tensorflow/core/framework/types.h" #include "tensorflow/core/kernels/quantization_utils.h" #include "tensorflow/core/platform/macros.h" #include "tensorflow/core/util/image_resizer_state.h" namespace tensorflow { static constexpr bool USE_REFERENCE = false; namespace { // Compute the interpolation indices only once. 
template <typename T_SCALE> struct InterpolationCache { std::vector<int64> lower; // Lower source index used in the interpolation std::vector<int64> upper; // Upper source index used in the interpolation // 1-D linear interpolation scale (see: // https://en.wikipedia.org/wiki/Bilinear_interpolation) std::vector<float> lerp; std::vector<T_SCALE> ilerp; }; template <typename T_SCALE, typename Scaler> inline void ComputeInterpolationWeights( const int64 out_size, const int64 in_size, const float scale, const int resolution, InterpolationCache<T_SCALE>* interpolation) { const Scaler scaler; interpolation->lower.resize(out_size + 1); interpolation->upper.resize(out_size + 1); interpolation->lerp.resize(out_size + 1); interpolation->ilerp.resize(out_size + 1); interpolation->lower[out_size] = 0; interpolation->upper[out_size] = 0; for (int64 i = out_size - 1; i >= 0; --i) { const float in = scaler(i, scale); const float in_f = std::floor(in); interpolation->lower[i] = std::max(static_cast<int64>(in_f), static_cast<int64>(0)); interpolation->upper[i] = std::min(static_cast<int64>(std::ceil(in)), in_size - 1); interpolation->lower[i] = std::min(interpolation->lower[i], interpolation->upper[i]); interpolation->lerp[i] = in - in_f; interpolation->ilerp[i] = static_cast<T_SCALE>((in - in_f) * (1 << resolution)); } } template <typename T_SCALE> inline InterpolationCache<T_SCALE> BuildLerpCache( const int64 out_size, const int64 in_size, const float scale, const int index_step, const int resolution, const bool half_pixel_centers) { InterpolationCache<T_SCALE> cache; // Compute the cached interpolation weights on the x and y dimensions. 
if (half_pixel_centers) { ComputeInterpolationWeights<T_SCALE, HalfPixelScaler>( out_size, in_size, scale, resolution, &cache); } else { ComputeInterpolationWeights<T_SCALE, LegacyScaler>(out_size, in_size, scale, resolution, &cache); } CHECK(index_step > 0); if (index_step > 1) { for (int i = 0; i < cache.lower.size(); ++i) { cache.lower[i] *= index_step; cache.upper[i] *= index_step; } } return cache; } /** * Computes the bilinear interpolation from the appropriate 4 float points * and the linear interpolation weights. */ template <typename T> inline T ComputeLerpReference(const T in_top_left, const T in_top_right, const T in_bottom_left, const T in_bottom_right, const float x_lerp, const float y_lerp, const float min, const float max) { const float top_left = QuantizedToFloat<T>(in_top_left, min, max); const float top_right = QuantizedToFloat<T>(in_top_right, min, max); const float bottom_left = QuantizedToFloat<T>(in_bottom_left, min, max); const float bottom_right = QuantizedToFloat<T>(in_bottom_right, min, max); const float top = top_left + (top_right - top_left) * x_lerp; const float bottom = bottom_left + (bottom_right - bottom_left) * x_lerp; const float out = top + (bottom - top) * y_lerp; return FloatToQuantized<T>(out, min, max); } template <typename T, typename T_SCALE, typename T_CALC> inline T_CALC MulOffset(T a, T b, T_SCALE c) { return (static_cast<T_CALC>(a) - static_cast<T_CALC>(b)) * static_cast<T_CALC>(c); } template <int RESOLUTION, typename T, typename T_SCALE, typename T_CALC> inline T ComputeLerp(const T top_left, const T top_right, const T bottom_left, const T bottom_right, const T_SCALE x_lerp, const T_SCALE y_lerp) { constexpr T_CALC RESOLUTION_MULT = (1 << RESOLUTION); const T_CALC top = static_cast<T_CALC>(top_left) * RESOLUTION_MULT + MulOffset<T, T_SCALE, T_CALC>(top_right, top_left, x_lerp); const T_CALC bottom = static_cast<T_CALC>(bottom_left) * RESOLUTION_MULT + MulOffset<T, T_SCALE, T_CALC>(bottom_right, bottom_left, x_lerp); 
const T_CALC out = top + (bottom - top) / RESOLUTION_MULT * y_lerp; return static_cast<T>( static_cast<int32>((out + RESOLUTION_MULT / 2) / RESOLUTION_MULT)); } #ifdef QUANTIZED_RESIZE_BILINEAR_USE_NEON inline uint8x8_t ToUint8x8(const quint8* v0, const quint8* v1, const quint8* v2, const quint8* v3, const quint8* v4, const quint8* v5, const quint8* v6, const quint8* v7) { static const uint8x8_t ZERO_8x8 = vmov_n_u8(0); uint8x8_t ret = vld1_lane_u8(reinterpret_cast<const uint8*>(v0), ZERO_8x8, 0); ret = vld1_lane_u8(reinterpret_cast<const uint8*>(v1), ret, 1); ret = vld1_lane_u8(reinterpret_cast<const uint8*>(v2), ret, 2); ret = vld1_lane_u8(reinterpret_cast<const uint8*>(v3), ret, 3); ret = vld1_lane_u8(reinterpret_cast<const uint8*>(v4), ret, 4); ret = vld1_lane_u8(reinterpret_cast<const uint8*>(v5), ret, 5); ret = vld1_lane_u8(reinterpret_cast<const uint8*>(v6), ret, 6); ret = vld1_lane_u8(reinterpret_cast<const uint8*>(v7), ret, 7); return ret; } inline int16x8_t ToInt16x8(const int16* v0, const int16* v1, const int16* v2, const int16* v3, const int16* v4, const int16* v5, const int16* v6, const int16* v7) { static const int16x8_t ZERO_16x8 = vmovq_n_s16(0); int16x8_t ret = vld1q_lane_s16(v0, ZERO_16x8, 0); ret = vld1q_lane_s16(v1, ret, 1); ret = vld1q_lane_s16(v2, ret, 2); ret = vld1q_lane_s16(v3, ret, 3); ret = vld1q_lane_s16(v4, ret, 4); ret = vld1q_lane_s16(v5, ret, 5); ret = vld1q_lane_s16(v6, ret, 6); ret = vld1q_lane_s16(v7, ret, 7); return ret; } inline int32x2_t ToInt32x2(const qint32* v0, const qint32* v1) { static const int32x2_t ZERO_32x2 = vmov_n_s32(0); const int32x2_t ret0 = vld1_lane_s32(reinterpret_cast<const int32*>(v0), ZERO_32x2, 0); const int32x2_t ret1 = vld1_lane_s32(reinterpret_cast<const int32*>(v1), ret0, 1); return ret1; } template <int RESOLUTION, bool X_LERP_SAME> inline int32x2_t ComputeLerpx2( const qint32* top_left0, const qint32* top_right0, const qint32* bottom_left0, const qint32* bottom_right0, const qint32* top_left1, const 
qint32* top_right1, const qint32* bottom_left1, const qint32* bottom_right1, const int32* x_lerp, const int32x2_t y_lerpsx) { const int32x2_t x_lerpsx = X_LERP_SAME ? vld1_dup_s32(reinterpret_cast<const int32*>(x_lerp)) : vld1_s32(reinterpret_cast<const int32*>(x_lerp)); const int32x2_t top_leftsx = ToInt32x2(top_left0, top_left1); const int32x2_t top_rightsx = ToInt32x2(top_right0, top_right1); const int32x2_t bottom_leftsx = ToInt32x2(bottom_left0, bottom_left1); const int32x2_t bottom_rightsx = ToInt32x2(bottom_right0, bottom_right1); const int32x2_t retval = ComputeLerp32x2<RESOLUTION>(top_leftsx, top_rightsx, bottom_leftsx, bottom_rightsx, x_lerpsx, y_lerpsx); return retval; } template <int RESOLUTION> inline uint8x8_t ComputeLerpx8( const quint8* tl0, const quint8* tr0, const quint8* bl0, const quint8* br0, const int16* xlp0, const quint8* tl1, const quint8* tr1, const quint8* bl1, const quint8* br1, const int16* xlp1, const quint8* tl2, const quint8* tr2, const quint8* bl2, const quint8* br2, const int16* xlp2, const quint8* tl3, const quint8* tr3, const quint8* bl3, const quint8* br3, const int16* xlp3, const quint8* tl4, const quint8* tr4, const quint8* bl4, const quint8* br4, const int16* xlp4, const quint8* tl5, const quint8* tr5, const quint8* bl5, const quint8* br5, const int16* xlp5, const quint8* tl6, const quint8* tr6, const quint8* bl6, const quint8* br6, const int16* xlp6, const quint8* tl7, const quint8* tr7, const quint8* bl7, const quint8* br7, const int16* xlp7, const int16x8_t ys_lerpsx) { const uint8x8_t tl8x8 = ToUint8x8(tl0, tl1, tl2, tl3, tl4, tl5, tl6, tl7); const uint8x8_t tr8x8 = ToUint8x8(tr0, tr1, tr2, tr3, tr4, tr5, tr6, tr7); const uint8x8_t bl8x8 = ToUint8x8(bl0, bl1, bl2, bl3, bl4, bl5, bl6, bl7); const uint8x8_t br8x8 = ToUint8x8(br0, br1, br2, br3, br4, br5, br6, br7); const int16x8_t xs_lerpsx = ToInt16x8(xlp0, xlp1, xlp2, xlp3, xlp4, xlp5, xlp6, xlp7); return ComputeLerp8x8<RESOLUTION>(tl8x8, tr8x8, bl8x8, br8x8, xs_lerpsx, 
ys_lerpsx); } // Expand address at compile time to improve performance template <int RESOLUTION, int ID0, int CH0, int ID1, int CH1, int ID2, int CH2, int ID3, int CH3, int ID4, int CH4, int ID5, int CH5, int ID6, int CH6, int ID7, int CH7> inline uint8x8_t ComputeLerpx8Tmpl(const quint8* const yl, const quint8* yu, const int64* xl, const int64* xu, const int16* xlp, const int16x8_t ys_lerpsx) { return ComputeLerpx8<RESOLUTION>( yl + xl[ID0] + CH0, yl + xu[ID0] + CH0, yu + xl[ID0] + CH0, yu + xu[ID0] + CH0, xlp + ID0, yl + xl[ID1] + CH1, yl + xu[ID1] + CH1, yu + xl[ID1] + CH1, yu + xu[ID1] + CH1, xlp + ID1, yl + xl[ID2] + CH2, yl + xu[ID2] + CH2, yu + xl[ID2] + CH2, yu + xu[ID2] + CH2, xlp + ID2, yl + xl[ID3] + CH3, yl + xu[ID3] + CH3, yu + xl[ID3] + CH3, yu + xu[ID3] + CH3, xlp + ID3, yl + xl[ID4] + CH4, yl + xu[ID4] + CH4, yu + xl[ID4] + CH4, yu + xu[ID4] + CH4, xlp + ID4, yl + xl[ID5] + CH5, yl + xu[ID5] + CH5, yu + xl[ID5] + CH5, yu + xu[ID5] + CH5, xlp + ID5, yl + xl[ID6] + CH6, yl + xu[ID6] + CH6, yu + xl[ID6] + CH6, yu + xu[ID6] + CH6, xlp + ID6, yl + xl[ID7] + CH7, yl + xu[ID7] + CH7, yu + xl[ID7] + CH7, yu + xu[ID7] + CH7, xlp + ID7, ys_lerpsx); } #endif template <int RESOLUTION, typename T, typename T_SCALE, typename T_CALC> inline void OutputLerpForChannels(const InterpolationCache<T_SCALE>& xs, const int64 x, const T_SCALE ys_ilerp, const int channels, const float min, const float max, const T* ys_input_lower_ptr, const T* ys_input_upper_ptr, T* output_y_ptr) { const int64 xs_lower = xs.lower[x]; const int64 xs_upper = xs.upper[x]; const T_SCALE xs_ilerp = xs.ilerp[x]; for (int c = 0; c < channels; ++c) { const T top_left = ys_input_lower_ptr[xs_lower + c]; const T top_right = ys_input_lower_ptr[xs_upper + c]; const T bottom_left = ys_input_upper_ptr[xs_lower + c]; const T bottom_right = ys_input_upper_ptr[xs_upper + c]; const T val = ComputeLerp<RESOLUTION, T, T_SCALE, T_CALC>( top_left, top_right, bottom_left, bottom_right, xs_ilerp, ys_ilerp); 
output_y_ptr[x * channels + c] = val; } } template <int RES> inline void OutputLerp8x8x1(const InterpolationCache<int16>& xs, const int64 x_start, const int16 ys_ilerp, const float min, const float max, const quint8* const ys_input_lower_ptr, const quint8* const ys_input_upper_ptr, quint8* output_y_ptr) { #ifdef QUANTIZED_RESIZE_BILINEAR_USE_NEON const int16x8_t y_lerpsx = vmovq_n_s16(ys_ilerp); const uint8x8_t x0x7 = ComputeLerpx8Tmpl<RES, 0, 0, 1, 0, 2, 0, 3, 0, 4, 0, 5, 0, 6, 0, 7, 0>( ys_input_lower_ptr, ys_input_upper_ptr, &xs.lower[x_start], &xs.upper[x_start], &xs.ilerp[x_start], y_lerpsx); vst1_u8(reinterpret_cast<uint8_t*>(output_y_ptr + x_start), x0x7); #else for (int x = x_start; x < x_start + 8; ++x) { OutputLerpForChannels<RES, quint8, int16, int16>( xs, x, ys_ilerp, 1, min, max, ys_input_lower_ptr, ys_input_upper_ptr, output_y_ptr); } #endif } template <int RES> inline void OutputLerp8x8x3(const InterpolationCache<int16>& xs, const int64 x_start, const int16 ys_ilerp, const float min, const float max, const quint8* const ys_input_lower_ptr, const quint8* const ys_input_upper_ptr, quint8* output_y_ptr) { #ifdef QUANTIZED_RESIZE_BILINEAR_USE_NEON const int16x8_t y_lerpsx = vmovq_n_s16(ys_ilerp); const uint8x8_t x0c0x2c1 = ComputeLerpx8Tmpl<RES, 0, 0, 0, 1, 0, 2, 1, 0, 1, 1, 1, 2, 2, 0, 2, 1>( ys_input_lower_ptr, ys_input_upper_ptr, &xs.lower[x_start], &xs.upper[x_start], &xs.ilerp[x_start], y_lerpsx); vst1_u8(reinterpret_cast<uint8_t*>(output_y_ptr + x_start * 3), x0c0x2c1); const uint8x8_t x2c2x5c0 = ComputeLerpx8Tmpl<RES, 2, 2, 3, 0, 3, 1, 3, 2, 4, 0, 4, 1, 4, 2, 5, 0>( ys_input_lower_ptr, ys_input_upper_ptr, &xs.lower[x_start], &xs.upper[x_start], &xs.ilerp[x_start], y_lerpsx); vst1_u8(reinterpret_cast<uint8_t*>(output_y_ptr + x_start * 3 + 8), x2c2x5c0); const uint8x8_t x5c1x7c2 = ComputeLerpx8Tmpl<RES, 5, 1, 5, 2, 6, 0, 6, 1, 6, 2, 7, 0, 7, 1, 7, 2>( ys_input_lower_ptr, ys_input_upper_ptr, &xs.lower[x_start], &xs.upper[x_start], &xs.ilerp[x_start], 
y_lerpsx); vst1_u8(reinterpret_cast<uint8_t*>(output_y_ptr + x_start * 3 + 16), x5c1x7c2); #else for (int x = x_start; x < x_start + 8; ++x) { OutputLerpForChannels<RES, quint8, int16, int16>( xs, x, ys_ilerp, 3, min, max, ys_input_lower_ptr, ys_input_upper_ptr, output_y_ptr); } #endif } template <int RESOLUTION> inline void OutputLerp32x4x1(const InterpolationCache<int32>& xs, const int64 x_start, const int32 ys_ilerp, const float min, const float max, const qint32* const ys_input_lower_ptr, const qint32* const ys_input_upper_ptr, qint32* output_y_ptr) { #ifdef QUANTIZED_RESIZE_BILINEAR_USE_NEON const int64 xs_lower0 = xs.lower[x_start]; const int64 xs_upper0 = xs.upper[x_start]; const int32* const xs_ilerp0 = &xs.ilerp[x_start]; const int64 xs_lower1 = xs.lower[x_start + 1]; const int64 xs_upper1 = xs.upper[x_start + 1]; const int64 xs_lower2 = xs.lower[x_start + 2]; const int64 xs_upper2 = xs.upper[x_start + 2]; const int32* const xs_ilerp2 = &xs.ilerp[x_start + 2]; const int64 xs_lower3 = xs.lower[x_start + 3]; const int64 xs_upper3 = xs.upper[x_start + 3]; const int32x2_t y_lerpsx = vmov_n_s32(ys_ilerp); const int32x2_t x0x1 = ComputeLerpx2<RESOLUTION, false>( ys_input_lower_ptr + xs_lower0, ys_input_lower_ptr + xs_upper0, ys_input_upper_ptr + xs_lower0, ys_input_upper_ptr + xs_upper0, ys_input_lower_ptr + xs_lower1, ys_input_lower_ptr + xs_upper1, ys_input_upper_ptr + xs_lower1, ys_input_upper_ptr + xs_upper1, xs_ilerp0, y_lerpsx); const int32x2_t x1x2 = ComputeLerpx2<RESOLUTION, false>( ys_input_lower_ptr + xs_lower2, ys_input_lower_ptr + xs_upper2, ys_input_upper_ptr + xs_lower2, ys_input_upper_ptr + xs_upper2, ys_input_lower_ptr + xs_lower3, ys_input_lower_ptr + xs_upper3, ys_input_upper_ptr + xs_lower3, ys_input_upper_ptr + xs_upper3, xs_ilerp2, y_lerpsx); const int32x4_t x0x1x2x3 = vcombine_s32(x0x1, x1x2); vst1q_s32(reinterpret_cast<int32*>(output_y_ptr + x_start), x0x1x2x3); #else for (int x = x_start; x < x_start + 4; ++x) { 
OutputLerpForChannels<RESOLUTION, qint32, int32, int64>( xs, x, ys_ilerp, 1, min, max, ys_input_lower_ptr, ys_input_upper_ptr, output_y_ptr); } #endif } template <int RESOLUTION> inline void OutputLerp32x4x3(const InterpolationCache<int32>& xs, const int64 x_start, const int32 ys_ilerp, const float min, const float max, const qint32* const ys_input_lower_ptr, const qint32* const ys_input_upper_ptr, qint32* output_y_ptr) { #ifdef QUANTIZED_RESIZE_BILINEAR_USE_NEON const int64 xs_lower0 = xs.lower[x_start]; const int64 xs_upper0 = xs.upper[x_start]; const int32* const xs_ilerp0 = &xs.ilerp[x_start]; const int64 xs_lower1 = xs.lower[x_start + 1]; const int64 xs_upper1 = xs.upper[x_start + 1]; const int32* const xs_ilerp1 = &xs.ilerp[x_start + 1]; const int64 xs_lower2 = xs.lower[x_start + 2]; const int64 xs_upper2 = xs.upper[x_start + 2]; const int32* const xs_ilerp2 = &xs.ilerp[x_start + 2]; const int64 xs_lower3 = xs.lower[x_start + 3]; const int64 xs_upper3 = xs.upper[x_start + 3]; const int32* const xs_ilerp3 = &xs.ilerp[x_start + 3]; const int32x2_t y_lerpsx = vmov_n_s32(ys_ilerp); const int32x2_t x0c0x0c1 = ComputeLerpx2<RESOLUTION, true>( ys_input_lower_ptr + xs_lower0, ys_input_lower_ptr + xs_upper0, ys_input_upper_ptr + xs_lower0, ys_input_upper_ptr + xs_upper0, ys_input_lower_ptr + xs_lower0 + 1, ys_input_lower_ptr + xs_upper0 + 1, ys_input_upper_ptr + xs_lower0 + 1, ys_input_upper_ptr + xs_upper0 + 1, xs_ilerp0, y_lerpsx); const int32x2_t x0c2x1c0 = ComputeLerpx2<RESOLUTION, false>( ys_input_lower_ptr + xs_lower0 + 2, ys_input_lower_ptr + xs_upper0 + 2, ys_input_upper_ptr + xs_lower0 + 2, ys_input_upper_ptr + xs_upper0 + 2, ys_input_lower_ptr + xs_lower1, ys_input_lower_ptr + xs_upper1, ys_input_upper_ptr + xs_lower1, ys_input_upper_ptr + xs_upper1, xs_ilerp0, y_lerpsx); const int32x2_t x1c1x1c2 = ComputeLerpx2<RESOLUTION, true>( ys_input_lower_ptr + xs_lower1 + 1, ys_input_lower_ptr + xs_upper1 + 1, ys_input_upper_ptr + xs_lower1 + 1, ys_input_upper_ptr + 
xs_upper1 + 1, ys_input_lower_ptr + xs_lower1 + 2, ys_input_lower_ptr + xs_upper1 + 2, ys_input_upper_ptr + xs_lower1 + 2, ys_input_upper_ptr + xs_upper1 + 2, xs_ilerp1, y_lerpsx); const int32x2_t x2c0x2c1 = ComputeLerpx2<RESOLUTION, true>( ys_input_lower_ptr + xs_lower2, ys_input_lower_ptr + xs_upper2, ys_input_upper_ptr + xs_lower2, ys_input_upper_ptr + xs_upper2, ys_input_lower_ptr + xs_lower2 + 1, ys_input_lower_ptr + xs_upper2 + 1, ys_input_upper_ptr + xs_lower2 + 1, ys_input_upper_ptr + xs_upper2 + 1, xs_ilerp2, y_lerpsx); const int32x2_t x2c2x3c0 = ComputeLerpx2<RESOLUTION, false>( ys_input_lower_ptr + xs_lower2 + 2, ys_input_lower_ptr + xs_upper2 + 2, ys_input_upper_ptr + xs_lower2 + 2, ys_input_upper_ptr + xs_upper2 + 2, ys_input_lower_ptr + xs_lower3, ys_input_lower_ptr + xs_upper3, ys_input_upper_ptr + xs_lower3, ys_input_upper_ptr + xs_upper3, xs_ilerp2, y_lerpsx); const int32x2_t x3c1x3c2 = ComputeLerpx2<RESOLUTION, true>( ys_input_lower_ptr + xs_lower3 + 1, ys_input_lower_ptr + xs_upper3 + 1, ys_input_upper_ptr + xs_lower3 + 1, ys_input_upper_ptr + xs_upper3 + 1, ys_input_lower_ptr + xs_lower3 + 2, ys_input_lower_ptr + xs_upper3 + 2, ys_input_upper_ptr + xs_lower3 + 2, ys_input_upper_ptr + xs_upper3 + 2, xs_ilerp3, y_lerpsx); const int32x4_t x0c0x0c1x0c2x1c0 = vcombine_s32(x0c0x0c1, x0c2x1c0); const int32x4_t x1c1x1c2x2c0x2c1 = vcombine_s32(x1c1x1c2, x2c0x2c1); const int32x4_t x2c2x3c0x3c1x3c2 = vcombine_s32(x2c2x3c0, x3c1x3c2); vst1q_s32(reinterpret_cast<int32*>(output_y_ptr + x_start * 3), x0c0x0c1x0c2x1c0); vst1q_s32(reinterpret_cast<int32*>(output_y_ptr + x_start * 3 + 4), x1c1x1c2x2c0x2c1); vst1q_s32(reinterpret_cast<int32*>(output_y_ptr + x_start * 3 + 8), x2c2x3c0x3c1x3c2); #else for (int x = x_start; x < x_start + 4; ++x) { OutputLerpForChannels<RESOLUTION, qint32, int32, int64>( xs, x, ys_ilerp, 3, min, max, ys_input_lower_ptr, ys_input_upper_ptr, output_y_ptr); } #endif } template <typename T> void ResizeImageReference(typename TTypes<T, 
4>::ConstTensor images, const int batch_size, const int64 in_height, const int64 in_width, const int64 out_height, const int64 out_width, const int channels, const float height_scale, const float width_scale, const float in_min, const float in_max, const bool half_pixel_centers, typename TTypes<T, 4>::Tensor* output) { CHECK_NOTNULL(output); const InterpolationCache<float> xs = BuildLerpCache<float>( out_width, in_width, width_scale, channels, 0, half_pixel_centers); const InterpolationCache<float> ys = BuildLerpCache<float>( out_height, in_height, height_scale, 1, 0, half_pixel_centers); const int64 in_row_size = in_width * channels; const int64 in_batch_num_values = in_height * in_row_size; const int64 out_row_size = out_width * channels; const T* input_b_ptr = images.data(); T* output_y_ptr = output->data(); for (int b = 0; b < batch_size; ++b) { for (int64 y = 0; y < out_height; ++y) { const T* ys_input_lower_ptr = input_b_ptr + ys.lower[y] * in_row_size; const T* ys_input_upper_ptr = input_b_ptr + ys.upper[y] * in_row_size; const float ys_lerp = ys.lerp[y]; for (int64 x = 0; x < out_width; ++x) { const int64 xs_lower = xs.lower[x]; const int64 xs_upper = xs.upper[x]; const float xs_lerp = xs.lerp[x]; for (int c = 0; c < channels; ++c) { const T top_left = ys_input_lower_ptr[xs_lower + c]; const T top_right = ys_input_lower_ptr[xs_upper + c]; const T bottom_left = ys_input_upper_ptr[xs_lower + c]; const T bottom_right = ys_input_upper_ptr[xs_upper + c]; const T val = ComputeLerpReference<T>( top_left, top_right, bottom_left, bottom_right, xs_lerp, ys_lerp, in_min, in_max); output_y_ptr[x * channels + c] = val; } } output_y_ptr += out_row_size; } input_b_ptr += in_batch_num_values; } } template <typename T> void ResizeImage(typename TTypes<T, 4>::ConstTensor images, const int batch_size, const int64 in_height, const int64 in_width, const int64 out_height, const int64 out_width, const int channels, const float height_scale, const float width_scale, const float 
in_min, const float in_max, const bool half_pixel_centers, typename TTypes<T, 4>::Tensor* output) { ResizeImageReference<T>(images, batch_size, in_height, in_width, out_height, out_width, channels, height_scale, width_scale, in_min, in_max, half_pixel_centers, output); } template <> void ResizeImage<qint32>(typename TTypes<qint32, 4>::ConstTensor images, const int batch_size, const int64 in_height, const int64 in_width, const int64 out_height, const int64 out_width, const int channels, const float height_scale, const float width_scale, const float in_min, const float in_max, const bool half_pixel_centers, typename TTypes<qint32, 4>::Tensor* output) { // 30 is maximum resolution for signed int. constexpr int RESOLUTION = 30; constexpr int SIMD_STEP = 4; CHECK_NOTNULL(output); const InterpolationCache<int32> xs = BuildLerpCache<int32>(out_width, in_width, width_scale, channels, RESOLUTION, half_pixel_centers); const InterpolationCache<int32> ys = BuildLerpCache<int32>( out_height, in_height, height_scale, 1, RESOLUTION, half_pixel_centers); const int64 in_row_size = in_width * channels; const int64 in_batch_num_values = in_height * in_row_size; const int64 out_row_size = out_width * channels; const qint32* input_b_ptr = images.data(); qint32* output_y_ptr = output->data(); for (int b = 0; b < batch_size; ++b) { for (int64 y = 0; y < out_height; ++y) { const qint32* ys_input_lower_ptr = input_b_ptr + ys.lower[y] * in_row_size; const qint32* ys_input_upper_ptr = input_b_ptr + ys.upper[y] * in_row_size; const int32 ys_ilerp = ys.ilerp[y]; // Optimized for channels == 1 or channels == 3 as this // is typical channels. 
int64 x = 0; if (channels == 1) { for (; x < out_width - SIMD_STEP + 1; x += SIMD_STEP) { OutputLerp32x4x1<RESOLUTION>(xs, x, ys_ilerp, in_min, in_max, ys_input_lower_ptr, ys_input_upper_ptr, output_y_ptr); } } else if (channels == 3) { for (; x < out_width - SIMD_STEP + 1; x += SIMD_STEP) { OutputLerp32x4x3<RESOLUTION>(xs, x, ys_ilerp, in_min, in_max, ys_input_lower_ptr, ys_input_upper_ptr, output_y_ptr); } } for (; x < out_width; ++x) { OutputLerpForChannels<RESOLUTION, qint32, int32, int64>( xs, x, ys_ilerp, channels, in_min, in_max, ys_input_lower_ptr, ys_input_upper_ptr, output_y_ptr); } output_y_ptr += out_row_size; } input_b_ptr += in_batch_num_values; } } template <> void ResizeImage<quint8>(typename TTypes<quint8, 4>::ConstTensor images, const int batch_size, const int64 in_height, const int64 in_width, const int64 out_height, const int64 out_width, const int channels, const float height_scale, const float width_scale, const float in_min, const float in_max, const bool half_pixel_centers, typename TTypes<quint8, 4>::Tensor* output) { // 7 is maximum resolution for unsigned byte. 
constexpr int RESOLUTION = 7; constexpr int SIMD_STEP = 8; CHECK_NOTNULL(output); const InterpolationCache<int16> xs = BuildLerpCache<int16>(out_width, in_width, width_scale, channels, RESOLUTION, half_pixel_centers); const InterpolationCache<int16> ys = BuildLerpCache<int16>( out_height, in_height, height_scale, 1, RESOLUTION, half_pixel_centers); const int64 in_row_size = in_width * channels; const int64 in_batch_num_values = in_height * in_row_size; const int64 out_row_size = out_width * channels; const quint8* input_b_ptr = images.data(); quint8* output_y_ptr = output->data(); for (int b = 0; b < batch_size; ++b) { for (int64 y = 0; y < out_height; ++y) { const quint8* ys_input_lower_ptr = input_b_ptr + ys.lower[y] * in_row_size; const quint8* ys_input_upper_ptr = input_b_ptr + ys.upper[y] * in_row_size; const int32 ys_ilerp = ys.ilerp[y]; // Optimized for channels == 1 or channels == 3 as this // is typical channels. // TODO(satok): Support more generic NEON optimized implementation // for different channels. 
int64 x = 0; if (channels == 1) { for (; x < out_width - SIMD_STEP + 1; x += SIMD_STEP) { OutputLerp8x8x1<RESOLUTION>(xs, x, ys_ilerp, in_min, in_max, ys_input_lower_ptr, ys_input_upper_ptr, output_y_ptr); } } else if (channels == 3) { for (; x < out_width - SIMD_STEP + 1; x += SIMD_STEP) { OutputLerp8x8x3<RESOLUTION>(xs, x, ys_ilerp, in_min, in_max, ys_input_lower_ptr, ys_input_upper_ptr, output_y_ptr); } } for (; x < out_width; ++x) { OutputLerpForChannels<RESOLUTION, quint8, int16, int16>( xs, x, ys_ilerp, channels, in_min, in_max, ys_input_lower_ptr, ys_input_upper_ptr, output_y_ptr); } output_y_ptr += out_row_size; } input_b_ptr += in_batch_num_values; } } template <typename T> void ResizeBilinear(const typename TTypes<T, 4>::ConstTensor& images, const float height_scale, const float width_scale, const float in_min, const float in_max, const bool half_pixel_centers, typename TTypes<T, 4>::Tensor* output) { CHECK_NOTNULL(output); const int batch_size = images.dimension(0); const int64 in_height = images.dimension(1); const int64 in_width = images.dimension(2); const int channels = images.dimension(3); const int64 out_height = output->dimension(1); const int64 out_width = output->dimension(2); // Handle no-op resizes efficiently. 
if (out_height == in_height && out_width == in_width) { *output = images.template cast<T>(); return; } if (USE_REFERENCE) { ResizeImageReference<T>(images, batch_size, in_height, in_width, out_height, out_width, channels, height_scale, width_scale, in_min, in_max, half_pixel_centers, output); } else { ResizeImage<T>(images, batch_size, in_height, in_width, out_height, out_width, channels, height_scale, width_scale, in_min, in_max, half_pixel_centers, output); } } } // namespace template <class T> class QuantizedResizeBilinearOp : public OpKernel { public: explicit QuantizedResizeBilinearOp(OpKernelConstruction* context) : OpKernel(context) { OP_REQUIRES_OK(context, context->GetAttr("align_corners", &align_corners_)); OP_REQUIRES_OK( context, context->GetAttr("half_pixel_centers", &half_pixel_centers_)); } void Compute(OpKernelContext* context) override { const float in_min = context->input(2).flat<float>()(0); const float in_max = context->input(3).flat<float>()(0); ImageResizerState st(align_corners_, false); st.ValidateAndCreateOutput(context); if (!context->status().ok()) return; // Return if the output is empty. 
if (st.output->NumElements() == 0) return; typename TTypes<T, 4>::ConstTensor image_data( context->input(0).tensor<T, 4>()); typename TTypes<T, 4>::Tensor output_data(st.output->tensor<T, 4>()); ResizeBilinear<T>(image_data, st.height_scale, st.width_scale, in_min, in_max, half_pixel_centers_, &output_data); Tensor* out_min = nullptr; OP_REQUIRES_OK(context, context->allocate_output(1, {}, &out_min)); out_min->flat<float>()(0) = in_min; Tensor* out_max = nullptr; OP_REQUIRES_OK(context, context->allocate_output(2, {}, &out_max)); out_max->flat<float>()(0) = in_max; } private: bool align_corners_; bool half_pixel_centers_; TF_DISALLOW_COPY_AND_ASSIGN(QuantizedResizeBilinearOp<T>); }; #define REGISTER_CPU_KERNEL(type) \ REGISTER_KERNEL_BUILDER(Name("QuantizedResizeBilinear") \ .Device(DEVICE_CPU) \ .HostMemory("size") \ .TypeConstraint<type>("T"), \ QuantizedResizeBilinearOp<type>) REGISTER_CPU_KERNEL(::tensorflow::quint8); REGISTER_CPU_KERNEL(::tensorflow::qint32); REGISTER_CPU_KERNEL(float); } // namespace tensorflow
/* Copyright 2017 The TensorFlow Authors. All Rights Reserved. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. You may obtain a copy of the License at http://www.apache.org/licenses/LICENSE-2.0 Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License. ==============================================================================*/ // Implements a quantized version of the resize bilinear op. #define EIGEN_USE_THREADS #if defined(__ARM_NEON__) || defined(__ARM_NEON) #define USE_NEON #define QUANTIZED_RESIZE_BILINEAR_USE_NEON #include <arm_neon.h> #endif #include "tensorflow/core/framework/op_kernel.h" #include "tensorflow/core/framework/types.h" #include "tensorflow/core/kernels/quantization_utils.h" #include "tensorflow/core/platform/macros.h" #include "tensorflow/core/util/image_resizer_state.h" namespace tensorflow { static constexpr bool USE_REFERENCE = false; namespace { // Compute the interpolation indices only once. 
template <typename T_SCALE> struct InterpolationCache { std::vector<int64> lower; // Lower source index used in the interpolation std::vector<int64> upper; // Upper source index used in the interpolation // 1-D linear interpolation scale (see: // https://en.wikipedia.org/wiki/Bilinear_interpolation) std::vector<float> lerp; std::vector<T_SCALE> ilerp; }; template <typename T_SCALE, typename Scaler> inline void ComputeInterpolationWeights( const int64 out_size, const int64 in_size, const float scale, const int resolution, InterpolationCache<T_SCALE>* interpolation) { const Scaler scaler; interpolation->lower.resize(out_size + 1); interpolation->upper.resize(out_size + 1); interpolation->lerp.resize(out_size + 1); interpolation->ilerp.resize(out_size + 1); interpolation->lower[out_size] = 0; interpolation->upper[out_size] = 0; for (int64 i = out_size - 1; i >= 0; --i) { const float in = scaler(i, scale); const float in_f = std::floor(in); interpolation->lower[i] = std::max(static_cast<int64>(in_f), static_cast<int64>(0)); interpolation->upper[i] = std::min(static_cast<int64>(std::ceil(in)), in_size - 1); interpolation->lower[i] = std::min(interpolation->lower[i], interpolation->upper[i]); interpolation->lerp[i] = in - in_f; interpolation->ilerp[i] = static_cast<T_SCALE>((in - in_f) * (1 << resolution)); } } template <typename T_SCALE> inline InterpolationCache<T_SCALE> BuildLerpCache( const int64 out_size, const int64 in_size, const float scale, const int index_step, const int resolution, const bool half_pixel_centers) { InterpolationCache<T_SCALE> cache; // Compute the cached interpolation weights on the x and y dimensions. 
if (half_pixel_centers) { ComputeInterpolationWeights<T_SCALE, HalfPixelScaler>( out_size, in_size, scale, resolution, &cache); } else { ComputeInterpolationWeights<T_SCALE, LegacyScaler>(out_size, in_size, scale, resolution, &cache); } CHECK(index_step > 0); if (index_step > 1) { for (int i = 0; i < cache.lower.size(); ++i) { cache.lower[i] *= index_step; cache.upper[i] *= index_step; } } return cache; } /** * Computes the bilinear interpolation from the appropriate 4 float points * and the linear interpolation weights. */ template <typename T> inline T ComputeLerpReference(const T in_top_left, const T in_top_right, const T in_bottom_left, const T in_bottom_right, const float x_lerp, const float y_lerp, const float min, const float max) { const float top_left = QuantizedToFloat<T>(in_top_left, min, max); const float top_right = QuantizedToFloat<T>(in_top_right, min, max); const float bottom_left = QuantizedToFloat<T>(in_bottom_left, min, max); const float bottom_right = QuantizedToFloat<T>(in_bottom_right, min, max); const float top = top_left + (top_right - top_left) * x_lerp; const float bottom = bottom_left + (bottom_right - bottom_left) * x_lerp; const float out = top + (bottom - top) * y_lerp; return FloatToQuantized<T>(out, min, max); } template <typename T, typename T_SCALE, typename T_CALC> inline T_CALC MulOffset(T a, T b, T_SCALE c) { return (static_cast<T_CALC>(a) - static_cast<T_CALC>(b)) * static_cast<T_CALC>(c); } template <int RESOLUTION, typename T, typename T_SCALE, typename T_CALC> inline T ComputeLerp(const T top_left, const T top_right, const T bottom_left, const T bottom_right, const T_SCALE x_lerp, const T_SCALE y_lerp) { constexpr T_CALC RESOLUTION_MULT = (1 << RESOLUTION); const T_CALC top = static_cast<T_CALC>(top_left) * RESOLUTION_MULT + MulOffset<T, T_SCALE, T_CALC>(top_right, top_left, x_lerp); const T_CALC bottom = static_cast<T_CALC>(bottom_left) * RESOLUTION_MULT + MulOffset<T, T_SCALE, T_CALC>(bottom_right, bottom_left, x_lerp); 
const T_CALC out = top + (bottom - top) / RESOLUTION_MULT * y_lerp; return static_cast<T>( static_cast<int32>((out + RESOLUTION_MULT / 2) / RESOLUTION_MULT)); } #ifdef QUANTIZED_RESIZE_BILINEAR_USE_NEON inline uint8x8_t ToUint8x8(const quint8* v0, const quint8* v1, const quint8* v2, const quint8* v3, const quint8* v4, const quint8* v5, const quint8* v6, const quint8* v7) { static const uint8x8_t ZERO_8x8 = vmov_n_u8(0); uint8x8_t ret = vld1_lane_u8(reinterpret_cast<const uint8*>(v0), ZERO_8x8, 0); ret = vld1_lane_u8(reinterpret_cast<const uint8*>(v1), ret, 1); ret = vld1_lane_u8(reinterpret_cast<const uint8*>(v2), ret, 2); ret = vld1_lane_u8(reinterpret_cast<const uint8*>(v3), ret, 3); ret = vld1_lane_u8(reinterpret_cast<const uint8*>(v4), ret, 4); ret = vld1_lane_u8(reinterpret_cast<const uint8*>(v5), ret, 5); ret = vld1_lane_u8(reinterpret_cast<const uint8*>(v6), ret, 6); ret = vld1_lane_u8(reinterpret_cast<const uint8*>(v7), ret, 7); return ret; } inline int16x8_t ToInt16x8(const int16* v0, const int16* v1, const int16* v2, const int16* v3, const int16* v4, const int16* v5, const int16* v6, const int16* v7) { static const int16x8_t ZERO_16x8 = vmovq_n_s16(0); int16x8_t ret = vld1q_lane_s16(v0, ZERO_16x8, 0); ret = vld1q_lane_s16(v1, ret, 1); ret = vld1q_lane_s16(v2, ret, 2); ret = vld1q_lane_s16(v3, ret, 3); ret = vld1q_lane_s16(v4, ret, 4); ret = vld1q_lane_s16(v5, ret, 5); ret = vld1q_lane_s16(v6, ret, 6); ret = vld1q_lane_s16(v7, ret, 7); return ret; } inline int32x2_t ToInt32x2(const qint32* v0, const qint32* v1) { static const int32x2_t ZERO_32x2 = vmov_n_s32(0); const int32x2_t ret0 = vld1_lane_s32(reinterpret_cast<const int32*>(v0), ZERO_32x2, 0); const int32x2_t ret1 = vld1_lane_s32(reinterpret_cast<const int32*>(v1), ret0, 1); return ret1; } template <int RESOLUTION, bool X_LERP_SAME> inline int32x2_t ComputeLerpx2( const qint32* top_left0, const qint32* top_right0, const qint32* bottom_left0, const qint32* bottom_right0, const qint32* top_left1, const 
qint32* top_right1, const qint32* bottom_left1, const qint32* bottom_right1, const int32* x_lerp, const int32x2_t y_lerpsx) { const int32x2_t x_lerpsx = X_LERP_SAME ? vld1_dup_s32(reinterpret_cast<const int32*>(x_lerp)) : vld1_s32(reinterpret_cast<const int32*>(x_lerp)); const int32x2_t top_leftsx = ToInt32x2(top_left0, top_left1); const int32x2_t top_rightsx = ToInt32x2(top_right0, top_right1); const int32x2_t bottom_leftsx = ToInt32x2(bottom_left0, bottom_left1); const int32x2_t bottom_rightsx = ToInt32x2(bottom_right0, bottom_right1); const int32x2_t retval = ComputeLerp32x2<RESOLUTION>(top_leftsx, top_rightsx, bottom_leftsx, bottom_rightsx, x_lerpsx, y_lerpsx); return retval; } template <int RESOLUTION> inline uint8x8_t ComputeLerpx8( const quint8* tl0, const quint8* tr0, const quint8* bl0, const quint8* br0, const int16* xlp0, const quint8* tl1, const quint8* tr1, const quint8* bl1, const quint8* br1, const int16* xlp1, const quint8* tl2, const quint8* tr2, const quint8* bl2, const quint8* br2, const int16* xlp2, const quint8* tl3, const quint8* tr3, const quint8* bl3, const quint8* br3, const int16* xlp3, const quint8* tl4, const quint8* tr4, const quint8* bl4, const quint8* br4, const int16* xlp4, const quint8* tl5, const quint8* tr5, const quint8* bl5, const quint8* br5, const int16* xlp5, const quint8* tl6, const quint8* tr6, const quint8* bl6, const quint8* br6, const int16* xlp6, const quint8* tl7, const quint8* tr7, const quint8* bl7, const quint8* br7, const int16* xlp7, const int16x8_t ys_lerpsx) { const uint8x8_t tl8x8 = ToUint8x8(tl0, tl1, tl2, tl3, tl4, tl5, tl6, tl7); const uint8x8_t tr8x8 = ToUint8x8(tr0, tr1, tr2, tr3, tr4, tr5, tr6, tr7); const uint8x8_t bl8x8 = ToUint8x8(bl0, bl1, bl2, bl3, bl4, bl5, bl6, bl7); const uint8x8_t br8x8 = ToUint8x8(br0, br1, br2, br3, br4, br5, br6, br7); const int16x8_t xs_lerpsx = ToInt16x8(xlp0, xlp1, xlp2, xlp3, xlp4, xlp5, xlp6, xlp7); return ComputeLerp8x8<RESOLUTION>(tl8x8, tr8x8, bl8x8, br8x8, xs_lerpsx, 
ys_lerpsx); } // Expand address at compile time to improve performance template <int RESOLUTION, int ID0, int CH0, int ID1, int CH1, int ID2, int CH2, int ID3, int CH3, int ID4, int CH4, int ID5, int CH5, int ID6, int CH6, int ID7, int CH7> inline uint8x8_t ComputeLerpx8Tmpl(const quint8* const yl, const quint8* yu, const int64* xl, const int64* xu, const int16* xlp, const int16x8_t ys_lerpsx) { return ComputeLerpx8<RESOLUTION>( yl + xl[ID0] + CH0, yl + xu[ID0] + CH0, yu + xl[ID0] + CH0, yu + xu[ID0] + CH0, xlp + ID0, yl + xl[ID1] + CH1, yl + xu[ID1] + CH1, yu + xl[ID1] + CH1, yu + xu[ID1] + CH1, xlp + ID1, yl + xl[ID2] + CH2, yl + xu[ID2] + CH2, yu + xl[ID2] + CH2, yu + xu[ID2] + CH2, xlp + ID2, yl + xl[ID3] + CH3, yl + xu[ID3] + CH3, yu + xl[ID3] + CH3, yu + xu[ID3] + CH3, xlp + ID3, yl + xl[ID4] + CH4, yl + xu[ID4] + CH4, yu + xl[ID4] + CH4, yu + xu[ID4] + CH4, xlp + ID4, yl + xl[ID5] + CH5, yl + xu[ID5] + CH5, yu + xl[ID5] + CH5, yu + xu[ID5] + CH5, xlp + ID5, yl + xl[ID6] + CH6, yl + xu[ID6] + CH6, yu + xl[ID6] + CH6, yu + xu[ID6] + CH6, xlp + ID6, yl + xl[ID7] + CH7, yl + xu[ID7] + CH7, yu + xl[ID7] + CH7, yu + xu[ID7] + CH7, xlp + ID7, ys_lerpsx); } #endif template <int RESOLUTION, typename T, typename T_SCALE, typename T_CALC> inline void OutputLerpForChannels(const InterpolationCache<T_SCALE>& xs, const int64 x, const T_SCALE ys_ilerp, const int channels, const float min, const float max, const T* ys_input_lower_ptr, const T* ys_input_upper_ptr, T* output_y_ptr) { const int64 xs_lower = xs.lower[x]; const int64 xs_upper = xs.upper[x]; const T_SCALE xs_ilerp = xs.ilerp[x]; for (int c = 0; c < channels; ++c) { const T top_left = ys_input_lower_ptr[xs_lower + c]; const T top_right = ys_input_lower_ptr[xs_upper + c]; const T bottom_left = ys_input_upper_ptr[xs_lower + c]; const T bottom_right = ys_input_upper_ptr[xs_upper + c]; const T val = ComputeLerp<RESOLUTION, T, T_SCALE, T_CALC>( top_left, top_right, bottom_left, bottom_right, xs_ilerp, ys_ilerp); 
output_y_ptr[x * channels + c] = val; } } template <int RES> inline void OutputLerp8x8x1(const InterpolationCache<int16>& xs, const int64 x_start, const int16 ys_ilerp, const float min, const float max, const quint8* const ys_input_lower_ptr, const quint8* const ys_input_upper_ptr, quint8* output_y_ptr) { #ifdef QUANTIZED_RESIZE_BILINEAR_USE_NEON const int16x8_t y_lerpsx = vmovq_n_s16(ys_ilerp); const uint8x8_t x0x7 = ComputeLerpx8Tmpl<RES, 0, 0, 1, 0, 2, 0, 3, 0, 4, 0, 5, 0, 6, 0, 7, 0>( ys_input_lower_ptr, ys_input_upper_ptr, &xs.lower[x_start], &xs.upper[x_start], &xs.ilerp[x_start], y_lerpsx); vst1_u8(reinterpret_cast<uint8_t*>(output_y_ptr + x_start), x0x7); #else for (int x = x_start; x < x_start + 8; ++x) { OutputLerpForChannels<RES, quint8, int16, int16>( xs, x, ys_ilerp, 1, min, max, ys_input_lower_ptr, ys_input_upper_ptr, output_y_ptr); } #endif } template <int RES> inline void OutputLerp8x8x3(const InterpolationCache<int16>& xs, const int64 x_start, const int16 ys_ilerp, const float min, const float max, const quint8* const ys_input_lower_ptr, const quint8* const ys_input_upper_ptr, quint8* output_y_ptr) { #ifdef QUANTIZED_RESIZE_BILINEAR_USE_NEON const int16x8_t y_lerpsx = vmovq_n_s16(ys_ilerp); const uint8x8_t x0c0x2c1 = ComputeLerpx8Tmpl<RES, 0, 0, 0, 1, 0, 2, 1, 0, 1, 1, 1, 2, 2, 0, 2, 1>( ys_input_lower_ptr, ys_input_upper_ptr, &xs.lower[x_start], &xs.upper[x_start], &xs.ilerp[x_start], y_lerpsx); vst1_u8(reinterpret_cast<uint8_t*>(output_y_ptr + x_start * 3), x0c0x2c1); const uint8x8_t x2c2x5c0 = ComputeLerpx8Tmpl<RES, 2, 2, 3, 0, 3, 1, 3, 2, 4, 0, 4, 1, 4, 2, 5, 0>( ys_input_lower_ptr, ys_input_upper_ptr, &xs.lower[x_start], &xs.upper[x_start], &xs.ilerp[x_start], y_lerpsx); vst1_u8(reinterpret_cast<uint8_t*>(output_y_ptr + x_start * 3 + 8), x2c2x5c0); const uint8x8_t x5c1x7c2 = ComputeLerpx8Tmpl<RES, 5, 1, 5, 2, 6, 0, 6, 1, 6, 2, 7, 0, 7, 1, 7, 2>( ys_input_lower_ptr, ys_input_upper_ptr, &xs.lower[x_start], &xs.upper[x_start], &xs.ilerp[x_start], 
y_lerpsx); vst1_u8(reinterpret_cast<uint8_t*>(output_y_ptr + x_start * 3 + 16), x5c1x7c2); #else for (int x = x_start; x < x_start + 8; ++x) { OutputLerpForChannels<RES, quint8, int16, int16>( xs, x, ys_ilerp, 3, min, max, ys_input_lower_ptr, ys_input_upper_ptr, output_y_ptr); } #endif } template <int RESOLUTION> inline void OutputLerp32x4x1(const InterpolationCache<int32>& xs, const int64 x_start, const int32 ys_ilerp, const float min, const float max, const qint32* const ys_input_lower_ptr, const qint32* const ys_input_upper_ptr, qint32* output_y_ptr) { #ifdef QUANTIZED_RESIZE_BILINEAR_USE_NEON const int64 xs_lower0 = xs.lower[x_start]; const int64 xs_upper0 = xs.upper[x_start]; const int32* const xs_ilerp0 = &xs.ilerp[x_start]; const int64 xs_lower1 = xs.lower[x_start + 1]; const int64 xs_upper1 = xs.upper[x_start + 1]; const int64 xs_lower2 = xs.lower[x_start + 2]; const int64 xs_upper2 = xs.upper[x_start + 2]; const int32* const xs_ilerp2 = &xs.ilerp[x_start + 2]; const int64 xs_lower3 = xs.lower[x_start + 3]; const int64 xs_upper3 = xs.upper[x_start + 3]; const int32x2_t y_lerpsx = vmov_n_s32(ys_ilerp); const int32x2_t x0x1 = ComputeLerpx2<RESOLUTION, false>( ys_input_lower_ptr + xs_lower0, ys_input_lower_ptr + xs_upper0, ys_input_upper_ptr + xs_lower0, ys_input_upper_ptr + xs_upper0, ys_input_lower_ptr + xs_lower1, ys_input_lower_ptr + xs_upper1, ys_input_upper_ptr + xs_lower1, ys_input_upper_ptr + xs_upper1, xs_ilerp0, y_lerpsx); const int32x2_t x1x2 = ComputeLerpx2<RESOLUTION, false>( ys_input_lower_ptr + xs_lower2, ys_input_lower_ptr + xs_upper2, ys_input_upper_ptr + xs_lower2, ys_input_upper_ptr + xs_upper2, ys_input_lower_ptr + xs_lower3, ys_input_lower_ptr + xs_upper3, ys_input_upper_ptr + xs_lower3, ys_input_upper_ptr + xs_upper3, xs_ilerp2, y_lerpsx); const int32x4_t x0x1x2x3 = vcombine_s32(x0x1, x1x2); vst1q_s32(reinterpret_cast<int32*>(output_y_ptr + x_start), x0x1x2x3); #else for (int x = x_start; x < x_start + 4; ++x) { 
OutputLerpForChannels<RESOLUTION, qint32, int32, int64>( xs, x, ys_ilerp, 1, min, max, ys_input_lower_ptr, ys_input_upper_ptr, output_y_ptr); } #endif } template <int RESOLUTION> inline void OutputLerp32x4x3(const InterpolationCache<int32>& xs, const int64 x_start, const int32 ys_ilerp, const float min, const float max, const qint32* const ys_input_lower_ptr, const qint32* const ys_input_upper_ptr, qint32* output_y_ptr) { #ifdef QUANTIZED_RESIZE_BILINEAR_USE_NEON const int64 xs_lower0 = xs.lower[x_start]; const int64 xs_upper0 = xs.upper[x_start]; const int32* const xs_ilerp0 = &xs.ilerp[x_start]; const int64 xs_lower1 = xs.lower[x_start + 1]; const int64 xs_upper1 = xs.upper[x_start + 1]; const int32* const xs_ilerp1 = &xs.ilerp[x_start + 1]; const int64 xs_lower2 = xs.lower[x_start + 2]; const int64 xs_upper2 = xs.upper[x_start + 2]; const int32* const xs_ilerp2 = &xs.ilerp[x_start + 2]; const int64 xs_lower3 = xs.lower[x_start + 3]; const int64 xs_upper3 = xs.upper[x_start + 3]; const int32* const xs_ilerp3 = &xs.ilerp[x_start + 3]; const int32x2_t y_lerpsx = vmov_n_s32(ys_ilerp); const int32x2_t x0c0x0c1 = ComputeLerpx2<RESOLUTION, true>( ys_input_lower_ptr + xs_lower0, ys_input_lower_ptr + xs_upper0, ys_input_upper_ptr + xs_lower0, ys_input_upper_ptr + xs_upper0, ys_input_lower_ptr + xs_lower0 + 1, ys_input_lower_ptr + xs_upper0 + 1, ys_input_upper_ptr + xs_lower0 + 1, ys_input_upper_ptr + xs_upper0 + 1, xs_ilerp0, y_lerpsx); const int32x2_t x0c2x1c0 = ComputeLerpx2<RESOLUTION, false>( ys_input_lower_ptr + xs_lower0 + 2, ys_input_lower_ptr + xs_upper0 + 2, ys_input_upper_ptr + xs_lower0 + 2, ys_input_upper_ptr + xs_upper0 + 2, ys_input_lower_ptr + xs_lower1, ys_input_lower_ptr + xs_upper1, ys_input_upper_ptr + xs_lower1, ys_input_upper_ptr + xs_upper1, xs_ilerp0, y_lerpsx); const int32x2_t x1c1x1c2 = ComputeLerpx2<RESOLUTION, true>( ys_input_lower_ptr + xs_lower1 + 1, ys_input_lower_ptr + xs_upper1 + 1, ys_input_upper_ptr + xs_lower1 + 1, ys_input_upper_ptr + 
xs_upper1 + 1, ys_input_lower_ptr + xs_lower1 + 2, ys_input_lower_ptr + xs_upper1 + 2, ys_input_upper_ptr + xs_lower1 + 2, ys_input_upper_ptr + xs_upper1 + 2, xs_ilerp1, y_lerpsx); const int32x2_t x2c0x2c1 = ComputeLerpx2<RESOLUTION, true>( ys_input_lower_ptr + xs_lower2, ys_input_lower_ptr + xs_upper2, ys_input_upper_ptr + xs_lower2, ys_input_upper_ptr + xs_upper2, ys_input_lower_ptr + xs_lower2 + 1, ys_input_lower_ptr + xs_upper2 + 1, ys_input_upper_ptr + xs_lower2 + 1, ys_input_upper_ptr + xs_upper2 + 1, xs_ilerp2, y_lerpsx); const int32x2_t x2c2x3c0 = ComputeLerpx2<RESOLUTION, false>( ys_input_lower_ptr + xs_lower2 + 2, ys_input_lower_ptr + xs_upper2 + 2, ys_input_upper_ptr + xs_lower2 + 2, ys_input_upper_ptr + xs_upper2 + 2, ys_input_lower_ptr + xs_lower3, ys_input_lower_ptr + xs_upper3, ys_input_upper_ptr + xs_lower3, ys_input_upper_ptr + xs_upper3, xs_ilerp2, y_lerpsx); const int32x2_t x3c1x3c2 = ComputeLerpx2<RESOLUTION, true>( ys_input_lower_ptr + xs_lower3 + 1, ys_input_lower_ptr + xs_upper3 + 1, ys_input_upper_ptr + xs_lower3 + 1, ys_input_upper_ptr + xs_upper3 + 1, ys_input_lower_ptr + xs_lower3 + 2, ys_input_lower_ptr + xs_upper3 + 2, ys_input_upper_ptr + xs_lower3 + 2, ys_input_upper_ptr + xs_upper3 + 2, xs_ilerp3, y_lerpsx); const int32x4_t x0c0x0c1x0c2x1c0 = vcombine_s32(x0c0x0c1, x0c2x1c0); const int32x4_t x1c1x1c2x2c0x2c1 = vcombine_s32(x1c1x1c2, x2c0x2c1); const int32x4_t x2c2x3c0x3c1x3c2 = vcombine_s32(x2c2x3c0, x3c1x3c2); vst1q_s32(reinterpret_cast<int32*>(output_y_ptr + x_start * 3), x0c0x0c1x0c2x1c0); vst1q_s32(reinterpret_cast<int32*>(output_y_ptr + x_start * 3 + 4), x1c1x1c2x2c0x2c1); vst1q_s32(reinterpret_cast<int32*>(output_y_ptr + x_start * 3 + 8), x2c2x3c0x3c1x3c2); #else for (int x = x_start; x < x_start + 4; ++x) { OutputLerpForChannels<RESOLUTION, qint32, int32, int64>( xs, x, ys_ilerp, 3, min, max, ys_input_lower_ptr, ys_input_upper_ptr, output_y_ptr); } #endif } template <typename T> void ResizeImageReference(typename TTypes<T, 
4>::ConstTensor images, const int batch_size, const int64 in_height, const int64 in_width, const int64 out_height, const int64 out_width, const int channels, const float height_scale, const float width_scale, const float in_min, const float in_max, const bool half_pixel_centers, typename TTypes<T, 4>::Tensor* output) { CHECK_NOTNULL(output); const InterpolationCache<float> xs = BuildLerpCache<float>( out_width, in_width, width_scale, channels, 0, half_pixel_centers); const InterpolationCache<float> ys = BuildLerpCache<float>( out_height, in_height, height_scale, 1, 0, half_pixel_centers); const int64 in_row_size = in_width * channels; const int64 in_batch_num_values = in_height * in_row_size; const int64 out_row_size = out_width * channels; const T* input_b_ptr = images.data(); T* output_y_ptr = output->data(); for (int b = 0; b < batch_size; ++b) { for (int64 y = 0; y < out_height; ++y) { const T* ys_input_lower_ptr = input_b_ptr + ys.lower[y] * in_row_size; const T* ys_input_upper_ptr = input_b_ptr + ys.upper[y] * in_row_size; const float ys_lerp = ys.lerp[y]; for (int64 x = 0; x < out_width; ++x) { const int64 xs_lower = xs.lower[x]; const int64 xs_upper = xs.upper[x]; const float xs_lerp = xs.lerp[x]; for (int c = 0; c < channels; ++c) { const T top_left = ys_input_lower_ptr[xs_lower + c]; const T top_right = ys_input_lower_ptr[xs_upper + c]; const T bottom_left = ys_input_upper_ptr[xs_lower + c]; const T bottom_right = ys_input_upper_ptr[xs_upper + c]; const T val = ComputeLerpReference<T>( top_left, top_right, bottom_left, bottom_right, xs_lerp, ys_lerp, in_min, in_max); output_y_ptr[x * channels + c] = val; } } output_y_ptr += out_row_size; } input_b_ptr += in_batch_num_values; } } template <typename T> void ResizeImage(typename TTypes<T, 4>::ConstTensor images, const int batch_size, const int64 in_height, const int64 in_width, const int64 out_height, const int64 out_width, const int channels, const float height_scale, const float width_scale, const float 
in_min, const float in_max, const bool half_pixel_centers, typename TTypes<T, 4>::Tensor* output) { ResizeImageReference<T>(images, batch_size, in_height, in_width, out_height, out_width, channels, height_scale, width_scale, in_min, in_max, half_pixel_centers, output); } template <> void ResizeImage<qint32>(typename TTypes<qint32, 4>::ConstTensor images, const int batch_size, const int64 in_height, const int64 in_width, const int64 out_height, const int64 out_width, const int channels, const float height_scale, const float width_scale, const float in_min, const float in_max, const bool half_pixel_centers, typename TTypes<qint32, 4>::Tensor* output) { // 30 is maximum resolution for signed int. constexpr int RESOLUTION = 30; constexpr int SIMD_STEP = 4; CHECK_NOTNULL(output); const InterpolationCache<int32> xs = BuildLerpCache<int32>(out_width, in_width, width_scale, channels, RESOLUTION, half_pixel_centers); const InterpolationCache<int32> ys = BuildLerpCache<int32>( out_height, in_height, height_scale, 1, RESOLUTION, half_pixel_centers); const int64 in_row_size = in_width * channels; const int64 in_batch_num_values = in_height * in_row_size; const int64 out_row_size = out_width * channels; const qint32* input_b_ptr = images.data(); qint32* output_y_ptr = output->data(); for (int b = 0; b < batch_size; ++b) { for (int64 y = 0; y < out_height; ++y) { const qint32* ys_input_lower_ptr = input_b_ptr + ys.lower[y] * in_row_size; const qint32* ys_input_upper_ptr = input_b_ptr + ys.upper[y] * in_row_size; const int32 ys_ilerp = ys.ilerp[y]; // Optimized for channels == 1 or channels == 3 as this // is typical channels. 
int64 x = 0; if (channels == 1) { for (; x < out_width - SIMD_STEP + 1; x += SIMD_STEP) { OutputLerp32x4x1<RESOLUTION>(xs, x, ys_ilerp, in_min, in_max, ys_input_lower_ptr, ys_input_upper_ptr, output_y_ptr); } } else if (channels == 3) { for (; x < out_width - SIMD_STEP + 1; x += SIMD_STEP) { OutputLerp32x4x3<RESOLUTION>(xs, x, ys_ilerp, in_min, in_max, ys_input_lower_ptr, ys_input_upper_ptr, output_y_ptr); } } for (; x < out_width; ++x) { OutputLerpForChannels<RESOLUTION, qint32, int32, int64>( xs, x, ys_ilerp, channels, in_min, in_max, ys_input_lower_ptr, ys_input_upper_ptr, output_y_ptr); } output_y_ptr += out_row_size; } input_b_ptr += in_batch_num_values; } } template <> void ResizeImage<quint8>(typename TTypes<quint8, 4>::ConstTensor images, const int batch_size, const int64 in_height, const int64 in_width, const int64 out_height, const int64 out_width, const int channels, const float height_scale, const float width_scale, const float in_min, const float in_max, const bool half_pixel_centers, typename TTypes<quint8, 4>::Tensor* output) { // 7 is maximum resolution for unsigned byte. 
constexpr int RESOLUTION = 7; constexpr int SIMD_STEP = 8; CHECK_NOTNULL(output); const InterpolationCache<int16> xs = BuildLerpCache<int16>(out_width, in_width, width_scale, channels, RESOLUTION, half_pixel_centers); const InterpolationCache<int16> ys = BuildLerpCache<int16>( out_height, in_height, height_scale, 1, RESOLUTION, half_pixel_centers); const int64 in_row_size = in_width * channels; const int64 in_batch_num_values = in_height * in_row_size; const int64 out_row_size = out_width * channels; const quint8* input_b_ptr = images.data(); quint8* output_y_ptr = output->data(); for (int b = 0; b < batch_size; ++b) { for (int64 y = 0; y < out_height; ++y) { const quint8* ys_input_lower_ptr = input_b_ptr + ys.lower[y] * in_row_size; const quint8* ys_input_upper_ptr = input_b_ptr + ys.upper[y] * in_row_size; const int32 ys_ilerp = ys.ilerp[y]; // Optimized for channels == 1 or channels == 3 as this // is typical channels. // TODO(satok): Support more generic NEON optimized implementation // for different channels. 
int64 x = 0; if (channels == 1) { for (; x < out_width - SIMD_STEP + 1; x += SIMD_STEP) { OutputLerp8x8x1<RESOLUTION>(xs, x, ys_ilerp, in_min, in_max, ys_input_lower_ptr, ys_input_upper_ptr, output_y_ptr); } } else if (channels == 3) { for (; x < out_width - SIMD_STEP + 1; x += SIMD_STEP) { OutputLerp8x8x3<RESOLUTION>(xs, x, ys_ilerp, in_min, in_max, ys_input_lower_ptr, ys_input_upper_ptr, output_y_ptr); } } for (; x < out_width; ++x) { OutputLerpForChannels<RESOLUTION, quint8, int16, int16>( xs, x, ys_ilerp, channels, in_min, in_max, ys_input_lower_ptr, ys_input_upper_ptr, output_y_ptr); } output_y_ptr += out_row_size; } input_b_ptr += in_batch_num_values; } } template <typename T> void ResizeBilinear(const typename TTypes<T, 4>::ConstTensor& images, const float height_scale, const float width_scale, const float in_min, const float in_max, const bool half_pixel_centers, typename TTypes<T, 4>::Tensor* output) { CHECK_NOTNULL(output); const int batch_size = images.dimension(0); const int64 in_height = images.dimension(1); const int64 in_width = images.dimension(2); const int channels = images.dimension(3); const int64 out_height = output->dimension(1); const int64 out_width = output->dimension(2); // Handle no-op resizes efficiently. 
if (out_height == in_height && out_width == in_width) { *output = images.template cast<T>(); return; } if (USE_REFERENCE) { ResizeImageReference<T>(images, batch_size, in_height, in_width, out_height, out_width, channels, height_scale, width_scale, in_min, in_max, half_pixel_centers, output); } else { ResizeImage<T>(images, batch_size, in_height, in_width, out_height, out_width, channels, height_scale, width_scale, in_min, in_max, half_pixel_centers, output); } } } // namespace template <class T> class QuantizedResizeBilinearOp : public OpKernel { public: explicit QuantizedResizeBilinearOp(OpKernelConstruction* context) : OpKernel(context) { OP_REQUIRES_OK(context, context->GetAttr("align_corners", &align_corners_)); OP_REQUIRES_OK( context, context->GetAttr("half_pixel_centers", &half_pixel_centers_)); } void Compute(OpKernelContext* context) override { const auto& in_min_tensor = context->input(2); OP_REQUIRES(context, TensorShapeUtils::IsScalar(in_min_tensor.shape()), errors::InvalidArgument("min must be a scalar")); const float in_min = in_min_tensor.flat<float>()(0); const auto& in_max_tensor = context->input(3); OP_REQUIRES(context, TensorShapeUtils::IsScalar(in_max_tensor.shape()), errors::InvalidArgument("max must be a scalar")); const float in_max = in_max_tensor.flat<float>()(0); ImageResizerState st(align_corners_, false); st.ValidateAndCreateOutput(context); if (!context->status().ok()) return; // Return if the output is empty. 
if (st.output->NumElements() == 0) return; typename TTypes<T, 4>::ConstTensor image_data( context->input(0).tensor<T, 4>()); typename TTypes<T, 4>::Tensor output_data(st.output->tensor<T, 4>()); ResizeBilinear<T>(image_data, st.height_scale, st.width_scale, in_min, in_max, half_pixel_centers_, &output_data); Tensor* out_min = nullptr; OP_REQUIRES_OK(context, context->allocate_output(1, {}, &out_min)); out_min->flat<float>()(0) = in_min; Tensor* out_max = nullptr; OP_REQUIRES_OK(context, context->allocate_output(2, {}, &out_max)); out_max->flat<float>()(0) = in_max; } private: bool align_corners_; bool half_pixel_centers_; TF_DISALLOW_COPY_AND_ASSIGN(QuantizedResizeBilinearOp<T>); }; #define REGISTER_CPU_KERNEL(type) \ REGISTER_KERNEL_BUILDER(Name("QuantizedResizeBilinear") \ .Device(DEVICE_CPU) \ .HostMemory("size") \ .TypeConstraint<type>("T"), \ QuantizedResizeBilinearOp<type>) REGISTER_CPU_KERNEL(::tensorflow::quint8); REGISTER_CPU_KERNEL(::tensorflow::qint32); REGISTER_CPU_KERNEL(float); } // namespace tensorflow
void Compute(OpKernelContext* context) override { const float in_min = context->input(2).flat<float>()(0); const float in_max = context->input(3).flat<float>()(0); ImageResizerState st(align_corners_, false); st.ValidateAndCreateOutput(context); if (!context->status().ok()) return; // Return if the output is empty. if (st.output->NumElements() == 0) return; typename TTypes<T, 4>::ConstTensor image_data( context->input(0).tensor<T, 4>()); typename TTypes<T, 4>::Tensor output_data(st.output->tensor<T, 4>()); ResizeBilinear<T>(image_data, st.height_scale, st.width_scale, in_min, in_max, half_pixel_centers_, &output_data); Tensor* out_min = nullptr; OP_REQUIRES_OK(context, context->allocate_output(1, {}, &out_min)); out_min->flat<float>()(0) = in_min; Tensor* out_max = nullptr; OP_REQUIRES_OK(context, context->allocate_output(2, {}, &out_max)); out_max->flat<float>()(0) = in_max; }
void Compute(OpKernelContext* context) override { const auto& in_min_tensor = context->input(2); OP_REQUIRES(context, TensorShapeUtils::IsScalar(in_min_tensor.shape()), errors::InvalidArgument("min must be a scalar")); const float in_min = in_min_tensor.flat<float>()(0); const auto& in_max_tensor = context->input(3); OP_REQUIRES(context, TensorShapeUtils::IsScalar(in_max_tensor.shape()), errors::InvalidArgument("max must be a scalar")); const float in_max = in_max_tensor.flat<float>()(0); ImageResizerState st(align_corners_, false); st.ValidateAndCreateOutput(context); if (!context->status().ok()) return; // Return if the output is empty. if (st.output->NumElements() == 0) return; typename TTypes<T, 4>::ConstTensor image_data( context->input(0).tensor<T, 4>()); typename TTypes<T, 4>::Tensor output_data(st.output->tensor<T, 4>()); ResizeBilinear<T>(image_data, st.height_scale, st.width_scale, in_min, in_max, half_pixel_centers_, &output_data); Tensor* out_min = nullptr; OP_REQUIRES_OK(context, context->allocate_output(1, {}, &out_min)); out_min->flat<float>()(0) = in_min; Tensor* out_max = nullptr; OP_REQUIRES_OK(context, context->allocate_output(2, {}, &out_max)); out_max->flat<float>()(0) = in_max; }
{'added': [(705, ' const auto& in_min_tensor = context->input(2);'), (706, ' OP_REQUIRES(context, TensorShapeUtils::IsScalar(in_min_tensor.shape()),'), (707, ' errors::InvalidArgument("min must be a scalar"));'), (708, ' const float in_min = in_min_tensor.flat<float>()(0);'), (709, ' const auto& in_max_tensor = context->input(3);'), (710, ' OP_REQUIRES(context, TensorShapeUtils::IsScalar(in_max_tensor.shape()),'), (711, ' errors::InvalidArgument("max must be a scalar"));'), (712, ' const float in_max = in_max_tensor.flat<float>()(0);')], 'deleted': [(705, ' const float in_min = context->input(2).flat<float>()(0);'), (706, ' const float in_max = context->input(3).flat<float>()(0);')]}
8
2
604
6,036
19
249
3
https://github.com/tensorflow/tensorflow
CVE-2021-29537
CWE-787
3,250
sm_make_chunk.c
C
sctp_verify_asconf
/* SCTP kernel implementation * (C) Copyright IBM Corp. 2001, 2004 * Copyright (c) 1999-2000 Cisco, Inc. * Copyright (c) 1999-2001 Motorola, Inc. * Copyright (c) 2001-2002 Intel Corp. * * This file is part of the SCTP kernel implementation * * These functions work with the state functions in sctp_sm_statefuns.c * to implement the state operations. These functions implement the * steps which require modifying existing data structures. * * This SCTP implementation is free software; * you can redistribute it and/or modify it under the terms of * the GNU General Public License as published by * the Free Software Foundation; either version 2, or (at your option) * any later version. * * This SCTP implementation is distributed in the hope that it * will be useful, but WITHOUT ANY WARRANTY; without even the implied * ************************ * warranty of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. * See the GNU General Public License for more details. * * You should have received a copy of the GNU General Public License * along with GNU CC; see the file COPYING. If not, see * <http://www.gnu.org/licenses/>. * * Please send any bug reports or fixes you make to the * email address(es): * lksctp developers <linux-sctp@vger.kernel.org> * * Written or modified by: * La Monte H.P. Yarroll <piggy@acm.org> * Karl Knutson <karl@athena.chicago.il.us> * C. 
Robin <chris@hundredacre.ac.uk> * Jon Grimm <jgrimm@us.ibm.com> * Xingang Guo <xingang.guo@intel.com> * Dajiang Zhang <dajiang.zhang@nokia.com> * Sridhar Samudrala <sri@us.ibm.com> * Daisy Chang <daisyc@us.ibm.com> * Ardelle Fan <ardelle.fan@intel.com> * Kevin Gao <kevin.gao@intel.com> */ #define pr_fmt(fmt) KBUILD_MODNAME ": " fmt #include <linux/types.h> #include <linux/kernel.h> #include <linux/ip.h> #include <linux/ipv6.h> #include <linux/net.h> #include <linux/inet.h> #include <linux/scatterlist.h> #include <linux/crypto.h> #include <linux/slab.h> #include <net/sock.h> #include <linux/skbuff.h> #include <linux/random.h> /* for get_random_bytes */ #include <net/sctp/sctp.h> #include <net/sctp/sm.h> static struct sctp_chunk *sctp_make_control(const struct sctp_association *asoc, __u8 type, __u8 flags, int paylen); static struct sctp_chunk *sctp_make_data(const struct sctp_association *asoc, __u8 flags, int paylen); static struct sctp_chunk *_sctp_make_chunk(const struct sctp_association *asoc, __u8 type, __u8 flags, int paylen); static sctp_cookie_param_t *sctp_pack_cookie(const struct sctp_endpoint *ep, const struct sctp_association *asoc, const struct sctp_chunk *init_chunk, int *cookie_len, const __u8 *raw_addrs, int addrs_len); static int sctp_process_param(struct sctp_association *asoc, union sctp_params param, const union sctp_addr *peer_addr, gfp_t gfp); static void *sctp_addto_param(struct sctp_chunk *chunk, int len, const void *data); static void *sctp_addto_chunk_fixed(struct sctp_chunk *, int len, const void *data); /* Control chunk destructor */ static void sctp_control_release_owner(struct sk_buff *skb) { /*TODO: do memory release */ } static void sctp_control_set_owner_w(struct sctp_chunk *chunk) { struct sctp_association *asoc = chunk->asoc; struct sk_buff *skb = chunk->skb; /* TODO: properly account for control chunks. * To do it right we'll need: * 1) endpoint if association isn't known. * 2) proper memory accounting. 
* * For now don't do anything for now. */ skb->sk = asoc ? asoc->base.sk : NULL; skb->destructor = sctp_control_release_owner; } /* What was the inbound interface for this chunk? */ int sctp_chunk_iif(const struct sctp_chunk *chunk) { struct sctp_af *af; int iif = 0; af = sctp_get_af_specific(ipver2af(ip_hdr(chunk->skb)->version)); if (af) iif = af->skb_iif(chunk->skb); return iif; } /* RFC 2960 3.3.2 Initiation (INIT) (1) * * Note 2: The ECN capable field is reserved for future use of * Explicit Congestion Notification. */ static const struct sctp_paramhdr ecap_param = { SCTP_PARAM_ECN_CAPABLE, cpu_to_be16(sizeof(struct sctp_paramhdr)), }; static const struct sctp_paramhdr prsctp_param = { SCTP_PARAM_FWD_TSN_SUPPORT, cpu_to_be16(sizeof(struct sctp_paramhdr)), }; /* A helper to initialize an op error inside a * provided chunk, as most cause codes will be embedded inside an * abort chunk. */ void sctp_init_cause(struct sctp_chunk *chunk, __be16 cause_code, size_t paylen) { sctp_errhdr_t err; __u16 len; /* Cause code constants are now defined in network order. */ err.cause = cause_code; len = sizeof(sctp_errhdr_t) + paylen; err.length = htons(len); chunk->subh.err_hdr = sctp_addto_chunk(chunk, sizeof(sctp_errhdr_t), &err); } /* A helper to initialize an op error inside a * provided chunk, as most cause codes will be embedded inside an * abort chunk. Differs from sctp_init_cause in that it won't oops * if there isn't enough space in the op error chunk */ static int sctp_init_cause_fixed(struct sctp_chunk *chunk, __be16 cause_code, size_t paylen) { sctp_errhdr_t err; __u16 len; /* Cause code constants are now defined in network order. 
*/ err.cause = cause_code; len = sizeof(sctp_errhdr_t) + paylen; err.length = htons(len); if (skb_tailroom(chunk->skb) < len) return -ENOSPC; chunk->subh.err_hdr = sctp_addto_chunk_fixed(chunk, sizeof(sctp_errhdr_t), &err); return 0; } /* 3.3.2 Initiation (INIT) (1) * * This chunk is used to initiate a SCTP association between two * endpoints. The format of the INIT chunk is shown below: * * 0 1 2 3 * 0 1 2 3 4 5 6 7 8 9 0 1 2 3 4 5 6 7 8 9 0 1 2 3 4 5 6 7 8 9 0 1 * +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+ * | Type = 1 | Chunk Flags | Chunk Length | * +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+ * | Initiate Tag | * +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+ * | Advertised Receiver Window Credit (a_rwnd) | * +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+ * | Number of Outbound Streams | Number of Inbound Streams | * +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+ * | Initial TSN | * +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+ * \ \ * / Optional/Variable-Length Parameters / * \ \ * +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+ * * * The INIT chunk contains the following parameters. Unless otherwise * noted, each parameter MUST only be included once in the INIT chunk. 
* * Fixed Parameters Status * ---------------------------------------------- * Initiate Tag Mandatory * Advertised Receiver Window Credit Mandatory * Number of Outbound Streams Mandatory * Number of Inbound Streams Mandatory * Initial TSN Mandatory * * Variable Parameters Status Type Value * ------------------------------------------------------------- * IPv4 Address (Note 1) Optional 5 * IPv6 Address (Note 1) Optional 6 * Cookie Preservative Optional 9 * Reserved for ECN Capable (Note 2) Optional 32768 (0x8000) * Host Name Address (Note 3) Optional 11 * Supported Address Types (Note 4) Optional 12 */ struct sctp_chunk *sctp_make_init(const struct sctp_association *asoc, const struct sctp_bind_addr *bp, gfp_t gfp, int vparam_len) { struct net *net = sock_net(asoc->base.sk); struct sctp_endpoint *ep = asoc->ep; sctp_inithdr_t init; union sctp_params addrs; size_t chunksize; struct sctp_chunk *retval = NULL; int num_types, addrs_len = 0; struct sctp_sock *sp; sctp_supported_addrs_param_t sat; __be16 types[2]; sctp_adaptation_ind_param_t aiparam; sctp_supported_ext_param_t ext_param; int num_ext = 0; __u8 extensions[3]; sctp_paramhdr_t *auth_chunks = NULL, *auth_hmacs = NULL; /* RFC 2960 3.3.2 Initiation (INIT) (1) * * Note 1: The INIT chunks can contain multiple addresses that * can be IPv4 and/or IPv6 in any combination. */ retval = NULL; /* Convert the provided bind address list to raw format. */ addrs = sctp_bind_addrs_to_raw(bp, &addrs_len, gfp); init.init_tag = htonl(asoc->c.my_vtag); init.a_rwnd = htonl(asoc->rwnd); init.num_outbound_streams = htons(asoc->c.sinit_num_ostreams); init.num_inbound_streams = htons(asoc->c.sinit_max_instreams); init.initial_tsn = htonl(asoc->c.initial_tsn); /* How many address types are needed? 
*/ sp = sctp_sk(asoc->base.sk); num_types = sp->pf->supported_addrs(sp, types); chunksize = sizeof(init) + addrs_len; chunksize += WORD_ROUND(SCTP_SAT_LEN(num_types)); chunksize += sizeof(ecap_param); if (net->sctp.prsctp_enable) chunksize += sizeof(prsctp_param); /* ADDIP: Section 4.2.7: * An implementation supporting this extension [ADDIP] MUST list * the ASCONF,the ASCONF-ACK, and the AUTH chunks in its INIT and * INIT-ACK parameters. */ if (net->sctp.addip_enable) { extensions[num_ext] = SCTP_CID_ASCONF; extensions[num_ext+1] = SCTP_CID_ASCONF_ACK; num_ext += 2; } if (sp->adaptation_ind) chunksize += sizeof(aiparam); chunksize += vparam_len; /* Account for AUTH related parameters */ if (ep->auth_enable) { /* Add random parameter length*/ chunksize += sizeof(asoc->c.auth_random); /* Add HMACS parameter length if any were defined */ auth_hmacs = (sctp_paramhdr_t *)asoc->c.auth_hmacs; if (auth_hmacs->length) chunksize += WORD_ROUND(ntohs(auth_hmacs->length)); else auth_hmacs = NULL; /* Add CHUNKS parameter length */ auth_chunks = (sctp_paramhdr_t *)asoc->c.auth_chunks; if (auth_chunks->length) chunksize += WORD_ROUND(ntohs(auth_chunks->length)); else auth_chunks = NULL; extensions[num_ext] = SCTP_CID_AUTH; num_ext += 1; } /* If we have any extensions to report, account for that */ if (num_ext) chunksize += WORD_ROUND(sizeof(sctp_supported_ext_param_t) + num_ext); /* RFC 2960 3.3.2 Initiation (INIT) (1) * * Note 3: An INIT chunk MUST NOT contain more than one Host * Name address parameter. Moreover, the sender of the INIT * MUST NOT combine any other address types with the Host Name * address in the INIT. The receiver of INIT MUST ignore any * other address types if the Host Name address parameter is * present in the received INIT chunk. * * PLEASE DO NOT FIXME [This version does not support Host Name.] 
*/ retval = sctp_make_control(asoc, SCTP_CID_INIT, 0, chunksize); if (!retval) goto nodata; retval->subh.init_hdr = sctp_addto_chunk(retval, sizeof(init), &init); retval->param_hdr.v = sctp_addto_chunk(retval, addrs_len, addrs.v); /* RFC 2960 3.3.2 Initiation (INIT) (1) * * Note 4: This parameter, when present, specifies all the * address types the sending endpoint can support. The absence * of this parameter indicates that the sending endpoint can * support any address type. */ sat.param_hdr.type = SCTP_PARAM_SUPPORTED_ADDRESS_TYPES; sat.param_hdr.length = htons(SCTP_SAT_LEN(num_types)); sctp_addto_chunk(retval, sizeof(sat), &sat); sctp_addto_chunk(retval, num_types * sizeof(__u16), &types); sctp_addto_chunk(retval, sizeof(ecap_param), &ecap_param); /* Add the supported extensions parameter. Be nice and add this * fist before addiding the parameters for the extensions themselves */ if (num_ext) { ext_param.param_hdr.type = SCTP_PARAM_SUPPORTED_EXT; ext_param.param_hdr.length = htons(sizeof(sctp_supported_ext_param_t) + num_ext); sctp_addto_chunk(retval, sizeof(sctp_supported_ext_param_t), &ext_param); sctp_addto_param(retval, num_ext, extensions); } if (net->sctp.prsctp_enable) sctp_addto_chunk(retval, sizeof(prsctp_param), &prsctp_param); if (sp->adaptation_ind) { aiparam.param_hdr.type = SCTP_PARAM_ADAPTATION_LAYER_IND; aiparam.param_hdr.length = htons(sizeof(aiparam)); aiparam.adaptation_ind = htonl(sp->adaptation_ind); sctp_addto_chunk(retval, sizeof(aiparam), &aiparam); } /* Add SCTP-AUTH chunks to the parameter list */ if (ep->auth_enable) { sctp_addto_chunk(retval, sizeof(asoc->c.auth_random), asoc->c.auth_random); if (auth_hmacs) sctp_addto_chunk(retval, ntohs(auth_hmacs->length), auth_hmacs); if (auth_chunks) sctp_addto_chunk(retval, ntohs(auth_chunks->length), auth_chunks); } nodata: kfree(addrs.v); return retval; } struct sctp_chunk *sctp_make_init_ack(const struct sctp_association *asoc, const struct sctp_chunk *chunk, gfp_t gfp, int unkparam_len) { 
sctp_inithdr_t initack; struct sctp_chunk *retval; union sctp_params addrs; struct sctp_sock *sp; int addrs_len; sctp_cookie_param_t *cookie; int cookie_len; size_t chunksize; sctp_adaptation_ind_param_t aiparam; sctp_supported_ext_param_t ext_param; int num_ext = 0; __u8 extensions[3]; sctp_paramhdr_t *auth_chunks = NULL, *auth_hmacs = NULL, *auth_random = NULL; retval = NULL; /* Note: there may be no addresses to embed. */ addrs = sctp_bind_addrs_to_raw(&asoc->base.bind_addr, &addrs_len, gfp); initack.init_tag = htonl(asoc->c.my_vtag); initack.a_rwnd = htonl(asoc->rwnd); initack.num_outbound_streams = htons(asoc->c.sinit_num_ostreams); initack.num_inbound_streams = htons(asoc->c.sinit_max_instreams); initack.initial_tsn = htonl(asoc->c.initial_tsn); /* FIXME: We really ought to build the cookie right * into the packet instead of allocating more fresh memory. */ cookie = sctp_pack_cookie(asoc->ep, asoc, chunk, &cookie_len, addrs.v, addrs_len); if (!cookie) goto nomem_cookie; /* Calculate the total size of allocation, include the reserved * space for reporting unknown parameters if it is specified. */ sp = sctp_sk(asoc->base.sk); chunksize = sizeof(initack) + addrs_len + cookie_len + unkparam_len; /* Tell peer that we'll do ECN only if peer advertised such cap. 
*/ if (asoc->peer.ecn_capable) chunksize += sizeof(ecap_param); if (asoc->peer.prsctp_capable) chunksize += sizeof(prsctp_param); if (asoc->peer.asconf_capable) { extensions[num_ext] = SCTP_CID_ASCONF; extensions[num_ext+1] = SCTP_CID_ASCONF_ACK; num_ext += 2; } if (sp->adaptation_ind) chunksize += sizeof(aiparam); if (asoc->peer.auth_capable) { auth_random = (sctp_paramhdr_t *)asoc->c.auth_random; chunksize += ntohs(auth_random->length); auth_hmacs = (sctp_paramhdr_t *)asoc->c.auth_hmacs; if (auth_hmacs->length) chunksize += WORD_ROUND(ntohs(auth_hmacs->length)); else auth_hmacs = NULL; auth_chunks = (sctp_paramhdr_t *)asoc->c.auth_chunks; if (auth_chunks->length) chunksize += WORD_ROUND(ntohs(auth_chunks->length)); else auth_chunks = NULL; extensions[num_ext] = SCTP_CID_AUTH; num_ext += 1; } if (num_ext) chunksize += WORD_ROUND(sizeof(sctp_supported_ext_param_t) + num_ext); /* Now allocate and fill out the chunk. */ retval = sctp_make_control(asoc, SCTP_CID_INIT_ACK, 0, chunksize); if (!retval) goto nomem_chunk; /* RFC 2960 6.4 Multi-homed SCTP Endpoints * * An endpoint SHOULD transmit reply chunks (e.g., SACK, * HEARTBEAT ACK, * etc.) to the same destination transport * address from which it received the DATA or control chunk * to which it is replying. * * [INIT ACK back to where the INIT came from.] 
*/ retval->transport = chunk->transport; retval->subh.init_hdr = sctp_addto_chunk(retval, sizeof(initack), &initack); retval->param_hdr.v = sctp_addto_chunk(retval, addrs_len, addrs.v); sctp_addto_chunk(retval, cookie_len, cookie); if (asoc->peer.ecn_capable) sctp_addto_chunk(retval, sizeof(ecap_param), &ecap_param); if (num_ext) { ext_param.param_hdr.type = SCTP_PARAM_SUPPORTED_EXT; ext_param.param_hdr.length = htons(sizeof(sctp_supported_ext_param_t) + num_ext); sctp_addto_chunk(retval, sizeof(sctp_supported_ext_param_t), &ext_param); sctp_addto_param(retval, num_ext, extensions); } if (asoc->peer.prsctp_capable) sctp_addto_chunk(retval, sizeof(prsctp_param), &prsctp_param); if (sp->adaptation_ind) { aiparam.param_hdr.type = SCTP_PARAM_ADAPTATION_LAYER_IND; aiparam.param_hdr.length = htons(sizeof(aiparam)); aiparam.adaptation_ind = htonl(sp->adaptation_ind); sctp_addto_chunk(retval, sizeof(aiparam), &aiparam); } if (asoc->peer.auth_capable) { sctp_addto_chunk(retval, ntohs(auth_random->length), auth_random); if (auth_hmacs) sctp_addto_chunk(retval, ntohs(auth_hmacs->length), auth_hmacs); if (auth_chunks) sctp_addto_chunk(retval, ntohs(auth_chunks->length), auth_chunks); } /* We need to remove the const qualifier at this point. */ retval->asoc = (struct sctp_association *) asoc; nomem_chunk: kfree(cookie); nomem_cookie: kfree(addrs.v); return retval; } /* 3.3.11 Cookie Echo (COOKIE ECHO) (10): * * This chunk is used only during the initialization of an association. * It is sent by the initiator of an association to its peer to complete * the initialization process. This chunk MUST precede any DATA chunk * sent within the association, but MAY be bundled with one or more DATA * chunks in the same packet. 
* * 0 1 2 3 * 0 1 2 3 4 5 6 7 8 9 0 1 2 3 4 5 6 7 8 9 0 1 2 3 4 5 6 7 8 9 0 1 * +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+ * | Type = 10 |Chunk Flags | Length | * +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+ * / Cookie / * \ \ * +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+ * * Chunk Flags: 8 bit * * Set to zero on transmit and ignored on receipt. * * Length: 16 bits (unsigned integer) * * Set to the size of the chunk in bytes, including the 4 bytes of * the chunk header and the size of the Cookie. * * Cookie: variable size * * This field must contain the exact cookie received in the * State Cookie parameter from the previous INIT ACK. * * An implementation SHOULD make the cookie as small as possible * to insure interoperability. */ struct sctp_chunk *sctp_make_cookie_echo(const struct sctp_association *asoc, const struct sctp_chunk *chunk) { struct sctp_chunk *retval; void *cookie; int cookie_len; cookie = asoc->peer.cookie; cookie_len = asoc->peer.cookie_len; /* Build a cookie echo chunk. */ retval = sctp_make_control(asoc, SCTP_CID_COOKIE_ECHO, 0, cookie_len); if (!retval) goto nodata; retval->subh.cookie_hdr = sctp_addto_chunk(retval, cookie_len, cookie); /* RFC 2960 6.4 Multi-homed SCTP Endpoints * * An endpoint SHOULD transmit reply chunks (e.g., SACK, * HEARTBEAT ACK, * etc.) to the same destination transport * address from which it * received the DATA or control chunk * to which it is replying. * * [COOKIE ECHO back to where the INIT ACK came from.] */ if (chunk) retval->transport = chunk->transport; nodata: return retval; } /* 3.3.12 Cookie Acknowledgement (COOKIE ACK) (11): * * This chunk is used only during the initialization of an * association. It is used to acknowledge the receipt of a COOKIE * ECHO chunk. This chunk MUST precede any DATA or SACK chunk sent * within the association, but MAY be bundled with one or more DATA * chunks or SACK chunk in the same SCTP packet. 
* * 0 1 2 3 * 0 1 2 3 4 5 6 7 8 9 0 1 2 3 4 5 6 7 8 9 0 1 2 3 4 5 6 7 8 9 0 1 * +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+ * | Type = 11 |Chunk Flags | Length = 4 | * +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+ * * Chunk Flags: 8 bits * * Set to zero on transmit and ignored on receipt. */ struct sctp_chunk *sctp_make_cookie_ack(const struct sctp_association *asoc, const struct sctp_chunk *chunk) { struct sctp_chunk *retval; retval = sctp_make_control(asoc, SCTP_CID_COOKIE_ACK, 0, 0); /* RFC 2960 6.4 Multi-homed SCTP Endpoints * * An endpoint SHOULD transmit reply chunks (e.g., SACK, * HEARTBEAT ACK, * etc.) to the same destination transport * address from which it * received the DATA or control chunk * to which it is replying. * * [COOKIE ACK back to where the COOKIE ECHO came from.] */ if (retval && chunk) retval->transport = chunk->transport; return retval; } /* * Appendix A: Explicit Congestion Notification: * CWR: * * RFC 2481 details a specific bit for a sender to send in the header of * its next outbound TCP segment to indicate to its peer that it has * reduced its congestion window. This is termed the CWR bit. For * SCTP the same indication is made by including the CWR chunk. * This chunk contains one data element, i.e. the TSN number that * was sent in the ECNE chunk. This element represents the lowest * TSN number in the datagram that was originally marked with the * CE bit. * * 0 1 2 3 * 0 1 2 3 4 5 6 7 8 9 0 1 2 3 4 5 6 7 8 9 0 1 2 3 4 5 6 7 8 9 0 1 * +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+ * | Chunk Type=13 | Flags=00000000| Chunk Length = 8 | * +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+ * | Lowest TSN Number | * +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+ * * Note: The CWR is considered a Control chunk. 
*/ struct sctp_chunk *sctp_make_cwr(const struct sctp_association *asoc, const __u32 lowest_tsn, const struct sctp_chunk *chunk) { struct sctp_chunk *retval; sctp_cwrhdr_t cwr; cwr.lowest_tsn = htonl(lowest_tsn); retval = sctp_make_control(asoc, SCTP_CID_ECN_CWR, 0, sizeof(sctp_cwrhdr_t)); if (!retval) goto nodata; retval->subh.ecn_cwr_hdr = sctp_addto_chunk(retval, sizeof(cwr), &cwr); /* RFC 2960 6.4 Multi-homed SCTP Endpoints * * An endpoint SHOULD transmit reply chunks (e.g., SACK, * HEARTBEAT ACK, * etc.) to the same destination transport * address from which it * received the DATA or control chunk * to which it is replying. * * [Report a reduced congestion window back to where the ECNE * came from.] */ if (chunk) retval->transport = chunk->transport; nodata: return retval; } /* Make an ECNE chunk. This is a congestion experienced report. */ struct sctp_chunk *sctp_make_ecne(const struct sctp_association *asoc, const __u32 lowest_tsn) { struct sctp_chunk *retval; sctp_ecnehdr_t ecne; ecne.lowest_tsn = htonl(lowest_tsn); retval = sctp_make_control(asoc, SCTP_CID_ECN_ECNE, 0, sizeof(sctp_ecnehdr_t)); if (!retval) goto nodata; retval->subh.ecne_hdr = sctp_addto_chunk(retval, sizeof(ecne), &ecne); nodata: return retval; } /* Make a DATA chunk for the given association from the provided * parameters. However, do not populate the data payload. */ struct sctp_chunk *sctp_make_datafrag_empty(struct sctp_association *asoc, const struct sctp_sndrcvinfo *sinfo, int data_len, __u8 flags, __u16 ssn) { struct sctp_chunk *retval; struct sctp_datahdr dp; int chunk_len; /* We assign the TSN as LATE as possible, not here when * creating the chunk. */ dp.tsn = 0; dp.stream = htons(sinfo->sinfo_stream); dp.ppid = sinfo->sinfo_ppid; /* Set the flags for an unordered send. 
*/ if (sinfo->sinfo_flags & SCTP_UNORDERED) { flags |= SCTP_DATA_UNORDERED; dp.ssn = 0; } else dp.ssn = htons(ssn); chunk_len = sizeof(dp) + data_len; retval = sctp_make_data(asoc, flags, chunk_len); if (!retval) goto nodata; retval->subh.data_hdr = sctp_addto_chunk(retval, sizeof(dp), &dp); memcpy(&retval->sinfo, sinfo, sizeof(struct sctp_sndrcvinfo)); nodata: return retval; } /* Create a selective ackowledgement (SACK) for the given * association. This reports on which TSN's we've seen to date, * including duplicates and gaps. */ struct sctp_chunk *sctp_make_sack(const struct sctp_association *asoc) { struct sctp_chunk *retval; struct sctp_sackhdr sack; int len; __u32 ctsn; __u16 num_gabs, num_dup_tsns; struct sctp_association *aptr = (struct sctp_association *)asoc; struct sctp_tsnmap *map = (struct sctp_tsnmap *)&asoc->peer.tsn_map; struct sctp_gap_ack_block gabs[SCTP_MAX_GABS]; struct sctp_transport *trans; memset(gabs, 0, sizeof(gabs)); ctsn = sctp_tsnmap_get_ctsn(map); pr_debug("%s: sackCTSNAck sent:0x%x\n", __func__, ctsn); /* How much room is needed in the chunk? */ num_gabs = sctp_tsnmap_num_gabs(map, gabs); num_dup_tsns = sctp_tsnmap_num_dups(map); /* Initialize the SACK header. */ sack.cum_tsn_ack = htonl(ctsn); sack.a_rwnd = htonl(asoc->a_rwnd); sack.num_gap_ack_blocks = htons(num_gabs); sack.num_dup_tsns = htons(num_dup_tsns); len = sizeof(sack) + sizeof(struct sctp_gap_ack_block) * num_gabs + sizeof(__u32) * num_dup_tsns; /* Create the chunk. */ retval = sctp_make_control(asoc, SCTP_CID_SACK, 0, len); if (!retval) goto nodata; /* RFC 2960 6.4 Multi-homed SCTP Endpoints * * An endpoint SHOULD transmit reply chunks (e.g., SACK, * HEARTBEAT ACK, etc.) to the same destination transport * address from which it received the DATA or control chunk to * which it is replying. This rule should also be followed if * the endpoint is bundling DATA chunks together with the * reply chunk. 
* * However, when acknowledging multiple DATA chunks received * in packets from different source addresses in a single * SACK, the SACK chunk may be transmitted to one of the * destination transport addresses from which the DATA or * control chunks being acknowledged were received. * * [BUG: We do not implement the following paragraph. * Perhaps we should remember the last transport we used for a * SACK and avoid that (if possible) if we have seen any * duplicates. --piggy] * * When a receiver of a duplicate DATA chunk sends a SACK to a * multi- homed endpoint it MAY be beneficial to vary the * destination address and not use the source address of the * DATA chunk. The reason being that receiving a duplicate * from a multi-homed endpoint might indicate that the return * path (as specified in the source address of the DATA chunk) * for the SACK is broken. * * [Send to the address from which we last received a DATA chunk.] */ retval->transport = asoc->peer.last_data_from; retval->subh.sack_hdr = sctp_addto_chunk(retval, sizeof(sack), &sack); /* Add the gap ack block information. */ if (num_gabs) sctp_addto_chunk(retval, sizeof(__u32) * num_gabs, gabs); /* Add the duplicate TSN information. */ if (num_dup_tsns) { aptr->stats.idupchunks += num_dup_tsns; sctp_addto_chunk(retval, sizeof(__u32) * num_dup_tsns, sctp_tsnmap_get_dups(map)); } /* Once we have a sack generated, check to see what our sack * generation is, if its 0, reset the transports to 0, and reset * the association generation to 1 * * The idea is that zero is never used as a valid generation for the * association so no transport will match after a wrap event like this, * Until the next sack */ if (++aptr->peer.sack_generation == 0) { list_for_each_entry(trans, &asoc->peer.transport_addr_list, transports) trans->sack_generation = 0; aptr->peer.sack_generation = 1; } nodata: return retval; } /* Make a SHUTDOWN chunk. 
*/ struct sctp_chunk *sctp_make_shutdown(const struct sctp_association *asoc, const struct sctp_chunk *chunk) { struct sctp_chunk *retval; sctp_shutdownhdr_t shut; __u32 ctsn; ctsn = sctp_tsnmap_get_ctsn(&asoc->peer.tsn_map); shut.cum_tsn_ack = htonl(ctsn); retval = sctp_make_control(asoc, SCTP_CID_SHUTDOWN, 0, sizeof(sctp_shutdownhdr_t)); if (!retval) goto nodata; retval->subh.shutdown_hdr = sctp_addto_chunk(retval, sizeof(shut), &shut); if (chunk) retval->transport = chunk->transport; nodata: return retval; } struct sctp_chunk *sctp_make_shutdown_ack(const struct sctp_association *asoc, const struct sctp_chunk *chunk) { struct sctp_chunk *retval; retval = sctp_make_control(asoc, SCTP_CID_SHUTDOWN_ACK, 0, 0); /* RFC 2960 6.4 Multi-homed SCTP Endpoints * * An endpoint SHOULD transmit reply chunks (e.g., SACK, * HEARTBEAT ACK, * etc.) to the same destination transport * address from which it * received the DATA or control chunk * to which it is replying. * * [ACK back to where the SHUTDOWN came from.] */ if (retval && chunk) retval->transport = chunk->transport; return retval; } struct sctp_chunk *sctp_make_shutdown_complete( const struct sctp_association *asoc, const struct sctp_chunk *chunk) { struct sctp_chunk *retval; __u8 flags = 0; /* Set the T-bit if we have no association (vtag will be * reflected) */ flags |= asoc ? 0 : SCTP_CHUNK_FLAG_T; retval = sctp_make_control(asoc, SCTP_CID_SHUTDOWN_COMPLETE, flags, 0); /* RFC 2960 6.4 Multi-homed SCTP Endpoints * * An endpoint SHOULD transmit reply chunks (e.g., SACK, * HEARTBEAT ACK, * etc.) to the same destination transport * address from which it * received the DATA or control chunk * to which it is replying. * * [Report SHUTDOWN COMPLETE back to where the SHUTDOWN ACK * came from.] */ if (retval && chunk) retval->transport = chunk->transport; return retval; } /* Create an ABORT. Note that we set the T bit if we have no * association, except when responding to an INIT (sctpimpguide 2.41). 
*/ struct sctp_chunk *sctp_make_abort(const struct sctp_association *asoc, const struct sctp_chunk *chunk, const size_t hint) { struct sctp_chunk *retval; __u8 flags = 0; /* Set the T-bit if we have no association and 'chunk' is not * an INIT (vtag will be reflected). */ if (!asoc) { if (chunk && chunk->chunk_hdr && chunk->chunk_hdr->type == SCTP_CID_INIT) flags = 0; else flags = SCTP_CHUNK_FLAG_T; } retval = sctp_make_control(asoc, SCTP_CID_ABORT, flags, hint); /* RFC 2960 6.4 Multi-homed SCTP Endpoints * * An endpoint SHOULD transmit reply chunks (e.g., SACK, * HEARTBEAT ACK, * etc.) to the same destination transport * address from which it * received the DATA or control chunk * to which it is replying. * * [ABORT back to where the offender came from.] */ if (retval && chunk) retval->transport = chunk->transport; return retval; } /* Helper to create ABORT with a NO_USER_DATA error. */ struct sctp_chunk *sctp_make_abort_no_data( const struct sctp_association *asoc, const struct sctp_chunk *chunk, __u32 tsn) { struct sctp_chunk *retval; __be32 payload; retval = sctp_make_abort(asoc, chunk, sizeof(sctp_errhdr_t) + sizeof(tsn)); if (!retval) goto no_mem; /* Put the tsn back into network byte order. */ payload = htonl(tsn); sctp_init_cause(retval, SCTP_ERROR_NO_DATA, sizeof(payload)); sctp_addto_chunk(retval, sizeof(payload), (const void *)&payload); /* RFC 2960 6.4 Multi-homed SCTP Endpoints * * An endpoint SHOULD transmit reply chunks (e.g., SACK, * HEARTBEAT ACK, * etc.) to the same destination transport * address from which it * received the DATA or control chunk * to which it is replying. * * [ABORT back to where the offender came from.] */ if (chunk) retval->transport = chunk->transport; no_mem: return retval; } /* Helper to create ABORT with a SCTP_ERROR_USER_ABORT error. 
 */
/* Build a user-initiated ABORT whose cause payload is copied from the
 * caller's msghdr iovec.  paylen comes from userspace; the chunk
 * constructor is responsible for bounding it.  Returns NULL on failure.
 */
struct sctp_chunk *sctp_make_abort_user(const struct sctp_association *asoc,
					const struct msghdr *msg,
					size_t paylen)
{
	struct sctp_chunk *retval;
	void *payload = NULL;
	int err;

	retval = sctp_make_abort(asoc, NULL, sizeof(sctp_errhdr_t) + paylen);
	if (!retval)
		goto err_chunk;

	if (paylen) {
		/* Put the msg_iov together into payload.  */
		payload = kmalloc(paylen, GFP_KERNEL);
		if (!payload)
			goto err_payload;

		err = memcpy_fromiovec(payload, msg->msg_iov, paylen);
		if (err < 0)
			goto err_copy;
	}

	sctp_init_cause(retval, SCTP_ERROR_USER_ABORT, paylen);
	sctp_addto_chunk(retval, paylen, payload);

	if (paylen)
		kfree(payload);

	return retval;

err_copy:
	kfree(payload);
err_payload:
	sctp_chunk_free(retval);
	retval = NULL;
err_chunk:
	return retval;
}

/* Append bytes to the end of a parameter.  Will panic if chunk is not big
 * enough.  When data is NULL the appended bytes are zero-filled, which
 * callers use to reserve padding space.
 */
static void *sctp_addto_param(struct sctp_chunk *chunk, int len,
			      const void *data)
{
	void *target;
	int chunklen = ntohs(chunk->chunk_hdr->length);

	target = skb_put(chunk->skb, len);

	if (data)
		memcpy(target, data, len);
	else
		memset(target, 0, len);

	/* Adjust the chunk length field.  */
	chunk->chunk_hdr->length = htons(chunklen + len);
	chunk->chunk_end = skb_tail_pointer(chunk->skb);

	return target;
}

/* Make an ABORT chunk with a PROTOCOL VIOLATION cause code.
 */
struct sctp_chunk *sctp_make_abort_violation(
	const struct sctp_association *asoc,
	const struct sctp_chunk *chunk,
	const __u8 *payload,
	const size_t paylen)
{
	struct sctp_chunk  *retval;
	struct sctp_paramhdr phdr;

	retval = sctp_make_abort(asoc, chunk, sizeof(sctp_errhdr_t) + paylen
				 + sizeof(sctp_paramhdr_t));
	if (!retval)
		goto end;

	sctp_init_cause(retval, SCTP_ERROR_PROTO_VIOLATION, paylen
			+ sizeof(sctp_paramhdr_t));

	/* Echo back the header of the offending chunk after the payload. */
	phdr.type = htons(chunk->chunk_hdr->type);
	phdr.length = chunk->chunk_hdr->length;
	sctp_addto_chunk(retval, paylen, payload);
	sctp_addto_param(retval, sizeof(sctp_paramhdr_t), &phdr);

end:
	return retval;
}

/* ABORT with a Protocol Violation cause naming the parameter whose
 * length was invalid; the bad paramhdr is appended after the message.
 */
struct sctp_chunk *sctp_make_violation_paramlen(
	const struct sctp_association *asoc,
	const struct sctp_chunk *chunk,
	struct sctp_paramhdr *param)
{
	struct sctp_chunk *retval;
	static const char error[] = "The following parameter had invalid length:";
	size_t payload_len = sizeof(error) + sizeof(sctp_errhdr_t) +
			     sizeof(sctp_paramhdr_t);

	retval = sctp_make_abort(asoc, chunk, payload_len);
	if (!retval)
		goto nodata;

	sctp_init_cause(retval, SCTP_ERROR_PROTO_VIOLATION,
			sizeof(error) + sizeof(sctp_paramhdr_t));
	sctp_addto_chunk(retval, sizeof(error), error);
	sctp_addto_param(retval, sizeof(sctp_paramhdr_t), param);

nodata:
	return retval;
}

/* ABORT with a Protocol Violation cause reporting that max_retrans was
 * exceeded.  (The "max_retans" spelling below is a wire-visible string;
 * it must stay byte-identical for compatibility.)
 */
struct sctp_chunk *sctp_make_violation_max_retrans(
	const struct sctp_association *asoc,
	const struct sctp_chunk *chunk)
{
	struct sctp_chunk *retval;
	static const char error[] = "Association exceeded its max_retans count";
	size_t payload_len = sizeof(error) + sizeof(sctp_errhdr_t);

	retval = sctp_make_abort(asoc, chunk, payload_len);
	if (!retval)
		goto nodata;

	sctp_init_cause(retval, SCTP_ERROR_PROTO_VIOLATION, sizeof(error));
	sctp_addto_chunk(retval, sizeof(error), error);

nodata:
	return retval;
}

/* Make a HEARTBEAT chunk.
 */
struct sctp_chunk *sctp_make_heartbeat(const struct sctp_association *asoc,
				       const struct sctp_transport *transport)
{
	struct sctp_chunk *retval;
	sctp_sender_hb_info_t hbinfo;

	retval = sctp_make_control(asoc, SCTP_CID_HEARTBEAT, 0, sizeof(hbinfo));

	if (!retval)
		goto nodata;

	/* The HEARTBEAT INFO parameter carries the probed address, a
	 * timestamp and a nonce so the matching HEARTBEAT ACK can be
	 * validated and RTT measured.
	 */
	hbinfo.param_hdr.type = SCTP_PARAM_HEARTBEAT_INFO;
	hbinfo.param_hdr.length = htons(sizeof(sctp_sender_hb_info_t));
	hbinfo.daddr = transport->ipaddr;
	hbinfo.sent_at = jiffies;
	hbinfo.hb_nonce = transport->hb_nonce;

	/* Cast away the 'const', as this is just telling the chunk
	 * what transport it belongs to.
	 */
	retval->transport = (struct sctp_transport *) transport;
	retval->subh.hbs_hdr = sctp_addto_chunk(retval, sizeof(hbinfo),
						&hbinfo);

nodata:
	return retval;
}

/* Make a HEARTBEAT ACK chunk echoing the peer's heartbeat payload. */
struct sctp_chunk *sctp_make_heartbeat_ack(const struct sctp_association *asoc,
					   const struct sctp_chunk *chunk,
					   const void *payload,
					   const size_t paylen)
{
	struct sctp_chunk *retval;

	retval = sctp_make_control(asoc, SCTP_CID_HEARTBEAT_ACK, 0, paylen);
	if (!retval)
		goto nodata;

	retval->subh.hbs_hdr = sctp_addto_chunk(retval, paylen, payload);

	/* RFC 2960 6.4 Multi-homed SCTP Endpoints
	 *
	 * An endpoint SHOULD transmit reply chunks (e.g., SACK,
	 * HEARTBEAT ACK, * etc.) to the same destination transport
	 * address from which it * received the DATA or control chunk
	 * to which it is replying.
	 *
	 * [HBACK back to where the HEARTBEAT came from.]
	 */
	if (chunk)
		retval->transport = chunk->transport;

nodata:
	return retval;
}

/* Create an Operation Error chunk with the specified space reserved.
 * This routine can be used for containing multiple causes in the chunk.
 */
static struct sctp_chunk *sctp_make_op_error_space(
	const struct sctp_association *asoc,
	const struct sctp_chunk *chunk,
	size_t size)
{
	struct sctp_chunk *retval;

	retval = sctp_make_control(asoc, SCTP_CID_ERROR, 0,
				   sizeof(sctp_errhdr_t) + size);
	if (!retval)
		goto nodata;

	/* RFC 2960 6.4 Multi-homed SCTP Endpoints
	 *
	 * An endpoint SHOULD transmit reply chunks (e.g., SACK,
	 * HEARTBEAT ACK, etc.) to the same destination transport
	 * address from which it received the DATA or control chunk
	 * to which it is replying.
	 *
	 */
	if (chunk)
		retval->transport = chunk->transport;

nodata:
	return retval;
}

/* Create an Operation Error chunk of a fixed size,
 * specifically, max(asoc->pathmtu, SCTP_DEFAULT_MAXSEGMENT)
 * This is a helper function to allocate an error chunk for
 * those invalid parameter codes in which we may not want
 * to report all the errors, if the incoming chunk is large
 */
static inline struct sctp_chunk *sctp_make_op_error_fixed(
	const struct sctp_association *asoc,
	const struct sctp_chunk *chunk)
{
	size_t size = asoc ? asoc->pathmtu : 0;

	if (!size)
		size = SCTP_DEFAULT_MAXSEGMENT;

	return sctp_make_op_error_space(asoc, chunk, size);
}

/* Create an Operation Error chunk.
 */
struct sctp_chunk *sctp_make_op_error(const struct sctp_association *asoc,
				      const struct sctp_chunk *chunk,
				      __be16 cause_code, const void *payload,
				      size_t paylen, size_t reserve_tail)
{
	struct sctp_chunk *retval;

	retval = sctp_make_op_error_space(asoc, chunk, paylen + reserve_tail);
	if (!retval)
		goto nodata;

	sctp_init_cause(retval, cause_code, paylen + reserve_tail);
	sctp_addto_chunk(retval, paylen, payload);
	/* reserve_tail zero-fills extra space after the cause payload. */
	if (reserve_tail)
		sctp_addto_param(retval, reserve_tail, NULL);

nodata:
	return retval;
}

/* Build an AUTH chunk using the first HMAC the peer asked us to use.
 * The MAC field is zero-filled here; the real digest is computed later
 * over the packet.  Returns NULL if no usable HMAC or on alloc failure.
 */
struct sctp_chunk *sctp_make_auth(const struct sctp_association *asoc)
{
	struct sctp_chunk *retval;
	struct sctp_hmac *hmac_desc;
	struct sctp_authhdr auth_hdr;
	__u8 *hmac;

	/* Get the first hmac that the peer told us to use */
	hmac_desc = sctp_auth_asoc_get_hmac(asoc);
	if (unlikely(!hmac_desc))
		return NULL;

	retval = sctp_make_control(asoc, SCTP_CID_AUTH, 0,
			hmac_desc->hmac_len + sizeof(sctp_authhdr_t));
	if (!retval)
		return NULL;

	auth_hdr.hmac_id = htons(hmac_desc->hmac_id);
	auth_hdr.shkey_id = htons(asoc->active_key_id);

	retval->subh.auth_hdr = sctp_addto_chunk(retval, sizeof(sctp_authhdr_t),
						 &auth_hdr);

	hmac = skb_put(retval->skb, hmac_desc->hmac_len);
	memset(hmac, 0, hmac_desc->hmac_len);

	/* Adjust the chunk header to include the empty MAC */
	retval->chunk_hdr->length =
		htons(ntohs(retval->chunk_hdr->length) + hmac_desc->hmac_len);
	retval->chunk_end = skb_tail_pointer(retval->skb);

	return retval;
}

/********************************************************************
 * 2nd Level Abstractions
 ********************************************************************/

/* Turn an skb into a chunk.
 * FIXME: Eventually move the structure directly inside the skb->cb[].
 *
 * sctpimpguide-05.txt Section 2.8.2
 * M1) Each time a new DATA chunk is transmitted
 * set the 'TSN.Missing.Report' count for that TSN to 0. The
 * 'TSN.Missing.Report' count will be used to determine missing chunks
 * and when to fast retransmit.
* */ struct sctp_chunk *sctp_chunkify(struct sk_buff *skb, const struct sctp_association *asoc, struct sock *sk) { struct sctp_chunk *retval; retval = kmem_cache_zalloc(sctp_chunk_cachep, GFP_ATOMIC); if (!retval) goto nodata; if (!sk) pr_debug("%s: chunkifying skb:%p w/o an sk\n", __func__, skb); INIT_LIST_HEAD(&retval->list); retval->skb = skb; retval->asoc = (struct sctp_association *)asoc; retval->singleton = 1; retval->fast_retransmit = SCTP_CAN_FRTX; /* Polish the bead hole. */ INIT_LIST_HEAD(&retval->transmitted_list); INIT_LIST_HEAD(&retval->frag_list); SCTP_DBG_OBJCNT_INC(chunk); atomic_set(&retval->refcnt, 1); nodata: return retval; } /* Set chunk->source and dest based on the IP header in chunk->skb. */ void sctp_init_addrs(struct sctp_chunk *chunk, union sctp_addr *src, union sctp_addr *dest) { memcpy(&chunk->source, src, sizeof(union sctp_addr)); memcpy(&chunk->dest, dest, sizeof(union sctp_addr)); } /* Extract the source address from a chunk. */ const union sctp_addr *sctp_source(const struct sctp_chunk *chunk) { /* If we have a known transport, use that. */ if (chunk->transport) { return &chunk->transport->ipaddr; } else { /* Otherwise, extract it from the IP header. */ return &chunk->source; } } /* Create a new chunk, setting the type and flags headers from the * arguments, reserving enough space for a 'paylen' byte payload. */ static struct sctp_chunk *_sctp_make_chunk(const struct sctp_association *asoc, __u8 type, __u8 flags, int paylen) { struct sctp_chunk *retval; sctp_chunkhdr_t *chunk_hdr; struct sk_buff *skb; struct sock *sk; /* No need to allocate LL here, as this is only a chunk. */ skb = alloc_skb(WORD_ROUND(sizeof(sctp_chunkhdr_t) + paylen), GFP_ATOMIC); if (!skb) goto nodata; /* Make room for the chunk header. */ chunk_hdr = (sctp_chunkhdr_t *)skb_put(skb, sizeof(sctp_chunkhdr_t)); chunk_hdr->type = type; chunk_hdr->flags = flags; chunk_hdr->length = htons(sizeof(sctp_chunkhdr_t)); sk = asoc ? 
asoc->base.sk : NULL; retval = sctp_chunkify(skb, asoc, sk); if (!retval) { kfree_skb(skb); goto nodata; } retval->chunk_hdr = chunk_hdr; retval->chunk_end = ((__u8 *)chunk_hdr) + sizeof(struct sctp_chunkhdr); /* Determine if the chunk needs to be authenticated */ if (sctp_auth_send_cid(type, asoc)) retval->auth = 1; return retval; nodata: return NULL; } static struct sctp_chunk *sctp_make_data(const struct sctp_association *asoc, __u8 flags, int paylen) { return _sctp_make_chunk(asoc, SCTP_CID_DATA, flags, paylen); } static struct sctp_chunk *sctp_make_control(const struct sctp_association *asoc, __u8 type, __u8 flags, int paylen) { struct sctp_chunk *chunk = _sctp_make_chunk(asoc, type, flags, paylen); if (chunk) sctp_control_set_owner_w(chunk); return chunk; } /* Release the memory occupied by a chunk. */ static void sctp_chunk_destroy(struct sctp_chunk *chunk) { BUG_ON(!list_empty(&chunk->list)); list_del_init(&chunk->transmitted_list); consume_skb(chunk->skb); consume_skb(chunk->auth_chunk); SCTP_DBG_OBJCNT_DEC(chunk); kmem_cache_free(sctp_chunk_cachep, chunk); } /* Possibly, free the chunk. */ void sctp_chunk_free(struct sctp_chunk *chunk) { /* Release our reference on the message tracker. */ if (chunk->msg) sctp_datamsg_put(chunk->msg); sctp_chunk_put(chunk); } /* Grab a reference to the chunk. */ void sctp_chunk_hold(struct sctp_chunk *ch) { atomic_inc(&ch->refcnt); } /* Release a reference to the chunk. */ void sctp_chunk_put(struct sctp_chunk *ch) { if (atomic_dec_and_test(&ch->refcnt)) sctp_chunk_destroy(ch); } /* Append bytes to the end of a chunk. Will panic if chunk is not big * enough. */ void *sctp_addto_chunk(struct sctp_chunk *chunk, int len, const void *data) { void *target; void *padding; int chunklen = ntohs(chunk->chunk_hdr->length); int padlen = WORD_ROUND(chunklen) - chunklen; padding = skb_put(chunk->skb, padlen); target = skb_put(chunk->skb, len); memset(padding, 0, padlen); memcpy(target, data, len); /* Adjust the chunk length field. 
*/ chunk->chunk_hdr->length = htons(chunklen + padlen + len); chunk->chunk_end = skb_tail_pointer(chunk->skb); return target; } /* Append bytes to the end of a chunk. Returns NULL if there isn't sufficient * space in the chunk */ static void *sctp_addto_chunk_fixed(struct sctp_chunk *chunk, int len, const void *data) { if (skb_tailroom(chunk->skb) >= len) return sctp_addto_chunk(chunk, len, data); else return NULL; } /* Append bytes from user space to the end of a chunk. Will panic if * chunk is not big enough. * Returns a kernel err value. */ int sctp_user_addto_chunk(struct sctp_chunk *chunk, int off, int len, struct iovec *data) { __u8 *target; int err = 0; /* Make room in chunk for data. */ target = skb_put(chunk->skb, len); /* Copy data (whole iovec) into chunk */ if ((err = memcpy_fromiovecend(target, data, off, len))) goto out; /* Adjust the chunk length field. */ chunk->chunk_hdr->length = htons(ntohs(chunk->chunk_hdr->length) + len); chunk->chunk_end = skb_tail_pointer(chunk->skb); out: return err; } /* Helper function to assign a TSN if needed. This assumes that both * the data_hdr and association have already been assigned. */ void sctp_chunk_assign_ssn(struct sctp_chunk *chunk) { struct sctp_datamsg *msg; struct sctp_chunk *lchunk; struct sctp_stream *stream; __u16 ssn; __u16 sid; if (chunk->has_ssn) return; /* All fragments will be on the same stream */ sid = ntohs(chunk->subh.data_hdr->stream); stream = &chunk->asoc->ssnmap->out; /* Now assign the sequence number to the entire message. * All fragments must have the same stream sequence number. */ msg = chunk->msg; list_for_each_entry(lchunk, &msg->chunks, frag_list) { if (lchunk->chunk_hdr->flags & SCTP_DATA_UNORDERED) { ssn = 0; } else { if (lchunk->chunk_hdr->flags & SCTP_DATA_LAST_FRAG) ssn = sctp_ssn_next(stream, sid); else ssn = sctp_ssn_peek(stream, sid); } lchunk->subh.data_hdr->ssn = htons(ssn); lchunk->has_ssn = 1; } } /* Helper function to assign a TSN if needed. 
 * This assumes that both
 * the data_hdr and association have already been assigned.
 */
void sctp_chunk_assign_tsn(struct sctp_chunk *chunk)
{
	if (!chunk->has_tsn) {
		/* This is the last possible instant to
		 * assign a TSN.
		 */
		chunk->subh.data_hdr->tsn =
			htonl(sctp_association_get_next_tsn(chunk->asoc));
		chunk->has_tsn = 1;
	}
}

/* Create a CLOSED association to use with an incoming packet.  */
struct sctp_association *sctp_make_temp_asoc(const struct sctp_endpoint *ep,
					     struct sctp_chunk *chunk,
					     gfp_t gfp)
{
	struct sctp_association *asoc;
	struct sk_buff *skb;
	sctp_scope_t scope;
	struct sctp_af *af;

	/* Create the bare association.  */
	scope = sctp_scope(sctp_source(chunk));
	asoc = sctp_association_new(ep, ep->base.sk, scope, gfp);
	if (!asoc)
		goto nodata;
	/* Mark temporary so it is torn down when processing completes. */
	asoc->temp = 1;
	skb = chunk->skb;
	/* Create an entry for the source address of the packet.  */
	af = sctp_get_af_specific(ipver2af(ip_hdr(skb)->version));
	if (unlikely(!af))
		goto fail;
	af->from_skb(&asoc->c.peer_addr, skb, 1);
nodata:
	return asoc;

fail:
	sctp_association_free(asoc);
	return NULL;
}

/* Build a cookie representing asoc.
 * This INCLUDES the param header needed to put the cookie in the INIT ACK.
 * On success *cookie_len is the total parameter length; on failure the
 * function returns NULL and sets *cookie_len to 0.
 */
static sctp_cookie_param_t *sctp_pack_cookie(const struct sctp_endpoint *ep,
					     const struct sctp_association *asoc,
					     const struct sctp_chunk *init_chunk,
					     int *cookie_len,
					     const __u8 *raw_addrs, int addrs_len)
{
	sctp_cookie_param_t *retval;
	struct sctp_signed_cookie *cookie;
	struct scatterlist sg;
	int headersize, bodysize;

	/* Header size is static data prior to the actual cookie, including
	 * any padding.
	 */
	headersize = sizeof(sctp_paramhdr_t) +
		     (sizeof(struct sctp_signed_cookie) -
		      sizeof(struct sctp_cookie));
	bodysize = sizeof(struct sctp_cookie)
		+ ntohs(init_chunk->chunk_hdr->length) + addrs_len;

	/* Pad out the cookie to a multiple to make the signature
	 * functions simpler to write.
	 */
	if (bodysize % SCTP_COOKIE_MULTIPLE)
		bodysize += SCTP_COOKIE_MULTIPLE
			- (bodysize % SCTP_COOKIE_MULTIPLE);
	*cookie_len = headersize + bodysize;

	/* Clear this memory since we are sending this data structure
	 * out on the network.
	 */
	retval = kzalloc(*cookie_len, GFP_ATOMIC);
	if (!retval)
		goto nodata;

	cookie = (struct sctp_signed_cookie *) retval->body;

	/* Set up the parameter header.  */
	retval->p.type = SCTP_PARAM_STATE_COOKIE;
	retval->p.length = htons(*cookie_len);

	/* Copy the cookie part of the association itself.  */
	cookie->c = asoc->c;
	/* Save the raw address list length in the cookie. */
	cookie->c.raw_addr_list_len = addrs_len;

	/* Remember PR-SCTP capability. */
	cookie->c.prsctp_capable = asoc->peer.prsctp_capable;

	/* Save adaptation indication in the cookie. */
	cookie->c.adaptation_ind = asoc->peer.adaptation_ind;

	/* Set an expiration time for the cookie.  */
	cookie->c.expiration = ktime_add(asoc->cookie_life, ktime_get());

	/* Copy the peer's init packet.  */
	memcpy(&cookie->c.peer_init[0], init_chunk->chunk_hdr,
	       ntohs(init_chunk->chunk_hdr->length));

	/* Copy the raw local address list of the association. */
	memcpy((__u8 *)&cookie->c.peer_init[0] +
	       ntohs(init_chunk->chunk_hdr->length), raw_addrs, addrs_len);

	if (sctp_sk(ep->base.sk)->hmac) {
		struct hash_desc desc;

		/* Sign the message.  */
		sg_init_one(&sg, &cookie->c, bodysize);
		desc.tfm = sctp_sk(ep->base.sk)->hmac;
		desc.flags = 0;

		if (crypto_hash_setkey(desc.tfm, ep->secret_key,
				       sizeof(ep->secret_key)) ||
		    crypto_hash_digest(&desc, &sg, bodysize,
				       cookie->signature))
			goto free_cookie;
	}

	return retval;

free_cookie:
	kfree(retval);
nodata:
	*cookie_len = 0;
	return NULL;
}

/* Unpack the cookie from COOKIE ECHO chunk, recreating the association.
 */
struct sctp_association *sctp_unpack_cookie(
	const struct sctp_endpoint *ep,
	const struct sctp_association *asoc,
	struct sctp_chunk *chunk, gfp_t gfp,
	int *error, struct sctp_chunk **errp)
{
	struct sctp_association *retval = NULL;
	struct sctp_signed_cookie *cookie;
	struct sctp_cookie *bear_cookie;
	int headersize, bodysize, fixed_size;
	__u8 *digest = ep->digest;
	struct scatterlist sg;
	unsigned int len;
	sctp_scope_t scope;
	struct sk_buff *skb = chunk->skb;
	ktime_t kt;
	struct hash_desc desc;

	/* Header size is static data prior to the actual cookie, including
	 * any padding.
	 */
	headersize = sizeof(sctp_chunkhdr_t) +
		     (sizeof(struct sctp_signed_cookie) -
		      sizeof(struct sctp_cookie));
	bodysize = ntohs(chunk->chunk_hdr->length) - headersize;
	fixed_size = headersize + sizeof(struct sctp_cookie);

	/* Verify that the chunk looks like it even has a cookie.
	 * There must be enough room for our cookie and our peer's
	 * INIT chunk.
	 */
	len = ntohs(chunk->chunk_hdr->length);
	if (len < fixed_size + sizeof(struct sctp_chunkhdr))
		goto malformed;

	/* Verify that the cookie has been padded out. */
	if (bodysize % SCTP_COOKIE_MULTIPLE)
		goto malformed;

	/* Process the cookie.  */
	cookie = chunk->subh.cookie_hdr;
	bear_cookie = &cookie->c;

	if (!sctp_sk(ep->base.sk)->hmac)
		goto no_hmac;

	/* Check the signature.  Reject on digest failure or mismatch. */
	sg_init_one(&sg, bear_cookie, bodysize);
	desc.tfm = sctp_sk(ep->base.sk)->hmac;
	desc.flags = 0;

	memset(digest, 0x00, SCTP_SIGNATURE_SIZE);
	if (crypto_hash_setkey(desc.tfm, ep->secret_key,
			       sizeof(ep->secret_key)) ||
	    crypto_hash_digest(&desc, &sg, bodysize, digest)) {
		*error = -SCTP_IERROR_NOMEM;
		goto fail;
	}

	if (memcmp(digest, cookie->signature, SCTP_SIGNATURE_SIZE)) {
		*error = -SCTP_IERROR_BAD_SIG;
		goto fail;
	}

no_hmac:
	/* IG Section 2.35.2:
	 *  3) Compare the port numbers and the verification tag contained
	 *     within the COOKIE ECHO chunk to the actual port numbers and the
	 *     verification tag within the SCTP common header of the received
	 *     packet.  If these values do not match the packet MUST be silently
	 *     discarded,
	 */
	if (ntohl(chunk->sctp_hdr->vtag) != bear_cookie->my_vtag) {
		*error = -SCTP_IERROR_BAD_TAG;
		goto fail;
	}

	if (chunk->sctp_hdr->source != bear_cookie->peer_addr.v4.sin_port ||
	    ntohs(chunk->sctp_hdr->dest) != bear_cookie->my_port) {
		*error = -SCTP_IERROR_BAD_PORTS;
		goto fail;
	}

	/* Check to see if the cookie is stale.  If there is already
	 * an association, there is no need to check cookie's expiration
	 * for init collision case of lost COOKIE ACK.
	 * If skb has been timestamped, then use the stamp, otherwise
	 * use current time.  This introduces a small possibility that
	 * that a cookie may be considered expired, but his would only slow
	 * down the new association establishment instead of every packet.
	 */
	if (sock_flag(ep->base.sk, SOCK_TIMESTAMP))
		kt = skb_get_ktime(skb);
	else
		kt = ktime_get();

	if (!asoc && ktime_before(bear_cookie->expiration, kt)) {
		/*
		 * Section 3.3.10.3 Stale Cookie Error (3)
		 *
		 * Cause of error
		 * ---------------
		 * Stale Cookie Error:  Indicates the receipt of a valid State
		 * Cookie that has expired.
		 */
		len = ntohs(chunk->chunk_hdr->length);
		*errp = sctp_make_op_error_space(asoc, chunk, len);
		if (*errp) {
			suseconds_t usecs = ktime_to_us(ktime_sub(kt,
						bear_cookie->expiration));
			__be32 n = htonl(usecs);

			sctp_init_cause(*errp, SCTP_ERROR_STALE_COOKIE,
					sizeof(n));
			sctp_addto_chunk(*errp, sizeof(n), &n);
			*error = -SCTP_IERROR_STALE_COOKIE;
		} else
			*error = -SCTP_IERROR_NOMEM;

		goto fail;
	}

	/* Make a new base association.  */
	scope = sctp_scope(sctp_source(chunk));
	retval = sctp_association_new(ep, ep->base.sk, scope, gfp);
	if (!retval) {
		*error = -SCTP_IERROR_NOMEM;
		goto fail;
	}

	/* Set up our peer's port number.  */
	retval->peer.port = ntohs(chunk->sctp_hdr->source);

	/* Populate the association from the cookie.  */
	memcpy(&retval->c, bear_cookie, sizeof(*bear_cookie));

	if (sctp_assoc_set_bind_addr_from_cookie(retval, bear_cookie,
						 GFP_ATOMIC) < 0) {
		*error = -SCTP_IERROR_NOMEM;
		goto fail;
	}

	/* Also, add the destination address. */
	if (list_empty(&retval->base.bind_addr.address_list)) {
		sctp_add_bind_addr(&retval->base.bind_addr, &chunk->dest,
				   SCTP_ADDR_SRC, GFP_ATOMIC);
	}

	retval->next_tsn = retval->c.initial_tsn;
	retval->ctsn_ack_point = retval->next_tsn - 1;
	retval->addip_serial = retval->c.initial_tsn;
	retval->adv_peer_ack_point = retval->ctsn_ack_point;
	retval->peer.prsctp_capable = retval->c.prsctp_capable;
	retval->peer.adaptation_ind = retval->c.adaptation_ind;

	/* The INIT stuff will be done by the side effects.  */
	return retval;

fail:
	if (retval)
		sctp_association_free(retval);

	return NULL;

malformed:
	/* Yikes!  The packet is either corrupt or deliberately
	 * malformed.
	 */
	*error = -SCTP_IERROR_MALFORMED;
	goto fail;
}

/********************************************************************
 * 3rd Level Abstractions
 ********************************************************************/

struct __sctp_missing {
	__be32 num_missing;
	__be16 type;
} __packed;

/*
 * Report a missing mandatory parameter.
 */
static int sctp_process_missing_param(const struct sctp_association *asoc,
				      sctp_param_t paramtype,
				      struct sctp_chunk *chunk,
				      struct sctp_chunk **errp)
{
	struct __sctp_missing report;
	__u16 len;

	len = WORD_ROUND(sizeof(report));

	/* Make an ERROR chunk, preparing enough room for
	 * returning multiple unknown parameters.
	 */
	if (!*errp)
		*errp = sctp_make_op_error_space(asoc, chunk, len);

	if (*errp) {
		report.num_missing = htonl(1);
		report.type = paramtype;
		sctp_init_cause(*errp, SCTP_ERROR_MISS_PARAM,
				sizeof(report));
		sctp_addto_chunk(*errp, sizeof(report), &report);
	}

	/* Stop processing this chunk. */
	return 0;
}

/* Report an Invalid Mandatory Parameter.
 */
static int sctp_process_inv_mandatory(const struct sctp_association *asoc,
				      struct sctp_chunk *chunk,
				      struct sctp_chunk **errp)
{
	/* Invalid Mandatory Parameter Error has no payload. */

	if (!*errp)
		*errp = sctp_make_op_error_space(asoc, chunk, 0);

	if (*errp)
		sctp_init_cause(*errp, SCTP_ERROR_INV_PARAM, 0);

	/* Stop processing this chunk. */
	return 0;
}

/* Report a parameter with an invalid length; this is fatal, so any
 * previously accumulated non-fatal error chunk is discarded.
 */
static int sctp_process_inv_paramlength(const struct sctp_association *asoc,
					struct sctp_paramhdr *param,
					const struct sctp_chunk *chunk,
					struct sctp_chunk **errp)
{
	/* This is a fatal error.  Any accumulated non-fatal errors are
	 * not reported.
	 */
	if (*errp)
		sctp_chunk_free(*errp);

	/* Create an error chunk and fill it in with our payload. */
	*errp = sctp_make_violation_paramlen(asoc, chunk, param);

	return 0;
}

/* Do not attempt to handle the HOST_NAME parm.  However, do
 * send back an indicator to the peer.
 */
static int sctp_process_hn_param(const struct sctp_association *asoc,
				 union sctp_params param,
				 struct sctp_chunk *chunk,
				 struct sctp_chunk **errp)
{
	__u16 len = ntohs(param.p->length);

	/* Processing of the HOST_NAME parameter will generate an
	 * ABORT.  If we've accumulated any non-fatal errors, they
	 * would be unrecognized parameters and we should not include
	 * them in the ABORT.
	 */
	if (*errp)
		sctp_chunk_free(*errp);

	*errp = sctp_make_op_error_space(asoc, chunk, len);

	if (*errp) {
		sctp_init_cause(*errp, SCTP_ERROR_DNS_FAILED, len);
		sctp_addto_chunk(*errp, len, param.v);
	}

	/* Stop processing this chunk. */
	return 0;
}

/* Verify the SUPPORTED-EXTENSIONS parameter: reject (return 0) when the
 * peer lists ASCONF without AUTH while ADD-IP security is enforced.
 */
static int sctp_verify_ext_param(struct net *net, union sctp_params param)
{
	__u16 num_ext = ntohs(param.p->length) - sizeof(sctp_paramhdr_t);
	int have_auth = 0;
	int have_asconf = 0;
	int i;

	for (i = 0; i < num_ext; i++) {
		switch (param.ext->chunks[i]) {
		case SCTP_CID_AUTH:
			have_auth = 1;
			break;
		case SCTP_CID_ASCONF:
		case SCTP_CID_ASCONF_ACK:
			have_asconf = 1;
			break;
		}
	}

	/* ADD-IP Security: The draft requires us to ABORT or ignore the
	 * INIT/INIT-ACK if ADD-IP is listed, but AUTH is not.  Do this
	 * only if ADD-IP is turned on and we are not backward-compatible
	 * mode.
	 */
	if (net->sctp.addip_noauth)
		return 1;

	if (net->sctp.addip_enable && !have_auth && have_asconf)
		return 0;

	return 1;
}

/* Record which optional extensions (FWD-TSN, AUTH, ASCONF) the peer
 * supports, gated on our own configuration.
 */
static void sctp_process_ext_param(struct sctp_association *asoc,
				   union sctp_params param)
{
	struct net *net = sock_net(asoc->base.sk);
	__u16 num_ext = ntohs(param.p->length) - sizeof(sctp_paramhdr_t);
	int i;

	for (i = 0; i < num_ext; i++) {
		switch (param.ext->chunks[i]) {
		case SCTP_CID_FWD_TSN:
			if (net->sctp.prsctp_enable &&
			    !asoc->peer.prsctp_capable)
				asoc->peer.prsctp_capable = 1;
			break;
		case SCTP_CID_AUTH:
			/* if the peer reports AUTH, assume that he
			 * supports AUTH.
			 */
			if (asoc->ep->auth_enable)
				asoc->peer.auth_capable = 1;
			break;
		case SCTP_CID_ASCONF:
		case SCTP_CID_ASCONF_ACK:
			if (net->sctp.addip_enable)
				asoc->peer.asconf_capable = 1;
			break;
		default:
			break;
		}
	}
}

/* RFC 3.2.1 & the Implementers Guide 2.2.
 *
 * The Parameter Types are encoded such that the
 * highest-order two bits specify the action that must be
 * taken if the processing endpoint does not recognize the
 * Parameter Type.
 *
 * 00 - Stop processing this parameter; do not process any further
 * 	parameters within this chunk
 *
 * 01 - Stop processing this parameter, do not process any further
 *	parameters within this chunk, and report the unrecognized
 *	parameter in an 'Unrecognized Parameter' ERROR chunk.
 *
 * 10 - Skip this parameter and continue processing.
 *
 * 11 - Skip this parameter and continue processing but
 *	report the unrecognized parameter in an
 *	'Unrecognized Parameter' ERROR chunk.
 *
 * Return value:
 * 	SCTP_IERROR_NO_ERROR - continue with the chunk
 * 	SCTP_IERROR_ERROR    - stop and report an error.
 * 	SCTP_IERROR_NOMEM    - out of memory.
 */
static sctp_ierror_t sctp_process_unk_param(const struct sctp_association *asoc,
					    union sctp_params param,
					    struct sctp_chunk *chunk,
					    struct sctp_chunk **errp)
{
	int retval = SCTP_IERROR_NO_ERROR;

	switch (param.p->type & SCTP_PARAM_ACTION_MASK) {
	case SCTP_PARAM_ACTION_DISCARD:
		retval =  SCTP_IERROR_ERROR;
		break;
	case SCTP_PARAM_ACTION_SKIP:
		break;
	case SCTP_PARAM_ACTION_DISCARD_ERR:
		retval =  SCTP_IERROR_ERROR;
		/* Fall through */
	case SCTP_PARAM_ACTION_SKIP_ERR:
		/* Make an ERROR chunk, preparing enough room for
		 * returning multiple unknown parameters.
		 */
		if (NULL == *errp)
			*errp = sctp_make_op_error_fixed(asoc, chunk);

		if (*errp) {
			/* The fixed-size error chunk may run out of room;
			 * sctp_addto_chunk_fixed then silently drops the
			 * remaining unknown parameters.
			 */
			if (!sctp_init_cause_fixed(*errp,
						   SCTP_ERROR_UNKNOWN_PARAM,
						   WORD_ROUND(ntohs(param.p->length))))
				sctp_addto_chunk_fixed(*errp,
						       WORD_ROUND(ntohs(param.p->length)),
						       param.v);
		} else {
			/* If there is no memory for generating the ERROR
			 * report as specified, an ABORT will be triggered
			 * to the peer and the association won't be
			 * established.
			 */
			retval = SCTP_IERROR_NOMEM;
		}
		break;
	default:
		break;
	}

	return retval;
}

/* Verify variable length parameters
 * Return values:
 * 	SCTP_IERROR_ABORT - trigger an ABORT
 * 	SCTP_IERROR_NOMEM - out of memory (abort)
 *	SCTP_IERROR_ERROR - stop processing, trigger an ERROR
 * 	SCTP_IERROR_NO_ERROR - continue with the chunk
 */
static sctp_ierror_t sctp_verify_param(struct net *net,
					const struct sctp_endpoint *ep,
					const struct sctp_association *asoc,
					union sctp_params param,
					sctp_cid_t cid,
					struct sctp_chunk *chunk,
					struct sctp_chunk **err_chunk)
{
	struct sctp_hmac_algo_param *hmacs;
	int retval = SCTP_IERROR_NO_ERROR;
	__u16 n_elt, id = 0;
	int i;

	/* FIXME - This routine is not looking at each parameter per the
	 * chunk type, i.e., unrecognized parameters should be further
	 * identified based on the chunk id.
	 */

	switch (param.p->type) {
	case SCTP_PARAM_IPV4_ADDRESS:
	case SCTP_PARAM_IPV6_ADDRESS:
	case SCTP_PARAM_COOKIE_PRESERVATIVE:
	case SCTP_PARAM_SUPPORTED_ADDRESS_TYPES:
	case SCTP_PARAM_STATE_COOKIE:
	case SCTP_PARAM_HEARTBEAT_INFO:
	case SCTP_PARAM_UNRECOGNIZED_PARAMETERS:
	case SCTP_PARAM_ECN_CAPABLE:
	case SCTP_PARAM_ADAPTATION_LAYER_IND:
		break;

	case SCTP_PARAM_SUPPORTED_EXT:
		if (!sctp_verify_ext_param(net, param))
			return SCTP_IERROR_ABORT;
		break;

	case SCTP_PARAM_SET_PRIMARY:
		if (net->sctp.addip_enable)
			break;
		goto fallthrough;

	case SCTP_PARAM_HOST_NAME_ADDRESS:
		/* Tell the peer, we won't support this param.  */
		sctp_process_hn_param(asoc, param, chunk, err_chunk);
		retval = SCTP_IERROR_ABORT;
		break;

	case SCTP_PARAM_FWD_TSN_SUPPORT:
		if (net->sctp.prsctp_enable)
			break;
		goto fallthrough;

	case SCTP_PARAM_RANDOM:
		if (!ep->auth_enable)
			goto fallthrough;

		/* SCTP-AUTH: Secion 6.1
		 * If the random number is not 32 byte long the association
		 * MUST be aborted.  The ABORT chunk SHOULD contain the error
		 * cause 'Protocol Violation'.
		 */
		if (SCTP_AUTH_RANDOM_LENGTH !=
			ntohs(param.p->length) - sizeof(sctp_paramhdr_t)) {
			sctp_process_inv_paramlength(asoc, param.p,
						     chunk, err_chunk);
			retval = SCTP_IERROR_ABORT;
		}
		break;

	case SCTP_PARAM_CHUNKS:
		if (!ep->auth_enable)
			goto fallthrough;

		/* SCTP-AUTH: Section 3.2
		 * The CHUNKS parameter MUST be included once in the INIT or
		 * INIT-ACK chunk if the sender wants to receive authenticated
		 * chunks.  Its maximum length is 260 bytes.
		 */
		if (260 < ntohs(param.p->length)) {
			sctp_process_inv_paramlength(asoc, param.p,
						     chunk, err_chunk);
			retval = SCTP_IERROR_ABORT;
		}
		break;

	case SCTP_PARAM_HMAC_ALGO:
		if (!ep->auth_enable)
			goto fallthrough;

		hmacs = (struct sctp_hmac_algo_param *)param.p;
		n_elt = (ntohs(param.p->length) -
			 sizeof(sctp_paramhdr_t)) >> 1;

		/* SCTP-AUTH: Section 6.1
		 * The HMAC algorithm based on SHA-1 MUST be supported and
		 * included in the HMAC-ALGO parameter.
		 */
		for (i = 0; i < n_elt; i++) {
			id = ntohs(hmacs->hmac_ids[i]);

			if (id == SCTP_AUTH_HMAC_ID_SHA1)
				break;
		}

		if (id != SCTP_AUTH_HMAC_ID_SHA1) {
			sctp_process_inv_paramlength(asoc, param.p, chunk,
						     err_chunk);
			retval = SCTP_IERROR_ABORT;
		}
		break;
fallthrough:
	default:
		pr_debug("%s: unrecognized param:%d for chunk:%d\n",
			 __func__, ntohs(param.p->type), cid);

		retval = sctp_process_unk_param(asoc, param, chunk, err_chunk);
		break;
	}
	return retval;
}

/* Verify the INIT packet before we process it.  */
int sctp_verify_init(struct net *net, const struct sctp_endpoint *ep,
		     const struct sctp_association *asoc, sctp_cid_t cid,
		     sctp_init_chunk_t *peer_init, struct sctp_chunk *chunk,
		     struct sctp_chunk **errp)
{
	union sctp_params param;
	bool has_cookie = false;
	int result;

	/* Check for missing mandatory parameters.  Note: Initial TSN is
	 * also mandatory, but is not checked here since the valid range
	 * is 0..2**32-1.  RFC4960, section 3.3.3.
	 */
	if (peer_init->init_hdr.num_outbound_streams == 0 ||
	    peer_init->init_hdr.num_inbound_streams == 0 ||
	    peer_init->init_hdr.init_tag == 0 ||
	    ntohl(peer_init->init_hdr.a_rwnd) < SCTP_DEFAULT_MINWINDOW)
		return sctp_process_inv_mandatory(asoc, chunk, errp);

	sctp_walk_params(param, peer_init, init_hdr.params) {
		if (param.p->type == SCTP_PARAM_STATE_COOKIE)
			has_cookie = true;
	}

	/* There is a possibility that a parameter length was bad and
	 * in that case we would have stoped walking the parameters.
	 * The current param.p would point at the bad one.
	 * Current consensus on the mailing list is to generate a PROTOCOL
	 * VIOLATION error.  We build the ERROR chunk here and let the normal
	 * error handling code build and send the packet.
	 */
	if (param.v != (void *)chunk->chunk_end)
		return sctp_process_inv_paramlength(asoc, param.p, chunk, errp);

	/* The only missing mandatory param possible today is
	 * the state cookie for an INIT-ACK chunk.
*/
	if ((SCTP_CID_INIT_ACK == cid) && !has_cookie)
		return sctp_process_missing_param(asoc, SCTP_PARAM_STATE_COOKIE,
						  chunk, errp);

	/* Verify all the variable length parameters */
	sctp_walk_params(param, peer_init, init_hdr.params) {
		result = sctp_verify_param(net, ep, asoc, param, cid,
					   chunk, errp);
		switch (result) {
		case SCTP_IERROR_ABORT:
		case SCTP_IERROR_NOMEM:
			/* Fatal: drop the chunk (error chunk may be queued). */
			return 0;
		case SCTP_IERROR_ERROR:
			return 1;
		case SCTP_IERROR_NO_ERROR:
		default:
			break;
		}

	} /* for (loop through all parameters) */

	return 1;
}

/* Unpack the parameters in an INIT packet into an association.
 * Returns 0 on failure, else success.
 * FIXME:  This is an association method.
 */
int sctp_process_init(struct sctp_association *asoc, struct sctp_chunk *chunk,
		      const union sctp_addr *peer_addr,
		      sctp_init_chunk_t *peer_init, gfp_t gfp)
{
	struct net *net = sock_net(asoc->base.sk);
	union sctp_params param;
	struct sctp_transport *transport;
	struct list_head *pos, *temp;
	struct sctp_af *af;
	union sctp_addr addr;
	char *cookie;
	int src_match = 0;

	/* We must include the address that the INIT packet came from.
	 * This is the only address that matters for an INIT packet.
	 * When processing a COOKIE ECHO, we retrieve the from address
	 * of the INIT from the cookie.
	 */

	/* This implementation defaults to making the first transport
	 * added as the primary transport.  The source address seems to
	 * be a better choice than any of the embedded addresses.
	 */
	if (!sctp_assoc_add_peer(asoc, peer_addr, gfp, SCTP_ACTIVE))
		goto nomem;

	if (sctp_cmp_addr_exact(sctp_source(chunk), peer_addr))
		src_match = 1;

	/* Process the initialization parameters.
*/ sctp_walk_params(param, peer_init, init_hdr.params) { if (!src_match && (param.p->type == SCTP_PARAM_IPV4_ADDRESS || param.p->type == SCTP_PARAM_IPV6_ADDRESS)) { af = sctp_get_af_specific(param_type2af(param.p->type)); af->from_addr_param(&addr, param.addr, chunk->sctp_hdr->source, 0); if (sctp_cmp_addr_exact(sctp_source(chunk), &addr)) src_match = 1; } if (!sctp_process_param(asoc, param, peer_addr, gfp)) goto clean_up; } /* source address of chunk may not match any valid address */ if (!src_match) goto clean_up; /* AUTH: After processing the parameters, make sure that we * have all the required info to potentially do authentications. */ if (asoc->peer.auth_capable && (!asoc->peer.peer_random || !asoc->peer.peer_hmacs)) asoc->peer.auth_capable = 0; /* In a non-backward compatible mode, if the peer claims * support for ADD-IP but not AUTH, the ADD-IP spec states * that we MUST ABORT the association. Section 6. The section * also give us an option to silently ignore the packet, which * is what we'll do here. */ if (!net->sctp.addip_noauth && (asoc->peer.asconf_capable && !asoc->peer.auth_capable)) { asoc->peer.addip_disabled_mask |= (SCTP_PARAM_ADD_IP | SCTP_PARAM_DEL_IP | SCTP_PARAM_SET_PRIMARY); asoc->peer.asconf_capable = 0; goto clean_up; } /* Walk list of transports, removing transports in the UNKNOWN state. */ list_for_each_safe(pos, temp, &asoc->peer.transport_addr_list) { transport = list_entry(pos, struct sctp_transport, transports); if (transport->state == SCTP_UNKNOWN) { sctp_assoc_rm_peer(asoc, transport); } } /* The fixed INIT headers are always in network byte * order. 
*/ asoc->peer.i.init_tag = ntohl(peer_init->init_hdr.init_tag); asoc->peer.i.a_rwnd = ntohl(peer_init->init_hdr.a_rwnd); asoc->peer.i.num_outbound_streams = ntohs(peer_init->init_hdr.num_outbound_streams); asoc->peer.i.num_inbound_streams = ntohs(peer_init->init_hdr.num_inbound_streams); asoc->peer.i.initial_tsn = ntohl(peer_init->init_hdr.initial_tsn); /* Apply the upper bounds for output streams based on peer's * number of inbound streams. */ if (asoc->c.sinit_num_ostreams > ntohs(peer_init->init_hdr.num_inbound_streams)) { asoc->c.sinit_num_ostreams = ntohs(peer_init->init_hdr.num_inbound_streams); } if (asoc->c.sinit_max_instreams > ntohs(peer_init->init_hdr.num_outbound_streams)) { asoc->c.sinit_max_instreams = ntohs(peer_init->init_hdr.num_outbound_streams); } /* Copy Initiation tag from INIT to VT_peer in cookie. */ asoc->c.peer_vtag = asoc->peer.i.init_tag; /* Peer Rwnd : Current calculated value of the peer's rwnd. */ asoc->peer.rwnd = asoc->peer.i.a_rwnd; /* Copy cookie in case we need to resend COOKIE-ECHO. */ cookie = asoc->peer.cookie; if (cookie) { asoc->peer.cookie = kmemdup(cookie, asoc->peer.cookie_len, gfp); if (!asoc->peer.cookie) goto clean_up; } /* RFC 2960 7.2.1 The initial value of ssthresh MAY be arbitrarily * high (for example, implementations MAY use the size of the receiver * advertised window). */ list_for_each_entry(transport, &asoc->peer.transport_addr_list, transports) { transport->ssthresh = asoc->peer.i.a_rwnd; } /* Set up the TSN tracking pieces. */ if (!sctp_tsnmap_init(&asoc->peer.tsn_map, SCTP_TSN_MAP_INITIAL, asoc->peer.i.initial_tsn, gfp)) goto clean_up; /* RFC 2960 6.5 Stream Identifier and Stream Sequence Number * * The stream sequence number in all the streams shall start * from 0 when the association is established. Also, when the * stream sequence number reaches the value 65535 the next * stream sequence number shall be set to 0. */ /* Allocate storage for the negotiated streams if it is not a temporary * association. 
*/ if (!asoc->temp) { int error; asoc->ssnmap = sctp_ssnmap_new(asoc->c.sinit_max_instreams, asoc->c.sinit_num_ostreams, gfp); if (!asoc->ssnmap) goto clean_up; error = sctp_assoc_set_id(asoc, gfp); if (error) goto clean_up; } /* ADDIP Section 4.1 ASCONF Chunk Procedures * * When an endpoint has an ASCONF signaled change to be sent to the * remote endpoint it should do the following: * ... * A2) A serial number should be assigned to the Chunk. The serial * number should be a monotonically increasing number. All serial * numbers are defined to be initialized at the start of the * association to the same value as the Initial TSN. */ asoc->peer.addip_serial = asoc->peer.i.initial_tsn - 1; return 1; clean_up: /* Release the transport structures. */ list_for_each_safe(pos, temp, &asoc->peer.transport_addr_list) { transport = list_entry(pos, struct sctp_transport, transports); if (transport->state != SCTP_ACTIVE) sctp_assoc_rm_peer(asoc, transport); } nomem: return 0; } /* Update asoc with the option described in param. * * RFC2960 3.3.2.1 Optional/Variable Length Parameters in INIT * * asoc is the association to update. * param is the variable length parameter to use for update. * cid tells us if this is an INIT, INIT ACK or COOKIE ECHO. * If the current packet is an INIT we want to minimize the amount of * work we do. In particular, we should not build transport * structures for the addresses. */ static int sctp_process_param(struct sctp_association *asoc, union sctp_params param, const union sctp_addr *peer_addr, gfp_t gfp) { struct net *net = sock_net(asoc->base.sk); union sctp_addr addr; int i; __u16 sat; int retval = 1; sctp_scope_t scope; time_t stale; struct sctp_af *af; union sctp_addr_param *addr_param; struct sctp_transport *t; struct sctp_endpoint *ep = asoc->ep; /* We maintain all INIT parameters in network byte order all the * time. This allows us to not worry about whether the parameters * came from a fresh INIT, and INIT ACK, or were stored in a cookie. 
*/ switch (param.p->type) { case SCTP_PARAM_IPV6_ADDRESS: if (PF_INET6 != asoc->base.sk->sk_family) break; goto do_addr_param; case SCTP_PARAM_IPV4_ADDRESS: /* v4 addresses are not allowed on v6-only socket */ if (ipv6_only_sock(asoc->base.sk)) break; do_addr_param: af = sctp_get_af_specific(param_type2af(param.p->type)); af->from_addr_param(&addr, param.addr, htons(asoc->peer.port), 0); scope = sctp_scope(peer_addr); if (sctp_in_scope(net, &addr, scope)) if (!sctp_assoc_add_peer(asoc, &addr, gfp, SCTP_UNCONFIRMED)) return 0; break; case SCTP_PARAM_COOKIE_PRESERVATIVE: if (!net->sctp.cookie_preserve_enable) break; stale = ntohl(param.life->lifespan_increment); /* Suggested Cookie Life span increment's unit is msec, * (1/1000sec). */ asoc->cookie_life = ktime_add_ms(asoc->cookie_life, stale); break; case SCTP_PARAM_HOST_NAME_ADDRESS: pr_debug("%s: unimplemented SCTP_HOST_NAME_ADDRESS\n", __func__); break; case SCTP_PARAM_SUPPORTED_ADDRESS_TYPES: /* Turn off the default values first so we'll know which * ones are really set by the peer. */ asoc->peer.ipv4_address = 0; asoc->peer.ipv6_address = 0; /* Assume that peer supports the address family * by which it sends a packet. */ if (peer_addr->sa.sa_family == AF_INET6) asoc->peer.ipv6_address = 1; else if (peer_addr->sa.sa_family == AF_INET) asoc->peer.ipv4_address = 1; /* Cycle through address types; avoid divide by 0. */ sat = ntohs(param.p->length) - sizeof(sctp_paramhdr_t); if (sat) sat /= sizeof(__u16); for (i = 0; i < sat; ++i) { switch (param.sat->types[i]) { case SCTP_PARAM_IPV4_ADDRESS: asoc->peer.ipv4_address = 1; break; case SCTP_PARAM_IPV6_ADDRESS: if (PF_INET6 == asoc->base.sk->sk_family) asoc->peer.ipv6_address = 1; break; case SCTP_PARAM_HOST_NAME_ADDRESS: asoc->peer.hostname_address = 1; break; default: /* Just ignore anything else. 
*/ break; } } break; case SCTP_PARAM_STATE_COOKIE: asoc->peer.cookie_len = ntohs(param.p->length) - sizeof(sctp_paramhdr_t); asoc->peer.cookie = param.cookie->body; break; case SCTP_PARAM_HEARTBEAT_INFO: /* Would be odd to receive, but it causes no problems. */ break; case SCTP_PARAM_UNRECOGNIZED_PARAMETERS: /* Rejected during verify stage. */ break; case SCTP_PARAM_ECN_CAPABLE: asoc->peer.ecn_capable = 1; break; case SCTP_PARAM_ADAPTATION_LAYER_IND: asoc->peer.adaptation_ind = ntohl(param.aind->adaptation_ind); break; case SCTP_PARAM_SET_PRIMARY: if (!net->sctp.addip_enable) goto fall_through; addr_param = param.v + sizeof(sctp_addip_param_t); af = sctp_get_af_specific(param_type2af(param.p->type)); af->from_addr_param(&addr, addr_param, htons(asoc->peer.port), 0); /* if the address is invalid, we can't process it. * XXX: see spec for what to do. */ if (!af->addr_valid(&addr, NULL, NULL)) break; t = sctp_assoc_lookup_paddr(asoc, &addr); if (!t) break; sctp_assoc_set_primary(asoc, t); break; case SCTP_PARAM_SUPPORTED_EXT: sctp_process_ext_param(asoc, param); break; case SCTP_PARAM_FWD_TSN_SUPPORT: if (net->sctp.prsctp_enable) { asoc->peer.prsctp_capable = 1; break; } /* Fall Through */ goto fall_through; case SCTP_PARAM_RANDOM: if (!ep->auth_enable) goto fall_through; /* Save peer's random parameter */ asoc->peer.peer_random = kmemdup(param.p, ntohs(param.p->length), gfp); if (!asoc->peer.peer_random) { retval = 0; break; } break; case SCTP_PARAM_HMAC_ALGO: if (!ep->auth_enable) goto fall_through; /* Save peer's HMAC list */ asoc->peer.peer_hmacs = kmemdup(param.p, ntohs(param.p->length), gfp); if (!asoc->peer.peer_hmacs) { retval = 0; break; } /* Set the default HMAC the peer requested*/ sctp_auth_asoc_set_default_hmac(asoc, param.hmac_algo); break; case SCTP_PARAM_CHUNKS: if (!ep->auth_enable) goto fall_through; asoc->peer.peer_chunks = kmemdup(param.p, ntohs(param.p->length), gfp); if (!asoc->peer.peer_chunks) retval = 0; break; fall_through: default: /* Any 
unrecognized parameters should have been caught * and handled by sctp_verify_param() which should be * called prior to this routine. Simply log the error * here. */ pr_debug("%s: ignoring param:%d for association:%p.\n", __func__, ntohs(param.p->type), asoc); break; } return retval; } /* Select a new verification tag. */ __u32 sctp_generate_tag(const struct sctp_endpoint *ep) { /* I believe that this random number generator complies with RFC1750. * A tag of 0 is reserved for special cases (e.g. INIT). */ __u32 x; do { get_random_bytes(&x, sizeof(__u32)); } while (x == 0); return x; } /* Select an initial TSN to send during startup. */ __u32 sctp_generate_tsn(const struct sctp_endpoint *ep) { __u32 retval; get_random_bytes(&retval, sizeof(__u32)); return retval; } /* * ADDIP 3.1.1 Address Configuration Change Chunk (ASCONF) * 0 1 2 3 * 0 1 2 3 4 5 6 7 8 9 0 1 2 3 4 5 6 7 8 9 0 1 2 3 4 5 6 7 8 9 0 1 * +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+ * | Type = 0xC1 | Chunk Flags | Chunk Length | * +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+ * | Serial Number | * +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+ * | Address Parameter | * +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+ * | ASCONF Parameter #1 | * +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+ * \ \ * / .... 
/ * \ \ * +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+ * | ASCONF Parameter #N | * +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+ * * Address Parameter and other parameter will not be wrapped in this function */ static struct sctp_chunk *sctp_make_asconf(struct sctp_association *asoc, union sctp_addr *addr, int vparam_len) { sctp_addiphdr_t asconf; struct sctp_chunk *retval; int length = sizeof(asconf) + vparam_len; union sctp_addr_param addrparam; int addrlen; struct sctp_af *af = sctp_get_af_specific(addr->v4.sin_family); addrlen = af->to_addr_param(addr, &addrparam); if (!addrlen) return NULL; length += addrlen; /* Create the chunk. */ retval = sctp_make_control(asoc, SCTP_CID_ASCONF, 0, length); if (!retval) return NULL; asconf.serial = htonl(asoc->addip_serial++); retval->subh.addip_hdr = sctp_addto_chunk(retval, sizeof(asconf), &asconf); retval->param_hdr.v = sctp_addto_chunk(retval, addrlen, &addrparam); return retval; } /* ADDIP * 3.2.1 Add IP Address * 0 1 2 3 * 0 1 2 3 4 5 6 7 8 9 0 1 2 3 4 5 6 7 8 9 0 1 2 3 4 5 6 7 8 9 0 1 * +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+ * | Type = 0xC001 | Length = Variable | * +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+ * | ASCONF-Request Correlation ID | * +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+ * | Address Parameter | * +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+ * * 3.2.2 Delete IP Address * 0 1 2 3 * 0 1 2 3 4 5 6 7 8 9 0 1 2 3 4 5 6 7 8 9 0 1 2 3 4 5 6 7 8 9 0 1 * +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+ * | Type = 0xC002 | Length = Variable | * +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+ * | ASCONF-Request Correlation ID | * +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+ * | Address Parameter | * +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+ * */ struct sctp_chunk *sctp_make_asconf_update_ip(struct 
sctp_association *asoc, union sctp_addr *laddr, struct sockaddr *addrs, int addrcnt, __be16 flags) { sctp_addip_param_t param; struct sctp_chunk *retval; union sctp_addr_param addr_param; union sctp_addr *addr; void *addr_buf; struct sctp_af *af; int paramlen = sizeof(param); int addr_param_len = 0; int totallen = 0; int i; int del_pickup = 0; /* Get total length of all the address parameters. */ addr_buf = addrs; for (i = 0; i < addrcnt; i++) { addr = addr_buf; af = sctp_get_af_specific(addr->v4.sin_family); addr_param_len = af->to_addr_param(addr, &addr_param); totallen += paramlen; totallen += addr_param_len; addr_buf += af->sockaddr_len; if (asoc->asconf_addr_del_pending && !del_pickup) { /* reuse the parameter length from the same scope one */ totallen += paramlen; totallen += addr_param_len; del_pickup = 1; pr_debug("%s: picked same-scope del_pending addr, " "totallen for all addresses is %d\n", __func__, totallen); } } /* Create an asconf chunk with the required length. */ retval = sctp_make_asconf(asoc, laddr, totallen); if (!retval) return NULL; /* Add the address parameters to the asconf chunk. 
*/ addr_buf = addrs; for (i = 0; i < addrcnt; i++) { addr = addr_buf; af = sctp_get_af_specific(addr->v4.sin_family); addr_param_len = af->to_addr_param(addr, &addr_param); param.param_hdr.type = flags; param.param_hdr.length = htons(paramlen + addr_param_len); param.crr_id = i; sctp_addto_chunk(retval, paramlen, &param); sctp_addto_chunk(retval, addr_param_len, &addr_param); addr_buf += af->sockaddr_len; } if (flags == SCTP_PARAM_ADD_IP && del_pickup) { addr = asoc->asconf_addr_del_pending; af = sctp_get_af_specific(addr->v4.sin_family); addr_param_len = af->to_addr_param(addr, &addr_param); param.param_hdr.type = SCTP_PARAM_DEL_IP; param.param_hdr.length = htons(paramlen + addr_param_len); param.crr_id = i; sctp_addto_chunk(retval, paramlen, &param); sctp_addto_chunk(retval, addr_param_len, &addr_param); } return retval; } /* ADDIP * 3.2.4 Set Primary IP Address * 0 1 2 3 * 0 1 2 3 4 5 6 7 8 9 0 1 2 3 4 5 6 7 8 9 0 1 2 3 4 5 6 7 8 9 0 1 * +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+ * | Type =0xC004 | Length = Variable | * +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+ * | ASCONF-Request Correlation ID | * +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+ * | Address Parameter | * +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+ * * Create an ASCONF chunk with Set Primary IP address parameter. */ struct sctp_chunk *sctp_make_asconf_set_prim(struct sctp_association *asoc, union sctp_addr *addr) { sctp_addip_param_t param; struct sctp_chunk *retval; int len = sizeof(param); union sctp_addr_param addrparam; int addrlen; struct sctp_af *af = sctp_get_af_specific(addr->v4.sin_family); addrlen = af->to_addr_param(addr, &addrparam); if (!addrlen) return NULL; len += addrlen; /* Create the chunk and make asconf header. 
*/
	retval = sctp_make_asconf(asoc, addr, len);
	if (!retval)
		return NULL;

	/* Build the Set Primary IP Address parameter header and append
	 * it together with the wrapped address parameter.
	 */
	param.param_hdr.type = SCTP_PARAM_SET_PRIMARY;
	param.param_hdr.length = htons(len);
	param.crr_id = 0;

	sctp_addto_chunk(retval, sizeof(param), &param);
	sctp_addto_chunk(retval, addrlen, &addrparam);

	return retval;
}

/* ADDIP 3.1.2 Address Configuration Acknowledgement Chunk (ASCONF-ACK)
 *      0                   1                   2                   3
 *      0 1 2 3 4 5 6 7 8 9 0 1 2 3 4 5 6 7 8 9 0 1 2 3 4 5 6 7 8 9 0 1
 *     +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
 *     | Type = 0x80    |  Chunk Flags  |      Chunk Length             |
 *     +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
 *     |                       Serial Number                           |
 *     +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
 *     |                 ASCONF Parameter Response#1                   |
 *     +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
 *     \                                                               \
 *     /                             ....                              /
 *     \                                                               \
 *     +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
 *     |                 ASCONF Parameter Response#N                   |
 *     +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
 *
 * Create an ASCONF_ACK chunk with enough space for the parameter responses.
 */
static struct sctp_chunk *sctp_make_asconf_ack(const struct sctp_association *asoc,
					       __u32 serial, int vparam_len)
{
	sctp_addiphdr_t		asconf;
	struct sctp_chunk	*retval;
	int			length = sizeof(asconf) + vparam_len;

	/* Create the chunk.  */
	retval = sctp_make_control(asoc, SCTP_CID_ASCONF_ACK, 0, length);
	if (!retval)
		return NULL;

	/* Echo the serial number of the ASCONF being acknowledged. */
	asconf.serial = htonl(serial);

	retval->subh.addip_hdr =
		sctp_addto_chunk(retval, sizeof(asconf), &asconf);

	return retval;
}

/* Add response parameters to an ASCONF_ACK chunk.
*/
/* Append one per-parameter response TLV (Success Indication or Error
 * Cause Indication) for correlation id @crr_id to @chunk.  On error,
 * the failed request TLV (@asconf_param, may be NULL) is echoed back.
 */
static void sctp_add_asconf_response(struct sctp_chunk *chunk, __be32 crr_id,
				     __be16 err_code,
				     sctp_addip_param_t *asconf_param)
{
	sctp_addip_param_t	ack_param;
	sctp_errhdr_t		err_param;
	int			asconf_param_len = 0;
	int			err_param_len = 0;
	__be16			response_type;

	if (SCTP_ERROR_NO_ERROR == err_code) {
		response_type = SCTP_PARAM_SUCCESS_REPORT;
	} else {
		response_type = SCTP_PARAM_ERR_CAUSE;
		err_param_len = sizeof(err_param);
		if (asconf_param)
			asconf_param_len =
				 ntohs(asconf_param->param_hdr.length);
	}

	/* Add Success Indication or Error Cause Indication parameter. */
	ack_param.param_hdr.type = response_type;
	ack_param.param_hdr.length = htons(sizeof(ack_param) +
					   err_param_len +
					   asconf_param_len);
	ack_param.crr_id = crr_id;
	sctp_addto_chunk(chunk, sizeof(ack_param), &ack_param);

	/* Success responses carry no nested error cause. */
	if (SCTP_ERROR_NO_ERROR == err_code)
		return;

	/* Add Error Cause parameter. */
	err_param.cause = err_code;
	err_param.length = htons(err_param_len + asconf_param_len);
	sctp_addto_chunk(chunk, err_param_len, &err_param);

	/* Add the failed TLV copied from ASCONF chunk. */
	if (asconf_param)
		sctp_addto_chunk(chunk, asconf_param_len, asconf_param);
}

/* Process an ASCONF parameter.
*/ static __be16 sctp_process_asconf_param(struct sctp_association *asoc, struct sctp_chunk *asconf, sctp_addip_param_t *asconf_param) { struct sctp_transport *peer; struct sctp_af *af; union sctp_addr addr; union sctp_addr_param *addr_param; addr_param = (void *)asconf_param + sizeof(sctp_addip_param_t); if (asconf_param->param_hdr.type != SCTP_PARAM_ADD_IP && asconf_param->param_hdr.type != SCTP_PARAM_DEL_IP && asconf_param->param_hdr.type != SCTP_PARAM_SET_PRIMARY) return SCTP_ERROR_UNKNOWN_PARAM; switch (addr_param->p.type) { case SCTP_PARAM_IPV6_ADDRESS: if (!asoc->peer.ipv6_address) return SCTP_ERROR_DNS_FAILED; break; case SCTP_PARAM_IPV4_ADDRESS: if (!asoc->peer.ipv4_address) return SCTP_ERROR_DNS_FAILED; break; default: return SCTP_ERROR_DNS_FAILED; } af = sctp_get_af_specific(param_type2af(addr_param->p.type)); if (unlikely(!af)) return SCTP_ERROR_DNS_FAILED; af->from_addr_param(&addr, addr_param, htons(asoc->peer.port), 0); /* ADDIP 4.2.1 This parameter MUST NOT contain a broadcast * or multicast address. * (note: wildcard is permitted and requires special handling so * make sure we check for that) */ if (!af->is_any(&addr) && !af->addr_valid(&addr, NULL, asconf->skb)) return SCTP_ERROR_DNS_FAILED; switch (asconf_param->param_hdr.type) { case SCTP_PARAM_ADD_IP: /* Section 4.2.1: * If the address 0.0.0.0 or ::0 is provided, the source * address of the packet MUST be added. */ if (af->is_any(&addr)) memcpy(&addr, &asconf->source, sizeof(addr)); /* ADDIP 4.3 D9) If an endpoint receives an ADD IP address * request and does not have the local resources to add this * new address to the association, it MUST return an Error * Cause TLV set to the new error code 'Operation Refused * Due to Resource Shortage'. */ peer = sctp_assoc_add_peer(asoc, &addr, GFP_ATOMIC, SCTP_UNCONFIRMED); if (!peer) return SCTP_ERROR_RSRC_LOW; /* Start the heartbeat timer. 
*/ if (!mod_timer(&peer->hb_timer, sctp_transport_timeout(peer))) sctp_transport_hold(peer); asoc->new_transport = peer; break; case SCTP_PARAM_DEL_IP: /* ADDIP 4.3 D7) If a request is received to delete the * last remaining IP address of a peer endpoint, the receiver * MUST send an Error Cause TLV with the error cause set to the * new error code 'Request to Delete Last Remaining IP Address'. */ if (asoc->peer.transport_count == 1) return SCTP_ERROR_DEL_LAST_IP; /* ADDIP 4.3 D8) If a request is received to delete an IP * address which is also the source address of the IP packet * which contained the ASCONF chunk, the receiver MUST reject * this request. To reject the request the receiver MUST send * an Error Cause TLV set to the new error code 'Request to * Delete Source IP Address' */ if (sctp_cmp_addr_exact(&asconf->source, &addr)) return SCTP_ERROR_DEL_SRC_IP; /* Section 4.2.2 * If the address 0.0.0.0 or ::0 is provided, all * addresses of the peer except the source address of the * packet MUST be deleted. */ if (af->is_any(&addr)) { sctp_assoc_set_primary(asoc, asconf->transport); sctp_assoc_del_nonprimary_peers(asoc, asconf->transport); } else sctp_assoc_del_peer(asoc, &addr); break; case SCTP_PARAM_SET_PRIMARY: /* ADDIP Section 4.2.4 * If the address 0.0.0.0 or ::0 is provided, the receiver * MAY mark the source address of the packet as its * primary. */ if (af->is_any(&addr)) memcpy(&addr.v4, sctp_source(asconf), sizeof(addr)); peer = sctp_assoc_lookup_paddr(asoc, &addr); if (!peer) return SCTP_ERROR_DNS_FAILED; sctp_assoc_set_primary(asoc, peer); break; } return SCTP_ERROR_NO_ERROR; } /* Verify the ASCONF packet before we process it. 
*/
/* Walk every TLV between @param_hdr and @chunk_end, validating lengths
 * before any later code dereferences the parameters.  Returns 1 when the
 * chunk is well formed; on failure returns 0 with *errp pointing at the
 * offending parameter.
 */
int sctp_verify_asconf(const struct sctp_association *asoc,
		       struct sctp_paramhdr *param_hdr, void *chunk_end,
		       struct sctp_paramhdr **errp)
{
	sctp_addip_param_t *asconf_param;
	union sctp_params param;
	int length, plen;

	param.v = (sctp_paramhdr_t *) param_hdr;
	/* Ensure at least a parameter header remains before reading it. */
	while (param.v <= chunk_end - sizeof(sctp_paramhdr_t)) {
		length = ntohs(param.p->length);
		*errp = param.p;

		/* The claimed length must fit in the chunk and be at
		 * least a bare parameter header.
		 */
		if (param.v > chunk_end - length ||
		    length < sizeof(sctp_paramhdr_t))
			return 0;

		switch (param.p->type) {
		case SCTP_PARAM_ADD_IP:
		case SCTP_PARAM_DEL_IP:
		case SCTP_PARAM_SET_PRIMARY:
			/* These must wrap an inner address parameter. */
			asconf_param = (sctp_addip_param_t *)param.v;
			plen = ntohs(asconf_param->param_hdr.length);
			if (plen < sizeof(sctp_addip_param_t) +
			    sizeof(sctp_paramhdr_t))
				return 0;
			break;
		case SCTP_PARAM_SUCCESS_REPORT:
		case SCTP_PARAM_ADAPTATION_LAYER_IND:
			if (length != sizeof(sctp_addip_param_t))
				return 0;
			break;
		default:
			break;
		}

		param.v += WORD_ROUND(length);
	}

	/* The walk must land exactly on the chunk end. */
	if (param.v != chunk_end)
		return 0;

	return 1;
}

/* Process an incoming ASCONF chunk with the next expected serial no. and
 * return an ASCONF_ACK chunk to be sent in response.
 */
struct sctp_chunk *sctp_process_asconf(struct sctp_association *asoc,
				       struct sctp_chunk *asconf)
{
	sctp_addiphdr_t		*hdr;
	union sctp_addr_param	*addr_param;
	sctp_addip_param_t	*asconf_param;
	struct sctp_chunk	*asconf_ack;
	__be16	err_code;
	int	length = 0;
	int	chunk_len;
	__u32	serial;
	int	all_param_pass = 1;

	chunk_len = ntohs(asconf->chunk_hdr->length) - sizeof(sctp_chunkhdr_t);
	hdr = (sctp_addiphdr_t *)asconf->skb->data;
	serial = ntohl(hdr->serial);

	/* Skip the addiphdr and store a pointer to address parameter.  */
	length = sizeof(sctp_addiphdr_t);
	addr_param = (union sctp_addr_param *)(asconf->skb->data + length);
	chunk_len -= length;

	/* Skip the address parameter and store a pointer to the first
	 * asconf parameter.
	 */
	length = ntohs(addr_param->p.length);
	asconf_param = (void *)addr_param + length;
	chunk_len -= length;

	/* create an ASCONF_ACK chunk.
* Based on the definitions of parameters, we know that the size of * ASCONF_ACK parameters are less than or equal to the fourfold of ASCONF * parameters. */ asconf_ack = sctp_make_asconf_ack(asoc, serial, chunk_len * 4); if (!asconf_ack) goto done; /* Process the TLVs contained within the ASCONF chunk. */ while (chunk_len > 0) { err_code = sctp_process_asconf_param(asoc, asconf, asconf_param); /* ADDIP 4.1 A7) * If an error response is received for a TLV parameter, * all TLVs with no response before the failed TLV are * considered successful if not reported. All TLVs after * the failed response are considered unsuccessful unless * a specific success indication is present for the parameter. */ if (SCTP_ERROR_NO_ERROR != err_code) all_param_pass = 0; if (!all_param_pass) sctp_add_asconf_response(asconf_ack, asconf_param->crr_id, err_code, asconf_param); /* ADDIP 4.3 D11) When an endpoint receiving an ASCONF to add * an IP address sends an 'Out of Resource' in its response, it * MUST also fail any subsequent add or delete requests bundled * in the ASCONF. */ if (SCTP_ERROR_RSRC_LOW == err_code) goto done; /* Move to the next ASCONF param. */ length = ntohs(asconf_param->param_hdr.length); asconf_param = (void *)asconf_param + length; chunk_len -= length; } done: asoc->peer.addip_serial++; /* If we are sending a new ASCONF_ACK hold a reference to it in assoc * after freeing the reference to old asconf ack if any. */ if (asconf_ack) { sctp_chunk_hold(asconf_ack); list_add_tail(&asconf_ack->transmitted_list, &asoc->asconf_ack_list); } return asconf_ack; } /* Process a asconf parameter that is successfully acked. 
*/ static void sctp_asconf_param_success(struct sctp_association *asoc, sctp_addip_param_t *asconf_param) { struct sctp_af *af; union sctp_addr addr; struct sctp_bind_addr *bp = &asoc->base.bind_addr; union sctp_addr_param *addr_param; struct sctp_transport *transport; struct sctp_sockaddr_entry *saddr; addr_param = (void *)asconf_param + sizeof(sctp_addip_param_t); /* We have checked the packet before, so we do not check again. */ af = sctp_get_af_specific(param_type2af(addr_param->p.type)); af->from_addr_param(&addr, addr_param, htons(bp->port), 0); switch (asconf_param->param_hdr.type) { case SCTP_PARAM_ADD_IP: /* This is always done in BH context with a socket lock * held, so the list can not change. */ local_bh_disable(); list_for_each_entry(saddr, &bp->address_list, list) { if (sctp_cmp_addr_exact(&saddr->a, &addr)) saddr->state = SCTP_ADDR_SRC; } local_bh_enable(); list_for_each_entry(transport, &asoc->peer.transport_addr_list, transports) { dst_release(transport->dst); transport->dst = NULL; } break; case SCTP_PARAM_DEL_IP: local_bh_disable(); sctp_del_bind_addr(bp, &addr); if (asoc->asconf_addr_del_pending != NULL && sctp_cmp_addr_exact(asoc->asconf_addr_del_pending, &addr)) { kfree(asoc->asconf_addr_del_pending); asoc->asconf_addr_del_pending = NULL; } local_bh_enable(); list_for_each_entry(transport, &asoc->peer.transport_addr_list, transports) { dst_release(transport->dst); transport->dst = NULL; } break; default: break; } } /* Get the corresponding ASCONF response error code from the ASCONF_ACK chunk * for the given asconf parameter. If there is no response for this parameter, * return the error code based on the third argument 'no_err'. * ADDIP 4.1 * A7) If an error response is received for a TLV parameter, all TLVs with no * response before the failed TLV are considered successful if not reported. * All TLVs after the failed response are considered unsuccessful unless a * specific success indication is present for the parameter. 
*/
/* Scan the ASCONF-ACK for a response TLV whose correlation id matches
 * @asconf_param; return its error cause, or the default implied by
 * @no_err (ADDIP 4.1 A7) when no explicit response exists.
 */
static __be16 sctp_get_asconf_response(struct sctp_chunk *asconf_ack,
				       sctp_addip_param_t *asconf_param,
				       int no_err)
{
	sctp_addip_param_t	*asconf_ack_param;
	sctp_errhdr_t		*err_param;
	int			length;
	int			asconf_ack_len;
	__be16			err_code;

	/* Default result if no matching response TLV is found. */
	if (no_err)
		err_code = SCTP_ERROR_NO_ERROR;
	else
		err_code = SCTP_ERROR_REQ_REFUSED;

	asconf_ack_len = ntohs(asconf_ack->chunk_hdr->length) -
			     sizeof(sctp_chunkhdr_t);

	/* Skip the addiphdr from the asconf_ack chunk and store a pointer to
	 * the first asconf_ack parameter.
	 */
	length = sizeof(sctp_addiphdr_t);
	asconf_ack_param = (sctp_addip_param_t *)(asconf_ack->skb->data +
						  length);
	asconf_ack_len -= length;

	while (asconf_ack_len > 0) {
		if (asconf_ack_param->crr_id == asconf_param->crr_id) {
			switch (asconf_ack_param->param_hdr.type) {
			case SCTP_PARAM_SUCCESS_REPORT:
				return SCTP_ERROR_NO_ERROR;
			case SCTP_PARAM_ERR_CAUSE:
				/* Error cause header follows the ack TLV. */
				length = sizeof(sctp_addip_param_t);
				err_param = (void *)asconf_ack_param + length;
				asconf_ack_len -= length;
				if (asconf_ack_len > 0)
					return err_param->cause;
				else
					return SCTP_ERROR_INV_PARAM;
				break;
			default:
				return SCTP_ERROR_INV_PARAM;
			}
		}

		length = ntohs(asconf_ack_param->param_hdr.length);
		asconf_ack_param = (void *)asconf_ack_param + length;
		asconf_ack_len -= length;
	}

	return err_code;
}

/* Process an incoming ASCONF_ACK chunk against the cached last ASCONF chunk. */
int sctp_process_asconf_ack(struct sctp_association *asoc,
			    struct sctp_chunk *asconf_ack)
{
	struct sctp_chunk	*asconf = asoc->addip_last_asconf;
	union sctp_addr_param	*addr_param;
	sctp_addip_param_t	*asconf_param;
	int	length = 0;
	int	asconf_len = asconf->skb->len;
	int	all_param_pass = 0;
	int	no_err = 1;
	int	retval = 0;
	__be16	err_code = SCTP_ERROR_NO_ERROR;

	/* Skip the chunkhdr and addiphdr from the last asconf sent and store
	 * a pointer to address parameter.
*/ length = sizeof(sctp_addip_chunk_t); addr_param = (union sctp_addr_param *)(asconf->skb->data + length); asconf_len -= length; /* Skip the address parameter in the last asconf sent and store a * pointer to the first asconf parameter. */ length = ntohs(addr_param->p.length); asconf_param = (void *)addr_param + length; asconf_len -= length; /* ADDIP 4.1 * A8) If there is no response(s) to specific TLV parameter(s), and no * failures are indicated, then all request(s) are considered * successful. */ if (asconf_ack->skb->len == sizeof(sctp_addiphdr_t)) all_param_pass = 1; /* Process the TLVs contained in the last sent ASCONF chunk. */ while (asconf_len > 0) { if (all_param_pass) err_code = SCTP_ERROR_NO_ERROR; else { err_code = sctp_get_asconf_response(asconf_ack, asconf_param, no_err); if (no_err && (SCTP_ERROR_NO_ERROR != err_code)) no_err = 0; } switch (err_code) { case SCTP_ERROR_NO_ERROR: sctp_asconf_param_success(asoc, asconf_param); break; case SCTP_ERROR_RSRC_LOW: retval = 1; break; case SCTP_ERROR_UNKNOWN_PARAM: /* Disable sending this type of asconf parameter in * future. */ asoc->peer.addip_disabled_mask |= asconf_param->param_hdr.type; break; case SCTP_ERROR_REQ_REFUSED: case SCTP_ERROR_DEL_LAST_IP: case SCTP_ERROR_DEL_SRC_IP: default: break; } /* Skip the processed asconf parameter and move to the next * one. */ length = ntohs(asconf_param->param_hdr.length); asconf_param = (void *)asconf_param + length; asconf_len -= length; } if (no_err && asoc->src_out_of_asoc_ok) { asoc->src_out_of_asoc_ok = 0; sctp_transport_immediate_rtx(asoc->peer.primary_path); } /* Free the cached last sent asconf chunk. */ list_del_init(&asconf->transmitted_list); sctp_chunk_free(asconf); asoc->addip_last_asconf = NULL; return retval; } /* Make a FWD TSN chunk. 
*/ struct sctp_chunk *sctp_make_fwdtsn(const struct sctp_association *asoc, __u32 new_cum_tsn, size_t nstreams, struct sctp_fwdtsn_skip *skiplist) { struct sctp_chunk *retval = NULL; struct sctp_fwdtsn_hdr ftsn_hdr; struct sctp_fwdtsn_skip skip; size_t hint; int i; hint = (nstreams + 1) * sizeof(__u32); retval = sctp_make_control(asoc, SCTP_CID_FWD_TSN, 0, hint); if (!retval) return NULL; ftsn_hdr.new_cum_tsn = htonl(new_cum_tsn); retval->subh.fwdtsn_hdr = sctp_addto_chunk(retval, sizeof(ftsn_hdr), &ftsn_hdr); for (i = 0; i < nstreams; i++) { skip.stream = skiplist[i].stream; skip.ssn = skiplist[i].ssn; sctp_addto_chunk(retval, sizeof(skip), &skip); } return retval; }
/* SCTP kernel implementation
 * (C) Copyright IBM Corp. 2001, 2004
 * Copyright (c) 1999-2000 Cisco, Inc.
 * Copyright (c) 1999-2001 Motorola, Inc.
 * Copyright (c) 2001-2002 Intel Corp.
 *
 * This file is part of the SCTP kernel implementation
 *
 * These functions work with the state functions in sctp_sm_statefuns.c
 * to implement the state operations.  These functions implement the
 * steps which require modifying existing data structures.
 *
 * This SCTP implementation is free software;
 * you can redistribute it and/or modify it under the terms of
 * the GNU General Public License as published by
 * the Free Software Foundation; either version 2, or (at your option)
 * any later version.
 *
 * This SCTP implementation is distributed in the hope that it
 * will be useful, but WITHOUT ANY WARRANTY; without even the implied
 *                 ************************
 * warranty of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.
 * See the GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with GNU CC; see the file COPYING.  If not, see
 * <http://www.gnu.org/licenses/>.
 *
 * Please send any bug reports or fixes you make to the
 * email address(es):
 *    lksctp developers <linux-sctp@vger.kernel.org>
 *
 * Written or modified by:
 *    La Monte H.P. Yarroll <piggy@acm.org>
 *    Karl Knutson          <karl@athena.chicago.il.us>
 *    C. Robin              <chris@hundredacre.ac.uk>
 *    Jon Grimm             <jgrimm@us.ibm.com>
 *    Xingang Guo           <xingang.guo@intel.com>
 *    Dajiang Zhang         <dajiang.zhang@nokia.com>
 *    Sridhar Samudrala     <sri@us.ibm.com>
 *    Daisy Chang           <daisyc@us.ibm.com>
 *    Ardelle Fan           <ardelle.fan@intel.com>
 *    Kevin Gao             <kevin.gao@intel.com>
 */

#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt

#include <linux/types.h>
#include <linux/kernel.h>
#include <linux/ip.h>
#include <linux/ipv6.h>
#include <linux/net.h>
#include <linux/inet.h>
#include <linux/scatterlist.h>
#include <linux/crypto.h>
#include <linux/slab.h>
#include <net/sock.h>

#include <linux/skbuff.h>
#include <linux/random.h>	/* for get_random_bytes */
#include <net/sctp/sctp.h>
#include <net/sctp/sm.h>

/* Forward declarations of the file-local chunk constructors and helpers. */
static struct sctp_chunk *sctp_make_control(const struct sctp_association *asoc,
					    __u8 type, __u8 flags, int paylen);
static struct sctp_chunk *sctp_make_data(const struct sctp_association *asoc,
					 __u8 flags, int paylen);
static struct sctp_chunk *_sctp_make_chunk(const struct sctp_association *asoc,
					   __u8 type, __u8 flags, int paylen);
static sctp_cookie_param_t *sctp_pack_cookie(const struct sctp_endpoint *ep,
					     const struct sctp_association *asoc,
					     const struct sctp_chunk *init_chunk,
					     int *cookie_len,
					     const __u8 *raw_addrs,
					     int addrs_len);
static int sctp_process_param(struct sctp_association *asoc,
			      union sctp_params param,
			      const union sctp_addr *peer_addr,
			      gfp_t gfp);
static void *sctp_addto_param(struct sctp_chunk *chunk, int len,
			      const void *data);
static void *sctp_addto_chunk_fixed(struct sctp_chunk *, int len,
				    const void *data);

/* Control chunk destructor: placeholder until control-chunk memory is
 * properly accounted (see sctp_control_set_owner_w below).
 */
static void sctp_control_release_owner(struct sk_buff *skb)
{
	/*TODO: do memory release */
}

/* Attach an owning socket and destructor to a control chunk's skb. */
static void sctp_control_set_owner_w(struct sctp_chunk *chunk)
{
	struct sctp_association *asoc = chunk->asoc;
	struct sk_buff *skb = chunk->skb;

	/* TODO: properly account for control chunks.
	 * To do it right we'll need:
	 *  1) endpoint if association isn't known.
	 *  2) proper memory accounting.
	 *
	 *  For now don't do anything for now.
	 */
	/* asoc may be NULL (e.g. chunks built before an association
	 * exists), in which case no owner socket is set.
	 */
	skb->sk = asoc ? asoc->base.sk : NULL;
	skb->destructor = sctp_control_release_owner;
}

/* What was the inbound interface for this chunk?
 * Returns the skb's interface index via the address-family helper,
 * or 0 when the IP version maps to no known address family.
 */
int sctp_chunk_iif(const struct sctp_chunk *chunk)
{
	struct sctp_af *af;
	int iif = 0;

	af = sctp_get_af_specific(ipver2af(ip_hdr(chunk->skb)->version));
	if (af)
		iif = af->skb_iif(chunk->skb);

	return iif;
}

/* RFC 2960 3.3.2 Initiation (INIT) (1)
 *
 * Note 2: The ECN capable field is reserved for future use of
 * Explicit Congestion Notification.
 */
static const struct sctp_paramhdr ecap_param = {
	SCTP_PARAM_ECN_CAPABLE,
	cpu_to_be16(sizeof(struct sctp_paramhdr)),
};
static const struct sctp_paramhdr prsctp_param = {
	SCTP_PARAM_FWD_TSN_SUPPORT,
	cpu_to_be16(sizeof(struct sctp_paramhdr)),
};

/* A helper to initialize an op error inside a
 * provided chunk, as most cause codes will be embedded inside an
 * abort chunk.
 * Appends an error header (cause_code + total length) to @chunk;
 * @paylen is the length of the cause-specific data the caller will
 * append afterwards.
 */
void sctp_init_cause(struct sctp_chunk *chunk, __be16 cause_code,
		     size_t paylen)
{
	sctp_errhdr_t err;
	__u16 len;

	/* Cause code constants are now defined in network order.  */
	err.cause = cause_code;
	len = sizeof(sctp_errhdr_t) + paylen;
	err.length = htons(len);
	chunk->subh.err_hdr = sctp_addto_chunk(chunk, sizeof(sctp_errhdr_t),
					       &err);
}

/* A helper to initialize an op error inside a
 * provided chunk, as most cause codes will be embedded inside an
 * abort chunk.  Differs from sctp_init_cause in that it won't oops
 * if there isn't enough space in the op error chunk.
 * Returns 0 on success, -ENOSPC when the skb has no room for the
 * full error (header + payload).
 */
static int sctp_init_cause_fixed(struct sctp_chunk *chunk, __be16 cause_code,
				 size_t paylen)
{
	sctp_errhdr_t err;
	__u16 len;

	/* Cause code constants are now defined in network order.  */
	err.cause = cause_code;
	len = sizeof(sctp_errhdr_t) + paylen;
	err.length = htons(len);

	if (skb_tailroom(chunk->skb) < len)
		return -ENOSPC;

	chunk->subh.err_hdr = sctp_addto_chunk_fixed(chunk,
						     sizeof(sctp_errhdr_t),
						     &err);
	return 0;
}

/* 3.3.2 Initiation (INIT) (1)
 *
 * This chunk is used to initiate a SCTP association between two
 * endpoints.  The format of the INIT chunk is shown below:
 *
 *     0                   1                   2                   3
 *     0 1 2 3 4 5 6 7 8 9 0 1 2 3 4 5 6 7 8 9 0 1 2 3 4 5 6 7 8 9 0 1
 *    +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
 *    |   Type = 1    |  Chunk Flags  |      Chunk Length             |
 *    +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
 *    |                         Initiate Tag                          |
 *    +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
 *    |           Advertised Receiver Window Credit (a_rwnd)          |
 *    +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
 *    |  Number of Outbound Streams   |  Number of Inbound Streams    |
 *    +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
 *    |                          Initial TSN                          |
 *    +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
 *    \                                                               \
 *    /              Optional/Variable-Length Parameters              /
 *    \                                                               \
 *    +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
 *
 *
 * The INIT chunk contains the following parameters. Unless otherwise
 * noted, each parameter MUST only be included once in the INIT chunk.
 *
 * Fixed Parameters                     Status
 * ----------------------------------------------
 * Initiate Tag                        Mandatory
 * Advertised Receiver Window Credit   Mandatory
 * Number of Outbound Streams          Mandatory
 * Number of Inbound Streams           Mandatory
 * Initial TSN                         Mandatory
 *
 * Variable Parameters                  Status     Type Value
 * -------------------------------------------------------------
 * IPv4 Address (Note 1)               Optional    5
 * IPv6 Address (Note 1)               Optional    6
 * Cookie Preservative                 Optional    9
 * Reserved for ECN Capable (Note 2)   Optional    32768 (0x8000)
 * Host Name Address (Note 3)          Optional    11
 * Supported Address Types (Note 4)    Optional    12
 */
struct sctp_chunk *sctp_make_init(const struct sctp_association *asoc,
				  const struct sctp_bind_addr *bp,
				  gfp_t gfp, int vparam_len)
{
	struct net *net = sock_net(asoc->base.sk);
	struct sctp_endpoint *ep = asoc->ep;
	sctp_inithdr_t init;
	union sctp_params addrs;
	size_t chunksize;
	struct sctp_chunk *retval = NULL;
	int num_types, addrs_len = 0;
	struct sctp_sock *sp;
	sctp_supported_addrs_param_t sat;
	__be16 types[2];
	sctp_adaptation_ind_param_t aiparam;
	sctp_supported_ext_param_t ext_param;
	int num_ext = 0;
	__u8 extensions[3];
	sctp_paramhdr_t *auth_chunks = NULL,
			*auth_hmacs = NULL;

	/* RFC 2960 3.3.2 Initiation (INIT) (1)
	 *
	 * Note 1: The INIT chunks can contain multiple addresses that
	 * can be IPv4 and/or IPv6 in any combination.
	 */
	retval = NULL;

	/* Convert the provided bind address list to raw format. */
	addrs = sctp_bind_addrs_to_raw(bp, &addrs_len, gfp);

	/* Fixed INIT header fields, all converted to network order. */
	init.init_tag		   = htonl(asoc->c.my_vtag);
	init.a_rwnd		   = htonl(asoc->rwnd);
	init.num_outbound_streams  = htons(asoc->c.sinit_num_ostreams);
	init.num_inbound_streams   = htons(asoc->c.sinit_max_instreams);
	init.initial_tsn	   = htonl(asoc->c.initial_tsn);

	/* How many address types are needed?
	 * First pass: compute the total chunk size so the skb can be
	 * allocated once, before any parameter is serialized.
	 */
	sp = sctp_sk(asoc->base.sk);
	num_types = sp->pf->supported_addrs(sp, types);

	chunksize = sizeof(init) + addrs_len;
	chunksize += WORD_ROUND(SCTP_SAT_LEN(num_types));
	chunksize += sizeof(ecap_param);

	if (net->sctp.prsctp_enable)
		chunksize += sizeof(prsctp_param);

	/* ADDIP: Section 4.2.7:
	 *  An implementation supporting this extension [ADDIP] MUST list
	 *  the ASCONF,the ASCONF-ACK, and the AUTH  chunks in its INIT and
	 *  INIT-ACK parameters.
	 */
	if (net->sctp.addip_enable) {
		extensions[num_ext] = SCTP_CID_ASCONF;
		extensions[num_ext+1] = SCTP_CID_ASCONF_ACK;
		num_ext += 2;
	}

	if (sp->adaptation_ind)
		chunksize += sizeof(aiparam);

	chunksize += vparam_len;

	/* Account for AUTH related parameters */
	if (ep->auth_enable) {
		/* Add random parameter length*/
		chunksize += sizeof(asoc->c.auth_random);

		/* Add HMACS parameter length if any were defined */
		auth_hmacs = (sctp_paramhdr_t *)asoc->c.auth_hmacs;
		if (auth_hmacs->length)
			chunksize += WORD_ROUND(ntohs(auth_hmacs->length));
		else
			auth_hmacs = NULL;

		/* Add CHUNKS parameter length */
		auth_chunks = (sctp_paramhdr_t *)asoc->c.auth_chunks;
		if (auth_chunks->length)
			chunksize += WORD_ROUND(ntohs(auth_chunks->length));
		else
			auth_chunks = NULL;

		extensions[num_ext] = SCTP_CID_AUTH;
		num_ext += 1;
	}

	/* If we have any extensions to report,  account for that */
	if (num_ext)
		chunksize += WORD_ROUND(sizeof(sctp_supported_ext_param_t) +
					num_ext);

	/* RFC 2960 3.3.2 Initiation (INIT) (1)
	 *
	 * Note 3: An INIT chunk MUST NOT contain more than one Host
	 * Name address parameter. Moreover, the sender of the INIT
	 * MUST NOT combine any other address types with the Host Name
	 * address in the INIT. The receiver of INIT MUST ignore any
	 * other address types if the Host Name address parameter is
	 * present in the received INIT chunk.
	 *
	 * PLEASE DO NOT FIXME [This version does not support Host Name.]
	 */

	/* Second pass: allocate and serialize the parameters in order. */
	retval = sctp_make_control(asoc, SCTP_CID_INIT, 0, chunksize);
	if (!retval)
		goto nodata;

	retval->subh.init_hdr =
		sctp_addto_chunk(retval, sizeof(init), &init);
	retval->param_hdr.v =
		sctp_addto_chunk(retval, addrs_len, addrs.v);

	/* RFC 2960 3.3.2 Initiation (INIT) (1)
	 *
	 * Note 4: This parameter, when present, specifies all the
	 * address types the sending endpoint can support. The absence
	 * of this parameter indicates that the sending endpoint can
	 * support any address type.
	 */
	sat.param_hdr.type = SCTP_PARAM_SUPPORTED_ADDRESS_TYPES;
	sat.param_hdr.length = htons(SCTP_SAT_LEN(num_types));
	sctp_addto_chunk(retval, sizeof(sat), &sat);
	sctp_addto_chunk(retval, num_types * sizeof(__u16), &types);

	sctp_addto_chunk(retval, sizeof(ecap_param), &ecap_param);

	/* Add the supported extensions parameter.  Be nice and add this
	 * fist before addiding the parameters for the extensions themselves
	 */
	if (num_ext) {
		ext_param.param_hdr.type = SCTP_PARAM_SUPPORTED_EXT;
		ext_param.param_hdr.length =
			    htons(sizeof(sctp_supported_ext_param_t) + num_ext);
		sctp_addto_chunk(retval, sizeof(sctp_supported_ext_param_t),
				&ext_param);
		sctp_addto_param(retval, num_ext, extensions);
	}

	if (net->sctp.prsctp_enable)
		sctp_addto_chunk(retval, sizeof(prsctp_param), &prsctp_param);

	if (sp->adaptation_ind) {
		aiparam.param_hdr.type = SCTP_PARAM_ADAPTATION_LAYER_IND;
		aiparam.param_hdr.length = htons(sizeof(aiparam));
		aiparam.adaptation_ind = htonl(sp->adaptation_ind);
		sctp_addto_chunk(retval, sizeof(aiparam), &aiparam);
	}

	/* Add SCTP-AUTH chunks to the parameter list */
	if (ep->auth_enable) {
		sctp_addto_chunk(retval, sizeof(asoc->c.auth_random),
				 asoc->c.auth_random);
		if (auth_hmacs)
			sctp_addto_chunk(retval, ntohs(auth_hmacs->length),
					auth_hmacs);
		if (auth_chunks)
			sctp_addto_chunk(retval, ntohs(auth_chunks->length),
					auth_chunks);
	}
nodata:
	kfree(addrs.v);
	return retval;
}

/* Build an INIT-ACK in response to @chunk (the received INIT).
 * Mirrors sctp_make_init's two-pass size/serialize structure, plus a
 * state cookie; @unkparam_len reserves room for reporting unknown
 * parameters.  Returns NULL on allocation failure.
 */
struct sctp_chunk *sctp_make_init_ack(const struct sctp_association *asoc,
				      const struct sctp_chunk *chunk,
				      gfp_t gfp, int unkparam_len)
{
	sctp_inithdr_t initack;
	struct sctp_chunk *retval;
	union sctp_params addrs;
	struct sctp_sock *sp;
	int addrs_len;
	sctp_cookie_param_t *cookie;
	int cookie_len;
	size_t chunksize;
	sctp_adaptation_ind_param_t aiparam;
	sctp_supported_ext_param_t ext_param;
	int num_ext = 0;
	__u8 extensions[3];
	sctp_paramhdr_t *auth_chunks = NULL,
			*auth_hmacs = NULL,
			*auth_random = NULL;

	retval = NULL;

	/* Note: there may be no addresses to embed. */
	addrs = sctp_bind_addrs_to_raw(&asoc->base.bind_addr, &addrs_len, gfp);

	initack.init_tag	        = htonl(asoc->c.my_vtag);
	initack.a_rwnd			= htonl(asoc->rwnd);
	initack.num_outbound_streams	= htons(asoc->c.sinit_num_ostreams);
	initack.num_inbound_streams	= htons(asoc->c.sinit_max_instreams);
	initack.initial_tsn		= htonl(asoc->c.initial_tsn);

	/* FIXME:  We really ought to build the cookie right
	 * into the packet instead of allocating more fresh memory.
	 */
	cookie = sctp_pack_cookie(asoc->ep, asoc, chunk, &cookie_len,
				  addrs.v, addrs_len);
	if (!cookie)
		goto nomem_cookie;

	/* Calculate the total size of allocation, include the reserved
	 * space for reporting unknown parameters if it is specified.
	 */
	sp = sctp_sk(asoc->base.sk);
	chunksize = sizeof(initack) + addrs_len + cookie_len + unkparam_len;

	/* Tell peer that we'll do ECN only if peer advertised such cap.  */
	if (asoc->peer.ecn_capable)
		chunksize += sizeof(ecap_param);

	if (asoc->peer.prsctp_capable)
		chunksize += sizeof(prsctp_param);

	if (asoc->peer.asconf_capable) {
		extensions[num_ext] = SCTP_CID_ASCONF;
		extensions[num_ext+1] = SCTP_CID_ASCONF_ACK;
		num_ext += 2;
	}

	if (sp->adaptation_ind)
		chunksize += sizeof(aiparam);

	if (asoc->peer.auth_capable) {
		auth_random = (sctp_paramhdr_t *)asoc->c.auth_random;
		chunksize += ntohs(auth_random->length);

		auth_hmacs = (sctp_paramhdr_t *)asoc->c.auth_hmacs;
		if (auth_hmacs->length)
			chunksize += WORD_ROUND(ntohs(auth_hmacs->length));
		else
			auth_hmacs = NULL;

		auth_chunks = (sctp_paramhdr_t *)asoc->c.auth_chunks;
		if (auth_chunks->length)
			chunksize += WORD_ROUND(ntohs(auth_chunks->length));
		else
			auth_chunks = NULL;

		extensions[num_ext] = SCTP_CID_AUTH;
		num_ext += 1;
	}

	if (num_ext)
		chunksize += WORD_ROUND(sizeof(sctp_supported_ext_param_t) +
					num_ext);

	/* Now allocate and fill out the chunk.  */
	retval = sctp_make_control(asoc, SCTP_CID_INIT_ACK, 0, chunksize);
	if (!retval)
		goto nomem_chunk;

	/* RFC 2960 6.4 Multi-homed SCTP Endpoints
	 *
	 * An endpoint SHOULD transmit reply chunks (e.g., SACK,
	 * HEARTBEAT ACK, * etc.) to the same destination transport
	 * address from which it received the DATA or control chunk
	 * to which it is replying.
	 *
	 * [INIT ACK back to where the INIT came from.]
	 */
	retval->transport = chunk->transport;

	retval->subh.init_hdr =
		sctp_addto_chunk(retval, sizeof(initack), &initack);
	retval->param_hdr.v = sctp_addto_chunk(retval, addrs_len, addrs.v);
	sctp_addto_chunk(retval, cookie_len, cookie);
	if (asoc->peer.ecn_capable)
		sctp_addto_chunk(retval, sizeof(ecap_param), &ecap_param);
	if (num_ext) {
		ext_param.param_hdr.type = SCTP_PARAM_SUPPORTED_EXT;
		ext_param.param_hdr.length =
			    htons(sizeof(sctp_supported_ext_param_t) + num_ext);
		sctp_addto_chunk(retval, sizeof(sctp_supported_ext_param_t),
				 &ext_param);
		sctp_addto_param(retval, num_ext, extensions);
	}
	if (asoc->peer.prsctp_capable)
		sctp_addto_chunk(retval, sizeof(prsctp_param), &prsctp_param);

	if (sp->adaptation_ind) {
		aiparam.param_hdr.type = SCTP_PARAM_ADAPTATION_LAYER_IND;
		aiparam.param_hdr.length = htons(sizeof(aiparam));
		aiparam.adaptation_ind = htonl(sp->adaptation_ind);
		sctp_addto_chunk(retval, sizeof(aiparam), &aiparam);
	}

	if (asoc->peer.auth_capable) {
		sctp_addto_chunk(retval, ntohs(auth_random->length),
				 auth_random);
		if (auth_hmacs)
			sctp_addto_chunk(retval, ntohs(auth_hmacs->length),
					auth_hmacs);
		if (auth_chunks)
			sctp_addto_chunk(retval, ntohs(auth_chunks->length),
					auth_chunks);
	}

	/* We need to remove the const qualifier at this point.  */
	retval->asoc = (struct sctp_association *) asoc;

nomem_chunk:
	kfree(cookie);
nomem_cookie:
	kfree(addrs.v);
	return retval;
}

/* 3.3.11 Cookie Echo (COOKIE ECHO) (10):
 *
 * This chunk is used only during the initialization of an association.
 * It is sent by the initiator of an association to its peer to complete
 * the initialization process. This chunk MUST precede any DATA chunk
 * sent within the association, but MAY be bundled with one or more DATA
 * chunks in the same packet.
 *
 *      0                   1                   2                   3
 *      0 1 2 3 4 5 6 7 8 9 0 1 2 3 4 5 6 7 8 9 0 1 2 3 4 5 6 7 8 9 0 1
 *     +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
 *     |   Type = 10   |Chunk  Flags   |         Length                |
 *     +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
 *     /                     Cookie                                    /
 *     \                                                               \
 *     +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
 *
 * Chunk Flags: 8 bit
 *
 *   Set to zero on transmit and ignored on receipt.
 *
 * Length: 16 bits (unsigned integer)
 *
 *   Set to the size of the chunk in bytes, including the 4 bytes of
 *   the chunk header and the size of the Cookie.
 *
 * Cookie: variable size
 *
 *   This field must contain the exact cookie received in the
 *   State Cookie parameter from the previous INIT ACK.
 *
 *   An implementation SHOULD make the cookie as small as possible
 *   to insure interoperability.
 */
struct sctp_chunk *sctp_make_cookie_echo(const struct sctp_association *asoc,
					 const struct sctp_chunk *chunk)
{
	struct sctp_chunk *retval;
	void *cookie;
	int cookie_len;

	/* Echo back the cookie the peer handed us in its INIT-ACK. */
	cookie = asoc->peer.cookie;
	cookie_len = asoc->peer.cookie_len;

	/* Build a cookie echo chunk.  */
	retval = sctp_make_control(asoc, SCTP_CID_COOKIE_ECHO, 0, cookie_len);
	if (!retval)
		goto nodata;
	retval->subh.cookie_hdr =
		sctp_addto_chunk(retval, cookie_len, cookie);

	/* RFC 2960 6.4 Multi-homed SCTP Endpoints
	 *
	 * An endpoint SHOULD transmit reply chunks (e.g., SACK,
	 * HEARTBEAT ACK, * etc.) to the same destination transport
	 * address from which it * received the DATA or control chunk
	 * to which it is replying.
	 *
	 * [COOKIE ECHO back to where the INIT ACK came from.]
	 */
	if (chunk)
		retval->transport = chunk->transport;

nodata:
	return retval;
}

/* 3.3.12 Cookie Acknowledgement (COOKIE ACK) (11):
 *
 * This chunk is used only during the initialization of an
 * association.  It is used to acknowledge the receipt of a COOKIE
 * ECHO chunk.  This chunk MUST precede any DATA or SACK chunk sent
 * within the association, but MAY be bundled with one or more DATA
 * chunks or SACK chunk in the same SCTP packet.
 *
 *      0                   1                   2                   3
 *      0 1 2 3 4 5 6 7 8 9 0 1 2 3 4 5 6 7 8 9 0 1 2 3 4 5 6 7 8 9 0 1
 *     +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
 *     |   Type = 11   |Chunk   Flags  |     Length = 4                |
 *     +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
 *
 * Chunk Flags: 8 bits
 *
 *   Set to zero on transmit and ignored on receipt.
 */
struct sctp_chunk *sctp_make_cookie_ack(const struct sctp_association *asoc,
					const struct sctp_chunk *chunk)
{
	struct sctp_chunk *retval;

	/* COOKIE ACK carries no payload - header only. */
	retval = sctp_make_control(asoc, SCTP_CID_COOKIE_ACK, 0, 0);

	/* RFC 2960 6.4 Multi-homed SCTP Endpoints
	 *
	 * An endpoint SHOULD transmit reply chunks (e.g., SACK,
	 * HEARTBEAT ACK, * etc.) to the same destination transport
	 * address from which it * received the DATA or control chunk
	 * to which it is replying.
	 *
	 * [COOKIE ACK back to where the COOKIE ECHO came from.]
	 */
	if (retval && chunk)
		retval->transport = chunk->transport;

	return retval;
}

/*
 *  Appendix A: Explicit Congestion Notification:
 *  CWR:
 *
 *  RFC 2481 details a specific bit for a sender to send in the header of
 *  its next outbound TCP segment to indicate to its peer that it has
 *  reduced its congestion window.  This is termed the CWR bit.  For
 *  SCTP the same indication is made by including the CWR chunk.
 *  This chunk contains one data element, i.e. the TSN number that
 *  was sent in the ECNE chunk.  This element represents the lowest
 *  TSN number in the datagram that was originally marked with the
 *  CE bit.
 *
 *     0                   1                   2                   3
 *     0 1 2 3 4 5 6 7 8 9 0 1 2 3 4 5 6 7 8 9 0 1 2 3 4 5 6 7 8 9 0 1
 *    +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
 *    | Chunk Type=13 | Flags=00000000|    Chunk Length = 8           |
 *    +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
 *    |                      Lowest TSN Number                        |
 *    +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
 *
 * Note: The CWR is considered a Control chunk.
 */
struct sctp_chunk *sctp_make_cwr(const struct sctp_association *asoc,
				 const __u32 lowest_tsn,
				 const struct sctp_chunk *chunk)
{
	struct sctp_chunk *retval;
	sctp_cwrhdr_t cwr;

	cwr.lowest_tsn = htonl(lowest_tsn);
	retval = sctp_make_control(asoc, SCTP_CID_ECN_CWR, 0,
				   sizeof(sctp_cwrhdr_t));

	if (!retval)
		goto nodata;

	retval->subh.ecn_cwr_hdr =
		sctp_addto_chunk(retval, sizeof(cwr), &cwr);

	/* RFC 2960 6.4 Multi-homed SCTP Endpoints
	 *
	 * An endpoint SHOULD transmit reply chunks (e.g., SACK,
	 * HEARTBEAT ACK, * etc.) to the same destination transport
	 * address from which it * received the DATA or control chunk
	 * to which it is replying.
	 *
	 * [Report a reduced congestion window back to where the ECNE
	 * came from.]
	 */
	if (chunk)
		retval->transport = chunk->transport;

nodata:
	return retval;
}

/* Make an ECNE chunk.  This is a congestion experienced report.  */
struct sctp_chunk *sctp_make_ecne(const struct sctp_association *asoc,
				  const __u32 lowest_tsn)
{
	struct sctp_chunk *retval;
	sctp_ecnehdr_t ecne;

	ecne.lowest_tsn = htonl(lowest_tsn);
	retval = sctp_make_control(asoc, SCTP_CID_ECN_ECNE, 0,
				   sizeof(sctp_ecnehdr_t));
	if (!retval)
		goto nodata;
	retval->subh.ecne_hdr =
		sctp_addto_chunk(retval, sizeof(ecne), &ecne);

nodata:
	return retval;
}

/* Make a DATA chunk for the given association from the provided
 * parameters.  However, do not populate the data payload.
 * The TSN field is deliberately left as 0 here; it is assigned as
 * late as possible (at transmit time), not at chunk creation.
 */
struct sctp_chunk *sctp_make_datafrag_empty(struct sctp_association *asoc,
					    const struct sctp_sndrcvinfo *sinfo,
					    int data_len, __u8 flags, __u16 ssn)
{
	struct sctp_chunk *retval;
	struct sctp_datahdr dp;
	int chunk_len;

	/* We assign the TSN as LATE as possible, not here when
	 * creating the chunk.
	 */
	dp.tsn = 0;
	dp.stream = htons(sinfo->sinfo_stream);
	dp.ppid   = sinfo->sinfo_ppid;

	/* Set the flags for an unordered send.  */
	if (sinfo->sinfo_flags & SCTP_UNORDERED) {
		flags |= SCTP_DATA_UNORDERED;
		dp.ssn = 0;
	} else
		dp.ssn = htons(ssn);

	chunk_len = sizeof(dp) + data_len;
	retval = sctp_make_data(asoc, flags, chunk_len);
	if (!retval)
		goto nodata;

	retval->subh.data_hdr = sctp_addto_chunk(retval, sizeof(dp), &dp);
	/* Keep the caller's sndrcvinfo with the chunk for later use. */
	memcpy(&retval->sinfo, sinfo, sizeof(struct sctp_sndrcvinfo));

nodata:
	return retval;
}

/* Create a selective ackowledgement (SACK) for the given
 * association.  This reports on which TSN's we've seen to date,
 * including duplicates and gaps.
 */
struct sctp_chunk *sctp_make_sack(const struct sctp_association *asoc)
{
	struct sctp_chunk *retval;
	struct sctp_sackhdr sack;
	int len;
	__u32 ctsn;
	__u16 num_gabs, num_dup_tsns;
	struct sctp_association *aptr = (struct sctp_association *)asoc;
	struct sctp_tsnmap *map = (struct sctp_tsnmap *)&asoc->peer.tsn_map;
	struct sctp_gap_ack_block gabs[SCTP_MAX_GABS];
	struct sctp_transport *trans;

	memset(gabs, 0, sizeof(gabs));
	ctsn = sctp_tsnmap_get_ctsn(map);

	pr_debug("%s: sackCTSNAck sent:0x%x\n", __func__, ctsn);

	/* How much room is needed in the chunk? */
	num_gabs = sctp_tsnmap_num_gabs(map, gabs);
	num_dup_tsns = sctp_tsnmap_num_dups(map);

	/* Initialize the SACK header.  */
	sack.cum_tsn_ack	    = htonl(ctsn);
	sack.a_rwnd		    = htonl(asoc->a_rwnd);
	sack.num_gap_ack_blocks     = htons(num_gabs);
	sack.num_dup_tsns	    = htons(num_dup_tsns);

	len = sizeof(sack)
		+ sizeof(struct sctp_gap_ack_block) * num_gabs
		+ sizeof(__u32) * num_dup_tsns;

	/* Create the chunk.  */
	retval = sctp_make_control(asoc, SCTP_CID_SACK, 0, len);
	if (!retval)
		goto nodata;

	/* RFC 2960 6.4 Multi-homed SCTP Endpoints
	 *
	 * An endpoint SHOULD transmit reply chunks (e.g., SACK,
	 * HEARTBEAT ACK, etc.) to the same destination transport
	 * address from which it received the DATA or control chunk to
	 * which it is replying.  This rule should also be followed if
	 * the endpoint is bundling DATA chunks together with the
	 * reply chunk.
	 *
	 * However, when acknowledging multiple DATA chunks received
	 * in packets from different source addresses in a single
	 * SACK, the SACK chunk may be transmitted to one of the
	 * destination transport addresses from which the DATA or
	 * control chunks being acknowledged were received.
	 *
	 * [BUG:  We do not implement the following paragraph.
	 * Perhaps we should remember the last transport we used for a
	 * SACK and avoid that (if possible) if we have seen any
	 * duplicates. --piggy]
	 *
	 * When a receiver of a duplicate DATA chunk sends a SACK to a
	 * multi- homed endpoint it MAY be beneficial to vary the
	 * destination address and not use the source address of the
	 * DATA chunk.  The reason being that receiving a duplicate
	 * from a multi-homed endpoint might indicate that the return
	 * path (as specified in the source address of the DATA chunk)
	 * for the SACK is broken.
	 *
	 * [Send to the address from which we last received a DATA chunk.]
	 */
	retval->transport = asoc->peer.last_data_from;

	retval->subh.sack_hdr =
		sctp_addto_chunk(retval, sizeof(sack), &sack);

	/* Add the gap ack block information.
	 * NOTE(review): sized with sizeof(__u32) while the len
	 * computation above uses sizeof(struct sctp_gap_ack_block);
	 * this assumes the two are the same size (a gap ack block is
	 * two __u16 fields) -- confirm against the struct definition.
	 */
	if (num_gabs)
		sctp_addto_chunk(retval, sizeof(__u32) * num_gabs,
				 gabs);

	/* Add the duplicate TSN information.  */
	if (num_dup_tsns) {
		aptr->stats.idupchunks += num_dup_tsns;
		sctp_addto_chunk(retval, sizeof(__u32) * num_dup_tsns,
				 sctp_tsnmap_get_dups(map));
	}

	/* Once we have a sack generated, check to see what our sack
	 * generation is, if its 0, reset the transports to 0, and reset
	 * the association generation to 1
	 *
	 * The idea is that zero is never used as a valid generation for the
	 * association so no transport will match after a wrap event like this,
	 * Until the next sack
	 */
	if (++aptr->peer.sack_generation == 0) {
		list_for_each_entry(trans, &asoc->peer.transport_addr_list,
				    transports)
			trans->sack_generation = 0;
		aptr->peer.sack_generation = 1;
	}

nodata:
	return retval;
}

/* Make a SHUTDOWN chunk.
 */
struct sctp_chunk *sctp_make_shutdown(const struct sctp_association *asoc,
				      const struct sctp_chunk *chunk)
{
	struct sctp_chunk *retval;
	sctp_shutdownhdr_t shut;
	__u32 ctsn;

	/* SHUTDOWN carries our current cumulative TSN ack point. */
	ctsn = sctp_tsnmap_get_ctsn(&asoc->peer.tsn_map);
	shut.cum_tsn_ack = htonl(ctsn);

	retval = sctp_make_control(asoc, SCTP_CID_SHUTDOWN, 0,
				   sizeof(sctp_shutdownhdr_t));
	if (!retval)
		goto nodata;

	retval->subh.shutdown_hdr =
		sctp_addto_chunk(retval, sizeof(shut), &shut);

	if (chunk)
		retval->transport = chunk->transport;
nodata:
	return retval;
}

/* Build a SHUTDOWN ACK; header-only chunk, replies on the same
 * transport the SHUTDOWN arrived on.
 */
struct sctp_chunk *sctp_make_shutdown_ack(const struct sctp_association *asoc,
					  const struct sctp_chunk *chunk)
{
	struct sctp_chunk *retval;

	retval = sctp_make_control(asoc, SCTP_CID_SHUTDOWN_ACK, 0, 0);

	/* RFC 2960 6.4 Multi-homed SCTP Endpoints
	 *
	 * An endpoint SHOULD transmit reply chunks (e.g., SACK,
	 * HEARTBEAT ACK, * etc.) to the same destination transport
	 * address from which it * received the DATA or control chunk
	 * to which it is replying.
	 *
	 * [ACK back to where the SHUTDOWN came from.]
	 */
	if (retval && chunk)
		retval->transport = chunk->transport;

	return retval;
}

/* Build a SHUTDOWN COMPLETE; sets the T bit when no association
 * exists so the peer knows the vtag is reflected.
 */
struct sctp_chunk *sctp_make_shutdown_complete(
	const struct sctp_association *asoc,
	const struct sctp_chunk *chunk)
{
	struct sctp_chunk *retval;
	__u8 flags = 0;

	/* Set the T-bit if we have no association (vtag will be
	 * reflected)
	 */
	flags |= asoc ? 0 : SCTP_CHUNK_FLAG_T;

	retval = sctp_make_control(asoc, SCTP_CID_SHUTDOWN_COMPLETE, flags, 0);

	/* RFC 2960 6.4 Multi-homed SCTP Endpoints
	 *
	 * An endpoint SHOULD transmit reply chunks (e.g., SACK,
	 * HEARTBEAT ACK, * etc.) to the same destination transport
	 * address from which it * received the DATA or control chunk
	 * to which it is replying.
	 *
	 * [Report SHUTDOWN COMPLETE back to where the SHUTDOWN ACK
	 * came from.]
	 */
	if (retval && chunk)
		retval->transport = chunk->transport;

	return retval;
}

/* Create an ABORT.  Note that we set the T bit if we have no
 * association, except when responding to an INIT (sctpimpguide 2.41).
 */
struct sctp_chunk *sctp_make_abort(const struct sctp_association *asoc,
				   const struct sctp_chunk *chunk,
				   const size_t hint)
{
	struct sctp_chunk *retval;
	__u8 flags = 0;

	/* Set the T-bit if we have no association and 'chunk' is not
	 * an INIT (vtag will be reflected).
	 */
	if (!asoc) {
		if (chunk && chunk->chunk_hdr &&
		    chunk->chunk_hdr->type == SCTP_CID_INIT)
			flags = 0;
		else
			flags = SCTP_CHUNK_FLAG_T;
	}

	retval = sctp_make_control(asoc, SCTP_CID_ABORT, flags, hint);

	/* RFC 2960 6.4 Multi-homed SCTP Endpoints
	 *
	 * An endpoint SHOULD transmit reply chunks (e.g., SACK,
	 * HEARTBEAT ACK, * etc.) to the same destination transport
	 * address from which it * received the DATA or control chunk
	 * to which it is replying.
	 *
	 * [ABORT back to where the offender came from.]
	 */
	if (retval && chunk)
		retval->transport = chunk->transport;

	return retval;
}

/* Helper to create ABORT with a NO_USER_DATA error.  */
struct sctp_chunk *sctp_make_abort_no_data(
	const struct sctp_association *asoc,
	const struct sctp_chunk *chunk, __u32 tsn)
{
	struct sctp_chunk *retval;
	__be32 payload;

	retval = sctp_make_abort(asoc, chunk,
				 sizeof(sctp_errhdr_t) + sizeof(tsn));

	if (!retval)
		goto no_mem;

	/* Put the tsn back into network byte order.  */
	payload = htonl(tsn);
	sctp_init_cause(retval, SCTP_ERROR_NO_DATA, sizeof(payload));
	sctp_addto_chunk(retval, sizeof(payload), (const void *)&payload);

	/* RFC 2960 6.4 Multi-homed SCTP Endpoints
	 *
	 * An endpoint SHOULD transmit reply chunks (e.g., SACK,
	 * HEARTBEAT ACK, * etc.) to the same destination transport
	 * address from which it * received the DATA or control chunk
	 * to which it is replying.
	 *
	 * [ABORT back to where the offender came from.]
	 */
	if (chunk)
		retval->transport = chunk->transport;

no_mem:
	return retval;
}

/* Helper to create ABORT with a SCTP_ERROR_USER_ABORT error.
 */
struct sctp_chunk *sctp_make_abort_user(const struct sctp_association *asoc,
					const struct msghdr *msg,
					size_t paylen)
{
	struct sctp_chunk *retval;
	void *payload = NULL;
	int err;

	retval = sctp_make_abort(asoc, NULL, sizeof(sctp_errhdr_t) + paylen);
	if (!retval)
		goto err_chunk;

	if (paylen) {
		/* Put the msg_iov together into payload.  */
		payload = kmalloc(paylen, GFP_KERNEL);
		if (!payload)
			goto err_payload;

		err = memcpy_fromiovec(payload, msg->msg_iov, paylen);
		if (err < 0)
			goto err_copy;
	}

	sctp_init_cause(retval, SCTP_ERROR_USER_ABORT, paylen);
	sctp_addto_chunk(retval, paylen, payload);

	if (paylen)
		kfree(payload);

	return retval;

err_copy:
	kfree(payload);
err_payload:
	sctp_chunk_free(retval);
	retval = NULL;
err_chunk:
	return retval;
}

/* Append bytes to the end of a parameter.  Will panic if chunk is not big
 * enough.
 * When @data is NULL the appended region is zero-filled instead of
 * copied.  Adjusts the chunk length header and chunk_end pointer.
 */
static void *sctp_addto_param(struct sctp_chunk *chunk, int len,
			      const void *data)
{
	void *target;
	int chunklen = ntohs(chunk->chunk_hdr->length);

	target = skb_put(chunk->skb, len);

	if (data)
		memcpy(target, data, len);
	else
		memset(target, 0, len);

	/* Adjust the chunk length field.  */
	chunk->chunk_hdr->length = htons(chunklen + len);
	chunk->chunk_end = skb_tail_pointer(chunk->skb);

	return target;
}

/* Make an ABORT chunk with a PROTOCOL VIOLATION cause code.  */
struct sctp_chunk *sctp_make_abort_violation(
	const struct sctp_association *asoc,
	const struct sctp_chunk *chunk,
	const __u8 *payload,
	const size_t paylen)
{
	struct sctp_chunk  *retval;
	struct sctp_paramhdr phdr;

	retval = sctp_make_abort(asoc, chunk, sizeof(sctp_errhdr_t) + paylen +
					      sizeof(sctp_paramhdr_t));
	if (!retval)
		goto end;

	sctp_init_cause(retval, SCTP_ERROR_PROTO_VIOLATION, paylen +
					      sizeof(sctp_paramhdr_t));

	/* Echo the offending chunk's header after the textual payload. */
	phdr.type = htons(chunk->chunk_hdr->type);
	phdr.length = chunk->chunk_hdr->length;
	sctp_addto_chunk(retval, paylen, payload);
	sctp_addto_param(retval, sizeof(sctp_paramhdr_t), &phdr);

end:
	return retval;
}

/* ABORT with a PROTOCOL VIOLATION cause naming the parameter whose
 * length was invalid; @param is echoed back after the message text.
 */
struct sctp_chunk *sctp_make_violation_paramlen(
	const struct sctp_association *asoc,
	const struct sctp_chunk *chunk,
	struct sctp_paramhdr *param)
{
	struct sctp_chunk *retval;
	static const char error[] = "The following parameter had invalid length:";
	size_t payload_len = sizeof(error) + sizeof(sctp_errhdr_t) +
			     sizeof(sctp_paramhdr_t);

	retval = sctp_make_abort(asoc, chunk, payload_len);
	if (!retval)
		goto nodata;

	sctp_init_cause(retval, SCTP_ERROR_PROTO_VIOLATION,
			sizeof(error) + sizeof(sctp_paramhdr_t));
	sctp_addto_chunk(retval, sizeof(error), error);
	sctp_addto_param(retval, sizeof(sctp_paramhdr_t), param);

nodata:
	return retval;
}

/* ABORT with a PROTOCOL VIOLATION cause reporting that the
 * association exceeded its max retransmission count.
 */
struct sctp_chunk *sctp_make_violation_max_retrans(
	const struct sctp_association *asoc,
	const struct sctp_chunk *chunk)
{
	struct sctp_chunk *retval;
	static const char error[] = "Association exceeded its max_retans count";
	size_t payload_len = sizeof(error) + sizeof(sctp_errhdr_t);

	retval = sctp_make_abort(asoc, chunk, payload_len);
	if (!retval)
		goto nodata;

	sctp_init_cause(retval, SCTP_ERROR_PROTO_VIOLATION, sizeof(error));
	sctp_addto_chunk(retval, sizeof(error), error);

nodata:
	return retval;
}

/* Make a HEARTBEAT chunk.
 */
struct sctp_chunk *sctp_make_heartbeat(const struct sctp_association *asoc,
				       const struct sctp_transport *transport)
{
	struct sctp_chunk *retval;
	sctp_sender_hb_info_t hbinfo;

	retval = sctp_make_control(asoc, SCTP_CID_HEARTBEAT, 0, sizeof(hbinfo));
	if (!retval)
		goto nodata;

	/* Fill in the heartbeat-info parameter echoed back by the peer.
	 * NOTE(review): hbinfo is a stack struct copied verbatim into the
	 * chunk; any padding inside sctp_sender_hb_info_t is not explicitly
	 * zeroed here -- confirm the struct has no padding before relying
	 * on that.
	 */
	hbinfo.param_hdr.type = SCTP_PARAM_HEARTBEAT_INFO;
	hbinfo.param_hdr.length = htons(sizeof(sctp_sender_hb_info_t));
	hbinfo.daddr = transport->ipaddr;
	hbinfo.sent_at = jiffies;
	hbinfo.hb_nonce = transport->hb_nonce;

	/* Cast away the 'const', as this is just telling the chunk
	 * what transport it belongs to.
	 */
	retval->transport = (struct sctp_transport *) transport;
	retval->subh.hbs_hdr = sctp_addto_chunk(retval, sizeof(hbinfo),
						&hbinfo);

nodata:
	return retval;
}

/* Make a HEARTBEAT ACK chunk echoing back the peer's heartbeat payload. */
struct sctp_chunk *sctp_make_heartbeat_ack(const struct sctp_association *asoc,
					   const struct sctp_chunk *chunk,
					   const void *payload,
					   const size_t paylen)
{
	struct sctp_chunk *retval;

	retval = sctp_make_control(asoc, SCTP_CID_HEARTBEAT_ACK, 0, paylen);
	if (!retval)
		goto nodata;

	retval->subh.hbs_hdr = sctp_addto_chunk(retval, paylen, payload);

	/* RFC 2960 6.4 Multi-homed SCTP Endpoints
	 *
	 * An endpoint SHOULD transmit reply chunks (e.g., SACK,
	 * HEARTBEAT ACK, * etc.) to the same destination transport
	 * address from which it * received the DATA or control chunk
	 * to which it is replying.
	 *
	 * [HBACK back to where the HEARTBEAT came from.]
	 */
	if (chunk)
		retval->transport = chunk->transport;

nodata:
	return retval;
}

/* Create an Operation Error chunk with the specified space reserved.
 * This routine can be used for containing multiple causes in the chunk.
 */
static struct sctp_chunk *sctp_make_op_error_space(
	const struct sctp_association *asoc,
	const struct sctp_chunk *chunk,
	size_t size)
{
	struct sctp_chunk *retval;

	retval = sctp_make_control(asoc, SCTP_CID_ERROR, 0,
				   sizeof(sctp_errhdr_t) + size);
	if (!retval)
		goto nodata;

	/* RFC 2960 6.4 Multi-homed SCTP Endpoints
	 *
	 * An endpoint SHOULD transmit reply chunks (e.g., SACK,
	 * HEARTBEAT ACK, etc.) to the same destination transport
	 * address from which it received the DATA or control chunk
	 * to which it is replying.
	 */
	if (chunk)
		retval->transport = chunk->transport;

nodata:
	return retval;
}

/* Create an Operation Error chunk of a fixed size,
 * specifically, max(asoc->pathmtu, SCTP_DEFAULT_MAXSEGMENT)
 * This is a helper function to allocate an error chunk for
 * those invalid parameter codes in which we may not want
 * to report all the errors, if the incoming chunk is large
 */
static inline struct sctp_chunk *sctp_make_op_error_fixed(
	const struct sctp_association *asoc,
	const struct sctp_chunk *chunk)
{
	size_t size = asoc ? asoc->pathmtu : 0;

	if (!size)
		size = SCTP_DEFAULT_MAXSEGMENT;

	return sctp_make_op_error_space(asoc, chunk, size);
}

/* Create an Operation Error chunk.
 * Initializes one cause with the given code/payload and optionally
 * reserves 'reserve_tail' zeroed bytes after it.
 */
struct sctp_chunk *sctp_make_op_error(const struct sctp_association *asoc,
				      const struct sctp_chunk *chunk,
				      __be16 cause_code, const void *payload,
				      size_t paylen, size_t reserve_tail)
{
	struct sctp_chunk *retval;

	retval = sctp_make_op_error_space(asoc, chunk, paylen + reserve_tail);
	if (!retval)
		goto nodata;

	sctp_init_cause(retval, cause_code, paylen + reserve_tail);
	sctp_addto_chunk(retval, paylen, payload);
	if (reserve_tail)
		sctp_addto_param(retval, reserve_tail, NULL);

nodata:
	return retval;
}

/* Create an AUTH chunk.  The HMAC field is zero-filled here; the real
 * digest is computed later when the packet is built.
 */
struct sctp_chunk *sctp_make_auth(const struct sctp_association *asoc)
{
	struct sctp_chunk *retval;
	struct sctp_hmac *hmac_desc;
	struct sctp_authhdr auth_hdr;
	__u8 *hmac;

	/* Get the first hmac that the peer told us to use */
	hmac_desc = sctp_auth_asoc_get_hmac(asoc);
	if (unlikely(!hmac_desc))
		return NULL;

	retval = sctp_make_control(asoc, SCTP_CID_AUTH, 0,
			hmac_desc->hmac_len + sizeof(sctp_authhdr_t));
	if (!retval)
		return NULL;

	auth_hdr.hmac_id = htons(hmac_desc->hmac_id);
	auth_hdr.shkey_id = htons(asoc->active_key_id);

	retval->subh.auth_hdr = sctp_addto_chunk(retval, sizeof(sctp_authhdr_t),
						 &auth_hdr);

	hmac = skb_put(retval->skb, hmac_desc->hmac_len);
	memset(hmac, 0, hmac_desc->hmac_len);

	/* Adjust the chunk header to include the empty MAC */
	retval->chunk_hdr->length =
		htons(ntohs(retval->chunk_hdr->length) + hmac_desc->hmac_len);
	retval->chunk_end = skb_tail_pointer(retval->skb);

	return retval;
}

/********************************************************************
 * 2nd Level Abstractions
 ********************************************************************/

/* Turn an skb into a chunk.
 * FIXME: Eventually move the structure directly inside the skb->cb[].
 *
 * sctpimpguide-05.txt Section 2.8.2
 * M1) Each time a new DATA chunk is transmitted
 * set the 'TSN.Missing.Report' count for that TSN to 0. The
 * 'TSN.Missing.Report' count will be used to determine missing chunks
 * and when to fast retransmit.
 *
 */
struct sctp_chunk *sctp_chunkify(struct sk_buff *skb,
				 const struct sctp_association *asoc,
				 struct sock *sk)
{
	struct sctp_chunk *retval;

	retval = kmem_cache_zalloc(sctp_chunk_cachep, GFP_ATOMIC);
	if (!retval)
		goto nodata;

	if (!sk)
		pr_debug("%s: chunkifying skb:%p w/o an sk\n", __func__, skb);

	INIT_LIST_HEAD(&retval->list);
	retval->skb = skb;
	retval->asoc = (struct sctp_association *)asoc;
	retval->singleton = 1;

	retval->fast_retransmit = SCTP_CAN_FRTX;

	/* Polish the bead hole.  */
	INIT_LIST_HEAD(&retval->transmitted_list);
	INIT_LIST_HEAD(&retval->frag_list);
	SCTP_DBG_OBJCNT_INC(chunk);
	atomic_set(&retval->refcnt, 1);

nodata:
	return retval;
}

/* Set chunk->source and dest based on the IP header in chunk->skb.  */
void sctp_init_addrs(struct sctp_chunk *chunk, union sctp_addr *src,
		     union sctp_addr *dest)
{
	memcpy(&chunk->source, src, sizeof(union sctp_addr));
	memcpy(&chunk->dest, dest, sizeof(union sctp_addr));
}

/* Extract the source address from a chunk.  */
const union sctp_addr *sctp_source(const struct sctp_chunk *chunk)
{
	/* If we have a known transport, use that.  */
	if (chunk->transport) {
		return &chunk->transport->ipaddr;
	} else {
		/* Otherwise, extract it from the IP header.  */
		return &chunk->source;
	}
}

/* Create a new chunk, setting the type and flags headers from the
 * arguments, reserving enough space for a 'paylen' byte payload.
 */
static struct sctp_chunk *_sctp_make_chunk(const struct sctp_association *asoc,
					   __u8 type, __u8 flags, int paylen)
{
	struct sctp_chunk *retval;
	sctp_chunkhdr_t *chunk_hdr;
	struct sk_buff *skb;
	struct sock *sk;

	/* No need to allocate LL here, as this is only a chunk. */
	skb = alloc_skb(WORD_ROUND(sizeof(sctp_chunkhdr_t) + paylen),
			GFP_ATOMIC);
	if (!skb)
		goto nodata;

	/* Make room for the chunk header.  */
	chunk_hdr = (sctp_chunkhdr_t *)skb_put(skb, sizeof(sctp_chunkhdr_t));
	chunk_hdr->type = type;
	chunk_hdr->flags = flags;
	chunk_hdr->length = htons(sizeof(sctp_chunkhdr_t));

	sk = asoc ? asoc->base.sk : NULL;
	retval = sctp_chunkify(skb, asoc, sk);
	if (!retval) {
		kfree_skb(skb);
		goto nodata;
	}

	retval->chunk_hdr = chunk_hdr;
	retval->chunk_end = ((__u8 *)chunk_hdr) + sizeof(struct sctp_chunkhdr);

	/* Determine if the chunk needs to be authenticated */
	if (sctp_auth_send_cid(type, asoc))
		retval->auth = 1;

	return retval;
nodata:
	return NULL;
}

/* Convenience wrapper: build a DATA chunk. */
static struct sctp_chunk *sctp_make_data(const struct sctp_association *asoc,
					 __u8 flags, int paylen)
{
	return _sctp_make_chunk(asoc, SCTP_CID_DATA, flags, paylen);
}

/* Convenience wrapper: build a control chunk and charge it to the socket. */
static struct sctp_chunk *sctp_make_control(const struct sctp_association *asoc,
					    __u8 type, __u8 flags, int paylen)
{
	struct sctp_chunk *chunk = _sctp_make_chunk(asoc, type, flags, paylen);

	if (chunk)
		sctp_control_set_owner_w(chunk);

	return chunk;
}

/* Release the memory occupied by a chunk.  */
static void sctp_chunk_destroy(struct sctp_chunk *chunk)
{
	BUG_ON(!list_empty(&chunk->list));
	list_del_init(&chunk->transmitted_list);

	/* Free the chunk skb data and the SCTP_chunk stub itself. */
	consume_skb(chunk->skb);
	consume_skb(chunk->auth_chunk);

	SCTP_DBG_OBJCNT_DEC(chunk);
	kmem_cache_free(sctp_chunk_cachep, chunk);
}

/* Possibly, free the chunk.  */
void sctp_chunk_free(struct sctp_chunk *chunk)
{
	/* Release our reference on the message tracker. */
	if (chunk->msg)
		sctp_datamsg_put(chunk->msg);

	sctp_chunk_put(chunk);
}

/* Grab a reference to the chunk. */
void sctp_chunk_hold(struct sctp_chunk *ch)
{
	atomic_inc(&ch->refcnt);
}

/* Release a reference to the chunk. */
void sctp_chunk_put(struct sctp_chunk *ch)
{
	if (atomic_dec_and_test(&ch->refcnt))
		sctp_chunk_destroy(ch);
}

/* Append bytes to the end of a chunk.  Will panic if chunk is not big
 * enough.
 * First pads the existing chunk length to a 4-byte boundary, then
 * appends 'len' bytes of 'data' and updates the chunk header length.
 */
void *sctp_addto_chunk(struct sctp_chunk *chunk, int len, const void *data)
{
	void *target;
	void *padding;
	int chunklen = ntohs(chunk->chunk_hdr->length);
	int padlen = WORD_ROUND(chunklen) - chunklen;

	padding = skb_put(chunk->skb, padlen);
	target = skb_put(chunk->skb, len);

	memset(padding, 0, padlen);
	memcpy(target, data, len);

	/* Adjust the chunk length field.  */
	chunk->chunk_hdr->length = htons(chunklen + padlen + len);
	chunk->chunk_end = skb_tail_pointer(chunk->skb);

	return target;
}

/* Append bytes to the end of a chunk. Returns NULL if there isn't sufficient
 * space in the chunk
 */
static void *sctp_addto_chunk_fixed(struct sctp_chunk *chunk,
				    int len, const void *data)
{
	if (skb_tailroom(chunk->skb) >= len)
		return sctp_addto_chunk(chunk, len, data);
	else
		return NULL;
}

/* Append bytes from user space to the end of a chunk.  Will panic if
 * chunk is not big enough.
 * Returns a kernel err value.
 */
int sctp_user_addto_chunk(struct sctp_chunk *chunk, int off, int len,
			  struct iovec *data)
{
	__u8 *target;
	int err = 0;

	/* Make room in chunk for data.  */
	target = skb_put(chunk->skb, len);

	/* Copy data (whole iovec) into chunk */
	if ((err = memcpy_fromiovecend(target, data, off, len)))
		goto out;

	/* Adjust the chunk length field.  */
	chunk->chunk_hdr->length = htons(ntohs(chunk->chunk_hdr->length) + len);
	chunk->chunk_end = skb_tail_pointer(chunk->skb);

out:
	return err;
}

/* Helper function to assign an SSN if needed.  This assumes that both
 * the data_hdr and association have already been assigned.
 * All fragments of one message get the same stream sequence number;
 * only the last fragment advances the stream's SSN counter.
 */
void sctp_chunk_assign_ssn(struct sctp_chunk *chunk)
{
	struct sctp_datamsg *msg;
	struct sctp_chunk *lchunk;
	struct sctp_stream *stream;
	__u16 ssn;
	__u16 sid;

	if (chunk->has_ssn)
		return;

	/* All fragments will be on the same stream */
	sid = ntohs(chunk->subh.data_hdr->stream);
	stream = &chunk->asoc->ssnmap->out;

	/* Now assign the sequence number to the entire message.
	 * All fragments must have the same stream sequence number.
	 */
	msg = chunk->msg;
	list_for_each_entry(lchunk, &msg->chunks, frag_list) {
		if (lchunk->chunk_hdr->flags & SCTP_DATA_UNORDERED) {
			ssn = 0;
		} else {
			if (lchunk->chunk_hdr->flags & SCTP_DATA_LAST_FRAG)
				ssn = sctp_ssn_next(stream, sid);
			else
				ssn = sctp_ssn_peek(stream, sid);
		}

		lchunk->subh.data_hdr->ssn = htons(ssn);
		lchunk->has_ssn = 1;
	}
}

/* Helper function to assign a TSN if needed.
   This assumes that both
 * the data_hdr and association have already been assigned.
 */
void sctp_chunk_assign_tsn(struct sctp_chunk *chunk)
{
	if (!chunk->has_tsn) {
		/* This is the last possible instant to
		 * assign a TSN.
		 */
		chunk->subh.data_hdr->tsn =
			htonl(sctp_association_get_next_tsn(chunk->asoc));
		chunk->has_tsn = 1;
	}
}

/* Create a CLOSED association to use with an incoming packet.  */
struct sctp_association *sctp_make_temp_asoc(const struct sctp_endpoint *ep,
					     struct sctp_chunk *chunk,
					     gfp_t gfp)
{
	struct sctp_association *asoc;
	struct sk_buff *skb;
	sctp_scope_t scope;
	struct sctp_af *af;

	/* Create the bare association.  */
	scope = sctp_scope(sctp_source(chunk));
	asoc = sctp_association_new(ep, ep->base.sk, scope, gfp);
	if (!asoc)
		goto nodata;
	asoc->temp = 1;
	skb = chunk->skb;
	/* Create an entry for the source address of the packet.  */
	af = sctp_get_af_specific(ipver2af(ip_hdr(skb)->version));
	if (unlikely(!af))
		goto fail;
	af->from_skb(&asoc->c.peer_addr, skb, 1);
nodata:
	/* Note: 'nodata' also serves as the success path (asoc != NULL). */
	return asoc;
fail:
	sctp_association_free(asoc);
	return NULL;
}

/* Build a cookie representing asoc.
 * This INCLUDES the param header needed to put the cookie in the INIT ACK.
 * Layout: param header | signed-cookie header | cookie body, where the
 * body carries the association state, the peer's whole INIT chunk and the
 * raw local address list, padded to SCTP_COOKIE_MULTIPLE and then signed
 * with the endpoint's HMAC (if one is configured).
 */
static sctp_cookie_param_t *sctp_pack_cookie(const struct sctp_endpoint *ep,
				      const struct sctp_association *asoc,
				      const struct sctp_chunk *init_chunk,
				      int *cookie_len,
				      const __u8 *raw_addrs, int addrs_len)
{
	sctp_cookie_param_t *retval;
	struct sctp_signed_cookie *cookie;
	struct scatterlist sg;
	int headersize, bodysize;

	/* Header size is static data prior to the actual cookie, including
	 * any padding.
	 */
	headersize = sizeof(sctp_paramhdr_t) +
		     (sizeof(struct sctp_signed_cookie) -
		      sizeof(struct sctp_cookie));
	bodysize = sizeof(struct sctp_cookie)
		+ ntohs(init_chunk->chunk_hdr->length) + addrs_len;

	/* Pad out the cookie to a multiple to make the signature
	 * functions simpler to write.
	 */
	if (bodysize % SCTP_COOKIE_MULTIPLE)
		bodysize += SCTP_COOKIE_MULTIPLE
			    - (bodysize % SCTP_COOKIE_MULTIPLE);
	*cookie_len = headersize + bodysize;

	/* Clear this memory since we are sending this data structure
	 * out on the network.
	 */
	retval = kzalloc(*cookie_len, GFP_ATOMIC);
	if (!retval)
		goto nodata;

	cookie = (struct sctp_signed_cookie *) retval->body;

	/* Set up the parameter header.  */
	retval->p.type = SCTP_PARAM_STATE_COOKIE;
	retval->p.length = htons(*cookie_len);

	/* Copy the cookie part of the association itself.  */
	cookie->c = asoc->c;
	/* Save the raw address list length in the cookie. */
	cookie->c.raw_addr_list_len = addrs_len;

	/* Remember PR-SCTP capability. */
	cookie->c.prsctp_capable = asoc->peer.prsctp_capable;

	/* Save adaptation indication in the cookie. */
	cookie->c.adaptation_ind = asoc->peer.adaptation_ind;

	/* Set an expiration time for the cookie.  */
	cookie->c.expiration = ktime_add(asoc->cookie_life, ktime_get());

	/* Copy the peer's init packet.  */
	memcpy(&cookie->c.peer_init[0], init_chunk->chunk_hdr,
	       ntohs(init_chunk->chunk_hdr->length));

	/* Copy the raw local address list of the association. */
	memcpy((__u8 *)&cookie->c.peer_init[0] +
	       ntohs(init_chunk->chunk_hdr->length), raw_addrs, addrs_len);

	if (sctp_sk(ep->base.sk)->hmac) {
		struct hash_desc desc;

		/* Sign the message.  */
		sg_init_one(&sg, &cookie->c, bodysize);
		desc.tfm = sctp_sk(ep->base.sk)->hmac;
		desc.flags = 0;

		if (crypto_hash_setkey(desc.tfm, ep->secret_key,
				       sizeof(ep->secret_key)) ||
		    crypto_hash_digest(&desc, &sg, bodysize,
				       cookie->signature))
			goto free_cookie;
	}

	return retval;

free_cookie:
	kfree(retval);
nodata:
	*cookie_len = 0;
	return NULL;
}

/* Unpack the cookie from COOKIE ECHO chunk, recreating the association.
 */
struct sctp_association *sctp_unpack_cookie(
	const struct sctp_endpoint *ep,
	const struct sctp_association *asoc,
	struct sctp_chunk *chunk, gfp_t gfp,
	int *error, struct sctp_chunk **errp)
{
	struct sctp_association *retval = NULL;
	struct sctp_signed_cookie *cookie;
	struct sctp_cookie *bear_cookie;
	int headersize, bodysize, fixed_size;
	__u8 *digest = ep->digest;
	struct scatterlist sg;
	unsigned int len;
	sctp_scope_t scope;
	struct sk_buff *skb = chunk->skb;
	ktime_t kt;
	struct hash_desc desc;

	/* Header size is static data prior to the actual cookie, including
	 * any padding.
	 */
	headersize = sizeof(sctp_chunkhdr_t) +
		     (sizeof(struct sctp_signed_cookie) -
		      sizeof(struct sctp_cookie));
	bodysize = ntohs(chunk->chunk_hdr->length) - headersize;
	fixed_size = headersize + sizeof(struct sctp_cookie);

	/* Verify that the chunk looks like it even has a cookie.
	 * There must be enough room for our cookie and our peer's
	 * INIT chunk.
	 */
	len = ntohs(chunk->chunk_hdr->length);
	if (len < fixed_size + sizeof(struct sctp_chunkhdr))
		goto malformed;

	/* Verify that the cookie has been padded out. */
	if (bodysize % SCTP_COOKIE_MULTIPLE)
		goto malformed;

	/* Process the cookie.  */
	cookie = chunk->subh.cookie_hdr;
	bear_cookie = &cookie->c;

	if (!sctp_sk(ep->base.sk)->hmac)
		goto no_hmac;

	/* Check the signature.  */
	sg_init_one(&sg, bear_cookie, bodysize);
	desc.tfm = sctp_sk(ep->base.sk)->hmac;
	desc.flags = 0;

	memset(digest, 0x00, SCTP_SIGNATURE_SIZE);
	if (crypto_hash_setkey(desc.tfm, ep->secret_key,
			       sizeof(ep->secret_key)) ||
	    crypto_hash_digest(&desc, &sg, bodysize, digest)) {
		*error = -SCTP_IERROR_NOMEM;
		goto fail;
	}

	if (memcmp(digest, cookie->signature, SCTP_SIGNATURE_SIZE)) {
		/* Signature mismatch: the cookie was not minted by us. */
		*error = -SCTP_IERROR_BAD_SIG;
		goto fail;
	}

no_hmac:
	/* IG Section 2.35.2:
	 *  3) Compare the port numbers and the verification tag contained
	 *     within the COOKIE ECHO chunk to the actual port numbers and the
	 *     verification tag within the SCTP common header of the received
	 *     packet. If these values do not match the packet MUST be silently
	 *     discarded,
	 */
	if (ntohl(chunk->sctp_hdr->vtag) != bear_cookie->my_vtag) {
		*error = -SCTP_IERROR_BAD_TAG;
		goto fail;
	}

	if (chunk->sctp_hdr->source != bear_cookie->peer_addr.v4.sin_port ||
	    ntohs(chunk->sctp_hdr->dest) != bear_cookie->my_port) {
		*error = -SCTP_IERROR_BAD_PORTS;
		goto fail;
	}

	/* Check to see if the cookie is stale.  If there is already
	 * an association, there is no need to check cookie's expiration
	 * for init collision case of lost COOKIE ACK.
	 * If skb has been timestamped, then use the stamp, otherwise
	 * use current time.  This introduces a small possibility that
	 * a cookie may be considered expired, but this would only slow
	 * down the new association establishment instead of every packet.
	 */
	if (sock_flag(ep->base.sk, SOCK_TIMESTAMP))
		kt = skb_get_ktime(skb);
	else
		kt = ktime_get();

	if (!asoc && ktime_before(bear_cookie->expiration, kt)) {
		/*
		 * Section 3.3.10.3 Stale Cookie Error (3)
		 *
		 * Cause of error
		 * ---------------
		 * Stale Cookie Error:  Indicates the receipt of a valid State
		 * Cookie that has expired.
		 */
		len = ntohs(chunk->chunk_hdr->length);
		*errp = sctp_make_op_error_space(asoc, chunk, len);
		if (*errp) {
			suseconds_t usecs = ktime_to_us(ktime_sub(kt,
						bear_cookie->expiration));
			__be32 n = htonl(usecs);

			sctp_init_cause(*errp, SCTP_ERROR_STALE_COOKIE,
					sizeof(n));
			sctp_addto_chunk(*errp, sizeof(n), &n);
			*error = -SCTP_IERROR_STALE_COOKIE;
		} else
			*error = -SCTP_IERROR_NOMEM;

		goto fail;
	}

	/* Make a new base association.  */
	scope = sctp_scope(sctp_source(chunk));
	retval = sctp_association_new(ep, ep->base.sk, scope, gfp);
	if (!retval) {
		*error = -SCTP_IERROR_NOMEM;
		goto fail;
	}

	/* Set up our peer's port number.  */
	retval->peer.port = ntohs(chunk->sctp_hdr->source);

	/* Populate the association from the cookie.  */
	memcpy(&retval->c, bear_cookie, sizeof(*bear_cookie));

	if (sctp_assoc_set_bind_addr_from_cookie(retval, bear_cookie,
						 GFP_ATOMIC) < 0) {
		*error = -SCTP_IERROR_NOMEM;
		goto fail;
	}

	/* Also, add the destination address. */
	if (list_empty(&retval->base.bind_addr.address_list)) {
		sctp_add_bind_addr(&retval->base.bind_addr, &chunk->dest,
				SCTP_ADDR_SRC, GFP_ATOMIC);
	}

	retval->next_tsn = retval->c.initial_tsn;
	retval->ctsn_ack_point = retval->next_tsn - 1;
	retval->addip_serial = retval->c.initial_tsn;
	retval->adv_peer_ack_point = retval->ctsn_ack_point;
	retval->peer.prsctp_capable = retval->c.prsctp_capable;
	retval->peer.adaptation_ind = retval->c.adaptation_ind;

	/* The INIT stuff will be done by the side effects.  */
	return retval;

fail:
	if (retval)
		sctp_association_free(retval);

	return NULL;

malformed:
	/* Yikes!  The packet is either corrupt or deliberately
	 * malformed.
	 */
	*error = -SCTP_IERROR_MALFORMED;
	goto fail;
}

/********************************************************************
 * 3rd Level Abstractions
 ********************************************************************/

/* Wire format of the "missing mandatory parameter" error payload. */
struct __sctp_missing {
	__be32 num_missing;
	__be16 type;
}  __packed;

/*
 * Report a missing mandatory parameter.
 */
static int sctp_process_missing_param(const struct sctp_association *asoc,
				      sctp_param_t paramtype,
				      struct sctp_chunk *chunk,
				      struct sctp_chunk **errp)
{
	struct __sctp_missing report;
	__u16 len;

	len = WORD_ROUND(sizeof(report));

	/* Make an ERROR chunk, preparing enough room for
	 * returning multiple unknown parameters.
	 */
	if (!*errp)
		*errp = sctp_make_op_error_space(asoc, chunk, len);

	if (*errp) {
		report.num_missing = htonl(1);
		report.type = paramtype;
		sctp_init_cause(*errp, SCTP_ERROR_MISS_PARAM,
				sizeof(report));
		sctp_addto_chunk(*errp, sizeof(report), &report);
	}

	/* Stop processing this chunk. */
	return 0;
}

/* Report an Invalid Mandatory Parameter.
 */
static int sctp_process_inv_mandatory(const struct sctp_association *asoc,
				      struct sctp_chunk *chunk,
				      struct sctp_chunk **errp)
{
	/* Invalid Mandatory Parameter Error has no payload. */

	if (!*errp)
		*errp = sctp_make_op_error_space(asoc, chunk, 0);

	if (*errp)
		sctp_init_cause(*errp, SCTP_ERROR_INV_PARAM, 0);

	/* Stop processing this chunk. */
	return 0;
}

/* Report a parameter with an invalid length; this is fatal and replaces
 * any previously-accumulated error chunk.
 */
static int sctp_process_inv_paramlength(const struct sctp_association *asoc,
					struct sctp_paramhdr *param,
					const struct sctp_chunk *chunk,
					struct sctp_chunk **errp)
{
	/* This is a fatal error.  Any accumulated non-fatal errors are
	 * not reported.
	 */
	if (*errp)
		sctp_chunk_free(*errp);

	/* Create an error chunk and fill it in with our payload. */
	*errp = sctp_make_violation_paramlen(asoc, chunk, param);

	return 0;
}

/* Do not attempt to handle the HOST_NAME parm.  However, do
 * send back an indicator to the peer.
 */
static int sctp_process_hn_param(const struct sctp_association *asoc,
				 union sctp_params param,
				 struct sctp_chunk *chunk,
				 struct sctp_chunk **errp)
{
	__u16 len = ntohs(param.p->length);

	/* Processing of the HOST_NAME parameter will generate an
	 * ABORT.  If we've accumulated any non-fatal errors, they
	 * would be unrecognized parameters and we should not include
	 * them in the ABORT.
	 */
	if (*errp)
		sctp_chunk_free(*errp);

	*errp = sctp_make_op_error_space(asoc, chunk, len);

	if (*errp) {
		sctp_init_cause(*errp, SCTP_ERROR_DNS_FAILED, len);
		sctp_addto_chunk(*errp, len, param.v);
	}

	/* Stop processing this chunk. */
	return 0;
}

/* Check the Supported Extensions parameter: returns 0 (reject) only when
 * ADD-IP is enabled without backward compatibility and the peer lists
 * ASCONF support without AUTH support.
 */
static int sctp_verify_ext_param(struct net *net, union sctp_params param)
{
	__u16 num_ext = ntohs(param.p->length) - sizeof(sctp_paramhdr_t);
	int have_auth = 0;
	int have_asconf = 0;
	int i;

	for (i = 0; i < num_ext; i++) {
		switch (param.ext->chunks[i]) {
		case SCTP_CID_AUTH:
			have_auth = 1;
			break;
		case SCTP_CID_ASCONF:
		case SCTP_CID_ASCONF_ACK:
			have_asconf = 1;
			break;
		}
	}

	/* ADD-IP Security: The draft requires us to ABORT or ignore the
	 * INIT/INIT-ACK if ADD-IP is listed, but AUTH is not. Do this
	 * only if ADD-IP is turned on and we are not backward-compatible
	 * mode.
	 */
	if (net->sctp.addip_noauth)
		return 1;

	if (net->sctp.addip_enable && !have_auth && have_asconf)
		return 0;

	return 1;
}

/* Record which optional extensions the peer advertised in its
 * Supported Extensions parameter.
 */
static void sctp_process_ext_param(struct sctp_association *asoc,
				   union sctp_params param)
{
	struct net *net = sock_net(asoc->base.sk);
	__u16 num_ext = ntohs(param.p->length) - sizeof(sctp_paramhdr_t);
	int i;

	for (i = 0; i < num_ext; i++) {
		switch (param.ext->chunks[i]) {
		case SCTP_CID_FWD_TSN:
			if (net->sctp.prsctp_enable && !asoc->peer.prsctp_capable)
				asoc->peer.prsctp_capable = 1;
			break;
		case SCTP_CID_AUTH:
			/* if the peer reports AUTH, assume that he
			 * supports AUTH.
			 */
			if (asoc->ep->auth_enable)
				asoc->peer.auth_capable = 1;
			break;
		case SCTP_CID_ASCONF:
		case SCTP_CID_ASCONF_ACK:
			if (net->sctp.addip_enable)
				asoc->peer.asconf_capable = 1;
			break;
		default:
			break;
		}
	}
}

/* RFC 3.2.1 & the Implementers Guide 2.2.
 *
 * The Parameter Types are encoded such that the
 * highest-order two bits specify the action that must be
 * taken if the processing endpoint does not recognize the
 * Parameter Type.
 *
 * 00 - Stop processing this parameter; do not process any further
 *	parameters within this chunk
 *
 * 01 - Stop processing this parameter, do not process any further
 *	parameters within this chunk, and report the unrecognized
 *	parameter in an 'Unrecognized Parameter' ERROR chunk.
 *
 * 10 - Skip this parameter and continue processing.
 *
 * 11 - Skip this parameter and continue processing but
 *	report the unrecognized parameter in an
 *	'Unrecognized Parameter' ERROR chunk.
 *
 * Return value:
 *	SCTP_IERROR_NO_ERROR - continue with the chunk
 *	SCTP_IERROR_ERROR    - stop and report an error.
 *	SCTP_IERROR_NOMEM    - out of memory.
 */
static sctp_ierror_t sctp_process_unk_param(const struct sctp_association *asoc,
					    union sctp_params param,
					    struct sctp_chunk *chunk,
					    struct sctp_chunk **errp)
{
	int retval = SCTP_IERROR_NO_ERROR;

	switch (param.p->type & SCTP_PARAM_ACTION_MASK) {
	case SCTP_PARAM_ACTION_DISCARD:
		retval = SCTP_IERROR_ERROR;
		break;
	case SCTP_PARAM_ACTION_SKIP:
		break;
	case SCTP_PARAM_ACTION_DISCARD_ERR:
		retval = SCTP_IERROR_ERROR;
		/* Fall through */
	case SCTP_PARAM_ACTION_SKIP_ERR:
		/* Make an ERROR chunk, preparing enough room for
		 * returning multiple unknown parameters.
		 */
		if (NULL == *errp)
			*errp = sctp_make_op_error_fixed(asoc, chunk);

		if (*errp) {
			/* Fixed-size variants silently drop the report if
			 * the error chunk has no room left.
			 */
			if (!sctp_init_cause_fixed(*errp, SCTP_ERROR_UNKNOWN_PARAM,
					WORD_ROUND(ntohs(param.p->length))))
				sctp_addto_chunk_fixed(*errp,
						WORD_ROUND(ntohs(param.p->length)),
						param.v);
		} else {
			/* If there is no memory for generating the ERROR
			 * report as specified, an ABORT will be triggered
			 * to the peer and the association won't be
			 * established.
			 */
			retval = SCTP_IERROR_NOMEM;
		}
		break;
	default:
		break;
	}

	return retval;
}

/* Verify variable length parameters
 * Return values:
 *	SCTP_IERROR_ABORT - trigger an ABORT
 *	SCTP_IERROR_NOMEM - out of memory (abort)
 *	SCTP_IERROR_ERROR - stop processing, trigger an ERROR
 *	SCTP_IERROR_NO_ERROR - continue with the chunk
 */
static sctp_ierror_t sctp_verify_param(struct net *net,
					const struct sctp_endpoint *ep,
					const struct sctp_association *asoc,
					union sctp_params param,
					sctp_cid_t cid,
					struct sctp_chunk *chunk,
					struct sctp_chunk **err_chunk)
{
	struct sctp_hmac_algo_param *hmacs;
	int retval = SCTP_IERROR_NO_ERROR;
	__u16 n_elt, id = 0;
	int i;

	/* FIXME - This routine is not looking at each parameter per the
	 * chunk type, i.e., unrecognized parameters should be further
	 * identified based on the chunk id.
	 */

	switch (param.p->type) {
	case SCTP_PARAM_IPV4_ADDRESS:
	case SCTP_PARAM_IPV6_ADDRESS:
	case SCTP_PARAM_COOKIE_PRESERVATIVE:
	case SCTP_PARAM_SUPPORTED_ADDRESS_TYPES:
	case SCTP_PARAM_STATE_COOKIE:
	case SCTP_PARAM_HEARTBEAT_INFO:
	case SCTP_PARAM_UNRECOGNIZED_PARAMETERS:
	case SCTP_PARAM_ECN_CAPABLE:
	case SCTP_PARAM_ADAPTATION_LAYER_IND:
		break;

	case SCTP_PARAM_SUPPORTED_EXT:
		if (!sctp_verify_ext_param(net, param))
			return SCTP_IERROR_ABORT;
		break;

	case SCTP_PARAM_SET_PRIMARY:
		if (net->sctp.addip_enable)
			break;
		goto fallthrough;

	case SCTP_PARAM_HOST_NAME_ADDRESS:
		/* Tell the peer, we won't support this param.  */
		sctp_process_hn_param(asoc, param, chunk, err_chunk);
		retval = SCTP_IERROR_ABORT;
		break;

	case SCTP_PARAM_FWD_TSN_SUPPORT:
		if (net->sctp.prsctp_enable)
			break;
		goto fallthrough;

	case SCTP_PARAM_RANDOM:
		if (!ep->auth_enable)
			goto fallthrough;

		/* SCTP-AUTH: Section 6.1
		 * If the random number is not 32 byte long the association
		 * MUST be aborted.  The ABORT chunk SHOULD contain the error
		 * cause 'Protocol Violation'.
		 */
		if (SCTP_AUTH_RANDOM_LENGTH !=
			ntohs(param.p->length) - sizeof(sctp_paramhdr_t)) {
			sctp_process_inv_paramlength(asoc, param.p,
							chunk, err_chunk);
			retval = SCTP_IERROR_ABORT;
		}
		break;

	case SCTP_PARAM_CHUNKS:
		if (!ep->auth_enable)
			goto fallthrough;

		/* SCTP-AUTH: Section 3.2
		 * The CHUNKS parameter MUST be included once in the INIT or
		 * INIT-ACK chunk if the sender wants to receive authenticated
		 * chunks.  Its maximum length is 260 bytes.
		 */
		if (260 < ntohs(param.p->length)) {
			sctp_process_inv_paramlength(asoc, param.p,
						     chunk, err_chunk);
			retval = SCTP_IERROR_ABORT;
		}
		break;

	case SCTP_PARAM_HMAC_ALGO:
		if (!ep->auth_enable)
			goto fallthrough;

		hmacs = (struct sctp_hmac_algo_param *)param.p;
		n_elt = (ntohs(param.p->length) - sizeof(sctp_paramhdr_t)) >> 1;

		/* SCTP-AUTH: Section 6.1
		 * The HMAC algorithm based on SHA-1 MUST be supported and
		 * included in the HMAC-ALGO parameter.
		 */
		for (i = 0; i < n_elt; i++) {
			id = ntohs(hmacs->hmac_ids[i]);

			if (id == SCTP_AUTH_HMAC_ID_SHA1)
				break;
		}

		if (id != SCTP_AUTH_HMAC_ID_SHA1) {
			sctp_process_inv_paramlength(asoc, param.p, chunk,
						     err_chunk);
			retval = SCTP_IERROR_ABORT;
		}
		break;
fallthrough:
	default:
		pr_debug("%s: unrecognized param:%d for chunk:%d\n",
			 __func__, ntohs(param.p->type), cid);

		retval = sctp_process_unk_param(asoc, param, chunk, err_chunk);
		break;
	}
	return retval;
}

/* Verify the INIT packet before we process it.  */
int sctp_verify_init(struct net *net, const struct sctp_endpoint *ep,
		     const struct sctp_association *asoc, sctp_cid_t cid,
		     sctp_init_chunk_t *peer_init, struct sctp_chunk *chunk,
		     struct sctp_chunk **errp)
{
	union sctp_params param;
	bool has_cookie = false;
	int result;

	/* Check for missing mandatory parameters.  Note: Initial TSN is
	 * also mandatory, but is not checked here since the valid range
	 * is 0..2**32-1. RFC4960, section 3.3.3.
	 */
	if (peer_init->init_hdr.num_outbound_streams == 0 ||
	    peer_init->init_hdr.num_inbound_streams == 0 ||
	    peer_init->init_hdr.init_tag == 0 ||
	    ntohl(peer_init->init_hdr.a_rwnd) < SCTP_DEFAULT_MINWINDOW)
		return sctp_process_inv_mandatory(asoc, chunk, errp);

	sctp_walk_params(param, peer_init, init_hdr.params) {
		if (param.p->type == SCTP_PARAM_STATE_COOKIE)
			has_cookie = true;
	}

	/* There is a possibility that a parameter length was bad and
	 * in that case we would have stopped walking the parameters.
	 * The current param.p would point at the bad one.
	 * Current consensus on the mailing list is to generate a PROTOCOL
	 * VIOLATION error.  We build the ERROR chunk here and let the normal
	 * error handling code build and send the packet.
	 */
	if (param.v != (void *)chunk->chunk_end)
		return sctp_process_inv_paramlength(asoc, param.p, chunk, errp);

	/* The only missing mandatory param possible today is
	 * the state cookie for an INIT-ACK chunk.
	 */
	if ((SCTP_CID_INIT_ACK == cid) && !has_cookie)
		return sctp_process_missing_param(asoc, SCTP_PARAM_STATE_COOKIE,
						  chunk, errp);

	/* Verify all the variable length parameters */
	sctp_walk_params(param, peer_init, init_hdr.params) {
		result = sctp_verify_param(net, ep, asoc, param, cid,
					   chunk, errp);
		switch (result) {
		case SCTP_IERROR_ABORT:
		case SCTP_IERROR_NOMEM:
			return 0;
		case SCTP_IERROR_ERROR:
			return 1;
		case SCTP_IERROR_NO_ERROR:
		default:
			break;
		}

	} /* for (loop through all parameters) */

	return 1;
}

/* Unpack the parameters in an INIT packet into an association.
 * Returns 0 on failure, else success.
 * FIXME:  This is an association method.
 */
int sctp_process_init(struct sctp_association *asoc, struct sctp_chunk *chunk,
		      const union sctp_addr *peer_addr,
		      sctp_init_chunk_t *peer_init, gfp_t gfp)
{
	struct net *net = sock_net(asoc->base.sk);
	union sctp_params param;
	struct sctp_transport *transport;
	struct list_head *pos, *temp;
	struct sctp_af *af;
	union sctp_addr addr;
	char *cookie;
	int src_match = 0;

	/* We must include the address that the INIT packet came from.
	 * This is the only address that matters for an INIT packet.
	 * When processing a COOKIE ECHO, we retrieve the from address
	 * of the INIT from the cookie.
	 */

	/* This implementation defaults to making the first transport
	 * added as the primary transport.  The source address seems to
	 * be a better choice than any of the embedded addresses.
	 */
	if (!sctp_assoc_add_peer(asoc, peer_addr, gfp, SCTP_ACTIVE))
		goto nomem;

	if (sctp_cmp_addr_exact(sctp_source(chunk), peer_addr))
		src_match = 1;

	/* Process the initialization parameters.  */
	sctp_walk_params(param, peer_init, init_hdr.params) {
		if (!src_match && (param.p->type == SCTP_PARAM_IPV4_ADDRESS ||
		    param.p->type == SCTP_PARAM_IPV6_ADDRESS)) {
			af = sctp_get_af_specific(param_type2af(param.p->type));
			af->from_addr_param(&addr, param.addr,
					    chunk->sctp_hdr->source, 0);
			if (sctp_cmp_addr_exact(sctp_source(chunk), &addr))
				src_match = 1;
		}

		if (!sctp_process_param(asoc, param, peer_addr, gfp))
			goto clean_up;
	}

	/* source address of chunk may not match any valid address */
	if (!src_match)
		goto clean_up;

	/* AUTH: After processing the parameters, make sure that we
	 * have all the required info to potentially do authentications.
	 */
	if (asoc->peer.auth_capable && (!asoc->peer.peer_random ||
					!asoc->peer.peer_hmacs))
		asoc->peer.auth_capable = 0;

	/* In a non-backward compatible mode, if the peer claims
	 * support for ADD-IP but not AUTH,  the ADD-IP spec states
	 * that we MUST ABORT the association. Section 6.  The section
	 * also give us an option to silently ignore the packet, which
	 * is what we'll do here.
	 */
	if (!net->sctp.addip_noauth &&
	    (asoc->peer.asconf_capable && !asoc->peer.auth_capable)) {
		asoc->peer.addip_disabled_mask |= (SCTP_PARAM_ADD_IP |
						  SCTP_PARAM_DEL_IP |
						  SCTP_PARAM_SET_PRIMARY);
		asoc->peer.asconf_capable = 0;
		goto clean_up;
	}

	/* Walk list of transports, removing transports in the UNKNOWN state. */
	list_for_each_safe(pos, temp, &asoc->peer.transport_addr_list) {
		transport = list_entry(pos, struct sctp_transport, transports);
		if (transport->state == SCTP_UNKNOWN) {
			sctp_assoc_rm_peer(asoc, transport);
		}
	}

	/* The fixed INIT headers are always in network byte
	 * order.
	 */
	asoc->peer.i.init_tag =
		ntohl(peer_init->init_hdr.init_tag);
	asoc->peer.i.a_rwnd =
		ntohl(peer_init->init_hdr.a_rwnd);
	asoc->peer.i.num_outbound_streams =
		ntohs(peer_init->init_hdr.num_outbound_streams);
	asoc->peer.i.num_inbound_streams =
		ntohs(peer_init->init_hdr.num_inbound_streams);
	asoc->peer.i.initial_tsn =
		ntohl(peer_init->init_hdr.initial_tsn);

	/* Apply the upper bounds for output streams based on peer's
	 * number of inbound streams.
	 */
	if (asoc->c.sinit_num_ostreams  >
	    ntohs(peer_init->init_hdr.num_inbound_streams)) {
		asoc->c.sinit_num_ostreams =
			ntohs(peer_init->init_hdr.num_inbound_streams);
	}

	if (asoc->c.sinit_max_instreams >
	    ntohs(peer_init->init_hdr.num_outbound_streams)) {
		asoc->c.sinit_max_instreams =
			ntohs(peer_init->init_hdr.num_outbound_streams);
	}

	/* Copy Initiation tag from INIT to VT_peer in cookie.   */
	asoc->c.peer_vtag = asoc->peer.i.init_tag;

	/* Peer Rwnd   : Current calculated value of the peer's rwnd.  */
	asoc->peer.rwnd = asoc->peer.i.a_rwnd;

	/* Copy cookie in case we need to resend COOKIE-ECHO.
	 * (Re-duplicates any cookie pointer already stored so the
	 * association owns its own allocation with this gfp.)
	 */
	cookie = asoc->peer.cookie;
	if (cookie) {
		asoc->peer.cookie = kmemdup(cookie, asoc->peer.cookie_len, gfp);
		if (!asoc->peer.cookie)
			goto clean_up;
	}

	/* RFC 2960 7.2.1 The initial value of ssthresh MAY be arbitrarily
	 * high (for example, implementations MAY use the size of the receiver
	 * advertised window).
	 */
	list_for_each_entry(transport, &asoc->peer.transport_addr_list,
			transports) {
		transport->ssthresh = asoc->peer.i.a_rwnd;
	}

	/* Set up the TSN tracking pieces.  */
	if (!sctp_tsnmap_init(&asoc->peer.tsn_map, SCTP_TSN_MAP_INITIAL,
				asoc->peer.i.initial_tsn, gfp))
		goto clean_up;

	/* RFC 2960 6.5 Stream Identifier and Stream Sequence Number
	 *
	 * The stream sequence number in all the streams shall start
	 * from 0 when the association is established.  Also, when the
	 * stream sequence number reaches the value 65535 the next
	 * stream sequence number shall be set to 0.
	 */

	/* Allocate storage for the negotiated streams if it is not a temporary
	 * association.
*/ if (!asoc->temp) { int error; asoc->ssnmap = sctp_ssnmap_new(asoc->c.sinit_max_instreams, asoc->c.sinit_num_ostreams, gfp); if (!asoc->ssnmap) goto clean_up; error = sctp_assoc_set_id(asoc, gfp); if (error) goto clean_up; } /* ADDIP Section 4.1 ASCONF Chunk Procedures * * When an endpoint has an ASCONF signaled change to be sent to the * remote endpoint it should do the following: * ... * A2) A serial number should be assigned to the Chunk. The serial * number should be a monotonically increasing number. All serial * numbers are defined to be initialized at the start of the * association to the same value as the Initial TSN. */ asoc->peer.addip_serial = asoc->peer.i.initial_tsn - 1; return 1; clean_up: /* Release the transport structures. */ list_for_each_safe(pos, temp, &asoc->peer.transport_addr_list) { transport = list_entry(pos, struct sctp_transport, transports); if (transport->state != SCTP_ACTIVE) sctp_assoc_rm_peer(asoc, transport); } nomem: return 0; } /* Update asoc with the option described in param. * * RFC2960 3.3.2.1 Optional/Variable Length Parameters in INIT * * asoc is the association to update. * param is the variable length parameter to use for update. * cid tells us if this is an INIT, INIT ACK or COOKIE ECHO. * If the current packet is an INIT we want to minimize the amount of * work we do. In particular, we should not build transport * structures for the addresses. */ static int sctp_process_param(struct sctp_association *asoc, union sctp_params param, const union sctp_addr *peer_addr, gfp_t gfp) { struct net *net = sock_net(asoc->base.sk); union sctp_addr addr; int i; __u16 sat; int retval = 1; sctp_scope_t scope; time_t stale; struct sctp_af *af; union sctp_addr_param *addr_param; struct sctp_transport *t; struct sctp_endpoint *ep = asoc->ep; /* We maintain all INIT parameters in network byte order all the * time. This allows us to not worry about whether the parameters * came from a fresh INIT, and INIT ACK, or were stored in a cookie. 
*/ switch (param.p->type) { case SCTP_PARAM_IPV6_ADDRESS: if (PF_INET6 != asoc->base.sk->sk_family) break; goto do_addr_param; case SCTP_PARAM_IPV4_ADDRESS: /* v4 addresses are not allowed on v6-only socket */ if (ipv6_only_sock(asoc->base.sk)) break; do_addr_param: af = sctp_get_af_specific(param_type2af(param.p->type)); af->from_addr_param(&addr, param.addr, htons(asoc->peer.port), 0); scope = sctp_scope(peer_addr); if (sctp_in_scope(net, &addr, scope)) if (!sctp_assoc_add_peer(asoc, &addr, gfp, SCTP_UNCONFIRMED)) return 0; break; case SCTP_PARAM_COOKIE_PRESERVATIVE: if (!net->sctp.cookie_preserve_enable) break; stale = ntohl(param.life->lifespan_increment); /* Suggested Cookie Life span increment's unit is msec, * (1/1000sec). */ asoc->cookie_life = ktime_add_ms(asoc->cookie_life, stale); break; case SCTP_PARAM_HOST_NAME_ADDRESS: pr_debug("%s: unimplemented SCTP_HOST_NAME_ADDRESS\n", __func__); break; case SCTP_PARAM_SUPPORTED_ADDRESS_TYPES: /* Turn off the default values first so we'll know which * ones are really set by the peer. */ asoc->peer.ipv4_address = 0; asoc->peer.ipv6_address = 0; /* Assume that peer supports the address family * by which it sends a packet. */ if (peer_addr->sa.sa_family == AF_INET6) asoc->peer.ipv6_address = 1; else if (peer_addr->sa.sa_family == AF_INET) asoc->peer.ipv4_address = 1; /* Cycle through address types; avoid divide by 0. */ sat = ntohs(param.p->length) - sizeof(sctp_paramhdr_t); if (sat) sat /= sizeof(__u16); for (i = 0; i < sat; ++i) { switch (param.sat->types[i]) { case SCTP_PARAM_IPV4_ADDRESS: asoc->peer.ipv4_address = 1; break; case SCTP_PARAM_IPV6_ADDRESS: if (PF_INET6 == asoc->base.sk->sk_family) asoc->peer.ipv6_address = 1; break; case SCTP_PARAM_HOST_NAME_ADDRESS: asoc->peer.hostname_address = 1; break; default: /* Just ignore anything else. 
*/ break; } } break; case SCTP_PARAM_STATE_COOKIE: asoc->peer.cookie_len = ntohs(param.p->length) - sizeof(sctp_paramhdr_t); asoc->peer.cookie = param.cookie->body; break; case SCTP_PARAM_HEARTBEAT_INFO: /* Would be odd to receive, but it causes no problems. */ break; case SCTP_PARAM_UNRECOGNIZED_PARAMETERS: /* Rejected during verify stage. */ break; case SCTP_PARAM_ECN_CAPABLE: asoc->peer.ecn_capable = 1; break; case SCTP_PARAM_ADAPTATION_LAYER_IND: asoc->peer.adaptation_ind = ntohl(param.aind->adaptation_ind); break; case SCTP_PARAM_SET_PRIMARY: if (!net->sctp.addip_enable) goto fall_through; addr_param = param.v + sizeof(sctp_addip_param_t); af = sctp_get_af_specific(param_type2af(param.p->type)); af->from_addr_param(&addr, addr_param, htons(asoc->peer.port), 0); /* if the address is invalid, we can't process it. * XXX: see spec for what to do. */ if (!af->addr_valid(&addr, NULL, NULL)) break; t = sctp_assoc_lookup_paddr(asoc, &addr); if (!t) break; sctp_assoc_set_primary(asoc, t); break; case SCTP_PARAM_SUPPORTED_EXT: sctp_process_ext_param(asoc, param); break; case SCTP_PARAM_FWD_TSN_SUPPORT: if (net->sctp.prsctp_enable) { asoc->peer.prsctp_capable = 1; break; } /* Fall Through */ goto fall_through; case SCTP_PARAM_RANDOM: if (!ep->auth_enable) goto fall_through; /* Save peer's random parameter */ asoc->peer.peer_random = kmemdup(param.p, ntohs(param.p->length), gfp); if (!asoc->peer.peer_random) { retval = 0; break; } break; case SCTP_PARAM_HMAC_ALGO: if (!ep->auth_enable) goto fall_through; /* Save peer's HMAC list */ asoc->peer.peer_hmacs = kmemdup(param.p, ntohs(param.p->length), gfp); if (!asoc->peer.peer_hmacs) { retval = 0; break; } /* Set the default HMAC the peer requested*/ sctp_auth_asoc_set_default_hmac(asoc, param.hmac_algo); break; case SCTP_PARAM_CHUNKS: if (!ep->auth_enable) goto fall_through; asoc->peer.peer_chunks = kmemdup(param.p, ntohs(param.p->length), gfp); if (!asoc->peer.peer_chunks) retval = 0; break; fall_through: default: /* Any 
unrecognized parameters should have been caught * and handled by sctp_verify_param() which should be * called prior to this routine. Simply log the error * here. */ pr_debug("%s: ignoring param:%d for association:%p.\n", __func__, ntohs(param.p->type), asoc); break; } return retval; } /* Select a new verification tag. */ __u32 sctp_generate_tag(const struct sctp_endpoint *ep) { /* I believe that this random number generator complies with RFC1750. * A tag of 0 is reserved for special cases (e.g. INIT). */ __u32 x; do { get_random_bytes(&x, sizeof(__u32)); } while (x == 0); return x; } /* Select an initial TSN to send during startup. */ __u32 sctp_generate_tsn(const struct sctp_endpoint *ep) { __u32 retval; get_random_bytes(&retval, sizeof(__u32)); return retval; } /* * ADDIP 3.1.1 Address Configuration Change Chunk (ASCONF) * 0 1 2 3 * 0 1 2 3 4 5 6 7 8 9 0 1 2 3 4 5 6 7 8 9 0 1 2 3 4 5 6 7 8 9 0 1 * +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+ * | Type = 0xC1 | Chunk Flags | Chunk Length | * +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+ * | Serial Number | * +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+ * | Address Parameter | * +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+ * | ASCONF Parameter #1 | * +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+ * \ \ * / .... 
/ * \ \ * +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+ * | ASCONF Parameter #N | * +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+ * * Address Parameter and other parameter will not be wrapped in this function */ static struct sctp_chunk *sctp_make_asconf(struct sctp_association *asoc, union sctp_addr *addr, int vparam_len) { sctp_addiphdr_t asconf; struct sctp_chunk *retval; int length = sizeof(asconf) + vparam_len; union sctp_addr_param addrparam; int addrlen; struct sctp_af *af = sctp_get_af_specific(addr->v4.sin_family); addrlen = af->to_addr_param(addr, &addrparam); if (!addrlen) return NULL; length += addrlen; /* Create the chunk. */ retval = sctp_make_control(asoc, SCTP_CID_ASCONF, 0, length); if (!retval) return NULL; asconf.serial = htonl(asoc->addip_serial++); retval->subh.addip_hdr = sctp_addto_chunk(retval, sizeof(asconf), &asconf); retval->param_hdr.v = sctp_addto_chunk(retval, addrlen, &addrparam); return retval; } /* ADDIP * 3.2.1 Add IP Address * 0 1 2 3 * 0 1 2 3 4 5 6 7 8 9 0 1 2 3 4 5 6 7 8 9 0 1 2 3 4 5 6 7 8 9 0 1 * +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+ * | Type = 0xC001 | Length = Variable | * +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+ * | ASCONF-Request Correlation ID | * +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+ * | Address Parameter | * +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+ * * 3.2.2 Delete IP Address * 0 1 2 3 * 0 1 2 3 4 5 6 7 8 9 0 1 2 3 4 5 6 7 8 9 0 1 2 3 4 5 6 7 8 9 0 1 * +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+ * | Type = 0xC002 | Length = Variable | * +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+ * | ASCONF-Request Correlation ID | * +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+ * | Address Parameter | * +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+ * */ struct sctp_chunk *sctp_make_asconf_update_ip(struct 
sctp_association *asoc, union sctp_addr *laddr, struct sockaddr *addrs, int addrcnt, __be16 flags) { sctp_addip_param_t param; struct sctp_chunk *retval; union sctp_addr_param addr_param; union sctp_addr *addr; void *addr_buf; struct sctp_af *af; int paramlen = sizeof(param); int addr_param_len = 0; int totallen = 0; int i; int del_pickup = 0; /* Get total length of all the address parameters. */ addr_buf = addrs; for (i = 0; i < addrcnt; i++) { addr = addr_buf; af = sctp_get_af_specific(addr->v4.sin_family); addr_param_len = af->to_addr_param(addr, &addr_param); totallen += paramlen; totallen += addr_param_len; addr_buf += af->sockaddr_len; if (asoc->asconf_addr_del_pending && !del_pickup) { /* reuse the parameter length from the same scope one */ totallen += paramlen; totallen += addr_param_len; del_pickup = 1; pr_debug("%s: picked same-scope del_pending addr, " "totallen for all addresses is %d\n", __func__, totallen); } } /* Create an asconf chunk with the required length. */ retval = sctp_make_asconf(asoc, laddr, totallen); if (!retval) return NULL; /* Add the address parameters to the asconf chunk. 
*/ addr_buf = addrs; for (i = 0; i < addrcnt; i++) { addr = addr_buf; af = sctp_get_af_specific(addr->v4.sin_family); addr_param_len = af->to_addr_param(addr, &addr_param); param.param_hdr.type = flags; param.param_hdr.length = htons(paramlen + addr_param_len); param.crr_id = i; sctp_addto_chunk(retval, paramlen, &param); sctp_addto_chunk(retval, addr_param_len, &addr_param); addr_buf += af->sockaddr_len; } if (flags == SCTP_PARAM_ADD_IP && del_pickup) { addr = asoc->asconf_addr_del_pending; af = sctp_get_af_specific(addr->v4.sin_family); addr_param_len = af->to_addr_param(addr, &addr_param); param.param_hdr.type = SCTP_PARAM_DEL_IP; param.param_hdr.length = htons(paramlen + addr_param_len); param.crr_id = i; sctp_addto_chunk(retval, paramlen, &param); sctp_addto_chunk(retval, addr_param_len, &addr_param); } return retval; } /* ADDIP * 3.2.4 Set Primary IP Address * 0 1 2 3 * 0 1 2 3 4 5 6 7 8 9 0 1 2 3 4 5 6 7 8 9 0 1 2 3 4 5 6 7 8 9 0 1 * +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+ * | Type =0xC004 | Length = Variable | * +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+ * | ASCONF-Request Correlation ID | * +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+ * | Address Parameter | * +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+ * * Create an ASCONF chunk with Set Primary IP address parameter. */ struct sctp_chunk *sctp_make_asconf_set_prim(struct sctp_association *asoc, union sctp_addr *addr) { sctp_addip_param_t param; struct sctp_chunk *retval; int len = sizeof(param); union sctp_addr_param addrparam; int addrlen; struct sctp_af *af = sctp_get_af_specific(addr->v4.sin_family); addrlen = af->to_addr_param(addr, &addrparam); if (!addrlen) return NULL; len += addrlen; /* Create the chunk and make asconf header. 
*/ retval = sctp_make_asconf(asoc, addr, len); if (!retval) return NULL; param.param_hdr.type = SCTP_PARAM_SET_PRIMARY; param.param_hdr.length = htons(len); param.crr_id = 0; sctp_addto_chunk(retval, sizeof(param), &param); sctp_addto_chunk(retval, addrlen, &addrparam); return retval; } /* ADDIP 3.1.2 Address Configuration Acknowledgement Chunk (ASCONF-ACK) * 0 1 2 3 * 0 1 2 3 4 5 6 7 8 9 0 1 2 3 4 5 6 7 8 9 0 1 2 3 4 5 6 7 8 9 0 1 * +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+ * | Type = 0x80 | Chunk Flags | Chunk Length | * +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+ * | Serial Number | * +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+ * | ASCONF Parameter Response#1 | * +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+ * \ \ * / .... / * \ \ * +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+ * | ASCONF Parameter Response#N | * +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+ * * Create an ASCONF_ACK chunk with enough space for the parameter responses. */ static struct sctp_chunk *sctp_make_asconf_ack(const struct sctp_association *asoc, __u32 serial, int vparam_len) { sctp_addiphdr_t asconf; struct sctp_chunk *retval; int length = sizeof(asconf) + vparam_len; /* Create the chunk. */ retval = sctp_make_control(asoc, SCTP_CID_ASCONF_ACK, 0, length); if (!retval) return NULL; asconf.serial = htonl(serial); retval->subh.addip_hdr = sctp_addto_chunk(retval, sizeof(asconf), &asconf); return retval; } /* Add response parameters to an ASCONF_ACK chunk. 
 */
/*
 * sctp_add_asconf_response() - append one per-parameter response TLV to an
 * ASCONF_ACK chunk (ADDIP / RFC 5061, Section 4.3).
 *
 * @chunk:        the ASCONF_ACK chunk being built
 * @crr_id:       correlation ID copied from the request parameter, so the
 *                sender can match this response to its request
 * @err_code:     SCTP_ERROR_NO_ERROR for success, otherwise the error cause
 * @asconf_param: the offending request TLV to echo back on error; may be NULL
 *
 * On success only a Success Indication parameter is appended.  On error an
 * Error Cause Indication parameter is appended, wrapping an error-cause
 * header and (when available) a verbatim copy of the failed request TLV.
 */
static void sctp_add_asconf_response(struct sctp_chunk *chunk, __be32 crr_id,
				     __be16 err_code,
				     sctp_addip_param_t *asconf_param)
{
	sctp_addip_param_t ack_param;
	sctp_errhdr_t err_param;
	int asconf_param_len = 0;
	int err_param_len = 0;
	__be16 response_type;

	if (SCTP_ERROR_NO_ERROR == err_code) {
		response_type = SCTP_PARAM_SUCCESS_REPORT;
	} else {
		response_type = SCTP_PARAM_ERR_CAUSE;
		err_param_len = sizeof(err_param);
		/* Length of the request TLV we will echo back, taken from
		 * its own (network byte order) header.
		 */
		if (asconf_param)
			asconf_param_len = ntohs(asconf_param->param_hdr.length);
	}

	/* Add Success Indication or Error Cause Indication parameter.
	 * The outer length covers everything nested inside it.
	 */
	ack_param.param_hdr.type = response_type;
	ack_param.param_hdr.length = htons(sizeof(ack_param) +
					   err_param_len + asconf_param_len);
	ack_param.crr_id = crr_id;
	sctp_addto_chunk(chunk, sizeof(ack_param), &ack_param);

	if (SCTP_ERROR_NO_ERROR == err_code)
		return;

	/* Add Error Cause parameter. */
	err_param.cause = err_code;
	err_param.length = htons(err_param_len + asconf_param_len);
	sctp_addto_chunk(chunk, err_param_len, &err_param);

	/* Add the failed TLV copied from ASCONF chunk. */
	if (asconf_param)
		sctp_addto_chunk(chunk, asconf_param_len, asconf_param);
}

/* Process a asconf parameter. 
*/ static __be16 sctp_process_asconf_param(struct sctp_association *asoc, struct sctp_chunk *asconf, sctp_addip_param_t *asconf_param) { struct sctp_transport *peer; struct sctp_af *af; union sctp_addr addr; union sctp_addr_param *addr_param; addr_param = (void *)asconf_param + sizeof(sctp_addip_param_t); if (asconf_param->param_hdr.type != SCTP_PARAM_ADD_IP && asconf_param->param_hdr.type != SCTP_PARAM_DEL_IP && asconf_param->param_hdr.type != SCTP_PARAM_SET_PRIMARY) return SCTP_ERROR_UNKNOWN_PARAM; switch (addr_param->p.type) { case SCTP_PARAM_IPV6_ADDRESS: if (!asoc->peer.ipv6_address) return SCTP_ERROR_DNS_FAILED; break; case SCTP_PARAM_IPV4_ADDRESS: if (!asoc->peer.ipv4_address) return SCTP_ERROR_DNS_FAILED; break; default: return SCTP_ERROR_DNS_FAILED; } af = sctp_get_af_specific(param_type2af(addr_param->p.type)); if (unlikely(!af)) return SCTP_ERROR_DNS_FAILED; af->from_addr_param(&addr, addr_param, htons(asoc->peer.port), 0); /* ADDIP 4.2.1 This parameter MUST NOT contain a broadcast * or multicast address. * (note: wildcard is permitted and requires special handling so * make sure we check for that) */ if (!af->is_any(&addr) && !af->addr_valid(&addr, NULL, asconf->skb)) return SCTP_ERROR_DNS_FAILED; switch (asconf_param->param_hdr.type) { case SCTP_PARAM_ADD_IP: /* Section 4.2.1: * If the address 0.0.0.0 or ::0 is provided, the source * address of the packet MUST be added. */ if (af->is_any(&addr)) memcpy(&addr, &asconf->source, sizeof(addr)); /* ADDIP 4.3 D9) If an endpoint receives an ADD IP address * request and does not have the local resources to add this * new address to the association, it MUST return an Error * Cause TLV set to the new error code 'Operation Refused * Due to Resource Shortage'. */ peer = sctp_assoc_add_peer(asoc, &addr, GFP_ATOMIC, SCTP_UNCONFIRMED); if (!peer) return SCTP_ERROR_RSRC_LOW; /* Start the heartbeat timer. 
*/ if (!mod_timer(&peer->hb_timer, sctp_transport_timeout(peer))) sctp_transport_hold(peer); asoc->new_transport = peer; break; case SCTP_PARAM_DEL_IP: /* ADDIP 4.3 D7) If a request is received to delete the * last remaining IP address of a peer endpoint, the receiver * MUST send an Error Cause TLV with the error cause set to the * new error code 'Request to Delete Last Remaining IP Address'. */ if (asoc->peer.transport_count == 1) return SCTP_ERROR_DEL_LAST_IP; /* ADDIP 4.3 D8) If a request is received to delete an IP * address which is also the source address of the IP packet * which contained the ASCONF chunk, the receiver MUST reject * this request. To reject the request the receiver MUST send * an Error Cause TLV set to the new error code 'Request to * Delete Source IP Address' */ if (sctp_cmp_addr_exact(&asconf->source, &addr)) return SCTP_ERROR_DEL_SRC_IP; /* Section 4.2.2 * If the address 0.0.0.0 or ::0 is provided, all * addresses of the peer except the source address of the * packet MUST be deleted. */ if (af->is_any(&addr)) { sctp_assoc_set_primary(asoc, asconf->transport); sctp_assoc_del_nonprimary_peers(asoc, asconf->transport); } else sctp_assoc_del_peer(asoc, &addr); break; case SCTP_PARAM_SET_PRIMARY: /* ADDIP Section 4.2.4 * If the address 0.0.0.0 or ::0 is provided, the receiver * MAY mark the source address of the packet as its * primary. */ if (af->is_any(&addr)) memcpy(&addr.v4, sctp_source(asconf), sizeof(addr)); peer = sctp_assoc_lookup_paddr(asoc, &addr); if (!peer) return SCTP_ERROR_DNS_FAILED; sctp_assoc_set_primary(asoc, peer); break; } return SCTP_ERROR_NO_ERROR; } /* Verify the ASCONF packet before we process it. 
*/ bool sctp_verify_asconf(const struct sctp_association *asoc, struct sctp_chunk *chunk, bool addr_param_needed, struct sctp_paramhdr **errp) { sctp_addip_chunk_t *addip = (sctp_addip_chunk_t *) chunk->chunk_hdr; union sctp_params param; bool addr_param_seen = false; sctp_walk_params(param, addip, addip_hdr.params) { size_t length = ntohs(param.p->length); *errp = param.p; switch (param.p->type) { case SCTP_PARAM_ERR_CAUSE: break; case SCTP_PARAM_IPV4_ADDRESS: if (length != sizeof(sctp_ipv4addr_param_t)) return false; addr_param_seen = true; break; case SCTP_PARAM_IPV6_ADDRESS: if (length != sizeof(sctp_ipv6addr_param_t)) return false; addr_param_seen = true; break; case SCTP_PARAM_ADD_IP: case SCTP_PARAM_DEL_IP: case SCTP_PARAM_SET_PRIMARY: /* In ASCONF chunks, these need to be first. */ if (addr_param_needed && !addr_param_seen) return false; length = ntohs(param.addip->param_hdr.length); if (length < sizeof(sctp_addip_param_t) + sizeof(sctp_paramhdr_t)) return false; break; case SCTP_PARAM_SUCCESS_REPORT: case SCTP_PARAM_ADAPTATION_LAYER_IND: if (length != sizeof(sctp_addip_param_t)) return false; break; default: /* This is unkown to us, reject! */ return false; } } /* Remaining sanity checks. */ if (addr_param_needed && !addr_param_seen) return false; if (!addr_param_needed && addr_param_seen) return false; if (param.v != chunk->chunk_end) return false; return true; } /* Process an incoming ASCONF chunk with the next expected serial no. and * return an ASCONF_ACK chunk to be sent in response. 
*/ struct sctp_chunk *sctp_process_asconf(struct sctp_association *asoc, struct sctp_chunk *asconf) { sctp_addip_chunk_t *addip = (sctp_addip_chunk_t *) asconf->chunk_hdr; bool all_param_pass = true; union sctp_params param; sctp_addiphdr_t *hdr; union sctp_addr_param *addr_param; sctp_addip_param_t *asconf_param; struct sctp_chunk *asconf_ack; __be16 err_code; int length = 0; int chunk_len; __u32 serial; chunk_len = ntohs(asconf->chunk_hdr->length) - sizeof(sctp_chunkhdr_t); hdr = (sctp_addiphdr_t *)asconf->skb->data; serial = ntohl(hdr->serial); /* Skip the addiphdr and store a pointer to address parameter. */ length = sizeof(sctp_addiphdr_t); addr_param = (union sctp_addr_param *)(asconf->skb->data + length); chunk_len -= length; /* Skip the address parameter and store a pointer to the first * asconf parameter. */ length = ntohs(addr_param->p.length); asconf_param = (void *)addr_param + length; chunk_len -= length; /* create an ASCONF_ACK chunk. * Based on the definitions of parameters, we know that the size of * ASCONF_ACK parameters are less than or equal to the fourfold of ASCONF * parameters. */ asconf_ack = sctp_make_asconf_ack(asoc, serial, chunk_len * 4); if (!asconf_ack) goto done; /* Process the TLVs contained within the ASCONF chunk. */ sctp_walk_params(param, addip, addip_hdr.params) { /* Skip preceeding address parameters. */ if (param.p->type == SCTP_PARAM_IPV4_ADDRESS || param.p->type == SCTP_PARAM_IPV6_ADDRESS) continue; err_code = sctp_process_asconf_param(asoc, asconf, param.addip); /* ADDIP 4.1 A7) * If an error response is received for a TLV parameter, * all TLVs with no response before the failed TLV are * considered successful if not reported. All TLVs after * the failed response are considered unsuccessful unless * a specific success indication is present for the parameter. 
*/ if (err_code != SCTP_ERROR_NO_ERROR) all_param_pass = false; if (!all_param_pass) sctp_add_asconf_response(asconf_ack, param.addip->crr_id, err_code, param.addip); /* ADDIP 4.3 D11) When an endpoint receiving an ASCONF to add * an IP address sends an 'Out of Resource' in its response, it * MUST also fail any subsequent add or delete requests bundled * in the ASCONF. */ if (err_code == SCTP_ERROR_RSRC_LOW) goto done; } done: asoc->peer.addip_serial++; /* If we are sending a new ASCONF_ACK hold a reference to it in assoc * after freeing the reference to old asconf ack if any. */ if (asconf_ack) { sctp_chunk_hold(asconf_ack); list_add_tail(&asconf_ack->transmitted_list, &asoc->asconf_ack_list); } return asconf_ack; } /* Process a asconf parameter that is successfully acked. */ static void sctp_asconf_param_success(struct sctp_association *asoc, sctp_addip_param_t *asconf_param) { struct sctp_af *af; union sctp_addr addr; struct sctp_bind_addr *bp = &asoc->base.bind_addr; union sctp_addr_param *addr_param; struct sctp_transport *transport; struct sctp_sockaddr_entry *saddr; addr_param = (void *)asconf_param + sizeof(sctp_addip_param_t); /* We have checked the packet before, so we do not check again. */ af = sctp_get_af_specific(param_type2af(addr_param->p.type)); af->from_addr_param(&addr, addr_param, htons(bp->port), 0); switch (asconf_param->param_hdr.type) { case SCTP_PARAM_ADD_IP: /* This is always done in BH context with a socket lock * held, so the list can not change. 
*/ local_bh_disable(); list_for_each_entry(saddr, &bp->address_list, list) { if (sctp_cmp_addr_exact(&saddr->a, &addr)) saddr->state = SCTP_ADDR_SRC; } local_bh_enable(); list_for_each_entry(transport, &asoc->peer.transport_addr_list, transports) { dst_release(transport->dst); transport->dst = NULL; } break; case SCTP_PARAM_DEL_IP: local_bh_disable(); sctp_del_bind_addr(bp, &addr); if (asoc->asconf_addr_del_pending != NULL && sctp_cmp_addr_exact(asoc->asconf_addr_del_pending, &addr)) { kfree(asoc->asconf_addr_del_pending); asoc->asconf_addr_del_pending = NULL; } local_bh_enable(); list_for_each_entry(transport, &asoc->peer.transport_addr_list, transports) { dst_release(transport->dst); transport->dst = NULL; } break; default: break; } } /* Get the corresponding ASCONF response error code from the ASCONF_ACK chunk * for the given asconf parameter. If there is no response for this parameter, * return the error code based on the third argument 'no_err'. * ADDIP 4.1 * A7) If an error response is received for a TLV parameter, all TLVs with no * response before the failed TLV are considered successful if not reported. * All TLVs after the failed response are considered unsuccessful unless a * specific success indication is present for the parameter. */ static __be16 sctp_get_asconf_response(struct sctp_chunk *asconf_ack, sctp_addip_param_t *asconf_param, int no_err) { sctp_addip_param_t *asconf_ack_param; sctp_errhdr_t *err_param; int length; int asconf_ack_len; __be16 err_code; if (no_err) err_code = SCTP_ERROR_NO_ERROR; else err_code = SCTP_ERROR_REQ_REFUSED; asconf_ack_len = ntohs(asconf_ack->chunk_hdr->length) - sizeof(sctp_chunkhdr_t); /* Skip the addiphdr from the asconf_ack chunk and store a pointer to * the first asconf_ack parameter. 
*/ length = sizeof(sctp_addiphdr_t); asconf_ack_param = (sctp_addip_param_t *)(asconf_ack->skb->data + length); asconf_ack_len -= length; while (asconf_ack_len > 0) { if (asconf_ack_param->crr_id == asconf_param->crr_id) { switch (asconf_ack_param->param_hdr.type) { case SCTP_PARAM_SUCCESS_REPORT: return SCTP_ERROR_NO_ERROR; case SCTP_PARAM_ERR_CAUSE: length = sizeof(sctp_addip_param_t); err_param = (void *)asconf_ack_param + length; asconf_ack_len -= length; if (asconf_ack_len > 0) return err_param->cause; else return SCTP_ERROR_INV_PARAM; break; default: return SCTP_ERROR_INV_PARAM; } } length = ntohs(asconf_ack_param->param_hdr.length); asconf_ack_param = (void *)asconf_ack_param + length; asconf_ack_len -= length; } return err_code; } /* Process an incoming ASCONF_ACK chunk against the cached last ASCONF chunk. */ int sctp_process_asconf_ack(struct sctp_association *asoc, struct sctp_chunk *asconf_ack) { struct sctp_chunk *asconf = asoc->addip_last_asconf; union sctp_addr_param *addr_param; sctp_addip_param_t *asconf_param; int length = 0; int asconf_len = asconf->skb->len; int all_param_pass = 0; int no_err = 1; int retval = 0; __be16 err_code = SCTP_ERROR_NO_ERROR; /* Skip the chunkhdr and addiphdr from the last asconf sent and store * a pointer to address parameter. */ length = sizeof(sctp_addip_chunk_t); addr_param = (union sctp_addr_param *)(asconf->skb->data + length); asconf_len -= length; /* Skip the address parameter in the last asconf sent and store a * pointer to the first asconf parameter. */ length = ntohs(addr_param->p.length); asconf_param = (void *)addr_param + length; asconf_len -= length; /* ADDIP 4.1 * A8) If there is no response(s) to specific TLV parameter(s), and no * failures are indicated, then all request(s) are considered * successful. */ if (asconf_ack->skb->len == sizeof(sctp_addiphdr_t)) all_param_pass = 1; /* Process the TLVs contained in the last sent ASCONF chunk. 
*/ while (asconf_len > 0) { if (all_param_pass) err_code = SCTP_ERROR_NO_ERROR; else { err_code = sctp_get_asconf_response(asconf_ack, asconf_param, no_err); if (no_err && (SCTP_ERROR_NO_ERROR != err_code)) no_err = 0; } switch (err_code) { case SCTP_ERROR_NO_ERROR: sctp_asconf_param_success(asoc, asconf_param); break; case SCTP_ERROR_RSRC_LOW: retval = 1; break; case SCTP_ERROR_UNKNOWN_PARAM: /* Disable sending this type of asconf parameter in * future. */ asoc->peer.addip_disabled_mask |= asconf_param->param_hdr.type; break; case SCTP_ERROR_REQ_REFUSED: case SCTP_ERROR_DEL_LAST_IP: case SCTP_ERROR_DEL_SRC_IP: default: break; } /* Skip the processed asconf parameter and move to the next * one. */ length = ntohs(asconf_param->param_hdr.length); asconf_param = (void *)asconf_param + length; asconf_len -= length; } if (no_err && asoc->src_out_of_asoc_ok) { asoc->src_out_of_asoc_ok = 0; sctp_transport_immediate_rtx(asoc->peer.primary_path); } /* Free the cached last sent asconf chunk. */ list_del_init(&asconf->transmitted_list); sctp_chunk_free(asconf); asoc->addip_last_asconf = NULL; return retval; } /* Make a FWD TSN chunk. */ struct sctp_chunk *sctp_make_fwdtsn(const struct sctp_association *asoc, __u32 new_cum_tsn, size_t nstreams, struct sctp_fwdtsn_skip *skiplist) { struct sctp_chunk *retval = NULL; struct sctp_fwdtsn_hdr ftsn_hdr; struct sctp_fwdtsn_skip skip; size_t hint; int i; hint = (nstreams + 1) * sizeof(__u32); retval = sctp_make_control(asoc, SCTP_CID_FWD_TSN, 0, hint); if (!retval) return NULL; ftsn_hdr.new_cum_tsn = htonl(new_cum_tsn); retval->subh.fwdtsn_hdr = sctp_addto_chunk(retval, sizeof(ftsn_hdr), &ftsn_hdr); for (i = 0; i < nstreams; i++) { skip.stream = skiplist[i].stream; skip.ssn = skiplist[i].ssn; sctp_addto_chunk(retval, sizeof(skip), &skip); } return retval; }
int sctp_verify_asconf(const struct sctp_association *asoc, struct sctp_paramhdr *param_hdr, void *chunk_end, struct sctp_paramhdr **errp) { sctp_addip_param_t *asconf_param; union sctp_params param; int length, plen; param.v = (sctp_paramhdr_t *) param_hdr; while (param.v <= chunk_end - sizeof(sctp_paramhdr_t)) { length = ntohs(param.p->length); *errp = param.p; if (param.v > chunk_end - length || length < sizeof(sctp_paramhdr_t)) return 0; switch (param.p->type) { case SCTP_PARAM_ADD_IP: case SCTP_PARAM_DEL_IP: case SCTP_PARAM_SET_PRIMARY: asconf_param = (sctp_addip_param_t *)param.v; plen = ntohs(asconf_param->param_hdr.length); if (plen < sizeof(sctp_addip_param_t) + sizeof(sctp_paramhdr_t)) return 0; break; case SCTP_PARAM_SUCCESS_REPORT: case SCTP_PARAM_ADAPTATION_LAYER_IND: if (length != sizeof(sctp_addip_param_t)) return 0; break; default: break; } param.v += WORD_ROUND(length); } if (param.v != chunk_end) return 0; return 1; }
bool sctp_verify_asconf(const struct sctp_association *asoc, struct sctp_chunk *chunk, bool addr_param_needed, struct sctp_paramhdr **errp) { sctp_addip_chunk_t *addip = (sctp_addip_chunk_t *) chunk->chunk_hdr; union sctp_params param; bool addr_param_seen = false; sctp_walk_params(param, addip, addip_hdr.params) { size_t length = ntohs(param.p->length); *errp = param.p; switch (param.p->type) { case SCTP_PARAM_ERR_CAUSE: break; case SCTP_PARAM_IPV4_ADDRESS: if (length != sizeof(sctp_ipv4addr_param_t)) return false; addr_param_seen = true; break; case SCTP_PARAM_IPV6_ADDRESS: if (length != sizeof(sctp_ipv6addr_param_t)) return false; addr_param_seen = true; break; case SCTP_PARAM_ADD_IP: case SCTP_PARAM_DEL_IP: case SCTP_PARAM_SET_PRIMARY: /* In ASCONF chunks, these need to be first. */ if (addr_param_needed && !addr_param_seen) return false; length = ntohs(param.addip->param_hdr.length); if (length < sizeof(sctp_addip_param_t) + sizeof(sctp_paramhdr_t)) return false; break; case SCTP_PARAM_SUCCESS_REPORT: case SCTP_PARAM_ADAPTATION_LAYER_IND: if (length != sizeof(sctp_addip_param_t)) return false; break; default: /* This is unkown to us, reject! */ return false; } } /* Remaining sanity checks. */ if (addr_param_needed && !addr_param_seen) return false; if (!addr_param_needed && addr_param_seen) return false; if (param.v != chunk->chunk_end) return false; return true; }
{'added': [(3113, '/* Verify the ASCONF packet before we process it. */'), (3114, 'bool sctp_verify_asconf(const struct sctp_association *asoc,'), (3115, '\t\t\tstruct sctp_chunk *chunk, bool addr_param_needed,'), (3116, '\t\t\tstruct sctp_paramhdr **errp)'), (3117, '{'), (3118, '\tsctp_addip_chunk_t *addip = (sctp_addip_chunk_t *) chunk->chunk_hdr;'), (3120, '\tbool addr_param_seen = false;'), (3122, '\tsctp_walk_params(param, addip, addip_hdr.params) {'), (3123, '\t\tsize_t length = ntohs(param.p->length);'), (3125, '\t\t*errp = param.p;'), (3127, '\t\tcase SCTP_PARAM_ERR_CAUSE:'), (3128, '\t\t\tbreak;'), (3129, '\t\tcase SCTP_PARAM_IPV4_ADDRESS:'), (3130, '\t\t\tif (length != sizeof(sctp_ipv4addr_param_t))'), (3131, '\t\t\t\treturn false;'), (3132, '\t\t\taddr_param_seen = true;'), (3133, '\t\t\tbreak;'), (3134, '\t\tcase SCTP_PARAM_IPV6_ADDRESS:'), (3135, '\t\t\tif (length != sizeof(sctp_ipv6addr_param_t))'), (3136, '\t\t\t\treturn false;'), (3137, '\t\t\taddr_param_seen = true;'), (3138, '\t\t\tbreak;'), (3142, '\t\t\t/* In ASCONF chunks, these need to be first. */'), (3143, '\t\t\tif (addr_param_needed && !addr_param_seen)'), (3144, '\t\t\t\treturn false;'), (3145, '\t\t\tlength = ntohs(param.addip->param_hdr.length);'), (3146, '\t\t\tif (length < sizeof(sctp_addip_param_t) +'), (3147, '\t\t\t\t sizeof(sctp_paramhdr_t))'), (3148, '\t\t\t\treturn false;'), (3153, '\t\t\t\treturn false;'), (3156, '\t\t\t/* This is unkown to us, reject! */'), (3157, '\t\t\treturn false;'), (3161, '\t/* Remaining sanity checks. 
*/'), (3162, '\tif (addr_param_needed && !addr_param_seen)'), (3163, '\t\treturn false;'), (3164, '\tif (!addr_param_needed && addr_param_seen)'), (3165, '\t\treturn false;'), (3166, '\tif (param.v != chunk->chunk_end)'), (3167, '\t\treturn false;'), (3169, '\treturn true;'), (3178, '\tsctp_addip_chunk_t *addip = (sctp_addip_chunk_t *) asconf->chunk_hdr;'), (3179, '\tbool all_param_pass = true;'), (3180, '\tunion sctp_params param;'), (3216, '\tsctp_walk_params(param, addip, addip_hdr.params) {'), (3217, '\t\t/* Skip preceeding address parameters. */'), (3218, '\t\tif (param.p->type == SCTP_PARAM_IPV4_ADDRESS ||'), (3219, '\t\t param.p->type == SCTP_PARAM_IPV6_ADDRESS)'), (3220, '\t\t\tcontinue;'), (3221, ''), (3223, '\t\t\t\t\t\t param.addip);'), (3231, '\t\tif (err_code != SCTP_ERROR_NO_ERROR)'), (3232, '\t\t\tall_param_pass = false;'), (3234, '\t\t\tsctp_add_asconf_response(asconf_ack, param.addip->crr_id,'), (3235, '\t\t\t\t\t\t err_code, param.addip);'), (3242, '\t\tif (err_code == SCTP_ERROR_RSRC_LOW)')], 'deleted': [(3113, '/* Verify the ASCONF packet before we process it. 
*/'), (3114, 'int sctp_verify_asconf(const struct sctp_association *asoc,'), (3115, '\t\t struct sctp_paramhdr *param_hdr, void *chunk_end,'), (3116, '\t\t struct sctp_paramhdr **errp) {'), (3117, '\tsctp_addip_param_t *asconf_param;'), (3119, '\tint length, plen;'), (3120, ''), (3121, '\tparam.v = (sctp_paramhdr_t *) param_hdr;'), (3122, '\twhile (param.v <= chunk_end - sizeof(sctp_paramhdr_t)) {'), (3123, '\t\tlength = ntohs(param.p->length);'), (3124, '\t\t*errp = param.p;'), (3126, '\t\tif (param.v > chunk_end - length ||'), (3127, '\t\t length < sizeof(sctp_paramhdr_t))'), (3128, '\t\t\treturn 0;'), (3134, '\t\t\tasconf_param = (sctp_addip_param_t *)param.v;'), (3135, '\t\t\tplen = ntohs(asconf_param->param_hdr.length);'), (3136, '\t\t\tif (plen < sizeof(sctp_addip_param_t) +'), (3137, '\t\t\t sizeof(sctp_paramhdr_t))'), (3138, '\t\t\t\treturn 0;'), (3143, '\t\t\t\treturn 0;'), (3144, ''), (3147, '\t\t\tbreak;'), (3149, ''), (3150, '\t\tparam.v += WORD_ROUND(length);'), (3153, '\tif (param.v != chunk_end)'), (3154, '\t\treturn 0;'), (3156, '\treturn 1;'), (3169, ''), (3174, '\tint\tall_param_pass = 1;'), (3202, '\twhile (chunk_len > 0) {'), (3204, '\t\t\t\t\t\t asconf_param);'), (3212, '\t\tif (SCTP_ERROR_NO_ERROR != err_code)'), (3213, '\t\t\tall_param_pass = 0;'), (3214, ''), (3216, '\t\t\tsctp_add_asconf_response(asconf_ack,'), (3217, '\t\t\t\t\t\t asconf_param->crr_id, err_code,'), (3218, '\t\t\t\t\t\t asconf_param);'), (3225, '\t\tif (SCTP_ERROR_RSRC_LOW == err_code)'), (3227, ''), (3228, '\t\t/* Move to the next ASCONF param. */'), (3229, '\t\tlength = ntohs(asconf_param->param_hdr.length);'), (3230, '\t\tasconf_param = (void *)asconf_param + length;'), (3231, '\t\tchunk_len -= length;'), (3233, '')]}
55
44
2,055
12,748
37
206
12
https://github.com/torvalds/linux
CVE-2014-3673
CWE-20
637
tiffcp.c
C
DECLAREreadFunc
/* $Id$ */ /* * Copyright (c) 1988-1997 Sam Leffler * Copyright (c) 1991-1997 Silicon Graphics, Inc. * * Revised: 2/18/01 BAR -- added syntax for extracting single images from * multi-image TIFF files. * * New syntax is: sourceFileName,image# * * image# ranges from 0..<n-1> where n is the # of images in the file. * There may be no white space between the comma and the filename or * image number. * * Example: tiffcp source.tif,1 destination.tif * * Copies the 2nd image in source.tif to the destination. * ***** * Permission to use, copy, modify, distribute, and sell this software and * its documentation for any purpose is hereby granted without fee, provided * that (i) the above copyright notices and this permission notice appear in * all copies of the software and related documentation, and (ii) the names of * Sam Leffler and Silicon Graphics may not be used in any advertising or * publicity relating to the software without the specific, prior written * permission of Sam Leffler and Silicon Graphics. * * THE SOFTWARE IS PROVIDED "AS-IS" AND WITHOUT WARRANTY OF ANY KIND, * EXPRESS, IMPLIED OR OTHERWISE, INCLUDING WITHOUT LIMITATION, ANY * WARRANTY OF MERCHANTABILITY OR FITNESS FOR A PARTICULAR PURPOSE. * * IN NO EVENT SHALL SAM LEFFLER OR SILICON GRAPHICS BE LIABLE FOR * ANY SPECIAL, INCIDENTAL, INDIRECT OR CONSEQUENTIAL DAMAGES OF ANY KIND, * OR ANY DAMAGES WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, * WHETHER OR NOT ADVISED OF THE POSSIBILITY OF DAMAGE, AND ON ANY THEORY OF * LIABILITY, ARISING OUT OF OR IN CONNECTION WITH THE USE OR PERFORMANCE * OF THIS SOFTWARE. 
*/ #include "tif_config.h" #include <stdio.h> #include <stdlib.h> #include <string.h> #include <ctype.h> #include <assert.h> #ifdef HAVE_UNISTD_H # include <unistd.h> #endif #include "tiffio.h" #ifndef HAVE_GETOPT extern int getopt(int, char**, char*); #endif #if defined(VMS) # define unlink delete #endif #define streq(a,b) (strcmp(a,b) == 0) #define strneq(a,b,n) (strncmp(a,b,n) == 0) #define TRUE 1 #define FALSE 0 static int outtiled = -1; static uint32 tilewidth; static uint32 tilelength; static uint16 config; static uint16 compression; static uint16 predictor; static int preset; static uint16 fillorder; static uint16 orientation; static uint32 rowsperstrip; static uint32 g3opts; static int ignore = FALSE; /* if true, ignore read errors */ static uint32 defg3opts = (uint32) -1; static int quality = 75; /* JPEG quality */ static int jpegcolormode = JPEGCOLORMODE_RGB; static uint16 defcompression = (uint16) -1; static uint16 defpredictor = (uint16) -1; static int defpreset = -1; static int tiffcp(TIFF*, TIFF*); static int processCompressOptions(char*); static void usage(void); static char comma = ','; /* (default) comma separator character */ static TIFF* bias = NULL; static int pageNum = 0; static int pageInSeq = 0; static int nextSrcImage (TIFF *tif, char **imageSpec) /* seek to the next image specified in *imageSpec returns 1 if success, 0 if no more images to process *imageSpec=NULL if subsequent images should be processed in sequence */ { if (**imageSpec == comma) { /* if not @comma, we've done all images */ char *start = *imageSpec + 1; tdir_t nextImage = (tdir_t)strtol(start, imageSpec, 0); if (start == *imageSpec) nextImage = TIFFCurrentDirectory (tif); if (**imageSpec) { if (**imageSpec == comma) { /* a trailing comma denotes remaining images in sequence */ if ((*imageSpec)[1] == '\0') *imageSpec = NULL; }else{ fprintf (stderr, "Expected a %c separated image # list after %s\n", comma, TIFFFileName (tif)); exit (-4); /* syntax error */ } } if 
(TIFFSetDirectory (tif, nextImage)) return 1; fprintf (stderr, "%s%c%d not found!\n", TIFFFileName(tif), comma, (int) nextImage); } return 0; } static TIFF* openSrcImage (char **imageSpec) /* imageSpec points to a pointer to a filename followed by optional ,image#'s Open the TIFF file and assign *imageSpec to either NULL if there are no images specified, or a pointer to the next image number text */ { TIFF *tif; char *fn = *imageSpec; *imageSpec = strchr (fn, comma); if (*imageSpec) { /* there is at least one image number specifier */ **imageSpec = '\0'; tif = TIFFOpen (fn, "r"); /* but, ignore any single trailing comma */ if (!(*imageSpec)[1]) {*imageSpec = NULL; return tif;} if (tif) { **imageSpec = comma; /* replace the comma */ if (!nextSrcImage(tif, imageSpec)) { TIFFClose (tif); tif = NULL; } } }else tif = TIFFOpen (fn, "r"); return tif; } int main(int argc, char* argv[]) { uint16 defconfig = (uint16) -1; uint16 deffillorder = 0; uint32 deftilewidth = (uint32) -1; uint32 deftilelength = (uint32) -1; uint32 defrowsperstrip = (uint32) 0; uint64 diroff = 0; TIFF* in; TIFF* out; char mode[10]; char* mp = mode; int c; #if !HAVE_DECL_OPTARG extern int optind; extern char* optarg; #endif *mp++ = 'w'; *mp = '\0'; while ((c = getopt(argc, argv, ",:b:c:f:l:o:p:r:w:aistBLMC8x")) != -1) switch (c) { case ',': if (optarg[0] != '=') usage(); comma = optarg[1]; break; case 'b': /* this file is bias image subtracted from others */ if (bias) { fputs ("Only 1 bias image may be specified\n", stderr); exit (-2); } { uint16 samples = (uint16) -1; char **biasFn = &optarg; bias = openSrcImage (biasFn); if (!bias) exit (-5); if (TIFFIsTiled (bias)) { fputs ("Bias image must be organized in strips\n", stderr); exit (-7); } TIFFGetField(bias, TIFFTAG_SAMPLESPERPIXEL, &samples); if (samples != 1) { fputs ("Bias image must be monochrome\n", stderr); exit (-7); } } break; case 'a': /* append to output */ mode[0] = 'a'; break; case 'c': /* compression scheme */ if 
(!processCompressOptions(optarg)) usage(); break; case 'f': /* fill order */ if (streq(optarg, "lsb2msb")) deffillorder = FILLORDER_LSB2MSB; else if (streq(optarg, "msb2lsb")) deffillorder = FILLORDER_MSB2LSB; else usage(); break; case 'i': /* ignore errors */ ignore = TRUE; break; case 'l': /* tile length */ outtiled = TRUE; deftilelength = atoi(optarg); break; case 'o': /* initial directory offset */ diroff = strtoul(optarg, NULL, 0); break; case 'p': /* planar configuration */ if (streq(optarg, "separate")) defconfig = PLANARCONFIG_SEPARATE; else if (streq(optarg, "contig")) defconfig = PLANARCONFIG_CONTIG; else usage(); break; case 'r': /* rows/strip */ defrowsperstrip = atol(optarg); break; case 's': /* generate stripped output */ outtiled = FALSE; break; case 't': /* generate tiled output */ outtiled = TRUE; break; case 'w': /* tile width */ outtiled = TRUE; deftilewidth = atoi(optarg); break; case 'B': *mp++ = 'b'; *mp = '\0'; break; case 'L': *mp++ = 'l'; *mp = '\0'; break; case 'M': *mp++ = 'm'; *mp = '\0'; break; case 'C': *mp++ = 'c'; *mp = '\0'; break; case '8': *mp++ = '8'; *mp = '\0'; break; case 'x': pageInSeq = 1; break; case '?': usage(); /*NOTREACHED*/ } if (argc - optind < 2) usage(); out = TIFFOpen(argv[argc-1], mode); if (out == NULL) return (-2); if ((argc - optind) == 2) pageNum = -1; for (; optind < argc-1 ; optind++) { char *imageCursor = argv[optind]; in = openSrcImage (&imageCursor); if (in == NULL) { (void) TIFFClose(out); return (-3); } if (diroff != 0 && !TIFFSetSubDirectory(in, diroff)) { TIFFError(TIFFFileName(in), "Error, setting subdirectory at " TIFF_UINT64_FORMAT, diroff); (void) TIFFClose(in); (void) TIFFClose(out); return (1); } for (;;) { config = defconfig; compression = defcompression; predictor = defpredictor; preset = defpreset; fillorder = deffillorder; rowsperstrip = defrowsperstrip; tilewidth = deftilewidth; tilelength = deftilelength; g3opts = defg3opts; if (!tiffcp(in, out) || !TIFFWriteDirectory(out)) { (void) 
TIFFClose(in); (void) TIFFClose(out); return (1); } if (imageCursor) { /* seek next image directory */ if (!nextSrcImage(in, &imageCursor)) break; }else if (!TIFFReadDirectory(in)) break; } (void) TIFFClose(in); } (void) TIFFClose(out); return (0); } static void processZIPOptions(char* cp) { if ( (cp = strchr(cp, ':')) ) { do { cp++; if (isdigit((int)*cp)) defpredictor = atoi(cp); else if (*cp == 'p') defpreset = atoi(++cp); else usage(); } while( (cp = strchr(cp, ':')) ); } } static void processG3Options(char* cp) { if( (cp = strchr(cp, ':')) ) { if (defg3opts == (uint32) -1) defg3opts = 0; do { cp++; if (strneq(cp, "1d", 2)) defg3opts &= ~GROUP3OPT_2DENCODING; else if (strneq(cp, "2d", 2)) defg3opts |= GROUP3OPT_2DENCODING; else if (strneq(cp, "fill", 4)) defg3opts |= GROUP3OPT_FILLBITS; else usage(); } while( (cp = strchr(cp, ':')) ); } } static int processCompressOptions(char* opt) { if (streq(opt, "none")) { defcompression = COMPRESSION_NONE; } else if (streq(opt, "packbits")) { defcompression = COMPRESSION_PACKBITS; } else if (strneq(opt, "jpeg", 4)) { char* cp = strchr(opt, ':'); defcompression = COMPRESSION_JPEG; while( cp ) { if (isdigit((int)cp[1])) quality = atoi(cp+1); else if (cp[1] == 'r' ) jpegcolormode = JPEGCOLORMODE_RAW; else usage(); cp = strchr(cp+1,':'); } } else if (strneq(opt, "g3", 2)) { processG3Options(opt); defcompression = COMPRESSION_CCITTFAX3; } else if (streq(opt, "g4")) { defcompression = COMPRESSION_CCITTFAX4; } else if (strneq(opt, "lzw", 3)) { char* cp = strchr(opt, ':'); if (cp) defpredictor = atoi(cp+1); defcompression = COMPRESSION_LZW; } else if (strneq(opt, "zip", 3)) { processZIPOptions(opt); defcompression = COMPRESSION_ADOBE_DEFLATE; } else if (strneq(opt, "lzma", 4)) { processZIPOptions(opt); defcompression = COMPRESSION_LZMA; } else if (strneq(opt, "jbig", 4)) { defcompression = COMPRESSION_JBIG; } else if (strneq(opt, "sgilog", 6)) { defcompression = COMPRESSION_SGILOG; } else return (0); return (1); } char* stuff[] = { 
"usage: tiffcp [options] input... output", "where options are:", " -a append to output instead of overwriting", " -o offset set initial directory offset", " -p contig pack samples contiguously (e.g. RGBRGB...)", " -p separate store samples separately (e.g. RRR...GGG...BBB...)", " -s write output in strips", " -t write output in tiles", " -x force the merged tiff pages in sequence", " -8 write BigTIFF instead of default ClassicTIFF", " -B write big-endian instead of native byte order", " -L write little-endian instead of native byte order", " -M disable use of memory-mapped files", " -C disable strip chopping", " -i ignore read errors", " -b file[,#] bias (dark) monochrome image to be subtracted from all others", " -,=% use % rather than , to separate image #'s (per Note below)", "", " -r # make each strip have no more than # rows", " -w # set output tile width (pixels)", " -l # set output tile length (pixels)", "", " -f lsb2msb force lsb-to-msb FillOrder for output", " -f msb2lsb force msb-to-lsb FillOrder for output", "", " -c lzw[:opts] compress output with Lempel-Ziv & Welch encoding", " -c zip[:opts] compress output with deflate encoding", " -c lzma[:opts] compress output with LZMA2 encoding", " -c jpeg[:opts] compress output with JPEG encoding", " -c jbig compress output with ISO JBIG encoding", " -c packbits compress output with packbits encoding", " -c g3[:opts] compress output with CCITT Group 3 encoding", " -c g4 compress output with CCITT Group 4 encoding", " -c sgilog compress output with SGILOG encoding", " -c none use no compression algorithm on output", "", "Group 3 options:", " 1d use default CCITT Group 3 1D-encoding", " 2d use optional CCITT Group 3 2D-encoding", " fill byte-align EOL codes", "For example, -c g3:2d:fill to get G3-2D-encoded data with byte-aligned EOLs", "", "JPEG options:", " # set compression quality level (0-100, default 75)", " r output color image as RGB rather than YCbCr", "For example, -c jpeg:r:50 to get JPEG-encoded RGB 
data with 50% comp. quality", "", "LZW, Deflate (ZIP) and LZMA2 options:", " # set predictor value", " p# set compression level (preset)", "For example, -c lzw:2 to get LZW-encoded data with horizontal differencing,", "-c zip:3:p9 for Deflate encoding with maximum compression level and floating", "point predictor.", "", "Note that input filenames may be of the form filename,x,y,z", "where x, y, and z specify image numbers in the filename to copy.", "example: tiffcp -c none -b esp.tif,1 esp.tif,0 test.tif", " subtract 2nd image in esp.tif from 1st yielding uncompressed result test.tif", NULL }; static void usage(void) { char buf[BUFSIZ]; int i; setbuf(stderr, buf); fprintf(stderr, "%s\n\n", TIFFGetVersion()); for (i = 0; stuff[i] != NULL; i++) fprintf(stderr, "%s\n", stuff[i]); exit(-1); } #define CopyField(tag, v) \ if (TIFFGetField(in, tag, &v)) TIFFSetField(out, tag, v) #define CopyField2(tag, v1, v2) \ if (TIFFGetField(in, tag, &v1, &v2)) TIFFSetField(out, tag, v1, v2) #define CopyField3(tag, v1, v2, v3) \ if (TIFFGetField(in, tag, &v1, &v2, &v3)) TIFFSetField(out, tag, v1, v2, v3) #define CopyField4(tag, v1, v2, v3, v4) \ if (TIFFGetField(in, tag, &v1, &v2, &v3, &v4)) TIFFSetField(out, tag, v1, v2, v3, v4) static void cpTag(TIFF* in, TIFF* out, uint16 tag, uint16 count, TIFFDataType type) { switch (type) { case TIFF_SHORT: if (count == 1) { uint16 shortv; CopyField(tag, shortv); } else if (count == 2) { uint16 shortv1, shortv2; CopyField2(tag, shortv1, shortv2); } else if (count == 4) { uint16 *tr, *tg, *tb, *ta; CopyField4(tag, tr, tg, tb, ta); } else if (count == (uint16) -1) { uint16 shortv1; uint16* shortav; CopyField2(tag, shortv1, shortav); } break; case TIFF_LONG: { uint32 longv; CopyField(tag, longv); } break; case TIFF_RATIONAL: if (count == 1) { float floatv; CopyField(tag, floatv); } else if (count == (uint16) -1) { float* floatav; CopyField(tag, floatav); } break; case TIFF_ASCII: { char* stringv; CopyField(tag, stringv); } break; case TIFF_DOUBLE: 
if (count == 1) { double doublev; CopyField(tag, doublev); } else if (count == (uint16) -1) { double* doubleav; CopyField(tag, doubleav); } break; default: TIFFError(TIFFFileName(in), "Data type %d is not supported, tag %d skipped.", tag, type); } } static struct cpTag { uint16 tag; uint16 count; TIFFDataType type; } tags[] = { { TIFFTAG_SUBFILETYPE, 1, TIFF_LONG }, { TIFFTAG_THRESHHOLDING, 1, TIFF_SHORT }, { TIFFTAG_DOCUMENTNAME, 1, TIFF_ASCII }, { TIFFTAG_IMAGEDESCRIPTION, 1, TIFF_ASCII }, { TIFFTAG_MAKE, 1, TIFF_ASCII }, { TIFFTAG_MODEL, 1, TIFF_ASCII }, { TIFFTAG_MINSAMPLEVALUE, 1, TIFF_SHORT }, { TIFFTAG_MAXSAMPLEVALUE, 1, TIFF_SHORT }, { TIFFTAG_XRESOLUTION, 1, TIFF_RATIONAL }, { TIFFTAG_YRESOLUTION, 1, TIFF_RATIONAL }, { TIFFTAG_PAGENAME, 1, TIFF_ASCII }, { TIFFTAG_XPOSITION, 1, TIFF_RATIONAL }, { TIFFTAG_YPOSITION, 1, TIFF_RATIONAL }, { TIFFTAG_RESOLUTIONUNIT, 1, TIFF_SHORT }, { TIFFTAG_SOFTWARE, 1, TIFF_ASCII }, { TIFFTAG_DATETIME, 1, TIFF_ASCII }, { TIFFTAG_ARTIST, 1, TIFF_ASCII }, { TIFFTAG_HOSTCOMPUTER, 1, TIFF_ASCII }, { TIFFTAG_WHITEPOINT, (uint16) -1, TIFF_RATIONAL }, { TIFFTAG_PRIMARYCHROMATICITIES,(uint16) -1,TIFF_RATIONAL }, { TIFFTAG_HALFTONEHINTS, 2, TIFF_SHORT }, { TIFFTAG_INKSET, 1, TIFF_SHORT }, { TIFFTAG_DOTRANGE, 2, TIFF_SHORT }, { TIFFTAG_TARGETPRINTER, 1, TIFF_ASCII }, { TIFFTAG_SAMPLEFORMAT, 1, TIFF_SHORT }, { TIFFTAG_YCBCRCOEFFICIENTS, (uint16) -1,TIFF_RATIONAL }, { TIFFTAG_YCBCRSUBSAMPLING, 2, TIFF_SHORT }, { TIFFTAG_YCBCRPOSITIONING, 1, TIFF_SHORT }, { TIFFTAG_REFERENCEBLACKWHITE, (uint16) -1,TIFF_RATIONAL }, { TIFFTAG_EXTRASAMPLES, (uint16) -1, TIFF_SHORT }, { TIFFTAG_SMINSAMPLEVALUE, 1, TIFF_DOUBLE }, { TIFFTAG_SMAXSAMPLEVALUE, 1, TIFF_DOUBLE }, { TIFFTAG_STONITS, 1, TIFF_DOUBLE }, }; #define NTAGS (sizeof (tags) / sizeof (tags[0])) #define CopyTag(tag, count, type) cpTag(in, out, tag, count, type) typedef int (*copyFunc) (TIFF* in, TIFF* out, uint32 l, uint32 w, uint16 samplesperpixel); static copyFunc pickCopyFunc(TIFF*, TIFF*, 
uint16, uint16); /* PODD */ static int tiffcp(TIFF* in, TIFF* out) { uint16 bitspersample, samplesperpixel = 1; uint16 input_compression, input_photometric = PHOTOMETRIC_MINISBLACK; copyFunc cf; uint32 width, length; struct cpTag* p; CopyField(TIFFTAG_IMAGEWIDTH, width); CopyField(TIFFTAG_IMAGELENGTH, length); CopyField(TIFFTAG_BITSPERSAMPLE, bitspersample); CopyField(TIFFTAG_SAMPLESPERPIXEL, samplesperpixel); if (compression != (uint16)-1) TIFFSetField(out, TIFFTAG_COMPRESSION, compression); else CopyField(TIFFTAG_COMPRESSION, compression); TIFFGetFieldDefaulted(in, TIFFTAG_COMPRESSION, &input_compression); TIFFGetFieldDefaulted(in, TIFFTAG_PHOTOMETRIC, &input_photometric); if (input_compression == COMPRESSION_JPEG) { /* Force conversion to RGB */ TIFFSetField(in, TIFFTAG_JPEGCOLORMODE, JPEGCOLORMODE_RGB); } else if (input_photometric == PHOTOMETRIC_YCBCR) { /* Otherwise, can't handle subsampled input */ uint16 subsamplinghor,subsamplingver; TIFFGetFieldDefaulted(in, TIFFTAG_YCBCRSUBSAMPLING, &subsamplinghor, &subsamplingver); if (subsamplinghor!=1 || subsamplingver!=1) { fprintf(stderr, "tiffcp: %s: Can't copy/convert subsampled image.\n", TIFFFileName(in)); return FALSE; } } if (compression == COMPRESSION_JPEG) { if (input_photometric == PHOTOMETRIC_RGB && jpegcolormode == JPEGCOLORMODE_RGB) TIFFSetField(out, TIFFTAG_PHOTOMETRIC, PHOTOMETRIC_YCBCR); else TIFFSetField(out, TIFFTAG_PHOTOMETRIC, input_photometric); } else if (compression == COMPRESSION_SGILOG || compression == COMPRESSION_SGILOG24) TIFFSetField(out, TIFFTAG_PHOTOMETRIC, samplesperpixel == 1 ? 
PHOTOMETRIC_LOGL : PHOTOMETRIC_LOGLUV); else if (input_compression == COMPRESSION_JPEG && samplesperpixel == 3 ) { /* RGB conversion was forced above hence the output will be of the same type */ TIFFSetField(out, TIFFTAG_PHOTOMETRIC, PHOTOMETRIC_RGB); } else CopyTag(TIFFTAG_PHOTOMETRIC, 1, TIFF_SHORT); if (fillorder != 0) TIFFSetField(out, TIFFTAG_FILLORDER, fillorder); else CopyTag(TIFFTAG_FILLORDER, 1, TIFF_SHORT); /* * Will copy `Orientation' tag from input image */ TIFFGetFieldDefaulted(in, TIFFTAG_ORIENTATION, &orientation); switch (orientation) { case ORIENTATION_BOTRIGHT: case ORIENTATION_RIGHTBOT: /* XXX */ TIFFWarning(TIFFFileName(in), "using bottom-left orientation"); orientation = ORIENTATION_BOTLEFT; /* fall thru... */ case ORIENTATION_LEFTBOT: /* XXX */ case ORIENTATION_BOTLEFT: break; case ORIENTATION_TOPRIGHT: case ORIENTATION_RIGHTTOP: /* XXX */ default: TIFFWarning(TIFFFileName(in), "using top-left orientation"); orientation = ORIENTATION_TOPLEFT; /* fall thru... */ case ORIENTATION_LEFTTOP: /* XXX */ case ORIENTATION_TOPLEFT: break; } TIFFSetField(out, TIFFTAG_ORIENTATION, orientation); /* * Choose tiles/strip for the output image according to * the command line arguments (-tiles, -strips) and the * structure of the input image. */ if (outtiled == -1) outtiled = TIFFIsTiled(in); if (outtiled) { /* * Setup output file's tile width&height. If either * is not specified, use either the value from the * input image or, if nothing is defined, use the * library default. */ if (tilewidth == (uint32) -1) TIFFGetField(in, TIFFTAG_TILEWIDTH, &tilewidth); if (tilelength == (uint32) -1) TIFFGetField(in, TIFFTAG_TILELENGTH, &tilelength); TIFFDefaultTileSize(out, &tilewidth, &tilelength); TIFFSetField(out, TIFFTAG_TILEWIDTH, tilewidth); TIFFSetField(out, TIFFTAG_TILELENGTH, tilelength); } else { /* * RowsPerStrip is left unspecified: use either the * value from the input image or, if nothing is defined, * use the library default. 
*/ if (rowsperstrip == (uint32) 0) { if (!TIFFGetField(in, TIFFTAG_ROWSPERSTRIP, &rowsperstrip)) { rowsperstrip = TIFFDefaultStripSize(out, rowsperstrip); } if (rowsperstrip > length && rowsperstrip != (uint32)-1) rowsperstrip = length; } else if (rowsperstrip == (uint32) -1) rowsperstrip = length; TIFFSetField(out, TIFFTAG_ROWSPERSTRIP, rowsperstrip); } if (config != (uint16) -1) TIFFSetField(out, TIFFTAG_PLANARCONFIG, config); else CopyField(TIFFTAG_PLANARCONFIG, config); if (samplesperpixel <= 4) CopyTag(TIFFTAG_TRANSFERFUNCTION, 4, TIFF_SHORT); CopyTag(TIFFTAG_COLORMAP, 4, TIFF_SHORT); /* SMinSampleValue & SMaxSampleValue */ switch (compression) { case COMPRESSION_JPEG: TIFFSetField(out, TIFFTAG_JPEGQUALITY, quality); TIFFSetField(out, TIFFTAG_JPEGCOLORMODE, jpegcolormode); break; case COMPRESSION_JBIG: CopyTag(TIFFTAG_FAXRECVPARAMS, 1, TIFF_LONG); CopyTag(TIFFTAG_FAXRECVTIME, 1, TIFF_LONG); CopyTag(TIFFTAG_FAXSUBADDRESS, 1, TIFF_ASCII); CopyTag(TIFFTAG_FAXDCS, 1, TIFF_ASCII); break; case COMPRESSION_LZW: case COMPRESSION_ADOBE_DEFLATE: case COMPRESSION_DEFLATE: case COMPRESSION_LZMA: if (predictor != (uint16)-1) TIFFSetField(out, TIFFTAG_PREDICTOR, predictor); else CopyField(TIFFTAG_PREDICTOR, predictor); if (preset != -1) { if (compression == COMPRESSION_ADOBE_DEFLATE || compression == COMPRESSION_DEFLATE) TIFFSetField(out, TIFFTAG_ZIPQUALITY, preset); else if (compression == COMPRESSION_LZMA) TIFFSetField(out, TIFFTAG_LZMAPRESET, preset); } break; case COMPRESSION_CCITTFAX3: case COMPRESSION_CCITTFAX4: if (compression == COMPRESSION_CCITTFAX3) { if (g3opts != (uint32) -1) TIFFSetField(out, TIFFTAG_GROUP3OPTIONS, g3opts); else CopyField(TIFFTAG_GROUP3OPTIONS, g3opts); } else CopyTag(TIFFTAG_GROUP4OPTIONS, 1, TIFF_LONG); CopyTag(TIFFTAG_BADFAXLINES, 1, TIFF_LONG); CopyTag(TIFFTAG_CLEANFAXDATA, 1, TIFF_LONG); CopyTag(TIFFTAG_CONSECUTIVEBADFAXLINES, 1, TIFF_LONG); CopyTag(TIFFTAG_FAXRECVPARAMS, 1, TIFF_LONG); CopyTag(TIFFTAG_FAXRECVTIME, 1, TIFF_LONG); 
CopyTag(TIFFTAG_FAXSUBADDRESS, 1, TIFF_ASCII); break; } { uint32 len32; void** data; if (TIFFGetField(in, TIFFTAG_ICCPROFILE, &len32, &data)) TIFFSetField(out, TIFFTAG_ICCPROFILE, len32, data); } { uint16 ninks; const char* inknames; if (TIFFGetField(in, TIFFTAG_NUMBEROFINKS, &ninks)) { TIFFSetField(out, TIFFTAG_NUMBEROFINKS, ninks); if (TIFFGetField(in, TIFFTAG_INKNAMES, &inknames)) { int inknameslen = strlen(inknames) + 1; const char* cp = inknames; while (ninks > 1) { cp = strchr(cp, '\0'); cp++; inknameslen += (strlen(cp) + 1); ninks--; } TIFFSetField(out, TIFFTAG_INKNAMES, inknameslen, inknames); } } } { unsigned short pg0, pg1; if (pageInSeq == 1) { if (pageNum < 0) /* only one input file */ { if (TIFFGetField(in, TIFFTAG_PAGENUMBER, &pg0, &pg1)) TIFFSetField(out, TIFFTAG_PAGENUMBER, pg0, pg1); } else TIFFSetField(out, TIFFTAG_PAGENUMBER, pageNum++, 0); } else { if (TIFFGetField(in, TIFFTAG_PAGENUMBER, &pg0, &pg1)) { if (pageNum < 0) /* only one input file */ TIFFSetField(out, TIFFTAG_PAGENUMBER, pg0, pg1); else TIFFSetField(out, TIFFTAG_PAGENUMBER, pageNum++, 0); } } } for (p = tags; p < &tags[NTAGS]; p++) CopyTag(p->tag, p->count, p->type); cf = pickCopyFunc(in, out, bitspersample, samplesperpixel); return (cf ? (*cf)(in, out, length, width, samplesperpixel) : FALSE); } /* * Copy Functions. */ #define DECLAREcpFunc(x) \ static int x(TIFF* in, TIFF* out, \ uint32 imagelength, uint32 imagewidth, tsample_t spp) #define DECLAREreadFunc(x) \ static int x(TIFF* in, \ uint8* buf, uint32 imagelength, uint32 imagewidth, tsample_t spp) typedef int (*readFunc)(TIFF*, uint8*, uint32, uint32, tsample_t); #define DECLAREwriteFunc(x) \ static int x(TIFF* out, \ uint8* buf, uint32 imagelength, uint32 imagewidth, tsample_t spp) typedef int (*writeFunc)(TIFF*, uint8*, uint32, uint32, tsample_t); /* * Contig -> contig by scanline for rows/strip change. 
*/ DECLAREcpFunc(cpContig2ContigByRow) { tsize_t scanlinesize = TIFFScanlineSize(in); tdata_t buf; uint32 row; buf = _TIFFmalloc(scanlinesize); if (!buf) return 0; _TIFFmemset(buf, 0, scanlinesize); (void) imagewidth; (void) spp; for (row = 0; row < imagelength; row++) { if (TIFFReadScanline(in, buf, row, 0) < 0 && !ignore) { TIFFError(TIFFFileName(in), "Error, can't read scanline %lu", (unsigned long) row); goto bad; } if (TIFFWriteScanline(out, buf, row, 0) < 0) { TIFFError(TIFFFileName(out), "Error, can't write scanline %lu", (unsigned long) row); goto bad; } } _TIFFfree(buf); return 1; bad: _TIFFfree(buf); return 0; } typedef void biasFn (void *image, void *bias, uint32 pixels); #define subtract(bits) \ static void subtract##bits (void *i, void *b, uint32 pixels)\ {\ uint##bits *image = i;\ uint##bits *bias = b;\ while (pixels--) {\ *image = *image > *bias ? *image-*bias : 0;\ image++, bias++; \ } \ } subtract(8) subtract(16) subtract(32) static biasFn *lineSubtractFn (unsigned bits) { switch (bits) { case 8: return subtract8; case 16: return subtract16; case 32: return subtract32; } return NULL; } /* * Contig -> contig by scanline while subtracting a bias image. 
*/ DECLAREcpFunc(cpBiasedContig2Contig) { if (spp == 1) { tsize_t biasSize = TIFFScanlineSize(bias); tsize_t bufSize = TIFFScanlineSize(in); tdata_t buf, biasBuf; uint32 biasWidth = 0, biasLength = 0; TIFFGetField(bias, TIFFTAG_IMAGEWIDTH, &biasWidth); TIFFGetField(bias, TIFFTAG_IMAGELENGTH, &biasLength); if (biasSize == bufSize && imagelength == biasLength && imagewidth == biasWidth) { uint16 sampleBits = 0; biasFn *subtractLine; TIFFGetField(in, TIFFTAG_BITSPERSAMPLE, &sampleBits); subtractLine = lineSubtractFn (sampleBits); if (subtractLine) { uint32 row; buf = _TIFFmalloc(bufSize); biasBuf = _TIFFmalloc(bufSize); for (row = 0; row < imagelength; row++) { if (TIFFReadScanline(in, buf, row, 0) < 0 && !ignore) { TIFFError(TIFFFileName(in), "Error, can't read scanline %lu", (unsigned long) row); goto bad; } if (TIFFReadScanline(bias, biasBuf, row, 0) < 0 && !ignore) { TIFFError(TIFFFileName(in), "Error, can't read biased scanline %lu", (unsigned long) row); goto bad; } subtractLine (buf, biasBuf, imagewidth); if (TIFFWriteScanline(out, buf, row, 0) < 0) { TIFFError(TIFFFileName(out), "Error, can't write scanline %lu", (unsigned long) row); goto bad; } } _TIFFfree(buf); _TIFFfree(biasBuf); TIFFSetDirectory(bias, TIFFCurrentDirectory(bias)); /* rewind */ return 1; bad: _TIFFfree(buf); _TIFFfree(biasBuf); return 0; } else { TIFFError(TIFFFileName(in), "No support for biasing %d bit pixels\n", sampleBits); return 0; } } TIFFError(TIFFFileName(in), "Bias image %s,%d\nis not the same size as %s,%d\n", TIFFFileName(bias), TIFFCurrentDirectory(bias), TIFFFileName(in), TIFFCurrentDirectory(in)); return 0; } else { TIFFError(TIFFFileName(in), "Can't bias %s,%d as it has >1 Sample/Pixel\n", TIFFFileName(in), TIFFCurrentDirectory(in)); return 0; } } /* * Strip -> strip for change in encoding. 
*/ DECLAREcpFunc(cpDecodedStrips) { tsize_t stripsize = TIFFStripSize(in); tdata_t buf = _TIFFmalloc(stripsize); (void) imagewidth; (void) spp; if (buf) { tstrip_t s, ns = TIFFNumberOfStrips(in); uint32 row = 0; _TIFFmemset(buf, 0, stripsize); for (s = 0; s < ns && row < imagelength; s++) { tsize_t cc = (row + rowsperstrip > imagelength) ? TIFFVStripSize(in, imagelength - row) : stripsize; if (TIFFReadEncodedStrip(in, s, buf, cc) < 0 && !ignore) { TIFFError(TIFFFileName(in), "Error, can't read strip %lu", (unsigned long) s); goto bad; } if (TIFFWriteEncodedStrip(out, s, buf, cc) < 0) { TIFFError(TIFFFileName(out), "Error, can't write strip %lu", (unsigned long) s); goto bad; } row += rowsperstrip; } _TIFFfree(buf); return 1; } else { TIFFError(TIFFFileName(in), "Error, can't allocate memory buffer of size %lu " "to read strips", (unsigned long) stripsize); return 0; } bad: _TIFFfree(buf); return 0; } /* * Separate -> separate by row for rows/strip change. */ DECLAREcpFunc(cpSeparate2SeparateByRow) { tsize_t scanlinesize = TIFFScanlineSize(in); tdata_t buf; uint32 row; tsample_t s; (void) imagewidth; buf = _TIFFmalloc(scanlinesize); if (!buf) return 0; _TIFFmemset(buf, 0, scanlinesize); for (s = 0; s < spp; s++) { for (row = 0; row < imagelength; row++) { if (TIFFReadScanline(in, buf, row, s) < 0 && !ignore) { TIFFError(TIFFFileName(in), "Error, can't read scanline %lu", (unsigned long) row); goto bad; } if (TIFFWriteScanline(out, buf, row, s) < 0) { TIFFError(TIFFFileName(out), "Error, can't write scanline %lu", (unsigned long) row); goto bad; } } } _TIFFfree(buf); return 1; bad: _TIFFfree(buf); return 0; } /* * Contig -> separate by row. 
*/
/*
 * Unpack interleaved (contig) scanlines into one output plane per sample:
 * the input row is re-read once for every sample plane and the bytes of
 * that sample are gathered into the output scanline.
 * Returns 1 on success, 0 on failure.
 */
DECLAREcpFunc(cpContig2SeparateByRow)
{
	tsize_t insize = TIFFScanlineSize(in);
	tsize_t outsize = TIFFScanlineSize(out);
	tdata_t linein = _TIFFmalloc(insize);
	tdata_t lineout = _TIFFmalloc(outsize);
	tsample_t sample;
	uint32 y;

	if (linein == NULL || lineout == NULL)
		goto fail;
	_TIFFmemset(linein, 0, insize);
	_TIFFmemset(lineout, 0, outsize);

	/* one full pass over the image per sample plane */
	for (sample = 0; sample < spp; sample++) {
		for (y = 0; y < imagelength; y++) {
			uint8* src;
			uint8* dst;
			uint32 count;

			if (TIFFReadScanline(in, linein, y, 0) < 0
			    && !ignore) {
				TIFFError(TIFFFileName(in),
				    "Error, can't read scanline %lu",
				    (unsigned long) y);
				goto fail;
			}
			/* gather every spp-th byte starting at this sample */
			src = ((uint8*) linein) + sample;
			dst = (uint8*) lineout;
			for (count = imagewidth; count > 0; count--) {
				*dst++ = *src;
				src += spp;
			}
			if (TIFFWriteScanline(out, lineout, y, sample) < 0) {
				TIFFError(TIFFFileName(out),
				    "Error, can't write scanline %lu",
				    (unsigned long) y);
				goto fail;
			}
		}
	}
	if (linein)
		_TIFFfree(linein);
	if (lineout)
		_TIFFfree(lineout);
	return 1;
fail:
	if (linein)
		_TIFFfree(linein);
	if (lineout)
		_TIFFfree(lineout);
	return 0;
}

/*
 * Separate -> contig by row.
*/ DECLAREcpFunc(cpSeparate2ContigByRow) { tsize_t scanlinesizein = TIFFScanlineSize(in); tsize_t scanlinesizeout = TIFFScanlineSize(out); tdata_t inbuf; tdata_t outbuf; register uint8 *inp, *outp; register uint32 n; uint32 row; tsample_t s; inbuf = _TIFFmalloc(scanlinesizein); outbuf = _TIFFmalloc(scanlinesizeout); if (!inbuf || !outbuf) goto bad; _TIFFmemset(inbuf, 0, scanlinesizein); _TIFFmemset(outbuf, 0, scanlinesizeout); for (row = 0; row < imagelength; row++) { /* merge channels */ for (s = 0; s < spp; s++) { if (TIFFReadScanline(in, inbuf, row, s) < 0 && !ignore) { TIFFError(TIFFFileName(in), "Error, can't read scanline %lu", (unsigned long) row); goto bad; } inp = (uint8*)inbuf; outp = ((uint8*)outbuf) + s; for (n = imagewidth; n-- > 0;) { *outp = *inp++; outp += spp; } } if (TIFFWriteScanline(out, outbuf, row, 0) < 0) { TIFFError(TIFFFileName(out), "Error, can't write scanline %lu", (unsigned long) row); goto bad; } } if (inbuf) _TIFFfree(inbuf); if (outbuf) _TIFFfree(outbuf); return 1; bad: if (inbuf) _TIFFfree(inbuf); if (outbuf) _TIFFfree(outbuf); return 0; } static void cpStripToTile(uint8* out, uint8* in, uint32 rows, uint32 cols, int outskew, int inskew) { while (rows-- > 0) { uint32 j = cols; while (j-- > 0) *out++ = *in++; out += outskew; in += inskew; } } static void cpContigBufToSeparateBuf(uint8* out, uint8* in, uint32 rows, uint32 cols, int outskew, int inskew, tsample_t spp, int bytes_per_sample ) { while (rows-- > 0) { uint32 j = cols; while (j-- > 0) { int n = bytes_per_sample; while( n-- ) { *out++ = *in++; } in += (spp-1) * bytes_per_sample; } out += outskew; in += inskew; } } static void cpSeparateBufToContigBuf(uint8* out, uint8* in, uint32 rows, uint32 cols, int outskew, int inskew, tsample_t spp, int bytes_per_sample) { while (rows-- > 0) { uint32 j = cols; while (j-- > 0) { int n = bytes_per_sample; while( n-- ) { *out++ = *in++; } out += (spp-1)*bytes_per_sample; } out += outskew; in += inskew; } } static int cpImage(TIFF* in, TIFF* 
out, readFunc fin, writeFunc fout, uint32 imagelength, uint32 imagewidth, tsample_t spp) { int status = 0; tdata_t buf = NULL; tsize_t scanlinesize = TIFFRasterScanlineSize(in); tsize_t bytes = scanlinesize * (tsize_t)imagelength; /* * XXX: Check for integer overflow. */ if (scanlinesize && imagelength && bytes / (tsize_t)imagelength == scanlinesize) { buf = _TIFFmalloc(bytes); if (buf) { if ((*fin)(in, (uint8*)buf, imagelength, imagewidth, spp)) { status = (*fout)(out, (uint8*)buf, imagelength, imagewidth, spp); } _TIFFfree(buf); } else { TIFFError(TIFFFileName(in), "Error, can't allocate space for image buffer"); } } else { TIFFError(TIFFFileName(in), "Error, no space for image buffer"); } return status; } DECLAREreadFunc(readContigStripsIntoBuffer) { tsize_t scanlinesize = TIFFScanlineSize(in); uint8* bufp = buf; uint32 row; (void) imagewidth; (void) spp; for (row = 0; row < imagelength; row++) { if (TIFFReadScanline(in, (tdata_t) bufp, row, 0) < 0 && !ignore) { TIFFError(TIFFFileName(in), "Error, can't read scanline %lu", (unsigned long) row); return 0; } bufp += scanlinesize; } return 1; } DECLAREreadFunc(readSeparateStripsIntoBuffer) { int status = 1; tsize_t scanlinesize = TIFFScanlineSize(in); tdata_t scanline; if (!scanlinesize) return 0; scanline = _TIFFmalloc(scanlinesize); if (!scanline) return 0; _TIFFmemset(scanline, 0, scanlinesize); (void) imagewidth; if (scanline) { uint8* bufp = (uint8*) buf; uint32 row; tsample_t s; for (row = 0; row < imagelength; row++) { /* merge channels */ for (s = 0; s < spp; s++) { uint8* bp = bufp + s; tsize_t n = scanlinesize; uint8* sbuf = scanline; if (TIFFReadScanline(in, scanline, row, s) < 0 && !ignore) { TIFFError(TIFFFileName(in), "Error, can't read scanline %lu", (unsigned long) row); status = 0; goto done; } while (n-- > 0) *bp = *sbuf++, bp += spp; } bufp += scanlinesize * spp; } } done: _TIFFfree(scanline); return status; } DECLAREreadFunc(readContigTilesIntoBuffer) { int status = 1; tsize_t tilesize = 
TIFFTileSize(in); tdata_t tilebuf; uint32 imagew = TIFFScanlineSize(in); uint32 tilew = TIFFTileRowSize(in); int iskew = imagew - tilew; uint8* bufp = (uint8*) buf; uint32 tw, tl; uint32 row; (void) spp; tilebuf = _TIFFmalloc(tilesize); if (tilebuf == 0) return 0; _TIFFmemset(tilebuf, 0, tilesize); (void) TIFFGetField(in, TIFFTAG_TILEWIDTH, &tw); (void) TIFFGetField(in, TIFFTAG_TILELENGTH, &tl); for (row = 0; row < imagelength; row += tl) { uint32 nrow = (row+tl > imagelength) ? imagelength-row : tl; uint32 colb = 0; uint32 col; for (col = 0; col < imagewidth && colb < imagew; col += tw) { if (TIFFReadTile(in, tilebuf, col, row, 0, 0) < 0 && !ignore) { TIFFError(TIFFFileName(in), "Error, can't read tile at %lu %lu", (unsigned long) col, (unsigned long) row); status = 0; goto done; } if (colb + tilew > imagew) { uint32 width = imagew - colb; uint32 oskew = tilew - width; cpStripToTile(bufp + colb, tilebuf, nrow, width, oskew + iskew, oskew ); } else cpStripToTile(bufp + colb, tilebuf, nrow, tilew, iskew, 0); colb += tilew; } bufp += imagew * nrow; } done: _TIFFfree(tilebuf); return status; } DECLAREreadFunc(readSeparateTilesIntoBuffer) { int status = 1; uint32 imagew = TIFFRasterScanlineSize(in); uint32 tilew = TIFFTileRowSize(in); int iskew = imagew - tilew*spp; tsize_t tilesize = TIFFTileSize(in); tdata_t tilebuf; uint8* bufp = (uint8*) buf; uint32 tw, tl; uint32 row; uint16 bps = 0, bytes_per_sample; tilebuf = _TIFFmalloc(tilesize); if (tilebuf == 0) return 0; _TIFFmemset(tilebuf, 0, tilesize); (void) TIFFGetField(in, TIFFTAG_TILEWIDTH, &tw); (void) TIFFGetField(in, TIFFTAG_TILELENGTH, &tl); (void) TIFFGetField(in, TIFFTAG_BITSPERSAMPLE, &bps); if( bps == 0 ) { TIFFError(TIFFFileName(in), "Error, cannot read BitsPerSample"); status = 0; goto done; } assert( bps % 8 == 0 ); bytes_per_sample = bps/8; for (row = 0; row < imagelength; row += tl) { uint32 nrow = (row+tl > imagelength) ? 
imagelength-row : tl; uint32 colb = 0; uint32 col; for (col = 0; col < imagewidth; col += tw) { tsample_t s; for (s = 0; s < spp; s++) { if (TIFFReadTile(in, tilebuf, col, row, 0, s) < 0 && !ignore) { TIFFError(TIFFFileName(in), "Error, can't read tile at %lu %lu, " "sample %lu", (unsigned long) col, (unsigned long) row, (unsigned long) s); status = 0; goto done; } /* * Tile is clipped horizontally. Calculate * visible portion and skewing factors. */ if (colb + tilew*spp > imagew) { uint32 width = imagew - colb; int oskew = tilew*spp - width; cpSeparateBufToContigBuf( bufp+colb+s*bytes_per_sample, tilebuf, nrow, width/(spp*bytes_per_sample), oskew + iskew, oskew/spp, spp, bytes_per_sample); } else cpSeparateBufToContigBuf( bufp+colb+s*bytes_per_sample, tilebuf, nrow, tw, iskew, 0, spp, bytes_per_sample); } colb += tilew*spp; } bufp += imagew * nrow; } done: _TIFFfree(tilebuf); return status; } DECLAREwriteFunc(writeBufferToContigStrips) { uint32 row, rowsperstrip; tstrip_t strip = 0; (void) imagewidth; (void) spp; (void) TIFFGetFieldDefaulted(out, TIFFTAG_ROWSPERSTRIP, &rowsperstrip); for (row = 0; row < imagelength; row += rowsperstrip) { uint32 nrows = (row+rowsperstrip > imagelength) ? imagelength-row : rowsperstrip; tsize_t stripsize = TIFFVStripSize(out, nrows); if (TIFFWriteEncodedStrip(out, strip++, buf, stripsize) < 0) { TIFFError(TIFFFileName(out), "Error, can't write strip %u", strip - 1); return 0; } buf += stripsize; } return 1; } DECLAREwriteFunc(writeBufferToSeparateStrips) { uint32 rowsize = imagewidth * spp; uint32 rowsperstrip; tsize_t stripsize = TIFFStripSize(out); tdata_t obuf; tstrip_t strip = 0; tsample_t s; obuf = _TIFFmalloc(stripsize); if (obuf == NULL) return (0); _TIFFmemset(obuf, 0, stripsize); (void) TIFFGetFieldDefaulted(out, TIFFTAG_ROWSPERSTRIP, &rowsperstrip); for (s = 0; s < spp; s++) { uint32 row; for (row = 0; row < imagelength; row += rowsperstrip) { uint32 nrows = (row+rowsperstrip > imagelength) ? 
imagelength-row : rowsperstrip; tsize_t stripsize = TIFFVStripSize(out, nrows); cpContigBufToSeparateBuf( obuf, (uint8*) buf + row*rowsize + s, nrows, imagewidth, 0, 0, spp, 1); if (TIFFWriteEncodedStrip(out, strip++, obuf, stripsize) < 0) { TIFFError(TIFFFileName(out), "Error, can't write strip %u", strip - 1); _TIFFfree(obuf); return 0; } } } _TIFFfree(obuf); return 1; } DECLAREwriteFunc(writeBufferToContigTiles) { uint32 imagew = TIFFScanlineSize(out); uint32 tilew = TIFFTileRowSize(out); int iskew = imagew - tilew; tsize_t tilesize = TIFFTileSize(out); tdata_t obuf; uint8* bufp = (uint8*) buf; uint32 tl, tw; uint32 row; (void) spp; obuf = _TIFFmalloc(TIFFTileSize(out)); if (obuf == NULL) return 0; _TIFFmemset(obuf, 0, tilesize); (void) TIFFGetField(out, TIFFTAG_TILELENGTH, &tl); (void) TIFFGetField(out, TIFFTAG_TILEWIDTH, &tw); for (row = 0; row < imagelength; row += tilelength) { uint32 nrow = (row+tl > imagelength) ? imagelength-row : tl; uint32 colb = 0; uint32 col; for (col = 0; col < imagewidth && colb < imagew; col += tw) { /* * Tile is clipped horizontally. Calculate * visible portion and skewing factors. 
*/ if (colb + tilew > imagew) { uint32 width = imagew - colb; int oskew = tilew - width; cpStripToTile(obuf, bufp + colb, nrow, width, oskew, oskew + iskew); } else cpStripToTile(obuf, bufp + colb, nrow, tilew, 0, iskew); if (TIFFWriteTile(out, obuf, col, row, 0, 0) < 0) { TIFFError(TIFFFileName(out), "Error, can't write tile at %lu %lu", (unsigned long) col, (unsigned long) row); _TIFFfree(obuf); return 0; } colb += tilew; } bufp += nrow * imagew; } _TIFFfree(obuf); return 1; } DECLAREwriteFunc(writeBufferToSeparateTiles) { uint32 imagew = TIFFScanlineSize(out); tsize_t tilew = TIFFTileRowSize(out); uint32 iimagew = TIFFRasterScanlineSize(out); int iskew = iimagew - tilew*spp; tsize_t tilesize = TIFFTileSize(out); tdata_t obuf; uint8* bufp = (uint8*) buf; uint32 tl, tw; uint32 row; uint16 bps = 0, bytes_per_sample; obuf = _TIFFmalloc(TIFFTileSize(out)); if (obuf == NULL) return 0; _TIFFmemset(obuf, 0, tilesize); (void) TIFFGetField(out, TIFFTAG_TILELENGTH, &tl); (void) TIFFGetField(out, TIFFTAG_TILEWIDTH, &tw); (void) TIFFGetField(out, TIFFTAG_BITSPERSAMPLE, &bps); if( bps == 0 ) { TIFFError(TIFFFileName(out), "Error, cannot read BitsPerSample"); _TIFFfree(obuf); return 0; } assert( bps % 8 == 0 ); bytes_per_sample = bps/8; for (row = 0; row < imagelength; row += tl) { uint32 nrow = (row+tl > imagelength) ? imagelength-row : tl; uint32 colb = 0; uint32 col; for (col = 0; col < imagewidth; col += tw) { tsample_t s; for (s = 0; s < spp; s++) { /* * Tile is clipped horizontally. Calculate * visible portion and skewing factors. 
*/ if (colb + tilew > imagew) { uint32 width = (imagew - colb); int oskew = tilew - width; cpContigBufToSeparateBuf(obuf, bufp + (colb*spp) + s, nrow, width/bytes_per_sample, oskew, (oskew*spp)+iskew, spp, bytes_per_sample); } else cpContigBufToSeparateBuf(obuf, bufp + (colb*spp) + s, nrow, tilewidth, 0, iskew, spp, bytes_per_sample); if (TIFFWriteTile(out, obuf, col, row, 0, s) < 0) { TIFFError(TIFFFileName(out), "Error, can't write tile at %lu %lu " "sample %lu", (unsigned long) col, (unsigned long) row, (unsigned long) s); _TIFFfree(obuf); return 0; } } colb += tilew; } bufp += nrow * iimagew; } _TIFFfree(obuf); return 1; } /* * Contig strips -> contig tiles. */ DECLAREcpFunc(cpContigStrips2ContigTiles) { return cpImage(in, out, readContigStripsIntoBuffer, writeBufferToContigTiles, imagelength, imagewidth, spp); } /* * Contig strips -> separate tiles. */ DECLAREcpFunc(cpContigStrips2SeparateTiles) { return cpImage(in, out, readContigStripsIntoBuffer, writeBufferToSeparateTiles, imagelength, imagewidth, spp); } /* * Separate strips -> contig tiles. */ DECLAREcpFunc(cpSeparateStrips2ContigTiles) { return cpImage(in, out, readSeparateStripsIntoBuffer, writeBufferToContigTiles, imagelength, imagewidth, spp); } /* * Separate strips -> separate tiles. */ DECLAREcpFunc(cpSeparateStrips2SeparateTiles) { return cpImage(in, out, readSeparateStripsIntoBuffer, writeBufferToSeparateTiles, imagelength, imagewidth, spp); } /* * Contig strips -> contig tiles. */ DECLAREcpFunc(cpContigTiles2ContigTiles) { return cpImage(in, out, readContigTilesIntoBuffer, writeBufferToContigTiles, imagelength, imagewidth, spp); } /* * Contig tiles -> separate tiles. */ DECLAREcpFunc(cpContigTiles2SeparateTiles) { return cpImage(in, out, readContigTilesIntoBuffer, writeBufferToSeparateTiles, imagelength, imagewidth, spp); } /* * Separate tiles -> contig tiles. 
*/
/*
 * Thin wrappers: each pairs a read routine with a write routine via
 * cpImage(), which stages the whole image in one decoded buffer.
 */
DECLAREcpFunc(cpSeparateTiles2ContigTiles)
{
	return cpImage(in, out,
	    readSeparateTilesIntoBuffer,
	    writeBufferToContigTiles,
	    imagelength, imagewidth, spp);
}

/*
 * Separate tiles -> separate tiles (tile dimension change).
 */
DECLAREcpFunc(cpSeparateTiles2SeparateTiles)
{
	return cpImage(in, out,
	    readSeparateTilesIntoBuffer,
	    writeBufferToSeparateTiles,
	    imagelength, imagewidth, spp);
}

/*
 * Contig tiles -> contig strips.
 * (The original comment here said "contig tiles (tile dimension change)",
 * which mislabeled this wrapper: it writes strips, not tiles.)
 */
DECLAREcpFunc(cpContigTiles2ContigStrips)
{
	return cpImage(in, out,
	    readContigTilesIntoBuffer,
	    writeBufferToContigStrips,
	    imagelength, imagewidth, spp);
}

/*
 * Contig tiles -> separate strips.
 */
DECLAREcpFunc(cpContigTiles2SeparateStrips)
{
	return cpImage(in, out,
	    readContigTilesIntoBuffer,
	    writeBufferToSeparateStrips,
	    imagelength, imagewidth, spp);
}

/*
 * Separate tiles -> contig strips.
 */
DECLAREcpFunc(cpSeparateTiles2ContigStrips)
{
	return cpImage(in, out,
	    readSeparateTilesIntoBuffer,
	    writeBufferToContigStrips,
	    imagelength, imagewidth, spp);
}

/*
 * Separate tiles -> separate strips.
 */
DECLAREcpFunc(cpSeparateTiles2SeparateStrips)
{
	return cpImage(in, out,
	    readSeparateTilesIntoBuffer,
	    writeBufferToSeparateStrips,
	    imagelength, imagewidth, spp);
}

/*
 * Select the appropriate copy function to use.
*/
/*
 * Choose the strip/tile copy routine for this input/output pair.
 *
 * The selection key packs five facts into one integer: input planar
 * config, output planar config, whether input is tiled, whether output
 * is tiled, and whether the data can be moved chunk-at-a-time
 * ("bychunk") because the chunk geometry matches.
 *
 * Returns the copy function, or NULL (with a message on stderr) when
 * the combination is unsupported.
 */
static copyFunc
pickCopyFunc(TIFF* in, TIFF* out, uint16 bitspersample, uint16 samplesperpixel)
{
	uint16 shortv;
	uint32 w, l, tw, tl;
	int bychunk;

	/* NOTE(review): shortv relies on TIFFGetField succeeding; libtiff
	 * appears to always carry a PlanarConfig value, but this is not
	 * TIFFGetFieldDefaulted — confirm the tag is guaranteed present. */
	(void) TIFFGetField(in, TIFFTAG_PLANARCONFIG, &shortv);
	if (shortv != config && bitspersample != 8 && samplesperpixel > 1) {
		fprintf(stderr,
		    "%s: Cannot handle different planar configuration w/ bits/sample != 8\n",
		    TIFFFileName(in));
		return (NULL);
	}
	TIFFGetField(in, TIFFTAG_IMAGEWIDTH, &w);
	TIFFGetField(in, TIFFTAG_IMAGELENGTH, &l);
	if (!(TIFFIsTiled(out) || TIFFIsTiled(in))) {
		uint32 irps = (uint32) -1L;
		TIFFGetField(in, TIFFTAG_ROWSPERSTRIP, &irps);
		/* if biased, force decoded copying to allow image subtraction */
		bychunk = !bias && (rowsperstrip == irps);
	}else{  /* either in or out is tiled */
		if (bias) {
			fprintf(stderr,
			    "%s: Cannot handle tiled configuration w/bias image\n",
			    TIFFFileName(in));
			return (NULL);
		}
		if (TIFFIsTiled(out)) {
			if (!TIFFGetField(in, TIFFTAG_TILEWIDTH, &tw))
				tw = w;
			if (!TIFFGetField(in, TIFFTAG_TILELENGTH, &tl))
				tl = l;
			/* chunk copy only if tile geometry is unchanged */
			bychunk = (tw == tilewidth && tl == tilelength);
		} else {  /* out's not, so in must be tiled */
			TIFFGetField(in, TIFFTAG_TILEWIDTH, &tw);
			TIFFGetField(in, TIFFTAG_TILELENGTH, &tl);
			bychunk = (tw == w && tl == rowsperstrip);
		}
	}
#define	T 1
#define	F 0
/* pack(in-config, out-config, in-tiled, out-tiled, bychunk) -> switch key */
#define pack(a,b,c,d,e)	((long)(((a)<<11)|((b)<<3)|((c)<<2)|((d)<<1)|(e)))
	switch(pack(shortv,config,TIFFIsTiled(in),TIFFIsTiled(out),bychunk)) {
		/* Strips -> Tiles */
	case pack(PLANARCONFIG_CONTIG, PLANARCONFIG_CONTIG, F,T,F):
	case pack(PLANARCONFIG_CONTIG, PLANARCONFIG_CONTIG, F,T,T):
		return cpContigStrips2ContigTiles;
	case pack(PLANARCONFIG_CONTIG, PLANARCONFIG_SEPARATE, F,T,F):
	case pack(PLANARCONFIG_CONTIG, PLANARCONFIG_SEPARATE, F,T,T):
		return cpContigStrips2SeparateTiles;
	case pack(PLANARCONFIG_SEPARATE, PLANARCONFIG_CONTIG, F,T,F):
	case pack(PLANARCONFIG_SEPARATE, PLANARCONFIG_CONTIG, F,T,T):
		return cpSeparateStrips2ContigTiles;
	case pack(PLANARCONFIG_SEPARATE, PLANARCONFIG_SEPARATE, F,T,F):
	case pack(PLANARCONFIG_SEPARATE, PLANARCONFIG_SEPARATE, F,T,T):
		return cpSeparateStrips2SeparateTiles;
		/* Tiles -> Tiles */
	case pack(PLANARCONFIG_CONTIG, PLANARCONFIG_CONTIG, T,T,F):
	case pack(PLANARCONFIG_CONTIG, PLANARCONFIG_CONTIG, T,T,T):
		return cpContigTiles2ContigTiles;
	case pack(PLANARCONFIG_CONTIG, PLANARCONFIG_SEPARATE, T,T,F):
	case pack(PLANARCONFIG_CONTIG, PLANARCONFIG_SEPARATE, T,T,T):
		return cpContigTiles2SeparateTiles;
	case pack(PLANARCONFIG_SEPARATE, PLANARCONFIG_CONTIG, T,T,F):
	case pack(PLANARCONFIG_SEPARATE, PLANARCONFIG_CONTIG, T,T,T):
		return cpSeparateTiles2ContigTiles;
	case pack(PLANARCONFIG_SEPARATE, PLANARCONFIG_SEPARATE, T,T,F):
	case pack(PLANARCONFIG_SEPARATE, PLANARCONFIG_SEPARATE, T,T,T):
		return cpSeparateTiles2SeparateTiles;
		/* Tiles -> Strips */
	case pack(PLANARCONFIG_CONTIG, PLANARCONFIG_CONTIG, T,F,F):
	case pack(PLANARCONFIG_CONTIG, PLANARCONFIG_CONTIG, T,F,T):
		return cpContigTiles2ContigStrips;
	case pack(PLANARCONFIG_CONTIG, PLANARCONFIG_SEPARATE, T,F,F):
	case pack(PLANARCONFIG_CONTIG, PLANARCONFIG_SEPARATE, T,F,T):
		return cpContigTiles2SeparateStrips;
	case pack(PLANARCONFIG_SEPARATE, PLANARCONFIG_CONTIG, T,F,F):
	case pack(PLANARCONFIG_SEPARATE, PLANARCONFIG_CONTIG, T,F,T):
		return cpSeparateTiles2ContigStrips;
	case pack(PLANARCONFIG_SEPARATE, PLANARCONFIG_SEPARATE, T,F,F):
	case pack(PLANARCONFIG_SEPARATE, PLANARCONFIG_SEPARATE, T,F,T):
		return cpSeparateTiles2SeparateStrips;
		/* Strips -> Strips */
	case pack(PLANARCONFIG_CONTIG, PLANARCONFIG_CONTIG, F,F,F):
		/* bias subtraction only works on the decoded row path */
		return bias ? cpBiasedContig2Contig : cpContig2ContigByRow;
	case pack(PLANARCONFIG_CONTIG, PLANARCONFIG_CONTIG, F,F,T):
		return cpDecodedStrips;
	case pack(PLANARCONFIG_CONTIG, PLANARCONFIG_SEPARATE, F,F,F):
	case pack(PLANARCONFIG_CONTIG, PLANARCONFIG_SEPARATE, F,F,T):
		return cpContig2SeparateByRow;
	case pack(PLANARCONFIG_SEPARATE, PLANARCONFIG_CONTIG, F,F,F):
	case pack(PLANARCONFIG_SEPARATE, PLANARCONFIG_CONTIG, F,F,T):
		return cpSeparate2ContigByRow;
	case pack(PLANARCONFIG_SEPARATE, PLANARCONFIG_SEPARATE, F,F,F):
	case pack(PLANARCONFIG_SEPARATE, PLANARCONFIG_SEPARATE, F,F,T):
		return cpSeparate2SeparateByRow;
	}
#undef pack
#undef F
#undef T
	fprintf(stderr,
	    "tiffcp: %s: Don't know how to copy/convert image.\n",
	    TIFFFileName(in));
	return (NULL);
}

/* vim: set ts=8 sts=8 sw=8 noet: */
/*
 * Local Variables:
 * mode: c
 * c-basic-offset: 8
 * fill-column: 78
 * End:
 */
/* $Id$ */ /* * Copyright (c) 1988-1997 Sam Leffler * Copyright (c) 1991-1997 Silicon Graphics, Inc. * * Revised: 2/18/01 BAR -- added syntax for extracting single images from * multi-image TIFF files. * * New syntax is: sourceFileName,image# * * image# ranges from 0..<n-1> where n is the # of images in the file. * There may be no white space between the comma and the filename or * image number. * * Example: tiffcp source.tif,1 destination.tif * * Copies the 2nd image in source.tif to the destination. * ***** * Permission to use, copy, modify, distribute, and sell this software and * its documentation for any purpose is hereby granted without fee, provided * that (i) the above copyright notices and this permission notice appear in * all copies of the software and related documentation, and (ii) the names of * Sam Leffler and Silicon Graphics may not be used in any advertising or * publicity relating to the software without the specific, prior written * permission of Sam Leffler and Silicon Graphics. * * THE SOFTWARE IS PROVIDED "AS-IS" AND WITHOUT WARRANTY OF ANY KIND, * EXPRESS, IMPLIED OR OTHERWISE, INCLUDING WITHOUT LIMITATION, ANY * WARRANTY OF MERCHANTABILITY OR FITNESS FOR A PARTICULAR PURPOSE. * * IN NO EVENT SHALL SAM LEFFLER OR SILICON GRAPHICS BE LIABLE FOR * ANY SPECIAL, INCIDENTAL, INDIRECT OR CONSEQUENTIAL DAMAGES OF ANY KIND, * OR ANY DAMAGES WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, * WHETHER OR NOT ADVISED OF THE POSSIBILITY OF DAMAGE, AND ON ANY THEORY OF * LIABILITY, ARISING OUT OF OR IN CONNECTION WITH THE USE OR PERFORMANCE * OF THIS SOFTWARE. 
*/ #include "tif_config.h" #include <stdio.h> #include <stdlib.h> #include <string.h> #include <ctype.h> #include <assert.h> #ifdef HAVE_UNISTD_H # include <unistd.h> #endif #include "tiffio.h" #ifndef HAVE_GETOPT extern int getopt(int, char**, char*); #endif #if defined(VMS) # define unlink delete #endif #define streq(a,b) (strcmp(a,b) == 0) #define strneq(a,b,n) (strncmp(a,b,n) == 0) #define TRUE 1 #define FALSE 0 static int outtiled = -1; static uint32 tilewidth; static uint32 tilelength; static uint16 config; static uint16 compression; static uint16 predictor; static int preset; static uint16 fillorder; static uint16 orientation; static uint32 rowsperstrip; static uint32 g3opts; static int ignore = FALSE; /* if true, ignore read errors */ static uint32 defg3opts = (uint32) -1; static int quality = 75; /* JPEG quality */ static int jpegcolormode = JPEGCOLORMODE_RGB; static uint16 defcompression = (uint16) -1; static uint16 defpredictor = (uint16) -1; static int defpreset = -1; static int tiffcp(TIFF*, TIFF*); static int processCompressOptions(char*); static void usage(void); static char comma = ','; /* (default) comma separator character */ static TIFF* bias = NULL; static int pageNum = 0; static int pageInSeq = 0; static int nextSrcImage (TIFF *tif, char **imageSpec) /* seek to the next image specified in *imageSpec returns 1 if success, 0 if no more images to process *imageSpec=NULL if subsequent images should be processed in sequence */ { if (**imageSpec == comma) { /* if not @comma, we've done all images */ char *start = *imageSpec + 1; tdir_t nextImage = (tdir_t)strtol(start, imageSpec, 0); if (start == *imageSpec) nextImage = TIFFCurrentDirectory (tif); if (**imageSpec) { if (**imageSpec == comma) { /* a trailing comma denotes remaining images in sequence */ if ((*imageSpec)[1] == '\0') *imageSpec = NULL; }else{ fprintf (stderr, "Expected a %c separated image # list after %s\n", comma, TIFFFileName (tif)); exit (-4); /* syntax error */ } } if 
(TIFFSetDirectory (tif, nextImage)) return 1; fprintf (stderr, "%s%c%d not found!\n", TIFFFileName(tif), comma, (int) nextImage); } return 0; } static TIFF* openSrcImage (char **imageSpec) /* imageSpec points to a pointer to a filename followed by optional ,image#'s Open the TIFF file and assign *imageSpec to either NULL if there are no images specified, or a pointer to the next image number text */ { TIFF *tif; char *fn = *imageSpec; *imageSpec = strchr (fn, comma); if (*imageSpec) { /* there is at least one image number specifier */ **imageSpec = '\0'; tif = TIFFOpen (fn, "r"); /* but, ignore any single trailing comma */ if (!(*imageSpec)[1]) {*imageSpec = NULL; return tif;} if (tif) { **imageSpec = comma; /* replace the comma */ if (!nextSrcImage(tif, imageSpec)) { TIFFClose (tif); tif = NULL; } } }else tif = TIFFOpen (fn, "r"); return tif; } int main(int argc, char* argv[]) { uint16 defconfig = (uint16) -1; uint16 deffillorder = 0; uint32 deftilewidth = (uint32) -1; uint32 deftilelength = (uint32) -1; uint32 defrowsperstrip = (uint32) 0; uint64 diroff = 0; TIFF* in; TIFF* out; char mode[10]; char* mp = mode; int c; #if !HAVE_DECL_OPTARG extern int optind; extern char* optarg; #endif *mp++ = 'w'; *mp = '\0'; while ((c = getopt(argc, argv, ",:b:c:f:l:o:p:r:w:aistBLMC8x")) != -1) switch (c) { case ',': if (optarg[0] != '=') usage(); comma = optarg[1]; break; case 'b': /* this file is bias image subtracted from others */ if (bias) { fputs ("Only 1 bias image may be specified\n", stderr); exit (-2); } { uint16 samples = (uint16) -1; char **biasFn = &optarg; bias = openSrcImage (biasFn); if (!bias) exit (-5); if (TIFFIsTiled (bias)) { fputs ("Bias image must be organized in strips\n", stderr); exit (-7); } TIFFGetField(bias, TIFFTAG_SAMPLESPERPIXEL, &samples); if (samples != 1) { fputs ("Bias image must be monochrome\n", stderr); exit (-7); } } break; case 'a': /* append to output */ mode[0] = 'a'; break; case 'c': /* compression scheme */ if 
(!processCompressOptions(optarg)) usage(); break; case 'f': /* fill order */ if (streq(optarg, "lsb2msb")) deffillorder = FILLORDER_LSB2MSB; else if (streq(optarg, "msb2lsb")) deffillorder = FILLORDER_MSB2LSB; else usage(); break; case 'i': /* ignore errors */ ignore = TRUE; break; case 'l': /* tile length */ outtiled = TRUE; deftilelength = atoi(optarg); break; case 'o': /* initial directory offset */ diroff = strtoul(optarg, NULL, 0); break; case 'p': /* planar configuration */ if (streq(optarg, "separate")) defconfig = PLANARCONFIG_SEPARATE; else if (streq(optarg, "contig")) defconfig = PLANARCONFIG_CONTIG; else usage(); break; case 'r': /* rows/strip */ defrowsperstrip = atol(optarg); break; case 's': /* generate stripped output */ outtiled = FALSE; break; case 't': /* generate tiled output */ outtiled = TRUE; break; case 'w': /* tile width */ outtiled = TRUE; deftilewidth = atoi(optarg); break; case 'B': *mp++ = 'b'; *mp = '\0'; break; case 'L': *mp++ = 'l'; *mp = '\0'; break; case 'M': *mp++ = 'm'; *mp = '\0'; break; case 'C': *mp++ = 'c'; *mp = '\0'; break; case '8': *mp++ = '8'; *mp = '\0'; break; case 'x': pageInSeq = 1; break; case '?': usage(); /*NOTREACHED*/ } if (argc - optind < 2) usage(); out = TIFFOpen(argv[argc-1], mode); if (out == NULL) return (-2); if ((argc - optind) == 2) pageNum = -1; for (; optind < argc-1 ; optind++) { char *imageCursor = argv[optind]; in = openSrcImage (&imageCursor); if (in == NULL) { (void) TIFFClose(out); return (-3); } if (diroff != 0 && !TIFFSetSubDirectory(in, diroff)) { TIFFError(TIFFFileName(in), "Error, setting subdirectory at " TIFF_UINT64_FORMAT, diroff); (void) TIFFClose(in); (void) TIFFClose(out); return (1); } for (;;) { config = defconfig; compression = defcompression; predictor = defpredictor; preset = defpreset; fillorder = deffillorder; rowsperstrip = defrowsperstrip; tilewidth = deftilewidth; tilelength = deftilelength; g3opts = defg3opts; if (!tiffcp(in, out) || !TIFFWriteDirectory(out)) { (void) 
TIFFClose(in); (void) TIFFClose(out); return (1); } if (imageCursor) { /* seek next image directory */ if (!nextSrcImage(in, &imageCursor)) break; }else if (!TIFFReadDirectory(in)) break; } (void) TIFFClose(in); } (void) TIFFClose(out); return (0); } static void processZIPOptions(char* cp) { if ( (cp = strchr(cp, ':')) ) { do { cp++; if (isdigit((int)*cp)) defpredictor = atoi(cp); else if (*cp == 'p') defpreset = atoi(++cp); else usage(); } while( (cp = strchr(cp, ':')) ); } } static void processG3Options(char* cp) { if( (cp = strchr(cp, ':')) ) { if (defg3opts == (uint32) -1) defg3opts = 0; do { cp++; if (strneq(cp, "1d", 2)) defg3opts &= ~GROUP3OPT_2DENCODING; else if (strneq(cp, "2d", 2)) defg3opts |= GROUP3OPT_2DENCODING; else if (strneq(cp, "fill", 4)) defg3opts |= GROUP3OPT_FILLBITS; else usage(); } while( (cp = strchr(cp, ':')) ); } } static int processCompressOptions(char* opt) { if (streq(opt, "none")) { defcompression = COMPRESSION_NONE; } else if (streq(opt, "packbits")) { defcompression = COMPRESSION_PACKBITS; } else if (strneq(opt, "jpeg", 4)) { char* cp = strchr(opt, ':'); defcompression = COMPRESSION_JPEG; while( cp ) { if (isdigit((int)cp[1])) quality = atoi(cp+1); else if (cp[1] == 'r' ) jpegcolormode = JPEGCOLORMODE_RAW; else usage(); cp = strchr(cp+1,':'); } } else if (strneq(opt, "g3", 2)) { processG3Options(opt); defcompression = COMPRESSION_CCITTFAX3; } else if (streq(opt, "g4")) { defcompression = COMPRESSION_CCITTFAX4; } else if (strneq(opt, "lzw", 3)) { char* cp = strchr(opt, ':'); if (cp) defpredictor = atoi(cp+1); defcompression = COMPRESSION_LZW; } else if (strneq(opt, "zip", 3)) { processZIPOptions(opt); defcompression = COMPRESSION_ADOBE_DEFLATE; } else if (strneq(opt, "lzma", 4)) { processZIPOptions(opt); defcompression = COMPRESSION_LZMA; } else if (strneq(opt, "jbig", 4)) { defcompression = COMPRESSION_JBIG; } else if (strneq(opt, "sgilog", 6)) { defcompression = COMPRESSION_SGILOG; } else return (0); return (1); } char* stuff[] = { 
"usage: tiffcp [options] input... output", "where options are:", " -a append to output instead of overwriting", " -o offset set initial directory offset", " -p contig pack samples contiguously (e.g. RGBRGB...)", " -p separate store samples separately (e.g. RRR...GGG...BBB...)", " -s write output in strips", " -t write output in tiles", " -x force the merged tiff pages in sequence", " -8 write BigTIFF instead of default ClassicTIFF", " -B write big-endian instead of native byte order", " -L write little-endian instead of native byte order", " -M disable use of memory-mapped files", " -C disable strip chopping", " -i ignore read errors", " -b file[,#] bias (dark) monochrome image to be subtracted from all others", " -,=% use % rather than , to separate image #'s (per Note below)", "", " -r # make each strip have no more than # rows", " -w # set output tile width (pixels)", " -l # set output tile length (pixels)", "", " -f lsb2msb force lsb-to-msb FillOrder for output", " -f msb2lsb force msb-to-lsb FillOrder for output", "", " -c lzw[:opts] compress output with Lempel-Ziv & Welch encoding", " -c zip[:opts] compress output with deflate encoding", " -c lzma[:opts] compress output with LZMA2 encoding", " -c jpeg[:opts] compress output with JPEG encoding", " -c jbig compress output with ISO JBIG encoding", " -c packbits compress output with packbits encoding", " -c g3[:opts] compress output with CCITT Group 3 encoding", " -c g4 compress output with CCITT Group 4 encoding", " -c sgilog compress output with SGILOG encoding", " -c none use no compression algorithm on output", "", "Group 3 options:", " 1d use default CCITT Group 3 1D-encoding", " 2d use optional CCITT Group 3 2D-encoding", " fill byte-align EOL codes", "For example, -c g3:2d:fill to get G3-2D-encoded data with byte-aligned EOLs", "", "JPEG options:", " # set compression quality level (0-100, default 75)", " r output color image as RGB rather than YCbCr", "For example, -c jpeg:r:50 to get JPEG-encoded RGB 
data with 50% comp. quality", "", "LZW, Deflate (ZIP) and LZMA2 options:", " # set predictor value", " p# set compression level (preset)", "For example, -c lzw:2 to get LZW-encoded data with horizontal differencing,", "-c zip:3:p9 for Deflate encoding with maximum compression level and floating", "point predictor.", "", "Note that input filenames may be of the form filename,x,y,z", "where x, y, and z specify image numbers in the filename to copy.", "example: tiffcp -c none -b esp.tif,1 esp.tif,0 test.tif", " subtract 2nd image in esp.tif from 1st yielding uncompressed result test.tif", NULL }; static void usage(void) { char buf[BUFSIZ]; int i; setbuf(stderr, buf); fprintf(stderr, "%s\n\n", TIFFGetVersion()); for (i = 0; stuff[i] != NULL; i++) fprintf(stderr, "%s\n", stuff[i]); exit(-1); } #define CopyField(tag, v) \ if (TIFFGetField(in, tag, &v)) TIFFSetField(out, tag, v) #define CopyField2(tag, v1, v2) \ if (TIFFGetField(in, tag, &v1, &v2)) TIFFSetField(out, tag, v1, v2) #define CopyField3(tag, v1, v2, v3) \ if (TIFFGetField(in, tag, &v1, &v2, &v3)) TIFFSetField(out, tag, v1, v2, v3) #define CopyField4(tag, v1, v2, v3, v4) \ if (TIFFGetField(in, tag, &v1, &v2, &v3, &v4)) TIFFSetField(out, tag, v1, v2, v3, v4) static void cpTag(TIFF* in, TIFF* out, uint16 tag, uint16 count, TIFFDataType type) { switch (type) { case TIFF_SHORT: if (count == 1) { uint16 shortv; CopyField(tag, shortv); } else if (count == 2) { uint16 shortv1, shortv2; CopyField2(tag, shortv1, shortv2); } else if (count == 4) { uint16 *tr, *tg, *tb, *ta; CopyField4(tag, tr, tg, tb, ta); } else if (count == (uint16) -1) { uint16 shortv1; uint16* shortav; CopyField2(tag, shortv1, shortav); } break; case TIFF_LONG: { uint32 longv; CopyField(tag, longv); } break; case TIFF_RATIONAL: if (count == 1) { float floatv; CopyField(tag, floatv); } else if (count == (uint16) -1) { float* floatav; CopyField(tag, floatav); } break; case TIFF_ASCII: { char* stringv; CopyField(tag, stringv); } break; case TIFF_DOUBLE: 
if (count == 1) { double doublev; CopyField(tag, doublev); } else if (count == (uint16) -1) { double* doubleav; CopyField(tag, doubleav); } break; default: TIFFError(TIFFFileName(in), "Data type %d is not supported, tag %d skipped.", tag, type); } } static struct cpTag { uint16 tag; uint16 count; TIFFDataType type; } tags[] = { { TIFFTAG_SUBFILETYPE, 1, TIFF_LONG }, { TIFFTAG_THRESHHOLDING, 1, TIFF_SHORT }, { TIFFTAG_DOCUMENTNAME, 1, TIFF_ASCII }, { TIFFTAG_IMAGEDESCRIPTION, 1, TIFF_ASCII }, { TIFFTAG_MAKE, 1, TIFF_ASCII }, { TIFFTAG_MODEL, 1, TIFF_ASCII }, { TIFFTAG_MINSAMPLEVALUE, 1, TIFF_SHORT }, { TIFFTAG_MAXSAMPLEVALUE, 1, TIFF_SHORT }, { TIFFTAG_XRESOLUTION, 1, TIFF_RATIONAL }, { TIFFTAG_YRESOLUTION, 1, TIFF_RATIONAL }, { TIFFTAG_PAGENAME, 1, TIFF_ASCII }, { TIFFTAG_XPOSITION, 1, TIFF_RATIONAL }, { TIFFTAG_YPOSITION, 1, TIFF_RATIONAL }, { TIFFTAG_RESOLUTIONUNIT, 1, TIFF_SHORT }, { TIFFTAG_SOFTWARE, 1, TIFF_ASCII }, { TIFFTAG_DATETIME, 1, TIFF_ASCII }, { TIFFTAG_ARTIST, 1, TIFF_ASCII }, { TIFFTAG_HOSTCOMPUTER, 1, TIFF_ASCII }, { TIFFTAG_WHITEPOINT, (uint16) -1, TIFF_RATIONAL }, { TIFFTAG_PRIMARYCHROMATICITIES,(uint16) -1,TIFF_RATIONAL }, { TIFFTAG_HALFTONEHINTS, 2, TIFF_SHORT }, { TIFFTAG_INKSET, 1, TIFF_SHORT }, { TIFFTAG_DOTRANGE, 2, TIFF_SHORT }, { TIFFTAG_TARGETPRINTER, 1, TIFF_ASCII }, { TIFFTAG_SAMPLEFORMAT, 1, TIFF_SHORT }, { TIFFTAG_YCBCRCOEFFICIENTS, (uint16) -1,TIFF_RATIONAL }, { TIFFTAG_YCBCRSUBSAMPLING, 2, TIFF_SHORT }, { TIFFTAG_YCBCRPOSITIONING, 1, TIFF_SHORT }, { TIFFTAG_REFERENCEBLACKWHITE, (uint16) -1,TIFF_RATIONAL }, { TIFFTAG_EXTRASAMPLES, (uint16) -1, TIFF_SHORT }, { TIFFTAG_SMINSAMPLEVALUE, 1, TIFF_DOUBLE }, { TIFFTAG_SMAXSAMPLEVALUE, 1, TIFF_DOUBLE }, { TIFFTAG_STONITS, 1, TIFF_DOUBLE }, }; #define NTAGS (sizeof (tags) / sizeof (tags[0])) #define CopyTag(tag, count, type) cpTag(in, out, tag, count, type) typedef int (*copyFunc) (TIFF* in, TIFF* out, uint32 l, uint32 w, uint16 samplesperpixel); static copyFunc pickCopyFunc(TIFF*, TIFF*, 
uint16, uint16);	/* PODD */

/*
 * Copy the current directory of `in' to `out': propagate/override tags
 * according to the command-line globals (compression, fillorder,
 * tile/strip geometry, etc.), then dispatch to the copy function chosen
 * by pickCopyFunc().  Returns nonzero on success, FALSE on failure.
 */
static int
tiffcp(TIFF* in, TIFF* out)
{
	uint16 bitspersample, samplesperpixel = 1;
	uint16 input_compression, input_photometric = PHOTOMETRIC_MINISBLACK;
	copyFunc cf;
	uint32 width, length;
	struct cpTag* p;

	CopyField(TIFFTAG_IMAGEWIDTH, width);
	CopyField(TIFFTAG_IMAGELENGTH, length);
	CopyField(TIFFTAG_BITSPERSAMPLE, bitspersample);
	CopyField(TIFFTAG_SAMPLESPERPIXEL, samplesperpixel);
	/* -c on the command line overrides the input file's compression */
	if (compression != (uint16)-1)
		TIFFSetField(out, TIFFTAG_COMPRESSION, compression);
	else
		CopyField(TIFFTAG_COMPRESSION, compression);
	TIFFGetFieldDefaulted(in, TIFFTAG_COMPRESSION, &input_compression);
	TIFFGetFieldDefaulted(in, TIFFTAG_PHOTOMETRIC, &input_photometric);
	if (input_compression == COMPRESSION_JPEG) {
		/* Force conversion to RGB */
		TIFFSetField(in, TIFFTAG_JPEGCOLORMODE, JPEGCOLORMODE_RGB);
	} else if (input_photometric == PHOTOMETRIC_YCBCR) {
		/* Otherwise, can't handle subsampled input */
		uint16 subsamplinghor,subsamplingver;

		TIFFGetFieldDefaulted(in, TIFFTAG_YCBCRSUBSAMPLING,
				      &subsamplinghor, &subsamplingver);
		if (subsamplinghor!=1 || subsamplingver!=1) {
			fprintf(stderr, "tiffcp: %s: Can't copy/convert subsampled image.\n",
				TIFFFileName(in));
			return FALSE;
		}
	}
	/* Pick the output photometric to match the chosen compression. */
	if (compression == COMPRESSION_JPEG) {
		if (input_photometric == PHOTOMETRIC_RGB &&
		    jpegcolormode == JPEGCOLORMODE_RGB)
			TIFFSetField(out, TIFFTAG_PHOTOMETRIC, PHOTOMETRIC_YCBCR);
		else
			TIFFSetField(out, TIFFTAG_PHOTOMETRIC, input_photometric);
	}
	else if (compression == COMPRESSION_SGILOG
	    || compression == COMPRESSION_SGILOG24)
		TIFFSetField(out, TIFFTAG_PHOTOMETRIC,
		    samplesperpixel == 1 ?
		    PHOTOMETRIC_LOGL : PHOTOMETRIC_LOGLUV);
	else if (input_compression == COMPRESSION_JPEG &&
			 samplesperpixel == 3 ) {
		/* RGB conversion was forced above
		   hence the output will be of the same type */
		TIFFSetField(out, TIFFTAG_PHOTOMETRIC, PHOTOMETRIC_RGB);
	}
	else
		CopyTag(TIFFTAG_PHOTOMETRIC, 1, TIFF_SHORT);
	if (fillorder != 0)
		TIFFSetField(out, TIFFTAG_FILLORDER, fillorder);
	else
		CopyTag(TIFFTAG_FILLORDER, 1, TIFF_SHORT);
	/*
	 * Will copy `Orientation' tag from input image
	 */
	TIFFGetFieldDefaulted(in, TIFFTAG_ORIENTATION, &orientation);
	switch (orientation) {
		case ORIENTATION_BOTRIGHT:
		case ORIENTATION_RIGHTBOT:	/* XXX */
			TIFFWarning(TIFFFileName(in), "using bottom-left orientation");
			orientation = ORIENTATION_BOTLEFT;
		/* fall thru... */
		case ORIENTATION_LEFTBOT:	/* XXX */
		case ORIENTATION_BOTLEFT:
			break;
		case ORIENTATION_TOPRIGHT:
		case ORIENTATION_RIGHTTOP:	/* XXX */
		default:
			TIFFWarning(TIFFFileName(in), "using top-left orientation");
			orientation = ORIENTATION_TOPLEFT;
		/* fall thru... */
		case ORIENTATION_LEFTTOP:	/* XXX */
		case ORIENTATION_TOPLEFT:
			break;
	}
	TIFFSetField(out, TIFFTAG_ORIENTATION, orientation);
	/*
	 * Choose tiles/strip for the output image according to
	 * the command line arguments (-tiles, -strips) and the
	 * structure of the input image.
	 */
	if (outtiled == -1)
		outtiled = TIFFIsTiled(in);
	if (outtiled) {
		/*
		 * Setup output file's tile width&height.  If either
		 * is not specified, use either the value from the
		 * input image or, if nothing is defined, use the
		 * library default.
		 */
		if (tilewidth == (uint32) -1)
			TIFFGetField(in, TIFFTAG_TILEWIDTH, &tilewidth);
		if (tilelength == (uint32) -1)
			TIFFGetField(in, TIFFTAG_TILELENGTH, &tilelength);
		TIFFDefaultTileSize(out, &tilewidth, &tilelength);
		TIFFSetField(out, TIFFTAG_TILEWIDTH, tilewidth);
		TIFFSetField(out, TIFFTAG_TILELENGTH, tilelength);
	} else {
		/*
		 * RowsPerStrip is left unspecified: use either the
		 * value from the input image or, if nothing is defined,
		 * use the library default.
		 */
		if (rowsperstrip == (uint32) 0) {
			if (!TIFFGetField(in, TIFFTAG_ROWSPERSTRIP,
					  &rowsperstrip)) {
				rowsperstrip =
					TIFFDefaultStripSize(out, rowsperstrip);
			}
			if (rowsperstrip > length && rowsperstrip != (uint32)-1)
				rowsperstrip = length;
		}
		else if (rowsperstrip == (uint32) -1)
			rowsperstrip = length;
		TIFFSetField(out, TIFFTAG_ROWSPERSTRIP, rowsperstrip);
	}
	if (config != (uint16) -1)
		TIFFSetField(out, TIFFTAG_PLANARCONFIG, config);
	else
		CopyField(TIFFTAG_PLANARCONFIG, config);
	if (samplesperpixel <= 4)
		CopyTag(TIFFTAG_TRANSFERFUNCTION, 4, TIFF_SHORT);
	CopyTag(TIFFTAG_COLORMAP, 4, TIFF_SHORT);
	/* SMinSampleValue & SMaxSampleValue */
	switch (compression) {
		case COMPRESSION_JPEG:
			TIFFSetField(out, TIFFTAG_JPEGQUALITY, quality);
			TIFFSetField(out, TIFFTAG_JPEGCOLORMODE, jpegcolormode);
			break;
		case COMPRESSION_JBIG:
			CopyTag(TIFFTAG_FAXRECVPARAMS, 1, TIFF_LONG);
			CopyTag(TIFFTAG_FAXRECVTIME, 1, TIFF_LONG);
			CopyTag(TIFFTAG_FAXSUBADDRESS, 1, TIFF_ASCII);
			CopyTag(TIFFTAG_FAXDCS, 1, TIFF_ASCII);
			break;
		case COMPRESSION_LZW:
		case COMPRESSION_ADOBE_DEFLATE:
		case COMPRESSION_DEFLATE:
		case COMPRESSION_LZMA:
			if (predictor != (uint16)-1)
				TIFFSetField(out, TIFFTAG_PREDICTOR, predictor);
			else
				CopyField(TIFFTAG_PREDICTOR, predictor);
			if (preset != -1) {
				if (compression == COMPRESSION_ADOBE_DEFLATE
					 || compression == COMPRESSION_DEFLATE)
					TIFFSetField(out, TIFFTAG_ZIPQUALITY, preset);
				else if (compression == COMPRESSION_LZMA)
					TIFFSetField(out, TIFFTAG_LZMAPRESET, preset);
			}
			break;
		case COMPRESSION_CCITTFAX3:
		case COMPRESSION_CCITTFAX4:
			if (compression == COMPRESSION_CCITTFAX3) {
				if (g3opts != (uint32) -1)
					TIFFSetField(out, TIFFTAG_GROUP3OPTIONS,
					    g3opts);
				else
					CopyField(TIFFTAG_GROUP3OPTIONS, g3opts);
			} else
				CopyTag(TIFFTAG_GROUP4OPTIONS, 1, TIFF_LONG);
			CopyTag(TIFFTAG_BADFAXLINES, 1, TIFF_LONG);
			CopyTag(TIFFTAG_CLEANFAXDATA, 1, TIFF_LONG);
			CopyTag(TIFFTAG_CONSECUTIVEBADFAXLINES, 1, TIFF_LONG);
			CopyTag(TIFFTAG_FAXRECVPARAMS, 1, TIFF_LONG);
			CopyTag(TIFFTAG_FAXRECVTIME, 1, TIFF_LONG);
			CopyTag(TIFFTAG_FAXSUBADDRESS, 1, TIFF_ASCII);
			break;
	}
	{
		uint32 len32;
		void** data;
		if (TIFFGetField(in, TIFFTAG_ICCPROFILE, &len32, &data))
			TIFFSetField(out, TIFFTAG_ICCPROFILE, len32, data);
	}
	{
		uint16 ninks;
		const char* inknames;
		if (TIFFGetField(in, TIFFTAG_NUMBEROFINKS, &ninks)) {
			TIFFSetField(out, TIFFTAG_NUMBEROFINKS, ninks);
			if (TIFFGetField(in, TIFFTAG_INKNAMES, &inknames)) {
				/* InkNames is a sequence of ninks NUL-terminated
				 * strings; total their lengths for TIFFSetField. */
				int inknameslen = strlen(inknames) + 1;
				const char* cp = inknames;
				while (ninks > 1) {
					cp = strchr(cp, '\0');
					cp++;
					inknameslen += (strlen(cp) + 1);
					ninks--;
				}
				TIFFSetField(out, TIFFTAG_INKNAMES, inknameslen, inknames);
			}
		}
	}
	{
		unsigned short pg0, pg1;

		if (pageInSeq == 1) {
			if (pageNum < 0) /* only one input file */ {
				if (TIFFGetField(in, TIFFTAG_PAGENUMBER, &pg0, &pg1))
					TIFFSetField(out, TIFFTAG_PAGENUMBER, pg0, pg1);
			} else
				TIFFSetField(out, TIFFTAG_PAGENUMBER, pageNum++, 0);
		} else {
			if (TIFFGetField(in, TIFFTAG_PAGENUMBER, &pg0, &pg1)) {
				if (pageNum < 0) /* only one input file */
					TIFFSetField(out, TIFFTAG_PAGENUMBER, pg0, pg1);
				else
					TIFFSetField(out, TIFFTAG_PAGENUMBER, pageNum++, 0);
			}
		}
	}
	for (p = tags; p < &tags[NTAGS]; p++)
		CopyTag(p->tag, p->count, p->type);

	cf = pickCopyFunc(in, out, bitspersample, samplesperpixel);
	return (cf ? (*cf)(in, out, length, width, samplesperpixel) : FALSE);
}

/*
 * Copy Functions.
 */
#define	DECLAREcpFunc(x) \
static int x(TIFF* in, TIFF* out, \
    uint32 imagelength, uint32 imagewidth, tsample_t spp)

#define	DECLAREreadFunc(x) \
static int x(TIFF* in, \
    uint8* buf, uint32 imagelength, uint32 imagewidth, tsample_t spp)
typedef int (*readFunc)(TIFF*, uint8*, uint32, uint32, tsample_t);

#define	DECLAREwriteFunc(x) \
static int x(TIFF* out, \
    uint8* buf, uint32 imagelength, uint32 imagewidth, tsample_t spp)
typedef int (*writeFunc)(TIFF*, uint8*, uint32, uint32, tsample_t);

/*
 * Contig -> contig by scanline for rows/strip change.
 */
DECLAREcpFunc(cpContig2ContigByRow)
{
	tsize_t scanlinesize = TIFFScanlineSize(in);
	tdata_t buf;
	uint32 row;

	buf = _TIFFmalloc(scanlinesize);
	if (!buf)
		return 0;
	_TIFFmemset(buf, 0, scanlinesize);
	(void) imagewidth; (void) spp;
	for (row = 0; row < imagelength; row++) {
		/* `ignore' (global -i flag) suppresses read-error aborts */
		if (TIFFReadScanline(in, buf, row, 0) < 0 && !ignore) {
			TIFFError(TIFFFileName(in),
				  "Error, can't read scanline %lu",
				  (unsigned long) row);
			goto bad;
		}
		if (TIFFWriteScanline(out, buf, row, 0) < 0) {
			TIFFError(TIFFFileName(out),
				  "Error, can't write scanline %lu",
				  (unsigned long) row);
			goto bad;
		}
	}
	_TIFFfree(buf);
	return 1;
bad:
	_TIFFfree(buf);
	return 0;
}

/* Per-pixel saturating subtraction used by the -b (bias) option. */
typedef void biasFn (void *image, void *bias, uint32 pixels);

#define subtract(bits) \
static void subtract##bits (void *i, void *b, uint32 pixels)\
{\
   uint##bits *image = i;\
   uint##bits *bias = b;\
   while (pixels--) {\
     *image = *image > *bias ? *image-*bias : 0;\
     image++, bias++; \
   } \
}

subtract(8)
subtract(16)
subtract(32)

/* Return the subtract routine matching the sample depth, or NULL. */
static biasFn *lineSubtractFn (unsigned bits)
{
	switch (bits) {
		case 8: return subtract8;
		case 16: return subtract16;
		case 32: return subtract32;
	}
	return NULL;
}

/*
 * Contig -> contig by scanline while subtracting a bias image.
 */
DECLAREcpFunc(cpBiasedContig2Contig)
{
	if (spp == 1) {
		tsize_t biasSize = TIFFScanlineSize(bias);
		tsize_t bufSize = TIFFScanlineSize(in);
		tdata_t buf, biasBuf;
		uint32 biasWidth = 0, biasLength = 0;
		TIFFGetField(bias, TIFFTAG_IMAGEWIDTH, &biasWidth);
		TIFFGetField(bias, TIFFTAG_IMAGELENGTH, &biasLength);
		/* Bias image must match the input scanline size and bounds. */
		if (biasSize == bufSize &&
		    imagelength == biasLength && imagewidth == biasWidth) {
			uint16 sampleBits = 0;
			biasFn *subtractLine;
			TIFFGetField(in, TIFFTAG_BITSPERSAMPLE, &sampleBits);
			subtractLine = lineSubtractFn (sampleBits);
			if (subtractLine) {
				uint32 row;
				buf = _TIFFmalloc(bufSize);
				biasBuf = _TIFFmalloc(bufSize);
				for (row = 0; row < imagelength; row++) {
					if (TIFFReadScanline(in, buf, row, 0) < 0
					    && !ignore) {
						TIFFError(TIFFFileName(in),
						    "Error, can't read scanline %lu",
						    (unsigned long) row);
						goto bad;
					}
					if (TIFFReadScanline(bias, biasBuf, row, 0) < 0
					    && !ignore) {
						TIFFError(TIFFFileName(in),
						    "Error, can't read biased scanline %lu",
						    (unsigned long) row);
						goto bad;
					}
					subtractLine (buf, biasBuf, imagewidth);
					if (TIFFWriteScanline(out, buf, row, 0) < 0) {
						TIFFError(TIFFFileName(out),
						    "Error, can't write scanline %lu",
						    (unsigned long) row);
						goto bad;
					}
				}
				_TIFFfree(buf);
				_TIFFfree(biasBuf);
				TIFFSetDirectory(bias,
				    TIFFCurrentDirectory(bias)); /* rewind */
				return 1;
bad:
				_TIFFfree(buf);
				_TIFFfree(biasBuf);
				return 0;
			} else {
				TIFFError(TIFFFileName(in),
				    "No support for biasing %d bit pixels\n",
				    sampleBits);
				return 0;
			}
		}
		TIFFError(TIFFFileName(in),
		    "Bias image %s,%d\nis not the same size as %s,%d\n",
		    TIFFFileName(bias), TIFFCurrentDirectory(bias),
		    TIFFFileName(in), TIFFCurrentDirectory(in));
		return 0;
	} else {
		TIFFError(TIFFFileName(in),
		    "Can't bias %s,%d as it has >1 Sample/Pixel\n",
		    TIFFFileName(in), TIFFCurrentDirectory(in));
		return 0;
	}
}

/*
 * Strip -> strip for change in encoding.
 */
DECLAREcpFunc(cpDecodedStrips)
{
	tsize_t stripsize = TIFFStripSize(in);
	tdata_t buf = _TIFFmalloc(stripsize);

	(void) imagewidth; (void) spp;
	if (buf) {
		tstrip_t s, ns = TIFFNumberOfStrips(in);
		uint32 row = 0;
		_TIFFmemset(buf, 0, stripsize);
		for (s = 0; s < ns && row < imagelength; s++) {
			/* Last strip may be shorter than rowsperstrip. */
			tsize_t cc = (row + rowsperstrip > imagelength) ?
			    TIFFVStripSize(in, imagelength - row) : stripsize;
			if (TIFFReadEncodedStrip(in, s, buf, cc) < 0
			    && !ignore) {
				TIFFError(TIFFFileName(in),
				    "Error, can't read strip %lu",
				    (unsigned long) s);
				goto bad;
			}
			if (TIFFWriteEncodedStrip(out, s, buf, cc) < 0) {
				TIFFError(TIFFFileName(out),
				    "Error, can't write strip %lu",
				    (unsigned long) s);
				goto bad;
			}
			row += rowsperstrip;
		}
		_TIFFfree(buf);
		return 1;
	} else {
		TIFFError(TIFFFileName(in),
		    "Error, can't allocate memory buffer of size %lu "
		    "to read strips", (unsigned long) stripsize);
		return 0;
	}

bad:
	_TIFFfree(buf);
	return 0;
}

/*
 * Separate -> separate by row for rows/strip change.
 */
DECLAREcpFunc(cpSeparate2SeparateByRow)
{
	tsize_t scanlinesize = TIFFScanlineSize(in);
	tdata_t buf;
	uint32 row;
	tsample_t s;

	(void) imagewidth;
	buf = _TIFFmalloc(scanlinesize);
	if (!buf)
		return 0;
	_TIFFmemset(buf, 0, scanlinesize);
	/* one full pass over the image per sample plane */
	for (s = 0; s < spp; s++) {
		for (row = 0; row < imagelength; row++) {
			if (TIFFReadScanline(in, buf, row, s) < 0 && !ignore) {
				TIFFError(TIFFFileName(in),
				    "Error, can't read scanline %lu",
				    (unsigned long) row);
				goto bad;
			}
			if (TIFFWriteScanline(out, buf, row, s) < 0) {
				TIFFError(TIFFFileName(out),
				    "Error, can't write scanline %lu",
				    (unsigned long) row);
				goto bad;
			}
		}
	}
	_TIFFfree(buf);
	return 1;
bad:
	_TIFFfree(buf);
	return 0;
}

/*
 * Contig -> separate by row.
 */
DECLAREcpFunc(cpContig2SeparateByRow)
{
	tsize_t scanlinesizein = TIFFScanlineSize(in);
	tsize_t scanlinesizeout = TIFFScanlineSize(out);
	tdata_t inbuf;
	tdata_t outbuf;
	register uint8 *inp, *outp;
	register uint32 n;
	uint32 row;
	tsample_t s;

	inbuf = _TIFFmalloc(scanlinesizein);
	outbuf = _TIFFmalloc(scanlinesizeout);
	if (!inbuf || !outbuf)
		goto bad;
	_TIFFmemset(inbuf, 0, scanlinesizein);
	_TIFFmemset(outbuf, 0, scanlinesizeout);
	/* unpack channels */
	for (s = 0; s < spp; s++) {
		for (row = 0; row < imagelength; row++) {
			if (TIFFReadScanline(in, inbuf, row, 0) < 0
			    && !ignore) {
				TIFFError(TIFFFileName(in),
				    "Error, can't read scanline %lu",
				    (unsigned long) row);
				goto bad;
			}
			/* gather every spp-th byte for sample plane s
			 * (assumes 8-bit samples) */
			inp = ((uint8*)inbuf) + s;
			outp = (uint8*)outbuf;
			for (n = imagewidth; n-- > 0;) {
				*outp++ = *inp;
				inp += spp;
			}
			if (TIFFWriteScanline(out, outbuf, row, s) < 0) {
				TIFFError(TIFFFileName(out),
				    "Error, can't write scanline %lu",
				    (unsigned long) row);
				goto bad;
			}
		}
	}
	if (inbuf) _TIFFfree(inbuf);
	if (outbuf) _TIFFfree(outbuf);
	return 1;
bad:
	if (inbuf) _TIFFfree(inbuf);
	if (outbuf) _TIFFfree(outbuf);
	return 0;
}

/*
 * Separate -> contig by row.
 */
DECLAREcpFunc(cpSeparate2ContigByRow)
{
	tsize_t scanlinesizein = TIFFScanlineSize(in);
	tsize_t scanlinesizeout = TIFFScanlineSize(out);
	tdata_t inbuf;
	tdata_t outbuf;
	register uint8 *inp, *outp;
	register uint32 n;
	uint32 row;
	tsample_t s;

	inbuf = _TIFFmalloc(scanlinesizein);
	outbuf = _TIFFmalloc(scanlinesizeout);
	if (!inbuf || !outbuf)
		goto bad;
	_TIFFmemset(inbuf, 0, scanlinesizein);
	_TIFFmemset(outbuf, 0, scanlinesizeout);
	for (row = 0; row < imagelength; row++) {
		/* merge channels */
		for (s = 0; s < spp; s++) {
			if (TIFFReadScanline(in, inbuf, row, s) < 0
			    && !ignore) {
				TIFFError(TIFFFileName(in),
				    "Error, can't read scanline %lu",
				    (unsigned long) row);
				goto bad;
			}
			/* interleave plane s at stride spp (8-bit samples) */
			inp = (uint8*)inbuf;
			outp = ((uint8*)outbuf) + s;
			for (n = imagewidth; n-- > 0;) {
				*outp = *inp++;
				outp += spp;
			}
		}
		if (TIFFWriteScanline(out, outbuf, row, 0) < 0) {
			TIFFError(TIFFFileName(out),
			    "Error, can't write scanline %lu",
			    (unsigned long) row);
			goto bad;
		}
	}
	if (inbuf) _TIFFfree(inbuf);
	if (outbuf) _TIFFfree(outbuf);
	return 1;
bad:
	if (inbuf) _TIFFfree(inbuf);
	if (outbuf) _TIFFfree(outbuf);
	return 0;
}

/*
 * Copy `rows' rows of `cols' bytes, advancing the output pointer by
 * `outskew' and the input pointer by `inskew' after each row.
 * NOTE: inskew is int64 so a negative skew (tile row wider than image
 * row) does not wrap when added to the pointer.
 */
static void
cpStripToTile(uint8* out, uint8* in,
    uint32 rows, uint32 cols, int outskew, int64 inskew)
{
	while (rows-- > 0) {
		uint32 j = cols;
		while (j-- > 0)
			*out++ = *in++;
		out += outskew;
		in += inskew;
	}
}

/* Extract one sample plane from an interleaved (contig) buffer. */
static void
cpContigBufToSeparateBuf(uint8* out, uint8* in,
    uint32 rows, uint32 cols, int outskew, int inskew, tsample_t spp,
    int bytes_per_sample )
{
	while (rows-- > 0) {
		uint32 j = cols;
		while (j-- > 0)
		{
			int n = bytes_per_sample;

			while( n-- ) {
				*out++ = *in++;
			}
			in += (spp-1) * bytes_per_sample;
		}
		out += outskew;
		in += inskew;
	}
}

/* Interleave one sample plane into a contig buffer. */
static void
cpSeparateBufToContigBuf(uint8* out, uint8* in,
    uint32 rows, uint32 cols, int outskew, int inskew, tsample_t spp,
    int bytes_per_sample)
{
	while (rows-- > 0) {
		uint32 j = cols;
		while (j-- > 0) {
			int n = bytes_per_sample;

			while( n-- ) {
				*out++ = *in++;
			}
			out += (spp-1)*bytes_per_sample;
		}
		out += outskew;
		in += inskew;
	}
}

/*
 * Read the whole image into one buffer with `fin', then emit it with
 * `fout'.  The buffer size is raster-scanline-size * imagelength; the
 * multiplication is checked for overflow before allocating.
 */
static int
cpImage(TIFF* in, TIFF* out, readFunc fin, writeFunc fout,
	uint32 imagelength, uint32 imagewidth, tsample_t spp)
{
	int status = 0;
	tdata_t buf = NULL;
	tsize_t scanlinesize = TIFFRasterScanlineSize(in);
	tsize_t bytes = scanlinesize * (tsize_t)imagelength;
	/*
	 * XXX: Check for integer overflow.
	 */
	if (scanlinesize
	    && imagelength
	    && bytes / (tsize_t)imagelength == scanlinesize) {
		buf = _TIFFmalloc(bytes);
		if (buf) {
			if ((*fin)(in, (uint8*)buf, imagelength,
			    imagewidth, spp)) {
				status = (*fout)(out, (uint8*)buf,
				    imagelength, imagewidth, spp);
			}
			_TIFFfree(buf);
		} else {
			TIFFError(TIFFFileName(in),
			    "Error, can't allocate space for image buffer");
		}
	} else {
		TIFFError(TIFFFileName(in), "Error, no space for image buffer");
	}

	return status;
}

/* Fill `buf' with the decoded scanlines of a contig striped image. */
DECLAREreadFunc(readContigStripsIntoBuffer)
{
	tsize_t scanlinesize = TIFFScanlineSize(in);
	uint8* bufp = buf;
	uint32 row;

	(void) imagewidth; (void) spp;
	for (row = 0; row < imagelength; row++) {
		if (TIFFReadScanline(in, (tdata_t) bufp, row, 0) < 0
		    && !ignore) {
			TIFFError(TIFFFileName(in),
			    "Error, can't read scanline %lu",
			    (unsigned long) row);
			return 0;
		}
		bufp += scanlinesize;
	}

	return 1;
}

/* Merge the planes of a separate striped image into a contig buffer. */
DECLAREreadFunc(readSeparateStripsIntoBuffer)
{
	int status = 1;
	tsize_t scanlinesize = TIFFScanlineSize(in);
	tdata_t scanline;
	if (!scanlinesize)
		return 0;

	scanline = _TIFFmalloc(scanlinesize);
	if (!scanline)
		return 0;
	_TIFFmemset(scanline, 0, scanlinesize);
	(void) imagewidth;
	if (scanline) {
		uint8* bufp = (uint8*) buf;
		uint32 row;
		tsample_t s;
		for (row = 0; row < imagelength; row++) {
			/* merge channels */
			for (s = 0; s < spp; s++) {
				uint8* bp = bufp + s;
				tsize_t n = scanlinesize;
				uint8* sbuf = scanline;

				if (TIFFReadScanline(in, scanline, row, s) < 0
				    && !ignore) {
					TIFFError(TIFFFileName(in),
					    "Error, can't read scanline %lu",
					    (unsigned long) row);
					status = 0;
					goto done;
				}
				while (n-- > 0)
					*bp = *sbuf++, bp += spp;
			}
			bufp += scanlinesize * spp;
		}
	}

done:
	_TIFFfree(scanline);
	return status;
}

/* Assemble a contig tiled image into one contiguous buffer. */
DECLAREreadFunc(readContigTilesIntoBuffer)
{
	int status = 1;
	tsize_t tilesize =
	    TIFFTileSize(in);
	tdata_t tilebuf;
	uint32 imagew = TIFFScanlineSize(in);
	uint32 tilew = TIFFTileRowSize(in);
	/* Signed 64-bit: tilew may exceed imagew, giving a negative skew;
	 * an unsigned subtraction here would wrap and corrupt the copy. */
	int64 iskew = (int64)imagew - (int64)tilew;
	uint8* bufp = (uint8*) buf;
	uint32 tw, tl;
	uint32 row;

	(void) spp;
	tilebuf = _TIFFmalloc(tilesize);
	if (tilebuf == 0)
		return 0;
	_TIFFmemset(tilebuf, 0, tilesize);
	(void) TIFFGetField(in, TIFFTAG_TILEWIDTH, &tw);
	(void) TIFFGetField(in, TIFFTAG_TILELENGTH, &tl);

	for (row = 0; row < imagelength; row += tl) {
		uint32 nrow = (row+tl > imagelength) ? imagelength-row : tl;
		uint32 colb = 0;
		uint32 col;

		for (col = 0; col < imagewidth && colb < imagew; col += tw) {
			if (TIFFReadTile(in, tilebuf, col, row, 0, 0) < 0
			    && !ignore) {
				TIFFError(TIFFFileName(in),
				    "Error, can't read tile at %lu %lu",
				    (unsigned long) col,
				    (unsigned long) row);
				status = 0;
				goto done;
			}
			if (colb > iskew) {
				/* tile is clipped at the right image edge */
				uint32 width = imagew - colb;
				uint32 oskew = tilew - width;
				cpStripToTile(bufp + colb,
				    tilebuf, nrow, width,
				    oskew + iskew, oskew );
			} else
				cpStripToTile(bufp + colb,
				    tilebuf, nrow, tilew,
				    iskew, 0);
			colb += tilew;
		}
		bufp += imagew * nrow;
	}
done:
	_TIFFfree(tilebuf);
	return status;
}

/* Assemble a separate-plane tiled image into one contig buffer. */
DECLAREreadFunc(readSeparateTilesIntoBuffer)
{
	int status = 1;
	uint32 imagew = TIFFRasterScanlineSize(in);
	uint32 tilew = TIFFTileRowSize(in);
	int iskew = imagew - tilew*spp;
	tsize_t tilesize = TIFFTileSize(in);
	tdata_t tilebuf;
	uint8* bufp = (uint8*) buf;
	uint32 tw, tl;
	uint32 row;
	uint16 bps = 0, bytes_per_sample;

	tilebuf = _TIFFmalloc(tilesize);
	if (tilebuf == 0)
		return 0;
	_TIFFmemset(tilebuf, 0, tilesize);
	(void) TIFFGetField(in, TIFFTAG_TILEWIDTH, &tw);
	(void) TIFFGetField(in, TIFFTAG_TILELENGTH, &tl);
	(void) TIFFGetField(in, TIFFTAG_BITSPERSAMPLE, &bps);
	if( bps == 0 )
	{
		TIFFError(TIFFFileName(in), "Error, cannot read BitsPerSample");
		status = 0;
		goto done;
	}
	assert( bps % 8 == 0 );
	bytes_per_sample = bps/8;

	for (row = 0; row < imagelength; row += tl) {
		uint32 nrow = (row+tl > imagelength) ? imagelength-row : tl;
		uint32 colb = 0;
		uint32 col;

		for (col = 0; col < imagewidth; col += tw) {
			tsample_t s;

			for (s = 0; s < spp; s++) {
				if (TIFFReadTile(in, tilebuf, col, row, 0, s) < 0
				    && !ignore) {
					TIFFError(TIFFFileName(in),
					    "Error, can't read tile at %lu %lu, "
					    "sample %lu",
					    (unsigned long) col,
					    (unsigned long) row,
					    (unsigned long) s);
					status = 0;
					goto done;
				}
				/*
				 * Tile is clipped horizontally.  Calculate
				 * visible portion and skewing factors.
				 */
				if (colb + tilew*spp > imagew) {
					uint32 width = imagew - colb;
					int oskew = tilew*spp - width;
					cpSeparateBufToContigBuf(
					    bufp+colb+s*bytes_per_sample,
					    tilebuf, nrow,
					    width/(spp*bytes_per_sample),
					    oskew + iskew, oskew/spp, spp,
					    bytes_per_sample);
				} else
					cpSeparateBufToContigBuf(
					    bufp+colb+s*bytes_per_sample,
					    tilebuf, nrow, tw,
					    iskew, 0, spp,
					    bytes_per_sample);
			}
			colb += tilew*spp;
		}
		bufp += imagew * nrow;
	}
done:
	_TIFFfree(tilebuf);
	return status;
}

/* Write the contig buffer out as encoded strips. */
DECLAREwriteFunc(writeBufferToContigStrips)
{
	uint32 row, rowsperstrip;
	tstrip_t strip = 0;

	(void) imagewidth; (void) spp;
	(void) TIFFGetFieldDefaulted(out, TIFFTAG_ROWSPERSTRIP, &rowsperstrip);
	for (row = 0; row < imagelength; row += rowsperstrip) {
		uint32 nrows = (row+rowsperstrip > imagelength) ?
		    imagelength-row : rowsperstrip;
		tsize_t stripsize = TIFFVStripSize(out, nrows);
		if (TIFFWriteEncodedStrip(out, strip++, buf, stripsize) < 0) {
			TIFFError(TIFFFileName(out),
			    "Error, can't write strip %u", strip - 1);
			return 0;
		}
		buf += stripsize;
	}
	return 1;
}

/* Write the contig buffer out as separate (per-plane) strips. */
DECLAREwriteFunc(writeBufferToSeparateStrips)
{
	uint32 rowsize = imagewidth * spp;
	uint32 rowsperstrip;
	tsize_t stripsize = TIFFStripSize(out);
	tdata_t obuf;
	tstrip_t strip = 0;
	tsample_t s;

	obuf = _TIFFmalloc(stripsize);
	if (obuf == NULL)
		return (0);
	_TIFFmemset(obuf, 0, stripsize);
	(void) TIFFGetFieldDefaulted(out, TIFFTAG_ROWSPERSTRIP, &rowsperstrip);
	for (s = 0; s < spp; s++) {
		uint32 row;
		for (row = 0; row < imagelength; row += rowsperstrip) {
			uint32 nrows = (row+rowsperstrip > imagelength) ?
			    imagelength-row : rowsperstrip;
			tsize_t stripsize = TIFFVStripSize(out, nrows);

			/* de-interleave plane s into obuf, then encode it */
			cpContigBufToSeparateBuf(
			    obuf, (uint8*) buf + row*rowsize + s,
			    nrows, imagewidth, 0, 0, spp, 1);
			if (TIFFWriteEncodedStrip(out, strip++, obuf, stripsize) < 0) {
				TIFFError(TIFFFileName(out),
				    "Error, can't write strip %u",
				    strip - 1);
				_TIFFfree(obuf);
				return 0;
			}
		}
	}
	_TIFFfree(obuf);
	return 1;

}

/* Write the contig buffer out as contig tiles. */
DECLAREwriteFunc(writeBufferToContigTiles)
{
	uint32 imagew = TIFFScanlineSize(out);
	uint32 tilew  = TIFFTileRowSize(out);
	int iskew = imagew - tilew;
	tsize_t tilesize = TIFFTileSize(out);
	tdata_t obuf;
	uint8* bufp = (uint8*) buf;
	uint32 tl, tw;
	uint32 row;

	(void) spp;

	obuf = _TIFFmalloc(TIFFTileSize(out));
	if (obuf == NULL)
		return 0;
	_TIFFmemset(obuf, 0, tilesize);
	(void) TIFFGetField(out, TIFFTAG_TILELENGTH, &tl);
	(void) TIFFGetField(out, TIFFTAG_TILEWIDTH, &tw);
	/* NOTE(review): the loop steps by the global `tilelength' while the
	 * bounds use the local `tl' just read back from `out'; presumably
	 * they always coincide (tiffcp() sets the output tile length from
	 * `tilelength') — confirm before relying on either alone. */
	for (row = 0; row < imagelength; row += tilelength) {
		uint32 nrow = (row+tl > imagelength) ? imagelength-row : tl;
		uint32 colb = 0;
		uint32 col;

		for (col = 0; col < imagewidth && colb < imagew; col += tw) {
			/*
			 * Tile is clipped horizontally.  Calculate
			 * visible portion and skewing factors.
			 */
			if (colb + tilew > imagew) {
				uint32 width = imagew - colb;
				int oskew = tilew - width;
				cpStripToTile(obuf, bufp + colb, nrow, width,
				    oskew, oskew + iskew);
			} else
				cpStripToTile(obuf, bufp + colb, nrow, tilew,
				    0, iskew);
			if (TIFFWriteTile(out, obuf, col, row, 0, 0) < 0) {
				TIFFError(TIFFFileName(out),
				    "Error, can't write tile at %lu %lu",
				    (unsigned long) col,
				    (unsigned long) row);
				_TIFFfree(obuf);
				return 0;
			}
			colb += tilew;
		}
		bufp += nrow * imagew;
	}
	_TIFFfree(obuf);
	return 1;
}

/* Write the contig buffer out as separate (per-plane) tiles. */
DECLAREwriteFunc(writeBufferToSeparateTiles)
{
	uint32 imagew = TIFFScanlineSize(out);
	tsize_t tilew  = TIFFTileRowSize(out);
	uint32 iimagew = TIFFRasterScanlineSize(out);
	int iskew = iimagew - tilew*spp;
	tsize_t tilesize = TIFFTileSize(out);
	tdata_t obuf;
	uint8* bufp = (uint8*) buf;
	uint32 tl, tw;
	uint32 row;
	uint16 bps = 0, bytes_per_sample;

	obuf = _TIFFmalloc(TIFFTileSize(out));
	if (obuf == NULL)
		return 0;
	_TIFFmemset(obuf, 0, tilesize);
	(void) TIFFGetField(out, TIFFTAG_TILELENGTH, &tl);
	(void) TIFFGetField(out, TIFFTAG_TILEWIDTH, &tw);
	(void) TIFFGetField(out, TIFFTAG_BITSPERSAMPLE, &bps);
	if( bps == 0 )
	{
		TIFFError(TIFFFileName(out), "Error, cannot read BitsPerSample");
		_TIFFfree(obuf);
		return 0;
	}
	assert( bps % 8 == 0 );
	bytes_per_sample = bps/8;

	for (row = 0; row < imagelength; row += tl) {
		uint32 nrow = (row+tl > imagelength) ? imagelength-row : tl;
		uint32 colb = 0;
		uint32 col;

		for (col = 0; col < imagewidth; col += tw) {
			tsample_t s;
			for (s = 0; s < spp; s++) {
				/*
				 * Tile is clipped horizontally.  Calculate
				 * visible portion and skewing factors.
				 */
				if (colb + tilew > imagew) {
					uint32 width = (imagew - colb);
					int oskew = tilew - width;

					cpContigBufToSeparateBuf(obuf,
					    bufp + (colb*spp) + s,
					    nrow, width/bytes_per_sample,
					    oskew, (oskew*spp)+iskew, spp,
					    bytes_per_sample);
				} else
					/* NOTE(review): passes the global
					 * `tilewidth' rather than the local
					 * `tw' read from `out'; presumably
					 * identical — verify. */
					cpContigBufToSeparateBuf(obuf,
					    bufp + (colb*spp) + s,
					    nrow, tilewidth,
					    0, iskew, spp,
					    bytes_per_sample);

				if (TIFFWriteTile(out, obuf, col, row, 0, s) < 0) {
					TIFFError(TIFFFileName(out),
					    "Error, can't write tile at %lu %lu "
					    "sample %lu",
					    (unsigned long) col,
					    (unsigned long) row,
					    (unsigned long) s);
					_TIFFfree(obuf);
					return 0;
				}
			}
			colb += tilew;
		}
		bufp += nrow * iimagew;
	}
	_TIFFfree(obuf);
	return 1;
}

/*
 * Contig strips -> contig tiles.
 */
DECLAREcpFunc(cpContigStrips2ContigTiles)
{
	return cpImage(in, out,
	    readContigStripsIntoBuffer,
	    writeBufferToContigTiles,
	    imagelength, imagewidth, spp);
}

/*
 * Contig strips -> separate tiles.
 */
DECLAREcpFunc(cpContigStrips2SeparateTiles)
{
	return cpImage(in, out,
	    readContigStripsIntoBuffer,
	    writeBufferToSeparateTiles,
	    imagelength, imagewidth, spp);
}

/*
 * Separate strips -> contig tiles.
 */
DECLAREcpFunc(cpSeparateStrips2ContigTiles)
{
	return cpImage(in, out,
	    readSeparateStripsIntoBuffer,
	    writeBufferToContigTiles,
	    imagelength, imagewidth, spp);
}

/*
 * Separate strips -> separate tiles.
 */
DECLAREcpFunc(cpSeparateStrips2SeparateTiles)
{
	return cpImage(in, out,
	    readSeparateStripsIntoBuffer,
	    writeBufferToSeparateTiles,
	    imagelength, imagewidth, spp);
}

/*
 * Contig tiles -> contig tiles (tile dimension change).
 */
DECLAREcpFunc(cpContigTiles2ContigTiles)
{
	return cpImage(in, out,
	    readContigTilesIntoBuffer,
	    writeBufferToContigTiles,
	    imagelength, imagewidth, spp);
}

/*
 * Contig tiles -> separate tiles.
 */
DECLAREcpFunc(cpContigTiles2SeparateTiles)
{
	return cpImage(in, out,
	    readContigTilesIntoBuffer,
	    writeBufferToSeparateTiles,
	    imagelength, imagewidth, spp);
}

/*
 * Separate tiles -> contig tiles.
 */
DECLAREcpFunc(cpSeparateTiles2ContigTiles)
{
	return cpImage(in, out,
	    readSeparateTilesIntoBuffer,
	    writeBufferToContigTiles,
	    imagelength, imagewidth, spp);
}

/*
 * Separate tiles -> separate tiles (tile dimension change).
 */
DECLAREcpFunc(cpSeparateTiles2SeparateTiles)
{
	return cpImage(in, out,
	    readSeparateTilesIntoBuffer,
	    writeBufferToSeparateTiles,
	    imagelength, imagewidth, spp);
}

/*
 * Contig tiles -> contig strips.
 */
DECLAREcpFunc(cpContigTiles2ContigStrips)
{
	return cpImage(in, out,
	    readContigTilesIntoBuffer,
	    writeBufferToContigStrips,
	    imagelength, imagewidth, spp);
}

/*
 * Contig tiles -> separate strips.
 */
DECLAREcpFunc(cpContigTiles2SeparateStrips)
{
	return cpImage(in, out,
	    readContigTilesIntoBuffer,
	    writeBufferToSeparateStrips,
	    imagelength, imagewidth, spp);
}

/*
 * Separate tiles -> contig strips.
 */
DECLAREcpFunc(cpSeparateTiles2ContigStrips)
{
	return cpImage(in, out,
	    readSeparateTilesIntoBuffer,
	    writeBufferToContigStrips,
	    imagelength, imagewidth, spp);
}

/*
 * Separate tiles -> separate strips.
 */
DECLAREcpFunc(cpSeparateTiles2SeparateStrips)
{
	return cpImage(in, out,
	    readSeparateTilesIntoBuffer,
	    writeBufferToSeparateStrips,
	    imagelength, imagewidth, spp);
}

/*
 * Select the appropriate copy function to use.
 */
static copyFunc
pickCopyFunc(TIFF* in, TIFF* out, uint16 bitspersample, uint16 samplesperpixel)
{
	uint16 shortv;
	uint32 w, l, tw, tl;
	int bychunk;

	(void) TIFFGetField(in, TIFFTAG_PLANARCONFIG, &shortv);
	if (shortv != config && bitspersample != 8 && samplesperpixel > 1) {
		fprintf(stderr,
		    "%s: Cannot handle different planar configuration w/ bits/sample != 8\n",
		    TIFFFileName(in));
		return (NULL);
	}
	TIFFGetField(in, TIFFTAG_IMAGEWIDTH, &w);
	TIFFGetField(in, TIFFTAG_IMAGELENGTH, &l);
	if (!(TIFFIsTiled(out) || TIFFIsTiled(in))) {
		uint32 irps = (uint32) -1L;
		TIFFGetField(in, TIFFTAG_ROWSPERSTRIP, &irps);
		/* if biased, force decoded copying to allow image subtraction */
		bychunk = !bias && (rowsperstrip == irps);
	}else{  /* either in or out is tiled */
		if (bias) {
			fprintf(stderr,
			    "%s: Cannot handle tiled configuration w/bias image\n",
			    TIFFFileName(in));
			return (NULL);
		}
		if (TIFFIsTiled(out)) {
			if (!TIFFGetField(in, TIFFTAG_TILEWIDTH, &tw))
				tw = w;
			if (!TIFFGetField(in, TIFFTAG_TILELENGTH, &tl))
				tl = l;
			bychunk = (tw == tilewidth && tl == tilelength);
		} else {  /* out's not, so in must be tiled */
			TIFFGetField(in, TIFFTAG_TILEWIDTH, &tw);
			TIFFGetField(in, TIFFTAG_TILELENGTH, &tl);
			bychunk = (tw == w && tl == rowsperstrip);
		}
	}
/* Encode (in-planarconfig, out-planarconfig, in-tiled, out-tiled, bychunk)
 * into one switchable value. */
#define	T 1
#define	F 0
#define pack(a,b,c,d,e)	((long)(((a)<<11)|((b)<<3)|((c)<<2)|((d)<<1)|(e)))
	switch(pack(shortv,config,TIFFIsTiled(in),TIFFIsTiled(out),bychunk)) {
		/* Strips -> Tiles */
		case pack(PLANARCONFIG_CONTIG, PLANARCONFIG_CONTIG, F,T,F):
		case pack(PLANARCONFIG_CONTIG, PLANARCONFIG_CONTIG, F,T,T):
			return cpContigStrips2ContigTiles;
		case pack(PLANARCONFIG_CONTIG, PLANARCONFIG_SEPARATE, F,T,F):
		case pack(PLANARCONFIG_CONTIG, PLANARCONFIG_SEPARATE, F,T,T):
			return cpContigStrips2SeparateTiles;
		case pack(PLANARCONFIG_SEPARATE, PLANARCONFIG_CONTIG, F,T,F):
		case pack(PLANARCONFIG_SEPARATE, PLANARCONFIG_CONTIG, F,T,T):
			return cpSeparateStrips2ContigTiles;
		case pack(PLANARCONFIG_SEPARATE, PLANARCONFIG_SEPARATE, F,T,F):
		case pack(PLANARCONFIG_SEPARATE, PLANARCONFIG_SEPARATE, F,T,T):
			return cpSeparateStrips2SeparateTiles;
		/* Tiles -> Tiles */
		case pack(PLANARCONFIG_CONTIG, PLANARCONFIG_CONTIG, T,T,F):
		case pack(PLANARCONFIG_CONTIG, PLANARCONFIG_CONTIG, T,T,T):
			return cpContigTiles2ContigTiles;
		case pack(PLANARCONFIG_CONTIG, PLANARCONFIG_SEPARATE, T,T,F):
		case pack(PLANARCONFIG_CONTIG, PLANARCONFIG_SEPARATE, T,T,T):
			return cpContigTiles2SeparateTiles;
		case pack(PLANARCONFIG_SEPARATE, PLANARCONFIG_CONTIG, T,T,F):
		case pack(PLANARCONFIG_SEPARATE, PLANARCONFIG_CONTIG, T,T,T):
			return cpSeparateTiles2ContigTiles;
		case pack(PLANARCONFIG_SEPARATE, PLANARCONFIG_SEPARATE, T,T,F):
		case pack(PLANARCONFIG_SEPARATE, PLANARCONFIG_SEPARATE, T,T,T):
			return cpSeparateTiles2SeparateTiles;
		/* Tiles -> Strips */
		case pack(PLANARCONFIG_CONTIG, PLANARCONFIG_CONTIG, T,F,F):
		case pack(PLANARCONFIG_CONTIG, PLANARCONFIG_CONTIG, T,F,T):
			return cpContigTiles2ContigStrips;
		case pack(PLANARCONFIG_CONTIG, PLANARCONFIG_SEPARATE, T,F,F):
		case pack(PLANARCONFIG_CONTIG, PLANARCONFIG_SEPARATE, T,F,T):
			return cpContigTiles2SeparateStrips;
		case pack(PLANARCONFIG_SEPARATE, PLANARCONFIG_CONTIG, T,F,F):
		case pack(PLANARCONFIG_SEPARATE, PLANARCONFIG_CONTIG, T,F,T):
			return cpSeparateTiles2ContigStrips;
		case pack(PLANARCONFIG_SEPARATE, PLANARCONFIG_SEPARATE, T,F,F):
		case pack(PLANARCONFIG_SEPARATE, PLANARCONFIG_SEPARATE, T,F,T):
			return cpSeparateTiles2SeparateStrips;
		/* Strips -> Strips */
		case pack(PLANARCONFIG_CONTIG, PLANARCONFIG_CONTIG, F,F,F):
			return bias ? cpBiasedContig2Contig : cpContig2ContigByRow;
		case pack(PLANARCONFIG_CONTIG, PLANARCONFIG_CONTIG, F,F,T):
			return cpDecodedStrips;
		case pack(PLANARCONFIG_CONTIG, PLANARCONFIG_SEPARATE, F,F,F):
		case pack(PLANARCONFIG_CONTIG, PLANARCONFIG_SEPARATE, F,F,T):
			return cpContig2SeparateByRow;
		case pack(PLANARCONFIG_SEPARATE, PLANARCONFIG_CONTIG, F,F,F):
		case pack(PLANARCONFIG_SEPARATE, PLANARCONFIG_CONTIG, F,F,T):
			return cpSeparate2ContigByRow;
		case pack(PLANARCONFIG_SEPARATE, PLANARCONFIG_SEPARATE, F,F,F):
		case pack(PLANARCONFIG_SEPARATE, PLANARCONFIG_SEPARATE, F,F,T):
			return cpSeparate2SeparateByRow;
	}
#undef pack
#undef F
#undef T
	fprintf(stderr, "tiffcp: %s: Don't know how to copy/convert image.\n",
	    TIFFFileName(in));
	return (NULL);
}

/* vim: set ts=8 sts=8 sw=8 noet: */
/*
 * Local Variables:
 * mode: c
 * c-basic-offset: 8
 * fill-column: 78
 * End:
 */
DECLAREreadFunc(readContigTilesIntoBuffer) { int status = 1; tsize_t tilesize = TIFFTileSize(in); tdata_t tilebuf; uint32 imagew = TIFFScanlineSize(in); uint32 tilew = TIFFTileRowSize(in); int iskew = imagew - tilew; uint8* bufp = (uint8*) buf; uint32 tw, tl; uint32 row; (void) spp; tilebuf = _TIFFmalloc(tilesize); if (tilebuf == 0) return 0; _TIFFmemset(tilebuf, 0, tilesize); (void) TIFFGetField(in, TIFFTAG_TILEWIDTH, &tw); (void) TIFFGetField(in, TIFFTAG_TILELENGTH, &tl); for (row = 0; row < imagelength; row += tl) { uint32 nrow = (row+tl > imagelength) ? imagelength-row : tl; uint32 colb = 0; uint32 col; for (col = 0; col < imagewidth && colb < imagew; col += tw) { if (TIFFReadTile(in, tilebuf, col, row, 0, 0) < 0 && !ignore) { TIFFError(TIFFFileName(in), "Error, can't read tile at %lu %lu", (unsigned long) col, (unsigned long) row); status = 0; goto done; } if (colb + tilew > imagew) { uint32 width = imagew - colb; uint32 oskew = tilew - width; cpStripToTile(bufp + colb, tilebuf, nrow, width, oskew + iskew, oskew ); } else cpStripToTile(bufp + colb, tilebuf, nrow, tilew, iskew, 0); colb += tilew; } bufp += imagew * nrow; } done: _TIFFfree(tilebuf); return status; }
DECLAREreadFunc(readContigTilesIntoBuffer) { int status = 1; tsize_t tilesize = TIFFTileSize(in); tdata_t tilebuf; uint32 imagew = TIFFScanlineSize(in); uint32 tilew = TIFFTileRowSize(in); int64 iskew = (int64)imagew - (int64)tilew; uint8* bufp = (uint8*) buf; uint32 tw, tl; uint32 row; (void) spp; tilebuf = _TIFFmalloc(tilesize); if (tilebuf == 0) return 0; _TIFFmemset(tilebuf, 0, tilesize); (void) TIFFGetField(in, TIFFTAG_TILEWIDTH, &tw); (void) TIFFGetField(in, TIFFTAG_TILELENGTH, &tl); for (row = 0; row < imagelength; row += tl) { uint32 nrow = (row+tl > imagelength) ? imagelength-row : tl; uint32 colb = 0; uint32 col; for (col = 0; col < imagewidth && colb < imagew; col += tw) { if (TIFFReadTile(in, tilebuf, col, row, 0, 0) < 0 && !ignore) { TIFFError(TIFFFileName(in), "Error, can't read tile at %lu %lu", (unsigned long) col, (unsigned long) row); status = 0; goto done; } if (colb > iskew) { uint32 width = imagew - colb; uint32 oskew = tilew - width; cpStripToTile(bufp + colb, tilebuf, nrow, width, oskew + iskew, oskew ); } else cpStripToTile(bufp + colb, tilebuf, nrow, tilew, iskew, 0); colb += tilew; } bufp += imagew * nrow; } done: _TIFFfree(tilebuf); return status; }
{'added': [(1166, ' uint32 rows, uint32 cols, int outskew, int64 inskew)'), (1323, '\tint64 iskew = (int64)imagew - (int64)tilew;'), (1351, '\t\t\tif (colb > iskew) {')], 'deleted': [(1166, ' uint32 rows, uint32 cols, int outskew, int inskew)'), (1323, '\tint iskew = imagew - tilew;'), (1351, '\t\t\tif (colb + tilew > imagew) {')]}
3
3
1,550
9,045
50
314
9
https://github.com/vadz/libtiff
CVE-2016-10093
CWE-119
793
jsparse.c
C++
jspGetNamedFieldInParents
/* * This file is part of Espruino, a JavaScript interpreter for Microcontrollers * * Copyright (C) 2013 Gordon Williams <gw@pur3.co.uk> * * This Source Code Form is subject to the terms of the Mozilla Public * License, v. 2.0. If a copy of the MPL was not distributed with this * file, You can obtain one at http://mozilla.org/MPL/2.0/. * * ---------------------------------------------------------------------------- * Recursive descent parser for code execution * ---------------------------------------------------------------------------- */ #include "jsparse.h" #include "jsinteractive.h" #include "jswrapper.h" #include "jsnative.h" #include "jswrap_object.h" // for function_replacewith #include "jswrap_functions.h" // insane check for eval in jspeFunctionCall #include "jswrap_json.h" // for jsfPrintJSON #include "jswrap_espruino.h" // for jswrap_espruino_memoryArea #ifndef SAVE_ON_FLASH #include "jswrap_regexp.h" // for jswrap_regexp_constructor #endif /* Info about execution when Parsing - this saves passing it on the stack * for each call */ JsExecInfo execInfo; // ----------------------------------------------- Forward decls JsVar *jspeAssignmentExpression(); JsVar *jspeExpression(); JsVar *jspeUnaryExpression(); void jspeBlock(); void jspeBlockNoBrackets(); JsVar *jspeStatement(); JsVar *jspeFactor(); void jspEnsureIsPrototype(JsVar *instanceOf, JsVar *prototypeName); #ifndef SAVE_ON_FLASH JsVar *jspeArrowFunction(JsVar *funcVar, JsVar *a); #endif // ----------------------------------------------- Utils #define JSP_MATCH_WITH_CLEANUP_AND_RETURN(TOKEN, CLEANUP_CODE, RETURN_VAL) { if (!jslMatch((TOKEN))) { CLEANUP_CODE; return RETURN_VAL; } } #define JSP_MATCH_WITH_RETURN(TOKEN, RETURN_VAL) JSP_MATCH_WITH_CLEANUP_AND_RETURN(TOKEN, , RETURN_VAL) #define JSP_MATCH(TOKEN) JSP_MATCH_WITH_CLEANUP_AND_RETURN(TOKEN, , 0) // Match where the user could have given us the wrong token #define JSP_ASSERT_MATCH(TOKEN) { assert(lex->tk==(TOKEN));jslGetNextToken(); } // Match 
where if we have the wrong token, it's an internal error #define JSP_SHOULD_EXECUTE (((execInfo.execute)&EXEC_RUN_MASK)==EXEC_YES) #define JSP_SAVE_EXECUTE() JsExecFlags oldExecute = execInfo.execute #define JSP_RESTORE_EXECUTE() execInfo.execute = (execInfo.execute&(JsExecFlags)(~EXEC_SAVE_RESTORE_MASK)) | (oldExecute&EXEC_SAVE_RESTORE_MASK); #define JSP_HAS_ERROR (((execInfo.execute)&EXEC_ERROR_MASK)!=0) #define JSP_SHOULDNT_PARSE (((execInfo.execute)&EXEC_NO_PARSE_MASK)!=0) ALWAYS_INLINE void jspDebuggerLoopIfCtrlC() { #ifdef USE_DEBUGGER if (execInfo.execute & EXEC_CTRL_C_WAIT && JSP_SHOULD_EXECUTE) jsiDebuggerLoop(); #endif } /// if interrupting execution, this is set bool jspIsInterrupted() { return (execInfo.execute & EXEC_INTERRUPTED)!=0; } /// if interrupting execution, this is set void jspSetInterrupted(bool interrupt) { if (interrupt) execInfo.execute = execInfo.execute | EXEC_INTERRUPTED; else execInfo.execute = execInfo.execute & (JsExecFlags)~EXEC_INTERRUPTED; } /// Set the error flag - set lineReported if we've already output the line number void jspSetError(bool lineReported) { execInfo.execute = (execInfo.execute & (JsExecFlags)~EXEC_YES) | EXEC_ERROR; if (lineReported) execInfo.execute |= EXEC_ERROR_LINE_REPORTED; } bool jspHasError() { return JSP_HAS_ERROR; } void jspeiClearScopes() { jsvUnLock(execInfo.scopesVar); execInfo.scopesVar = 0; } bool jspeiAddScope(JsVar *scope) { if (!execInfo.scopesVar) execInfo.scopesVar = jsvNewEmptyArray(); if (!execInfo.scopesVar) return false; jsvArrayPush(execInfo.scopesVar, scope); return true; } void jspeiRemoveScope() { if (!execInfo.scopesVar || !jsvGetArrayLength(execInfo.scopesVar)) { jsExceptionHere(JSET_INTERNALERROR, "Too many scopes removed"); jspSetError(false); return; } jsvUnLock(jsvArrayPop(execInfo.scopesVar)); if (!jsvGetFirstChild(execInfo.scopesVar)) { jsvUnLock(execInfo.scopesVar); execInfo.scopesVar = 0; } } JsVar *jspeiFindInScopes(const char *name) { if (execInfo.scopesVar) { JsVar *it = 
jsvLockSafe(jsvGetLastChild(execInfo.scopesVar)); while (it) { JsVar *scope = jsvSkipName(it); JsVarRef next = jsvGetPrevSibling(it); JsVar *ref = jsvFindChildFromString(scope, name, false); jsvUnLock2(it, scope); if (ref) return ref; it = jsvLockSafe(next); } } return jsvFindChildFromString(execInfo.root, name, false); } /// Return the topmost scope (and lock it) JsVar *jspeiGetTopScope() { if (execInfo.scopesVar) { JsVar *scope = jsvGetLastArrayItem(execInfo.scopesVar); if (scope) return scope; } return jsvLockAgain(execInfo.root); } JsVar *jspeiFindOnTop(const char *name, bool createIfNotFound) { JsVar *scope = jspeiGetTopScope(); JsVar *result = jsvFindChildFromString(scope, name, createIfNotFound); jsvUnLock(scope); return result; } JsVar *jspeiFindNameOnTop(JsVar *childName, bool createIfNotFound) { JsVar *scope = jspeiGetTopScope(); JsVar *result = jsvFindChildFromVar(scope, childName, createIfNotFound); jsvUnLock(scope); return result; } JsVar *jspFindPrototypeFor(const char *className) { JsVar *obj = jsvObjectGetChild(execInfo.root, className, 0); if (!obj) return 0; JsVar *proto = jsvObjectGetChild(obj, JSPARSE_PROTOTYPE_VAR, 0); jsvUnLock(obj); return proto; } /** Here we assume that we have already looked in the parent itself - * and are now going down looking at the stuff it inherited */ JsVar *jspeiFindChildFromStringInParents(JsVar *parent, const char *name) { if (jsvIsObject(parent)) { // If an object, look for an 'inherits' var JsVar *inheritsFrom = jsvObjectGetChild(parent, JSPARSE_INHERITS_VAR, 0); // if there's no inheritsFrom, just default to 'Object.prototype' if (!inheritsFrom) inheritsFrom = jspFindPrototypeFor("Object"); if (inheritsFrom && inheritsFrom!=parent) { // we have what it inherits from (this is ACTUALLY the prototype var) // https://developer.mozilla.org/en-US/docs/JavaScript/Reference/Global_Objects/Object/proto JsVar *child = jsvFindChildFromString(inheritsFrom, name, false); if (!child) child = 
jspeiFindChildFromStringInParents(inheritsFrom, name); jsvUnLock(inheritsFrom); if (child) return child; } else jsvUnLock(inheritsFrom); } else { // Not actually an object - but might be an array/string/etc const char *objectName = jswGetBasicObjectName(parent); while (objectName) { JsVar *objName = jsvFindChildFromString(execInfo.root, objectName, false); if (objName) { JsVar *result = 0; JsVar *obj = jsvSkipNameAndUnLock(objName); // could be something the user has made - eg. 'Array=1' if (jsvHasChildren(obj)) { // We have found an object with this name - search for the prototype var JsVar *proto = jsvObjectGetChild(obj, JSPARSE_PROTOTYPE_VAR, 0); if (proto) { result = jsvFindChildFromString(proto, name, false); jsvUnLock(proto); } } jsvUnLock(obj); if (result) return result; } /* We haven't found anything in the actual object, we should check the 'Object' itself eg, we tried 'String', so now we should try 'Object'. Built-in types don't have room for a prototype field, so we hard-code it */ objectName = jswGetBasicObjectPrototypeName(objectName); } } // no luck! return 0; } JsVar *jspeiGetScopesAsVar() { if (!execInfo.scopesVar) return 0; // no scopes! // If just one element, return it (no array) if (jsvGetArrayLength(execInfo.scopesVar)==1) { JsVar *v = jsvGetLastArrayItem(execInfo.scopesVar); // this is faster than getting by index return v; } // Copy this - because if we just returned it, the underlying array would get altered return jsvCopy(execInfo.scopesVar, true); } void jspeiLoadScopesFromVar(JsVar *arr) { jsvUnLock(execInfo.scopesVar); execInfo.scopesVar = 0; if (arr) { if (jsvIsArray(arr)) { // TODO: copy on write? would make function calls faster execInfo.scopesVar = jsvCopy(arr, true); } else { // just a single item,but we must package it in an array execInfo.scopesVar = jsvNewArray(&arr, 1); } } } // ----------------------------------------------- /// Check that we have enough stack to recurse. Return true if all ok, error if not. 
bool jspCheckStackPosition() { if (jsuGetFreeStack() < 512) { // giving us 512 bytes leeway jsExceptionHere(JSET_ERROR, "Too much recursion - the stack is about to overflow"); jspSetInterrupted(true); return false; } return true; } // Set execFlags such that we are not executing void jspSetNoExecute() { execInfo.execute = (execInfo.execute & (JsExecFlags)(int)~EXEC_RUN_MASK) | EXEC_NO; } void jspAppendStackTrace(JsVar *stackTrace) { JsvStringIterator it; jsvStringIteratorNew(&it, stackTrace, 0); jsvStringIteratorGotoEnd(&it); jslPrintPosition((vcbprintf_callback)jsvStringIteratorPrintfCallback, &it, lex->tokenLastStart); jslPrintTokenLineMarker((vcbprintf_callback)jsvStringIteratorPrintfCallback, &it, lex->tokenLastStart, 0); jsvStringIteratorFree(&it); } /// We had an exception (argument is the exception's value) void jspSetException(JsVar *value) { // Add the exception itself to a variable in root scope JsVar *exception = jsvFindChildFromString(execInfo.hiddenRoot, JSPARSE_EXCEPTION_VAR, true); if (exception) { jsvSetValueOfName(exception, value); jsvUnLock(exception); } // Set the exception flag execInfo.execute = execInfo.execute | EXEC_EXCEPTION; // Try and do a stack trace if (lex) { JsVar *stackTrace = jsvObjectGetChild(execInfo.hiddenRoot, JSPARSE_STACKTRACE_VAR, JSV_STRING_0); if (stackTrace) { jsvAppendPrintf(stackTrace, " at "); jspAppendStackTrace(stackTrace); jsvUnLock(stackTrace); // stop us from printing the trace in the same block execInfo.execute = execInfo.execute | EXEC_ERROR_LINE_REPORTED; } } } /** Return the reported exception if there was one (and clear it) */ JsVar *jspGetException() { JsVar *exceptionName = jsvFindChildFromString(execInfo.hiddenRoot, JSPARSE_EXCEPTION_VAR, false); if (exceptionName) { JsVar *exception = jsvSkipName(exceptionName); jsvRemoveChild(execInfo.hiddenRoot, exceptionName); jsvUnLock(exceptionName); JsVar *stack = jspGetStackTrace(); if (stack && jsvHasChildren(exception)) { jsvObjectSetChild(exception, "stack", 
stack); } jsvUnLock(stack); return exception; } return 0; } /** Return a stack trace string if there was one (and clear it) */ JsVar *jspGetStackTrace() { JsVar *stackTraceName = jsvFindChildFromString(execInfo.hiddenRoot, JSPARSE_STACKTRACE_VAR, false); if (stackTraceName) { JsVar *stackTrace = jsvSkipName(stackTraceName); jsvRemoveChild(execInfo.hiddenRoot, stackTraceName); jsvUnLock(stackTraceName); return stackTrace; } return 0; } // ---------------------------------------------- // we return a value so that JSP_MATCH can return 0 if it fails (if we pass 0, we just parse all args) NO_INLINE bool jspeFunctionArguments(JsVar *funcVar) { JSP_MATCH('('); while (lex->tk!=')') { if (funcVar) { char buf[JSLEX_MAX_TOKEN_LENGTH+1]; buf[0] = '\xFF'; strcpy(&buf[1], jslGetTokenValueAsString()); JsVar *param = jsvAddNamedChild(funcVar, 0, buf); if (!param) { // out of memory jspSetError(false); return false; } jsvMakeFunctionParameter(param); // force this to be called a function parameter jsvUnLock(param); } JSP_MATCH(LEX_ID); if (lex->tk!=')') JSP_MATCH(','); } JSP_MATCH(')'); return true; } // Parse function, assuming we're on '{'. funcVar can be 0. 
returns 'true' is the function included the 'this' keyword NO_INLINE bool jspeFunctionDefinitionInternal(JsVar *funcVar, bool expressionOnly) { bool forcePretokenise = false; if (expressionOnly) { if (funcVar) funcVar->flags = (funcVar->flags & ~JSV_VARTYPEMASK) | JSV_FUNCTION_RETURN; } else { JSP_MATCH('{'); #ifndef SAVE_ON_FLASH if (lex->tk==LEX_STR) { if (!strcmp(jslGetTokenValueAsString(), "compiled")) jsWarn("Function marked with \"compiled\" uploaded in source form"); if (lex->tk==LEX_STR && !strcmp(jslGetTokenValueAsString(), "ram")) { JSP_ASSERT_MATCH(LEX_STR); forcePretokenise = true; } } #endif /* If the function starts with return, treat it specially - * we don't want to store the 'return' part of it */ if (funcVar && lex->tk==LEX_R_RETURN) { funcVar->flags = (funcVar->flags & ~JSV_VARTYPEMASK) | JSV_FUNCTION_RETURN; JSP_ASSERT_MATCH(LEX_R_RETURN); } } #ifndef ESPR_NO_LINE_NUMBERS // Get the line number (if needed) JsVarInt lineNumber = 0; if (funcVar && lex->lineNumberOffset && !(forcePretokenise||jsfGetFlag(JSF_PRETOKENISE))) { // jslGetLineNumber is slow, so we only do it if we have debug info lineNumber = (JsVarInt)jslGetLineNumber() + (JsVarInt)lex->lineNumberOffset - 1; } #endif // Get the code - parse it and figure out where it stops JslCharPos funcBegin; jslSkipWhiteSpace(); jslCharPosNew(&funcBegin, lex->sourceVar, lex->tokenStart); int lastTokenEnd = -1; lex->hadThisKeyword = lex->tk == LEX_R_THIS; if (!expressionOnly) { int brackets = 0; while (lex->tk && (brackets || lex->tk != '}')) { if (lex->tk == '{') brackets++; if (lex->tk == '}') brackets--; lastTokenEnd = (int)jsvStringIteratorGetIndex(&lex->it)-1; JSP_ASSERT_MATCH(lex->tk); } // FIXME: we might be including whitespace after the last token } else { JsExecFlags oldExec = execInfo.execute; execInfo.execute = EXEC_NO; jsvUnLock(jspeAssignmentExpression()); execInfo.execute = oldExec; lastTokenEnd = (int)lex->tokenStart; } bool hadThisKeyword = lex->hadThisKeyword; // Then create var and 
set (if there was any code!) if (funcVar && lastTokenEnd>0) { // code var JsVar *funcCodeVar; if (!forcePretokenise && jsvIsNativeString(lex->sourceVar)) { /* If we're parsing from a Native String (eg. E.memoryArea, E.setBootCode) then use another Native String to load function code straight from flash */ int s = (int)jsvStringIteratorGetIndex(&funcBegin.it) - 1; funcCodeVar = jsvNewNativeString(lex->sourceVar->varData.nativeStr.ptr + s, (unsigned int)(lastTokenEnd - s)); #ifdef SPIFLASH_BASE } else if (!forcePretokenise && jsvIsFlashString(lex->sourceVar)) { /* If we're parsing from a Flash String (eg. loaded from Storage on Bangle.js) then use another Flash String to load function code straight from flash*/ int s = (int)jsvStringIteratorGetIndex(&funcBegin.it) - 1; funcCodeVar = jsvNewFlashString(lex->sourceVar->varData.nativeStr.ptr + s, (unsigned int)(lastTokenEnd - s)); #endif } else { if (jsfGetFlag(JSF_PRETOKENISE) || forcePretokenise) { funcCodeVar = jslNewTokenisedStringFromLexer(&funcBegin, (size_t)lastTokenEnd); } else { funcCodeVar = jslNewStringFromLexer(&funcBegin, (size_t)lastTokenEnd); } } jsvUnLock2(jsvAddNamedChild(funcVar, funcCodeVar, JSPARSE_FUNCTION_CODE_NAME), funcCodeVar); // scope var JsVar *funcScopeVar = jspeiGetScopesAsVar(); if (funcScopeVar) { jsvUnLock2(jsvAddNamedChild(funcVar, funcScopeVar, JSPARSE_FUNCTION_SCOPE_NAME), funcScopeVar); } #ifndef ESPR_NO_LINE_NUMBERS // If we've got a line number, add a var for it if (lineNumber) { JsVar *funcLineNumber = jsvNewFromInteger(lineNumber); if (funcLineNumber) { jsvUnLock2(jsvAddNamedChild(funcVar, funcLineNumber, JSPARSE_FUNCTION_LINENUMBER_NAME), funcLineNumber); } } #endif } jslCharPosFree(&funcBegin); if (!expressionOnly) JSP_MATCH('}'); return hadThisKeyword; } // Parse function (after 'function' has occurred NO_INLINE JsVar *jspeFunctionDefinition(bool parseNamedFunction) { // actually parse a function... 
We assume that the LEX_FUNCTION and name // have already been parsed JsVar *funcVar = 0; bool actuallyCreateFunction = JSP_SHOULD_EXECUTE; if (actuallyCreateFunction) funcVar = jsvNewWithFlags(JSV_FUNCTION); JsVar *functionInternalName = 0; if (parseNamedFunction && lex->tk==LEX_ID) { // you can do `var a = function foo() { foo(); };` - so cope with this if (funcVar) functionInternalName = jslGetTokenValueAsVar(); // note that we don't add it to the beginning, because it would mess up our function call code JSP_ASSERT_MATCH(LEX_ID); } // Get arguments save them to the structure if (!jspeFunctionArguments(funcVar)) { jsvUnLock2(functionInternalName, funcVar); // parse failed return 0; } // Parse the actual function block jspeFunctionDefinitionInternal(funcVar, false); // if we had a function name, add it to the end (if we don't it gets confused with arguments) if (funcVar && functionInternalName) jsvObjectSetChildAndUnLock(funcVar, JSPARSE_FUNCTION_NAME_NAME, functionInternalName); return funcVar; } /* Parse just the brackets of a function - and throw * everything away */ NO_INLINE bool jspeParseFunctionCallBrackets() { assert(!JSP_SHOULD_EXECUTE); JSP_MATCH('('); while (!JSP_SHOULDNT_PARSE && lex->tk != ')') { jsvUnLock(jspeAssignmentExpression()); #ifndef SAVE_ON_FLASH if (lex->tk==LEX_ARROW_FUNCTION) { jsvUnLock(jspeArrowFunction(0, 0)); } #endif if (lex->tk!=')') JSP_MATCH(','); } if (!JSP_SHOULDNT_PARSE) JSP_MATCH(')'); return 0; } /** Handle a function call (assumes we've parsed the function name and we're * on the start bracket). 
'thisArg' is the value of the 'this' variable when the * function is executed (it's usually the parent object) * * * NOTE: this does not set the execInfo flags - so if execInfo==EXEC_NO, it won't execute * * If !isParsing and arg0!=0, argument 0 is set to what is supplied (same with arg1) * * functionName is used only for error reporting - and can be 0 */ NO_INLINE JsVar *jspeFunctionCall(JsVar *function, JsVar *functionName, JsVar *thisArg, bool isParsing, int argCount, JsVar **argPtr) { if (JSP_SHOULD_EXECUTE && !function) { if (functionName) jsExceptionHere(JSET_ERROR, "Function %q not found!", functionName); else jsExceptionHere(JSET_ERROR, "Function not found!", functionName); return 0; } if (JSP_SHOULD_EXECUTE) if (!jspCheckStackPosition()) return 0; // try and ensure that we won't overflow our stack if (JSP_SHOULD_EXECUTE && function) { JsVar *returnVar = 0; if (!jsvIsFunction(function)) { jsExceptionHere(JSET_ERROR, "Expecting a function to call, got %t", function); return 0; } JsVar *thisVar = jsvLockAgainSafe(thisArg); if (isParsing) JSP_MATCH('('); /* Ok, so we have 4 options here. * * 1: we're native. 
* a) args have been pre-parsed, which is awesome * b) we have to parse our own args into an array * 2: we're not native * a) args were pre-parsed and we have to populate the function * b) we parse our own args, which is possibly better */ if (jsvIsNativeFunction(function)) { // ------------------------------------- NATIVE unsigned int argPtrSize = 0; int boundArgs = 0; // Add 'bound' parameters if there were any JsvObjectIterator it; jsvObjectIteratorNew(&it, function); JsVar *param = jsvObjectIteratorGetKey(&it); while (jsvIsFunctionParameter(param)) { if ((unsigned)argCount>=argPtrSize) { // allocate more space on stack if needed unsigned int newArgPtrSize = (argPtrSize?argPtrSize:(unsigned int)argCount)*4; size_t newArgPtrByteSize = sizeof(JsVar*)*newArgPtrSize; if (jsuGetFreeStack() < 256+newArgPtrByteSize) { jsExceptionHere(JSET_ERROR, "Insufficient stack for this many arguments"); jsvUnLock(thisVar); return 0; } JsVar **newArgPtr = (JsVar**)alloca(newArgPtrByteSize); memcpy(newArgPtr, argPtr, (unsigned)argCount*sizeof(JsVar*)); argPtr = newArgPtr; argPtrSize = newArgPtrSize; } // if we already had arguments - shift them up... 
int i; for (i=argCount-1;i>=boundArgs;i--) argPtr[i+1] = argPtr[i]; // add bound argument argPtr[boundArgs] = jsvSkipName(param); argCount++; boundArgs++; jsvUnLock(param); jsvObjectIteratorNext(&it); param = jsvObjectIteratorGetKey(&it); } // check if 'this' was defined while (param) { if (jsvIsStringEqual(param, JSPARSE_FUNCTION_THIS_NAME)) { jsvUnLock(thisVar); thisVar = jsvSkipName(param); break; } jsvUnLock(param); jsvObjectIteratorNext(&it); param = jsvObjectIteratorGetKey(&it); } jsvUnLock(param); jsvObjectIteratorFree(&it); // Now, if we're parsing add the rest of the arguments int allocatedArgCount = boundArgs; if (isParsing) { while (!JSP_HAS_ERROR && lex->tk!=')' && lex->tk!=LEX_EOF) { if ((unsigned)argCount>=argPtrSize) { // allocate more space on stack unsigned int newArgPtrSize = argPtrSize?argPtrSize*4:16; JsVar **newArgPtr = (JsVar**)alloca(sizeof(JsVar*)*newArgPtrSize); memcpy(newArgPtr, argPtr, (unsigned)argCount*sizeof(JsVar*)); argPtr = newArgPtr; argPtrSize = newArgPtrSize; } argPtr[argCount++] = jsvSkipNameAndUnLock(jspeAssignmentExpression()); if (lex->tk!=')') JSP_MATCH_WITH_CLEANUP_AND_RETURN(',',jsvUnLockMany((unsigned)argCount, argPtr);jsvUnLock(thisVar);, 0); } JSP_MATCH(')'); allocatedArgCount = argCount; } void *nativePtr = jsvGetNativeFunctionPtr(function); JsVar *oldThisVar = execInfo.thisVar; if (thisVar) execInfo.thisVar = jsvRef(thisVar); else { if (nativePtr==jswrap_eval) { // eval gets to use the current scope /* Note: proper JS has some utterly insane code that depends on whether * eval is an lvalue or not: * * http://stackoverflow.com/questions/9107240/1-evalthis-vs-evalthis-in-javascript * * Doing this in Espruino is quite an upheaval for that one * slightly insane case - so it's not implemented. 
*/ if (execInfo.thisVar) execInfo.thisVar = jsvRef(execInfo.thisVar); } else { execInfo.thisVar = jsvRef(execInfo.root); // 'this' should always default to root } } if (nativePtr && !JSP_HAS_ERROR) { returnVar = jsnCallFunction(nativePtr, function->varData.native.argTypes, thisVar, argPtr, argCount); assert(!jsvIsName(returnVar)); } else { returnVar = 0; } // unlock values if we locked them jsvUnLockMany((unsigned)allocatedArgCount, argPtr); /* Return to old 'this' var. No need to unlock as we never locked before */ if (execInfo.thisVar) jsvUnRef(execInfo.thisVar); execInfo.thisVar = oldThisVar; } else { // ----------------------------------------------------- NOT NATIVE // create a new symbol table entry for execution of this function // OPT: can we cache this function execution environment + param variables? // OPT: Probably when calling a function ONCE, use it, otherwise when recursing, make new? JsVar *functionRoot = jsvNewWithFlags(JSV_FUNCTION); if (!functionRoot) { // out of memory jspSetError(false); jsvUnLock(thisVar); return 0; } JsVar *functionScope = 0; JsVar *functionCode = 0; JsVar *functionInternalName = 0; #ifndef ESPR_NO_LINE_NUMBERS uint16_t functionLineNumber = 0; #endif /** NOTE: We expect that the function object will have: * * * Parameters * * Code/Scope/Name * * IN THAT ORDER. */ JsvObjectIterator it; jsvObjectIteratorNew(&it, function); JsVar *param = jsvObjectIteratorGetKey(&it); JsVar *value = jsvObjectIteratorGetValue(&it); while (jsvIsFunctionParameter(param) && value) { jsvAddFunctionParameter(functionRoot, jsvNewFromStringVar(param,1,JSVAPPENDSTRINGVAR_MAXLENGTH), value); jsvUnLock2(value, param); jsvObjectIteratorNext(&it); param = jsvObjectIteratorGetKey(&it); value = jsvObjectIteratorGetValue(&it); } jsvUnLock2(value, param); if (isParsing) { int hadParams = 0; // grab in all parameters. 
We go around this loop until we've run out // of named parameters AND we've parsed all the supplied arguments while (!JSP_SHOULDNT_PARSE && lex->tk!=')') { JsVar *param = jsvObjectIteratorGetKey(&it); bool paramDefined = jsvIsFunctionParameter(param); if (lex->tk!=')' || paramDefined) { hadParams++; JsVar *value = 0; // ONLY parse this if it was supplied, otherwise leave 0 (undefined) if (lex->tk!=')') value = jspeAssignmentExpression(); // and if execute, copy it over value = jsvSkipNameAndUnLock(value); jsvAddFunctionParameter(functionRoot, paramDefined?jsvNewFromStringVar(param,1,JSVAPPENDSTRINGVAR_MAXLENGTH):0, value); jsvUnLock(value); if (lex->tk!=')') JSP_MATCH(','); } jsvUnLock(param); if (paramDefined) jsvObjectIteratorNext(&it); } JSP_MATCH(')'); } else { // and NOT isParsing int args = 0; while (args<argCount) { JsVar *param = jsvObjectIteratorGetKey(&it); bool paramDefined = jsvIsFunctionParameter(param); jsvAddFunctionParameter(functionRoot, paramDefined?jsvNewFromStringVar(param,1,JSVAPPENDSTRINGVAR_MAXLENGTH):0, argPtr[args]); args++; jsvUnLock(param); if (paramDefined) jsvObjectIteratorNext(&it); } } // Now go through what's left while (jsvObjectIteratorHasValue(&it)) { JsVar *param = jsvObjectIteratorGetKey(&it); if (jsvIsString(param)) { if (jsvIsStringEqual(param, JSPARSE_FUNCTION_SCOPE_NAME)) functionScope = jsvSkipName(param); else if (jsvIsStringEqual(param, JSPARSE_FUNCTION_CODE_NAME)) functionCode = jsvSkipName(param); else if (jsvIsStringEqual(param, JSPARSE_FUNCTION_NAME_NAME)) functionInternalName = jsvSkipName(param); else if (jsvIsStringEqual(param, JSPARSE_FUNCTION_THIS_NAME)) { jsvUnLock(thisVar); thisVar = jsvSkipName(param); } #ifndef ESPR_NO_LINE_NUMBERS else if (jsvIsStringEqual(param, JSPARSE_FUNCTION_LINENUMBER_NAME)) functionLineNumber = (uint16_t)jsvGetIntegerAndUnLock(jsvSkipName(param)); #endif else if (jsvIsFunctionParameter(param)) { JsVar *defaultVal = jsvSkipName(param); jsvAddFunctionParameter(functionRoot, 
jsvNewFromStringVar(param,1,JSVAPPENDSTRINGVAR_MAXLENGTH), defaultVal); jsvUnLock(defaultVal); } } jsvUnLock(param); jsvObjectIteratorNext(&it); } jsvObjectIteratorFree(&it); // setup a the function's name (if a named function) if (functionInternalName) { JsVar *name = jsvMakeIntoVariableName(jsvNewFromStringVar(functionInternalName,0,JSVAPPENDSTRINGVAR_MAXLENGTH), function); jsvAddName(functionRoot, name); jsvUnLock2(name, functionInternalName); } if (!JSP_HAS_ERROR) { // save old scopes and reset scope list JsVar *oldScopeVar = execInfo.scopesVar; execInfo.scopesVar = 0; // if we have a scope var, load it up. We may not have one if there were no scopes apart from root if (functionScope) { jspeiLoadScopesFromVar(functionScope); jsvUnLock(functionScope); } // add the function's execute space to the symbol table so we can recurse if (jspeiAddScope(functionRoot)) { /* Adding scope may have failed - we may have descended too deep - so be sure * not to pull somebody else's scope off */ JsVar *oldThisVar = execInfo.thisVar; if (thisVar) execInfo.thisVar = jsvRef(thisVar); else execInfo.thisVar = jsvRef(execInfo.root); // 'this' should always default to root /* we just want to execute the block, but something could * have messed up and left us with the wrong Lexer, so * we want to be careful here... 
*/ if (functionCode) { #ifdef USE_DEBUGGER bool hadDebuggerNextLineOnly = false; if (execInfo.execute&EXEC_DEBUGGER_STEP_INTO) { if (functionName) jsiConsolePrintf("Stepping into %v\n", functionName); else jsiConsolePrintf("Stepping into function\n", functionName); } else { hadDebuggerNextLineOnly = execInfo.execute&EXEC_DEBUGGER_NEXT_LINE; if (hadDebuggerNextLineOnly) execInfo.execute &= (JsExecFlags)~EXEC_DEBUGGER_NEXT_LINE; } #endif JsLex newLex; JsLex *oldLex = jslSetLex(&newLex); jslInit(functionCode); #ifndef ESPR_NO_LINE_NUMBERS newLex.lineNumberOffset = functionLineNumber; #endif JSP_SAVE_EXECUTE(); // force execute without any previous state #ifdef USE_DEBUGGER execInfo.execute = EXEC_YES | (execInfo.execute&(EXEC_CTRL_C_MASK|EXEC_ERROR_MASK|EXEC_DEBUGGER_NEXT_LINE)); #else execInfo.execute = EXEC_YES | (execInfo.execute&(EXEC_CTRL_C_MASK|EXEC_ERROR_MASK)); #endif if (jsvIsFunctionReturn(function)) { #ifdef USE_DEBUGGER // we didn't parse a statement so wouldn't trigger the debugger otherwise if (execInfo.execute&EXEC_DEBUGGER_NEXT_LINE && JSP_SHOULD_EXECUTE) { lex->tokenLastStart = lex->tokenStart; jsiDebuggerLoop(); } #endif // implicit return - we just need an expression (optional) if (lex->tk != ';' && lex->tk != '}') returnVar = jsvSkipNameAndUnLock(jspeExpression()); } else { // setup a return variable JsVar *returnVarName = jsvAddNamedChild(functionRoot, 0, JSPARSE_RETURN_VAR); // parse the whole block jspeBlockNoBrackets(); /* get the real return var before we remove it from our function. * We can unlock below because returnVarName is still part of * functionRoot, so won't get freed. 
*/ returnVar = jsvSkipNameAndUnLock(returnVarName); if (returnVarName) // could have failed with out of memory jsvSetValueOfName(returnVarName, 0); // remove return value (which helps stops circular references) } // Store a stack trace if we had an error JsExecFlags hasError = execInfo.execute&EXEC_ERROR_MASK; JSP_RESTORE_EXECUTE(); // because return will probably have set execute to false #ifdef USE_DEBUGGER bool calledDebugger = false; if (execInfo.execute & EXEC_DEBUGGER_MASK) { jsiConsolePrint("Value returned is ="); jsfPrintJSON(returnVar, JSON_LIMIT | JSON_SOME_NEWLINES | JSON_PRETTY | JSON_SHOW_DEVICES); jsiConsolePrintChar('\n'); if (execInfo.execute & EXEC_DEBUGGER_FINISH_FUNCTION) { calledDebugger = true; jsiDebuggerLoop(); } } if (hadDebuggerNextLineOnly && !calledDebugger) execInfo.execute |= EXEC_DEBUGGER_NEXT_LINE; #endif jslKill(); jslSetLex(oldLex); if (hasError) { execInfo.execute |= hasError; // propogate error JsVar *stackTrace = jsvObjectGetChild(execInfo.hiddenRoot, JSPARSE_STACKTRACE_VAR, JSV_STRING_0); if (stackTrace) { jsvAppendPrintf(stackTrace, jsvIsString(functionName)?"in function %q called from ": "in function called from ", functionName); if (lex) { jspAppendStackTrace(stackTrace); } else jsvAppendPrintf(stackTrace, "system\n"); jsvUnLock(stackTrace); } } } /* Return to old 'this' var. No need to unlock as we never locked before */ if (execInfo.thisVar) jsvUnRef(execInfo.thisVar); execInfo.thisVar = oldThisVar; jspeiRemoveScope(); } // Unlock scopes and restore old ones jsvUnLock(execInfo.scopesVar); execInfo.scopesVar = oldScopeVar; } jsvUnLock(functionCode); jsvUnLock(functionRoot); } jsvUnLock(thisVar); return returnVar; } else if (isParsing) { // ---------------------------------- function, but not executing - just parse args and be done jspeParseFunctionCallBrackets(); /* Do not return function, as it will be unlocked! 
   */
    return 0;
  } else return 0;
}

// Find a variable (or built-in function) based on the current scopes.
// Returns a locked JsVar (a name) which the caller must unlock, or 0 when
// not executing. NOTE(review): locking convention inferred from the
// jsvUnLock calls below - confirm against the JsVar reference docs.
JsVar *jspGetNamedVariable(const char *tokenName) {
  // Only search the scope chain when we're actually executing
  JsVar *a = JSP_SHOULD_EXECUTE ? jspeiFindInScopes(tokenName) : 0;
  if (JSP_SHOULD_EXECUTE && !a) {
    /* Special case! We haven't found the variable, so check out
     * and see if it's one of our builtins...  */
    if (jswIsBuiltInObject(tokenName)) {
      // Check if we have a built-in function for it
      // OPT: Could we instead have jswIsBuiltInObjectWithoutConstructor?
      JsVar *obj = jswFindBuiltInFunction(0, tokenName);
      // If not, make one
      if (!obj)
        obj = jspNewBuiltin(tokenName);
      if (obj) { // not out of memory
        // Cache the built-in object under its name in the root scope so
        // later lookups find it via jspeiFindInScopes
        a = jsvAddNamedChild(execInfo.root, obj, tokenName);
        jsvUnLock(obj);
      }
    } else {
      a = jswFindBuiltInFunction(0, tokenName);
      if (!a) {
        /* Variable doesn't exist! JavaScript says we should create it
         * (we won't add it here. This is done in the assignment operator)*/
        // Unattached name (parent 0) - stays undefined until assigned to
        a = jsvMakeIntoVariableName(jsvNewFromString(tokenName), 0);
      }
    }
  }
  return a;
}

/// Used by jspGetNamedField / jspGetVarNamedField
static NO_INLINE JsVar *jspGetNamedFieldInParents(JsVar *object, const char* name, bool returnName) {
  // Now look in prototypes
  JsVar * child = jspeiFindChildFromStringInParents(object, name);

  /* Check for builtins via separate function
   * This way we save on RAM for built-ins because everything comes out of program code */
  if (!child) {
    child = jswFindBuiltInFunction(object, name);
  }

  /* We didn't get here if we found a child in the object itself, so
   * if we're here then we probably have the wrong name - so for example
   * with `a.b = c;` could end up setting `a.prototype.b` (bug #360)
   *
   * Also we might have got a built-in, which wouldn't have a name on it
   * anyway - so in both cases, strip the name if it is there, and create
   * a new name that references the object we actually requested the
   * member from..
*/ if (child && returnName) { // Get rid of existing name if (jsvIsName(child)) { JsVar *t = jsvGetValueOfName(child); jsvUnLock(child); child = t; } // create a new name JsVar *nameVar = jsvNewFromString(name); JsVar *newChild = jsvCreateNewChild(object, nameVar, child); jsvUnLock2(nameVar, child); child = newChild; } // If not found and is the prototype, create it if (!child) { if (jsvIsFunction(object) && strcmp(name, JSPARSE_PROTOTYPE_VAR)==0) { // prototype is supposed to be an object JsVar *proto = jsvNewObject(); // make sure it has a 'constructor' variable that points to the object it was part of jsvObjectSetChild(proto, JSPARSE_CONSTRUCTOR_VAR, object); child = jsvAddNamedChild(object, proto, JSPARSE_PROTOTYPE_VAR); jspEnsureIsPrototype(object, child); jsvUnLock(proto); } else if (strcmp(name, JSPARSE_INHERITS_VAR)==0) { const char *objName = jswGetBasicObjectName(object); if (objName) { child = jspNewPrototype(objName); } } } return child; } /** Get the named function/variable on the object - whether it's built in, or predefined. * If !returnName, returns the function/variable itself or undefined, but * if returnName, return a name (could be fake) referencing the parent. * * NOTE: ArrayBuffer/Strings are not handled here. We assume that if we're * passing a char* rather than a JsVar it's because we're looking up via * a symbol rather than a variable. 
 To handle these use jspGetVarNamedField */
// Looks the name up directly on the object first, then falls back to the
// prototype chain / built-in symbol tables via jspGetNamedFieldInParents.
// Returns a locked var (or 0); with returnName a (possibly fake) name
// referencing the parent is returned instead of the plain value.
JsVar *jspGetNamedField(JsVar *object, const char* name, bool returnName) {

  JsVar *child = 0;
  // if we're an object (or pretending to be one)
  if (jsvHasChildren(object))
    child = jsvFindChildFromString(object, name, false);

  if (!child) {
    child = jspGetNamedFieldInParents(object, name, returnName);

    // If not found and is the prototype, create it
    if (!child && jsvIsFunction(object) && strcmp(name, JSPARSE_PROTOTYPE_VAR)==0) {
      JsVar *value = jsvNewObject(); // prototype is supposed to be an object
      child = jsvAddNamedChild(object, value, JSPARSE_PROTOTYPE_VAR);
      jsvUnLock(value);
    }
  }

  if (returnName) return child;
  else return jsvSkipNameAndUnLock(child); // caller wanted the value, not the name
}

/// see jspGetNamedField - note that nameVar should have had jsvAsArrayIndex called on it first
JsVar *jspGetVarNamedField(JsVar *object, JsVar *nameVar, bool returnName) {
  JsVar *child = 0;
  // if we're an object (or pretending to be one)
  if (jsvHasChildren(object))
    child = jsvFindChildFromVar(object, nameVar, false);

  if (!child) {
    if (jsvIsArrayBuffer(object) && jsvIsInt(nameVar)) {
      // for array buffers, we actually create a NAME, and hand that back - then when we assign (or use SkipName) we pull out the correct data
      child = jsvMakeIntoVariableName(jsvNewFromInteger(jsvGetInteger(nameVar)), object);
      if (child) // turn into an 'array buffer name'
        child->flags = (child->flags & ~JSV_VARTYPEMASK) | JSV_ARRAYBUFFERNAME;
    } else if (jsvIsString(object) && jsvIsInt(nameVar)) {
      // Integer index into a string -> a one-character string result
      JsVarInt idx = jsvGetInteger(nameVar);
      if (idx>=0 && idx<(JsVarInt)jsvGetStringLength(object)) {
        char ch = jsvGetCharInString(object, (size_t)idx);
        child = jsvNewStringOfLength(1, &ch);
      } else if (returnName)
        child = jsvCreateNewChild(object, nameVar, 0); // just return *something* to show this is handled
    } else {
      // get the name as a string
      char name[JSLEX_MAX_TOKEN_LENGTH];
      jsvGetString(nameVar, name, JSLEX_MAX_TOKEN_LENGTH);
      // try and find it in parents
      child = jspGetNamedFieldInParents(object, name,
returnName); // If not found and is the prototype, create it if (!child && jsvIsFunction(object) && jsvIsStringEqual(nameVar, JSPARSE_PROTOTYPE_VAR)) { JsVar *value = jsvNewObject(); // prototype is supposed to be an object child = jsvAddNamedChild(object, value, JSPARSE_PROTOTYPE_VAR); jsvUnLock(value); } } } if (returnName) return child; else return jsvSkipNameAndUnLock(child); } /// Call the named function on the object - whether it's built in, or predefined. Returns the return value of the function. JsVar *jspCallNamedFunction(JsVar *object, char* name, int argCount, JsVar **argPtr) { JsVar *child = jspGetNamedField(object, name, false); JsVar *r = 0; if (jsvIsFunction(child)) r = jspeFunctionCall(child, 0, object, false, argCount, argPtr); jsvUnLock(child); return r; } NO_INLINE JsVar *jspeFactorMember(JsVar *a, JsVar **parentResult) { /* The parent if we're executing a method call */ JsVar *parent = 0; while (lex->tk=='.' || lex->tk=='[') { if (lex->tk == '.') { // ------------------------------------- Record Access JSP_ASSERT_MATCH('.'); if (jslIsIDOrReservedWord()) { if (JSP_SHOULD_EXECUTE) { // Note: name will go away when we parse something else! const char *name = jslGetTokenValueAsString(); JsVar *aVar = jsvSkipNameWithParent(a,true,parent); JsVar *child = 0; if (aVar) child = jspGetNamedField(aVar, name, true); if (!child) { if (!jsvIsUndefined(aVar)) { // if no child found, create a pointer to where it could be // as we don't want to allocate it until it's written JsVar *nameVar = jslGetTokenValueAsVar(); child = jsvCreateNewChild(aVar, nameVar, 0); jsvUnLock(nameVar); } else { // could have been a string... 
jsExceptionHere(JSET_ERROR, "Cannot read property '%s' of undefined", name); } } jsvUnLock(parent); parent = aVar; jsvUnLock(a); a = child; } // skip over current token (we checked above that it was an ID or reserved word) jslGetNextToken(); } else { // incorrect token - force a match fail by asking for an ID JSP_MATCH_WITH_RETURN(LEX_ID, a); } } else if (lex->tk == '[') { // ------------------------------------- Array Access JsVar *index; JSP_ASSERT_MATCH('['); if (!jspCheckStackPosition()) return parent; index = jsvSkipNameAndUnLock(jspeAssignmentExpression()); JSP_MATCH_WITH_CLEANUP_AND_RETURN(']', jsvUnLock2(parent, index);, a); if (JSP_SHOULD_EXECUTE) { index = jsvAsArrayIndexAndUnLock(index); JsVar *aVar = jsvSkipNameWithParent(a,true,parent); JsVar *child = 0; if (aVar) child = jspGetVarNamedField(aVar, index, true); if (!child) { if (jsvHasChildren(aVar)) { // if no child found, create a pointer to where it could be // as we don't want to allocate it until it's written child = jsvCreateNewChild(aVar, index, 0); } else { jsExceptionHere(JSET_ERROR, "Field or method %q does not already exist, and can't create it on %t", index, aVar); } } jsvUnLock(parent); parent = jsvLockAgainSafe(aVar); jsvUnLock(a); a = child; jsvUnLock(aVar); } jsvUnLock(index); } else { assert(0); } } if (parentResult) *parentResult = parent; else jsvUnLock(parent); return a; } NO_INLINE JsVar *jspeConstruct(JsVar *func, JsVar *funcName, bool hasArgs) { assert(JSP_SHOULD_EXECUTE); if (!jsvIsFunction(func)) { jsExceptionHere(JSET_ERROR, "Constructor should be a function, but is %t", func); return 0; } JsVar *thisObj = jsvNewObject(); if (!thisObj) return 0; // out of memory // Make sure the function has a 'prototype' var JsVar *prototypeName = jsvFindChildFromString(func, JSPARSE_PROTOTYPE_VAR, true); jspEnsureIsPrototype(func, prototypeName); // make sure it's an object JsVar *prototypeVar = jsvSkipName(prototypeName); jsvUnLock3(jsvAddNamedChild(thisObj, prototypeVar, 
JSPARSE_INHERITS_VAR), prototypeVar, prototypeName); JsVar *a = jspeFunctionCall(func, funcName, thisObj, hasArgs, 0, 0); /* FIXME: we should ignore return values that aren't objects (bug #848), but then we need * to be aware of `new String()` and `new Uint8Array()`. Ideally we'd let through * arrays/etc, and then String/etc should return 'boxed' values. * * But they don't return boxed values at the moment, so let's just * pass the return value through. If you try and return a string from * a function it's broken JS code anyway. */ if (a) { jsvUnLock(thisObj); thisObj = a; } else { jsvUnLock(a); } return thisObj; } NO_INLINE JsVar *jspeFactorFunctionCall() { /* The parent if we're executing a method call */ bool isConstructor = false; if (lex->tk==LEX_R_NEW) { JSP_ASSERT_MATCH(LEX_R_NEW); isConstructor = true; if (lex->tk==LEX_R_NEW) { jsExceptionHere(JSET_ERROR, "Nesting 'new' operators is unsupported"); jspSetError(false); return 0; } } JsVar *parent = 0; #ifndef SAVE_ON_FLASH bool wasSuper = lex->tk==LEX_R_SUPER; #endif JsVar *a = jspeFactorMember(jspeFactor(), &parent); #ifndef SAVE_ON_FLASH if (wasSuper) { /* if this was 'super.something' then we need * to overwrite the parent, because it'll be * set to the prototype otherwise. */ jsvUnLock(parent); parent = jsvLockAgainSafe(execInfo.thisVar); } #endif while ((lex->tk=='(' || (isConstructor && JSP_SHOULD_EXECUTE)) && !jspIsInterrupted()) { JsVar *funcName = a; JsVar *func = jsvSkipName(funcName); /* The constructor function doesn't change parsing, so if we're * not executing, just short-cut it. 
*/ if (isConstructor && JSP_SHOULD_EXECUTE) { // If we have '(' parse an argument list, otherwise don't look for any args bool parseArgs = lex->tk=='('; a = jspeConstruct(func, funcName, parseArgs); isConstructor = false; // don't treat subsequent brackets as constructors } else a = jspeFunctionCall(func, funcName, parent, true, 0, 0); jsvUnLock3(funcName, func, parent); parent=0; a = jspeFactorMember(a, &parent); } #ifndef SAVE_ON_FLASH /* If we've got something that we care about the parent of (eg. a getter/setter) * then we repackage it into a 'NewChild' name that references the parent before * we leave. Note: You can't do this on everything because normally NewChild * forces a new child to be blindly created. It works on Getters/Setters because * we *always* run those rather than adding them. */ if (parent && jsvIsBasicName(a) && !jsvIsNewChild(a)) { JsVar *value = jsvLockSafe(jsvGetFirstChild(a)); if (jsvIsGetterOrSetter(value)) { // no need to do this for functions since we've just executed whatever we needed to JsVar *nameVar = jsvCopyNameOnly(a,false,true); JsVar *newChild = jsvCreateNewChild(parent, nameVar, value); jsvUnLock2(nameVar, a); a = newChild; } jsvUnLock(value); } #endif jsvUnLock(parent); return a; } NO_INLINE JsVar *jspeFactorObject() { if (JSP_SHOULD_EXECUTE) { JsVar *contents = jsvNewObject(); if (!contents) { // out of memory jspSetError(false); return 0; } /* JSON-style object definition */ JSP_MATCH_WITH_RETURN('{', contents); while (!JSP_SHOULDNT_PARSE && lex->tk != '}') { JsVar *varName = 0; // we only allow strings or IDs on the left hand side of an initialisation if (jslIsIDOrReservedWord()) { if (JSP_SHOULD_EXECUTE) varName = jslGetTokenValueAsVar(); jslGetNextToken(); // skip over current token } else if ( lex->tk==LEX_STR || lex->tk==LEX_FLOAT || lex->tk==LEX_INT || lex->tk==LEX_R_TRUE || lex->tk==LEX_R_FALSE || lex->tk==LEX_R_NULL || lex->tk==LEX_R_UNDEFINED) { varName = jspeFactor(); } else { JSP_MATCH_WITH_RETURN(LEX_ID, 
contents); } #ifndef SAVE_ON_FLASH if (lex->tk==LEX_ID && jsvIsString(varName)) { bool isGetter = jsvIsStringEqual(varName, "get"); bool isSetter = jsvIsStringEqual(varName, "set"); if (isGetter || isSetter) { jsvUnLock(varName); varName = jslGetTokenValueAsVar(); JSP_ASSERT_MATCH(LEX_ID); JsVar *method = jspeFunctionDefinition(false); jsvAddGetterOrSetter(contents, varName, isGetter, method); jsvUnLock(method); } } else #endif { JSP_MATCH_WITH_CLEANUP_AND_RETURN(':', jsvUnLock(varName), contents); if (JSP_SHOULD_EXECUTE) { varName = jsvAsArrayIndexAndUnLock(varName); JsVar *contentsName = jsvFindChildFromVar(contents, varName, true); if (contentsName) { JsVar *value = jsvSkipNameAndUnLock(jspeAssignmentExpression()); // value can be 0 (could be undefined!) jsvUnLock2(jsvSetValueOfName(contentsName, value), value); } } } jsvUnLock(varName); // no need to clean here, as it will definitely be used if (lex->tk != '}') JSP_MATCH_WITH_RETURN(',', contents); } JSP_MATCH_WITH_RETURN('}', contents); return contents; } else { // Not executing so do fast skip jspeBlock(); return 0; } } NO_INLINE JsVar *jspeFactorArray() { int idx = 0; JsVar *contents = 0; if (JSP_SHOULD_EXECUTE) { contents = jsvNewEmptyArray(); if (!contents) { // out of memory jspSetError(false); return 0; } } /* JSON-style array */ JSP_MATCH_WITH_RETURN('[', contents); while (!JSP_SHOULDNT_PARSE && lex->tk != ']') { if (JSP_SHOULD_EXECUTE) { JsVar *aVar = 0; JsVar *indexName = 0; if (lex->tk != ',') { // #287 - [,] and [1,2,,4] are allowed aVar = jsvSkipNameAndUnLock(jspeAssignmentExpression()); indexName = jsvMakeIntoVariableName(jsvNewFromInteger(idx), aVar); } if (indexName) { // could be out of memory jsvAddName(contents, indexName); jsvUnLock(indexName); } jsvUnLock(aVar); } else { jsvUnLock(jspeAssignmentExpression()); } // no need to clean here, as it will definitely be used if (lex->tk != ']') JSP_MATCH_WITH_RETURN(',', contents); idx++; } if (contents) jsvSetArrayLength(contents, idx, false); 
JSP_MATCH_WITH_RETURN(']', contents);
  return contents;
}

// Make sure the value referenced by prototypeName is usable as a prototype:
// if it isn't an object/function it is replaced by a fresh object (raising a
// TypeError unless it was undefined), and a 'constructor' property pointing
// back at instanceOf is set on it.
NO_INLINE void jspEnsureIsPrototype(JsVar *instanceOf, JsVar *prototypeName) {
  if (!prototypeName) return;
  JsVar *prototypeVar = jsvSkipName(prototypeName);
  if (!(jsvIsObject(prototypeVar) || jsvIsFunction(prototypeVar))) {
    if (!jsvIsUndefined(prototypeVar))
      jsExceptionHere(JSET_TYPEERROR, "Prototype should be an object, got %t", prototypeVar);
    jsvUnLock(prototypeVar);
    prototypeVar = jsvNewObject(); // prototype is supposed to be an object
    JsVar *lastName = jsvSkipToLastName(prototypeName);
    jsvSetValueOfName(lastName, prototypeVar);
    jsvUnLock(lastName);
  }
  JsVar *constructor = jsvFindChildFromString(prototypeVar, JSPARSE_CONSTRUCTOR_VAR, true);
  if (constructor) jsvSetValueOfName(constructor, instanceOf);
  jsvUnLock2(constructor, prototypeVar);
}

// Handle the 'typeof' operator. Deliberately avoids raising a ReferenceError
// for undefined variables (typeof x is "undefined" even if x was never declared).
NO_INLINE JsVar *jspeFactorTypeOf() {
  JSP_ASSERT_MATCH(LEX_R_TYPEOF);
  JsVar *a = jspeUnaryExpression();
  JsVar *result = 0;
  if (JSP_SHOULD_EXECUTE) {
    if (!jsvIsVariableDefined(a)) {
      // so we don't get a ReferenceError when accessing an undefined var
      result=jsvNewFromString("undefined");
    } else {
      a = jsvSkipNameAndUnLock(a);
      result=jsvNewFromString(jsvGetTypeOf(a));
    }
  }
  jsvUnLock(a);
  return result;
}

// Handle the 'delete' operator: remove a named child from its parent.
// Returns a boolean var - true if the property was actually removed.
NO_INLINE JsVar *jspeFactorDelete() {
  JSP_ASSERT_MATCH(LEX_R_DELETE);
  JsVar *parent = 0;
  JsVar *a = jspeFactorMember(jspeFactor(), &parent);
  JsVar *result = 0;
  if (JSP_SHOULD_EXECUTE) {
    bool ok = false;
    if (jsvIsName(a) && !jsvIsNewChild(a)) {
      // if no parent, check in root?
      if (!parent && jsvIsChild(execInfo.root, a))
        parent = jsvLockAgain(execInfo.root);

      if (jsvHasChildren(parent)) { // else remove properly.
if (jsvIsArray(parent)) { // For arrays, we must make sure we don't change the length JsVarInt l = jsvGetArrayLength(parent); jsvRemoveChild(parent, a); jsvSetArrayLength(parent, l, false); } else { jsvRemoveChild(parent, a); } ok = true; } } result = jsvNewFromBool(ok); } jsvUnLock2(a, parent); return result; } #ifndef SAVE_ON_FLASH JsVar *jspeTemplateLiteral() { JsVar *a = 0; if (JSP_SHOULD_EXECUTE) { JsVar *template = jslGetTokenValueAsVar(); a = jsvNewFromEmptyString(); if (a && template) { JsvStringIterator it, dit; jsvStringIteratorNew(&it, template, 0); jsvStringIteratorNew(&dit, a, 0); while (jsvStringIteratorHasChar(&it)) { char ch = jsvStringIteratorGetCharAndNext(&it); if (ch=='$') { ch = jsvStringIteratorGetChar(&it); if (ch=='{') { // Now parse out the expression jsvStringIteratorNext(&it); int brackets = 1; JsVar *expr = jsvNewFromEmptyString(); if (!expr) break; JsvStringIterator eit; jsvStringIteratorNew(&eit, expr, 0); while (jsvStringIteratorHasChar(&it)) { ch = jsvStringIteratorGetCharAndNext(&it); if (ch=='{') brackets++; if (ch=='}') { brackets--; if (!brackets) break; } jsvStringIteratorAppend(&eit, ch); } jsvStringIteratorFree(&eit); JsVar *result = jspEvaluateExpressionVar(expr); jsvUnLock(expr); result = jsvAsStringAndUnLock(result); jsvStringIteratorAppendString(&dit, result, 0, JSVAPPENDSTRINGVAR_MAXLENGTH); jsvUnLock(result); } else { jsvStringIteratorAppend(&dit, '$'); } } else { jsvStringIteratorAppend(&dit, ch); } } jsvStringIteratorFree(&it); jsvStringIteratorFree(&dit); } jsvUnLock(template); } JSP_ASSERT_MATCH(LEX_TEMPLATE_LITERAL); return a; } #endif NO_INLINE JsVar *jspeAddNamedFunctionParameter(JsVar *funcVar, JsVar *name) { if (!funcVar) funcVar = jsvNewWithFlags(JSV_FUNCTION); char buf[JSLEX_MAX_TOKEN_LENGTH+1]; buf[0] = '\xFF'; size_t l = jsvGetString(name, &buf[1], JSLEX_MAX_TOKEN_LENGTH); buf[l+1] = 0; // zero terminate since jsvGetString doesn't add one JsVar *param = jsvAddNamedChild(funcVar, 0, buf); 
jsvMakeFunctionParameter(param); jsvUnLock(param); return funcVar; } #ifndef SAVE_ON_FLASH // parse an arrow function NO_INLINE JsVar *jspeArrowFunction(JsVar *funcVar, JsVar *a) { assert(!a || jsvIsName(a)); JSP_ASSERT_MATCH(LEX_ARROW_FUNCTION); funcVar = jspeAddNamedFunctionParameter(funcVar, a); bool expressionOnly = lex->tk!='{'; bool fnIncludesThis = jspeFunctionDefinitionInternal(funcVar, expressionOnly); /* Arrow functions store the value of 'this' when they were defined. In order to differentiate between normal functions we usually have to store 'this' even if 'this' was just the global object. Very few arrow functions actually use 'this' though - usually they are just used as a shorthand, and so we end up wasting a whole extra var for every single arrow function. So... while parsing the function's body we check of the 'this' keyword is used. If it isn't, we just don't include it. */ if (fnIncludesThis) jsvObjectSetChild(funcVar, JSPARSE_FUNCTION_THIS_NAME, execInfo.thisVar); return funcVar; } // parse expressions with commas, maybe followed by an arrow function (bracket already matched) NO_INLINE JsVar *jspeExpressionOrArrowFunction() { JsVar *a = 0; JsVar *funcVar = 0; bool allNames = true; while (lex->tk!=')' && !JSP_SHOULDNT_PARSE) { if (allNames && a) { // we never get here if this isn't a name and a string funcVar = jspeAddNamedFunctionParameter(funcVar, a); } jsvUnLock(a); a = jspeAssignmentExpression(); /* if we're not executing, `a` will always be undefined so don't do the check for allNames - just assume all is good. We'll properly check when we execute. 
*/ if (JSP_SHOULD_EXECUTE && !(jsvIsName(a) && jsvIsString(a))) allNames = false; if (lex->tk!=')') JSP_MATCH_WITH_CLEANUP_AND_RETURN(',', jsvUnLock2(a,funcVar), 0); } JSP_MATCH_WITH_CLEANUP_AND_RETURN(')', jsvUnLock2(a,funcVar), 0); // if all names inside brackets and an arrow is found, create a function if (allNames && lex->tk==LEX_ARROW_FUNCTION) { funcVar = jspeArrowFunction(funcVar, a); jsvUnLock(a); return funcVar; } else { jsvUnLock(funcVar); return a; } } /// Parse an ES6 class, expects LEX_R_CLASS already parsed NO_INLINE JsVar *jspeClassDefinition(bool parseNamedClass) { JsVar *classFunction = 0; JsVar *classPrototype = 0; JsVar *classInternalName = 0; bool actuallyCreateClass = JSP_SHOULD_EXECUTE; if (actuallyCreateClass) { classFunction = jsvNewWithFlags(JSV_FUNCTION); JsVar *scopeVar = jspeiGetScopesAsVar(); if (scopeVar) jsvUnLock2(jsvAddNamedChild(classFunction, scopeVar, JSPARSE_FUNCTION_SCOPE_NAME), scopeVar); } if (parseNamedClass && lex->tk==LEX_ID) { if (classFunction) classInternalName = jslGetTokenValueAsVar(); JSP_ASSERT_MATCH(LEX_ID); } if (classFunction) { JsVar *prototypeName = jsvFindChildFromString(classFunction, JSPARSE_PROTOTYPE_VAR, true); jspEnsureIsPrototype(classFunction, prototypeName); // make sure it's an object classPrototype = jsvSkipName(prototypeName); jsvUnLock(prototypeName); } if (lex->tk==LEX_R_EXTENDS) { JSP_ASSERT_MATCH(LEX_R_EXTENDS); JsVar *extendsFrom = actuallyCreateClass ? 
jsvSkipNameAndUnLock(jspGetNamedVariable(jslGetTokenValueAsString())) : 0; JSP_MATCH_WITH_CLEANUP_AND_RETURN(LEX_ID,jsvUnLock4(extendsFrom,classFunction,classInternalName,classPrototype),0); if (classPrototype) { if (jsvIsFunction(extendsFrom)) { JsVar *extendsFromProto = jsvObjectGetChild(extendsFrom, JSPARSE_PROTOTYPE_VAR, 0); if (extendsFromProto) { jsvObjectSetChild(classPrototype, JSPARSE_INHERITS_VAR, extendsFromProto); // link in default constructor if ours isn't supplied jsvObjectSetChildAndUnLock(classFunction, JSPARSE_FUNCTION_CODE_NAME, jsvNewFromString("if(this.__proto__.__proto__.constructor)this.__proto__.__proto__.constructor.apply(this,arguments)")); jsvUnLock(extendsFromProto); } } else jsExceptionHere(JSET_SYNTAXERROR, "'extends' argument should be a function, got %t", extendsFrom); } jsvUnLock(extendsFrom); } JSP_MATCH_WITH_CLEANUP_AND_RETURN('{',jsvUnLock3(classFunction,classInternalName,classPrototype),0); while ((lex->tk==LEX_ID || lex->tk==LEX_R_STATIC) && !jspIsInterrupted()) { bool isStatic = lex->tk==LEX_R_STATIC; if (isStatic) JSP_ASSERT_MATCH(LEX_R_STATIC); JsVar *funcName = jslGetTokenValueAsVar(); JSP_MATCH_WITH_CLEANUP_AND_RETURN(LEX_ID,jsvUnLock4(funcName,classFunction,classInternalName,classPrototype),0); #ifndef SAVE_ON_FLASH bool isGetter = false, isSetter = false; if (lex->tk==LEX_ID) { isGetter = jsvIsStringEqual(funcName, "get"); isSetter = jsvIsStringEqual(funcName, "set"); if (isGetter || isSetter) { jsvUnLock(funcName); funcName = jslGetTokenValueAsVar(); JSP_ASSERT_MATCH(LEX_ID); } } #endif JsVar *method = jspeFunctionDefinition(false); if (classFunction && classPrototype) { JsVar *obj = isStatic ? 
classFunction : classPrototype; if (jsvIsStringEqual(funcName, "constructor")) { jswrap_function_replaceWith(classFunction, method); #ifndef SAVE_ON_FLASH } else if (isGetter || isSetter) { jsvAddGetterOrSetter(obj, funcName, isGetter, method); #endif } else { funcName = jsvMakeIntoVariableName(funcName, 0); jsvSetValueOfName(funcName, method); jsvAddName(obj, funcName); } } jsvUnLock2(method,funcName); } jsvUnLock(classPrototype); // If we had a name, add it to the end (or it gets confused with the constructor arguments) if (classInternalName) jsvObjectSetChildAndUnLock(classFunction, JSPARSE_FUNCTION_NAME_NAME, classInternalName); JSP_MATCH_WITH_CLEANUP_AND_RETURN('}',jsvUnLock(classFunction),0); return classFunction; } #endif NO_INLINE JsVar *jspeFactor() { if (lex->tk==LEX_ID) { JsVar *a = jspGetNamedVariable(jslGetTokenValueAsString()); JSP_ASSERT_MATCH(LEX_ID); #ifndef SAVE_ON_FLASH if (lex->tk==LEX_TEMPLATE_LITERAL) jsExceptionHere(JSET_SYNTAXERROR, "Tagged template literals not supported"); else if (lex->tk==LEX_ARROW_FUNCTION && (jsvIsName(a) || (a==0 && !JSP_SHOULD_EXECUTE))) { // 'a' needs to be a name, *or* we're not executing so 0 gets returned anyway JsVar *funcVar = jspeArrowFunction(0,a); jsvUnLock(a); a=funcVar; } #endif return a; } else if (lex->tk==LEX_INT) { JsVar *v = 0; if (JSP_SHOULD_EXECUTE) { v = jsvNewFromLongInteger(stringToInt(jslGetTokenValueAsString())); } JSP_ASSERT_MATCH(LEX_INT); return v; } else if (lex->tk==LEX_FLOAT) { JsVar *v = 0; if (JSP_SHOULD_EXECUTE) { v = jsvNewFromFloat(stringToFloat(jslGetTokenValueAsString())); } JSP_ASSERT_MATCH(LEX_FLOAT); return v; } else if (lex->tk=='(') { JSP_ASSERT_MATCH('('); if (!jspCheckStackPosition()) return 0; #ifdef SAVE_ON_FLASH // Just parse a normal expression (which can include commas) JsVar *a = jspeExpression(); if (!JSP_SHOULDNT_PARSE) JSP_MATCH_WITH_RETURN(')',a); return a; #else return jspeExpressionOrArrowFunction(); #endif } else if (lex->tk==LEX_R_TRUE) { 
JSP_ASSERT_MATCH(LEX_R_TRUE); return JSP_SHOULD_EXECUTE ? jsvNewFromBool(true) : 0; } else if (lex->tk==LEX_R_FALSE) { JSP_ASSERT_MATCH(LEX_R_FALSE); return JSP_SHOULD_EXECUTE ? jsvNewFromBool(false) : 0; } else if (lex->tk==LEX_R_NULL) { JSP_ASSERT_MATCH(LEX_R_NULL); return JSP_SHOULD_EXECUTE ? jsvNewWithFlags(JSV_NULL) : 0; } else if (lex->tk==LEX_R_UNDEFINED) { JSP_ASSERT_MATCH(LEX_R_UNDEFINED); return 0; } else if (lex->tk==LEX_STR) { JsVar *a = 0; if (JSP_SHOULD_EXECUTE) a = jslGetTokenValueAsVar(); JSP_ASSERT_MATCH(LEX_STR); return a; #ifndef SAVE_ON_FLASH } else if (lex->tk==LEX_TEMPLATE_LITERAL) { return jspeTemplateLiteral(); #endif } else if (lex->tk==LEX_REGEX) { JsVar *a = 0; #ifdef SAVE_ON_FLASH jsExceptionHere(JSET_SYNTAXERROR, "RegEx are not supported in this version of Espruino\n"); #else JsVar *regex = jslGetTokenValueAsVar(); size_t regexEnd = 0, regexLen = 0; JsvStringIterator it; jsvStringIteratorNew(&it, regex, 0); while (jsvStringIteratorHasChar(&it)) { regexLen++; if (jsvStringIteratorGetCharAndNext(&it)=='/') regexEnd = regexLen; } jsvStringIteratorFree(&it); JsVar *flags = 0; if (regexEnd < regexLen) flags = jsvNewFromStringVar(regex, regexEnd, JSVAPPENDSTRINGVAR_MAXLENGTH); JsVar *regexSource = jsvNewFromStringVar(regex, 1, regexEnd-2); a = jswrap_regexp_constructor(regexSource, flags); jsvUnLock3(regex, flags, regexSource); #endif JSP_ASSERT_MATCH(LEX_REGEX); return a; } else if (lex->tk=='{') { if (!jspCheckStackPosition()) return 0; return jspeFactorObject(); } else if (lex->tk=='[') { if (!jspCheckStackPosition()) return 0; return jspeFactorArray(); } else if (lex->tk==LEX_R_FUNCTION) { if (!jspCheckStackPosition()) return 0; JSP_ASSERT_MATCH(LEX_R_FUNCTION); return jspeFunctionDefinition(true); #ifndef SAVE_ON_FLASH } else if (lex->tk==LEX_R_CLASS) { if (!jspCheckStackPosition()) return 0; JSP_ASSERT_MATCH(LEX_R_CLASS); return jspeClassDefinition(true); } else if (lex->tk==LEX_R_SUPER) { JSP_ASSERT_MATCH(LEX_R_SUPER); /* This is kind 
of nasty, since super appears to do three different things. * In the constructor it references the extended class's constructor * in a method it references the constructor's prototype. * in a static method it references the extended class's constructor (but this is different) */ if (jsvIsObject(execInfo.thisVar)) { // 'this' is an object - must be calling a normal method JsVar *proto1 = jsvObjectGetChild(execInfo.thisVar, JSPARSE_INHERITS_VAR, 0); // if we're in a method, get __proto__ first JsVar *proto2 = jsvIsObject(proto1) ? jsvObjectGetChild(proto1, JSPARSE_INHERITS_VAR, 0) : 0; // still in method, get __proto__.__proto__ jsvUnLock(proto1); if (!proto2) { jsExceptionHere(JSET_SYNTAXERROR, "Calling 'super' outside of class"); return 0; } // If we're doing super() we want the constructor if (lex->tk=='(') { JsVar *constr = jsvObjectGetChild(proto2, JSPARSE_CONSTRUCTOR_VAR, 0); jsvUnLock(proto2); return constr; } // But if we're doing something else - eg 'super.' or 'super[' then it needs to reference the prototype return proto2; } else if (jsvIsFunction(execInfo.thisVar)) { // 'this' is a function - must be calling a static method JsVar *proto1 = jsvObjectGetChild(execInfo.thisVar, JSPARSE_PROTOTYPE_VAR, 0); JsVar *proto2 = jsvIsObject(proto1) ? jsvObjectGetChild(proto1, JSPARSE_INHERITS_VAR, 0) : 0; jsvUnLock(proto1); if (!proto2) { jsExceptionHere(JSET_SYNTAXERROR, "Calling 'super' outside of class"); return 0; } JsVar *constr = jsvObjectGetChild(proto2, JSPARSE_CONSTRUCTOR_VAR, 0); jsvUnLock(proto2); return constr; } jsExceptionHere(JSET_SYNTAXERROR, "Calling 'super' outside of class"); return 0; #endif } else if (lex->tk==LEX_R_THIS) { JSP_ASSERT_MATCH(LEX_R_THIS); return jsvLockAgain( execInfo.thisVar ? 
execInfo.thisVar : execInfo.root ); } else if (lex->tk==LEX_R_DELETE) { if (!jspCheckStackPosition()) return 0; return jspeFactorDelete(); } else if (lex->tk==LEX_R_TYPEOF) { if (!jspCheckStackPosition()) return 0; return jspeFactorTypeOf(); } else if (lex->tk==LEX_R_VOID) { if (!jspCheckStackPosition()) return 0; JSP_ASSERT_MATCH(LEX_R_VOID); jsvUnLock(jspeUnaryExpression()); return 0; } JSP_MATCH(LEX_EOF); jsExceptionHere(JSET_SYNTAXERROR, "Unexpected end of Input\n"); return 0; } NO_INLINE JsVar *__jspePostfixExpression(JsVar *a) { while (lex->tk==LEX_PLUSPLUS || lex->tk==LEX_MINUSMINUS) { int op = lex->tk; JSP_ASSERT_MATCH(op); if (JSP_SHOULD_EXECUTE) { JsVar *one = jsvNewFromInteger(1); JsVar *oldValue = jsvAsNumberAndUnLock(jsvSkipName(a)); // keep the old value (but convert to number) JsVar *res = jsvMathsOpSkipNames(oldValue, one, op==LEX_PLUSPLUS ? '+' : '-'); jsvUnLock(one); // in-place add/subtract jsvReplaceWith(a, res); jsvUnLock(res); // but then use the old value jsvUnLock(a); a = oldValue; } } return a; } NO_INLINE JsVar *jspePostfixExpression() { JsVar *a; // TODO: should be in jspeUnaryExpression if (lex->tk==LEX_PLUSPLUS || lex->tk==LEX_MINUSMINUS) { int op = lex->tk; JSP_ASSERT_MATCH(op); a = jspePostfixExpression(); if (JSP_SHOULD_EXECUTE) { JsVar *one = jsvNewFromInteger(1); JsVar *res = jsvMathsOpSkipNames(a, one, op==LEX_PLUSPLUS ? '+' : '-'); jsvUnLock(one); // in-place add/subtract jsvReplaceWith(a, res); jsvUnLock(res); } } else a = jspeFactorFunctionCall(); return __jspePostfixExpression(a); } NO_INLINE JsVar *jspeUnaryExpression() { if (lex->tk=='!' 
|| lex->tk=='~' ||
      lex->tk=='-' ||
      lex->tk=='+') {
    short tk = lex->tk;
    JSP_ASSERT_MATCH(tk);
    if (!JSP_SHOULD_EXECUTE) {
      return jspeUnaryExpression();
    }
    if (tk=='!') { // logical not
      return jsvNewFromBool(!jsvGetBoolAndUnLock(jsvSkipNameAndUnLock(jspeUnaryExpression())));
    } else if (tk=='~') { // bitwise not
      return jsvNewFromInteger(~jsvGetIntegerAndUnLock(jsvSkipNameAndUnLock(jspeUnaryExpression())));
    } else if (tk=='-') { // unary minus
      return jsvNegateAndUnLock(jspeUnaryExpression()); // names already skipped
    } else if (tk=='+') { // unary plus (convert to number)
      JsVar *v = jsvSkipNameAndUnLock(jspeUnaryExpression());
      JsVar *r = jsvAsNumber(v); // names already skipped
      jsvUnLock(v);
      return r;
    }
    assert(0); // all four prefix tokens are handled above, so unreachable
    return 0;
  } else
    return jspePostfixExpression();
}

// Get the precedence of a BinaryExpression - or return 0 if not one.
// Higher value binds tighter. NOTE(review): the 'break' statements after
// 'return' below are dead code - harmless, left as-is.
unsigned int jspeGetBinaryExpressionPrecedence(int op) {
  switch (op) {
  case LEX_OROR: return 1; break;
  case LEX_ANDAND: return 2; break;
  case '|' : return 3; break;
  case '^' : return 4; break;
  case '&' : return 5; break;
  case LEX_EQUAL:
  case LEX_NEQUAL:
  case LEX_TYPEEQUAL:
  case LEX_NTYPEEQUAL: return 6;
  case LEX_LEQUAL:
  case LEX_GEQUAL:
  case '<':
  case '>':
  case LEX_R_INSTANCEOF: return 7;
  // Inside a for-loop initialiser 'in' is not a binary operator (precedence 0)
  // - presumably so `for (a in b)` parses; confirm against the for-statement parser
  case LEX_R_IN: return (execInfo.execute&EXEC_FOR_INIT)?0:7;
  case LEX_LSHIFT:
  case LEX_RSHIFT:
  case LEX_RSHIFTUNSIGNED: return 8;
  case '+':
  case '-': return 9;
  case '*':
  case '/':
  case '%': return 10;
  default: return 0;
  }
}

NO_INLINE JsVar *__jspeBinaryExpression(JsVar *a, unsigned int lastPrecedence) {
  /* This one's a bit strange. Basically all the ops have their own precedence, it's not
   * like & and | share the same precedence. We don't want to recurse for each one,
   * so instead we do this.
   *
   * We deal with an expression in recursion ONLY if it's of higher precedence
   * than the current one, otherwise we stick in the while loop.
   */
  unsigned int precedence = jspeGetBinaryExpressionPrecedence(lex->tk);
  while (precedence && precedence>lastPrecedence) {
    int op = lex->tk;
    JSP_ASSERT_MATCH(op);

    // if we have short-circuit ops, then if we know the outcome
    // we don't bother to execute the other op. Even if not
    // we need to tell mathsOp it's an & or |
    if (op==LEX_ANDAND || op==LEX_OROR) {
      bool aValue = jsvGetBoolAndUnLock(jsvSkipName(a));
      if ((!aValue && op==LEX_ANDAND) ||
          (aValue && op==LEX_OROR)) {
        // use first argument (A) - parse (but don't execute) the RHS just to consume it
        JSP_SAVE_EXECUTE();
        jspSetNoExecute();
        jsvUnLock(__jspeBinaryExpression(jspeUnaryExpression(),precedence));
        JSP_RESTORE_EXECUTE();
      } else {
        // use second argument (B)
        jsvUnLock(a);
        a = __jspeBinaryExpression(jspeUnaryExpression(),precedence);
      }
    } else { // else it's a more 'normal' logical expression - just use Maths
      JsVar *b = __jspeBinaryExpression(jspeUnaryExpression(),precedence);
      if (JSP_SHOULD_EXECUTE) {
        if (op==LEX_R_IN) {
          JsVar *av = jsvSkipName(a); // needle
          JsVar *bv = jsvSkipName(b); // haystack
          if (jsvHasChildren(bv)) { // search keys, NOT values
            av = jsvAsArrayIndexAndUnLock(av);
            JsVar *varFound = jspGetVarNamedField( bv, av, true);
            jsvUnLock2(a,varFound);
            a = jsvNewFromBool(varFound!=0);
          } else { // else maybe it's a fake object...
            const JswSymList *syms = jswGetSymbolListForObjectProto(bv);
            if (syms) {
              JsVar *varFound = 0;
              char nameBuf[JSLEX_MAX_TOKEN_LENGTH];
              // only search if the needle actually fits in the buffer
              if (jsvGetString(av, nameBuf, sizeof(nameBuf)) < sizeof(nameBuf))
                varFound = jswBinarySearch(syms, bv, nameBuf);
              bool found = varFound!=0;
              jsvUnLock2(a, varFound);
              if (!found && jsvIsArrayBuffer(bv)) {
                JsVarFloat f = jsvGetFloat(av); // if not a number this will be NaN, f==floor(f) fails
                if (f==floor(f) && f>=0 && f<jsvGetArrayBufferLength(bv))
                  found = true;
              }
              a = jsvNewFromBool(found);
            } else { // not built-in, just assume we can't do it
              jsExceptionHere(JSET_ERROR, "Cannot use 'in' operator to search a %t", bv);
              jsvUnLock(a);
              a = 0;
            }
          }
          jsvUnLock2(av, bv);
        } else if (op==LEX_R_INSTANCEOF) {
          bool inst = false;
          JsVar *av = jsvSkipName(a);
          JsVar *bv = jsvSkipName(b);
          if (!jsvIsFunction(bv)) {
            jsExceptionHere(JSET_ERROR, "Expecting a function on RHS in instanceof check, got %t", bv);
          } else {
            if (jsvIsObject(av) || jsvIsFunction(av)) {
              JsVar *bproto = jspGetNamedField(bv, JSPARSE_PROTOTYPE_VAR, false);
              JsVar *proto = jsvObjectGetChild(av, JSPARSE_INHERITS_VAR, 0);
              while (proto) {
                if (proto == bproto) inst=true;
                // search prototype chain
                JsVar *childProto = jsvObjectGetChild(proto, JSPARSE_INHERITS_VAR, 0);
                jsvUnLock(proto);
                proto = childProto;
              }
              if (jspIsConstructor(bv, "Object")) inst = true;
              jsvUnLock(bproto);
            }
            if (!inst) {
              const char *name = jswGetBasicObjectName(av);
              if (name) {
                inst = jspIsConstructor(bv, name);
              }
              // Hack for built-ins that should also be instances of Object
              if (!inst && (jsvIsArray(av) || jsvIsArrayBuffer(av)) &&
                  jspIsConstructor(bv, "Object"))
                inst = true;
            }
          }
          jsvUnLock3(av, bv, a);
          a = jsvNewFromBool(inst);
        } else {  // --------------------------------------------- NORMAL
          JsVar *res = jsvMathsOpSkipNames(a, b, op);
          jsvUnLock(a);
          a = res;
        }
      }
      jsvUnLock(b);
    }
    precedence = jspeGetBinaryExpressionPrecedence(lex->tk);
  }
  return a;
}

JsVar *jspeBinaryExpression() {
  return __jspeBinaryExpression(jspeUnaryExpression(),0);
}

/* Conditional (ternary) expression: lhs '?' expr ':' expr.
 * Only one arm is executed; the other is parsed with execution disabled. */
NO_INLINE JsVar *__jspeConditionalExpression(JsVar *lhs) {
  if (lex->tk=='?') {
    JSP_ASSERT_MATCH('?');
    if (!JSP_SHOULD_EXECUTE) {
      // just let lhs pass through
      jsvUnLock(jspeAssignmentExpression());
      JSP_MATCH(':');
      jsvUnLock(jspeAssignmentExpression());
    } else {
      bool first = jsvGetBoolAndUnLock(jsvSkipName(lhs));
      jsvUnLock(lhs);
      if (first) {
        lhs = jspeAssignmentExpression();
        JSP_MATCH(':');
        JSP_SAVE_EXECUTE();
        jspSetNoExecute();
        jsvUnLock(jspeAssignmentExpression());
        JSP_RESTORE_EXECUTE();
      } else {
        JSP_SAVE_EXECUTE();
        jspSetNoExecute();
        jsvUnLock(jspeAssignmentExpression());
        JSP_RESTORE_EXECUTE();
        JSP_MATCH(':');
        lhs = jspeAssignmentExpression();
      }
    }
  }
  return lhs;
}

JsVar *jspeConditionalExpression() {
  return __jspeConditionalExpression(jspeBinaryExpression());
}

/* Assignment expression: plain '=' and all the compound assignment operators.
 * Compound ops are rewritten to their plain maths op and applied via jsvMathsOpSkipNames. */
NO_INLINE JsVar *__jspeAssignmentExpression(JsVar *lhs) {
  if (lex->tk=='=' || lex->tk==LEX_PLUSEQUAL || lex->tk==LEX_MINUSEQUAL ||
      lex->tk==LEX_MULEQUAL || lex->tk==LEX_DIVEQUAL || lex->tk==LEX_MODEQUAL ||
      lex->tk==LEX_ANDEQUAL || lex->tk==LEX_OREQUAL ||
      lex->tk==LEX_XOREQUAL || lex->tk==LEX_RSHIFTEQUAL ||
      lex->tk==LEX_LSHIFTEQUAL || lex->tk==LEX_RSHIFTUNSIGNEDEQUAL) {
    JsVar *rhs;
    int op = lex->tk;
    JSP_ASSERT_MATCH(op);
    rhs = jspeAssignmentExpression();
    rhs = jsvSkipNameAndUnLock(rhs); // ensure we get rid of any references on the RHS
    if (JSP_SHOULD_EXECUTE && lhs) {
      if (op=='=') {
        jsvReplaceWithOrAddToRoot(lhs, rhs);
      } else {
        // map compound-assignment token to the underlying maths op
        if (op==LEX_PLUSEQUAL) op='+';
        else if (op==LEX_MINUSEQUAL) op='-';
        else if (op==LEX_MULEQUAL) op='*';
        else if (op==LEX_DIVEQUAL) op='/';
        else if (op==LEX_MODEQUAL) op='%';
        else if (op==LEX_ANDEQUAL) op='&';
        else if (op==LEX_OREQUAL) op='|';
        else if (op==LEX_XOREQUAL) op='^';
        else if (op==LEX_RSHIFTEQUAL) op=LEX_RSHIFT;
        else if (op==LEX_LSHIFTEQUAL) op=LEX_LSHIFT;
        else if (op==LEX_RSHIFTUNSIGNEDEQUAL) op=LEX_RSHIFTUNSIGNED;
        if (op=='+' && jsvIsName(lhs)) {
          JsVar *currentValue = jsvSkipName(lhs);
          if (jsvIsBasicString(currentValue) && jsvGetRefs(currentValue)==1 && rhs!=currentValue) {
            /* A special case for string += where this is the only use of the string
             * and we're not appending to ourselves. In this case we can do a
             * simple append (rather than clone + append)*/
            JsVar *str = jsvAsString(rhs);
            jsvAppendStringVarComplete(currentValue, str);
            jsvUnLock(str);
            op = 0; // append done - suppress the fallback below
          }
          jsvUnLock(currentValue);
        }
        if (op) {
          /* Fallback which does a proper add */
          JsVar *res = jsvMathsOpSkipNames(lhs,rhs,op);
          jsvReplaceWith(lhs, res);
          jsvUnLock(res);
        }
      }
    }
    jsvUnLock(rhs);
  }
  return lhs;
}

JsVar *jspeAssignmentExpression() {
  return __jspeAssignmentExpression(jspeConditionalExpression());
}

// ',' is allowed to add multiple expressions, this is not allowed in jspeAssignmentExpression
NO_INLINE JsVar *jspeExpression() {
  while (!JSP_SHOULDNT_PARSE) {
    JsVar *a = jspeAssignmentExpression();
    if (lex->tk!=',') return a;
    // if we get a comma, we just forget this data and parse the next bit...
    jsvCheckReferenceError(a);
    jsvUnLock(a);
    JSP_ASSERT_MATCH(',');
  }
  return 0;
}

/** Parse a block `{ ... }` */
NO_INLINE void jspeSkipBlock() {
  // fast skip of blocks - just balance the braces, don't execute anything
  int brackets = 1;
  while (lex->tk && brackets) {
    if (lex->tk == '{') brackets++;
    else if (lex->tk == '}') {
      brackets--;
      if (!brackets) return;
    }
    JSP_ASSERT_MATCH(lex->tk);
  }
}

/** Parse a block `{ ... }` but assume brackets are already parsed */
NO_INLINE void jspeBlockNoBrackets() {
  if (JSP_SHOULD_EXECUTE) {
    while (lex->tk && lex->tk!='}') {
      JsVar *a = jspeStatement();
      jsvCheckReferenceError(a);
      jsvUnLock(a);
      if (JSP_HAS_ERROR) {
        // record the first error location in the hidden stack-trace var
        if (lex && !(execInfo.execute&EXEC_ERROR_LINE_REPORTED)) {
          execInfo.execute = (JsExecFlags)(execInfo.execute | EXEC_ERROR_LINE_REPORTED);
          JsVar *stackTrace = jsvObjectGetChild(execInfo.hiddenRoot, JSPARSE_STACKTRACE_VAR, JSV_STRING_0);
          if (stackTrace) {
            jsvAppendPrintf(stackTrace, "at ");
            jspAppendStackTrace(stackTrace);
            jsvUnLock(stackTrace);
          }
        }
      }
      if (JSP_SHOULDNT_PARSE)
        return;
      if (!JSP_SHOULD_EXECUTE) {
        // execution got turned off mid-block (eg return/break) - skip the rest quickly
        jspeSkipBlock();
        return;
      }
    }
  } else {
    jspeSkipBlock();
  }
  return;
}

/** Parse a block `{ ...
}` */
NO_INLINE void jspeBlock() {
  JSP_MATCH_WITH_RETURN('{',);
  jspeBlockNoBrackets();
  if (!JSP_SHOULDNT_PARSE) JSP_MATCH_WITH_RETURN('}',);
  return;
}

/** Parse either a `{...}` block (returns 0) or a single statement (returns its value) */
NO_INLINE JsVar *jspeBlockOrStatement() {
  if (lex->tk=='{') {
    jspeBlock();
    return 0;
  } else {
    JsVar *v = jspeStatement();
    if (lex->tk==';') JSP_ASSERT_MATCH(';');
    return v;
  }
}

/** Parse using current lexer until we hit the end of
 * input or there was some problem. */
NO_INLINE JsVar *jspParse() {
  JsVar *v = 0;
  while (!JSP_SHOULDNT_PARSE && lex->tk != LEX_EOF) {
    jsvUnLock(v); // only the last statement's value is kept
    v = jspeBlockOrStatement();
    jsvCheckReferenceError(v);
  }
  return v;
}

NO_INLINE JsVar *jspeStatementVar() {
  JsVar *lastDefined = 0;
  /* variable creation. TODO - we need a better way of parsing the left
   * hand side. Maybe just have a flag called can_create_var that we
   * set and then we parse as if we're doing a normal equals.*/
  assert(lex->tk==LEX_R_VAR || lex->tk==LEX_R_LET || lex->tk==LEX_R_CONST);
  jslGetNextToken();
  ///TODO: Correctly implement CONST and LET - we just treat them like 'var' at the moment
  bool hasComma = true; // for first time in loop
  while (hasComma && lex->tk == LEX_ID && !jspIsInterrupted()) {
    JsVar *a = 0;
    if (JSP_SHOULD_EXECUTE) {
      a = jspeiFindOnTop(jslGetTokenValueAsString(), true);
      if (!a) { // out of memory
        jspSetError(false);
        return lastDefined;
      }
    }
    JSP_MATCH_WITH_CLEANUP_AND_RETURN(LEX_ID, jsvUnLock(a), lastDefined);
    // sort out initialiser
    if (lex->tk == '=') {
      JsVar *var;
      JSP_MATCH_WITH_CLEANUP_AND_RETURN('=', jsvUnLock(a), lastDefined);
      var = jsvSkipNameAndUnLock(jspeAssignmentExpression());
      if (JSP_SHOULD_EXECUTE)
        jsvReplaceWith(a, var);
      jsvUnLock(var);
    }
    jsvUnLock(lastDefined);
    lastDefined = a;
    hasComma = lex->tk == ',';
    if (hasComma) JSP_MATCH_WITH_RETURN(',', lastDefined);
  }
  return lastDefined;
}

/** if (...) ... else ... - the non-taken branch is parsed with execution disabled */
NO_INLINE JsVar *jspeStatementIf() {
  bool cond;
  JsVar *var, *result = 0;
  JSP_ASSERT_MATCH(LEX_R_IF);
  JSP_MATCH('(');
  var = jspeExpression();
  if (JSP_SHOULDNT_PARSE) return var;
  JSP_MATCH(')');
  cond = JSP_SHOULD_EXECUTE && jsvGetBoolAndUnLock(jsvSkipName(var));
  jsvUnLock(var);

  JSP_SAVE_EXECUTE();
  if (!cond) jspSetNoExecute();
  JsExecFlags hasError = 0;
  JsVar *a = jspeBlockOrStatement();
  hasError |= execInfo.execute&EXEC_ERROR_MASK; // keep error flags across the execute-state restore
  if (!cond) {
    jsvUnLock(a);
    JSP_RESTORE_EXECUTE();
    execInfo.execute |= hasError;
  } else {
    result = a;
  }
  if (lex->tk==LEX_R_ELSE) {
    JSP_ASSERT_MATCH(LEX_R_ELSE);
    JSP_SAVE_EXECUTE();
    if (cond) jspSetNoExecute();
    JsVar *a = jspeBlockOrStatement();
    hasError |= execInfo.execute&EXEC_ERROR_MASK;
    if (cond) {
      jsvUnLock(a);
      JSP_RESTORE_EXECUTE();
      execInfo.execute |= hasError;
    } else {
      result = a;
    }
  }
  return result;
}

/** switch (...) { case ...: ... default: ... } - cases are compared with '===' */
NO_INLINE JsVar *jspeStatementSwitch() {
  JSP_ASSERT_MATCH(LEX_R_SWITCH);
  JSP_MATCH('(');
  JsVar *switchOn = jspeExpression();
  JSP_SAVE_EXECUTE();
  bool execute = JSP_SHOULD_EXECUTE;
  JSP_MATCH_WITH_CLEANUP_AND_RETURN(')', jsvUnLock(switchOn), 0);
  // shortcut if not executing...
  if (!execute) {
    jsvUnLock(switchOn);
    jspeBlock();
    return 0;
  }
  JSP_MATCH_WITH_CLEANUP_AND_RETURN('{', jsvUnLock(switchOn), 0);

  bool executeDefault = true;
  if (execute) execInfo.execute=EXEC_NO|EXEC_IN_SWITCH; // off until a case matches
  while (lex->tk==LEX_R_CASE) {
    JSP_MATCH_WITH_CLEANUP_AND_RETURN(LEX_R_CASE, jsvUnLock(switchOn), 0);
    JsExecFlags oldFlags = execInfo.execute;
    if (execute) execInfo.execute=EXEC_YES|EXEC_IN_SWITCH; // the case label expression itself must run
    JsVar *test = jspeAssignmentExpression();
    execInfo.execute = oldFlags|EXEC_IN_SWITCH;;
    JSP_MATCH_WITH_CLEANUP_AND_RETURN(':', jsvUnLock2(switchOn, test), 0);
    bool cond = false;
    if (execute)
      cond = jsvGetBoolAndUnLock(jsvMathsOpSkipNames(switchOn, test, LEX_TYPEEQUAL));
    if (cond) executeDefault = false;
    jsvUnLock(test);
    if (cond && (execInfo.execute&EXEC_RUN_MASK)==EXEC_NO)
      execInfo.execute=EXEC_YES|EXEC_IN_SWITCH;
    while (!JSP_SHOULDNT_PARSE && lex->tk!=LEX_EOF && lex->tk!=LEX_R_CASE && lex->tk!=LEX_R_DEFAULT && lex->tk!='}')
      jsvUnLock(jspeBlockOrStatement());
    oldExecute |= execInfo.execute & (EXEC_ERROR_MASK|EXEC_RETURN); // copy across any errors/exceptions/returns
  }
jsvUnLock(switchOn); if (execute && (execInfo.execute&EXEC_RUN_MASK)==EXEC_BREAK) { execInfo.execute=EXEC_YES|EXEC_IN_SWITCH; } else { executeDefault = true; } JSP_RESTORE_EXECUTE(); if (lex->tk==LEX_R_DEFAULT) { JSP_ASSERT_MATCH(LEX_R_DEFAULT); JSP_MATCH(':'); JSP_SAVE_EXECUTE(); if (!executeDefault) jspSetNoExecute(); else execInfo.execute |= EXEC_IN_SWITCH; while (!JSP_SHOULDNT_PARSE && lex->tk!=LEX_EOF && lex->tk!='}' && lex->tk!=LEX_R_CASE) jsvUnLock(jspeBlockOrStatement()); oldExecute |= execInfo.execute & (EXEC_ERROR_MASK|EXEC_RETURN); // copy across any errors/exceptions/returns execInfo.execute = execInfo.execute & (JsExecFlags)~EXEC_BREAK; JSP_RESTORE_EXECUTE(); } if (lex->tk==LEX_R_CASE) { jsExceptionHere(JSET_SYNTAXERROR, "Espruino doesn't support CASE after DEFAULT"); return 0; } JSP_MATCH('}'); return 0; } // Check whether we received a break/continue while parsing previously. Return true if we had a 'break; static NO_INLINE bool jspeCheckBreakContinue() { if (execInfo.execute & EXEC_CONTINUE) execInfo.execute = (execInfo.execute & ~EXEC_RUN_MASK) | EXEC_YES; else if (execInfo.execute & EXEC_BREAK) { execInfo.execute = (execInfo.execute & ~EXEC_RUN_MASK) | EXEC_YES; return true; } return false; } NO_INLINE JsVar *jspeStatementDoOrWhile(bool isWhile) { JsVar *cond; bool loopCond = true; // true for do...while loops bool hasHadBreak = false; JslCharPos whileCondStart; // We do repetition by pulling out the string representing our statement // there's definitely some opportunity for optimisation here bool wasInLoop = (execInfo.execute&EXEC_IN_LOOP)!=0; JslCharPos whileBodyStart; if (isWhile) { // while loop JSP_ASSERT_MATCH(LEX_R_WHILE); jslCharPosFromLex(&whileCondStart); JSP_MATCH_WITH_CLEANUP_AND_RETURN('(',jslCharPosFree(&whileCondStart);,0); cond = jspeExpression(); loopCond = JSP_SHOULD_EXECUTE && jsvGetBoolAndUnLock(jsvSkipName(cond)); jsvUnLock(cond); jslCharPosFromLex(&whileBodyStart); 
JSP_MATCH_WITH_CLEANUP_AND_RETURN(')',jslCharPosFree(&whileBodyStart);jslCharPosFree(&whileCondStart);,0); } else { jslCharPosFromLex(&whileBodyStart); JSP_MATCH_WITH_CLEANUP_AND_RETURN(LEX_R_DO, jslCharPosFree(&whileBodyStart);,0); } JSP_SAVE_EXECUTE(); // actually try and execute first bit of while loop (we'll do the rest in the actual loop later) if (!loopCond) jspSetNoExecute(); execInfo.execute |= EXEC_IN_LOOP; jsvUnLock(jspeBlockOrStatement()); if (!wasInLoop) execInfo.execute &= (JsExecFlags)~EXEC_IN_LOOP; hasHadBreak |= jspeCheckBreakContinue(); if (!loopCond) JSP_RESTORE_EXECUTE(); if (!isWhile) { // do..while loop JSP_MATCH_WITH_CLEANUP_AND_RETURN(LEX_R_WHILE,jslCharPosFree(&whileBodyStart);,0); jslCharPosFromLex(&whileCondStart); JSP_MATCH_WITH_CLEANUP_AND_RETURN('(',jslCharPosFree(&whileBodyStart);jslCharPosFree(&whileCondStart);,0); cond = jspeExpression(); loopCond = JSP_SHOULD_EXECUTE && jsvGetBoolAndUnLock(jsvSkipName(cond)); jsvUnLock(cond); JSP_MATCH_WITH_CLEANUP_AND_RETURN(')',jslCharPosFree(&whileBodyStart);jslCharPosFree(&whileCondStart);,0); } JslCharPos whileBodyEnd; jslCharPosNew(&whileBodyEnd, lex->sourceVar, lex->tokenStart); int loopCount = 0; while (!hasHadBreak && loopCond #ifdef JSPARSE_MAX_LOOP_ITERATIONS && loopCount<JSPARSE_MAX_LOOP_ITERATIONS #endif ) { if (isWhile || loopCount) { // don't check the start condition a second time if we're in a do..while loop jslSeekToP(&whileCondStart); cond = jspeExpression(); loopCond = JSP_SHOULD_EXECUTE && jsvGetBoolAndUnLock(jsvSkipName(cond)); jsvUnLock(cond); } if (loopCond) { jslSeekToP(&whileBodyStart); execInfo.execute |= EXEC_IN_LOOP; jspDebuggerLoopIfCtrlC(); jsvUnLock(jspeBlockOrStatement()); if (!wasInLoop) execInfo.execute &= (JsExecFlags)~EXEC_IN_LOOP; hasHadBreak |= jspeCheckBreakContinue(); } loopCount++; } jslSeekToP(&whileBodyEnd); jslCharPosFree(&whileCondStart); jslCharPosFree(&whileBodyStart); jslCharPosFree(&whileBodyEnd); #ifdef JSPARSE_MAX_LOOP_ITERATIONS if (loopCount > 
JSPARSE_MAX_LOOP_ITERATIONS) { jsExceptionHere(JSET_ERROR, "WHILE Loop exceeded the maximum number of iterations (" STRINGIFY(JSPARSE_MAX_LOOP_ITERATIONS) ")"); } #endif return 0; } NO_INLINE JsVar *jspGetBuiltinPrototype(JsVar *obj) { if (jsvIsArray(obj)) { JsVar *v = jspFindPrototypeFor("Array"); if (v) return v; } if (jsvIsObject(obj) || jsvIsArray(obj)) { JsVar *v = jspFindPrototypeFor("Object"); if (v==obj) { // don't return ourselves jsvUnLock(v); v = 0; } return v; } return 0; } NO_INLINE JsVar *jspeStatementFor() { JSP_ASSERT_MATCH(LEX_R_FOR); JSP_MATCH('('); bool wasInLoop = (execInfo.execute&EXEC_IN_LOOP)!=0; execInfo.execute |= EXEC_FOR_INIT; // initialisation JsVar *forStatement = 0; // we could have 'for (;;)' - so don't munch up our semicolon if that's all we have if (lex->tk != ';') forStatement = jspeStatement(); if (jspIsInterrupted()) { jsvUnLock(forStatement); return 0; } execInfo.execute &= (JsExecFlags)~EXEC_FOR_INIT; #ifndef SAVE_ON_FLASH_EXTREME if (lex->tk == LEX_R_IN || lex->tk == LEX_R_OF) { bool isForOf = lex->tk == LEX_R_OF; // for (i in array) or for (i of array) // where i = forStatement if (JSP_SHOULD_EXECUTE && !jsvIsName(forStatement)) { jsvUnLock(forStatement); jsExceptionHere(JSET_ERROR, "for(a %s b) - 'a' must be a variable name, not %t", isForOf?"of":"in", forStatement); return 0; } JSP_ASSERT_MATCH(lex->tk); // skip over in/of JsVar *array = jsvSkipNameAndUnLock(jspeExpression()); JslCharPos forBodyStart; jslCharPosFromLex(&forBodyStart); JSP_MATCH_WITH_CLEANUP_AND_RETURN(')', jsvUnLock2(forStatement, array);jslCharPosFree(&forBodyStart), 0); // Simply scan over the loop the first time without executing to figure out where it ends // OPT: we could skip the first parse and actually execute the first time JSP_SAVE_EXECUTE(); jspSetNoExecute(); execInfo.execute |= EXEC_IN_LOOP; jsvUnLock(jspeBlockOrStatement()); JslCharPos forBodyEnd; jslCharPosNew(&forBodyEnd, lex->sourceVar, lex->tokenStart); if (!wasInLoop) execInfo.execute &= 
        (JsExecFlags)~EXEC_IN_LOOP;
    JSP_RESTORE_EXECUTE();
    // Now start executing properly
    if (JSP_SHOULD_EXECUTE) {
      if (jsvIsIterable(array)) {
        // checkerFunction filters out internal/hidden keys for for..in
        JsvIsInternalChecker checkerFunction = jsvGetInternalFunctionCheckerFor(array);
        JsVar *foundPrototype = 0;
        if (!isForOf) // for..in
          foundPrototype = jspGetBuiltinPrototype(array);
        JsvIterator it;
        jsvIteratorNew(&it, array, isForOf ?
            /* for of */ JSIF_EVERY_ARRAY_ELEMENT :
            /* for in */ JSIF_DEFINED_ARRAY_ElEMENTS);
        bool hasHadBreak = false;
        while (JSP_SHOULD_EXECUTE && jsvIteratorHasElement(&it) && !hasHadBreak) {
          JsVar *loopIndexVar = jsvIteratorGetKey(&it);
          bool ignore = false;
          if (checkerFunction && checkerFunction(loopIndexVar)) {
            ignore = true;
            // remember __proto__ so we can continue iterating up the chain below
            if (jsvIsString(loopIndexVar) &&
                jsvIsStringEqual(loopIndexVar, JSPARSE_INHERITS_VAR))
              foundPrototype = jsvSkipName(loopIndexVar);
          }
          if (!ignore) {
            JsVar *iteratorValue;
            if (isForOf) { // for (... of ...) - iterate over values
              iteratorValue = jsvIteratorGetValue(&it);
            } else { // for (... in ...) - iterate over keys
              iteratorValue = jsvIsName(loopIndexVar) ?
                  jsvCopyNameOnly(loopIndexVar, false/*no copy children*/, false/*not a name*/) :
                  loopIndexVar;
              assert(jsvGetRefs(iteratorValue)==0);
            }
            if (isForOf || iteratorValue) { // could be out of memory
              assert(!jsvIsName(iteratorValue));
              jsvReplaceWithOrAddToRoot(forStatement, iteratorValue);
              if (iteratorValue!=loopIndexVar) jsvUnLock(iteratorValue);
              // re-run the (already scanned) loop body
              jslSeekToP(&forBodyStart);
              execInfo.execute |= EXEC_IN_LOOP;
              jspDebuggerLoopIfCtrlC();
              jsvUnLock(jspeBlockOrStatement());
              if (!wasInLoop) execInfo.execute &= (JsExecFlags)~EXEC_IN_LOOP;
              hasHadBreak |= jspeCheckBreakContinue();
            }
          }
          jsvIteratorNext(&it);
          jsvUnLock(loopIndexVar);
          // if using for..in we'll skip down the prototype chain when we reach the end of the current one
          if (!jsvIteratorHasElement(&it) && !isForOf && foundPrototype) {
            jsvIteratorFree(&it);
            JsVar *iterable = foundPrototype;
            jsvIteratorNew(&it, iterable, JSIF_DEFINED_ARRAY_ElEMENTS);
            checkerFunction = jsvGetInternalFunctionCheckerFor(iterable);
            foundPrototype = jspGetBuiltinPrototype(iterable);
            jsvUnLock(iterable);
          }
        }
        assert(!foundPrototype);
        jsvIteratorFree(&it);
      } else if (!jsvIsUndefined(array)) {
        jsExceptionHere(JSET_ERROR, "FOR loop can only iterate over Arrays, Strings or Objects, not %t", array);
      }
    }
    jslSeekToP(&forBodyEnd);
    jslCharPosFree(&forBodyStart);
    jslCharPosFree(&forBodyEnd);
    jsvUnLock2(forStatement, array);
#else // SAVE_ON_FLASH_EXTREME
  if (false) {
#endif // SAVE_ON_FLASH_EXTREME
  } else { // ----------------------------------------------- NORMAL FOR LOOP
#ifdef JSPARSE_MAX_LOOP_ITERATIONS
    int loopCount = JSPARSE_MAX_LOOP_ITERATIONS; // iteration budget, counted down
#endif
    bool loopCond = true;
    bool hasHadBreak = false;
    jsvUnLock(forStatement);
    JslCharPos forCondStart;
    jslCharPosFromLex(&forCondStart);
    JSP_MATCH_WITH_CLEANUP_AND_RETURN(';',jslCharPosFree(&forCondStart);,0);
    if (lex->tk != ';') {
      JsVar *cond = jspeExpression(); // condition
      loopCond = JSP_SHOULD_EXECUTE && jsvGetBoolAndUnLock(jsvSkipName(cond));
      jsvUnLock(cond);
    }
    JslCharPos forIterStart;
    jslCharPosFromLex(&forIterStart);
    JSP_MATCH_WITH_CLEANUP_AND_RETURN(';',jslCharPosFree(&forCondStart);jslCharPosFree(&forIterStart);,0);
    if (lex->tk != ')') { // we could have 'for (;;)'
      // parse (but don't run) the iterator expression just to skip over it
      JSP_SAVE_EXECUTE();
      jspSetNoExecute();
      jsvUnLock(jspeExpression()); // iterator
      JSP_RESTORE_EXECUTE();
    }
    JslCharPos forBodyStart;
    jslSkipWhiteSpace();
    jslCharPosFromLex(&forBodyStart); // actual for body
    JSP_MATCH_WITH_CLEANUP_AND_RETURN(')',jslCharPosFree(&forCondStart);jslCharPosFree(&forIterStart);jslCharPosFree(&forBodyStart);,0);

    // first pass over the body (not executed if the condition was false)
    JSP_SAVE_EXECUTE();
    if (!loopCond) jspSetNoExecute();
    execInfo.execute |= EXEC_IN_LOOP;
    jsvUnLock(jspeBlockOrStatement());
    JslCharPos forBodyEnd;
    jslSkipWhiteSpace();
    jslCharPosNew(&forBodyEnd, lex->sourceVar, lex->tokenStart);
    if (!wasInLoop) execInfo.execute &= (JsExecFlags)~EXEC_IN_LOOP;
    if (loopCond || !JSP_SHOULD_EXECUTE) {
      hasHadBreak |= jspeCheckBreakContinue();
    }
    if (!loopCond) JSP_RESTORE_EXECUTE();
    if (loopCond) {
      jslSeekToP(&forIterStart);
      if (lex->tk != ')') jsvUnLock(jspeExpression());
    }
    while (!hasHadBreak && JSP_SHOULD_EXECUTE && loopCond
#ifdef JSPARSE_MAX_LOOP_ITERATIONS
           && loopCount-->0
#endif
           ) {
      jslSeekToP(&forCondStart);
      ;
      if (lex->tk == ';') {
        loopCond = true; // empty condition - always true
      } else {
        JsVar *cond = jspeExpression();
        loopCond = jsvGetBoolAndUnLock(jsvSkipName(cond));
        jsvUnLock(cond);
      }
      if (JSP_SHOULD_EXECUTE && loopCond) {
        jslSeekToP(&forBodyStart);
        execInfo.execute |= EXEC_IN_LOOP;
        jspDebuggerLoopIfCtrlC();
        jsvUnLock(jspeBlockOrStatement());
        if (!wasInLoop) execInfo.execute &= (JsExecFlags)~EXEC_IN_LOOP;
        hasHadBreak |= jspeCheckBreakContinue();
      }
      if (JSP_SHOULD_EXECUTE && loopCond && !hasHadBreak) {
        jslSeekToP(&forIterStart);
        if (lex->tk != ')') jsvUnLock(jspeExpression());
      }
    }
    jslSeekToP(&forBodyEnd);
    jslCharPosFree(&forCondStart);
    jslCharPosFree(&forIterStart);
    jslCharPosFree(&forBodyStart);
    jslCharPosFree(&forBodyEnd);
#ifdef JSPARSE_MAX_LOOP_ITERATIONS
    if (loopCount<=0) { // budget exhausted -> report the runaway loop
      jsExceptionHere(JSET_ERROR, "FOR Loop exceeded the maximum number of iterations ("STRINGIFY(JSPARSE_MAX_LOOP_ITERATIONS)")");
    }
#endif
  }
  return 0;
}

/** try { ... } catch (e) { ... } finally { ... } */
NO_INLINE JsVar *jspeStatementTry() {
  // execute the try block
  JSP_ASSERT_MATCH(LEX_R_TRY);
  bool shouldExecuteBefore = JSP_SHOULD_EXECUTE;
  jspeBlock();
  bool hadException = shouldExecuteBefore && ((execInfo.execute & EXEC_EXCEPTION)!=0);

  bool hadCatch = false;
  if (lex->tk == LEX_R_CATCH) {
    JSP_ASSERT_MATCH(LEX_R_CATCH);
    hadCatch = true;
    JSP_MATCH('(');
    JsVar *scope = 0;
    JsVar *exceptionVar = 0;
    if (hadException) {
      // a fresh scope object to hold the catch parameter
      scope = jsvNewObject();
      if (scope)
        exceptionVar = jsvFindChildFromString(scope, jslGetTokenValueAsString(), true);
    }
    JSP_MATCH_WITH_CLEANUP_AND_RETURN(LEX_ID,jsvUnLock2(scope,exceptionVar),0);
    JSP_MATCH_WITH_CLEANUP_AND_RETURN(')',jsvUnLock2(scope,exceptionVar),0);
    if (exceptionVar) {
      // set the exception var up properly
      JsVar *exception = jspGetException();
      if (exception) {
        jsvSetValueOfName(exceptionVar, exception);
        jsvUnLock(exception);
      }
      // Now clear the exception flag (it's handled - we hope!)
      execInfo.execute = execInfo.execute & (JsExecFlags)~(EXEC_EXCEPTION|EXEC_ERROR_LINE_REPORTED);
      jsvUnLock(exceptionVar);
    }
    if (shouldExecuteBefore && !hadException) {
      // no exception - just skim the catch block without executing
      JSP_SAVE_EXECUTE();
      jspSetNoExecute();
      jspeBlock();
      JSP_RESTORE_EXECUTE();
    } else {
      if (!scope || jspeiAddScope(scope)) {
        jspeBlock();
        if (scope) jspeiRemoveScope();
      }
    }
    jsvUnLock(scope);
  }
  // note: with no 'catch', a 'finally' clause is required (JSP_MATCH errors otherwise)
  if (lex->tk == LEX_R_FINALLY || (!hadCatch && ((execInfo.execute&(EXEC_ERROR|EXEC_INTERRUPTED))==0))) {
    JSP_MATCH(LEX_R_FINALLY);
    // clear the exception flag - but only momentarily!
    if (hadException) execInfo.execute = execInfo.execute & (JsExecFlags)~EXEC_EXCEPTION;
    jspeBlock();
    // put the flag back!
    if (hadException && !hadCatch) execInfo.execute = execInfo.execute | EXEC_EXCEPTION;
  }
  return 0;
}

/** 'return expr;' - stores the value in the function's hidden return var */
NO_INLINE JsVar *jspeStatementReturn() {
  JsVar *result = 0;
  JSP_ASSERT_MATCH(LEX_R_RETURN);
  if (lex->tk != ';' && lex->tk != '}') {
    // we only want the value, so skip the name if there was one
    result = jsvSkipNameAndUnLock(jspeExpression());
  }
  if (JSP_SHOULD_EXECUTE) {
    JsVar *resultVar = jspeiFindInScopes(JSPARSE_RETURN_VAR);
    if (resultVar) {
      jsvReplaceWith(resultVar, result);
      jsvUnLock(resultVar);
      execInfo.execute |= EXEC_RETURN; // Stop anything else in this function executing
    } else {
      jsExceptionHere(JSET_SYNTAXERROR, "RETURN statement, but not in a function.\n");
    }
  }
  jsvUnLock(result);
  return 0;
}

/** 'throw expr;' - sets the pending exception */
NO_INLINE JsVar *jspeStatementThrow() {
  JsVar *result = 0;
  JSP_ASSERT_MATCH(LEX_R_THROW);
  result = jsvSkipNameAndUnLock(jspeExpression());
  if (JSP_SHOULD_EXECUTE) {
    jspSetException(result); // Stop anything else in this function executing
  }
  jsvUnLock(result);
  return 0;
}

/** 'function name(...) {...}' or (isClass) 'class name {...}' statement */
NO_INLINE JsVar *jspeStatementFunctionDecl(bool isClass) {
  JsVar *funcName = 0;
  JsVar *funcVar;

#ifndef SAVE_ON_FLASH
  JSP_ASSERT_MATCH(isClass ? LEX_R_CLASS : LEX_R_FUNCTION);
#else
  JSP_ASSERT_MATCH(LEX_R_FUNCTION);
#endif

  bool actuallyCreateFunction = JSP_SHOULD_EXECUTE;
  if (actuallyCreateFunction) {
    funcName = jsvMakeIntoVariableName(jslGetTokenValueAsVar(), 0);
    if (!funcName) { // out of memory
      return 0;
    }
  }
  JSP_MATCH_WITH_CLEANUP_AND_RETURN(LEX_ID, jsvUnLock(funcName), 0);
#ifndef SAVE_ON_FLASH
  funcVar = isClass ? jspeClassDefinition(false) : jspeFunctionDefinition(false);
#else
  funcVar = jspeFunctionDefinition(false);
#endif
  if (actuallyCreateFunction) {
    // find a function with the same name (or make one)
    // OPT: can Find* use just a JsVar that is a 'name'?
    JsVar *existingName = jspeiFindNameOnTop(funcName, true);
    JsVar *existingFunc = jsvSkipName(existingName);
    if (jsvIsFunction(existingFunc)) {
      // 'proper' replace, that keeps the original function var and swaps the children
      funcVar = jsvSkipNameAndUnLock(funcVar);
      jswrap_function_replaceWith(existingFunc, funcVar);
    } else {
      jsvReplaceWith(existingName, funcVar);
    }
    jsvUnLock(funcName);
    funcName = existingName;
    jsvUnLock(existingFunc);
    // existingName is used - don't UnLock
  }
  jsvUnLock(funcVar);
  return funcName;
}

/** Parse and (maybe) execute a single statement - the top-level dispatcher
 * that hands off to the jspeStatement* helpers based on the current token. */
NO_INLINE JsVar *jspeStatement() {
#ifdef USE_DEBUGGER
  if (execInfo.execute&EXEC_DEBUGGER_NEXT_LINE &&
      lex->tk!=';' &&
      JSP_SHOULD_EXECUTE) {
    lex->tokenLastStart = lex->tokenStart;
    jsiDebuggerLoop();
  }
#endif
  if (lex->tk==LEX_ID ||
      lex->tk==LEX_INT ||
      lex->tk==LEX_FLOAT ||
      lex->tk==LEX_STR ||
      lex->tk==LEX_TEMPLATE_LITERAL ||
      lex->tk==LEX_REGEX ||
      lex->tk==LEX_R_NEW ||
      lex->tk==LEX_R_NULL ||
      lex->tk==LEX_R_UNDEFINED ||
      lex->tk==LEX_R_TRUE ||
      lex->tk==LEX_R_FALSE ||
      lex->tk==LEX_R_THIS ||
      lex->tk==LEX_R_DELETE ||
      lex->tk==LEX_R_TYPEOF ||
      lex->tk==LEX_R_VOID ||
      lex->tk==LEX_R_SUPER ||
      lex->tk==LEX_PLUSPLUS ||
      lex->tk==LEX_MINUSMINUS ||
      lex->tk=='!' ||
      lex->tk=='-' ||
      lex->tk=='+' ||
      lex->tk=='~' ||
      lex->tk=='[' ||
      lex->tk=='(') {
    /* Execute a simple statement that only contains basic arithmetic... */
    return jspeExpression();
  } else if (lex->tk=='{') {
    /* A block of code */
    if (!jspCheckStackPosition()) return 0;
    jspeBlock();
    return 0;
  } else if (lex->tk==';') {
    /* Empty statement - to allow things like ;;; */
    JSP_ASSERT_MATCH(';');
    return 0;
  } else if (lex->tk==LEX_R_VAR ||
            lex->tk==LEX_R_LET ||
            lex->tk==LEX_R_CONST) {
    return jspeStatementVar();
  } else if (lex->tk==LEX_R_IF) {
    return jspeStatementIf();
  } else if (lex->tk==LEX_R_DO) {
    return jspeStatementDoOrWhile(false);
  } else if (lex->tk==LEX_R_WHILE) {
    return jspeStatementDoOrWhile(true);
  } else if (lex->tk==LEX_R_FOR) {
    return jspeStatementFor();
  } else if (lex->tk==LEX_R_TRY) {
    return jspeStatementTry();
  } else if (lex->tk==LEX_R_RETURN) {
    return jspeStatementReturn();
  } else if (lex->tk==LEX_R_THROW) {
    return jspeStatementThrow();
  } else if (lex->tk==LEX_R_FUNCTION) {
    return jspeStatementFunctionDecl(false/* function */);
#ifndef SAVE_ON_FLASH
  } else if (lex->tk==LEX_R_CLASS) {
      return jspeStatementFunctionDecl(true/* class */);
#endif
  } else if (lex->tk==LEX_R_CONTINUE) {
    JSP_ASSERT_MATCH(LEX_R_CONTINUE);
    if (JSP_SHOULD_EXECUTE) {
      if (!(execInfo.execute & EXEC_IN_LOOP))
        jsExceptionHere(JSET_SYNTAXERROR, "CONTINUE statement outside of FOR or WHILE loop");
      else
        execInfo.execute = (execInfo.execute & (JsExecFlags)~EXEC_RUN_MASK) | EXEC_CONTINUE;
    }
  } else if (lex->tk==LEX_R_BREAK) {
    JSP_ASSERT_MATCH(LEX_R_BREAK);
    if (JSP_SHOULD_EXECUTE) {
      if (!(execInfo.execute & (EXEC_IN_LOOP|EXEC_IN_SWITCH)))
        jsExceptionHere(JSET_SYNTAXERROR, "BREAK statement outside of SWITCH, FOR or WHILE loop");
      else
        execInfo.execute = (execInfo.execute & (JsExecFlags)~EXEC_RUN_MASK) | EXEC_BREAK;
    }
  } else if (lex->tk==LEX_R_SWITCH) {
    return jspeStatementSwitch();
  } else if (lex->tk==LEX_R_DEBUGGER) {
    JSP_ASSERT_MATCH(LEX_R_DEBUGGER);
#ifdef USE_DEBUGGER
    if (JSP_SHOULD_EXECUTE)
      jsiDebuggerLoop();
#endif
  } else JSP_MATCH(LEX_EOF);
  return 0;
}

// -----------------------------------------------------------------------------
/// Create a new built-in object that jswrapper can use to check for built-in functions
JsVar *jspNewBuiltin(const char *instanceOf) {
  JsVar *objFunc = jswFindBuiltInFunction(0, instanceOf);
  if (!objFunc) return 0; // out of memory
  return objFunc;
}

/// Create a new Class of the given instance and return its prototype
NO_INLINE JsVar *jspNewPrototype(const char *instanceOf) {
  JsVar *objFuncName = jsvFindChildFromString(execInfo.root, instanceOf, true);
  if (!objFuncName) // out of memory
    return 0;

  JsVar *objFunc = jsvSkipName(objFuncName);
  if (!objFunc) {
    objFunc = jspNewBuiltin(instanceOf);
    if (!objFunc) { // out of memory
      jsvUnLock(objFuncName);
      return 0;
    }

    // set up name
    jsvSetValueOfName(objFuncName, objFunc);
  }

  JsVar *prototypeName = jsvFindChildFromString(objFunc, JSPARSE_PROTOTYPE_VAR, true);
  jspEnsureIsPrototype(objFunc, prototypeName); // make sure it's an object
  jsvUnLock2(objFunc, objFuncName);

  return prototypeName;
}

/** Create a new object of the given instance and add it to root with name 'name'.
 * If name!=0, added to root with name, and the name is returned
 * If name==0, not added to root and Object itself returned */
NO_INLINE JsVar *jspNewObject(const char *name, const char *instanceOf) {
  JsVar *prototypeName = jspNewPrototype(instanceOf);

  JsVar *obj = jsvNewObject();
  if (!obj) { // out of memory
    jsvUnLock(prototypeName);
    return 0;
  }
  if (name) {
    // If it's a device, set the device number up as the Object data
    // See jsiGetDeviceFromClass
    IOEventFlags device = jshFromDeviceString(name);
    if (device!=EV_NONE) {
      obj->varData.str[0] = 'D';
      obj->varData.str[1] = 'E';
      obj->varData.str[2] = 'V';
      obj->varData.str[3] = (char)device;
    }
  }
  // add __proto__
  JsVar *prototypeVar = jsvSkipName(prototypeName);
  jsvUnLock3(jsvAddNamedChild(obj, prototypeVar, JSPARSE_INHERITS_VAR), prototypeVar, prototypeName);prototypeName=0;

  if (name) {
    JsVar *objName = jsvFindChildFromString(execInfo.root, name, true);
    if (objName) jsvSetValueOfName(objName, obj);
    jsvUnLock(obj);
    if (!objName) { // out of memory
      return 0;
    }
    return objName;
  } else
    return obj;
}

/** Returns true if the constructor function given is the same as that
 * of the object with the given name. */
bool jspIsConstructor(JsVar *constructor, const char *constructorName) {
  JsVar *objFunc = jsvObjectGetChild(execInfo.root, constructorName, 0);
  if (!objFunc) return false;
  bool isConstructor = objFunc == constructor;
  jsvUnLock(objFunc);
  return isConstructor;
}

/** Get the prototype of the given object, or return 0 if not found, or not an object */
JsVar *jspGetPrototype(JsVar *object) {
  if (!jsvIsObject(object)) return 0;
  JsVar *proto = jsvObjectGetChild(object, JSPARSE_INHERITS_VAR, 0);
  if (jsvIsObject(proto)) return proto;
  jsvUnLock(proto);
  return 0;
}

/** Get the constructor of the given object, or return 0 if not found, or not a function */
JsVar *jspGetConstructor(JsVar *object) {
  JsVar *proto = jspGetPrototype(object);
  if (proto) {
    JsVar *constr = jsvObjectGetChild(proto, JSPARSE_CONSTRUCTOR_VAR, 0);
    if (jsvIsFunction(constr)) {
      jsvUnLock(proto);
      return constr;
    }
    jsvUnLock2(constr, proto);
  }
  return 0;
}

// -----------------------------------------------------------------------------

/** (Re)initialise parser state - safe to call after a saved-state restore */
void jspSoftInit() {
  execInfo.root = jsvFindOrCreateRoot();
  // Root now has a lock and a ref
  execInfo.hiddenRoot = jsvObjectGetChild(execInfo.root, JS_HIDDEN_CHAR_STR, JSV_OBJECT);
  execInfo.execute = EXEC_YES;
}

/** Release parser state (inverse of jspSoftInit) */
void jspSoftKill() {
  jsvUnLock(execInfo.scopesVar);
  execInfo.scopesVar = 0;
  jsvUnLock(execInfo.hiddenRoot);
  execInfo.hiddenRoot = 0;
  jsvUnLock(execInfo.root);
  execInfo.root = 0;
  // Root is now left with just a ref
}

void jspInit() {
  jspSoftInit();
}

void jspKill() {
  jspSoftKill();
  // Unreffing this should completely kill everything attached to root
  JsVar *r = jsvFindOrCreateRoot();
  jsvUnRef(r);
  jsvUnLock(r);
}

/** Evaluate the given variable as an expression (in current scope) */
JsVar *jspEvaluateExpressionVar(JsVar *str) {
  JsLex lex;

  assert(jsvIsString(str));
  JsLex *oldLex = jslSetLex(&lex);
  jslInit(str);
#ifndef ESPR_NO_LINE_NUMBERS
  // NOTE(review): assumes oldLex is non-NULL here (ie. we're always called
  // while another lexer is active) - verify against callers
  lex.lineNumberOffset = oldLex->lineNumberOffset;
#endif

  // actually do the parsing
  JsVar *v = jspeExpression();
  jslKill();
  jslSetLex(oldLex);
  return jsvSkipNameAndUnLock(v);
}

/** Execute code form a variable and return the result. If lineNumberOffset
 * is nonzero it's added to the line numbers that get reported for errors/debug */
JsVar *jspEvaluateVar(JsVar *str, JsVar *scope, uint16_t lineNumberOffset) {
  JsLex lex;

  assert(jsvIsString(str));
  JsLex *oldLex = jslSetLex(&lex);
  jslInit(str);
#ifndef ESPR_NO_LINE_NUMBERS
  lex.lineNumberOffset = lineNumberOffset;
#endif

  JsExecInfo oldExecInfo = execInfo;
  execInfo.execute = EXEC_YES;
  if (scope) {
    // if we're adding a scope, make sure it's the *only* scope
    execInfo.scopesVar = 0;
    if (scope!=execInfo.root) jspeiAddScope(scope); // it's searched by default anyway
  }

  // actually do the parsing
  JsVar *v = jspParse();
  // clean up
  if (scope) jspeiClearScopes();
  jslKill();
  jslSetLex(oldLex);

  // restore state and execInfo (keep error flags & ctrl-c)
  oldExecInfo.execute |= execInfo.execute & EXEC_PERSIST;
  execInfo = oldExecInfo;

  // It may have returned a reference, but we just want the value...
  return jsvSkipNameAndUnLock(v);
}

JsVar *jspEvaluate(const char *str, bool stringIsStatic) {
  /* using a memory area is more efficient, but the interpreter
   * may use substrings from it for function code. This means that
   * if the string goes away, everything gets corrupted - hence
   * the option here.
   */
  JsVar *evCode;
  if (stringIsStatic)
    evCode = jsvNewNativeString((char*)str, strlen(str));
  else
    evCode = jsvNewFromString(str);
  if (!evCode) return 0;

  JsVar *v = 0;
  if (!jsvIsMemoryFull())
    v = jspEvaluateVar(evCode, 0, 0);
  jsvUnLock(evCode);

  return v;
}

/** Evaluate a JS source string to a function, then call it with the given args */
JsVar *jspExecuteJSFunction(const char *jsCode, JsVar *thisArg, int argCount, JsVar **argPtr) {
  JsVar *fn = jspEvaluate(jsCode,true);
  JsVar *result = jspExecuteFunction(fn,thisArg,argCount,argPtr);
  jsvUnLock(fn);
  return result;
}

/** Call a function var with the given 'this' and args, in a clean exec state */
JsVar *jspExecuteFunction(JsVar *func, JsVar *thisArg, int argCount, JsVar **argPtr) {
  JsExecInfo oldExecInfo = execInfo;
  execInfo.scopesVar = 0;
  execInfo.execute = EXEC_YES;
  execInfo.thisVar = 0;
  JsVar *result = jspeFunctionCall(func, 0, thisArg, false, argCount, argPtr);
  // clean up
  jspeiClearScopes();
  // restore state and execInfo (keep error flags & ctrl-c)
  oldExecInfo.execute |= execInfo.execute&EXEC_PERSIST;
  // NOTE(review): jspeiClearScopes() is called twice here - the second call
  // looks redundant; verify whether it can be removed
  jspeiClearScopes();
  execInfo = oldExecInfo;
  return result;
}

/// Evaluate a JavaScript module and return its exports
JsVar *jspEvaluateModule(JsVar *moduleContents) {
  assert(jsvIsString(moduleContents) || jsvIsFunction(moduleContents));
  if (jsvIsFunction(moduleContents)) {
    moduleContents = jsvObjectGetChild(moduleContents,JSPARSE_FUNCTION_CODE_NAME,0);
    if (!jsvIsString(moduleContents)) {
      jsvUnLock(moduleContents);
      return 0;
    }
  } else
    jsvLockAgain(moduleContents);
  JsVar *scope = jsvNewObject();
  JsVar *scopeExports = jsvNewObject();
  if (!scope || !scopeExports) { // out of mem
    jsvUnLock3(scope, scopeExports, moduleContents);
    return 0;
  }
  JsVar *exportsName = jsvAddNamedChild(scope, scopeExports, "exports");
  // scopeExports stays alive below via the 'exports' child reference on scope
  jsvUnLock2(scopeExports, jsvAddNamedChild(scope, scope, "module"));

  JsExecFlags oldExecute = execInfo.execute;
  JsVar *oldThisVar = execInfo.thisVar;
  execInfo.thisVar = scopeExports; // set 'this' variable to exports
  jsvUnLock(jspEvaluateVar(moduleContents, scope, 0));
  execInfo.thisVar = oldThisVar;
  execInfo.execute = oldExecute; // make sure we fully restore state after parsing a module

  jsvUnLock2(moduleContents, scope);

  return jsvSkipNameAndUnLock(exportsName);
}

/** Get the owner of the current prototype. We assume that it's
 * the first item in the array, because that's what we will
 * have added when we created it. It's safe to call this on
 * non-prototypes and non-objects.  */
JsVar *jspGetPrototypeOwner(JsVar *proto) {
  if (jsvIsObject(proto) || jsvIsArray(proto)) {
    return jsvSkipNameAndUnLock(jsvObjectGetChild(proto, JSPARSE_CONSTRUCTOR_VAR, 0));
  }
  return 0;
}
/* * This file is part of Espruino, a JavaScript interpreter for Microcontrollers * * Copyright (C) 2013 Gordon Williams <gw@pur3.co.uk> * * This Source Code Form is subject to the terms of the Mozilla Public * License, v. 2.0. If a copy of the MPL was not distributed with this * file, You can obtain one at http://mozilla.org/MPL/2.0/. * * ---------------------------------------------------------------------------- * Recursive descent parser for code execution * ---------------------------------------------------------------------------- */ #include "jsparse.h" #include "jsinteractive.h" #include "jswrapper.h" #include "jsnative.h" #include "jswrap_object.h" // for function_replacewith #include "jswrap_functions.h" // insane check for eval in jspeFunctionCall #include "jswrap_json.h" // for jsfPrintJSON #include "jswrap_espruino.h" // for jswrap_espruino_memoryArea #ifndef SAVE_ON_FLASH #include "jswrap_regexp.h" // for jswrap_regexp_constructor #endif /* Info about execution when Parsing - this saves passing it on the stack * for each call */ JsExecInfo execInfo; // ----------------------------------------------- Forward decls JsVar *jspeAssignmentExpression(); JsVar *jspeExpression(); JsVar *jspeUnaryExpression(); void jspeBlock(); void jspeBlockNoBrackets(); JsVar *jspeStatement(); JsVar *jspeFactor(); void jspEnsureIsPrototype(JsVar *instanceOf, JsVar *prototypeName); #ifndef SAVE_ON_FLASH JsVar *jspeArrowFunction(JsVar *funcVar, JsVar *a); #endif // ----------------------------------------------- Utils #define JSP_MATCH_WITH_CLEANUP_AND_RETURN(TOKEN, CLEANUP_CODE, RETURN_VAL) { if (!jslMatch((TOKEN))) { CLEANUP_CODE; return RETURN_VAL; } } #define JSP_MATCH_WITH_RETURN(TOKEN, RETURN_VAL) JSP_MATCH_WITH_CLEANUP_AND_RETURN(TOKEN, , RETURN_VAL) #define JSP_MATCH(TOKEN) JSP_MATCH_WITH_CLEANUP_AND_RETURN(TOKEN, , 0) // Match where the user could have given us the wrong token #define JSP_ASSERT_MATCH(TOKEN) { assert(lex->tk==(TOKEN));jslGetNextToken(); } // Match 
// where if we have the wrong token, it's an internal error
#define JSP_SHOULD_EXECUTE (((execInfo.execute)&EXEC_RUN_MASK)==EXEC_YES)
#define JSP_SAVE_EXECUTE() JsExecFlags oldExecute = execInfo.execute
#define JSP_RESTORE_EXECUTE() execInfo.execute = (execInfo.execute&(JsExecFlags)(~EXEC_SAVE_RESTORE_MASK)) | (oldExecute&EXEC_SAVE_RESTORE_MASK);
#define JSP_HAS_ERROR (((execInfo.execute)&EXEC_ERROR_MASK)!=0)
#define JSP_SHOULDNT_PARSE (((execInfo.execute)&EXEC_NO_PARSE_MASK)!=0)

/// Enter the debugger loop if Ctrl-C was flagged while we are executing (no-op without USE_DEBUGGER)
ALWAYS_INLINE void jspDebuggerLoopIfCtrlC() {
#ifdef USE_DEBUGGER
  if (execInfo.execute & EXEC_CTRL_C_WAIT && JSP_SHOULD_EXECUTE)
    jsiDebuggerLoop();
#endif
}

/// if interrupting execution, this is set
bool jspIsInterrupted() {
  return (execInfo.execute & EXEC_INTERRUPTED)!=0;
}

/// if interrupting execution, this is set
void jspSetInterrupted(bool interrupt) {
  if (interrupt)
    execInfo.execute = execInfo.execute | EXEC_INTERRUPTED;
  else
    execInfo.execute = execInfo.execute & (JsExecFlags)~EXEC_INTERRUPTED;
}

/// Set the error flag - set lineReported if we've already output the line number
void jspSetError(bool lineReported) {
  execInfo.execute = (execInfo.execute & (JsExecFlags)~EXEC_YES) | EXEC_ERROR;
  if (lineReported)
    execInfo.execute |= EXEC_ERROR_LINE_REPORTED;
}

bool jspHasError() {
  return JSP_HAS_ERROR;
}

/// Unlock and discard the entire scope chain
void jspeiClearScopes() {
  jsvUnLock(execInfo.scopesVar);
  execInfo.scopesVar = 0;
}

/// Push 'scope' onto the scope chain. Returns false on out of memory (caller decides how to handle)
bool jspeiAddScope(JsVar *scope) {
  if (!execInfo.scopesVar)
    execInfo.scopesVar = jsvNewEmptyArray();
  if (!execInfo.scopesVar) return false;
  jsvArrayPush(execInfo.scopesVar, scope);
  return true;
}

/// Pop the topmost scope off the scope chain (raises an internal error if there is none)
void jspeiRemoveScope() {
  if (!execInfo.scopesVar || !jsvGetArrayLength(execInfo.scopesVar)) {
    jsExceptionHere(JSET_INTERNALERROR, "Too many scopes removed");
    jspSetError(false);
    return;
  }
  jsvUnLock(jsvArrayPop(execInfo.scopesVar));
  if (!jsvGetFirstChild(execInfo.scopesVar)) {
    // no scopes left - free the now-empty scope array
    jsvUnLock(execInfo.scopesVar);
    execInfo.scopesVar = 0;
  }
}

/// Search the scope chain (innermost first), falling back to the root scope. Returns a locked name var or 0
JsVar *jspeiFindInScopes(const char *name) {
  if (execInfo.scopesVar) {
    // iterate the scope array backwards (last child = innermost scope)
    JsVar *it = jsvLockSafe(jsvGetLastChild(execInfo.scopesVar));
    while (it) {
      JsVar *scope = jsvSkipName(it);
      JsVarRef next = jsvGetPrevSibling(it);
      JsVar *ref = jsvFindChildFromString(scope, name, false);
      jsvUnLock2(it, scope);
      if (ref) return ref;
      it = jsvLockSafe(next);
    }
  }
  return jsvFindChildFromString(execInfo.root, name, false);
}

/// Return the topmost scope (and lock it)
JsVar *jspeiGetTopScope() {
  if (execInfo.scopesVar) {
    JsVar *scope = jsvGetLastArrayItem(execInfo.scopesVar);
    if (scope) return scope;
  }
  return jsvLockAgain(execInfo.root);
}

/// Find (or optionally create) 'name' in the topmost scope only
JsVar *jspeiFindOnTop(const char *name, bool createIfNotFound) {
  JsVar *scope = jspeiGetTopScope();
  JsVar *result = jsvFindChildFromString(scope, name, createIfNotFound);
  jsvUnLock(scope);
  return result;
}

/// As jspeiFindOnTop, but the name is supplied as a variable rather than a C string
JsVar *jspeiFindNameOnTop(JsVar *childName, bool createIfNotFound) {
  JsVar *scope = jspeiGetTopScope();
  JsVar *result = jsvFindChildFromVar(scope, childName, createIfNotFound);
  jsvUnLock(scope);
  return result;
}

/// Return (locked) the prototype object of the named global (eg "Object"), or 0 if not found
JsVar *jspFindPrototypeFor(const char *className) {
  JsVar *obj = jsvObjectGetChild(execInfo.root, className, 0);
  if (!obj) return 0;
  JsVar *proto = jsvObjectGetChild(obj, JSPARSE_PROTOTYPE_VAR, 0);
  jsvUnLock(obj);
  return proto;
}

/** Here we assume that we have already looked in the parent itself -
 * and are now going down looking at the stuff it inherited */
JsVar *jspeiFindChildFromStringInParents(JsVar *parent, const char *name) {
  if (jsvIsObject(parent)) {
    // If an object, look for an 'inherits' var
    JsVar *inheritsFrom = jsvObjectGetChild(parent, JSPARSE_INHERITS_VAR, 0);

    // if there's no inheritsFrom, just default to 'Object.prototype'
    if (!inheritsFrom)
      inheritsFrom = jspFindPrototypeFor("Object");

    if (inheritsFrom && inheritsFrom!=parent) {
      // we have what it inherits from (this is ACTUALLY the prototype var)
      // https://developer.mozilla.org/en-US/docs/JavaScript/Reference/Global_Objects/Object/proto
      JsVar *child = jsvFindChildFromString(inheritsFrom, name, false);
      if (!child)
        child = jspeiFindChildFromStringInParents(inheritsFrom, name); // recurse up the prototype chain
      jsvUnLock(inheritsFrom);
      if (child) return child;
    } else
      jsvUnLock(inheritsFrom);
  } else { // Not actually an object - but might be an array/string/etc
    const char *objectName = jswGetBasicObjectName(parent);
    while (objectName) {
      JsVar *objName = jsvFindChildFromString(execInfo.root, objectName, false);
      if (objName) {
        JsVar *result = 0;
        JsVar *obj = jsvSkipNameAndUnLock(objName);
        // could be something the user has made - eg. 'Array=1'
        if (jsvHasChildren(obj)) {
          // We have found an object with this name - search for the prototype var
          JsVar *proto = jsvObjectGetChild(obj, JSPARSE_PROTOTYPE_VAR, 0);
          if (proto) {
            result = jsvFindChildFromString(proto, name, false);
            jsvUnLock(proto);
          }
        }
        jsvUnLock(obj);
        if (result) return result;
      }
      /* We haven't found anything in the actual object, we should check the 'Object' itself
         eg, we tried 'String', so now we should try 'Object'. Built-in types don't have room for
         a prototype field, so we hard-code it */
      objectName = jswGetBasicObjectPrototypeName(objectName);
    }
  }

  // no luck!
  return 0;
}

/// Return the current scope chain as one locked var: the single scope itself, or a copy of the scope array
JsVar *jspeiGetScopesAsVar() {
  if (!execInfo.scopesVar) return 0; // no scopes!
  // If just one element, return it (no array)
  if (jsvGetArrayLength(execInfo.scopesVar)==1) {
    JsVar *v = jsvGetLastArrayItem(execInfo.scopesVar); // this is faster than getting by index
    return v;
  }
  // Copy this - because if we just returned it, the underlying array would get altered
  return jsvCopy(execInfo.scopesVar, true);
}

/// Replace the current scope chain with the scopes held in 'arr' (either a scope array or a single scope)
void jspeiLoadScopesFromVar(JsVar *arr) {
  jsvUnLock(execInfo.scopesVar);
  execInfo.scopesVar = 0;
  if (arr) {
    if (jsvIsArray(arr)) {
      // TODO: copy on write? would make function calls faster
      execInfo.scopesVar = jsvCopy(arr, true);
    } else {
      // just a single item, but we must package it in an array
      execInfo.scopesVar = jsvNewArray(&arr, 1);
    }
  }
}
// -----------------------------------------------
/// Check that we have enough stack to recurse. Return true if all ok, error if not.
bool jspCheckStackPosition() {
  if (jsuGetFreeStack() < 512) { // giving us 512 bytes leeway
    jsExceptionHere(JSET_ERROR, "Too much recursion - the stack is about to overflow");
    jspSetInterrupted(true);
    return false;
  }
  return true;
}

// Set execFlags such that we are not executing
void jspSetNoExecute() {
  execInfo.execute = (execInfo.execute & (JsExecFlags)(int)~EXEC_RUN_MASK) | EXEC_NO;
}

/// Append the current lexer position (and a token line marker) to the given stack trace string
void jspAppendStackTrace(JsVar *stackTrace) {
  JsvStringIterator it;
  jsvStringIteratorNew(&it, stackTrace, 0);
  jsvStringIteratorGotoEnd(&it);
  jslPrintPosition((vcbprintf_callback)jsvStringIteratorPrintfCallback, &it, lex->tokenLastStart);
  jslPrintTokenLineMarker((vcbprintf_callback)jsvStringIteratorPrintfCallback, &it, lex->tokenLastStart, 0);
  jsvStringIteratorFree(&it);
}

/// We had an exception (argument is the exception's value)
void jspSetException(JsVar *value) {
  // Add the exception itself to a variable in root scope
  JsVar *exception = jsvFindChildFromString(execInfo.hiddenRoot, JSPARSE_EXCEPTION_VAR, true);
  if (exception) {
    jsvSetValueOfName(exception, value);
    jsvUnLock(exception);
  }
  // Set the exception flag
  execInfo.execute = execInfo.execute | EXEC_EXCEPTION;
  // Try and do a stack trace
  if (lex) {
    JsVar *stackTrace = jsvObjectGetChild(execInfo.hiddenRoot, JSPARSE_STACKTRACE_VAR, JSV_STRING_0);
    if (stackTrace) {
      jsvAppendPrintf(stackTrace, " at ");
      jspAppendStackTrace(stackTrace);
      jsvUnLock(stackTrace);
      // stop us from printing the trace in the same block
      execInfo.execute = execInfo.execute | EXEC_ERROR_LINE_REPORTED;
    }
  }
}

/** Return the reported exception if there was one (and clear it) */
JsVar *jspGetException() {
  JsVar *exceptionName = jsvFindChildFromString(execInfo.hiddenRoot, JSPARSE_EXCEPTION_VAR, false);
  if (exceptionName) {
    JsVar *exception = jsvSkipName(exceptionName);
    jsvRemoveChild(execInfo.hiddenRoot, exceptionName);
    jsvUnLock(exceptionName);
    // attach the stack trace (if any) so user code can read e.stack
    JsVar *stack = jspGetStackTrace();
    if (stack && jsvHasChildren(exception)) {
      jsvObjectSetChild(exception, "stack", stack);
    }
    jsvUnLock(stack);
    return exception;
  }
  return 0;
}

/** Return a stack trace string if there was one (and clear it) */
JsVar *jspGetStackTrace() {
  JsVar *stackTraceName = jsvFindChildFromString(execInfo.hiddenRoot, JSPARSE_STACKTRACE_VAR, false);
  if (stackTraceName) {
    JsVar *stackTrace = jsvSkipName(stackTraceName);
    jsvRemoveChild(execInfo.hiddenRoot, stackTraceName);
    jsvUnLock(stackTraceName);
    return stackTrace;
  }
  return 0;
}

// ----------------------------------------------

/// Parse a function's argument list, adding the parameter names to 'funcVar' (if non-0).
// we return a value so that JSP_MATCH can return 0 if it fails (if we pass 0, we just parse all args)
NO_INLINE bool jspeFunctionArguments(JsVar *funcVar) {
  JSP_MATCH('(');
  while (lex->tk!=')') {
    if (funcVar) {
      char buf[JSLEX_MAX_TOKEN_LENGTH+1];
      buf[0] = '\xFF'; // parameter names carry a 0xFF prefix to mark them as function parameters
      strcpy(&buf[1], jslGetTokenValueAsString());
      JsVar *param = jsvAddNamedChild(funcVar, 0, buf);
      if (!param) { // out of memory
        jspSetError(false);
        return false;
      }
      jsvMakeFunctionParameter(param); // force this to be called a function parameter
      jsvUnLock(param);
    }
    JSP_MATCH(LEX_ID);
    if (lex->tk!=')') JSP_MATCH(',');
  }
  JSP_MATCH(')');
  return true;
}

// Parse function, assuming we're on '{'. funcVar can be 0.
// returns 'true' if the function included the 'this' keyword
NO_INLINE bool jspeFunctionDefinitionInternal(JsVar *funcVar, bool expressionOnly) {
  bool forcePretokenise = false;
  if (expressionOnly) {
    // arrow-function style single expression body
    if (funcVar)
      funcVar->flags = (funcVar->flags & ~JSV_VARTYPEMASK) | JSV_FUNCTION_RETURN;
  } else {
    JSP_MATCH('{');
#ifndef SAVE_ON_FLASH
    if (lex->tk==LEX_STR) {
      if (!strcmp(jslGetTokenValueAsString(), "compiled"))
        jsWarn("Function marked with \"compiled\" uploaded in source form");
      if (lex->tk==LEX_STR && !strcmp(jslGetTokenValueAsString(), "ram")) {
        JSP_ASSERT_MATCH(LEX_STR);
        forcePretokenise = true;
      }
    }
#endif
    /* If the function starts with return, treat it specially -
     * we don't want to store the 'return' part of it */
    if (funcVar && lex->tk==LEX_R_RETURN) {
      funcVar->flags = (funcVar->flags & ~JSV_VARTYPEMASK) | JSV_FUNCTION_RETURN;
      JSP_ASSERT_MATCH(LEX_R_RETURN);
    }
  }
#ifndef ESPR_NO_LINE_NUMBERS
  // Get the line number (if needed)
  JsVarInt lineNumber = 0;
  if (funcVar && lex->lineNumberOffset && !(forcePretokenise||jsfGetFlag(JSF_PRETOKENISE))) {
    // jslGetLineNumber is slow, so we only do it if we have debug info
    lineNumber = (JsVarInt)jslGetLineNumber() + (JsVarInt)lex->lineNumberOffset - 1;
  }
#endif
  // Get the code - parse it and figure out where it stops
  JslCharPos funcBegin;
  jslSkipWhiteSpace();
  jslCharPosNew(&funcBegin, lex->sourceVar, lex->tokenStart);
  int lastTokenEnd = -1;
  lex->hadThisKeyword = lex->tk == LEX_R_THIS;
  if (!expressionOnly) {
    // scan tokens, tracking brace depth, until the function's closing '}'
    int brackets = 0;
    while (lex->tk && (brackets || lex->tk != '}')) {
      if (lex->tk == '{') brackets++;
      if (lex->tk == '}') brackets--;
      lastTokenEnd = (int)jsvStringIteratorGetIndex(&lex->it)-1;
      JSP_ASSERT_MATCH(lex->tk);
    }
    // FIXME: we might be including whitespace after the last token
  } else {
    // the body is a single expression - scan it without executing
    JsExecFlags oldExec = execInfo.execute;
    execInfo.execute = EXEC_NO;
    jsvUnLock(jspeAssignmentExpression());
    execInfo.execute = oldExec;
    lastTokenEnd = (int)lex->tokenStart;
  }
  bool hadThisKeyword = lex->hadThisKeyword;
  // Then create var and set (if there was any code!)
  if (funcVar && lastTokenEnd>0) {
    // code var
    JsVar *funcCodeVar;
    if (!forcePretokenise && jsvIsNativeString(lex->sourceVar)) {
      /* If we're parsing from a Native String (eg. E.memoryArea, E.setBootCode) then
      use another Native String to load function code straight from flash */
      int s = (int)jsvStringIteratorGetIndex(&funcBegin.it) - 1;
      funcCodeVar = jsvNewNativeString(lex->sourceVar->varData.nativeStr.ptr + s, (unsigned int)(lastTokenEnd - s));
#ifdef SPIFLASH_BASE
    } else if (!forcePretokenise && jsvIsFlashString(lex->sourceVar)) {
      /* If we're parsing from a Flash String (eg. loaded from Storage on Bangle.js) then
      use another Flash String to load function code straight from flash*/
      int s = (int)jsvStringIteratorGetIndex(&funcBegin.it) - 1;
      funcCodeVar = jsvNewFlashString(lex->sourceVar->varData.nativeStr.ptr + s, (unsigned int)(lastTokenEnd - s));
#endif
    } else {
      if (jsfGetFlag(JSF_PRETOKENISE) || forcePretokenise) {
        funcCodeVar = jslNewTokenisedStringFromLexer(&funcBegin, (size_t)lastTokenEnd);
      } else {
        funcCodeVar = jslNewStringFromLexer(&funcBegin, (size_t)lastTokenEnd);
      }
    }
    jsvUnLock2(jsvAddNamedChild(funcVar, funcCodeVar, JSPARSE_FUNCTION_CODE_NAME), funcCodeVar);
    // scope var
    JsVar *funcScopeVar = jspeiGetScopesAsVar();
    if (funcScopeVar) {
      jsvUnLock2(jsvAddNamedChild(funcVar, funcScopeVar, JSPARSE_FUNCTION_SCOPE_NAME), funcScopeVar);
    }
#ifndef ESPR_NO_LINE_NUMBERS
    // If we've got a line number, add a var for it
    if (lineNumber) {
      JsVar *funcLineNumber = jsvNewFromInteger(lineNumber);
      if (funcLineNumber) {
        jsvUnLock2(jsvAddNamedChild(funcVar, funcLineNumber, JSPARSE_FUNCTION_LINENUMBER_NAME), funcLineNumber);
      }
    }
#endif
  }
  jslCharPosFree(&funcBegin);
  if (!expressionOnly) JSP_MATCH('}');
  return hadThisKeyword;
}

// Parse function (after 'function' has occurred)
NO_INLINE JsVar *jspeFunctionDefinition(bool parseNamedFunction) {
  // actually parse a function... We assume that the LEX_FUNCTION and name
  // have already been parsed
  JsVar *funcVar = 0;
  bool actuallyCreateFunction = JSP_SHOULD_EXECUTE;
  if (actuallyCreateFunction)
    funcVar = jsvNewWithFlags(JSV_FUNCTION);
  JsVar *functionInternalName = 0;
  if (parseNamedFunction && lex->tk==LEX_ID) {
    // you can do `var a = function foo() { foo(); };` - so cope with this
    if (funcVar) functionInternalName = jslGetTokenValueAsVar();
    // note that we don't add it to the beginning, because it would mess up our function call code
    JSP_ASSERT_MATCH(LEX_ID);
  }
  // Get arguments, save them to the structure
  if (!jspeFunctionArguments(funcVar)) {
    jsvUnLock2(functionInternalName, funcVar);
    // parse failed
    return 0;
  }
  // Parse the actual function block
  jspeFunctionDefinitionInternal(funcVar, false);
  // if we had a function name, add it to the end (if we don't it gets confused with arguments)
  if (funcVar && functionInternalName)
    jsvObjectSetChildAndUnLock(funcVar, JSPARSE_FUNCTION_NAME_NAME, functionInternalName);
  return funcVar;
}

/* Parse just the brackets of a function - and throw
 * everything away */
NO_INLINE bool jspeParseFunctionCallBrackets() {
  assert(!JSP_SHOULD_EXECUTE);
  JSP_MATCH('(');
  while (!JSP_SHOULDNT_PARSE && lex->tk != ')') {
    jsvUnLock(jspeAssignmentExpression());
#ifndef SAVE_ON_FLASH
    if (lex->tk==LEX_ARROW_FUNCTION) {
      jsvUnLock(jspeArrowFunction(0, 0));
    }
#endif
    if (lex->tk!=')') JSP_MATCH(',');
  }
  if (!JSP_SHOULDNT_PARSE) JSP_MATCH(')');
  return 0;
}

/** Handle a function call (assumes we've parsed the function name and we're
 * on the start bracket).
'thisArg' is the value of the 'this' variable when the
 * function is executed (it's usually the parent object)
 *
 *
 * NOTE: this does not set the execInfo flags - so if execInfo==EXEC_NO, it won't execute
 *
 * If !isParsing and arg0!=0, argument 0 is set to what is supplied (same with arg1)
 *
 * functionName is used only for error reporting - and can be 0
 */
NO_INLINE JsVar *jspeFunctionCall(JsVar *function, JsVar *functionName, JsVar *thisArg, bool isParsing, int argCount, JsVar **argPtr) {
  if (JSP_SHOULD_EXECUTE && !function) {
    if (functionName)
      jsExceptionHere(JSET_ERROR, "Function %q not found!", functionName);
    else
      jsExceptionHere(JSET_ERROR, "Function not found!", functionName);
    return 0;
  }

  if (JSP_SHOULD_EXECUTE) if (!jspCheckStackPosition()) return 0; // try and ensure that we won't overflow our stack

  if (JSP_SHOULD_EXECUTE && function) {
    JsVar *returnVar = 0;

    if (!jsvIsFunction(function)) {
      jsExceptionHere(JSET_ERROR, "Expecting a function to call, got %t", function);
      return 0;
    }
    JsVar *thisVar = jsvLockAgainSafe(thisArg);
    if (isParsing) JSP_MATCH('(');

    /* Ok, so we have 4 options here.
     *
     * 1: we're native.
     *   a) args have been pre-parsed, which is awesome
     *   b) we have to parse our own args into an array
     * 2: we're not native
     *   a) args were pre-parsed and we have to populate the function
     *   b) we parse our own args, which is possibly better
     */
    if (jsvIsNativeFunction(function)) { // ------------------------------------- NATIVE
      unsigned int argPtrSize = 0;
      int boundArgs = 0;
      // Add 'bound' parameters if there were any
      JsvObjectIterator it;
      jsvObjectIteratorNew(&it, function);
      JsVar *param = jsvObjectIteratorGetKey(&it);
      while (jsvIsFunctionParameter(param)) {
        if ((unsigned)argCount>=argPtrSize) {
          // allocate more space on stack if needed
          unsigned int newArgPtrSize = (argPtrSize?argPtrSize:(unsigned int)argCount)*4;
          size_t newArgPtrByteSize = sizeof(JsVar*)*newArgPtrSize;
          if (jsuGetFreeStack() < 256+newArgPtrByteSize) {
            jsExceptionHere(JSET_ERROR, "Insufficient stack for this many arguments");
            jsvUnLock(thisVar);
            return 0;
          }
          JsVar **newArgPtr = (JsVar**)alloca(newArgPtrByteSize);
          memcpy(newArgPtr, argPtr, (unsigned)argCount*sizeof(JsVar*));
          argPtr = newArgPtr;
          argPtrSize = newArgPtrSize;
        }
        // if we already had arguments - shift them up...
        int i;
        for (i=argCount-1;i>=boundArgs;i--)
          argPtr[i+1] = argPtr[i];
        // add bound argument
        argPtr[boundArgs] = jsvSkipName(param);
        argCount++;
        boundArgs++;
        jsvUnLock(param);
        jsvObjectIteratorNext(&it);
        param = jsvObjectIteratorGetKey(&it);
      }
      // check if 'this' was defined
      while (param) {
        if (jsvIsStringEqual(param, JSPARSE_FUNCTION_THIS_NAME)) {
          jsvUnLock(thisVar);
          thisVar = jsvSkipName(param);
          break;
        }
        jsvUnLock(param);
        jsvObjectIteratorNext(&it);
        param = jsvObjectIteratorGetKey(&it);
      }
      jsvUnLock(param);
      jsvObjectIteratorFree(&it);

      // Now, if we're parsing add the rest of the arguments
      int allocatedArgCount = boundArgs;
      if (isParsing) {
        while (!JSP_HAS_ERROR && lex->tk!=')' && lex->tk!=LEX_EOF) {
          if ((unsigned)argCount>=argPtrSize) {
            // allocate more space on stack
            unsigned int newArgPtrSize = argPtrSize?argPtrSize*4:16;
            JsVar **newArgPtr = (JsVar**)alloca(sizeof(JsVar*)*newArgPtrSize);
            memcpy(newArgPtr, argPtr, (unsigned)argCount*sizeof(JsVar*));
            argPtr = newArgPtr;
            argPtrSize = newArgPtrSize;
          }
          argPtr[argCount++] = jsvSkipNameAndUnLock(jspeAssignmentExpression());
          if (lex->tk!=')') JSP_MATCH_WITH_CLEANUP_AND_RETURN(',',jsvUnLockMany((unsigned)argCount, argPtr);jsvUnLock(thisVar);, 0);
        }
        JSP_MATCH(')');
        allocatedArgCount = argCount;
      }

      void *nativePtr = jsvGetNativeFunctionPtr(function);

      JsVar *oldThisVar = execInfo.thisVar;
      if (thisVar)
        execInfo.thisVar = jsvRef(thisVar);
      else {
        if (nativePtr==jswrap_eval) { // eval gets to use the current scope
          /* Note: proper JS has some utterly insane code that depends on whether
           * eval is an lvalue or not:
           *
           * http://stackoverflow.com/questions/9107240/1-evalthis-vs-evalthis-in-javascript
           *
           * Doing this in Espruino is quite an upheaval for that one
           * slightly insane case - so it's not implemented. */
          if (execInfo.thisVar) execInfo.thisVar = jsvRef(execInfo.thisVar);
        } else {
          execInfo.thisVar = jsvRef(execInfo.root); // 'this' should always default to root
        }
      }

      if (nativePtr && !JSP_HAS_ERROR) {
        returnVar = jsnCallFunction(nativePtr, function->varData.native.argTypes, thisVar, argPtr, argCount);
        assert(!jsvIsName(returnVar));
      } else {
        returnVar = 0;
      }

      // unlock values if we locked them
      jsvUnLockMany((unsigned)allocatedArgCount, argPtr);

      /* Return to old 'this' var. No need to unlock as we never locked before */
      if (execInfo.thisVar) jsvUnRef(execInfo.thisVar);
      execInfo.thisVar = oldThisVar;
    } else { // ----------------------------------------------------- NOT NATIVE
      // create a new symbol table entry for execution of this function
      // OPT: can we cache this function execution environment + param variables?
      // OPT: Probably when calling a function ONCE, use it, otherwise when recursing, make new?
      JsVar *functionRoot = jsvNewWithFlags(JSV_FUNCTION);
      if (!functionRoot) { // out of memory
        jspSetError(false);
        jsvUnLock(thisVar);
        return 0;
      }

      JsVar *functionScope = 0;
      JsVar *functionCode = 0;
      JsVar *functionInternalName = 0;
#ifndef ESPR_NO_LINE_NUMBERS
      uint16_t functionLineNumber = 0;
#endif

      /** NOTE: We expect that the function object will have:
       *
       *  * Parameters
       *  * Code/Scope/Name
       *
       * IN THAT ORDER.
       */
      JsvObjectIterator it;
      jsvObjectIteratorNew(&it, function);

      JsVar *param = jsvObjectIteratorGetKey(&it);
      JsVar *value = jsvObjectIteratorGetValue(&it);
      while (jsvIsFunctionParameter(param) && value) {
        jsvAddFunctionParameter(functionRoot, jsvNewFromStringVar(param,1,JSVAPPENDSTRINGVAR_MAXLENGTH), value);
        jsvUnLock2(value, param);
        jsvObjectIteratorNext(&it);
        param = jsvObjectIteratorGetKey(&it);
        value = jsvObjectIteratorGetValue(&it);
      }
      jsvUnLock2(value, param);
      if (isParsing) {
        int hadParams = 0;
        // grab in all parameters. We go around this loop until we've run out
        // of named parameters AND we've parsed all the supplied arguments
        while (!JSP_SHOULDNT_PARSE && lex->tk!=')') {
          JsVar *param = jsvObjectIteratorGetKey(&it);
          bool paramDefined = jsvIsFunctionParameter(param);
          if (lex->tk!=')' || paramDefined) {
            hadParams++;
            JsVar *value = 0;
            // ONLY parse this if it was supplied, otherwise leave 0 (undefined)
            if (lex->tk!=')')
              value = jspeAssignmentExpression();
            // and if execute, copy it over
            value = jsvSkipNameAndUnLock(value);
            jsvAddFunctionParameter(functionRoot, paramDefined?jsvNewFromStringVar(param,1,JSVAPPENDSTRINGVAR_MAXLENGTH):0, value);
            jsvUnLock(value);
            if (lex->tk!=')') JSP_MATCH(',');
          }
          jsvUnLock(param);
          if (paramDefined) jsvObjectIteratorNext(&it);
        }
        JSP_MATCH(')');
      } else { // and NOT isParsing
        int args = 0;
        while (args<argCount) {
          JsVar *param = jsvObjectIteratorGetKey(&it);
          bool paramDefined = jsvIsFunctionParameter(param);
          jsvAddFunctionParameter(functionRoot, paramDefined?jsvNewFromStringVar(param,1,JSVAPPENDSTRINGVAR_MAXLENGTH):0, argPtr[args]);
          args++;
          jsvUnLock(param);
          if (paramDefined) jsvObjectIteratorNext(&it);
        }
      }
      // Now go through what's left
      while (jsvObjectIteratorHasValue(&it)) {
        JsVar *param = jsvObjectIteratorGetKey(&it);
        if (jsvIsString(param)) {
          if (jsvIsStringEqual(param, JSPARSE_FUNCTION_SCOPE_NAME)) functionScope = jsvSkipName(param);
          else if (jsvIsStringEqual(param, JSPARSE_FUNCTION_CODE_NAME)) functionCode = jsvSkipName(param);
          else if (jsvIsStringEqual(param, JSPARSE_FUNCTION_NAME_NAME)) functionInternalName = jsvSkipName(param);
          else if (jsvIsStringEqual(param, JSPARSE_FUNCTION_THIS_NAME)) {
            jsvUnLock(thisVar);
            thisVar = jsvSkipName(param);
          }
#ifndef ESPR_NO_LINE_NUMBERS
          else if (jsvIsStringEqual(param, JSPARSE_FUNCTION_LINENUMBER_NAME)) functionLineNumber = (uint16_t)jsvGetIntegerAndUnLock(jsvSkipName(param));
#endif
          else if (jsvIsFunctionParameter(param)) {
            // parameter with a default value that wasn't supplied by the caller
            JsVar *defaultVal = jsvSkipName(param);
            jsvAddFunctionParameter(functionRoot, jsvNewFromStringVar(param,1,JSVAPPENDSTRINGVAR_MAXLENGTH), defaultVal);
            jsvUnLock(defaultVal);
          }
        }
        jsvUnLock(param);
        jsvObjectIteratorNext(&it);
      }
      jsvObjectIteratorFree(&it);

      // setup the function's name (if a named function)
      if (functionInternalName) {
        JsVar *name = jsvMakeIntoVariableName(jsvNewFromStringVar(functionInternalName,0,JSVAPPENDSTRINGVAR_MAXLENGTH), function);
        jsvAddName(functionRoot, name);
        jsvUnLock2(name, functionInternalName);
      }

      if (!JSP_HAS_ERROR) {
        // save old scopes and reset scope list
        JsVar *oldScopeVar = execInfo.scopesVar;
        execInfo.scopesVar = 0;
        // if we have a scope var, load it up. We may not have one if there were no scopes apart from root
        if (functionScope) {
          jspeiLoadScopesFromVar(functionScope);
          jsvUnLock(functionScope);
        }
        // add the function's execute space to the symbol table so we can recurse
        if (jspeiAddScope(functionRoot)) {
          /* Adding scope may have failed - we may have descended too deep - so be sure
           * not to pull somebody else's scope off */

          JsVar *oldThisVar = execInfo.thisVar;
          if (thisVar)
            execInfo.thisVar = jsvRef(thisVar);
          else
            execInfo.thisVar = jsvRef(execInfo.root); // 'this' should always default to root

          /* we just want to execute the block, but something could
           * have messed up and left us with the wrong Lexer, so
           * we want to be careful here... */
          if (functionCode) {
#ifdef USE_DEBUGGER
            bool hadDebuggerNextLineOnly = false;
            if (execInfo.execute&EXEC_DEBUGGER_STEP_INTO) {
              if (functionName) jsiConsolePrintf("Stepping into %v\n", functionName);
              else jsiConsolePrintf("Stepping into function\n", functionName); // NOTE(review): 'functionName' argument is unused by this format string - verify intent
            } else {
              hadDebuggerNextLineOnly = execInfo.execute&EXEC_DEBUGGER_NEXT_LINE;
              if (hadDebuggerNextLineOnly)
                execInfo.execute &= (JsExecFlags)~EXEC_DEBUGGER_NEXT_LINE;
            }
#endif

            JsLex newLex;
            JsLex *oldLex = jslSetLex(&newLex);
            jslInit(functionCode);
#ifndef ESPR_NO_LINE_NUMBERS
            newLex.lineNumberOffset = functionLineNumber;
#endif
            JSP_SAVE_EXECUTE();
            // force execute without any previous state
#ifdef USE_DEBUGGER
            execInfo.execute = EXEC_YES | (execInfo.execute&(EXEC_CTRL_C_MASK|EXEC_ERROR_MASK|EXEC_DEBUGGER_NEXT_LINE));
#else
            execInfo.execute = EXEC_YES | (execInfo.execute&(EXEC_CTRL_C_MASK|EXEC_ERROR_MASK));
#endif

            if (jsvIsFunctionReturn(function)) {
#ifdef USE_DEBUGGER
              // we didn't parse a statement so wouldn't trigger the debugger otherwise
              if (execInfo.execute&EXEC_DEBUGGER_NEXT_LINE && JSP_SHOULD_EXECUTE) {
                lex->tokenLastStart = lex->tokenStart;
                jsiDebuggerLoop();
              }
#endif
              // implicit return - we just need an expression (optional)
              if (lex->tk != ';' && lex->tk != '}')
                returnVar = jsvSkipNameAndUnLock(jspeExpression());
            } else {
              // setup a return variable
              JsVar *returnVarName = jsvAddNamedChild(functionRoot, 0, JSPARSE_RETURN_VAR);
              // parse the whole block
              jspeBlockNoBrackets();
              /* get the real return var before we remove it from our function.
               * We can unlock below because returnVarName is still part of
               * functionRoot, so won't get freed. */
              returnVar = jsvSkipNameAndUnLock(returnVarName);
              if (returnVarName) // could have failed with out of memory
                jsvSetValueOfName(returnVarName, 0); // remove return value (which helps stop circular references)
            }
            // Store a stack trace if we had an error
            JsExecFlags hasError = execInfo.execute&EXEC_ERROR_MASK;
            JSP_RESTORE_EXECUTE(); // because return will probably have set execute to false

#ifdef USE_DEBUGGER
            bool calledDebugger = false;
            if (execInfo.execute & EXEC_DEBUGGER_MASK) {
              jsiConsolePrint("Value returned is =");
              jsfPrintJSON(returnVar, JSON_LIMIT | JSON_SOME_NEWLINES | JSON_PRETTY | JSON_SHOW_DEVICES);
              jsiConsolePrintChar('\n');
              if (execInfo.execute & EXEC_DEBUGGER_FINISH_FUNCTION) {
                calledDebugger = true;
                jsiDebuggerLoop();
              }
            }
            if (hadDebuggerNextLineOnly && !calledDebugger)
              execInfo.execute |= EXEC_DEBUGGER_NEXT_LINE;
#endif

            jslKill();
            jslSetLex(oldLex);

            if (hasError) {
              execInfo.execute |= hasError; // propagate error
              JsVar *stackTrace = jsvObjectGetChild(execInfo.hiddenRoot, JSPARSE_STACKTRACE_VAR, JSV_STRING_0);
              if (stackTrace) {
                jsvAppendPrintf(stackTrace, jsvIsString(functionName)?"in function %q called from ":
                    "in function called from ", functionName);
                if (lex) {
                  jspAppendStackTrace(stackTrace);
                } else
                  jsvAppendPrintf(stackTrace, "system\n");
                jsvUnLock(stackTrace);
              }
            }
          }

          /* Return to old 'this' var. No need to unlock as we never locked before */
          if (execInfo.thisVar) jsvUnRef(execInfo.thisVar);
          execInfo.thisVar = oldThisVar;

          jspeiRemoveScope();
        }

        // Unlock scopes and restore old ones
        jsvUnLock(execInfo.scopesVar);
        execInfo.scopesVar = oldScopeVar;
      }
      jsvUnLock(functionCode);
      jsvUnLock(functionRoot);
    }

    jsvUnLock(thisVar);
    return returnVar;
  } else if (isParsing) { // ---------------------------------- function, but not executing - just parse args and be done
    jspeParseFunctionCallBrackets();
    /* Do not return function, as it will be unlocked! */
    return 0;
  } else return 0;
}

// Find a variable (or built-in function) based on the current scopes
JsVar *jspGetNamedVariable(const char *tokenName) {
  JsVar *a = JSP_SHOULD_EXECUTE ? jspeiFindInScopes(tokenName) : 0;
  if (JSP_SHOULD_EXECUTE && !a) {
    /* Special case! We haven't found the variable, so check out
     * and see if it's one of our builtins... */
    if (jswIsBuiltInObject(tokenName)) {
      // Check if we have a built-in function for it
      // OPT: Could we instead have jswIsBuiltInObjectWithoutConstructor?
      JsVar *obj = jswFindBuiltInFunction(0, tokenName);
      // If not, make one
      if (!obj)
        obj = jspNewBuiltin(tokenName);
      if (obj) { // not out of memory
        a = jsvAddNamedChild(execInfo.root, obj, tokenName);
        jsvUnLock(obj);
      }
    } else {
      a = jswFindBuiltInFunction(0, tokenName);
      if (!a) {
        /* Variable doesn't exist! JavaScript says we should create it
         * (we won't add it here. This is done in the assignment operator)*/
        a = jsvMakeIntoVariableName(jsvNewFromString(tokenName), 0);
      }
    }
  }
  return a;
}

/// Used by jspGetNamedField / jspGetVarNamedField
static NO_INLINE JsVar *jspGetNamedFieldInParents(JsVar *object, const char* name, bool returnName) {
  // Now look in prototypes
  JsVar * child = jspeiFindChildFromStringInParents(object, name);

  /* Check for builtins via separate function
   * This way we save on RAM for built-ins because everything comes out of program code */
  if (!child) {
    child = jswFindBuiltInFunction(object, name);
  }

  /* We didn't get here if we found a child in the object itself, so
   * if we're here then we probably have the wrong name - so for example
   * with `a.b = c;` could end up setting `a.prototype.b` (bug #360)
   *
   * Also we might have got a built-in, which wouldn't have a name on it
   * anyway - so in both cases, strip the name if it is there, and create
   * a new name that references the object we actually requested the
   * member from..
*/ if (child && returnName) { // Get rid of existing name if (jsvIsName(child)) { JsVar *t = jsvGetValueOfName(child); jsvUnLock(child); child = t; } // create a new name JsVar *nameVar = jsvNewFromString(name); JsVar *newChild = jsvCreateNewChild(object, nameVar, child); jsvUnLock2(nameVar, child); child = newChild; } // If not found and is the prototype, create it if (!child) { if (jsvIsFunction(object) && strcmp(name, JSPARSE_PROTOTYPE_VAR)==0) { // prototype is supposed to be an object JsVar *proto = jsvNewObject(); // make sure it has a 'constructor' variable that points to the object it was part of jsvObjectSetChild(proto, JSPARSE_CONSTRUCTOR_VAR, object); child = jsvAddNamedChild(object, proto, JSPARSE_PROTOTYPE_VAR); jspEnsureIsPrototype(object, child); jsvUnLock(proto); } else if (strcmp(name, JSPARSE_INHERITS_VAR)==0) { const char *objName = jswGetBasicObjectName(object); if (objName) { JsVar *p = jsvSkipNameAndUnLock(jspNewPrototype(objName)); // jspNewPrototype returns a 'prototype' name that's already a child of eg. an array // Create a new 'name' called __proto__ that links to it JsVar *i = jsvNewFromString(JSPARSE_INHERITS_VAR); if (p) child = jsvCreateNewChild(object, i, p); jsvUnLock(i); } } } return child; } /** Get the named function/variable on the object - whether it's built in, or predefined. * If !returnName, returns the function/variable itself or undefined, but * if returnName, return a name (could be fake) referencing the parent. * * NOTE: ArrayBuffer/Strings are not handled here. We assume that if we're * passing a char* rather than a JsVar it's because we're looking up via * a symbol rather than a variable. 
 * To handle these use jspGetVarNamedField */
JsVar *jspGetNamedField(JsVar *object, const char* name, bool returnName) {
  JsVar *child = 0;
  // if we're an object (or pretending to be one) look at our own children first
  if (jsvHasChildren(object))
    child = jsvFindChildFromString(object, name, false);

  if (!child) {
    // not a direct child: search prototype chain and built-ins
    child = jspGetNamedFieldInParents(object, name, returnName);

    // If not found and is the prototype, create it
    if (!child && jsvIsFunction(object) && strcmp(name, JSPARSE_PROTOTYPE_VAR)==0) {
      JsVar *value = jsvNewObject(); // prototype is supposed to be an object
      child = jsvAddNamedChild(object, value, JSPARSE_PROTOTYPE_VAR);
      jsvUnLock(value);
    }
  }

  if (returnName) return child;
  else return jsvSkipNameAndUnLock(child);
}

/// see jspGetNamedField - note that nameVar should have had jsvAsArrayIndex called on it first
JsVar *jspGetVarNamedField(JsVar *object, JsVar *nameVar, bool returnName) {
  JsVar *child = 0;
  // if we're an object (or pretending to be one)
  if (jsvHasChildren(object))
    child = jsvFindChildFromVar(object, nameVar, false);

  if (!child) {
    if (jsvIsArrayBuffer(object) && jsvIsInt(nameVar)) {
      // for array buffers, we actually create a NAME, and hand that back - then when we assign (or use SkipName) we pull out the correct data
      child = jsvMakeIntoVariableName(jsvNewFromInteger(jsvGetInteger(nameVar)), object);
      if (child) // turn into an 'array buffer name'
        child->flags = (child->flags & ~JSV_VARTYPEMASK) | JSV_ARRAYBUFFERNAME;
    } else if (jsvIsString(object) && jsvIsInt(nameVar)) {
      // indexing a string with an integer: return the 1-char substring
      JsVarInt idx = jsvGetInteger(nameVar);
      if (idx>=0 && idx<(JsVarInt)jsvGetStringLength(object)) {
        char ch = jsvGetCharInString(object, (size_t)idx);
        child = jsvNewStringOfLength(1, &ch);
      } else if (returnName)
        child = jsvCreateNewChild(object, nameVar, 0); // just return *something* to show this is handled
    } else {
      // get the name as a string
      // NOTE(review): names longer than JSLEX_MAX_TOKEN_LENGTH are silently
      // truncated by jsvGetString here — confirm this is intended behaviour.
      char name[JSLEX_MAX_TOKEN_LENGTH];
      jsvGetString(nameVar, name, JSLEX_MAX_TOKEN_LENGTH);
      // try and find it in parents
      child = jspGetNamedFieldInParents(object, name, returnName);

      // If not found and is the prototype, create it
      if (!child && jsvIsFunction(object) && jsvIsStringEqual(nameVar, JSPARSE_PROTOTYPE_VAR)) {
        JsVar *value = jsvNewObject(); // prototype is supposed to be an object
        child = jsvAddNamedChild(object, value, JSPARSE_PROTOTYPE_VAR);
        jsvUnLock(value);
      }
    }
  }

  if (returnName) return child;
  else return jsvSkipNameAndUnLock(child);
}

/// Call the named function on the object - whether it's built in, or predefined.
/// Returns the return value of the function (locked), or 0.
JsVar *jspCallNamedFunction(JsVar *object, char* name, int argCount, JsVar **argPtr) {
  JsVar *child = jspGetNamedField(object, name, false);
  JsVar *r = 0;
  if (jsvIsFunction(child))
    r = jspeFunctionCall(child, 0, object, false, argCount, argPtr);
  jsvUnLock(child);
  return r;
}

/* Parse the member-access part of an expression: a chain of '.' and '[...]'
 * accessors applied to `a`. On return *parentResult holds the (locked) parent
 * of the final member, so callers can make `this` bindings for method calls. */
NO_INLINE JsVar *jspeFactorMember(JsVar *a, JsVar **parentResult) {
  /* The parent if we're executing a method call */
  JsVar *parent = 0;

  while (lex->tk=='.' || lex->tk=='[') {
    if (lex->tk == '.') { // ------------------------------------- Record Access
      JSP_ASSERT_MATCH('.');
      if (jslIsIDOrReservedWord()) {
        if (JSP_SHOULD_EXECUTE) {
          // Note: name will go away when we parse something else!
          const char *name = jslGetTokenValueAsString();

          JsVar *aVar = jsvSkipNameWithParent(a,true,parent);
          JsVar *child = 0;
          if (aVar)
            child = jspGetNamedField(aVar, name, true);
          if (!child) {
            if (!jsvIsUndefined(aVar)) {
              // if no child found, create a pointer to where it could be
              // as we don't want to allocate it until it's written
              JsVar *nameVar = jslGetTokenValueAsVar();
              child = jsvCreateNewChild(aVar, nameVar, 0);
              jsvUnLock(nameVar);
            } else {
              // could have been a string...
              jsExceptionHere(JSET_ERROR, "Cannot read property '%s' of undefined", name);
            }
          }
          // shift down the chain: old parent released, aVar becomes the new parent
          jsvUnLock(parent);
          parent = aVar;
          jsvUnLock(a);
          a = child;
        }
        // skip over current token (we checked above that it was an ID or reserved word)
        jslGetNextToken();
      } else {
        // incorrect token - force a match fail by asking for an ID
        JSP_MATCH_WITH_RETURN(LEX_ID, a);
      }
    } else if (lex->tk == '[') { // ------------------------------------- Array Access
      JsVar *index;
      JSP_ASSERT_MATCH('[');
      if (!jspCheckStackPosition()) return parent;
      index = jsvSkipNameAndUnLock(jspeAssignmentExpression());
      JSP_MATCH_WITH_CLEANUP_AND_RETURN(']', jsvUnLock2(parent, index);, a);
      if (JSP_SHOULD_EXECUTE) {
        index = jsvAsArrayIndexAndUnLock(index);
        JsVar *aVar = jsvSkipNameWithParent(a,true,parent);
        JsVar *child = 0;
        if (aVar)
          child = jspGetVarNamedField(aVar, index, true);

        if (!child) {
          if (jsvHasChildren(aVar)) {
            // if no child found, create a pointer to where it could be
            // as we don't want to allocate it until it's written
            child = jsvCreateNewChild(aVar, index, 0);
          } else {
            jsExceptionHere(JSET_ERROR, "Field or method %q does not already exist, and can't create it on %t", index, aVar);
          }
        }
        jsvUnLock(parent);
        parent = jsvLockAgainSafe(aVar);
        jsvUnLock(a);
        a = child;
        jsvUnLock(aVar);
      }
      jsvUnLock(index);
    } else {
      assert(0);
    }
  }

  // hand ownership of `parent` to the caller if they want it
  if (parentResult) *parentResult = parent;
  else jsvUnLock(parent);
  return a;
}

/** Execute `new func(...)`: create a new object linked to func's prototype,
 * run the constructor with it as `this`, and return the result (locked). */
NO_INLINE JsVar *jspeConstruct(JsVar *func, JsVar *funcName, bool hasArgs) {
  assert(JSP_SHOULD_EXECUTE);
  if (!jsvIsFunction(func)) {
    jsExceptionHere(JSET_ERROR, "Constructor should be a function, but is %t", func);
    return 0;
  }

  JsVar *thisObj = jsvNewObject();
  if (!thisObj) return 0; // out of memory
  // Make sure the function has a 'prototype' var
  JsVar *prototypeName = jsvFindChildFromString(func, JSPARSE_PROTOTYPE_VAR, true);
  jspEnsureIsPrototype(func, prototypeName); // make sure it's an object
  JsVar *prototypeVar = jsvSkipName(prototypeName);
  // link thisObj.__proto__ -> func.prototype
  jsvUnLock3(jsvAddNamedChild(thisObj, prototypeVar, JSPARSE_INHERITS_VAR), prototypeVar, prototypeName);

  JsVar *a = jspeFunctionCall(func, funcName, thisObj, hasArgs, 0, 0);

  /* FIXME: we should ignore return values that aren't objects (bug #848), but then we need
   * to be aware of `new String()` and `new Uint8Array()`. Ideally we'd let through
   * arrays/etc, and then String/etc should return 'boxed' values.
   *
   * But they don't return boxed values at the moment, so let's just
   * pass the return value through. If you try and return a string from
   * a function it's broken JS code anyway.
   */
  if (a) {
    jsvUnLock(thisObj);
    thisObj = a;
  } else {
    jsvUnLock(a);
  }
  return thisObj;
}

/* Parse (and possibly execute) a factor followed by any number of function
 * calls and member accesses, handling `new` and `super` specially. */
NO_INLINE JsVar *jspeFactorFunctionCall() {
  /* The parent if we're executing a method call */
  bool isConstructor = false;
  if (lex->tk==LEX_R_NEW) {
    JSP_ASSERT_MATCH(LEX_R_NEW);
    isConstructor = true;

    if (lex->tk==LEX_R_NEW) {
      jsExceptionHere(JSET_ERROR, "Nesting 'new' operators is unsupported");
      jspSetError(false);
      return 0;
    }
  }
  JsVar *parent = 0;
#ifndef SAVE_ON_FLASH
  bool wasSuper = lex->tk==LEX_R_SUPER;
#endif
  JsVar *a = jspeFactorMember(jspeFactor(), &parent);
#ifndef SAVE_ON_FLASH
  if (wasSuper) {
    /* if this was 'super.something' then we need
     * to overwrite the parent, because it'll be
     * set to the prototype otherwise.
     */
    jsvUnLock(parent);
    parent = jsvLockAgainSafe(execInfo.thisVar);
  }
#endif
  while ((lex->tk=='(' || (isConstructor && JSP_SHOULD_EXECUTE)) && !jspIsInterrupted()) {
    JsVar *funcName = a;
    JsVar *func = jsvSkipName(funcName);

    /* The constructor function doesn't change parsing, so if we're
     * not executing, just short-cut it.
     */
    if (isConstructor && JSP_SHOULD_EXECUTE) {
      // If we have '(' parse an argument list, otherwise don't look for any args
      bool parseArgs = lex->tk=='(';
      a = jspeConstruct(func, funcName, parseArgs);
      isConstructor = false; // don't treat subsequent brackets as constructors
    } else
      a = jspeFunctionCall(func, funcName, parent, true, 0, 0);

    jsvUnLock3(funcName, func, parent);
    parent=0;
    // the call's result may itself be followed by more '.x' / '[x]' / '(...)'
    a = jspeFactorMember(a, &parent);
  }
#ifndef SAVE_ON_FLASH
  /* If we've got something that we care about the parent of (eg. a getter/setter)
   * then we repackage it into a 'NewChild' name that references the parent before
   * we leave. Note: You can't do this on everything because normally NewChild
   * forces a new child to be blindly created. It works on Getters/Setters because
   * we *always* run those rather than adding them.
   */
  if (parent && jsvIsBasicName(a) && !jsvIsNewChild(a)) {
    JsVar *value = jsvLockSafe(jsvGetFirstChild(a));
    if (jsvIsGetterOrSetter(value)) { // no need to do this for functions since we've just executed whatever we needed to
      JsVar *nameVar = jsvCopyNameOnly(a,false,true);
      JsVar *newChild = jsvCreateNewChild(parent, nameVar, value);
      jsvUnLock2(nameVar, a);
      a = newChild;
    }
    jsvUnLock(value);
  }
#endif
  jsvUnLock(parent);
  return a;
}

/* Parse a JSON-style object literal `{ ... }` and return the (locked) object,
 * or fast-skip the block and return 0 when not executing. */
NO_INLINE JsVar *jspeFactorObject() {
  if (JSP_SHOULD_EXECUTE) {
    JsVar *contents = jsvNewObject();
    if (!contents) { // out of memory
      jspSetError(false);
      return 0;
    }
    /* JSON-style object definition */
    JSP_MATCH_WITH_RETURN('{', contents);
    while (!JSP_SHOULDNT_PARSE && lex->tk != '}') {
      JsVar *varName = 0;
      // we only allow strings or IDs on the left hand side of an initialisation
      if (jslIsIDOrReservedWord()) {
        if (JSP_SHOULD_EXECUTE)
          varName = jslGetTokenValueAsVar();
        jslGetNextToken(); // skip over current token
      } else if (
          lex->tk==LEX_STR ||
          lex->tk==LEX_FLOAT ||
          lex->tk==LEX_INT ||
          lex->tk==LEX_R_TRUE ||
          lex->tk==LEX_R_FALSE ||
          lex->tk==LEX_R_NULL ||
          lex->tk==LEX_R_UNDEFINED) {
        // literal keys are evaluated like factors (eg. {1:..., "a":...})
        varName = jspeFactor();
      } else {
        JSP_MATCH_WITH_RETURN(LEX_ID, contents);
      }
#ifndef SAVE_ON_FLASH
      if (lex->tk==LEX_ID && jsvIsString(varName)) {
        // `get name() {...}` / `set name(v) {...}` accessor syntax
        bool isGetter = jsvIsStringEqual(varName, "get");
        bool isSetter = jsvIsStringEqual(varName, "set");
        if (isGetter || isSetter) {
          jsvUnLock(varName);
          varName = jslGetTokenValueAsVar();
          JSP_ASSERT_MATCH(LEX_ID);
          JsVar *method = jspeFunctionDefinition(false);
          jsvAddGetterOrSetter(contents, varName, isGetter, method);
          jsvUnLock(method);
        }
      } else
#endif
      {
        JSP_MATCH_WITH_CLEANUP_AND_RETURN(':', jsvUnLock(varName), contents);
        if (JSP_SHOULD_EXECUTE) {
          varName = jsvAsArrayIndexAndUnLock(varName);
          JsVar *contentsName = jsvFindChildFromVar(contents, varName, true);
          if (contentsName) {
            JsVar *value = jsvSkipNameAndUnLock(jspeAssignmentExpression()); // value can be 0 (could be undefined!)
            jsvUnLock2(jsvSetValueOfName(contentsName, value), value);
          }
        }
      }
      jsvUnLock(varName);
      // no need to clean here, as it will definitely be used
      if (lex->tk != '}') JSP_MATCH_WITH_RETURN(',', contents);
    }
    JSP_MATCH_WITH_RETURN('}', contents);
    return contents;
  } else {
    // Not executing so do fast skip
    jspeBlock();
    return 0;
  }
}

/* Parse a JSON-style array literal `[ ... ]` and return the (locked) array,
 * or 0 when not executing. */
NO_INLINE JsVar *jspeFactorArray() {
  int idx = 0;
  JsVar *contents = 0;
  if (JSP_SHOULD_EXECUTE) {
    contents = jsvNewEmptyArray();
    if (!contents) { // out of memory
      jspSetError(false);
      return 0;
    }
  }
  /* JSON-style array */
  JSP_MATCH_WITH_RETURN('[', contents);
  while (!JSP_SHOULDNT_PARSE && lex->tk != ']') {
    if (JSP_SHOULD_EXECUTE) {
      JsVar *aVar = 0;
      JsVar *indexName = 0;
      if (lex->tk != ',') { // #287 - [,] and [1,2,,4] are allowed
        aVar = jsvSkipNameAndUnLock(jspeAssignmentExpression());
        indexName = jsvMakeIntoVariableName(jsvNewFromInteger(idx), aVar);
      }
      if (indexName) { // could be out of memory
        jsvAddName(contents, indexName);
        jsvUnLock(indexName);
      }
      jsvUnLock(aVar);
    } else {
      // not executing: parse and discard
      jsvUnLock(jspeAssignmentExpression());
    }
    // no need to clean here, as it will definitely be used
    if (lex->tk != ']') JSP_MATCH_WITH_RETURN(',', contents);
    idx++;
  }
  // set .length explicitly so trailing holes are counted
  if (contents) jsvSetArrayLength(contents, idx, false);
  JSP_MATCH_WITH_RETURN(']', contents);
  return contents;
}

/** Ensure the value referenced by prototypeName is a valid prototype:
 * coerce non-object/non-function values to a fresh object (raising a
 * TypeError for anything defined-but-wrong) and point its 'constructor'
 * back at instanceOf. */
NO_INLINE void jspEnsureIsPrototype(JsVar *instanceOf, JsVar *prototypeName) {
  if (!prototypeName) return;
  JsVar *prototypeVar = jsvSkipName(prototypeName);
  if (!(jsvIsObject(prototypeVar) || jsvIsFunction(prototypeVar))) {
    if (!jsvIsUndefined(prototypeVar))
      jsExceptionHere(JSET_TYPEERROR, "Prototype should be an object, got %t", prototypeVar);
    jsvUnLock(prototypeVar);
    prototypeVar = jsvNewObject(); // prototype is supposed to be an object
    JsVar *lastName = jsvSkipToLastName(prototypeName);
    jsvSetValueOfName(lastName, prototypeVar);
    jsvUnLock(lastName);
  }
  JsVar *constructor = jsvFindChildFromString(prototypeVar, JSPARSE_CONSTRUCTOR_VAR, true);
  if (constructor) jsvSetValueOfName(constructor, instanceOf);
  jsvUnLock2(constructor, prototypeVar);
}

/// Handle `typeof expr` - returns a new string var with the type name.
NO_INLINE JsVar *jspeFactorTypeOf() {
  JSP_ASSERT_MATCH(LEX_R_TYPEOF);
  JsVar *a = jspeUnaryExpression();
  JsVar *result = 0;
  if (JSP_SHOULD_EXECUTE) {
    if (!jsvIsVariableDefined(a)) {
      // so we don't get a ReferenceError when accessing an undefined var
      result=jsvNewFromString("undefined");
    } else {
      a = jsvSkipNameAndUnLock(a);
      result=jsvNewFromString(jsvGetTypeOf(a));
    }
  }
  jsvUnLock(a);
  return result;
}

/// Handle `delete expr` - returns a boolean var: true if removal succeeded.
NO_INLINE JsVar *jspeFactorDelete() {
  JSP_ASSERT_MATCH(LEX_R_DELETE);
  JsVar *parent = 0;
  JsVar *a = jspeFactorMember(jspeFactor(), &parent);
  JsVar *result = 0;
  if (JSP_SHOULD_EXECUTE) {
    bool ok = false;
    if (jsvIsName(a) && !jsvIsNewChild(a)) {
      // if no parent, check in root?
      if (!parent && jsvIsChild(execInfo.root, a))
        parent = jsvLockAgain(execInfo.root);
#ifdef DEBUG
      if (jsvHasChildren(parent)) assert(jsvIsChild(parent, a));
#endif
      if (jsvHasChildren(parent) && jsvIsChild(parent, a)) {
        // else remove properly.
        /* we use jsvIsChild here just in case.
         * delete probably isn't called that often so it pays to be safe */
        if (jsvIsArray(parent)) {
          // For arrays, we must make sure we don't change the length
          JsVarInt l = jsvGetArrayLength(parent);
          jsvRemoveChild(parent, a);
          jsvSetArrayLength(parent, l, false);
        } else {
          jsvRemoveChild(parent, a);
        }
        ok = true;
      }
    }
    result = jsvNewFromBool(ok);
  }
  jsvUnLock2(a, parent);
  return result;
}

#ifndef SAVE_ON_FLASH
/** Parse a template literal `like ${this}`, evaluating each ${...}
 * sub-expression and splicing its string value into the result. */
JsVar *jspeTemplateLiteral() {
  JsVar *a = 0;
  if (JSP_SHOULD_EXECUTE) {
    JsVar *template = jslGetTokenValueAsVar();
    a = jsvNewFromEmptyString();
    if (a && template) {
      JsvStringIterator it, dit; // it = source template, dit = destination string
      jsvStringIteratorNew(&it, template, 0);
      jsvStringIteratorNew(&dit, a, 0);
      while (jsvStringIteratorHasChar(&it)) {
        char ch = jsvStringIteratorGetCharAndNext(&it);
        if (ch=='$') {
          ch = jsvStringIteratorGetChar(&it);
          if (ch=='{') {
            // Now parse out the expression, tracking nested { } braces
            jsvStringIteratorNext(&it);
            int brackets = 1;
            JsVar *expr = jsvNewFromEmptyString();
            if (!expr) break;
            JsvStringIterator eit;
            jsvStringIteratorNew(&eit, expr, 0);
            while (jsvStringIteratorHasChar(&it)) {
              ch = jsvStringIteratorGetCharAndNext(&it);
              if (ch=='{') brackets++;
              if (ch=='}') {
                brackets--;
                if (!brackets) break;
              }
              jsvStringIteratorAppend(&eit, ch);
            }
            jsvStringIteratorFree(&eit);
            // evaluate the extracted expression and append it as a string
            JsVar *result = jspEvaluateExpressionVar(expr);
            jsvUnLock(expr);
            result = jsvAsStringAndUnLock(result);
            jsvStringIteratorAppendString(&dit, result, 0, JSVAPPENDSTRINGVAR_MAXLENGTH);
            jsvUnLock(result);
          } else {
            // lone '$' with no '{' following - copy it through verbatim
            jsvStringIteratorAppend(&dit, '$');
          }
        } else {
          jsvStringIteratorAppend(&dit, ch);
        }
      }
      jsvStringIteratorFree(&it);
      jsvStringIteratorFree(&dit);
    }
    jsvUnLock(template);
  }
  JSP_ASSERT_MATCH(LEX_TEMPLATE_LITERAL);
  return a;
}
#endif

/** Add a named parameter to funcVar (creating funcVar if 0). Parameter names
 * are stored with a leading '\xFF' byte to distinguish them from plain children. */
NO_INLINE JsVar *jspeAddNamedFunctionParameter(JsVar *funcVar, JsVar *name) {
  if (!funcVar) funcVar = jsvNewWithFlags(JSV_FUNCTION);
  char buf[JSLEX_MAX_TOKEN_LENGTH+1];
  buf[0] = '\xFF';
  size_t l = jsvGetString(name, &buf[1], JSLEX_MAX_TOKEN_LENGTH);
  buf[l+1] = 0; // zero terminate since jsvGetString doesn't add one
  JsVar
*param = jsvAddNamedChild(funcVar, 0, buf);
  jsvMakeFunctionParameter(param);
  jsvUnLock(param);
  return funcVar;
}

#ifndef SAVE_ON_FLASH
// parse an arrow function (the LEX_ARROW_FUNCTION token is next; `a`, if
// non-0, is the single parameter name parsed before the arrow)
NO_INLINE JsVar *jspeArrowFunction(JsVar *funcVar, JsVar *a) {
  assert(!a || jsvIsName(a));
  JSP_ASSERT_MATCH(LEX_ARROW_FUNCTION);
  funcVar = jspeAddNamedFunctionParameter(funcVar, a);

  // `x => expr` has an expression-only body; `x => { ... }` has a block body
  bool expressionOnly = lex->tk!='{';
  bool fnIncludesThis = jspeFunctionDefinitionInternal(funcVar, expressionOnly);

  /* Arrow functions store the value of 'this' when they were defined. In order
     to differentiate between normal functions we usually have to store 'this' even
     if 'this' was just the global object. Very few arrow functions actually use
     'this' though - usually they are just used as a shorthand, and so we end up
     wasting a whole extra var for every single arrow function.

     So... while parsing the function's body we check if the 'this' keyword is
     used. If it isn't, we just don't include it.
   */
  if (fnIncludesThis)
    jsvObjectSetChild(funcVar, JSPARSE_FUNCTION_THIS_NAME, execInfo.thisVar);

  return funcVar;
}

// parse expressions with commas, maybe followed by an arrow function (bracket already matched)
NO_INLINE JsVar *jspeExpressionOrArrowFunction() {
  JsVar *a = 0;
  JsVar *funcVar = 0;
  bool allNames = true; // stays true while every comma-separated item could be a parameter name
  while (lex->tk!=')' && !JSP_SHOULDNT_PARSE) {
    if (allNames && a) {
      // we never get here if this isn't a name and a string
      funcVar = jspeAddNamedFunctionParameter(funcVar, a);
    }
    jsvUnLock(a);
    a = jspeAssignmentExpression();
    /* if we're not executing, `a` will always be undefined so
       don't do the check for allNames - just assume all is good. We'll
       properly check when we execute.
     */
    if (JSP_SHOULD_EXECUTE && !(jsvIsName(a) && jsvIsString(a)))
      allNames = false;
    if (lex->tk!=')') JSP_MATCH_WITH_CLEANUP_AND_RETURN(',', jsvUnLock2(a,funcVar), 0);
  }
  JSP_MATCH_WITH_CLEANUP_AND_RETURN(')', jsvUnLock2(a,funcVar), 0);
  // if all names inside brackets and an arrow is found, create a function
  if (allNames && lex->tk==LEX_ARROW_FUNCTION) {
    funcVar = jspeArrowFunction(funcVar, a);
    jsvUnLock(a);
    return funcVar;
  } else {
    // plain parenthesised expression: discard any speculative parameter list
    jsvUnLock(funcVar);
    return a;
  }
}

/// Parse an ES6 class, expects LEX_R_CLASS already parsed
NO_INLINE JsVar *jspeClassDefinition(bool parseNamedClass) {
  JsVar *classFunction = 0;
  JsVar *classPrototype = 0;
  JsVar *classInternalName = 0;

  bool actuallyCreateClass = JSP_SHOULD_EXECUTE;
  if (actuallyCreateClass) {
    classFunction = jsvNewWithFlags(JSV_FUNCTION);
    // capture the current scope chain so methods close over it
    JsVar *scopeVar = jspeiGetScopesAsVar();
    if (scopeVar)
      jsvUnLock2(jsvAddNamedChild(classFunction, scopeVar, JSPARSE_FUNCTION_SCOPE_NAME), scopeVar);
  }

  if (parseNamedClass && lex->tk==LEX_ID) {
    if (classFunction)
      classInternalName = jslGetTokenValueAsVar();
    JSP_ASSERT_MATCH(LEX_ID);
  }
  if (classFunction) {
    JsVar *prototypeName = jsvFindChildFromString(classFunction, JSPARSE_PROTOTYPE_VAR, true);
    jspEnsureIsPrototype(classFunction, prototypeName); // make sure it's an object
    classPrototype = jsvSkipName(prototypeName);
    jsvUnLock(prototypeName);
  }
  if (lex->tk==LEX_R_EXTENDS) {
    JSP_ASSERT_MATCH(LEX_R_EXTENDS);
    JsVar *extendsFrom = actuallyCreateClass ?
        jsvSkipNameAndUnLock(jspGetNamedVariable(jslGetTokenValueAsString())) : 0;
    JSP_MATCH_WITH_CLEANUP_AND_RETURN(LEX_ID,jsvUnLock4(extendsFrom,classFunction,classInternalName,classPrototype),0);
    if (classPrototype) {
      if (jsvIsFunction(extendsFrom)) {
        // wire up inheritance: classPrototype.__proto__ = extendsFrom.prototype
        JsVar *extendsFromProto = jsvObjectGetChild(extendsFrom, JSPARSE_PROTOTYPE_VAR, 0);
        if (extendsFromProto) {
          jsvObjectSetChild(classPrototype, JSPARSE_INHERITS_VAR, extendsFromProto);
          // link in default constructor if ours isn't supplied
          jsvObjectSetChildAndUnLock(classFunction, JSPARSE_FUNCTION_CODE_NAME,
              jsvNewFromString("if(this.__proto__.__proto__.constructor)this.__proto__.__proto__.constructor.apply(this,arguments)"));
          jsvUnLock(extendsFromProto);
        }
      } else
        jsExceptionHere(JSET_SYNTAXERROR, "'extends' argument should be a function, got %t", extendsFrom);
    }
    jsvUnLock(extendsFrom);
  }
  JSP_MATCH_WITH_CLEANUP_AND_RETURN('{',jsvUnLock3(classFunction,classInternalName,classPrototype),0);

  // parse class members (methods, getters/setters, static members)
  while ((lex->tk==LEX_ID || lex->tk==LEX_R_STATIC) && !jspIsInterrupted()) {
    bool isStatic = lex->tk==LEX_R_STATIC;
    if (isStatic) JSP_ASSERT_MATCH(LEX_R_STATIC);

    JsVar *funcName = jslGetTokenValueAsVar();
    JSP_MATCH_WITH_CLEANUP_AND_RETURN(LEX_ID,jsvUnLock4(funcName,classFunction,classInternalName,classPrototype),0);
#ifndef SAVE_ON_FLASH
    bool isGetter = false, isSetter = false;
    if (lex->tk==LEX_ID) {
      // `get name() {}` / `set name(v) {}` - the real member name follows
      isGetter = jsvIsStringEqual(funcName, "get");
      isSetter = jsvIsStringEqual(funcName, "set");
      if (isGetter || isSetter) {
        jsvUnLock(funcName);
        funcName = jslGetTokenValueAsVar();
        JSP_ASSERT_MATCH(LEX_ID);
      }
    }
#endif
    JsVar *method = jspeFunctionDefinition(false);
    if (classFunction && classPrototype) {
      // static members go on the class function itself; instance members on the prototype
      JsVar *obj = isStatic ? classFunction : classPrototype;
      if (jsvIsStringEqual(funcName, "constructor")) {
        jswrap_function_replaceWith(classFunction, method);
#ifndef SAVE_ON_FLASH
      } else if (isGetter || isSetter) {
        jsvAddGetterOrSetter(obj, funcName, isGetter, method);
#endif
      } else {
        funcName = jsvMakeIntoVariableName(funcName, 0);
        jsvSetValueOfName(funcName, method);
        jsvAddName(obj, funcName);
      }
    }
    jsvUnLock2(method,funcName);
  }
  jsvUnLock(classPrototype);
  // If we had a name, add it to the end (or it gets confused with the constructor arguments)
  if (classInternalName)
    jsvObjectSetChildAndUnLock(classFunction, JSPARSE_FUNCTION_NAME_NAME, classInternalName);
  JSP_MATCH_WITH_CLEANUP_AND_RETURN('}',jsvUnLock(classFunction),0);
  return classFunction;
}
#endif

/* Parse (and possibly execute) a single factor: identifiers, literals,
 * parenthesised expressions, object/array literals, function/class
 * definitions, regexes, `super`, `this`, `delete`, `typeof`, `void`. */
NO_INLINE JsVar *jspeFactor() {
  if (lex->tk==LEX_ID) {
    JsVar *a = jspGetNamedVariable(jslGetTokenValueAsString());
    JSP_ASSERT_MATCH(LEX_ID);
#ifndef SAVE_ON_FLASH
    if (lex->tk==LEX_TEMPLATE_LITERAL)
      jsExceptionHere(JSET_SYNTAXERROR, "Tagged template literals not supported");
    else if (lex->tk==LEX_ARROW_FUNCTION && (jsvIsName(a) || (a==0 && !JSP_SHOULD_EXECUTE))) {
      // 'a' needs to be a name, *or* we're not executing so 0 gets returned anyway
      JsVar *funcVar = jspeArrowFunction(0,a);
      jsvUnLock(a);
      a=funcVar;
    }
#endif
    return a;
  } else if (lex->tk==LEX_INT) {
    JsVar *v = 0;
    if (JSP_SHOULD_EXECUTE) {
      v = jsvNewFromLongInteger(stringToInt(jslGetTokenValueAsString()));
    }
    JSP_ASSERT_MATCH(LEX_INT);
    return v;
  } else if (lex->tk==LEX_FLOAT) {
    JsVar *v = 0;
    if (JSP_SHOULD_EXECUTE) {
      v = jsvNewFromFloat(stringToFloat(jslGetTokenValueAsString()));
    }
    JSP_ASSERT_MATCH(LEX_FLOAT);
    return v;
  } else if (lex->tk=='(') {
    JSP_ASSERT_MATCH('(');
    if (!jspCheckStackPosition()) return 0;
#ifdef SAVE_ON_FLASH
    // Just parse a normal expression (which can include commas)
    JsVar *a = jspeExpression();
    if (!JSP_SHOULDNT_PARSE)
      JSP_MATCH_WITH_RETURN(')',a);
    return a;
#else
    // could also be the parameter list of an arrow function
    return jspeExpressionOrArrowFunction();
#endif

  } else if (lex->tk==LEX_R_TRUE) {
    JSP_ASSERT_MATCH(LEX_R_TRUE);
    return JSP_SHOULD_EXECUTE ? jsvNewFromBool(true) : 0;
  } else if (lex->tk==LEX_R_FALSE) {
    JSP_ASSERT_MATCH(LEX_R_FALSE);
    return JSP_SHOULD_EXECUTE ? jsvNewFromBool(false) : 0;
  } else if (lex->tk==LEX_R_NULL) {
    JSP_ASSERT_MATCH(LEX_R_NULL);
    return JSP_SHOULD_EXECUTE ? jsvNewWithFlags(JSV_NULL) : 0;
  } else if (lex->tk==LEX_R_UNDEFINED) {
    JSP_ASSERT_MATCH(LEX_R_UNDEFINED);
    return 0; // undefined is represented by a 0 pointer
  } else if (lex->tk==LEX_STR) {
    JsVar *a = 0;
    if (JSP_SHOULD_EXECUTE)
      a = jslGetTokenValueAsVar();
    JSP_ASSERT_MATCH(LEX_STR);
    return a;
#ifndef SAVE_ON_FLASH
  } else if (lex->tk==LEX_TEMPLATE_LITERAL) {
    return jspeTemplateLiteral();
#endif
  } else if (lex->tk==LEX_REGEX) {
    JsVar *a = 0;
#ifdef SAVE_ON_FLASH
    jsExceptionHere(JSET_SYNTAXERROR, "RegEx are not supported in this version of Espruino\n");
#else
    // The token is the whole literal, eg. "/source/flags"; find the last '/'
    // so we can split source from flags.
    JsVar *regex = jslGetTokenValueAsVar();
    size_t regexEnd = 0, regexLen = 0;
    JsvStringIterator it;
    jsvStringIteratorNew(&it, regex, 0);
    while (jsvStringIteratorHasChar(&it)) {
      regexLen++;
      if (jsvStringIteratorGetCharAndNext(&it)=='/')
        regexEnd = regexLen;
    }
    jsvStringIteratorFree(&it);
    JsVar *flags = 0;
    if (regexEnd < regexLen)
      flags = jsvNewFromStringVar(regex, regexEnd, JSVAPPENDSTRINGVAR_MAXLENGTH);
    // NOTE(review): if the token contained fewer than 2 chars before the
    // closing '/', regexEnd-2 underflows size_t here — presumably the lexer
    // guarantees a well-formed /.../ token; confirm.
    JsVar *regexSource = jsvNewFromStringVar(regex, 1, regexEnd-2);
    a = jswrap_regexp_constructor(regexSource, flags);
    jsvUnLock3(regex, flags, regexSource);
#endif
    JSP_ASSERT_MATCH(LEX_REGEX);
    return a;
  } else if (lex->tk=='{') {
    if (!jspCheckStackPosition()) return 0;
    return jspeFactorObject();
  } else if (lex->tk=='[') {
    if (!jspCheckStackPosition()) return 0;
    return jspeFactorArray();
  } else if (lex->tk==LEX_R_FUNCTION) {
    if (!jspCheckStackPosition()) return 0;
    JSP_ASSERT_MATCH(LEX_R_FUNCTION);
    return jspeFunctionDefinition(true);
#ifndef SAVE_ON_FLASH
  } else if (lex->tk==LEX_R_CLASS) {
    if (!jspCheckStackPosition()) return 0;
    JSP_ASSERT_MATCH(LEX_R_CLASS);
    return jspeClassDefinition(true);
  } else if (lex->tk==LEX_R_SUPER) {
    JSP_ASSERT_MATCH(LEX_R_SUPER);
    /* This is kind of nasty, since super appears to do three different things.
     * In the constructor it references the extended class's constructor
     * in a method it references the constructor's prototype.
     * in a static method it references the extended class's constructor (but this is different)
     */
    if (jsvIsObject(execInfo.thisVar)) {
      // 'this' is an object - must be calling a normal method
      JsVar *proto1 = jsvObjectGetChild(execInfo.thisVar, JSPARSE_INHERITS_VAR, 0); // if we're in a method, get __proto__ first
      JsVar *proto2 = jsvIsObject(proto1) ? jsvObjectGetChild(proto1, JSPARSE_INHERITS_VAR, 0) : 0; // still in method, get __proto__.__proto__
      jsvUnLock(proto1);
      if (!proto2) {
        jsExceptionHere(JSET_SYNTAXERROR, "Calling 'super' outside of class");
        return 0;
      }
      // If we're doing super() we want the constructor
      if (lex->tk=='(') {
        JsVar *constr = jsvObjectGetChild(proto2, JSPARSE_CONSTRUCTOR_VAR, 0);
        jsvUnLock(proto2);
        return constr;
      }
      // But if we're doing something else - eg 'super.' or 'super[' then it needs to reference the prototype
      return proto2;
    } else if (jsvIsFunction(execInfo.thisVar)) {
      // 'this' is a function - must be calling a static method
      JsVar *proto1 = jsvObjectGetChild(execInfo.thisVar, JSPARSE_PROTOTYPE_VAR, 0);
      JsVar *proto2 = jsvIsObject(proto1) ? jsvObjectGetChild(proto1, JSPARSE_INHERITS_VAR, 0) : 0;
      jsvUnLock(proto1);
      if (!proto2) {
        jsExceptionHere(JSET_SYNTAXERROR, "Calling 'super' outside of class");
        return 0;
      }
      JsVar *constr = jsvObjectGetChild(proto2, JSPARSE_CONSTRUCTOR_VAR, 0);
      jsvUnLock(proto2);
      return constr;
    }
    jsExceptionHere(JSET_SYNTAXERROR, "Calling 'super' outside of class");
    return 0;
#endif
  } else if (lex->tk==LEX_R_THIS) {
    JSP_ASSERT_MATCH(LEX_R_THIS);
    // outside of any method, 'this' is the global (root) object
    return jsvLockAgain( execInfo.thisVar ?
          execInfo.thisVar : execInfo.root );
  } else if (lex->tk==LEX_R_DELETE) {
    if (!jspCheckStackPosition()) return 0;
    return jspeFactorDelete();
  } else if (lex->tk==LEX_R_TYPEOF) {
    if (!jspCheckStackPosition()) return 0;
    return jspeFactorTypeOf();
  } else if (lex->tk==LEX_R_VOID) {
    // `void expr`: evaluate for side effects, result is always undefined
    if (!jspCheckStackPosition()) return 0;
    JSP_ASSERT_MATCH(LEX_R_VOID);
    jsvUnLock(jspeUnaryExpression());
    return 0;
  }
  JSP_MATCH(LEX_EOF);
  jsExceptionHere(JSET_SYNTAXERROR, "Unexpected end of Input\n");
  return 0;
}

/* Handle postfix ++/-- applied to `a`: the variable is updated in place
 * but the OLD value (as a number) is what gets returned. */
NO_INLINE JsVar *__jspePostfixExpression(JsVar *a) {
  while (lex->tk==LEX_PLUSPLUS || lex->tk==LEX_MINUSMINUS) {
    int op = lex->tk;
    JSP_ASSERT_MATCH(op);
    if (JSP_SHOULD_EXECUTE) {
      JsVar *one = jsvNewFromInteger(1);
      JsVar *oldValue = jsvAsNumberAndUnLock(jsvSkipName(a)); // keep the old value (but convert to number)
      JsVar *res = jsvMathsOpSkipNames(oldValue, one, op==LEX_PLUSPLUS ? '+' : '-');
      jsvUnLock(one);

      // in-place add/subtract
      jsvReplaceWith(a, res);
      jsvUnLock(res);
      // but then use the old value
      jsvUnLock(a);
      a = oldValue;
    }
  }
  return a;
}

/* Parse prefix ++/-- (which returns the NEW value) or fall through to a
 * function-call factor, then apply any postfix ++/--. */
NO_INLINE JsVar *jspePostfixExpression() {
  JsVar *a;
  // TODO: should be in jspeUnaryExpression
  if (lex->tk==LEX_PLUSPLUS || lex->tk==LEX_MINUSMINUS) {
    int op = lex->tk;
    JSP_ASSERT_MATCH(op);

    a = jspePostfixExpression();
    if (JSP_SHOULD_EXECUTE) {
      JsVar *one = jsvNewFromInteger(1);
      JsVar *res = jsvMathsOpSkipNames(a, one, op==LEX_PLUSPLUS ? '+' : '-');
      jsvUnLock(one);
      // in-place add/subtract
      jsvReplaceWith(a, res);
      jsvUnLock(res);
    }
  } else
    a = jspeFactorFunctionCall();
  return __jspePostfixExpression(a);
}

/* Parse unary prefix operators: ! ~ - +  (each recurses so `!!x` etc works). */
NO_INLINE JsVar *jspeUnaryExpression() {
  if (lex->tk=='!' || lex->tk=='~' || lex->tk=='-' || lex->tk=='+') {
    short tk = lex->tk;
    JSP_ASSERT_MATCH(tk);
    if (!JSP_SHOULD_EXECUTE) {
      return jspeUnaryExpression();
    }
    if (tk=='!') { // logical not
      return jsvNewFromBool(!jsvGetBoolAndUnLock(jsvSkipNameAndUnLock(jspeUnaryExpression())));
    } else if (tk=='~') { // bitwise not
      return jsvNewFromInteger(~jsvGetIntegerAndUnLock(jsvSkipNameAndUnLock(jspeUnaryExpression())));
    } else if (tk=='-') { // unary minus
      return jsvNegateAndUnLock(jspeUnaryExpression()); // names already skipped
    } else if (tk=='+') { // unary plus (convert to number)
      JsVar *v = jsvSkipNameAndUnLock(jspeUnaryExpression());
      JsVar *r = jsvAsNumber(v); // names already skipped
      jsvUnLock(v);
      return r;
    }
    assert(0);
    return 0;
  } else
    return jspePostfixExpression();
}

// Get the precedence of a BinaryExpression - or return 0 if not one
// (higher number = binds tighter; mirrors the JS operator precedence table)
unsigned int jspeGetBinaryExpressionPrecedence(int op) {
  switch (op) {
  case LEX_OROR: return 1; break;
  case LEX_ANDAND: return 2; break;
  case '|' : return 3; break;
  case '^' : return 4; break;
  case '&' : return 5; break;
  case LEX_EQUAL:
  case LEX_NEQUAL:
  case LEX_TYPEEQUAL:
  case LEX_NTYPEEQUAL: return 6;
  case LEX_LEQUAL:
  case LEX_GEQUAL:
  case '<':
  case '>':
  case LEX_R_INSTANCEOF: return 7;
  // `in` is not an operator inside a for-loop head (for (x in y))
  case LEX_R_IN: return (execInfo.execute&EXEC_FOR_INIT)?0:7;
  case LEX_LSHIFT:
  case LEX_RSHIFT:
  case LEX_RSHIFTUNSIGNED: return 8;
  case '+':
  case '-': return 9;
  case '*':
  case '/':
  case '%': return 10;
  default: return 0;
  }
}

NO_INLINE JsVar *__jspeBinaryExpression(JsVar *a, unsigned int lastPrecedence) {
  /* This one's a bit strange. Basically all the ops have their own precedence, it's not
   * like & and | share the same precedence. We don't want to recurse for each one,
   * so instead we do this.
   *
   * We deal with an expression in recursion ONLY if it's of higher precedence
   * than the current one, otherwise we stick in the while loop.
   */
  unsigned int precedence = jspeGetBinaryExpressionPrecedence(lex->tk);
  while (precedence && precedence>lastPrecedence) {
    int op = lex->tk;
    JSP_ASSERT_MATCH(op);

    // if we have short-circuit ops, then if we know the outcome
    // we don't bother to execute the other op. Even if not
    // we need to tell mathsOp it's an & or |
    if (op==LEX_ANDAND || op==LEX_OROR) {
      bool aValue = jsvGetBoolAndUnLock(jsvSkipName(a));
      if ((!aValue && op==LEX_ANDAND) ||
          (aValue && op==LEX_OROR)) {
        // use first argument (A) - parse RHS in no-execute mode just to skip it
        JSP_SAVE_EXECUTE();
        jspSetNoExecute();
        jsvUnLock(__jspeBinaryExpression(jspeUnaryExpression(),precedence));
        JSP_RESTORE_EXECUTE();
      } else {
        // use second argument (B)
        jsvUnLock(a);
        a = __jspeBinaryExpression(jspeUnaryExpression(),precedence);
      }
    } else { // else it's a more 'normal' logical expression - just use Maths
      JsVar *b = __jspeBinaryExpression(jspeUnaryExpression(),precedence);
      if (JSP_SHOULD_EXECUTE) {
        if (op==LEX_R_IN) {
          JsVar *av = jsvSkipName(a); // needle
          JsVar *bv = jsvSkipName(b); // haystack
          if (jsvHasChildren(bv)) { // search keys, NOT values
            av = jsvAsArrayIndexAndUnLock(av);
            JsVar *varFound = jspGetVarNamedField( bv, av, true);
            jsvUnLock2(a,varFound);
            a = jsvNewFromBool(varFound!=0);
          } else { // else maybe it's a fake object...
            // check the built-in symbol table for this object type
            const JswSymList *syms = jswGetSymbolListForObjectProto(bv);
            if (syms) {
              JsVar *varFound = 0;
              char nameBuf[JSLEX_MAX_TOKEN_LENGTH];
              // only search if the name fits in the buffer (wasn't truncated)
              if (jsvGetString(av, nameBuf, sizeof(nameBuf)) < sizeof(nameBuf))
                varFound = jswBinarySearch(syms, bv, nameBuf);
              bool found = varFound!=0;
              jsvUnLock2(a, varFound);
              if (!found && jsvIsArrayBuffer(bv)) {
                JsVarFloat f = jsvGetFloat(av); // if not a number this will be NaN, f==floor(f) fails
                if (f==floor(f) && f>=0 && f<jsvGetArrayBufferLength(bv))
                  found = true;
              }
              a = jsvNewFromBool(found);
            } else { // not built-in, just assume we can't do it
              jsExceptionHere(JSET_ERROR, "Cannot use 'in' operator to search a %t", bv);
              jsvUnLock(a);
              a = 0;
            }
          }
          jsvUnLock2(av, bv);
        } else if (op==LEX_R_INSTANCEOF) {
          bool inst = false;
          JsVar *av = jsvSkipName(a);
          JsVar *bv = jsvSkipName(b);
          if (!jsvIsFunction(bv)) {
            jsExceptionHere(JSET_ERROR, "Expecting a function on RHS in instanceof check, got %t", bv);
          } else {
            if (jsvIsObject(av) || jsvIsFunction(av)) {
              JsVar *bproto = jspGetNamedField(bv, JSPARSE_PROTOTYPE_VAR, false);
              JsVar *proto = jsvObjectGetChild(av, JSPARSE_INHERITS_VAR, 0);
              while (proto) {
                if (proto == bproto) inst=true;
                // search prototype chain
                JsVar *childProto = jsvObjectGetChild(proto, JSPARSE_INHERITS_VAR, 0);
                jsvUnLock(proto);
                proto = childProto;
              }
              if (jspIsConstructor(bv, "Object")) inst = true;
              jsvUnLock(bproto);
            }
            if (!inst) {
              const char *name = jswGetBasicObjectName(av);
              if (name) {
                inst = jspIsConstructor(bv, name);
              }
              // Hack for built-ins that should also be instances of Object
              if (!inst && (jsvIsArray(av) || jsvIsArrayBuffer(av)) && jspIsConstructor(bv, "Object"))
                inst = true;
            }
          }
          jsvUnLock3(av, bv, a);
          a = jsvNewFromBool(inst);
        } else {  // --------------------------------------------- NORMAL
          JsVar *res = jsvMathsOpSkipNames(a, b, op);
          jsvUnLock(a);
          a = res;
        }
      }
      jsvUnLock(b);
    }
    precedence = jspeGetBinaryExpressionPrecedence(lex->tk);
  }
  return a;
}

// Entry point: parse a binary expression starting from lowest precedence.
JsVar *jspeBinaryExpression() {
  return __jspeBinaryExpression(jspeUnaryExpression(),0);
} NO_INLINE JsVar *__jspeConditionalExpression(JsVar *lhs) { if (lex->tk=='?') { JSP_ASSERT_MATCH('?'); if (!JSP_SHOULD_EXECUTE) { // just let lhs pass through jsvUnLock(jspeAssignmentExpression()); JSP_MATCH(':'); jsvUnLock(jspeAssignmentExpression()); } else { bool first = jsvGetBoolAndUnLock(jsvSkipName(lhs)); jsvUnLock(lhs); if (first) { lhs = jspeAssignmentExpression(); JSP_MATCH(':'); JSP_SAVE_EXECUTE(); jspSetNoExecute(); jsvUnLock(jspeAssignmentExpression()); JSP_RESTORE_EXECUTE(); } else { JSP_SAVE_EXECUTE(); jspSetNoExecute(); jsvUnLock(jspeAssignmentExpression()); JSP_RESTORE_EXECUTE(); JSP_MATCH(':'); lhs = jspeAssignmentExpression(); } } } return lhs; } JsVar *jspeConditionalExpression() { return __jspeConditionalExpression(jspeBinaryExpression()); } NO_INLINE JsVar *__jspeAssignmentExpression(JsVar *lhs) { if (lex->tk=='=' || lex->tk==LEX_PLUSEQUAL || lex->tk==LEX_MINUSEQUAL || lex->tk==LEX_MULEQUAL || lex->tk==LEX_DIVEQUAL || lex->tk==LEX_MODEQUAL || lex->tk==LEX_ANDEQUAL || lex->tk==LEX_OREQUAL || lex->tk==LEX_XOREQUAL || lex->tk==LEX_RSHIFTEQUAL || lex->tk==LEX_LSHIFTEQUAL || lex->tk==LEX_RSHIFTUNSIGNEDEQUAL) { JsVar *rhs; int op = lex->tk; JSP_ASSERT_MATCH(op); rhs = jspeAssignmentExpression(); rhs = jsvSkipNameAndUnLock(rhs); // ensure we get rid of any references on the RHS if (JSP_SHOULD_EXECUTE && lhs) { if (op=='=') { jsvReplaceWithOrAddToRoot(lhs, rhs); } else { if (op==LEX_PLUSEQUAL) op='+'; else if (op==LEX_MINUSEQUAL) op='-'; else if (op==LEX_MULEQUAL) op='*'; else if (op==LEX_DIVEQUAL) op='/'; else if (op==LEX_MODEQUAL) op='%'; else if (op==LEX_ANDEQUAL) op='&'; else if (op==LEX_OREQUAL) op='|'; else if (op==LEX_XOREQUAL) op='^'; else if (op==LEX_RSHIFTEQUAL) op=LEX_RSHIFT; else if (op==LEX_LSHIFTEQUAL) op=LEX_LSHIFT; else if (op==LEX_RSHIFTUNSIGNEDEQUAL) op=LEX_RSHIFTUNSIGNED; if (op=='+' && jsvIsName(lhs)) { JsVar *currentValue = jsvSkipName(lhs); if (jsvIsBasicString(currentValue) && jsvGetRefs(currentValue)==1 && rhs!=currentValue) { 
/* A special case for string += where this is the only use of the string * and we're not appending to ourselves. In this case we can do a * simple append (rather than clone + append)*/ JsVar *str = jsvAsString(rhs); jsvAppendStringVarComplete(currentValue, str); jsvUnLock(str); op = 0; } jsvUnLock(currentValue); } if (op) { /* Fallback which does a proper add */ JsVar *res = jsvMathsOpSkipNames(lhs,rhs,op); jsvReplaceWith(lhs, res); jsvUnLock(res); } } } jsvUnLock(rhs); } return lhs; } JsVar *jspeAssignmentExpression() { return __jspeAssignmentExpression(jspeConditionalExpression()); } // ',' is allowed to add multiple expressions, this is not allowed in jspeAssignmentExpression NO_INLINE JsVar *jspeExpression() { while (!JSP_SHOULDNT_PARSE) { JsVar *a = jspeAssignmentExpression(); if (lex->tk!=',') return a; // if we get a comma, we just forget this data and parse the next bit... jsvCheckReferenceError(a); jsvUnLock(a); JSP_ASSERT_MATCH(','); } return 0; } /** Parse a block `{ ... }` */ NO_INLINE void jspeSkipBlock() { // fast skip of blocks int brackets = 1; while (lex->tk && brackets) { if (lex->tk == '{') brackets++; else if (lex->tk == '}') { brackets--; if (!brackets) return; } JSP_ASSERT_MATCH(lex->tk); } } /** Parse a block `{ ... }` but assume brackets are already parsed */ NO_INLINE void jspeBlockNoBrackets() { if (JSP_SHOULD_EXECUTE) { while (lex->tk && lex->tk!='}') { JsVar *a = jspeStatement(); jsvCheckReferenceError(a); jsvUnLock(a); if (JSP_HAS_ERROR) { if (lex && !(execInfo.execute&EXEC_ERROR_LINE_REPORTED)) { execInfo.execute = (JsExecFlags)(execInfo.execute | EXEC_ERROR_LINE_REPORTED); JsVar *stackTrace = jsvObjectGetChild(execInfo.hiddenRoot, JSPARSE_STACKTRACE_VAR, JSV_STRING_0); if (stackTrace) { jsvAppendPrintf(stackTrace, "at "); jspAppendStackTrace(stackTrace); jsvUnLock(stackTrace); } } } if (JSP_SHOULDNT_PARSE) return; if (!JSP_SHOULD_EXECUTE) { jspeSkipBlock(); return; } } } else { jspeSkipBlock(); } return; } /** Parse a block `{ ... 
}` */ NO_INLINE void jspeBlock() { JSP_MATCH_WITH_RETURN('{',); jspeBlockNoBrackets(); if (!JSP_SHOULDNT_PARSE) JSP_MATCH_WITH_RETURN('}',); return; } NO_INLINE JsVar *jspeBlockOrStatement() { if (lex->tk=='{') { jspeBlock(); return 0; } else { JsVar *v = jspeStatement(); if (lex->tk==';') JSP_ASSERT_MATCH(';'); return v; } } /** Parse using current lexer until we hit the end of * input or there was some problem. */ NO_INLINE JsVar *jspParse() { JsVar *v = 0; while (!JSP_SHOULDNT_PARSE && lex->tk != LEX_EOF) { jsvUnLock(v); v = jspeBlockOrStatement(); jsvCheckReferenceError(v); } return v; } NO_INLINE JsVar *jspeStatementVar() { JsVar *lastDefined = 0; /* variable creation. TODO - we need a better way of parsing the left * hand side. Maybe just have a flag called can_create_var that we * set and then we parse as if we're doing a normal equals.*/ assert(lex->tk==LEX_R_VAR || lex->tk==LEX_R_LET || lex->tk==LEX_R_CONST); jslGetNextToken(); ///TODO: Correctly implement CONST and LET - we just treat them like 'var' at the moment bool hasComma = true; // for first time in loop while (hasComma && lex->tk == LEX_ID && !jspIsInterrupted()) { JsVar *a = 0; if (JSP_SHOULD_EXECUTE) { a = jspeiFindOnTop(jslGetTokenValueAsString(), true); if (!a) { // out of memory jspSetError(false); return lastDefined; } } JSP_MATCH_WITH_CLEANUP_AND_RETURN(LEX_ID, jsvUnLock(a), lastDefined); // sort out initialiser if (lex->tk == '=') { JsVar *var; JSP_MATCH_WITH_CLEANUP_AND_RETURN('=', jsvUnLock(a), lastDefined); var = jsvSkipNameAndUnLock(jspeAssignmentExpression()); if (JSP_SHOULD_EXECUTE) jsvReplaceWith(a, var); jsvUnLock(var); } jsvUnLock(lastDefined); lastDefined = a; hasComma = lex->tk == ','; if (hasComma) JSP_MATCH_WITH_RETURN(',', lastDefined); } return lastDefined; } NO_INLINE JsVar *jspeStatementIf() { bool cond; JsVar *var, *result = 0; JSP_ASSERT_MATCH(LEX_R_IF); JSP_MATCH('('); var = jspeExpression(); if (JSP_SHOULDNT_PARSE) return var; JSP_MATCH(')'); cond = JSP_SHOULD_EXECUTE 
&& jsvGetBoolAndUnLock(jsvSkipName(var)); jsvUnLock(var); JSP_SAVE_EXECUTE(); if (!cond) jspSetNoExecute(); JsExecFlags hasError = 0; JsVar *a = jspeBlockOrStatement(); hasError |= execInfo.execute&EXEC_ERROR_MASK; if (!cond) { jsvUnLock(a); JSP_RESTORE_EXECUTE(); execInfo.execute |= hasError; } else { result = a; } if (lex->tk==LEX_R_ELSE) { JSP_ASSERT_MATCH(LEX_R_ELSE); JSP_SAVE_EXECUTE(); if (cond) jspSetNoExecute(); JsVar *a = jspeBlockOrStatement(); hasError |= execInfo.execute&EXEC_ERROR_MASK; if (cond) { jsvUnLock(a); JSP_RESTORE_EXECUTE(); execInfo.execute |= hasError; } else { result = a; } } return result; } NO_INLINE JsVar *jspeStatementSwitch() { JSP_ASSERT_MATCH(LEX_R_SWITCH); JSP_MATCH('('); JsVar *switchOn = jspeExpression(); JSP_SAVE_EXECUTE(); bool execute = JSP_SHOULD_EXECUTE; JSP_MATCH_WITH_CLEANUP_AND_RETURN(')', jsvUnLock(switchOn), 0); // shortcut if not executing... if (!execute) { jsvUnLock(switchOn); jspeBlock(); return 0; } JSP_MATCH_WITH_CLEANUP_AND_RETURN('{', jsvUnLock(switchOn), 0); bool executeDefault = true; if (execute) execInfo.execute=EXEC_NO|EXEC_IN_SWITCH; while (lex->tk==LEX_R_CASE) { JSP_MATCH_WITH_CLEANUP_AND_RETURN(LEX_R_CASE, jsvUnLock(switchOn), 0); JsExecFlags oldFlags = execInfo.execute; if (execute) execInfo.execute=EXEC_YES|EXEC_IN_SWITCH; JsVar *test = jspeAssignmentExpression(); execInfo.execute = oldFlags|EXEC_IN_SWITCH;; JSP_MATCH_WITH_CLEANUP_AND_RETURN(':', jsvUnLock2(switchOn, test), 0); bool cond = false; if (execute) cond = jsvGetBoolAndUnLock(jsvMathsOpSkipNames(switchOn, test, LEX_TYPEEQUAL)); if (cond) executeDefault = false; jsvUnLock(test); if (cond && (execInfo.execute&EXEC_RUN_MASK)==EXEC_NO) execInfo.execute=EXEC_YES|EXEC_IN_SWITCH; while (!JSP_SHOULDNT_PARSE && lex->tk!=LEX_EOF && lex->tk!=LEX_R_CASE && lex->tk!=LEX_R_DEFAULT && lex->tk!='}') jsvUnLock(jspeBlockOrStatement()); oldExecute |= execInfo.execute & (EXEC_ERROR_MASK|EXEC_RETURN); // copy across any errors/exceptions/returns } 
jsvUnLock(switchOn); if (execute && (execInfo.execute&EXEC_RUN_MASK)==EXEC_BREAK) { execInfo.execute=EXEC_YES|EXEC_IN_SWITCH; } else { executeDefault = true; } JSP_RESTORE_EXECUTE(); if (lex->tk==LEX_R_DEFAULT) { JSP_ASSERT_MATCH(LEX_R_DEFAULT); JSP_MATCH(':'); JSP_SAVE_EXECUTE(); if (!executeDefault) jspSetNoExecute(); else execInfo.execute |= EXEC_IN_SWITCH; while (!JSP_SHOULDNT_PARSE && lex->tk!=LEX_EOF && lex->tk!='}' && lex->tk!=LEX_R_CASE) jsvUnLock(jspeBlockOrStatement()); oldExecute |= execInfo.execute & (EXEC_ERROR_MASK|EXEC_RETURN); // copy across any errors/exceptions/returns execInfo.execute = execInfo.execute & (JsExecFlags)~EXEC_BREAK; JSP_RESTORE_EXECUTE(); } if (lex->tk==LEX_R_CASE) { jsExceptionHere(JSET_SYNTAXERROR, "Espruino doesn't support CASE after DEFAULT"); return 0; } JSP_MATCH('}'); return 0; } // Check whether we received a break/continue while parsing previously. Return true if we had a 'break; static NO_INLINE bool jspeCheckBreakContinue() { if (execInfo.execute & EXEC_CONTINUE) execInfo.execute = (execInfo.execute & ~EXEC_RUN_MASK) | EXEC_YES; else if (execInfo.execute & EXEC_BREAK) { execInfo.execute = (execInfo.execute & ~EXEC_RUN_MASK) | EXEC_YES; return true; } return false; } NO_INLINE JsVar *jspeStatementDoOrWhile(bool isWhile) { JsVar *cond; bool loopCond = true; // true for do...while loops bool hasHadBreak = false; JslCharPos whileCondStart; // We do repetition by pulling out the string representing our statement // there's definitely some opportunity for optimisation here bool wasInLoop = (execInfo.execute&EXEC_IN_LOOP)!=0; JslCharPos whileBodyStart; if (isWhile) { // while loop JSP_ASSERT_MATCH(LEX_R_WHILE); jslCharPosFromLex(&whileCondStart); JSP_MATCH_WITH_CLEANUP_AND_RETURN('(',jslCharPosFree(&whileCondStart);,0); cond = jspeExpression(); loopCond = JSP_SHOULD_EXECUTE && jsvGetBoolAndUnLock(jsvSkipName(cond)); jsvUnLock(cond); jslCharPosFromLex(&whileBodyStart); 
JSP_MATCH_WITH_CLEANUP_AND_RETURN(')',jslCharPosFree(&whileBodyStart);jslCharPosFree(&whileCondStart);,0); } else { jslCharPosFromLex(&whileBodyStart); JSP_MATCH_WITH_CLEANUP_AND_RETURN(LEX_R_DO, jslCharPosFree(&whileBodyStart);,0); } JSP_SAVE_EXECUTE(); // actually try and execute first bit of while loop (we'll do the rest in the actual loop later) if (!loopCond) jspSetNoExecute(); execInfo.execute |= EXEC_IN_LOOP; jsvUnLock(jspeBlockOrStatement()); if (!wasInLoop) execInfo.execute &= (JsExecFlags)~EXEC_IN_LOOP; hasHadBreak |= jspeCheckBreakContinue(); if (!loopCond) JSP_RESTORE_EXECUTE(); if (!isWhile) { // do..while loop JSP_MATCH_WITH_CLEANUP_AND_RETURN(LEX_R_WHILE,jslCharPosFree(&whileBodyStart);,0); jslCharPosFromLex(&whileCondStart); JSP_MATCH_WITH_CLEANUP_AND_RETURN('(',jslCharPosFree(&whileBodyStart);jslCharPosFree(&whileCondStart);,0); cond = jspeExpression(); loopCond = JSP_SHOULD_EXECUTE && jsvGetBoolAndUnLock(jsvSkipName(cond)); jsvUnLock(cond); JSP_MATCH_WITH_CLEANUP_AND_RETURN(')',jslCharPosFree(&whileBodyStart);jslCharPosFree(&whileCondStart);,0); } JslCharPos whileBodyEnd; jslCharPosNew(&whileBodyEnd, lex->sourceVar, lex->tokenStart); int loopCount = 0; while (!hasHadBreak && loopCond #ifdef JSPARSE_MAX_LOOP_ITERATIONS && loopCount<JSPARSE_MAX_LOOP_ITERATIONS #endif ) { if (isWhile || loopCount) { // don't check the start condition a second time if we're in a do..while loop jslSeekToP(&whileCondStart); cond = jspeExpression(); loopCond = JSP_SHOULD_EXECUTE && jsvGetBoolAndUnLock(jsvSkipName(cond)); jsvUnLock(cond); } if (loopCond) { jslSeekToP(&whileBodyStart); execInfo.execute |= EXEC_IN_LOOP; jspDebuggerLoopIfCtrlC(); jsvUnLock(jspeBlockOrStatement()); if (!wasInLoop) execInfo.execute &= (JsExecFlags)~EXEC_IN_LOOP; hasHadBreak |= jspeCheckBreakContinue(); } loopCount++; } jslSeekToP(&whileBodyEnd); jslCharPosFree(&whileCondStart); jslCharPosFree(&whileBodyStart); jslCharPosFree(&whileBodyEnd); #ifdef JSPARSE_MAX_LOOP_ITERATIONS if (loopCount > 
JSPARSE_MAX_LOOP_ITERATIONS) { jsExceptionHere(JSET_ERROR, "WHILE Loop exceeded the maximum number of iterations (" STRINGIFY(JSPARSE_MAX_LOOP_ITERATIONS) ")"); } #endif return 0; } NO_INLINE JsVar *jspGetBuiltinPrototype(JsVar *obj) { if (jsvIsArray(obj)) { JsVar *v = jspFindPrototypeFor("Array"); if (v) return v; } if (jsvIsObject(obj) || jsvIsArray(obj)) { JsVar *v = jspFindPrototypeFor("Object"); if (v==obj) { // don't return ourselves jsvUnLock(v); v = 0; } return v; } return 0; } NO_INLINE JsVar *jspeStatementFor() { JSP_ASSERT_MATCH(LEX_R_FOR); JSP_MATCH('('); bool wasInLoop = (execInfo.execute&EXEC_IN_LOOP)!=0; execInfo.execute |= EXEC_FOR_INIT; // initialisation JsVar *forStatement = 0; // we could have 'for (;;)' - so don't munch up our semicolon if that's all we have if (lex->tk != ';') forStatement = jspeStatement(); if (jspIsInterrupted()) { jsvUnLock(forStatement); return 0; } execInfo.execute &= (JsExecFlags)~EXEC_FOR_INIT; #ifndef SAVE_ON_FLASH_EXTREME if (lex->tk == LEX_R_IN || lex->tk == LEX_R_OF) { bool isForOf = lex->tk == LEX_R_OF; // for (i in array) or for (i of array) // where i = forStatement if (JSP_SHOULD_EXECUTE && !jsvIsName(forStatement)) { jsvUnLock(forStatement); jsExceptionHere(JSET_ERROR, "for(a %s b) - 'a' must be a variable name, not %t", isForOf?"of":"in", forStatement); return 0; } JSP_ASSERT_MATCH(lex->tk); // skip over in/of JsVar *array = jsvSkipNameAndUnLock(jspeExpression()); JslCharPos forBodyStart; jslCharPosFromLex(&forBodyStart); JSP_MATCH_WITH_CLEANUP_AND_RETURN(')', jsvUnLock2(forStatement, array);jslCharPosFree(&forBodyStart), 0); // Simply scan over the loop the first time without executing to figure out where it ends // OPT: we could skip the first parse and actually execute the first time JSP_SAVE_EXECUTE(); jspSetNoExecute(); execInfo.execute |= EXEC_IN_LOOP; jsvUnLock(jspeBlockOrStatement()); JslCharPos forBodyEnd; jslCharPosNew(&forBodyEnd, lex->sourceVar, lex->tokenStart); if (!wasInLoop) execInfo.execute &= 
(JsExecFlags)~EXEC_IN_LOOP; JSP_RESTORE_EXECUTE(); // Now start executing properly if (JSP_SHOULD_EXECUTE) { if (jsvIsIterable(array)) { JsvIsInternalChecker checkerFunction = jsvGetInternalFunctionCheckerFor(array); JsVar *foundPrototype = 0; if (!isForOf) // for..in foundPrototype = jspGetBuiltinPrototype(array); JsvIterator it; jsvIteratorNew(&it, array, isForOf ? /* for of */ JSIF_EVERY_ARRAY_ELEMENT : /* for in */ JSIF_DEFINED_ARRAY_ElEMENTS); bool hasHadBreak = false; while (JSP_SHOULD_EXECUTE && jsvIteratorHasElement(&it) && !hasHadBreak) { JsVar *loopIndexVar = jsvIteratorGetKey(&it); bool ignore = false; if (checkerFunction && checkerFunction(loopIndexVar)) { ignore = true; if (jsvIsString(loopIndexVar) && jsvIsStringEqual(loopIndexVar, JSPARSE_INHERITS_VAR)) foundPrototype = jsvSkipName(loopIndexVar); } if (!ignore) { JsVar *iteratorValue; if (isForOf) { // for (... of ...) iteratorValue = jsvIteratorGetValue(&it); } else { // for (... in ...) iteratorValue = jsvIsName(loopIndexVar) ? 
jsvCopyNameOnly(loopIndexVar, false/*no copy children*/, false/*not a name*/) : loopIndexVar; assert(jsvGetRefs(iteratorValue)==0); } if (isForOf || iteratorValue) { // could be out of memory assert(!jsvIsName(iteratorValue)); jsvReplaceWithOrAddToRoot(forStatement, iteratorValue); if (iteratorValue!=loopIndexVar) jsvUnLock(iteratorValue); jslSeekToP(&forBodyStart); execInfo.execute |= EXEC_IN_LOOP; jspDebuggerLoopIfCtrlC(); jsvUnLock(jspeBlockOrStatement()); if (!wasInLoop) execInfo.execute &= (JsExecFlags)~EXEC_IN_LOOP; hasHadBreak |= jspeCheckBreakContinue(); } } jsvIteratorNext(&it); jsvUnLock(loopIndexVar); // if using for..in we'll skip down the prototype chain when we reach the end of the current one if (!jsvIteratorHasElement(&it) && !isForOf && foundPrototype) { jsvIteratorFree(&it); JsVar *iterable = foundPrototype; jsvIteratorNew(&it, iterable, JSIF_DEFINED_ARRAY_ElEMENTS); checkerFunction = jsvGetInternalFunctionCheckerFor(iterable); foundPrototype = jspGetBuiltinPrototype(iterable); jsvUnLock(iterable); } } assert(!foundPrototype); jsvIteratorFree(&it); } else if (!jsvIsUndefined(array)) { jsExceptionHere(JSET_ERROR, "FOR loop can only iterate over Arrays, Strings or Objects, not %t", array); } } jslSeekToP(&forBodyEnd); jslCharPosFree(&forBodyStart); jslCharPosFree(&forBodyEnd); jsvUnLock2(forStatement, array); #else // SAVE_ON_FLASH_EXTREME if (false) { #endif // SAVE_ON_FLASH_EXTREME } else { // ----------------------------------------------- NORMAL FOR LOOP #ifdef JSPARSE_MAX_LOOP_ITERATIONS int loopCount = JSPARSE_MAX_LOOP_ITERATIONS; #endif bool loopCond = true; bool hasHadBreak = false; jsvUnLock(forStatement); JslCharPos forCondStart; jslCharPosFromLex(&forCondStart); JSP_MATCH_WITH_CLEANUP_AND_RETURN(';',jslCharPosFree(&forCondStart);,0); if (lex->tk != ';') { JsVar *cond = jspeExpression(); // condition loopCond = JSP_SHOULD_EXECUTE && jsvGetBoolAndUnLock(jsvSkipName(cond)); jsvUnLock(cond); } JslCharPos forIterStart; 
jslCharPosFromLex(&forIterStart); JSP_MATCH_WITH_CLEANUP_AND_RETURN(';',jslCharPosFree(&forCondStart);jslCharPosFree(&forIterStart);,0); if (lex->tk != ')') { // we could have 'for (;;)' JSP_SAVE_EXECUTE(); jspSetNoExecute(); jsvUnLock(jspeExpression()); // iterator JSP_RESTORE_EXECUTE(); } JslCharPos forBodyStart; jslSkipWhiteSpace(); jslCharPosFromLex(&forBodyStart); // actual for body JSP_MATCH_WITH_CLEANUP_AND_RETURN(')',jslCharPosFree(&forCondStart);jslCharPosFree(&forIterStart);jslCharPosFree(&forBodyStart);,0); JSP_SAVE_EXECUTE(); if (!loopCond) jspSetNoExecute(); execInfo.execute |= EXEC_IN_LOOP; jsvUnLock(jspeBlockOrStatement()); JslCharPos forBodyEnd; jslSkipWhiteSpace(); jslCharPosNew(&forBodyEnd, lex->sourceVar, lex->tokenStart); if (!wasInLoop) execInfo.execute &= (JsExecFlags)~EXEC_IN_LOOP; if (loopCond || !JSP_SHOULD_EXECUTE) { hasHadBreak |= jspeCheckBreakContinue(); } if (!loopCond) JSP_RESTORE_EXECUTE(); if (loopCond) { jslSeekToP(&forIterStart); if (lex->tk != ')') jsvUnLock(jspeExpression()); } while (!hasHadBreak && JSP_SHOULD_EXECUTE && loopCond #ifdef JSPARSE_MAX_LOOP_ITERATIONS && loopCount-->0 #endif ) { jslSeekToP(&forCondStart); ; if (lex->tk == ';') { loopCond = true; } else { JsVar *cond = jspeExpression(); loopCond = jsvGetBoolAndUnLock(jsvSkipName(cond)); jsvUnLock(cond); } if (JSP_SHOULD_EXECUTE && loopCond) { jslSeekToP(&forBodyStart); execInfo.execute |= EXEC_IN_LOOP; jspDebuggerLoopIfCtrlC(); jsvUnLock(jspeBlockOrStatement()); if (!wasInLoop) execInfo.execute &= (JsExecFlags)~EXEC_IN_LOOP; hasHadBreak |= jspeCheckBreakContinue(); } if (JSP_SHOULD_EXECUTE && loopCond && !hasHadBreak) { jslSeekToP(&forIterStart); if (lex->tk != ')') jsvUnLock(jspeExpression()); } } jslSeekToP(&forBodyEnd); jslCharPosFree(&forCondStart); jslCharPosFree(&forIterStart); jslCharPosFree(&forBodyStart); jslCharPosFree(&forBodyEnd); #ifdef JSPARSE_MAX_LOOP_ITERATIONS if (loopCount<=0) { jsExceptionHere(JSET_ERROR, "FOR Loop exceeded the maximum number of 
iterations ("STRINGIFY(JSPARSE_MAX_LOOP_ITERATIONS)")"); } #endif } return 0; } NO_INLINE JsVar *jspeStatementTry() { // execute the try block JSP_ASSERT_MATCH(LEX_R_TRY); bool shouldExecuteBefore = JSP_SHOULD_EXECUTE; jspeBlock(); bool hadException = shouldExecuteBefore && ((execInfo.execute & EXEC_EXCEPTION)!=0); bool hadCatch = false; if (lex->tk == LEX_R_CATCH) { JSP_ASSERT_MATCH(LEX_R_CATCH); hadCatch = true; JSP_MATCH('('); JsVar *scope = 0; JsVar *exceptionVar = 0; if (hadException) { scope = jsvNewObject(); if (scope) exceptionVar = jsvFindChildFromString(scope, jslGetTokenValueAsString(), true); } JSP_MATCH_WITH_CLEANUP_AND_RETURN(LEX_ID,jsvUnLock2(scope,exceptionVar),0); JSP_MATCH_WITH_CLEANUP_AND_RETURN(')',jsvUnLock2(scope,exceptionVar),0); if (exceptionVar) { // set the exception var up properly JsVar *exception = jspGetException(); if (exception) { jsvSetValueOfName(exceptionVar, exception); jsvUnLock(exception); } // Now clear the exception flag (it's handled - we hope!) execInfo.execute = execInfo.execute & (JsExecFlags)~(EXEC_EXCEPTION|EXEC_ERROR_LINE_REPORTED); jsvUnLock(exceptionVar); } if (shouldExecuteBefore && !hadException) { JSP_SAVE_EXECUTE(); jspSetNoExecute(); jspeBlock(); JSP_RESTORE_EXECUTE(); } else { if (!scope || jspeiAddScope(scope)) { jspeBlock(); if (scope) jspeiRemoveScope(); } } jsvUnLock(scope); } if (lex->tk == LEX_R_FINALLY || (!hadCatch && ((execInfo.execute&(EXEC_ERROR|EXEC_INTERRUPTED))==0))) { JSP_MATCH(LEX_R_FINALLY); // clear the exception flag - but only momentarily! if (hadException) execInfo.execute = execInfo.execute & (JsExecFlags)~EXEC_EXCEPTION; jspeBlock(); // put the flag back! 
if (hadException && !hadCatch) execInfo.execute = execInfo.execute | EXEC_EXCEPTION; } return 0; } NO_INLINE JsVar *jspeStatementReturn() { JsVar *result = 0; JSP_ASSERT_MATCH(LEX_R_RETURN); if (lex->tk != ';' && lex->tk != '}') { // we only want the value, so skip the name if there was one result = jsvSkipNameAndUnLock(jspeExpression()); } if (JSP_SHOULD_EXECUTE) { JsVar *resultVar = jspeiFindInScopes(JSPARSE_RETURN_VAR); if (resultVar) { jsvReplaceWith(resultVar, result); jsvUnLock(resultVar); execInfo.execute |= EXEC_RETURN; // Stop anything else in this function executing } else { jsExceptionHere(JSET_SYNTAXERROR, "RETURN statement, but not in a function.\n"); } } jsvUnLock(result); return 0; } NO_INLINE JsVar *jspeStatementThrow() { JsVar *result = 0; JSP_ASSERT_MATCH(LEX_R_THROW); result = jsvSkipNameAndUnLock(jspeExpression()); if (JSP_SHOULD_EXECUTE) { jspSetException(result); // Stop anything else in this function executing } jsvUnLock(result); return 0; } NO_INLINE JsVar *jspeStatementFunctionDecl(bool isClass) { JsVar *funcName = 0; JsVar *funcVar; #ifndef SAVE_ON_FLASH JSP_ASSERT_MATCH(isClass ? LEX_R_CLASS : LEX_R_FUNCTION); #else JSP_ASSERT_MATCH(LEX_R_FUNCTION); #endif bool actuallyCreateFunction = JSP_SHOULD_EXECUTE; if (actuallyCreateFunction) { funcName = jsvMakeIntoVariableName(jslGetTokenValueAsVar(), 0); if (!funcName) { // out of memory return 0; } } JSP_MATCH_WITH_CLEANUP_AND_RETURN(LEX_ID, jsvUnLock(funcName), 0); #ifndef SAVE_ON_FLASH funcVar = isClass ? jspeClassDefinition(false) : jspeFunctionDefinition(false); #else funcVar = jspeFunctionDefinition(false); #endif if (actuallyCreateFunction) { // find a function with the same name (or make one) // OPT: can Find* use just a JsVar that is a 'name'? 
JsVar *existingName = jspeiFindNameOnTop(funcName, true); JsVar *existingFunc = jsvSkipName(existingName); if (jsvIsFunction(existingFunc)) { // 'proper' replace, that keeps the original function var and swaps the children funcVar = jsvSkipNameAndUnLock(funcVar); jswrap_function_replaceWith(existingFunc, funcVar); } else { jsvReplaceWith(existingName, funcVar); } jsvUnLock(funcName); funcName = existingName; jsvUnLock(existingFunc); // existingName is used - don't UnLock } jsvUnLock(funcVar); return funcName; } NO_INLINE JsVar *jspeStatement() { #ifdef USE_DEBUGGER if (execInfo.execute&EXEC_DEBUGGER_NEXT_LINE && lex->tk!=';' && JSP_SHOULD_EXECUTE) { lex->tokenLastStart = lex->tokenStart; jsiDebuggerLoop(); } #endif if (lex->tk==LEX_ID || lex->tk==LEX_INT || lex->tk==LEX_FLOAT || lex->tk==LEX_STR || lex->tk==LEX_TEMPLATE_LITERAL || lex->tk==LEX_REGEX || lex->tk==LEX_R_NEW || lex->tk==LEX_R_NULL || lex->tk==LEX_R_UNDEFINED || lex->tk==LEX_R_TRUE || lex->tk==LEX_R_FALSE || lex->tk==LEX_R_THIS || lex->tk==LEX_R_DELETE || lex->tk==LEX_R_TYPEOF || lex->tk==LEX_R_VOID || lex->tk==LEX_R_SUPER || lex->tk==LEX_PLUSPLUS || lex->tk==LEX_MINUSMINUS || lex->tk=='!' || lex->tk=='-' || lex->tk=='+' || lex->tk=='~' || lex->tk=='[' || lex->tk=='(') { /* Execute a simple statement that only contains basic arithmetic... 
*/ return jspeExpression(); } else if (lex->tk=='{') { /* A block of code */ if (!jspCheckStackPosition()) return 0; jspeBlock(); return 0; } else if (lex->tk==';') { /* Empty statement - to allow things like ;;; */ JSP_ASSERT_MATCH(';'); return 0; } else if (lex->tk==LEX_R_VAR || lex->tk==LEX_R_LET || lex->tk==LEX_R_CONST) { return jspeStatementVar(); } else if (lex->tk==LEX_R_IF) { return jspeStatementIf(); } else if (lex->tk==LEX_R_DO) { return jspeStatementDoOrWhile(false); } else if (lex->tk==LEX_R_WHILE) { return jspeStatementDoOrWhile(true); } else if (lex->tk==LEX_R_FOR) { return jspeStatementFor(); } else if (lex->tk==LEX_R_TRY) { return jspeStatementTry(); } else if (lex->tk==LEX_R_RETURN) { return jspeStatementReturn(); } else if (lex->tk==LEX_R_THROW) { return jspeStatementThrow(); } else if (lex->tk==LEX_R_FUNCTION) { return jspeStatementFunctionDecl(false/* function */); #ifndef SAVE_ON_FLASH } else if (lex->tk==LEX_R_CLASS) { return jspeStatementFunctionDecl(true/* class */); #endif } else if (lex->tk==LEX_R_CONTINUE) { JSP_ASSERT_MATCH(LEX_R_CONTINUE); if (JSP_SHOULD_EXECUTE) { if (!(execInfo.execute & EXEC_IN_LOOP)) jsExceptionHere(JSET_SYNTAXERROR, "CONTINUE statement outside of FOR or WHILE loop"); else execInfo.execute = (execInfo.execute & (JsExecFlags)~EXEC_RUN_MASK) | EXEC_CONTINUE; } } else if (lex->tk==LEX_R_BREAK) { JSP_ASSERT_MATCH(LEX_R_BREAK); if (JSP_SHOULD_EXECUTE) { if (!(execInfo.execute & (EXEC_IN_LOOP|EXEC_IN_SWITCH))) jsExceptionHere(JSET_SYNTAXERROR, "BREAK statement outside of SWITCH, FOR or WHILE loop"); else execInfo.execute = (execInfo.execute & (JsExecFlags)~EXEC_RUN_MASK) | EXEC_BREAK; } } else if (lex->tk==LEX_R_SWITCH) { return jspeStatementSwitch(); } else if (lex->tk==LEX_R_DEBUGGER) { JSP_ASSERT_MATCH(LEX_R_DEBUGGER); #ifdef USE_DEBUGGER if (JSP_SHOULD_EXECUTE) jsiDebuggerLoop(); #endif } else JSP_MATCH(LEX_EOF); return 0; } // ----------------------------------------------------------------------------- /// Create a 
new built-in object that jswrapper can use to check for built-in functions JsVar *jspNewBuiltin(const char *instanceOf) { JsVar *objFunc = jswFindBuiltInFunction(0, instanceOf); if (!objFunc) return 0; // out of memory return objFunc; } /// Create a new Class of the given instance and return its prototype (as a name 'prototype') NO_INLINE JsVar *jspNewPrototype(const char *instanceOf) { JsVar *objFuncName = jsvFindChildFromString(execInfo.root, instanceOf, true); if (!objFuncName) // out of memory return 0; JsVar *objFunc = jsvSkipName(objFuncName); if (!objFunc) { objFunc = jspNewBuiltin(instanceOf); if (!objFunc) { // out of memory jsvUnLock(objFuncName); return 0; } // set up name jsvSetValueOfName(objFuncName, objFunc); } JsVar *prototypeName = jsvFindChildFromString(objFunc, JSPARSE_PROTOTYPE_VAR, true); jspEnsureIsPrototype(objFunc, prototypeName); // make sure it's an object jsvUnLock2(objFunc, objFuncName); return prototypeName; } /** Create a new object of the given instance and add it to root with name 'name'. 
* If name!=0, added to root with name, and the name is returned * If name==0, not added to root and Object itself returned */ NO_INLINE JsVar *jspNewObject(const char *name, const char *instanceOf) { JsVar *prototypeName = jspNewPrototype(instanceOf); JsVar *obj = jsvNewObject(); if (!obj) { // out of memory jsvUnLock(prototypeName); return 0; } if (name) { // If it's a device, set the device number up as the Object data // See jsiGetDeviceFromClass IOEventFlags device = jshFromDeviceString(name); if (device!=EV_NONE) { obj->varData.str[0] = 'D'; obj->varData.str[1] = 'E'; obj->varData.str[2] = 'V'; obj->varData.str[3] = (char)device; } } // add __proto__ JsVar *prototypeVar = jsvSkipName(prototypeName); jsvUnLock3(jsvAddNamedChild(obj, prototypeVar, JSPARSE_INHERITS_VAR), prototypeVar, prototypeName);prototypeName=0; if (name) { JsVar *objName = jsvFindChildFromString(execInfo.root, name, true); if (objName) jsvSetValueOfName(objName, obj); jsvUnLock(obj); if (!objName) { // out of memory return 0; } return objName; } else return obj; } /** Returns true if the constructor function given is the same as that * of the object with the given name. 
*/ bool jspIsConstructor(JsVar *constructor, const char *constructorName) { JsVar *objFunc = jsvObjectGetChild(execInfo.root, constructorName, 0); if (!objFunc) return false; bool isConstructor = objFunc == constructor; jsvUnLock(objFunc); return isConstructor; } /** Get the prototype of the given object, or return 0 if not found, or not an object */ JsVar *jspGetPrototype(JsVar *object) { if (!jsvIsObject(object)) return 0; JsVar *proto = jsvObjectGetChild(object, JSPARSE_INHERITS_VAR, 0); if (jsvIsObject(proto)) return proto; jsvUnLock(proto); return 0; } /** Get the constructor of the given object, or return 0 if not found, or not a function */ JsVar *jspGetConstructor(JsVar *object) { JsVar *proto = jspGetPrototype(object); if (proto) { JsVar *constr = jsvObjectGetChild(proto, JSPARSE_CONSTRUCTOR_VAR, 0); if (jsvIsFunction(constr)) { jsvUnLock(proto); return constr; } jsvUnLock2(constr, proto); } return 0; } // ----------------------------------------------------------------------------- void jspSoftInit() { execInfo.root = jsvFindOrCreateRoot(); // Root now has a lock and a ref execInfo.hiddenRoot = jsvObjectGetChild(execInfo.root, JS_HIDDEN_CHAR_STR, JSV_OBJECT); execInfo.execute = EXEC_YES; } void jspSoftKill() { jsvUnLock(execInfo.scopesVar); execInfo.scopesVar = 0; jsvUnLock(execInfo.hiddenRoot); execInfo.hiddenRoot = 0; jsvUnLock(execInfo.root); execInfo.root = 0; // Root is now left with just a ref } void jspInit() { jspSoftInit(); } void jspKill() { jspSoftKill(); // Unreffing this should completely kill everything attached to root JsVar *r = jsvFindOrCreateRoot(); jsvUnRef(r); jsvUnLock(r); } /** Evaluate the given variable as an expression (in current scope) */ JsVar *jspEvaluateExpressionVar(JsVar *str) { JsLex lex; assert(jsvIsString(str)); JsLex *oldLex = jslSetLex(&lex); jslInit(str); #ifndef ESPR_NO_LINE_NUMBERS lex.lineNumberOffset = oldLex->lineNumberOffset; #endif // actually do the parsing JsVar *v = jspeExpression(); jslKill(); 
jslSetLex(oldLex); return jsvSkipNameAndUnLock(v); } /** Execute code form a variable and return the result. If lineNumberOffset * is nonzero it's added to the line numbers that get reported for errors/debug */ JsVar *jspEvaluateVar(JsVar *str, JsVar *scope, uint16_t lineNumberOffset) { JsLex lex; assert(jsvIsString(str)); JsLex *oldLex = jslSetLex(&lex); jslInit(str); #ifndef ESPR_NO_LINE_NUMBERS lex.lineNumberOffset = lineNumberOffset; #endif JsExecInfo oldExecInfo = execInfo; execInfo.execute = EXEC_YES; if (scope) { // if we're adding a scope, make sure it's the *only* scope execInfo.scopesVar = 0; if (scope!=execInfo.root) jspeiAddScope(scope); // it's searched by default anyway } // actually do the parsing JsVar *v = jspParse(); // clean up if (scope) jspeiClearScopes(); jslKill(); jslSetLex(oldLex); // restore state and execInfo (keep error flags & ctrl-c) oldExecInfo.execute |= execInfo.execute & EXEC_PERSIST; execInfo = oldExecInfo; // It may have returned a reference, but we just want the value... return jsvSkipNameAndUnLock(v); } JsVar *jspEvaluate(const char *str, bool stringIsStatic) { /* using a memory area is more efficient, but the interpreter * may use substrings from it for function code. This means that * if the string goes away, everything gets corrupted - hence * the option here. 
*/ JsVar *evCode; if (stringIsStatic) evCode = jsvNewNativeString((char*)str, strlen(str)); else evCode = jsvNewFromString(str); if (!evCode) return 0; JsVar *v = 0; if (!jsvIsMemoryFull()) v = jspEvaluateVar(evCode, 0, 0); jsvUnLock(evCode); return v; } JsVar *jspExecuteJSFunction(const char *jsCode, JsVar *thisArg, int argCount, JsVar **argPtr) { JsVar *fn = jspEvaluate(jsCode,true); JsVar *result = jspExecuteFunction(fn,thisArg,argCount,argPtr); jsvUnLock(fn); return result; } JsVar *jspExecuteFunction(JsVar *func, JsVar *thisArg, int argCount, JsVar **argPtr) { JsExecInfo oldExecInfo = execInfo; execInfo.scopesVar = 0; execInfo.execute = EXEC_YES; execInfo.thisVar = 0; JsVar *result = jspeFunctionCall(func, 0, thisArg, false, argCount, argPtr); // clean up jspeiClearScopes(); // restore state and execInfo (keep error flags & ctrl-c) oldExecInfo.execute |= execInfo.execute&EXEC_PERSIST; jspeiClearScopes(); execInfo = oldExecInfo; return result; } /// Evaluate a JavaScript module and return its exports JsVar *jspEvaluateModule(JsVar *moduleContents) { assert(jsvIsString(moduleContents) || jsvIsFunction(moduleContents)); if (jsvIsFunction(moduleContents)) { moduleContents = jsvObjectGetChild(moduleContents,JSPARSE_FUNCTION_CODE_NAME,0); if (!jsvIsString(moduleContents)) { jsvUnLock(moduleContents); return 0; } } else jsvLockAgain(moduleContents); JsVar *scope = jsvNewObject(); JsVar *scopeExports = jsvNewObject(); if (!scope || !scopeExports) { // out of mem jsvUnLock3(scope, scopeExports, moduleContents); return 0; } JsVar *exportsName = jsvAddNamedChild(scope, scopeExports, "exports"); jsvUnLock2(scopeExports, jsvAddNamedChild(scope, scope, "module")); JsExecFlags oldExecute = execInfo.execute; JsVar *oldThisVar = execInfo.thisVar; execInfo.thisVar = scopeExports; // set 'this' variable to exports jsvUnLock(jspEvaluateVar(moduleContents, scope, 0)); execInfo.thisVar = oldThisVar; execInfo.execute = oldExecute; // make sure we fully restore state after parsing a 
module jsvUnLock2(moduleContents, scope); return jsvSkipNameAndUnLock(exportsName); } /** Get the owner of the current prototype. We assume that it's * the first item in the array, because that's what we will * have added when we created it. It's safe to call this on * non-prototypes and non-objects. */ JsVar *jspGetPrototypeOwner(JsVar *proto) { if (jsvIsObject(proto) || jsvIsArray(proto)) { return jsvSkipNameAndUnLock(jsvObjectGetChild(proto, JSPARSE_CONSTRUCTOR_VAR, 0)); } return 0; }
static NO_INLINE JsVar *jspGetNamedFieldInParents(JsVar *object, const char* name, bool returnName) { // Now look in prototypes JsVar * child = jspeiFindChildFromStringInParents(object, name); /* Check for builtins via separate function * This way we save on RAM for built-ins because everything comes out of program code */ if (!child) { child = jswFindBuiltInFunction(object, name); } /* We didn't get here if we found a child in the object itself, so * if we're here then we probably have the wrong name - so for example * with `a.b = c;` could end up setting `a.prototype.b` (bug #360) * * Also we might have got a built-in, which wouldn't have a name on it * anyway - so in both cases, strip the name if it is there, and create * a new name that references the object we actually requested the * member from.. */ if (child && returnName) { // Get rid of existing name if (jsvIsName(child)) { JsVar *t = jsvGetValueOfName(child); jsvUnLock(child); child = t; } // create a new name JsVar *nameVar = jsvNewFromString(name); JsVar *newChild = jsvCreateNewChild(object, nameVar, child); jsvUnLock2(nameVar, child); child = newChild; } // If not found and is the prototype, create it if (!child) { if (jsvIsFunction(object) && strcmp(name, JSPARSE_PROTOTYPE_VAR)==0) { // prototype is supposed to be an object JsVar *proto = jsvNewObject(); // make sure it has a 'constructor' variable that points to the object it was part of jsvObjectSetChild(proto, JSPARSE_CONSTRUCTOR_VAR, object); child = jsvAddNamedChild(object, proto, JSPARSE_PROTOTYPE_VAR); jspEnsureIsPrototype(object, child); jsvUnLock(proto); } else if (strcmp(name, JSPARSE_INHERITS_VAR)==0) { const char *objName = jswGetBasicObjectName(object); if (objName) { child = jspNewPrototype(objName); } } } return child; }
static NO_INLINE JsVar *jspGetNamedFieldInParents(JsVar *object, const char* name, bool returnName) { // Now look in prototypes JsVar * child = jspeiFindChildFromStringInParents(object, name); /* Check for builtins via separate function * This way we save on RAM for built-ins because everything comes out of program code */ if (!child) { child = jswFindBuiltInFunction(object, name); } /* We didn't get here if we found a child in the object itself, so * if we're here then we probably have the wrong name - so for example * with `a.b = c;` could end up setting `a.prototype.b` (bug #360) * * Also we might have got a built-in, which wouldn't have a name on it * anyway - so in both cases, strip the name if it is there, and create * a new name that references the object we actually requested the * member from.. */ if (child && returnName) { // Get rid of existing name if (jsvIsName(child)) { JsVar *t = jsvGetValueOfName(child); jsvUnLock(child); child = t; } // create a new name JsVar *nameVar = jsvNewFromString(name); JsVar *newChild = jsvCreateNewChild(object, nameVar, child); jsvUnLock2(nameVar, child); child = newChild; } // If not found and is the prototype, create it if (!child) { if (jsvIsFunction(object) && strcmp(name, JSPARSE_PROTOTYPE_VAR)==0) { // prototype is supposed to be an object JsVar *proto = jsvNewObject(); // make sure it has a 'constructor' variable that points to the object it was part of jsvObjectSetChild(proto, JSPARSE_CONSTRUCTOR_VAR, object); child = jsvAddNamedChild(object, proto, JSPARSE_PROTOTYPE_VAR); jspEnsureIsPrototype(object, child); jsvUnLock(proto); } else if (strcmp(name, JSPARSE_INHERITS_VAR)==0) { const char *objName = jswGetBasicObjectName(object); if (objName) { JsVar *p = jsvSkipNameAndUnLock(jspNewPrototype(objName)); // jspNewPrototype returns a 'prototype' name that's already a child of eg. 
an array // Create a new 'name' called __proto__ that links to it JsVar *i = jsvNewFromString(JSPARSE_INHERITS_VAR); if (p) child = jsvCreateNewChild(object, i, p); jsvUnLock(i); } } } return child; }
{'added': [(955, ' JsVar *p = jsvSkipNameAndUnLock(jspNewPrototype(objName));'), (956, " // jspNewPrototype returns a 'prototype' name that's already a child of eg. an array"), (957, " // Create a new 'name' called __proto__ that links to it"), (958, ' JsVar *i = jsvNewFromString(JSPARSE_INHERITS_VAR);'), (959, ' if (p) child = jsvCreateNewChild(object, i, p);'), (960, ' jsvUnLock(i);'), (1380, '#ifdef DEBUG'), (1381, ' if (jsvHasChildren(parent)) assert(jsvIsChild(parent, a));'), (1382, '#endif'), (1383, ' if (jsvHasChildren(parent) && jsvIsChild(parent, a)) {'), (1385, " /* we use jsvIsChild here just in case. delete probably isn't called"), (1386, ' that often so it pays to be safe */'), (2861, "/// Create a new Class of the given instance and return its prototype (as a name 'prototype')")], 'deleted': [(955, ' child = jspNewPrototype(objName);'), (1375, ' if (jsvHasChildren(parent)) {'), (2851, '/// Create a new Class of the given instance and return its prototype')]}
13
3
2,428
16,105
32
216
10
https://github.com/espruino/Espruino
CVE-2022-25044
CWE-787
3,206
gstasfdemux.c
C
gst_asf_demux_process_ext_content_desc
/* GStreamer ASF/WMV/WMA demuxer * Copyright (C) 1999 Erik Walthinsen <omega@cse.ogi.edu> * Copyright (C) 2006-2009 Tim-Philipp Müller <tim centricular net> * * This library is free software; you can redistribute it and/or * modify it under the terms of the GNU Library General Public * License as published by the Free Software Foundation; either * version 2 of the License, or (at your option) any later version. * * This library is distributed in the hope that it will be useful, * but WITHOUT ANY WARRANTY; without even the implied warranty of * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU * Library General Public License for more details. * * You should have received a copy of the GNU Library General Public * License along with this library; if not, write to the * Free Software Foundation, Inc., 51 Franklin St, Fifth Floor, * Boston, MA 02110-1301, USA. */ /* TODO: * * - _loop(): * stop if at end of segment if != end of file, ie. demux->segment.stop * * - fix packet parsing: * there's something wrong with timestamps for packets with keyframes, * and durations too. */ #ifdef HAVE_CONFIG_H #include "config.h" #endif #include <gst/gstutils.h> #include <gst/base/gstbytereader.h> #include <gst/base/gsttypefindhelper.h> #include <gst/riff/riff-media.h> #include <gst/tag/tag.h> #include <gst/gst-i18n-plugin.h> #include <gst/video/video.h> #include <stdio.h> #include <stdlib.h> #include <string.h> #include "gstasfdemux.h" #include "asfheaders.h" #include "asfpacket.h" static GstStaticPadTemplate gst_asf_demux_sink_template = GST_STATIC_PAD_TEMPLATE ("sink", GST_PAD_SINK, GST_PAD_ALWAYS, GST_STATIC_CAPS ("video/x-ms-asf") ); static GstStaticPadTemplate audio_src_template = GST_STATIC_PAD_TEMPLATE ("audio_%u", GST_PAD_SRC, GST_PAD_SOMETIMES, GST_STATIC_CAPS_ANY); static GstStaticPadTemplate video_src_template = GST_STATIC_PAD_TEMPLATE ("video_%u", GST_PAD_SRC, GST_PAD_SOMETIMES, GST_STATIC_CAPS_ANY); /* size of an ASF object header, ie. 
GUID (16 bytes) + object size (8 bytes) */ #define ASF_OBJECT_HEADER_SIZE (16+8) /* FIXME: get rid of this */ /* abuse this GstFlowReturn enum for internal usage */ #define ASF_FLOW_NEED_MORE_DATA 99 #define gst_asf_get_flow_name(flow) \ (flow == ASF_FLOW_NEED_MORE_DATA) ? \ "need-more-data" : gst_flow_get_name (flow) GST_DEBUG_CATEGORY (asfdemux_dbg); static GstStateChangeReturn gst_asf_demux_change_state (GstElement * element, GstStateChange transition); static gboolean gst_asf_demux_element_send_event (GstElement * element, GstEvent * event); static gboolean gst_asf_demux_send_event_unlocked (GstASFDemux * demux, GstEvent * event); static gboolean gst_asf_demux_handle_src_query (GstPad * pad, GstObject * parent, GstQuery * query); static GstFlowReturn gst_asf_demux_chain (GstPad * pad, GstObject * parent, GstBuffer * buf); static gboolean gst_asf_demux_sink_event (GstPad * pad, GstObject * parent, GstEvent * event); static GstFlowReturn gst_asf_demux_process_object (GstASFDemux * demux, guint8 ** p_data, guint64 * p_size); static gboolean gst_asf_demux_activate (GstPad * sinkpad, GstObject * parent); static gboolean gst_asf_demux_activate_mode (GstPad * sinkpad, GstObject * parent, GstPadMode mode, gboolean active); static void gst_asf_demux_loop (GstASFDemux * demux); static void gst_asf_demux_process_queued_extended_stream_objects (GstASFDemux * demux); static gboolean gst_asf_demux_pull_headers (GstASFDemux * demux, GstFlowReturn * pflow); static GstFlowReturn gst_asf_demux_pull_indices (GstASFDemux * demux); static void gst_asf_demux_reset_stream_state_after_discont (GstASFDemux * asf); static gboolean gst_asf_demux_parse_data_object_start (GstASFDemux * demux, guint8 * data); static void gst_asf_demux_descramble_buffer (GstASFDemux * demux, AsfStream * stream, GstBuffer ** p_buffer); static void gst_asf_demux_activate_stream (GstASFDemux * demux, AsfStream * stream); static GstStructure *gst_asf_demux_get_metadata_for_stream (GstASFDemux * d, guint 
stream_num); static GstFlowReturn gst_asf_demux_push_complete_payloads (GstASFDemux * demux, gboolean force); #define gst_asf_demux_parent_class parent_class G_DEFINE_TYPE (GstASFDemux, gst_asf_demux, GST_TYPE_ELEMENT); static void gst_asf_demux_class_init (GstASFDemuxClass * klass) { GstElementClass *gstelement_class; gstelement_class = (GstElementClass *) klass; gst_element_class_set_static_metadata (gstelement_class, "ASF Demuxer", "Codec/Demuxer", "Demultiplexes ASF Streams", "Owen Fraser-Green <owen@discobabe.net>"); gst_element_class_add_static_pad_template (gstelement_class, &audio_src_template); gst_element_class_add_static_pad_template (gstelement_class, &video_src_template); gst_element_class_add_static_pad_template (gstelement_class, &gst_asf_demux_sink_template); gstelement_class->change_state = GST_DEBUG_FUNCPTR (gst_asf_demux_change_state); gstelement_class->send_event = GST_DEBUG_FUNCPTR (gst_asf_demux_element_send_event); } static void gst_asf_demux_free_stream (GstASFDemux * demux, AsfStream * stream) { gst_caps_replace (&stream->caps, NULL); if (stream->pending_tags) { gst_tag_list_unref (stream->pending_tags); stream->pending_tags = NULL; } if (stream->streamheader) { gst_buffer_unref (stream->streamheader); stream->streamheader = NULL; } if (stream->pad) { if (stream->active) { gst_element_remove_pad (GST_ELEMENT_CAST (demux), stream->pad); gst_flow_combiner_remove_pad (demux->flowcombiner, stream->pad); } else gst_object_unref (stream->pad); stream->pad = NULL; } if (stream->payloads) { while (stream->payloads->len > 0) { AsfPayload *payload; guint last; last = stream->payloads->len - 1; payload = &g_array_index (stream->payloads, AsfPayload, last); gst_buffer_replace (&payload->buf, NULL); g_array_remove_index (stream->payloads, last); } g_array_free (stream->payloads, TRUE); stream->payloads = NULL; } if (stream->payloads_rev) { while (stream->payloads_rev->len > 0) { AsfPayload *payload; guint last; last = stream->payloads_rev->len - 1; 
payload = &g_array_index (stream->payloads_rev, AsfPayload, last); gst_buffer_replace (&payload->buf, NULL); g_array_remove_index (stream->payloads_rev, last); } g_array_free (stream->payloads_rev, TRUE); stream->payloads_rev = NULL; } if (stream->ext_props.valid) { g_free (stream->ext_props.payload_extensions); stream->ext_props.payload_extensions = NULL; } } static void gst_asf_demux_reset (GstASFDemux * demux, gboolean chain_reset) { GST_LOG_OBJECT (demux, "resetting"); gst_segment_init (&demux->segment, GST_FORMAT_UNDEFINED); demux->segment_running = FALSE; if (demux->adapter && !chain_reset) { gst_adapter_clear (demux->adapter); g_object_unref (demux->adapter); demux->adapter = NULL; } if (demux->taglist) { gst_tag_list_unref (demux->taglist); demux->taglist = NULL; } if (demux->metadata) { gst_caps_unref (demux->metadata); demux->metadata = NULL; } if (demux->global_metadata) { gst_structure_free (demux->global_metadata); demux->global_metadata = NULL; } if (demux->mut_ex_streams) { g_slist_free (demux->mut_ex_streams); demux->mut_ex_streams = NULL; } demux->state = GST_ASF_DEMUX_STATE_HEADER; g_free (demux->objpath); demux->objpath = NULL; g_strfreev (demux->languages); demux->languages = NULL; demux->num_languages = 0; g_slist_foreach (demux->ext_stream_props, (GFunc) gst_mini_object_unref, NULL); g_slist_free (demux->ext_stream_props); demux->ext_stream_props = NULL; while (demux->old_num_streams > 0) { gst_asf_demux_free_stream (demux, &demux->old_stream[demux->old_num_streams - 1]); --demux->old_num_streams; } memset (demux->old_stream, 0, sizeof (demux->old_stream)); demux->old_num_streams = 0; /* when resetting for a new chained asf, we don't want to remove the pads * before adding the new ones */ if (chain_reset) { memcpy (demux->old_stream, demux->stream, sizeof (demux->stream)); demux->old_num_streams = demux->num_streams; demux->num_streams = 0; } while (demux->num_streams > 0) { gst_asf_demux_free_stream (demux, &demux->stream[demux->num_streams - 
1]); --demux->num_streams; } memset (demux->stream, 0, sizeof (demux->stream)); if (!chain_reset) { /* do not remove those for not adding pads with same name */ demux->num_audio_streams = 0; demux->num_video_streams = 0; demux->have_group_id = FALSE; demux->group_id = G_MAXUINT; } demux->num_streams = 0; demux->activated_streams = FALSE; demux->first_ts = GST_CLOCK_TIME_NONE; demux->segment_ts = GST_CLOCK_TIME_NONE; demux->in_gap = 0; if (!chain_reset) gst_segment_init (&demux->in_segment, GST_FORMAT_UNDEFINED); demux->state = GST_ASF_DEMUX_STATE_HEADER; demux->seekable = FALSE; demux->broadcast = FALSE; demux->sidx_interval = 0; demux->sidx_num_entries = 0; g_free (demux->sidx_entries); demux->sidx_entries = NULL; demux->speed_packets = 1; demux->asf_3D_mode = GST_ASF_3D_NONE; if (chain_reset) { GST_LOG_OBJECT (demux, "Restarting"); gst_segment_init (&demux->segment, GST_FORMAT_TIME); demux->need_newsegment = TRUE; demux->segment_seqnum = 0; demux->segment_running = FALSE; demux->keyunit_sync = FALSE; demux->accurate = FALSE; demux->metadata = gst_caps_new_empty (); demux->global_metadata = gst_structure_new_empty ("metadata"); demux->data_size = 0; demux->data_offset = 0; demux->index_offset = 0; } else { demux->base_offset = 0; } g_slist_free (demux->other_streams); demux->other_streams = NULL; } static void gst_asf_demux_init (GstASFDemux * demux) { demux->sinkpad = gst_pad_new_from_static_template (&gst_asf_demux_sink_template, "sink"); gst_pad_set_chain_function (demux->sinkpad, GST_DEBUG_FUNCPTR (gst_asf_demux_chain)); gst_pad_set_event_function (demux->sinkpad, GST_DEBUG_FUNCPTR (gst_asf_demux_sink_event)); gst_pad_set_activate_function (demux->sinkpad, GST_DEBUG_FUNCPTR (gst_asf_demux_activate)); gst_pad_set_activatemode_function (demux->sinkpad, GST_DEBUG_FUNCPTR (gst_asf_demux_activate_mode)); gst_element_add_pad (GST_ELEMENT (demux), demux->sinkpad); /* set initial state */ gst_asf_demux_reset (demux, FALSE); } static gboolean gst_asf_demux_activate 
(GstPad * sinkpad, GstObject * parent) { GstQuery *query; gboolean pull_mode; query = gst_query_new_scheduling (); if (!gst_pad_peer_query (sinkpad, query)) { gst_query_unref (query); goto activate_push; } pull_mode = gst_query_has_scheduling_mode_with_flags (query, GST_PAD_MODE_PULL, GST_SCHEDULING_FLAG_SEEKABLE); gst_query_unref (query); if (!pull_mode) goto activate_push; GST_DEBUG_OBJECT (sinkpad, "activating pull"); return gst_pad_activate_mode (sinkpad, GST_PAD_MODE_PULL, TRUE); activate_push: { GST_DEBUG_OBJECT (sinkpad, "activating push"); return gst_pad_activate_mode (sinkpad, GST_PAD_MODE_PUSH, TRUE); } } static gboolean gst_asf_demux_activate_mode (GstPad * sinkpad, GstObject * parent, GstPadMode mode, gboolean active) { gboolean res; GstASFDemux *demux; demux = GST_ASF_DEMUX (parent); switch (mode) { case GST_PAD_MODE_PUSH: demux->state = GST_ASF_DEMUX_STATE_HEADER; demux->streaming = TRUE; res = TRUE; break; case GST_PAD_MODE_PULL: if (active) { demux->state = GST_ASF_DEMUX_STATE_HEADER; demux->streaming = FALSE; res = gst_pad_start_task (sinkpad, (GstTaskFunction) gst_asf_demux_loop, demux, NULL); } else { res = gst_pad_stop_task (sinkpad); } break; default: res = FALSE; break; } return res; } static gboolean gst_asf_demux_sink_event (GstPad * pad, GstObject * parent, GstEvent * event) { GstASFDemux *demux; gboolean ret = TRUE; demux = GST_ASF_DEMUX (parent); GST_LOG_OBJECT (demux, "handling %s event", GST_EVENT_TYPE_NAME (event)); switch (GST_EVENT_TYPE (event)) { case GST_EVENT_SEGMENT:{ const GstSegment *segment; gst_event_parse_segment (event, &segment); if (segment->format == GST_FORMAT_BYTES) { if (demux->packet_size && segment->start > demux->data_offset) demux->packet = (segment->start - demux->data_offset) / demux->packet_size; else demux->packet = 0; } else if (segment->format == GST_FORMAT_TIME) { /* do not know packet position, not really a problem */ demux->packet = -1; } else { GST_WARNING_OBJECT (demux, "unsupported newsegment format, 
ignoring"); gst_event_unref (event); break; } /* record upstream segment for interpolation */ if (segment->format != demux->in_segment.format) gst_segment_init (&demux->in_segment, GST_FORMAT_UNDEFINED); gst_segment_copy_into (segment, &demux->in_segment); /* in either case, clear some state and generate newsegment later on */ GST_OBJECT_LOCK (demux); demux->segment_ts = GST_CLOCK_TIME_NONE; demux->in_gap = GST_CLOCK_TIME_NONE; demux->need_newsegment = TRUE; demux->segment_seqnum = gst_event_get_seqnum (event); gst_asf_demux_reset_stream_state_after_discont (demux); /* if we seek back after reaching EOS, go back to packet reading state */ if (demux->data_offset > 0 && segment->start >= demux->data_offset && demux->state == GST_ASF_DEMUX_STATE_INDEX) { demux->state = GST_ASF_DEMUX_STATE_DATA; } GST_OBJECT_UNLOCK (demux); gst_event_unref (event); break; } case GST_EVENT_EOS:{ GstFlowReturn flow; if (demux->state == GST_ASF_DEMUX_STATE_HEADER) { GST_ELEMENT_ERROR (demux, STREAM, DEMUX, (_("This stream contains no data.")), ("got eos and didn't receive a complete header object")); break; } flow = gst_asf_demux_push_complete_payloads (demux, TRUE); if (!demux->activated_streams) { /* If we still haven't got activated streams, the file is most likely corrupt */ GST_ELEMENT_ERROR (demux, STREAM, WRONG_TYPE, (_("This stream contains no data.")), ("got eos and didn't receive a complete header object")); break; } if (flow < GST_FLOW_EOS || flow == GST_FLOW_NOT_LINKED) { GST_ELEMENT_FLOW_ERROR (demux, flow); break; } GST_OBJECT_LOCK (demux); gst_adapter_clear (demux->adapter); GST_OBJECT_UNLOCK (demux); gst_asf_demux_send_event_unlocked (demux, event); break; } case GST_EVENT_FLUSH_STOP: GST_OBJECT_LOCK (demux); gst_asf_demux_reset_stream_state_after_discont (demux); GST_OBJECT_UNLOCK (demux); gst_asf_demux_send_event_unlocked (demux, event); /* upon activation, latency is no longer introduced, e.g. 
after seek */ if (demux->activated_streams) demux->latency = 0; break; default: ret = gst_pad_event_default (pad, parent, event); break; } return ret; } static gboolean gst_asf_demux_seek_index_lookup (GstASFDemux * demux, guint * packet, GstClockTime seek_time, GstClockTime * p_idx_time, guint * speed, gboolean next, gboolean * eos) { GstClockTime idx_time; guint idx; if (eos) *eos = FALSE; if (G_UNLIKELY (demux->sidx_num_entries == 0 || demux->sidx_interval == 0)) return FALSE; idx = (guint) ((seek_time + demux->preroll) / demux->sidx_interval); if (next) { /* if we want the next keyframe, we have to go forward till we find a different packet number */ guint idx2; if (idx >= demux->sidx_num_entries - 1) { /* If we get here, we're asking for next keyframe after the last one. There isn't one. */ if (eos) *eos = TRUE; return FALSE; } for (idx2 = idx + 1; idx2 < demux->sidx_num_entries; ++idx2) { if (demux->sidx_entries[idx].packet != demux->sidx_entries[idx2].packet) { idx = idx2; break; } } } if (G_UNLIKELY (idx >= demux->sidx_num_entries)) { if (eos) *eos = TRUE; return FALSE; } *packet = demux->sidx_entries[idx].packet; if (speed) *speed = demux->sidx_entries[idx].count; /* so we get closer to the actual time of the packet ... actually, let's not * do this, since we throw away superfluous payloads before the seek position * anyway; this way, our key unit seek 'snap resolution' is a bit better * (ie. 
same as index resolution) */ /* while (idx > 0 && demux->sidx_entries[idx-1] == demux->sidx_entries[idx]) --idx; */ idx_time = demux->sidx_interval * idx; if (G_LIKELY (idx_time >= demux->preroll)) idx_time -= demux->preroll; GST_DEBUG_OBJECT (demux, "%" GST_TIME_FORMAT " => packet %u at %" GST_TIME_FORMAT, GST_TIME_ARGS (seek_time), *packet, GST_TIME_ARGS (idx_time)); if (G_LIKELY (p_idx_time)) *p_idx_time = idx_time; return TRUE; } static void gst_asf_demux_reset_stream_state_after_discont (GstASFDemux * demux) { guint n; gst_adapter_clear (demux->adapter); GST_DEBUG_OBJECT (demux, "reset stream state"); gst_flow_combiner_reset (demux->flowcombiner); for (n = 0; n < demux->num_streams; n++) { demux->stream[n].discont = TRUE; demux->stream[n].first_buffer = TRUE; while (demux->stream[n].payloads->len > 0) { AsfPayload *payload; guint last; last = demux->stream[n].payloads->len - 1; payload = &g_array_index (demux->stream[n].payloads, AsfPayload, last); gst_buffer_replace (&payload->buf, NULL); g_array_remove_index (demux->stream[n].payloads, last); } } } static void gst_asf_demux_mark_discont (GstASFDemux * demux) { guint n; GST_DEBUG_OBJECT (demux, "Mark stream discont"); for (n = 0; n < demux->num_streams; n++) demux->stream[n].discont = TRUE; } /* do a seek in push based mode */ static gboolean gst_asf_demux_handle_seek_push (GstASFDemux * demux, GstEvent * event) { gdouble rate; GstFormat format; GstSeekFlags flags; GstSeekType cur_type, stop_type; gint64 cur, stop; guint packet; gboolean res; GstEvent *byte_event; gst_event_parse_seek (event, &rate, &format, &flags, &cur_type, &cur, &stop_type, &stop); stop_type = GST_SEEK_TYPE_NONE; stop = -1; GST_DEBUG_OBJECT (demux, "seeking to %" GST_TIME_FORMAT, GST_TIME_ARGS (cur)); /* determine packet, by index or by estimation */ if (!gst_asf_demux_seek_index_lookup (demux, &packet, cur, NULL, NULL, FALSE, NULL)) { packet = (guint) gst_util_uint64_scale (demux->num_packets, cur, demux->play_time); } if (packet > 
demux->num_packets) { GST_DEBUG_OBJECT (demux, "could not determine packet to seek to, " "seek aborted."); return FALSE; } GST_DEBUG_OBJECT (demux, "seeking to packet %d", packet); cur = demux->data_offset + ((guint64) packet * demux->packet_size); GST_DEBUG_OBJECT (demux, "Pushing BYTE seek rate %g, " "start %" G_GINT64_FORMAT ", stop %" G_GINT64_FORMAT, rate, cur, stop); /* BYTE seek event */ byte_event = gst_event_new_seek (rate, GST_FORMAT_BYTES, flags, cur_type, cur, stop_type, stop); gst_event_set_seqnum (byte_event, gst_event_get_seqnum (event)); res = gst_pad_push_event (demux->sinkpad, byte_event); return res; } static gboolean gst_asf_demux_handle_seek_event (GstASFDemux * demux, GstEvent * event) { GstClockTime idx_time; GstSegment segment; GstSeekFlags flags; GstSeekType cur_type, stop_type; GstFormat format; gboolean only_need_update; gboolean after, before, next; gboolean flush; gdouble rate; gint64 cur, stop; gint64 seek_time; guint packet, speed_count = 1; gboolean eos; guint32 seqnum; GstEvent *fevent; gint i; gst_event_parse_seek (event, &rate, &format, &flags, &cur_type, &cur, &stop_type, &stop); if (G_UNLIKELY (format != GST_FORMAT_TIME)) { GST_LOG_OBJECT (demux, "seeking is only supported in TIME format"); return FALSE; } /* upstream might handle TIME seek, e.g. mms or rtsp, or not, e.g. http, * so first try to let it handle the seek event. 
*/ if (gst_pad_push_event (demux->sinkpad, gst_event_ref (event))) return TRUE; if (G_UNLIKELY (demux->seekable == FALSE || demux->packet_size == 0 || demux->num_packets == 0 || demux->play_time == 0)) { GST_LOG_OBJECT (demux, "stream is not seekable"); return FALSE; } if (G_UNLIKELY (!demux->activated_streams)) { GST_LOG_OBJECT (demux, "streams not yet activated, ignoring seek"); return FALSE; } if (G_UNLIKELY (rate <= 0.0)) { GST_LOG_OBJECT (demux, "backward playback"); demux->seek_to_cur_pos = TRUE; for (i = 0; i < demux->num_streams; i++) { demux->stream[i].reverse_kf_ready = FALSE; } } seqnum = gst_event_get_seqnum (event); flush = ((flags & GST_SEEK_FLAG_FLUSH) == GST_SEEK_FLAG_FLUSH); demux->accurate = ((flags & GST_SEEK_FLAG_ACCURATE) == GST_SEEK_FLAG_ACCURATE); demux->keyunit_sync = ((flags & GST_SEEK_FLAG_KEY_UNIT) == GST_SEEK_FLAG_KEY_UNIT); after = ((flags & GST_SEEK_FLAG_SNAP_AFTER) == GST_SEEK_FLAG_SNAP_AFTER); before = ((flags & GST_SEEK_FLAG_SNAP_BEFORE) == GST_SEEK_FLAG_SNAP_BEFORE); next = after && !before; if (G_UNLIKELY (demux->streaming)) { /* support it safely needs more segment handling, e.g. 
closing etc */ if (!flush) { GST_LOG_OBJECT (demux, "streaming; non-flushing seek not supported"); return FALSE; } /* we can (re)construct the start later on, but not the end */ if (stop_type != GST_SEEK_TYPE_NONE && (stop_type != GST_SEEK_TYPE_SET || GST_CLOCK_TIME_IS_VALID (stop))) { GST_LOG_OBJECT (demux, "streaming; end position must be NONE"); return FALSE; } return gst_asf_demux_handle_seek_push (demux, event); } /* unlock the streaming thread */ if (G_LIKELY (flush)) { fevent = gst_event_new_flush_start (); gst_event_set_seqnum (fevent, seqnum); gst_pad_push_event (demux->sinkpad, gst_event_ref (fevent)); gst_asf_demux_send_event_unlocked (demux, fevent); } else { gst_pad_pause_task (demux->sinkpad); } /* grab the stream lock so that streaming cannot continue, for * non flushing seeks when the element is in PAUSED this could block * forever */ GST_PAD_STREAM_LOCK (demux->sinkpad); /* we now can stop flushing, since we have the stream lock now */ fevent = gst_event_new_flush_stop (TRUE); gst_event_set_seqnum (fevent, seqnum); gst_pad_push_event (demux->sinkpad, gst_event_ref (fevent)); if (G_LIKELY (flush)) gst_asf_demux_send_event_unlocked (demux, fevent); else gst_event_unref (fevent); /* operating on copy of segment until we know the seek worked */ segment = demux->segment; if (G_UNLIKELY (demux->segment_running && !flush)) { GstSegment newsegment; GstEvent *newseg; /* create the segment event to close the current segment */ gst_segment_copy_into (&segment, &newsegment); newseg = gst_event_new_segment (&newsegment); gst_event_set_seqnum (newseg, seqnum); gst_asf_demux_send_event_unlocked (demux, newseg); } gst_segment_do_seek (&segment, rate, format, flags, cur_type, cur, stop_type, stop, &only_need_update); GST_DEBUG_OBJECT (demux, "seeking to time %" GST_TIME_FORMAT ", segment: " "%" GST_SEGMENT_FORMAT, GST_TIME_ARGS (segment.start), &segment); if (cur_type != GST_SEEK_TYPE_SET) seek_time = segment.start; else seek_time = cur; /* FIXME: should check the 
KEY_UNIT flag; need to adjust position to * real start of data and segment_start to indexed time for key unit seek*/ if (G_UNLIKELY (!gst_asf_demux_seek_index_lookup (demux, &packet, seek_time, &idx_time, &speed_count, next, &eos))) { gint64 offset; if (eos) { demux->packet = demux->num_packets; goto skip; } /* First try to query our source to see if it can convert for us. This is the case when our source is an mms stream, notice that in this case gstmms will do a time based seek to get the byte offset, this is not a problem as the seek to this offset needs to happen anway. */ if (gst_pad_peer_query_convert (demux->sinkpad, GST_FORMAT_TIME, seek_time, GST_FORMAT_BYTES, &offset)) { packet = (offset - demux->data_offset) / demux->packet_size; GST_LOG_OBJECT (demux, "convert %" GST_TIME_FORMAT " to bytes query result: %" G_GINT64_FORMAT ", data_ofset: %" G_GINT64_FORMAT ", packet_size: %u," " resulting packet: %u\n", GST_TIME_ARGS (seek_time), offset, demux->data_offset, demux->packet_size, packet); } else { /* FIXME: For streams containing video, seek to an earlier position in * the hope of hitting a keyframe and let the sinks throw away the stuff * before the segment start. For audio-only this is unnecessary as every * frame is 'key'. 
*/ if (flush && (demux->accurate || (demux->keyunit_sync && !next)) && demux->num_video_streams > 0) { seek_time -= 5 * GST_SECOND; if (seek_time < 0) seek_time = 0; } packet = (guint) gst_util_uint64_scale (demux->num_packets, seek_time, demux->play_time); if (packet > demux->num_packets) packet = demux->num_packets; } } else { if (G_LIKELY (demux->keyunit_sync && !demux->accurate)) { GST_DEBUG_OBJECT (demux, "key unit seek, adjust seek_time = %" GST_TIME_FORMAT " to index_time = %" GST_TIME_FORMAT, GST_TIME_ARGS (seek_time), GST_TIME_ARGS (idx_time)); segment.start = idx_time; segment.position = idx_time; segment.time = idx_time; } } GST_DEBUG_OBJECT (demux, "seeking to packet %u (%d)", packet, speed_count); GST_OBJECT_LOCK (demux); demux->segment = segment; if (GST_ASF_DEMUX_IS_REVERSE_PLAYBACK (demux->segment)) { demux->packet = (gint64) gst_util_uint64_scale (demux->num_packets, stop, demux->play_time); } else { demux->packet = packet; } demux->need_newsegment = TRUE; demux->segment_seqnum = seqnum; demux->speed_packets = GST_ASF_DEMUX_IS_REVERSE_PLAYBACK (demux->segment) ? 
1 : speed_count; gst_asf_demux_reset_stream_state_after_discont (demux); GST_OBJECT_UNLOCK (demux); skip: /* restart our task since it might have been stopped when we did the flush */ gst_pad_start_task (demux->sinkpad, (GstTaskFunction) gst_asf_demux_loop, demux, NULL); /* streaming can continue now */ GST_PAD_STREAM_UNLOCK (demux->sinkpad); return TRUE; } static gboolean gst_asf_demux_handle_src_event (GstPad * pad, GstObject * parent, GstEvent * event) { GstASFDemux *demux; gboolean ret; demux = GST_ASF_DEMUX (parent); switch (GST_EVENT_TYPE (event)) { case GST_EVENT_SEEK: GST_LOG_OBJECT (pad, "seek event"); ret = gst_asf_demux_handle_seek_event (demux, event); gst_event_unref (event); break; case GST_EVENT_QOS: case GST_EVENT_NAVIGATION: /* just drop these two silently */ gst_event_unref (event); ret = FALSE; break; default: GST_LOG_OBJECT (pad, "%s event", GST_EVENT_TYPE_NAME (event)); ret = gst_pad_event_default (pad, parent, event); break; } return ret; } static inline guint32 gst_asf_demux_identify_guid (const ASFGuidHash * guids, ASFGuid * guid) { guint32 ret; ret = gst_asf_identify_guid (guids, guid); GST_LOG ("%s 0x%08x-0x%08x-0x%08x-0x%08x", gst_asf_get_guid_nick (guids, ret), guid->v1, guid->v2, guid->v3, guid->v4); return ret; } typedef struct { AsfObjectID id; guint64 size; } AsfObject; /* Peek for an object. * * Returns FALSE is the object is corrupted (such as the reported * object size being greater than 2**32bits. 
*/ static gboolean asf_demux_peek_object (GstASFDemux * demux, const guint8 * data, guint data_len, AsfObject * object, gboolean expect) { ASFGuid guid; /* Callers should have made sure that data_len is big enough */ g_assert (data_len >= ASF_OBJECT_HEADER_SIZE); if (data_len < ASF_OBJECT_HEADER_SIZE) return FALSE; guid.v1 = GST_READ_UINT32_LE (data + 0); guid.v2 = GST_READ_UINT32_LE (data + 4); guid.v3 = GST_READ_UINT32_LE (data + 8); guid.v4 = GST_READ_UINT32_LE (data + 12); /* FIXME: make asf_demux_identify_object_guid() */ object->id = gst_asf_demux_identify_guid (asf_object_guids, &guid); if (object->id == ASF_OBJ_UNDEFINED && expect) { GST_WARNING_OBJECT (demux, "Unknown object %08x-%08x-%08x-%08x", guid.v1, guid.v2, guid.v3, guid.v4); } object->size = GST_READ_UINT64_LE (data + 16); if (object->id != ASF_OBJ_DATA && object->size >= G_MAXUINT) { GST_WARNING_OBJECT (demux, "ASF Object size corrupted (greater than 32bit)"); return FALSE; } return TRUE; } static void gst_asf_demux_release_old_pads (GstASFDemux * demux) { GST_DEBUG_OBJECT (demux, "Releasing old pads"); while (demux->old_num_streams > 0) { gst_pad_push_event (demux->old_stream[demux->old_num_streams - 1].pad, gst_event_new_eos ()); gst_asf_demux_free_stream (demux, &demux->old_stream[demux->old_num_streams - 1]); --demux->old_num_streams; } memset (demux->old_stream, 0, sizeof (demux->old_stream)); demux->old_num_streams = 0; } static GstFlowReturn gst_asf_demux_chain_headers (GstASFDemux * demux) { AsfObject obj; guint8 *header_data, *data = NULL; const guint8 *cdata = NULL; guint64 header_size; GstFlowReturn flow = GST_FLOW_OK; cdata = (guint8 *) gst_adapter_map (demux->adapter, ASF_OBJECT_HEADER_SIZE); if (cdata == NULL) goto need_more_data; if (!asf_demux_peek_object (demux, cdata, ASF_OBJECT_HEADER_SIZE, &obj, TRUE)) goto parse_failed; if (obj.id != ASF_OBJ_HEADER) goto wrong_type; GST_LOG_OBJECT (demux, "header size = %u", (guint) obj.size); /* + 50 for non-packet data at beginning of 
ASF_OBJ_DATA */ if (gst_adapter_available (demux->adapter) < obj.size + 50) goto need_more_data; data = gst_adapter_take (demux->adapter, obj.size + 50); header_data = data; header_size = obj.size; flow = gst_asf_demux_process_object (demux, &header_data, &header_size); if (flow != GST_FLOW_OK) goto parse_failed; /* calculate where the packet data starts */ demux->data_offset = obj.size + 50; /* now parse the beginning of the ASF_OBJ_DATA object */ if (!gst_asf_demux_parse_data_object_start (demux, data + obj.size)) goto wrong_type; if (demux->num_streams == 0) goto no_streams; g_free (data); return GST_FLOW_OK; /* NON-FATAL */ need_more_data: { GST_LOG_OBJECT (demux, "not enough data in adapter yet"); return GST_FLOW_OK; } /* ERRORS */ wrong_type: { GST_ELEMENT_ERROR (demux, STREAM, WRONG_TYPE, (NULL), ("This doesn't seem to be an ASF file")); g_free (data); return GST_FLOW_ERROR; } no_streams: parse_failed: { GST_ELEMENT_ERROR (demux, STREAM, DEMUX, (NULL), ("header parsing failed, or no streams found, flow = %s", gst_flow_get_name (flow))); g_free (data); return GST_FLOW_ERROR; } } static gboolean gst_asf_demux_pull_data (GstASFDemux * demux, guint64 offset, guint size, GstBuffer ** p_buf, GstFlowReturn * p_flow) { gsize buffer_size; GstFlowReturn flow; GST_LOG_OBJECT (demux, "pulling buffer at %" G_GUINT64_FORMAT "+%u", offset, size); flow = gst_pad_pull_range (demux->sinkpad, offset, size, p_buf); if (G_LIKELY (p_flow)) *p_flow = flow; if (G_UNLIKELY (flow != GST_FLOW_OK)) { GST_DEBUG_OBJECT (demux, "flow %s pulling buffer at %" G_GUINT64_FORMAT "+%u", gst_flow_get_name (flow), offset, size); *p_buf = NULL; return FALSE; } g_assert (*p_buf != NULL); buffer_size = gst_buffer_get_size (*p_buf); if (G_UNLIKELY (buffer_size < size)) { GST_DEBUG_OBJECT (demux, "short read pulling buffer at %" G_GUINT64_FORMAT "+%u (got only %" G_GSIZE_FORMAT " bytes)", offset, size, buffer_size); gst_buffer_unref (*p_buf); if (G_LIKELY (p_flow)) *p_flow = GST_FLOW_EOS; *p_buf = 
NULL;
    return FALSE;
  }

  return TRUE;
}

/* Pull-mode: reads the simple-index objects that follow the DATA section
 * (demux->index_offset) and feeds each to gst_asf_demux_process_object().
 * A missing index offset is non-fatal; an implausibly large (> 5 MiB) or
 * unparseable index object simply stops index reading. */
static GstFlowReturn
gst_asf_demux_pull_indices (GstASFDemux * demux)
{
  GstBuffer *buf = NULL;
  guint64 offset;
  guint num_read = 0;
  GstFlowReturn ret = GST_FLOW_OK;

  offset = demux->index_offset;
  if (G_UNLIKELY (offset == 0)) {
    GST_DEBUG_OBJECT (demux, "can't read indices, don't know index offset");
    /* non-fatal */
    return GST_FLOW_OK;
  }

  /* 16 + 8 = GUID + size, i.e. one ASF object header */
  while (gst_asf_demux_pull_data (demux, offset, 16 + 8, &buf, NULL)) {
    AsfObject obj;
    GstMapInfo map;
    guint8 *bufdata;
    guint64 obj_size;

    gst_buffer_map (buf, &map, GST_MAP_READ);
    g_assert (map.size >= 16 + 8);
    if (!asf_demux_peek_object (demux, map.data, 16 + 8, &obj, TRUE)) {
      gst_buffer_unmap (buf, &map);
      gst_buffer_replace (&buf, NULL);
      ret = GST_FLOW_ERROR;
      break;
    }
    gst_buffer_unmap (buf, &map);
    gst_buffer_replace (&buf, NULL);

    /* check for sanity */
    if (G_UNLIKELY (obj.size > (5 * 1024 * 1024))) {
      GST_DEBUG_OBJECT (demux, "implausible index object size, bailing out");
      break;
    }

    if (G_UNLIKELY (!gst_asf_demux_pull_data (demux, offset, obj.size, &buf,
                NULL)))
      break;

    GST_LOG_OBJECT (demux, "index object at offset 0x%" G_GINT64_MODIFIER "X"
        ", size %u", offset, (guint) obj.size);

    offset += obj.size;         /* increase before _process_object changes it */

    gst_buffer_map (buf, &map, GST_MAP_READ);
    g_assert (map.size >= obj.size);
    bufdata = (guint8 *) map.data;
    obj_size = obj.size;
    ret = gst_asf_demux_process_object (demux, &bufdata, &obj_size);
    gst_buffer_unmap (buf, &map);
    gst_buffer_replace (&buf, NULL);

    if (G_UNLIKELY (ret != GST_FLOW_OK))
      break;

    ++num_read;
  }

  GST_DEBUG_OBJECT (demux, "read %u index objects", num_read);

  return ret;
}

/* Validates the first 50 bytes of the DATA object and initialises the
 * packet-streaming state (data_size, index_offset, num_packets,
 * seekability). 'data' must hold at least 50 bytes. */
static gboolean
gst_asf_demux_parse_data_object_start (GstASFDemux * demux, guint8 * data)
{
  AsfObject obj;

  if (!asf_demux_peek_object (demux, data, 50, &obj, TRUE)) {
    GST_WARNING_OBJECT (demux, "Corrupted data");
    return FALSE;
  }
  if (obj.id != ASF_OBJ_DATA) {
    GST_WARNING_OBJECT (demux, "headers not followed by a DATA object");
    return FALSE;
  }

  demux->state =
GST_ASF_DEMUX_STATE_DATA; if (!demux->broadcast && obj.size > 50) { demux->data_size = obj.size - 50; /* CHECKME: for at least one file this is off by +158 bytes?! */ demux->index_offset = demux->data_offset + demux->data_size; } else { demux->data_size = 0; demux->index_offset = 0; } demux->packet = 0; if (!demux->broadcast) { /* skip object header (24 bytes) and file GUID (16 bytes) */ demux->num_packets = GST_READ_UINT64_LE (data + (16 + 8) + 16); } else { demux->num_packets = 0; } if (demux->num_packets == 0) demux->seekable = FALSE; /* fallback in the unlikely case that headers are inconsistent, can't hurt */ if (demux->data_size == 0 && demux->num_packets > 0) { demux->data_size = demux->num_packets * demux->packet_size; demux->index_offset = demux->data_offset + demux->data_size; } /* process pending stream objects and create pads for those */ gst_asf_demux_process_queued_extended_stream_objects (demux); GST_INFO_OBJECT (demux, "Stream has %" G_GUINT64_FORMAT " packets, " "data_offset=%" G_GINT64_FORMAT ", data_size=%" G_GINT64_FORMAT ", index_offset=%" G_GUINT64_FORMAT, demux->num_packets, demux->data_offset, demux->data_size, demux->index_offset); return TRUE; } static gboolean gst_asf_demux_pull_headers (GstASFDemux * demux, GstFlowReturn * pflow) { GstFlowReturn flow = GST_FLOW_OK; AsfObject obj; GstBuffer *buf = NULL; guint64 size; GstMapInfo map; guint8 *bufdata; GST_LOG_OBJECT (demux, "reading headers"); /* pull HEADER object header, so we know its size */ if (!gst_asf_demux_pull_data (demux, demux->base_offset, 16 + 8, &buf, &flow)) goto read_failed; gst_buffer_map (buf, &map, GST_MAP_READ); g_assert (map.size >= 16 + 8); if (!asf_demux_peek_object (demux, map.data, 16 + 8, &obj, TRUE)) { gst_buffer_unmap (buf, &map); gst_buffer_replace (&buf, NULL); flow = GST_FLOW_ERROR; goto read_failed; } gst_buffer_unmap (buf, &map); gst_buffer_replace (&buf, NULL); if (obj.id != ASF_OBJ_HEADER) goto wrong_type; GST_LOG_OBJECT (demux, "header size = %" 
G_GUINT64_FORMAT, obj.size); /* pull HEADER object */ if (!gst_asf_demux_pull_data (demux, demux->base_offset, obj.size, &buf, &flow)) goto read_failed; size = obj.size; /* don't want obj.size changed */ gst_buffer_map (buf, &map, GST_MAP_READ); g_assert (map.size >= size); bufdata = (guint8 *) map.data; flow = gst_asf_demux_process_object (demux, &bufdata, &size); gst_buffer_unmap (buf, &map); gst_buffer_replace (&buf, NULL); if (flow != GST_FLOW_OK) { GST_WARNING_OBJECT (demux, "process_object: %s", gst_flow_get_name (flow)); goto parse_failed; } /* calculate where the packet data starts */ demux->data_offset = demux->base_offset + obj.size + 50; /* now pull beginning of DATA object before packet data */ if (!gst_asf_demux_pull_data (demux, demux->base_offset + obj.size, 50, &buf, &flow)) goto read_failed; gst_buffer_map (buf, &map, GST_MAP_READ); g_assert (map.size >= size); bufdata = (guint8 *) map.data; if (!gst_asf_demux_parse_data_object_start (demux, bufdata)) goto wrong_type; if (demux->num_streams == 0) goto no_streams; gst_buffer_unmap (buf, &map); gst_buffer_replace (&buf, NULL); return TRUE; /* ERRORS */ wrong_type: { if (buf != NULL) { gst_buffer_unmap (buf, &map); gst_buffer_replace (&buf, NULL); } GST_ELEMENT_ERROR (demux, STREAM, WRONG_TYPE, (NULL), ("This doesn't seem to be an ASF file")); *pflow = GST_FLOW_ERROR; return FALSE; } no_streams: flow = GST_FLOW_ERROR; GST_ELEMENT_ERROR (demux, STREAM, DEMUX, (NULL), ("header parsing failed, or no streams found, flow = %s", gst_flow_get_name (flow))); read_failed: parse_failed: { if (buf) gst_buffer_unmap (buf, &map); gst_buffer_replace (&buf, NULL); if (flow == ASF_FLOW_NEED_MORE_DATA) flow = GST_FLOW_ERROR; *pflow = flow; return FALSE; } } static gboolean all_streams_prerolled (GstASFDemux * demux) { GstClockTime preroll_time; guint i, num_no_data = 0; /* Allow at least 500ms of preroll_time */ preroll_time = MAX (demux->preroll, 500 * GST_MSECOND); /* returns TRUE as long as there isn't a stream 
which (a) has data queued * and (b) the timestamp of last piece of data queued is < demux->preroll * AND there is at least one other stream with data queued */ for (i = 0; i < demux->num_streams; ++i) { AsfPayload *last_payload = NULL; AsfStream *stream; gint last_idx; stream = &demux->stream[i]; if (G_UNLIKELY (stream->payloads->len == 0)) { ++num_no_data; GST_LOG_OBJECT (stream->pad, "no data queued"); continue; } /* find last payload with timestamp */ for (last_idx = stream->payloads->len - 1; last_idx >= 0 && (last_payload == NULL || !GST_CLOCK_TIME_IS_VALID (last_payload->ts)); --last_idx) { last_payload = &g_array_index (stream->payloads, AsfPayload, last_idx); } GST_LOG_OBJECT (stream->pad, "checking if %" GST_TIME_FORMAT " > %" GST_TIME_FORMAT, GST_TIME_ARGS (last_payload->ts), GST_TIME_ARGS (preroll_time)); if (G_UNLIKELY (!GST_CLOCK_TIME_IS_VALID (last_payload->ts) || last_payload->ts <= preroll_time)) { GST_LOG_OBJECT (stream->pad, "not beyond preroll point yet"); return FALSE; } } if (G_UNLIKELY (num_no_data > 0)) return FALSE; return TRUE; } #if 0 static gboolean gst_asf_demux_have_mutually_exclusive_active_stream (GstASFDemux * demux, AsfStream * stream) { GSList *l; for (l = demux->mut_ex_streams; l != NULL; l = l->next) { guint8 *mes; /* check for each mutual exclusion group whether it affects this stream */ for (mes = (guint8 *) l->data; mes != NULL && *mes != 0xff; ++mes) { if (*mes == stream->id) { /* we are in this group; let's check if we've already activated streams * that are in the same group (and hence mutually exclusive to this * one) */ for (mes = (guint8 *) l->data; mes != NULL && *mes != 0xff; ++mes) { guint i; for (i = 0; i < demux->num_streams; ++i) { if (demux->stream[i].id == *mes && demux->stream[i].active) { GST_LOG_OBJECT (demux, "stream with ID %d is mutually exclusive " "to already active stream with ID %d", stream->id, demux->stream[i].id); return TRUE; } } } /* we can only be in this group once, let's break out and move on to 
* the next mutual exclusion group */
          break;
        }
      }
    }

  return FALSE;
}
#endif

/* Records the first queued payload timestamp as the segment timestamp once
 * first_ts is known; in push mode it also (re)computes the segment via a
 * synthetic seek so playback starts at that timestamp. */
static void
gst_asf_demux_check_segment_ts (GstASFDemux * demux, GstClockTime payload_ts)
{
  /* remember the first queued timestamp for the segment */
  if (G_UNLIKELY (!GST_CLOCK_TIME_IS_VALID (demux->segment_ts) &&
          GST_CLOCK_TIME_IS_VALID (demux->first_ts))) {
    GST_DEBUG_OBJECT (demux, "segment ts: %" GST_TIME_FORMAT,
        GST_TIME_ARGS (demux->first_ts));
    demux->segment_ts = payload_ts;
    /* always note, but only determines segment when streaming */
    if (demux->streaming)
      gst_segment_do_seek (&demux->segment, demux->in_segment.rate,
          GST_FORMAT_TIME, (GstSeekFlags) demux->segment.flags,
          GST_SEEK_TYPE_SET, demux->segment_ts, GST_SEEK_TYPE_NONE, 0, NULL);
  }
}

/* Determines demux->first_ts as the smallest queued payload timestamp
 * across all streams, then rebases every queued payload so timestamps
 * start at zero. Returns FALSE (try again later) while any stream still
 * lacks a usable timestamp, unless 'force' is set. */
static gboolean
gst_asf_demux_check_first_ts (GstASFDemux * demux, gboolean force)
{
  if (G_UNLIKELY (!GST_CLOCK_TIME_IS_VALID (demux->first_ts))) {
    GstClockTime first_ts = GST_CLOCK_TIME_NONE;
    int i;

    /* go through each stream, find smallest timestamp */
    for (i = 0; i < demux->num_streams; ++i) {
      AsfStream *stream;
      int j;
      GstClockTime stream_min_ts = GST_CLOCK_TIME_NONE;
      GstClockTime stream_min_ts2 = GST_CLOCK_TIME_NONE;   /* second smallest timestamp */

      stream = &demux->stream[i];

      for (j = 0; j < stream->payloads->len; ++j) {
        AsfPayload *payload = &g_array_index (stream->payloads, AsfPayload, j);
        if (GST_CLOCK_TIME_IS_VALID (payload->ts) &&
            (!GST_CLOCK_TIME_IS_VALID (stream_min_ts) ||
                stream_min_ts > payload->ts)) {
          stream_min_ts = payload->ts;
        }
        if (GST_CLOCK_TIME_IS_VALID (payload->ts) &&
            payload->ts > stream_min_ts &&
            (!GST_CLOCK_TIME_IS_VALID (stream_min_ts2) ||
                stream_min_ts2 > payload->ts)) {
          stream_min_ts2 = payload->ts;
        }
      }

      /* there are some DVR ms files where first packet has TS of 0 (instead of
         -1) while subsequent packets have regular (significantly larger)
         timestamps. If we don't deal with it, we may end up with huge gap in
         timestamps which makes playback stuck.
The 0 timestamp may also be valid though, if the second packet timestamp continues from it. I havent found a better way to distinguish between these two, except to set an arbitrary boundary and disregard the first 0 timestamp if the second timestamp is bigger than the boundary) */ if (stream_min_ts == 0 && stream_min_ts2 == GST_CLOCK_TIME_NONE && !force) /* still waiting for the second timestamp */ return FALSE; if (stream_min_ts == 0 && stream_min_ts2 > GST_SECOND) /* first timestamp is 0 and second is significantly larger, disregard the 0 */ stream_min_ts = stream_min_ts2; /* if we don't have timestamp for this stream, wait for more data */ if (!GST_CLOCK_TIME_IS_VALID (stream_min_ts) && !force) return FALSE; if (GST_CLOCK_TIME_IS_VALID (stream_min_ts) && (!GST_CLOCK_TIME_IS_VALID (first_ts) || first_ts > stream_min_ts)) first_ts = stream_min_ts; } if (!GST_CLOCK_TIME_IS_VALID (first_ts)) /* can happen with force = TRUE */ first_ts = 0; demux->first_ts = first_ts; /* update packets queued before we knew first timestamp */ for (i = 0; i < demux->num_streams; ++i) { AsfStream *stream; int j; stream = &demux->stream[i]; for (j = 0; j < stream->payloads->len; ++j) { AsfPayload *payload = &g_array_index (stream->payloads, AsfPayload, j); if (GST_CLOCK_TIME_IS_VALID (payload->ts)) { if (payload->ts > first_ts) payload->ts -= first_ts; else payload->ts = 0; } } } } gst_asf_demux_check_segment_ts (demux, 0); return TRUE; } static gboolean gst_asf_demux_update_caps_from_payload (GstASFDemux * demux, AsfStream * stream) { /* try to determine whether the stream is AC-3 or MPEG; In dvr-ms the codecTag is unreliable and often set wrong, inspecting the data is the only way that seem to be working */ GstTypeFindProbability prob = GST_TYPE_FIND_NONE; GstCaps *caps = NULL; int i; GstAdapter *adapter = gst_adapter_new (); for (i = 0; i < stream->payloads->len && prob < GST_TYPE_FIND_LIKELY; ++i) { const guint8 *data; AsfPayload *payload; int len; payload = &g_array_index 
(stream->payloads, AsfPayload, i); gst_adapter_push (adapter, gst_buffer_ref (payload->buf)); len = gst_adapter_available (adapter); data = gst_adapter_map (adapter, len); again: #define MIN_LENGTH 128 /* look for the sync points */ while (TRUE) { if (len < MIN_LENGTH || /* give typefind something to work on */ (data[0] == 0x0b && data[1] == 0x77) || /* AC-3 sync point */ (data[0] == 0xFF && ((data[1] & 0xF0) >> 4) == 0xF)) /* MPEG sync point */ break; ++data; --len; } gst_caps_take (&caps, gst_type_find_helper_for_data (GST_OBJECT (demux), data, len, &prob)); if (prob < GST_TYPE_FIND_LIKELY) { ++data; --len; if (len > MIN_LENGTH) /* this wasn't it, look for another sync point */ goto again; } gst_adapter_unmap (adapter); } gst_object_unref (adapter); if (caps) { gst_caps_take (&stream->caps, caps); return TRUE; } else { return FALSE; } } static gboolean gst_asf_demux_check_activate_streams (GstASFDemux * demux, gboolean force) { guint i, actual_streams = 0; if (demux->activated_streams) return TRUE; if (G_UNLIKELY (!gst_asf_demux_check_first_ts (demux, force))) return FALSE; if (!all_streams_prerolled (demux) && !force) { GST_DEBUG_OBJECT (demux, "not all streams with data beyond preroll yet"); return FALSE; } for (i = 0; i < demux->num_streams; ++i) { AsfStream *stream = &demux->stream[i]; if (stream->payloads->len > 0) { if (stream->inspect_payload && /* dvr-ms required payload inspection */ !stream->active && /* do not inspect active streams (caps were already set) */ !gst_asf_demux_update_caps_from_payload (demux, stream) && /* failed to determine caps */ stream->payloads->len < 20) { /* if we couldn't determine the caps from 20 packets then just give up and use whatever was in codecTag */ /* try to gather some more data */ return FALSE; } /* we don't check mutual exclusion stuff here; either we have data for * a stream, then we active it, or we don't, then we'll ignore it */ GST_LOG_OBJECT (stream->pad, "is prerolled - activate!"); 
gst_asf_demux_activate_stream (demux, stream); actual_streams += 1; } else { GST_LOG_OBJECT (stream->pad, "no data, ignoring stream"); } } if (actual_streams == 0) { /* We don't have any streams activated ! */ GST_ERROR_OBJECT (demux, "No streams activated!"); return FALSE; } gst_asf_demux_release_old_pads (demux); demux->activated_streams = TRUE; GST_LOG_OBJECT (demux, "signalling no more pads"); gst_element_no_more_pads (GST_ELEMENT (demux)); return TRUE; } /* returns the stream that has a complete payload with the lowest timestamp * queued, or NULL (we push things by timestamp because during the internal * prerolling we might accumulate more data then the external queues can take, * so we'd lock up if we pushed all accumulated data for stream N in one go) */ static AsfStream * gst_asf_demux_find_stream_with_complete_payload (GstASFDemux * demux) { AsfPayload *best_payload = NULL; AsfStream *best_stream = NULL; guint i; for (i = 0; i < demux->num_streams; ++i) { AsfStream *stream; int j; stream = &demux->stream[i]; /* Don't push any data until we have at least one payload that falls within * the current segment. This way we can remove out-of-segment payloads that * don't need to be decoded after a seek, sending only data from the * keyframe directly before our segment start */ if (stream->payloads->len > 0) { AsfPayload *payload = NULL; gint last_idx; if (GST_ASF_DEMUX_IS_REVERSE_PLAYBACK (demux->segment)) { /* Reverse playback */ if (stream->is_video) { /* We have to push payloads from KF to the first frame we accumulated (reverse order) */ if (stream->reverse_kf_ready) { payload = &g_array_index (stream->payloads, AsfPayload, stream->kf_pos); if (G_UNLIKELY (!GST_CLOCK_TIME_IS_VALID (payload->ts))) { /* TODO : remove payload from the list? 
*/ continue; } } else { continue; } } else { /* find first complete payload with timestamp */ for (j = stream->payloads->len - 1; j >= 0 && (payload == NULL || !GST_CLOCK_TIME_IS_VALID (payload->ts)); --j) { payload = &g_array_index (stream->payloads, AsfPayload, j); } /* If there's a complete payload queued for this stream */ if (!gst_asf_payload_is_complete (payload)) continue; } } else { /* find last payload with timestamp */ for (last_idx = stream->payloads->len - 1; last_idx >= 0 && (payload == NULL || !GST_CLOCK_TIME_IS_VALID (payload->ts)); --last_idx) { payload = &g_array_index (stream->payloads, AsfPayload, last_idx); } /* if this is first payload after seek we might need to update the segment */ if (GST_CLOCK_TIME_IS_VALID (payload->ts)) gst_asf_demux_check_segment_ts (demux, payload->ts); if (G_UNLIKELY (GST_CLOCK_TIME_IS_VALID (payload->ts) && (payload->ts < demux->segment.start))) { if (G_UNLIKELY ((!demux->keyunit_sync) && (!demux->accurate) && payload->keyframe)) { GST_DEBUG_OBJECT (stream->pad, "Found keyframe, updating segment start to %" GST_TIME_FORMAT, GST_TIME_ARGS (payload->ts)); demux->segment.start = payload->ts; demux->segment.time = payload->ts; } else { GST_DEBUG_OBJECT (stream->pad, "Last queued payload has timestamp %" GST_TIME_FORMAT " which is before our segment start %" GST_TIME_FORMAT ", not pushing yet", GST_TIME_ARGS (payload->ts), GST_TIME_ARGS (demux->segment.start)); continue; } } payload = NULL; /* find first complete payload with timestamp */ for (j = 0; j < stream->payloads->len && (payload == NULL || !GST_CLOCK_TIME_IS_VALID (payload->ts)); ++j) { payload = &g_array_index (stream->payloads, AsfPayload, j); } /* Now see if there's a complete payload queued for this stream */ if (!gst_asf_payload_is_complete (payload)) continue; } /* ... 
and whether its timestamp is lower than the current best */ if (best_stream == NULL || best_payload->ts > payload->ts) { best_stream = stream; best_payload = payload; } } } return best_stream; } static GstFlowReturn gst_asf_demux_push_complete_payloads (GstASFDemux * demux, gboolean force) { AsfStream *stream; GstFlowReturn ret = GST_FLOW_OK; if (G_UNLIKELY (!demux->activated_streams)) { if (!gst_asf_demux_check_activate_streams (demux, force)) return GST_FLOW_OK; /* streams are now activated */ } while ((stream = gst_asf_demux_find_stream_with_complete_payload (demux))) { AsfPayload *payload; GstClockTime timestamp = GST_CLOCK_TIME_NONE; GstClockTime duration = GST_CLOCK_TIME_NONE; /* wait until we had a chance to "lock on" some payload's timestamp */ if (G_UNLIKELY (demux->need_newsegment && !GST_CLOCK_TIME_IS_VALID (demux->segment_ts))) return GST_FLOW_OK; if (GST_ASF_DEMUX_IS_REVERSE_PLAYBACK (demux->segment) && stream->is_video && stream->payloads->len) { payload = &g_array_index (stream->payloads, AsfPayload, stream->kf_pos); } else { payload = &g_array_index (stream->payloads, AsfPayload, 0); } /* do we need to send a newsegment event */ if ((G_UNLIKELY (demux->need_newsegment))) { GstEvent *segment_event; /* safe default if insufficient upstream info */ if (!GST_CLOCK_TIME_IS_VALID (demux->in_gap)) demux->in_gap = 0; if (demux->segment.stop == GST_CLOCK_TIME_NONE && demux->segment.duration > 0) { /* slight HACK; prevent clipping of last bit */ demux->segment.stop = demux->segment.duration + demux->in_gap; } /* FIXME : only if ACCURATE ! 
*/ if (G_LIKELY (!demux->keyunit_sync && !demux->accurate && (GST_CLOCK_TIME_IS_VALID (payload->ts))) && !GST_ASF_DEMUX_IS_REVERSE_PLAYBACK (demux->segment)) { GST_DEBUG ("Adjusting newsegment start to %" GST_TIME_FORMAT, GST_TIME_ARGS (payload->ts)); demux->segment.start = payload->ts; demux->segment.time = payload->ts; } GST_DEBUG_OBJECT (demux, "sending new-segment event %" GST_SEGMENT_FORMAT, &demux->segment); /* note: we fix up all timestamps to start from 0, so this should be ok */ segment_event = gst_event_new_segment (&demux->segment); if (demux->segment_seqnum) gst_event_set_seqnum (segment_event, demux->segment_seqnum); gst_asf_demux_send_event_unlocked (demux, segment_event); /* now post any global tags we may have found */ if (demux->taglist == NULL) { demux->taglist = gst_tag_list_new_empty (); gst_tag_list_set_scope (demux->taglist, GST_TAG_SCOPE_GLOBAL); } gst_tag_list_add (demux->taglist, GST_TAG_MERGE_REPLACE, GST_TAG_CONTAINER_FORMAT, "ASF", NULL); GST_DEBUG_OBJECT (demux, "global tags: %" GST_PTR_FORMAT, demux->taglist); gst_asf_demux_send_event_unlocked (demux, gst_event_new_tag (demux->taglist)); demux->taglist = NULL; demux->need_newsegment = FALSE; demux->segment_seqnum = 0; demux->segment_running = TRUE; } /* Do we have tags pending for this stream? */ if (G_UNLIKELY (stream->pending_tags)) { GST_LOG_OBJECT (stream->pad, "%" GST_PTR_FORMAT, stream->pending_tags); gst_pad_push_event (stream->pad, gst_event_new_tag (stream->pending_tags)); stream->pending_tags = NULL; } /* We have the whole packet now so we should push the packet to * the src pad now. 
First though we should check if we need to do * descrambling */ if (G_UNLIKELY (stream->span > 1)) { gst_asf_demux_descramble_buffer (demux, stream, &payload->buf); } payload->buf = gst_buffer_make_writable (payload->buf); if (G_LIKELY (!payload->keyframe)) { GST_BUFFER_FLAG_SET (payload->buf, GST_BUFFER_FLAG_DELTA_UNIT); } if (G_UNLIKELY (stream->discont)) { GST_DEBUG_OBJECT (stream->pad, "marking DISCONT on stream"); GST_BUFFER_FLAG_SET (payload->buf, GST_BUFFER_FLAG_DISCONT); stream->discont = FALSE; } if (G_UNLIKELY (stream->is_video && payload->par_x && payload->par_y && (payload->par_x != stream->par_x) && (payload->par_y != stream->par_y))) { GST_DEBUG ("Updating PAR (%d/%d => %d/%d)", stream->par_x, stream->par_y, payload->par_x, payload->par_y); stream->par_x = payload->par_x; stream->par_y = payload->par_y; stream->caps = gst_caps_make_writable (stream->caps); gst_caps_set_simple (stream->caps, "pixel-aspect-ratio", GST_TYPE_FRACTION, stream->par_x, stream->par_y, NULL); gst_pad_set_caps (stream->pad, stream->caps); } if (G_UNLIKELY (stream->interlaced != payload->interlaced)) { GST_DEBUG ("Updating interlaced status (%d => %d)", stream->interlaced, payload->interlaced); stream->interlaced = payload->interlaced; stream->caps = gst_caps_make_writable (stream->caps); gst_caps_set_simple (stream->caps, "interlace-mode", G_TYPE_BOOLEAN, (stream->interlaced ? "mixed" : "progressive"), NULL); gst_pad_set_caps (stream->pad, stream->caps); } /* (sort of) interpolate timestamps using upstream "frame of reference", * typically useful for live src, but might (unavoidably) mess with * position reporting if a live src is playing not so live content * (e.g. 
rtspsrc taking some time to fall back to tcp) */ timestamp = payload->ts; if (GST_CLOCK_TIME_IS_VALID (timestamp) && !GST_ASF_DEMUX_IS_REVERSE_PLAYBACK (demux->segment)) { timestamp += demux->in_gap; /* Check if we're after the segment already, if so no need to push * anything here */ if (demux->segment.stop != -1 && timestamp > demux->segment.stop) { GST_DEBUG_OBJECT (stream->pad, "Payload after segment stop %" GST_TIME_FORMAT, GST_TIME_ARGS (demux->segment.stop)); ret = gst_flow_combiner_update_pad_flow (demux->flowcombiner, stream->pad, GST_FLOW_EOS); gst_buffer_unref (payload->buf); payload->buf = NULL; g_array_remove_index (stream->payloads, 0); /* Break out as soon as we have an issue */ if (G_UNLIKELY (ret != GST_FLOW_OK)) break; continue; } } GST_BUFFER_PTS (payload->buf) = timestamp; if (payload->duration == GST_CLOCK_TIME_NONE && stream->ext_props.avg_time_per_frame != 0) { duration = stream->ext_props.avg_time_per_frame * 100; } else { duration = payload->duration; } GST_BUFFER_DURATION (payload->buf) = duration; /* FIXME: we should really set durations on buffers if we can */ GST_LOG_OBJECT (stream->pad, "pushing buffer, %" GST_PTR_FORMAT, payload->buf); if (GST_ASF_DEMUX_IS_REVERSE_PLAYBACK (demux->segment) && stream->is_video) { if (stream->reverse_kf_ready == TRUE && stream->kf_pos == 0) { GST_BUFFER_FLAG_SET (payload->buf, GST_BUFFER_FLAG_DISCONT); } } else if (GST_ASF_DEMUX_IS_REVERSE_PLAYBACK (demux->segment)) { GST_BUFFER_FLAG_SET (payload->buf, GST_BUFFER_FLAG_DISCONT); } if (stream->active) { if (G_UNLIKELY (stream->first_buffer)) { if (stream->streamheader != NULL) { GST_DEBUG_OBJECT (stream->pad, "Pushing streamheader before first buffer"); gst_pad_push (stream->pad, gst_buffer_ref (stream->streamheader)); } stream->first_buffer = FALSE; } if (GST_CLOCK_TIME_IS_VALID (timestamp) && timestamp > demux->segment.position) { demux->segment.position = timestamp; if (GST_CLOCK_TIME_IS_VALID (duration)) demux->segment.position += timestamp; } ret = 
gst_pad_push (stream->pad, payload->buf);
      ret =
          gst_flow_combiner_update_pad_flow (demux->flowcombiner, stream->pad,
          ret);
    } else {
      /* inactive stream: drop the payload */
      gst_buffer_unref (payload->buf);
      ret = GST_FLOW_OK;
    }
    payload->buf = NULL;
    /* reverse video playback walks the payload array backwards from the
     * keyframe position; forward playback always pops the head */
    if (GST_ASF_DEMUX_IS_REVERSE_PLAYBACK (demux->segment) && stream->is_video
        && stream->reverse_kf_ready) {
      g_array_remove_index (stream->payloads, stream->kf_pos);
      stream->kf_pos--;

      if (stream->reverse_kf_ready == TRUE && stream->kf_pos < 0) {
        stream->kf_pos = 0;
        stream->reverse_kf_ready = FALSE;
      }
    } else {
      g_array_remove_index (stream->payloads, 0);
    }

    /* Break out as soon as we have an issue */
    if (G_UNLIKELY (ret != GST_FLOW_OK))
      break;
  }

  return ret;
}

/* Returns TRUE if 'buf' starts with a valid ASF HEADER object; buffers
 * shorter than one object header are rejected. */
static gboolean
gst_asf_demux_check_buffer_is_header (GstASFDemux * demux, GstBuffer * buf)
{
  AsfObject obj;
  GstMapInfo map;
  gboolean valid;

  g_assert (buf != NULL);

  GST_LOG_OBJECT (demux, "Checking if buffer is a header");

  gst_buffer_map (buf, &map, GST_MAP_READ);

  /* we return false on buffer too small */
  if (map.size < ASF_OBJECT_HEADER_SIZE) {
    gst_buffer_unmap (buf, &map);
    return FALSE;
  }

  /* check if it is a header */
  valid =
      asf_demux_peek_object (demux, map.data, ASF_OBJECT_HEADER_SIZE, &obj,
      TRUE);
  gst_buffer_unmap (buf, &map);
  if (valid && obj.id == ASF_OBJ_HEADER) {
    return TRUE;
  }
  return FALSE;
}

/* Pull-mode: checks whether another ASF file (a "chained" ASF) starts right
 * after the current packet data; on success updates demux->base_offset and
 * returns TRUE. */
static gboolean
gst_asf_demux_check_chained_asf (GstASFDemux * demux)
{
  guint64 off = demux->data_offset + (demux->packet * demux->packet_size);
  GstFlowReturn ret = GST_FLOW_OK;
  GstBuffer *buf = NULL;
  gboolean header = FALSE;

  /* TODO maybe we should skip index objects after the data and look
   * further for a new header */
  if (gst_asf_demux_pull_data (demux, off, ASF_OBJECT_HEADER_SIZE, &buf, &ret)) {
    g_assert (buf != NULL);
    /* check if it is a header */
    if (gst_asf_demux_check_buffer_is_header (demux, buf)) {
      GST_DEBUG_OBJECT (demux, "new base offset: %" G_GUINT64_FORMAT, off);
      demux->base_offset = off;
      header = TRUE;
    }

    gst_buffer_unref (buf);
  }

  return header;
}

/* Pull-mode streaming task: reads headers/indices once, then one packet
 * (or a burst of speed_packets) per iteration. */
static void
gst_asf_demux_loop (GstASFDemux * demux)
{ GstFlowReturn flow = GST_FLOW_OK; GstBuffer *buf = NULL; guint64 off; if (G_UNLIKELY (demux->state == GST_ASF_DEMUX_STATE_HEADER)) { if (!gst_asf_demux_pull_headers (demux, &flow)) { goto pause; } flow = gst_asf_demux_pull_indices (demux); if (flow != GST_FLOW_OK) goto pause; } g_assert (demux->state == GST_ASF_DEMUX_STATE_DATA); if (G_UNLIKELY (demux->num_packets != 0 && demux->packet >= demux->num_packets)) goto eos; GST_LOG_OBJECT (demux, "packet %u/%u", (guint) demux->packet + 1, (guint) demux->num_packets); off = demux->data_offset + (demux->packet * demux->packet_size); if (G_UNLIKELY (!gst_asf_demux_pull_data (demux, off, demux->packet_size * demux->speed_packets, &buf, &flow))) { GST_DEBUG_OBJECT (demux, "got flow %s", gst_flow_get_name (flow)); if (flow == GST_FLOW_EOS) { goto eos; } else if (flow == GST_FLOW_FLUSHING) { GST_DEBUG_OBJECT (demux, "Not fatal"); goto pause; } else { goto read_failed; } } if (G_LIKELY (demux->speed_packets == 1)) { GstAsfDemuxParsePacketError err; err = gst_asf_demux_parse_packet (demux, buf); if (G_UNLIKELY (err != GST_ASF_DEMUX_PARSE_PACKET_ERROR_NONE)) { /* when we don't know when the data object ends, we should check * for a chained asf */ if (demux->num_packets == 0) { if (gst_asf_demux_check_buffer_is_header (demux, buf)) { GST_INFO_OBJECT (demux, "Chained asf found"); demux->base_offset = off; gst_asf_demux_reset (demux, TRUE); gst_buffer_unref (buf); return; } } /* FIXME: We should tally up fatal errors and error out only * after a few broken packets in a row? 
*/ GST_INFO_OBJECT (demux, "Ignoring recoverable parse error"); gst_buffer_unref (buf); if (GST_ASF_DEMUX_IS_REVERSE_PLAYBACK (demux->segment) && !demux->seek_to_cur_pos) { --demux->packet; if (demux->packet < 0) { goto eos; } } else { ++demux->packet; } return; } flow = gst_asf_demux_push_complete_payloads (demux, FALSE); if (GST_ASF_DEMUX_IS_REVERSE_PLAYBACK (demux->segment) && !demux->seek_to_cur_pos) { --demux->packet; if (demux->packet < 0) { goto eos; } } else { ++demux->packet; } } else { guint n; for (n = 0; n < demux->speed_packets; n++) { GstBuffer *sub; GstAsfDemuxParsePacketError err; sub = gst_buffer_copy_region (buf, GST_BUFFER_COPY_ALL, n * demux->packet_size, demux->packet_size); err = gst_asf_demux_parse_packet (demux, sub); if (G_UNLIKELY (err != GST_ASF_DEMUX_PARSE_PACKET_ERROR_NONE)) { /* when we don't know when the data object ends, we should check * for a chained asf */ if (demux->num_packets == 0) { if (gst_asf_demux_check_buffer_is_header (demux, sub)) { GST_INFO_OBJECT (demux, "Chained asf found"); demux->base_offset = off + n * demux->packet_size; gst_asf_demux_reset (demux, TRUE); gst_buffer_unref (sub); gst_buffer_unref (buf); return; } } /* FIXME: We should tally up fatal errors and error out only * after a few broken packets in a row? */ GST_INFO_OBJECT (demux, "Ignoring recoverable parse error"); flow = GST_FLOW_OK; } gst_buffer_unref (sub); if (err == GST_ASF_DEMUX_PARSE_PACKET_ERROR_NONE) flow = gst_asf_demux_push_complete_payloads (demux, FALSE); ++demux->packet; } /* reset speed pull */ demux->speed_packets = 1; } gst_buffer_unref (buf); if (G_UNLIKELY ((demux->num_packets > 0 && demux->packet >= demux->num_packets) || flow == GST_FLOW_EOS)) { GST_LOG_OBJECT (demux, "reached EOS"); goto eos; } if (G_UNLIKELY (flow != GST_FLOW_OK)) { GST_DEBUG_OBJECT (demux, "pushing complete payloads failed"); goto pause; } /* check if we're at the end of the configured segment */ /* FIXME: check if segment end reached etc. 
*/ return; eos: { /* if we haven't activated our streams yet, this might be because we have * less data queued than required for preroll; force stream activation and * send any pending payloads before sending EOS */ if (!demux->activated_streams) flow = gst_asf_demux_push_complete_payloads (demux, TRUE); /* we want to push an eos or post a segment-done in any case */ if (demux->segment.flags & GST_SEEK_FLAG_SEGMENT) { gint64 stop; /* for segment playback we need to post when (in stream time) * we stopped, this is either stop (when set) or the duration. */ if ((stop = demux->segment.stop) == -1) stop = demux->segment.duration; GST_INFO_OBJECT (demux, "Posting segment-done, at end of segment"); gst_element_post_message (GST_ELEMENT_CAST (demux), gst_message_new_segment_done (GST_OBJECT (demux), GST_FORMAT_TIME, stop)); gst_asf_demux_send_event_unlocked (demux, gst_event_new_segment_done (GST_FORMAT_TIME, stop)); } else if (flow != GST_FLOW_EOS) { /* check if we have a chained asf, in case, we don't eos yet */ if (gst_asf_demux_check_chained_asf (demux)) { GST_INFO_OBJECT (demux, "Chained ASF starting"); gst_asf_demux_reset (demux, TRUE); return; } } if (!(demux->segment.flags & GST_SEEK_FLAG_SEGMENT)) { if (demux->activated_streams) { /* normal playback, send EOS to all linked pads */ GST_INFO_OBJECT (demux, "Sending EOS, at end of stream"); gst_asf_demux_send_event_unlocked (demux, gst_event_new_eos ()); } else { GST_WARNING_OBJECT (demux, "EOS without exposed streams"); flow = GST_FLOW_EOS; } } /* ... and fall through to pause */ } pause: { GST_DEBUG_OBJECT (demux, "pausing task, flow return: %s", gst_flow_get_name (flow)); demux->segment_running = FALSE; gst_pad_pause_task (demux->sinkpad); /* For the error cases */ if (flow == GST_FLOW_EOS && !demux->activated_streams) { GST_ELEMENT_ERROR (demux, STREAM, WRONG_TYPE, (NULL), ("This doesn't seem to be an ASF file")); } else if (flow < GST_FLOW_EOS || flow == GST_FLOW_NOT_LINKED) { /* Post an error. 
Hopefully something else already has, but if not... */ GST_ELEMENT_FLOW_ERROR (demux, flow); gst_asf_demux_send_event_unlocked (demux, gst_event_new_eos ()); } return; } /* ERRORS */ read_failed: { GST_DEBUG_OBJECT (demux, "Read failed, doh"); flow = GST_FLOW_EOS; goto pause; } #if 0 /* See FIXMEs above */ parse_error: { gst_buffer_unref (buf); GST_ELEMENT_ERROR (demux, STREAM, DEMUX, (NULL), ("Error parsing ASF packet %u", (guint) demux->packet)); gst_asf_demux_send_event_unlocked (demux, gst_event_new_eos ()); flow = GST_FLOW_ERROR; goto pause; } #endif } #define GST_ASF_DEMUX_CHECK_HEADER_YES 0 #define GST_ASF_DEMUX_CHECK_HEADER_NO 1 #define GST_ASF_DEMUX_CHECK_HEADER_NEED_DATA 2 static gint gst_asf_demux_check_header (GstASFDemux * demux) { AsfObject obj; guint8 *cdata = (guint8 *) gst_adapter_map (demux->adapter, ASF_OBJECT_HEADER_SIZE); if (cdata == NULL) /* need more data */ return GST_ASF_DEMUX_CHECK_HEADER_NEED_DATA; if (asf_demux_peek_object (demux, cdata, ASF_OBJECT_HEADER_SIZE, &obj, FALSE && obj.id == ASF_OBJ_HEADER)) return GST_ASF_DEMUX_CHECK_HEADER_YES; return GST_ASF_DEMUX_CHECK_HEADER_NO; } static GstFlowReturn gst_asf_demux_chain (GstPad * pad, GstObject * parent, GstBuffer * buf) { GstFlowReturn ret = GST_FLOW_OK; GstASFDemux *demux; demux = GST_ASF_DEMUX (parent); GST_LOG_OBJECT (demux, "buffer: size=%" G_GSIZE_FORMAT ", offset=%" G_GINT64_FORMAT ", time=%" GST_TIME_FORMAT, gst_buffer_get_size (buf), GST_BUFFER_OFFSET (buf), GST_TIME_ARGS (GST_BUFFER_TIMESTAMP (buf))); if (G_UNLIKELY (GST_BUFFER_IS_DISCONT (buf))) { GST_DEBUG_OBJECT (demux, "received DISCONT"); gst_asf_demux_mark_discont (demux); } if (G_UNLIKELY ((!GST_CLOCK_TIME_IS_VALID (demux->in_gap) && GST_BUFFER_TIMESTAMP_IS_VALID (buf)))) { demux->in_gap = GST_BUFFER_TIMESTAMP (buf) - demux->in_segment.start; GST_DEBUG_OBJECT (demux, "upstream segment start %" GST_TIME_FORMAT ", interpolation gap: %" GST_TIME_FORMAT, GST_TIME_ARGS (demux->in_segment.start), GST_TIME_ARGS 
(demux->in_gap)); } gst_adapter_push (demux->adapter, buf); switch (demux->state) { case GST_ASF_DEMUX_STATE_INDEX:{ gint result = gst_asf_demux_check_header (demux); if (result == GST_ASF_DEMUX_CHECK_HEADER_NEED_DATA) /* need more data */ break; if (result == GST_ASF_DEMUX_CHECK_HEADER_NO) { /* we don't care about this, probably an index */ /* TODO maybe would be smarter to skip all the indices * until we got a new header or EOS to decide */ GST_LOG_OBJECT (demux, "Received index object, its EOS"); goto eos; } else { GST_INFO_OBJECT (demux, "Chained asf starting"); /* cleanup and get ready for a chained asf */ gst_asf_demux_reset (demux, TRUE); /* fall through */ } } case GST_ASF_DEMUX_STATE_HEADER:{ ret = gst_asf_demux_chain_headers (demux); if (demux->state != GST_ASF_DEMUX_STATE_DATA) break; /* otherwise fall through */ } case GST_ASF_DEMUX_STATE_DATA: { guint64 data_size; data_size = demux->packet_size; while (gst_adapter_available (demux->adapter) >= data_size) { GstBuffer *buf; GstAsfDemuxParsePacketError err; /* we don't know the length of the stream * check for a chained asf everytime */ if (demux->num_packets == 0) { gint result = gst_asf_demux_check_header (demux); if (result == GST_ASF_DEMUX_CHECK_HEADER_YES) { GST_INFO_OBJECT (demux, "Chained asf starting"); /* cleanup and get ready for a chained asf */ gst_asf_demux_reset (demux, TRUE); break; } } else if (G_UNLIKELY (demux->num_packets != 0 && demux->packet >= 0 && demux->packet >= demux->num_packets)) { /* do not overshoot data section when streaming */ break; } buf = gst_adapter_take_buffer (demux->adapter, data_size); /* FIXME: We should tally up fatal errors and error out only * after a few broken packets in a row? 
*/ err = gst_asf_demux_parse_packet (demux, buf); gst_buffer_unref (buf); if (G_LIKELY (err == GST_ASF_DEMUX_PARSE_PACKET_ERROR_NONE)) ret = gst_asf_demux_push_complete_payloads (demux, FALSE); else GST_WARNING_OBJECT (demux, "Parse error"); if (demux->packet >= 0) ++demux->packet; } if (G_UNLIKELY (demux->num_packets != 0 && demux->packet >= 0 && demux->packet >= demux->num_packets)) { demux->state = GST_ASF_DEMUX_STATE_INDEX; } break; } default: g_assert_not_reached (); } done: if (ret != GST_FLOW_OK) GST_DEBUG_OBJECT (demux, "flow: %s", gst_flow_get_name (ret)); return ret; eos: { GST_DEBUG_OBJECT (demux, "Handled last packet, setting EOS"); ret = GST_FLOW_EOS; goto done; } } static inline gboolean gst_asf_demux_skip_bytes (guint num_bytes, guint8 ** p_data, guint64 * p_size) { if (*p_size < num_bytes) return FALSE; *p_data += num_bytes; *p_size -= num_bytes; return TRUE; } static inline guint8 gst_asf_demux_get_uint8 (guint8 ** p_data, guint64 * p_size) { guint8 ret; g_assert (*p_size >= 1); ret = GST_READ_UINT8 (*p_data); *p_data += sizeof (guint8); *p_size -= sizeof (guint8); return ret; } static inline guint16 gst_asf_demux_get_uint16 (guint8 ** p_data, guint64 * p_size) { guint16 ret; g_assert (*p_size >= 2); ret = GST_READ_UINT16_LE (*p_data); *p_data += sizeof (guint16); *p_size -= sizeof (guint16); return ret; } static inline guint32 gst_asf_demux_get_uint32 (guint8 ** p_data, guint64 * p_size) { guint32 ret; g_assert (*p_size >= 4); ret = GST_READ_UINT32_LE (*p_data); *p_data += sizeof (guint32); *p_size -= sizeof (guint32); return ret; } static inline guint64 gst_asf_demux_get_uint64 (guint8 ** p_data, guint64 * p_size) { guint64 ret; g_assert (*p_size >= 8); ret = GST_READ_UINT64_LE (*p_data); *p_data += sizeof (guint64); *p_size -= sizeof (guint64); return ret; } static gboolean gst_asf_demux_get_buffer (GstBuffer ** p_buf, guint num_bytes_to_read, guint8 ** p_data, guint64 * p_size) { *p_buf = NULL; if (*p_size < num_bytes_to_read) return FALSE; 
  /* size was verified by the guard directly above; copy the bytes into a
   * fresh GstBuffer and advance the parse position */
  *p_buf = gst_buffer_new_and_alloc (num_bytes_to_read);
  gst_buffer_fill (*p_buf, 0, *p_data, num_bytes_to_read);
  *p_data += num_bytes_to_read;
  *p_size -= num_bytes_to_read;
  return TRUE;
}

/* Copy the next num_bytes_to_read bytes into a newly-allocated array
 * (*p_buf, owned by the caller, to be released with g_free()), advancing
 * *p_data and shrinking *p_size.  Returns FALSE with *p_buf set to NULL if
 * not enough bytes remain. */
static gboolean
gst_asf_demux_get_bytes (guint8 ** p_buf, guint num_bytes_to_read,
    guint8 ** p_data, guint64 * p_size)
{
  *p_buf = NULL;

  if (*p_size < num_bytes_to_read)
    return FALSE;

  /* NOTE(review): g_memdup() takes a guint length, same type as
   * num_bytes_to_read, so no truncation here; newer GLib deprecates it in
   * favour of g_memdup2() */
  *p_buf = g_memdup (*p_data, num_bytes_to_read);
  *p_data += num_bytes_to_read;
  *p_size -= num_bytes_to_read;
  return TRUE;
}

/* Read a 16-bit little-endian length prefix followed by that many bytes of
 * string data.  On success *p_str holds a NUL-terminated, g_free()able
 * string and *p_strlen (if non-NULL) the raw length from the bitstream.
 * Returns FALSE if the remaining data is too short. */
static gboolean
gst_asf_demux_get_string (gchar ** p_str, guint16 * p_strlen,
    guint8 ** p_data, guint64 * p_size)
{
  guint16 s_length;
  guint8 *s;

  *p_str = NULL;

  if (*p_size < 2)
    return FALSE;

  s_length = gst_asf_demux_get_uint16 (p_data, p_size);

  if (p_strlen)
    *p_strlen = s_length;

  if (s_length == 0) {
    GST_WARNING ("zero-length string");
    *p_str = g_strdup ("");
    return TRUE;
  }

  /* gst_asf_demux_get_bytes() bounds-checks s_length against *p_size */
  if (!gst_asf_demux_get_bytes (&s, s_length, p_data, p_size))
    return FALSE;

  g_assert (s != NULL);

  /* just because They don't exist doesn't
   * mean They are not out to get you ...
*/ if (s[s_length - 1] != '\0') { s = g_realloc (s, s_length + 1); s[s_length] = '\0'; } *p_str = (gchar *) s; return TRUE; } static void gst_asf_demux_get_guid (ASFGuid * guid, guint8 ** p_data, guint64 * p_size) { g_assert (*p_size >= 4 * sizeof (guint32)); guid->v1 = gst_asf_demux_get_uint32 (p_data, p_size); guid->v2 = gst_asf_demux_get_uint32 (p_data, p_size); guid->v3 = gst_asf_demux_get_uint32 (p_data, p_size); guid->v4 = gst_asf_demux_get_uint32 (p_data, p_size); } static gboolean gst_asf_demux_get_stream_audio (asf_stream_audio * audio, guint8 ** p_data, guint64 * p_size) { if (*p_size < (2 + 2 + 4 + 4 + 2 + 2 + 2)) return FALSE; /* WAVEFORMATEX Structure */ audio->codec_tag = gst_asf_demux_get_uint16 (p_data, p_size); audio->channels = gst_asf_demux_get_uint16 (p_data, p_size); audio->sample_rate = gst_asf_demux_get_uint32 (p_data, p_size); audio->byte_rate = gst_asf_demux_get_uint32 (p_data, p_size); audio->block_align = gst_asf_demux_get_uint16 (p_data, p_size); audio->word_size = gst_asf_demux_get_uint16 (p_data, p_size); /* Codec specific data size */ audio->size = gst_asf_demux_get_uint16 (p_data, p_size); if (audio->size > *p_size) { GST_WARNING ("Corrupted audio codec_data (should be at least %u bytes, is %" G_GUINT64_FORMAT " long)", audio->size, *p_size); return FALSE; } return TRUE; } static gboolean gst_asf_demux_get_stream_video (asf_stream_video * video, guint8 ** p_data, guint64 * p_size) { if (*p_size < (4 + 4 + 1 + 2)) return FALSE; video->width = gst_asf_demux_get_uint32 (p_data, p_size); video->height = gst_asf_demux_get_uint32 (p_data, p_size); video->unknown = gst_asf_demux_get_uint8 (p_data, p_size); video->size = gst_asf_demux_get_uint16 (p_data, p_size); return TRUE; } static gboolean gst_asf_demux_get_stream_video_format (asf_stream_video_format * fmt, guint8 ** p_data, guint64 * p_size) { if (*p_size < (4 + 4 + 4 + 2 + 2 + 4 + 4 + 4 + 4 + 4 + 4)) return FALSE; fmt->size = gst_asf_demux_get_uint32 (p_data, p_size); /* Sanity checks 
*/ if (fmt->size < 40) { GST_WARNING ("Corrupted asf_stream_video_format (size < 40)"); return FALSE; } if ((guint64) fmt->size - 4 > *p_size) { GST_WARNING ("Corrupted asf_stream_video_format (codec_data is too small)"); return FALSE; } fmt->width = gst_asf_demux_get_uint32 (p_data, p_size); fmt->height = gst_asf_demux_get_uint32 (p_data, p_size); fmt->planes = gst_asf_demux_get_uint16 (p_data, p_size); fmt->depth = gst_asf_demux_get_uint16 (p_data, p_size); fmt->tag = gst_asf_demux_get_uint32 (p_data, p_size); fmt->image_size = gst_asf_demux_get_uint32 (p_data, p_size); fmt->xpels_meter = gst_asf_demux_get_uint32 (p_data, p_size); fmt->ypels_meter = gst_asf_demux_get_uint32 (p_data, p_size); fmt->num_colors = gst_asf_demux_get_uint32 (p_data, p_size); fmt->imp_colors = gst_asf_demux_get_uint32 (p_data, p_size); return TRUE; } AsfStream * gst_asf_demux_get_stream (GstASFDemux * demux, guint16 id) { guint i; for (i = 0; i < demux->num_streams; i++) { if (demux->stream[i].id == id) return &demux->stream[i]; } if (gst_asf_demux_is_unknown_stream (demux, id)) GST_WARNING ("Segment found for undefined stream: (%d)", id); return NULL; } static AsfStream * gst_asf_demux_setup_pad (GstASFDemux * demux, GstPad * src_pad, GstCaps * caps, guint16 id, gboolean is_video, GstBuffer * streamheader, GstTagList * tags) { AsfStream *stream; gst_pad_use_fixed_caps (src_pad); gst_pad_set_caps (src_pad, caps); gst_pad_set_event_function (src_pad, GST_DEBUG_FUNCPTR (gst_asf_demux_handle_src_event)); gst_pad_set_query_function (src_pad, GST_DEBUG_FUNCPTR (gst_asf_demux_handle_src_query)); stream = &demux->stream[demux->num_streams]; stream->caps = caps; stream->pad = src_pad; stream->id = id; stream->fps_known = !is_video; /* bit hacky for audio */ stream->is_video = is_video; stream->pending_tags = tags; stream->discont = TRUE; stream->first_buffer = TRUE; stream->streamheader = streamheader; if (stream->streamheader) { stream->streamheader = gst_buffer_make_writable (streamheader); 
GST_BUFFER_FLAG_SET (stream->streamheader, GST_BUFFER_FLAG_HEADER); } if (is_video) { GstStructure *st; gint par_x, par_y; st = gst_caps_get_structure (caps, 0); if (gst_structure_get_fraction (st, "pixel-aspect-ratio", &par_x, &par_y) && par_x > 0 && par_y > 0) { GST_DEBUG ("PAR %d/%d", par_x, par_y); stream->par_x = par_x; stream->par_y = par_y; } } stream->payloads = g_array_new (FALSE, FALSE, sizeof (AsfPayload)); /* TODO: create this array during reverse play? */ stream->payloads_rev = g_array_new (FALSE, FALSE, sizeof (AsfPayload)); GST_INFO ("Created pad %s for stream %u with caps %" GST_PTR_FORMAT, GST_PAD_NAME (src_pad), demux->num_streams, caps); ++demux->num_streams; stream->active = FALSE; return stream; } static void gst_asf_demux_add_stream_headers_to_caps (GstASFDemux * demux, GstBuffer * buffer, GstStructure * structure) { GValue arr_val = G_VALUE_INIT; GValue buf_val = G_VALUE_INIT; g_value_init (&arr_val, GST_TYPE_ARRAY); g_value_init (&buf_val, GST_TYPE_BUFFER); gst_value_set_buffer (&buf_val, buffer); gst_value_array_append_and_take_value (&arr_val, &buf_val); gst_structure_take_value (structure, "streamheader", &arr_val); } static AsfStream * gst_asf_demux_add_audio_stream (GstASFDemux * demux, asf_stream_audio * audio, guint16 id, guint8 ** p_data, guint64 * p_size) { GstTagList *tags = NULL; GstBuffer *extradata = NULL; GstPad *src_pad; GstCaps *caps; guint16 size_left = 0; gchar *codec_name = NULL; gchar *name = NULL; size_left = audio->size; /* Create the audio pad */ name = g_strdup_printf ("audio_%u", demux->num_audio_streams); src_pad = gst_pad_new_from_static_template (&audio_src_template, name); g_free (name); /* Swallow up any left over data and set up the * standard properties from the header info */ if (size_left) { GST_INFO_OBJECT (demux, "Audio header contains %d bytes of " "codec specific data", size_left); g_assert (size_left <= *p_size); gst_asf_demux_get_buffer (&extradata, size_left, p_data, p_size); } /* asf_stream_audio is 
the same as gst_riff_strf_auds, but with an * additional two bytes indicating extradata. */ /* FIXME: Handle the channel reorder map here */ caps = gst_riff_create_audio_caps (audio->codec_tag, NULL, (gst_riff_strf_auds *) audio, extradata, NULL, &codec_name, NULL); if (caps == NULL) { caps = gst_caps_new_simple ("audio/x-asf-unknown", "codec_id", G_TYPE_INT, (gint) audio->codec_tag, NULL); } /* Informing about that audio format we just added */ if (codec_name) { tags = gst_tag_list_new (GST_TAG_AUDIO_CODEC, codec_name, NULL); g_free (codec_name); } if (audio->byte_rate > 0) { /* Some ASF files have no bitrate props object (often seen with * ASF files that contain raw audio data). Example files can * be generated with FFmpeg (tested with v2.8.6), like this: * * ffmpeg -i sine-wave.wav -c:a pcm_alaw file.asf * * In this case, if audio->byte_rate is nonzero, use that as * the bitrate. */ guint bitrate = audio->byte_rate * 8; if (tags == NULL) tags = gst_tag_list_new_empty (); /* Add bitrate, but only if there is none set already, since * this is just a fallback in case there is no bitrate tag * already present */ gst_tag_list_add (tags, GST_TAG_MERGE_KEEP, GST_TAG_BITRATE, bitrate, NULL); } if (extradata) gst_buffer_unref (extradata); GST_INFO ("Adding audio stream #%u, id %u codec %u (0x%04x), tags=%" GST_PTR_FORMAT, demux->num_audio_streams, id, audio->codec_tag, audio->codec_tag, tags); ++demux->num_audio_streams; return gst_asf_demux_setup_pad (demux, src_pad, caps, id, FALSE, NULL, tags); } static AsfStream * gst_asf_demux_add_video_stream (GstASFDemux * demux, asf_stream_video_format * video, guint16 id, guint8 ** p_data, guint64 * p_size) { GstTagList *tags = NULL; GstStructure *caps_s; GstBuffer *extradata = NULL; GstPad *src_pad; GstCaps *caps; gchar *str; gchar *name = NULL; gchar *codec_name = NULL; guint64 size_left = video->size - 40; GstBuffer *streamheader = NULL; guint par_w = 1, par_h = 1; /* Create the video pad */ name = g_strdup_printf 
("video_%u", demux->num_video_streams); src_pad = gst_pad_new_from_static_template (&video_src_template, name); g_free (name); /* Now try some gstreamer formatted MIME types (from gst_avi_demux_strf_vids) */ if (size_left) { GST_LOG ("Video header has %" G_GUINT64_FORMAT " bytes of codec specific data (vs %" G_GUINT64_FORMAT ")", size_left, *p_size); g_assert (size_left <= *p_size); gst_asf_demux_get_buffer (&extradata, size_left, p_data, p_size); } GST_DEBUG ("video codec %" GST_FOURCC_FORMAT, GST_FOURCC_ARGS (video->tag)); /* yes, asf_stream_video_format and gst_riff_strf_vids are the same */ caps = gst_riff_create_video_caps (video->tag, NULL, (gst_riff_strf_vids *) video, extradata, NULL, &codec_name); if (caps == NULL) { caps = gst_caps_new_simple ("video/x-asf-unknown", "fourcc", G_TYPE_UINT, video->tag, NULL); } else { GstStructure *s; gint ax, ay; s = gst_asf_demux_get_metadata_for_stream (demux, id); if (gst_structure_get_int (s, "AspectRatioX", &ax) && gst_structure_get_int (s, "AspectRatioY", &ay) && (ax > 0 && ay > 0)) { par_w = ax; par_h = ay; gst_caps_set_simple (caps, "pixel-aspect-ratio", GST_TYPE_FRACTION, ax, ay, NULL); } else { guint ax, ay; /* retry with the global metadata */ GST_DEBUG ("Retrying with global metadata %" GST_PTR_FORMAT, demux->global_metadata); s = demux->global_metadata; if (gst_structure_get_uint (s, "AspectRatioX", &ax) && gst_structure_get_uint (s, "AspectRatioY", &ay)) { GST_DEBUG ("ax:%d, ay:%d", ax, ay); if (ax > 0 && ay > 0) { par_w = ax; par_h = ay; gst_caps_set_simple (caps, "pixel-aspect-ratio", GST_TYPE_FRACTION, ax, ay, NULL); } } } s = gst_caps_get_structure (caps, 0); gst_structure_remove_field (s, "framerate"); } caps_s = gst_caps_get_structure (caps, 0); /* add format field with fourcc to WMV/VC1 caps to differentiate variants */ if (gst_structure_has_name (caps_s, "video/x-wmv")) { str = g_strdup_printf ("%" GST_FOURCC_FORMAT, GST_FOURCC_ARGS (video->tag)); gst_caps_set_simple (caps, "format", G_TYPE_STRING, 
str, NULL); g_free (str); /* check if h264 has codec_data (avc) or streamheaders (bytestream) */ } else if (gst_structure_has_name (caps_s, "video/x-h264")) { const GValue *value = gst_structure_get_value (caps_s, "codec_data"); if (value) { GstBuffer *buf = gst_value_get_buffer (value); GstMapInfo mapinfo; if (gst_buffer_map (buf, &mapinfo, GST_MAP_READ)) { if (mapinfo.size >= 4 && GST_READ_UINT32_BE (mapinfo.data) == 1) { /* this looks like a bytestream start */ streamheader = gst_buffer_ref (buf); gst_asf_demux_add_stream_headers_to_caps (demux, buf, caps_s); gst_structure_remove_field (caps_s, "codec_data"); } gst_buffer_unmap (buf, &mapinfo); } } } /* For a 3D video, set multiview information into the caps based on * what was detected during object parsing */ if (demux->asf_3D_mode != GST_ASF_3D_NONE) { GstVideoMultiviewMode mv_mode = GST_VIDEO_MULTIVIEW_MODE_NONE; GstVideoMultiviewFlags mv_flags = GST_VIDEO_MULTIVIEW_FLAGS_NONE; const gchar *mview_mode_str; switch (demux->asf_3D_mode) { case GST_ASF_3D_SIDE_BY_SIDE_HALF_LR: mv_mode = GST_VIDEO_MULTIVIEW_MODE_SIDE_BY_SIDE; break; case GST_ASF_3D_SIDE_BY_SIDE_HALF_RL: mv_mode = GST_VIDEO_MULTIVIEW_MODE_SIDE_BY_SIDE; mv_flags = GST_VIDEO_MULTIVIEW_FLAGS_RIGHT_VIEW_FIRST; break; case GST_ASF_3D_TOP_AND_BOTTOM_HALF_LR: mv_mode = GST_VIDEO_MULTIVIEW_MODE_TOP_BOTTOM; break; case GST_ASF_3D_TOP_AND_BOTTOM_HALF_RL: mv_mode = GST_VIDEO_MULTIVIEW_MODE_TOP_BOTTOM; mv_flags = GST_VIDEO_MULTIVIEW_FLAGS_RIGHT_VIEW_FIRST; break; case GST_ASF_3D_DUAL_STREAM:{ gboolean is_right_view = FALSE; /* if Advanced_Mutual_Exclusion object exists, use it * to figure out which is the left view (lower ID) */ if (demux->mut_ex_streams != NULL) { guint length; gint i; length = g_slist_length (demux->mut_ex_streams); for (i = 0; i < length; i++) { gpointer v_s_id; v_s_id = g_slist_nth_data (demux->mut_ex_streams, i); GST_DEBUG_OBJECT (demux, "has Mutual_Exclusion object. 
stream id in object is %d", GPOINTER_TO_INT (v_s_id)); if (id > GPOINTER_TO_INT (v_s_id)) is_right_view = TRUE; } } else { /* if the Advaced_Mutual_Exclusion object doesn't exist, assume the * first video stream encountered has the lower ID */ if (demux->num_video_streams > 0) { /* This is not the first video stream, assuming right eye view */ is_right_view = TRUE; } } if (is_right_view) mv_mode = GST_VIDEO_MULTIVIEW_MODE_RIGHT; else mv_mode = GST_VIDEO_MULTIVIEW_MODE_LEFT; break; } default: break; } GST_INFO_OBJECT (demux, "stream_id %d, has multiview-mode %d flags 0x%x", id, mv_mode, (guint) mv_flags); mview_mode_str = gst_video_multiview_mode_to_caps_string (mv_mode); if (mview_mode_str != NULL) { if (gst_video_multiview_guess_half_aspect (mv_mode, video->width, video->height, par_w, par_h)) mv_flags |= GST_VIDEO_MULTIVIEW_FLAGS_HALF_ASPECT; gst_caps_set_simple (caps, "multiview-mode", G_TYPE_STRING, mview_mode_str, "multiview-flags", GST_TYPE_VIDEO_MULTIVIEW_FLAGSET, mv_flags, GST_FLAG_SET_MASK_EXACT, NULL); } } if (codec_name) { tags = gst_tag_list_new (GST_TAG_VIDEO_CODEC, codec_name, NULL); g_free (codec_name); } if (extradata) gst_buffer_unref (extradata); GST_INFO ("Adding video stream #%u, id %u, codec %" GST_FOURCC_FORMAT " (0x%08x)", demux->num_video_streams, id, GST_FOURCC_ARGS (video->tag), video->tag); ++demux->num_video_streams; return gst_asf_demux_setup_pad (demux, src_pad, caps, id, TRUE, streamheader, tags); } static void gst_asf_demux_activate_stream (GstASFDemux * demux, AsfStream * stream) { if (!stream->active) { GstEvent *event; gchar *stream_id; GST_INFO_OBJECT (demux, "Activating stream %2u, pad %s, caps %" GST_PTR_FORMAT, stream->id, GST_PAD_NAME (stream->pad), stream->caps); gst_pad_set_active (stream->pad, TRUE); stream_id = gst_pad_create_stream_id_printf (stream->pad, GST_ELEMENT_CAST (demux), "%03u", stream->id); event = gst_pad_get_sticky_event (demux->sinkpad, GST_EVENT_STREAM_START, 0); if (event) { if (gst_event_parse_group_id 
(event, &demux->group_id)) demux->have_group_id = TRUE; else demux->have_group_id = FALSE; gst_event_unref (event); } else if (!demux->have_group_id) { demux->have_group_id = TRUE; demux->group_id = gst_util_group_id_next (); } event = gst_event_new_stream_start (stream_id); if (demux->have_group_id) gst_event_set_group_id (event, demux->group_id); gst_pad_push_event (stream->pad, event); g_free (stream_id); gst_pad_set_caps (stream->pad, stream->caps); gst_element_add_pad (GST_ELEMENT_CAST (demux), stream->pad); gst_flow_combiner_add_pad (demux->flowcombiner, stream->pad); stream->active = TRUE; } } static AsfStream * gst_asf_demux_parse_stream_object (GstASFDemux * demux, guint8 * data, guint64 size) { AsfCorrectionType correction_type; AsfStreamType stream_type; GstClockTime time_offset; gboolean is_encrypted G_GNUC_UNUSED; guint16 stream_id; guint16 flags; ASFGuid guid; guint stream_specific_size; guint type_specific_size G_GNUC_UNUSED; guint unknown G_GNUC_UNUSED; gboolean inspect_payload = FALSE; AsfStream *stream = NULL; /* Get the rest of the header's header */ if (size < (16 + 16 + 8 + 4 + 4 + 2 + 4)) goto not_enough_data; gst_asf_demux_get_guid (&guid, &data, &size); stream_type = gst_asf_demux_identify_guid (asf_stream_guids, &guid); gst_asf_demux_get_guid (&guid, &data, &size); correction_type = gst_asf_demux_identify_guid (asf_correction_guids, &guid); time_offset = gst_asf_demux_get_uint64 (&data, &size) * 100; type_specific_size = gst_asf_demux_get_uint32 (&data, &size); stream_specific_size = gst_asf_demux_get_uint32 (&data, &size); flags = gst_asf_demux_get_uint16 (&data, &size); stream_id = flags & 0x7f; is_encrypted = ! 
!((flags & 0x8000) << 15); unknown = gst_asf_demux_get_uint32 (&data, &size); GST_DEBUG_OBJECT (demux, "Found stream %u, time_offset=%" GST_TIME_FORMAT, stream_id, GST_TIME_ARGS (time_offset)); /* dvr-ms has audio stream declared in stream specific data */ if (stream_type == ASF_STREAM_EXT_EMBED_HEADER) { AsfExtStreamType ext_stream_type; gst_asf_demux_get_guid (&guid, &data, &size); ext_stream_type = gst_asf_demux_identify_guid (asf_ext_stream_guids, &guid); if (ext_stream_type == ASF_EXT_STREAM_AUDIO) { inspect_payload = TRUE; gst_asf_demux_get_guid (&guid, &data, &size); gst_asf_demux_get_uint32 (&data, &size); gst_asf_demux_get_uint32 (&data, &size); gst_asf_demux_get_uint32 (&data, &size); gst_asf_demux_get_guid (&guid, &data, &size); gst_asf_demux_get_uint32 (&data, &size); stream_type = ASF_STREAM_AUDIO; } } switch (stream_type) { case ASF_STREAM_AUDIO:{ asf_stream_audio audio_object; if (!gst_asf_demux_get_stream_audio (&audio_object, &data, &size)) goto not_enough_data; GST_INFO ("Object is an audio stream with %u bytes of additional data", audio_object.size); stream = gst_asf_demux_add_audio_stream (demux, &audio_object, stream_id, &data, &size); switch (correction_type) { case ASF_CORRECTION_ON:{ guint span, packet_size, chunk_size, data_size, silence_data; GST_INFO ("Using error correction"); if (size < (1 + 2 + 2 + 2 + 1)) goto not_enough_data; span = gst_asf_demux_get_uint8 (&data, &size); packet_size = gst_asf_demux_get_uint16 (&data, &size); chunk_size = gst_asf_demux_get_uint16 (&data, &size); data_size = gst_asf_demux_get_uint16 (&data, &size); silence_data = gst_asf_demux_get_uint8 (&data, &size); stream->span = span; GST_DEBUG_OBJECT (demux, "Descrambling ps:%u cs:%u ds:%u s:%u sd:%u", packet_size, chunk_size, data_size, span, silence_data); if (stream->span > 1) { if (chunk_size == 0 || ((packet_size / chunk_size) <= 1)) { /* Disable descrambling */ stream->span = 0; } else { /* FIXME: this else branch was added for * weird_al_yankovic - the 
saga begins.asf */ stream->ds_packet_size = packet_size; stream->ds_chunk_size = chunk_size; } } else { /* Descambling is enabled */ stream->ds_packet_size = packet_size; stream->ds_chunk_size = chunk_size; } #if 0 /* Now skip the rest of the silence data */ if (data_size > 1) gst_bytestream_flush (demux->bs, data_size - 1); #else /* FIXME: CHECKME. And why -1? */ if (data_size > 1) { if (!gst_asf_demux_skip_bytes (data_size - 1, &data, &size)) { goto not_enough_data; } } #endif break; } case ASF_CORRECTION_OFF:{ GST_INFO ("Error correction off"); if (!gst_asf_demux_skip_bytes (stream_specific_size, &data, &size)) goto not_enough_data; break; } default: GST_ELEMENT_ERROR (demux, STREAM, DEMUX, (NULL), ("Audio stream using unknown error correction")); return NULL; } break; } case ASF_STREAM_VIDEO:{ asf_stream_video_format video_format_object; asf_stream_video video_object; guint16 vsize; if (!gst_asf_demux_get_stream_video (&video_object, &data, &size)) goto not_enough_data; vsize = video_object.size - 40; /* Byte order gets offset by single byte */ GST_INFO ("object is a video stream with %u bytes of " "additional data", vsize); if (!gst_asf_demux_get_stream_video_format (&video_format_object, &data, &size)) { goto not_enough_data; } stream = gst_asf_demux_add_video_stream (demux, &video_format_object, stream_id, &data, &size); break; } default: GST_WARNING_OBJECT (demux, "Unknown stream type for stream %u", stream_id); demux->other_streams = g_slist_append (demux->other_streams, GINT_TO_POINTER (stream_id)); break; } if (stream) stream->inspect_payload = inspect_payload; return stream; not_enough_data: { GST_WARNING_OBJECT (demux, "Unexpected end of data parsing stream object"); /* we'll error out later if we found no streams */ return NULL; } } static const gchar * gst_asf_demux_get_gst_tag_from_tag_name (const gchar * name_utf8) { const struct { const gchar *asf_name; const gchar *gst_name; } tags[] = { { "WM/Genre", GST_TAG_GENRE}, { "WM/AlbumTitle", 
GST_TAG_ALBUM}, { "WM/AlbumArtist", GST_TAG_ARTIST}, { "WM/Picture", GST_TAG_IMAGE}, { "WM/Track", GST_TAG_TRACK_NUMBER}, { "WM/TrackNumber", GST_TAG_TRACK_NUMBER}, { "WM/Year", GST_TAG_DATE_TIME} /* { "WM/Composer", GST_TAG_COMPOSER } */ }; gsize out; guint i; if (name_utf8 == NULL) { GST_WARNING ("Failed to convert name to UTF8, skipping"); return NULL; } out = strlen (name_utf8); for (i = 0; i < G_N_ELEMENTS (tags); ++i) { if (strncmp (tags[i].asf_name, name_utf8, out) == 0) { GST_LOG ("map tagname '%s' -> '%s'", name_utf8, tags[i].gst_name); return tags[i].gst_name; } } return NULL; } /* gst_asf_demux_add_global_tags() takes ownership of taglist! */ static void gst_asf_demux_add_global_tags (GstASFDemux * demux, GstTagList * taglist) { GstTagList *t; GST_DEBUG_OBJECT (demux, "adding global tags: %" GST_PTR_FORMAT, taglist); if (taglist == NULL) return; if (gst_tag_list_is_empty (taglist)) { gst_tag_list_unref (taglist); return; } t = gst_tag_list_merge (demux->taglist, taglist, GST_TAG_MERGE_APPEND); gst_tag_list_set_scope (t, GST_TAG_SCOPE_GLOBAL); if (demux->taglist) gst_tag_list_unref (demux->taglist); gst_tag_list_unref (taglist); demux->taglist = t; GST_LOG_OBJECT (demux, "global tags now: %" GST_PTR_FORMAT, demux->taglist); } #define ASF_DEMUX_DATA_TYPE_UTF16LE_STRING 0 #define ASF_DEMUX_DATA_TYPE_BYTE_ARRAY 1 #define ASF_DEMUX_DATA_TYPE_BOOL 2 #define ASF_DEMUX_DATA_TYPE_DWORD 3 static void asf_demux_parse_picture_tag (GstTagList * tags, const guint8 * tag_data, guint tag_data_len) { GstByteReader r; const guint8 *img_data = NULL; guint32 img_data_len = 0; guint8 pic_type = 0; gst_byte_reader_init (&r, tag_data, tag_data_len); /* skip mime type string (we don't trust it and do our own typefinding), * and also skip the description string, since we don't use it */ if (!gst_byte_reader_get_uint8 (&r, &pic_type) || !gst_byte_reader_get_uint32_le (&r, &img_data_len) || !gst_byte_reader_skip_string_utf16 (&r) || !gst_byte_reader_skip_string_utf16 (&r) || 
!gst_byte_reader_get_data (&r, img_data_len, &img_data)) { goto not_enough_data; } if (!gst_tag_list_add_id3_image (tags, img_data, img_data_len, pic_type)) GST_DEBUG ("failed to add image extracted from WM/Picture tag to taglist"); return; not_enough_data: { GST_DEBUG ("Failed to read WM/Picture tag: not enough data"); GST_MEMDUMP ("WM/Picture data", tag_data, tag_data_len); return; } } /* Extended Content Description Object */ static GstFlowReturn gst_asf_demux_process_ext_content_desc (GstASFDemux * demux, guint8 * data, guint64 size) { /* Other known (and unused) 'text/unicode' metadata available : * * WM/Lyrics = * WM/MediaPrimaryClassID = {D1607DBC-E323-4BE2-86A1-48A42A28441E} * WMFSDKVersion = 9.00.00.2980 * WMFSDKNeeded = 0.0.0.0000 * WM/UniqueFileIdentifier = AMGa_id=R 15334;AMGp_id=P 5149;AMGt_id=T 2324984 * WM/Publisher = 4AD * WM/Provider = AMG * WM/ProviderRating = 8 * WM/ProviderStyle = Rock (similar to WM/Genre) * WM/GenreID (similar to WM/Genre) * WM/TrackNumber (same as WM/Track but as a string) * * Other known (and unused) 'non-text' metadata available : * * WM/EncodingTime * WM/MCDI * IsVBR * * We might want to read WM/TrackNumber and use atoi() if we don't have * WM/Track */ GstTagList *taglist; guint16 blockcount, i; gboolean content3D = FALSE; struct { const gchar *interleave_name; GstASF3DMode interleaving_type; } stereoscopic_layout_map[] = { { "SideBySideRF", GST_ASF_3D_SIDE_BY_SIDE_HALF_RL}, { "SideBySideLF", GST_ASF_3D_SIDE_BY_SIDE_HALF_LR}, { "OverUnderRT", GST_ASF_3D_TOP_AND_BOTTOM_HALF_RL}, { "OverUnderLT", GST_ASF_3D_TOP_AND_BOTTOM_HALF_LR}, { "DualStream", GST_ASF_3D_DUAL_STREAM} }; GST_INFO_OBJECT (demux, "object is an extended content description"); taglist = gst_tag_list_new_empty (); /* Content Descriptor Count */ if (size < 2) goto not_enough_data; blockcount = gst_asf_demux_get_uint16 (&data, &size); for (i = 1; i <= blockcount; ++i) { const gchar *gst_tag_name; guint16 datatype; guint16 value_len; guint16 name_len; GValue 
tag_value = { 0, }; gsize in, out; gchar *name; gchar *name_utf8 = NULL; gchar *value; /* Descriptor */ if (!gst_asf_demux_get_string (&name, &name_len, &data, &size)) goto not_enough_data; if (size < 2) { g_free (name); goto not_enough_data; } /* Descriptor Value Data Type */ datatype = gst_asf_demux_get_uint16 (&data, &size); /* Descriptor Value (not really a string, but same thing reading-wise) */ if (!gst_asf_demux_get_string (&value, &value_len, &data, &size)) { g_free (name); goto not_enough_data; } name_utf8 = g_convert (name, name_len, "UTF-8", "UTF-16LE", &in, &out, NULL); if (name_utf8 != NULL) { GST_DEBUG ("Found tag/metadata %s", name_utf8); gst_tag_name = gst_asf_demux_get_gst_tag_from_tag_name (name_utf8); GST_DEBUG ("gst_tag_name %s", GST_STR_NULL (gst_tag_name)); switch (datatype) { case ASF_DEMUX_DATA_TYPE_UTF16LE_STRING:{ gchar *value_utf8; value_utf8 = g_convert (value, value_len, "UTF-8", "UTF-16LE", &in, &out, NULL); /* get rid of tags with empty value */ if (value_utf8 != NULL && *value_utf8 != '\0') { GST_DEBUG ("string value %s", value_utf8); value_utf8[out] = '\0'; if (gst_tag_name != NULL) { if (strcmp (gst_tag_name, GST_TAG_DATE_TIME) == 0) { guint year = atoi (value_utf8); if (year > 0) { g_value_init (&tag_value, GST_TYPE_DATE_TIME); g_value_take_boxed (&tag_value, gst_date_time_new_y (year)); } } else if (strcmp (gst_tag_name, GST_TAG_GENRE) == 0) { guint id3v1_genre_id; const gchar *genre_str; if (sscanf (value_utf8, "(%u)", &id3v1_genre_id) == 1 && ((genre_str = gst_tag_id3_genre_get (id3v1_genre_id)))) { GST_DEBUG ("Genre: %s -> %s", value_utf8, genre_str); g_free (value_utf8); value_utf8 = g_strdup (genre_str); } } else { GType tag_type; /* convert tag from string to other type if required */ tag_type = gst_tag_get_type (gst_tag_name); g_value_init (&tag_value, tag_type); if (!gst_value_deserialize (&tag_value, value_utf8)) { GValue from_val = { 0, }; g_value_init (&from_val, G_TYPE_STRING); g_value_set_string (&from_val, 
value_utf8); if (!g_value_transform (&from_val, &tag_value)) { GST_WARNING_OBJECT (demux, "Could not transform string tag to " "%s tag type %s", gst_tag_name, g_type_name (tag_type)); g_value_unset (&tag_value); } g_value_unset (&from_val); } } } else { /* metadata ! */ GST_DEBUG ("Setting metadata"); g_value_init (&tag_value, G_TYPE_STRING); g_value_set_string (&tag_value, value_utf8); /* If we found a stereoscopic marker, look for StereoscopicLayout * metadata */ if (content3D) { guint i; if (strncmp ("StereoscopicLayout", name_utf8, strlen (name_utf8)) == 0) { for (i = 0; i < G_N_ELEMENTS (stereoscopic_layout_map); i++) { if (g_str_equal (stereoscopic_layout_map[i].interleave_name, value_utf8)) { demux->asf_3D_mode = stereoscopic_layout_map[i].interleaving_type; GST_INFO ("find interleave type %u", demux->asf_3D_mode); } } } GST_INFO_OBJECT (demux, "3d type is %u", demux->asf_3D_mode); } else { demux->asf_3D_mode = GST_ASF_3D_NONE; GST_INFO_OBJECT (demux, "None 3d type"); } } } else if (value_utf8 == NULL) { GST_WARNING ("Failed to convert string value to UTF8, skipping"); } else { GST_DEBUG ("Skipping empty string value for %s", GST_STR_NULL (gst_tag_name)); } g_free (value_utf8); break; } case ASF_DEMUX_DATA_TYPE_BYTE_ARRAY:{ if (gst_tag_name) { if (!g_str_equal (gst_tag_name, GST_TAG_IMAGE)) { GST_FIXME ("Unhandled byte array tag %s", GST_STR_NULL (gst_tag_name)); break; } else { asf_demux_parse_picture_tag (taglist, (guint8 *) value, value_len); } } break; } case ASF_DEMUX_DATA_TYPE_DWORD:{ guint uint_val = GST_READ_UINT32_LE (value); /* this is the track number */ g_value_init (&tag_value, G_TYPE_UINT); /* WM/Track counts from 0 */ if (!strcmp (name_utf8, "WM/Track")) ++uint_val; g_value_set_uint (&tag_value, uint_val); break; } /* Detect 3D */ case ASF_DEMUX_DATA_TYPE_BOOL:{ gboolean bool_val = GST_READ_UINT32_LE (value); if (strncmp ("Stereoscopic", name_utf8, strlen (name_utf8)) == 0) { if (bool_val) { GST_INFO_OBJECT (demux, "This is 3D contents"); 
content3D = TRUE; } else { GST_INFO_OBJECT (demux, "This is not 3D contenst"); content3D = FALSE; } } break; } default:{ GST_DEBUG ("Skipping tag %s of type %d", gst_tag_name, datatype); break; } } if (G_IS_VALUE (&tag_value)) { if (gst_tag_name) { GstTagMergeMode merge_mode = GST_TAG_MERGE_APPEND; /* WM/TrackNumber is more reliable than WM/Track, since the latter * is supposed to have a 0 base but is often wrongly written to start * from 1 as well, so prefer WM/TrackNumber when we have it: either * replace the value added earlier from WM/Track or put it first in * the list, so that it will get picked up by _get_uint() */ if (strcmp (name_utf8, "WM/TrackNumber") == 0) merge_mode = GST_TAG_MERGE_REPLACE; gst_tag_list_add_values (taglist, merge_mode, gst_tag_name, &tag_value, NULL); } else { GST_DEBUG ("Setting global metadata %s", name_utf8); gst_structure_set_value (demux->global_metadata, name_utf8, &tag_value); } g_value_unset (&tag_value); } } g_free (name); g_free (value); g_free (name_utf8); } gst_asf_demux_add_global_tags (demux, taglist); return GST_FLOW_OK; /* Errors */ not_enough_data: { GST_WARNING ("Unexpected end of data parsing ext content desc object"); gst_tag_list_unref (taglist); return GST_FLOW_OK; /* not really fatal */ } } static GstStructure * gst_asf_demux_get_metadata_for_stream (GstASFDemux * demux, guint stream_num) { gchar sname[32]; guint i; g_snprintf (sname, sizeof (sname), "stream-%u", stream_num); for (i = 0; i < gst_caps_get_size (demux->metadata); ++i) { GstStructure *s; s = gst_caps_get_structure (demux->metadata, i); if (gst_structure_has_name (s, sname)) return s; } gst_caps_append_structure (demux->metadata, gst_structure_new_empty (sname)); /* try lookup again; demux->metadata took ownership of the structure, so we * can't really make any assumptions about what happened to it, so we can't * just return it directly after appending it */ return gst_asf_demux_get_metadata_for_stream (demux, stream_num); } static GstFlowReturn 
gst_asf_demux_process_metadata (GstASFDemux * demux, guint8 * data,
    guint64 size)
{
  guint16 blockcount, i;

  GST_INFO_OBJECT (demux, "object is a metadata object");

  /* Content Descriptor Count */
  if (size < 2)
    goto not_enough_data;

  blockcount = gst_asf_demux_get_uint16 (&data, &size);

  for (i = 0; i < blockcount; ++i) {
    GstStructure *s;
    guint16 stream_num, name_len, data_type, lang_idx G_GNUC_UNUSED;
    guint32 data_len, ival;
    gchar *name_utf8;

    if (size < (2 + 2 + 2 + 2 + 4))
      goto not_enough_data;

    lang_idx = gst_asf_demux_get_uint16 (&data, &size);
    stream_num = gst_asf_demux_get_uint16 (&data, &size);
    name_len = gst_asf_demux_get_uint16 (&data, &size);
    data_type = gst_asf_demux_get_uint16 (&data, &size);
    data_len = gst_asf_demux_get_uint32 (&data, &size);

    /* Fix: sum in 64-bit — name_len + data_len evaluated in 32-bit
     * arithmetic could wrap around and defeat this bounds check */
    if (size < (guint64) name_len + data_len)
      goto not_enough_data;

    /* convert name to UTF-8 */
    name_utf8 = g_convert ((gchar *) data, name_len, "UTF-8", "UTF-16LE",
        NULL, NULL, NULL);
    gst_asf_demux_skip_bytes (name_len, &data, &size);

    if (name_utf8 == NULL) {
      GST_WARNING ("Failed to convert value name to UTF8, skipping");
      gst_asf_demux_skip_bytes (data_len, &data, &size);
      continue;
    }

    /* Fix: a DWORD needs at least 4 bytes of payload; this also guards the
     * 'data_len - 4' skip below against unsigned underflow (huge skip) */
    if (data_type != ASF_DEMUX_DATA_TYPE_DWORD || data_len < 4) {
      gst_asf_demux_skip_bytes (data_len, &data, &size);
      g_free (name_utf8);
      continue;
    }

    /* read DWORD */
    if (size < 4) {
      g_free (name_utf8);
      goto not_enough_data;
    }

    ival = gst_asf_demux_get_uint32 (&data, &size);

    /* skip anything else there may be, just in case */
    gst_asf_demux_skip_bytes (data_len - 4, &data, &size);

    s = gst_asf_demux_get_metadata_for_stream (demux, stream_num);
    gst_structure_set (s, name_utf8, G_TYPE_INT, ival, NULL);
    g_free (name_utf8);
  }

  GST_INFO_OBJECT (demux, "metadata = %" GST_PTR_FORMAT, demux->metadata);
  return GST_FLOW_OK;

  /* Errors */
not_enough_data:
  {
    GST_WARNING ("Unexpected end of data parsing metadata object");

    return GST_FLOW_OK;         /* not really fatal */
  }
}

static GstFlowReturn
gst_asf_demux_process_header (GstASFDemux * demux, guint8 * data, guint64 size)
{
  GstFlowReturn ret =
GST_FLOW_OK; guint32 i, num_objects; guint8 unknown G_GNUC_UNUSED; /* Get the rest of the header's header */ if (size < (4 + 1 + 1)) goto not_enough_data; num_objects = gst_asf_demux_get_uint32 (&data, &size); unknown = gst_asf_demux_get_uint8 (&data, &size); unknown = gst_asf_demux_get_uint8 (&data, &size); GST_INFO_OBJECT (demux, "object is a header with %u parts", num_objects); demux->saw_file_header = FALSE; /* Loop through the header's objects, processing those */ for (i = 0; i < num_objects; ++i) { GST_INFO_OBJECT (demux, "reading header part %u", i); ret = gst_asf_demux_process_object (demux, &data, &size); if (ret != GST_FLOW_OK) { GST_WARNING ("process_object returned %s", gst_asf_get_flow_name (ret)); break; } } if (!demux->saw_file_header) { GST_ELEMENT_ERROR (demux, STREAM, DEMUX, (NULL), ("Header does not have mandatory FILE section")); return GST_FLOW_ERROR; } return ret; not_enough_data: { GST_ELEMENT_ERROR (demux, STREAM, DEMUX, (NULL), ("short read parsing HEADER object")); return GST_FLOW_ERROR; } } static GstFlowReturn gst_asf_demux_process_file (GstASFDemux * demux, guint8 * data, guint64 size) { guint64 creation_time G_GNUC_UNUSED; guint64 file_size G_GNUC_UNUSED; guint64 send_time G_GNUC_UNUSED; guint64 packets_count, play_time, preroll; guint32 flags, min_pktsize, max_pktsize, min_bitrate G_GNUC_UNUSED; if (size < (16 + 8 + 8 + 8 + 8 + 8 + 8 + 4 + 4 + 4 + 4)) goto not_enough_data; gst_asf_demux_skip_bytes (16, &data, &size); /* skip GUID */ file_size = gst_asf_demux_get_uint64 (&data, &size); creation_time = gst_asf_demux_get_uint64 (&data, &size); packets_count = gst_asf_demux_get_uint64 (&data, &size); play_time = gst_asf_demux_get_uint64 (&data, &size); send_time = gst_asf_demux_get_uint64 (&data, &size); preroll = gst_asf_demux_get_uint64 (&data, &size); flags = gst_asf_demux_get_uint32 (&data, &size); min_pktsize = gst_asf_demux_get_uint32 (&data, &size); max_pktsize = gst_asf_demux_get_uint32 (&data, &size); min_bitrate = 
gst_asf_demux_get_uint32 (&data, &size); demux->broadcast = ! !(flags & 0x01); demux->seekable = ! !(flags & 0x02); GST_DEBUG_OBJECT (demux, "min_pktsize = %u", min_pktsize); GST_DEBUG_OBJECT (demux, "flags::broadcast = %d", demux->broadcast); GST_DEBUG_OBJECT (demux, "flags::seekable = %d", demux->seekable); if (demux->broadcast) { /* these fields are invalid if the broadcast flag is set */ play_time = 0; file_size = 0; } if (min_pktsize != max_pktsize) goto non_fixed_packet_size; demux->packet_size = max_pktsize; /* FIXME: do we need send_time as well? what is it? */ if ((play_time * 100) >= (preroll * GST_MSECOND)) demux->play_time = (play_time * 100) - (preroll * GST_MSECOND); else demux->play_time = 0; demux->preroll = preroll * GST_MSECOND; /* initial latency */ demux->latency = demux->preroll; if (demux->play_time == 0) demux->seekable = FALSE; GST_DEBUG_OBJECT (demux, "play_time %" GST_TIME_FORMAT, GST_TIME_ARGS (demux->play_time)); GST_DEBUG_OBJECT (demux, "preroll %" GST_TIME_FORMAT, GST_TIME_ARGS (demux->preroll)); if (demux->play_time > 0) { demux->segment.duration = demux->play_time; } GST_INFO ("object is a file with %" G_GUINT64_FORMAT " data packets", packets_count); GST_INFO ("preroll = %" G_GUINT64_FORMAT, demux->preroll); demux->saw_file_header = TRUE; return GST_FLOW_OK; /* ERRORS */ non_fixed_packet_size: { GST_ELEMENT_ERROR (demux, STREAM, DEMUX, (NULL), ("packet size must be fixed")); return GST_FLOW_ERROR; } not_enough_data: { GST_ELEMENT_ERROR (demux, STREAM, DEMUX, (NULL), ("short read parsing FILE object")); return GST_FLOW_ERROR; } } /* Content Description Object */ static GstFlowReturn gst_asf_demux_process_comment (GstASFDemux * demux, guint8 * data, guint64 size) { struct { const gchar *gst_tag; guint16 val_length; gchar *val_utf8; } tags[5] = { { GST_TAG_TITLE, 0, NULL}, { GST_TAG_ARTIST, 0, NULL}, { GST_TAG_COPYRIGHT, 0, NULL}, { GST_TAG_DESCRIPTION, 0, NULL}, { GST_TAG_COMMENT, 0, NULL} }; GstTagList *taglist; GValue value = { 0 }; 
gsize in, out; gint i = -1; GST_INFO_OBJECT (demux, "object is a comment"); if (size < (2 + 2 + 2 + 2 + 2)) goto not_enough_data; tags[0].val_length = gst_asf_demux_get_uint16 (&data, &size); tags[1].val_length = gst_asf_demux_get_uint16 (&data, &size); tags[2].val_length = gst_asf_demux_get_uint16 (&data, &size); tags[3].val_length = gst_asf_demux_get_uint16 (&data, &size); tags[4].val_length = gst_asf_demux_get_uint16 (&data, &size); GST_DEBUG_OBJECT (demux, "Comment lengths: title=%d author=%d copyright=%d " "description=%d rating=%d", tags[0].val_length, tags[1].val_length, tags[2].val_length, tags[3].val_length, tags[4].val_length); for (i = 0; i < G_N_ELEMENTS (tags); ++i) { if (size < tags[i].val_length) goto not_enough_data; /* might be just '/0', '/0'... */ if (tags[i].val_length > 2 && tags[i].val_length % 2 == 0) { /* convert to UTF-8 */ tags[i].val_utf8 = g_convert ((gchar *) data, tags[i].val_length, "UTF-8", "UTF-16LE", &in, &out, NULL); } gst_asf_demux_skip_bytes (tags[i].val_length, &data, &size); } /* parse metadata into taglist */ taglist = gst_tag_list_new_empty (); g_value_init (&value, G_TYPE_STRING); for (i = 0; i < G_N_ELEMENTS (tags); ++i) { if (tags[i].val_utf8 && strlen (tags[i].val_utf8) > 0 && tags[i].gst_tag) { g_value_set_string (&value, tags[i].val_utf8); gst_tag_list_add_values (taglist, GST_TAG_MERGE_APPEND, tags[i].gst_tag, &value, NULL); } } g_value_unset (&value); gst_asf_demux_add_global_tags (demux, taglist); for (i = 0; i < G_N_ELEMENTS (tags); ++i) g_free (tags[i].val_utf8); return GST_FLOW_OK; not_enough_data: { GST_WARNING_OBJECT (demux, "unexpectedly short of data while processing " "comment tag section %d, skipping comment object", i); for (i = 0; i < G_N_ELEMENTS (tags); i++) g_free (tags[i].val_utf8); return GST_FLOW_OK; /* not really fatal */ } } static GstFlowReturn gst_asf_demux_process_bitrate_props_object (GstASFDemux * demux, guint8 * data, guint64 size) { guint16 num_streams, i; AsfStream *stream; if (size < 2) 
goto not_enough_data; num_streams = gst_asf_demux_get_uint16 (&data, &size); GST_INFO ("object is a bitrate properties object with %u streams", num_streams); if (size < (num_streams * (2 + 4))) goto not_enough_data; for (i = 0; i < num_streams; ++i) { guint32 bitrate; guint16 stream_id; stream_id = gst_asf_demux_get_uint16 (&data, &size); bitrate = gst_asf_demux_get_uint32 (&data, &size); if (stream_id < GST_ASF_DEMUX_NUM_STREAM_IDS) { GST_DEBUG_OBJECT (demux, "bitrate of stream %u = %u", stream_id, bitrate); stream = gst_asf_demux_get_stream (demux, stream_id); if (stream) { if (stream->pending_tags == NULL) stream->pending_tags = gst_tag_list_new_empty (); gst_tag_list_add (stream->pending_tags, GST_TAG_MERGE_REPLACE, GST_TAG_BITRATE, bitrate, NULL); } else { GST_WARNING_OBJECT (demux, "Stream id %u wasn't found", stream_id); } } else { GST_WARNING ("stream id %u is too large", stream_id); } } return GST_FLOW_OK; not_enough_data: { GST_WARNING_OBJECT (demux, "short read parsing bitrate props object!"); return GST_FLOW_OK; /* not really fatal */ } } static GstFlowReturn gst_asf_demux_process_header_ext (GstASFDemux * demux, guint8 * data, guint64 size) { GstFlowReturn ret = GST_FLOW_OK; guint64 hdr_size; /* Get the rest of the header's header */ if (size < (16 + 2 + 4)) goto not_enough_data; /* skip GUID and two other bytes */ gst_asf_demux_skip_bytes (16 + 2, &data, &size); hdr_size = gst_asf_demux_get_uint32 (&data, &size); GST_INFO ("extended header object with a size of %u bytes", (guint) size); /* FIXME: does data_size include the rest of the header that we have read? 
*/ if (hdr_size > size) goto not_enough_data; while (hdr_size > 0) { ret = gst_asf_demux_process_object (demux, &data, &hdr_size); if (ret != GST_FLOW_OK) break; } return ret; not_enough_data: { GST_ELEMENT_ERROR (demux, STREAM, DEMUX, (NULL), ("short read parsing extended header object")); return GST_FLOW_ERROR; } } static GstFlowReturn gst_asf_demux_process_language_list (GstASFDemux * demux, guint8 * data, guint64 size) { guint i; if (size < 2) goto not_enough_data; if (demux->languages) { GST_WARNING ("More than one LANGUAGE_LIST object in stream"); g_strfreev (demux->languages); demux->languages = NULL; demux->num_languages = 0; } demux->num_languages = gst_asf_demux_get_uint16 (&data, &size); GST_LOG ("%u languages:", demux->num_languages); demux->languages = g_new0 (gchar *, demux->num_languages + 1); for (i = 0; i < demux->num_languages; ++i) { guint8 len, *lang_data = NULL; if (size < 1) goto not_enough_data; len = gst_asf_demux_get_uint8 (&data, &size); if (gst_asf_demux_get_bytes (&lang_data, len, &data, &size)) { gchar *utf8; utf8 = g_convert ((gchar *) lang_data, len, "UTF-8", "UTF-16LE", NULL, NULL, NULL); /* truncate "en-us" etc. 
to just "en" */ if (utf8 && strlen (utf8) >= 5 && (utf8[2] == '-' || utf8[2] == '_')) { utf8[2] = '\0'; } GST_DEBUG ("[%u] %s", i, GST_STR_NULL (utf8)); demux->languages[i] = utf8; g_free (lang_data); } else { goto not_enough_data; } } return GST_FLOW_OK; not_enough_data: { GST_WARNING_OBJECT (demux, "short read parsing language list object!"); g_free (demux->languages); demux->languages = NULL; demux->num_languages = 0; return GST_FLOW_OK; /* not fatal */ } } static GstFlowReturn gst_asf_demux_process_simple_index (GstASFDemux * demux, guint8 * data, guint64 size) { GstClockTime interval; guint32 count, i; if (size < (16 + 8 + 4 + 4)) goto not_enough_data; /* skip file id */ gst_asf_demux_skip_bytes (16, &data, &size); interval = gst_asf_demux_get_uint64 (&data, &size) * (GstClockTime) 100; gst_asf_demux_skip_bytes (4, &data, &size); count = gst_asf_demux_get_uint32 (&data, &size); if (count > 0) { demux->sidx_interval = interval; demux->sidx_num_entries = count; g_free (demux->sidx_entries); demux->sidx_entries = g_new0 (AsfSimpleIndexEntry, count); for (i = 0; i < count; ++i) { if (G_UNLIKELY (size < 6)) { /* adjust for broken files, to avoid having entries at the end * of the parsed index that point to time=0. 
Resulting in seeking to * the end of the file leading back to the beginning */ demux->sidx_num_entries -= (count - i); break; } demux->sidx_entries[i].packet = gst_asf_demux_get_uint32 (&data, &size); demux->sidx_entries[i].count = gst_asf_demux_get_uint16 (&data, &size); GST_LOG_OBJECT (demux, "%" GST_TIME_FORMAT " = packet %4u count : %2d", GST_TIME_ARGS (i * interval), demux->sidx_entries[i].packet, demux->sidx_entries[i].count); } } else { GST_DEBUG_OBJECT (demux, "simple index object with 0 entries"); } return GST_FLOW_OK; not_enough_data: { GST_WARNING_OBJECT (demux, "short read parsing simple index object!"); return GST_FLOW_OK; /* not fatal */ } } static GstFlowReturn gst_asf_demux_process_advanced_mutual_exclusion (GstASFDemux * demux, guint8 * data, guint64 size) { ASFGuid guid; guint16 num, i; if (size < 16 + 2 + (2 * 2)) goto not_enough_data; gst_asf_demux_get_guid (&guid, &data, &size); num = gst_asf_demux_get_uint16 (&data, &size); if (num < 2) { GST_WARNING_OBJECT (demux, "nonsensical mutually exclusive streams count"); return GST_FLOW_OK; } if (size < (num * sizeof (guint16))) goto not_enough_data; /* read mutually exclusive stream numbers */ for (i = 0; i < num; ++i) { guint8 mes; mes = gst_asf_demux_get_uint16 (&data, &size) & 0x7f; GST_LOG_OBJECT (demux, "mutually exclusive: stream %d", mes); demux->mut_ex_streams = g_slist_append (demux->mut_ex_streams, GINT_TO_POINTER (mes)); } return GST_FLOW_OK; /* Errors */ not_enough_data: { GST_WARNING_OBJECT (demux, "short read parsing advanced mutual exclusion"); return GST_FLOW_OK; /* not absolutely fatal */ } } gboolean gst_asf_demux_is_unknown_stream (GstASFDemux * demux, guint stream_num) { return g_slist_find (demux->other_streams, GINT_TO_POINTER (stream_num)) == NULL; } static GstFlowReturn gst_asf_demux_process_ext_stream_props (GstASFDemux * demux, guint8 * data, guint64 size) { AsfStreamExtProps esp; AsfStream *stream = NULL; AsfObject stream_obj; guint16 stream_name_count; guint16 
num_payload_ext; guint64 len; guint8 *stream_obj_data = NULL; guint8 *data_start; guint obj_size; guint i, stream_num; data_start = data; obj_size = (guint) size; esp.payload_extensions = NULL; if (size < 64) goto not_enough_data; esp.valid = TRUE; esp.start_time = gst_asf_demux_get_uint64 (&data, &size) * GST_MSECOND; esp.end_time = gst_asf_demux_get_uint64 (&data, &size) * GST_MSECOND; esp.data_bitrate = gst_asf_demux_get_uint32 (&data, &size); esp.buffer_size = gst_asf_demux_get_uint32 (&data, &size); esp.intial_buf_fullness = gst_asf_demux_get_uint32 (&data, &size); esp.data_bitrate2 = gst_asf_demux_get_uint32 (&data, &size); esp.buffer_size2 = gst_asf_demux_get_uint32 (&data, &size); esp.intial_buf_fullness2 = gst_asf_demux_get_uint32 (&data, &size); esp.max_obj_size = gst_asf_demux_get_uint32 (&data, &size); esp.flags = gst_asf_demux_get_uint32 (&data, &size); stream_num = gst_asf_demux_get_uint16 (&data, &size); esp.lang_idx = gst_asf_demux_get_uint16 (&data, &size); esp.avg_time_per_frame = gst_asf_demux_get_uint64 (&data, &size); stream_name_count = gst_asf_demux_get_uint16 (&data, &size); num_payload_ext = gst_asf_demux_get_uint16 (&data, &size); GST_INFO ("start_time = %" GST_TIME_FORMAT, GST_TIME_ARGS (esp.start_time)); GST_INFO ("end_time = %" GST_TIME_FORMAT, GST_TIME_ARGS (esp.end_time)); GST_INFO ("flags = %08x", esp.flags); GST_INFO ("average time per frame = %" GST_TIME_FORMAT, GST_TIME_ARGS (esp.avg_time_per_frame * 100)); GST_INFO ("stream number = %u", stream_num); GST_INFO ("stream language ID idx = %u (%s)", esp.lang_idx, (esp.lang_idx < demux->num_languages) ? 
GST_STR_NULL (demux->languages[esp.lang_idx]) : "??"); GST_INFO ("stream name count = %u", stream_name_count); /* read stream names */ for (i = 0; i < stream_name_count; ++i) { guint16 stream_lang_idx G_GNUC_UNUSED; gchar *stream_name = NULL; if (size < 2) goto not_enough_data; stream_lang_idx = gst_asf_demux_get_uint16 (&data, &size); if (!gst_asf_demux_get_string (&stream_name, NULL, &data, &size)) goto not_enough_data; GST_INFO ("stream name %d: %s", i, GST_STR_NULL (stream_name)); g_free (stream_name); /* TODO: store names in struct */ } /* read payload extension systems stuff */ GST_LOG ("payload extension systems count = %u", num_payload_ext); if (num_payload_ext > 0) esp.payload_extensions = g_new0 (AsfPayloadExtension, num_payload_ext + 1); for (i = 0; i < num_payload_ext; ++i) { AsfPayloadExtension ext; ASFGuid ext_guid; guint32 sys_info_len; if (size < 16 + 2 + 4) goto not_enough_data; gst_asf_demux_get_guid (&ext_guid, &data, &size); ext.id = gst_asf_demux_identify_guid (asf_payload_ext_guids, &ext_guid); ext.len = gst_asf_demux_get_uint16 (&data, &size); sys_info_len = gst_asf_demux_get_uint32 (&data, &size); GST_LOG ("payload systems info len = %u", sys_info_len); if (!gst_asf_demux_skip_bytes (sys_info_len, &data, &size)) goto not_enough_data; esp.payload_extensions[i] = ext; } GST_LOG ("bytes read: %u/%u", (guint) (data - data_start), obj_size); /* there might be an optional STREAM_INFO object here now; if not, we * should have parsed the corresponding stream info object already (since * we are parsing the extended stream properties objects delayed) */ if (size == 0) { stream = gst_asf_demux_get_stream (demux, stream_num); goto done; } if (size < ASF_OBJECT_HEADER_SIZE) goto not_enough_data; /* get size of the stream object */ if (!asf_demux_peek_object (demux, data, size, &stream_obj, TRUE)) goto corrupted_stream; if (stream_obj.id != ASF_OBJ_STREAM) goto expected_stream_object; if (stream_obj.size < ASF_OBJECT_HEADER_SIZE || stream_obj.size > (10 * 
1024 * 1024)) goto not_enough_data; gst_asf_demux_skip_bytes (ASF_OBJECT_HEADER_SIZE, &data, &size); /* process this stream object later after all the other 'normal' ones * have been processed (since the others are more important/non-hidden) */ len = stream_obj.size - ASF_OBJECT_HEADER_SIZE; if (!gst_asf_demux_get_bytes (&stream_obj_data, len, &data, &size)) goto not_enough_data; /* parse stream object */ stream = gst_asf_demux_parse_stream_object (demux, stream_obj_data, len); g_free (stream_obj_data); done: if (stream) { stream->ext_props = esp; /* try to set the framerate */ if (stream->is_video && stream->caps) { GValue framerate = { 0 }; GstStructure *s; gint num, denom; g_value_init (&framerate, GST_TYPE_FRACTION); num = GST_SECOND / 100; denom = esp.avg_time_per_frame; if (denom == 0) { /* avoid division by 0, assume 25/1 framerate */ denom = GST_SECOND / 2500; } gst_value_set_fraction (&framerate, num, denom); stream->caps = gst_caps_make_writable (stream->caps); s = gst_caps_get_structure (stream->caps, 0); gst_structure_set_value (s, "framerate", &framerate); g_value_unset (&framerate); GST_DEBUG_OBJECT (demux, "setting framerate of %d/%d = %f", num, denom, ((gdouble) num) / denom); } /* add language info now if we have it */ if (stream->ext_props.lang_idx < demux->num_languages) { if (stream->pending_tags == NULL) stream->pending_tags = gst_tag_list_new_empty (); GST_LOG_OBJECT (demux, "stream %u has language '%s'", stream->id, demux->languages[stream->ext_props.lang_idx]); gst_tag_list_add (stream->pending_tags, GST_TAG_MERGE_APPEND, GST_TAG_LANGUAGE_CODE, demux->languages[stream->ext_props.lang_idx], NULL); } } else if (gst_asf_demux_is_unknown_stream (demux, stream_num)) { GST_WARNING_OBJECT (demux, "Ext. 
stream properties for unknown stream"); } if (!stream) g_free (esp.payload_extensions); return GST_FLOW_OK; /* Errors */ not_enough_data: { GST_WARNING_OBJECT (demux, "short read parsing ext stream props object!"); g_free (esp.payload_extensions); return GST_FLOW_OK; /* not absolutely fatal */ } expected_stream_object: { GST_WARNING_OBJECT (demux, "error parsing extended stream properties " "object: expected embedded stream object, but got %s object instead!", gst_asf_get_guid_nick (asf_object_guids, stream_obj.id)); g_free (esp.payload_extensions); return GST_FLOW_OK; /* not absolutely fatal */ } corrupted_stream: { GST_WARNING_OBJECT (demux, "Corrupted stream"); g_free (esp.payload_extensions); return GST_FLOW_ERROR; } } static const gchar * gst_asf_demux_push_obj (GstASFDemux * demux, guint32 obj_id) { const gchar *nick; nick = gst_asf_get_guid_nick (asf_object_guids, obj_id); if (g_str_has_prefix (nick, "ASF_OBJ_")) nick += strlen ("ASF_OBJ_"); if (demux->objpath == NULL) { demux->objpath = g_strdup (nick); } else { gchar *newpath; newpath = g_strdup_printf ("%s/%s", demux->objpath, nick); g_free (demux->objpath); demux->objpath = newpath; } return (const gchar *) demux->objpath; } static void gst_asf_demux_pop_obj (GstASFDemux * demux) { gchar *s; if ((s = g_strrstr (demux->objpath, "/"))) { *s = '\0'; } else { g_free (demux->objpath); demux->objpath = NULL; } } static void gst_asf_demux_process_queued_extended_stream_objects (GstASFDemux * demux) { GSList *l; guint i; /* Parse the queued extended stream property objects and add the info * to the existing streams or add the new embedded streams, but without * activating them yet */ GST_LOG_OBJECT (demux, "%u queued extended stream properties objects", g_slist_length (demux->ext_stream_props)); for (l = demux->ext_stream_props, i = 0; l != NULL; l = l->next, ++i) { GstBuffer *buf = GST_BUFFER (l->data); GstMapInfo map; gst_buffer_map (buf, &map, GST_MAP_READ); GST_LOG_OBJECT (demux, "parsing ext. 
stream properties object #%u", i); gst_asf_demux_process_ext_stream_props (demux, map.data, map.size); gst_buffer_unmap (buf, &map); gst_buffer_unref (buf); } g_slist_free (demux->ext_stream_props); demux->ext_stream_props = NULL; } #if 0 static void gst_asf_demux_activate_ext_props_streams (GstASFDemux * demux) { guint i, j; for (i = 0; i < demux->num_streams; ++i) { AsfStream *stream; gboolean is_hidden; GSList *x; stream = &demux->stream[i]; GST_LOG_OBJECT (demux, "checking stream %2u", stream->id); if (stream->active) { GST_LOG_OBJECT (demux, "stream %2u is already activated", stream->id); continue; } is_hidden = FALSE; for (x = demux->mut_ex_streams; x != NULL; x = x->next) { guint8 *mes; /* check for each mutual exclusion whether it affects this stream */ for (mes = (guint8 *) x->data; mes != NULL && *mes != 0xff; ++mes) { if (*mes == stream->id) { /* if yes, check if we've already added streams that are mutually * exclusive with the stream we're about to add */ for (mes = (guint8 *) x->data; mes != NULL && *mes != 0xff; ++mes) { for (j = 0; j < demux->num_streams; ++j) { /* if the broadcast flag is set, assume the hidden streams aren't * actually streamed and hide them (or playbin won't work right), * otherwise assume their data is available */ if (demux->stream[j].id == *mes && demux->broadcast) { is_hidden = TRUE; GST_LOG_OBJECT (demux, "broadcast stream ID %d to be added is " "mutually exclusive with already existing stream ID %d, " "hiding stream", stream->id, demux->stream[j].id); goto next; } } } break; } } } next: /* FIXME: we should do stream activation based on preroll data in * streaming mode too */ if (demux->streaming && !is_hidden) gst_asf_demux_activate_stream (demux, stream); } } #endif static GstFlowReturn gst_asf_demux_process_object (GstASFDemux * demux, guint8 ** p_data, guint64 * p_size) { GstFlowReturn ret = GST_FLOW_OK; AsfObject obj; guint64 obj_data_size; if (*p_size < ASF_OBJECT_HEADER_SIZE) return ASF_FLOW_NEED_MORE_DATA; if 
(!asf_demux_peek_object (demux, *p_data, ASF_OBJECT_HEADER_SIZE, &obj, TRUE)) return GST_FLOW_ERROR; gst_asf_demux_skip_bytes (ASF_OBJECT_HEADER_SIZE, p_data, p_size); obj_data_size = obj.size - ASF_OBJECT_HEADER_SIZE; if (*p_size < obj_data_size) return ASF_FLOW_NEED_MORE_DATA; gst_asf_demux_push_obj (demux, obj.id); GST_INFO ("%s: size %" G_GUINT64_FORMAT, demux->objpath, obj.size); switch (obj.id) { case ASF_OBJ_STREAM: gst_asf_demux_parse_stream_object (demux, *p_data, obj_data_size); ret = GST_FLOW_OK; break; case ASF_OBJ_FILE: ret = gst_asf_demux_process_file (demux, *p_data, obj_data_size); break; case ASF_OBJ_HEADER: ret = gst_asf_demux_process_header (demux, *p_data, obj_data_size); break; case ASF_OBJ_COMMENT: ret = gst_asf_demux_process_comment (demux, *p_data, obj_data_size); break; case ASF_OBJ_HEAD1: ret = gst_asf_demux_process_header_ext (demux, *p_data, obj_data_size); break; case ASF_OBJ_BITRATE_PROPS: ret = gst_asf_demux_process_bitrate_props_object (demux, *p_data, obj_data_size); break; case ASF_OBJ_EXT_CONTENT_DESC: ret = gst_asf_demux_process_ext_content_desc (demux, *p_data, obj_data_size); break; case ASF_OBJ_METADATA_OBJECT: ret = gst_asf_demux_process_metadata (demux, *p_data, obj_data_size); break; case ASF_OBJ_EXTENDED_STREAM_PROPS:{ GstBuffer *buf; /* process these later, we might not have parsed the corresponding * stream object yet */ GST_LOG ("%s: queued for later parsing", demux->objpath); buf = gst_buffer_new_and_alloc (obj_data_size); gst_buffer_fill (buf, 0, *p_data, obj_data_size); demux->ext_stream_props = g_slist_append (demux->ext_stream_props, buf); ret = GST_FLOW_OK; break; } case ASF_OBJ_LANGUAGE_LIST: ret = gst_asf_demux_process_language_list (demux, *p_data, obj_data_size); break; case ASF_OBJ_ADVANCED_MUTUAL_EXCLUSION: ret = gst_asf_demux_process_advanced_mutual_exclusion (demux, *p_data, obj_data_size); break; case ASF_OBJ_SIMPLE_INDEX: ret = gst_asf_demux_process_simple_index (demux, *p_data, obj_data_size); break; 
case ASF_OBJ_CONTENT_ENCRYPTION: case ASF_OBJ_EXT_CONTENT_ENCRYPTION: case ASF_OBJ_DIGITAL_SIGNATURE_OBJECT: case ASF_OBJ_UNKNOWN_ENCRYPTION_OBJECT: goto error_encrypted; case ASF_OBJ_CONCEAL_NONE: case ASF_OBJ_HEAD2: case ASF_OBJ_UNDEFINED: case ASF_OBJ_CODEC_COMMENT: case ASF_OBJ_INDEX: case ASF_OBJ_PADDING: case ASF_OBJ_BITRATE_MUTEX: case ASF_OBJ_COMPATIBILITY: case ASF_OBJ_INDEX_PLACEHOLDER: case ASF_OBJ_INDEX_PARAMETERS: case ASF_OBJ_STREAM_PRIORITIZATION: case ASF_OBJ_SCRIPT_COMMAND: case ASF_OBJ_METADATA_LIBRARY_OBJECT: default: /* Unknown/unhandled object, skip it and hope for the best */ GST_INFO ("%s: skipping object", demux->objpath); ret = GST_FLOW_OK; break; } /* this can't fail, we checked the number of bytes available before */ gst_asf_demux_skip_bytes (obj_data_size, p_data, p_size); GST_LOG ("%s: ret = %s", demux->objpath, gst_asf_get_flow_name (ret)); gst_asf_demux_pop_obj (demux); return ret; /* ERRORS */ error_encrypted: { GST_ELEMENT_ERROR (demux, STREAM, DECRYPT, (NULL), (NULL)); return GST_FLOW_ERROR; } } static void gst_asf_demux_descramble_buffer (GstASFDemux * demux, AsfStream * stream, GstBuffer ** p_buffer) { GstBuffer *descrambled_buffer; GstBuffer *scrambled_buffer; GstBuffer *sub_buffer; guint offset; guint off; guint row; guint col; guint idx; /* descrambled_buffer is initialised in the first iteration */ descrambled_buffer = NULL; scrambled_buffer = *p_buffer; if (gst_buffer_get_size (scrambled_buffer) < stream->ds_packet_size * stream->span) return; for (offset = 0; offset < gst_buffer_get_size (scrambled_buffer); offset += stream->ds_chunk_size) { off = offset / stream->ds_chunk_size; row = off / stream->span; col = off % stream->span; idx = row + col * stream->ds_packet_size / stream->ds_chunk_size; GST_DEBUG ("idx=%u, row=%u, col=%u, off=%u, ds_chunk_size=%u", idx, row, col, off, stream->ds_chunk_size); GST_DEBUG ("scrambled buffer size=%" G_GSIZE_FORMAT ", span=%u, packet_size=%u", gst_buffer_get_size (scrambled_buffer), 
stream->span, stream->ds_packet_size); GST_DEBUG ("gst_buffer_get_size (scrambled_buffer) = %" G_GSIZE_FORMAT, gst_buffer_get_size (scrambled_buffer)); sub_buffer = gst_buffer_copy_region (scrambled_buffer, GST_BUFFER_COPY_MEMORY, idx * stream->ds_chunk_size, stream->ds_chunk_size); if (!offset) { descrambled_buffer = sub_buffer; } else { descrambled_buffer = gst_buffer_append (descrambled_buffer, sub_buffer); } } GST_BUFFER_TIMESTAMP (descrambled_buffer) = GST_BUFFER_TIMESTAMP (scrambled_buffer); GST_BUFFER_DURATION (descrambled_buffer) = GST_BUFFER_DURATION (scrambled_buffer); GST_BUFFER_OFFSET (descrambled_buffer) = GST_BUFFER_OFFSET (scrambled_buffer); GST_BUFFER_OFFSET_END (descrambled_buffer) = GST_BUFFER_OFFSET_END (scrambled_buffer); /* FIXME/CHECK: do we need to transfer buffer flags here too? */ gst_buffer_unref (scrambled_buffer); *p_buffer = descrambled_buffer; } static gboolean gst_asf_demux_element_send_event (GstElement * element, GstEvent * event) { GstASFDemux *demux = GST_ASF_DEMUX (element); gint i; GST_DEBUG ("handling element event of type %s", GST_EVENT_TYPE_NAME (event)); for (i = 0; i < demux->num_streams; ++i) { gst_event_ref (event); if (gst_asf_demux_handle_src_event (demux->stream[i].pad, GST_OBJECT_CAST (element), event)) { gst_event_unref (event); return TRUE; } } gst_event_unref (event); return FALSE; } /* takes ownership of the passed event */ static gboolean gst_asf_demux_send_event_unlocked (GstASFDemux * demux, GstEvent * event) { gboolean ret = TRUE; gint i; GST_DEBUG_OBJECT (demux, "sending %s event to all source pads", GST_EVENT_TYPE_NAME (event)); for (i = 0; i < demux->num_streams; ++i) { gst_event_ref (event); ret &= gst_pad_push_event (demux->stream[i].pad, event); } gst_event_unref (event); return ret; } static gboolean gst_asf_demux_handle_src_query (GstPad * pad, GstObject * parent, GstQuery * query) { GstASFDemux *demux; gboolean res = FALSE; demux = GST_ASF_DEMUX (parent); GST_DEBUG ("handling %s query", 
gst_query_type_get_name (GST_QUERY_TYPE (query))); switch (GST_QUERY_TYPE (query)) { case GST_QUERY_DURATION: { GstFormat format; gst_query_parse_duration (query, &format, NULL); if (format != GST_FORMAT_TIME) { GST_LOG ("only support duration queries in TIME format"); break; } res = gst_pad_query_default (pad, parent, query); if (!res) { GST_OBJECT_LOCK (demux); if (demux->segment.duration != GST_CLOCK_TIME_NONE) { GST_LOG ("returning duration: %" GST_TIME_FORMAT, GST_TIME_ARGS (demux->segment.duration)); gst_query_set_duration (query, GST_FORMAT_TIME, demux->segment.duration); res = TRUE; } else { GST_LOG ("duration not known yet"); } GST_OBJECT_UNLOCK (demux); } break; } case GST_QUERY_POSITION:{ GstFormat format; gst_query_parse_position (query, &format, NULL); if (format != GST_FORMAT_TIME) { GST_LOG ("only support position queries in TIME format"); break; } GST_OBJECT_LOCK (demux); if (demux->segment.position != GST_CLOCK_TIME_NONE) { GST_LOG ("returning position: %" GST_TIME_FORMAT, GST_TIME_ARGS (demux->segment.position)); gst_query_set_position (query, GST_FORMAT_TIME, demux->segment.position); res = TRUE; } else { GST_LOG ("position not known yet"); } GST_OBJECT_UNLOCK (demux); break; } case GST_QUERY_SEEKING:{ GstFormat format; gst_query_parse_seeking (query, &format, NULL, NULL, NULL); if (format == GST_FORMAT_TIME) { gint64 duration; GST_OBJECT_LOCK (demux); duration = demux->segment.duration; GST_OBJECT_UNLOCK (demux); if (!demux->streaming || !demux->seekable) { gst_query_set_seeking (query, GST_FORMAT_TIME, demux->seekable, 0, duration); res = TRUE; } else { GstFormat fmt; gboolean seekable; /* try upstream first in TIME */ res = gst_pad_query_default (pad, parent, query); gst_query_parse_seeking (query, &fmt, &seekable, NULL, NULL); GST_LOG_OBJECT (demux, "upstream %s seekable %d", GST_STR_NULL (gst_format_get_name (fmt)), seekable); /* if no luck, maybe in BYTES */ if (!seekable || fmt != GST_FORMAT_TIME) { GstQuery *q; q = gst_query_new_seeking 
(GST_FORMAT_BYTES); if ((res = gst_pad_peer_query (demux->sinkpad, q))) { gst_query_parse_seeking (q, &fmt, &seekable, NULL, NULL); GST_LOG_OBJECT (demux, "upstream %s seekable %d", GST_STR_NULL (gst_format_get_name (fmt)), seekable); if (fmt != GST_FORMAT_BYTES) seekable = FALSE; } gst_query_unref (q); gst_query_set_seeking (query, GST_FORMAT_TIME, seekable, 0, duration); res = TRUE; } } } else GST_LOG_OBJECT (demux, "only support seeking in TIME format"); break; } case GST_QUERY_LATENCY: { gboolean live; GstClockTime min, max; /* preroll delay does not matter in non-live pipeline, * but we might end up in a live (rtsp) one ... */ /* first forward */ res = gst_pad_query_default (pad, parent, query); if (!res) break; gst_query_parse_latency (query, &live, &min, &max); GST_DEBUG_OBJECT (demux, "Peer latency: live %d, min %" GST_TIME_FORMAT " max %" GST_TIME_FORMAT, live, GST_TIME_ARGS (min), GST_TIME_ARGS (max)); GST_OBJECT_LOCK (demux); min += demux->latency; if (max != -1) max += demux->latency; GST_OBJECT_UNLOCK (demux); gst_query_set_latency (query, live, min, max); break; } case GST_QUERY_SEGMENT: { GstFormat format; gint64 start, stop; format = demux->segment.format; start = gst_segment_to_stream_time (&demux->segment, format, demux->segment.start); if ((stop = demux->segment.stop) == -1) stop = demux->segment.duration; else stop = gst_segment_to_stream_time (&demux->segment, format, stop); gst_query_set_segment (query, demux->segment.rate, format, start, stop); res = TRUE; break; } default: res = gst_pad_query_default (pad, parent, query); break; } return res; } static GstStateChangeReturn gst_asf_demux_change_state (GstElement * element, GstStateChange transition) { GstASFDemux *demux = GST_ASF_DEMUX (element); GstStateChangeReturn ret = GST_STATE_CHANGE_SUCCESS; switch (transition) { case GST_STATE_CHANGE_NULL_TO_READY:{ gst_segment_init (&demux->segment, GST_FORMAT_TIME); demux->need_newsegment = TRUE; demux->segment_running = FALSE; demux->keyunit_sync = 
FALSE; demux->accurate = FALSE; demux->adapter = gst_adapter_new (); demux->metadata = gst_caps_new_empty (); demux->global_metadata = gst_structure_new_empty ("metadata"); demux->data_size = 0; demux->data_offset = 0; demux->index_offset = 0; demux->base_offset = 0; demux->flowcombiner = gst_flow_combiner_new (); break; } default: break; } ret = GST_ELEMENT_CLASS (parent_class)->change_state (element, transition); if (ret == GST_STATE_CHANGE_FAILURE) return ret; switch (transition) { case GST_STATE_CHANGE_PAUSED_TO_READY: gst_asf_demux_reset (demux, FALSE); break; case GST_STATE_CHANGE_READY_TO_NULL: gst_asf_demux_reset (demux, FALSE); gst_flow_combiner_free (demux->flowcombiner); demux->flowcombiner = NULL; break; default: break; } return ret; }
/* GStreamer ASF/WMV/WMA demuxer * Copyright (C) 1999 Erik Walthinsen <omega@cse.ogi.edu> * Copyright (C) 2006-2009 Tim-Philipp Müller <tim centricular net> * * This library is free software; you can redistribute it and/or * modify it under the terms of the GNU Library General Public * License as published by the Free Software Foundation; either * version 2 of the License, or (at your option) any later version. * * This library is distributed in the hope that it will be useful, * but WITHOUT ANY WARRANTY; without even the implied warranty of * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU * Library General Public License for more details. * * You should have received a copy of the GNU Library General Public * License along with this library; if not, write to the * Free Software Foundation, Inc., 51 Franklin St, Fifth Floor, * Boston, MA 02110-1301, USA. */ /* TODO: * * - _loop(): * stop if at end of segment if != end of file, ie. demux->segment.stop * * - fix packet parsing: * there's something wrong with timestamps for packets with keyframes, * and durations too. */ #ifdef HAVE_CONFIG_H #include "config.h" #endif #include <gst/gstutils.h> #include <gst/base/gstbytereader.h> #include <gst/base/gsttypefindhelper.h> #include <gst/riff/riff-media.h> #include <gst/tag/tag.h> #include <gst/gst-i18n-plugin.h> #include <gst/video/video.h> #include <stdio.h> #include <stdlib.h> #include <string.h> #include "gstasfdemux.h" #include "asfheaders.h" #include "asfpacket.h" static GstStaticPadTemplate gst_asf_demux_sink_template = GST_STATIC_PAD_TEMPLATE ("sink", GST_PAD_SINK, GST_PAD_ALWAYS, GST_STATIC_CAPS ("video/x-ms-asf") ); static GstStaticPadTemplate audio_src_template = GST_STATIC_PAD_TEMPLATE ("audio_%u", GST_PAD_SRC, GST_PAD_SOMETIMES, GST_STATIC_CAPS_ANY); static GstStaticPadTemplate video_src_template = GST_STATIC_PAD_TEMPLATE ("video_%u", GST_PAD_SRC, GST_PAD_SOMETIMES, GST_STATIC_CAPS_ANY); /* size of an ASF object header, ie. 
GUID (16 bytes) + object size (8 bytes) */ #define ASF_OBJECT_HEADER_SIZE (16+8) /* FIXME: get rid of this */ /* abuse this GstFlowReturn enum for internal usage */ #define ASF_FLOW_NEED_MORE_DATA 99 #define gst_asf_get_flow_name(flow) \ (flow == ASF_FLOW_NEED_MORE_DATA) ? \ "need-more-data" : gst_flow_get_name (flow) GST_DEBUG_CATEGORY (asfdemux_dbg); static GstStateChangeReturn gst_asf_demux_change_state (GstElement * element, GstStateChange transition); static gboolean gst_asf_demux_element_send_event (GstElement * element, GstEvent * event); static gboolean gst_asf_demux_send_event_unlocked (GstASFDemux * demux, GstEvent * event); static gboolean gst_asf_demux_handle_src_query (GstPad * pad, GstObject * parent, GstQuery * query); static GstFlowReturn gst_asf_demux_chain (GstPad * pad, GstObject * parent, GstBuffer * buf); static gboolean gst_asf_demux_sink_event (GstPad * pad, GstObject * parent, GstEvent * event); static GstFlowReturn gst_asf_demux_process_object (GstASFDemux * demux, guint8 ** p_data, guint64 * p_size); static gboolean gst_asf_demux_activate (GstPad * sinkpad, GstObject * parent); static gboolean gst_asf_demux_activate_mode (GstPad * sinkpad, GstObject * parent, GstPadMode mode, gboolean active); static void gst_asf_demux_loop (GstASFDemux * demux); static void gst_asf_demux_process_queued_extended_stream_objects (GstASFDemux * demux); static gboolean gst_asf_demux_pull_headers (GstASFDemux * demux, GstFlowReturn * pflow); static GstFlowReturn gst_asf_demux_pull_indices (GstASFDemux * demux); static void gst_asf_demux_reset_stream_state_after_discont (GstASFDemux * asf); static gboolean gst_asf_demux_parse_data_object_start (GstASFDemux * demux, guint8 * data); static void gst_asf_demux_descramble_buffer (GstASFDemux * demux, AsfStream * stream, GstBuffer ** p_buffer); static void gst_asf_demux_activate_stream (GstASFDemux * demux, AsfStream * stream); static GstStructure *gst_asf_demux_get_metadata_for_stream (GstASFDemux * d, guint 
stream_num); static GstFlowReturn gst_asf_demux_push_complete_payloads (GstASFDemux * demux, gboolean force); #define gst_asf_demux_parent_class parent_class G_DEFINE_TYPE (GstASFDemux, gst_asf_demux, GST_TYPE_ELEMENT); static void gst_asf_demux_class_init (GstASFDemuxClass * klass) { GstElementClass *gstelement_class; gstelement_class = (GstElementClass *) klass; gst_element_class_set_static_metadata (gstelement_class, "ASF Demuxer", "Codec/Demuxer", "Demultiplexes ASF Streams", "Owen Fraser-Green <owen@discobabe.net>"); gst_element_class_add_static_pad_template (gstelement_class, &audio_src_template); gst_element_class_add_static_pad_template (gstelement_class, &video_src_template); gst_element_class_add_static_pad_template (gstelement_class, &gst_asf_demux_sink_template); gstelement_class->change_state = GST_DEBUG_FUNCPTR (gst_asf_demux_change_state); gstelement_class->send_event = GST_DEBUG_FUNCPTR (gst_asf_demux_element_send_event); } static void gst_asf_demux_free_stream (GstASFDemux * demux, AsfStream * stream) { gst_caps_replace (&stream->caps, NULL); if (stream->pending_tags) { gst_tag_list_unref (stream->pending_tags); stream->pending_tags = NULL; } if (stream->streamheader) { gst_buffer_unref (stream->streamheader); stream->streamheader = NULL; } if (stream->pad) { if (stream->active) { gst_element_remove_pad (GST_ELEMENT_CAST (demux), stream->pad); gst_flow_combiner_remove_pad (demux->flowcombiner, stream->pad); } else gst_object_unref (stream->pad); stream->pad = NULL; } if (stream->payloads) { while (stream->payloads->len > 0) { AsfPayload *payload; guint last; last = stream->payloads->len - 1; payload = &g_array_index (stream->payloads, AsfPayload, last); gst_buffer_replace (&payload->buf, NULL); g_array_remove_index (stream->payloads, last); } g_array_free (stream->payloads, TRUE); stream->payloads = NULL; } if (stream->payloads_rev) { while (stream->payloads_rev->len > 0) { AsfPayload *payload; guint last; last = stream->payloads_rev->len - 1; 
payload = &g_array_index (stream->payloads_rev, AsfPayload, last); gst_buffer_replace (&payload->buf, NULL); g_array_remove_index (stream->payloads_rev, last); } g_array_free (stream->payloads_rev, TRUE); stream->payloads_rev = NULL; } if (stream->ext_props.valid) { g_free (stream->ext_props.payload_extensions); stream->ext_props.payload_extensions = NULL; } } static void gst_asf_demux_reset (GstASFDemux * demux, gboolean chain_reset) { GST_LOG_OBJECT (demux, "resetting"); gst_segment_init (&demux->segment, GST_FORMAT_UNDEFINED); demux->segment_running = FALSE; if (demux->adapter && !chain_reset) { gst_adapter_clear (demux->adapter); g_object_unref (demux->adapter); demux->adapter = NULL; } if (demux->taglist) { gst_tag_list_unref (demux->taglist); demux->taglist = NULL; } if (demux->metadata) { gst_caps_unref (demux->metadata); demux->metadata = NULL; } if (demux->global_metadata) { gst_structure_free (demux->global_metadata); demux->global_metadata = NULL; } if (demux->mut_ex_streams) { g_slist_free (demux->mut_ex_streams); demux->mut_ex_streams = NULL; } demux->state = GST_ASF_DEMUX_STATE_HEADER; g_free (demux->objpath); demux->objpath = NULL; g_strfreev (demux->languages); demux->languages = NULL; demux->num_languages = 0; g_slist_foreach (demux->ext_stream_props, (GFunc) gst_mini_object_unref, NULL); g_slist_free (demux->ext_stream_props); demux->ext_stream_props = NULL; while (demux->old_num_streams > 0) { gst_asf_demux_free_stream (demux, &demux->old_stream[demux->old_num_streams - 1]); --demux->old_num_streams; } memset (demux->old_stream, 0, sizeof (demux->old_stream)); demux->old_num_streams = 0; /* when resetting for a new chained asf, we don't want to remove the pads * before adding the new ones */ if (chain_reset) { memcpy (demux->old_stream, demux->stream, sizeof (demux->stream)); demux->old_num_streams = demux->num_streams; demux->num_streams = 0; } while (demux->num_streams > 0) { gst_asf_demux_free_stream (demux, &demux->stream[demux->num_streams - 
1]); --demux->num_streams; } memset (demux->stream, 0, sizeof (demux->stream)); if (!chain_reset) { /* do not remove those for not adding pads with same name */ demux->num_audio_streams = 0; demux->num_video_streams = 0; demux->have_group_id = FALSE; demux->group_id = G_MAXUINT; } demux->num_streams = 0; demux->activated_streams = FALSE; demux->first_ts = GST_CLOCK_TIME_NONE; demux->segment_ts = GST_CLOCK_TIME_NONE; demux->in_gap = 0; if (!chain_reset) gst_segment_init (&demux->in_segment, GST_FORMAT_UNDEFINED); demux->state = GST_ASF_DEMUX_STATE_HEADER; demux->seekable = FALSE; demux->broadcast = FALSE; demux->sidx_interval = 0; demux->sidx_num_entries = 0; g_free (demux->sidx_entries); demux->sidx_entries = NULL; demux->speed_packets = 1; demux->asf_3D_mode = GST_ASF_3D_NONE; if (chain_reset) { GST_LOG_OBJECT (demux, "Restarting"); gst_segment_init (&demux->segment, GST_FORMAT_TIME); demux->need_newsegment = TRUE; demux->segment_seqnum = 0; demux->segment_running = FALSE; demux->keyunit_sync = FALSE; demux->accurate = FALSE; demux->metadata = gst_caps_new_empty (); demux->global_metadata = gst_structure_new_empty ("metadata"); demux->data_size = 0; demux->data_offset = 0; demux->index_offset = 0; } else { demux->base_offset = 0; } g_slist_free (demux->other_streams); demux->other_streams = NULL; } static void gst_asf_demux_init (GstASFDemux * demux) { demux->sinkpad = gst_pad_new_from_static_template (&gst_asf_demux_sink_template, "sink"); gst_pad_set_chain_function (demux->sinkpad, GST_DEBUG_FUNCPTR (gst_asf_demux_chain)); gst_pad_set_event_function (demux->sinkpad, GST_DEBUG_FUNCPTR (gst_asf_demux_sink_event)); gst_pad_set_activate_function (demux->sinkpad, GST_DEBUG_FUNCPTR (gst_asf_demux_activate)); gst_pad_set_activatemode_function (demux->sinkpad, GST_DEBUG_FUNCPTR (gst_asf_demux_activate_mode)); gst_element_add_pad (GST_ELEMENT (demux), demux->sinkpad); /* set initial state */ gst_asf_demux_reset (demux, FALSE); } static gboolean gst_asf_demux_activate 
(GstPad * sinkpad, GstObject * parent) { GstQuery *query; gboolean pull_mode; query = gst_query_new_scheduling (); if (!gst_pad_peer_query (sinkpad, query)) { gst_query_unref (query); goto activate_push; } pull_mode = gst_query_has_scheduling_mode_with_flags (query, GST_PAD_MODE_PULL, GST_SCHEDULING_FLAG_SEEKABLE); gst_query_unref (query); if (!pull_mode) goto activate_push; GST_DEBUG_OBJECT (sinkpad, "activating pull"); return gst_pad_activate_mode (sinkpad, GST_PAD_MODE_PULL, TRUE); activate_push: { GST_DEBUG_OBJECT (sinkpad, "activating push"); return gst_pad_activate_mode (sinkpad, GST_PAD_MODE_PUSH, TRUE); } } static gboolean gst_asf_demux_activate_mode (GstPad * sinkpad, GstObject * parent, GstPadMode mode, gboolean active) { gboolean res; GstASFDemux *demux; demux = GST_ASF_DEMUX (parent); switch (mode) { case GST_PAD_MODE_PUSH: demux->state = GST_ASF_DEMUX_STATE_HEADER; demux->streaming = TRUE; res = TRUE; break; case GST_PAD_MODE_PULL: if (active) { demux->state = GST_ASF_DEMUX_STATE_HEADER; demux->streaming = FALSE; res = gst_pad_start_task (sinkpad, (GstTaskFunction) gst_asf_demux_loop, demux, NULL); } else { res = gst_pad_stop_task (sinkpad); } break; default: res = FALSE; break; } return res; } static gboolean gst_asf_demux_sink_event (GstPad * pad, GstObject * parent, GstEvent * event) { GstASFDemux *demux; gboolean ret = TRUE; demux = GST_ASF_DEMUX (parent); GST_LOG_OBJECT (demux, "handling %s event", GST_EVENT_TYPE_NAME (event)); switch (GST_EVENT_TYPE (event)) { case GST_EVENT_SEGMENT:{ const GstSegment *segment; gst_event_parse_segment (event, &segment); if (segment->format == GST_FORMAT_BYTES) { if (demux->packet_size && segment->start > demux->data_offset) demux->packet = (segment->start - demux->data_offset) / demux->packet_size; else demux->packet = 0; } else if (segment->format == GST_FORMAT_TIME) { /* do not know packet position, not really a problem */ demux->packet = -1; } else { GST_WARNING_OBJECT (demux, "unsupported newsegment format, 
ignoring"); gst_event_unref (event); break; } /* record upstream segment for interpolation */ if (segment->format != demux->in_segment.format) gst_segment_init (&demux->in_segment, GST_FORMAT_UNDEFINED); gst_segment_copy_into (segment, &demux->in_segment); /* in either case, clear some state and generate newsegment later on */ GST_OBJECT_LOCK (demux); demux->segment_ts = GST_CLOCK_TIME_NONE; demux->in_gap = GST_CLOCK_TIME_NONE; demux->need_newsegment = TRUE; demux->segment_seqnum = gst_event_get_seqnum (event); gst_asf_demux_reset_stream_state_after_discont (demux); /* if we seek back after reaching EOS, go back to packet reading state */ if (demux->data_offset > 0 && segment->start >= demux->data_offset && demux->state == GST_ASF_DEMUX_STATE_INDEX) { demux->state = GST_ASF_DEMUX_STATE_DATA; } GST_OBJECT_UNLOCK (demux); gst_event_unref (event); break; } case GST_EVENT_EOS:{ GstFlowReturn flow; if (demux->state == GST_ASF_DEMUX_STATE_HEADER) { GST_ELEMENT_ERROR (demux, STREAM, DEMUX, (_("This stream contains no data.")), ("got eos and didn't receive a complete header object")); break; } flow = gst_asf_demux_push_complete_payloads (demux, TRUE); if (!demux->activated_streams) { /* If we still haven't got activated streams, the file is most likely corrupt */ GST_ELEMENT_ERROR (demux, STREAM, WRONG_TYPE, (_("This stream contains no data.")), ("got eos and didn't receive a complete header object")); break; } if (flow < GST_FLOW_EOS || flow == GST_FLOW_NOT_LINKED) { GST_ELEMENT_FLOW_ERROR (demux, flow); break; } GST_OBJECT_LOCK (demux); gst_adapter_clear (demux->adapter); GST_OBJECT_UNLOCK (demux); gst_asf_demux_send_event_unlocked (demux, event); break; } case GST_EVENT_FLUSH_STOP: GST_OBJECT_LOCK (demux); gst_asf_demux_reset_stream_state_after_discont (demux); GST_OBJECT_UNLOCK (demux); gst_asf_demux_send_event_unlocked (demux, event); /* upon activation, latency is no longer introduced, e.g. 
after seek */ if (demux->activated_streams) demux->latency = 0; break; default: ret = gst_pad_event_default (pad, parent, event); break; } return ret; } static gboolean gst_asf_demux_seek_index_lookup (GstASFDemux * demux, guint * packet, GstClockTime seek_time, GstClockTime * p_idx_time, guint * speed, gboolean next, gboolean * eos) { GstClockTime idx_time; guint idx; if (eos) *eos = FALSE; if (G_UNLIKELY (demux->sidx_num_entries == 0 || demux->sidx_interval == 0)) return FALSE; idx = (guint) ((seek_time + demux->preroll) / demux->sidx_interval); if (next) { /* if we want the next keyframe, we have to go forward till we find a different packet number */ guint idx2; if (idx >= demux->sidx_num_entries - 1) { /* If we get here, we're asking for next keyframe after the last one. There isn't one. */ if (eos) *eos = TRUE; return FALSE; } for (idx2 = idx + 1; idx2 < demux->sidx_num_entries; ++idx2) { if (demux->sidx_entries[idx].packet != demux->sidx_entries[idx2].packet) { idx = idx2; break; } } } if (G_UNLIKELY (idx >= demux->sidx_num_entries)) { if (eos) *eos = TRUE; return FALSE; } *packet = demux->sidx_entries[idx].packet; if (speed) *speed = demux->sidx_entries[idx].count; /* so we get closer to the actual time of the packet ... actually, let's not * do this, since we throw away superfluous payloads before the seek position * anyway; this way, our key unit seek 'snap resolution' is a bit better * (ie. 
same as index resolution) */ /* while (idx > 0 && demux->sidx_entries[idx-1] == demux->sidx_entries[idx]) --idx; */ idx_time = demux->sidx_interval * idx; if (G_LIKELY (idx_time >= demux->preroll)) idx_time -= demux->preroll; GST_DEBUG_OBJECT (demux, "%" GST_TIME_FORMAT " => packet %u at %" GST_TIME_FORMAT, GST_TIME_ARGS (seek_time), *packet, GST_TIME_ARGS (idx_time)); if (G_LIKELY (p_idx_time)) *p_idx_time = idx_time; return TRUE; } static void gst_asf_demux_reset_stream_state_after_discont (GstASFDemux * demux) { guint n; gst_adapter_clear (demux->adapter); GST_DEBUG_OBJECT (demux, "reset stream state"); gst_flow_combiner_reset (demux->flowcombiner); for (n = 0; n < demux->num_streams; n++) { demux->stream[n].discont = TRUE; demux->stream[n].first_buffer = TRUE; while (demux->stream[n].payloads->len > 0) { AsfPayload *payload; guint last; last = demux->stream[n].payloads->len - 1; payload = &g_array_index (demux->stream[n].payloads, AsfPayload, last); gst_buffer_replace (&payload->buf, NULL); g_array_remove_index (demux->stream[n].payloads, last); } } } static void gst_asf_demux_mark_discont (GstASFDemux * demux) { guint n; GST_DEBUG_OBJECT (demux, "Mark stream discont"); for (n = 0; n < demux->num_streams; n++) demux->stream[n].discont = TRUE; } /* do a seek in push based mode */ static gboolean gst_asf_demux_handle_seek_push (GstASFDemux * demux, GstEvent * event) { gdouble rate; GstFormat format; GstSeekFlags flags; GstSeekType cur_type, stop_type; gint64 cur, stop; guint packet; gboolean res; GstEvent *byte_event; gst_event_parse_seek (event, &rate, &format, &flags, &cur_type, &cur, &stop_type, &stop); stop_type = GST_SEEK_TYPE_NONE; stop = -1; GST_DEBUG_OBJECT (demux, "seeking to %" GST_TIME_FORMAT, GST_TIME_ARGS (cur)); /* determine packet, by index or by estimation */ if (!gst_asf_demux_seek_index_lookup (demux, &packet, cur, NULL, NULL, FALSE, NULL)) { packet = (guint) gst_util_uint64_scale (demux->num_packets, cur, demux->play_time); } if (packet > 
demux->num_packets) { GST_DEBUG_OBJECT (demux, "could not determine packet to seek to, " "seek aborted."); return FALSE; } GST_DEBUG_OBJECT (demux, "seeking to packet %d", packet); cur = demux->data_offset + ((guint64) packet * demux->packet_size); GST_DEBUG_OBJECT (demux, "Pushing BYTE seek rate %g, " "start %" G_GINT64_FORMAT ", stop %" G_GINT64_FORMAT, rate, cur, stop); /* BYTE seek event */ byte_event = gst_event_new_seek (rate, GST_FORMAT_BYTES, flags, cur_type, cur, stop_type, stop); gst_event_set_seqnum (byte_event, gst_event_get_seqnum (event)); res = gst_pad_push_event (demux->sinkpad, byte_event); return res; } static gboolean gst_asf_demux_handle_seek_event (GstASFDemux * demux, GstEvent * event) { GstClockTime idx_time; GstSegment segment; GstSeekFlags flags; GstSeekType cur_type, stop_type; GstFormat format; gboolean only_need_update; gboolean after, before, next; gboolean flush; gdouble rate; gint64 cur, stop; gint64 seek_time; guint packet, speed_count = 1; gboolean eos; guint32 seqnum; GstEvent *fevent; gint i; gst_event_parse_seek (event, &rate, &format, &flags, &cur_type, &cur, &stop_type, &stop); if (G_UNLIKELY (format != GST_FORMAT_TIME)) { GST_LOG_OBJECT (demux, "seeking is only supported in TIME format"); return FALSE; } /* upstream might handle TIME seek, e.g. mms or rtsp, or not, e.g. http, * so first try to let it handle the seek event. 
*/ if (gst_pad_push_event (demux->sinkpad, gst_event_ref (event))) return TRUE; if (G_UNLIKELY (demux->seekable == FALSE || demux->packet_size == 0 || demux->num_packets == 0 || demux->play_time == 0)) { GST_LOG_OBJECT (demux, "stream is not seekable"); return FALSE; } if (G_UNLIKELY (!demux->activated_streams)) { GST_LOG_OBJECT (demux, "streams not yet activated, ignoring seek"); return FALSE; } if (G_UNLIKELY (rate <= 0.0)) { GST_LOG_OBJECT (demux, "backward playback"); demux->seek_to_cur_pos = TRUE; for (i = 0; i < demux->num_streams; i++) { demux->stream[i].reverse_kf_ready = FALSE; } } seqnum = gst_event_get_seqnum (event); flush = ((flags & GST_SEEK_FLAG_FLUSH) == GST_SEEK_FLAG_FLUSH); demux->accurate = ((flags & GST_SEEK_FLAG_ACCURATE) == GST_SEEK_FLAG_ACCURATE); demux->keyunit_sync = ((flags & GST_SEEK_FLAG_KEY_UNIT) == GST_SEEK_FLAG_KEY_UNIT); after = ((flags & GST_SEEK_FLAG_SNAP_AFTER) == GST_SEEK_FLAG_SNAP_AFTER); before = ((flags & GST_SEEK_FLAG_SNAP_BEFORE) == GST_SEEK_FLAG_SNAP_BEFORE); next = after && !before; if (G_UNLIKELY (demux->streaming)) { /* support it safely needs more segment handling, e.g. 
closing etc */ if (!flush) { GST_LOG_OBJECT (demux, "streaming; non-flushing seek not supported"); return FALSE; } /* we can (re)construct the start later on, but not the end */ if (stop_type != GST_SEEK_TYPE_NONE && (stop_type != GST_SEEK_TYPE_SET || GST_CLOCK_TIME_IS_VALID (stop))) { GST_LOG_OBJECT (demux, "streaming; end position must be NONE"); return FALSE; } return gst_asf_demux_handle_seek_push (demux, event); } /* unlock the streaming thread */ if (G_LIKELY (flush)) { fevent = gst_event_new_flush_start (); gst_event_set_seqnum (fevent, seqnum); gst_pad_push_event (demux->sinkpad, gst_event_ref (fevent)); gst_asf_demux_send_event_unlocked (demux, fevent); } else { gst_pad_pause_task (demux->sinkpad); } /* grab the stream lock so that streaming cannot continue, for * non flushing seeks when the element is in PAUSED this could block * forever */ GST_PAD_STREAM_LOCK (demux->sinkpad); /* we now can stop flushing, since we have the stream lock now */ fevent = gst_event_new_flush_stop (TRUE); gst_event_set_seqnum (fevent, seqnum); gst_pad_push_event (demux->sinkpad, gst_event_ref (fevent)); if (G_LIKELY (flush)) gst_asf_demux_send_event_unlocked (demux, fevent); else gst_event_unref (fevent); /* operating on copy of segment until we know the seek worked */ segment = demux->segment; if (G_UNLIKELY (demux->segment_running && !flush)) { GstSegment newsegment; GstEvent *newseg; /* create the segment event to close the current segment */ gst_segment_copy_into (&segment, &newsegment); newseg = gst_event_new_segment (&newsegment); gst_event_set_seqnum (newseg, seqnum); gst_asf_demux_send_event_unlocked (demux, newseg); } gst_segment_do_seek (&segment, rate, format, flags, cur_type, cur, stop_type, stop, &only_need_update); GST_DEBUG_OBJECT (demux, "seeking to time %" GST_TIME_FORMAT ", segment: " "%" GST_SEGMENT_FORMAT, GST_TIME_ARGS (segment.start), &segment); if (cur_type != GST_SEEK_TYPE_SET) seek_time = segment.start; else seek_time = cur; /* FIXME: should check the 
KEY_UNIT flag; need to adjust position to * real start of data and segment_start to indexed time for key unit seek*/ if (G_UNLIKELY (!gst_asf_demux_seek_index_lookup (demux, &packet, seek_time, &idx_time, &speed_count, next, &eos))) { gint64 offset; if (eos) { demux->packet = demux->num_packets; goto skip; } /* First try to query our source to see if it can convert for us. This is the case when our source is an mms stream, notice that in this case gstmms will do a time based seek to get the byte offset, this is not a problem as the seek to this offset needs to happen anway. */ if (gst_pad_peer_query_convert (demux->sinkpad, GST_FORMAT_TIME, seek_time, GST_FORMAT_BYTES, &offset)) { packet = (offset - demux->data_offset) / demux->packet_size; GST_LOG_OBJECT (demux, "convert %" GST_TIME_FORMAT " to bytes query result: %" G_GINT64_FORMAT ", data_ofset: %" G_GINT64_FORMAT ", packet_size: %u," " resulting packet: %u\n", GST_TIME_ARGS (seek_time), offset, demux->data_offset, demux->packet_size, packet); } else { /* FIXME: For streams containing video, seek to an earlier position in * the hope of hitting a keyframe and let the sinks throw away the stuff * before the segment start. For audio-only this is unnecessary as every * frame is 'key'. 
*/ if (flush && (demux->accurate || (demux->keyunit_sync && !next)) && demux->num_video_streams > 0) { seek_time -= 5 * GST_SECOND; if (seek_time < 0) seek_time = 0; } packet = (guint) gst_util_uint64_scale (demux->num_packets, seek_time, demux->play_time); if (packet > demux->num_packets) packet = demux->num_packets; } } else { if (G_LIKELY (demux->keyunit_sync && !demux->accurate)) { GST_DEBUG_OBJECT (demux, "key unit seek, adjust seek_time = %" GST_TIME_FORMAT " to index_time = %" GST_TIME_FORMAT, GST_TIME_ARGS (seek_time), GST_TIME_ARGS (idx_time)); segment.start = idx_time; segment.position = idx_time; segment.time = idx_time; } } GST_DEBUG_OBJECT (demux, "seeking to packet %u (%d)", packet, speed_count); GST_OBJECT_LOCK (demux); demux->segment = segment; if (GST_ASF_DEMUX_IS_REVERSE_PLAYBACK (demux->segment)) { demux->packet = (gint64) gst_util_uint64_scale (demux->num_packets, stop, demux->play_time); } else { demux->packet = packet; } demux->need_newsegment = TRUE; demux->segment_seqnum = seqnum; demux->speed_packets = GST_ASF_DEMUX_IS_REVERSE_PLAYBACK (demux->segment) ? 
1 : speed_count; gst_asf_demux_reset_stream_state_after_discont (demux); GST_OBJECT_UNLOCK (demux); skip: /* restart our task since it might have been stopped when we did the flush */ gst_pad_start_task (demux->sinkpad, (GstTaskFunction) gst_asf_demux_loop, demux, NULL); /* streaming can continue now */ GST_PAD_STREAM_UNLOCK (demux->sinkpad); return TRUE; } static gboolean gst_asf_demux_handle_src_event (GstPad * pad, GstObject * parent, GstEvent * event) { GstASFDemux *demux; gboolean ret; demux = GST_ASF_DEMUX (parent); switch (GST_EVENT_TYPE (event)) { case GST_EVENT_SEEK: GST_LOG_OBJECT (pad, "seek event"); ret = gst_asf_demux_handle_seek_event (demux, event); gst_event_unref (event); break; case GST_EVENT_QOS: case GST_EVENT_NAVIGATION: /* just drop these two silently */ gst_event_unref (event); ret = FALSE; break; default: GST_LOG_OBJECT (pad, "%s event", GST_EVENT_TYPE_NAME (event)); ret = gst_pad_event_default (pad, parent, event); break; } return ret; } static inline guint32 gst_asf_demux_identify_guid (const ASFGuidHash * guids, ASFGuid * guid) { guint32 ret; ret = gst_asf_identify_guid (guids, guid); GST_LOG ("%s 0x%08x-0x%08x-0x%08x-0x%08x", gst_asf_get_guid_nick (guids, ret), guid->v1, guid->v2, guid->v3, guid->v4); return ret; } typedef struct { AsfObjectID id; guint64 size; } AsfObject; /* Peek for an object. * * Returns FALSE is the object is corrupted (such as the reported * object size being greater than 2**32bits. 
*/ static gboolean asf_demux_peek_object (GstASFDemux * demux, const guint8 * data, guint data_len, AsfObject * object, gboolean expect) { ASFGuid guid; /* Callers should have made sure that data_len is big enough */ g_assert (data_len >= ASF_OBJECT_HEADER_SIZE); if (data_len < ASF_OBJECT_HEADER_SIZE) return FALSE; guid.v1 = GST_READ_UINT32_LE (data + 0); guid.v2 = GST_READ_UINT32_LE (data + 4); guid.v3 = GST_READ_UINT32_LE (data + 8); guid.v4 = GST_READ_UINT32_LE (data + 12); /* FIXME: make asf_demux_identify_object_guid() */ object->id = gst_asf_demux_identify_guid (asf_object_guids, &guid); if (object->id == ASF_OBJ_UNDEFINED && expect) { GST_WARNING_OBJECT (demux, "Unknown object %08x-%08x-%08x-%08x", guid.v1, guid.v2, guid.v3, guid.v4); } object->size = GST_READ_UINT64_LE (data + 16); if (object->id != ASF_OBJ_DATA && object->size >= G_MAXUINT) { GST_WARNING_OBJECT (demux, "ASF Object size corrupted (greater than 32bit)"); return FALSE; } return TRUE; } static void gst_asf_demux_release_old_pads (GstASFDemux * demux) { GST_DEBUG_OBJECT (demux, "Releasing old pads"); while (demux->old_num_streams > 0) { gst_pad_push_event (demux->old_stream[demux->old_num_streams - 1].pad, gst_event_new_eos ()); gst_asf_demux_free_stream (demux, &demux->old_stream[demux->old_num_streams - 1]); --demux->old_num_streams; } memset (demux->old_stream, 0, sizeof (demux->old_stream)); demux->old_num_streams = 0; } static GstFlowReturn gst_asf_demux_chain_headers (GstASFDemux * demux) { AsfObject obj; guint8 *header_data, *data = NULL; const guint8 *cdata = NULL; guint64 header_size; GstFlowReturn flow = GST_FLOW_OK; cdata = (guint8 *) gst_adapter_map (demux->adapter, ASF_OBJECT_HEADER_SIZE); if (cdata == NULL) goto need_more_data; if (!asf_demux_peek_object (demux, cdata, ASF_OBJECT_HEADER_SIZE, &obj, TRUE)) goto parse_failed; if (obj.id != ASF_OBJ_HEADER) goto wrong_type; GST_LOG_OBJECT (demux, "header size = %u", (guint) obj.size); /* + 50 for non-packet data at beginning of 
ASF_OBJ_DATA */
  if (gst_adapter_available (demux->adapter) < obj.size + 50)
    goto need_more_data;

  data = gst_adapter_take (demux->adapter, obj.size + 50);

  header_data = data;
  header_size = obj.size;
  flow = gst_asf_demux_process_object (demux, &header_data, &header_size);
  if (flow != GST_FLOW_OK)
    goto parse_failed;

  /* calculate where the packet data starts */
  demux->data_offset = obj.size + 50;

  /* now parse the beginning of the ASF_OBJ_DATA object */
  if (!gst_asf_demux_parse_data_object_start (demux, data + obj.size))
    goto wrong_type;

  if (demux->num_streams == 0)
    goto no_streams;

  g_free (data);
  return GST_FLOW_OK;

/* NON-FATAL */
need_more_data:
  {
    GST_LOG_OBJECT (demux, "not enough data in adapter yet");
    return GST_FLOW_OK;
  }

/* ERRORS */
wrong_type:
  {
    GST_ELEMENT_ERROR (demux, STREAM, WRONG_TYPE, (NULL),
        ("This doesn't seem to be an ASF file"));
    g_free (data);
    return GST_FLOW_ERROR;
  }
no_streams:
parse_failed:
  {
    GST_ELEMENT_ERROR (demux, STREAM, DEMUX, (NULL),
        ("header parsing failed, or no streams found, flow = %s",
            gst_flow_get_name (flow)));
    g_free (data);
    return GST_FLOW_ERROR;
  }
}

/* Pull exactly 'size' bytes at 'offset' from upstream into *p_buf.
 * Returns FALSE (with *p_buf set to NULL and *p_flow updated when provided)
 * on any flow error or short read; a short read is mapped to GST_FLOW_EOS. */
static gboolean
gst_asf_demux_pull_data (GstASFDemux * demux, guint64 offset, guint size,
    GstBuffer ** p_buf, GstFlowReturn * p_flow)
{
  gsize buffer_size;
  GstFlowReturn flow;

  GST_LOG_OBJECT (demux, "pulling buffer at %" G_GUINT64_FORMAT "+%u",
      offset, size);

  flow = gst_pad_pull_range (demux->sinkpad, offset, size, p_buf);

  if (G_LIKELY (p_flow))
    *p_flow = flow;

  if (G_UNLIKELY (flow != GST_FLOW_OK)) {
    GST_DEBUG_OBJECT (demux, "flow %s pulling buffer at %" G_GUINT64_FORMAT
        "+%u", gst_flow_get_name (flow), offset, size);
    *p_buf = NULL;
    return FALSE;
  }

  g_assert (*p_buf != NULL);

  buffer_size = gst_buffer_get_size (*p_buf);
  if (G_UNLIKELY (buffer_size < size)) {
    GST_DEBUG_OBJECT (demux, "short read pulling buffer at %" G_GUINT64_FORMAT
        "+%u (got only %" G_GSIZE_FORMAT " bytes)", offset, size, buffer_size);
    gst_buffer_unref (*p_buf);
    if (G_LIKELY (p_flow))
      *p_flow = GST_FLOW_EOS;
    *p_buf = NULL;
    return FALSE;
  }

  return TRUE;
}

/* Pull-mode: read and process the index objects that follow the data
 * section (at demux->index_offset).  Missing indices are non-fatal. */
static GstFlowReturn
gst_asf_demux_pull_indices (GstASFDemux * demux)
{
  GstBuffer *buf = NULL;
  guint64 offset;
  guint num_read = 0;
  GstFlowReturn ret = GST_FLOW_OK;

  offset = demux->index_offset;

  if (G_UNLIKELY (offset == 0)) {
    GST_DEBUG_OBJECT (demux, "can't read indices, don't know index offset");
    /* non-fatal */
    return GST_FLOW_OK;
  }

  /* 16 + 8 = GUID plus 64-bit size, i.e. one object header */
  while (gst_asf_demux_pull_data (demux, offset, 16 + 8, &buf, NULL)) {
    AsfObject obj;
    GstMapInfo map;
    guint8 *bufdata;
    guint64 obj_size;

    gst_buffer_map (buf, &map, GST_MAP_READ);
    g_assert (map.size >= 16 + 8);
    if (!asf_demux_peek_object (demux, map.data, 16 + 8, &obj, TRUE)) {
      gst_buffer_unmap (buf, &map);
      gst_buffer_replace (&buf, NULL);
      ret = GST_FLOW_ERROR;
      break;
    }
    gst_buffer_unmap (buf, &map);
    gst_buffer_replace (&buf, NULL);

    /* check for sanity */
    if (G_UNLIKELY (obj.size > (5 * 1024 * 1024))) {
      GST_DEBUG_OBJECT (demux, "implausible index object size, bailing out");
      break;
    }

    if (G_UNLIKELY (!gst_asf_demux_pull_data (demux, offset, obj.size, &buf,
                NULL)))
      break;

    GST_LOG_OBJECT (demux, "index object at offset 0x%" G_GINT64_MODIFIER "X"
        ", size %u", offset, (guint) obj.size);

    offset += obj.size;         /* increase before _process_object changes it */

    gst_buffer_map (buf, &map, GST_MAP_READ);
    g_assert (map.size >= obj.size);
    bufdata = (guint8 *) map.data;
    obj_size = obj.size;
    ret = gst_asf_demux_process_object (demux, &bufdata, &obj_size);
    gst_buffer_unmap (buf, &map);
    gst_buffer_replace (&buf, NULL);

    if (G_UNLIKELY (ret != GST_FLOW_OK))
      break;

    ++num_read;
  }

  GST_DEBUG_OBJECT (demux, "read %u index objects", num_read);

  return ret;
}

/* Parse the first 50 bytes of the DATA object (header + file GUID +
 * total-packet count) and derive data_size/index_offset/num_packets. */
static gboolean
gst_asf_demux_parse_data_object_start (GstASFDemux * demux, guint8 * data)
{
  AsfObject obj;

  if (!asf_demux_peek_object (demux, data, 50, &obj, TRUE)) {
    GST_WARNING_OBJECT (demux, "Corrupted data");
    return FALSE;
  }
  if (obj.id != ASF_OBJ_DATA) {
    GST_WARNING_OBJECT (demux, "headers not followed by a DATA object");
    return FALSE;
  }

  demux->state =
GST_ASF_DEMUX_STATE_DATA;

  if (!demux->broadcast && obj.size > 50) {
    /* 50 bytes of the DATA object are header, not packet data */
    demux->data_size = obj.size - 50;
    /* CHECKME: for at least one file this is off by +158 bytes?! */
    demux->index_offset = demux->data_offset + demux->data_size;
  } else {
    demux->data_size = 0;
    demux->index_offset = 0;
  }

  demux->packet = 0;

  if (!demux->broadcast) {
    /* skip object header (24 bytes) and file GUID (16 bytes) */
    demux->num_packets = GST_READ_UINT64_LE (data + (16 + 8) + 16);
  } else {
    demux->num_packets = 0;
  }

  if (demux->num_packets == 0)
    demux->seekable = FALSE;

  /* fallback in the unlikely case that headers are inconsistent, can't hurt */
  if (demux->data_size == 0 && demux->num_packets > 0) {
    demux->data_size = demux->num_packets * demux->packet_size;
    demux->index_offset = demux->data_offset + demux->data_size;
  }

  /* process pending stream objects and create pads for those */
  gst_asf_demux_process_queued_extended_stream_objects (demux);

  GST_INFO_OBJECT (demux, "Stream has %" G_GUINT64_FORMAT " packets, "
      "data_offset=%" G_GINT64_FORMAT ", data_size=%" G_GINT64_FORMAT
      ", index_offset=%" G_GUINT64_FORMAT, demux->num_packets,
      demux->data_offset, demux->data_size, demux->index_offset);

  return TRUE;
}

/* Pull-mode header parsing: pull the HEADER object, process it, then pull
 * and parse the beginning of the DATA object.  On failure *pflow carries
 * the flow error back to the caller. */
static gboolean
gst_asf_demux_pull_headers (GstASFDemux * demux, GstFlowReturn * pflow)
{
  GstFlowReturn flow = GST_FLOW_OK;
  AsfObject obj;
  GstBuffer *buf = NULL;
  guint64 size;
  GstMapInfo map;
  guint8 *bufdata;

  GST_LOG_OBJECT (demux, "reading headers");

  /* pull HEADER object header, so we know its size */
  if (!gst_asf_demux_pull_data (demux, demux->base_offset, 16 + 8, &buf, &flow))
    goto read_failed;

  gst_buffer_map (buf, &map, GST_MAP_READ);
  g_assert (map.size >= 16 + 8);
  if (!asf_demux_peek_object (demux, map.data, 16 + 8, &obj, TRUE)) {
    gst_buffer_unmap (buf, &map);
    gst_buffer_replace (&buf, NULL);
    flow = GST_FLOW_ERROR;
    goto read_failed;
  }
  gst_buffer_unmap (buf, &map);
  gst_buffer_replace (&buf, NULL);

  if (obj.id != ASF_OBJ_HEADER)
    goto wrong_type;

  GST_LOG_OBJECT (demux, "header size = %" G_GUINT64_FORMAT, obj.size);

  /* pull HEADER object */
  if (!gst_asf_demux_pull_data (demux, demux->base_offset, obj.size, &buf,
          &flow))
    goto read_failed;

  size = obj.size;              /* don't want obj.size changed */
  gst_buffer_map (buf, &map, GST_MAP_READ);
  g_assert (map.size >= size);
  bufdata = (guint8 *) map.data;
  flow = gst_asf_demux_process_object (demux, &bufdata, &size);

  gst_buffer_unmap (buf, &map);
  gst_buffer_replace (&buf, NULL);

  if (flow != GST_FLOW_OK) {
    GST_WARNING_OBJECT (demux, "process_object: %s", gst_flow_get_name (flow));
    goto parse_failed;
  }

  /* calculate where the packet data starts */
  demux->data_offset = demux->base_offset + obj.size + 50;

  /* now pull beginning of DATA object before packet data */
  if (!gst_asf_demux_pull_data (demux, demux->base_offset + obj.size, 50, &buf,
          &flow))
    goto read_failed;

  gst_buffer_map (buf, &map, GST_MAP_READ);
  /* NOTE(review): this buffer was pulled with size 50, but the assert still
   * compares against 'size' (the HEADER object size) — looks like it should
   * be 'map.size >= 50'; confirm against upstream before changing. */
  g_assert (map.size >= size);
  bufdata = (guint8 *) map.data;
  if (!gst_asf_demux_parse_data_object_start (demux, bufdata))
    goto wrong_type;

  if (demux->num_streams == 0)
    goto no_streams;

  gst_buffer_unmap (buf, &map);
  gst_buffer_replace (&buf, NULL);

  return TRUE;

/* ERRORS */
wrong_type:
  {
    if (buf != NULL) {
      gst_buffer_unmap (buf, &map);
      gst_buffer_replace (&buf, NULL);
    }
    GST_ELEMENT_ERROR (demux, STREAM, WRONG_TYPE, (NULL),
        ("This doesn't seem to be an ASF file"));
    *pflow = GST_FLOW_ERROR;
    return FALSE;
  }

no_streams:
  flow = GST_FLOW_ERROR;
  GST_ELEMENT_ERROR (demux, STREAM, DEMUX, (NULL),
      ("header parsing failed, or no streams found, flow = %s",
          gst_flow_get_name (flow)));
read_failed:
parse_failed:
  {
    if (buf)
      gst_buffer_unmap (buf, &map);
    gst_buffer_replace (&buf, NULL);
    if (flow == ASF_FLOW_NEED_MORE_DATA)
      flow = GST_FLOW_ERROR;
    *pflow = flow;
    return FALSE;
  }
}

/* Check whether every stream has queued data past the preroll point. */
static gboolean
all_streams_prerolled (GstASFDemux * demux)
{
  GstClockTime preroll_time;
  guint i, num_no_data = 0;

  /* Allow at least 500ms of preroll_time  */
  preroll_time = MAX (demux->preroll, 500 * GST_MSECOND);

  /* returns TRUE as long as there isn't a stream
which (a) has data queued
   * and (b) the timestamp of last piece of data queued is < demux->preroll
   * AND there is at least one other stream with data queued */
  for (i = 0; i < demux->num_streams; ++i) {
    AsfPayload *last_payload = NULL;
    AsfStream *stream;
    gint last_idx;

    stream = &demux->stream[i];
    if (G_UNLIKELY (stream->payloads->len == 0)) {
      ++num_no_data;
      GST_LOG_OBJECT (stream->pad, "no data queued");
      continue;
    }

    /* find last payload with timestamp */
    for (last_idx = stream->payloads->len - 1;
        last_idx >= 0 && (last_payload == NULL
            || !GST_CLOCK_TIME_IS_VALID (last_payload->ts)); --last_idx) {
      last_payload = &g_array_index (stream->payloads, AsfPayload, last_idx);
    }

    GST_LOG_OBJECT (stream->pad, "checking if %" GST_TIME_FORMAT " > %"
        GST_TIME_FORMAT, GST_TIME_ARGS (last_payload->ts),
        GST_TIME_ARGS (preroll_time));
    if (G_UNLIKELY (!GST_CLOCK_TIME_IS_VALID (last_payload->ts)
            || last_payload->ts <= preroll_time)) {
      GST_LOG_OBJECT (stream->pad, "not beyond preroll point yet");
      return FALSE;
    }
  }

  if (G_UNLIKELY (num_no_data > 0))
    return FALSE;

  return TRUE;
}

#if 0
/* (dead code, kept for reference) Whether activating 'stream' would clash
 * with an already-active stream in the same mutual-exclusion group. */
static gboolean
gst_asf_demux_have_mutually_exclusive_active_stream (GstASFDemux * demux,
    AsfStream * stream)
{
  GSList *l;

  for (l = demux->mut_ex_streams; l != NULL; l = l->next) {
    guint8 *mes;

    /* check for each mutual exclusion group whether it affects this stream */
    for (mes = (guint8 *) l->data; mes != NULL && *mes != 0xff; ++mes) {
      if (*mes == stream->id) {
        /* we are in this group; let's check if we've already activated streams
         * that are in the same group (and hence mutually exclusive to this
         * one) */
        for (mes = (guint8 *) l->data; mes != NULL && *mes != 0xff; ++mes) {
          guint i;

          for (i = 0; i < demux->num_streams; ++i) {
            if (demux->stream[i].id == *mes && demux->stream[i].active) {
              GST_LOG_OBJECT (demux, "stream with ID %d is mutually exclusive "
                  "to already active stream with ID %d", stream->id,
                  demux->stream[i].id);
              return TRUE;
            }
          }
        }
        /* we can only be in this group once, let's break out and move on to
         * the next mutual exclusion group */
        break;
      }
    }
  }

  return FALSE;
}
#endif

/* Record the first queued payload timestamp as the segment timestamp and,
 * when streaming, (re)configure the output segment from it. */
static void
gst_asf_demux_check_segment_ts (GstASFDemux * demux, GstClockTime payload_ts)
{
  /* remember the first queued timestamp for the segment */
  if (G_UNLIKELY (!GST_CLOCK_TIME_IS_VALID (demux->segment_ts) &&
          GST_CLOCK_TIME_IS_VALID (demux->first_ts))) {
    GST_DEBUG_OBJECT (demux, "segment ts: %" GST_TIME_FORMAT,
        GST_TIME_ARGS (demux->first_ts));
    demux->segment_ts = payload_ts;
    /* always note, but only determines segment when streaming */
    if (demux->streaming)
      gst_segment_do_seek (&demux->segment, demux->in_segment.rate,
          GST_FORMAT_TIME, (GstSeekFlags) demux->segment.flags,
          GST_SEEK_TYPE_SET, demux->segment_ts, GST_SEEK_TYPE_NONE, 0, NULL);
  }
}

/* Establish demux->first_ts as the smallest queued timestamp across all
 * streams and rebase every queued payload timestamp onto it.  Returns FALSE
 * when more data is needed (unless 'force' is set). */
static gboolean
gst_asf_demux_check_first_ts (GstASFDemux * demux, gboolean force)
{
  if (G_UNLIKELY (!GST_CLOCK_TIME_IS_VALID (demux->first_ts))) {
    GstClockTime first_ts = GST_CLOCK_TIME_NONE;
    int i;

    /* go through each stream, find smallest timestamp */
    for (i = 0; i < demux->num_streams; ++i) {
      AsfStream *stream;
      int j;
      GstClockTime stream_min_ts = GST_CLOCK_TIME_NONE;
      GstClockTime stream_min_ts2 = GST_CLOCK_TIME_NONE;        /* second smallest timestamp */

      stream = &demux->stream[i];

      for (j = 0; j < stream->payloads->len; ++j) {
        AsfPayload *payload = &g_array_index (stream->payloads, AsfPayload, j);
        if (GST_CLOCK_TIME_IS_VALID (payload->ts) &&
            (!GST_CLOCK_TIME_IS_VALID (stream_min_ts)
                || stream_min_ts > payload->ts)) {
          stream_min_ts = payload->ts;
        }
        if (GST_CLOCK_TIME_IS_VALID (payload->ts) &&
            payload->ts > stream_min_ts &&
            (!GST_CLOCK_TIME_IS_VALID (stream_min_ts2)
                || stream_min_ts2 > payload->ts)) {
          stream_min_ts2 = payload->ts;
        }
      }

      /* there are some DVR ms files where first packet has TS of 0 (instead
         of -1) while subsequent packets have regular (significantly larger)
         timestamps. If we don't deal with it, we may end up with huge gap in
         timestamps which makes playback stuck. The 0 timestamp may also be
         valid though, if the second packet timestamp continues from it. I
         haven't found a better way to distinguish between these two, except
         to set an arbitrary boundary and disregard the first 0 timestamp if
         the second timestamp is bigger than the boundary) */

      if (stream_min_ts == 0 && stream_min_ts2 == GST_CLOCK_TIME_NONE
          && !force)            /* still waiting for the second timestamp */
        return FALSE;

      if (stream_min_ts == 0 && stream_min_ts2 > GST_SECOND)    /* first timestamp is 0 and second is significantly larger, disregard the 0 */
        stream_min_ts = stream_min_ts2;

      /* if we don't have timestamp for this stream, wait for more data */
      if (!GST_CLOCK_TIME_IS_VALID (stream_min_ts) && !force)
        return FALSE;

      if (GST_CLOCK_TIME_IS_VALID (stream_min_ts) &&
          (!GST_CLOCK_TIME_IS_VALID (first_ts) || first_ts > stream_min_ts))
        first_ts = stream_min_ts;
    }

    if (!GST_CLOCK_TIME_IS_VALID (first_ts))    /* can happen with force = TRUE */
      first_ts = 0;

    demux->first_ts = first_ts;

    /* update packets queued before we knew first timestamp */
    for (i = 0; i < demux->num_streams; ++i) {
      AsfStream *stream;
      int j;

      stream = &demux->stream[i];

      for (j = 0; j < stream->payloads->len; ++j) {
        AsfPayload *payload = &g_array_index (stream->payloads, AsfPayload, j);
        if (GST_CLOCK_TIME_IS_VALID (payload->ts)) {
          if (payload->ts > first_ts)
            payload->ts -= first_ts;
          else
            payload->ts = 0;
        }
      }
    }
  }

  gst_asf_demux_check_segment_ts (demux, 0);

  return TRUE;
}

/* Sniff the queued payload bytes with typefind to fix up unreliable dvr-ms
 * codec tags; on success replaces stream->caps and returns TRUE. */
static gboolean
gst_asf_demux_update_caps_from_payload (GstASFDemux * demux, AsfStream * stream)
{
  /* try to determine whether the stream is AC-3 or MPEG;
     In dvr-ms the codecTag is unreliable and often set wrong,
     inspecting the data is the only way that seem to be working */
  GstTypeFindProbability prob = GST_TYPE_FIND_NONE;
  GstCaps *caps = NULL;
  int i;
  GstAdapter *adapter = gst_adapter_new ();

  for (i = 0; i < stream->payloads->len && prob < GST_TYPE_FIND_LIKELY; ++i) {
    const guint8 *data;
    AsfPayload *payload;
    int len;

    payload = &g_array_index (stream->payloads, AsfPayload, i);
    gst_adapter_push (adapter, gst_buffer_ref (payload->buf));
    len = gst_adapter_available (adapter);
    data = gst_adapter_map (adapter, len);

  again:

#define MIN_LENGTH 128

    /* look for the sync points */
    while (TRUE) {
      if (len < MIN_LENGTH ||   /* give typefind something to work on */
          (data[0] == 0x0b && data[1] == 0x77) ||       /* AC-3 sync point */
          (data[0] == 0xFF && ((data[1] & 0xF0) >> 4) == 0xF))  /* MPEG sync point */
        break;
      ++data;
      --len;
    }

    gst_caps_take (&caps,
        gst_type_find_helper_for_data (GST_OBJECT (demux), data, len, &prob));

    if (prob < GST_TYPE_FIND_LIKELY) {
      ++data;
      --len;
      if (len > MIN_LENGTH)
        /* this wasn't it, look for another sync point */
        goto again;
    }

    gst_adapter_unmap (adapter);
  }

  gst_object_unref (adapter);

  if (caps) {
    gst_caps_take (&stream->caps, caps);
    return TRUE;
  } else {
    return FALSE;
  }
}

/* Activate all streams that have data queued once prerolling is done (or
 * 'force' is set); returns FALSE if activation must wait for more data. */
static gboolean
gst_asf_demux_check_activate_streams (GstASFDemux * demux, gboolean force)
{
  guint i, actual_streams = 0;

  if (demux->activated_streams)
    return TRUE;

  if (G_UNLIKELY (!gst_asf_demux_check_first_ts (demux, force)))
    return FALSE;

  if (!all_streams_prerolled (demux) && !force) {
    GST_DEBUG_OBJECT (demux, "not all streams with data beyond preroll yet");
    return FALSE;
  }

  for (i = 0; i < demux->num_streams; ++i) {
    AsfStream *stream = &demux->stream[i];

    if (stream->payloads->len > 0) {

      if (stream->inspect_payload &&    /* dvr-ms required payload inspection */
          !stream->active &&    /* do not inspect active streams (caps were already set) */
          !gst_asf_demux_update_caps_from_payload (demux, stream) &&    /* failed to determine caps */
          stream->payloads->len < 20) { /* if we couldn't determine the caps from 20 packets then just give up and use whatever was in codecTag */
        /* try to gather some more data  */
        return FALSE;
      }
      /* we don't check mutual exclusion stuff here; either we have data for
       * a stream, then we active it, or we don't, then we'll ignore it */
      GST_LOG_OBJECT (stream->pad, "is prerolled - activate!");
gst_asf_demux_activate_stream (demux, stream);
      actual_streams += 1;
    } else {
      GST_LOG_OBJECT (stream->pad, "no data, ignoring stream");
    }
  }

  if (actual_streams == 0) {
    /* We don't have any streams activated ! */
    GST_ERROR_OBJECT (demux, "No streams activated!");
    return FALSE;
  }

  gst_asf_demux_release_old_pads (demux);

  demux->activated_streams = TRUE;
  GST_LOG_OBJECT (demux, "signalling no more pads");
  gst_element_no_more_pads (GST_ELEMENT (demux));
  return TRUE;
}

/* returns the stream that has a complete payload with the lowest timestamp
 * queued, or NULL (we push things by timestamp because during the internal
 * prerolling we might accumulate more data then the external queues can take,
 * so we'd lock up if we pushed all accumulated data for stream N in one go) */
static AsfStream *
gst_asf_demux_find_stream_with_complete_payload (GstASFDemux * demux)
{
  AsfPayload *best_payload = NULL;
  AsfStream *best_stream = NULL;
  guint i;

  for (i = 0; i < demux->num_streams; ++i) {
    AsfStream *stream;
    int j;

    stream = &demux->stream[i];

    /* Don't push any data until we have at least one payload that falls within
     * the current segment. This way we can remove out-of-segment payloads that
     * don't need to be decoded after a seek, sending only data from the
     * keyframe directly before our segment start */
    if (stream->payloads->len > 0) {
      AsfPayload *payload = NULL;
      gint last_idx;

      if (GST_ASF_DEMUX_IS_REVERSE_PLAYBACK (demux->segment)) {
        /* Reverse playback */

        if (stream->is_video) {
          /* We have to push payloads from KF to the first frame we
           * accumulated (reverse order) */
          if (stream->reverse_kf_ready) {
            payload =
                &g_array_index (stream->payloads, AsfPayload, stream->kf_pos);
            if (G_UNLIKELY (!GST_CLOCK_TIME_IS_VALID (payload->ts))) {
              /* TODO : remove payload from the list? */
              continue;
            }
          } else {
            continue;
          }
        } else {
          /* find first complete payload with timestamp */
          for (j = stream->payloads->len - 1;
              j >= 0 && (payload == NULL
                  || !GST_CLOCK_TIME_IS_VALID (payload->ts)); --j) {
            payload = &g_array_index (stream->payloads, AsfPayload, j);
          }

          /* If there's a complete payload queued for this stream */
          if (!gst_asf_payload_is_complete (payload))
            continue;

        }
      } else {

        /* find last payload with timestamp */
        for (last_idx = stream->payloads->len - 1;
            last_idx >= 0 && (payload == NULL
                || !GST_CLOCK_TIME_IS_VALID (payload->ts)); --last_idx) {
          payload = &g_array_index (stream->payloads, AsfPayload, last_idx);
        }

        /* if this is first payload after seek we might need to update the
         * segment */
        if (GST_CLOCK_TIME_IS_VALID (payload->ts))
          gst_asf_demux_check_segment_ts (demux, payload->ts);

        if (G_UNLIKELY (GST_CLOCK_TIME_IS_VALID (payload->ts) &&
                (payload->ts < demux->segment.start))) {
          if (G_UNLIKELY ((!demux->keyunit_sync) && (!demux->accurate)
                  && payload->keyframe)) {
            GST_DEBUG_OBJECT (stream->pad,
                "Found keyframe, updating segment start to %" GST_TIME_FORMAT,
                GST_TIME_ARGS (payload->ts));
            demux->segment.start = payload->ts;
            demux->segment.time = payload->ts;
          } else {
            GST_DEBUG_OBJECT (stream->pad, "Last queued payload has timestamp %"
                GST_TIME_FORMAT " which is before our segment start %"
                GST_TIME_FORMAT ", not pushing yet",
                GST_TIME_ARGS (payload->ts),
                GST_TIME_ARGS (demux->segment.start));
            continue;
          }
        }

        payload = NULL;
        /* find first complete payload with timestamp */
        for (j = 0;
            j < stream->payloads->len && (payload == NULL
                || !GST_CLOCK_TIME_IS_VALID (payload->ts)); ++j) {
          payload = &g_array_index (stream->payloads, AsfPayload, j);
        }

        /* Now see if there's a complete payload queued for this stream */
        if (!gst_asf_payload_is_complete (payload))
          continue;
      }

      /* ...
and whether its timestamp is lower than the current best */
      if (best_stream == NULL || best_payload->ts > payload->ts) {
        best_stream = stream;
        best_payload = payload;
      }
    }
  }

  return best_stream;
}

/* Push all complete queued payloads downstream in timestamp order,
 * activating streams and emitting the pending segment/tag events first.
 * 'force' activates streams even before preroll is satisfied (used at EOS). */
static GstFlowReturn
gst_asf_demux_push_complete_payloads (GstASFDemux * demux, gboolean force)
{
  AsfStream *stream;
  GstFlowReturn ret = GST_FLOW_OK;

  if (G_UNLIKELY (!demux->activated_streams)) {
    if (!gst_asf_demux_check_activate_streams (demux, force))
      return GST_FLOW_OK;
    /* streams are now activated */
  }

  while ((stream = gst_asf_demux_find_stream_with_complete_payload (demux))) {
    AsfPayload *payload;
    GstClockTime timestamp = GST_CLOCK_TIME_NONE;
    GstClockTime duration = GST_CLOCK_TIME_NONE;

    /* wait until we had a chance to "lock on" some payload's timestamp */
    if (G_UNLIKELY (demux->need_newsegment
            && !GST_CLOCK_TIME_IS_VALID (demux->segment_ts)))
      return GST_FLOW_OK;

    if (GST_ASF_DEMUX_IS_REVERSE_PLAYBACK (demux->segment)
        && stream->is_video && stream->payloads->len) {
      payload = &g_array_index (stream->payloads, AsfPayload, stream->kf_pos);
    } else {
      payload = &g_array_index (stream->payloads, AsfPayload, 0);
    }

    /* do we need to send a newsegment event */
    if ((G_UNLIKELY (demux->need_newsegment))) {
      GstEvent *segment_event;

      /* safe default if insufficient upstream info */
      if (!GST_CLOCK_TIME_IS_VALID (demux->in_gap))
        demux->in_gap = 0;

      if (demux->segment.stop == GST_CLOCK_TIME_NONE &&
          demux->segment.duration > 0) {
        /* slight HACK; prevent clipping of last bit */
        demux->segment.stop = demux->segment.duration + demux->in_gap;
      }

      /* FIXME : only if ACCURATE ! */
      if (G_LIKELY (!demux->keyunit_sync && !demux->accurate
              && (GST_CLOCK_TIME_IS_VALID (payload->ts)))
          && !GST_ASF_DEMUX_IS_REVERSE_PLAYBACK (demux->segment)) {
        GST_DEBUG ("Adjusting newsegment start to %" GST_TIME_FORMAT,
            GST_TIME_ARGS (payload->ts));
        demux->segment.start = payload->ts;
        demux->segment.time = payload->ts;
      }

      GST_DEBUG_OBJECT (demux, "sending new-segment event %"
          GST_SEGMENT_FORMAT, &demux->segment);

      /* note: we fix up all timestamps to start from 0, so this should be ok */
      segment_event = gst_event_new_segment (&demux->segment);
      if (demux->segment_seqnum)
        gst_event_set_seqnum (segment_event, demux->segment_seqnum);
      gst_asf_demux_send_event_unlocked (demux, segment_event);

      /* now post any global tags we may have found */
      if (demux->taglist == NULL) {
        demux->taglist = gst_tag_list_new_empty ();
        gst_tag_list_set_scope (demux->taglist, GST_TAG_SCOPE_GLOBAL);
      }

      gst_tag_list_add (demux->taglist, GST_TAG_MERGE_REPLACE,
          GST_TAG_CONTAINER_FORMAT, "ASF", NULL);

      GST_DEBUG_OBJECT (demux, "global tags: %" GST_PTR_FORMAT, demux->taglist);
      gst_asf_demux_send_event_unlocked (demux,
          gst_event_new_tag (demux->taglist));
      demux->taglist = NULL;

      demux->need_newsegment = FALSE;
      demux->segment_seqnum = 0;
      demux->segment_running = TRUE;
    }

    /* Do we have tags pending for this stream? */
    if (G_UNLIKELY (stream->pending_tags)) {
      GST_LOG_OBJECT (stream->pad, "%" GST_PTR_FORMAT, stream->pending_tags);
      gst_pad_push_event (stream->pad,
          gst_event_new_tag (stream->pending_tags));
      stream->pending_tags = NULL;
    }

    /* We have the whole packet now so we should push the packet to
     * the src pad now. First though we should check if we need to do
     * descrambling */
    if (G_UNLIKELY (stream->span > 1)) {
      gst_asf_demux_descramble_buffer (demux, stream, &payload->buf);
    }

    payload->buf = gst_buffer_make_writable (payload->buf);

    if (G_LIKELY (!payload->keyframe)) {
      GST_BUFFER_FLAG_SET (payload->buf, GST_BUFFER_FLAG_DELTA_UNIT);
    }

    if (G_UNLIKELY (stream->discont)) {
      GST_DEBUG_OBJECT (stream->pad, "marking DISCONT on stream");
      GST_BUFFER_FLAG_SET (payload->buf, GST_BUFFER_FLAG_DISCONT);
      stream->discont = FALSE;
    }

    if (G_UNLIKELY (stream->is_video && payload->par_x && payload->par_y &&
            (payload->par_x != stream->par_x) &&
            (payload->par_y != stream->par_y))) {
      GST_DEBUG ("Updating PAR (%d/%d => %d/%d)",
          stream->par_x, stream->par_y, payload->par_x, payload->par_y);
      stream->par_x = payload->par_x;
      stream->par_y = payload->par_y;
      stream->caps = gst_caps_make_writable (stream->caps);
      gst_caps_set_simple (stream->caps, "pixel-aspect-ratio",
          GST_TYPE_FRACTION, stream->par_x, stream->par_y, NULL);
      gst_pad_set_caps (stream->pad, stream->caps);
    }

    if (G_UNLIKELY (stream->interlaced != payload->interlaced)) {
      GST_DEBUG ("Updating interlaced status (%d => %d)", stream->interlaced,
          payload->interlaced);
      stream->interlaced = payload->interlaced;
      stream->caps = gst_caps_make_writable (stream->caps);
      /* NOTE(review): "interlace-mode" is set with G_TYPE_BOOLEAN but given a
       * string value ("mixed"/"progressive") — this looks like it should be
       * G_TYPE_STRING; confirm against upstream before changing. */
      gst_caps_set_simple (stream->caps, "interlace-mode", G_TYPE_BOOLEAN,
          (stream->interlaced ? "mixed" : "progressive"), NULL);
      gst_pad_set_caps (stream->pad, stream->caps);
    }

    /* (sort of) interpolate timestamps using upstream "frame of reference",
     * typically useful for live src, but might (unavoidably) mess with
     * position reporting if a live src is playing not so live content
     * (e.g. rtspsrc taking some time to fall back to tcp) */
    timestamp = payload->ts;
    if (GST_CLOCK_TIME_IS_VALID (timestamp)
        && !GST_ASF_DEMUX_IS_REVERSE_PLAYBACK (demux->segment)) {
      timestamp += demux->in_gap;

      /* Check if we're after the segment already, if so no need to push
       * anything here */
      if (demux->segment.stop != -1 && timestamp > demux->segment.stop) {
        GST_DEBUG_OBJECT (stream->pad,
            "Payload after segment stop %" GST_TIME_FORMAT,
            GST_TIME_ARGS (demux->segment.stop));
        ret =
            gst_flow_combiner_update_pad_flow (demux->flowcombiner,
            stream->pad, GST_FLOW_EOS);
        gst_buffer_unref (payload->buf);
        payload->buf = NULL;
        g_array_remove_index (stream->payloads, 0);
        /* Break out as soon as we have an issue */
        if (G_UNLIKELY (ret != GST_FLOW_OK))
          break;

        continue;
      }
    }

    GST_BUFFER_PTS (payload->buf) = timestamp;

    if (payload->duration == GST_CLOCK_TIME_NONE
        && stream->ext_props.avg_time_per_frame != 0) {
      /* avg_time_per_frame is in 100ns units */
      duration = stream->ext_props.avg_time_per_frame * 100;
    } else {
      duration = payload->duration;
    }
    GST_BUFFER_DURATION (payload->buf) = duration;

    /* FIXME: we should really set durations on buffers if we can */

    GST_LOG_OBJECT (stream->pad, "pushing buffer, %" GST_PTR_FORMAT,
        payload->buf);

    if (GST_ASF_DEMUX_IS_REVERSE_PLAYBACK (demux->segment) && stream->is_video) {
      if (stream->reverse_kf_ready == TRUE && stream->kf_pos == 0) {
        GST_BUFFER_FLAG_SET (payload->buf, GST_BUFFER_FLAG_DISCONT);
      }
    } else if (GST_ASF_DEMUX_IS_REVERSE_PLAYBACK (demux->segment)) {
      GST_BUFFER_FLAG_SET (payload->buf, GST_BUFFER_FLAG_DISCONT);
    }

    if (stream->active) {
      if (G_UNLIKELY (stream->first_buffer)) {
        if (stream->streamheader != NULL) {
          GST_DEBUG_OBJECT (stream->pad,
              "Pushing streamheader before first buffer");
          gst_pad_push (stream->pad, gst_buffer_ref (stream->streamheader));
        }
        stream->first_buffer = FALSE;
      }

      if (GST_CLOCK_TIME_IS_VALID (timestamp)
          && timestamp > demux->segment.position) {
        demux->segment.position = timestamp;
        /* NOTE(review): adds 'timestamp' again when a valid duration exists,
         * doubling position — presumably this should add 'duration'; confirm
         * against upstream before changing. */
        if (GST_CLOCK_TIME_IS_VALID (duration))
          demux->segment.position += timestamp;
      }

      ret =
gst_pad_push (stream->pad, payload->buf);
      ret =
          gst_flow_combiner_update_pad_flow (demux->flowcombiner, stream->pad,
          ret);
    } else {
      gst_buffer_unref (payload->buf);
      ret = GST_FLOW_OK;
    }
    payload->buf = NULL;

    if (GST_ASF_DEMUX_IS_REVERSE_PLAYBACK (demux->segment) && stream->is_video
        && stream->reverse_kf_ready) {
      /* reverse video: consume from the keyframe position backwards */
      g_array_remove_index (stream->payloads, stream->kf_pos);
      stream->kf_pos--;

      if (stream->reverse_kf_ready == TRUE && stream->kf_pos < 0) {
        stream->kf_pos = 0;
        stream->reverse_kf_ready = FALSE;
      }
    } else {
      g_array_remove_index (stream->payloads, 0);
    }

    /* Break out as soon as we have an issue */
    if (G_UNLIKELY (ret != GST_FLOW_OK))
      break;
  }

  return ret;
}

/* TRUE if 'buf' starts with a valid ASF HEADER object (used to detect
 * chained ASF streams). */
static gboolean
gst_asf_demux_check_buffer_is_header (GstASFDemux * demux, GstBuffer * buf)
{
  AsfObject obj;
  GstMapInfo map;
  gboolean valid;

  g_assert (buf != NULL);

  GST_LOG_OBJECT (demux, "Checking if buffer is a header");

  gst_buffer_map (buf, &map, GST_MAP_READ);

  /* we return false on buffer too small */
  if (map.size < ASF_OBJECT_HEADER_SIZE) {
    gst_buffer_unmap (buf, &map);
    return FALSE;
  }

  /* check if it is a header */
  valid =
      asf_demux_peek_object (demux, map.data, ASF_OBJECT_HEADER_SIZE, &obj,
      TRUE);
  gst_buffer_unmap (buf, &map);
  if (valid && obj.id == ASF_OBJ_HEADER) {
    return TRUE;
  }

  return FALSE;
}

/* Pull-mode: probe right after the current data section for a new HEADER
 * object; if found, record it as the new base offset and return TRUE. */
static gboolean
gst_asf_demux_check_chained_asf (GstASFDemux * demux)
{
  guint64 off = demux->data_offset + (demux->packet * demux->packet_size);
  GstFlowReturn ret = GST_FLOW_OK;
  GstBuffer *buf = NULL;
  gboolean header = FALSE;

  /* TODO maybe we should skip index objects after the data and look
   * further for a new header */
  if (gst_asf_demux_pull_data (demux, off, ASF_OBJECT_HEADER_SIZE, &buf, &ret)) {
    g_assert (buf != NULL);
    /* check if it is a header */
    if (gst_asf_demux_check_buffer_is_header (demux, buf)) {
      GST_DEBUG_OBJECT (demux, "new base offset: %" G_GUINT64_FORMAT, off);
      demux->base_offset = off;
      header = TRUE;
    }

    gst_buffer_unref (buf);
  }

  return header;
}

/* Pull-mode streaming task: reads headers/indices on first iteration, then
 * pulls and parses one batch of packets per call, pushing complete payloads
 * downstream; handles EOS, chained ASF files and task pausing. */
static void
gst_asf_demux_loop (GstASFDemux * demux)
{
  GstFlowReturn flow = GST_FLOW_OK;
  GstBuffer *buf = NULL;
  guint64 off;

  if (G_UNLIKELY (demux->state == GST_ASF_DEMUX_STATE_HEADER)) {
    if (!gst_asf_demux_pull_headers (demux, &flow)) {
      goto pause;
    }

    flow = gst_asf_demux_pull_indices (demux);
    if (flow != GST_FLOW_OK)
      goto pause;
  }

  g_assert (demux->state == GST_ASF_DEMUX_STATE_DATA);

  if (G_UNLIKELY (demux->num_packets != 0
          && demux->packet >= demux->num_packets))
    goto eos;

  GST_LOG_OBJECT (demux, "packet %u/%u", (guint) demux->packet + 1,
      (guint) demux->num_packets);

  off = demux->data_offset + (demux->packet * demux->packet_size);

  if (G_UNLIKELY (!gst_asf_demux_pull_data (demux, off,
              demux->packet_size * demux->speed_packets, &buf, &flow))) {
    GST_DEBUG_OBJECT (demux, "got flow %s", gst_flow_get_name (flow));
    if (flow == GST_FLOW_EOS) {
      goto eos;
    } else if (flow == GST_FLOW_FLUSHING) {
      GST_DEBUG_OBJECT (demux, "Not fatal");
      goto pause;
    } else {
      goto read_failed;
    }
  }

  if (G_LIKELY (demux->speed_packets == 1)) {
    GstAsfDemuxParsePacketError err;
    err = gst_asf_demux_parse_packet (demux, buf);
    if (G_UNLIKELY (err != GST_ASF_DEMUX_PARSE_PACKET_ERROR_NONE)) {
      /* when we don't know when the data object ends, we should check
       * for a chained asf */
      if (demux->num_packets == 0) {
        if (gst_asf_demux_check_buffer_is_header (demux, buf)) {
          GST_INFO_OBJECT (demux, "Chained asf found");
          demux->base_offset = off;
          gst_asf_demux_reset (demux, TRUE);
          gst_buffer_unref (buf);
          return;
        }
      }
      /* FIXME: We should tally up fatal errors and error out only
       * after a few broken packets in a row? */

      GST_INFO_OBJECT (demux, "Ignoring recoverable parse error");
      gst_buffer_unref (buf);

      if (GST_ASF_DEMUX_IS_REVERSE_PLAYBACK (demux->segment)
          && !demux->seek_to_cur_pos) {
        --demux->packet;
        if (demux->packet < 0) {
          goto eos;
        }
      } else {
        ++demux->packet;
      }

      return;
    }

    flow = gst_asf_demux_push_complete_payloads (demux, FALSE);

    if (GST_ASF_DEMUX_IS_REVERSE_PLAYBACK (demux->segment)
        && !demux->seek_to_cur_pos) {
      --demux->packet;
      if (demux->packet < 0) {
        goto eos;
      }
    } else {
      ++demux->packet;
    }

  } else {
    guint n;
    /* speed pull: several packets were pulled at once; parse each slice */
    for (n = 0; n < demux->speed_packets; n++) {
      GstBuffer *sub;
      GstAsfDemuxParsePacketError err;

      sub =
          gst_buffer_copy_region (buf, GST_BUFFER_COPY_ALL,
          n * demux->packet_size, demux->packet_size);
      err = gst_asf_demux_parse_packet (demux, sub);
      if (G_UNLIKELY (err != GST_ASF_DEMUX_PARSE_PACKET_ERROR_NONE)) {
        /* when we don't know when the data object ends, we should check
         * for a chained asf */
        if (demux->num_packets == 0) {
          if (gst_asf_demux_check_buffer_is_header (demux, sub)) {
            GST_INFO_OBJECT (demux, "Chained asf found");
            demux->base_offset = off + n * demux->packet_size;
            gst_asf_demux_reset (demux, TRUE);
            gst_buffer_unref (sub);
            gst_buffer_unref (buf);
            return;
          }
        }
        /* FIXME: We should tally up fatal errors and error out only
         * after a few broken packets in a row? */

        GST_INFO_OBJECT (demux, "Ignoring recoverable parse error");
        flow = GST_FLOW_OK;
      }

      gst_buffer_unref (sub);

      if (err == GST_ASF_DEMUX_PARSE_PACKET_ERROR_NONE)
        flow = gst_asf_demux_push_complete_payloads (demux, FALSE);

      ++demux->packet;

    }

    /* reset speed pull */
    demux->speed_packets = 1;
  }

  gst_buffer_unref (buf);

  if (G_UNLIKELY ((demux->num_packets > 0
              && demux->packet >= demux->num_packets)
          || flow == GST_FLOW_EOS)) {
    GST_LOG_OBJECT (demux, "reached EOS");
    goto eos;
  }

  if (G_UNLIKELY (flow != GST_FLOW_OK)) {
    GST_DEBUG_OBJECT (demux, "pushing complete payloads failed");
    goto pause;
  }

  /* check if we're at the end of the configured segment */
  /* FIXME: check if segment end reached etc. */

  return;

eos:
  {
    /* if we haven't activated our streams yet, this might be because we have
     * less data queued than required for preroll; force stream activation and
     * send any pending payloads before sending EOS */
    if (!demux->activated_streams)
      flow = gst_asf_demux_push_complete_payloads (demux, TRUE);

    /* we want to push an eos or post a segment-done in any case */
    if (demux->segment.flags & GST_SEEK_FLAG_SEGMENT) {
      gint64 stop;

      /* for segment playback we need to post when (in stream time)
       * we stopped, this is either stop (when set) or the duration. */
      if ((stop = demux->segment.stop) == -1)
        stop = demux->segment.duration;

      GST_INFO_OBJECT (demux, "Posting segment-done, at end of segment");
      gst_element_post_message (GST_ELEMENT_CAST (demux),
          gst_message_new_segment_done (GST_OBJECT (demux), GST_FORMAT_TIME,
              stop));
      gst_asf_demux_send_event_unlocked (demux,
          gst_event_new_segment_done (GST_FORMAT_TIME, stop));
    } else if (flow != GST_FLOW_EOS) {
      /* check if we have a chained asf, in case, we don't eos yet */
      if (gst_asf_demux_check_chained_asf (demux)) {
        GST_INFO_OBJECT (demux, "Chained ASF starting");
        gst_asf_demux_reset (demux, TRUE);
        return;
      }
    }

    if (!(demux->segment.flags & GST_SEEK_FLAG_SEGMENT)) {
      if (demux->activated_streams) {
        /* normal playback, send EOS to all linked pads */
        GST_INFO_OBJECT (demux, "Sending EOS, at end of stream");
        gst_asf_demux_send_event_unlocked (demux, gst_event_new_eos ());
      } else {
        GST_WARNING_OBJECT (demux, "EOS without exposed streams");
        flow = GST_FLOW_EOS;
      }
    }

    /* ... and fall through to pause */
  }
pause:
  {
    GST_DEBUG_OBJECT (demux, "pausing task, flow return: %s",
        gst_flow_get_name (flow));
    demux->segment_running = FALSE;
    gst_pad_pause_task (demux->sinkpad);

    /* For the error cases */
    if (flow == GST_FLOW_EOS && !demux->activated_streams) {
      GST_ELEMENT_ERROR (demux, STREAM, WRONG_TYPE, (NULL),
          ("This doesn't seem to be an ASF file"));
    } else if (flow < GST_FLOW_EOS || flow == GST_FLOW_NOT_LINKED) {
      /* Post an error.
Hopefully something else already has, but if not... */ GST_ELEMENT_FLOW_ERROR (demux, flow); gst_asf_demux_send_event_unlocked (demux, gst_event_new_eos ()); } return; } /* ERRORS */ read_failed: { GST_DEBUG_OBJECT (demux, "Read failed, doh"); flow = GST_FLOW_EOS; goto pause; } #if 0 /* See FIXMEs above */ parse_error: { gst_buffer_unref (buf); GST_ELEMENT_ERROR (demux, STREAM, DEMUX, (NULL), ("Error parsing ASF packet %u", (guint) demux->packet)); gst_asf_demux_send_event_unlocked (demux, gst_event_new_eos ()); flow = GST_FLOW_ERROR; goto pause; } #endif } #define GST_ASF_DEMUX_CHECK_HEADER_YES 0 #define GST_ASF_DEMUX_CHECK_HEADER_NO 1 #define GST_ASF_DEMUX_CHECK_HEADER_NEED_DATA 2 static gint gst_asf_demux_check_header (GstASFDemux * demux) { AsfObject obj; guint8 *cdata = (guint8 *) gst_adapter_map (demux->adapter, ASF_OBJECT_HEADER_SIZE); if (cdata == NULL) /* need more data */ return GST_ASF_DEMUX_CHECK_HEADER_NEED_DATA; if (asf_demux_peek_object (demux, cdata, ASF_OBJECT_HEADER_SIZE, &obj, FALSE && obj.id == ASF_OBJ_HEADER)) return GST_ASF_DEMUX_CHECK_HEADER_YES; return GST_ASF_DEMUX_CHECK_HEADER_NO; } static GstFlowReturn gst_asf_demux_chain (GstPad * pad, GstObject * parent, GstBuffer * buf) { GstFlowReturn ret = GST_FLOW_OK; GstASFDemux *demux; demux = GST_ASF_DEMUX (parent); GST_LOG_OBJECT (demux, "buffer: size=%" G_GSIZE_FORMAT ", offset=%" G_GINT64_FORMAT ", time=%" GST_TIME_FORMAT, gst_buffer_get_size (buf), GST_BUFFER_OFFSET (buf), GST_TIME_ARGS (GST_BUFFER_TIMESTAMP (buf))); if (G_UNLIKELY (GST_BUFFER_IS_DISCONT (buf))) { GST_DEBUG_OBJECT (demux, "received DISCONT"); gst_asf_demux_mark_discont (demux); } if (G_UNLIKELY ((!GST_CLOCK_TIME_IS_VALID (demux->in_gap) && GST_BUFFER_TIMESTAMP_IS_VALID (buf)))) { demux->in_gap = GST_BUFFER_TIMESTAMP (buf) - demux->in_segment.start; GST_DEBUG_OBJECT (demux, "upstream segment start %" GST_TIME_FORMAT ", interpolation gap: %" GST_TIME_FORMAT, GST_TIME_ARGS (demux->in_segment.start), GST_TIME_ARGS 
(demux->in_gap)); } gst_adapter_push (demux->adapter, buf); switch (demux->state) { case GST_ASF_DEMUX_STATE_INDEX:{ gint result = gst_asf_demux_check_header (demux); if (result == GST_ASF_DEMUX_CHECK_HEADER_NEED_DATA) /* need more data */ break; if (result == GST_ASF_DEMUX_CHECK_HEADER_NO) { /* we don't care about this, probably an index */ /* TODO maybe would be smarter to skip all the indices * until we got a new header or EOS to decide */ GST_LOG_OBJECT (demux, "Received index object, its EOS"); goto eos; } else { GST_INFO_OBJECT (demux, "Chained asf starting"); /* cleanup and get ready for a chained asf */ gst_asf_demux_reset (demux, TRUE); /* fall through */ } } case GST_ASF_DEMUX_STATE_HEADER:{ ret = gst_asf_demux_chain_headers (demux); if (demux->state != GST_ASF_DEMUX_STATE_DATA) break; /* otherwise fall through */ } case GST_ASF_DEMUX_STATE_DATA: { guint64 data_size; data_size = demux->packet_size; while (gst_adapter_available (demux->adapter) >= data_size) { GstBuffer *buf; GstAsfDemuxParsePacketError err; /* we don't know the length of the stream * check for a chained asf everytime */ if (demux->num_packets == 0) { gint result = gst_asf_demux_check_header (demux); if (result == GST_ASF_DEMUX_CHECK_HEADER_YES) { GST_INFO_OBJECT (demux, "Chained asf starting"); /* cleanup and get ready for a chained asf */ gst_asf_demux_reset (demux, TRUE); break; } } else if (G_UNLIKELY (demux->num_packets != 0 && demux->packet >= 0 && demux->packet >= demux->num_packets)) { /* do not overshoot data section when streaming */ break; } buf = gst_adapter_take_buffer (demux->adapter, data_size); /* FIXME: We should tally up fatal errors and error out only * after a few broken packets in a row? 
*/ err = gst_asf_demux_parse_packet (demux, buf); gst_buffer_unref (buf); if (G_LIKELY (err == GST_ASF_DEMUX_PARSE_PACKET_ERROR_NONE)) ret = gst_asf_demux_push_complete_payloads (demux, FALSE); else GST_WARNING_OBJECT (demux, "Parse error"); if (demux->packet >= 0) ++demux->packet; } if (G_UNLIKELY (demux->num_packets != 0 && demux->packet >= 0 && demux->packet >= demux->num_packets)) { demux->state = GST_ASF_DEMUX_STATE_INDEX; } break; } default: g_assert_not_reached (); } done: if (ret != GST_FLOW_OK) GST_DEBUG_OBJECT (demux, "flow: %s", gst_flow_get_name (ret)); return ret; eos: { GST_DEBUG_OBJECT (demux, "Handled last packet, setting EOS"); ret = GST_FLOW_EOS; goto done; } } static inline gboolean gst_asf_demux_skip_bytes (guint num_bytes, guint8 ** p_data, guint64 * p_size) { if (*p_size < num_bytes) return FALSE; *p_data += num_bytes; *p_size -= num_bytes; return TRUE; } static inline guint8 gst_asf_demux_get_uint8 (guint8 ** p_data, guint64 * p_size) { guint8 ret; g_assert (*p_size >= 1); ret = GST_READ_UINT8 (*p_data); *p_data += sizeof (guint8); *p_size -= sizeof (guint8); return ret; } static inline guint16 gst_asf_demux_get_uint16 (guint8 ** p_data, guint64 * p_size) { guint16 ret; g_assert (*p_size >= 2); ret = GST_READ_UINT16_LE (*p_data); *p_data += sizeof (guint16); *p_size -= sizeof (guint16); return ret; } static inline guint32 gst_asf_demux_get_uint32 (guint8 ** p_data, guint64 * p_size) { guint32 ret; g_assert (*p_size >= 4); ret = GST_READ_UINT32_LE (*p_data); *p_data += sizeof (guint32); *p_size -= sizeof (guint32); return ret; } static inline guint64 gst_asf_demux_get_uint64 (guint8 ** p_data, guint64 * p_size) { guint64 ret; g_assert (*p_size >= 8); ret = GST_READ_UINT64_LE (*p_data); *p_data += sizeof (guint64); *p_size -= sizeof (guint64); return ret; } static gboolean gst_asf_demux_get_buffer (GstBuffer ** p_buf, guint num_bytes_to_read, guint8 ** p_data, guint64 * p_size) { *p_buf = NULL; if (*p_size < num_bytes_to_read) return FALSE; 
*p_buf = gst_buffer_new_and_alloc (num_bytes_to_read); gst_buffer_fill (*p_buf, 0, *p_data, num_bytes_to_read); *p_data += num_bytes_to_read; *p_size -= num_bytes_to_read; return TRUE; } static gboolean gst_asf_demux_get_bytes (guint8 ** p_buf, guint num_bytes_to_read, guint8 ** p_data, guint64 * p_size) { *p_buf = NULL; if (*p_size < num_bytes_to_read) return FALSE; *p_buf = g_memdup (*p_data, num_bytes_to_read); *p_data += num_bytes_to_read; *p_size -= num_bytes_to_read; return TRUE; } static gboolean gst_asf_demux_get_string (gchar ** p_str, guint16 * p_strlen, guint8 ** p_data, guint64 * p_size) { guint16 s_length; guint8 *s; *p_str = NULL; if (*p_size < 2) return FALSE; s_length = gst_asf_demux_get_uint16 (p_data, p_size); if (p_strlen) *p_strlen = s_length; if (s_length == 0) { GST_WARNING ("zero-length string"); *p_str = g_strdup (""); return TRUE; } if (!gst_asf_demux_get_bytes (&s, s_length, p_data, p_size)) return FALSE; g_assert (s != NULL); /* just because They don't exist doesn't * mean They are not out to get you ... 
/* Read a 16-byte ASF GUID (four little-endian 32-bit words) from the
 * stream.  Caller must guarantee at least 16 bytes remain. */
static void
gst_asf_demux_get_guid (ASFGuid * guid, guint8 ** p_data, guint64 * p_size)
{
  g_assert (*p_size >= 4 * sizeof (guint32));

  guid->v1 = gst_asf_demux_get_uint32 (p_data, p_size);
  guid->v2 = gst_asf_demux_get_uint32 (p_data, p_size);
  guid->v3 = gst_asf_demux_get_uint32 (p_data, p_size);
  guid->v4 = gst_asf_demux_get_uint32 (p_data, p_size);
}

/* Parse a WAVEFORMATEX-shaped audio stream header.  Returns FALSE if the
 * remaining data is too small for the fixed fields, or if the declared
 * codec-specific-data size exceeds what is actually left in the buffer
 * (corrupt file).  Note: audio->size bytes of codec data are NOT consumed
 * here — the caller reads them afterwards. */
static gboolean
gst_asf_demux_get_stream_audio (asf_stream_audio * audio, guint8 ** p_data,
    guint64 * p_size)
{
  if (*p_size < (2 + 2 + 4 + 4 + 2 + 2 + 2))
    return FALSE;

  /* WAVEFORMATEX Structure */
  audio->codec_tag = gst_asf_demux_get_uint16 (p_data, p_size);
  audio->channels = gst_asf_demux_get_uint16 (p_data, p_size);
  audio->sample_rate = gst_asf_demux_get_uint32 (p_data, p_size);
  audio->byte_rate = gst_asf_demux_get_uint32 (p_data, p_size);
  audio->block_align = gst_asf_demux_get_uint16 (p_data, p_size);
  audio->word_size = gst_asf_demux_get_uint16 (p_data, p_size);
  /* Codec specific data size */
  audio->size = gst_asf_demux_get_uint16 (p_data, p_size);
  if (audio->size > *p_size) {
    GST_WARNING ("Corrupted audio codec_data (should be at least %u bytes, is %"
        G_GUINT64_FORMAT " long)", audio->size, *p_size);
    return FALSE;
  }
  return TRUE;
}

/* Parse the small fixed-size video stream header that precedes the
 * BITMAPINFOHEADER-style format block (see ..._get_stream_video_format). */
static gboolean
gst_asf_demux_get_stream_video (asf_stream_video * video, guint8 ** p_data,
    guint64 * p_size)
{
  if (*p_size < (4 + 4 + 1 + 2))
    return FALSE;

  video->width = gst_asf_demux_get_uint32 (p_data, p_size);
  video->height = gst_asf_demux_get_uint32 (p_data, p_size);
  video->unknown = gst_asf_demux_get_uint8 (p_data, p_size);
  video->size = gst_asf_demux_get_uint16 (p_data, p_size);
  return TRUE;
}

/* Parse a BITMAPINFOHEADER-shaped video format block.  Rejects blocks whose
 * declared size is smaller than the 40-byte fixed header, or whose declared
 * size (minus the 4 bytes already consumed for the size field itself)
 * exceeds the data actually remaining — both indicate a corrupt file. */
static gboolean
gst_asf_demux_get_stream_video_format (asf_stream_video_format * fmt,
    guint8 ** p_data, guint64 * p_size)
{
  if (*p_size < (4 + 4 + 4 + 2 + 2 + 4 + 4 + 4 + 4 + 4 + 4))
    return FALSE;

  fmt->size = gst_asf_demux_get_uint32 (p_data, p_size);
  /* Sanity checks */
  if (fmt->size < 40) {
    GST_WARNING ("Corrupted asf_stream_video_format (size < 40)");
    return FALSE;
  }
  /* cast to guint64 before the subtraction so fmt->size - 4 cannot wrap */
  if ((guint64) fmt->size - 4 > *p_size) {
    GST_WARNING ("Corrupted asf_stream_video_format (codec_data is too small)");
    return FALSE;
  }
  fmt->width = gst_asf_demux_get_uint32 (p_data, p_size);
  fmt->height = gst_asf_demux_get_uint32 (p_data, p_size);
  fmt->planes = gst_asf_demux_get_uint16 (p_data, p_size);
  fmt->depth = gst_asf_demux_get_uint16 (p_data, p_size);
  fmt->tag = gst_asf_demux_get_uint32 (p_data, p_size);
  fmt->image_size = gst_asf_demux_get_uint32 (p_data, p_size);
  fmt->xpels_meter = gst_asf_demux_get_uint32 (p_data, p_size);
  fmt->ypels_meter = gst_asf_demux_get_uint32 (p_data, p_size);
  fmt->num_colors = gst_asf_demux_get_uint32 (p_data, p_size);
  fmt->imp_colors = gst_asf_demux_get_uint32 (p_data, p_size);
  return TRUE;
}

/* Look up a previously registered stream by its ASF stream id.
 * Returns NULL (with a warning for ids that were never declared at all)
 * when no matching stream exists. */
AsfStream *
gst_asf_demux_get_stream (GstASFDemux * demux, guint16 id)
{
  guint i;

  for (i = 0; i < demux->num_streams; i++) {
    if (demux->stream[i].id == id)
      return &demux->stream[i];
  }

  if (gst_asf_demux_is_unknown_stream (demux, id))
    GST_WARNING ("Segment found for undefined stream: (%d)", id);
  return NULL;
}
GST_BUFFER_FLAG_SET (stream->streamheader, GST_BUFFER_FLAG_HEADER); } if (is_video) { GstStructure *st; gint par_x, par_y; st = gst_caps_get_structure (caps, 0); if (gst_structure_get_fraction (st, "pixel-aspect-ratio", &par_x, &par_y) && par_x > 0 && par_y > 0) { GST_DEBUG ("PAR %d/%d", par_x, par_y); stream->par_x = par_x; stream->par_y = par_y; } } stream->payloads = g_array_new (FALSE, FALSE, sizeof (AsfPayload)); /* TODO: create this array during reverse play? */ stream->payloads_rev = g_array_new (FALSE, FALSE, sizeof (AsfPayload)); GST_INFO ("Created pad %s for stream %u with caps %" GST_PTR_FORMAT, GST_PAD_NAME (src_pad), demux->num_streams, caps); ++demux->num_streams; stream->active = FALSE; return stream; } static void gst_asf_demux_add_stream_headers_to_caps (GstASFDemux * demux, GstBuffer * buffer, GstStructure * structure) { GValue arr_val = G_VALUE_INIT; GValue buf_val = G_VALUE_INIT; g_value_init (&arr_val, GST_TYPE_ARRAY); g_value_init (&buf_val, GST_TYPE_BUFFER); gst_value_set_buffer (&buf_val, buffer); gst_value_array_append_and_take_value (&arr_val, &buf_val); gst_structure_take_value (structure, "streamheader", &arr_val); } static AsfStream * gst_asf_demux_add_audio_stream (GstASFDemux * demux, asf_stream_audio * audio, guint16 id, guint8 ** p_data, guint64 * p_size) { GstTagList *tags = NULL; GstBuffer *extradata = NULL; GstPad *src_pad; GstCaps *caps; guint16 size_left = 0; gchar *codec_name = NULL; gchar *name = NULL; size_left = audio->size; /* Create the audio pad */ name = g_strdup_printf ("audio_%u", demux->num_audio_streams); src_pad = gst_pad_new_from_static_template (&audio_src_template, name); g_free (name); /* Swallow up any left over data and set up the * standard properties from the header info */ if (size_left) { GST_INFO_OBJECT (demux, "Audio header contains %d bytes of " "codec specific data", size_left); g_assert (size_left <= *p_size); gst_asf_demux_get_buffer (&extradata, size_left, p_data, p_size); } /* asf_stream_audio is 
the same as gst_riff_strf_auds, but with an * additional two bytes indicating extradata. */ /* FIXME: Handle the channel reorder map here */ caps = gst_riff_create_audio_caps (audio->codec_tag, NULL, (gst_riff_strf_auds *) audio, extradata, NULL, &codec_name, NULL); if (caps == NULL) { caps = gst_caps_new_simple ("audio/x-asf-unknown", "codec_id", G_TYPE_INT, (gint) audio->codec_tag, NULL); } /* Informing about that audio format we just added */ if (codec_name) { tags = gst_tag_list_new (GST_TAG_AUDIO_CODEC, codec_name, NULL); g_free (codec_name); } if (audio->byte_rate > 0) { /* Some ASF files have no bitrate props object (often seen with * ASF files that contain raw audio data). Example files can * be generated with FFmpeg (tested with v2.8.6), like this: * * ffmpeg -i sine-wave.wav -c:a pcm_alaw file.asf * * In this case, if audio->byte_rate is nonzero, use that as * the bitrate. */ guint bitrate = audio->byte_rate * 8; if (tags == NULL) tags = gst_tag_list_new_empty (); /* Add bitrate, but only if there is none set already, since * this is just a fallback in case there is no bitrate tag * already present */ gst_tag_list_add (tags, GST_TAG_MERGE_KEEP, GST_TAG_BITRATE, bitrate, NULL); } if (extradata) gst_buffer_unref (extradata); GST_INFO ("Adding audio stream #%u, id %u codec %u (0x%04x), tags=%" GST_PTR_FORMAT, demux->num_audio_streams, id, audio->codec_tag, audio->codec_tag, tags); ++demux->num_audio_streams; return gst_asf_demux_setup_pad (demux, src_pad, caps, id, FALSE, NULL, tags); } static AsfStream * gst_asf_demux_add_video_stream (GstASFDemux * demux, asf_stream_video_format * video, guint16 id, guint8 ** p_data, guint64 * p_size) { GstTagList *tags = NULL; GstStructure *caps_s; GstBuffer *extradata = NULL; GstPad *src_pad; GstCaps *caps; gchar *str; gchar *name = NULL; gchar *codec_name = NULL; guint64 size_left = video->size - 40; GstBuffer *streamheader = NULL; guint par_w = 1, par_h = 1; /* Create the video pad */ name = g_strdup_printf 
("video_%u", demux->num_video_streams); src_pad = gst_pad_new_from_static_template (&video_src_template, name); g_free (name); /* Now try some gstreamer formatted MIME types (from gst_avi_demux_strf_vids) */ if (size_left) { GST_LOG ("Video header has %" G_GUINT64_FORMAT " bytes of codec specific data (vs %" G_GUINT64_FORMAT ")", size_left, *p_size); g_assert (size_left <= *p_size); gst_asf_demux_get_buffer (&extradata, size_left, p_data, p_size); } GST_DEBUG ("video codec %" GST_FOURCC_FORMAT, GST_FOURCC_ARGS (video->tag)); /* yes, asf_stream_video_format and gst_riff_strf_vids are the same */ caps = gst_riff_create_video_caps (video->tag, NULL, (gst_riff_strf_vids *) video, extradata, NULL, &codec_name); if (caps == NULL) { caps = gst_caps_new_simple ("video/x-asf-unknown", "fourcc", G_TYPE_UINT, video->tag, NULL); } else { GstStructure *s; gint ax, ay; s = gst_asf_demux_get_metadata_for_stream (demux, id); if (gst_structure_get_int (s, "AspectRatioX", &ax) && gst_structure_get_int (s, "AspectRatioY", &ay) && (ax > 0 && ay > 0)) { par_w = ax; par_h = ay; gst_caps_set_simple (caps, "pixel-aspect-ratio", GST_TYPE_FRACTION, ax, ay, NULL); } else { guint ax, ay; /* retry with the global metadata */ GST_DEBUG ("Retrying with global metadata %" GST_PTR_FORMAT, demux->global_metadata); s = demux->global_metadata; if (gst_structure_get_uint (s, "AspectRatioX", &ax) && gst_structure_get_uint (s, "AspectRatioY", &ay)) { GST_DEBUG ("ax:%d, ay:%d", ax, ay); if (ax > 0 && ay > 0) { par_w = ax; par_h = ay; gst_caps_set_simple (caps, "pixel-aspect-ratio", GST_TYPE_FRACTION, ax, ay, NULL); } } } s = gst_caps_get_structure (caps, 0); gst_structure_remove_field (s, "framerate"); } caps_s = gst_caps_get_structure (caps, 0); /* add format field with fourcc to WMV/VC1 caps to differentiate variants */ if (gst_structure_has_name (caps_s, "video/x-wmv")) { str = g_strdup_printf ("%" GST_FOURCC_FORMAT, GST_FOURCC_ARGS (video->tag)); gst_caps_set_simple (caps, "format", G_TYPE_STRING, 
str, NULL); g_free (str); /* check if h264 has codec_data (avc) or streamheaders (bytestream) */ } else if (gst_structure_has_name (caps_s, "video/x-h264")) { const GValue *value = gst_structure_get_value (caps_s, "codec_data"); if (value) { GstBuffer *buf = gst_value_get_buffer (value); GstMapInfo mapinfo; if (gst_buffer_map (buf, &mapinfo, GST_MAP_READ)) { if (mapinfo.size >= 4 && GST_READ_UINT32_BE (mapinfo.data) == 1) { /* this looks like a bytestream start */ streamheader = gst_buffer_ref (buf); gst_asf_demux_add_stream_headers_to_caps (demux, buf, caps_s); gst_structure_remove_field (caps_s, "codec_data"); } gst_buffer_unmap (buf, &mapinfo); } } } /* For a 3D video, set multiview information into the caps based on * what was detected during object parsing */ if (demux->asf_3D_mode != GST_ASF_3D_NONE) { GstVideoMultiviewMode mv_mode = GST_VIDEO_MULTIVIEW_MODE_NONE; GstVideoMultiviewFlags mv_flags = GST_VIDEO_MULTIVIEW_FLAGS_NONE; const gchar *mview_mode_str; switch (demux->asf_3D_mode) { case GST_ASF_3D_SIDE_BY_SIDE_HALF_LR: mv_mode = GST_VIDEO_MULTIVIEW_MODE_SIDE_BY_SIDE; break; case GST_ASF_3D_SIDE_BY_SIDE_HALF_RL: mv_mode = GST_VIDEO_MULTIVIEW_MODE_SIDE_BY_SIDE; mv_flags = GST_VIDEO_MULTIVIEW_FLAGS_RIGHT_VIEW_FIRST; break; case GST_ASF_3D_TOP_AND_BOTTOM_HALF_LR: mv_mode = GST_VIDEO_MULTIVIEW_MODE_TOP_BOTTOM; break; case GST_ASF_3D_TOP_AND_BOTTOM_HALF_RL: mv_mode = GST_VIDEO_MULTIVIEW_MODE_TOP_BOTTOM; mv_flags = GST_VIDEO_MULTIVIEW_FLAGS_RIGHT_VIEW_FIRST; break; case GST_ASF_3D_DUAL_STREAM:{ gboolean is_right_view = FALSE; /* if Advanced_Mutual_Exclusion object exists, use it * to figure out which is the left view (lower ID) */ if (demux->mut_ex_streams != NULL) { guint length; gint i; length = g_slist_length (demux->mut_ex_streams); for (i = 0; i < length; i++) { gpointer v_s_id; v_s_id = g_slist_nth_data (demux->mut_ex_streams, i); GST_DEBUG_OBJECT (demux, "has Mutual_Exclusion object. 
stream id in object is %d", GPOINTER_TO_INT (v_s_id)); if (id > GPOINTER_TO_INT (v_s_id)) is_right_view = TRUE; } } else { /* if the Advaced_Mutual_Exclusion object doesn't exist, assume the * first video stream encountered has the lower ID */ if (demux->num_video_streams > 0) { /* This is not the first video stream, assuming right eye view */ is_right_view = TRUE; } } if (is_right_view) mv_mode = GST_VIDEO_MULTIVIEW_MODE_RIGHT; else mv_mode = GST_VIDEO_MULTIVIEW_MODE_LEFT; break; } default: break; } GST_INFO_OBJECT (demux, "stream_id %d, has multiview-mode %d flags 0x%x", id, mv_mode, (guint) mv_flags); mview_mode_str = gst_video_multiview_mode_to_caps_string (mv_mode); if (mview_mode_str != NULL) { if (gst_video_multiview_guess_half_aspect (mv_mode, video->width, video->height, par_w, par_h)) mv_flags |= GST_VIDEO_MULTIVIEW_FLAGS_HALF_ASPECT; gst_caps_set_simple (caps, "multiview-mode", G_TYPE_STRING, mview_mode_str, "multiview-flags", GST_TYPE_VIDEO_MULTIVIEW_FLAGSET, mv_flags, GST_FLAG_SET_MASK_EXACT, NULL); } } if (codec_name) { tags = gst_tag_list_new (GST_TAG_VIDEO_CODEC, codec_name, NULL); g_free (codec_name); } if (extradata) gst_buffer_unref (extradata); GST_INFO ("Adding video stream #%u, id %u, codec %" GST_FOURCC_FORMAT " (0x%08x)", demux->num_video_streams, id, GST_FOURCC_ARGS (video->tag), video->tag); ++demux->num_video_streams; return gst_asf_demux_setup_pad (demux, src_pad, caps, id, TRUE, streamheader, tags); } static void gst_asf_demux_activate_stream (GstASFDemux * demux, AsfStream * stream) { if (!stream->active) { GstEvent *event; gchar *stream_id; GST_INFO_OBJECT (demux, "Activating stream %2u, pad %s, caps %" GST_PTR_FORMAT, stream->id, GST_PAD_NAME (stream->pad), stream->caps); gst_pad_set_active (stream->pad, TRUE); stream_id = gst_pad_create_stream_id_printf (stream->pad, GST_ELEMENT_CAST (demux), "%03u", stream->id); event = gst_pad_get_sticky_event (demux->sinkpad, GST_EVENT_STREAM_START, 0); if (event) { if (gst_event_parse_group_id 
/* Parse a single ASF "Stream Properties" object and register the stream
 * (audio or video) with the demuxer.  'data'/'size' cover the object body
 * only.  Returns the newly added AsfStream, or NULL on truncated data,
 * unknown error-correction type, or an unsupported stream type (unsupported
 * ids are remembered in demux->other_streams). */
static AsfStream *
gst_asf_demux_parse_stream_object (GstASFDemux * demux, guint8 * data,
    guint64 size)
{
  AsfCorrectionType correction_type;
  AsfStreamType stream_type;
  GstClockTime time_offset;
  gboolean is_encrypted G_GNUC_UNUSED;
  guint16 stream_id;
  guint16 flags;
  ASFGuid guid;
  guint stream_specific_size;
  guint type_specific_size G_GNUC_UNUSED;
  guint unknown G_GNUC_UNUSED;
  gboolean inspect_payload = FALSE;
  AsfStream *stream = NULL;

  /* Get the rest of the header's header */
  if (size < (16 + 16 + 8 + 4 + 4 + 2 + 4))
    goto not_enough_data;

  /* stream type GUID, then error-correction type GUID */
  gst_asf_demux_get_guid (&guid, &data, &size);
  stream_type = gst_asf_demux_identify_guid (asf_stream_guids, &guid);

  gst_asf_demux_get_guid (&guid, &data, &size);
  correction_type = gst_asf_demux_identify_guid (asf_correction_guids, &guid);

  /* time offset is stored in 100ns units; convert to nanoseconds */
  time_offset = gst_asf_demux_get_uint64 (&data, &size) * 100;

  type_specific_size = gst_asf_demux_get_uint32 (&data, &size);
  stream_specific_size = gst_asf_demux_get_uint32 (&data, &size);

  flags = gst_asf_demux_get_uint16 (&data, &size);
  stream_id = flags & 0x7f;
  /* NOTE(review): the "<< 15" looks odd (bit 15 shifted further left), but
   * the double negation reduces any non-zero value to TRUE, so this still
   * tests the encrypted flag; the variable is unused anyway. */
  is_encrypted = ! !((flags & 0x8000) << 15);
  unknown = gst_asf_demux_get_uint32 (&data, &size);

  GST_DEBUG_OBJECT (demux, "Found stream %u, time_offset=%" GST_TIME_FORMAT,
      stream_id, GST_TIME_ARGS (time_offset));

  /* dvr-ms has audio stream declared in stream specific data */
  if (stream_type == ASF_STREAM_EXT_EMBED_HEADER) {
    AsfExtStreamType ext_stream_type;
    gst_asf_demux_get_guid (&guid, &data, &size);
    ext_stream_type = gst_asf_demux_identify_guid (asf_ext_stream_guids, &guid);

    if (ext_stream_type == ASF_EXT_STREAM_AUDIO) {
      inspect_payload = TRUE;
      /* skip the embedded wrapper fields to reach the real audio header */
      gst_asf_demux_get_guid (&guid, &data, &size);
      gst_asf_demux_get_uint32 (&data, &size);
      gst_asf_demux_get_uint32 (&data, &size);
      gst_asf_demux_get_uint32 (&data, &size);
      gst_asf_demux_get_guid (&guid, &data, &size);
      gst_asf_demux_get_uint32 (&data, &size);
      stream_type = ASF_STREAM_AUDIO;
    }
  }

  switch (stream_type) {
    case ASF_STREAM_AUDIO:{
      asf_stream_audio audio_object;

      if (!gst_asf_demux_get_stream_audio (&audio_object, &data, &size))
        goto not_enough_data;

      GST_INFO ("Object is an audio stream with %u bytes of additional data",
          audio_object.size);

      stream = gst_asf_demux_add_audio_stream (demux, &audio_object, stream_id,
          &data, &size);

      switch (correction_type) {
        case ASF_CORRECTION_ON:{
          guint span, packet_size, chunk_size, data_size, silence_data;

          GST_INFO ("Using error correction");

          if (size < (1 + 2 + 2 + 2 + 1))
            goto not_enough_data;

          span = gst_asf_demux_get_uint8 (&data, &size);
          packet_size = gst_asf_demux_get_uint16 (&data, &size);
          chunk_size = gst_asf_demux_get_uint16 (&data, &size);
          data_size = gst_asf_demux_get_uint16 (&data, &size);
          silence_data = gst_asf_demux_get_uint8 (&data, &size);

          stream->span = span;

          GST_DEBUG_OBJECT (demux, "Descrambling ps:%u cs:%u ds:%u s:%u sd:%u",
              packet_size, chunk_size, data_size, span, silence_data);

          if (stream->span > 1) {
            if (chunk_size == 0 || ((packet_size / chunk_size) <= 1)) {
              /* Disable descrambling */
              stream->span = 0;
            } else {
              /* FIXME: this else branch was added for
               * weird_al_yankovic - the saga begins.asf */
              stream->ds_packet_size = packet_size;
              stream->ds_chunk_size = chunk_size;
            }
          } else {
            /* Descambling is enabled */
            stream->ds_packet_size = packet_size;
            stream->ds_chunk_size = chunk_size;
          }
#if 0
          /* Now skip the rest of the silence data */
          if (data_size > 1)
            gst_bytestream_flush (demux->bs, data_size - 1);
#else
          /* FIXME: CHECKME. And why -1? */
          if (data_size > 1) {
            if (!gst_asf_demux_skip_bytes (data_size - 1, &data, &size)) {
              goto not_enough_data;
            }
          }
#endif
          break;
        }
        case ASF_CORRECTION_OFF:{
          GST_INFO ("Error correction off");
          if (!gst_asf_demux_skip_bytes (stream_specific_size, &data, &size))
            goto not_enough_data;
          break;
        }
        default:
          GST_ELEMENT_ERROR (demux, STREAM, DEMUX, (NULL),
              ("Audio stream using unknown error correction"));
          return NULL;
      }

      break;
    }

    case ASF_STREAM_VIDEO:{
      asf_stream_video_format video_format_object;
      asf_stream_video video_object;
      guint16 vsize;

      if (!gst_asf_demux_get_stream_video (&video_object, &data, &size))
        goto not_enough_data;

      /* NOTE(review): video_object.size is guint16, so size < 40 wraps here;
       * presumably the size < 40 check inside
       * gst_asf_demux_get_stream_video_format() below catches that case
       * before the value matters — confirm. */
      vsize = video_object.size - 40;   /* Byte order gets offset by single byte */

      GST_INFO ("object is a video stream with %u bytes of "
          "additional data", vsize);

      if (!gst_asf_demux_get_stream_video_format (&video_format_object,
              &data, &size)) {
        goto not_enough_data;
      }

      stream = gst_asf_demux_add_video_stream (demux, &video_format_object,
          stream_id, &data, &size);

      break;
    }

    default:
      GST_WARNING_OBJECT (demux, "Unknown stream type for stream %u",
          stream_id);
      demux->other_streams =
          g_slist_append (demux->other_streams, GINT_TO_POINTER (stream_id));
      break;
  }

  if (stream)
    stream->inspect_payload = inspect_payload;

  return stream;

not_enough_data:
  {
    GST_WARNING_OBJECT (demux, "Unexpected end of data parsing stream object");
    /* we'll error out later if we found no streams */
    return NULL;
  }
}
/* gst_asf_demux_add_global_tags() takes ownership of taglist! */
static void
gst_asf_demux_add_global_tags (GstASFDemux * demux, GstTagList * taglist)
{
  GstTagList *t;

  GST_DEBUG_OBJECT (demux, "adding global tags: %" GST_PTR_FORMAT, taglist);

  if (taglist == NULL)
    return;

  /* nothing to merge — just drop the reference we were handed */
  if (gst_tag_list_is_empty (taglist)) {
    gst_tag_list_unref (taglist);
    return;
  }

  /* merge into the existing global list and replace it */
  t = gst_tag_list_merge (demux->taglist, taglist, GST_TAG_MERGE_APPEND);
  gst_tag_list_set_scope (t, GST_TAG_SCOPE_GLOBAL);
  if (demux->taglist)
    gst_tag_list_unref (demux->taglist);
  gst_tag_list_unref (taglist);
  demux->taglist = t;

  GST_LOG_OBJECT (demux, "global tags now: %" GST_PTR_FORMAT, demux->taglist);
}

/* value types used in Extended Content Description descriptors */
#define ASF_DEMUX_DATA_TYPE_UTF16LE_STRING 0
#define ASF_DEMUX_DATA_TYPE_BYTE_ARRAY 1
#define ASF_DEMUX_DATA_TYPE_BOOL 2
#define ASF_DEMUX_DATA_TYPE_DWORD 3

/* Parse a WM/Picture tag payload (picture type byte, image length, two
 * UTF-16 strings we skip, then the image bytes) and attach the image to
 * 'tags'.  All reads are bounds-checked via GstByteReader; on short or
 * malformed data the payload is dumped at debug level and ignored. */
static void
asf_demux_parse_picture_tag (GstTagList * tags, const guint8 * tag_data,
    guint tag_data_len)
{
  GstByteReader r;
  const guint8 *img_data = NULL;
  guint32 img_data_len = 0;
  guint8 pic_type = 0;

  gst_byte_reader_init (&r, tag_data, tag_data_len);

  /* skip mime type string (we don't trust it and do our own typefinding),
   * and also skip the description string, since we don't use it */
  if (!gst_byte_reader_get_uint8 (&r, &pic_type) ||
      !gst_byte_reader_get_uint32_le (&r, &img_data_len) ||
      !gst_byte_reader_skip_string_utf16 (&r) ||
      !gst_byte_reader_skip_string_utf16 (&r) ||
      !gst_byte_reader_get_data (&r, img_data_len, &img_data)) {
    goto not_enough_data;
  }

  if (!gst_tag_list_add_id3_image (tags, img_data, img_data_len, pic_type))
    GST_DEBUG ("failed to add image extracted from WM/Picture tag to taglist");

  return;

not_enough_data:
  {
    GST_DEBUG ("Failed to read WM/Picture tag: not enough data");
    GST_MEMDUMP ("WM/Picture data", tag_data, tag_data_len);
    return;
  }
}
tag_value = { 0, }; gsize in, out; gchar *name; gchar *name_utf8 = NULL; gchar *value; /* Descriptor */ if (!gst_asf_demux_get_string (&name, &name_len, &data, &size)) goto not_enough_data; if (size < 2) { g_free (name); goto not_enough_data; } /* Descriptor Value Data Type */ datatype = gst_asf_demux_get_uint16 (&data, &size); /* Descriptor Value (not really a string, but same thing reading-wise) */ if (!gst_asf_demux_get_string (&value, &value_len, &data, &size)) { g_free (name); goto not_enough_data; } name_utf8 = g_convert (name, name_len, "UTF-8", "UTF-16LE", &in, &out, NULL); if (name_utf8 != NULL) { GST_DEBUG ("Found tag/metadata %s", name_utf8); gst_tag_name = gst_asf_demux_get_gst_tag_from_tag_name (name_utf8); GST_DEBUG ("gst_tag_name %s", GST_STR_NULL (gst_tag_name)); switch (datatype) { case ASF_DEMUX_DATA_TYPE_UTF16LE_STRING:{ gchar *value_utf8; value_utf8 = g_convert (value, value_len, "UTF-8", "UTF-16LE", &in, &out, NULL); /* get rid of tags with empty value */ if (value_utf8 != NULL && *value_utf8 != '\0') { GST_DEBUG ("string value %s", value_utf8); value_utf8[out] = '\0'; if (gst_tag_name != NULL) { if (strcmp (gst_tag_name, GST_TAG_DATE_TIME) == 0) { guint year = atoi (value_utf8); if (year > 0) { g_value_init (&tag_value, GST_TYPE_DATE_TIME); g_value_take_boxed (&tag_value, gst_date_time_new_y (year)); } } else if (strcmp (gst_tag_name, GST_TAG_GENRE) == 0) { guint id3v1_genre_id; const gchar *genre_str; if (sscanf (value_utf8, "(%u)", &id3v1_genre_id) == 1 && ((genre_str = gst_tag_id3_genre_get (id3v1_genre_id)))) { GST_DEBUG ("Genre: %s -> %s", value_utf8, genre_str); g_free (value_utf8); value_utf8 = g_strdup (genre_str); } } else { GType tag_type; /* convert tag from string to other type if required */ tag_type = gst_tag_get_type (gst_tag_name); g_value_init (&tag_value, tag_type); if (!gst_value_deserialize (&tag_value, value_utf8)) { GValue from_val = { 0, }; g_value_init (&from_val, G_TYPE_STRING); g_value_set_string (&from_val, 
value_utf8); if (!g_value_transform (&from_val, &tag_value)) { GST_WARNING_OBJECT (demux, "Could not transform string tag to " "%s tag type %s", gst_tag_name, g_type_name (tag_type)); g_value_unset (&tag_value); } g_value_unset (&from_val); } } } else { /* metadata ! */ GST_DEBUG ("Setting metadata"); g_value_init (&tag_value, G_TYPE_STRING); g_value_set_string (&tag_value, value_utf8); /* If we found a stereoscopic marker, look for StereoscopicLayout * metadata */ if (content3D) { guint i; if (strncmp ("StereoscopicLayout", name_utf8, strlen (name_utf8)) == 0) { for (i = 0; i < G_N_ELEMENTS (stereoscopic_layout_map); i++) { if (g_str_equal (stereoscopic_layout_map[i].interleave_name, value_utf8)) { demux->asf_3D_mode = stereoscopic_layout_map[i].interleaving_type; GST_INFO ("find interleave type %u", demux->asf_3D_mode); } } } GST_INFO_OBJECT (demux, "3d type is %u", demux->asf_3D_mode); } else { demux->asf_3D_mode = GST_ASF_3D_NONE; GST_INFO_OBJECT (demux, "None 3d type"); } } } else if (value_utf8 == NULL) { GST_WARNING ("Failed to convert string value to UTF8, skipping"); } else { GST_DEBUG ("Skipping empty string value for %s", GST_STR_NULL (gst_tag_name)); } g_free (value_utf8); break; } case ASF_DEMUX_DATA_TYPE_BYTE_ARRAY:{ if (gst_tag_name) { if (!g_str_equal (gst_tag_name, GST_TAG_IMAGE)) { GST_FIXME ("Unhandled byte array tag %s", GST_STR_NULL (gst_tag_name)); break; } else { asf_demux_parse_picture_tag (taglist, (guint8 *) value, value_len); } } break; } case ASF_DEMUX_DATA_TYPE_DWORD:{ guint uint_val; if (value_len < 4) break; uint_val = GST_READ_UINT32_LE (value); /* this is the track number */ g_value_init (&tag_value, G_TYPE_UINT); /* WM/Track counts from 0 */ if (!strcmp (name_utf8, "WM/Track")) ++uint_val; g_value_set_uint (&tag_value, uint_val); break; } /* Detect 3D */ case ASF_DEMUX_DATA_TYPE_BOOL:{ gboolean bool_val; if (value_len < 4) break; bool_val = GST_READ_UINT32_LE (value); if (strncmp ("Stereoscopic", name_utf8, strlen (name_utf8)) == 
0) { if (bool_val) { GST_INFO_OBJECT (demux, "This is 3D contents"); content3D = TRUE; } else { GST_INFO_OBJECT (demux, "This is not 3D contenst"); content3D = FALSE; } } break; } default:{ GST_DEBUG ("Skipping tag %s of type %d", gst_tag_name, datatype); break; } } if (G_IS_VALUE (&tag_value)) { if (gst_tag_name) { GstTagMergeMode merge_mode = GST_TAG_MERGE_APPEND; /* WM/TrackNumber is more reliable than WM/Track, since the latter * is supposed to have a 0 base but is often wrongly written to start * from 1 as well, so prefer WM/TrackNumber when we have it: either * replace the value added earlier from WM/Track or put it first in * the list, so that it will get picked up by _get_uint() */ if (strcmp (name_utf8, "WM/TrackNumber") == 0) merge_mode = GST_TAG_MERGE_REPLACE; gst_tag_list_add_values (taglist, merge_mode, gst_tag_name, &tag_value, NULL); } else { GST_DEBUG ("Setting global metadata %s", name_utf8); gst_structure_set_value (demux->global_metadata, name_utf8, &tag_value); } g_value_unset (&tag_value); } } g_free (name); g_free (value); g_free (name_utf8); } gst_asf_demux_add_global_tags (demux, taglist); return GST_FLOW_OK; /* Errors */ not_enough_data: { GST_WARNING ("Unexpected end of data parsing ext content desc object"); gst_tag_list_unref (taglist); return GST_FLOW_OK; /* not really fatal */ } } static GstStructure * gst_asf_demux_get_metadata_for_stream (GstASFDemux * demux, guint stream_num) { gchar sname[32]; guint i; g_snprintf (sname, sizeof (sname), "stream-%u", stream_num); for (i = 0; i < gst_caps_get_size (demux->metadata); ++i) { GstStructure *s; s = gst_caps_get_structure (demux->metadata, i); if (gst_structure_has_name (s, sname)) return s; } gst_caps_append_structure (demux->metadata, gst_structure_new_empty (sname)); /* try lookup again; demux->metadata took ownership of the structure, so we * can't really make any assumptions about what happened to it, so we can't * just return it directly after appending it */ return 
gst_asf_demux_get_metadata_for_stream (demux, stream_num); } static GstFlowReturn gst_asf_demux_process_metadata (GstASFDemux * demux, guint8 * data, guint64 size) { guint16 blockcount, i; GST_INFO_OBJECT (demux, "object is a metadata object"); /* Content Descriptor Count */ if (size < 2) goto not_enough_data; blockcount = gst_asf_demux_get_uint16 (&data, &size); for (i = 0; i < blockcount; ++i) { GstStructure *s; guint16 stream_num, name_len, data_type, lang_idx G_GNUC_UNUSED; guint32 data_len, ival; gchar *name_utf8; if (size < (2 + 2 + 2 + 2 + 4)) goto not_enough_data; lang_idx = gst_asf_demux_get_uint16 (&data, &size); stream_num = gst_asf_demux_get_uint16 (&data, &size); name_len = gst_asf_demux_get_uint16 (&data, &size); data_type = gst_asf_demux_get_uint16 (&data, &size); data_len = gst_asf_demux_get_uint32 (&data, &size); if (size < name_len + data_len) goto not_enough_data; /* convert name to UTF-8 */ name_utf8 = g_convert ((gchar *) data, name_len, "UTF-8", "UTF-16LE", NULL, NULL, NULL); gst_asf_demux_skip_bytes (name_len, &data, &size); if (name_utf8 == NULL) { GST_WARNING ("Failed to convert value name to UTF8, skipping"); gst_asf_demux_skip_bytes (data_len, &data, &size); continue; } if (data_type != ASF_DEMUX_DATA_TYPE_DWORD) { gst_asf_demux_skip_bytes (data_len, &data, &size); g_free (name_utf8); continue; } /* read DWORD */ if (size < 4) { g_free (name_utf8); goto not_enough_data; } ival = gst_asf_demux_get_uint32 (&data, &size); /* skip anything else there may be, just in case */ gst_asf_demux_skip_bytes (data_len - 4, &data, &size); s = gst_asf_demux_get_metadata_for_stream (demux, stream_num); gst_structure_set (s, name_utf8, G_TYPE_INT, ival, NULL); g_free (name_utf8); } GST_INFO_OBJECT (demux, "metadata = %" GST_PTR_FORMAT, demux->metadata); return GST_FLOW_OK; /* Errors */ not_enough_data: { GST_WARNING ("Unexpected end of data parsing metadata object"); return GST_FLOW_OK; /* not really fatal */ } } static GstFlowReturn 
gst_asf_demux_process_header (GstASFDemux * demux, guint8 * data, guint64 size) { GstFlowReturn ret = GST_FLOW_OK; guint32 i, num_objects; guint8 unknown G_GNUC_UNUSED; /* Get the rest of the header's header */ if (size < (4 + 1 + 1)) goto not_enough_data; num_objects = gst_asf_demux_get_uint32 (&data, &size); unknown = gst_asf_demux_get_uint8 (&data, &size); unknown = gst_asf_demux_get_uint8 (&data, &size); GST_INFO_OBJECT (demux, "object is a header with %u parts", num_objects); demux->saw_file_header = FALSE; /* Loop through the header's objects, processing those */ for (i = 0; i < num_objects; ++i) { GST_INFO_OBJECT (demux, "reading header part %u", i); ret = gst_asf_demux_process_object (demux, &data, &size); if (ret != GST_FLOW_OK) { GST_WARNING ("process_object returned %s", gst_asf_get_flow_name (ret)); break; } } if (!demux->saw_file_header) { GST_ELEMENT_ERROR (demux, STREAM, DEMUX, (NULL), ("Header does not have mandatory FILE section")); return GST_FLOW_ERROR; } return ret; not_enough_data: { GST_ELEMENT_ERROR (demux, STREAM, DEMUX, (NULL), ("short read parsing HEADER object")); return GST_FLOW_ERROR; } } static GstFlowReturn gst_asf_demux_process_file (GstASFDemux * demux, guint8 * data, guint64 size) { guint64 creation_time G_GNUC_UNUSED; guint64 file_size G_GNUC_UNUSED; guint64 send_time G_GNUC_UNUSED; guint64 packets_count, play_time, preroll; guint32 flags, min_pktsize, max_pktsize, min_bitrate G_GNUC_UNUSED; if (size < (16 + 8 + 8 + 8 + 8 + 8 + 8 + 4 + 4 + 4 + 4)) goto not_enough_data; gst_asf_demux_skip_bytes (16, &data, &size); /* skip GUID */ file_size = gst_asf_demux_get_uint64 (&data, &size); creation_time = gst_asf_demux_get_uint64 (&data, &size); packets_count = gst_asf_demux_get_uint64 (&data, &size); play_time = gst_asf_demux_get_uint64 (&data, &size); send_time = gst_asf_demux_get_uint64 (&data, &size); preroll = gst_asf_demux_get_uint64 (&data, &size); flags = gst_asf_demux_get_uint32 (&data, &size); min_pktsize = 
gst_asf_demux_get_uint32 (&data, &size); max_pktsize = gst_asf_demux_get_uint32 (&data, &size); min_bitrate = gst_asf_demux_get_uint32 (&data, &size); demux->broadcast = ! !(flags & 0x01); demux->seekable = ! !(flags & 0x02); GST_DEBUG_OBJECT (demux, "min_pktsize = %u", min_pktsize); GST_DEBUG_OBJECT (demux, "flags::broadcast = %d", demux->broadcast); GST_DEBUG_OBJECT (demux, "flags::seekable = %d", demux->seekable); if (demux->broadcast) { /* these fields are invalid if the broadcast flag is set */ play_time = 0; file_size = 0; } if (min_pktsize != max_pktsize) goto non_fixed_packet_size; demux->packet_size = max_pktsize; /* FIXME: do we need send_time as well? what is it? */ if ((play_time * 100) >= (preroll * GST_MSECOND)) demux->play_time = (play_time * 100) - (preroll * GST_MSECOND); else demux->play_time = 0; demux->preroll = preroll * GST_MSECOND; /* initial latency */ demux->latency = demux->preroll; if (demux->play_time == 0) demux->seekable = FALSE; GST_DEBUG_OBJECT (demux, "play_time %" GST_TIME_FORMAT, GST_TIME_ARGS (demux->play_time)); GST_DEBUG_OBJECT (demux, "preroll %" GST_TIME_FORMAT, GST_TIME_ARGS (demux->preroll)); if (demux->play_time > 0) { demux->segment.duration = demux->play_time; } GST_INFO ("object is a file with %" G_GUINT64_FORMAT " data packets", packets_count); GST_INFO ("preroll = %" G_GUINT64_FORMAT, demux->preroll); demux->saw_file_header = TRUE; return GST_FLOW_OK; /* ERRORS */ non_fixed_packet_size: { GST_ELEMENT_ERROR (demux, STREAM, DEMUX, (NULL), ("packet size must be fixed")); return GST_FLOW_ERROR; } not_enough_data: { GST_ELEMENT_ERROR (demux, STREAM, DEMUX, (NULL), ("short read parsing FILE object")); return GST_FLOW_ERROR; } } /* Content Description Object */ static GstFlowReturn gst_asf_demux_process_comment (GstASFDemux * demux, guint8 * data, guint64 size) { struct { const gchar *gst_tag; guint16 val_length; gchar *val_utf8; } tags[5] = { { GST_TAG_TITLE, 0, NULL}, { GST_TAG_ARTIST, 0, NULL}, { GST_TAG_COPYRIGHT, 0, 
NULL}, { GST_TAG_DESCRIPTION, 0, NULL}, { GST_TAG_COMMENT, 0, NULL} }; GstTagList *taglist; GValue value = { 0 }; gsize in, out; gint i = -1; GST_INFO_OBJECT (demux, "object is a comment"); if (size < (2 + 2 + 2 + 2 + 2)) goto not_enough_data; tags[0].val_length = gst_asf_demux_get_uint16 (&data, &size); tags[1].val_length = gst_asf_demux_get_uint16 (&data, &size); tags[2].val_length = gst_asf_demux_get_uint16 (&data, &size); tags[3].val_length = gst_asf_demux_get_uint16 (&data, &size); tags[4].val_length = gst_asf_demux_get_uint16 (&data, &size); GST_DEBUG_OBJECT (demux, "Comment lengths: title=%d author=%d copyright=%d " "description=%d rating=%d", tags[0].val_length, tags[1].val_length, tags[2].val_length, tags[3].val_length, tags[4].val_length); for (i = 0; i < G_N_ELEMENTS (tags); ++i) { if (size < tags[i].val_length) goto not_enough_data; /* might be just '/0', '/0'... */ if (tags[i].val_length > 2 && tags[i].val_length % 2 == 0) { /* convert to UTF-8 */ tags[i].val_utf8 = g_convert ((gchar *) data, tags[i].val_length, "UTF-8", "UTF-16LE", &in, &out, NULL); } gst_asf_demux_skip_bytes (tags[i].val_length, &data, &size); } /* parse metadata into taglist */ taglist = gst_tag_list_new_empty (); g_value_init (&value, G_TYPE_STRING); for (i = 0; i < G_N_ELEMENTS (tags); ++i) { if (tags[i].val_utf8 && strlen (tags[i].val_utf8) > 0 && tags[i].gst_tag) { g_value_set_string (&value, tags[i].val_utf8); gst_tag_list_add_values (taglist, GST_TAG_MERGE_APPEND, tags[i].gst_tag, &value, NULL); } } g_value_unset (&value); gst_asf_demux_add_global_tags (demux, taglist); for (i = 0; i < G_N_ELEMENTS (tags); ++i) g_free (tags[i].val_utf8); return GST_FLOW_OK; not_enough_data: { GST_WARNING_OBJECT (demux, "unexpectedly short of data while processing " "comment tag section %d, skipping comment object", i); for (i = 0; i < G_N_ELEMENTS (tags); i++) g_free (tags[i].val_utf8); return GST_FLOW_OK; /* not really fatal */ } } static GstFlowReturn 
gst_asf_demux_process_bitrate_props_object (GstASFDemux * demux, guint8 * data,
    guint64 size)
{
  guint16 num_streams, i;
  AsfStream *stream;

  /* need at least the 16-bit record count */
  if (size < 2)
    goto not_enough_data;

  num_streams = gst_asf_demux_get_uint16 (&data, &size);

  GST_INFO ("object is a bitrate properties object with %u streams",
      num_streams);

  /* each record: 16-bit stream id + 32-bit average bitrate; the product
   * cannot overflow (guint16 * 6 fits comfortably in int) */
  if (size < (num_streams * (2 + 4)))
    goto not_enough_data;

  for (i = 0; i < num_streams; ++i) {
    guint32 bitrate;
    guint16 stream_id;

    stream_id = gst_asf_demux_get_uint16 (&data, &size);
    bitrate = gst_asf_demux_get_uint32 (&data, &size);

    if (stream_id < GST_ASF_DEMUX_NUM_STREAM_IDS) {
      GST_DEBUG_OBJECT (demux, "bitrate of stream %u = %u", stream_id, bitrate);
      stream = gst_asf_demux_get_stream (demux, stream_id);
      if (stream) {
        /* stash the bitrate as a pending tag on the stream; REPLACE so a
         * later bitrate object overrides an earlier one */
        if (stream->pending_tags == NULL)
          stream->pending_tags = gst_tag_list_new_empty ();
        gst_tag_list_add (stream->pending_tags, GST_TAG_MERGE_REPLACE,
            GST_TAG_BITRATE, bitrate, NULL);
      } else {
        GST_WARNING_OBJECT (demux, "Stream id %u wasn't found", stream_id);
      }
    } else {
      GST_WARNING ("stream id %u is too large", stream_id);
    }
  }

  return GST_FLOW_OK;

not_enough_data:
  {
    GST_WARNING_OBJECT (demux, "short read parsing bitrate props object!");
    return GST_FLOW_OK;         /* not really fatal */
  }
}

/* Header Extension Object: wraps further nested ASF objects which are parsed
 * recursively via gst_asf_demux_process_object(). */
static GstFlowReturn
gst_asf_demux_process_header_ext (GstASFDemux * demux, guint8 * data,
    guint64 size)
{
  GstFlowReturn ret = GST_FLOW_OK;
  guint64 hdr_size;

  /* Get the rest of the header's header */
  if (size < (16 + 2 + 4))
    goto not_enough_data;

  /* skip GUID and two other bytes */
  gst_asf_demux_skip_bytes (16 + 2, &data, &size);
  hdr_size = gst_asf_demux_get_uint32 (&data, &size);
  GST_INFO ("extended header object with a size of %u bytes", (guint) size);

  /* FIXME: does data_size include the rest of the header that we have read?
*/ if (hdr_size > size) goto not_enough_data; while (hdr_size > 0) { ret = gst_asf_demux_process_object (demux, &data, &hdr_size); if (ret != GST_FLOW_OK) break; } return ret; not_enough_data: { GST_ELEMENT_ERROR (demux, STREAM, DEMUX, (NULL), ("short read parsing extended header object")); return GST_FLOW_ERROR; } } static GstFlowReturn gst_asf_demux_process_language_list (GstASFDemux * demux, guint8 * data, guint64 size) { guint i; if (size < 2) goto not_enough_data; if (demux->languages) { GST_WARNING ("More than one LANGUAGE_LIST object in stream"); g_strfreev (demux->languages); demux->languages = NULL; demux->num_languages = 0; } demux->num_languages = gst_asf_demux_get_uint16 (&data, &size); GST_LOG ("%u languages:", demux->num_languages); demux->languages = g_new0 (gchar *, demux->num_languages + 1); for (i = 0; i < demux->num_languages; ++i) { guint8 len, *lang_data = NULL; if (size < 1) goto not_enough_data; len = gst_asf_demux_get_uint8 (&data, &size); if (gst_asf_demux_get_bytes (&lang_data, len, &data, &size)) { gchar *utf8; utf8 = g_convert ((gchar *) lang_data, len, "UTF-8", "UTF-16LE", NULL, NULL, NULL); /* truncate "en-us" etc. 
to just "en" */ if (utf8 && strlen (utf8) >= 5 && (utf8[2] == '-' || utf8[2] == '_')) { utf8[2] = '\0'; } GST_DEBUG ("[%u] %s", i, GST_STR_NULL (utf8)); demux->languages[i] = utf8; g_free (lang_data); } else { goto not_enough_data; } } return GST_FLOW_OK; not_enough_data: { GST_WARNING_OBJECT (demux, "short read parsing language list object!"); g_free (demux->languages); demux->languages = NULL; demux->num_languages = 0; return GST_FLOW_OK; /* not fatal */ } } static GstFlowReturn gst_asf_demux_process_simple_index (GstASFDemux * demux, guint8 * data, guint64 size) { GstClockTime interval; guint32 count, i; if (size < (16 + 8 + 4 + 4)) goto not_enough_data; /* skip file id */ gst_asf_demux_skip_bytes (16, &data, &size); interval = gst_asf_demux_get_uint64 (&data, &size) * (GstClockTime) 100; gst_asf_demux_skip_bytes (4, &data, &size); count = gst_asf_demux_get_uint32 (&data, &size); if (count > 0) { demux->sidx_interval = interval; demux->sidx_num_entries = count; g_free (demux->sidx_entries); demux->sidx_entries = g_new0 (AsfSimpleIndexEntry, count); for (i = 0; i < count; ++i) { if (G_UNLIKELY (size < 6)) { /* adjust for broken files, to avoid having entries at the end * of the parsed index that point to time=0. 
Resulting in seeking to * the end of the file leading back to the beginning */ demux->sidx_num_entries -= (count - i); break; } demux->sidx_entries[i].packet = gst_asf_demux_get_uint32 (&data, &size); demux->sidx_entries[i].count = gst_asf_demux_get_uint16 (&data, &size); GST_LOG_OBJECT (demux, "%" GST_TIME_FORMAT " = packet %4u count : %2d", GST_TIME_ARGS (i * interval), demux->sidx_entries[i].packet, demux->sidx_entries[i].count); } } else { GST_DEBUG_OBJECT (demux, "simple index object with 0 entries"); } return GST_FLOW_OK; not_enough_data: { GST_WARNING_OBJECT (demux, "short read parsing simple index object!"); return GST_FLOW_OK; /* not fatal */ } } static GstFlowReturn gst_asf_demux_process_advanced_mutual_exclusion (GstASFDemux * demux, guint8 * data, guint64 size) { ASFGuid guid; guint16 num, i; if (size < 16 + 2 + (2 * 2)) goto not_enough_data; gst_asf_demux_get_guid (&guid, &data, &size); num = gst_asf_demux_get_uint16 (&data, &size); if (num < 2) { GST_WARNING_OBJECT (demux, "nonsensical mutually exclusive streams count"); return GST_FLOW_OK; } if (size < (num * sizeof (guint16))) goto not_enough_data; /* read mutually exclusive stream numbers */ for (i = 0; i < num; ++i) { guint8 mes; mes = gst_asf_demux_get_uint16 (&data, &size) & 0x7f; GST_LOG_OBJECT (demux, "mutually exclusive: stream %d", mes); demux->mut_ex_streams = g_slist_append (demux->mut_ex_streams, GINT_TO_POINTER (mes)); } return GST_FLOW_OK; /* Errors */ not_enough_data: { GST_WARNING_OBJECT (demux, "short read parsing advanced mutual exclusion"); return GST_FLOW_OK; /* not absolutely fatal */ } } gboolean gst_asf_demux_is_unknown_stream (GstASFDemux * demux, guint stream_num) { return g_slist_find (demux->other_streams, GINT_TO_POINTER (stream_num)) == NULL; } static GstFlowReturn gst_asf_demux_process_ext_stream_props (GstASFDemux * demux, guint8 * data, guint64 size) { AsfStreamExtProps esp; AsfStream *stream = NULL; AsfObject stream_obj; guint16 stream_name_count; guint16 
num_payload_ext; guint64 len; guint8 *stream_obj_data = NULL; guint8 *data_start; guint obj_size; guint i, stream_num; data_start = data; obj_size = (guint) size; esp.payload_extensions = NULL; if (size < 64) goto not_enough_data; esp.valid = TRUE; esp.start_time = gst_asf_demux_get_uint64 (&data, &size) * GST_MSECOND; esp.end_time = gst_asf_demux_get_uint64 (&data, &size) * GST_MSECOND; esp.data_bitrate = gst_asf_demux_get_uint32 (&data, &size); esp.buffer_size = gst_asf_demux_get_uint32 (&data, &size); esp.intial_buf_fullness = gst_asf_demux_get_uint32 (&data, &size); esp.data_bitrate2 = gst_asf_demux_get_uint32 (&data, &size); esp.buffer_size2 = gst_asf_demux_get_uint32 (&data, &size); esp.intial_buf_fullness2 = gst_asf_demux_get_uint32 (&data, &size); esp.max_obj_size = gst_asf_demux_get_uint32 (&data, &size); esp.flags = gst_asf_demux_get_uint32 (&data, &size); stream_num = gst_asf_demux_get_uint16 (&data, &size); esp.lang_idx = gst_asf_demux_get_uint16 (&data, &size); esp.avg_time_per_frame = gst_asf_demux_get_uint64 (&data, &size); stream_name_count = gst_asf_demux_get_uint16 (&data, &size); num_payload_ext = gst_asf_demux_get_uint16 (&data, &size); GST_INFO ("start_time = %" GST_TIME_FORMAT, GST_TIME_ARGS (esp.start_time)); GST_INFO ("end_time = %" GST_TIME_FORMAT, GST_TIME_ARGS (esp.end_time)); GST_INFO ("flags = %08x", esp.flags); GST_INFO ("average time per frame = %" GST_TIME_FORMAT, GST_TIME_ARGS (esp.avg_time_per_frame * 100)); GST_INFO ("stream number = %u", stream_num); GST_INFO ("stream language ID idx = %u (%s)", esp.lang_idx, (esp.lang_idx < demux->num_languages) ? 
GST_STR_NULL (demux->languages[esp.lang_idx]) : "??"); GST_INFO ("stream name count = %u", stream_name_count); /* read stream names */ for (i = 0; i < stream_name_count; ++i) { guint16 stream_lang_idx G_GNUC_UNUSED; gchar *stream_name = NULL; if (size < 2) goto not_enough_data; stream_lang_idx = gst_asf_demux_get_uint16 (&data, &size); if (!gst_asf_demux_get_string (&stream_name, NULL, &data, &size)) goto not_enough_data; GST_INFO ("stream name %d: %s", i, GST_STR_NULL (stream_name)); g_free (stream_name); /* TODO: store names in struct */ } /* read payload extension systems stuff */ GST_LOG ("payload extension systems count = %u", num_payload_ext); if (num_payload_ext > 0) esp.payload_extensions = g_new0 (AsfPayloadExtension, num_payload_ext + 1); for (i = 0; i < num_payload_ext; ++i) { AsfPayloadExtension ext; ASFGuid ext_guid; guint32 sys_info_len; if (size < 16 + 2 + 4) goto not_enough_data; gst_asf_demux_get_guid (&ext_guid, &data, &size); ext.id = gst_asf_demux_identify_guid (asf_payload_ext_guids, &ext_guid); ext.len = gst_asf_demux_get_uint16 (&data, &size); sys_info_len = gst_asf_demux_get_uint32 (&data, &size); GST_LOG ("payload systems info len = %u", sys_info_len); if (!gst_asf_demux_skip_bytes (sys_info_len, &data, &size)) goto not_enough_data; esp.payload_extensions[i] = ext; } GST_LOG ("bytes read: %u/%u", (guint) (data - data_start), obj_size); /* there might be an optional STREAM_INFO object here now; if not, we * should have parsed the corresponding stream info object already (since * we are parsing the extended stream properties objects delayed) */ if (size == 0) { stream = gst_asf_demux_get_stream (demux, stream_num); goto done; } if (size < ASF_OBJECT_HEADER_SIZE) goto not_enough_data; /* get size of the stream object */ if (!asf_demux_peek_object (demux, data, size, &stream_obj, TRUE)) goto corrupted_stream; if (stream_obj.id != ASF_OBJ_STREAM) goto expected_stream_object; if (stream_obj.size < ASF_OBJECT_HEADER_SIZE || stream_obj.size > (10 * 
1024 * 1024)) goto not_enough_data; gst_asf_demux_skip_bytes (ASF_OBJECT_HEADER_SIZE, &data, &size); /* process this stream object later after all the other 'normal' ones * have been processed (since the others are more important/non-hidden) */ len = stream_obj.size - ASF_OBJECT_HEADER_SIZE; if (!gst_asf_demux_get_bytes (&stream_obj_data, len, &data, &size)) goto not_enough_data; /* parse stream object */ stream = gst_asf_demux_parse_stream_object (demux, stream_obj_data, len); g_free (stream_obj_data); done: if (stream) { stream->ext_props = esp; /* try to set the framerate */ if (stream->is_video && stream->caps) { GValue framerate = { 0 }; GstStructure *s; gint num, denom; g_value_init (&framerate, GST_TYPE_FRACTION); num = GST_SECOND / 100; denom = esp.avg_time_per_frame; if (denom == 0) { /* avoid division by 0, assume 25/1 framerate */ denom = GST_SECOND / 2500; } gst_value_set_fraction (&framerate, num, denom); stream->caps = gst_caps_make_writable (stream->caps); s = gst_caps_get_structure (stream->caps, 0); gst_structure_set_value (s, "framerate", &framerate); g_value_unset (&framerate); GST_DEBUG_OBJECT (demux, "setting framerate of %d/%d = %f", num, denom, ((gdouble) num) / denom); } /* add language info now if we have it */ if (stream->ext_props.lang_idx < demux->num_languages) { if (stream->pending_tags == NULL) stream->pending_tags = gst_tag_list_new_empty (); GST_LOG_OBJECT (demux, "stream %u has language '%s'", stream->id, demux->languages[stream->ext_props.lang_idx]); gst_tag_list_add (stream->pending_tags, GST_TAG_MERGE_APPEND, GST_TAG_LANGUAGE_CODE, demux->languages[stream->ext_props.lang_idx], NULL); } } else if (gst_asf_demux_is_unknown_stream (demux, stream_num)) { GST_WARNING_OBJECT (demux, "Ext. 
stream properties for unknown stream"); } if (!stream) g_free (esp.payload_extensions); return GST_FLOW_OK; /* Errors */ not_enough_data: { GST_WARNING_OBJECT (demux, "short read parsing ext stream props object!"); g_free (esp.payload_extensions); return GST_FLOW_OK; /* not absolutely fatal */ } expected_stream_object: { GST_WARNING_OBJECT (demux, "error parsing extended stream properties " "object: expected embedded stream object, but got %s object instead!", gst_asf_get_guid_nick (asf_object_guids, stream_obj.id)); g_free (esp.payload_extensions); return GST_FLOW_OK; /* not absolutely fatal */ } corrupted_stream: { GST_WARNING_OBJECT (demux, "Corrupted stream"); g_free (esp.payload_extensions); return GST_FLOW_ERROR; } } static const gchar * gst_asf_demux_push_obj (GstASFDemux * demux, guint32 obj_id) { const gchar *nick; nick = gst_asf_get_guid_nick (asf_object_guids, obj_id); if (g_str_has_prefix (nick, "ASF_OBJ_")) nick += strlen ("ASF_OBJ_"); if (demux->objpath == NULL) { demux->objpath = g_strdup (nick); } else { gchar *newpath; newpath = g_strdup_printf ("%s/%s", demux->objpath, nick); g_free (demux->objpath); demux->objpath = newpath; } return (const gchar *) demux->objpath; } static void gst_asf_demux_pop_obj (GstASFDemux * demux) { gchar *s; if ((s = g_strrstr (demux->objpath, "/"))) { *s = '\0'; } else { g_free (demux->objpath); demux->objpath = NULL; } } static void gst_asf_demux_process_queued_extended_stream_objects (GstASFDemux * demux) { GSList *l; guint i; /* Parse the queued extended stream property objects and add the info * to the existing streams or add the new embedded streams, but without * activating them yet */ GST_LOG_OBJECT (demux, "%u queued extended stream properties objects", g_slist_length (demux->ext_stream_props)); for (l = demux->ext_stream_props, i = 0; l != NULL; l = l->next, ++i) { GstBuffer *buf = GST_BUFFER (l->data); GstMapInfo map; gst_buffer_map (buf, &map, GST_MAP_READ); GST_LOG_OBJECT (demux, "parsing ext. 
stream properties object #%u", i); gst_asf_demux_process_ext_stream_props (demux, map.data, map.size); gst_buffer_unmap (buf, &map); gst_buffer_unref (buf); } g_slist_free (demux->ext_stream_props); demux->ext_stream_props = NULL; } #if 0 static void gst_asf_demux_activate_ext_props_streams (GstASFDemux * demux) { guint i, j; for (i = 0; i < demux->num_streams; ++i) { AsfStream *stream; gboolean is_hidden; GSList *x; stream = &demux->stream[i]; GST_LOG_OBJECT (demux, "checking stream %2u", stream->id); if (stream->active) { GST_LOG_OBJECT (demux, "stream %2u is already activated", stream->id); continue; } is_hidden = FALSE; for (x = demux->mut_ex_streams; x != NULL; x = x->next) { guint8 *mes; /* check for each mutual exclusion whether it affects this stream */ for (mes = (guint8 *) x->data; mes != NULL && *mes != 0xff; ++mes) { if (*mes == stream->id) { /* if yes, check if we've already added streams that are mutually * exclusive with the stream we're about to add */ for (mes = (guint8 *) x->data; mes != NULL && *mes != 0xff; ++mes) { for (j = 0; j < demux->num_streams; ++j) { /* if the broadcast flag is set, assume the hidden streams aren't * actually streamed and hide them (or playbin won't work right), * otherwise assume their data is available */ if (demux->stream[j].id == *mes && demux->broadcast) { is_hidden = TRUE; GST_LOG_OBJECT (demux, "broadcast stream ID %d to be added is " "mutually exclusive with already existing stream ID %d, " "hiding stream", stream->id, demux->stream[j].id); goto next; } } } break; } } } next: /* FIXME: we should do stream activation based on preroll data in * streaming mode too */ if (demux->streaming && !is_hidden) gst_asf_demux_activate_stream (demux, stream); } } #endif static GstFlowReturn gst_asf_demux_process_object (GstASFDemux * demux, guint8 ** p_data, guint64 * p_size) { GstFlowReturn ret = GST_FLOW_OK; AsfObject obj; guint64 obj_data_size; if (*p_size < ASF_OBJECT_HEADER_SIZE) return ASF_FLOW_NEED_MORE_DATA; if 
(!asf_demux_peek_object (demux, *p_data, ASF_OBJECT_HEADER_SIZE, &obj, TRUE)) return GST_FLOW_ERROR; gst_asf_demux_skip_bytes (ASF_OBJECT_HEADER_SIZE, p_data, p_size); obj_data_size = obj.size - ASF_OBJECT_HEADER_SIZE; if (*p_size < obj_data_size) return ASF_FLOW_NEED_MORE_DATA; gst_asf_demux_push_obj (demux, obj.id); GST_INFO ("%s: size %" G_GUINT64_FORMAT, demux->objpath, obj.size); switch (obj.id) { case ASF_OBJ_STREAM: gst_asf_demux_parse_stream_object (demux, *p_data, obj_data_size); ret = GST_FLOW_OK; break; case ASF_OBJ_FILE: ret = gst_asf_demux_process_file (demux, *p_data, obj_data_size); break; case ASF_OBJ_HEADER: ret = gst_asf_demux_process_header (demux, *p_data, obj_data_size); break; case ASF_OBJ_COMMENT: ret = gst_asf_demux_process_comment (demux, *p_data, obj_data_size); break; case ASF_OBJ_HEAD1: ret = gst_asf_demux_process_header_ext (demux, *p_data, obj_data_size); break; case ASF_OBJ_BITRATE_PROPS: ret = gst_asf_demux_process_bitrate_props_object (demux, *p_data, obj_data_size); break; case ASF_OBJ_EXT_CONTENT_DESC: ret = gst_asf_demux_process_ext_content_desc (demux, *p_data, obj_data_size); break; case ASF_OBJ_METADATA_OBJECT: ret = gst_asf_demux_process_metadata (demux, *p_data, obj_data_size); break; case ASF_OBJ_EXTENDED_STREAM_PROPS:{ GstBuffer *buf; /* process these later, we might not have parsed the corresponding * stream object yet */ GST_LOG ("%s: queued for later parsing", demux->objpath); buf = gst_buffer_new_and_alloc (obj_data_size); gst_buffer_fill (buf, 0, *p_data, obj_data_size); demux->ext_stream_props = g_slist_append (demux->ext_stream_props, buf); ret = GST_FLOW_OK; break; } case ASF_OBJ_LANGUAGE_LIST: ret = gst_asf_demux_process_language_list (demux, *p_data, obj_data_size); break; case ASF_OBJ_ADVANCED_MUTUAL_EXCLUSION: ret = gst_asf_demux_process_advanced_mutual_exclusion (demux, *p_data, obj_data_size); break; case ASF_OBJ_SIMPLE_INDEX: ret = gst_asf_demux_process_simple_index (demux, *p_data, obj_data_size); break; 
case ASF_OBJ_CONTENT_ENCRYPTION: case ASF_OBJ_EXT_CONTENT_ENCRYPTION: case ASF_OBJ_DIGITAL_SIGNATURE_OBJECT: case ASF_OBJ_UNKNOWN_ENCRYPTION_OBJECT: goto error_encrypted; case ASF_OBJ_CONCEAL_NONE: case ASF_OBJ_HEAD2: case ASF_OBJ_UNDEFINED: case ASF_OBJ_CODEC_COMMENT: case ASF_OBJ_INDEX: case ASF_OBJ_PADDING: case ASF_OBJ_BITRATE_MUTEX: case ASF_OBJ_COMPATIBILITY: case ASF_OBJ_INDEX_PLACEHOLDER: case ASF_OBJ_INDEX_PARAMETERS: case ASF_OBJ_STREAM_PRIORITIZATION: case ASF_OBJ_SCRIPT_COMMAND: case ASF_OBJ_METADATA_LIBRARY_OBJECT: default: /* Unknown/unhandled object, skip it and hope for the best */ GST_INFO ("%s: skipping object", demux->objpath); ret = GST_FLOW_OK; break; } /* this can't fail, we checked the number of bytes available before */ gst_asf_demux_skip_bytes (obj_data_size, p_data, p_size); GST_LOG ("%s: ret = %s", demux->objpath, gst_asf_get_flow_name (ret)); gst_asf_demux_pop_obj (demux); return ret; /* ERRORS */ error_encrypted: { GST_ELEMENT_ERROR (demux, STREAM, DECRYPT, (NULL), (NULL)); return GST_FLOW_ERROR; } } static void gst_asf_demux_descramble_buffer (GstASFDemux * demux, AsfStream * stream, GstBuffer ** p_buffer) { GstBuffer *descrambled_buffer; GstBuffer *scrambled_buffer; GstBuffer *sub_buffer; guint offset; guint off; guint row; guint col; guint idx; /* descrambled_buffer is initialised in the first iteration */ descrambled_buffer = NULL; scrambled_buffer = *p_buffer; if (gst_buffer_get_size (scrambled_buffer) < stream->ds_packet_size * stream->span) return; for (offset = 0; offset < gst_buffer_get_size (scrambled_buffer); offset += stream->ds_chunk_size) { off = offset / stream->ds_chunk_size; row = off / stream->span; col = off % stream->span; idx = row + col * stream->ds_packet_size / stream->ds_chunk_size; GST_DEBUG ("idx=%u, row=%u, col=%u, off=%u, ds_chunk_size=%u", idx, row, col, off, stream->ds_chunk_size); GST_DEBUG ("scrambled buffer size=%" G_GSIZE_FORMAT ", span=%u, packet_size=%u", gst_buffer_get_size (scrambled_buffer), 
stream->span, stream->ds_packet_size); GST_DEBUG ("gst_buffer_get_size (scrambled_buffer) = %" G_GSIZE_FORMAT, gst_buffer_get_size (scrambled_buffer)); sub_buffer = gst_buffer_copy_region (scrambled_buffer, GST_BUFFER_COPY_MEMORY, idx * stream->ds_chunk_size, stream->ds_chunk_size); if (!offset) { descrambled_buffer = sub_buffer; } else { descrambled_buffer = gst_buffer_append (descrambled_buffer, sub_buffer); } } GST_BUFFER_TIMESTAMP (descrambled_buffer) = GST_BUFFER_TIMESTAMP (scrambled_buffer); GST_BUFFER_DURATION (descrambled_buffer) = GST_BUFFER_DURATION (scrambled_buffer); GST_BUFFER_OFFSET (descrambled_buffer) = GST_BUFFER_OFFSET (scrambled_buffer); GST_BUFFER_OFFSET_END (descrambled_buffer) = GST_BUFFER_OFFSET_END (scrambled_buffer); /* FIXME/CHECK: do we need to transfer buffer flags here too? */ gst_buffer_unref (scrambled_buffer); *p_buffer = descrambled_buffer; } static gboolean gst_asf_demux_element_send_event (GstElement * element, GstEvent * event) { GstASFDemux *demux = GST_ASF_DEMUX (element); gint i; GST_DEBUG ("handling element event of type %s", GST_EVENT_TYPE_NAME (event)); for (i = 0; i < demux->num_streams; ++i) { gst_event_ref (event); if (gst_asf_demux_handle_src_event (demux->stream[i].pad, GST_OBJECT_CAST (element), event)) { gst_event_unref (event); return TRUE; } } gst_event_unref (event); return FALSE; } /* takes ownership of the passed event */ static gboolean gst_asf_demux_send_event_unlocked (GstASFDemux * demux, GstEvent * event) { gboolean ret = TRUE; gint i; GST_DEBUG_OBJECT (demux, "sending %s event to all source pads", GST_EVENT_TYPE_NAME (event)); for (i = 0; i < demux->num_streams; ++i) { gst_event_ref (event); ret &= gst_pad_push_event (demux->stream[i].pad, event); } gst_event_unref (event); return ret; } static gboolean gst_asf_demux_handle_src_query (GstPad * pad, GstObject * parent, GstQuery * query) { GstASFDemux *demux; gboolean res = FALSE; demux = GST_ASF_DEMUX (parent); GST_DEBUG ("handling %s query", 
gst_query_type_get_name (GST_QUERY_TYPE (query))); switch (GST_QUERY_TYPE (query)) { case GST_QUERY_DURATION: { GstFormat format; gst_query_parse_duration (query, &format, NULL); if (format != GST_FORMAT_TIME) { GST_LOG ("only support duration queries in TIME format"); break; } res = gst_pad_query_default (pad, parent, query); if (!res) { GST_OBJECT_LOCK (demux); if (demux->segment.duration != GST_CLOCK_TIME_NONE) { GST_LOG ("returning duration: %" GST_TIME_FORMAT, GST_TIME_ARGS (demux->segment.duration)); gst_query_set_duration (query, GST_FORMAT_TIME, demux->segment.duration); res = TRUE; } else { GST_LOG ("duration not known yet"); } GST_OBJECT_UNLOCK (demux); } break; } case GST_QUERY_POSITION:{ GstFormat format; gst_query_parse_position (query, &format, NULL); if (format != GST_FORMAT_TIME) { GST_LOG ("only support position queries in TIME format"); break; } GST_OBJECT_LOCK (demux); if (demux->segment.position != GST_CLOCK_TIME_NONE) { GST_LOG ("returning position: %" GST_TIME_FORMAT, GST_TIME_ARGS (demux->segment.position)); gst_query_set_position (query, GST_FORMAT_TIME, demux->segment.position); res = TRUE; } else { GST_LOG ("position not known yet"); } GST_OBJECT_UNLOCK (demux); break; } case GST_QUERY_SEEKING:{ GstFormat format; gst_query_parse_seeking (query, &format, NULL, NULL, NULL); if (format == GST_FORMAT_TIME) { gint64 duration; GST_OBJECT_LOCK (demux); duration = demux->segment.duration; GST_OBJECT_UNLOCK (demux); if (!demux->streaming || !demux->seekable) { gst_query_set_seeking (query, GST_FORMAT_TIME, demux->seekable, 0, duration); res = TRUE; } else { GstFormat fmt; gboolean seekable; /* try upstream first in TIME */ res = gst_pad_query_default (pad, parent, query); gst_query_parse_seeking (query, &fmt, &seekable, NULL, NULL); GST_LOG_OBJECT (demux, "upstream %s seekable %d", GST_STR_NULL (gst_format_get_name (fmt)), seekable); /* if no luck, maybe in BYTES */ if (!seekable || fmt != GST_FORMAT_TIME) { GstQuery *q; q = gst_query_new_seeking 
(GST_FORMAT_BYTES); if ((res = gst_pad_peer_query (demux->sinkpad, q))) { gst_query_parse_seeking (q, &fmt, &seekable, NULL, NULL); GST_LOG_OBJECT (demux, "upstream %s seekable %d", GST_STR_NULL (gst_format_get_name (fmt)), seekable); if (fmt != GST_FORMAT_BYTES) seekable = FALSE; } gst_query_unref (q); gst_query_set_seeking (query, GST_FORMAT_TIME, seekable, 0, duration); res = TRUE; } } } else GST_LOG_OBJECT (demux, "only support seeking in TIME format"); break; } case GST_QUERY_LATENCY: { gboolean live; GstClockTime min, max; /* preroll delay does not matter in non-live pipeline, * but we might end up in a live (rtsp) one ... */ /* first forward */ res = gst_pad_query_default (pad, parent, query); if (!res) break; gst_query_parse_latency (query, &live, &min, &max); GST_DEBUG_OBJECT (demux, "Peer latency: live %d, min %" GST_TIME_FORMAT " max %" GST_TIME_FORMAT, live, GST_TIME_ARGS (min), GST_TIME_ARGS (max)); GST_OBJECT_LOCK (demux); min += demux->latency; if (max != -1) max += demux->latency; GST_OBJECT_UNLOCK (demux); gst_query_set_latency (query, live, min, max); break; } case GST_QUERY_SEGMENT: { GstFormat format; gint64 start, stop; format = demux->segment.format; start = gst_segment_to_stream_time (&demux->segment, format, demux->segment.start); if ((stop = demux->segment.stop) == -1) stop = demux->segment.duration; else stop = gst_segment_to_stream_time (&demux->segment, format, stop); gst_query_set_segment (query, demux->segment.rate, format, start, stop); res = TRUE; break; } default: res = gst_pad_query_default (pad, parent, query); break; } return res; } static GstStateChangeReturn gst_asf_demux_change_state (GstElement * element, GstStateChange transition) { GstASFDemux *demux = GST_ASF_DEMUX (element); GstStateChangeReturn ret = GST_STATE_CHANGE_SUCCESS; switch (transition) { case GST_STATE_CHANGE_NULL_TO_READY:{ gst_segment_init (&demux->segment, GST_FORMAT_TIME); demux->need_newsegment = TRUE; demux->segment_running = FALSE; demux->keyunit_sync = 
FALSE; demux->accurate = FALSE; demux->adapter = gst_adapter_new (); demux->metadata = gst_caps_new_empty (); demux->global_metadata = gst_structure_new_empty ("metadata"); demux->data_size = 0; demux->data_offset = 0; demux->index_offset = 0; demux->base_offset = 0; demux->flowcombiner = gst_flow_combiner_new (); break; } default: break; } ret = GST_ELEMENT_CLASS (parent_class)->change_state (element, transition); if (ret == GST_STATE_CHANGE_FAILURE) return ret; switch (transition) { case GST_STATE_CHANGE_PAUSED_TO_READY: gst_asf_demux_reset (demux, FALSE); break; case GST_STATE_CHANGE_READY_TO_NULL: gst_asf_demux_reset (demux, FALSE); gst_flow_combiner_free (demux->flowcombiner); demux->flowcombiner = NULL; break; default: break; } return ret; }
/* Parse an ASF "Extended Content Description" object: a sequence of
 * (name, datatype, value) descriptor triplets carrying WM/* metadata.
 * Recognised descriptors are converted into GStreamer tags on a tag list
 * that is merged into the demuxer's global tags; unrecognised string
 * descriptors are stored in demux->global_metadata.  All descriptor data
 * comes straight from the file and must be treated as untrusted.
 *
 * Returns GST_FLOW_OK even on malformed input (truncation is logged and
 * simply ends parsing early — not fatal for playback). */
gst_asf_demux_process_ext_content_desc (GstASFDemux * demux, guint8 * data,
    guint64 size)
{
  /* Other known (and unused) 'text/unicode' metadata available :
   *
   *   WM/Lyrics =
   *   WM/MediaPrimaryClassID = {D1607DBC-E323-4BE2-86A1-48A42A28441E}
   *   WMFSDKVersion = 9.00.00.2980
   *   WMFSDKNeeded = 0.0.0.0000
   *   WM/UniqueFileIdentifier = AMGa_id=R 15334;AMGp_id=P 5149;AMGt_id=T 2324984
   *   WM/Publisher = 4AD
   *   WM/Provider = AMG
   *   WM/ProviderRating = 8
   *   WM/ProviderStyle = Rock (similar to WM/Genre)
   *   WM/GenreID (similar to WM/Genre)
   *   WM/TrackNumber (same as WM/Track but as a string)
   *
   * Other known (and unused) 'non-text' metadata available :
   *
   *   WM/EncodingTime
   *   WM/MCDI
   *   IsVBR
   *
   * We might want to read WM/TrackNumber and use atoi() if we don't have
   * WM/Track
   */

  GstTagList *taglist;
  guint16 blockcount, i;
  gboolean content3D = FALSE;

  /* Maps the value of a "StereoscopicLayout" descriptor to the
   * corresponding 3D interleaving mode. */
  struct
  {
    const gchar *interleave_name;
    GstASF3DMode interleaving_type;
  } stereoscopic_layout_map[] = {
    {
    "SideBySideRF", GST_ASF_3D_SIDE_BY_SIDE_HALF_RL}, {
    "SideBySideLF", GST_ASF_3D_SIDE_BY_SIDE_HALF_LR}, {
    "OverUnderRT", GST_ASF_3D_TOP_AND_BOTTOM_HALF_RL}, {
    "OverUnderLT", GST_ASF_3D_TOP_AND_BOTTOM_HALF_LR}, {
    "DualStream", GST_ASF_3D_DUAL_STREAM}
  };
  GST_INFO_OBJECT (demux, "object is an extended content description");

  taglist = gst_tag_list_new_empty ();

  /* Content Descriptor Count */
  if (size < 2)
    goto not_enough_data;

  blockcount = gst_asf_demux_get_uint16 (&data, &size);

  for (i = 1; i <= blockcount; ++i) {
    const gchar *gst_tag_name;
    guint16 datatype;
    guint16 value_len;
    guint16 name_len;
    GValue tag_value = { 0, };
    gsize in, out;
    gchar *name;
    gchar *name_utf8 = NULL;
    gchar *value;

    /* Descriptor */
    if (!gst_asf_demux_get_string (&name, &name_len, &data, &size))
      goto not_enough_data;

    if (size < 2) {
      g_free (name);
      goto not_enough_data;
    }
    /* Descriptor Value Data Type */
    datatype = gst_asf_demux_get_uint16 (&data, &size);

    /* Descriptor Value (not really a string, but same thing reading-wise) */
    if (!gst_asf_demux_get_string (&value, &value_len, &data, &size)) {
      g_free (name);
      goto not_enough_data;
    }

    name_utf8 =
        g_convert (name, name_len, "UTF-8", "UTF-16LE", &in, &out, NULL);

    if (name_utf8 != NULL) {
      GST_DEBUG ("Found tag/metadata %s", name_utf8);

      gst_tag_name = gst_asf_demux_get_gst_tag_from_tag_name (name_utf8);
      GST_DEBUG ("gst_tag_name %s", GST_STR_NULL (gst_tag_name));

      switch (datatype) {
        case ASF_DEMUX_DATA_TYPE_UTF16LE_STRING:{
          gchar *value_utf8;

          value_utf8 = g_convert (value, value_len, "UTF-8", "UTF-16LE",
              &in, &out, NULL);

          /* get rid of tags with empty value */
          if (value_utf8 != NULL && *value_utf8 != '\0') {
            GST_DEBUG ("string value %s", value_utf8);

            value_utf8[out] = '\0';

            if (gst_tag_name != NULL) {
              if (strcmp (gst_tag_name, GST_TAG_DATE_TIME) == 0) {
                guint year = atoi (value_utf8);

                if (year > 0) {
                  g_value_init (&tag_value, GST_TYPE_DATE_TIME);
                  g_value_take_boxed (&tag_value, gst_date_time_new_y (year));
                }
              } else if (strcmp (gst_tag_name, GST_TAG_GENRE) == 0) {
                guint id3v1_genre_id;
                const gchar *genre_str;

                /* "(N)" style genres are looked up in the ID3v1 genre table */
                if (sscanf (value_utf8, "(%u)", &id3v1_genre_id) == 1 &&
                    ((genre_str = gst_tag_id3_genre_get (id3v1_genre_id)))) {
                  GST_DEBUG ("Genre: %s -> %s", value_utf8, genre_str);
                  g_free (value_utf8);
                  value_utf8 = g_strdup (genre_str);
                }
              } else {
                GType tag_type;

                /* convert tag from string to other type if required */
                tag_type = gst_tag_get_type (gst_tag_name);
                g_value_init (&tag_value, tag_type);
                if (!gst_value_deserialize (&tag_value, value_utf8)) {
                  GValue from_val = { 0, };

                  g_value_init (&from_val, G_TYPE_STRING);
                  g_value_set_string (&from_val, value_utf8);
                  if (!g_value_transform (&from_val, &tag_value)) {
                    GST_WARNING_OBJECT (demux,
                        "Could not transform string tag to " "%s tag type %s",
                        gst_tag_name, g_type_name (tag_type));
                    g_value_unset (&tag_value);
                  }
                  g_value_unset (&from_val);
                }
              }
            } else {
              /* metadata ! */
              GST_DEBUG ("Setting metadata");
              g_value_init (&tag_value, G_TYPE_STRING);
              g_value_set_string (&tag_value, value_utf8);
              /* If we found a stereoscopic marker, look for StereoscopicLayout
               * metadata */
              if (content3D) {
                guint i;
                if (strncmp ("StereoscopicLayout", name_utf8,
                        strlen (name_utf8)) == 0) {
                  for (i = 0; i < G_N_ELEMENTS (stereoscopic_layout_map); i++) {
                    if (g_str_equal (stereoscopic_layout_map[i].interleave_name,
                            value_utf8)) {
                      demux->asf_3D_mode =
                          stereoscopic_layout_map[i].interleaving_type;
                      GST_INFO ("find interleave type %u", demux->asf_3D_mode);
                    }
                  }
                }
                GST_INFO_OBJECT (demux, "3d type is %u", demux->asf_3D_mode);
              } else {
                demux->asf_3D_mode = GST_ASF_3D_NONE;
                GST_INFO_OBJECT (demux, "None 3d type");
              }
            }
          } else if (value_utf8 == NULL) {
            GST_WARNING ("Failed to convert string value to UTF8, skipping");
          } else {
            GST_DEBUG ("Skipping empty string value for %s",
                GST_STR_NULL (gst_tag_name));
          }
          g_free (value_utf8);
          break;
        }
        case ASF_DEMUX_DATA_TYPE_BYTE_ARRAY:{
          if (gst_tag_name) {
            if (!g_str_equal (gst_tag_name, GST_TAG_IMAGE)) {
              GST_FIXME ("Unhandled byte array tag %s",
                  GST_STR_NULL (gst_tag_name));
              break;
            } else {
              asf_demux_parse_picture_tag (taglist, (guint8 *) value,
                  value_len);
            }
          }
          break;
        }
        case ASF_DEMUX_DATA_TYPE_DWORD:{
          guint uint_val;

          /* FIX (CVE-2017-5847 / CWE-125): the descriptor value is
           * attacker-controlled and may be shorter than the 4 bytes a
           * DWORD requires — reading it unconditionally was an
           * out-of-bounds read. Skip undersized values. */
          if (value_len < 4)
            break;

          uint_val = GST_READ_UINT32_LE (value);

          /* this is the track number */
          g_value_init (&tag_value, G_TYPE_UINT);

          /* WM/Track counts from 0 */
          if (!strcmp (name_utf8, "WM/Track"))
            ++uint_val;

          g_value_set_uint (&tag_value, uint_val);
          break;
        }
          /* Detect 3D */
        case ASF_DEMUX_DATA_TYPE_BOOL:{
          gboolean bool_val;

          /* FIX (CVE-2017-5847 / CWE-125): same undersized-value guard as
           * the DWORD case — BOOL is stored as a 32-bit little-endian
           * value, so fewer than 4 bytes would read past the buffer. */
          if (value_len < 4)
            break;

          bool_val = GST_READ_UINT32_LE (value);

          if (strncmp ("Stereoscopic", name_utf8, strlen (name_utf8)) == 0) {
            if (bool_val) {
              GST_INFO_OBJECT (demux, "This is 3D contents");
              content3D = TRUE;
            } else {
              GST_INFO_OBJECT (demux, "This is not 3D contenst");
              content3D = FALSE;
            }
          }
          break;
        }
        default:{
          GST_DEBUG ("Skipping tag %s of type %d", gst_tag_name, datatype);
          break;
        }
      }

      if (G_IS_VALUE (&tag_value)) {
        if (gst_tag_name) {
          GstTagMergeMode merge_mode = GST_TAG_MERGE_APPEND;

          /* WM/TrackNumber is more reliable than WM/Track, since the latter
           * is supposed to have a 0 base but is often wrongly written to start
           * from 1 as well, so prefer WM/TrackNumber when we have it: either
           * replace the value added earlier from WM/Track or put it first in
           * the list, so that it will get picked up by _get_uint() */
          if (strcmp (name_utf8, "WM/TrackNumber") == 0)
            merge_mode = GST_TAG_MERGE_REPLACE;

          gst_tag_list_add_values (taglist, merge_mode, gst_tag_name,
              &tag_value, NULL);
        } else {
          GST_DEBUG ("Setting global metadata %s", name_utf8);
          gst_structure_set_value (demux->global_metadata, name_utf8,
              &tag_value);
        }

        g_value_unset (&tag_value);
      }
    }

    /* name/value/name_utf8 are owned by this iteration in all paths */
    g_free (name);
    g_free (value);
    g_free (name_utf8);
  }

  gst_asf_demux_add_global_tags (demux, taglist);

  return GST_FLOW_OK;

/* Errors */
not_enough_data:
  {
    GST_WARNING ("Unexpected end of data parsing ext content desc object");
    gst_tag_list_unref (taglist);
    return GST_FLOW_OK;          /* not really fatal */
  }
}
/* Parse an ASF "Extended Content Description" object: a sequence of
 * (name, datatype, value) descriptor triplets carrying WM/* metadata.
 * Recognised descriptors become GStreamer tags merged into the demuxer's
 * global tags; unrecognised string descriptors are stored in
 * demux->global_metadata.  Descriptor data is untrusted file input; the
 * value_len < 4 guards below prevent out-of-bounds reads on undersized
 * DWORD/BOOL values (CVE-2017-5847).  Returns GST_FLOW_OK even on
 * truncated input — malformed metadata is not fatal for playback. */
gst_asf_demux_process_ext_content_desc (GstASFDemux * demux, guint8 * data,
    guint64 size)
{
  /* Other known (and unused) 'text/unicode' metadata available :
   *
   *   WM/Lyrics =
   *   WM/MediaPrimaryClassID = {D1607DBC-E323-4BE2-86A1-48A42A28441E}
   *   WMFSDKVersion = 9.00.00.2980
   *   WMFSDKNeeded = 0.0.0.0000
   *   WM/UniqueFileIdentifier = AMGa_id=R 15334;AMGp_id=P 5149;AMGt_id=T 2324984
   *   WM/Publisher = 4AD
   *   WM/Provider = AMG
   *   WM/ProviderRating = 8
   *   WM/ProviderStyle = Rock (similar to WM/Genre)
   *   WM/GenreID (similar to WM/Genre)
   *   WM/TrackNumber (same as WM/Track but as a string)
   *
   * Other known (and unused) 'non-text' metadata available :
   *
   *   WM/EncodingTime
   *   WM/MCDI
   *   IsVBR
   *
   * We might want to read WM/TrackNumber and use atoi() if we don't have
   * WM/Track
   */

  GstTagList *taglist;
  guint16 blockcount, i;
  gboolean content3D = FALSE;

  /* Maps "StereoscopicLayout" descriptor values to 3D interleaving modes */
  struct
  {
    const gchar *interleave_name;
    GstASF3DMode interleaving_type;
  } stereoscopic_layout_map[] = {
    {
    "SideBySideRF", GST_ASF_3D_SIDE_BY_SIDE_HALF_RL}, {
    "SideBySideLF", GST_ASF_3D_SIDE_BY_SIDE_HALF_LR}, {
    "OverUnderRT", GST_ASF_3D_TOP_AND_BOTTOM_HALF_RL}, {
    "OverUnderLT", GST_ASF_3D_TOP_AND_BOTTOM_HALF_LR}, {
    "DualStream", GST_ASF_3D_DUAL_STREAM}
  };
  GST_INFO_OBJECT (demux, "object is an extended content description");

  taglist = gst_tag_list_new_empty ();

  /* Content Descriptor Count */
  if (size < 2)
    goto not_enough_data;

  blockcount = gst_asf_demux_get_uint16 (&data, &size);

  for (i = 1; i <= blockcount; ++i) {
    const gchar *gst_tag_name;
    guint16 datatype;
    guint16 value_len;
    guint16 name_len;
    GValue tag_value = { 0, };
    gsize in, out;
    gchar *name;
    gchar *name_utf8 = NULL;
    gchar *value;

    /* Descriptor */
    if (!gst_asf_demux_get_string (&name, &name_len, &data, &size))
      goto not_enough_data;

    if (size < 2) {
      g_free (name);
      goto not_enough_data;
    }
    /* Descriptor Value Data Type */
    datatype = gst_asf_demux_get_uint16 (&data, &size);

    /* Descriptor Value (not really a string, but same thing reading-wise) */
    if (!gst_asf_demux_get_string (&value, &value_len, &data, &size)) {
      g_free (name);
      goto not_enough_data;
    }

    /* Descriptor names are stored as UTF-16LE in the file */
    name_utf8 =
        g_convert (name, name_len, "UTF-8", "UTF-16LE", &in, &out, NULL);

    if (name_utf8 != NULL) {
      GST_DEBUG ("Found tag/metadata %s", name_utf8);

      gst_tag_name = gst_asf_demux_get_gst_tag_from_tag_name (name_utf8);
      GST_DEBUG ("gst_tag_name %s", GST_STR_NULL (gst_tag_name));

      switch (datatype) {
        case ASF_DEMUX_DATA_TYPE_UTF16LE_STRING:{
          gchar *value_utf8;

          value_utf8 = g_convert (value, value_len, "UTF-8", "UTF-16LE",
              &in, &out, NULL);

          /* get rid of tags with empty value */
          if (value_utf8 != NULL && *value_utf8 != '\0') {
            GST_DEBUG ("string value %s", value_utf8);

            value_utf8[out] = '\0';

            if (gst_tag_name != NULL) {
              if (strcmp (gst_tag_name, GST_TAG_DATE_TIME) == 0) {
                guint year = atoi (value_utf8);

                if (year > 0) {
                  g_value_init (&tag_value, GST_TYPE_DATE_TIME);
                  g_value_take_boxed (&tag_value, gst_date_time_new_y (year));
                }
              } else if (strcmp (gst_tag_name, GST_TAG_GENRE) == 0) {
                guint id3v1_genre_id;
                const gchar *genre_str;

                /* "(N)" style genres are resolved via the ID3v1 genre table */
                if (sscanf (value_utf8, "(%u)", &id3v1_genre_id) == 1 &&
                    ((genre_str = gst_tag_id3_genre_get (id3v1_genre_id)))) {
                  GST_DEBUG ("Genre: %s -> %s", value_utf8, genre_str);
                  g_free (value_utf8);
                  value_utf8 = g_strdup (genre_str);
                }
              } else {
                GType tag_type;

                /* convert tag from string to other type if required */
                tag_type = gst_tag_get_type (gst_tag_name);
                g_value_init (&tag_value, tag_type);
                if (!gst_value_deserialize (&tag_value, value_utf8)) {
                  GValue from_val = { 0, };

                  g_value_init (&from_val, G_TYPE_STRING);
                  g_value_set_string (&from_val, value_utf8);
                  if (!g_value_transform (&from_val, &tag_value)) {
                    GST_WARNING_OBJECT (demux,
                        "Could not transform string tag to " "%s tag type %s",
                        gst_tag_name, g_type_name (tag_type));
                    g_value_unset (&tag_value);
                  }
                  g_value_unset (&from_val);
                }
              }
            } else {
              /* metadata ! */
              GST_DEBUG ("Setting metadata");
              g_value_init (&tag_value, G_TYPE_STRING);
              g_value_set_string (&tag_value, value_utf8);
              /* If we found a stereoscopic marker, look for StereoscopicLayout
               * metadata */
              if (content3D) {
                guint i;
                if (strncmp ("StereoscopicLayout", name_utf8,
                        strlen (name_utf8)) == 0) {
                  for (i = 0; i < G_N_ELEMENTS (stereoscopic_layout_map); i++) {
                    if (g_str_equal (stereoscopic_layout_map[i].interleave_name,
                            value_utf8)) {
                      demux->asf_3D_mode =
                          stereoscopic_layout_map[i].interleaving_type;
                      GST_INFO ("find interleave type %u", demux->asf_3D_mode);
                    }
                  }
                }
                GST_INFO_OBJECT (demux, "3d type is %u", demux->asf_3D_mode);
              } else {
                demux->asf_3D_mode = GST_ASF_3D_NONE;
                GST_INFO_OBJECT (demux, "None 3d type");
              }
            }
          } else if (value_utf8 == NULL) {
            GST_WARNING ("Failed to convert string value to UTF8, skipping");
          } else {
            GST_DEBUG ("Skipping empty string value for %s",
                GST_STR_NULL (gst_tag_name));
          }
          g_free (value_utf8);
          break;
        }
        case ASF_DEMUX_DATA_TYPE_BYTE_ARRAY:{
          if (gst_tag_name) {
            if (!g_str_equal (gst_tag_name, GST_TAG_IMAGE)) {
              GST_FIXME ("Unhandled byte array tag %s",
                  GST_STR_NULL (gst_tag_name));
              break;
            } else {
              asf_demux_parse_picture_tag (taglist, (guint8 *) value,
                  value_len);
            }
          }
          break;
        }
        case ASF_DEMUX_DATA_TYPE_DWORD:{
          guint uint_val;

          /* guard against out-of-bounds read on undersized value
           * (CVE-2017-5847) */
          if (value_len < 4)
            break;

          uint_val = GST_READ_UINT32_LE (value);

          /* this is the track number */
          g_value_init (&tag_value, G_TYPE_UINT);

          /* WM/Track counts from 0 */
          if (!strcmp (name_utf8, "WM/Track"))
            ++uint_val;

          g_value_set_uint (&tag_value, uint_val);
          break;
        }
          /* Detect 3D */
        case ASF_DEMUX_DATA_TYPE_BOOL:{
          gboolean bool_val;

          /* BOOL is stored as a 32-bit LE value; same guard as DWORD
           * (CVE-2017-5847) */
          if (value_len < 4)
            break;

          bool_val = GST_READ_UINT32_LE (value);

          if (strncmp ("Stereoscopic", name_utf8, strlen (name_utf8)) == 0) {
            if (bool_val) {
              GST_INFO_OBJECT (demux, "This is 3D contents");
              content3D = TRUE;
            } else {
              GST_INFO_OBJECT (demux, "This is not 3D contenst");
              content3D = FALSE;
            }
          }
          break;
        }
        default:{
          GST_DEBUG ("Skipping tag %s of type %d", gst_tag_name, datatype);
          break;
        }
      }

      if (G_IS_VALUE (&tag_value)) {
        if (gst_tag_name) {
          GstTagMergeMode merge_mode = GST_TAG_MERGE_APPEND;

          /* WM/TrackNumber is more reliable than WM/Track, since the latter
           * is supposed to have a 0 base but is often wrongly written to start
           * from 1 as well, so prefer WM/TrackNumber when we have it: either
           * replace the value added earlier from WM/Track or put it first in
           * the list, so that it will get picked up by _get_uint() */
          if (strcmp (name_utf8, "WM/TrackNumber") == 0)
            merge_mode = GST_TAG_MERGE_REPLACE;

          gst_tag_list_add_values (taglist, merge_mode, gst_tag_name,
              &tag_value, NULL);
        } else {
          GST_DEBUG ("Setting global metadata %s", name_utf8);
          gst_structure_set_value (demux->global_metadata, name_utf8,
              &tag_value);
        }

        g_value_unset (&tag_value);
      }
    }

    /* this iteration owns name/value/name_utf8 on every path */
    g_free (name);
    g_free (value);
    g_free (name_utf8);
  }

  gst_asf_demux_add_global_tags (demux, taglist);

  return GST_FLOW_OK;

/* Errors */
not_enough_data:
  {
    GST_WARNING ("Unexpected end of data parsing ext content desc object");
    gst_tag_list_unref (taglist);
    return GST_FLOW_OK;          /* not really fatal */
  }
}
{'added': [(3442, ' guint uint_val;'), (3443, ''), (3444, ' if (value_len < 4)'), (3445, ' break;'), (3446, ''), (3447, ' uint_val = GST_READ_UINT32_LE (value);'), (3461, ' gboolean bool_val;'), (3462, ''), (3463, ' if (value_len < 4)'), (3464, ' break;'), (3465, ''), (3466, ' bool_val = GST_READ_UINT32_LE (value);')], 'deleted': [(3442, ' guint uint_val = GST_READ_UINT32_LE (value);'), (3456, ' gboolean bool_val = GST_READ_UINT32_LE (value);')]}
12
2
3,643
22,302
190
1,044
33
https://github.com/GStreamer/gst-plugins-ugly
CVE-2017-5847
CWE-125
1,252
print-l2tp.c
C
l2tp_bearer_type_print
/* * Copyright (c) 1991, 1993, 1994, 1995, 1996, 1997 * The Regents of the University of California. All rights reserved. * * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that: (1) source code distributions * retain the above copyright notice and this paragraph in its entirety, (2) * distributions including binary code include the above copyright notice and * this paragraph in its entirety in the documentation or other materials * provided with the distribution, and (3) all advertising materials mentioning * features or use of this software display the following acknowledgement: * ``This product includes software developed by the University of California, * Lawrence Berkeley Laboratory and its contributors.'' Neither the name of * the University nor the names of its contributors may be used to endorse * or promote products derived from this software without specific prior * written permission. * THIS SOFTWARE IS PROVIDED ``AS IS'' AND WITHOUT ANY EXPRESS OR IMPLIED * WARRANTIES, INCLUDING, WITHOUT LIMITATION, THE IMPLIED WARRANTIES OF * MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE. 
* * L2TP support contributed by Motonori Shindo (mshindo@mshindo.net) */ /* \summary: Layer Two Tunneling Protocol (L2TP) printer */ #ifdef HAVE_CONFIG_H #include "config.h" #endif #include <netdissect-stdinc.h> #include "netdissect.h" #include "extract.h" #define L2TP_FLAG_TYPE 0x8000 /* Type (0=Data, 1=Control) */ #define L2TP_FLAG_LENGTH 0x4000 /* Length */ #define L2TP_FLAG_SEQUENCE 0x0800 /* Sequence */ #define L2TP_FLAG_OFFSET 0x0200 /* Offset */ #define L2TP_FLAG_PRIORITY 0x0100 /* Priority */ #define L2TP_VERSION_MASK 0x000f /* Version Mask */ #define L2TP_VERSION_L2F 0x0001 /* L2F */ #define L2TP_VERSION_L2TP 0x0002 /* L2TP */ #define L2TP_AVP_HDR_FLAG_MANDATORY 0x8000 /* Mandatory Flag */ #define L2TP_AVP_HDR_FLAG_HIDDEN 0x4000 /* Hidden Flag */ #define L2TP_AVP_HDR_LEN_MASK 0x03ff /* Length Mask */ #define L2TP_FRAMING_CAP_SYNC_MASK 0x00000001 /* Synchronous */ #define L2TP_FRAMING_CAP_ASYNC_MASK 0x00000002 /* Asynchronous */ #define L2TP_FRAMING_TYPE_SYNC_MASK 0x00000001 /* Synchronous */ #define L2TP_FRAMING_TYPE_ASYNC_MASK 0x00000002 /* Asynchronous */ #define L2TP_BEARER_CAP_DIGITAL_MASK 0x00000001 /* Digital */ #define L2TP_BEARER_CAP_ANALOG_MASK 0x00000002 /* Analog */ #define L2TP_BEARER_TYPE_DIGITAL_MASK 0x00000001 /* Digital */ #define L2TP_BEARER_TYPE_ANALOG_MASK 0x00000002 /* Analog */ /* Authen Type */ #define L2TP_AUTHEN_TYPE_RESERVED 0x0000 /* Reserved */ #define L2TP_AUTHEN_TYPE_TEXTUAL 0x0001 /* Textual username/password exchange */ #define L2TP_AUTHEN_TYPE_CHAP 0x0002 /* PPP CHAP */ #define L2TP_AUTHEN_TYPE_PAP 0x0003 /* PPP PAP */ #define L2TP_AUTHEN_TYPE_NO_AUTH 0x0004 /* No Authentication */ #define L2TP_AUTHEN_TYPE_MSCHAPv1 0x0005 /* MSCHAPv1 */ #define L2TP_PROXY_AUTH_ID_MASK 0x00ff static const char tstr[] = " [|l2tp]"; #define L2TP_MSGTYPE_SCCRQ 1 /* Start-Control-Connection-Request */ #define L2TP_MSGTYPE_SCCRP 2 /* Start-Control-Connection-Reply */ #define L2TP_MSGTYPE_SCCCN 3 /* Start-Control-Connection-Connected */ #define 
L2TP_MSGTYPE_STOPCCN 4 /* Stop-Control-Connection-Notification */ #define L2TP_MSGTYPE_HELLO 6 /* Hello */ #define L2TP_MSGTYPE_OCRQ 7 /* Outgoing-Call-Request */ #define L2TP_MSGTYPE_OCRP 8 /* Outgoing-Call-Reply */ #define L2TP_MSGTYPE_OCCN 9 /* Outgoing-Call-Connected */ #define L2TP_MSGTYPE_ICRQ 10 /* Incoming-Call-Request */ #define L2TP_MSGTYPE_ICRP 11 /* Incoming-Call-Reply */ #define L2TP_MSGTYPE_ICCN 12 /* Incoming-Call-Connected */ #define L2TP_MSGTYPE_CDN 14 /* Call-Disconnect-Notify */ #define L2TP_MSGTYPE_WEN 15 /* WAN-Error-Notify */ #define L2TP_MSGTYPE_SLI 16 /* Set-Link-Info */ static const struct tok l2tp_msgtype2str[] = { { L2TP_MSGTYPE_SCCRQ, "SCCRQ" }, { L2TP_MSGTYPE_SCCRP, "SCCRP" }, { L2TP_MSGTYPE_SCCCN, "SCCCN" }, { L2TP_MSGTYPE_STOPCCN, "StopCCN" }, { L2TP_MSGTYPE_HELLO, "HELLO" }, { L2TP_MSGTYPE_OCRQ, "OCRQ" }, { L2TP_MSGTYPE_OCRP, "OCRP" }, { L2TP_MSGTYPE_OCCN, "OCCN" }, { L2TP_MSGTYPE_ICRQ, "ICRQ" }, { L2TP_MSGTYPE_ICRP, "ICRP" }, { L2TP_MSGTYPE_ICCN, "ICCN" }, { L2TP_MSGTYPE_CDN, "CDN" }, { L2TP_MSGTYPE_WEN, "WEN" }, { L2TP_MSGTYPE_SLI, "SLI" }, { 0, NULL } }; #define L2TP_AVP_MSGTYPE 0 /* Message Type */ #define L2TP_AVP_RESULT_CODE 1 /* Result Code */ #define L2TP_AVP_PROTO_VER 2 /* Protocol Version */ #define L2TP_AVP_FRAMING_CAP 3 /* Framing Capabilities */ #define L2TP_AVP_BEARER_CAP 4 /* Bearer Capabilities */ #define L2TP_AVP_TIE_BREAKER 5 /* Tie Breaker */ #define L2TP_AVP_FIRM_VER 6 /* Firmware Revision */ #define L2TP_AVP_HOST_NAME 7 /* Host Name */ #define L2TP_AVP_VENDOR_NAME 8 /* Vendor Name */ #define L2TP_AVP_ASSND_TUN_ID 9 /* Assigned Tunnel ID */ #define L2TP_AVP_RECV_WIN_SIZE 10 /* Receive Window Size */ #define L2TP_AVP_CHALLENGE 11 /* Challenge */ #define L2TP_AVP_Q931_CC 12 /* Q.931 Cause Code */ #define L2TP_AVP_CHALLENGE_RESP 13 /* Challenge Response */ #define L2TP_AVP_ASSND_SESS_ID 14 /* Assigned Session ID */ #define L2TP_AVP_CALL_SER_NUM 15 /* Call Serial Number */ #define L2TP_AVP_MINIMUM_BPS 16 /* Minimum 
BPS */ #define L2TP_AVP_MAXIMUM_BPS 17 /* Maximum BPS */ #define L2TP_AVP_BEARER_TYPE 18 /* Bearer Type */ #define L2TP_AVP_FRAMING_TYPE 19 /* Framing Type */ #define L2TP_AVP_PACKET_PROC_DELAY 20 /* Packet Processing Delay (OBSOLETE) */ #define L2TP_AVP_CALLED_NUMBER 21 /* Called Number */ #define L2TP_AVP_CALLING_NUMBER 22 /* Calling Number */ #define L2TP_AVP_SUB_ADDRESS 23 /* Sub-Address */ #define L2TP_AVP_TX_CONN_SPEED 24 /* (Tx) Connect Speed */ #define L2TP_AVP_PHY_CHANNEL_ID 25 /* Physical Channel ID */ #define L2TP_AVP_INI_RECV_LCP 26 /* Initial Received LCP CONFREQ */ #define L2TP_AVP_LAST_SENT_LCP 27 /* Last Sent LCP CONFREQ */ #define L2TP_AVP_LAST_RECV_LCP 28 /* Last Received LCP CONFREQ */ #define L2TP_AVP_PROXY_AUTH_TYPE 29 /* Proxy Authen Type */ #define L2TP_AVP_PROXY_AUTH_NAME 30 /* Proxy Authen Name */ #define L2TP_AVP_PROXY_AUTH_CHAL 31 /* Proxy Authen Challenge */ #define L2TP_AVP_PROXY_AUTH_ID 32 /* Proxy Authen ID */ #define L2TP_AVP_PROXY_AUTH_RESP 33 /* Proxy Authen Response */ #define L2TP_AVP_CALL_ERRORS 34 /* Call Errors */ #define L2TP_AVP_ACCM 35 /* ACCM */ #define L2TP_AVP_RANDOM_VECTOR 36 /* Random Vector */ #define L2TP_AVP_PRIVATE_GRP_ID 37 /* Private Group ID */ #define L2TP_AVP_RX_CONN_SPEED 38 /* (Rx) Connect Speed */ #define L2TP_AVP_SEQ_REQUIRED 39 /* Sequencing Required */ #define L2TP_AVP_PPP_DISCON_CC 46 /* PPP Disconnect Cause Code */ static const struct tok l2tp_avp2str[] = { { L2TP_AVP_MSGTYPE, "MSGTYPE" }, { L2TP_AVP_RESULT_CODE, "RESULT_CODE" }, { L2TP_AVP_PROTO_VER, "PROTO_VER" }, { L2TP_AVP_FRAMING_CAP, "FRAMING_CAP" }, { L2TP_AVP_BEARER_CAP, "BEARER_CAP" }, { L2TP_AVP_TIE_BREAKER, "TIE_BREAKER" }, { L2TP_AVP_FIRM_VER, "FIRM_VER" }, { L2TP_AVP_HOST_NAME, "HOST_NAME" }, { L2TP_AVP_VENDOR_NAME, "VENDOR_NAME" }, { L2TP_AVP_ASSND_TUN_ID, "ASSND_TUN_ID" }, { L2TP_AVP_RECV_WIN_SIZE, "RECV_WIN_SIZE" }, { L2TP_AVP_CHALLENGE, "CHALLENGE" }, { L2TP_AVP_Q931_CC, "Q931_CC", }, { L2TP_AVP_CHALLENGE_RESP, "CHALLENGE_RESP" }, { 
L2TP_AVP_ASSND_SESS_ID, "ASSND_SESS_ID" }, { L2TP_AVP_CALL_SER_NUM, "CALL_SER_NUM" }, { L2TP_AVP_MINIMUM_BPS, "MINIMUM_BPS" }, { L2TP_AVP_MAXIMUM_BPS, "MAXIMUM_BPS" }, { L2TP_AVP_BEARER_TYPE, "BEARER_TYPE" }, { L2TP_AVP_FRAMING_TYPE, "FRAMING_TYPE" }, { L2TP_AVP_PACKET_PROC_DELAY, "PACKET_PROC_DELAY" }, { L2TP_AVP_CALLED_NUMBER, "CALLED_NUMBER" }, { L2TP_AVP_CALLING_NUMBER, "CALLING_NUMBER" }, { L2TP_AVP_SUB_ADDRESS, "SUB_ADDRESS" }, { L2TP_AVP_TX_CONN_SPEED, "TX_CONN_SPEED" }, { L2TP_AVP_PHY_CHANNEL_ID, "PHY_CHANNEL_ID" }, { L2TP_AVP_INI_RECV_LCP, "INI_RECV_LCP" }, { L2TP_AVP_LAST_SENT_LCP, "LAST_SENT_LCP" }, { L2TP_AVP_LAST_RECV_LCP, "LAST_RECV_LCP" }, { L2TP_AVP_PROXY_AUTH_TYPE, "PROXY_AUTH_TYPE" }, { L2TP_AVP_PROXY_AUTH_NAME, "PROXY_AUTH_NAME" }, { L2TP_AVP_PROXY_AUTH_CHAL, "PROXY_AUTH_CHAL" }, { L2TP_AVP_PROXY_AUTH_ID, "PROXY_AUTH_ID" }, { L2TP_AVP_PROXY_AUTH_RESP, "PROXY_AUTH_RESP" }, { L2TP_AVP_CALL_ERRORS, "CALL_ERRORS" }, { L2TP_AVP_ACCM, "ACCM" }, { L2TP_AVP_RANDOM_VECTOR, "RANDOM_VECTOR" }, { L2TP_AVP_PRIVATE_GRP_ID, "PRIVATE_GRP_ID" }, { L2TP_AVP_RX_CONN_SPEED, "RX_CONN_SPEED" }, { L2TP_AVP_SEQ_REQUIRED, "SEQ_REQUIRED" }, { L2TP_AVP_PPP_DISCON_CC, "PPP_DISCON_CC" }, { 0, NULL } }; static const struct tok l2tp_authentype2str[] = { { L2TP_AUTHEN_TYPE_RESERVED, "Reserved" }, { L2TP_AUTHEN_TYPE_TEXTUAL, "Textual" }, { L2TP_AUTHEN_TYPE_CHAP, "CHAP" }, { L2TP_AUTHEN_TYPE_PAP, "PAP" }, { L2TP_AUTHEN_TYPE_NO_AUTH, "No Auth" }, { L2TP_AUTHEN_TYPE_MSCHAPv1, "MS-CHAPv1" }, { 0, NULL } }; #define L2TP_PPP_DISCON_CC_DIRECTION_GLOBAL 0 #define L2TP_PPP_DISCON_CC_DIRECTION_AT_PEER 1 #define L2TP_PPP_DISCON_CC_DIRECTION_AT_LOCAL 2 static const struct tok l2tp_cc_direction2str[] = { { L2TP_PPP_DISCON_CC_DIRECTION_GLOBAL, "global error" }, { L2TP_PPP_DISCON_CC_DIRECTION_AT_PEER, "at peer" }, { L2TP_PPP_DISCON_CC_DIRECTION_AT_LOCAL,"at local" }, { 0, NULL } }; #if 0 static char *l2tp_result_code_StopCCN[] = { "Reserved", "General request to clear control connection", 
"General error--Error Code indicates the problem", "Control channel already exists", "Requester is not authorized to establish a control channel", "The protocol version of the requester is not supported", "Requester is being shut down", "Finite State Machine error" #define L2TP_MAX_RESULT_CODE_STOPCC_INDEX 8 }; #endif #if 0 static char *l2tp_result_code_CDN[] = { "Reserved", "Call disconnected due to loss of carrier", "Call disconnected for the reason indicated in error code", "Call disconnected for administrative reasons", "Call failed due to lack of appropriate facilities being " \ "available (temporary condition)", "Call failed due to lack of appropriate facilities being " \ "available (permanent condition)", "Invalid destination", "Call failed due to no carrier detected", "Call failed due to detection of a busy signal", "Call failed due to lack of a dial tone", "Call was not established within time allotted by LAC", "Call was connected but no appropriate framing was detected" #define L2TP_MAX_RESULT_CODE_CDN_INDEX 12 }; #endif #if 0 static char *l2tp_error_code_general[] = { "No general error", "No control connection exists yet for this LAC-LNS pair", "Length is wrong", "One of the field values was out of range or " \ "reserved field was non-zero" "Insufficient resources to handle this operation now", "The Session ID is invalid in this context", "A generic vendor-specific error occurred in the LAC", "Try another" #define L2TP_MAX_ERROR_CODE_GENERAL_INDEX 8 }; #endif /******************************/ /* generic print out routines */ /******************************/ static void print_string(netdissect_options *ndo, const u_char *dat, u_int length) { u_int i; for (i=0; i<length; i++) { ND_PRINT((ndo, "%c", *dat++)); } } static void print_octets(netdissect_options *ndo, const u_char *dat, u_int length) { u_int i; for (i=0; i<length; i++) { ND_PRINT((ndo, "%02x", *dat++)); } } static void print_16bits_val(netdissect_options *ndo, const uint16_t *dat) { ND_PRINT((ndo, 
"%u", EXTRACT_16BITS(dat))); } static void print_32bits_val(netdissect_options *ndo, const uint32_t *dat) { ND_PRINT((ndo, "%lu", (u_long)EXTRACT_32BITS(dat))); } /***********************************/ /* AVP-specific print out routines */ /***********************************/ static void l2tp_msgtype_print(netdissect_options *ndo, const u_char *dat) { const uint16_t *ptr = (const uint16_t *)dat; ND_PRINT((ndo, "%s", tok2str(l2tp_msgtype2str, "MSGTYPE-#%u", EXTRACT_16BITS(ptr)))); } static void l2tp_result_code_print(netdissect_options *ndo, const u_char *dat, u_int length) { const uint16_t *ptr = (const uint16_t *)dat; ND_PRINT((ndo, "%u", EXTRACT_16BITS(ptr))); ptr++; /* Result Code */ if (length > 2) { /* Error Code (opt) */ ND_PRINT((ndo, "/%u", EXTRACT_16BITS(ptr))); ptr++; } if (length > 4) { /* Error Message (opt) */ ND_PRINT((ndo, " ")); print_string(ndo, (const u_char *)ptr, length - 4); } } static void l2tp_proto_ver_print(netdissect_options *ndo, const uint16_t *dat) { ND_PRINT((ndo, "%u.%u", (EXTRACT_16BITS(dat) >> 8), (EXTRACT_16BITS(dat) & 0xff))); } static void l2tp_framing_cap_print(netdissect_options *ndo, const u_char *dat) { const uint32_t *ptr = (const uint32_t *)dat; if (EXTRACT_32BITS(ptr) & L2TP_FRAMING_CAP_ASYNC_MASK) { ND_PRINT((ndo, "A")); } if (EXTRACT_32BITS(ptr) & L2TP_FRAMING_CAP_SYNC_MASK) { ND_PRINT((ndo, "S")); } } static void l2tp_bearer_cap_print(netdissect_options *ndo, const u_char *dat) { const uint32_t *ptr = (const uint32_t *)dat; if (EXTRACT_32BITS(ptr) & L2TP_BEARER_CAP_ANALOG_MASK) { ND_PRINT((ndo, "A")); } if (EXTRACT_32BITS(ptr) & L2TP_BEARER_CAP_DIGITAL_MASK) { ND_PRINT((ndo, "D")); } } static void l2tp_q931_cc_print(netdissect_options *ndo, const u_char *dat, u_int length) { print_16bits_val(ndo, (const uint16_t *)dat); ND_PRINT((ndo, ", %02x", dat[2])); if (length > 3) { ND_PRINT((ndo, " ")); print_string(ndo, dat+3, length-3); } } static void l2tp_bearer_type_print(netdissect_options *ndo, const u_char *dat) { const 
uint32_t *ptr = (const uint32_t *)dat; if (EXTRACT_32BITS(ptr) & L2TP_BEARER_TYPE_ANALOG_MASK) { ND_PRINT((ndo, "A")); } if (EXTRACT_32BITS(ptr) & L2TP_BEARER_TYPE_DIGITAL_MASK) { ND_PRINT((ndo, "D")); } } static void l2tp_framing_type_print(netdissect_options *ndo, const u_char *dat) { const uint32_t *ptr = (const uint32_t *)dat; if (EXTRACT_32BITS(ptr) & L2TP_FRAMING_TYPE_ASYNC_MASK) { ND_PRINT((ndo, "A")); } if (EXTRACT_32BITS(ptr) & L2TP_FRAMING_TYPE_SYNC_MASK) { ND_PRINT((ndo, "S")); } } static void l2tp_packet_proc_delay_print(netdissect_options *ndo) { ND_PRINT((ndo, "obsolete")); } static void l2tp_proxy_auth_type_print(netdissect_options *ndo, const u_char *dat) { const uint16_t *ptr = (const uint16_t *)dat; ND_PRINT((ndo, "%s", tok2str(l2tp_authentype2str, "AuthType-#%u", EXTRACT_16BITS(ptr)))); } static void l2tp_proxy_auth_id_print(netdissect_options *ndo, const u_char *dat) { const uint16_t *ptr = (const uint16_t *)dat; ND_PRINT((ndo, "%u", EXTRACT_16BITS(ptr) & L2TP_PROXY_AUTH_ID_MASK)); } static void l2tp_call_errors_print(netdissect_options *ndo, const u_char *dat) { const uint16_t *ptr = (const uint16_t *)dat; uint16_t val_h, val_l; ptr++; /* skip "Reserved" */ val_h = EXTRACT_16BITS(ptr); ptr++; val_l = EXTRACT_16BITS(ptr); ptr++; ND_PRINT((ndo, "CRCErr=%u ", (val_h<<16) + val_l)); val_h = EXTRACT_16BITS(ptr); ptr++; val_l = EXTRACT_16BITS(ptr); ptr++; ND_PRINT((ndo, "FrameErr=%u ", (val_h<<16) + val_l)); val_h = EXTRACT_16BITS(ptr); ptr++; val_l = EXTRACT_16BITS(ptr); ptr++; ND_PRINT((ndo, "HardOver=%u ", (val_h<<16) + val_l)); val_h = EXTRACT_16BITS(ptr); ptr++; val_l = EXTRACT_16BITS(ptr); ptr++; ND_PRINT((ndo, "BufOver=%u ", (val_h<<16) + val_l)); val_h = EXTRACT_16BITS(ptr); ptr++; val_l = EXTRACT_16BITS(ptr); ptr++; ND_PRINT((ndo, "Timeout=%u ", (val_h<<16) + val_l)); val_h = EXTRACT_16BITS(ptr); ptr++; val_l = EXTRACT_16BITS(ptr); ptr++; ND_PRINT((ndo, "AlignErr=%u ", (val_h<<16) + val_l)); } static void l2tp_accm_print(netdissect_options 
*ndo, const u_char *dat) { const uint16_t *ptr = (const uint16_t *)dat; uint16_t val_h, val_l; ptr++; /* skip "Reserved" */ val_h = EXTRACT_16BITS(ptr); ptr++; val_l = EXTRACT_16BITS(ptr); ptr++; ND_PRINT((ndo, "send=%08x ", (val_h<<16) + val_l)); val_h = EXTRACT_16BITS(ptr); ptr++; val_l = EXTRACT_16BITS(ptr); ptr++; ND_PRINT((ndo, "recv=%08x ", (val_h<<16) + val_l)); } static void l2tp_ppp_discon_cc_print(netdissect_options *ndo, const u_char *dat, u_int length) { const uint16_t *ptr = (const uint16_t *)dat; ND_PRINT((ndo, "%04x, ", EXTRACT_16BITS(ptr))); ptr++; /* Disconnect Code */ ND_PRINT((ndo, "%04x ", EXTRACT_16BITS(ptr))); ptr++; /* Control Protocol Number */ ND_PRINT((ndo, "%s", tok2str(l2tp_cc_direction2str, "Direction-#%u", *((const u_char *)ptr++)))); if (length > 5) { ND_PRINT((ndo, " ")); print_string(ndo, (const u_char *)ptr, length-5); } } static void l2tp_avp_print(netdissect_options *ndo, const u_char *dat, int length) { u_int len; const uint16_t *ptr = (const uint16_t *)dat; uint16_t attr_type; int hidden = FALSE; if (length <= 0) { return; } ND_PRINT((ndo, " ")); ND_TCHECK(*ptr); /* Flags & Length */ len = EXTRACT_16BITS(ptr) & L2TP_AVP_HDR_LEN_MASK; /* If it is not long enough to contain the header, we'll give up. */ if (len < 6) goto trunc; /* If it goes past the end of the remaining length of the packet, we'll give up. */ if (len > (u_int)length) goto trunc; /* If it goes past the end of the remaining length of the captured data, we'll give up. 
*/ ND_TCHECK2(*ptr, len); /* After this point, no need to worry about truncation */ if (EXTRACT_16BITS(ptr) & L2TP_AVP_HDR_FLAG_MANDATORY) { ND_PRINT((ndo, "*")); } if (EXTRACT_16BITS(ptr) & L2TP_AVP_HDR_FLAG_HIDDEN) { hidden = TRUE; ND_PRINT((ndo, "?")); } ptr++; if (EXTRACT_16BITS(ptr)) { /* Vendor Specific Attribute */ ND_PRINT((ndo, "VENDOR%04x:", EXTRACT_16BITS(ptr))); ptr++; ND_PRINT((ndo, "ATTR%04x", EXTRACT_16BITS(ptr))); ptr++; ND_PRINT((ndo, "(")); print_octets(ndo, (const u_char *)ptr, len-6); ND_PRINT((ndo, ")")); } else { /* IETF-defined Attributes */ ptr++; attr_type = EXTRACT_16BITS(ptr); ptr++; ND_PRINT((ndo, "%s", tok2str(l2tp_avp2str, "AVP-#%u", attr_type))); ND_PRINT((ndo, "(")); if (hidden) { ND_PRINT((ndo, "???")); } else { switch (attr_type) { case L2TP_AVP_MSGTYPE: l2tp_msgtype_print(ndo, (const u_char *)ptr); break; case L2TP_AVP_RESULT_CODE: l2tp_result_code_print(ndo, (const u_char *)ptr, len-6); break; case L2TP_AVP_PROTO_VER: l2tp_proto_ver_print(ndo, ptr); break; case L2TP_AVP_FRAMING_CAP: l2tp_framing_cap_print(ndo, (const u_char *)ptr); break; case L2TP_AVP_BEARER_CAP: l2tp_bearer_cap_print(ndo, (const u_char *)ptr); break; case L2TP_AVP_TIE_BREAKER: print_octets(ndo, (const u_char *)ptr, 8); break; case L2TP_AVP_FIRM_VER: case L2TP_AVP_ASSND_TUN_ID: case L2TP_AVP_RECV_WIN_SIZE: case L2TP_AVP_ASSND_SESS_ID: print_16bits_val(ndo, ptr); break; case L2TP_AVP_HOST_NAME: case L2TP_AVP_VENDOR_NAME: case L2TP_AVP_CALLING_NUMBER: case L2TP_AVP_CALLED_NUMBER: case L2TP_AVP_SUB_ADDRESS: case L2TP_AVP_PROXY_AUTH_NAME: case L2TP_AVP_PRIVATE_GRP_ID: print_string(ndo, (const u_char *)ptr, len-6); break; case L2TP_AVP_CHALLENGE: case L2TP_AVP_INI_RECV_LCP: case L2TP_AVP_LAST_SENT_LCP: case L2TP_AVP_LAST_RECV_LCP: case L2TP_AVP_PROXY_AUTH_CHAL: case L2TP_AVP_PROXY_AUTH_RESP: case L2TP_AVP_RANDOM_VECTOR: print_octets(ndo, (const u_char *)ptr, len-6); break; case L2TP_AVP_Q931_CC: l2tp_q931_cc_print(ndo, (const u_char *)ptr, len-6); break; case 
L2TP_AVP_CHALLENGE_RESP: print_octets(ndo, (const u_char *)ptr, 16); break; case L2TP_AVP_CALL_SER_NUM: case L2TP_AVP_MINIMUM_BPS: case L2TP_AVP_MAXIMUM_BPS: case L2TP_AVP_TX_CONN_SPEED: case L2TP_AVP_PHY_CHANNEL_ID: case L2TP_AVP_RX_CONN_SPEED: print_32bits_val(ndo, (const uint32_t *)ptr); break; case L2TP_AVP_BEARER_TYPE: l2tp_bearer_type_print(ndo, (const u_char *)ptr); break; case L2TP_AVP_FRAMING_TYPE: l2tp_framing_type_print(ndo, (const u_char *)ptr); break; case L2TP_AVP_PACKET_PROC_DELAY: l2tp_packet_proc_delay_print(ndo); break; case L2TP_AVP_PROXY_AUTH_TYPE: l2tp_proxy_auth_type_print(ndo, (const u_char *)ptr); break; case L2TP_AVP_PROXY_AUTH_ID: l2tp_proxy_auth_id_print(ndo, (const u_char *)ptr); break; case L2TP_AVP_CALL_ERRORS: l2tp_call_errors_print(ndo, (const u_char *)ptr); break; case L2TP_AVP_ACCM: l2tp_accm_print(ndo, (const u_char *)ptr); break; case L2TP_AVP_SEQ_REQUIRED: break; /* No Attribute Value */ case L2TP_AVP_PPP_DISCON_CC: l2tp_ppp_discon_cc_print(ndo, (const u_char *)ptr, len-6); break; default: break; } } ND_PRINT((ndo, ")")); } l2tp_avp_print(ndo, dat+len, length-len); return; trunc: ND_PRINT((ndo, "|...")); } void l2tp_print(netdissect_options *ndo, const u_char *dat, u_int length) { const u_char *ptr = dat; u_int cnt = 0; /* total octets consumed */ uint16_t pad; int flag_t, flag_l, flag_s, flag_o; uint16_t l2tp_len; flag_t = flag_l = flag_s = flag_o = FALSE; ND_TCHECK2(*ptr, 2); /* Flags & Version */ if ((EXTRACT_16BITS(ptr) & L2TP_VERSION_MASK) == L2TP_VERSION_L2TP) { ND_PRINT((ndo, " l2tp:")); } else if ((EXTRACT_16BITS(ptr) & L2TP_VERSION_MASK) == L2TP_VERSION_L2F) { ND_PRINT((ndo, " l2f:")); return; /* nothing to do */ } else { ND_PRINT((ndo, " Unknown Version, neither L2F(1) nor L2TP(2)")); return; /* nothing we can do */ } ND_PRINT((ndo, "[")); if (EXTRACT_16BITS(ptr) & L2TP_FLAG_TYPE) { flag_t = TRUE; ND_PRINT((ndo, "T")); } if (EXTRACT_16BITS(ptr) & L2TP_FLAG_LENGTH) { flag_l = TRUE; ND_PRINT((ndo, "L")); } if 
(EXTRACT_16BITS(ptr) & L2TP_FLAG_SEQUENCE) { flag_s = TRUE; ND_PRINT((ndo, "S")); } if (EXTRACT_16BITS(ptr) & L2TP_FLAG_OFFSET) { flag_o = TRUE; ND_PRINT((ndo, "O")); } if (EXTRACT_16BITS(ptr) & L2TP_FLAG_PRIORITY) ND_PRINT((ndo, "P")); ND_PRINT((ndo, "]")); ptr += 2; cnt += 2; if (flag_l) { ND_TCHECK2(*ptr, 2); /* Length */ l2tp_len = EXTRACT_16BITS(ptr); ptr += 2; cnt += 2; } else { l2tp_len = 0; } ND_TCHECK2(*ptr, 2); /* Tunnel ID */ ND_PRINT((ndo, "(%u/", EXTRACT_16BITS(ptr))); ptr += 2; cnt += 2; ND_TCHECK2(*ptr, 2); /* Session ID */ ND_PRINT((ndo, "%u)", EXTRACT_16BITS(ptr))); ptr += 2; cnt += 2; if (flag_s) { ND_TCHECK2(*ptr, 2); /* Ns */ ND_PRINT((ndo, "Ns=%u,", EXTRACT_16BITS(ptr))); ptr += 2; cnt += 2; ND_TCHECK2(*ptr, 2); /* Nr */ ND_PRINT((ndo, "Nr=%u", EXTRACT_16BITS(ptr))); ptr += 2; cnt += 2; } if (flag_o) { ND_TCHECK2(*ptr, 2); /* Offset Size */ pad = EXTRACT_16BITS(ptr); ptr += (2 + pad); cnt += (2 + pad); } if (flag_l) { if (length < l2tp_len) { ND_PRINT((ndo, " Length %u larger than packet", l2tp_len)); return; } length = l2tp_len; } if (length < cnt) { ND_PRINT((ndo, " Length %u smaller than header length", length)); return; } if (flag_t) { if (!flag_l) { ND_PRINT((ndo, " No length")); return; } if (length - cnt == 0) { ND_PRINT((ndo, " ZLB")); } else { l2tp_avp_print(ndo, ptr, length - cnt); } } else { ND_PRINT((ndo, " {")); ppp_print(ndo, ptr, length - cnt); ND_PRINT((ndo, "}")); } return; trunc: ND_PRINT((ndo, "%s", tstr)); }
/* * Copyright (c) 1991, 1993, 1994, 1995, 1996, 1997 * The Regents of the University of California. All rights reserved. * * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that: (1) source code distributions * retain the above copyright notice and this paragraph in its entirety, (2) * distributions including binary code include the above copyright notice and * this paragraph in its entirety in the documentation or other materials * provided with the distribution, and (3) all advertising materials mentioning * features or use of this software display the following acknowledgement: * ``This product includes software developed by the University of California, * Lawrence Berkeley Laboratory and its contributors.'' Neither the name of * the University nor the names of its contributors may be used to endorse * or promote products derived from this software without specific prior * written permission. * THIS SOFTWARE IS PROVIDED ``AS IS'' AND WITHOUT ANY EXPRESS OR IMPLIED * WARRANTIES, INCLUDING, WITHOUT LIMITATION, THE IMPLIED WARRANTIES OF * MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE. 
* * L2TP support contributed by Motonori Shindo (mshindo@mshindo.net) */ /* \summary: Layer Two Tunneling Protocol (L2TP) printer */ #ifdef HAVE_CONFIG_H #include "config.h" #endif #include <netdissect-stdinc.h> #include "netdissect.h" #include "extract.h" #define L2TP_FLAG_TYPE 0x8000 /* Type (0=Data, 1=Control) */ #define L2TP_FLAG_LENGTH 0x4000 /* Length */ #define L2TP_FLAG_SEQUENCE 0x0800 /* Sequence */ #define L2TP_FLAG_OFFSET 0x0200 /* Offset */ #define L2TP_FLAG_PRIORITY 0x0100 /* Priority */ #define L2TP_VERSION_MASK 0x000f /* Version Mask */ #define L2TP_VERSION_L2F 0x0001 /* L2F */ #define L2TP_VERSION_L2TP 0x0002 /* L2TP */ #define L2TP_AVP_HDR_FLAG_MANDATORY 0x8000 /* Mandatory Flag */ #define L2TP_AVP_HDR_FLAG_HIDDEN 0x4000 /* Hidden Flag */ #define L2TP_AVP_HDR_LEN_MASK 0x03ff /* Length Mask */ #define L2TP_FRAMING_CAP_SYNC_MASK 0x00000001 /* Synchronous */ #define L2TP_FRAMING_CAP_ASYNC_MASK 0x00000002 /* Asynchronous */ #define L2TP_FRAMING_TYPE_SYNC_MASK 0x00000001 /* Synchronous */ #define L2TP_FRAMING_TYPE_ASYNC_MASK 0x00000002 /* Asynchronous */ #define L2TP_BEARER_CAP_DIGITAL_MASK 0x00000001 /* Digital */ #define L2TP_BEARER_CAP_ANALOG_MASK 0x00000002 /* Analog */ #define L2TP_BEARER_TYPE_DIGITAL_MASK 0x00000001 /* Digital */ #define L2TP_BEARER_TYPE_ANALOG_MASK 0x00000002 /* Analog */ /* Authen Type */ #define L2TP_AUTHEN_TYPE_RESERVED 0x0000 /* Reserved */ #define L2TP_AUTHEN_TYPE_TEXTUAL 0x0001 /* Textual username/password exchange */ #define L2TP_AUTHEN_TYPE_CHAP 0x0002 /* PPP CHAP */ #define L2TP_AUTHEN_TYPE_PAP 0x0003 /* PPP PAP */ #define L2TP_AUTHEN_TYPE_NO_AUTH 0x0004 /* No Authentication */ #define L2TP_AUTHEN_TYPE_MSCHAPv1 0x0005 /* MSCHAPv1 */ #define L2TP_PROXY_AUTH_ID_MASK 0x00ff static const char tstr[] = " [|l2tp]"; #define L2TP_MSGTYPE_SCCRQ 1 /* Start-Control-Connection-Request */ #define L2TP_MSGTYPE_SCCRP 2 /* Start-Control-Connection-Reply */ #define L2TP_MSGTYPE_SCCCN 3 /* Start-Control-Connection-Connected */ #define 
L2TP_MSGTYPE_STOPCCN 4 /* Stop-Control-Connection-Notification */ #define L2TP_MSGTYPE_HELLO 6 /* Hello */ #define L2TP_MSGTYPE_OCRQ 7 /* Outgoing-Call-Request */ #define L2TP_MSGTYPE_OCRP 8 /* Outgoing-Call-Reply */ #define L2TP_MSGTYPE_OCCN 9 /* Outgoing-Call-Connected */ #define L2TP_MSGTYPE_ICRQ 10 /* Incoming-Call-Request */ #define L2TP_MSGTYPE_ICRP 11 /* Incoming-Call-Reply */ #define L2TP_MSGTYPE_ICCN 12 /* Incoming-Call-Connected */ #define L2TP_MSGTYPE_CDN 14 /* Call-Disconnect-Notify */ #define L2TP_MSGTYPE_WEN 15 /* WAN-Error-Notify */ #define L2TP_MSGTYPE_SLI 16 /* Set-Link-Info */ static const struct tok l2tp_msgtype2str[] = { { L2TP_MSGTYPE_SCCRQ, "SCCRQ" }, { L2TP_MSGTYPE_SCCRP, "SCCRP" }, { L2TP_MSGTYPE_SCCCN, "SCCCN" }, { L2TP_MSGTYPE_STOPCCN, "StopCCN" }, { L2TP_MSGTYPE_HELLO, "HELLO" }, { L2TP_MSGTYPE_OCRQ, "OCRQ" }, { L2TP_MSGTYPE_OCRP, "OCRP" }, { L2TP_MSGTYPE_OCCN, "OCCN" }, { L2TP_MSGTYPE_ICRQ, "ICRQ" }, { L2TP_MSGTYPE_ICRP, "ICRP" }, { L2TP_MSGTYPE_ICCN, "ICCN" }, { L2TP_MSGTYPE_CDN, "CDN" }, { L2TP_MSGTYPE_WEN, "WEN" }, { L2TP_MSGTYPE_SLI, "SLI" }, { 0, NULL } }; #define L2TP_AVP_MSGTYPE 0 /* Message Type */ #define L2TP_AVP_RESULT_CODE 1 /* Result Code */ #define L2TP_AVP_PROTO_VER 2 /* Protocol Version */ #define L2TP_AVP_FRAMING_CAP 3 /* Framing Capabilities */ #define L2TP_AVP_BEARER_CAP 4 /* Bearer Capabilities */ #define L2TP_AVP_TIE_BREAKER 5 /* Tie Breaker */ #define L2TP_AVP_FIRM_VER 6 /* Firmware Revision */ #define L2TP_AVP_HOST_NAME 7 /* Host Name */ #define L2TP_AVP_VENDOR_NAME 8 /* Vendor Name */ #define L2TP_AVP_ASSND_TUN_ID 9 /* Assigned Tunnel ID */ #define L2TP_AVP_RECV_WIN_SIZE 10 /* Receive Window Size */ #define L2TP_AVP_CHALLENGE 11 /* Challenge */ #define L2TP_AVP_Q931_CC 12 /* Q.931 Cause Code */ #define L2TP_AVP_CHALLENGE_RESP 13 /* Challenge Response */ #define L2TP_AVP_ASSND_SESS_ID 14 /* Assigned Session ID */ #define L2TP_AVP_CALL_SER_NUM 15 /* Call Serial Number */ #define L2TP_AVP_MINIMUM_BPS 16 /* Minimum 
BPS */ #define L2TP_AVP_MAXIMUM_BPS 17 /* Maximum BPS */ #define L2TP_AVP_BEARER_TYPE 18 /* Bearer Type */ #define L2TP_AVP_FRAMING_TYPE 19 /* Framing Type */ #define L2TP_AVP_PACKET_PROC_DELAY 20 /* Packet Processing Delay (OBSOLETE) */ #define L2TP_AVP_CALLED_NUMBER 21 /* Called Number */ #define L2TP_AVP_CALLING_NUMBER 22 /* Calling Number */ #define L2TP_AVP_SUB_ADDRESS 23 /* Sub-Address */ #define L2TP_AVP_TX_CONN_SPEED 24 /* (Tx) Connect Speed */ #define L2TP_AVP_PHY_CHANNEL_ID 25 /* Physical Channel ID */ #define L2TP_AVP_INI_RECV_LCP 26 /* Initial Received LCP CONFREQ */ #define L2TP_AVP_LAST_SENT_LCP 27 /* Last Sent LCP CONFREQ */ #define L2TP_AVP_LAST_RECV_LCP 28 /* Last Received LCP CONFREQ */ #define L2TP_AVP_PROXY_AUTH_TYPE 29 /* Proxy Authen Type */ #define L2TP_AVP_PROXY_AUTH_NAME 30 /* Proxy Authen Name */ #define L2TP_AVP_PROXY_AUTH_CHAL 31 /* Proxy Authen Challenge */ #define L2TP_AVP_PROXY_AUTH_ID 32 /* Proxy Authen ID */ #define L2TP_AVP_PROXY_AUTH_RESP 33 /* Proxy Authen Response */ #define L2TP_AVP_CALL_ERRORS 34 /* Call Errors */ #define L2TP_AVP_ACCM 35 /* ACCM */ #define L2TP_AVP_RANDOM_VECTOR 36 /* Random Vector */ #define L2TP_AVP_PRIVATE_GRP_ID 37 /* Private Group ID */ #define L2TP_AVP_RX_CONN_SPEED 38 /* (Rx) Connect Speed */ #define L2TP_AVP_SEQ_REQUIRED 39 /* Sequencing Required */ #define L2TP_AVP_PPP_DISCON_CC 46 /* PPP Disconnect Cause Code */ static const struct tok l2tp_avp2str[] = { { L2TP_AVP_MSGTYPE, "MSGTYPE" }, { L2TP_AVP_RESULT_CODE, "RESULT_CODE" }, { L2TP_AVP_PROTO_VER, "PROTO_VER" }, { L2TP_AVP_FRAMING_CAP, "FRAMING_CAP" }, { L2TP_AVP_BEARER_CAP, "BEARER_CAP" }, { L2TP_AVP_TIE_BREAKER, "TIE_BREAKER" }, { L2TP_AVP_FIRM_VER, "FIRM_VER" }, { L2TP_AVP_HOST_NAME, "HOST_NAME" }, { L2TP_AVP_VENDOR_NAME, "VENDOR_NAME" }, { L2TP_AVP_ASSND_TUN_ID, "ASSND_TUN_ID" }, { L2TP_AVP_RECV_WIN_SIZE, "RECV_WIN_SIZE" }, { L2TP_AVP_CHALLENGE, "CHALLENGE" }, { L2TP_AVP_Q931_CC, "Q931_CC", }, { L2TP_AVP_CHALLENGE_RESP, "CHALLENGE_RESP" }, { 
L2TP_AVP_ASSND_SESS_ID, "ASSND_SESS_ID" }, { L2TP_AVP_CALL_SER_NUM, "CALL_SER_NUM" }, { L2TP_AVP_MINIMUM_BPS, "MINIMUM_BPS" }, { L2TP_AVP_MAXIMUM_BPS, "MAXIMUM_BPS" }, { L2TP_AVP_BEARER_TYPE, "BEARER_TYPE" }, { L2TP_AVP_FRAMING_TYPE, "FRAMING_TYPE" }, { L2TP_AVP_PACKET_PROC_DELAY, "PACKET_PROC_DELAY" }, { L2TP_AVP_CALLED_NUMBER, "CALLED_NUMBER" }, { L2TP_AVP_CALLING_NUMBER, "CALLING_NUMBER" }, { L2TP_AVP_SUB_ADDRESS, "SUB_ADDRESS" }, { L2TP_AVP_TX_CONN_SPEED, "TX_CONN_SPEED" }, { L2TP_AVP_PHY_CHANNEL_ID, "PHY_CHANNEL_ID" }, { L2TP_AVP_INI_RECV_LCP, "INI_RECV_LCP" }, { L2TP_AVP_LAST_SENT_LCP, "LAST_SENT_LCP" }, { L2TP_AVP_LAST_RECV_LCP, "LAST_RECV_LCP" }, { L2TP_AVP_PROXY_AUTH_TYPE, "PROXY_AUTH_TYPE" }, { L2TP_AVP_PROXY_AUTH_NAME, "PROXY_AUTH_NAME" }, { L2TP_AVP_PROXY_AUTH_CHAL, "PROXY_AUTH_CHAL" }, { L2TP_AVP_PROXY_AUTH_ID, "PROXY_AUTH_ID" }, { L2TP_AVP_PROXY_AUTH_RESP, "PROXY_AUTH_RESP" }, { L2TP_AVP_CALL_ERRORS, "CALL_ERRORS" }, { L2TP_AVP_ACCM, "ACCM" }, { L2TP_AVP_RANDOM_VECTOR, "RANDOM_VECTOR" }, { L2TP_AVP_PRIVATE_GRP_ID, "PRIVATE_GRP_ID" }, { L2TP_AVP_RX_CONN_SPEED, "RX_CONN_SPEED" }, { L2TP_AVP_SEQ_REQUIRED, "SEQ_REQUIRED" }, { L2TP_AVP_PPP_DISCON_CC, "PPP_DISCON_CC" }, { 0, NULL } }; static const struct tok l2tp_authentype2str[] = { { L2TP_AUTHEN_TYPE_RESERVED, "Reserved" }, { L2TP_AUTHEN_TYPE_TEXTUAL, "Textual" }, { L2TP_AUTHEN_TYPE_CHAP, "CHAP" }, { L2TP_AUTHEN_TYPE_PAP, "PAP" }, { L2TP_AUTHEN_TYPE_NO_AUTH, "No Auth" }, { L2TP_AUTHEN_TYPE_MSCHAPv1, "MS-CHAPv1" }, { 0, NULL } }; #define L2TP_PPP_DISCON_CC_DIRECTION_GLOBAL 0 #define L2TP_PPP_DISCON_CC_DIRECTION_AT_PEER 1 #define L2TP_PPP_DISCON_CC_DIRECTION_AT_LOCAL 2 static const struct tok l2tp_cc_direction2str[] = { { L2TP_PPP_DISCON_CC_DIRECTION_GLOBAL, "global error" }, { L2TP_PPP_DISCON_CC_DIRECTION_AT_PEER, "at peer" }, { L2TP_PPP_DISCON_CC_DIRECTION_AT_LOCAL,"at local" }, { 0, NULL } }; #if 0 static char *l2tp_result_code_StopCCN[] = { "Reserved", "General request to clear control connection", 
"General error--Error Code indicates the problem", "Control channel already exists", "Requester is not authorized to establish a control channel", "The protocol version of the requester is not supported", "Requester is being shut down", "Finite State Machine error" #define L2TP_MAX_RESULT_CODE_STOPCC_INDEX 8 }; #endif #if 0 static char *l2tp_result_code_CDN[] = { "Reserved", "Call disconnected due to loss of carrier", "Call disconnected for the reason indicated in error code", "Call disconnected for administrative reasons", "Call failed due to lack of appropriate facilities being " \ "available (temporary condition)", "Call failed due to lack of appropriate facilities being " \ "available (permanent condition)", "Invalid destination", "Call failed due to no carrier detected", "Call failed due to detection of a busy signal", "Call failed due to lack of a dial tone", "Call was not established within time allotted by LAC", "Call was connected but no appropriate framing was detected" #define L2TP_MAX_RESULT_CODE_CDN_INDEX 12 }; #endif #if 0 static char *l2tp_error_code_general[] = { "No general error", "No control connection exists yet for this LAC-LNS pair", "Length is wrong", "One of the field values was out of range or " \ "reserved field was non-zero" "Insufficient resources to handle this operation now", "The Session ID is invalid in this context", "A generic vendor-specific error occurred in the LAC", "Try another" #define L2TP_MAX_ERROR_CODE_GENERAL_INDEX 8 }; #endif /******************************/ /* generic print out routines */ /******************************/ static void print_string(netdissect_options *ndo, const u_char *dat, u_int length) { u_int i; for (i=0; i<length; i++) { ND_PRINT((ndo, "%c", *dat++)); } } static void print_octets(netdissect_options *ndo, const u_char *dat, u_int length) { u_int i; for (i=0; i<length; i++) { ND_PRINT((ndo, "%02x", *dat++)); } } static void print_16bits_val(netdissect_options *ndo, const uint16_t *dat) { ND_PRINT((ndo, 
"%u", EXTRACT_16BITS(dat))); } static void print_32bits_val(netdissect_options *ndo, const uint32_t *dat) { ND_PRINT((ndo, "%lu", (u_long)EXTRACT_32BITS(dat))); } /***********************************/ /* AVP-specific print out routines */ /***********************************/ static void l2tp_msgtype_print(netdissect_options *ndo, const u_char *dat, u_int length) { const uint16_t *ptr = (const uint16_t *)dat; if (length < 2) { ND_PRINT((ndo, "AVP too short")); return; } ND_PRINT((ndo, "%s", tok2str(l2tp_msgtype2str, "MSGTYPE-#%u", EXTRACT_16BITS(ptr)))); } static void l2tp_result_code_print(netdissect_options *ndo, const u_char *dat, u_int length) { const uint16_t *ptr = (const uint16_t *)dat; /* Result Code */ if (length < 2) { ND_PRINT((ndo, "AVP too short")); return; } ND_PRINT((ndo, "%u", EXTRACT_16BITS(ptr))); ptr++; length -= 2; /* Error Code (opt) */ if (length == 0) return; if (length < 2) { ND_PRINT((ndo, " AVP too short")); return; } ND_PRINT((ndo, "/%u", EXTRACT_16BITS(ptr))); ptr++; length -= 2; /* Error Message (opt) */ if (length == 0) return; ND_PRINT((ndo, " ")); print_string(ndo, (const u_char *)ptr, length); } static void l2tp_proto_ver_print(netdissect_options *ndo, const uint16_t *dat, u_int length) { if (length < 2) { ND_PRINT((ndo, "AVP too short")); return; } ND_PRINT((ndo, "%u.%u", (EXTRACT_16BITS(dat) >> 8), (EXTRACT_16BITS(dat) & 0xff))); } static void l2tp_framing_cap_print(netdissect_options *ndo, const u_char *dat, u_int length) { const uint32_t *ptr = (const uint32_t *)dat; if (length < 4) { ND_PRINT((ndo, "AVP too short")); return; } if (EXTRACT_32BITS(ptr) & L2TP_FRAMING_CAP_ASYNC_MASK) { ND_PRINT((ndo, "A")); } if (EXTRACT_32BITS(ptr) & L2TP_FRAMING_CAP_SYNC_MASK) { ND_PRINT((ndo, "S")); } } static void l2tp_bearer_cap_print(netdissect_options *ndo, const u_char *dat, u_int length) { const uint32_t *ptr = (const uint32_t *)dat; if (length < 4) { ND_PRINT((ndo, "AVP too short")); return; } if (EXTRACT_32BITS(ptr) & 
L2TP_BEARER_CAP_ANALOG_MASK) { ND_PRINT((ndo, "A")); } if (EXTRACT_32BITS(ptr) & L2TP_BEARER_CAP_DIGITAL_MASK) { ND_PRINT((ndo, "D")); } } static void l2tp_q931_cc_print(netdissect_options *ndo, const u_char *dat, u_int length) { if (length < 3) { ND_PRINT((ndo, "AVP too short")); return; } print_16bits_val(ndo, (const uint16_t *)dat); ND_PRINT((ndo, ", %02x", dat[2])); dat += 3; length -= 3; if (length != 0) { ND_PRINT((ndo, " ")); print_string(ndo, dat, length); } } static void l2tp_bearer_type_print(netdissect_options *ndo, const u_char *dat, u_int length) { const uint32_t *ptr = (const uint32_t *)dat; if (length < 4) { ND_PRINT((ndo, "AVP too short")); return; } if (EXTRACT_32BITS(ptr) & L2TP_BEARER_TYPE_ANALOG_MASK) { ND_PRINT((ndo, "A")); } if (EXTRACT_32BITS(ptr) & L2TP_BEARER_TYPE_DIGITAL_MASK) { ND_PRINT((ndo, "D")); } } static void l2tp_framing_type_print(netdissect_options *ndo, const u_char *dat, u_int length) { const uint32_t *ptr = (const uint32_t *)dat; if (length < 4) { ND_PRINT((ndo, "AVP too short")); return; } if (EXTRACT_32BITS(ptr) & L2TP_FRAMING_TYPE_ASYNC_MASK) { ND_PRINT((ndo, "A")); } if (EXTRACT_32BITS(ptr) & L2TP_FRAMING_TYPE_SYNC_MASK) { ND_PRINT((ndo, "S")); } } static void l2tp_packet_proc_delay_print(netdissect_options *ndo) { ND_PRINT((ndo, "obsolete")); } static void l2tp_proxy_auth_type_print(netdissect_options *ndo, const u_char *dat, u_int length) { const uint16_t *ptr = (const uint16_t *)dat; if (length < 2) { ND_PRINT((ndo, "AVP too short")); return; } ND_PRINT((ndo, "%s", tok2str(l2tp_authentype2str, "AuthType-#%u", EXTRACT_16BITS(ptr)))); } static void l2tp_proxy_auth_id_print(netdissect_options *ndo, const u_char *dat, u_int length) { const uint16_t *ptr = (const uint16_t *)dat; if (length < 2) { ND_PRINT((ndo, "AVP too short")); return; } ND_PRINT((ndo, "%u", EXTRACT_16BITS(ptr) & L2TP_PROXY_AUTH_ID_MASK)); } static void l2tp_call_errors_print(netdissect_options *ndo, const u_char *dat, u_int length) { const uint16_t *ptr = 
(const uint16_t *)dat; uint16_t val_h, val_l; if (length < 2) { ND_PRINT((ndo, "AVP too short")); return; } ptr++; /* skip "Reserved" */ length -= 2; if (length < 4) { ND_PRINT((ndo, "AVP too short")); return; } val_h = EXTRACT_16BITS(ptr); ptr++; length -= 2; val_l = EXTRACT_16BITS(ptr); ptr++; length -= 2; ND_PRINT((ndo, "CRCErr=%u ", (val_h<<16) + val_l)); if (length < 4) { ND_PRINT((ndo, "AVP too short")); return; } val_h = EXTRACT_16BITS(ptr); ptr++; length -= 2; val_l = EXTRACT_16BITS(ptr); ptr++; length -= 2; ND_PRINT((ndo, "FrameErr=%u ", (val_h<<16) + val_l)); if (length < 4) { ND_PRINT((ndo, "AVP too short")); return; } val_h = EXTRACT_16BITS(ptr); ptr++; length -= 2; val_l = EXTRACT_16BITS(ptr); ptr++; length -= 2; ND_PRINT((ndo, "HardOver=%u ", (val_h<<16) + val_l)); if (length < 4) { ND_PRINT((ndo, "AVP too short")); return; } val_h = EXTRACT_16BITS(ptr); ptr++; length -= 2; val_l = EXTRACT_16BITS(ptr); ptr++; length -= 2; ND_PRINT((ndo, "BufOver=%u ", (val_h<<16) + val_l)); if (length < 4) { ND_PRINT((ndo, "AVP too short")); return; } val_h = EXTRACT_16BITS(ptr); ptr++; length -= 2; val_l = EXTRACT_16BITS(ptr); ptr++; length -= 2; ND_PRINT((ndo, "Timeout=%u ", (val_h<<16) + val_l)); if (length < 4) { ND_PRINT((ndo, "AVP too short")); return; } val_h = EXTRACT_16BITS(ptr); ptr++; val_l = EXTRACT_16BITS(ptr); ptr++; ND_PRINT((ndo, "AlignErr=%u ", (val_h<<16) + val_l)); } static void l2tp_accm_print(netdissect_options *ndo, const u_char *dat, u_int length) { const uint16_t *ptr = (const uint16_t *)dat; uint16_t val_h, val_l; if (length < 2) { ND_PRINT((ndo, "AVP too short")); return; } ptr++; /* skip "Reserved" */ length -= 2; if (length < 4) { ND_PRINT((ndo, "AVP too short")); return; } val_h = EXTRACT_16BITS(ptr); ptr++; length -= 2; val_l = EXTRACT_16BITS(ptr); ptr++; length -= 2; ND_PRINT((ndo, "send=%08x ", (val_h<<16) + val_l)); if (length < 4) { ND_PRINT((ndo, "AVP too short")); return; } val_h = EXTRACT_16BITS(ptr); ptr++; val_l = 
EXTRACT_16BITS(ptr); ptr++; ND_PRINT((ndo, "recv=%08x ", (val_h<<16) + val_l)); } static void l2tp_ppp_discon_cc_print(netdissect_options *ndo, const u_char *dat, u_int length) { const uint16_t *ptr = (const uint16_t *)dat; if (length < 5) { ND_PRINT((ndo, "AVP too short")); return; } /* Disconnect Code */ ND_PRINT((ndo, "%04x, ", EXTRACT_16BITS(dat))); dat += 2; length -= 2; /* Control Protocol Number */ ND_PRINT((ndo, "%04x ", EXTRACT_16BITS(dat))); dat += 2; length -= 2; /* Direction */ ND_PRINT((ndo, "%s", tok2str(l2tp_cc_direction2str, "Direction-#%u", EXTRACT_8BITS(ptr)))); ptr++; length--; if (length != 0) { ND_PRINT((ndo, " ")); print_string(ndo, (const u_char *)ptr, length); } } static void l2tp_avp_print(netdissect_options *ndo, const u_char *dat, int length) { u_int len; const uint16_t *ptr = (const uint16_t *)dat; uint16_t attr_type; int hidden = FALSE; if (length <= 0) { return; } ND_PRINT((ndo, " ")); ND_TCHECK(*ptr); /* Flags & Length */ len = EXTRACT_16BITS(ptr) & L2TP_AVP_HDR_LEN_MASK; /* If it is not long enough to contain the header, we'll give up. */ if (len < 6) goto trunc; /* If it goes past the end of the remaining length of the packet, we'll give up. */ if (len > (u_int)length) goto trunc; /* If it goes past the end of the remaining length of the captured data, we'll give up. */ ND_TCHECK2(*ptr, len); /* * After this point, we don't need to check whether we go past * the length of the captured data; however, we *do* need to * check whether we go past the end of the AVP. 
*/ if (EXTRACT_16BITS(ptr) & L2TP_AVP_HDR_FLAG_MANDATORY) { ND_PRINT((ndo, "*")); } if (EXTRACT_16BITS(ptr) & L2TP_AVP_HDR_FLAG_HIDDEN) { hidden = TRUE; ND_PRINT((ndo, "?")); } ptr++; if (EXTRACT_16BITS(ptr)) { /* Vendor Specific Attribute */ ND_PRINT((ndo, "VENDOR%04x:", EXTRACT_16BITS(ptr))); ptr++; ND_PRINT((ndo, "ATTR%04x", EXTRACT_16BITS(ptr))); ptr++; ND_PRINT((ndo, "(")); print_octets(ndo, (const u_char *)ptr, len-6); ND_PRINT((ndo, ")")); } else { /* IETF-defined Attributes */ ptr++; attr_type = EXTRACT_16BITS(ptr); ptr++; ND_PRINT((ndo, "%s", tok2str(l2tp_avp2str, "AVP-#%u", attr_type))); ND_PRINT((ndo, "(")); if (hidden) { ND_PRINT((ndo, "???")); } else { switch (attr_type) { case L2TP_AVP_MSGTYPE: l2tp_msgtype_print(ndo, (const u_char *)ptr, len-6); break; case L2TP_AVP_RESULT_CODE: l2tp_result_code_print(ndo, (const u_char *)ptr, len-6); break; case L2TP_AVP_PROTO_VER: l2tp_proto_ver_print(ndo, ptr, len-6); break; case L2TP_AVP_FRAMING_CAP: l2tp_framing_cap_print(ndo, (const u_char *)ptr, len-6); break; case L2TP_AVP_BEARER_CAP: l2tp_bearer_cap_print(ndo, (const u_char *)ptr, len-6); break; case L2TP_AVP_TIE_BREAKER: if (len-6 < 8) { ND_PRINT((ndo, "AVP too short")); break; } print_octets(ndo, (const u_char *)ptr, 8); break; case L2TP_AVP_FIRM_VER: case L2TP_AVP_ASSND_TUN_ID: case L2TP_AVP_RECV_WIN_SIZE: case L2TP_AVP_ASSND_SESS_ID: if (len-6 < 2) { ND_PRINT((ndo, "AVP too short")); break; } print_16bits_val(ndo, ptr); break; case L2TP_AVP_HOST_NAME: case L2TP_AVP_VENDOR_NAME: case L2TP_AVP_CALLING_NUMBER: case L2TP_AVP_CALLED_NUMBER: case L2TP_AVP_SUB_ADDRESS: case L2TP_AVP_PROXY_AUTH_NAME: case L2TP_AVP_PRIVATE_GRP_ID: print_string(ndo, (const u_char *)ptr, len-6); break; case L2TP_AVP_CHALLENGE: case L2TP_AVP_INI_RECV_LCP: case L2TP_AVP_LAST_SENT_LCP: case L2TP_AVP_LAST_RECV_LCP: case L2TP_AVP_PROXY_AUTH_CHAL: case L2TP_AVP_PROXY_AUTH_RESP: case L2TP_AVP_RANDOM_VECTOR: print_octets(ndo, (const u_char *)ptr, len-6); break; case L2TP_AVP_Q931_CC: 
l2tp_q931_cc_print(ndo, (const u_char *)ptr, len-6); break; case L2TP_AVP_CHALLENGE_RESP: if (len-6 < 16) { ND_PRINT((ndo, "AVP too short")); break; } print_octets(ndo, (const u_char *)ptr, 16); break; case L2TP_AVP_CALL_SER_NUM: case L2TP_AVP_MINIMUM_BPS: case L2TP_AVP_MAXIMUM_BPS: case L2TP_AVP_TX_CONN_SPEED: case L2TP_AVP_PHY_CHANNEL_ID: case L2TP_AVP_RX_CONN_SPEED: if (len-6 < 4) { ND_PRINT((ndo, "AVP too short")); break; } print_32bits_val(ndo, (const uint32_t *)ptr); break; case L2TP_AVP_BEARER_TYPE: l2tp_bearer_type_print(ndo, (const u_char *)ptr, len-6); break; case L2TP_AVP_FRAMING_TYPE: l2tp_framing_type_print(ndo, (const u_char *)ptr, len-6); break; case L2TP_AVP_PACKET_PROC_DELAY: l2tp_packet_proc_delay_print(ndo); break; case L2TP_AVP_PROXY_AUTH_TYPE: l2tp_proxy_auth_type_print(ndo, (const u_char *)ptr, len-6); break; case L2TP_AVP_PROXY_AUTH_ID: l2tp_proxy_auth_id_print(ndo, (const u_char *)ptr, len-6); break; case L2TP_AVP_CALL_ERRORS: l2tp_call_errors_print(ndo, (const u_char *)ptr, len-6); break; case L2TP_AVP_ACCM: l2tp_accm_print(ndo, (const u_char *)ptr, len-6); break; case L2TP_AVP_SEQ_REQUIRED: break; /* No Attribute Value */ case L2TP_AVP_PPP_DISCON_CC: l2tp_ppp_discon_cc_print(ndo, (const u_char *)ptr, len-6); break; default: break; } } ND_PRINT((ndo, ")")); } l2tp_avp_print(ndo, dat+len, length-len); return; trunc: ND_PRINT((ndo, "|...")); } void l2tp_print(netdissect_options *ndo, const u_char *dat, u_int length) { const u_char *ptr = dat; u_int cnt = 0; /* total octets consumed */ uint16_t pad; int flag_t, flag_l, flag_s, flag_o; uint16_t l2tp_len; flag_t = flag_l = flag_s = flag_o = FALSE; ND_TCHECK2(*ptr, 2); /* Flags & Version */ if ((EXTRACT_16BITS(ptr) & L2TP_VERSION_MASK) == L2TP_VERSION_L2TP) { ND_PRINT((ndo, " l2tp:")); } else if ((EXTRACT_16BITS(ptr) & L2TP_VERSION_MASK) == L2TP_VERSION_L2F) { ND_PRINT((ndo, " l2f:")); return; /* nothing to do */ } else { ND_PRINT((ndo, " Unknown Version, neither L2F(1) nor L2TP(2)")); return; /* 
nothing we can do */ } ND_PRINT((ndo, "[")); if (EXTRACT_16BITS(ptr) & L2TP_FLAG_TYPE) { flag_t = TRUE; ND_PRINT((ndo, "T")); } if (EXTRACT_16BITS(ptr) & L2TP_FLAG_LENGTH) { flag_l = TRUE; ND_PRINT((ndo, "L")); } if (EXTRACT_16BITS(ptr) & L2TP_FLAG_SEQUENCE) { flag_s = TRUE; ND_PRINT((ndo, "S")); } if (EXTRACT_16BITS(ptr) & L2TP_FLAG_OFFSET) { flag_o = TRUE; ND_PRINT((ndo, "O")); } if (EXTRACT_16BITS(ptr) & L2TP_FLAG_PRIORITY) ND_PRINT((ndo, "P")); ND_PRINT((ndo, "]")); ptr += 2; cnt += 2; if (flag_l) { ND_TCHECK2(*ptr, 2); /* Length */ l2tp_len = EXTRACT_16BITS(ptr); ptr += 2; cnt += 2; } else { l2tp_len = 0; } ND_TCHECK2(*ptr, 2); /* Tunnel ID */ ND_PRINT((ndo, "(%u/", EXTRACT_16BITS(ptr))); ptr += 2; cnt += 2; ND_TCHECK2(*ptr, 2); /* Session ID */ ND_PRINT((ndo, "%u)", EXTRACT_16BITS(ptr))); ptr += 2; cnt += 2; if (flag_s) { ND_TCHECK2(*ptr, 2); /* Ns */ ND_PRINT((ndo, "Ns=%u,", EXTRACT_16BITS(ptr))); ptr += 2; cnt += 2; ND_TCHECK2(*ptr, 2); /* Nr */ ND_PRINT((ndo, "Nr=%u", EXTRACT_16BITS(ptr))); ptr += 2; cnt += 2; } if (flag_o) { ND_TCHECK2(*ptr, 2); /* Offset Size */ pad = EXTRACT_16BITS(ptr); ptr += (2 + pad); cnt += (2 + pad); } if (flag_l) { if (length < l2tp_len) { ND_PRINT((ndo, " Length %u larger than packet", l2tp_len)); return; } length = l2tp_len; } if (length < cnt) { ND_PRINT((ndo, " Length %u smaller than header length", length)); return; } if (flag_t) { if (!flag_l) { ND_PRINT((ndo, " No length")); return; } if (length - cnt == 0) { ND_PRINT((ndo, " ZLB")); } else { l2tp_avp_print(ndo, ptr, length - cnt); } } else { ND_PRINT((ndo, " {")); ppp_print(ndo, ptr, length - cnt); ND_PRINT((ndo, "}")); } return; trunc: ND_PRINT((ndo, "%s", tstr)); }
l2tp_bearer_type_print(netdissect_options *ndo, const u_char *dat) { const uint32_t *ptr = (const uint32_t *)dat; if (EXTRACT_32BITS(ptr) & L2TP_BEARER_TYPE_ANALOG_MASK) { ND_PRINT((ndo, "A")); } if (EXTRACT_32BITS(ptr) & L2TP_BEARER_TYPE_DIGITAL_MASK) { ND_PRINT((ndo, "D")); } }
l2tp_bearer_type_print(netdissect_options *ndo, const u_char *dat, u_int length) { const uint32_t *ptr = (const uint32_t *)dat; if (length < 4) { ND_PRINT((ndo, "AVP too short")); return; } if (EXTRACT_32BITS(ptr) & L2TP_BEARER_TYPE_ANALOG_MASK) { ND_PRINT((ndo, "A")); } if (EXTRACT_32BITS(ptr) & L2TP_BEARER_TYPE_DIGITAL_MASK) { ND_PRINT((ndo, "D")); } }
{'added': [(300, 'l2tp_msgtype_print(netdissect_options *ndo, const u_char *dat, u_int length)'), (304, '\tif (length < 2) {'), (305, '\t\tND_PRINT((ndo, "AVP too short"));'), (306, '\t\treturn;'), (307, '\t}'), (317, '\t/* Result Code */'), (318, '\tif (length < 2) {'), (319, '\t\tND_PRINT((ndo, "AVP too short"));'), (320, '\t\treturn;'), (322, '\tND_PRINT((ndo, "%u", EXTRACT_16BITS(ptr)));'), (323, '\tptr++;'), (324, '\tlength -= 2;'), (325, ''), (326, '\t/* Error Code (opt) */'), (327, '\tif (length == 0)'), (328, '\t\treturn;'), (329, '\tif (length < 2) {'), (330, '\t\tND_PRINT((ndo, " AVP too short"));'), (331, '\t\treturn;'), (333, '\tND_PRINT((ndo, "/%u", EXTRACT_16BITS(ptr)));'), (334, '\tptr++;'), (335, '\tlength -= 2;'), (336, ''), (337, '\t/* Error Message (opt) */'), (338, '\tif (length == 0)'), (339, '\t\treturn;'), (340, '\tND_PRINT((ndo, " "));'), (341, '\tprint_string(ndo, (const u_char *)ptr, length);'), (345, 'l2tp_proto_ver_print(netdissect_options *ndo, const uint16_t *dat, u_int length)'), (347, '\tif (length < 2) {'), (348, '\t\tND_PRINT((ndo, "AVP too short"));'), (349, '\t\treturn;'), (350, '\t}'), (356, 'l2tp_framing_cap_print(netdissect_options *ndo, const u_char *dat, u_int length)'), (360, '\tif (length < 4) {'), (361, '\t\tND_PRINT((ndo, "AVP too short"));'), (362, '\t\treturn;'), (363, '\t}'), (373, 'l2tp_bearer_cap_print(netdissect_options *ndo, const u_char *dat, u_int length)'), (377, '\tif (length < 4) {'), (378, '\t\tND_PRINT((ndo, "AVP too short"));'), (379, '\t\treturn;'), (380, '\t}'), (392, '\tif (length < 3) {'), (393, '\t\tND_PRINT((ndo, "AVP too short"));'), (394, '\t\treturn;'), (395, '\t}'), (398, '\tdat += 3;'), (399, '\tlength -= 3;'), (400, '\tif (length != 0) {'), (402, '\t\tprint_string(ndo, dat, length);'), (407, 'l2tp_bearer_type_print(netdissect_options *ndo, const u_char *dat, u_int length)'), (411, '\tif (length < 4) {'), (412, '\t\tND_PRINT((ndo, "AVP too short"));'), (413, '\t\treturn;'), (414, '\t}'), (424, 
'l2tp_framing_type_print(netdissect_options *ndo, const u_char *dat, u_int length)'), (428, '\tif (length < 4) {'), (429, '\t\tND_PRINT((ndo, "AVP too short"));'), (430, '\t\treturn;'), (431, '\t}'), (447, 'l2tp_proxy_auth_type_print(netdissect_options *ndo, const u_char *dat, u_int length)'), (451, '\tif (length < 2) {'), (452, '\t\tND_PRINT((ndo, "AVP too short"));'), (453, '\t\treturn;'), (454, '\t}'), (460, 'l2tp_proxy_auth_id_print(netdissect_options *ndo, const u_char *dat, u_int length)'), (464, '\tif (length < 2) {'), (465, '\t\tND_PRINT((ndo, "AVP too short"));'), (466, '\t\treturn;'), (467, '\t}'), (472, 'l2tp_call_errors_print(netdissect_options *ndo, const u_char *dat, u_int length)'), (477, '\tif (length < 2) {'), (478, '\t\tND_PRINT((ndo, "AVP too short"));'), (479, '\t\treturn;'), (480, '\t}'), (482, '\tlength -= 2;'), (484, '\tif (length < 4) {'), (485, '\t\tND_PRINT((ndo, "AVP too short"));'), (486, '\t\treturn;'), (487, '\t}'), (488, '\tval_h = EXTRACT_16BITS(ptr); ptr++; length -= 2;'), (489, '\tval_l = EXTRACT_16BITS(ptr); ptr++; length -= 2;'), (492, '\tif (length < 4) {'), (493, '\t\tND_PRINT((ndo, "AVP too short"));'), (494, '\t\treturn;'), (495, '\t}'), (496, '\tval_h = EXTRACT_16BITS(ptr); ptr++; length -= 2;'), (497, '\tval_l = EXTRACT_16BITS(ptr); ptr++; length -= 2;'), (500, '\tif (length < 4) {'), (501, '\t\tND_PRINT((ndo, "AVP too short"));'), (502, '\t\treturn;'), (503, '\t}'), (504, '\tval_h = EXTRACT_16BITS(ptr); ptr++; length -= 2;'), (505, '\tval_l = EXTRACT_16BITS(ptr); ptr++; length -= 2;'), (508, '\tif (length < 4) {'), (509, '\t\tND_PRINT((ndo, "AVP too short"));'), (510, '\t\treturn;'), (511, '\t}'), (512, '\tval_h = EXTRACT_16BITS(ptr); ptr++; length -= 2;'), (513, '\tval_l = EXTRACT_16BITS(ptr); ptr++; length -= 2;'), (516, '\tif (length < 4) {'), (517, '\t\tND_PRINT((ndo, "AVP too short"));'), (518, '\t\treturn;'), (519, '\t}'), (520, '\tval_h = EXTRACT_16BITS(ptr); ptr++; length -= 2;'), (521, '\tval_l = 
EXTRACT_16BITS(ptr); ptr++; length -= 2;'), (524, '\tif (length < 4) {'), (525, '\t\tND_PRINT((ndo, "AVP too short"));'), (526, '\t\treturn;'), (527, '\t}'), (534, 'l2tp_accm_print(netdissect_options *ndo, const u_char *dat, u_int length)'), (539, '\tif (length < 2) {'), (540, '\t\tND_PRINT((ndo, "AVP too short"));'), (541, '\t\treturn;'), (542, '\t}'), (544, '\tlength -= 2;'), (546, '\tif (length < 4) {'), (547, '\t\tND_PRINT((ndo, "AVP too short"));'), (548, '\t\treturn;'), (549, '\t}'), (550, '\tval_h = EXTRACT_16BITS(ptr); ptr++; length -= 2;'), (551, '\tval_l = EXTRACT_16BITS(ptr); ptr++; length -= 2;'), (554, '\tif (length < 4) {'), (555, '\t\tND_PRINT((ndo, "AVP too short"));'), (556, '\t\treturn;'), (557, '\t}'), (568, '\tif (length < 5) {'), (569, '\t\tND_PRINT((ndo, "AVP too short"));'), (570, '\t\treturn;'), (571, '\t}'), (572, '\t/* Disconnect Code */'), (573, '\tND_PRINT((ndo, "%04x, ", EXTRACT_16BITS(dat)));'), (574, '\tdat += 2;'), (575, '\tlength -= 2;'), (576, '\t/* Control Protocol Number */'), (577, '\tND_PRINT((ndo, "%04x ", EXTRACT_16BITS(dat)));'), (578, '\tdat += 2;'), (579, '\tlength -= 2;'), (580, '\t/* Direction */'), (582, '\t\t\t "Direction-#%u", EXTRACT_8BITS(ptr))));'), (583, '\tptr++;'), (584, '\tlength--;'), (586, '\tif (length != 0) {'), (588, '\t\tprint_string(ndo, (const u_char *)ptr, length);'), (621, ''), (622, '\t/*'), (623, "\t * After this point, we don't need to check whether we go past"), (624, '\t * the length of the captured data; however, we *do* need to'), (625, '\t * check whether we go past the end of the AVP.'), (626, '\t */'), (655, '\t\t\t\tl2tp_msgtype_print(ndo, (const u_char *)ptr, len-6);'), (661, '\t\t\t\tl2tp_proto_ver_print(ndo, ptr, len-6);'), (664, '\t\t\t\tl2tp_framing_cap_print(ndo, (const u_char *)ptr, len-6);'), (667, '\t\t\t\tl2tp_bearer_cap_print(ndo, (const u_char *)ptr, len-6);'), (670, '\t\t\t\tif (len-6 < 8) {'), (671, '\t\t\t\t\tND_PRINT((ndo, "AVP too short"));'), (672, '\t\t\t\t\tbreak;'), 
(673, '\t\t\t\t}'), (680, '\t\t\t\tif (len-6 < 2) {'), (681, '\t\t\t\t\tND_PRINT((ndo, "AVP too short"));'), (682, '\t\t\t\t\tbreak;'), (683, '\t\t\t\t}'), (708, '\t\t\t\tif (len-6 < 16) {'), (709, '\t\t\t\t\tND_PRINT((ndo, "AVP too short"));'), (710, '\t\t\t\t\tbreak;'), (711, '\t\t\t\t}'), (720, '\t\t\t\tif (len-6 < 4) {'), (721, '\t\t\t\t\tND_PRINT((ndo, "AVP too short"));'), (722, '\t\t\t\t\tbreak;'), (723, '\t\t\t\t}'), (727, '\t\t\t\tl2tp_bearer_type_print(ndo, (const u_char *)ptr, len-6);'), (730, '\t\t\t\tl2tp_framing_type_print(ndo, (const u_char *)ptr, len-6);'), (736, '\t\t\t\tl2tp_proxy_auth_type_print(ndo, (const u_char *)ptr, len-6);'), (739, '\t\t\t\tl2tp_proxy_auth_id_print(ndo, (const u_char *)ptr, len-6);'), (742, '\t\t\t\tl2tp_call_errors_print(ndo, (const u_char *)ptr, len-6);'), (745, '\t\t\t\tl2tp_accm_print(ndo, (const u_char *)ptr, len-6);')], 'deleted': [(300, 'l2tp_msgtype_print(netdissect_options *ndo, const u_char *dat)'), (313, '\tND_PRINT((ndo, "%u", EXTRACT_16BITS(ptr))); ptr++;\t/* Result Code */'), (314, '\tif (length > 2) {\t\t\t\t/* Error Code (opt) */'), (315, '\t ND_PRINT((ndo, "/%u", EXTRACT_16BITS(ptr))); ptr++;'), (317, '\tif (length > 4) {\t\t\t\t/* Error Message (opt) */'), (318, '\t\tND_PRINT((ndo, " "));'), (319, '\t\tprint_string(ndo, (const u_char *)ptr, length - 4);'), (324, 'l2tp_proto_ver_print(netdissect_options *ndo, const uint16_t *dat)'), (331, 'l2tp_framing_cap_print(netdissect_options *ndo, const u_char *dat)'), (344, 'l2tp_bearer_cap_print(netdissect_options *ndo, const u_char *dat)'), (361, '\tif (length > 3) {'), (363, '\t\tprint_string(ndo, dat+3, length-3);'), (368, 'l2tp_bearer_type_print(netdissect_options *ndo, const u_char *dat)'), (381, 'l2tp_framing_type_print(netdissect_options *ndo, const u_char *dat)'), (400, 'l2tp_proxy_auth_type_print(netdissect_options *ndo, const u_char *dat)'), (409, 'l2tp_proxy_auth_id_print(netdissect_options *ndo, const u_char *dat)'), (417, 
'l2tp_call_errors_print(netdissect_options *ndo, const u_char *dat)'), (424, '\tval_h = EXTRACT_16BITS(ptr); ptr++;'), (425, '\tval_l = EXTRACT_16BITS(ptr); ptr++;'), (428, '\tval_h = EXTRACT_16BITS(ptr); ptr++;'), (429, '\tval_l = EXTRACT_16BITS(ptr); ptr++;'), (432, '\tval_h = EXTRACT_16BITS(ptr); ptr++;'), (433, '\tval_l = EXTRACT_16BITS(ptr); ptr++;'), (436, '\tval_h = EXTRACT_16BITS(ptr); ptr++;'), (437, '\tval_l = EXTRACT_16BITS(ptr); ptr++;'), (440, '\tval_h = EXTRACT_16BITS(ptr); ptr++;'), (441, '\tval_l = EXTRACT_16BITS(ptr); ptr++;'), (450, 'l2tp_accm_print(netdissect_options *ndo, const u_char *dat)'), (457, '\tval_h = EXTRACT_16BITS(ptr); ptr++;'), (458, '\tval_l = EXTRACT_16BITS(ptr); ptr++;'), (471, '\tND_PRINT((ndo, "%04x, ", EXTRACT_16BITS(ptr))); ptr++;\t/* Disconnect Code */'), (472, '\tND_PRINT((ndo, "%04x ", EXTRACT_16BITS(ptr))); ptr++;\t/* Control Protocol Number */'), (474, '\t\t\t "Direction-#%u", *((const u_char *)ptr++))));'), (476, '\tif (length > 5) {'), (478, '\t\tprint_string(ndo, (const u_char *)ptr, length-5);'), (511, '\t/* After this point, no need to worry about truncation */'), (540, '\t\t\t\tl2tp_msgtype_print(ndo, (const u_char *)ptr);'), (546, '\t\t\t\tl2tp_proto_ver_print(ndo, ptr);'), (549, '\t\t\t\tl2tp_framing_cap_print(ndo, (const u_char *)ptr);'), (552, '\t\t\t\tl2tp_bearer_cap_print(ndo, (const u_char *)ptr);'), (596, '\t\t\t\tl2tp_bearer_type_print(ndo, (const u_char *)ptr);'), (599, '\t\t\t\tl2tp_framing_type_print(ndo, (const u_char *)ptr);'), (605, '\t\t\t\tl2tp_proxy_auth_type_print(ndo, (const u_char *)ptr);'), (608, '\t\t\t\tl2tp_proxy_auth_id_print(ndo, (const u_char *)ptr);'), (611, '\t\t\t\tl2tp_call_errors_print(ndo, (const u_char *)ptr);'), (614, '\t\t\t\tl2tp_accm_print(ndo, (const u_char *)ptr);')]}
177
46
648
3,873
10
65
3
https://github.com/the-tcpdump-group/tcpdump
CVE-2017-13006
CWE-125
1,290
out.c
C
_out_verify
/* * jabberd - Jabber Open Source Server * Copyright (c) 2002 Jeremie Miller, Thomas Muldowney, * Ryan Eatmon, Robert Norris * * This program is free software; you can redistribute it and/or modify * it under the terms of the GNU General Public License as published by * the Free Software Foundation; either version 2 of the License, or * (at your option) any later version. * * This program is distributed in the hope that it will be useful, * but WITHOUT ANY WARRANTY; without even the implied warranty of * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.See the * GNU General Public License for more details. * * You should have received a copy of the GNU General Public License * along with this program; if not, write to the Free Software * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA02111-1307USA */ #define _GNU_SOURCE #include <string.h> #include "s2s.h" #include <idna.h> /* * we handle packets going from the router to the world, and stuff * that comes in on connections we initiated. 
* * action points: * * out_packet(s2s, nad) - send this packet out * - extract to domain * - get dbconn for this domain using out_route * - if dbconn not available bounce packet * - DONE * - if conn in progress (tcp) * - add packet to queue for this domain * - DONE * - if dbconn state valid for this domain, or packet is dialback * - send packet * - DONE * - if dbconn state invalid for this domain * - bounce packet (502) * - DONE * - add packet to queue for this domain * - if dbconn state inprogress for this domain * - DONE * - out_dialback(dbconn, from, to) * * out_route(s2s, route, out, allow_bad) * - if dbconn not found * - check internal resolver cache for domain * - if not found * - ask resolver for name * - DONE * - if outgoing ip/port is to be reused * - get dbconn for any valid ip/port * - if dbconn not found * - create new dbconn * - initiate connect to ip/port * - DONE * - create new dbconn * - initiate connect to ip/port * - DONE * * out_dialback(dbconn, from, to) - initiate dialback * - generate dbkey: sha1(secret+remote+stream id) * - send auth request: <result to='them' from='us'>dbkey</result> * - set dbconn state for this domain to inprogress * - DONE * * out_resolve(s2s, query) - responses from resolver * - store ip/port/ttl in resolver cache * - flush domain queue -> out_packet(s2s, domain) * - DONE * * event_STREAM - ip/port open * - get dbconn for this sx * - for each route handled by this conn, out_dialback(dbconn, from, to) * - DONE * * event_PACKET: <result from='them' to='us' type='xxx'/> - response to our auth request * - get dbconn for this sx * - if type valid * - set dbconn state for this domain to valid * - flush dbconn queue for this domain -> out_packet(s2s, pkt) * - DONE * - set dbconn state for this domain to invalid * - bounce dbconn queue for this domain (502) * - DONE * * event_PACKET: <verify from='them' to='us' id='123' type='xxx'/> - incoming stream authenticated * - get dbconn for given id * - if type is valid * - set dbconn 
state for this domain to valid * - send result: <result to='them' from='us' type='xxx'/> * - DONE */ /* forward decls */ static int _out_mio_callback(mio_t m, mio_action_t a, mio_fd_t fd, void *data, void *arg); static int _out_sx_callback(sx_t s, sx_event_t e, void *data, void *arg); static void _out_result(conn_t out, nad_t nad); static void _out_verify(conn_t out, nad_t nad); static void _dns_result_aaaa(struct dns_ctx *ctx, struct dns_rr_a6 *result, void *data); static void _dns_result_a(struct dns_ctx *ctx, struct dns_rr_a4 *result, void *data); /** queue the packet */ static void _out_packet_queue(s2s_t s2s, pkt_t pkt) { char *rkey = s2s_route_key(NULL, pkt->from->domain, pkt->to->domain); jqueue_t q = (jqueue_t) xhash_get(s2s->outq, rkey); if(q == NULL) { log_debug(ZONE, "creating new out packet queue for '%s'", rkey); q = jqueue_new(); q->key = rkey; xhash_put(s2s->outq, q->key, (void *) q); } else { free(rkey); } log_debug(ZONE, "queueing packet for '%s'", q->key); jqueue_push(q, (void *) pkt, 0); } static void _out_dialback(conn_t out, char *rkey, int rkeylen) { char *c, *dbkey, *tmp; nad_t nad; int elem, ns; int from_len, to_len; time_t now; now = time(NULL); c = memchr(rkey, '/', rkeylen); from_len = c - rkey; c++; to_len = rkeylen - (c - rkey); /* kick off the dialback */ tmp = strndup(c, to_len); dbkey = s2s_db_key(NULL, out->s2s->local_secret, tmp, out->s->id); free(tmp); nad = nad_new(); /* request auth */ ns = nad_add_namespace(nad, uri_DIALBACK, "db"); elem = nad_append_elem(nad, ns, "result", 0); nad_set_attr(nad, elem, -1, "from", rkey, from_len); nad_set_attr(nad, elem, -1, "to", c, to_len); nad_append_cdata(nad, dbkey, strlen(dbkey), 1); log_debug(ZONE, "sending auth request for %.*s (key %s)", rkeylen, rkey, dbkey); log_write(out->s2s->log, LOG_NOTICE, "[%d] [%s, port=%d] sending dialback auth request for route '%.*s'", out->fd->fd, out->ip, out->port, rkeylen, rkey); /* off it goes */ sx_nad_write(out->s, nad); free(dbkey); /* we're in 
progress now */ xhash_put(out->states, pstrdupx(xhash_pool(out->states), rkey, rkeylen), (void *) conn_INPROGRESS); /* record the time that we set conn_INPROGRESS state */ xhash_put(out->states_time, pstrdupx(xhash_pool(out->states_time), rkey, rkeylen), (void *) now); } void _out_dns_mark_bad(conn_t out) { if (out->s2s->dns_bad_timeout > 0) { dnsres_t bad; char *ipport; /* mark this host as bad */ ipport = dns_make_ipport(out->ip, out->port); bad = xhash_get(out->s2s->dns_bad, ipport); if (bad == NULL) { bad = (dnsres_t) calloc(1, sizeof(struct dnsres_st)); bad->key = ipport; xhash_put(out->s2s->dns_bad, ipport, bad); } bad->expiry = time(NULL) + out->s2s->dns_bad_timeout; } } int dns_select(s2s_t s2s, char *ip, int *port, time_t now, dnscache_t dns, int allow_bad) { /* list of results */ dnsres_t l_reuse[DNS_MAX_RESULTS]; dnsres_t l_aaaa[DNS_MAX_RESULTS]; dnsres_t l_a[DNS_MAX_RESULTS]; dnsres_t l_bad[DNS_MAX_RESULTS]; /* running weight sums of results */ int rw_reuse[DNS_MAX_RESULTS]; int rw_aaaa[DNS_MAX_RESULTS]; int rw_a[DNS_MAX_RESULTS]; int s_reuse = 0, s_aaaa = 0, s_a = 0, s_bad = 0; /* count */ int p_reuse = 0, p_aaaa = 0, p_a = 0; /* list prio */ int wt_reuse = 0, wt_aaaa = 0, wt_a = 0; /* weight total */ int c_expired_good = 0; union xhashv xhv; dnsres_t res; char *ipport; int ipport_len; char *c; int c_len; char *tmp; /* for all results: * - if not expired * - put highest priority reuseable addrs into list1 * - put highest priority ipv6 addrs into list2 * - put highest priority ipv4 addrs into list3 * - put bad addrs into list4 * - pick weighted random entry from first non-empty list */ if (dns->results == NULL) { log_debug(ZONE, "negative cache entry for '%s'", dns->name); return -1; } log_debug(ZONE, "selecting DNS result for '%s'", dns->name); xhv.dnsres_val = &res; if (xhash_iter_first(dns->results)) { dnsres_t bad = NULL; do { xhash_iter_get(dns->results, (const char **) &ipport, &ipport_len, xhv.val); if (s2s->dns_bad_timeout > 0) bad = 
xhash_getx(s2s->dns_bad, ipport, ipport_len); if (now > res->expiry) { /* good host? */ if (bad == NULL) c_expired_good++; log_debug(ZONE, "host '%s' expired", res->key); continue; } else if (bad != NULL && !(now > bad->expiry)) { /* bad host (connection failure) */ l_bad[s_bad++] = res; log_debug(ZONE, "host '%s' bad", res->key); } else if (s2s->out_reuse && xhash_getx(s2s->out_host, ipport, ipport_len) != NULL) { /* existing connection */ log_debug(ZONE, "host '%s' exists", res->key); if (s_reuse == 0 || p_reuse > res->prio) { p_reuse = res->prio; s_reuse = 0; wt_reuse = 0; log_debug(ZONE, "reset prio list, using prio %d", res->prio); } if (res->prio <= p_reuse) { l_reuse[s_reuse] = res; wt_reuse += res->weight; rw_reuse[s_reuse] = wt_reuse; s_reuse++; log_debug(ZONE, "added host with weight %d (%d), running weight %d", (res->weight >> 8), res->weight, wt_reuse); } else { log_debug(ZONE, "ignored host with prio %d", res->prio); } } else if (memchr(ipport, ':', ipport_len) != NULL) { /* ipv6 */ log_debug(ZONE, "host '%s' IPv6", res->key); if (s_aaaa == 0 || p_aaaa > res->prio) { p_aaaa = res->prio; s_aaaa = 0; wt_aaaa = 0; log_debug(ZONE, "reset prio list, using prio %d", res->prio); } if (res->prio <= p_aaaa) { l_aaaa[s_aaaa] = res; wt_aaaa += res->weight; rw_aaaa[s_aaaa] = wt_aaaa; s_aaaa++; log_debug(ZONE, "added host with weight %d (%d), running weight %d", (res->weight >> 8), res->weight, wt_aaaa); } else { log_debug(ZONE, "ignored host with prio %d", res->prio); } } else { /* ipv4 */ log_debug(ZONE, "host '%s' IPv4", res->key); if (s_a == 0 || p_a > res->prio) { p_a = res->prio; s_a = 0; wt_a = 0; log_debug(ZONE, "reset prio list, using prio %d", res->prio); } if (res->prio <= p_a) { l_a[s_a] = res; wt_a += res->weight; rw_a[s_a] = wt_a; s_a++; log_debug(ZONE, "added host with weight %d (%d), running weight %d", (res->weight >> 8), res->weight, wt_a); } else { log_debug(ZONE, "ignored host with prio %d", res->prio); } } } 
while(xhash_iter_next(dns->results)); } /* pick a result at weighted random (RFC 2782) * all weights are guaranteed to be >= 16 && <= 16776960 * (assuming max 50 hosts, the total/running sums won't exceed 2^31) */ ipport = NULL; if (s_reuse > 0) { int i, r; log_debug(ZONE, "using existing hosts, total weight %d", wt_reuse); assert((wt_reuse + 1) > 0); r = rand() % (wt_reuse + 1); log_debug(ZONE, "random number %d", r); for (i = 0; i < s_reuse; i++) if (rw_reuse[i] >= r) { log_debug(ZONE, "selected host '%s', running weight %d", l_reuse[i]->key, rw_reuse[i]); ipport = l_reuse[i]->key; break; } } else if (s_aaaa > 0 && (s_a == 0 || p_aaaa <= p_a)) { int i, r; log_debug(ZONE, "using IPv6 hosts, total weight %d", wt_aaaa); assert((wt_aaaa + 1) > 0); r = rand() % (wt_aaaa + 1); log_debug(ZONE, "random number %d", r); for (i = 0; i < s_aaaa; i++) if (rw_aaaa[i] >= r) { log_debug(ZONE, "selected host '%s', running weight %d", l_aaaa[i]->key, rw_aaaa[i]); ipport = l_aaaa[i]->key; break; } } else if (s_a > 0) { int i, r; log_debug(ZONE, "using IPv4 hosts, total weight %d", wt_a); assert((wt_a + 1) > 0); r = rand() % (wt_a + 1); log_debug(ZONE, "random number %d", r); for (i = 0; i < s_a; i++) if (rw_a[i] >= r) { log_debug(ZONE, "selected host '%s', running weight %d", l_a[i]->key, rw_a[i]); ipport = l_a[i]->key; break; } } else if (s_bad > 0) { ipport = l_bad[rand() % s_bad]->key; log_debug(ZONE, "using bad hosts, allow_bad=%d", allow_bad); /* there are expired good hosts, expire cache immediately */ if (c_expired_good > 0) { log_debug(ZONE, "expiring this DNS cache entry, %d expired hosts", c_expired_good); dns->expiry = 0; } if (!allow_bad) return -1; } /* results cannot all expire before the collection does */ assert(ipport != NULL); /* copy the ip and port to the packet */ ipport_len = strlen(ipport); c = strchr(ipport, '/'); strncpy(ip, ipport, c-ipport); ip[c-ipport] = '\0'; c++; c_len = ipport_len - (c - ipport); tmp = strndup(c, c_len); *port = atoi(tmp); free(tmp); 
return 0; } /** find/make a connection for a route */ int out_route(s2s_t s2s, char *route, int routelen, conn_t *out, int allow_bad) { dnscache_t dns; char ipport[INET6_ADDRSTRLEN + 16], *dkey, *c; time_t now; int reuse = 0; char ip[INET6_ADDRSTRLEN] = {0}; int port, c_len, from_len; c = memchr(route, '/', routelen); from_len = c - route; c++; c_len = routelen - (c - route); dkey = strndup(c, c_len); log_debug(ZONE, "trying to find connection for '%s'", dkey); *out = (conn_t) xhash_get(s2s->out_dest, dkey); if(*out == NULL) { log_debug(ZONE, "connection for '%s' not found", dkey); /* check resolver cache for ip/port */ dns = xhash_get(s2s->dnscache, dkey); if(dns == NULL) { /* new resolution */ log_debug(ZONE, "no dns for %s, preparing for resolution", dkey); dns = (dnscache_t) calloc(1, sizeof(struct dnscache_st)); strcpy(dns->name, dkey); xhash_put(s2s->dnscache, dns->name, (void *) dns); #if 0 /* this is good for testing */ dns->pending = 0; strcpy(dns->ip, "127.0.0.1"); dns->port = 3000; dns->expiry = time(NULL) + 99999999; #endif } /* resolution in progress */ if(dns->pending) { log_debug(ZONE, "pending resolution"); free(dkey); return 0; } /* has it expired (this is 0 for new cache objects, so they're always expired */ now = time(NULL); /* each entry must be expired no earlier than the collection */ if(now > dns->expiry) { /* resolution required */ log_debug(ZONE, "requesting resolution for %s", dkey); dns->init_time = time(NULL); dns->pending = 1; dns_resolve_domain(s2s, dns); free(dkey); return 0; } /* dns is valid */ if (dns_select(s2s, ip, &port, now, dns, allow_bad)) { /* failed to find anything acceptable */ free(dkey); return -1; } /* re-request resolution if dns_select expired the data */ if (now > dns->expiry) { /* resolution required */ log_debug(ZONE, "requesting resolution for %s", dkey); dns->init_time = time(NULL); dns->pending = 1; dns_resolve_domain(s2s, dns); free(dkey); return 0; } /* generate the ip/port pair, this is the hash key for the 
conn */ snprintf(ipport, INET6_ADDRSTRLEN + 16, "%s/%d", ip, port); /* try to re-use an existing connection */ if (s2s->out_reuse) *out = (conn_t) xhash_get(s2s->out_host, ipport); if (*out != NULL) { log_write(s2s->log, LOG_NOTICE, "[%d] [%s, port=%d] using connection for '%s'", (*out)->fd->fd, (*out)->ip, (*out)->port, dkey); /* associate existing connection with domain */ xhash_put(s2s->out_dest, s2s->out_reuse ? pstrdup(xhash_pool((*out)->routes), dkey) : dkey, (void *) *out); reuse = 1; } else{ /* no conn, create one */ *out = (conn_t) calloc(1, sizeof(struct conn_st)); (*out)->s2s = s2s; (*out)->key = strdup(ipport); if (s2s->out_reuse) (*out)->dkey = NULL; else (*out)->dkey = dkey; strcpy((*out)->ip, ip); (*out)->port = port; (*out)->states = xhash_new(101); (*out)->states_time = xhash_new(101); (*out)->routes = xhash_new(101); (*out)->init_time = time(NULL); if (s2s->out_reuse) xhash_put(s2s->out_host, (*out)->key, (void *) *out); xhash_put(s2s->out_dest, s2s->out_reuse ? pstrdup(xhash_pool((*out)->routes), dkey) : dkey, (void *) *out); xhash_put((*out)->routes, pstrdupx(xhash_pool((*out)->routes), route, routelen), (void *) 1); /* connect */ log_debug(ZONE, "initiating connection to %s", ipport); /* APPLE: multiple origin_ips may be specified; use IPv6 if possible or otherwise IPv4 */ int ip_is_v6 = 0; if (strchr(ip, ':') != NULL) ip_is_v6 = 1; int i; for (i = 0; i < s2s->origin_nips; i++) { // only bother with mio_connect if the src and dst IPs are of the same type if ((ip_is_v6 && (strchr(s2s->origin_ips[i], ':') != NULL)) || // both are IPv6 (! 
ip_is_v6 && (strchr(s2s->origin_ips[i], ':') == NULL))) // both are IPv4 (*out)->fd = mio_connect(s2s->mio, port, ip, s2s->origin_ips[i], _out_mio_callback, (void *) *out); if ((*out)->fd != NULL) break; } if ((*out)->fd == NULL) { log_write(s2s->log, LOG_NOTICE, "[%d] [%s, port=%d] mio_connect error: %s (%d)", -1, (*out)->ip, (*out)->port, MIO_STRERROR(MIO_ERROR), MIO_ERROR); _out_dns_mark_bad(*out); if (s2s->out_reuse) xhash_zap(s2s->out_host, (*out)->key); xhash_zap(s2s->out_dest, dkey); xhash_free((*out)->states); xhash_free((*out)->states_time); xhash_free((*out)->routes); free((*out)->key); free((*out)->dkey); free(*out); *out = NULL; /* try again without allowing bad hosts */ return out_route(s2s, route, routelen, out, 0); } else { log_write(s2s->log, LOG_NOTICE, "[%d] [%s, port=%d] outgoing connection for '%s'", (*out)->fd->fd, (*out)->ip, (*out)->port, dkey); (*out)->s = sx_new(s2s->sx_env, (*out)->fd->fd, _out_sx_callback, (void *) *out); #ifdef HAVE_SSL /* Send a stream version of 1.0 if we can do STARTTLS */ if(s2s->sx_ssl != NULL) { sx_client_init((*out)->s, S2S_DB_HEADER, uri_SERVER, dkey, pstrdupx(xhash_pool((*out)->routes), route, from_len), "1.0"); } else { sx_client_init((*out)->s, S2S_DB_HEADER, uri_SERVER, NULL, NULL, NULL); } #else sx_client_init((*out)->s, S2S_DB_HEADER, uri_SERVER, NULL, NULL, NULL); #endif /* dkey is now used by the hash table */ return 0; } } } else { log_debug(ZONE, "connection for '%s' found (%d %s/%d)", dkey, (*out)->fd->fd, (*out)->ip, (*out)->port); } /* connection in progress, or re-using connection: add to routes list */ if (!(*out)->online || reuse) { if (xhash_getx((*out)->routes, route, routelen) == NULL) xhash_put((*out)->routes, pstrdupx(xhash_pool((*out)->routes), route, routelen), (void *) 1); } free(dkey); return 0; } void out_pkt_free(pkt_t pkt) { nad_free(pkt->nad); jid_free(pkt->from); jid_free(pkt->to); free(pkt); } /** send a packet out */ int out_packet(s2s_t s2s, pkt_t pkt) { char *rkey; int rkeylen; 
conn_t out; conn_state_t state; int ret; /* perform check against whitelist */ if (s2s->enable_whitelist > 0 && (pkt->to->domain != NULL) && (s2s_domain_in_whitelist(s2s, pkt->to->domain) == 0)) { log_write(s2s->log, LOG_NOTICE, "sending a packet to domain not in the whitelist, dropping it"); if (pkt->to != NULL) jid_free(pkt->to); if (pkt->from != NULL) jid_free(pkt->from); if (pkt->nad != NULL) nad_free(pkt->nad); free(pkt); return; } /* new route key */ rkey = s2s_route_key(NULL, pkt->from->domain, pkt->to->domain); rkeylen = strlen(rkey); /* get a connection */ ret = out_route(s2s, rkey, rkeylen, &out, 1); if (out == NULL) { /* connection not available, queue packet */ _out_packet_queue(s2s, pkt); /* check if out_route was successful in attempting a connection */ if (ret) { /* bounce queue */ out_bounce_route_queue(s2s, rkey, rkeylen, stanza_err_SERVICE_UNAVAILABLE); free(rkey); return -1; } free(rkey); return 0; } /* connection in progress */ if(!out->online) { log_debug(ZONE, "connection in progress, queueing packet"); _out_packet_queue(s2s, pkt); free(rkey); return 0; } /* connection state */ state = (conn_state_t) xhash_get(out->states, rkey); /* valid conns or dialback packets */ if(state == conn_VALID || pkt->db) { log_debug(ZONE, "writing packet for %s to outgoing conn %d", rkey, out->fd->fd); /* send it straight out */ if(pkt->db) { /* if this is a db:verify packet, increment counter and set timestamp */ if(NAD_ENAME_L(pkt->nad, 0) == 6 && strncmp("verify", NAD_ENAME(pkt->nad, 0), 6) == 0) { out->verify++; out->last_verify = time(NULL); } /* dialback packet */ sx_nad_write(out->s, pkt->nad); } else { /* if the outgoing stanza has a jabber:client namespace, remove it so that the stream jabber:server namespaces will apply (XMPP 11.2.2) */ int ns = nad_find_namespace(pkt->nad, 1, uri_CLIENT, NULL); if(ns >= 0) { /* clear the namespaces of elem 0 (internal route element) and elem 1 (message|iq|presence) */ pkt->nad->elems[0].ns = -1; 
pkt->nad->elems[0].my_ns = -1; pkt->nad->elems[1].ns = -1; pkt->nad->elems[1].my_ns = -1; } /* send it out */ sx_nad_write_elem(out->s, pkt->nad, 1); } /* update timestamp */ out->last_packet = time(NULL); jid_free(pkt->from); jid_free(pkt->to); free(pkt); free(rkey); return 0; } /* can't be handled yet, queue */ _out_packet_queue(s2s, pkt); /* if dialback is in progress, then we're done for now */ if(state == conn_INPROGRESS) { free(rkey); return 0; } /* this is a new route - send dialback auth request to piggyback on the existing connection */ if (out->s2s->require_tls == 0 || out->s->ssf > 0) { _out_dialback(out, rkey, rkeylen); } free(rkey); return 0; } char *dns_make_ipport(char *host, int port) { char *c; assert(port > 0 && port < 65536); c = (char *) malloc(strlen(host) + 7); sprintf(c, "%s/%d", host, port); return c; } static void _dns_add_result(dnsquery_t query, char *ip, int port, int prio, int weight, unsigned int ttl) { char *ipport = dns_make_ipport(ip, port); dnsres_t res = xhash_get(query->results, ipport); if (res != NULL) { if (prio < res->prio) res->prio = prio; if (prio < res->prio) { /* duplicate host at lower prio - reset weight */ res->weight = weight; } else if (prio == res->prio) { /* duplicate host at same prio - add to weight */ res->weight += weight; if (res->weight > (65535 << 8)) res->weight = (65535 << 8); } if (ttl > res->expiry) res->expiry = ttl; if (ttl > query->expiry) query->expiry = ttl; log_debug(ZONE, "dns result updated for %s@%p: %s (%d/%d/%d)", query->name, query, ipport, res->prio, (res->weight >> 8), res->expiry); } else if (xhash_count(query->results) < DNS_MAX_RESULTS) { res = pmalloc(xhash_pool(query->results), sizeof(struct dnsres_st)); res->key = pstrdup(xhash_pool(query->results), ipport); res->prio = prio; res->weight = weight; res->expiry = ttl; if (ttl > query->expiry) query->expiry = ttl; xhash_put(query->results, res->key, res); log_debug(ZONE, "dns result added for %s@%p: %s (%d/%d/%d)", query->name, query, 
ipport, res->prio, (res->weight >> 8), res->expiry); } else { log_debug(ZONE, "dns result ignored for %s@%p: %s (%d/%d/%d)", query->name, query, ipport, prio, (weight >> 8), ttl); } free(ipport); } static void _dns_add_host(dnsquery_t query, char *ip, int port, int prio, int weight, unsigned int ttl) { char *ipport = dns_make_ipport(ip, port); dnsres_t res = xhash_get(query->hosts, ipport); /* update host weights: * RFC 2482 "In the presence of records containing weights greater * than 0, records with weight 0 should have a very small chance of * being selected." * 0 -> 16 * 1-65535 -> 256-16776960 */ if (weight == 0) weight = 1 << 4; else weight <<= 8; if (res != NULL) { if (prio < res->prio) res->prio = prio; if (prio < res->prio) { /* duplicate host at lower prio - reset weight */ res->weight = weight; } else if (prio == res->prio) { /* duplicate host at same prio - add to weight */ res->weight += weight; if (res->weight > (65535 << 8)) res->weight = (65535 << 8); } if (ttl > res->expiry) res->expiry = ttl; log_debug(ZONE, "dns host updated for %s@%p: %s (%d/%d/%d)", query->name, query, ipport, res->prio, (res->weight >> 8), res->expiry); } else if (xhash_count(query->hosts) < DNS_MAX_RESULTS) { res = pmalloc(xhash_pool(query->hosts), sizeof(struct dnsres_st)); res->key = pstrdup(xhash_pool(query->hosts), ipport); res->prio = prio; res->weight = weight; res->expiry = ttl; xhash_put(query->hosts, res->key, res); log_debug(ZONE, "dns host added for %s@%p: %s (%d/%d/%d)", query->name, query, ipport, res->prio, (res->weight >> 8), res->expiry); } else { log_debug(ZONE, "dns host ignored for %s@%p: %s (%d/%d/%d)", query->name, query, ipport, prio, (weight >> 8), ttl); } free(ipport); } /* this function is called with a NULL ctx to start the SRV process */ static void _dns_result_srv(struct dns_ctx *ctx, struct dns_rr_srv *result, void *data) { dnsquery_t query = data; assert(query != NULL); query->query = NULL; if (ctx != NULL && result == NULL) { log_debug(ZONE, 
"dns failure for %s@%p: SRV %s (%d)", query->name, query, query->s2s->lookup_srv[query->srv_i], dns_status(ctx)); } else if (result != NULL) { int i; log_debug(ZONE, "dns response for %s@%p: SRV %s %d (%d)", query->name, query, result->dnssrv_qname, result->dnssrv_nrr, result->dnssrv_ttl); for (i = 0; i < result->dnssrv_nrr; i++) { if (strlen(result->dnssrv_srv[i].name) > 0 && result->dnssrv_srv[i].port > 0 && result->dnssrv_srv[i].port < 65536) { log_debug(ZONE, "dns response for %s@%p: SRV %s[%d] %s/%d (%d/%d)", query->name, query, result->dnssrv_qname, i, result->dnssrv_srv[i].name, result->dnssrv_srv[i].port, result->dnssrv_srv[i].priority, result->dnssrv_srv[i].weight); _dns_add_host(query, result->dnssrv_srv[i].name, result->dnssrv_srv[i].port, result->dnssrv_srv[i].priority, result->dnssrv_srv[i].weight, result->dnssrv_ttl); } } free(result); } /* check next SRV service name */ query->srv_i++; if (query->srv_i < query->s2s->lookup_nsrv) { log_debug(ZONE, "dns request for %s@%p: SRV %s", query->name, query, query->s2s->lookup_srv[query->srv_i]); query->query = dns_submit_srv(NULL, query->name, query->s2s->lookup_srv[query->srv_i], "tcp", DNS_NOSRCH, _dns_result_srv, query); /* if submit failed, call ourselves with a NULL result */ if (query->query == NULL) _dns_result_srv(ctx, NULL, query); } else { /* no more SRV records to check, resolve hosts */ if (xhash_count(query->hosts) > 0) { _dns_result_a(NULL, NULL, query); /* no SRV records returned, resolve hostname */ } else { query->cur_host = strdup(query->name); query->cur_port = 5269; query->cur_prio = 0; query->cur_weight = 0; query->cur_expiry = 0; if (query->s2s->resolve_aaaa) { log_debug(ZONE, "dns request for %s@%p: AAAA %s", query->name, query, query->name); query->query = dns_submit_a6(NULL, query->name, DNS_NOSRCH, _dns_result_aaaa, query); /* if submit failed, call ourselves with a NULL result */ if (query->query == NULL) _dns_result_aaaa(ctx, NULL, query); } else { log_debug(ZONE, "dns request for 
%s@%p: A %s", query->name, query, query->name); query->query = dns_submit_a4(NULL, query->name, DNS_NOSRCH, _dns_result_a, query); /* if submit failed, call ourselves with a NULL result */ if (query->query == NULL) _dns_result_a(ctx, NULL, query); } } } } static void _dns_result_aaaa(struct dns_ctx *ctx, struct dns_rr_a6 *result, void *data) { dnsquery_t query = data; char ip[INET6_ADDRSTRLEN]; int i; assert(query != NULL); query->query = NULL; if (ctx != NULL && result == NULL) { log_debug(ZONE, "dns failure for %s@%p: AAAA %s (%d)", query->name, query, query->cur_host, dns_status(ctx)); } else if (result != NULL) { log_debug(ZONE, "dns response for %s@%p: AAAA %s %d (%d)", query->name, query, result->dnsa6_qname, result->dnsa6_nrr, result->dnsa6_ttl); if (query->cur_expiry > 0 && result->dnsa6_ttl > query->cur_expiry) result->dnsa6_ttl = query->cur_expiry; for (i = 0; i < result->dnsa6_nrr; i++) { if (inet_ntop(AF_INET6, &result->dnsa6_addr[i], ip, INET6_ADDRSTRLEN) != NULL) { log_debug(ZONE, "dns response for %s@%p: AAAA %s[%d] %s/%d", query->name, query, result->dnsa6_qname, i, ip, query->cur_port); _dns_add_result(query, ip, query->cur_port, query->cur_prio, query->cur_weight, result->dnsa6_ttl); } } } if (query->cur_host != NULL) { /* do ipv4 resolution too */ log_debug(ZONE, "dns request for %s@%p: A %s", query->name, query, query->cur_host); query->query = dns_submit_a4(NULL, query->cur_host, DNS_NOSRCH, _dns_result_a, query); /* if submit failed, call ourselves with a NULL result */ if (query->query == NULL) _dns_result_a(ctx, NULL, query); } else { /* uh-oh */ log_debug(ZONE, "dns result for %s@%p: AAAA host vanished...", query->name, query); _dns_result_a(NULL, NULL, query); } free(result); } /* try /etc/hosts if the A process did not return any results */ static int _etc_hosts_lookup(const char *cszName, char *szIP, const int ciMaxIPLen) { #define EHL_LINE_LEN 260 int iSuccess = 0; size_t iLen; char szLine[EHL_LINE_LEN + 1]; /* one extra for the space 
character (*) */ char *pcStart, *pcEnd; FILE *fHosts; do { /* initialization */ fHosts = NULL; /* sanity checks */ if ((cszName == NULL) || (szIP == NULL) || (ciMaxIPLen <= 0)) break; szIP[0] = 0; /* open the hosts file */ #ifdef _WIN32 pcStart = getenv("WINDIR"); if (pcStart != NULL) { sprintf(szLine, "%s\\system32\\drivers\\etc\\hosts", pcStart); } else { strcpy(szLine, "C:\\WINDOWS\\system32\\drivers\\etc\\hosts"); } #else strcpy(szLine, "/etc/hosts"); #endif fHosts = fopen(szLine, "r"); if (fHosts == NULL) break; /* read line by line ... */ while (fgets(szLine, EHL_LINE_LEN, fHosts) != NULL) { /* remove comments */ pcStart = strchr (szLine, '#'); if (pcStart != NULL) *pcStart = 0; strcat(szLine, " "); /* append a space character for easier parsing (*) */ /* first to appear: IP address */ iLen = strspn(szLine, "1234567890."); if ((iLen < 7) || (iLen > 15)) /* superficial test for anything between x.x.x.x and xxx.xxx.xxx.xxx */ continue; pcEnd = szLine + iLen; *pcEnd = 0; pcEnd++; /* not beyond the end of the line yet (*) */ /* check strings separated by blanks, tabs or newlines */ pcStart = pcEnd + strspn(pcEnd, " \t\n"); while (*pcStart != 0) { pcEnd = pcStart + strcspn(pcStart, " \t\n"); *pcEnd = 0; pcEnd++; /* not beyond the end of the line yet (*) */ if (strcasecmp(pcStart, cszName) == 0) { strncpy(szIP, szLine, ciMaxIPLen - 1); szIP[ciMaxIPLen - 1] = '\0'; iSuccess = 1; break; } pcStart = pcEnd + strspn(pcEnd, " \t\n"); } if (iSuccess) break; } } while (0); if (fHosts != NULL) fclose(fHosts); return (iSuccess); } /* this function is called with a NULL ctx to start the A/AAAA process */ static void _dns_result_a(struct dns_ctx *ctx, struct dns_rr_a4 *result, void *data) { dnsquery_t query = data; assert(query != NULL); query->query = NULL; if (ctx != NULL && result == NULL) { #define DRA_IP_LEN 16 char szIP[DRA_IP_LEN]; if (_etc_hosts_lookup (query->name, szIP, DRA_IP_LEN)) { log_debug(ZONE, "/etc/lookup for %s@%p: %s (%d)", query->name, query, szIP, 
query->s2s->etc_hosts_ttl); _dns_add_result (query, szIP, query->cur_port, query->cur_prio, query->cur_weight, query->s2s->etc_hosts_ttl); } else { log_debug(ZONE, "dns failure for %s@%p: A %s (%d)", query->name, query, query->cur_host, dns_status(ctx)); } } else if (result != NULL) { char ip[INET_ADDRSTRLEN]; int i; log_debug(ZONE, "dns response for %s@%p: A %s %d (%d)", query->name, query, result->dnsa4_qname, result->dnsa4_nrr, result->dnsa4_ttl); if (query->cur_expiry > 0 && result->dnsa4_ttl > query->cur_expiry) result->dnsa4_ttl = query->cur_expiry; for (i = 0; i < result->dnsa4_nrr; i++) { if (inet_ntop(AF_INET, &result->dnsa4_addr[i], ip, INET_ADDRSTRLEN) != NULL) { log_debug(ZONE, "dns response for %s@%p: A %s[%d] %s/%d", query->name, query, result->dnsa4_qname, i, ip, query->cur_port); _dns_add_result(query, ip, query->cur_port, query->cur_prio, query->cur_weight, result->dnsa4_ttl); } } free(result); } /* resolve the next host in the list */ if (xhash_iter_first(query->hosts)) { char *ipport, *c, *tmp; int ipport_len, ip_len, port_len; dnsres_t res; union xhashv xhv; xhv.dnsres_val = &res; /* get the first entry */ xhash_iter_get(query->hosts, (const char **) &ipport, &ipport_len, xhv.val); /* remove the host from the list */ xhash_iter_zap(query->hosts); c = memchr(ipport, '/', ipport_len); ip_len = c - ipport; c++; port_len = ipport_len - (c - ipport); /* resolve hostname */ free(query->cur_host); query->cur_host = strndup(ipport, ip_len); tmp = strndup(c, port_len); query->cur_port = atoi(tmp); free(tmp); query->cur_prio = res->prio; query->cur_weight = res->weight; query->cur_expiry = res->expiry; log_debug(ZONE, "dns ttl for %s@%p limited to %d", query->name, query, query->cur_expiry); if (query->s2s->resolve_aaaa) { log_debug(ZONE, "dns request for %s@%p: AAAA %s", query->name, query, query->cur_host); query->query = dns_submit_a6(NULL, query->cur_host, DNS_NOSRCH, _dns_result_aaaa, query); /* if submit failed, call ourselves with a NULL result */ 
if (query->query == NULL) _dns_result_aaaa(ctx, NULL, query); } else { log_debug(ZONE, "dns request for %s@%p: A %s", query->name, query, query->cur_host); query->query = dns_submit_a4(NULL, query->cur_host, DNS_NOSRCH, _dns_result_a, query); /* if submit failed, call ourselves with a NULL result */ if (query->query == NULL) _dns_result_a(ctx, NULL, query); } /* finished */ } else { time_t now = time(NULL); char *domain; free(query->cur_host); query->cur_host = NULL; log_debug(ZONE, "dns requests for %s@%p complete: %d (%d)", query->name, query, xhash_count(query->results), query->expiry); /* update query TTL */ if (query->expiry > query->s2s->dns_max_ttl) query->expiry = query->s2s->dns_max_ttl; if (query->expiry < query->s2s->dns_min_ttl) query->expiry = query->s2s->dns_min_ttl; query->expiry += now; /* update result TTLs - the query expiry MUST NOT be longer than all result expiries */ if (xhash_iter_first(query->results)) { union xhashv xhv; dnsres_t res; xhv.dnsres_val = &res; do { xhash_iter_get(query->results, NULL, NULL, xhv.val); if (res->expiry > query->s2s->dns_max_ttl) res->expiry = query->s2s->dns_max_ttl; if (res->expiry < query->s2s->dns_min_ttl) res->expiry = query->s2s->dns_min_ttl; res->expiry += now; } while(xhash_iter_next(query->results)); } xhash_free(query->hosts); query->hosts = NULL; if (idna_to_unicode_8z8z(query->name, &domain, 0) != IDNA_SUCCESS) { log_write(query->s2s->log, LOG_ERR, "idna dns decode for %s failed", query->name); /* fake empty results to shortcut resolution failure */ xhash_free(query->results); query->results = xhash_new(71); query->expiry = time(NULL) + 99999999; domain = strdup(query->name); } out_resolve(query->s2s, domain, query->results, query->expiry); free(domain); free(query->name); free(query); } } void dns_resolve_domain(s2s_t s2s, dnscache_t dns) { dnsquery_t query = (dnsquery_t) calloc(1, sizeof(struct dnsquery_st)); query->s2s = s2s; query->results = xhash_new(71); if (idna_to_ascii_8z(dns->name, 
&query->name, 0) != IDNA_SUCCESS) { log_write(s2s->log, LOG_ERR, "idna dns encode for %s failed", dns->name); /* shortcut resolution failure */ query->expiry = time(NULL) + 99999999; out_resolve(query->s2s, dns->name, query->results, query->expiry); return; } query->hosts = xhash_new(71); query->srv_i = -1; query->expiry = 0; query->cur_host = NULL; query->cur_port = 0; query->cur_expiry = 0; query->query = NULL; dns->query = query; log_debug(ZONE, "dns resolve for %s@%p started", query->name, query); /* - resolve all SRV records to host/port * - if no results, include domain/5269 * - resolve all host/port combinations * - return result */ _dns_result_srv(NULL, NULL, query); } /** responses from the resolver */ void out_resolve(s2s_t s2s, char *domain, xht results, time_t expiry) { dnscache_t dns; /* no results, resolve failed */ if(xhash_count(results) == 0) { dns = xhash_get(s2s->dnscache, domain); if (dns != NULL) { /* store negative DNS cache */ xhash_free(dns->results); dns->query = NULL; dns->results = NULL; dns->expiry = expiry; dns->pending = 0; } log_write(s2s->log, LOG_NOTICE, "dns lookup for %s failed", domain); /* bounce queue */ out_bounce_domain_queues(s2s, domain, stanza_err_REMOTE_SERVER_NOT_FOUND); xhash_free(results); return; } log_write(s2s->log, LOG_NOTICE, "dns lookup for %s returned %d result%s (ttl %d)", domain, xhash_count(results), xhash_count(results)!=1?"s":"", expiry - time(NULL)); /* get the cache entry */ dns = xhash_get(s2s->dnscache, domain); if(dns == NULL) { /* retry using punycode */ char *punydomain; if (idna_to_ascii_8z(domain, &punydomain, 0) == IDNA_SUCCESS) { dns = xhash_get(s2s->dnscache, punydomain); free(punydomain); } } if(dns == NULL) { log_write(s2s->log, LOG_ERR, "weird, never requested %s resolution", domain); return; } /* fill it out */ xhash_free(dns->results); dns->query = NULL; dns->results = results; dns->expiry = expiry; dns->pending = 0; out_flush_domain_queues(s2s, domain); /* delete the cache entry if caching 
is disabled */ if (!s2s->dns_cache_enabled && !dns->pending) { xhash_free(dns->results); xhash_zap(s2s->dnscache, domain); free(dns); } } /** mio callback for outgoing conns */ static int _out_mio_callback(mio_t m, mio_action_t a, mio_fd_t fd, void *data, void *arg) { conn_t out = (conn_t) arg; char ipport[INET6_ADDRSTRLEN + 17]; int nbytes; switch(a) { case action_READ: log_debug(ZONE, "read action on fd %d", fd->fd); /* they did something */ out->last_activity = time(NULL); ioctl(fd->fd, FIONREAD, &nbytes); if(nbytes == 0) { sx_kill(out->s); return 0; } return sx_can_read(out->s); case action_WRITE: log_debug(ZONE, "write action on fd %d", fd->fd); /* update activity timestamp */ out->last_activity = time(NULL); return sx_can_write(out->s); case action_CLOSE: log_debug(ZONE, "close action on fd %d", fd->fd); jqueue_push(out->s2s->dead, (void *) out->s, 0); log_write(out->s2s->log, LOG_NOTICE, "[%d] [%s, port=%d] disconnect, packets: %i", fd->fd, out->ip, out->port, out->packet_count); if (out->s2s->out_reuse) { /* generate the ip/port pair */ snprintf(ipport, INET6_ADDRSTRLEN + 16, "%s/%d", out->ip, out->port); xhash_zap(out->s2s->out_host, ipport); } if (xhash_iter_first(out->routes)) { char *rkey; int rkeylen; char *c; int c_len; /* remove all the out_dest entries */ do { xhash_iter_get(out->routes, (const char **) &rkey, &rkeylen, NULL); c = memchr(rkey, '/', rkeylen); c++; c_len = rkeylen - (c - rkey); log_debug(ZONE, "route '%.*s'", rkeylen, rkey); if (xhash_getx(out->s2s->out_dest, c, c_len) != NULL) { log_debug(ZONE, "removing dest entry for '%.*s'", c_len, c); xhash_zapx(out->s2s->out_dest, c, c_len); } } while(xhash_iter_next(out->routes)); } if (xhash_iter_first(out->routes)) { char *rkey; int rkeylen; jqueue_t q; int npkt; /* retry all the routes */ do { xhash_iter_get(out->routes, (const char **) &rkey, &rkeylen, NULL); q = xhash_getx(out->s2s->outq, rkey, rkeylen); if (out->s2s->retry_limit > 0 && q != NULL && jqueue_age(q) > out->s2s->retry_limit) { 
log_write(out->s2s->log, LOG_NOTICE, "[%d] [%s, port=%d] retry limit reached for '%.*s' queue", fd->fd, out->ip, out->port, rkeylen, rkey); q = NULL; } if (q != NULL && (npkt = jqueue_size(q)) > 0 && xhash_get(out->states, rkey) != (void*) conn_INPROGRESS) { conn_t retry; log_debug(ZONE, "retrying connection for '%.*s' queue", rkeylen, rkey); if (!out_route(out->s2s, rkey, rkeylen, &retry, 0)) { log_debug(ZONE, "retry successful"); if (retry != NULL) { /* flush queue */ out_flush_route_queue(out->s2s, rkey, rkeylen); } } else { log_debug(ZONE, "retry failed"); /* bounce queue */ out_bounce_route_queue(out->s2s, rkey, rkeylen, stanza_err_SERVICE_UNAVAILABLE); _out_dns_mark_bad(out); } } else { /* bounce queue */ out_bounce_route_queue(out->s2s, rkey, rkeylen, stanza_err_REMOTE_SERVER_TIMEOUT); _out_dns_mark_bad(out); } } while(xhash_iter_next(out->routes)); } jqueue_push(out->s2s->dead_conn, (void *) out, 0); case action_ACCEPT: break; } return 0; } void send_dialbacks(conn_t out) { char *rkey; int rkeylen; if (out->s2s->dns_bad_timeout > 0) { dnsres_t bad = xhash_get(out->s2s->dns_bad, out->key); if (bad != NULL) { log_debug(ZONE, "removing bad host entry for '%s'", out->key); xhash_zap(out->s2s->dns_bad, out->key); free(bad->key); free(bad); } } if (xhash_iter_first(out->routes)) { log_debug(ZONE, "sending dialback packets for %s", out->key); do { xhash_iter_get(out->routes, (const char **) &rkey, &rkeylen, NULL); _out_dialback(out, rkey, rkeylen); } while(xhash_iter_next(out->routes)); } return; } static int _out_sx_callback(sx_t s, sx_event_t e, void *data, void *arg) { conn_t out = (conn_t) arg; sx_buf_t buf = (sx_buf_t) data; int len, ns, elem, starttls = 0; sx_error_t *sxe; nad_t nad; switch(e) { case event_WANT_READ: log_debug(ZONE, "want read"); mio_read(out->s2s->mio, out->fd); break; case event_WANT_WRITE: log_debug(ZONE, "want write"); mio_write(out->s2s->mio, out->fd); break; case event_READ: log_debug(ZONE, "reading from %d", out->fd->fd); /* do the 
read */ len = recv(out->fd->fd, buf->data, buf->len, 0); if(len < 0) { if(MIO_WOULDBLOCK) { buf->len = 0; return 0; } log_write(out->s2s->log, LOG_NOTICE, "[%d] [%s, port=%d] read error: %s (%d)", out->fd->fd, out->ip, out->port, MIO_STRERROR(MIO_ERROR), MIO_ERROR); if (!out->online) { _out_dns_mark_bad(out); } sx_kill(s); return -1; } else if(len == 0) { /* they went away */ sx_kill(s); return -1; } log_debug(ZONE, "read %d bytes", len); buf->len = len; return len; case event_WRITE: log_debug(ZONE, "writing to %d", out->fd->fd); len = send(out->fd->fd, buf->data, buf->len, 0); if(len >= 0) { log_debug(ZONE, "%d bytes written", len); return len; } if(MIO_WOULDBLOCK) return 0; log_write(out->s2s->log, LOG_NOTICE, "[%d] [%s, port=%d] write error: %s (%d)", out->fd->fd, out->ip, out->port, MIO_STRERROR(MIO_ERROR), MIO_ERROR); if (!out->online) { _out_dns_mark_bad(out); } sx_kill(s); return -1; case event_ERROR: sxe = (sx_error_t *) data; log_write(out->s2s->log, LOG_NOTICE, "[%d] [%s, port=%d] error: %s (%s)", out->fd->fd, out->ip, out->port, sxe->generic, sxe->specific); /* mark as bad if we did not manage to connect or there is unrecoverable stream error */ if (!out->online || (sxe->code == SX_ERR_STREAM && (strstr(sxe->specific, "host-gone") || /* it's not there now */ strstr(sxe->specific, "host-unknown") || /* they do not service the host */ strstr(sxe->specific, "not-authorized") || /* they do not want us there */ strstr(sxe->specific, "see-other-host") || /* we do not support redirections yet */ strstr(sxe->specific, "system-shutdown") || /* they are going down */ strstr(sxe->specific, "policy-violation") || /* they do not want us there */ strstr(sxe->specific, "remote-connection-failed") || /* the required remote entity is gone */ strstr(sxe->specific, "unsupported-encoding") || /* they do not like our encoding */ strstr(sxe->specific, "undefined-condition") || /* something bad happend */ strstr(sxe->specific, "internal-server-error") || /* that server is 
broken */ strstr(sxe->specific, "unsupported-version") /* they do not support our stream version */ ))) { _out_dns_mark_bad(out); } sx_kill(s); return -1; case event_OPEN: log_debug(ZONE, "OPEN event for %s", out->key); break; case event_STREAM: /* check stream version - NULl = pre-xmpp (some jabber1 servers) */ log_debug(ZONE, "STREAM event for %s stream version is %s", out->key, out->s->res_version); /* first time, bring them online */ if(!out->online) { log_debug(ZONE, "outgoing conn to %s is online", out->key); /* if no stream version from either side, kick off dialback for each route, */ /* otherwise wait for stream features */ if (((out->s->res_version==NULL) || (out->s2s->sx_ssl == NULL)) && out->s2s->require_tls == 0) { log_debug(ZONE, "no stream version, sending dialbacks for %s immediately", out->key); out->online = 1; send_dialbacks(out); } else log_debug(ZONE, "outgoing conn to %s - waiting for STREAM features", out->key); } break; case event_PACKET: /* we're counting packets */ out->packet_count++; out->s2s->packet_count++; nad = (nad_t) data; /* watch for the features packet - STARTTLS and/or SASL*/ if ((out->s->res_version!=NULL) && NAD_NURI_L(nad, NAD_ENS(nad, 0)) == strlen(uri_STREAMS) && strncmp(uri_STREAMS, NAD_NURI(nad, NAD_ENS(nad, 0)), strlen(uri_STREAMS)) == 0 && NAD_ENAME_L(nad, 0) == 8 && strncmp("features", NAD_ENAME(nad, 0), 8) == 0) { log_debug(ZONE, "got the stream features packet"); #ifdef HAVE_SSL /* starttls if we can */ if(out->s2s->sx_ssl != NULL && s->ssf == 0) { ns = nad_find_scoped_namespace(nad, uri_TLS, NULL); if(ns >= 0) { elem = nad_find_elem(nad, 0, ns, "starttls", 1); if(elem >= 0) { log_debug(ZONE, "got STARTTLS in stream features"); if(sx_ssl_client_starttls(out->s2s->sx_ssl, s, out->s2s->local_pemfile) == 0) { starttls = 1; nad_free(nad); return 0; } log_write(out->s2s->log, LOG_ERR, "unable to establish encrypted session with peer"); } } } /* If we're not establishing a starttls connection, send dialbacks */ if 
(!starttls) { if (out->s2s->require_tls == 0 || s->ssf > 0) { log_debug(ZONE, "No STARTTLS, sending dialbacks for %s", out->key); out->online = 1; send_dialbacks(out); } else { log_debug(ZONE, "No STARTTLS, dialbacks disabled for non-TLS connections, cannot complete negotiation"); } } #else if (out->s2s->require_tls == 0) { out->online = 1; send_dialbacks(out); } #endif } /* we only accept dialback packets */ if(NAD_ENS(nad, 0) < 0 || NAD_NURI_L(nad, NAD_ENS(nad, 0)) != uri_DIALBACK_L || strncmp(uri_DIALBACK, NAD_NURI(nad, NAD_ENS(nad, 0)), uri_DIALBACK_L) != 0) { log_debug(ZONE, "got a non-dialback packet on an outgoing conn, dropping it"); nad_free(nad); return 0; } /* and then only result and verify */ if(NAD_ENAME_L(nad, 0) == 6) { if(strncmp("result", NAD_ENAME(nad, 0), 6) == 0) { _out_result(out, nad); return 0; } if(strncmp("verify", NAD_ENAME(nad, 0), 6) == 0) { _out_verify(out, nad); return 0; } } log_debug(ZONE, "unknown dialback packet, dropping it"); nad_free(nad); return 0; case event_CLOSED: if (out->fd != NULL) { mio_close(out->s2s->mio, out->fd); out->fd = NULL; } return -1; } return 0; } /** process incoming auth responses */ static void _out_result(conn_t out, nad_t nad) { int attr; jid_t from, to; char *rkey; int rkeylen; attr = nad_find_attr(nad, 0, -1, "from", NULL); if(attr < 0 || (from = jid_new(NAD_AVAL(nad, attr), NAD_AVAL_L(nad, attr))) == NULL) { log_debug(ZONE, "missing or invalid from on db result packet"); nad_free(nad); return; } attr = nad_find_attr(nad, 0, -1, "to", NULL); if(attr < 0 || (to = jid_new(NAD_AVAL(nad, attr), NAD_AVAL_L(nad, attr))) == NULL) { log_debug(ZONE, "missing or invalid to on db result packet"); jid_free(from); nad_free(nad); return; } rkey = s2s_route_key(NULL, to->domain, from->domain); rkeylen = strlen(rkey); /* key is valid */ if(nad_find_attr(nad, 0, -1, "type", "valid") >= 0) { log_write(out->s2s->log, LOG_NOTICE, "[%d] [%s, port=%d] outgoing route '%s' is now valid%s%s", out->fd->fd, out->ip, out->port, 
rkey, (out->s->flags & SX_SSL_WRAPPER) ? ", TLS negotiated" : "", out->s->compressed ? ", ZLIB compression enabled" : ""); xhash_put(out->states, pstrdup(xhash_pool(out->states), rkey), (void *) conn_VALID); /* !!! small leak here */ log_debug(ZONE, "%s valid, flushing queue", rkey); /* flush the queue */ out_flush_route_queue(out->s2s, rkey, rkeylen); free(rkey); jid_free(from); jid_free(to); nad_free(nad); return; } /* invalid */ log_write(out->s2s->log, LOG_NOTICE, "[%d] [%s, port=%d] outgoing route '%s' is now invalid", out->fd->fd, out->ip, out->port, rkey); /* close connection */ log_write(out->s2s->log, LOG_NOTICE, "[%d] [%s, port=%d] closing connection", out->fd->fd, out->ip, out->port); /* report stream error */ sx_error(out->s, stream_err_INVALID_ID, "dialback negotiation failed"); /* close the stream */ sx_close(out->s); /* bounce queue */ out_bounce_route_queue(out->s2s, rkey, rkeylen, stanza_err_SERVICE_UNAVAILABLE); free(rkey); jid_free(from); jid_free(to); nad_free(nad); } /** incoming stream authenticated */ static void _out_verify(conn_t out, nad_t nad) { int attr, ns; jid_t from, to; conn_t in; char *rkey; int valid; attr = nad_find_attr(nad, 0, -1, "from", NULL); if(attr < 0 || (from = jid_new(NAD_AVAL(nad, attr), NAD_AVAL_L(nad, attr))) == NULL) { log_debug(ZONE, "missing or invalid from on db verify packet"); nad_free(nad); return; } attr = nad_find_attr(nad, 0, -1, "to", NULL); if(attr < 0 || (to = jid_new(NAD_AVAL(nad, attr), NAD_AVAL_L(nad, attr))) == NULL) { log_debug(ZONE, "missing or invalid to on db verify packet"); jid_free(from); nad_free(nad); return; } attr = nad_find_attr(nad, 0, -1, "id", NULL); if(attr < 0) { log_debug(ZONE, "missing id on db verify packet"); jid_free(from); jid_free(to); nad_free(nad); return; } /* get the incoming conn */ in = xhash_getx(out->s2s->in, NAD_AVAL(nad, attr), NAD_AVAL_L(nad, attr)); if(in == NULL) { log_debug(ZONE, "got a verify for incoming conn %.*s, but it doesn't exist, dropping the packet", 
NAD_AVAL_L(nad, attr), NAD_AVAL(nad, attr)); jid_free(from); jid_free(to); nad_free(nad); return; } rkey = s2s_route_key(NULL, to->domain, from->domain); attr = nad_find_attr(nad, 0, -1, "type", "valid"); if(attr >= 0) { xhash_put(in->states, pstrdup(xhash_pool(in->states), rkey), (void *) conn_VALID); log_write(in->s2s->log, LOG_NOTICE, "[%d] [%s, port=%d] incoming route '%s' is now valid%s%s", in->fd->fd, in->ip, in->port, rkey, (in->s->flags & SX_SSL_WRAPPER) ? ", TLS negotiated" : "", in->s->compressed ? ", ZLIB compression enabled" : ""); valid = 1; } else { log_write(in->s2s->log, LOG_NOTICE, "[%d] [%s, port=%d] incoming route '%s' is now invalid", in->fd->fd, in->ip, in->port, rkey); valid = 0; } free(rkey); nad_free(nad); /* decrement outstanding verify counter */ --out->verify; /* let them know what happened */ nad = nad_new(); ns = nad_add_namespace(nad, uri_DIALBACK, "db"); nad_append_elem(nad, ns, "result", 0); nad_append_attr(nad, -1, "to", from->domain); nad_append_attr(nad, -1, "from", to->domain); nad_append_attr(nad, -1, "type", valid ? 
"valid" : "invalid"); /* off it goes */ sx_nad_write(in->s, nad); /* if invalid, close the stream */ if (!valid) { /* generate stream error */ sx_error(in->s, stream_err_INVALID_ID, "dialback negotiation failed"); /* close the incoming stream */ sx_close(in->s); } jid_free(from); jid_free(to); } /* bounce all packets in the queues for domain */ int out_bounce_domain_queues(s2s_t s2s, const char *domain, int err) { char *rkey; int rkeylen; int pktcount = 0; if (xhash_iter_first(s2s->outq)) { do { xhash_iter_get(s2s->outq, (const char **) &rkey, &rkeylen, NULL); if(s2s_route_key_match(NULL, (char *) domain, rkey, rkeylen)) pktcount += out_bounce_route_queue(s2s, rkey, rkeylen, err); } while(xhash_iter_next(s2s->outq)); } return pktcount; } /* bounce all packets in the queue for route */ int out_bounce_route_queue(s2s_t s2s, char *rkey, int rkeylen, int err) { jqueue_t q; pkt_t pkt; int pktcount = 0; q = xhash_getx(s2s->outq, rkey, rkeylen); if(q == NULL) return 0; while((pkt = jqueue_pull(q)) != NULL) { /* only packets with content, in namespace jabber:client and not already errors */ if(pkt->nad->ecur > 1 && NAD_NURI_L(pkt->nad, NAD_ENS(pkt->nad, 1)) == strlen(uri_CLIENT) && strncmp(NAD_NURI(pkt->nad, NAD_ENS(pkt->nad, 1)), uri_CLIENT, strlen(uri_CLIENT)) == 0 && nad_find_attr(pkt->nad, 0, -1, "error", NULL) < 0) { sx_nad_write(s2s->router, stanza_tofrom(stanza_tofrom(stanza_error(pkt->nad, 1, err), 1), 0)); pktcount++; } else nad_free(pkt->nad); jid_free(pkt->to); jid_free(pkt->from); free(pkt); } /* delete queue and remove domain from queue hash */ log_debug(ZONE, "deleting out packet queue for %.*s", rkeylen, rkey); rkey = q->key; jqueue_free(q); xhash_zap(s2s->outq, rkey); free(rkey); return pktcount; } int out_bounce_conn_queues(conn_t out, int err) { char *rkey; int rkeylen; int pktcount = 0; /* bounce queues for all domains handled by this connection - iterate through routes */ if (xhash_iter_first(out->routes)) { do { xhash_iter_get(out->routes, (const char 
**) &rkey, &rkeylen, NULL); pktcount += out_bounce_route_queue(out->s2s, rkey, rkeylen, err); } while(xhash_iter_next(out->routes)); } return pktcount; } void out_flush_domain_queues(s2s_t s2s, const char *domain) { char *rkey; int rkeylen; char *c; int c_len; if (xhash_iter_first(s2s->outq)) { do { xhash_iter_get(s2s->outq, (const char **) &rkey, &rkeylen, NULL); c = memchr(rkey, '/', rkeylen); c++; c_len = rkeylen - (c - rkey); if (strncmp(domain, c, c_len) == 0) out_flush_route_queue(s2s, rkey, rkeylen); } while(xhash_iter_next(s2s->outq)); } } void out_flush_route_queue(s2s_t s2s, char *rkey, int rkeylen) { jqueue_t q; pkt_t pkt; int npkt, i, ret; q = xhash_getx(s2s->outq, rkey, rkeylen); if(q == NULL) return; npkt = jqueue_size(q); log_debug(ZONE, "flushing %d packets for '%.*s' to out_packet", npkt, rkeylen, rkey); for(i = 0; i < npkt; i++) { pkt = jqueue_pull(q); if(pkt) { ret = out_packet(s2s, pkt); if (ret) { /* uh-oh. the queue was deleted... q and pkt have been freed if q->key == rkey, rkey has also been freed */ return; } } } /* delete queue for route and remove route from queue hash */ if (jqueue_size(q) == 0) { log_debug(ZONE, "deleting out packet queue for '%.*s'", rkeylen, rkey); rkey = q->key; jqueue_free(q); xhash_zap(s2s->outq, rkey); free(rkey); } else { log_debug(ZONE, "emptied queue gained more packets..."); } }
/* * jabberd - Jabber Open Source Server * Copyright (c) 2002 Jeremie Miller, Thomas Muldowney, * Ryan Eatmon, Robert Norris * * This program is free software; you can redistribute it and/or modify * it under the terms of the GNU General Public License as published by * the Free Software Foundation; either version 2 of the License, or * (at your option) any later version. * * This program is distributed in the hope that it will be useful, * but WITHOUT ANY WARRANTY; without even the implied warranty of * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.See the * GNU General Public License for more details. * * You should have received a copy of the GNU General Public License * along with this program; if not, write to the Free Software * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA02111-1307USA */ #define _GNU_SOURCE #include <string.h> #include "s2s.h" #include <idna.h> /* * we handle packets going from the router to the world, and stuff * that comes in on connections we initiated. 
* * action points: * * out_packet(s2s, nad) - send this packet out * - extract to domain * - get dbconn for this domain using out_route * - if dbconn not available bounce packet * - DONE * - if conn in progress (tcp) * - add packet to queue for this domain * - DONE * - if dbconn state valid for this domain, or packet is dialback * - send packet * - DONE * - if dbconn state invalid for this domain * - bounce packet (502) * - DONE * - add packet to queue for this domain * - if dbconn state inprogress for this domain * - DONE * - out_dialback(dbconn, from, to) * * out_route(s2s, route, out, allow_bad) * - if dbconn not found * - check internal resolver cache for domain * - if not found * - ask resolver for name * - DONE * - if outgoing ip/port is to be reused * - get dbconn for any valid ip/port * - if dbconn not found * - create new dbconn * - initiate connect to ip/port * - DONE * - create new dbconn * - initiate connect to ip/port * - DONE * * out_dialback(dbconn, from, to) - initiate dialback * - generate dbkey: sha1(secret+remote+stream id) * - send auth request: <result to='them' from='us'>dbkey</result> * - set dbconn state for this domain to inprogress * - DONE * * out_resolve(s2s, query) - responses from resolver * - store ip/port/ttl in resolver cache * - flush domain queue -> out_packet(s2s, domain) * - DONE * * event_STREAM - ip/port open * - get dbconn for this sx * - for each route handled by this conn, out_dialback(dbconn, from, to) * - DONE * * event_PACKET: <result from='them' to='us' type='xxx'/> - response to our auth request * - get dbconn for this sx * - if type valid * - set dbconn state for this domain to valid * - flush dbconn queue for this domain -> out_packet(s2s, pkt) * - DONE * - set dbconn state for this domain to invalid * - bounce dbconn queue for this domain (502) * - DONE * * event_PACKET: <verify from='them' to='us' id='123' type='xxx'/> - incoming stream authenticated * - get dbconn for given id * - if type is valid * - set dbconn 
state for this domain to valid * - send result: <result to='them' from='us' type='xxx'/> * - DONE */ /* forward decls */ static int _out_mio_callback(mio_t m, mio_action_t a, mio_fd_t fd, void *data, void *arg); static int _out_sx_callback(sx_t s, sx_event_t e, void *data, void *arg); static void _out_result(conn_t out, nad_t nad); static void _out_verify(conn_t out, nad_t nad); static void _dns_result_aaaa(struct dns_ctx *ctx, struct dns_rr_a6 *result, void *data); static void _dns_result_a(struct dns_ctx *ctx, struct dns_rr_a4 *result, void *data); /** queue the packet */ static void _out_packet_queue(s2s_t s2s, pkt_t pkt) { char *rkey = s2s_route_key(NULL, pkt->from->domain, pkt->to->domain); jqueue_t q = (jqueue_t) xhash_get(s2s->outq, rkey); if(q == NULL) { log_debug(ZONE, "creating new out packet queue for '%s'", rkey); q = jqueue_new(); q->key = rkey; xhash_put(s2s->outq, q->key, (void *) q); } else { free(rkey); } log_debug(ZONE, "queueing packet for '%s'", q->key); jqueue_push(q, (void *) pkt, 0); } static void _out_dialback(conn_t out, char *rkey, int rkeylen) { char *c, *dbkey, *tmp; nad_t nad; int elem, ns; int from_len, to_len; time_t now; now = time(NULL); c = memchr(rkey, '/', rkeylen); from_len = c - rkey; c++; to_len = rkeylen - (c - rkey); /* kick off the dialback */ tmp = strndup(c, to_len); dbkey = s2s_db_key(NULL, out->s2s->local_secret, tmp, out->s->id); free(tmp); nad = nad_new(); /* request auth */ ns = nad_add_namespace(nad, uri_DIALBACK, "db"); elem = nad_append_elem(nad, ns, "result", 0); nad_set_attr(nad, elem, -1, "from", rkey, from_len); nad_set_attr(nad, elem, -1, "to", c, to_len); nad_append_cdata(nad, dbkey, strlen(dbkey), 1); log_debug(ZONE, "sending auth request for %.*s (key %s)", rkeylen, rkey, dbkey); log_write(out->s2s->log, LOG_NOTICE, "[%d] [%s, port=%d] sending dialback auth request for route '%.*s'", out->fd->fd, out->ip, out->port, rkeylen, rkey); /* off it goes */ sx_nad_write(out->s, nad); free(dbkey); /* we're in 
progress now */ xhash_put(out->states, pstrdupx(xhash_pool(out->states), rkey, rkeylen), (void *) conn_INPROGRESS); /* record the time that we set conn_INPROGRESS state */ xhash_put(out->states_time, pstrdupx(xhash_pool(out->states_time), rkey, rkeylen), (void *) now); } void _out_dns_mark_bad(conn_t out) { if (out->s2s->dns_bad_timeout > 0) { dnsres_t bad; char *ipport; /* mark this host as bad */ ipport = dns_make_ipport(out->ip, out->port); bad = xhash_get(out->s2s->dns_bad, ipport); if (bad == NULL) { bad = (dnsres_t) calloc(1, sizeof(struct dnsres_st)); bad->key = ipport; xhash_put(out->s2s->dns_bad, ipport, bad); } bad->expiry = time(NULL) + out->s2s->dns_bad_timeout; } } int dns_select(s2s_t s2s, char *ip, int *port, time_t now, dnscache_t dns, int allow_bad) { /* list of results */ dnsres_t l_reuse[DNS_MAX_RESULTS]; dnsres_t l_aaaa[DNS_MAX_RESULTS]; dnsres_t l_a[DNS_MAX_RESULTS]; dnsres_t l_bad[DNS_MAX_RESULTS]; /* running weight sums of results */ int rw_reuse[DNS_MAX_RESULTS]; int rw_aaaa[DNS_MAX_RESULTS]; int rw_a[DNS_MAX_RESULTS]; int s_reuse = 0, s_aaaa = 0, s_a = 0, s_bad = 0; /* count */ int p_reuse = 0, p_aaaa = 0, p_a = 0; /* list prio */ int wt_reuse = 0, wt_aaaa = 0, wt_a = 0; /* weight total */ int c_expired_good = 0; union xhashv xhv; dnsres_t res; char *ipport; int ipport_len; char *c; int c_len; char *tmp; /* for all results: * - if not expired * - put highest priority reuseable addrs into list1 * - put highest priority ipv6 addrs into list2 * - put highest priority ipv4 addrs into list3 * - put bad addrs into list4 * - pick weighted random entry from first non-empty list */ if (dns->results == NULL) { log_debug(ZONE, "negative cache entry for '%s'", dns->name); return -1; } log_debug(ZONE, "selecting DNS result for '%s'", dns->name); xhv.dnsres_val = &res; if (xhash_iter_first(dns->results)) { dnsres_t bad = NULL; do { xhash_iter_get(dns->results, (const char **) &ipport, &ipport_len, xhv.val); if (s2s->dns_bad_timeout > 0) bad = 
xhash_getx(s2s->dns_bad, ipport, ipport_len); if (now > res->expiry) { /* good host? */ if (bad == NULL) c_expired_good++; log_debug(ZONE, "host '%s' expired", res->key); continue; } else if (bad != NULL && !(now > bad->expiry)) { /* bad host (connection failure) */ l_bad[s_bad++] = res; log_debug(ZONE, "host '%s' bad", res->key); } else if (s2s->out_reuse && xhash_getx(s2s->out_host, ipport, ipport_len) != NULL) { /* existing connection */ log_debug(ZONE, "host '%s' exists", res->key); if (s_reuse == 0 || p_reuse > res->prio) { p_reuse = res->prio; s_reuse = 0; wt_reuse = 0; log_debug(ZONE, "reset prio list, using prio %d", res->prio); } if (res->prio <= p_reuse) { l_reuse[s_reuse] = res; wt_reuse += res->weight; rw_reuse[s_reuse] = wt_reuse; s_reuse++; log_debug(ZONE, "added host with weight %d (%d), running weight %d", (res->weight >> 8), res->weight, wt_reuse); } else { log_debug(ZONE, "ignored host with prio %d", res->prio); } } else if (memchr(ipport, ':', ipport_len) != NULL) { /* ipv6 */ log_debug(ZONE, "host '%s' IPv6", res->key); if (s_aaaa == 0 || p_aaaa > res->prio) { p_aaaa = res->prio; s_aaaa = 0; wt_aaaa = 0; log_debug(ZONE, "reset prio list, using prio %d", res->prio); } if (res->prio <= p_aaaa) { l_aaaa[s_aaaa] = res; wt_aaaa += res->weight; rw_aaaa[s_aaaa] = wt_aaaa; s_aaaa++; log_debug(ZONE, "added host with weight %d (%d), running weight %d", (res->weight >> 8), res->weight, wt_aaaa); } else { log_debug(ZONE, "ignored host with prio %d", res->prio); } } else { /* ipv4 */ log_debug(ZONE, "host '%s' IPv4", res->key); if (s_a == 0 || p_a > res->prio) { p_a = res->prio; s_a = 0; wt_a = 0; log_debug(ZONE, "reset prio list, using prio %d", res->prio); } if (res->prio <= p_a) { l_a[s_a] = res; wt_a += res->weight; rw_a[s_a] = wt_a; s_a++; log_debug(ZONE, "added host with weight %d (%d), running weight %d", (res->weight >> 8), res->weight, wt_a); } else { log_debug(ZONE, "ignored host with prio %d", res->prio); } } } 
while(xhash_iter_next(dns->results)); } /* pick a result at weighted random (RFC 2782) * all weights are guaranteed to be >= 16 && <= 16776960 * (assuming max 50 hosts, the total/running sums won't exceed 2^31) */ ipport = NULL; if (s_reuse > 0) { int i, r; log_debug(ZONE, "using existing hosts, total weight %d", wt_reuse); assert((wt_reuse + 1) > 0); r = rand() % (wt_reuse + 1); log_debug(ZONE, "random number %d", r); for (i = 0; i < s_reuse; i++) if (rw_reuse[i] >= r) { log_debug(ZONE, "selected host '%s', running weight %d", l_reuse[i]->key, rw_reuse[i]); ipport = l_reuse[i]->key; break; } } else if (s_aaaa > 0 && (s_a == 0 || p_aaaa <= p_a)) { int i, r; log_debug(ZONE, "using IPv6 hosts, total weight %d", wt_aaaa); assert((wt_aaaa + 1) > 0); r = rand() % (wt_aaaa + 1); log_debug(ZONE, "random number %d", r); for (i = 0; i < s_aaaa; i++) if (rw_aaaa[i] >= r) { log_debug(ZONE, "selected host '%s', running weight %d", l_aaaa[i]->key, rw_aaaa[i]); ipport = l_aaaa[i]->key; break; } } else if (s_a > 0) { int i, r; log_debug(ZONE, "using IPv4 hosts, total weight %d", wt_a); assert((wt_a + 1) > 0); r = rand() % (wt_a + 1); log_debug(ZONE, "random number %d", r); for (i = 0; i < s_a; i++) if (rw_a[i] >= r) { log_debug(ZONE, "selected host '%s', running weight %d", l_a[i]->key, rw_a[i]); ipport = l_a[i]->key; break; } } else if (s_bad > 0) { ipport = l_bad[rand() % s_bad]->key; log_debug(ZONE, "using bad hosts, allow_bad=%d", allow_bad); /* there are expired good hosts, expire cache immediately */ if (c_expired_good > 0) { log_debug(ZONE, "expiring this DNS cache entry, %d expired hosts", c_expired_good); dns->expiry = 0; } if (!allow_bad) return -1; } /* results cannot all expire before the collection does */ assert(ipport != NULL); /* copy the ip and port to the packet */ ipport_len = strlen(ipport); c = strchr(ipport, '/'); strncpy(ip, ipport, c-ipport); ip[c-ipport] = '\0'; c++; c_len = ipport_len - (c - ipport); tmp = strndup(c, c_len); *port = atoi(tmp); free(tmp); 
/** find/make a connection for a route
 *
 * route ("from/to", NOT NUL-terminated, routelen bytes) is resolved to an
 * outgoing connection in *out.  Returns 0 on success or when resolution /
 * connection is still in progress (*out may be NULL in that case — the
 * caller must queue the packet), -1 when no acceptable host exists.
 */
int out_route(s2s_t s2s, char *route, int routelen, conn_t *out, int allow_bad) {
    dnscache_t dns;
    char ipport[INET6_ADDRSTRLEN + 16], *dkey, *c;
    time_t now;
    int reuse = 0;
    char ip[INET6_ADDRSTRLEN] = {0};
    int port, c_len, from_len;

    /* dkey = destination domain (the part after the '/') */
    c = memchr(route, '/', routelen);
    from_len = c - route;
    c++;
    c_len = routelen - (c - route);
    dkey = strndup(c, c_len);

    log_debug(ZONE, "trying to find connection for '%s'", dkey);
    *out = (conn_t) xhash_get(s2s->out_dest, dkey);
    if(*out == NULL) {
        log_debug(ZONE, "connection for '%s' not found", dkey);

        /* check resolver cache for ip/port */
        dns = xhash_get(s2s->dnscache, dkey);
        if(dns == NULL) {
            /* new resolution */
            log_debug(ZONE, "no dns for %s, preparing for resolution", dkey);

            dns = (dnscache_t) calloc(1, sizeof(struct dnscache_st));
            strcpy(dns->name, dkey);
            xhash_put(s2s->dnscache, dns->name, (void *) dns);
#if 0
            /* this is good for testing */
            dns->pending = 0;
            strcpy(dns->ip, "127.0.0.1");
            dns->port = 3000;
            dns->expiry = time(NULL) + 99999999;
#endif
        }

        /* resolution in progress */
        if(dns->pending) {
            log_debug(ZONE, "pending resolution");
            free(dkey);
            return 0;
        }

        /* has it expired (this is 0 for new cache objects, so they're always expired */
        now = time(NULL); /* each entry must be expired no earlier than the collection */
        if(now > dns->expiry) {
            /* resolution required */
            log_debug(ZONE, "requesting resolution for %s", dkey);

            dns->init_time = time(NULL);
            dns->pending = 1;

            dns_resolve_domain(s2s, dns);

            free(dkey);
            return 0;
        }

        /* dns is valid */
        if (dns_select(s2s, ip, &port, now, dns, allow_bad)) {
            /* failed to find anything acceptable */
            free(dkey);
            return -1;
        }

        /* re-request resolution if dns_select expired the data */
        if (now > dns->expiry) {
            /* resolution required */
            log_debug(ZONE, "requesting resolution for %s", dkey);

            dns->init_time = time(NULL);
            dns->pending = 1;

            dns_resolve_domain(s2s, dns);

            free(dkey);
            return 0;
        }

        /* generate the ip/port pair, this is the hash key for the conn */
        snprintf(ipport, INET6_ADDRSTRLEN + 16, "%s/%d", ip, port);

        /* try to re-use an existing connection */
        if (s2s->out_reuse)
            *out = (conn_t) xhash_get(s2s->out_host, ipport);

        if (*out != NULL) {
            log_write(s2s->log, LOG_NOTICE, "[%d] [%s, port=%d] using connection for '%s'", (*out)->fd->fd, (*out)->ip, (*out)->port, dkey);

            /* associate existing connection with domain */
            xhash_put(s2s->out_dest, s2s->out_reuse ? pstrdup(xhash_pool((*out)->routes), dkey) : dkey, (void *) *out);

            reuse = 1;
        } else{
            /* no conn, create one */
            *out = (conn_t) calloc(1, sizeof(struct conn_st));

            (*out)->s2s = s2s;

            (*out)->key = strdup(ipport);
            /* ownership of dkey: with out_reuse the hash stores a pool copy,
             * otherwise the conn takes the heap string itself */
            if (s2s->out_reuse)
                (*out)->dkey = NULL;
            else
                (*out)->dkey = dkey;

            strcpy((*out)->ip, ip);
            (*out)->port = port;

            (*out)->states = xhash_new(101);
            (*out)->states_time = xhash_new(101);

            (*out)->routes = xhash_new(101);

            (*out)->init_time = time(NULL);

            if (s2s->out_reuse)
                xhash_put(s2s->out_host, (*out)->key, (void *) *out);
            xhash_put(s2s->out_dest, s2s->out_reuse ? pstrdup(xhash_pool((*out)->routes), dkey) : dkey, (void *) *out);

            xhash_put((*out)->routes, pstrdupx(xhash_pool((*out)->routes), route, routelen), (void *) 1);

            /* connect */
            log_debug(ZONE, "initiating connection to %s", ipport);

            /* APPLE: multiple origin_ips may be specified; use IPv6 if possible or otherwise IPv4 */
            int ip_is_v6 = 0;
            if (strchr(ip, ':') != NULL)
                ip_is_v6 = 1;
            int i;
            for (i = 0; i < s2s->origin_nips; i++) {
                // only bother with mio_connect if the src and dst IPs are of the same type
                if ((ip_is_v6 && (strchr(s2s->origin_ips[i], ':') != NULL)) ||     // both are IPv6
                    (! ip_is_v6 && (strchr(s2s->origin_ips[i], ':') == NULL)))     // both are IPv4
                    (*out)->fd = mio_connect(s2s->mio, port, ip, s2s->origin_ips[i], _out_mio_callback, (void *) *out);
                if ((*out)->fd != NULL) break;
            }

            if ((*out)->fd == NULL) {
                /* connect failed: tear the half-built conn down, mark the
                 * host bad and retry the route excluding bad hosts */
                log_write(s2s->log, LOG_NOTICE, "[%d] [%s, port=%d] mio_connect error: %s (%d)", -1, (*out)->ip, (*out)->port, MIO_STRERROR(MIO_ERROR), MIO_ERROR);

                _out_dns_mark_bad(*out);

                if (s2s->out_reuse)
                    xhash_zap(s2s->out_host, (*out)->key);
                xhash_zap(s2s->out_dest, dkey);

                xhash_free((*out)->states);
                xhash_free((*out)->states_time);

                xhash_free((*out)->routes);

                free((*out)->key);
                /* NOTE(review): with out_reuse set, (*out)->dkey is NULL and
                 * the local dkey is never freed on this path — looks like a
                 * small leak; confirm before changing */
                free((*out)->dkey);
                free(*out);
                *out = NULL;

                /* try again without allowing bad hosts */
                return out_route(s2s, route, routelen, out, 0);
            } else {
                log_write(s2s->log, LOG_NOTICE, "[%d] [%s, port=%d] outgoing connection for '%s'", (*out)->fd->fd, (*out)->ip, (*out)->port, dkey);

                (*out)->s = sx_new(s2s->sx_env, (*out)->fd->fd, _out_sx_callback, (void *) *out);

#ifdef HAVE_SSL
                /* Send a stream version of 1.0 if we can do STARTTLS */
                if(s2s->sx_ssl != NULL) {
                    sx_client_init((*out)->s, S2S_DB_HEADER, uri_SERVER, dkey, pstrdupx(xhash_pool((*out)->routes), route, from_len), "1.0");
                } else {
                    sx_client_init((*out)->s, S2S_DB_HEADER, uri_SERVER, NULL, NULL, NULL);
                }
#else
                sx_client_init((*out)->s, S2S_DB_HEADER, uri_SERVER, NULL, NULL, NULL);
#endif
                /* dkey is now used by the hash table */
                return 0;
            }
        }
    } else {
        log_debug(ZONE, "connection for '%s' found (%d %s/%d)", dkey, (*out)->fd->fd, (*out)->ip, (*out)->port);
    }

    /* connection in progress, or re-using connection: add to routes list */
    if (!(*out)->online || reuse) {
        if (xhash_getx((*out)->routes, route, routelen) == NULL)
            xhash_put((*out)->routes, pstrdupx(xhash_pool((*out)->routes), route, routelen), (void *) 1);
    }

    free(dkey);
    return 0;
}
ip_is_v6 && (strchr(s2s->origin_ips[i], ':') == NULL))) // both are IPv4 (*out)->fd = mio_connect(s2s->mio, port, ip, s2s->origin_ips[i], _out_mio_callback, (void *) *out); if ((*out)->fd != NULL) break; } if ((*out)->fd == NULL) { log_write(s2s->log, LOG_NOTICE, "[%d] [%s, port=%d] mio_connect error: %s (%d)", -1, (*out)->ip, (*out)->port, MIO_STRERROR(MIO_ERROR), MIO_ERROR); _out_dns_mark_bad(*out); if (s2s->out_reuse) xhash_zap(s2s->out_host, (*out)->key); xhash_zap(s2s->out_dest, dkey); xhash_free((*out)->states); xhash_free((*out)->states_time); xhash_free((*out)->routes); free((*out)->key); free((*out)->dkey); free(*out); *out = NULL; /* try again without allowing bad hosts */ return out_route(s2s, route, routelen, out, 0); } else { log_write(s2s->log, LOG_NOTICE, "[%d] [%s, port=%d] outgoing connection for '%s'", (*out)->fd->fd, (*out)->ip, (*out)->port, dkey); (*out)->s = sx_new(s2s->sx_env, (*out)->fd->fd, _out_sx_callback, (void *) *out); #ifdef HAVE_SSL /* Send a stream version of 1.0 if we can do STARTTLS */ if(s2s->sx_ssl != NULL) { sx_client_init((*out)->s, S2S_DB_HEADER, uri_SERVER, dkey, pstrdupx(xhash_pool((*out)->routes), route, from_len), "1.0"); } else { sx_client_init((*out)->s, S2S_DB_HEADER, uri_SERVER, NULL, NULL, NULL); } #else sx_client_init((*out)->s, S2S_DB_HEADER, uri_SERVER, NULL, NULL, NULL); #endif /* dkey is now used by the hash table */ return 0; } } } else { log_debug(ZONE, "connection for '%s' found (%d %s/%d)", dkey, (*out)->fd->fd, (*out)->ip, (*out)->port); } /* connection in progress, or re-using connection: add to routes list */ if (!(*out)->online || reuse) { if (xhash_getx((*out)->routes, route, routelen) == NULL) xhash_put((*out)->routes, pstrdupx(xhash_pool((*out)->routes), route, routelen), (void *) 1); } free(dkey); return 0; } void out_pkt_free(pkt_t pkt) { nad_free(pkt->nad); jid_free(pkt->from); jid_free(pkt->to); free(pkt); } /** send a packet out */ int out_packet(s2s_t s2s, pkt_t pkt) { char *rkey; int rkeylen; 
conn_t out; conn_state_t state; int ret; /* perform check against whitelist */ if (s2s->enable_whitelist > 0 && (pkt->to->domain != NULL) && (s2s_domain_in_whitelist(s2s, pkt->to->domain) == 0)) { log_write(s2s->log, LOG_NOTICE, "sending a packet to domain not in the whitelist, dropping it"); if (pkt->to != NULL) jid_free(pkt->to); if (pkt->from != NULL) jid_free(pkt->from); if (pkt->nad != NULL) nad_free(pkt->nad); free(pkt); return; } /* new route key */ rkey = s2s_route_key(NULL, pkt->from->domain, pkt->to->domain); rkeylen = strlen(rkey); /* get a connection */ ret = out_route(s2s, rkey, rkeylen, &out, 1); if (out == NULL) { /* connection not available, queue packet */ _out_packet_queue(s2s, pkt); /* check if out_route was successful in attempting a connection */ if (ret) { /* bounce queue */ out_bounce_route_queue(s2s, rkey, rkeylen, stanza_err_SERVICE_UNAVAILABLE); free(rkey); return -1; } free(rkey); return 0; } /* connection in progress */ if(!out->online) { log_debug(ZONE, "connection in progress, queueing packet"); _out_packet_queue(s2s, pkt); free(rkey); return 0; } /* connection state */ state = (conn_state_t) xhash_get(out->states, rkey); /* valid conns or dialback packets */ if(state == conn_VALID || pkt->db) { log_debug(ZONE, "writing packet for %s to outgoing conn %d", rkey, out->fd->fd); /* send it straight out */ if(pkt->db) { /* if this is a db:verify packet, increment counter and set timestamp */ if(NAD_ENAME_L(pkt->nad, 0) == 6 && strncmp("verify", NAD_ENAME(pkt->nad, 0), 6) == 0) { out->verify++; out->last_verify = time(NULL); } /* dialback packet */ sx_nad_write(out->s, pkt->nad); } else { /* if the outgoing stanza has a jabber:client namespace, remove it so that the stream jabber:server namespaces will apply (XMPP 11.2.2) */ int ns = nad_find_namespace(pkt->nad, 1, uri_CLIENT, NULL); if(ns >= 0) { /* clear the namespaces of elem 0 (internal route element) and elem 1 (message|iq|presence) */ pkt->nad->elems[0].ns = -1; 
/* Build a heap-allocated "host/port" key string; the caller frees it.
 * port must be a valid TCP port (1..65535). */
char *dns_make_ipport(char *host, int port) {
    char *c;
    size_t len;

    assert(port > 0 && port < 65536);

    /* up to 5 digits for an in-range port, plus '/' and the NUL */
    len = strlen(host) + 7;
    c = (char *) malloc(len);
    /* fix: snprintf instead of sprintf — with NDEBUG the assert vanishes
     * and an out-of-range (e.g. negative) port could overflow the buffer */
    snprintf(c, len, "%s/%d", host, port);
    return c;
}
ipport, res->prio, (res->weight >> 8), res->expiry); } else { log_debug(ZONE, "dns result ignored for %s@%p: %s (%d/%d/%d)", query->name, query, ipport, prio, (weight >> 8), ttl); } free(ipport); } static void _dns_add_host(dnsquery_t query, char *ip, int port, int prio, int weight, unsigned int ttl) { char *ipport = dns_make_ipport(ip, port); dnsres_t res = xhash_get(query->hosts, ipport); /* update host weights: * RFC 2482 "In the presence of records containing weights greater * than 0, records with weight 0 should have a very small chance of * being selected." * 0 -> 16 * 1-65535 -> 256-16776960 */ if (weight == 0) weight = 1 << 4; else weight <<= 8; if (res != NULL) { if (prio < res->prio) res->prio = prio; if (prio < res->prio) { /* duplicate host at lower prio - reset weight */ res->weight = weight; } else if (prio == res->prio) { /* duplicate host at same prio - add to weight */ res->weight += weight; if (res->weight > (65535 << 8)) res->weight = (65535 << 8); } if (ttl > res->expiry) res->expiry = ttl; log_debug(ZONE, "dns host updated for %s@%p: %s (%d/%d/%d)", query->name, query, ipport, res->prio, (res->weight >> 8), res->expiry); } else if (xhash_count(query->hosts) < DNS_MAX_RESULTS) { res = pmalloc(xhash_pool(query->hosts), sizeof(struct dnsres_st)); res->key = pstrdup(xhash_pool(query->hosts), ipport); res->prio = prio; res->weight = weight; res->expiry = ttl; xhash_put(query->hosts, res->key, res); log_debug(ZONE, "dns host added for %s@%p: %s (%d/%d/%d)", query->name, query, ipport, res->prio, (res->weight >> 8), res->expiry); } else { log_debug(ZONE, "dns host ignored for %s@%p: %s (%d/%d/%d)", query->name, query, ipport, prio, (weight >> 8), ttl); } free(ipport); } /* this function is called with a NULL ctx to start the SRV process */ static void _dns_result_srv(struct dns_ctx *ctx, struct dns_rr_srv *result, void *data) { dnsquery_t query = data; assert(query != NULL); query->query = NULL; if (ctx != NULL && result == NULL) { log_debug(ZONE, 
"dns failure for %s@%p: SRV %s (%d)", query->name, query, query->s2s->lookup_srv[query->srv_i], dns_status(ctx)); } else if (result != NULL) { int i; log_debug(ZONE, "dns response for %s@%p: SRV %s %d (%d)", query->name, query, result->dnssrv_qname, result->dnssrv_nrr, result->dnssrv_ttl); for (i = 0; i < result->dnssrv_nrr; i++) { if (strlen(result->dnssrv_srv[i].name) > 0 && result->dnssrv_srv[i].port > 0 && result->dnssrv_srv[i].port < 65536) { log_debug(ZONE, "dns response for %s@%p: SRV %s[%d] %s/%d (%d/%d)", query->name, query, result->dnssrv_qname, i, result->dnssrv_srv[i].name, result->dnssrv_srv[i].port, result->dnssrv_srv[i].priority, result->dnssrv_srv[i].weight); _dns_add_host(query, result->dnssrv_srv[i].name, result->dnssrv_srv[i].port, result->dnssrv_srv[i].priority, result->dnssrv_srv[i].weight, result->dnssrv_ttl); } } free(result); } /* check next SRV service name */ query->srv_i++; if (query->srv_i < query->s2s->lookup_nsrv) { log_debug(ZONE, "dns request for %s@%p: SRV %s", query->name, query, query->s2s->lookup_srv[query->srv_i]); query->query = dns_submit_srv(NULL, query->name, query->s2s->lookup_srv[query->srv_i], "tcp", DNS_NOSRCH, _dns_result_srv, query); /* if submit failed, call ourselves with a NULL result */ if (query->query == NULL) _dns_result_srv(ctx, NULL, query); } else { /* no more SRV records to check, resolve hosts */ if (xhash_count(query->hosts) > 0) { _dns_result_a(NULL, NULL, query); /* no SRV records returned, resolve hostname */ } else { query->cur_host = strdup(query->name); query->cur_port = 5269; query->cur_prio = 0; query->cur_weight = 0; query->cur_expiry = 0; if (query->s2s->resolve_aaaa) { log_debug(ZONE, "dns request for %s@%p: AAAA %s", query->name, query, query->name); query->query = dns_submit_a6(NULL, query->name, DNS_NOSRCH, _dns_result_aaaa, query); /* if submit failed, call ourselves with a NULL result */ if (query->query == NULL) _dns_result_aaaa(ctx, NULL, query); } else { log_debug(ZONE, "dns request for 
%s@%p: A %s", query->name, query, query->name); query->query = dns_submit_a4(NULL, query->name, DNS_NOSRCH, _dns_result_a, query); /* if submit failed, call ourselves with a NULL result */ if (query->query == NULL) _dns_result_a(ctx, NULL, query); } } } } static void _dns_result_aaaa(struct dns_ctx *ctx, struct dns_rr_a6 *result, void *data) { dnsquery_t query = data; char ip[INET6_ADDRSTRLEN]; int i; assert(query != NULL); query->query = NULL; if (ctx != NULL && result == NULL) { log_debug(ZONE, "dns failure for %s@%p: AAAA %s (%d)", query->name, query, query->cur_host, dns_status(ctx)); } else if (result != NULL) { log_debug(ZONE, "dns response for %s@%p: AAAA %s %d (%d)", query->name, query, result->dnsa6_qname, result->dnsa6_nrr, result->dnsa6_ttl); if (query->cur_expiry > 0 && result->dnsa6_ttl > query->cur_expiry) result->dnsa6_ttl = query->cur_expiry; for (i = 0; i < result->dnsa6_nrr; i++) { if (inet_ntop(AF_INET6, &result->dnsa6_addr[i], ip, INET6_ADDRSTRLEN) != NULL) { log_debug(ZONE, "dns response for %s@%p: AAAA %s[%d] %s/%d", query->name, query, result->dnsa6_qname, i, ip, query->cur_port); _dns_add_result(query, ip, query->cur_port, query->cur_prio, query->cur_weight, result->dnsa6_ttl); } } } if (query->cur_host != NULL) { /* do ipv4 resolution too */ log_debug(ZONE, "dns request for %s@%p: A %s", query->name, query, query->cur_host); query->query = dns_submit_a4(NULL, query->cur_host, DNS_NOSRCH, _dns_result_a, query); /* if submit failed, call ourselves with a NULL result */ if (query->query == NULL) _dns_result_a(ctx, NULL, query); } else { /* uh-oh */ log_debug(ZONE, "dns result for %s@%p: AAAA host vanished...", query->name, query); _dns_result_a(NULL, NULL, query); } free(result); } /* try /etc/hosts if the A process did not return any results */ static int _etc_hosts_lookup(const char *cszName, char *szIP, const int ciMaxIPLen) { #define EHL_LINE_LEN 260 int iSuccess = 0; size_t iLen; char szLine[EHL_LINE_LEN + 1]; /* one extra for the space 
/* this function is called with a NULL ctx to start the A/AAAA process
 *
 * udns A callback: records returned IPv4 addresses (falling back to an
 * /etc/hosts lookup on resolver failure), then either resolves the next
 * SRV target host from query->hosts or, when the host list is empty,
 * finalizes the query: clamps TTLs to [dns_min_ttl, dns_max_ttl],
 * converts the name back from punycode and hands the result set to
 * out_resolve(), freeing the query. */
static void _dns_result_a(struct dns_ctx *ctx, struct dns_rr_a4 *result, void *data) {
    dnsquery_t query = data;
    assert(query != NULL);
    query->query = NULL;

    if (ctx != NULL && result == NULL) {
#define DRA_IP_LEN 16
        char szIP[DRA_IP_LEN];

        if (_etc_hosts_lookup (query->name, szIP, DRA_IP_LEN)) {
            log_debug(ZONE, "/etc/lookup for %s@%p: %s (%d)", query->name, query, szIP, query->s2s->etc_hosts_ttl);

            _dns_add_result (query, szIP, query->cur_port, query->cur_prio, query->cur_weight, query->s2s->etc_hosts_ttl);
        } else {
            log_debug(ZONE, "dns failure for %s@%p: A %s (%d)", query->name, query, query->cur_host, dns_status(ctx));
        }
    } else if (result != NULL) {
        char ip[INET_ADDRSTRLEN];
        int i;

        log_debug(ZONE, "dns response for %s@%p: A %s %d (%d)", query->name, query, result->dnsa4_qname, result->dnsa4_nrr, result->dnsa4_ttl);

        /* the address TTL must not exceed the SRV record's TTL */
        if (query->cur_expiry > 0 && result->dnsa4_ttl > query->cur_expiry)
            result->dnsa4_ttl = query->cur_expiry;

        for (i = 0; i < result->dnsa4_nrr; i++) {
            if (inet_ntop(AF_INET, &result->dnsa4_addr[i], ip, INET_ADDRSTRLEN) != NULL) {
                log_debug(ZONE, "dns response for %s@%p: A %s[%d] %s/%d", query->name, query, result->dnsa4_qname, i, ip, query->cur_port);
                _dns_add_result(query, ip, query->cur_port, query->cur_prio, query->cur_weight, result->dnsa4_ttl);
            }
        }

        free(result);
    }

    /* resolve the next host in the list */
    if (xhash_iter_first(query->hosts)) {
        char *ipport, *c, *tmp;
        int ipport_len, ip_len, port_len;
        dnsres_t res;
        union xhashv xhv;

        xhv.dnsres_val = &res;

        /* get the first entry */
        xhash_iter_get(query->hosts, (const char **) &ipport, &ipport_len, xhv.val);

        /* remove the host from the list */
        xhash_iter_zap(query->hosts);

        /* split "host/port" */
        c = memchr(ipport, '/', ipport_len);
        ip_len = c - ipport;
        c++;
        port_len = ipport_len - (c - ipport);

        /* resolve hostname */
        free(query->cur_host);
        query->cur_host = strndup(ipport, ip_len);
        tmp = strndup(c, port_len);
        query->cur_port = atoi(tmp);
        free(tmp);
        query->cur_prio = res->prio;
        query->cur_weight = res->weight;
        query->cur_expiry = res->expiry;

        log_debug(ZONE, "dns ttl for %s@%p limited to %d", query->name, query, query->cur_expiry);

        if (query->s2s->resolve_aaaa) {
            log_debug(ZONE, "dns request for %s@%p: AAAA %s", query->name, query, query->cur_host);
            query->query = dns_submit_a6(NULL, query->cur_host, DNS_NOSRCH, _dns_result_aaaa, query);
            /* if submit failed, call ourselves with a NULL result */
            if (query->query == NULL)
                _dns_result_aaaa(ctx, NULL, query);
        } else {
            log_debug(ZONE, "dns request for %s@%p: A %s", query->name, query, query->cur_host);
            query->query = dns_submit_a4(NULL, query->cur_host, DNS_NOSRCH, _dns_result_a, query);
            /* if submit failed, call ourselves with a NULL result */
            if (query->query == NULL)
                _dns_result_a(ctx, NULL, query);
        }

    /* finished */
    } else {
        time_t now = time(NULL);
        char *domain;

        free(query->cur_host);
        query->cur_host = NULL;

        log_debug(ZONE, "dns requests for %s@%p complete: %d (%d)", query->name, query, xhash_count(query->results), query->expiry);

        /* update query TTL */
        if (query->expiry > query->s2s->dns_max_ttl)
            query->expiry = query->s2s->dns_max_ttl;
        if (query->expiry < query->s2s->dns_min_ttl)
            query->expiry = query->s2s->dns_min_ttl;
        query->expiry += now;

        /* update result TTLs - the query expiry MUST NOT be longer than all result expiries */
        if (xhash_iter_first(query->results)) {
            union xhashv xhv;
            dnsres_t res;

            xhv.dnsres_val = &res;

            do {
                xhash_iter_get(query->results, NULL, NULL, xhv.val);

                if (res->expiry > query->s2s->dns_max_ttl)
                    res->expiry = query->s2s->dns_max_ttl;
                if (res->expiry < query->s2s->dns_min_ttl)
                    res->expiry = query->s2s->dns_min_ttl;
                res->expiry += now;
            } while(xhash_iter_next(query->results));
        }

        xhash_free(query->hosts);
        query->hosts = NULL;
        if (idna_to_unicode_8z8z(query->name, &domain, 0) != IDNA_SUCCESS) {
            log_write(query->s2s->log, LOG_ERR, "idna dns decode for %s failed", query->name);
            /* fake empty results to shortcut resolution failure */
            xhash_free(query->results);
            query->results = xhash_new(71);
            query->expiry = time(NULL) + 99999999;
            domain = strdup(query->name);
        }

        /* out_resolve takes ownership of query->results */
        out_resolve(query->s2s, domain, query->results, query->expiry);

        free(domain);
        free(query->name);
        free(query);
    }
}
if (query->query == NULL) _dns_result_aaaa(ctx, NULL, query); } else { log_debug(ZONE, "dns request for %s@%p: A %s", query->name, query, query->cur_host); query->query = dns_submit_a4(NULL, query->cur_host, DNS_NOSRCH, _dns_result_a, query); /* if submit failed, call ourselves with a NULL result */ if (query->query == NULL) _dns_result_a(ctx, NULL, query); } /* finished */ } else { time_t now = time(NULL); char *domain; free(query->cur_host); query->cur_host = NULL; log_debug(ZONE, "dns requests for %s@%p complete: %d (%d)", query->name, query, xhash_count(query->results), query->expiry); /* update query TTL */ if (query->expiry > query->s2s->dns_max_ttl) query->expiry = query->s2s->dns_max_ttl; if (query->expiry < query->s2s->dns_min_ttl) query->expiry = query->s2s->dns_min_ttl; query->expiry += now; /* update result TTLs - the query expiry MUST NOT be longer than all result expiries */ if (xhash_iter_first(query->results)) { union xhashv xhv; dnsres_t res; xhv.dnsres_val = &res; do { xhash_iter_get(query->results, NULL, NULL, xhv.val); if (res->expiry > query->s2s->dns_max_ttl) res->expiry = query->s2s->dns_max_ttl; if (res->expiry < query->s2s->dns_min_ttl) res->expiry = query->s2s->dns_min_ttl; res->expiry += now; } while(xhash_iter_next(query->results)); } xhash_free(query->hosts); query->hosts = NULL; if (idna_to_unicode_8z8z(query->name, &domain, 0) != IDNA_SUCCESS) { log_write(query->s2s->log, LOG_ERR, "idna dns decode for %s failed", query->name); /* fake empty results to shortcut resolution failure */ xhash_free(query->results); query->results = xhash_new(71); query->expiry = time(NULL) + 99999999; domain = strdup(query->name); } out_resolve(query->s2s, domain, query->results, query->expiry); free(domain); free(query->name); free(query); } } void dns_resolve_domain(s2s_t s2s, dnscache_t dns) { dnsquery_t query = (dnsquery_t) calloc(1, sizeof(struct dnsquery_st)); query->s2s = s2s; query->results = xhash_new(71); if (idna_to_ascii_8z(dns->name, 
/** responses from the resolver
 *
 * Installs the resolved result set into the DNS cache entry for domain
 * (taking ownership of results) and flushes the queued packets; on an
 * empty result set, stores a negative cache entry and bounces the
 * domain's queues instead. */
void out_resolve(s2s_t s2s, char *domain, xht results, time_t expiry) {
    dnscache_t dns;

    /* no results, resolve failed */
    if(xhash_count(results) == 0) {
        dns = xhash_get(s2s->dnscache, domain);
        if (dns != NULL) {
            /* store negative DNS cache */
            xhash_free(dns->results);
            dns->query = NULL;
            dns->results = NULL;
            dns->expiry = expiry;
            dns->pending = 0;
        }

        log_write(s2s->log, LOG_NOTICE, "dns lookup for %s failed", domain);

        /* bounce queue */
        out_bounce_domain_queues(s2s, domain, stanza_err_REMOTE_SERVER_NOT_FOUND);

        xhash_free(results);
        return;
    }

    log_write(s2s->log, LOG_NOTICE, "dns lookup for %s returned %d result%s (ttl %d)", domain, xhash_count(results), xhash_count(results)!=1?"s":"", expiry - time(NULL));

    /* get the cache entry */
    dns = xhash_get(s2s->dnscache, domain);
    if(dns == NULL) {
        /* retry using punycode — the cache may be keyed by the IDNA form */
        char *punydomain;
        if (idna_to_ascii_8z(domain, &punydomain, 0) == IDNA_SUCCESS) {
            dns = xhash_get(s2s->dnscache, punydomain);
            free(punydomain);
        }
    }
    if(dns == NULL) {
        /* NOTE(review): results is not freed on this path — confirm whether
         * this can happen outside of a programming error */
        log_write(s2s->log, LOG_ERR, "weird, never requested %s resolution", domain);
        return;
    }

    /* fill it out */
    xhash_free(dns->results);
    dns->query = NULL;
    dns->results = results;
    dns->expiry = expiry;
    dns->pending = 0;

    out_flush_domain_queues(s2s, domain);

    /* delete the cache entry if caching is disabled */
    if (!s2s->dns_cache_enabled && !dns->pending) {
        xhash_free(dns->results);
        xhash_zap(s2s->dnscache, domain);
        free(dns);
    }
}
/** mio callback for outgoing conns
 *
 * READ/WRITE: update the activity timestamp and hand off to SX (a READ
 * with zero pending bytes means the peer closed — kill the stream).
 * CLOSE: unregister the connection from the host/dest hashes, then for
 * every route it carried either retry delivery on a fresh connection or
 * bounce the queued packets, and finally park conn/stream on the dead
 * lists for later cleanup. */
static int _out_mio_callback(mio_t m, mio_action_t a, mio_fd_t fd, void *data, void *arg) {
    conn_t out = (conn_t) arg;
    char ipport[INET6_ADDRSTRLEN + 17];
    int nbytes;

    switch(a) {
        case action_READ:
            log_debug(ZONE, "read action on fd %d", fd->fd);

            /* they did something */
            out->last_activity = time(NULL);

            ioctl(fd->fd, FIONREAD, &nbytes);
            if(nbytes == 0) {
                sx_kill(out->s);
                return 0;
            }

            return sx_can_read(out->s);

        case action_WRITE:
            log_debug(ZONE, "write action on fd %d", fd->fd);

            /* update activity timestamp */
            out->last_activity = time(NULL);

            return sx_can_write(out->s);

        case action_CLOSE:
            log_debug(ZONE, "close action on fd %d", fd->fd);

            jqueue_push(out->s2s->dead, (void *) out->s, 0);

            log_write(out->s2s->log, LOG_NOTICE, "[%d] [%s, port=%d] disconnect, packets: %i", fd->fd, out->ip, out->port, out->packet_count);

            if (out->s2s->out_reuse) {
                /* generate the ip/port pair */
                snprintf(ipport, INET6_ADDRSTRLEN + 16, "%s/%d", out->ip, out->port);

                xhash_zap(out->s2s->out_host, ipport);
            }

            if (xhash_iter_first(out->routes)) {
                char *rkey;
                int rkeylen;
                char *c;
                int c_len;

                /* remove all the out_dest entries */
                do {
                    xhash_iter_get(out->routes, (const char **) &rkey, &rkeylen, NULL);
                    c = memchr(rkey, '/', rkeylen);
                    c++;
                    c_len = rkeylen - (c - rkey);

                    log_debug(ZONE, "route '%.*s'", rkeylen, rkey);
                    if (xhash_getx(out->s2s->out_dest, c, c_len) != NULL) {
                        log_debug(ZONE, "removing dest entry for '%.*s'", c_len, c);
                        xhash_zapx(out->s2s->out_dest, c, c_len);
                    }
                } while(xhash_iter_next(out->routes));
            }

            if (xhash_iter_first(out->routes)) {
                char *rkey;
                int rkeylen;
                jqueue_t q;
                int npkt;

                /* retry all the routes */
                do {
                    xhash_iter_get(out->routes, (const char **) &rkey, &rkeylen, NULL);

                    /* give up after the configured retry window */
                    q = xhash_getx(out->s2s->outq, rkey, rkeylen);
                    if (out->s2s->retry_limit > 0 && q != NULL && jqueue_age(q) > out->s2s->retry_limit) {
                        log_write(out->s2s->log, LOG_NOTICE, "[%d] [%s, port=%d] retry limit reached for '%.*s' queue", fd->fd, out->ip, out->port, rkeylen, rkey);
                        q = NULL;
                    }

                    if (q != NULL && (npkt = jqueue_size(q)) > 0 && xhash_get(out->states, rkey) != (void*) conn_INPROGRESS) {
                        conn_t retry;

                        log_debug(ZONE, "retrying connection for '%.*s' queue", rkeylen, rkey);
                        if (!out_route(out->s2s, rkey, rkeylen, &retry, 0)) {
                            log_debug(ZONE, "retry successful");
                            if (retry != NULL) {
                                /* flush queue */
                                out_flush_route_queue(out->s2s, rkey, rkeylen);
                            }
                        } else {
                            log_debug(ZONE, "retry failed");
                            /* bounce queue */
                            out_bounce_route_queue(out->s2s, rkey, rkeylen, stanza_err_SERVICE_UNAVAILABLE);
                            _out_dns_mark_bad(out);
                        }
                    } else {
                        /* bounce queue */
                        out_bounce_route_queue(out->s2s, rkey, rkeylen, stanza_err_REMOTE_SERVER_TIMEOUT);
                        _out_dns_mark_bad(out);
                    }
                } while(xhash_iter_next(out->routes));
            }

            jqueue_push(out->s2s->dead_conn, (void *) out, 0);

            /* fall through */
        case action_ACCEPT:
            break;
    }

    return 0;
}
/* Start dialback for every route carried by this connection, first
 * clearing any bad-host entry for its ip/port (the connection evidently
 * works now). */
void send_dialbacks(conn_t out)
{
    char *rkey;
    int rkeylen;

    if (out->s2s->dns_bad_timeout > 0) {
        /* confirm the connection is working again */
        dnsres_t bad = xhash_get(out->s2s->dns_bad, out->key);
        if (bad != NULL) {
            log_debug(ZONE, "removing bad host entry for '%s'", out->key);
            xhash_zap(out->s2s->dns_bad, out->key);
            free(bad->key);
            free(bad);
        }
    }

    if (xhash_iter_first(out->routes)) {
        log_debug(ZONE, "sending dialback packets for %s", out->key);
        do {
            xhash_iter_get(out->routes, (const char **) &rkey, &rkeylen, NULL);
            _out_dialback(out, rkey, rkeylen);
        } while(xhash_iter_next(out->routes));
    }

    return;
}

static int _out_sx_callback(sx_t s, sx_event_t e, void *data, void *arg) {
    conn_t out = (conn_t) arg;
    sx_buf_t buf = (sx_buf_t) data;
    int len, ns, elem, starttls = 0;
    sx_error_t *sxe;
    nad_t nad;

    switch(e) {
        case event_WANT_READ:
            log_debug(ZONE, "want read");
            mio_read(out->s2s->mio, out->fd);
            break;

        case event_WANT_WRITE:
            log_debug(ZONE, "want write");
            mio_write(out->s2s->mio, out->fd);
            break;

        case event_READ:
            log_debug(ZONE, "reading from %d", out->fd->fd);

            /* do the
read */ len = recv(out->fd->fd, buf->data, buf->len, 0); if(len < 0) { if(MIO_WOULDBLOCK) { buf->len = 0; return 0; } log_write(out->s2s->log, LOG_NOTICE, "[%d] [%s, port=%d] read error: %s (%d)", out->fd->fd, out->ip, out->port, MIO_STRERROR(MIO_ERROR), MIO_ERROR); if (!out->online) { _out_dns_mark_bad(out); } sx_kill(s); return -1; } else if(len == 0) { /* they went away */ sx_kill(s); return -1; } log_debug(ZONE, "read %d bytes", len); buf->len = len; return len; case event_WRITE: log_debug(ZONE, "writing to %d", out->fd->fd); len = send(out->fd->fd, buf->data, buf->len, 0); if(len >= 0) { log_debug(ZONE, "%d bytes written", len); return len; } if(MIO_WOULDBLOCK) return 0; log_write(out->s2s->log, LOG_NOTICE, "[%d] [%s, port=%d] write error: %s (%d)", out->fd->fd, out->ip, out->port, MIO_STRERROR(MIO_ERROR), MIO_ERROR); if (!out->online) { _out_dns_mark_bad(out); } sx_kill(s); return -1; case event_ERROR: sxe = (sx_error_t *) data; log_write(out->s2s->log, LOG_NOTICE, "[%d] [%s, port=%d] error: %s (%s)", out->fd->fd, out->ip, out->port, sxe->generic, sxe->specific); /* mark as bad if we did not manage to connect or there is unrecoverable stream error */ if (!out->online || (sxe->code == SX_ERR_STREAM && (strstr(sxe->specific, "host-gone") || /* it's not there now */ strstr(sxe->specific, "host-unknown") || /* they do not service the host */ strstr(sxe->specific, "not-authorized") || /* they do not want us there */ strstr(sxe->specific, "see-other-host") || /* we do not support redirections yet */ strstr(sxe->specific, "system-shutdown") || /* they are going down */ strstr(sxe->specific, "policy-violation") || /* they do not want us there */ strstr(sxe->specific, "remote-connection-failed") || /* the required remote entity is gone */ strstr(sxe->specific, "unsupported-encoding") || /* they do not like our encoding */ strstr(sxe->specific, "undefined-condition") || /* something bad happend */ strstr(sxe->specific, "internal-server-error") || /* that server is 
broken */ strstr(sxe->specific, "unsupported-version") /* they do not support our stream version */ ))) { _out_dns_mark_bad(out); } sx_kill(s); return -1; case event_OPEN: log_debug(ZONE, "OPEN event for %s", out->key); break; case event_STREAM: /* check stream version - NULl = pre-xmpp (some jabber1 servers) */ log_debug(ZONE, "STREAM event for %s stream version is %s", out->key, out->s->res_version); /* first time, bring them online */ if(!out->online) { log_debug(ZONE, "outgoing conn to %s is online", out->key); /* if no stream version from either side, kick off dialback for each route, */ /* otherwise wait for stream features */ if (((out->s->res_version==NULL) || (out->s2s->sx_ssl == NULL)) && out->s2s->require_tls == 0) { log_debug(ZONE, "no stream version, sending dialbacks for %s immediately", out->key); out->online = 1; send_dialbacks(out); } else log_debug(ZONE, "outgoing conn to %s - waiting for STREAM features", out->key); } break; case event_PACKET: /* we're counting packets */ out->packet_count++; out->s2s->packet_count++; nad = (nad_t) data; /* watch for the features packet - STARTTLS and/or SASL*/ if ((out->s->res_version!=NULL) && NAD_NURI_L(nad, NAD_ENS(nad, 0)) == strlen(uri_STREAMS) && strncmp(uri_STREAMS, NAD_NURI(nad, NAD_ENS(nad, 0)), strlen(uri_STREAMS)) == 0 && NAD_ENAME_L(nad, 0) == 8 && strncmp("features", NAD_ENAME(nad, 0), 8) == 0) { log_debug(ZONE, "got the stream features packet"); #ifdef HAVE_SSL /* starttls if we can */ if(out->s2s->sx_ssl != NULL && s->ssf == 0) { ns = nad_find_scoped_namespace(nad, uri_TLS, NULL); if(ns >= 0) { elem = nad_find_elem(nad, 0, ns, "starttls", 1); if(elem >= 0) { log_debug(ZONE, "got STARTTLS in stream features"); if(sx_ssl_client_starttls(out->s2s->sx_ssl, s, out->s2s->local_pemfile) == 0) { starttls = 1; nad_free(nad); return 0; } log_write(out->s2s->log, LOG_ERR, "unable to establish encrypted session with peer"); } } } /* If we're not establishing a starttls connection, send dialbacks */ if 
(!starttls) { if (out->s2s->require_tls == 0 || s->ssf > 0) { log_debug(ZONE, "No STARTTLS, sending dialbacks for %s", out->key); out->online = 1; send_dialbacks(out); } else { log_debug(ZONE, "No STARTTLS, dialbacks disabled for non-TLS connections, cannot complete negotiation"); } } #else if (out->s2s->require_tls == 0) { out->online = 1; send_dialbacks(out); } #endif } /* we only accept dialback packets */ if(NAD_ENS(nad, 0) < 0 || NAD_NURI_L(nad, NAD_ENS(nad, 0)) != uri_DIALBACK_L || strncmp(uri_DIALBACK, NAD_NURI(nad, NAD_ENS(nad, 0)), uri_DIALBACK_L) != 0) { log_debug(ZONE, "got a non-dialback packet on an outgoing conn, dropping it"); nad_free(nad); return 0; } /* and then only result and verify */ if(NAD_ENAME_L(nad, 0) == 6) { if(strncmp("result", NAD_ENAME(nad, 0), 6) == 0) { _out_result(out, nad); return 0; } if(strncmp("verify", NAD_ENAME(nad, 0), 6) == 0) { _out_verify(out, nad); return 0; } } log_debug(ZONE, "unknown dialback packet, dropping it"); nad_free(nad); return 0; case event_CLOSED: if (out->fd != NULL) { mio_close(out->s2s->mio, out->fd); out->fd = NULL; } return -1; } return 0; } /** process incoming auth responses */ static void _out_result(conn_t out, nad_t nad) { int attr; jid_t from, to; char *rkey; int rkeylen; attr = nad_find_attr(nad, 0, -1, "from", NULL); if(attr < 0 || (from = jid_new(NAD_AVAL(nad, attr), NAD_AVAL_L(nad, attr))) == NULL) { log_debug(ZONE, "missing or invalid from on db result packet"); nad_free(nad); return; } attr = nad_find_attr(nad, 0, -1, "to", NULL); if(attr < 0 || (to = jid_new(NAD_AVAL(nad, attr), NAD_AVAL_L(nad, attr))) == NULL) { log_debug(ZONE, "missing or invalid to on db result packet"); jid_free(from); nad_free(nad); return; } rkey = s2s_route_key(NULL, to->domain, from->domain); rkeylen = strlen(rkey); /* key is valid */ if(nad_find_attr(nad, 0, -1, "type", "valid") >= 0 && xhash_get(out->states, rkey) == (void*) conn_INPROGRESS) { log_write(out->s2s->log, LOG_NOTICE, "[%d] [%s, port=%d] outgoing 
route '%s' is now valid%s%s", out->fd->fd, out->ip, out->port, rkey, (out->s->flags & SX_SSL_WRAPPER) ? ", TLS negotiated" : "", out->s->compressed ? ", ZLIB compression enabled" : ""); xhash_put(out->states, pstrdup(xhash_pool(out->states), rkey), (void *) conn_VALID); /* !!! small leak here */ log_debug(ZONE, "%s valid, flushing queue", rkey); /* flush the queue */ out_flush_route_queue(out->s2s, rkey, rkeylen); free(rkey); jid_free(from); jid_free(to); nad_free(nad); return; } /* invalid */ log_write(out->s2s->log, LOG_NOTICE, "[%d] [%s, port=%d] outgoing route '%s' is now invalid", out->fd->fd, out->ip, out->port, rkey); /* close connection */ log_write(out->s2s->log, LOG_NOTICE, "[%d] [%s, port=%d] closing connection", out->fd->fd, out->ip, out->port); /* report stream error */ sx_error(out->s, stream_err_INVALID_ID, "dialback negotiation failed"); /* close the stream */ sx_close(out->s); /* bounce queue */ out_bounce_route_queue(out->s2s, rkey, rkeylen, stanza_err_SERVICE_UNAVAILABLE); free(rkey); jid_free(from); jid_free(to); nad_free(nad); } /** incoming stream authenticated */ static void _out_verify(conn_t out, nad_t nad) { int attr, ns; jid_t from, to; conn_t in; char *rkey; int valid; attr = nad_find_attr(nad, 0, -1, "from", NULL); if(attr < 0 || (from = jid_new(NAD_AVAL(nad, attr), NAD_AVAL_L(nad, attr))) == NULL) { log_debug(ZONE, "missing or invalid from on db verify packet"); nad_free(nad); return; } attr = nad_find_attr(nad, 0, -1, "to", NULL); if(attr < 0 || (to = jid_new(NAD_AVAL(nad, attr), NAD_AVAL_L(nad, attr))) == NULL) { log_debug(ZONE, "missing or invalid to on db verify packet"); jid_free(from); nad_free(nad); return; } attr = nad_find_attr(nad, 0, -1, "id", NULL); if(attr < 0) { log_debug(ZONE, "missing id on db verify packet"); jid_free(from); jid_free(to); nad_free(nad); return; } /* get the incoming conn */ in = xhash_getx(out->s2s->in, NAD_AVAL(nad, attr), NAD_AVAL_L(nad, attr)); if(in == NULL) { log_debug(ZONE, "got a verify for 
incoming conn %.*s, but it doesn't exist, dropping the packet", NAD_AVAL_L(nad, attr), NAD_AVAL(nad, attr)); jid_free(from); jid_free(to); nad_free(nad); return; } rkey = s2s_route_key(NULL, to->domain, from->domain); attr = nad_find_attr(nad, 0, -1, "type", "valid"); if(attr >= 0 && xhash_get(in->states, rkey) == (void*) conn_INPROGRESS) { xhash_put(in->states, pstrdup(xhash_pool(in->states), rkey), (void *) conn_VALID); log_write(in->s2s->log, LOG_NOTICE, "[%d] [%s, port=%d] incoming route '%s' is now valid%s%s", in->fd->fd, in->ip, in->port, rkey, (in->s->flags & SX_SSL_WRAPPER) ? ", TLS negotiated" : "", in->s->compressed ? ", ZLIB compression enabled" : ""); valid = 1; } else { log_write(in->s2s->log, LOG_NOTICE, "[%d] [%s, port=%d] incoming route '%s' is now invalid", in->fd->fd, in->ip, in->port, rkey); valid = 0; } free(rkey); nad_free(nad); /* decrement outstanding verify counter */ --out->verify; /* let them know what happened */ nad = nad_new(); ns = nad_add_namespace(nad, uri_DIALBACK, "db"); nad_append_elem(nad, ns, "result", 0); nad_append_attr(nad, -1, "to", from->domain); nad_append_attr(nad, -1, "from", to->domain); nad_append_attr(nad, -1, "type", valid ? 
"valid" : "invalid"); /* off it goes */ sx_nad_write(in->s, nad); /* if invalid, close the stream */ if (!valid) { /* generate stream error */ sx_error(in->s, stream_err_INVALID_ID, "dialback negotiation failed"); /* close the incoming stream */ sx_close(in->s); } jid_free(from); jid_free(to); } /* bounce all packets in the queues for domain */ int out_bounce_domain_queues(s2s_t s2s, const char *domain, int err) { char *rkey; int rkeylen; int pktcount = 0; if (xhash_iter_first(s2s->outq)) { do { xhash_iter_get(s2s->outq, (const char **) &rkey, &rkeylen, NULL); if(s2s_route_key_match(NULL, (char *) domain, rkey, rkeylen)) pktcount += out_bounce_route_queue(s2s, rkey, rkeylen, err); } while(xhash_iter_next(s2s->outq)); } return pktcount; } /* bounce all packets in the queue for route */ int out_bounce_route_queue(s2s_t s2s, char *rkey, int rkeylen, int err) { jqueue_t q; pkt_t pkt; int pktcount = 0; q = xhash_getx(s2s->outq, rkey, rkeylen); if(q == NULL) return 0; while((pkt = jqueue_pull(q)) != NULL) { /* only packets with content, in namespace jabber:client and not already errors */ if(pkt->nad->ecur > 1 && NAD_NURI_L(pkt->nad, NAD_ENS(pkt->nad, 1)) == strlen(uri_CLIENT) && strncmp(NAD_NURI(pkt->nad, NAD_ENS(pkt->nad, 1)), uri_CLIENT, strlen(uri_CLIENT)) == 0 && nad_find_attr(pkt->nad, 0, -1, "error", NULL) < 0) { sx_nad_write(s2s->router, stanza_tofrom(stanza_tofrom(stanza_error(pkt->nad, 1, err), 1), 0)); pktcount++; } else nad_free(pkt->nad); jid_free(pkt->to); jid_free(pkt->from); free(pkt); } /* delete queue and remove domain from queue hash */ log_debug(ZONE, "deleting out packet queue for %.*s", rkeylen, rkey); rkey = q->key; jqueue_free(q); xhash_zap(s2s->outq, rkey); free(rkey); return pktcount; } int out_bounce_conn_queues(conn_t out, int err) { char *rkey; int rkeylen; int pktcount = 0; /* bounce queues for all domains handled by this connection - iterate through routes */ if (xhash_iter_first(out->routes)) { do { xhash_iter_get(out->routes, (const char 
**) &rkey, &rkeylen, NULL); pktcount += out_bounce_route_queue(out->s2s, rkey, rkeylen, err); } while(xhash_iter_next(out->routes)); } return pktcount; } void out_flush_domain_queues(s2s_t s2s, const char *domain) { char *rkey; int rkeylen; char *c; int c_len; if (xhash_iter_first(s2s->outq)) { do { xhash_iter_get(s2s->outq, (const char **) &rkey, &rkeylen, NULL); c = memchr(rkey, '/', rkeylen); c++; c_len = rkeylen - (c - rkey); if (strncmp(domain, c, c_len) == 0) out_flush_route_queue(s2s, rkey, rkeylen); } while(xhash_iter_next(s2s->outq)); } } void out_flush_route_queue(s2s_t s2s, char *rkey, int rkeylen) { jqueue_t q; pkt_t pkt; int npkt, i, ret; q = xhash_getx(s2s->outq, rkey, rkeylen); if(q == NULL) return; npkt = jqueue_size(q); log_debug(ZONE, "flushing %d packets for '%.*s' to out_packet", npkt, rkeylen, rkey); for(i = 0; i < npkt; i++) { pkt = jqueue_pull(q); if(pkt) { ret = out_packet(s2s, pkt); if (ret) { /* uh-oh. the queue was deleted... q and pkt have been freed if q->key == rkey, rkey has also been freed */ return; } } } /* delete queue for route and remove route from queue hash */ if (jqueue_size(q) == 0) { log_debug(ZONE, "deleting out packet queue for '%.*s'", rkeylen, rkey); rkey = q->key; jqueue_free(q); xhash_zap(s2s->outq, rkey); free(rkey); } else { log_debug(ZONE, "emptied queue gained more packets..."); } }
static void _out_verify(conn_t out, nad_t nad) { int attr, ns; jid_t from, to; conn_t in; char *rkey; int valid; attr = nad_find_attr(nad, 0, -1, "from", NULL); if(attr < 0 || (from = jid_new(NAD_AVAL(nad, attr), NAD_AVAL_L(nad, attr))) == NULL) { log_debug(ZONE, "missing or invalid from on db verify packet"); nad_free(nad); return; } attr = nad_find_attr(nad, 0, -1, "to", NULL); if(attr < 0 || (to = jid_new(NAD_AVAL(nad, attr), NAD_AVAL_L(nad, attr))) == NULL) { log_debug(ZONE, "missing or invalid to on db verify packet"); jid_free(from); nad_free(nad); return; } attr = nad_find_attr(nad, 0, -1, "id", NULL); if(attr < 0) { log_debug(ZONE, "missing id on db verify packet"); jid_free(from); jid_free(to); nad_free(nad); return; } /* get the incoming conn */ in = xhash_getx(out->s2s->in, NAD_AVAL(nad, attr), NAD_AVAL_L(nad, attr)); if(in == NULL) { log_debug(ZONE, "got a verify for incoming conn %.*s, but it doesn't exist, dropping the packet", NAD_AVAL_L(nad, attr), NAD_AVAL(nad, attr)); jid_free(from); jid_free(to); nad_free(nad); return; } rkey = s2s_route_key(NULL, to->domain, from->domain); attr = nad_find_attr(nad, 0, -1, "type", "valid"); if(attr >= 0) { xhash_put(in->states, pstrdup(xhash_pool(in->states), rkey), (void *) conn_VALID); log_write(in->s2s->log, LOG_NOTICE, "[%d] [%s, port=%d] incoming route '%s' is now valid%s%s", in->fd->fd, in->ip, in->port, rkey, (in->s->flags & SX_SSL_WRAPPER) ? ", TLS negotiated" : "", in->s->compressed ? 
", ZLIB compression enabled" : ""); valid = 1; } else { log_write(in->s2s->log, LOG_NOTICE, "[%d] [%s, port=%d] incoming route '%s' is now invalid", in->fd->fd, in->ip, in->port, rkey); valid = 0; } free(rkey); nad_free(nad); /* decrement outstanding verify counter */ --out->verify; /* let them know what happened */ nad = nad_new(); ns = nad_add_namespace(nad, uri_DIALBACK, "db"); nad_append_elem(nad, ns, "result", 0); nad_append_attr(nad, -1, "to", from->domain); nad_append_attr(nad, -1, "from", to->domain); nad_append_attr(nad, -1, "type", valid ? "valid" : "invalid"); /* off it goes */ sx_nad_write(in->s, nad); /* if invalid, close the stream */ if (!valid) { /* generate stream error */ sx_error(in->s, stream_err_INVALID_ID, "dialback negotiation failed"); /* close the incoming stream */ sx_close(in->s); } jid_free(from); jid_free(to); }
static void _out_verify(conn_t out, nad_t nad) { int attr, ns; jid_t from, to; conn_t in; char *rkey; int valid; attr = nad_find_attr(nad, 0, -1, "from", NULL); if(attr < 0 || (from = jid_new(NAD_AVAL(nad, attr), NAD_AVAL_L(nad, attr))) == NULL) { log_debug(ZONE, "missing or invalid from on db verify packet"); nad_free(nad); return; } attr = nad_find_attr(nad, 0, -1, "to", NULL); if(attr < 0 || (to = jid_new(NAD_AVAL(nad, attr), NAD_AVAL_L(nad, attr))) == NULL) { log_debug(ZONE, "missing or invalid to on db verify packet"); jid_free(from); nad_free(nad); return; } attr = nad_find_attr(nad, 0, -1, "id", NULL); if(attr < 0) { log_debug(ZONE, "missing id on db verify packet"); jid_free(from); jid_free(to); nad_free(nad); return; } /* get the incoming conn */ in = xhash_getx(out->s2s->in, NAD_AVAL(nad, attr), NAD_AVAL_L(nad, attr)); if(in == NULL) { log_debug(ZONE, "got a verify for incoming conn %.*s, but it doesn't exist, dropping the packet", NAD_AVAL_L(nad, attr), NAD_AVAL(nad, attr)); jid_free(from); jid_free(to); nad_free(nad); return; } rkey = s2s_route_key(NULL, to->domain, from->domain); attr = nad_find_attr(nad, 0, -1, "type", "valid"); if(attr >= 0 && xhash_get(in->states, rkey) == (void*) conn_INPROGRESS) { xhash_put(in->states, pstrdup(xhash_pool(in->states), rkey), (void *) conn_VALID); log_write(in->s2s->log, LOG_NOTICE, "[%d] [%s, port=%d] incoming route '%s' is now valid%s%s", in->fd->fd, in->ip, in->port, rkey, (in->s->flags & SX_SSL_WRAPPER) ? ", TLS negotiated" : "", in->s->compressed ? 
", ZLIB compression enabled" : ""); valid = 1; } else { log_write(in->s2s->log, LOG_NOTICE, "[%d] [%s, port=%d] incoming route '%s' is now invalid", in->fd->fd, in->ip, in->port, rkey); valid = 0; } free(rkey); nad_free(nad); /* decrement outstanding verify counter */ --out->verify; /* let them know what happened */ nad = nad_new(); ns = nad_add_namespace(nad, uri_DIALBACK, "db"); nad_append_elem(nad, ns, "result", 0); nad_append_attr(nad, -1, "to", from->domain); nad_append_attr(nad, -1, "from", to->domain); nad_append_attr(nad, -1, "type", valid ? "valid" : "invalid"); /* off it goes */ sx_nad_write(in->s, nad); /* if invalid, close the stream */ if (!valid) { /* generate stream error */ sx_error(in->s, stream_err_INVALID_ID, "dialback negotiation failed"); /* close the incoming stream */ sx_close(in->s); } jid_free(from); jid_free(to); }
{'added': [(1664, ' if(nad_find_attr(nad, 0, -1, "type", "valid") >= 0 && xhash_get(out->states, rkey) == (void*) conn_INPROGRESS) {'), (1752, ' if(attr >= 0 && xhash_get(in->states, rkey) == (void*) conn_INPROGRESS) {')], 'deleted': [(1664, ' if(nad_find_attr(nad, 0, -1, "type", "valid") >= 0) {'), (1752, ' if(attr >= 0) {')]}
2
2
1,278
10,866
62
564
12
https://github.com/Jabberd2/jabberd2
CVE-2012-3525
CWE-20
603
dbdimp.c
C
dbd_st_prepare
/* * DBD::mysql - DBI driver for the mysql database * * Copyright (c) 2004-2014 Patrick Galbraith * Copyright (c) 2013-2014 Michiel Beijen * Copyright (c) 2004-2007 Alexey Stroganov * Copyright (c) 2003-2005 Rudolf Lippan * Copyright (c) 1997-2003 Jochen Wiedmann * * You may distribute this under the terms of either the GNU General Public * License or the Artistic License, as specified in the Perl README file. */ #ifdef WIN32 #include "windows.h" #include "winsock.h" #endif #include "dbdimp.h" #if defined(WIN32) && defined(WORD) #undef WORD typedef short WORD; #endif #ifdef WIN32 #define MIN min #else #ifndef MIN #define MIN(a, b) ((a) < (b) ? (a) : (b)) #endif #endif #if MYSQL_ASYNC # include <poll.h> # include <errno.h> # define ASYNC_CHECK_RETURN(h, value)\ if(imp_dbh->async_query_in_flight) {\ do_error(h, 2000, "Calling a synchronous function on an asynchronous handle", "HY000");\ return (value);\ } #else # define ASYNC_CHECK_RETURN(h, value) #endif static int parse_number(char *string, STRLEN len, char **end); DBISTATE_DECLARE; typedef struct sql_type_info_s { const char *type_name; int data_type; int column_size; const char *literal_prefix; const char *literal_suffix; const char *create_params; int nullable; int case_sensitive; int searchable; int unsigned_attribute; int fixed_prec_scale; int auto_unique_value; const char *local_type_name; int minimum_scale; int maximum_scale; int num_prec_radix; int sql_datatype; int sql_datetime_sub; int interval_precision; int native_type; int is_num; } sql_type_info_t; /* This function manually counts the number of placeholders in an SQL statement, used for emulated prepare statements < 4.1.3 */ static int count_params(imp_xxh_t *imp_xxh, pTHX_ char *statement, bool bind_comment_placeholders) { bool comment_end= false; char* ptr= statement; int num_params= 0; int comment_length= 0; char c; if (DBIc_DBISTATE(imp_xxh)->debug >= 2) PerlIO_printf(DBIc_LOGPIO(imp_xxh), ">count_params statement %s\n", statement); while ( (c = 
*ptr++) ) { switch (c) { /* so, this is a -- comment, so let's burn up characters */ case '-': { if (bind_comment_placeholders) { c = *ptr++; break; } else { comment_length= 1; /* let's see if the next one is a dash */ c = *ptr++; if (c == '-') { /* if two dashes, ignore everything until newline */ while ((c = *ptr)) { if (DBIc_DBISTATE(imp_xxh)->debug >= 2) PerlIO_printf(DBIc_LOGPIO(imp_xxh), "%c\n", c); ptr++; comment_length++; if (c == '\n') { comment_end= true; break; } } /* if not comment_end, the comment never ended and we need to iterate back to the beginning of where we started and let the database handle whatever is in the statement */ if (! comment_end) ptr-= comment_length; } /* otherwise, only one dash/hyphen, backtrack by one */ else ptr--; break; } } /* c-type comments */ case '/': { if (bind_comment_placeholders) { c = *ptr++; break; } else { c = *ptr++; /* let's check if the next one is an asterisk */ if (c == '*') { comment_length= 0; comment_end= false; /* ignore everything until closing comment */ while ((c= *ptr)) { ptr++; comment_length++; if (c == '*') { c = *ptr++; /* alas, end of comment */ if (c == '/') { comment_end= true; break; } /* nope, just an asterisk, not so fast, not end of comment, go back one */ else ptr--; } } /* if the end of the comment was never found, we have to backtrack to wherever we first started skipping over the possible comment. This means we will pass the statement to the database to see its own fate and issue the error */ if (!comment_end) ptr -= comment_length; } else ptr--; break; } } case '`': case '"': case '\'': /* Skip string */ { char end_token = c; while ((c = *ptr) && c != end_token) { if (c == '\\') if (! 
*(++ptr)) continue; ++ptr; } if (c) ++ptr; break; } case '?': ++num_params; break; default: break; } } return num_params; } /* allocate memory in statement handle per number of placeholders */ static imp_sth_ph_t *alloc_param(int num_params) { imp_sth_ph_t *params; if (num_params) Newz(908, params, (unsigned int) num_params, imp_sth_ph_t); else params= NULL; return params; } #if MYSQL_VERSION_ID >= SERVER_PREPARE_VERSION /* allocate memory in MYSQL_BIND bind structure per number of placeholders */ static MYSQL_BIND *alloc_bind(int num_params) { MYSQL_BIND *bind; if (num_params) Newz(908, bind, (unsigned int) num_params, MYSQL_BIND); else bind= NULL; return bind; } /* allocate memory in fbind imp_sth_phb_t structure per number of placeholders */ static imp_sth_phb_t *alloc_fbind(int num_params) { imp_sth_phb_t *fbind; if (num_params) Newz(908, fbind, (unsigned int) num_params, imp_sth_phb_t); else fbind= NULL; return fbind; } /* alloc memory for imp_sth_fbh_t fbuffer per number of fields */ static imp_sth_fbh_t *alloc_fbuffer(int num_fields) { imp_sth_fbh_t *fbh; if (num_fields) Newz(908, fbh, (unsigned int) num_fields, imp_sth_fbh_t); else fbh= NULL; return fbh; } /* free MYSQL_BIND bind struct */ static void free_bind(MYSQL_BIND *bind) { if (bind) Safefree(bind); } /* free imp_sth_phb_t fbind structure */ static void free_fbind(imp_sth_phb_t *fbind) { if (fbind) Safefree(fbind); } /* free imp_sth_fbh_t fbh structure */ static void free_fbuffer(imp_sth_fbh_t *fbh) { if (fbh) Safefree(fbh); } #endif /* free statement param structure per num_params */ static void free_param(pTHX_ imp_sth_ph_t *params, int num_params) { if (params) { int i; for (i= 0; i < num_params; i++) { imp_sth_ph_t *ph= params+i; if (ph->value) { (void) SvREFCNT_dec(ph->value); ph->value= NULL; } } Safefree(params); } } /* Convert a MySQL type to a type that perl can handle NOTE: In the future we may want to return a struct with a lot of information for each type */ static enum enum_field_types 
mysql_to_perl_type(enum enum_field_types type) { static enum enum_field_types enum_type; switch (type) { case MYSQL_TYPE_DOUBLE: case MYSQL_TYPE_FLOAT: enum_type= MYSQL_TYPE_DOUBLE; break; case MYSQL_TYPE_SHORT: case MYSQL_TYPE_TINY: case MYSQL_TYPE_LONG: case MYSQL_TYPE_INT24: case MYSQL_TYPE_YEAR: #if IVSIZE >= 8 case MYSQL_TYPE_LONGLONG: #endif enum_type= MYSQL_TYPE_LONG; break; #if MYSQL_VERSION_ID > NEW_DATATYPE_VERSION case MYSQL_TYPE_BIT: enum_type= MYSQL_TYPE_BIT; break; #endif #if MYSQL_VERSION_ID > NEW_DATATYPE_VERSION case MYSQL_TYPE_NEWDECIMAL: #endif case MYSQL_TYPE_DECIMAL: enum_type= MYSQL_TYPE_DECIMAL; break; #if IVSIZE < 8 case MYSQL_TYPE_LONGLONG: #endif case MYSQL_TYPE_DATE: case MYSQL_TYPE_TIME: case MYSQL_TYPE_DATETIME: case MYSQL_TYPE_NEWDATE: case MYSQL_TYPE_TIMESTAMP: case MYSQL_TYPE_VAR_STRING: #if MYSQL_VERSION_ID > NEW_DATATYPE_VERSION case MYSQL_TYPE_VARCHAR: #endif case MYSQL_TYPE_STRING: enum_type= MYSQL_TYPE_STRING; break; #if MYSQL_VERSION_ID > GEO_DATATYPE_VERSION case MYSQL_TYPE_GEOMETRY: #endif case MYSQL_TYPE_BLOB: case MYSQL_TYPE_TINY_BLOB: enum_type= MYSQL_TYPE_BLOB; break; default: enum_type= MYSQL_TYPE_STRING; /* MySQL can handle all types as strings */ } return(enum_type); } #if defined(DBD_MYSQL_EMBEDDED) /* count embedded options */ int count_embedded_options(char *st) { int rc; char c; char *ptr; ptr= st; rc= 0; if (st) { while ((c= *ptr++)) { if (c == ',') rc++; } rc++; } return rc; } /* Free embedded options */ int free_embedded_options(char ** options_list, int options_count) { int i; for (i= 0; i < options_count; i++) { if (options_list[i]) free(options_list[i]); } free(options_list); return 1; } /* Print out embedded option settings */ int print_embedded_options(PerlIO *stream, char ** options_list, int options_count) { int i; for (i=0; i<options_count; i++) { if (options_list[i]) PerlIO_printf(stream, "Embedded server, parameter[%d]=%s\n", i, options_list[i]); } return 1; } /* */ char 
**fill_out_embedded_options(PerlIO *stream, char *options, int options_type, int slen, int cnt) { int ind, len; char c; char *ptr; char **options_list= NULL; if (!(options_list= (char **) calloc(cnt, sizeof(char *)))) { PerlIO_printf(stream, "Initialize embedded server. Out of memory \n"); return NULL; } ptr= options; ind= 0; if (options_type == 0) { /* server_groups list NULL terminated */ options_list[cnt]= (char *) NULL; } if (options_type == 1) { /* first item in server_options list is ignored. fill it with \0 */ if (!(options_list[0]= calloc(1,sizeof(char)))) return NULL; ind++; } while ((c= *ptr++)) { slen--; if (c == ',' || !slen) { len= ptr - options; if (c == ',') len--; if (!(options_list[ind]=calloc(len+1,sizeof(char)))) return NULL; strncpy(options_list[ind], options, len); ind++; options= ptr; } } return options_list; } #endif /* constructs an SQL statement previously prepared with actual values replacing placeholders */ static char *parse_params( imp_xxh_t *imp_xxh, pTHX_ MYSQL *sock, char *statement, STRLEN *slen_ptr, imp_sth_ph_t* params, int num_params, bool bind_type_guessing, bool bind_comment_placeholders) { bool comment_end= false; char *salloc, *statement_ptr; char *statement_ptr_end, *ptr, *valbuf; char *cp, *end; int alen, i; int slen= *slen_ptr; int limit_flag= 0; int comment_length=0; STRLEN vallen; imp_sth_ph_t *ph; if (DBIc_DBISTATE(imp_xxh)->debug >= 2) PerlIO_printf(DBIc_LOGPIO(imp_xxh), ">parse_params statement %s\n", statement); if (num_params == 0) return NULL; while (isspace(*statement)) { ++statement; --slen; } /* Calculate the number of bytes being allocated for the statement */ alen= slen; for (i= 0, ph= params; i < num_params; i++, ph++) { int defined= 0; if (ph->value) { if (SvMAGICAL(ph->value)) mg_get(ph->value); if (SvOK(ph->value)) defined=1; } if (!defined) alen+= 3; /* Erase '?', insert 'NULL' */ else { valbuf= SvPV(ph->value, vallen); alen+= 2+vallen+1; /* this will most likely not happen since line 214 */ /* of 
mysql.xs hardcodes all types to SQL_VARCHAR */ if (!ph->type) { if (bind_type_guessing) { valbuf= SvPV(ph->value, vallen); ph->type= SQL_INTEGER; if (parse_number(valbuf, vallen, &end) != 0) { ph->type= SQL_VARCHAR; } } else ph->type= SQL_VARCHAR; } } } /* Allocate memory, why *2, well, because we have ptr and statement_ptr */ New(908, salloc, alen*2, char); ptr= salloc; i= 0; /* Now create the statement string; compare count_params above */ statement_ptr_end= (statement_ptr= statement)+ slen; while (statement_ptr < statement_ptr_end) { /* LIMIT should be the last part of the query, in most cases */ if (! limit_flag) { /* it would be good to be able to handle any number of cases and orders */ if ((*statement_ptr == 'l' || *statement_ptr == 'L') && (!strncmp(statement_ptr+1, "imit ?", 6) || !strncmp(statement_ptr+1, "IMIT ?", 6))) { limit_flag = 1; } } switch (*statement_ptr) { /* comment detection. Anything goes in a comment */ case '-': { if (bind_comment_placeholders) { *ptr++= *statement_ptr++; break; } else { comment_length= 1; comment_end= false; *ptr++ = *statement_ptr++; if (*statement_ptr == '-') { /* ignore everything until newline or end of string */ while (*statement_ptr) { comment_length++; *ptr++ = *statement_ptr++; if (!*statement_ptr || *statement_ptr == '\n') { comment_end= true; break; } } /* if not end of comment, go back to where we started, no end found */ if (! comment_end) { statement_ptr -= comment_length; ptr -= comment_length; } } break; } } /* c-type comments */ case '/': { if (bind_comment_placeholders) { *ptr++= *statement_ptr++; break; } else { comment_length= 1; comment_end= false; *ptr++ = *statement_ptr++; if (*statement_ptr == '*') { /* use up characters everything until newline */ while (*statement_ptr) { *ptr++ = *statement_ptr++; comment_length++; if (!strncmp(statement_ptr, "*/", 2)) { comment_length += 2; comment_end= true; break; } } /* Go back to where started if comment end not found */ if (! 
comment_end) { statement_ptr -= comment_length; ptr -= comment_length; } } break; } } case '`': case '\'': case '"': /* Skip string */ { char endToken = *statement_ptr++; *ptr++ = endToken; while (statement_ptr != statement_ptr_end && *statement_ptr != endToken) { if (*statement_ptr == '\\') { *ptr++ = *statement_ptr++; if (statement_ptr == statement_ptr_end) break; } *ptr++= *statement_ptr++; } if (statement_ptr != statement_ptr_end) *ptr++= *statement_ptr++; } break; case '?': /* Insert parameter */ statement_ptr++; if (i >= num_params) { break; } ph = params+ (i++); if (!ph->value || !SvOK(ph->value)) { *ptr++ = 'N'; *ptr++ = 'U'; *ptr++ = 'L'; *ptr++ = 'L'; } else { int is_num = FALSE; valbuf= SvPV(ph->value, vallen); if (valbuf) { switch (ph->type) { case SQL_NUMERIC: case SQL_DECIMAL: case SQL_INTEGER: case SQL_SMALLINT: case SQL_FLOAT: case SQL_REAL: case SQL_DOUBLE: case SQL_BIGINT: case SQL_TINYINT: is_num = TRUE; break; } /* (note this sets *end, which we use if is_num) */ if ( parse_number(valbuf, vallen, &end) != 0 && is_num) { if (bind_type_guessing) { /* .. 
not a number, so apparently we guessed wrong */ is_num = 0; ph->type = SQL_VARCHAR; } } /* we're at the end of the query, so any placeholders if */ /* after a LIMIT clause will be numbers and should not be quoted */ if (limit_flag == 1) is_num = TRUE; if (!is_num) { *ptr++ = '\''; ptr += mysql_real_escape_string(sock, ptr, valbuf, vallen); *ptr++ = '\''; } else { for (cp= valbuf; cp < end; cp++) *ptr++= *cp; } } } break; /* in case this is a nested LIMIT */ case ')': limit_flag = 0; *ptr++ = *statement_ptr++; break; default: *ptr++ = *statement_ptr++; break; } } *slen_ptr = ptr - salloc; *ptr++ = '\0'; return(salloc); } int bind_param(imp_sth_ph_t *ph, SV *value, IV sql_type) { dTHX; if (ph->value) { if (SvMAGICAL(ph->value)) mg_get(ph->value); (void) SvREFCNT_dec(ph->value); } ph->value= newSVsv(value); if (sql_type) ph->type = sql_type; return TRUE; } static const sql_type_info_t SQL_GET_TYPE_INFO_values[]= { { "varchar", SQL_VARCHAR, 255, "'", "'", "max length", 1, 0, 3, 0, 0, 0, "variable length string", 0, 0, 0, SQL_VARCHAR, 0, 0, #if MYSQL_VERSION_ID < MYSQL_VERSION_5_0 FIELD_TYPE_VAR_STRING, 0, #else MYSQL_TYPE_STRING, 0, #endif }, { "decimal", SQL_DECIMAL, 15, NULL, NULL, "precision,scale", 1, 0, 3, 0, 0, 0, "double", 0, 6, 2, SQL_DECIMAL, 0, 0, #if MYSQL_VERSION_ID < MYSQL_VERSION_5_0 FIELD_TYPE_DECIMAL, 1 #else MYSQL_TYPE_DECIMAL, 1 #endif }, { "tinyint", SQL_TINYINT, 3, NULL, NULL, NULL, 1, 0, 3, 0, 0, 0, "Tiny integer", 0, 0, 10, SQL_TINYINT, 0, 0, #if MYSQL_VERSION_ID < MYSQL_VERSION_5_0 FIELD_TYPE_TINY, 1 #else MYSQL_TYPE_TINY, 1 #endif }, { "smallint", SQL_SMALLINT, 5, NULL, NULL, NULL, 1, 0, 3, 0, 0, 0, "Short integer", 0, 0, 10, SQL_SMALLINT, 0, 0, #if MYSQL_VERSION_ID < MYSQL_VERSION_5_0 FIELD_TYPE_SHORT, 1 #else MYSQL_TYPE_SHORT, 1 #endif }, { "integer", SQL_INTEGER, 10, NULL, NULL, NULL, 1, 0, 3, 0, 0, 0, "integer", 0, 0, 10, SQL_INTEGER, 0, 0, #if MYSQL_VERSION_ID < MYSQL_VERSION_5_0 FIELD_TYPE_LONG, 1 #else MYSQL_TYPE_LONG, 1 #endif }, { 
"float", SQL_REAL, 7, NULL, NULL, NULL, 1, 0, 0, 0, 0, 0, "float", 0, 2, 10, SQL_FLOAT, 0, 0, #if MYSQL_VERSION_ID < MYSQL_VERSION_5_0 FIELD_TYPE_FLOAT, 1 #else MYSQL_TYPE_FLOAT, 1 #endif }, { "double", SQL_FLOAT, 15, NULL, NULL, NULL, 1, 0, 3, 0, 0, 0, "double", 0, 4, 2, SQL_FLOAT, 0, 0, #if MYSQL_VERSION_ID < MYSQL_VERSION_5_0 FIELD_TYPE_DOUBLE, 1 #else MYSQL_TYPE_DOUBLE, 1 #endif }, { "double", SQL_DOUBLE, 15, NULL, NULL, NULL, 1, 0, 3, 0, 0, 0, "double", 0, 4, 10, SQL_DOUBLE, 0, 0, #if MYSQL_VERSION_ID < MYSQL_VERSION_5_0 FIELD_TYPE_DOUBLE, 1 #else MYSQL_TYPE_DOUBLE, 1 #endif }, /* FIELD_TYPE_NULL ? */ { "timestamp", SQL_TIMESTAMP, 14, "'", "'", NULL, 0, 0, 3, 0, 0, 0, "timestamp", 0, 0, 0, SQL_TIMESTAMP, 0, 0, #if MYSQL_VERSION_ID < MYSQL_VERSION_5_0 FIELD_TYPE_TIMESTAMP, 0 #else MYSQL_TYPE_TIMESTAMP, 0 #endif }, { "bigint", SQL_BIGINT, 19, NULL, NULL, NULL, 1, 0, 3, 0, 0, 0, "Longlong integer", 0, 0, 10, SQL_BIGINT, 0, 0, #if MYSQL_VERSION_ID < MYSQL_VERSION_5_0 FIELD_TYPE_LONGLONG, 1 #else MYSQL_TYPE_LONGLONG, 1 #endif }, { "mediumint", SQL_INTEGER, 8, NULL, NULL, NULL, 1, 0, 3, 0, 0, 0, "Medium integer", 0, 0, 10, SQL_INTEGER, 0, 0, #if MYSQL_VERSION_ID < MYSQL_VERSION_5_0 FIELD_TYPE_INT24, 1 #else MYSQL_TYPE_INT24, 1 #endif }, { "date", SQL_DATE, 10, "'", "'", NULL, 1, 0, 3, 0, 0, 0, "date", 0, 0, 0, SQL_DATE, 0, 0, #if MYSQL_VERSION_ID < MYSQL_VERSION_5_0 FIELD_TYPE_DATE, 0 #else MYSQL_TYPE_DATE, 0 #endif }, { "time", SQL_TIME, 6, "'", "'", NULL, 1, 0, 3, 0, 0, 0, "time", 0, 0, 0, SQL_TIME, 0, 0, #if MYSQL_VERSION_ID < MYSQL_VERSION_5_0 FIELD_TYPE_TIME, 0 #else MYSQL_TYPE_TIME, 0 #endif }, { "datetime", SQL_TIMESTAMP, 21, "'", "'", NULL, 1, 0, 3, 0, 0, 0, "datetime", 0, 0, 0, SQL_TIMESTAMP, 0, 0, #if MYSQL_VERSION_ID < MYSQL_VERSION_5_0 FIELD_TYPE_DATETIME, 0 #else MYSQL_TYPE_DATETIME, 0 #endif }, { "year", SQL_SMALLINT, 4, NULL, NULL, NULL, 1, 0, 3, 0, 0, 0, "year", 0, 0, 10, SQL_SMALLINT, 0, 0, #if MYSQL_VERSION_ID < MYSQL_VERSION_5_0 FIELD_TYPE_YEAR, 0 
#else MYSQL_TYPE_YEAR, 0 #endif }, { "date", SQL_DATE, 10, "'", "'", NULL, 1, 0, 3, 0, 0, 0, "date", 0, 0, 0, SQL_DATE, 0, 0, #if MYSQL_VERSION_ID < MYSQL_VERSION_5_0 FIELD_TYPE_NEWDATE, 0 #else MYSQL_TYPE_NEWDATE, 0 #endif }, { "enum", SQL_VARCHAR, 255, "'", "'", NULL, 1, 0, 1, 0, 0, 0, "enum(value1,value2,value3...)", 0, 0, 0, 0, 0, 0, #if MYSQL_VERSION_ID < MYSQL_VERSION_5_0 FIELD_TYPE_ENUM, 0 #else MYSQL_TYPE_ENUM, 0 #endif }, { "set", SQL_VARCHAR, 255, "'", "'", NULL, 1, 0, 1, 0, 0, 0, "set(value1,value2,value3...)", 0, 0, 0, 0, 0, 0, #if MYSQL_VERSION_ID < MYSQL_VERSION_5_0 FIELD_TYPE_SET, 0 #else MYSQL_TYPE_SET, 0 #endif }, { "blob", SQL_LONGVARBINARY, 65535, "'", "'", NULL, 1, 0, 3, 0, 0, 0, "binary large object (0-65535)", 0, 0, 0, SQL_LONGVARBINARY, 0, 0, #if MYSQL_VERSION_ID < MYSQL_VERSION_5_0 FIELD_TYPE_BLOB, 0 #else MYSQL_TYPE_BLOB, 0 #endif }, { "tinyblob", SQL_VARBINARY, 255, "'", "'", NULL, 1, 0, 3, 0, 0, 0, "binary large object (0-255) ", 0, 0, 0, SQL_VARBINARY, 0, 0, #if MYSQL_VERSION_ID < MYSQL_VERSION_5_0 FIELD_TYPE_TINY_BLOB, 0 #else FIELD_TYPE_TINY_BLOB, 0 #endif }, { "mediumblob", SQL_LONGVARBINARY, 16777215, "'", "'", NULL, 1, 0, 3, 0, 0, 0, "binary large object", 0, 0, 0, SQL_LONGVARBINARY, 0, 0, #if MYSQL_VERSION_ID < MYSQL_VERSION_5_0 FIELD_TYPE_MEDIUM_BLOB, 0 #else MYSQL_TYPE_MEDIUM_BLOB, 0 #endif }, { "longblob", SQL_LONGVARBINARY, 2147483647, "'", "'", NULL, 1, 0, 3, 0, 0, 0, "binary large object, use mediumblob instead", 0, 0, 0, SQL_LONGVARBINARY, 0, 0, #if MYSQL_VERSION_ID < MYSQL_VERSION_5_0 FIELD_TYPE_LONG_BLOB, 0 #else MYSQL_TYPE_LONG_BLOB, 0 #endif }, { "char", SQL_CHAR, 255, "'", "'", "max length", 1, 0, 3, 0, 0, 0, "string", 0, 0, 0, SQL_CHAR, 0, 0, #if MYSQL_VERSION_ID < MYSQL_VERSION_5_0 FIELD_TYPE_STRING, 0 #else MYSQL_TYPE_STRING, 0 #endif }, { "decimal", SQL_NUMERIC, 15, NULL, NULL, "precision,scale", 1, 0, 3, 0, 0, 0, "double", 0, 6, 2, SQL_NUMERIC, 0, 0, #if MYSQL_VERSION_ID < MYSQL_VERSION_5_0 FIELD_TYPE_DECIMAL, 1 
#else MYSQL_TYPE_DECIMAL, 1 #endif }, { "tinyint unsigned", SQL_TINYINT, 3, NULL, NULL, NULL, 1, 0, 3, 1, 0, 0, "Tiny integer unsigned", 0, 0, 10, SQL_TINYINT, 0, 0, #if MYSQL_VERSION_ID < MYSQL_VERSION_5_0 FIELD_TYPE_TINY, 1 #else MYSQL_TYPE_TINY, 1 #endif }, { "smallint unsigned", SQL_SMALLINT, 5, NULL, NULL, NULL, 1, 0, 3, 1, 0, 0, "Short integer unsigned", 0, 0, 10, SQL_SMALLINT, 0, 0, #if MYSQL_VERSION_ID < MYSQL_VERSION_5_0 FIELD_TYPE_SHORT, 1 #else MYSQL_TYPE_SHORT, 1 #endif }, { "mediumint unsigned", SQL_INTEGER, 8, NULL, NULL, NULL, 1, 0, 3, 1, 0, 0, "Medium integer unsigned", 0, 0, 10, SQL_INTEGER, 0, 0, #if MYSQL_VERSION_ID < MYSQL_VERSION_5_0 FIELD_TYPE_INT24, 1 #else MYSQL_TYPE_INT24, 1 #endif }, { "int unsigned", SQL_INTEGER, 10, NULL, NULL, NULL, 1, 0, 3, 1, 0, 0, "integer unsigned", 0, 0, 10, SQL_INTEGER, 0, 0, #if MYSQL_VERSION_ID < MYSQL_VERSION_5_0 FIELD_TYPE_LONG, 1 #else MYSQL_TYPE_LONG, 1 #endif }, { "int", SQL_INTEGER, 10, NULL, NULL, NULL, 1, 0, 3, 0, 0, 0, "integer", 0, 0, 10, SQL_INTEGER, 0, 0, #if MYSQL_VERSION_ID < MYSQL_VERSION_5_0 FIELD_TYPE_LONG, 1 #else MYSQL_TYPE_LONG, 1 #endif }, { "integer unsigned", SQL_INTEGER, 10, NULL, NULL, NULL, 1, 0, 3, 1, 0, 0, "integer", 0, 0, 10, SQL_INTEGER, 0, 0, #if MYSQL_VERSION_ID < MYSQL_VERSION_5_0 FIELD_TYPE_LONG, 1 #else MYSQL_TYPE_LONG, 1 #endif }, { "bigint unsigned", SQL_BIGINT, 20, NULL, NULL, NULL, 1, 0, 3, 1, 0, 0, "Longlong integer unsigned", 0, 0, 10, SQL_BIGINT, 0, 0, #if MYSQL_VERSION_ID < MYSQL_VERSION_5_0 FIELD_TYPE_LONGLONG, 1 #else MYSQL_TYPE_LONGLONG, 1 #endif }, { "text", SQL_LONGVARCHAR, 65535, "'", "'", NULL, 1, 0, 3, 0, 0, 0, "large text object (0-65535)", 0, 0, 0, SQL_LONGVARCHAR, 0, 0, #if MYSQL_VERSION_ID < MYSQL_VERSION_5_0 FIELD_TYPE_BLOB, 0 #else MYSQL_TYPE_BLOB, 0 #endif }, { "mediumtext", SQL_LONGVARCHAR, 16777215, "'", "'", NULL, 1, 0, 3, 0, 0, 0, "large text object", 0, 0, 0, SQL_LONGVARCHAR, 0, 0, #if MYSQL_VERSION_ID < MYSQL_VERSION_5_0 FIELD_TYPE_MEDIUM_BLOB, 0 
#else MYSQL_TYPE_MEDIUM_BLOB, 0 #endif }, { "mediumint unsigned auto_increment", SQL_INTEGER, 8, NULL, NULL, NULL, 0, 0, 3, 1, 0, 1, "Medium integer unsigned auto_increment", 0, 0, 10, #if MYSQL_VERSION_ID < MYSQL_VERSION_5_0 SQL_INTEGER, 0, 0, FIELD_TYPE_INT24, 1, #else SQL_INTEGER, 0, 0, MYSQL_TYPE_INT24, 1, #endif }, { "tinyint unsigned auto_increment", SQL_TINYINT, 3, NULL, NULL, NULL, 0, 0, 3, 1, 0, 1, "tinyint unsigned auto_increment", 0, 0, 10, #if MYSQL_VERSION_ID < MYSQL_VERSION_5_0 SQL_TINYINT, 0, 0, FIELD_TYPE_TINY, 1 #else SQL_TINYINT, 0, 0, MYSQL_TYPE_TINY, 1 #endif }, { "smallint auto_increment", SQL_SMALLINT, 5, NULL, NULL, NULL, 0, 0, 3, 0, 0, 1, "smallint auto_increment", 0, 0, 10, #if MYSQL_VERSION_ID < MYSQL_VERSION_5_0 SQL_SMALLINT, 0, 0, FIELD_TYPE_SHORT, 1 #else SQL_SMALLINT, 0, 0, MYSQL_TYPE_SHORT, 1 #endif }, { "int unsigned auto_increment", SQL_INTEGER, 10, NULL, NULL, NULL, 0, 0, 3, 1, 0, 1, "integer unsigned auto_increment", 0, 0, 10, #if MYSQL_VERSION_ID < MYSQL_VERSION_5_0 SQL_INTEGER, 0, 0, FIELD_TYPE_LONG, 1 #else SQL_INTEGER, 0, 0, MYSQL_TYPE_LONG, 1 #endif }, { "mediumint", SQL_INTEGER, 7, NULL, NULL, NULL, 1, 0, 3, 0, 0, 0, "Medium integer", 0, 0, 10, #if MYSQL_VERSION_ID < MYSQL_VERSION_5_0 SQL_INTEGER, 0, 0, FIELD_TYPE_INT24, 1 #else SQL_INTEGER, 0, 0, MYSQL_TYPE_INT24, 1 #endif }, { "bit", SQL_BIT, 1, NULL, NULL, NULL, 1, 0, 3, 0, 0, 0, "char(1)", 0, 0, 0, #if MYSQL_VERSION_ID < MYSQL_VERSION_5_0 SQL_BIT, 0, 0, FIELD_TYPE_TINY, 0 #else SQL_BIT, 0, 0, MYSQL_TYPE_TINY, 0 #endif }, { "numeric", SQL_NUMERIC, 19, NULL, NULL, "precision,scale", 1, 0, 3, 0, 0, 0, "numeric", 0, 19, 10, #if MYSQL_VERSION_ID < MYSQL_VERSION_5_0 SQL_NUMERIC, 0, 0, FIELD_TYPE_DECIMAL, 1, #else SQL_NUMERIC, 0, 0, MYSQL_TYPE_DECIMAL, 1, #endif }, { "integer unsigned auto_increment", SQL_INTEGER, 10, NULL, NULL, NULL, 0, 0, 3, 1, 0, 1, "integer unsigned auto_increment", 0, 0, 10, #if MYSQL_VERSION_ID < MYSQL_VERSION_5_0 SQL_INTEGER, 0, 0, FIELD_TYPE_LONG, 1, 
#else SQL_INTEGER, 0, 0, MYSQL_TYPE_LONG, 1, #endif }, { "mediumint unsigned", SQL_INTEGER, 8, NULL, NULL, NULL, 1, 0, 3, 1, 0, 0, "Medium integer unsigned", 0, 0, 10, #if MYSQL_VERSION_ID < MYSQL_VERSION_5_0 SQL_INTEGER, 0, 0, FIELD_TYPE_INT24, 1 #else SQL_INTEGER, 0, 0, MYSQL_TYPE_INT24, 1 #endif }, { "smallint unsigned auto_increment", SQL_SMALLINT, 5, NULL, NULL, NULL, 0, 0, 3, 1, 0, 1, "smallint unsigned auto_increment", 0, 0, 10, #if MYSQL_VERSION_ID < MYSQL_VERSION_5_0 SQL_SMALLINT, 0, 0, FIELD_TYPE_SHORT, 1 #else SQL_SMALLINT, 0, 0, MYSQL_TYPE_SHORT, 1 #endif }, { "int auto_increment", SQL_INTEGER, 10, NULL, NULL, NULL, 0, 0, 3, 0, 0, 1, "integer auto_increment", 0, 0, 10, #if MYSQL_VERSION_ID < MYSQL_VERSION_5_0 SQL_INTEGER, 0, 0, FIELD_TYPE_LONG, 1 #else SQL_INTEGER, 0, 0, MYSQL_TYPE_LONG, 1 #endif }, { "long varbinary", SQL_LONGVARBINARY, 16777215, "0x", NULL, NULL, 1, 0, 3, 0, 0, 0, "mediumblob", 0, 0, 0, #if MYSQL_VERSION_ID < MYSQL_VERSION_5_0 SQL_LONGVARBINARY, 0, 0, FIELD_TYPE_LONG_BLOB, 0 #else SQL_LONGVARBINARY, 0, 0, MYSQL_TYPE_LONG_BLOB, 0 #endif }, { "double auto_increment", SQL_FLOAT, 15, NULL, NULL, NULL, 0, 0, 3, 0, 0, 1, "double auto_increment", 0, 4, 2, #if MYSQL_VERSION_ID < MYSQL_VERSION_5_0 SQL_FLOAT, 0, 0, FIELD_TYPE_DOUBLE, 1 #else SQL_FLOAT, 0, 0, MYSQL_TYPE_DOUBLE, 1 #endif }, { "double auto_increment", SQL_DOUBLE, 15, NULL, NULL, NULL, 0, 0, 3, 0, 0, 1, "double auto_increment", 0, 4, 10, #if MYSQL_VERSION_ID < MYSQL_VERSION_5_0 SQL_DOUBLE, 0, 0, FIELD_TYPE_DOUBLE, 1 #else SQL_DOUBLE, 0, 0, MYSQL_TYPE_DOUBLE, 1 #endif }, { "integer auto_increment", SQL_INTEGER, 10, NULL, NULL, NULL, 0, 0, 3, 0, 0, 1, "integer auto_increment", 0, 0, 10, #if MYSQL_VERSION_ID < MYSQL_VERSION_5_0 SQL_INTEGER, 0, 0, FIELD_TYPE_LONG, 1, #else SQL_INTEGER, 0, 0, MYSQL_TYPE_LONG, 1, #endif }, { "bigint auto_increment", SQL_BIGINT, 19, NULL, NULL, NULL, 0, 0, 3, 0, 0, 1, "bigint auto_increment", 0, 0, 10, #if MYSQL_VERSION_ID < MYSQL_VERSION_5_0 SQL_BIGINT, 
0, 0, FIELD_TYPE_LONGLONG, 1 #else SQL_BIGINT, 0, 0, MYSQL_TYPE_LONGLONG, 1 #endif }, { "bit auto_increment", SQL_BIT, 1, NULL, NULL, NULL, 0, 0, 3, 0, 0, 1, "char(1) auto_increment", 0, 0, 0, #if MYSQL_VERSION_ID < MYSQL_VERSION_5_0 SQL_BIT, 0, 0, FIELD_TYPE_TINY, 1 #else SQL_BIT, 0, 0, MYSQL_TYPE_TINY, 1 #endif }, { "mediumint auto_increment", SQL_INTEGER, 7, NULL, NULL, NULL, 0, 0, 3, 0, 0, 1, "Medium integer auto_increment", 0, 0, 10, #if MYSQL_VERSION_ID < MYSQL_VERSION_5_0 SQL_INTEGER, 0, 0, FIELD_TYPE_INT24, 1 #else SQL_INTEGER, 0, 0, MYSQL_TYPE_INT24, 1 #endif }, { "float auto_increment", SQL_REAL, 7, NULL, NULL, NULL, 0, 0, 0, 0, 0, 1, "float auto_increment", 0, 2, 10, #if MYSQL_VERSION_ID < MYSQL_VERSION_5_0 SQL_FLOAT, 0, 0, FIELD_TYPE_FLOAT, 1 #else SQL_FLOAT, 0, 0, MYSQL_TYPE_FLOAT, 1 #endif }, { "long varchar", SQL_LONGVARCHAR, 16777215, "'", "'", NULL, 1, 0, 3, 0, 0, 0, "mediumtext", 0, 0, 0, #if MYSQL_VERSION_ID < MYSQL_VERSION_5_0 SQL_LONGVARCHAR, 0, 0, FIELD_TYPE_MEDIUM_BLOB, 1 #else SQL_LONGVARCHAR, 0, 0, MYSQL_TYPE_MEDIUM_BLOB, 1 #endif }, { "tinyint auto_increment", SQL_TINYINT, 3, NULL, NULL, NULL, 0, 0, 3, 0, 0, 1, "tinyint auto_increment", 0, 0, 10, #if MYSQL_VERSION_ID < MYSQL_VERSION_5_0 SQL_TINYINT, 0, 0, FIELD_TYPE_TINY, 1 #else SQL_TINYINT, 0, 0, MYSQL_TYPE_TINY, 1 #endif }, { "bigint unsigned auto_increment", SQL_BIGINT, 20, NULL, NULL, NULL, 0, 0, 3, 1, 0, 1, "bigint unsigned auto_increment", 0, 0, 10, #if MYSQL_VERSION_ID < MYSQL_VERSION_5_0 SQL_BIGINT, 0, 0, FIELD_TYPE_LONGLONG, 1 #else SQL_BIGINT, 0, 0, MYSQL_TYPE_LONGLONG, 1 #endif }, /* END MORE STUFF */ }; /* static const sql_type_info_t* native2sql (int t) */ static const sql_type_info_t *native2sql(int t) { switch (t) { case FIELD_TYPE_VAR_STRING: return &SQL_GET_TYPE_INFO_values[0]; case FIELD_TYPE_DECIMAL: return &SQL_GET_TYPE_INFO_values[1]; #ifdef FIELD_TYPE_NEWDECIMAL case FIELD_TYPE_NEWDECIMAL: return &SQL_GET_TYPE_INFO_values[1]; #endif case FIELD_TYPE_TINY: return 
&SQL_GET_TYPE_INFO_values[2]; case FIELD_TYPE_SHORT: return &SQL_GET_TYPE_INFO_values[3]; case FIELD_TYPE_LONG: return &SQL_GET_TYPE_INFO_values[4]; case FIELD_TYPE_FLOAT: return &SQL_GET_TYPE_INFO_values[5]; /* 6 */ case FIELD_TYPE_DOUBLE: return &SQL_GET_TYPE_INFO_values[7]; case FIELD_TYPE_TIMESTAMP: return &SQL_GET_TYPE_INFO_values[8]; case FIELD_TYPE_LONGLONG: return &SQL_GET_TYPE_INFO_values[9]; case FIELD_TYPE_INT24: return &SQL_GET_TYPE_INFO_values[10]; case FIELD_TYPE_DATE: return &SQL_GET_TYPE_INFO_values[11]; case FIELD_TYPE_TIME: return &SQL_GET_TYPE_INFO_values[12]; case FIELD_TYPE_DATETIME: return &SQL_GET_TYPE_INFO_values[13]; case FIELD_TYPE_YEAR: return &SQL_GET_TYPE_INFO_values[14]; case FIELD_TYPE_NEWDATE: return &SQL_GET_TYPE_INFO_values[15]; case FIELD_TYPE_ENUM: return &SQL_GET_TYPE_INFO_values[16]; case FIELD_TYPE_SET: return &SQL_GET_TYPE_INFO_values[17]; case FIELD_TYPE_BLOB: return &SQL_GET_TYPE_INFO_values[18]; case FIELD_TYPE_TINY_BLOB: return &SQL_GET_TYPE_INFO_values[19]; case FIELD_TYPE_MEDIUM_BLOB: return &SQL_GET_TYPE_INFO_values[20]; case FIELD_TYPE_LONG_BLOB: return &SQL_GET_TYPE_INFO_values[21]; case FIELD_TYPE_STRING: return &SQL_GET_TYPE_INFO_values[22]; default: return &SQL_GET_TYPE_INFO_values[0]; } } #define SQL_GET_TYPE_INFO_num \ (sizeof(SQL_GET_TYPE_INFO_values)/sizeof(sql_type_info_t)) /*************************************************************************** * * Name: dbd_init * * Purpose: Called when the driver is installed by DBI * * Input: dbistate - pointer to the DBI state variable, used for some * DBI internal things * * Returns: Nothing * **************************************************************************/ void dbd_init(dbistate_t* dbistate) { dTHX; DBISTATE_INIT; } /************************************************************************** * * Name: do_error, do_warn * * Purpose: Called to associate an error code and an error message * to some handle * * Input: h - the handle in error condition * rc - 
the error code * what - the error message * * Returns: Nothing * **************************************************************************/ void do_error(SV* h, int rc, const char* what, const char* sqlstate) { dTHX; D_imp_xxh(h); STRLEN lna; SV *errstr; SV *errstate; if (DBIc_TRACE_LEVEL(imp_xxh) >= 2) PerlIO_printf(DBIc_LOGPIO(imp_xxh), "\t\t--> do_error\n"); errstr= DBIc_ERRSTR(imp_xxh); sv_setiv(DBIc_ERR(imp_xxh), (IV)rc); /* set err early */ sv_setpv(errstr, what); #if MYSQL_VERSION_ID >= SQL_STATE_VERSION if (sqlstate) { errstate= DBIc_STATE(imp_xxh); sv_setpvn(errstate, sqlstate, 5); } #endif /* NO EFFECT DBIh_EVENT2(h, ERROR_event, DBIc_ERR(imp_xxh), errstr); */ if (DBIc_TRACE_LEVEL(imp_xxh) >= 2) PerlIO_printf(DBIc_LOGPIO(imp_xxh), "%s error %d recorded: %s\n", what, rc, SvPV(errstr,lna)); if (DBIc_TRACE_LEVEL(imp_xxh) >= 2) PerlIO_printf(DBIc_LOGPIO(imp_xxh), "\t\t<-- do_error\n"); } /* void do_warn(SV* h, int rc, char* what) */ void do_warn(SV* h, int rc, char* what) { dTHX; D_imp_xxh(h); STRLEN lna; SV *errstr = DBIc_ERRSTR(imp_xxh); sv_setiv(DBIc_ERR(imp_xxh), (IV)rc); /* set err early */ sv_setpv(errstr, what); /* NO EFFECT DBIh_EVENT2(h, WARN_event, DBIc_ERR(imp_xxh), errstr);*/ if (DBIc_TRACE_LEVEL(imp_xxh) >= 2) PerlIO_printf(DBIc_LOGPIO(imp_xxh), "%s warning %d recorded: %s\n", what, rc, SvPV(errstr,lna)); warn("%s", what); } #if defined(DBD_MYSQL_EMBEDDED) #define DBD_MYSQL_NAMESPACE "DBD::mysqlEmb::QUIET"; #else #define DBD_MYSQL_NAMESPACE "DBD::mysql::QUIET"; #endif #define doquietwarn(s) \ { \ SV* sv = perl_get_sv(DBD_MYSQL_NAMESPACE, FALSE); \ if (!sv || !SvTRUE(sv)) { \ warn s; \ } \ } /*************************************************************************** * * Name: mysql_dr_connect * * Purpose: Replacement for mysql_connect * * Input: MYSQL* sock - Pointer to a MYSQL structure being * initialized * char* mysql_socket - Name of a UNIX socket being used * or NULL * char* host - Host name being used or NULL for localhost * char* port - 
Port number being used or NULL for default * char* user - User name being used or NULL * char* password - Password being used or NULL * char* dbname - Database name being used or NULL * char* imp_dbh - Pointer to internal dbh structure * * Returns: The sock argument for success, NULL otherwise; * you have to call do_error in the latter case. * **************************************************************************/ MYSQL *mysql_dr_connect( SV* dbh, MYSQL* sock, char* mysql_socket, char* host, char* port, char* user, char* password, char* dbname, imp_dbh_t *imp_dbh) { int portNr; unsigned int client_flag; MYSQL* result; dTHX; D_imp_xxh(dbh); /* per Monty, already in client.c in API */ /* but still not exist in libmysqld.c */ #if defined(DBD_MYSQL_EMBEDDED) if (host && !*host) host = NULL; #endif portNr= (port && *port) ? atoi(port) : 0; /* already in client.c in API */ /* if (user && !*user) user = NULL; */ /* if (password && !*password) password = NULL; */ if (DBIc_TRACE_LEVEL(imp_xxh) >= 2) PerlIO_printf(DBIc_LOGPIO(imp_xxh), "imp_dbh->mysql_dr_connect: host = |%s|, port = %d," \ " uid = %s, pwd = %s\n", host ? host : "NULL", portNr, user ? user : "NULL", password ? 
password : "NULL"); { #if defined(DBD_MYSQL_EMBEDDED) if (imp_dbh) { D_imp_drh_from_dbh; SV* sv = DBIc_IMP_DATA(imp_dbh); if (sv && SvROK(sv)) { SV** svp; STRLEN lna; char * options; int server_args_cnt= 0; int server_groups_cnt= 0; int rc= 0; char ** server_args = NULL; char ** server_groups = NULL; HV* hv = (HV*) SvRV(sv); if (SvTYPE(hv) != SVt_PVHV) return NULL; if (!imp_drh->embedded.state) { /* Init embedded server */ if ((svp = hv_fetch(hv, "mysql_embedded_groups", 21, FALSE)) && *svp && SvTRUE(*svp)) { options = SvPV(*svp, lna); imp_drh->embedded.groups=newSVsv(*svp); if ((server_groups_cnt=count_embedded_options(options))) { /* number of server_groups always server_groups+1 */ server_groups=fill_out_embedded_options(DBIc_LOGPIO(imp_xxh), options, 0, (int)lna, ++server_groups_cnt); if (DBIc_TRACE_LEVEL(imp_xxh) >= 2) { PerlIO_printf(DBIc_LOGPIO(imp_xxh), "Groups names passed to embedded server:\n"); print_embedded_options(DBIc_LOGPIO(imp_xxh), server_groups, server_groups_cnt); } } } if ((svp = hv_fetch(hv, "mysql_embedded_options", 22, FALSE)) && *svp && SvTRUE(*svp)) { options = SvPV(*svp, lna); imp_drh->embedded.args=newSVsv(*svp); if ((server_args_cnt=count_embedded_options(options))) { /* number of server_options always server_options+1 */ server_args=fill_out_embedded_options(DBIc_LOGPIO(imp_xxh), options, 1, (int)lna, ++server_args_cnt); if (DBIc_TRACE_LEVEL(imp_xxh) >= 2) { PerlIO_printf(DBIc_LOGPIO(imp_xxh), "Server options passed to embedded server:\n"); print_embedded_options(DBIc_LOGPIO(imp_xxh), server_args, server_args_cnt); } } } if (mysql_server_init(server_args_cnt, server_args, server_groups)) { do_warn(dbh, AS_ERR_EMBEDDED, "Embedded server was not started. 
\ Could not initialize environment."); return NULL; } imp_drh->embedded.state=1; if (server_args_cnt) free_embedded_options(server_args, server_args_cnt); if (server_groups_cnt) free_embedded_options(server_groups, server_groups_cnt); } else { /* * Check if embedded parameters passed to connect() differ from * first ones */ if ( ((svp = hv_fetch(hv, "mysql_embedded_groups", 21, FALSE)) && *svp && SvTRUE(*svp))) rc =+ abs(sv_cmp(*svp, imp_drh->embedded.groups)); if ( ((svp = hv_fetch(hv, "mysql_embedded_options", 22, FALSE)) && *svp && SvTRUE(*svp)) ) rc =+ abs(sv_cmp(*svp, imp_drh->embedded.args)); if (rc) { do_warn(dbh, AS_ERR_EMBEDDED, "Embedded server was already started. You cannot pass init\ parameters to embedded server once"); return NULL; } } } } #endif #ifdef MYSQL_NO_CLIENT_FOUND_ROWS client_flag = 0; #else client_flag = CLIENT_FOUND_ROWS; #endif mysql_init(sock); if (imp_dbh) { SV* sv = DBIc_IMP_DATA(imp_dbh); DBIc_set(imp_dbh, DBIcf_AutoCommit, TRUE); if (sv && SvROK(sv)) { HV* hv = (HV*) SvRV(sv); SV** svp; STRLEN lna; /* thanks to Peter John Edwards for mysql_init_command */ if ((svp = hv_fetch(hv, "mysql_init_command", 18, FALSE)) && *svp && SvTRUE(*svp)) { char* df = SvPV(*svp, lna); if (DBIc_TRACE_LEVEL(imp_xxh) >= 2) PerlIO_printf(DBIc_LOGPIO(imp_xxh), "imp_dbh->mysql_dr_connect: Setting" \ " init command (%s).\n", df); mysql_options(sock, MYSQL_INIT_COMMAND, df); } if ((svp = hv_fetch(hv, "mysql_compression", 17, FALSE)) && *svp && SvTRUE(*svp)) { if (DBIc_TRACE_LEVEL(imp_xxh) >= 2) PerlIO_printf(DBIc_LOGPIO(imp_xxh), "imp_dbh->mysql_dr_connect: Enabling" \ " compression.\n"); mysql_options(sock, MYSQL_OPT_COMPRESS, NULL); } if ((svp = hv_fetch(hv, "mysql_connect_timeout", 21, FALSE)) && *svp && SvTRUE(*svp)) { int to = SvIV(*svp); if (DBIc_TRACE_LEVEL(imp_xxh) >= 2) PerlIO_printf(DBIc_LOGPIO(imp_xxh), "imp_dbh->mysql_dr_connect: Setting" \ " connect timeout (%d).\n",to); mysql_options(sock, MYSQL_OPT_CONNECT_TIMEOUT, (const char *)&to); } if 
((svp = hv_fetch(hv, "mysql_write_timeout", 19, FALSE)) && *svp && SvTRUE(*svp)) { int to = SvIV(*svp); if (DBIc_TRACE_LEVEL(imp_xxh) >= 2) PerlIO_printf(DBIc_LOGPIO(imp_xxh), "imp_dbh->mysql_dr_connect: Setting" \ " write timeout (%d).\n",to); mysql_options(sock, MYSQL_OPT_WRITE_TIMEOUT, (const char *)&to); } if ((svp = hv_fetch(hv, "mysql_read_timeout", 18, FALSE)) && *svp && SvTRUE(*svp)) { int to = SvIV(*svp); if (DBIc_TRACE_LEVEL(imp_xxh) >= 2) PerlIO_printf(DBIc_LOGPIO(imp_xxh), "imp_dbh->mysql_dr_connect: Setting" \ " read timeout (%d).\n",to); mysql_options(sock, MYSQL_OPT_READ_TIMEOUT, (const char *)&to); } if ((svp = hv_fetch(hv, "mysql_skip_secure_auth", 22, FALSE)) && *svp && SvTRUE(*svp)) { my_bool secauth = 0; if (DBIc_TRACE_LEVEL(imp_xxh) >= 2) PerlIO_printf(DBIc_LOGPIO(imp_xxh), "imp_dbh->mysql_dr_connect: Skipping" \ " secure auth\n"); mysql_options(sock, MYSQL_SECURE_AUTH, &secauth); } if ((svp = hv_fetch(hv, "mysql_read_default_file", 23, FALSE)) && *svp && SvTRUE(*svp)) { char* df = SvPV(*svp, lna); if (DBIc_TRACE_LEVEL(imp_xxh) >= 2) PerlIO_printf(DBIc_LOGPIO(imp_xxh), "imp_dbh->mysql_dr_connect: Reading" \ " default file %s.\n", df); mysql_options(sock, MYSQL_READ_DEFAULT_FILE, df); } if ((svp = hv_fetch(hv, "mysql_read_default_group", 24, FALSE)) && *svp && SvTRUE(*svp)) { char* gr = SvPV(*svp, lna); if (DBIc_TRACE_LEVEL(imp_xxh) >= 2) PerlIO_printf(DBIc_LOGPIO(imp_xxh), "imp_dbh->mysql_dr_connect: Using" \ " default group %s.\n", gr); mysql_options(sock, MYSQL_READ_DEFAULT_GROUP, gr); } #if (MYSQL_VERSION_ID >= 50606) if ((svp = hv_fetch(hv, "mysql_conn_attrs", 16, FALSE)) && *svp) { HV* attrs = (HV*) SvRV(*svp); HE* entry = NULL; I32 num_entries = hv_iterinit(attrs); while (num_entries && (entry = hv_iternext(attrs))) { I32 retlen = 0; char *attr_name = hv_iterkey(entry, &retlen); SV *sv_attr_val = hv_iterval(attrs, entry); char *attr_val = SvPV(sv_attr_val, lna); mysql_options4(sock, MYSQL_OPT_CONNECT_ATTR_ADD, attr_name, attr_val); } } 
#endif if ((svp = hv_fetch(hv, "mysql_client_found_rows", 23, FALSE)) && *svp) { if (SvTRUE(*svp)) client_flag |= CLIENT_FOUND_ROWS; else client_flag &= ~CLIENT_FOUND_ROWS; } if ((svp = hv_fetch(hv, "mysql_use_result", 16, FALSE)) && *svp) { imp_dbh->use_mysql_use_result = SvTRUE(*svp); if (DBIc_TRACE_LEVEL(imp_xxh) >= 2) PerlIO_printf(DBIc_LOGPIO(imp_xxh), "imp_dbh->use_mysql_use_result: %d\n", imp_dbh->use_mysql_use_result); } if ((svp = hv_fetch(hv, "mysql_bind_type_guessing", 24, TRUE)) && *svp) { imp_dbh->bind_type_guessing= SvTRUE(*svp); if (DBIc_TRACE_LEVEL(imp_xxh) >= 2) PerlIO_printf(DBIc_LOGPIO(imp_xxh), "imp_dbh->bind_type_guessing: %d\n", imp_dbh->bind_type_guessing); } if ((svp = hv_fetch(hv, "mysql_bind_comment_placeholders", 31, FALSE)) && *svp) { imp_dbh->bind_comment_placeholders = SvTRUE(*svp); if (DBIc_TRACE_LEVEL(imp_xxh) >= 2) PerlIO_printf(DBIc_LOGPIO(imp_xxh), "imp_dbh->bind_comment_placeholders: %d\n", imp_dbh->bind_comment_placeholders); } if ((svp = hv_fetch(hv, "mysql_no_autocommit_cmd", 23, FALSE)) && *svp) { imp_dbh->no_autocommit_cmd= SvTRUE(*svp); if (DBIc_TRACE_LEVEL(imp_xxh) >= 2) PerlIO_printf(DBIc_LOGPIO(imp_xxh), "imp_dbh->no_autocommit_cmd: %d\n", imp_dbh->no_autocommit_cmd); } #if FABRIC_SUPPORT if ((svp = hv_fetch(hv, "mysql_use_fabric", 16, FALSE)) && *svp && SvTRUE(*svp)) { if (DBIc_TRACE_LEVEL(imp_xxh) >= 2) PerlIO_printf(DBIc_LOGPIO(imp_xxh), "imp_dbh->use_fabric: Enabling use of" \ " MySQL Fabric.\n"); mysql_options(sock, MYSQL_OPT_USE_FABRIC, NULL); } #endif #if defined(CLIENT_MULTI_STATEMENTS) if ((svp = hv_fetch(hv, "mysql_multi_statements", 22, FALSE)) && *svp) { if (SvTRUE(*svp)) client_flag |= CLIENT_MULTI_STATEMENTS; else client_flag &= ~CLIENT_MULTI_STATEMENTS; } #endif #if MYSQL_VERSION_ID >=SERVER_PREPARE_VERSION /* took out client_flag |= CLIENT_PROTOCOL_41; */ /* because libmysql.c already sets this no matter what */ if ((svp = hv_fetch(hv, "mysql_server_prepare", 20, FALSE)) && *svp) { if (SvTRUE(*svp)) { 
client_flag |= CLIENT_PROTOCOL_41; imp_dbh->use_server_side_prepare = TRUE; } else { client_flag &= ~CLIENT_PROTOCOL_41; imp_dbh->use_server_side_prepare = FALSE; } } if (DBIc_TRACE_LEVEL(imp_xxh) >= 2) PerlIO_printf(DBIc_LOGPIO(imp_xxh), "imp_dbh->use_server_side_prepare: %d\n", imp_dbh->use_server_side_prepare); #endif /* HELMUT */ #if defined(sv_utf8_decode) && MYSQL_VERSION_ID >=SERVER_PREPARE_VERSION if ((svp = hv_fetch(hv, "mysql_enable_utf8mb4", 20, FALSE)) && *svp && SvTRUE(*svp)) { mysql_options(sock, MYSQL_SET_CHARSET_NAME, "utf8mb4"); } else if ((svp = hv_fetch(hv, "mysql_enable_utf8", 17, FALSE)) && *svp) { /* Do not touch imp_dbh->enable_utf8 as we are called earlier * than it is set and mysql_options() must be before: * mysql_real_connect() */ mysql_options(sock, MYSQL_SET_CHARSET_NAME, (SvTRUE(*svp) ? "utf8" : "latin1")); if (DBIc_TRACE_LEVEL(imp_xxh) >= 2) PerlIO_printf(DBIc_LOGPIO(imp_xxh), "mysql_options: MYSQL_SET_CHARSET_NAME=%s\n", (SvTRUE(*svp) ? "utf8" : "latin1")); } #endif #if defined(DBD_MYSQL_WITH_SSL) && !defined(DBD_MYSQL_EMBEDDED) && \ (defined(CLIENT_SSL) || (MYSQL_VERSION_ID >= 40000)) if ((svp = hv_fetch(hv, "mysql_ssl", 9, FALSE)) && *svp) { if (SvTRUE(*svp)) { char *client_key = NULL; char *client_cert = NULL; char *ca_file = NULL; char *ca_path = NULL; char *cipher = NULL; STRLEN lna; #if MYSQL_VERSION_ID >= SSL_VERIFY_VERSION && MYSQL_VERSION_ID <= SSL_LAST_VERIFY_VERSION /* New code to utilise MySQLs new feature that verifies that the server's hostname that the client connects to matches that of the certificate */ my_bool ssl_verify_true = 0; if ((svp = hv_fetch(hv, "mysql_ssl_verify_server_cert", 28, FALSE)) && *svp) ssl_verify_true = SvTRUE(*svp); #endif if ((svp = hv_fetch(hv, "mysql_ssl_client_key", 20, FALSE)) && *svp) client_key = SvPV(*svp, lna); if ((svp = hv_fetch(hv, "mysql_ssl_client_cert", 21, FALSE)) && *svp) client_cert = SvPV(*svp, lna); if ((svp = hv_fetch(hv, "mysql_ssl_ca_file", 17, FALSE)) && *svp) ca_file = 
SvPV(*svp, lna); if ((svp = hv_fetch(hv, "mysql_ssl_ca_path", 17, FALSE)) && *svp) ca_path = SvPV(*svp, lna); if ((svp = hv_fetch(hv, "mysql_ssl_cipher", 16, FALSE)) && *svp) cipher = SvPV(*svp, lna); mysql_ssl_set(sock, client_key, client_cert, ca_file, ca_path, cipher); #if MYSQL_VERSION_ID >= SSL_VERIFY_VERSION && MYSQL_VERSION_ID <= SSL_LAST_VERIFY_VERSION mysql_options(sock, MYSQL_OPT_SSL_VERIFY_SERVER_CERT, &ssl_verify_true); #endif client_flag |= CLIENT_SSL; } } #endif #if (MYSQL_VERSION_ID >= 32349) /* * MySQL 3.23.49 disables LOAD DATA LOCAL by default. Use * mysql_local_infile=1 in the DSN to enable it. */ if ((svp = hv_fetch( hv, "mysql_local_infile", 18, FALSE)) && *svp) { unsigned int flag = SvTRUE(*svp); if (DBIc_TRACE_LEVEL(imp_xxh) >= 2) PerlIO_printf(DBIc_LOGPIO(imp_xxh), "imp_dbh->mysql_dr_connect: Using" \ " local infile %u.\n", flag); mysql_options(sock, MYSQL_OPT_LOCAL_INFILE, (const char *) &flag); } #endif } } if (DBIc_TRACE_LEVEL(imp_xxh) >= 2) PerlIO_printf(DBIc_LOGPIO(imp_xxh), "imp_dbh->mysql_dr_connect: client_flags = %d\n", client_flag); #if MYSQL_VERSION_ID >= MULTIPLE_RESULT_SET_VERSION client_flag|= CLIENT_MULTI_RESULTS; #endif result = mysql_real_connect(sock, host, user, password, dbname, portNr, mysql_socket, client_flag); if (DBIc_TRACE_LEVEL(imp_xxh) >= 2) PerlIO_printf(DBIc_LOGPIO(imp_xxh), "imp_dbh->mysql_dr_connect: <-"); if (result) { #if MYSQL_VERSION_ID >=SERVER_PREPARE_VERSION /* connection succeeded. */ /* imp_dbh == NULL when mysql_dr_connect() is called from mysql.xs functions (_admin_internal(),_ListDBs()). */ if (!(result->client_flag & CLIENT_PROTOCOL_41) && imp_dbh) imp_dbh->use_server_side_prepare = FALSE; #endif #if MYSQL_ASYNC if(imp_dbh) { imp_dbh->async_query_in_flight = NULL; } #endif /* we turn off Mysql's auto reconnect and handle re-connecting ourselves so that we can keep track of when this happens. 
*/ result->reconnect=0; } else { /* sock was allocated with mysql_init() fixes: https://rt.cpan.org/Ticket/Display.html?id=86153 Safefree(sock); rurban: No, we still need this handle later in mysql_dr_error(). RT #97625. It will be freed as imp_dbh->pmysql in dbd_db_destroy(), which is called by the DESTROY handler. */ } return result; } } /* safe_hv_fetch */ static char *safe_hv_fetch(pTHX_ HV *hv, const char *name, int name_length) { SV** svp; STRLEN len; char *res= NULL; if ((svp= hv_fetch(hv, name, name_length, FALSE))) { res= SvPV(*svp, len); if (!len) res= NULL; } return res; } /* Frontend for mysql_dr_connect */ static int my_login(pTHX_ SV* dbh, imp_dbh_t *imp_dbh) { SV* sv; HV* hv; char* dbname; char* host; char* port; char* user; char* password; char* mysql_socket; int result; D_imp_xxh(dbh); /* TODO- resolve this so that it is set only if DBI is 1.607 */ #define TAKE_IMP_DATA_VERSION 1 #if TAKE_IMP_DATA_VERSION if (DBIc_has(imp_dbh, DBIcf_IMPSET)) { /* eg from take_imp_data() */ if (DBIc_has(imp_dbh, DBIcf_ACTIVE)) { if (DBIc_TRACE_LEVEL(imp_xxh) >= 2) PerlIO_printf(DBIc_LOGPIO(imp_xxh), "my_login skip connect\n"); /* tell our parent we've adopted an active child */ ++DBIc_ACTIVE_KIDS(DBIc_PARENT_COM(imp_dbh)); return TRUE; } if (DBIc_TRACE_LEVEL(imp_xxh) >= 2) PerlIO_printf(DBIc_LOGPIO(imp_xxh), "my_login IMPSET but not ACTIVE so connect not skipped\n"); } #endif sv = DBIc_IMP_DATA(imp_dbh); if (!sv || !SvROK(sv)) return FALSE; hv = (HV*) SvRV(sv); if (SvTYPE(hv) != SVt_PVHV) return FALSE; host= safe_hv_fetch(aTHX_ hv, "host", 4); port= safe_hv_fetch(aTHX_ hv, "port", 4); user= safe_hv_fetch(aTHX_ hv, "user", 4); password= safe_hv_fetch(aTHX_ hv, "password", 8); dbname= safe_hv_fetch(aTHX_ hv, "database", 8); mysql_socket= safe_hv_fetch(aTHX_ hv, "mysql_socket", 12); if (DBIc_TRACE_LEVEL(imp_xxh) >= 2) PerlIO_printf(DBIc_LOGPIO(imp_xxh), "imp_dbh->my_login : dbname = %s, uid = %s, pwd = %s," \ "host = %s, port = %s\n", dbname ? dbname : "NULL", user ? 
user : "NULL", password ? password : "NULL", host ? host : "NULL", port ? port : "NULL"); if (!imp_dbh->pmysql) { Newz(908, imp_dbh->pmysql, 1, MYSQL); } result = mysql_dr_connect(dbh, imp_dbh->pmysql, mysql_socket, host, port, user, password, dbname, imp_dbh) ? TRUE : FALSE; return result; } /************************************************************************** * * Name: dbd_db_login * * Purpose: Called for connecting to a database and logging in. * * Input: dbh - database handle being initialized * imp_dbh - drivers private database handle data * dbname - the database we want to log into; may be like * "dbname:host" or "dbname:host:port" * user - user name to connect as * password - password to connect with * * Returns: TRUE for success, FALSE otherwise; do_error has already * been called in the latter case * **************************************************************************/ int dbd_db_login(SV* dbh, imp_dbh_t* imp_dbh, char* dbname, char* user, char* password) { #ifdef dTHR dTHR; #endif dTHX; D_imp_xxh(dbh); if (DBIc_TRACE_LEVEL(imp_xxh) >= 2) PerlIO_printf(DBIc_LOGPIO(imp_xxh), "imp_dbh->connect: dsn = %s, uid = %s, pwd = %s\n", dbname ? dbname : "NULL", user ? user : "NULL", password ? password : "NULL"); imp_dbh->stats.auto_reconnects_ok= 0; imp_dbh->stats.auto_reconnects_failed= 0; imp_dbh->bind_type_guessing= FALSE; imp_dbh->bind_comment_placeholders= FALSE; imp_dbh->has_transactions= TRUE; /* Safer we flip this to TRUE perl side if we detect a mod_perl env. 
*/ imp_dbh->auto_reconnect = FALSE; /* HELMUT */ #if defined(sv_utf8_decode) && MYSQL_VERSION_ID >=SERVER_PREPARE_VERSION imp_dbh->enable_utf8 = FALSE; /* initialize mysql_enable_utf8 */ imp_dbh->enable_utf8mb4 = FALSE; /* initialize mysql_enable_utf8mb4 */ #endif if (!my_login(aTHX_ dbh, imp_dbh)) { if(imp_dbh->pmysql) { do_error(dbh, mysql_errno(imp_dbh->pmysql), mysql_error(imp_dbh->pmysql) ,mysql_sqlstate(imp_dbh->pmysql)); Safefree(imp_dbh->pmysql); } return FALSE; } /* * Tell DBI, that dbh->disconnect should be called for this handle */ DBIc_ACTIVE_on(imp_dbh); /* Tell DBI, that dbh->destroy should be called for this handle */ DBIc_on(imp_dbh, DBIcf_IMPSET); return TRUE; } /*************************************************************************** * * Name: dbd_db_commit * dbd_db_rollback * * Purpose: You guess what they should do. * * Input: dbh - database handle being committed or rolled back * imp_dbh - drivers private database handle data * * Returns: TRUE for success, FALSE otherwise; do_error has already * been called in the latter case * **************************************************************************/ int dbd_db_commit(SV* dbh, imp_dbh_t* imp_dbh) { if (DBIc_has(imp_dbh, DBIcf_AutoCommit)) return FALSE; ASYNC_CHECK_RETURN(dbh, FALSE); if (imp_dbh->has_transactions) { #if MYSQL_VERSION_ID < SERVER_PREPARE_VERSION if (mysql_real_query(imp_dbh->pmysql, "COMMIT", 6)) #else if (mysql_commit(imp_dbh->pmysql)) #endif { do_error(dbh, mysql_errno(imp_dbh->pmysql), mysql_error(imp_dbh->pmysql) ,mysql_sqlstate(imp_dbh->pmysql)); return FALSE; } } else do_warn(dbh, JW_ERR_NOT_IMPLEMENTED, "Commit ineffective because transactions are not available"); return TRUE; } /* dbd_db_rollback */ int dbd_db_rollback(SV* dbh, imp_dbh_t* imp_dbh) { /* croak, if not in AutoCommit mode */ if (DBIc_has(imp_dbh, DBIcf_AutoCommit)) return FALSE; ASYNC_CHECK_RETURN(dbh, FALSE); if (imp_dbh->has_transactions) { #if MYSQL_VERSION_ID < SERVER_PREPARE_VERSION if 
      (mysql_real_query(imp_dbh->pmysql, "ROLLBACK", 8))
#else
    if (mysql_rollback(imp_dbh->pmysql))
#endif
    {
      do_error(dbh, mysql_errno(imp_dbh->pmysql),
               mysql_error(imp_dbh->pmysql) ,mysql_sqlstate(imp_dbh->pmysql));
      return FALSE;
    }
  }
  else
    do_error(dbh, JW_ERR_NOT_IMPLEMENTED,
             "Rollback ineffective because transactions are not available"
             ,NULL);
  return TRUE;
}

/*
 ***************************************************************************
 *
 *  Name:    dbd_db_disconnect
 *
 *  Purpose: Disconnect a database handle from its database
 *
 *  Input:   dbh - database handle being disconnected
 *           imp_dbh - drivers private database handle data
 *
 *  Returns: TRUE for success, FALSE otherwise; do_error has already
 *           been called in the latter case
 *
 **************************************************************************/

int dbd_db_disconnect(SV* dbh, imp_dbh_t* imp_dbh)
{
#ifdef dTHR
  dTHR;
#endif
  dTHX;
  D_imp_xxh(dbh);

  /* We assume that disconnect will always work       */
  /* since most errors imply already disconnected.    */
  DBIc_ACTIVE_off(imp_dbh);
  if (DBIc_TRACE_LEVEL(imp_xxh) >= 2)
    PerlIO_printf(DBIc_LOGPIO(imp_xxh), "imp_dbh->pmysql: %p\n",
                  imp_dbh->pmysql);
  /* Close the server connection; the MYSQL struct itself is freed later
     in dbd_db_destroy(), not here. */
  mysql_close(imp_dbh->pmysql );

  /* We don't free imp_dbh since a reference still exists    */
  /* The DESTROY method is the only one to 'free' memory.    */
  return TRUE;
}

/***************************************************************************
 *
 *  Name:    dbd_discon_all
 *
 *  Purpose: Disconnect all database handles at shutdown time
 *
 *  Input:   dbh - database handle being disconnected
 *           imp_dbh - drivers private database handle data
 *
 *  Returns: TRUE for success, FALSE otherwise; do_error has already
 *           been called in the latter case
 *
 **************************************************************************/

int dbd_discon_all (SV *drh, imp_drh_t *imp_drh) {
#if defined(dTHR)
  dTHR;
#endif
  dTHX;
  D_imp_xxh(drh);

#if defined(DBD_MYSQL_EMBEDDED)
  if (imp_drh->embedded.state)
  {
    if (DBIc_TRACE_LEVEL(imp_xxh) >= 2)
      PerlIO_printf(DBIc_LOGPIO(imp_xxh), "Stop embedded server\n");

    mysql_server_end();
    if (imp_drh->embedded.groups)
    {
      (void) SvREFCNT_dec(imp_drh->embedded.groups);
      imp_drh->embedded.groups = NULL;
    }

    if (imp_drh->embedded.args)
    {
      (void) SvREFCNT_dec(imp_drh->embedded.args);
      imp_drh->embedded.args = NULL;
    }
  }
#else
  mysql_server_end();
#endif

  /* The disconnect_all concept is flawed and needs more work */
  if (!PL_dirty && !SvTRUE(perl_get_sv("DBI::PERL_ENDING",0)))
  {
    sv_setiv(DBIc_ERR(imp_drh), (IV)1);
    sv_setpv(DBIc_ERRSTR(imp_drh),
             (char*)"disconnect_all not implemented");
    /* NO EFFECT DBIh_EVENT2(drh, ERROR_event,
      DBIc_ERR(imp_drh), DBIc_ERRSTR(imp_drh)); */
    return FALSE;
  }
  /* During global destruction: suppress further destructor work. */
  PL_perl_destruct_level = 0;
  return FALSE;
}

/****************************************************************************
 *
 *  Name:    dbd_db_destroy
 *
 *  Purpose: Our part of the dbh destructor
 *
 *  Input:   dbh - database handle being destroyed
 *           imp_dbh - drivers private database handle data
 *
 *  Returns: Nothing
 *
 **************************************************************************/

void dbd_db_destroy(SV* dbh, imp_dbh_t* imp_dbh) {

    /*
     *  Being on the safe side never hurts ...
*/ if (DBIc_ACTIVE(imp_dbh)) { if (imp_dbh->has_transactions) { if (!DBIc_has(imp_dbh, DBIcf_AutoCommit)) #if MYSQL_VERSION_ID < SERVER_PREPARE_VERSION if ( mysql_real_query(imp_dbh->pmysql, "ROLLBACK", 8)) #else if (mysql_rollback(imp_dbh->pmysql)) #endif do_error(dbh, TX_ERR_ROLLBACK,"ROLLBACK failed" ,NULL); } dbd_db_disconnect(dbh, imp_dbh); } Safefree(imp_dbh->pmysql); /* Tell DBI, that dbh->destroy must no longer be called */ DBIc_off(imp_dbh, DBIcf_IMPSET); } /* *************************************************************************** * * Name: dbd_db_STORE_attrib * * Purpose: Function for storing dbh attributes; we currently support * just nothing. :-) * * Input: dbh - database handle being modified * imp_dbh - drivers private database handle data * keysv - the attribute name * valuesv - the attribute value * * Returns: TRUE for success, FALSE otherwise * **************************************************************************/ int dbd_db_STORE_attrib( SV* dbh, imp_dbh_t* imp_dbh, SV* keysv, SV* valuesv ) { dTHX; STRLEN kl; char *key = SvPV(keysv, kl); SV *cachesv = Nullsv; int cacheit = FALSE; const bool bool_value = SvTRUE(valuesv); if (kl==10 && strEQ(key, "AutoCommit")) { if (imp_dbh->has_transactions) { bool oldval = DBIc_has(imp_dbh,DBIcf_AutoCommit) ? 1 : 0; if (bool_value == oldval) return TRUE; /* if setting AutoCommit on ... */ if (!imp_dbh->no_autocommit_cmd) { if ( #if MYSQL_VERSION_ID >=SERVER_PREPARE_VERSION mysql_autocommit(imp_dbh->pmysql, bool_value) #else mysql_real_query(imp_dbh->pmysql, bool_value ? "SET AUTOCOMMIT=1" : "SET AUTOCOMMIT=0", 16) #endif ) { do_error(dbh, TX_ERR_AUTOCOMMIT, bool_value ? "Turning on AutoCommit failed" : "Turning off AutoCommit failed" ,NULL); return TRUE; /* TRUE means we handled it - important to avoid spurious errors */ } } DBIc_set(imp_dbh, DBIcf_AutoCommit, bool_value); } else { /* * We do support neither transactions nor "AutoCommit". * But we stub it. 
:-) */ if (!bool_value) { do_error(dbh, JW_ERR_NOT_IMPLEMENTED, "Transactions not supported by database" ,NULL); croak("Transactions not supported by database"); } } } else if (kl == 16 && strEQ(key,"mysql_use_result")) imp_dbh->use_mysql_use_result = bool_value; else if (kl == 20 && strEQ(key,"mysql_auto_reconnect")) imp_dbh->auto_reconnect = bool_value; else if (kl == 20 && strEQ(key, "mysql_server_prepare")) imp_dbh->use_server_side_prepare = bool_value; else if (kl == 23 && strEQ(key,"mysql_no_autocommit_cmd")) imp_dbh->no_autocommit_cmd = bool_value; else if (kl == 24 && strEQ(key,"mysql_bind_type_guessing")) imp_dbh->bind_type_guessing = bool_value; else if (kl == 31 && strEQ(key,"mysql_bind_comment_placeholders")) imp_dbh->bind_type_guessing = bool_value; #if defined(sv_utf8_decode) && MYSQL_VERSION_ID >=SERVER_PREPARE_VERSION else if (kl == 17 && strEQ(key, "mysql_enable_utf8")) imp_dbh->enable_utf8 = bool_value; else if (kl == 20 && strEQ(key, "mysql_enable_utf8mb4")) imp_dbh->enable_utf8mb4 = bool_value; #endif #if FABRIC_SUPPORT else if (kl == 22 && strEQ(key, "mysql_fabric_opt_group")) mysql_options(imp_dbh->pmysql, FABRIC_OPT_GROUP, (void *)SvPVbyte_nolen(valuesv)); else if (kl == 29 && strEQ(key, "mysql_fabric_opt_default_mode")) { if (SvOK(valuesv)) { STRLEN len; const char *str = SvPVbyte(valuesv, len); if ( len == 0 || ( len == 2 && (strnEQ(str, "ro", 3) || strnEQ(str, "rw", 3)) ) ) mysql_options(imp_dbh->pmysql, FABRIC_OPT_DEFAULT_MODE, len == 0 ? 
NULL : str); else croak("Valid settings for FABRIC_OPT_DEFAULT_MODE are 'ro', 'rw', or undef/empty string"); } else { mysql_options(imp_dbh->pmysql, FABRIC_OPT_DEFAULT_MODE, NULL); } } else if (kl == 21 && strEQ(key, "mysql_fabric_opt_mode")) { STRLEN len; const char *str = SvPVbyte(valuesv, len); if (len != 2 || (strnNE(str, "ro", 3) && strnNE(str, "rw", 3))) croak("Valid settings for FABRIC_OPT_MODE are 'ro' or 'rw'"); mysql_options(imp_dbh->pmysql, FABRIC_OPT_MODE, str); } else if (kl == 34 && strEQ(key, "mysql_fabric_opt_group_credentials")) { croak("'fabric_opt_group_credentials' is not supported"); } #endif else return FALSE; /* Unknown key */ if (cacheit) /* cache value for later DBI 'quick' fetch? */ (void)hv_store((HV*)SvRV(dbh), key, kl, cachesv, 0); return TRUE; } /*************************************************************************** * * Name: dbd_db_FETCH_attrib * * Purpose: Function for fetching dbh attributes * * Input: dbh - database handle being queried * imp_dbh - drivers private database handle data * keysv - the attribute name * * Returns: An SV*, if successful; NULL otherwise * * Notes: Do not forget to call sv_2mortal in the former case! 
* **************************************************************************/ static SV* my_ulonglong2str(pTHX_ my_ulonglong val) { char buf[64]; char *ptr = buf + sizeof(buf) - 1; if (val == 0) return newSVpvn("0", 1); *ptr = '\0'; while (val > 0) { *(--ptr) = ('0' + (val % 10)); val = val / 10; } return newSVpvn(ptr, (buf+ sizeof(buf) - 1) - ptr); } SV* dbd_db_FETCH_attrib(SV *dbh, imp_dbh_t *imp_dbh, SV *keysv) { dTHX; STRLEN kl; char *key = SvPV(keysv, kl); SV* result = NULL; dbh= dbh; switch (*key) { case 'A': if (strEQ(key, "AutoCommit")) { if (imp_dbh->has_transactions) return sv_2mortal(boolSV(DBIc_has(imp_dbh,DBIcf_AutoCommit))); /* Default */ return &PL_sv_yes; } break; } if (strncmp(key, "mysql_", 6) == 0) { key = key+6; kl = kl-6; } /* MONTY: Check if kl should not be used or used everywhere */ switch(*key) { case 'a': if (kl == strlen("auto_reconnect") && strEQ(key, "auto_reconnect")) result= sv_2mortal(newSViv(imp_dbh->auto_reconnect)); break; case 'b': if (kl == strlen("bind_type_guessing") && strEQ(key, "bind_type_guessing")) { result = sv_2mortal(newSViv(imp_dbh->bind_type_guessing)); } else if (kl == strlen("bind_comment_placeholders") && strEQ(key, "bind_comment_placeholders")) { result = sv_2mortal(newSViv(imp_dbh->bind_comment_placeholders)); } break; case 'c': if (kl == 10 && strEQ(key, "clientinfo")) { const char* clientinfo = mysql_get_client_info(); result= clientinfo ? sv_2mortal(newSVpvn(clientinfo, strlen(clientinfo))) : &PL_sv_undef; } else if (kl == 13 && strEQ(key, "clientversion")) { result= sv_2mortal(my_ulonglong2str(aTHX_ mysql_get_client_version())); } break; case 'e': if (strEQ(key, "errno")) result= sv_2mortal(newSViv((IV)mysql_errno(imp_dbh->pmysql))); else if ( strEQ(key, "error") || strEQ(key, "errmsg")) { /* Note that errmsg is obsolete, as of 2.09! 
*/ const char* msg = mysql_error(imp_dbh->pmysql); result= sv_2mortal(newSVpvn(msg, strlen(msg))); } /* HELMUT */ #if defined(sv_utf8_decode) && MYSQL_VERSION_ID >=SERVER_PREPARE_VERSION else if (kl == strlen("enable_utf8mb4") && strEQ(key, "enable_utf8mb4")) result = sv_2mortal(newSViv(imp_dbh->enable_utf8mb4)); else if (kl == strlen("enable_utf8") && strEQ(key, "enable_utf8")) result = sv_2mortal(newSViv(imp_dbh->enable_utf8)); #endif break; case 'd': if (strEQ(key, "dbd_stats")) { HV* hv = newHV(); (void)hv_store( hv, "auto_reconnects_ok", strlen("auto_reconnects_ok"), newSViv(imp_dbh->stats.auto_reconnects_ok), 0 ); (void)hv_store( hv, "auto_reconnects_failed", strlen("auto_reconnects_failed"), newSViv(imp_dbh->stats.auto_reconnects_failed), 0 ); result= sv_2mortal((newRV_noinc((SV*)hv))); } case 'h': if (strEQ(key, "hostinfo")) { const char* hostinfo = mysql_get_host_info(imp_dbh->pmysql); result= hostinfo ? sv_2mortal(newSVpvn(hostinfo, strlen(hostinfo))) : &PL_sv_undef; } break; case 'i': if (strEQ(key, "info")) { const char* info = mysql_info(imp_dbh->pmysql); result= info ? sv_2mortal(newSVpvn(info, strlen(info))) : &PL_sv_undef; } else if (kl == 8 && strEQ(key, "insertid")) /* We cannot return an IV, because the insertid is a long. */ result= sv_2mortal(my_ulonglong2str(aTHX_ mysql_insert_id(imp_dbh->pmysql))); break; case 'n': if (kl == strlen("no_autocommit_cmd") && strEQ(key, "no_autocommit_cmd")) result = sv_2mortal(newSViv(imp_dbh->no_autocommit_cmd)); break; case 'p': if (kl == 9 && strEQ(key, "protoinfo")) result= sv_2mortal(newSViv(mysql_get_proto_info(imp_dbh->pmysql))); break; case 's': if (kl == 10 && strEQ(key, "serverinfo")) { const char* serverinfo = mysql_get_server_info(imp_dbh->pmysql); result= serverinfo ? 
sv_2mortal(newSVpvn(serverinfo, strlen(serverinfo))) : &PL_sv_undef; } else if (kl == 13 && strEQ(key, "serverversion")) result= sv_2mortal(my_ulonglong2str(aTHX_ mysql_get_server_version(imp_dbh->pmysql))); else if (strEQ(key, "sock")) result= sv_2mortal(newSViv(PTR2IV(imp_dbh->pmysql))); else if (strEQ(key, "sockfd")) result= sv_2mortal(newSViv((IV) imp_dbh->pmysql->net.fd)); else if (strEQ(key, "stat")) { const char* stats = mysql_stat(imp_dbh->pmysql); result= stats ? sv_2mortal(newSVpvn(stats, strlen(stats))) : &PL_sv_undef; } else if (strEQ(key, "stats")) { /* Obsolete, as of 2.09 */ const char* stats = mysql_stat(imp_dbh->pmysql); result= stats ? sv_2mortal(newSVpvn(stats, strlen(stats))) : &PL_sv_undef; } else if (kl == 14 && strEQ(key,"server_prepare")) result= sv_2mortal(newSViv((IV) imp_dbh->use_server_side_prepare)); break; case 't': if (kl == 9 && strEQ(key, "thread_id")) result= sv_2mortal(newSViv(mysql_thread_id(imp_dbh->pmysql))); break; case 'w': if (kl == 13 && strEQ(key, "warning_count")) result= sv_2mortal(newSViv(mysql_warning_count(imp_dbh->pmysql))); break; case 'u': if (strEQ(key, "use_result")) { result= sv_2mortal(newSViv((IV) imp_dbh->use_mysql_use_result)); } break; } if (result== NULL) return Nullsv; return result; } /* ************************************************************************** * * Name: dbd_st_prepare * * Purpose: Called for preparing an SQL statement; our part of the * statement handle constructor * * Input: sth - statement handle being initialized * imp_sth - drivers private statement handle data * statement - pointer to string with SQL statement * attribs - statement attributes, currently not in use * * Returns: TRUE for success, FALSE otherwise; do_error will * be called in the latter case * **************************************************************************/ int dbd_st_prepare( SV *sth, imp_sth_t *imp_sth, char *statement, SV *attribs) { int i; SV **svp; dTHX; #if MYSQL_VERSION_ID >= SERVER_PREPARE_VERSION 
#if MYSQL_VERSION_ID < CALL_PLACEHOLDER_VERSION
  char *str_ptr, *str_last_ptr;
#if MYSQL_VERSION_ID < LIMIT_PLACEHOLDER_VERSION
  int limit_flag=0;
#endif
#endif
  int col_type, prepare_retval;
  MYSQL_BIND *bind, *bind_end;
  imp_sth_phb_t *fbind;
#endif
  D_imp_xxh(sth);
  D_imp_dbh_from_sth;

  if (DBIc_TRACE_LEVEL(imp_xxh) >= 2)
    PerlIO_printf(DBIc_LOGPIO(imp_xxh),
                  "\t-> dbd_st_prepare MYSQL_VERSION_ID %d, SQL statement: %s\n",
                  MYSQL_VERSION_ID, statement);

#if MYSQL_VERSION_ID >= SERVER_PREPARE_VERSION
  /* Set default value of 'mysql_server_prepare' attribute for sth from dbh */
  imp_sth->use_server_side_prepare= imp_dbh->use_server_side_prepare;
  if (attribs)
  {
    svp= DBD_ATTRIB_GET_SVP(attribs, "mysql_server_prepare", 20);
    imp_sth->use_server_side_prepare = (svp) ?
      SvTRUE(*svp) : imp_dbh->use_server_side_prepare;

    svp = DBD_ATTRIB_GET_SVP(attribs, "async", 5);

    if(svp && SvTRUE(*svp)) {
#if MYSQL_ASYNC
        /* Async and server-side prepare are mutually exclusive. */
        imp_sth->is_async = TRUE;
        imp_sth->use_server_side_prepare = FALSE;
#else
        do_error(sth, 2000,
                 "Async support was not built into this version of DBD::mysql", "HY000");
        return 0;
#endif
    }
  }

  imp_sth->fetch_done= 0;
#endif

  imp_sth->done_desc= 0;
  imp_sth->result= NULL;
  imp_sth->currow= 0;

  /* Set default value of 'mysql_use_result' attribute for sth from dbh */
  svp= DBD_ATTRIB_GET_SVP(attribs, "mysql_use_result", 16);
  imp_sth->use_mysql_use_result= svp ?
    SvTRUE(*svp) : imp_dbh->use_mysql_use_result;

  for (i= 0; i < AV_ATTRIB_LAST; i++)
    imp_sth->av_attr[i]= Nullav;

  /*
     Clean-up previous result set(s) for sth to prevent
     'Commands out of sync' error
  */
  mysql_st_free_result_sets(sth, imp_sth);

#if MYSQL_VERSION_ID >= SERVER_PREPARE_VERSION && MYSQL_VERSION_ID < CALL_PLACEHOLDER_VERSION
  if (imp_sth->use_server_side_prepare)
  {
    if (DBIc_TRACE_LEVEL(imp_xxh) >= 2)
      PerlIO_printf(DBIc_LOGPIO(imp_xxh),
                    "\t\tuse_server_side_prepare set, check restrictions\n");
    /*
      This code is here because placeholder support is not implemented for
      statements with :-
      1. LIMIT < 5.0.7
      2. CALL < 5.5.3 (Added support for out & inout parameters)
      In these cases we have to disable server side prepared statements
      NOTE: These checks could cause a false positive on statements which
      include columns / table names that match "call " or " limit "
    */
    if (DBIc_TRACE_LEVEL(imp_xxh) >= 2)
      PerlIO_printf(DBIc_LOGPIO(imp_xxh),
#if MYSQL_VERSION_ID < LIMIT_PLACEHOLDER_VERSION
                    "\t\tneed to test for LIMIT & CALL\n");
#else
                    "\t\tneed to test for restrictions\n");
#endif
    str_last_ptr = statement + strlen(statement);
    for (str_ptr= statement; str_ptr < str_last_ptr; str_ptr++)
    {
#if MYSQL_VERSION_ID < LIMIT_PLACEHOLDER_VERSION
      /*
        Place holders not supported in LIMIT's
      */
      if (limit_flag)
      {
        if (*str_ptr == '?')
        {
          if (DBIc_TRACE_LEVEL(imp_xxh) >= 2)
            PerlIO_printf(DBIc_LOGPIO(imp_xxh),
                          "\t\tLIMIT and ? found, set to use_server_side_prepare=0\n");
          /* ... then we do not want to try server side prepare (use emulation) */
          imp_sth->use_server_side_prepare= 0;
          break;
        }
      }
      else if (str_ptr < str_last_ptr - 6 &&
          isspace(*(str_ptr + 0)) &&
          tolower(*(str_ptr + 1)) == 'l' &&
          tolower(*(str_ptr + 2)) == 'i' &&
          tolower(*(str_ptr + 3)) == 'm' &&
          tolower(*(str_ptr + 4)) == 'i' &&
          tolower(*(str_ptr + 5)) == 't' &&
          isspace(*(str_ptr + 6)))
      {
        if (DBIc_TRACE_LEVEL(imp_xxh) >= 2)
          PerlIO_printf(DBIc_LOGPIO(imp_xxh), "LIMIT set limit flag to 1\n");
        limit_flag= 1;
      }
#endif
      /*
        Place holders not supported in CALL's
      */
      if (str_ptr < str_last_ptr - 4 &&
           tolower(*(str_ptr + 0)) == 'c' &&
           tolower(*(str_ptr + 1)) == 'a' &&
           tolower(*(str_ptr + 2)) == 'l' &&
           tolower(*(str_ptr + 3)) == 'l' &&
           isspace(*(str_ptr + 4)))
      {
        if (DBIc_TRACE_LEVEL(imp_xxh) >= 2)
          PerlIO_printf(DBIc_LOGPIO(imp_xxh), "Disable PS mode for CALL()\n");
        imp_sth->use_server_side_prepare= 0;
        break;
      }
    }
  }
#endif

#if MYSQL_VERSION_ID >= SERVER_PREPARE_VERSION
  if (imp_sth->use_server_side_prepare)
  {
    if (DBIc_TRACE_LEVEL(imp_xxh) >= 2)
      PerlIO_printf(DBIc_LOGPIO(imp_xxh),
                    "\t\tuse_server_side_prepare set\n");
    /* do we really need this? If we do, we should return, not just continue */
    if (imp_sth->stmt)
      fprintf(stderr,
              "ERROR: Trying to prepare new stmt while we have \
already not closed one \n");

    imp_sth->stmt= mysql_stmt_init(imp_dbh->pmysql);

    if (! imp_sth->stmt)
    {
      if (DBIc_TRACE_LEVEL(imp_xxh) >= 2)
        PerlIO_printf(DBIc_LOGPIO(imp_xxh),
                      "\t\tERROR: Unable to return MYSQL_STMT structure \
from mysql_stmt_init(): ERROR NO: %d ERROR MSG:%s\n",
                      mysql_errno(imp_dbh->pmysql),
                      mysql_error(imp_dbh->pmysql));
    }

    prepare_retval= mysql_stmt_prepare(imp_sth->stmt,
                                       statement,
                                       strlen(statement));
    if (DBIc_TRACE_LEVEL(imp_xxh) >= 2)
      PerlIO_printf(DBIc_LOGPIO(imp_xxh),
                    "\t\tmysql_stmt_prepare returned %d\n",
                    prepare_retval);

    if (prepare_retval)
    {
      if (DBIc_TRACE_LEVEL(imp_xxh) >= 2)
        PerlIO_printf(DBIc_LOGPIO(imp_xxh),
                      "\t\tmysql_stmt_prepare %d %s\n",
                      mysql_stmt_errno(imp_sth->stmt),
                      mysql_stmt_error(imp_sth->stmt));

      /* For commands that are not supported by server side prepared
         statement mechanism lets try to pass them through regular API */
      if (mysql_stmt_errno(imp_sth->stmt) == ER_UNSUPPORTED_PS)
      {
        if (DBIc_TRACE_LEVEL(imp_xxh) >= 2)
          PerlIO_printf(DBIc_LOGPIO(imp_xxh),
                        "\t\tSETTING imp_sth->use_server_side_prepare to 0\n");
        imp_sth->use_server_side_prepare= 0;
      }
      else
      {
        do_error(sth, mysql_stmt_errno(imp_sth->stmt),
                 mysql_stmt_error(imp_sth->stmt),
                 mysql_sqlstate(imp_dbh->pmysql));
        mysql_stmt_close(imp_sth->stmt);
        imp_sth->stmt= NULL;
        return FALSE;
      }
    }
    else
    {
      DBIc_NUM_PARAMS(imp_sth)= mysql_stmt_param_count(imp_sth->stmt);
      /* mysql_stmt_param_count */

      if (DBIc_NUM_PARAMS(imp_sth) > 0)
      {
        int has_statement_fields= imp_sth->stmt->fields != 0;
        /* Allocate memory for bind variables */
        imp_sth->bind=          alloc_bind(DBIc_NUM_PARAMS(imp_sth));
        imp_sth->fbind=         alloc_fbind(DBIc_NUM_PARAMS(imp_sth));
        imp_sth->has_been_bound= 0;

        /* Initialize ph variables with NULL values */
        for (i= 0,
             bind=      imp_sth->bind,
             fbind=     imp_sth->fbind,
             bind_end=  bind+DBIc_NUM_PARAMS(imp_sth);
             bind < bind_end ;
             bind++, fbind++, i++ )
        {
          /*
            if this statement has a result set, field types will be
            correctly identified. If there is no result set, such as
            with an INSERT, fields will not be defined, and all
            buffer_type will default to MYSQL_TYPE_VAR_STRING
          */
          col_type= (has_statement_fields ?
                     imp_sth->stmt->fields[i].type : MYSQL_TYPE_STRING);

          bind->buffer_type=  mysql_to_perl_type(col_type);

          if (DBIc_TRACE_LEVEL(imp_xxh) >= 2)
            PerlIO_printf(DBIc_LOGPIO(imp_xxh),
                          "\t\tmysql_to_perl_type returned %d\n", col_type);

          bind->buffer=       NULL;
          bind->length=       &(fbind->length);
          bind->is_null=      (char*) &(fbind->is_null);
          fbind->is_null=     1;
          fbind->length=      0;
        }
      }
    }
  }
#endif

#if MYSQL_VERSION_ID >= SERVER_PREPARE_VERSION
  /* Count the number of parameters (driver, vs server-side) */
  if (imp_sth->use_server_side_prepare == 0)
    DBIc_NUM_PARAMS(imp_sth) = count_params((imp_xxh_t *)imp_dbh, aTHX_ statement,
                                            imp_dbh->bind_comment_placeholders);
#else
  DBIc_NUM_PARAMS(imp_sth) = count_params((imp_xxh_t *)imp_dbh, aTHX_ statement,
                                          imp_dbh->bind_comment_placeholders);
#endif

  /* Allocate memory for parameters */
  imp_sth->params= alloc_param(DBIc_NUM_PARAMS(imp_sth));
  DBIc_IMPSET_on(imp_sth);

  if (DBIc_TRACE_LEVEL(imp_xxh) >= 2)
    PerlIO_printf(DBIc_LOGPIO(imp_xxh), "\t<- dbd_st_prepare\n");
  return 1;
}

/***************************************************************************
 * Name: dbd_st_free_result_sets
 *
 * Purpose: Clean-up single or multiple result sets (if any)
 *
 * Inputs: sth - Statement handle
 *         imp_sth - driver's private statement handle
 *
 * Returns: 1 ok
 *          0 error
 *************************************************************************/
int mysql_st_free_result_sets (SV * sth, imp_sth_t * imp_sth)
{
  dTHX;
  D_imp_dbh_from_sth;
  D_imp_xxh(sth);
  int next_result_rc= -1;

  if (DBIc_TRACE_LEVEL(imp_xxh) >= 2)
    PerlIO_printf(DBIc_LOGPIO(imp_xxh), "\t>- dbd_st_free_result_sets\n");

#if MYSQL_VERSION_ID >= MULTIPLE_RESULT_SET_VERSION
  do
  {
    if (DBIc_TRACE_LEVEL(imp_xxh) >= 2)
      PerlIO_printf(DBIc_LOGPIO(imp_xxh), "\t<- dbd_st_free_result_sets RC %d\n",
next_result_rc);

    if (next_result_rc == 0)
    {
      if (!(imp_sth->result = mysql_use_result(imp_dbh->pmysql)))
      {
        /* Check for possible error */
        if (mysql_field_count(imp_dbh->pmysql))
        {
          if (DBIc_TRACE_LEVEL(imp_xxh) >= 2)
            PerlIO_printf(DBIc_LOGPIO(imp_xxh),
                          "\t<- dbd_st_free_result_sets ERROR: %s\n",
                          mysql_error(imp_dbh->pmysql));

          do_error(sth, mysql_errno(imp_dbh->pmysql),
                   mysql_error(imp_dbh->pmysql),
                   mysql_sqlstate(imp_dbh->pmysql));
          return 0;
        }
      }
    }
    if (imp_sth->result)
    {
      mysql_free_result(imp_sth->result);
      imp_sth->result=NULL;
    }
  } while ((next_result_rc=mysql_next_result(imp_dbh->pmysql))==0);

  if (next_result_rc > 0)
  {
    if (DBIc_TRACE_LEVEL(imp_xxh) >= 2)
      PerlIO_printf(DBIc_LOGPIO(imp_xxh),
                    "\t<- dbd_st_free_result_sets: Error while processing multi-result set: %s\n",
                    mysql_error(imp_dbh->pmysql));

    do_error(sth, mysql_errno(imp_dbh->pmysql), mysql_error(imp_dbh->pmysql),
             mysql_sqlstate(imp_dbh->pmysql));
  }
#else
  if (imp_sth->result)
  {
    mysql_free_result(imp_sth->result);
    imp_sth->result=NULL;
  }
#endif

  if (DBIc_TRACE_LEVEL(imp_xxh) >= 2)
    PerlIO_printf(DBIc_LOGPIO(imp_xxh), "\t<- dbd_st_free_result_sets\n");

  return 1;
}

#if MYSQL_VERSION_ID >= MULTIPLE_RESULT_SET_VERSION
/***************************************************************************
 * Name: dbd_st_more_results
 *
 * Purpose: Move onto the next result set (if any)
 *
 * Inputs: sth - Statement handle
 *         imp_sth - driver's private statement handle
 *
 * Returns: 1 if there are more results sets
 *          0 if there are not
 *         -1 for errors.
 *************************************************************************/
int dbd_st_more_results(SV* sth, imp_sth_t* imp_sth)
{
  dTHX;
  D_imp_dbh_from_sth;
  D_imp_xxh(sth);

  int use_mysql_use_result=imp_sth->use_mysql_use_result;
  int next_result_return_code, i;
  MYSQL* svsock= imp_dbh->pmysql;

  if (!SvROK(sth) || SvTYPE(SvRV(sth)) != SVt_PVHV)
    croak("Expected hash array");

  if (!mysql_more_results(svsock))
  {
    /* No more pending result set(s)*/
    if (DBIc_TRACE_LEVEL(imp_xxh) >= 2)
      PerlIO_printf(DBIc_LOGPIO(imp_xxh),
		    "\n      <- dbs_st_more_results no more results\n");
    return 0;
  }

  if (imp_sth->use_server_side_prepare)
  {
    do_warn(sth, JW_ERR_NOT_IMPLEMENTED,
            "Processing of multiple result set is not possible with server side prepare");
    return 0;
  }

  /*
   *  Free cached array attributes
   */
  for (i= 0; i < AV_ATTRIB_LAST; i++)
  {
    if (imp_sth->av_attr[i])
      SvREFCNT_dec(imp_sth->av_attr[i]);

    imp_sth->av_attr[i]= Nullav;
  }

  /* Release previous MySQL result*/
  if (imp_sth->result)
    mysql_free_result(imp_sth->result);

  if (DBIc_ACTIVE(imp_sth))
    DBIc_ACTIVE_off(imp_sth);

  next_result_return_code= mysql_next_result(svsock);

  imp_sth->warning_count = mysql_warning_count(imp_dbh->pmysql);

  /*
    mysql_next_result returns
      0 if there are more results
     -1 if there are no more results
     >0 if there was an error
   */
  if (next_result_return_code > 0)
  {
    do_error(sth, mysql_errno(svsock), mysql_error(svsock),
             mysql_sqlstate(svsock));

    return 0;
  }
  else if(next_result_return_code == -1)
  {
    return 0;
  }
  else
  {
    /* Store the result from the Query */
    imp_sth->result = use_mysql_use_result ?
     mysql_use_result(svsock) : mysql_store_result(svsock);

    if (mysql_errno(svsock))
    {
      do_error(sth, mysql_errno(svsock), mysql_error(svsock),
               mysql_sqlstate(svsock));
      return 0;
    }

    imp_sth->row_num= mysql_affected_rows(imp_dbh->pmysql);

    if (imp_sth->result == NULL)
    {
      /* No "real" rowset*/
      DBIc_NUM_FIELDS(imp_sth)= 0; /* for DBI <= 1.53 */
      DBIS->set_attr_k(sth, sv_2mortal(newSVpvn("NUM_OF_FIELDS",13)), 0,
			       sv_2mortal(newSViv(0)));
      return 1;
    }
    else
    {
      /* We have a new rowset */
      imp_sth->currow=0;

      /* delete cached handle attributes */
      /* XXX should be driven by a list to ease maintenance */
      (void)hv_delete((HV*)SvRV(sth), "NAME", 4, G_DISCARD);
      (void)hv_delete((HV*)SvRV(sth), "NULLABLE", 8, G_DISCARD);
      (void)hv_delete((HV*)SvRV(sth), "NUM_OF_FIELDS", 13, G_DISCARD);
      (void)hv_delete((HV*)SvRV(sth), "PRECISION", 9, G_DISCARD);
      (void)hv_delete((HV*)SvRV(sth), "SCALE", 5, G_DISCARD);
      (void)hv_delete((HV*)SvRV(sth), "TYPE", 4, G_DISCARD);
      (void)hv_delete((HV*)SvRV(sth), "mysql_insertid", 14, G_DISCARD);
      (void)hv_delete((HV*)SvRV(sth), "mysql_is_auto_increment", 23, G_DISCARD);
      (void)hv_delete((HV*)SvRV(sth), "mysql_is_blob", 13, G_DISCARD);
      (void)hv_delete((HV*)SvRV(sth), "mysql_is_key", 12, G_DISCARD);
      (void)hv_delete((HV*)SvRV(sth), "mysql_is_num", 12, G_DISCARD);
      (void)hv_delete((HV*)SvRV(sth), "mysql_is_pri_key", 16, G_DISCARD);
      (void)hv_delete((HV*)SvRV(sth), "mysql_length", 12, G_DISCARD);
      (void)hv_delete((HV*)SvRV(sth), "mysql_max_length", 16, G_DISCARD);
      (void)hv_delete((HV*)SvRV(sth), "mysql_table", 11, G_DISCARD);
      (void)hv_delete((HV*)SvRV(sth), "mysql_type", 10, G_DISCARD);
      (void)hv_delete((HV*)SvRV(sth), "mysql_type_name", 15, G_DISCARD);
      (void)hv_delete((HV*)SvRV(sth), "mysql_warning_count", 20, G_DISCARD);

      /* Adjust NUM_OF_FIELDS - which also adjusts the row buffer size */
      DBIc_NUM_FIELDS(imp_sth)= 0; /* for DBI <= 1.53 */
      DBIc_DBISTATE(imp_sth)->set_attr_k(sth,
          sv_2mortal(newSVpvn("NUM_OF_FIELDS",13)), 0,
          sv_2mortal(newSViv(mysql_num_fields(imp_sth->result)))
      );
      DBIc_ACTIVE_on(imp_sth);

      imp_sth->done_desc = 0;
    }
    imp_dbh->pmysql->net.last_errno= 0;
    return 1;
  }
}
#endif

/**************************************************************************
 *
 *  Name:    mysql_st_internal_execute
 *
 *  Purpose: Internal version for executing a statement, called both from
 *           within the "do" and the "execute" method.
 *
 *  Inputs:  h - object handle, for storing error messages
 *           statement - query being executed
 *           attribs - statement attributes, currently ignored
 *           num_params - number of parameters being bound
 *           params - parameter array
 *           result - where to store results, if any
 *           svsock - socket connected to the database
 *
 **************************************************************************/

my_ulonglong mysql_st_internal_execute(
                                       SV *h, /* could be sth or dbh */
                                       SV *statement,
                                       SV *attribs,
                                       int num_params,
                                       imp_sth_ph_t *params,
                                       MYSQL_RES **result,
                                       MYSQL *svsock,
                                       int use_mysql_use_result
                                      )
{
  dTHX;
  bool bind_type_guessing= FALSE;
  bool bind_comment_placeholders= TRUE;
  STRLEN slen;
  char *sbuf = SvPV(statement, slen);
  char *table;
  char *salloc;
  int htype;
#if MYSQL_ASYNC
  bool async = FALSE;
#endif
  my_ulonglong rows= 0;
  /* thank you DBI.c for this info! */
  D_imp_xxh(h);
  attribs= attribs;  /* silence unused-parameter warning */

  htype= DBIc_TYPE(imp_xxh);
  /*
    It is important to import imp_dbh properly according to the htype
    that it is! Also, one might ask why bind_type_guessing is assigned
    in each block. Well, it's because D_imp_ macros called in these
    blocks make it so imp_dbh is not "visible" or defined outside of the
    if/else (when compiled, it fails for imp_dbh not being defined).
*/ /* h is a dbh */ if (htype == DBIt_DB) { D_imp_dbh(h); /* if imp_dbh is not available, it causes segfault (proper) on OpenBSD */ if (imp_dbh && imp_dbh->bind_type_guessing) { bind_type_guessing= imp_dbh->bind_type_guessing; bind_comment_placeholders= bind_comment_placeholders; } #if MYSQL_ASYNC async = (bool) (imp_dbh->async_query_in_flight != NULL); #endif } /* h is a sth */ else { D_imp_sth(h); D_imp_dbh_from_sth; /* if imp_dbh is not available, it causes segfault (proper) on OpenBSD */ if (imp_dbh) { bind_type_guessing= imp_dbh->bind_type_guessing; bind_comment_placeholders= imp_dbh->bind_comment_placeholders; } #if MYSQL_ASYNC async = imp_sth->is_async; if(async) { imp_dbh->async_query_in_flight = imp_sth; } else { imp_dbh->async_query_in_flight = NULL; } #endif } if (DBIc_TRACE_LEVEL(imp_xxh) >= 2) PerlIO_printf(DBIc_LOGPIO(imp_xxh), "mysql_st_internal_execute MYSQL_VERSION_ID %d\n", MYSQL_VERSION_ID ); salloc= parse_params(imp_xxh, aTHX_ svsock, sbuf, &slen, params, num_params, bind_type_guessing, bind_comment_placeholders); if (salloc) { sbuf= salloc; if (DBIc_TRACE_LEVEL(imp_xxh) >= 2) PerlIO_printf(DBIc_LOGPIO(imp_xxh), "Binding parameters: %s\n", sbuf); } if (slen >= 11 && (!strncmp(sbuf, "listfields ", 11) || !strncmp(sbuf, "LISTFIELDS ", 11))) { /* remove pre-space */ slen-= 10; sbuf+= 10; while (slen && isspace(*sbuf)) { --slen; ++sbuf; } if (!slen) { do_error(h, JW_ERR_QUERY, "Missing table name" ,NULL); return -2; } if (!(table= malloc(slen+1))) { do_error(h, JW_ERR_MEM, "Out of memory" ,NULL); return -2; } strncpy(table, sbuf, slen); sbuf= table; while (slen && !isspace(*sbuf)) { --slen; ++sbuf; } *sbuf++= '\0'; *result= mysql_list_fields(svsock, table, NULL); free(table); if (!(*result)) { do_error(h, mysql_errno(svsock), mysql_error(svsock) ,mysql_sqlstate(svsock)); return -2; } return 0; } #if MYSQL_ASYNC if(async) { if((mysql_send_query(svsock, sbuf, slen)) && (!mysql_db_reconnect(h) || (mysql_send_query(svsock, sbuf, slen)))) { rows = -2; } 
else { rows = 0; } } else { #endif if ((mysql_real_query(svsock, sbuf, slen)) && (!mysql_db_reconnect(h) || (mysql_real_query(svsock, sbuf, slen)))) { rows = -2; } else { /** Store the result from the Query */ *result= use_mysql_use_result ? mysql_use_result(svsock) : mysql_store_result(svsock); if (mysql_errno(svsock)) rows = -2; else if (*result) rows = mysql_num_rows(*result); else { rows = mysql_affected_rows(svsock); /* mysql_affected_rows(): -1 indicates that the query returned an error */ if (rows == (my_ulonglong)-1) rows = -2; } } #if MYSQL_ASYNC } #endif if (salloc) Safefree(salloc); if(rows == (my_ulonglong)-2) { do_error(h, mysql_errno(svsock), mysql_error(svsock), mysql_sqlstate(svsock)); if (DBIc_TRACE_LEVEL(imp_xxh) >= 2) PerlIO_printf(DBIc_LOGPIO(imp_xxh), "IGNORING ERROR errno %d\n", mysql_errno(svsock)); } return(rows); } /************************************************************************** * * Name: mysql_st_internal_execute41 * * Purpose: Internal version for executing a prepared statement, called both * from within the "do" and the "execute" method. 
* MYSQL 4.1 API * * * Inputs: h - object handle, for storing error messages * statement - query being executed * attribs - statement attributes, currently ignored * num_params - number of parameters being bound * params - parameter array * result - where to store results, if any * svsock - socket connected to the database * **************************************************************************/ #if MYSQL_VERSION_ID >= SERVER_PREPARE_VERSION my_ulonglong mysql_st_internal_execute41( SV *sth, int num_params, MYSQL_RES **result, MYSQL_STMT *stmt, MYSQL_BIND *bind, int *has_been_bound ) { int i; enum enum_field_types enum_type; dTHX; int execute_retval; my_ulonglong rows=0; D_imp_xxh(sth); if (DBIc_TRACE_LEVEL(imp_xxh) >= 2) PerlIO_printf(DBIc_LOGPIO(imp_xxh), "\t-> mysql_st_internal_execute41\n"); /* free result if exists */ if (*result) { mysql_free_result(*result); *result= 0; } /* If were performed any changes with ph variables we have to rebind them */ if (num_params > 0 && !(*has_been_bound)) { if (mysql_stmt_bind_param(stmt,bind)) goto error; *has_been_bound= 1; } if (DBIc_TRACE_LEVEL(imp_xxh) >= 2) PerlIO_printf(DBIc_LOGPIO(imp_xxh), "\t\tmysql_st_internal_execute41 calling mysql_execute with %d num_params\n", num_params); execute_retval= mysql_stmt_execute(stmt); if (DBIc_TRACE_LEVEL(imp_xxh) >= 2) PerlIO_printf(DBIc_LOGPIO(imp_xxh), "\t\tmysql_stmt_execute returned %d\n", execute_retval); if (execute_retval) goto error; /* This statement does not return a result set (INSERT, UPDATE...) */ if (!(*result= mysql_stmt_result_metadata(stmt))) { if (mysql_stmt_errno(stmt)) goto error; rows= mysql_stmt_affected_rows(stmt); /* mysql_stmt_affected_rows(): -1 indicates that the query returned an error */ if (rows == (my_ulonglong)-1) goto error; } /* This statement returns a result set (SELECT...) 
*/ else { for (i = mysql_stmt_field_count(stmt) - 1; i >=0; --i) { enum_type = mysql_to_perl_type(stmt->fields[i].type); if (enum_type != MYSQL_TYPE_DOUBLE && enum_type != MYSQL_TYPE_LONG && enum_type != MYSQL_TYPE_BIT) { /* mysql_stmt_store_result to update MYSQL_FIELD->max_length */ my_bool on = 1; mysql_stmt_attr_set(stmt, STMT_ATTR_UPDATE_MAX_LENGTH, &on); break; } } /* Get the total rows affected and return */ if (mysql_stmt_store_result(stmt)) goto error; else rows= mysql_stmt_num_rows(stmt); } if (DBIc_TRACE_LEVEL(imp_xxh) >= 2) PerlIO_printf(DBIc_LOGPIO(imp_xxh), "\t<- mysql_internal_execute_41 returning %llu rows\n", rows); return(rows); error: if (*result) { mysql_free_result(*result); *result= 0; } if (DBIc_TRACE_LEVEL(imp_xxh) >= 2) PerlIO_printf(DBIc_LOGPIO(imp_xxh), " errno %d err message %s\n", mysql_stmt_errno(stmt), mysql_stmt_error(stmt)); do_error(sth, mysql_stmt_errno(stmt), mysql_stmt_error(stmt), mysql_stmt_sqlstate(stmt)); mysql_stmt_reset(stmt); if (DBIc_TRACE_LEVEL(imp_xxh) >= 2) PerlIO_printf(DBIc_LOGPIO(imp_xxh), "\t<- mysql_st_internal_execute41\n"); return -2; } #endif /*************************************************************************** * * Name: dbd_st_execute * * Purpose: Called for preparing an SQL statement; our part of the * statement handle constructor * * Input: sth - statement handle being initialized * imp_sth - drivers private statement handle data * * Returns: TRUE for success, FALSE otherwise; do_error will * be called in the latter case * **************************************************************************/ int dbd_st_execute(SV* sth, imp_sth_t* imp_sth) { dTHX; char actual_row_num[64]; int i; SV **statement; D_imp_dbh_from_sth; D_imp_xxh(sth); #if defined (dTHR) dTHR; #endif ASYNC_CHECK_RETURN(sth, -2); if (DBIc_TRACE_LEVEL(imp_xxh) >= 2) PerlIO_printf(DBIc_LOGPIO(imp_xxh), " -> dbd_st_execute for %p\n", sth); if (!SvROK(sth) || SvTYPE(SvRV(sth)) != SVt_PVHV) croak("Expected hash array"); /* Free cached array 
attributes */ for (i= 0; i < AV_ATTRIB_LAST; i++) { if (imp_sth->av_attr[i]) SvREFCNT_dec(imp_sth->av_attr[i]); imp_sth->av_attr[i]= Nullav; } statement= hv_fetch((HV*) SvRV(sth), "Statement", 9, FALSE); /* Clean-up previous result set(s) for sth to prevent 'Commands out of sync' error */ mysql_st_free_result_sets (sth, imp_sth); #if MYSQL_VERSION_ID >= SERVER_PREPARE_VERSION if (imp_sth->use_server_side_prepare && ! imp_sth->use_mysql_use_result) { imp_sth->row_num= mysql_st_internal_execute41( sth, DBIc_NUM_PARAMS(imp_sth), &imp_sth->result, imp_sth->stmt, imp_sth->bind, &imp_sth->has_been_bound ); } else { #endif imp_sth->row_num= mysql_st_internal_execute( sth, *statement, NULL, DBIc_NUM_PARAMS(imp_sth), imp_sth->params, &imp_sth->result, imp_dbh->pmysql, imp_sth->use_mysql_use_result ); #if MYSQL_ASYNC if(imp_dbh->async_query_in_flight) { DBIc_ACTIVE_on(imp_sth); return 0; } #endif } if (imp_sth->row_num+1 != (my_ulonglong)-1) { if (!imp_sth->result) { imp_sth->insertid= mysql_insert_id(imp_dbh->pmysql); #if MYSQL_VERSION_ID >= MULTIPLE_RESULT_SET_VERSION if (mysql_more_results(imp_dbh->pmysql)) DBIc_ACTIVE_on(imp_sth); #endif } else { /** Store the result in the current statement handle */ DBIc_NUM_FIELDS(imp_sth)= mysql_num_fields(imp_sth->result); DBIc_ACTIVE_on(imp_sth); if (!imp_sth->use_server_side_prepare) imp_sth->done_desc= 0; imp_sth->fetch_done= 0; } } imp_sth->warning_count = mysql_warning_count(imp_dbh->pmysql); if (DBIc_TRACE_LEVEL(imp_xxh) >= 2) { /* PerlIO_printf doesn't always handle imp_sth->row_num %llu consistently!! 
*/ sprintf(actual_row_num, "%llu", imp_sth->row_num); PerlIO_printf(DBIc_LOGPIO(imp_xxh), " <- dbd_st_execute returning imp_sth->row_num %s\n", actual_row_num); } return (int)imp_sth->row_num; } /************************************************************************** * * Name: dbd_describe * * Purpose: Called from within the fetch method to describe the result * * Input: sth - statement handle being initialized * imp_sth - our part of the statement handle, there's no * need for supplying both; Tim just doesn't remove it * * Returns: TRUE for success, FALSE otherwise; do_error will * be called in the latter case * **************************************************************************/ int dbd_describe(SV* sth, imp_sth_t* imp_sth) { dTHX; D_imp_xxh(sth); if (DBIc_TRACE_LEVEL(imp_xxh) >= 2) PerlIO_printf(DBIc_LOGPIO(imp_xxh), "\t--> dbd_describe\n"); #if MYSQL_VERSION_ID >= SERVER_PREPARE_VERSION if (imp_sth->use_server_side_prepare) { int i; int col_type; int num_fields= DBIc_NUM_FIELDS(imp_sth); imp_sth_fbh_t *fbh; MYSQL_BIND *buffer; MYSQL_FIELD *fields; if (DBIc_TRACE_LEVEL(imp_xxh) >= 2) PerlIO_printf(DBIc_LOGPIO(imp_xxh), "\t\tdbd_describe() num_fields %d\n", num_fields); if (imp_sth->done_desc) return TRUE; if (!num_fields || !imp_sth->result) { /* no metadata */ do_error(sth, JW_ERR_SEQUENCE, "no metadata information while trying describe result set", NULL); return 0; } /* allocate fields buffers */ if ( !(imp_sth->fbh= alloc_fbuffer(num_fields)) || !(imp_sth->buffer= alloc_bind(num_fields)) ) { /* Out of memory */ do_error(sth, JW_ERR_SEQUENCE, "Out of memory in dbd_sescribe()",NULL); return 0; } fields= mysql_fetch_fields(imp_sth->result); for ( fbh= imp_sth->fbh, buffer= (MYSQL_BIND*)imp_sth->buffer, i= 0; i < num_fields; i++, fbh++, buffer++ ) { /* get the column type */ col_type = fields ? 
fields[i].type : MYSQL_TYPE_STRING; if (DBIc_TRACE_LEVEL(imp_xxh) >= 2) { PerlIO_printf(DBIc_LOGPIO(imp_xxh),"\t\ti %d col_type %d fbh->length %lu\n", i, col_type, fbh->length); PerlIO_printf(DBIc_LOGPIO(imp_xxh), "\t\tfields[i].length %lu fields[i].max_length %lu fields[i].type %d fields[i].charsetnr %d\n", fields[i].length, fields[i].max_length, fields[i].type, fields[i].charsetnr); } fbh->charsetnr = fields[i].charsetnr; #if MYSQL_VERSION_ID < FIELD_CHARSETNR_VERSION fbh->flags = fields[i].flags; #endif buffer->buffer_type= mysql_to_perl_type(col_type); if (DBIc_TRACE_LEVEL(imp_xxh) >= 2) PerlIO_printf(DBIc_LOGPIO(imp_xxh), "\t\tmysql_to_perl_type returned %d\n", col_type); buffer->length= &(fbh->length); buffer->is_null= (my_bool*) &(fbh->is_null); buffer->error= (my_bool*) &(fbh->error); switch (buffer->buffer_type) { case MYSQL_TYPE_DOUBLE: buffer->buffer_length= sizeof(fbh->ddata); buffer->buffer= (char*) &fbh->ddata; break; case MYSQL_TYPE_LONG: buffer->buffer_length= sizeof(fbh->ldata); buffer->buffer= (char*) &fbh->ldata; buffer->is_unsigned= (fields[i].flags & UNSIGNED_FLAG) ? 1 : 0; break; case MYSQL_TYPE_BIT: buffer->buffer_length= 8; Newz(908, fbh->data, buffer->buffer_length, char); buffer->buffer= (char *) fbh->data; break; default: buffer->buffer_length= fields[i].max_length ? 
fields[i].max_length : 1; Newz(908, fbh->data, buffer->buffer_length, char); buffer->buffer= (char *) fbh->data; } } if (mysql_stmt_bind_result(imp_sth->stmt, imp_sth->buffer)) { do_error(sth, mysql_stmt_errno(imp_sth->stmt), mysql_stmt_error(imp_sth->stmt), mysql_stmt_sqlstate(imp_sth->stmt)); return 0; } } #endif imp_sth->done_desc= 1; if (DBIc_TRACE_LEVEL(imp_xxh) >= 2) PerlIO_printf(DBIc_LOGPIO(imp_xxh), "\t<- dbd_describe\n"); return TRUE; } /************************************************************************** * * Name: dbd_st_fetch * * Purpose: Called for fetching a result row * * Input: sth - statement handle being initialized * imp_sth - drivers private statement handle data * * Returns: array of columns; the array is allocated by DBI via * DBIc_DBISTATE(imp_sth)->get_fbav(imp_sth), even the values * of the array are prepared, we just need to modify them * appropriately * **************************************************************************/ AV* dbd_st_fetch(SV *sth, imp_sth_t* imp_sth) { dTHX; int num_fields, ChopBlanks, i, rc; unsigned long *lengths; AV *av; int av_length, av_readonly; MYSQL_ROW cols; D_imp_dbh_from_sth; MYSQL* svsock= imp_dbh->pmysql; imp_sth_fbh_t *fbh; D_imp_xxh(sth); #if MYSQL_VERSION_ID >=SERVER_PREPARE_VERSION MYSQL_BIND *buffer; #endif MYSQL_FIELD *fields; if (DBIc_TRACE_LEVEL(imp_xxh) >= 2) PerlIO_printf(DBIc_LOGPIO(imp_xxh), "\t-> dbd_st_fetch\n"); #if MYSQL_ASYNC if(imp_dbh->async_query_in_flight) { if(mysql_db_async_result(sth, &imp_sth->result) <= 0) { return Nullav; } } #endif #if MYSQL_VERSION_ID >=SERVER_PREPARE_VERSION if (imp_sth->use_server_side_prepare) { if (!DBIc_ACTIVE(imp_sth) ) { do_error(sth, JW_ERR_SEQUENCE, "no statement executing\n",NULL); return Nullav; } if (imp_sth->fetch_done) { do_error(sth, JW_ERR_SEQUENCE, "fetch() but fetch already done",NULL); return Nullav; } if (!imp_sth->done_desc) { if (!dbd_describe(sth, imp_sth)) { do_error(sth, JW_ERR_SEQUENCE, "Error while describe result set.", 
NULL); return Nullav; } } } #endif ChopBlanks = DBIc_is(imp_sth, DBIcf_ChopBlanks); if (DBIc_TRACE_LEVEL(imp_xxh) >= 2) PerlIO_printf(DBIc_LOGPIO(imp_xxh), "\t\tdbd_st_fetch for %p, chopblanks %d\n", sth, ChopBlanks); if (!imp_sth->result) { do_error(sth, JW_ERR_SEQUENCE, "fetch() without execute()" ,NULL); return Nullav; } /* fix from 2.9008 */ imp_dbh->pmysql->net.last_errno = 0; #if MYSQL_VERSION_ID >=SERVER_PREPARE_VERSION if (imp_sth->use_server_side_prepare) { if (DBIc_TRACE_LEVEL(imp_xxh) >= 2) PerlIO_printf(DBIc_LOGPIO(imp_xxh), "\t\tdbd_st_fetch calling mysql_fetch\n"); if ((rc= mysql_stmt_fetch(imp_sth->stmt))) { if (rc == 1) do_error(sth, mysql_stmt_errno(imp_sth->stmt), mysql_stmt_error(imp_sth->stmt), mysql_stmt_sqlstate(imp_sth->stmt)); #if MYSQL_VERSION_ID >= MYSQL_VERSION_5_0 if (rc == MYSQL_DATA_TRUNCATED) { if (DBIc_TRACE_LEVEL(imp_xxh) >= 2) PerlIO_printf(DBIc_LOGPIO(imp_xxh), "\t\tdbd_st_fetch data truncated\n"); goto process; } #endif if (rc == MYSQL_NO_DATA) { /* Update row_num to affected_rows value */ imp_sth->row_num= mysql_stmt_affected_rows(imp_sth->stmt); imp_sth->fetch_done=1; if (DBIc_TRACE_LEVEL(imp_xxh) >= 2) PerlIO_printf(DBIc_LOGPIO(imp_xxh), "\t\tdbd_st_fetch no data\n"); } dbd_st_finish(sth, imp_sth); return Nullav; } process: imp_sth->currow++; av= DBIc_DBISTATE(imp_sth)->get_fbav(imp_sth); num_fields=mysql_stmt_field_count(imp_sth->stmt); if (DBIc_TRACE_LEVEL(imp_xxh) >= 2) PerlIO_printf(DBIc_LOGPIO(imp_xxh), "\t\tdbd_st_fetch called mysql_fetch, rc %d num_fields %d\n", rc, num_fields); for ( buffer= imp_sth->buffer, fbh= imp_sth->fbh, i= 0; i < num_fields; i++, fbh++, buffer++ ) { SV *sv= AvARRAY(av)[i]; /* Note: we (re)use the SV in the AV */ STRLEN len; /* This is wrong, null is not being set correctly * This is not the way to determine length (this would break blobs!) 
*/ if (fbh->is_null) (void) SvOK_off(sv); /* Field is NULL, return undef */ else { /* In case of BLOB/TEXT fields we allocate only 8192 bytes in dbd_describe() for data. Here we know real size of field so we should increase buffer size and refetch column value */ if (fbh->length > buffer->buffer_length || fbh->error) { if (DBIc_TRACE_LEVEL(imp_xxh) >= 2) PerlIO_printf(DBIc_LOGPIO(imp_xxh), "\t\tRefetch BLOB/TEXT column: %d, length: %lu, error: %d\n", i, fbh->length, fbh->error); Renew(fbh->data, fbh->length, char); buffer->buffer_length= fbh->length; buffer->buffer= (char *) fbh->data; if (DBIc_TRACE_LEVEL(imp_xxh) >= 2) { int j; int m = MIN(*buffer->length, buffer->buffer_length); char *ptr = (char*)buffer->buffer; PerlIO_printf(DBIc_LOGPIO(imp_xxh),"\t\tbefore buffer->buffer: "); for (j = 0; j < m; j++) { PerlIO_printf(DBIc_LOGPIO(imp_xxh), "%c", *ptr++); } PerlIO_printf(DBIc_LOGPIO(imp_xxh),"\n"); } /*TODO: Use offset instead of 0 to fetch only remain part of data*/ if (mysql_stmt_fetch_column(imp_sth->stmt, buffer , i, 0)) do_error(sth, mysql_stmt_errno(imp_sth->stmt), mysql_stmt_error(imp_sth->stmt), mysql_stmt_sqlstate(imp_sth->stmt)); if (DBIc_TRACE_LEVEL(imp_xxh) >= 2) { int j; int m = MIN(*buffer->length, buffer->buffer_length); char *ptr = (char*)buffer->buffer; PerlIO_printf(DBIc_LOGPIO(imp_xxh),"\t\tafter buffer->buffer: "); for (j = 0; j < m; j++) { PerlIO_printf(DBIc_LOGPIO(imp_xxh), "%c", *ptr++); } PerlIO_printf(DBIc_LOGPIO(imp_xxh),"\n"); } } /* This does look a lot like Georg's PHP driver doesn't it? --Brian */ /* Credit due to Georg - mysqli_api.c ;) --PMG */ switch (buffer->buffer_type) { case MYSQL_TYPE_DOUBLE: if (DBIc_TRACE_LEVEL(imp_xxh) >= 2) PerlIO_printf(DBIc_LOGPIO(imp_xxh), "\t\tst_fetch double data %f\n", fbh->ddata); sv_setnv(sv, fbh->ddata); break; case MYSQL_TYPE_LONG: if (DBIc_TRACE_LEVEL(imp_xxh) >= 2) PerlIO_printf(DBIc_LOGPIO(imp_xxh), "\t\tst_fetch int data %"IVdf", unsigned? 
%d\n", fbh->ldata, buffer->is_unsigned); if (buffer->is_unsigned) sv_setuv(sv, fbh->ldata); else sv_setiv(sv, fbh->ldata); break; case MYSQL_TYPE_BIT: sv_setpvn(sv, fbh->data, fbh->length); break; default: if (DBIc_TRACE_LEVEL(imp_xxh) >= 2) PerlIO_printf(DBIc_LOGPIO(imp_xxh), "\t\tERROR IN st_fetch_string"); len= fbh->length; /* ChopBlanks server-side prepared statement */ if (ChopBlanks) { /* see bottom of: http://www.mysql.org/doc/refman/5.0/en/c-api-datatypes.html */ if (fbh->charsetnr != 63) while (len && fbh->data[len-1] == ' ') { --len; } } /* END OF ChopBlanks */ sv_setpvn(sv, fbh->data, len); /* UTF8 */ /*HELMUT*/ #if defined(sv_utf8_decode) && MYSQL_VERSION_ID >=SERVER_PREPARE_VERSION #if MYSQL_VERSION_ID >= FIELD_CHARSETNR_VERSION /* SHOW COLLATION WHERE Id = 63; -- 63 == charset binary, collation binary */ if ((imp_dbh->enable_utf8 || imp_dbh->enable_utf8mb4) && fbh->charsetnr != 63) #else if ((imp_dbh->enable_utf8 || imp_dbh->enable_utf8mb4) && !(fbh->flags & BINARY_FLAG)) #endif sv_utf8_decode(sv); #endif /* END OF UTF8 */ break; } } } if (DBIc_TRACE_LEVEL(imp_xxh) >= 2) PerlIO_printf(DBIc_LOGPIO(imp_xxh), "\t<- dbd_st_fetch, %d cols\n", num_fields); return av; } else { #endif imp_sth->currow++; if (DBIc_TRACE_LEVEL(imp_xxh) >= 2) { PerlIO_printf(DBIc_LOGPIO(imp_xxh), "\tdbd_st_fetch result set details\n"); PerlIO_printf(DBIc_LOGPIO(imp_xxh), "\timp_sth->result=%p\n", imp_sth->result); PerlIO_printf(DBIc_LOGPIO(imp_xxh), "\tmysql_num_fields=%u\n", mysql_num_fields(imp_sth->result)); PerlIO_printf(DBIc_LOGPIO(imp_xxh), "\tmysql_num_rows=%llu\n", mysql_num_rows(imp_sth->result)); PerlIO_printf(DBIc_LOGPIO(imp_xxh), "\tmysql_affected_rows=%llu\n", mysql_affected_rows(imp_dbh->pmysql)); PerlIO_printf(DBIc_LOGPIO(imp_xxh), "\tdbd_st_fetch for %p, currow= %d\n", sth,imp_sth->currow); } if (!(cols= mysql_fetch_row(imp_sth->result))) { if (DBIc_TRACE_LEVEL(imp_xxh) >= 2) { PerlIO_printf(DBIc_LOGPIO(imp_xxh), "\tdbd_st_fetch, no more rows to fetch"); } if 
(mysql_errno(imp_dbh->pmysql)) do_error(sth, mysql_errno(imp_dbh->pmysql), mysql_error(imp_dbh->pmysql), mysql_sqlstate(imp_dbh->pmysql)); #if MYSQL_VERSION_ID >= MULTIPLE_RESULT_SET_VERSION if (!mysql_more_results(svsock)) #endif dbd_st_finish(sth, imp_sth); return Nullav; } num_fields= mysql_num_fields(imp_sth->result); fields= mysql_fetch_fields(imp_sth->result); lengths= mysql_fetch_lengths(imp_sth->result); if ((av= DBIc_FIELDS_AV(imp_sth)) != Nullav) { av_length= av_len(av)+1; if (av_length != num_fields) /* Resize array if necessary */ { if (DBIc_TRACE_LEVEL(imp_xxh) >= 2) PerlIO_printf(DBIc_LOGPIO(imp_xxh), "\t<- dbd_st_fetch, size of results array(%d) != num_fields(%d)\n", av_length, num_fields); if (DBIc_TRACE_LEVEL(imp_xxh) >= 2) PerlIO_printf(DBIc_LOGPIO(imp_xxh), "\t<- dbd_st_fetch, result fields(%d)\n", DBIc_NUM_FIELDS(imp_sth)); av_readonly = SvREADONLY(av); if (av_readonly) SvREADONLY_off( av ); /* DBI sets this readonly */ while (av_length < num_fields) { av_store(av, av_length++, newSV(0)); } while (av_length > num_fields) { SvREFCNT_dec(av_pop(av)); av_length--; } if (av_readonly) SvREADONLY_on(av); } } av= DBIc_DBISTATE(imp_sth)->get_fbav(imp_sth); for (i= 0; i < num_fields; ++i) { char *col= cols[i]; SV *sv= AvARRAY(av)[i]; /* Note: we (re)use the SV in the AV */ if (col) { STRLEN len= lengths[i]; if (ChopBlanks) { while (len && col[len-1] == ' ') { --len; } } /* Set string value returned from mysql server */ sv_setpvn(sv, col, len); switch (mysql_to_perl_type(fields[i].type)) { case MYSQL_TYPE_DOUBLE: /* Coerce to dobule and set scalar as NV */ (void) SvNV(sv); SvNOK_only(sv); break; case MYSQL_TYPE_LONG: /* Coerce to integer and set scalar as UV resp. 
IV */ if (fields[i].flags & UNSIGNED_FLAG) { (void) SvUV(sv); SvIOK_only_UV(sv); } else { (void) SvIV(sv); SvIOK_only(sv); } break; #if MYSQL_VERSION_ID > NEW_DATATYPE_VERSION case MYSQL_TYPE_BIT: /* Let it as binary string */ break; #endif default: /* UTF8 */ /*HELMUT*/ #if defined(sv_utf8_decode) && MYSQL_VERSION_ID >=SERVER_PREPARE_VERSION /* see bottom of: http://www.mysql.org/doc/refman/5.0/en/c-api-datatypes.html */ if ((imp_dbh->enable_utf8 || imp_dbh->enable_utf8mb4) && fields[i].charsetnr != 63) sv_utf8_decode(sv); #endif /* END OF UTF8 */ break; } } else (void) SvOK_off(sv); /* Field is NULL, return undef */ } if (DBIc_TRACE_LEVEL(imp_xxh) >= 2) PerlIO_printf(DBIc_LOGPIO(imp_xxh), "\t<- dbd_st_fetch, %d cols\n", num_fields); return av; #if MYSQL_VERSION_ID >= SERVER_PREPARE_VERSION } #endif } #if MYSQL_VERSION_ID >= SERVER_PREPARE_VERSION /* We have to fetch all data from stmt There is may be useful for 2 cases: 1. st_finish when we have undef statement 2. call st_execute again when we have some unfetched data in stmt */ int mysql_st_clean_cursor(SV* sth, imp_sth_t* imp_sth) { if (DBIc_ACTIVE(imp_sth) && dbd_describe(sth, imp_sth) && !imp_sth->fetch_done) mysql_stmt_free_result(imp_sth->stmt); return 1; } #endif /*************************************************************************** * * Name: dbd_st_finish * * Purpose: Called for freeing a mysql result * * Input: sth - statement handle being finished * imp_sth - drivers private statement handle data * * Returns: TRUE for success, FALSE otherwise; do_error() will * be called in the latter case * **************************************************************************/ int dbd_st_finish(SV* sth, imp_sth_t* imp_sth) { dTHX; D_imp_xxh(sth); #if defined (dTHR) dTHR; #endif #if MYSQL_ASYNC D_imp_dbh_from_sth; if(imp_dbh->async_query_in_flight) { mysql_db_async_result(sth, &imp_sth->result); } #endif #if MYSQL_VERSION_ID >= SERVER_PREPARE_VERSION if (DBIc_TRACE_LEVEL(imp_xxh) >= 2) { 
PerlIO_printf(DBIc_LOGPIO(imp_xxh), "\n--> dbd_st_finish\n"); } if (imp_sth->use_server_side_prepare) { if (imp_sth && imp_sth->stmt) { if (!mysql_st_clean_cursor(sth, imp_sth)) { do_error(sth, JW_ERR_SEQUENCE, "Error happened while tried to clean up stmt",NULL); return 0; } } } #endif /* Cancel further fetches from this cursor. We don't close the cursor till DESTROY. The application may re execute it. */ if (imp_sth && DBIc_ACTIVE(imp_sth)) { /* Clean-up previous result set(s) for sth to prevent 'Commands out of sync' error */ mysql_st_free_result_sets(sth, imp_sth); } DBIc_ACTIVE_off(imp_sth); if (DBIc_TRACE_LEVEL(imp_xxh) >= 2) { PerlIO_printf(DBIc_LOGPIO(imp_xxh), "\n<-- dbd_st_finish\n"); } return 1; } /************************************************************************** * * Name: dbd_st_destroy * * Purpose: Our part of the statement handles destructor * * Input: sth - statement handle being destroyed * imp_sth - drivers private statement handle data * * Returns: Nothing * **************************************************************************/ void dbd_st_destroy(SV *sth, imp_sth_t *imp_sth) { dTHX; D_imp_xxh(sth); #if defined (dTHR) dTHR; #endif int i; #if MYSQL_VERSION_ID >= SERVER_PREPARE_VERSION imp_sth_fbh_t *fbh; int n; n= DBIc_NUM_PARAMS(imp_sth); if (n) { if (DBIc_TRACE_LEVEL(imp_xxh) >= 2) PerlIO_printf(DBIc_LOGPIO(imp_xxh), "\tFreeing %d parameters, bind %p fbind %p\n", n, imp_sth->bind, imp_sth->fbind); free_bind(imp_sth->bind); free_fbind(imp_sth->fbind); } fbh= imp_sth->fbh; if (fbh) { n = DBIc_NUM_FIELDS(imp_sth); i = 0; while (i < n) { if (fbh[i].data) Safefree(fbh[i].data); ++i; } free_fbuffer(fbh); if (imp_sth->buffer) free_bind(imp_sth->buffer); } if (imp_sth->stmt) { if (mysql_stmt_close(imp_sth->stmt)) { do_error(DBIc_PARENT_H(imp_sth), mysql_stmt_errno(imp_sth->stmt), mysql_stmt_error(imp_sth->stmt), mysql_stmt_sqlstate(imp_sth->stmt)); } } #endif /* dbd_st_finish has already been called by .xs code if needed. 
*/ /* Free values allocated by dbd_bind_ph */ if (imp_sth->params) { free_param(aTHX_ imp_sth->params, DBIc_NUM_PARAMS(imp_sth)); imp_sth->params= NULL; } /* Free cached array attributes */ for (i= 0; i < AV_ATTRIB_LAST; i++) { if (imp_sth->av_attr[i]) SvREFCNT_dec(imp_sth->av_attr[i]); imp_sth->av_attr[i]= Nullav; } /* let DBI know we've done it */ DBIc_IMPSET_off(imp_sth); } /* ************************************************************************** * * Name: dbd_st_STORE_attrib * * Purpose: Modifies a statement handles attributes; we currently * support just nothing * * Input: sth - statement handle being destroyed * imp_sth - drivers private statement handle data * keysv - attribute name * valuesv - attribute value * * Returns: TRUE for success, FALSE otherwise; do_error will * be called in the latter case * **************************************************************************/ int dbd_st_STORE_attrib( SV *sth, imp_sth_t *imp_sth, SV *keysv, SV *valuesv ) { dTHX; STRLEN(kl); char *key= SvPV(keysv, kl); int retval= FALSE; D_imp_xxh(sth); if (DBIc_TRACE_LEVEL(imp_xxh) >= 2) PerlIO_printf(DBIc_LOGPIO(imp_xxh), "\t\t-> dbd_st_STORE_attrib for %p, key %s\n", sth, key); if (strEQ(key, "mysql_use_result")) { imp_sth->use_mysql_use_result= SvTRUE(valuesv); } if (DBIc_TRACE_LEVEL(imp_xxh) >= 2) PerlIO_printf(DBIc_LOGPIO(imp_xxh), "\t\t<- dbd_st_STORE_attrib for %p, result %d\n", sth, retval); return retval; } /* ************************************************************************** * * Name: dbd_st_FETCH_internal * * Purpose: Retrieves a statement handles array attributes; we use * a separate function, because creating the array * attributes shares much code and it aids in supporting * enhanced features like caching. * * Input: sth - statement handle; may even be a database handle, * in which case this will be used for storing error * messages only. This is only valid, if cacheit (the * last argument) is set to TRUE. 
* what - internal attribute number * res - pointer to a DBMS result * cacheit - TRUE, if results may be cached in the sth. * * Returns: RV pointing to result array in case of success, NULL * otherwise; do_error has already been called in the latter * case. * **************************************************************************/ #ifndef IS_KEY #define IS_KEY(A) (((A) & (PRI_KEY_FLAG | UNIQUE_KEY_FLAG | MULTIPLE_KEY_FLAG)) != 0) #endif #if !defined(IS_AUTO_INCREMENT) && defined(AUTO_INCREMENT_FLAG) #define IS_AUTO_INCREMENT(A) (((A) & AUTO_INCREMENT_FLAG) != 0) #endif SV* dbd_st_FETCH_internal( SV *sth, int what, MYSQL_RES *res, int cacheit ) { dTHX; D_imp_sth(sth); AV *av= Nullav; MYSQL_FIELD *curField; /* Are we asking for a legal value? */ if (what < 0 || what >= AV_ATTRIB_LAST) do_error(sth, JW_ERR_NOT_IMPLEMENTED, "Not implemented", NULL); /* Return cached value, if possible */ else if (cacheit && imp_sth->av_attr[what]) av= imp_sth->av_attr[what]; /* Does this sth really have a result? */ else if (!res) do_error(sth, JW_ERR_NOT_ACTIVE, "statement contains no result" ,NULL); /* Do the real work. 
*/ else { av= newAV(); mysql_field_seek(res, 0); while ((curField= mysql_fetch_field(res))) { SV *sv; switch(what) { case AV_ATTRIB_NAME: sv= newSVpvn(curField->name, strlen(curField->name)); break; case AV_ATTRIB_TABLE: sv= newSVpvn(curField->table, strlen(curField->table)); break; case AV_ATTRIB_TYPE: sv= newSViv((int) curField->type); break; case AV_ATTRIB_SQL_TYPE: sv= newSViv((int) native2sql(curField->type)->data_type); break; case AV_ATTRIB_IS_PRI_KEY: sv= boolSV(IS_PRI_KEY(curField->flags)); break; case AV_ATTRIB_IS_NOT_NULL: sv= boolSV(IS_NOT_NULL(curField->flags)); break; case AV_ATTRIB_NULLABLE: sv= boolSV(!IS_NOT_NULL(curField->flags)); break; case AV_ATTRIB_LENGTH: sv= newSViv((int) curField->length); break; case AV_ATTRIB_IS_NUM: sv= newSViv((int) native2sql(curField->type)->is_num); break; case AV_ATTRIB_TYPE_NAME: sv= newSVpv((char*) native2sql(curField->type)->type_name, 0); break; case AV_ATTRIB_MAX_LENGTH: sv= newSViv((int) curField->max_length); break; case AV_ATTRIB_IS_AUTO_INCREMENT: #if defined(AUTO_INCREMENT_FLAG) sv= boolSV(IS_AUTO_INCREMENT(curField->flags)); break; #else croak("AUTO_INCREMENT_FLAG is not supported on this machine"); #endif case AV_ATTRIB_IS_KEY: sv= boolSV(IS_KEY(curField->flags)); break; case AV_ATTRIB_IS_BLOB: sv= boolSV(IS_BLOB(curField->flags)); break; case AV_ATTRIB_SCALE: sv= newSViv((int) curField->decimals); break; case AV_ATTRIB_PRECISION: sv= newSViv((int) (curField->length > curField->max_length) ? curField->length : curField->max_length); break; default: sv= &PL_sv_undef; break; } av_push(av, sv); } /* Ensure that this value is kept, decremented in * dbd_st_destroy and dbd_st_execute. 
*/ if (!cacheit) return sv_2mortal(newRV_noinc((SV*)av)); imp_sth->av_attr[what]= av; } if (av == Nullav) return &PL_sv_undef; return sv_2mortal(newRV_inc((SV*)av)); } /* ************************************************************************** * * Name: dbd_st_FETCH_attrib * * Purpose: Retrieves a statement handles attributes * * Input: sth - statement handle being destroyed * imp_sth - drivers private statement handle data * keysv - attribute name * * Returns: NULL for an unknown attribute, "undef" for error, * attribute value otherwise. * **************************************************************************/ #define ST_FETCH_AV(what) \ dbd_st_FETCH_internal(sth, (what), imp_sth->result, TRUE) SV* dbd_st_FETCH_attrib( SV *sth, imp_sth_t *imp_sth, SV *keysv ) { dTHX; STRLEN(kl); char *key= SvPV(keysv, kl); SV *retsv= Nullsv; D_imp_xxh(sth); if (kl < 2) return Nullsv; if (DBIc_TRACE_LEVEL(imp_xxh) >= 2) PerlIO_printf(DBIc_LOGPIO(imp_xxh), " -> dbd_st_FETCH_attrib for %p, key %s\n", sth, key); switch (*key) { case 'N': if (strEQ(key, "NAME")) retsv= ST_FETCH_AV(AV_ATTRIB_NAME); else if (strEQ(key, "NULLABLE")) retsv= ST_FETCH_AV(AV_ATTRIB_NULLABLE); break; case 'P': if (strEQ(key, "PRECISION")) retsv= ST_FETCH_AV(AV_ATTRIB_PRECISION); if (strEQ(key, "ParamValues")) { HV *pvhv= newHV(); if (DBIc_NUM_PARAMS(imp_sth)) { int n; char key[100]; I32 keylen; for (n= 0; n < DBIc_NUM_PARAMS(imp_sth); n++) { keylen= sprintf(key, "%d", n); (void)hv_store(pvhv, key, keylen, newSVsv(imp_sth->params[n].value), 0); } } retsv= sv_2mortal(newRV_noinc((SV*)pvhv)); } break; case 'S': if (strEQ(key, "SCALE")) retsv= ST_FETCH_AV(AV_ATTRIB_SCALE); break; case 'T': if (strEQ(key, "TYPE")) retsv= ST_FETCH_AV(AV_ATTRIB_SQL_TYPE); break; case 'm': switch (kl) { case 10: if (strEQ(key, "mysql_type")) retsv= ST_FETCH_AV(AV_ATTRIB_TYPE); break; case 11: if (strEQ(key, "mysql_table")) retsv= ST_FETCH_AV(AV_ATTRIB_TABLE); break; case 12: if ( strEQ(key, "mysql_is_key")) retsv= 
ST_FETCH_AV(AV_ATTRIB_IS_KEY); else if (strEQ(key, "mysql_is_num")) retsv= ST_FETCH_AV(AV_ATTRIB_IS_NUM); else if (strEQ(key, "mysql_length")) retsv= ST_FETCH_AV(AV_ATTRIB_LENGTH); else if (strEQ(key, "mysql_result")) retsv= sv_2mortal(newSViv(PTR2IV(imp_sth->result))); break; case 13: if (strEQ(key, "mysql_is_blob")) retsv= ST_FETCH_AV(AV_ATTRIB_IS_BLOB); break; case 14: if (strEQ(key, "mysql_insertid")) { /* We cannot return an IV, because the insertid is a long. */ if (DBIc_TRACE_LEVEL(imp_xxh) >= 2) PerlIO_printf(DBIc_LOGPIO(imp_xxh), "INSERT ID %llu\n", imp_sth->insertid); return sv_2mortal(my_ulonglong2str(aTHX_ imp_sth->insertid)); } break; case 15: if (strEQ(key, "mysql_type_name")) retsv = ST_FETCH_AV(AV_ATTRIB_TYPE_NAME); break; case 16: if ( strEQ(key, "mysql_is_pri_key")) retsv= ST_FETCH_AV(AV_ATTRIB_IS_PRI_KEY); else if (strEQ(key, "mysql_max_length")) retsv= ST_FETCH_AV(AV_ATTRIB_MAX_LENGTH); else if (strEQ(key, "mysql_use_result")) retsv= boolSV(imp_sth->use_mysql_use_result); break; case 19: if (strEQ(key, "mysql_warning_count")) retsv= sv_2mortal(newSViv((IV) imp_sth->warning_count)); break; case 20: if (strEQ(key, "mysql_server_prepare")) #if MYSQL_VERSION_ID >= SERVER_PREPARE_VERSION retsv= sv_2mortal(newSViv((IV) imp_sth->use_server_side_prepare)); #else retsv= boolSV(0); #endif break; case 23: if (strEQ(key, "mysql_is_auto_increment")) retsv = ST_FETCH_AV(AV_ATTRIB_IS_AUTO_INCREMENT); break; } break; } return retsv; } /*************************************************************************** * * Name: dbd_st_blob_read * * Purpose: Used for blob reads if the statement handles "LongTruncOk" * attribute (currently not supported by DBD::mysql) * * Input: SV* - statement handle from which a blob will be fetched * imp_sth - drivers private statement handle data * field - field number of the blob (note, that a row may * contain more than one blob) * offset - the offset of the field, where to start reading * len - maximum number of bytes to read * 
destrv - RV* that tells us where to store * destoffset - destination offset * * Returns: TRUE for success, FALSE otherwise; do_error will * be called in the latter case * **************************************************************************/ int dbd_st_blob_read ( SV *sth, imp_sth_t *imp_sth, int field, long offset, long len, SV *destrv, long destoffset) { /* quell warnings */ sth= sth; imp_sth=imp_sth; field= field; offset= offset; len= len; destrv= destrv; destoffset= destoffset; return FALSE; } /*************************************************************************** * * Name: dbd_bind_ph * * Purpose: Binds a statement value to a parameter * * Input: sth - statement handle * imp_sth - drivers private statement handle data * param - parameter number, counting starts with 1 * value - value being inserted for parameter "param" * sql_type - SQL type of the value * attribs - bind parameter attributes, currently this must be * one of the values SQL_CHAR, ... * inout - TRUE, if parameter is an output variable (currently * this is not supported) * maxlen - ??? 
* * Returns: TRUE for success, FALSE otherwise * **************************************************************************/ int dbd_bind_ph(SV *sth, imp_sth_t *imp_sth, SV *param, SV *value, IV sql_type, SV *attribs, int is_inout, IV maxlen) { dTHX; int rc; int param_num= SvIV(param); int idx= param_num - 1; char *err_msg; D_imp_xxh(sth); #if MYSQL_VERSION_ID >= SERVER_PREPARE_VERSION STRLEN slen; char *buffer= NULL; int buffer_is_null= 0; int buffer_length= slen; unsigned int buffer_type= 0; #endif D_imp_dbh_from_sth; ASYNC_CHECK_RETURN(sth, FALSE); if (DBIc_TRACE_LEVEL(imp_xxh) >= 2) PerlIO_printf(DBIc_LOGPIO(imp_xxh), " Called: dbd_bind_ph\n"); attribs= attribs; maxlen= maxlen; if (param_num <= 0 || param_num > DBIc_NUM_PARAMS(imp_sth)) { do_error(sth, JW_ERR_ILLEGAL_PARAM_NUM, "Illegal parameter number", NULL); return FALSE; } /* This fixes the bug whereby no warning was issued upon binding a defined non-numeric as numeric */ if (SvOK(value) && (sql_type == SQL_NUMERIC || sql_type == SQL_DECIMAL || sql_type == SQL_INTEGER || sql_type == SQL_SMALLINT || sql_type == SQL_FLOAT || sql_type == SQL_REAL || sql_type == SQL_DOUBLE) ) { if (! 
looks_like_number(value)) { err_msg = SvPVX(sv_2mortal(newSVpvf( "Binding non-numeric field %d, value %s as a numeric!", param_num, neatsvpv(value,0)))); do_error(sth, JW_ERR_ILLEGAL_PARAM_NUM, err_msg, NULL); } } if (is_inout) { do_error(sth, JW_ERR_NOT_IMPLEMENTED, "Output parameters not implemented", NULL); return FALSE; } rc = bind_param(&imp_sth->params[idx], value, sql_type); #if MYSQL_VERSION_ID >= SERVER_PREPARE_VERSION if (imp_sth->use_server_side_prepare) { switch(sql_type) { case SQL_NUMERIC: case SQL_INTEGER: case SQL_SMALLINT: case SQL_BIGINT: case SQL_TINYINT: buffer_type= MYSQL_TYPE_LONG; break; case SQL_DOUBLE: case SQL_DECIMAL: case SQL_FLOAT: case SQL_REAL: buffer_type= MYSQL_TYPE_DOUBLE; break; case SQL_CHAR: case SQL_VARCHAR: case SQL_DATE: case SQL_TIME: case SQL_TIMESTAMP: case SQL_LONGVARCHAR: case SQL_BINARY: case SQL_VARBINARY: case SQL_LONGVARBINARY: buffer_type= MYSQL_TYPE_BLOB; break; default: buffer_type= MYSQL_TYPE_STRING; } buffer_is_null = !(SvOK(imp_sth->params[idx].value) && imp_sth->params[idx].value); if (! 
buffer_is_null) { switch(buffer_type) { case MYSQL_TYPE_LONG: /* INT */ if (!SvIOK(imp_sth->params[idx].value) && DBIc_TRACE_LEVEL(imp_xxh) >= 2) PerlIO_printf(DBIc_LOGPIO(imp_xxh), "\t\tTRY TO BIND AN INT NUMBER\n"); buffer_length = sizeof imp_sth->fbind[idx].numeric_val.lval; imp_sth->fbind[idx].numeric_val.lval= SvIV(imp_sth->params[idx].value); buffer=(void*)&(imp_sth->fbind[idx].numeric_val.lval); if (DBIc_TRACE_LEVEL(imp_xxh) >= 2) PerlIO_printf(DBIc_LOGPIO(imp_xxh), " SCALAR type %"IVdf" ->%"IVdf"<- IS A INT NUMBER\n", sql_type, *(IV *)buffer); break; case MYSQL_TYPE_DOUBLE: if (!SvNOK(imp_sth->params[idx].value) && DBIc_TRACE_LEVEL(imp_xxh) >= 2) PerlIO_printf(DBIc_LOGPIO(imp_xxh), "\t\tTRY TO BIND A FLOAT NUMBER\n"); buffer_length = sizeof imp_sth->fbind[idx].numeric_val.dval; imp_sth->fbind[idx].numeric_val.dval= SvNV(imp_sth->params[idx].value); buffer=(char*)&(imp_sth->fbind[idx].numeric_val.dval); if (DBIc_TRACE_LEVEL(imp_xxh) >= 2) PerlIO_printf(DBIc_LOGPIO(imp_xxh), " SCALAR type %"IVdf" ->%f<- IS A FLOAT NUMBER\n", sql_type, (double)(*buffer)); break; case MYSQL_TYPE_BLOB: if (DBIc_TRACE_LEVEL(imp_xxh) >= 2) PerlIO_printf(DBIc_LOGPIO(imp_xxh), " SCALAR type BLOB\n"); break; case MYSQL_TYPE_STRING: if (DBIc_TRACE_LEVEL(imp_xxh) >= 2) PerlIO_printf(DBIc_LOGPIO(imp_xxh), " SCALAR type STRING %"IVdf", buffertype=%d\n", sql_type, buffer_type); break; default: croak("Bug in DBD::Mysql file dbdimp.c#dbd_bind_ph: do not know how to handle unknown buffer type."); } if (buffer_type == MYSQL_TYPE_STRING || buffer_type == MYSQL_TYPE_BLOB) { buffer= SvPV(imp_sth->params[idx].value, slen); buffer_length= slen; if (DBIc_TRACE_LEVEL(imp_xxh) >= 2) PerlIO_printf(DBIc_LOGPIO(imp_xxh), " SCALAR type %"IVdf" ->length %d<- IS A STRING or BLOB\n", sql_type, buffer_length); } } else { /*case: buffer_is_null != 0*/ buffer= NULL; if (DBIc_TRACE_LEVEL(imp_xxh) >= 2) PerlIO_printf(DBIc_LOGPIO(imp_xxh), " SCALAR NULL VALUE: buffer type is: %d\n", buffer_type); } /* Type of 
column was changed. Force to rebind */ if (imp_sth->bind[idx].buffer_type != buffer_type) { if (DBIc_TRACE_LEVEL(imp_xxh) >= 2) PerlIO_printf(DBIc_LOGPIO(imp_xxh), " FORCE REBIND: buffer type changed from %d to %d, sql-type=%"IVdf"\n", (int) imp_sth->bind[idx].buffer_type, buffer_type, sql_type); imp_sth->has_been_bound = 0; } /* prepare has been called */ if (imp_sth->has_been_bound) { imp_sth->stmt->params[idx].buffer= buffer; imp_sth->stmt->params[idx].buffer_length= buffer_length; } imp_sth->bind[idx].buffer_type= buffer_type; imp_sth->bind[idx].buffer= buffer; imp_sth->bind[idx].buffer_length= buffer_length; imp_sth->fbind[idx].length= buffer_length; imp_sth->fbind[idx].is_null= buffer_is_null; } #endif return rc; } /*************************************************************************** * * Name: mysql_db_reconnect * * Purpose: If the server has disconnected, try to reconnect. * * Input: h - database or statement handle * * Returns: TRUE for success, FALSE otherwise * **************************************************************************/ int mysql_db_reconnect(SV* h) { dTHX; D_imp_xxh(h); imp_dbh_t* imp_dbh; MYSQL save_socket; if (DBIc_TYPE(imp_xxh) == DBIt_ST) { imp_dbh = (imp_dbh_t*) DBIc_PARENT_COM(imp_xxh); h = DBIc_PARENT_H(imp_xxh); } else imp_dbh= (imp_dbh_t*) imp_xxh; if (mysql_errno(imp_dbh->pmysql) != CR_SERVER_GONE_ERROR) /* Other error */ return FALSE; if (!DBIc_has(imp_dbh, DBIcf_AutoCommit) || !imp_dbh->auto_reconnect) { /* We never reconnect if AutoCommit is turned off. * Otherwise we might get an inconsistent transaction * state. */ return FALSE; } /* my_login will blow away imp_dbh->mysql so we save a copy of * imp_dbh->mysql and put it back where it belongs if the reconnect * fail. Think server is down & reconnect fails but the application eval{}s * the execute, so next time $dbh->quote() gets called, instant SIGSEGV! 
*/ save_socket= *(imp_dbh->pmysql); memcpy (&save_socket, imp_dbh->pmysql,sizeof(save_socket)); memset (imp_dbh->pmysql,0,sizeof(*(imp_dbh->pmysql))); /* we should disconnect the db handle before reconnecting, this will * prevent my_login from thinking it's adopting an active child which * would prevent the handle from actually reconnecting */ if (!dbd_db_disconnect(h, imp_dbh) || !my_login(aTHX_ h, imp_dbh)) { do_error(h, mysql_errno(imp_dbh->pmysql), mysql_error(imp_dbh->pmysql), mysql_sqlstate(imp_dbh->pmysql)); memcpy (imp_dbh->pmysql, &save_socket, sizeof(save_socket)); ++imp_dbh->stats.auto_reconnects_failed; return FALSE; } /* * Tell DBI, that dbh->disconnect should be called for this handle */ DBIc_ACTIVE_on(imp_dbh); ++imp_dbh->stats.auto_reconnects_ok; return TRUE; } /************************************************************************** * * Name: dbd_db_type_info_all * * Purpose: Implements $dbh->type_info_all * * Input: dbh - database handle * imp_sth - drivers private database handle data * * Returns: RV to AV of types * **************************************************************************/ #define PV_PUSH(c) \ if (c) { \ sv= newSVpv((char*) (c), 0); \ SvREADONLY_on(sv); \ } else { \ sv= &PL_sv_undef; \ } \ av_push(row, sv); #define IV_PUSH(i) sv= newSViv((i)); SvREADONLY_on(sv); av_push(row, sv); AV *dbd_db_type_info_all(SV *dbh, imp_dbh_t *imp_dbh) { dTHX; AV *av= newAV(); AV *row; HV *hv; SV *sv; int i; const char *cols[] = { "TYPE_NAME", "DATA_TYPE", "COLUMN_SIZE", "LITERAL_PREFIX", "LITERAL_SUFFIX", "CREATE_PARAMS", "NULLABLE", "CASE_SENSITIVE", "SEARCHABLE", "UNSIGNED_ATTRIBUTE", "FIXED_PREC_SCALE", "AUTO_UNIQUE_VALUE", "LOCAL_TYPE_NAME", "MINIMUM_SCALE", "MAXIMUM_SCALE", "NUM_PREC_RADIX", "SQL_DATATYPE", "SQL_DATETIME_SUB", "INTERVAL_PRECISION", "mysql_native_type", "mysql_is_num" }; dbh= dbh; imp_dbh= imp_dbh; hv= newHV(); av_push(av, newRV_noinc((SV*) hv)); for (i= 0; i < (int)(sizeof(cols) / sizeof(const char*)); i++) { if 
(!hv_store(hv, (char*) cols[i], strlen(cols[i]), newSViv(i), 0)) { SvREFCNT_dec((SV*) av); return Nullav; } } for (i= 0; i < (int)SQL_GET_TYPE_INFO_num; i++) { const sql_type_info_t *t= &SQL_GET_TYPE_INFO_values[i]; row= newAV(); av_push(av, newRV_noinc((SV*) row)); PV_PUSH(t->type_name); IV_PUSH(t->data_type); IV_PUSH(t->column_size); PV_PUSH(t->literal_prefix); PV_PUSH(t->literal_suffix); PV_PUSH(t->create_params); IV_PUSH(t->nullable); IV_PUSH(t->case_sensitive); IV_PUSH(t->searchable); IV_PUSH(t->unsigned_attribute); IV_PUSH(t->fixed_prec_scale); IV_PUSH(t->auto_unique_value); PV_PUSH(t->local_type_name); IV_PUSH(t->minimum_scale); IV_PUSH(t->maximum_scale); if (t->num_prec_radix) { IV_PUSH(t->num_prec_radix); } else av_push(row, &PL_sv_undef); IV_PUSH(t->sql_datatype); /* SQL_DATATYPE*/ IV_PUSH(t->sql_datetime_sub); /* SQL_DATETIME_SUB*/ IV_PUSH(t->interval_precision); /* INTERVAL_PERCISION */ IV_PUSH(t->native_type); IV_PUSH(t->is_num); } return av; } /* dbd_db_quote Properly quotes a value */ SV* dbd_db_quote(SV *dbh, SV *str, SV *type) { dTHX; SV *result; if (SvGMAGICAL(str)) mg_get(str); if (!SvOK(str)) result= newSVpvn("NULL", 4); else { char *ptr, *sptr; STRLEN len; D_imp_dbh(dbh); if (type && SvMAGICAL(type)) mg_get(type); if (type && SvOK(type)) { int i; int tp= SvIV(type); for (i= 0; i < (int)SQL_GET_TYPE_INFO_num; i++) { const sql_type_info_t *t= &SQL_GET_TYPE_INFO_values[i]; if (t->data_type == tp) { if (!t->literal_prefix) return Nullsv; break; } } } ptr= SvPV(str, len); result= newSV(len*2+3); #ifdef SvUTF8 if (SvUTF8(str)) SvUTF8_on(result); #endif sptr= SvPVX(result); *sptr++ = '\''; sptr+= mysql_real_escape_string(imp_dbh->pmysql, sptr, ptr, len); *sptr++= '\''; SvPOK_on(result); SvCUR_set(result, sptr - SvPVX(result)); /* Never hurts NUL terminating a Per string */ *sptr++= '\0'; } return result; } #ifdef DBD_MYSQL_INSERT_ID_IS_GOOD SV *mysql_db_last_insert_id(SV *dbh, imp_dbh_t *imp_dbh, SV *catalog, SV *schema, SV *table, SV *field, SV 
*attr) { dTHX; /* all these non-op settings are to stifle OS X compile warnings */ imp_dbh= imp_dbh; dbh= dbh; catalog= catalog; schema= schema; table= table; field= field; attr= attr; ASYNC_CHECK_RETURN(dbh, &PL_sv_undef); return sv_2mortal(my_ulonglong2str(aTHX_ mysql_insert_id(imp_dbh->pmysql))); } #endif #if MYSQL_ASYNC int mysql_db_async_result(SV* h, MYSQL_RES** resp) { dTHX; D_imp_xxh(h); imp_dbh_t* dbh; MYSQL* svsock = NULL; MYSQL_RES* _res; int retval = 0; int htype; if(! resp) { resp = &_res; } htype = DBIc_TYPE(imp_xxh); if(htype == DBIt_DB) { D_imp_dbh(h); dbh = imp_dbh; } else { D_imp_sth(h); D_imp_dbh_from_sth; dbh = imp_dbh; } if(! dbh->async_query_in_flight) { do_error(h, 2000, "Gathering asynchronous results for a synchronous handle", "HY000"); return -1; } if(dbh->async_query_in_flight != imp_xxh) { do_error(h, 2000, "Gathering async_query_in_flight results for the wrong handle", "HY000"); return -1; } dbh->async_query_in_flight = NULL; svsock= dbh->pmysql; retval= mysql_read_query_result(svsock); if(! retval) { *resp= mysql_store_result(svsock); if (mysql_errno(svsock)) do_error(h, mysql_errno(svsock), mysql_error(svsock), mysql_sqlstate(svsock)); if (!*resp) retval= mysql_affected_rows(svsock); else { retval= mysql_num_rows(*resp); if(resp == &_res) { mysql_free_result(*resp); } } if(htype == DBIt_ST) { D_imp_sth(h); D_imp_dbh_from_sth; if((my_ulonglong)retval+1 != (my_ulonglong)-1) { if(! *resp) { imp_sth->insertid= mysql_insert_id(svsock); #if MYSQL_VERSION_ID >= MULTIPLE_RESULT_SET_VERSION if(! 
mysql_more_results(svsock)) DBIc_ACTIVE_off(imp_sth); #endif } else { DBIc_NUM_FIELDS(imp_sth)= mysql_num_fields(imp_sth->result); imp_sth->done_desc= 0; imp_sth->fetch_done= 0; } } imp_sth->warning_count = mysql_warning_count(imp_dbh->pmysql); } } else { do_error(h, mysql_errno(svsock), mysql_error(svsock), mysql_sqlstate(svsock)); return -1; } return retval; } int mysql_db_async_ready(SV* h) { dTHX; D_imp_xxh(h); imp_dbh_t* dbh; int htype; htype = DBIc_TYPE(imp_xxh); if(htype == DBIt_DB) { D_imp_dbh(h); dbh = imp_dbh; } else { D_imp_sth(h); D_imp_dbh_from_sth; dbh = imp_dbh; } if(dbh->async_query_in_flight) { if(dbh->async_query_in_flight == imp_xxh) { struct pollfd fds; int retval; fds.fd = dbh->pmysql->net.fd; fds.events = POLLIN; retval = poll(&fds, 1, 0); if(retval < 0) { do_error(h, errno, strerror(errno), "HY000"); } return retval; } else { do_error(h, 2000, "Calling mysql_async_ready on the wrong handle", "HY000"); return -1; } } else { do_error(h, 2000, "Handle is not in asynchronous mode", "HY000"); return -1; } } #endif static int parse_number(char *string, STRLEN len, char **end) { int seen_neg; int seen_dec; int seen_e; int seen_plus; int seen_digit; char *cp; seen_neg= seen_dec= seen_e= seen_plus= seen_digit= 0; if (len <= 0) { len= strlen(string); } cp= string; /* Skip leading whitespace */ while (*cp && isspace(*cp)) cp++; for ( ; *cp; cp++) { if ('-' == *cp) { if (seen_neg >= 2) { /* third '-'. number can contains two '-'. because -1e-10 is valid number */ break; } seen_neg += 1; } else if ('.' == *cp) { if (seen_dec) { /* second '.' 
*/ break; } seen_dec= 1; } else if ('e' == *cp) { if (seen_e) { /* second 'e' */ break; } seen_e= 1; } else if ('+' == *cp) { if (seen_plus) { /* second '+' */ break; } seen_plus= 1; } else if (!isdigit(*cp)) { /* Not sure why this was changed */ /* seen_digit= 1; */ break; } } *end= cp; /* length 0 -> not a number */ /* Need to revisit this */ /*if (len == 0 || cp - string < (int) len || seen_digit == 0) {*/ if (len == 0 || cp - string < (int) len) { return -1; } return 0; }
/* * DBD::mysql - DBI driver for the mysql database * * Copyright (c) 2004-2014 Patrick Galbraith * Copyright (c) 2013-2014 Michiel Beijen * Copyright (c) 2004-2007 Alexey Stroganov * Copyright (c) 2003-2005 Rudolf Lippan * Copyright (c) 1997-2003 Jochen Wiedmann * * You may distribute this under the terms of either the GNU General Public * License or the Artistic License, as specified in the Perl README file. */ #ifdef WIN32 #include "windows.h" #include "winsock.h" #endif #include "dbdimp.h" #if defined(WIN32) && defined(WORD) #undef WORD typedef short WORD; #endif #ifdef WIN32 #define MIN min #else #ifndef MIN #define MIN(a, b) ((a) < (b) ? (a) : (b)) #endif #endif #if MYSQL_ASYNC # include <poll.h> # include <errno.h> # define ASYNC_CHECK_RETURN(h, value)\ if(imp_dbh->async_query_in_flight) {\ do_error(h, 2000, "Calling a synchronous function on an asynchronous handle", "HY000");\ return (value);\ } #else # define ASYNC_CHECK_RETURN(h, value) #endif static int parse_number(char *string, STRLEN len, char **end); DBISTATE_DECLARE; typedef struct sql_type_info_s { const char *type_name; int data_type; int column_size; const char *literal_prefix; const char *literal_suffix; const char *create_params; int nullable; int case_sensitive; int searchable; int unsigned_attribute; int fixed_prec_scale; int auto_unique_value; const char *local_type_name; int minimum_scale; int maximum_scale; int num_prec_radix; int sql_datatype; int sql_datetime_sub; int interval_precision; int native_type; int is_num; } sql_type_info_t; /* This function manually counts the number of placeholders in an SQL statement, used for emulated prepare statements < 4.1.3 */ static int count_params(imp_xxh_t *imp_xxh, pTHX_ char *statement, bool bind_comment_placeholders) { bool comment_end= false; char* ptr= statement; int num_params= 0; int comment_length= 0; char c; if (DBIc_DBISTATE(imp_xxh)->debug >= 2) PerlIO_printf(DBIc_LOGPIO(imp_xxh), ">count_params statement %s\n", statement); while ( (c = 
*ptr++) ) { switch (c) { /* so, this is a -- comment, so let's burn up characters */ case '-': { if (bind_comment_placeholders) { c = *ptr++; break; } else { comment_length= 1; /* let's see if the next one is a dash */ c = *ptr++; if (c == '-') { /* if two dashes, ignore everything until newline */ while ((c = *ptr)) { if (DBIc_DBISTATE(imp_xxh)->debug >= 2) PerlIO_printf(DBIc_LOGPIO(imp_xxh), "%c\n", c); ptr++; comment_length++; if (c == '\n') { comment_end= true; break; } } /* if not comment_end, the comment never ended and we need to iterate back to the beginning of where we started and let the database handle whatever is in the statement */ if (! comment_end) ptr-= comment_length; } /* otherwise, only one dash/hyphen, backtrack by one */ else ptr--; break; } } /* c-type comments */ case '/': { if (bind_comment_placeholders) { c = *ptr++; break; } else { c = *ptr++; /* let's check if the next one is an asterisk */ if (c == '*') { comment_length= 0; comment_end= false; /* ignore everything until closing comment */ while ((c= *ptr)) { ptr++; comment_length++; if (c == '*') { c = *ptr++; /* alas, end of comment */ if (c == '/') { comment_end= true; break; } /* nope, just an asterisk, not so fast, not end of comment, go back one */ else ptr--; } } /* if the end of the comment was never found, we have to backtrack to wherever we first started skipping over the possible comment. This means we will pass the statement to the database to see its own fate and issue the error */ if (!comment_end) ptr -= comment_length; } else ptr--; break; } } case '`': case '"': case '\'': /* Skip string */ { char end_token = c; while ((c = *ptr) && c != end_token) { if (c == '\\') if (! 
*(++ptr)) continue; ++ptr; } if (c) ++ptr; break; } case '?': ++num_params; break; default: break; } } return num_params; } /* allocate memory in statement handle per number of placeholders */ static imp_sth_ph_t *alloc_param(int num_params) { imp_sth_ph_t *params; if (num_params) Newz(908, params, (unsigned int) num_params, imp_sth_ph_t); else params= NULL; return params; } #if MYSQL_VERSION_ID >= SERVER_PREPARE_VERSION /* allocate memory in MYSQL_BIND bind structure per number of placeholders */ static MYSQL_BIND *alloc_bind(int num_params) { MYSQL_BIND *bind; if (num_params) Newz(908, bind, (unsigned int) num_params, MYSQL_BIND); else bind= NULL; return bind; } /* allocate memory in fbind imp_sth_phb_t structure per number of placeholders */ static imp_sth_phb_t *alloc_fbind(int num_params) { imp_sth_phb_t *fbind; if (num_params) Newz(908, fbind, (unsigned int) num_params, imp_sth_phb_t); else fbind= NULL; return fbind; } /* alloc memory for imp_sth_fbh_t fbuffer per number of fields */ static imp_sth_fbh_t *alloc_fbuffer(int num_fields) { imp_sth_fbh_t *fbh; if (num_fields) Newz(908, fbh, (unsigned int) num_fields, imp_sth_fbh_t); else fbh= NULL; return fbh; } /* free MYSQL_BIND bind struct */ static void free_bind(MYSQL_BIND *bind) { if (bind) Safefree(bind); } /* free imp_sth_phb_t fbind structure */ static void free_fbind(imp_sth_phb_t *fbind) { if (fbind) Safefree(fbind); } /* free imp_sth_fbh_t fbh structure */ static void free_fbuffer(imp_sth_fbh_t *fbh) { if (fbh) Safefree(fbh); } #endif /* free statement param structure per num_params */ static void free_param(pTHX_ imp_sth_ph_t *params, int num_params) { if (params) { int i; for (i= 0; i < num_params; i++) { imp_sth_ph_t *ph= params+i; if (ph->value) { (void) SvREFCNT_dec(ph->value); ph->value= NULL; } } Safefree(params); } } /* Convert a MySQL type to a type that perl can handle NOTE: In the future we may want to return a struct with a lot of information for each type */ static enum enum_field_types 
mysql_to_perl_type(enum enum_field_types type) { static enum enum_field_types enum_type; switch (type) { case MYSQL_TYPE_DOUBLE: case MYSQL_TYPE_FLOAT: enum_type= MYSQL_TYPE_DOUBLE; break; case MYSQL_TYPE_SHORT: case MYSQL_TYPE_TINY: case MYSQL_TYPE_LONG: case MYSQL_TYPE_INT24: case MYSQL_TYPE_YEAR: #if IVSIZE >= 8 case MYSQL_TYPE_LONGLONG: #endif enum_type= MYSQL_TYPE_LONG; break; #if MYSQL_VERSION_ID > NEW_DATATYPE_VERSION case MYSQL_TYPE_BIT: enum_type= MYSQL_TYPE_BIT; break; #endif #if MYSQL_VERSION_ID > NEW_DATATYPE_VERSION case MYSQL_TYPE_NEWDECIMAL: #endif case MYSQL_TYPE_DECIMAL: enum_type= MYSQL_TYPE_DECIMAL; break; #if IVSIZE < 8 case MYSQL_TYPE_LONGLONG: #endif case MYSQL_TYPE_DATE: case MYSQL_TYPE_TIME: case MYSQL_TYPE_DATETIME: case MYSQL_TYPE_NEWDATE: case MYSQL_TYPE_TIMESTAMP: case MYSQL_TYPE_VAR_STRING: #if MYSQL_VERSION_ID > NEW_DATATYPE_VERSION case MYSQL_TYPE_VARCHAR: #endif case MYSQL_TYPE_STRING: enum_type= MYSQL_TYPE_STRING; break; #if MYSQL_VERSION_ID > GEO_DATATYPE_VERSION case MYSQL_TYPE_GEOMETRY: #endif case MYSQL_TYPE_BLOB: case MYSQL_TYPE_TINY_BLOB: enum_type= MYSQL_TYPE_BLOB; break; default: enum_type= MYSQL_TYPE_STRING; /* MySQL can handle all types as strings */ } return(enum_type); } #if defined(DBD_MYSQL_EMBEDDED) /* count embedded options */ int count_embedded_options(char *st) { int rc; char c; char *ptr; ptr= st; rc= 0; if (st) { while ((c= *ptr++)) { if (c == ',') rc++; } rc++; } return rc; } /* Free embedded options */ int free_embedded_options(char ** options_list, int options_count) { int i; for (i= 0; i < options_count; i++) { if (options_list[i]) free(options_list[i]); } free(options_list); return 1; } /* Print out embedded option settings */ int print_embedded_options(PerlIO *stream, char ** options_list, int options_count) { int i; for (i=0; i<options_count; i++) { if (options_list[i]) PerlIO_printf(stream, "Embedded server, parameter[%d]=%s\n", i, options_list[i]); } return 1; } /* */ char 
**fill_out_embedded_options(PerlIO *stream, char *options, int options_type, int slen, int cnt) { int ind, len; char c; char *ptr; char **options_list= NULL; if (!(options_list= (char **) calloc(cnt, sizeof(char *)))) { PerlIO_printf(stream, "Initialize embedded server. Out of memory \n"); return NULL; } ptr= options; ind= 0; if (options_type == 0) { /* server_groups list NULL terminated */ options_list[cnt]= (char *) NULL; } if (options_type == 1) { /* first item in server_options list is ignored. fill it with \0 */ if (!(options_list[0]= calloc(1,sizeof(char)))) return NULL; ind++; } while ((c= *ptr++)) { slen--; if (c == ',' || !slen) { len= ptr - options; if (c == ',') len--; if (!(options_list[ind]=calloc(len+1,sizeof(char)))) return NULL; strncpy(options_list[ind], options, len); ind++; options= ptr; } } return options_list; } #endif /* constructs an SQL statement previously prepared with actual values replacing placeholders */ static char *parse_params( imp_xxh_t *imp_xxh, pTHX_ MYSQL *sock, char *statement, STRLEN *slen_ptr, imp_sth_ph_t* params, int num_params, bool bind_type_guessing, bool bind_comment_placeholders) { bool comment_end= false; char *salloc, *statement_ptr; char *statement_ptr_end, *ptr, *valbuf; char *cp, *end; int alen, i; int slen= *slen_ptr; int limit_flag= 0; int comment_length=0; STRLEN vallen; imp_sth_ph_t *ph; if (DBIc_DBISTATE(imp_xxh)->debug >= 2) PerlIO_printf(DBIc_LOGPIO(imp_xxh), ">parse_params statement %s\n", statement); if (num_params == 0) return NULL; while (isspace(*statement)) { ++statement; --slen; } /* Calculate the number of bytes being allocated for the statement */ alen= slen; for (i= 0, ph= params; i < num_params; i++, ph++) { int defined= 0; if (ph->value) { if (SvMAGICAL(ph->value)) mg_get(ph->value); if (SvOK(ph->value)) defined=1; } if (!defined) alen+= 3; /* Erase '?', insert 'NULL' */ else { valbuf= SvPV(ph->value, vallen); alen+= 2+vallen+1; /* this will most likely not happen since line 214 */ /* of 
mysql.xs hardcodes all types to SQL_VARCHAR */ if (!ph->type) { if (bind_type_guessing) { valbuf= SvPV(ph->value, vallen); ph->type= SQL_INTEGER; if (parse_number(valbuf, vallen, &end) != 0) { ph->type= SQL_VARCHAR; } } else ph->type= SQL_VARCHAR; } } } /* Allocate memory, why *2, well, because we have ptr and statement_ptr */ New(908, salloc, alen*2, char); ptr= salloc; i= 0; /* Now create the statement string; compare count_params above */ statement_ptr_end= (statement_ptr= statement)+ slen; while (statement_ptr < statement_ptr_end) { /* LIMIT should be the last part of the query, in most cases */ if (! limit_flag) { /* it would be good to be able to handle any number of cases and orders */ if ((*statement_ptr == 'l' || *statement_ptr == 'L') && (!strncmp(statement_ptr+1, "imit ?", 6) || !strncmp(statement_ptr+1, "IMIT ?", 6))) { limit_flag = 1; } } switch (*statement_ptr) { /* comment detection. Anything goes in a comment */ case '-': { if (bind_comment_placeholders) { *ptr++= *statement_ptr++; break; } else { comment_length= 1; comment_end= false; *ptr++ = *statement_ptr++; if (*statement_ptr == '-') { /* ignore everything until newline or end of string */ while (*statement_ptr) { comment_length++; *ptr++ = *statement_ptr++; if (!*statement_ptr || *statement_ptr == '\n') { comment_end= true; break; } } /* if not end of comment, go back to where we started, no end found */ if (! comment_end) { statement_ptr -= comment_length; ptr -= comment_length; } } break; } } /* c-type comments */ case '/': { if (bind_comment_placeholders) { *ptr++= *statement_ptr++; break; } else { comment_length= 1; comment_end= false; *ptr++ = *statement_ptr++; if (*statement_ptr == '*') { /* use up characters everything until newline */ while (*statement_ptr) { *ptr++ = *statement_ptr++; comment_length++; if (!strncmp(statement_ptr, "*/", 2)) { comment_length += 2; comment_end= true; break; } } /* Go back to where started if comment end not found */ if (! 
comment_end) { statement_ptr -= comment_length; ptr -= comment_length; } } break; } } case '`': case '\'': case '"': /* Skip string */ { char endToken = *statement_ptr++; *ptr++ = endToken; while (statement_ptr != statement_ptr_end && *statement_ptr != endToken) { if (*statement_ptr == '\\') { *ptr++ = *statement_ptr++; if (statement_ptr == statement_ptr_end) break; } *ptr++= *statement_ptr++; } if (statement_ptr != statement_ptr_end) *ptr++= *statement_ptr++; } break; case '?': /* Insert parameter */ statement_ptr++; if (i >= num_params) { break; } ph = params+ (i++); if (!ph->value || !SvOK(ph->value)) { *ptr++ = 'N'; *ptr++ = 'U'; *ptr++ = 'L'; *ptr++ = 'L'; } else { int is_num = FALSE; valbuf= SvPV(ph->value, vallen); if (valbuf) { switch (ph->type) { case SQL_NUMERIC: case SQL_DECIMAL: case SQL_INTEGER: case SQL_SMALLINT: case SQL_FLOAT: case SQL_REAL: case SQL_DOUBLE: case SQL_BIGINT: case SQL_TINYINT: is_num = TRUE; break; } /* (note this sets *end, which we use if is_num) */ if ( parse_number(valbuf, vallen, &end) != 0 && is_num) { if (bind_type_guessing) { /* .. 
not a number, so apparently we guessed wrong */ is_num = 0; ph->type = SQL_VARCHAR; } } /* we're at the end of the query, so any placeholders if */ /* after a LIMIT clause will be numbers and should not be quoted */ if (limit_flag == 1) is_num = TRUE; if (!is_num) { *ptr++ = '\''; ptr += mysql_real_escape_string(sock, ptr, valbuf, vallen); *ptr++ = '\''; } else { for (cp= valbuf; cp < end; cp++) *ptr++= *cp; } } } break; /* in case this is a nested LIMIT */ case ')': limit_flag = 0; *ptr++ = *statement_ptr++; break; default: *ptr++ = *statement_ptr++; break; } } *slen_ptr = ptr - salloc; *ptr++ = '\0'; return(salloc); } int bind_param(imp_sth_ph_t *ph, SV *value, IV sql_type) { dTHX; if (ph->value) { if (SvMAGICAL(ph->value)) mg_get(ph->value); (void) SvREFCNT_dec(ph->value); } ph->value= newSVsv(value); if (sql_type) ph->type = sql_type; return TRUE; } static const sql_type_info_t SQL_GET_TYPE_INFO_values[]= { { "varchar", SQL_VARCHAR, 255, "'", "'", "max length", 1, 0, 3, 0, 0, 0, "variable length string", 0, 0, 0, SQL_VARCHAR, 0, 0, #if MYSQL_VERSION_ID < MYSQL_VERSION_5_0 FIELD_TYPE_VAR_STRING, 0, #else MYSQL_TYPE_STRING, 0, #endif }, { "decimal", SQL_DECIMAL, 15, NULL, NULL, "precision,scale", 1, 0, 3, 0, 0, 0, "double", 0, 6, 2, SQL_DECIMAL, 0, 0, #if MYSQL_VERSION_ID < MYSQL_VERSION_5_0 FIELD_TYPE_DECIMAL, 1 #else MYSQL_TYPE_DECIMAL, 1 #endif }, { "tinyint", SQL_TINYINT, 3, NULL, NULL, NULL, 1, 0, 3, 0, 0, 0, "Tiny integer", 0, 0, 10, SQL_TINYINT, 0, 0, #if MYSQL_VERSION_ID < MYSQL_VERSION_5_0 FIELD_TYPE_TINY, 1 #else MYSQL_TYPE_TINY, 1 #endif }, { "smallint", SQL_SMALLINT, 5, NULL, NULL, NULL, 1, 0, 3, 0, 0, 0, "Short integer", 0, 0, 10, SQL_SMALLINT, 0, 0, #if MYSQL_VERSION_ID < MYSQL_VERSION_5_0 FIELD_TYPE_SHORT, 1 #else MYSQL_TYPE_SHORT, 1 #endif }, { "integer", SQL_INTEGER, 10, NULL, NULL, NULL, 1, 0, 3, 0, 0, 0, "integer", 0, 0, 10, SQL_INTEGER, 0, 0, #if MYSQL_VERSION_ID < MYSQL_VERSION_5_0 FIELD_TYPE_LONG, 1 #else MYSQL_TYPE_LONG, 1 #endif }, { 
"float", SQL_REAL, 7, NULL, NULL, NULL, 1, 0, 0, 0, 0, 0, "float", 0, 2, 10, SQL_FLOAT, 0, 0, #if MYSQL_VERSION_ID < MYSQL_VERSION_5_0 FIELD_TYPE_FLOAT, 1 #else MYSQL_TYPE_FLOAT, 1 #endif }, { "double", SQL_FLOAT, 15, NULL, NULL, NULL, 1, 0, 3, 0, 0, 0, "double", 0, 4, 2, SQL_FLOAT, 0, 0, #if MYSQL_VERSION_ID < MYSQL_VERSION_5_0 FIELD_TYPE_DOUBLE, 1 #else MYSQL_TYPE_DOUBLE, 1 #endif }, { "double", SQL_DOUBLE, 15, NULL, NULL, NULL, 1, 0, 3, 0, 0, 0, "double", 0, 4, 10, SQL_DOUBLE, 0, 0, #if MYSQL_VERSION_ID < MYSQL_VERSION_5_0 FIELD_TYPE_DOUBLE, 1 #else MYSQL_TYPE_DOUBLE, 1 #endif }, /* FIELD_TYPE_NULL ? */ { "timestamp", SQL_TIMESTAMP, 14, "'", "'", NULL, 0, 0, 3, 0, 0, 0, "timestamp", 0, 0, 0, SQL_TIMESTAMP, 0, 0, #if MYSQL_VERSION_ID < MYSQL_VERSION_5_0 FIELD_TYPE_TIMESTAMP, 0 #else MYSQL_TYPE_TIMESTAMP, 0 #endif }, { "bigint", SQL_BIGINT, 19, NULL, NULL, NULL, 1, 0, 3, 0, 0, 0, "Longlong integer", 0, 0, 10, SQL_BIGINT, 0, 0, #if MYSQL_VERSION_ID < MYSQL_VERSION_5_0 FIELD_TYPE_LONGLONG, 1 #else MYSQL_TYPE_LONGLONG, 1 #endif }, { "mediumint", SQL_INTEGER, 8, NULL, NULL, NULL, 1, 0, 3, 0, 0, 0, "Medium integer", 0, 0, 10, SQL_INTEGER, 0, 0, #if MYSQL_VERSION_ID < MYSQL_VERSION_5_0 FIELD_TYPE_INT24, 1 #else MYSQL_TYPE_INT24, 1 #endif }, { "date", SQL_DATE, 10, "'", "'", NULL, 1, 0, 3, 0, 0, 0, "date", 0, 0, 0, SQL_DATE, 0, 0, #if MYSQL_VERSION_ID < MYSQL_VERSION_5_0 FIELD_TYPE_DATE, 0 #else MYSQL_TYPE_DATE, 0 #endif }, { "time", SQL_TIME, 6, "'", "'", NULL, 1, 0, 3, 0, 0, 0, "time", 0, 0, 0, SQL_TIME, 0, 0, #if MYSQL_VERSION_ID < MYSQL_VERSION_5_0 FIELD_TYPE_TIME, 0 #else MYSQL_TYPE_TIME, 0 #endif }, { "datetime", SQL_TIMESTAMP, 21, "'", "'", NULL, 1, 0, 3, 0, 0, 0, "datetime", 0, 0, 0, SQL_TIMESTAMP, 0, 0, #if MYSQL_VERSION_ID < MYSQL_VERSION_5_0 FIELD_TYPE_DATETIME, 0 #else MYSQL_TYPE_DATETIME, 0 #endif }, { "year", SQL_SMALLINT, 4, NULL, NULL, NULL, 1, 0, 3, 0, 0, 0, "year", 0, 0, 10, SQL_SMALLINT, 0, 0, #if MYSQL_VERSION_ID < MYSQL_VERSION_5_0 FIELD_TYPE_YEAR, 0 
#else MYSQL_TYPE_YEAR, 0 #endif }, { "date", SQL_DATE, 10, "'", "'", NULL, 1, 0, 3, 0, 0, 0, "date", 0, 0, 0, SQL_DATE, 0, 0, #if MYSQL_VERSION_ID < MYSQL_VERSION_5_0 FIELD_TYPE_NEWDATE, 0 #else MYSQL_TYPE_NEWDATE, 0 #endif }, { "enum", SQL_VARCHAR, 255, "'", "'", NULL, 1, 0, 1, 0, 0, 0, "enum(value1,value2,value3...)", 0, 0, 0, 0, 0, 0, #if MYSQL_VERSION_ID < MYSQL_VERSION_5_0 FIELD_TYPE_ENUM, 0 #else MYSQL_TYPE_ENUM, 0 #endif }, { "set", SQL_VARCHAR, 255, "'", "'", NULL, 1, 0, 1, 0, 0, 0, "set(value1,value2,value3...)", 0, 0, 0, 0, 0, 0, #if MYSQL_VERSION_ID < MYSQL_VERSION_5_0 FIELD_TYPE_SET, 0 #else MYSQL_TYPE_SET, 0 #endif }, { "blob", SQL_LONGVARBINARY, 65535, "'", "'", NULL, 1, 0, 3, 0, 0, 0, "binary large object (0-65535)", 0, 0, 0, SQL_LONGVARBINARY, 0, 0, #if MYSQL_VERSION_ID < MYSQL_VERSION_5_0 FIELD_TYPE_BLOB, 0 #else MYSQL_TYPE_BLOB, 0 #endif }, { "tinyblob", SQL_VARBINARY, 255, "'", "'", NULL, 1, 0, 3, 0, 0, 0, "binary large object (0-255) ", 0, 0, 0, SQL_VARBINARY, 0, 0, #if MYSQL_VERSION_ID < MYSQL_VERSION_5_0 FIELD_TYPE_TINY_BLOB, 0 #else FIELD_TYPE_TINY_BLOB, 0 #endif }, { "mediumblob", SQL_LONGVARBINARY, 16777215, "'", "'", NULL, 1, 0, 3, 0, 0, 0, "binary large object", 0, 0, 0, SQL_LONGVARBINARY, 0, 0, #if MYSQL_VERSION_ID < MYSQL_VERSION_5_0 FIELD_TYPE_MEDIUM_BLOB, 0 #else MYSQL_TYPE_MEDIUM_BLOB, 0 #endif }, { "longblob", SQL_LONGVARBINARY, 2147483647, "'", "'", NULL, 1, 0, 3, 0, 0, 0, "binary large object, use mediumblob instead", 0, 0, 0, SQL_LONGVARBINARY, 0, 0, #if MYSQL_VERSION_ID < MYSQL_VERSION_5_0 FIELD_TYPE_LONG_BLOB, 0 #else MYSQL_TYPE_LONG_BLOB, 0 #endif }, { "char", SQL_CHAR, 255, "'", "'", "max length", 1, 0, 3, 0, 0, 0, "string", 0, 0, 0, SQL_CHAR, 0, 0, #if MYSQL_VERSION_ID < MYSQL_VERSION_5_0 FIELD_TYPE_STRING, 0 #else MYSQL_TYPE_STRING, 0 #endif }, { "decimal", SQL_NUMERIC, 15, NULL, NULL, "precision,scale", 1, 0, 3, 0, 0, 0, "double", 0, 6, 2, SQL_NUMERIC, 0, 0, #if MYSQL_VERSION_ID < MYSQL_VERSION_5_0 FIELD_TYPE_DECIMAL, 1 
#else MYSQL_TYPE_DECIMAL, 1 #endif }, { "tinyint unsigned", SQL_TINYINT, 3, NULL, NULL, NULL, 1, 0, 3, 1, 0, 0, "Tiny integer unsigned", 0, 0, 10, SQL_TINYINT, 0, 0, #if MYSQL_VERSION_ID < MYSQL_VERSION_5_0 FIELD_TYPE_TINY, 1 #else MYSQL_TYPE_TINY, 1 #endif }, { "smallint unsigned", SQL_SMALLINT, 5, NULL, NULL, NULL, 1, 0, 3, 1, 0, 0, "Short integer unsigned", 0, 0, 10, SQL_SMALLINT, 0, 0, #if MYSQL_VERSION_ID < MYSQL_VERSION_5_0 FIELD_TYPE_SHORT, 1 #else MYSQL_TYPE_SHORT, 1 #endif }, { "mediumint unsigned", SQL_INTEGER, 8, NULL, NULL, NULL, 1, 0, 3, 1, 0, 0, "Medium integer unsigned", 0, 0, 10, SQL_INTEGER, 0, 0, #if MYSQL_VERSION_ID < MYSQL_VERSION_5_0 FIELD_TYPE_INT24, 1 #else MYSQL_TYPE_INT24, 1 #endif }, { "int unsigned", SQL_INTEGER, 10, NULL, NULL, NULL, 1, 0, 3, 1, 0, 0, "integer unsigned", 0, 0, 10, SQL_INTEGER, 0, 0, #if MYSQL_VERSION_ID < MYSQL_VERSION_5_0 FIELD_TYPE_LONG, 1 #else MYSQL_TYPE_LONG, 1 #endif }, { "int", SQL_INTEGER, 10, NULL, NULL, NULL, 1, 0, 3, 0, 0, 0, "integer", 0, 0, 10, SQL_INTEGER, 0, 0, #if MYSQL_VERSION_ID < MYSQL_VERSION_5_0 FIELD_TYPE_LONG, 1 #else MYSQL_TYPE_LONG, 1 #endif }, { "integer unsigned", SQL_INTEGER, 10, NULL, NULL, NULL, 1, 0, 3, 1, 0, 0, "integer", 0, 0, 10, SQL_INTEGER, 0, 0, #if MYSQL_VERSION_ID < MYSQL_VERSION_5_0 FIELD_TYPE_LONG, 1 #else MYSQL_TYPE_LONG, 1 #endif }, { "bigint unsigned", SQL_BIGINT, 20, NULL, NULL, NULL, 1, 0, 3, 1, 0, 0, "Longlong integer unsigned", 0, 0, 10, SQL_BIGINT, 0, 0, #if MYSQL_VERSION_ID < MYSQL_VERSION_5_0 FIELD_TYPE_LONGLONG, 1 #else MYSQL_TYPE_LONGLONG, 1 #endif }, { "text", SQL_LONGVARCHAR, 65535, "'", "'", NULL, 1, 0, 3, 0, 0, 0, "large text object (0-65535)", 0, 0, 0, SQL_LONGVARCHAR, 0, 0, #if MYSQL_VERSION_ID < MYSQL_VERSION_5_0 FIELD_TYPE_BLOB, 0 #else MYSQL_TYPE_BLOB, 0 #endif }, { "mediumtext", SQL_LONGVARCHAR, 16777215, "'", "'", NULL, 1, 0, 3, 0, 0, 0, "large text object", 0, 0, 0, SQL_LONGVARCHAR, 0, 0, #if MYSQL_VERSION_ID < MYSQL_VERSION_5_0 FIELD_TYPE_MEDIUM_BLOB, 0 
#else MYSQL_TYPE_MEDIUM_BLOB, 0 #endif }, { "mediumint unsigned auto_increment", SQL_INTEGER, 8, NULL, NULL, NULL, 0, 0, 3, 1, 0, 1, "Medium integer unsigned auto_increment", 0, 0, 10, #if MYSQL_VERSION_ID < MYSQL_VERSION_5_0 SQL_INTEGER, 0, 0, FIELD_TYPE_INT24, 1, #else SQL_INTEGER, 0, 0, MYSQL_TYPE_INT24, 1, #endif }, { "tinyint unsigned auto_increment", SQL_TINYINT, 3, NULL, NULL, NULL, 0, 0, 3, 1, 0, 1, "tinyint unsigned auto_increment", 0, 0, 10, #if MYSQL_VERSION_ID < MYSQL_VERSION_5_0 SQL_TINYINT, 0, 0, FIELD_TYPE_TINY, 1 #else SQL_TINYINT, 0, 0, MYSQL_TYPE_TINY, 1 #endif }, { "smallint auto_increment", SQL_SMALLINT, 5, NULL, NULL, NULL, 0, 0, 3, 0, 0, 1, "smallint auto_increment", 0, 0, 10, #if MYSQL_VERSION_ID < MYSQL_VERSION_5_0 SQL_SMALLINT, 0, 0, FIELD_TYPE_SHORT, 1 #else SQL_SMALLINT, 0, 0, MYSQL_TYPE_SHORT, 1 #endif }, { "int unsigned auto_increment", SQL_INTEGER, 10, NULL, NULL, NULL, 0, 0, 3, 1, 0, 1, "integer unsigned auto_increment", 0, 0, 10, #if MYSQL_VERSION_ID < MYSQL_VERSION_5_0 SQL_INTEGER, 0, 0, FIELD_TYPE_LONG, 1 #else SQL_INTEGER, 0, 0, MYSQL_TYPE_LONG, 1 #endif }, { "mediumint", SQL_INTEGER, 7, NULL, NULL, NULL, 1, 0, 3, 0, 0, 0, "Medium integer", 0, 0, 10, #if MYSQL_VERSION_ID < MYSQL_VERSION_5_0 SQL_INTEGER, 0, 0, FIELD_TYPE_INT24, 1 #else SQL_INTEGER, 0, 0, MYSQL_TYPE_INT24, 1 #endif }, { "bit", SQL_BIT, 1, NULL, NULL, NULL, 1, 0, 3, 0, 0, 0, "char(1)", 0, 0, 0, #if MYSQL_VERSION_ID < MYSQL_VERSION_5_0 SQL_BIT, 0, 0, FIELD_TYPE_TINY, 0 #else SQL_BIT, 0, 0, MYSQL_TYPE_TINY, 0 #endif }, { "numeric", SQL_NUMERIC, 19, NULL, NULL, "precision,scale", 1, 0, 3, 0, 0, 0, "numeric", 0, 19, 10, #if MYSQL_VERSION_ID < MYSQL_VERSION_5_0 SQL_NUMERIC, 0, 0, FIELD_TYPE_DECIMAL, 1, #else SQL_NUMERIC, 0, 0, MYSQL_TYPE_DECIMAL, 1, #endif }, { "integer unsigned auto_increment", SQL_INTEGER, 10, NULL, NULL, NULL, 0, 0, 3, 1, 0, 1, "integer unsigned auto_increment", 0, 0, 10, #if MYSQL_VERSION_ID < MYSQL_VERSION_5_0 SQL_INTEGER, 0, 0, FIELD_TYPE_LONG, 1, 
#else SQL_INTEGER, 0, 0, MYSQL_TYPE_LONG, 1, #endif }, { "mediumint unsigned", SQL_INTEGER, 8, NULL, NULL, NULL, 1, 0, 3, 1, 0, 0, "Medium integer unsigned", 0, 0, 10, #if MYSQL_VERSION_ID < MYSQL_VERSION_5_0 SQL_INTEGER, 0, 0, FIELD_TYPE_INT24, 1 #else SQL_INTEGER, 0, 0, MYSQL_TYPE_INT24, 1 #endif }, { "smallint unsigned auto_increment", SQL_SMALLINT, 5, NULL, NULL, NULL, 0, 0, 3, 1, 0, 1, "smallint unsigned auto_increment", 0, 0, 10, #if MYSQL_VERSION_ID < MYSQL_VERSION_5_0 SQL_SMALLINT, 0, 0, FIELD_TYPE_SHORT, 1 #else SQL_SMALLINT, 0, 0, MYSQL_TYPE_SHORT, 1 #endif }, { "int auto_increment", SQL_INTEGER, 10, NULL, NULL, NULL, 0, 0, 3, 0, 0, 1, "integer auto_increment", 0, 0, 10, #if MYSQL_VERSION_ID < MYSQL_VERSION_5_0 SQL_INTEGER, 0, 0, FIELD_TYPE_LONG, 1 #else SQL_INTEGER, 0, 0, MYSQL_TYPE_LONG, 1 #endif }, { "long varbinary", SQL_LONGVARBINARY, 16777215, "0x", NULL, NULL, 1, 0, 3, 0, 0, 0, "mediumblob", 0, 0, 0, #if MYSQL_VERSION_ID < MYSQL_VERSION_5_0 SQL_LONGVARBINARY, 0, 0, FIELD_TYPE_LONG_BLOB, 0 #else SQL_LONGVARBINARY, 0, 0, MYSQL_TYPE_LONG_BLOB, 0 #endif }, { "double auto_increment", SQL_FLOAT, 15, NULL, NULL, NULL, 0, 0, 3, 0, 0, 1, "double auto_increment", 0, 4, 2, #if MYSQL_VERSION_ID < MYSQL_VERSION_5_0 SQL_FLOAT, 0, 0, FIELD_TYPE_DOUBLE, 1 #else SQL_FLOAT, 0, 0, MYSQL_TYPE_DOUBLE, 1 #endif }, { "double auto_increment", SQL_DOUBLE, 15, NULL, NULL, NULL, 0, 0, 3, 0, 0, 1, "double auto_increment", 0, 4, 10, #if MYSQL_VERSION_ID < MYSQL_VERSION_5_0 SQL_DOUBLE, 0, 0, FIELD_TYPE_DOUBLE, 1 #else SQL_DOUBLE, 0, 0, MYSQL_TYPE_DOUBLE, 1 #endif }, { "integer auto_increment", SQL_INTEGER, 10, NULL, NULL, NULL, 0, 0, 3, 0, 0, 1, "integer auto_increment", 0, 0, 10, #if MYSQL_VERSION_ID < MYSQL_VERSION_5_0 SQL_INTEGER, 0, 0, FIELD_TYPE_LONG, 1, #else SQL_INTEGER, 0, 0, MYSQL_TYPE_LONG, 1, #endif }, { "bigint auto_increment", SQL_BIGINT, 19, NULL, NULL, NULL, 0, 0, 3, 0, 0, 1, "bigint auto_increment", 0, 0, 10, #if MYSQL_VERSION_ID < MYSQL_VERSION_5_0 SQL_BIGINT, 
0, 0, FIELD_TYPE_LONGLONG, 1 #else SQL_BIGINT, 0, 0, MYSQL_TYPE_LONGLONG, 1 #endif }, { "bit auto_increment", SQL_BIT, 1, NULL, NULL, NULL, 0, 0, 3, 0, 0, 1, "char(1) auto_increment", 0, 0, 0, #if MYSQL_VERSION_ID < MYSQL_VERSION_5_0 SQL_BIT, 0, 0, FIELD_TYPE_TINY, 1 #else SQL_BIT, 0, 0, MYSQL_TYPE_TINY, 1 #endif }, { "mediumint auto_increment", SQL_INTEGER, 7, NULL, NULL, NULL, 0, 0, 3, 0, 0, 1, "Medium integer auto_increment", 0, 0, 10, #if MYSQL_VERSION_ID < MYSQL_VERSION_5_0 SQL_INTEGER, 0, 0, FIELD_TYPE_INT24, 1 #else SQL_INTEGER, 0, 0, MYSQL_TYPE_INT24, 1 #endif }, { "float auto_increment", SQL_REAL, 7, NULL, NULL, NULL, 0, 0, 0, 0, 0, 1, "float auto_increment", 0, 2, 10, #if MYSQL_VERSION_ID < MYSQL_VERSION_5_0 SQL_FLOAT, 0, 0, FIELD_TYPE_FLOAT, 1 #else SQL_FLOAT, 0, 0, MYSQL_TYPE_FLOAT, 1 #endif }, { "long varchar", SQL_LONGVARCHAR, 16777215, "'", "'", NULL, 1, 0, 3, 0, 0, 0, "mediumtext", 0, 0, 0, #if MYSQL_VERSION_ID < MYSQL_VERSION_5_0 SQL_LONGVARCHAR, 0, 0, FIELD_TYPE_MEDIUM_BLOB, 1 #else SQL_LONGVARCHAR, 0, 0, MYSQL_TYPE_MEDIUM_BLOB, 1 #endif }, { "tinyint auto_increment", SQL_TINYINT, 3, NULL, NULL, NULL, 0, 0, 3, 0, 0, 1, "tinyint auto_increment", 0, 0, 10, #if MYSQL_VERSION_ID < MYSQL_VERSION_5_0 SQL_TINYINT, 0, 0, FIELD_TYPE_TINY, 1 #else SQL_TINYINT, 0, 0, MYSQL_TYPE_TINY, 1 #endif }, { "bigint unsigned auto_increment", SQL_BIGINT, 20, NULL, NULL, NULL, 0, 0, 3, 1, 0, 1, "bigint unsigned auto_increment", 0, 0, 10, #if MYSQL_VERSION_ID < MYSQL_VERSION_5_0 SQL_BIGINT, 0, 0, FIELD_TYPE_LONGLONG, 1 #else SQL_BIGINT, 0, 0, MYSQL_TYPE_LONGLONG, 1 #endif }, /* END MORE STUFF */ }; /* static const sql_type_info_t* native2sql (int t) */ static const sql_type_info_t *native2sql(int t) { switch (t) { case FIELD_TYPE_VAR_STRING: return &SQL_GET_TYPE_INFO_values[0]; case FIELD_TYPE_DECIMAL: return &SQL_GET_TYPE_INFO_values[1]; #ifdef FIELD_TYPE_NEWDECIMAL case FIELD_TYPE_NEWDECIMAL: return &SQL_GET_TYPE_INFO_values[1]; #endif case FIELD_TYPE_TINY: return 
&SQL_GET_TYPE_INFO_values[2]; case FIELD_TYPE_SHORT: return &SQL_GET_TYPE_INFO_values[3]; case FIELD_TYPE_LONG: return &SQL_GET_TYPE_INFO_values[4]; case FIELD_TYPE_FLOAT: return &SQL_GET_TYPE_INFO_values[5]; /* 6 */ case FIELD_TYPE_DOUBLE: return &SQL_GET_TYPE_INFO_values[7]; case FIELD_TYPE_TIMESTAMP: return &SQL_GET_TYPE_INFO_values[8]; case FIELD_TYPE_LONGLONG: return &SQL_GET_TYPE_INFO_values[9]; case FIELD_TYPE_INT24: return &SQL_GET_TYPE_INFO_values[10]; case FIELD_TYPE_DATE: return &SQL_GET_TYPE_INFO_values[11]; case FIELD_TYPE_TIME: return &SQL_GET_TYPE_INFO_values[12]; case FIELD_TYPE_DATETIME: return &SQL_GET_TYPE_INFO_values[13]; case FIELD_TYPE_YEAR: return &SQL_GET_TYPE_INFO_values[14]; case FIELD_TYPE_NEWDATE: return &SQL_GET_TYPE_INFO_values[15]; case FIELD_TYPE_ENUM: return &SQL_GET_TYPE_INFO_values[16]; case FIELD_TYPE_SET: return &SQL_GET_TYPE_INFO_values[17]; case FIELD_TYPE_BLOB: return &SQL_GET_TYPE_INFO_values[18]; case FIELD_TYPE_TINY_BLOB: return &SQL_GET_TYPE_INFO_values[19]; case FIELD_TYPE_MEDIUM_BLOB: return &SQL_GET_TYPE_INFO_values[20]; case FIELD_TYPE_LONG_BLOB: return &SQL_GET_TYPE_INFO_values[21]; case FIELD_TYPE_STRING: return &SQL_GET_TYPE_INFO_values[22]; default: return &SQL_GET_TYPE_INFO_values[0]; } } #define SQL_GET_TYPE_INFO_num \ (sizeof(SQL_GET_TYPE_INFO_values)/sizeof(sql_type_info_t)) /*************************************************************************** * * Name: dbd_init * * Purpose: Called when the driver is installed by DBI * * Input: dbistate - pointer to the DBI state variable, used for some * DBI internal things * * Returns: Nothing * **************************************************************************/ void dbd_init(dbistate_t* dbistate) { dTHX; DBISTATE_INIT; } /************************************************************************** * * Name: do_error, do_warn * * Purpose: Called to associate an error code and an error message * to some handle * * Input: h - the handle in error condition * rc - 
the error code * what - the error message * * Returns: Nothing * **************************************************************************/ void do_error(SV* h, int rc, const char* what, const char* sqlstate) { dTHX; D_imp_xxh(h); STRLEN lna; SV *errstr; SV *errstate; if (DBIc_TRACE_LEVEL(imp_xxh) >= 2) PerlIO_printf(DBIc_LOGPIO(imp_xxh), "\t\t--> do_error\n"); errstr= DBIc_ERRSTR(imp_xxh); sv_setiv(DBIc_ERR(imp_xxh), (IV)rc); /* set err early */ sv_setpv(errstr, what); #if MYSQL_VERSION_ID >= SQL_STATE_VERSION if (sqlstate) { errstate= DBIc_STATE(imp_xxh); sv_setpvn(errstate, sqlstate, 5); } #endif /* NO EFFECT DBIh_EVENT2(h, ERROR_event, DBIc_ERR(imp_xxh), errstr); */ if (DBIc_TRACE_LEVEL(imp_xxh) >= 2) PerlIO_printf(DBIc_LOGPIO(imp_xxh), "%s error %d recorded: %s\n", what, rc, SvPV(errstr,lna)); if (DBIc_TRACE_LEVEL(imp_xxh) >= 2) PerlIO_printf(DBIc_LOGPIO(imp_xxh), "\t\t<-- do_error\n"); } /* void do_warn(SV* h, int rc, char* what) */ void do_warn(SV* h, int rc, char* what) { dTHX; D_imp_xxh(h); STRLEN lna; SV *errstr = DBIc_ERRSTR(imp_xxh); sv_setiv(DBIc_ERR(imp_xxh), (IV)rc); /* set err early */ sv_setpv(errstr, what); /* NO EFFECT DBIh_EVENT2(h, WARN_event, DBIc_ERR(imp_xxh), errstr);*/ if (DBIc_TRACE_LEVEL(imp_xxh) >= 2) PerlIO_printf(DBIc_LOGPIO(imp_xxh), "%s warning %d recorded: %s\n", what, rc, SvPV(errstr,lna)); warn("%s", what); } #if defined(DBD_MYSQL_EMBEDDED) #define DBD_MYSQL_NAMESPACE "DBD::mysqlEmb::QUIET"; #else #define DBD_MYSQL_NAMESPACE "DBD::mysql::QUIET"; #endif #define doquietwarn(s) \ { \ SV* sv = perl_get_sv(DBD_MYSQL_NAMESPACE, FALSE); \ if (!sv || !SvTRUE(sv)) { \ warn s; \ } \ } /*************************************************************************** * * Name: mysql_dr_connect * * Purpose: Replacement for mysql_connect * * Input: MYSQL* sock - Pointer to a MYSQL structure being * initialized * char* mysql_socket - Name of a UNIX socket being used * or NULL * char* host - Host name being used or NULL for localhost * char* port - 
Port number being used or NULL for default
 * char* user - User name being used or NULL
 * char* password - Password being used or NULL
 * char* dbname - Database name being used or NULL
 * char* imp_dbh - Pointer to internal dbh structure
 *
 * Returns: The sock argument for success, NULL otherwise;
 * you have to call do_error in the latter case.
 *
 **************************************************************************/
MYSQL *mysql_dr_connect(
                        SV* dbh,
                        MYSQL* sock,
                        char* mysql_socket,
                        char* host,
                        char* port,
                        char* user,
                        char* password,
                        char* dbname,
                        imp_dbh_t *imp_dbh)
{
  int portNr;
  unsigned int client_flag;
  MYSQL* result;
  dTHX;
  D_imp_xxh(dbh);

  /* per Monty, already in client.c in API */
  /* but still not exist in libmysqld.c */
#if defined(DBD_MYSQL_EMBEDDED)
  if (host && !*host) host = NULL;
#endif
  portNr= (port && *port) ? atoi(port) : 0;

  /* already in client.c in API */
  /* if (user && !*user) user = NULL; */
  /* if (password && !*password) password = NULL; */

  if (DBIc_TRACE_LEVEL(imp_xxh) >= 2)
    PerlIO_printf(DBIc_LOGPIO(imp_xxh),
                  "imp_dbh->mysql_dr_connect: host = |%s|, port = %d," \
                  " uid = %s, pwd = %s\n",
                  host ? host : "NULL", portNr,
                  user ? user : "NULL",
                  password ? password : "NULL");

  {
#if defined(DBD_MYSQL_EMBEDDED)
    if (imp_dbh)
    {
      D_imp_drh_from_dbh;
      SV* sv = DBIc_IMP_DATA(imp_dbh);

      if (sv && SvROK(sv))
      {
        SV** svp;
        STRLEN lna;
        char * options;
        int server_args_cnt= 0;
        int server_groups_cnt= 0;
        int rc= 0;

        char ** server_args = NULL;
        char ** server_groups = NULL;

        HV* hv = (HV*) SvRV(sv);
        if (SvTYPE(hv) != SVt_PVHV)
          return NULL;

        if (!imp_drh->embedded.state)
        {
          /* Init embedded server */
          if ((svp = hv_fetch(hv, "mysql_embedded_groups", 21, FALSE)) &&
              *svp && SvTRUE(*svp))
          {
            options = SvPV(*svp, lna);
            imp_drh->embedded.groups=newSVsv(*svp);

            if ((server_groups_cnt=count_embedded_options(options)))
            {
              /* number of server_groups always server_groups+1 */
              server_groups=fill_out_embedded_options(DBIc_LOGPIO(imp_xxh),
                                                      options, 0,
                                                      (int)lna, ++server_groups_cnt);
              if (DBIc_TRACE_LEVEL(imp_xxh) >= 2)
              {
                PerlIO_printf(DBIc_LOGPIO(imp_xxh),
                              "Groups names passed to embedded server:\n");
                print_embedded_options(DBIc_LOGPIO(imp_xxh), server_groups,
                                       server_groups_cnt);
              }
            }
          }

          if ((svp = hv_fetch(hv, "mysql_embedded_options", 22, FALSE)) &&
              *svp && SvTRUE(*svp))
          {
            options = SvPV(*svp, lna);
            imp_drh->embedded.args=newSVsv(*svp);

            if ((server_args_cnt=count_embedded_options(options)))
            {
              /* number of server_options always server_options+1 */
              server_args=fill_out_embedded_options(DBIc_LOGPIO(imp_xxh),
                                                    options, 1,
                                                    (int)lna, ++server_args_cnt);
              if (DBIc_TRACE_LEVEL(imp_xxh) >= 2)
              {
                PerlIO_printf(DBIc_LOGPIO(imp_xxh),
                              "Server options passed to embedded server:\n");
                print_embedded_options(DBIc_LOGPIO(imp_xxh), server_args,
                                       server_args_cnt);
              }
            }
          }

          if (mysql_server_init(server_args_cnt, server_args, server_groups))
          {
            do_warn(dbh, AS_ERR_EMBEDDED, "Embedded server was not started. \
Could not initialize environment.");
            return NULL;
          }
          imp_drh->embedded.state=1;

          if (server_args_cnt)
            free_embedded_options(server_args, server_args_cnt);
          if (server_groups_cnt)
            free_embedded_options(server_groups, server_groups_cnt);
        }
        else
        {
         /*
          * Check if embedded parameters passed to connect() differ from
          * first ones
          */

          /* BUGFIX: the original code had "rc =+ abs(...)" (assignment of
           * +abs(...)), so a difference detected in the groups comparison
           * was silently overwritten by the options comparison below.
           * Use "+=" to accumulate both comparison results. */
          if ( ((svp = hv_fetch(hv, "mysql_embedded_groups", 21, FALSE)) &&
                *svp && SvTRUE(*svp)))
            rc += abs(sv_cmp(*svp, imp_drh->embedded.groups));

          if ( ((svp = hv_fetch(hv, "mysql_embedded_options", 22, FALSE)) &&
                *svp && SvTRUE(*svp)) )
            rc += abs(sv_cmp(*svp, imp_drh->embedded.args));

          if (rc)
          {
            do_warn(dbh, AS_ERR_EMBEDDED,
                    "Embedded server was already started. You cannot pass init\
 parameters to embedded server once");
            return NULL;
          }
        }
      }
    }
#endif

#ifdef MYSQL_NO_CLIENT_FOUND_ROWS
    client_flag = 0;
#else
    client_flag = CLIENT_FOUND_ROWS;
#endif

    mysql_init(sock);

    if (imp_dbh)
    {
      SV* sv = DBIc_IMP_DATA(imp_dbh);

      DBIc_set(imp_dbh, DBIcf_AutoCommit, TRUE);
      if (sv && SvROK(sv))
      {
        HV* hv = (HV*) SvRV(sv);
        SV** svp;
        STRLEN lna;

        /* thanks to Peter John Edwards for mysql_init_command */
        if ((svp = hv_fetch(hv, "mysql_init_command", 18, FALSE)) &&
            *svp && SvTRUE(*svp))
        {
          char* df = SvPV(*svp, lna);
          if (DBIc_TRACE_LEVEL(imp_xxh) >= 2)
            PerlIO_printf(DBIc_LOGPIO(imp_xxh),
                          "imp_dbh->mysql_dr_connect: Setting" \
                          " init command (%s).\n", df);
          mysql_options(sock, MYSQL_INIT_COMMAND, df);
        }
        if ((svp = hv_fetch(hv, "mysql_compression", 17, FALSE)) &&
            *svp && SvTRUE(*svp))
        {
          if (DBIc_TRACE_LEVEL(imp_xxh) >= 2)
            PerlIO_printf(DBIc_LOGPIO(imp_xxh),
                          "imp_dbh->mysql_dr_connect: Enabling" \
                          " compression.\n");
          mysql_options(sock, MYSQL_OPT_COMPRESS, NULL);
        }
        if ((svp = hv_fetch(hv, "mysql_connect_timeout", 21, FALSE)) &&
            *svp && SvTRUE(*svp))
        {
          int to = SvIV(*svp);
          if (DBIc_TRACE_LEVEL(imp_xxh) >= 2)
            PerlIO_printf(DBIc_LOGPIO(imp_xxh),
                          "imp_dbh->mysql_dr_connect: Setting" \
                          " connect timeout (%d).\n",to);
          mysql_options(sock, MYSQL_OPT_CONNECT_TIMEOUT,
                        (const char *)&to);
        }
        if
((svp = hv_fetch(hv, "mysql_write_timeout", 19, FALSE)) &&
            *svp && SvTRUE(*svp))
        {
          int to = SvIV(*svp);
          if (DBIc_TRACE_LEVEL(imp_xxh) >= 2)
            PerlIO_printf(DBIc_LOGPIO(imp_xxh),
                          "imp_dbh->mysql_dr_connect: Setting" \
                          " write timeout (%d).\n",to);
          mysql_options(sock, MYSQL_OPT_WRITE_TIMEOUT,
                        (const char *)&to);
        }
        if ((svp = hv_fetch(hv, "mysql_read_timeout", 18, FALSE)) &&
            *svp && SvTRUE(*svp))
        {
          int to = SvIV(*svp);
          if (DBIc_TRACE_LEVEL(imp_xxh) >= 2)
            PerlIO_printf(DBIc_LOGPIO(imp_xxh),
                          "imp_dbh->mysql_dr_connect: Setting" \
                          " read timeout (%d).\n",to);
          mysql_options(sock, MYSQL_OPT_READ_TIMEOUT,
                        (const char *)&to);
        }
        if ((svp = hv_fetch(hv, "mysql_skip_secure_auth", 22, FALSE)) &&
            *svp && SvTRUE(*svp))
        {
          my_bool secauth = 0;
          if (DBIc_TRACE_LEVEL(imp_xxh) >= 2)
            PerlIO_printf(DBIc_LOGPIO(imp_xxh),
                          "imp_dbh->mysql_dr_connect: Skipping" \
                          " secure auth\n");
          mysql_options(sock, MYSQL_SECURE_AUTH, &secauth);
        }
        if ((svp = hv_fetch(hv, "mysql_read_default_file", 23, FALSE)) &&
            *svp && SvTRUE(*svp))
        {
          char* df = SvPV(*svp, lna);
          if (DBIc_TRACE_LEVEL(imp_xxh) >= 2)
            PerlIO_printf(DBIc_LOGPIO(imp_xxh),
                          "imp_dbh->mysql_dr_connect: Reading" \
                          " default file %s.\n", df);
          mysql_options(sock, MYSQL_READ_DEFAULT_FILE, df);
        }
        if ((svp = hv_fetch(hv, "mysql_read_default_group", 24,
                            FALSE)) &&
            *svp && SvTRUE(*svp))
        {
          char* gr = SvPV(*svp, lna);
          if (DBIc_TRACE_LEVEL(imp_xxh) >= 2)
            PerlIO_printf(DBIc_LOGPIO(imp_xxh),
                          "imp_dbh->mysql_dr_connect: Using" \
                          " default group %s.\n", gr);
          mysql_options(sock, MYSQL_READ_DEFAULT_GROUP, gr);
        }
#if (MYSQL_VERSION_ID >= 50606)
        /* Forward user-supplied connection attributes (performance_schema) */
        if ((svp = hv_fetch(hv, "mysql_conn_attrs", 16, FALSE)) && *svp)
        {
            HV* attrs = (HV*) SvRV(*svp);
            HE* entry = NULL;
            I32 num_entries = hv_iterinit(attrs);
            while (num_entries && (entry = hv_iternext(attrs)))
            {
                I32 retlen = 0;
                char *attr_name = hv_iterkey(entry, &retlen);
                SV *sv_attr_val = hv_iterval(attrs, entry);
                char *attr_val = SvPV(sv_attr_val, lna);
                mysql_options4(sock, MYSQL_OPT_CONNECT_ATTR_ADD,
                               attr_name, attr_val);
            }
        }
#endif
        if ((svp = hv_fetch(hv, "mysql_client_found_rows", 23, FALSE)) && *svp)
        {
          if (SvTRUE(*svp))
            client_flag |= CLIENT_FOUND_ROWS;
          else
            client_flag &= ~CLIENT_FOUND_ROWS;
        }
        if ((svp = hv_fetch(hv, "mysql_use_result", 16, FALSE)) && *svp)
        {
          imp_dbh->use_mysql_use_result = SvTRUE(*svp);
          if (DBIc_TRACE_LEVEL(imp_xxh) >= 2)
            PerlIO_printf(DBIc_LOGPIO(imp_xxh),
                          "imp_dbh->use_mysql_use_result: %d\n",
                          imp_dbh->use_mysql_use_result);
        }
        if ((svp = hv_fetch(hv, "mysql_bind_type_guessing", 24, TRUE)) && *svp)
        {
          imp_dbh->bind_type_guessing= SvTRUE(*svp);
          if (DBIc_TRACE_LEVEL(imp_xxh) >= 2)
            PerlIO_printf(DBIc_LOGPIO(imp_xxh),
                          "imp_dbh->bind_type_guessing: %d\n",
                          imp_dbh->bind_type_guessing);
        }
        if ((svp = hv_fetch(hv, "mysql_bind_comment_placeholders", 31, FALSE)) && *svp)
        {
          imp_dbh->bind_comment_placeholders = SvTRUE(*svp);
          if (DBIc_TRACE_LEVEL(imp_xxh) >= 2)
            PerlIO_printf(DBIc_LOGPIO(imp_xxh),
                          "imp_dbh->bind_comment_placeholders: %d\n",
                          imp_dbh->bind_comment_placeholders);
        }
        if ((svp = hv_fetch(hv, "mysql_no_autocommit_cmd", 23, FALSE)) && *svp)
        {
          imp_dbh->no_autocommit_cmd= SvTRUE(*svp);
          if (DBIc_TRACE_LEVEL(imp_xxh) >= 2)
            PerlIO_printf(DBIc_LOGPIO(imp_xxh),
                          "imp_dbh->no_autocommit_cmd: %d\n",
                          imp_dbh->no_autocommit_cmd);
        }
#if FABRIC_SUPPORT
        if ((svp = hv_fetch(hv, "mysql_use_fabric", 16, FALSE)) &&
            *svp && SvTRUE(*svp))
        {
          if (DBIc_TRACE_LEVEL(imp_xxh) >= 2)
            PerlIO_printf(DBIc_LOGPIO(imp_xxh),
                          "imp_dbh->use_fabric: Enabling use of" \
                          " MySQL Fabric.\n");
          mysql_options(sock, MYSQL_OPT_USE_FABRIC, NULL);
        }
#endif
#if defined(CLIENT_MULTI_STATEMENTS)
        if ((svp = hv_fetch(hv, "mysql_multi_statements", 22, FALSE)) && *svp)
        {
          if (SvTRUE(*svp))
            client_flag |= CLIENT_MULTI_STATEMENTS;
          else
            client_flag &= ~CLIENT_MULTI_STATEMENTS;
        }
#endif
#if MYSQL_VERSION_ID >=SERVER_PREPARE_VERSION
        /* took out client_flag |= CLIENT_PROTOCOL_41; */
        /* because libmysql.c already sets this no matter what */
        if ((svp = hv_fetch(hv, "mysql_server_prepare", 20, FALSE)) && *svp)
        {
          if (SvTRUE(*svp))
          {
            client_flag |= CLIENT_PROTOCOL_41;
            imp_dbh->use_server_side_prepare = TRUE;
          }
          else
          {
            client_flag &= ~CLIENT_PROTOCOL_41;
            imp_dbh->use_server_side_prepare = FALSE;
          }
        }
        if (DBIc_TRACE_LEVEL(imp_xxh) >= 2)
          PerlIO_printf(DBIc_LOGPIO(imp_xxh),
                        "imp_dbh->use_server_side_prepare: %d\n",
                        imp_dbh->use_server_side_prepare);
#endif

        /* HELMUT */
#if defined(sv_utf8_decode) && MYSQL_VERSION_ID >=SERVER_PREPARE_VERSION
        /* utf8mb4 takes precedence over plain utf8 when both are set */
        if ((svp = hv_fetch(hv, "mysql_enable_utf8mb4", 20, FALSE)) &&
            *svp && SvTRUE(*svp)) {
          mysql_options(sock, MYSQL_SET_CHARSET_NAME, "utf8mb4");
        }
        else if ((svp = hv_fetch(hv, "mysql_enable_utf8", 17, FALSE)) && *svp) {
          /* Do not touch imp_dbh->enable_utf8 as we are called earlier
           * than it is set and mysql_options() must be before:
           * mysql_real_connect()
          */
          mysql_options(sock, MYSQL_SET_CHARSET_NAME,
                        (SvTRUE(*svp) ? "utf8" : "latin1"));
          if (DBIc_TRACE_LEVEL(imp_xxh) >= 2)
            PerlIO_printf(DBIc_LOGPIO(imp_xxh),
                          "mysql_options: MYSQL_SET_CHARSET_NAME=%s\n",
                          (SvTRUE(*svp) ? "utf8" : "latin1"));
        }
#endif

#if defined(DBD_MYSQL_WITH_SSL) && !defined(DBD_MYSQL_EMBEDDED) && \
  (defined(CLIENT_SSL) || (MYSQL_VERSION_ID >= 40000))
        if ((svp = hv_fetch(hv, "mysql_ssl", 9, FALSE)) && *svp)
        {
          if (SvTRUE(*svp))
          {
            char *client_key = NULL;
            char *client_cert = NULL;
            char *ca_file = NULL;
            char *ca_path = NULL;
            char *cipher = NULL;
            STRLEN lna;
#if MYSQL_VERSION_ID >= SSL_VERIFY_VERSION && MYSQL_VERSION_ID <= SSL_LAST_VERIFY_VERSION
            /* New code to utilise MySQLs new feature that verifies that the
               server's hostname that the client connects to matches that of
               the certificate */
            my_bool ssl_verify_true = 0;
            if ((svp = hv_fetch(hv, "mysql_ssl_verify_server_cert",
                                28, FALSE)) && *svp)
              ssl_verify_true = SvTRUE(*svp);
#endif
            if ((svp = hv_fetch(hv, "mysql_ssl_client_key", 20, FALSE)) && *svp)
              client_key = SvPV(*svp, lna);

            if ((svp = hv_fetch(hv, "mysql_ssl_client_cert", 21, FALSE)) &&
                *svp)
              client_cert = SvPV(*svp, lna);

            if ((svp = hv_fetch(hv, "mysql_ssl_ca_file", 17, FALSE)) &&
                *svp)
              ca_file = SvPV(*svp, lna);

            if ((svp = hv_fetch(hv, "mysql_ssl_ca_path", 17, FALSE)) &&
                *svp)
              ca_path = SvPV(*svp, lna);

            if ((svp = hv_fetch(hv, "mysql_ssl_cipher", 16, FALSE)) &&
                *svp)
              cipher = SvPV(*svp, lna);

            mysql_ssl_set(sock, client_key, client_cert, ca_file,
                          ca_path, cipher);
#if MYSQL_VERSION_ID >= SSL_VERIFY_VERSION && MYSQL_VERSION_ID <= SSL_LAST_VERIFY_VERSION
            mysql_options(sock, MYSQL_OPT_SSL_VERIFY_SERVER_CERT,
                          &ssl_verify_true);
#endif
            client_flag |= CLIENT_SSL;
          }
        }
#endif
#if (MYSQL_VERSION_ID >= 32349)
        /*
         * MySQL 3.23.49 disables LOAD DATA LOCAL by default. Use
         * mysql_local_infile=1 in the DSN to enable it.
         */
        if ((svp = hv_fetch( hv, "mysql_local_infile", 18, FALSE)) && *svp)
        {
          unsigned int flag = SvTRUE(*svp);
          if (DBIc_TRACE_LEVEL(imp_xxh) >= 2)
            PerlIO_printf(DBIc_LOGPIO(imp_xxh),
                          "imp_dbh->mysql_dr_connect: Using" \
                          " local infile %u.\n", flag);
          mysql_options(sock, MYSQL_OPT_LOCAL_INFILE, (const char *) &flag);
        }
#endif
      }
    }

    if (DBIc_TRACE_LEVEL(imp_xxh) >= 2)
      PerlIO_printf(DBIc_LOGPIO(imp_xxh),
                    "imp_dbh->mysql_dr_connect: client_flags = %d\n",
                    client_flag);

#if MYSQL_VERSION_ID >= MULTIPLE_RESULT_SET_VERSION
    client_flag|= CLIENT_MULTI_RESULTS;
#endif
    result = mysql_real_connect(sock, host, user, password, dbname,
                                portNr, mysql_socket, client_flag);
    if (DBIc_TRACE_LEVEL(imp_xxh) >= 2)
      PerlIO_printf(DBIc_LOGPIO(imp_xxh), "imp_dbh->mysql_dr_connect: <-");

    if (result)
    {
#if MYSQL_VERSION_ID >=SERVER_PREPARE_VERSION
      /* connection succeeded. */
      /* imp_dbh == NULL when mysql_dr_connect() is called from mysql.xs
         functions (_admin_internal(),_ListDBs()). */
      if (!(result->client_flag & CLIENT_PROTOCOL_41) && imp_dbh)
        imp_dbh->use_server_side_prepare = FALSE;
#endif
#if MYSQL_ASYNC
      if(imp_dbh) {
          imp_dbh->async_query_in_flight = NULL;
      }
#endif

      /*
        we turn off Mysql's auto reconnect and handle re-connecting ourselves
        so that we can keep track of when this happens.
      */
      result->reconnect=0;
    }
    else {
      /*
        sock was allocated with mysql_init()
        fixes: https://rt.cpan.org/Ticket/Display.html?id=86153

        Safefree(sock);

        rurban: No, we still need this handle later in mysql_dr_error().
        RT #97625. It will be freed as imp_dbh->pmysql in dbd_db_destroy(),
        which is called by the DESTROY handler.
      */
    }
    return result;
  }
}

/* safe_hv_fetch */
/* Fetch a hash entry as a C string; returns NULL when absent or empty. */
static char *safe_hv_fetch(pTHX_ HV *hv, const char *name, int name_length)
{
  SV** svp;
  STRLEN len;
  char *res= NULL;

  if ((svp= hv_fetch(hv, name, name_length, FALSE)))
  {
    res= SvPV(*svp, len);
    if (!len)
      res= NULL;
  }
  return res;
}

/*
 Frontend for mysql_dr_connect
*/
static int my_login(pTHX_ SV* dbh, imp_dbh_t *imp_dbh)
{
  SV* sv;
  HV* hv;
  char* dbname;
  char* host;
  char* port;
  char* user;
  char* password;
  char* mysql_socket;
  int result;
  D_imp_xxh(dbh);

  /* TODO- resolve this so that it is set only if DBI is 1.607 */
#define TAKE_IMP_DATA_VERSION 1
#if TAKE_IMP_DATA_VERSION
  if (DBIc_has(imp_dbh, DBIcf_IMPSET))
  { /* eg from take_imp_data() */
    if (DBIc_has(imp_dbh, DBIcf_ACTIVE))
    {
      if (DBIc_TRACE_LEVEL(imp_xxh) >= 2)
        PerlIO_printf(DBIc_LOGPIO(imp_xxh), "my_login skip connect\n");
      /* tell our parent we've adopted an active child */
      ++DBIc_ACTIVE_KIDS(DBIc_PARENT_COM(imp_dbh));
      return TRUE;
    }
    if (DBIc_TRACE_LEVEL(imp_xxh) >= 2)
      PerlIO_printf(DBIc_LOGPIO(imp_xxh),
                    "my_login IMPSET but not ACTIVE so connect not skipped\n");
  }
#endif

  sv = DBIc_IMP_DATA(imp_dbh);

  if (!sv || !SvROK(sv))
    return FALSE;

  hv = (HV*) SvRV(sv);
  if (SvTYPE(hv) != SVt_PVHV)
    return FALSE;

  host= safe_hv_fetch(aTHX_ hv, "host", 4);
  port= safe_hv_fetch(aTHX_ hv, "port", 4);
  user= safe_hv_fetch(aTHX_ hv, "user", 4);
  password= safe_hv_fetch(aTHX_ hv, "password", 8);
  dbname= safe_hv_fetch(aTHX_ hv, "database", 8);
  mysql_socket= safe_hv_fetch(aTHX_ hv, "mysql_socket", 12);

  if (DBIc_TRACE_LEVEL(imp_xxh) >= 2)
    PerlIO_printf(DBIc_LOGPIO(imp_xxh),
                  "imp_dbh->my_login : dbname = %s, uid = %s, pwd = %s," \
                  "host = %s, port = %s\n",
                  dbname ? dbname : "NULL",
                  user ?
user : "NULL",
                  password ? password : "NULL",
                  host ? host : "NULL",
                  port ? port : "NULL");

  if (!imp_dbh->pmysql) {
     Newz(908, imp_dbh->pmysql, 1, MYSQL);
  }
  result = mysql_dr_connect(dbh, imp_dbh->pmysql, mysql_socket, host, port,
                            user, password, dbname, imp_dbh) ? TRUE : FALSE;
  return result;
}

/**************************************************************************
 *
 * Name: dbd_db_login
 *
 * Purpose: Called for connecting to a database and logging in.
 *
 * Input: dbh - database handle being initialized
 * imp_dbh - drivers private database handle data
 * dbname - the database we want to log into; may be like
 * "dbname:host" or "dbname:host:port"
 * user - user name to connect as
 * password - password to connect with
 *
 * Returns: TRUE for success, FALSE otherwise; do_error has already
 * been called in the latter case
 *
 **************************************************************************/
int dbd_db_login(SV* dbh, imp_dbh_t* imp_dbh, char* dbname, char* user,
                 char* password) {
#ifdef dTHR
  dTHR;
#endif
  dTHX;
  D_imp_xxh(dbh);

  if (DBIc_TRACE_LEVEL(imp_xxh) >= 2)
    PerlIO_printf(DBIc_LOGPIO(imp_xxh),
                  "imp_dbh->connect: dsn = %s, uid = %s, pwd = %s\n",
                  dbname ? dbname : "NULL",
                  user ? user : "NULL",
                  password ? password : "NULL");

  imp_dbh->stats.auto_reconnects_ok= 0;
  imp_dbh->stats.auto_reconnects_failed= 0;
  imp_dbh->bind_type_guessing= FALSE;
  imp_dbh->bind_comment_placeholders= FALSE;
  imp_dbh->has_transactions= TRUE;
  /* Safer we flip this to TRUE perl side if we detect a mod_perl env. */
  imp_dbh->auto_reconnect = FALSE;

  /* HELMUT */
#if defined(sv_utf8_decode) && MYSQL_VERSION_ID >=SERVER_PREPARE_VERSION
  imp_dbh->enable_utf8 = FALSE; /* initialize mysql_enable_utf8 */
  imp_dbh->enable_utf8mb4 = FALSE; /* initialize mysql_enable_utf8mb4 */
#endif

  if (!my_login(aTHX_ dbh, imp_dbh))
  {
    /* report the failure and free the MYSQL handle allocated above */
    if(imp_dbh->pmysql) {
        do_error(dbh, mysql_errno(imp_dbh->pmysql),
                mysql_error(imp_dbh->pmysql) ,mysql_sqlstate(imp_dbh->pmysql));
        Safefree(imp_dbh->pmysql);
    }
    return FALSE;
  }

  /*
   * Tell DBI, that dbh->disconnect should be called for this handle
   */
  DBIc_ACTIVE_on(imp_dbh);

  /* Tell DBI, that dbh->destroy should be called for this handle */
  DBIc_on(imp_dbh, DBIcf_IMPSET);

  return TRUE;
}

/***************************************************************************
 *
 * Name: dbd_db_commit
 * dbd_db_rollback
 *
 * Purpose: You guess what they should do.
 *
 * Input: dbh - database handle being committed or rolled back
 * imp_dbh - drivers private database handle data
 *
 * Returns: TRUE for success, FALSE otherwise; do_error has already
 * been called in the latter case
 *
 **************************************************************************/
int
dbd_db_commit(SV* dbh, imp_dbh_t* imp_dbh)
{
  /* nothing to do while in AutoCommit mode */
  if (DBIc_has(imp_dbh, DBIcf_AutoCommit))
    return FALSE;

  ASYNC_CHECK_RETURN(dbh, FALSE);
  if (imp_dbh->has_transactions)
  {
#if MYSQL_VERSION_ID < SERVER_PREPARE_VERSION
    if (mysql_real_query(imp_dbh->pmysql, "COMMIT", 6))
#else
    if (mysql_commit(imp_dbh->pmysql))
#endif
    {
      do_error(dbh, mysql_errno(imp_dbh->pmysql), mysql_error(imp_dbh->pmysql)
               ,mysql_sqlstate(imp_dbh->pmysql));
      return FALSE;
    }
  }
  else
    do_warn(dbh, JW_ERR_NOT_IMPLEMENTED,
            "Commit ineffective because transactions are not available");
  return TRUE;
}

/* dbd_db_rollback */
int
dbd_db_rollback(SV* dbh, imp_dbh_t* imp_dbh) {
  /* croak, if not in AutoCommit mode */
  if (DBIc_has(imp_dbh, DBIcf_AutoCommit))
    return FALSE;

  ASYNC_CHECK_RETURN(dbh, FALSE);
  if (imp_dbh->has_transactions)
  {
#if MYSQL_VERSION_ID < SERVER_PREPARE_VERSION
    if (mysql_real_query(imp_dbh->pmysql, "ROLLBACK", 8))
#else
    if (mysql_rollback(imp_dbh->pmysql))
#endif
    {
      do_error(dbh, mysql_errno(imp_dbh->pmysql),
               mysql_error(imp_dbh->pmysql) ,mysql_sqlstate(imp_dbh->pmysql));
      return FALSE;
    }
  }
  else
    do_error(dbh, JW_ERR_NOT_IMPLEMENTED,
             "Rollback ineffective because transactions are not available"
             ,NULL);
  return TRUE;
}

/*
 ***************************************************************************
 *
 * Name: dbd_db_disconnect
 *
 * Purpose: Disconnect a database handle from its database
 *
 * Input: dbh - database handle being disconnected
 * imp_dbh - drivers private database handle data
 *
 * Returns: TRUE for success, FALSE otherwise; do_error has already
 * been called in the latter case
 *
 **************************************************************************/
int
dbd_db_disconnect(SV* dbh, imp_dbh_t* imp_dbh)
{
#ifdef dTHR
  dTHR;
#endif
  dTHX;
  D_imp_xxh(dbh);

  /* We assume that disconnect will always work */
  /* since most errors imply already disconnected. */
  DBIc_ACTIVE_off(imp_dbh);
  if (DBIc_TRACE_LEVEL(imp_xxh) >= 2)
    PerlIO_printf(DBIc_LOGPIO(imp_xxh), "imp_dbh->pmysql: %p\n",
                  imp_dbh->pmysql);
  mysql_close(imp_dbh->pmysql );

  /* We don't free imp_dbh since a reference still exists */
  /* The DESTROY method is the only one to 'free' memory. */
  return TRUE;
}

/***************************************************************************
 *
 * Name: dbd_discon_all
 *
 * Purpose: Disconnect all database handles at shutdown time
 *
 * Input: dbh - database handle being disconnected
 * imp_dbh - drivers private database handle data
 *
 * Returns: TRUE for success, FALSE otherwise; do_error has already
 * been called in the latter case
 *
 **************************************************************************/
int dbd_discon_all (SV *drh, imp_drh_t *imp_drh)
{
#if defined(dTHR)
  dTHR;
#endif
  dTHX;
  D_imp_xxh(drh);

#if defined(DBD_MYSQL_EMBEDDED)
  if (imp_drh->embedded.state)
  {
    if (DBIc_TRACE_LEVEL(imp_xxh) >= 2)
      PerlIO_printf(DBIc_LOGPIO(imp_xxh), "Stop embedded server\n");

    mysql_server_end();
    if (imp_drh->embedded.groups)
    {
      (void) SvREFCNT_dec(imp_drh->embedded.groups);
      imp_drh->embedded.groups = NULL;
    }

    if (imp_drh->embedded.args)
    {
      (void) SvREFCNT_dec(imp_drh->embedded.args);
      imp_drh->embedded.args = NULL;
    }
  }
#else
  mysql_server_end();
#endif

  /* The disconnect_all concept is flawed and needs more work */
  if (!PL_dirty && !SvTRUE(perl_get_sv("DBI::PERL_ENDING",0)))
  {
    sv_setiv(DBIc_ERR(imp_drh), (IV)1);
    sv_setpv(DBIc_ERRSTR(imp_drh),
             (char*)"disconnect_all not implemented");
    /* NO EFFECT DBIh_EVENT2(drh, ERROR_event,
       DBIc_ERR(imp_drh), DBIc_ERRSTR(imp_drh)); */
    return FALSE;
  }
  PL_perl_destruct_level = 0;
  return FALSE;
}

/****************************************************************************
 *
 * Name: dbd_db_destroy
 *
 * Purpose: Our part of the dbh destructor
 *
 * Input: dbh - database handle being destroyed
 * imp_dbh - drivers private database handle data
 *
 * Returns: Nothing
 *
 **************************************************************************/
void dbd_db_destroy(SV* dbh, imp_dbh_t* imp_dbh) {

  /*
   * Being on the safe side never hurts ...
*/ if (DBIc_ACTIVE(imp_dbh)) { if (imp_dbh->has_transactions) { if (!DBIc_has(imp_dbh, DBIcf_AutoCommit)) #if MYSQL_VERSION_ID < SERVER_PREPARE_VERSION if ( mysql_real_query(imp_dbh->pmysql, "ROLLBACK", 8)) #else if (mysql_rollback(imp_dbh->pmysql)) #endif do_error(dbh, TX_ERR_ROLLBACK,"ROLLBACK failed" ,NULL); } dbd_db_disconnect(dbh, imp_dbh); } Safefree(imp_dbh->pmysql); /* Tell DBI, that dbh->destroy must no longer be called */ DBIc_off(imp_dbh, DBIcf_IMPSET); } /* *************************************************************************** * * Name: dbd_db_STORE_attrib * * Purpose: Function for storing dbh attributes; we currently support * just nothing. :-) * * Input: dbh - database handle being modified * imp_dbh - drivers private database handle data * keysv - the attribute name * valuesv - the attribute value * * Returns: TRUE for success, FALSE otherwise * **************************************************************************/ int dbd_db_STORE_attrib( SV* dbh, imp_dbh_t* imp_dbh, SV* keysv, SV* valuesv ) { dTHX; STRLEN kl; char *key = SvPV(keysv, kl); SV *cachesv = Nullsv; int cacheit = FALSE; const bool bool_value = SvTRUE(valuesv); if (kl==10 && strEQ(key, "AutoCommit")) { if (imp_dbh->has_transactions) { bool oldval = DBIc_has(imp_dbh,DBIcf_AutoCommit) ? 1 : 0; if (bool_value == oldval) return TRUE; /* if setting AutoCommit on ... */ if (!imp_dbh->no_autocommit_cmd) { if ( #if MYSQL_VERSION_ID >=SERVER_PREPARE_VERSION mysql_autocommit(imp_dbh->pmysql, bool_value) #else mysql_real_query(imp_dbh->pmysql, bool_value ? "SET AUTOCOMMIT=1" : "SET AUTOCOMMIT=0", 16) #endif ) { do_error(dbh, TX_ERR_AUTOCOMMIT, bool_value ? "Turning on AutoCommit failed" : "Turning off AutoCommit failed" ,NULL); return TRUE; /* TRUE means we handled it - important to avoid spurious errors */ } } DBIc_set(imp_dbh, DBIcf_AutoCommit, bool_value); } else { /* * We do support neither transactions nor "AutoCommit". * But we stub it. 
:-) */ if (!bool_value) { do_error(dbh, JW_ERR_NOT_IMPLEMENTED, "Transactions not supported by database" ,NULL); croak("Transactions not supported by database"); } } } else if (kl == 16 && strEQ(key,"mysql_use_result")) imp_dbh->use_mysql_use_result = bool_value; else if (kl == 20 && strEQ(key,"mysql_auto_reconnect")) imp_dbh->auto_reconnect = bool_value; else if (kl == 20 && strEQ(key, "mysql_server_prepare")) imp_dbh->use_server_side_prepare = bool_value; else if (kl == 23 && strEQ(key,"mysql_no_autocommit_cmd")) imp_dbh->no_autocommit_cmd = bool_value; else if (kl == 24 && strEQ(key,"mysql_bind_type_guessing")) imp_dbh->bind_type_guessing = bool_value; else if (kl == 31 && strEQ(key,"mysql_bind_comment_placeholders")) imp_dbh->bind_type_guessing = bool_value; #if defined(sv_utf8_decode) && MYSQL_VERSION_ID >=SERVER_PREPARE_VERSION else if (kl == 17 && strEQ(key, "mysql_enable_utf8")) imp_dbh->enable_utf8 = bool_value; else if (kl == 20 && strEQ(key, "mysql_enable_utf8mb4")) imp_dbh->enable_utf8mb4 = bool_value; #endif #if FABRIC_SUPPORT else if (kl == 22 && strEQ(key, "mysql_fabric_opt_group")) mysql_options(imp_dbh->pmysql, FABRIC_OPT_GROUP, (void *)SvPVbyte_nolen(valuesv)); else if (kl == 29 && strEQ(key, "mysql_fabric_opt_default_mode")) { if (SvOK(valuesv)) { STRLEN len; const char *str = SvPVbyte(valuesv, len); if ( len == 0 || ( len == 2 && (strnEQ(str, "ro", 3) || strnEQ(str, "rw", 3)) ) ) mysql_options(imp_dbh->pmysql, FABRIC_OPT_DEFAULT_MODE, len == 0 ? 
NULL : str); else croak("Valid settings for FABRIC_OPT_DEFAULT_MODE are 'ro', 'rw', or undef/empty string"); } else { mysql_options(imp_dbh->pmysql, FABRIC_OPT_DEFAULT_MODE, NULL); } } else if (kl == 21 && strEQ(key, "mysql_fabric_opt_mode")) { STRLEN len; const char *str = SvPVbyte(valuesv, len); if (len != 2 || (strnNE(str, "ro", 3) && strnNE(str, "rw", 3))) croak("Valid settings for FABRIC_OPT_MODE are 'ro' or 'rw'"); mysql_options(imp_dbh->pmysql, FABRIC_OPT_MODE, str); } else if (kl == 34 && strEQ(key, "mysql_fabric_opt_group_credentials")) { croak("'fabric_opt_group_credentials' is not supported"); } #endif else return FALSE; /* Unknown key */ if (cacheit) /* cache value for later DBI 'quick' fetch? */ (void)hv_store((HV*)SvRV(dbh), key, kl, cachesv, 0); return TRUE; } /*************************************************************************** * * Name: dbd_db_FETCH_attrib * * Purpose: Function for fetching dbh attributes * * Input: dbh - database handle being queried * imp_dbh - drivers private database handle data * keysv - the attribute name * * Returns: An SV*, if successful; NULL otherwise * * Notes: Do not forget to call sv_2mortal in the former case! 
* **************************************************************************/ static SV* my_ulonglong2str(pTHX_ my_ulonglong val) { char buf[64]; char *ptr = buf + sizeof(buf) - 1; if (val == 0) return newSVpvn("0", 1); *ptr = '\0'; while (val > 0) { *(--ptr) = ('0' + (val % 10)); val = val / 10; } return newSVpvn(ptr, (buf+ sizeof(buf) - 1) - ptr); } SV* dbd_db_FETCH_attrib(SV *dbh, imp_dbh_t *imp_dbh, SV *keysv) { dTHX; STRLEN kl; char *key = SvPV(keysv, kl); SV* result = NULL; dbh= dbh; switch (*key) { case 'A': if (strEQ(key, "AutoCommit")) { if (imp_dbh->has_transactions) return sv_2mortal(boolSV(DBIc_has(imp_dbh,DBIcf_AutoCommit))); /* Default */ return &PL_sv_yes; } break; } if (strncmp(key, "mysql_", 6) == 0) { key = key+6; kl = kl-6; } /* MONTY: Check if kl should not be used or used everywhere */ switch(*key) { case 'a': if (kl == strlen("auto_reconnect") && strEQ(key, "auto_reconnect")) result= sv_2mortal(newSViv(imp_dbh->auto_reconnect)); break; case 'b': if (kl == strlen("bind_type_guessing") && strEQ(key, "bind_type_guessing")) { result = sv_2mortal(newSViv(imp_dbh->bind_type_guessing)); } else if (kl == strlen("bind_comment_placeholders") && strEQ(key, "bind_comment_placeholders")) { result = sv_2mortal(newSViv(imp_dbh->bind_comment_placeholders)); } break; case 'c': if (kl == 10 && strEQ(key, "clientinfo")) { const char* clientinfo = mysql_get_client_info(); result= clientinfo ? sv_2mortal(newSVpvn(clientinfo, strlen(clientinfo))) : &PL_sv_undef; } else if (kl == 13 && strEQ(key, "clientversion")) { result= sv_2mortal(my_ulonglong2str(aTHX_ mysql_get_client_version())); } break; case 'e': if (strEQ(key, "errno")) result= sv_2mortal(newSViv((IV)mysql_errno(imp_dbh->pmysql))); else if ( strEQ(key, "error") || strEQ(key, "errmsg")) { /* Note that errmsg is obsolete, as of 2.09! 
*/ const char* msg = mysql_error(imp_dbh->pmysql); result= sv_2mortal(newSVpvn(msg, strlen(msg))); } /* HELMUT */ #if defined(sv_utf8_decode) && MYSQL_VERSION_ID >=SERVER_PREPARE_VERSION else if (kl == strlen("enable_utf8mb4") && strEQ(key, "enable_utf8mb4")) result = sv_2mortal(newSViv(imp_dbh->enable_utf8mb4)); else if (kl == strlen("enable_utf8") && strEQ(key, "enable_utf8")) result = sv_2mortal(newSViv(imp_dbh->enable_utf8)); #endif break; case 'd': if (strEQ(key, "dbd_stats")) { HV* hv = newHV(); (void)hv_store( hv, "auto_reconnects_ok", strlen("auto_reconnects_ok"), newSViv(imp_dbh->stats.auto_reconnects_ok), 0 ); (void)hv_store( hv, "auto_reconnects_failed", strlen("auto_reconnects_failed"), newSViv(imp_dbh->stats.auto_reconnects_failed), 0 ); result= sv_2mortal((newRV_noinc((SV*)hv))); } case 'h': if (strEQ(key, "hostinfo")) { const char* hostinfo = mysql_get_host_info(imp_dbh->pmysql); result= hostinfo ? sv_2mortal(newSVpvn(hostinfo, strlen(hostinfo))) : &PL_sv_undef; } break; case 'i': if (strEQ(key, "info")) { const char* info = mysql_info(imp_dbh->pmysql); result= info ? sv_2mortal(newSVpvn(info, strlen(info))) : &PL_sv_undef; } else if (kl == 8 && strEQ(key, "insertid")) /* We cannot return an IV, because the insertid is a long. */ result= sv_2mortal(my_ulonglong2str(aTHX_ mysql_insert_id(imp_dbh->pmysql))); break; case 'n': if (kl == strlen("no_autocommit_cmd") && strEQ(key, "no_autocommit_cmd")) result = sv_2mortal(newSViv(imp_dbh->no_autocommit_cmd)); break; case 'p': if (kl == 9 && strEQ(key, "protoinfo")) result= sv_2mortal(newSViv(mysql_get_proto_info(imp_dbh->pmysql))); break; case 's': if (kl == 10 && strEQ(key, "serverinfo")) { const char* serverinfo = mysql_get_server_info(imp_dbh->pmysql); result= serverinfo ? 
sv_2mortal(newSVpvn(serverinfo, strlen(serverinfo))) : &PL_sv_undef; } else if (kl == 13 && strEQ(key, "serverversion")) result= sv_2mortal(my_ulonglong2str(aTHX_ mysql_get_server_version(imp_dbh->pmysql))); else if (strEQ(key, "sock")) result= sv_2mortal(newSViv(PTR2IV(imp_dbh->pmysql))); else if (strEQ(key, "sockfd")) result= sv_2mortal(newSViv((IV) imp_dbh->pmysql->net.fd)); else if (strEQ(key, "stat")) { const char* stats = mysql_stat(imp_dbh->pmysql); result= stats ? sv_2mortal(newSVpvn(stats, strlen(stats))) : &PL_sv_undef; } else if (strEQ(key, "stats")) { /* Obsolete, as of 2.09 */ const char* stats = mysql_stat(imp_dbh->pmysql); result= stats ? sv_2mortal(newSVpvn(stats, strlen(stats))) : &PL_sv_undef; } else if (kl == 14 && strEQ(key,"server_prepare")) result= sv_2mortal(newSViv((IV) imp_dbh->use_server_side_prepare)); break; case 't': if (kl == 9 && strEQ(key, "thread_id")) result= sv_2mortal(newSViv(mysql_thread_id(imp_dbh->pmysql))); break; case 'w': if (kl == 13 && strEQ(key, "warning_count")) result= sv_2mortal(newSViv(mysql_warning_count(imp_dbh->pmysql))); break; case 'u': if (strEQ(key, "use_result")) { result= sv_2mortal(newSViv((IV) imp_dbh->use_mysql_use_result)); } break; } if (result== NULL) return Nullsv; return result; } /* ************************************************************************** * * Name: dbd_st_prepare * * Purpose: Called for preparing an SQL statement; our part of the * statement handle constructor * * Input: sth - statement handle being initialized * imp_sth - drivers private statement handle data * statement - pointer to string with SQL statement * attribs - statement attributes, currently not in use * * Returns: TRUE for success, FALSE otherwise; do_error will * be called in the latter case * **************************************************************************/ int dbd_st_prepare( SV *sth, imp_sth_t *imp_sth, char *statement, SV *attribs) { int i; SV **svp; dTHX; #if MYSQL_VERSION_ID >= SERVER_PREPARE_VERSION 
#if MYSQL_VERSION_ID < CALL_PLACEHOLDER_VERSION char *str_ptr, *str_last_ptr; #if MYSQL_VERSION_ID < LIMIT_PLACEHOLDER_VERSION int limit_flag=0; #endif #endif int prepare_retval; MYSQL_BIND *bind, *bind_end; imp_sth_phb_t *fbind; #endif D_imp_xxh(sth); D_imp_dbh_from_sth; if (DBIc_TRACE_LEVEL(imp_xxh) >= 2) PerlIO_printf(DBIc_LOGPIO(imp_xxh), "\t-> dbd_st_prepare MYSQL_VERSION_ID %d, SQL statement: %s\n", MYSQL_VERSION_ID, statement); #if MYSQL_VERSION_ID >= SERVER_PREPARE_VERSION /* Set default value of 'mysql_server_prepare' attribute for sth from dbh */ imp_sth->use_server_side_prepare= imp_dbh->use_server_side_prepare; if (attribs) { svp= DBD_ATTRIB_GET_SVP(attribs, "mysql_server_prepare", 20); imp_sth->use_server_side_prepare = (svp) ? SvTRUE(*svp) : imp_dbh->use_server_side_prepare; svp = DBD_ATTRIB_GET_SVP(attribs, "async", 5); if(svp && SvTRUE(*svp)) { #if MYSQL_ASYNC imp_sth->is_async = TRUE; imp_sth->use_server_side_prepare = FALSE; #else do_error(sth, 2000, "Async support was not built into this version of DBD::mysql", "HY000"); return 0; #endif } } imp_sth->fetch_done= 0; #endif imp_sth->done_desc= 0; imp_sth->result= NULL; imp_sth->currow= 0; /* Set default value of 'mysql_use_result' attribute for sth from dbh */ svp= DBD_ATTRIB_GET_SVP(attribs, "mysql_use_result", 16); imp_sth->use_mysql_use_result= svp ? SvTRUE(*svp) : imp_dbh->use_mysql_use_result; for (i= 0; i < AV_ATTRIB_LAST; i++) imp_sth->av_attr[i]= Nullav; /* Clean-up previous result set(s) for sth to prevent 'Commands out of sync' error */ mysql_st_free_result_sets(sth, imp_sth); #if MYSQL_VERSION_ID >= SERVER_PREPARE_VERSION && MYSQL_VERSION_ID < CALL_PLACEHOLDER_VERSION if (imp_sth->use_server_side_prepare) { if (DBIc_TRACE_LEVEL(imp_xxh) >= 2) PerlIO_printf(DBIc_LOGPIO(imp_xxh), "\t\tuse_server_side_prepare set, check restrictions\n"); /* This code is here because placeholder support is not implemented for statements with :- 1. LIMIT < 5.0.7 2. 
CALL < 5.5.3 (Added support for out & inout parameters) In these cases we have to disable server side prepared statements NOTE: These checks could cause a false positive on statements which include columns / table names that match "call " or " limit " */ if (DBIc_TRACE_LEVEL(imp_xxh) >= 2) PerlIO_printf(DBIc_LOGPIO(imp_xxh), #if MYSQL_VERSION_ID < LIMIT_PLACEHOLDER_VERSION "\t\tneed to test for LIMIT & CALL\n"); #else "\t\tneed to test for restrictions\n"); #endif str_last_ptr = statement + strlen(statement); for (str_ptr= statement; str_ptr < str_last_ptr; str_ptr++) { #if MYSQL_VERSION_ID < LIMIT_PLACEHOLDER_VERSION /* Place holders not supported in LIMIT's */ if (limit_flag) { if (*str_ptr == '?') { if (DBIc_TRACE_LEVEL(imp_xxh) >= 2) PerlIO_printf(DBIc_LOGPIO(imp_xxh), "\t\tLIMIT and ? found, set to use_server_side_prepare=0\n"); /* ... then we do not want to try server side prepare (use emulation) */ imp_sth->use_server_side_prepare= 0; break; } } else if (str_ptr < str_last_ptr - 6 && isspace(*(str_ptr + 0)) && tolower(*(str_ptr + 1)) == 'l' && tolower(*(str_ptr + 2)) == 'i' && tolower(*(str_ptr + 3)) == 'm' && tolower(*(str_ptr + 4)) == 'i' && tolower(*(str_ptr + 5)) == 't' && isspace(*(str_ptr + 6))) { if (DBIc_TRACE_LEVEL(imp_xxh) >= 2) PerlIO_printf(DBIc_LOGPIO(imp_xxh), "LIMIT set limit flag to 1\n"); limit_flag= 1; } #endif /* Place holders not supported in CALL's */ if (str_ptr < str_last_ptr - 4 && tolower(*(str_ptr + 0)) == 'c' && tolower(*(str_ptr + 1)) == 'a' && tolower(*(str_ptr + 2)) == 'l' && tolower(*(str_ptr + 3)) == 'l' && isspace(*(str_ptr + 4))) { if (DBIc_TRACE_LEVEL(imp_xxh) >= 2) PerlIO_printf(DBIc_LOGPIO(imp_xxh), "Disable PS mode for CALL()\n"); imp_sth->use_server_side_prepare= 0; break; } } } #endif #if MYSQL_VERSION_ID >= SERVER_PREPARE_VERSION if (imp_sth->use_server_side_prepare) { if (DBIc_TRACE_LEVEL(imp_xxh) >= 2) PerlIO_printf(DBIc_LOGPIO(imp_xxh), "\t\tuse_server_side_prepare set\n"); /* do we really need this? 
If we do, we should return, not just continue */ if (imp_sth->stmt) fprintf(stderr, "ERROR: Trying to prepare new stmt while we have \ already not closed one \n"); imp_sth->stmt= mysql_stmt_init(imp_dbh->pmysql); if (! imp_sth->stmt) { if (DBIc_TRACE_LEVEL(imp_xxh) >= 2) PerlIO_printf(DBIc_LOGPIO(imp_xxh), "\t\tERROR: Unable to return MYSQL_STMT structure \ from mysql_stmt_init(): ERROR NO: %d ERROR MSG:%s\n", mysql_errno(imp_dbh->pmysql), mysql_error(imp_dbh->pmysql)); } prepare_retval= mysql_stmt_prepare(imp_sth->stmt, statement, strlen(statement)); if (DBIc_TRACE_LEVEL(imp_xxh) >= 2) PerlIO_printf(DBIc_LOGPIO(imp_xxh), "\t\tmysql_stmt_prepare returned %d\n", prepare_retval); if (prepare_retval) { if (DBIc_TRACE_LEVEL(imp_xxh) >= 2) PerlIO_printf(DBIc_LOGPIO(imp_xxh), "\t\tmysql_stmt_prepare %d %s\n", mysql_stmt_errno(imp_sth->stmt), mysql_stmt_error(imp_sth->stmt)); /* For commands that are not supported by server side prepared statement mechanism lets try to pass them through regular API */ if (mysql_stmt_errno(imp_sth->stmt) == ER_UNSUPPORTED_PS) { if (DBIc_TRACE_LEVEL(imp_xxh) >= 2) PerlIO_printf(DBIc_LOGPIO(imp_xxh), "\t\tSETTING imp_sth->use_server_side_prepare to 0\n"); imp_sth->use_server_side_prepare= 0; } else { do_error(sth, mysql_stmt_errno(imp_sth->stmt), mysql_stmt_error(imp_sth->stmt), mysql_sqlstate(imp_dbh->pmysql)); mysql_stmt_close(imp_sth->stmt); imp_sth->stmt= NULL; return FALSE; } } else { DBIc_NUM_PARAMS(imp_sth)= mysql_stmt_param_count(imp_sth->stmt); /* mysql_stmt_param_count */ if (DBIc_NUM_PARAMS(imp_sth) > 0) { /* Allocate memory for bind variables */ imp_sth->bind= alloc_bind(DBIc_NUM_PARAMS(imp_sth)); imp_sth->fbind= alloc_fbind(DBIc_NUM_PARAMS(imp_sth)); imp_sth->has_been_bound= 0; /* Initialize ph variables with NULL values */ for (i= 0, bind= imp_sth->bind, fbind= imp_sth->fbind, bind_end= bind+DBIc_NUM_PARAMS(imp_sth); bind < bind_end ; bind++, fbind++, i++ ) { bind->buffer_type= MYSQL_TYPE_STRING; bind->buffer= NULL; 
bind->length= &(fbind->length); bind->is_null= (char*) &(fbind->is_null); fbind->is_null= 1; fbind->length= 0; } } } } #endif #if MYSQL_VERSION_ID >= SERVER_PREPARE_VERSION /* Count the number of parameters (driver, vs server-side) */ if (imp_sth->use_server_side_prepare == 0) DBIc_NUM_PARAMS(imp_sth) = count_params((imp_xxh_t *)imp_dbh, aTHX_ statement, imp_dbh->bind_comment_placeholders); #else DBIc_NUM_PARAMS(imp_sth) = count_params((imp_xxh_t *)imp_dbh, aTHX_ statement, imp_dbh->bind_comment_placeholders); #endif /* Allocate memory for parameters */ imp_sth->params= alloc_param(DBIc_NUM_PARAMS(imp_sth)); DBIc_IMPSET_on(imp_sth); if (DBIc_TRACE_LEVEL(imp_xxh) >= 2) PerlIO_printf(DBIc_LOGPIO(imp_xxh), "\t<- dbd_st_prepare\n"); return 1; } /*************************************************************************** * Name: dbd_st_free_result_sets * * Purpose: Clean-up single or multiple result sets (if any) * * Inputs: sth - Statement handle * imp_sth - driver's private statement handle * * Returns: 1 ok * 0 error *************************************************************************/ int mysql_st_free_result_sets (SV * sth, imp_sth_t * imp_sth) { dTHX; D_imp_dbh_from_sth; D_imp_xxh(sth); int next_result_rc= -1; if (DBIc_TRACE_LEVEL(imp_xxh) >= 2) PerlIO_printf(DBIc_LOGPIO(imp_xxh), "\t>- dbd_st_free_result_sets\n"); #if MYSQL_VERSION_ID >= MULTIPLE_RESULT_SET_VERSION do { if (DBIc_TRACE_LEVEL(imp_xxh) >= 2) PerlIO_printf(DBIc_LOGPIO(imp_xxh), "\t<- dbd_st_free_result_sets RC %d\n", next_result_rc); if (next_result_rc == 0) { if (!(imp_sth->result = mysql_use_result(imp_dbh->pmysql))) { /* Check for possible error */ if (mysql_field_count(imp_dbh->pmysql)) { if (DBIc_TRACE_LEVEL(imp_xxh) >= 2) PerlIO_printf(DBIc_LOGPIO(imp_xxh), "\t<- dbd_st_free_result_sets ERROR: %s\n", mysql_error(imp_dbh->pmysql)); do_error(sth, mysql_errno(imp_dbh->pmysql), mysql_error(imp_dbh->pmysql), mysql_sqlstate(imp_dbh->pmysql)); return 0; } } } if (imp_sth->result) { 
mysql_free_result(imp_sth->result); imp_sth->result=NULL; } } while ((next_result_rc=mysql_next_result(imp_dbh->pmysql))==0); if (next_result_rc > 0) { if (DBIc_TRACE_LEVEL(imp_xxh) >= 2) PerlIO_printf(DBIc_LOGPIO(imp_xxh), "\t<- dbd_st_free_result_sets: Error while processing multi-result set: %s\n", mysql_error(imp_dbh->pmysql)); do_error(sth, mysql_errno(imp_dbh->pmysql), mysql_error(imp_dbh->pmysql), mysql_sqlstate(imp_dbh->pmysql)); } #else if (imp_sth->result) { mysql_free_result(imp_sth->result); imp_sth->result=NULL; } #endif if (DBIc_TRACE_LEVEL(imp_xxh) >= 2) PerlIO_printf(DBIc_LOGPIO(imp_xxh), "\t<- dbd_st_free_result_sets\n"); return 1; } #if MYSQL_VERSION_ID >= MULTIPLE_RESULT_SET_VERSION /*************************************************************************** * Name: dbd_st_more_results * * Purpose: Move onto the next result set (if any) * * Inputs: sth - Statement handle * imp_sth - driver's private statement handle * * Returns: 1 if there are more results sets * 0 if there are not * -1 for errors. 
*************************************************************************/ int dbd_st_more_results(SV* sth, imp_sth_t* imp_sth) { dTHX; D_imp_dbh_from_sth; D_imp_xxh(sth); int use_mysql_use_result=imp_sth->use_mysql_use_result; int next_result_return_code, i; MYSQL* svsock= imp_dbh->pmysql; if (!SvROK(sth) || SvTYPE(SvRV(sth)) != SVt_PVHV) croak("Expected hash array"); if (!mysql_more_results(svsock)) { /* No more pending result set(s)*/ if (DBIc_TRACE_LEVEL(imp_xxh) >= 2) PerlIO_printf(DBIc_LOGPIO(imp_xxh), "\n <- dbs_st_more_results no more results\n"); return 0; } if (imp_sth->use_server_side_prepare) { do_warn(sth, JW_ERR_NOT_IMPLEMENTED, "Processing of multiple result set is not possible with server side prepare"); return 0; } /* * Free cached array attributes */ for (i= 0; i < AV_ATTRIB_LAST; i++) { if (imp_sth->av_attr[i]) SvREFCNT_dec(imp_sth->av_attr[i]); imp_sth->av_attr[i]= Nullav; } /* Release previous MySQL result*/ if (imp_sth->result) mysql_free_result(imp_sth->result); if (DBIc_ACTIVE(imp_sth)) DBIc_ACTIVE_off(imp_sth); next_result_return_code= mysql_next_result(svsock); imp_sth->warning_count = mysql_warning_count(imp_dbh->pmysql); /* mysql_next_result returns 0 if there are more results -1 if there are no more results >0 if there was an error */ if (next_result_return_code > 0) { do_error(sth, mysql_errno(svsock), mysql_error(svsock), mysql_sqlstate(svsock)); return 0; } else if(next_result_return_code == -1) { return 0; } else { /* Store the result from the Query */ imp_sth->result = use_mysql_use_result ? 
mysql_use_result(svsock) : mysql_store_result(svsock); if (mysql_errno(svsock)) { do_error(sth, mysql_errno(svsock), mysql_error(svsock), mysql_sqlstate(svsock)); return 0; } imp_sth->row_num= mysql_affected_rows(imp_dbh->pmysql); if (imp_sth->result == NULL) { /* No "real" rowset*/ DBIc_NUM_FIELDS(imp_sth)= 0; /* for DBI <= 1.53 */ DBIS->set_attr_k(sth, sv_2mortal(newSVpvn("NUM_OF_FIELDS",13)), 0, sv_2mortal(newSViv(0))); return 1; } else { /* We have a new rowset */ imp_sth->currow=0; /* delete cached handle attributes */ /* XXX should be driven by a list to ease maintenance */ (void)hv_delete((HV*)SvRV(sth), "NAME", 4, G_DISCARD); (void)hv_delete((HV*)SvRV(sth), "NULLABLE", 8, G_DISCARD); (void)hv_delete((HV*)SvRV(sth), "NUM_OF_FIELDS", 13, G_DISCARD); (void)hv_delete((HV*)SvRV(sth), "PRECISION", 9, G_DISCARD); (void)hv_delete((HV*)SvRV(sth), "SCALE", 5, G_DISCARD); (void)hv_delete((HV*)SvRV(sth), "TYPE", 4, G_DISCARD); (void)hv_delete((HV*)SvRV(sth), "mysql_insertid", 14, G_DISCARD); (void)hv_delete((HV*)SvRV(sth), "mysql_is_auto_increment", 23, G_DISCARD); (void)hv_delete((HV*)SvRV(sth), "mysql_is_blob", 13, G_DISCARD); (void)hv_delete((HV*)SvRV(sth), "mysql_is_key", 12, G_DISCARD); (void)hv_delete((HV*)SvRV(sth), "mysql_is_num", 12, G_DISCARD); (void)hv_delete((HV*)SvRV(sth), "mysql_is_pri_key", 16, G_DISCARD); (void)hv_delete((HV*)SvRV(sth), "mysql_length", 12, G_DISCARD); (void)hv_delete((HV*)SvRV(sth), "mysql_max_length", 16, G_DISCARD); (void)hv_delete((HV*)SvRV(sth), "mysql_table", 11, G_DISCARD); (void)hv_delete((HV*)SvRV(sth), "mysql_type", 10, G_DISCARD); (void)hv_delete((HV*)SvRV(sth), "mysql_type_name", 15, G_DISCARD); (void)hv_delete((HV*)SvRV(sth), "mysql_warning_count", 20, G_DISCARD); /* Adjust NUM_OF_FIELDS - which also adjusts the row buffer size */ DBIc_NUM_FIELDS(imp_sth)= 0; /* for DBI <= 1.53 */ DBIc_DBISTATE(imp_sth)->set_attr_k(sth, sv_2mortal(newSVpvn("NUM_OF_FIELDS",13)), 0, sv_2mortal(newSViv(mysql_num_fields(imp_sth->result))) ); 
DBIc_ACTIVE_on(imp_sth); imp_sth->done_desc = 0; } imp_dbh->pmysql->net.last_errno= 0; return 1; } } #endif /************************************************************************** * * Name: mysql_st_internal_execute * * Purpose: Internal version for executing a statement, called both from * within the "do" and the "execute" method. * * Inputs: h - object handle, for storing error messages * statement - query being executed * attribs - statement attributes, currently ignored * num_params - number of parameters being bound * params - parameter array * result - where to store results, if any * svsock - socket connected to the database * **************************************************************************/ my_ulonglong mysql_st_internal_execute( SV *h, /* could be sth or dbh */ SV *statement, SV *attribs, int num_params, imp_sth_ph_t *params, MYSQL_RES **result, MYSQL *svsock, int use_mysql_use_result ) { dTHX; bool bind_type_guessing= FALSE; bool bind_comment_placeholders= TRUE; STRLEN slen; char *sbuf = SvPV(statement, slen); char *table; char *salloc; int htype; #if MYSQL_ASYNC bool async = FALSE; #endif my_ulonglong rows= 0; /* thank you DBI.c for this info! */ D_imp_xxh(h); attribs= attribs; htype= DBIc_TYPE(imp_xxh); /* It is important to import imp_dbh properly according to the htype that it is! Also, one might ask why bind_type_guessing is assigned in each block. Well, it's because D_imp_ macros called in these blocks make it so imp_dbh is not "visible" or defined outside of the if/else (when compiled, it fails for imp_dbh not being defined). 
*/ /* h is a dbh */ if (htype == DBIt_DB) { D_imp_dbh(h); /* if imp_dbh is not available, it causes segfault (proper) on OpenBSD */ if (imp_dbh && imp_dbh->bind_type_guessing) { bind_type_guessing= imp_dbh->bind_type_guessing; bind_comment_placeholders= bind_comment_placeholders; } #if MYSQL_ASYNC async = (bool) (imp_dbh->async_query_in_flight != NULL); #endif } /* h is a sth */ else { D_imp_sth(h); D_imp_dbh_from_sth; /* if imp_dbh is not available, it causes segfault (proper) on OpenBSD */ if (imp_dbh) { bind_type_guessing= imp_dbh->bind_type_guessing; bind_comment_placeholders= imp_dbh->bind_comment_placeholders; } #if MYSQL_ASYNC async = imp_sth->is_async; if(async) { imp_dbh->async_query_in_flight = imp_sth; } else { imp_dbh->async_query_in_flight = NULL; } #endif } if (DBIc_TRACE_LEVEL(imp_xxh) >= 2) PerlIO_printf(DBIc_LOGPIO(imp_xxh), "mysql_st_internal_execute MYSQL_VERSION_ID %d\n", MYSQL_VERSION_ID ); salloc= parse_params(imp_xxh, aTHX_ svsock, sbuf, &slen, params, num_params, bind_type_guessing, bind_comment_placeholders); if (salloc) { sbuf= salloc; if (DBIc_TRACE_LEVEL(imp_xxh) >= 2) PerlIO_printf(DBIc_LOGPIO(imp_xxh), "Binding parameters: %s\n", sbuf); } if (slen >= 11 && (!strncmp(sbuf, "listfields ", 11) || !strncmp(sbuf, "LISTFIELDS ", 11))) { /* remove pre-space */ slen-= 10; sbuf+= 10; while (slen && isspace(*sbuf)) { --slen; ++sbuf; } if (!slen) { do_error(h, JW_ERR_QUERY, "Missing table name" ,NULL); return -2; } if (!(table= malloc(slen+1))) { do_error(h, JW_ERR_MEM, "Out of memory" ,NULL); return -2; } strncpy(table, sbuf, slen); sbuf= table; while (slen && !isspace(*sbuf)) { --slen; ++sbuf; } *sbuf++= '\0'; *result= mysql_list_fields(svsock, table, NULL); free(table); if (!(*result)) { do_error(h, mysql_errno(svsock), mysql_error(svsock) ,mysql_sqlstate(svsock)); return -2; } return 0; } #if MYSQL_ASYNC if(async) { if((mysql_send_query(svsock, sbuf, slen)) && (!mysql_db_reconnect(h) || (mysql_send_query(svsock, sbuf, slen)))) { rows = -2; } 
else { rows = 0; } } else { #endif if ((mysql_real_query(svsock, sbuf, slen)) && (!mysql_db_reconnect(h) || (mysql_real_query(svsock, sbuf, slen)))) { rows = -2; } else { /** Store the result from the Query */ *result= use_mysql_use_result ? mysql_use_result(svsock) : mysql_store_result(svsock); if (mysql_errno(svsock)) rows = -2; else if (*result) rows = mysql_num_rows(*result); else { rows = mysql_affected_rows(svsock); /* mysql_affected_rows(): -1 indicates that the query returned an error */ if (rows == (my_ulonglong)-1) rows = -2; } } #if MYSQL_ASYNC } #endif if (salloc) Safefree(salloc); if(rows == (my_ulonglong)-2) { do_error(h, mysql_errno(svsock), mysql_error(svsock), mysql_sqlstate(svsock)); if (DBIc_TRACE_LEVEL(imp_xxh) >= 2) PerlIO_printf(DBIc_LOGPIO(imp_xxh), "IGNORING ERROR errno %d\n", mysql_errno(svsock)); } return(rows); } /************************************************************************** * * Name: mysql_st_internal_execute41 * * Purpose: Internal version for executing a prepared statement, called both * from within the "do" and the "execute" method. 
* MYSQL 4.1 API * * * Inputs: h - object handle, for storing error messages * statement - query being executed * attribs - statement attributes, currently ignored * num_params - number of parameters being bound * params - parameter array * result - where to store results, if any * svsock - socket connected to the database * **************************************************************************/ #if MYSQL_VERSION_ID >= SERVER_PREPARE_VERSION my_ulonglong mysql_st_internal_execute41( SV *sth, int num_params, MYSQL_RES **result, MYSQL_STMT *stmt, MYSQL_BIND *bind, int *has_been_bound ) { int i; enum enum_field_types enum_type; dTHX; int execute_retval; my_ulonglong rows=0; D_imp_xxh(sth); if (DBIc_TRACE_LEVEL(imp_xxh) >= 2) PerlIO_printf(DBIc_LOGPIO(imp_xxh), "\t-> mysql_st_internal_execute41\n"); /* free result if exists */ if (*result) { mysql_free_result(*result); *result= 0; } /* If were performed any changes with ph variables we have to rebind them */ if (num_params > 0 && !(*has_been_bound)) { if (mysql_stmt_bind_param(stmt,bind)) goto error; *has_been_bound= 1; } if (DBIc_TRACE_LEVEL(imp_xxh) >= 2) PerlIO_printf(DBIc_LOGPIO(imp_xxh), "\t\tmysql_st_internal_execute41 calling mysql_execute with %d num_params\n", num_params); execute_retval= mysql_stmt_execute(stmt); if (DBIc_TRACE_LEVEL(imp_xxh) >= 2) PerlIO_printf(DBIc_LOGPIO(imp_xxh), "\t\tmysql_stmt_execute returned %d\n", execute_retval); if (execute_retval) goto error; /* This statement does not return a result set (INSERT, UPDATE...) */ if (!(*result= mysql_stmt_result_metadata(stmt))) { if (mysql_stmt_errno(stmt)) goto error; rows= mysql_stmt_affected_rows(stmt); /* mysql_stmt_affected_rows(): -1 indicates that the query returned an error */ if (rows == (my_ulonglong)-1) goto error; } /* This statement returns a result set (SELECT...) 
*/ else { for (i = mysql_stmt_field_count(stmt) - 1; i >=0; --i) { enum_type = mysql_to_perl_type(stmt->fields[i].type); if (enum_type != MYSQL_TYPE_DOUBLE && enum_type != MYSQL_TYPE_LONG && enum_type != MYSQL_TYPE_BIT) { /* mysql_stmt_store_result to update MYSQL_FIELD->max_length */ my_bool on = 1; mysql_stmt_attr_set(stmt, STMT_ATTR_UPDATE_MAX_LENGTH, &on); break; } } /* Get the total rows affected and return */ if (mysql_stmt_store_result(stmt)) goto error; else rows= mysql_stmt_num_rows(stmt); } if (DBIc_TRACE_LEVEL(imp_xxh) >= 2) PerlIO_printf(DBIc_LOGPIO(imp_xxh), "\t<- mysql_internal_execute_41 returning %llu rows\n", rows); return(rows); error: if (*result) { mysql_free_result(*result); *result= 0; } if (DBIc_TRACE_LEVEL(imp_xxh) >= 2) PerlIO_printf(DBIc_LOGPIO(imp_xxh), " errno %d err message %s\n", mysql_stmt_errno(stmt), mysql_stmt_error(stmt)); do_error(sth, mysql_stmt_errno(stmt), mysql_stmt_error(stmt), mysql_stmt_sqlstate(stmt)); mysql_stmt_reset(stmt); if (DBIc_TRACE_LEVEL(imp_xxh) >= 2) PerlIO_printf(DBIc_LOGPIO(imp_xxh), "\t<- mysql_st_internal_execute41\n"); return -2; } #endif /*************************************************************************** * * Name: dbd_st_execute * * Purpose: Called for preparing an SQL statement; our part of the * statement handle constructor * * Input: sth - statement handle being initialized * imp_sth - drivers private statement handle data * * Returns: TRUE for success, FALSE otherwise; do_error will * be called in the latter case * **************************************************************************/ int dbd_st_execute(SV* sth, imp_sth_t* imp_sth) { dTHX; char actual_row_num[64]; int i; SV **statement; D_imp_dbh_from_sth; D_imp_xxh(sth); #if defined (dTHR) dTHR; #endif ASYNC_CHECK_RETURN(sth, -2); if (DBIc_TRACE_LEVEL(imp_xxh) >= 2) PerlIO_printf(DBIc_LOGPIO(imp_xxh), " -> dbd_st_execute for %p\n", sth); if (!SvROK(sth) || SvTYPE(SvRV(sth)) != SVt_PVHV) croak("Expected hash array"); /* Free cached array 
attributes */ for (i= 0; i < AV_ATTRIB_LAST; i++) { if (imp_sth->av_attr[i]) SvREFCNT_dec(imp_sth->av_attr[i]); imp_sth->av_attr[i]= Nullav; } statement= hv_fetch((HV*) SvRV(sth), "Statement", 9, FALSE); /* Clean-up previous result set(s) for sth to prevent 'Commands out of sync' error */ mysql_st_free_result_sets (sth, imp_sth); #if MYSQL_VERSION_ID >= SERVER_PREPARE_VERSION if (imp_sth->use_server_side_prepare && ! imp_sth->use_mysql_use_result) { imp_sth->row_num= mysql_st_internal_execute41( sth, DBIc_NUM_PARAMS(imp_sth), &imp_sth->result, imp_sth->stmt, imp_sth->bind, &imp_sth->has_been_bound ); } else { #endif imp_sth->row_num= mysql_st_internal_execute( sth, *statement, NULL, DBIc_NUM_PARAMS(imp_sth), imp_sth->params, &imp_sth->result, imp_dbh->pmysql, imp_sth->use_mysql_use_result ); #if MYSQL_ASYNC if(imp_dbh->async_query_in_flight) { DBIc_ACTIVE_on(imp_sth); return 0; } #endif } if (imp_sth->row_num+1 != (my_ulonglong)-1) { if (!imp_sth->result) { imp_sth->insertid= mysql_insert_id(imp_dbh->pmysql); #if MYSQL_VERSION_ID >= MULTIPLE_RESULT_SET_VERSION if (mysql_more_results(imp_dbh->pmysql)) DBIc_ACTIVE_on(imp_sth); #endif } else { /** Store the result in the current statement handle */ DBIc_NUM_FIELDS(imp_sth)= mysql_num_fields(imp_sth->result); DBIc_ACTIVE_on(imp_sth); if (!imp_sth->use_server_side_prepare) imp_sth->done_desc= 0; imp_sth->fetch_done= 0; } } imp_sth->warning_count = mysql_warning_count(imp_dbh->pmysql); if (DBIc_TRACE_LEVEL(imp_xxh) >= 2) { /* PerlIO_printf doesn't always handle imp_sth->row_num %llu consistently!! 
*/ sprintf(actual_row_num, "%llu", imp_sth->row_num); PerlIO_printf(DBIc_LOGPIO(imp_xxh), " <- dbd_st_execute returning imp_sth->row_num %s\n", actual_row_num); } return (int)imp_sth->row_num; } /************************************************************************** * * Name: dbd_describe * * Purpose: Called from within the fetch method to describe the result * * Input: sth - statement handle being initialized * imp_sth - our part of the statement handle, there's no * need for supplying both; Tim just doesn't remove it * * Returns: TRUE for success, FALSE otherwise; do_error will * be called in the latter case * **************************************************************************/ int dbd_describe(SV* sth, imp_sth_t* imp_sth) { dTHX; D_imp_xxh(sth); if (DBIc_TRACE_LEVEL(imp_xxh) >= 2) PerlIO_printf(DBIc_LOGPIO(imp_xxh), "\t--> dbd_describe\n"); #if MYSQL_VERSION_ID >= SERVER_PREPARE_VERSION if (imp_sth->use_server_side_prepare) { int i; int col_type; int num_fields= DBIc_NUM_FIELDS(imp_sth); imp_sth_fbh_t *fbh; MYSQL_BIND *buffer; MYSQL_FIELD *fields; if (DBIc_TRACE_LEVEL(imp_xxh) >= 2) PerlIO_printf(DBIc_LOGPIO(imp_xxh), "\t\tdbd_describe() num_fields %d\n", num_fields); if (imp_sth->done_desc) return TRUE; if (!num_fields || !imp_sth->result) { /* no metadata */ do_error(sth, JW_ERR_SEQUENCE, "no metadata information while trying describe result set", NULL); return 0; } /* allocate fields buffers */ if ( !(imp_sth->fbh= alloc_fbuffer(num_fields)) || !(imp_sth->buffer= alloc_bind(num_fields)) ) { /* Out of memory */ do_error(sth, JW_ERR_SEQUENCE, "Out of memory in dbd_sescribe()",NULL); return 0; } fields= mysql_fetch_fields(imp_sth->result); for ( fbh= imp_sth->fbh, buffer= (MYSQL_BIND*)imp_sth->buffer, i= 0; i < num_fields; i++, fbh++, buffer++ ) { /* get the column type */ col_type = fields ? 
fields[i].type : MYSQL_TYPE_STRING; if (DBIc_TRACE_LEVEL(imp_xxh) >= 2) { PerlIO_printf(DBIc_LOGPIO(imp_xxh),"\t\ti %d col_type %d fbh->length %lu\n", i, col_type, fbh->length); PerlIO_printf(DBIc_LOGPIO(imp_xxh), "\t\tfields[i].length %lu fields[i].max_length %lu fields[i].type %d fields[i].charsetnr %d\n", fields[i].length, fields[i].max_length, fields[i].type, fields[i].charsetnr); } fbh->charsetnr = fields[i].charsetnr; #if MYSQL_VERSION_ID < FIELD_CHARSETNR_VERSION fbh->flags = fields[i].flags; #endif buffer->buffer_type= mysql_to_perl_type(col_type); if (DBIc_TRACE_LEVEL(imp_xxh) >= 2) PerlIO_printf(DBIc_LOGPIO(imp_xxh), "\t\tmysql_to_perl_type returned %d\n", col_type); buffer->length= &(fbh->length); buffer->is_null= (my_bool*) &(fbh->is_null); buffer->error= (my_bool*) &(fbh->error); switch (buffer->buffer_type) { case MYSQL_TYPE_DOUBLE: buffer->buffer_length= sizeof(fbh->ddata); buffer->buffer= (char*) &fbh->ddata; break; case MYSQL_TYPE_LONG: buffer->buffer_length= sizeof(fbh->ldata); buffer->buffer= (char*) &fbh->ldata; buffer->is_unsigned= (fields[i].flags & UNSIGNED_FLAG) ? 1 : 0; break; case MYSQL_TYPE_BIT: buffer->buffer_length= 8; Newz(908, fbh->data, buffer->buffer_length, char); buffer->buffer= (char *) fbh->data; break; default: buffer->buffer_length= fields[i].max_length ? 
fields[i].max_length : 1; Newz(908, fbh->data, buffer->buffer_length, char); buffer->buffer= (char *) fbh->data; } } if (mysql_stmt_bind_result(imp_sth->stmt, imp_sth->buffer)) { do_error(sth, mysql_stmt_errno(imp_sth->stmt), mysql_stmt_error(imp_sth->stmt), mysql_stmt_sqlstate(imp_sth->stmt)); return 0; } } #endif imp_sth->done_desc= 1; if (DBIc_TRACE_LEVEL(imp_xxh) >= 2) PerlIO_printf(DBIc_LOGPIO(imp_xxh), "\t<- dbd_describe\n"); return TRUE; } /************************************************************************** * * Name: dbd_st_fetch * * Purpose: Called for fetching a result row * * Input: sth - statement handle being initialized * imp_sth - drivers private statement handle data * * Returns: array of columns; the array is allocated by DBI via * DBIc_DBISTATE(imp_sth)->get_fbav(imp_sth), even the values * of the array are prepared, we just need to modify them * appropriately * **************************************************************************/ AV* dbd_st_fetch(SV *sth, imp_sth_t* imp_sth) { dTHX; int num_fields, ChopBlanks, i, rc; unsigned long *lengths; AV *av; int av_length, av_readonly; MYSQL_ROW cols; D_imp_dbh_from_sth; MYSQL* svsock= imp_dbh->pmysql; imp_sth_fbh_t *fbh; D_imp_xxh(sth); #if MYSQL_VERSION_ID >=SERVER_PREPARE_VERSION MYSQL_BIND *buffer; #endif MYSQL_FIELD *fields; if (DBIc_TRACE_LEVEL(imp_xxh) >= 2) PerlIO_printf(DBIc_LOGPIO(imp_xxh), "\t-> dbd_st_fetch\n"); #if MYSQL_ASYNC if(imp_dbh->async_query_in_flight) { if(mysql_db_async_result(sth, &imp_sth->result) <= 0) { return Nullav; } } #endif #if MYSQL_VERSION_ID >=SERVER_PREPARE_VERSION if (imp_sth->use_server_side_prepare) { if (!DBIc_ACTIVE(imp_sth) ) { do_error(sth, JW_ERR_SEQUENCE, "no statement executing\n",NULL); return Nullav; } if (imp_sth->fetch_done) { do_error(sth, JW_ERR_SEQUENCE, "fetch() but fetch already done",NULL); return Nullav; } if (!imp_sth->done_desc) { if (!dbd_describe(sth, imp_sth)) { do_error(sth, JW_ERR_SEQUENCE, "Error while describe result set.", 
NULL); return Nullav; } } } #endif ChopBlanks = DBIc_is(imp_sth, DBIcf_ChopBlanks); if (DBIc_TRACE_LEVEL(imp_xxh) >= 2) PerlIO_printf(DBIc_LOGPIO(imp_xxh), "\t\tdbd_st_fetch for %p, chopblanks %d\n", sth, ChopBlanks); if (!imp_sth->result) { do_error(sth, JW_ERR_SEQUENCE, "fetch() without execute()" ,NULL); return Nullav; } /* fix from 2.9008 */ imp_dbh->pmysql->net.last_errno = 0; #if MYSQL_VERSION_ID >=SERVER_PREPARE_VERSION if (imp_sth->use_server_side_prepare) { if (DBIc_TRACE_LEVEL(imp_xxh) >= 2) PerlIO_printf(DBIc_LOGPIO(imp_xxh), "\t\tdbd_st_fetch calling mysql_fetch\n"); if ((rc= mysql_stmt_fetch(imp_sth->stmt))) { if (rc == 1) do_error(sth, mysql_stmt_errno(imp_sth->stmt), mysql_stmt_error(imp_sth->stmt), mysql_stmt_sqlstate(imp_sth->stmt)); #if MYSQL_VERSION_ID >= MYSQL_VERSION_5_0 if (rc == MYSQL_DATA_TRUNCATED) { if (DBIc_TRACE_LEVEL(imp_xxh) >= 2) PerlIO_printf(DBIc_LOGPIO(imp_xxh), "\t\tdbd_st_fetch data truncated\n"); goto process; } #endif if (rc == MYSQL_NO_DATA) { /* Update row_num to affected_rows value */ imp_sth->row_num= mysql_stmt_affected_rows(imp_sth->stmt); imp_sth->fetch_done=1; if (DBIc_TRACE_LEVEL(imp_xxh) >= 2) PerlIO_printf(DBIc_LOGPIO(imp_xxh), "\t\tdbd_st_fetch no data\n"); } dbd_st_finish(sth, imp_sth); return Nullav; } process: imp_sth->currow++; av= DBIc_DBISTATE(imp_sth)->get_fbav(imp_sth); num_fields=mysql_stmt_field_count(imp_sth->stmt); if (DBIc_TRACE_LEVEL(imp_xxh) >= 2) PerlIO_printf(DBIc_LOGPIO(imp_xxh), "\t\tdbd_st_fetch called mysql_fetch, rc %d num_fields %d\n", rc, num_fields); for ( buffer= imp_sth->buffer, fbh= imp_sth->fbh, i= 0; i < num_fields; i++, fbh++, buffer++ ) { SV *sv= AvARRAY(av)[i]; /* Note: we (re)use the SV in the AV */ STRLEN len; /* This is wrong, null is not being set correctly * This is not the way to determine length (this would break blobs!) 
*/ if (fbh->is_null) (void) SvOK_off(sv); /* Field is NULL, return undef */ else { /* In case of BLOB/TEXT fields we allocate only 8192 bytes in dbd_describe() for data. Here we know real size of field so we should increase buffer size and refetch column value */ if (fbh->length > buffer->buffer_length || fbh->error) { if (DBIc_TRACE_LEVEL(imp_xxh) >= 2) PerlIO_printf(DBIc_LOGPIO(imp_xxh), "\t\tRefetch BLOB/TEXT column: %d, length: %lu, error: %d\n", i, fbh->length, fbh->error); Renew(fbh->data, fbh->length, char); buffer->buffer_length= fbh->length; buffer->buffer= (char *) fbh->data; if (DBIc_TRACE_LEVEL(imp_xxh) >= 2) { int j; int m = MIN(*buffer->length, buffer->buffer_length); char *ptr = (char*)buffer->buffer; PerlIO_printf(DBIc_LOGPIO(imp_xxh),"\t\tbefore buffer->buffer: "); for (j = 0; j < m; j++) { PerlIO_printf(DBIc_LOGPIO(imp_xxh), "%c", *ptr++); } PerlIO_printf(DBIc_LOGPIO(imp_xxh),"\n"); } /*TODO: Use offset instead of 0 to fetch only remain part of data*/ if (mysql_stmt_fetch_column(imp_sth->stmt, buffer , i, 0)) do_error(sth, mysql_stmt_errno(imp_sth->stmt), mysql_stmt_error(imp_sth->stmt), mysql_stmt_sqlstate(imp_sth->stmt)); if (DBIc_TRACE_LEVEL(imp_xxh) >= 2) { int j; int m = MIN(*buffer->length, buffer->buffer_length); char *ptr = (char*)buffer->buffer; PerlIO_printf(DBIc_LOGPIO(imp_xxh),"\t\tafter buffer->buffer: "); for (j = 0; j < m; j++) { PerlIO_printf(DBIc_LOGPIO(imp_xxh), "%c", *ptr++); } PerlIO_printf(DBIc_LOGPIO(imp_xxh),"\n"); } } /* This does look a lot like Georg's PHP driver doesn't it? --Brian */ /* Credit due to Georg - mysqli_api.c ;) --PMG */ switch (buffer->buffer_type) { case MYSQL_TYPE_DOUBLE: if (DBIc_TRACE_LEVEL(imp_xxh) >= 2) PerlIO_printf(DBIc_LOGPIO(imp_xxh), "\t\tst_fetch double data %f\n", fbh->ddata); sv_setnv(sv, fbh->ddata); break; case MYSQL_TYPE_LONG: if (DBIc_TRACE_LEVEL(imp_xxh) >= 2) PerlIO_printf(DBIc_LOGPIO(imp_xxh), "\t\tst_fetch int data %"IVdf", unsigned? 
%d\n", fbh->ldata, buffer->is_unsigned); if (buffer->is_unsigned) sv_setuv(sv, fbh->ldata); else sv_setiv(sv, fbh->ldata); break; case MYSQL_TYPE_BIT: sv_setpvn(sv, fbh->data, fbh->length); break; default: if (DBIc_TRACE_LEVEL(imp_xxh) >= 2) PerlIO_printf(DBIc_LOGPIO(imp_xxh), "\t\tERROR IN st_fetch_string"); len= fbh->length; /* ChopBlanks server-side prepared statement */ if (ChopBlanks) { /* see bottom of: http://www.mysql.org/doc/refman/5.0/en/c-api-datatypes.html */ if (fbh->charsetnr != 63) while (len && fbh->data[len-1] == ' ') { --len; } } /* END OF ChopBlanks */ sv_setpvn(sv, fbh->data, len); /* UTF8 */ /*HELMUT*/ #if defined(sv_utf8_decode) && MYSQL_VERSION_ID >=SERVER_PREPARE_VERSION #if MYSQL_VERSION_ID >= FIELD_CHARSETNR_VERSION /* SHOW COLLATION WHERE Id = 63; -- 63 == charset binary, collation binary */ if ((imp_dbh->enable_utf8 || imp_dbh->enable_utf8mb4) && fbh->charsetnr != 63) #else if ((imp_dbh->enable_utf8 || imp_dbh->enable_utf8mb4) && !(fbh->flags & BINARY_FLAG)) #endif sv_utf8_decode(sv); #endif /* END OF UTF8 */ break; } } } if (DBIc_TRACE_LEVEL(imp_xxh) >= 2) PerlIO_printf(DBIc_LOGPIO(imp_xxh), "\t<- dbd_st_fetch, %d cols\n", num_fields); return av; } else { #endif imp_sth->currow++; if (DBIc_TRACE_LEVEL(imp_xxh) >= 2) { PerlIO_printf(DBIc_LOGPIO(imp_xxh), "\tdbd_st_fetch result set details\n"); PerlIO_printf(DBIc_LOGPIO(imp_xxh), "\timp_sth->result=%p\n", imp_sth->result); PerlIO_printf(DBIc_LOGPIO(imp_xxh), "\tmysql_num_fields=%u\n", mysql_num_fields(imp_sth->result)); PerlIO_printf(DBIc_LOGPIO(imp_xxh), "\tmysql_num_rows=%llu\n", mysql_num_rows(imp_sth->result)); PerlIO_printf(DBIc_LOGPIO(imp_xxh), "\tmysql_affected_rows=%llu\n", mysql_affected_rows(imp_dbh->pmysql)); PerlIO_printf(DBIc_LOGPIO(imp_xxh), "\tdbd_st_fetch for %p, currow= %d\n", sth,imp_sth->currow); } if (!(cols= mysql_fetch_row(imp_sth->result))) { if (DBIc_TRACE_LEVEL(imp_xxh) >= 2) { PerlIO_printf(DBIc_LOGPIO(imp_xxh), "\tdbd_st_fetch, no more rows to fetch"); } if 
(mysql_errno(imp_dbh->pmysql)) do_error(sth, mysql_errno(imp_dbh->pmysql), mysql_error(imp_dbh->pmysql), mysql_sqlstate(imp_dbh->pmysql)); #if MYSQL_VERSION_ID >= MULTIPLE_RESULT_SET_VERSION if (!mysql_more_results(svsock)) #endif dbd_st_finish(sth, imp_sth); return Nullav; } num_fields= mysql_num_fields(imp_sth->result); fields= mysql_fetch_fields(imp_sth->result); lengths= mysql_fetch_lengths(imp_sth->result); if ((av= DBIc_FIELDS_AV(imp_sth)) != Nullav) { av_length= av_len(av)+1; if (av_length != num_fields) /* Resize array if necessary */ { if (DBIc_TRACE_LEVEL(imp_xxh) >= 2) PerlIO_printf(DBIc_LOGPIO(imp_xxh), "\t<- dbd_st_fetch, size of results array(%d) != num_fields(%d)\n", av_length, num_fields); if (DBIc_TRACE_LEVEL(imp_xxh) >= 2) PerlIO_printf(DBIc_LOGPIO(imp_xxh), "\t<- dbd_st_fetch, result fields(%d)\n", DBIc_NUM_FIELDS(imp_sth)); av_readonly = SvREADONLY(av); if (av_readonly) SvREADONLY_off( av ); /* DBI sets this readonly */ while (av_length < num_fields) { av_store(av, av_length++, newSV(0)); } while (av_length > num_fields) { SvREFCNT_dec(av_pop(av)); av_length--; } if (av_readonly) SvREADONLY_on(av); } } av= DBIc_DBISTATE(imp_sth)->get_fbav(imp_sth); for (i= 0; i < num_fields; ++i) { char *col= cols[i]; SV *sv= AvARRAY(av)[i]; /* Note: we (re)use the SV in the AV */ if (col) { STRLEN len= lengths[i]; if (ChopBlanks) { while (len && col[len-1] == ' ') { --len; } } /* Set string value returned from mysql server */ sv_setpvn(sv, col, len); switch (mysql_to_perl_type(fields[i].type)) { case MYSQL_TYPE_DOUBLE: /* Coerce to dobule and set scalar as NV */ (void) SvNV(sv); SvNOK_only(sv); break; case MYSQL_TYPE_LONG: /* Coerce to integer and set scalar as UV resp. 
IV */ if (fields[i].flags & UNSIGNED_FLAG) { (void) SvUV(sv); SvIOK_only_UV(sv); } else { (void) SvIV(sv); SvIOK_only(sv); } break; #if MYSQL_VERSION_ID > NEW_DATATYPE_VERSION case MYSQL_TYPE_BIT: /* Let it as binary string */ break; #endif default: /* UTF8 */ /*HELMUT*/ #if defined(sv_utf8_decode) && MYSQL_VERSION_ID >=SERVER_PREPARE_VERSION /* see bottom of: http://www.mysql.org/doc/refman/5.0/en/c-api-datatypes.html */ if ((imp_dbh->enable_utf8 || imp_dbh->enable_utf8mb4) && fields[i].charsetnr != 63) sv_utf8_decode(sv); #endif /* END OF UTF8 */ break; } } else (void) SvOK_off(sv); /* Field is NULL, return undef */ } if (DBIc_TRACE_LEVEL(imp_xxh) >= 2) PerlIO_printf(DBIc_LOGPIO(imp_xxh), "\t<- dbd_st_fetch, %d cols\n", num_fields); return av; #if MYSQL_VERSION_ID >= SERVER_PREPARE_VERSION } #endif } #if MYSQL_VERSION_ID >= SERVER_PREPARE_VERSION /* We have to fetch all data from stmt There is may be useful for 2 cases: 1. st_finish when we have undef statement 2. call st_execute again when we have some unfetched data in stmt */ int mysql_st_clean_cursor(SV* sth, imp_sth_t* imp_sth) { if (DBIc_ACTIVE(imp_sth) && dbd_describe(sth, imp_sth) && !imp_sth->fetch_done) mysql_stmt_free_result(imp_sth->stmt); return 1; } #endif /*************************************************************************** * * Name: dbd_st_finish * * Purpose: Called for freeing a mysql result * * Input: sth - statement handle being finished * imp_sth - drivers private statement handle data * * Returns: TRUE for success, FALSE otherwise; do_error() will * be called in the latter case * **************************************************************************/ int dbd_st_finish(SV* sth, imp_sth_t* imp_sth) { dTHX; D_imp_xxh(sth); #if defined (dTHR) dTHR; #endif #if MYSQL_ASYNC D_imp_dbh_from_sth; if(imp_dbh->async_query_in_flight) { mysql_db_async_result(sth, &imp_sth->result); } #endif #if MYSQL_VERSION_ID >= SERVER_PREPARE_VERSION if (DBIc_TRACE_LEVEL(imp_xxh) >= 2) { 
PerlIO_printf(DBIc_LOGPIO(imp_xxh), "\n--> dbd_st_finish\n"); } if (imp_sth->use_server_side_prepare) { if (imp_sth && imp_sth->stmt) { if (!mysql_st_clean_cursor(sth, imp_sth)) { do_error(sth, JW_ERR_SEQUENCE, "Error happened while tried to clean up stmt",NULL); return 0; } } } #endif /* Cancel further fetches from this cursor. We don't close the cursor till DESTROY. The application may re execute it. */ if (imp_sth && DBIc_ACTIVE(imp_sth)) { /* Clean-up previous result set(s) for sth to prevent 'Commands out of sync' error */ mysql_st_free_result_sets(sth, imp_sth); } DBIc_ACTIVE_off(imp_sth); if (DBIc_TRACE_LEVEL(imp_xxh) >= 2) { PerlIO_printf(DBIc_LOGPIO(imp_xxh), "\n<-- dbd_st_finish\n"); } return 1; } /************************************************************************** * * Name: dbd_st_destroy * * Purpose: Our part of the statement handles destructor * * Input: sth - statement handle being destroyed * imp_sth - drivers private statement handle data * * Returns: Nothing * **************************************************************************/ void dbd_st_destroy(SV *sth, imp_sth_t *imp_sth) { dTHX; D_imp_xxh(sth); #if defined (dTHR) dTHR; #endif int i; #if MYSQL_VERSION_ID >= SERVER_PREPARE_VERSION imp_sth_fbh_t *fbh; int n; n= DBIc_NUM_PARAMS(imp_sth); if (n) { if (DBIc_TRACE_LEVEL(imp_xxh) >= 2) PerlIO_printf(DBIc_LOGPIO(imp_xxh), "\tFreeing %d parameters, bind %p fbind %p\n", n, imp_sth->bind, imp_sth->fbind); free_bind(imp_sth->bind); free_fbind(imp_sth->fbind); } fbh= imp_sth->fbh; if (fbh) { n = DBIc_NUM_FIELDS(imp_sth); i = 0; while (i < n) { if (fbh[i].data) Safefree(fbh[i].data); ++i; } free_fbuffer(fbh); if (imp_sth->buffer) free_bind(imp_sth->buffer); } if (imp_sth->stmt) { if (mysql_stmt_close(imp_sth->stmt)) { do_error(DBIc_PARENT_H(imp_sth), mysql_stmt_errno(imp_sth->stmt), mysql_stmt_error(imp_sth->stmt), mysql_stmt_sqlstate(imp_sth->stmt)); } } #endif /* dbd_st_finish has already been called by .xs code if needed. 
*/ /* Free values allocated by dbd_bind_ph */ if (imp_sth->params) { free_param(aTHX_ imp_sth->params, DBIc_NUM_PARAMS(imp_sth)); imp_sth->params= NULL; } /* Free cached array attributes */ for (i= 0; i < AV_ATTRIB_LAST; i++) { if (imp_sth->av_attr[i]) SvREFCNT_dec(imp_sth->av_attr[i]); imp_sth->av_attr[i]= Nullav; } /* let DBI know we've done it */ DBIc_IMPSET_off(imp_sth); } /* ************************************************************************** * * Name: dbd_st_STORE_attrib * * Purpose: Modifies a statement handles attributes; we currently * support just nothing * * Input: sth - statement handle being destroyed * imp_sth - drivers private statement handle data * keysv - attribute name * valuesv - attribute value * * Returns: TRUE for success, FALSE otherwise; do_error will * be called in the latter case * **************************************************************************/ int dbd_st_STORE_attrib( SV *sth, imp_sth_t *imp_sth, SV *keysv, SV *valuesv ) { dTHX; STRLEN(kl); char *key= SvPV(keysv, kl); int retval= FALSE; D_imp_xxh(sth); if (DBIc_TRACE_LEVEL(imp_xxh) >= 2) PerlIO_printf(DBIc_LOGPIO(imp_xxh), "\t\t-> dbd_st_STORE_attrib for %p, key %s\n", sth, key); if (strEQ(key, "mysql_use_result")) { imp_sth->use_mysql_use_result= SvTRUE(valuesv); } if (DBIc_TRACE_LEVEL(imp_xxh) >= 2) PerlIO_printf(DBIc_LOGPIO(imp_xxh), "\t\t<- dbd_st_STORE_attrib for %p, result %d\n", sth, retval); return retval; } /* ************************************************************************** * * Name: dbd_st_FETCH_internal * * Purpose: Retrieves a statement handles array attributes; we use * a separate function, because creating the array * attributes shares much code and it aids in supporting * enhanced features like caching. * * Input: sth - statement handle; may even be a database handle, * in which case this will be used for storing error * messages only. This is only valid, if cacheit (the * last argument) is set to TRUE. 
* what - internal attribute number * res - pointer to a DBMS result * cacheit - TRUE, if results may be cached in the sth. * * Returns: RV pointing to result array in case of success, NULL * otherwise; do_error has already been called in the latter * case. * **************************************************************************/ #ifndef IS_KEY #define IS_KEY(A) (((A) & (PRI_KEY_FLAG | UNIQUE_KEY_FLAG | MULTIPLE_KEY_FLAG)) != 0) #endif #if !defined(IS_AUTO_INCREMENT) && defined(AUTO_INCREMENT_FLAG) #define IS_AUTO_INCREMENT(A) (((A) & AUTO_INCREMENT_FLAG) != 0) #endif SV* dbd_st_FETCH_internal( SV *sth, int what, MYSQL_RES *res, int cacheit ) { dTHX; D_imp_sth(sth); AV *av= Nullav; MYSQL_FIELD *curField; /* Are we asking for a legal value? */ if (what < 0 || what >= AV_ATTRIB_LAST) do_error(sth, JW_ERR_NOT_IMPLEMENTED, "Not implemented", NULL); /* Return cached value, if possible */ else if (cacheit && imp_sth->av_attr[what]) av= imp_sth->av_attr[what]; /* Does this sth really have a result? */ else if (!res) do_error(sth, JW_ERR_NOT_ACTIVE, "statement contains no result" ,NULL); /* Do the real work. 
*/ else { av= newAV(); mysql_field_seek(res, 0); while ((curField= mysql_fetch_field(res))) { SV *sv; switch(what) { case AV_ATTRIB_NAME: sv= newSVpvn(curField->name, strlen(curField->name)); break; case AV_ATTRIB_TABLE: sv= newSVpvn(curField->table, strlen(curField->table)); break; case AV_ATTRIB_TYPE: sv= newSViv((int) curField->type); break; case AV_ATTRIB_SQL_TYPE: sv= newSViv((int) native2sql(curField->type)->data_type); break; case AV_ATTRIB_IS_PRI_KEY: sv= boolSV(IS_PRI_KEY(curField->flags)); break; case AV_ATTRIB_IS_NOT_NULL: sv= boolSV(IS_NOT_NULL(curField->flags)); break; case AV_ATTRIB_NULLABLE: sv= boolSV(!IS_NOT_NULL(curField->flags)); break; case AV_ATTRIB_LENGTH: sv= newSViv((int) curField->length); break; case AV_ATTRIB_IS_NUM: sv= newSViv((int) native2sql(curField->type)->is_num); break; case AV_ATTRIB_TYPE_NAME: sv= newSVpv((char*) native2sql(curField->type)->type_name, 0); break; case AV_ATTRIB_MAX_LENGTH: sv= newSViv((int) curField->max_length); break; case AV_ATTRIB_IS_AUTO_INCREMENT: #if defined(AUTO_INCREMENT_FLAG) sv= boolSV(IS_AUTO_INCREMENT(curField->flags)); break; #else croak("AUTO_INCREMENT_FLAG is not supported on this machine"); #endif case AV_ATTRIB_IS_KEY: sv= boolSV(IS_KEY(curField->flags)); break; case AV_ATTRIB_IS_BLOB: sv= boolSV(IS_BLOB(curField->flags)); break; case AV_ATTRIB_SCALE: sv= newSViv((int) curField->decimals); break; case AV_ATTRIB_PRECISION: sv= newSViv((int) (curField->length > curField->max_length) ? curField->length : curField->max_length); break; default: sv= &PL_sv_undef; break; } av_push(av, sv); } /* Ensure that this value is kept, decremented in * dbd_st_destroy and dbd_st_execute. 
*/ if (!cacheit) return sv_2mortal(newRV_noinc((SV*)av)); imp_sth->av_attr[what]= av; } if (av == Nullav) return &PL_sv_undef; return sv_2mortal(newRV_inc((SV*)av)); } /* ************************************************************************** * * Name: dbd_st_FETCH_attrib * * Purpose: Retrieves a statement handles attributes * * Input: sth - statement handle being destroyed * imp_sth - drivers private statement handle data * keysv - attribute name * * Returns: NULL for an unknown attribute, "undef" for error, * attribute value otherwise. * **************************************************************************/ #define ST_FETCH_AV(what) \ dbd_st_FETCH_internal(sth, (what), imp_sth->result, TRUE) SV* dbd_st_FETCH_attrib( SV *sth, imp_sth_t *imp_sth, SV *keysv ) { dTHX; STRLEN(kl); char *key= SvPV(keysv, kl); SV *retsv= Nullsv; D_imp_xxh(sth); if (kl < 2) return Nullsv; if (DBIc_TRACE_LEVEL(imp_xxh) >= 2) PerlIO_printf(DBIc_LOGPIO(imp_xxh), " -> dbd_st_FETCH_attrib for %p, key %s\n", sth, key); switch (*key) { case 'N': if (strEQ(key, "NAME")) retsv= ST_FETCH_AV(AV_ATTRIB_NAME); else if (strEQ(key, "NULLABLE")) retsv= ST_FETCH_AV(AV_ATTRIB_NULLABLE); break; case 'P': if (strEQ(key, "PRECISION")) retsv= ST_FETCH_AV(AV_ATTRIB_PRECISION); if (strEQ(key, "ParamValues")) { HV *pvhv= newHV(); if (DBIc_NUM_PARAMS(imp_sth)) { int n; char key[100]; I32 keylen; for (n= 0; n < DBIc_NUM_PARAMS(imp_sth); n++) { keylen= sprintf(key, "%d", n); (void)hv_store(pvhv, key, keylen, newSVsv(imp_sth->params[n].value), 0); } } retsv= sv_2mortal(newRV_noinc((SV*)pvhv)); } break; case 'S': if (strEQ(key, "SCALE")) retsv= ST_FETCH_AV(AV_ATTRIB_SCALE); break; case 'T': if (strEQ(key, "TYPE")) retsv= ST_FETCH_AV(AV_ATTRIB_SQL_TYPE); break; case 'm': switch (kl) { case 10: if (strEQ(key, "mysql_type")) retsv= ST_FETCH_AV(AV_ATTRIB_TYPE); break; case 11: if (strEQ(key, "mysql_table")) retsv= ST_FETCH_AV(AV_ATTRIB_TABLE); break; case 12: if ( strEQ(key, "mysql_is_key")) retsv= 
ST_FETCH_AV(AV_ATTRIB_IS_KEY); else if (strEQ(key, "mysql_is_num")) retsv= ST_FETCH_AV(AV_ATTRIB_IS_NUM); else if (strEQ(key, "mysql_length")) retsv= ST_FETCH_AV(AV_ATTRIB_LENGTH); else if (strEQ(key, "mysql_result")) retsv= sv_2mortal(newSViv(PTR2IV(imp_sth->result))); break; case 13: if (strEQ(key, "mysql_is_blob")) retsv= ST_FETCH_AV(AV_ATTRIB_IS_BLOB); break; case 14: if (strEQ(key, "mysql_insertid")) { /* We cannot return an IV, because the insertid is a long. */ if (DBIc_TRACE_LEVEL(imp_xxh) >= 2) PerlIO_printf(DBIc_LOGPIO(imp_xxh), "INSERT ID %llu\n", imp_sth->insertid); return sv_2mortal(my_ulonglong2str(aTHX_ imp_sth->insertid)); } break; case 15: if (strEQ(key, "mysql_type_name")) retsv = ST_FETCH_AV(AV_ATTRIB_TYPE_NAME); break; case 16: if ( strEQ(key, "mysql_is_pri_key")) retsv= ST_FETCH_AV(AV_ATTRIB_IS_PRI_KEY); else if (strEQ(key, "mysql_max_length")) retsv= ST_FETCH_AV(AV_ATTRIB_MAX_LENGTH); else if (strEQ(key, "mysql_use_result")) retsv= boolSV(imp_sth->use_mysql_use_result); break; case 19: if (strEQ(key, "mysql_warning_count")) retsv= sv_2mortal(newSViv((IV) imp_sth->warning_count)); break; case 20: if (strEQ(key, "mysql_server_prepare")) #if MYSQL_VERSION_ID >= SERVER_PREPARE_VERSION retsv= sv_2mortal(newSViv((IV) imp_sth->use_server_side_prepare)); #else retsv= boolSV(0); #endif break; case 23: if (strEQ(key, "mysql_is_auto_increment")) retsv = ST_FETCH_AV(AV_ATTRIB_IS_AUTO_INCREMENT); break; } break; } return retsv; } /*************************************************************************** * * Name: dbd_st_blob_read * * Purpose: Used for blob reads if the statement handles "LongTruncOk" * attribute (currently not supported by DBD::mysql) * * Input: SV* - statement handle from which a blob will be fetched * imp_sth - drivers private statement handle data * field - field number of the blob (note, that a row may * contain more than one blob) * offset - the offset of the field, where to start reading * len - maximum number of bytes to read * 
destrv - RV* that tells us where to store * destoffset - destination offset * * Returns: TRUE for success, FALSE otherwise; do_error will * be called in the latter case * **************************************************************************/ int dbd_st_blob_read ( SV *sth, imp_sth_t *imp_sth, int field, long offset, long len, SV *destrv, long destoffset) { /* quell warnings */ sth= sth; imp_sth=imp_sth; field= field; offset= offset; len= len; destrv= destrv; destoffset= destoffset; return FALSE; } /*************************************************************************** * * Name: dbd_bind_ph * * Purpose: Binds a statement value to a parameter * * Input: sth - statement handle * imp_sth - drivers private statement handle data * param - parameter number, counting starts with 1 * value - value being inserted for parameter "param" * sql_type - SQL type of the value * attribs - bind parameter attributes, currently this must be * one of the values SQL_CHAR, ... * inout - TRUE, if parameter is an output variable (currently * this is not supported) * maxlen - ??? 
*
 * Returns: TRUE for success, FALSE otherwise
 *
 **************************************************************************/

int dbd_bind_ph(SV *sth, imp_sth_t *imp_sth, SV *param, SV *value,
                IV sql_type, SV *attribs, int is_inout, IV maxlen)
{
  dTHX;
  int rc;
  int param_num= SvIV(param);
  int idx= param_num - 1;
  char *err_msg;
  D_imp_xxh(sth);
#if MYSQL_VERSION_ID >= SERVER_PREPARE_VERSION
  STRLEN slen;
  char *buffer= NULL;
  int buffer_is_null= 0;
  /*
    BUG FIX: buffer_length was previously initialized from 'slen', which
    is still uninitialized at this point -- reading an indeterminate
    value (undefined behavior).  It is recomputed below before use for
    string/blob binds, but must start from a defined value.
  */
  int buffer_length= 0;
  unsigned int buffer_type= 0;
#endif
  D_imp_dbh_from_sth;
  ASYNC_CHECK_RETURN(sth, FALSE);

  if (DBIc_TRACE_LEVEL(imp_xxh) >= 2)
    PerlIO_printf(DBIc_LOGPIO(imp_xxh),
                  " Called: dbd_bind_ph\n");

  /* quell "unused parameter" warnings */
  attribs= attribs;
  maxlen= maxlen;

  if (param_num <= 0  ||  param_num > DBIc_NUM_PARAMS(imp_sth))
  {
    do_error(sth, JW_ERR_ILLEGAL_PARAM_NUM,
             "Illegal parameter number", NULL);
    return FALSE;
  }

  /*
    This fixes the bug whereby no warning was issued upon binding a
    defined non-numeric as numeric
  */
  if (SvOK(value) &&
      (sql_type == SQL_NUMERIC  ||
       sql_type == SQL_DECIMAL  ||
       sql_type == SQL_INTEGER  ||
       sql_type == SQL_SMALLINT ||
       sql_type == SQL_FLOAT    ||
       sql_type == SQL_REAL     ||
       sql_type == SQL_DOUBLE) )
  {
    if (!
looks_like_number(value)) { err_msg = SvPVX(sv_2mortal(newSVpvf( "Binding non-numeric field %d, value %s as a numeric!", param_num, neatsvpv(value,0)))); do_error(sth, JW_ERR_ILLEGAL_PARAM_NUM, err_msg, NULL); } } if (is_inout) { do_error(sth, JW_ERR_NOT_IMPLEMENTED, "Output parameters not implemented", NULL); return FALSE; } rc = bind_param(&imp_sth->params[idx], value, sql_type); #if MYSQL_VERSION_ID >= SERVER_PREPARE_VERSION if (imp_sth->use_server_side_prepare) { switch(sql_type) { case SQL_NUMERIC: case SQL_INTEGER: case SQL_SMALLINT: case SQL_BIGINT: case SQL_TINYINT: buffer_type= MYSQL_TYPE_LONG; break; case SQL_DOUBLE: case SQL_DECIMAL: case SQL_FLOAT: case SQL_REAL: buffer_type= MYSQL_TYPE_DOUBLE; break; case SQL_CHAR: case SQL_VARCHAR: case SQL_DATE: case SQL_TIME: case SQL_TIMESTAMP: case SQL_LONGVARCHAR: case SQL_BINARY: case SQL_VARBINARY: case SQL_LONGVARBINARY: buffer_type= MYSQL_TYPE_BLOB; break; default: buffer_type= MYSQL_TYPE_STRING; } buffer_is_null = !(SvOK(imp_sth->params[idx].value) && imp_sth->params[idx].value); if (! 
buffer_is_null) { switch(buffer_type) { case MYSQL_TYPE_LONG: /* INT */ if (!SvIOK(imp_sth->params[idx].value) && DBIc_TRACE_LEVEL(imp_xxh) >= 2) PerlIO_printf(DBIc_LOGPIO(imp_xxh), "\t\tTRY TO BIND AN INT NUMBER\n"); buffer_length = sizeof imp_sth->fbind[idx].numeric_val.lval; imp_sth->fbind[idx].numeric_val.lval= SvIV(imp_sth->params[idx].value); buffer=(void*)&(imp_sth->fbind[idx].numeric_val.lval); if (DBIc_TRACE_LEVEL(imp_xxh) >= 2) PerlIO_printf(DBIc_LOGPIO(imp_xxh), " SCALAR type %"IVdf" ->%"IVdf"<- IS A INT NUMBER\n", sql_type, *(IV *)buffer); break; case MYSQL_TYPE_DOUBLE: if (!SvNOK(imp_sth->params[idx].value) && DBIc_TRACE_LEVEL(imp_xxh) >= 2) PerlIO_printf(DBIc_LOGPIO(imp_xxh), "\t\tTRY TO BIND A FLOAT NUMBER\n"); buffer_length = sizeof imp_sth->fbind[idx].numeric_val.dval; imp_sth->fbind[idx].numeric_val.dval= SvNV(imp_sth->params[idx].value); buffer=(char*)&(imp_sth->fbind[idx].numeric_val.dval); if (DBIc_TRACE_LEVEL(imp_xxh) >= 2) PerlIO_printf(DBIc_LOGPIO(imp_xxh), " SCALAR type %"IVdf" ->%f<- IS A FLOAT NUMBER\n", sql_type, (double)(*buffer)); break; case MYSQL_TYPE_BLOB: if (DBIc_TRACE_LEVEL(imp_xxh) >= 2) PerlIO_printf(DBIc_LOGPIO(imp_xxh), " SCALAR type BLOB\n"); break; case MYSQL_TYPE_STRING: if (DBIc_TRACE_LEVEL(imp_xxh) >= 2) PerlIO_printf(DBIc_LOGPIO(imp_xxh), " SCALAR type STRING %"IVdf", buffertype=%d\n", sql_type, buffer_type); break; default: croak("Bug in DBD::Mysql file dbdimp.c#dbd_bind_ph: do not know how to handle unknown buffer type."); } if (buffer_type == MYSQL_TYPE_STRING || buffer_type == MYSQL_TYPE_BLOB) { buffer= SvPV(imp_sth->params[idx].value, slen); buffer_length= slen; if (DBIc_TRACE_LEVEL(imp_xxh) >= 2) PerlIO_printf(DBIc_LOGPIO(imp_xxh), " SCALAR type %"IVdf" ->length %d<- IS A STRING or BLOB\n", sql_type, buffer_length); } } else { /*case: buffer_is_null != 0*/ buffer= NULL; if (DBIc_TRACE_LEVEL(imp_xxh) >= 2) PerlIO_printf(DBIc_LOGPIO(imp_xxh), " SCALAR NULL VALUE: buffer type is: %d\n", buffer_type); } /* Type of 
column was changed. Force to rebind */ if (imp_sth->bind[idx].buffer_type != buffer_type) { if (DBIc_TRACE_LEVEL(imp_xxh) >= 2) PerlIO_printf(DBIc_LOGPIO(imp_xxh), " FORCE REBIND: buffer type changed from %d to %d, sql-type=%"IVdf"\n", (int) imp_sth->bind[idx].buffer_type, buffer_type, sql_type); imp_sth->has_been_bound = 0; } /* prepare has been called */ if (imp_sth->has_been_bound) { imp_sth->stmt->params[idx].buffer= buffer; imp_sth->stmt->params[idx].buffer_length= buffer_length; } imp_sth->bind[idx].buffer_type= buffer_type; imp_sth->bind[idx].buffer= buffer; imp_sth->bind[idx].buffer_length= buffer_length; imp_sth->fbind[idx].length= buffer_length; imp_sth->fbind[idx].is_null= buffer_is_null; } #endif return rc; } /*************************************************************************** * * Name: mysql_db_reconnect * * Purpose: If the server has disconnected, try to reconnect. * * Input: h - database or statement handle * * Returns: TRUE for success, FALSE otherwise * **************************************************************************/ int mysql_db_reconnect(SV* h) { dTHX; D_imp_xxh(h); imp_dbh_t* imp_dbh; MYSQL save_socket; if (DBIc_TYPE(imp_xxh) == DBIt_ST) { imp_dbh = (imp_dbh_t*) DBIc_PARENT_COM(imp_xxh); h = DBIc_PARENT_H(imp_xxh); } else imp_dbh= (imp_dbh_t*) imp_xxh; if (mysql_errno(imp_dbh->pmysql) != CR_SERVER_GONE_ERROR) /* Other error */ return FALSE; if (!DBIc_has(imp_dbh, DBIcf_AutoCommit) || !imp_dbh->auto_reconnect) { /* We never reconnect if AutoCommit is turned off. * Otherwise we might get an inconsistent transaction * state. */ return FALSE; } /* my_login will blow away imp_dbh->mysql so we save a copy of * imp_dbh->mysql and put it back where it belongs if the reconnect * fail. Think server is down & reconnect fails but the application eval{}s * the execute, so next time $dbh->quote() gets called, instant SIGSEGV! 
*/ save_socket= *(imp_dbh->pmysql); memcpy (&save_socket, imp_dbh->pmysql,sizeof(save_socket)); memset (imp_dbh->pmysql,0,sizeof(*(imp_dbh->pmysql))); /* we should disconnect the db handle before reconnecting, this will * prevent my_login from thinking it's adopting an active child which * would prevent the handle from actually reconnecting */ if (!dbd_db_disconnect(h, imp_dbh) || !my_login(aTHX_ h, imp_dbh)) { do_error(h, mysql_errno(imp_dbh->pmysql), mysql_error(imp_dbh->pmysql), mysql_sqlstate(imp_dbh->pmysql)); memcpy (imp_dbh->pmysql, &save_socket, sizeof(save_socket)); ++imp_dbh->stats.auto_reconnects_failed; return FALSE; } /* * Tell DBI, that dbh->disconnect should be called for this handle */ DBIc_ACTIVE_on(imp_dbh); ++imp_dbh->stats.auto_reconnects_ok; return TRUE; } /************************************************************************** * * Name: dbd_db_type_info_all * * Purpose: Implements $dbh->type_info_all * * Input: dbh - database handle * imp_sth - drivers private database handle data * * Returns: RV to AV of types * **************************************************************************/ #define PV_PUSH(c) \ if (c) { \ sv= newSVpv((char*) (c), 0); \ SvREADONLY_on(sv); \ } else { \ sv= &PL_sv_undef; \ } \ av_push(row, sv); #define IV_PUSH(i) sv= newSViv((i)); SvREADONLY_on(sv); av_push(row, sv); AV *dbd_db_type_info_all(SV *dbh, imp_dbh_t *imp_dbh) { dTHX; AV *av= newAV(); AV *row; HV *hv; SV *sv; int i; const char *cols[] = { "TYPE_NAME", "DATA_TYPE", "COLUMN_SIZE", "LITERAL_PREFIX", "LITERAL_SUFFIX", "CREATE_PARAMS", "NULLABLE", "CASE_SENSITIVE", "SEARCHABLE", "UNSIGNED_ATTRIBUTE", "FIXED_PREC_SCALE", "AUTO_UNIQUE_VALUE", "LOCAL_TYPE_NAME", "MINIMUM_SCALE", "MAXIMUM_SCALE", "NUM_PREC_RADIX", "SQL_DATATYPE", "SQL_DATETIME_SUB", "INTERVAL_PRECISION", "mysql_native_type", "mysql_is_num" }; dbh= dbh; imp_dbh= imp_dbh; hv= newHV(); av_push(av, newRV_noinc((SV*) hv)); for (i= 0; i < (int)(sizeof(cols) / sizeof(const char*)); i++) { if 
(!hv_store(hv, (char*) cols[i], strlen(cols[i]), newSViv(i), 0)) { SvREFCNT_dec((SV*) av); return Nullav; } } for (i= 0; i < (int)SQL_GET_TYPE_INFO_num; i++) { const sql_type_info_t *t= &SQL_GET_TYPE_INFO_values[i]; row= newAV(); av_push(av, newRV_noinc((SV*) row)); PV_PUSH(t->type_name); IV_PUSH(t->data_type); IV_PUSH(t->column_size); PV_PUSH(t->literal_prefix); PV_PUSH(t->literal_suffix); PV_PUSH(t->create_params); IV_PUSH(t->nullable); IV_PUSH(t->case_sensitive); IV_PUSH(t->searchable); IV_PUSH(t->unsigned_attribute); IV_PUSH(t->fixed_prec_scale); IV_PUSH(t->auto_unique_value); PV_PUSH(t->local_type_name); IV_PUSH(t->minimum_scale); IV_PUSH(t->maximum_scale); if (t->num_prec_radix) { IV_PUSH(t->num_prec_radix); } else av_push(row, &PL_sv_undef); IV_PUSH(t->sql_datatype); /* SQL_DATATYPE*/ IV_PUSH(t->sql_datetime_sub); /* SQL_DATETIME_SUB*/ IV_PUSH(t->interval_precision); /* INTERVAL_PERCISION */ IV_PUSH(t->native_type); IV_PUSH(t->is_num); } return av; } /* dbd_db_quote Properly quotes a value */ SV* dbd_db_quote(SV *dbh, SV *str, SV *type) { dTHX; SV *result; if (SvGMAGICAL(str)) mg_get(str); if (!SvOK(str)) result= newSVpvn("NULL", 4); else { char *ptr, *sptr; STRLEN len; D_imp_dbh(dbh); if (type && SvMAGICAL(type)) mg_get(type); if (type && SvOK(type)) { int i; int tp= SvIV(type); for (i= 0; i < (int)SQL_GET_TYPE_INFO_num; i++) { const sql_type_info_t *t= &SQL_GET_TYPE_INFO_values[i]; if (t->data_type == tp) { if (!t->literal_prefix) return Nullsv; break; } } } ptr= SvPV(str, len); result= newSV(len*2+3); #ifdef SvUTF8 if (SvUTF8(str)) SvUTF8_on(result); #endif sptr= SvPVX(result); *sptr++ = '\''; sptr+= mysql_real_escape_string(imp_dbh->pmysql, sptr, ptr, len); *sptr++= '\''; SvPOK_on(result); SvCUR_set(result, sptr - SvPVX(result)); /* Never hurts NUL terminating a Per string */ *sptr++= '\0'; } return result; } #ifdef DBD_MYSQL_INSERT_ID_IS_GOOD SV *mysql_db_last_insert_id(SV *dbh, imp_dbh_t *imp_dbh, SV *catalog, SV *schema, SV *table, SV *field, SV 
*attr) { dTHX; /* all these non-op settings are to stifle OS X compile warnings */ imp_dbh= imp_dbh; dbh= dbh; catalog= catalog; schema= schema; table= table; field= field; attr= attr; ASYNC_CHECK_RETURN(dbh, &PL_sv_undef); return sv_2mortal(my_ulonglong2str(aTHX_ mysql_insert_id(imp_dbh->pmysql))); } #endif #if MYSQL_ASYNC int mysql_db_async_result(SV* h, MYSQL_RES** resp) { dTHX; D_imp_xxh(h); imp_dbh_t* dbh; MYSQL* svsock = NULL; MYSQL_RES* _res; int retval = 0; int htype; if(! resp) { resp = &_res; } htype = DBIc_TYPE(imp_xxh); if(htype == DBIt_DB) { D_imp_dbh(h); dbh = imp_dbh; } else { D_imp_sth(h); D_imp_dbh_from_sth; dbh = imp_dbh; } if(! dbh->async_query_in_flight) { do_error(h, 2000, "Gathering asynchronous results for a synchronous handle", "HY000"); return -1; } if(dbh->async_query_in_flight != imp_xxh) { do_error(h, 2000, "Gathering async_query_in_flight results for the wrong handle", "HY000"); return -1; } dbh->async_query_in_flight = NULL; svsock= dbh->pmysql; retval= mysql_read_query_result(svsock); if(! retval) { *resp= mysql_store_result(svsock); if (mysql_errno(svsock)) do_error(h, mysql_errno(svsock), mysql_error(svsock), mysql_sqlstate(svsock)); if (!*resp) retval= mysql_affected_rows(svsock); else { retval= mysql_num_rows(*resp); if(resp == &_res) { mysql_free_result(*resp); } } if(htype == DBIt_ST) { D_imp_sth(h); D_imp_dbh_from_sth; if((my_ulonglong)retval+1 != (my_ulonglong)-1) { if(! *resp) { imp_sth->insertid= mysql_insert_id(svsock); #if MYSQL_VERSION_ID >= MULTIPLE_RESULT_SET_VERSION if(! 
mysql_more_results(svsock)) DBIc_ACTIVE_off(imp_sth); #endif } else { DBIc_NUM_FIELDS(imp_sth)= mysql_num_fields(imp_sth->result); imp_sth->done_desc= 0; imp_sth->fetch_done= 0; } } imp_sth->warning_count = mysql_warning_count(imp_dbh->pmysql); } } else { do_error(h, mysql_errno(svsock), mysql_error(svsock), mysql_sqlstate(svsock)); return -1; } return retval; } int mysql_db_async_ready(SV* h) { dTHX; D_imp_xxh(h); imp_dbh_t* dbh; int htype; htype = DBIc_TYPE(imp_xxh); if(htype == DBIt_DB) { D_imp_dbh(h); dbh = imp_dbh; } else { D_imp_sth(h); D_imp_dbh_from_sth; dbh = imp_dbh; } if(dbh->async_query_in_flight) { if(dbh->async_query_in_flight == imp_xxh) { struct pollfd fds; int retval; fds.fd = dbh->pmysql->net.fd; fds.events = POLLIN; retval = poll(&fds, 1, 0); if(retval < 0) { do_error(h, errno, strerror(errno), "HY000"); } return retval; } else { do_error(h, 2000, "Calling mysql_async_ready on the wrong handle", "HY000"); return -1; } } else { do_error(h, 2000, "Handle is not in asynchronous mode", "HY000"); return -1; } } #endif static int parse_number(char *string, STRLEN len, char **end) { int seen_neg; int seen_dec; int seen_e; int seen_plus; int seen_digit; char *cp; seen_neg= seen_dec= seen_e= seen_plus= seen_digit= 0; if (len <= 0) { len= strlen(string); } cp= string; /* Skip leading whitespace */ while (*cp && isspace(*cp)) cp++; for ( ; *cp; cp++) { if ('-' == *cp) { if (seen_neg >= 2) { /* third '-'. number can contains two '-'. because -1e-10 is valid number */ break; } seen_neg += 1; } else if ('.' == *cp) { if (seen_dec) { /* second '.' 
*/ break; } seen_dec= 1; } else if ('e' == *cp) { if (seen_e) { /* second 'e' */ break; } seen_e= 1; } else if ('+' == *cp) { if (seen_plus) { /* second '+' */ break; } seen_plus= 1; } else if (!isdigit(*cp)) { /* Not sure why this was changed */ /* seen_digit= 1; */ break; } } *end= cp; /* length 0 -> not a number */ /* Need to revisit this */ /*if (len == 0 || cp - string < (int) len || seen_digit == 0) {*/ if (len == 0 || cp - string < (int) len) { return -1; } return 0; }
dbd_st_prepare( SV *sth, imp_sth_t *imp_sth, char *statement, SV *attribs) { int i; SV **svp; dTHX; #if MYSQL_VERSION_ID >= SERVER_PREPARE_VERSION #if MYSQL_VERSION_ID < CALL_PLACEHOLDER_VERSION char *str_ptr, *str_last_ptr; #if MYSQL_VERSION_ID < LIMIT_PLACEHOLDER_VERSION int limit_flag=0; #endif #endif int col_type, prepare_retval; MYSQL_BIND *bind, *bind_end; imp_sth_phb_t *fbind; #endif D_imp_xxh(sth); D_imp_dbh_from_sth; if (DBIc_TRACE_LEVEL(imp_xxh) >= 2) PerlIO_printf(DBIc_LOGPIO(imp_xxh), "\t-> dbd_st_prepare MYSQL_VERSION_ID %d, SQL statement: %s\n", MYSQL_VERSION_ID, statement); #if MYSQL_VERSION_ID >= SERVER_PREPARE_VERSION /* Set default value of 'mysql_server_prepare' attribute for sth from dbh */ imp_sth->use_server_side_prepare= imp_dbh->use_server_side_prepare; if (attribs) { svp= DBD_ATTRIB_GET_SVP(attribs, "mysql_server_prepare", 20); imp_sth->use_server_side_prepare = (svp) ? SvTRUE(*svp) : imp_dbh->use_server_side_prepare; svp = DBD_ATTRIB_GET_SVP(attribs, "async", 5); if(svp && SvTRUE(*svp)) { #if MYSQL_ASYNC imp_sth->is_async = TRUE; imp_sth->use_server_side_prepare = FALSE; #else do_error(sth, 2000, "Async support was not built into this version of DBD::mysql", "HY000"); return 0; #endif } } imp_sth->fetch_done= 0; #endif imp_sth->done_desc= 0; imp_sth->result= NULL; imp_sth->currow= 0; /* Set default value of 'mysql_use_result' attribute for sth from dbh */ svp= DBD_ATTRIB_GET_SVP(attribs, "mysql_use_result", 16); imp_sth->use_mysql_use_result= svp ? 
SvTRUE(*svp) : imp_dbh->use_mysql_use_result; for (i= 0; i < AV_ATTRIB_LAST; i++) imp_sth->av_attr[i]= Nullav; /* Clean-up previous result set(s) for sth to prevent 'Commands out of sync' error */ mysql_st_free_result_sets(sth, imp_sth); #if MYSQL_VERSION_ID >= SERVER_PREPARE_VERSION && MYSQL_VERSION_ID < CALL_PLACEHOLDER_VERSION if (imp_sth->use_server_side_prepare) { if (DBIc_TRACE_LEVEL(imp_xxh) >= 2) PerlIO_printf(DBIc_LOGPIO(imp_xxh), "\t\tuse_server_side_prepare set, check restrictions\n"); /* This code is here because placeholder support is not implemented for statements with :- 1. LIMIT < 5.0.7 2. CALL < 5.5.3 (Added support for out & inout parameters) In these cases we have to disable server side prepared statements NOTE: These checks could cause a false positive on statements which include columns / table names that match "call " or " limit " */ if (DBIc_TRACE_LEVEL(imp_xxh) >= 2) PerlIO_printf(DBIc_LOGPIO(imp_xxh), #if MYSQL_VERSION_ID < LIMIT_PLACEHOLDER_VERSION "\t\tneed to test for LIMIT & CALL\n"); #else "\t\tneed to test for restrictions\n"); #endif str_last_ptr = statement + strlen(statement); for (str_ptr= statement; str_ptr < str_last_ptr; str_ptr++) { #if MYSQL_VERSION_ID < LIMIT_PLACEHOLDER_VERSION /* Place holders not supported in LIMIT's */ if (limit_flag) { if (*str_ptr == '?') { if (DBIc_TRACE_LEVEL(imp_xxh) >= 2) PerlIO_printf(DBIc_LOGPIO(imp_xxh), "\t\tLIMIT and ? found, set to use_server_side_prepare=0\n"); /* ... 
then we do not want to try server side prepare (use emulation) */ imp_sth->use_server_side_prepare= 0; break; } } else if (str_ptr < str_last_ptr - 6 && isspace(*(str_ptr + 0)) && tolower(*(str_ptr + 1)) == 'l' && tolower(*(str_ptr + 2)) == 'i' && tolower(*(str_ptr + 3)) == 'm' && tolower(*(str_ptr + 4)) == 'i' && tolower(*(str_ptr + 5)) == 't' && isspace(*(str_ptr + 6))) { if (DBIc_TRACE_LEVEL(imp_xxh) >= 2) PerlIO_printf(DBIc_LOGPIO(imp_xxh), "LIMIT set limit flag to 1\n"); limit_flag= 1; } #endif /* Place holders not supported in CALL's */ if (str_ptr < str_last_ptr - 4 && tolower(*(str_ptr + 0)) == 'c' && tolower(*(str_ptr + 1)) == 'a' && tolower(*(str_ptr + 2)) == 'l' && tolower(*(str_ptr + 3)) == 'l' && isspace(*(str_ptr + 4))) { if (DBIc_TRACE_LEVEL(imp_xxh) >= 2) PerlIO_printf(DBIc_LOGPIO(imp_xxh), "Disable PS mode for CALL()\n"); imp_sth->use_server_side_prepare= 0; break; } } } #endif #if MYSQL_VERSION_ID >= SERVER_PREPARE_VERSION if (imp_sth->use_server_side_prepare) { if (DBIc_TRACE_LEVEL(imp_xxh) >= 2) PerlIO_printf(DBIc_LOGPIO(imp_xxh), "\t\tuse_server_side_prepare set\n"); /* do we really need this? If we do, we should return, not just continue */ if (imp_sth->stmt) fprintf(stderr, "ERROR: Trying to prepare new stmt while we have \ already not closed one \n"); imp_sth->stmt= mysql_stmt_init(imp_dbh->pmysql); if (! 
imp_sth->stmt) { if (DBIc_TRACE_LEVEL(imp_xxh) >= 2) PerlIO_printf(DBIc_LOGPIO(imp_xxh), "\t\tERROR: Unable to return MYSQL_STMT structure \ from mysql_stmt_init(): ERROR NO: %d ERROR MSG:%s\n", mysql_errno(imp_dbh->pmysql), mysql_error(imp_dbh->pmysql)); } prepare_retval= mysql_stmt_prepare(imp_sth->stmt, statement, strlen(statement)); if (DBIc_TRACE_LEVEL(imp_xxh) >= 2) PerlIO_printf(DBIc_LOGPIO(imp_xxh), "\t\tmysql_stmt_prepare returned %d\n", prepare_retval); if (prepare_retval) { if (DBIc_TRACE_LEVEL(imp_xxh) >= 2) PerlIO_printf(DBIc_LOGPIO(imp_xxh), "\t\tmysql_stmt_prepare %d %s\n", mysql_stmt_errno(imp_sth->stmt), mysql_stmt_error(imp_sth->stmt)); /* For commands that are not supported by server side prepared statement mechanism lets try to pass them through regular API */ if (mysql_stmt_errno(imp_sth->stmt) == ER_UNSUPPORTED_PS) { if (DBIc_TRACE_LEVEL(imp_xxh) >= 2) PerlIO_printf(DBIc_LOGPIO(imp_xxh), "\t\tSETTING imp_sth->use_server_side_prepare to 0\n"); imp_sth->use_server_side_prepare= 0; } else { do_error(sth, mysql_stmt_errno(imp_sth->stmt), mysql_stmt_error(imp_sth->stmt), mysql_sqlstate(imp_dbh->pmysql)); mysql_stmt_close(imp_sth->stmt); imp_sth->stmt= NULL; return FALSE; } } else { DBIc_NUM_PARAMS(imp_sth)= mysql_stmt_param_count(imp_sth->stmt); /* mysql_stmt_param_count */ if (DBIc_NUM_PARAMS(imp_sth) > 0) { int has_statement_fields= imp_sth->stmt->fields != 0; /* Allocate memory for bind variables */ imp_sth->bind= alloc_bind(DBIc_NUM_PARAMS(imp_sth)); imp_sth->fbind= alloc_fbind(DBIc_NUM_PARAMS(imp_sth)); imp_sth->has_been_bound= 0; /* Initialize ph variables with NULL values */ for (i= 0, bind= imp_sth->bind, fbind= imp_sth->fbind, bind_end= bind+DBIc_NUM_PARAMS(imp_sth); bind < bind_end ; bind++, fbind++, i++ ) { /* if this statement has a result set, field types will be correctly identified. 
If there is no result set, such as with an INSERT, fields will not be defined, and all buffer_type will default to MYSQL_TYPE_VAR_STRING */ col_type= (has_statement_fields ? imp_sth->stmt->fields[i].type : MYSQL_TYPE_STRING); bind->buffer_type= mysql_to_perl_type(col_type); if (DBIc_TRACE_LEVEL(imp_xxh) >= 2) PerlIO_printf(DBIc_LOGPIO(imp_xxh), "\t\tmysql_to_perl_type returned %d\n", col_type); bind->buffer= NULL; bind->length= &(fbind->length); bind->is_null= (char*) &(fbind->is_null); fbind->is_null= 1; fbind->length= 0; } } } } #endif #if MYSQL_VERSION_ID >= SERVER_PREPARE_VERSION /* Count the number of parameters (driver, vs server-side) */ if (imp_sth->use_server_side_prepare == 0) DBIc_NUM_PARAMS(imp_sth) = count_params((imp_xxh_t *)imp_dbh, aTHX_ statement, imp_dbh->bind_comment_placeholders); #else DBIc_NUM_PARAMS(imp_sth) = count_params((imp_xxh_t *)imp_dbh, aTHX_ statement, imp_dbh->bind_comment_placeholders); #endif /* Allocate memory for parameters */ imp_sth->params= alloc_param(DBIc_NUM_PARAMS(imp_sth)); DBIc_IMPSET_on(imp_sth); if (DBIc_TRACE_LEVEL(imp_xxh) >= 2) PerlIO_printf(DBIc_LOGPIO(imp_xxh), "\t<- dbd_st_prepare\n"); return 1; }
dbd_st_prepare( SV *sth, imp_sth_t *imp_sth, char *statement, SV *attribs) { int i; SV **svp; dTHX; #if MYSQL_VERSION_ID >= SERVER_PREPARE_VERSION #if MYSQL_VERSION_ID < CALL_PLACEHOLDER_VERSION char *str_ptr, *str_last_ptr; #if MYSQL_VERSION_ID < LIMIT_PLACEHOLDER_VERSION int limit_flag=0; #endif #endif int prepare_retval; MYSQL_BIND *bind, *bind_end; imp_sth_phb_t *fbind; #endif D_imp_xxh(sth); D_imp_dbh_from_sth; if (DBIc_TRACE_LEVEL(imp_xxh) >= 2) PerlIO_printf(DBIc_LOGPIO(imp_xxh), "\t-> dbd_st_prepare MYSQL_VERSION_ID %d, SQL statement: %s\n", MYSQL_VERSION_ID, statement); #if MYSQL_VERSION_ID >= SERVER_PREPARE_VERSION /* Set default value of 'mysql_server_prepare' attribute for sth from dbh */ imp_sth->use_server_side_prepare= imp_dbh->use_server_side_prepare; if (attribs) { svp= DBD_ATTRIB_GET_SVP(attribs, "mysql_server_prepare", 20); imp_sth->use_server_side_prepare = (svp) ? SvTRUE(*svp) : imp_dbh->use_server_side_prepare; svp = DBD_ATTRIB_GET_SVP(attribs, "async", 5); if(svp && SvTRUE(*svp)) { #if MYSQL_ASYNC imp_sth->is_async = TRUE; imp_sth->use_server_side_prepare = FALSE; #else do_error(sth, 2000, "Async support was not built into this version of DBD::mysql", "HY000"); return 0; #endif } } imp_sth->fetch_done= 0; #endif imp_sth->done_desc= 0; imp_sth->result= NULL; imp_sth->currow= 0; /* Set default value of 'mysql_use_result' attribute for sth from dbh */ svp= DBD_ATTRIB_GET_SVP(attribs, "mysql_use_result", 16); imp_sth->use_mysql_use_result= svp ? 
SvTRUE(*svp) : imp_dbh->use_mysql_use_result; for (i= 0; i < AV_ATTRIB_LAST; i++) imp_sth->av_attr[i]= Nullav; /* Clean-up previous result set(s) for sth to prevent 'Commands out of sync' error */ mysql_st_free_result_sets(sth, imp_sth); #if MYSQL_VERSION_ID >= SERVER_PREPARE_VERSION && MYSQL_VERSION_ID < CALL_PLACEHOLDER_VERSION if (imp_sth->use_server_side_prepare) { if (DBIc_TRACE_LEVEL(imp_xxh) >= 2) PerlIO_printf(DBIc_LOGPIO(imp_xxh), "\t\tuse_server_side_prepare set, check restrictions\n"); /* This code is here because placeholder support is not implemented for statements with :- 1. LIMIT < 5.0.7 2. CALL < 5.5.3 (Added support for out & inout parameters) In these cases we have to disable server side prepared statements NOTE: These checks could cause a false positive on statements which include columns / table names that match "call " or " limit " */ if (DBIc_TRACE_LEVEL(imp_xxh) >= 2) PerlIO_printf(DBIc_LOGPIO(imp_xxh), #if MYSQL_VERSION_ID < LIMIT_PLACEHOLDER_VERSION "\t\tneed to test for LIMIT & CALL\n"); #else "\t\tneed to test for restrictions\n"); #endif str_last_ptr = statement + strlen(statement); for (str_ptr= statement; str_ptr < str_last_ptr; str_ptr++) { #if MYSQL_VERSION_ID < LIMIT_PLACEHOLDER_VERSION /* Place holders not supported in LIMIT's */ if (limit_flag) { if (*str_ptr == '?') { if (DBIc_TRACE_LEVEL(imp_xxh) >= 2) PerlIO_printf(DBIc_LOGPIO(imp_xxh), "\t\tLIMIT and ? found, set to use_server_side_prepare=0\n"); /* ... 
then we do not want to try server side prepare (use emulation) */ imp_sth->use_server_side_prepare= 0; break; } } else if (str_ptr < str_last_ptr - 6 && isspace(*(str_ptr + 0)) && tolower(*(str_ptr + 1)) == 'l' && tolower(*(str_ptr + 2)) == 'i' && tolower(*(str_ptr + 3)) == 'm' && tolower(*(str_ptr + 4)) == 'i' && tolower(*(str_ptr + 5)) == 't' && isspace(*(str_ptr + 6))) { if (DBIc_TRACE_LEVEL(imp_xxh) >= 2) PerlIO_printf(DBIc_LOGPIO(imp_xxh), "LIMIT set limit flag to 1\n"); limit_flag= 1; } #endif /* Place holders not supported in CALL's */ if (str_ptr < str_last_ptr - 4 && tolower(*(str_ptr + 0)) == 'c' && tolower(*(str_ptr + 1)) == 'a' && tolower(*(str_ptr + 2)) == 'l' && tolower(*(str_ptr + 3)) == 'l' && isspace(*(str_ptr + 4))) { if (DBIc_TRACE_LEVEL(imp_xxh) >= 2) PerlIO_printf(DBIc_LOGPIO(imp_xxh), "Disable PS mode for CALL()\n"); imp_sth->use_server_side_prepare= 0; break; } } } #endif #if MYSQL_VERSION_ID >= SERVER_PREPARE_VERSION if (imp_sth->use_server_side_prepare) { if (DBIc_TRACE_LEVEL(imp_xxh) >= 2) PerlIO_printf(DBIc_LOGPIO(imp_xxh), "\t\tuse_server_side_prepare set\n"); /* do we really need this? If we do, we should return, not just continue */ if (imp_sth->stmt) fprintf(stderr, "ERROR: Trying to prepare new stmt while we have \ already not closed one \n"); imp_sth->stmt= mysql_stmt_init(imp_dbh->pmysql); if (! 
imp_sth->stmt) { if (DBIc_TRACE_LEVEL(imp_xxh) >= 2) PerlIO_printf(DBIc_LOGPIO(imp_xxh), "\t\tERROR: Unable to return MYSQL_STMT structure \ from mysql_stmt_init(): ERROR NO: %d ERROR MSG:%s\n", mysql_errno(imp_dbh->pmysql), mysql_error(imp_dbh->pmysql)); } prepare_retval= mysql_stmt_prepare(imp_sth->stmt, statement, strlen(statement)); if (DBIc_TRACE_LEVEL(imp_xxh) >= 2) PerlIO_printf(DBIc_LOGPIO(imp_xxh), "\t\tmysql_stmt_prepare returned %d\n", prepare_retval); if (prepare_retval) { if (DBIc_TRACE_LEVEL(imp_xxh) >= 2) PerlIO_printf(DBIc_LOGPIO(imp_xxh), "\t\tmysql_stmt_prepare %d %s\n", mysql_stmt_errno(imp_sth->stmt), mysql_stmt_error(imp_sth->stmt)); /* For commands that are not supported by server side prepared statement mechanism lets try to pass them through regular API */ if (mysql_stmt_errno(imp_sth->stmt) == ER_UNSUPPORTED_PS) { if (DBIc_TRACE_LEVEL(imp_xxh) >= 2) PerlIO_printf(DBIc_LOGPIO(imp_xxh), "\t\tSETTING imp_sth->use_server_side_prepare to 0\n"); imp_sth->use_server_side_prepare= 0; } else { do_error(sth, mysql_stmt_errno(imp_sth->stmt), mysql_stmt_error(imp_sth->stmt), mysql_sqlstate(imp_dbh->pmysql)); mysql_stmt_close(imp_sth->stmt); imp_sth->stmt= NULL; return FALSE; } } else { DBIc_NUM_PARAMS(imp_sth)= mysql_stmt_param_count(imp_sth->stmt); /* mysql_stmt_param_count */ if (DBIc_NUM_PARAMS(imp_sth) > 0) { /* Allocate memory for bind variables */ imp_sth->bind= alloc_bind(DBIc_NUM_PARAMS(imp_sth)); imp_sth->fbind= alloc_fbind(DBIc_NUM_PARAMS(imp_sth)); imp_sth->has_been_bound= 0; /* Initialize ph variables with NULL values */ for (i= 0, bind= imp_sth->bind, fbind= imp_sth->fbind, bind_end= bind+DBIc_NUM_PARAMS(imp_sth); bind < bind_end ; bind++, fbind++, i++ ) { bind->buffer_type= MYSQL_TYPE_STRING; bind->buffer= NULL; bind->length= &(fbind->length); bind->is_null= (char*) &(fbind->is_null); fbind->is_null= 1; fbind->length= 0; } } } } #endif #if MYSQL_VERSION_ID >= SERVER_PREPARE_VERSION /* Count the number of parameters (driver, vs 
server-side) */ if (imp_sth->use_server_side_prepare == 0) DBIc_NUM_PARAMS(imp_sth) = count_params((imp_xxh_t *)imp_dbh, aTHX_ statement, imp_dbh->bind_comment_placeholders); #else DBIc_NUM_PARAMS(imp_sth) = count_params((imp_xxh_t *)imp_dbh, aTHX_ statement, imp_dbh->bind_comment_placeholders); #endif /* Allocate memory for parameters */ imp_sth->params= alloc_param(DBIc_NUM_PARAMS(imp_sth)); DBIc_IMPSET_on(imp_sth); if (DBIc_TRACE_LEVEL(imp_xxh) >= 2) PerlIO_printf(DBIc_LOGPIO(imp_xxh), "\t<- dbd_st_prepare\n"); return 1; }
{'added': [(2753, ' int prepare_retval;'), (2962, ' bind->buffer_type= MYSQL_TYPE_STRING;')], 'deleted': [(2753, ' int col_type, prepare_retval;'), (2949, ' int has_statement_fields= imp_sth->stmt->fields != 0;'), (2963, ' /*'), (2964, ' if this statement has a result set, field types will be'), (2965, ' correctly identified. If there is no result set, such as'), (2966, ' with an INSERT, fields will not be defined, and all buffer_type'), (2967, ' will default to MYSQL_TYPE_VAR_STRING'), (2968, ' */'), (2969, ' col_type= (has_statement_fields ?'), (2970, ' imp_sth->stmt->fields[i].type : MYSQL_TYPE_STRING);'), (2971, ''), (2972, ' bind->buffer_type= mysql_to_perl_type(col_type);'), (2973, ''), (2974, ' if (DBIc_TRACE_LEVEL(imp_xxh) >= 2)'), (2975, ' PerlIO_printf(DBIc_LOGPIO(imp_xxh), "\\t\\tmysql_to_perl_type returned %d\\n", col_type);'), (2976, '')]}
2
16
3,668
21,583
186
1,126
57
https://github.com/perl5-dbi/DBD-mysql
CVE-2016-1249
CWE-125
331
secure_enclave.c
C
trustedGetPublicEcdsaKeyAES
/* Modifications Copyright (C) 2019-2020 SKALE Labs Copyright 2018 Intel Corporation Redistribution and use in source and binary forms, with or without modification, are permitted provided that the following conditions are met: 1. Redistributions of source code must retain the above copyright notice, this list of conditions and the following disclaimer. 2. Redistributions in binary form must reproduce the above copyright notice, this list of conditions and the following disclaimer in the documentation and/or other materials provided with the distribution. 3. Neither the name of the copyright holder nor the names of its contributors may be used to endorse or promote products derived from this software without specific prior written permission. THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 
*/ #include <math.h> #include <string.h> #include <stdio.h> #include <stdbool.h> #include <assert.h> #include "secure_enclave_t.h" #include "sgx_tcrypto.h" #include "sgx_tseal.h" #include <sgx_tgmp.h> #include <sgx_trts.h> #include <sgx_key.h> #include "Point.h" #include "DomainParameters.h" #include "Signature.h" #include "Curves.h" #include "DHDkg.h" #include "AESUtils.h" #include "EnclaveConstants.h" #include "EnclaveCommon.h" #include "SIGNED_ENCLAVE_VERSION" #define STRINGIFY(x) #x #define TOSTRING(x) STRINGIFY(x) #define INIT_ERROR_STATE *errString = 0; *errStatus = UNKNOWN_ERROR; #define SET_SUCCESS *errStatus = 0; #define CHECK_STATE(_EXPRESSION_) \ if (!(_EXPRESSION_)) { \ LOG_ERROR("State check failed::");LOG_ERROR(#_EXPRESSION_); \ LOG_ERROR((const char*) __FILE__); \ snprintf(errString, BUF_LEN, "State check failed. Check log."); \ *errStatus = -1; \ return;} #define CHECK_STATE_CLEAN(_EXPRESSION_) \ if (!(_EXPRESSION_)) { \ LOG_ERROR("State check failed::");LOG_ERROR(#_EXPRESSION_); \ LOG_ERROR(__FILE__); LOG_ERROR(__LINE__); \ snprintf(errString, BUF_LEN, "State check failed. 
Check log."); \ *errStatus = -1; \ goto clean;} #define CHECK_STATUS(__ERRMESSAGE__) if (status != SGX_SUCCESS) { \ LOG_ERROR(__FUNCTION__); \ snprintf(errString, BUF_LEN, "failed with status %d : %s", status, __ERRMESSAGE__); \ LOG_ERROR(errString); \ *errStatus = status; \ goto clean; \ }; #define CHECK_STATUS2(__ERRMESSAGE__) if (status != SGX_SUCCESS) { \ snprintf(errString, BUF_LEN, __ERRMESSAGE__, status); \ LOG_ERROR(errString); \ *errStatus = status; \ goto clean; \ }; void *(*gmp_realloc_func)(void *, size_t, size_t); void *(*oc_realloc_func)(void *, size_t, size_t); void (*gmp_free_func)(void *, size_t); void (*oc_free_func)(void *, size_t); void *reallocate_function(void *, size_t, size_t); void free_function(void *, size_t); unsigned char *globalRandom = NULL; #define CALL_ONCE \ static volatile bool called = false;\ if (called) { \ LOG_ERROR(__FUNCTION__); \ LOG_ERROR("This function shouldnt be called twice. Aborting!"); \ abort(); \ } else {called = true;}; void trustedEnclaveInit(uint32_t _logLevel) { CALL_ONCE LOG_INFO(__FUNCTION__); globalLogLevel_ = _logLevel; oc_realloc_func = &reallocate_function; oc_free_func = &free_function; LOG_INFO("Setting memory functions"); mp_get_memory_functions(NULL, &gmp_realloc_func, &gmp_free_func); mp_set_memory_functions(NULL, oc_realloc_func, oc_free_func); LOG_INFO("Calling enclave init"); enclave_init(); LOG_INFO("Reading random"); globalRandom = calloc(32,1); int ret = sgx_read_rand(globalRandom, 32); if(ret != SGX_SUCCESS) { LOG_ERROR("sgx_read_rand failed. Aboring enclave."); abort(); } LOG_INFO("Successfully inited enclave. Signed enclave version:" SIGNED_ENCLAVE_VERSION ); #ifndef SGX_DEBUG LOG_INFO("SECURITY WARNING: sgxwallet is running in INSECURE DEBUG MODE! NEVER USE IN PRODUCTION!"); #endif #if SGX_DEBUG != 0 LOG_INFO("SECURITY WARNING: sgxwallet is running in INSECURE DEBUG MODE! 
NEVER USE IN PRODUCTION!"); #endif #if SGX_MODE == SIM LOG_INFO("SECURITY WARNING: sgxwallet is running in INSECURE SIMULATION MODE! NEVER USE IN PRODUCTION!"); #endif } void free_function(void *ptr, size_t sz) { if (sgx_is_within_enclave(ptr, sz)) gmp_free_func(ptr, sz); else { sgx_status_t status; status = oc_free(ptr, sz); if (status != SGX_SUCCESS) abort(); } } void *reallocate_function(void *ptr, size_t osize, size_t nsize) { uint64_t nptr; sgx_status_t status; if (sgx_is_within_enclave(ptr, osize)) { return gmp_realloc_func(ptr, osize, nsize); } status = oc_realloc(&nptr, ptr, osize, nsize); if (status != SGX_SUCCESS) abort(); /* * If the entire range of allocated memory is not outside the enclave * then something truly terrible has happened. In theory, we could * free() and try again, but would you trust the OS at this point? */ if (!sgx_is_outside_enclave((void *) ptr, nsize)) abort(); return (void *) nptr; } void get_global_random(unsigned char *_randBuff, uint64_t _size) { char errString[BUF_LEN]; int status; int *errStatus = &status; INIT_ERROR_STATE CHECK_STATE(_size <= 32) CHECK_STATE(_randBuff); sgx_sha_state_handle_t shaStateHandle; CHECK_STATE(sgx_sha256_init(&shaStateHandle) == SGX_SUCCESS); CHECK_STATE(sgx_sha256_update(globalRandom, 32, shaStateHandle) == SGX_SUCCESS); CHECK_STATE(sgx_sha256_get_hash(shaStateHandle, (sgx_sha256_hash_t *)globalRandom) == SGX_SUCCESS); CHECK_STATE(sgx_sha256_close(shaStateHandle) == SGX_SUCCESS); memcpy(_randBuff, globalRandom, _size); } void sealHexSEK(int *errStatus, char *errString, uint8_t *encrypted_sek, uint32_t *enc_len, char *sek_hex) { CALL_ONCE LOG_INFO(__FUNCTION__); INIT_ERROR_STATE CHECK_STATE(encrypted_sek); CHECK_STATE(sek_hex); CHECK_STATE(strnlen(sek_hex, 33) == 32) uint64_t plaintextLen = strlen(sek_hex) + 1; uint64_t sealedLen = sgx_calc_sealed_data_size(0, plaintextLen); sgx_attributes_t attribute_mask; attribute_mask.flags = 0xfffffffffffffff3; attribute_mask.xfrm = 0x0; sgx_misc_select_t misc 
= 0xF0000000; sgx_status_t status = sgx_seal_data_ex(SGX_KEYPOLICY_MRENCLAVE, attribute_mask, misc, 0, NULL, plaintextLen, (uint8_t *) sek_hex, sealedLen, (sgx_sealed_data_t *) encrypted_sek); CHECK_STATUS("seal SEK failed after SEK generation"); uint32_t encrypt_text_length = sgx_get_encrypt_txt_len((const sgx_sealed_data_t *)encrypted_sek); CHECK_STATE(encrypt_text_length = plaintextLen); SAFE_CHAR_BUF(unsealedKey, BUF_LEN); uint32_t decLen = BUF_LEN; uint32_t add_text_length = sgx_get_add_mac_txt_len((const sgx_sealed_data_t *)encrypted_sek); CHECK_STATE(add_text_length == 0); CHECK_STATE(sgx_is_within_enclave(encrypted_sek,sizeof(sgx_sealed_data_t))); status = sgx_unseal_data((const sgx_sealed_data_t *)encrypted_sek, NULL, NULL, (uint8_t *) unsealedKey, &decLen ); CHECK_STATUS("seal/unseal SEK failed after SEK generation in unseal"); *enc_len = sealedLen; SET_SUCCESS clean: LOG_INFO(__FUNCTION__ ); LOG_INFO("SGX call completed"); } void trustedGenerateSEK(int *errStatus, char *errString, uint8_t *encrypted_sek, uint32_t *enc_len, char *sek_hex) { CALL_ONCE LOG_INFO(__FUNCTION__); INIT_ERROR_STATE CHECK_STATE(encrypted_sek); CHECK_STATE(sek_hex); RANDOM_CHAR_BUF(SEK_raw, SGX_AESGCM_KEY_SIZE); carray2Hex((uint8_t*) SEK_raw, SGX_AESGCM_KEY_SIZE, sek_hex); memcpy(AES_key, SEK_raw, SGX_AESGCM_KEY_SIZE); derive_DH_Key(); sealHexSEK(errStatus, errString, encrypted_sek, enc_len, sek_hex); if (*errStatus != 0) { LOG_ERROR("sealHexSEK failed"); goto clean; } SET_SUCCESS clean: LOG_INFO(__FUNCTION__ ); LOG_INFO("SGX call completed"); } void trustedSetSEK(int *errStatus, char *errString, uint8_t *encrypted_sek) { CALL_ONCE LOG_INFO(__FUNCTION__); INIT_ERROR_STATE CHECK_STATE(encrypted_sek); SAFE_CHAR_BUF(aes_key_hex, BUF_LEN); uint32_t dec_len = BUF_LEN; sgx_status_t status = sgx_unseal_data( (const sgx_sealed_data_t *) encrypted_sek, NULL, 0, (uint8_t *)aes_key_hex, &dec_len); if (status == 0x3001) { LOG_ERROR("Could not decrypt LevelDB storage! 
\n" "If you upgraded sgxwallet software or if you are restoring from backup, please run sgxwallet with -b flag and " "pass your backup key."); } CHECK_STATUS2("sgx unseal SEK failed with status %d"); uint64_t len; hex2carray(aes_key_hex, &len, (uint8_t *) AES_key); derive_DH_Key(); SET_SUCCESS clean: LOG_INFO(__FUNCTION__ ); LOG_INFO("SGX call completed"); } void trustedSetSEK_backup(int *errStatus, char *errString, uint8_t *encrypted_sek, uint32_t *enc_len, const char *sek_hex) { CALL_ONCE LOG_INFO(__FUNCTION__); INIT_ERROR_STATE CHECK_STATE(encrypted_sek); CHECK_STATE(sek_hex); uint64_t len; hex2carray(sek_hex, &len, (uint8_t *) AES_key); derive_DH_Key(); sealHexSEK(errStatus, errString, encrypted_sek, enc_len, (char *)sek_hex); if (*errStatus != 0) { LOG_ERROR("sealHexSEK failed"); goto clean; } SET_SUCCESS clean: ; LOG_INFO(__FUNCTION__ ); LOG_INFO("SGX call completed"); } void trustedGenerateEcdsaKeyAES(int *errStatus, char *errString, uint8_t *encryptedPrivateKey, uint32_t *enc_len, char *pub_key_x, char *pub_key_y) { LOG_INFO(__FUNCTION__); INIT_ERROR_STATE CHECK_STATE(encryptedPrivateKey); CHECK_STATE(pub_key_x); CHECK_STATE(pub_key_y); RANDOM_CHAR_BUF(rand_char, 32); mpz_t seed; mpz_init(seed); mpz_t skey; mpz_init(skey); point Pkey = point_init(); mpz_import(seed, 32, 1, sizeof(rand_char[0]), 0, 0, rand_char); mpz_mod(skey, seed, curve->p); signature_extract_public_key(Pkey, skey, curve); SAFE_CHAR_BUF(arr_x, BUF_LEN); mpz_get_str(arr_x, ECDSA_SKEY_BASE, Pkey->x); int n_zeroes = 64 - strlen(arr_x); for (int i = 0; i < n_zeroes; i++) { pub_key_x[i] = '0'; } strncpy(pub_key_x + n_zeroes, arr_x, 1024 - n_zeroes); SAFE_CHAR_BUF(arr_y, BUF_LEN); mpz_get_str(arr_y, ECDSA_SKEY_BASE, Pkey->y); n_zeroes = 64 - strlen(arr_y); for (int i = 0; i < n_zeroes; i++) { pub_key_y[i] = '0'; } strncpy(pub_key_y + n_zeroes, arr_y, 1024 - n_zeroes); SAFE_CHAR_BUF(skey_str, ECDSA_SKEY_LEN);SAFE_CHAR_BUF(arr_skey_str, mpz_sizeinbase(skey, ECDSA_SKEY_BASE) + 2); 
mpz_get_str(arr_skey_str, ECDSA_SKEY_BASE, skey); n_zeroes = 64 - strlen(arr_skey_str); for (int i = 0; i < n_zeroes; i++) { skey_str[i] = '0'; } strncpy(skey_str + n_zeroes, arr_skey_str, 65 - n_zeroes); skey_str[ECDSA_SKEY_LEN - 1] = 0; snprintf(errString, BUF_LEN, "skey len is %d\n", (int) strlen(skey_str)); int status = AES_encrypt((char *) skey_str, encryptedPrivateKey, BUF_LEN); CHECK_STATUS("ecdsa private key encryption failed"); *enc_len = strlen(skey_str) + SGX_AESGCM_MAC_SIZE + SGX_AESGCM_IV_SIZE; status = AES_decrypt(encryptedPrivateKey, *enc_len, skey_str, ECDSA_SKEY_LEN); CHECK_STATUS2("ecdsa private key decr failed with status %d"); SET_SUCCESS clean: mpz_clear(seed); mpz_clear(skey); point_clear(Pkey); LOG_INFO(__FUNCTION__ ); LOG_INFO("SGX call completed"); } void trustedGetPublicEcdsaKeyAES(int *errStatus, char *errString, uint8_t *encryptedPrivateKey, uint32_t enc_len, char *pub_key_x, char *pub_key_y) { LOG_DEBUG(__FUNCTION__); INIT_ERROR_STATE SAFE_CHAR_BUF(skey, ECDSA_SKEY_LEN); mpz_t privateKeyMpz; mpz_init(privateKeyMpz); point pKey = point_init(); point pKey_test = point_init(); CHECK_STATE(encryptedPrivateKey); CHECK_STATE(pub_key_x); CHECK_STATE(pub_key_y); int status = AES_decrypt(encryptedPrivateKey, enc_len, skey, ECDSA_SKEY_LEN); CHECK_STATUS2("AES_decrypt failed with status %d"); skey[enc_len - SGX_AESGCM_MAC_SIZE - SGX_AESGCM_IV_SIZE] = '\0'; strncpy(errString, skey, 1024); status = mpz_set_str(privateKeyMpz, skey, ECDSA_SKEY_BASE); CHECK_STATUS("mpz_set_str failed for private key"); signature_extract_public_key(pKey, privateKeyMpz, curve); point_multiplication(pKey_test, privateKeyMpz, curve->G, curve); if (!point_cmp(pKey, pKey_test)) { snprintf(errString, BUF_LEN, "Points are not equal"); LOG_ERROR(errString); *errStatus = -11; goto clean; } SAFE_CHAR_BUF(arr_x, BUF_LEN); mpz_get_str(arr_x, ECDSA_SKEY_BASE, pKey->x); int n_zeroes = 64 - strlen(arr_x); for (int i = 0; i < n_zeroes; i++) { pub_key_x[i] = '0'; } strncpy(pub_key_x + 
n_zeroes, arr_x, 1024 - n_zeroes); SAFE_CHAR_BUF(arr_y, BUF_LEN); mpz_get_str(arr_y, ECDSA_SKEY_BASE, pKey->y); n_zeroes = 64 - strlen(arr_y); for (int i = 0; i < n_zeroes; i++) { pub_key_y[i] = '0'; } strncpy(pub_key_y + n_zeroes, arr_y, 1024 - n_zeroes); SET_SUCCESS clean: mpz_clear(privateKeyMpz); point_clear(pKey); point_clear(pKey_test); static uint64_t counter = 0; if (counter % 1000 == 0) { LOG_INFO(__FUNCTION__); LOG_INFO("Thousand SGX calls completed"); } counter++; } static uint64_t sigCounter = 0; void trustedEcdsaSignAES(int *errStatus, char *errString, uint8_t *encryptedPrivateKey, uint32_t enc_len, const char *hash, char *sigR, char *sigS, uint8_t *sig_v, int base) { LOG_DEBUG(__FUNCTION__); INIT_ERROR_STATE CHECK_STATE(encryptedPrivateKey); CHECK_STATE(hash); CHECK_STATE(sigR); CHECK_STATE(sigS); SAFE_CHAR_BUF(skey, ECDSA_SKEY_LEN); mpz_t privateKeyMpz; mpz_init(privateKeyMpz); mpz_t msgMpz; mpz_init(msgMpz); signature sign = signature_init(); int status = AES_decrypt(encryptedPrivateKey, enc_len, skey, ECDSA_SKEY_LEN); CHECK_STATUS2("aes decrypt failed with status %d"); skey[enc_len - SGX_AESGCM_MAC_SIZE - SGX_AESGCM_IV_SIZE] = '\0'; if (mpz_set_str(privateKeyMpz, skey, ECDSA_SKEY_BASE) == -1) { *errStatus = -1; snprintf(errString, BUF_LEN, "invalid secret key"); LOG_ERROR(errString); goto clean; } if (mpz_set_str(msgMpz, hash, 16) == -1) { *errStatus = -1; snprintf(errString, BUF_LEN, "invalid message hash"); LOG_ERROR(errString); goto clean; } signature_sign(sign, msgMpz, privateKeyMpz, curve); sigCounter++; if (sigCounter % 1000 == 0) { point Pkey = point_init(); signature_extract_public_key(Pkey, privateKeyMpz, curve); if (!signature_verify(msgMpz, sign, Pkey, curve)) { *errStatus = -2; snprintf(errString, BUF_LEN, "signature is not verified! 
"); point_clear(Pkey); goto clean; } point_clear(Pkey); } SAFE_CHAR_BUF(arrM, BUF_LEN); mpz_get_str(arrM, 16, msgMpz); snprintf(errString, BUF_LEN, "message is %s ", arrM); SAFE_CHAR_BUF(arrR, BUF_LEN); mpz_get_str(arrR, base, sign->r); strncpy(sigR, arrR, 1024); SAFE_CHAR_BUF(arrS, BUF_LEN); mpz_get_str(arrS, base, sign->s); strncpy(sigS, arrS, 1024); *sig_v = sign->v; SET_SUCCESS clean: mpz_clear(privateKeyMpz); mpz_clear(msgMpz); signature_free(sign); LOG_DEBUG(__FUNCTION__ ); LOG_DEBUG("SGX call completed"); } void trustedDecryptKeyAES(int *errStatus, char *errString, uint8_t *encryptedPrivateKey, uint32_t enc_len, char *key) { LOG_DEBUG(__FUNCTION__); INIT_ERROR_STATE CHECK_STATE(encryptedPrivateKey); CHECK_STATE(key); *errStatus = -9; int status = AES_decrypt_DH(encryptedPrivateKey, enc_len, key, 3072); if (status != 0) { *errStatus = status; snprintf(errString, BUF_LEN, "aes decrypt failed with status %d", status); LOG_ERROR(errString); goto clean; } *errStatus = -10; uint64_t keyLen = strnlen(key, MAX_KEY_LENGTH); if (keyLen == MAX_KEY_LENGTH) { snprintf(errString, BUF_LEN, "Key is not null terminated"); LOG_ERROR(errString); goto clean; } SET_SUCCESS clean: ; } void trustedEncryptKeyAES(int *errStatus, char *errString, const char *key, uint8_t *encryptedPrivateKey, uint32_t *enc_len) { LOG_INFO(__FUNCTION__); *errString = 0; *errStatus = UNKNOWN_ERROR; CHECK_STATE(key); CHECK_STATE(encryptedPrivateKey); *errStatus = UNKNOWN_ERROR; int status = AES_encrypt_DH((char *)key, encryptedPrivateKey, BUF_LEN); CHECK_STATUS2("AES encrypt failed with status %d"); *enc_len = strlen(key) + SGX_AESGCM_MAC_SIZE + SGX_AESGCM_IV_SIZE; SAFE_CHAR_BUF(decryptedKey, BUF_LEN); status = AES_decrypt_DH(encryptedPrivateKey, *enc_len, decryptedKey, BUF_LEN); CHECK_STATUS2("trustedDecryptKey failed with status %d"); uint64_t decryptedKeyLen = strnlen(decryptedKey, MAX_KEY_LENGTH); if (decryptedKeyLen == MAX_KEY_LENGTH) { snprintf(errString, BUF_LEN, "Decrypted key is not null 
terminated"); LOG_ERROR(errString); goto clean; } *errStatus = -8; if (strncmp(key, decryptedKey, MAX_KEY_LENGTH) != 0) { snprintf(errString, BUF_LEN, "Decrypted key does not match original key"); LOG_ERROR(errString); goto clean; } SET_SUCCESS clean: ; LOG_INFO(__FUNCTION__ ); LOG_INFO("SGX call completed"); } void trustedBlsSignMessageAES(int *errStatus, char *errString, uint8_t *encryptedPrivateKey, uint32_t enc_len, char *_hashX, char *_hashY, char *signature) { LOG_DEBUG(__FUNCTION__); INIT_ERROR_STATE CHECK_STATE(encryptedPrivateKey); CHECK_STATE(_hashX); CHECK_STATE(_hashY); CHECK_STATE(signature); SAFE_CHAR_BUF(key, BUF_LEN);SAFE_CHAR_BUF(sig, BUF_LEN); int status = AES_decrypt(encryptedPrivateKey, enc_len, key, BUF_LEN); CHECK_STATUS("AES decrypt failed") if (!enclave_sign(key, _hashX, _hashY, sig)) { strncpy(errString, "Enclave failed to create bls signature", BUF_LEN); LOG_ERROR(errString); *errStatus = -1; goto clean; } strncpy(signature, sig, BUF_LEN); if (strnlen(signature, BUF_LEN) < 10) { strncpy(errString, "Signature too short", BUF_LEN); LOG_ERROR(errString); *errStatus = -1; goto clean; } SET_SUCCESS LOG_DEBUG("SGX call completed"); clean: ; LOG_DEBUG("SGX call completed"); } void trustedGenDkgSecretAES(int *errStatus, char *errString, uint8_t *encrypted_dkg_secret, uint32_t *enc_len, size_t _t) { LOG_INFO(__FUNCTION__); INIT_ERROR_STATE CHECK_STATE(encrypted_dkg_secret); SAFE_CHAR_BUF(dkg_secret, DKG_BUFER_LENGTH); int status = gen_dkg_poly(dkg_secret, _t); CHECK_STATUS("gen_dkg_poly failed") status = AES_encrypt(dkg_secret, encrypted_dkg_secret, 3 * BUF_LEN); CHECK_STATUS("SGX AES encrypt DKG poly failed"); *enc_len = strlen(dkg_secret) + SGX_AESGCM_MAC_SIZE + SGX_AESGCM_IV_SIZE; SAFE_CHAR_BUF(decr_dkg_secret, DKG_BUFER_LENGTH); status = AES_decrypt(encrypted_dkg_secret, *enc_len, decr_dkg_secret, DKG_BUFER_LENGTH); CHECK_STATUS("aes decrypt dkg poly failed"); if (strcmp(dkg_secret, decr_dkg_secret) != 0) { snprintf(errString, BUF_LEN, 
"encrypted poly is not equal to decrypted poly"); LOG_ERROR(errString); *errStatus = -333; goto clean; } SET_SUCCESS clean: ; LOG_INFO(__FUNCTION__ ); LOG_INFO("SGX call completed"); } void trustedDecryptDkgSecretAES(int *errStatus, char *errString, uint8_t *encrypted_dkg_secret, uint32_t enc_len, uint8_t *decrypted_dkg_secret) { LOG_INFO(__FUNCTION__); INIT_ERROR_STATE CHECK_STATE(encrypted_dkg_secret); CHECK_STATE(decrypted_dkg_secret); int status = AES_decrypt(encrypted_dkg_secret, enc_len, (char *) decrypted_dkg_secret, 3072); CHECK_STATUS2("aes decrypt data - encrypted_dkg_secret failed with status %d") SET_SUCCESS clean: ; LOG_INFO(__FUNCTION__ ); LOG_INFO("SGX call completed"); } void trustedSetEncryptedDkgPolyAES(int *errStatus, char *errString, uint8_t *encrypted_poly, uint32_t enc_len) { LOG_INFO(__FUNCTION__); INIT_ERROR_STATE CHECK_STATE(encrypted_poly); memset(getThreadLocalDecryptedDkgPoly(), 0, DKG_BUFER_LENGTH); int status = AES_decrypt(encrypted_poly, enc_len, (char *) getThreadLocalDecryptedDkgPoly(), DKG_BUFER_LENGTH); CHECK_STATUS2("sgx_unseal_data - encrypted_poly failed with status %d") SET_SUCCESS clean: ; LOG_INFO(__FUNCTION__ ); LOG_INFO("SGX call completed"); } void trustedGetEncryptedSecretShareAES(int *errStatus, char *errString, uint8_t *encrypted_skey, uint32_t *dec_len, char *result_str, char *s_shareG2, char *pub_keyB, uint8_t _t, uint8_t _n, uint8_t ind) { LOG_INFO(__FUNCTION__); INIT_ERROR_STATE uint32_t enc_len; int status; CHECK_STATE(encrypted_skey); CHECK_STATE(result_str); CHECK_STATE(s_shareG2); CHECK_STATE(pub_keyB); LOG_DEBUG(__FUNCTION__); SAFE_CHAR_BUF(skey, ECDSA_SKEY_LEN); SAFE_CHAR_BUF(pub_key_x, BUF_LEN);SAFE_CHAR_BUF(pub_key_y, BUF_LEN); trustedGenerateEcdsaKeyAES(&status, errString, encrypted_skey, &enc_len, pub_key_x, pub_key_y); CHECK_STATUS("trustedGenerateEcdsaKeyAES failed"); status = AES_decrypt(encrypted_skey, enc_len, skey, ECDSA_SKEY_LEN); skey[ECDSA_SKEY_LEN - 1] = 0; CHECK_STATUS2("AES_decrypt failed (in 
trustedGetEncryptedSecretShareAES) with status %d"); *dec_len = enc_len; SAFE_CHAR_BUF(common_key, ECDSA_SKEY_LEN); status = gen_session_key(skey, pub_keyB, common_key); CHECK_STATUS("gen_session_key failed") SAFE_CHAR_BUF(s_share, ECDSA_SKEY_LEN); status = calc_secret_share(getThreadLocalDecryptedDkgPoly(), s_share, _t, _n, ind); CHECK_STATUS("calc secret share failed") status = calc_secret_shareG2(s_share, s_shareG2); CHECK_STATUS("invalid decr secret share"); SAFE_CHAR_BUF(cypher, ECDSA_SKEY_LEN); status=xor_encrypt(common_key, s_share, cypher); CHECK_STATUS("xor_encrypt failed") strncpy(result_str, cypher, strlen(cypher)); strncpy(result_str + strlen(cypher), pub_key_x, strlen(pub_key_x)); strncpy(result_str + strlen(pub_key_x) + strlen(pub_key_y), pub_key_y, strlen(pub_key_y)); SET_SUCCESS clean: ; LOG_INFO(__FUNCTION__ ); LOG_INFO("SGX call completed"); } void trustedGetPublicSharesAES(int *errStatus, char *errString, uint8_t *encrypted_dkg_secret, uint32_t enc_len, char *public_shares, unsigned _t, unsigned _n) { LOG_INFO(__FUNCTION__); INIT_ERROR_STATE CHECK_STATE(encrypted_dkg_secret); CHECK_STATE(public_shares); CHECK_STATE(_t <= _n && _n > 0) SAFE_CHAR_BUF(decrypted_dkg_secret, DKG_MAX_SEALED_LEN); int status = AES_decrypt(encrypted_dkg_secret, enc_len, decrypted_dkg_secret, DKG_MAX_SEALED_LEN); CHECK_STATUS2("aes decrypt data - encrypted_dkg_secret failed with status %d"); status = calc_public_shares(decrypted_dkg_secret, public_shares, _t) != 0; CHECK_STATUS("t does not match polynomial in db"); SET_SUCCESS clean: ; LOG_INFO("SGX call completed"); } void trustedDkgVerifyAES(int *errStatus, char *errString, const char *public_shares, const char *s_share, uint8_t *encryptedPrivateKey, uint64_t enc_len, unsigned _t, int _ind, int *result) { LOG_INFO(__FUNCTION__); INIT_ERROR_STATE CHECK_STATE(public_shares); CHECK_STATE(s_share); CHECK_STATE(encryptedPrivateKey); SAFE_CHAR_BUF(skey, ECDSA_SKEY_LEN); mpz_t s; mpz_init(s); int status = 
AES_decrypt(encryptedPrivateKey, enc_len, skey, ECDSA_SKEY_LEN); CHECK_STATUS2("AES_decrypt failed (in trustedDkgVerifyAES) with status %d"); SAFE_CHAR_BUF(encr_sshare, ECDSA_SKEY_LEN); strncpy(encr_sshare, s_share, ECDSA_SKEY_LEN - 1); SAFE_CHAR_BUF(common_key, ECDSA_SKEY_LEN); status = session_key_recover(skey, s_share, common_key); CHECK_STATUS("session_key_recover failed"); SAFE_CHAR_BUF(decr_sshare, ECDSA_SKEY_LEN); status=xor_decrypt(common_key, encr_sshare, decr_sshare); CHECK_STATUS("xor_decrypt failed") status = mpz_set_str(s, decr_sshare, 16); CHECK_STATUS("invalid decr secret share"); *result = Verification(public_shares, s, _t, _ind); SET_SUCCESS clean: mpz_clear(s); LOG_INFO(__FUNCTION__ ); LOG_INFO("SGX call completed"); } void trustedCreateBlsKeyAES(int *errStatus, char *errString, const char *s_shares, uint8_t *encryptedPrivateKey, uint64_t key_len, uint8_t *encr_bls_key, uint32_t *enc_bls_key_len) { LOG_INFO(__FUNCTION__); INIT_ERROR_STATE CHECK_STATE(s_shares); CHECK_STATE(encryptedPrivateKey); CHECK_STATE(encr_bls_key); SAFE_CHAR_BUF(skey, ECDSA_SKEY_LEN); mpz_t sum; mpz_init(sum); mpz_set_ui(sum, 0); mpz_t q; mpz_init(q); mpz_set_str(q, "21888242871839275222246405745257275088548364400416034343698204186575808495617", 10); mpz_t bls_key; mpz_init(bls_key); int status = AES_decrypt(encryptedPrivateKey, key_len, skey, ECDSA_SKEY_LEN); CHECK_STATUS2("aes decrypt failed with status %d"); skey[ECDSA_SKEY_LEN - 1] = 0; int num_shares = strlen(s_shares) / 192; for (int i = 0; i < num_shares; i++) { SAFE_CHAR_BUF(encr_sshare, 65); strncpy(encr_sshare, s_shares + 192 * i, 64); encr_sshare[64] = 0; SAFE_CHAR_BUF(s_share, 193); strncpy(s_share, s_shares + 192 * i, 192); s_share[192] = 0; SAFE_CHAR_BUF(common_key, 65); status = session_key_recover(skey, s_share, common_key); CHECK_STATUS("session_key_recover failed"); common_key[64] = 0; SAFE_CHAR_BUF(decr_sshare, 65); status = xor_decrypt(common_key, encr_sshare, decr_sshare); CHECK_STATUS("xor_decrypt 
failed"); decr_sshare[64] = 0; mpz_t decr_secret_share; mpz_init(decr_secret_share); if (mpz_set_str(decr_secret_share, decr_sshare, 16) == -1) { *errStatus = 111; snprintf(errString, BUF_LEN, "invalid decrypted secret share"); LOG_ERROR(errString); mpz_clear(decr_secret_share); goto clean; } mpz_addmul_ui(sum, decr_secret_share, 1); mpz_clear(decr_secret_share); } mpz_mod(bls_key, sum, q); SAFE_CHAR_BUF(key_share, BLS_KEY_LENGTH); SAFE_CHAR_BUF(arr_skey_str, BUF_LEN); mpz_get_str(arr_skey_str, 16, bls_key); int n_zeroes = 64 - strlen(arr_skey_str); for (int i = 0; i < n_zeroes; i++) { key_share[i] = '0'; } strncpy(key_share + n_zeroes, arr_skey_str, 65 - n_zeroes); key_share[BLS_KEY_LENGTH - 1] = 0; status = AES_encrypt(key_share, encr_bls_key, BUF_LEN); CHECK_STATUS2("aes encrypt bls private key failed with status %d "); *enc_bls_key_len = strlen(key_share) + SGX_AESGCM_MAC_SIZE + SGX_AESGCM_IV_SIZE; SET_SUCCESS clean: mpz_clear(bls_key); mpz_clear(sum); mpz_clear(q); LOG_INFO(__FUNCTION__ ); LOG_INFO("SGX call completed"); } void trustedGetBlsPubKeyAES(int *errStatus, char *errString, uint8_t *encryptedPrivateKey, uint64_t key_len, char *bls_pub_key) { LOG_DEBUG(__FUNCTION__); INIT_ERROR_STATE CHECK_STATE(bls_pub_key); CHECK_STATE(encryptedPrivateKey); SAFE_CHAR_BUF(skey_hex, ECDSA_SKEY_LEN); int status = AES_decrypt(encryptedPrivateKey, key_len, skey_hex, ECDSA_SKEY_LEN); CHECK_STATUS2("AES decrypt failed %d"); skey_hex[ECDSA_SKEY_LEN - 1] = 0; status = calc_bls_public_key(skey_hex, bls_pub_key); CHECK_STATUS("could not calculate bls public key"); SET_SUCCESS static uint64_t counter = 0; clean: if (counter % 1000 == 0) { LOG_INFO(__FUNCTION__); LOG_INFO("Thousand SGX calls completed"); } counter++; }
/* Modifications Copyright (C) 2019-2020 SKALE Labs Copyright 2018 Intel Corporation Redistribution and use in source and binary forms, with or without modification, are permitted provided that the following conditions are met: 1. Redistributions of source code must retain the above copyright notice, this list of conditions and the following disclaimer. 2. Redistributions in binary form must reproduce the above copyright notice, this list of conditions and the following disclaimer in the documentation and/or other materials provided with the distribution. 3. Neither the name of the copyright holder nor the names of its contributors may be used to endorse or promote products derived from this software without specific prior written permission. THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 
*/ #include <math.h> #include <string.h> #include <stdio.h> #include <stdbool.h> #include <assert.h> #include "secure_enclave_t.h" #include "sgx_tcrypto.h" #include "sgx_tseal.h" #include <sgx_tgmp.h> #include <sgx_trts.h> #include <sgx_key.h> #include "Point.h" #include "DomainParameters.h" #include "Signature.h" #include "Curves.h" #include "DHDkg.h" #include "AESUtils.h" #include "EnclaveConstants.h" #include "EnclaveCommon.h" #include "SIGNED_ENCLAVE_VERSION" #define STRINGIFY(x) #x #define TOSTRING(x) STRINGIFY(x) #define INIT_ERROR_STATE *errString = 0; *errStatus = UNKNOWN_ERROR; #define SET_SUCCESS *errStatus = 0; #define CHECK_STATE(_EXPRESSION_) \ if (!(_EXPRESSION_)) { \ LOG_ERROR("State check failed::");LOG_ERROR(#_EXPRESSION_); \ LOG_ERROR((const char*) __FILE__); \ snprintf(errString, BUF_LEN, "State check failed. Check log."); \ *errStatus = -1; \ return;} #define CHECK_STATE_CLEAN(_EXPRESSION_) \ if (!(_EXPRESSION_)) { \ LOG_ERROR("State check failed::");LOG_ERROR(#_EXPRESSION_); \ LOG_ERROR(__FILE__); LOG_ERROR(__LINE__); \ snprintf(errString, BUF_LEN, "State check failed. 
Check log."); \ *errStatus = -1; \ goto clean;} #define CHECK_STATUS(__ERRMESSAGE__) if (status != SGX_SUCCESS) { \ LOG_ERROR(__FUNCTION__); \ snprintf(errString, BUF_LEN, "failed with status %d : %s", status, __ERRMESSAGE__); \ LOG_ERROR(errString); \ *errStatus = status; \ goto clean; \ }; #define CHECK_STATUS2(__ERRMESSAGE__) if (status != SGX_SUCCESS) { \ snprintf(errString, BUF_LEN, __ERRMESSAGE__, status); \ LOG_ERROR(errString); \ *errStatus = status; \ goto clean; \ }; void *(*gmp_realloc_func)(void *, size_t, size_t); void *(*oc_realloc_func)(void *, size_t, size_t); void (*gmp_free_func)(void *, size_t); void (*oc_free_func)(void *, size_t); void *reallocate_function(void *, size_t, size_t); void free_function(void *, size_t); unsigned char *globalRandom = NULL; #define CALL_ONCE \ static volatile bool called = false;\ if (called) { \ LOG_ERROR(__FUNCTION__); \ LOG_ERROR("This function shouldnt be called twice. Aborting!"); \ abort(); \ } else {called = true;}; void trustedEnclaveInit(uint64_t _logLevel) { CALL_ONCE LOG_INFO(__FUNCTION__); globalLogLevel_ = _logLevel; oc_realloc_func = &reallocate_function; oc_free_func = &free_function; LOG_INFO("Setting memory functions"); mp_get_memory_functions(NULL, &gmp_realloc_func, &gmp_free_func); mp_set_memory_functions(NULL, oc_realloc_func, oc_free_func); LOG_INFO("Calling enclave init"); enclave_init(); LOG_INFO("Reading random"); globalRandom = calloc(32,1); int ret = sgx_read_rand(globalRandom, 32); if(ret != SGX_SUCCESS) { LOG_ERROR("sgx_read_rand failed. Aboring enclave."); abort(); } LOG_INFO("Successfully inited enclave. Signed enclave version:" SIGNED_ENCLAVE_VERSION ); #ifndef SGX_DEBUG LOG_INFO("SECURITY WARNING: sgxwallet is running in INSECURE DEBUG MODE! NEVER USE IN PRODUCTION!"); #endif #if SGX_DEBUG != 0 LOG_INFO("SECURITY WARNING: sgxwallet is running in INSECURE DEBUG MODE! 
NEVER USE IN PRODUCTION!"); #endif #if SGX_MODE == SIM LOG_INFO("SECURITY WARNING: sgxwallet is running in INSECURE SIMULATION MODE! NEVER USE IN PRODUCTION!"); #endif } void free_function(void *ptr, size_t sz) { if (sgx_is_within_enclave(ptr, sz)) gmp_free_func(ptr, sz); else { sgx_status_t status; status = oc_free(ptr, sz); if (status != SGX_SUCCESS) abort(); } } void *reallocate_function(void *ptr, size_t osize, size_t nsize) { uint64_t nptr; sgx_status_t status; if (sgx_is_within_enclave(ptr, osize)) { return gmp_realloc_func(ptr, osize, nsize); } status = oc_realloc(&nptr, ptr, osize, nsize); if (status != SGX_SUCCESS) abort(); /* * If the entire range of allocated memory is not outside the enclave * then something truly terrible has happened. In theory, we could * free() and try again, but would you trust the OS at this point? */ if (!sgx_is_outside_enclave((void *) ptr, nsize)) abort(); return (void *) nptr; } void get_global_random(unsigned char *_randBuff, uint64_t _size) { char errString[BUF_LEN]; int status; int *errStatus = &status; INIT_ERROR_STATE CHECK_STATE(_size <= 32) CHECK_STATE(_randBuff); sgx_sha_state_handle_t shaStateHandle; CHECK_STATE(sgx_sha256_init(&shaStateHandle) == SGX_SUCCESS); CHECK_STATE(sgx_sha256_update(globalRandom, 32, shaStateHandle) == SGX_SUCCESS); CHECK_STATE(sgx_sha256_get_hash(shaStateHandle, (sgx_sha256_hash_t *)globalRandom) == SGX_SUCCESS); CHECK_STATE(sgx_sha256_close(shaStateHandle) == SGX_SUCCESS); memcpy(_randBuff, globalRandom, _size); } void sealHexSEK(int *errStatus, char *errString, uint8_t *encrypted_sek, uint64_t *enc_len, char *sek_hex) { CALL_ONCE LOG_INFO(__FUNCTION__); INIT_ERROR_STATE CHECK_STATE(encrypted_sek); CHECK_STATE(sek_hex); CHECK_STATE(strnlen(sek_hex, 33) == 32) uint64_t plaintextLen = strlen(sek_hex) + 1; uint64_t sealedLen = sgx_calc_sealed_data_size(0, plaintextLen); sgx_attributes_t attribute_mask; attribute_mask.flags = 0xfffffffffffffff3; attribute_mask.xfrm = 0x0; sgx_misc_select_t misc 
= 0xF0000000; sgx_status_t status = sgx_seal_data_ex(SGX_KEYPOLICY_MRENCLAVE, attribute_mask, misc, 0, NULL, plaintextLen, (uint8_t *) sek_hex, sealedLen, (sgx_sealed_data_t *) encrypted_sek); CHECK_STATUS("seal SEK failed after SEK generation"); uint64_t encrypt_text_length = sgx_get_encrypt_txt_len((const sgx_sealed_data_t *)encrypted_sek); CHECK_STATE(encrypt_text_length = plaintextLen); SAFE_CHAR_BUF(unsealedKey, BUF_LEN); uint32_t decLen = BUF_LEN; uint64_t add_text_length = sgx_get_add_mac_txt_len((const sgx_sealed_data_t *)encrypted_sek); CHECK_STATE(add_text_length == 0); CHECK_STATE(sgx_is_within_enclave(encrypted_sek,sizeof(sgx_sealed_data_t))); status = sgx_unseal_data((const sgx_sealed_data_t *)encrypted_sek, NULL, NULL, (uint8_t *) unsealedKey, &decLen ); CHECK_STATUS("seal/unseal SEK failed after SEK generation in unseal"); *enc_len = sealedLen; SET_SUCCESS clean: LOG_INFO(__FUNCTION__ ); LOG_INFO("SGX call completed"); } void trustedGenerateSEK(int *errStatus, char *errString, uint8_t *encrypted_sek, uint64_t *enc_len, char *sek_hex) { CALL_ONCE LOG_INFO(__FUNCTION__); INIT_ERROR_STATE CHECK_STATE(encrypted_sek); CHECK_STATE(sek_hex); RANDOM_CHAR_BUF(SEK_raw, SGX_AESGCM_KEY_SIZE); carray2Hex((uint8_t*) SEK_raw, SGX_AESGCM_KEY_SIZE, sek_hex); memcpy(AES_key, SEK_raw, SGX_AESGCM_KEY_SIZE); derive_DH_Key(); sealHexSEK(errStatus, errString, encrypted_sek, enc_len, sek_hex); if (*errStatus != 0) { LOG_ERROR("sealHexSEK failed"); goto clean; } SET_SUCCESS clean: LOG_INFO(__FUNCTION__ ); LOG_INFO("SGX call completed"); } void trustedSetSEK(int *errStatus, char *errString, uint8_t *encrypted_sek) { CALL_ONCE LOG_INFO(__FUNCTION__); INIT_ERROR_STATE CHECK_STATE(encrypted_sek); SAFE_CHAR_BUF(aes_key_hex, BUF_LEN); uint32_t dec_len = BUF_LEN; sgx_status_t status = sgx_unseal_data( (const sgx_sealed_data_t *) encrypted_sek, NULL, 0, (uint8_t *)aes_key_hex, &dec_len); if (status == 0x3001) { LOG_ERROR("Could not decrypt LevelDB storage! 
\n" "If you upgraded sgxwallet software or if you are restoring from backup, please run sgxwallet with -b flag and " "pass your backup key."); } CHECK_STATUS2("sgx unseal SEK failed with status %d"); uint64_t len; hex2carray(aes_key_hex, &len, (uint8_t *) AES_key); derive_DH_Key(); SET_SUCCESS clean: LOG_INFO(__FUNCTION__ ); LOG_INFO("SGX call completed"); } void trustedSetSEK_backup(int *errStatus, char *errString, uint8_t *encrypted_sek, uint64_t *enc_len, const char *sek_hex) { CALL_ONCE LOG_INFO(__FUNCTION__); INIT_ERROR_STATE CHECK_STATE(encrypted_sek); CHECK_STATE(sek_hex); uint64_t len; hex2carray(sek_hex, &len, (uint8_t *) AES_key); derive_DH_Key(); sealHexSEK(errStatus, errString, encrypted_sek, enc_len, (char *)sek_hex); if (*errStatus != 0) { LOG_ERROR("sealHexSEK failed"); goto clean; } SET_SUCCESS clean: ; LOG_INFO(__FUNCTION__ ); LOG_INFO("SGX call completed"); } void trustedGenerateEcdsaKeyAES(int *errStatus, char *errString, uint8_t *encryptedPrivateKey, uint64_t *enc_len, char *pub_key_x, char *pub_key_y) { LOG_INFO(__FUNCTION__); INIT_ERROR_STATE CHECK_STATE(encryptedPrivateKey); CHECK_STATE(pub_key_x); CHECK_STATE(pub_key_y); RANDOM_CHAR_BUF(rand_char, 32); mpz_t seed; mpz_init(seed); mpz_t skey; mpz_init(skey); point Pkey = point_init(); mpz_import(seed, 32, 1, sizeof(rand_char[0]), 0, 0, rand_char); mpz_mod(skey, seed, curve->p); signature_extract_public_key(Pkey, skey, curve); SAFE_CHAR_BUF(arr_x, BUF_LEN); mpz_get_str(arr_x, ECDSA_SKEY_BASE, Pkey->x); int n_zeroes = 64 - strlen(arr_x); for (int i = 0; i < n_zeroes; i++) { pub_key_x[i] = '0'; } strncpy(pub_key_x + n_zeroes, arr_x, 1024 - n_zeroes); SAFE_CHAR_BUF(arr_y, BUF_LEN); mpz_get_str(arr_y, ECDSA_SKEY_BASE, Pkey->y); n_zeroes = 64 - strlen(arr_y); for (int i = 0; i < n_zeroes; i++) { pub_key_y[i] = '0'; } strncpy(pub_key_y + n_zeroes, arr_y, 1024 - n_zeroes); SAFE_CHAR_BUF(skey_str, BUF_LEN); SAFE_CHAR_BUF(arr_skey_str, mpz_sizeinbase(skey, ECDSA_SKEY_BASE) + 2); 
mpz_get_str(arr_skey_str, ECDSA_SKEY_BASE, skey); n_zeroes = 64 - strlen(arr_skey_str); for (int i = 0; i < n_zeroes; i++) { skey_str[i] = '0'; } strncpy(skey_str + n_zeroes, arr_skey_str, 65 - n_zeroes); snprintf(errString, BUF_LEN, "skey len is %d\n", (int) strlen(skey_str)); int status = AES_encrypt((char *) skey_str, encryptedPrivateKey, BUF_LEN, ECDSA, NON_DECRYPTABLE, enc_len); CHECK_STATUS("ecdsa private key encryption failed"); status = AES_decrypt(encryptedPrivateKey, *enc_len, skey_str, BUF_LEN); CHECK_STATUS2("ecdsa private key decr failed with status %d"); SET_SUCCESS clean: mpz_clear(seed); mpz_clear(skey); point_clear(Pkey); LOG_INFO(__FUNCTION__ ); LOG_INFO("SGX call completed"); } void trustedGetPublicEcdsaKeyAES(int *errStatus, char *errString, uint8_t *encryptedPrivateKey, uint64_t enc_len, char *pub_key_x, char *pub_key_y) { LOG_DEBUG(__FUNCTION__); INIT_ERROR_STATE SAFE_CHAR_BUF(skey, BUF_LEN); mpz_t privateKeyMpz; mpz_init(privateKeyMpz); point pKey = point_init(); point pKey_test = point_init(); CHECK_STATE(encryptedPrivateKey); CHECK_STATE(pub_key_x); CHECK_STATE(pub_key_y); int status = AES_decrypt(encryptedPrivateKey, enc_len, skey, BUF_LEN); CHECK_STATUS2("AES_decrypt failed with status %d"); skey[enc_len - SGX_AESGCM_MAC_SIZE - SGX_AESGCM_IV_SIZE] = '\0'; strncpy(errString, skey, 1024); status = mpz_set_str(privateKeyMpz, skey, ECDSA_SKEY_BASE); CHECK_STATUS("mpz_set_str failed for private key"); signature_extract_public_key(pKey, privateKeyMpz, curve); point_multiplication(pKey_test, privateKeyMpz, curve->G, curve); if (!point_cmp(pKey, pKey_test)) { snprintf(errString, BUF_LEN, "Points are not equal"); LOG_ERROR(errString); *errStatus = -11; goto clean; } SAFE_CHAR_BUF(arr_x, BUF_LEN); mpz_get_str(arr_x, ECDSA_SKEY_BASE, pKey->x); int n_zeroes = 64 - strlen(arr_x); for (int i = 0; i < n_zeroes; i++) { pub_key_x[i] = '0'; } strncpy(pub_key_x + n_zeroes, arr_x, 1024 - n_zeroes); SAFE_CHAR_BUF(arr_y, BUF_LEN); mpz_get_str(arr_y, 
ECDSA_SKEY_BASE, pKey->y); n_zeroes = 64 - strlen(arr_y); for (int i = 0; i < n_zeroes; i++) { pub_key_y[i] = '0'; } strncpy(pub_key_y + n_zeroes, arr_y, 1024 - n_zeroes); SET_SUCCESS clean: mpz_clear(privateKeyMpz); point_clear(pKey); point_clear(pKey_test); static uint64_t counter = 0; if (counter % 1000 == 0) { LOG_INFO(__FUNCTION__); LOG_INFO("Thousand SGX calls completed"); } counter++; } static uint64_t sigCounter = 0; void trustedEcdsaSignAES(int *errStatus, char *errString, uint8_t *encryptedPrivateKey, uint64_t enc_len, const char *hash, char *sigR, char *sigS, uint8_t *sig_v, int base) { LOG_DEBUG(__FUNCTION__); INIT_ERROR_STATE CHECK_STATE(encryptedPrivateKey); CHECK_STATE(hash); CHECK_STATE(sigR); CHECK_STATE(sigS); SAFE_CHAR_BUF(skey, BUF_LEN); mpz_t privateKeyMpz; mpz_init(privateKeyMpz); mpz_t msgMpz; mpz_init(msgMpz); signature sign = signature_init(); int status = AES_decrypt(encryptedPrivateKey, enc_len, skey, BUF_LEN); CHECK_STATUS2("aes decrypt failed with status %d"); skey[enc_len - SGX_AESGCM_MAC_SIZE - SGX_AESGCM_IV_SIZE] = '\0'; if (mpz_set_str(privateKeyMpz, skey, ECDSA_SKEY_BASE) == -1) { *errStatus = -1; snprintf(errString, BUF_LEN, "invalid secret key"); LOG_ERROR(errString); goto clean; } if (mpz_set_str(msgMpz, hash, 16) == -1) { *errStatus = -1; snprintf(errString, BUF_LEN, "invalid message hash"); LOG_ERROR(errString); goto clean; } signature_sign(sign, msgMpz, privateKeyMpz, curve); sigCounter++; if (sigCounter % 1000 == 0) { point Pkey = point_init(); signature_extract_public_key(Pkey, privateKeyMpz, curve); if (!signature_verify(msgMpz, sign, Pkey, curve)) { *errStatus = -2; snprintf(errString, BUF_LEN, "signature is not verified! 
"); point_clear(Pkey); goto clean; } point_clear(Pkey); } SAFE_CHAR_BUF(arrM, BUF_LEN); mpz_get_str(arrM, 16, msgMpz); snprintf(errString, BUF_LEN, "message is %s ", arrM); SAFE_CHAR_BUF(arrR, BUF_LEN); mpz_get_str(arrR, base, sign->r); strncpy(sigR, arrR, 1024); SAFE_CHAR_BUF(arrS, BUF_LEN); mpz_get_str(arrS, base, sign->s); strncpy(sigS, arrS, 1024); *sig_v = sign->v; SET_SUCCESS clean: mpz_clear(privateKeyMpz); mpz_clear(msgMpz); signature_free(sign); LOG_DEBUG(__FUNCTION__ ); LOG_DEBUG("SGX call completed"); } void trustedDecryptKeyAES(int *errStatus, char *errString, uint8_t *encryptedPrivateKey, uint64_t enc_len, char *key) { LOG_DEBUG(__FUNCTION__); INIT_ERROR_STATE CHECK_STATE(encryptedPrivateKey); CHECK_STATE(key); *errStatus = -9; int status = AES_decrypt(encryptedPrivateKey, enc_len, key, 3072); if (status != 0) { *errStatus = status; snprintf(errString, BUF_LEN, "aes decrypt failed with status %d", status); LOG_ERROR(errString); goto clean; } *errStatus = -10; uint64_t keyLen = strnlen(key, MAX_KEY_LENGTH); if (keyLen == MAX_KEY_LENGTH) { snprintf(errString, BUF_LEN, "Key is not null terminated"); LOG_ERROR(errString); goto clean; } SET_SUCCESS clean: ; } void trustedEncryptKeyAES(int *errStatus, char *errString, const char *key, uint8_t *encryptedPrivateKey, uint64_t *enc_len) { LOG_INFO(__FUNCTION__); *errString = 0; *errStatus = UNKNOWN_ERROR; CHECK_STATE(key); CHECK_STATE(encryptedPrivateKey); *errStatus = UNKNOWN_ERROR; int status = AES_encrypt((char *)key, encryptedPrivateKey, BUF_LEN, DKG, DECRYPTABLE, enc_len); CHECK_STATUS2("AES encrypt failed with status %d"); SAFE_CHAR_BUF(decryptedKey, BUF_LEN); status = AES_decrypt(encryptedPrivateKey, *enc_len, decryptedKey, BUF_LEN); CHECK_STATUS2("trustedDecryptKey failed with status %d"); uint64_t decryptedKeyLen = strnlen(decryptedKey, MAX_KEY_LENGTH); if (decryptedKeyLen == MAX_KEY_LENGTH) { snprintf(errString, BUF_LEN, "Decrypted key is not null terminated"); LOG_ERROR(errString); goto clean; } 
*errStatus = -8; if (strncmp(key, decryptedKey, MAX_KEY_LENGTH) != 0) { snprintf(errString, BUF_LEN, "Decrypted key does not match original key"); LOG_ERROR(key); LOG_ERROR(decryptedKey); LOG_ERROR(errString); goto clean; } SET_SUCCESS clean: ; LOG_INFO(__FUNCTION__ ); LOG_INFO("SGX call completed"); } void trustedBlsSignMessageAES(int *errStatus, char *errString, uint8_t *encryptedPrivateKey, uint64_t enc_len, char *_hashX, char *_hashY, char *signature) { LOG_DEBUG(__FUNCTION__); INIT_ERROR_STATE CHECK_STATE(encryptedPrivateKey); CHECK_STATE(_hashX); CHECK_STATE(_hashY); CHECK_STATE(signature); SAFE_CHAR_BUF(key, BUF_LEN);SAFE_CHAR_BUF(sig, BUF_LEN); int status = AES_decrypt(encryptedPrivateKey, enc_len, key, BUF_LEN); CHECK_STATUS("AES decrypt failed") if (!enclave_sign(key, _hashX, _hashY, sig)) { strncpy(errString, "Enclave failed to create bls signature", BUF_LEN); LOG_ERROR(errString); *errStatus = -1; goto clean; } strncpy(signature, sig, BUF_LEN); if (strnlen(signature, BUF_LEN) < 10) { strncpy(errString, "Signature too short", BUF_LEN); LOG_ERROR(errString); *errStatus = -1; goto clean; } SET_SUCCESS LOG_DEBUG("SGX call completed"); clean: ; LOG_DEBUG("SGX call completed"); } void trustedGenDkgSecretAES(int *errStatus, char *errString, uint8_t *encrypted_dkg_secret, uint64_t *enc_len, size_t _t) { LOG_INFO(__FUNCTION__); INIT_ERROR_STATE CHECK_STATE(encrypted_dkg_secret); SAFE_CHAR_BUF(dkg_secret, DKG_BUFER_LENGTH); int status = gen_dkg_poly(dkg_secret, _t); CHECK_STATUS("gen_dkg_poly failed") status = AES_encrypt(dkg_secret, encrypted_dkg_secret, 3 * BUF_LEN, DKG, DECRYPTABLE, enc_len); CHECK_STATUS("SGX AES encrypt DKG poly failed"); SAFE_CHAR_BUF(decr_dkg_secret, DKG_BUFER_LENGTH); status = AES_decrypt(encrypted_dkg_secret, *enc_len, decr_dkg_secret, DKG_BUFER_LENGTH); CHECK_STATUS("aes decrypt dkg poly failed"); if (strcmp(dkg_secret, decr_dkg_secret) != 0) { snprintf(errString, BUF_LEN, "encrypted poly is not equal to decrypted poly"); 
LOG_ERROR(errString);
        *errStatus = -333;
        goto clean;
    }

    SET_SUCCESS
    clean:
    ;
    LOG_INFO(__FUNCTION__ );
    LOG_INFO("SGX call completed");
}

/*
 * Decrypt a sealed DKG secret polynomial into a caller-supplied buffer.
 * enc_len is the length of the encrypted blob; the plaintext cap passed to
 * AES_decrypt is 3072 bytes. NOTE(review): callers are assumed to provide at
 * least 3072 bytes in decrypted_dkg_secret — confirm at the call sites.
 */
void trustedDecryptDkgSecretAES(int *errStatus, char *errString, uint8_t *encrypted_dkg_secret,
                                uint64_t enc_len, uint8_t *decrypted_dkg_secret) {
    LOG_INFO(__FUNCTION__);
    INIT_ERROR_STATE

    CHECK_STATE(encrypted_dkg_secret);
    CHECK_STATE(decrypted_dkg_secret);

    int status = AES_decrypt(encrypted_dkg_secret, enc_len,
                             (char *) decrypted_dkg_secret, 3072);

    CHECK_STATUS2("aes decrypt data - encrypted_dkg_secret failed with status %d")

    SET_SUCCESS
    clean:
    ;
    LOG_INFO(__FUNCTION__ );
    LOG_INFO("SGX call completed");
}

/*
 * Decrypt an encrypted DKG polynomial into this thread's thread-local poly
 * buffer; subsequent share computations read the poly from there.
 */
void trustedSetEncryptedDkgPolyAES(int *errStatus, char *errString, uint8_t *encrypted_poly,
                                   uint64_t enc_len) {
    LOG_INFO(__FUNCTION__);
    INIT_ERROR_STATE

    CHECK_STATE(encrypted_poly);

    /* Zero the buffer first so stale poly data cannot survive a failed decrypt. */
    memset(getThreadLocalDecryptedDkgPoly(), 0, DKG_BUFER_LENGTH);

    int status = AES_decrypt(encrypted_poly, enc_len,
                             (char *) getThreadLocalDecryptedDkgPoly(), DKG_BUFER_LENGTH);

    CHECK_STATUS2("sgx_unseal_data - encrypted_poly failed with status %d")

    SET_SUCCESS
    clean:
    ;
    LOG_INFO(__FUNCTION__ );
    LOG_INFO("SGX call completed");
}

/*
 * Build the encrypted secret share for participant `ind` of a (_t, _n) DKG:
 * generates an ephemeral ECDSA key, derives a session key with pub_keyB, and
 * (continuing below) XOR-encrypts the poly share, returning
 * cypher || pub_key_x || pub_key_y in result_str and the share's G2 point in
 * s_shareG2.
 */
void trustedGetEncryptedSecretShareAES(int *errStatus, char *errString,
                                       uint8_t *encrypted_skey, uint64_t *dec_len,
                                       char *result_str, char *s_shareG2, char *pub_keyB,
                                       uint8_t _t, uint8_t _n, uint8_t ind) {
    LOG_INFO(__FUNCTION__);
    INIT_ERROR_STATE

    uint64_t enc_len;
    int status;

    CHECK_STATE(encrypted_skey);
    CHECK_STATE(result_str);
    CHECK_STATE(s_shareG2);
    CHECK_STATE(pub_keyB);

    LOG_DEBUG(__FUNCTION__);

    SAFE_CHAR_BUF(skey, BUF_LEN);
    SAFE_CHAR_BUF(pub_key_x, BUF_LEN);SAFE_CHAR_BUF(pub_key_y, BUF_LEN);

    /* Fresh ephemeral ECDSA key; its public half is appended to result_str. */
    trustedGenerateEcdsaKeyAES(&status, errString, encrypted_skey, &enc_len,
                               pub_key_x, pub_key_y);

    CHECK_STATUS("trustedGenerateEcdsaKeyAES failed");

    status = AES_decrypt(encrypted_skey, enc_len, skey, BUF_LEN);

    /* Force NUL termination of the decrypted key before using it as a C string. */
    skey[ECDSA_SKEY_LEN - 1] = 0;

    CHECK_STATUS2("AES_decrypt failed (in trustedGetEncryptedSecretShareAES) with status %d");

    *dec_len = 
enc_len; SAFE_CHAR_BUF(common_key, BUF_LEN); status = gen_session_key(skey, pub_keyB, common_key); CHECK_STATUS("gen_session_key failed") SAFE_CHAR_BUF(s_share, BUF_LEN); status = calc_secret_share(getThreadLocalDecryptedDkgPoly(), s_share, _t, _n, ind); CHECK_STATUS("calc secret share failed") status = calc_secret_shareG2(s_share, s_shareG2); CHECK_STATUS("invalid decr secret share"); SAFE_CHAR_BUF(cypher, BUF_LEN); status=xor_encrypt(common_key, s_share, cypher); CHECK_STATUS("xor_encrypt failed") strncpy(result_str, cypher, strlen(cypher)); strncpy(result_str + strlen(cypher), pub_key_x, strlen(pub_key_x)); strncpy(result_str + strlen(pub_key_x) + strlen(pub_key_y), pub_key_y, strlen(pub_key_y)); SET_SUCCESS clean: ; LOG_INFO(__FUNCTION__ ); LOG_INFO("SGX call completed"); } void trustedGetPublicSharesAES(int *errStatus, char *errString, uint8_t *encrypted_dkg_secret, uint64_t enc_len, char *public_shares, unsigned _t, unsigned _n) { LOG_INFO(__FUNCTION__); INIT_ERROR_STATE CHECK_STATE(encrypted_dkg_secret); CHECK_STATE(public_shares); CHECK_STATE(_t <= _n && _n > 0) SAFE_CHAR_BUF(decrypted_dkg_secret, DKG_MAX_SEALED_LEN); int status = AES_decrypt(encrypted_dkg_secret, enc_len, decrypted_dkg_secret, DKG_MAX_SEALED_LEN); CHECK_STATUS2("aes decrypt data - encrypted_dkg_secret failed with status %d"); status = calc_public_shares(decrypted_dkg_secret, public_shares, _t) != 0; CHECK_STATUS("t does not match polynomial in db"); SET_SUCCESS clean: ; LOG_INFO("SGX call completed"); } void trustedDkgVerifyAES(int *errStatus, char *errString, const char *public_shares, const char *s_share, uint8_t *encryptedPrivateKey, uint64_t enc_len, unsigned _t, int _ind, int *result) { LOG_INFO(__FUNCTION__); INIT_ERROR_STATE CHECK_STATE(public_shares); CHECK_STATE(s_share); CHECK_STATE(encryptedPrivateKey); SAFE_CHAR_BUF(skey,BUF_LEN); mpz_t s; mpz_init(s); int status = AES_decrypt(encryptedPrivateKey, enc_len, skey, BUF_LEN); CHECK_STATUS2("AES_decrypt failed (in 
trustedDkgVerifyAES) with status %d"); SAFE_CHAR_BUF(encr_sshare, BUF_LEN); strncpy(encr_sshare, s_share, ECDSA_SKEY_LEN - 1); SAFE_CHAR_BUF(common_key, BUF_LEN); status = session_key_recover(skey, s_share, common_key); CHECK_STATUS("session_key_recover failed"); SAFE_CHAR_BUF(decr_sshare, BUF_LEN); status=xor_decrypt(common_key, encr_sshare, decr_sshare); CHECK_STATUS("xor_decrypt failed") status = mpz_set_str(s, decr_sshare, 16); CHECK_STATUS("invalid decr secret share"); *result = Verification(public_shares, s, _t, _ind); SET_SUCCESS clean: mpz_clear(s); LOG_INFO(__FUNCTION__ ); LOG_INFO("SGX call completed"); } void trustedCreateBlsKeyAES(int *errStatus, char *errString, const char *s_shares, uint8_t *encryptedPrivateKey, uint64_t key_len, uint8_t *encr_bls_key, uint64_t *enc_bls_key_len) { LOG_INFO(__FUNCTION__); INIT_ERROR_STATE CHECK_STATE(s_shares); CHECK_STATE(encryptedPrivateKey); CHECK_STATE(encr_bls_key); SAFE_CHAR_BUF(skey, BUF_LEN); mpz_t sum; mpz_init(sum); mpz_set_ui(sum, 0); mpz_t q; mpz_init(q); mpz_set_str(q, "21888242871839275222246405745257275088548364400416034343698204186575808495617", 10); mpz_t bls_key; mpz_init(bls_key); int status = AES_decrypt(encryptedPrivateKey, key_len, skey, BUF_LEN); CHECK_STATUS2("aes decrypt failed with status %d"); skey[ECDSA_SKEY_LEN - 1] = 0; int num_shares = strlen(s_shares) / 192; for (int i = 0; i < num_shares; i++) { SAFE_CHAR_BUF(encr_sshare, 65); strncpy(encr_sshare, s_shares + 192 * i, 64); encr_sshare[64] = 0; SAFE_CHAR_BUF(s_share, 193); strncpy(s_share, s_shares + 192 * i, 192); s_share[192] = 0; SAFE_CHAR_BUF(common_key, 65); status = session_key_recover(skey, s_share, common_key); CHECK_STATUS("session_key_recover failed"); common_key[64] = 0; SAFE_CHAR_BUF(decr_sshare, 65); status = xor_decrypt(common_key, encr_sshare, decr_sshare); CHECK_STATUS("xor_decrypt failed"); decr_sshare[64] = 0; mpz_t decr_secret_share; mpz_init(decr_secret_share); if (mpz_set_str(decr_secret_share, decr_sshare, 16) == -1) 
{ *errStatus = 111; snprintf(errString, BUF_LEN, "invalid decrypted secret share"); LOG_ERROR(errString); mpz_clear(decr_secret_share); goto clean; } mpz_addmul_ui(sum, decr_secret_share, 1); mpz_clear(decr_secret_share); } mpz_mod(bls_key, sum, q); SAFE_CHAR_BUF(key_share, BLS_KEY_LENGTH); SAFE_CHAR_BUF(arr_skey_str, BUF_LEN); mpz_get_str(arr_skey_str, 16, bls_key); int n_zeroes = 64 - strlen(arr_skey_str); for (int i = 0; i < n_zeroes; i++) { key_share[i] = '0'; } strncpy(key_share + n_zeroes, arr_skey_str, 65 - n_zeroes); key_share[BLS_KEY_LENGTH - 1] = 0; status = AES_encrypt(key_share, encr_bls_key, BUF_LEN, BLS, NON_DECRYPTABLE, enc_bls_key_len); CHECK_STATUS2("aes encrypt bls private key failed with status %d "); SET_SUCCESS clean: mpz_clear(bls_key); mpz_clear(sum); mpz_clear(q); LOG_INFO(__FUNCTION__ ); LOG_INFO("SGX call completed"); } void trustedGetBlsPubKeyAES(int *errStatus, char *errString, uint8_t *encryptedPrivateKey, uint64_t key_len, char *bls_pub_key) { LOG_DEBUG(__FUNCTION__); INIT_ERROR_STATE CHECK_STATE(bls_pub_key); CHECK_STATE(encryptedPrivateKey); SAFE_CHAR_BUF(skey_hex, BUF_LEN); int status = AES_decrypt(encryptedPrivateKey, key_len, skey_hex, BUF_LEN); CHECK_STATUS2("AES decrypt failed %d"); skey_hex[ECDSA_SKEY_LEN - 1] = 0; status = calc_bls_public_key(skey_hex, bls_pub_key); CHECK_STATUS("could not calculate bls public key"); SET_SUCCESS static uint64_t counter = 0; clean: if (counter % 1000 == 0) { LOG_INFO(__FUNCTION__); LOG_INFO("Thousand SGX calls completed"); } counter++; }
void trustedGetPublicEcdsaKeyAES(int *errStatus, char *errString, uint8_t *encryptedPrivateKey, uint32_t enc_len, char *pub_key_x, char *pub_key_y) { LOG_DEBUG(__FUNCTION__); INIT_ERROR_STATE SAFE_CHAR_BUF(skey, ECDSA_SKEY_LEN); mpz_t privateKeyMpz; mpz_init(privateKeyMpz); point pKey = point_init(); point pKey_test = point_init(); CHECK_STATE(encryptedPrivateKey); CHECK_STATE(pub_key_x); CHECK_STATE(pub_key_y); int status = AES_decrypt(encryptedPrivateKey, enc_len, skey, ECDSA_SKEY_LEN); CHECK_STATUS2("AES_decrypt failed with status %d"); skey[enc_len - SGX_AESGCM_MAC_SIZE - SGX_AESGCM_IV_SIZE] = '\0'; strncpy(errString, skey, 1024); status = mpz_set_str(privateKeyMpz, skey, ECDSA_SKEY_BASE); CHECK_STATUS("mpz_set_str failed for private key"); signature_extract_public_key(pKey, privateKeyMpz, curve); point_multiplication(pKey_test, privateKeyMpz, curve->G, curve); if (!point_cmp(pKey, pKey_test)) { snprintf(errString, BUF_LEN, "Points are not equal"); LOG_ERROR(errString); *errStatus = -11; goto clean; } SAFE_CHAR_BUF(arr_x, BUF_LEN); mpz_get_str(arr_x, ECDSA_SKEY_BASE, pKey->x); int n_zeroes = 64 - strlen(arr_x); for (int i = 0; i < n_zeroes; i++) { pub_key_x[i] = '0'; } strncpy(pub_key_x + n_zeroes, arr_x, 1024 - n_zeroes); SAFE_CHAR_BUF(arr_y, BUF_LEN); mpz_get_str(arr_y, ECDSA_SKEY_BASE, pKey->y); n_zeroes = 64 - strlen(arr_y); for (int i = 0; i < n_zeroes; i++) { pub_key_y[i] = '0'; } strncpy(pub_key_y + n_zeroes, arr_y, 1024 - n_zeroes); SET_SUCCESS clean: mpz_clear(privateKeyMpz); point_clear(pKey); point_clear(pKey_test); static uint64_t counter = 0; if (counter % 1000 == 0) { LOG_INFO(__FUNCTION__); LOG_INFO("Thousand SGX calls completed"); } counter++; }
void trustedGetPublicEcdsaKeyAES(int *errStatus, char *errString, uint8_t *encryptedPrivateKey, uint64_t enc_len, char *pub_key_x, char *pub_key_y) { LOG_DEBUG(__FUNCTION__); INIT_ERROR_STATE SAFE_CHAR_BUF(skey, BUF_LEN); mpz_t privateKeyMpz; mpz_init(privateKeyMpz); point pKey = point_init(); point pKey_test = point_init(); CHECK_STATE(encryptedPrivateKey); CHECK_STATE(pub_key_x); CHECK_STATE(pub_key_y); int status = AES_decrypt(encryptedPrivateKey, enc_len, skey, BUF_LEN); CHECK_STATUS2("AES_decrypt failed with status %d"); skey[enc_len - SGX_AESGCM_MAC_SIZE - SGX_AESGCM_IV_SIZE] = '\0'; strncpy(errString, skey, 1024); status = mpz_set_str(privateKeyMpz, skey, ECDSA_SKEY_BASE); CHECK_STATUS("mpz_set_str failed for private key"); signature_extract_public_key(pKey, privateKeyMpz, curve); point_multiplication(pKey_test, privateKeyMpz, curve->G, curve); if (!point_cmp(pKey, pKey_test)) { snprintf(errString, BUF_LEN, "Points are not equal"); LOG_ERROR(errString); *errStatus = -11; goto clean; } SAFE_CHAR_BUF(arr_x, BUF_LEN); mpz_get_str(arr_x, ECDSA_SKEY_BASE, pKey->x); int n_zeroes = 64 - strlen(arr_x); for (int i = 0; i < n_zeroes; i++) { pub_key_x[i] = '0'; } strncpy(pub_key_x + n_zeroes, arr_x, 1024 - n_zeroes); SAFE_CHAR_BUF(arr_y, BUF_LEN); mpz_get_str(arr_y, ECDSA_SKEY_BASE, pKey->y); n_zeroes = 64 - strlen(arr_y); for (int i = 0; i < n_zeroes; i++) { pub_key_y[i] = '0'; } strncpy(pub_key_y + n_zeroes, arr_y, 1024 - n_zeroes); SET_SUCCESS clean: mpz_clear(privateKeyMpz); point_clear(pKey); point_clear(pKey_test); static uint64_t counter = 0; if (counter % 1000 == 0) { LOG_INFO(__FUNCTION__); LOG_INFO("Thousand SGX calls completed"); } counter++; }
{'added': [(125, 'void trustedEnclaveInit(uint64_t _logLevel) {'), (235, ' uint8_t *encrypted_sek, uint64_t *enc_len, char *sek_hex) {'), (258, ' uint64_t encrypt_text_length = sgx_get_encrypt_txt_len((const sgx_sealed_data_t *)encrypted_sek);'), (266, ' uint64_t add_text_length = sgx_get_add_mac_txt_len((const sgx_sealed_data_t *)encrypted_sek);'), (282, ' uint8_t *encrypted_sek, uint64_t *enc_len, char *sek_hex) {'), (344, ' uint8_t *encrypted_sek, uint64_t *enc_len, const char *sek_hex) {'), (373, ' uint8_t *encryptedPrivateKey, uint64_t *enc_len, char *pub_key_x, char *pub_key_y) {'), (413, ' SAFE_CHAR_BUF(skey_str, BUF_LEN);'), (414, ' SAFE_CHAR_BUF(arr_skey_str, mpz_sizeinbase(skey, ECDSA_SKEY_BASE) + 2);'), (423, ' int status = AES_encrypt((char *) skey_str, encryptedPrivateKey, BUF_LEN,'), (424, ' ECDSA, NON_DECRYPTABLE, enc_len);'), (427, ' status = AES_decrypt(encryptedPrivateKey, *enc_len, skey_str, BUF_LEN);'), (441, ' uint8_t *encryptedPrivateKey, uint64_t enc_len, char *pub_key_x, char *pub_key_y) {'), (445, ' SAFE_CHAR_BUF(skey, BUF_LEN);'), (457, ' int status = AES_decrypt(encryptedPrivateKey, enc_len, skey, BUF_LEN);'), (517, 'void trustedEcdsaSignAES(int *errStatus, char *errString, uint8_t *encryptedPrivateKey, uint64_t enc_len,'), (528, ' SAFE_CHAR_BUF(skey, BUF_LEN);'), (536, ' int status = AES_decrypt(encryptedPrivateKey, enc_len, skey, BUF_LEN);'), (602, ' uint64_t enc_len, char *key) {'), (612, ' int status = AES_decrypt(encryptedPrivateKey, enc_len, key, 3072);'), (638, ' uint8_t *encryptedPrivateKey, uint64_t *enc_len) {'), (649, ' int status = AES_encrypt((char *)key, encryptedPrivateKey, BUF_LEN,'), (650, ' DKG, DECRYPTABLE, enc_len);'), (656, ' status = AES_decrypt(encryptedPrivateKey, *enc_len, decryptedKey, BUF_LEN);'), (672, ' LOG_ERROR(key);'), (673, ' LOG_ERROR(decryptedKey);'), (687, ' uint64_t enc_len, char *_hashX,'), (729, 'trustedGenDkgSecretAES(int *errStatus, char *errString, uint8_t *encrypted_dkg_secret, uint64_t *enc_len, 
size_t _t) {'), (741, ' status = AES_encrypt(dkg_secret, encrypted_dkg_secret, 3 * BUF_LEN,'), (742, ' DKG, DECRYPTABLE, enc_len);'), (746, ''), (772, ' uint64_t enc_len,'), (794, 'void trustedSetEncryptedDkgPolyAES(int *errStatus, char *errString, uint8_t *encrypted_poly, uint64_t enc_len) {'), (814, 'void trustedGetEncryptedSecretShareAES(int *errStatus, char *errString, uint8_t *encrypted_skey, uint64_t *dec_len,'), (821, ' uint64_t enc_len;'), (831, ' SAFE_CHAR_BUF(skey, BUF_LEN);'), (839, ' status = AES_decrypt(encrypted_skey, enc_len, skey, BUF_LEN);'), (847, ' SAFE_CHAR_BUF(common_key, BUF_LEN);'), (853, ' SAFE_CHAR_BUF(s_share, BUF_LEN);'), (862, ' SAFE_CHAR_BUF(cypher, BUF_LEN);'), (879, 'void trustedGetPublicSharesAES(int *errStatus, char *errString, uint8_t *encrypted_dkg_secret, uint64_t enc_len,'), (917, ' SAFE_CHAR_BUF(skey,BUF_LEN);'), (922, ' int status = AES_decrypt(encryptedPrivateKey, enc_len, skey, BUF_LEN);'), (926, ' SAFE_CHAR_BUF(encr_sshare, BUF_LEN);'), (930, ' SAFE_CHAR_BUF(common_key, BUF_LEN);'), (936, ' SAFE_CHAR_BUF(decr_sshare, BUF_LEN);'), (958, ' uint64_t *enc_bls_key_len) {'), (968, ' SAFE_CHAR_BUF(skey, BUF_LEN);'), (982, ' int status = AES_decrypt(encryptedPrivateKey, key_len, skey, BUF_LEN);'), (1042, ' status = AES_encrypt(key_share, encr_bls_key, BUF_LEN, BLS, NON_DECRYPTABLE, enc_bls_key_len);'), (1066, ' SAFE_CHAR_BUF(skey_hex, BUF_LEN);'), (1068, ' int status = AES_decrypt(encryptedPrivateKey, key_len, skey_hex, BUF_LEN);')], 'deleted': [(125, 'void trustedEnclaveInit(uint32_t _logLevel) {'), (235, ' uint8_t *encrypted_sek, uint32_t *enc_len, char *sek_hex) {'), (258, ' uint32_t encrypt_text_length = sgx_get_encrypt_txt_len((const sgx_sealed_data_t *)encrypted_sek);'), (266, ' uint32_t add_text_length = sgx_get_add_mac_txt_len((const sgx_sealed_data_t *)encrypted_sek);'), (282, ' uint8_t *encrypted_sek, uint32_t *enc_len, char *sek_hex) {'), (344, ' uint8_t *encrypted_sek, uint32_t *enc_len, const char *sek_hex) {'), (373, 
' uint8_t *encryptedPrivateKey, uint32_t *enc_len, char *pub_key_x, char *pub_key_y) {'), (413, ' SAFE_CHAR_BUF(skey_str, ECDSA_SKEY_LEN);SAFE_CHAR_BUF(arr_skey_str, mpz_sizeinbase(skey, ECDSA_SKEY_BASE) + 2);'), (420, ' skey_str[ECDSA_SKEY_LEN - 1] = 0;'), (423, ' int status = AES_encrypt((char *) skey_str, encryptedPrivateKey, BUF_LEN);'), (426, ' *enc_len = strlen(skey_str) + SGX_AESGCM_MAC_SIZE + SGX_AESGCM_IV_SIZE;'), (427, ''), (428, ' status = AES_decrypt(encryptedPrivateKey, *enc_len, skey_str, ECDSA_SKEY_LEN);'), (442, ' uint8_t *encryptedPrivateKey, uint32_t enc_len, char *pub_key_x, char *pub_key_y) {'), (446, ' SAFE_CHAR_BUF(skey, ECDSA_SKEY_LEN);'), (458, ' int status = AES_decrypt(encryptedPrivateKey, enc_len, skey, ECDSA_SKEY_LEN);'), (518, 'void trustedEcdsaSignAES(int *errStatus, char *errString, uint8_t *encryptedPrivateKey, uint32_t enc_len,'), (529, ' SAFE_CHAR_BUF(skey, ECDSA_SKEY_LEN);'), (537, ' int status = AES_decrypt(encryptedPrivateKey, enc_len, skey, ECDSA_SKEY_LEN);'), (603, ' uint32_t enc_len, char *key) {'), (613, ' int status = AES_decrypt_DH(encryptedPrivateKey, enc_len, key, 3072);'), (639, ' uint8_t *encryptedPrivateKey, uint32_t *enc_len) {'), (650, ' int status = AES_encrypt_DH((char *)key, encryptedPrivateKey, BUF_LEN);'), (654, ' *enc_len = strlen(key) + SGX_AESGCM_MAC_SIZE + SGX_AESGCM_IV_SIZE;'), (655, ''), (658, ' status = AES_decrypt_DH(encryptedPrivateKey, *enc_len, decryptedKey, BUF_LEN);'), (687, ' uint32_t enc_len, char *_hashX,'), (729, 'trustedGenDkgSecretAES(int *errStatus, char *errString, uint8_t *encrypted_dkg_secret, uint32_t *enc_len, size_t _t) {'), (741, ' status = AES_encrypt(dkg_secret, encrypted_dkg_secret, 3 * BUF_LEN);'), (745, ' *enc_len = strlen(dkg_secret) + SGX_AESGCM_MAC_SIZE + SGX_AESGCM_IV_SIZE;'), (771, ' uint32_t enc_len,'), (793, 'void trustedSetEncryptedDkgPolyAES(int *errStatus, char *errString, uint8_t *encrypted_poly, uint32_t enc_len) {'), (813, 'void trustedGetEncryptedSecretShareAES(int 
*errStatus, char *errString, uint8_t *encrypted_skey, uint32_t *dec_len,'), (820, ' uint32_t enc_len;'), (830, ' SAFE_CHAR_BUF(skey, ECDSA_SKEY_LEN);'), (838, ' status = AES_decrypt(encrypted_skey, enc_len, skey, ECDSA_SKEY_LEN);'), (846, ' SAFE_CHAR_BUF(common_key, ECDSA_SKEY_LEN);'), (852, ' SAFE_CHAR_BUF(s_share, ECDSA_SKEY_LEN);'), (861, ' SAFE_CHAR_BUF(cypher, ECDSA_SKEY_LEN);'), (878, 'void trustedGetPublicSharesAES(int *errStatus, char *errString, uint8_t *encrypted_dkg_secret, uint32_t enc_len,'), (916, ' SAFE_CHAR_BUF(skey, ECDSA_SKEY_LEN);'), (921, ' int status = AES_decrypt(encryptedPrivateKey, enc_len, skey, ECDSA_SKEY_LEN);'), (925, ' SAFE_CHAR_BUF(encr_sshare, ECDSA_SKEY_LEN);'), (929, ' SAFE_CHAR_BUF(common_key, ECDSA_SKEY_LEN);'), (935, ' SAFE_CHAR_BUF(decr_sshare, ECDSA_SKEY_LEN);'), (957, ' uint32_t *enc_bls_key_len) {'), (967, ' SAFE_CHAR_BUF(skey, ECDSA_SKEY_LEN);'), (981, ' int status = AES_decrypt(encryptedPrivateKey, key_len, skey, ECDSA_SKEY_LEN);'), (1041, ' status = AES_encrypt(key_share, encr_bls_key, BUF_LEN);'), (1045, ' *enc_bls_key_len = strlen(key_share) + SGX_AESGCM_MAC_SIZE + SGX_AESGCM_IV_SIZE;'), (1046, ''), (1067, ' SAFE_CHAR_BUF(skey_hex, ECDSA_SKEY_LEN);'), (1069, ' int status = AES_decrypt(encryptedPrivateKey, key_len, skey_hex, ECDSA_SKEY_LEN);')]}
52
53
678
4,334
52
363
5
https://github.com/skalenetwork/sgxwallet
CVE-2021-36218
CWE-787
1,794
makePreview.cpp
C++
generatePreview
/////////////////////////////////////////////////////////////////////////// // // Copyright (c) 2004, Industrial Light & Magic, a division of Lucas // Digital Ltd. LLC // // All rights reserved. // // Redistribution and use in source and binary forms, with or without // modification, are permitted provided that the following conditions are // met: // * Redistributions of source code must retain the above copyright // notice, this list of conditions and the following disclaimer. // * Redistributions in binary form must reproduce the above // copyright notice, this list of conditions and the following disclaimer // in the documentation and/or other materials provided with the // distribution. // * Neither the name of Industrial Light & Magic nor the names of // its contributors may be used to endorse or promote products derived // from this software without specific prior written permission. // // THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS // "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT // LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR // A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT // OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, // SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT // LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, // DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY // THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT // (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE // OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. // /////////////////////////////////////////////////////////////////////////// //---------------------------------------------------------------------------- // // Add a preview image to an OpenEXR file. 
// //---------------------------------------------------------------------------- #include "makePreview.h" #include <ImfInputFile.h> #include <ImfOutputFile.h> #include <ImfTiledOutputFile.h> #include <ImfRgbaFile.h> #include <ImfPreviewImage.h> #include <ImfArray.h> #include <ImathMath.h> #include <ImathFun.h> #include <math.h> #include <iostream> #include <algorithm> #include <OpenEXRConfig.h> using namespace OPENEXR_IMF_NAMESPACE; using namespace IMATH_NAMESPACE; using namespace std; namespace { float knee (float x, float f) { return log (x * f + 1) / f; } unsigned char gamma (half h, float m) { // // Conversion from half to unsigned char pixel data, // with gamma correction. The conversion is the same // as in the exrdisplay program's ImageView class, // except with defog, kneeLow, and kneeHigh fixed // at 0.0, 0.0, and 5.0 respectively. // float x = max (0.f, h * m); if (x > 1) x = 1 + knee (x - 1, 0.184874f); return (unsigned char) (IMATH_NAMESPACE::clamp (Math<float>::pow (x, 0.4545f) * 84.66f, 0.f, 255.f)); } void generatePreview (const char inFileName[], float exposure, int previewWidth, int &previewHeight, Array2D <PreviewRgba> &previewPixels) { // // Read the input file // RgbaInputFile in (inFileName); Box2i dw = in.dataWindow(); float a = in.pixelAspectRatio(); int w = dw.max.x - dw.min.x + 1; int h = dw.max.y - dw.min.y + 1; Array2D <Rgba> pixels (h, w); in.setFrameBuffer (ComputeBasePointer (&pixels[0][0], dw), 1, w); in.readPixels (dw.min.y, dw.max.y); // // Make a preview image // previewHeight = max (int (h / (w * a) * previewWidth + .5f), 1); previewPixels.resizeErase (previewHeight, previewWidth); float fx = (previewWidth > 0)? (float (w - 1) / (previewWidth - 1)): 1; float fy = (previewHeight > 0)? 
(float (h - 1) / (previewHeight - 1)): 1; float m = Math<float>::pow (2.f, IMATH_NAMESPACE::clamp (exposure + 2.47393f, -20.f, 20.f)); for (int y = 0; y < previewHeight; ++y) { for (int x = 0; x < previewWidth; ++x) { PreviewRgba &preview = previewPixels[y][x]; const Rgba &pixel = pixels[int (y * fy + .5f)][int (x * fx + .5f)]; preview.r = gamma (pixel.r, m); preview.g = gamma (pixel.g, m); preview.b = gamma (pixel.b, m); preview.a = int (IMATH_NAMESPACE::clamp (pixel.a * 255.f, 0.f, 255.f) + .5f); } } } } // namespace void makePreview (const char inFileName[], const char outFileName[], int previewWidth, float exposure, bool verbose) { if (verbose) cout << "generating preview image" << endl; Array2D <PreviewRgba> previewPixels; int previewHeight; generatePreview (inFileName, exposure, previewWidth, previewHeight, previewPixels); InputFile in (inFileName); Header header = in.header(); header.setPreviewImage (PreviewImage (previewWidth, previewHeight, &previewPixels[0][0])); if (verbose) cout << "copying " << inFileName << " to " << outFileName << endl; if (header.hasTileDescription()) { TiledOutputFile out (outFileName, header); out.copyPixels (in); } else { OutputFile out (outFileName, header); out.copyPixels (in); } if (verbose) cout << "done." << endl; }
/////////////////////////////////////////////////////////////////////////// // // Copyright (c) 2004, Industrial Light & Magic, a division of Lucas // Digital Ltd. LLC // // All rights reserved. // // Redistribution and use in source and binary forms, with or without // modification, are permitted provided that the following conditions are // met: // * Redistributions of source code must retain the above copyright // notice, this list of conditions and the following disclaimer. // * Redistributions in binary form must reproduce the above // copyright notice, this list of conditions and the following disclaimer // in the documentation and/or other materials provided with the // distribution. // * Neither the name of Industrial Light & Magic nor the names of // its contributors may be used to endorse or promote products derived // from this software without specific prior written permission. // // THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS // "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT // LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR // A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT // OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, // SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT // LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, // DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY // THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT // (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE // OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. // /////////////////////////////////////////////////////////////////////////// //---------------------------------------------------------------------------- // // Add a preview image to an OpenEXR file. 
// //---------------------------------------------------------------------------- #include "makePreview.h" #include <ImfInputFile.h> #include <ImfOutputFile.h> #include <ImfTiledOutputFile.h> #include <ImfRgbaFile.h> #include <ImfPreviewImage.h> #include <ImfArray.h> #include <ImathMath.h> #include <ImathFun.h> #include <math.h> #include <iostream> #include <algorithm> #include <OpenEXRConfig.h> using namespace OPENEXR_IMF_NAMESPACE; using namespace IMATH_NAMESPACE; using namespace std; namespace { float knee (float x, float f) { return log (x * f + 1) / f; } unsigned char gamma (half h, float m) { // // Conversion from half to unsigned char pixel data, // with gamma correction. The conversion is the same // as in the exrdisplay program's ImageView class, // except with defog, kneeLow, and kneeHigh fixed // at 0.0, 0.0, and 5.0 respectively. // float x = max (0.f, h * m); if (x > 1) x = 1 + knee (x - 1, 0.184874f); return (unsigned char) (IMATH_NAMESPACE::clamp (Math<float>::pow (x, 0.4545f) * 84.66f, 0.f, 255.f)); } void generatePreview (const char inFileName[], float exposure, int previewWidth, int &previewHeight, Array2D <PreviewRgba> &previewPixels) { // // Read the input file // RgbaInputFile in (inFileName); Box2i dw = in.dataWindow(); float a = in.pixelAspectRatio(); int w = dw.max.x - dw.min.x + 1; int h = dw.max.y - dw.min.y + 1; Array2D <Rgba> pixels (h, w); in.setFrameBuffer (ComputeBasePointer (&pixels[0][0], dw), 1, w); in.readPixels (dw.min.y, dw.max.y); // // Make a preview image // previewHeight = max (int (h / (w * a) * previewWidth + .5f), 1); previewPixels.resizeErase (previewHeight, previewWidth); float fx = (previewWidth > 1)? (float (w - 1) / (previewWidth - 1)): 1; float fy = (previewHeight > 1)? 
(float (h - 1) / (previewHeight - 1)): 1; float m = Math<float>::pow (2.f, IMATH_NAMESPACE::clamp (exposure + 2.47393f, -20.f, 20.f)); for (int y = 0; y < previewHeight; ++y) { for (int x = 0; x < previewWidth; ++x) { PreviewRgba &preview = previewPixels[y][x]; const Rgba &pixel = pixels[int (y * fy + .5f)][int (x * fx + .5f)]; preview.r = gamma (pixel.r, m); preview.g = gamma (pixel.g, m); preview.b = gamma (pixel.b, m); preview.a = int (IMATH_NAMESPACE::clamp (pixel.a * 255.f, 0.f, 255.f) + .5f); } } } } // namespace void makePreview (const char inFileName[], const char outFileName[], int previewWidth, float exposure, bool verbose) { if (verbose) cout << "generating preview image" << endl; Array2D <PreviewRgba> previewPixels; int previewHeight; generatePreview (inFileName, exposure, previewWidth, previewHeight, previewPixels); InputFile in (inFileName); Header header = in.header(); header.setPreviewImage (PreviewImage (previewWidth, previewHeight, &previewPixels[0][0])); if (verbose) cout << "copying " << inFileName << " to " << outFileName << endl; if (header.hasTileDescription()) { TiledOutputFile out (outFileName, header); out.copyPixels (in); } else { OutputFile out (outFileName, header); out.copyPixels (in); } if (verbose) cout << "done." << endl; }
generatePreview (const char inFileName[], float exposure, int previewWidth, int &previewHeight, Array2D <PreviewRgba> &previewPixels) { // // Read the input file // RgbaInputFile in (inFileName); Box2i dw = in.dataWindow(); float a = in.pixelAspectRatio(); int w = dw.max.x - dw.min.x + 1; int h = dw.max.y - dw.min.y + 1; Array2D <Rgba> pixels (h, w); in.setFrameBuffer (ComputeBasePointer (&pixels[0][0], dw), 1, w); in.readPixels (dw.min.y, dw.max.y); // // Make a preview image // previewHeight = max (int (h / (w * a) * previewWidth + .5f), 1); previewPixels.resizeErase (previewHeight, previewWidth); float fx = (previewWidth > 0)? (float (w - 1) / (previewWidth - 1)): 1; float fy = (previewHeight > 0)? (float (h - 1) / (previewHeight - 1)): 1; float m = Math<float>::pow (2.f, IMATH_NAMESPACE::clamp (exposure + 2.47393f, -20.f, 20.f)); for (int y = 0; y < previewHeight; ++y) { for (int x = 0; x < previewWidth; ++x) { PreviewRgba &preview = previewPixels[y][x]; const Rgba &pixel = pixels[int (y * fy + .5f)][int (x * fx + .5f)]; preview.r = gamma (pixel.r, m); preview.g = gamma (pixel.g, m); preview.b = gamma (pixel.b, m); preview.a = int (IMATH_NAMESPACE::clamp (pixel.a * 255.f, 0.f, 255.f) + .5f); } } }
generatePreview (const char inFileName[], float exposure, int previewWidth, int &previewHeight, Array2D <PreviewRgba> &previewPixels) { // // Read the input file // RgbaInputFile in (inFileName); Box2i dw = in.dataWindow(); float a = in.pixelAspectRatio(); int w = dw.max.x - dw.min.x + 1; int h = dw.max.y - dw.min.y + 1; Array2D <Rgba> pixels (h, w); in.setFrameBuffer (ComputeBasePointer (&pixels[0][0], dw), 1, w); in.readPixels (dw.min.y, dw.max.y); // // Make a preview image // previewHeight = max (int (h / (w * a) * previewWidth + .5f), 1); previewPixels.resizeErase (previewHeight, previewWidth); float fx = (previewWidth > 1)? (float (w - 1) / (previewWidth - 1)): 1; float fy = (previewHeight > 1)? (float (h - 1) / (previewHeight - 1)): 1; float m = Math<float>::pow (2.f, IMATH_NAMESPACE::clamp (exposure + 2.47393f, -20.f, 20.f)); for (int y = 0; y < previewHeight; ++y) { for (int x = 0; x < previewWidth; ++x) { PreviewRgba &preview = previewPixels[y][x]; const Rgba &pixel = pixels[int (y * fy + .5f)][int (x * fx + .5f)]; preview.r = gamma (pixel.r, m); preview.g = gamma (pixel.g, m); preview.b = gamma (pixel.b, m); preview.a = int (IMATH_NAMESPACE::clamp (pixel.a * 255.f, 0.f, 255.f) + .5f); } } }
{'added': [(123, ' float fx = (previewWidth > 1)? (float (w - 1) / (previewWidth - 1)): 1;'), (124, ' float fy = (previewHeight > 1)? (float (h - 1) / (previewHeight - 1)): 1;')], 'deleted': [(123, ' float fx = (previewWidth > 0)? (float (w - 1) / (previewWidth - 1)): 1;'), (124, ' float fy = (previewHeight > 0)? (float (h - 1) / (previewHeight - 1)): 1;')]}
2
2
101
720
32
398
5
https://github.com/AcademySoftwareFoundation/openexr
CVE-2020-16588
CWE-476
636
DKGCrypto.cpp
C++
getSecretShares
/* Copyright (C) 2019-Present SKALE Labs This file is part of sgxwallet. sgxwallet is free software: you can redistribute it and/or modify it under the terms of the GNU Affero General Public License as published by the Free Software Foundation, either version 3 of the License, or (at your option) any later version. sgxwallet is distributed in the hope that it will be useful, but WITHOUT ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU Affero General Public License for more details. You should have received a copy of the GNU Affero General Public License along with sgxwallet. If not, see <https://www.gnu.org/licenses/>. @file DKGCrypto.cpp @author Stan Kladko @date 2019 */ #include <iostream> #include <memory> #include "third_party/spdlog/spdlog.h" #include "sgxwallet.h" #include "SGXException.h" #include "common.h" #include "SGXWalletServer.hpp" #include "DKGCrypto.h" #include "BLSCrypto.h" vector <string> splitString(const char *coeffs, const char symbol) { CHECK_STATE(coeffs); string str(coeffs); string delim; delim.push_back(symbol); vector <string> G2_strings; size_t prev = 0, pos = 0; do { pos = str.find(delim, prev); if (pos == string::npos) pos = str.length(); string token = str.substr(prev, pos - prev); if (!token.empty()) { string coeff(token.c_str()); G2_strings.push_back(coeff); } prev = pos + delim.length(); } while (pos < str.length() && prev < str.length()); return G2_strings; } template<class T> string ConvertToString(T field_elem, int base = 10) { mpz_t t; mpz_init(t); field_elem.as_bigint().to_mpz(t); SAFE_CHAR_BUF(arr,mpz_sizeinbase(t, base) + 2); mpz_get_str(arr, base, t); mpz_clear(t); string output = arr; return output; } string convertHexToDec(const string &hex_str) { mpz_t dec; mpz_init(dec); string ret = ""; try { if (mpz_set_str(dec, hex_str.c_str(), 16) == -1) { mpz_clear(dec); return ret; } SAFE_CHAR_BUF(arr,mpz_sizeinbase(dec, 10) + 2); mpz_get_str(arr, 10, dec); ret = 
arr; } catch (exception &e) { mpz_clear(dec); throw SGXException(INCORRECT_STRING_CONVERSION, e.what()); } catch (...) { mpz_clear(dec); throw SGXException(UNKNOWN_ERROR, ""); } return ret; } string convertG2ToString(const libff::alt_bn128_G2 &elem, int base, const string &delim) { string result = ""; try { result += ConvertToString(elem.X.c0); result += delim; result += ConvertToString(elem.X.c1); result += delim; result += ConvertToString(elem.Y.c0); result += delim; result += ConvertToString(elem.Y.c1); return result; } catch (exception &e) { throw SGXException(INCORRECT_STRING_CONVERSION, e.what()); return result; } catch (...) { throw SGXException(UNKNOWN_ERROR, ""); return result; } return result; } string gen_dkg_poly(int _t) { vector<char> errMsg(BUF_LEN, 0); int errStatus = 0; uint32_t enc_len = 0; vector <uint8_t> encrypted_dkg_secret(BUF_LEN, 0); sgx_status_t status = trustedGenDkgSecretAES(eid, &errStatus, errMsg.data(), encrypted_dkg_secret.data(), &enc_len, _t); HANDLE_TRUSTED_FUNCTION_ERROR(status, errStatus, errMsg.data()); uint64_t length = enc_len;; vector<char> hexEncrPoly(BUF_LEN, 0); CHECK_STATE(encrypted_dkg_secret.size() >= length); carray2Hex(encrypted_dkg_secret.data(), length, hexEncrPoly.data(), BUF_LEN); string result(hexEncrPoly.data()); return result; } vector <vector<string>> get_verif_vect(const char *encryptedPolyHex, int t, int n) { CHECK_STATE(encryptedPolyHex); vector<char> errMsg(BUF_LEN, 0); int errStatus = 0; vector<char> pubShares(10000, 0); uint64_t encLen = 0; vector <uint8_t> encrDKGPoly(2 * BUF_LEN, 0); if (!hex2carray(encryptedPolyHex, &encLen, encrDKGPoly.data(), 6100)) { throw SGXException(INVALID_HEX, "Invalid encryptedPolyHex"); } sgx_status_t status = trustedGetPublicSharesAES(eid, &errStatus, errMsg.data(), encrDKGPoly.data(), encLen, pubShares.data(), t, n); HANDLE_TRUSTED_FUNCTION_ERROR(status, errStatus, errMsg.data()); vector <string> g2Strings = splitString(pubShares.data(), ','); vector <vector<string>> 
pubSharesVect; for (uint64_t i = 0; i < g2Strings.size(); i++) { vector <string> coeffStr = splitString(g2Strings.at(i).c_str(), ':'); pubSharesVect.push_back(coeffStr); } return pubSharesVect; } string getSecretShares(const string &_polyName, const char *_encryptedPolyHex, const vector <string> &_publicKeys, int _t, int _n) { CHECK_STATE(_encryptedPolyHex); vector<char> hexEncrKey(BUF_LEN, 0); vector<char> errMsg1(BUF_LEN, 0); vector <uint8_t> encrDKGPoly(BUF_LEN, 0); int errStatus = 0; uint64_t encLen = 0; if (!hex2carray(_encryptedPolyHex, &encLen, encrDKGPoly.data(), BUF_LEN)) { throw SGXException(INVALID_HEX, "Invalid encryptedPolyHex"); } sgx_status_t status = trustedSetEncryptedDkgPolyAES(eid, &errStatus, errMsg1.data(), encrDKGPoly.data(), encLen); HANDLE_TRUSTED_FUNCTION_ERROR(status, errStatus, errMsg1.data()); string result; for (int i = 0; i < _n; i++) { vector <uint8_t> encryptedSkey(BUF_LEN, 0); uint32_t decLen; vector<char> currentShare(193, 0); vector<char> sShareG2(320, 0); string pub_keyB = _publicKeys.at(i); vector<char> pubKeyB(129, 0); strncpy(pubKeyB.data(), pub_keyB.c_str(), 128); pubKeyB.at(128) = 0; spdlog::debug("pubKeyB is {}", pub_keyB); sgx_status_t status = trustedGetEncryptedSecretShareAES(eid, &errStatus, errMsg1.data(), encryptedSkey.data(), &decLen, currentShare.data(), sShareG2.data(), pubKeyB.data(), _t, _n, i + 1); HANDLE_TRUSTED_FUNCTION_ERROR(status, errStatus, errMsg1.data()); spdlog::debug("cur_share is {}", currentShare.data()); result += string(currentShare.data()); spdlog::debug("dec len is {}", decLen); carray2Hex(encryptedSkey.data(), decLen, hexEncrKey.data(), BUF_LEN); string dhKeyName = "DKG_DH_KEY_" + _polyName + "_" + to_string(i) + ":"; spdlog::debug("hexEncr DH Key: { }", hexEncrKey.data()); spdlog::debug("name to write to db is {}", dhKeyName); SGXWalletServer::writeDataToDB(dhKeyName, hexEncrKey.data()); string shareG2_name = "shareG2_" + _polyName + "_" + to_string(i) + ":"; spdlog::debug("name to write to db 
is {}", shareG2_name); spdlog::debug("s_shareG2: {}", sShareG2.data()); SGXWalletServer::writeDataToDB(shareG2_name, sShareG2.data()); } return result; } bool verifyShares(const char *publicShares, const char *encr_sshare, const char *encryptedKeyHex, int t, int n, int ind) { CHECK_STATE(publicShares); CHECK_STATE(encr_sshare); CHECK_STATE(encryptedKeyHex); vector<char> errMsg(BUF_LEN, 0); int errStatus = 0; uint64_t decKeyLen = 0; int result = 0; SAFE_UINT8_BUF(encr_key, BUF_LEN); if (!hex2carray(encryptedKeyHex, &decKeyLen, encr_key, BUF_LEN)) { throw SGXException(INVALID_HEX, "Invalid encryptedPolyHex"); } SAFE_CHAR_BUF(pshares,8193); strncpy(pshares, publicShares, strlen(publicShares)); sgx_status_t status = trustedDkgVerifyAES(eid, &errStatus, errMsg.data(), pshares, encr_sshare, encr_key, decKeyLen, t, ind, &result); HANDLE_TRUSTED_FUNCTION_ERROR(status, errStatus, errMsg.data()); if (result == 2) { throw SGXException(INVALID_HEX, "Invalid public shares"); } return result; } bool createBLSShare(const string &blsKeyName, const char *s_shares, const char *encryptedKeyHex) { CHECK_STATE(s_shares); CHECK_STATE(encryptedKeyHex); vector<char> errMsg(BUF_LEN,0); int errStatus = 0; uint64_t decKeyLen; SAFE_UINT8_BUF(encr_bls_key,BUF_LEN); SAFE_UINT8_BUF(encr_key,BUF_LEN); if (!hex2carray(encryptedKeyHex, &decKeyLen, encr_key, BUF_LEN)) { throw SGXException(INVALID_HEX, "Invalid encryptedKeyHex"); } uint32_t enc_bls_len = 0; sgx_status_t status = trustedCreateBlsKeyAES(eid, &errStatus, errMsg.data(), s_shares, encr_key, decKeyLen, encr_bls_key, &enc_bls_len); HANDLE_TRUSTED_FUNCTION_ERROR(status, errStatus, errMsg.data()); SAFE_CHAR_BUF(hexBLSKey,2 * BUF_LEN) carray2Hex(encr_bls_key, enc_bls_len, hexBLSKey, 2 * BUF_LEN); SGXWalletServer::writeDataToDB(blsKeyName, hexBLSKey); return true; } vector <string> getBLSPubKey(const char *encryptedKeyHex) { CHECK_STATE(encryptedKeyHex); vector<char> errMsg1(BUF_LEN, 0); int errStatus = 0; uint64_t decKeyLen = 0; 
SAFE_UINT8_BUF(encrKey, BUF_LEN); if (!hex2carray(encryptedKeyHex, &decKeyLen, encrKey, BUF_LEN)) { throw SGXException(INVALID_HEX, "Invalid encryptedKeyHex"); } SAFE_CHAR_BUF(pubKey,320) sgx_status_t status = trustedGetBlsPubKeyAES(eid, &errStatus, errMsg1.data(), encrKey, decKeyLen, pubKey); HANDLE_TRUSTED_FUNCTION_ERROR(status, errStatus, errMsg1.data()); vector <string> pubKeyVect = splitString(pubKey, ':'); spdlog::debug("pub key is "); for (int i = 0; i < 4; i++) spdlog::debug("{}", pubKeyVect.at(i)); return pubKeyVect; } vector <string> calculateAllBlsPublicKeys(const vector <string> &public_shares) { size_t n = public_shares.size(); size_t t = public_shares[0].length() / 256; uint64_t share_length = 256; uint8_t coord_length = 64; vector <libff::alt_bn128_G2> public_keys(n, libff::alt_bn128_G2::zero()); vector <libff::alt_bn128_G2> public_values(t, libff::alt_bn128_G2::zero()); for (size_t i = 0; i < n; ++i) { for (size_t j = 0; j < t; ++j) { libff::alt_bn128_G2 public_share; uint64_t pos0 = share_length * j; string x_c0_str = convertHexToDec(public_shares[i].substr(pos0, coord_length)); string x_c1_str = convertHexToDec(public_shares[i].substr(pos0 + coord_length, coord_length)); string y_c0_str = convertHexToDec(public_shares[i].substr(pos0 + 2 * coord_length, coord_length)); string y_c1_str = convertHexToDec(public_shares[i].substr(pos0 + 3 * coord_length, coord_length)); if (x_c0_str == "" || x_c1_str == "" || y_c0_str == "" || y_c1_str == "") { return {}; } public_share.X.c0 = libff::alt_bn128_Fq(x_c0_str.c_str()); public_share.X.c1 = libff::alt_bn128_Fq(x_c1_str.c_str()); public_share.Y.c0 = libff::alt_bn128_Fq(y_c0_str.c_str()); public_share.Y.c1 = libff::alt_bn128_Fq(y_c1_str.c_str()); public_share.Z = libff::alt_bn128_Fq2::one(); public_values[j] = public_values[j] + public_share; } } for (size_t i = 0; i < n; ++i) { for (size_t j = 0; j < t; ++j) { public_keys[i] = public_keys[i] + libff::power(libff::alt_bn128_Fr(i + 1), j) * public_values[j]; } 
public_keys[i].to_affine_coordinates(); } vector <string> result(n); for (size_t i = 0; i < n; ++i) { result[i] = convertG2ToString(public_keys[i]); } return result; } string decryptDHKey(const string &polyName, int ind) { vector<char> errMsg1(BUF_LEN, 0); int errStatus = 0; string DH_key_name = polyName + "_" + to_string(ind) + ":"; shared_ptr <string> hexEncrKeyPtr = SGXWalletServer::readFromDb(DH_key_name, "DKG_DH_KEY_"); spdlog::debug("encr DH key is {}", *hexEncrKeyPtr); vector<char> hexEncrKey(2 * BUF_LEN, 0); uint64_t dhEncLen = 0; SAFE_UINT8_BUF(encryptedDHKey, BUF_LEN); if (!hex2carray(hexEncrKeyPtr->c_str(), &dhEncLen, encryptedDHKey, BUF_LEN)) { throw SGXException(INVALID_HEX, "Invalid hexEncrKey"); } spdlog::debug("encr DH key length is {}", dhEncLen); spdlog::debug("hex encr DH key length is {}", hexEncrKeyPtr->length()); SAFE_CHAR_BUF(DHKey, ECDSA_SKEY_LEN); sgx_status_t status = trustedDecryptKeyAES(eid, &errStatus, errMsg1.data(), encryptedDHKey, dhEncLen, DHKey); HANDLE_TRUSTED_FUNCTION_ERROR(status, errStatus, errMsg1.data()); return DHKey; } vector <string> mult_G2(const string &x) { vector <string> result(4); libff::alt_bn128_Fr el(x.c_str()); libff::alt_bn128_G2 elG2 = el * libff::alt_bn128_G2::one(); elG2.to_affine_coordinates(); result[0] = ConvertToString(elG2.X.c0); result[1] = ConvertToString(elG2.X.c1); result[2] = ConvertToString(elG2.Y.c0); result[3] = ConvertToString(elG2.Y.c1); return result; }
/* Copyright (C) 2019-Present SKALE Labs This file is part of sgxwallet. sgxwallet is free software: you can redistribute it and/or modify it under the terms of the GNU Affero General Public License as published by the Free Software Foundation, either version 3 of the License, or (at your option) any later version. sgxwallet is distributed in the hope that it will be useful, but WITHOUT ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU Affero General Public License for more details. You should have received a copy of the GNU Affero General Public License along with sgxwallet. If not, see <https://www.gnu.org/licenses/>. @file DKGCrypto.cpp @author Stan Kladko @date 2019 */ #include <iostream> #include <memory> #include "third_party/spdlog/spdlog.h" #include "sgxwallet.h" #include "SGXException.h" #include "common.h" #include "SGXWalletServer.hpp" #include "DKGCrypto.h" #include "BLSCrypto.h" vector <string> splitString(const char *coeffs, const char symbol) { CHECK_STATE(coeffs); string str(coeffs); string delim; delim.push_back(symbol); vector <string> G2_strings; size_t prev = 0, pos = 0; do { pos = str.find(delim, prev); if (pos == string::npos) pos = str.length(); string token = str.substr(prev, pos - prev); if (!token.empty()) { string coeff(token.c_str()); G2_strings.push_back(coeff); } prev = pos + delim.length(); } while (pos < str.length() && prev < str.length()); return G2_strings; } template<class T> string ConvertToString(T field_elem, int base = 10) { mpz_t t; mpz_init(t); field_elem.as_bigint().to_mpz(t); SAFE_CHAR_BUF(arr,mpz_sizeinbase(t, base) + 2); mpz_get_str(arr, base, t); mpz_clear(t); string output = arr; return output; } string convertHexToDec(const string &hex_str) { mpz_t dec; mpz_init(dec); string ret = ""; try { if (mpz_set_str(dec, hex_str.c_str(), 16) == -1) { mpz_clear(dec); return ret; } SAFE_CHAR_BUF(arr,mpz_sizeinbase(dec, 10) + 2); mpz_get_str(arr, 10, dec); ret = 
arr; } catch (exception &e) { mpz_clear(dec); throw SGXException(INCORRECT_STRING_CONVERSION, e.what()); } catch (...) { mpz_clear(dec); throw SGXException(UNKNOWN_ERROR, ""); } return ret; } string convertG2ToString(const libff::alt_bn128_G2 &elem, int base, const string &delim) { string result = ""; try { result += ConvertToString(elem.X.c0); result += delim; result += ConvertToString(elem.X.c1); result += delim; result += ConvertToString(elem.Y.c0); result += delim; result += ConvertToString(elem.Y.c1); return result; } catch (exception &e) { throw SGXException(INCORRECT_STRING_CONVERSION, e.what()); return result; } catch (...) { throw SGXException(UNKNOWN_ERROR, ""); return result; } return result; } string gen_dkg_poly(int _t) { vector<char> errMsg(BUF_LEN, 0); int errStatus = 0; uint64_t enc_len = 0; vector <uint8_t> encrypted_dkg_secret(BUF_LEN, 0); sgx_status_t status = trustedGenDkgSecretAES( eid, &errStatus,errMsg.data(), encrypted_dkg_secret.data(), &enc_len, _t); HANDLE_TRUSTED_FUNCTION_ERROR(status, errStatus, errMsg.data()); uint64_t length = enc_len;; vector<char> hexEncrPoly(BUF_LEN, 0); CHECK_STATE(encrypted_dkg_secret.size() >= length); carray2Hex(encrypted_dkg_secret.data(), length, hexEncrPoly.data(), BUF_LEN); string result(hexEncrPoly.data()); return result; } vector <vector<string>> get_verif_vect(const char *encryptedPolyHex, int t, int n) { CHECK_STATE(encryptedPolyHex); vector<char> errMsg(BUF_LEN, 0); int errStatus = 0; vector<char> pubShares(10000, 0); uint64_t encLen = 0; vector <uint8_t> encrDKGPoly(2 * BUF_LEN, 0); if (!hex2carray(encryptedPolyHex, &encLen, encrDKGPoly.data(), 6100)) { throw SGXException(INVALID_HEX, "Invalid encryptedPolyHex"); } sgx_status_t status = trustedGetPublicSharesAES(eid, &errStatus, errMsg.data(), encrDKGPoly.data(), encLen, pubShares.data(), t, n); HANDLE_TRUSTED_FUNCTION_ERROR(status, errStatus, errMsg.data()); vector <string> g2Strings = splitString(pubShares.data(), ','); vector <vector<string>> 
pubSharesVect; for (uint64_t i = 0; i < g2Strings.size(); i++) { vector <string> coeffStr = splitString(g2Strings.at(i).c_str(), ':'); pubSharesVect.push_back(coeffStr); } return pubSharesVect; } string getSecretShares(const string &_polyName, const char *_encryptedPolyHex, const vector <string> &_publicKeys, int _t, int _n) { CHECK_STATE(_encryptedPolyHex); vector<char> hexEncrKey(BUF_LEN, 0); vector<char> errMsg1(BUF_LEN, 0); vector <uint8_t> encrDKGPoly(BUF_LEN, 0); int errStatus = 0; uint64_t encLen = 0; if (!hex2carray(_encryptedPolyHex, &encLen, encrDKGPoly.data(), BUF_LEN)) { throw SGXException(INVALID_HEX, "Invalid encryptedPolyHex"); } sgx_status_t status = trustedSetEncryptedDkgPolyAES(eid, &errStatus, errMsg1.data(), encrDKGPoly.data(), encLen); HANDLE_TRUSTED_FUNCTION_ERROR(status, errStatus, errMsg1.data()); string result; for (int i = 0; i < _n; i++) { vector <uint8_t> encryptedSkey(BUF_LEN, 0); uint64_t decLen; vector<char> currentShare(193, 0); vector<char> sShareG2(320, 0); string pub_keyB = _publicKeys.at(i); vector<char> pubKeyB(129, 0); strncpy(pubKeyB.data(), pub_keyB.c_str(), 128); pubKeyB.at(128) = 0; spdlog::debug("pubKeyB is {}", pub_keyB); sgx_status_t status = trustedGetEncryptedSecretShareAES(eid, &errStatus, errMsg1.data(), encryptedSkey.data(), &decLen, currentShare.data(), sShareG2.data(), pubKeyB.data(), _t, _n, i + 1); HANDLE_TRUSTED_FUNCTION_ERROR(status, errStatus, errMsg1.data()); spdlog::debug("cur_share is {}", currentShare.data()); result += string(currentShare.data()); spdlog::debug("dec len is {}", decLen); carray2Hex(encryptedSkey.data(), decLen, hexEncrKey.data(), BUF_LEN); string dhKeyName = "DKG_DH_KEY_" + _polyName + "_" + to_string(i) + ":"; spdlog::debug("hexEncr DH Key: { }", hexEncrKey.data()); spdlog::debug("name to write to db is {}", dhKeyName); SGXWalletServer::writeDataToDB(dhKeyName, hexEncrKey.data()); string shareG2_name = "shareG2_" + _polyName + "_" + to_string(i) + ":"; spdlog::debug("name to write to db 
is {}", shareG2_name); spdlog::debug("s_shareG2: {}", sShareG2.data()); SGXWalletServer::writeDataToDB(shareG2_name, sShareG2.data()); } return result; } bool verifyShares(const char *publicShares, const char *encr_sshare, const char *encryptedKeyHex, int t, int n, int ind) { CHECK_STATE(publicShares); CHECK_STATE(encr_sshare); CHECK_STATE(encryptedKeyHex); vector<char> errMsg(BUF_LEN, 0); int errStatus = 0; uint64_t decKeyLen = 0; int result = 0; SAFE_UINT8_BUF(encr_key, BUF_LEN); if (!hex2carray(encryptedKeyHex, &decKeyLen, encr_key, BUF_LEN)) { throw SGXException(INVALID_HEX, "Invalid encryptedPolyHex"); } SAFE_CHAR_BUF(pshares,8193); strncpy(pshares, publicShares, strlen(publicShares)); sgx_status_t status = trustedDkgVerifyAES(eid, &errStatus, errMsg.data(), pshares, encr_sshare, encr_key, decKeyLen, t, ind, &result); HANDLE_TRUSTED_FUNCTION_ERROR(status, errStatus, errMsg.data()); if (result == 2) { throw SGXException(INVALID_HEX, "Invalid public shares"); } return result; } bool createBLSShare(const string &blsKeyName, const char *s_shares, const char *encryptedKeyHex) { CHECK_STATE(s_shares); CHECK_STATE(encryptedKeyHex); vector<char> errMsg(BUF_LEN,0); int errStatus = 0; uint64_t decKeyLen; SAFE_UINT8_BUF(encr_bls_key,BUF_LEN); SAFE_UINT8_BUF(encr_key,BUF_LEN); if (!hex2carray(encryptedKeyHex, &decKeyLen, encr_key, BUF_LEN)) { throw SGXException(INVALID_HEX, "Invalid encryptedKeyHex"); } uint64_t enc_bls_len = 0; sgx_status_t status = trustedCreateBlsKeyAES(eid, &errStatus, errMsg.data(), s_shares, encr_key, decKeyLen, encr_bls_key, &enc_bls_len); HANDLE_TRUSTED_FUNCTION_ERROR(status, errStatus, errMsg.data()); SAFE_CHAR_BUF(hexBLSKey,2 * BUF_LEN) carray2Hex(encr_bls_key, enc_bls_len, hexBLSKey, 2 * BUF_LEN); SGXWalletServer::writeDataToDB(blsKeyName, hexBLSKey); return true; } vector <string> getBLSPubKey(const char *encryptedKeyHex) { CHECK_STATE(encryptedKeyHex); vector<char> errMsg1(BUF_LEN, 0); int errStatus = 0; uint64_t decKeyLen = 0; 
SAFE_UINT8_BUF(encrKey, BUF_LEN); if (!hex2carray(encryptedKeyHex, &decKeyLen, encrKey, BUF_LEN)) { throw SGXException(INVALID_HEX, "Invalid encryptedKeyHex"); } SAFE_CHAR_BUF(pubKey,320) sgx_status_t status = trustedGetBlsPubKeyAES(eid, &errStatus, errMsg1.data(), encrKey, decKeyLen, pubKey); HANDLE_TRUSTED_FUNCTION_ERROR(status, errStatus, errMsg1.data()); vector <string> pubKeyVect = splitString(pubKey, ':'); spdlog::debug("pub key is "); for (int i = 0; i < 4; i++) spdlog::debug("{}", pubKeyVect.at(i)); return pubKeyVect; } vector <string> calculateAllBlsPublicKeys(const vector <string> &public_shares) { size_t n = public_shares.size(); size_t t = public_shares[0].length() / 256; uint64_t share_length = 256; uint8_t coord_length = 64; vector <libff::alt_bn128_G2> public_keys(n, libff::alt_bn128_G2::zero()); vector <libff::alt_bn128_G2> public_values(t, libff::alt_bn128_G2::zero()); for (size_t i = 0; i < n; ++i) { for (size_t j = 0; j < t; ++j) { libff::alt_bn128_G2 public_share; uint64_t pos0 = share_length * j; string x_c0_str = convertHexToDec(public_shares[i].substr(pos0, coord_length)); string x_c1_str = convertHexToDec(public_shares[i].substr(pos0 + coord_length, coord_length)); string y_c0_str = convertHexToDec(public_shares[i].substr(pos0 + 2 * coord_length, coord_length)); string y_c1_str = convertHexToDec(public_shares[i].substr(pos0 + 3 * coord_length, coord_length)); if (x_c0_str == "" || x_c1_str == "" || y_c0_str == "" || y_c1_str == "") { return {}; } public_share.X.c0 = libff::alt_bn128_Fq(x_c0_str.c_str()); public_share.X.c1 = libff::alt_bn128_Fq(x_c1_str.c_str()); public_share.Y.c0 = libff::alt_bn128_Fq(y_c0_str.c_str()); public_share.Y.c1 = libff::alt_bn128_Fq(y_c1_str.c_str()); public_share.Z = libff::alt_bn128_Fq2::one(); public_values[j] = public_values[j] + public_share; } } for (size_t i = 0; i < n; ++i) { for (size_t j = 0; j < t; ++j) { public_keys[i] = public_keys[i] + libff::power(libff::alt_bn128_Fr(i + 1), j) * public_values[j]; } 
public_keys[i].to_affine_coordinates(); } vector <string> result(n); for (size_t i = 0; i < n; ++i) { result[i] = convertG2ToString(public_keys[i]); } return result; } string decryptDHKey(const string &polyName, int ind) { vector<char> errMsg1(BUF_LEN, 0); int errStatus = 0; string DH_key_name = polyName + "_" + to_string(ind) + ":"; shared_ptr <string> hexEncrKeyPtr = SGXWalletServer::readFromDb(DH_key_name, "DKG_DH_KEY_"); spdlog::debug("encr DH key is {}", *hexEncrKeyPtr); vector<char> hexEncrKey(2 * BUF_LEN, 0); uint64_t dhEncLen = 0; SAFE_UINT8_BUF(encryptedDHKey, BUF_LEN); if (!hex2carray(hexEncrKeyPtr->c_str(), &dhEncLen, encryptedDHKey, BUF_LEN)) { throw SGXException(INVALID_HEX, "Invalid hexEncrKey"); } spdlog::debug("encr DH key length is {}", dhEncLen); spdlog::debug("hex encr DH key length is {}", hexEncrKeyPtr->length()); SAFE_CHAR_BUF(DHKey, ECDSA_SKEY_LEN); sgx_status_t status = trustedDecryptKeyAES(eid, &errStatus, errMsg1.data(), encryptedDHKey, dhEncLen, DHKey); HANDLE_TRUSTED_FUNCTION_ERROR(status, errStatus, errMsg1.data()); return DHKey; } vector <string> mult_G2(const string &x) { vector <string> result(4); libff::alt_bn128_Fr el(x.c_str()); libff::alt_bn128_G2 elG2 = el * libff::alt_bn128_G2::one(); elG2.to_affine_coordinates(); result[0] = ConvertToString(elG2.X.c0); result[1] = ConvertToString(elG2.X.c1); result[2] = ConvertToString(elG2.Y.c0); result[3] = ConvertToString(elG2.Y.c1); return result; }
getSecretShares(const string &_polyName, const char *_encryptedPolyHex, const vector <string> &_publicKeys, int _t, int _n) { CHECK_STATE(_encryptedPolyHex); vector<char> hexEncrKey(BUF_LEN, 0); vector<char> errMsg1(BUF_LEN, 0); vector <uint8_t> encrDKGPoly(BUF_LEN, 0); int errStatus = 0; uint64_t encLen = 0; if (!hex2carray(_encryptedPolyHex, &encLen, encrDKGPoly.data(), BUF_LEN)) { throw SGXException(INVALID_HEX, "Invalid encryptedPolyHex"); } sgx_status_t status = trustedSetEncryptedDkgPolyAES(eid, &errStatus, errMsg1.data(), encrDKGPoly.data(), encLen); HANDLE_TRUSTED_FUNCTION_ERROR(status, errStatus, errMsg1.data()); string result; for (int i = 0; i < _n; i++) { vector <uint8_t> encryptedSkey(BUF_LEN, 0); uint32_t decLen; vector<char> currentShare(193, 0); vector<char> sShareG2(320, 0); string pub_keyB = _publicKeys.at(i); vector<char> pubKeyB(129, 0); strncpy(pubKeyB.data(), pub_keyB.c_str(), 128); pubKeyB.at(128) = 0; spdlog::debug("pubKeyB is {}", pub_keyB); sgx_status_t status = trustedGetEncryptedSecretShareAES(eid, &errStatus, errMsg1.data(), encryptedSkey.data(), &decLen, currentShare.data(), sShareG2.data(), pubKeyB.data(), _t, _n, i + 1); HANDLE_TRUSTED_FUNCTION_ERROR(status, errStatus, errMsg1.data()); spdlog::debug("cur_share is {}", currentShare.data()); result += string(currentShare.data()); spdlog::debug("dec len is {}", decLen); carray2Hex(encryptedSkey.data(), decLen, hexEncrKey.data(), BUF_LEN); string dhKeyName = "DKG_DH_KEY_" + _polyName + "_" + to_string(i) + ":"; spdlog::debug("hexEncr DH Key: { }", hexEncrKey.data()); spdlog::debug("name to write to db is {}", dhKeyName); SGXWalletServer::writeDataToDB(dhKeyName, hexEncrKey.data()); string shareG2_name = "shareG2_" + _polyName + "_" + to_string(i) + ":"; spdlog::debug("name to write to db is {}", shareG2_name); spdlog::debug("s_shareG2: {}", sShareG2.data()); SGXWalletServer::writeDataToDB(shareG2_name, sShareG2.data()); } return result; }
getSecretShares(const string &_polyName, const char *_encryptedPolyHex, const vector <string> &_publicKeys, int _t, int _n) { CHECK_STATE(_encryptedPolyHex); vector<char> hexEncrKey(BUF_LEN, 0); vector<char> errMsg1(BUF_LEN, 0); vector <uint8_t> encrDKGPoly(BUF_LEN, 0); int errStatus = 0; uint64_t encLen = 0; if (!hex2carray(_encryptedPolyHex, &encLen, encrDKGPoly.data(), BUF_LEN)) { throw SGXException(INVALID_HEX, "Invalid encryptedPolyHex"); } sgx_status_t status = trustedSetEncryptedDkgPolyAES(eid, &errStatus, errMsg1.data(), encrDKGPoly.data(), encLen); HANDLE_TRUSTED_FUNCTION_ERROR(status, errStatus, errMsg1.data()); string result; for (int i = 0; i < _n; i++) { vector <uint8_t> encryptedSkey(BUF_LEN, 0); uint64_t decLen; vector<char> currentShare(193, 0); vector<char> sShareG2(320, 0); string pub_keyB = _publicKeys.at(i); vector<char> pubKeyB(129, 0); strncpy(pubKeyB.data(), pub_keyB.c_str(), 128); pubKeyB.at(128) = 0; spdlog::debug("pubKeyB is {}", pub_keyB); sgx_status_t status = trustedGetEncryptedSecretShareAES(eid, &errStatus, errMsg1.data(), encryptedSkey.data(), &decLen, currentShare.data(), sShareG2.data(), pubKeyB.data(), _t, _n, i + 1); HANDLE_TRUSTED_FUNCTION_ERROR(status, errStatus, errMsg1.data()); spdlog::debug("cur_share is {}", currentShare.data()); result += string(currentShare.data()); spdlog::debug("dec len is {}", decLen); carray2Hex(encryptedSkey.data(), decLen, hexEncrKey.data(), BUF_LEN); string dhKeyName = "DKG_DH_KEY_" + _polyName + "_" + to_string(i) + ":"; spdlog::debug("hexEncr DH Key: { }", hexEncrKey.data()); spdlog::debug("name to write to db is {}", dhKeyName); SGXWalletServer::writeDataToDB(dhKeyName, hexEncrKey.data()); string shareG2_name = "shareG2_" + _polyName + "_" + to_string(i) + ":"; spdlog::debug("name to write to db is {}", shareG2_name); spdlog::debug("s_shareG2: {}", sShareG2.data()); SGXWalletServer::writeDataToDB(shareG2_name, sShareG2.data()); } return result; }
{'added': [(138, ' uint64_t enc_len = 0;'), (142, ' sgx_status_t status = trustedGenDkgSecretAES('), (143, ' eid, &errStatus,errMsg.data(), encrypted_dkg_secret.data(), &enc_len, _t);'), (217, ' uint64_t decLen;'), (303, ' uint64_t enc_bls_len = 0;')], 'deleted': [(138, ' uint32_t enc_len = 0;'), (143, ''), (144, ' sgx_status_t status = trustedGenDkgSecretAES(eid, &errStatus, errMsg.data(), encrypted_dkg_secret.data(), &enc_len, _t);'), (217, ' uint32_t decLen;'), (303, ' uint32_t enc_bls_len = 0;')]}
5
5
289
2,613
43
476
3
https://github.com/skalenetwork/sgxwallet
CVE-2021-36218
CWE-787
1,689
GPMF_demo.c
C
main
/*! @file GPMF_demo.c * * @brief Demo to extract GPMF from an MP4 * * @version 1.0.1 * * (C) Copyright 2017 GoPro Inc (http://gopro.com/). * * Licensed under either: * - Apache License, Version 2.0, http://www.apache.org/licenses/LICENSE-2.0 * - MIT license, http://opensource.org/licenses/MIT * at your option. * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. * */ #include <stdlib.h> #include <stdio.h> #include <string.h> #include <stdint.h> #include "../GPMF_parser.h" #include "GPMF_mp4reader.h" extern void PrintGPMF(GPMF_stream *ms); int main(int argc, char *argv[]) { int32_t ret = GPMF_OK; GPMF_stream metadata_stream, *ms = &metadata_stream; double metadatalength; uint32_t *payload = NULL; //buffer to store GPMF samples from the MP4. 
// get file return data if (argc != 2) { printf("usage: %s <file_with_GPMF>\n", argv[0]); return -1; } size_t mp4 = OpenMP4Source(argv[1], MOV_GPMF_TRAK_TYPE, MOV_GPMF_TRAK_SUBTYPE); // size_t mp4 = OpenMP4SourceUDTA(argv[1]); //Search for GPMF payload with MP4's udta metadatalength = GetDuration(mp4); if (metadatalength > 0.0) { uint32_t index, payloads = GetNumberPayloads(mp4); // printf("found %.2fs of metadata, from %d payloads, within %s\n", metadatalength, payloads, argv[1]); #if 1 if (payloads == 1) // Printf the contents of the single payload { uint32_t payloadsize = GetPayloadSize(mp4,0); payload = GetPayload(mp4, payload, 0); if(payload == NULL) goto cleanup; ret = GPMF_Init(ms, payload, payloadsize); if (ret != GPMF_OK) goto cleanup; // Output (printf) all the contained GPMF data within this payload ret = GPMF_Validate(ms, GPMF_RECURSE_LEVELS); // optional if (GPMF_OK != ret) { printf("Invalid Structure\n"); goto cleanup; } GPMF_ResetState(ms); do { PrintGPMF(ms); // printf current GPMF KLV } while (GPMF_OK == GPMF_Next(ms, GPMF_RECURSE_LEVELS)); GPMF_ResetState(ms); printf("\n"); } #endif for (index = 0; index < payloads; index++) { uint32_t payloadsize = GetPayloadSize(mp4, index); float in = 0.0, out = 0.0; //times payload = GetPayload(mp4, payload, index); if (payload == NULL) goto cleanup; ret = GetPayloadTime(mp4, index, &in, &out); if (ret != GPMF_OK) goto cleanup; ret = GPMF_Init(ms, payload, payloadsize); if (ret != GPMF_OK) goto cleanup; #if 1 // Find all the available Streams and the data carrying FourCC if (index == 0) // show first payload { ret = GPMF_FindNext(ms, GPMF_KEY_STREAM, GPMF_RECURSE_LEVELS); while (GPMF_OK == ret) { ret = GPMF_SeekToSamples(ms); if (GPMF_OK == ret) //find the last FOURCC within the stream { uint32_t key = GPMF_Key(ms); GPMF_SampleType type = GPMF_Type(ms); uint32_t elements = GPMF_ElementsInStruct(ms); //uint32_t samples = GPMF_Repeat(ms); uint32_t samples = GPMF_PayloadSampleCount(ms); if (samples) { printf(" 
STRM of %c%c%c%c ", PRINTF_4CC(key)); if (type == GPMF_TYPE_COMPLEX) { GPMF_stream find_stream; GPMF_CopyState(ms, &find_stream); if (GPMF_OK == GPMF_FindPrev(&find_stream, GPMF_KEY_TYPE, GPMF_CURRENT_LEVEL)) { char tmp[64]; char *data = (char *)GPMF_RawData(&find_stream); int size = GPMF_RawDataSize(&find_stream); if (size < sizeof(tmp)) { memcpy(tmp, data, size); tmp[size] = 0; printf("of type %s ", tmp); } } } else { printf("of type %c ", type); } printf("with %d sample%s ", samples, samples > 1 ? "s" : ""); if (elements > 1) printf("-- %d elements per sample", elements); printf("\n"); } ret = GPMF_FindNext(ms, GPMF_KEY_STREAM, GPMF_RECURSE_LEVELS); } else { if (ret == GPMF_ERROR_BAD_STRUCTURE) // some payload element was corrupt, skip to the next valid GPMF KLV at the previous level. { ret = GPMF_Next(ms, GPMF_CURRENT_LEVEL); // this will be the next stream if any more are present. } } } GPMF_ResetState(ms); printf("\n"); } #endif #if 1 // Find GPS values and return scaled doubles. if (index == 0) // show first payload { if (GPMF_OK == GPMF_FindNext(ms, STR2FOURCC("GPS5"), GPMF_RECURSE_LEVELS) || //GoPro Hero5/6/7 GPS GPMF_OK == GPMF_FindNext(ms, STR2FOURCC("GPRI"), GPMF_RECURSE_LEVELS)) //GoPro Karma GPS { uint32_t key = GPMF_Key(ms); uint32_t samples = GPMF_Repeat(ms); uint32_t elements = GPMF_ElementsInStruct(ms); uint32_t buffersize = samples * elements * sizeof(double); GPMF_stream find_stream; double *ptr, *tmpbuffer = malloc(buffersize); char units[10][6] = { "" }; uint32_t unit_samples = 1; printf("MP4 Payload time %.3f to %.3f seconds\n", in, out); if (tmpbuffer && samples) { uint32_t i, j; //Search for any units to display GPMF_CopyState(ms, &find_stream); if (GPMF_OK == GPMF_FindPrev(&find_stream, GPMF_KEY_SI_UNITS, GPMF_CURRENT_LEVEL) || GPMF_OK == GPMF_FindPrev(&find_stream, GPMF_KEY_UNITS, GPMF_CURRENT_LEVEL)) { char *data = (char *)GPMF_RawData(&find_stream); int ssize = GPMF_StructSize(&find_stream); unit_samples = GPMF_Repeat(&find_stream); for 
(i = 0; i < unit_samples; i++) { memcpy(units[i], data, ssize); units[i][ssize] = 0; data += ssize; } } //GPMF_FormattedData(ms, tmpbuffer, buffersize, 0, samples); // Output data in LittleEnd, but no scale GPMF_ScaledData(ms, tmpbuffer, buffersize, 0, samples, GPMF_TYPE_DOUBLE); //Output scaled data as floats ptr = tmpbuffer; for (i = 0; i < samples; i++) { printf("%c%c%c%c ", PRINTF_4CC(key)); for (j = 0; j < elements; j++) printf("%.3f%s, ", *ptr++, units[j%unit_samples]); printf("\n"); } free(tmpbuffer); } } GPMF_ResetState(ms); printf("\n"); } #endif } #if 1 // Find all the available Streams and compute they sample rates while (GPMF_OK == GPMF_FindNext(ms, GPMF_KEY_STREAM, GPMF_RECURSE_LEVELS)) { if (GPMF_OK == GPMF_SeekToSamples(ms)) //find the last FOURCC within the stream { uint32_t fourcc = GPMF_Key(ms); double rate = GetGPMFSampleRate(mp4, fourcc, GPMF_SAMPLE_RATE_PRECISE);// GPMF_SAMPLE_RATE_FAST); printf("%c%c%c%c sampling rate = %f Hz\n", PRINTF_4CC(fourcc), rate); } } #endif cleanup: if (payload) FreePayload(payload); payload = NULL; CloseSource(mp4); } return ret; }
/*! @file GPMF_demo.c * * @brief Demo to extract GPMF from an MP4 * * @version 1.0.1 * * (C) Copyright 2017 GoPro Inc (http://gopro.com/). * * Licensed under either: * - Apache License, Version 2.0, http://www.apache.org/licenses/LICENSE-2.0 * - MIT license, http://opensource.org/licenses/MIT * at your option. * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. * */ #include <stdlib.h> #include <stdio.h> #include <string.h> #include <stdint.h> #include "../GPMF_parser.h" #include "GPMF_mp4reader.h" extern void PrintGPMF(GPMF_stream *ms); int main(int argc, char *argv[]) { int32_t ret = GPMF_OK; GPMF_stream metadata_stream, *ms = &metadata_stream; double metadatalength; uint32_t *payload = NULL; //buffer to store GPMF samples from the MP4. 
// get file return data if (argc != 2) { printf("usage: %s <file_with_GPMF>\n", argv[0]); return -1; } size_t mp4 = OpenMP4Source(argv[1], MOV_GPMF_TRAK_TYPE, MOV_GPMF_TRAK_SUBTYPE); if (mp4 == 0) { printf("error: %s is an invalid MP4/MOV\n", argv[1]); return -1; } // size_t mp4 = OpenMP4SourceUDTA(argv[1]); //Search for GPMF payload with MP4's udta metadatalength = GetDuration(mp4); if (metadatalength > 0.0) { uint32_t index, payloads = GetNumberPayloads(mp4); // printf("found %.2fs of metadata, from %d payloads, within %s\n", metadatalength, payloads, argv[1]); #if 1 if (payloads == 1) // Printf the contents of the single payload { uint32_t payloadsize = GetPayloadSize(mp4,0); payload = GetPayload(mp4, payload, 0); if(payload == NULL) goto cleanup; ret = GPMF_Init(ms, payload, payloadsize); if (ret != GPMF_OK) goto cleanup; // Output (printf) all the contained GPMF data within this payload ret = GPMF_Validate(ms, GPMF_RECURSE_LEVELS); // optional if (GPMF_OK != ret) { printf("Invalid Structure\n"); goto cleanup; } GPMF_ResetState(ms); do { PrintGPMF(ms); // printf current GPMF KLV } while (GPMF_OK == GPMF_Next(ms, GPMF_RECURSE_LEVELS)); GPMF_ResetState(ms); printf("\n"); } #endif for (index = 0; index < payloads; index++) { uint32_t payloadsize = GetPayloadSize(mp4, index); double in = 0.0, out = 0.0; //times payload = GetPayload(mp4, payload, index); if (payload == NULL) goto cleanup; ret = GetPayloadTime(mp4, index, &in, &out); if (ret != GPMF_OK) goto cleanup; ret = GPMF_Init(ms, payload, payloadsize); if (ret != GPMF_OK) goto cleanup; #if 1 // Find all the available Streams and the data carrying FourCC if (index == 0) // show first payload { ret = GPMF_FindNext(ms, GPMF_KEY_STREAM, GPMF_RECURSE_LEVELS); while (GPMF_OK == ret) { ret = GPMF_SeekToSamples(ms); if (GPMF_OK == ret) //find the last FOURCC within the stream { uint32_t key = GPMF_Key(ms); GPMF_SampleType type = GPMF_Type(ms); uint32_t elements = GPMF_ElementsInStruct(ms); //uint32_t samples = 
GPMF_Repeat(ms); uint32_t samples = GPMF_PayloadSampleCount(ms); if (samples) { printf(" STRM of %c%c%c%c ", PRINTF_4CC(key)); if (type == GPMF_TYPE_COMPLEX) { GPMF_stream find_stream; GPMF_CopyState(ms, &find_stream); if (GPMF_OK == GPMF_FindPrev(&find_stream, GPMF_KEY_TYPE, GPMF_CURRENT_LEVEL)) { char tmp[64]; char *data = (char *)GPMF_RawData(&find_stream); int size = GPMF_RawDataSize(&find_stream); if (size < sizeof(tmp)) { memcpy(tmp, data, size); tmp[size] = 0; printf("of type %s ", tmp); } } } else { printf("of type %c ", type); } printf("with %d sample%s ", samples, samples > 1 ? "s" : ""); if (elements > 1) printf("-- %d elements per sample", elements); printf("\n"); } ret = GPMF_FindNext(ms, GPMF_KEY_STREAM, GPMF_RECURSE_LEVELS); } else { if (ret == GPMF_ERROR_BAD_STRUCTURE) // some payload element was corrupt, skip to the next valid GPMF KLV at the previous level. { ret = GPMF_Next(ms, GPMF_CURRENT_LEVEL); // this will be the next stream if any more are present. } } } GPMF_ResetState(ms); printf("\n"); } #endif #if 1 // Find GPS values and return scaled doubles. 
if (index == 0) // show first payload { if (GPMF_OK == GPMF_FindNext(ms, STR2FOURCC("GPS5"), GPMF_RECURSE_LEVELS) || //GoPro Hero5/6/7 GPS GPMF_OK == GPMF_FindNext(ms, STR2FOURCC("GPRI"), GPMF_RECURSE_LEVELS)) //GoPro Karma GPS { uint32_t key = GPMF_Key(ms); uint32_t samples = GPMF_Repeat(ms); uint32_t elements = GPMF_ElementsInStruct(ms); uint32_t buffersize = samples * elements * sizeof(double); GPMF_stream find_stream; double *ptr, *tmpbuffer = malloc(buffersize); char units[10][6] = { "" }; uint32_t unit_samples = 1; printf("MP4 Payload time %.3f to %.3f seconds\n", in, out); if (tmpbuffer && samples) { uint32_t i, j; //Search for any units to display GPMF_CopyState(ms, &find_stream); if (GPMF_OK == GPMF_FindPrev(&find_stream, GPMF_KEY_SI_UNITS, GPMF_CURRENT_LEVEL) || GPMF_OK == GPMF_FindPrev(&find_stream, GPMF_KEY_UNITS, GPMF_CURRENT_LEVEL)) { char *data = (char *)GPMF_RawData(&find_stream); int ssize = GPMF_StructSize(&find_stream); unit_samples = GPMF_Repeat(&find_stream); for (i = 0; i < unit_samples; i++) { memcpy(units[i], data, ssize); units[i][ssize] = 0; data += ssize; } } //GPMF_FormattedData(ms, tmpbuffer, buffersize, 0, samples); // Output data in LittleEnd, but no scale GPMF_ScaledData(ms, tmpbuffer, buffersize, 0, samples, GPMF_TYPE_DOUBLE); //Output scaled data as floats ptr = tmpbuffer; for (i = 0; i < samples; i++) { printf("%c%c%c%c ", PRINTF_4CC(key)); for (j = 0; j < elements; j++) printf("%.3f%s, ", *ptr++, units[j%unit_samples]); printf("\n"); } free(tmpbuffer); } } GPMF_ResetState(ms); printf("\n"); } #endif } #if 1 // Find all the available Streams and compute they sample rates while (GPMF_OK == GPMF_FindNext(ms, GPMF_KEY_STREAM, GPMF_RECURSE_LEVELS)) { if (GPMF_OK == GPMF_SeekToSamples(ms)) //find the last FOURCC within the stream { double in = 0.0, out = 0.0; uint32_t fourcc = GPMF_Key(ms); double rate = GetGPMFSampleRate(mp4, fourcc, GPMF_SAMPLE_RATE_PRECISE, &in, &out);// GPMF_SAMPLE_RATE_FAST); printf("%c%c%c%c sampling rate = %f Hz 
(from %f to %f)\n", PRINTF_4CC(fourcc), rate, in, out); } } #endif cleanup: if (payload) FreePayload(payload); payload = NULL; CloseSource(mp4); } return ret; }
int main(int argc, char *argv[]) { int32_t ret = GPMF_OK; GPMF_stream metadata_stream, *ms = &metadata_stream; double metadatalength; uint32_t *payload = NULL; //buffer to store GPMF samples from the MP4. // get file return data if (argc != 2) { printf("usage: %s <file_with_GPMF>\n", argv[0]); return -1; } size_t mp4 = OpenMP4Source(argv[1], MOV_GPMF_TRAK_TYPE, MOV_GPMF_TRAK_SUBTYPE); // size_t mp4 = OpenMP4SourceUDTA(argv[1]); //Search for GPMF payload with MP4's udta metadatalength = GetDuration(mp4); if (metadatalength > 0.0) { uint32_t index, payloads = GetNumberPayloads(mp4); // printf("found %.2fs of metadata, from %d payloads, within %s\n", metadatalength, payloads, argv[1]); #if 1 if (payloads == 1) // Printf the contents of the single payload { uint32_t payloadsize = GetPayloadSize(mp4,0); payload = GetPayload(mp4, payload, 0); if(payload == NULL) goto cleanup; ret = GPMF_Init(ms, payload, payloadsize); if (ret != GPMF_OK) goto cleanup; // Output (printf) all the contained GPMF data within this payload ret = GPMF_Validate(ms, GPMF_RECURSE_LEVELS); // optional if (GPMF_OK != ret) { printf("Invalid Structure\n"); goto cleanup; } GPMF_ResetState(ms); do { PrintGPMF(ms); // printf current GPMF KLV } while (GPMF_OK == GPMF_Next(ms, GPMF_RECURSE_LEVELS)); GPMF_ResetState(ms); printf("\n"); } #endif for (index = 0; index < payloads; index++) { uint32_t payloadsize = GetPayloadSize(mp4, index); float in = 0.0, out = 0.0; //times payload = GetPayload(mp4, payload, index); if (payload == NULL) goto cleanup; ret = GetPayloadTime(mp4, index, &in, &out); if (ret != GPMF_OK) goto cleanup; ret = GPMF_Init(ms, payload, payloadsize); if (ret != GPMF_OK) goto cleanup; #if 1 // Find all the available Streams and the data carrying FourCC if (index == 0) // show first payload { ret = GPMF_FindNext(ms, GPMF_KEY_STREAM, GPMF_RECURSE_LEVELS); while (GPMF_OK == ret) { ret = GPMF_SeekToSamples(ms); if (GPMF_OK == ret) //find the last FOURCC within the stream { uint32_t key = 
GPMF_Key(ms); GPMF_SampleType type = GPMF_Type(ms); uint32_t elements = GPMF_ElementsInStruct(ms); //uint32_t samples = GPMF_Repeat(ms); uint32_t samples = GPMF_PayloadSampleCount(ms); if (samples) { printf(" STRM of %c%c%c%c ", PRINTF_4CC(key)); if (type == GPMF_TYPE_COMPLEX) { GPMF_stream find_stream; GPMF_CopyState(ms, &find_stream); if (GPMF_OK == GPMF_FindPrev(&find_stream, GPMF_KEY_TYPE, GPMF_CURRENT_LEVEL)) { char tmp[64]; char *data = (char *)GPMF_RawData(&find_stream); int size = GPMF_RawDataSize(&find_stream); if (size < sizeof(tmp)) { memcpy(tmp, data, size); tmp[size] = 0; printf("of type %s ", tmp); } } } else { printf("of type %c ", type); } printf("with %d sample%s ", samples, samples > 1 ? "s" : ""); if (elements > 1) printf("-- %d elements per sample", elements); printf("\n"); } ret = GPMF_FindNext(ms, GPMF_KEY_STREAM, GPMF_RECURSE_LEVELS); } else { if (ret == GPMF_ERROR_BAD_STRUCTURE) // some payload element was corrupt, skip to the next valid GPMF KLV at the previous level. { ret = GPMF_Next(ms, GPMF_CURRENT_LEVEL); // this will be the next stream if any more are present. } } } GPMF_ResetState(ms); printf("\n"); } #endif #if 1 // Find GPS values and return scaled doubles. 
if (index == 0) // show first payload { if (GPMF_OK == GPMF_FindNext(ms, STR2FOURCC("GPS5"), GPMF_RECURSE_LEVELS) || //GoPro Hero5/6/7 GPS GPMF_OK == GPMF_FindNext(ms, STR2FOURCC("GPRI"), GPMF_RECURSE_LEVELS)) //GoPro Karma GPS { uint32_t key = GPMF_Key(ms); uint32_t samples = GPMF_Repeat(ms); uint32_t elements = GPMF_ElementsInStruct(ms); uint32_t buffersize = samples * elements * sizeof(double); GPMF_stream find_stream; double *ptr, *tmpbuffer = malloc(buffersize); char units[10][6] = { "" }; uint32_t unit_samples = 1; printf("MP4 Payload time %.3f to %.3f seconds\n", in, out); if (tmpbuffer && samples) { uint32_t i, j; //Search for any units to display GPMF_CopyState(ms, &find_stream); if (GPMF_OK == GPMF_FindPrev(&find_stream, GPMF_KEY_SI_UNITS, GPMF_CURRENT_LEVEL) || GPMF_OK == GPMF_FindPrev(&find_stream, GPMF_KEY_UNITS, GPMF_CURRENT_LEVEL)) { char *data = (char *)GPMF_RawData(&find_stream); int ssize = GPMF_StructSize(&find_stream); unit_samples = GPMF_Repeat(&find_stream); for (i = 0; i < unit_samples; i++) { memcpy(units[i], data, ssize); units[i][ssize] = 0; data += ssize; } } //GPMF_FormattedData(ms, tmpbuffer, buffersize, 0, samples); // Output data in LittleEnd, but no scale GPMF_ScaledData(ms, tmpbuffer, buffersize, 0, samples, GPMF_TYPE_DOUBLE); //Output scaled data as floats ptr = tmpbuffer; for (i = 0; i < samples; i++) { printf("%c%c%c%c ", PRINTF_4CC(key)); for (j = 0; j < elements; j++) printf("%.3f%s, ", *ptr++, units[j%unit_samples]); printf("\n"); } free(tmpbuffer); } } GPMF_ResetState(ms); printf("\n"); } #endif } #if 1 // Find all the available Streams and compute they sample rates while (GPMF_OK == GPMF_FindNext(ms, GPMF_KEY_STREAM, GPMF_RECURSE_LEVELS)) { if (GPMF_OK == GPMF_SeekToSamples(ms)) //find the last FOURCC within the stream { uint32_t fourcc = GPMF_Key(ms); double rate = GetGPMFSampleRate(mp4, fourcc, GPMF_SAMPLE_RATE_PRECISE);// GPMF_SAMPLE_RATE_FAST); printf("%c%c%c%c sampling rate = %f Hz\n", PRINTF_4CC(fourcc), rate); } } 
#endif cleanup: if (payload) FreePayload(payload); payload = NULL; CloseSource(mp4); } return ret; }
int main(int argc, char *argv[]) { int32_t ret = GPMF_OK; GPMF_stream metadata_stream, *ms = &metadata_stream; double metadatalength; uint32_t *payload = NULL; //buffer to store GPMF samples from the MP4. // get file return data if (argc != 2) { printf("usage: %s <file_with_GPMF>\n", argv[0]); return -1; } size_t mp4 = OpenMP4Source(argv[1], MOV_GPMF_TRAK_TYPE, MOV_GPMF_TRAK_SUBTYPE); if (mp4 == 0) { printf("error: %s is an invalid MP4/MOV\n", argv[1]); return -1; } // size_t mp4 = OpenMP4SourceUDTA(argv[1]); //Search for GPMF payload with MP4's udta metadatalength = GetDuration(mp4); if (metadatalength > 0.0) { uint32_t index, payloads = GetNumberPayloads(mp4); // printf("found %.2fs of metadata, from %d payloads, within %s\n", metadatalength, payloads, argv[1]); #if 1 if (payloads == 1) // Printf the contents of the single payload { uint32_t payloadsize = GetPayloadSize(mp4,0); payload = GetPayload(mp4, payload, 0); if(payload == NULL) goto cleanup; ret = GPMF_Init(ms, payload, payloadsize); if (ret != GPMF_OK) goto cleanup; // Output (printf) all the contained GPMF data within this payload ret = GPMF_Validate(ms, GPMF_RECURSE_LEVELS); // optional if (GPMF_OK != ret) { printf("Invalid Structure\n"); goto cleanup; } GPMF_ResetState(ms); do { PrintGPMF(ms); // printf current GPMF KLV } while (GPMF_OK == GPMF_Next(ms, GPMF_RECURSE_LEVELS)); GPMF_ResetState(ms); printf("\n"); } #endif for (index = 0; index < payloads; index++) { uint32_t payloadsize = GetPayloadSize(mp4, index); double in = 0.0, out = 0.0; //times payload = GetPayload(mp4, payload, index); if (payload == NULL) goto cleanup; ret = GetPayloadTime(mp4, index, &in, &out); if (ret != GPMF_OK) goto cleanup; ret = GPMF_Init(ms, payload, payloadsize); if (ret != GPMF_OK) goto cleanup; #if 1 // Find all the available Streams and the data carrying FourCC if (index == 0) // show first payload { ret = GPMF_FindNext(ms, GPMF_KEY_STREAM, GPMF_RECURSE_LEVELS); while (GPMF_OK == ret) { ret = GPMF_SeekToSamples(ms); 
if (GPMF_OK == ret) //find the last FOURCC within the stream { uint32_t key = GPMF_Key(ms); GPMF_SampleType type = GPMF_Type(ms); uint32_t elements = GPMF_ElementsInStruct(ms); //uint32_t samples = GPMF_Repeat(ms); uint32_t samples = GPMF_PayloadSampleCount(ms); if (samples) { printf(" STRM of %c%c%c%c ", PRINTF_4CC(key)); if (type == GPMF_TYPE_COMPLEX) { GPMF_stream find_stream; GPMF_CopyState(ms, &find_stream); if (GPMF_OK == GPMF_FindPrev(&find_stream, GPMF_KEY_TYPE, GPMF_CURRENT_LEVEL)) { char tmp[64]; char *data = (char *)GPMF_RawData(&find_stream); int size = GPMF_RawDataSize(&find_stream); if (size < sizeof(tmp)) { memcpy(tmp, data, size); tmp[size] = 0; printf("of type %s ", tmp); } } } else { printf("of type %c ", type); } printf("with %d sample%s ", samples, samples > 1 ? "s" : ""); if (elements > 1) printf("-- %d elements per sample", elements); printf("\n"); } ret = GPMF_FindNext(ms, GPMF_KEY_STREAM, GPMF_RECURSE_LEVELS); } else { if (ret == GPMF_ERROR_BAD_STRUCTURE) // some payload element was corrupt, skip to the next valid GPMF KLV at the previous level. { ret = GPMF_Next(ms, GPMF_CURRENT_LEVEL); // this will be the next stream if any more are present. } } } GPMF_ResetState(ms); printf("\n"); } #endif #if 1 // Find GPS values and return scaled doubles. 
if (index == 0) // show first payload { if (GPMF_OK == GPMF_FindNext(ms, STR2FOURCC("GPS5"), GPMF_RECURSE_LEVELS) || //GoPro Hero5/6/7 GPS GPMF_OK == GPMF_FindNext(ms, STR2FOURCC("GPRI"), GPMF_RECURSE_LEVELS)) //GoPro Karma GPS { uint32_t key = GPMF_Key(ms); uint32_t samples = GPMF_Repeat(ms); uint32_t elements = GPMF_ElementsInStruct(ms); uint32_t buffersize = samples * elements * sizeof(double); GPMF_stream find_stream; double *ptr, *tmpbuffer = malloc(buffersize); char units[10][6] = { "" }; uint32_t unit_samples = 1; printf("MP4 Payload time %.3f to %.3f seconds\n", in, out); if (tmpbuffer && samples) { uint32_t i, j; //Search for any units to display GPMF_CopyState(ms, &find_stream); if (GPMF_OK == GPMF_FindPrev(&find_stream, GPMF_KEY_SI_UNITS, GPMF_CURRENT_LEVEL) || GPMF_OK == GPMF_FindPrev(&find_stream, GPMF_KEY_UNITS, GPMF_CURRENT_LEVEL)) { char *data = (char *)GPMF_RawData(&find_stream); int ssize = GPMF_StructSize(&find_stream); unit_samples = GPMF_Repeat(&find_stream); for (i = 0; i < unit_samples; i++) { memcpy(units[i], data, ssize); units[i][ssize] = 0; data += ssize; } } //GPMF_FormattedData(ms, tmpbuffer, buffersize, 0, samples); // Output data in LittleEnd, but no scale GPMF_ScaledData(ms, tmpbuffer, buffersize, 0, samples, GPMF_TYPE_DOUBLE); //Output scaled data as floats ptr = tmpbuffer; for (i = 0; i < samples; i++) { printf("%c%c%c%c ", PRINTF_4CC(key)); for (j = 0; j < elements; j++) printf("%.3f%s, ", *ptr++, units[j%unit_samples]); printf("\n"); } free(tmpbuffer); } } GPMF_ResetState(ms); printf("\n"); } #endif } #if 1 // Find all the available Streams and compute they sample rates while (GPMF_OK == GPMF_FindNext(ms, GPMF_KEY_STREAM, GPMF_RECURSE_LEVELS)) { if (GPMF_OK == GPMF_SeekToSamples(ms)) //find the last FOURCC within the stream { double in = 0.0, out = 0.0; uint32_t fourcc = GPMF_Key(ms); double rate = GetGPMFSampleRate(mp4, fourcc, GPMF_SAMPLE_RATE_PRECISE, &in, &out);// GPMF_SAMPLE_RATE_FAST); printf("%c%c%c%c sampling rate = %f Hz 
(from %f to %f)\n", PRINTF_4CC(fourcc), rate, in, out); } } #endif cleanup: if (payload) FreePayload(payload); payload = NULL; CloseSource(mp4); } return ret; }
{'added': [(49, '\tif (mp4 == 0)'), (50, '\t{'), (51, '\t\tprintf("error: %s is an invalid MP4/MOV\\n", argv[1]);'), (52, '\t\treturn -1;'), (53, '\t}'), (54, ''), (99, '\t\t\tdouble in = 0.0, out = 0.0; //times'), (247, '\t\t\t\tdouble in = 0.0, out = 0.0;'), (249, '\t\t\t\tdouble rate = GetGPMFSampleRate(mp4, fourcc, GPMF_SAMPLE_RATE_PRECISE, &in, &out);// GPMF_SAMPLE_RATE_FAST);'), (250, '\t\t\t\tprintf("%c%c%c%c sampling rate = %f Hz (from %f to %f)\\n", PRINTF_4CC(fourcc), rate, in, out);')], 'deleted': [(93, '\t\t\tfloat in = 0.0, out = 0.0; //times'), (242, '\t\t\t\tdouble rate = GetGPMFSampleRate(mp4, fourcc, GPMF_SAMPLE_RATE_PRECISE);// GPMF_SAMPLE_RATE_FAST);'), (243, '\t\t\t\tprintf("%c%c%c%c sampling rate = %f Hz\\n", PRINTF_4CC(fourcc), rate);')]}
10
3
181
1,072
168
1,005
39
https://github.com/gopro/gpmf-parser
CVE-2019-15146
CWE-125
466
Python-ast.c
C
Assign
/* File automatically generated by Parser/asdl_c.py. */ #include <stddef.h> #include "Python.h" #include "Python-ast.h" static PyTypeObject AST_type; static PyTypeObject *mod_type; static PyObject* ast2obj_mod(void*); static PyTypeObject *Module_type; _Py_IDENTIFIER(body); static char *Module_fields[]={ "body", }; static PyTypeObject *Interactive_type; static char *Interactive_fields[]={ "body", }; static PyTypeObject *Expression_type; static char *Expression_fields[]={ "body", }; static PyTypeObject *Suite_type; static char *Suite_fields[]={ "body", }; static PyTypeObject *stmt_type; _Py_IDENTIFIER(lineno); _Py_IDENTIFIER(col_offset); _Py_IDENTIFIER(end_lineno); _Py_IDENTIFIER(end_col_offset); static char *stmt_attributes[] = { "lineno", "col_offset", "end_lineno", "end_col_offset", }; static PyObject* ast2obj_stmt(void*); static PyTypeObject *FunctionDef_type; _Py_IDENTIFIER(name); _Py_IDENTIFIER(args); _Py_IDENTIFIER(decorator_list); _Py_IDENTIFIER(returns); static char *FunctionDef_fields[]={ "name", "args", "body", "decorator_list", "returns", }; static PyTypeObject *AsyncFunctionDef_type; static char *AsyncFunctionDef_fields[]={ "name", "args", "body", "decorator_list", "returns", }; static PyTypeObject *ClassDef_type; _Py_IDENTIFIER(bases); _Py_IDENTIFIER(keywords); static char *ClassDef_fields[]={ "name", "bases", "keywords", "body", "decorator_list", }; static PyTypeObject *Return_type; _Py_IDENTIFIER(value); static char *Return_fields[]={ "value", }; static PyTypeObject *Delete_type; _Py_IDENTIFIER(targets); static char *Delete_fields[]={ "targets", }; static PyTypeObject *Assign_type; static char *Assign_fields[]={ "targets", "value", }; static PyTypeObject *AugAssign_type; _Py_IDENTIFIER(target); _Py_IDENTIFIER(op); static char *AugAssign_fields[]={ "target", "op", "value", }; static PyTypeObject *AnnAssign_type; _Py_IDENTIFIER(annotation); _Py_IDENTIFIER(simple); static char *AnnAssign_fields[]={ "target", "annotation", "value", "simple", }; static 
PyTypeObject *For_type; _Py_IDENTIFIER(iter); _Py_IDENTIFIER(orelse); static char *For_fields[]={ "target", "iter", "body", "orelse", }; static PyTypeObject *AsyncFor_type; static char *AsyncFor_fields[]={ "target", "iter", "body", "orelse", }; static PyTypeObject *While_type; _Py_IDENTIFIER(test); static char *While_fields[]={ "test", "body", "orelse", }; static PyTypeObject *If_type; static char *If_fields[]={ "test", "body", "orelse", }; static PyTypeObject *With_type; _Py_IDENTIFIER(items); static char *With_fields[]={ "items", "body", }; static PyTypeObject *AsyncWith_type; static char *AsyncWith_fields[]={ "items", "body", }; static PyTypeObject *Raise_type; _Py_IDENTIFIER(exc); _Py_IDENTIFIER(cause); static char *Raise_fields[]={ "exc", "cause", }; static PyTypeObject *Try_type; _Py_IDENTIFIER(handlers); _Py_IDENTIFIER(finalbody); static char *Try_fields[]={ "body", "handlers", "orelse", "finalbody", }; static PyTypeObject *Assert_type; _Py_IDENTIFIER(msg); static char *Assert_fields[]={ "test", "msg", }; static PyTypeObject *Import_type; _Py_IDENTIFIER(names); static char *Import_fields[]={ "names", }; static PyTypeObject *ImportFrom_type; _Py_IDENTIFIER(module); _Py_IDENTIFIER(level); static char *ImportFrom_fields[]={ "module", "names", "level", }; static PyTypeObject *Global_type; static char *Global_fields[]={ "names", }; static PyTypeObject *Nonlocal_type; static char *Nonlocal_fields[]={ "names", }; static PyTypeObject *Expr_type; static char *Expr_fields[]={ "value", }; static PyTypeObject *Pass_type; static PyTypeObject *Break_type; static PyTypeObject *Continue_type; static PyTypeObject *expr_type; static char *expr_attributes[] = { "lineno", "col_offset", "end_lineno", "end_col_offset", }; static PyObject* ast2obj_expr(void*); static PyTypeObject *BoolOp_type; _Py_IDENTIFIER(values); static char *BoolOp_fields[]={ "op", "values", }; static PyTypeObject *NamedExpr_type; static char *NamedExpr_fields[]={ "target", "value", }; static PyTypeObject 
*BinOp_type; _Py_IDENTIFIER(left); _Py_IDENTIFIER(right); static char *BinOp_fields[]={ "left", "op", "right", }; static PyTypeObject *UnaryOp_type; _Py_IDENTIFIER(operand); static char *UnaryOp_fields[]={ "op", "operand", }; static PyTypeObject *Lambda_type; static char *Lambda_fields[]={ "args", "body", }; static PyTypeObject *IfExp_type; static char *IfExp_fields[]={ "test", "body", "orelse", }; static PyTypeObject *Dict_type; _Py_IDENTIFIER(keys); static char *Dict_fields[]={ "keys", "values", }; static PyTypeObject *Set_type; _Py_IDENTIFIER(elts); static char *Set_fields[]={ "elts", }; static PyTypeObject *ListComp_type; _Py_IDENTIFIER(elt); _Py_IDENTIFIER(generators); static char *ListComp_fields[]={ "elt", "generators", }; static PyTypeObject *SetComp_type; static char *SetComp_fields[]={ "elt", "generators", }; static PyTypeObject *DictComp_type; _Py_IDENTIFIER(key); static char *DictComp_fields[]={ "key", "value", "generators", }; static PyTypeObject *GeneratorExp_type; static char *GeneratorExp_fields[]={ "elt", "generators", }; static PyTypeObject *Await_type; static char *Await_fields[]={ "value", }; static PyTypeObject *Yield_type; static char *Yield_fields[]={ "value", }; static PyTypeObject *YieldFrom_type; static char *YieldFrom_fields[]={ "value", }; static PyTypeObject *Compare_type; _Py_IDENTIFIER(ops); _Py_IDENTIFIER(comparators); static char *Compare_fields[]={ "left", "ops", "comparators", }; static PyTypeObject *Call_type; _Py_IDENTIFIER(func); static char *Call_fields[]={ "func", "args", "keywords", }; static PyTypeObject *FormattedValue_type; _Py_IDENTIFIER(conversion); _Py_IDENTIFIER(format_spec); static char *FormattedValue_fields[]={ "value", "conversion", "format_spec", }; static PyTypeObject *JoinedStr_type; static char *JoinedStr_fields[]={ "values", }; static PyTypeObject *Constant_type; static char *Constant_fields[]={ "value", }; static PyTypeObject *Attribute_type; _Py_IDENTIFIER(attr); _Py_IDENTIFIER(ctx); static char 
*Attribute_fields[]={ "value", "attr", "ctx", }; static PyTypeObject *Subscript_type; _Py_IDENTIFIER(slice); static char *Subscript_fields[]={ "value", "slice", "ctx", }; static PyTypeObject *Starred_type; static char *Starred_fields[]={ "value", "ctx", }; static PyTypeObject *Name_type; _Py_IDENTIFIER(id); static char *Name_fields[]={ "id", "ctx", }; static PyTypeObject *List_type; static char *List_fields[]={ "elts", "ctx", }; static PyTypeObject *Tuple_type; static char *Tuple_fields[]={ "elts", "ctx", }; static PyTypeObject *expr_context_type; static PyObject *Load_singleton, *Store_singleton, *Del_singleton, *AugLoad_singleton, *AugStore_singleton, *Param_singleton, *NamedStore_singleton; static PyObject* ast2obj_expr_context(expr_context_ty); static PyTypeObject *Load_type; static PyTypeObject *Store_type; static PyTypeObject *Del_type; static PyTypeObject *AugLoad_type; static PyTypeObject *AugStore_type; static PyTypeObject *Param_type; static PyTypeObject *NamedStore_type; static PyTypeObject *slice_type; static PyObject* ast2obj_slice(void*); static PyTypeObject *Slice_type; _Py_IDENTIFIER(lower); _Py_IDENTIFIER(upper); _Py_IDENTIFIER(step); static char *Slice_fields[]={ "lower", "upper", "step", }; static PyTypeObject *ExtSlice_type; _Py_IDENTIFIER(dims); static char *ExtSlice_fields[]={ "dims", }; static PyTypeObject *Index_type; static char *Index_fields[]={ "value", }; static PyTypeObject *boolop_type; static PyObject *And_singleton, *Or_singleton; static PyObject* ast2obj_boolop(boolop_ty); static PyTypeObject *And_type; static PyTypeObject *Or_type; static PyTypeObject *operator_type; static PyObject *Add_singleton, *Sub_singleton, *Mult_singleton, *MatMult_singleton, *Div_singleton, *Mod_singleton, *Pow_singleton, *LShift_singleton, *RShift_singleton, *BitOr_singleton, *BitXor_singleton, *BitAnd_singleton, *FloorDiv_singleton; static PyObject* ast2obj_operator(operator_ty); static PyTypeObject *Add_type; static PyTypeObject *Sub_type; static 
PyTypeObject *Mult_type; static PyTypeObject *MatMult_type; static PyTypeObject *Div_type; static PyTypeObject *Mod_type; static PyTypeObject *Pow_type; static PyTypeObject *LShift_type; static PyTypeObject *RShift_type; static PyTypeObject *BitOr_type; static PyTypeObject *BitXor_type; static PyTypeObject *BitAnd_type; static PyTypeObject *FloorDiv_type; static PyTypeObject *unaryop_type; static PyObject *Invert_singleton, *Not_singleton, *UAdd_singleton, *USub_singleton; static PyObject* ast2obj_unaryop(unaryop_ty); static PyTypeObject *Invert_type; static PyTypeObject *Not_type; static PyTypeObject *UAdd_type; static PyTypeObject *USub_type; static PyTypeObject *cmpop_type; static PyObject *Eq_singleton, *NotEq_singleton, *Lt_singleton, *LtE_singleton, *Gt_singleton, *GtE_singleton, *Is_singleton, *IsNot_singleton, *In_singleton, *NotIn_singleton; static PyObject* ast2obj_cmpop(cmpop_ty); static PyTypeObject *Eq_type; static PyTypeObject *NotEq_type; static PyTypeObject *Lt_type; static PyTypeObject *LtE_type; static PyTypeObject *Gt_type; static PyTypeObject *GtE_type; static PyTypeObject *Is_type; static PyTypeObject *IsNot_type; static PyTypeObject *In_type; static PyTypeObject *NotIn_type; static PyTypeObject *comprehension_type; static PyObject* ast2obj_comprehension(void*); _Py_IDENTIFIER(ifs); _Py_IDENTIFIER(is_async); static char *comprehension_fields[]={ "target", "iter", "ifs", "is_async", }; static PyTypeObject *excepthandler_type; static char *excepthandler_attributes[] = { "lineno", "col_offset", "end_lineno", "end_col_offset", }; static PyObject* ast2obj_excepthandler(void*); static PyTypeObject *ExceptHandler_type; _Py_IDENTIFIER(type); static char *ExceptHandler_fields[]={ "type", "name", "body", }; static PyTypeObject *arguments_type; static PyObject* ast2obj_arguments(void*); _Py_IDENTIFIER(vararg); _Py_IDENTIFIER(kwonlyargs); _Py_IDENTIFIER(kw_defaults); _Py_IDENTIFIER(kwarg); _Py_IDENTIFIER(defaults); static char *arguments_fields[]={ "args", 
"vararg", "kwonlyargs", "kw_defaults", "kwarg", "defaults", }; static PyTypeObject *arg_type; static PyObject* ast2obj_arg(void*); static char *arg_attributes[] = { "lineno", "col_offset", "end_lineno", "end_col_offset", }; _Py_IDENTIFIER(arg); static char *arg_fields[]={ "arg", "annotation", }; static PyTypeObject *keyword_type; static PyObject* ast2obj_keyword(void*); static char *keyword_fields[]={ "arg", "value", }; static PyTypeObject *alias_type; static PyObject* ast2obj_alias(void*); _Py_IDENTIFIER(asname); static char *alias_fields[]={ "name", "asname", }; static PyTypeObject *withitem_type; static PyObject* ast2obj_withitem(void*); _Py_IDENTIFIER(context_expr); _Py_IDENTIFIER(optional_vars); static char *withitem_fields[]={ "context_expr", "optional_vars", }; _Py_IDENTIFIER(_fields); _Py_IDENTIFIER(_attributes); typedef struct { PyObject_HEAD PyObject *dict; } AST_object; static void ast_dealloc(AST_object *self) { /* bpo-31095: UnTrack is needed before calling any callbacks */ PyObject_GC_UnTrack(self); Py_CLEAR(self->dict); Py_TYPE(self)->tp_free(self); } static int ast_traverse(AST_object *self, visitproc visit, void *arg) { Py_VISIT(self->dict); return 0; } static int ast_clear(AST_object *self) { Py_CLEAR(self->dict); return 0; } static int ast_type_init(PyObject *self, PyObject *args, PyObject *kw) { Py_ssize_t i, numfields = 0; int res = -1; PyObject *key, *value, *fields; if (_PyObject_LookupAttrId((PyObject*)Py_TYPE(self), &PyId__fields, &fields) < 0) { goto cleanup; } if (fields) { numfields = PySequence_Size(fields); if (numfields == -1) goto cleanup; } res = 0; /* if no error occurs, this stays 0 to the end */ if (numfields < PyTuple_GET_SIZE(args)) { PyErr_Format(PyExc_TypeError, "%.400s constructor takes at most " "%zd positional argument%s", Py_TYPE(self)->tp_name, numfields, numfields == 1 ? 
"" : "s"); res = -1; goto cleanup; } for (i = 0; i < PyTuple_GET_SIZE(args); i++) { /* cannot be reached when fields is NULL */ PyObject *name = PySequence_GetItem(fields, i); if (!name) { res = -1; goto cleanup; } res = PyObject_SetAttr(self, name, PyTuple_GET_ITEM(args, i)); Py_DECREF(name); if (res < 0) goto cleanup; } if (kw) { i = 0; /* needed by PyDict_Next */ while (PyDict_Next(kw, &i, &key, &value)) { res = PyObject_SetAttr(self, key, value); if (res < 0) goto cleanup; } } cleanup: Py_XDECREF(fields); return res; } /* Pickling support */ static PyObject * ast_type_reduce(PyObject *self, PyObject *unused) { _Py_IDENTIFIER(__dict__); PyObject *dict; if (_PyObject_LookupAttrId(self, &PyId___dict__, &dict) < 0) { return NULL; } if (dict) { return Py_BuildValue("O()N", Py_TYPE(self), dict); } return Py_BuildValue("O()", Py_TYPE(self)); } static PyMethodDef ast_type_methods[] = { {"__reduce__", ast_type_reduce, METH_NOARGS, NULL}, {NULL} }; static PyGetSetDef ast_type_getsets[] = { {"__dict__", PyObject_GenericGetDict, PyObject_GenericSetDict}, {NULL} }; static PyTypeObject AST_type = { PyVarObject_HEAD_INIT(&PyType_Type, 0) "_ast.AST", sizeof(AST_object), 0, (destructor)ast_dealloc, /* tp_dealloc */ 0, /* tp_print */ 0, /* tp_getattr */ 0, /* tp_setattr */ 0, /* tp_reserved */ 0, /* tp_repr */ 0, /* tp_as_number */ 0, /* tp_as_sequence */ 0, /* tp_as_mapping */ 0, /* tp_hash */ 0, /* tp_call */ 0, /* tp_str */ PyObject_GenericGetAttr, /* tp_getattro */ PyObject_GenericSetAttr, /* tp_setattro */ 0, /* tp_as_buffer */ Py_TPFLAGS_DEFAULT | Py_TPFLAGS_BASETYPE | Py_TPFLAGS_HAVE_GC, /* tp_flags */ 0, /* tp_doc */ (traverseproc)ast_traverse, /* tp_traverse */ (inquiry)ast_clear, /* tp_clear */ 0, /* tp_richcompare */ 0, /* tp_weaklistoffset */ 0, /* tp_iter */ 0, /* tp_iternext */ ast_type_methods, /* tp_methods */ 0, /* tp_members */ ast_type_getsets, /* tp_getset */ 0, /* tp_base */ 0, /* tp_dict */ 0, /* tp_descr_get */ 0, /* tp_descr_set */ offsetof(AST_object, 
dict),/* tp_dictoffset */ (initproc)ast_type_init, /* tp_init */ PyType_GenericAlloc, /* tp_alloc */ PyType_GenericNew, /* tp_new */ PyObject_GC_Del, /* tp_free */ }; static PyTypeObject* make_type(char *type, PyTypeObject* base, char**fields, int num_fields) { _Py_IDENTIFIER(__module__); _Py_IDENTIFIER(_ast); PyObject *fnames, *result; int i; fnames = PyTuple_New(num_fields); if (!fnames) return NULL; for (i = 0; i < num_fields; i++) { PyObject *field = PyUnicode_FromString(fields[i]); if (!field) { Py_DECREF(fnames); return NULL; } PyTuple_SET_ITEM(fnames, i, field); } result = PyObject_CallFunction((PyObject*)&PyType_Type, "s(O){OOOO}", type, base, _PyUnicode_FromId(&PyId__fields), fnames, _PyUnicode_FromId(&PyId___module__), _PyUnicode_FromId(&PyId__ast)); Py_DECREF(fnames); return (PyTypeObject*)result; } static int add_attributes(PyTypeObject* type, char**attrs, int num_fields) { int i, result; PyObject *s, *l = PyTuple_New(num_fields); if (!l) return 0; for (i = 0; i < num_fields; i++) { s = PyUnicode_FromString(attrs[i]); if (!s) { Py_DECREF(l); return 0; } PyTuple_SET_ITEM(l, i, s); } result = _PyObject_SetAttrId((PyObject*)type, &PyId__attributes, l) >= 0; Py_DECREF(l); return result; } /* Conversion AST -> Python */ static PyObject* ast2obj_list(asdl_seq *seq, PyObject* (*func)(void*)) { Py_ssize_t i, n = asdl_seq_LEN(seq); PyObject *result = PyList_New(n); PyObject *value; if (!result) return NULL; for (i = 0; i < n; i++) { value = func(asdl_seq_GET(seq, i)); if (!value) { Py_DECREF(result); return NULL; } PyList_SET_ITEM(result, i, value); } return result; } static PyObject* ast2obj_object(void *o) { if (!o) o = Py_None; Py_INCREF((PyObject*)o); return (PyObject*)o; } #define ast2obj_singleton ast2obj_object #define ast2obj_constant ast2obj_object #define ast2obj_identifier ast2obj_object #define ast2obj_string ast2obj_object #define ast2obj_bytes ast2obj_object static PyObject* ast2obj_int(long b) { return PyLong_FromLong(b); } /* Conversion Python -> 
AST */

/* Store `obj` into *out, normalizing Py_None to NULL.  The object is
 * registered with the arena so its reference is released when the arena
 * is freed.  Returns 0 on success, -1 on failure (exception set). */
static int obj2ast_object(PyObject* obj, PyObject** out, PyArena* arena)
{
    if (obj == Py_None)
        obj = NULL;
    if (obj) {
        if (PyArena_AddPyObject(arena, obj) < 0) {
            *out = NULL;
            return -1;
        }
        Py_INCREF(obj);
    }
    *out = obj;
    return 0;
}

/* Like obj2ast_object, but Py_None is kept as-is (None is a valid Constant
 * value) and the object is unconditionally arena-registered. */
static int obj2ast_constant(PyObject* obj, PyObject** out, PyArena* arena)
{
    if (PyArena_AddPyObject(arena, obj) < 0) {
        *out = NULL;
        return -1;
    }
    Py_INCREF(obj);
    *out = obj;
    return 0;
}

/* Validate that `obj` is a str (or None, for optional identifiers) before
 * storing it.  Returns nonzero on failure — callers only test != 0, so the
 * mixed 1/-1 error values across these helpers are interchangeable. */
static int obj2ast_identifier(PyObject* obj, PyObject** out, PyArena* arena)
{
    if (!PyUnicode_CheckExact(obj) && obj != Py_None) {
        PyErr_SetString(PyExc_TypeError, "AST identifier must be of type str");
        return 1;
    }
    return obj2ast_object(obj, out, arena);
}

/* Convert a Python int into a C int, rejecting non-ints and values that
 * overflow an int.  Returns 0 on success, nonzero on failure. */
static int obj2ast_int(PyObject* obj, int* out, PyArena* arena)
{
    int i;
    if (!PyLong_Check(obj)) {
        PyErr_Format(PyExc_ValueError, "invalid integer value: %R", obj);
        return 1;
    }
    i = _PyLong_AsInt(obj);
    if (i == -1 && PyErr_Occurred())
        return 1;
    *out = i;
    return 0;
}

/* Ready the base AST type and give it empty `_fields`/`_attributes`
 * tuples.  Returns 0 on success, -1 on failure. */
static int add_ast_fields(void)
{
    PyObject *empty_tuple, *d;
    if (PyType_Ready(&AST_type) < 0)
        return -1;
    d = AST_type.tp_dict;
    empty_tuple = PyTuple_New(0);
    if (!empty_tuple ||
        _PyDict_SetItemId(d, &PyId__fields, empty_tuple) < 0 ||
        _PyDict_SetItemId(d, &PyId__attributes, empty_tuple) < 0) {
        Py_XDECREF(empty_tuple);
        return -1;
    }
    Py_DECREF(empty_tuple);
    return 0;
}

/* Build every generated AST node type exactly once (idempotent via the
 * function-static `initialized` flag).  Returns 1 on success, 0 on failure.
 * NOTE(review): on a failed first call the partially-built *_type globals
 * are left set and `initialized` stays 0, so a retry re-creates types —
 * acceptable for interpreter startup, where failure is fatal anyway. */
static int init_types(void)
{
    static int initialized;
    if (initialized) return 1;
    if (add_ast_fields() < 0) return 0;
    mod_type = make_type("mod", &AST_type, NULL, 0);
    if (!mod_type) return 0;
    if (!add_attributes(mod_type, NULL, 0)) return 0;
    Module_type = make_type("Module", mod_type, Module_fields, 1);
    if (!Module_type) return 0;
    Interactive_type = make_type("Interactive", mod_type, Interactive_fields, 1);
    if (!Interactive_type) return 0;
    Expression_type = make_type("Expression", mod_type, Expression_fields, 1);
    if (!Expression_type) return 0;
    Suite_type = make_type("Suite", mod_type, Suite_fields, 1);
    if (!Suite_type) return 0;
    stmt_type = make_type("stmt", &AST_type, NULL, 0);
if (!stmt_type) return 0; if (!add_attributes(stmt_type, stmt_attributes, 4)) return 0; FunctionDef_type = make_type("FunctionDef", stmt_type, FunctionDef_fields, 5); if (!FunctionDef_type) return 0; AsyncFunctionDef_type = make_type("AsyncFunctionDef", stmt_type, AsyncFunctionDef_fields, 5); if (!AsyncFunctionDef_type) return 0; ClassDef_type = make_type("ClassDef", stmt_type, ClassDef_fields, 5); if (!ClassDef_type) return 0; Return_type = make_type("Return", stmt_type, Return_fields, 1); if (!Return_type) return 0; Delete_type = make_type("Delete", stmt_type, Delete_fields, 1); if (!Delete_type) return 0; Assign_type = make_type("Assign", stmt_type, Assign_fields, 2); if (!Assign_type) return 0; AugAssign_type = make_type("AugAssign", stmt_type, AugAssign_fields, 3); if (!AugAssign_type) return 0; AnnAssign_type = make_type("AnnAssign", stmt_type, AnnAssign_fields, 4); if (!AnnAssign_type) return 0; For_type = make_type("For", stmt_type, For_fields, 4); if (!For_type) return 0; AsyncFor_type = make_type("AsyncFor", stmt_type, AsyncFor_fields, 4); if (!AsyncFor_type) return 0; While_type = make_type("While", stmt_type, While_fields, 3); if (!While_type) return 0; If_type = make_type("If", stmt_type, If_fields, 3); if (!If_type) return 0; With_type = make_type("With", stmt_type, With_fields, 2); if (!With_type) return 0; AsyncWith_type = make_type("AsyncWith", stmt_type, AsyncWith_fields, 2); if (!AsyncWith_type) return 0; Raise_type = make_type("Raise", stmt_type, Raise_fields, 2); if (!Raise_type) return 0; Try_type = make_type("Try", stmt_type, Try_fields, 4); if (!Try_type) return 0; Assert_type = make_type("Assert", stmt_type, Assert_fields, 2); if (!Assert_type) return 0; Import_type = make_type("Import", stmt_type, Import_fields, 1); if (!Import_type) return 0; ImportFrom_type = make_type("ImportFrom", stmt_type, ImportFrom_fields, 3); if (!ImportFrom_type) return 0; Global_type = make_type("Global", stmt_type, Global_fields, 1); if (!Global_type) return 0; 
Nonlocal_type = make_type("Nonlocal", stmt_type, Nonlocal_fields, 1); if (!Nonlocal_type) return 0; Expr_type = make_type("Expr", stmt_type, Expr_fields, 1); if (!Expr_type) return 0; Pass_type = make_type("Pass", stmt_type, NULL, 0); if (!Pass_type) return 0; Break_type = make_type("Break", stmt_type, NULL, 0); if (!Break_type) return 0; Continue_type = make_type("Continue", stmt_type, NULL, 0); if (!Continue_type) return 0; expr_type = make_type("expr", &AST_type, NULL, 0); if (!expr_type) return 0; if (!add_attributes(expr_type, expr_attributes, 4)) return 0; BoolOp_type = make_type("BoolOp", expr_type, BoolOp_fields, 2); if (!BoolOp_type) return 0; NamedExpr_type = make_type("NamedExpr", expr_type, NamedExpr_fields, 2); if (!NamedExpr_type) return 0; BinOp_type = make_type("BinOp", expr_type, BinOp_fields, 3); if (!BinOp_type) return 0; UnaryOp_type = make_type("UnaryOp", expr_type, UnaryOp_fields, 2); if (!UnaryOp_type) return 0; Lambda_type = make_type("Lambda", expr_type, Lambda_fields, 2); if (!Lambda_type) return 0; IfExp_type = make_type("IfExp", expr_type, IfExp_fields, 3); if (!IfExp_type) return 0; Dict_type = make_type("Dict", expr_type, Dict_fields, 2); if (!Dict_type) return 0; Set_type = make_type("Set", expr_type, Set_fields, 1); if (!Set_type) return 0; ListComp_type = make_type("ListComp", expr_type, ListComp_fields, 2); if (!ListComp_type) return 0; SetComp_type = make_type("SetComp", expr_type, SetComp_fields, 2); if (!SetComp_type) return 0; DictComp_type = make_type("DictComp", expr_type, DictComp_fields, 3); if (!DictComp_type) return 0; GeneratorExp_type = make_type("GeneratorExp", expr_type, GeneratorExp_fields, 2); if (!GeneratorExp_type) return 0; Await_type = make_type("Await", expr_type, Await_fields, 1); if (!Await_type) return 0; Yield_type = make_type("Yield", expr_type, Yield_fields, 1); if (!Yield_type) return 0; YieldFrom_type = make_type("YieldFrom", expr_type, YieldFrom_fields, 1); if (!YieldFrom_type) return 0; Compare_type = 
make_type("Compare", expr_type, Compare_fields, 3); if (!Compare_type) return 0; Call_type = make_type("Call", expr_type, Call_fields, 3); if (!Call_type) return 0; FormattedValue_type = make_type("FormattedValue", expr_type, FormattedValue_fields, 3); if (!FormattedValue_type) return 0; JoinedStr_type = make_type("JoinedStr", expr_type, JoinedStr_fields, 1); if (!JoinedStr_type) return 0; Constant_type = make_type("Constant", expr_type, Constant_fields, 1); if (!Constant_type) return 0; Attribute_type = make_type("Attribute", expr_type, Attribute_fields, 3); if (!Attribute_type) return 0; Subscript_type = make_type("Subscript", expr_type, Subscript_fields, 3); if (!Subscript_type) return 0; Starred_type = make_type("Starred", expr_type, Starred_fields, 2); if (!Starred_type) return 0; Name_type = make_type("Name", expr_type, Name_fields, 2); if (!Name_type) return 0; List_type = make_type("List", expr_type, List_fields, 2); if (!List_type) return 0; Tuple_type = make_type("Tuple", expr_type, Tuple_fields, 2); if (!Tuple_type) return 0; expr_context_type = make_type("expr_context", &AST_type, NULL, 0); if (!expr_context_type) return 0; if (!add_attributes(expr_context_type, NULL, 0)) return 0; Load_type = make_type("Load", expr_context_type, NULL, 0); if (!Load_type) return 0; Load_singleton = PyType_GenericNew(Load_type, NULL, NULL); if (!Load_singleton) return 0; Store_type = make_type("Store", expr_context_type, NULL, 0); if (!Store_type) return 0; Store_singleton = PyType_GenericNew(Store_type, NULL, NULL); if (!Store_singleton) return 0; Del_type = make_type("Del", expr_context_type, NULL, 0); if (!Del_type) return 0; Del_singleton = PyType_GenericNew(Del_type, NULL, NULL); if (!Del_singleton) return 0; AugLoad_type = make_type("AugLoad", expr_context_type, NULL, 0); if (!AugLoad_type) return 0; AugLoad_singleton = PyType_GenericNew(AugLoad_type, NULL, NULL); if (!AugLoad_singleton) return 0; AugStore_type = make_type("AugStore", expr_context_type, NULL, 0); 
if (!AugStore_type) return 0; AugStore_singleton = PyType_GenericNew(AugStore_type, NULL, NULL); if (!AugStore_singleton) return 0; Param_type = make_type("Param", expr_context_type, NULL, 0); if (!Param_type) return 0; Param_singleton = PyType_GenericNew(Param_type, NULL, NULL); if (!Param_singleton) return 0; NamedStore_type = make_type("NamedStore", expr_context_type, NULL, 0); if (!NamedStore_type) return 0; NamedStore_singleton = PyType_GenericNew(NamedStore_type, NULL, NULL); if (!NamedStore_singleton) return 0; slice_type = make_type("slice", &AST_type, NULL, 0); if (!slice_type) return 0; if (!add_attributes(slice_type, NULL, 0)) return 0; Slice_type = make_type("Slice", slice_type, Slice_fields, 3); if (!Slice_type) return 0; ExtSlice_type = make_type("ExtSlice", slice_type, ExtSlice_fields, 1); if (!ExtSlice_type) return 0; Index_type = make_type("Index", slice_type, Index_fields, 1); if (!Index_type) return 0; boolop_type = make_type("boolop", &AST_type, NULL, 0); if (!boolop_type) return 0; if (!add_attributes(boolop_type, NULL, 0)) return 0; And_type = make_type("And", boolop_type, NULL, 0); if (!And_type) return 0; And_singleton = PyType_GenericNew(And_type, NULL, NULL); if (!And_singleton) return 0; Or_type = make_type("Or", boolop_type, NULL, 0); if (!Or_type) return 0; Or_singleton = PyType_GenericNew(Or_type, NULL, NULL); if (!Or_singleton) return 0; operator_type = make_type("operator", &AST_type, NULL, 0); if (!operator_type) return 0; if (!add_attributes(operator_type, NULL, 0)) return 0; Add_type = make_type("Add", operator_type, NULL, 0); if (!Add_type) return 0; Add_singleton = PyType_GenericNew(Add_type, NULL, NULL); if (!Add_singleton) return 0; Sub_type = make_type("Sub", operator_type, NULL, 0); if (!Sub_type) return 0; Sub_singleton = PyType_GenericNew(Sub_type, NULL, NULL); if (!Sub_singleton) return 0; Mult_type = make_type("Mult", operator_type, NULL, 0); if (!Mult_type) return 0; Mult_singleton = PyType_GenericNew(Mult_type, NULL, 
NULL); if (!Mult_singleton) return 0; MatMult_type = make_type("MatMult", operator_type, NULL, 0); if (!MatMult_type) return 0; MatMult_singleton = PyType_GenericNew(MatMult_type, NULL, NULL); if (!MatMult_singleton) return 0; Div_type = make_type("Div", operator_type, NULL, 0); if (!Div_type) return 0; Div_singleton = PyType_GenericNew(Div_type, NULL, NULL); if (!Div_singleton) return 0; Mod_type = make_type("Mod", operator_type, NULL, 0); if (!Mod_type) return 0; Mod_singleton = PyType_GenericNew(Mod_type, NULL, NULL); if (!Mod_singleton) return 0; Pow_type = make_type("Pow", operator_type, NULL, 0); if (!Pow_type) return 0; Pow_singleton = PyType_GenericNew(Pow_type, NULL, NULL); if (!Pow_singleton) return 0; LShift_type = make_type("LShift", operator_type, NULL, 0); if (!LShift_type) return 0; LShift_singleton = PyType_GenericNew(LShift_type, NULL, NULL); if (!LShift_singleton) return 0; RShift_type = make_type("RShift", operator_type, NULL, 0); if (!RShift_type) return 0; RShift_singleton = PyType_GenericNew(RShift_type, NULL, NULL); if (!RShift_singleton) return 0; BitOr_type = make_type("BitOr", operator_type, NULL, 0); if (!BitOr_type) return 0; BitOr_singleton = PyType_GenericNew(BitOr_type, NULL, NULL); if (!BitOr_singleton) return 0; BitXor_type = make_type("BitXor", operator_type, NULL, 0); if (!BitXor_type) return 0; BitXor_singleton = PyType_GenericNew(BitXor_type, NULL, NULL); if (!BitXor_singleton) return 0; BitAnd_type = make_type("BitAnd", operator_type, NULL, 0); if (!BitAnd_type) return 0; BitAnd_singleton = PyType_GenericNew(BitAnd_type, NULL, NULL); if (!BitAnd_singleton) return 0; FloorDiv_type = make_type("FloorDiv", operator_type, NULL, 0); if (!FloorDiv_type) return 0; FloorDiv_singleton = PyType_GenericNew(FloorDiv_type, NULL, NULL); if (!FloorDiv_singleton) return 0; unaryop_type = make_type("unaryop", &AST_type, NULL, 0); if (!unaryop_type) return 0; if (!add_attributes(unaryop_type, NULL, 0)) return 0; Invert_type = make_type("Invert", 
unaryop_type, NULL, 0); if (!Invert_type) return 0; Invert_singleton = PyType_GenericNew(Invert_type, NULL, NULL); if (!Invert_singleton) return 0; Not_type = make_type("Not", unaryop_type, NULL, 0); if (!Not_type) return 0; Not_singleton = PyType_GenericNew(Not_type, NULL, NULL); if (!Not_singleton) return 0; UAdd_type = make_type("UAdd", unaryop_type, NULL, 0); if (!UAdd_type) return 0; UAdd_singleton = PyType_GenericNew(UAdd_type, NULL, NULL); if (!UAdd_singleton) return 0; USub_type = make_type("USub", unaryop_type, NULL, 0); if (!USub_type) return 0; USub_singleton = PyType_GenericNew(USub_type, NULL, NULL); if (!USub_singleton) return 0; cmpop_type = make_type("cmpop", &AST_type, NULL, 0); if (!cmpop_type) return 0; if (!add_attributes(cmpop_type, NULL, 0)) return 0; Eq_type = make_type("Eq", cmpop_type, NULL, 0); if (!Eq_type) return 0; Eq_singleton = PyType_GenericNew(Eq_type, NULL, NULL); if (!Eq_singleton) return 0; NotEq_type = make_type("NotEq", cmpop_type, NULL, 0); if (!NotEq_type) return 0; NotEq_singleton = PyType_GenericNew(NotEq_type, NULL, NULL); if (!NotEq_singleton) return 0; Lt_type = make_type("Lt", cmpop_type, NULL, 0); if (!Lt_type) return 0; Lt_singleton = PyType_GenericNew(Lt_type, NULL, NULL); if (!Lt_singleton) return 0; LtE_type = make_type("LtE", cmpop_type, NULL, 0); if (!LtE_type) return 0; LtE_singleton = PyType_GenericNew(LtE_type, NULL, NULL); if (!LtE_singleton) return 0; Gt_type = make_type("Gt", cmpop_type, NULL, 0); if (!Gt_type) return 0; Gt_singleton = PyType_GenericNew(Gt_type, NULL, NULL); if (!Gt_singleton) return 0; GtE_type = make_type("GtE", cmpop_type, NULL, 0); if (!GtE_type) return 0; GtE_singleton = PyType_GenericNew(GtE_type, NULL, NULL); if (!GtE_singleton) return 0; Is_type = make_type("Is", cmpop_type, NULL, 0); if (!Is_type) return 0; Is_singleton = PyType_GenericNew(Is_type, NULL, NULL); if (!Is_singleton) return 0; IsNot_type = make_type("IsNot", cmpop_type, NULL, 0); if (!IsNot_type) return 0; 
IsNot_singleton = PyType_GenericNew(IsNot_type, NULL, NULL); if (!IsNot_singleton) return 0; In_type = make_type("In", cmpop_type, NULL, 0); if (!In_type) return 0; In_singleton = PyType_GenericNew(In_type, NULL, NULL); if (!In_singleton) return 0; NotIn_type = make_type("NotIn", cmpop_type, NULL, 0); if (!NotIn_type) return 0; NotIn_singleton = PyType_GenericNew(NotIn_type, NULL, NULL); if (!NotIn_singleton) return 0; comprehension_type = make_type("comprehension", &AST_type, comprehension_fields, 4); if (!comprehension_type) return 0; if (!add_attributes(comprehension_type, NULL, 0)) return 0; excepthandler_type = make_type("excepthandler", &AST_type, NULL, 0); if (!excepthandler_type) return 0; if (!add_attributes(excepthandler_type, excepthandler_attributes, 4)) return 0; ExceptHandler_type = make_type("ExceptHandler", excepthandler_type, ExceptHandler_fields, 3); if (!ExceptHandler_type) return 0; arguments_type = make_type("arguments", &AST_type, arguments_fields, 6); if (!arguments_type) return 0; if (!add_attributes(arguments_type, NULL, 0)) return 0; arg_type = make_type("arg", &AST_type, arg_fields, 2); if (!arg_type) return 0; if (!add_attributes(arg_type, arg_attributes, 4)) return 0; keyword_type = make_type("keyword", &AST_type, keyword_fields, 2); if (!keyword_type) return 0; if (!add_attributes(keyword_type, NULL, 0)) return 0; alias_type = make_type("alias", &AST_type, alias_fields, 2); if (!alias_type) return 0; if (!add_attributes(alias_type, NULL, 0)) return 0; withitem_type = make_type("withitem", &AST_type, withitem_fields, 2); if (!withitem_type) return 0; if (!add_attributes(withitem_type, NULL, 0)) return 0; initialized = 1; return 1; } static int obj2ast_mod(PyObject* obj, mod_ty* out, PyArena* arena); static int obj2ast_stmt(PyObject* obj, stmt_ty* out, PyArena* arena); static int obj2ast_expr(PyObject* obj, expr_ty* out, PyArena* arena); static int obj2ast_expr_context(PyObject* obj, expr_context_ty* out, PyArena* arena); static int 
obj2ast_slice(PyObject* obj, slice_ty* out, PyArena* arena);
static int obj2ast_boolop(PyObject* obj, boolop_ty* out, PyArena* arena);
static int obj2ast_operator(PyObject* obj, operator_ty* out, PyArena* arena);
static int obj2ast_unaryop(PyObject* obj, unaryop_ty* out, PyArena* arena);
static int obj2ast_cmpop(PyObject* obj, cmpop_ty* out, PyArena* arena);
static int obj2ast_comprehension(PyObject* obj, comprehension_ty* out, PyArena* arena);
static int obj2ast_excepthandler(PyObject* obj, excepthandler_ty* out, PyArena* arena);
static int obj2ast_arguments(PyObject* obj, arguments_ty* out, PyArena* arena);
static int obj2ast_arg(PyObject* obj, arg_ty* out, PyArena* arena);
static int obj2ast_keyword(PyObject* obj, keyword_ty* out, PyArena* arena);
static int obj2ast_alias(PyObject* obj, alias_ty* out, PyArena* arena);
static int obj2ast_withitem(PyObject* obj, withitem_ty* out, PyArena* arena);

/* Arena-allocated constructors for the concrete `mod` variants.  The
 * returned node is owned by the arena — callers must not free it.
 * Each returns NULL with an exception set on allocation failure or when
 * a required field is missing. */
mod_ty Module(asdl_seq * body, PyArena *arena)
{
    mod_ty p;
    p = (mod_ty)PyArena_Malloc(arena, sizeof(*p));
    if (!p)
        return NULL;
    p->kind = Module_kind;
    p->v.Module.body = body;
    return p;
}

mod_ty Interactive(asdl_seq * body, PyArena *arena)
{
    mod_ty p;
    p = (mod_ty)PyArena_Malloc(arena, sizeof(*p));
    if (!p)
        return NULL;
    p->kind = Interactive_kind;
    p->v.Interactive.body = body;
    return p;
}

mod_ty Expression(expr_ty body, PyArena *arena)
{
    mod_ty p;
    /* `body` is a required (non-sequence) field for Expression. */
    if (!body) {
        PyErr_SetString(PyExc_ValueError,
                        "field body is required for Expression");
        return NULL;
    }
    p = (mod_ty)PyArena_Malloc(arena, sizeof(*p));
    if (!p)
        return NULL;
    p->kind = Expression_kind;
    p->v.Expression.body = body;
    return p;
}

mod_ty Suite(asdl_seq * body, PyArena *arena)
{
    mod_ty p;
    p = (mod_ty)PyArena_Malloc(arena, sizeof(*p));
    if (!p)
        return NULL;
    p->kind = Suite_kind;
    p->v.Suite.body = body;
    return p;
}

/* Constructor for a FunctionDef statement node; `name` and `args` are
 * required, the sequence fields and `returns` may be empty/NULL. */
stmt_ty FunctionDef(identifier name, arguments_ty args, asdl_seq * body,
                    asdl_seq * decorator_list, expr_ty returns, int lineno,
                    int col_offset, int end_lineno, int end_col_offset,
                    PyArena *arena)
{
    stmt_ty p;
    if
(!name) { PyErr_SetString(PyExc_ValueError, "field name is required for FunctionDef"); return NULL; } if (!args) { PyErr_SetString(PyExc_ValueError, "field args is required for FunctionDef"); return NULL; } p = (stmt_ty)PyArena_Malloc(arena, sizeof(*p)); if (!p) return NULL; p->kind = FunctionDef_kind; p->v.FunctionDef.name = name; p->v.FunctionDef.args = args; p->v.FunctionDef.body = body; p->v.FunctionDef.decorator_list = decorator_list; p->v.FunctionDef.returns = returns; p->lineno = lineno; p->col_offset = col_offset; p->end_lineno = end_lineno; p->end_col_offset = end_col_offset; return p; } stmt_ty AsyncFunctionDef(identifier name, arguments_ty args, asdl_seq * body, asdl_seq * decorator_list, expr_ty returns, int lineno, int col_offset, int end_lineno, int end_col_offset, PyArena *arena) { stmt_ty p; if (!name) { PyErr_SetString(PyExc_ValueError, "field name is required for AsyncFunctionDef"); return NULL; } if (!args) { PyErr_SetString(PyExc_ValueError, "field args is required for AsyncFunctionDef"); return NULL; } p = (stmt_ty)PyArena_Malloc(arena, sizeof(*p)); if (!p) return NULL; p->kind = AsyncFunctionDef_kind; p->v.AsyncFunctionDef.name = name; p->v.AsyncFunctionDef.args = args; p->v.AsyncFunctionDef.body = body; p->v.AsyncFunctionDef.decorator_list = decorator_list; p->v.AsyncFunctionDef.returns = returns; p->lineno = lineno; p->col_offset = col_offset; p->end_lineno = end_lineno; p->end_col_offset = end_col_offset; return p; } stmt_ty ClassDef(identifier name, asdl_seq * bases, asdl_seq * keywords, asdl_seq * body, asdl_seq * decorator_list, int lineno, int col_offset, int end_lineno, int end_col_offset, PyArena *arena) { stmt_ty p; if (!name) { PyErr_SetString(PyExc_ValueError, "field name is required for ClassDef"); return NULL; } p = (stmt_ty)PyArena_Malloc(arena, sizeof(*p)); if (!p) return NULL; p->kind = ClassDef_kind; p->v.ClassDef.name = name; p->v.ClassDef.bases = bases; p->v.ClassDef.keywords = keywords; p->v.ClassDef.body = body; 
p->v.ClassDef.decorator_list = decorator_list; p->lineno = lineno; p->col_offset = col_offset; p->end_lineno = end_lineno; p->end_col_offset = end_col_offset; return p; } stmt_ty Return(expr_ty value, int lineno, int col_offset, int end_lineno, int end_col_offset, PyArena *arena) { stmt_ty p; p = (stmt_ty)PyArena_Malloc(arena, sizeof(*p)); if (!p) return NULL; p->kind = Return_kind; p->v.Return.value = value; p->lineno = lineno; p->col_offset = col_offset; p->end_lineno = end_lineno; p->end_col_offset = end_col_offset; return p; } stmt_ty Delete(asdl_seq * targets, int lineno, int col_offset, int end_lineno, int end_col_offset, PyArena *arena) { stmt_ty p; p = (stmt_ty)PyArena_Malloc(arena, sizeof(*p)); if (!p) return NULL; p->kind = Delete_kind; p->v.Delete.targets = targets; p->lineno = lineno; p->col_offset = col_offset; p->end_lineno = end_lineno; p->end_col_offset = end_col_offset; return p; } stmt_ty Assign(asdl_seq * targets, expr_ty value, int lineno, int col_offset, int end_lineno, int end_col_offset, PyArena *arena) { stmt_ty p; if (!value) { PyErr_SetString(PyExc_ValueError, "field value is required for Assign"); return NULL; } p = (stmt_ty)PyArena_Malloc(arena, sizeof(*p)); if (!p) return NULL; p->kind = Assign_kind; p->v.Assign.targets = targets; p->v.Assign.value = value; p->lineno = lineno; p->col_offset = col_offset; p->end_lineno = end_lineno; p->end_col_offset = end_col_offset; return p; } stmt_ty AugAssign(expr_ty target, operator_ty op, expr_ty value, int lineno, int col_offset, int end_lineno, int end_col_offset, PyArena *arena) { stmt_ty p; if (!target) { PyErr_SetString(PyExc_ValueError, "field target is required for AugAssign"); return NULL; } if (!op) { PyErr_SetString(PyExc_ValueError, "field op is required for AugAssign"); return NULL; } if (!value) { PyErr_SetString(PyExc_ValueError, "field value is required for AugAssign"); return NULL; } p = (stmt_ty)PyArena_Malloc(arena, sizeof(*p)); if (!p) return NULL; p->kind = AugAssign_kind; 
p->v.AugAssign.target = target; p->v.AugAssign.op = op; p->v.AugAssign.value = value; p->lineno = lineno; p->col_offset = col_offset; p->end_lineno = end_lineno; p->end_col_offset = end_col_offset; return p; } stmt_ty AnnAssign(expr_ty target, expr_ty annotation, expr_ty value, int simple, int lineno, int col_offset, int end_lineno, int end_col_offset, PyArena *arena) { stmt_ty p; if (!target) { PyErr_SetString(PyExc_ValueError, "field target is required for AnnAssign"); return NULL; } if (!annotation) { PyErr_SetString(PyExc_ValueError, "field annotation is required for AnnAssign"); return NULL; } p = (stmt_ty)PyArena_Malloc(arena, sizeof(*p)); if (!p) return NULL; p->kind = AnnAssign_kind; p->v.AnnAssign.target = target; p->v.AnnAssign.annotation = annotation; p->v.AnnAssign.value = value; p->v.AnnAssign.simple = simple; p->lineno = lineno; p->col_offset = col_offset; p->end_lineno = end_lineno; p->end_col_offset = end_col_offset; return p; } stmt_ty For(expr_ty target, expr_ty iter, asdl_seq * body, asdl_seq * orelse, int lineno, int col_offset, int end_lineno, int end_col_offset, PyArena *arena) { stmt_ty p; if (!target) { PyErr_SetString(PyExc_ValueError, "field target is required for For"); return NULL; } if (!iter) { PyErr_SetString(PyExc_ValueError, "field iter is required for For"); return NULL; } p = (stmt_ty)PyArena_Malloc(arena, sizeof(*p)); if (!p) return NULL; p->kind = For_kind; p->v.For.target = target; p->v.For.iter = iter; p->v.For.body = body; p->v.For.orelse = orelse; p->lineno = lineno; p->col_offset = col_offset; p->end_lineno = end_lineno; p->end_col_offset = end_col_offset; return p; } stmt_ty AsyncFor(expr_ty target, expr_ty iter, asdl_seq * body, asdl_seq * orelse, int lineno, int col_offset, int end_lineno, int end_col_offset, PyArena *arena) { stmt_ty p; if (!target) { PyErr_SetString(PyExc_ValueError, "field target is required for AsyncFor"); return NULL; } if (!iter) { PyErr_SetString(PyExc_ValueError, "field iter is required for 
AsyncFor"); return NULL; } p = (stmt_ty)PyArena_Malloc(arena, sizeof(*p)); if (!p) return NULL; p->kind = AsyncFor_kind; p->v.AsyncFor.target = target; p->v.AsyncFor.iter = iter; p->v.AsyncFor.body = body; p->v.AsyncFor.orelse = orelse; p->lineno = lineno; p->col_offset = col_offset; p->end_lineno = end_lineno; p->end_col_offset = end_col_offset; return p; } stmt_ty While(expr_ty test, asdl_seq * body, asdl_seq * orelse, int lineno, int col_offset, int end_lineno, int end_col_offset, PyArena *arena) { stmt_ty p; if (!test) { PyErr_SetString(PyExc_ValueError, "field test is required for While"); return NULL; } p = (stmt_ty)PyArena_Malloc(arena, sizeof(*p)); if (!p) return NULL; p->kind = While_kind; p->v.While.test = test; p->v.While.body = body; p->v.While.orelse = orelse; p->lineno = lineno; p->col_offset = col_offset; p->end_lineno = end_lineno; p->end_col_offset = end_col_offset; return p; } stmt_ty If(expr_ty test, asdl_seq * body, asdl_seq * orelse, int lineno, int col_offset, int end_lineno, int end_col_offset, PyArena *arena) { stmt_ty p; if (!test) { PyErr_SetString(PyExc_ValueError, "field test is required for If"); return NULL; } p = (stmt_ty)PyArena_Malloc(arena, sizeof(*p)); if (!p) return NULL; p->kind = If_kind; p->v.If.test = test; p->v.If.body = body; p->v.If.orelse = orelse; p->lineno = lineno; p->col_offset = col_offset; p->end_lineno = end_lineno; p->end_col_offset = end_col_offset; return p; } stmt_ty With(asdl_seq * items, asdl_seq * body, int lineno, int col_offset, int end_lineno, int end_col_offset, PyArena *arena) { stmt_ty p; p = (stmt_ty)PyArena_Malloc(arena, sizeof(*p)); if (!p) return NULL; p->kind = With_kind; p->v.With.items = items; p->v.With.body = body; p->lineno = lineno; p->col_offset = col_offset; p->end_lineno = end_lineno; p->end_col_offset = end_col_offset; return p; } stmt_ty AsyncWith(asdl_seq * items, asdl_seq * body, int lineno, int col_offset, int end_lineno, int end_col_offset, PyArena *arena) { stmt_ty p; p = 
(stmt_ty)PyArena_Malloc(arena, sizeof(*p)); if (!p) return NULL; p->kind = AsyncWith_kind; p->v.AsyncWith.items = items; p->v.AsyncWith.body = body; p->lineno = lineno; p->col_offset = col_offset; p->end_lineno = end_lineno; p->end_col_offset = end_col_offset; return p; } stmt_ty Raise(expr_ty exc, expr_ty cause, int lineno, int col_offset, int end_lineno, int end_col_offset, PyArena *arena) { stmt_ty p; p = (stmt_ty)PyArena_Malloc(arena, sizeof(*p)); if (!p) return NULL; p->kind = Raise_kind; p->v.Raise.exc = exc; p->v.Raise.cause = cause; p->lineno = lineno; p->col_offset = col_offset; p->end_lineno = end_lineno; p->end_col_offset = end_col_offset; return p; } stmt_ty Try(asdl_seq * body, asdl_seq * handlers, asdl_seq * orelse, asdl_seq * finalbody, int lineno, int col_offset, int end_lineno, int end_col_offset, PyArena *arena) { stmt_ty p; p = (stmt_ty)PyArena_Malloc(arena, sizeof(*p)); if (!p) return NULL; p->kind = Try_kind; p->v.Try.body = body; p->v.Try.handlers = handlers; p->v.Try.orelse = orelse; p->v.Try.finalbody = finalbody; p->lineno = lineno; p->col_offset = col_offset; p->end_lineno = end_lineno; p->end_col_offset = end_col_offset; return p; } stmt_ty Assert(expr_ty test, expr_ty msg, int lineno, int col_offset, int end_lineno, int end_col_offset, PyArena *arena) { stmt_ty p; if (!test) { PyErr_SetString(PyExc_ValueError, "field test is required for Assert"); return NULL; } p = (stmt_ty)PyArena_Malloc(arena, sizeof(*p)); if (!p) return NULL; p->kind = Assert_kind; p->v.Assert.test = test; p->v.Assert.msg = msg; p->lineno = lineno; p->col_offset = col_offset; p->end_lineno = end_lineno; p->end_col_offset = end_col_offset; return p; } stmt_ty Import(asdl_seq * names, int lineno, int col_offset, int end_lineno, int end_col_offset, PyArena *arena) { stmt_ty p; p = (stmt_ty)PyArena_Malloc(arena, sizeof(*p)); if (!p) return NULL; p->kind = Import_kind; p->v.Import.names = names; p->lineno = lineno; p->col_offset = col_offset; p->end_lineno = end_lineno; 
p->end_col_offset = end_col_offset; return p; } stmt_ty ImportFrom(identifier module, asdl_seq * names, int level, int lineno, int col_offset, int end_lineno, int end_col_offset, PyArena *arena) { stmt_ty p; p = (stmt_ty)PyArena_Malloc(arena, sizeof(*p)); if (!p) return NULL; p->kind = ImportFrom_kind; p->v.ImportFrom.module = module; p->v.ImportFrom.names = names; p->v.ImportFrom.level = level; p->lineno = lineno; p->col_offset = col_offset; p->end_lineno = end_lineno; p->end_col_offset = end_col_offset; return p; } stmt_ty Global(asdl_seq * names, int lineno, int col_offset, int end_lineno, int end_col_offset, PyArena *arena) { stmt_ty p; p = (stmt_ty)PyArena_Malloc(arena, sizeof(*p)); if (!p) return NULL; p->kind = Global_kind; p->v.Global.names = names; p->lineno = lineno; p->col_offset = col_offset; p->end_lineno = end_lineno; p->end_col_offset = end_col_offset; return p; } stmt_ty Nonlocal(asdl_seq * names, int lineno, int col_offset, int end_lineno, int end_col_offset, PyArena *arena) { stmt_ty p; p = (stmt_ty)PyArena_Malloc(arena, sizeof(*p)); if (!p) return NULL; p->kind = Nonlocal_kind; p->v.Nonlocal.names = names; p->lineno = lineno; p->col_offset = col_offset; p->end_lineno = end_lineno; p->end_col_offset = end_col_offset; return p; } stmt_ty Expr(expr_ty value, int lineno, int col_offset, int end_lineno, int end_col_offset, PyArena *arena) { stmt_ty p; if (!value) { PyErr_SetString(PyExc_ValueError, "field value is required for Expr"); return NULL; } p = (stmt_ty)PyArena_Malloc(arena, sizeof(*p)); if (!p) return NULL; p->kind = Expr_kind; p->v.Expr.value = value; p->lineno = lineno; p->col_offset = col_offset; p->end_lineno = end_lineno; p->end_col_offset = end_col_offset; return p; } stmt_ty Pass(int lineno, int col_offset, int end_lineno, int end_col_offset, PyArena *arena) { stmt_ty p; p = (stmt_ty)PyArena_Malloc(arena, sizeof(*p)); if (!p) return NULL; p->kind = Pass_kind; p->lineno = lineno; p->col_offset = col_offset; p->end_lineno = end_lineno; 
p->end_col_offset = end_col_offset; return p; } stmt_ty Break(int lineno, int col_offset, int end_lineno, int end_col_offset, PyArena *arena) { stmt_ty p; p = (stmt_ty)PyArena_Malloc(arena, sizeof(*p)); if (!p) return NULL; p->kind = Break_kind; p->lineno = lineno; p->col_offset = col_offset; p->end_lineno = end_lineno; p->end_col_offset = end_col_offset; return p; } stmt_ty Continue(int lineno, int col_offset, int end_lineno, int end_col_offset, PyArena *arena) { stmt_ty p; p = (stmt_ty)PyArena_Malloc(arena, sizeof(*p)); if (!p) return NULL; p->kind = Continue_kind; p->lineno = lineno; p->col_offset = col_offset; p->end_lineno = end_lineno; p->end_col_offset = end_col_offset; return p; } expr_ty BoolOp(boolop_ty op, asdl_seq * values, int lineno, int col_offset, int end_lineno, int end_col_offset, PyArena *arena) { expr_ty p; if (!op) { PyErr_SetString(PyExc_ValueError, "field op is required for BoolOp"); return NULL; } p = (expr_ty)PyArena_Malloc(arena, sizeof(*p)); if (!p) return NULL; p->kind = BoolOp_kind; p->v.BoolOp.op = op; p->v.BoolOp.values = values; p->lineno = lineno; p->col_offset = col_offset; p->end_lineno = end_lineno; p->end_col_offset = end_col_offset; return p; } expr_ty NamedExpr(expr_ty target, expr_ty value, int lineno, int col_offset, int end_lineno, int end_col_offset, PyArena *arena) { expr_ty p; if (!target) { PyErr_SetString(PyExc_ValueError, "field target is required for NamedExpr"); return NULL; } if (!value) { PyErr_SetString(PyExc_ValueError, "field value is required for NamedExpr"); return NULL; } p = (expr_ty)PyArena_Malloc(arena, sizeof(*p)); if (!p) return NULL; p->kind = NamedExpr_kind; p->v.NamedExpr.target = target; p->v.NamedExpr.value = value; p->lineno = lineno; p->col_offset = col_offset; p->end_lineno = end_lineno; p->end_col_offset = end_col_offset; return p; } expr_ty BinOp(expr_ty left, operator_ty op, expr_ty right, int lineno, int col_offset, int end_lineno, int end_col_offset, PyArena *arena) { expr_ty p; if (!left) 
{ PyErr_SetString(PyExc_ValueError, "field left is required for BinOp"); return NULL; } if (!op) { PyErr_SetString(PyExc_ValueError, "field op is required for BinOp"); return NULL; } if (!right) { PyErr_SetString(PyExc_ValueError, "field right is required for BinOp"); return NULL; } p = (expr_ty)PyArena_Malloc(arena, sizeof(*p)); if (!p) return NULL; p->kind = BinOp_kind; p->v.BinOp.left = left; p->v.BinOp.op = op; p->v.BinOp.right = right; p->lineno = lineno; p->col_offset = col_offset; p->end_lineno = end_lineno; p->end_col_offset = end_col_offset; return p; } expr_ty UnaryOp(unaryop_ty op, expr_ty operand, int lineno, int col_offset, int end_lineno, int end_col_offset, PyArena *arena) { expr_ty p; if (!op) { PyErr_SetString(PyExc_ValueError, "field op is required for UnaryOp"); return NULL; } if (!operand) { PyErr_SetString(PyExc_ValueError, "field operand is required for UnaryOp"); return NULL; } p = (expr_ty)PyArena_Malloc(arena, sizeof(*p)); if (!p) return NULL; p->kind = UnaryOp_kind; p->v.UnaryOp.op = op; p->v.UnaryOp.operand = operand; p->lineno = lineno; p->col_offset = col_offset; p->end_lineno = end_lineno; p->end_col_offset = end_col_offset; return p; } expr_ty Lambda(arguments_ty args, expr_ty body, int lineno, int col_offset, int end_lineno, int end_col_offset, PyArena *arena) { expr_ty p; if (!args) { PyErr_SetString(PyExc_ValueError, "field args is required for Lambda"); return NULL; } if (!body) { PyErr_SetString(PyExc_ValueError, "field body is required for Lambda"); return NULL; } p = (expr_ty)PyArena_Malloc(arena, sizeof(*p)); if (!p) return NULL; p->kind = Lambda_kind; p->v.Lambda.args = args; p->v.Lambda.body = body; p->lineno = lineno; p->col_offset = col_offset; p->end_lineno = end_lineno; p->end_col_offset = end_col_offset; return p; } expr_ty IfExp(expr_ty test, expr_ty body, expr_ty orelse, int lineno, int col_offset, int end_lineno, int end_col_offset, PyArena *arena) { expr_ty p; if (!test) { PyErr_SetString(PyExc_ValueError, "field 
test is required for IfExp"); return NULL; } if (!body) { PyErr_SetString(PyExc_ValueError, "field body is required for IfExp"); return NULL; } if (!orelse) { PyErr_SetString(PyExc_ValueError, "field orelse is required for IfExp"); return NULL; } p = (expr_ty)PyArena_Malloc(arena, sizeof(*p)); if (!p) return NULL; p->kind = IfExp_kind; p->v.IfExp.test = test; p->v.IfExp.body = body; p->v.IfExp.orelse = orelse; p->lineno = lineno; p->col_offset = col_offset; p->end_lineno = end_lineno; p->end_col_offset = end_col_offset; return p; } expr_ty Dict(asdl_seq * keys, asdl_seq * values, int lineno, int col_offset, int end_lineno, int end_col_offset, PyArena *arena) { expr_ty p; p = (expr_ty)PyArena_Malloc(arena, sizeof(*p)); if (!p) return NULL; p->kind = Dict_kind; p->v.Dict.keys = keys; p->v.Dict.values = values; p->lineno = lineno; p->col_offset = col_offset; p->end_lineno = end_lineno; p->end_col_offset = end_col_offset; return p; } expr_ty Set(asdl_seq * elts, int lineno, int col_offset, int end_lineno, int end_col_offset, PyArena *arena) { expr_ty p; p = (expr_ty)PyArena_Malloc(arena, sizeof(*p)); if (!p) return NULL; p->kind = Set_kind; p->v.Set.elts = elts; p->lineno = lineno; p->col_offset = col_offset; p->end_lineno = end_lineno; p->end_col_offset = end_col_offset; return p; } expr_ty ListComp(expr_ty elt, asdl_seq * generators, int lineno, int col_offset, int end_lineno, int end_col_offset, PyArena *arena) { expr_ty p; if (!elt) { PyErr_SetString(PyExc_ValueError, "field elt is required for ListComp"); return NULL; } p = (expr_ty)PyArena_Malloc(arena, sizeof(*p)); if (!p) return NULL; p->kind = ListComp_kind; p->v.ListComp.elt = elt; p->v.ListComp.generators = generators; p->lineno = lineno; p->col_offset = col_offset; p->end_lineno = end_lineno; p->end_col_offset = end_col_offset; return p; } expr_ty SetComp(expr_ty elt, asdl_seq * generators, int lineno, int col_offset, int end_lineno, int end_col_offset, PyArena *arena) { expr_ty p; if (!elt) { 
PyErr_SetString(PyExc_ValueError, "field elt is required for SetComp"); return NULL; } p = (expr_ty)PyArena_Malloc(arena, sizeof(*p)); if (!p) return NULL; p->kind = SetComp_kind; p->v.SetComp.elt = elt; p->v.SetComp.generators = generators; p->lineno = lineno; p->col_offset = col_offset; p->end_lineno = end_lineno; p->end_col_offset = end_col_offset; return p; } expr_ty DictComp(expr_ty key, expr_ty value, asdl_seq * generators, int lineno, int col_offset, int end_lineno, int end_col_offset, PyArena *arena) { expr_ty p; if (!key) { PyErr_SetString(PyExc_ValueError, "field key is required for DictComp"); return NULL; } if (!value) { PyErr_SetString(PyExc_ValueError, "field value is required for DictComp"); return NULL; } p = (expr_ty)PyArena_Malloc(arena, sizeof(*p)); if (!p) return NULL; p->kind = DictComp_kind; p->v.DictComp.key = key; p->v.DictComp.value = value; p->v.DictComp.generators = generators; p->lineno = lineno; p->col_offset = col_offset; p->end_lineno = end_lineno; p->end_col_offset = end_col_offset; return p; } expr_ty GeneratorExp(expr_ty elt, asdl_seq * generators, int lineno, int col_offset, int end_lineno, int end_col_offset, PyArena *arena) { expr_ty p; if (!elt) { PyErr_SetString(PyExc_ValueError, "field elt is required for GeneratorExp"); return NULL; } p = (expr_ty)PyArena_Malloc(arena, sizeof(*p)); if (!p) return NULL; p->kind = GeneratorExp_kind; p->v.GeneratorExp.elt = elt; p->v.GeneratorExp.generators = generators; p->lineno = lineno; p->col_offset = col_offset; p->end_lineno = end_lineno; p->end_col_offset = end_col_offset; return p; } expr_ty Await(expr_ty value, int lineno, int col_offset, int end_lineno, int end_col_offset, PyArena *arena) { expr_ty p; if (!value) { PyErr_SetString(PyExc_ValueError, "field value is required for Await"); return NULL; } p = (expr_ty)PyArena_Malloc(arena, sizeof(*p)); if (!p) return NULL; p->kind = Await_kind; p->v.Await.value = value; p->lineno = lineno; p->col_offset = col_offset; p->end_lineno = 
end_lineno; p->end_col_offset = end_col_offset; return p; } expr_ty Yield(expr_ty value, int lineno, int col_offset, int end_lineno, int end_col_offset, PyArena *arena) { expr_ty p; p = (expr_ty)PyArena_Malloc(arena, sizeof(*p)); if (!p) return NULL; p->kind = Yield_kind; p->v.Yield.value = value; p->lineno = lineno; p->col_offset = col_offset; p->end_lineno = end_lineno; p->end_col_offset = end_col_offset; return p; } expr_ty YieldFrom(expr_ty value, int lineno, int col_offset, int end_lineno, int end_col_offset, PyArena *arena) { expr_ty p; if (!value) { PyErr_SetString(PyExc_ValueError, "field value is required for YieldFrom"); return NULL; } p = (expr_ty)PyArena_Malloc(arena, sizeof(*p)); if (!p) return NULL; p->kind = YieldFrom_kind; p->v.YieldFrom.value = value; p->lineno = lineno; p->col_offset = col_offset; p->end_lineno = end_lineno; p->end_col_offset = end_col_offset; return p; } expr_ty Compare(expr_ty left, asdl_int_seq * ops, asdl_seq * comparators, int lineno, int col_offset, int end_lineno, int end_col_offset, PyArena *arena) { expr_ty p; if (!left) { PyErr_SetString(PyExc_ValueError, "field left is required for Compare"); return NULL; } p = (expr_ty)PyArena_Malloc(arena, sizeof(*p)); if (!p) return NULL; p->kind = Compare_kind; p->v.Compare.left = left; p->v.Compare.ops = ops; p->v.Compare.comparators = comparators; p->lineno = lineno; p->col_offset = col_offset; p->end_lineno = end_lineno; p->end_col_offset = end_col_offset; return p; } expr_ty Call(expr_ty func, asdl_seq * args, asdl_seq * keywords, int lineno, int col_offset, int end_lineno, int end_col_offset, PyArena *arena) { expr_ty p; if (!func) { PyErr_SetString(PyExc_ValueError, "field func is required for Call"); return NULL; } p = (expr_ty)PyArena_Malloc(arena, sizeof(*p)); if (!p) return NULL; p->kind = Call_kind; p->v.Call.func = func; p->v.Call.args = args; p->v.Call.keywords = keywords; p->lineno = lineno; p->col_offset = col_offset; p->end_lineno = end_lineno; p->end_col_offset = 
end_col_offset; return p; } expr_ty FormattedValue(expr_ty value, int conversion, expr_ty format_spec, int lineno, int col_offset, int end_lineno, int end_col_offset, PyArena *arena) { expr_ty p; if (!value) { PyErr_SetString(PyExc_ValueError, "field value is required for FormattedValue"); return NULL; } p = (expr_ty)PyArena_Malloc(arena, sizeof(*p)); if (!p) return NULL; p->kind = FormattedValue_kind; p->v.FormattedValue.value = value; p->v.FormattedValue.conversion = conversion; p->v.FormattedValue.format_spec = format_spec; p->lineno = lineno; p->col_offset = col_offset; p->end_lineno = end_lineno; p->end_col_offset = end_col_offset; return p; } expr_ty JoinedStr(asdl_seq * values, int lineno, int col_offset, int end_lineno, int end_col_offset, PyArena *arena) { expr_ty p; p = (expr_ty)PyArena_Malloc(arena, sizeof(*p)); if (!p) return NULL; p->kind = JoinedStr_kind; p->v.JoinedStr.values = values; p->lineno = lineno; p->col_offset = col_offset; p->end_lineno = end_lineno; p->end_col_offset = end_col_offset; return p; } expr_ty Constant(constant value, int lineno, int col_offset, int end_lineno, int end_col_offset, PyArena *arena) { expr_ty p; if (!value) { PyErr_SetString(PyExc_ValueError, "field value is required for Constant"); return NULL; } p = (expr_ty)PyArena_Malloc(arena, sizeof(*p)); if (!p) return NULL; p->kind = Constant_kind; p->v.Constant.value = value; p->lineno = lineno; p->col_offset = col_offset; p->end_lineno = end_lineno; p->end_col_offset = end_col_offset; return p; } expr_ty Attribute(expr_ty value, identifier attr, expr_context_ty ctx, int lineno, int col_offset, int end_lineno, int end_col_offset, PyArena *arena) { expr_ty p; if (!value) { PyErr_SetString(PyExc_ValueError, "field value is required for Attribute"); return NULL; } if (!attr) { PyErr_SetString(PyExc_ValueError, "field attr is required for Attribute"); return NULL; } if (!ctx) { PyErr_SetString(PyExc_ValueError, "field ctx is required for Attribute"); return NULL; } p = 
(expr_ty)PyArena_Malloc(arena, sizeof(*p)); if (!p) return NULL; p->kind = Attribute_kind; p->v.Attribute.value = value; p->v.Attribute.attr = attr; p->v.Attribute.ctx = ctx; p->lineno = lineno; p->col_offset = col_offset; p->end_lineno = end_lineno; p->end_col_offset = end_col_offset; return p; } expr_ty Subscript(expr_ty value, slice_ty slice, expr_context_ty ctx, int lineno, int col_offset, int end_lineno, int end_col_offset, PyArena *arena) { expr_ty p; if (!value) { PyErr_SetString(PyExc_ValueError, "field value is required for Subscript"); return NULL; } if (!slice) { PyErr_SetString(PyExc_ValueError, "field slice is required for Subscript"); return NULL; } if (!ctx) { PyErr_SetString(PyExc_ValueError, "field ctx is required for Subscript"); return NULL; } p = (expr_ty)PyArena_Malloc(arena, sizeof(*p)); if (!p) return NULL; p->kind = Subscript_kind; p->v.Subscript.value = value; p->v.Subscript.slice = slice; p->v.Subscript.ctx = ctx; p->lineno = lineno; p->col_offset = col_offset; p->end_lineno = end_lineno; p->end_col_offset = end_col_offset; return p; } expr_ty Starred(expr_ty value, expr_context_ty ctx, int lineno, int col_offset, int end_lineno, int end_col_offset, PyArena *arena) { expr_ty p; if (!value) { PyErr_SetString(PyExc_ValueError, "field value is required for Starred"); return NULL; } if (!ctx) { PyErr_SetString(PyExc_ValueError, "field ctx is required for Starred"); return NULL; } p = (expr_ty)PyArena_Malloc(arena, sizeof(*p)); if (!p) return NULL; p->kind = Starred_kind; p->v.Starred.value = value; p->v.Starred.ctx = ctx; p->lineno = lineno; p->col_offset = col_offset; p->end_lineno = end_lineno; p->end_col_offset = end_col_offset; return p; } expr_ty Name(identifier id, expr_context_ty ctx, int lineno, int col_offset, int end_lineno, int end_col_offset, PyArena *arena) { expr_ty p; if (!id) { PyErr_SetString(PyExc_ValueError, "field id is required for Name"); return NULL; } if (!ctx) { PyErr_SetString(PyExc_ValueError, "field ctx is required 
for Name"); return NULL; } p = (expr_ty)PyArena_Malloc(arena, sizeof(*p)); if (!p) return NULL; p->kind = Name_kind; p->v.Name.id = id; p->v.Name.ctx = ctx; p->lineno = lineno; p->col_offset = col_offset; p->end_lineno = end_lineno; p->end_col_offset = end_col_offset; return p; } expr_ty List(asdl_seq * elts, expr_context_ty ctx, int lineno, int col_offset, int end_lineno, int end_col_offset, PyArena *arena) { expr_ty p; if (!ctx) { PyErr_SetString(PyExc_ValueError, "field ctx is required for List"); return NULL; } p = (expr_ty)PyArena_Malloc(arena, sizeof(*p)); if (!p) return NULL; p->kind = List_kind; p->v.List.elts = elts; p->v.List.ctx = ctx; p->lineno = lineno; p->col_offset = col_offset; p->end_lineno = end_lineno; p->end_col_offset = end_col_offset; return p; } expr_ty Tuple(asdl_seq * elts, expr_context_ty ctx, int lineno, int col_offset, int end_lineno, int end_col_offset, PyArena *arena) { expr_ty p; if (!ctx) { PyErr_SetString(PyExc_ValueError, "field ctx is required for Tuple"); return NULL; } p = (expr_ty)PyArena_Malloc(arena, sizeof(*p)); if (!p) return NULL; p->kind = Tuple_kind; p->v.Tuple.elts = elts; p->v.Tuple.ctx = ctx; p->lineno = lineno; p->col_offset = col_offset; p->end_lineno = end_lineno; p->end_col_offset = end_col_offset; return p; } slice_ty Slice(expr_ty lower, expr_ty upper, expr_ty step, PyArena *arena) { slice_ty p; p = (slice_ty)PyArena_Malloc(arena, sizeof(*p)); if (!p) return NULL; p->kind = Slice_kind; p->v.Slice.lower = lower; p->v.Slice.upper = upper; p->v.Slice.step = step; return p; } slice_ty ExtSlice(asdl_seq * dims, PyArena *arena) { slice_ty p; p = (slice_ty)PyArena_Malloc(arena, sizeof(*p)); if (!p) return NULL; p->kind = ExtSlice_kind; p->v.ExtSlice.dims = dims; return p; } slice_ty Index(expr_ty value, PyArena *arena) { slice_ty p; if (!value) { PyErr_SetString(PyExc_ValueError, "field value is required for Index"); return NULL; } p = (slice_ty)PyArena_Malloc(arena, sizeof(*p)); if (!p) return NULL; p->kind = 
Index_kind; p->v.Index.value = value; return p; } comprehension_ty comprehension(expr_ty target, expr_ty iter, asdl_seq * ifs, int is_async, PyArena *arena) { comprehension_ty p; if (!target) { PyErr_SetString(PyExc_ValueError, "field target is required for comprehension"); return NULL; } if (!iter) { PyErr_SetString(PyExc_ValueError, "field iter is required for comprehension"); return NULL; } p = (comprehension_ty)PyArena_Malloc(arena, sizeof(*p)); if (!p) return NULL; p->target = target; p->iter = iter; p->ifs = ifs; p->is_async = is_async; return p; } excepthandler_ty ExceptHandler(expr_ty type, identifier name, asdl_seq * body, int lineno, int col_offset, int end_lineno, int end_col_offset, PyArena *arena) { excepthandler_ty p; p = (excepthandler_ty)PyArena_Malloc(arena, sizeof(*p)); if (!p) return NULL; p->kind = ExceptHandler_kind; p->v.ExceptHandler.type = type; p->v.ExceptHandler.name = name; p->v.ExceptHandler.body = body; p->lineno = lineno; p->col_offset = col_offset; p->end_lineno = end_lineno; p->end_col_offset = end_col_offset; return p; } arguments_ty arguments(asdl_seq * args, arg_ty vararg, asdl_seq * kwonlyargs, asdl_seq * kw_defaults, arg_ty kwarg, asdl_seq * defaults, PyArena *arena) { arguments_ty p; p = (arguments_ty)PyArena_Malloc(arena, sizeof(*p)); if (!p) return NULL; p->args = args; p->vararg = vararg; p->kwonlyargs = kwonlyargs; p->kw_defaults = kw_defaults; p->kwarg = kwarg; p->defaults = defaults; return p; } arg_ty arg(identifier arg, expr_ty annotation, int lineno, int col_offset, int end_lineno, int end_col_offset, PyArena *arena) { arg_ty p; if (!arg) { PyErr_SetString(PyExc_ValueError, "field arg is required for arg"); return NULL; } p = (arg_ty)PyArena_Malloc(arena, sizeof(*p)); if (!p) return NULL; p->arg = arg; p->annotation = annotation; p->lineno = lineno; p->col_offset = col_offset; p->end_lineno = end_lineno; p->end_col_offset = end_col_offset; return p; } keyword_ty keyword(identifier arg, expr_ty value, PyArena *arena) { 
keyword_ty p; if (!value) { PyErr_SetString(PyExc_ValueError, "field value is required for keyword"); return NULL; } p = (keyword_ty)PyArena_Malloc(arena, sizeof(*p)); if (!p) return NULL; p->arg = arg; p->value = value; return p; } alias_ty alias(identifier name, identifier asname, PyArena *arena) { alias_ty p; if (!name) { PyErr_SetString(PyExc_ValueError, "field name is required for alias"); return NULL; } p = (alias_ty)PyArena_Malloc(arena, sizeof(*p)); if (!p) return NULL; p->name = name; p->asname = asname; return p; } withitem_ty withitem(expr_ty context_expr, expr_ty optional_vars, PyArena *arena) { withitem_ty p; if (!context_expr) { PyErr_SetString(PyExc_ValueError, "field context_expr is required for withitem"); return NULL; } p = (withitem_ty)PyArena_Malloc(arena, sizeof(*p)); if (!p) return NULL; p->context_expr = context_expr; p->optional_vars = optional_vars; return p; } PyObject* ast2obj_mod(void* _o) { mod_ty o = (mod_ty)_o; PyObject *result = NULL, *value = NULL; if (!o) { Py_RETURN_NONE; } switch (o->kind) { case Module_kind: result = PyType_GenericNew(Module_type, NULL, NULL); if (!result) goto failed; value = ast2obj_list(o->v.Module.body, ast2obj_stmt); if (!value) goto failed; if (_PyObject_SetAttrId(result, &PyId_body, value) == -1) goto failed; Py_DECREF(value); break; case Interactive_kind: result = PyType_GenericNew(Interactive_type, NULL, NULL); if (!result) goto failed; value = ast2obj_list(o->v.Interactive.body, ast2obj_stmt); if (!value) goto failed; if (_PyObject_SetAttrId(result, &PyId_body, value) == -1) goto failed; Py_DECREF(value); break; case Expression_kind: result = PyType_GenericNew(Expression_type, NULL, NULL); if (!result) goto failed; value = ast2obj_expr(o->v.Expression.body); if (!value) goto failed; if (_PyObject_SetAttrId(result, &PyId_body, value) == -1) goto failed; Py_DECREF(value); break; case Suite_kind: result = PyType_GenericNew(Suite_type, NULL, NULL); if (!result) goto failed; value = 
ast2obj_list(o->v.Suite.body, ast2obj_stmt); if (!value) goto failed; if (_PyObject_SetAttrId(result, &PyId_body, value) == -1) goto failed; Py_DECREF(value); break; } return result; failed: Py_XDECREF(value); Py_XDECREF(result); return NULL; } PyObject* ast2obj_stmt(void* _o) { stmt_ty o = (stmt_ty)_o; PyObject *result = NULL, *value = NULL; if (!o) { Py_RETURN_NONE; } switch (o->kind) { case FunctionDef_kind: result = PyType_GenericNew(FunctionDef_type, NULL, NULL); if (!result) goto failed; value = ast2obj_identifier(o->v.FunctionDef.name); if (!value) goto failed; if (_PyObject_SetAttrId(result, &PyId_name, value) == -1) goto failed; Py_DECREF(value); value = ast2obj_arguments(o->v.FunctionDef.args); if (!value) goto failed; if (_PyObject_SetAttrId(result, &PyId_args, value) == -1) goto failed; Py_DECREF(value); value = ast2obj_list(o->v.FunctionDef.body, ast2obj_stmt); if (!value) goto failed; if (_PyObject_SetAttrId(result, &PyId_body, value) == -1) goto failed; Py_DECREF(value); value = ast2obj_list(o->v.FunctionDef.decorator_list, ast2obj_expr); if (!value) goto failed; if (_PyObject_SetAttrId(result, &PyId_decorator_list, value) == -1) goto failed; Py_DECREF(value); value = ast2obj_expr(o->v.FunctionDef.returns); if (!value) goto failed; if (_PyObject_SetAttrId(result, &PyId_returns, value) == -1) goto failed; Py_DECREF(value); break; case AsyncFunctionDef_kind: result = PyType_GenericNew(AsyncFunctionDef_type, NULL, NULL); if (!result) goto failed; value = ast2obj_identifier(o->v.AsyncFunctionDef.name); if (!value) goto failed; if (_PyObject_SetAttrId(result, &PyId_name, value) == -1) goto failed; Py_DECREF(value); value = ast2obj_arguments(o->v.AsyncFunctionDef.args); if (!value) goto failed; if (_PyObject_SetAttrId(result, &PyId_args, value) == -1) goto failed; Py_DECREF(value); value = ast2obj_list(o->v.AsyncFunctionDef.body, ast2obj_stmt); if (!value) goto failed; if (_PyObject_SetAttrId(result, &PyId_body, value) == -1) goto failed; Py_DECREF(value); 
value = ast2obj_list(o->v.AsyncFunctionDef.decorator_list, ast2obj_expr); if (!value) goto failed; if (_PyObject_SetAttrId(result, &PyId_decorator_list, value) == -1) goto failed; Py_DECREF(value); value = ast2obj_expr(o->v.AsyncFunctionDef.returns); if (!value) goto failed; if (_PyObject_SetAttrId(result, &PyId_returns, value) == -1) goto failed; Py_DECREF(value); break; case ClassDef_kind: result = PyType_GenericNew(ClassDef_type, NULL, NULL); if (!result) goto failed; value = ast2obj_identifier(o->v.ClassDef.name); if (!value) goto failed; if (_PyObject_SetAttrId(result, &PyId_name, value) == -1) goto failed; Py_DECREF(value); value = ast2obj_list(o->v.ClassDef.bases, ast2obj_expr); if (!value) goto failed; if (_PyObject_SetAttrId(result, &PyId_bases, value) == -1) goto failed; Py_DECREF(value); value = ast2obj_list(o->v.ClassDef.keywords, ast2obj_keyword); if (!value) goto failed; if (_PyObject_SetAttrId(result, &PyId_keywords, value) == -1) goto failed; Py_DECREF(value); value = ast2obj_list(o->v.ClassDef.body, ast2obj_stmt); if (!value) goto failed; if (_PyObject_SetAttrId(result, &PyId_body, value) == -1) goto failed; Py_DECREF(value); value = ast2obj_list(o->v.ClassDef.decorator_list, ast2obj_expr); if (!value) goto failed; if (_PyObject_SetAttrId(result, &PyId_decorator_list, value) == -1) goto failed; Py_DECREF(value); break; case Return_kind: result = PyType_GenericNew(Return_type, NULL, NULL); if (!result) goto failed; value = ast2obj_expr(o->v.Return.value); if (!value) goto failed; if (_PyObject_SetAttrId(result, &PyId_value, value) == -1) goto failed; Py_DECREF(value); break; case Delete_kind: result = PyType_GenericNew(Delete_type, NULL, NULL); if (!result) goto failed; value = ast2obj_list(o->v.Delete.targets, ast2obj_expr); if (!value) goto failed; if (_PyObject_SetAttrId(result, &PyId_targets, value) == -1) goto failed; Py_DECREF(value); break; case Assign_kind: result = PyType_GenericNew(Assign_type, NULL, NULL); if (!result) goto failed; value 
= ast2obj_list(o->v.Assign.targets, ast2obj_expr); if (!value) goto failed; if (_PyObject_SetAttrId(result, &PyId_targets, value) == -1) goto failed; Py_DECREF(value); value = ast2obj_expr(o->v.Assign.value); if (!value) goto failed; if (_PyObject_SetAttrId(result, &PyId_value, value) == -1) goto failed; Py_DECREF(value); break; case AugAssign_kind: result = PyType_GenericNew(AugAssign_type, NULL, NULL); if (!result) goto failed; value = ast2obj_expr(o->v.AugAssign.target); if (!value) goto failed; if (_PyObject_SetAttrId(result, &PyId_target, value) == -1) goto failed; Py_DECREF(value); value = ast2obj_operator(o->v.AugAssign.op); if (!value) goto failed; if (_PyObject_SetAttrId(result, &PyId_op, value) == -1) goto failed; Py_DECREF(value); value = ast2obj_expr(o->v.AugAssign.value); if (!value) goto failed; if (_PyObject_SetAttrId(result, &PyId_value, value) == -1) goto failed; Py_DECREF(value); break; case AnnAssign_kind: result = PyType_GenericNew(AnnAssign_type, NULL, NULL); if (!result) goto failed; value = ast2obj_expr(o->v.AnnAssign.target); if (!value) goto failed; if (_PyObject_SetAttrId(result, &PyId_target, value) == -1) goto failed; Py_DECREF(value); value = ast2obj_expr(o->v.AnnAssign.annotation); if (!value) goto failed; if (_PyObject_SetAttrId(result, &PyId_annotation, value) == -1) goto failed; Py_DECREF(value); value = ast2obj_expr(o->v.AnnAssign.value); if (!value) goto failed; if (_PyObject_SetAttrId(result, &PyId_value, value) == -1) goto failed; Py_DECREF(value); value = ast2obj_int(o->v.AnnAssign.simple); if (!value) goto failed; if (_PyObject_SetAttrId(result, &PyId_simple, value) == -1) goto failed; Py_DECREF(value); break; case For_kind: result = PyType_GenericNew(For_type, NULL, NULL); if (!result) goto failed; value = ast2obj_expr(o->v.For.target); if (!value) goto failed; if (_PyObject_SetAttrId(result, &PyId_target, value) == -1) goto failed; Py_DECREF(value); value = ast2obj_expr(o->v.For.iter); if (!value) goto failed; if 
(_PyObject_SetAttrId(result, &PyId_iter, value) == -1) goto failed; Py_DECREF(value); value = ast2obj_list(o->v.For.body, ast2obj_stmt); if (!value) goto failed; if (_PyObject_SetAttrId(result, &PyId_body, value) == -1) goto failed; Py_DECREF(value); value = ast2obj_list(o->v.For.orelse, ast2obj_stmt); if (!value) goto failed; if (_PyObject_SetAttrId(result, &PyId_orelse, value) == -1) goto failed; Py_DECREF(value); break; case AsyncFor_kind: result = PyType_GenericNew(AsyncFor_type, NULL, NULL); if (!result) goto failed; value = ast2obj_expr(o->v.AsyncFor.target); if (!value) goto failed; if (_PyObject_SetAttrId(result, &PyId_target, value) == -1) goto failed; Py_DECREF(value); value = ast2obj_expr(o->v.AsyncFor.iter); if (!value) goto failed; if (_PyObject_SetAttrId(result, &PyId_iter, value) == -1) goto failed; Py_DECREF(value); value = ast2obj_list(o->v.AsyncFor.body, ast2obj_stmt); if (!value) goto failed; if (_PyObject_SetAttrId(result, &PyId_body, value) == -1) goto failed; Py_DECREF(value); value = ast2obj_list(o->v.AsyncFor.orelse, ast2obj_stmt); if (!value) goto failed; if (_PyObject_SetAttrId(result, &PyId_orelse, value) == -1) goto failed; Py_DECREF(value); break; case While_kind: result = PyType_GenericNew(While_type, NULL, NULL); if (!result) goto failed; value = ast2obj_expr(o->v.While.test); if (!value) goto failed; if (_PyObject_SetAttrId(result, &PyId_test, value) == -1) goto failed; Py_DECREF(value); value = ast2obj_list(o->v.While.body, ast2obj_stmt); if (!value) goto failed; if (_PyObject_SetAttrId(result, &PyId_body, value) == -1) goto failed; Py_DECREF(value); value = ast2obj_list(o->v.While.orelse, ast2obj_stmt); if (!value) goto failed; if (_PyObject_SetAttrId(result, &PyId_orelse, value) == -1) goto failed; Py_DECREF(value); break; case If_kind: result = PyType_GenericNew(If_type, NULL, NULL); if (!result) goto failed; value = ast2obj_expr(o->v.If.test); if (!value) goto failed; if (_PyObject_SetAttrId(result, &PyId_test, value) == -1) 
goto failed; Py_DECREF(value); value = ast2obj_list(o->v.If.body, ast2obj_stmt); if (!value) goto failed; if (_PyObject_SetAttrId(result, &PyId_body, value) == -1) goto failed; Py_DECREF(value); value = ast2obj_list(o->v.If.orelse, ast2obj_stmt); if (!value) goto failed; if (_PyObject_SetAttrId(result, &PyId_orelse, value) == -1) goto failed; Py_DECREF(value); break; case With_kind: result = PyType_GenericNew(With_type, NULL, NULL); if (!result) goto failed; value = ast2obj_list(o->v.With.items, ast2obj_withitem); if (!value) goto failed; if (_PyObject_SetAttrId(result, &PyId_items, value) == -1) goto failed; Py_DECREF(value); value = ast2obj_list(o->v.With.body, ast2obj_stmt); if (!value) goto failed; if (_PyObject_SetAttrId(result, &PyId_body, value) == -1) goto failed; Py_DECREF(value); break; case AsyncWith_kind: result = PyType_GenericNew(AsyncWith_type, NULL, NULL); if (!result) goto failed; value = ast2obj_list(o->v.AsyncWith.items, ast2obj_withitem); if (!value) goto failed; if (_PyObject_SetAttrId(result, &PyId_items, value) == -1) goto failed; Py_DECREF(value); value = ast2obj_list(o->v.AsyncWith.body, ast2obj_stmt); if (!value) goto failed; if (_PyObject_SetAttrId(result, &PyId_body, value) == -1) goto failed; Py_DECREF(value); break; case Raise_kind: result = PyType_GenericNew(Raise_type, NULL, NULL); if (!result) goto failed; value = ast2obj_expr(o->v.Raise.exc); if (!value) goto failed; if (_PyObject_SetAttrId(result, &PyId_exc, value) == -1) goto failed; Py_DECREF(value); value = ast2obj_expr(o->v.Raise.cause); if (!value) goto failed; if (_PyObject_SetAttrId(result, &PyId_cause, value) == -1) goto failed; Py_DECREF(value); break; case Try_kind: result = PyType_GenericNew(Try_type, NULL, NULL); if (!result) goto failed; value = ast2obj_list(o->v.Try.body, ast2obj_stmt); if (!value) goto failed; if (_PyObject_SetAttrId(result, &PyId_body, value) == -1) goto failed; Py_DECREF(value); value = ast2obj_list(o->v.Try.handlers, ast2obj_excepthandler); if 
(!value) goto failed; if (_PyObject_SetAttrId(result, &PyId_handlers, value) == -1) goto failed; Py_DECREF(value); value = ast2obj_list(o->v.Try.orelse, ast2obj_stmt); if (!value) goto failed; if (_PyObject_SetAttrId(result, &PyId_orelse, value) == -1) goto failed; Py_DECREF(value); value = ast2obj_list(o->v.Try.finalbody, ast2obj_stmt); if (!value) goto failed; if (_PyObject_SetAttrId(result, &PyId_finalbody, value) == -1) goto failed; Py_DECREF(value); break; case Assert_kind: result = PyType_GenericNew(Assert_type, NULL, NULL); if (!result) goto failed; value = ast2obj_expr(o->v.Assert.test); if (!value) goto failed; if (_PyObject_SetAttrId(result, &PyId_test, value) == -1) goto failed; Py_DECREF(value); value = ast2obj_expr(o->v.Assert.msg); if (!value) goto failed; if (_PyObject_SetAttrId(result, &PyId_msg, value) == -1) goto failed; Py_DECREF(value); break; case Import_kind: result = PyType_GenericNew(Import_type, NULL, NULL); if (!result) goto failed; value = ast2obj_list(o->v.Import.names, ast2obj_alias); if (!value) goto failed; if (_PyObject_SetAttrId(result, &PyId_names, value) == -1) goto failed; Py_DECREF(value); break; case ImportFrom_kind: result = PyType_GenericNew(ImportFrom_type, NULL, NULL); if (!result) goto failed; value = ast2obj_identifier(o->v.ImportFrom.module); if (!value) goto failed; if (_PyObject_SetAttrId(result, &PyId_module, value) == -1) goto failed; Py_DECREF(value); value = ast2obj_list(o->v.ImportFrom.names, ast2obj_alias); if (!value) goto failed; if (_PyObject_SetAttrId(result, &PyId_names, value) == -1) goto failed; Py_DECREF(value); value = ast2obj_int(o->v.ImportFrom.level); if (!value) goto failed; if (_PyObject_SetAttrId(result, &PyId_level, value) == -1) goto failed; Py_DECREF(value); break; case Global_kind: result = PyType_GenericNew(Global_type, NULL, NULL); if (!result) goto failed; value = ast2obj_list(o->v.Global.names, ast2obj_identifier); if (!value) goto failed; if (_PyObject_SetAttrId(result, &PyId_names, value) 
== -1) goto failed; Py_DECREF(value); break; case Nonlocal_kind: result = PyType_GenericNew(Nonlocal_type, NULL, NULL); if (!result) goto failed; value = ast2obj_list(o->v.Nonlocal.names, ast2obj_identifier); if (!value) goto failed; if (_PyObject_SetAttrId(result, &PyId_names, value) == -1) goto failed; Py_DECREF(value); break; case Expr_kind: result = PyType_GenericNew(Expr_type, NULL, NULL); if (!result) goto failed; value = ast2obj_expr(o->v.Expr.value); if (!value) goto failed; if (_PyObject_SetAttrId(result, &PyId_value, value) == -1) goto failed; Py_DECREF(value); break; case Pass_kind: result = PyType_GenericNew(Pass_type, NULL, NULL); if (!result) goto failed; break; case Break_kind: result = PyType_GenericNew(Break_type, NULL, NULL); if (!result) goto failed; break; case Continue_kind: result = PyType_GenericNew(Continue_type, NULL, NULL); if (!result) goto failed; break; } value = ast2obj_int(o->lineno); if (!value) goto failed; if (_PyObject_SetAttrId(result, &PyId_lineno, value) < 0) goto failed; Py_DECREF(value); value = ast2obj_int(o->col_offset); if (!value) goto failed; if (_PyObject_SetAttrId(result, &PyId_col_offset, value) < 0) goto failed; Py_DECREF(value); value = ast2obj_int(o->end_lineno); if (!value) goto failed; if (_PyObject_SetAttrId(result, &PyId_end_lineno, value) < 0) goto failed; Py_DECREF(value); value = ast2obj_int(o->end_col_offset); if (!value) goto failed; if (_PyObject_SetAttrId(result, &PyId_end_col_offset, value) < 0) goto failed; Py_DECREF(value); return result; failed: Py_XDECREF(value); Py_XDECREF(result); return NULL; } PyObject* ast2obj_expr(void* _o) { expr_ty o = (expr_ty)_o; PyObject *result = NULL, *value = NULL; if (!o) { Py_RETURN_NONE; } switch (o->kind) { case BoolOp_kind: result = PyType_GenericNew(BoolOp_type, NULL, NULL); if (!result) goto failed; value = ast2obj_boolop(o->v.BoolOp.op); if (!value) goto failed; if (_PyObject_SetAttrId(result, &PyId_op, value) == -1) goto failed; Py_DECREF(value); value = 
ast2obj_list(o->v.BoolOp.values, ast2obj_expr); if (!value) goto failed; if (_PyObject_SetAttrId(result, &PyId_values, value) == -1) goto failed; Py_DECREF(value); break; case NamedExpr_kind: result = PyType_GenericNew(NamedExpr_type, NULL, NULL); if (!result) goto failed; value = ast2obj_expr(o->v.NamedExpr.target); if (!value) goto failed; if (_PyObject_SetAttrId(result, &PyId_target, value) == -1) goto failed; Py_DECREF(value); value = ast2obj_expr(o->v.NamedExpr.value); if (!value) goto failed; if (_PyObject_SetAttrId(result, &PyId_value, value) == -1) goto failed; Py_DECREF(value); break; case BinOp_kind: result = PyType_GenericNew(BinOp_type, NULL, NULL); if (!result) goto failed; value = ast2obj_expr(o->v.BinOp.left); if (!value) goto failed; if (_PyObject_SetAttrId(result, &PyId_left, value) == -1) goto failed; Py_DECREF(value); value = ast2obj_operator(o->v.BinOp.op); if (!value) goto failed; if (_PyObject_SetAttrId(result, &PyId_op, value) == -1) goto failed; Py_DECREF(value); value = ast2obj_expr(o->v.BinOp.right); if (!value) goto failed; if (_PyObject_SetAttrId(result, &PyId_right, value) == -1) goto failed; Py_DECREF(value); break; case UnaryOp_kind: result = PyType_GenericNew(UnaryOp_type, NULL, NULL); if (!result) goto failed; value = ast2obj_unaryop(o->v.UnaryOp.op); if (!value) goto failed; if (_PyObject_SetAttrId(result, &PyId_op, value) == -1) goto failed; Py_DECREF(value); value = ast2obj_expr(o->v.UnaryOp.operand); if (!value) goto failed; if (_PyObject_SetAttrId(result, &PyId_operand, value) == -1) goto failed; Py_DECREF(value); break; case Lambda_kind: result = PyType_GenericNew(Lambda_type, NULL, NULL); if (!result) goto failed; value = ast2obj_arguments(o->v.Lambda.args); if (!value) goto failed; if (_PyObject_SetAttrId(result, &PyId_args, value) == -1) goto failed; Py_DECREF(value); value = ast2obj_expr(o->v.Lambda.body); if (!value) goto failed; if (_PyObject_SetAttrId(result, &PyId_body, value) == -1) goto failed; Py_DECREF(value); 
break; case IfExp_kind: result = PyType_GenericNew(IfExp_type, NULL, NULL); if (!result) goto failed; value = ast2obj_expr(o->v.IfExp.test); if (!value) goto failed; if (_PyObject_SetAttrId(result, &PyId_test, value) == -1) goto failed; Py_DECREF(value); value = ast2obj_expr(o->v.IfExp.body); if (!value) goto failed; if (_PyObject_SetAttrId(result, &PyId_body, value) == -1) goto failed; Py_DECREF(value); value = ast2obj_expr(o->v.IfExp.orelse); if (!value) goto failed; if (_PyObject_SetAttrId(result, &PyId_orelse, value) == -1) goto failed; Py_DECREF(value); break; case Dict_kind: result = PyType_GenericNew(Dict_type, NULL, NULL); if (!result) goto failed; value = ast2obj_list(o->v.Dict.keys, ast2obj_expr); if (!value) goto failed; if (_PyObject_SetAttrId(result, &PyId_keys, value) == -1) goto failed; Py_DECREF(value); value = ast2obj_list(o->v.Dict.values, ast2obj_expr); if (!value) goto failed; if (_PyObject_SetAttrId(result, &PyId_values, value) == -1) goto failed; Py_DECREF(value); break; case Set_kind: result = PyType_GenericNew(Set_type, NULL, NULL); if (!result) goto failed; value = ast2obj_list(o->v.Set.elts, ast2obj_expr); if (!value) goto failed; if (_PyObject_SetAttrId(result, &PyId_elts, value) == -1) goto failed; Py_DECREF(value); break; case ListComp_kind: result = PyType_GenericNew(ListComp_type, NULL, NULL); if (!result) goto failed; value = ast2obj_expr(o->v.ListComp.elt); if (!value) goto failed; if (_PyObject_SetAttrId(result, &PyId_elt, value) == -1) goto failed; Py_DECREF(value); value = ast2obj_list(o->v.ListComp.generators, ast2obj_comprehension); if (!value) goto failed; if (_PyObject_SetAttrId(result, &PyId_generators, value) == -1) goto failed; Py_DECREF(value); break; case SetComp_kind: result = PyType_GenericNew(SetComp_type, NULL, NULL); if (!result) goto failed; value = ast2obj_expr(o->v.SetComp.elt); if (!value) goto failed; if (_PyObject_SetAttrId(result, &PyId_elt, value) == -1) goto failed; Py_DECREF(value); value = 
ast2obj_list(o->v.SetComp.generators, ast2obj_comprehension); if (!value) goto failed; if (_PyObject_SetAttrId(result, &PyId_generators, value) == -1) goto failed; Py_DECREF(value); break; case DictComp_kind: result = PyType_GenericNew(DictComp_type, NULL, NULL); if (!result) goto failed; value = ast2obj_expr(o->v.DictComp.key); if (!value) goto failed; if (_PyObject_SetAttrId(result, &PyId_key, value) == -1) goto failed; Py_DECREF(value); value = ast2obj_expr(o->v.DictComp.value); if (!value) goto failed; if (_PyObject_SetAttrId(result, &PyId_value, value) == -1) goto failed; Py_DECREF(value); value = ast2obj_list(o->v.DictComp.generators, ast2obj_comprehension); if (!value) goto failed; if (_PyObject_SetAttrId(result, &PyId_generators, value) == -1) goto failed; Py_DECREF(value); break; case GeneratorExp_kind: result = PyType_GenericNew(GeneratorExp_type, NULL, NULL); if (!result) goto failed; value = ast2obj_expr(o->v.GeneratorExp.elt); if (!value) goto failed; if (_PyObject_SetAttrId(result, &PyId_elt, value) == -1) goto failed; Py_DECREF(value); value = ast2obj_list(o->v.GeneratorExp.generators, ast2obj_comprehension); if (!value) goto failed; if (_PyObject_SetAttrId(result, &PyId_generators, value) == -1) goto failed; Py_DECREF(value); break; case Await_kind: result = PyType_GenericNew(Await_type, NULL, NULL); if (!result) goto failed; value = ast2obj_expr(o->v.Await.value); if (!value) goto failed; if (_PyObject_SetAttrId(result, &PyId_value, value) == -1) goto failed; Py_DECREF(value); break; case Yield_kind: result = PyType_GenericNew(Yield_type, NULL, NULL); if (!result) goto failed; value = ast2obj_expr(o->v.Yield.value); if (!value) goto failed; if (_PyObject_SetAttrId(result, &PyId_value, value) == -1) goto failed; Py_DECREF(value); break; case YieldFrom_kind: result = PyType_GenericNew(YieldFrom_type, NULL, NULL); if (!result) goto failed; value = ast2obj_expr(o->v.YieldFrom.value); if (!value) goto failed; if (_PyObject_SetAttrId(result, &PyId_value, 
value) == -1) goto failed; Py_DECREF(value); break; case Compare_kind: result = PyType_GenericNew(Compare_type, NULL, NULL); if (!result) goto failed; value = ast2obj_expr(o->v.Compare.left); if (!value) goto failed; if (_PyObject_SetAttrId(result, &PyId_left, value) == -1) goto failed; Py_DECREF(value); { Py_ssize_t i, n = asdl_seq_LEN(o->v.Compare.ops); value = PyList_New(n); if (!value) goto failed; for(i = 0; i < n; i++) PyList_SET_ITEM(value, i, ast2obj_cmpop((cmpop_ty)asdl_seq_GET(o->v.Compare.ops, i))); } if (!value) goto failed; if (_PyObject_SetAttrId(result, &PyId_ops, value) == -1) goto failed; Py_DECREF(value); value = ast2obj_list(o->v.Compare.comparators, ast2obj_expr); if (!value) goto failed; if (_PyObject_SetAttrId(result, &PyId_comparators, value) == -1) goto failed; Py_DECREF(value); break; case Call_kind: result = PyType_GenericNew(Call_type, NULL, NULL); if (!result) goto failed; value = ast2obj_expr(o->v.Call.func); if (!value) goto failed; if (_PyObject_SetAttrId(result, &PyId_func, value) == -1) goto failed; Py_DECREF(value); value = ast2obj_list(o->v.Call.args, ast2obj_expr); if (!value) goto failed; if (_PyObject_SetAttrId(result, &PyId_args, value) == -1) goto failed; Py_DECREF(value); value = ast2obj_list(o->v.Call.keywords, ast2obj_keyword); if (!value) goto failed; if (_PyObject_SetAttrId(result, &PyId_keywords, value) == -1) goto failed; Py_DECREF(value); break; case FormattedValue_kind: result = PyType_GenericNew(FormattedValue_type, NULL, NULL); if (!result) goto failed; value = ast2obj_expr(o->v.FormattedValue.value); if (!value) goto failed; if (_PyObject_SetAttrId(result, &PyId_value, value) == -1) goto failed; Py_DECREF(value); value = ast2obj_int(o->v.FormattedValue.conversion); if (!value) goto failed; if (_PyObject_SetAttrId(result, &PyId_conversion, value) == -1) goto failed; Py_DECREF(value); value = ast2obj_expr(o->v.FormattedValue.format_spec); if (!value) goto failed; if (_PyObject_SetAttrId(result, &PyId_format_spec, 
value) == -1) goto failed; Py_DECREF(value); break; case JoinedStr_kind: result = PyType_GenericNew(JoinedStr_type, NULL, NULL); if (!result) goto failed; value = ast2obj_list(o->v.JoinedStr.values, ast2obj_expr); if (!value) goto failed; if (_PyObject_SetAttrId(result, &PyId_values, value) == -1) goto failed; Py_DECREF(value); break; case Constant_kind: result = PyType_GenericNew(Constant_type, NULL, NULL); if (!result) goto failed; value = ast2obj_constant(o->v.Constant.value); if (!value) goto failed; if (_PyObject_SetAttrId(result, &PyId_value, value) == -1) goto failed; Py_DECREF(value); break; case Attribute_kind: result = PyType_GenericNew(Attribute_type, NULL, NULL); if (!result) goto failed; value = ast2obj_expr(o->v.Attribute.value); if (!value) goto failed; if (_PyObject_SetAttrId(result, &PyId_value, value) == -1) goto failed; Py_DECREF(value); value = ast2obj_identifier(o->v.Attribute.attr); if (!value) goto failed; if (_PyObject_SetAttrId(result, &PyId_attr, value) == -1) goto failed; Py_DECREF(value); value = ast2obj_expr_context(o->v.Attribute.ctx); if (!value) goto failed; if (_PyObject_SetAttrId(result, &PyId_ctx, value) == -1) goto failed; Py_DECREF(value); break; case Subscript_kind: result = PyType_GenericNew(Subscript_type, NULL, NULL); if (!result) goto failed; value = ast2obj_expr(o->v.Subscript.value); if (!value) goto failed; if (_PyObject_SetAttrId(result, &PyId_value, value) == -1) goto failed; Py_DECREF(value); value = ast2obj_slice(o->v.Subscript.slice); if (!value) goto failed; if (_PyObject_SetAttrId(result, &PyId_slice, value) == -1) goto failed; Py_DECREF(value); value = ast2obj_expr_context(o->v.Subscript.ctx); if (!value) goto failed; if (_PyObject_SetAttrId(result, &PyId_ctx, value) == -1) goto failed; Py_DECREF(value); break; case Starred_kind: result = PyType_GenericNew(Starred_type, NULL, NULL); if (!result) goto failed; value = ast2obj_expr(o->v.Starred.value); if (!value) goto failed; if (_PyObject_SetAttrId(result, 
&PyId_value, value) == -1) goto failed; Py_DECREF(value); value = ast2obj_expr_context(o->v.Starred.ctx); if (!value) goto failed; if (_PyObject_SetAttrId(result, &PyId_ctx, value) == -1) goto failed; Py_DECREF(value); break; case Name_kind: result = PyType_GenericNew(Name_type, NULL, NULL); if (!result) goto failed; value = ast2obj_identifier(o->v.Name.id); if (!value) goto failed; if (_PyObject_SetAttrId(result, &PyId_id, value) == -1) goto failed; Py_DECREF(value); value = ast2obj_expr_context(o->v.Name.ctx); if (!value) goto failed; if (_PyObject_SetAttrId(result, &PyId_ctx, value) == -1) goto failed; Py_DECREF(value); break; case List_kind: result = PyType_GenericNew(List_type, NULL, NULL); if (!result) goto failed; value = ast2obj_list(o->v.List.elts, ast2obj_expr); if (!value) goto failed; if (_PyObject_SetAttrId(result, &PyId_elts, value) == -1) goto failed; Py_DECREF(value); value = ast2obj_expr_context(o->v.List.ctx); if (!value) goto failed; if (_PyObject_SetAttrId(result, &PyId_ctx, value) == -1) goto failed; Py_DECREF(value); break; case Tuple_kind: result = PyType_GenericNew(Tuple_type, NULL, NULL); if (!result) goto failed; value = ast2obj_list(o->v.Tuple.elts, ast2obj_expr); if (!value) goto failed; if (_PyObject_SetAttrId(result, &PyId_elts, value) == -1) goto failed; Py_DECREF(value); value = ast2obj_expr_context(o->v.Tuple.ctx); if (!value) goto failed; if (_PyObject_SetAttrId(result, &PyId_ctx, value) == -1) goto failed; Py_DECREF(value); break; } value = ast2obj_int(o->lineno); if (!value) goto failed; if (_PyObject_SetAttrId(result, &PyId_lineno, value) < 0) goto failed; Py_DECREF(value); value = ast2obj_int(o->col_offset); if (!value) goto failed; if (_PyObject_SetAttrId(result, &PyId_col_offset, value) < 0) goto failed; Py_DECREF(value); value = ast2obj_int(o->end_lineno); if (!value) goto failed; if (_PyObject_SetAttrId(result, &PyId_end_lineno, value) < 0) goto failed; Py_DECREF(value); value = ast2obj_int(o->end_col_offset); if (!value) 
goto failed; if (_PyObject_SetAttrId(result, &PyId_end_col_offset, value) < 0) goto failed; Py_DECREF(value); return result; failed: Py_XDECREF(value); Py_XDECREF(result); return NULL; }
/* NOTE(review): this file appears to be generator output (repetitive
   ast2obj_* / obj2ast_* converters); fixes normally belong in the
   generator, not here -- confirm before hand-editing. */
/* Map an expr_context enum value to its preallocated singleton AST
   instance.  A new reference is returned; an unexpected enum value
   raises SystemError and returns NULL. */
PyObject* ast2obj_expr_context(expr_context_ty o) { switch(o) { case Load: Py_INCREF(Load_singleton); return Load_singleton; case Store: Py_INCREF(Store_singleton); return Store_singleton; case Del: Py_INCREF(Del_singleton); return Del_singleton; case AugLoad: Py_INCREF(AugLoad_singleton); return AugLoad_singleton; case AugStore: Py_INCREF(AugStore_singleton); return AugStore_singleton; case Param: Py_INCREF(Param_singleton); return Param_singleton; case NamedStore: Py_INCREF(NamedStore_singleton); return NamedStore_singleton; default: /* should never happen, but just in case ... */ PyErr_Format(PyExc_SystemError, "unknown expr_context found"); return NULL; } }
/* Convert a C slice node (Slice / ExtSlice / Index) into a new Python
   AST object; returns Py_None for a NULL node, or NULL with an
   exception set on failure. */
PyObject* ast2obj_slice(void* _o) { slice_ty o = (slice_ty)_o; PyObject *result = NULL, *value = NULL; if (!o) { Py_RETURN_NONE; } switch (o->kind) { case Slice_kind: result = PyType_GenericNew(Slice_type, NULL, NULL); if (!result) goto failed; value = ast2obj_expr(o->v.Slice.lower); if (!value) goto failed; if (_PyObject_SetAttrId(result, &PyId_lower, value) == -1) goto failed; Py_DECREF(value); value = ast2obj_expr(o->v.Slice.upper); if (!value) goto failed; if (_PyObject_SetAttrId(result, &PyId_upper, value) == -1) goto failed; Py_DECREF(value); value = ast2obj_expr(o->v.Slice.step); if (!value) goto failed; if (_PyObject_SetAttrId(result, &PyId_step, value) == -1) goto failed; Py_DECREF(value); break; case ExtSlice_kind: result = PyType_GenericNew(ExtSlice_type, NULL, NULL); if (!result) goto failed; value = ast2obj_list(o->v.ExtSlice.dims, ast2obj_slice); if (!value) goto failed; if (_PyObject_SetAttrId(result, &PyId_dims, value) == -1) goto failed; Py_DECREF(value); break; case Index_kind: result = PyType_GenericNew(Index_type, NULL, NULL); if (!result) goto failed; value = ast2obj_expr(o->v.Index.value); if (!value)
goto failed; if (_PyObject_SetAttrId(result, &PyId_value, value) == -1) goto failed; Py_DECREF(value); break; } return result; failed: Py_XDECREF(value); Py_XDECREF(result); return NULL; }
/* Map a boolean-operator enum value (And/Or) to its singleton AST
   instance; new reference returned, SystemError on unknown value. */
PyObject* ast2obj_boolop(boolop_ty o) { switch(o) { case And: Py_INCREF(And_singleton); return And_singleton; case Or: Py_INCREF(Or_singleton); return Or_singleton; default: /* should never happen, but just in case ... */ PyErr_Format(PyExc_SystemError, "unknown boolop found"); return NULL; } }
/* Map a binary-operator enum value to its singleton AST instance;
   new reference returned, SystemError on unknown value. */
PyObject* ast2obj_operator(operator_ty o) { switch(o) { case Add: Py_INCREF(Add_singleton); return Add_singleton; case Sub: Py_INCREF(Sub_singleton); return Sub_singleton; case Mult: Py_INCREF(Mult_singleton); return Mult_singleton; case MatMult: Py_INCREF(MatMult_singleton); return MatMult_singleton; case Div: Py_INCREF(Div_singleton); return Div_singleton; case Mod: Py_INCREF(Mod_singleton); return Mod_singleton; case Pow: Py_INCREF(Pow_singleton); return Pow_singleton; case LShift: Py_INCREF(LShift_singleton); return LShift_singleton; case RShift: Py_INCREF(RShift_singleton); return RShift_singleton; case BitOr: Py_INCREF(BitOr_singleton); return BitOr_singleton; case BitXor: Py_INCREF(BitXor_singleton); return BitXor_singleton; case BitAnd: Py_INCREF(BitAnd_singleton); return BitAnd_singleton; case FloorDiv: Py_INCREF(FloorDiv_singleton); return FloorDiv_singleton; default: /* should never happen, but just in case ... */ PyErr_Format(PyExc_SystemError, "unknown operator found"); return NULL; } }
/* Map a unary-operator enum value to its singleton AST instance;
   new reference returned, SystemError on unknown value. */
PyObject* ast2obj_unaryop(unaryop_ty o) { switch(o) { case Invert: Py_INCREF(Invert_singleton); return Invert_singleton; case Not: Py_INCREF(Not_singleton); return Not_singleton; case UAdd: Py_INCREF(UAdd_singleton); return UAdd_singleton; case USub: Py_INCREF(USub_singleton); return USub_singleton; default: /* should never happen, but just in case ...
*/ PyErr_Format(PyExc_SystemError, "unknown unaryop found"); return NULL; } }
/* Map a comparison-operator enum value to its singleton AST instance;
   new reference returned, SystemError on unknown value. */
PyObject* ast2obj_cmpop(cmpop_ty o) { switch(o) { case Eq: Py_INCREF(Eq_singleton); return Eq_singleton; case NotEq: Py_INCREF(NotEq_singleton); return NotEq_singleton; case Lt: Py_INCREF(Lt_singleton); return Lt_singleton; case LtE: Py_INCREF(LtE_singleton); return LtE_singleton; case Gt: Py_INCREF(Gt_singleton); return Gt_singleton; case GtE: Py_INCREF(GtE_singleton); return GtE_singleton; case Is: Py_INCREF(Is_singleton); return Is_singleton; case IsNot: Py_INCREF(IsNot_singleton); return IsNot_singleton; case In: Py_INCREF(In_singleton); return In_singleton; case NotIn: Py_INCREF(NotIn_singleton); return NotIn_singleton; default: /* should never happen, but just in case ... */ PyErr_Format(PyExc_SystemError, "unknown cmpop found"); return NULL; } }
/* Build a Python 'comprehension' AST object from a C comprehension
   node, setting target, iter, ifs and is_async; returns Py_None for a
   NULL node, or NULL with an exception set on failure. */
PyObject* ast2obj_comprehension(void* _o) { comprehension_ty o = (comprehension_ty)_o; PyObject *result = NULL, *value = NULL; if (!o) { Py_RETURN_NONE; } result = PyType_GenericNew(comprehension_type, NULL, NULL); if (!result) return NULL; value = ast2obj_expr(o->target); if (!value) goto failed; if (_PyObject_SetAttrId(result, &PyId_target, value) == -1) goto failed; Py_DECREF(value); value = ast2obj_expr(o->iter); if (!value) goto failed; if (_PyObject_SetAttrId(result, &PyId_iter, value) == -1) goto failed; Py_DECREF(value); value = ast2obj_list(o->ifs, ast2obj_expr); if (!value) goto failed; if (_PyObject_SetAttrId(result, &PyId_ifs, value) == -1) goto failed; Py_DECREF(value); value = ast2obj_int(o->is_async); if (!value) goto failed; if (_PyObject_SetAttrId(result, &PyId_is_async, value) == -1) goto failed; Py_DECREF(value); return result; failed: Py_XDECREF(value); Py_XDECREF(result); return NULL; }
/* Build a Python 'excepthandler' AST object (type, name, body plus the
   four location attributes); returns Py_None for a NULL node, or NULL
   with an exception set on failure. */
PyObject* ast2obj_excepthandler(void* _o) { excepthandler_ty o = (excepthandler_ty)_o; PyObject *result = NULL, *value = NULL; if (!o) { Py_RETURN_NONE; } switch (o->kind) { case ExceptHandler_kind: result =
PyType_GenericNew(ExceptHandler_type, NULL, NULL); if (!result) goto failed; value = ast2obj_expr(o->v.ExceptHandler.type); if (!value) goto failed; if (_PyObject_SetAttrId(result, &PyId_type, value) == -1) goto failed; Py_DECREF(value); value = ast2obj_identifier(o->v.ExceptHandler.name); if (!value) goto failed; if (_PyObject_SetAttrId(result, &PyId_name, value) == -1) goto failed; Py_DECREF(value); value = ast2obj_list(o->v.ExceptHandler.body, ast2obj_stmt); if (!value) goto failed; if (_PyObject_SetAttrId(result, &PyId_body, value) == -1) goto failed; Py_DECREF(value); break; } value = ast2obj_int(o->lineno); if (!value) goto failed; if (_PyObject_SetAttrId(result, &PyId_lineno, value) < 0) goto failed; Py_DECREF(value); value = ast2obj_int(o->col_offset); if (!value) goto failed; if (_PyObject_SetAttrId(result, &PyId_col_offset, value) < 0) goto failed; Py_DECREF(value); value = ast2obj_int(o->end_lineno); if (!value) goto failed; if (_PyObject_SetAttrId(result, &PyId_end_lineno, value) < 0) goto failed; Py_DECREF(value); value = ast2obj_int(o->end_col_offset); if (!value) goto failed; if (_PyObject_SetAttrId(result, &PyId_end_col_offset, value) < 0) goto failed; Py_DECREF(value); return result; failed: Py_XDECREF(value); Py_XDECREF(result); return NULL; }
/* Build a Python 'arguments' AST object (args, vararg, kwonlyargs,
   kw_defaults, kwarg, defaults); returns Py_None for a NULL node, or
   NULL with an exception set on failure. */
PyObject* ast2obj_arguments(void* _o) { arguments_ty o = (arguments_ty)_o; PyObject *result = NULL, *value = NULL; if (!o) { Py_RETURN_NONE; } result = PyType_GenericNew(arguments_type, NULL, NULL); if (!result) return NULL; value = ast2obj_list(o->args, ast2obj_arg); if (!value) goto failed; if (_PyObject_SetAttrId(result, &PyId_args, value) == -1) goto failed; Py_DECREF(value); value = ast2obj_arg(o->vararg); if (!value) goto failed; if (_PyObject_SetAttrId(result, &PyId_vararg, value) == -1) goto failed; Py_DECREF(value); value = ast2obj_list(o->kwonlyargs, ast2obj_arg); if (!value) goto failed; if (_PyObject_SetAttrId(result, &PyId_kwonlyargs, value) == -1) goto failed; Py_DECREF(value); value =
ast2obj_list(o->kw_defaults, ast2obj_expr); if (!value) goto failed; if (_PyObject_SetAttrId(result, &PyId_kw_defaults, value) == -1) goto failed; Py_DECREF(value); value = ast2obj_arg(o->kwarg); if (!value) goto failed; if (_PyObject_SetAttrId(result, &PyId_kwarg, value) == -1) goto failed; Py_DECREF(value); value = ast2obj_list(o->defaults, ast2obj_expr); if (!value) goto failed; if (_PyObject_SetAttrId(result, &PyId_defaults, value) == -1) goto failed; Py_DECREF(value); return result; failed: Py_XDECREF(value); Py_XDECREF(result); return NULL; }
/* Build a Python 'arg' AST object (arg, annotation plus the four
   location attributes); returns Py_None for a NULL node, or NULL with
   an exception set on failure. */
PyObject* ast2obj_arg(void* _o) { arg_ty o = (arg_ty)_o; PyObject *result = NULL, *value = NULL; if (!o) { Py_RETURN_NONE; } result = PyType_GenericNew(arg_type, NULL, NULL); if (!result) return NULL; value = ast2obj_identifier(o->arg); if (!value) goto failed; if (_PyObject_SetAttrId(result, &PyId_arg, value) == -1) goto failed; Py_DECREF(value); value = ast2obj_expr(o->annotation); if (!value) goto failed; if (_PyObject_SetAttrId(result, &PyId_annotation, value) == -1) goto failed; Py_DECREF(value); value = ast2obj_int(o->lineno); if (!value) goto failed; if (_PyObject_SetAttrId(result, &PyId_lineno, value) < 0) goto failed; Py_DECREF(value); value = ast2obj_int(o->col_offset); if (!value) goto failed; if (_PyObject_SetAttrId(result, &PyId_col_offset, value) < 0) goto failed; Py_DECREF(value); value = ast2obj_int(o->end_lineno); if (!value) goto failed; if (_PyObject_SetAttrId(result, &PyId_end_lineno, value) < 0) goto failed; Py_DECREF(value); value = ast2obj_int(o->end_col_offset); if (!value) goto failed; if (_PyObject_SetAttrId(result, &PyId_end_col_offset, value) < 0) goto failed; Py_DECREF(value); return result; failed: Py_XDECREF(value); Py_XDECREF(result); return NULL; }
/* Build a Python 'keyword' AST object (arg, value); returns Py_None
   for a NULL node, or NULL with an exception set on failure. */
PyObject* ast2obj_keyword(void* _o) { keyword_ty o = (keyword_ty)_o; PyObject *result = NULL, *value = NULL; if (!o) { Py_RETURN_NONE; } result = PyType_GenericNew(keyword_type, NULL, NULL); if (!result) return NULL; value =
ast2obj_identifier(o->arg); if (!value) goto failed; if (_PyObject_SetAttrId(result, &PyId_arg, value) == -1) goto failed; Py_DECREF(value); value = ast2obj_expr(o->value); if (!value) goto failed; if (_PyObject_SetAttrId(result, &PyId_value, value) == -1) goto failed; Py_DECREF(value); return result; failed: Py_XDECREF(value); Py_XDECREF(result); return NULL; }
/* Build a Python 'alias' AST object (name, asname); returns Py_None
   for a NULL node, or NULL with an exception set on failure. */
PyObject* ast2obj_alias(void* _o) { alias_ty o = (alias_ty)_o; PyObject *result = NULL, *value = NULL; if (!o) { Py_RETURN_NONE; } result = PyType_GenericNew(alias_type, NULL, NULL); if (!result) return NULL; value = ast2obj_identifier(o->name); if (!value) goto failed; if (_PyObject_SetAttrId(result, &PyId_name, value) == -1) goto failed; Py_DECREF(value); value = ast2obj_identifier(o->asname); if (!value) goto failed; if (_PyObject_SetAttrId(result, &PyId_asname, value) == -1) goto failed; Py_DECREF(value); return result; failed: Py_XDECREF(value); Py_XDECREF(result); return NULL; }
/* Build a Python 'withitem' AST object (context_expr, optional_vars);
   returns Py_None for a NULL node, or NULL with an exception set on
   failure. */
PyObject* ast2obj_withitem(void* _o) { withitem_ty o = (withitem_ty)_o; PyObject *result = NULL, *value = NULL; if (!o) { Py_RETURN_NONE; } result = PyType_GenericNew(withitem_type, NULL, NULL); if (!result) return NULL; value = ast2obj_expr(o->context_expr); if (!value) goto failed; if (_PyObject_SetAttrId(result, &PyId_context_expr, value) == -1) goto failed; Py_DECREF(value); value = ast2obj_expr(o->optional_vars); if (!value) goto failed; if (_PyObject_SetAttrId(result, &PyId_optional_vars, value) == -1) goto failed; Py_DECREF(value); return result; failed: Py_XDECREF(value); Py_XDECREF(result); return NULL; }
/* Reverse conversion: validate a Python AST object as one of the mod
   variants (Module / Interactive / Expression / Suite) and build the
   arena-allocated mod_ty in *out.  Returns 0 on success, 1 with an
   exception set on error; Py_None yields *out == NULL. */
int obj2ast_mod(PyObject* obj, mod_ty* out, PyArena* arena) { int isinstance; PyObject *tmp = NULL; if (obj == Py_None) { *out = NULL; return 0; } isinstance = PyObject_IsInstance(obj, (PyObject*)Module_type); if (isinstance == -1) { return 1; } if (isinstance) { asdl_seq* body; if (_PyObject_LookupAttrId(obj, &PyId_body, &tmp) < 0) { return 1; } if (tmp == NULL) { PyErr_SetString(PyExc_TypeError, "required field 
\"body\" missing from Module"); return 1; } else { int res; Py_ssize_t len; Py_ssize_t i; if (!PyList_Check(tmp)) { PyErr_Format(PyExc_TypeError, "Module field \"body\" must be a list, not a %.200s", tmp->ob_type->tp_name); goto failed; } len = PyList_GET_SIZE(tmp); body = _Py_asdl_seq_new(len, arena); if (body == NULL) goto failed; for (i = 0; i < len; i++) { stmt_ty val; res = obj2ast_stmt(PyList_GET_ITEM(tmp, i), &val, arena); if (res != 0) goto failed; if (len != PyList_GET_SIZE(tmp)) { PyErr_SetString(PyExc_RuntimeError, "Module field \"body\" changed size during iteration"); goto failed; } asdl_seq_SET(body, i, val); } Py_CLEAR(tmp); } *out = Module(body, arena); if (*out == NULL) goto failed; return 0; } isinstance = PyObject_IsInstance(obj, (PyObject*)Interactive_type); if (isinstance == -1) { return 1; } if (isinstance) { asdl_seq* body; if (_PyObject_LookupAttrId(obj, &PyId_body, &tmp) < 0) { return 1; } if (tmp == NULL) { PyErr_SetString(PyExc_TypeError, "required field \"body\" missing from Interactive"); return 1; } else { int res; Py_ssize_t len; Py_ssize_t i; if (!PyList_Check(tmp)) { PyErr_Format(PyExc_TypeError, "Interactive field \"body\" must be a list, not a %.200s", tmp->ob_type->tp_name); goto failed; } len = PyList_GET_SIZE(tmp); body = _Py_asdl_seq_new(len, arena); if (body == NULL) goto failed; for (i = 0; i < len; i++) { stmt_ty val; res = obj2ast_stmt(PyList_GET_ITEM(tmp, i), &val, arena); if (res != 0) goto failed; if (len != PyList_GET_SIZE(tmp)) { PyErr_SetString(PyExc_RuntimeError, "Interactive field \"body\" changed size during iteration"); goto failed; } asdl_seq_SET(body, i, val); } Py_CLEAR(tmp); } *out = Interactive(body, arena); if (*out == NULL) goto failed; return 0; } isinstance = PyObject_IsInstance(obj, (PyObject*)Expression_type); if (isinstance == -1) { return 1; } if (isinstance) { expr_ty body; if (_PyObject_LookupAttrId(obj, &PyId_body, &tmp) < 0) { return 1; } if (tmp == NULL) { PyErr_SetString(PyExc_TypeError, 
"required field \"body\" missing from Expression"); return 1; } else { int res; res = obj2ast_expr(tmp, &body, arena); if (res != 0) goto failed; Py_CLEAR(tmp); } *out = Expression(body, arena); if (*out == NULL) goto failed; return 0; } isinstance = PyObject_IsInstance(obj, (PyObject*)Suite_type); if (isinstance == -1) { return 1; } if (isinstance) { asdl_seq* body; if (_PyObject_LookupAttrId(obj, &PyId_body, &tmp) < 0) { return 1; } if (tmp == NULL) { PyErr_SetString(PyExc_TypeError, "required field \"body\" missing from Suite"); return 1; } else { int res; Py_ssize_t len; Py_ssize_t i; if (!PyList_Check(tmp)) { PyErr_Format(PyExc_TypeError, "Suite field \"body\" must be a list, not a %.200s", tmp->ob_type->tp_name); goto failed; } len = PyList_GET_SIZE(tmp); body = _Py_asdl_seq_new(len, arena); if (body == NULL) goto failed; for (i = 0; i < len; i++) { stmt_ty val; res = obj2ast_stmt(PyList_GET_ITEM(tmp, i), &val, arena); if (res != 0) goto failed; if (len != PyList_GET_SIZE(tmp)) { PyErr_SetString(PyExc_RuntimeError, "Suite field \"body\" changed size during iteration"); goto failed; } asdl_seq_SET(body, i, val); } Py_CLEAR(tmp); } *out = Suite(body, arena); if (*out == NULL) goto failed; return 0; } PyErr_Format(PyExc_TypeError, "expected some sort of mod, but got %R", obj); failed: Py_XDECREF(tmp); return 1; } int obj2ast_stmt(PyObject* obj, stmt_ty* out, PyArena* arena) { int isinstance; PyObject *tmp = NULL; int lineno; int col_offset; int end_lineno; int end_col_offset; if (obj == Py_None) { *out = NULL; return 0; } if (_PyObject_LookupAttrId(obj, &PyId_lineno, &tmp) < 0) { return 1; } if (tmp == NULL) { PyErr_SetString(PyExc_TypeError, "required field \"lineno\" missing from stmt"); return 1; } else { int res; res = obj2ast_int(tmp, &lineno, arena); if (res != 0) goto failed; Py_CLEAR(tmp); } if (_PyObject_LookupAttrId(obj, &PyId_col_offset, &tmp) < 0) { return 1; } if (tmp == NULL) { PyErr_SetString(PyExc_TypeError, "required field \"col_offset\" missing 
from stmt"); return 1; } else { int res; res = obj2ast_int(tmp, &col_offset, arena); if (res != 0) goto failed; Py_CLEAR(tmp); } if (_PyObject_LookupAttrId(obj, &PyId_end_lineno, &tmp) < 0) { return 1; } if (tmp == NULL || tmp == Py_None) { Py_CLEAR(tmp); end_lineno = 0; } else { int res; res = obj2ast_int(tmp, &end_lineno, arena); if (res != 0) goto failed; Py_CLEAR(tmp); } if (_PyObject_LookupAttrId(obj, &PyId_end_col_offset, &tmp) < 0) { return 1; } if (tmp == NULL || tmp == Py_None) { Py_CLEAR(tmp); end_col_offset = 0; } else { int res; res = obj2ast_int(tmp, &end_col_offset, arena); if (res != 0) goto failed; Py_CLEAR(tmp); } isinstance = PyObject_IsInstance(obj, (PyObject*)FunctionDef_type); if (isinstance == -1) { return 1; } if (isinstance) { identifier name; arguments_ty args; asdl_seq* body; asdl_seq* decorator_list; expr_ty returns; if (_PyObject_LookupAttrId(obj, &PyId_name, &tmp) < 0) { return 1; } if (tmp == NULL) { PyErr_SetString(PyExc_TypeError, "required field \"name\" missing from FunctionDef"); return 1; } else { int res; res = obj2ast_identifier(tmp, &name, arena); if (res != 0) goto failed; Py_CLEAR(tmp); } if (_PyObject_LookupAttrId(obj, &PyId_args, &tmp) < 0) { return 1; } if (tmp == NULL) { PyErr_SetString(PyExc_TypeError, "required field \"args\" missing from FunctionDef"); return 1; } else { int res; res = obj2ast_arguments(tmp, &args, arena); if (res != 0) goto failed; Py_CLEAR(tmp); } if (_PyObject_LookupAttrId(obj, &PyId_body, &tmp) < 0) { return 1; } if (tmp == NULL) { PyErr_SetString(PyExc_TypeError, "required field \"body\" missing from FunctionDef"); return 1; } else { int res; Py_ssize_t len; Py_ssize_t i; if (!PyList_Check(tmp)) { PyErr_Format(PyExc_TypeError, "FunctionDef field \"body\" must be a list, not a %.200s", tmp->ob_type->tp_name); goto failed; } len = PyList_GET_SIZE(tmp); body = _Py_asdl_seq_new(len, arena); if (body == NULL) goto failed; for (i = 0; i < len; i++) { stmt_ty val; res = obj2ast_stmt(PyList_GET_ITEM(tmp, 
i), &val, arena); if (res != 0) goto failed; if (len != PyList_GET_SIZE(tmp)) { PyErr_SetString(PyExc_RuntimeError, "FunctionDef field \"body\" changed size during iteration"); goto failed; } asdl_seq_SET(body, i, val); } Py_CLEAR(tmp); } if (_PyObject_LookupAttrId(obj, &PyId_decorator_list, &tmp) < 0) { return 1; } if (tmp == NULL) { PyErr_SetString(PyExc_TypeError, "required field \"decorator_list\" missing from FunctionDef"); return 1; } else { int res; Py_ssize_t len; Py_ssize_t i; if (!PyList_Check(tmp)) { PyErr_Format(PyExc_TypeError, "FunctionDef field \"decorator_list\" must be a list, not a %.200s", tmp->ob_type->tp_name); goto failed; } len = PyList_GET_SIZE(tmp); decorator_list = _Py_asdl_seq_new(len, arena); if (decorator_list == NULL) goto failed; for (i = 0; i < len; i++) { expr_ty val; res = obj2ast_expr(PyList_GET_ITEM(tmp, i), &val, arena); if (res != 0) goto failed; if (len != PyList_GET_SIZE(tmp)) { PyErr_SetString(PyExc_RuntimeError, "FunctionDef field \"decorator_list\" changed size during iteration"); goto failed; } asdl_seq_SET(decorator_list, i, val); } Py_CLEAR(tmp); } if (_PyObject_LookupAttrId(obj, &PyId_returns, &tmp) < 0) { return 1; } if (tmp == NULL || tmp == Py_None) { Py_CLEAR(tmp); returns = NULL; } else { int res; res = obj2ast_expr(tmp, &returns, arena); if (res != 0) goto failed; Py_CLEAR(tmp); } *out = FunctionDef(name, args, body, decorator_list, returns, lineno, col_offset, end_lineno, end_col_offset, arena); if (*out == NULL) goto failed; return 0; } isinstance = PyObject_IsInstance(obj, (PyObject*)AsyncFunctionDef_type); if (isinstance == -1) { return 1; } if (isinstance) { identifier name; arguments_ty args; asdl_seq* body; asdl_seq* decorator_list; expr_ty returns; if (_PyObject_LookupAttrId(obj, &PyId_name, &tmp) < 0) { return 1; } if (tmp == NULL) { PyErr_SetString(PyExc_TypeError, "required field \"name\" missing from AsyncFunctionDef"); return 1; } else { int res; res = obj2ast_identifier(tmp, &name, arena); if (res 
!= 0) goto failed; Py_CLEAR(tmp); } if (_PyObject_LookupAttrId(obj, &PyId_args, &tmp) < 0) { return 1; } if (tmp == NULL) { PyErr_SetString(PyExc_TypeError, "required field \"args\" missing from AsyncFunctionDef"); return 1; } else { int res; res = obj2ast_arguments(tmp, &args, arena); if (res != 0) goto failed; Py_CLEAR(tmp); } if (_PyObject_LookupAttrId(obj, &PyId_body, &tmp) < 0) { return 1; } if (tmp == NULL) { PyErr_SetString(PyExc_TypeError, "required field \"body\" missing from AsyncFunctionDef"); return 1; } else { int res; Py_ssize_t len; Py_ssize_t i; if (!PyList_Check(tmp)) { PyErr_Format(PyExc_TypeError, "AsyncFunctionDef field \"body\" must be a list, not a %.200s", tmp->ob_type->tp_name); goto failed; } len = PyList_GET_SIZE(tmp); body = _Py_asdl_seq_new(len, arena); if (body == NULL) goto failed; for (i = 0; i < len; i++) { stmt_ty val; res = obj2ast_stmt(PyList_GET_ITEM(tmp, i), &val, arena); if (res != 0) goto failed; if (len != PyList_GET_SIZE(tmp)) { PyErr_SetString(PyExc_RuntimeError, "AsyncFunctionDef field \"body\" changed size during iteration"); goto failed; } asdl_seq_SET(body, i, val); } Py_CLEAR(tmp); } if (_PyObject_LookupAttrId(obj, &PyId_decorator_list, &tmp) < 0) { return 1; } if (tmp == NULL) { PyErr_SetString(PyExc_TypeError, "required field \"decorator_list\" missing from AsyncFunctionDef"); return 1; } else { int res; Py_ssize_t len; Py_ssize_t i; if (!PyList_Check(tmp)) { PyErr_Format(PyExc_TypeError, "AsyncFunctionDef field \"decorator_list\" must be a list, not a %.200s", tmp->ob_type->tp_name); goto failed; } len = PyList_GET_SIZE(tmp); decorator_list = _Py_asdl_seq_new(len, arena); if (decorator_list == NULL) goto failed; for (i = 0; i < len; i++) { expr_ty val; res = obj2ast_expr(PyList_GET_ITEM(tmp, i), &val, arena); if (res != 0) goto failed; if (len != PyList_GET_SIZE(tmp)) { PyErr_SetString(PyExc_RuntimeError, "AsyncFunctionDef field \"decorator_list\" changed size during iteration"); goto failed; } 
asdl_seq_SET(decorator_list, i, val); } Py_CLEAR(tmp); } if (_PyObject_LookupAttrId(obj, &PyId_returns, &tmp) < 0) { return 1; } if (tmp == NULL || tmp == Py_None) { Py_CLEAR(tmp); returns = NULL; } else { int res; res = obj2ast_expr(tmp, &returns, arena); if (res != 0) goto failed; Py_CLEAR(tmp); } *out = AsyncFunctionDef(name, args, body, decorator_list, returns, lineno, col_offset, end_lineno, end_col_offset, arena); if (*out == NULL) goto failed; return 0; } isinstance = PyObject_IsInstance(obj, (PyObject*)ClassDef_type); if (isinstance == -1) { return 1; } if (isinstance) { identifier name; asdl_seq* bases; asdl_seq* keywords; asdl_seq* body; asdl_seq* decorator_list; if (_PyObject_LookupAttrId(obj, &PyId_name, &tmp) < 0) { return 1; } if (tmp == NULL) { PyErr_SetString(PyExc_TypeError, "required field \"name\" missing from ClassDef"); return 1; } else { int res; res = obj2ast_identifier(tmp, &name, arena); if (res != 0) goto failed; Py_CLEAR(tmp); } if (_PyObject_LookupAttrId(obj, &PyId_bases, &tmp) < 0) { return 1; } if (tmp == NULL) { PyErr_SetString(PyExc_TypeError, "required field \"bases\" missing from ClassDef"); return 1; } else { int res; Py_ssize_t len; Py_ssize_t i; if (!PyList_Check(tmp)) { PyErr_Format(PyExc_TypeError, "ClassDef field \"bases\" must be a list, not a %.200s", tmp->ob_type->tp_name); goto failed; } len = PyList_GET_SIZE(tmp); bases = _Py_asdl_seq_new(len, arena); if (bases == NULL) goto failed; for (i = 0; i < len; i++) { expr_ty val; res = obj2ast_expr(PyList_GET_ITEM(tmp, i), &val, arena); if (res != 0) goto failed; if (len != PyList_GET_SIZE(tmp)) { PyErr_SetString(PyExc_RuntimeError, "ClassDef field \"bases\" changed size during iteration"); goto failed; } asdl_seq_SET(bases, i, val); } Py_CLEAR(tmp); } if (_PyObject_LookupAttrId(obj, &PyId_keywords, &tmp) < 0) { return 1; } if (tmp == NULL) { PyErr_SetString(PyExc_TypeError, "required field \"keywords\" missing from ClassDef"); return 1; } else { int res; Py_ssize_t len; 
Py_ssize_t i; if (!PyList_Check(tmp)) { PyErr_Format(PyExc_TypeError, "ClassDef field \"keywords\" must be a list, not a %.200s", tmp->ob_type->tp_name); goto failed; } len = PyList_GET_SIZE(tmp); keywords = _Py_asdl_seq_new(len, arena); if (keywords == NULL) goto failed; for (i = 0; i < len; i++) { keyword_ty val; res = obj2ast_keyword(PyList_GET_ITEM(tmp, i), &val, arena); if (res != 0) goto failed; if (len != PyList_GET_SIZE(tmp)) { PyErr_SetString(PyExc_RuntimeError, "ClassDef field \"keywords\" changed size during iteration"); goto failed; } asdl_seq_SET(keywords, i, val); } Py_CLEAR(tmp); } if (_PyObject_LookupAttrId(obj, &PyId_body, &tmp) < 0) { return 1; } if (tmp == NULL) { PyErr_SetString(PyExc_TypeError, "required field \"body\" missing from ClassDef"); return 1; } else { int res; Py_ssize_t len; Py_ssize_t i; if (!PyList_Check(tmp)) { PyErr_Format(PyExc_TypeError, "ClassDef field \"body\" must be a list, not a %.200s", tmp->ob_type->tp_name); goto failed; } len = PyList_GET_SIZE(tmp); body = _Py_asdl_seq_new(len, arena); if (body == NULL) goto failed; for (i = 0; i < len; i++) { stmt_ty val; res = obj2ast_stmt(PyList_GET_ITEM(tmp, i), &val, arena); if (res != 0) goto failed; if (len != PyList_GET_SIZE(tmp)) { PyErr_SetString(PyExc_RuntimeError, "ClassDef field \"body\" changed size during iteration"); goto failed; } asdl_seq_SET(body, i, val); } Py_CLEAR(tmp); } if (_PyObject_LookupAttrId(obj, &PyId_decorator_list, &tmp) < 0) { return 1; } if (tmp == NULL) { PyErr_SetString(PyExc_TypeError, "required field \"decorator_list\" missing from ClassDef"); return 1; } else { int res; Py_ssize_t len; Py_ssize_t i; if (!PyList_Check(tmp)) { PyErr_Format(PyExc_TypeError, "ClassDef field \"decorator_list\" must be a list, not a %.200s", tmp->ob_type->tp_name); goto failed; } len = PyList_GET_SIZE(tmp); decorator_list = _Py_asdl_seq_new(len, arena); if (decorator_list == NULL) goto failed; for (i = 0; i < len; i++) { expr_ty val; res = 
obj2ast_expr(PyList_GET_ITEM(tmp, i), &val, arena); if (res != 0) goto failed; if (len != PyList_GET_SIZE(tmp)) { PyErr_SetString(PyExc_RuntimeError, "ClassDef field \"decorator_list\" changed size during iteration"); goto failed; } asdl_seq_SET(decorator_list, i, val); } Py_CLEAR(tmp); } *out = ClassDef(name, bases, keywords, body, decorator_list, lineno, col_offset, end_lineno, end_col_offset, arena); if (*out == NULL) goto failed; return 0; } isinstance = PyObject_IsInstance(obj, (PyObject*)Return_type); if (isinstance == -1) { return 1; } if (isinstance) { expr_ty value; if (_PyObject_LookupAttrId(obj, &PyId_value, &tmp) < 0) { return 1; } if (tmp == NULL || tmp == Py_None) { Py_CLEAR(tmp); value = NULL; } else { int res; res = obj2ast_expr(tmp, &value, arena); if (res != 0) goto failed; Py_CLEAR(tmp); } *out = Return(value, lineno, col_offset, end_lineno, end_col_offset, arena); if (*out == NULL) goto failed; return 0; } isinstance = PyObject_IsInstance(obj, (PyObject*)Delete_type); if (isinstance == -1) { return 1; } if (isinstance) { asdl_seq* targets; if (_PyObject_LookupAttrId(obj, &PyId_targets, &tmp) < 0) { return 1; } if (tmp == NULL) { PyErr_SetString(PyExc_TypeError, "required field \"targets\" missing from Delete"); return 1; } else { int res; Py_ssize_t len; Py_ssize_t i; if (!PyList_Check(tmp)) { PyErr_Format(PyExc_TypeError, "Delete field \"targets\" must be a list, not a %.200s", tmp->ob_type->tp_name); goto failed; } len = PyList_GET_SIZE(tmp); targets = _Py_asdl_seq_new(len, arena); if (targets == NULL) goto failed; for (i = 0; i < len; i++) { expr_ty val; res = obj2ast_expr(PyList_GET_ITEM(tmp, i), &val, arena); if (res != 0) goto failed; if (len != PyList_GET_SIZE(tmp)) { PyErr_SetString(PyExc_RuntimeError, "Delete field \"targets\" changed size during iteration"); goto failed; } asdl_seq_SET(targets, i, val); } Py_CLEAR(tmp); } *out = Delete(targets, lineno, col_offset, end_lineno, end_col_offset, arena); if (*out == NULL) goto failed; 
return 0; } isinstance = PyObject_IsInstance(obj, (PyObject*)Assign_type); if (isinstance == -1) { return 1; } if (isinstance) { asdl_seq* targets; expr_ty value; if (_PyObject_LookupAttrId(obj, &PyId_targets, &tmp) < 0) { return 1; } if (tmp == NULL) { PyErr_SetString(PyExc_TypeError, "required field \"targets\" missing from Assign"); return 1; } else { int res; Py_ssize_t len; Py_ssize_t i; if (!PyList_Check(tmp)) { PyErr_Format(PyExc_TypeError, "Assign field \"targets\" must be a list, not a %.200s", tmp->ob_type->tp_name); goto failed; } len = PyList_GET_SIZE(tmp); targets = _Py_asdl_seq_new(len, arena); if (targets == NULL) goto failed; for (i = 0; i < len; i++) { expr_ty val; res = obj2ast_expr(PyList_GET_ITEM(tmp, i), &val, arena); if (res != 0) goto failed; if (len != PyList_GET_SIZE(tmp)) { PyErr_SetString(PyExc_RuntimeError, "Assign field \"targets\" changed size during iteration"); goto failed; } asdl_seq_SET(targets, i, val); } Py_CLEAR(tmp); } if (_PyObject_LookupAttrId(obj, &PyId_value, &tmp) < 0) { return 1; } if (tmp == NULL) { PyErr_SetString(PyExc_TypeError, "required field \"value\" missing from Assign"); return 1; } else { int res; res = obj2ast_expr(tmp, &value, arena); if (res != 0) goto failed; Py_CLEAR(tmp); } *out = Assign(targets, value, lineno, col_offset, end_lineno, end_col_offset, arena); if (*out == NULL) goto failed; return 0; } isinstance = PyObject_IsInstance(obj, (PyObject*)AugAssign_type); if (isinstance == -1) { return 1; } if (isinstance) { expr_ty target; operator_ty op; expr_ty value; if (_PyObject_LookupAttrId(obj, &PyId_target, &tmp) < 0) { return 1; } if (tmp == NULL) { PyErr_SetString(PyExc_TypeError, "required field \"target\" missing from AugAssign"); return 1; } else { int res; res = obj2ast_expr(tmp, &target, arena); if (res != 0) goto failed; Py_CLEAR(tmp); } if (_PyObject_LookupAttrId(obj, &PyId_op, &tmp) < 0) { return 1; } if (tmp == NULL) { PyErr_SetString(PyExc_TypeError, "required field \"op\" missing from 
AugAssign"); return 1; } else { int res; res = obj2ast_operator(tmp, &op, arena); if (res != 0) goto failed; Py_CLEAR(tmp); } if (_PyObject_LookupAttrId(obj, &PyId_value, &tmp) < 0) { return 1; } if (tmp == NULL) { PyErr_SetString(PyExc_TypeError, "required field \"value\" missing from AugAssign"); return 1; } else { int res; res = obj2ast_expr(tmp, &value, arena); if (res != 0) goto failed; Py_CLEAR(tmp); } *out = AugAssign(target, op, value, lineno, col_offset, end_lineno, end_col_offset, arena); if (*out == NULL) goto failed; return 0; } isinstance = PyObject_IsInstance(obj, (PyObject*)AnnAssign_type); if (isinstance == -1) { return 1; } if (isinstance) { expr_ty target; expr_ty annotation; expr_ty value; int simple; if (_PyObject_LookupAttrId(obj, &PyId_target, &tmp) < 0) { return 1; } if (tmp == NULL) { PyErr_SetString(PyExc_TypeError, "required field \"target\" missing from AnnAssign"); return 1; } else { int res; res = obj2ast_expr(tmp, &target, arena); if (res != 0) goto failed; Py_CLEAR(tmp); } if (_PyObject_LookupAttrId(obj, &PyId_annotation, &tmp) < 0) { return 1; } if (tmp == NULL) { PyErr_SetString(PyExc_TypeError, "required field \"annotation\" missing from AnnAssign"); return 1; } else { int res; res = obj2ast_expr(tmp, &annotation, arena); if (res != 0) goto failed; Py_CLEAR(tmp); } if (_PyObject_LookupAttrId(obj, &PyId_value, &tmp) < 0) { return 1; } if (tmp == NULL || tmp == Py_None) { Py_CLEAR(tmp); value = NULL; } else { int res; res = obj2ast_expr(tmp, &value, arena); if (res != 0) goto failed; Py_CLEAR(tmp); } if (_PyObject_LookupAttrId(obj, &PyId_simple, &tmp) < 0) { return 1; } if (tmp == NULL) { PyErr_SetString(PyExc_TypeError, "required field \"simple\" missing from AnnAssign"); return 1; } else { int res; res = obj2ast_int(tmp, &simple, arena); if (res != 0) goto failed; Py_CLEAR(tmp); } *out = AnnAssign(target, annotation, value, simple, lineno, col_offset, end_lineno, end_col_offset, arena); if (*out == NULL) goto failed; return 0; } 
isinstance = PyObject_IsInstance(obj, (PyObject*)For_type); if (isinstance == -1) { return 1; } if (isinstance) { expr_ty target; expr_ty iter; asdl_seq* body; asdl_seq* orelse; if (_PyObject_LookupAttrId(obj, &PyId_target, &tmp) < 0) { return 1; } if (tmp == NULL) { PyErr_SetString(PyExc_TypeError, "required field \"target\" missing from For"); return 1; } else { int res; res = obj2ast_expr(tmp, &target, arena); if (res != 0) goto failed; Py_CLEAR(tmp); } if (_PyObject_LookupAttrId(obj, &PyId_iter, &tmp) < 0) { return 1; } if (tmp == NULL) { PyErr_SetString(PyExc_TypeError, "required field \"iter\" missing from For"); return 1; } else { int res; res = obj2ast_expr(tmp, &iter, arena); if (res != 0) goto failed; Py_CLEAR(tmp); } if (_PyObject_LookupAttrId(obj, &PyId_body, &tmp) < 0) { return 1; } if (tmp == NULL) { PyErr_SetString(PyExc_TypeError, "required field \"body\" missing from For"); return 1; } else { int res; Py_ssize_t len; Py_ssize_t i; if (!PyList_Check(tmp)) { PyErr_Format(PyExc_TypeError, "For field \"body\" must be a list, not a %.200s", tmp->ob_type->tp_name); goto failed; } len = PyList_GET_SIZE(tmp); body = _Py_asdl_seq_new(len, arena); if (body == NULL) goto failed; for (i = 0; i < len; i++) { stmt_ty val; res = obj2ast_stmt(PyList_GET_ITEM(tmp, i), &val, arena); if (res != 0) goto failed; if (len != PyList_GET_SIZE(tmp)) { PyErr_SetString(PyExc_RuntimeError, "For field \"body\" changed size during iteration"); goto failed; } asdl_seq_SET(body, i, val); } Py_CLEAR(tmp); } if (_PyObject_LookupAttrId(obj, &PyId_orelse, &tmp) < 0) { return 1; } if (tmp == NULL) { PyErr_SetString(PyExc_TypeError, "required field \"orelse\" missing from For"); return 1; } else { int res; Py_ssize_t len; Py_ssize_t i; if (!PyList_Check(tmp)) { PyErr_Format(PyExc_TypeError, "For field \"orelse\" must be a list, not a %.200s", tmp->ob_type->tp_name); goto failed; } len = PyList_GET_SIZE(tmp); orelse = _Py_asdl_seq_new(len, arena); if (orelse == NULL) goto failed; for (i 
= 0; i < len; i++) { stmt_ty val; res = obj2ast_stmt(PyList_GET_ITEM(tmp, i), &val, arena); if (res != 0) goto failed; if (len != PyList_GET_SIZE(tmp)) { PyErr_SetString(PyExc_RuntimeError, "For field \"orelse\" changed size during iteration"); goto failed; } asdl_seq_SET(orelse, i, val); } Py_CLEAR(tmp); } *out = For(target, iter, body, orelse, lineno, col_offset, end_lineno, end_col_offset, arena); if (*out == NULL) goto failed; return 0; } isinstance = PyObject_IsInstance(obj, (PyObject*)AsyncFor_type); if (isinstance == -1) { return 1; } if (isinstance) { expr_ty target; expr_ty iter; asdl_seq* body; asdl_seq* orelse; if (_PyObject_LookupAttrId(obj, &PyId_target, &tmp) < 0) { return 1; } if (tmp == NULL) { PyErr_SetString(PyExc_TypeError, "required field \"target\" missing from AsyncFor"); return 1; } else { int res; res = obj2ast_expr(tmp, &target, arena); if (res != 0) goto failed; Py_CLEAR(tmp); } if (_PyObject_LookupAttrId(obj, &PyId_iter, &tmp) < 0) { return 1; } if (tmp == NULL) { PyErr_SetString(PyExc_TypeError, "required field \"iter\" missing from AsyncFor"); return 1; } else { int res; res = obj2ast_expr(tmp, &iter, arena); if (res != 0) goto failed; Py_CLEAR(tmp); } if (_PyObject_LookupAttrId(obj, &PyId_body, &tmp) < 0) { return 1; } if (tmp == NULL) { PyErr_SetString(PyExc_TypeError, "required field \"body\" missing from AsyncFor"); return 1; } else { int res; Py_ssize_t len; Py_ssize_t i; if (!PyList_Check(tmp)) { PyErr_Format(PyExc_TypeError, "AsyncFor field \"body\" must be a list, not a %.200s", tmp->ob_type->tp_name); goto failed; } len = PyList_GET_SIZE(tmp); body = _Py_asdl_seq_new(len, arena); if (body == NULL) goto failed; for (i = 0; i < len; i++) { stmt_ty val; res = obj2ast_stmt(PyList_GET_ITEM(tmp, i), &val, arena); if (res != 0) goto failed; if (len != PyList_GET_SIZE(tmp)) { PyErr_SetString(PyExc_RuntimeError, "AsyncFor field \"body\" changed size during iteration"); goto failed; } asdl_seq_SET(body, i, val); } Py_CLEAR(tmp); } if 
(_PyObject_LookupAttrId(obj, &PyId_orelse, &tmp) < 0) { return 1; } if (tmp == NULL) { PyErr_SetString(PyExc_TypeError, "required field \"orelse\" missing from AsyncFor"); return 1; } else { int res; Py_ssize_t len; Py_ssize_t i; if (!PyList_Check(tmp)) { PyErr_Format(PyExc_TypeError, "AsyncFor field \"orelse\" must be a list, not a %.200s", tmp->ob_type->tp_name); goto failed; } len = PyList_GET_SIZE(tmp); orelse = _Py_asdl_seq_new(len, arena); if (orelse == NULL) goto failed; for (i = 0; i < len; i++) { stmt_ty val; res = obj2ast_stmt(PyList_GET_ITEM(tmp, i), &val, arena); if (res != 0) goto failed; if (len != PyList_GET_SIZE(tmp)) { PyErr_SetString(PyExc_RuntimeError, "AsyncFor field \"orelse\" changed size during iteration"); goto failed; } asdl_seq_SET(orelse, i, val); } Py_CLEAR(tmp); } *out = AsyncFor(target, iter, body, orelse, lineno, col_offset, end_lineno, end_col_offset, arena); if (*out == NULL) goto failed; return 0; } isinstance = PyObject_IsInstance(obj, (PyObject*)While_type); if (isinstance == -1) { return 1; } if (isinstance) { expr_ty test; asdl_seq* body; asdl_seq* orelse; if (_PyObject_LookupAttrId(obj, &PyId_test, &tmp) < 0) { return 1; } if (tmp == NULL) { PyErr_SetString(PyExc_TypeError, "required field \"test\" missing from While"); return 1; } else { int res; res = obj2ast_expr(tmp, &test, arena); if (res != 0) goto failed; Py_CLEAR(tmp); } if (_PyObject_LookupAttrId(obj, &PyId_body, &tmp) < 0) { return 1; } if (tmp == NULL) { PyErr_SetString(PyExc_TypeError, "required field \"body\" missing from While"); return 1; } else { int res; Py_ssize_t len; Py_ssize_t i; if (!PyList_Check(tmp)) { PyErr_Format(PyExc_TypeError, "While field \"body\" must be a list, not a %.200s", tmp->ob_type->tp_name); goto failed; } len = PyList_GET_SIZE(tmp); body = _Py_asdl_seq_new(len, arena); if (body == NULL) goto failed; for (i = 0; i < len; i++) { stmt_ty val; res = obj2ast_stmt(PyList_GET_ITEM(tmp, i), &val, arena); if (res != 0) goto failed; if (len != 
PyList_GET_SIZE(tmp)) { PyErr_SetString(PyExc_RuntimeError, "While field \"body\" changed size during iteration"); goto failed; } asdl_seq_SET(body, i, val); } Py_CLEAR(tmp); } if (_PyObject_LookupAttrId(obj, &PyId_orelse, &tmp) < 0) { return 1; } if (tmp == NULL) { PyErr_SetString(PyExc_TypeError, "required field \"orelse\" missing from While"); return 1; } else { int res; Py_ssize_t len; Py_ssize_t i; if (!PyList_Check(tmp)) { PyErr_Format(PyExc_TypeError, "While field \"orelse\" must be a list, not a %.200s", tmp->ob_type->tp_name); goto failed; } len = PyList_GET_SIZE(tmp); orelse = _Py_asdl_seq_new(len, arena); if (orelse == NULL) goto failed; for (i = 0; i < len; i++) { stmt_ty val; res = obj2ast_stmt(PyList_GET_ITEM(tmp, i), &val, arena); if (res != 0) goto failed; if (len != PyList_GET_SIZE(tmp)) { PyErr_SetString(PyExc_RuntimeError, "While field \"orelse\" changed size during iteration"); goto failed; } asdl_seq_SET(orelse, i, val); } Py_CLEAR(tmp); } *out = While(test, body, orelse, lineno, col_offset, end_lineno, end_col_offset, arena); if (*out == NULL) goto failed; return 0; } isinstance = PyObject_IsInstance(obj, (PyObject*)If_type); if (isinstance == -1) { return 1; } if (isinstance) { expr_ty test; asdl_seq* body; asdl_seq* orelse; if (_PyObject_LookupAttrId(obj, &PyId_test, &tmp) < 0) { return 1; } if (tmp == NULL) { PyErr_SetString(PyExc_TypeError, "required field \"test\" missing from If"); return 1; } else { int res; res = obj2ast_expr(tmp, &test, arena); if (res != 0) goto failed; Py_CLEAR(tmp); } if (_PyObject_LookupAttrId(obj, &PyId_body, &tmp) < 0) { return 1; } if (tmp == NULL) { PyErr_SetString(PyExc_TypeError, "required field \"body\" missing from If"); return 1; } else { int res; Py_ssize_t len; Py_ssize_t i; if (!PyList_Check(tmp)) { PyErr_Format(PyExc_TypeError, "If field \"body\" must be a list, not a %.200s", tmp->ob_type->tp_name); goto failed; } len = PyList_GET_SIZE(tmp); body = _Py_asdl_seq_new(len, arena); if (body == NULL) goto 
failed; for (i = 0; i < len; i++) { stmt_ty val; res = obj2ast_stmt(PyList_GET_ITEM(tmp, i), &val, arena); if (res != 0) goto failed; if (len != PyList_GET_SIZE(tmp)) { PyErr_SetString(PyExc_RuntimeError, "If field \"body\" changed size during iteration"); goto failed; } asdl_seq_SET(body, i, val); } Py_CLEAR(tmp); } if (_PyObject_LookupAttrId(obj, &PyId_orelse, &tmp) < 0) { return 1; } if (tmp == NULL) { PyErr_SetString(PyExc_TypeError, "required field \"orelse\" missing from If"); return 1; } else { int res; Py_ssize_t len; Py_ssize_t i; if (!PyList_Check(tmp)) { PyErr_Format(PyExc_TypeError, "If field \"orelse\" must be a list, not a %.200s", tmp->ob_type->tp_name); goto failed; } len = PyList_GET_SIZE(tmp); orelse = _Py_asdl_seq_new(len, arena); if (orelse == NULL) goto failed; for (i = 0; i < len; i++) { stmt_ty val; res = obj2ast_stmt(PyList_GET_ITEM(tmp, i), &val, arena); if (res != 0) goto failed; if (len != PyList_GET_SIZE(tmp)) { PyErr_SetString(PyExc_RuntimeError, "If field \"orelse\" changed size during iteration"); goto failed; } asdl_seq_SET(orelse, i, val); } Py_CLEAR(tmp); } *out = If(test, body, orelse, lineno, col_offset, end_lineno, end_col_offset, arena); if (*out == NULL) goto failed; return 0; } isinstance = PyObject_IsInstance(obj, (PyObject*)With_type); if (isinstance == -1) { return 1; } if (isinstance) { asdl_seq* items; asdl_seq* body; if (_PyObject_LookupAttrId(obj, &PyId_items, &tmp) < 0) { return 1; } if (tmp == NULL) { PyErr_SetString(PyExc_TypeError, "required field \"items\" missing from With"); return 1; } else { int res; Py_ssize_t len; Py_ssize_t i; if (!PyList_Check(tmp)) { PyErr_Format(PyExc_TypeError, "With field \"items\" must be a list, not a %.200s", tmp->ob_type->tp_name); goto failed; } len = PyList_GET_SIZE(tmp); items = _Py_asdl_seq_new(len, arena); if (items == NULL) goto failed; for (i = 0; i < len; i++) { withitem_ty val; res = obj2ast_withitem(PyList_GET_ITEM(tmp, i), &val, arena); if (res != 0) goto failed; if (len 
!= PyList_GET_SIZE(tmp)) { PyErr_SetString(PyExc_RuntimeError, "With field \"items\" changed size during iteration"); goto failed; } asdl_seq_SET(items, i, val); } Py_CLEAR(tmp); } if (_PyObject_LookupAttrId(obj, &PyId_body, &tmp) < 0) { return 1; } if (tmp == NULL) { PyErr_SetString(PyExc_TypeError, "required field \"body\" missing from With"); return 1; } else { int res; Py_ssize_t len; Py_ssize_t i; if (!PyList_Check(tmp)) { PyErr_Format(PyExc_TypeError, "With field \"body\" must be a list, not a %.200s", tmp->ob_type->tp_name); goto failed; } len = PyList_GET_SIZE(tmp); body = _Py_asdl_seq_new(len, arena); if (body == NULL) goto failed; for (i = 0; i < len; i++) { stmt_ty val; res = obj2ast_stmt(PyList_GET_ITEM(tmp, i), &val, arena); if (res != 0) goto failed; if (len != PyList_GET_SIZE(tmp)) { PyErr_SetString(PyExc_RuntimeError, "With field \"body\" changed size during iteration"); goto failed; } asdl_seq_SET(body, i, val); } Py_CLEAR(tmp); } *out = With(items, body, lineno, col_offset, end_lineno, end_col_offset, arena); if (*out == NULL) goto failed; return 0; } isinstance = PyObject_IsInstance(obj, (PyObject*)AsyncWith_type); if (isinstance == -1) { return 1; } if (isinstance) { asdl_seq* items; asdl_seq* body; if (_PyObject_LookupAttrId(obj, &PyId_items, &tmp) < 0) { return 1; } if (tmp == NULL) { PyErr_SetString(PyExc_TypeError, "required field \"items\" missing from AsyncWith"); return 1; } else { int res; Py_ssize_t len; Py_ssize_t i; if (!PyList_Check(tmp)) { PyErr_Format(PyExc_TypeError, "AsyncWith field \"items\" must be a list, not a %.200s", tmp->ob_type->tp_name); goto failed; } len = PyList_GET_SIZE(tmp); items = _Py_asdl_seq_new(len, arena); if (items == NULL) goto failed; for (i = 0; i < len; i++) { withitem_ty val; res = obj2ast_withitem(PyList_GET_ITEM(tmp, i), &val, arena); if (res != 0) goto failed; if (len != PyList_GET_SIZE(tmp)) { PyErr_SetString(PyExc_RuntimeError, "AsyncWith field \"items\" changed size during iteration"); goto failed; 
} asdl_seq_SET(items, i, val); } Py_CLEAR(tmp); } if (_PyObject_LookupAttrId(obj, &PyId_body, &tmp) < 0) { return 1; } if (tmp == NULL) { PyErr_SetString(PyExc_TypeError, "required field \"body\" missing from AsyncWith"); return 1; } else { int res; Py_ssize_t len; Py_ssize_t i; if (!PyList_Check(tmp)) { PyErr_Format(PyExc_TypeError, "AsyncWith field \"body\" must be a list, not a %.200s", tmp->ob_type->tp_name); goto failed; } len = PyList_GET_SIZE(tmp); body = _Py_asdl_seq_new(len, arena); if (body == NULL) goto failed; for (i = 0; i < len; i++) { stmt_ty val; res = obj2ast_stmt(PyList_GET_ITEM(tmp, i), &val, arena); if (res != 0) goto failed; if (len != PyList_GET_SIZE(tmp)) { PyErr_SetString(PyExc_RuntimeError, "AsyncWith field \"body\" changed size during iteration"); goto failed; } asdl_seq_SET(body, i, val); } Py_CLEAR(tmp); } *out = AsyncWith(items, body, lineno, col_offset, end_lineno, end_col_offset, arena); if (*out == NULL) goto failed; return 0; } isinstance = PyObject_IsInstance(obj, (PyObject*)Raise_type); if (isinstance == -1) { return 1; } if (isinstance) { expr_ty exc; expr_ty cause; if (_PyObject_LookupAttrId(obj, &PyId_exc, &tmp) < 0) { return 1; } if (tmp == NULL || tmp == Py_None) { Py_CLEAR(tmp); exc = NULL; } else { int res; res = obj2ast_expr(tmp, &exc, arena); if (res != 0) goto failed; Py_CLEAR(tmp); } if (_PyObject_LookupAttrId(obj, &PyId_cause, &tmp) < 0) { return 1; } if (tmp == NULL || tmp == Py_None) { Py_CLEAR(tmp); cause = NULL; } else { int res; res = obj2ast_expr(tmp, &cause, arena); if (res != 0) goto failed; Py_CLEAR(tmp); } *out = Raise(exc, cause, lineno, col_offset, end_lineno, end_col_offset, arena); if (*out == NULL) goto failed; return 0; } isinstance = PyObject_IsInstance(obj, (PyObject*)Try_type); if (isinstance == -1) { return 1; } if (isinstance) { asdl_seq* body; asdl_seq* handlers; asdl_seq* orelse; asdl_seq* finalbody; if (_PyObject_LookupAttrId(obj, &PyId_body, &tmp) < 0) { return 1; } if (tmp == NULL) { 
PyErr_SetString(PyExc_TypeError, "required field \"body\" missing from Try"); return 1; } else { int res; Py_ssize_t len; Py_ssize_t i; if (!PyList_Check(tmp)) { PyErr_Format(PyExc_TypeError, "Try field \"body\" must be a list, not a %.200s", tmp->ob_type->tp_name); goto failed; } len = PyList_GET_SIZE(tmp); body = _Py_asdl_seq_new(len, arena); if (body == NULL) goto failed; for (i = 0; i < len; i++) { stmt_ty val; res = obj2ast_stmt(PyList_GET_ITEM(tmp, i), &val, arena); if (res != 0) goto failed; if (len != PyList_GET_SIZE(tmp)) { PyErr_SetString(PyExc_RuntimeError, "Try field \"body\" changed size during iteration"); goto failed; } asdl_seq_SET(body, i, val); } Py_CLEAR(tmp); } if (_PyObject_LookupAttrId(obj, &PyId_handlers, &tmp) < 0) { return 1; } if (tmp == NULL) { PyErr_SetString(PyExc_TypeError, "required field \"handlers\" missing from Try"); return 1; } else { int res; Py_ssize_t len; Py_ssize_t i; if (!PyList_Check(tmp)) { PyErr_Format(PyExc_TypeError, "Try field \"handlers\" must be a list, not a %.200s", tmp->ob_type->tp_name); goto failed; } len = PyList_GET_SIZE(tmp); handlers = _Py_asdl_seq_new(len, arena); if (handlers == NULL) goto failed; for (i = 0; i < len; i++) { excepthandler_ty val; res = obj2ast_excepthandler(PyList_GET_ITEM(tmp, i), &val, arena); if (res != 0) goto failed; if (len != PyList_GET_SIZE(tmp)) { PyErr_SetString(PyExc_RuntimeError, "Try field \"handlers\" changed size during iteration"); goto failed; } asdl_seq_SET(handlers, i, val); } Py_CLEAR(tmp); } if (_PyObject_LookupAttrId(obj, &PyId_orelse, &tmp) < 0) { return 1; } if (tmp == NULL) { PyErr_SetString(PyExc_TypeError, "required field \"orelse\" missing from Try"); return 1; } else { int res; Py_ssize_t len; Py_ssize_t i; if (!PyList_Check(tmp)) { PyErr_Format(PyExc_TypeError, "Try field \"orelse\" must be a list, not a %.200s", tmp->ob_type->tp_name); goto failed; } len = PyList_GET_SIZE(tmp); orelse = _Py_asdl_seq_new(len, arena); if (orelse == NULL) goto failed; for (i = 
0; i < len; i++) { stmt_ty val; res = obj2ast_stmt(PyList_GET_ITEM(tmp, i), &val, arena); if (res != 0) goto failed; if (len != PyList_GET_SIZE(tmp)) { PyErr_SetString(PyExc_RuntimeError, "Try field \"orelse\" changed size during iteration"); goto failed; } asdl_seq_SET(orelse, i, val); } Py_CLEAR(tmp); } if (_PyObject_LookupAttrId(obj, &PyId_finalbody, &tmp) < 0) { return 1; } if (tmp == NULL) { PyErr_SetString(PyExc_TypeError, "required field \"finalbody\" missing from Try"); return 1; } else { int res; Py_ssize_t len; Py_ssize_t i; if (!PyList_Check(tmp)) { PyErr_Format(PyExc_TypeError, "Try field \"finalbody\" must be a list, not a %.200s", tmp->ob_type->tp_name); goto failed; } len = PyList_GET_SIZE(tmp); finalbody = _Py_asdl_seq_new(len, arena); if (finalbody == NULL) goto failed; for (i = 0; i < len; i++) { stmt_ty val; res = obj2ast_stmt(PyList_GET_ITEM(tmp, i), &val, arena); if (res != 0) goto failed; if (len != PyList_GET_SIZE(tmp)) { PyErr_SetString(PyExc_RuntimeError, "Try field \"finalbody\" changed size during iteration"); goto failed; } asdl_seq_SET(finalbody, i, val); } Py_CLEAR(tmp); } *out = Try(body, handlers, orelse, finalbody, lineno, col_offset, end_lineno, end_col_offset, arena); if (*out == NULL) goto failed; return 0; } isinstance = PyObject_IsInstance(obj, (PyObject*)Assert_type); if (isinstance == -1) { return 1; } if (isinstance) { expr_ty test; expr_ty msg; if (_PyObject_LookupAttrId(obj, &PyId_test, &tmp) < 0) { return 1; } if (tmp == NULL) { PyErr_SetString(PyExc_TypeError, "required field \"test\" missing from Assert"); return 1; } else { int res; res = obj2ast_expr(tmp, &test, arena); if (res != 0) goto failed; Py_CLEAR(tmp); } if (_PyObject_LookupAttrId(obj, &PyId_msg, &tmp) < 0) { return 1; } if (tmp == NULL || tmp == Py_None) { Py_CLEAR(tmp); msg = NULL; } else { int res; res = obj2ast_expr(tmp, &msg, arena); if (res != 0) goto failed; Py_CLEAR(tmp); } *out = Assert(test, msg, lineno, col_offset, end_lineno, end_col_offset, 
arena); if (*out == NULL) goto failed; return 0; } isinstance = PyObject_IsInstance(obj, (PyObject*)Import_type); if (isinstance == -1) { return 1; } if (isinstance) { asdl_seq* names; if (_PyObject_LookupAttrId(obj, &PyId_names, &tmp) < 0) { return 1; } if (tmp == NULL) { PyErr_SetString(PyExc_TypeError, "required field \"names\" missing from Import"); return 1; } else { int res; Py_ssize_t len; Py_ssize_t i; if (!PyList_Check(tmp)) { PyErr_Format(PyExc_TypeError, "Import field \"names\" must be a list, not a %.200s", tmp->ob_type->tp_name); goto failed; } len = PyList_GET_SIZE(tmp); names = _Py_asdl_seq_new(len, arena); if (names == NULL) goto failed; for (i = 0; i < len; i++) { alias_ty val; res = obj2ast_alias(PyList_GET_ITEM(tmp, i), &val, arena); if (res != 0) goto failed; if (len != PyList_GET_SIZE(tmp)) { PyErr_SetString(PyExc_RuntimeError, "Import field \"names\" changed size during iteration"); goto failed; } asdl_seq_SET(names, i, val); } Py_CLEAR(tmp); } *out = Import(names, lineno, col_offset, end_lineno, end_col_offset, arena); if (*out == NULL) goto failed; return 0; } isinstance = PyObject_IsInstance(obj, (PyObject*)ImportFrom_type); if (isinstance == -1) { return 1; } if (isinstance) { identifier module; asdl_seq* names; int level; if (_PyObject_LookupAttrId(obj, &PyId_module, &tmp) < 0) { return 1; } if (tmp == NULL || tmp == Py_None) { Py_CLEAR(tmp); module = NULL; } else { int res; res = obj2ast_identifier(tmp, &module, arena); if (res != 0) goto failed; Py_CLEAR(tmp); } if (_PyObject_LookupAttrId(obj, &PyId_names, &tmp) < 0) { return 1; } if (tmp == NULL) { PyErr_SetString(PyExc_TypeError, "required field \"names\" missing from ImportFrom"); return 1; } else { int res; Py_ssize_t len; Py_ssize_t i; if (!PyList_Check(tmp)) { PyErr_Format(PyExc_TypeError, "ImportFrom field \"names\" must be a list, not a %.200s", tmp->ob_type->tp_name); goto failed; } len = PyList_GET_SIZE(tmp); names = _Py_asdl_seq_new(len, arena); if (names == NULL) goto 
failed; for (i = 0; i < len; i++) { alias_ty val; res = obj2ast_alias(PyList_GET_ITEM(tmp, i), &val, arena); if (res != 0) goto failed; if (len != PyList_GET_SIZE(tmp)) { PyErr_SetString(PyExc_RuntimeError, "ImportFrom field \"names\" changed size during iteration"); goto failed; } asdl_seq_SET(names, i, val); } Py_CLEAR(tmp); } if (_PyObject_LookupAttrId(obj, &PyId_level, &tmp) < 0) { return 1; } if (tmp == NULL || tmp == Py_None) { Py_CLEAR(tmp); level = 0; } else { int res; res = obj2ast_int(tmp, &level, arena); if (res != 0) goto failed; Py_CLEAR(tmp); } *out = ImportFrom(module, names, level, lineno, col_offset, end_lineno, end_col_offset, arena); if (*out == NULL) goto failed; return 0; } isinstance = PyObject_IsInstance(obj, (PyObject*)Global_type); if (isinstance == -1) { return 1; } if (isinstance) { asdl_seq* names; if (_PyObject_LookupAttrId(obj, &PyId_names, &tmp) < 0) { return 1; } if (tmp == NULL) { PyErr_SetString(PyExc_TypeError, "required field \"names\" missing from Global"); return 1; } else { int res; Py_ssize_t len; Py_ssize_t i; if (!PyList_Check(tmp)) { PyErr_Format(PyExc_TypeError, "Global field \"names\" must be a list, not a %.200s", tmp->ob_type->tp_name); goto failed; } len = PyList_GET_SIZE(tmp); names = _Py_asdl_seq_new(len, arena); if (names == NULL) goto failed; for (i = 0; i < len; i++) { identifier val; res = obj2ast_identifier(PyList_GET_ITEM(tmp, i), &val, arena); if (res != 0) goto failed; if (len != PyList_GET_SIZE(tmp)) { PyErr_SetString(PyExc_RuntimeError, "Global field \"names\" changed size during iteration"); goto failed; } asdl_seq_SET(names, i, val); } Py_CLEAR(tmp); } *out = Global(names, lineno, col_offset, end_lineno, end_col_offset, arena); if (*out == NULL) goto failed; return 0; } isinstance = PyObject_IsInstance(obj, (PyObject*)Nonlocal_type); if (isinstance == -1) { return 1; } if (isinstance) { asdl_seq* names; if (_PyObject_LookupAttrId(obj, &PyId_names, &tmp) < 0) { return 1; } if (tmp == NULL) { 
PyErr_SetString(PyExc_TypeError, "required field \"names\" missing from Nonlocal"); return 1; } else { int res; Py_ssize_t len; Py_ssize_t i; if (!PyList_Check(tmp)) { PyErr_Format(PyExc_TypeError, "Nonlocal field \"names\" must be a list, not a %.200s", tmp->ob_type->tp_name); goto failed; } len = PyList_GET_SIZE(tmp); names = _Py_asdl_seq_new(len, arena); if (names == NULL) goto failed; for (i = 0; i < len; i++) { identifier val; res = obj2ast_identifier(PyList_GET_ITEM(tmp, i), &val, arena); if (res != 0) goto failed; if (len != PyList_GET_SIZE(tmp)) { PyErr_SetString(PyExc_RuntimeError, "Nonlocal field \"names\" changed size during iteration"); goto failed; } asdl_seq_SET(names, i, val); } Py_CLEAR(tmp); } *out = Nonlocal(names, lineno, col_offset, end_lineno, end_col_offset, arena); if (*out == NULL) goto failed; return 0; } isinstance = PyObject_IsInstance(obj, (PyObject*)Expr_type); if (isinstance == -1) { return 1; } if (isinstance) { expr_ty value; if (_PyObject_LookupAttrId(obj, &PyId_value, &tmp) < 0) { return 1; } if (tmp == NULL) { PyErr_SetString(PyExc_TypeError, "required field \"value\" missing from Expr"); return 1; } else { int res; res = obj2ast_expr(tmp, &value, arena); if (res != 0) goto failed; Py_CLEAR(tmp); } *out = Expr(value, lineno, col_offset, end_lineno, end_col_offset, arena); if (*out == NULL) goto failed; return 0; } isinstance = PyObject_IsInstance(obj, (PyObject*)Pass_type); if (isinstance == -1) { return 1; } if (isinstance) { *out = Pass(lineno, col_offset, end_lineno, end_col_offset, arena); if (*out == NULL) goto failed; return 0; } isinstance = PyObject_IsInstance(obj, (PyObject*)Break_type); if (isinstance == -1) { return 1; } if (isinstance) { *out = Break(lineno, col_offset, end_lineno, end_col_offset, arena); if (*out == NULL) goto failed; return 0; } isinstance = PyObject_IsInstance(obj, (PyObject*)Continue_type); if (isinstance == -1) { return 1; } if (isinstance) { *out = Continue(lineno, col_offset, end_lineno, 
end_col_offset, arena); if (*out == NULL) goto failed; return 0; } PyErr_Format(PyExc_TypeError, "expected some sort of stmt, but got %R", obj); failed: Py_XDECREF(tmp); return 1; } int obj2ast_expr(PyObject* obj, expr_ty* out, PyArena* arena) { int isinstance; PyObject *tmp = NULL; int lineno; int col_offset; int end_lineno; int end_col_offset; if (obj == Py_None) { *out = NULL; return 0; } if (_PyObject_LookupAttrId(obj, &PyId_lineno, &tmp) < 0) { return 1; } if (tmp == NULL) { PyErr_SetString(PyExc_TypeError, "required field \"lineno\" missing from expr"); return 1; } else { int res; res = obj2ast_int(tmp, &lineno, arena); if (res != 0) goto failed; Py_CLEAR(tmp); } if (_PyObject_LookupAttrId(obj, &PyId_col_offset, &tmp) < 0) { return 1; } if (tmp == NULL) { PyErr_SetString(PyExc_TypeError, "required field \"col_offset\" missing from expr"); return 1; } else { int res; res = obj2ast_int(tmp, &col_offset, arena); if (res != 0) goto failed; Py_CLEAR(tmp); } if (_PyObject_LookupAttrId(obj, &PyId_end_lineno, &tmp) < 0) { return 1; } if (tmp == NULL || tmp == Py_None) { Py_CLEAR(tmp); end_lineno = 0; } else { int res; res = obj2ast_int(tmp, &end_lineno, arena); if (res != 0) goto failed; Py_CLEAR(tmp); } if (_PyObject_LookupAttrId(obj, &PyId_end_col_offset, &tmp) < 0) { return 1; } if (tmp == NULL || tmp == Py_None) { Py_CLEAR(tmp); end_col_offset = 0; } else { int res; res = obj2ast_int(tmp, &end_col_offset, arena); if (res != 0) goto failed; Py_CLEAR(tmp); } isinstance = PyObject_IsInstance(obj, (PyObject*)BoolOp_type); if (isinstance == -1) { return 1; } if (isinstance) { boolop_ty op; asdl_seq* values; if (_PyObject_LookupAttrId(obj, &PyId_op, &tmp) < 0) { return 1; } if (tmp == NULL) { PyErr_SetString(PyExc_TypeError, "required field \"op\" missing from BoolOp"); return 1; } else { int res; res = obj2ast_boolop(tmp, &op, arena); if (res != 0) goto failed; Py_CLEAR(tmp); } if (_PyObject_LookupAttrId(obj, &PyId_values, &tmp) < 0) { return 1; } if (tmp == NULL) { 
PyErr_SetString(PyExc_TypeError, "required field \"values\" missing from BoolOp"); return 1; } else { int res; Py_ssize_t len; Py_ssize_t i; if (!PyList_Check(tmp)) { PyErr_Format(PyExc_TypeError, "BoolOp field \"values\" must be a list, not a %.200s", tmp->ob_type->tp_name); goto failed; } len = PyList_GET_SIZE(tmp); values = _Py_asdl_seq_new(len, arena); if (values == NULL) goto failed; for (i = 0; i < len; i++) { expr_ty val; res = obj2ast_expr(PyList_GET_ITEM(tmp, i), &val, arena); if (res != 0) goto failed; if (len != PyList_GET_SIZE(tmp)) { PyErr_SetString(PyExc_RuntimeError, "BoolOp field \"values\" changed size during iteration"); goto failed; } asdl_seq_SET(values, i, val); } Py_CLEAR(tmp); } *out = BoolOp(op, values, lineno, col_offset, end_lineno, end_col_offset, arena); if (*out == NULL) goto failed; return 0; } isinstance = PyObject_IsInstance(obj, (PyObject*)NamedExpr_type); if (isinstance == -1) { return 1; } if (isinstance) { expr_ty target; expr_ty value; if (_PyObject_LookupAttrId(obj, &PyId_target, &tmp) < 0) { return 1; } if (tmp == NULL) { PyErr_SetString(PyExc_TypeError, "required field \"target\" missing from NamedExpr"); return 1; } else { int res; res = obj2ast_expr(tmp, &target, arena); if (res != 0) goto failed; Py_CLEAR(tmp); } if (_PyObject_LookupAttrId(obj, &PyId_value, &tmp) < 0) { return 1; } if (tmp == NULL) { PyErr_SetString(PyExc_TypeError, "required field \"value\" missing from NamedExpr"); return 1; } else { int res; res = obj2ast_expr(tmp, &value, arena); if (res != 0) goto failed; Py_CLEAR(tmp); } *out = NamedExpr(target, value, lineno, col_offset, end_lineno, end_col_offset, arena); if (*out == NULL) goto failed; return 0; } isinstance = PyObject_IsInstance(obj, (PyObject*)BinOp_type); if (isinstance == -1) { return 1; } if (isinstance) { expr_ty left; operator_ty op; expr_ty right; if (_PyObject_LookupAttrId(obj, &PyId_left, &tmp) < 0) { return 1; } if (tmp == NULL) { PyErr_SetString(PyExc_TypeError, "required field \"left\" 
missing from BinOp"); return 1; } else { int res; res = obj2ast_expr(tmp, &left, arena); if (res != 0) goto failed; Py_CLEAR(tmp); } if (_PyObject_LookupAttrId(obj, &PyId_op, &tmp) < 0) { return 1; } if (tmp == NULL) { PyErr_SetString(PyExc_TypeError, "required field \"op\" missing from BinOp"); return 1; } else { int res; res = obj2ast_operator(tmp, &op, arena); if (res != 0) goto failed; Py_CLEAR(tmp); } if (_PyObject_LookupAttrId(obj, &PyId_right, &tmp) < 0) { return 1; } if (tmp == NULL) { PyErr_SetString(PyExc_TypeError, "required field \"right\" missing from BinOp"); return 1; } else { int res; res = obj2ast_expr(tmp, &right, arena); if (res != 0) goto failed; Py_CLEAR(tmp); } *out = BinOp(left, op, right, lineno, col_offset, end_lineno, end_col_offset, arena); if (*out == NULL) goto failed; return 0; } isinstance = PyObject_IsInstance(obj, (PyObject*)UnaryOp_type); if (isinstance == -1) { return 1; } if (isinstance) { unaryop_ty op; expr_ty operand; if (_PyObject_LookupAttrId(obj, &PyId_op, &tmp) < 0) { return 1; } if (tmp == NULL) { PyErr_SetString(PyExc_TypeError, "required field \"op\" missing from UnaryOp"); return 1; } else { int res; res = obj2ast_unaryop(tmp, &op, arena); if (res != 0) goto failed; Py_CLEAR(tmp); } if (_PyObject_LookupAttrId(obj, &PyId_operand, &tmp) < 0) { return 1; } if (tmp == NULL) { PyErr_SetString(PyExc_TypeError, "required field \"operand\" missing from UnaryOp"); return 1; } else { int res; res = obj2ast_expr(tmp, &operand, arena); if (res != 0) goto failed; Py_CLEAR(tmp); } *out = UnaryOp(op, operand, lineno, col_offset, end_lineno, end_col_offset, arena); if (*out == NULL) goto failed; return 0; } isinstance = PyObject_IsInstance(obj, (PyObject*)Lambda_type); if (isinstance == -1) { return 1; } if (isinstance) { arguments_ty args; expr_ty body; if (_PyObject_LookupAttrId(obj, &PyId_args, &tmp) < 0) { return 1; } if (tmp == NULL) { PyErr_SetString(PyExc_TypeError, "required field \"args\" missing from Lambda"); return 1; } 
else { int res; res = obj2ast_arguments(tmp, &args, arena); if (res != 0) goto failed; Py_CLEAR(tmp); } if (_PyObject_LookupAttrId(obj, &PyId_body, &tmp) < 0) { return 1; } if (tmp == NULL) { PyErr_SetString(PyExc_TypeError, "required field \"body\" missing from Lambda"); return 1; } else { int res; res = obj2ast_expr(tmp, &body, arena); if (res != 0) goto failed; Py_CLEAR(tmp); } *out = Lambda(args, body, lineno, col_offset, end_lineno, end_col_offset, arena); if (*out == NULL) goto failed; return 0; } isinstance = PyObject_IsInstance(obj, (PyObject*)IfExp_type); if (isinstance == -1) { return 1; } if (isinstance) { expr_ty test; expr_ty body; expr_ty orelse; if (_PyObject_LookupAttrId(obj, &PyId_test, &tmp) < 0) { return 1; } if (tmp == NULL) { PyErr_SetString(PyExc_TypeError, "required field \"test\" missing from IfExp"); return 1; } else { int res; res = obj2ast_expr(tmp, &test, arena); if (res != 0) goto failed; Py_CLEAR(tmp); } if (_PyObject_LookupAttrId(obj, &PyId_body, &tmp) < 0) { return 1; } if (tmp == NULL) { PyErr_SetString(PyExc_TypeError, "required field \"body\" missing from IfExp"); return 1; } else { int res; res = obj2ast_expr(tmp, &body, arena); if (res != 0) goto failed; Py_CLEAR(tmp); } if (_PyObject_LookupAttrId(obj, &PyId_orelse, &tmp) < 0) { return 1; } if (tmp == NULL) { PyErr_SetString(PyExc_TypeError, "required field \"orelse\" missing from IfExp"); return 1; } else { int res; res = obj2ast_expr(tmp, &orelse, arena); if (res != 0) goto failed; Py_CLEAR(tmp); } *out = IfExp(test, body, orelse, lineno, col_offset, end_lineno, end_col_offset, arena); if (*out == NULL) goto failed; return 0; } isinstance = PyObject_IsInstance(obj, (PyObject*)Dict_type); if (isinstance == -1) { return 1; } if (isinstance) { asdl_seq* keys; asdl_seq* values; if (_PyObject_LookupAttrId(obj, &PyId_keys, &tmp) < 0) { return 1; } if (tmp == NULL) { PyErr_SetString(PyExc_TypeError, "required field \"keys\" missing from Dict"); return 1; } else { int res; Py_ssize_t 
len; Py_ssize_t i; if (!PyList_Check(tmp)) { PyErr_Format(PyExc_TypeError, "Dict field \"keys\" must be a list, not a %.200s", tmp->ob_type->tp_name); goto failed; } len = PyList_GET_SIZE(tmp); keys = _Py_asdl_seq_new(len, arena); if (keys == NULL) goto failed; for (i = 0; i < len; i++) { expr_ty val; res = obj2ast_expr(PyList_GET_ITEM(tmp, i), &val, arena); if (res != 0) goto failed; if (len != PyList_GET_SIZE(tmp)) { PyErr_SetString(PyExc_RuntimeError, "Dict field \"keys\" changed size during iteration"); goto failed; } asdl_seq_SET(keys, i, val); } Py_CLEAR(tmp); } if (_PyObject_LookupAttrId(obj, &PyId_values, &tmp) < 0) { return 1; } if (tmp == NULL) { PyErr_SetString(PyExc_TypeError, "required field \"values\" missing from Dict"); return 1; } else { int res; Py_ssize_t len; Py_ssize_t i; if (!PyList_Check(tmp)) { PyErr_Format(PyExc_TypeError, "Dict field \"values\" must be a list, not a %.200s", tmp->ob_type->tp_name); goto failed; } len = PyList_GET_SIZE(tmp); values = _Py_asdl_seq_new(len, arena); if (values == NULL) goto failed; for (i = 0; i < len; i++) { expr_ty val; res = obj2ast_expr(PyList_GET_ITEM(tmp, i), &val, arena); if (res != 0) goto failed; if (len != PyList_GET_SIZE(tmp)) { PyErr_SetString(PyExc_RuntimeError, "Dict field \"values\" changed size during iteration"); goto failed; } asdl_seq_SET(values, i, val); } Py_CLEAR(tmp); } *out = Dict(keys, values, lineno, col_offset, end_lineno, end_col_offset, arena); if (*out == NULL) goto failed; return 0; } isinstance = PyObject_IsInstance(obj, (PyObject*)Set_type); if (isinstance == -1) { return 1; } if (isinstance) { asdl_seq* elts; if (_PyObject_LookupAttrId(obj, &PyId_elts, &tmp) < 0) { return 1; } if (tmp == NULL) { PyErr_SetString(PyExc_TypeError, "required field \"elts\" missing from Set"); return 1; } else { int res; Py_ssize_t len; Py_ssize_t i; if (!PyList_Check(tmp)) { PyErr_Format(PyExc_TypeError, "Set field \"elts\" must be a list, not a %.200s", tmp->ob_type->tp_name); goto failed; } len 
= PyList_GET_SIZE(tmp); elts = _Py_asdl_seq_new(len, arena); if (elts == NULL) goto failed; for (i = 0; i < len; i++) { expr_ty val; res = obj2ast_expr(PyList_GET_ITEM(tmp, i), &val, arena); if (res != 0) goto failed; if (len != PyList_GET_SIZE(tmp)) { PyErr_SetString(PyExc_RuntimeError, "Set field \"elts\" changed size during iteration"); goto failed; } asdl_seq_SET(elts, i, val); } Py_CLEAR(tmp); } *out = Set(elts, lineno, col_offset, end_lineno, end_col_offset, arena); if (*out == NULL) goto failed; return 0; } isinstance = PyObject_IsInstance(obj, (PyObject*)ListComp_type); if (isinstance == -1) { return 1; } if (isinstance) { expr_ty elt; asdl_seq* generators; if (_PyObject_LookupAttrId(obj, &PyId_elt, &tmp) < 0) { return 1; } if (tmp == NULL) { PyErr_SetString(PyExc_TypeError, "required field \"elt\" missing from ListComp"); return 1; } else { int res; res = obj2ast_expr(tmp, &elt, arena); if (res != 0) goto failed; Py_CLEAR(tmp); } if (_PyObject_LookupAttrId(obj, &PyId_generators, &tmp) < 0) { return 1; } if (tmp == NULL) { PyErr_SetString(PyExc_TypeError, "required field \"generators\" missing from ListComp"); return 1; } else { int res; Py_ssize_t len; Py_ssize_t i; if (!PyList_Check(tmp)) { PyErr_Format(PyExc_TypeError, "ListComp field \"generators\" must be a list, not a %.200s", tmp->ob_type->tp_name); goto failed; } len = PyList_GET_SIZE(tmp); generators = _Py_asdl_seq_new(len, arena); if (generators == NULL) goto failed; for (i = 0; i < len; i++) { comprehension_ty val; res = obj2ast_comprehension(PyList_GET_ITEM(tmp, i), &val, arena); if (res != 0) goto failed; if (len != PyList_GET_SIZE(tmp)) { PyErr_SetString(PyExc_RuntimeError, "ListComp field \"generators\" changed size during iteration"); goto failed; } asdl_seq_SET(generators, i, val); } Py_CLEAR(tmp); } *out = ListComp(elt, generators, lineno, col_offset, end_lineno, end_col_offset, arena); if (*out == NULL) goto failed; return 0; } isinstance = PyObject_IsInstance(obj, 
(PyObject*)SetComp_type); if (isinstance == -1) { return 1; } if (isinstance) { expr_ty elt; asdl_seq* generators; if (_PyObject_LookupAttrId(obj, &PyId_elt, &tmp) < 0) { return 1; } if (tmp == NULL) { PyErr_SetString(PyExc_TypeError, "required field \"elt\" missing from SetComp"); return 1; } else { int res; res = obj2ast_expr(tmp, &elt, arena); if (res != 0) goto failed; Py_CLEAR(tmp); } if (_PyObject_LookupAttrId(obj, &PyId_generators, &tmp) < 0) { return 1; } if (tmp == NULL) { PyErr_SetString(PyExc_TypeError, "required field \"generators\" missing from SetComp"); return 1; } else { int res; Py_ssize_t len; Py_ssize_t i; if (!PyList_Check(tmp)) { PyErr_Format(PyExc_TypeError, "SetComp field \"generators\" must be a list, not a %.200s", tmp->ob_type->tp_name); goto failed; } len = PyList_GET_SIZE(tmp); generators = _Py_asdl_seq_new(len, arena); if (generators == NULL) goto failed; for (i = 0; i < len; i++) { comprehension_ty val; res = obj2ast_comprehension(PyList_GET_ITEM(tmp, i), &val, arena); if (res != 0) goto failed; if (len != PyList_GET_SIZE(tmp)) { PyErr_SetString(PyExc_RuntimeError, "SetComp field \"generators\" changed size during iteration"); goto failed; } asdl_seq_SET(generators, i, val); } Py_CLEAR(tmp); } *out = SetComp(elt, generators, lineno, col_offset, end_lineno, end_col_offset, arena); if (*out == NULL) goto failed; return 0; } isinstance = PyObject_IsInstance(obj, (PyObject*)DictComp_type); if (isinstance == -1) { return 1; } if (isinstance) { expr_ty key; expr_ty value; asdl_seq* generators; if (_PyObject_LookupAttrId(obj, &PyId_key, &tmp) < 0) { return 1; } if (tmp == NULL) { PyErr_SetString(PyExc_TypeError, "required field \"key\" missing from DictComp"); return 1; } else { int res; res = obj2ast_expr(tmp, &key, arena); if (res != 0) goto failed; Py_CLEAR(tmp); } if (_PyObject_LookupAttrId(obj, &PyId_value, &tmp) < 0) { return 1; } if (tmp == NULL) { PyErr_SetString(PyExc_TypeError, "required field \"value\" missing from DictComp"); 
return 1; } else { int res; res = obj2ast_expr(tmp, &value, arena); if (res != 0) goto failed; Py_CLEAR(tmp); } if (_PyObject_LookupAttrId(obj, &PyId_generators, &tmp) < 0) { return 1; } if (tmp == NULL) { PyErr_SetString(PyExc_TypeError, "required field \"generators\" missing from DictComp"); return 1; } else { int res; Py_ssize_t len; Py_ssize_t i; if (!PyList_Check(tmp)) { PyErr_Format(PyExc_TypeError, "DictComp field \"generators\" must be a list, not a %.200s", tmp->ob_type->tp_name); goto failed; } len = PyList_GET_SIZE(tmp); generators = _Py_asdl_seq_new(len, arena); if (generators == NULL) goto failed; for (i = 0; i < len; i++) { comprehension_ty val; res = obj2ast_comprehension(PyList_GET_ITEM(tmp, i), &val, arena); if (res != 0) goto failed; if (len != PyList_GET_SIZE(tmp)) { PyErr_SetString(PyExc_RuntimeError, "DictComp field \"generators\" changed size during iteration"); goto failed; } asdl_seq_SET(generators, i, val); } Py_CLEAR(tmp); } *out = DictComp(key, value, generators, lineno, col_offset, end_lineno, end_col_offset, arena); if (*out == NULL) goto failed; return 0; } isinstance = PyObject_IsInstance(obj, (PyObject*)GeneratorExp_type); if (isinstance == -1) { return 1; } if (isinstance) { expr_ty elt; asdl_seq* generators; if (_PyObject_LookupAttrId(obj, &PyId_elt, &tmp) < 0) { return 1; } if (tmp == NULL) { PyErr_SetString(PyExc_TypeError, "required field \"elt\" missing from GeneratorExp"); return 1; } else { int res; res = obj2ast_expr(tmp, &elt, arena); if (res != 0) goto failed; Py_CLEAR(tmp); } if (_PyObject_LookupAttrId(obj, &PyId_generators, &tmp) < 0) { return 1; } if (tmp == NULL) { PyErr_SetString(PyExc_TypeError, "required field \"generators\" missing from GeneratorExp"); return 1; } else { int res; Py_ssize_t len; Py_ssize_t i; if (!PyList_Check(tmp)) { PyErr_Format(PyExc_TypeError, "GeneratorExp field \"generators\" must be a list, not a %.200s", tmp->ob_type->tp_name); goto failed; } len = PyList_GET_SIZE(tmp); generators = 
_Py_asdl_seq_new(len, arena); if (generators == NULL) goto failed; for (i = 0; i < len; i++) { comprehension_ty val; res = obj2ast_comprehension(PyList_GET_ITEM(tmp, i), &val, arena); if (res != 0) goto failed; if (len != PyList_GET_SIZE(tmp)) { PyErr_SetString(PyExc_RuntimeError, "GeneratorExp field \"generators\" changed size during iteration"); goto failed; } asdl_seq_SET(generators, i, val); } Py_CLEAR(tmp); } *out = GeneratorExp(elt, generators, lineno, col_offset, end_lineno, end_col_offset, arena); if (*out == NULL) goto failed; return 0; } isinstance = PyObject_IsInstance(obj, (PyObject*)Await_type); if (isinstance == -1) { return 1; } if (isinstance) { expr_ty value; if (_PyObject_LookupAttrId(obj, &PyId_value, &tmp) < 0) { return 1; } if (tmp == NULL) { PyErr_SetString(PyExc_TypeError, "required field \"value\" missing from Await"); return 1; } else { int res; res = obj2ast_expr(tmp, &value, arena); if (res != 0) goto failed; Py_CLEAR(tmp); } *out = Await(value, lineno, col_offset, end_lineno, end_col_offset, arena); if (*out == NULL) goto failed; return 0; } isinstance = PyObject_IsInstance(obj, (PyObject*)Yield_type); if (isinstance == -1) { return 1; } if (isinstance) { expr_ty value; if (_PyObject_LookupAttrId(obj, &PyId_value, &tmp) < 0) { return 1; } if (tmp == NULL || tmp == Py_None) { Py_CLEAR(tmp); value = NULL; } else { int res; res = obj2ast_expr(tmp, &value, arena); if (res != 0) goto failed; Py_CLEAR(tmp); } *out = Yield(value, lineno, col_offset, end_lineno, end_col_offset, arena); if (*out == NULL) goto failed; return 0; } isinstance = PyObject_IsInstance(obj, (PyObject*)YieldFrom_type); if (isinstance == -1) { return 1; } if (isinstance) { expr_ty value; if (_PyObject_LookupAttrId(obj, &PyId_value, &tmp) < 0) { return 1; } if (tmp == NULL) { PyErr_SetString(PyExc_TypeError, "required field \"value\" missing from YieldFrom"); return 1; } else { int res; res = obj2ast_expr(tmp, &value, arena); if (res != 0) goto failed; Py_CLEAR(tmp); } *out 
= YieldFrom(value, lineno, col_offset, end_lineno, end_col_offset, arena); if (*out == NULL) goto failed; return 0; } isinstance = PyObject_IsInstance(obj, (PyObject*)Compare_type); if (isinstance == -1) { return 1; } if (isinstance) { expr_ty left; asdl_int_seq* ops; asdl_seq* comparators; if (_PyObject_LookupAttrId(obj, &PyId_left, &tmp) < 0) { return 1; } if (tmp == NULL) { PyErr_SetString(PyExc_TypeError, "required field \"left\" missing from Compare"); return 1; } else { int res; res = obj2ast_expr(tmp, &left, arena); if (res != 0) goto failed; Py_CLEAR(tmp); } if (_PyObject_LookupAttrId(obj, &PyId_ops, &tmp) < 0) { return 1; } if (tmp == NULL) { PyErr_SetString(PyExc_TypeError, "required field \"ops\" missing from Compare"); return 1; } else { int res; Py_ssize_t len; Py_ssize_t i; if (!PyList_Check(tmp)) { PyErr_Format(PyExc_TypeError, "Compare field \"ops\" must be a list, not a %.200s", tmp->ob_type->tp_name); goto failed; } len = PyList_GET_SIZE(tmp); ops = _Py_asdl_int_seq_new(len, arena); if (ops == NULL) goto failed; for (i = 0; i < len; i++) { cmpop_ty val; res = obj2ast_cmpop(PyList_GET_ITEM(tmp, i), &val, arena); if (res != 0) goto failed; if (len != PyList_GET_SIZE(tmp)) { PyErr_SetString(PyExc_RuntimeError, "Compare field \"ops\" changed size during iteration"); goto failed; } asdl_seq_SET(ops, i, val); } Py_CLEAR(tmp); } if (_PyObject_LookupAttrId(obj, &PyId_comparators, &tmp) < 0) { return 1; } if (tmp == NULL) { PyErr_SetString(PyExc_TypeError, "required field \"comparators\" missing from Compare"); return 1; } else { int res; Py_ssize_t len; Py_ssize_t i; if (!PyList_Check(tmp)) { PyErr_Format(PyExc_TypeError, "Compare field \"comparators\" must be a list, not a %.200s", tmp->ob_type->tp_name); goto failed; } len = PyList_GET_SIZE(tmp); comparators = _Py_asdl_seq_new(len, arena); if (comparators == NULL) goto failed; for (i = 0; i < len; i++) { expr_ty val; res = obj2ast_expr(PyList_GET_ITEM(tmp, i), &val, arena); if (res != 0) goto failed; if 
(len != PyList_GET_SIZE(tmp)) { PyErr_SetString(PyExc_RuntimeError, "Compare field \"comparators\" changed size during iteration"); goto failed; } asdl_seq_SET(comparators, i, val); } Py_CLEAR(tmp); } *out = Compare(left, ops, comparators, lineno, col_offset, end_lineno, end_col_offset, arena); if (*out == NULL) goto failed; return 0; } isinstance = PyObject_IsInstance(obj, (PyObject*)Call_type); if (isinstance == -1) { return 1; } if (isinstance) { expr_ty func; asdl_seq* args; asdl_seq* keywords; if (_PyObject_LookupAttrId(obj, &PyId_func, &tmp) < 0) { return 1; } if (tmp == NULL) { PyErr_SetString(PyExc_TypeError, "required field \"func\" missing from Call"); return 1; } else { int res; res = obj2ast_expr(tmp, &func, arena); if (res != 0) goto failed; Py_CLEAR(tmp); } if (_PyObject_LookupAttrId(obj, &PyId_args, &tmp) < 0) { return 1; } if (tmp == NULL) { PyErr_SetString(PyExc_TypeError, "required field \"args\" missing from Call"); return 1; } else { int res; Py_ssize_t len; Py_ssize_t i; if (!PyList_Check(tmp)) { PyErr_Format(PyExc_TypeError, "Call field \"args\" must be a list, not a %.200s", tmp->ob_type->tp_name); goto failed; } len = PyList_GET_SIZE(tmp); args = _Py_asdl_seq_new(len, arena); if (args == NULL) goto failed; for (i = 0; i < len; i++) { expr_ty val; res = obj2ast_expr(PyList_GET_ITEM(tmp, i), &val, arena); if (res != 0) goto failed; if (len != PyList_GET_SIZE(tmp)) { PyErr_SetString(PyExc_RuntimeError, "Call field \"args\" changed size during iteration"); goto failed; } asdl_seq_SET(args, i, val); } Py_CLEAR(tmp); } if (_PyObject_LookupAttrId(obj, &PyId_keywords, &tmp) < 0) { return 1; } if (tmp == NULL) { PyErr_SetString(PyExc_TypeError, "required field \"keywords\" missing from Call"); return 1; } else { int res; Py_ssize_t len; Py_ssize_t i; if (!PyList_Check(tmp)) { PyErr_Format(PyExc_TypeError, "Call field \"keywords\" must be a list, not a %.200s", tmp->ob_type->tp_name); goto failed; } len = PyList_GET_SIZE(tmp); keywords = 
_Py_asdl_seq_new(len, arena); if (keywords == NULL) goto failed; for (i = 0; i < len; i++) { keyword_ty val; res = obj2ast_keyword(PyList_GET_ITEM(tmp, i), &val, arena); if (res != 0) goto failed; if (len != PyList_GET_SIZE(tmp)) { PyErr_SetString(PyExc_RuntimeError, "Call field \"keywords\" changed size during iteration"); goto failed; } asdl_seq_SET(keywords, i, val); } Py_CLEAR(tmp); } *out = Call(func, args, keywords, lineno, col_offset, end_lineno, end_col_offset, arena); if (*out == NULL) goto failed; return 0; } isinstance = PyObject_IsInstance(obj, (PyObject*)FormattedValue_type); if (isinstance == -1) { return 1; } if (isinstance) { expr_ty value; int conversion; expr_ty format_spec; if (_PyObject_LookupAttrId(obj, &PyId_value, &tmp) < 0) { return 1; } if (tmp == NULL) { PyErr_SetString(PyExc_TypeError, "required field \"value\" missing from FormattedValue"); return 1; } else { int res; res = obj2ast_expr(tmp, &value, arena); if (res != 0) goto failed; Py_CLEAR(tmp); } if (_PyObject_LookupAttrId(obj, &PyId_conversion, &tmp) < 0) { return 1; } if (tmp == NULL || tmp == Py_None) { Py_CLEAR(tmp); conversion = 0; } else { int res; res = obj2ast_int(tmp, &conversion, arena); if (res != 0) goto failed; Py_CLEAR(tmp); } if (_PyObject_LookupAttrId(obj, &PyId_format_spec, &tmp) < 0) { return 1; } if (tmp == NULL || tmp == Py_None) { Py_CLEAR(tmp); format_spec = NULL; } else { int res; res = obj2ast_expr(tmp, &format_spec, arena); if (res != 0) goto failed; Py_CLEAR(tmp); } *out = FormattedValue(value, conversion, format_spec, lineno, col_offset, end_lineno, end_col_offset, arena); if (*out == NULL) goto failed; return 0; } isinstance = PyObject_IsInstance(obj, (PyObject*)JoinedStr_type); if (isinstance == -1) { return 1; } if (isinstance) { asdl_seq* values; if (_PyObject_LookupAttrId(obj, &PyId_values, &tmp) < 0) { return 1; } if (tmp == NULL) { PyErr_SetString(PyExc_TypeError, "required field \"values\" missing from JoinedStr"); return 1; } else { int res; 
Py_ssize_t len; Py_ssize_t i; if (!PyList_Check(tmp)) { PyErr_Format(PyExc_TypeError, "JoinedStr field \"values\" must be a list, not a %.200s", tmp->ob_type->tp_name); goto failed; } len = PyList_GET_SIZE(tmp); values = _Py_asdl_seq_new(len, arena); if (values == NULL) goto failed; for (i = 0; i < len; i++) { expr_ty val; res = obj2ast_expr(PyList_GET_ITEM(tmp, i), &val, arena); if (res != 0) goto failed; if (len != PyList_GET_SIZE(tmp)) { PyErr_SetString(PyExc_RuntimeError, "JoinedStr field \"values\" changed size during iteration"); goto failed; } asdl_seq_SET(values, i, val); } Py_CLEAR(tmp); } *out = JoinedStr(values, lineno, col_offset, end_lineno, end_col_offset, arena); if (*out == NULL) goto failed; return 0; } isinstance = PyObject_IsInstance(obj, (PyObject*)Constant_type); if (isinstance == -1) { return 1; } if (isinstance) { constant value; if (_PyObject_LookupAttrId(obj, &PyId_value, &tmp) < 0) { return 1; } if (tmp == NULL) { PyErr_SetString(PyExc_TypeError, "required field \"value\" missing from Constant"); return 1; } else { int res; res = obj2ast_constant(tmp, &value, arena); if (res != 0) goto failed; Py_CLEAR(tmp); } *out = Constant(value, lineno, col_offset, end_lineno, end_col_offset, arena); if (*out == NULL) goto failed; return 0; } isinstance = PyObject_IsInstance(obj, (PyObject*)Attribute_type); if (isinstance == -1) { return 1; } if (isinstance) { expr_ty value; identifier attr; expr_context_ty ctx; if (_PyObject_LookupAttrId(obj, &PyId_value, &tmp) < 0) { return 1; } if (tmp == NULL) { PyErr_SetString(PyExc_TypeError, "required field \"value\" missing from Attribute"); return 1; } else { int res; res = obj2ast_expr(tmp, &value, arena); if (res != 0) goto failed; Py_CLEAR(tmp); } if (_PyObject_LookupAttrId(obj, &PyId_attr, &tmp) < 0) { return 1; } if (tmp == NULL) { PyErr_SetString(PyExc_TypeError, "required field \"attr\" missing from Attribute"); return 1; } else { int res; res = obj2ast_identifier(tmp, &attr, arena); if (res != 0) goto 
failed; Py_CLEAR(tmp); } if (_PyObject_LookupAttrId(obj, &PyId_ctx, &tmp) < 0) { return 1; } if (tmp == NULL) { PyErr_SetString(PyExc_TypeError, "required field \"ctx\" missing from Attribute"); return 1; } else { int res; res = obj2ast_expr_context(tmp, &ctx, arena); if (res != 0) goto failed; Py_CLEAR(tmp); } *out = Attribute(value, attr, ctx, lineno, col_offset, end_lineno, end_col_offset, arena); if (*out == NULL) goto failed; return 0; } isinstance = PyObject_IsInstance(obj, (PyObject*)Subscript_type); if (isinstance == -1) { return 1; } if (isinstance) { expr_ty value; slice_ty slice; expr_context_ty ctx; if (_PyObject_LookupAttrId(obj, &PyId_value, &tmp) < 0) { return 1; } if (tmp == NULL) { PyErr_SetString(PyExc_TypeError, "required field \"value\" missing from Subscript"); return 1; } else { int res; res = obj2ast_expr(tmp, &value, arena); if (res != 0) goto failed; Py_CLEAR(tmp); } if (_PyObject_LookupAttrId(obj, &PyId_slice, &tmp) < 0) { return 1; } if (tmp == NULL) { PyErr_SetString(PyExc_TypeError, "required field \"slice\" missing from Subscript"); return 1; } else { int res; res = obj2ast_slice(tmp, &slice, arena); if (res != 0) goto failed; Py_CLEAR(tmp); } if (_PyObject_LookupAttrId(obj, &PyId_ctx, &tmp) < 0) { return 1; } if (tmp == NULL) { PyErr_SetString(PyExc_TypeError, "required field \"ctx\" missing from Subscript"); return 1; } else { int res; res = obj2ast_expr_context(tmp, &ctx, arena); if (res != 0) goto failed; Py_CLEAR(tmp); } *out = Subscript(value, slice, ctx, lineno, col_offset, end_lineno, end_col_offset, arena); if (*out == NULL) goto failed; return 0; } isinstance = PyObject_IsInstance(obj, (PyObject*)Starred_type); if (isinstance == -1) { return 1; } if (isinstance) { expr_ty value; expr_context_ty ctx; if (_PyObject_LookupAttrId(obj, &PyId_value, &tmp) < 0) { return 1; } if (tmp == NULL) { PyErr_SetString(PyExc_TypeError, "required field \"value\" missing from Starred"); return 1; } else { int res; res = obj2ast_expr(tmp, 
&value, arena); if (res != 0) goto failed; Py_CLEAR(tmp); } if (_PyObject_LookupAttrId(obj, &PyId_ctx, &tmp) < 0) { return 1; } if (tmp == NULL) { PyErr_SetString(PyExc_TypeError, "required field \"ctx\" missing from Starred"); return 1; } else { int res; res = obj2ast_expr_context(tmp, &ctx, arena); if (res != 0) goto failed; Py_CLEAR(tmp); } *out = Starred(value, ctx, lineno, col_offset, end_lineno, end_col_offset, arena); if (*out == NULL) goto failed; return 0; } isinstance = PyObject_IsInstance(obj, (PyObject*)Name_type); if (isinstance == -1) { return 1; } if (isinstance) { identifier id; expr_context_ty ctx; if (_PyObject_LookupAttrId(obj, &PyId_id, &tmp) < 0) { return 1; } if (tmp == NULL) { PyErr_SetString(PyExc_TypeError, "required field \"id\" missing from Name"); return 1; } else { int res; res = obj2ast_identifier(tmp, &id, arena); if (res != 0) goto failed; Py_CLEAR(tmp); } if (_PyObject_LookupAttrId(obj, &PyId_ctx, &tmp) < 0) { return 1; } if (tmp == NULL) { PyErr_SetString(PyExc_TypeError, "required field \"ctx\" missing from Name"); return 1; } else { int res; res = obj2ast_expr_context(tmp, &ctx, arena); if (res != 0) goto failed; Py_CLEAR(tmp); } *out = Name(id, ctx, lineno, col_offset, end_lineno, end_col_offset, arena); if (*out == NULL) goto failed; return 0; } isinstance = PyObject_IsInstance(obj, (PyObject*)List_type); if (isinstance == -1) { return 1; } if (isinstance) { asdl_seq* elts; expr_context_ty ctx; if (_PyObject_LookupAttrId(obj, &PyId_elts, &tmp) < 0) { return 1; } if (tmp == NULL) { PyErr_SetString(PyExc_TypeError, "required field \"elts\" missing from List"); return 1; } else { int res; Py_ssize_t len; Py_ssize_t i; if (!PyList_Check(tmp)) { PyErr_Format(PyExc_TypeError, "List field \"elts\" must be a list, not a %.200s", tmp->ob_type->tp_name); goto failed; } len = PyList_GET_SIZE(tmp); elts = _Py_asdl_seq_new(len, arena); if (elts == NULL) goto failed; for (i = 0; i < len; i++) { expr_ty val; res = 
obj2ast_expr(PyList_GET_ITEM(tmp, i), &val, arena); if (res != 0) goto failed; if (len != PyList_GET_SIZE(tmp)) { PyErr_SetString(PyExc_RuntimeError, "List field \"elts\" changed size during iteration"); goto failed; } asdl_seq_SET(elts, i, val); } Py_CLEAR(tmp); } if (_PyObject_LookupAttrId(obj, &PyId_ctx, &tmp) < 0) { return 1; } if (tmp == NULL) { PyErr_SetString(PyExc_TypeError, "required field \"ctx\" missing from List"); return 1; } else { int res; res = obj2ast_expr_context(tmp, &ctx, arena); if (res != 0) goto failed; Py_CLEAR(tmp); } *out = List(elts, ctx, lineno, col_offset, end_lineno, end_col_offset, arena); if (*out == NULL) goto failed; return 0; } isinstance = PyObject_IsInstance(obj, (PyObject*)Tuple_type); if (isinstance == -1) { return 1; } if (isinstance) { asdl_seq* elts; expr_context_ty ctx; if (_PyObject_LookupAttrId(obj, &PyId_elts, &tmp) < 0) { return 1; } if (tmp == NULL) { PyErr_SetString(PyExc_TypeError, "required field \"elts\" missing from Tuple"); return 1; } else { int res; Py_ssize_t len; Py_ssize_t i; if (!PyList_Check(tmp)) { PyErr_Format(PyExc_TypeError, "Tuple field \"elts\" must be a list, not a %.200s", tmp->ob_type->tp_name); goto failed; } len = PyList_GET_SIZE(tmp); elts = _Py_asdl_seq_new(len, arena); if (elts == NULL) goto failed; for (i = 0; i < len; i++) { expr_ty val; res = obj2ast_expr(PyList_GET_ITEM(tmp, i), &val, arena); if (res != 0) goto failed; if (len != PyList_GET_SIZE(tmp)) { PyErr_SetString(PyExc_RuntimeError, "Tuple field \"elts\" changed size during iteration"); goto failed; } asdl_seq_SET(elts, i, val); } Py_CLEAR(tmp); } if (_PyObject_LookupAttrId(obj, &PyId_ctx, &tmp) < 0) { return 1; } if (tmp == NULL) { PyErr_SetString(PyExc_TypeError, "required field \"ctx\" missing from Tuple"); return 1; } else { int res; res = obj2ast_expr_context(tmp, &ctx, arena); if (res != 0) goto failed; Py_CLEAR(tmp); } *out = Tuple(elts, ctx, lineno, col_offset, end_lineno, end_col_offset, arena); if (*out == NULL) goto 
/* --- tail of obj2ast_expr() (the function's head is earlier in the file) --- */
failed; return 0; }
    /* Fell through every expr-subclass isinstance check: obj is not any
       kind of ast.expr. */
    PyErr_Format(PyExc_TypeError, "expected some sort of expr, but got %R", obj);
    failed:
    /* Shared error exit: drop whatever attribute reference is still held. */
    Py_XDECREF(tmp);
    return 1;
}

/* Map an ast.expr_context instance (Load, Store, Del, AugLoad, AugStore,
 * Param, NamedStore) onto the matching expr_context_ty enum constant,
 * stored through *out.
 * Returns 0 on success; 1 with a TypeError set when obj matches no context
 * class, or when a PyObject_IsInstance() check itself fails.
 * (arena is unused in this converter.)
 * NOTE(review): this whole section appears to be ASDL-generated code
 * (asdl_c.py style) -- confirm before hand-editing the logic. */
int obj2ast_expr_context(PyObject* obj, expr_context_ty* out, PyArena* arena)
{
    int isinstance;

    isinstance = PyObject_IsInstance(obj, (PyObject *)Load_type);
    if (isinstance == -1) { return 1; }
    if (isinstance) { *out = Load; return 0; }
    isinstance = PyObject_IsInstance(obj, (PyObject *)Store_type);
    if (isinstance == -1) { return 1; }
    if (isinstance) { *out = Store; return 0; }
    isinstance = PyObject_IsInstance(obj, (PyObject *)Del_type);
    if (isinstance == -1) { return 1; }
    if (isinstance) { *out = Del; return 0; }
    isinstance = PyObject_IsInstance(obj, (PyObject *)AugLoad_type);
    if (isinstance == -1) { return 1; }
    if (isinstance) { *out = AugLoad; return 0; }
    isinstance = PyObject_IsInstance(obj, (PyObject *)AugStore_type);
    if (isinstance == -1) { return 1; }
    if (isinstance) { *out = AugStore; return 0; }
    isinstance = PyObject_IsInstance(obj, (PyObject *)Param_type);
    if (isinstance == -1) { return 1; }
    if (isinstance) { *out = Param; return 0; }
    isinstance = PyObject_IsInstance(obj, (PyObject *)NamedStore_type);
    if (isinstance == -1) { return 1; }
    if (isinstance) { *out = NamedStore; return 0; }

    PyErr_Format(PyExc_TypeError, "expected some sort of expr_context, but got %R", obj);
    return 1;
}

/* Convert an ast.slice object into a slice_ty allocated in arena.
 * Accepts Py_None (stores NULL through *out), Slice (optional lower/upper/
 * step sub-expressions), ExtSlice (a list of nested slices in "dims"), or
 * Index (a required "value" expression).
 * Returns 0 on success; 1 with an exception set on any failure, releasing
 * the pending attribute reference via the shared "failed" label. */
int obj2ast_slice(PyObject* obj, slice_ty* out, PyArena* arena)
{
    int isinstance;
    PyObject *tmp = NULL;

    if (obj == Py_None) {
        *out = NULL;
        return 0;
    }
    isinstance = PyObject_IsInstance(obj, (PyObject*)Slice_type);
    if (isinstance == -1) {
        return 1;
    }
    if (isinstance) {
        expr_ty lower;
        expr_ty upper;
        expr_ty step;

        /* lower/upper/step are optional: a missing attribute or None maps
           to a NULL sub-expression. */
        if (_PyObject_LookupAttrId(obj, &PyId_lower, &tmp) < 0) {
            return 1;
        }
        if (tmp == NULL || tmp == Py_None) {
            Py_CLEAR(tmp);
            lower = NULL;
        }
        else {
            int res;
            res = obj2ast_expr(tmp, &lower, arena);
            if (res != 0) goto failed;
            Py_CLEAR(tmp);
        }
        if (_PyObject_LookupAttrId(obj, &PyId_upper, &tmp) < 0) {
            return 1;
        }
        if (tmp == NULL || tmp == Py_None) {
            Py_CLEAR(tmp);
            upper = NULL;
        }
        else {
            int res;
            res = obj2ast_expr(tmp, &upper, arena);
            if (res != 0) goto failed;
            Py_CLEAR(tmp);
        }
        if (_PyObject_LookupAttrId(obj, &PyId_step, &tmp) < 0) {
            return 1;
        }
        if (tmp == NULL || tmp == Py_None) {
            Py_CLEAR(tmp);
            step = NULL;
        }
        else {
            int res;
            res = obj2ast_expr(tmp, &step, arena);
            if (res != 0) goto failed;
            Py_CLEAR(tmp);
        }
        *out = Slice(lower, upper, step, arena);
        if (*out == NULL) goto failed;
        return 0;
    }
    isinstance = PyObject_IsInstance(obj, (PyObject*)ExtSlice_type);
    if (isinstance == -1) {
        return 1;
    }
    if (isinstance) {
        asdl_seq* dims;

        if (_PyObject_LookupAttrId(obj, &PyId_dims, &tmp) < 0) {
            return 1;
        }
        if (tmp == NULL) {
            PyErr_SetString(PyExc_TypeError, "required field \"dims\" missing from ExtSlice");
            return 1;
        }
        else {
            int res;
            Py_ssize_t len;
            Py_ssize_t i;
            if (!PyList_Check(tmp)) {
                PyErr_Format(PyExc_TypeError, "ExtSlice field \"dims\" must be a list, not a %.200s", tmp->ob_type->tp_name);
                goto failed;
            }
            len = PyList_GET_SIZE(tmp);
            dims = _Py_asdl_seq_new(len, arena);
            if (dims == NULL) goto failed;
            for (i = 0; i < len; i++) {
                slice_ty val;
                res = obj2ast_slice(PyList_GET_ITEM(tmp, i), &val, arena);
                if (res != 0) goto failed;
                /* Re-check the length: the recursive conversion above runs
                   arbitrary Python code that may mutate the list. */
                if (len != PyList_GET_SIZE(tmp)) {
                    PyErr_SetString(PyExc_RuntimeError, "ExtSlice field \"dims\" changed size during iteration");
                    goto failed;
                }
                asdl_seq_SET(dims, i, val);
            }
            Py_CLEAR(tmp);
        }
        *out = ExtSlice(dims, arena);
        if (*out == NULL) goto failed;
        return 0;
    }
    isinstance = PyObject_IsInstance(obj, (PyObject*)Index_type);
    if (isinstance == -1) {
        return 1;
    }
    if (isinstance) {
        expr_ty value;

        if (_PyObject_LookupAttrId(obj, &PyId_value, &tmp) < 0) {
            return 1;
        }
        if (tmp == NULL) {
            PyErr_SetString(PyExc_TypeError, "required field \"value\" missing from Index");
            return 1;
        }
        else {
            int res;
            res = obj2ast_expr(tmp, &value, arena);
            if (res != 0) goto failed;
            Py_CLEAR(tmp);
        }
        *out = Index(value, arena);
        if (*out == NULL) goto failed;
        return 0;
    }

    PyErr_Format(PyExc_TypeError, "expected some sort of slice, but got %R", obj);
    failed:
    Py_XDECREF(tmp);
    return 1;
}

/* Map an ast.boolop instance (And, Or) onto boolop_ty.
 * Returns 0 on success; 1 with a TypeError set otherwise.  (arena unused.) */
int obj2ast_boolop(PyObject* obj, boolop_ty* out, PyArena* arena)
{
    int isinstance;

    isinstance = PyObject_IsInstance(obj, (PyObject *)And_type);
    if (isinstance == -1) { return 1; }
    if (isinstance) { *out = And; return 0; }
    isinstance = PyObject_IsInstance(obj, (PyObject *)Or_type);
    if (isinstance == -1) { return 1; }
    if (isinstance) { *out = Or; return 0; }
    PyErr_Format(PyExc_TypeError, "expected some sort of boolop, but got %R", obj);
    return 1;
}

/* Map an ast.operator instance (Add .. FloorDiv) onto operator_ty.
 * Returns 0 on success; 1 with a TypeError set otherwise.  (arena unused.) */
int obj2ast_operator(PyObject* obj, operator_ty* out, PyArena* arena)
{
    int isinstance;

    isinstance = PyObject_IsInstance(obj, (PyObject *)Add_type);
    if (isinstance == -1) { return 1; }
    if (isinstance) { *out = Add; return 0; }
    isinstance = PyObject_IsInstance(obj, (PyObject *)Sub_type);
    if (isinstance == -1) { return 1; }
    if (isinstance) { *out = Sub; return 0; }
    isinstance = PyObject_IsInstance(obj, (PyObject *)Mult_type);
    if (isinstance == -1) { return 1; }
    if (isinstance) { *out = Mult; return 0; }
    isinstance = PyObject_IsInstance(obj, (PyObject *)MatMult_type);
    if (isinstance == -1) { return 1; }
    if (isinstance) { *out = MatMult; return 0; }
    isinstance = PyObject_IsInstance(obj, (PyObject *)Div_type);
    if (isinstance == -1) { return 1; }
    if (isinstance) { *out = Div; return 0; }
    isinstance = PyObject_IsInstance(obj, (PyObject *)Mod_type);
    if (isinstance == -1) { return 1; }
    if (isinstance) { *out = Mod; return 0; }
    isinstance = PyObject_IsInstance(obj, (PyObject *)Pow_type);
    if (isinstance == -1) { return 1; }
    if (isinstance) { *out = Pow; return 0; }
    isinstance = PyObject_IsInstance(obj, (PyObject *)LShift_type);
    if (isinstance == -1) { return 1; }
    if (isinstance) { *out = LShift; return 0; }
    isinstance = PyObject_IsInstance(obj, (PyObject *)RShift_type);
    if (isinstance == -1) { return 1; }
    if (isinstance) { *out = RShift; return 0; }
    isinstance = PyObject_IsInstance(obj, (PyObject *)BitOr_type);
    if (isinstance == -1) { return 1; }
    if (isinstance) { *out = BitOr; return 0; }
    isinstance = PyObject_IsInstance(obj, (PyObject *)BitXor_type);
    if (isinstance == -1) { return 1; }
    if (isinstance) { *out = BitXor; return 0; }
    isinstance = PyObject_IsInstance(obj, (PyObject *)BitAnd_type);
    if (isinstance == -1) { return 1; }
    if (isinstance) { *out = BitAnd; return 0; }
    isinstance = PyObject_IsInstance(obj, (PyObject *)FloorDiv_type);
    if (isinstance == -1) { return 1; }
    if (isinstance) { *out = FloorDiv; return 0; }
    PyErr_Format(PyExc_TypeError, "expected some sort of operator, but got %R", obj);
    return 1;
}

/* Map an ast.unaryop instance (Invert, Not, UAdd, USub) onto unaryop_ty.
 * Returns 0 on success; 1 with a TypeError set otherwise.  (arena unused.) */
int obj2ast_unaryop(PyObject* obj, unaryop_ty* out, PyArena* arena)
{
    int isinstance;

    isinstance = PyObject_IsInstance(obj, (PyObject *)Invert_type);
    if (isinstance == -1) { return 1; }
    if (isinstance) { *out = Invert; return 0; }
    isinstance = PyObject_IsInstance(obj, (PyObject *)Not_type);
    if (isinstance == -1) { return 1; }
    if (isinstance) { *out = Not; return 0; }
    isinstance = PyObject_IsInstance(obj, (PyObject *)UAdd_type);
    if (isinstance == -1) { return 1; }
    if (isinstance) { *out = UAdd; return 0; }
    isinstance = PyObject_IsInstance(obj, (PyObject *)USub_type);
    if (isinstance == -1) { return 1; }
    if (isinstance) { *out = USub; return 0; }
    PyErr_Format(PyExc_TypeError, "expected some sort of unaryop, but got %R", obj);
    return 1;
}

/* Map an ast.cmpop instance (Eq .. NotIn) onto cmpop_ty.
 * Returns 0 on success; 1 with a TypeError set otherwise.  (arena unused.) */
int obj2ast_cmpop(PyObject* obj, cmpop_ty* out, PyArena* arena)
{
    int isinstance;

    isinstance = PyObject_IsInstance(obj, (PyObject *)Eq_type);
    if (isinstance == -1) { return 1; }
    if (isinstance) { *out = Eq; return 0; }
    isinstance = PyObject_IsInstance(obj, (PyObject *)NotEq_type);
    if (isinstance == -1) { return 1; }
    if (isinstance) { *out = NotEq; return 0; }
    isinstance = PyObject_IsInstance(obj, (PyObject *)Lt_type);
    if (isinstance == -1) { return 1; }
    if (isinstance) { *out = Lt; return 0; }
    isinstance = PyObject_IsInstance(obj, (PyObject *)LtE_type);
    if (isinstance == -1) { return 1; }
    if (isinstance) { *out = LtE; return 0; }
    isinstance = PyObject_IsInstance(obj, (PyObject *)Gt_type);
    if (isinstance == -1) { return 1; }
    if (isinstance) { *out = Gt; return 0; }
    isinstance = PyObject_IsInstance(obj, (PyObject *)GtE_type);
    if (isinstance == -1) { return 1; }
    if (isinstance) { *out = GtE; return 0; }
    isinstance = PyObject_IsInstance(obj, (PyObject *)Is_type);
    if (isinstance == -1) { return 1; }
    if (isinstance) { *out = Is; return 0; }
    isinstance = PyObject_IsInstance(obj, (PyObject *)IsNot_type);
    if (isinstance == -1) { return 1; }
    if (isinstance) { *out = IsNot; return 0; }
    isinstance = PyObject_IsInstance(obj, (PyObject *)In_type);
    if (isinstance == -1) { return 1; }
    if (isinstance) { *out = In; return 0; }
    isinstance = PyObject_IsInstance(obj, (PyObject *)NotIn_type);
    if (isinstance == -1) { return 1; }
    if (isinstance) { *out = NotIn; return 0; }
    PyErr_Format(PyExc_TypeError, "expected some sort of cmpop, but got %R", obj);
    return 1;
}

/* Convert an ast.comprehension object (required fields: target, iter, ifs,
 * is_async) into a comprehension_ty.
 * (This function continues past the end of this chunk; the body below is
 * only its visible head and ends mid-statement.) */
int obj2ast_comprehension(PyObject* obj, comprehension_ty* out, PyArena* arena)
{
    PyObject* tmp = NULL;
    expr_ty target;
    expr_ty iter;
    asdl_seq* ifs;
    int is_async;

    if (_PyObject_LookupAttrId(obj, &PyId_target, &tmp) < 0) {
        return 1;
    }
    if (tmp == NULL) {
        PyErr_SetString(PyExc_TypeError, "required field \"target\" missing from comprehension");
        return 1;
    }
    else {
        int res;
        res = obj2ast_expr(tmp, &target, arena);
        if (res != 0) goto failed;
        Py_CLEAR(tmp);
    }
    if (_PyObject_LookupAttrId(obj, &PyId_iter, &tmp) < 0) {
        return 1;
    }
    if (tmp == NULL) {
        PyErr_SetString(PyExc_TypeError, "required field \"iter\" missing from comprehension");
        return 1;
    }
    else {
        int res;
        res = obj2ast_expr(tmp, &iter, arena);
        if (res != 0) goto failed;
        Py_CLEAR(tmp);
    }
    if (_PyObject_LookupAttrId(obj, &PyId_ifs, &tmp) < 0) {
        return 1;
    }
    if (tmp == NULL) {
        PyErr_SetString(PyExc_TypeError, "required field \"ifs\" missing from comprehension");
        return 1;
    }
    else {
        int res;
        Py_ssize_t len;
        Py_ssize_t i;
        if (!PyList_Check(tmp)) {
            PyErr_Format(PyExc_TypeError, "comprehension field \"ifs\" must be a list, not a %.200s", tmp->ob_type->tp_name);
            goto failed;
        }
        len =
PyList_GET_SIZE(tmp); ifs = _Py_asdl_seq_new(len, arena); if (ifs == NULL) goto failed; for (i = 0; i < len; i++) { expr_ty val; res = obj2ast_expr(PyList_GET_ITEM(tmp, i), &val, arena); if (res != 0) goto failed; if (len != PyList_GET_SIZE(tmp)) { PyErr_SetString(PyExc_RuntimeError, "comprehension field \"ifs\" changed size during iteration"); goto failed; } asdl_seq_SET(ifs, i, val); } Py_CLEAR(tmp); } if (_PyObject_LookupAttrId(obj, &PyId_is_async, &tmp) < 0) { return 1; } if (tmp == NULL) { PyErr_SetString(PyExc_TypeError, "required field \"is_async\" missing from comprehension"); return 1; } else { int res; res = obj2ast_int(tmp, &is_async, arena); if (res != 0) goto failed; Py_CLEAR(tmp); } *out = comprehension(target, iter, ifs, is_async, arena); return 0; failed: Py_XDECREF(tmp); return 1; } int obj2ast_excepthandler(PyObject* obj, excepthandler_ty* out, PyArena* arena) { int isinstance; PyObject *tmp = NULL; int lineno; int col_offset; int end_lineno; int end_col_offset; if (obj == Py_None) { *out = NULL; return 0; } if (_PyObject_LookupAttrId(obj, &PyId_lineno, &tmp) < 0) { return 1; } if (tmp == NULL) { PyErr_SetString(PyExc_TypeError, "required field \"lineno\" missing from excepthandler"); return 1; } else { int res; res = obj2ast_int(tmp, &lineno, arena); if (res != 0) goto failed; Py_CLEAR(tmp); } if (_PyObject_LookupAttrId(obj, &PyId_col_offset, &tmp) < 0) { return 1; } if (tmp == NULL) { PyErr_SetString(PyExc_TypeError, "required field \"col_offset\" missing from excepthandler"); return 1; } else { int res; res = obj2ast_int(tmp, &col_offset, arena); if (res != 0) goto failed; Py_CLEAR(tmp); } if (_PyObject_LookupAttrId(obj, &PyId_end_lineno, &tmp) < 0) { return 1; } if (tmp == NULL || tmp == Py_None) { Py_CLEAR(tmp); end_lineno = 0; } else { int res; res = obj2ast_int(tmp, &end_lineno, arena); if (res != 0) goto failed; Py_CLEAR(tmp); } if (_PyObject_LookupAttrId(obj, &PyId_end_col_offset, &tmp) < 0) { return 1; } if (tmp == NULL || tmp == 
Py_None) { Py_CLEAR(tmp); end_col_offset = 0; } else { int res; res = obj2ast_int(tmp, &end_col_offset, arena); if (res != 0) goto failed; Py_CLEAR(tmp); } isinstance = PyObject_IsInstance(obj, (PyObject*)ExceptHandler_type); if (isinstance == -1) { return 1; } if (isinstance) { expr_ty type; identifier name; asdl_seq* body; if (_PyObject_LookupAttrId(obj, &PyId_type, &tmp) < 0) { return 1; } if (tmp == NULL || tmp == Py_None) { Py_CLEAR(tmp); type = NULL; } else { int res; res = obj2ast_expr(tmp, &type, arena); if (res != 0) goto failed; Py_CLEAR(tmp); } if (_PyObject_LookupAttrId(obj, &PyId_name, &tmp) < 0) { return 1; } if (tmp == NULL || tmp == Py_None) { Py_CLEAR(tmp); name = NULL; } else { int res; res = obj2ast_identifier(tmp, &name, arena); if (res != 0) goto failed; Py_CLEAR(tmp); } if (_PyObject_LookupAttrId(obj, &PyId_body, &tmp) < 0) { return 1; } if (tmp == NULL) { PyErr_SetString(PyExc_TypeError, "required field \"body\" missing from ExceptHandler"); return 1; } else { int res; Py_ssize_t len; Py_ssize_t i; if (!PyList_Check(tmp)) { PyErr_Format(PyExc_TypeError, "ExceptHandler field \"body\" must be a list, not a %.200s", tmp->ob_type->tp_name); goto failed; } len = PyList_GET_SIZE(tmp); body = _Py_asdl_seq_new(len, arena); if (body == NULL) goto failed; for (i = 0; i < len; i++) { stmt_ty val; res = obj2ast_stmt(PyList_GET_ITEM(tmp, i), &val, arena); if (res != 0) goto failed; if (len != PyList_GET_SIZE(tmp)) { PyErr_SetString(PyExc_RuntimeError, "ExceptHandler field \"body\" changed size during iteration"); goto failed; } asdl_seq_SET(body, i, val); } Py_CLEAR(tmp); } *out = ExceptHandler(type, name, body, lineno, col_offset, end_lineno, end_col_offset, arena); if (*out == NULL) goto failed; return 0; } PyErr_Format(PyExc_TypeError, "expected some sort of excepthandler, but got %R", obj); failed: Py_XDECREF(tmp); return 1; } int obj2ast_arguments(PyObject* obj, arguments_ty* out, PyArena* arena) { PyObject* tmp = NULL; asdl_seq* args; arg_ty vararg; 
asdl_seq* kwonlyargs; asdl_seq* kw_defaults; arg_ty kwarg; asdl_seq* defaults; if (_PyObject_LookupAttrId(obj, &PyId_args, &tmp) < 0) { return 1; } if (tmp == NULL) { PyErr_SetString(PyExc_TypeError, "required field \"args\" missing from arguments"); return 1; } else { int res; Py_ssize_t len; Py_ssize_t i; if (!PyList_Check(tmp)) { PyErr_Format(PyExc_TypeError, "arguments field \"args\" must be a list, not a %.200s", tmp->ob_type->tp_name); goto failed; } len = PyList_GET_SIZE(tmp); args = _Py_asdl_seq_new(len, arena); if (args == NULL) goto failed; for (i = 0; i < len; i++) { arg_ty val; res = obj2ast_arg(PyList_GET_ITEM(tmp, i), &val, arena); if (res != 0) goto failed; if (len != PyList_GET_SIZE(tmp)) { PyErr_SetString(PyExc_RuntimeError, "arguments field \"args\" changed size during iteration"); goto failed; } asdl_seq_SET(args, i, val); } Py_CLEAR(tmp); } if (_PyObject_LookupAttrId(obj, &PyId_vararg, &tmp) < 0) { return 1; } if (tmp == NULL || tmp == Py_None) { Py_CLEAR(tmp); vararg = NULL; } else { int res; res = obj2ast_arg(tmp, &vararg, arena); if (res != 0) goto failed; Py_CLEAR(tmp); } if (_PyObject_LookupAttrId(obj, &PyId_kwonlyargs, &tmp) < 0) { return 1; } if (tmp == NULL) { PyErr_SetString(PyExc_TypeError, "required field \"kwonlyargs\" missing from arguments"); return 1; } else { int res; Py_ssize_t len; Py_ssize_t i; if (!PyList_Check(tmp)) { PyErr_Format(PyExc_TypeError, "arguments field \"kwonlyargs\" must be a list, not a %.200s", tmp->ob_type->tp_name); goto failed; } len = PyList_GET_SIZE(tmp); kwonlyargs = _Py_asdl_seq_new(len, arena); if (kwonlyargs == NULL) goto failed; for (i = 0; i < len; i++) { arg_ty val; res = obj2ast_arg(PyList_GET_ITEM(tmp, i), &val, arena); if (res != 0) goto failed; if (len != PyList_GET_SIZE(tmp)) { PyErr_SetString(PyExc_RuntimeError, "arguments field \"kwonlyargs\" changed size during iteration"); goto failed; } asdl_seq_SET(kwonlyargs, i, val); } Py_CLEAR(tmp); } if (_PyObject_LookupAttrId(obj, &PyId_kw_defaults, 
&tmp) < 0) { return 1; } if (tmp == NULL) { PyErr_SetString(PyExc_TypeError, "required field \"kw_defaults\" missing from arguments"); return 1; } else { int res; Py_ssize_t len; Py_ssize_t i; if (!PyList_Check(tmp)) { PyErr_Format(PyExc_TypeError, "arguments field \"kw_defaults\" must be a list, not a %.200s", tmp->ob_type->tp_name); goto failed; } len = PyList_GET_SIZE(tmp); kw_defaults = _Py_asdl_seq_new(len, arena); if (kw_defaults == NULL) goto failed; for (i = 0; i < len; i++) { expr_ty val; res = obj2ast_expr(PyList_GET_ITEM(tmp, i), &val, arena); if (res != 0) goto failed; if (len != PyList_GET_SIZE(tmp)) { PyErr_SetString(PyExc_RuntimeError, "arguments field \"kw_defaults\" changed size during iteration"); goto failed; } asdl_seq_SET(kw_defaults, i, val); } Py_CLEAR(tmp); } if (_PyObject_LookupAttrId(obj, &PyId_kwarg, &tmp) < 0) { return 1; } if (tmp == NULL || tmp == Py_None) { Py_CLEAR(tmp); kwarg = NULL; } else { int res; res = obj2ast_arg(tmp, &kwarg, arena); if (res != 0) goto failed; Py_CLEAR(tmp); } if (_PyObject_LookupAttrId(obj, &PyId_defaults, &tmp) < 0) { return 1; } if (tmp == NULL) { PyErr_SetString(PyExc_TypeError, "required field \"defaults\" missing from arguments"); return 1; } else { int res; Py_ssize_t len; Py_ssize_t i; if (!PyList_Check(tmp)) { PyErr_Format(PyExc_TypeError, "arguments field \"defaults\" must be a list, not a %.200s", tmp->ob_type->tp_name); goto failed; } len = PyList_GET_SIZE(tmp); defaults = _Py_asdl_seq_new(len, arena); if (defaults == NULL) goto failed; for (i = 0; i < len; i++) { expr_ty val; res = obj2ast_expr(PyList_GET_ITEM(tmp, i), &val, arena); if (res != 0) goto failed; if (len != PyList_GET_SIZE(tmp)) { PyErr_SetString(PyExc_RuntimeError, "arguments field \"defaults\" changed size during iteration"); goto failed; } asdl_seq_SET(defaults, i, val); } Py_CLEAR(tmp); } *out = arguments(args, vararg, kwonlyargs, kw_defaults, kwarg, defaults, arena); return 0; failed: Py_XDECREF(tmp); return 1; } int 
obj2ast_arg(PyObject* obj, arg_ty* out, PyArena* arena) { PyObject* tmp = NULL; identifier arg; expr_ty annotation; int lineno; int col_offset; int end_lineno; int end_col_offset; if (_PyObject_LookupAttrId(obj, &PyId_arg, &tmp) < 0) { return 1; } if (tmp == NULL) { PyErr_SetString(PyExc_TypeError, "required field \"arg\" missing from arg"); return 1; } else { int res; res = obj2ast_identifier(tmp, &arg, arena); if (res != 0) goto failed; Py_CLEAR(tmp); } if (_PyObject_LookupAttrId(obj, &PyId_annotation, &tmp) < 0) { return 1; } if (tmp == NULL || tmp == Py_None) { Py_CLEAR(tmp); annotation = NULL; } else { int res; res = obj2ast_expr(tmp, &annotation, arena); if (res != 0) goto failed; Py_CLEAR(tmp); } if (_PyObject_LookupAttrId(obj, &PyId_lineno, &tmp) < 0) { return 1; } if (tmp == NULL) { PyErr_SetString(PyExc_TypeError, "required field \"lineno\" missing from arg"); return 1; } else { int res; res = obj2ast_int(tmp, &lineno, arena); if (res != 0) goto failed; Py_CLEAR(tmp); } if (_PyObject_LookupAttrId(obj, &PyId_col_offset, &tmp) < 0) { return 1; } if (tmp == NULL) { PyErr_SetString(PyExc_TypeError, "required field \"col_offset\" missing from arg"); return 1; } else { int res; res = obj2ast_int(tmp, &col_offset, arena); if (res != 0) goto failed; Py_CLEAR(tmp); } if (_PyObject_LookupAttrId(obj, &PyId_end_lineno, &tmp) < 0) { return 1; } if (tmp == NULL || tmp == Py_None) { Py_CLEAR(tmp); end_lineno = 0; } else { int res; res = obj2ast_int(tmp, &end_lineno, arena); if (res != 0) goto failed; Py_CLEAR(tmp); } if (_PyObject_LookupAttrId(obj, &PyId_end_col_offset, &tmp) < 0) { return 1; } if (tmp == NULL || tmp == Py_None) { Py_CLEAR(tmp); end_col_offset = 0; } else { int res; res = obj2ast_int(tmp, &end_col_offset, arena); if (res != 0) goto failed; Py_CLEAR(tmp); } *out = arg(arg, annotation, lineno, col_offset, end_lineno, end_col_offset, arena); return 0; failed: Py_XDECREF(tmp); return 1; } int obj2ast_keyword(PyObject* obj, keyword_ty* out, PyArena* arena) { 
PyObject* tmp = NULL; identifier arg; expr_ty value; if (_PyObject_LookupAttrId(obj, &PyId_arg, &tmp) < 0) { return 1; } if (tmp == NULL || tmp == Py_None) { Py_CLEAR(tmp); arg = NULL; } else { int res; res = obj2ast_identifier(tmp, &arg, arena); if (res != 0) goto failed; Py_CLEAR(tmp); } if (_PyObject_LookupAttrId(obj, &PyId_value, &tmp) < 0) { return 1; } if (tmp == NULL) { PyErr_SetString(PyExc_TypeError, "required field \"value\" missing from keyword"); return 1; } else { int res; res = obj2ast_expr(tmp, &value, arena); if (res != 0) goto failed; Py_CLEAR(tmp); } *out = keyword(arg, value, arena); return 0; failed: Py_XDECREF(tmp); return 1; } int obj2ast_alias(PyObject* obj, alias_ty* out, PyArena* arena) { PyObject* tmp = NULL; identifier name; identifier asname; if (_PyObject_LookupAttrId(obj, &PyId_name, &tmp) < 0) { return 1; } if (tmp == NULL) { PyErr_SetString(PyExc_TypeError, "required field \"name\" missing from alias"); return 1; } else { int res; res = obj2ast_identifier(tmp, &name, arena); if (res != 0) goto failed; Py_CLEAR(tmp); } if (_PyObject_LookupAttrId(obj, &PyId_asname, &tmp) < 0) { return 1; } if (tmp == NULL || tmp == Py_None) { Py_CLEAR(tmp); asname = NULL; } else { int res; res = obj2ast_identifier(tmp, &asname, arena); if (res != 0) goto failed; Py_CLEAR(tmp); } *out = alias(name, asname, arena); return 0; failed: Py_XDECREF(tmp); return 1; } int obj2ast_withitem(PyObject* obj, withitem_ty* out, PyArena* arena) { PyObject* tmp = NULL; expr_ty context_expr; expr_ty optional_vars; if (_PyObject_LookupAttrId(obj, &PyId_context_expr, &tmp) < 0) { return 1; } if (tmp == NULL) { PyErr_SetString(PyExc_TypeError, "required field \"context_expr\" missing from withitem"); return 1; } else { int res; res = obj2ast_expr(tmp, &context_expr, arena); if (res != 0) goto failed; Py_CLEAR(tmp); } if (_PyObject_LookupAttrId(obj, &PyId_optional_vars, &tmp) < 0) { return 1; } if (tmp == NULL || tmp == Py_None) { Py_CLEAR(tmp); optional_vars = NULL; } else 
{ int res; res = obj2ast_expr(tmp, &optional_vars, arena); if (res != 0) goto failed; Py_CLEAR(tmp); } *out = withitem(context_expr, optional_vars, arena); return 0; failed: Py_XDECREF(tmp); return 1; } static struct PyModuleDef _astmodule = { PyModuleDef_HEAD_INIT, "_ast" }; PyMODINIT_FUNC PyInit__ast(void) { PyObject *m, *d; if (!init_types()) return NULL; m = PyModule_Create(&_astmodule); if (!m) return NULL; d = PyModule_GetDict(m); if (PyDict_SetItemString(d, "AST", (PyObject*)&AST_type) < 0) return NULL; if (PyModule_AddIntMacro(m, PyCF_ONLY_AST) < 0) return NULL; if (PyDict_SetItemString(d, "mod", (PyObject*)mod_type) < 0) return NULL; if (PyDict_SetItemString(d, "Module", (PyObject*)Module_type) < 0) return NULL; if (PyDict_SetItemString(d, "Interactive", (PyObject*)Interactive_type) < 0) return NULL; if (PyDict_SetItemString(d, "Expression", (PyObject*)Expression_type) < 0) return NULL; if (PyDict_SetItemString(d, "Suite", (PyObject*)Suite_type) < 0) return NULL; if (PyDict_SetItemString(d, "stmt", (PyObject*)stmt_type) < 0) return NULL; if (PyDict_SetItemString(d, "FunctionDef", (PyObject*)FunctionDef_type) < 0) return NULL; if (PyDict_SetItemString(d, "AsyncFunctionDef", (PyObject*)AsyncFunctionDef_type) < 0) return NULL; if (PyDict_SetItemString(d, "ClassDef", (PyObject*)ClassDef_type) < 0) return NULL; if (PyDict_SetItemString(d, "Return", (PyObject*)Return_type) < 0) return NULL; if (PyDict_SetItemString(d, "Delete", (PyObject*)Delete_type) < 0) return NULL; if (PyDict_SetItemString(d, "Assign", (PyObject*)Assign_type) < 0) return NULL; if (PyDict_SetItemString(d, "AugAssign", (PyObject*)AugAssign_type) < 0) return NULL; if (PyDict_SetItemString(d, "AnnAssign", (PyObject*)AnnAssign_type) < 0) return NULL; if (PyDict_SetItemString(d, "For", (PyObject*)For_type) < 0) return NULL; if (PyDict_SetItemString(d, "AsyncFor", (PyObject*)AsyncFor_type) < 0) return NULL; if (PyDict_SetItemString(d, "While", (PyObject*)While_type) < 0) return NULL; if 
(PyDict_SetItemString(d, "If", (PyObject*)If_type) < 0) return NULL; if (PyDict_SetItemString(d, "With", (PyObject*)With_type) < 0) return NULL; if (PyDict_SetItemString(d, "AsyncWith", (PyObject*)AsyncWith_type) < 0) return NULL; if (PyDict_SetItemString(d, "Raise", (PyObject*)Raise_type) < 0) return NULL; if (PyDict_SetItemString(d, "Try", (PyObject*)Try_type) < 0) return NULL; if (PyDict_SetItemString(d, "Assert", (PyObject*)Assert_type) < 0) return NULL; if (PyDict_SetItemString(d, "Import", (PyObject*)Import_type) < 0) return NULL; if (PyDict_SetItemString(d, "ImportFrom", (PyObject*)ImportFrom_type) < 0) return NULL; if (PyDict_SetItemString(d, "Global", (PyObject*)Global_type) < 0) return NULL; if (PyDict_SetItemString(d, "Nonlocal", (PyObject*)Nonlocal_type) < 0) return NULL; if (PyDict_SetItemString(d, "Expr", (PyObject*)Expr_type) < 0) return NULL; if (PyDict_SetItemString(d, "Pass", (PyObject*)Pass_type) < 0) return NULL; if (PyDict_SetItemString(d, "Break", (PyObject*)Break_type) < 0) return NULL; if (PyDict_SetItemString(d, "Continue", (PyObject*)Continue_type) < 0) return NULL; if (PyDict_SetItemString(d, "expr", (PyObject*)expr_type) < 0) return NULL; if (PyDict_SetItemString(d, "BoolOp", (PyObject*)BoolOp_type) < 0) return NULL; if (PyDict_SetItemString(d, "NamedExpr", (PyObject*)NamedExpr_type) < 0) return NULL; if (PyDict_SetItemString(d, "BinOp", (PyObject*)BinOp_type) < 0) return NULL; if (PyDict_SetItemString(d, "UnaryOp", (PyObject*)UnaryOp_type) < 0) return NULL; if (PyDict_SetItemString(d, "Lambda", (PyObject*)Lambda_type) < 0) return NULL; if (PyDict_SetItemString(d, "IfExp", (PyObject*)IfExp_type) < 0) return NULL; if (PyDict_SetItemString(d, "Dict", (PyObject*)Dict_type) < 0) return NULL; if (PyDict_SetItemString(d, "Set", (PyObject*)Set_type) < 0) return NULL; if (PyDict_SetItemString(d, "ListComp", (PyObject*)ListComp_type) < 0) return NULL; if (PyDict_SetItemString(d, "SetComp", (PyObject*)SetComp_type) < 0) return NULL; if 
(PyDict_SetItemString(d, "DictComp", (PyObject*)DictComp_type) < 0) return NULL; if (PyDict_SetItemString(d, "GeneratorExp", (PyObject*)GeneratorExp_type) < 0) return NULL; if (PyDict_SetItemString(d, "Await", (PyObject*)Await_type) < 0) return NULL; if (PyDict_SetItemString(d, "Yield", (PyObject*)Yield_type) < 0) return NULL; if (PyDict_SetItemString(d, "YieldFrom", (PyObject*)YieldFrom_type) < 0) return NULL; if (PyDict_SetItemString(d, "Compare", (PyObject*)Compare_type) < 0) return NULL; if (PyDict_SetItemString(d, "Call", (PyObject*)Call_type) < 0) return NULL; if (PyDict_SetItemString(d, "FormattedValue", (PyObject*)FormattedValue_type) < 0) return NULL; if (PyDict_SetItemString(d, "JoinedStr", (PyObject*)JoinedStr_type) < 0) return NULL; if (PyDict_SetItemString(d, "Constant", (PyObject*)Constant_type) < 0) return NULL; if (PyDict_SetItemString(d, "Attribute", (PyObject*)Attribute_type) < 0) return NULL; if (PyDict_SetItemString(d, "Subscript", (PyObject*)Subscript_type) < 0) return NULL; if (PyDict_SetItemString(d, "Starred", (PyObject*)Starred_type) < 0) return NULL; if (PyDict_SetItemString(d, "Name", (PyObject*)Name_type) < 0) return NULL; if (PyDict_SetItemString(d, "List", (PyObject*)List_type) < 0) return NULL; if (PyDict_SetItemString(d, "Tuple", (PyObject*)Tuple_type) < 0) return NULL; if (PyDict_SetItemString(d, "expr_context", (PyObject*)expr_context_type) < 0) return NULL; if (PyDict_SetItemString(d, "Load", (PyObject*)Load_type) < 0) return NULL; if (PyDict_SetItemString(d, "Store", (PyObject*)Store_type) < 0) return NULL; if (PyDict_SetItemString(d, "Del", (PyObject*)Del_type) < 0) return NULL; if (PyDict_SetItemString(d, "AugLoad", (PyObject*)AugLoad_type) < 0) return NULL; if (PyDict_SetItemString(d, "AugStore", (PyObject*)AugStore_type) < 0) return NULL; if (PyDict_SetItemString(d, "Param", (PyObject*)Param_type) < 0) return NULL; if (PyDict_SetItemString(d, "NamedStore", (PyObject*)NamedStore_type) < 0) return NULL; if 
(PyDict_SetItemString(d, "slice", (PyObject*)slice_type) < 0) return NULL; if (PyDict_SetItemString(d, "Slice", (PyObject*)Slice_type) < 0) return NULL; if (PyDict_SetItemString(d, "ExtSlice", (PyObject*)ExtSlice_type) < 0) return NULL; if (PyDict_SetItemString(d, "Index", (PyObject*)Index_type) < 0) return NULL; if (PyDict_SetItemString(d, "boolop", (PyObject*)boolop_type) < 0) return NULL; if (PyDict_SetItemString(d, "And", (PyObject*)And_type) < 0) return NULL; if (PyDict_SetItemString(d, "Or", (PyObject*)Or_type) < 0) return NULL; if (PyDict_SetItemString(d, "operator", (PyObject*)operator_type) < 0) return NULL; if (PyDict_SetItemString(d, "Add", (PyObject*)Add_type) < 0) return NULL; if (PyDict_SetItemString(d, "Sub", (PyObject*)Sub_type) < 0) return NULL; if (PyDict_SetItemString(d, "Mult", (PyObject*)Mult_type) < 0) return NULL; if (PyDict_SetItemString(d, "MatMult", (PyObject*)MatMult_type) < 0) return NULL; if (PyDict_SetItemString(d, "Div", (PyObject*)Div_type) < 0) return NULL; if (PyDict_SetItemString(d, "Mod", (PyObject*)Mod_type) < 0) return NULL; if (PyDict_SetItemString(d, "Pow", (PyObject*)Pow_type) < 0) return NULL; if (PyDict_SetItemString(d, "LShift", (PyObject*)LShift_type) < 0) return NULL; if (PyDict_SetItemString(d, "RShift", (PyObject*)RShift_type) < 0) return NULL; if (PyDict_SetItemString(d, "BitOr", (PyObject*)BitOr_type) < 0) return NULL; if (PyDict_SetItemString(d, "BitXor", (PyObject*)BitXor_type) < 0) return NULL; if (PyDict_SetItemString(d, "BitAnd", (PyObject*)BitAnd_type) < 0) return NULL; if (PyDict_SetItemString(d, "FloorDiv", (PyObject*)FloorDiv_type) < 0) return NULL; if (PyDict_SetItemString(d, "unaryop", (PyObject*)unaryop_type) < 0) return NULL; if (PyDict_SetItemString(d, "Invert", (PyObject*)Invert_type) < 0) return NULL; if (PyDict_SetItemString(d, "Not", (PyObject*)Not_type) < 0) return NULL; if (PyDict_SetItemString(d, "UAdd", (PyObject*)UAdd_type) < 0) return NULL; if (PyDict_SetItemString(d, "USub", 
(PyObject*)USub_type) < 0) return NULL; if (PyDict_SetItemString(d, "cmpop", (PyObject*)cmpop_type) < 0) return NULL; if (PyDict_SetItemString(d, "Eq", (PyObject*)Eq_type) < 0) return NULL; if (PyDict_SetItemString(d, "NotEq", (PyObject*)NotEq_type) < 0) return NULL; if (PyDict_SetItemString(d, "Lt", (PyObject*)Lt_type) < 0) return NULL; if (PyDict_SetItemString(d, "LtE", (PyObject*)LtE_type) < 0) return NULL; if (PyDict_SetItemString(d, "Gt", (PyObject*)Gt_type) < 0) return NULL; if (PyDict_SetItemString(d, "GtE", (PyObject*)GtE_type) < 0) return NULL; if (PyDict_SetItemString(d, "Is", (PyObject*)Is_type) < 0) return NULL; if (PyDict_SetItemString(d, "IsNot", (PyObject*)IsNot_type) < 0) return NULL; if (PyDict_SetItemString(d, "In", (PyObject*)In_type) < 0) return NULL; if (PyDict_SetItemString(d, "NotIn", (PyObject*)NotIn_type) < 0) return NULL; if (PyDict_SetItemString(d, "comprehension", (PyObject*)comprehension_type) < 0) return NULL; if (PyDict_SetItemString(d, "excepthandler", (PyObject*)excepthandler_type) < 0) return NULL; if (PyDict_SetItemString(d, "ExceptHandler", (PyObject*)ExceptHandler_type) < 0) return NULL; if (PyDict_SetItemString(d, "arguments", (PyObject*)arguments_type) < 0) return NULL; if (PyDict_SetItemString(d, "arg", (PyObject*)arg_type) < 0) return NULL; if (PyDict_SetItemString(d, "keyword", (PyObject*)keyword_type) < 0) return NULL; if (PyDict_SetItemString(d, "alias", (PyObject*)alias_type) < 0) return NULL; if (PyDict_SetItemString(d, "withitem", (PyObject*)withitem_type) < 0) return NULL; return m; } PyObject* PyAST_mod2obj(mod_ty t) { if (!init_types()) return NULL; return ast2obj_mod(t); } /* mode is 0 for "exec", 1 for "eval" and 2 for "single" input */ mod_ty PyAST_obj2mod(PyObject* ast, PyArena* arena, int mode) { mod_ty res; PyObject *req_type[3]; char *req_name[] = {"Module", "Expression", "Interactive"}; int isinstance; req_type[0] = (PyObject*)Module_type; req_type[1] = (PyObject*)Expression_type; req_type[2] = 
(PyObject*)Interactive_type; assert(0 <= mode && mode <= 2); if (!init_types()) return NULL; isinstance = PyObject_IsInstance(ast, req_type[mode]); if (isinstance == -1) return NULL; if (!isinstance) { PyErr_Format(PyExc_TypeError, "expected %s node, got %.400s", req_name[mode], Py_TYPE(ast)->tp_name); return NULL; } if (obj2ast_mod(ast, &res, arena) != 0) return NULL; else return res; } int PyAST_Check(PyObject* obj) { if (!init_types()) return -1; return PyObject_IsInstance(obj, (PyObject*)&AST_type); }
/* File automatically generated by Parser/asdl_c.py. */
#include <stddef.h>
#include "Python.h"
#include "Python-ast.h"

/*
 * Forward declarations for the _ast extension module.
 *
 * Naming pattern (one group per ASDL node kind):
 *   - <Kind>_type       : the Python type object exposed as _ast.<Kind>
 *                         (registered into the module dict by PyInit__ast,
 *                         later in this file);
 *   - <Kind>_fields[]   : attribute names of the node's fields, in order;
 *   - <kind>_attributes : location attributes shared by a node category;
 *   - <Kind>_singleton  : shared instance for no-field "enum" nodes
 *                         (operators, contexts, comparison ops);
 *   - ast2obj_<kind>()  : converts an internal AST struct to a Python object.
 *
 * _Py_IDENTIFIER(x) declares a cached identifier PyId_x; the obj2ast_*
 * converters in this file pass &PyId_x to _PyObject_LookupAttrId to read
 * node attributes without re-interning the name each time.
 */

/* Base type: every _ast node type inherits from _ast.AST.  Defined as a
   full static PyTypeObject later in the file (used via &AST_type in
   PyInit__ast and PyAST_Check). */
static PyTypeObject AST_type;

/* ---- mod: top-level module variants ---- */
static PyTypeObject *mod_type;
static PyObject* ast2obj_mod(void*);
static PyTypeObject *Module_type;
_Py_IDENTIFIER(body);
_Py_IDENTIFIER(type_ignores);
static char *Module_fields[]={
    "body",
    "type_ignores",
};
static PyTypeObject *Interactive_type;
static char *Interactive_fields[]={
    "body",
};
static PyTypeObject *Expression_type;
static char *Expression_fields[]={
    "body",
};
static PyTypeObject *FunctionType_type;
_Py_IDENTIFIER(argtypes);
_Py_IDENTIFIER(returns);
static char *FunctionType_fields[]={
    "argtypes",
    "returns",
};
static PyTypeObject *Suite_type;
static char *Suite_fields[]={
    "body",
};

/* ---- stmt: statement nodes ---- */
static PyTypeObject *stmt_type;
/* Source-location attributes carried by every stmt node. */
_Py_IDENTIFIER(lineno);
_Py_IDENTIFIER(col_offset);
_Py_IDENTIFIER(end_lineno);
_Py_IDENTIFIER(end_col_offset);
static char *stmt_attributes[] = {
    "lineno",
    "col_offset",
    "end_lineno",
    "end_col_offset",
};
static PyObject* ast2obj_stmt(void*);
static PyTypeObject *FunctionDef_type;
_Py_IDENTIFIER(name);
_Py_IDENTIFIER(args);
_Py_IDENTIFIER(decorator_list);
_Py_IDENTIFIER(type_comment);
static char *FunctionDef_fields[]={
    "name",
    "args",
    "body",
    "decorator_list",
    "returns",
    "type_comment",
};
static PyTypeObject *AsyncFunctionDef_type;
static char *AsyncFunctionDef_fields[]={
    "name",
    "args",
    "body",
    "decorator_list",
    "returns",
    "type_comment",
};
static PyTypeObject *ClassDef_type;
_Py_IDENTIFIER(bases);
_Py_IDENTIFIER(keywords);
static char *ClassDef_fields[]={
    "name",
    "bases",
    "keywords",
    "body",
    "decorator_list",
};
static PyTypeObject *Return_type;
_Py_IDENTIFIER(value);
static char *Return_fields[]={
    "value",
};
static PyTypeObject *Delete_type;
_Py_IDENTIFIER(targets);
static char *Delete_fields[]={
    "targets",
};
static PyTypeObject *Assign_type;
static char *Assign_fields[]={
    "targets",
    "value",
    "type_comment",
};
static PyTypeObject *AugAssign_type;
_Py_IDENTIFIER(target);
_Py_IDENTIFIER(op);
static char *AugAssign_fields[]={
    "target",
    "op",
    "value",
};
static PyTypeObject *AnnAssign_type;
_Py_IDENTIFIER(annotation);
_Py_IDENTIFIER(simple);
static char *AnnAssign_fields[]={
    "target",
    "annotation",
    "value",
    "simple",
};
static PyTypeObject *For_type;
_Py_IDENTIFIER(iter);
_Py_IDENTIFIER(orelse);
static char *For_fields[]={
    "target",
    "iter",
    "body",
    "orelse",
    "type_comment",
};
static PyTypeObject *AsyncFor_type;
static char *AsyncFor_fields[]={
    "target",
    "iter",
    "body",
    "orelse",
    "type_comment",
};
static PyTypeObject *While_type;
_Py_IDENTIFIER(test);
static char *While_fields[]={
    "test",
    "body",
    "orelse",
};
static PyTypeObject *If_type;
static char *If_fields[]={
    "test",
    "body",
    "orelse",
};
static PyTypeObject *With_type;
_Py_IDENTIFIER(items);
static char *With_fields[]={
    "items",
    "body",
    "type_comment",
};
static PyTypeObject *AsyncWith_type;
static char *AsyncWith_fields[]={
    "items",
    "body",
    "type_comment",
};
static PyTypeObject *Raise_type;
_Py_IDENTIFIER(exc);
_Py_IDENTIFIER(cause);
static char *Raise_fields[]={
    "exc",
    "cause",
};
static PyTypeObject *Try_type;
_Py_IDENTIFIER(handlers);
_Py_IDENTIFIER(finalbody);
static char *Try_fields[]={
    "body",
    "handlers",
    "orelse",
    "finalbody",
};
static PyTypeObject *Assert_type;
_Py_IDENTIFIER(msg);
static char *Assert_fields[]={
    "test",
    "msg",
};
static PyTypeObject *Import_type;
_Py_IDENTIFIER(names);
static char *Import_fields[]={
    "names",
};
static PyTypeObject *ImportFrom_type;
_Py_IDENTIFIER(module);
_Py_IDENTIFIER(level);
static char *ImportFrom_fields[]={
    "module",
    "names",
    "level",
};
static PyTypeObject *Global_type;
static char *Global_fields[]={
    "names",
};
static PyTypeObject *Nonlocal_type;
static char *Nonlocal_fields[]={
    "names",
};
static PyTypeObject *Expr_type;
static char *Expr_fields[]={
    "value",
};
/* Field-less statements share no field table. */
static PyTypeObject *Pass_type;
static PyTypeObject *Break_type;
static PyTypeObject *Continue_type;

/* ---- expr: expression nodes ---- */
static PyTypeObject *expr_type;
/* Same four location attributes as stmt nodes. */
static char *expr_attributes[] = {
    "lineno",
    "col_offset",
    "end_lineno",
    "end_col_offset",
};
static PyObject* ast2obj_expr(void*);
static PyTypeObject *BoolOp_type;
_Py_IDENTIFIER(values);
static char *BoolOp_fields[]={
    "op",
    "values",
};
static PyTypeObject *NamedExpr_type;
static char *NamedExpr_fields[]={
    "target",
    "value",
};
static PyTypeObject *BinOp_type;
_Py_IDENTIFIER(left);
_Py_IDENTIFIER(right);
static char *BinOp_fields[]={
    "left",
    "op",
    "right",
};
static PyTypeObject *UnaryOp_type;
_Py_IDENTIFIER(operand);
static char *UnaryOp_fields[]={
    "op",
    "operand",
};
static PyTypeObject *Lambda_type;
static char *Lambda_fields[]={
    "args",
    "body",
};
static PyTypeObject *IfExp_type;
static char *IfExp_fields[]={
    "test",
    "body",
    "orelse",
};
static PyTypeObject *Dict_type;
_Py_IDENTIFIER(keys);
static char *Dict_fields[]={
    "keys",
    "values",
};
static PyTypeObject *Set_type;
_Py_IDENTIFIER(elts);
static char *Set_fields[]={
    "elts",
};
static PyTypeObject *ListComp_type;
_Py_IDENTIFIER(elt);
_Py_IDENTIFIER(generators);
static char *ListComp_fields[]={
    "elt",
    "generators",
};
static PyTypeObject *SetComp_type;
static char *SetComp_fields[]={
    "elt",
    "generators",
};
static PyTypeObject *DictComp_type;
_Py_IDENTIFIER(key);
static char *DictComp_fields[]={
    "key",
    "value",
    "generators",
};
static PyTypeObject *GeneratorExp_type;
static char *GeneratorExp_fields[]={
    "elt",
    "generators",
};
static PyTypeObject *Await_type;
static char *Await_fields[]={
    "value",
};
static PyTypeObject *Yield_type;
static char *Yield_fields[]={
    "value",
};
static PyTypeObject *YieldFrom_type;
static char *YieldFrom_fields[]={
    "value",
};
static PyTypeObject *Compare_type;
_Py_IDENTIFIER(ops);
_Py_IDENTIFIER(comparators);
static char *Compare_fields[]={
    "left",
    "ops",
    "comparators",
};
static PyTypeObject *Call_type;
_Py_IDENTIFIER(func);
static char *Call_fields[]={
    "func",
    "args",
    "keywords",
};
static PyTypeObject *FormattedValue_type;
_Py_IDENTIFIER(conversion);
_Py_IDENTIFIER(format_spec);
static char *FormattedValue_fields[]={
    "value",
    "conversion",
    "format_spec",
};
static PyTypeObject *JoinedStr_type;
static char *JoinedStr_fields[]={
    "values",
};
static PyTypeObject *Constant_type;
static char *Constant_fields[]={
    "value",
};
static PyTypeObject *Attribute_type;
_Py_IDENTIFIER(attr);
_Py_IDENTIFIER(ctx);
static char *Attribute_fields[]={
    "value",
    "attr",
    "ctx",
};
static PyTypeObject *Subscript_type;
_Py_IDENTIFIER(slice);
static char *Subscript_fields[]={
    "value",
    "slice",
    "ctx",
};
static PyTypeObject *Starred_type;
static char *Starred_fields[]={
    "value",
    "ctx",
};
static PyTypeObject *Name_type;
_Py_IDENTIFIER(id);
static char *Name_fields[]={
    "id",
    "ctx",
};
static PyTypeObject *List_type;
static char *List_fields[]={
    "elts",
    "ctx",
};
static PyTypeObject *Tuple_type;
static char *Tuple_fields[]={
    "elts",
    "ctx",
};

/* ---- expr_context: load/store context "enum" nodes (field-less, so a
   single shared singleton instance per variant). ---- */
static PyTypeObject *expr_context_type;
static PyObject *Load_singleton, *Store_singleton, *Del_singleton,
*AugLoad_singleton, *AugStore_singleton, *Param_singleton,
*NamedStore_singleton;
static PyObject* ast2obj_expr_context(expr_context_ty);
static PyTypeObject *Load_type;
static PyTypeObject *Store_type;
static PyTypeObject *Del_type;
static PyTypeObject *AugLoad_type;
static PyTypeObject *AugStore_type;
static PyTypeObject *Param_type;
static PyTypeObject *NamedStore_type;

/* ---- slice: subscript slice nodes ---- */
static PyTypeObject *slice_type;
static PyObject* ast2obj_slice(void*);
static PyTypeObject *Slice_type;
_Py_IDENTIFIER(lower);
_Py_IDENTIFIER(upper);
_Py_IDENTIFIER(step);
static char *Slice_fields[]={
    "lower",
    "upper",
    "step",
};
static PyTypeObject *ExtSlice_type;
_Py_IDENTIFIER(dims);
static char *ExtSlice_fields[]={
    "dims",
};
static PyTypeObject *Index_type;
static char *Index_fields[]={
    "value",
};

/* ---- boolop: boolean operator "enum" nodes ---- */
static PyTypeObject *boolop_type;
static PyObject *And_singleton, *Or_singleton;
static PyObject* ast2obj_boolop(boolop_ty);
static PyTypeObject *And_type;
static PyTypeObject *Or_type;

/* ---- operator: binary/augmented-assignment operator "enum" nodes ---- */
static PyTypeObject *operator_type;
static PyObject *Add_singleton, *Sub_singleton, *Mult_singleton,
*MatMult_singleton, *Div_singleton, *Mod_singleton, *Pow_singleton,
*LShift_singleton, *RShift_singleton, *BitOr_singleton, *BitXor_singleton,
*BitAnd_singleton, *FloorDiv_singleton;
static PyObject* ast2obj_operator(operator_ty);
static PyTypeObject *Add_type;
static PyTypeObject *Sub_type;
static PyTypeObject *Mult_type;
static PyTypeObject *MatMult_type;
static PyTypeObject *Div_type;
static PyTypeObject *Mod_type;
static PyTypeObject *Pow_type;
static PyTypeObject *LShift_type;
static PyTypeObject *RShift_type;
static PyTypeObject *BitOr_type;
static PyTypeObject *BitXor_type;
static PyTypeObject *BitAnd_type;
static PyTypeObject *FloorDiv_type;

/* ---- unaryop: unary operator "enum" nodes ---- */
static PyTypeObject *unaryop_type;
static PyObject *Invert_singleton, *Not_singleton, *UAdd_singleton,
*USub_singleton;
static PyObject* ast2obj_unaryop(unaryop_ty);
static PyTypeObject *Invert_type;
static PyTypeObject *Not_type;
static PyTypeObject *UAdd_type;
static PyTypeObject *USub_type;

/* ---- cmpop: comparison operator "enum" nodes ---- */
static PyTypeObject *cmpop_type;
static PyObject *Eq_singleton, *NotEq_singleton, *Lt_singleton,
*LtE_singleton, *Gt_singleton, *GtE_singleton, *Is_singleton,
*IsNot_singleton, *In_singleton, *NotIn_singleton;
static PyObject* ast2obj_cmpop(cmpop_ty);
static PyTypeObject *Eq_type;
static PyTypeObject *NotEq_type;
static PyTypeObject *Lt_type;
static PyTypeObject *LtE_type;
static PyTypeObject *Gt_type;
static PyTypeObject *GtE_type;
static PyTypeObject *Is_type;
static PyTypeObject *IsNot_type;
static PyTypeObject *In_type;
static PyTypeObject *NotIn_type;

/* ---- comprehension: one "for ... in ... if ..." clause of a
   list/set/dict comprehension or generator expression ---- */
static PyTypeObject *comprehension_type;
static PyObject* ast2obj_comprehension(void*);
_Py_IDENTIFIER(ifs);
_Py_IDENTIFIER(is_async);
static char *comprehension_fields[]={
    "target",
    "iter",
    "ifs",
    "is_async",
};

/* ---- excepthandler: except clause nodes (carry location attributes,
   like stmt/expr) ---- */
static PyTypeObject *excepthandler_type;
static char *excepthandler_attributes[] = {
    "lineno",
    "col_offset",
    "end_lineno",
    "end_col_offset",
};
static PyObject* ast2obj_excepthandler(void*);
static PyTypeObject *ExceptHandler_type;
/*
 * Remaining static field tables (excepthandler/arguments/arg/keyword/alias/
 * withitem/type_ignore), then the _ast.AST base object: AST_object carries a
 * per-instance dict and participates in GC (dealloc/traverse/clear below);
 * ast_type_init() validates positional arguments against the type's _fields
 * and sets them (plus any keywords) as attributes; ast_type_reduce() gives
 * pickle support.  make_type()/add_attributes() are the factories used by
 * init_types(), followed by the AST<->Python conversion helpers.
 */
_Py_IDENTIFIER(type); static char *ExceptHandler_fields[]={ "type", "name", "body", }; static PyTypeObject *arguments_type; static PyObject* ast2obj_arguments(void*); _Py_IDENTIFIER(vararg); _Py_IDENTIFIER(kwonlyargs); _Py_IDENTIFIER(kw_defaults); _Py_IDENTIFIER(kwarg); _Py_IDENTIFIER(defaults); static char *arguments_fields[]={ "args", "vararg", "kwonlyargs", "kw_defaults", "kwarg", "defaults", }; static PyTypeObject *arg_type; static PyObject* ast2obj_arg(void*); static char *arg_attributes[] = { "lineno", "col_offset", "end_lineno", "end_col_offset", }; _Py_IDENTIFIER(arg); static char *arg_fields[]={ "arg", "annotation", "type_comment", }; static PyTypeObject *keyword_type; static PyObject* ast2obj_keyword(void*); static char *keyword_fields[]={ "arg", "value", }; static PyTypeObject *alias_type; static PyObject* ast2obj_alias(void*); _Py_IDENTIFIER(asname); static char *alias_fields[]={ "name", "asname", }; static PyTypeObject *withitem_type; static PyObject* ast2obj_withitem(void*); _Py_IDENTIFIER(context_expr); _Py_IDENTIFIER(optional_vars); static char *withitem_fields[]={ "context_expr", "optional_vars", }; static PyTypeObject *type_ignore_type; static PyObject* ast2obj_type_ignore(void*); static PyTypeObject *TypeIgnore_type; static char *TypeIgnore_fields[]={ "lineno", }; _Py_IDENTIFIER(_fields); _Py_IDENTIFIER(_attributes); typedef struct { PyObject_HEAD PyObject *dict; } AST_object; static void ast_dealloc(AST_object *self) { /* bpo-31095: UnTrack is needed before calling any callbacks */ PyObject_GC_UnTrack(self); Py_CLEAR(self->dict); Py_TYPE(self)->tp_free(self); } static int ast_traverse(AST_object *self, visitproc visit, void *arg) { Py_VISIT(self->dict); return 0; } static int ast_clear(AST_object *self) { Py_CLEAR(self->dict); return 0; } static int ast_type_init(PyObject *self, PyObject *args, PyObject *kw) { Py_ssize_t i, numfields = 0; int res = -1; PyObject *key, *value, *fields; if (_PyObject_LookupAttrId((PyObject*)Py_TYPE(self),
/* ast_type_init continues: reject more positional args than _fields has. */
&PyId__fields, &fields) < 0) { goto cleanup; } if (fields) { numfields = PySequence_Size(fields); if (numfields == -1) goto cleanup; } res = 0; /* if no error occurs, this stays 0 to the end */ if (numfields < PyTuple_GET_SIZE(args)) { PyErr_Format(PyExc_TypeError, "%.400s constructor takes at most " "%zd positional argument%s", Py_TYPE(self)->tp_name, numfields, numfields == 1 ? "" : "s"); res = -1; goto cleanup; } for (i = 0; i < PyTuple_GET_SIZE(args); i++) { /* cannot be reached when fields is NULL */ PyObject *name = PySequence_GetItem(fields, i); if (!name) { res = -1; goto cleanup; } res = PyObject_SetAttr(self, name, PyTuple_GET_ITEM(args, i)); Py_DECREF(name); if (res < 0) goto cleanup; } if (kw) { i = 0; /* needed by PyDict_Next */ while (PyDict_Next(kw, &i, &key, &value)) { res = PyObject_SetAttr(self, key, value); if (res < 0) goto cleanup; } } cleanup: Py_XDECREF(fields); return res; } /* Pickling support */ static PyObject * ast_type_reduce(PyObject *self, PyObject *unused) { _Py_IDENTIFIER(__dict__); PyObject *dict; if (_PyObject_LookupAttrId(self, &PyId___dict__, &dict) < 0) { return NULL; } if (dict) { return Py_BuildValue("O()N", Py_TYPE(self), dict); } return Py_BuildValue("O()", Py_TYPE(self)); } static PyMethodDef ast_type_methods[] = { {"__reduce__", ast_type_reduce, METH_NOARGS, NULL}, {NULL} }; static PyGetSetDef ast_type_getsets[] = { {"__dict__", PyObject_GenericGetDict, PyObject_GenericSetDict}, {NULL} }; static PyTypeObject AST_type = { PyVarObject_HEAD_INIT(&PyType_Type, 0) "_ast.AST", sizeof(AST_object), 0, (destructor)ast_dealloc, /* tp_dealloc */ 0, /* tp_print */ 0, /* tp_getattr */ 0, /* tp_setattr */ 0, /* tp_reserved */ 0, /* tp_repr */ 0, /* tp_as_number */ 0, /* tp_as_sequence */ 0, /* tp_as_mapping */ 0, /* tp_hash */ 0, /* tp_call */ 0, /* tp_str */ PyObject_GenericGetAttr, /* tp_getattro */ PyObject_GenericSetAttr, /* tp_setattro */ 0, /* tp_as_buffer */ Py_TPFLAGS_DEFAULT | Py_TPFLAGS_BASETYPE | Py_TPFLAGS_HAVE_GC, /*
tp_flags */ 0, /* tp_doc */ (traverseproc)ast_traverse, /* tp_traverse */ (inquiry)ast_clear, /* tp_clear */ 0, /* tp_richcompare */ 0, /* tp_weaklistoffset */ 0, /* tp_iter */ 0, /* tp_iternext */ ast_type_methods, /* tp_methods */ 0, /* tp_members */ ast_type_getsets, /* tp_getset */ 0, /* tp_base */ 0, /* tp_dict */ 0, /* tp_descr_get */ 0, /* tp_descr_set */ offsetof(AST_object, dict),/* tp_dictoffset */ (initproc)ast_type_init, /* tp_init */ PyType_GenericAlloc, /* tp_alloc */ PyType_GenericNew, /* tp_new */ PyObject_GC_Del, /* tp_free */ }; static PyTypeObject* make_type(char *type, PyTypeObject* base, char**fields, int num_fields) { _Py_IDENTIFIER(__module__); _Py_IDENTIFIER(_ast); PyObject *fnames, *result; int i; fnames = PyTuple_New(num_fields); if (!fnames) return NULL; for (i = 0; i < num_fields; i++) { PyObject *field = PyUnicode_FromString(fields[i]); if (!field) { Py_DECREF(fnames); return NULL; } PyTuple_SET_ITEM(fnames, i, field); } result = PyObject_CallFunction((PyObject*)&PyType_Type, "s(O){OOOO}", type, base, _PyUnicode_FromId(&PyId__fields), fnames, _PyUnicode_FromId(&PyId___module__), _PyUnicode_FromId(&PyId__ast)); Py_DECREF(fnames); return (PyTypeObject*)result; } static int add_attributes(PyTypeObject* type, char**attrs, int num_fields) { int i, result; PyObject *s, *l = PyTuple_New(num_fields); if (!l) return 0; for (i = 0; i < num_fields; i++) { s = PyUnicode_FromString(attrs[i]); if (!s) { Py_DECREF(l); return 0; } PyTuple_SET_ITEM(l, i, s); } result = _PyObject_SetAttrId((PyObject*)type, &PyId__attributes, l) >= 0; Py_DECREF(l); return result; } /* Conversion AST -> Python */ static PyObject* ast2obj_list(asdl_seq *seq, PyObject* (*func)(void*)) { Py_ssize_t i, n = asdl_seq_LEN(seq); PyObject *result = PyList_New(n); PyObject *value; if (!result) return NULL; for (i = 0; i < n; i++) { value = func(asdl_seq_GET(seq, i)); if (!value) { Py_DECREF(result); return NULL; } PyList_SET_ITEM(result, i, value); } return result; } static
/* ast2obj_* / obj2ast_* scalar helpers; objects are arena-owned via
   PyArena_AddPyObject so node lifetimes follow the arena.  Note the
   obj2ast_* validators return 1 (not -1) on type errors. */
PyObject* ast2obj_object(void *o) { if (!o) o = Py_None; Py_INCREF((PyObject*)o); return (PyObject*)o; } #define ast2obj_singleton ast2obj_object #define ast2obj_constant ast2obj_object #define ast2obj_identifier ast2obj_object #define ast2obj_string ast2obj_object #define ast2obj_bytes ast2obj_object static PyObject* ast2obj_int(long b) { return PyLong_FromLong(b); } /* Conversion Python -> AST */ static int obj2ast_object(PyObject* obj, PyObject** out, PyArena* arena) { if (obj == Py_None) obj = NULL; if (obj) { if (PyArena_AddPyObject(arena, obj) < 0) { *out = NULL; return -1; } Py_INCREF(obj); } *out = obj; return 0; } static int obj2ast_constant(PyObject* obj, PyObject** out, PyArena* arena) { if (PyArena_AddPyObject(arena, obj) < 0) { *out = NULL; return -1; } Py_INCREF(obj); *out = obj; return 0; } static int obj2ast_identifier(PyObject* obj, PyObject** out, PyArena* arena) { if (!PyUnicode_CheckExact(obj) && obj != Py_None) { PyErr_SetString(PyExc_TypeError, "AST identifier must be of type str"); return 1; } return obj2ast_object(obj, out, arena); } static int obj2ast_string(PyObject* obj, PyObject** out, PyArena* arena) { if (!PyUnicode_CheckExact(obj) && !PyBytes_CheckExact(obj)) { PyErr_SetString(PyExc_TypeError, "AST string must be of type str"); return 1; } return obj2ast_object(obj, out, arena); } static int obj2ast_int(PyObject* obj, int* out, PyArena* arena) { int i; if (!PyLong_Check(obj)) { PyErr_Format(PyExc_ValueError, "invalid integer value: %R", obj); return 1; } i = _PyLong_AsInt(obj); if (i == -1 && PyErr_Occurred()) return 1; *out = i; return 0; } static int add_ast_fields(void) { PyObject *empty_tuple, *d; if (PyType_Ready(&AST_type) < 0) return -1; d = AST_type.tp_dict; empty_tuple = PyTuple_New(0); if (!empty_tuple || _PyDict_SetItemId(d, &PyId__fields, empty_tuple) < 0 || _PyDict_SetItemId(d, &PyId__attributes, empty_tuple) < 0) { Py_XDECREF(empty_tuple); return -1; } Py_DECREF(empty_tuple); return 0; } static int init_types(void) {
/*
 * Body of init_types(): one-shot (guarded by the static `initialized` flag)
 * construction of every Python-visible AST type via make_type() and
 * add_attributes(), plus the per-context/operator singleton instances.
 * Returns 1 on success, 0 on any failure (partially-built state is left
 * behind; callers treat 0 as fatal).  Generated code — regenerate from the
 * ASDL grammar rather than editing by hand.
 */
static int initialized; if (initialized) return 1; if (add_ast_fields() < 0) return 0; mod_type = make_type("mod", &AST_type, NULL, 0); if (!mod_type) return 0; if (!add_attributes(mod_type, NULL, 0)) return 0; Module_type = make_type("Module", mod_type, Module_fields, 2); if (!Module_type) return 0; Interactive_type = make_type("Interactive", mod_type, Interactive_fields, 1); if (!Interactive_type) return 0; Expression_type = make_type("Expression", mod_type, Expression_fields, 1); if (!Expression_type) return 0; FunctionType_type = make_type("FunctionType", mod_type, FunctionType_fields, 2); if (!FunctionType_type) return 0; Suite_type = make_type("Suite", mod_type, Suite_fields, 1); if (!Suite_type) return 0; stmt_type = make_type("stmt", &AST_type, NULL, 0); if (!stmt_type) return 0; if (!add_attributes(stmt_type, stmt_attributes, 4)) return 0; FunctionDef_type = make_type("FunctionDef", stmt_type, FunctionDef_fields, 6); if (!FunctionDef_type) return 0; AsyncFunctionDef_type = make_type("AsyncFunctionDef", stmt_type, AsyncFunctionDef_fields, 6); if (!AsyncFunctionDef_type) return 0; ClassDef_type = make_type("ClassDef", stmt_type, ClassDef_fields, 5); if (!ClassDef_type) return 0; Return_type = make_type("Return", stmt_type, Return_fields, 1); if (!Return_type) return 0; Delete_type = make_type("Delete", stmt_type, Delete_fields, 1); if (!Delete_type) return 0; Assign_type = make_type("Assign", stmt_type, Assign_fields, 3); if (!Assign_type) return 0; AugAssign_type = make_type("AugAssign", stmt_type, AugAssign_fields, 3); if (!AugAssign_type) return 0; AnnAssign_type = make_type("AnnAssign", stmt_type, AnnAssign_fields, 4); if (!AnnAssign_type) return 0; For_type = make_type("For", stmt_type, For_fields, 5); if (!For_type) return 0; AsyncFor_type = make_type("AsyncFor", stmt_type, AsyncFor_fields, 5); if (!AsyncFor_type) return 0; While_type = make_type("While", stmt_type, While_fields, 3); if (!While_type) return 0; If_type = make_type("If", stmt_type,
/* stmt subtypes continued. */
If_fields, 3); if (!If_type) return 0; With_type = make_type("With", stmt_type, With_fields, 3); if (!With_type) return 0; AsyncWith_type = make_type("AsyncWith", stmt_type, AsyncWith_fields, 3); if (!AsyncWith_type) return 0; Raise_type = make_type("Raise", stmt_type, Raise_fields, 2); if (!Raise_type) return 0; Try_type = make_type("Try", stmt_type, Try_fields, 4); if (!Try_type) return 0; Assert_type = make_type("Assert", stmt_type, Assert_fields, 2); if (!Assert_type) return 0; Import_type = make_type("Import", stmt_type, Import_fields, 1); if (!Import_type) return 0; ImportFrom_type = make_type("ImportFrom", stmt_type, ImportFrom_fields, 3); if (!ImportFrom_type) return 0; Global_type = make_type("Global", stmt_type, Global_fields, 1); if (!Global_type) return 0; Nonlocal_type = make_type("Nonlocal", stmt_type, Nonlocal_fields, 1); if (!Nonlocal_type) return 0; Expr_type = make_type("Expr", stmt_type, Expr_fields, 1); if (!Expr_type) return 0; Pass_type = make_type("Pass", stmt_type, NULL, 0); if (!Pass_type) return 0; Break_type = make_type("Break", stmt_type, NULL, 0); if (!Break_type) return 0; Continue_type = make_type("Continue", stmt_type, NULL, 0); if (!Continue_type) return 0; expr_type = make_type("expr", &AST_type, NULL, 0); if (!expr_type) return 0; if (!add_attributes(expr_type, expr_attributes, 4)) return 0; BoolOp_type = make_type("BoolOp", expr_type, BoolOp_fields, 2); if (!BoolOp_type) return 0; NamedExpr_type = make_type("NamedExpr", expr_type, NamedExpr_fields, 2); if (!NamedExpr_type) return 0; BinOp_type = make_type("BinOp", expr_type, BinOp_fields, 3); if (!BinOp_type) return 0; UnaryOp_type = make_type("UnaryOp", expr_type, UnaryOp_fields, 2); if (!UnaryOp_type) return 0; Lambda_type = make_type("Lambda", expr_type, Lambda_fields, 2); if (!Lambda_type) return 0; IfExp_type = make_type("IfExp", expr_type, IfExp_fields, 3); if (!IfExp_type) return 0; Dict_type = make_type("Dict", expr_type, Dict_fields, 2); if (!Dict_type) return 0;
/* expr subtypes continued. */
Set_type = make_type("Set", expr_type, Set_fields, 1); if (!Set_type) return 0; ListComp_type = make_type("ListComp", expr_type, ListComp_fields, 2); if (!ListComp_type) return 0; SetComp_type = make_type("SetComp", expr_type, SetComp_fields, 2); if (!SetComp_type) return 0; DictComp_type = make_type("DictComp", expr_type, DictComp_fields, 3); if (!DictComp_type) return 0; GeneratorExp_type = make_type("GeneratorExp", expr_type, GeneratorExp_fields, 2); if (!GeneratorExp_type) return 0; Await_type = make_type("Await", expr_type, Await_fields, 1); if (!Await_type) return 0; Yield_type = make_type("Yield", expr_type, Yield_fields, 1); if (!Yield_type) return 0; YieldFrom_type = make_type("YieldFrom", expr_type, YieldFrom_fields, 1); if (!YieldFrom_type) return 0; Compare_type = make_type("Compare", expr_type, Compare_fields, 3); if (!Compare_type) return 0; Call_type = make_type("Call", expr_type, Call_fields, 3); if (!Call_type) return 0; FormattedValue_type = make_type("FormattedValue", expr_type, FormattedValue_fields, 3); if (!FormattedValue_type) return 0; JoinedStr_type = make_type("JoinedStr", expr_type, JoinedStr_fields, 1); if (!JoinedStr_type) return 0; Constant_type = make_type("Constant", expr_type, Constant_fields, 1); if (!Constant_type) return 0; Attribute_type = make_type("Attribute", expr_type, Attribute_fields, 3); if (!Attribute_type) return 0; Subscript_type = make_type("Subscript", expr_type, Subscript_fields, 3); if (!Subscript_type) return 0; Starred_type = make_type("Starred", expr_type, Starred_fields, 2); if (!Starred_type) return 0; Name_type = make_type("Name", expr_type, Name_fields, 2); if (!Name_type) return 0; List_type = make_type("List", expr_type, List_fields, 2); if (!List_type) return 0; Tuple_type = make_type("Tuple", expr_type, Tuple_fields, 2); if (!Tuple_type) return 0; expr_context_type = make_type("expr_context", &AST_type, NULL, 0); if (!expr_context_type) return 0; if (!add_attributes(expr_context_type, NULL, 0)) return 0;
/* expr_context subtypes, each with a cached singleton instance; then the
   slice hierarchy and boolop. */
Load_type = make_type("Load", expr_context_type, NULL, 0); if (!Load_type) return 0; Load_singleton = PyType_GenericNew(Load_type, NULL, NULL); if (!Load_singleton) return 0; Store_type = make_type("Store", expr_context_type, NULL, 0); if (!Store_type) return 0; Store_singleton = PyType_GenericNew(Store_type, NULL, NULL); if (!Store_singleton) return 0; Del_type = make_type("Del", expr_context_type, NULL, 0); if (!Del_type) return 0; Del_singleton = PyType_GenericNew(Del_type, NULL, NULL); if (!Del_singleton) return 0; AugLoad_type = make_type("AugLoad", expr_context_type, NULL, 0); if (!AugLoad_type) return 0; AugLoad_singleton = PyType_GenericNew(AugLoad_type, NULL, NULL); if (!AugLoad_singleton) return 0; AugStore_type = make_type("AugStore", expr_context_type, NULL, 0); if (!AugStore_type) return 0; AugStore_singleton = PyType_GenericNew(AugStore_type, NULL, NULL); if (!AugStore_singleton) return 0; Param_type = make_type("Param", expr_context_type, NULL, 0); if (!Param_type) return 0; Param_singleton = PyType_GenericNew(Param_type, NULL, NULL); if (!Param_singleton) return 0; NamedStore_type = make_type("NamedStore", expr_context_type, NULL, 0); if (!NamedStore_type) return 0; NamedStore_singleton = PyType_GenericNew(NamedStore_type, NULL, NULL); if (!NamedStore_singleton) return 0; slice_type = make_type("slice", &AST_type, NULL, 0); if (!slice_type) return 0; if (!add_attributes(slice_type, NULL, 0)) return 0; Slice_type = make_type("Slice", slice_type, Slice_fields, 3); if (!Slice_type) return 0; ExtSlice_type = make_type("ExtSlice", slice_type, ExtSlice_fields, 1); if (!ExtSlice_type) return 0; Index_type = make_type("Index", slice_type, Index_fields, 1); if (!Index_type) return 0; boolop_type = make_type("boolop", &AST_type, NULL, 0); if (!boolop_type) return 0; if (!add_attributes(boolop_type, NULL, 0)) return 0; And_type = make_type("And", boolop_type, NULL, 0); if (!And_type) return 0; And_singleton = PyType_GenericNew(And_type, NULL, NULL); if
(!And_singleton) return 0; Or_type = make_type("Or", boolop_type, NULL, 0); if (!Or_type) return 0; Or_singleton = PyType_GenericNew(Or_type, NULL, NULL); if (!Or_singleton) return 0; operator_type = make_type("operator", &AST_type, NULL, 0); if (!operator_type) return 0; if (!add_attributes(operator_type, NULL, 0)) return 0; Add_type = make_type("Add", operator_type, NULL, 0); if (!Add_type) return 0; Add_singleton = PyType_GenericNew(Add_type, NULL, NULL); if (!Add_singleton) return 0; Sub_type = make_type("Sub", operator_type, NULL, 0); if (!Sub_type) return 0; Sub_singleton = PyType_GenericNew(Sub_type, NULL, NULL); if (!Sub_singleton) return 0; Mult_type = make_type("Mult", operator_type, NULL, 0); if (!Mult_type) return 0; Mult_singleton = PyType_GenericNew(Mult_type, NULL, NULL); if (!Mult_singleton) return 0; MatMult_type = make_type("MatMult", operator_type, NULL, 0); if (!MatMult_type) return 0; MatMult_singleton = PyType_GenericNew(MatMult_type, NULL, NULL); if (!MatMult_singleton) return 0; Div_type = make_type("Div", operator_type, NULL, 0); if (!Div_type) return 0; Div_singleton = PyType_GenericNew(Div_type, NULL, NULL); if (!Div_singleton) return 0; Mod_type = make_type("Mod", operator_type, NULL, 0); if (!Mod_type) return 0; Mod_singleton = PyType_GenericNew(Mod_type, NULL, NULL); if (!Mod_singleton) return 0; Pow_type = make_type("Pow", operator_type, NULL, 0); if (!Pow_type) return 0; Pow_singleton = PyType_GenericNew(Pow_type, NULL, NULL); if (!Pow_singleton) return 0; LShift_type = make_type("LShift", operator_type, NULL, 0); if (!LShift_type) return 0; LShift_singleton = PyType_GenericNew(LShift_type, NULL, NULL); if (!LShift_singleton) return 0; RShift_type = make_type("RShift", operator_type, NULL, 0); if (!RShift_type) return 0; RShift_singleton = PyType_GenericNew(RShift_type, NULL, NULL); if (!RShift_singleton) return 0; BitOr_type = make_type("BitOr", operator_type, NULL, 0); if (!BitOr_type) return 0; BitOr_singleton =
PyType_GenericNew(BitOr_type, NULL, NULL); if (!BitOr_singleton) return 0; BitXor_type = make_type("BitXor", operator_type, NULL, 0); if (!BitXor_type) return 0; BitXor_singleton = PyType_GenericNew(BitXor_type, NULL, NULL); if (!BitXor_singleton) return 0; BitAnd_type = make_type("BitAnd", operator_type, NULL, 0); if (!BitAnd_type) return 0; BitAnd_singleton = PyType_GenericNew(BitAnd_type, NULL, NULL); if (!BitAnd_singleton) return 0; FloorDiv_type = make_type("FloorDiv", operator_type, NULL, 0); if (!FloorDiv_type) return 0; FloorDiv_singleton = PyType_GenericNew(FloorDiv_type, NULL, NULL); if (!FloorDiv_singleton) return 0; unaryop_type = make_type("unaryop", &AST_type, NULL, 0); if (!unaryop_type) return 0; if (!add_attributes(unaryop_type, NULL, 0)) return 0; Invert_type = make_type("Invert", unaryop_type, NULL, 0); if (!Invert_type) return 0; Invert_singleton = PyType_GenericNew(Invert_type, NULL, NULL); if (!Invert_singleton) return 0; Not_type = make_type("Not", unaryop_type, NULL, 0); if (!Not_type) return 0; Not_singleton = PyType_GenericNew(Not_type, NULL, NULL); if (!Not_singleton) return 0; UAdd_type = make_type("UAdd", unaryop_type, NULL, 0); if (!UAdd_type) return 0; UAdd_singleton = PyType_GenericNew(UAdd_type, NULL, NULL); if (!UAdd_singleton) return 0; USub_type = make_type("USub", unaryop_type, NULL, 0); if (!USub_type) return 0; USub_singleton = PyType_GenericNew(USub_type, NULL, NULL); if (!USub_singleton) return 0; cmpop_type = make_type("cmpop", &AST_type, NULL, 0); if (!cmpop_type) return 0; if (!add_attributes(cmpop_type, NULL, 0)) return 0; Eq_type = make_type("Eq", cmpop_type, NULL, 0); if (!Eq_type) return 0; Eq_singleton = PyType_GenericNew(Eq_type, NULL, NULL); if (!Eq_singleton) return 0; NotEq_type = make_type("NotEq", cmpop_type, NULL, 0); if (!NotEq_type) return 0; NotEq_singleton = PyType_GenericNew(NotEq_type, NULL, NULL); if (!NotEq_singleton) return 0; Lt_type = make_type("Lt", cmpop_type, NULL, 0); if (!Lt_type) return 0;
/* cmpop subtypes continued, then the non-sum helper types. */
Lt_singleton = PyType_GenericNew(Lt_type, NULL, NULL); if (!Lt_singleton) return 0; LtE_type = make_type("LtE", cmpop_type, NULL, 0); if (!LtE_type) return 0; LtE_singleton = PyType_GenericNew(LtE_type, NULL, NULL); if (!LtE_singleton) return 0; Gt_type = make_type("Gt", cmpop_type, NULL, 0); if (!Gt_type) return 0; Gt_singleton = PyType_GenericNew(Gt_type, NULL, NULL); if (!Gt_singleton) return 0; GtE_type = make_type("GtE", cmpop_type, NULL, 0); if (!GtE_type) return 0; GtE_singleton = PyType_GenericNew(GtE_type, NULL, NULL); if (!GtE_singleton) return 0; Is_type = make_type("Is", cmpop_type, NULL, 0); if (!Is_type) return 0; Is_singleton = PyType_GenericNew(Is_type, NULL, NULL); if (!Is_singleton) return 0; IsNot_type = make_type("IsNot", cmpop_type, NULL, 0); if (!IsNot_type) return 0; IsNot_singleton = PyType_GenericNew(IsNot_type, NULL, NULL); if (!IsNot_singleton) return 0; In_type = make_type("In", cmpop_type, NULL, 0); if (!In_type) return 0; In_singleton = PyType_GenericNew(In_type, NULL, NULL); if (!In_singleton) return 0; NotIn_type = make_type("NotIn", cmpop_type, NULL, 0); if (!NotIn_type) return 0; NotIn_singleton = PyType_GenericNew(NotIn_type, NULL, NULL); if (!NotIn_singleton) return 0; comprehension_type = make_type("comprehension", &AST_type, comprehension_fields, 4); if (!comprehension_type) return 0; if (!add_attributes(comprehension_type, NULL, 0)) return 0; excepthandler_type = make_type("excepthandler", &AST_type, NULL, 0); if (!excepthandler_type) return 0; if (!add_attributes(excepthandler_type, excepthandler_attributes, 4)) return 0; ExceptHandler_type = make_type("ExceptHandler", excepthandler_type, ExceptHandler_fields, 3); if (!ExceptHandler_type) return 0; arguments_type = make_type("arguments", &AST_type, arguments_fields, 6); if (!arguments_type) return 0; if (!add_attributes(arguments_type, NULL, 0)) return 0; arg_type = make_type("arg", &AST_type, arg_fields, 3); if (!arg_type) return 0; if (!add_attributes(arg_type,
arg_attributes, 4)) return 0; keyword_type = make_type("keyword", &AST_type, keyword_fields, 2); if (!keyword_type) return 0; if (!add_attributes(keyword_type, NULL, 0)) return 0; alias_type = make_type("alias", &AST_type, alias_fields, 2); if (!alias_type) return 0; if (!add_attributes(alias_type, NULL, 0)) return 0; withitem_type = make_type("withitem", &AST_type, withitem_fields, 2); if (!withitem_type) return 0; if (!add_attributes(withitem_type, NULL, 0)) return 0; type_ignore_type = make_type("type_ignore", &AST_type, NULL, 0); if (!type_ignore_type) return 0; if (!add_attributes(type_ignore_type, NULL, 0)) return 0; TypeIgnore_type = make_type("TypeIgnore", type_ignore_type, TypeIgnore_fields, 1); if (!TypeIgnore_type) return 0; initialized = 1; return 1; } /* Forward declarations: Python-object -> C AST converters, defined later. */ static int obj2ast_mod(PyObject* obj, mod_ty* out, PyArena* arena); static int obj2ast_stmt(PyObject* obj, stmt_ty* out, PyArena* arena); static int obj2ast_expr(PyObject* obj, expr_ty* out, PyArena* arena); static int obj2ast_expr_context(PyObject* obj, expr_context_ty* out, PyArena* arena); static int obj2ast_slice(PyObject* obj, slice_ty* out, PyArena* arena); static int obj2ast_boolop(PyObject* obj, boolop_ty* out, PyArena* arena); static int obj2ast_operator(PyObject* obj, operator_ty* out, PyArena* arena); static int obj2ast_unaryop(PyObject* obj, unaryop_ty* out, PyArena* arena); static int obj2ast_cmpop(PyObject* obj, cmpop_ty* out, PyArena* arena); static int obj2ast_comprehension(PyObject* obj, comprehension_ty* out, PyArena* arena); static int obj2ast_excepthandler(PyObject* obj, excepthandler_ty* out, PyArena* arena); static int obj2ast_arguments(PyObject* obj, arguments_ty* out, PyArena* arena); static int obj2ast_arg(PyObject* obj, arg_ty* out, PyArena* arena); static int obj2ast_keyword(PyObject* obj, keyword_ty* out, PyArena* arena); static int obj2ast_alias(PyObject* obj, alias_ty* out, PyArena* arena); static int obj2ast_withitem(PyObject* obj, withitem_ty* out, PyArena* arena); static
/* NOTE(review): this chunk reads as CPython's machine-generated
   Parser/Python-ast.c (output of Parser/asdl_c.py); the file metadata
   (print-pim.c) is wrong.  Real changes belong in the ASDL generator,
   not in this file — TODO confirm provenance before editing by hand. */
int obj2ast_type_ignore(PyObject* obj, type_ignore_ty* out, PyArena* arena);

/* Module(body, type_ignores): build a Module AST node.  Storage comes
   from the arena (no explicit free); returns NULL only when
   PyArena_Malloc fails.  Field sequences are stored as-is, unchecked. */
mod_ty
Module(asdl_seq * body, asdl_seq * type_ignores, PyArena *arena)
{
    mod_ty p;
    p = (mod_ty)PyArena_Malloc(arena, sizeof(*p));
    if (!p)
        return NULL;
    p->kind = Module_kind;
    p->v.Module.body = body;
    p->v.Module.type_ignores = type_ignores;
    return p;
}

/* Interactive(body): build an Interactive mod node; body stored as-is. */
mod_ty
Interactive(asdl_seq * body, PyArena *arena)
{
    mod_ty p;
    p = (mod_ty)PyArena_Malloc(arena, sizeof(*p));
    if (!p)
        return NULL;
    p->kind = Interactive_kind;
    p->v.Interactive.body = body;
    return p;
}

/* Expression(body): build an Expression mod node.  Unlike the sequence
   fields above, `body` is a required single child: a NULL argument is
   rejected with ValueError instead of being stored. */
mod_ty
Expression(expr_ty body, PyArena *arena)
{
    mod_ty p;
    if (!body) {
        PyErr_SetString(PyExc_ValueError,
                        "field body is required for Expression");
        return NULL;
    }
    p = (mod_ty)PyArena_Malloc(arena, sizeof(*p));
    if (!p)
        return NULL;
    p->kind = Expression_kind;
    p->v.Expression.body = body;
    return p;
}

/* FunctionType(argtypes, returns): build a FunctionType mod node;
   `returns` is required, `argtypes` may be NULL/empty. */
mod_ty
FunctionType(asdl_seq * argtypes, expr_ty returns, PyArena *arena)
{
    mod_ty p;
    if (!returns) {
        PyErr_SetString(PyExc_ValueError,
                        "field returns is required for FunctionType");
        return NULL;
    }
    p = (mod_ty)PyArena_Malloc(arena, sizeof(*p));
    if (!p)
        return NULL;
    p->kind = FunctionType_kind;
    p->v.FunctionType.argtypes = argtypes;
    p->v.FunctionType.returns = returns;
    return p;
}

/* Suite(body): build a Suite mod node (no required fields). */
mod_ty
Suite(asdl_seq * body, PyArena *arena)
{
    mod_ty p;
    p = (mod_ty)PyArena_Malloc(arena, sizeof(*p));
    if (!p)
        return NULL;
    p->kind = Suite_kind;
    p->v.Suite.body = body;
    return p;
}

/* FunctionDef: statement constructor.  `name` and `args` are required
   (ValueError on NULL); the four location ints record the source span.
   (Definition continues on the next physical line of this file.) */
stmt_ty
FunctionDef(identifier name, arguments_ty args, asdl_seq * body,
            asdl_seq * decorator_list, expr_ty returns,
            string type_comment, int lineno, int col_offset,
            int end_lineno, int end_col_offset, PyArena *arena)
{
    stmt_ty p;
    if (!name) {
        PyErr_SetString(PyExc_ValueError,
                        "field name is required for FunctionDef");
        return NULL;
    }
    if (!args) {
        PyErr_SetString(PyExc_ValueError,
                        "field args is required for FunctionDef");
        return NULL;
    }
    p = (stmt_ty)PyArena_Malloc(arena, sizeof(*p));
    if (!p)
        return NULL;
    p->kind = FunctionDef_kind;
    p->v.FunctionDef.name = name;
    p->v.FunctionDef.args = args;
p->v.FunctionDef.body = body; p->v.FunctionDef.decorator_list = decorator_list; p->v.FunctionDef.returns = returns; p->v.FunctionDef.type_comment = type_comment; p->lineno = lineno; p->col_offset = col_offset; p->end_lineno = end_lineno; p->end_col_offset = end_col_offset; return p; } stmt_ty AsyncFunctionDef(identifier name, arguments_ty args, asdl_seq * body, asdl_seq * decorator_list, expr_ty returns, string type_comment, int lineno, int col_offset, int end_lineno, int end_col_offset, PyArena *arena) { stmt_ty p; if (!name) { PyErr_SetString(PyExc_ValueError, "field name is required for AsyncFunctionDef"); return NULL; } if (!args) { PyErr_SetString(PyExc_ValueError, "field args is required for AsyncFunctionDef"); return NULL; } p = (stmt_ty)PyArena_Malloc(arena, sizeof(*p)); if (!p) return NULL; p->kind = AsyncFunctionDef_kind; p->v.AsyncFunctionDef.name = name; p->v.AsyncFunctionDef.args = args; p->v.AsyncFunctionDef.body = body; p->v.AsyncFunctionDef.decorator_list = decorator_list; p->v.AsyncFunctionDef.returns = returns; p->v.AsyncFunctionDef.type_comment = type_comment; p->lineno = lineno; p->col_offset = col_offset; p->end_lineno = end_lineno; p->end_col_offset = end_col_offset; return p; } stmt_ty ClassDef(identifier name, asdl_seq * bases, asdl_seq * keywords, asdl_seq * body, asdl_seq * decorator_list, int lineno, int col_offset, int end_lineno, int end_col_offset, PyArena *arena) { stmt_ty p; if (!name) { PyErr_SetString(PyExc_ValueError, "field name is required for ClassDef"); return NULL; } p = (stmt_ty)PyArena_Malloc(arena, sizeof(*p)); if (!p) return NULL; p->kind = ClassDef_kind; p->v.ClassDef.name = name; p->v.ClassDef.bases = bases; p->v.ClassDef.keywords = keywords; p->v.ClassDef.body = body; p->v.ClassDef.decorator_list = decorator_list; p->lineno = lineno; p->col_offset = col_offset; p->end_lineno = end_lineno; p->end_col_offset = end_col_offset; return p; } stmt_ty Return(expr_ty value, int lineno, int col_offset, int end_lineno, int 
/* (continuation: tail of Return's parameter list from the previous line) */
end_col_offset, PyArena *arena)
{
    stmt_ty p;
    p = (stmt_ty)PyArena_Malloc(arena, sizeof(*p));
    if (!p)
        return NULL;
    p->kind = Return_kind;
    /* no NULL check on `value`, unlike Expr below: a bare `return` has
       no expression, so NULL is a legal stored value here */
    p->v.Return.value = value;
    p->lineno = lineno;
    p->col_offset = col_offset;
    p->end_lineno = end_lineno;
    p->end_col_offset = end_col_offset;
    return p;
}

/* Delete(targets): build a Delete statement node; targets stored as-is. */
stmt_ty
Delete(asdl_seq * targets, int lineno, int col_offset, int end_lineno,
       int end_col_offset, PyArena *arena)
{
    stmt_ty p;
    p = (stmt_ty)PyArena_Malloc(arena, sizeof(*p));
    if (!p)
        return NULL;
    p->kind = Delete_kind;
    p->v.Delete.targets = targets;
    p->lineno = lineno;
    p->col_offset = col_offset;
    p->end_lineno = end_lineno;
    p->end_col_offset = end_col_offset;
    return p;
}

/* Assign(targets, value, type_comment): `value` is required
   (ValueError on NULL); `targets` and `type_comment` are optional. */
stmt_ty
Assign(asdl_seq * targets, expr_ty value, string type_comment, int
       lineno, int col_offset, int end_lineno, int end_col_offset,
       PyArena *arena)
{
    stmt_ty p;
    if (!value) {
        PyErr_SetString(PyExc_ValueError,
                        "field value is required for Assign");
        return NULL;
    }
    p = (stmt_ty)PyArena_Malloc(arena, sizeof(*p));
    if (!p)
        return NULL;
    p->kind = Assign_kind;
    p->v.Assign.targets = targets;
    p->v.Assign.value = value;
    p->v.Assign.type_comment = type_comment;
    p->lineno = lineno;
    p->col_offset = col_offset;
    p->end_lineno = end_lineno;
    p->end_col_offset = end_col_offset;
    return p;
}

/* AugAssign(target, op, value): all three child fields are required.
   (Final assignment is completed on the next physical line.) */
stmt_ty
AugAssign(expr_ty target, operator_ty op, expr_ty value, int lineno,
          int col_offset, int end_lineno, int end_col_offset,
          PyArena *arena)
{
    stmt_ty p;
    if (!target) {
        PyErr_SetString(PyExc_ValueError,
                        "field target is required for AugAssign");
        return NULL;
    }
    if (!op) {
        PyErr_SetString(PyExc_ValueError,
                        "field op is required for AugAssign");
        return NULL;
    }
    if (!value) {
        PyErr_SetString(PyExc_ValueError,
                        "field value is required for AugAssign");
        return NULL;
    }
    p = (stmt_ty)PyArena_Malloc(arena, sizeof(*p));
    if (!p)
        return NULL;
    p->kind = AugAssign_kind;
    p->v.AugAssign.target = target;
    p->v.AugAssign.op = op;
    p->v.AugAssign.value = value;
    p->lineno = lineno;
    p->col_offset = col_offset;
    p->end_lineno = end_lineno;
    p->end_col_offset =
end_col_offset; return p; } stmt_ty AnnAssign(expr_ty target, expr_ty annotation, expr_ty value, int simple, int lineno, int col_offset, int end_lineno, int end_col_offset, PyArena *arena) { stmt_ty p; if (!target) { PyErr_SetString(PyExc_ValueError, "field target is required for AnnAssign"); return NULL; } if (!annotation) { PyErr_SetString(PyExc_ValueError, "field annotation is required for AnnAssign"); return NULL; } p = (stmt_ty)PyArena_Malloc(arena, sizeof(*p)); if (!p) return NULL; p->kind = AnnAssign_kind; p->v.AnnAssign.target = target; p->v.AnnAssign.annotation = annotation; p->v.AnnAssign.value = value; p->v.AnnAssign.simple = simple; p->lineno = lineno; p->col_offset = col_offset; p->end_lineno = end_lineno; p->end_col_offset = end_col_offset; return p; } stmt_ty For(expr_ty target, expr_ty iter, asdl_seq * body, asdl_seq * orelse, string type_comment, int lineno, int col_offset, int end_lineno, int end_col_offset, PyArena *arena) { stmt_ty p; if (!target) { PyErr_SetString(PyExc_ValueError, "field target is required for For"); return NULL; } if (!iter) { PyErr_SetString(PyExc_ValueError, "field iter is required for For"); return NULL; } p = (stmt_ty)PyArena_Malloc(arena, sizeof(*p)); if (!p) return NULL; p->kind = For_kind; p->v.For.target = target; p->v.For.iter = iter; p->v.For.body = body; p->v.For.orelse = orelse; p->v.For.type_comment = type_comment; p->lineno = lineno; p->col_offset = col_offset; p->end_lineno = end_lineno; p->end_col_offset = end_col_offset; return p; } stmt_ty AsyncFor(expr_ty target, expr_ty iter, asdl_seq * body, asdl_seq * orelse, string type_comment, int lineno, int col_offset, int end_lineno, int end_col_offset, PyArena *arena) { stmt_ty p; if (!target) { PyErr_SetString(PyExc_ValueError, "field target is required for AsyncFor"); return NULL; } if (!iter) { PyErr_SetString(PyExc_ValueError, "field iter is required for AsyncFor"); return NULL; } p = (stmt_ty)PyArena_Malloc(arena, sizeof(*p)); if (!p) return NULL; p->kind = 
AsyncFor_kind; p->v.AsyncFor.target = target; p->v.AsyncFor.iter = iter; p->v.AsyncFor.body = body; p->v.AsyncFor.orelse = orelse; p->v.AsyncFor.type_comment = type_comment; p->lineno = lineno; p->col_offset = col_offset; p->end_lineno = end_lineno; p->end_col_offset = end_col_offset; return p; } stmt_ty While(expr_ty test, asdl_seq * body, asdl_seq * orelse, int lineno, int col_offset, int end_lineno, int end_col_offset, PyArena *arena) { stmt_ty p; if (!test) { PyErr_SetString(PyExc_ValueError, "field test is required for While"); return NULL; } p = (stmt_ty)PyArena_Malloc(arena, sizeof(*p)); if (!p) return NULL; p->kind = While_kind; p->v.While.test = test; p->v.While.body = body; p->v.While.orelse = orelse; p->lineno = lineno; p->col_offset = col_offset; p->end_lineno = end_lineno; p->end_col_offset = end_col_offset; return p; } stmt_ty If(expr_ty test, asdl_seq * body, asdl_seq * orelse, int lineno, int col_offset, int end_lineno, int end_col_offset, PyArena *arena) { stmt_ty p; if (!test) { PyErr_SetString(PyExc_ValueError, "field test is required for If"); return NULL; } p = (stmt_ty)PyArena_Malloc(arena, sizeof(*p)); if (!p) return NULL; p->kind = If_kind; p->v.If.test = test; p->v.If.body = body; p->v.If.orelse = orelse; p->lineno = lineno; p->col_offset = col_offset; p->end_lineno = end_lineno; p->end_col_offset = end_col_offset; return p; } stmt_ty With(asdl_seq * items, asdl_seq * body, string type_comment, int lineno, int col_offset, int end_lineno, int end_col_offset, PyArena *arena) { stmt_ty p; p = (stmt_ty)PyArena_Malloc(arena, sizeof(*p)); if (!p) return NULL; p->kind = With_kind; p->v.With.items = items; p->v.With.body = body; p->v.With.type_comment = type_comment; p->lineno = lineno; p->col_offset = col_offset; p->end_lineno = end_lineno; p->end_col_offset = end_col_offset; return p; } stmt_ty AsyncWith(asdl_seq * items, asdl_seq * body, string type_comment, int lineno, int col_offset, int end_lineno, int end_col_offset, PyArena *arena) { 
stmt_ty p; p = (stmt_ty)PyArena_Malloc(arena, sizeof(*p)); if (!p) return NULL; p->kind = AsyncWith_kind; p->v.AsyncWith.items = items; p->v.AsyncWith.body = body; p->v.AsyncWith.type_comment = type_comment; p->lineno = lineno; p->col_offset = col_offset; p->end_lineno = end_lineno; p->end_col_offset = end_col_offset; return p; } stmt_ty Raise(expr_ty exc, expr_ty cause, int lineno, int col_offset, int end_lineno, int end_col_offset, PyArena *arena) { stmt_ty p; p = (stmt_ty)PyArena_Malloc(arena, sizeof(*p)); if (!p) return NULL; p->kind = Raise_kind; p->v.Raise.exc = exc; p->v.Raise.cause = cause; p->lineno = lineno; p->col_offset = col_offset; p->end_lineno = end_lineno; p->end_col_offset = end_col_offset; return p; } stmt_ty Try(asdl_seq * body, asdl_seq * handlers, asdl_seq * orelse, asdl_seq * finalbody, int lineno, int col_offset, int end_lineno, int end_col_offset, PyArena *arena) { stmt_ty p; p = (stmt_ty)PyArena_Malloc(arena, sizeof(*p)); if (!p) return NULL; p->kind = Try_kind; p->v.Try.body = body; p->v.Try.handlers = handlers; p->v.Try.orelse = orelse; p->v.Try.finalbody = finalbody; p->lineno = lineno; p->col_offset = col_offset; p->end_lineno = end_lineno; p->end_col_offset = end_col_offset; return p; } stmt_ty Assert(expr_ty test, expr_ty msg, int lineno, int col_offset, int end_lineno, int end_col_offset, PyArena *arena) { stmt_ty p; if (!test) { PyErr_SetString(PyExc_ValueError, "field test is required for Assert"); return NULL; } p = (stmt_ty)PyArena_Malloc(arena, sizeof(*p)); if (!p) return NULL; p->kind = Assert_kind; p->v.Assert.test = test; p->v.Assert.msg = msg; p->lineno = lineno; p->col_offset = col_offset; p->end_lineno = end_lineno; p->end_col_offset = end_col_offset; return p; } stmt_ty Import(asdl_seq * names, int lineno, int col_offset, int end_lineno, int end_col_offset, PyArena *arena) { stmt_ty p; p = (stmt_ty)PyArena_Malloc(arena, sizeof(*p)); if (!p) return NULL; p->kind = Import_kind; p->v.Import.names = names; p->lineno = 
lineno; p->col_offset = col_offset; p->end_lineno = end_lineno; p->end_col_offset = end_col_offset; return p; } stmt_ty ImportFrom(identifier module, asdl_seq * names, int level, int lineno, int col_offset, int end_lineno, int end_col_offset, PyArena *arena) { stmt_ty p; p = (stmt_ty)PyArena_Malloc(arena, sizeof(*p)); if (!p) return NULL; p->kind = ImportFrom_kind; p->v.ImportFrom.module = module; p->v.ImportFrom.names = names; p->v.ImportFrom.level = level; p->lineno = lineno; p->col_offset = col_offset; p->end_lineno = end_lineno; p->end_col_offset = end_col_offset; return p; } stmt_ty Global(asdl_seq * names, int lineno, int col_offset, int end_lineno, int end_col_offset, PyArena *arena) { stmt_ty p; p = (stmt_ty)PyArena_Malloc(arena, sizeof(*p)); if (!p) return NULL; p->kind = Global_kind; p->v.Global.names = names; p->lineno = lineno; p->col_offset = col_offset; p->end_lineno = end_lineno; p->end_col_offset = end_col_offset; return p; } stmt_ty Nonlocal(asdl_seq * names, int lineno, int col_offset, int end_lineno, int end_col_offset, PyArena *arena) { stmt_ty p; p = (stmt_ty)PyArena_Malloc(arena, sizeof(*p)); if (!p) return NULL; p->kind = Nonlocal_kind; p->v.Nonlocal.names = names; p->lineno = lineno; p->col_offset = col_offset; p->end_lineno = end_lineno; p->end_col_offset = end_col_offset; return p; } stmt_ty Expr(expr_ty value, int lineno, int col_offset, int end_lineno, int end_col_offset, PyArena *arena) { stmt_ty p; if (!value) { PyErr_SetString(PyExc_ValueError, "field value is required for Expr"); return NULL; } p = (stmt_ty)PyArena_Malloc(arena, sizeof(*p)); if (!p) return NULL; p->kind = Expr_kind; p->v.Expr.value = value; p->lineno = lineno; p->col_offset = col_offset; p->end_lineno = end_lineno; p->end_col_offset = end_col_offset; return p; } stmt_ty Pass(int lineno, int col_offset, int end_lineno, int end_col_offset, PyArena *arena) { stmt_ty p; p = (stmt_ty)PyArena_Malloc(arena, sizeof(*p)); if (!p) return NULL; p->kind = Pass_kind; p->lineno = 
lineno; p->col_offset = col_offset; p->end_lineno = end_lineno; p->end_col_offset = end_col_offset; return p; } stmt_ty Break(int lineno, int col_offset, int end_lineno, int end_col_offset, PyArena *arena) { stmt_ty p; p = (stmt_ty)PyArena_Malloc(arena, sizeof(*p)); if (!p) return NULL; p->kind = Break_kind; p->lineno = lineno; p->col_offset = col_offset; p->end_lineno = end_lineno; p->end_col_offset = end_col_offset; return p; } stmt_ty Continue(int lineno, int col_offset, int end_lineno, int end_col_offset, PyArena *arena) { stmt_ty p; p = (stmt_ty)PyArena_Malloc(arena, sizeof(*p)); if (!p) return NULL; p->kind = Continue_kind; p->lineno = lineno; p->col_offset = col_offset; p->end_lineno = end_lineno; p->end_col_offset = end_col_offset; return p; } expr_ty BoolOp(boolop_ty op, asdl_seq * values, int lineno, int col_offset, int end_lineno, int end_col_offset, PyArena *arena) { expr_ty p; if (!op) { PyErr_SetString(PyExc_ValueError, "field op is required for BoolOp"); return NULL; } p = (expr_ty)PyArena_Malloc(arena, sizeof(*p)); if (!p) return NULL; p->kind = BoolOp_kind; p->v.BoolOp.op = op; p->v.BoolOp.values = values; p->lineno = lineno; p->col_offset = col_offset; p->end_lineno = end_lineno; p->end_col_offset = end_col_offset; return p; } expr_ty NamedExpr(expr_ty target, expr_ty value, int lineno, int col_offset, int end_lineno, int end_col_offset, PyArena *arena) { expr_ty p; if (!target) { PyErr_SetString(PyExc_ValueError, "field target is required for NamedExpr"); return NULL; } if (!value) { PyErr_SetString(PyExc_ValueError, "field value is required for NamedExpr"); return NULL; } p = (expr_ty)PyArena_Malloc(arena, sizeof(*p)); if (!p) return NULL; p->kind = NamedExpr_kind; p->v.NamedExpr.target = target; p->v.NamedExpr.value = value; p->lineno = lineno; p->col_offset = col_offset; p->end_lineno = end_lineno; p->end_col_offset = end_col_offset; return p; } expr_ty BinOp(expr_ty left, operator_ty op, expr_ty right, int lineno, int col_offset, int 
end_lineno, int end_col_offset, PyArena *arena) { expr_ty p; if (!left) { PyErr_SetString(PyExc_ValueError, "field left is required for BinOp"); return NULL; } if (!op) { PyErr_SetString(PyExc_ValueError, "field op is required for BinOp"); return NULL; } if (!right) { PyErr_SetString(PyExc_ValueError, "field right is required for BinOp"); return NULL; } p = (expr_ty)PyArena_Malloc(arena, sizeof(*p)); if (!p) return NULL; p->kind = BinOp_kind; p->v.BinOp.left = left; p->v.BinOp.op = op; p->v.BinOp.right = right; p->lineno = lineno; p->col_offset = col_offset; p->end_lineno = end_lineno; p->end_col_offset = end_col_offset; return p; } expr_ty UnaryOp(unaryop_ty op, expr_ty operand, int lineno, int col_offset, int end_lineno, int end_col_offset, PyArena *arena) { expr_ty p; if (!op) { PyErr_SetString(PyExc_ValueError, "field op is required for UnaryOp"); return NULL; } if (!operand) { PyErr_SetString(PyExc_ValueError, "field operand is required for UnaryOp"); return NULL; } p = (expr_ty)PyArena_Malloc(arena, sizeof(*p)); if (!p) return NULL; p->kind = UnaryOp_kind; p->v.UnaryOp.op = op; p->v.UnaryOp.operand = operand; p->lineno = lineno; p->col_offset = col_offset; p->end_lineno = end_lineno; p->end_col_offset = end_col_offset; return p; } expr_ty Lambda(arguments_ty args, expr_ty body, int lineno, int col_offset, int end_lineno, int end_col_offset, PyArena *arena) { expr_ty p; if (!args) { PyErr_SetString(PyExc_ValueError, "field args is required for Lambda"); return NULL; } if (!body) { PyErr_SetString(PyExc_ValueError, "field body is required for Lambda"); return NULL; } p = (expr_ty)PyArena_Malloc(arena, sizeof(*p)); if (!p) return NULL; p->kind = Lambda_kind; p->v.Lambda.args = args; p->v.Lambda.body = body; p->lineno = lineno; p->col_offset = col_offset; p->end_lineno = end_lineno; p->end_col_offset = end_col_offset; return p; } expr_ty IfExp(expr_ty test, expr_ty body, expr_ty orelse, int lineno, int col_offset, int end_lineno, int end_col_offset, PyArena 
*arena) { expr_ty p; if (!test) { PyErr_SetString(PyExc_ValueError, "field test is required for IfExp"); return NULL; } if (!body) { PyErr_SetString(PyExc_ValueError, "field body is required for IfExp"); return NULL; } if (!orelse) { PyErr_SetString(PyExc_ValueError, "field orelse is required for IfExp"); return NULL; } p = (expr_ty)PyArena_Malloc(arena, sizeof(*p)); if (!p) return NULL; p->kind = IfExp_kind; p->v.IfExp.test = test; p->v.IfExp.body = body; p->v.IfExp.orelse = orelse; p->lineno = lineno; p->col_offset = col_offset; p->end_lineno = end_lineno; p->end_col_offset = end_col_offset; return p; } expr_ty Dict(asdl_seq * keys, asdl_seq * values, int lineno, int col_offset, int end_lineno, int end_col_offset, PyArena *arena) { expr_ty p; p = (expr_ty)PyArena_Malloc(arena, sizeof(*p)); if (!p) return NULL; p->kind = Dict_kind; p->v.Dict.keys = keys; p->v.Dict.values = values; p->lineno = lineno; p->col_offset = col_offset; p->end_lineno = end_lineno; p->end_col_offset = end_col_offset; return p; } expr_ty Set(asdl_seq * elts, int lineno, int col_offset, int end_lineno, int end_col_offset, PyArena *arena) { expr_ty p; p = (expr_ty)PyArena_Malloc(arena, sizeof(*p)); if (!p) return NULL; p->kind = Set_kind; p->v.Set.elts = elts; p->lineno = lineno; p->col_offset = col_offset; p->end_lineno = end_lineno; p->end_col_offset = end_col_offset; return p; } expr_ty ListComp(expr_ty elt, asdl_seq * generators, int lineno, int col_offset, int end_lineno, int end_col_offset, PyArena *arena) { expr_ty p; if (!elt) { PyErr_SetString(PyExc_ValueError, "field elt is required for ListComp"); return NULL; } p = (expr_ty)PyArena_Malloc(arena, sizeof(*p)); if (!p) return NULL; p->kind = ListComp_kind; p->v.ListComp.elt = elt; p->v.ListComp.generators = generators; p->lineno = lineno; p->col_offset = col_offset; p->end_lineno = end_lineno; p->end_col_offset = end_col_offset; return p; } expr_ty SetComp(expr_ty elt, asdl_seq * generators, int lineno, int col_offset, int end_lineno, 
int end_col_offset, PyArena *arena) { expr_ty p; if (!elt) { PyErr_SetString(PyExc_ValueError, "field elt is required for SetComp"); return NULL; } p = (expr_ty)PyArena_Malloc(arena, sizeof(*p)); if (!p) return NULL; p->kind = SetComp_kind; p->v.SetComp.elt = elt; p->v.SetComp.generators = generators; p->lineno = lineno; p->col_offset = col_offset; p->end_lineno = end_lineno; p->end_col_offset = end_col_offset; return p; } expr_ty DictComp(expr_ty key, expr_ty value, asdl_seq * generators, int lineno, int col_offset, int end_lineno, int end_col_offset, PyArena *arena) { expr_ty p; if (!key) { PyErr_SetString(PyExc_ValueError, "field key is required for DictComp"); return NULL; } if (!value) { PyErr_SetString(PyExc_ValueError, "field value is required for DictComp"); return NULL; } p = (expr_ty)PyArena_Malloc(arena, sizeof(*p)); if (!p) return NULL; p->kind = DictComp_kind; p->v.DictComp.key = key; p->v.DictComp.value = value; p->v.DictComp.generators = generators; p->lineno = lineno; p->col_offset = col_offset; p->end_lineno = end_lineno; p->end_col_offset = end_col_offset; return p; } expr_ty GeneratorExp(expr_ty elt, asdl_seq * generators, int lineno, int col_offset, int end_lineno, int end_col_offset, PyArena *arena) { expr_ty p; if (!elt) { PyErr_SetString(PyExc_ValueError, "field elt is required for GeneratorExp"); return NULL; } p = (expr_ty)PyArena_Malloc(arena, sizeof(*p)); if (!p) return NULL; p->kind = GeneratorExp_kind; p->v.GeneratorExp.elt = elt; p->v.GeneratorExp.generators = generators; p->lineno = lineno; p->col_offset = col_offset; p->end_lineno = end_lineno; p->end_col_offset = end_col_offset; return p; } expr_ty Await(expr_ty value, int lineno, int col_offset, int end_lineno, int end_col_offset, PyArena *arena) { expr_ty p; if (!value) { PyErr_SetString(PyExc_ValueError, "field value is required for Await"); return NULL; } p = (expr_ty)PyArena_Malloc(arena, sizeof(*p)); if (!p) return NULL; p->kind = Await_kind; p->v.Await.value = value; 
p->lineno = lineno; p->col_offset = col_offset; p->end_lineno = end_lineno; p->end_col_offset = end_col_offset; return p; } expr_ty Yield(expr_ty value, int lineno, int col_offset, int end_lineno, int end_col_offset, PyArena *arena) { expr_ty p; p = (expr_ty)PyArena_Malloc(arena, sizeof(*p)); if (!p) return NULL; p->kind = Yield_kind; p->v.Yield.value = value; p->lineno = lineno; p->col_offset = col_offset; p->end_lineno = end_lineno; p->end_col_offset = end_col_offset; return p; } expr_ty YieldFrom(expr_ty value, int lineno, int col_offset, int end_lineno, int end_col_offset, PyArena *arena) { expr_ty p; if (!value) { PyErr_SetString(PyExc_ValueError, "field value is required for YieldFrom"); return NULL; } p = (expr_ty)PyArena_Malloc(arena, sizeof(*p)); if (!p) return NULL; p->kind = YieldFrom_kind; p->v.YieldFrom.value = value; p->lineno = lineno; p->col_offset = col_offset; p->end_lineno = end_lineno; p->end_col_offset = end_col_offset; return p; } expr_ty Compare(expr_ty left, asdl_int_seq * ops, asdl_seq * comparators, int lineno, int col_offset, int end_lineno, int end_col_offset, PyArena *arena) { expr_ty p; if (!left) { PyErr_SetString(PyExc_ValueError, "field left is required for Compare"); return NULL; } p = (expr_ty)PyArena_Malloc(arena, sizeof(*p)); if (!p) return NULL; p->kind = Compare_kind; p->v.Compare.left = left; p->v.Compare.ops = ops; p->v.Compare.comparators = comparators; p->lineno = lineno; p->col_offset = col_offset; p->end_lineno = end_lineno; p->end_col_offset = end_col_offset; return p; } expr_ty Call(expr_ty func, asdl_seq * args, asdl_seq * keywords, int lineno, int col_offset, int end_lineno, int end_col_offset, PyArena *arena) { expr_ty p; if (!func) { PyErr_SetString(PyExc_ValueError, "field func is required for Call"); return NULL; } p = (expr_ty)PyArena_Malloc(arena, sizeof(*p)); if (!p) return NULL; p->kind = Call_kind; p->v.Call.func = func; p->v.Call.args = args; p->v.Call.keywords = keywords; p->lineno = lineno; p->col_offset 
= col_offset; p->end_lineno = end_lineno; p->end_col_offset = end_col_offset; return p; } expr_ty FormattedValue(expr_ty value, int conversion, expr_ty format_spec, int lineno, int col_offset, int end_lineno, int end_col_offset, PyArena *arena) { expr_ty p; if (!value) { PyErr_SetString(PyExc_ValueError, "field value is required for FormattedValue"); return NULL; } p = (expr_ty)PyArena_Malloc(arena, sizeof(*p)); if (!p) return NULL; p->kind = FormattedValue_kind; p->v.FormattedValue.value = value; p->v.FormattedValue.conversion = conversion; p->v.FormattedValue.format_spec = format_spec; p->lineno = lineno; p->col_offset = col_offset; p->end_lineno = end_lineno; p->end_col_offset = end_col_offset; return p; } expr_ty JoinedStr(asdl_seq * values, int lineno, int col_offset, int end_lineno, int end_col_offset, PyArena *arena) { expr_ty p; p = (expr_ty)PyArena_Malloc(arena, sizeof(*p)); if (!p) return NULL; p->kind = JoinedStr_kind; p->v.JoinedStr.values = values; p->lineno = lineno; p->col_offset = col_offset; p->end_lineno = end_lineno; p->end_col_offset = end_col_offset; return p; } expr_ty Constant(constant value, int lineno, int col_offset, int end_lineno, int end_col_offset, PyArena *arena) { expr_ty p; if (!value) { PyErr_SetString(PyExc_ValueError, "field value is required for Constant"); return NULL; } p = (expr_ty)PyArena_Malloc(arena, sizeof(*p)); if (!p) return NULL; p->kind = Constant_kind; p->v.Constant.value = value; p->lineno = lineno; p->col_offset = col_offset; p->end_lineno = end_lineno; p->end_col_offset = end_col_offset; return p; } expr_ty Attribute(expr_ty value, identifier attr, expr_context_ty ctx, int lineno, int col_offset, int end_lineno, int end_col_offset, PyArena *arena) { expr_ty p; if (!value) { PyErr_SetString(PyExc_ValueError, "field value is required for Attribute"); return NULL; } if (!attr) { PyErr_SetString(PyExc_ValueError, "field attr is required for Attribute"); return NULL; } if (!ctx) { PyErr_SetString(PyExc_ValueError, 
"field ctx is required for Attribute"); return NULL; } p = (expr_ty)PyArena_Malloc(arena, sizeof(*p)); if (!p) return NULL; p->kind = Attribute_kind; p->v.Attribute.value = value; p->v.Attribute.attr = attr; p->v.Attribute.ctx = ctx; p->lineno = lineno; p->col_offset = col_offset; p->end_lineno = end_lineno; p->end_col_offset = end_col_offset; return p; } expr_ty Subscript(expr_ty value, slice_ty slice, expr_context_ty ctx, int lineno, int col_offset, int end_lineno, int end_col_offset, PyArena *arena) { expr_ty p; if (!value) { PyErr_SetString(PyExc_ValueError, "field value is required for Subscript"); return NULL; } if (!slice) { PyErr_SetString(PyExc_ValueError, "field slice is required for Subscript"); return NULL; } if (!ctx) { PyErr_SetString(PyExc_ValueError, "field ctx is required for Subscript"); return NULL; } p = (expr_ty)PyArena_Malloc(arena, sizeof(*p)); if (!p) return NULL; p->kind = Subscript_kind; p->v.Subscript.value = value; p->v.Subscript.slice = slice; p->v.Subscript.ctx = ctx; p->lineno = lineno; p->col_offset = col_offset; p->end_lineno = end_lineno; p->end_col_offset = end_col_offset; return p; } expr_ty Starred(expr_ty value, expr_context_ty ctx, int lineno, int col_offset, int end_lineno, int end_col_offset, PyArena *arena) { expr_ty p; if (!value) { PyErr_SetString(PyExc_ValueError, "field value is required for Starred"); return NULL; } if (!ctx) { PyErr_SetString(PyExc_ValueError, "field ctx is required for Starred"); return NULL; } p = (expr_ty)PyArena_Malloc(arena, sizeof(*p)); if (!p) return NULL; p->kind = Starred_kind; p->v.Starred.value = value; p->v.Starred.ctx = ctx; p->lineno = lineno; p->col_offset = col_offset; p->end_lineno = end_lineno; p->end_col_offset = end_col_offset; return p; } expr_ty Name(identifier id, expr_context_ty ctx, int lineno, int col_offset, int end_lineno, int end_col_offset, PyArena *arena) { expr_ty p; if (!id) { PyErr_SetString(PyExc_ValueError, "field id is required for Name"); return NULL; } if (!ctx) 
{ PyErr_SetString(PyExc_ValueError, "field ctx is required for Name"); return NULL; } p = (expr_ty)PyArena_Malloc(arena, sizeof(*p)); if (!p) return NULL; p->kind = Name_kind; p->v.Name.id = id; p->v.Name.ctx = ctx; p->lineno = lineno; p->col_offset = col_offset; p->end_lineno = end_lineno; p->end_col_offset = end_col_offset; return p; } expr_ty List(asdl_seq * elts, expr_context_ty ctx, int lineno, int col_offset, int end_lineno, int end_col_offset, PyArena *arena) { expr_ty p; if (!ctx) { PyErr_SetString(PyExc_ValueError, "field ctx is required for List"); return NULL; } p = (expr_ty)PyArena_Malloc(arena, sizeof(*p)); if (!p) return NULL; p->kind = List_kind; p->v.List.elts = elts; p->v.List.ctx = ctx; p->lineno = lineno; p->col_offset = col_offset; p->end_lineno = end_lineno; p->end_col_offset = end_col_offset; return p; } expr_ty Tuple(asdl_seq * elts, expr_context_ty ctx, int lineno, int col_offset, int end_lineno, int end_col_offset, PyArena *arena) { expr_ty p; if (!ctx) { PyErr_SetString(PyExc_ValueError, "field ctx is required for Tuple"); return NULL; } p = (expr_ty)PyArena_Malloc(arena, sizeof(*p)); if (!p) return NULL; p->kind = Tuple_kind; p->v.Tuple.elts = elts; p->v.Tuple.ctx = ctx; p->lineno = lineno; p->col_offset = col_offset; p->end_lineno = end_lineno; p->end_col_offset = end_col_offset; return p; } slice_ty Slice(expr_ty lower, expr_ty upper, expr_ty step, PyArena *arena) { slice_ty p; p = (slice_ty)PyArena_Malloc(arena, sizeof(*p)); if (!p) return NULL; p->kind = Slice_kind; p->v.Slice.lower = lower; p->v.Slice.upper = upper; p->v.Slice.step = step; return p; } slice_ty ExtSlice(asdl_seq * dims, PyArena *arena) { slice_ty p; p = (slice_ty)PyArena_Malloc(arena, sizeof(*p)); if (!p) return NULL; p->kind = ExtSlice_kind; p->v.ExtSlice.dims = dims; return p; } slice_ty Index(expr_ty value, PyArena *arena) { slice_ty p; if (!value) { PyErr_SetString(PyExc_ValueError, "field value is required for Index"); return NULL; } p = 
(slice_ty)PyArena_Malloc(arena, sizeof(*p)); if (!p) return NULL; p->kind = Index_kind; p->v.Index.value = value; return p; } comprehension_ty comprehension(expr_ty target, expr_ty iter, asdl_seq * ifs, int is_async, PyArena *arena) { comprehension_ty p; if (!target) { PyErr_SetString(PyExc_ValueError, "field target is required for comprehension"); return NULL; } if (!iter) { PyErr_SetString(PyExc_ValueError, "field iter is required for comprehension"); return NULL; } p = (comprehension_ty)PyArena_Malloc(arena, sizeof(*p)); if (!p) return NULL; p->target = target; p->iter = iter; p->ifs = ifs; p->is_async = is_async; return p; } excepthandler_ty ExceptHandler(expr_ty type, identifier name, asdl_seq * body, int lineno, int col_offset, int end_lineno, int end_col_offset, PyArena *arena) { excepthandler_ty p; p = (excepthandler_ty)PyArena_Malloc(arena, sizeof(*p)); if (!p) return NULL; p->kind = ExceptHandler_kind; p->v.ExceptHandler.type = type; p->v.ExceptHandler.name = name; p->v.ExceptHandler.body = body; p->lineno = lineno; p->col_offset = col_offset; p->end_lineno = end_lineno; p->end_col_offset = end_col_offset; return p; } arguments_ty arguments(asdl_seq * args, arg_ty vararg, asdl_seq * kwonlyargs, asdl_seq * kw_defaults, arg_ty kwarg, asdl_seq * defaults, PyArena *arena) { arguments_ty p; p = (arguments_ty)PyArena_Malloc(arena, sizeof(*p)); if (!p) return NULL; p->args = args; p->vararg = vararg; p->kwonlyargs = kwonlyargs; p->kw_defaults = kw_defaults; p->kwarg = kwarg; p->defaults = defaults; return p; } arg_ty arg(identifier arg, expr_ty annotation, string type_comment, int lineno, int col_offset, int end_lineno, int end_col_offset, PyArena *arena) { arg_ty p; if (!arg) { PyErr_SetString(PyExc_ValueError, "field arg is required for arg"); return NULL; } p = (arg_ty)PyArena_Malloc(arena, sizeof(*p)); if (!p) return NULL; p->arg = arg; p->annotation = annotation; p->type_comment = type_comment; p->lineno = lineno; p->col_offset = col_offset; p->end_lineno = 
/* (continuation: tail of arg() — remaining location fields) */
end_lineno;
    p->end_col_offset = end_col_offset;
    return p;
}

/* keyword(arg, value): call/class keyword argument; `value` required,
   `arg` may be NULL (that encodes a **kwargs-style keyword). */
keyword_ty
keyword(identifier arg, expr_ty value, PyArena *arena)
{
    keyword_ty p;
    if (!value) {
        PyErr_SetString(PyExc_ValueError,
                        "field value is required for keyword");
        return NULL;
    }
    p = (keyword_ty)PyArena_Malloc(arena, sizeof(*p));
    if (!p)
        return NULL;
    p->arg = arg;
    p->value = value;
    return p;
}

/* alias(name, asname): import alias; `name` required, `asname` optional. */
alias_ty
alias(identifier name, identifier asname, PyArena *arena)
{
    alias_ty p;
    if (!name) {
        PyErr_SetString(PyExc_ValueError,
                        "field name is required for alias");
        return NULL;
    }
    p = (alias_ty)PyArena_Malloc(arena, sizeof(*p));
    if (!p)
        return NULL;
    p->name = name;
    p->asname = asname;
    return p;
}

/* withitem(context_expr, optional_vars): one item of a with-statement;
   `context_expr` required, `optional_vars` optional. */
withitem_ty
withitem(expr_ty context_expr, expr_ty optional_vars, PyArena *arena)
{
    withitem_ty p;
    if (!context_expr) {
        PyErr_SetString(PyExc_ValueError,
                        "field context_expr is required for withitem");
        return NULL;
    }
    p = (withitem_ty)PyArena_Malloc(arena, sizeof(*p));
    if (!p)
        return NULL;
    p->context_expr = context_expr;
    p->optional_vars = optional_vars;
    return p;
}

/* TypeIgnore(lineno): "# type: ignore" marker node. */
type_ignore_ty
TypeIgnore(int lineno, PyArena *arena)
{
    type_ignore_ty p;
    p = (type_ignore_ty)PyArena_Malloc(arena, sizeof(*p));
    if (!p)
        return NULL;
    p->kind = TypeIgnore_kind;
    p->v.TypeIgnore.lineno = lineno;
    return p;
}

/* ast2obj_mod: convert a C-side mod_ty into its Python-level object.
   NULL input maps to None.  Each attribute is built into `value`,
   attached with _PyObject_SetAttrId, then DECREF'd; any failure jumps
   to the shared `failed:` label (defined past this line) which releases
   whatever `result`/`value` hold via Py_XDECREF — so no reference is
   leaked on the error path.  (Switch continues on the next line.) */
PyObject*
ast2obj_mod(void* _o)
{
    mod_ty o = (mod_ty)_o;
    PyObject *result = NULL, *value = NULL;
    if (!o) {
        Py_RETURN_NONE;
    }
    switch (o->kind) {
    case Module_kind:
        result = PyType_GenericNew(Module_type, NULL, NULL);
        if (!result) goto failed;
        value = ast2obj_list(o->v.Module.body, ast2obj_stmt);
        if (!value) goto failed;
        if (_PyObject_SetAttrId(result, &PyId_body, value) == -1)
            goto failed;
        Py_DECREF(value);
        value = ast2obj_list(o->v.Module.type_ignores, ast2obj_type_ignore);
        if (!value) goto failed;
        if (_PyObject_SetAttrId(result, &PyId_type_ignores, value) == -1)
            goto failed;
        Py_DECREF(value);
        break;
    case Interactive_kind:
        result = PyType_GenericNew(Interactive_type, NULL, NULL);
        if (!result) goto failed;
        value =
ast2obj_list(o->v.Interactive.body, ast2obj_stmt); if (!value) goto failed; if (_PyObject_SetAttrId(result, &PyId_body, value) == -1) goto failed; Py_DECREF(value); break; case Expression_kind: result = PyType_GenericNew(Expression_type, NULL, NULL); if (!result) goto failed; value = ast2obj_expr(o->v.Expression.body); if (!value) goto failed; if (_PyObject_SetAttrId(result, &PyId_body, value) == -1) goto failed; Py_DECREF(value); break; case FunctionType_kind: result = PyType_GenericNew(FunctionType_type, NULL, NULL); if (!result) goto failed; value = ast2obj_list(o->v.FunctionType.argtypes, ast2obj_expr); if (!value) goto failed; if (_PyObject_SetAttrId(result, &PyId_argtypes, value) == -1) goto failed; Py_DECREF(value); value = ast2obj_expr(o->v.FunctionType.returns); if (!value) goto failed; if (_PyObject_SetAttrId(result, &PyId_returns, value) == -1) goto failed; Py_DECREF(value); break; case Suite_kind: result = PyType_GenericNew(Suite_type, NULL, NULL); if (!result) goto failed; value = ast2obj_list(o->v.Suite.body, ast2obj_stmt); if (!value) goto failed; if (_PyObject_SetAttrId(result, &PyId_body, value) == -1) goto failed; Py_DECREF(value); break; } return result; failed: Py_XDECREF(value); Py_XDECREF(result); return NULL; } PyObject* ast2obj_stmt(void* _o) { stmt_ty o = (stmt_ty)_o; PyObject *result = NULL, *value = NULL; if (!o) { Py_RETURN_NONE; } switch (o->kind) { case FunctionDef_kind: result = PyType_GenericNew(FunctionDef_type, NULL, NULL); if (!result) goto failed; value = ast2obj_identifier(o->v.FunctionDef.name); if (!value) goto failed; if (_PyObject_SetAttrId(result, &PyId_name, value) == -1) goto failed; Py_DECREF(value); value = ast2obj_arguments(o->v.FunctionDef.args); if (!value) goto failed; if (_PyObject_SetAttrId(result, &PyId_args, value) == -1) goto failed; Py_DECREF(value); value = ast2obj_list(o->v.FunctionDef.body, ast2obj_stmt); if (!value) goto failed; if (_PyObject_SetAttrId(result, &PyId_body, value) == -1) goto failed; 
Py_DECREF(value); value = ast2obj_list(o->v.FunctionDef.decorator_list, ast2obj_expr); if (!value) goto failed; if (_PyObject_SetAttrId(result, &PyId_decorator_list, value) == -1) goto failed; Py_DECREF(value); value = ast2obj_expr(o->v.FunctionDef.returns); if (!value) goto failed; if (_PyObject_SetAttrId(result, &PyId_returns, value) == -1) goto failed; Py_DECREF(value); value = ast2obj_string(o->v.FunctionDef.type_comment); if (!value) goto failed; if (_PyObject_SetAttrId(result, &PyId_type_comment, value) == -1) goto failed; Py_DECREF(value); break; case AsyncFunctionDef_kind: result = PyType_GenericNew(AsyncFunctionDef_type, NULL, NULL); if (!result) goto failed; value = ast2obj_identifier(o->v.AsyncFunctionDef.name); if (!value) goto failed; if (_PyObject_SetAttrId(result, &PyId_name, value) == -1) goto failed; Py_DECREF(value); value = ast2obj_arguments(o->v.AsyncFunctionDef.args); if (!value) goto failed; if (_PyObject_SetAttrId(result, &PyId_args, value) == -1) goto failed; Py_DECREF(value); value = ast2obj_list(o->v.AsyncFunctionDef.body, ast2obj_stmt); if (!value) goto failed; if (_PyObject_SetAttrId(result, &PyId_body, value) == -1) goto failed; Py_DECREF(value); value = ast2obj_list(o->v.AsyncFunctionDef.decorator_list, ast2obj_expr); if (!value) goto failed; if (_PyObject_SetAttrId(result, &PyId_decorator_list, value) == -1) goto failed; Py_DECREF(value); value = ast2obj_expr(o->v.AsyncFunctionDef.returns); if (!value) goto failed; if (_PyObject_SetAttrId(result, &PyId_returns, value) == -1) goto failed; Py_DECREF(value); value = ast2obj_string(o->v.AsyncFunctionDef.type_comment); if (!value) goto failed; if (_PyObject_SetAttrId(result, &PyId_type_comment, value) == -1) goto failed; Py_DECREF(value); break; case ClassDef_kind: result = PyType_GenericNew(ClassDef_type, NULL, NULL); if (!result) goto failed; value = ast2obj_identifier(o->v.ClassDef.name); if (!value) goto failed; if (_PyObject_SetAttrId(result, &PyId_name, value) == -1) goto failed; 
Py_DECREF(value); value = ast2obj_list(o->v.ClassDef.bases, ast2obj_expr); if (!value) goto failed; if (_PyObject_SetAttrId(result, &PyId_bases, value) == -1) goto failed; Py_DECREF(value); value = ast2obj_list(o->v.ClassDef.keywords, ast2obj_keyword); if (!value) goto failed; if (_PyObject_SetAttrId(result, &PyId_keywords, value) == -1) goto failed; Py_DECREF(value); value = ast2obj_list(o->v.ClassDef.body, ast2obj_stmt); if (!value) goto failed; if (_PyObject_SetAttrId(result, &PyId_body, value) == -1) goto failed; Py_DECREF(value); value = ast2obj_list(o->v.ClassDef.decorator_list, ast2obj_expr); if (!value) goto failed; if (_PyObject_SetAttrId(result, &PyId_decorator_list, value) == -1) goto failed; Py_DECREF(value); break; case Return_kind: result = PyType_GenericNew(Return_type, NULL, NULL); if (!result) goto failed; value = ast2obj_expr(o->v.Return.value); if (!value) goto failed; if (_PyObject_SetAttrId(result, &PyId_value, value) == -1) goto failed; Py_DECREF(value); break; case Delete_kind: result = PyType_GenericNew(Delete_type, NULL, NULL); if (!result) goto failed; value = ast2obj_list(o->v.Delete.targets, ast2obj_expr); if (!value) goto failed; if (_PyObject_SetAttrId(result, &PyId_targets, value) == -1) goto failed; Py_DECREF(value); break; case Assign_kind: result = PyType_GenericNew(Assign_type, NULL, NULL); if (!result) goto failed; value = ast2obj_list(o->v.Assign.targets, ast2obj_expr); if (!value) goto failed; if (_PyObject_SetAttrId(result, &PyId_targets, value) == -1) goto failed; Py_DECREF(value); value = ast2obj_expr(o->v.Assign.value); if (!value) goto failed; if (_PyObject_SetAttrId(result, &PyId_value, value) == -1) goto failed; Py_DECREF(value); value = ast2obj_string(o->v.Assign.type_comment); if (!value) goto failed; if (_PyObject_SetAttrId(result, &PyId_type_comment, value) == -1) goto failed; Py_DECREF(value); break; case AugAssign_kind: result = PyType_GenericNew(AugAssign_type, NULL, NULL); if (!result) goto failed; value = 
ast2obj_expr(o->v.AugAssign.target); if (!value) goto failed; if (_PyObject_SetAttrId(result, &PyId_target, value) == -1) goto failed; Py_DECREF(value); value = ast2obj_operator(o->v.AugAssign.op); if (!value) goto failed; if (_PyObject_SetAttrId(result, &PyId_op, value) == -1) goto failed; Py_DECREF(value); value = ast2obj_expr(o->v.AugAssign.value); if (!value) goto failed; if (_PyObject_SetAttrId(result, &PyId_value, value) == -1) goto failed; Py_DECREF(value); break; case AnnAssign_kind: result = PyType_GenericNew(AnnAssign_type, NULL, NULL); if (!result) goto failed; value = ast2obj_expr(o->v.AnnAssign.target); if (!value) goto failed; if (_PyObject_SetAttrId(result, &PyId_target, value) == -1) goto failed; Py_DECREF(value); value = ast2obj_expr(o->v.AnnAssign.annotation); if (!value) goto failed; if (_PyObject_SetAttrId(result, &PyId_annotation, value) == -1) goto failed; Py_DECREF(value); value = ast2obj_expr(o->v.AnnAssign.value); if (!value) goto failed; if (_PyObject_SetAttrId(result, &PyId_value, value) == -1) goto failed; Py_DECREF(value); value = ast2obj_int(o->v.AnnAssign.simple); if (!value) goto failed; if (_PyObject_SetAttrId(result, &PyId_simple, value) == -1) goto failed; Py_DECREF(value); break; case For_kind: result = PyType_GenericNew(For_type, NULL, NULL); if (!result) goto failed; value = ast2obj_expr(o->v.For.target); if (!value) goto failed; if (_PyObject_SetAttrId(result, &PyId_target, value) == -1) goto failed; Py_DECREF(value); value = ast2obj_expr(o->v.For.iter); if (!value) goto failed; if (_PyObject_SetAttrId(result, &PyId_iter, value) == -1) goto failed; Py_DECREF(value); value = ast2obj_list(o->v.For.body, ast2obj_stmt); if (!value) goto failed; if (_PyObject_SetAttrId(result, &PyId_body, value) == -1) goto failed; Py_DECREF(value); value = ast2obj_list(o->v.For.orelse, ast2obj_stmt); if (!value) goto failed; if (_PyObject_SetAttrId(result, &PyId_orelse, value) == -1) goto failed; Py_DECREF(value); value = 
ast2obj_string(o->v.For.type_comment); if (!value) goto failed; if (_PyObject_SetAttrId(result, &PyId_type_comment, value) == -1) goto failed; Py_DECREF(value); break; case AsyncFor_kind: result = PyType_GenericNew(AsyncFor_type, NULL, NULL); if (!result) goto failed; value = ast2obj_expr(o->v.AsyncFor.target); if (!value) goto failed; if (_PyObject_SetAttrId(result, &PyId_target, value) == -1) goto failed; Py_DECREF(value); value = ast2obj_expr(o->v.AsyncFor.iter); if (!value) goto failed; if (_PyObject_SetAttrId(result, &PyId_iter, value) == -1) goto failed; Py_DECREF(value); value = ast2obj_list(o->v.AsyncFor.body, ast2obj_stmt); if (!value) goto failed; if (_PyObject_SetAttrId(result, &PyId_body, value) == -1) goto failed; Py_DECREF(value); value = ast2obj_list(o->v.AsyncFor.orelse, ast2obj_stmt); if (!value) goto failed; if (_PyObject_SetAttrId(result, &PyId_orelse, value) == -1) goto failed; Py_DECREF(value); value = ast2obj_string(o->v.AsyncFor.type_comment); if (!value) goto failed; if (_PyObject_SetAttrId(result, &PyId_type_comment, value) == -1) goto failed; Py_DECREF(value); break; case While_kind: result = PyType_GenericNew(While_type, NULL, NULL); if (!result) goto failed; value = ast2obj_expr(o->v.While.test); if (!value) goto failed; if (_PyObject_SetAttrId(result, &PyId_test, value) == -1) goto failed; Py_DECREF(value); value = ast2obj_list(o->v.While.body, ast2obj_stmt); if (!value) goto failed; if (_PyObject_SetAttrId(result, &PyId_body, value) == -1) goto failed; Py_DECREF(value); value = ast2obj_list(o->v.While.orelse, ast2obj_stmt); if (!value) goto failed; if (_PyObject_SetAttrId(result, &PyId_orelse, value) == -1) goto failed; Py_DECREF(value); break; case If_kind: result = PyType_GenericNew(If_type, NULL, NULL); if (!result) goto failed; value = ast2obj_expr(o->v.If.test); if (!value) goto failed; if (_PyObject_SetAttrId(result, &PyId_test, value) == -1) goto failed; Py_DECREF(value); value = ast2obj_list(o->v.If.body, ast2obj_stmt); if 
(!value) goto failed; if (_PyObject_SetAttrId(result, &PyId_body, value) == -1) goto failed; Py_DECREF(value); value = ast2obj_list(o->v.If.orelse, ast2obj_stmt); if (!value) goto failed; if (_PyObject_SetAttrId(result, &PyId_orelse, value) == -1) goto failed; Py_DECREF(value); break; case With_kind: result = PyType_GenericNew(With_type, NULL, NULL); if (!result) goto failed; value = ast2obj_list(o->v.With.items, ast2obj_withitem); if (!value) goto failed; if (_PyObject_SetAttrId(result, &PyId_items, value) == -1) goto failed; Py_DECREF(value); value = ast2obj_list(o->v.With.body, ast2obj_stmt); if (!value) goto failed; if (_PyObject_SetAttrId(result, &PyId_body, value) == -1) goto failed; Py_DECREF(value); value = ast2obj_string(o->v.With.type_comment); if (!value) goto failed; if (_PyObject_SetAttrId(result, &PyId_type_comment, value) == -1) goto failed; Py_DECREF(value); break; case AsyncWith_kind: result = PyType_GenericNew(AsyncWith_type, NULL, NULL); if (!result) goto failed; value = ast2obj_list(o->v.AsyncWith.items, ast2obj_withitem); if (!value) goto failed; if (_PyObject_SetAttrId(result, &PyId_items, value) == -1) goto failed; Py_DECREF(value); value = ast2obj_list(o->v.AsyncWith.body, ast2obj_stmt); if (!value) goto failed; if (_PyObject_SetAttrId(result, &PyId_body, value) == -1) goto failed; Py_DECREF(value); value = ast2obj_string(o->v.AsyncWith.type_comment); if (!value) goto failed; if (_PyObject_SetAttrId(result, &PyId_type_comment, value) == -1) goto failed; Py_DECREF(value); break; case Raise_kind: result = PyType_GenericNew(Raise_type, NULL, NULL); if (!result) goto failed; value = ast2obj_expr(o->v.Raise.exc); if (!value) goto failed; if (_PyObject_SetAttrId(result, &PyId_exc, value) == -1) goto failed; Py_DECREF(value); value = ast2obj_expr(o->v.Raise.cause); if (!value) goto failed; if (_PyObject_SetAttrId(result, &PyId_cause, value) == -1) goto failed; Py_DECREF(value); break; case Try_kind: result = PyType_GenericNew(Try_type, NULL, NULL); 
if (!result) goto failed; value = ast2obj_list(o->v.Try.body, ast2obj_stmt); if (!value) goto failed; if (_PyObject_SetAttrId(result, &PyId_body, value) == -1) goto failed; Py_DECREF(value); value = ast2obj_list(o->v.Try.handlers, ast2obj_excepthandler); if (!value) goto failed; if (_PyObject_SetAttrId(result, &PyId_handlers, value) == -1) goto failed; Py_DECREF(value); value = ast2obj_list(o->v.Try.orelse, ast2obj_stmt); if (!value) goto failed; if (_PyObject_SetAttrId(result, &PyId_orelse, value) == -1) goto failed; Py_DECREF(value); value = ast2obj_list(o->v.Try.finalbody, ast2obj_stmt); if (!value) goto failed; if (_PyObject_SetAttrId(result, &PyId_finalbody, value) == -1) goto failed; Py_DECREF(value); break; case Assert_kind: result = PyType_GenericNew(Assert_type, NULL, NULL); if (!result) goto failed; value = ast2obj_expr(o->v.Assert.test); if (!value) goto failed; if (_PyObject_SetAttrId(result, &PyId_test, value) == -1) goto failed; Py_DECREF(value); value = ast2obj_expr(o->v.Assert.msg); if (!value) goto failed; if (_PyObject_SetAttrId(result, &PyId_msg, value) == -1) goto failed; Py_DECREF(value); break; case Import_kind: result = PyType_GenericNew(Import_type, NULL, NULL); if (!result) goto failed; value = ast2obj_list(o->v.Import.names, ast2obj_alias); if (!value) goto failed; if (_PyObject_SetAttrId(result, &PyId_names, value) == -1) goto failed; Py_DECREF(value); break; case ImportFrom_kind: result = PyType_GenericNew(ImportFrom_type, NULL, NULL); if (!result) goto failed; value = ast2obj_identifier(o->v.ImportFrom.module); if (!value) goto failed; if (_PyObject_SetAttrId(result, &PyId_module, value) == -1) goto failed; Py_DECREF(value); value = ast2obj_list(o->v.ImportFrom.names, ast2obj_alias); if (!value) goto failed; if (_PyObject_SetAttrId(result, &PyId_names, value) == -1) goto failed; Py_DECREF(value); value = ast2obj_int(o->v.ImportFrom.level); if (!value) goto failed; if (_PyObject_SetAttrId(result, &PyId_level, value) == -1) goto failed; 
Py_DECREF(value); break; case Global_kind: result = PyType_GenericNew(Global_type, NULL, NULL); if (!result) goto failed; value = ast2obj_list(o->v.Global.names, ast2obj_identifier); if (!value) goto failed; if (_PyObject_SetAttrId(result, &PyId_names, value) == -1) goto failed; Py_DECREF(value); break; case Nonlocal_kind: result = PyType_GenericNew(Nonlocal_type, NULL, NULL); if (!result) goto failed; value = ast2obj_list(o->v.Nonlocal.names, ast2obj_identifier); if (!value) goto failed; if (_PyObject_SetAttrId(result, &PyId_names, value) == -1) goto failed; Py_DECREF(value); break; case Expr_kind: result = PyType_GenericNew(Expr_type, NULL, NULL); if (!result) goto failed; value = ast2obj_expr(o->v.Expr.value); if (!value) goto failed; if (_PyObject_SetAttrId(result, &PyId_value, value) == -1) goto failed; Py_DECREF(value); break; case Pass_kind: result = PyType_GenericNew(Pass_type, NULL, NULL); if (!result) goto failed; break; case Break_kind: result = PyType_GenericNew(Break_type, NULL, NULL); if (!result) goto failed; break; case Continue_kind: result = PyType_GenericNew(Continue_type, NULL, NULL); if (!result) goto failed; break; } value = ast2obj_int(o->lineno); if (!value) goto failed; if (_PyObject_SetAttrId(result, &PyId_lineno, value) < 0) goto failed; Py_DECREF(value); value = ast2obj_int(o->col_offset); if (!value) goto failed; if (_PyObject_SetAttrId(result, &PyId_col_offset, value) < 0) goto failed; Py_DECREF(value); value = ast2obj_int(o->end_lineno); if (!value) goto failed; if (_PyObject_SetAttrId(result, &PyId_end_lineno, value) < 0) goto failed; Py_DECREF(value); value = ast2obj_int(o->end_col_offset); if (!value) goto failed; if (_PyObject_SetAttrId(result, &PyId_end_col_offset, value) < 0) goto failed; Py_DECREF(value); return result; failed: Py_XDECREF(value); Py_XDECREF(result); return NULL; } PyObject* ast2obj_expr(void* _o) { expr_ty o = (expr_ty)_o; PyObject *result = NULL, *value = NULL; if (!o) { Py_RETURN_NONE; } switch (o->kind) { 
case BoolOp_kind: result = PyType_GenericNew(BoolOp_type, NULL, NULL); if (!result) goto failed; value = ast2obj_boolop(o->v.BoolOp.op); if (!value) goto failed; if (_PyObject_SetAttrId(result, &PyId_op, value) == -1) goto failed; Py_DECREF(value); value = ast2obj_list(o->v.BoolOp.values, ast2obj_expr); if (!value) goto failed; if (_PyObject_SetAttrId(result, &PyId_values, value) == -1) goto failed; Py_DECREF(value); break; case NamedExpr_kind: result = PyType_GenericNew(NamedExpr_type, NULL, NULL); if (!result) goto failed; value = ast2obj_expr(o->v.NamedExpr.target); if (!value) goto failed; if (_PyObject_SetAttrId(result, &PyId_target, value) == -1) goto failed; Py_DECREF(value); value = ast2obj_expr(o->v.NamedExpr.value); if (!value) goto failed; if (_PyObject_SetAttrId(result, &PyId_value, value) == -1) goto failed; Py_DECREF(value); break; case BinOp_kind: result = PyType_GenericNew(BinOp_type, NULL, NULL); if (!result) goto failed; value = ast2obj_expr(o->v.BinOp.left); if (!value) goto failed; if (_PyObject_SetAttrId(result, &PyId_left, value) == -1) goto failed; Py_DECREF(value); value = ast2obj_operator(o->v.BinOp.op); if (!value) goto failed; if (_PyObject_SetAttrId(result, &PyId_op, value) == -1) goto failed; Py_DECREF(value); value = ast2obj_expr(o->v.BinOp.right); if (!value) goto failed; if (_PyObject_SetAttrId(result, &PyId_right, value) == -1) goto failed; Py_DECREF(value); break; case UnaryOp_kind: result = PyType_GenericNew(UnaryOp_type, NULL, NULL); if (!result) goto failed; value = ast2obj_unaryop(o->v.UnaryOp.op); if (!value) goto failed; if (_PyObject_SetAttrId(result, &PyId_op, value) == -1) goto failed; Py_DECREF(value); value = ast2obj_expr(o->v.UnaryOp.operand); if (!value) goto failed; if (_PyObject_SetAttrId(result, &PyId_operand, value) == -1) goto failed; Py_DECREF(value); break; case Lambda_kind: result = PyType_GenericNew(Lambda_type, NULL, NULL); if (!result) goto failed; value = ast2obj_arguments(o->v.Lambda.args); if (!value) 
goto failed; if (_PyObject_SetAttrId(result, &PyId_args, value) == -1) goto failed; Py_DECREF(value); value = ast2obj_expr(o->v.Lambda.body); if (!value) goto failed; if (_PyObject_SetAttrId(result, &PyId_body, value) == -1) goto failed; Py_DECREF(value); break; case IfExp_kind: result = PyType_GenericNew(IfExp_type, NULL, NULL); if (!result) goto failed; value = ast2obj_expr(o->v.IfExp.test); if (!value) goto failed; if (_PyObject_SetAttrId(result, &PyId_test, value) == -1) goto failed; Py_DECREF(value); value = ast2obj_expr(o->v.IfExp.body); if (!value) goto failed; if (_PyObject_SetAttrId(result, &PyId_body, value) == -1) goto failed; Py_DECREF(value); value = ast2obj_expr(o->v.IfExp.orelse); if (!value) goto failed; if (_PyObject_SetAttrId(result, &PyId_orelse, value) == -1) goto failed; Py_DECREF(value); break; case Dict_kind: result = PyType_GenericNew(Dict_type, NULL, NULL); if (!result) goto failed; value = ast2obj_list(o->v.Dict.keys, ast2obj_expr); if (!value) goto failed; if (_PyObject_SetAttrId(result, &PyId_keys, value) == -1) goto failed; Py_DECREF(value); value = ast2obj_list(o->v.Dict.values, ast2obj_expr); if (!value) goto failed; if (_PyObject_SetAttrId(result, &PyId_values, value) == -1) goto failed; Py_DECREF(value); break; case Set_kind: result = PyType_GenericNew(Set_type, NULL, NULL); if (!result) goto failed; value = ast2obj_list(o->v.Set.elts, ast2obj_expr); if (!value) goto failed; if (_PyObject_SetAttrId(result, &PyId_elts, value) == -1) goto failed; Py_DECREF(value); break; case ListComp_kind: result = PyType_GenericNew(ListComp_type, NULL, NULL); if (!result) goto failed; value = ast2obj_expr(o->v.ListComp.elt); if (!value) goto failed; if (_PyObject_SetAttrId(result, &PyId_elt, value) == -1) goto failed; Py_DECREF(value); value = ast2obj_list(o->v.ListComp.generators, ast2obj_comprehension); if (!value) goto failed; if (_PyObject_SetAttrId(result, &PyId_generators, value) == -1) goto failed; Py_DECREF(value); break; case SetComp_kind: 
result = PyType_GenericNew(SetComp_type, NULL, NULL); if (!result) goto failed; value = ast2obj_expr(o->v.SetComp.elt); if (!value) goto failed; if (_PyObject_SetAttrId(result, &PyId_elt, value) == -1) goto failed; Py_DECREF(value); value = ast2obj_list(o->v.SetComp.generators, ast2obj_comprehension); if (!value) goto failed; if (_PyObject_SetAttrId(result, &PyId_generators, value) == -1) goto failed; Py_DECREF(value); break; case DictComp_kind: result = PyType_GenericNew(DictComp_type, NULL, NULL); if (!result) goto failed; value = ast2obj_expr(o->v.DictComp.key); if (!value) goto failed; if (_PyObject_SetAttrId(result, &PyId_key, value) == -1) goto failed; Py_DECREF(value); value = ast2obj_expr(o->v.DictComp.value); if (!value) goto failed; if (_PyObject_SetAttrId(result, &PyId_value, value) == -1) goto failed; Py_DECREF(value); value = ast2obj_list(o->v.DictComp.generators, ast2obj_comprehension); if (!value) goto failed; if (_PyObject_SetAttrId(result, &PyId_generators, value) == -1) goto failed; Py_DECREF(value); break; case GeneratorExp_kind: result = PyType_GenericNew(GeneratorExp_type, NULL, NULL); if (!result) goto failed; value = ast2obj_expr(o->v.GeneratorExp.elt); if (!value) goto failed; if (_PyObject_SetAttrId(result, &PyId_elt, value) == -1) goto failed; Py_DECREF(value); value = ast2obj_list(o->v.GeneratorExp.generators, ast2obj_comprehension); if (!value) goto failed; if (_PyObject_SetAttrId(result, &PyId_generators, value) == -1) goto failed; Py_DECREF(value); break; case Await_kind: result = PyType_GenericNew(Await_type, NULL, NULL); if (!result) goto failed; value = ast2obj_expr(o->v.Await.value); if (!value) goto failed; if (_PyObject_SetAttrId(result, &PyId_value, value) == -1) goto failed; Py_DECREF(value); break; case Yield_kind: result = PyType_GenericNew(Yield_type, NULL, NULL); if (!result) goto failed; value = ast2obj_expr(o->v.Yield.value); if (!value) goto failed; if (_PyObject_SetAttrId(result, &PyId_value, value) == -1) goto failed; 
Py_DECREF(value); break; case YieldFrom_kind: result = PyType_GenericNew(YieldFrom_type, NULL, NULL); if (!result) goto failed; value = ast2obj_expr(o->v.YieldFrom.value); if (!value) goto failed; if (_PyObject_SetAttrId(result, &PyId_value, value) == -1) goto failed; Py_DECREF(value); break; case Compare_kind: result = PyType_GenericNew(Compare_type, NULL, NULL); if (!result) goto failed; value = ast2obj_expr(o->v.Compare.left); if (!value) goto failed; if (_PyObject_SetAttrId(result, &PyId_left, value) == -1) goto failed; Py_DECREF(value); { Py_ssize_t i, n = asdl_seq_LEN(o->v.Compare.ops); value = PyList_New(n); if (!value) goto failed; for(i = 0; i < n; i++) PyList_SET_ITEM(value, i, ast2obj_cmpop((cmpop_ty)asdl_seq_GET(o->v.Compare.ops, i))); } if (!value) goto failed; if (_PyObject_SetAttrId(result, &PyId_ops, value) == -1) goto failed; Py_DECREF(value); value = ast2obj_list(o->v.Compare.comparators, ast2obj_expr); if (!value) goto failed; if (_PyObject_SetAttrId(result, &PyId_comparators, value) == -1) goto failed; Py_DECREF(value); break; case Call_kind: result = PyType_GenericNew(Call_type, NULL, NULL); if (!result) goto failed; value = ast2obj_expr(o->v.Call.func); if (!value) goto failed; if (_PyObject_SetAttrId(result, &PyId_func, value) == -1) goto failed; Py_DECREF(value); value = ast2obj_list(o->v.Call.args, ast2obj_expr); if (!value) goto failed; if (_PyObject_SetAttrId(result, &PyId_args, value) == -1) goto failed; Py_DECREF(value); value = ast2obj_list(o->v.Call.keywords, ast2obj_keyword); if (!value) goto failed; if (_PyObject_SetAttrId(result, &PyId_keywords, value) == -1) goto failed; Py_DECREF(value); break; case FormattedValue_kind: result = PyType_GenericNew(FormattedValue_type, NULL, NULL); if (!result) goto failed; value = ast2obj_expr(o->v.FormattedValue.value); if (!value) goto failed; if (_PyObject_SetAttrId(result, &PyId_value, value) == -1) goto failed; Py_DECREF(value); value = ast2obj_int(o->v.FormattedValue.conversion); if (!value) 
goto failed; if (_PyObject_SetAttrId(result, &PyId_conversion, value) == -1) goto failed; Py_DECREF(value); value = ast2obj_expr(o->v.FormattedValue.format_spec); if (!value) goto failed; if (_PyObject_SetAttrId(result, &PyId_format_spec, value) == -1) goto failed; Py_DECREF(value); break; case JoinedStr_kind: result = PyType_GenericNew(JoinedStr_type, NULL, NULL); if (!result) goto failed; value = ast2obj_list(o->v.JoinedStr.values, ast2obj_expr); if (!value) goto failed; if (_PyObject_SetAttrId(result, &PyId_values, value) == -1) goto failed; Py_DECREF(value); break; case Constant_kind: result = PyType_GenericNew(Constant_type, NULL, NULL); if (!result) goto failed; value = ast2obj_constant(o->v.Constant.value); if (!value) goto failed; if (_PyObject_SetAttrId(result, &PyId_value, value) == -1) goto failed; Py_DECREF(value); break; case Attribute_kind: result = PyType_GenericNew(Attribute_type, NULL, NULL); if (!result) goto failed; value = ast2obj_expr(o->v.Attribute.value); if (!value) goto failed; if (_PyObject_SetAttrId(result, &PyId_value, value) == -1) goto failed; Py_DECREF(value); value = ast2obj_identifier(o->v.Attribute.attr); if (!value) goto failed; if (_PyObject_SetAttrId(result, &PyId_attr, value) == -1) goto failed; Py_DECREF(value); value = ast2obj_expr_context(o->v.Attribute.ctx); if (!value) goto failed; if (_PyObject_SetAttrId(result, &PyId_ctx, value) == -1) goto failed; Py_DECREF(value); break; case Subscript_kind: result = PyType_GenericNew(Subscript_type, NULL, NULL); if (!result) goto failed; value = ast2obj_expr(o->v.Subscript.value); if (!value) goto failed; if (_PyObject_SetAttrId(result, &PyId_value, value) == -1) goto failed; Py_DECREF(value); value = ast2obj_slice(o->v.Subscript.slice); if (!value) goto failed; if (_PyObject_SetAttrId(result, &PyId_slice, value) == -1) goto failed; Py_DECREF(value); value = ast2obj_expr_context(o->v.Subscript.ctx); if (!value) goto failed; if (_PyObject_SetAttrId(result, &PyId_ctx, value) == -1) goto 
failed; Py_DECREF(value); break; case Starred_kind: result = PyType_GenericNew(Starred_type, NULL, NULL); if (!result) goto failed; value = ast2obj_expr(o->v.Starred.value); if (!value) goto failed; if (_PyObject_SetAttrId(result, &PyId_value, value) == -1) goto failed; Py_DECREF(value); value = ast2obj_expr_context(o->v.Starred.ctx); if (!value) goto failed; if (_PyObject_SetAttrId(result, &PyId_ctx, value) == -1) goto failed; Py_DECREF(value); break; case Name_kind: result = PyType_GenericNew(Name_type, NULL, NULL); if (!result) goto failed; value = ast2obj_identifier(o->v.Name.id); if (!value) goto failed; if (_PyObject_SetAttrId(result, &PyId_id, value) == -1) goto failed; Py_DECREF(value); value = ast2obj_expr_context(o->v.Name.ctx); if (!value) goto failed; if (_PyObject_SetAttrId(result, &PyId_ctx, value) == -1) goto failed; Py_DECREF(value); break; case List_kind: result = PyType_GenericNew(List_type, NULL, NULL); if (!result) goto failed; value = ast2obj_list(o->v.List.elts, ast2obj_expr); if (!value) goto failed; if (_PyObject_SetAttrId(result, &PyId_elts, value) == -1) goto failed; Py_DECREF(value); value = ast2obj_expr_context(o->v.List.ctx); if (!value) goto failed; if (_PyObject_SetAttrId(result, &PyId_ctx, value) == -1) goto failed; Py_DECREF(value); break; case Tuple_kind: result = PyType_GenericNew(Tuple_type, NULL, NULL); if (!result) goto failed; value = ast2obj_list(o->v.Tuple.elts, ast2obj_expr); if (!value) goto failed; if (_PyObject_SetAttrId(result, &PyId_elts, value) == -1) goto failed; Py_DECREF(value); value = ast2obj_expr_context(o->v.Tuple.ctx); if (!value) goto failed; if (_PyObject_SetAttrId(result, &PyId_ctx, value) == -1) goto failed; Py_DECREF(value); break; } value = ast2obj_int(o->lineno); if (!value) goto failed; if (_PyObject_SetAttrId(result, &PyId_lineno, value) < 0) goto failed; Py_DECREF(value); value = ast2obj_int(o->col_offset); if (!value) goto failed; if (_PyObject_SetAttrId(result, &PyId_col_offset, value) < 0) goto 
failed; Py_DECREF(value); value = ast2obj_int(o->end_lineno); if (!value) goto failed; if (_PyObject_SetAttrId(result, &PyId_end_lineno, value) < 0) goto failed; Py_DECREF(value); value = ast2obj_int(o->end_col_offset); if (!value) goto failed; if (_PyObject_SetAttrId(result, &PyId_end_col_offset, value) < 0) goto failed; Py_DECREF(value); return result; failed: Py_XDECREF(value); Py_XDECREF(result); return NULL; } PyObject* ast2obj_expr_context(expr_context_ty o) { switch(o) { case Load: Py_INCREF(Load_singleton); return Load_singleton; case Store: Py_INCREF(Store_singleton); return Store_singleton; case Del: Py_INCREF(Del_singleton); return Del_singleton; case AugLoad: Py_INCREF(AugLoad_singleton); return AugLoad_singleton; case AugStore: Py_INCREF(AugStore_singleton); return AugStore_singleton; case Param: Py_INCREF(Param_singleton); return Param_singleton; case NamedStore: Py_INCREF(NamedStore_singleton); return NamedStore_singleton; default: /* should never happen, but just in case ... 
*/ PyErr_Format(PyExc_SystemError, "unknown expr_context found"); return NULL; } } PyObject* ast2obj_slice(void* _o) { slice_ty o = (slice_ty)_o; PyObject *result = NULL, *value = NULL; if (!o) { Py_RETURN_NONE; } switch (o->kind) { case Slice_kind: result = PyType_GenericNew(Slice_type, NULL, NULL); if (!result) goto failed; value = ast2obj_expr(o->v.Slice.lower); if (!value) goto failed; if (_PyObject_SetAttrId(result, &PyId_lower, value) == -1) goto failed; Py_DECREF(value); value = ast2obj_expr(o->v.Slice.upper); if (!value) goto failed; if (_PyObject_SetAttrId(result, &PyId_upper, value) == -1) goto failed; Py_DECREF(value); value = ast2obj_expr(o->v.Slice.step); if (!value) goto failed; if (_PyObject_SetAttrId(result, &PyId_step, value) == -1) goto failed; Py_DECREF(value); break; case ExtSlice_kind: result = PyType_GenericNew(ExtSlice_type, NULL, NULL); if (!result) goto failed; value = ast2obj_list(o->v.ExtSlice.dims, ast2obj_slice); if (!value) goto failed; if (_PyObject_SetAttrId(result, &PyId_dims, value) == -1) goto failed; Py_DECREF(value); break; case Index_kind: result = PyType_GenericNew(Index_type, NULL, NULL); if (!result) goto failed; value = ast2obj_expr(o->v.Index.value); if (!value) goto failed; if (_PyObject_SetAttrId(result, &PyId_value, value) == -1) goto failed; Py_DECREF(value); break; } return result; failed: Py_XDECREF(value); Py_XDECREF(result); return NULL; } PyObject* ast2obj_boolop(boolop_ty o) { switch(o) { case And: Py_INCREF(And_singleton); return And_singleton; case Or: Py_INCREF(Or_singleton); return Or_singleton; default: /* should never happen, but just in case ... 
*/ PyErr_Format(PyExc_SystemError, "unknown boolop found"); return NULL; } } PyObject* ast2obj_operator(operator_ty o) { switch(o) { case Add: Py_INCREF(Add_singleton); return Add_singleton; case Sub: Py_INCREF(Sub_singleton); return Sub_singleton; case Mult: Py_INCREF(Mult_singleton); return Mult_singleton; case MatMult: Py_INCREF(MatMult_singleton); return MatMult_singleton; case Div: Py_INCREF(Div_singleton); return Div_singleton; case Mod: Py_INCREF(Mod_singleton); return Mod_singleton; case Pow: Py_INCREF(Pow_singleton); return Pow_singleton; case LShift: Py_INCREF(LShift_singleton); return LShift_singleton; case RShift: Py_INCREF(RShift_singleton); return RShift_singleton; case BitOr: Py_INCREF(BitOr_singleton); return BitOr_singleton; case BitXor: Py_INCREF(BitXor_singleton); return BitXor_singleton; case BitAnd: Py_INCREF(BitAnd_singleton); return BitAnd_singleton; case FloorDiv: Py_INCREF(FloorDiv_singleton); return FloorDiv_singleton; default: /* should never happen, but just in case ... */ PyErr_Format(PyExc_SystemError, "unknown operator found"); return NULL; } } PyObject* ast2obj_unaryop(unaryop_ty o) { switch(o) { case Invert: Py_INCREF(Invert_singleton); return Invert_singleton; case Not: Py_INCREF(Not_singleton); return Not_singleton; case UAdd: Py_INCREF(UAdd_singleton); return UAdd_singleton; case USub: Py_INCREF(USub_singleton); return USub_singleton; default: /* should never happen, but just in case ... 
*/ PyErr_Format(PyExc_SystemError, "unknown unaryop found"); return NULL; } }
/*
 * ast2obj_cmpop: map a cmpop_ty enum value (comparison operators) to its
 * shared Python singleton, returning a new reference; SystemError on an
 * unknown value.
 */
PyObject* ast2obj_cmpop(cmpop_ty o) { switch(o) { case Eq: Py_INCREF(Eq_singleton); return Eq_singleton; case NotEq: Py_INCREF(NotEq_singleton); return NotEq_singleton; case Lt: Py_INCREF(Lt_singleton); return Lt_singleton; case LtE: Py_INCREF(LtE_singleton); return LtE_singleton; case Gt: Py_INCREF(Gt_singleton); return Gt_singleton; case GtE: Py_INCREF(GtE_singleton); return GtE_singleton; case Is: Py_INCREF(Is_singleton); return Is_singleton; case IsNot: Py_INCREF(IsNot_singleton); return IsNot_singleton; case In: Py_INCREF(In_singleton); return In_singleton; case NotIn: Py_INCREF(NotIn_singleton); return NotIn_singleton; default: /* should never happen, but just in case ... */ PyErr_Format(PyExc_SystemError, "unknown cmpop found"); return NULL; } }
/*
 * ast2obj_comprehension: convert a C comprehension_ty node into its Python
 * AST object, setting target/iter/ifs/is_async attributes.  Returns a new
 * reference or NULL with an exception set (partial result released at
 * `failed`).
 */
PyObject* ast2obj_comprehension(void* _o) { comprehension_ty o = (comprehension_ty)_o; PyObject *result = NULL, *value = NULL; if (!o) { Py_RETURN_NONE; } result = PyType_GenericNew(comprehension_type, NULL, NULL); if (!result) return NULL; value = ast2obj_expr(o->target); if (!value) goto failed; if (_PyObject_SetAttrId(result, &PyId_target, value) == -1) goto failed; Py_DECREF(value); value = ast2obj_expr(o->iter); if (!value) goto failed; if (_PyObject_SetAttrId(result, &PyId_iter, value) == -1) goto failed; Py_DECREF(value); value = ast2obj_list(o->ifs, ast2obj_expr); if (!value) goto failed; if (_PyObject_SetAttrId(result, &PyId_ifs, value) == -1) goto failed; Py_DECREF(value); value = ast2obj_int(o->is_async); if (!value) goto failed; if (_PyObject_SetAttrId(result, &PyId_is_async, value) == -1) goto failed; Py_DECREF(value); return result; failed: Py_XDECREF(value); Py_XDECREF(result); return NULL; }
/*
 * ast2obj_excepthandler: convert a C excepthandler_ty node into its Python
 * AST object (type/name/body fields, plus source-location attributes set
 * after the switch — continued below).
 */
PyObject* ast2obj_excepthandler(void* _o) { excepthandler_ty o = (excepthandler_ty)_o; PyObject *result = NULL, *value = NULL; if (!o) { Py_RETURN_NONE; } switch (o->kind) { case ExceptHandler_kind: result = 
/* (continuation of ast2obj_excepthandler: build the ExceptHandler object,
 * then attach the common lineno/col_offset/end_lineno/end_col_offset
 * location attributes) */
PyType_GenericNew(ExceptHandler_type, NULL, NULL); if (!result) goto failed; value = ast2obj_expr(o->v.ExceptHandler.type); if (!value) goto failed; if (_PyObject_SetAttrId(result, &PyId_type, value) == -1) goto failed; Py_DECREF(value); value = ast2obj_identifier(o->v.ExceptHandler.name); if (!value) goto failed; if (_PyObject_SetAttrId(result, &PyId_name, value) == -1) goto failed; Py_DECREF(value); value = ast2obj_list(o->v.ExceptHandler.body, ast2obj_stmt); if (!value) goto failed; if (_PyObject_SetAttrId(result, &PyId_body, value) == -1) goto failed; Py_DECREF(value); break; } value = ast2obj_int(o->lineno); if (!value) goto failed; if (_PyObject_SetAttrId(result, &PyId_lineno, value) < 0) goto failed; Py_DECREF(value); value = ast2obj_int(o->col_offset); if (!value) goto failed; if (_PyObject_SetAttrId(result, &PyId_col_offset, value) < 0) goto failed; Py_DECREF(value); value = ast2obj_int(o->end_lineno); if (!value) goto failed; if (_PyObject_SetAttrId(result, &PyId_end_lineno, value) < 0) goto failed; Py_DECREF(value); value = ast2obj_int(o->end_col_offset); if (!value) goto failed; if (_PyObject_SetAttrId(result, &PyId_end_col_offset, value) < 0) goto failed; Py_DECREF(value); return result; failed: Py_XDECREF(value); Py_XDECREF(result); return NULL; }
/*
 * ast2obj_arguments: convert a C arguments_ty node into its Python AST
 * object, setting args/vararg/kwonlyargs/kw_defaults/kwarg/defaults
 * (continued below).  Returns a new reference or NULL on failure.
 */
PyObject* ast2obj_arguments(void* _o) { arguments_ty o = (arguments_ty)_o; PyObject *result = NULL, *value = NULL; if (!o) { Py_RETURN_NONE; } result = PyType_GenericNew(arguments_type, NULL, NULL); if (!result) return NULL; value = ast2obj_list(o->args, ast2obj_arg); if (!value) goto failed; if (_PyObject_SetAttrId(result, &PyId_args, value) == -1) goto failed; Py_DECREF(value); value = ast2obj_arg(o->vararg); if (!value) goto failed; if (_PyObject_SetAttrId(result, &PyId_vararg, value) == -1) goto failed; Py_DECREF(value); value = ast2obj_list(o->kwonlyargs, ast2obj_arg); if (!value) goto failed; if (_PyObject_SetAttrId(result, &PyId_kwonlyargs, value) == -1) goto failed; Py_DECREF(value); value = 
/* (continuation of ast2obj_arguments: kw_defaults/kwarg/defaults fields) */
ast2obj_list(o->kw_defaults, ast2obj_expr); if (!value) goto failed; if (_PyObject_SetAttrId(result, &PyId_kw_defaults, value) == -1) goto failed; Py_DECREF(value); value = ast2obj_arg(o->kwarg); if (!value) goto failed; if (_PyObject_SetAttrId(result, &PyId_kwarg, value) == -1) goto failed; Py_DECREF(value); value = ast2obj_list(o->defaults, ast2obj_expr); if (!value) goto failed; if (_PyObject_SetAttrId(result, &PyId_defaults, value) == -1) goto failed; Py_DECREF(value); return result; failed: Py_XDECREF(value); Py_XDECREF(result); return NULL; }
/*
 * ast2obj_arg: convert a C arg_ty node into its Python AST object, setting
 * arg/annotation/type_comment plus the four source-location attributes.
 * Returns a new reference or NULL with an exception set.
 */
PyObject* ast2obj_arg(void* _o) { arg_ty o = (arg_ty)_o; PyObject *result = NULL, *value = NULL; if (!o) { Py_RETURN_NONE; } result = PyType_GenericNew(arg_type, NULL, NULL); if (!result) return NULL; value = ast2obj_identifier(o->arg); if (!value) goto failed; if (_PyObject_SetAttrId(result, &PyId_arg, value) == -1) goto failed; Py_DECREF(value); value = ast2obj_expr(o->annotation); if (!value) goto failed; if (_PyObject_SetAttrId(result, &PyId_annotation, value) == -1) goto failed; Py_DECREF(value); value = ast2obj_string(o->type_comment); if (!value) goto failed; if (_PyObject_SetAttrId(result, &PyId_type_comment, value) == -1) goto failed; Py_DECREF(value); value = ast2obj_int(o->lineno); if (!value) goto failed; if (_PyObject_SetAttrId(result, &PyId_lineno, value) < 0) goto failed; Py_DECREF(value); value = ast2obj_int(o->col_offset); if (!value) goto failed; if (_PyObject_SetAttrId(result, &PyId_col_offset, value) < 0) goto failed; Py_DECREF(value); value = ast2obj_int(o->end_lineno); if (!value) goto failed; if (_PyObject_SetAttrId(result, &PyId_end_lineno, value) < 0) goto failed; Py_DECREF(value); value = ast2obj_int(o->end_col_offset); if (!value) goto failed; if (_PyObject_SetAttrId(result, &PyId_end_col_offset, value) < 0) goto failed; Py_DECREF(value); return result; failed: Py_XDECREF(value); Py_XDECREF(result); return NULL; }
/*
 * ast2obj_keyword: convert a C keyword_ty node (a `name=value` call
 * argument) into its Python AST object (continued below).
 */
PyObject* ast2obj_keyword(void* _o) { keyword_ty o = (keyword_ty)_o; PyObject 
/* (continuation of ast2obj_keyword: set arg and value attributes) */
*result = NULL, *value = NULL; if (!o) { Py_RETURN_NONE; } result = PyType_GenericNew(keyword_type, NULL, NULL); if (!result) return NULL; value = ast2obj_identifier(o->arg); if (!value) goto failed; if (_PyObject_SetAttrId(result, &PyId_arg, value) == -1) goto failed; Py_DECREF(value); value = ast2obj_expr(o->value); if (!value) goto failed; if (_PyObject_SetAttrId(result, &PyId_value, value) == -1) goto failed; Py_DECREF(value); return result; failed: Py_XDECREF(value); Py_XDECREF(result); return NULL; }
/*
 * ast2obj_alias: convert a C alias_ty node (an import alias) into its
 * Python AST object, setting name and asname.  Returns a new reference
 * or NULL with an exception set.
 */
PyObject* ast2obj_alias(void* _o) { alias_ty o = (alias_ty)_o; PyObject *result = NULL, *value = NULL; if (!o) { Py_RETURN_NONE; } result = PyType_GenericNew(alias_type, NULL, NULL); if (!result) return NULL; value = ast2obj_identifier(o->name); if (!value) goto failed; if (_PyObject_SetAttrId(result, &PyId_name, value) == -1) goto failed; Py_DECREF(value); value = ast2obj_identifier(o->asname); if (!value) goto failed; if (_PyObject_SetAttrId(result, &PyId_asname, value) == -1) goto failed; Py_DECREF(value); return result; failed: Py_XDECREF(value); Py_XDECREF(result); return NULL; }
/*
 * ast2obj_withitem: convert a C withitem_ty node (one `with` clause item)
 * into its Python AST object, setting context_expr and optional_vars.
 */
PyObject* ast2obj_withitem(void* _o) { withitem_ty o = (withitem_ty)_o; PyObject *result = NULL, *value = NULL; if (!o) { Py_RETURN_NONE; } result = PyType_GenericNew(withitem_type, NULL, NULL); if (!result) return NULL; value = ast2obj_expr(o->context_expr); if (!value) goto failed; if (_PyObject_SetAttrId(result, &PyId_context_expr, value) == -1) goto failed; Py_DECREF(value); value = ast2obj_expr(o->optional_vars); if (!value) goto failed; if (_PyObject_SetAttrId(result, &PyId_optional_vars, value) == -1) goto failed; Py_DECREF(value); return result; failed: Py_XDECREF(value); Py_XDECREF(result); return NULL; }
/*
 * ast2obj_type_ignore: convert a C type_ignore_ty node (a `# type: ignore`
 * comment record) into its Python AST object (continued below).
 */
PyObject* ast2obj_type_ignore(void* _o) { type_ignore_ty o = (type_ignore_ty)_o; PyObject *result = NULL, *value = NULL; if (!o) { Py_RETURN_NONE; } switch (o->kind) { case TypeIgnore_kind: result = PyType_GenericNew(TypeIgnore_type, NULL, NULL); if (!result) goto failed; 
/* (continuation of ast2obj_type_ignore: set the lineno attribute) */
value = ast2obj_int(o->v.TypeIgnore.lineno); if (!value) goto failed; if (_PyObject_SetAttrId(result, &PyId_lineno, value) == -1) goto failed; Py_DECREF(value); break; } return result; failed: Py_XDECREF(value); Py_XDECREF(result); return NULL; }
/*
 * obj2ast_mod: convert a Python-level AST object back into a C mod_ty,
 * allocated from `arena`.  Tries each module-level node type in turn
 * (Module, Interactive, Expression, FunctionType, Suite) via
 * PyObject_IsInstance, validating required fields and list types.
 * Returns 0 on success (*out set), 1 on error with an exception set.
 * Each list loop re-checks the list length so a list mutated by a
 * malicious/odd __getattr__ during conversion raises RuntimeError instead
 * of reading out of bounds.
 */
int obj2ast_mod(PyObject* obj, mod_ty* out, PyArena* arena) { int isinstance; PyObject *tmp = NULL; if (obj == Py_None) { *out = NULL; return 0; } isinstance = PyObject_IsInstance(obj, (PyObject*)Module_type); if (isinstance == -1) { return 1; } if (isinstance) { asdl_seq* body; asdl_seq* type_ignores; if (_PyObject_LookupAttrId(obj, &PyId_body, &tmp) < 0) { return 1; } if (tmp == NULL) { PyErr_SetString(PyExc_TypeError, "required field \"body\" missing from Module"); return 1; } else { int res; Py_ssize_t len; Py_ssize_t i; if (!PyList_Check(tmp)) { PyErr_Format(PyExc_TypeError, "Module field \"body\" must be a list, not a %.200s", tmp->ob_type->tp_name); goto failed; } len = PyList_GET_SIZE(tmp); body = _Py_asdl_seq_new(len, arena); if (body == NULL) goto failed; for (i = 0; i < len; i++) { stmt_ty val; res = obj2ast_stmt(PyList_GET_ITEM(tmp, i), &val, arena); if (res != 0) goto failed; if (len != PyList_GET_SIZE(tmp)) { PyErr_SetString(PyExc_RuntimeError, "Module field \"body\" changed size during iteration"); goto failed; } asdl_seq_SET(body, i, val); } Py_CLEAR(tmp); } if (_PyObject_LookupAttrId(obj, &PyId_type_ignores, &tmp) < 0) { return 1; } if (tmp == NULL) { PyErr_SetString(PyExc_TypeError, "required field \"type_ignores\" missing from Module"); return 1; } else { int res; Py_ssize_t len; Py_ssize_t i; if (!PyList_Check(tmp)) { PyErr_Format(PyExc_TypeError, "Module field \"type_ignores\" must be a list, not a %.200s", tmp->ob_type->tp_name); goto failed; } len = PyList_GET_SIZE(tmp); type_ignores = _Py_asdl_seq_new(len, arena); if (type_ignores == NULL) goto failed; for (i = 0; i < len; i++) { type_ignore_ty val; res = obj2ast_type_ignore(PyList_GET_ITEM(tmp, i), &val, arena); if (res != 0) goto failed; if (len 
/* (obj2ast_mod continued: finish Module, then try Interactive, Expression
 * and FunctionType node types in turn) */
!= PyList_GET_SIZE(tmp)) { PyErr_SetString(PyExc_RuntimeError, "Module field \"type_ignores\" changed size during iteration"); goto failed; } asdl_seq_SET(type_ignores, i, val); } Py_CLEAR(tmp); } *out = Module(body, type_ignores, arena); if (*out == NULL) goto failed; return 0; }
/* Interactive(body): single-statement-list REPL module */
isinstance = PyObject_IsInstance(obj, (PyObject*)Interactive_type); if (isinstance == -1) { return 1; } if (isinstance) { asdl_seq* body; if (_PyObject_LookupAttrId(obj, &PyId_body, &tmp) < 0) { return 1; } if (tmp == NULL) { PyErr_SetString(PyExc_TypeError, "required field \"body\" missing from Interactive"); return 1; } else { int res; Py_ssize_t len; Py_ssize_t i; if (!PyList_Check(tmp)) { PyErr_Format(PyExc_TypeError, "Interactive field \"body\" must be a list, not a %.200s", tmp->ob_type->tp_name); goto failed; } len = PyList_GET_SIZE(tmp); body = _Py_asdl_seq_new(len, arena); if (body == NULL) goto failed; for (i = 0; i < len; i++) { stmt_ty val; res = obj2ast_stmt(PyList_GET_ITEM(tmp, i), &val, arena); if (res != 0) goto failed; if (len != PyList_GET_SIZE(tmp)) { PyErr_SetString(PyExc_RuntimeError, "Interactive field \"body\" changed size during iteration"); goto failed; } asdl_seq_SET(body, i, val); } Py_CLEAR(tmp); } *out = Interactive(body, arena); if (*out == NULL) goto failed; return 0; }
/* Expression(body): eval-mode module with a single expression */
isinstance = PyObject_IsInstance(obj, (PyObject*)Expression_type); if (isinstance == -1) { return 1; } if (isinstance) { expr_ty body; if (_PyObject_LookupAttrId(obj, &PyId_body, &tmp) < 0) { return 1; } if (tmp == NULL) { PyErr_SetString(PyExc_TypeError, "required field \"body\" missing from Expression"); return 1; } else { int res; res = obj2ast_expr(tmp, &body, arena); if (res != 0) goto failed; Py_CLEAR(tmp); } *out = Expression(body, arena); if (*out == NULL) goto failed; return 0; }
/* FunctionType(argtypes, returns): func_type-mode signature module */
isinstance = PyObject_IsInstance(obj, (PyObject*)FunctionType_type); if (isinstance == -1) { return 1; } if (isinstance) { asdl_seq* argtypes; expr_ty returns; if (_PyObject_LookupAttrId(obj, &PyId_argtypes, 
/* (obj2ast_mod continued: finish FunctionType, then try Suite) */
&tmp) < 0) { return 1; } if (tmp == NULL) { PyErr_SetString(PyExc_TypeError, "required field \"argtypes\" missing from FunctionType"); return 1; } else { int res; Py_ssize_t len; Py_ssize_t i; if (!PyList_Check(tmp)) { PyErr_Format(PyExc_TypeError, "FunctionType field \"argtypes\" must be a list, not a %.200s", tmp->ob_type->tp_name); goto failed; } len = PyList_GET_SIZE(tmp); argtypes = _Py_asdl_seq_new(len, arena); if (argtypes == NULL) goto failed; for (i = 0; i < len; i++) { expr_ty val; res = obj2ast_expr(PyList_GET_ITEM(tmp, i), &val, arena); if (res != 0) goto failed; if (len != PyList_GET_SIZE(tmp)) { PyErr_SetString(PyExc_RuntimeError, "FunctionType field \"argtypes\" changed size during iteration"); goto failed; } asdl_seq_SET(argtypes, i, val); } Py_CLEAR(tmp); } if (_PyObject_LookupAttrId(obj, &PyId_returns, &tmp) < 0) { return 1; } if (tmp == NULL) { PyErr_SetString(PyExc_TypeError, "required field \"returns\" missing from FunctionType"); return 1; } else { int res; res = obj2ast_expr(tmp, &returns, arena); if (res != 0) goto failed; Py_CLEAR(tmp); } *out = FunctionType(argtypes, returns, arena); if (*out == NULL) goto failed; return 0; }
/* Suite(body): legacy node kind retained by the grammar */
isinstance = PyObject_IsInstance(obj, (PyObject*)Suite_type); if (isinstance == -1) { return 1; } if (isinstance) { asdl_seq* body; if (_PyObject_LookupAttrId(obj, &PyId_body, &tmp) < 0) { return 1; } if (tmp == NULL) { PyErr_SetString(PyExc_TypeError, "required field \"body\" missing from Suite"); return 1; } else { int res; Py_ssize_t len; Py_ssize_t i; if (!PyList_Check(tmp)) { PyErr_Format(PyExc_TypeError, "Suite field \"body\" must be a list, not a %.200s", tmp->ob_type->tp_name); goto failed; } len = PyList_GET_SIZE(tmp); body = _Py_asdl_seq_new(len, arena); if (body == NULL) goto failed; for (i = 0; i < len; i++) { stmt_ty val; res = obj2ast_stmt(PyList_GET_ITEM(tmp, i), &val, arena); if (res != 0) goto failed; if (len != PyList_GET_SIZE(tmp)) { PyErr_SetString(PyExc_RuntimeError, "Suite field \"body\" changed 
size during iteration"); goto failed; } asdl_seq_SET(body, i, val); } Py_CLEAR(tmp); } *out = Suite(body, arena); if (*out == NULL) goto failed; return 0; } PyErr_Format(PyExc_TypeError, "expected some sort of mod, but got %R", obj); failed: Py_XDECREF(tmp); return 1; } int obj2ast_stmt(PyObject* obj, stmt_ty* out, PyArena* arena) { int isinstance; PyObject *tmp = NULL; int lineno; int col_offset; int end_lineno; int end_col_offset; if (obj == Py_None) { *out = NULL; return 0; } if (_PyObject_LookupAttrId(obj, &PyId_lineno, &tmp) < 0) { return 1; } if (tmp == NULL) { PyErr_SetString(PyExc_TypeError, "required field \"lineno\" missing from stmt"); return 1; } else { int res; res = obj2ast_int(tmp, &lineno, arena); if (res != 0) goto failed; Py_CLEAR(tmp); } if (_PyObject_LookupAttrId(obj, &PyId_col_offset, &tmp) < 0) { return 1; } if (tmp == NULL) { PyErr_SetString(PyExc_TypeError, "required field \"col_offset\" missing from stmt"); return 1; } else { int res; res = obj2ast_int(tmp, &col_offset, arena); if (res != 0) goto failed; Py_CLEAR(tmp); } if (_PyObject_LookupAttrId(obj, &PyId_end_lineno, &tmp) < 0) { return 1; } if (tmp == NULL || tmp == Py_None) { Py_CLEAR(tmp); end_lineno = 0; } else { int res; res = obj2ast_int(tmp, &end_lineno, arena); if (res != 0) goto failed; Py_CLEAR(tmp); } if (_PyObject_LookupAttrId(obj, &PyId_end_col_offset, &tmp) < 0) { return 1; } if (tmp == NULL || tmp == Py_None) { Py_CLEAR(tmp); end_col_offset = 0; } else { int res; res = obj2ast_int(tmp, &end_col_offset, arena); if (res != 0) goto failed; Py_CLEAR(tmp); } isinstance = PyObject_IsInstance(obj, (PyObject*)FunctionDef_type); if (isinstance == -1) { return 1; } if (isinstance) { identifier name; arguments_ty args; asdl_seq* body; asdl_seq* decorator_list; expr_ty returns; string type_comment; if (_PyObject_LookupAttrId(obj, &PyId_name, &tmp) < 0) { return 1; } if (tmp == NULL) { PyErr_SetString(PyExc_TypeError, "required field \"name\" missing from FunctionDef"); return 1; } 
else { int res; res = obj2ast_identifier(tmp, &name, arena); if (res != 0) goto failed; Py_CLEAR(tmp); } if (_PyObject_LookupAttrId(obj, &PyId_args, &tmp) < 0) { return 1; } if (tmp == NULL) { PyErr_SetString(PyExc_TypeError, "required field \"args\" missing from FunctionDef"); return 1; } else { int res; res = obj2ast_arguments(tmp, &args, arena); if (res != 0) goto failed; Py_CLEAR(tmp); } if (_PyObject_LookupAttrId(obj, &PyId_body, &tmp) < 0) { return 1; } if (tmp == NULL) { PyErr_SetString(PyExc_TypeError, "required field \"body\" missing from FunctionDef"); return 1; } else { int res; Py_ssize_t len; Py_ssize_t i; if (!PyList_Check(tmp)) { PyErr_Format(PyExc_TypeError, "FunctionDef field \"body\" must be a list, not a %.200s", tmp->ob_type->tp_name); goto failed; } len = PyList_GET_SIZE(tmp); body = _Py_asdl_seq_new(len, arena); if (body == NULL) goto failed; for (i = 0; i < len; i++) { stmt_ty val; res = obj2ast_stmt(PyList_GET_ITEM(tmp, i), &val, arena); if (res != 0) goto failed; if (len != PyList_GET_SIZE(tmp)) { PyErr_SetString(PyExc_RuntimeError, "FunctionDef field \"body\" changed size during iteration"); goto failed; } asdl_seq_SET(body, i, val); } Py_CLEAR(tmp); } if (_PyObject_LookupAttrId(obj, &PyId_decorator_list, &tmp) < 0) { return 1; } if (tmp == NULL) { PyErr_SetString(PyExc_TypeError, "required field \"decorator_list\" missing from FunctionDef"); return 1; } else { int res; Py_ssize_t len; Py_ssize_t i; if (!PyList_Check(tmp)) { PyErr_Format(PyExc_TypeError, "FunctionDef field \"decorator_list\" must be a list, not a %.200s", tmp->ob_type->tp_name); goto failed; } len = PyList_GET_SIZE(tmp); decorator_list = _Py_asdl_seq_new(len, arena); if (decorator_list == NULL) goto failed; for (i = 0; i < len; i++) { expr_ty val; res = obj2ast_expr(PyList_GET_ITEM(tmp, i), &val, arena); if (res != 0) goto failed; if (len != PyList_GET_SIZE(tmp)) { PyErr_SetString(PyExc_RuntimeError, "FunctionDef field \"decorator_list\" changed size during iteration"); 
goto failed; } asdl_seq_SET(decorator_list, i, val); } Py_CLEAR(tmp); } if (_PyObject_LookupAttrId(obj, &PyId_returns, &tmp) < 0) { return 1; } if (tmp == NULL || tmp == Py_None) { Py_CLEAR(tmp); returns = NULL; } else { int res; res = obj2ast_expr(tmp, &returns, arena); if (res != 0) goto failed; Py_CLEAR(tmp); } if (_PyObject_LookupAttrId(obj, &PyId_type_comment, &tmp) < 0) { return 1; } if (tmp == NULL || tmp == Py_None) { Py_CLEAR(tmp); type_comment = NULL; } else { int res; res = obj2ast_string(tmp, &type_comment, arena); if (res != 0) goto failed; Py_CLEAR(tmp); } *out = FunctionDef(name, args, body, decorator_list, returns, type_comment, lineno, col_offset, end_lineno, end_col_offset, arena); if (*out == NULL) goto failed; return 0; } isinstance = PyObject_IsInstance(obj, (PyObject*)AsyncFunctionDef_type); if (isinstance == -1) { return 1; } if (isinstance) { identifier name; arguments_ty args; asdl_seq* body; asdl_seq* decorator_list; expr_ty returns; string type_comment; if (_PyObject_LookupAttrId(obj, &PyId_name, &tmp) < 0) { return 1; } if (tmp == NULL) { PyErr_SetString(PyExc_TypeError, "required field \"name\" missing from AsyncFunctionDef"); return 1; } else { int res; res = obj2ast_identifier(tmp, &name, arena); if (res != 0) goto failed; Py_CLEAR(tmp); } if (_PyObject_LookupAttrId(obj, &PyId_args, &tmp) < 0) { return 1; } if (tmp == NULL) { PyErr_SetString(PyExc_TypeError, "required field \"args\" missing from AsyncFunctionDef"); return 1; } else { int res; res = obj2ast_arguments(tmp, &args, arena); if (res != 0) goto failed; Py_CLEAR(tmp); } if (_PyObject_LookupAttrId(obj, &PyId_body, &tmp) < 0) { return 1; } if (tmp == NULL) { PyErr_SetString(PyExc_TypeError, "required field \"body\" missing from AsyncFunctionDef"); return 1; } else { int res; Py_ssize_t len; Py_ssize_t i; if (!PyList_Check(tmp)) { PyErr_Format(PyExc_TypeError, "AsyncFunctionDef field \"body\" must be a list, not a %.200s", tmp->ob_type->tp_name); goto failed; } len = 
PyList_GET_SIZE(tmp); body = _Py_asdl_seq_new(len, arena); if (body == NULL) goto failed; for (i = 0; i < len; i++) { stmt_ty val; res = obj2ast_stmt(PyList_GET_ITEM(tmp, i), &val, arena); if (res != 0) goto failed; if (len != PyList_GET_SIZE(tmp)) { PyErr_SetString(PyExc_RuntimeError, "AsyncFunctionDef field \"body\" changed size during iteration"); goto failed; } asdl_seq_SET(body, i, val); } Py_CLEAR(tmp); } if (_PyObject_LookupAttrId(obj, &PyId_decorator_list, &tmp) < 0) { return 1; } if (tmp == NULL) { PyErr_SetString(PyExc_TypeError, "required field \"decorator_list\" missing from AsyncFunctionDef"); return 1; } else { int res; Py_ssize_t len; Py_ssize_t i; if (!PyList_Check(tmp)) { PyErr_Format(PyExc_TypeError, "AsyncFunctionDef field \"decorator_list\" must be a list, not a %.200s", tmp->ob_type->tp_name); goto failed; } len = PyList_GET_SIZE(tmp); decorator_list = _Py_asdl_seq_new(len, arena); if (decorator_list == NULL) goto failed; for (i = 0; i < len; i++) { expr_ty val; res = obj2ast_expr(PyList_GET_ITEM(tmp, i), &val, arena); if (res != 0) goto failed; if (len != PyList_GET_SIZE(tmp)) { PyErr_SetString(PyExc_RuntimeError, "AsyncFunctionDef field \"decorator_list\" changed size during iteration"); goto failed; } asdl_seq_SET(decorator_list, i, val); } Py_CLEAR(tmp); } if (_PyObject_LookupAttrId(obj, &PyId_returns, &tmp) < 0) { return 1; } if (tmp == NULL || tmp == Py_None) { Py_CLEAR(tmp); returns = NULL; } else { int res; res = obj2ast_expr(tmp, &returns, arena); if (res != 0) goto failed; Py_CLEAR(tmp); } if (_PyObject_LookupAttrId(obj, &PyId_type_comment, &tmp) < 0) { return 1; } if (tmp == NULL || tmp == Py_None) { Py_CLEAR(tmp); type_comment = NULL; } else { int res; res = obj2ast_string(tmp, &type_comment, arena); if (res != 0) goto failed; Py_CLEAR(tmp); } *out = AsyncFunctionDef(name, args, body, decorator_list, returns, type_comment, lineno, col_offset, end_lineno, end_col_offset, arena); if (*out == NULL) goto failed; return 0; } isinstance = 
PyObject_IsInstance(obj, (PyObject*)ClassDef_type); if (isinstance == -1) { return 1; } if (isinstance) { identifier name; asdl_seq* bases; asdl_seq* keywords; asdl_seq* body; asdl_seq* decorator_list; if (_PyObject_LookupAttrId(obj, &PyId_name, &tmp) < 0) { return 1; } if (tmp == NULL) { PyErr_SetString(PyExc_TypeError, "required field \"name\" missing from ClassDef"); return 1; } else { int res; res = obj2ast_identifier(tmp, &name, arena); if (res != 0) goto failed; Py_CLEAR(tmp); } if (_PyObject_LookupAttrId(obj, &PyId_bases, &tmp) < 0) { return 1; } if (tmp == NULL) { PyErr_SetString(PyExc_TypeError, "required field \"bases\" missing from ClassDef"); return 1; } else { int res; Py_ssize_t len; Py_ssize_t i; if (!PyList_Check(tmp)) { PyErr_Format(PyExc_TypeError, "ClassDef field \"bases\" must be a list, not a %.200s", tmp->ob_type->tp_name); goto failed; } len = PyList_GET_SIZE(tmp); bases = _Py_asdl_seq_new(len, arena); if (bases == NULL) goto failed; for (i = 0; i < len; i++) { expr_ty val; res = obj2ast_expr(PyList_GET_ITEM(tmp, i), &val, arena); if (res != 0) goto failed; if (len != PyList_GET_SIZE(tmp)) { PyErr_SetString(PyExc_RuntimeError, "ClassDef field \"bases\" changed size during iteration"); goto failed; } asdl_seq_SET(bases, i, val); } Py_CLEAR(tmp); } if (_PyObject_LookupAttrId(obj, &PyId_keywords, &tmp) < 0) { return 1; } if (tmp == NULL) { PyErr_SetString(PyExc_TypeError, "required field \"keywords\" missing from ClassDef"); return 1; } else { int res; Py_ssize_t len; Py_ssize_t i; if (!PyList_Check(tmp)) { PyErr_Format(PyExc_TypeError, "ClassDef field \"keywords\" must be a list, not a %.200s", tmp->ob_type->tp_name); goto failed; } len = PyList_GET_SIZE(tmp); keywords = _Py_asdl_seq_new(len, arena); if (keywords == NULL) goto failed; for (i = 0; i < len; i++) { keyword_ty val; res = obj2ast_keyword(PyList_GET_ITEM(tmp, i), &val, arena); if (res != 0) goto failed; if (len != PyList_GET_SIZE(tmp)) { PyErr_SetString(PyExc_RuntimeError, "ClassDef 
field \"keywords\" changed size during iteration"); goto failed; } asdl_seq_SET(keywords, i, val); } Py_CLEAR(tmp); } if (_PyObject_LookupAttrId(obj, &PyId_body, &tmp) < 0) { return 1; } if (tmp == NULL) { PyErr_SetString(PyExc_TypeError, "required field \"body\" missing from ClassDef"); return 1; } else { int res; Py_ssize_t len; Py_ssize_t i; if (!PyList_Check(tmp)) { PyErr_Format(PyExc_TypeError, "ClassDef field \"body\" must be a list, not a %.200s", tmp->ob_type->tp_name); goto failed; } len = PyList_GET_SIZE(tmp); body = _Py_asdl_seq_new(len, arena); if (body == NULL) goto failed; for (i = 0; i < len; i++) { stmt_ty val; res = obj2ast_stmt(PyList_GET_ITEM(tmp, i), &val, arena); if (res != 0) goto failed; if (len != PyList_GET_SIZE(tmp)) { PyErr_SetString(PyExc_RuntimeError, "ClassDef field \"body\" changed size during iteration"); goto failed; } asdl_seq_SET(body, i, val); } Py_CLEAR(tmp); } if (_PyObject_LookupAttrId(obj, &PyId_decorator_list, &tmp) < 0) { return 1; } if (tmp == NULL) { PyErr_SetString(PyExc_TypeError, "required field \"decorator_list\" missing from ClassDef"); return 1; } else { int res; Py_ssize_t len; Py_ssize_t i; if (!PyList_Check(tmp)) { PyErr_Format(PyExc_TypeError, "ClassDef field \"decorator_list\" must be a list, not a %.200s", tmp->ob_type->tp_name); goto failed; } len = PyList_GET_SIZE(tmp); decorator_list = _Py_asdl_seq_new(len, arena); if (decorator_list == NULL) goto failed; for (i = 0; i < len; i++) { expr_ty val; res = obj2ast_expr(PyList_GET_ITEM(tmp, i), &val, arena); if (res != 0) goto failed; if (len != PyList_GET_SIZE(tmp)) { PyErr_SetString(PyExc_RuntimeError, "ClassDef field \"decorator_list\" changed size during iteration"); goto failed; } asdl_seq_SET(decorator_list, i, val); } Py_CLEAR(tmp); } *out = ClassDef(name, bases, keywords, body, decorator_list, lineno, col_offset, end_lineno, end_col_offset, arena); if (*out == NULL) goto failed; return 0; } isinstance = PyObject_IsInstance(obj, (PyObject*)Return_type); if 
(isinstance == -1) { return 1; } if (isinstance) { expr_ty value; if (_PyObject_LookupAttrId(obj, &PyId_value, &tmp) < 0) { return 1; } if (tmp == NULL || tmp == Py_None) { Py_CLEAR(tmp); value = NULL; } else { int res; res = obj2ast_expr(tmp, &value, arena); if (res != 0) goto failed; Py_CLEAR(tmp); } *out = Return(value, lineno, col_offset, end_lineno, end_col_offset, arena); if (*out == NULL) goto failed; return 0; } isinstance = PyObject_IsInstance(obj, (PyObject*)Delete_type); if (isinstance == -1) { return 1; } if (isinstance) { asdl_seq* targets; if (_PyObject_LookupAttrId(obj, &PyId_targets, &tmp) < 0) { return 1; } if (tmp == NULL) { PyErr_SetString(PyExc_TypeError, "required field \"targets\" missing from Delete"); return 1; } else { int res; Py_ssize_t len; Py_ssize_t i; if (!PyList_Check(tmp)) { PyErr_Format(PyExc_TypeError, "Delete field \"targets\" must be a list, not a %.200s", tmp->ob_type->tp_name); goto failed; } len = PyList_GET_SIZE(tmp); targets = _Py_asdl_seq_new(len, arena); if (targets == NULL) goto failed; for (i = 0; i < len; i++) { expr_ty val; res = obj2ast_expr(PyList_GET_ITEM(tmp, i), &val, arena); if (res != 0) goto failed; if (len != PyList_GET_SIZE(tmp)) { PyErr_SetString(PyExc_RuntimeError, "Delete field \"targets\" changed size during iteration"); goto failed; } asdl_seq_SET(targets, i, val); } Py_CLEAR(tmp); } *out = Delete(targets, lineno, col_offset, end_lineno, end_col_offset, arena); if (*out == NULL) goto failed; return 0; } isinstance = PyObject_IsInstance(obj, (PyObject*)Assign_type); if (isinstance == -1) { return 1; } if (isinstance) { asdl_seq* targets; expr_ty value; string type_comment; if (_PyObject_LookupAttrId(obj, &PyId_targets, &tmp) < 0) { return 1; } if (tmp == NULL) { PyErr_SetString(PyExc_TypeError, "required field \"targets\" missing from Assign"); return 1; } else { int res; Py_ssize_t len; Py_ssize_t i; if (!PyList_Check(tmp)) { PyErr_Format(PyExc_TypeError, "Assign field \"targets\" must be a list, not a 
%.200s", tmp->ob_type->tp_name); goto failed; } len = PyList_GET_SIZE(tmp); targets = _Py_asdl_seq_new(len, arena); if (targets == NULL) goto failed; for (i = 0; i < len; i++) { expr_ty val; res = obj2ast_expr(PyList_GET_ITEM(tmp, i), &val, arena); if (res != 0) goto failed; if (len != PyList_GET_SIZE(tmp)) { PyErr_SetString(PyExc_RuntimeError, "Assign field \"targets\" changed size during iteration"); goto failed; } asdl_seq_SET(targets, i, val); } Py_CLEAR(tmp); } if (_PyObject_LookupAttrId(obj, &PyId_value, &tmp) < 0) { return 1; } if (tmp == NULL) { PyErr_SetString(PyExc_TypeError, "required field \"value\" missing from Assign"); return 1; } else { int res; res = obj2ast_expr(tmp, &value, arena); if (res != 0) goto failed; Py_CLEAR(tmp); } if (_PyObject_LookupAttrId(obj, &PyId_type_comment, &tmp) < 0) { return 1; } if (tmp == NULL || tmp == Py_None) { Py_CLEAR(tmp); type_comment = NULL; } else { int res; res = obj2ast_string(tmp, &type_comment, arena); if (res != 0) goto failed; Py_CLEAR(tmp); } *out = Assign(targets, value, type_comment, lineno, col_offset, end_lineno, end_col_offset, arena); if (*out == NULL) goto failed; return 0; } isinstance = PyObject_IsInstance(obj, (PyObject*)AugAssign_type); if (isinstance == -1) { return 1; } if (isinstance) { expr_ty target; operator_ty op; expr_ty value; if (_PyObject_LookupAttrId(obj, &PyId_target, &tmp) < 0) { return 1; } if (tmp == NULL) { PyErr_SetString(PyExc_TypeError, "required field \"target\" missing from AugAssign"); return 1; } else { int res; res = obj2ast_expr(tmp, &target, arena); if (res != 0) goto failed; Py_CLEAR(tmp); } if (_PyObject_LookupAttrId(obj, &PyId_op, &tmp) < 0) { return 1; } if (tmp == NULL) { PyErr_SetString(PyExc_TypeError, "required field \"op\" missing from AugAssign"); return 1; } else { int res; res = obj2ast_operator(tmp, &op, arena); if (res != 0) goto failed; Py_CLEAR(tmp); } if (_PyObject_LookupAttrId(obj, &PyId_value, &tmp) < 0) { return 1; } if (tmp == NULL) { 
PyErr_SetString(PyExc_TypeError, "required field \"value\" missing from AugAssign"); return 1; } else { int res; res = obj2ast_expr(tmp, &value, arena); if (res != 0) goto failed; Py_CLEAR(tmp); } *out = AugAssign(target, op, value, lineno, col_offset, end_lineno, end_col_offset, arena); if (*out == NULL) goto failed; return 0; } isinstance = PyObject_IsInstance(obj, (PyObject*)AnnAssign_type); if (isinstance == -1) { return 1; } if (isinstance) { expr_ty target; expr_ty annotation; expr_ty value; int simple; if (_PyObject_LookupAttrId(obj, &PyId_target, &tmp) < 0) { return 1; } if (tmp == NULL) { PyErr_SetString(PyExc_TypeError, "required field \"target\" missing from AnnAssign"); return 1; } else { int res; res = obj2ast_expr(tmp, &target, arena); if (res != 0) goto failed; Py_CLEAR(tmp); } if (_PyObject_LookupAttrId(obj, &PyId_annotation, &tmp) < 0) { return 1; } if (tmp == NULL) { PyErr_SetString(PyExc_TypeError, "required field \"annotation\" missing from AnnAssign"); return 1; } else { int res; res = obj2ast_expr(tmp, &annotation, arena); if (res != 0) goto failed; Py_CLEAR(tmp); } if (_PyObject_LookupAttrId(obj, &PyId_value, &tmp) < 0) { return 1; } if (tmp == NULL || tmp == Py_None) { Py_CLEAR(tmp); value = NULL; } else { int res; res = obj2ast_expr(tmp, &value, arena); if (res != 0) goto failed; Py_CLEAR(tmp); } if (_PyObject_LookupAttrId(obj, &PyId_simple, &tmp) < 0) { return 1; } if (tmp == NULL) { PyErr_SetString(PyExc_TypeError, "required field \"simple\" missing from AnnAssign"); return 1; } else { int res; res = obj2ast_int(tmp, &simple, arena); if (res != 0) goto failed; Py_CLEAR(tmp); } *out = AnnAssign(target, annotation, value, simple, lineno, col_offset, end_lineno, end_col_offset, arena); if (*out == NULL) goto failed; return 0; } isinstance = PyObject_IsInstance(obj, (PyObject*)For_type); if (isinstance == -1) { return 1; } if (isinstance) { expr_ty target; expr_ty iter; asdl_seq* body; asdl_seq* orelse; string type_comment; if 
(_PyObject_LookupAttrId(obj, &PyId_target, &tmp) < 0) { return 1; } if (tmp == NULL) { PyErr_SetString(PyExc_TypeError, "required field \"target\" missing from For"); return 1; } else { int res; res = obj2ast_expr(tmp, &target, arena); if (res != 0) goto failed; Py_CLEAR(tmp); } if (_PyObject_LookupAttrId(obj, &PyId_iter, &tmp) < 0) { return 1; } if (tmp == NULL) { PyErr_SetString(PyExc_TypeError, "required field \"iter\" missing from For"); return 1; } else { int res; res = obj2ast_expr(tmp, &iter, arena); if (res != 0) goto failed; Py_CLEAR(tmp); } if (_PyObject_LookupAttrId(obj, &PyId_body, &tmp) < 0) { return 1; } if (tmp == NULL) { PyErr_SetString(PyExc_TypeError, "required field \"body\" missing from For"); return 1; } else { int res; Py_ssize_t len; Py_ssize_t i; if (!PyList_Check(tmp)) { PyErr_Format(PyExc_TypeError, "For field \"body\" must be a list, not a %.200s", tmp->ob_type->tp_name); goto failed; } len = PyList_GET_SIZE(tmp); body = _Py_asdl_seq_new(len, arena); if (body == NULL) goto failed; for (i = 0; i < len; i++) { stmt_ty val; res = obj2ast_stmt(PyList_GET_ITEM(tmp, i), &val, arena); if (res != 0) goto failed; if (len != PyList_GET_SIZE(tmp)) { PyErr_SetString(PyExc_RuntimeError, "For field \"body\" changed size during iteration"); goto failed; } asdl_seq_SET(body, i, val); } Py_CLEAR(tmp); } if (_PyObject_LookupAttrId(obj, &PyId_orelse, &tmp) < 0) { return 1; } if (tmp == NULL) { PyErr_SetString(PyExc_TypeError, "required field \"orelse\" missing from For"); return 1; } else { int res; Py_ssize_t len; Py_ssize_t i; if (!PyList_Check(tmp)) { PyErr_Format(PyExc_TypeError, "For field \"orelse\" must be a list, not a %.200s", tmp->ob_type->tp_name); goto failed; } len = PyList_GET_SIZE(tmp); orelse = _Py_asdl_seq_new(len, arena); if (orelse == NULL) goto failed; for (i = 0; i < len; i++) { stmt_ty val; res = obj2ast_stmt(PyList_GET_ITEM(tmp, i), &val, arena); if (res != 0) goto failed; if (len != PyList_GET_SIZE(tmp)) { 
PyErr_SetString(PyExc_RuntimeError, "For field \"orelse\" changed size during iteration"); goto failed; } asdl_seq_SET(orelse, i, val); } Py_CLEAR(tmp); } if (_PyObject_LookupAttrId(obj, &PyId_type_comment, &tmp) < 0) { return 1; } if (tmp == NULL || tmp == Py_None) { Py_CLEAR(tmp); type_comment = NULL; } else { int res; res = obj2ast_string(tmp, &type_comment, arena); if (res != 0) goto failed; Py_CLEAR(tmp); } *out = For(target, iter, body, orelse, type_comment, lineno, col_offset, end_lineno, end_col_offset, arena); if (*out == NULL) goto failed; return 0; } isinstance = PyObject_IsInstance(obj, (PyObject*)AsyncFor_type); if (isinstance == -1) { return 1; } if (isinstance) { expr_ty target; expr_ty iter; asdl_seq* body; asdl_seq* orelse; string type_comment; if (_PyObject_LookupAttrId(obj, &PyId_target, &tmp) < 0) { return 1; } if (tmp == NULL) { PyErr_SetString(PyExc_TypeError, "required field \"target\" missing from AsyncFor"); return 1; } else { int res; res = obj2ast_expr(tmp, &target, arena); if (res != 0) goto failed; Py_CLEAR(tmp); } if (_PyObject_LookupAttrId(obj, &PyId_iter, &tmp) < 0) { return 1; } if (tmp == NULL) { PyErr_SetString(PyExc_TypeError, "required field \"iter\" missing from AsyncFor"); return 1; } else { int res; res = obj2ast_expr(tmp, &iter, arena); if (res != 0) goto failed; Py_CLEAR(tmp); } if (_PyObject_LookupAttrId(obj, &PyId_body, &tmp) < 0) { return 1; } if (tmp == NULL) { PyErr_SetString(PyExc_TypeError, "required field \"body\" missing from AsyncFor"); return 1; } else { int res; Py_ssize_t len; Py_ssize_t i; if (!PyList_Check(tmp)) { PyErr_Format(PyExc_TypeError, "AsyncFor field \"body\" must be a list, not a %.200s", tmp->ob_type->tp_name); goto failed; } len = PyList_GET_SIZE(tmp); body = _Py_asdl_seq_new(len, arena); if (body == NULL) goto failed; for (i = 0; i < len; i++) { stmt_ty val; res = obj2ast_stmt(PyList_GET_ITEM(tmp, i), &val, arena); if (res != 0) goto failed; if (len != PyList_GET_SIZE(tmp)) { 
PyErr_SetString(PyExc_RuntimeError, "AsyncFor field \"body\" changed size during iteration"); goto failed; } asdl_seq_SET(body, i, val); } Py_CLEAR(tmp); } if (_PyObject_LookupAttrId(obj, &PyId_orelse, &tmp) < 0) { return 1; } if (tmp == NULL) { PyErr_SetString(PyExc_TypeError, "required field \"orelse\" missing from AsyncFor"); return 1; } else { int res; Py_ssize_t len; Py_ssize_t i; if (!PyList_Check(tmp)) { PyErr_Format(PyExc_TypeError, "AsyncFor field \"orelse\" must be a list, not a %.200s", tmp->ob_type->tp_name); goto failed; } len = PyList_GET_SIZE(tmp); orelse = _Py_asdl_seq_new(len, arena); if (orelse == NULL) goto failed; for (i = 0; i < len; i++) { stmt_ty val; res = obj2ast_stmt(PyList_GET_ITEM(tmp, i), &val, arena); if (res != 0) goto failed; if (len != PyList_GET_SIZE(tmp)) { PyErr_SetString(PyExc_RuntimeError, "AsyncFor field \"orelse\" changed size during iteration"); goto failed; } asdl_seq_SET(orelse, i, val); } Py_CLEAR(tmp); } if (_PyObject_LookupAttrId(obj, &PyId_type_comment, &tmp) < 0) { return 1; } if (tmp == NULL || tmp == Py_None) { Py_CLEAR(tmp); type_comment = NULL; } else { int res; res = obj2ast_string(tmp, &type_comment, arena); if (res != 0) goto failed; Py_CLEAR(tmp); } *out = AsyncFor(target, iter, body, orelse, type_comment, lineno, col_offset, end_lineno, end_col_offset, arena); if (*out == NULL) goto failed; return 0; } isinstance = PyObject_IsInstance(obj, (PyObject*)While_type); if (isinstance == -1) { return 1; } if (isinstance) { expr_ty test; asdl_seq* body; asdl_seq* orelse; if (_PyObject_LookupAttrId(obj, &PyId_test, &tmp) < 0) { return 1; } if (tmp == NULL) { PyErr_SetString(PyExc_TypeError, "required field \"test\" missing from While"); return 1; } else { int res; res = obj2ast_expr(tmp, &test, arena); if (res != 0) goto failed; Py_CLEAR(tmp); } if (_PyObject_LookupAttrId(obj, &PyId_body, &tmp) < 0) { return 1; } if (tmp == NULL) { PyErr_SetString(PyExc_TypeError, "required field \"body\" missing from While"); return 
1; } else { int res; Py_ssize_t len; Py_ssize_t i; if (!PyList_Check(tmp)) { PyErr_Format(PyExc_TypeError, "While field \"body\" must be a list, not a %.200s", tmp->ob_type->tp_name); goto failed; } len = PyList_GET_SIZE(tmp); body = _Py_asdl_seq_new(len, arena); if (body == NULL) goto failed; for (i = 0; i < len; i++) { stmt_ty val; res = obj2ast_stmt(PyList_GET_ITEM(tmp, i), &val, arena); if (res != 0) goto failed; if (len != PyList_GET_SIZE(tmp)) { PyErr_SetString(PyExc_RuntimeError, "While field \"body\" changed size during iteration"); goto failed; } asdl_seq_SET(body, i, val); } Py_CLEAR(tmp); } if (_PyObject_LookupAttrId(obj, &PyId_orelse, &tmp) < 0) { return 1; } if (tmp == NULL) { PyErr_SetString(PyExc_TypeError, "required field \"orelse\" missing from While"); return 1; } else { int res; Py_ssize_t len; Py_ssize_t i; if (!PyList_Check(tmp)) { PyErr_Format(PyExc_TypeError, "While field \"orelse\" must be a list, not a %.200s", tmp->ob_type->tp_name); goto failed; } len = PyList_GET_SIZE(tmp); orelse = _Py_asdl_seq_new(len, arena); if (orelse == NULL) goto failed; for (i = 0; i < len; i++) { stmt_ty val; res = obj2ast_stmt(PyList_GET_ITEM(tmp, i), &val, arena); if (res != 0) goto failed; if (len != PyList_GET_SIZE(tmp)) { PyErr_SetString(PyExc_RuntimeError, "While field \"orelse\" changed size during iteration"); goto failed; } asdl_seq_SET(orelse, i, val); } Py_CLEAR(tmp); } *out = While(test, body, orelse, lineno, col_offset, end_lineno, end_col_offset, arena); if (*out == NULL) goto failed; return 0; } isinstance = PyObject_IsInstance(obj, (PyObject*)If_type); if (isinstance == -1) { return 1; } if (isinstance) { expr_ty test; asdl_seq* body; asdl_seq* orelse; if (_PyObject_LookupAttrId(obj, &PyId_test, &tmp) < 0) { return 1; } if (tmp == NULL) { PyErr_SetString(PyExc_TypeError, "required field \"test\" missing from If"); return 1; } else { int res; res = obj2ast_expr(tmp, &test, arena); if (res != 0) goto failed; Py_CLEAR(tmp); } if 
(_PyObject_LookupAttrId(obj, &PyId_body, &tmp) < 0) { return 1; } if (tmp == NULL) { PyErr_SetString(PyExc_TypeError, "required field \"body\" missing from If"); return 1; } else { int res; Py_ssize_t len; Py_ssize_t i; if (!PyList_Check(tmp)) { PyErr_Format(PyExc_TypeError, "If field \"body\" must be a list, not a %.200s", tmp->ob_type->tp_name); goto failed; } len = PyList_GET_SIZE(tmp); body = _Py_asdl_seq_new(len, arena); if (body == NULL) goto failed; for (i = 0; i < len; i++) { stmt_ty val; res = obj2ast_stmt(PyList_GET_ITEM(tmp, i), &val, arena); if (res != 0) goto failed; if (len != PyList_GET_SIZE(tmp)) { PyErr_SetString(PyExc_RuntimeError, "If field \"body\" changed size during iteration"); goto failed; } asdl_seq_SET(body, i, val); } Py_CLEAR(tmp); } if (_PyObject_LookupAttrId(obj, &PyId_orelse, &tmp) < 0) { return 1; } if (tmp == NULL) { PyErr_SetString(PyExc_TypeError, "required field \"orelse\" missing from If"); return 1; } else { int res; Py_ssize_t len; Py_ssize_t i; if (!PyList_Check(tmp)) { PyErr_Format(PyExc_TypeError, "If field \"orelse\" must be a list, not a %.200s", tmp->ob_type->tp_name); goto failed; } len = PyList_GET_SIZE(tmp); orelse = _Py_asdl_seq_new(len, arena); if (orelse == NULL) goto failed; for (i = 0; i < len; i++) { stmt_ty val; res = obj2ast_stmt(PyList_GET_ITEM(tmp, i), &val, arena); if (res != 0) goto failed; if (len != PyList_GET_SIZE(tmp)) { PyErr_SetString(PyExc_RuntimeError, "If field \"orelse\" changed size during iteration"); goto failed; } asdl_seq_SET(orelse, i, val); } Py_CLEAR(tmp); } *out = If(test, body, orelse, lineno, col_offset, end_lineno, end_col_offset, arena); if (*out == NULL) goto failed; return 0; } isinstance = PyObject_IsInstance(obj, (PyObject*)With_type); if (isinstance == -1) { return 1; } if (isinstance) { asdl_seq* items; asdl_seq* body; string type_comment; if (_PyObject_LookupAttrId(obj, &PyId_items, &tmp) < 0) { return 1; } if (tmp == NULL) { PyErr_SetString(PyExc_TypeError, "required field 
\"items\" missing from With"); return 1; } else { int res; Py_ssize_t len; Py_ssize_t i; if (!PyList_Check(tmp)) { PyErr_Format(PyExc_TypeError, "With field \"items\" must be a list, not a %.200s", tmp->ob_type->tp_name); goto failed; } len = PyList_GET_SIZE(tmp); items = _Py_asdl_seq_new(len, arena); if (items == NULL) goto failed; for (i = 0; i < len; i++) { withitem_ty val; res = obj2ast_withitem(PyList_GET_ITEM(tmp, i), &val, arena); if (res != 0) goto failed; if (len != PyList_GET_SIZE(tmp)) { PyErr_SetString(PyExc_RuntimeError, "With field \"items\" changed size during iteration"); goto failed; } asdl_seq_SET(items, i, val); } Py_CLEAR(tmp); } if (_PyObject_LookupAttrId(obj, &PyId_body, &tmp) < 0) { return 1; } if (tmp == NULL) { PyErr_SetString(PyExc_TypeError, "required field \"body\" missing from With"); return 1; } else { int res; Py_ssize_t len; Py_ssize_t i; if (!PyList_Check(tmp)) { PyErr_Format(PyExc_TypeError, "With field \"body\" must be a list, not a %.200s", tmp->ob_type->tp_name); goto failed; } len = PyList_GET_SIZE(tmp); body = _Py_asdl_seq_new(len, arena); if (body == NULL) goto failed; for (i = 0; i < len; i++) { stmt_ty val; res = obj2ast_stmt(PyList_GET_ITEM(tmp, i), &val, arena); if (res != 0) goto failed; if (len != PyList_GET_SIZE(tmp)) { PyErr_SetString(PyExc_RuntimeError, "With field \"body\" changed size during iteration"); goto failed; } asdl_seq_SET(body, i, val); } Py_CLEAR(tmp); } if (_PyObject_LookupAttrId(obj, &PyId_type_comment, &tmp) < 0) { return 1; } if (tmp == NULL || tmp == Py_None) { Py_CLEAR(tmp); type_comment = NULL; } else { int res; res = obj2ast_string(tmp, &type_comment, arena); if (res != 0) goto failed; Py_CLEAR(tmp); } *out = With(items, body, type_comment, lineno, col_offset, end_lineno, end_col_offset, arena); if (*out == NULL) goto failed; return 0; } isinstance = PyObject_IsInstance(obj, (PyObject*)AsyncWith_type); if (isinstance == -1) { return 1; } if (isinstance) { asdl_seq* items; asdl_seq* body; string 
type_comment; if (_PyObject_LookupAttrId(obj, &PyId_items, &tmp) < 0) { return 1; } if (tmp == NULL) { PyErr_SetString(PyExc_TypeError, "required field \"items\" missing from AsyncWith"); return 1; } else { int res; Py_ssize_t len; Py_ssize_t i; if (!PyList_Check(tmp)) { PyErr_Format(PyExc_TypeError, "AsyncWith field \"items\" must be a list, not a %.200s", tmp->ob_type->tp_name); goto failed; } len = PyList_GET_SIZE(tmp); items = _Py_asdl_seq_new(len, arena); if (items == NULL) goto failed; for (i = 0; i < len; i++) { withitem_ty val; res = obj2ast_withitem(PyList_GET_ITEM(tmp, i), &val, arena); if (res != 0) goto failed; if (len != PyList_GET_SIZE(tmp)) { PyErr_SetString(PyExc_RuntimeError, "AsyncWith field \"items\" changed size during iteration"); goto failed; } asdl_seq_SET(items, i, val); } Py_CLEAR(tmp); } if (_PyObject_LookupAttrId(obj, &PyId_body, &tmp) < 0) { return 1; } if (tmp == NULL) { PyErr_SetString(PyExc_TypeError, "required field \"body\" missing from AsyncWith"); return 1; } else { int res; Py_ssize_t len; Py_ssize_t i; if (!PyList_Check(tmp)) { PyErr_Format(PyExc_TypeError, "AsyncWith field \"body\" must be a list, not a %.200s", tmp->ob_type->tp_name); goto failed; } len = PyList_GET_SIZE(tmp); body = _Py_asdl_seq_new(len, arena); if (body == NULL) goto failed; for (i = 0; i < len; i++) { stmt_ty val; res = obj2ast_stmt(PyList_GET_ITEM(tmp, i), &val, arena); if (res != 0) goto failed; if (len != PyList_GET_SIZE(tmp)) { PyErr_SetString(PyExc_RuntimeError, "AsyncWith field \"body\" changed size during iteration"); goto failed; } asdl_seq_SET(body, i, val); } Py_CLEAR(tmp); } if (_PyObject_LookupAttrId(obj, &PyId_type_comment, &tmp) < 0) { return 1; } if (tmp == NULL || tmp == Py_None) { Py_CLEAR(tmp); type_comment = NULL; } else { int res; res = obj2ast_string(tmp, &type_comment, arena); if (res != 0) goto failed; Py_CLEAR(tmp); } *out = AsyncWith(items, body, type_comment, lineno, col_offset, end_lineno, end_col_offset, arena); if (*out == NULL) 
goto failed; return 0; } isinstance = PyObject_IsInstance(obj, (PyObject*)Raise_type); if (isinstance == -1) { return 1; } if (isinstance) { expr_ty exc; expr_ty cause; if (_PyObject_LookupAttrId(obj, &PyId_exc, &tmp) < 0) { return 1; } if (tmp == NULL || tmp == Py_None) { Py_CLEAR(tmp); exc = NULL; } else { int res; res = obj2ast_expr(tmp, &exc, arena); if (res != 0) goto failed; Py_CLEAR(tmp); } if (_PyObject_LookupAttrId(obj, &PyId_cause, &tmp) < 0) { return 1; } if (tmp == NULL || tmp == Py_None) { Py_CLEAR(tmp); cause = NULL; } else { int res; res = obj2ast_expr(tmp, &cause, arena); if (res != 0) goto failed; Py_CLEAR(tmp); } *out = Raise(exc, cause, lineno, col_offset, end_lineno, end_col_offset, arena); if (*out == NULL) goto failed; return 0; } isinstance = PyObject_IsInstance(obj, (PyObject*)Try_type); if (isinstance == -1) { return 1; } if (isinstance) { asdl_seq* body; asdl_seq* handlers; asdl_seq* orelse; asdl_seq* finalbody; if (_PyObject_LookupAttrId(obj, &PyId_body, &tmp) < 0) { return 1; } if (tmp == NULL) { PyErr_SetString(PyExc_TypeError, "required field \"body\" missing from Try"); return 1; } else { int res; Py_ssize_t len; Py_ssize_t i; if (!PyList_Check(tmp)) { PyErr_Format(PyExc_TypeError, "Try field \"body\" must be a list, not a %.200s", tmp->ob_type->tp_name); goto failed; } len = PyList_GET_SIZE(tmp); body = _Py_asdl_seq_new(len, arena); if (body == NULL) goto failed; for (i = 0; i < len; i++) { stmt_ty val; res = obj2ast_stmt(PyList_GET_ITEM(tmp, i), &val, arena); if (res != 0) goto failed; if (len != PyList_GET_SIZE(tmp)) { PyErr_SetString(PyExc_RuntimeError, "Try field \"body\" changed size during iteration"); goto failed; } asdl_seq_SET(body, i, val); } Py_CLEAR(tmp); } if (_PyObject_LookupAttrId(obj, &PyId_handlers, &tmp) < 0) { return 1; } if (tmp == NULL) { PyErr_SetString(PyExc_TypeError, "required field \"handlers\" missing from Try"); return 1; } else { int res; Py_ssize_t len; Py_ssize_t i; if (!PyList_Check(tmp)) { 
PyErr_Format(PyExc_TypeError, "Try field \"handlers\" must be a list, not a %.200s", tmp->ob_type->tp_name); goto failed; } len = PyList_GET_SIZE(tmp); handlers = _Py_asdl_seq_new(len, arena); if (handlers == NULL) goto failed; for (i = 0; i < len; i++) { excepthandler_ty val; res = obj2ast_excepthandler(PyList_GET_ITEM(tmp, i), &val, arena); if (res != 0) goto failed; if (len != PyList_GET_SIZE(tmp)) { PyErr_SetString(PyExc_RuntimeError, "Try field \"handlers\" changed size during iteration"); goto failed; } asdl_seq_SET(handlers, i, val); } Py_CLEAR(tmp); } if (_PyObject_LookupAttrId(obj, &PyId_orelse, &tmp) < 0) { return 1; } if (tmp == NULL) { PyErr_SetString(PyExc_TypeError, "required field \"orelse\" missing from Try"); return 1; } else { int res; Py_ssize_t len; Py_ssize_t i; if (!PyList_Check(tmp)) { PyErr_Format(PyExc_TypeError, "Try field \"orelse\" must be a list, not a %.200s", tmp->ob_type->tp_name); goto failed; } len = PyList_GET_SIZE(tmp); orelse = _Py_asdl_seq_new(len, arena); if (orelse == NULL) goto failed; for (i = 0; i < len; i++) { stmt_ty val; res = obj2ast_stmt(PyList_GET_ITEM(tmp, i), &val, arena); if (res != 0) goto failed; if (len != PyList_GET_SIZE(tmp)) { PyErr_SetString(PyExc_RuntimeError, "Try field \"orelse\" changed size during iteration"); goto failed; } asdl_seq_SET(orelse, i, val); } Py_CLEAR(tmp); } if (_PyObject_LookupAttrId(obj, &PyId_finalbody, &tmp) < 0) { return 1; } if (tmp == NULL) { PyErr_SetString(PyExc_TypeError, "required field \"finalbody\" missing from Try"); return 1; } else { int res; Py_ssize_t len; Py_ssize_t i; if (!PyList_Check(tmp)) { PyErr_Format(PyExc_TypeError, "Try field \"finalbody\" must be a list, not a %.200s", tmp->ob_type->tp_name); goto failed; } len = PyList_GET_SIZE(tmp); finalbody = _Py_asdl_seq_new(len, arena); if (finalbody == NULL) goto failed; for (i = 0; i < len; i++) { stmt_ty val; res = obj2ast_stmt(PyList_GET_ITEM(tmp, i), &val, arena); if (res != 0) goto failed; if (len != 
PyList_GET_SIZE(tmp)) { PyErr_SetString(PyExc_RuntimeError, "Try field \"finalbody\" changed size during iteration"); goto failed; } asdl_seq_SET(finalbody, i, val); } Py_CLEAR(tmp); } *out = Try(body, handlers, orelse, finalbody, lineno, col_offset, end_lineno, end_col_offset, arena); if (*out == NULL) goto failed; return 0; } isinstance = PyObject_IsInstance(obj, (PyObject*)Assert_type); if (isinstance == -1) { return 1; } if (isinstance) { expr_ty test; expr_ty msg; if (_PyObject_LookupAttrId(obj, &PyId_test, &tmp) < 0) { return 1; } if (tmp == NULL) { PyErr_SetString(PyExc_TypeError, "required field \"test\" missing from Assert"); return 1; } else { int res; res = obj2ast_expr(tmp, &test, arena); if (res != 0) goto failed; Py_CLEAR(tmp); } if (_PyObject_LookupAttrId(obj, &PyId_msg, &tmp) < 0) { return 1; } if (tmp == NULL || tmp == Py_None) { Py_CLEAR(tmp); msg = NULL; } else { int res; res = obj2ast_expr(tmp, &msg, arena); if (res != 0) goto failed; Py_CLEAR(tmp); } *out = Assert(test, msg, lineno, col_offset, end_lineno, end_col_offset, arena); if (*out == NULL) goto failed; return 0; } isinstance = PyObject_IsInstance(obj, (PyObject*)Import_type); if (isinstance == -1) { return 1; } if (isinstance) { asdl_seq* names; if (_PyObject_LookupAttrId(obj, &PyId_names, &tmp) < 0) { return 1; } if (tmp == NULL) { PyErr_SetString(PyExc_TypeError, "required field \"names\" missing from Import"); return 1; } else { int res; Py_ssize_t len; Py_ssize_t i; if (!PyList_Check(tmp)) { PyErr_Format(PyExc_TypeError, "Import field \"names\" must be a list, not a %.200s", tmp->ob_type->tp_name); goto failed; } len = PyList_GET_SIZE(tmp); names = _Py_asdl_seq_new(len, arena); if (names == NULL) goto failed; for (i = 0; i < len; i++) { alias_ty val; res = obj2ast_alias(PyList_GET_ITEM(tmp, i), &val, arena); if (res != 0) goto failed; if (len != PyList_GET_SIZE(tmp)) { PyErr_SetString(PyExc_RuntimeError, "Import field \"names\" changed size during iteration"); goto failed; } 
asdl_seq_SET(names, i, val); } Py_CLEAR(tmp); } *out = Import(names, lineno, col_offset, end_lineno, end_col_offset, arena); if (*out == NULL) goto failed; return 0; } isinstance = PyObject_IsInstance(obj, (PyObject*)ImportFrom_type); if (isinstance == -1) { return 1; } if (isinstance) { identifier module; asdl_seq* names; int level; if (_PyObject_LookupAttrId(obj, &PyId_module, &tmp) < 0) { return 1; } if (tmp == NULL || tmp == Py_None) { Py_CLEAR(tmp); module = NULL; } else { int res; res = obj2ast_identifier(tmp, &module, arena); if (res != 0) goto failed; Py_CLEAR(tmp); } if (_PyObject_LookupAttrId(obj, &PyId_names, &tmp) < 0) { return 1; } if (tmp == NULL) { PyErr_SetString(PyExc_TypeError, "required field \"names\" missing from ImportFrom"); return 1; } else { int res; Py_ssize_t len; Py_ssize_t i; if (!PyList_Check(tmp)) { PyErr_Format(PyExc_TypeError, "ImportFrom field \"names\" must be a list, not a %.200s", tmp->ob_type->tp_name); goto failed; } len = PyList_GET_SIZE(tmp); names = _Py_asdl_seq_new(len, arena); if (names == NULL) goto failed; for (i = 0; i < len; i++) { alias_ty val; res = obj2ast_alias(PyList_GET_ITEM(tmp, i), &val, arena); if (res != 0) goto failed; if (len != PyList_GET_SIZE(tmp)) { PyErr_SetString(PyExc_RuntimeError, "ImportFrom field \"names\" changed size during iteration"); goto failed; } asdl_seq_SET(names, i, val); } Py_CLEAR(tmp); } if (_PyObject_LookupAttrId(obj, &PyId_level, &tmp) < 0) { return 1; } if (tmp == NULL || tmp == Py_None) { Py_CLEAR(tmp); level = 0; } else { int res; res = obj2ast_int(tmp, &level, arena); if (res != 0) goto failed; Py_CLEAR(tmp); } *out = ImportFrom(module, names, level, lineno, col_offset, end_lineno, end_col_offset, arena); if (*out == NULL) goto failed; return 0; } isinstance = PyObject_IsInstance(obj, (PyObject*)Global_type); if (isinstance == -1) { return 1; } if (isinstance) { asdl_seq* names; if (_PyObject_LookupAttrId(obj, &PyId_names, &tmp) < 0) { return 1; } if (tmp == NULL) { 
PyErr_SetString(PyExc_TypeError, "required field \"names\" missing from Global"); return 1; } else { int res; Py_ssize_t len; Py_ssize_t i; if (!PyList_Check(tmp)) { PyErr_Format(PyExc_TypeError, "Global field \"names\" must be a list, not a %.200s", tmp->ob_type->tp_name); goto failed; } len = PyList_GET_SIZE(tmp); names = _Py_asdl_seq_new(len, arena); if (names == NULL) goto failed; for (i = 0; i < len; i++) { identifier val; res = obj2ast_identifier(PyList_GET_ITEM(tmp, i), &val, arena); if (res != 0) goto failed; if (len != PyList_GET_SIZE(tmp)) { PyErr_SetString(PyExc_RuntimeError, "Global field \"names\" changed size during iteration"); goto failed; } asdl_seq_SET(names, i, val); } Py_CLEAR(tmp); } *out = Global(names, lineno, col_offset, end_lineno, end_col_offset, arena); if (*out == NULL) goto failed; return 0; } isinstance = PyObject_IsInstance(obj, (PyObject*)Nonlocal_type); if (isinstance == -1) { return 1; } if (isinstance) { asdl_seq* names; if (_PyObject_LookupAttrId(obj, &PyId_names, &tmp) < 0) { return 1; } if (tmp == NULL) { PyErr_SetString(PyExc_TypeError, "required field \"names\" missing from Nonlocal"); return 1; } else { int res; Py_ssize_t len; Py_ssize_t i; if (!PyList_Check(tmp)) { PyErr_Format(PyExc_TypeError, "Nonlocal field \"names\" must be a list, not a %.200s", tmp->ob_type->tp_name); goto failed; } len = PyList_GET_SIZE(tmp); names = _Py_asdl_seq_new(len, arena); if (names == NULL) goto failed; for (i = 0; i < len; i++) { identifier val; res = obj2ast_identifier(PyList_GET_ITEM(tmp, i), &val, arena); if (res != 0) goto failed; if (len != PyList_GET_SIZE(tmp)) { PyErr_SetString(PyExc_RuntimeError, "Nonlocal field \"names\" changed size during iteration"); goto failed; } asdl_seq_SET(names, i, val); } Py_CLEAR(tmp); } *out = Nonlocal(names, lineno, col_offset, end_lineno, end_col_offset, arena); if (*out == NULL) goto failed; return 0; } isinstance = PyObject_IsInstance(obj, (PyObject*)Expr_type); if (isinstance == -1) { return 1; } 
if (isinstance) { expr_ty value; if (_PyObject_LookupAttrId(obj, &PyId_value, &tmp) < 0) { return 1; } if (tmp == NULL) { PyErr_SetString(PyExc_TypeError, "required field \"value\" missing from Expr"); return 1; } else { int res; res = obj2ast_expr(tmp, &value, arena); if (res != 0) goto failed; Py_CLEAR(tmp); } *out = Expr(value, lineno, col_offset, end_lineno, end_col_offset, arena); if (*out == NULL) goto failed; return 0; } isinstance = PyObject_IsInstance(obj, (PyObject*)Pass_type); if (isinstance == -1) { return 1; } if (isinstance) { *out = Pass(lineno, col_offset, end_lineno, end_col_offset, arena); if (*out == NULL) goto failed; return 0; } isinstance = PyObject_IsInstance(obj, (PyObject*)Break_type); if (isinstance == -1) { return 1; } if (isinstance) { *out = Break(lineno, col_offset, end_lineno, end_col_offset, arena); if (*out == NULL) goto failed; return 0; } isinstance = PyObject_IsInstance(obj, (PyObject*)Continue_type); if (isinstance == -1) { return 1; } if (isinstance) { *out = Continue(lineno, col_offset, end_lineno, end_col_offset, arena); if (*out == NULL) goto failed; return 0; } PyErr_Format(PyExc_TypeError, "expected some sort of stmt, but got %R", obj); failed: Py_XDECREF(tmp); return 1; } int obj2ast_expr(PyObject* obj, expr_ty* out, PyArena* arena) { int isinstance; PyObject *tmp = NULL; int lineno; int col_offset; int end_lineno; int end_col_offset; if (obj == Py_None) { *out = NULL; return 0; } if (_PyObject_LookupAttrId(obj, &PyId_lineno, &tmp) < 0) { return 1; } if (tmp == NULL) { PyErr_SetString(PyExc_TypeError, "required field \"lineno\" missing from expr"); return 1; } else { int res; res = obj2ast_int(tmp, &lineno, arena); if (res != 0) goto failed; Py_CLEAR(tmp); } if (_PyObject_LookupAttrId(obj, &PyId_col_offset, &tmp) < 0) { return 1; } if (tmp == NULL) { PyErr_SetString(PyExc_TypeError, "required field \"col_offset\" missing from expr"); return 1; } else { int res; res = obj2ast_int(tmp, &col_offset, arena); if (res != 0) 
goto failed; Py_CLEAR(tmp); } if (_PyObject_LookupAttrId(obj, &PyId_end_lineno, &tmp) < 0) { return 1; } if (tmp == NULL || tmp == Py_None) { Py_CLEAR(tmp); end_lineno = 0; } else { int res; res = obj2ast_int(tmp, &end_lineno, arena); if (res != 0) goto failed; Py_CLEAR(tmp); } if (_PyObject_LookupAttrId(obj, &PyId_end_col_offset, &tmp) < 0) { return 1; } if (tmp == NULL || tmp == Py_None) { Py_CLEAR(tmp); end_col_offset = 0; } else { int res; res = obj2ast_int(tmp, &end_col_offset, arena); if (res != 0) goto failed; Py_CLEAR(tmp); } isinstance = PyObject_IsInstance(obj, (PyObject*)BoolOp_type); if (isinstance == -1) { return 1; } if (isinstance) { boolop_ty op; asdl_seq* values; if (_PyObject_LookupAttrId(obj, &PyId_op, &tmp) < 0) { return 1; } if (tmp == NULL) { PyErr_SetString(PyExc_TypeError, "required field \"op\" missing from BoolOp"); return 1; } else { int res; res = obj2ast_boolop(tmp, &op, arena); if (res != 0) goto failed; Py_CLEAR(tmp); } if (_PyObject_LookupAttrId(obj, &PyId_values, &tmp) < 0) { return 1; } if (tmp == NULL) { PyErr_SetString(PyExc_TypeError, "required field \"values\" missing from BoolOp"); return 1; } else { int res; Py_ssize_t len; Py_ssize_t i; if (!PyList_Check(tmp)) { PyErr_Format(PyExc_TypeError, "BoolOp field \"values\" must be a list, not a %.200s", tmp->ob_type->tp_name); goto failed; } len = PyList_GET_SIZE(tmp); values = _Py_asdl_seq_new(len, arena); if (values == NULL) goto failed; for (i = 0; i < len; i++) { expr_ty val; res = obj2ast_expr(PyList_GET_ITEM(tmp, i), &val, arena); if (res != 0) goto failed; if (len != PyList_GET_SIZE(tmp)) { PyErr_SetString(PyExc_RuntimeError, "BoolOp field \"values\" changed size during iteration"); goto failed; } asdl_seq_SET(values, i, val); } Py_CLEAR(tmp); } *out = BoolOp(op, values, lineno, col_offset, end_lineno, end_col_offset, arena); if (*out == NULL) goto failed; return 0; } isinstance = PyObject_IsInstance(obj, (PyObject*)NamedExpr_type); if (isinstance == -1) { return 1; } if 
(isinstance) { expr_ty target; expr_ty value; if (_PyObject_LookupAttrId(obj, &PyId_target, &tmp) < 0) { return 1; } if (tmp == NULL) { PyErr_SetString(PyExc_TypeError, "required field \"target\" missing from NamedExpr"); return 1; } else { int res; res = obj2ast_expr(tmp, &target, arena); if (res != 0) goto failed; Py_CLEAR(tmp); } if (_PyObject_LookupAttrId(obj, &PyId_value, &tmp) < 0) { return 1; } if (tmp == NULL) { PyErr_SetString(PyExc_TypeError, "required field \"value\" missing from NamedExpr"); return 1; } else { int res; res = obj2ast_expr(tmp, &value, arena); if (res != 0) goto failed; Py_CLEAR(tmp); } *out = NamedExpr(target, value, lineno, col_offset, end_lineno, end_col_offset, arena); if (*out == NULL) goto failed; return 0; } isinstance = PyObject_IsInstance(obj, (PyObject*)BinOp_type); if (isinstance == -1) { return 1; } if (isinstance) { expr_ty left; operator_ty op; expr_ty right; if (_PyObject_LookupAttrId(obj, &PyId_left, &tmp) < 0) { return 1; } if (tmp == NULL) { PyErr_SetString(PyExc_TypeError, "required field \"left\" missing from BinOp"); return 1; } else { int res; res = obj2ast_expr(tmp, &left, arena); if (res != 0) goto failed; Py_CLEAR(tmp); } if (_PyObject_LookupAttrId(obj, &PyId_op, &tmp) < 0) { return 1; } if (tmp == NULL) { PyErr_SetString(PyExc_TypeError, "required field \"op\" missing from BinOp"); return 1; } else { int res; res = obj2ast_operator(tmp, &op, arena); if (res != 0) goto failed; Py_CLEAR(tmp); } if (_PyObject_LookupAttrId(obj, &PyId_right, &tmp) < 0) { return 1; } if (tmp == NULL) { PyErr_SetString(PyExc_TypeError, "required field \"right\" missing from BinOp"); return 1; } else { int res; res = obj2ast_expr(tmp, &right, arena); if (res != 0) goto failed; Py_CLEAR(tmp); } *out = BinOp(left, op, right, lineno, col_offset, end_lineno, end_col_offset, arena); if (*out == NULL) goto failed; return 0; } isinstance = PyObject_IsInstance(obj, (PyObject*)UnaryOp_type); if (isinstance == -1) { return 1; } if (isinstance) { 
unaryop_ty op; expr_ty operand; if (_PyObject_LookupAttrId(obj, &PyId_op, &tmp) < 0) { return 1; } if (tmp == NULL) { PyErr_SetString(PyExc_TypeError, "required field \"op\" missing from UnaryOp"); return 1; } else { int res; res = obj2ast_unaryop(tmp, &op, arena); if (res != 0) goto failed; Py_CLEAR(tmp); } if (_PyObject_LookupAttrId(obj, &PyId_operand, &tmp) < 0) { return 1; } if (tmp == NULL) { PyErr_SetString(PyExc_TypeError, "required field \"operand\" missing from UnaryOp"); return 1; } else { int res; res = obj2ast_expr(tmp, &operand, arena); if (res != 0) goto failed; Py_CLEAR(tmp); } *out = UnaryOp(op, operand, lineno, col_offset, end_lineno, end_col_offset, arena); if (*out == NULL) goto failed; return 0; } isinstance = PyObject_IsInstance(obj, (PyObject*)Lambda_type); if (isinstance == -1) { return 1; } if (isinstance) { arguments_ty args; expr_ty body; if (_PyObject_LookupAttrId(obj, &PyId_args, &tmp) < 0) { return 1; } if (tmp == NULL) { PyErr_SetString(PyExc_TypeError, "required field \"args\" missing from Lambda"); return 1; } else { int res; res = obj2ast_arguments(tmp, &args, arena); if (res != 0) goto failed; Py_CLEAR(tmp); } if (_PyObject_LookupAttrId(obj, &PyId_body, &tmp) < 0) { return 1; } if (tmp == NULL) { PyErr_SetString(PyExc_TypeError, "required field \"body\" missing from Lambda"); return 1; } else { int res; res = obj2ast_expr(tmp, &body, arena); if (res != 0) goto failed; Py_CLEAR(tmp); } *out = Lambda(args, body, lineno, col_offset, end_lineno, end_col_offset, arena); if (*out == NULL) goto failed; return 0; } isinstance = PyObject_IsInstance(obj, (PyObject*)IfExp_type); if (isinstance == -1) { return 1; } if (isinstance) { expr_ty test; expr_ty body; expr_ty orelse; if (_PyObject_LookupAttrId(obj, &PyId_test, &tmp) < 0) { return 1; } if (tmp == NULL) { PyErr_SetString(PyExc_TypeError, "required field \"test\" missing from IfExp"); return 1; } else { int res; res = obj2ast_expr(tmp, &test, arena); if (res != 0) goto failed; 
Py_CLEAR(tmp); } if (_PyObject_LookupAttrId(obj, &PyId_body, &tmp) < 0) { return 1; } if (tmp == NULL) { PyErr_SetString(PyExc_TypeError, "required field \"body\" missing from IfExp"); return 1; } else { int res; res = obj2ast_expr(tmp, &body, arena); if (res != 0) goto failed; Py_CLEAR(tmp); } if (_PyObject_LookupAttrId(obj, &PyId_orelse, &tmp) < 0) { return 1; } if (tmp == NULL) { PyErr_SetString(PyExc_TypeError, "required field \"orelse\" missing from IfExp"); return 1; } else { int res; res = obj2ast_expr(tmp, &orelse, arena); if (res != 0) goto failed; Py_CLEAR(tmp); } *out = IfExp(test, body, orelse, lineno, col_offset, end_lineno, end_col_offset, arena); if (*out == NULL) goto failed; return 0; } isinstance = PyObject_IsInstance(obj, (PyObject*)Dict_type); if (isinstance == -1) { return 1; } if (isinstance) { asdl_seq* keys; asdl_seq* values; if (_PyObject_LookupAttrId(obj, &PyId_keys, &tmp) < 0) { return 1; } if (tmp == NULL) { PyErr_SetString(PyExc_TypeError, "required field \"keys\" missing from Dict"); return 1; } else { int res; Py_ssize_t len; Py_ssize_t i; if (!PyList_Check(tmp)) { PyErr_Format(PyExc_TypeError, "Dict field \"keys\" must be a list, not a %.200s", tmp->ob_type->tp_name); goto failed; } len = PyList_GET_SIZE(tmp); keys = _Py_asdl_seq_new(len, arena); if (keys == NULL) goto failed; for (i = 0; i < len; i++) { expr_ty val; res = obj2ast_expr(PyList_GET_ITEM(tmp, i), &val, arena); if (res != 0) goto failed; if (len != PyList_GET_SIZE(tmp)) { PyErr_SetString(PyExc_RuntimeError, "Dict field \"keys\" changed size during iteration"); goto failed; } asdl_seq_SET(keys, i, val); } Py_CLEAR(tmp); } if (_PyObject_LookupAttrId(obj, &PyId_values, &tmp) < 0) { return 1; } if (tmp == NULL) { PyErr_SetString(PyExc_TypeError, "required field \"values\" missing from Dict"); return 1; } else { int res; Py_ssize_t len; Py_ssize_t i; if (!PyList_Check(tmp)) { PyErr_Format(PyExc_TypeError, "Dict field \"values\" must be a list, not a %.200s", 
tmp->ob_type->tp_name); goto failed; } len = PyList_GET_SIZE(tmp); values = _Py_asdl_seq_new(len, arena); if (values == NULL) goto failed; for (i = 0; i < len; i++) { expr_ty val; res = obj2ast_expr(PyList_GET_ITEM(tmp, i), &val, arena); if (res != 0) goto failed; if (len != PyList_GET_SIZE(tmp)) { PyErr_SetString(PyExc_RuntimeError, "Dict field \"values\" changed size during iteration"); goto failed; } asdl_seq_SET(values, i, val); } Py_CLEAR(tmp); } *out = Dict(keys, values, lineno, col_offset, end_lineno, end_col_offset, arena); if (*out == NULL) goto failed; return 0; } isinstance = PyObject_IsInstance(obj, (PyObject*)Set_type); if (isinstance == -1) { return 1; } if (isinstance) { asdl_seq* elts; if (_PyObject_LookupAttrId(obj, &PyId_elts, &tmp) < 0) { return 1; } if (tmp == NULL) { PyErr_SetString(PyExc_TypeError, "required field \"elts\" missing from Set"); return 1; } else { int res; Py_ssize_t len; Py_ssize_t i; if (!PyList_Check(tmp)) { PyErr_Format(PyExc_TypeError, "Set field \"elts\" must be a list, not a %.200s", tmp->ob_type->tp_name); goto failed; } len = PyList_GET_SIZE(tmp); elts = _Py_asdl_seq_new(len, arena); if (elts == NULL) goto failed; for (i = 0; i < len; i++) { expr_ty val; res = obj2ast_expr(PyList_GET_ITEM(tmp, i), &val, arena); if (res != 0) goto failed; if (len != PyList_GET_SIZE(tmp)) { PyErr_SetString(PyExc_RuntimeError, "Set field \"elts\" changed size during iteration"); goto failed; } asdl_seq_SET(elts, i, val); } Py_CLEAR(tmp); } *out = Set(elts, lineno, col_offset, end_lineno, end_col_offset, arena); if (*out == NULL) goto failed; return 0; } isinstance = PyObject_IsInstance(obj, (PyObject*)ListComp_type); if (isinstance == -1) { return 1; } if (isinstance) { expr_ty elt; asdl_seq* generators; if (_PyObject_LookupAttrId(obj, &PyId_elt, &tmp) < 0) { return 1; } if (tmp == NULL) { PyErr_SetString(PyExc_TypeError, "required field \"elt\" missing from ListComp"); return 1; } else { int res; res = obj2ast_expr(tmp, &elt, arena); if 
(res != 0) goto failed; Py_CLEAR(tmp); } if (_PyObject_LookupAttrId(obj, &PyId_generators, &tmp) < 0) { return 1; } if (tmp == NULL) { PyErr_SetString(PyExc_TypeError, "required field \"generators\" missing from ListComp"); return 1; } else { int res; Py_ssize_t len; Py_ssize_t i; if (!PyList_Check(tmp)) { PyErr_Format(PyExc_TypeError, "ListComp field \"generators\" must be a list, not a %.200s", tmp->ob_type->tp_name); goto failed; } len = PyList_GET_SIZE(tmp); generators = _Py_asdl_seq_new(len, arena); if (generators == NULL) goto failed; for (i = 0; i < len; i++) { comprehension_ty val; res = obj2ast_comprehension(PyList_GET_ITEM(tmp, i), &val, arena); if (res != 0) goto failed; if (len != PyList_GET_SIZE(tmp)) { PyErr_SetString(PyExc_RuntimeError, "ListComp field \"generators\" changed size during iteration"); goto failed; } asdl_seq_SET(generators, i, val); } Py_CLEAR(tmp); } *out = ListComp(elt, generators, lineno, col_offset, end_lineno, end_col_offset, arena); if (*out == NULL) goto failed; return 0; } isinstance = PyObject_IsInstance(obj, (PyObject*)SetComp_type); if (isinstance == -1) { return 1; } if (isinstance) { expr_ty elt; asdl_seq* generators; if (_PyObject_LookupAttrId(obj, &PyId_elt, &tmp) < 0) { return 1; } if (tmp == NULL) { PyErr_SetString(PyExc_TypeError, "required field \"elt\" missing from SetComp"); return 1; } else { int res; res = obj2ast_expr(tmp, &elt, arena); if (res != 0) goto failed; Py_CLEAR(tmp); } if (_PyObject_LookupAttrId(obj, &PyId_generators, &tmp) < 0) { return 1; } if (tmp == NULL) { PyErr_SetString(PyExc_TypeError, "required field \"generators\" missing from SetComp"); return 1; } else { int res; Py_ssize_t len; Py_ssize_t i; if (!PyList_Check(tmp)) { PyErr_Format(PyExc_TypeError, "SetComp field \"generators\" must be a list, not a %.200s", tmp->ob_type->tp_name); goto failed; } len = PyList_GET_SIZE(tmp); generators = _Py_asdl_seq_new(len, arena); if (generators == NULL) goto failed; for (i = 0; i < len; i++) { 
comprehension_ty val; res = obj2ast_comprehension(PyList_GET_ITEM(tmp, i), &val, arena); if (res != 0) goto failed; if (len != PyList_GET_SIZE(tmp)) { PyErr_SetString(PyExc_RuntimeError, "SetComp field \"generators\" changed size during iteration"); goto failed; } asdl_seq_SET(generators, i, val); } Py_CLEAR(tmp); } *out = SetComp(elt, generators, lineno, col_offset, end_lineno, end_col_offset, arena); if (*out == NULL) goto failed; return 0; } isinstance = PyObject_IsInstance(obj, (PyObject*)DictComp_type); if (isinstance == -1) { return 1; } if (isinstance) { expr_ty key; expr_ty value; asdl_seq* generators; if (_PyObject_LookupAttrId(obj, &PyId_key, &tmp) < 0) { return 1; } if (tmp == NULL) { PyErr_SetString(PyExc_TypeError, "required field \"key\" missing from DictComp"); return 1; } else { int res; res = obj2ast_expr(tmp, &key, arena); if (res != 0) goto failed; Py_CLEAR(tmp); } if (_PyObject_LookupAttrId(obj, &PyId_value, &tmp) < 0) { return 1; } if (tmp == NULL) { PyErr_SetString(PyExc_TypeError, "required field \"value\" missing from DictComp"); return 1; } else { int res; res = obj2ast_expr(tmp, &value, arena); if (res != 0) goto failed; Py_CLEAR(tmp); } if (_PyObject_LookupAttrId(obj, &PyId_generators, &tmp) < 0) { return 1; } if (tmp == NULL) { PyErr_SetString(PyExc_TypeError, "required field \"generators\" missing from DictComp"); return 1; } else { int res; Py_ssize_t len; Py_ssize_t i; if (!PyList_Check(tmp)) { PyErr_Format(PyExc_TypeError, "DictComp field \"generators\" must be a list, not a %.200s", tmp->ob_type->tp_name); goto failed; } len = PyList_GET_SIZE(tmp); generators = _Py_asdl_seq_new(len, arena); if (generators == NULL) goto failed; for (i = 0; i < len; i++) { comprehension_ty val; res = obj2ast_comprehension(PyList_GET_ITEM(tmp, i), &val, arena); if (res != 0) goto failed; if (len != PyList_GET_SIZE(tmp)) { PyErr_SetString(PyExc_RuntimeError, "DictComp field \"generators\" changed size during iteration"); goto failed; } 
asdl_seq_SET(generators, i, val); } Py_CLEAR(tmp); } *out = DictComp(key, value, generators, lineno, col_offset, end_lineno, end_col_offset, arena); if (*out == NULL) goto failed; return 0; } isinstance = PyObject_IsInstance(obj, (PyObject*)GeneratorExp_type); if (isinstance == -1) { return 1; } if (isinstance) { expr_ty elt; asdl_seq* generators; if (_PyObject_LookupAttrId(obj, &PyId_elt, &tmp) < 0) { return 1; } if (tmp == NULL) { PyErr_SetString(PyExc_TypeError, "required field \"elt\" missing from GeneratorExp"); return 1; } else { int res; res = obj2ast_expr(tmp, &elt, arena); if (res != 0) goto failed; Py_CLEAR(tmp); } if (_PyObject_LookupAttrId(obj, &PyId_generators, &tmp) < 0) { return 1; } if (tmp == NULL) { PyErr_SetString(PyExc_TypeError, "required field \"generators\" missing from GeneratorExp"); return 1; } else { int res; Py_ssize_t len; Py_ssize_t i; if (!PyList_Check(tmp)) { PyErr_Format(PyExc_TypeError, "GeneratorExp field \"generators\" must be a list, not a %.200s", tmp->ob_type->tp_name); goto failed; } len = PyList_GET_SIZE(tmp); generators = _Py_asdl_seq_new(len, arena); if (generators == NULL) goto failed; for (i = 0; i < len; i++) { comprehension_ty val; res = obj2ast_comprehension(PyList_GET_ITEM(tmp, i), &val, arena); if (res != 0) goto failed; if (len != PyList_GET_SIZE(tmp)) { PyErr_SetString(PyExc_RuntimeError, "GeneratorExp field \"generators\" changed size during iteration"); goto failed; } asdl_seq_SET(generators, i, val); } Py_CLEAR(tmp); } *out = GeneratorExp(elt, generators, lineno, col_offset, end_lineno, end_col_offset, arena); if (*out == NULL) goto failed; return 0; } isinstance = PyObject_IsInstance(obj, (PyObject*)Await_type); if (isinstance == -1) { return 1; } if (isinstance) { expr_ty value; if (_PyObject_LookupAttrId(obj, &PyId_value, &tmp) < 0) { return 1; } if (tmp == NULL) { PyErr_SetString(PyExc_TypeError, "required field \"value\" missing from Await"); return 1; } else { int res; res = obj2ast_expr(tmp, &value, 
arena); if (res != 0) goto failed; Py_CLEAR(tmp); } *out = Await(value, lineno, col_offset, end_lineno, end_col_offset, arena); if (*out == NULL) goto failed; return 0; } isinstance = PyObject_IsInstance(obj, (PyObject*)Yield_type); if (isinstance == -1) { return 1; } if (isinstance) { expr_ty value; if (_PyObject_LookupAttrId(obj, &PyId_value, &tmp) < 0) { return 1; } if (tmp == NULL || tmp == Py_None) { Py_CLEAR(tmp); value = NULL; } else { int res; res = obj2ast_expr(tmp, &value, arena); if (res != 0) goto failed; Py_CLEAR(tmp); } *out = Yield(value, lineno, col_offset, end_lineno, end_col_offset, arena); if (*out == NULL) goto failed; return 0; } isinstance = PyObject_IsInstance(obj, (PyObject*)YieldFrom_type); if (isinstance == -1) { return 1; } if (isinstance) { expr_ty value; if (_PyObject_LookupAttrId(obj, &PyId_value, &tmp) < 0) { return 1; } if (tmp == NULL) { PyErr_SetString(PyExc_TypeError, "required field \"value\" missing from YieldFrom"); return 1; } else { int res; res = obj2ast_expr(tmp, &value, arena); if (res != 0) goto failed; Py_CLEAR(tmp); } *out = YieldFrom(value, lineno, col_offset, end_lineno, end_col_offset, arena); if (*out == NULL) goto failed; return 0; } isinstance = PyObject_IsInstance(obj, (PyObject*)Compare_type); if (isinstance == -1) { return 1; } if (isinstance) { expr_ty left; asdl_int_seq* ops; asdl_seq* comparators; if (_PyObject_LookupAttrId(obj, &PyId_left, &tmp) < 0) { return 1; } if (tmp == NULL) { PyErr_SetString(PyExc_TypeError, "required field \"left\" missing from Compare"); return 1; } else { int res; res = obj2ast_expr(tmp, &left, arena); if (res != 0) goto failed; Py_CLEAR(tmp); } if (_PyObject_LookupAttrId(obj, &PyId_ops, &tmp) < 0) { return 1; } if (tmp == NULL) { PyErr_SetString(PyExc_TypeError, "required field \"ops\" missing from Compare"); return 1; } else { int res; Py_ssize_t len; Py_ssize_t i; if (!PyList_Check(tmp)) { PyErr_Format(PyExc_TypeError, "Compare field \"ops\" must be a list, not a %.200s", 
tmp->ob_type->tp_name); goto failed; } len = PyList_GET_SIZE(tmp); ops = _Py_asdl_int_seq_new(len, arena); if (ops == NULL) goto failed; for (i = 0; i < len; i++) { cmpop_ty val; res = obj2ast_cmpop(PyList_GET_ITEM(tmp, i), &val, arena); if (res != 0) goto failed; if (len != PyList_GET_SIZE(tmp)) { PyErr_SetString(PyExc_RuntimeError, "Compare field \"ops\" changed size during iteration"); goto failed; } asdl_seq_SET(ops, i, val); } Py_CLEAR(tmp); } if (_PyObject_LookupAttrId(obj, &PyId_comparators, &tmp) < 0) { return 1; } if (tmp == NULL) { PyErr_SetString(PyExc_TypeError, "required field \"comparators\" missing from Compare"); return 1; } else { int res; Py_ssize_t len; Py_ssize_t i; if (!PyList_Check(tmp)) { PyErr_Format(PyExc_TypeError, "Compare field \"comparators\" must be a list, not a %.200s", tmp->ob_type->tp_name); goto failed; } len = PyList_GET_SIZE(tmp); comparators = _Py_asdl_seq_new(len, arena); if (comparators == NULL) goto failed; for (i = 0; i < len; i++) { expr_ty val; res = obj2ast_expr(PyList_GET_ITEM(tmp, i), &val, arena); if (res != 0) goto failed; if (len != PyList_GET_SIZE(tmp)) { PyErr_SetString(PyExc_RuntimeError, "Compare field \"comparators\" changed size during iteration"); goto failed; } asdl_seq_SET(comparators, i, val); } Py_CLEAR(tmp); } *out = Compare(left, ops, comparators, lineno, col_offset, end_lineno, end_col_offset, arena); if (*out == NULL) goto failed; return 0; } isinstance = PyObject_IsInstance(obj, (PyObject*)Call_type); if (isinstance == -1) { return 1; } if (isinstance) { expr_ty func; asdl_seq* args; asdl_seq* keywords; if (_PyObject_LookupAttrId(obj, &PyId_func, &tmp) < 0) { return 1; } if (tmp == NULL) { PyErr_SetString(PyExc_TypeError, "required field \"func\" missing from Call"); return 1; } else { int res; res = obj2ast_expr(tmp, &func, arena); if (res != 0) goto failed; Py_CLEAR(tmp); } if (_PyObject_LookupAttrId(obj, &PyId_args, &tmp) < 0) { return 1; } if (tmp == NULL) { PyErr_SetString(PyExc_TypeError, 
"required field \"args\" missing from Call"); return 1; } else { int res; Py_ssize_t len; Py_ssize_t i; if (!PyList_Check(tmp)) { PyErr_Format(PyExc_TypeError, "Call field \"args\" must be a list, not a %.200s", tmp->ob_type->tp_name); goto failed; } len = PyList_GET_SIZE(tmp); args = _Py_asdl_seq_new(len, arena); if (args == NULL) goto failed; for (i = 0; i < len; i++) { expr_ty val; res = obj2ast_expr(PyList_GET_ITEM(tmp, i), &val, arena); if (res != 0) goto failed; if (len != PyList_GET_SIZE(tmp)) { PyErr_SetString(PyExc_RuntimeError, "Call field \"args\" changed size during iteration"); goto failed; } asdl_seq_SET(args, i, val); } Py_CLEAR(tmp); } if (_PyObject_LookupAttrId(obj, &PyId_keywords, &tmp) < 0) { return 1; } if (tmp == NULL) { PyErr_SetString(PyExc_TypeError, "required field \"keywords\" missing from Call"); return 1; } else { int res; Py_ssize_t len; Py_ssize_t i; if (!PyList_Check(tmp)) { PyErr_Format(PyExc_TypeError, "Call field \"keywords\" must be a list, not a %.200s", tmp->ob_type->tp_name); goto failed; } len = PyList_GET_SIZE(tmp); keywords = _Py_asdl_seq_new(len, arena); if (keywords == NULL) goto failed; for (i = 0; i < len; i++) { keyword_ty val; res = obj2ast_keyword(PyList_GET_ITEM(tmp, i), &val, arena); if (res != 0) goto failed; if (len != PyList_GET_SIZE(tmp)) { PyErr_SetString(PyExc_RuntimeError, "Call field \"keywords\" changed size during iteration"); goto failed; } asdl_seq_SET(keywords, i, val); } Py_CLEAR(tmp); } *out = Call(func, args, keywords, lineno, col_offset, end_lineno, end_col_offset, arena); if (*out == NULL) goto failed; return 0; } isinstance = PyObject_IsInstance(obj, (PyObject*)FormattedValue_type); if (isinstance == -1) { return 1; } if (isinstance) { expr_ty value; int conversion; expr_ty format_spec; if (_PyObject_LookupAttrId(obj, &PyId_value, &tmp) < 0) { return 1; } if (tmp == NULL) { PyErr_SetString(PyExc_TypeError, "required field \"value\" missing from FormattedValue"); return 1; } else { int res; res = 
obj2ast_expr(tmp, &value, arena); if (res != 0) goto failed; Py_CLEAR(tmp); } if (_PyObject_LookupAttrId(obj, &PyId_conversion, &tmp) < 0) { return 1; } if (tmp == NULL || tmp == Py_None) { Py_CLEAR(tmp); conversion = 0; } else { int res; res = obj2ast_int(tmp, &conversion, arena); if (res != 0) goto failed; Py_CLEAR(tmp); } if (_PyObject_LookupAttrId(obj, &PyId_format_spec, &tmp) < 0) { return 1; } if (tmp == NULL || tmp == Py_None) { Py_CLEAR(tmp); format_spec = NULL; } else { int res; res = obj2ast_expr(tmp, &format_spec, arena); if (res != 0) goto failed; Py_CLEAR(tmp); } *out = FormattedValue(value, conversion, format_spec, lineno, col_offset, end_lineno, end_col_offset, arena); if (*out == NULL) goto failed; return 0; } isinstance = PyObject_IsInstance(obj, (PyObject*)JoinedStr_type); if (isinstance == -1) { return 1; } if (isinstance) { asdl_seq* values; if (_PyObject_LookupAttrId(obj, &PyId_values, &tmp) < 0) { return 1; } if (tmp == NULL) { PyErr_SetString(PyExc_TypeError, "required field \"values\" missing from JoinedStr"); return 1; } else { int res; Py_ssize_t len; Py_ssize_t i; if (!PyList_Check(tmp)) { PyErr_Format(PyExc_TypeError, "JoinedStr field \"values\" must be a list, not a %.200s", tmp->ob_type->tp_name); goto failed; } len = PyList_GET_SIZE(tmp); values = _Py_asdl_seq_new(len, arena); if (values == NULL) goto failed; for (i = 0; i < len; i++) { expr_ty val; res = obj2ast_expr(PyList_GET_ITEM(tmp, i), &val, arena); if (res != 0) goto failed; if (len != PyList_GET_SIZE(tmp)) { PyErr_SetString(PyExc_RuntimeError, "JoinedStr field \"values\" changed size during iteration"); goto failed; } asdl_seq_SET(values, i, val); } Py_CLEAR(tmp); } *out = JoinedStr(values, lineno, col_offset, end_lineno, end_col_offset, arena); if (*out == NULL) goto failed; return 0; } isinstance = PyObject_IsInstance(obj, (PyObject*)Constant_type); if (isinstance == -1) { return 1; } if (isinstance) { constant value; if (_PyObject_LookupAttrId(obj, &PyId_value, &tmp) < 0) 
{ return 1; } if (tmp == NULL) { PyErr_SetString(PyExc_TypeError, "required field \"value\" missing from Constant"); return 1; } else { int res; res = obj2ast_constant(tmp, &value, arena); if (res != 0) goto failed; Py_CLEAR(tmp); } *out = Constant(value, lineno, col_offset, end_lineno, end_col_offset, arena); if (*out == NULL) goto failed; return 0; } isinstance = PyObject_IsInstance(obj, (PyObject*)Attribute_type); if (isinstance == -1) { return 1; } if (isinstance) { expr_ty value; identifier attr; expr_context_ty ctx; if (_PyObject_LookupAttrId(obj, &PyId_value, &tmp) < 0) { return 1; } if (tmp == NULL) { PyErr_SetString(PyExc_TypeError, "required field \"value\" missing from Attribute"); return 1; } else { int res; res = obj2ast_expr(tmp, &value, arena); if (res != 0) goto failed; Py_CLEAR(tmp); } if (_PyObject_LookupAttrId(obj, &PyId_attr, &tmp) < 0) { return 1; } if (tmp == NULL) { PyErr_SetString(PyExc_TypeError, "required field \"attr\" missing from Attribute"); return 1; } else { int res; res = obj2ast_identifier(tmp, &attr, arena); if (res != 0) goto failed; Py_CLEAR(tmp); } if (_PyObject_LookupAttrId(obj, &PyId_ctx, &tmp) < 0) { return 1; } if (tmp == NULL) { PyErr_SetString(PyExc_TypeError, "required field \"ctx\" missing from Attribute"); return 1; } else { int res; res = obj2ast_expr_context(tmp, &ctx, arena); if (res != 0) goto failed; Py_CLEAR(tmp); } *out = Attribute(value, attr, ctx, lineno, col_offset, end_lineno, end_col_offset, arena); if (*out == NULL) goto failed; return 0; } isinstance = PyObject_IsInstance(obj, (PyObject*)Subscript_type); if (isinstance == -1) { return 1; } if (isinstance) { expr_ty value; slice_ty slice; expr_context_ty ctx; if (_PyObject_LookupAttrId(obj, &PyId_value, &tmp) < 0) { return 1; } if (tmp == NULL) { PyErr_SetString(PyExc_TypeError, "required field \"value\" missing from Subscript"); return 1; } else { int res; res = obj2ast_expr(tmp, &value, arena); if (res != 0) goto failed; Py_CLEAR(tmp); } if 
(_PyObject_LookupAttrId(obj, &PyId_slice, &tmp) < 0) { return 1; } if (tmp == NULL) { PyErr_SetString(PyExc_TypeError, "required field \"slice\" missing from Subscript"); return 1; } else { int res; res = obj2ast_slice(tmp, &slice, arena); if (res != 0) goto failed; Py_CLEAR(tmp); } if (_PyObject_LookupAttrId(obj, &PyId_ctx, &tmp) < 0) { return 1; } if (tmp == NULL) { PyErr_SetString(PyExc_TypeError, "required field \"ctx\" missing from Subscript"); return 1; } else { int res; res = obj2ast_expr_context(tmp, &ctx, arena); if (res != 0) goto failed; Py_CLEAR(tmp); } *out = Subscript(value, slice, ctx, lineno, col_offset, end_lineno, end_col_offset, arena); if (*out == NULL) goto failed; return 0; } isinstance = PyObject_IsInstance(obj, (PyObject*)Starred_type); if (isinstance == -1) { return 1; } if (isinstance) { expr_ty value; expr_context_ty ctx; if (_PyObject_LookupAttrId(obj, &PyId_value, &tmp) < 0) { return 1; } if (tmp == NULL) { PyErr_SetString(PyExc_TypeError, "required field \"value\" missing from Starred"); return 1; } else { int res; res = obj2ast_expr(tmp, &value, arena); if (res != 0) goto failed; Py_CLEAR(tmp); } if (_PyObject_LookupAttrId(obj, &PyId_ctx, &tmp) < 0) { return 1; } if (tmp == NULL) { PyErr_SetString(PyExc_TypeError, "required field \"ctx\" missing from Starred"); return 1; } else { int res; res = obj2ast_expr_context(tmp, &ctx, arena); if (res != 0) goto failed; Py_CLEAR(tmp); } *out = Starred(value, ctx, lineno, col_offset, end_lineno, end_col_offset, arena); if (*out == NULL) goto failed; return 0; } isinstance = PyObject_IsInstance(obj, (PyObject*)Name_type); if (isinstance == -1) { return 1; } if (isinstance) { identifier id; expr_context_ty ctx; if (_PyObject_LookupAttrId(obj, &PyId_id, &tmp) < 0) { return 1; } if (tmp == NULL) { PyErr_SetString(PyExc_TypeError, "required field \"id\" missing from Name"); return 1; } else { int res; res = obj2ast_identifier(tmp, &id, arena); if (res != 0) goto failed; Py_CLEAR(tmp); } if 
(_PyObject_LookupAttrId(obj, &PyId_ctx, &tmp) < 0) { return 1; } if (tmp == NULL) { PyErr_SetString(PyExc_TypeError, "required field \"ctx\" missing from Name"); return 1; } else { int res; res = obj2ast_expr_context(tmp, &ctx, arena); if (res != 0) goto failed; Py_CLEAR(tmp); } *out = Name(id, ctx, lineno, col_offset, end_lineno, end_col_offset, arena); if (*out == NULL) goto failed; return 0; } isinstance = PyObject_IsInstance(obj, (PyObject*)List_type); if (isinstance == -1) { return 1; } if (isinstance) { asdl_seq* elts; expr_context_ty ctx; if (_PyObject_LookupAttrId(obj, &PyId_elts, &tmp) < 0) { return 1; } if (tmp == NULL) { PyErr_SetString(PyExc_TypeError, "required field \"elts\" missing from List"); return 1; } else { int res; Py_ssize_t len; Py_ssize_t i; if (!PyList_Check(tmp)) { PyErr_Format(PyExc_TypeError, "List field \"elts\" must be a list, not a %.200s", tmp->ob_type->tp_name); goto failed; } len = PyList_GET_SIZE(tmp); elts = _Py_asdl_seq_new(len, arena); if (elts == NULL) goto failed; for (i = 0; i < len; i++) { expr_ty val; res = obj2ast_expr(PyList_GET_ITEM(tmp, i), &val, arena); if (res != 0) goto failed; if (len != PyList_GET_SIZE(tmp)) { PyErr_SetString(PyExc_RuntimeError, "List field \"elts\" changed size during iteration"); goto failed; } asdl_seq_SET(elts, i, val); } Py_CLEAR(tmp); } if (_PyObject_LookupAttrId(obj, &PyId_ctx, &tmp) < 0) { return 1; } if (tmp == NULL) { PyErr_SetString(PyExc_TypeError, "required field \"ctx\" missing from List"); return 1; } else { int res; res = obj2ast_expr_context(tmp, &ctx, arena); if (res != 0) goto failed; Py_CLEAR(tmp); } *out = List(elts, ctx, lineno, col_offset, end_lineno, end_col_offset, arena); if (*out == NULL) goto failed; return 0; } isinstance = PyObject_IsInstance(obj, (PyObject*)Tuple_type); if (isinstance == -1) { return 1; } if (isinstance) { asdl_seq* elts; expr_context_ty ctx; if (_PyObject_LookupAttrId(obj, &PyId_elts, &tmp) < 0) { return 1; } if (tmp == NULL) { 
/* NOTE(review): this file is generated by Parser/asdl_c.py from the ASDL
 * grammar; fix the generator, not this output.
 * Below: tail of obj2ast_expr() — the Tuple branch converts the "elts" list
 * and "ctx" context, builds the Tuple node, and falls through to the
 * unrecognized-subclass error. */
PyErr_SetString(PyExc_TypeError, "required field \"elts\" missing from Tuple");
        return 1;
    }
    else {
        int res;
        Py_ssize_t len;
        Py_ssize_t i;
        if (!PyList_Check(tmp)) {
            PyErr_Format(PyExc_TypeError, "Tuple field \"elts\" must be a list, not a %.200s", tmp->ob_type->tp_name);
            goto failed;
        }
        len = PyList_GET_SIZE(tmp);
        elts = _Py_asdl_seq_new(len, arena);
        if (elts == NULL) goto failed;
        for (i = 0; i < len; i++) {
            expr_ty val;
            res = obj2ast_expr(PyList_GET_ITEM(tmp, i), &val, arena);
            if (res != 0) goto failed;
            /* guard: converting an element may run arbitrary Python code
             * that mutates the list out from under us */
            if (len != PyList_GET_SIZE(tmp)) {
                PyErr_SetString(PyExc_RuntimeError, "Tuple field \"elts\" changed size during iteration");
                goto failed;
            }
            asdl_seq_SET(elts, i, val);
        }
        Py_CLEAR(tmp);
    }
    if (_PyObject_LookupAttrId(obj, &PyId_ctx, &tmp) < 0) {
        return 1;
    }
    if (tmp == NULL) {
        PyErr_SetString(PyExc_TypeError, "required field \"ctx\" missing from Tuple");
        return 1;
    }
    else {
        int res;
        res = obj2ast_expr_context(tmp, &ctx, arena);
        if (res != 0) goto failed;
        Py_CLEAR(tmp);
    }
    *out = Tuple(elts, ctx, lineno, col_offset, end_lineno, end_col_offset, arena);
    if (*out == NULL) goto failed;
    return 0;
}
/* obj was none of the known expr subclasses */
PyErr_Format(PyExc_TypeError, "expected some sort of expr, but got %R", obj);
failed:
Py_XDECREF(tmp);
return 1;
}

/* Convert a Python expr_context AST instance into the C expr_context_ty enum.
 * Returns 0 on success; returns 1 with a Python exception set on failure. */
int
obj2ast_expr_context(PyObject* obj, expr_context_ty* out, PyArena* arena)
{
    int isinstance;
    isinstance = PyObject_IsInstance(obj, (PyObject *)Load_type);
    if (isinstance == -1) { return 1; }
    if (isinstance) { *out = Load; return 0; }
    isinstance = PyObject_IsInstance(obj, (PyObject *)Store_type);
    if (isinstance == -1) { return 1; }
    if (isinstance) { *out = Store; return 0; }
    isinstance = PyObject_IsInstance(obj, (PyObject *)Del_type);
    if (isinstance == -1) { return 1; }
    if (isinstance) { *out = Del; return 0; }
    isinstance = PyObject_IsInstance(obj, (PyObject *)AugLoad_type);
    if (isinstance == -1) { return 1; }
    if (isinstance) { *out = AugLoad; return 0; }
    isinstance = PyObject_IsInstance(obj, (PyObject *)AugStore_type);
    if (isinstance == -1) { return 1; }
    if (isinstance) { *out = AugStore; return 0; }
    isinstance = PyObject_IsInstance(obj, (PyObject *)Param_type);
    if (isinstance == -1) { return 1; }
    if (isinstance) { *out = Param; return 0; }
    isinstance = PyObject_IsInstance(obj, (PyObject *)NamedStore_type);
    if (isinstance == -1) { return 1; }
    if (isinstance) { *out = NamedStore; return 0; }
    PyErr_Format(PyExc_TypeError, "expected some sort of expr_context, but got %R", obj);
    return 1;
}

/* Convert a Python slice AST instance (Slice / ExtSlice / Index, or None)
 * into a C slice_ty allocated in 'arena'.
 * Returns 0 on success; returns 1 with a Python exception set on failure. */
int
obj2ast_slice(PyObject* obj, slice_ty* out, PyArena* arena)
{
    int isinstance;
    PyObject *tmp = NULL;

    /* None maps to a NULL slice pointer. */
    if (obj == Py_None) {
        *out = NULL;
        return 0;
    }
    isinstance = PyObject_IsInstance(obj, (PyObject*)Slice_type);
    if (isinstance == -1) {
        return 1;
    }
    if (isinstance) {
        expr_ty lower;
        expr_ty upper;
        expr_ty step;

        /* lower, upper and step are all optional (None -> NULL). */
        if (_PyObject_LookupAttrId(obj, &PyId_lower, &tmp) < 0) {
            return 1;
        }
        if (tmp == NULL || tmp == Py_None) {
            Py_CLEAR(tmp);
            lower = NULL;
        }
        else {
            int res;
            res = obj2ast_expr(tmp, &lower, arena);
            if (res != 0) goto failed;
            Py_CLEAR(tmp);
        }
        if (_PyObject_LookupAttrId(obj, &PyId_upper, &tmp) < 0) {
            return 1;
        }
        if (tmp == NULL || tmp == Py_None) {
            Py_CLEAR(tmp);
            upper = NULL;
        }
        else {
            int res;
            res = obj2ast_expr(tmp, &upper, arena);
            if (res != 0) goto failed;
            Py_CLEAR(tmp);
        }
        if (_PyObject_LookupAttrId(obj, &PyId_step, &tmp) < 0) {
            return 1;
        }
        if (tmp == NULL || tmp == Py_None) {
            Py_CLEAR(tmp);
            step = NULL;
        }
        else {
            int res;
            res = obj2ast_expr(tmp, &step, arena);
            if (res != 0) goto failed;
            Py_CLEAR(tmp);
        }
        *out = Slice(lower, upper, step, arena);
        if (*out == NULL) goto failed;
        return 0;
    }
    isinstance = PyObject_IsInstance(obj, (PyObject*)ExtSlice_type);
    if (isinstance == -1) {
        return 1;
    }
    if (isinstance) {
        asdl_seq* dims;
        if (_PyObject_LookupAttrId(obj, &PyId_dims, &tmp) < 0) {
            return 1;
        }
        if (tmp == NULL) {
            PyErr_SetString(PyExc_TypeError, "required field \"dims\" missing from ExtSlice");
            return 1;
        }
        else {
            int res;
            Py_ssize_t len;
            Py_ssize_t i;
            if (!PyList_Check(tmp)) {
                PyErr_Format(PyExc_TypeError, "ExtSlice field \"dims\" must be a list, not a %.200s", tmp->ob_type->tp_name);
                goto failed;
            }
            len = PyList_GET_SIZE(tmp);
            dims = _Py_asdl_seq_new(len, arena);
            if (dims == NULL) goto failed;
            for (i = 0; i < len; i++) {
                slice_ty val;
                res = obj2ast_slice(PyList_GET_ITEM(tmp, i), &val, arena);
                if (res != 0) goto failed;
                /* guard against the list mutating during conversion */
                if (len != PyList_GET_SIZE(tmp)) {
                    PyErr_SetString(PyExc_RuntimeError, "ExtSlice field \"dims\" changed size during iteration");
                    goto failed;
                }
                asdl_seq_SET(dims, i, val);
            }
            Py_CLEAR(tmp);
        }
        *out = ExtSlice(dims, arena);
        if (*out == NULL) goto failed;
        return 0;
    }
    isinstance = PyObject_IsInstance(obj, (PyObject*)Index_type);
    if (isinstance == -1) {
        return 1;
    }
    if (isinstance) {
        expr_ty value;
        if (_PyObject_LookupAttrId(obj, &PyId_value, &tmp) < 0) {
            return 1;
        }
        if (tmp == NULL) {
            PyErr_SetString(PyExc_TypeError, "required field \"value\" missing from Index");
            return 1;
        }
        else {
            int res;
            res = obj2ast_expr(tmp, &value, arena);
            if (res != 0) goto failed;
            Py_CLEAR(tmp);
        }
        *out = Index(value, arena);
        if (*out == NULL) goto failed;
        return 0;
    }
    PyErr_Format(PyExc_TypeError, "expected some sort of slice, but got %R", obj);
failed:
    Py_XDECREF(tmp);
    return 1;
}

/* Convert a Python boolop AST instance into the C boolop_ty enum.
 * Returns 0 on success; returns 1 with a Python exception set on failure. */
int
obj2ast_boolop(PyObject* obj, boolop_ty* out, PyArena* arena)
{
    int isinstance;
    isinstance = PyObject_IsInstance(obj, (PyObject *)And_type);
    if (isinstance == -1) { return 1; }
    if (isinstance) { *out = And; return 0; }
    isinstance = PyObject_IsInstance(obj, (PyObject *)Or_type);
    if (isinstance == -1) { return 1; }
    if (isinstance) { *out = Or; return 0; }
    PyErr_Format(PyExc_TypeError, "expected some sort of boolop, but got %R", obj);
    return 1;
}

/* Convert a Python operator AST instance into the C operator_ty enum.
 * Returns 0 on success; returns 1 with a Python exception set on failure. */
int
obj2ast_operator(PyObject* obj, operator_ty* out, PyArena* arena)
{
    int isinstance;
    isinstance = PyObject_IsInstance(obj, (PyObject *)Add_type);
    if (isinstance == -1) { return 1; }
    if (isinstance) { *out = Add; return 0; }
    isinstance = PyObject_IsInstance(obj, (PyObject *)Sub_type);
    if (isinstance == -1) { return 1; }
    if (isinstance) { *out = Sub; return 0; }
    isinstance = PyObject_IsInstance(obj, (PyObject *)Mult_type);
    if (isinstance == -1) { return 1; }
    if (isinstance) { *out = Mult; return 0; }
    isinstance = PyObject_IsInstance(obj, (PyObject *)MatMult_type);
    if (isinstance == -1) { return 1; }
    if (isinstance) { *out = MatMult; return 0; }
    isinstance = PyObject_IsInstance(obj, (PyObject *)Div_type);
    if (isinstance == -1) { return 1; }
    if (isinstance) { *out = Div; return 0; }
    isinstance = PyObject_IsInstance(obj, (PyObject *)Mod_type);
    if (isinstance == -1) { return 1; }
    if (isinstance) { *out = Mod; return 0; }
    isinstance = PyObject_IsInstance(obj, (PyObject *)Pow_type);
    if (isinstance == -1) { return 1; }
    if (isinstance) { *out = Pow; return 0; }
    isinstance = PyObject_IsInstance(obj, (PyObject *)LShift_type);
    if (isinstance == -1) { return 1; }
    if (isinstance) { *out = LShift; return 0; }
    isinstance = PyObject_IsInstance(obj, (PyObject *)RShift_type);
    if (isinstance == -1) { return 1; }
    if (isinstance) { *out = RShift; return 0; }
    isinstance = PyObject_IsInstance(obj, (PyObject *)BitOr_type);
    if (isinstance == -1) { return 1; }
    if (isinstance) { *out = BitOr; return 0; }
    isinstance = PyObject_IsInstance(obj, (PyObject *)BitXor_type);
    if (isinstance == -1) { return 1; }
    if (isinstance) { *out = BitXor; return 0; }
    isinstance = PyObject_IsInstance(obj, (PyObject *)BitAnd_type);
    if (isinstance == -1) { return 1; }
    if (isinstance) { *out = BitAnd; return 0; }
    isinstance = PyObject_IsInstance(obj, (PyObject *)FloorDiv_type);
    if (isinstance == -1) { return 1; }
    if (isinstance) { *out = FloorDiv; return 0; }
    PyErr_Format(PyExc_TypeError, "expected some sort of operator, but got %R", obj);
    return 1;
}

/* Convert a Python unaryop AST instance into the C unaryop_ty enum.
 * Returns 0 on success; returns 1 with a Python exception set on failure. */
int
obj2ast_unaryop(PyObject* obj, unaryop_ty* out, PyArena* arena)
{
    int isinstance;
    isinstance = PyObject_IsInstance(obj, (PyObject *)Invert_type);
    if (isinstance == -1) { return 1; }
    if (isinstance) { *out = Invert; return 0; }
    isinstance = PyObject_IsInstance(obj, (PyObject *)Not_type);
    if (isinstance == -1) { return 1; }
    if (isinstance) { *out = Not; return 0; }
    isinstance = PyObject_IsInstance(obj, (PyObject *)UAdd_type);
    if (isinstance == -1) { return 1; }
    if (isinstance) { *out = UAdd; return 0; }
    isinstance = PyObject_IsInstance(obj, (PyObject *)USub_type);
    if (isinstance == -1) { return 1; }
    if (isinstance) { *out = USub; return 0; }
    PyErr_Format(PyExc_TypeError, "expected some sort of unaryop, but got %R", obj);
    return 1;
}

/* Convert a Python cmpop AST instance into the C cmpop_ty enum.
 * Returns 0 on success; returns 1 with a Python exception set on failure. */
int
obj2ast_cmpop(PyObject* obj, cmpop_ty* out, PyArena* arena)
{
    int isinstance;
    isinstance = PyObject_IsInstance(obj, (PyObject *)Eq_type);
    if (isinstance == -1) { return 1; }
    if (isinstance) { *out = Eq; return 0; }
    isinstance = PyObject_IsInstance(obj, (PyObject *)NotEq_type);
    if (isinstance == -1) { return 1; }
    if (isinstance) { *out = NotEq; return 0; }
    isinstance = PyObject_IsInstance(obj, (PyObject *)Lt_type);
    if (isinstance == -1) { return 1; }
    if (isinstance) { *out = Lt; return 0; }
    isinstance = PyObject_IsInstance(obj, (PyObject *)LtE_type);
    if (isinstance == -1) { return 1; }
    if (isinstance) { *out = LtE; return 0; }
    isinstance = PyObject_IsInstance(obj, (PyObject *)Gt_type);
    if (isinstance == -1) { return 1; }
    if (isinstance) { *out = Gt; return 0; }
    isinstance = PyObject_IsInstance(obj, (PyObject *)GtE_type);
    if (isinstance == -1) { return 1; }
    if (isinstance) { *out = GtE; return 0; }
    isinstance = PyObject_IsInstance(obj, (PyObject *)Is_type);
    if (isinstance == -1) { return 1; }
    if (isinstance) { *out = Is; return 0; }
    isinstance = PyObject_IsInstance(obj, (PyObject *)IsNot_type);
    if (isinstance == -1) { return 1; }
    if (isinstance) { *out = IsNot; return 0; }
    isinstance = PyObject_IsInstance(obj, (PyObject *)In_type);
    if (isinstance == -1) { return 1; }
    if (isinstance) { *out = In; return 0; }
    isinstance = PyObject_IsInstance(obj, (PyObject *)NotIn_type);
    if (isinstance == -1) { return 1; }
    if (isinstance) { *out = NotIn; return 0; }
    PyErr_Format(PyExc_TypeError, "expected some sort of cmpop, but got %R", obj);
    return 1;
}

/* return type of obj2ast_comprehension(), whose name/body continue below */
int
/* (continuation) Convert a Python comprehension AST instance into a C
 * comprehension_ty allocated in 'arena'.  All four fields (target, iter,
 * ifs, is_async) are required.
 * Returns 0 on success; returns 1 with a Python exception set on failure. */
obj2ast_comprehension(PyObject* obj, comprehension_ty* out, PyArena* arena)
{
    PyObject* tmp = NULL;
    expr_ty target;
    expr_ty iter;
    asdl_seq* ifs;
    int is_async;

    if (_PyObject_LookupAttrId(obj, &PyId_target, &tmp) < 0) {
        return 1;
    }
    if (tmp == NULL) {
        PyErr_SetString(PyExc_TypeError, "required field \"target\" missing from comprehension");
        return 1;
    }
    else {
        int res;
        res = obj2ast_expr(tmp, &target, arena);
        if (res != 0) goto failed;
        Py_CLEAR(tmp);
    }
    if (_PyObject_LookupAttrId(obj, &PyId_iter, &tmp) < 0) {
        return 1;
    }
    if (tmp == NULL) {
        PyErr_SetString(PyExc_TypeError, "required field \"iter\" missing from comprehension");
        return 1;
    }
    else {
        int res;
        res = obj2ast_expr(tmp, &iter, arena);
        if (res != 0) goto failed;
        Py_CLEAR(tmp);
    }
    if (_PyObject_LookupAttrId(obj, &PyId_ifs, &tmp) < 0) {
        return 1;
    }
    if (tmp == NULL) {
        PyErr_SetString(PyExc_TypeError, "required field \"ifs\" missing from comprehension");
        return 1;
    }
    else {
        int res;
        Py_ssize_t len;
        Py_ssize_t i;
        if (!PyList_Check(tmp)) {
            PyErr_Format(PyExc_TypeError, "comprehension field \"ifs\" must be a list, not a %.200s", tmp->ob_type->tp_name);
            goto failed;
        }
        len = PyList_GET_SIZE(tmp);
        ifs = _Py_asdl_seq_new(len, arena);
        if (ifs == NULL) goto failed;
        for (i = 0; i < len; i++) {
            expr_ty val;
            res = obj2ast_expr(PyList_GET_ITEM(tmp, i), &val, arena);
            if (res != 0) goto failed;
            /* guard against the list mutating during conversion */
            if (len != PyList_GET_SIZE(tmp)) {
                PyErr_SetString(PyExc_RuntimeError, "comprehension field \"ifs\" changed size during iteration");
                goto failed;
            }
            asdl_seq_SET(ifs, i, val);
        }
        Py_CLEAR(tmp);
    }
    if (_PyObject_LookupAttrId(obj, &PyId_is_async, &tmp) < 0) {
        return 1;
    }
    if (tmp == NULL) {
        PyErr_SetString(PyExc_TypeError, "required field \"is_async\" missing from comprehension");
        return 1;
    }
    else {
        int res;
        res = obj2ast_int(tmp, &is_async, arena);
        if (res != 0) goto failed;
        Py_CLEAR(tmp);
    }
    *out = comprehension(target, iter, ifs, is_async, arena);
    return 0;
failed:
    Py_XDECREF(tmp);
    return 1;
}

/* Convert a Python excepthandler AST instance (or None -> NULL) into a C
 * excepthandler_ty allocated in 'arena'.  Location attributes come first;
 * end_lineno/end_col_offset are optional and default to 0.
 * Returns 0 on success; returns 1 with a Python exception set on failure. */
int
obj2ast_excepthandler(PyObject* obj, excepthandler_ty* out, PyArena* arena)
{
    int isinstance;
    PyObject *tmp = NULL;
    int lineno;
    int col_offset;
    int end_lineno;
    int end_col_offset;

    if (obj == Py_None) {
        *out = NULL;
        return 0;
    }
    if (_PyObject_LookupAttrId(obj, &PyId_lineno, &tmp) < 0) {
        return 1;
    }
    if (tmp == NULL) {
        PyErr_SetString(PyExc_TypeError, "required field \"lineno\" missing from excepthandler");
        return 1;
    }
    else {
        int res;
        res = obj2ast_int(tmp, &lineno, arena);
        if (res != 0) goto failed;
        Py_CLEAR(tmp);
    }
    if (_PyObject_LookupAttrId(obj, &PyId_col_offset, &tmp) < 0) {
        return 1;
    }
    if (tmp == NULL) {
        PyErr_SetString(PyExc_TypeError, "required field \"col_offset\" missing from excepthandler");
        return 1;
    }
    else {
        int res;
        res = obj2ast_int(tmp, &col_offset, arena);
        if (res != 0) goto failed;
        Py_CLEAR(tmp);
    }
    if (_PyObject_LookupAttrId(obj, &PyId_end_lineno, &tmp) < 0) {
        return 1;
    }
    if (tmp == NULL || tmp == Py_None) {
        Py_CLEAR(tmp);
        end_lineno = 0;
    }
    else {
        int res;
        res = obj2ast_int(tmp, &end_lineno, arena);
        if (res != 0) goto failed;
        Py_CLEAR(tmp);
    }
    if (_PyObject_LookupAttrId(obj, &PyId_end_col_offset, &tmp) < 0) {
        return 1;
    }
    if (tmp == NULL || tmp == Py_None) {
        Py_CLEAR(tmp);
        end_col_offset = 0;
    }
    else {
        int res;
        res = obj2ast_int(tmp, &end_col_offset, arena);
        if (res != 0) goto failed;
        Py_CLEAR(tmp);
    }
    isinstance = PyObject_IsInstance(obj, (PyObject*)ExceptHandler_type);
    if (isinstance == -1) {
        return 1;
    }
    if (isinstance) {
        expr_ty type;
        identifier name;
        asdl_seq* body;

        /* type and name are optional (None -> NULL); body is required. */
        if (_PyObject_LookupAttrId(obj, &PyId_type, &tmp) < 0) {
            return 1;
        }
        if (tmp == NULL || tmp == Py_None) {
            Py_CLEAR(tmp);
            type = NULL;
        }
        else {
            int res;
            res = obj2ast_expr(tmp, &type, arena);
            if (res != 0) goto failed;
            Py_CLEAR(tmp);
        }
        if (_PyObject_LookupAttrId(obj, &PyId_name, &tmp) < 0) {
            return 1;
        }
        if (tmp == NULL || tmp == Py_None) {
            Py_CLEAR(tmp);
            name = NULL;
        }
        else {
            int res;
            res = obj2ast_identifier(tmp, &name, arena);
            if (res != 0) goto failed;
            Py_CLEAR(tmp);
        }
        if (_PyObject_LookupAttrId(obj, &PyId_body, &tmp) < 0) {
            return 1;
        }
        if (tmp == NULL) {
            PyErr_SetString(PyExc_TypeError, "required field \"body\" missing from ExceptHandler");
            return 1;
        }
        else {
            int res;
            Py_ssize_t len;
            Py_ssize_t i;
            if (!PyList_Check(tmp)) {
                PyErr_Format(PyExc_TypeError, "ExceptHandler field \"body\" must be a list, not a %.200s", tmp->ob_type->tp_name);
                goto failed;
            }
            len = PyList_GET_SIZE(tmp);
            body = _Py_asdl_seq_new(len, arena);
            if (body == NULL) goto failed;
            for (i = 0; i < len; i++) {
                stmt_ty val;
                res = obj2ast_stmt(PyList_GET_ITEM(tmp, i), &val, arena);
                if (res != 0) goto failed;
                /* guard against the list mutating during conversion */
                if (len != PyList_GET_SIZE(tmp)) {
                    PyErr_SetString(PyExc_RuntimeError, "ExceptHandler field \"body\" changed size during iteration");
                    goto failed;
                }
                asdl_seq_SET(body, i, val);
            }
            Py_CLEAR(tmp);
        }
        *out = ExceptHandler(type, name, body, lineno, col_offset, end_lineno, end_col_offset, arena);
        if (*out == NULL) goto failed;
        return 0;
    }
    PyErr_Format(PyExc_TypeError, "expected some sort of excepthandler, but got %R", obj);
failed:
    Py_XDECREF(tmp);
    return 1;
}

/* Convert a Python arguments AST instance into a C arguments_ty.
 * (Function continues past this point in the file.) */
int
obj2ast_arguments(PyObject* obj, arguments_ty* out, PyArena* arena)
{
    PyObject* tmp = NULL;
    asdl_seq* args;
    arg_ty vararg;
    asdl_seq* kwonlyargs;
    asdl_seq* kw_defaults;
    arg_ty kwarg;
    asdl_seq* defaults;

    if (_PyObject_LookupAttrId(obj, &PyId_args, &tmp) < 0) {
        return 1;
    }
    if (tmp == NULL) {
        PyErr_SetString(PyExc_TypeError, "required field \"args\" missing from arguments");
        return 1;
    }
    else {
        int res;
        Py_ssize_t len;
        Py_ssize_t i;
        if (!PyList_Check(tmp)) {
            PyErr_Format(PyExc_TypeError, "arguments field \"args\" must be a list, not a %.200s", tmp->ob_type->tp_name);
            goto failed;
        }
        len = PyList_GET_SIZE(tmp);
        args = _Py_asdl_seq_new(len, arena);
        if (args == NULL) goto failed;
        for (i = 0; i < len; i++) {
            arg_ty val;
            res = obj2ast_arg(PyList_GET_ITEM(tmp, i), &val, arena);
            if (res != 0) goto failed;
            /* guard against the list mutating during conversion */
            if (len != PyList_GET_SIZE(tmp)) {
                PyErr_SetString(PyExc_RuntimeError, "arguments field \"args\" changed size during iteration");
                goto failed;
            }
            asdl_seq_SET(args, i, val);
        }
Py_CLEAR(tmp); } if (_PyObject_LookupAttrId(obj, &PyId_vararg, &tmp) < 0) { return 1; } if (tmp == NULL || tmp == Py_None) { Py_CLEAR(tmp); vararg = NULL; } else { int res; res = obj2ast_arg(tmp, &vararg, arena); if (res != 0) goto failed; Py_CLEAR(tmp); } if (_PyObject_LookupAttrId(obj, &PyId_kwonlyargs, &tmp) < 0) { return 1; } if (tmp == NULL) { PyErr_SetString(PyExc_TypeError, "required field \"kwonlyargs\" missing from arguments"); return 1; } else { int res; Py_ssize_t len; Py_ssize_t i; if (!PyList_Check(tmp)) { PyErr_Format(PyExc_TypeError, "arguments field \"kwonlyargs\" must be a list, not a %.200s", tmp->ob_type->tp_name); goto failed; } len = PyList_GET_SIZE(tmp); kwonlyargs = _Py_asdl_seq_new(len, arena); if (kwonlyargs == NULL) goto failed; for (i = 0; i < len; i++) { arg_ty val; res = obj2ast_arg(PyList_GET_ITEM(tmp, i), &val, arena); if (res != 0) goto failed; if (len != PyList_GET_SIZE(tmp)) { PyErr_SetString(PyExc_RuntimeError, "arguments field \"kwonlyargs\" changed size during iteration"); goto failed; } asdl_seq_SET(kwonlyargs, i, val); } Py_CLEAR(tmp); } if (_PyObject_LookupAttrId(obj, &PyId_kw_defaults, &tmp) < 0) { return 1; } if (tmp == NULL) { PyErr_SetString(PyExc_TypeError, "required field \"kw_defaults\" missing from arguments"); return 1; } else { int res; Py_ssize_t len; Py_ssize_t i; if (!PyList_Check(tmp)) { PyErr_Format(PyExc_TypeError, "arguments field \"kw_defaults\" must be a list, not a %.200s", tmp->ob_type->tp_name); goto failed; } len = PyList_GET_SIZE(tmp); kw_defaults = _Py_asdl_seq_new(len, arena); if (kw_defaults == NULL) goto failed; for (i = 0; i < len; i++) { expr_ty val; res = obj2ast_expr(PyList_GET_ITEM(tmp, i), &val, arena); if (res != 0) goto failed; if (len != PyList_GET_SIZE(tmp)) { PyErr_SetString(PyExc_RuntimeError, "arguments field \"kw_defaults\" changed size during iteration"); goto failed; } asdl_seq_SET(kw_defaults, i, val); } Py_CLEAR(tmp); } if (_PyObject_LookupAttrId(obj, &PyId_kwarg, &tmp) < 0) { 
return 1; } if (tmp == NULL || tmp == Py_None) { Py_CLEAR(tmp); kwarg = NULL; } else { int res; res = obj2ast_arg(tmp, &kwarg, arena); if (res != 0) goto failed; Py_CLEAR(tmp); } if (_PyObject_LookupAttrId(obj, &PyId_defaults, &tmp) < 0) { return 1; } if (tmp == NULL) { PyErr_SetString(PyExc_TypeError, "required field \"defaults\" missing from arguments"); return 1; } else { int res; Py_ssize_t len; Py_ssize_t i; if (!PyList_Check(tmp)) { PyErr_Format(PyExc_TypeError, "arguments field \"defaults\" must be a list, not a %.200s", tmp->ob_type->tp_name); goto failed; } len = PyList_GET_SIZE(tmp); defaults = _Py_asdl_seq_new(len, arena); if (defaults == NULL) goto failed; for (i = 0; i < len; i++) { expr_ty val; res = obj2ast_expr(PyList_GET_ITEM(tmp, i), &val, arena); if (res != 0) goto failed; if (len != PyList_GET_SIZE(tmp)) { PyErr_SetString(PyExc_RuntimeError, "arguments field \"defaults\" changed size during iteration"); goto failed; } asdl_seq_SET(defaults, i, val); } Py_CLEAR(tmp); } *out = arguments(args, vararg, kwonlyargs, kw_defaults, kwarg, defaults, arena); return 0; failed: Py_XDECREF(tmp); return 1; } int obj2ast_arg(PyObject* obj, arg_ty* out, PyArena* arena) { PyObject* tmp = NULL; identifier arg; expr_ty annotation; string type_comment; int lineno; int col_offset; int end_lineno; int end_col_offset; if (_PyObject_LookupAttrId(obj, &PyId_arg, &tmp) < 0) { return 1; } if (tmp == NULL) { PyErr_SetString(PyExc_TypeError, "required field \"arg\" missing from arg"); return 1; } else { int res; res = obj2ast_identifier(tmp, &arg, arena); if (res != 0) goto failed; Py_CLEAR(tmp); } if (_PyObject_LookupAttrId(obj, &PyId_annotation, &tmp) < 0) { return 1; } if (tmp == NULL || tmp == Py_None) { Py_CLEAR(tmp); annotation = NULL; } else { int res; res = obj2ast_expr(tmp, &annotation, arena); if (res != 0) goto failed; Py_CLEAR(tmp); } if (_PyObject_LookupAttrId(obj, &PyId_type_comment, &tmp) < 0) { return 1; } if (tmp == NULL || tmp == Py_None) { Py_CLEAR(tmp); 
type_comment = NULL; } else { int res; res = obj2ast_string(tmp, &type_comment, arena); if (res != 0) goto failed; Py_CLEAR(tmp); } if (_PyObject_LookupAttrId(obj, &PyId_lineno, &tmp) < 0) { return 1; } if (tmp == NULL) { PyErr_SetString(PyExc_TypeError, "required field \"lineno\" missing from arg"); return 1; } else { int res; res = obj2ast_int(tmp, &lineno, arena); if (res != 0) goto failed; Py_CLEAR(tmp); } if (_PyObject_LookupAttrId(obj, &PyId_col_offset, &tmp) < 0) { return 1; } if (tmp == NULL) { PyErr_SetString(PyExc_TypeError, "required field \"col_offset\" missing from arg"); return 1; } else { int res; res = obj2ast_int(tmp, &col_offset, arena); if (res != 0) goto failed; Py_CLEAR(tmp); } if (_PyObject_LookupAttrId(obj, &PyId_end_lineno, &tmp) < 0) { return 1; } if (tmp == NULL || tmp == Py_None) { Py_CLEAR(tmp); end_lineno = 0; } else { int res; res = obj2ast_int(tmp, &end_lineno, arena); if (res != 0) goto failed; Py_CLEAR(tmp); } if (_PyObject_LookupAttrId(obj, &PyId_end_col_offset, &tmp) < 0) { return 1; } if (tmp == NULL || tmp == Py_None) { Py_CLEAR(tmp); end_col_offset = 0; } else { int res; res = obj2ast_int(tmp, &end_col_offset, arena); if (res != 0) goto failed; Py_CLEAR(tmp); } *out = arg(arg, annotation, type_comment, lineno, col_offset, end_lineno, end_col_offset, arena); return 0; failed: Py_XDECREF(tmp); return 1; } int obj2ast_keyword(PyObject* obj, keyword_ty* out, PyArena* arena) { PyObject* tmp = NULL; identifier arg; expr_ty value; if (_PyObject_LookupAttrId(obj, &PyId_arg, &tmp) < 0) { return 1; } if (tmp == NULL || tmp == Py_None) { Py_CLEAR(tmp); arg = NULL; } else { int res; res = obj2ast_identifier(tmp, &arg, arena); if (res != 0) goto failed; Py_CLEAR(tmp); } if (_PyObject_LookupAttrId(obj, &PyId_value, &tmp) < 0) { return 1; } if (tmp == NULL) { PyErr_SetString(PyExc_TypeError, "required field \"value\" missing from keyword"); return 1; } else { int res; res = obj2ast_expr(tmp, &value, arena); if (res != 0) goto failed; 
Py_CLEAR(tmp);
    }
    /* Tail of obj2ast_keyword (its first half precedes this span):
     * every field converted cleanly -- build the keyword node. */
    *out = keyword(arg, value, arena);
    return 0;
failed:
    Py_XDECREF(tmp);
    return 1;
}

/* obj2ast_alias -- convert a Python-level ast.alias object into a C alias_ty.
 * Returns 0 on success with the node stored in *out, or 1 with a Python
 * exception set.  "name" is a required attribute; "asname" is optional
 * (a missing attribute or None both map to NULL).  tmp holds one borrowed
 * lookup result at a time and is dropped via Py_CLEAR after each use. */
int obj2ast_alias(PyObject* obj, alias_ty* out, PyArena* arena)
{
    PyObject* tmp = NULL;
    identifier name;
    identifier asname;
    if (_PyObject_LookupAttrId(obj, &PyId_name, &tmp) < 0) {
        return 1;
    }
    if (tmp == NULL) {
        PyErr_SetString(PyExc_TypeError, "required field \"name\" missing from alias");
        return 1;
    }
    else {
        int res;
        res = obj2ast_identifier(tmp, &name, arena);
        if (res != 0) goto failed;
        Py_CLEAR(tmp);
    }
    if (_PyObject_LookupAttrId(obj, &PyId_asname, &tmp) < 0) {
        return 1;
    }
    /* asname is optional: absent attribute or None both mean "no asname". */
    if (tmp == NULL || tmp == Py_None) {
        Py_CLEAR(tmp);
        asname = NULL;
    }
    else {
        int res;
        res = obj2ast_identifier(tmp, &asname, arena);
        if (res != 0) goto failed;
        Py_CLEAR(tmp);
    }
    *out = alias(name, asname, arena);
    return 0;
failed:
    /* Any conversion failure lands here; drop the pending lookup result. */
    Py_XDECREF(tmp);
    return 1;
}

/* obj2ast_withitem -- convert a Python-level ast.withitem object into a C
 * withitem_ty.  Returns 0 on success (node in *out) or 1 with an exception
 * set.  "context_expr" is required; "optional_vars" is optional (missing or
 * None map to NULL). */
int obj2ast_withitem(PyObject* obj, withitem_ty* out, PyArena* arena)
{
    PyObject* tmp = NULL;
    expr_ty context_expr;
    expr_ty optional_vars;
    if (_PyObject_LookupAttrId(obj, &PyId_context_expr, &tmp) < 0) {
        return 1;
    }
    if (tmp == NULL) {
        PyErr_SetString(PyExc_TypeError, "required field \"context_expr\" missing from withitem");
        return 1;
    }
    else {
        int res;
        res = obj2ast_expr(tmp, &context_expr, arena);
        if (res != 0) goto failed;
        Py_CLEAR(tmp);
    }
    if (_PyObject_LookupAttrId(obj, &PyId_optional_vars, &tmp) < 0) {
        return 1;
    }
    /* optional_vars is optional: absent attribute or None mean "no target". */
    if (tmp == NULL || tmp == Py_None) {
        Py_CLEAR(tmp);
        optional_vars = NULL;
    }
    else {
        int res;
        res = obj2ast_expr(tmp, &optional_vars, arena);
        if (res != 0) goto failed;
        Py_CLEAR(tmp);
    }
    *out = withitem(context_expr, optional_vars, arena);
    return 0;
failed:
    Py_XDECREF(tmp);
    return 1;
}

/* obj2ast_type_ignore -- convert a Python-level ast.TypeIgnore object (or
 * None) into a C type_ignore_ty.  None maps to a NULL node.  The remainder
 * of this function continues past this span. */
int obj2ast_type_ignore(PyObject* obj, type_ignore_ty* out, PyArena* arena)
{
    int isinstance;
    PyObject *tmp = NULL;
    if (obj == Py_None) {
        *out = NULL;
        return 0;
    }
    isinstance = PyObject_IsInstance(obj, (PyObject*)TypeIgnore_type);
    if (isinstance == -1) {
        return 1;
    }
    if (isinstance) {
        int lineno;
        if (_PyObject_LookupAttrId(obj, &PyId_lineno, &tmp) < 0) {
return 1; } if (tmp == NULL) { PyErr_SetString(PyExc_TypeError, "required field \"lineno\" missing from TypeIgnore"); return 1; } else { int res; res = obj2ast_int(tmp, &lineno, arena); if (res != 0) goto failed; Py_CLEAR(tmp); } *out = TypeIgnore(lineno, arena); if (*out == NULL) goto failed; return 0; } PyErr_Format(PyExc_TypeError, "expected some sort of type_ignore, but got %R", obj); failed: Py_XDECREF(tmp); return 1; } static struct PyModuleDef _astmodule = { PyModuleDef_HEAD_INIT, "_ast" }; PyMODINIT_FUNC PyInit__ast(void) { PyObject *m, *d; if (!init_types()) return NULL; m = PyModule_Create(&_astmodule); if (!m) return NULL; d = PyModule_GetDict(m); if (PyDict_SetItemString(d, "AST", (PyObject*)&AST_type) < 0) return NULL; if (PyModule_AddIntMacro(m, PyCF_ONLY_AST) < 0) return NULL; if (PyModule_AddIntMacro(m, PyCF_TYPE_COMMENTS) < 0) return NULL; if (PyDict_SetItemString(d, "mod", (PyObject*)mod_type) < 0) return NULL; if (PyDict_SetItemString(d, "Module", (PyObject*)Module_type) < 0) return NULL; if (PyDict_SetItemString(d, "Interactive", (PyObject*)Interactive_type) < 0) return NULL; if (PyDict_SetItemString(d, "Expression", (PyObject*)Expression_type) < 0) return NULL; if (PyDict_SetItemString(d, "FunctionType", (PyObject*)FunctionType_type) < 0) return NULL; if (PyDict_SetItemString(d, "Suite", (PyObject*)Suite_type) < 0) return NULL; if (PyDict_SetItemString(d, "stmt", (PyObject*)stmt_type) < 0) return NULL; if (PyDict_SetItemString(d, "FunctionDef", (PyObject*)FunctionDef_type) < 0) return NULL; if (PyDict_SetItemString(d, "AsyncFunctionDef", (PyObject*)AsyncFunctionDef_type) < 0) return NULL; if (PyDict_SetItemString(d, "ClassDef", (PyObject*)ClassDef_type) < 0) return NULL; if (PyDict_SetItemString(d, "Return", (PyObject*)Return_type) < 0) return NULL; if (PyDict_SetItemString(d, "Delete", (PyObject*)Delete_type) < 0) return NULL; if (PyDict_SetItemString(d, "Assign", (PyObject*)Assign_type) < 0) return NULL; if (PyDict_SetItemString(d, 
"AugAssign", (PyObject*)AugAssign_type) < 0) return NULL; if (PyDict_SetItemString(d, "AnnAssign", (PyObject*)AnnAssign_type) < 0) return NULL; if (PyDict_SetItemString(d, "For", (PyObject*)For_type) < 0) return NULL; if (PyDict_SetItemString(d, "AsyncFor", (PyObject*)AsyncFor_type) < 0) return NULL; if (PyDict_SetItemString(d, "While", (PyObject*)While_type) < 0) return NULL; if (PyDict_SetItemString(d, "If", (PyObject*)If_type) < 0) return NULL; if (PyDict_SetItemString(d, "With", (PyObject*)With_type) < 0) return NULL; if (PyDict_SetItemString(d, "AsyncWith", (PyObject*)AsyncWith_type) < 0) return NULL; if (PyDict_SetItemString(d, "Raise", (PyObject*)Raise_type) < 0) return NULL; if (PyDict_SetItemString(d, "Try", (PyObject*)Try_type) < 0) return NULL; if (PyDict_SetItemString(d, "Assert", (PyObject*)Assert_type) < 0) return NULL; if (PyDict_SetItemString(d, "Import", (PyObject*)Import_type) < 0) return NULL; if (PyDict_SetItemString(d, "ImportFrom", (PyObject*)ImportFrom_type) < 0) return NULL; if (PyDict_SetItemString(d, "Global", (PyObject*)Global_type) < 0) return NULL; if (PyDict_SetItemString(d, "Nonlocal", (PyObject*)Nonlocal_type) < 0) return NULL; if (PyDict_SetItemString(d, "Expr", (PyObject*)Expr_type) < 0) return NULL; if (PyDict_SetItemString(d, "Pass", (PyObject*)Pass_type) < 0) return NULL; if (PyDict_SetItemString(d, "Break", (PyObject*)Break_type) < 0) return NULL; if (PyDict_SetItemString(d, "Continue", (PyObject*)Continue_type) < 0) return NULL; if (PyDict_SetItemString(d, "expr", (PyObject*)expr_type) < 0) return NULL; if (PyDict_SetItemString(d, "BoolOp", (PyObject*)BoolOp_type) < 0) return NULL; if (PyDict_SetItemString(d, "NamedExpr", (PyObject*)NamedExpr_type) < 0) return NULL; if (PyDict_SetItemString(d, "BinOp", (PyObject*)BinOp_type) < 0) return NULL; if (PyDict_SetItemString(d, "UnaryOp", (PyObject*)UnaryOp_type) < 0) return NULL; if (PyDict_SetItemString(d, "Lambda", (PyObject*)Lambda_type) < 0) return NULL; if 
(PyDict_SetItemString(d, "IfExp", (PyObject*)IfExp_type) < 0) return NULL; if (PyDict_SetItemString(d, "Dict", (PyObject*)Dict_type) < 0) return NULL; if (PyDict_SetItemString(d, "Set", (PyObject*)Set_type) < 0) return NULL; if (PyDict_SetItemString(d, "ListComp", (PyObject*)ListComp_type) < 0) return NULL; if (PyDict_SetItemString(d, "SetComp", (PyObject*)SetComp_type) < 0) return NULL; if (PyDict_SetItemString(d, "DictComp", (PyObject*)DictComp_type) < 0) return NULL; if (PyDict_SetItemString(d, "GeneratorExp", (PyObject*)GeneratorExp_type) < 0) return NULL; if (PyDict_SetItemString(d, "Await", (PyObject*)Await_type) < 0) return NULL; if (PyDict_SetItemString(d, "Yield", (PyObject*)Yield_type) < 0) return NULL; if (PyDict_SetItemString(d, "YieldFrom", (PyObject*)YieldFrom_type) < 0) return NULL; if (PyDict_SetItemString(d, "Compare", (PyObject*)Compare_type) < 0) return NULL; if (PyDict_SetItemString(d, "Call", (PyObject*)Call_type) < 0) return NULL; if (PyDict_SetItemString(d, "FormattedValue", (PyObject*)FormattedValue_type) < 0) return NULL; if (PyDict_SetItemString(d, "JoinedStr", (PyObject*)JoinedStr_type) < 0) return NULL; if (PyDict_SetItemString(d, "Constant", (PyObject*)Constant_type) < 0) return NULL; if (PyDict_SetItemString(d, "Attribute", (PyObject*)Attribute_type) < 0) return NULL; if (PyDict_SetItemString(d, "Subscript", (PyObject*)Subscript_type) < 0) return NULL; if (PyDict_SetItemString(d, "Starred", (PyObject*)Starred_type) < 0) return NULL; if (PyDict_SetItemString(d, "Name", (PyObject*)Name_type) < 0) return NULL; if (PyDict_SetItemString(d, "List", (PyObject*)List_type) < 0) return NULL; if (PyDict_SetItemString(d, "Tuple", (PyObject*)Tuple_type) < 0) return NULL; if (PyDict_SetItemString(d, "expr_context", (PyObject*)expr_context_type) < 0) return NULL; if (PyDict_SetItemString(d, "Load", (PyObject*)Load_type) < 0) return NULL; if (PyDict_SetItemString(d, "Store", (PyObject*)Store_type) < 0) return NULL; if (PyDict_SetItemString(d, "Del", 
(PyObject*)Del_type) < 0) return NULL; if (PyDict_SetItemString(d, "AugLoad", (PyObject*)AugLoad_type) < 0) return NULL; if (PyDict_SetItemString(d, "AugStore", (PyObject*)AugStore_type) < 0) return NULL; if (PyDict_SetItemString(d, "Param", (PyObject*)Param_type) < 0) return NULL; if (PyDict_SetItemString(d, "NamedStore", (PyObject*)NamedStore_type) < 0) return NULL; if (PyDict_SetItemString(d, "slice", (PyObject*)slice_type) < 0) return NULL; if (PyDict_SetItemString(d, "Slice", (PyObject*)Slice_type) < 0) return NULL; if (PyDict_SetItemString(d, "ExtSlice", (PyObject*)ExtSlice_type) < 0) return NULL; if (PyDict_SetItemString(d, "Index", (PyObject*)Index_type) < 0) return NULL; if (PyDict_SetItemString(d, "boolop", (PyObject*)boolop_type) < 0) return NULL; if (PyDict_SetItemString(d, "And", (PyObject*)And_type) < 0) return NULL; if (PyDict_SetItemString(d, "Or", (PyObject*)Or_type) < 0) return NULL; if (PyDict_SetItemString(d, "operator", (PyObject*)operator_type) < 0) return NULL; if (PyDict_SetItemString(d, "Add", (PyObject*)Add_type) < 0) return NULL; if (PyDict_SetItemString(d, "Sub", (PyObject*)Sub_type) < 0) return NULL; if (PyDict_SetItemString(d, "Mult", (PyObject*)Mult_type) < 0) return NULL; if (PyDict_SetItemString(d, "MatMult", (PyObject*)MatMult_type) < 0) return NULL; if (PyDict_SetItemString(d, "Div", (PyObject*)Div_type) < 0) return NULL; if (PyDict_SetItemString(d, "Mod", (PyObject*)Mod_type) < 0) return NULL; if (PyDict_SetItemString(d, "Pow", (PyObject*)Pow_type) < 0) return NULL; if (PyDict_SetItemString(d, "LShift", (PyObject*)LShift_type) < 0) return NULL; if (PyDict_SetItemString(d, "RShift", (PyObject*)RShift_type) < 0) return NULL; if (PyDict_SetItemString(d, "BitOr", (PyObject*)BitOr_type) < 0) return NULL; if (PyDict_SetItemString(d, "BitXor", (PyObject*)BitXor_type) < 0) return NULL; if (PyDict_SetItemString(d, "BitAnd", (PyObject*)BitAnd_type) < 0) return NULL; if (PyDict_SetItemString(d, "FloorDiv", (PyObject*)FloorDiv_type) < 0) 
return NULL; if (PyDict_SetItemString(d, "unaryop", (PyObject*)unaryop_type) < 0) return NULL; if (PyDict_SetItemString(d, "Invert", (PyObject*)Invert_type) < 0) return NULL; if (PyDict_SetItemString(d, "Not", (PyObject*)Not_type) < 0) return NULL; if (PyDict_SetItemString(d, "UAdd", (PyObject*)UAdd_type) < 0) return NULL; if (PyDict_SetItemString(d, "USub", (PyObject*)USub_type) < 0) return NULL; if (PyDict_SetItemString(d, "cmpop", (PyObject*)cmpop_type) < 0) return NULL; if (PyDict_SetItemString(d, "Eq", (PyObject*)Eq_type) < 0) return NULL; if (PyDict_SetItemString(d, "NotEq", (PyObject*)NotEq_type) < 0) return NULL; if (PyDict_SetItemString(d, "Lt", (PyObject*)Lt_type) < 0) return NULL; if (PyDict_SetItemString(d, "LtE", (PyObject*)LtE_type) < 0) return NULL; if (PyDict_SetItemString(d, "Gt", (PyObject*)Gt_type) < 0) return NULL; if (PyDict_SetItemString(d, "GtE", (PyObject*)GtE_type) < 0) return NULL; if (PyDict_SetItemString(d, "Is", (PyObject*)Is_type) < 0) return NULL; if (PyDict_SetItemString(d, "IsNot", (PyObject*)IsNot_type) < 0) return NULL; if (PyDict_SetItemString(d, "In", (PyObject*)In_type) < 0) return NULL; if (PyDict_SetItemString(d, "NotIn", (PyObject*)NotIn_type) < 0) return NULL; if (PyDict_SetItemString(d, "comprehension", (PyObject*)comprehension_type) < 0) return NULL; if (PyDict_SetItemString(d, "excepthandler", (PyObject*)excepthandler_type) < 0) return NULL; if (PyDict_SetItemString(d, "ExceptHandler", (PyObject*)ExceptHandler_type) < 0) return NULL; if (PyDict_SetItemString(d, "arguments", (PyObject*)arguments_type) < 0) return NULL; if (PyDict_SetItemString(d, "arg", (PyObject*)arg_type) < 0) return NULL; if (PyDict_SetItemString(d, "keyword", (PyObject*)keyword_type) < 0) return NULL; if (PyDict_SetItemString(d, "alias", (PyObject*)alias_type) < 0) return NULL; if (PyDict_SetItemString(d, "withitem", (PyObject*)withitem_type) < 0) return NULL; if (PyDict_SetItemString(d, "type_ignore", (PyObject*)type_ignore_type) < 0) return NULL; if 
(PyDict_SetItemString(d, "TypeIgnore", (PyObject*)TypeIgnore_type) < 0) return NULL; return m; } PyObject* PyAST_mod2obj(mod_ty t) { if (!init_types()) return NULL; return ast2obj_mod(t); } /* mode is 0 for "exec", 1 for "eval" and 2 for "single" input */ /* and 3 for "func_type" */ mod_ty PyAST_obj2mod(PyObject* ast, PyArena* arena, int mode) { mod_ty res; PyObject *req_type[3]; char *req_name[] = {"Module", "Expression", "Interactive", "FunctionType"}; int isinstance; req_type[0] = (PyObject*)Module_type; req_type[1] = (PyObject*)Expression_type; req_type[2] = (PyObject*)Interactive_type; assert(0 <= mode && mode <= 3); if (!init_types()) return NULL; isinstance = PyObject_IsInstance(ast, req_type[mode]); if (isinstance == -1) return NULL; if (!isinstance) { PyErr_Format(PyExc_TypeError, "expected %s node, got %.400s", req_name[mode], Py_TYPE(ast)->tp_name); return NULL; } if (obj2ast_mod(ast, &res, arena) != 0) return NULL; else return res; } int PyAST_Check(PyObject* obj) { if (!init_types()) return -1; return PyObject_IsInstance(obj, (PyObject*)&AST_type); }
Assign(asdl_seq * targets, expr_ty value, int lineno, int col_offset, int end_lineno, int end_col_offset, PyArena *arena) { stmt_ty p; if (!value) { PyErr_SetString(PyExc_ValueError, "field value is required for Assign"); return NULL; } p = (stmt_ty)PyArena_Malloc(arena, sizeof(*p)); if (!p) return NULL; p->kind = Assign_kind; p->v.Assign.targets = targets; p->v.Assign.value = value; p->lineno = lineno; p->col_offset = col_offset; p->end_lineno = end_lineno; p->end_col_offset = end_col_offset; return p; }
Assign(asdl_seq * targets, expr_ty value, string type_comment, int lineno, int col_offset, int end_lineno, int end_col_offset, PyArena *arena) { stmt_ty p; if (!value) { PyErr_SetString(PyExc_ValueError, "field value is required for Assign"); return NULL; } p = (stmt_ty)PyArena_Malloc(arena, sizeof(*p)); if (!p) return NULL; p->kind = Assign_kind; p->v.Assign.targets = targets; p->v.Assign.value = value; p->v.Assign.type_comment = type_comment; p->lineno = lineno; p->col_offset = col_offset; p->end_lineno = end_lineno; p->end_col_offset = end_col_offset; return p; }
{'added': [(13, '_Py_IDENTIFIER(type_ignores);'), (16, ' "type_ignores",'), (26, 'static PyTypeObject *FunctionType_type;'), (27, '_Py_IDENTIFIER(argtypes);'), (28, '_Py_IDENTIFIER(returns);'), (29, 'static char *FunctionType_fields[]={'), (30, ' "argtypes",'), (31, ' "returns",'), (32, '};'), (53, '_Py_IDENTIFIER(type_comment);'), (60, ' "type_comment",'), (69, ' "type_comment",'), (95, ' "type_comment",'), (122, ' "type_comment",'), (130, ' "type_comment",'), (150, ' "type_comment",'), (156, ' "type_comment",'), (497, ' "type_comment",'), (520, 'static PyTypeObject *type_ignore_type;'), (521, 'static PyObject* ast2obj_type_ignore(void*);'), (522, 'static PyTypeObject *TypeIgnore_type;'), (523, 'static char *TypeIgnore_fields[]={'), (524, ' "lineno",'), (525, '};'), (795, 'static int obj2ast_string(PyObject* obj, PyObject** out, PyArena* arena)'), (796, '{'), (797, ' if (!PyUnicode_CheckExact(obj) && !PyBytes_CheckExact(obj)) {'), (798, ' PyErr_SetString(PyExc_TypeError, "AST string must be of type str");'), (799, ' return 1;'), (800, ' }'), (801, ' return obj2ast_object(obj, out, arena);'), (802, '}'), (803, ''), (845, ' Module_type = make_type("Module", mod_type, Module_fields, 2);'), (852, ' FunctionType_type = make_type("FunctionType", mod_type,'), (853, ' FunctionType_fields, 2);'), (854, ' if (!FunctionType_type) return 0;'), (861, ' 6);'), (864, ' AsyncFunctionDef_fields, 6);'), (872, ' Assign_type = make_type("Assign", stmt_type, Assign_fields, 3);'), (878, ' For_type = make_type("For", stmt_type, For_fields, 5);'), (880, ' AsyncFor_type = make_type("AsyncFor", stmt_type, AsyncFor_fields, 5);'), (886, ' With_type = make_type("With", stmt_type, With_fields, 3);'), (888, ' AsyncWith_type = make_type("AsyncWith", stmt_type, AsyncWith_fields, 3);'), (1151, ' arg_type = make_type("arg", &AST_type, arg_fields, 3);'), (1163, ' type_ignore_type = make_type("type_ignore", &AST_type, NULL, 0);'), (1164, ' if (!type_ignore_type) return 0;'), (1165, ' if 
(!add_attributes(type_ignore_type, NULL, 0)) return 0;'), (1166, ' TypeIgnore_type = make_type("TypeIgnore", type_ignore_type,'), (1167, ' TypeIgnore_fields, 1);'), (1168, ' if (!TypeIgnore_type) return 0;'), (1192, 'static int obj2ast_type_ignore(PyObject* obj, type_ignore_ty* out, PyArena*'), (1193, ' arena);'), (1196, 'Module(asdl_seq * body, asdl_seq * type_ignores, PyArena *arena)'), (1204, ' p->v.Module.type_ignores = type_ignores;'), (1237, 'mod_ty'), (1238, 'FunctionType(asdl_seq * argtypes, expr_ty returns, PyArena *arena)'), (1239, '{'), (1240, ' mod_ty p;'), (1241, ' if (!returns) {'), (1242, ' PyErr_SetString(PyExc_ValueError,'), (1243, ' "field returns is required for FunctionType");'), (1244, ' return NULL;'), (1245, ' }'), (1246, ' p = (mod_ty)PyArena_Malloc(arena, sizeof(*p));'), (1247, ' if (!p)'), (1248, ' return NULL;'), (1249, ' p->kind = FunctionType_kind;'), (1250, ' p->v.FunctionType.argtypes = argtypes;'), (1251, ' p->v.FunctionType.returns = returns;'), (1252, ' return p;'), (1253, '}'), (1254, ''), (1269, ' decorator_list, expr_ty returns, string type_comment, int lineno,'), (1270, ' int col_offset, int end_lineno, int end_col_offset, PyArena *arena)'), (1292, ' p->v.FunctionDef.type_comment = type_comment;'), (1302, ' * decorator_list, expr_ty returns, string type_comment, int'), (1303, ' lineno, int col_offset, int end_lineno, int end_col_offset,'), (1304, ' PyArena *arena)'), (1326, ' p->v.AsyncFunctionDef.type_comment = type_comment;'), (1396, 'Assign(asdl_seq * targets, expr_ty value, string type_comment, int lineno, int'), (1397, ' col_offset, int end_lineno, int end_col_offset, PyArena *arena)'), (1411, ' p->v.Assign.type_comment = type_comment;'), (1485, 'For(expr_ty target, expr_ty iter, asdl_seq * body, asdl_seq * orelse, string'), (1486, ' type_comment, int lineno, int col_offset, int end_lineno, int'), (1487, ' end_col_offset, PyArena *arena)'), (1508, ' p->v.For.type_comment = type_comment;'), (1517, 'AsyncFor(expr_ty target, 
expr_ty iter, asdl_seq * body, asdl_seq * orelse,'), (1518, ' string type_comment, int lineno, int col_offset, int end_lineno, int'), (1519, ' end_col_offset, PyArena *arena)'), (1540, ' p->v.AsyncFor.type_comment = type_comment;'), (1597, 'With(asdl_seq * items, asdl_seq * body, string type_comment, int lineno, int'), (1598, ' col_offset, int end_lineno, int end_col_offset, PyArena *arena)'), (1607, ' p->v.With.type_comment = type_comment;'), (1616, 'AsyncWith(asdl_seq * items, asdl_seq * body, string type_comment, int lineno,'), (1617, ' int col_offset, int end_lineno, int end_col_offset, PyArena *arena)'), (1626, ' p->v.AsyncWith.type_comment = type_comment;'), (2592, 'arg(identifier arg, expr_ty annotation, string type_comment, int lineno, int'), (2593, ' col_offset, int end_lineno, int end_col_offset, PyArena *arena)'), (2606, ' p->type_comment = type_comment;'), (2665, 'type_ignore_ty'), (2666, 'TypeIgnore(int lineno, PyArena *arena)'), (2667, '{'), (2668, ' type_ignore_ty p;'), (2669, ' p = (type_ignore_ty)PyArena_Malloc(arena, sizeof(*p));'), (2670, ' if (!p)'), (2671, ' return NULL;'), (2672, ' p->kind = TypeIgnore_kind;'), (2673, ' p->v.TypeIgnore.lineno = lineno;'), (2674, ' return p;'), (2675, '}'), (2676, ''), (2696, ' value = ast2obj_list(o->v.Module.type_ignores, ast2obj_type_ignore);'), (2697, ' if (!value) goto failed;'), (2698, ' if (_PyObject_SetAttrId(result, &PyId_type_ignores, value) == -1)'), (2699, ' goto failed;'), (2700, ' Py_DECREF(value);'), (2720, ' case FunctionType_kind:'), (2721, ' result = PyType_GenericNew(FunctionType_type, NULL, NULL);'), (2722, ' if (!result) goto failed;'), (2723, ' value = ast2obj_list(o->v.FunctionType.argtypes, ast2obj_expr);'), (2724, ' if (!value) goto failed;'), (2725, ' if (_PyObject_SetAttrId(result, &PyId_argtypes, value) == -1)'), (2726, ' goto failed;'), (2727, ' Py_DECREF(value);'), (2728, ' value = ast2obj_expr(o->v.FunctionType.returns);'), (2729, ' if (!value) goto failed;'), (2730, ' if 
(_PyObject_SetAttrId(result, &PyId_returns, value) == -1)'), (2731, ' goto failed;'), (2732, ' Py_DECREF(value);'), (2733, ' break;'), (2789, ' value = ast2obj_string(o->v.FunctionDef.type_comment);'), (2790, ' if (!value) goto failed;'), (2791, ' if (_PyObject_SetAttrId(result, &PyId_type_comment, value) == -1)'), (2792, ' goto failed;'), (2793, ' Py_DECREF(value);'), (2824, ' value = ast2obj_string(o->v.AsyncFunctionDef.type_comment);'), (2825, ' if (!value) goto failed;'), (2826, ' if (_PyObject_SetAttrId(result, &PyId_type_comment, value) == -1)'), (2827, ' goto failed;'), (2828, ' Py_DECREF(value);'), (2890, ' value = ast2obj_string(o->v.Assign.type_comment);'), (2891, ' if (!value) goto failed;'), (2892, ' if (_PyObject_SetAttrId(result, &PyId_type_comment, value) == -1)'), (2893, ' goto failed;'), (2894, ' Py_DECREF(value);'), (2962, ' value = ast2obj_string(o->v.For.type_comment);'), (2963, ' if (!value) goto failed;'), (2964, ' if (_PyObject_SetAttrId(result, &PyId_type_comment, value) == -1)'), (2965, ' goto failed;'), (2966, ' Py_DECREF(value);'), (2991, ' value = ast2obj_string(o->v.AsyncFor.type_comment);'), (2992, ' if (!value) goto failed;'), (2993, ' if (_PyObject_SetAttrId(result, &PyId_type_comment, value) == -1)'), (2994, ' goto failed;'), (2995, ' Py_DECREF(value);'), (3048, ' value = ast2obj_string(o->v.With.type_comment);'), (3049, ' if (!value) goto failed;'), (3050, ' if (_PyObject_SetAttrId(result, &PyId_type_comment, value) == -1)'), (3051, ' goto failed;'), (3052, ' Py_DECREF(value);'), (3067, ' value = ast2obj_string(o->v.AsyncWith.type_comment);'), (3068, ' if (!value) goto failed;'), (3069, ' if (_PyObject_SetAttrId(result, &PyId_type_comment, value) == -1)'), (3070, ' goto failed;'), (3071, ' Py_DECREF(value);'), (4011, ' value = ast2obj_string(o->type_comment);'), (4012, ' if (!value) goto failed;'), (4013, ' if (_PyObject_SetAttrId(result, &PyId_type_comment, value) == -1)'), (4014, ' goto failed;'), (4015, ' Py_DECREF(value);'), 
(4127, 'PyObject*'), (4128, 'ast2obj_type_ignore(void* _o)'), (4129, '{'), (4130, ' type_ignore_ty o = (type_ignore_ty)_o;'), (4131, ' PyObject *result = NULL, *value = NULL;'), (4132, ' if (!o) {'), (4133, ' Py_RETURN_NONE;'), (4134, ' }'), (4135, ''), (4136, ' switch (o->kind) {'), (4137, ' case TypeIgnore_kind:'), (4138, ' result = PyType_GenericNew(TypeIgnore_type, NULL, NULL);'), (4139, ' if (!result) goto failed;'), (4140, ' value = ast2obj_int(o->v.TypeIgnore.lineno);'), (4141, ' if (!value) goto failed;'), (4142, ' if (_PyObject_SetAttrId(result, &PyId_lineno, value) == -1)'), (4143, ' goto failed;'), (4144, ' Py_DECREF(value);'), (4145, ' break;'), (4146, ' }'), (4147, ' return result;'), (4148, 'failed:'), (4149, ' Py_XDECREF(value);'), (4150, ' Py_XDECREF(result);'), (4151, ' return NULL;'), (4152, '}'), (4153, ''), (4172, ' asdl_seq* type_ignores;'), (4204, ' if (_PyObject_LookupAttrId(obj, &PyId_type_ignores, &tmp) < 0) {'), (4205, ' return 1;'), (4206, ' }'), (4207, ' if (tmp == NULL) {'), (4208, ' PyErr_SetString(PyExc_TypeError, "required field \\"type_ignores\\" missing from Module");'), (4209, ' return 1;'), (4210, ' }'), (4211, ' else {'), (4212, ' int res;'), (4213, ' Py_ssize_t len;'), (4214, ' Py_ssize_t i;'), (4215, ' if (!PyList_Check(tmp)) {'), (4216, ' PyErr_Format(PyExc_TypeError, "Module field \\"type_ignores\\" must be a list, not a %.200s", tmp->ob_type->tp_name);'), (4217, ' goto failed;'), (4218, ' }'), (4219, ' len = PyList_GET_SIZE(tmp);'), (4220, ' type_ignores = _Py_asdl_seq_new(len, arena);'), (4221, ' if (type_ignores == NULL) goto failed;'), (4222, ' for (i = 0; i < len; i++) {'), (4223, ' type_ignore_ty val;'), (4224, ' res = obj2ast_type_ignore(PyList_GET_ITEM(tmp, i), &val, arena);'), (4225, ' if (res != 0) goto failed;'), (4226, ' if (len != PyList_GET_SIZE(tmp)) {'), (4227, ' PyErr_SetString(PyExc_RuntimeError, "Module field \\"type_ignores\\" changed size during iteration");'), (4228, ' goto failed;'), (4229, ' }'), 
(4230, ' asdl_seq_SET(type_ignores, i, val);'), (4231, ' }'), (4232, ' Py_CLEAR(tmp);'), (4233, ' }'), (4234, ' *out = Module(body, type_ignores, arena);'), (4303, ' isinstance = PyObject_IsInstance(obj, (PyObject*)FunctionType_type);'), (4304, ' if (isinstance == -1) {'), (4305, ' return 1;'), (4306, ' }'), (4307, ' if (isinstance) {'), (4308, ' asdl_seq* argtypes;'), (4309, ' expr_ty returns;'), (4310, ''), (4311, ' if (_PyObject_LookupAttrId(obj, &PyId_argtypes, &tmp) < 0) {'), (4312, ' return 1;'), (4313, ' }'), (4314, ' if (tmp == NULL) {'), (4315, ' PyErr_SetString(PyExc_TypeError, "required field \\"argtypes\\" missing from FunctionType");'), (4316, ' return 1;'), (4317, ' }'), (4318, ' else {'), (4319, ' int res;'), (4320, ' Py_ssize_t len;'), (4321, ' Py_ssize_t i;'), (4322, ' if (!PyList_Check(tmp)) {'), (4323, ' PyErr_Format(PyExc_TypeError, "FunctionType field \\"argtypes\\" must be a list, not a %.200s", tmp->ob_type->tp_name);'), (4324, ' goto failed;'), (4325, ' }'), (4326, ' len = PyList_GET_SIZE(tmp);'), (4327, ' argtypes = _Py_asdl_seq_new(len, arena);'), (4328, ' if (argtypes == NULL) goto failed;'), (4329, ' for (i = 0; i < len; i++) {'), (4330, ' expr_ty val;'), (4331, ' res = obj2ast_expr(PyList_GET_ITEM(tmp, i), &val, arena);'), (4332, ' if (res != 0) goto failed;'), (4333, ' if (len != PyList_GET_SIZE(tmp)) {'), (4334, ' PyErr_SetString(PyExc_RuntimeError, "FunctionType field \\"argtypes\\" changed size during iteration");'), (4335, ' goto failed;'), (4336, ' }'), (4337, ' asdl_seq_SET(argtypes, i, val);'), (4338, ' }'), (4339, ' Py_CLEAR(tmp);'), (4340, ' }'), (4341, ' if (_PyObject_LookupAttrId(obj, &PyId_returns, &tmp) < 0) {'), (4342, ' return 1;'), (4343, ' }'), (4344, ' if (tmp == NULL) {'), (4345, ' PyErr_SetString(PyExc_TypeError, "required field \\"returns\\" missing from FunctionType");'), (4346, ' return 1;'), (4347, ' }'), (4348, ' else {'), (4349, ' int res;'), (4350, ' res = obj2ast_expr(tmp, &returns, arena);'), (4351, ' if 
(res != 0) goto failed;'), (4352, ' Py_CLEAR(tmp);'), (4353, ' }'), (4354, ' *out = FunctionType(argtypes, returns, arena);'), (4355, ' if (*out == NULL) goto failed;'), (4356, ' return 0;'), (4357, ' }'), (4483, ' string type_comment;'), (4584, ' if (_PyObject_LookupAttrId(obj, &PyId_type_comment, &tmp) < 0) {'), (4585, ' return 1;'), (4586, ' }'), (4587, ' if (tmp == NULL || tmp == Py_None) {'), (4588, ' Py_CLEAR(tmp);'), (4589, ' type_comment = NULL;'), (4590, ' }'), (4591, ' else {'), (4592, ' int res;'), (4593, ' res = obj2ast_string(tmp, &type_comment, arena);'), (4594, ' if (res != 0) goto failed;'), (4595, ' Py_CLEAR(tmp);'), (4596, ' }'), (4597, ' *out = FunctionDef(name, args, body, decorator_list, returns,'), (4598, ' type_comment, lineno, col_offset, end_lineno,'), (4599, ' end_col_offset, arena);'), (4613, ' string type_comment;'), (4714, ' if (_PyObject_LookupAttrId(obj, &PyId_type_comment, &tmp) < 0) {'), (4715, ' return 1;'), (4716, ' }'), (4717, ' if (tmp == NULL || tmp == Py_None) {'), (4718, ' Py_CLEAR(tmp);'), (4719, ' type_comment = NULL;'), (4720, ' }'), (4721, ' else {'), (4722, ' int res;'), (4723, ' res = obj2ast_string(tmp, &type_comment, arena);'), (4724, ' if (res != 0) goto failed;'), (4725, ' Py_CLEAR(tmp);'), (4726, ' }'), (4728, ' type_comment, lineno, col_offset, end_lineno,'), (4729, ' end_col_offset, arena);'), (4956, ' string type_comment;'), (5001, ' if (_PyObject_LookupAttrId(obj, &PyId_type_comment, &tmp) < 0) {'), (5002, ' return 1;'), (5003, ' }'), (5004, ' if (tmp == NULL || tmp == Py_None) {'), (5005, ' Py_CLEAR(tmp);'), (5006, ' type_comment = NULL;'), (5007, ' }'), (5008, ' else {'), (5009, ' int res;'), (5010, ' res = obj2ast_string(tmp, &type_comment, arena);'), (5011, ' if (res != 0) goto failed;'), (5012, ' Py_CLEAR(tmp);'), (5013, ' }'), (5014, ' *out = Assign(targets, value, type_comment, lineno, col_offset,'), (5015, ' end_lineno, end_col_offset, arena);'), (5148, ' string type_comment;'), (5236, ' if 
(_PyObject_LookupAttrId(obj, &PyId_type_comment, &tmp) < 0) {'), (5237, ' return 1;'), (5238, ' }'), (5239, ' if (tmp == NULL || tmp == Py_None) {'), (5240, ' Py_CLEAR(tmp);'), (5241, ' type_comment = NULL;'), (5242, ' }'), (5243, ' else {'), (5244, ' int res;'), (5245, ' res = obj2ast_string(tmp, &type_comment, arena);'), (5246, ' if (res != 0) goto failed;'), (5247, ' Py_CLEAR(tmp);'), (5248, ' }'), (5249, ' *out = For(target, iter, body, orelse, type_comment, lineno,'), (5250, ' col_offset, end_lineno, end_col_offset, arena);'), (5263, ' string type_comment;'), (5351, ' if (_PyObject_LookupAttrId(obj, &PyId_type_comment, &tmp) < 0) {'), (5352, ' return 1;'), (5353, ' }'), (5354, ' if (tmp == NULL || tmp == Py_None) {'), (5355, ' Py_CLEAR(tmp);'), (5356, ' type_comment = NULL;'), (5357, ' }'), (5358, ' else {'), (5359, ' int res;'), (5360, ' res = obj2ast_string(tmp, &type_comment, arena);'), (5361, ' if (res != 0) goto failed;'), (5362, ' Py_CLEAR(tmp);'), (5363, ' }'), (5364, ' *out = AsyncFor(target, iter, body, orelse, type_comment, lineno,'), (5365, ' col_offset, end_lineno, end_col_offset, arena);'), (5550, ' string type_comment;'), (5612, ' if (_PyObject_LookupAttrId(obj, &PyId_type_comment, &tmp) < 0) {'), (5613, ' return 1;'), (5614, ' }'), (5615, ' if (tmp == NULL || tmp == Py_None) {'), (5616, ' Py_CLEAR(tmp);'), (5617, ' type_comment = NULL;'), (5618, ' }'), (5619, ' else {'), (5620, ' int res;'), (5621, ' res = obj2ast_string(tmp, &type_comment, arena);'), (5622, ' if (res != 0) goto failed;'), (5623, ' Py_CLEAR(tmp);'), (5624, ' }'), (5625, ' *out = With(items, body, type_comment, lineno, col_offset, end_lineno,'), (5637, ' string type_comment;'), (5699, ' if (_PyObject_LookupAttrId(obj, &PyId_type_comment, &tmp) < 0) {'), (5700, ' return 1;'), (5701, ' }'), (5702, ' if (tmp == NULL || tmp == Py_None) {'), (5703, ' Py_CLEAR(tmp);'), (5704, ' type_comment = NULL;'), (5705, ' }'), (5706, ' else {'), (5707, ' int res;'), (5708, ' res = 
obj2ast_string(tmp, &type_comment, arena);'), (5709, ' if (res != 0) goto failed;'), (5710, ' Py_CLEAR(tmp);'), (5711, ' }'), (5712, ' *out = AsyncWith(items, body, type_comment, lineno, col_offset,'), (5713, ' end_lineno, end_col_offset, arena);'), (8431, ' string type_comment;'), (8463, ' if (_PyObject_LookupAttrId(obj, &PyId_type_comment, &tmp) < 0) {'), (8464, ' return 1;'), (8465, ' }'), (8466, ' if (tmp == NULL || tmp == Py_None) {'), (8467, ' Py_CLEAR(tmp);'), (8468, ' type_comment = NULL;'), (8469, ' }'), (8470, ' else {'), (8471, ' int res;'), (8472, ' res = obj2ast_string(tmp, &type_comment, arena);'), (8473, ' if (res != 0) goto failed;'), (8474, ' Py_CLEAR(tmp);'), (8475, ' }'), (8528, ' *out = arg(arg, annotation, type_comment, lineno, col_offset, end_lineno,'), (8529, ' end_col_offset, arena);'), (8656, 'int'), (8657, 'obj2ast_type_ignore(PyObject* obj, type_ignore_ty* out, PyArena* arena)'), (8658, '{'), (8659, ' int isinstance;'), (8660, ''), (8661, ' PyObject *tmp = NULL;'), (8662, ''), (8663, ' if (obj == Py_None) {'), (8664, ' *out = NULL;'), (8665, ' return 0;'), (8666, ' }'), (8667, ' isinstance = PyObject_IsInstance(obj, (PyObject*)TypeIgnore_type);'), (8668, ' if (isinstance == -1) {'), (8669, ' return 1;'), (8670, ' }'), (8671, ' if (isinstance) {'), (8672, ' int lineno;'), (8673, ''), (8674, ' if (_PyObject_LookupAttrId(obj, &PyId_lineno, &tmp) < 0) {'), (8675, ' return 1;'), (8676, ' }'), (8677, ' if (tmp == NULL) {'), (8678, ' PyErr_SetString(PyExc_TypeError, "required field \\"lineno\\" missing from TypeIgnore");'), (8679, ' return 1;'), (8680, ' }'), (8681, ' else {'), (8682, ' int res;'), (8683, ' res = obj2ast_int(tmp, &lineno, arena);'), (8684, ' if (res != 0) goto failed;'), (8685, ' Py_CLEAR(tmp);'), (8686, ' }'), (8687, ' *out = TypeIgnore(lineno, arena);'), (8688, ' if (*out == NULL) goto failed;'), (8689, ' return 0;'), (8690, ' }'), (8691, ''), (8692, ' PyErr_Format(PyExc_TypeError, "expected some sort of type_ignore, but got 
%R", obj);'), (8693, ' failed:'), (8694, ' Py_XDECREF(tmp);'), (8695, ' return 1;'), (8696, '}'), (8697, ''), (8713, ' if (PyModule_AddIntMacro(m, PyCF_TYPE_COMMENTS) < 0)'), (8714, ' return NULL;'), (8722, ' if (PyDict_SetItemString(d, "FunctionType", (PyObject*)FunctionType_type) <'), (8723, ' 0) return NULL;'), (8904, ' if (PyDict_SetItemString(d, "type_ignore", (PyObject*)type_ignore_type) <'), (8905, ' 0) return NULL;'), (8906, ' if (PyDict_SetItemString(d, "TypeIgnore", (PyObject*)TypeIgnore_type) < 0)'), (8907, ' return NULL;'), (8920, '/* and 3 for "func_type" */'), (8925, ' char *req_name[] = {"Module", "Expression", "Interactive", "FunctionType"};'), (8932, ' assert(0 <= mode && mode <= 3);')], 'deleted': [(44, '_Py_IDENTIFIER(returns);'), (813, ' Module_type = make_type("Module", mod_type, Module_fields, 1);'), (826, ' 5);'), (829, ' AsyncFunctionDef_fields, 5);'), (837, ' Assign_type = make_type("Assign", stmt_type, Assign_fields, 2);'), (843, ' For_type = make_type("For", stmt_type, For_fields, 4);'), (845, ' AsyncFor_type = make_type("AsyncFor", stmt_type, AsyncFor_fields, 4);'), (851, ' With_type = make_type("With", stmt_type, With_fields, 2);'), (853, ' AsyncWith_type = make_type("AsyncWith", stmt_type, AsyncWith_fields, 2);'), (1116, ' arg_type = make_type("arg", &AST_type, arg_fields, 2);'), (1153, 'Module(asdl_seq * body, PyArena *arena)'), (1207, ' decorator_list, expr_ty returns, int lineno, int col_offset, int'), (1208, ' end_lineno, int end_col_offset, PyArena *arena)'), (1239, ' * decorator_list, expr_ty returns, int lineno, int col_offset,'), (1240, ' int end_lineno, int end_col_offset, PyArena *arena)'), (1331, 'Assign(asdl_seq * targets, expr_ty value, int lineno, int col_offset, int'), (1332, ' end_lineno, int end_col_offset, PyArena *arena)'), (1419, 'For(expr_ty target, expr_ty iter, asdl_seq * body, asdl_seq * orelse, int'), (1420, ' lineno, int col_offset, int end_lineno, int end_col_offset, PyArena *arena)'), (1449, 
'AsyncFor(expr_ty target, expr_ty iter, asdl_seq * body, asdl_seq * orelse, int'), (1450, ' lineno, int col_offset, int end_lineno, int end_col_offset, PyArena'), (1451, ' *arena)'), (1528, 'With(asdl_seq * items, asdl_seq * body, int lineno, int col_offset, int'), (1529, ' end_lineno, int end_col_offset, PyArena *arena)'), (1546, 'AsyncWith(asdl_seq * items, asdl_seq * body, int lineno, int col_offset, int'), (1547, ' end_lineno, int end_col_offset, PyArena *arena)'), (2521, 'arg(identifier arg, expr_ty annotation, int lineno, int col_offset, int'), (2522, ' end_lineno, int end_col_offset, PyArena *arena)'), (4033, ' *out = Module(body, arena);'), (4327, ' *out = FunctionDef(name, args, body, decorator_list, returns, lineno,'), (4328, ' col_offset, end_lineno, end_col_offset, arena);'), (4443, ' lineno, col_offset, end_lineno, end_col_offset,'), (4444, ' arena);'), (4715, ' *out = Assign(targets, value, lineno, col_offset, end_lineno,'), (4716, ' end_col_offset, arena);'), (4936, ' *out = For(target, iter, body, orelse, lineno, col_offset, end_lineno,'), (4937, ' end_col_offset, arena);'), (5037, ' *out = AsyncFor(target, iter, body, orelse, lineno, col_offset,'), (5038, ' end_lineno, end_col_offset, arena);'), (5284, ' *out = With(items, body, lineno, col_offset, end_lineno,'), (5357, ' *out = AsyncWith(items, body, lineno, col_offset, end_lineno,'), (5358, ' end_col_offset, arena);'), (8159, ' *out = arg(arg, annotation, lineno, col_offset, end_lineno, end_col_offset,'), (8160, ' arena);'), (8505, ' char *req_name[] = {"Module", "Expression", "Interactive"};'), (8512, ' assert(0 <= mode && mode <= 2);')]}
466
46
8,690
53,284
21
124
3
https://github.com/python/cpython
CVE-2019-19274
CWE-125
2,554
boot.c
C
read_boot
/* boot.c - Read and analyze ia PC/MS-DOS boot sector Copyright (C) 1993 Werner Almesberger <werner.almesberger@lrc.di.epfl.ch> Copyright (C) 1998 Roman Hodek <Roman.Hodek@informatik.uni-erlangen.de> Copyright (C) 2008-2014 Daniel Baumann <mail@daniel-baumann.ch> Copyright (C) 2015 Andreas Bombe <aeb@debian.org> This program is free software: you can redistribute it and/or modify it under the terms of the GNU General Public License as published by the Free Software Foundation, either version 3 of the License, or (at your option) any later version. This program is distributed in the hope that it will be useful, but WITHOUT ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for more details. You should have received a copy of the GNU General Public License along with this program. If not, see <http://www.gnu.org/licenses/>. The complete text of the GNU General Public License can be found in /usr/share/common-licenses/GPL-3 file. */ /* FAT32, VFAT, Atari format support, and various fixes additions May 1998 * by Roman Hodek <Roman.Hodek@informatik.uni-erlangen.de> */ #include <stdio.h> #include <stdint.h> #include <string.h> #include <stdlib.h> #include <sys/types.h> #include <time.h> #include "common.h" #include "fsck.fat.h" #include "fat.h" #include "io.h" #include "boot.h" #include "check.h" #define ROUND_TO_MULTIPLE(n,m) ((n) && (m) ? 
(n)+(m)-1-((n)-1)%(m) : 0) /* don't divide by zero */ /* cut-over cluster counts for FAT12 and FAT16 */ #define FAT12_THRESHOLD 4085 #define FAT16_THRESHOLD 65525 static struct { uint8_t media; const char *descr; } mediabytes[] = { { 0xf0, "5.25\" or 3.5\" HD floppy"}, { 0xf8, "hard disk"}, { 0xf9, "3,5\" 720k floppy 2s/80tr/9sec or " "5.25\" 1.2M floppy 2s/80tr/15sec"}, { 0xfa, "5.25\" 320k floppy 1s/80tr/8sec"}, { 0xfb, "3.5\" 640k floppy 2s/80tr/8sec"}, { 0xfc, "5.25\" 180k floppy 1s/40tr/9sec"}, { 0xfd, "5.25\" 360k floppy 2s/40tr/9sec"}, { 0xfe, "5.25\" 160k floppy 1s/40tr/8sec"}, { 0xff, "5.25\" 320k floppy 2s/40tr/8sec"},}; /* Unaligned fields must first be accessed byte-wise */ #define GET_UNALIGNED_W(f) \ ( (uint16_t)f[0] | ((uint16_t)f[1]<<8) ) static const char *get_media_descr(unsigned char media) { int i; for (i = 0; i < sizeof(mediabytes) / sizeof(*mediabytes); ++i) { if (mediabytes[i].media == media) return (mediabytes[i].descr); } return ("undefined"); } static void dump_boot(DOS_FS * fs, struct boot_sector *b, unsigned lss) { unsigned short sectors; printf("Boot sector contents:\n"); if (!atari_format) { char id[9]; strncpy(id, (const char *)b->system_id, 8); id[8] = 0; printf("System ID \"%s\"\n", id); } else { /* On Atari, a 24 bit serial number is stored at offset 8 of the boot * sector */ printf("Serial number 0x%x\n", b->system_id[5] | (b->system_id[6] << 8) | (b-> system_id[7] << 16)); } printf("Media byte 0x%02x (%s)\n", b->media, get_media_descr(b->media)); printf("%10d bytes per logical sector\n", GET_UNALIGNED_W(b->sector_size)); printf("%10d bytes per cluster\n", fs->cluster_size); printf("%10d reserved sector%s\n", le16toh(b->reserved), le16toh(b->reserved) == 1 ? 
"" : "s"); printf("First FAT starts at byte %llu (sector %llu)\n", (unsigned long long)fs->fat_start, (unsigned long long)fs->fat_start / lss); printf("%10d FATs, %d bit entries\n", b->fats, fs->fat_bits); printf("%10d bytes per FAT (= %u sectors)\n", fs->fat_size, fs->fat_size / lss); if (!fs->root_cluster) { printf("Root directory starts at byte %llu (sector %llu)\n", (unsigned long long)fs->root_start, (unsigned long long)fs->root_start / lss); printf("%10d root directory entries\n", fs->root_entries); } else { printf("Root directory start at cluster %lu (arbitrary size)\n", (unsigned long)fs->root_cluster); } printf("Data area starts at byte %llu (sector %llu)\n", (unsigned long long)fs->data_start, (unsigned long long)fs->data_start / lss); printf("%10lu data clusters (%llu bytes)\n", (unsigned long)fs->data_clusters, (unsigned long long)fs->data_clusters * fs->cluster_size); printf("%u sectors/track, %u heads\n", le16toh(b->secs_track), le16toh(b->heads)); printf("%10u hidden sectors\n", atari_format ? /* On Atari, the hidden field is only 16 bit wide and unused */ (((unsigned char *)&b->hidden)[0] | ((unsigned char *)&b->hidden)[1] << 8) : le32toh(b->hidden)); sectors = GET_UNALIGNED_W(b->sectors); printf("%10u sectors total\n", sectors ? sectors : le32toh(b->total_sect)); } static void check_backup_boot(DOS_FS * fs, struct boot_sector *b, int lss) { struct boot_sector b2; if (!fs->backupboot_start) { printf("There is no backup boot sector.\n"); if (le16toh(b->reserved) < 3) { printf("And there is no space for creating one!\n"); return; } if (interactive) printf("1) Create one\n2) Do without a backup\n"); else printf(" Auto-creating backup boot block.\n"); if (!interactive || get_key("12", "?") == '1') { int bbs; /* The usual place for the backup boot sector is sector 6. Choose * that or the last reserved sector. 
*/ if (le16toh(b->reserved) >= 7 && le16toh(b->info_sector) != 6) bbs = 6; else { bbs = le16toh(b->reserved) - 1; if (bbs == le16toh(b->info_sector)) --bbs; /* this is never 0, as we checked reserved >= 3! */ } fs->backupboot_start = bbs * lss; b->backup_boot = htole16(bbs); fs_write(fs->backupboot_start, sizeof(*b), b); fs_write(offsetof(struct boot_sector, backup_boot), sizeof(b->backup_boot), &b->backup_boot); printf("Created backup of boot sector in sector %d\n", bbs); return; } else return; } fs_read(fs->backupboot_start, sizeof(b2), &b2); if (memcmp(b, &b2, sizeof(b2)) != 0) { /* there are any differences */ uint8_t *p, *q; int i, pos, first = 1; char buf[20]; printf("There are differences between boot sector and its backup.\n"); printf("This is mostly harmless. Differences: (offset:original/backup)\n "); pos = 2; for (p = (uint8_t *) b, q = (uint8_t *) & b2, i = 0; i < sizeof(b2); ++p, ++q, ++i) { if (*p != *q) { sprintf(buf, "%s%u:%02x/%02x", first ? "" : ", ", (unsigned)(p - (uint8_t *) b), *p, *q); if (pos + strlen(buf) > 78) printf("\n "), pos = 2; printf("%s", buf); pos += strlen(buf); first = 0; } } printf("\n"); if (interactive) printf("1) Copy original to backup\n" "2) Copy backup to original\n" "3) No action\n"); else printf(" Not automatically fixing this.\n"); switch (interactive ? 
get_key("123", "?") : '3') { case '1': fs_write(fs->backupboot_start, sizeof(*b), b); break; case '2': fs_write(0, sizeof(b2), &b2); break; default: break; } } } static void init_fsinfo(struct info_sector *i) { i->magic = htole32(0x41615252); i->signature = htole32(0x61417272); i->free_clusters = htole32(-1); i->next_cluster = htole32(2); i->boot_sign = htole16(0xaa55); } static void read_fsinfo(DOS_FS * fs, struct boot_sector *b, int lss) { struct info_sector i; if (!b->info_sector) { printf("No FSINFO sector\n"); if (interactive) printf("1) Create one\n2) Do without FSINFO\n"); else printf(" Not automatically creating it.\n"); if (interactive && get_key("12", "?") == '1') { /* search for a free reserved sector (not boot sector and not * backup boot sector) */ uint32_t s; for (s = 1; s < le16toh(b->reserved); ++s) if (s != le16toh(b->backup_boot)) break; if (s > 0 && s < le16toh(b->reserved)) { init_fsinfo(&i); fs_write((off_t)s * lss, sizeof(i), &i); b->info_sector = htole16(s); fs_write(offsetof(struct boot_sector, info_sector), sizeof(b->info_sector), &b->info_sector); if (fs->backupboot_start) fs_write(fs->backupboot_start + offsetof(struct boot_sector, info_sector), sizeof(b->info_sector), &b->info_sector); } else { printf("No free reserved sector found -- " "no space for FSINFO sector!\n"); return; } } else return; } fs->fsinfo_start = le16toh(b->info_sector) * lss; fs_read(fs->fsinfo_start, sizeof(i), &i); if (i.magic != htole32(0x41615252) || i.signature != htole32(0x61417272) || i.boot_sign != htole16(0xaa55)) { printf("FSINFO sector has bad magic number(s):\n"); if (i.magic != htole32(0x41615252)) printf(" Offset %llu: 0x%08x != expected 0x%08x\n", (unsigned long long)offsetof(struct info_sector, magic), le32toh(i.magic), 0x41615252); if (i.signature != htole32(0x61417272)) printf(" Offset %llu: 0x%08x != expected 0x%08x\n", (unsigned long long)offsetof(struct info_sector, signature), le32toh(i.signature), 0x61417272); if (i.boot_sign != htole16(0xaa55)) 
printf(" Offset %llu: 0x%04x != expected 0x%04x\n", (unsigned long long)offsetof(struct info_sector, boot_sign), le16toh(i.boot_sign), 0xaa55); if (interactive) printf("1) Correct\n2) Don't correct (FSINFO invalid then)\n"); else printf(" Auto-correcting it.\n"); if (!interactive || get_key("12", "?") == '1') { init_fsinfo(&i); fs_write(fs->fsinfo_start, sizeof(i), &i); } else fs->fsinfo_start = 0; } if (fs->fsinfo_start) fs->free_clusters = le32toh(i.free_clusters); } static char print_fat_dirty_state(void) { printf("Dirty bit is set. Fs was not properly unmounted and" " some data may be corrupt.\n"); if (interactive) { printf("1) Remove dirty bit\n" "2) No action\n"); return get_key("12", "?"); } else printf(" Automatically removing dirty bit.\n"); return '1'; } static void check_fat_state_bit(DOS_FS * fs, void *b) { if (fs->fat_bits == 32) { struct boot_sector *b32 = b; if (b32->reserved3 & FAT_STATE_DIRTY) { printf("0x41: "); if (print_fat_dirty_state() == '1') { b32->reserved3 &= ~FAT_STATE_DIRTY; fs_write(0, sizeof(*b32), b32); } } } else { struct boot_sector_16 *b16 = b; if (b16->reserved2 & FAT_STATE_DIRTY) { printf("0x25: "); if (print_fat_dirty_state() == '1') { b16->reserved2 &= ~FAT_STATE_DIRTY; fs_write(0, sizeof(*b16), b16); } } } } void read_boot(DOS_FS * fs) { struct boot_sector b; unsigned total_sectors; unsigned short logical_sector_size, sectors; unsigned fat_length; unsigned total_fat_entries; off_t data_size; fs_read(0, sizeof(b), &b); logical_sector_size = GET_UNALIGNED_W(b.sector_size); if (!logical_sector_size) die("Logical sector size is zero."); /* This was moved up because it's the first thing that will fail */ /* if the platform needs special handling of unaligned multibyte accesses */ /* but such handling isn't being provided. See GET_UNALIGNED_W() above. 
*/ if (logical_sector_size & (SECTOR_SIZE - 1)) die("Logical sector size (%d bytes) is not a multiple of the physical " "sector size.", logical_sector_size); fs->cluster_size = b.cluster_size * logical_sector_size; if (!fs->cluster_size) die("Cluster size is zero."); if (b.fats != 2 && b.fats != 1) die("Currently, only 1 or 2 FATs are supported, not %d.\n", b.fats); fs->nfats = b.fats; sectors = GET_UNALIGNED_W(b.sectors); total_sectors = sectors ? sectors : le32toh(b.total_sect); if (verbose) printf("Checking we can access the last sector of the filesystem\n"); /* Can't access last odd sector anyway, so round down */ fs_test((off_t)((total_sectors & ~1) - 1) * logical_sector_size, logical_sector_size); fat_length = le16toh(b.fat_length) ? le16toh(b.fat_length) : le32toh(b.fat32_length); fs->fat_start = (off_t)le16toh(b.reserved) * logical_sector_size; fs->root_start = ((off_t)le16toh(b.reserved) + b.fats * fat_length) * logical_sector_size; fs->root_entries = GET_UNALIGNED_W(b.dir_entries); fs->data_start = fs->root_start + ROUND_TO_MULTIPLE(fs->root_entries << MSDOS_DIR_BITS, logical_sector_size); data_size = (off_t)total_sectors * logical_sector_size - fs->data_start; fs->data_clusters = data_size / fs->cluster_size; fs->root_cluster = 0; /* indicates standard, pre-FAT32 root dir */ fs->fsinfo_start = 0; /* no FSINFO structure */ fs->free_clusters = -1; /* unknown */ if (!b.fat_length && b.fat32_length) { fs->fat_bits = 32; fs->root_cluster = le32toh(b.root_cluster); if (!fs->root_cluster && fs->root_entries) /* M$ hasn't specified this, but it looks reasonable: If * root_cluster is 0 but there is a separate root dir * (root_entries != 0), we handle the root dir the old way. Give a * warning, but convertig to a root dir in a cluster chain seems * to complex for now... */ printf("Warning: FAT32 root dir not in cluster chain! 
" "Compatibility mode...\n"); else if (!fs->root_cluster && !fs->root_entries) die("No root directory!"); else if (fs->root_cluster && fs->root_entries) printf("Warning: FAT32 root dir is in a cluster chain, but " "a separate root dir\n" " area is defined. Cannot fix this easily.\n"); if (fs->data_clusters < FAT16_THRESHOLD) printf("Warning: Filesystem is FAT32 according to fat_length " "and fat32_length fields,\n" " but has only %lu clusters, less than the required " "minimum of %d.\n" " This may lead to problems on some systems.\n", (unsigned long)fs->data_clusters, FAT16_THRESHOLD); check_fat_state_bit(fs, &b); fs->backupboot_start = le16toh(b.backup_boot) * logical_sector_size; check_backup_boot(fs, &b, logical_sector_size); read_fsinfo(fs, &b, logical_sector_size); } else if (!atari_format) { /* On real MS-DOS, a 16 bit FAT is used whenever there would be too * much clusers otherwise. */ fs->fat_bits = (fs->data_clusters >= FAT12_THRESHOLD) ? 16 : 12; if (fs->data_clusters >= FAT16_THRESHOLD) die("Too many clusters (%lu) for FAT16 filesystem.", fs->data_clusters); check_fat_state_bit(fs, &b); } else { /* On Atari, things are more difficult: GEMDOS always uses 12bit FATs * on floppies, and always 16 bit on harddisks. */ fs->fat_bits = 16; /* assume 16 bit FAT for now */ /* If more clusters than fat entries in 16-bit fat, we assume * it's a real MSDOS FS with 12-bit fat. */ if (fs->data_clusters + 2 > fat_length * logical_sector_size * 8 / 16 || /* if it has one of the usual floppy sizes -> 12bit FAT */ (total_sectors == 720 || total_sectors == 1440 || total_sectors == 2880)) fs->fat_bits = 12; } /* On FAT32, the high 4 bits of a FAT entry are reserved */ fs->eff_fat_bits = (fs->fat_bits == 32) ? 
28 : fs->fat_bits; fs->fat_size = fat_length * logical_sector_size; fs->label = calloc(12, sizeof(uint8_t)); if (fs->fat_bits == 12 || fs->fat_bits == 16) { struct boot_sector_16 *b16 = (struct boot_sector_16 *)&b; if (b16->extended_sig == 0x29) memmove(fs->label, b16->label, 11); else fs->label = NULL; } else if (fs->fat_bits == 32) { if (b.extended_sig == 0x29) memmove(fs->label, &b.label, 11); else fs->label = NULL; } total_fat_entries = (uint64_t)fs->fat_size * 8 / fs->fat_bits; if (fs->data_clusters > total_fat_entries - 2) die("Filesystem has %u clusters but only space for %u FAT entries.", fs->data_clusters, total_fat_entries - 2); if (!fs->root_entries && !fs->root_cluster) die("Root directory has zero size."); if (fs->root_entries & (MSDOS_DPS - 1)) die("Root directory (%d entries) doesn't span an integral number of " "sectors.", fs->root_entries); if (logical_sector_size & (SECTOR_SIZE - 1)) die("Logical sector size (%d bytes) is not a multiple of the physical " "sector size.", logical_sector_size); #if 0 /* linux kernel doesn't check that either */ /* ++roman: On Atari, these two fields are often left uninitialized */ if (!atari_format && (!b.secs_track || !b.heads)) die("Invalid disk format in boot sector."); #endif if (verbose) dump_boot(fs, &b, logical_sector_size); } static void write_boot_label(DOS_FS * fs, char *label) { if (fs->fat_bits == 12 || fs->fat_bits == 16) { struct boot_sector_16 b16; fs_read(0, sizeof(b16), &b16); if (b16.extended_sig != 0x29) { b16.extended_sig = 0x29; b16.serial = 0; memmove(b16.fs_type, fs->fat_bits == 12 ? 
"FAT12 " : "FAT16 ", 8); } memmove(b16.label, label, 11); fs_write(0, sizeof(b16), &b16); } else if (fs->fat_bits == 32) { struct boot_sector b; fs_read(0, sizeof(b), &b); if (b.extended_sig != 0x29) { b.extended_sig = 0x29; b.serial = 0; memmove(b.fs_type, "FAT32 ", 8); } memmove(b.label, label, 11); fs_write(0, sizeof(b), &b); if (fs->backupboot_start) fs_write(fs->backupboot_start, sizeof(b), &b); } } off_t find_volume_de(DOS_FS * fs, DIR_ENT * de) { uint32_t cluster; off_t offset; int i; if (fs->root_cluster) { for (cluster = fs->root_cluster; cluster != 0 && cluster != -1; cluster = next_cluster(fs, cluster)) { offset = cluster_start(fs, cluster); for (i = 0; i * sizeof(DIR_ENT) < fs->cluster_size; i++) { fs_read(offset, sizeof(DIR_ENT), de); if (de->attr != VFAT_LN_ATTR && de->attr & ATTR_VOLUME) return offset; offset += sizeof(DIR_ENT); } } } else { for (i = 0; i < fs->root_entries; i++) { offset = fs->root_start + i * sizeof(DIR_ENT); fs_read(offset, sizeof(DIR_ENT), de); if (de->attr != VFAT_LN_ATTR && de->attr & ATTR_VOLUME) return offset; } } return 0; } static void write_volume_label(DOS_FS * fs, char *label) { time_t now = time(NULL); struct tm *mtime = localtime(&now); off_t offset; int created; DIR_ENT de; created = 0; offset = find_volume_de(fs, &de); if (offset == 0) { created = 1; offset = alloc_rootdir_entry(fs, &de, label); } memcpy(de.name, label, 11); de.time = htole16((unsigned short)((mtime->tm_sec >> 1) + (mtime->tm_min << 5) + (mtime->tm_hour << 11))); de.date = htole16((unsigned short)(mtime->tm_mday + ((mtime->tm_mon + 1) << 5) + ((mtime->tm_year - 80) << 9))); if (created) { de.attr = ATTR_VOLUME; de.ctime_ms = 0; de.ctime = de.time; de.cdate = de.date; de.adate = de.date; de.starthi = 0; de.start = 0; de.size = 0; } fs_write(offset, sizeof(DIR_ENT), &de); } void write_label(DOS_FS * fs, char *label) { int l = strlen(label); while (l < 11) label[l++] = ' '; write_boot_label(fs, label); write_volume_label(fs, label); }
/* boot.c - Read and analyze ia PC/MS-DOS boot sector Copyright (C) 1993 Werner Almesberger <werner.almesberger@lrc.di.epfl.ch> Copyright (C) 1998 Roman Hodek <Roman.Hodek@informatik.uni-erlangen.de> Copyright (C) 2008-2014 Daniel Baumann <mail@daniel-baumann.ch> Copyright (C) 2015 Andreas Bombe <aeb@debian.org> This program is free software: you can redistribute it and/or modify it under the terms of the GNU General Public License as published by the Free Software Foundation, either version 3 of the License, or (at your option) any later version. This program is distributed in the hope that it will be useful, but WITHOUT ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for more details. You should have received a copy of the GNU General Public License along with this program. If not, see <http://www.gnu.org/licenses/>. The complete text of the GNU General Public License can be found in /usr/share/common-licenses/GPL-3 file. */ /* FAT32, VFAT, Atari format support, and various fixes additions May 1998 * by Roman Hodek <Roman.Hodek@informatik.uni-erlangen.de> */ #include <stdio.h> #include <stdint.h> #include <string.h> #include <stdlib.h> #include <sys/types.h> #include <time.h> #include "common.h" #include "fsck.fat.h" #include "fat.h" #include "io.h" #include "boot.h" #include "check.h" #define ROUND_TO_MULTIPLE(n,m) ((n) && (m) ? 
(n)+(m)-1-((n)-1)%(m) : 0) /* don't divide by zero */ /* cut-over cluster counts for FAT12 and FAT16 */ #define FAT12_THRESHOLD 4085 #define FAT16_THRESHOLD 65525 static struct { uint8_t media; const char *descr; } mediabytes[] = { { 0xf0, "5.25\" or 3.5\" HD floppy"}, { 0xf8, "hard disk"}, { 0xf9, "3,5\" 720k floppy 2s/80tr/9sec or " "5.25\" 1.2M floppy 2s/80tr/15sec"}, { 0xfa, "5.25\" 320k floppy 1s/80tr/8sec"}, { 0xfb, "3.5\" 640k floppy 2s/80tr/8sec"}, { 0xfc, "5.25\" 180k floppy 1s/40tr/9sec"}, { 0xfd, "5.25\" 360k floppy 2s/40tr/9sec"}, { 0xfe, "5.25\" 160k floppy 1s/40tr/8sec"}, { 0xff, "5.25\" 320k floppy 2s/40tr/8sec"},}; /* Unaligned fields must first be accessed byte-wise */ #define GET_UNALIGNED_W(f) \ ( (uint16_t)f[0] | ((uint16_t)f[1]<<8) ) static const char *get_media_descr(unsigned char media) { int i; for (i = 0; i < sizeof(mediabytes) / sizeof(*mediabytes); ++i) { if (mediabytes[i].media == media) return (mediabytes[i].descr); } return ("undefined"); } static void dump_boot(DOS_FS * fs, struct boot_sector *b, unsigned lss) { unsigned short sectors; printf("Boot sector contents:\n"); if (!atari_format) { char id[9]; strncpy(id, (const char *)b->system_id, 8); id[8] = 0; printf("System ID \"%s\"\n", id); } else { /* On Atari, a 24 bit serial number is stored at offset 8 of the boot * sector */ printf("Serial number 0x%x\n", b->system_id[5] | (b->system_id[6] << 8) | (b-> system_id[7] << 16)); } printf("Media byte 0x%02x (%s)\n", b->media, get_media_descr(b->media)); printf("%10d bytes per logical sector\n", GET_UNALIGNED_W(b->sector_size)); printf("%10d bytes per cluster\n", fs->cluster_size); printf("%10d reserved sector%s\n", le16toh(b->reserved), le16toh(b->reserved) == 1 ? 
"" : "s"); printf("First FAT starts at byte %llu (sector %llu)\n", (unsigned long long)fs->fat_start, (unsigned long long)fs->fat_start / lss); printf("%10d FATs, %d bit entries\n", b->fats, fs->fat_bits); printf("%10lld bytes per FAT (= %llu sectors)\n", (long long)fs->fat_size, (long long)fs->fat_size / lss); if (!fs->root_cluster) { printf("Root directory starts at byte %llu (sector %llu)\n", (unsigned long long)fs->root_start, (unsigned long long)fs->root_start / lss); printf("%10d root directory entries\n", fs->root_entries); } else { printf("Root directory start at cluster %lu (arbitrary size)\n", (unsigned long)fs->root_cluster); } printf("Data area starts at byte %llu (sector %llu)\n", (unsigned long long)fs->data_start, (unsigned long long)fs->data_start / lss); printf("%10lu data clusters (%llu bytes)\n", (unsigned long)fs->data_clusters, (unsigned long long)fs->data_clusters * fs->cluster_size); printf("%u sectors/track, %u heads\n", le16toh(b->secs_track), le16toh(b->heads)); printf("%10u hidden sectors\n", atari_format ? /* On Atari, the hidden field is only 16 bit wide and unused */ (((unsigned char *)&b->hidden)[0] | ((unsigned char *)&b->hidden)[1] << 8) : le32toh(b->hidden)); sectors = GET_UNALIGNED_W(b->sectors); printf("%10u sectors total\n", sectors ? sectors : le32toh(b->total_sect)); } static void check_backup_boot(DOS_FS * fs, struct boot_sector *b, int lss) { struct boot_sector b2; if (!fs->backupboot_start) { printf("There is no backup boot sector.\n"); if (le16toh(b->reserved) < 3) { printf("And there is no space for creating one!\n"); return; } if (interactive) printf("1) Create one\n2) Do without a backup\n"); else printf(" Auto-creating backup boot block.\n"); if (!interactive || get_key("12", "?") == '1') { int bbs; /* The usual place for the backup boot sector is sector 6. Choose * that or the last reserved sector. 
*/ if (le16toh(b->reserved) >= 7 && le16toh(b->info_sector) != 6) bbs = 6; else { bbs = le16toh(b->reserved) - 1; if (bbs == le16toh(b->info_sector)) --bbs; /* this is never 0, as we checked reserved >= 3! */ } fs->backupboot_start = bbs * lss; b->backup_boot = htole16(bbs); fs_write(fs->backupboot_start, sizeof(*b), b); fs_write(offsetof(struct boot_sector, backup_boot), sizeof(b->backup_boot), &b->backup_boot); printf("Created backup of boot sector in sector %d\n", bbs); return; } else return; } fs_read(fs->backupboot_start, sizeof(b2), &b2); if (memcmp(b, &b2, sizeof(b2)) != 0) { /* there are any differences */ uint8_t *p, *q; int i, pos, first = 1; char buf[20]; printf("There are differences between boot sector and its backup.\n"); printf("This is mostly harmless. Differences: (offset:original/backup)\n "); pos = 2; for (p = (uint8_t *) b, q = (uint8_t *) & b2, i = 0; i < sizeof(b2); ++p, ++q, ++i) { if (*p != *q) { sprintf(buf, "%s%u:%02x/%02x", first ? "" : ", ", (unsigned)(p - (uint8_t *) b), *p, *q); if (pos + strlen(buf) > 78) printf("\n "), pos = 2; printf("%s", buf); pos += strlen(buf); first = 0; } } printf("\n"); if (interactive) printf("1) Copy original to backup\n" "2) Copy backup to original\n" "3) No action\n"); else printf(" Not automatically fixing this.\n"); switch (interactive ? 
get_key("123", "?") : '3') { case '1': fs_write(fs->backupboot_start, sizeof(*b), b); break; case '2': fs_write(0, sizeof(b2), &b2); break; default: break; } } } static void init_fsinfo(struct info_sector *i) { i->magic = htole32(0x41615252); i->signature = htole32(0x61417272); i->free_clusters = htole32(-1); i->next_cluster = htole32(2); i->boot_sign = htole16(0xaa55); } static void read_fsinfo(DOS_FS * fs, struct boot_sector *b, int lss) { struct info_sector i; if (!b->info_sector) { printf("No FSINFO sector\n"); if (interactive) printf("1) Create one\n2) Do without FSINFO\n"); else printf(" Not automatically creating it.\n"); if (interactive && get_key("12", "?") == '1') { /* search for a free reserved sector (not boot sector and not * backup boot sector) */ uint32_t s; for (s = 1; s < le16toh(b->reserved); ++s) if (s != le16toh(b->backup_boot)) break; if (s > 0 && s < le16toh(b->reserved)) { init_fsinfo(&i); fs_write((off_t)s * lss, sizeof(i), &i); b->info_sector = htole16(s); fs_write(offsetof(struct boot_sector, info_sector), sizeof(b->info_sector), &b->info_sector); if (fs->backupboot_start) fs_write(fs->backupboot_start + offsetof(struct boot_sector, info_sector), sizeof(b->info_sector), &b->info_sector); } else { printf("No free reserved sector found -- " "no space for FSINFO sector!\n"); return; } } else return; } fs->fsinfo_start = le16toh(b->info_sector) * lss; fs_read(fs->fsinfo_start, sizeof(i), &i); if (i.magic != htole32(0x41615252) || i.signature != htole32(0x61417272) || i.boot_sign != htole16(0xaa55)) { printf("FSINFO sector has bad magic number(s):\n"); if (i.magic != htole32(0x41615252)) printf(" Offset %llu: 0x%08x != expected 0x%08x\n", (unsigned long long)offsetof(struct info_sector, magic), le32toh(i.magic), 0x41615252); if (i.signature != htole32(0x61417272)) printf(" Offset %llu: 0x%08x != expected 0x%08x\n", (unsigned long long)offsetof(struct info_sector, signature), le32toh(i.signature), 0x61417272); if (i.boot_sign != htole16(0xaa55)) 
printf(" Offset %llu: 0x%04x != expected 0x%04x\n", (unsigned long long)offsetof(struct info_sector, boot_sign), le16toh(i.boot_sign), 0xaa55); if (interactive) printf("1) Correct\n2) Don't correct (FSINFO invalid then)\n"); else printf(" Auto-correcting it.\n"); if (!interactive || get_key("12", "?") == '1') { init_fsinfo(&i); fs_write(fs->fsinfo_start, sizeof(i), &i); } else fs->fsinfo_start = 0; } if (fs->fsinfo_start) fs->free_clusters = le32toh(i.free_clusters); } static char print_fat_dirty_state(void) { printf("Dirty bit is set. Fs was not properly unmounted and" " some data may be corrupt.\n"); if (interactive) { printf("1) Remove dirty bit\n" "2) No action\n"); return get_key("12", "?"); } else printf(" Automatically removing dirty bit.\n"); return '1'; } static void check_fat_state_bit(DOS_FS * fs, void *b) { if (fs->fat_bits == 32) { struct boot_sector *b32 = b; if (b32->reserved3 & FAT_STATE_DIRTY) { printf("0x41: "); if (print_fat_dirty_state() == '1') { b32->reserved3 &= ~FAT_STATE_DIRTY; fs_write(0, sizeof(*b32), b32); } } } else { struct boot_sector_16 *b16 = b; if (b16->reserved2 & FAT_STATE_DIRTY) { printf("0x25: "); if (print_fat_dirty_state() == '1') { b16->reserved2 &= ~FAT_STATE_DIRTY; fs_write(0, sizeof(*b16), b16); } } } } void read_boot(DOS_FS * fs) { struct boot_sector b; unsigned total_sectors; unsigned short logical_sector_size, sectors; off_t fat_length; unsigned total_fat_entries; off_t data_size; fs_read(0, sizeof(b), &b); logical_sector_size = GET_UNALIGNED_W(b.sector_size); if (!logical_sector_size) die("Logical sector size is zero."); /* This was moved up because it's the first thing that will fail */ /* if the platform needs special handling of unaligned multibyte accesses */ /* but such handling isn't being provided. See GET_UNALIGNED_W() above. 
*/ if (logical_sector_size & (SECTOR_SIZE - 1)) die("Logical sector size (%d bytes) is not a multiple of the physical " "sector size.", logical_sector_size); fs->cluster_size = b.cluster_size * logical_sector_size; if (!fs->cluster_size) die("Cluster size is zero."); if (b.fats != 2 && b.fats != 1) die("Currently, only 1 or 2 FATs are supported, not %d.\n", b.fats); fs->nfats = b.fats; sectors = GET_UNALIGNED_W(b.sectors); total_sectors = sectors ? sectors : le32toh(b.total_sect); if (verbose) printf("Checking we can access the last sector of the filesystem\n"); /* Can't access last odd sector anyway, so round down */ fs_test((off_t)((total_sectors & ~1) - 1) * logical_sector_size, logical_sector_size); fat_length = le16toh(b.fat_length) ? le16toh(b.fat_length) : le32toh(b.fat32_length); if (!fat_length) die("FAT size is zero."); fs->fat_start = (off_t)le16toh(b.reserved) * logical_sector_size; fs->root_start = ((off_t)le16toh(b.reserved) + b.fats * fat_length) * logical_sector_size; fs->root_entries = GET_UNALIGNED_W(b.dir_entries); fs->data_start = fs->root_start + ROUND_TO_MULTIPLE(fs->root_entries << MSDOS_DIR_BITS, logical_sector_size); data_size = (off_t)total_sectors * logical_sector_size - fs->data_start; if (data_size < fs->cluster_size) die("Filesystem has no space for any data clusters"); fs->data_clusters = data_size / fs->cluster_size; fs->root_cluster = 0; /* indicates standard, pre-FAT32 root dir */ fs->fsinfo_start = 0; /* no FSINFO structure */ fs->free_clusters = -1; /* unknown */ if (!b.fat_length && b.fat32_length) { fs->fat_bits = 32; fs->root_cluster = le32toh(b.root_cluster); if (!fs->root_cluster && fs->root_entries) /* M$ hasn't specified this, but it looks reasonable: If * root_cluster is 0 but there is a separate root dir * (root_entries != 0), we handle the root dir the old way. Give a * warning, but convertig to a root dir in a cluster chain seems * to complex for now... */ printf("Warning: FAT32 root dir not in cluster chain! 
" "Compatibility mode...\n"); else if (!fs->root_cluster && !fs->root_entries) die("No root directory!"); else if (fs->root_cluster && fs->root_entries) printf("Warning: FAT32 root dir is in a cluster chain, but " "a separate root dir\n" " area is defined. Cannot fix this easily.\n"); if (fs->data_clusters < FAT16_THRESHOLD) printf("Warning: Filesystem is FAT32 according to fat_length " "and fat32_length fields,\n" " but has only %lu clusters, less than the required " "minimum of %d.\n" " This may lead to problems on some systems.\n", (unsigned long)fs->data_clusters, FAT16_THRESHOLD); check_fat_state_bit(fs, &b); fs->backupboot_start = le16toh(b.backup_boot) * logical_sector_size; check_backup_boot(fs, &b, logical_sector_size); read_fsinfo(fs, &b, logical_sector_size); } else if (!atari_format) { /* On real MS-DOS, a 16 bit FAT is used whenever there would be too * much clusers otherwise. */ fs->fat_bits = (fs->data_clusters >= FAT12_THRESHOLD) ? 16 : 12; if (fs->data_clusters >= FAT16_THRESHOLD) die("Too many clusters (%lu) for FAT16 filesystem.", fs->data_clusters); check_fat_state_bit(fs, &b); } else { /* On Atari, things are more difficult: GEMDOS always uses 12bit FATs * on floppies, and always 16 bit on harddisks. */ fs->fat_bits = 16; /* assume 16 bit FAT for now */ /* If more clusters than fat entries in 16-bit fat, we assume * it's a real MSDOS FS with 12-bit fat. */ if (fs->data_clusters + 2 > fat_length * logical_sector_size * 8 / 16 || /* if it has one of the usual floppy sizes -> 12bit FAT */ (total_sectors == 720 || total_sectors == 1440 || total_sectors == 2880)) fs->fat_bits = 12; } /* On FAT32, the high 4 bits of a FAT entry are reserved */ fs->eff_fat_bits = (fs->fat_bits == 32) ? 
28 : fs->fat_bits; fs->fat_size = fat_length * logical_sector_size; fs->label = calloc(12, sizeof(uint8_t)); if (fs->fat_bits == 12 || fs->fat_bits == 16) { struct boot_sector_16 *b16 = (struct boot_sector_16 *)&b; if (b16->extended_sig == 0x29) memmove(fs->label, b16->label, 11); else fs->label = NULL; } else if (fs->fat_bits == 32) { if (b.extended_sig == 0x29) memmove(fs->label, &b.label, 11); else fs->label = NULL; } total_fat_entries = (uint64_t)fs->fat_size * 8 / fs->fat_bits; if (fs->data_clusters > total_fat_entries - 2) die("Filesystem has %u clusters but only space for %u FAT entries.", fs->data_clusters, total_fat_entries - 2); if (!fs->root_entries && !fs->root_cluster) die("Root directory has zero size."); if (fs->root_entries & (MSDOS_DPS - 1)) die("Root directory (%d entries) doesn't span an integral number of " "sectors.", fs->root_entries); if (logical_sector_size & (SECTOR_SIZE - 1)) die("Logical sector size (%d bytes) is not a multiple of the physical " "sector size.", logical_sector_size); #if 0 /* linux kernel doesn't check that either */ /* ++roman: On Atari, these two fields are often left uninitialized */ if (!atari_format && (!b.secs_track || !b.heads)) die("Invalid disk format in boot sector."); #endif if (verbose) dump_boot(fs, &b, logical_sector_size); } static void write_boot_label(DOS_FS * fs, char *label) { if (fs->fat_bits == 12 || fs->fat_bits == 16) { struct boot_sector_16 b16; fs_read(0, sizeof(b16), &b16); if (b16.extended_sig != 0x29) { b16.extended_sig = 0x29; b16.serial = 0; memmove(b16.fs_type, fs->fat_bits == 12 ? 
"FAT12 " : "FAT16 ", 8); } memmove(b16.label, label, 11); fs_write(0, sizeof(b16), &b16); } else if (fs->fat_bits == 32) { struct boot_sector b; fs_read(0, sizeof(b), &b); if (b.extended_sig != 0x29) { b.extended_sig = 0x29; b.serial = 0; memmove(b.fs_type, "FAT32 ", 8); } memmove(b.label, label, 11); fs_write(0, sizeof(b), &b); if (fs->backupboot_start) fs_write(fs->backupboot_start, sizeof(b), &b); } } off_t find_volume_de(DOS_FS * fs, DIR_ENT * de) { uint32_t cluster; off_t offset; int i; if (fs->root_cluster) { for (cluster = fs->root_cluster; cluster != 0 && cluster != -1; cluster = next_cluster(fs, cluster)) { offset = cluster_start(fs, cluster); for (i = 0; i * sizeof(DIR_ENT) < fs->cluster_size; i++) { fs_read(offset, sizeof(DIR_ENT), de); if (de->attr != VFAT_LN_ATTR && de->attr & ATTR_VOLUME) return offset; offset += sizeof(DIR_ENT); } } } else { for (i = 0; i < fs->root_entries; i++) { offset = fs->root_start + i * sizeof(DIR_ENT); fs_read(offset, sizeof(DIR_ENT), de); if (de->attr != VFAT_LN_ATTR && de->attr & ATTR_VOLUME) return offset; } } return 0; } static void write_volume_label(DOS_FS * fs, char *label) { time_t now = time(NULL); struct tm *mtime = localtime(&now); off_t offset; int created; DIR_ENT de; created = 0; offset = find_volume_de(fs, &de); if (offset == 0) { created = 1; offset = alloc_rootdir_entry(fs, &de, label); } memcpy(de.name, label, 11); de.time = htole16((unsigned short)((mtime->tm_sec >> 1) + (mtime->tm_min << 5) + (mtime->tm_hour << 11))); de.date = htole16((unsigned short)(mtime->tm_mday + ((mtime->tm_mon + 1) << 5) + ((mtime->tm_year - 80) << 9))); if (created) { de.attr = ATTR_VOLUME; de.ctime_ms = 0; de.ctime = de.time; de.cdate = de.date; de.adate = de.date; de.starthi = 0; de.start = 0; de.size = 0; } fs_write(offset, sizeof(DIR_ENT), &de); } void write_label(DOS_FS * fs, char *label) { int l = strlen(label); while (l < 11) label[l++] = ' '; write_boot_label(fs, label); write_volume_label(fs, label); }
void read_boot(DOS_FS * fs) { struct boot_sector b; unsigned total_sectors; unsigned short logical_sector_size, sectors; unsigned fat_length; unsigned total_fat_entries; off_t data_size; fs_read(0, sizeof(b), &b); logical_sector_size = GET_UNALIGNED_W(b.sector_size); if (!logical_sector_size) die("Logical sector size is zero."); /* This was moved up because it's the first thing that will fail */ /* if the platform needs special handling of unaligned multibyte accesses */ /* but such handling isn't being provided. See GET_UNALIGNED_W() above. */ if (logical_sector_size & (SECTOR_SIZE - 1)) die("Logical sector size (%d bytes) is not a multiple of the physical " "sector size.", logical_sector_size); fs->cluster_size = b.cluster_size * logical_sector_size; if (!fs->cluster_size) die("Cluster size is zero."); if (b.fats != 2 && b.fats != 1) die("Currently, only 1 or 2 FATs are supported, not %d.\n", b.fats); fs->nfats = b.fats; sectors = GET_UNALIGNED_W(b.sectors); total_sectors = sectors ? sectors : le32toh(b.total_sect); if (verbose) printf("Checking we can access the last sector of the filesystem\n"); /* Can't access last odd sector anyway, so round down */ fs_test((off_t)((total_sectors & ~1) - 1) * logical_sector_size, logical_sector_size); fat_length = le16toh(b.fat_length) ? 
le16toh(b.fat_length) : le32toh(b.fat32_length); fs->fat_start = (off_t)le16toh(b.reserved) * logical_sector_size; fs->root_start = ((off_t)le16toh(b.reserved) + b.fats * fat_length) * logical_sector_size; fs->root_entries = GET_UNALIGNED_W(b.dir_entries); fs->data_start = fs->root_start + ROUND_TO_MULTIPLE(fs->root_entries << MSDOS_DIR_BITS, logical_sector_size); data_size = (off_t)total_sectors * logical_sector_size - fs->data_start; fs->data_clusters = data_size / fs->cluster_size; fs->root_cluster = 0; /* indicates standard, pre-FAT32 root dir */ fs->fsinfo_start = 0; /* no FSINFO structure */ fs->free_clusters = -1; /* unknown */ if (!b.fat_length && b.fat32_length) { fs->fat_bits = 32; fs->root_cluster = le32toh(b.root_cluster); if (!fs->root_cluster && fs->root_entries) /* M$ hasn't specified this, but it looks reasonable: If * root_cluster is 0 but there is a separate root dir * (root_entries != 0), we handle the root dir the old way. Give a * warning, but convertig to a root dir in a cluster chain seems * to complex for now... */ printf("Warning: FAT32 root dir not in cluster chain! " "Compatibility mode...\n"); else if (!fs->root_cluster && !fs->root_entries) die("No root directory!"); else if (fs->root_cluster && fs->root_entries) printf("Warning: FAT32 root dir is in a cluster chain, but " "a separate root dir\n" " area is defined. 
Cannot fix this easily.\n"); if (fs->data_clusters < FAT16_THRESHOLD) printf("Warning: Filesystem is FAT32 according to fat_length " "and fat32_length fields,\n" " but has only %lu clusters, less than the required " "minimum of %d.\n" " This may lead to problems on some systems.\n", (unsigned long)fs->data_clusters, FAT16_THRESHOLD); check_fat_state_bit(fs, &b); fs->backupboot_start = le16toh(b.backup_boot) * logical_sector_size; check_backup_boot(fs, &b, logical_sector_size); read_fsinfo(fs, &b, logical_sector_size); } else if (!atari_format) { /* On real MS-DOS, a 16 bit FAT is used whenever there would be too * much clusers otherwise. */ fs->fat_bits = (fs->data_clusters >= FAT12_THRESHOLD) ? 16 : 12; if (fs->data_clusters >= FAT16_THRESHOLD) die("Too many clusters (%lu) for FAT16 filesystem.", fs->data_clusters); check_fat_state_bit(fs, &b); } else { /* On Atari, things are more difficult: GEMDOS always uses 12bit FATs * on floppies, and always 16 bit on harddisks. */ fs->fat_bits = 16; /* assume 16 bit FAT for now */ /* If more clusters than fat entries in 16-bit fat, we assume * it's a real MSDOS FS with 12-bit fat. */ if (fs->data_clusters + 2 > fat_length * logical_sector_size * 8 / 16 || /* if it has one of the usual floppy sizes -> 12bit FAT */ (total_sectors == 720 || total_sectors == 1440 || total_sectors == 2880)) fs->fat_bits = 12; } /* On FAT32, the high 4 bits of a FAT entry are reserved */ fs->eff_fat_bits = (fs->fat_bits == 32) ? 
28 : fs->fat_bits; fs->fat_size = fat_length * logical_sector_size; fs->label = calloc(12, sizeof(uint8_t)); if (fs->fat_bits == 12 || fs->fat_bits == 16) { struct boot_sector_16 *b16 = (struct boot_sector_16 *)&b; if (b16->extended_sig == 0x29) memmove(fs->label, b16->label, 11); else fs->label = NULL; } else if (fs->fat_bits == 32) { if (b.extended_sig == 0x29) memmove(fs->label, &b.label, 11); else fs->label = NULL; } total_fat_entries = (uint64_t)fs->fat_size * 8 / fs->fat_bits; if (fs->data_clusters > total_fat_entries - 2) die("Filesystem has %u clusters but only space for %u FAT entries.", fs->data_clusters, total_fat_entries - 2); if (!fs->root_entries && !fs->root_cluster) die("Root directory has zero size."); if (fs->root_entries & (MSDOS_DPS - 1)) die("Root directory (%d entries) doesn't span an integral number of " "sectors.", fs->root_entries); if (logical_sector_size & (SECTOR_SIZE - 1)) die("Logical sector size (%d bytes) is not a multiple of the physical " "sector size.", logical_sector_size); #if 0 /* linux kernel doesn't check that either */ /* ++roman: On Atari, these two fields are often left uninitialized */ if (!atari_format && (!b.secs_track || !b.heads)) die("Invalid disk format in boot sector."); #endif if (verbose) dump_boot(fs, &b, logical_sector_size); }
void read_boot(DOS_FS * fs) { struct boot_sector b; unsigned total_sectors; unsigned short logical_sector_size, sectors; off_t fat_length; unsigned total_fat_entries; off_t data_size; fs_read(0, sizeof(b), &b); logical_sector_size = GET_UNALIGNED_W(b.sector_size); if (!logical_sector_size) die("Logical sector size is zero."); /* This was moved up because it's the first thing that will fail */ /* if the platform needs special handling of unaligned multibyte accesses */ /* but such handling isn't being provided. See GET_UNALIGNED_W() above. */ if (logical_sector_size & (SECTOR_SIZE - 1)) die("Logical sector size (%d bytes) is not a multiple of the physical " "sector size.", logical_sector_size); fs->cluster_size = b.cluster_size * logical_sector_size; if (!fs->cluster_size) die("Cluster size is zero."); if (b.fats != 2 && b.fats != 1) die("Currently, only 1 or 2 FATs are supported, not %d.\n", b.fats); fs->nfats = b.fats; sectors = GET_UNALIGNED_W(b.sectors); total_sectors = sectors ? sectors : le32toh(b.total_sect); if (verbose) printf("Checking we can access the last sector of the filesystem\n"); /* Can't access last odd sector anyway, so round down */ fs_test((off_t)((total_sectors & ~1) - 1) * logical_sector_size, logical_sector_size); fat_length = le16toh(b.fat_length) ? 
le16toh(b.fat_length) : le32toh(b.fat32_length); if (!fat_length) die("FAT size is zero."); fs->fat_start = (off_t)le16toh(b.reserved) * logical_sector_size; fs->root_start = ((off_t)le16toh(b.reserved) + b.fats * fat_length) * logical_sector_size; fs->root_entries = GET_UNALIGNED_W(b.dir_entries); fs->data_start = fs->root_start + ROUND_TO_MULTIPLE(fs->root_entries << MSDOS_DIR_BITS, logical_sector_size); data_size = (off_t)total_sectors * logical_sector_size - fs->data_start; if (data_size < fs->cluster_size) die("Filesystem has no space for any data clusters"); fs->data_clusters = data_size / fs->cluster_size; fs->root_cluster = 0; /* indicates standard, pre-FAT32 root dir */ fs->fsinfo_start = 0; /* no FSINFO structure */ fs->free_clusters = -1; /* unknown */ if (!b.fat_length && b.fat32_length) { fs->fat_bits = 32; fs->root_cluster = le32toh(b.root_cluster); if (!fs->root_cluster && fs->root_entries) /* M$ hasn't specified this, but it looks reasonable: If * root_cluster is 0 but there is a separate root dir * (root_entries != 0), we handle the root dir the old way. Give a * warning, but convertig to a root dir in a cluster chain seems * to complex for now... */ printf("Warning: FAT32 root dir not in cluster chain! " "Compatibility mode...\n"); else if (!fs->root_cluster && !fs->root_entries) die("No root directory!"); else if (fs->root_cluster && fs->root_entries) printf("Warning: FAT32 root dir is in a cluster chain, but " "a separate root dir\n" " area is defined. 
Cannot fix this easily.\n"); if (fs->data_clusters < FAT16_THRESHOLD) printf("Warning: Filesystem is FAT32 according to fat_length " "and fat32_length fields,\n" " but has only %lu clusters, less than the required " "minimum of %d.\n" " This may lead to problems on some systems.\n", (unsigned long)fs->data_clusters, FAT16_THRESHOLD); check_fat_state_bit(fs, &b); fs->backupboot_start = le16toh(b.backup_boot) * logical_sector_size; check_backup_boot(fs, &b, logical_sector_size); read_fsinfo(fs, &b, logical_sector_size); } else if (!atari_format) { /* On real MS-DOS, a 16 bit FAT is used whenever there would be too * much clusers otherwise. */ fs->fat_bits = (fs->data_clusters >= FAT12_THRESHOLD) ? 16 : 12; if (fs->data_clusters >= FAT16_THRESHOLD) die("Too many clusters (%lu) for FAT16 filesystem.", fs->data_clusters); check_fat_state_bit(fs, &b); } else { /* On Atari, things are more difficult: GEMDOS always uses 12bit FATs * on floppies, and always 16 bit on harddisks. */ fs->fat_bits = 16; /* assume 16 bit FAT for now */ /* If more clusters than fat entries in 16-bit fat, we assume * it's a real MSDOS FS with 12-bit fat. */ if (fs->data_clusters + 2 > fat_length * logical_sector_size * 8 / 16 || /* if it has one of the usual floppy sizes -> 12bit FAT */ (total_sectors == 720 || total_sectors == 1440 || total_sectors == 2880)) fs->fat_bits = 12; } /* On FAT32, the high 4 bits of a FAT entry are reserved */ fs->eff_fat_bits = (fs->fat_bits == 32) ? 
28 : fs->fat_bits; fs->fat_size = fat_length * logical_sector_size; fs->label = calloc(12, sizeof(uint8_t)); if (fs->fat_bits == 12 || fs->fat_bits == 16) { struct boot_sector_16 *b16 = (struct boot_sector_16 *)&b; if (b16->extended_sig == 0x29) memmove(fs->label, b16->label, 11); else fs->label = NULL; } else if (fs->fat_bits == 32) { if (b.extended_sig == 0x29) memmove(fs->label, &b.label, 11); else fs->label = NULL; } total_fat_entries = (uint64_t)fs->fat_size * 8 / fs->fat_bits; if (fs->data_clusters > total_fat_entries - 2) die("Filesystem has %u clusters but only space for %u FAT entries.", fs->data_clusters, total_fat_entries - 2); if (!fs->root_entries && !fs->root_cluster) die("Root directory has zero size."); if (fs->root_entries & (MSDOS_DPS - 1)) die("Root directory (%d entries) doesn't span an integral number of " "sectors.", fs->root_entries); if (logical_sector_size & (SECTOR_SIZE - 1)) die("Logical sector size (%d bytes) is not a multiple of the physical " "sector size.", logical_sector_size); #if 0 /* linux kernel doesn't check that either */ /* ++roman: On Atari, these two fields are often left uninitialized */ if (!atari_format && (!b.secs_track || !b.heads)) die("Invalid disk format in boot sector."); #endif if (verbose) dump_boot(fs, &b, logical_sector_size); }
{'added': [(106, ' printf("%10lld bytes per FAT (= %llu sectors)\\n", (long long)fs->fat_size,'), (107, '\t (long long)fs->fat_size / lss);'), (332, ' off_t fat_length;'), (361, ''), (364, ' if (!fat_length)'), (365, '\tdie("FAT size is zero.");'), (366, ''), (374, ''), (376, ' if (data_size < fs->cluster_size)'), (377, '\tdie("Filesystem has no space for any data clusters");'), (378, '')], 'deleted': [(106, ' printf("%10d bytes per FAT (= %u sectors)\\n", fs->fat_size,'), (107, '\t fs->fat_size / lss);'), (332, ' unsigned fat_length;')]}
11
3
463
3,341
108
833
41
https://github.com/dosfstools/dosfstools
CVE-2016-4804
CWE-119
1,139
pdf.c
C
get_object
/****************************************************************************** * pdf.c * * pdfresurrect - PDF history extraction tool * * Copyright (C) 2008-2010, 2012-2013, 2017-19, Matt Davis (enferex). * * Special thanks to all of the contributors: See AUTHORS. * * Special thanks to 757labs (757 crew), they are a great group * of people to hack on projects and brainstorm with. * * pdf.c is part of pdfresurrect. * pdfresurrect is free software: you can redistribute it and/or modify * it under the terms of the GNU General Public License as published by * the Free Software Foundation, either version 3 of the License, or * (at your option) any later version. * * pdfresurrect is distributed in the hope that it will be useful, * but WITHOUT ANY WARRANTY; without even the implied warranty of * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the * GNU General Public License for more details. * * You should have received a copy of the GNU General Public License * along with pdfresurrect. If not, see <http://www.gnu.org/licenses/>. *****************************************************************************/ #include <stdlib.h> #include <string.h> #include <ctype.h> #include "pdf.h" #include "main.h" /* * Macros */ /* SAFE_F * * Safe file read: use for fgetc() calls, this is really ugly looking. * _fp: FILE * handle * _expr: The expression with fgetc() in it: * * example: If we get a character from the file and it is ascii character 'a' * This assumes the coder wants to store the 'a' in variable ch * Kinda pointless if you already know that you have 'a', but for * illustrative purposes. * * if (SAFE_F(my_fp, ((c=fgetc(my_fp)) == 'a'))) * do_way_cool_stuff(); */ #define SAFE_F(_fp, _expr) \ ((!ferror(_fp) && !feof(_fp) && (_expr))) /* SAFE_E * * Safe expression handling. This macro is a wrapper * that compares the result of an expression (_expr) to the expected * value (_cmp). * * _expr: Expression to test. * _cmp: Expected value, error if this returns false. 
* _msg: What to say when an error occurs. */ #define SAFE_E(_expr, _cmp, _msg) \ do { \ if ((_expr) != (_cmp)) { \ ERR(_msg); \ exit(EXIT_FAILURE); \ } \ } while (0) /* * Forwards */ static int is_valid_xref(FILE *fp, pdf_t *pdf, xref_t *xref); static void load_xref_entries(FILE *fp, xref_t *xref); static void load_xref_from_plaintext(FILE *fp, xref_t *xref); static void load_xref_from_stream(FILE *fp, xref_t *xref); static void get_xref_linear_skipped(FILE *fp, xref_t *xref); static void resolve_linearized_pdf(pdf_t *pdf); static pdf_creator_t *new_creator(int *n_elements); static void load_creator(FILE *fp, pdf_t *pdf); static void load_creator_from_buf(FILE *fp, xref_t *xref, const char *buf); static void load_creator_from_xml(xref_t *xref, const char *buf); static void load_creator_from_old_format( FILE *fp, xref_t *xref, const char *buf); static char *get_object_from_here(FILE *fp, size_t *size, int *is_stream); static char *get_object( FILE *fp, int obj_id, const xref_t *xref, size_t *size, int *is_stream); static void add_kid(int id, xref_t *xref); static void load_kids(FILE *fp, int pages_id, xref_t *xref); static const char *get_type(FILE *fp, int obj_id, const xref_t *xref); /* static int get_page(int obj_id, const xref_t *xref); */ static char *get_header(FILE *fp); static char *decode_text_string(const char *str, size_t str_len); static int get_next_eof(FILE *fp); /* * Defined */ pdf_t *pdf_new(const char *name) { const char *n; pdf_t *pdf; pdf = calloc(1, sizeof(pdf_t)); if (name) { /* Just get the file name (not path) */ if ((n = strrchr(name, '/'))) ++n; else n = name; pdf->name = malloc(strlen(n) + 1); strcpy(pdf->name, n); } else /* !name */ { pdf->name = malloc(strlen("Unknown") + 1); strcpy(pdf->name, "Unknown"); } return pdf; } void pdf_delete(pdf_t *pdf) { int i; for (i=0; i<pdf->n_xrefs; i++) { free(pdf->xrefs[i].creator); free(pdf->xrefs[i].entries); free(pdf->xrefs[i].kids); } free(pdf->name); free(pdf->xrefs); free(pdf); } int 
pdf_is_pdf(FILE *fp) { int is_pdf; char *header; header = get_header(fp); if (header && strstr(header, "%PDF-")) is_pdf = 1; else is_pdf = 0; free(header); return is_pdf; } void pdf_get_version(FILE *fp, pdf_t *pdf) { char *header, *c; header = get_header(fp); /* Locate version string start and make sure we dont go past header */ if ((c = strstr(header, "%PDF-")) && (c + strlen("%PDF-M.m") + 2)) { pdf->pdf_major_version = atoi(c + strlen("%PDF-")); pdf->pdf_minor_version = atoi(c + strlen("%PDF-M.")); } free(header); } int pdf_load_xrefs(FILE *fp, pdf_t *pdf) { int i, ver, is_linear; long pos, pos_count; char x, *c, buf[256]; c = NULL; /* Count number of xrefs */ pdf->n_xrefs = 0; fseek(fp, 0, SEEK_SET); while (get_next_eof(fp) >= 0) ++pdf->n_xrefs; if (!pdf->n_xrefs) return 0; /* Load in the start/end positions */ fseek(fp, 0, SEEK_SET); pdf->xrefs = calloc(1, sizeof(xref_t) * pdf->n_xrefs); ver = 1; for (i=0; i<pdf->n_xrefs; i++) { /* Seek to %%EOF */ if ((pos = get_next_eof(fp)) < 0) break; /* Set and increment the version */ pdf->xrefs[i].version = ver++; /* Rewind until we find end of "startxref" */ pos_count = 0; while (SAFE_F(fp, ((x = fgetc(fp)) != 'f'))) fseek(fp, pos - (++pos_count), SEEK_SET); /* Suck in end of "startxref" to start of %%EOF */ if (pos_count >= sizeof(buf)) { ERR("Failed to locate the startxref token. 
" "This might be a corrupt PDF.\n"); return -1; } memset(buf, 0, sizeof(buf)); SAFE_E(fread(buf, 1, pos_count, fp), pos_count, "Failed to read startxref.\n"); c = buf; while (*c == ' ' || *c == '\n' || *c == '\r') ++c; /* xref start position */ pdf->xrefs[i].start = atol(c); /* If xref is 0 handle linear xref table */ if (pdf->xrefs[i].start == 0) get_xref_linear_skipped(fp, &pdf->xrefs[i]); /* Non-linear, normal operation, so just find the end of the xref */ else { /* xref end position */ pos = ftell(fp); fseek(fp, pdf->xrefs[i].start, SEEK_SET); pdf->xrefs[i].end = get_next_eof(fp); /* Look for next EOF and xref data */ fseek(fp, pos, SEEK_SET); } /* Check validity */ if (!is_valid_xref(fp, pdf, &pdf->xrefs[i])) { is_linear = pdf->xrefs[i].is_linear; memset(&pdf->xrefs[i], 0, sizeof(xref_t)); pdf->xrefs[i].is_linear = is_linear; rewind(fp); get_next_eof(fp); continue; } /* Load the entries from the xref */ load_xref_entries(fp, &pdf->xrefs[i]); } /* Now we have all xref tables, if this is linearized, we need * to make adjustments so that things spit out properly */ if (pdf->xrefs[0].is_linear) resolve_linearized_pdf(pdf); /* Ok now we have all xref data. 
Go through those versions of the * PDF and try to obtain creator information */ load_creator(fp, pdf); return pdf->n_xrefs; } /* Load page information */ void pdf_load_pages_kids(FILE *fp, pdf_t *pdf) { int i, id, dummy; char *buf, *c; long start, sz; start = ftell(fp); /* Load all kids for all xref tables (versions) */ for (i=0; i<pdf->n_xrefs; i++) { if (pdf->xrefs[i].version && (pdf->xrefs[i].end != 0)) { fseek(fp, pdf->xrefs[i].start, SEEK_SET); while (SAFE_F(fp, (fgetc(fp) != 't'))) ; /* Iterate to trailer */ /* Get root catalog */ sz = pdf->xrefs[i].end - ftell(fp); buf = malloc(sz + 1); SAFE_E(fread(buf, 1, sz, fp), sz, "Failed to load /Root.\n"); buf[sz] = '\0'; if (!(c = strstr(buf, "/Root"))) { free(buf); continue; } /* Jump to catalog (root) */ id = atoi(c + strlen("/Root") + 1); free(buf); buf = get_object(fp, id, &pdf->xrefs[i], NULL, &dummy); if (!buf || !(c = strstr(buf, "/Pages"))) { free(buf); continue; } /* Start at the first Pages obj and get kids */ id = atoi(c + strlen("/Pages") + 1); load_kids(fp, id, &pdf->xrefs[i]); free(buf); } } fseek(fp, start, SEEK_SET); } char pdf_get_object_status( const pdf_t *pdf, int xref_idx, int entry_idx) { int i, curr_ver; const xref_t *prev_xref; const xref_entry_t *prev, *curr; curr = &pdf->xrefs[xref_idx].entries[entry_idx]; curr_ver = pdf->xrefs[xref_idx].version; if (curr_ver == 1) return 'A'; /* Deleted (freed) */ if (curr->f_or_n == 'f') return 'D'; /* Get previous version */ prev_xref = NULL; for (i=xref_idx; i>-1; --i) if (pdf->xrefs[i].version < curr_ver) { prev_xref = &pdf->xrefs[i]; break; } if (!prev_xref) return '?'; /* Locate the object in the previous one that matches current one */ prev = NULL; for (i=0; i<prev_xref->n_entries; ++i) if (prev_xref->entries[i].obj_id == curr->obj_id) { prev = &prev_xref->entries[i]; break; } /* Added in place of a previously freed id */ if (!prev || ((prev->f_or_n == 'f') && (curr->f_or_n == 'n'))) return 'A'; /* Modified */ else if (prev->offset != curr->offset) 
return 'M'; return '?'; } void pdf_zero_object( FILE *fp, const pdf_t *pdf, int xref_idx, int entry_idx) { int i; char *obj; size_t obj_sz; xref_entry_t *entry; entry = &pdf->xrefs[xref_idx].entries[entry_idx]; fseek(fp, entry->offset, SEEK_SET); /* Get object and size */ obj = get_object(fp, entry->obj_id, &pdf->xrefs[xref_idx], NULL, NULL); i = obj_sz = 0; while (strncmp((++i)+obj, "endobj", 6)) ++obj_sz; if (obj_sz) obj_sz += strlen("endobj") + 1; /* Zero object */ for (i=0; i<obj_sz; i++) fputc('0', fp); printf("Zeroed object %d\n", entry->obj_id); free(obj); } /* Output information per version */ void pdf_summarize( FILE *fp, const pdf_t *pdf, const char *name, pdf_flag_t flags) { int i, j, page, n_versions, n_entries; FILE *dst, *out; char *dst_name, *c; dst = NULL; dst_name = NULL; if (name) { dst_name = malloc(strlen(name) * 2 + 16); sprintf(dst_name, "%s/%s", name, name); if ((c = strrchr(dst_name, '.')) && (strncmp(c, ".pdf", 4) == 0)) *c = '\0'; strcat(dst_name, ".summary"); if (!(dst = fopen(dst_name, "w"))) { ERR("Could not open file '%s' for writing\n", dst_name); return; } } /* Send output to file or stdout */ out = (dst) ? 
dst : stdout; /* Count versions */ n_versions = pdf->n_xrefs; if (n_versions && pdf->xrefs[0].is_linear) --n_versions; /* Ignore bad xref entry */ for (i=1; i<pdf->n_xrefs; ++i) if (pdf->xrefs[i].end == 0) --n_versions; /* If we have no valid versions but linear, count that */ if (!pdf->n_xrefs || (!n_versions && pdf->xrefs[0].is_linear)) n_versions = 1; /* Compare each object (if we dont have xref streams) */ n_entries = 0; for (i=0; !(const int)pdf->has_xref_streams && i<pdf->n_xrefs; i++) { if (flags & PDF_FLAG_QUIET) continue; for (j=0; j<pdf->xrefs[i].n_entries; j++) { ++n_entries; fprintf(out, "%s: --%c-- Version %d -- Object %d (%s)", pdf->name, pdf_get_object_status(pdf, i, j), pdf->xrefs[i].version, pdf->xrefs[i].entries[j].obj_id, get_type(fp, pdf->xrefs[i].entries[j].obj_id, &pdf->xrefs[i])); /* TODO page = get_page(pdf->xrefs[i].entries[j].obj_id, &pdf->xrefs[i]); */ if (0 /*page*/) fprintf(out, " Page(%d)\n", page); else fprintf(out, "\n"); } } /* Trailing summary */ if (!(flags & PDF_FLAG_QUIET)) { /* Let the user know that we cannot we print a per-object summary. * If we have a 1.5 PDF using streams for xref, we have not objects * to display, so let the user know whats up. */ if (pdf->has_xref_streams || !n_entries) fprintf(out, "%s: This PDF contains potential cross reference streams.\n" "%s: An object summary is not available.\n", pdf->name, pdf->name); fprintf(out, "---------- %s ----------\n" "Versions: %d\n", pdf->name, n_versions); /* Count entries for summary */ if (!pdf->has_xref_streams) for (i=0; i<pdf->n_xrefs; i++) { if (pdf->xrefs[i].is_linear) continue; n_entries = pdf->xrefs[i].n_entries; /* If we are a linearized PDF, all versions are made from those * objects too. 
So count em' */ if (pdf->xrefs[0].is_linear) n_entries += pdf->xrefs[0].n_entries; if (pdf->xrefs[i].version && n_entries) fprintf(out, "Version %d -- %d objects\n", pdf->xrefs[i].version, n_entries); } } else /* Quiet output */ fprintf(out, "%s: %d\n", pdf->name, n_versions); if (dst) { fclose(dst); free(dst_name); } } /* Returns '1' if we successfully display data (means its probably not xml) */ int pdf_display_creator(const pdf_t *pdf, int xref_idx) { int i; if (!pdf->xrefs[xref_idx].creator) return 0; for (i=0; i<pdf->xrefs[xref_idx].n_creator_entries; ++i) printf("%s: %s\n", pdf->xrefs[xref_idx].creator[i].key, pdf->xrefs[xref_idx].creator[i].value); return (i > 0); } /* Checks if the xref is valid and sets 'is_stream' flag if the xref is a * stream (PDF 1.5 or higher) */ static int is_valid_xref(FILE *fp, pdf_t *pdf, xref_t *xref) { int is_valid; long start; char *c, buf[16]; memset(buf, 0, sizeof(buf)); is_valid = 0; start = ftell(fp); fseek(fp, xref->start, SEEK_SET); if (fgets(buf, 16, fp) == NULL) { ERR("Failed to load xref string."); exit(EXIT_FAILURE); } if (strncmp(buf, "xref", strlen("xref")) == 0) is_valid = 1; else { /* PDFv1.5+ allows for xref data to be stored in streams vs plaintext */ fseek(fp, xref->start, SEEK_SET); c = get_object_from_here(fp, NULL, &xref->is_stream); if (c && xref->is_stream) { free(c); pdf->has_xref_streams = 1; is_valid = 1; } } fseek(fp, start, SEEK_SET); return is_valid; } static void load_xref_entries(FILE *fp, xref_t *xref) { if (xref->is_stream) load_xref_from_stream(fp, xref); else load_xref_from_plaintext(fp, xref); } static void load_xref_from_plaintext(FILE *fp, xref_t *xref) { int i, buf_idx, obj_id, added_entries; char c, buf[32] = {0}; long start, pos; start = ftell(fp); /* Get number of entries */ pos = xref->end; fseek(fp, pos, SEEK_SET); while (ftell(fp) != 0) if (SAFE_F(fp, (fgetc(fp) == '/' && fgetc(fp) == 'S'))) break; else SAFE_E(fseek(fp, --pos, SEEK_SET), 0, "Failed seek to xref /Size.\n"); 
SAFE_E(fread(buf, 1, 21, fp), 21, "Failed to load entry Size string.\n"); xref->n_entries = atoi(buf + strlen("ize ")); xref->entries = calloc(1, xref->n_entries * sizeof(struct _xref_entry)); /* Load entry data */ obj_id = 0; fseek(fp, xref->start + strlen("xref"), SEEK_SET); added_entries = 0; for (i=0; i<xref->n_entries; i++) { /* Advance past newlines. */ c = fgetc(fp); while (c == '\n' || c == '\r') c = fgetc(fp); /* Collect data up until the following newline. */ buf_idx = 0; while (c != '\n' && c != '\r' && !feof(fp) && !ferror(fp) && buf_idx < sizeof(buf)) { buf[buf_idx++] = c; c = fgetc(fp); } if (buf_idx >= sizeof(buf)) { ERR("Failed to locate newline character. " "This might be a corrupt PDF.\n"); exit(EXIT_FAILURE); } buf[buf_idx] = '\0'; /* Went to far and hit start of trailer */ if (strchr(buf, 't')) break; /* Entry or object id */ if (strlen(buf) > 17) { xref->entries[i].obj_id = obj_id++; xref->entries[i].offset = atol(strtok(buf, " ")); xref->entries[i].gen_num = atoi(strtok(NULL, " ")); xref->entries[i].f_or_n = buf[17]; ++added_entries; } else { obj_id = atoi(buf); --i; } } xref->n_entries = added_entries; fseek(fp, start, SEEK_SET); } /* Load an xref table from a stream (PDF v1.5 +) */ static void load_xref_from_stream(FILE *fp, xref_t *xref) { long start; int is_stream; char *stream; size_t size; start = ftell(fp); fseek(fp, xref->start, SEEK_SET); stream = NULL; stream = get_object_from_here(fp, &size, &is_stream); fseek(fp, start, SEEK_SET); /* TODO: decode and analyize stream */ free(stream); return; } static void get_xref_linear_skipped(FILE *fp, xref_t *xref) { int err; char ch, buf[256]; if (xref->start != 0) return; /* Special case (Linearized PDF with initial startxref at 0) */ xref->is_linear = 1; /* Seek to %%EOF */ if ((xref->end = get_next_eof(fp)) < 0) return; /* Locate the trailer */ err = 0; while (!(err = ferror(fp)) && fread(buf, 1, 8, fp)) { if (strncmp(buf, "trailer", strlen("trailer")) == 0) break; else if ((ftell(fp) - 9) < 
0) return; fseek(fp, -9, SEEK_CUR); } if (err) return; /* If we found 'trailer' look backwards for 'xref' */ ch = 0; while (SAFE_F(fp, ((ch = fgetc(fp)) != 'x'))) fseek(fp, -2, SEEK_CUR); if (ch == 'x') { xref->start = ftell(fp) - 1; fseek(fp, -1, SEEK_CUR); } /* Now continue to next eof ... */ fseek(fp, xref->start, SEEK_SET); } /* This must only be called after all xref and entries have been acquired */ static void resolve_linearized_pdf(pdf_t *pdf) { int i; xref_t buf; if (pdf->n_xrefs < 2) return; if (!pdf->xrefs[0].is_linear) return; /* Swap Linear with Version 1 */ buf = pdf->xrefs[0]; pdf->xrefs[0] = pdf->xrefs[1]; pdf->xrefs[1] = buf; /* Resolve is_linear flag and version */ pdf->xrefs[0].is_linear = 1; pdf->xrefs[0].version = 1; pdf->xrefs[1].is_linear = 0; pdf->xrefs[1].version = 1; /* Adjust the other version values now */ for (i=2; i<pdf->n_xrefs; ++i) --pdf->xrefs[i].version; } static pdf_creator_t *new_creator(int *n_elements) { pdf_creator_t *daddy; static const pdf_creator_t creator_template[] = { {"Title", ""}, {"Author", ""}, {"Subject", ""}, {"Keywords", ""}, {"Creator", ""}, {"Producer", ""}, {"CreationDate", ""}, {"ModDate", ""}, {"Trapped", ""}, }; daddy = malloc(sizeof(creator_template)); memcpy(daddy, creator_template, sizeof(creator_template)); if (n_elements) *n_elements = sizeof(creator_template) / sizeof(creator_template[0]); return daddy; } #define END_OF_TRAILER(_c, _st, _fp) \ { \ if (_c == '>') \ { \ fseek(_fp, _st, SEEK_SET); \ continue; \ } \ } static void load_creator(FILE *fp, pdf_t *pdf) { int i, buf_idx; char c, *buf, obj_id_buf[32] = {0}; long start; size_t sz; start = ftell(fp); /* For each PDF version */ for (i=0; i<pdf->n_xrefs; ++i) { if (!pdf->xrefs[i].version) continue; /* Find trailer */ fseek(fp, pdf->xrefs[i].start, SEEK_SET); while (SAFE_F(fp, (fgetc(fp) != 't'))) ; /* Iterate to "trailer" */ /* Look for "<< ....... /Info ......" 
*/ c = '\0'; while (SAFE_F(fp, ((c = fgetc(fp)) != '>'))) if (SAFE_F(fp, ((c == '/') && (fgetc(fp) == 'I') && ((fgetc(fp) == 'n'))))) break; /* Could not find /Info in trailer */ END_OF_TRAILER(c, start, fp); while (SAFE_F(fp, (!isspace(c = fgetc(fp)) && (c != '>')))) ; /* Iterate to first white space /Info<space><data> */ /* No space between /Info and it's data */ END_OF_TRAILER(c, start, fp); while (SAFE_F(fp, (isspace(c = fgetc(fp)) && (c != '>')))) ; /* Iterate right on top of first non-whitespace /Info data */ /* No data for /Info */ END_OF_TRAILER(c, start, fp); /* Get obj id as number */ buf_idx = 0; obj_id_buf[buf_idx++] = c; while ((buf_idx < (sizeof(obj_id_buf) - 1)) && SAFE_F(fp, (!isspace(c = fgetc(fp)) && (c != '>')))) obj_id_buf[buf_idx++] = c; END_OF_TRAILER(c, start, fp); /* Get the object for the creator data. If linear, try both xrefs */ buf = get_object(fp, atoll(obj_id_buf), &pdf->xrefs[i], &sz, NULL); if (!buf && pdf->xrefs[i].is_linear && (i+1 < pdf->n_xrefs)) buf = get_object(fp, atoll(obj_id_buf), &pdf->xrefs[i+1], &sz, NULL); load_creator_from_buf(fp, &pdf->xrefs[i], buf); free(buf); } fseek(fp, start, SEEK_SET); } static void load_creator_from_buf(FILE *fp, xref_t *xref, const char *buf) { int is_xml; char *c; if (!buf) return; /* Check to see if this is xml or old-school */ if ((c = strstr(buf, "/Type"))) while (*c && !isspace(*c)) ++c; /* Probably "Metadata" */ is_xml = 0; if (c && (*c == 'M')) is_xml = 1; /* Is the buffer XML(PDF 1.4+) or old format? 
*/ if (is_xml) load_creator_from_xml(xref, buf); else load_creator_from_old_format(fp, xref, buf); } static void load_creator_from_xml(xref_t *xref, const char *buf) { /* TODO */ } static void load_creator_from_old_format( FILE *fp, xref_t *xref, const char *buf) { int i, n_eles, length, is_escaped, obj_id; char *c, *ascii, *start, *s, *saved_buf_search, *obj; pdf_creator_t *info; info = new_creator(&n_eles); for (i=0; i<n_eles; ++i) { if (!(c = strstr(buf, info[i].key))) continue; /* Find the value (skipping whitespace) */ c += strlen(info[i].key); while (isspace(*c)) ++c; /* If looking at the start of a pdf token, we have gone too far */ if (*c == '/') continue; /* If the value is a number and not a '(' then the data is located in * an object we need to fetch, and not inline */ obj = saved_buf_search = NULL; if (isdigit(*c)) { obj_id = atoi(c); saved_buf_search = c; s = saved_buf_search; obj = get_object(fp, obj_id, xref, NULL, NULL); c = obj; /* Iterate to '(' */ while (c && (*c != '(')) ++c; /* Advance the search to the next token */ while (s && (*s == '/')) ++s; saved_buf_search = s; } /* Find the end of the value */ start = c; length = is_escaped = 0; while (c && ((*c != '\r') && (*c != '\n') && (*c != '<'))) { /* Bail out if we see an un-escaped ')' closing character */ if (!is_escaped && (*c == ')')) break; else if (*c == '\\') is_escaped = 1; else is_escaped = 0; ++c; ++length; } if (length == 0) continue; /* Add 1 to length so it gets the closing ')' when we copy */ if (length) length += 1; length = (length > KV_MAX_VALUE_LENGTH) ? 
KV_MAX_VALUE_LENGTH : length; strncpy(info[i].value, start, length); info[i].value[KV_MAX_VALUE_LENGTH - 1] = '\0'; /* Restore where we were searching from */ if (saved_buf_search) { /* Release memory from get_object() called earlier */ free(obj); c = saved_buf_search; } } /* For all creation information tags */ /* Go through the values and convert if encoded */ for (i=0; i<n_eles; ++i) if ((ascii = decode_text_string(info[i].value, strlen(info[i].value)))) { strncpy(info[i].value, ascii, strlen(info[i].value)); free(ascii); } xref->creator = info; xref->n_creator_entries = n_eles; } /* Returns object data at the start of the file pointer * This interfaces to 'get_object' */ static char *get_object_from_here(FILE *fp, size_t *size, int *is_stream) { long start; char buf[256]; int obj_id; xref_t xref; xref_entry_t entry; start = ftell(fp); /* Object ID */ memset(buf, 0, 256); SAFE_E(fread(buf, 1, 255, fp), 255, "Failed to load object ID.\n"); if (!(obj_id = atoi(buf))) { fseek(fp, start, SEEK_SET); return NULL; } /* Create xref entry to pass to the get_object routine */ memset(&entry, 0, sizeof(xref_entry_t)); entry.obj_id = obj_id; entry.offset = start; /* Xref and single entry for the object we want data from */ memset(&xref, 0, sizeof(xref_t)); xref.n_entries = 1; xref.entries = &entry; fseek(fp, start, SEEK_SET); return get_object(fp, obj_id, &xref, size, is_stream); } static char *get_object( FILE *fp, int obj_id, const xref_t *xref, size_t *size, int *is_stream) { static const int blk_sz = 256; int i, total_sz, read_sz, n_blks, search, stream; size_t obj_sz; char *c, *data; long start; const xref_entry_t *entry; if (size) *size = 0; if (is_stream) *is_stream = 0; start = ftell(fp); /* Find object */ entry = NULL; for (i=0; i<xref->n_entries; i++) if (xref->entries[i].obj_id == obj_id) { entry = &xref->entries[i]; break; } if (!entry) return NULL; /* Jump to object start */ fseek(fp, entry->offset, SEEK_SET); /* Initial allocate */ obj_sz = 0; /* Bytes in 
object */ total_sz = 0; /* Bytes read in */ n_blks = 1; data = malloc(blk_sz * n_blks); memset(data, 0, blk_sz * n_blks); /* Suck in data */ stream = 0; while ((read_sz = fread(data+total_sz, 1, blk_sz-1, fp)) && !ferror(fp)) { total_sz += read_sz; *(data + total_sz) = '\0'; if (total_sz + blk_sz >= (blk_sz * n_blks)) data = realloc(data, blk_sz * (++n_blks)); search = total_sz - read_sz; if (search < 0) search = 0; if ((c = strstr(data + search, "endobj"))) { *(c + strlen("endobj") + 1) = '\0'; obj_sz = (void *)strstr(data + search, "endobj") - (void *)data; obj_sz += strlen("endobj") + 1; break; } else if (strstr(data, "stream")) stream = 1; } clearerr(fp); fseek(fp, start, SEEK_SET); if (size) *size = obj_sz; if (is_stream) *is_stream = stream; return data; } static void add_kid(int id, xref_t *xref) { /* Make some space */ if (((xref->n_kids + 1) * KID_SIZE) > (xref->n_kids_allocs*KIDS_PER_ALLOC)) xref->kids = realloc( xref->kids, (++xref->n_kids_allocs)*(KIDS_PER_ALLOC * KID_SIZE)); xref->kids[xref->n_kids++] = id; } /* Recursive */ static void load_kids(FILE *fp, int pages_id, xref_t *xref) { int dummy, buf_idx, kid_id; char *data, *c, buf[32]; /* Get kids */ data = get_object(fp, pages_id, xref, NULL, &dummy); if (!data || !(c = strstr(data, "/Kids"))) { free(data); return; } c = strchr(c, '['); buf_idx = 0; memset(buf, 0, sizeof(buf)); while (*(++c) != ']') { if (isdigit(*c) || (*c == ' ')) buf[buf_idx++] = *c; else if (isalpha(*c)) { kid_id = atoi(buf); add_kid(kid_id, xref); buf_idx = 0; memset(buf, 0, sizeof(buf)); /* Check kids of kid */ load_kids(fp, kid_id, xref); } else if (*c == ']') break; } free(data); } static const char *get_type(FILE *fp, int obj_id, const xref_t *xref) { int is_stream; char *c, *obj, *endobj; static char buf[32]; long start; start = ftell(fp); if (!(obj = get_object(fp, obj_id, xref, NULL, &is_stream)) || is_stream || !(endobj = strstr(obj, "endobj"))) { free(obj); fseek(fp, start, SEEK_SET); if (is_stream) return "Stream"; 
else return "Unknown"; } /* Get the Type value (avoiding font names like Type1) */ c = obj; while ((c = strstr(c, "/Type")) && (c < endobj)) if (isdigit(*(c + strlen("/Type")))) { ++c; continue; } else break; if (!c || (c && (c > endobj))) { free(obj); fseek(fp, start, SEEK_SET); return "Unknown"; } /* Skip to first blank/whitespace */ c += strlen("/Type"); while (isspace(*c) || *c == '/') ++c; /* Return the value by storing it in static mem */ memcpy(buf, c, (((c - obj) < sizeof(buf)) ? c - obj : sizeof(buf))); c = buf; while (!(isspace(*c) || *c=='/' || *c=='>')) ++c; *c = '\0'; free(obj); fseek(fp, start, SEEK_SET); return buf; } /* TODO static int get_page(int obj_id, const xref_t *xref) { int i; for (i=0; i<xref->n_kids; i++) if (xref->kids[i] == obj_id) break; return i; } */ static char *get_header(FILE *fp) { long start; /* First 1024 bytes of doc must be header (1.7 spec pg 1102) */ char *header; header = calloc(1, 1024); start = ftell(fp); fseek(fp, 0, SEEK_SET); SAFE_E(fread(header, 1, 1023, fp), 1023, "Failed to load PDF header.\n"); fseek(fp, start, SEEK_SET); return header; } static char *decode_text_string(const char *str, size_t str_len) { int idx, is_hex, is_utf16be, ascii_idx; char *ascii, hex_buf[5] = {0}; is_hex = is_utf16be = idx = ascii_idx = 0; /* Regular encoding */ if (str[0] == '(') { ascii = malloc(strlen(str) + 1); strncpy(ascii, str, strlen(str) + 1); return ascii; } else if (str[0] == '<') { is_hex = 1; ++idx; } /* Text strings can be either PDFDocEncoding or UTF-16BE */ if (is_hex && (str_len > 5) && (str[idx] == 'F') && (str[idx+1] == 'E') && (str[idx+2] == 'F') && (str[idx+3] == 'F')) { is_utf16be = 1; idx += 4; } else return NULL; /* Now decode as hex */ ascii = malloc(str_len); for ( ; idx<str_len; ++idx) { hex_buf[0] = str[idx++]; hex_buf[1] = str[idx++]; hex_buf[2] = str[idx++]; hex_buf[3] = str[idx]; ascii[ascii_idx++] = strtol(hex_buf, NULL, 16); } return ascii; } /* Return the offset to the beginning of the %%EOF string. 
* A negative value is returned when done scanning. */ static int get_next_eof(FILE *fp) { int match, c; const char buf[] = "%%EOF"; match = 0; while ((c = fgetc(fp)) != EOF) { if (c == buf[match]) ++match; else match = 0; if (match == 5) /* strlen("%%EOF") */ return ftell(fp) - 5; } return -1; }
/****************************************************************************** * pdf.c * * pdfresurrect - PDF history extraction tool * * Copyright (C) 2008-2010, 2012-2013, 2017-19, Matt Davis (enferex). * * Special thanks to all of the contributors: See AUTHORS. * * Special thanks to 757labs (757 crew), they are a great group * of people to hack on projects and brainstorm with. * * pdf.c is part of pdfresurrect. * pdfresurrect is free software: you can redistribute it and/or modify * it under the terms of the GNU General Public License as published by * the Free Software Foundation, either version 3 of the License, or * (at your option) any later version. * * pdfresurrect is distributed in the hope that it will be useful, * but WITHOUT ANY WARRANTY; without even the implied warranty of * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the * GNU General Public License for more details. * * You should have received a copy of the GNU General Public License * along with pdfresurrect. If not, see <http://www.gnu.org/licenses/>. *****************************************************************************/ #include <stdlib.h> #include <string.h> #include <ctype.h> #include "pdf.h" #include "main.h" /* * Macros */ /* SAFE_F * * Safe file read: use for fgetc() calls, this is really ugly looking. * _fp: FILE * handle * _expr: The expression with fgetc() in it: * * example: If we get a character from the file and it is ascii character 'a' * This assumes the coder wants to store the 'a' in variable ch * Kinda pointless if you already know that you have 'a', but for * illustrative purposes. * * if (SAFE_F(my_fp, ((c=fgetc(my_fp)) == 'a'))) * do_way_cool_stuff(); */ #define SAFE_F(_fp, _expr) \ ((!ferror(_fp) && !feof(_fp) && (_expr))) /* SAFE_E * * Safe expression handling. This macro is a wrapper * that compares the result of an expression (_expr) to the expected * value (_cmp). * * _expr: Expression to test. * _cmp: Expected value, error if this returns false. 
* _msg: What to say when an error occurs. */ #define SAFE_E(_expr, _cmp, _msg) \ do { \ if ((_expr) != (_cmp)) { \ ERR(_msg); \ exit(EXIT_FAILURE); \ } \ } while (0) /* * Forwards */ static int is_valid_xref(FILE *fp, pdf_t *pdf, xref_t *xref); static void load_xref_entries(FILE *fp, xref_t *xref); static void load_xref_from_plaintext(FILE *fp, xref_t *xref); static void load_xref_from_stream(FILE *fp, xref_t *xref); static void get_xref_linear_skipped(FILE *fp, xref_t *xref); static void resolve_linearized_pdf(pdf_t *pdf); static pdf_creator_t *new_creator(int *n_elements); static void load_creator(FILE *fp, pdf_t *pdf); static void load_creator_from_buf(FILE *fp, xref_t *xref, const char *buf); static void load_creator_from_xml(xref_t *xref, const char *buf); static void load_creator_from_old_format( FILE *fp, xref_t *xref, const char *buf); static char *get_object_from_here(FILE *fp, size_t *size, int *is_stream); static char *get_object( FILE *fp, int obj_id, const xref_t *xref, size_t *size, int *is_stream); static void add_kid(int id, xref_t *xref); static void load_kids(FILE *fp, int pages_id, xref_t *xref); static const char *get_type(FILE *fp, int obj_id, const xref_t *xref); /* static int get_page(int obj_id, const xref_t *xref); */ static char *get_header(FILE *fp); static char *decode_text_string(const char *str, size_t str_len); static int get_next_eof(FILE *fp); /* * Defined */ pdf_t *pdf_new(const char *name) { const char *n; pdf_t *pdf; pdf = safe_calloc(sizeof(pdf_t)); if (name) { /* Just get the file name (not path) */ if ((n = strrchr(name, '/'))) ++n; else n = name; pdf->name = safe_calloc(strlen(n) + 1); strcpy(pdf->name, n); } else /* !name */ { pdf->name = safe_calloc(strlen("Unknown") + 1); strcpy(pdf->name, "Unknown"); } return pdf; } void pdf_delete(pdf_t *pdf) { int i; for (i=0; i<pdf->n_xrefs; i++) { free(pdf->xrefs[i].creator); free(pdf->xrefs[i].entries); free(pdf->xrefs[i].kids); } free(pdf->name); free(pdf->xrefs); free(pdf); } int 
pdf_is_pdf(FILE *fp) { int is_pdf; char *header; header = get_header(fp); if (header && strstr(header, "%PDF-")) is_pdf = 1; else is_pdf = 0; free(header); return is_pdf; } void pdf_get_version(FILE *fp, pdf_t *pdf) { char *header, *c; header = get_header(fp); /* Locate version string start and make sure we dont go past header */ if ((c = strstr(header, "%PDF-")) && (c + strlen("%PDF-M.m") + 2)) { pdf->pdf_major_version = atoi(c + strlen("%PDF-")); pdf->pdf_minor_version = atoi(c + strlen("%PDF-M.")); } free(header); } int pdf_load_xrefs(FILE *fp, pdf_t *pdf) { int i, ver, is_linear; long pos, pos_count; char x, *c, buf[256]; c = NULL; /* Count number of xrefs */ pdf->n_xrefs = 0; fseek(fp, 0, SEEK_SET); while (get_next_eof(fp) >= 0) ++pdf->n_xrefs; if (!pdf->n_xrefs) return 0; /* Load in the start/end positions */ fseek(fp, 0, SEEK_SET); pdf->xrefs = safe_calloc(sizeof(xref_t) * pdf->n_xrefs); ver = 1; for (i=0; i<pdf->n_xrefs; i++) { /* Seek to %%EOF */ if ((pos = get_next_eof(fp)) < 0) break; /* Set and increment the version */ pdf->xrefs[i].version = ver++; /* Rewind until we find end of "startxref" */ pos_count = 0; while (SAFE_F(fp, ((x = fgetc(fp)) != 'f'))) fseek(fp, pos - (++pos_count), SEEK_SET); /* Suck in end of "startxref" to start of %%EOF */ if (pos_count >= sizeof(buf)) { ERR("Failed to locate the startxref token. 
" "This might be a corrupt PDF.\n"); return -1; } memset(buf, 0, sizeof(buf)); SAFE_E(fread(buf, 1, pos_count, fp), pos_count, "Failed to read startxref.\n"); c = buf; while (*c == ' ' || *c == '\n' || *c == '\r') ++c; /* xref start position */ pdf->xrefs[i].start = atol(c); /* If xref is 0 handle linear xref table */ if (pdf->xrefs[i].start == 0) get_xref_linear_skipped(fp, &pdf->xrefs[i]); /* Non-linear, normal operation, so just find the end of the xref */ else { /* xref end position */ pos = ftell(fp); fseek(fp, pdf->xrefs[i].start, SEEK_SET); pdf->xrefs[i].end = get_next_eof(fp); /* Look for next EOF and xref data */ fseek(fp, pos, SEEK_SET); } /* Check validity */ if (!is_valid_xref(fp, pdf, &pdf->xrefs[i])) { is_linear = pdf->xrefs[i].is_linear; memset(&pdf->xrefs[i], 0, sizeof(xref_t)); pdf->xrefs[i].is_linear = is_linear; rewind(fp); get_next_eof(fp); continue; } /* Load the entries from the xref */ load_xref_entries(fp, &pdf->xrefs[i]); } /* Now we have all xref tables, if this is linearized, we need * to make adjustments so that things spit out properly */ if (pdf->xrefs[0].is_linear) resolve_linearized_pdf(pdf); /* Ok now we have all xref data. 
Go through those versions of the * PDF and try to obtain creator information */ load_creator(fp, pdf); return pdf->n_xrefs; } /* Load page information */ void pdf_load_pages_kids(FILE *fp, pdf_t *pdf) { int i, id, dummy; char *buf, *c; long start, sz; start = ftell(fp); /* Load all kids for all xref tables (versions) */ for (i=0; i<pdf->n_xrefs; i++) { if (pdf->xrefs[i].version && (pdf->xrefs[i].end != 0)) { fseek(fp, pdf->xrefs[i].start, SEEK_SET); while (SAFE_F(fp, (fgetc(fp) != 't'))) ; /* Iterate to trailer */ /* Get root catalog */ sz = pdf->xrefs[i].end - ftell(fp); buf = safe_calloc(sz + 1); SAFE_E(fread(buf, 1, sz, fp), sz, "Failed to load /Root.\n"); buf[sz] = '\0'; if (!(c = strstr(buf, "/Root"))) { free(buf); continue; } /* Jump to catalog (root) */ id = atoi(c + strlen("/Root") + 1); free(buf); buf = get_object(fp, id, &pdf->xrefs[i], NULL, &dummy); if (!buf || !(c = strstr(buf, "/Pages"))) { free(buf); continue; } /* Start at the first Pages obj and get kids */ id = atoi(c + strlen("/Pages") + 1); load_kids(fp, id, &pdf->xrefs[i]); free(buf); } } fseek(fp, start, SEEK_SET); } char pdf_get_object_status( const pdf_t *pdf, int xref_idx, int entry_idx) { int i, curr_ver; const xref_t *prev_xref; const xref_entry_t *prev, *curr; curr = &pdf->xrefs[xref_idx].entries[entry_idx]; curr_ver = pdf->xrefs[xref_idx].version; if (curr_ver == 1) return 'A'; /* Deleted (freed) */ if (curr->f_or_n == 'f') return 'D'; /* Get previous version */ prev_xref = NULL; for (i=xref_idx; i>-1; --i) if (pdf->xrefs[i].version < curr_ver) { prev_xref = &pdf->xrefs[i]; break; } if (!prev_xref) return '?'; /* Locate the object in the previous one that matches current one */ prev = NULL; for (i=0; i<prev_xref->n_entries; ++i) if (prev_xref->entries[i].obj_id == curr->obj_id) { prev = &prev_xref->entries[i]; break; } /* Added in place of a previously freed id */ if (!prev || ((prev->f_or_n == 'f') && (curr->f_or_n == 'n'))) return 'A'; /* Modified */ else if (prev->offset != 
curr->offset) return 'M'; return '?'; } void pdf_zero_object( FILE *fp, const pdf_t *pdf, int xref_idx, int entry_idx) { int i; char *obj; size_t obj_sz; xref_entry_t *entry; entry = &pdf->xrefs[xref_idx].entries[entry_idx]; fseek(fp, entry->offset, SEEK_SET); /* Get object and size */ obj = get_object(fp, entry->obj_id, &pdf->xrefs[xref_idx], NULL, NULL); i = obj_sz = 0; while (strncmp((++i)+obj, "endobj", 6)) ++obj_sz; if (obj_sz) obj_sz += strlen("endobj") + 1; /* Zero object */ for (i=0; i<obj_sz; i++) fputc('0', fp); printf("Zeroed object %d\n", entry->obj_id); free(obj); } /* Output information per version */ void pdf_summarize( FILE *fp, const pdf_t *pdf, const char *name, pdf_flag_t flags) { int i, j, page, n_versions, n_entries; FILE *dst, *out; char *dst_name, *c; dst = NULL; dst_name = NULL; if (name) { dst_name = safe_calloc(strlen(name) * 2 + 16); sprintf(dst_name, "%s/%s", name, name); if ((c = strrchr(dst_name, '.')) && (strncmp(c, ".pdf", 4) == 0)) *c = '\0'; strcat(dst_name, ".summary"); if (!(dst = fopen(dst_name, "w"))) { ERR("Could not open file '%s' for writing\n", dst_name); return; } } /* Send output to file or stdout */ out = (dst) ? 
dst : stdout; /* Count versions */ n_versions = pdf->n_xrefs; if (n_versions && pdf->xrefs[0].is_linear) --n_versions; /* Ignore bad xref entry */ for (i=1; i<pdf->n_xrefs; ++i) if (pdf->xrefs[i].end == 0) --n_versions; /* If we have no valid versions but linear, count that */ if (!pdf->n_xrefs || (!n_versions && pdf->xrefs[0].is_linear)) n_versions = 1; /* Compare each object (if we dont have xref streams) */ n_entries = 0; for (i=0; !(const int)pdf->has_xref_streams && i<pdf->n_xrefs; i++) { if (flags & PDF_FLAG_QUIET) continue; for (j=0; j<pdf->xrefs[i].n_entries; j++) { ++n_entries; fprintf(out, "%s: --%c-- Version %d -- Object %d (%s)", pdf->name, pdf_get_object_status(pdf, i, j), pdf->xrefs[i].version, pdf->xrefs[i].entries[j].obj_id, get_type(fp, pdf->xrefs[i].entries[j].obj_id, &pdf->xrefs[i])); /* TODO page = get_page(pdf->xrefs[i].entries[j].obj_id, &pdf->xrefs[i]); */ if (0 /*page*/) fprintf(out, " Page(%d)\n", page); else fprintf(out, "\n"); } } /* Trailing summary */ if (!(flags & PDF_FLAG_QUIET)) { /* Let the user know that we cannot we print a per-object summary. * If we have a 1.5 PDF using streams for xref, we have not objects * to display, so let the user know whats up. */ if (pdf->has_xref_streams || !n_entries) fprintf(out, "%s: This PDF contains potential cross reference streams.\n" "%s: An object summary is not available.\n", pdf->name, pdf->name); fprintf(out, "---------- %s ----------\n" "Versions: %d\n", pdf->name, n_versions); /* Count entries for summary */ if (!pdf->has_xref_streams) for (i=0; i<pdf->n_xrefs; i++) { if (pdf->xrefs[i].is_linear) continue; n_entries = pdf->xrefs[i].n_entries; /* If we are a linearized PDF, all versions are made from those * objects too. 
So count em' */ if (pdf->xrefs[0].is_linear) n_entries += pdf->xrefs[0].n_entries; if (pdf->xrefs[i].version && n_entries) fprintf(out, "Version %d -- %d objects\n", pdf->xrefs[i].version, n_entries); } } else /* Quiet output */ fprintf(out, "%s: %d\n", pdf->name, n_versions); if (dst) { fclose(dst); free(dst_name); } } /* Returns '1' if we successfully display data (means its probably not xml) */ int pdf_display_creator(const pdf_t *pdf, int xref_idx) { int i; if (!pdf->xrefs[xref_idx].creator) return 0; for (i=0; i<pdf->xrefs[xref_idx].n_creator_entries; ++i) printf("%s: %s\n", pdf->xrefs[xref_idx].creator[i].key, pdf->xrefs[xref_idx].creator[i].value); return (i > 0); } /* Checks if the xref is valid and sets 'is_stream' flag if the xref is a * stream (PDF 1.5 or higher) */ static int is_valid_xref(FILE *fp, pdf_t *pdf, xref_t *xref) { int is_valid; long start; char *c, buf[16]; memset(buf, 0, sizeof(buf)); is_valid = 0; start = ftell(fp); fseek(fp, xref->start, SEEK_SET); if (fgets(buf, 16, fp) == NULL) { ERR("Failed to load xref string."); exit(EXIT_FAILURE); } if (strncmp(buf, "xref", strlen("xref")) == 0) is_valid = 1; else { /* PDFv1.5+ allows for xref data to be stored in streams vs plaintext */ fseek(fp, xref->start, SEEK_SET); c = get_object_from_here(fp, NULL, &xref->is_stream); if (c && xref->is_stream) { free(c); pdf->has_xref_streams = 1; is_valid = 1; } } fseek(fp, start, SEEK_SET); return is_valid; } static void load_xref_entries(FILE *fp, xref_t *xref) { if (xref->is_stream) load_xref_from_stream(fp, xref); else load_xref_from_plaintext(fp, xref); } static void load_xref_from_plaintext(FILE *fp, xref_t *xref) { int i, buf_idx, obj_id, added_entries; char c, buf[32] = {0}; long start, pos; start = ftell(fp); /* Get number of entries */ pos = xref->end; fseek(fp, pos, SEEK_SET); while (ftell(fp) != 0) if (SAFE_F(fp, (fgetc(fp) == '/' && fgetc(fp) == 'S'))) break; else SAFE_E(fseek(fp, --pos, SEEK_SET), 0, "Failed seek to xref /Size.\n"); 
SAFE_E(fread(buf, 1, 21, fp), 21, "Failed to load entry Size string.\n"); xref->n_entries = atoi(buf + strlen("ize ")); xref->entries = safe_calloc(xref->n_entries * sizeof(struct _xref_entry)); /* Load entry data */ obj_id = 0; fseek(fp, xref->start + strlen("xref"), SEEK_SET); added_entries = 0; for (i=0; i<xref->n_entries; i++) { /* Advance past newlines. */ c = fgetc(fp); while (c == '\n' || c == '\r') c = fgetc(fp); /* Collect data up until the following newline. */ buf_idx = 0; while (c != '\n' && c != '\r' && !feof(fp) && !ferror(fp) && buf_idx < sizeof(buf)) { buf[buf_idx++] = c; c = fgetc(fp); } if (buf_idx >= sizeof(buf)) { ERR("Failed to locate newline character. " "This might be a corrupt PDF.\n"); exit(EXIT_FAILURE); } buf[buf_idx] = '\0'; /* Went to far and hit start of trailer */ if (strchr(buf, 't')) break; /* Entry or object id */ if (strlen(buf) > 17) { xref->entries[i].obj_id = obj_id++; xref->entries[i].offset = atol(strtok(buf, " ")); xref->entries[i].gen_num = atoi(strtok(NULL, " ")); xref->entries[i].f_or_n = buf[17]; ++added_entries; } else { obj_id = atoi(buf); --i; } } xref->n_entries = added_entries; fseek(fp, start, SEEK_SET); } /* Load an xref table from a stream (PDF v1.5 +) */ static void load_xref_from_stream(FILE *fp, xref_t *xref) { long start; int is_stream; char *stream; size_t size; start = ftell(fp); fseek(fp, xref->start, SEEK_SET); stream = NULL; stream = get_object_from_here(fp, &size, &is_stream); fseek(fp, start, SEEK_SET); /* TODO: decode and analyize stream */ free(stream); return; } static void get_xref_linear_skipped(FILE *fp, xref_t *xref) { int err; char ch, buf[256]; if (xref->start != 0) return; /* Special case (Linearized PDF with initial startxref at 0) */ xref->is_linear = 1; /* Seek to %%EOF */ if ((xref->end = get_next_eof(fp)) < 0) return; /* Locate the trailer */ err = 0; while (!(err = ferror(fp)) && fread(buf, 1, 8, fp)) { if (strncmp(buf, "trailer", strlen("trailer")) == 0) break; else if ((ftell(fp) - 9) 
< 0) return; fseek(fp, -9, SEEK_CUR); } if (err) return; /* If we found 'trailer' look backwards for 'xref' */ ch = 0; while (SAFE_F(fp, ((ch = fgetc(fp)) != 'x'))) fseek(fp, -2, SEEK_CUR); if (ch == 'x') { xref->start = ftell(fp) - 1; fseek(fp, -1, SEEK_CUR); } /* Now continue to next eof ... */ fseek(fp, xref->start, SEEK_SET); } /* This must only be called after all xref and entries have been acquired */ static void resolve_linearized_pdf(pdf_t *pdf) { int i; xref_t buf; if (pdf->n_xrefs < 2) return; if (!pdf->xrefs[0].is_linear) return; /* Swap Linear with Version 1 */ buf = pdf->xrefs[0]; pdf->xrefs[0] = pdf->xrefs[1]; pdf->xrefs[1] = buf; /* Resolve is_linear flag and version */ pdf->xrefs[0].is_linear = 1; pdf->xrefs[0].version = 1; pdf->xrefs[1].is_linear = 0; pdf->xrefs[1].version = 1; /* Adjust the other version values now */ for (i=2; i<pdf->n_xrefs; ++i) --pdf->xrefs[i].version; } static pdf_creator_t *new_creator(int *n_elements) { pdf_creator_t *daddy; static const pdf_creator_t creator_template[] = { {"Title", ""}, {"Author", ""}, {"Subject", ""}, {"Keywords", ""}, {"Creator", ""}, {"Producer", ""}, {"CreationDate", ""}, {"ModDate", ""}, {"Trapped", ""}, }; daddy = safe_calloc(sizeof(creator_template)); memcpy(daddy, creator_template, sizeof(creator_template)); if (n_elements) *n_elements = sizeof(creator_template) / sizeof(creator_template[0]); return daddy; } #define END_OF_TRAILER(_c, _st, _fp) \ { \ if (_c == '>') \ { \ fseek(_fp, _st, SEEK_SET); \ continue; \ } \ } static void load_creator(FILE *fp, pdf_t *pdf) { int i, buf_idx; char c, *buf, obj_id_buf[32] = {0}; long start; size_t sz; start = ftell(fp); /* For each PDF version */ for (i=0; i<pdf->n_xrefs; ++i) { if (!pdf->xrefs[i].version) continue; /* Find trailer */ fseek(fp, pdf->xrefs[i].start, SEEK_SET); while (SAFE_F(fp, (fgetc(fp) != 't'))) ; /* Iterate to "trailer" */ /* Look for "<< ....... /Info ......" 
*/ c = '\0'; while (SAFE_F(fp, ((c = fgetc(fp)) != '>'))) if (SAFE_F(fp, ((c == '/') && (fgetc(fp) == 'I') && ((fgetc(fp) == 'n'))))) break; /* Could not find /Info in trailer */ END_OF_TRAILER(c, start, fp); while (SAFE_F(fp, (!isspace(c = fgetc(fp)) && (c != '>')))) ; /* Iterate to first white space /Info<space><data> */ /* No space between /Info and it's data */ END_OF_TRAILER(c, start, fp); while (SAFE_F(fp, (isspace(c = fgetc(fp)) && (c != '>')))) ; /* Iterate right on top of first non-whitespace /Info data */ /* No data for /Info */ END_OF_TRAILER(c, start, fp); /* Get obj id as number */ buf_idx = 0; obj_id_buf[buf_idx++] = c; while ((buf_idx < (sizeof(obj_id_buf) - 1)) && SAFE_F(fp, (!isspace(c = fgetc(fp)) && (c != '>')))) obj_id_buf[buf_idx++] = c; END_OF_TRAILER(c, start, fp); /* Get the object for the creator data. If linear, try both xrefs */ buf = get_object(fp, atoll(obj_id_buf), &pdf->xrefs[i], &sz, NULL); if (!buf && pdf->xrefs[i].is_linear && (i+1 < pdf->n_xrefs)) buf = get_object(fp, atoll(obj_id_buf), &pdf->xrefs[i+1], &sz, NULL); load_creator_from_buf(fp, &pdf->xrefs[i], buf); free(buf); } fseek(fp, start, SEEK_SET); } static void load_creator_from_buf(FILE *fp, xref_t *xref, const char *buf) { int is_xml; char *c; if (!buf) return; /* Check to see if this is xml or old-school */ if ((c = strstr(buf, "/Type"))) while (*c && !isspace(*c)) ++c; /* Probably "Metadata" */ is_xml = 0; if (c && (*c == 'M')) is_xml = 1; /* Is the buffer XML(PDF 1.4+) or old format? 
*/ if (is_xml) load_creator_from_xml(xref, buf); else load_creator_from_old_format(fp, xref, buf); } static void load_creator_from_xml(xref_t *xref, const char *buf) { /* TODO */ } static void load_creator_from_old_format( FILE *fp, xref_t *xref, const char *buf) { int i, n_eles, length, is_escaped, obj_id; char *c, *ascii, *start, *s, *saved_buf_search, *obj; pdf_creator_t *info; info = new_creator(&n_eles); for (i=0; i<n_eles; ++i) { if (!(c = strstr(buf, info[i].key))) continue; /* Find the value (skipping whitespace) */ c += strlen(info[i].key); while (isspace(*c)) ++c; /* If looking at the start of a pdf token, we have gone too far */ if (*c == '/') continue; /* If the value is a number and not a '(' then the data is located in * an object we need to fetch, and not inline */ obj = saved_buf_search = NULL; if (isdigit(*c)) { obj_id = atoi(c); saved_buf_search = c; s = saved_buf_search; obj = get_object(fp, obj_id, xref, NULL, NULL); c = obj; /* Iterate to '(' */ while (c && (*c != '(')) ++c; /* Advance the search to the next token */ while (s && (*s == '/')) ++s; saved_buf_search = s; } /* Find the end of the value */ start = c; length = is_escaped = 0; while (c && ((*c != '\r') && (*c != '\n') && (*c != '<'))) { /* Bail out if we see an un-escaped ')' closing character */ if (!is_escaped && (*c == ')')) break; else if (*c == '\\') is_escaped = 1; else is_escaped = 0; ++c; ++length; } if (length == 0) continue; /* Add 1 to length so it gets the closing ')' when we copy */ if (length) length += 1; length = (length > KV_MAX_VALUE_LENGTH) ? 
KV_MAX_VALUE_LENGTH : length; strncpy(info[i].value, start, length); info[i].value[KV_MAX_VALUE_LENGTH - 1] = '\0'; /* Restore where we were searching from */ if (saved_buf_search) { /* Release memory from get_object() called earlier */ free(obj); c = saved_buf_search; } } /* For all creation information tags */ /* Go through the values and convert if encoded */ for (i=0; i<n_eles; ++i) if ((ascii = decode_text_string(info[i].value, strlen(info[i].value)))) { strncpy(info[i].value, ascii, strlen(info[i].value)); free(ascii); } xref->creator = info; xref->n_creator_entries = n_eles; } /* Returns object data at the start of the file pointer * This interfaces to 'get_object' */ static char *get_object_from_here(FILE *fp, size_t *size, int *is_stream) { long start; char buf[256]; int obj_id; xref_t xref; xref_entry_t entry; start = ftell(fp); /* Object ID */ memset(buf, 0, 256); SAFE_E(fread(buf, 1, 255, fp), 255, "Failed to load object ID.\n"); if (!(obj_id = atoi(buf))) { fseek(fp, start, SEEK_SET); return NULL; } /* Create xref entry to pass to the get_object routine */ memset(&entry, 0, sizeof(xref_entry_t)); entry.obj_id = obj_id; entry.offset = start; /* Xref and single entry for the object we want data from */ memset(&xref, 0, sizeof(xref_t)); xref.n_entries = 1; xref.entries = &entry; fseek(fp, start, SEEK_SET); return get_object(fp, obj_id, &xref, size, is_stream); } static char *get_object( FILE *fp, int obj_id, const xref_t *xref, size_t *size, int *is_stream) { static const int blk_sz = 256; int i, total_sz, read_sz, n_blks, search, stream; size_t obj_sz; char *c, *data; long start; const xref_entry_t *entry; if (size) *size = 0; if (is_stream) *is_stream = 0; start = ftell(fp); /* Find object */ entry = NULL; for (i=0; i<xref->n_entries; i++) if (xref->entries[i].obj_id == obj_id) { entry = &xref->entries[i]; break; } if (!entry) return NULL; /* Jump to object start */ fseek(fp, entry->offset, SEEK_SET); /* Initial allocate */ obj_sz = 0; /* Bytes in 
object */ total_sz = 0; /* Bytes read in */ n_blks = 1; data = safe_calloc(blk_sz * n_blks); /* Suck in data */ stream = 0; while ((read_sz = fread(data+total_sz, 1, blk_sz-1, fp)) && !ferror(fp)) { total_sz += read_sz; *(data + total_sz) = '\0'; if (total_sz + blk_sz >= (blk_sz * n_blks)) data = realloc(data, blk_sz * (++n_blks)); search = total_sz - read_sz; if (search < 0) search = 0; if ((c = strstr(data + search, "endobj"))) { *(c + strlen("endobj") + 1) = '\0'; obj_sz = (void *)strstr(data + search, "endobj") - (void *)data; obj_sz += strlen("endobj") + 1; break; } else if (strstr(data, "stream")) stream = 1; } clearerr(fp); fseek(fp, start, SEEK_SET); if (size) *size = obj_sz; if (is_stream) *is_stream = stream; return data; } static void add_kid(int id, xref_t *xref) { /* Make some space */ if (((xref->n_kids + 1) * KID_SIZE) > (xref->n_kids_allocs*KIDS_PER_ALLOC)) xref->kids = realloc( xref->kids, (++xref->n_kids_allocs)*(KIDS_PER_ALLOC * KID_SIZE)); xref->kids[xref->n_kids++] = id; } /* Recursive */ static void load_kids(FILE *fp, int pages_id, xref_t *xref) { int dummy, buf_idx, kid_id; char *data, *c, buf[32]; /* Get kids */ data = get_object(fp, pages_id, xref, NULL, &dummy); if (!data || !(c = strstr(data, "/Kids"))) { free(data); return; } c = strchr(c, '['); buf_idx = 0; memset(buf, 0, sizeof(buf)); while (*(++c) != ']') { if (isdigit(*c) || (*c == ' ')) buf[buf_idx++] = *c; else if (isalpha(*c)) { kid_id = atoi(buf); add_kid(kid_id, xref); buf_idx = 0; memset(buf, 0, sizeof(buf)); /* Check kids of kid */ load_kids(fp, kid_id, xref); } else if (*c == ']') break; } free(data); } static const char *get_type(FILE *fp, int obj_id, const xref_t *xref) { int is_stream; char *c, *obj, *endobj; static char buf[32]; long start; start = ftell(fp); if (!(obj = get_object(fp, obj_id, xref, NULL, &is_stream)) || is_stream || !(endobj = strstr(obj, "endobj"))) { free(obj); fseek(fp, start, SEEK_SET); if (is_stream) return "Stream"; else return "Unknown"; } /* Get 
the Type value (avoiding font names like Type1) */ c = obj; while ((c = strstr(c, "/Type")) && (c < endobj)) if (isdigit(*(c + strlen("/Type")))) { ++c; continue; } else break; if (!c || (c && (c > endobj))) { free(obj); fseek(fp, start, SEEK_SET); return "Unknown"; } /* Skip to first blank/whitespace */ c += strlen("/Type"); while (isspace(*c) || *c == '/') ++c; /* Return the value by storing it in static mem */ memcpy(buf, c, (((c - obj) < sizeof(buf)) ? c - obj : sizeof(buf))); c = buf; while (!(isspace(*c) || *c=='/' || *c=='>')) ++c; *c = '\0'; free(obj); fseek(fp, start, SEEK_SET); return buf; } /* TODO static int get_page(int obj_id, const xref_t *xref) { int i; for (i=0; i<xref->n_kids; i++) if (xref->kids[i] == obj_id) break; return i; } */ static char *get_header(FILE *fp) { /* First 1024 bytes of doc must be header (1.7 spec pg 1102) */ char *header = safe_calloc(1024); long start = ftell(fp); fseek(fp, 0, SEEK_SET); SAFE_E(fread(header, 1, 1023, fp), 1023, "Failed to load PDF header.\n"); fseek(fp, start, SEEK_SET); return header; } static char *decode_text_string(const char *str, size_t str_len) { int idx, is_hex, is_utf16be, ascii_idx; char *ascii, hex_buf[5] = {0}; is_hex = is_utf16be = idx = ascii_idx = 0; /* Regular encoding */ if (str[0] == '(') { ascii = safe_calloc(strlen(str) + 1); strncpy(ascii, str, strlen(str) + 1); return ascii; } else if (str[0] == '<') { is_hex = 1; ++idx; } /* Text strings can be either PDFDocEncoding or UTF-16BE */ if (is_hex && (str_len > 5) && (str[idx] == 'F') && (str[idx+1] == 'E') && (str[idx+2] == 'F') && (str[idx+3] == 'F')) { is_utf16be = 1; idx += 4; } else return NULL; /* Now decode as hex */ ascii = safe_calloc(str_len); for ( ; idx<str_len; ++idx) { hex_buf[0] = str[idx++]; hex_buf[1] = str[idx++]; hex_buf[2] = str[idx++]; hex_buf[3] = str[idx]; ascii[ascii_idx++] = strtol(hex_buf, NULL, 16); } return ascii; } /* Return the offset to the beginning of the %%EOF string. 
* A negative value is returned when done scanning. */ static int get_next_eof(FILE *fp) { int match, c; const char buf[] = "%%EOF"; match = 0; while ((c = fgetc(fp)) != EOF) { if (c == buf[match]) ++match; else match = 0; if (match == 5) /* strlen("%%EOF") */ return ftell(fp) - 5; } return -1; }
static char *get_object( FILE *fp, int obj_id, const xref_t *xref, size_t *size, int *is_stream) { static const int blk_sz = 256; int i, total_sz, read_sz, n_blks, search, stream; size_t obj_sz; char *c, *data; long start; const xref_entry_t *entry; if (size) *size = 0; if (is_stream) *is_stream = 0; start = ftell(fp); /* Find object */ entry = NULL; for (i=0; i<xref->n_entries; i++) if (xref->entries[i].obj_id == obj_id) { entry = &xref->entries[i]; break; } if (!entry) return NULL; /* Jump to object start */ fseek(fp, entry->offset, SEEK_SET); /* Initial allocate */ obj_sz = 0; /* Bytes in object */ total_sz = 0; /* Bytes read in */ n_blks = 1; data = malloc(blk_sz * n_blks); memset(data, 0, blk_sz * n_blks); /* Suck in data */ stream = 0; while ((read_sz = fread(data+total_sz, 1, blk_sz-1, fp)) && !ferror(fp)) { total_sz += read_sz; *(data + total_sz) = '\0'; if (total_sz + blk_sz >= (blk_sz * n_blks)) data = realloc(data, blk_sz * (++n_blks)); search = total_sz - read_sz; if (search < 0) search = 0; if ((c = strstr(data + search, "endobj"))) { *(c + strlen("endobj") + 1) = '\0'; obj_sz = (void *)strstr(data + search, "endobj") - (void *)data; obj_sz += strlen("endobj") + 1; break; } else if (strstr(data, "stream")) stream = 1; } clearerr(fp); fseek(fp, start, SEEK_SET); if (size) *size = obj_sz; if (is_stream) *is_stream = stream; return data; }
static char *get_object( FILE *fp, int obj_id, const xref_t *xref, size_t *size, int *is_stream) { static const int blk_sz = 256; int i, total_sz, read_sz, n_blks, search, stream; size_t obj_sz; char *c, *data; long start; const xref_entry_t *entry; if (size) *size = 0; if (is_stream) *is_stream = 0; start = ftell(fp); /* Find object */ entry = NULL; for (i=0; i<xref->n_entries; i++) if (xref->entries[i].obj_id == obj_id) { entry = &xref->entries[i]; break; } if (!entry) return NULL; /* Jump to object start */ fseek(fp, entry->offset, SEEK_SET); /* Initial allocate */ obj_sz = 0; /* Bytes in object */ total_sz = 0; /* Bytes read in */ n_blks = 1; data = safe_calloc(blk_sz * n_blks); /* Suck in data */ stream = 0; while ((read_sz = fread(data+total_sz, 1, blk_sz-1, fp)) && !ferror(fp)) { total_sz += read_sz; *(data + total_sz) = '\0'; if (total_sz + blk_sz >= (blk_sz * n_blks)) data = realloc(data, blk_sz * (++n_blks)); search = total_sz - read_sz; if (search < 0) search = 0; if ((c = strstr(data + search, "endobj"))) { *(c + strlen("endobj") + 1) = '\0'; obj_sz = (void *)strstr(data + search, "endobj") - (void *)data; obj_sz += strlen("endobj") + 1; break; } else if (strstr(data, "stream")) stream = 1; } clearerr(fp); fseek(fp, start, SEEK_SET); if (size) *size = obj_sz; if (is_stream) *is_stream = stream; return data; }
{'added': [(125, ' pdf = safe_calloc(sizeof(pdf_t));'), (135, ' pdf->name = safe_calloc(strlen(n) + 1);'), (140, ' pdf->name = safe_calloc(strlen("Unknown") + 1);'), (219, ' pdf->xrefs = safe_calloc(sizeof(xref_t) * pdf->n_xrefs);'), (317, ' buf = safe_calloc(sz + 1);'), (447, ' dst_name = safe_calloc(strlen(name) * 2 + 16);'), (646, ' xref->entries = safe_calloc(xref->n_entries * sizeof(struct _xref_entry));'), (813, ' daddy = safe_calloc(sizeof(creator_template));'), (1102, ' data = safe_calloc(blk_sz * n_blks);'), (1269, ' char *header = safe_calloc(1024);'), (1270, ' long start = ftell(fp);'), (1288, ' ascii = safe_calloc(strlen(str) + 1);'), (1310, ' ascii = safe_calloc(str_len);')], 'deleted': [(125, ' pdf = calloc(1, sizeof(pdf_t));'), (135, ' pdf->name = malloc(strlen(n) + 1);'), (140, ' pdf->name = malloc(strlen("Unknown") + 1);'), (219, ' pdf->xrefs = calloc(1, sizeof(xref_t) * pdf->n_xrefs);'), (317, ' buf = malloc(sz + 1);'), (447, ' dst_name = malloc(strlen(name) * 2 + 16);'), (646, ' xref->entries = calloc(1, xref->n_entries * sizeof(struct _xref_entry));'), (813, ' daddy = malloc(sizeof(creator_template));'), (1102, ' data = malloc(blk_sz * n_blks);'), (1103, ' memset(data, 0, blk_sz * n_blks);'), (1269, ' long start;'), (1270, ''), (1272, ' char *header;'), (1273, ''), (1274, ' header = calloc(1, 1024);'), (1275, ''), (1276, ' start = ftell(fp);'), (1280, ''), (1295, ' ascii = malloc(strlen(str) + 1);'), (1317, ' ascii = malloc(str_len);')]}
13
20
878
6,108
61
384
14
https://github.com/enferex/pdfresurrect
CVE-2019-14934
CWE-787
350
raw_socket.c
C++
rawSocketSendEthPacket
/** * @file raw_socket.c * @brief TCP/IP raw sockets * * @section License * * SPDX-License-Identifier: GPL-2.0-or-later * * Copyright (C) 2010-2020 Oryx Embedded SARL. All rights reserved. * * This file is part of CycloneTCP Open. * * This program is free software; you can redistribute it and/or * modify it under the terms of the GNU General Public License * as published by the Free Software Foundation; either version 2 * of the License, or (at your option) any later version. * * This program is distributed in the hope that it will be useful, * but WITHOUT ANY WARRANTY; without even the implied warranty of * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the * GNU General Public License for more details. * * You should have received a copy of the GNU General Public License * along with this program; if not, write to the Free Software Foundation, * Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA. * * @section Description * * A raw socket is a type of socket that allows access to the * underlying transport provider * * @author Oryx Embedded SARL (www.oryx-embedded.com) * @version 2.0.0 **/ //Switch to the appropriate trace level #define TRACE_LEVEL RAW_SOCKET_TRACE_LEVEL //Dependencies #include <string.h> #include "core/net.h" #include "core/socket.h" #include "core/raw_socket.h" #include "core/ethernet_misc.h" #include "ipv4/ipv4.h" #include "ipv4/ipv4_misc.h" #include "ipv6/ipv6.h" #include "ipv6/ipv6_misc.h" #include "mibs/mib2_module.h" #include "mibs/if_mib_module.h" #include "debug.h" //Check TCP/IP stack configuration #if (RAW_SOCKET_SUPPORT == ENABLED) /** * @brief Process incoming IP packet * @param[in] interface Underlying network interface * @param[in] pseudoHeader IPv4 or IPv6 pseudo header * @param[in] buffer Multi-part buffer containing the IP packet * @param[in] offset Offset to the first byte of the IP packet * @param[in] ancillary Additional options passed to the stack along with * the packet * @return Error code **/ 
error_t rawSocketProcessIpPacket(NetInterface *interface, IpPseudoHeader *pseudoHeader, const NetBuffer *buffer, size_t offset, NetRxAncillary *ancillary) { uint_t i; size_t length; Socket *socket; SocketQueueItem *queueItem; NetBuffer *p; //Retrieve the length of the raw IP packet length = netBufferGetLength(buffer) - offset; //Loop through opened sockets for(i = 0; i < SOCKET_MAX_COUNT; i++) { //Point to the current socket socket = socketTable + i; //Raw socket found? if(socket->type != SOCKET_TYPE_RAW_IP) continue; //Check whether the socket is bound to a particular interface if(socket->interface && socket->interface != interface) continue; #if (IPV4_SUPPORT == ENABLED) //IPv4 packet received? if(pseudoHeader->length == sizeof(Ipv4PseudoHeader)) { //Check protocol field if(socket->protocol != pseudoHeader->ipv4Data.protocol) continue; //Destination IP address filtering if(socket->localIpAddr.length != 0) { //An IPv4 address is expected if(socket->localIpAddr.length != sizeof(Ipv4Addr)) continue; //Filter out non-matching addresses if(socket->localIpAddr.ipv4Addr != pseudoHeader->ipv4Data.destAddr) continue; } //Source IP address filtering if(socket->remoteIpAddr.length != 0) { //An IPv4 address is expected if(socket->remoteIpAddr.length != sizeof(Ipv4Addr)) continue; //Filter out non-matching addresses if(socket->remoteIpAddr.ipv4Addr != pseudoHeader->ipv4Data.srcAddr) continue; } } else #endif #if (IPV6_SUPPORT == ENABLED) //IPv6 packet received? 
if(pseudoHeader->length == sizeof(Ipv6PseudoHeader)) { //Check protocol field if(socket->protocol != pseudoHeader->ipv6Data.nextHeader) continue; //Destination IP address filtering if(socket->localIpAddr.length != 0) { //An IPv6 address is expected if(socket->localIpAddr.length != sizeof(Ipv6Addr)) continue; //Filter out non-matching addresses if(!ipv6CompAddr(&socket->localIpAddr.ipv6Addr, &pseudoHeader->ipv6Data.destAddr)) continue; } //Source IP address filtering if(socket->remoteIpAddr.length != 0) { //An IPv6 address is expected if(socket->remoteIpAddr.length != sizeof(Ipv6Addr)) continue; //Filter out non-matching addresses if(!ipv6CompAddr(&socket->remoteIpAddr.ipv6Addr, &pseudoHeader->ipv6Data.srcAddr)) continue; } } else #endif //Invalid packet received? { //This should never occur... continue; } //The current socket meets all the criteria break; } //Drop incoming packet if no matching socket was found if(i >= SOCKET_MAX_COUNT) return ERROR_PROTOCOL_UNREACHABLE; //Empty receive queue? if(socket->receiveQueue == NULL) { //Allocate a memory buffer to hold the data and the associated descriptor p = netBufferAlloc(sizeof(SocketQueueItem) + length); //Successful memory allocation? 
if(p != NULL) { //Point to the newly created item queueItem = netBufferAt(p, 0); queueItem->buffer = p; //Add the newly created item to the queue socket->receiveQueue = queueItem; } else { //Memory allocation failed queueItem = NULL; } } else { //Point to the very first item queueItem = socket->receiveQueue; //Reach the last item in the receive queue for(i = 1; queueItem->next; i++) { queueItem = queueItem->next; } //Check whether the receive queue is full if(i >= RAW_SOCKET_RX_QUEUE_SIZE) { //Number of inbound packets which were chosen to be discarded even //though no errors had been detected MIB2_INC_COUNTER32(ifGroup.ifTable[interface->index].ifInDiscards, 1); IF_MIB_INC_COUNTER32(ifTable[interface->index].ifInDiscards, 1); //Report an error return ERROR_RECEIVE_QUEUE_FULL; } //Allocate a memory buffer to hold the data and the associated descriptor p = netBufferAlloc(sizeof(SocketQueueItem) + length); //Successful memory allocation? if(p != NULL) { //Add the newly created item to the queue queueItem->next = netBufferAt(p, 0); //Point to the newly created item queueItem = queueItem->next; queueItem->buffer = p; } else { //Memory allocation failed queueItem = NULL; } } //Not enough resources to properly handle the packet? if(queueItem == NULL) { //Number of inbound packets which were chosen to be discarded even //though no errors had been detected MIB2_INC_COUNTER32(ifGroup.ifTable[interface->index].ifInDiscards, 1); IF_MIB_INC_COUNTER32(ifTable[interface->index].ifInDiscards, 1); //Report an error return ERROR_OUT_OF_MEMORY; } //Initialize next field queueItem->next = NULL; //Port number is unused queueItem->srcPort = 0; #if (IPV4_SUPPORT == ENABLED) //IPv4 remote address? 
if(pseudoHeader->length == sizeof(Ipv4PseudoHeader)) { //Save the source IPv4 address queueItem->srcIpAddr.length = sizeof(Ipv4Addr); queueItem->srcIpAddr.ipv4Addr = pseudoHeader->ipv4Data.srcAddr; //Save the destination IPv4 address queueItem->destIpAddr.length = sizeof(Ipv4Addr); queueItem->destIpAddr.ipv4Addr = pseudoHeader->ipv4Data.destAddr; } #endif #if (IPV6_SUPPORT == ENABLED) //IPv6 remote address? if(pseudoHeader->length == sizeof(Ipv6PseudoHeader)) { //Save the source IPv6 address queueItem->srcIpAddr.length = sizeof(Ipv6Addr); queueItem->srcIpAddr.ipv6Addr = pseudoHeader->ipv6Data.srcAddr; //Save the destination IPv6 address queueItem->destIpAddr.length = sizeof(Ipv6Addr); queueItem->destIpAddr.ipv6Addr = pseudoHeader->ipv6Data.destAddr; } #endif //Offset to the raw IP packet queueItem->offset = sizeof(SocketQueueItem); //Copy the raw data netBufferCopy(queueItem->buffer, queueItem->offset, buffer, offset, length); //Additional options can be passed to the stack along with the packet queueItem->ancillary = *ancillary; //Notify user that data is available rawSocketUpdateEvents(socket); //Successful processing return NO_ERROR; } /** * @brief Process incoming Ethernet packet * @param[in] interface Underlying network interface * @param[in] header Pointer to the Ethernet header * @param[in] data Pointer to the payload data * @param[in] length Length of the payload data, in bytes * @param[in] ancillary Additional options passed to the stack along with * the packet **/ void rawSocketProcessEthPacket(NetInterface *interface, EthHeader *header, const uint8_t *data, size_t length, NetRxAncillary *ancillary) { uint_t i; Socket *socket; SocketQueueItem *queueItem; NetBuffer *p; //Loop through opened sockets for(i = 0; i < SOCKET_MAX_COUNT; i++) { //Point to the current socket socket = socketTable + i; //Raw socket found? 
if(socket->type != SOCKET_TYPE_RAW_ETH) continue; //Check whether the socket is bound to a particular interface if(socket->interface && socket->interface != interface) continue; //Check protocol field if(socket->protocol == SOCKET_ETH_PROTO_ALL) { //Accept all EtherType values } else if(socket->protocol == SOCKET_ETH_PROTO_LLC) { //Only accept LLC frames if(ntohs(header->type) > ETH_MTU) continue; } else { //Only accept frames with the correct EtherType value if(ntohs(header->type) != socket->protocol) continue; } //The current socket meets all the criteria break; } //Drop incoming packet if no matching socket was found if(i >= SOCKET_MAX_COUNT) return; //Empty receive queue? if(socket->receiveQueue == NULL) { //Allocate a memory buffer to hold the data and the associated descriptor p = netBufferAlloc(sizeof(SocketQueueItem) + sizeof(EthHeader) + length); //Successful memory allocation? if(p != NULL) { //Point to the newly created item queueItem = netBufferAt(p, 0); queueItem->buffer = p; //Add the newly created item to the queue socket->receiveQueue = queueItem; } else { //Memory allocation failed queueItem = NULL; } } else { //Point to the very first item queueItem = socket->receiveQueue; //Reach the last item in the receive queue for(i = 1; queueItem->next; i++) { queueItem = queueItem->next; } //Check whether the receive queue is full if(i >= RAW_SOCKET_RX_QUEUE_SIZE) { //Number of inbound packets which were chosen to be discarded even //though no errors had been detected MIB2_INC_COUNTER32(ifGroup.ifTable[interface->index].ifInDiscards, 1); IF_MIB_INC_COUNTER32(ifTable[interface->index].ifInDiscards, 1); //Exit immediately return; } //Allocate a memory buffer to hold the data and the associated descriptor p = netBufferAlloc(sizeof(SocketQueueItem) + sizeof(EthHeader) + length); //Successful memory allocation? 
if(p != NULL) { //Add the newly created item to the queue queueItem->next = netBufferAt(p, 0); //Point to the newly created item queueItem = queueItem->next; queueItem->buffer = p; } else { //Memory allocation failed queueItem = NULL; } } //Not enough resources to properly handle the packet? if(queueItem == NULL) { //Number of inbound packets which were chosen to be discarded even //though no errors had been detected MIB2_INC_COUNTER32(ifGroup.ifTable[interface->index].ifInDiscards, 1); IF_MIB_INC_COUNTER32(ifTable[interface->index].ifInDiscards, 1); //Exit immediately return; } //Initialize next field queueItem->next = NULL; //Other fields are meaningless queueItem->srcPort = 0; queueItem->srcIpAddr = IP_ADDR_ANY; queueItem->destIpAddr = IP_ADDR_ANY; //Offset to the raw datagram queueItem->offset = sizeof(SocketQueueItem); //Copy the Ethernet header netBufferWrite(queueItem->buffer, queueItem->offset, header, sizeof(EthHeader)); //Copy the payload netBufferWrite(queueItem->buffer, queueItem->offset + sizeof(EthHeader), data, length); //Additional options can be passed to the stack along with the packet queueItem->ancillary = *ancillary; //Notify user that data is available rawSocketUpdateEvents(socket); } /** * @brief Send an raw IP packet * @param[in] socket Handle referencing the socket * @param[in] message Pointer to the structure describing the raw packet * @param[in] flags Set of flags that influences the behavior of this function * @return Error code **/ error_t rawSocketSendIpPacket(Socket *socket, const SocketMsg *message, uint_t flags) { error_t error; size_t offset; NetBuffer *buffer; NetInterface *interface; IpPseudoHeader pseudoHeader; NetTxAncillary ancillary; //The socket may be bound to a particular network interface interface = socket->interface; //Allocate a buffer memory to hold the raw IP datagram buffer = ipAllocBuffer(0, &offset); //Failed to allocate memory? 
if(buffer == NULL) return ERROR_OUT_OF_MEMORY; //Start of exception handling block do { //Copy the raw data error = netBufferAppend(buffer, message->data, message->length); //Any error to report? if(error) break; #if (IPV4_SUPPORT == ENABLED) //Destination address is an IPv4 address? if(message->destIpAddr.length == sizeof(Ipv4Addr)) { Ipv4Addr srcIpAddr; //Select the source IPv4 address and the relevant network interface //to use when sending data to the specified destination host error = ipv4SelectSourceAddr(&interface, message->destIpAddr.ipv4Addr, &srcIpAddr); //Any error to report? if(error) break; //Format IPv4 pseudo header pseudoHeader.length = sizeof(Ipv4PseudoHeader); pseudoHeader.ipv4Data.srcAddr = srcIpAddr; pseudoHeader.ipv4Data.destAddr = message->destIpAddr.ipv4Addr; pseudoHeader.ipv4Data.reserved = 0; pseudoHeader.ipv4Data.protocol = socket->protocol; pseudoHeader.ipv4Data.length = htons(message->length); } else #endif #if (IPV6_SUPPORT == ENABLED) //Destination address is an IPv6 address? if(message->destIpAddr.length == sizeof(Ipv6Addr)) { //Select the source IPv6 address and the relevant network interface //to use when sending data to the specified destination host error = ipv6SelectSourceAddr(&interface, &message->destIpAddr.ipv6Addr, &pseudoHeader.ipv6Data.srcAddr); //Any error to report? if(error) break; //Format IPv6 pseudo header pseudoHeader.length = sizeof(Ipv6PseudoHeader); pseudoHeader.ipv6Data.destAddr = message->destIpAddr.ipv6Addr; pseudoHeader.ipv6Data.length = htonl(message->length); pseudoHeader.ipv6Data.reserved[0] = 0; pseudoHeader.ipv6Data.reserved[1] = 0; pseudoHeader.ipv6Data.reserved[2] = 0; pseudoHeader.ipv6Data.nextHeader = socket->protocol; } else #endif //Invalid destination address? 
{ //An internal error has occurred error = ERROR_FAILURE; //Exit immediately break; } //Additional options can be passed to the stack along with the packet ancillary = NET_DEFAULT_TX_ANCILLARY; //Set the TTL value to be used if(message->ttl != 0) { ancillary.ttl = message->ttl; } else if(ipIsMulticastAddr(&message->destIpAddr)) { ancillary.ttl = socket->multicastTtl; } else { ancillary.ttl = socket->ttl; } //This flag tells the stack that the destination is on a locally attached //network and not to perform a lookup of the routing table if(flags & SOCKET_FLAG_DONT_ROUTE) { ancillary.dontRoute = TRUE; } #if (IP_DIFF_SERV_SUPPORT == ENABLED) //Set DSCP field ancillary.dscp = socket->dscp; #endif #if (ETH_SUPPORT == ENABLED) //Set source and destination MAC addresses ancillary.srcMacAddr = message->srcMacAddr; ancillary.destMacAddr = message->destMacAddr; #endif #if (ETH_VLAN_SUPPORT == ENABLED) //Set VLAN PCP and DEI fields ancillary.vlanPcp = socket->vlanPcp; ancillary.vlanDei = socket->vlanDei; #endif #if (ETH_VMAN_SUPPORT == ENABLED) //Set VMAN PCP and DEI fields ancillary.vmanPcp = socket->vmanPcp; ancillary.vmanDei = socket->vmanDei; #endif #if (ETH_PORT_TAGGING_SUPPORT == ENABLED) //Set switch port identifier ancillary.port = message->switchPort; #endif #if (ETH_TIMESTAMP_SUPPORT == ENABLED) //Unique identifier for hardware time stamping ancillary.timestampId = message->timestampId; #endif //Send raw IP datagram error = ipSendDatagram(interface, &pseudoHeader, buffer, offset, &ancillary); //Failed to send data? 
if(error) break; //End of exception handling block } while(0); //Free previously allocated memory block netBufferFree(buffer); //Return status code return error; } /** * @brief Send an raw Ethernet packet * @param[in] socket Handle referencing the socket * @param[in] message Pointer to the structure describing the raw packet * @param[in] flags Set of flags that influences the behavior of this function * @return Error code **/ error_t rawSocketSendEthPacket(Socket *socket, const SocketMsg *message, uint_t flags) { error_t error; #if (ETH_SUPPORT == ENABLED) size_t length; NetBuffer *buffer; NetInterface *interface; //Select the relevant network interface if(socket->interface != NULL) { interface = socket->interface; } else { interface = netGetDefaultInterface(); } //Forward the frame to the physical interface interface = nicGetPhysicalInterface(interface); //Ethernet interface? if(interface->nicDriver != NULL && interface->nicDriver->type == NIC_TYPE_ETHERNET) { //Allocate a buffer memory to hold the raw Ethernet packet buffer = netBufferAlloc(0); //Failed to allocate buffer? if(buffer == NULL) return ERROR_OUT_OF_MEMORY; //Get the length of the raw data length = message->length; //Copy the raw data error = netBufferAppend(buffer, message->data, length); //Check status code if(!error) { //Automatic padding not supported by hardware? if(!interface->nicDriver->autoPadding) { //The host controller should manually add padding //to the packet before transmitting it if(length < (ETH_MIN_FRAME_SIZE - ETH_CRC_SIZE)) { size_t n; //Add padding as necessary n = (ETH_MIN_FRAME_SIZE - ETH_CRC_SIZE) - length; //Append padding bytes error = netBufferAppend(buffer, ethPadding, n); //Adjust frame length length += n; } } } //Check status code if(!error) { //CRC calculation not supported by hardware? 
if(!interface->nicDriver->autoCrcCalc) { uint32_t crc; //Compute CRC over the header and payload crc = ethCalcCrcEx(buffer, 0, length); //Convert from host byte order to little-endian byte order crc = htole32(crc); //Append the calculated CRC value error = netBufferAppend(buffer, &crc, sizeof(crc)); //Adjust frame length length += sizeof(crc); } } //Check status code if(!error) { NetTxAncillary ancillary; //Additional options can be passed to the stack along with the packet ancillary = NET_DEFAULT_TX_ANCILLARY; #if (ETH_PORT_TAGGING_SUPPORT == ENABLED) //Set switch port identifier ancillary.port = message->switchPort; #endif #if (ETH_TIMESTAMP_SUPPORT == ENABLED) //Unique identifier for hardware time stamping ancillary.timestampId = message->timestampId; #endif //Debug message TRACE_DEBUG("Sending raw Ethernet frame (%" PRIuSIZE " bytes)...\r\n", length); //Send the resulting packet over the specified link error = nicSendPacket(interface, buffer, 0, &ancillary); } //Free previously allocated memory block netBufferFree(buffer); } else #endif //Unknown interface type? 
{ //Report an error error = ERROR_INVALID_INTERFACE; } //Return status code return error; } /** * @brief Receive an IP packet from a raw socket * @param[in] socket Handle referencing the socket * @param[out] srcIpAddr Source IP address (optional) * @param[out] destIpAddr Destination IP address (optional) * @param[out] data Buffer where to store the incoming data * @param[in] size Maximum number of bytes that can be received * @param[out] received Number of bytes that have been received * @param[in] flags Set of flags that influences the behavior of this function * @return Error code **/ error_t rawSocketReceiveIpPacket(Socket *socket, SocketMsg *message, uint_t flags) { error_t error; SocketQueueItem *queueItem; //The SOCKET_FLAG_DONT_WAIT enables non-blocking operation if((flags & SOCKET_FLAG_DONT_WAIT) == 0) { //Check whether the receive queue is empty if(socket->receiveQueue == NULL) { //Set the events the application is interested in socket->eventMask = SOCKET_EVENT_RX_READY; //Reset the event object osResetEvent(&socket->event); //Release exclusive access osReleaseMutex(&netMutex); //Wait until an event is triggered osWaitForEvent(&socket->event, socket->timeout); //Get exclusive access osAcquireMutex(&netMutex); } } //Any packet received? 
if(socket->receiveQueue != NULL) { //Point to the first item in the receive queue queueItem = socket->receiveQueue; //Copy data to user buffer message->length = netBufferRead(message->data, queueItem->buffer, queueItem->offset, message->size); //Save the source IP address message->srcIpAddr = queueItem->srcIpAddr; //Save the source port number message->srcPort = queueItem->srcPort; //Save the destination IP address message->destIpAddr = queueItem->destIpAddr; //Save TTL value message->ttl = queueItem->ancillary.ttl; #if (ETH_SUPPORT == ENABLED) //Save source and destination MAC addresses message->srcMacAddr = queueItem->ancillary.srcMacAddr; message->destMacAddr = queueItem->ancillary.destMacAddr; #endif #if (ETH_PORT_TAGGING_SUPPORT == ENABLED) //Save switch port identifier message->switchPort = queueItem->ancillary.port; #endif #if (ETH_TIMESTAMP_SUPPORT == ENABLED) //Save captured time stamp message->timestamp = queueItem->ancillary.timestamp; #endif //If the SOCKET_FLAG_PEEK flag is set, the data is copied into the //buffer but is not removed from the input queue if((flags & SOCKET_FLAG_PEEK) == 0) { //Remove the item from the receive queue socket->receiveQueue = queueItem->next; //Deallocate memory buffer netBufferFree(queueItem->buffer); } //Update the state of events rawSocketUpdateEvents(socket); //Successful read operation error = NO_ERROR; } else { //Total number of data that have been received message->length = 0; //Report a timeout error error = ERROR_TIMEOUT; } //Return status code return error; } /** * @brief Receive an Ethernet packet from a raw socket * @param[in] socket Handle referencing the socket * @param[out] data Buffer where to store the incoming data * @param[in] size Maximum number of bytes that can be received * @param[out] received Number of bytes that have been received * @param[in] flags Set of flags that influences the behavior of this function * @return Error code **/ error_t rawSocketReceiveEthPacket(Socket *socket, SocketMsg 
*message, uint_t flags) { error_t error; SocketQueueItem *queueItem; //The SOCKET_FLAG_DONT_WAIT enables non-blocking operation if((flags & SOCKET_FLAG_DONT_WAIT) == 0) { //Check whether the receive queue is empty if(socket->receiveQueue == NULL) { //Set the events the application is interested in socket->eventMask = SOCKET_EVENT_RX_READY; //Reset the event object osResetEvent(&socket->event); //Release exclusive access osReleaseMutex(&netMutex); //Wait until an event is triggered osWaitForEvent(&socket->event, socket->timeout); //Get exclusive access osAcquireMutex(&netMutex); } } //Any packet received? if(socket->receiveQueue != NULL) { //Point to the first item in the receive queue queueItem = socket->receiveQueue; //Copy data to user buffer message->length = netBufferRead(message->data, queueItem->buffer, queueItem->offset, message->size); #if (ETH_SUPPORT == ENABLED) //Save source and destination MAC addresses message->srcMacAddr = queueItem->ancillary.srcMacAddr; message->destMacAddr = queueItem->ancillary.destMacAddr; #endif #if (ETH_PORT_TAGGING_SUPPORT == ENABLED) //Save switch port identifier message->switchPort = queueItem->ancillary.port; #endif #if (ETH_TIMESTAMP_SUPPORT == ENABLED) //Save captured time stamp message->timestamp = queueItem->ancillary.timestamp; #endif //If the SOCKET_FLAG_PEEK flag is set, the data is copied into the //buffer but is not removed from the input queue if((flags & SOCKET_FLAG_PEEK) == 0) { //Remove the item from the receive queue socket->receiveQueue = queueItem->next; //Deallocate memory buffer netBufferFree(queueItem->buffer); } //Update the state of events rawSocketUpdateEvents(socket); //Successful read operation error = NO_ERROR; } else { //Total number of data that have been received message->length = 0; //Report a timeout error error = ERROR_TIMEOUT; } //Return status code return error; } /** * @brief Update event state for raw sockets * @param[in] socket Handle referencing the socket **/ void 
rawSocketUpdateEvents(Socket *socket) { //Clear event flags socket->eventFlags = 0; //The socket is marked as readable if a datagram is pending in the queue if(socket->receiveQueue) socket->eventFlags |= SOCKET_EVENT_RX_READY; //Check whether the socket is bound to a particular network interface if(socket->interface != NULL) { //Handle link up and link down events if(socket->interface->linkState) socket->eventFlags |= SOCKET_EVENT_LINK_UP; else socket->eventFlags |= SOCKET_EVENT_LINK_DOWN; } //Mask unused events socket->eventFlags &= socket->eventMask; //Any event to signal? if(socket->eventFlags) { //Unblock I/O operations currently in waiting state osSetEvent(&socket->event); //Set user event to signaled state if necessary if(socket->userEvent != NULL) { osSetEvent(socket->userEvent); } } } #endif
/** * @file raw_socket.c * @brief TCP/IP raw sockets * * @section License * * SPDX-License-Identifier: GPL-2.0-or-later * * Copyright (C) 2010-2021 Oryx Embedded SARL. All rights reserved. * * This file is part of CycloneTCP Open. * * This program is free software; you can redistribute it and/or * modify it under the terms of the GNU General Public License * as published by the Free Software Foundation; either version 2 * of the License, or (at your option) any later version. * * This program is distributed in the hope that it will be useful, * but WITHOUT ANY WARRANTY; without even the implied warranty of * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the * GNU General Public License for more details. * * You should have received a copy of the GNU General Public License * along with this program; if not, write to the Free Software Foundation, * Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA. * * @section Description * * A raw socket is a type of socket that allows access to the * underlying transport provider * * @author Oryx Embedded SARL (www.oryx-embedded.com) * @version 2.0.2 **/ //Switch to the appropriate trace level #define TRACE_LEVEL RAW_SOCKET_TRACE_LEVEL //Dependencies #include <string.h> #include "core/net.h" #include "core/socket.h" #include "core/raw_socket.h" #include "core/ethernet_misc.h" #include "ipv4/ipv4.h" #include "ipv4/ipv4_misc.h" #include "ipv6/ipv6.h" #include "ipv6/ipv6_misc.h" #include "mibs/mib2_module.h" #include "mibs/if_mib_module.h" #include "debug.h" //Check TCP/IP stack configuration #if (RAW_SOCKET_SUPPORT == ENABLED) /** * @brief Process incoming IP packet * @param[in] interface Underlying network interface * @param[in] pseudoHeader IPv4 or IPv6 pseudo header * @param[in] buffer Multi-part buffer containing the IP packet * @param[in] offset Offset to the first byte of the IP packet * @param[in] ancillary Additional options passed to the stack along with * the packet * @return Error code **/ 
error_t rawSocketProcessIpPacket(NetInterface *interface, IpPseudoHeader *pseudoHeader, const NetBuffer *buffer, size_t offset, NetRxAncillary *ancillary) { uint_t i; size_t length; Socket *socket; SocketQueueItem *queueItem; NetBuffer *p; //Retrieve the length of the raw IP packet length = netBufferGetLength(buffer) - offset; //Loop through opened sockets for(i = 0; i < SOCKET_MAX_COUNT; i++) { //Point to the current socket socket = socketTable + i; //Raw socket found? if(socket->type != SOCKET_TYPE_RAW_IP) continue; //Check whether the socket is bound to a particular interface if(socket->interface && socket->interface != interface) continue; #if (IPV4_SUPPORT == ENABLED) //IPv4 packet received? if(pseudoHeader->length == sizeof(Ipv4PseudoHeader)) { //Check protocol field if(socket->protocol != pseudoHeader->ipv4Data.protocol) continue; //Destination IP address filtering if(socket->localIpAddr.length != 0) { //An IPv4 address is expected if(socket->localIpAddr.length != sizeof(Ipv4Addr)) continue; //Filter out non-matching addresses if(socket->localIpAddr.ipv4Addr != pseudoHeader->ipv4Data.destAddr) continue; } //Source IP address filtering if(socket->remoteIpAddr.length != 0) { //An IPv4 address is expected if(socket->remoteIpAddr.length != sizeof(Ipv4Addr)) continue; //Filter out non-matching addresses if(socket->remoteIpAddr.ipv4Addr != pseudoHeader->ipv4Data.srcAddr) continue; } } else #endif #if (IPV6_SUPPORT == ENABLED) //IPv6 packet received? 
if(pseudoHeader->length == sizeof(Ipv6PseudoHeader)) { //Check protocol field if(socket->protocol != pseudoHeader->ipv6Data.nextHeader) continue; //Destination IP address filtering if(socket->localIpAddr.length != 0) { //An IPv6 address is expected if(socket->localIpAddr.length != sizeof(Ipv6Addr)) continue; //Filter out non-matching addresses if(!ipv6CompAddr(&socket->localIpAddr.ipv6Addr, &pseudoHeader->ipv6Data.destAddr)) continue; } //Source IP address filtering if(socket->remoteIpAddr.length != 0) { //An IPv6 address is expected if(socket->remoteIpAddr.length != sizeof(Ipv6Addr)) continue; //Filter out non-matching addresses if(!ipv6CompAddr(&socket->remoteIpAddr.ipv6Addr, &pseudoHeader->ipv6Data.srcAddr)) continue; } } else #endif //Invalid packet received? { //This should never occur... continue; } //The current socket meets all the criteria break; } //Drop incoming packet if no matching socket was found if(i >= SOCKET_MAX_COUNT) return ERROR_PROTOCOL_UNREACHABLE; //Empty receive queue? if(socket->receiveQueue == NULL) { //Allocate a memory buffer to hold the data and the associated descriptor p = netBufferAlloc(sizeof(SocketQueueItem) + length); //Successful memory allocation? 
if(p != NULL) { //Point to the newly created item queueItem = netBufferAt(p, 0); queueItem->buffer = p; //Add the newly created item to the queue socket->receiveQueue = queueItem; } else { //Memory allocation failed queueItem = NULL; } } else { //Point to the very first item queueItem = socket->receiveQueue; //Reach the last item in the receive queue for(i = 1; queueItem->next; i++) { queueItem = queueItem->next; } //Check whether the receive queue is full if(i >= RAW_SOCKET_RX_QUEUE_SIZE) { //Number of inbound packets which were chosen to be discarded even //though no errors had been detected MIB2_INC_COUNTER32(ifGroup.ifTable[interface->index].ifInDiscards, 1); IF_MIB_INC_COUNTER32(ifTable[interface->index].ifInDiscards, 1); //Report an error return ERROR_RECEIVE_QUEUE_FULL; } //Allocate a memory buffer to hold the data and the associated descriptor p = netBufferAlloc(sizeof(SocketQueueItem) + length); //Successful memory allocation? if(p != NULL) { //Add the newly created item to the queue queueItem->next = netBufferAt(p, 0); //Point to the newly created item queueItem = queueItem->next; queueItem->buffer = p; } else { //Memory allocation failed queueItem = NULL; } } //Not enough resources to properly handle the packet? if(queueItem == NULL) { //Number of inbound packets which were chosen to be discarded even //though no errors had been detected MIB2_INC_COUNTER32(ifGroup.ifTable[interface->index].ifInDiscards, 1); IF_MIB_INC_COUNTER32(ifTable[interface->index].ifInDiscards, 1); //Report an error return ERROR_OUT_OF_MEMORY; } //Initialize next field queueItem->next = NULL; //Network interface where the packet was received queueItem->interface = interface; //Port number is unused queueItem->srcPort = 0; #if (IPV4_SUPPORT == ENABLED) //IPv4 remote address? 
if(pseudoHeader->length == sizeof(Ipv4PseudoHeader)) { //Save the source IPv4 address queueItem->srcIpAddr.length = sizeof(Ipv4Addr); queueItem->srcIpAddr.ipv4Addr = pseudoHeader->ipv4Data.srcAddr; //Save the destination IPv4 address queueItem->destIpAddr.length = sizeof(Ipv4Addr); queueItem->destIpAddr.ipv4Addr = pseudoHeader->ipv4Data.destAddr; } #endif #if (IPV6_SUPPORT == ENABLED) //IPv6 remote address? if(pseudoHeader->length == sizeof(Ipv6PseudoHeader)) { //Save the source IPv6 address queueItem->srcIpAddr.length = sizeof(Ipv6Addr); queueItem->srcIpAddr.ipv6Addr = pseudoHeader->ipv6Data.srcAddr; //Save the destination IPv6 address queueItem->destIpAddr.length = sizeof(Ipv6Addr); queueItem->destIpAddr.ipv6Addr = pseudoHeader->ipv6Data.destAddr; } #endif //Offset to the raw IP packet queueItem->offset = sizeof(SocketQueueItem); //Copy the raw data netBufferCopy(queueItem->buffer, queueItem->offset, buffer, offset, length); //Additional options can be passed to the stack along with the packet queueItem->ancillary = *ancillary; //Notify user that data is available rawSocketUpdateEvents(socket); //Successful processing return NO_ERROR; } /** * @brief Process incoming Ethernet packet * @param[in] interface Underlying network interface * @param[in] header Pointer to the Ethernet header * @param[in] data Pointer to the payload data * @param[in] length Length of the payload data, in bytes * @param[in] ancillary Additional options passed to the stack along with * the packet **/ void rawSocketProcessEthPacket(NetInterface *interface, EthHeader *header, const uint8_t *data, size_t length, NetRxAncillary *ancillary) { uint_t i; Socket *socket; SocketQueueItem *queueItem; NetBuffer *p; //Loop through opened sockets for(i = 0; i < SOCKET_MAX_COUNT; i++) { //Point to the current socket socket = socketTable + i; //Raw socket found? 
if(socket->type != SOCKET_TYPE_RAW_ETH) continue; //Check whether the socket is bound to a particular interface if(socket->interface && socket->interface != interface) continue; //Check protocol field if(socket->protocol == SOCKET_ETH_PROTO_ALL) { //Accept all EtherType values } else if(socket->protocol == SOCKET_ETH_PROTO_LLC) { //Only accept LLC frames if(ntohs(header->type) > ETH_MTU) continue; } else { //Only accept frames with the correct EtherType value if(ntohs(header->type) != socket->protocol) continue; } //The current socket meets all the criteria break; } //Drop incoming packet if no matching socket was found if(i >= SOCKET_MAX_COUNT) return; //Empty receive queue? if(socket->receiveQueue == NULL) { //Allocate a memory buffer to hold the data and the associated descriptor p = netBufferAlloc(sizeof(SocketQueueItem) + sizeof(EthHeader) + length); //Successful memory allocation? if(p != NULL) { //Point to the newly created item queueItem = netBufferAt(p, 0); queueItem->buffer = p; //Add the newly created item to the queue socket->receiveQueue = queueItem; } else { //Memory allocation failed queueItem = NULL; } } else { //Point to the very first item queueItem = socket->receiveQueue; //Reach the last item in the receive queue for(i = 1; queueItem->next; i++) { queueItem = queueItem->next; } //Check whether the receive queue is full if(i >= RAW_SOCKET_RX_QUEUE_SIZE) { //Number of inbound packets which were chosen to be discarded even //though no errors had been detected MIB2_INC_COUNTER32(ifGroup.ifTable[interface->index].ifInDiscards, 1); IF_MIB_INC_COUNTER32(ifTable[interface->index].ifInDiscards, 1); //Exit immediately return; } //Allocate a memory buffer to hold the data and the associated descriptor p = netBufferAlloc(sizeof(SocketQueueItem) + sizeof(EthHeader) + length); //Successful memory allocation? 
if(p != NULL) { //Add the newly created item to the queue queueItem->next = netBufferAt(p, 0); //Point to the newly created item queueItem = queueItem->next; queueItem->buffer = p; } else { //Memory allocation failed queueItem = NULL; } } //Not enough resources to properly handle the packet? if(queueItem == NULL) { //Number of inbound packets which were chosen to be discarded even //though no errors had been detected MIB2_INC_COUNTER32(ifGroup.ifTable[interface->index].ifInDiscards, 1); IF_MIB_INC_COUNTER32(ifTable[interface->index].ifInDiscards, 1); //Exit immediately return; } //Initialize next field queueItem->next = NULL; //Network interface where the packet was received queueItem->interface = interface; //Other fields are meaningless queueItem->srcPort = 0; queueItem->srcIpAddr = IP_ADDR_ANY; queueItem->destIpAddr = IP_ADDR_ANY; //Offset to the raw datagram queueItem->offset = sizeof(SocketQueueItem); //Copy the Ethernet header netBufferWrite(queueItem->buffer, queueItem->offset, header, sizeof(EthHeader)); //Copy the payload netBufferWrite(queueItem->buffer, queueItem->offset + sizeof(EthHeader), data, length); //Additional options can be passed to the stack along with the packet queueItem->ancillary = *ancillary; //Notify user that data is available rawSocketUpdateEvents(socket); } /** * @brief Send an raw IP packet * @param[in] socket Handle referencing the socket * @param[in] message Pointer to the structure describing the raw packet * @param[in] flags Set of flags that influences the behavior of this function * @return Error code **/ error_t rawSocketSendIpPacket(Socket *socket, const SocketMsg *message, uint_t flags) { error_t error; size_t offset; NetBuffer *buffer; NetInterface *interface; IpPseudoHeader pseudoHeader; NetTxAncillary ancillary; //Select the relevant network interface if(message->interface != NULL) { interface = message->interface; } else { interface = socket->interface; } //Allocate a buffer memory to hold the raw IP datagram buffer = 
ipAllocBuffer(0, &offset); //Failed to allocate memory? if(buffer == NULL) return ERROR_OUT_OF_MEMORY; //Start of exception handling block do { //Copy the raw data error = netBufferAppend(buffer, message->data, message->length); //Any error to report? if(error) break; #if (IPV4_SUPPORT == ENABLED) //Destination address is an IPv4 address? if(message->destIpAddr.length == sizeof(Ipv4Addr)) { Ipv4Addr srcIpAddr; //Select the source IPv4 address and the relevant network interface //to use when sending data to the specified destination host error = ipv4SelectSourceAddr(&interface, message->destIpAddr.ipv4Addr, &srcIpAddr); //Any error to report? if(error) break; //Format IPv4 pseudo header pseudoHeader.length = sizeof(Ipv4PseudoHeader); pseudoHeader.ipv4Data.srcAddr = srcIpAddr; pseudoHeader.ipv4Data.destAddr = message->destIpAddr.ipv4Addr; pseudoHeader.ipv4Data.reserved = 0; pseudoHeader.ipv4Data.protocol = socket->protocol; pseudoHeader.ipv4Data.length = htons(message->length); } else #endif #if (IPV6_SUPPORT == ENABLED) //Destination address is an IPv6 address? if(message->destIpAddr.length == sizeof(Ipv6Addr)) { //Select the source IPv6 address and the relevant network interface //to use when sending data to the specified destination host error = ipv6SelectSourceAddr(&interface, &message->destIpAddr.ipv6Addr, &pseudoHeader.ipv6Data.srcAddr); //Any error to report? if(error) break; //Format IPv6 pseudo header pseudoHeader.length = sizeof(Ipv6PseudoHeader); pseudoHeader.ipv6Data.destAddr = message->destIpAddr.ipv6Addr; pseudoHeader.ipv6Data.length = htonl(message->length); pseudoHeader.ipv6Data.reserved[0] = 0; pseudoHeader.ipv6Data.reserved[1] = 0; pseudoHeader.ipv6Data.reserved[2] = 0; pseudoHeader.ipv6Data.nextHeader = socket->protocol; } else #endif //Invalid destination address? 
{ //An internal error has occurred error = ERROR_FAILURE; //Exit immediately break; } //Additional options can be passed to the stack along with the packet ancillary = NET_DEFAULT_TX_ANCILLARY; //Set the TTL value to be used if(message->ttl != 0) { ancillary.ttl = message->ttl; } else if(ipIsMulticastAddr(&message->destIpAddr)) { ancillary.ttl = socket->multicastTtl; } else { ancillary.ttl = socket->ttl; } //This flag tells the stack that the destination is on a locally attached //network and not to perform a lookup of the routing table if(flags & SOCKET_FLAG_DONT_ROUTE) { ancillary.dontRoute = TRUE; } #if (IP_DIFF_SERV_SUPPORT == ENABLED) //Set DSCP field ancillary.dscp = socket->dscp; #endif #if (ETH_SUPPORT == ENABLED) //Set source and destination MAC addresses ancillary.srcMacAddr = message->srcMacAddr; ancillary.destMacAddr = message->destMacAddr; #endif #if (ETH_VLAN_SUPPORT == ENABLED) //Set VLAN PCP and DEI fields ancillary.vlanPcp = socket->vlanPcp; ancillary.vlanDei = socket->vlanDei; #endif #if (ETH_VMAN_SUPPORT == ENABLED) //Set VMAN PCP and DEI fields ancillary.vmanPcp = socket->vmanPcp; ancillary.vmanDei = socket->vmanDei; #endif #if (ETH_PORT_TAGGING_SUPPORT == ENABLED) //Set switch port identifier ancillary.port = message->switchPort; #endif #if (ETH_TIMESTAMP_SUPPORT == ENABLED) //Unique identifier for hardware time stamping ancillary.timestampId = message->timestampId; #endif //Send raw IP datagram error = ipSendDatagram(interface, &pseudoHeader, buffer, offset, &ancillary); //Failed to send data? 
if(error) break; //End of exception handling block } while(0); //Free previously allocated memory block netBufferFree(buffer); //Return status code return error; } /** * @brief Send an raw Ethernet packet * @param[in] socket Handle referencing the socket * @param[in] message Pointer to the structure describing the raw packet * @param[in] flags Set of flags that influences the behavior of this function * @return Error code **/ error_t rawSocketSendEthPacket(Socket *socket, const SocketMsg *message, uint_t flags) { error_t error; #if (ETH_SUPPORT == ENABLED) size_t length; NetBuffer *buffer; NetInterface *interface; //Select the relevant network interface if(message->interface != NULL) { interface = message->interface; } else if(socket->interface != NULL) { interface = socket->interface; } else { interface = netGetDefaultInterface(); } //Forward the frame to the physical interface interface = nicGetPhysicalInterface(interface); //Ethernet interface? if(interface->nicDriver != NULL && interface->nicDriver->type == NIC_TYPE_ETHERNET) { //Allocate a buffer memory to hold the raw Ethernet packet buffer = netBufferAlloc(0); //Failed to allocate buffer? if(buffer == NULL) return ERROR_OUT_OF_MEMORY; //Get the length of the raw data length = message->length; //Copy the raw data error = netBufferAppend(buffer, message->data, length); //Check status code if(!error) { //Automatic padding not supported by hardware? if(!interface->nicDriver->autoPadding) { //The host controller should manually add padding //to the packet before transmitting it if(length < (ETH_MIN_FRAME_SIZE - ETH_CRC_SIZE)) { size_t n; //Add padding as necessary n = (ETH_MIN_FRAME_SIZE - ETH_CRC_SIZE) - length; //Append padding bytes error = netBufferAppend(buffer, ethPadding, n); //Adjust frame length length += n; } } } //Check status code if(!error) { //CRC calculation not supported by hardware? 
if(!interface->nicDriver->autoCrcCalc) { uint32_t crc; //Compute CRC over the header and payload crc = ethCalcCrcEx(buffer, 0, length); //Convert from host byte order to little-endian byte order crc = htole32(crc); //Append the calculated CRC value error = netBufferAppend(buffer, &crc, sizeof(crc)); //Adjust frame length length += sizeof(crc); } } //Check status code if(!error) { NetTxAncillary ancillary; //Additional options can be passed to the stack along with the packet ancillary = NET_DEFAULT_TX_ANCILLARY; #if (ETH_PORT_TAGGING_SUPPORT == ENABLED) //Set switch port identifier ancillary.port = message->switchPort; #endif #if (ETH_TIMESTAMP_SUPPORT == ENABLED) //Unique identifier for hardware time stamping ancillary.timestampId = message->timestampId; #endif //Debug message TRACE_DEBUG("Sending raw Ethernet frame (%" PRIuSIZE " bytes)...\r\n", length); //Send the resulting packet over the specified link error = nicSendPacket(interface, buffer, 0, &ancillary); } //Free previously allocated memory block netBufferFree(buffer); } else #endif //Unknown interface type? 
{ //Report an error error = ERROR_INVALID_INTERFACE; } //Return status code return error; } /** * @brief Receive an IP packet from a raw socket * @param[in] socket Handle referencing the socket * @param[out] message Received IP packet and ancillary data * @param[in] flags Set of flags that influences the behavior of this function * @return Error code **/ error_t rawSocketReceiveIpPacket(Socket *socket, SocketMsg *message, uint_t flags) { error_t error; SocketQueueItem *queueItem; //The SOCKET_FLAG_DONT_WAIT enables non-blocking operation if((flags & SOCKET_FLAG_DONT_WAIT) == 0) { //Check whether the receive queue is empty if(socket->receiveQueue == NULL) { //Set the events the application is interested in socket->eventMask = SOCKET_EVENT_RX_READY; //Reset the event object osResetEvent(&socket->event); //Release exclusive access osReleaseMutex(&netMutex); //Wait until an event is triggered osWaitForEvent(&socket->event, socket->timeout); //Get exclusive access osAcquireMutex(&netMutex); } } //Any packet received? 
if(socket->receiveQueue != NULL) { //Point to the first item in the receive queue queueItem = socket->receiveQueue; //Copy data to user buffer message->length = netBufferRead(message->data, queueItem->buffer, queueItem->offset, message->size); //Network interface where the packet was received message->interface = queueItem->interface; //Save the source IP address message->srcIpAddr = queueItem->srcIpAddr; //Save the source port number message->srcPort = queueItem->srcPort; //Save the destination IP address message->destIpAddr = queueItem->destIpAddr; //Save TTL value message->ttl = queueItem->ancillary.ttl; #if (ETH_SUPPORT == ENABLED) //Save source and destination MAC addresses message->srcMacAddr = queueItem->ancillary.srcMacAddr; message->destMacAddr = queueItem->ancillary.destMacAddr; #endif #if (ETH_PORT_TAGGING_SUPPORT == ENABLED) //Save switch port identifier message->switchPort = queueItem->ancillary.port; #endif #if (ETH_TIMESTAMP_SUPPORT == ENABLED) //Save captured time stamp message->timestamp = queueItem->ancillary.timestamp; #endif //If the SOCKET_FLAG_PEEK flag is set, the data is copied into the //buffer but is not removed from the input queue if((flags & SOCKET_FLAG_PEEK) == 0) { //Remove the item from the receive queue socket->receiveQueue = queueItem->next; //Deallocate memory buffer netBufferFree(queueItem->buffer); } //Update the state of events rawSocketUpdateEvents(socket); //Successful read operation error = NO_ERROR; } else { //Total number of data that have been received message->length = 0; //Report a timeout error error = ERROR_TIMEOUT; } //Return status code return error; } /** * @brief Receive an Ethernet packet from a raw socket * @param[in] socket Handle referencing the socket * @param[out] message Received Ethernet packet and ancillary data * @param[in] flags Set of flags that influences the behavior of this function * @return Error code **/ error_t rawSocketReceiveEthPacket(Socket *socket, SocketMsg *message, uint_t flags) { error_t 
error; SocketQueueItem *queueItem; //The SOCKET_FLAG_DONT_WAIT enables non-blocking operation if((flags & SOCKET_FLAG_DONT_WAIT) == 0) { //Check whether the receive queue is empty if(socket->receiveQueue == NULL) { //Set the events the application is interested in socket->eventMask = SOCKET_EVENT_RX_READY; //Reset the event object osResetEvent(&socket->event); //Release exclusive access osReleaseMutex(&netMutex); //Wait until an event is triggered osWaitForEvent(&socket->event, socket->timeout); //Get exclusive access osAcquireMutex(&netMutex); } } //Any packet received? if(socket->receiveQueue != NULL) { //Point to the first item in the receive queue queueItem = socket->receiveQueue; //Copy data to user buffer message->length = netBufferRead(message->data, queueItem->buffer, queueItem->offset, message->size); //Network interface where the packet was received message->interface = queueItem->interface; #if (ETH_SUPPORT == ENABLED) //Save source and destination MAC addresses message->srcMacAddr = queueItem->ancillary.srcMacAddr; message->destMacAddr = queueItem->ancillary.destMacAddr; #endif #if (ETH_PORT_TAGGING_SUPPORT == ENABLED) //Save switch port identifier message->switchPort = queueItem->ancillary.port; #endif #if (ETH_TIMESTAMP_SUPPORT == ENABLED) //Save captured time stamp message->timestamp = queueItem->ancillary.timestamp; #endif //If the SOCKET_FLAG_PEEK flag is set, the data is copied into the //buffer but is not removed from the input queue if((flags & SOCKET_FLAG_PEEK) == 0) { //Remove the item from the receive queue socket->receiveQueue = queueItem->next; //Deallocate memory buffer netBufferFree(queueItem->buffer); } //Update the state of events rawSocketUpdateEvents(socket); //Successful read operation error = NO_ERROR; } else { //Total number of data that have been received message->length = 0; //Report a timeout error error = ERROR_TIMEOUT; } //Return status code return error; } /** * @brief Update event state for raw sockets * @param[in] socket 
Handle referencing the socket **/ void rawSocketUpdateEvents(Socket *socket) { //Clear event flags socket->eventFlags = 0; //The socket is marked as readable if a datagram is pending in the queue if(socket->receiveQueue) socket->eventFlags |= SOCKET_EVENT_RX_READY; //Check whether the socket is bound to a particular network interface if(socket->interface != NULL) { //Handle link up and link down events if(socket->interface->linkState) socket->eventFlags |= SOCKET_EVENT_LINK_UP; else socket->eventFlags |= SOCKET_EVENT_LINK_DOWN; } //Mask unused events socket->eventFlags &= socket->eventMask; //Any event to signal? if(socket->eventFlags) { //Unblock I/O operations currently in waiting state osSetEvent(&socket->event); //Set user event to signaled state if necessary if(socket->userEvent != NULL) { osSetEvent(socket->userEvent); } } } #endif
error_t rawSocketSendEthPacket(Socket *socket, const SocketMsg *message, uint_t flags) { error_t error; #if (ETH_SUPPORT == ENABLED) size_t length; NetBuffer *buffer; NetInterface *interface; //Select the relevant network interface if(socket->interface != NULL) { interface = socket->interface; } else { interface = netGetDefaultInterface(); } //Forward the frame to the physical interface interface = nicGetPhysicalInterface(interface); //Ethernet interface? if(interface->nicDriver != NULL && interface->nicDriver->type == NIC_TYPE_ETHERNET) { //Allocate a buffer memory to hold the raw Ethernet packet buffer = netBufferAlloc(0); //Failed to allocate buffer? if(buffer == NULL) return ERROR_OUT_OF_MEMORY; //Get the length of the raw data length = message->length; //Copy the raw data error = netBufferAppend(buffer, message->data, length); //Check status code if(!error) { //Automatic padding not supported by hardware? if(!interface->nicDriver->autoPadding) { //The host controller should manually add padding //to the packet before transmitting it if(length < (ETH_MIN_FRAME_SIZE - ETH_CRC_SIZE)) { size_t n; //Add padding as necessary n = (ETH_MIN_FRAME_SIZE - ETH_CRC_SIZE) - length; //Append padding bytes error = netBufferAppend(buffer, ethPadding, n); //Adjust frame length length += n; } } } //Check status code if(!error) { //CRC calculation not supported by hardware? 
if(!interface->nicDriver->autoCrcCalc) { uint32_t crc; //Compute CRC over the header and payload crc = ethCalcCrcEx(buffer, 0, length); //Convert from host byte order to little-endian byte order crc = htole32(crc); //Append the calculated CRC value error = netBufferAppend(buffer, &crc, sizeof(crc)); //Adjust frame length length += sizeof(crc); } } //Check status code if(!error) { NetTxAncillary ancillary; //Additional options can be passed to the stack along with the packet ancillary = NET_DEFAULT_TX_ANCILLARY; #if (ETH_PORT_TAGGING_SUPPORT == ENABLED) //Set switch port identifier ancillary.port = message->switchPort; #endif #if (ETH_TIMESTAMP_SUPPORT == ENABLED) //Unique identifier for hardware time stamping ancillary.timestampId = message->timestampId; #endif //Debug message TRACE_DEBUG("Sending raw Ethernet frame (%" PRIuSIZE " bytes)...\r\n", length); //Send the resulting packet over the specified link error = nicSendPacket(interface, buffer, 0, &ancillary); } //Free previously allocated memory block netBufferFree(buffer); } else #endif //Unknown interface type? { //Report an error error = ERROR_INVALID_INTERFACE; } //Return status code return error; }
error_t rawSocketSendEthPacket(Socket *socket, const SocketMsg *message, uint_t flags) { error_t error; #if (ETH_SUPPORT == ENABLED) size_t length; NetBuffer *buffer; NetInterface *interface; //Select the relevant network interface if(message->interface != NULL) { interface = message->interface; } else if(socket->interface != NULL) { interface = socket->interface; } else { interface = netGetDefaultInterface(); } //Forward the frame to the physical interface interface = nicGetPhysicalInterface(interface); //Ethernet interface? if(interface->nicDriver != NULL && interface->nicDriver->type == NIC_TYPE_ETHERNET) { //Allocate a buffer memory to hold the raw Ethernet packet buffer = netBufferAlloc(0); //Failed to allocate buffer? if(buffer == NULL) return ERROR_OUT_OF_MEMORY; //Get the length of the raw data length = message->length; //Copy the raw data error = netBufferAppend(buffer, message->data, length); //Check status code if(!error) { //Automatic padding not supported by hardware? if(!interface->nicDriver->autoPadding) { //The host controller should manually add padding //to the packet before transmitting it if(length < (ETH_MIN_FRAME_SIZE - ETH_CRC_SIZE)) { size_t n; //Add padding as necessary n = (ETH_MIN_FRAME_SIZE - ETH_CRC_SIZE) - length; //Append padding bytes error = netBufferAppend(buffer, ethPadding, n); //Adjust frame length length += n; } } } //Check status code if(!error) { //CRC calculation not supported by hardware? 
if(!interface->nicDriver->autoCrcCalc) { uint32_t crc; //Compute CRC over the header and payload crc = ethCalcCrcEx(buffer, 0, length); //Convert from host byte order to little-endian byte order crc = htole32(crc); //Append the calculated CRC value error = netBufferAppend(buffer, &crc, sizeof(crc)); //Adjust frame length length += sizeof(crc); } } //Check status code if(!error) { NetTxAncillary ancillary; //Additional options can be passed to the stack along with the packet ancillary = NET_DEFAULT_TX_ANCILLARY; #if (ETH_PORT_TAGGING_SUPPORT == ENABLED) //Set switch port identifier ancillary.port = message->switchPort; #endif #if (ETH_TIMESTAMP_SUPPORT == ENABLED) //Unique identifier for hardware time stamping ancillary.timestampId = message->timestampId; #endif //Debug message TRACE_DEBUG("Sending raw Ethernet frame (%" PRIuSIZE " bytes)...\r\n", length); //Send the resulting packet over the specified link error = nicSendPacket(interface, buffer, 0, &ancillary); } //Free previously allocated memory block netBufferFree(buffer); } else #endif //Unknown interface type? { //Report an error error = ERROR_INVALID_INTERFACE; } //Return status code return error; }
{'added': [(9, ' * Copyright (C) 2010-2021 Oryx Embedded SARL. All rights reserved.'), (33, ' * @version 2.0.2'), (249, ' //Network interface where the packet was received'), (250, ' queueItem->interface = interface;'), (431, ' //Network interface where the packet was received'), (432, ' queueItem->interface = interface;'), (433, ''), (476, ' //Select the relevant network interface'), (477, ' if(message->interface != NULL)'), (478, ' {'), (479, ' interface = message->interface;'), (480, ' }'), (481, ' else'), (482, ' {'), (483, ' interface = socket->interface;'), (484, ' }'), (650, ' if(message->interface != NULL)'), (651, ' {'), (652, ' interface = message->interface;'), (653, ' }'), (654, ' else if(socket->interface != NULL)'), (770, ' * @param[out] message Received IP packet and ancillary data'), (812, ' //Network interface where the packet was received'), (813, ' message->interface = queueItem->interface;'), (874, ' * @param[out] message Received Ethernet packet and ancillary data'), (916, ' //Network interface where the packet was received'), (917, ' message->interface = queueItem->interface;'), (918, '')], 'deleted': [(9, ' * Copyright (C) 2010-2020 Oryx Embedded SARL. All rights reserved.'), (33, ' * @version 2.0.0'), (471, ' //The socket may be bound to a particular network interface'), (472, ' interface = socket->interface;'), (638, ' if(socket->interface != NULL)'), (754, ' * @param[out] srcIpAddr Source IP address (optional)'), (755, ' * @param[out] destIpAddr Destination IP address (optional)'), (756, ' * @param[out] data Buffer where to store the incoming data'), (757, ' * @param[in] size Maximum number of bytes that can be received'), (758, ' * @param[out] received Number of bytes that have been received'), (860, ' * @param[out] data Buffer where to store the incoming data'), (861, ' * @param[in] size Maximum number of bytes that can be received'), (862, ' * @param[out] received Number of bytes that have been received')]}
28
13
503
2,646
65
302
14
https://github.com/Oryx-Embedded/CycloneTCP
CVE-2021-26788
CWE-20
1,086
tee_svc.c
C
utee_param_to_param
// SPDX-License-Identifier: BSD-2-Clause /* * Copyright (c) 2014, STMicroelectronics International N.V. */ #include <util.h> #include <kernel/tee_common_otp.h> #include <kernel/tee_common.h> #include <tee_api_types.h> #include <kernel/tee_ta_manager.h> #include <utee_types.h> #include <tee/tee_svc.h> #include <tee/tee_cryp_utl.h> #include <mm/tee_mmu.h> #include <mm/tee_mm.h> #include <mm/core_memprot.h> #include <kernel/tee_time.h> #include <user_ta_header.h> #include <trace.h> #include <kernel/trace_ta.h> #include <kernel/chip_services.h> #include <kernel/pseudo_ta.h> #include <mm/mobj.h> vaddr_t tee_svc_uref_base; void syscall_log(const void *buf __maybe_unused, size_t len __maybe_unused) { #ifdef CFG_TEE_CORE_TA_TRACE char *kbuf; if (len == 0) return; kbuf = malloc(len + 1); if (kbuf == NULL) return; if (tee_svc_copy_from_user(kbuf, buf, len) == TEE_SUCCESS) { kbuf[len] = '\0'; trace_ext_puts(kbuf); } free(kbuf); #endif } TEE_Result syscall_not_supported(void) { return TEE_ERROR_NOT_SUPPORTED; } /* Configuration properties */ /* API implementation version */ static const char api_vers[] = TO_STR(CFG_TEE_API_VERSION); /* Implementation description (implementation-dependent) */ static const char descr[] = TO_STR(CFG_TEE_IMPL_DESCR); /* * TA persistent time protection level * 100: Persistent time based on an REE-controlled real-time clock * and on the TEE Trusted Storage for the storage of origins (default). * 1000: Persistent time based on a TEE-controlled real-time clock * and the TEE Trusted Storage. * The real-time clock MUST be out of reach of software attacks * from the REE. 
*/ static const uint32_t ta_time_prot_lvl = 100; /* Elliptic Curve Cryptographic support */ #ifdef CFG_CRYPTO_ECC static const bool crypto_ecc_en = 1; #else static const bool crypto_ecc_en; #endif /* * Trusted storage anti rollback protection level * 0 (or missing): No antirollback protection (default) * 100: Antirollback enforced at REE level * 1000: Antirollback TEE-controlled hardware */ #ifdef CFG_RPMB_FS static const uint32_t ts_antiroll_prot_lvl = 1000; #else static const uint32_t ts_antiroll_prot_lvl; #endif /* Trusted OS implementation version */ static const char trustedos_impl_version[] = TO_STR(TEE_IMPL_VERSION); /* Trusted OS implementation version (binary value) */ static const uint32_t trustedos_impl_bin_version; /* 0 by default */ /* Trusted OS implementation manufacturer name */ static const char trustedos_manufacturer[] = TO_STR(CFG_TEE_MANUFACTURER); /* Trusted firmware version */ static const char fw_impl_version[] = TO_STR(CFG_TEE_FW_IMPL_VERSION); /* Trusted firmware version (binary value) */ static const uint32_t fw_impl_bin_version; /* 0 by default */ /* Trusted firmware manufacturer name */ static const char fw_manufacturer[] = TO_STR(CFG_TEE_FW_MANUFACTURER); static TEE_Result get_prop_tee_dev_id(struct tee_ta_session *sess __unused, void *buf, size_t *blen) { TEE_Result res; TEE_UUID uuid; const size_t nslen = 5; uint8_t data[5 + FVR_DIE_ID_NUM_REGS * sizeof(uint32_t)] = { 'O', 'P', 'T', 'E', 'E' }; if (*blen < sizeof(uuid)) { *blen = sizeof(uuid); return TEE_ERROR_SHORT_BUFFER; } *blen = sizeof(uuid); if (tee_otp_get_die_id(data + nslen, sizeof(data) - nslen)) return TEE_ERROR_BAD_STATE; res = tee_hash_createdigest(TEE_ALG_SHA256, data, sizeof(data), (uint8_t *)&uuid, sizeof(uuid)); if (res != TEE_SUCCESS) return TEE_ERROR_BAD_STATE; /* * Changes the random value into and UUID as specifiec * in RFC 4122. The magic values are from the example * code in the RFC. 
* * TEE_UUID is defined slightly different from the RFC, * but close enough for our purpose. */ uuid.timeHiAndVersion &= 0x0fff; uuid.timeHiAndVersion |= 5 << 12; /* uuid.clock_seq_hi_and_reserved in the RFC */ uuid.clockSeqAndNode[0] &= 0x3f; uuid.clockSeqAndNode[0] |= 0x80; return tee_svc_copy_to_user(buf, &uuid, sizeof(TEE_UUID)); } static TEE_Result get_prop_tee_sys_time_prot_level( struct tee_ta_session *sess __unused, void *buf, size_t *blen) { uint32_t prot; if (*blen < sizeof(prot)) { *blen = sizeof(prot); return TEE_ERROR_SHORT_BUFFER; } *blen = sizeof(prot); prot = tee_time_get_sys_time_protection_level(); return tee_svc_copy_to_user(buf, &prot, sizeof(prot)); } static TEE_Result get_prop_client_id(struct tee_ta_session *sess __unused, void *buf, size_t *blen) { if (*blen < sizeof(TEE_Identity)) { *blen = sizeof(TEE_Identity); return TEE_ERROR_SHORT_BUFFER; } *blen = sizeof(TEE_Identity); return tee_svc_copy_to_user(buf, &sess->clnt_id, sizeof(TEE_Identity)); } static TEE_Result get_prop_ta_app_id(struct tee_ta_session *sess, void *buf, size_t *blen) { if (*blen < sizeof(TEE_UUID)) { *blen = sizeof(TEE_UUID); return TEE_ERROR_SHORT_BUFFER; } *blen = sizeof(TEE_UUID); return tee_svc_copy_to_user(buf, &sess->ctx->uuid, sizeof(TEE_UUID)); } /* Properties of the set TEE_PROPSET_CURRENT_CLIENT */ const struct tee_props tee_propset_client[] = { { .name = "gpd.client.identity", .prop_type = USER_TA_PROP_TYPE_IDENTITY, .get_prop_func = get_prop_client_id }, }; /* Properties of the set TEE_PROPSET_CURRENT_TA */ const struct tee_props tee_propset_ta[] = { { .name = "gpd.ta.appID", .prop_type = USER_TA_PROP_TYPE_UUID, .get_prop_func = get_prop_ta_app_id }, /* * Following properties are processed directly in libutee: * TA_PROP_STR_SINGLE_INSTANCE * TA_PROP_STR_MULTI_SESSION * TA_PROP_STR_KEEP_ALIVE * TA_PROP_STR_DATA_SIZE * TA_PROP_STR_STACK_SIZE * TA_PROP_STR_VERSION * TA_PROP_STR_DESCRIPTION * USER_TA_PROP_TYPE_STRING, * TA_DESCRIPTION */ }; /* Properties of the 
set TEE_PROPSET_TEE_IMPLEMENTATION */ const struct tee_props tee_propset_tee[] = { { .name = "gpd.tee.apiversion", .prop_type = USER_TA_PROP_TYPE_STRING, .data = api_vers, .len = sizeof(api_vers), }, { .name = "gpd.tee.description", .prop_type = USER_TA_PROP_TYPE_STRING, .data = descr, .len = sizeof(descr) }, { .name = "gpd.tee.deviceID", .prop_type = USER_TA_PROP_TYPE_UUID, .get_prop_func = get_prop_tee_dev_id }, { .name = "gpd.tee.systemTime.protectionLevel", .prop_type = USER_TA_PROP_TYPE_U32, .get_prop_func = get_prop_tee_sys_time_prot_level }, { .name = "gpd.tee.TAPersistentTime.protectionLevel", .prop_type = USER_TA_PROP_TYPE_U32, .data = &ta_time_prot_lvl, .len = sizeof(ta_time_prot_lvl) }, { .name = "gpd.tee.cryptography.ecc", .prop_type = USER_TA_PROP_TYPE_BOOL, .data = &crypto_ecc_en, .len = sizeof(crypto_ecc_en) }, { .name = "gpd.tee.trustedStorage.antiRollback.protectionLevel", .prop_type = USER_TA_PROP_TYPE_U32, .data = &ts_antiroll_prot_lvl, .len = sizeof(ts_antiroll_prot_lvl) }, { .name = "gpd.tee.trustedos.implementation.version", .prop_type = USER_TA_PROP_TYPE_STRING, .data = trustedos_impl_version, .len = sizeof(trustedos_impl_version) }, { .name = "gpd.tee.trustedos.implementation.binaryversion", .prop_type = USER_TA_PROP_TYPE_U32, .data = &trustedos_impl_bin_version, .len = sizeof(trustedos_impl_bin_version) }, { .name = "gpd.tee.trustedos.manufacturer", .prop_type = USER_TA_PROP_TYPE_STRING, .data = trustedos_manufacturer, .len = sizeof(trustedos_manufacturer) }, { .name = "gpd.tee.firmware.implementation.version", .prop_type = USER_TA_PROP_TYPE_STRING, .data = fw_impl_version, .len = sizeof(fw_impl_version) }, { .name = "gpd.tee.firmware.implementation.binaryversion", .prop_type = USER_TA_PROP_TYPE_U32, .data = &fw_impl_bin_version, .len = sizeof(fw_impl_bin_version) }, { .name = "gpd.tee.firmware.manufacturer", .prop_type = USER_TA_PROP_TYPE_STRING, .data = fw_manufacturer, .len = sizeof(fw_manufacturer) }, /* * Following properties are 
processed directly in libutee: * gpd.tee.arith.maxBigIntSize */ }; __weak const struct tee_vendor_props vendor_props_client; __weak const struct tee_vendor_props vendor_props_ta; __weak const struct tee_vendor_props vendor_props_tee; static void get_prop_set(unsigned long prop_set, const struct tee_props **props, size_t *size, const struct tee_props **vendor_props, size_t *vendor_size) { if ((TEE_PropSetHandle)prop_set == TEE_PROPSET_CURRENT_CLIENT) { *props = tee_propset_client; *size = ARRAY_SIZE(tee_propset_client); *vendor_props = vendor_props_client.props; *vendor_size = vendor_props_client.len; } else if ((TEE_PropSetHandle)prop_set == TEE_PROPSET_CURRENT_TA) { *props = tee_propset_ta; *size = ARRAY_SIZE(tee_propset_ta); *vendor_props = vendor_props_ta.props; *vendor_size = vendor_props_ta.len; } else if ((TEE_PropSetHandle)prop_set == TEE_PROPSET_TEE_IMPLEMENTATION) { *props = tee_propset_tee; *size = ARRAY_SIZE(tee_propset_tee); *vendor_props = vendor_props_tee.props; *vendor_size = vendor_props_tee.len; } else { *props = NULL; *size = 0; *vendor_props = NULL; *vendor_size = 0; } } static const struct tee_props *get_prop_struct(unsigned long prop_set, unsigned long index) { const struct tee_props *props; const struct tee_props *vendor_props; size_t size; size_t vendor_size; get_prop_set(prop_set, &props, &size, &vendor_props, &vendor_size); if (index < size) return &(props[index]); index -= size; if (index < vendor_size) return &(vendor_props[index]); return NULL; } /* * prop_set is part of TEE_PROPSET_xxx * index is the index in the Property Set to retrieve * if name is not NULL, the name of "index" property is returned * if buf is not NULL, the property is returned */ TEE_Result syscall_get_property(unsigned long prop_set, unsigned long index, void *name, uint32_t *name_len, void *buf, uint32_t *blen, uint32_t *prop_type) { struct tee_ta_session *sess; TEE_Result res; TEE_Result res2; const struct tee_props *prop; uint32_t klen; size_t klen_size; uint32_t 
elen; prop = get_prop_struct(prop_set, index); if (!prop) return TEE_ERROR_ITEM_NOT_FOUND; res = tee_ta_get_current_session(&sess); if (res != TEE_SUCCESS) return res; /* Get the property type */ if (prop_type) { res = tee_svc_copy_to_user(prop_type, &prop->prop_type, sizeof(*prop_type)); if (res != TEE_SUCCESS) return res; } /* Get the property */ if (buf && blen) { res = tee_svc_copy_from_user(&klen, blen, sizeof(klen)); if (res != TEE_SUCCESS) return res; if (prop->get_prop_func) { klen_size = klen; res = prop->get_prop_func(sess, buf, &klen_size); klen = klen_size; res2 = tee_svc_copy_to_user(blen, &klen, sizeof(*blen)); } else { if (klen < prop->len) res = TEE_ERROR_SHORT_BUFFER; else res = tee_svc_copy_to_user(buf, prop->data, prop->len); res2 = tee_svc_copy_to_user(blen, &prop->len, sizeof(*blen)); } if (res2 != TEE_SUCCESS) return res2; if (res != TEE_SUCCESS) return res; } /* Get the property name */ if (name && name_len) { res = tee_svc_copy_from_user(&klen, name_len, sizeof(klen)); if (res != TEE_SUCCESS) return res; elen = strlen(prop->name) + 1; if (klen < elen) res = TEE_ERROR_SHORT_BUFFER; else res = tee_svc_copy_to_user(name, prop->name, elen); res2 = tee_svc_copy_to_user(name_len, &elen, sizeof(*name_len)); if (res2 != TEE_SUCCESS) return res2; if (res != TEE_SUCCESS) return res; } return res; } /* * prop_set is part of TEE_PROPSET_xxx */ TEE_Result syscall_get_property_name_to_index(unsigned long prop_set, void *name, unsigned long name_len, uint32_t *index) { TEE_Result res; struct tee_ta_session *sess; const struct tee_props *props; size_t size; const struct tee_props *vendor_props; size_t vendor_size; char *kname = 0; uint32_t i; get_prop_set(prop_set, &props, &size, &vendor_props, &vendor_size); if (!props) return TEE_ERROR_ITEM_NOT_FOUND; res = tee_ta_get_current_session(&sess); if (res != TEE_SUCCESS) goto out; if (!name || !name_len) { res = TEE_ERROR_BAD_PARAMETERS; goto out; } kname = malloc(name_len); if (!kname) return 
TEE_ERROR_OUT_OF_MEMORY; res = tee_svc_copy_from_user(kname, name, name_len); if (res != TEE_SUCCESS) goto out; kname[name_len - 1] = 0; res = TEE_ERROR_ITEM_NOT_FOUND; for (i = 0; i < size; i++) { if (!strcmp(kname, props[i].name)) { res = tee_svc_copy_to_user(index, &i, sizeof(*index)); goto out; } } for (i = size; i < size + vendor_size; i++) { if (!strcmp(kname, vendor_props[i - size].name)) { res = tee_svc_copy_to_user(index, &i, sizeof(*index)); goto out; } } out: free(kname); return res; } static void utee_param_to_param(struct tee_ta_param *p, struct utee_params *up) { size_t n; uint32_t types = up->types; p->types = types; for (n = 0; n < TEE_NUM_PARAMS; n++) { uintptr_t a = up->vals[n * 2]; size_t b = up->vals[n * 2 + 1]; switch (TEE_PARAM_TYPE_GET(types, n)) { case TEE_PARAM_TYPE_MEMREF_INPUT: case TEE_PARAM_TYPE_MEMREF_OUTPUT: case TEE_PARAM_TYPE_MEMREF_INOUT: p->u[n].mem.mobj = &mobj_virt; p->u[n].mem.offs = a; p->u[n].mem.size = b; break; case TEE_PARAM_TYPE_VALUE_INPUT: case TEE_PARAM_TYPE_VALUE_INOUT: p->u[n].val.a = a; p->u[n].val.b = b; break; default: memset(&p->u[n], 0, sizeof(p->u[n])); break; } } } static TEE_Result alloc_temp_sec_mem(size_t size, struct mobj **mobj, uint8_t **va) { /* Allocate section in secure DDR */ #ifdef CFG_PAGED_USER_TA *mobj = mobj_seccpy_shm_alloc(size); #else *mobj = mobj_mm_alloc(mobj_sec_ddr, size, &tee_mm_sec_ddr); #endif if (!*mobj) return TEE_ERROR_GENERIC; *va = mobj_get_va(*mobj, 0); return TEE_SUCCESS; } /* * TA invokes some TA with parameter. * If some parameters are memory references: * - either the memref is inside TA private RAM: TA is not allowed to expose * its private RAM: use a temporary memory buffer and copy the data. * - or the memref is not in the TA private RAM: * - if the memref was mapped to the TA, TA is allowed to expose it. * - if so, converts memref virtual address into a physical address. 
*/ static TEE_Result tee_svc_copy_param(struct tee_ta_session *sess, struct tee_ta_session *called_sess, struct utee_params *callee_params, struct tee_ta_param *param, void *tmp_buf_va[TEE_NUM_PARAMS], struct mobj **mobj_tmp) { size_t n; TEE_Result res; size_t req_mem = 0; size_t s; uint8_t *dst = 0; bool ta_private_memref[TEE_NUM_PARAMS]; struct user_ta_ctx *utc = to_user_ta_ctx(sess->ctx); void *va; size_t dst_offs; /* fill 'param' input struct with caller params description buffer */ if (!callee_params) { memset(param, 0, sizeof(*param)); } else { res = tee_mmu_check_access_rights(utc, TEE_MEMORY_ACCESS_READ | TEE_MEMORY_ACCESS_ANY_OWNER, (uaddr_t)callee_params, sizeof(struct utee_params)); if (res != TEE_SUCCESS) return res; utee_param_to_param(param, callee_params); } if (called_sess && is_pseudo_ta_ctx(called_sess->ctx)) { /* pseudo TA borrows the mapping of the calling TA */ return TEE_SUCCESS; } /* All mobj in param are of type MOJB_TYPE_VIRT */ for (n = 0; n < TEE_NUM_PARAMS; n++) { ta_private_memref[n] = false; switch (TEE_PARAM_TYPE_GET(param->types, n)) { case TEE_PARAM_TYPE_MEMREF_INPUT: case TEE_PARAM_TYPE_MEMREF_OUTPUT: case TEE_PARAM_TYPE_MEMREF_INOUT: va = (void *)param->u[n].mem.offs; s = param->u[n].mem.size; if (!va) { if (s) return TEE_ERROR_BAD_PARAMETERS; break; } /* uTA cannot expose its private memory */ if (tee_mmu_is_vbuf_inside_ta_private(utc, va, s)) { s = ROUNDUP(s, sizeof(uint32_t)); if (ADD_OVERFLOW(req_mem, s, &req_mem)) return TEE_ERROR_BAD_PARAMETERS; ta_private_memref[n] = true; break; } res = tee_mmu_vbuf_to_mobj_offs(utc, va, s, &param->u[n].mem.mobj, &param->u[n].mem.offs); if (res != TEE_SUCCESS) return res; break; default: break; } } if (req_mem == 0) return TEE_SUCCESS; res = alloc_temp_sec_mem(req_mem, mobj_tmp, &dst); if (res != TEE_SUCCESS) return res; dst_offs = 0; for (n = 0; n < TEE_NUM_PARAMS; n++) { if (!ta_private_memref[n]) continue; s = ROUNDUP(param->u[n].mem.size, sizeof(uint32_t)); switch 
(TEE_PARAM_TYPE_GET(param->types, n)) { case TEE_PARAM_TYPE_MEMREF_INPUT: case TEE_PARAM_TYPE_MEMREF_INOUT: va = (void *)param->u[n].mem.offs; if (va) { res = tee_svc_copy_from_user(dst, va, param->u[n].mem.size); if (res != TEE_SUCCESS) return res; param->u[n].mem.offs = dst_offs; param->u[n].mem.mobj = *mobj_tmp; tmp_buf_va[n] = dst; dst += s; dst_offs += s; } break; case TEE_PARAM_TYPE_MEMREF_OUTPUT: va = (void *)param->u[n].mem.offs; if (va) { param->u[n].mem.offs = dst_offs; param->u[n].mem.mobj = *mobj_tmp; tmp_buf_va[n] = dst; dst += s; dst_offs += s; } break; default: continue; } } return TEE_SUCCESS; } /* * Back from execution of service: update parameters passed from TA: * If some parameters were memory references: * - either the memref was temporary: copy back data and update size * - or it was the original TA memref: update only the size value. */ static TEE_Result tee_svc_update_out_param( struct tee_ta_param *param, void *tmp_buf_va[TEE_NUM_PARAMS], struct utee_params *usr_param) { size_t n; uint64_t *vals = usr_param->vals; for (n = 0; n < TEE_NUM_PARAMS; n++) { switch (TEE_PARAM_TYPE_GET(param->types, n)) { case TEE_PARAM_TYPE_MEMREF_OUTPUT: case TEE_PARAM_TYPE_MEMREF_INOUT: /* * Memory copy is only needed if there's a temporary * buffer involved, tmp_buf_va[n] is only update if * a temporary buffer is used. Otherwise only the * size needs to be updated. 
*/ if (tmp_buf_va[n] && param->u[n].mem.size <= vals[n * 2 + 1]) { void *src = tmp_buf_va[n]; void *dst = (void *)(uintptr_t)vals[n * 2]; TEE_Result res; res = tee_svc_copy_to_user(dst, src, param->u[n].mem.size); if (res != TEE_SUCCESS) return res; } usr_param->vals[n * 2 + 1] = param->u[n].mem.size; break; case TEE_PARAM_TYPE_VALUE_OUTPUT: case TEE_PARAM_TYPE_VALUE_INOUT: vals[n * 2] = param->u[n].val.a; vals[n * 2 + 1] = param->u[n].val.b; break; default: continue; } } return TEE_SUCCESS; } /* Called when a TA calls an OpenSession on another TA */ TEE_Result syscall_open_ta_session(const TEE_UUID *dest, unsigned long cancel_req_to, struct utee_params *usr_param, uint32_t *ta_sess, uint32_t *ret_orig) { TEE_Result res; uint32_t ret_o = TEE_ORIGIN_TEE; struct tee_ta_session *s = NULL; struct tee_ta_session *sess; struct mobj *mobj_param = NULL; TEE_UUID *uuid = malloc(sizeof(TEE_UUID)); struct tee_ta_param *param = malloc(sizeof(struct tee_ta_param)); TEE_Identity *clnt_id = malloc(sizeof(TEE_Identity)); void *tmp_buf_va[TEE_NUM_PARAMS] = { NULL }; struct user_ta_ctx *utc; if (uuid == NULL || param == NULL || clnt_id == NULL) { res = TEE_ERROR_OUT_OF_MEMORY; goto out_free_only; } memset(param, 0, sizeof(struct tee_ta_param)); res = tee_ta_get_current_session(&sess); if (res != TEE_SUCCESS) goto out_free_only; utc = to_user_ta_ctx(sess->ctx); res = tee_svc_copy_from_user(uuid, dest, sizeof(TEE_UUID)); if (res != TEE_SUCCESS) goto function_exit; clnt_id->login = TEE_LOGIN_TRUSTED_APP; memcpy(&clnt_id->uuid, &sess->ctx->uuid, sizeof(TEE_UUID)); res = tee_svc_copy_param(sess, NULL, usr_param, param, tmp_buf_va, &mobj_param); if (res != TEE_SUCCESS) goto function_exit; res = tee_ta_open_session(&ret_o, &s, &utc->open_sessions, uuid, clnt_id, cancel_req_to, param); tee_mmu_set_ctx(&utc->ctx); if (res != TEE_SUCCESS) goto function_exit; res = tee_svc_update_out_param(param, tmp_buf_va, usr_param); function_exit: mobj_free(mobj_param); if (res == TEE_SUCCESS) 
tee_svc_copy_kaddr_to_uref(ta_sess, s); tee_svc_copy_to_user(ret_orig, &ret_o, sizeof(ret_o)); out_free_only: free(param); free(uuid); free(clnt_id); return res; } TEE_Result syscall_close_ta_session(unsigned long ta_sess) { TEE_Result res; struct tee_ta_session *sess; TEE_Identity clnt_id; struct tee_ta_session *s = tee_svc_uref_to_kaddr(ta_sess); struct user_ta_ctx *utc; res = tee_ta_get_current_session(&sess); if (res != TEE_SUCCESS) return res; utc = to_user_ta_ctx(sess->ctx); clnt_id.login = TEE_LOGIN_TRUSTED_APP; memcpy(&clnt_id.uuid, &sess->ctx->uuid, sizeof(TEE_UUID)); return tee_ta_close_session(s, &utc->open_sessions, &clnt_id); } TEE_Result syscall_invoke_ta_command(unsigned long ta_sess, unsigned long cancel_req_to, unsigned long cmd_id, struct utee_params *usr_param, uint32_t *ret_orig) { TEE_Result res; TEE_Result res2; uint32_t ret_o = TEE_ORIGIN_TEE; struct tee_ta_param param = { 0 }; TEE_Identity clnt_id; struct tee_ta_session *sess; struct tee_ta_session *called_sess; struct mobj *mobj_param = NULL; void *tmp_buf_va[TEE_NUM_PARAMS] = { NULL }; struct user_ta_ctx *utc; res = tee_ta_get_current_session(&sess); if (res != TEE_SUCCESS) return res; utc = to_user_ta_ctx(sess->ctx); called_sess = tee_ta_get_session( (vaddr_t)tee_svc_uref_to_kaddr(ta_sess), true, &utc->open_sessions); if (!called_sess) return TEE_ERROR_BAD_PARAMETERS; clnt_id.login = TEE_LOGIN_TRUSTED_APP; memcpy(&clnt_id.uuid, &sess->ctx->uuid, sizeof(TEE_UUID)); res = tee_svc_copy_param(sess, called_sess, usr_param, &param, tmp_buf_va, &mobj_param); if (res != TEE_SUCCESS) goto function_exit; res = tee_ta_invoke_command(&ret_o, called_sess, &clnt_id, cancel_req_to, cmd_id, &param); res2 = tee_svc_update_out_param(&param, tmp_buf_va, usr_param); if (res2 != TEE_SUCCESS) { /* * Spec for TEE_InvokeTACommand() says: * "If the return origin is different from * TEE_ORIGIN_TRUSTED_APP, then the function has failed * before it could reach the destination Trusted * Application." 
* * But if we can't update params to the caller we have no * choice we need to return some error to indicate that * parameters aren't updated as expected. */ ret_o = TEE_ORIGIN_TEE; res = res2; } function_exit: tee_ta_put_session(called_sess); mobj_free(mobj_param); if (ret_orig) tee_svc_copy_to_user(ret_orig, &ret_o, sizeof(ret_o)); return res; } TEE_Result syscall_check_access_rights(unsigned long flags, const void *buf, size_t len) { TEE_Result res; struct tee_ta_session *s; res = tee_ta_get_current_session(&s); if (res != TEE_SUCCESS) return res; return tee_mmu_check_access_rights(to_user_ta_ctx(s->ctx), flags, (uaddr_t)buf, len); } TEE_Result tee_svc_copy_from_user(void *kaddr, const void *uaddr, size_t len) { TEE_Result res; struct tee_ta_session *s; res = tee_ta_get_current_session(&s); if (res != TEE_SUCCESS) return res; res = tee_mmu_check_access_rights(to_user_ta_ctx(s->ctx), TEE_MEMORY_ACCESS_READ | TEE_MEMORY_ACCESS_ANY_OWNER, (uaddr_t)uaddr, len); if (res != TEE_SUCCESS) return res; memcpy(kaddr, uaddr, len); return TEE_SUCCESS; } TEE_Result tee_svc_copy_to_user(void *uaddr, const void *kaddr, size_t len) { TEE_Result res; struct tee_ta_session *s; res = tee_ta_get_current_session(&s); if (res != TEE_SUCCESS) return res; res = tee_mmu_check_access_rights(to_user_ta_ctx(s->ctx), TEE_MEMORY_ACCESS_WRITE | TEE_MEMORY_ACCESS_ANY_OWNER, (uaddr_t)uaddr, len); if (res != TEE_SUCCESS) return res; memcpy(uaddr, kaddr, len); return TEE_SUCCESS; } TEE_Result tee_svc_copy_kaddr_to_uref(uint32_t *uref, void *kaddr) { uint32_t ref = tee_svc_kaddr_to_uref(kaddr); return tee_svc_copy_to_user(uref, &ref, sizeof(ref)); } TEE_Result syscall_get_cancellation_flag(uint32_t *cancel) { TEE_Result res; struct tee_ta_session *s = NULL; uint32_t c; res = tee_ta_get_current_session(&s); if (res != TEE_SUCCESS) return res; c = tee_ta_session_is_cancelled(s, NULL); return tee_svc_copy_to_user(cancel, &c, sizeof(c)); } TEE_Result syscall_unmask_cancellation(uint32_t *old_mask) { 
TEE_Result res; struct tee_ta_session *s = NULL; uint32_t m; res = tee_ta_get_current_session(&s); if (res != TEE_SUCCESS) return res; m = s->cancel_mask; s->cancel_mask = false; return tee_svc_copy_to_user(old_mask, &m, sizeof(m)); } TEE_Result syscall_mask_cancellation(uint32_t *old_mask) { TEE_Result res; struct tee_ta_session *s = NULL; uint32_t m; res = tee_ta_get_current_session(&s); if (res != TEE_SUCCESS) return res; m = s->cancel_mask; s->cancel_mask = true; return tee_svc_copy_to_user(old_mask, &m, sizeof(m)); } TEE_Result syscall_wait(unsigned long timeout) { TEE_Result res = TEE_SUCCESS; uint32_t mytime = 0; struct tee_ta_session *s; TEE_Time base_time; TEE_Time current_time; res = tee_ta_get_current_session(&s); if (res != TEE_SUCCESS) return res; res = tee_time_get_sys_time(&base_time); if (res != TEE_SUCCESS) return res; while (true) { res = tee_time_get_sys_time(&current_time); if (res != TEE_SUCCESS) return res; if (tee_ta_session_is_cancelled(s, &current_time)) return TEE_ERROR_CANCEL; mytime = (current_time.seconds - base_time.seconds) * 1000 + (int)current_time.millis - (int)base_time.millis; if (mytime >= timeout) return TEE_SUCCESS; tee_time_wait(timeout - mytime); } return res; } TEE_Result syscall_get_time(unsigned long cat, TEE_Time *mytime) { TEE_Result res, res2; struct tee_ta_session *s = NULL; TEE_Time t; res = tee_ta_get_current_session(&s); if (res != TEE_SUCCESS) return res; switch (cat) { case UTEE_TIME_CAT_SYSTEM: res = tee_time_get_sys_time(&t); break; case UTEE_TIME_CAT_TA_PERSISTENT: res = tee_time_get_ta_time((const void *)&s->ctx->uuid, &t); break; case UTEE_TIME_CAT_REE: res = tee_time_get_ree_time(&t); break; default: res = TEE_ERROR_BAD_PARAMETERS; break; } if (res == TEE_SUCCESS || res == TEE_ERROR_OVERFLOW) { res2 = tee_svc_copy_to_user(mytime, &t, sizeof(t)); if (res2 != TEE_SUCCESS) res = res2; } return res; } TEE_Result syscall_set_ta_time(const TEE_Time *mytime) { TEE_Result res; struct tee_ta_session *s = NULL; 
TEE_Time t; res = tee_ta_get_current_session(&s); if (res != TEE_SUCCESS) return res; res = tee_svc_copy_from_user(&t, mytime, sizeof(t)); if (res != TEE_SUCCESS) return res; return tee_time_set_ta_time((const void *)&s->ctx->uuid, &t); }
// SPDX-License-Identifier: BSD-2-Clause /* * Copyright (c) 2014, STMicroelectronics International N.V. */ #include <util.h> #include <kernel/tee_common_otp.h> #include <kernel/tee_common.h> #include <tee_api_types.h> #include <kernel/tee_ta_manager.h> #include <utee_types.h> #include <tee/tee_svc.h> #include <tee/tee_cryp_utl.h> #include <mm/tee_mmu.h> #include <mm/tee_mm.h> #include <mm/core_memprot.h> #include <kernel/tee_time.h> #include <user_ta_header.h> #include <trace.h> #include <kernel/trace_ta.h> #include <kernel/chip_services.h> #include <kernel/pseudo_ta.h> #include <mm/mobj.h> vaddr_t tee_svc_uref_base; void syscall_log(const void *buf __maybe_unused, size_t len __maybe_unused) { #ifdef CFG_TEE_CORE_TA_TRACE char *kbuf; if (len == 0) return; kbuf = malloc(len + 1); if (kbuf == NULL) return; if (tee_svc_copy_from_user(kbuf, buf, len) == TEE_SUCCESS) { kbuf[len] = '\0'; trace_ext_puts(kbuf); } free(kbuf); #endif } TEE_Result syscall_not_supported(void) { return TEE_ERROR_NOT_SUPPORTED; } /* Configuration properties */ /* API implementation version */ static const char api_vers[] = TO_STR(CFG_TEE_API_VERSION); /* Implementation description (implementation-dependent) */ static const char descr[] = TO_STR(CFG_TEE_IMPL_DESCR); /* * TA persistent time protection level * 100: Persistent time based on an REE-controlled real-time clock * and on the TEE Trusted Storage for the storage of origins (default). * 1000: Persistent time based on a TEE-controlled real-time clock * and the TEE Trusted Storage. * The real-time clock MUST be out of reach of software attacks * from the REE. 
*/ static const uint32_t ta_time_prot_lvl = 100; /* Elliptic Curve Cryptographic support */ #ifdef CFG_CRYPTO_ECC static const bool crypto_ecc_en = 1; #else static const bool crypto_ecc_en; #endif /* * Trusted storage anti rollback protection level * 0 (or missing): No antirollback protection (default) * 100: Antirollback enforced at REE level * 1000: Antirollback TEE-controlled hardware */ #ifdef CFG_RPMB_FS static const uint32_t ts_antiroll_prot_lvl = 1000; #else static const uint32_t ts_antiroll_prot_lvl; #endif /* Trusted OS implementation version */ static const char trustedos_impl_version[] = TO_STR(TEE_IMPL_VERSION); /* Trusted OS implementation version (binary value) */ static const uint32_t trustedos_impl_bin_version; /* 0 by default */ /* Trusted OS implementation manufacturer name */ static const char trustedos_manufacturer[] = TO_STR(CFG_TEE_MANUFACTURER); /* Trusted firmware version */ static const char fw_impl_version[] = TO_STR(CFG_TEE_FW_IMPL_VERSION); /* Trusted firmware version (binary value) */ static const uint32_t fw_impl_bin_version; /* 0 by default */ /* Trusted firmware manufacturer name */ static const char fw_manufacturer[] = TO_STR(CFG_TEE_FW_MANUFACTURER); static TEE_Result get_prop_tee_dev_id(struct tee_ta_session *sess __unused, void *buf, size_t *blen) { TEE_Result res; TEE_UUID uuid; const size_t nslen = 5; uint8_t data[5 + FVR_DIE_ID_NUM_REGS * sizeof(uint32_t)] = { 'O', 'P', 'T', 'E', 'E' }; if (*blen < sizeof(uuid)) { *blen = sizeof(uuid); return TEE_ERROR_SHORT_BUFFER; } *blen = sizeof(uuid); if (tee_otp_get_die_id(data + nslen, sizeof(data) - nslen)) return TEE_ERROR_BAD_STATE; res = tee_hash_createdigest(TEE_ALG_SHA256, data, sizeof(data), (uint8_t *)&uuid, sizeof(uuid)); if (res != TEE_SUCCESS) return TEE_ERROR_BAD_STATE; /* * Changes the random value into and UUID as specifiec * in RFC 4122. The magic values are from the example * code in the RFC. 
* * TEE_UUID is defined slightly different from the RFC, * but close enough for our purpose. */ uuid.timeHiAndVersion &= 0x0fff; uuid.timeHiAndVersion |= 5 << 12; /* uuid.clock_seq_hi_and_reserved in the RFC */ uuid.clockSeqAndNode[0] &= 0x3f; uuid.clockSeqAndNode[0] |= 0x80; return tee_svc_copy_to_user(buf, &uuid, sizeof(TEE_UUID)); } static TEE_Result get_prop_tee_sys_time_prot_level( struct tee_ta_session *sess __unused, void *buf, size_t *blen) { uint32_t prot; if (*blen < sizeof(prot)) { *blen = sizeof(prot); return TEE_ERROR_SHORT_BUFFER; } *blen = sizeof(prot); prot = tee_time_get_sys_time_protection_level(); return tee_svc_copy_to_user(buf, &prot, sizeof(prot)); } static TEE_Result get_prop_client_id(struct tee_ta_session *sess __unused, void *buf, size_t *blen) { if (*blen < sizeof(TEE_Identity)) { *blen = sizeof(TEE_Identity); return TEE_ERROR_SHORT_BUFFER; } *blen = sizeof(TEE_Identity); return tee_svc_copy_to_user(buf, &sess->clnt_id, sizeof(TEE_Identity)); } static TEE_Result get_prop_ta_app_id(struct tee_ta_session *sess, void *buf, size_t *blen) { if (*blen < sizeof(TEE_UUID)) { *blen = sizeof(TEE_UUID); return TEE_ERROR_SHORT_BUFFER; } *blen = sizeof(TEE_UUID); return tee_svc_copy_to_user(buf, &sess->ctx->uuid, sizeof(TEE_UUID)); } /* Properties of the set TEE_PROPSET_CURRENT_CLIENT */ const struct tee_props tee_propset_client[] = { { .name = "gpd.client.identity", .prop_type = USER_TA_PROP_TYPE_IDENTITY, .get_prop_func = get_prop_client_id }, }; /* Properties of the set TEE_PROPSET_CURRENT_TA */ const struct tee_props tee_propset_ta[] = { { .name = "gpd.ta.appID", .prop_type = USER_TA_PROP_TYPE_UUID, .get_prop_func = get_prop_ta_app_id }, /* * Following properties are processed directly in libutee: * TA_PROP_STR_SINGLE_INSTANCE * TA_PROP_STR_MULTI_SESSION * TA_PROP_STR_KEEP_ALIVE * TA_PROP_STR_DATA_SIZE * TA_PROP_STR_STACK_SIZE * TA_PROP_STR_VERSION * TA_PROP_STR_DESCRIPTION * USER_TA_PROP_TYPE_STRING, * TA_DESCRIPTION */ }; /* Properties of the 
set TEE_PROPSET_TEE_IMPLEMENTATION */ const struct tee_props tee_propset_tee[] = { { .name = "gpd.tee.apiversion", .prop_type = USER_TA_PROP_TYPE_STRING, .data = api_vers, .len = sizeof(api_vers), }, { .name = "gpd.tee.description", .prop_type = USER_TA_PROP_TYPE_STRING, .data = descr, .len = sizeof(descr) }, { .name = "gpd.tee.deviceID", .prop_type = USER_TA_PROP_TYPE_UUID, .get_prop_func = get_prop_tee_dev_id }, { .name = "gpd.tee.systemTime.protectionLevel", .prop_type = USER_TA_PROP_TYPE_U32, .get_prop_func = get_prop_tee_sys_time_prot_level }, { .name = "gpd.tee.TAPersistentTime.protectionLevel", .prop_type = USER_TA_PROP_TYPE_U32, .data = &ta_time_prot_lvl, .len = sizeof(ta_time_prot_lvl) }, { .name = "gpd.tee.cryptography.ecc", .prop_type = USER_TA_PROP_TYPE_BOOL, .data = &crypto_ecc_en, .len = sizeof(crypto_ecc_en) }, { .name = "gpd.tee.trustedStorage.antiRollback.protectionLevel", .prop_type = USER_TA_PROP_TYPE_U32, .data = &ts_antiroll_prot_lvl, .len = sizeof(ts_antiroll_prot_lvl) }, { .name = "gpd.tee.trustedos.implementation.version", .prop_type = USER_TA_PROP_TYPE_STRING, .data = trustedos_impl_version, .len = sizeof(trustedos_impl_version) }, { .name = "gpd.tee.trustedos.implementation.binaryversion", .prop_type = USER_TA_PROP_TYPE_U32, .data = &trustedos_impl_bin_version, .len = sizeof(trustedos_impl_bin_version) }, { .name = "gpd.tee.trustedos.manufacturer", .prop_type = USER_TA_PROP_TYPE_STRING, .data = trustedos_manufacturer, .len = sizeof(trustedos_manufacturer) }, { .name = "gpd.tee.firmware.implementation.version", .prop_type = USER_TA_PROP_TYPE_STRING, .data = fw_impl_version, .len = sizeof(fw_impl_version) }, { .name = "gpd.tee.firmware.implementation.binaryversion", .prop_type = USER_TA_PROP_TYPE_U32, .data = &fw_impl_bin_version, .len = sizeof(fw_impl_bin_version) }, { .name = "gpd.tee.firmware.manufacturer", .prop_type = USER_TA_PROP_TYPE_STRING, .data = fw_manufacturer, .len = sizeof(fw_manufacturer) }, /* * Following properties are 
processed directly in libutee: * gpd.tee.arith.maxBigIntSize */ }; __weak const struct tee_vendor_props vendor_props_client; __weak const struct tee_vendor_props vendor_props_ta; __weak const struct tee_vendor_props vendor_props_tee; static void get_prop_set(unsigned long prop_set, const struct tee_props **props, size_t *size, const struct tee_props **vendor_props, size_t *vendor_size) { if ((TEE_PropSetHandle)prop_set == TEE_PROPSET_CURRENT_CLIENT) { *props = tee_propset_client; *size = ARRAY_SIZE(tee_propset_client); *vendor_props = vendor_props_client.props; *vendor_size = vendor_props_client.len; } else if ((TEE_PropSetHandle)prop_set == TEE_PROPSET_CURRENT_TA) { *props = tee_propset_ta; *size = ARRAY_SIZE(tee_propset_ta); *vendor_props = vendor_props_ta.props; *vendor_size = vendor_props_ta.len; } else if ((TEE_PropSetHandle)prop_set == TEE_PROPSET_TEE_IMPLEMENTATION) { *props = tee_propset_tee; *size = ARRAY_SIZE(tee_propset_tee); *vendor_props = vendor_props_tee.props; *vendor_size = vendor_props_tee.len; } else { *props = NULL; *size = 0; *vendor_props = NULL; *vendor_size = 0; } } static const struct tee_props *get_prop_struct(unsigned long prop_set, unsigned long index) { const struct tee_props *props; const struct tee_props *vendor_props; size_t size; size_t vendor_size; get_prop_set(prop_set, &props, &size, &vendor_props, &vendor_size); if (index < size) return &(props[index]); index -= size; if (index < vendor_size) return &(vendor_props[index]); return NULL; } /* * prop_set is part of TEE_PROPSET_xxx * index is the index in the Property Set to retrieve * if name is not NULL, the name of "index" property is returned * if buf is not NULL, the property is returned */ TEE_Result syscall_get_property(unsigned long prop_set, unsigned long index, void *name, uint32_t *name_len, void *buf, uint32_t *blen, uint32_t *prop_type) { struct tee_ta_session *sess; TEE_Result res; TEE_Result res2; const struct tee_props *prop; uint32_t klen; size_t klen_size; uint32_t 
elen; prop = get_prop_struct(prop_set, index); if (!prop) return TEE_ERROR_ITEM_NOT_FOUND; res = tee_ta_get_current_session(&sess); if (res != TEE_SUCCESS) return res; /* Get the property type */ if (prop_type) { res = tee_svc_copy_to_user(prop_type, &prop->prop_type, sizeof(*prop_type)); if (res != TEE_SUCCESS) return res; } /* Get the property */ if (buf && blen) { res = tee_svc_copy_from_user(&klen, blen, sizeof(klen)); if (res != TEE_SUCCESS) return res; if (prop->get_prop_func) { klen_size = klen; res = prop->get_prop_func(sess, buf, &klen_size); klen = klen_size; res2 = tee_svc_copy_to_user(blen, &klen, sizeof(*blen)); } else { if (klen < prop->len) res = TEE_ERROR_SHORT_BUFFER; else res = tee_svc_copy_to_user(buf, prop->data, prop->len); res2 = tee_svc_copy_to_user(blen, &prop->len, sizeof(*blen)); } if (res2 != TEE_SUCCESS) return res2; if (res != TEE_SUCCESS) return res; } /* Get the property name */ if (name && name_len) { res = tee_svc_copy_from_user(&klen, name_len, sizeof(klen)); if (res != TEE_SUCCESS) return res; elen = strlen(prop->name) + 1; if (klen < elen) res = TEE_ERROR_SHORT_BUFFER; else res = tee_svc_copy_to_user(name, prop->name, elen); res2 = tee_svc_copy_to_user(name_len, &elen, sizeof(*name_len)); if (res2 != TEE_SUCCESS) return res2; if (res != TEE_SUCCESS) return res; } return res; } /* * prop_set is part of TEE_PROPSET_xxx */ TEE_Result syscall_get_property_name_to_index(unsigned long prop_set, void *name, unsigned long name_len, uint32_t *index) { TEE_Result res; struct tee_ta_session *sess; const struct tee_props *props; size_t size; const struct tee_props *vendor_props; size_t vendor_size; char *kname = 0; uint32_t i; get_prop_set(prop_set, &props, &size, &vendor_props, &vendor_size); if (!props) return TEE_ERROR_ITEM_NOT_FOUND; res = tee_ta_get_current_session(&sess); if (res != TEE_SUCCESS) goto out; if (!name || !name_len) { res = TEE_ERROR_BAD_PARAMETERS; goto out; } kname = malloc(name_len); if (!kname) return 
TEE_ERROR_OUT_OF_MEMORY; res = tee_svc_copy_from_user(kname, name, name_len); if (res != TEE_SUCCESS) goto out; kname[name_len - 1] = 0; res = TEE_ERROR_ITEM_NOT_FOUND; for (i = 0; i < size; i++) { if (!strcmp(kname, props[i].name)) { res = tee_svc_copy_to_user(index, &i, sizeof(*index)); goto out; } } for (i = size; i < size + vendor_size; i++) { if (!strcmp(kname, vendor_props[i - size].name)) { res = tee_svc_copy_to_user(index, &i, sizeof(*index)); goto out; } } out: free(kname); return res; } static TEE_Result utee_param_to_param(struct user_ta_ctx *utc, struct tee_ta_param *p, struct utee_params *up) { size_t n; uint32_t types = up->types; p->types = types; for (n = 0; n < TEE_NUM_PARAMS; n++) { uintptr_t a = up->vals[n * 2]; size_t b = up->vals[n * 2 + 1]; uint32_t flags = TEE_MEMORY_ACCESS_READ | TEE_MEMORY_ACCESS_ANY_OWNER; switch (TEE_PARAM_TYPE_GET(types, n)) { case TEE_PARAM_TYPE_MEMREF_OUTPUT: case TEE_PARAM_TYPE_MEMREF_INOUT: flags |= TEE_MEMORY_ACCESS_WRITE; /*FALLTHROUGH*/ case TEE_PARAM_TYPE_MEMREF_INPUT: p->u[n].mem.mobj = &mobj_virt; p->u[n].mem.offs = a; p->u[n].mem.size = b; if (tee_mmu_check_access_rights(utc, flags, a, b)) return TEE_ERROR_ACCESS_DENIED; break; case TEE_PARAM_TYPE_VALUE_INPUT: case TEE_PARAM_TYPE_VALUE_INOUT: p->u[n].val.a = a; p->u[n].val.b = b; break; default: memset(&p->u[n], 0, sizeof(p->u[n])); break; } } return TEE_SUCCESS; } static TEE_Result alloc_temp_sec_mem(size_t size, struct mobj **mobj, uint8_t **va) { /* Allocate section in secure DDR */ #ifdef CFG_PAGED_USER_TA *mobj = mobj_seccpy_shm_alloc(size); #else *mobj = mobj_mm_alloc(mobj_sec_ddr, size, &tee_mm_sec_ddr); #endif if (!*mobj) return TEE_ERROR_GENERIC; *va = mobj_get_va(*mobj, 0); return TEE_SUCCESS; } /* * TA invokes some TA with parameter. * If some parameters are memory references: * - either the memref is inside TA private RAM: TA is not allowed to expose * its private RAM: use a temporary memory buffer and copy the data. 
* - or the memref is not in the TA private RAM: * - if the memref was mapped to the TA, TA is allowed to expose it. * - if so, converts memref virtual address into a physical address. */ static TEE_Result tee_svc_copy_param(struct tee_ta_session *sess, struct tee_ta_session *called_sess, struct utee_params *callee_params, struct tee_ta_param *param, void *tmp_buf_va[TEE_NUM_PARAMS], struct mobj **mobj_tmp) { size_t n; TEE_Result res; size_t req_mem = 0; size_t s; uint8_t *dst = 0; bool ta_private_memref[TEE_NUM_PARAMS]; struct user_ta_ctx *utc = to_user_ta_ctx(sess->ctx); void *va; size_t dst_offs; /* fill 'param' input struct with caller params description buffer */ if (!callee_params) { memset(param, 0, sizeof(*param)); } else { res = tee_mmu_check_access_rights(utc, TEE_MEMORY_ACCESS_READ | TEE_MEMORY_ACCESS_ANY_OWNER, (uaddr_t)callee_params, sizeof(struct utee_params)); if (res != TEE_SUCCESS) return res; res = utee_param_to_param(utc, param, callee_params); if (res != TEE_SUCCESS) return res; } if (called_sess && is_pseudo_ta_ctx(called_sess->ctx)) { /* pseudo TA borrows the mapping of the calling TA */ return TEE_SUCCESS; } /* All mobj in param are of type MOJB_TYPE_VIRT */ for (n = 0; n < TEE_NUM_PARAMS; n++) { ta_private_memref[n] = false; switch (TEE_PARAM_TYPE_GET(param->types, n)) { case TEE_PARAM_TYPE_MEMREF_INPUT: case TEE_PARAM_TYPE_MEMREF_OUTPUT: case TEE_PARAM_TYPE_MEMREF_INOUT: va = (void *)param->u[n].mem.offs; s = param->u[n].mem.size; if (!va) { if (s) return TEE_ERROR_BAD_PARAMETERS; break; } /* uTA cannot expose its private memory */ if (tee_mmu_is_vbuf_inside_ta_private(utc, va, s)) { s = ROUNDUP(s, sizeof(uint32_t)); if (ADD_OVERFLOW(req_mem, s, &req_mem)) return TEE_ERROR_BAD_PARAMETERS; ta_private_memref[n] = true; break; } res = tee_mmu_vbuf_to_mobj_offs(utc, va, s, &param->u[n].mem.mobj, &param->u[n].mem.offs); if (res != TEE_SUCCESS) return res; break; default: break; } } if (req_mem == 0) return TEE_SUCCESS; res = 
alloc_temp_sec_mem(req_mem, mobj_tmp, &dst); if (res != TEE_SUCCESS) return res; dst_offs = 0; for (n = 0; n < TEE_NUM_PARAMS; n++) { if (!ta_private_memref[n]) continue; s = ROUNDUP(param->u[n].mem.size, sizeof(uint32_t)); switch (TEE_PARAM_TYPE_GET(param->types, n)) { case TEE_PARAM_TYPE_MEMREF_INPUT: case TEE_PARAM_TYPE_MEMREF_INOUT: va = (void *)param->u[n].mem.offs; if (va) { res = tee_svc_copy_from_user(dst, va, param->u[n].mem.size); if (res != TEE_SUCCESS) return res; param->u[n].mem.offs = dst_offs; param->u[n].mem.mobj = *mobj_tmp; tmp_buf_va[n] = dst; dst += s; dst_offs += s; } break; case TEE_PARAM_TYPE_MEMREF_OUTPUT: va = (void *)param->u[n].mem.offs; if (va) { param->u[n].mem.offs = dst_offs; param->u[n].mem.mobj = *mobj_tmp; tmp_buf_va[n] = dst; dst += s; dst_offs += s; } break; default: continue; } } return TEE_SUCCESS; } /* * Back from execution of service: update parameters passed from TA: * If some parameters were memory references: * - either the memref was temporary: copy back data and update size * - or it was the original TA memref: update only the size value. */ static TEE_Result tee_svc_update_out_param( struct tee_ta_param *param, void *tmp_buf_va[TEE_NUM_PARAMS], struct utee_params *usr_param) { size_t n; uint64_t *vals = usr_param->vals; for (n = 0; n < TEE_NUM_PARAMS; n++) { switch (TEE_PARAM_TYPE_GET(param->types, n)) { case TEE_PARAM_TYPE_MEMREF_OUTPUT: case TEE_PARAM_TYPE_MEMREF_INOUT: /* * Memory copy is only needed if there's a temporary * buffer involved, tmp_buf_va[n] is only update if * a temporary buffer is used. Otherwise only the * size needs to be updated. 
*/ if (tmp_buf_va[n] && param->u[n].mem.size <= vals[n * 2 + 1]) { void *src = tmp_buf_va[n]; void *dst = (void *)(uintptr_t)vals[n * 2]; TEE_Result res; res = tee_svc_copy_to_user(dst, src, param->u[n].mem.size); if (res != TEE_SUCCESS) return res; } usr_param->vals[n * 2 + 1] = param->u[n].mem.size; break; case TEE_PARAM_TYPE_VALUE_OUTPUT: case TEE_PARAM_TYPE_VALUE_INOUT: vals[n * 2] = param->u[n].val.a; vals[n * 2 + 1] = param->u[n].val.b; break; default: continue; } } return TEE_SUCCESS; } /* Called when a TA calls an OpenSession on another TA */ TEE_Result syscall_open_ta_session(const TEE_UUID *dest, unsigned long cancel_req_to, struct utee_params *usr_param, uint32_t *ta_sess, uint32_t *ret_orig) { TEE_Result res; uint32_t ret_o = TEE_ORIGIN_TEE; struct tee_ta_session *s = NULL; struct tee_ta_session *sess; struct mobj *mobj_param = NULL; TEE_UUID *uuid = malloc(sizeof(TEE_UUID)); struct tee_ta_param *param = malloc(sizeof(struct tee_ta_param)); TEE_Identity *clnt_id = malloc(sizeof(TEE_Identity)); void *tmp_buf_va[TEE_NUM_PARAMS] = { NULL }; struct user_ta_ctx *utc; if (uuid == NULL || param == NULL || clnt_id == NULL) { res = TEE_ERROR_OUT_OF_MEMORY; goto out_free_only; } memset(param, 0, sizeof(struct tee_ta_param)); res = tee_ta_get_current_session(&sess); if (res != TEE_SUCCESS) goto out_free_only; utc = to_user_ta_ctx(sess->ctx); res = tee_svc_copy_from_user(uuid, dest, sizeof(TEE_UUID)); if (res != TEE_SUCCESS) goto function_exit; clnt_id->login = TEE_LOGIN_TRUSTED_APP; memcpy(&clnt_id->uuid, &sess->ctx->uuid, sizeof(TEE_UUID)); res = tee_svc_copy_param(sess, NULL, usr_param, param, tmp_buf_va, &mobj_param); if (res != TEE_SUCCESS) goto function_exit; res = tee_ta_open_session(&ret_o, &s, &utc->open_sessions, uuid, clnt_id, cancel_req_to, param); tee_mmu_set_ctx(&utc->ctx); if (res != TEE_SUCCESS) goto function_exit; res = tee_svc_update_out_param(param, tmp_buf_va, usr_param); function_exit: mobj_free(mobj_param); if (res == TEE_SUCCESS) 
tee_svc_copy_kaddr_to_uref(ta_sess, s); tee_svc_copy_to_user(ret_orig, &ret_o, sizeof(ret_o)); out_free_only: free(param); free(uuid); free(clnt_id); return res; } TEE_Result syscall_close_ta_session(unsigned long ta_sess) { TEE_Result res; struct tee_ta_session *sess; TEE_Identity clnt_id; struct tee_ta_session *s = tee_svc_uref_to_kaddr(ta_sess); struct user_ta_ctx *utc; res = tee_ta_get_current_session(&sess); if (res != TEE_SUCCESS) return res; utc = to_user_ta_ctx(sess->ctx); clnt_id.login = TEE_LOGIN_TRUSTED_APP; memcpy(&clnt_id.uuid, &sess->ctx->uuid, sizeof(TEE_UUID)); return tee_ta_close_session(s, &utc->open_sessions, &clnt_id); } TEE_Result syscall_invoke_ta_command(unsigned long ta_sess, unsigned long cancel_req_to, unsigned long cmd_id, struct utee_params *usr_param, uint32_t *ret_orig) { TEE_Result res; TEE_Result res2; uint32_t ret_o = TEE_ORIGIN_TEE; struct tee_ta_param param = { 0 }; TEE_Identity clnt_id; struct tee_ta_session *sess; struct tee_ta_session *called_sess; struct mobj *mobj_param = NULL; void *tmp_buf_va[TEE_NUM_PARAMS] = { NULL }; struct user_ta_ctx *utc; res = tee_ta_get_current_session(&sess); if (res != TEE_SUCCESS) return res; utc = to_user_ta_ctx(sess->ctx); called_sess = tee_ta_get_session( (vaddr_t)tee_svc_uref_to_kaddr(ta_sess), true, &utc->open_sessions); if (!called_sess) return TEE_ERROR_BAD_PARAMETERS; clnt_id.login = TEE_LOGIN_TRUSTED_APP; memcpy(&clnt_id.uuid, &sess->ctx->uuid, sizeof(TEE_UUID)); res = tee_svc_copy_param(sess, called_sess, usr_param, &param, tmp_buf_va, &mobj_param); if (res != TEE_SUCCESS) goto function_exit; res = tee_ta_invoke_command(&ret_o, called_sess, &clnt_id, cancel_req_to, cmd_id, &param); res2 = tee_svc_update_out_param(&param, tmp_buf_va, usr_param); if (res2 != TEE_SUCCESS) { /* * Spec for TEE_InvokeTACommand() says: * "If the return origin is different from * TEE_ORIGIN_TRUSTED_APP, then the function has failed * before it could reach the destination Trusted * Application." 
* * But if we can't update params to the caller we have no * choice we need to return some error to indicate that * parameters aren't updated as expected. */ ret_o = TEE_ORIGIN_TEE; res = res2; } function_exit: tee_ta_put_session(called_sess); mobj_free(mobj_param); if (ret_orig) tee_svc_copy_to_user(ret_orig, &ret_o, sizeof(ret_o)); return res; } TEE_Result syscall_check_access_rights(unsigned long flags, const void *buf, size_t len) { TEE_Result res; struct tee_ta_session *s; res = tee_ta_get_current_session(&s); if (res != TEE_SUCCESS) return res; return tee_mmu_check_access_rights(to_user_ta_ctx(s->ctx), flags, (uaddr_t)buf, len); } TEE_Result tee_svc_copy_from_user(void *kaddr, const void *uaddr, size_t len) { TEE_Result res; struct tee_ta_session *s; res = tee_ta_get_current_session(&s); if (res != TEE_SUCCESS) return res; res = tee_mmu_check_access_rights(to_user_ta_ctx(s->ctx), TEE_MEMORY_ACCESS_READ | TEE_MEMORY_ACCESS_ANY_OWNER, (uaddr_t)uaddr, len); if (res != TEE_SUCCESS) return res; memcpy(kaddr, uaddr, len); return TEE_SUCCESS; } TEE_Result tee_svc_copy_to_user(void *uaddr, const void *kaddr, size_t len) { TEE_Result res; struct tee_ta_session *s; res = tee_ta_get_current_session(&s); if (res != TEE_SUCCESS) return res; res = tee_mmu_check_access_rights(to_user_ta_ctx(s->ctx), TEE_MEMORY_ACCESS_WRITE | TEE_MEMORY_ACCESS_ANY_OWNER, (uaddr_t)uaddr, len); if (res != TEE_SUCCESS) return res; memcpy(uaddr, kaddr, len); return TEE_SUCCESS; } TEE_Result tee_svc_copy_kaddr_to_uref(uint32_t *uref, void *kaddr) { uint32_t ref = tee_svc_kaddr_to_uref(kaddr); return tee_svc_copy_to_user(uref, &ref, sizeof(ref)); } TEE_Result syscall_get_cancellation_flag(uint32_t *cancel) { TEE_Result res; struct tee_ta_session *s = NULL; uint32_t c; res = tee_ta_get_current_session(&s); if (res != TEE_SUCCESS) return res; c = tee_ta_session_is_cancelled(s, NULL); return tee_svc_copy_to_user(cancel, &c, sizeof(c)); } TEE_Result syscall_unmask_cancellation(uint32_t *old_mask) { 
TEE_Result res; struct tee_ta_session *s = NULL; uint32_t m; res = tee_ta_get_current_session(&s); if (res != TEE_SUCCESS) return res; m = s->cancel_mask; s->cancel_mask = false; return tee_svc_copy_to_user(old_mask, &m, sizeof(m)); } TEE_Result syscall_mask_cancellation(uint32_t *old_mask) { TEE_Result res; struct tee_ta_session *s = NULL; uint32_t m; res = tee_ta_get_current_session(&s); if (res != TEE_SUCCESS) return res; m = s->cancel_mask; s->cancel_mask = true; return tee_svc_copy_to_user(old_mask, &m, sizeof(m)); } TEE_Result syscall_wait(unsigned long timeout) { TEE_Result res = TEE_SUCCESS; uint32_t mytime = 0; struct tee_ta_session *s; TEE_Time base_time; TEE_Time current_time; res = tee_ta_get_current_session(&s); if (res != TEE_SUCCESS) return res; res = tee_time_get_sys_time(&base_time); if (res != TEE_SUCCESS) return res; while (true) { res = tee_time_get_sys_time(&current_time); if (res != TEE_SUCCESS) return res; if (tee_ta_session_is_cancelled(s, &current_time)) return TEE_ERROR_CANCEL; mytime = (current_time.seconds - base_time.seconds) * 1000 + (int)current_time.millis - (int)base_time.millis; if (mytime >= timeout) return TEE_SUCCESS; tee_time_wait(timeout - mytime); } return res; } TEE_Result syscall_get_time(unsigned long cat, TEE_Time *mytime) { TEE_Result res, res2; struct tee_ta_session *s = NULL; TEE_Time t; res = tee_ta_get_current_session(&s); if (res != TEE_SUCCESS) return res; switch (cat) { case UTEE_TIME_CAT_SYSTEM: res = tee_time_get_sys_time(&t); break; case UTEE_TIME_CAT_TA_PERSISTENT: res = tee_time_get_ta_time((const void *)&s->ctx->uuid, &t); break; case UTEE_TIME_CAT_REE: res = tee_time_get_ree_time(&t); break; default: res = TEE_ERROR_BAD_PARAMETERS; break; } if (res == TEE_SUCCESS || res == TEE_ERROR_OVERFLOW) { res2 = tee_svc_copy_to_user(mytime, &t, sizeof(t)); if (res2 != TEE_SUCCESS) res = res2; } return res; } TEE_Result syscall_set_ta_time(const TEE_Time *mytime) { TEE_Result res; struct tee_ta_session *s = NULL; 
TEE_Time t; res = tee_ta_get_current_session(&s); if (res != TEE_SUCCESS) return res; res = tee_svc_copy_from_user(&t, mytime, sizeof(t)); if (res != TEE_SUCCESS) return res; return tee_time_set_ta_time((const void *)&s->ctx->uuid, &t); }
static void utee_param_to_param(struct tee_ta_param *p, struct utee_params *up) { size_t n; uint32_t types = up->types; p->types = types; for (n = 0; n < TEE_NUM_PARAMS; n++) { uintptr_t a = up->vals[n * 2]; size_t b = up->vals[n * 2 + 1]; switch (TEE_PARAM_TYPE_GET(types, n)) { case TEE_PARAM_TYPE_MEMREF_INPUT: case TEE_PARAM_TYPE_MEMREF_OUTPUT: case TEE_PARAM_TYPE_MEMREF_INOUT: p->u[n].mem.mobj = &mobj_virt; p->u[n].mem.offs = a; p->u[n].mem.size = b; break; case TEE_PARAM_TYPE_VALUE_INPUT: case TEE_PARAM_TYPE_VALUE_INOUT: p->u[n].val.a = a; p->u[n].val.b = b; break; default: memset(&p->u[n], 0, sizeof(p->u[n])); break; } } }
static TEE_Result utee_param_to_param(struct user_ta_ctx *utc, struct tee_ta_param *p, struct utee_params *up) { size_t n; uint32_t types = up->types; p->types = types; for (n = 0; n < TEE_NUM_PARAMS; n++) { uintptr_t a = up->vals[n * 2]; size_t b = up->vals[n * 2 + 1]; uint32_t flags = TEE_MEMORY_ACCESS_READ | TEE_MEMORY_ACCESS_ANY_OWNER; switch (TEE_PARAM_TYPE_GET(types, n)) { case TEE_PARAM_TYPE_MEMREF_OUTPUT: case TEE_PARAM_TYPE_MEMREF_INOUT: flags |= TEE_MEMORY_ACCESS_WRITE; /*FALLTHROUGH*/ case TEE_PARAM_TYPE_MEMREF_INPUT: p->u[n].mem.mobj = &mobj_virt; p->u[n].mem.offs = a; p->u[n].mem.size = b; if (tee_mmu_check_access_rights(utc, flags, a, b)) return TEE_ERROR_ACCESS_DENIED; break; case TEE_PARAM_TYPE_VALUE_INPUT: case TEE_PARAM_TYPE_VALUE_INOUT: p->u[n].val.a = a; p->u[n].val.b = b; break; default: memset(&p->u[n], 0, sizeof(p->u[n])); break; } } return TEE_SUCCESS; }
{'added': [(497, 'static TEE_Result utee_param_to_param(struct user_ta_ctx *utc,'), (498, '\t\t\t\t struct tee_ta_param *p,'), (499, '\t\t\t\t struct utee_params *up)'), (508, '\t\tuint32_t flags = TEE_MEMORY_ACCESS_READ |'), (509, '\t\t\t\t TEE_MEMORY_ACCESS_ANY_OWNER;'), (514, '\t\t\tflags |= TEE_MEMORY_ACCESS_WRITE;'), (515, '\t\t\t/*FALLTHROUGH*/'), (516, '\t\tcase TEE_PARAM_TYPE_MEMREF_INPUT:'), (520, '\t\t\tif (tee_mmu_check_access_rights(utc, flags, a, b))'), (521, '\t\t\t\treturn TEE_ERROR_ACCESS_DENIED;'), (533, ''), (534, '\treturn TEE_SUCCESS;'), (588, '\t\tres = utee_param_to_param(utc, param, callee_params);'), (589, '\t\tif (res != TEE_SUCCESS)'), (590, '\t\t\treturn res;')], 'deleted': [(497, 'static void utee_param_to_param(struct tee_ta_param *p, struct utee_params *up)'), (508, '\t\tcase TEE_PARAM_TYPE_MEMREF_INPUT:'), (578, '\t\tutee_param_to_param(param, callee_params);')]}
15
3
800
4,667
27
194
7
https://github.com/OP-TEE/optee_os
CVE-2019-1010295
CWE-20
2,345
keyring.c
C
__releases
/* Keyring handling * * Copyright (C) 2004-2005, 2008, 2013 Red Hat, Inc. All Rights Reserved. * Written by David Howells (dhowells@redhat.com) * * This program is free software; you can redistribute it and/or * modify it under the terms of the GNU General Public License * as published by the Free Software Foundation; either version * 2 of the License, or (at your option) any later version. */ #include <linux/module.h> #include <linux/init.h> #include <linux/sched.h> #include <linux/slab.h> #include <linux/security.h> #include <linux/seq_file.h> #include <linux/err.h> #include <keys/keyring-type.h> #include <keys/user-type.h> #include <linux/assoc_array_priv.h> #include <linux/uaccess.h> #include "internal.h" /* * When plumbing the depths of the key tree, this sets a hard limit * set on how deep we're willing to go. */ #define KEYRING_SEARCH_MAX_DEPTH 6 /* * We keep all named keyrings in a hash to speed looking them up. */ #define KEYRING_NAME_HASH_SIZE (1 << 5) /* * We mark pointers we pass to the associative array with bit 1 set if * they're keyrings and clear otherwise. */ #define KEYRING_PTR_SUBTYPE 0x2UL static inline bool keyring_ptr_is_keyring(const struct assoc_array_ptr *x) { return (unsigned long)x & KEYRING_PTR_SUBTYPE; } static inline struct key *keyring_ptr_to_key(const struct assoc_array_ptr *x) { void *object = assoc_array_ptr_to_leaf(x); return (struct key *)((unsigned long)object & ~KEYRING_PTR_SUBTYPE); } static inline void *keyring_key_to_ptr(struct key *key) { if (key->type == &key_type_keyring) return (void *)((unsigned long)key | KEYRING_PTR_SUBTYPE); return key; } static struct list_head keyring_name_hash[KEYRING_NAME_HASH_SIZE]; static DEFINE_RWLOCK(keyring_name_lock); static inline unsigned keyring_hash(const char *desc) { unsigned bucket = 0; for (; *desc; desc++) bucket += (unsigned char)*desc; return bucket & (KEYRING_NAME_HASH_SIZE - 1); } /* * The keyring key type definition. 
Keyrings are simply keys of this type and * can be treated as ordinary keys in addition to having their own special * operations. */ static int keyring_preparse(struct key_preparsed_payload *prep); static void keyring_free_preparse(struct key_preparsed_payload *prep); static int keyring_instantiate(struct key *keyring, struct key_preparsed_payload *prep); static void keyring_revoke(struct key *keyring); static void keyring_destroy(struct key *keyring); static void keyring_describe(const struct key *keyring, struct seq_file *m); static long keyring_read(const struct key *keyring, char __user *buffer, size_t buflen); struct key_type key_type_keyring = { .name = "keyring", .def_datalen = 0, .preparse = keyring_preparse, .free_preparse = keyring_free_preparse, .instantiate = keyring_instantiate, .revoke = keyring_revoke, .destroy = keyring_destroy, .describe = keyring_describe, .read = keyring_read, }; EXPORT_SYMBOL(key_type_keyring); /* * Semaphore to serialise link/link calls to prevent two link calls in parallel * introducing a cycle. */ static DECLARE_RWSEM(keyring_serialise_link_sem); /* * Publish the name of a keyring so that it can be found by name (if it has * one). */ static void keyring_publish_name(struct key *keyring) { int bucket; if (keyring->description) { bucket = keyring_hash(keyring->description); write_lock(&keyring_name_lock); if (!keyring_name_hash[bucket].next) INIT_LIST_HEAD(&keyring_name_hash[bucket]); list_add_tail(&keyring->type_data.link, &keyring_name_hash[bucket]); write_unlock(&keyring_name_lock); } } /* * Preparse a keyring payload */ static int keyring_preparse(struct key_preparsed_payload *prep) { return prep->datalen != 0 ? -EINVAL : 0; } /* * Free a preparse of a user defined key payload */ static void keyring_free_preparse(struct key_preparsed_payload *prep) { } /* * Initialise a keyring. * * Returns 0 on success, -EINVAL if given any data. 
*/ static int keyring_instantiate(struct key *keyring, struct key_preparsed_payload *prep) { assoc_array_init(&keyring->keys); /* make the keyring available by name if it has one */ keyring_publish_name(keyring); return 0; } /* * Multiply 64-bits by 32-bits to 96-bits and fold back to 64-bit. Ideally we'd * fold the carry back too, but that requires inline asm. */ static u64 mult_64x32_and_fold(u64 x, u32 y) { u64 hi = (u64)(u32)(x >> 32) * y; u64 lo = (u64)(u32)(x) * y; return lo + ((u64)(u32)hi << 32) + (u32)(hi >> 32); } /* * Hash a key type and description. */ static unsigned long hash_key_type_and_desc(const struct keyring_index_key *index_key) { const unsigned level_shift = ASSOC_ARRAY_LEVEL_STEP; const unsigned long fan_mask = ASSOC_ARRAY_FAN_MASK; const char *description = index_key->description; unsigned long hash, type; u32 piece; u64 acc; int n, desc_len = index_key->desc_len; type = (unsigned long)index_key->type; acc = mult_64x32_and_fold(type, desc_len + 13); acc = mult_64x32_and_fold(acc, 9207); for (;;) { n = desc_len; if (n <= 0) break; if (n > 4) n = 4; piece = 0; memcpy(&piece, description, n); description += n; desc_len -= n; acc = mult_64x32_and_fold(acc, piece); acc = mult_64x32_and_fold(acc, 9207); } /* Fold the hash down to 32 bits if need be. */ hash = acc; if (ASSOC_ARRAY_KEY_CHUNK_SIZE == 32) hash ^= acc >> 32; /* Squidge all the keyrings into a separate part of the tree to * ordinary keys by making sure the lowest level segment in the hash is * zero for keyrings and non-zero otherwise. */ if (index_key->type != &key_type_keyring && (hash & fan_mask) == 0) return hash | (hash >> (ASSOC_ARRAY_KEY_CHUNK_SIZE - level_shift)) | 1; if (index_key->type == &key_type_keyring && (hash & fan_mask) != 0) return (hash + (hash << level_shift)) & ~fan_mask; return hash; } /* * Build the next index key chunk. * * On 32-bit systems the index key is laid out as: * * 0 4 5 9... * hash desclen typeptr desc[] * * On 64-bit systems: * * 0 8 9 17... 
* hash desclen typeptr desc[] * * We return it one word-sized chunk at a time. */ static unsigned long keyring_get_key_chunk(const void *data, int level) { const struct keyring_index_key *index_key = data; unsigned long chunk = 0; long offset = 0; int desc_len = index_key->desc_len, n = sizeof(chunk); level /= ASSOC_ARRAY_KEY_CHUNK_SIZE; switch (level) { case 0: return hash_key_type_and_desc(index_key); case 1: return ((unsigned long)index_key->type << 8) | desc_len; case 2: if (desc_len == 0) return (u8)((unsigned long)index_key->type >> (ASSOC_ARRAY_KEY_CHUNK_SIZE - 8)); n--; offset = 1; default: offset += sizeof(chunk) - 1; offset += (level - 3) * sizeof(chunk); if (offset >= desc_len) return 0; desc_len -= offset; if (desc_len > n) desc_len = n; offset += desc_len; do { chunk <<= 8; chunk |= ((u8*)index_key->description)[--offset]; } while (--desc_len > 0); if (level == 2) { chunk <<= 8; chunk |= (u8)((unsigned long)index_key->type >> (ASSOC_ARRAY_KEY_CHUNK_SIZE - 8)); } return chunk; } } static unsigned long keyring_get_object_key_chunk(const void *object, int level) { const struct key *key = keyring_ptr_to_key(object); return keyring_get_key_chunk(&key->index_key, level); } static bool keyring_compare_object(const void *object, const void *data) { const struct keyring_index_key *index_key = data; const struct key *key = keyring_ptr_to_key(object); return key->index_key.type == index_key->type && key->index_key.desc_len == index_key->desc_len && memcmp(key->index_key.description, index_key->description, index_key->desc_len) == 0; } /* * Compare the index keys of a pair of objects and determine the bit position * at which they differ - if they differ. 
*/ static int keyring_diff_objects(const void *object, const void *data) { const struct key *key_a = keyring_ptr_to_key(object); const struct keyring_index_key *a = &key_a->index_key; const struct keyring_index_key *b = data; unsigned long seg_a, seg_b; int level, i; level = 0; seg_a = hash_key_type_and_desc(a); seg_b = hash_key_type_and_desc(b); if ((seg_a ^ seg_b) != 0) goto differ; /* The number of bits contributed by the hash is controlled by a * constant in the assoc_array headers. Everything else thereafter we * can deal with as being machine word-size dependent. */ level += ASSOC_ARRAY_KEY_CHUNK_SIZE / 8; seg_a = a->desc_len; seg_b = b->desc_len; if ((seg_a ^ seg_b) != 0) goto differ; /* The next bit may not work on big endian */ level++; seg_a = (unsigned long)a->type; seg_b = (unsigned long)b->type; if ((seg_a ^ seg_b) != 0) goto differ; level += sizeof(unsigned long); if (a->desc_len == 0) goto same; i = 0; if (((unsigned long)a->description | (unsigned long)b->description) & (sizeof(unsigned long) - 1)) { do { seg_a = *(unsigned long *)(a->description + i); seg_b = *(unsigned long *)(b->description + i); if ((seg_a ^ seg_b) != 0) goto differ_plus_i; i += sizeof(unsigned long); } while (i < (a->desc_len & (sizeof(unsigned long) - 1))); } for (; i < a->desc_len; i++) { seg_a = *(unsigned char *)(a->description + i); seg_b = *(unsigned char *)(b->description + i); if ((seg_a ^ seg_b) != 0) goto differ_plus_i; } same: return -1; differ_plus_i: level += i; differ: i = level * 8 + __ffs(seg_a ^ seg_b); return i; } /* * Free an object after stripping the keyring flag off of the pointer. */ static void keyring_free_object(void *object) { key_put(keyring_ptr_to_key(object)); } /* * Operations for keyring management by the index-tree routines. 
 */
/* Operation table hooking the keyring index-key/object helpers above into the
 * generic associative-array implementation. */
static const struct assoc_array_ops keyring_assoc_array_ops = {
	.get_key_chunk		= keyring_get_key_chunk,
	.get_object_key_chunk	= keyring_get_object_key_chunk,
	.compare_object		= keyring_compare_object,
	.diff_objects		= keyring_diff_objects,
	.free_object		= keyring_free_object,
};

/*
 * Clean up a keyring when it is destroyed.  Unpublish its name if it had one
 * and dispose of its data.
 *
 * The garbage collector detects the final key_put(), removes the keyring from
 * the serial number tree and then does RCU synchronisation before coming here,
 * so we shouldn't need to worry about code poking around here with the RCU
 * readlock held by this time.
 */
static void keyring_destroy(struct key *keyring)
{
	if (keyring->description) {
		write_lock(&keyring_name_lock);

		/* Only unlink if keyring_publish_name() actually added us to
		 * the name hash (list head non-NULL) and we're still queued. */
		if (keyring->type_data.link.next != NULL &&
		    !list_empty(&keyring->type_data.link))
			list_del(&keyring->type_data.link);

		write_unlock(&keyring_name_lock);
	}

	/* Drops the links (and thus refs) this keyring holds on its keys. */
	assoc_array_destroy(&keyring->keys, &keyring_assoc_array_ops);
}

/*
 * Describe a keyring for /proc.
*/ static void keyring_describe(const struct key *keyring, struct seq_file *m) { if (keyring->description) seq_puts(m, keyring->description); else seq_puts(m, "[anon]"); if (key_is_instantiated(keyring)) { if (keyring->keys.nr_leaves_on_tree != 0) seq_printf(m, ": %lu", keyring->keys.nr_leaves_on_tree); else seq_puts(m, ": empty"); } } struct keyring_read_iterator_context { size_t qty; size_t count; key_serial_t __user *buffer; }; static int keyring_read_iterator(const void *object, void *data) { struct keyring_read_iterator_context *ctx = data; const struct key *key = keyring_ptr_to_key(object); int ret; kenter("{%s,%d},,{%zu/%zu}", key->type->name, key->serial, ctx->count, ctx->qty); if (ctx->count >= ctx->qty) return 1; ret = put_user(key->serial, ctx->buffer); if (ret < 0) return ret; ctx->buffer++; ctx->count += sizeof(key->serial); return 0; } /* * Read a list of key IDs from the keyring's contents in binary form * * The keyring's semaphore is read-locked by the caller. This prevents someone * from modifying it under us - which could cause us to read key IDs multiple * times. */ static long keyring_read(const struct key *keyring, char __user *buffer, size_t buflen) { struct keyring_read_iterator_context ctx; unsigned long nr_keys; int ret; kenter("{%d},,%zu", key_serial(keyring), buflen); if (buflen & (sizeof(key_serial_t) - 1)) return -EINVAL; nr_keys = keyring->keys.nr_leaves_on_tree; if (nr_keys == 0) return 0; /* Calculate how much data we could return */ ctx.qty = nr_keys * sizeof(key_serial_t); if (!buffer || !buflen) return ctx.qty; if (buflen > ctx.qty) ctx.qty = buflen; /* Copy the IDs of the subscribed keys into the buffer */ ctx.buffer = (key_serial_t __user *)buffer; ctx.count = 0; ret = assoc_array_iterate(&keyring->keys, keyring_read_iterator, &ctx); if (ret < 0) { kleave(" = %d [iterate]", ret); return ret; } kleave(" = %zu [ok]", ctx.count); return ctx.count; } /* * Allocate a keyring and link into the destination keyring. 
*/ struct key *keyring_alloc(const char *description, kuid_t uid, kgid_t gid, const struct cred *cred, key_perm_t perm, unsigned long flags, struct key *dest) { struct key *keyring; int ret; keyring = key_alloc(&key_type_keyring, description, uid, gid, cred, perm, flags); if (!IS_ERR(keyring)) { ret = key_instantiate_and_link(keyring, NULL, 0, dest, NULL); if (ret < 0) { key_put(keyring); keyring = ERR_PTR(ret); } } return keyring; } EXPORT_SYMBOL(keyring_alloc); /* * By default, we keys found by getting an exact match on their descriptions. */ bool key_default_cmp(const struct key *key, const struct key_match_data *match_data) { return strcmp(key->description, match_data->raw_data) == 0; } /* * Iteration function to consider each key found. */ static int keyring_search_iterator(const void *object, void *iterator_data) { struct keyring_search_context *ctx = iterator_data; const struct key *key = keyring_ptr_to_key(object); unsigned long kflags = key->flags; kenter("{%d}", key->serial); /* ignore keys not of this type */ if (key->type != ctx->index_key.type) { kleave(" = 0 [!type]"); return 0; } /* skip invalidated, revoked and expired keys */ if (ctx->flags & KEYRING_SEARCH_DO_STATE_CHECK) { if (kflags & ((1 << KEY_FLAG_INVALIDATED) | (1 << KEY_FLAG_REVOKED))) { ctx->result = ERR_PTR(-EKEYREVOKED); kleave(" = %d [invrev]", ctx->skipped_ret); goto skipped; } if (key->expiry && ctx->now.tv_sec >= key->expiry) { if (!(ctx->flags & KEYRING_SEARCH_SKIP_EXPIRED)) ctx->result = ERR_PTR(-EKEYEXPIRED); kleave(" = %d [expire]", ctx->skipped_ret); goto skipped; } } /* keys that don't match */ if (!ctx->match_data.cmp(key, &ctx->match_data)) { kleave(" = 0 [!match]"); return 0; } /* key must have search permissions */ if (!(ctx->flags & KEYRING_SEARCH_NO_CHECK_PERM) && key_task_permission(make_key_ref(key, ctx->possessed), ctx->cred, KEY_NEED_SEARCH) < 0) { ctx->result = ERR_PTR(-EACCES); kleave(" = %d [!perm]", ctx->skipped_ret); goto skipped; } if (ctx->flags & 
KEYRING_SEARCH_DO_STATE_CHECK) { /* we set a different error code if we pass a negative key */ if (kflags & (1 << KEY_FLAG_NEGATIVE)) { smp_rmb(); ctx->result = ERR_PTR(key->type_data.reject_error); kleave(" = %d [neg]", ctx->skipped_ret); goto skipped; } } /* Found */ ctx->result = make_key_ref(key, ctx->possessed); kleave(" = 1 [found]"); return 1; skipped: return ctx->skipped_ret; } /* * Search inside a keyring for a key. We can search by walking to it * directly based on its index-key or we can iterate over the entire * tree looking for it, based on the match function. */ static int search_keyring(struct key *keyring, struct keyring_search_context *ctx) { if (ctx->match_data.lookup_type == KEYRING_SEARCH_LOOKUP_DIRECT) { const void *object; object = assoc_array_find(&keyring->keys, &keyring_assoc_array_ops, &ctx->index_key); return object ? ctx->iterator(object, ctx) : 0; } return assoc_array_iterate(&keyring->keys, ctx->iterator, ctx); } /* * Search a tree of keyrings that point to other keyrings up to the maximum * depth. */ static bool search_nested_keyrings(struct key *keyring, struct keyring_search_context *ctx) { struct { struct key *keyring; struct assoc_array_node *node; int slot; } stack[KEYRING_SEARCH_MAX_DEPTH]; struct assoc_array_shortcut *shortcut; struct assoc_array_node *node; struct assoc_array_ptr *ptr; struct key *key; int sp = 0, slot; kenter("{%d},{%s,%s}", keyring->serial, ctx->index_key.type->name, ctx->index_key.description); #define STATE_CHECKS (KEYRING_SEARCH_NO_STATE_CHECK | KEYRING_SEARCH_DO_STATE_CHECK) BUG_ON((ctx->flags & STATE_CHECKS) == 0 || (ctx->flags & STATE_CHECKS) == STATE_CHECKS); if (ctx->index_key.description) ctx->index_key.desc_len = strlen(ctx->index_key.description); /* Check to see if this top-level keyring is what we are looking for * and whether it is valid or not. 
*/ if (ctx->match_data.lookup_type == KEYRING_SEARCH_LOOKUP_ITERATE || keyring_compare_object(keyring, &ctx->index_key)) { ctx->skipped_ret = 2; switch (ctx->iterator(keyring_key_to_ptr(keyring), ctx)) { case 1: goto found; case 2: return false; default: break; } } ctx->skipped_ret = 0; /* Start processing a new keyring */ descend_to_keyring: kdebug("descend to %d", keyring->serial); if (keyring->flags & ((1 << KEY_FLAG_INVALIDATED) | (1 << KEY_FLAG_REVOKED))) goto not_this_keyring; /* Search through the keys in this keyring before its searching its * subtrees. */ if (search_keyring(keyring, ctx)) goto found; /* Then manually iterate through the keyrings nested in this one. * * Start from the root node of the index tree. Because of the way the * hash function has been set up, keyrings cluster on the leftmost * branch of the root node (root slot 0) or in the root node itself. * Non-keyrings avoid the leftmost branch of the root entirely (root * slots 1-15). */ ptr = ACCESS_ONCE(keyring->keys.root); if (!ptr) goto not_this_keyring; if (assoc_array_ptr_is_shortcut(ptr)) { /* If the root is a shortcut, either the keyring only contains * keyring pointers (everything clusters behind root slot 0) or * doesn't contain any keyring pointers. */ shortcut = assoc_array_ptr_to_shortcut(ptr); smp_read_barrier_depends(); if ((shortcut->index_key[0] & ASSOC_ARRAY_FAN_MASK) != 0) goto not_this_keyring; ptr = ACCESS_ONCE(shortcut->next_node); node = assoc_array_ptr_to_node(ptr); goto begin_node; } node = assoc_array_ptr_to_node(ptr); smp_read_barrier_depends(); ptr = node->slots[0]; if (!assoc_array_ptr_is_meta(ptr)) goto begin_node; descend_to_node: /* Descend to a more distal node in this keyring's content tree and go * through that. 
*/ kdebug("descend"); if (assoc_array_ptr_is_shortcut(ptr)) { shortcut = assoc_array_ptr_to_shortcut(ptr); smp_read_barrier_depends(); ptr = ACCESS_ONCE(shortcut->next_node); BUG_ON(!assoc_array_ptr_is_node(ptr)); } node = assoc_array_ptr_to_node(ptr); begin_node: kdebug("begin_node"); smp_read_barrier_depends(); slot = 0; ascend_to_node: /* Go through the slots in a node */ for (; slot < ASSOC_ARRAY_FAN_OUT; slot++) { ptr = ACCESS_ONCE(node->slots[slot]); if (assoc_array_ptr_is_meta(ptr) && node->back_pointer) goto descend_to_node; if (!keyring_ptr_is_keyring(ptr)) continue; key = keyring_ptr_to_key(ptr); if (sp >= KEYRING_SEARCH_MAX_DEPTH) { if (ctx->flags & KEYRING_SEARCH_DETECT_TOO_DEEP) { ctx->result = ERR_PTR(-ELOOP); return false; } goto not_this_keyring; } /* Search a nested keyring */ if (!(ctx->flags & KEYRING_SEARCH_NO_CHECK_PERM) && key_task_permission(make_key_ref(key, ctx->possessed), ctx->cred, KEY_NEED_SEARCH) < 0) continue; /* stack the current position */ stack[sp].keyring = keyring; stack[sp].node = node; stack[sp].slot = slot; sp++; /* begin again with the new keyring */ keyring = key; goto descend_to_keyring; } /* We've dealt with all the slots in the current node, so now we need * to ascend to the parent and continue processing there. */ ptr = ACCESS_ONCE(node->back_pointer); slot = node->parent_slot; if (ptr && assoc_array_ptr_is_shortcut(ptr)) { shortcut = assoc_array_ptr_to_shortcut(ptr); smp_read_barrier_depends(); ptr = ACCESS_ONCE(shortcut->back_pointer); slot = shortcut->parent_slot; } if (!ptr) goto not_this_keyring; node = assoc_array_ptr_to_node(ptr); smp_read_barrier_depends(); slot++; /* If we've ascended to the root (zero backpointer), we must have just * finished processing the leftmost branch rather than the root slots - * so there can't be any more keyrings for us to find. 
*/ if (node->back_pointer) { kdebug("ascend %d", slot); goto ascend_to_node; } /* The keyring we're looking at was disqualified or didn't contain a * matching key. */ not_this_keyring: kdebug("not_this_keyring %d", sp); if (sp <= 0) { kleave(" = false"); return false; } /* Resume the processing of a keyring higher up in the tree */ sp--; keyring = stack[sp].keyring; node = stack[sp].node; slot = stack[sp].slot + 1; kdebug("ascend to %d [%d]", keyring->serial, slot); goto ascend_to_node; /* We found a viable match */ found: key = key_ref_to_ptr(ctx->result); key_check(key); if (!(ctx->flags & KEYRING_SEARCH_NO_UPDATE_TIME)) { key->last_used_at = ctx->now.tv_sec; keyring->last_used_at = ctx->now.tv_sec; while (sp > 0) stack[--sp].keyring->last_used_at = ctx->now.tv_sec; } kleave(" = true"); return true; } /** * keyring_search_aux - Search a keyring tree for a key matching some criteria * @keyring_ref: A pointer to the keyring with possession indicator. * @ctx: The keyring search context. * * Search the supplied keyring tree for a key that matches the criteria given. * The root keyring and any linked keyrings must grant Search permission to the * caller to be searchable and keys can only be found if they too grant Search * to the caller. The possession flag on the root keyring pointer controls use * of the possessor bits in permissions checking of the entire tree. In * addition, the LSM gets to forbid keyring searches and key matches. * * The search is performed as a breadth-then-depth search up to the prescribed * limit (KEYRING_SEARCH_MAX_DEPTH). * * Keys are matched to the type provided and are then filtered by the match * function, which is given the description to use in any way it sees fit. The * match function may use any attributes of a key that it wishes to to * determine the match. Normally the match function from the key type would be * used. * * RCU can be used to prevent the keyring key lists from disappearing without * the need to take lots of locks. 
* * Returns a pointer to the found key and increments the key usage count if * successful; -EAGAIN if no matching keys were found, or if expired or revoked * keys were found; -ENOKEY if only negative keys were found; -ENOTDIR if the * specified keyring wasn't a keyring. * * In the case of a successful return, the possession attribute from * @keyring_ref is propagated to the returned key reference. */ key_ref_t keyring_search_aux(key_ref_t keyring_ref, struct keyring_search_context *ctx) { struct key *keyring; long err; ctx->iterator = keyring_search_iterator; ctx->possessed = is_key_possessed(keyring_ref); ctx->result = ERR_PTR(-EAGAIN); keyring = key_ref_to_ptr(keyring_ref); key_check(keyring); if (keyring->type != &key_type_keyring) return ERR_PTR(-ENOTDIR); if (!(ctx->flags & KEYRING_SEARCH_NO_CHECK_PERM)) { err = key_task_permission(keyring_ref, ctx->cred, KEY_NEED_SEARCH); if (err < 0) return ERR_PTR(err); } rcu_read_lock(); ctx->now = current_kernel_time(); if (search_nested_keyrings(keyring, ctx)) __key_get(key_ref_to_ptr(ctx->result)); rcu_read_unlock(); return ctx->result; } /** * keyring_search - Search the supplied keyring tree for a matching key * @keyring: The root of the keyring tree to be searched. * @type: The type of keyring we want to find. * @description: The name of the keyring we want to find. * * As keyring_search_aux() above, but using the current task's credentials and * type's default matching function and preferred search method. 
*/ key_ref_t keyring_search(key_ref_t keyring, struct key_type *type, const char *description) { struct keyring_search_context ctx = { .index_key.type = type, .index_key.description = description, .cred = current_cred(), .match_data.cmp = key_default_cmp, .match_data.raw_data = description, .match_data.lookup_type = KEYRING_SEARCH_LOOKUP_DIRECT, .flags = KEYRING_SEARCH_DO_STATE_CHECK, }; key_ref_t key; int ret; if (type->match_preparse) { ret = type->match_preparse(&ctx.match_data); if (ret < 0) return ERR_PTR(ret); } key = keyring_search_aux(keyring, &ctx); if (type->match_free) type->match_free(&ctx.match_data); return key; } EXPORT_SYMBOL(keyring_search); /* * Search the given keyring for a key that might be updated. * * The caller must guarantee that the keyring is a keyring and that the * permission is granted to modify the keyring as no check is made here. The * caller must also hold a lock on the keyring semaphore. * * Returns a pointer to the found key with usage count incremented if * successful and returns NULL if not found. Revoked and invalidated keys are * skipped over. * * If successful, the possession indicator is propagated from the keyring ref * to the returned key reference. */ key_ref_t find_key_to_update(key_ref_t keyring_ref, const struct keyring_index_key *index_key) { struct key *keyring, *key; const void *object; keyring = key_ref_to_ptr(keyring_ref); kenter("{%d},{%s,%s}", keyring->serial, index_key->type->name, index_key->description); object = assoc_array_find(&keyring->keys, &keyring_assoc_array_ops, index_key); if (object) goto found; kleave(" = NULL"); return NULL; found: key = keyring_ptr_to_key(object); if (key->flags & ((1 << KEY_FLAG_INVALIDATED) | (1 << KEY_FLAG_REVOKED))) { kleave(" = NULL [x]"); return NULL; } __key_get(key); kleave(" = {%d}", key->serial); return make_key_ref(key, is_key_possessed(keyring_ref)); } /* * Find a keyring with the specified name. 
* * All named keyrings in the current user namespace are searched, provided they * grant Search permission directly to the caller (unless this check is * skipped). Keyrings whose usage points have reached zero or who have been * revoked are skipped. * * Returns a pointer to the keyring with the keyring's refcount having being * incremented on success. -ENOKEY is returned if a key could not be found. */ struct key *find_keyring_by_name(const char *name, bool skip_perm_check) { struct key *keyring; int bucket; if (!name) return ERR_PTR(-EINVAL); bucket = keyring_hash(name); read_lock(&keyring_name_lock); if (keyring_name_hash[bucket].next) { /* search this hash bucket for a keyring with a matching name * that's readable and that hasn't been revoked */ list_for_each_entry(keyring, &keyring_name_hash[bucket], type_data.link ) { if (!kuid_has_mapping(current_user_ns(), keyring->user->uid)) continue; if (test_bit(KEY_FLAG_REVOKED, &keyring->flags)) continue; if (strcmp(keyring->description, name) != 0) continue; if (!skip_perm_check && key_permission(make_key_ref(keyring, 0), KEY_NEED_SEARCH) < 0) continue; /* we've got a match but we might end up racing with * key_cleanup() if the keyring is currently 'dead' * (ie. it has a zero usage count) */ if (!atomic_inc_not_zero(&keyring->usage)) continue; keyring->last_used_at = current_kernel_time().tv_sec; goto out; } } keyring = ERR_PTR(-ENOKEY); out: read_unlock(&keyring_name_lock); return keyring; } static int keyring_detect_cycle_iterator(const void *object, void *iterator_data) { struct keyring_search_context *ctx = iterator_data; const struct key *key = keyring_ptr_to_key(object); kenter("{%d}", key->serial); /* We might get a keyring with matching index-key that is nonetheless a * different keyring. 
*/ if (key != ctx->match_data.raw_data) return 0; ctx->result = ERR_PTR(-EDEADLK); return 1; } /* * See if a cycle will will be created by inserting acyclic tree B in acyclic * tree A at the topmost level (ie: as a direct child of A). * * Since we are adding B to A at the top level, checking for cycles should just * be a matter of seeing if node A is somewhere in tree B. */ static int keyring_detect_cycle(struct key *A, struct key *B) { struct keyring_search_context ctx = { .index_key = A->index_key, .match_data.raw_data = A, .match_data.lookup_type = KEYRING_SEARCH_LOOKUP_DIRECT, .iterator = keyring_detect_cycle_iterator, .flags = (KEYRING_SEARCH_NO_STATE_CHECK | KEYRING_SEARCH_NO_UPDATE_TIME | KEYRING_SEARCH_NO_CHECK_PERM | KEYRING_SEARCH_DETECT_TOO_DEEP), }; rcu_read_lock(); search_nested_keyrings(B, &ctx); rcu_read_unlock(); return PTR_ERR(ctx.result) == -EAGAIN ? 0 : PTR_ERR(ctx.result); } /* * Preallocate memory so that a key can be linked into to a keyring. */ int __key_link_begin(struct key *keyring, const struct keyring_index_key *index_key, struct assoc_array_edit **_edit) __acquires(&keyring->sem) __acquires(&keyring_serialise_link_sem) { struct assoc_array_edit *edit; int ret; kenter("%d,%s,%s,", keyring->serial, index_key->type->name, index_key->description); BUG_ON(index_key->desc_len == 0); if (keyring->type != &key_type_keyring) return -ENOTDIR; down_write(&keyring->sem); ret = -EKEYREVOKED; if (test_bit(KEY_FLAG_REVOKED, &keyring->flags)) goto error_krsem; /* serialise link/link calls to prevent parallel calls causing a cycle * when linking two keyring in opposite orders */ if (index_key->type == &key_type_keyring) down_write(&keyring_serialise_link_sem); /* Create an edit script that will insert/replace the key in the * keyring tree. 
*/ edit = assoc_array_insert(&keyring->keys, &keyring_assoc_array_ops, index_key, NULL); if (IS_ERR(edit)) { ret = PTR_ERR(edit); goto error_sem; } /* If we're not replacing a link in-place then we're going to need some * extra quota. */ if (!edit->dead_leaf) { ret = key_payload_reserve(keyring, keyring->datalen + KEYQUOTA_LINK_BYTES); if (ret < 0) goto error_cancel; } *_edit = edit; kleave(" = 0"); return 0; error_cancel: assoc_array_cancel_edit(edit); error_sem: if (index_key->type == &key_type_keyring) up_write(&keyring_serialise_link_sem); error_krsem: up_write(&keyring->sem); kleave(" = %d", ret); return ret; } /* * Check already instantiated keys aren't going to be a problem. * * The caller must have called __key_link_begin(). Don't need to call this for * keys that were created since __key_link_begin() was called. */ int __key_link_check_live_key(struct key *keyring, struct key *key) { if (key->type == &key_type_keyring) /* check that we aren't going to create a cycle by linking one * keyring to another */ return keyring_detect_cycle(keyring, key); return 0; } /* * Link a key into to a keyring. * * Must be called with __key_link_begin() having being called. Discards any * already extant link to matching key if there is one, so that each keyring * holds at most one link to any given key of a particular type+description * combination. */ void __key_link(struct key *key, struct assoc_array_edit **_edit) { __key_get(key); assoc_array_insert_set_object(*_edit, keyring_key_to_ptr(key)); assoc_array_apply_edit(*_edit); *_edit = NULL; } /* * Finish linking a key into to a keyring. * * Must be called with __key_link_begin() having being called. 
*/ void __key_link_end(struct key *keyring, const struct keyring_index_key *index_key, struct assoc_array_edit *edit) __releases(&keyring->sem) __releases(&keyring_serialise_link_sem) { BUG_ON(index_key->type == NULL); kenter("%d,%s,", keyring->serial, index_key->type->name); if (index_key->type == &key_type_keyring) up_write(&keyring_serialise_link_sem); if (edit && !edit->dead_leaf) { key_payload_reserve(keyring, keyring->datalen - KEYQUOTA_LINK_BYTES); assoc_array_cancel_edit(edit); } up_write(&keyring->sem); } /** * key_link - Link a key to a keyring * @keyring: The keyring to make the link in. * @key: The key to link to. * * Make a link in a keyring to a key, such that the keyring holds a reference * on that key and the key can potentially be found by searching that keyring. * * This function will write-lock the keyring's semaphore and will consume some * of the user's key data quota to hold the link. * * Returns 0 if successful, -ENOTDIR if the keyring isn't a keyring, * -EKEYREVOKED if the keyring has been revoked, -ENFILE if the keyring is * full, -EDQUOT if there is insufficient key data quota remaining to add * another link or -ENOMEM if there's insufficient memory. * * It is assumed that the caller has checked that it is permitted for a link to * be made (the keyring should have Write permission and the key Link * permission). 
*/ int key_link(struct key *keyring, struct key *key) { struct assoc_array_edit *edit; int ret; kenter("{%d,%d}", keyring->serial, atomic_read(&keyring->usage)); key_check(keyring); key_check(key); if (test_bit(KEY_FLAG_TRUSTED_ONLY, &keyring->flags) && !test_bit(KEY_FLAG_TRUSTED, &key->flags)) return -EPERM; ret = __key_link_begin(keyring, &key->index_key, &edit); if (ret == 0) { kdebug("begun {%d,%d}", keyring->serial, atomic_read(&keyring->usage)); ret = __key_link_check_live_key(keyring, key); if (ret == 0) __key_link(key, &edit); __key_link_end(keyring, &key->index_key, edit); } kleave(" = %d {%d,%d}", ret, keyring->serial, atomic_read(&keyring->usage)); return ret; } EXPORT_SYMBOL(key_link); /** * key_unlink - Unlink the first link to a key from a keyring. * @keyring: The keyring to remove the link from. * @key: The key the link is to. * * Remove a link from a keyring to a key. * * This function will write-lock the keyring's semaphore. * * Returns 0 if successful, -ENOTDIR if the keyring isn't a keyring, -ENOENT if * the key isn't linked to by the keyring or -ENOMEM if there's insufficient * memory. * * It is assumed that the caller has checked that it is permitted for a link to * be removed (the keyring should have Write permission; no permissions are * required on the key). */ int key_unlink(struct key *keyring, struct key *key) { struct assoc_array_edit *edit; int ret; key_check(keyring); key_check(key); if (keyring->type != &key_type_keyring) return -ENOTDIR; down_write(&keyring->sem); edit = assoc_array_delete(&keyring->keys, &keyring_assoc_array_ops, &key->index_key); if (IS_ERR(edit)) { ret = PTR_ERR(edit); goto error; } ret = -ENOENT; if (edit == NULL) goto error; assoc_array_apply_edit(edit); key_payload_reserve(keyring, keyring->datalen - KEYQUOTA_LINK_BYTES); ret = 0; error: up_write(&keyring->sem); return ret; } EXPORT_SYMBOL(key_unlink); /** * keyring_clear - Clear a keyring * @keyring: The keyring to clear. 
 *
 * Clear the contents of the specified keyring.
 *
 * Returns 0 if successful or -ENOTDIR if the keyring isn't a keyring.
 */
int keyring_clear(struct key *keyring)
{
	struct assoc_array_edit *edit;
	int ret;

	if (keyring->type != &key_type_keyring)
		return -ENOTDIR;

	down_write(&keyring->sem);

	edit = assoc_array_clear(&keyring->keys, &keyring_assoc_array_ops);
	if (IS_ERR(edit)) {
		ret = PTR_ERR(edit);
	} else {
		/* A NULL edit means the array was already empty - nothing to
		 * apply, but we still zero the quota reservation. */
		if (edit)
			assoc_array_apply_edit(edit);
		key_payload_reserve(keyring, 0);
		ret = 0;
	}

	up_write(&keyring->sem);
	return ret;
}
EXPORT_SYMBOL(keyring_clear);

/*
 * Dispose of the links from a revoked keyring.
 *
 * This is called with the key sem write-locked.
 */
static void keyring_revoke(struct key *keyring)
{
	struct assoc_array_edit *edit;

	edit = assoc_array_clear(&keyring->keys, &keyring_assoc_array_ops);
	if (!IS_ERR(edit)) {
		if (edit)
			assoc_array_apply_edit(edit);
		/* Release the quota held for the links we just dropped. */
		key_payload_reserve(keyring, 0);
	}
}

/*
 * assoc_array_gc() selector: keep (and take a ref on) live keys, drop keys
 * that are dead as of *limit (see key_is_dead()).
 */
static bool keyring_gc_select_iterator(void *object, void *iterator_data)
{
	struct key *key = keyring_ptr_to_key(object);
	time_t *limit = iterator_data;

	if (key_is_dead(key, *limit))
		return false;
	key_get(key);
	return true;
}

/*
 * Pre-scan iterator for keyring_gc(): returns non-zero (stopping the
 * iteration) as soon as one dead key is found, i.e. when a GC pass is needed.
 */
static int keyring_gc_check_iterator(const void *object, void *iterator_data)
{
	const struct key *key = keyring_ptr_to_key(object);
	time_t *limit = iterator_data;

	key_check(key);
	return key_is_dead(key, *limit);
}

/*
 * Garbage collect pointers from a keyring.
 *
 * Not called with any locks held.  The keyring's key struct will not be
 * deallocated under us as only our caller may deallocate it.
 */
void keyring_gc(struct key *keyring, time_t limit)
{
	int result;

	kenter("%x{%s}", keyring->serial, keyring->description ?: "");

	/* An invalidated or revoked keyring has already been emptied. */
	if (keyring->flags & ((1 << KEY_FLAG_INVALIDATED) |
			      (1 << KEY_FLAG_REVOKED)))
		goto dont_gc;

	/* scan the keyring looking for dead keys */
	/* Cheap read-side pre-scan under RCU; only take the write lock below
	 * if at least one dead key was actually seen. */
	rcu_read_lock();
	result = assoc_array_iterate(&keyring->keys,
				     keyring_gc_check_iterator, &limit);
	rcu_read_unlock();
	if (result == true)
		goto do_gc;

dont_gc:
	kleave(" [no gc]");
	return;

do_gc:
	down_write(&keyring->sem);
	assoc_array_gc(&keyring->keys, &keyring_assoc_array_ops,
		       keyring_gc_select_iterator, &limit);
	up_write(&keyring->sem);
	kleave(" [gc]");
}
/* Keyring handling * * Copyright (C) 2004-2005, 2008, 2013 Red Hat, Inc. All Rights Reserved. * Written by David Howells (dhowells@redhat.com) * * This program is free software; you can redistribute it and/or * modify it under the terms of the GNU General Public License * as published by the Free Software Foundation; either version * 2 of the License, or (at your option) any later version. */ #include <linux/module.h> #include <linux/init.h> #include <linux/sched.h> #include <linux/slab.h> #include <linux/security.h> #include <linux/seq_file.h> #include <linux/err.h> #include <keys/keyring-type.h> #include <keys/user-type.h> #include <linux/assoc_array_priv.h> #include <linux/uaccess.h> #include "internal.h" /* * When plumbing the depths of the key tree, this sets a hard limit * set on how deep we're willing to go. */ #define KEYRING_SEARCH_MAX_DEPTH 6 /* * We keep all named keyrings in a hash to speed looking them up. */ #define KEYRING_NAME_HASH_SIZE (1 << 5) /* * We mark pointers we pass to the associative array with bit 1 set if * they're keyrings and clear otherwise. */ #define KEYRING_PTR_SUBTYPE 0x2UL static inline bool keyring_ptr_is_keyring(const struct assoc_array_ptr *x) { return (unsigned long)x & KEYRING_PTR_SUBTYPE; } static inline struct key *keyring_ptr_to_key(const struct assoc_array_ptr *x) { void *object = assoc_array_ptr_to_leaf(x); return (struct key *)((unsigned long)object & ~KEYRING_PTR_SUBTYPE); } static inline void *keyring_key_to_ptr(struct key *key) { if (key->type == &key_type_keyring) return (void *)((unsigned long)key | KEYRING_PTR_SUBTYPE); return key; } static struct list_head keyring_name_hash[KEYRING_NAME_HASH_SIZE]; static DEFINE_RWLOCK(keyring_name_lock); static inline unsigned keyring_hash(const char *desc) { unsigned bucket = 0; for (; *desc; desc++) bucket += (unsigned char)*desc; return bucket & (KEYRING_NAME_HASH_SIZE - 1); } /* * The keyring key type definition. 
Keyrings are simply keys of this type and * can be treated as ordinary keys in addition to having their own special * operations. */ static int keyring_preparse(struct key_preparsed_payload *prep); static void keyring_free_preparse(struct key_preparsed_payload *prep); static int keyring_instantiate(struct key *keyring, struct key_preparsed_payload *prep); static void keyring_revoke(struct key *keyring); static void keyring_destroy(struct key *keyring); static void keyring_describe(const struct key *keyring, struct seq_file *m); static long keyring_read(const struct key *keyring, char __user *buffer, size_t buflen); struct key_type key_type_keyring = { .name = "keyring", .def_datalen = 0, .preparse = keyring_preparse, .free_preparse = keyring_free_preparse, .instantiate = keyring_instantiate, .revoke = keyring_revoke, .destroy = keyring_destroy, .describe = keyring_describe, .read = keyring_read, }; EXPORT_SYMBOL(key_type_keyring); /* * Semaphore to serialise link/link calls to prevent two link calls in parallel * introducing a cycle. */ static DECLARE_RWSEM(keyring_serialise_link_sem); /* * Publish the name of a keyring so that it can be found by name (if it has * one). */ static void keyring_publish_name(struct key *keyring) { int bucket; if (keyring->description) { bucket = keyring_hash(keyring->description); write_lock(&keyring_name_lock); if (!keyring_name_hash[bucket].next) INIT_LIST_HEAD(&keyring_name_hash[bucket]); list_add_tail(&keyring->type_data.link, &keyring_name_hash[bucket]); write_unlock(&keyring_name_lock); } } /* * Preparse a keyring payload */ static int keyring_preparse(struct key_preparsed_payload *prep) { return prep->datalen != 0 ? -EINVAL : 0; } /* * Free a preparse of a user defined key payload */ static void keyring_free_preparse(struct key_preparsed_payload *prep) { } /* * Initialise a keyring. * * Returns 0 on success, -EINVAL if given any data. 
*/ static int keyring_instantiate(struct key *keyring, struct key_preparsed_payload *prep) { assoc_array_init(&keyring->keys); /* make the keyring available by name if it has one */ keyring_publish_name(keyring); return 0; } /* * Multiply 64-bits by 32-bits to 96-bits and fold back to 64-bit. Ideally we'd * fold the carry back too, but that requires inline asm. */ static u64 mult_64x32_and_fold(u64 x, u32 y) { u64 hi = (u64)(u32)(x >> 32) * y; u64 lo = (u64)(u32)(x) * y; return lo + ((u64)(u32)hi << 32) + (u32)(hi >> 32); } /* * Hash a key type and description. */ static unsigned long hash_key_type_and_desc(const struct keyring_index_key *index_key) { const unsigned level_shift = ASSOC_ARRAY_LEVEL_STEP; const unsigned long fan_mask = ASSOC_ARRAY_FAN_MASK; const char *description = index_key->description; unsigned long hash, type; u32 piece; u64 acc; int n, desc_len = index_key->desc_len; type = (unsigned long)index_key->type; acc = mult_64x32_and_fold(type, desc_len + 13); acc = mult_64x32_and_fold(acc, 9207); for (;;) { n = desc_len; if (n <= 0) break; if (n > 4) n = 4; piece = 0; memcpy(&piece, description, n); description += n; desc_len -= n; acc = mult_64x32_and_fold(acc, piece); acc = mult_64x32_and_fold(acc, 9207); } /* Fold the hash down to 32 bits if need be. */ hash = acc; if (ASSOC_ARRAY_KEY_CHUNK_SIZE == 32) hash ^= acc >> 32; /* Squidge all the keyrings into a separate part of the tree to * ordinary keys by making sure the lowest level segment in the hash is * zero for keyrings and non-zero otherwise. */ if (index_key->type != &key_type_keyring && (hash & fan_mask) == 0) return hash | (hash >> (ASSOC_ARRAY_KEY_CHUNK_SIZE - level_shift)) | 1; if (index_key->type == &key_type_keyring && (hash & fan_mask) != 0) return (hash + (hash << level_shift)) & ~fan_mask; return hash; } /* * Build the next index key chunk. * * On 32-bit systems the index key is laid out as: * * 0 4 5 9... * hash desclen typeptr desc[] * * On 64-bit systems: * * 0 8 9 17... 
* hash desclen typeptr desc[] * * We return it one word-sized chunk at a time. */ static unsigned long keyring_get_key_chunk(const void *data, int level) { const struct keyring_index_key *index_key = data; unsigned long chunk = 0; long offset = 0; int desc_len = index_key->desc_len, n = sizeof(chunk); level /= ASSOC_ARRAY_KEY_CHUNK_SIZE; switch (level) { case 0: return hash_key_type_and_desc(index_key); case 1: return ((unsigned long)index_key->type << 8) | desc_len; case 2: if (desc_len == 0) return (u8)((unsigned long)index_key->type >> (ASSOC_ARRAY_KEY_CHUNK_SIZE - 8)); n--; offset = 1; default: offset += sizeof(chunk) - 1; offset += (level - 3) * sizeof(chunk); if (offset >= desc_len) return 0; desc_len -= offset; if (desc_len > n) desc_len = n; offset += desc_len; do { chunk <<= 8; chunk |= ((u8*)index_key->description)[--offset]; } while (--desc_len > 0); if (level == 2) { chunk <<= 8; chunk |= (u8)((unsigned long)index_key->type >> (ASSOC_ARRAY_KEY_CHUNK_SIZE - 8)); } return chunk; } } static unsigned long keyring_get_object_key_chunk(const void *object, int level) { const struct key *key = keyring_ptr_to_key(object); return keyring_get_key_chunk(&key->index_key, level); } static bool keyring_compare_object(const void *object, const void *data) { const struct keyring_index_key *index_key = data; const struct key *key = keyring_ptr_to_key(object); return key->index_key.type == index_key->type && key->index_key.desc_len == index_key->desc_len && memcmp(key->index_key.description, index_key->description, index_key->desc_len) == 0; } /* * Compare the index keys of a pair of objects and determine the bit position * at which they differ - if they differ. 
*/ static int keyring_diff_objects(const void *object, const void *data) { const struct key *key_a = keyring_ptr_to_key(object); const struct keyring_index_key *a = &key_a->index_key; const struct keyring_index_key *b = data; unsigned long seg_a, seg_b; int level, i; level = 0; seg_a = hash_key_type_and_desc(a); seg_b = hash_key_type_and_desc(b); if ((seg_a ^ seg_b) != 0) goto differ; /* The number of bits contributed by the hash is controlled by a * constant in the assoc_array headers. Everything else thereafter we * can deal with as being machine word-size dependent. */ level += ASSOC_ARRAY_KEY_CHUNK_SIZE / 8; seg_a = a->desc_len; seg_b = b->desc_len; if ((seg_a ^ seg_b) != 0) goto differ; /* The next bit may not work on big endian */ level++; seg_a = (unsigned long)a->type; seg_b = (unsigned long)b->type; if ((seg_a ^ seg_b) != 0) goto differ; level += sizeof(unsigned long); if (a->desc_len == 0) goto same; i = 0; if (((unsigned long)a->description | (unsigned long)b->description) & (sizeof(unsigned long) - 1)) { do { seg_a = *(unsigned long *)(a->description + i); seg_b = *(unsigned long *)(b->description + i); if ((seg_a ^ seg_b) != 0) goto differ_plus_i; i += sizeof(unsigned long); } while (i < (a->desc_len & (sizeof(unsigned long) - 1))); } for (; i < a->desc_len; i++) { seg_a = *(unsigned char *)(a->description + i); seg_b = *(unsigned char *)(b->description + i); if ((seg_a ^ seg_b) != 0) goto differ_plus_i; } same: return -1; differ_plus_i: level += i; differ: i = level * 8 + __ffs(seg_a ^ seg_b); return i; } /* * Free an object after stripping the keyring flag off of the pointer. */ static void keyring_free_object(void *object) { key_put(keyring_ptr_to_key(object)); } /* * Operations for keyring management by the index-tree routines. 
*/ static const struct assoc_array_ops keyring_assoc_array_ops = { .get_key_chunk = keyring_get_key_chunk, .get_object_key_chunk = keyring_get_object_key_chunk, .compare_object = keyring_compare_object, .diff_objects = keyring_diff_objects, .free_object = keyring_free_object, }; /* * Clean up a keyring when it is destroyed. Unpublish its name if it had one * and dispose of its data. * * The garbage collector detects the final key_put(), removes the keyring from * the serial number tree and then does RCU synchronisation before coming here, * so we shouldn't need to worry about code poking around here with the RCU * readlock held by this time. */ static void keyring_destroy(struct key *keyring) { if (keyring->description) { write_lock(&keyring_name_lock); if (keyring->type_data.link.next != NULL && !list_empty(&keyring->type_data.link)) list_del(&keyring->type_data.link); write_unlock(&keyring_name_lock); } assoc_array_destroy(&keyring->keys, &keyring_assoc_array_ops); } /* * Describe a keyring for /proc. 
*/ static void keyring_describe(const struct key *keyring, struct seq_file *m) { if (keyring->description) seq_puts(m, keyring->description); else seq_puts(m, "[anon]"); if (key_is_instantiated(keyring)) { if (keyring->keys.nr_leaves_on_tree != 0) seq_printf(m, ": %lu", keyring->keys.nr_leaves_on_tree); else seq_puts(m, ": empty"); } } struct keyring_read_iterator_context { size_t qty; size_t count; key_serial_t __user *buffer; }; static int keyring_read_iterator(const void *object, void *data) { struct keyring_read_iterator_context *ctx = data; const struct key *key = keyring_ptr_to_key(object); int ret; kenter("{%s,%d},,{%zu/%zu}", key->type->name, key->serial, ctx->count, ctx->qty); if (ctx->count >= ctx->qty) return 1; ret = put_user(key->serial, ctx->buffer); if (ret < 0) return ret; ctx->buffer++; ctx->count += sizeof(key->serial); return 0; } /* * Read a list of key IDs from the keyring's contents in binary form * * The keyring's semaphore is read-locked by the caller. This prevents someone * from modifying it under us - which could cause us to read key IDs multiple * times. */ static long keyring_read(const struct key *keyring, char __user *buffer, size_t buflen) { struct keyring_read_iterator_context ctx; unsigned long nr_keys; int ret; kenter("{%d},,%zu", key_serial(keyring), buflen); if (buflen & (sizeof(key_serial_t) - 1)) return -EINVAL; nr_keys = keyring->keys.nr_leaves_on_tree; if (nr_keys == 0) return 0; /* Calculate how much data we could return */ ctx.qty = nr_keys * sizeof(key_serial_t); if (!buffer || !buflen) return ctx.qty; if (buflen > ctx.qty) ctx.qty = buflen; /* Copy the IDs of the subscribed keys into the buffer */ ctx.buffer = (key_serial_t __user *)buffer; ctx.count = 0; ret = assoc_array_iterate(&keyring->keys, keyring_read_iterator, &ctx); if (ret < 0) { kleave(" = %d [iterate]", ret); return ret; } kleave(" = %zu [ok]", ctx.count); return ctx.count; } /* * Allocate a keyring and link into the destination keyring. 
*/ struct key *keyring_alloc(const char *description, kuid_t uid, kgid_t gid, const struct cred *cred, key_perm_t perm, unsigned long flags, struct key *dest) { struct key *keyring; int ret; keyring = key_alloc(&key_type_keyring, description, uid, gid, cred, perm, flags); if (!IS_ERR(keyring)) { ret = key_instantiate_and_link(keyring, NULL, 0, dest, NULL); if (ret < 0) { key_put(keyring); keyring = ERR_PTR(ret); } } return keyring; } EXPORT_SYMBOL(keyring_alloc); /* * By default, we keys found by getting an exact match on their descriptions. */ bool key_default_cmp(const struct key *key, const struct key_match_data *match_data) { return strcmp(key->description, match_data->raw_data) == 0; } /* * Iteration function to consider each key found. */ static int keyring_search_iterator(const void *object, void *iterator_data) { struct keyring_search_context *ctx = iterator_data; const struct key *key = keyring_ptr_to_key(object); unsigned long kflags = key->flags; kenter("{%d}", key->serial); /* ignore keys not of this type */ if (key->type != ctx->index_key.type) { kleave(" = 0 [!type]"); return 0; } /* skip invalidated, revoked and expired keys */ if (ctx->flags & KEYRING_SEARCH_DO_STATE_CHECK) { if (kflags & ((1 << KEY_FLAG_INVALIDATED) | (1 << KEY_FLAG_REVOKED))) { ctx->result = ERR_PTR(-EKEYREVOKED); kleave(" = %d [invrev]", ctx->skipped_ret); goto skipped; } if (key->expiry && ctx->now.tv_sec >= key->expiry) { if (!(ctx->flags & KEYRING_SEARCH_SKIP_EXPIRED)) ctx->result = ERR_PTR(-EKEYEXPIRED); kleave(" = %d [expire]", ctx->skipped_ret); goto skipped; } } /* keys that don't match */ if (!ctx->match_data.cmp(key, &ctx->match_data)) { kleave(" = 0 [!match]"); return 0; } /* key must have search permissions */ if (!(ctx->flags & KEYRING_SEARCH_NO_CHECK_PERM) && key_task_permission(make_key_ref(key, ctx->possessed), ctx->cred, KEY_NEED_SEARCH) < 0) { ctx->result = ERR_PTR(-EACCES); kleave(" = %d [!perm]", ctx->skipped_ret); goto skipped; } if (ctx->flags & 
KEYRING_SEARCH_DO_STATE_CHECK) { /* we set a different error code if we pass a negative key */ if (kflags & (1 << KEY_FLAG_NEGATIVE)) { smp_rmb(); ctx->result = ERR_PTR(key->type_data.reject_error); kleave(" = %d [neg]", ctx->skipped_ret); goto skipped; } } /* Found */ ctx->result = make_key_ref(key, ctx->possessed); kleave(" = 1 [found]"); return 1; skipped: return ctx->skipped_ret; } /* * Search inside a keyring for a key. We can search by walking to it * directly based on its index-key or we can iterate over the entire * tree looking for it, based on the match function. */ static int search_keyring(struct key *keyring, struct keyring_search_context *ctx) { if (ctx->match_data.lookup_type == KEYRING_SEARCH_LOOKUP_DIRECT) { const void *object; object = assoc_array_find(&keyring->keys, &keyring_assoc_array_ops, &ctx->index_key); return object ? ctx->iterator(object, ctx) : 0; } return assoc_array_iterate(&keyring->keys, ctx->iterator, ctx); } /* * Search a tree of keyrings that point to other keyrings up to the maximum * depth. */ static bool search_nested_keyrings(struct key *keyring, struct keyring_search_context *ctx) { struct { struct key *keyring; struct assoc_array_node *node; int slot; } stack[KEYRING_SEARCH_MAX_DEPTH]; struct assoc_array_shortcut *shortcut; struct assoc_array_node *node; struct assoc_array_ptr *ptr; struct key *key; int sp = 0, slot; kenter("{%d},{%s,%s}", keyring->serial, ctx->index_key.type->name, ctx->index_key.description); #define STATE_CHECKS (KEYRING_SEARCH_NO_STATE_CHECK | KEYRING_SEARCH_DO_STATE_CHECK) BUG_ON((ctx->flags & STATE_CHECKS) == 0 || (ctx->flags & STATE_CHECKS) == STATE_CHECKS); if (ctx->index_key.description) ctx->index_key.desc_len = strlen(ctx->index_key.description); /* Check to see if this top-level keyring is what we are looking for * and whether it is valid or not. 
*/ if (ctx->match_data.lookup_type == KEYRING_SEARCH_LOOKUP_ITERATE || keyring_compare_object(keyring, &ctx->index_key)) { ctx->skipped_ret = 2; switch (ctx->iterator(keyring_key_to_ptr(keyring), ctx)) { case 1: goto found; case 2: return false; default: break; } } ctx->skipped_ret = 0; /* Start processing a new keyring */ descend_to_keyring: kdebug("descend to %d", keyring->serial); if (keyring->flags & ((1 << KEY_FLAG_INVALIDATED) | (1 << KEY_FLAG_REVOKED))) goto not_this_keyring; /* Search through the keys in this keyring before its searching its * subtrees. */ if (search_keyring(keyring, ctx)) goto found; /* Then manually iterate through the keyrings nested in this one. * * Start from the root node of the index tree. Because of the way the * hash function has been set up, keyrings cluster on the leftmost * branch of the root node (root slot 0) or in the root node itself. * Non-keyrings avoid the leftmost branch of the root entirely (root * slots 1-15). */ ptr = ACCESS_ONCE(keyring->keys.root); if (!ptr) goto not_this_keyring; if (assoc_array_ptr_is_shortcut(ptr)) { /* If the root is a shortcut, either the keyring only contains * keyring pointers (everything clusters behind root slot 0) or * doesn't contain any keyring pointers. */ shortcut = assoc_array_ptr_to_shortcut(ptr); smp_read_barrier_depends(); if ((shortcut->index_key[0] & ASSOC_ARRAY_FAN_MASK) != 0) goto not_this_keyring; ptr = ACCESS_ONCE(shortcut->next_node); node = assoc_array_ptr_to_node(ptr); goto begin_node; } node = assoc_array_ptr_to_node(ptr); smp_read_barrier_depends(); ptr = node->slots[0]; if (!assoc_array_ptr_is_meta(ptr)) goto begin_node; descend_to_node: /* Descend to a more distal node in this keyring's content tree and go * through that. 
*/ kdebug("descend"); if (assoc_array_ptr_is_shortcut(ptr)) { shortcut = assoc_array_ptr_to_shortcut(ptr); smp_read_barrier_depends(); ptr = ACCESS_ONCE(shortcut->next_node); BUG_ON(!assoc_array_ptr_is_node(ptr)); } node = assoc_array_ptr_to_node(ptr); begin_node: kdebug("begin_node"); smp_read_barrier_depends(); slot = 0; ascend_to_node: /* Go through the slots in a node */ for (; slot < ASSOC_ARRAY_FAN_OUT; slot++) { ptr = ACCESS_ONCE(node->slots[slot]); if (assoc_array_ptr_is_meta(ptr) && node->back_pointer) goto descend_to_node; if (!keyring_ptr_is_keyring(ptr)) continue; key = keyring_ptr_to_key(ptr); if (sp >= KEYRING_SEARCH_MAX_DEPTH) { if (ctx->flags & KEYRING_SEARCH_DETECT_TOO_DEEP) { ctx->result = ERR_PTR(-ELOOP); return false; } goto not_this_keyring; } /* Search a nested keyring */ if (!(ctx->flags & KEYRING_SEARCH_NO_CHECK_PERM) && key_task_permission(make_key_ref(key, ctx->possessed), ctx->cred, KEY_NEED_SEARCH) < 0) continue; /* stack the current position */ stack[sp].keyring = keyring; stack[sp].node = node; stack[sp].slot = slot; sp++; /* begin again with the new keyring */ keyring = key; goto descend_to_keyring; } /* We've dealt with all the slots in the current node, so now we need * to ascend to the parent and continue processing there. */ ptr = ACCESS_ONCE(node->back_pointer); slot = node->parent_slot; if (ptr && assoc_array_ptr_is_shortcut(ptr)) { shortcut = assoc_array_ptr_to_shortcut(ptr); smp_read_barrier_depends(); ptr = ACCESS_ONCE(shortcut->back_pointer); slot = shortcut->parent_slot; } if (!ptr) goto not_this_keyring; node = assoc_array_ptr_to_node(ptr); smp_read_barrier_depends(); slot++; /* If we've ascended to the root (zero backpointer), we must have just * finished processing the leftmost branch rather than the root slots - * so there can't be any more keyrings for us to find. 
*/ if (node->back_pointer) { kdebug("ascend %d", slot); goto ascend_to_node; } /* The keyring we're looking at was disqualified or didn't contain a * matching key. */ not_this_keyring: kdebug("not_this_keyring %d", sp); if (sp <= 0) { kleave(" = false"); return false; } /* Resume the processing of a keyring higher up in the tree */ sp--; keyring = stack[sp].keyring; node = stack[sp].node; slot = stack[sp].slot + 1; kdebug("ascend to %d [%d]", keyring->serial, slot); goto ascend_to_node; /* We found a viable match */ found: key = key_ref_to_ptr(ctx->result); key_check(key); if (!(ctx->flags & KEYRING_SEARCH_NO_UPDATE_TIME)) { key->last_used_at = ctx->now.tv_sec; keyring->last_used_at = ctx->now.tv_sec; while (sp > 0) stack[--sp].keyring->last_used_at = ctx->now.tv_sec; } kleave(" = true"); return true; } /** * keyring_search_aux - Search a keyring tree for a key matching some criteria * @keyring_ref: A pointer to the keyring with possession indicator. * @ctx: The keyring search context. * * Search the supplied keyring tree for a key that matches the criteria given. * The root keyring and any linked keyrings must grant Search permission to the * caller to be searchable and keys can only be found if they too grant Search * to the caller. The possession flag on the root keyring pointer controls use * of the possessor bits in permissions checking of the entire tree. In * addition, the LSM gets to forbid keyring searches and key matches. * * The search is performed as a breadth-then-depth search up to the prescribed * limit (KEYRING_SEARCH_MAX_DEPTH). * * Keys are matched to the type provided and are then filtered by the match * function, which is given the description to use in any way it sees fit. The * match function may use any attributes of a key that it wishes to to * determine the match. Normally the match function from the key type would be * used. * * RCU can be used to prevent the keyring key lists from disappearing without * the need to take lots of locks. 
* * Returns a pointer to the found key and increments the key usage count if * successful; -EAGAIN if no matching keys were found, or if expired or revoked * keys were found; -ENOKEY if only negative keys were found; -ENOTDIR if the * specified keyring wasn't a keyring. * * In the case of a successful return, the possession attribute from * @keyring_ref is propagated to the returned key reference. */ key_ref_t keyring_search_aux(key_ref_t keyring_ref, struct keyring_search_context *ctx) { struct key *keyring; long err; ctx->iterator = keyring_search_iterator; ctx->possessed = is_key_possessed(keyring_ref); ctx->result = ERR_PTR(-EAGAIN); keyring = key_ref_to_ptr(keyring_ref); key_check(keyring); if (keyring->type != &key_type_keyring) return ERR_PTR(-ENOTDIR); if (!(ctx->flags & KEYRING_SEARCH_NO_CHECK_PERM)) { err = key_task_permission(keyring_ref, ctx->cred, KEY_NEED_SEARCH); if (err < 0) return ERR_PTR(err); } rcu_read_lock(); ctx->now = current_kernel_time(); if (search_nested_keyrings(keyring, ctx)) __key_get(key_ref_to_ptr(ctx->result)); rcu_read_unlock(); return ctx->result; } /** * keyring_search - Search the supplied keyring tree for a matching key * @keyring: The root of the keyring tree to be searched. * @type: The type of keyring we want to find. * @description: The name of the keyring we want to find. * * As keyring_search_aux() above, but using the current task's credentials and * type's default matching function and preferred search method. 
*/ key_ref_t keyring_search(key_ref_t keyring, struct key_type *type, const char *description) { struct keyring_search_context ctx = { .index_key.type = type, .index_key.description = description, .cred = current_cred(), .match_data.cmp = key_default_cmp, .match_data.raw_data = description, .match_data.lookup_type = KEYRING_SEARCH_LOOKUP_DIRECT, .flags = KEYRING_SEARCH_DO_STATE_CHECK, }; key_ref_t key; int ret; if (type->match_preparse) { ret = type->match_preparse(&ctx.match_data); if (ret < 0) return ERR_PTR(ret); } key = keyring_search_aux(keyring, &ctx); if (type->match_free) type->match_free(&ctx.match_data); return key; } EXPORT_SYMBOL(keyring_search); /* * Search the given keyring for a key that might be updated. * * The caller must guarantee that the keyring is a keyring and that the * permission is granted to modify the keyring as no check is made here. The * caller must also hold a lock on the keyring semaphore. * * Returns a pointer to the found key with usage count incremented if * successful and returns NULL if not found. Revoked and invalidated keys are * skipped over. * * If successful, the possession indicator is propagated from the keyring ref * to the returned key reference. */ key_ref_t find_key_to_update(key_ref_t keyring_ref, const struct keyring_index_key *index_key) { struct key *keyring, *key; const void *object; keyring = key_ref_to_ptr(keyring_ref); kenter("{%d},{%s,%s}", keyring->serial, index_key->type->name, index_key->description); object = assoc_array_find(&keyring->keys, &keyring_assoc_array_ops, index_key); if (object) goto found; kleave(" = NULL"); return NULL; found: key = keyring_ptr_to_key(object); if (key->flags & ((1 << KEY_FLAG_INVALIDATED) | (1 << KEY_FLAG_REVOKED))) { kleave(" = NULL [x]"); return NULL; } __key_get(key); kleave(" = {%d}", key->serial); return make_key_ref(key, is_key_possessed(keyring_ref)); } /* * Find a keyring with the specified name. 
* * All named keyrings in the current user namespace are searched, provided they * grant Search permission directly to the caller (unless this check is * skipped). Keyrings whose usage points have reached zero or who have been * revoked are skipped. * * Returns a pointer to the keyring with the keyring's refcount having being * incremented on success. -ENOKEY is returned if a key could not be found. */ struct key *find_keyring_by_name(const char *name, bool skip_perm_check) { struct key *keyring; int bucket; if (!name) return ERR_PTR(-EINVAL); bucket = keyring_hash(name); read_lock(&keyring_name_lock); if (keyring_name_hash[bucket].next) { /* search this hash bucket for a keyring with a matching name * that's readable and that hasn't been revoked */ list_for_each_entry(keyring, &keyring_name_hash[bucket], type_data.link ) { if (!kuid_has_mapping(current_user_ns(), keyring->user->uid)) continue; if (test_bit(KEY_FLAG_REVOKED, &keyring->flags)) continue; if (strcmp(keyring->description, name) != 0) continue; if (!skip_perm_check && key_permission(make_key_ref(keyring, 0), KEY_NEED_SEARCH) < 0) continue; /* we've got a match but we might end up racing with * key_cleanup() if the keyring is currently 'dead' * (ie. it has a zero usage count) */ if (!atomic_inc_not_zero(&keyring->usage)) continue; keyring->last_used_at = current_kernel_time().tv_sec; goto out; } } keyring = ERR_PTR(-ENOKEY); out: read_unlock(&keyring_name_lock); return keyring; } static int keyring_detect_cycle_iterator(const void *object, void *iterator_data) { struct keyring_search_context *ctx = iterator_data; const struct key *key = keyring_ptr_to_key(object); kenter("{%d}", key->serial); /* We might get a keyring with matching index-key that is nonetheless a * different keyring. 
*/ if (key != ctx->match_data.raw_data) return 0; ctx->result = ERR_PTR(-EDEADLK); return 1; } /* * See if a cycle will will be created by inserting acyclic tree B in acyclic * tree A at the topmost level (ie: as a direct child of A). * * Since we are adding B to A at the top level, checking for cycles should just * be a matter of seeing if node A is somewhere in tree B. */ static int keyring_detect_cycle(struct key *A, struct key *B) { struct keyring_search_context ctx = { .index_key = A->index_key, .match_data.raw_data = A, .match_data.lookup_type = KEYRING_SEARCH_LOOKUP_DIRECT, .iterator = keyring_detect_cycle_iterator, .flags = (KEYRING_SEARCH_NO_STATE_CHECK | KEYRING_SEARCH_NO_UPDATE_TIME | KEYRING_SEARCH_NO_CHECK_PERM | KEYRING_SEARCH_DETECT_TOO_DEEP), }; rcu_read_lock(); search_nested_keyrings(B, &ctx); rcu_read_unlock(); return PTR_ERR(ctx.result) == -EAGAIN ? 0 : PTR_ERR(ctx.result); } /* * Preallocate memory so that a key can be linked into to a keyring. */ int __key_link_begin(struct key *keyring, const struct keyring_index_key *index_key, struct assoc_array_edit **_edit) __acquires(&keyring->sem) __acquires(&keyring_serialise_link_sem) { struct assoc_array_edit *edit; int ret; kenter("%d,%s,%s,", keyring->serial, index_key->type->name, index_key->description); BUG_ON(index_key->desc_len == 0); if (keyring->type != &key_type_keyring) return -ENOTDIR; down_write(&keyring->sem); ret = -EKEYREVOKED; if (test_bit(KEY_FLAG_REVOKED, &keyring->flags)) goto error_krsem; /* serialise link/link calls to prevent parallel calls causing a cycle * when linking two keyring in opposite orders */ if (index_key->type == &key_type_keyring) down_write(&keyring_serialise_link_sem); /* Create an edit script that will insert/replace the key in the * keyring tree. 
*/ edit = assoc_array_insert(&keyring->keys, &keyring_assoc_array_ops, index_key, NULL); if (IS_ERR(edit)) { ret = PTR_ERR(edit); goto error_sem; } /* If we're not replacing a link in-place then we're going to need some * extra quota. */ if (!edit->dead_leaf) { ret = key_payload_reserve(keyring, keyring->datalen + KEYQUOTA_LINK_BYTES); if (ret < 0) goto error_cancel; } *_edit = edit; kleave(" = 0"); return 0; error_cancel: assoc_array_cancel_edit(edit); error_sem: if (index_key->type == &key_type_keyring) up_write(&keyring_serialise_link_sem); error_krsem: up_write(&keyring->sem); kleave(" = %d", ret); return ret; } /* * Check already instantiated keys aren't going to be a problem. * * The caller must have called __key_link_begin(). Don't need to call this for * keys that were created since __key_link_begin() was called. */ int __key_link_check_live_key(struct key *keyring, struct key *key) { if (key->type == &key_type_keyring) /* check that we aren't going to create a cycle by linking one * keyring to another */ return keyring_detect_cycle(keyring, key); return 0; } /* * Link a key into to a keyring. * * Must be called with __key_link_begin() having being called. Discards any * already extant link to matching key if there is one, so that each keyring * holds at most one link to any given key of a particular type+description * combination. */ void __key_link(struct key *key, struct assoc_array_edit **_edit) { __key_get(key); assoc_array_insert_set_object(*_edit, keyring_key_to_ptr(key)); assoc_array_apply_edit(*_edit); *_edit = NULL; } /* * Finish linking a key into to a keyring. * * Must be called with __key_link_begin() having being called. 
*/ void __key_link_end(struct key *keyring, const struct keyring_index_key *index_key, struct assoc_array_edit *edit) __releases(&keyring->sem) __releases(&keyring_serialise_link_sem) { BUG_ON(index_key->type == NULL); kenter("%d,%s,", keyring->serial, index_key->type->name); if (index_key->type == &key_type_keyring) up_write(&keyring_serialise_link_sem); if (edit) { if (!edit->dead_leaf) { key_payload_reserve(keyring, keyring->datalen - KEYQUOTA_LINK_BYTES); } assoc_array_cancel_edit(edit); } up_write(&keyring->sem); } /** * key_link - Link a key to a keyring * @keyring: The keyring to make the link in. * @key: The key to link to. * * Make a link in a keyring to a key, such that the keyring holds a reference * on that key and the key can potentially be found by searching that keyring. * * This function will write-lock the keyring's semaphore and will consume some * of the user's key data quota to hold the link. * * Returns 0 if successful, -ENOTDIR if the keyring isn't a keyring, * -EKEYREVOKED if the keyring has been revoked, -ENFILE if the keyring is * full, -EDQUOT if there is insufficient key data quota remaining to add * another link or -ENOMEM if there's insufficient memory. * * It is assumed that the caller has checked that it is permitted for a link to * be made (the keyring should have Write permission and the key Link * permission). 
*/ int key_link(struct key *keyring, struct key *key) { struct assoc_array_edit *edit; int ret; kenter("{%d,%d}", keyring->serial, atomic_read(&keyring->usage)); key_check(keyring); key_check(key); if (test_bit(KEY_FLAG_TRUSTED_ONLY, &keyring->flags) && !test_bit(KEY_FLAG_TRUSTED, &key->flags)) return -EPERM; ret = __key_link_begin(keyring, &key->index_key, &edit); if (ret == 0) { kdebug("begun {%d,%d}", keyring->serial, atomic_read(&keyring->usage)); ret = __key_link_check_live_key(keyring, key); if (ret == 0) __key_link(key, &edit); __key_link_end(keyring, &key->index_key, edit); } kleave(" = %d {%d,%d}", ret, keyring->serial, atomic_read(&keyring->usage)); return ret; } EXPORT_SYMBOL(key_link); /** * key_unlink - Unlink the first link to a key from a keyring. * @keyring: The keyring to remove the link from. * @key: The key the link is to. * * Remove a link from a keyring to a key. * * This function will write-lock the keyring's semaphore. * * Returns 0 if successful, -ENOTDIR if the keyring isn't a keyring, -ENOENT if * the key isn't linked to by the keyring or -ENOMEM if there's insufficient * memory. * * It is assumed that the caller has checked that it is permitted for a link to * be removed (the keyring should have Write permission; no permissions are * required on the key). */ int key_unlink(struct key *keyring, struct key *key) { struct assoc_array_edit *edit; int ret; key_check(keyring); key_check(key); if (keyring->type != &key_type_keyring) return -ENOTDIR; down_write(&keyring->sem); edit = assoc_array_delete(&keyring->keys, &keyring_assoc_array_ops, &key->index_key); if (IS_ERR(edit)) { ret = PTR_ERR(edit); goto error; } ret = -ENOENT; if (edit == NULL) goto error; assoc_array_apply_edit(edit); key_payload_reserve(keyring, keyring->datalen - KEYQUOTA_LINK_BYTES); ret = 0; error: up_write(&keyring->sem); return ret; } EXPORT_SYMBOL(key_unlink); /** * keyring_clear - Clear a keyring * @keyring: The keyring to clear. 
* * Clear the contents of the specified keyring. * * Returns 0 if successful or -ENOTDIR if the keyring isn't a keyring. */ int keyring_clear(struct key *keyring) { struct assoc_array_edit *edit; int ret; if (keyring->type != &key_type_keyring) return -ENOTDIR; down_write(&keyring->sem); edit = assoc_array_clear(&keyring->keys, &keyring_assoc_array_ops); if (IS_ERR(edit)) { ret = PTR_ERR(edit); } else { if (edit) assoc_array_apply_edit(edit); key_payload_reserve(keyring, 0); ret = 0; } up_write(&keyring->sem); return ret; } EXPORT_SYMBOL(keyring_clear); /* * Dispose of the links from a revoked keyring. * * This is called with the key sem write-locked. */ static void keyring_revoke(struct key *keyring) { struct assoc_array_edit *edit; edit = assoc_array_clear(&keyring->keys, &keyring_assoc_array_ops); if (!IS_ERR(edit)) { if (edit) assoc_array_apply_edit(edit); key_payload_reserve(keyring, 0); } } static bool keyring_gc_select_iterator(void *object, void *iterator_data) { struct key *key = keyring_ptr_to_key(object); time_t *limit = iterator_data; if (key_is_dead(key, *limit)) return false; key_get(key); return true; } static int keyring_gc_check_iterator(const void *object, void *iterator_data) { const struct key *key = keyring_ptr_to_key(object); time_t *limit = iterator_data; key_check(key); return key_is_dead(key, *limit); } /* * Garbage collect pointers from a keyring. * * Not called with any locks held. The keyring's key struct will not be * deallocated under us as only our caller may deallocate it. 
*/ void keyring_gc(struct key *keyring, time_t limit) { int result; kenter("%x{%s}", keyring->serial, keyring->description ?: ""); if (keyring->flags & ((1 << KEY_FLAG_INVALIDATED) | (1 << KEY_FLAG_REVOKED))) goto dont_gc; /* scan the keyring looking for dead keys */ rcu_read_lock(); result = assoc_array_iterate(&keyring->keys, keyring_gc_check_iterator, &limit); rcu_read_unlock(); if (result == true) goto do_gc; dont_gc: kleave(" [no gc]"); return; do_gc: down_write(&keyring->sem); assoc_array_gc(&keyring->keys, &keyring_assoc_array_ops, keyring_gc_select_iterator, &limit); up_write(&keyring->sem); kleave(" [gc]"); }
__releases(&keyring_serialise_link_sem) { BUG_ON(index_key->type == NULL); kenter("%d,%s,", keyring->serial, index_key->type->name); if (index_key->type == &key_type_keyring) up_write(&keyring_serialise_link_sem); if (edit && !edit->dead_leaf) { key_payload_reserve(keyring, keyring->datalen - KEYQUOTA_LINK_BYTES); assoc_array_cancel_edit(edit); } up_write(&keyring->sem); }
__releases(&keyring_serialise_link_sem) { BUG_ON(index_key->type == NULL); kenter("%d,%s,", keyring->serial, index_key->type->name); if (index_key->type == &key_type_keyring) up_write(&keyring_serialise_link_sem); if (edit) { if (!edit->dead_leaf) { key_payload_reserve(keyring, keyring->datalen - KEYQUOTA_LINK_BYTES); } assoc_array_cancel_edit(edit); } up_write(&keyring->sem); }
{'added': [(1184, '\tif (edit) {'), (1185, '\t\tif (!edit->dead_leaf) {'), (1186, '\t\t\tkey_payload_reserve(keyring,'), (1187, '\t\t\t\tkeyring->datalen - KEYQUOTA_LINK_BYTES);'), (1188, '\t\t}')], 'deleted': [(1184, '\tif (edit && !edit->dead_leaf) {'), (1185, '\t\tkey_payload_reserve(keyring,'), (1186, '\t\t\t\t keyring->datalen - KEYQUOTA_LINK_BYTES);')]}
5
3
864
5,382
13
80
4
https://github.com/torvalds/linux
CVE-2015-1333
CWE-119
463
audio_microfrontend.cc
C++
tflite::ops::custom::audio_microfrontend::Prepare
/* Copyright 2018 The TensorFlow Authors. All Rights Reserved. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. You may obtain a copy of the License at http://www.apache.org/licenses/LICENSE-2.0 Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License. ==============================================================================*/ #include "flatbuffers/flexbuffers.h" // from @flatbuffers #include "tensorflow/lite/context.h" #include "tensorflow/lite/experimental/microfrontend/lib/frontend.h" #include "tensorflow/lite/experimental/microfrontend/lib/frontend_util.h" #include "tensorflow/lite/kernels/internal/tensor.h" #include "tensorflow/lite/kernels/kernel_util.h" namespace tflite { namespace ops { namespace custom { namespace audio_microfrontend { constexpr int kInputTensor = 0; constexpr int kOutputTensor = 0; typedef struct { int sample_rate; FrontendState* state; int left_context; int right_context; int frame_stride; bool zero_padding; int out_scale; bool out_float; } TfLiteAudioMicrofrontendParams; void* Init(TfLiteContext* context, const char* buffer, size_t length) { auto* data = new TfLiteAudioMicrofrontendParams; const uint8_t* buffer_t = reinterpret_cast<const uint8_t*>(buffer); const flexbuffers::Map& m = flexbuffers::GetRoot(buffer_t, length).AsMap(); data->sample_rate = m["sample_rate"].AsInt32(); struct FrontendConfig config; config.window.size_ms = m["window_size"].AsInt32(); config.window.step_size_ms = m["window_step"].AsInt32(); config.filterbank.num_channels = m["num_channels"].AsInt32(); config.filterbank.upper_band_limit = m["upper_band_limit"].AsFloat(); config.filterbank.lower_band_limit = 
m["lower_band_limit"].AsFloat(); config.noise_reduction.smoothing_bits = m["smoothing_bits"].AsInt32(); config.noise_reduction.even_smoothing = m["even_smoothing"].AsFloat(); config.noise_reduction.odd_smoothing = m["odd_smoothing"].AsFloat(); config.noise_reduction.min_signal_remaining = m["min_signal_remaining"].AsFloat(); config.pcan_gain_control.enable_pcan = m["enable_pcan"].AsBool(); config.pcan_gain_control.strength = m["pcan_strength"].AsFloat(); config.pcan_gain_control.offset = m["pcan_offset"].AsFloat(); config.pcan_gain_control.gain_bits = m["gain_bits"].AsInt32(); config.log_scale.enable_log = m["enable_log"].AsBool(); config.log_scale.scale_shift = m["scale_shift"].AsInt32(); data->state = new FrontendState; FrontendPopulateState(&config, data->state, data->sample_rate); data->left_context = m["left_context"].AsInt32(); data->right_context = m["right_context"].AsInt32(); data->frame_stride = m["frame_stride"].AsInt32(); data->zero_padding = m["zero_padding"].AsBool(); data->out_scale = m["out_scale"].AsInt32(); data->out_float = m["out_float"].AsBool(); return data; } void Free(TfLiteContext* context, void* buffer) { auto* data = reinterpret_cast<TfLiteAudioMicrofrontendParams*>(buffer); FrontendFreeStateContents(data->state); delete data->state; delete data; } TfLiteStatus Prepare(TfLiteContext* context, TfLiteNode* node) { auto* data = reinterpret_cast<TfLiteAudioMicrofrontendParams*>(node->user_data); TF_LITE_ENSURE_EQ(context, NumInputs(node), 1); TF_LITE_ENSURE_EQ(context, NumOutputs(node), 1); const TfLiteTensor* input = GetInput(context, node, kInputTensor); TfLiteTensor* output = GetOutput(context, node, kOutputTensor); TF_LITE_ENSURE_EQ(context, NumDimensions(input), 1); TF_LITE_ENSURE_EQ(context, input->type, kTfLiteInt16); output->type = kTfLiteInt32; if (data->out_float) { output->type = kTfLiteFloat32; } TfLiteIntArray* output_size = TfLiteIntArrayCreate(2); int num_frames = 0; if (input->dims->data[0] >= data->state->window.size) { 
num_frames = (input->dims->data[0] - data->state->window.size) / data->state->window.step / data->frame_stride + 1; } output_size->data[0] = num_frames; output_size->data[1] = data->state->filterbank.num_channels * (1 + data->left_context + data->right_context); return context->ResizeTensor(context, output, output_size); } template <typename T> void GenerateFeatures(TfLiteAudioMicrofrontendParams* data, const TfLiteTensor* input, TfLiteTensor* output) { const int16_t* audio_data = GetTensorData<int16_t>(input); int64_t audio_size = input->dims->data[0]; T* filterbanks_flat = GetTensorData<T>(output); int num_frames = 0; if (audio_size >= data->state->window.size) { num_frames = (input->dims->data[0] - data->state->window.size) / data->state->window.step + 1; } std::vector<std::vector<T>> frame_buffer(num_frames); int frame_index = 0; while (audio_size > 0) { size_t num_samples_read; struct FrontendOutput output = FrontendProcessSamples( data->state, audio_data, audio_size, &num_samples_read); audio_data += num_samples_read; audio_size -= num_samples_read; if (output.values != nullptr) { frame_buffer[frame_index].reserve(output.size); int i; for (i = 0; i < output.size; ++i) { frame_buffer[frame_index].push_back(static_cast<T>(output.values[i]) / data->out_scale); } ++frame_index; } } int index = 0; std::vector<T> pad(data->state->filterbank.num_channels, 0); int anchor; for (anchor = 0; anchor < frame_buffer.size(); anchor += data->frame_stride) { int frame; for (frame = anchor - data->left_context; frame <= anchor + data->right_context; ++frame) { std::vector<T>* feature; if (data->zero_padding && (frame < 0 || frame >= frame_buffer.size())) { feature = &pad; } else if (frame < 0) { feature = &frame_buffer[0]; } else if (frame >= frame_buffer.size()) { feature = &frame_buffer[frame_buffer.size() - 1]; } else { feature = &frame_buffer[frame]; } for (auto f : *feature) { filterbanks_flat[index++] = f; } } } } TfLiteStatus Eval(TfLiteContext* context, TfLiteNode* 
node) { auto* data = reinterpret_cast<TfLiteAudioMicrofrontendParams*>(node->user_data); FrontendReset(data->state); const TfLiteTensor* input = GetInput(context, node, kInputTensor); TfLiteTensor* output = GetOutput(context, node, kOutputTensor); if (data->out_float) { GenerateFeatures<float>(data, input, output); } else { GenerateFeatures<int32>(data, input, output); } return kTfLiteOk; } } // namespace audio_microfrontend TfLiteRegistration* Register_AUDIO_MICROFRONTEND() { static TfLiteRegistration r = { audio_microfrontend::Init, audio_microfrontend::Free, audio_microfrontend::Prepare, audio_microfrontend::Eval}; return &r; } } // namespace custom } // namespace ops } // namespace tflite
/* Copyright 2018 The TensorFlow Authors. All Rights Reserved. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. You may obtain a copy of the License at http://www.apache.org/licenses/LICENSE-2.0 Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License. ==============================================================================*/ #include "flatbuffers/flexbuffers.h" // from @flatbuffers #include "tensorflow/lite/context.h" #include "tensorflow/lite/experimental/microfrontend/lib/frontend.h" #include "tensorflow/lite/experimental/microfrontend/lib/frontend_util.h" #include "tensorflow/lite/kernels/internal/tensor.h" #include "tensorflow/lite/kernels/kernel_util.h" namespace tflite { namespace ops { namespace custom { namespace audio_microfrontend { constexpr int kInputTensor = 0; constexpr int kOutputTensor = 0; typedef struct { int sample_rate; FrontendState* state; int left_context; int right_context; int frame_stride; bool zero_padding; int out_scale; bool out_float; } TfLiteAudioMicrofrontendParams; void* Init(TfLiteContext* context, const char* buffer, size_t length) { auto* data = new TfLiteAudioMicrofrontendParams; const uint8_t* buffer_t = reinterpret_cast<const uint8_t*>(buffer); const flexbuffers::Map& m = flexbuffers::GetRoot(buffer_t, length).AsMap(); data->sample_rate = m["sample_rate"].AsInt32(); struct FrontendConfig config; config.window.size_ms = m["window_size"].AsInt32(); config.window.step_size_ms = m["window_step"].AsInt32(); config.filterbank.num_channels = m["num_channels"].AsInt32(); config.filterbank.upper_band_limit = m["upper_band_limit"].AsFloat(); config.filterbank.lower_band_limit = 
m["lower_band_limit"].AsFloat(); config.noise_reduction.smoothing_bits = m["smoothing_bits"].AsInt32(); config.noise_reduction.even_smoothing = m["even_smoothing"].AsFloat(); config.noise_reduction.odd_smoothing = m["odd_smoothing"].AsFloat(); config.noise_reduction.min_signal_remaining = m["min_signal_remaining"].AsFloat(); config.pcan_gain_control.enable_pcan = m["enable_pcan"].AsBool(); config.pcan_gain_control.strength = m["pcan_strength"].AsFloat(); config.pcan_gain_control.offset = m["pcan_offset"].AsFloat(); config.pcan_gain_control.gain_bits = m["gain_bits"].AsInt32(); config.log_scale.enable_log = m["enable_log"].AsBool(); config.log_scale.scale_shift = m["scale_shift"].AsInt32(); data->state = new FrontendState; FrontendPopulateState(&config, data->state, data->sample_rate); data->left_context = m["left_context"].AsInt32(); data->right_context = m["right_context"].AsInt32(); data->frame_stride = m["frame_stride"].AsInt32(); data->zero_padding = m["zero_padding"].AsBool(); data->out_scale = m["out_scale"].AsInt32(); data->out_float = m["out_float"].AsBool(); return data; } void Free(TfLiteContext* context, void* buffer) { auto* data = reinterpret_cast<TfLiteAudioMicrofrontendParams*>(buffer); FrontendFreeStateContents(data->state); delete data->state; delete data; } TfLiteStatus Prepare(TfLiteContext* context, TfLiteNode* node) { auto* data = reinterpret_cast<TfLiteAudioMicrofrontendParams*>(node->user_data); TF_LITE_ENSURE_EQ(context, NumInputs(node), 1); TF_LITE_ENSURE_EQ(context, NumOutputs(node), 1); const TfLiteTensor* input; TF_LITE_ENSURE_OK(context, GetInputSafe(context, node, kInputTensor, &input)); TfLiteTensor* output; TF_LITE_ENSURE_OK(context, GetOutputSafe(context, node, kOutputTensor, &output)); TF_LITE_ENSURE_EQ(context, NumDimensions(input), 1); TF_LITE_ENSURE_EQ(context, input->type, kTfLiteInt16); output->type = kTfLiteInt32; if (data->out_float) { output->type = kTfLiteFloat32; } TfLiteIntArray* output_size = TfLiteIntArrayCreate(2); 
int num_frames = 0; if (input->dims->data[0] >= data->state->window.size) { num_frames = (input->dims->data[0] - data->state->window.size) / data->state->window.step / data->frame_stride + 1; } output_size->data[0] = num_frames; output_size->data[1] = data->state->filterbank.num_channels * (1 + data->left_context + data->right_context); return context->ResizeTensor(context, output, output_size); } template <typename T> void GenerateFeatures(TfLiteAudioMicrofrontendParams* data, const TfLiteTensor* input, TfLiteTensor* output) { const int16_t* audio_data = GetTensorData<int16_t>(input); int64_t audio_size = input->dims->data[0]; T* filterbanks_flat = GetTensorData<T>(output); int num_frames = 0; if (audio_size >= data->state->window.size) { num_frames = (input->dims->data[0] - data->state->window.size) / data->state->window.step + 1; } std::vector<std::vector<T>> frame_buffer(num_frames); int frame_index = 0; while (audio_size > 0) { size_t num_samples_read; struct FrontendOutput output = FrontendProcessSamples( data->state, audio_data, audio_size, &num_samples_read); audio_data += num_samples_read; audio_size -= num_samples_read; if (output.values != nullptr) { frame_buffer[frame_index].reserve(output.size); int i; for (i = 0; i < output.size; ++i) { frame_buffer[frame_index].push_back(static_cast<T>(output.values[i]) / data->out_scale); } ++frame_index; } } int index = 0; std::vector<T> pad(data->state->filterbank.num_channels, 0); int anchor; for (anchor = 0; anchor < frame_buffer.size(); anchor += data->frame_stride) { int frame; for (frame = anchor - data->left_context; frame <= anchor + data->right_context; ++frame) { std::vector<T>* feature; if (data->zero_padding && (frame < 0 || frame >= frame_buffer.size())) { feature = &pad; } else if (frame < 0) { feature = &frame_buffer[0]; } else if (frame >= frame_buffer.size()) { feature = &frame_buffer[frame_buffer.size() - 1]; } else { feature = &frame_buffer[frame]; } for (auto f : *feature) { 
filterbanks_flat[index++] = f; } } } } TfLiteStatus Eval(TfLiteContext* context, TfLiteNode* node) { auto* data = reinterpret_cast<TfLiteAudioMicrofrontendParams*>(node->user_data); FrontendReset(data->state); const TfLiteTensor* input; TF_LITE_ENSURE_OK(context, GetInputSafe(context, node, kInputTensor, &input)); TfLiteTensor* output; TF_LITE_ENSURE_OK(context, GetOutputSafe(context, node, kOutputTensor, &output)); if (data->out_float) { GenerateFeatures<float>(data, input, output); } else { GenerateFeatures<int32>(data, input, output); } return kTfLiteOk; } } // namespace audio_microfrontend TfLiteRegistration* Register_AUDIO_MICROFRONTEND() { static TfLiteRegistration r = { audio_microfrontend::Init, audio_microfrontend::Free, audio_microfrontend::Prepare, audio_microfrontend::Eval}; return &r; } } // namespace custom } // namespace ops } // namespace tflite
TfLiteStatus Prepare(TfLiteContext* context, TfLiteNode* node) { auto* data = reinterpret_cast<TfLiteAudioMicrofrontendParams*>(node->user_data); TF_LITE_ENSURE_EQ(context, NumInputs(node), 1); TF_LITE_ENSURE_EQ(context, NumOutputs(node), 1); const TfLiteTensor* input = GetInput(context, node, kInputTensor); TfLiteTensor* output = GetOutput(context, node, kOutputTensor); TF_LITE_ENSURE_EQ(context, NumDimensions(input), 1); TF_LITE_ENSURE_EQ(context, input->type, kTfLiteInt16); output->type = kTfLiteInt32; if (data->out_float) { output->type = kTfLiteFloat32; } TfLiteIntArray* output_size = TfLiteIntArrayCreate(2); int num_frames = 0; if (input->dims->data[0] >= data->state->window.size) { num_frames = (input->dims->data[0] - data->state->window.size) / data->state->window.step / data->frame_stride + 1; } output_size->data[0] = num_frames; output_size->data[1] = data->state->filterbank.num_channels * (1 + data->left_context + data->right_context); return context->ResizeTensor(context, output, output_size); }
TfLiteStatus Prepare(TfLiteContext* context, TfLiteNode* node) { auto* data = reinterpret_cast<TfLiteAudioMicrofrontendParams*>(node->user_data); TF_LITE_ENSURE_EQ(context, NumInputs(node), 1); TF_LITE_ENSURE_EQ(context, NumOutputs(node), 1); const TfLiteTensor* input; TF_LITE_ENSURE_OK(context, GetInputSafe(context, node, kInputTensor, &input)); TfLiteTensor* output; TF_LITE_ENSURE_OK(context, GetOutputSafe(context, node, kOutputTensor, &output)); TF_LITE_ENSURE_EQ(context, NumDimensions(input), 1); TF_LITE_ENSURE_EQ(context, input->type, kTfLiteInt16); output->type = kTfLiteInt32; if (data->out_float) { output->type = kTfLiteFloat32; } TfLiteIntArray* output_size = TfLiteIntArrayCreate(2); int num_frames = 0; if (input->dims->data[0] >= data->state->window.size) { num_frames = (input->dims->data[0] - data->state->window.size) / data->state->window.step / data->frame_stride + 1; } output_size->data[0] = num_frames; output_size->data[1] = data->state->filterbank.num_channels * (1 + data->left_context + data->right_context); return context->ResizeTensor(context, output, output_size); }
{'added': [(94, ' const TfLiteTensor* input;'), (95, ' TF_LITE_ENSURE_OK(context, GetInputSafe(context, node, kInputTensor, &input));'), (96, ' TfLiteTensor* output;'), (97, ' TF_LITE_ENSURE_OK(context,'), (98, ' GetOutputSafe(context, node, kOutputTensor, &output));'), (186, ' const TfLiteTensor* input;'), (187, ' TF_LITE_ENSURE_OK(context, GetInputSafe(context, node, kInputTensor, &input));'), (188, ' TfLiteTensor* output;'), (189, ' TF_LITE_ENSURE_OK(context,'), (190, ' GetOutputSafe(context, node, kOutputTensor, &output));')], 'deleted': [(94, ' const TfLiteTensor* input = GetInput(context, node, kInputTensor);'), (95, ' TfLiteTensor* output = GetOutput(context, node, kOutputTensor);'), (183, ' const TfLiteTensor* input = GetInput(context, node, kInputTensor);'), (184, ' TfLiteTensor* output = GetOutput(context, node, kOutputTensor);')]}
10
4
167
1,343
25
239
3
https://github.com/tensorflow/tensorflow
CVE-2020-15211
CWE-125
642
key.c
C
key_reject_and_link
/* Basic authentication token and access key management * * Copyright (C) 2004-2008 Red Hat, Inc. All Rights Reserved. * Written by David Howells (dhowells@redhat.com) * * This program is free software; you can redistribute it and/or * modify it under the terms of the GNU General Public License * as published by the Free Software Foundation; either version * 2 of the License, or (at your option) any later version. */ #include <linux/module.h> #include <linux/init.h> #include <linux/poison.h> #include <linux/sched.h> #include <linux/slab.h> #include <linux/security.h> #include <linux/workqueue.h> #include <linux/random.h> #include <linux/err.h> #include "internal.h" struct kmem_cache *key_jar; struct rb_root key_serial_tree; /* tree of keys indexed by serial */ DEFINE_SPINLOCK(key_serial_lock); struct rb_root key_user_tree; /* tree of quota records indexed by UID */ DEFINE_SPINLOCK(key_user_lock); unsigned int key_quota_root_maxkeys = 1000000; /* root's key count quota */ unsigned int key_quota_root_maxbytes = 25000000; /* root's key space quota */ unsigned int key_quota_maxkeys = 200; /* general key count quota */ unsigned int key_quota_maxbytes = 20000; /* general key space quota */ static LIST_HEAD(key_types_list); static DECLARE_RWSEM(key_types_sem); /* We serialise key instantiation and link */ DEFINE_MUTEX(key_construction_mutex); #ifdef KEY_DEBUGGING void __key_check(const struct key *key) { printk("__key_check: key %p {%08x} should be {%08x}\n", key, key->magic, KEY_DEBUG_MAGIC); BUG(); } #endif /* * Get the key quota record for a user, allocating a new record if one doesn't * already exist. 
*/ struct key_user *key_user_lookup(kuid_t uid) { struct key_user *candidate = NULL, *user; struct rb_node *parent, **p; try_again: parent = NULL; p = &key_user_tree.rb_node; spin_lock(&key_user_lock); /* search the tree for a user record with a matching UID */ while (*p) { parent = *p; user = rb_entry(parent, struct key_user, node); if (uid_lt(uid, user->uid)) p = &(*p)->rb_left; else if (uid_gt(uid, user->uid)) p = &(*p)->rb_right; else goto found; } /* if we get here, we failed to find a match in the tree */ if (!candidate) { /* allocate a candidate user record if we don't already have * one */ spin_unlock(&key_user_lock); user = NULL; candidate = kmalloc(sizeof(struct key_user), GFP_KERNEL); if (unlikely(!candidate)) goto out; /* the allocation may have scheduled, so we need to repeat the * search lest someone else added the record whilst we were * asleep */ goto try_again; } /* if we get here, then the user record still hadn't appeared on the * second pass - so we use the candidate record */ refcount_set(&candidate->usage, 1); atomic_set(&candidate->nkeys, 0); atomic_set(&candidate->nikeys, 0); candidate->uid = uid; candidate->qnkeys = 0; candidate->qnbytes = 0; spin_lock_init(&candidate->lock); mutex_init(&candidate->cons_lock); rb_link_node(&candidate->node, parent, p); rb_insert_color(&candidate->node, &key_user_tree); spin_unlock(&key_user_lock); user = candidate; goto out; /* okay - we found a user record for this UID */ found: refcount_inc(&user->usage); spin_unlock(&key_user_lock); kfree(candidate); out: return user; } /* * Dispose of a user structure */ void key_user_put(struct key_user *user) { if (refcount_dec_and_lock(&user->usage, &key_user_lock)) { rb_erase(&user->node, &key_user_tree); spin_unlock(&key_user_lock); kfree(user); } } /* * Allocate a serial number for a key. These are assigned randomly to avoid * security issues through covert channel problems. 
*/ static inline void key_alloc_serial(struct key *key) { struct rb_node *parent, **p; struct key *xkey; /* propose a random serial number and look for a hole for it in the * serial number tree */ do { get_random_bytes(&key->serial, sizeof(key->serial)); key->serial >>= 1; /* negative numbers are not permitted */ } while (key->serial < 3); spin_lock(&key_serial_lock); attempt_insertion: parent = NULL; p = &key_serial_tree.rb_node; while (*p) { parent = *p; xkey = rb_entry(parent, struct key, serial_node); if (key->serial < xkey->serial) p = &(*p)->rb_left; else if (key->serial > xkey->serial) p = &(*p)->rb_right; else goto serial_exists; } /* we've found a suitable hole - arrange for this key to occupy it */ rb_link_node(&key->serial_node, parent, p); rb_insert_color(&key->serial_node, &key_serial_tree); spin_unlock(&key_serial_lock); return; /* we found a key with the proposed serial number - walk the tree from * that point looking for the next unused serial number */ serial_exists: for (;;) { key->serial++; if (key->serial < 3) { key->serial = 3; goto attempt_insertion; } parent = rb_next(parent); if (!parent) goto attempt_insertion; xkey = rb_entry(parent, struct key, serial_node); if (key->serial < xkey->serial) goto attempt_insertion; } } /** * key_alloc - Allocate a key of the specified type. * @type: The type of key to allocate. * @desc: The key description to allow the key to be searched out. * @uid: The owner of the new key. * @gid: The group ID for the new key's group permissions. * @cred: The credentials specifying UID namespace. * @perm: The permissions mask of the new key. * @flags: Flags specifying quota properties. * @restrict_link: Optional link restriction for new keyrings. * * Allocate a key of the specified type with the attributes given. The key is * returned in an uninstantiated state and the caller needs to instantiate the * key before returning. 
* * The restrict_link structure (if not NULL) will be freed when the * keyring is destroyed, so it must be dynamically allocated. * * The user's key count quota is updated to reflect the creation of the key and * the user's key data quota has the default for the key type reserved. The * instantiation function should amend this as necessary. If insufficient * quota is available, -EDQUOT will be returned. * * The LSM security modules can prevent a key being created, in which case * -EACCES will be returned. * * Returns a pointer to the new key if successful and an error code otherwise. * * Note that the caller needs to ensure the key type isn't uninstantiated. * Internally this can be done by locking key_types_sem. Externally, this can * be done by either never unregistering the key type, or making sure * key_alloc() calls don't race with module unloading. */ struct key *key_alloc(struct key_type *type, const char *desc, kuid_t uid, kgid_t gid, const struct cred *cred, key_perm_t perm, unsigned long flags, struct key_restriction *restrict_link) { struct key_user *user = NULL; struct key *key; size_t desclen, quotalen; int ret; key = ERR_PTR(-EINVAL); if (!desc || !*desc) goto error; if (type->vet_description) { ret = type->vet_description(desc); if (ret < 0) { key = ERR_PTR(ret); goto error; } } desclen = strlen(desc); quotalen = desclen + 1 + type->def_datalen; /* get hold of the key tracking for this user */ user = key_user_lookup(uid); if (!user) goto no_memory_1; /* check that the user's quota permits allocation of another key and * its description */ if (!(flags & KEY_ALLOC_NOT_IN_QUOTA)) { unsigned maxkeys = uid_eq(uid, GLOBAL_ROOT_UID) ? key_quota_root_maxkeys : key_quota_maxkeys; unsigned maxbytes = uid_eq(uid, GLOBAL_ROOT_UID) ? 
key_quota_root_maxbytes : key_quota_maxbytes; spin_lock(&user->lock); if (!(flags & KEY_ALLOC_QUOTA_OVERRUN)) { if (user->qnkeys + 1 >= maxkeys || user->qnbytes + quotalen >= maxbytes || user->qnbytes + quotalen < user->qnbytes) goto no_quota; } user->qnkeys++; user->qnbytes += quotalen; spin_unlock(&user->lock); } /* allocate and initialise the key and its description */ key = kmem_cache_zalloc(key_jar, GFP_KERNEL); if (!key) goto no_memory_2; key->index_key.desc_len = desclen; key->index_key.description = kmemdup(desc, desclen + 1, GFP_KERNEL); if (!key->index_key.description) goto no_memory_3; refcount_set(&key->usage, 1); init_rwsem(&key->sem); lockdep_set_class(&key->sem, &type->lock_class); key->index_key.type = type; key->user = user; key->quotalen = quotalen; key->datalen = type->def_datalen; key->uid = uid; key->gid = gid; key->perm = perm; key->restrict_link = restrict_link; if (!(flags & KEY_ALLOC_NOT_IN_QUOTA)) key->flags |= 1 << KEY_FLAG_IN_QUOTA; if (flags & KEY_ALLOC_BUILT_IN) key->flags |= 1 << KEY_FLAG_BUILTIN; if (flags & KEY_ALLOC_UID_KEYRING) key->flags |= 1 << KEY_FLAG_UID_KEYRING; #ifdef KEY_DEBUGGING key->magic = KEY_DEBUG_MAGIC; #endif /* let the security module know about the key */ ret = security_key_alloc(key, cred, flags); if (ret < 0) goto security_error; /* publish the key by giving it a serial number */ atomic_inc(&user->nkeys); key_alloc_serial(key); error: return key; security_error: kfree(key->description); kmem_cache_free(key_jar, key); if (!(flags & KEY_ALLOC_NOT_IN_QUOTA)) { spin_lock(&user->lock); user->qnkeys--; user->qnbytes -= quotalen; spin_unlock(&user->lock); } key_user_put(user); key = ERR_PTR(ret); goto error; no_memory_3: kmem_cache_free(key_jar, key); no_memory_2: if (!(flags & KEY_ALLOC_NOT_IN_QUOTA)) { spin_lock(&user->lock); user->qnkeys--; user->qnbytes -= quotalen; spin_unlock(&user->lock); } key_user_put(user); no_memory_1: key = ERR_PTR(-ENOMEM); goto error; no_quota: spin_unlock(&user->lock); 
key_user_put(user); key = ERR_PTR(-EDQUOT); goto error; } EXPORT_SYMBOL(key_alloc); /** * key_payload_reserve - Adjust data quota reservation for the key's payload * @key: The key to make the reservation for. * @datalen: The amount of data payload the caller now wants. * * Adjust the amount of the owning user's key data quota that a key reserves. * If the amount is increased, then -EDQUOT may be returned if there isn't * enough free quota available. * * If successful, 0 is returned. */ int key_payload_reserve(struct key *key, size_t datalen) { int delta = (int)datalen - key->datalen; int ret = 0; key_check(key); /* contemplate the quota adjustment */ if (delta != 0 && test_bit(KEY_FLAG_IN_QUOTA, &key->flags)) { unsigned maxbytes = uid_eq(key->user->uid, GLOBAL_ROOT_UID) ? key_quota_root_maxbytes : key_quota_maxbytes; spin_lock(&key->user->lock); if (delta > 0 && (key->user->qnbytes + delta >= maxbytes || key->user->qnbytes + delta < key->user->qnbytes)) { ret = -EDQUOT; } else { key->user->qnbytes += delta; key->quotalen += delta; } spin_unlock(&key->user->lock); } /* change the recorded data length if that didn't generate an error */ if (ret == 0) key->datalen = datalen; return ret; } EXPORT_SYMBOL(key_payload_reserve); /* * Instantiate a key and link it into the target keyring atomically. Must be * called with the target keyring's semaphore writelocked. The target key's * semaphore need not be locked as instantiation is serialised by * key_construction_mutex. 
*/ static int __key_instantiate_and_link(struct key *key, struct key_preparsed_payload *prep, struct key *keyring, struct key *authkey, struct assoc_array_edit **_edit) { int ret, awaken; key_check(key); key_check(keyring); awaken = 0; ret = -EBUSY; mutex_lock(&key_construction_mutex); /* can't instantiate twice */ if (!test_bit(KEY_FLAG_INSTANTIATED, &key->flags)) { /* instantiate the key */ ret = key->type->instantiate(key, prep); if (ret == 0) { /* mark the key as being instantiated */ atomic_inc(&key->user->nikeys); set_bit(KEY_FLAG_INSTANTIATED, &key->flags); if (test_and_clear_bit(KEY_FLAG_USER_CONSTRUCT, &key->flags)) awaken = 1; /* and link it into the destination keyring */ if (keyring) { if (test_bit(KEY_FLAG_KEEP, &keyring->flags)) set_bit(KEY_FLAG_KEEP, &key->flags); __key_link(key, _edit); } /* disable the authorisation key */ if (authkey) key_revoke(authkey); if (prep->expiry != TIME_T_MAX) { key->expiry = prep->expiry; key_schedule_gc(prep->expiry + key_gc_delay); } } } mutex_unlock(&key_construction_mutex); /* wake up anyone waiting for a key to be constructed */ if (awaken) wake_up_bit(&key->flags, KEY_FLAG_USER_CONSTRUCT); return ret; } /** * key_instantiate_and_link - Instantiate a key and link it into the keyring. * @key: The key to instantiate. * @data: The data to use to instantiate the keyring. * @datalen: The length of @data. * @keyring: Keyring to create a link in on success (or NULL). * @authkey: The authorisation token permitting instantiation. * * Instantiate a key that's in the uninstantiated state using the provided data * and, if successful, link it in to the destination keyring if one is * supplied. * * If successful, 0 is returned, the authorisation token is revoked and anyone * waiting for the key is woken up. If the key was already instantiated, * -EBUSY will be returned. 
*/ int key_instantiate_and_link(struct key *key, const void *data, size_t datalen, struct key *keyring, struct key *authkey) { struct key_preparsed_payload prep; struct assoc_array_edit *edit; int ret; memset(&prep, 0, sizeof(prep)); prep.data = data; prep.datalen = datalen; prep.quotalen = key->type->def_datalen; prep.expiry = TIME_T_MAX; if (key->type->preparse) { ret = key->type->preparse(&prep); if (ret < 0) goto error; } if (keyring) { ret = __key_link_begin(keyring, &key->index_key, &edit); if (ret < 0) goto error; if (keyring->restrict_link && keyring->restrict_link->check) { struct key_restriction *keyres = keyring->restrict_link; ret = keyres->check(keyring, key->type, &prep.payload, keyres->key); if (ret < 0) goto error_link_end; } } ret = __key_instantiate_and_link(key, &prep, keyring, authkey, &edit); error_link_end: if (keyring) __key_link_end(keyring, &key->index_key, edit); error: if (key->type->preparse) key->type->free_preparse(&prep); return ret; } EXPORT_SYMBOL(key_instantiate_and_link); /** * key_reject_and_link - Negatively instantiate a key and link it into the keyring. * @key: The key to instantiate. * @timeout: The timeout on the negative key. * @error: The error to return when the key is hit. * @keyring: Keyring to create a link in on success (or NULL). * @authkey: The authorisation token permitting instantiation. * * Negatively instantiate a key that's in the uninstantiated state and, if * successful, set its timeout and stored error and link it in to the * destination keyring if one is supplied. The key and any links to the key * will be automatically garbage collected after the timeout expires. * * Negative keys are used to rate limit repeated request_key() calls by causing * them to return the stored error code (typically ENOKEY) until the negative * key expires. * * If successful, 0 is returned, the authorisation token is revoked and anyone * waiting for the key is woken up. 
If the key was already instantiated,
 * -EBUSY will be returned.
 */
int key_reject_and_link(struct key *key,
			unsigned timeout,
			unsigned error,
			struct key *keyring,
			struct key *authkey)
{
	struct assoc_array_edit *edit;
	struct timespec now;
	int ret, awaken, link_ret = 0;

	key_check(key);
	key_check(keyring);

	awaken = 0;
	ret = -EBUSY;

	if (keyring) {
		/* a keyring with a link restriction may not take links made
		 * on its behalf here */
		if (keyring->restrict_link)
			return -EPERM;

		link_ret = __key_link_begin(keyring, &key->index_key, &edit);
	}

	/* instantiation (positive or negative) is serialised globally */
	mutex_lock(&key_construction_mutex);

	/* can't instantiate twice */
	if (!test_bit(KEY_FLAG_INSTANTIATED, &key->flags)) {
		/* mark the key as being negatively instantiated */
		atomic_inc(&key->user->nikeys);
		key->reject_error = -error;
		/* NOTE(review): the smp_wmb() orders the reject_error store
		 * before the flag stores below, but lookups that test
		 * KEY_FLAG_NEGATIVE and then read reject_error need a
		 * matching read-side barrier; later kernels collapse this
		 * into a single release-store of key->state (see the
		 * mark_key_instantiated() variant elsewhere in this file) —
		 * confirm which scheme this tree is meant to use. */
		smp_wmb();
		set_bit(KEY_FLAG_NEGATIVE, &key->flags);
		set_bit(KEY_FLAG_INSTANTIATED, &key->flags);
		now = current_kernel_time();
		/* negative keys are time-limited; gc reaps them afterwards */
		key->expiry = now.tv_sec + timeout;
		key_schedule_gc(key->expiry + key_gc_delay);

		if (test_and_clear_bit(KEY_FLAG_USER_CONSTRUCT, &key->flags))
			awaken = 1;

		ret = 0;

		/* and link it into the destination keyring */
		if (keyring && link_ret == 0)
			__key_link(key, &edit);

		/* disable the authorisation key */
		if (authkey)
			key_revoke(authkey);
	}

	mutex_unlock(&key_construction_mutex);

	if (keyring && link_ret == 0)
		__key_link_end(keyring, &key->index_key, edit);

	/* wake up anyone waiting for a key to be constructed */
	if (awaken)
		wake_up_bit(&key->flags, KEY_FLAG_USER_CONSTRUCT);

	/* report any link failure only if the negation itself succeeded */
	return ret == 0 ? link_ret : ret;
}
EXPORT_SYMBOL(key_reject_and_link);

/**
 * key_put - Discard a reference to a key.
 * @key: The key to discard a reference from.
 *
 * Discard a reference to a key, and when all the references are gone, we
 * schedule the cleanup task to come and pull it out of the tree in process
 * context at some later time.
 */
void key_put(struct key *key)
{
	if (key) {
		key_check(key);

		/* dropping the last reference defers destruction to the
		 * garbage collector rather than freeing synchronously */
		if (refcount_dec_and_test(&key->usage))
			schedule_work(&key_gc_work);
	}
}
EXPORT_SYMBOL(key_put);

/*
 * Find a key by its serial number.
*/ struct key *key_lookup(key_serial_t id) { struct rb_node *n; struct key *key; spin_lock(&key_serial_lock); /* search the tree for the specified key */ n = key_serial_tree.rb_node; while (n) { key = rb_entry(n, struct key, serial_node); if (id < key->serial) n = n->rb_left; else if (id > key->serial) n = n->rb_right; else goto found; } not_found: key = ERR_PTR(-ENOKEY); goto error; found: /* A key is allowed to be looked up only if someone still owns a * reference to it - otherwise it's awaiting the gc. */ if (!refcount_inc_not_zero(&key->usage)) goto not_found; error: spin_unlock(&key_serial_lock); return key; } /* * Find and lock the specified key type against removal. * * We return with the sem read-locked if successful. If the type wasn't * available -ENOKEY is returned instead. */ struct key_type *key_type_lookup(const char *type) { struct key_type *ktype; down_read(&key_types_sem); /* look up the key type to see if it's one of the registered kernel * types */ list_for_each_entry(ktype, &key_types_list, link) { if (strcmp(ktype->name, type) == 0) goto found_kernel_type; } up_read(&key_types_sem); ktype = ERR_PTR(-ENOKEY); found_kernel_type: return ktype; } void key_set_timeout(struct key *key, unsigned timeout) { struct timespec now; time_t expiry = 0; /* make the changes with the locks held to prevent races */ down_write(&key->sem); if (timeout > 0) { now = current_kernel_time(); expiry = now.tv_sec + timeout; } key->expiry = expiry; key_schedule_gc(key->expiry + key_gc_delay); up_write(&key->sem); } EXPORT_SYMBOL_GPL(key_set_timeout); /* * Unlock a key type locked by key_type_lookup(). */ void key_type_put(struct key_type *ktype) { up_read(&key_types_sem); } /* * Attempt to update an existing key. * * The key is given to us with an incremented refcount that we need to discard * if we get an error. 
 */
static inline key_ref_t __key_update(key_ref_t key_ref,
				     struct key_preparsed_payload *prep)
{
	struct key *key = key_ref_to_ptr(key_ref);
	int ret;

	/* need write permission on the key to update it */
	ret = key_permission(key_ref, KEY_NEED_WRITE);
	if (ret < 0)
		goto error;

	/* the key type must support in-place update */
	ret = -EEXIST;
	if (!key->type->update)
		goto error;

	down_write(&key->sem);

	ret = key->type->update(key, prep);
	if (ret == 0)
		/* updating a negative key instantiates it */
		/* NOTE(review): this clear_bit() has no ordering against the
		 * payload update above, so a concurrent search can observe
		 * the key as positive before the new payload is visible;
		 * later kernels replace this with a release-store of
		 * key->state (mark_key_instantiated()) — see the second
		 * variant of this function elsewhere in this file and
		 * confirm which scheme this tree should carry. */
		clear_bit(KEY_FLAG_NEGATIVE, &key->flags);

	up_write(&key->sem);

	if (ret < 0)
		goto error;
out:
	return key_ref;

error:
	/* the caller handed us a reference; discard it on failure */
	key_put(key);
	key_ref = ERR_PTR(ret);
	goto out;
}

/**
 * key_create_or_update - Update or create and instantiate a key.
 * @keyring_ref: A pointer to the destination keyring with possession flag.
 * @type: The type of key.
 * @description: The searchable description for the key.
 * @payload: The data to use to instantiate or update the key.
 * @plen: The length of @payload.
 * @perm: The permissions mask for a new key.
 * @flags: The quota flags for a new key.
 *
 * Search the destination keyring for a key of the same description and if one
 * is found, update it, otherwise create and instantiate a new one and create a
 * link to it from that keyring.
 *
 * If perm is KEY_PERM_UNDEF then an appropriate key permissions mask will be
 * concocted.
 *
 * Returns a pointer to the new key if successful, -ENODEV if the key type
 * wasn't available, -ENOTDIR if the keyring wasn't a keyring, -EACCES if the
 * caller isn't permitted to modify the keyring or the LSM did not permit
 * creation of the key.
 *
 * On success, the possession flag from the keyring ref will be tacked on to
 * the key ref before it is returned.
*/ key_ref_t key_create_or_update(key_ref_t keyring_ref, const char *type, const char *description, const void *payload, size_t plen, key_perm_t perm, unsigned long flags) { struct keyring_index_key index_key = { .description = description, }; struct key_preparsed_payload prep; struct assoc_array_edit *edit; const struct cred *cred = current_cred(); struct key *keyring, *key = NULL; key_ref_t key_ref; int ret; struct key_restriction *restrict_link = NULL; /* look up the key type to see if it's one of the registered kernel * types */ index_key.type = key_type_lookup(type); if (IS_ERR(index_key.type)) { key_ref = ERR_PTR(-ENODEV); goto error; } key_ref = ERR_PTR(-EINVAL); if (!index_key.type->instantiate || (!index_key.description && !index_key.type->preparse)) goto error_put_type; keyring = key_ref_to_ptr(keyring_ref); key_check(keyring); key_ref = ERR_PTR(-EPERM); if (!(flags & KEY_ALLOC_BYPASS_RESTRICTION)) restrict_link = keyring->restrict_link; key_ref = ERR_PTR(-ENOTDIR); if (keyring->type != &key_type_keyring) goto error_put_type; memset(&prep, 0, sizeof(prep)); prep.data = payload; prep.datalen = plen; prep.quotalen = index_key.type->def_datalen; prep.expiry = TIME_T_MAX; if (index_key.type->preparse) { ret = index_key.type->preparse(&prep); if (ret < 0) { key_ref = ERR_PTR(ret); goto error_free_prep; } if (!index_key.description) index_key.description = prep.description; key_ref = ERR_PTR(-EINVAL); if (!index_key.description) goto error_free_prep; } index_key.desc_len = strlen(index_key.description); ret = __key_link_begin(keyring, &index_key, &edit); if (ret < 0) { key_ref = ERR_PTR(ret); goto error_free_prep; } if (restrict_link && restrict_link->check) { ret = restrict_link->check(keyring, index_key.type, &prep.payload, restrict_link->key); if (ret < 0) { key_ref = ERR_PTR(ret); goto error_link_end; } } /* if we're going to allocate a new key, we're going to have * to modify the keyring */ ret = key_permission(keyring_ref, KEY_NEED_WRITE); if (ret < 0) { 
key_ref = ERR_PTR(ret); goto error_link_end; } /* if it's possible to update this type of key, search for an existing * key of the same type and description in the destination keyring and * update that instead if possible */ if (index_key.type->update) { key_ref = find_key_to_update(keyring_ref, &index_key); if (key_ref) goto found_matching_key; } /* if the client doesn't provide, decide on the permissions we want */ if (perm == KEY_PERM_UNDEF) { perm = KEY_POS_VIEW | KEY_POS_SEARCH | KEY_POS_LINK | KEY_POS_SETATTR; perm |= KEY_USR_VIEW; if (index_key.type->read) perm |= KEY_POS_READ; if (index_key.type == &key_type_keyring || index_key.type->update) perm |= KEY_POS_WRITE; } /* allocate a new key */ key = key_alloc(index_key.type, index_key.description, cred->fsuid, cred->fsgid, cred, perm, flags, NULL); if (IS_ERR(key)) { key_ref = ERR_CAST(key); goto error_link_end; } /* instantiate it and link it into the target keyring */ ret = __key_instantiate_and_link(key, &prep, keyring, NULL, &edit); if (ret < 0) { key_put(key); key_ref = ERR_PTR(ret); goto error_link_end; } key_ref = make_key_ref(key, is_key_possessed(keyring_ref)); error_link_end: __key_link_end(keyring, &index_key, edit); error_free_prep: if (index_key.type->preparse) index_key.type->free_preparse(&prep); error_put_type: key_type_put(index_key.type); error: return key_ref; found_matching_key: /* we found a matching key, so we're going to try to update it * - we can drop the locks first as we have the key pinned */ __key_link_end(keyring, &index_key, edit); key_ref = __key_update(key_ref, &prep); goto error_free_prep; } EXPORT_SYMBOL(key_create_or_update); /** * key_update - Update a key's contents. * @key_ref: The pointer (plus possession flag) to the key. * @payload: The data to be used to update the key. * @plen: The length of @payload. * * Attempt to update the contents of a key with the given payload data. The * caller must be granted Write permission on the key. 
Negative keys can be
 * instantiated by this method.
 *
 * Returns 0 on success, -EACCES if not permitted and -EOPNOTSUPP if the key
 * type does not support updating.  The key type may return other errors.
 */
int key_update(key_ref_t key_ref, const void *payload, size_t plen)
{
	struct key_preparsed_payload prep;
	struct key *key = key_ref_to_ptr(key_ref);
	int ret;

	key_check(key);

	/* the key must be writable */
	ret = key_permission(key_ref, KEY_NEED_WRITE);
	if (ret < 0)
		return ret;

	/* attempt to update it if supported */
	if (!key->type->update)
		return -EOPNOTSUPP;

	/* hand the payload to the type's preparser first, if it has one */
	memset(&prep, 0, sizeof(prep));
	prep.data = payload;
	prep.datalen = plen;
	prep.quotalen = key->type->def_datalen;
	prep.expiry = TIME_T_MAX;
	if (key->type->preparse) {
		ret = key->type->preparse(&prep);
		if (ret < 0)
			goto error;
	}

	down_write(&key->sem);

	ret = key->type->update(key, &prep);
	if (ret == 0)
		/* updating a negative key instantiates it */
		/* NOTE(review): clear_bit() here is not ordered against the
		 * payload update, so a concurrent keyring search can see the
		 * key go positive before the new payload is visible; later
		 * kernels use a release-store of key->state instead
		 * (mark_key_instantiated()) — compare the second variant of
		 * this function elsewhere in this file and confirm which
		 * scheme this tree should carry. */
		clear_bit(KEY_FLAG_NEGATIVE, &key->flags);

	up_write(&key->sem);

error:
	/* preparsed data is always freed, whether or not update succeeded */
	if (key->type->preparse)
		key->type->free_preparse(&prep);
	return ret;
}
EXPORT_SYMBOL(key_update);

/**
 * key_revoke - Revoke a key.
 * @key: The key to be revoked.
 *
 * Mark a key as being revoked and ask the type to free up its resources.  The
 * revocation timeout is set and the key and all its links will be
 * automatically garbage collected after key_gc_delay amount of time if they
 * are not manually dealt with first.
*/ void key_revoke(struct key *key) { struct timespec now; time_t time; key_check(key); /* make sure no one's trying to change or use the key when we mark it * - we tell lockdep that we might nest because we might be revoking an * authorisation key whilst holding the sem on a key we've just * instantiated */ down_write_nested(&key->sem, 1); if (!test_and_set_bit(KEY_FLAG_REVOKED, &key->flags) && key->type->revoke) key->type->revoke(key); /* set the death time to no more than the expiry time */ now = current_kernel_time(); time = now.tv_sec; if (key->revoked_at == 0 || key->revoked_at > time) { key->revoked_at = time; key_schedule_gc(key->revoked_at + key_gc_delay); } up_write(&key->sem); } EXPORT_SYMBOL(key_revoke); /** * key_invalidate - Invalidate a key. * @key: The key to be invalidated. * * Mark a key as being invalidated and have it cleaned up immediately. The key * is ignored by all searches and other operations from this point. */ void key_invalidate(struct key *key) { kenter("%d", key_serial(key)); key_check(key); if (!test_bit(KEY_FLAG_INVALIDATED, &key->flags)) { down_write_nested(&key->sem, 1); if (!test_and_set_bit(KEY_FLAG_INVALIDATED, &key->flags)) key_schedule_gc_links(); up_write(&key->sem); } } EXPORT_SYMBOL(key_invalidate); /** * generic_key_instantiate - Simple instantiation of a key from preparsed data * @key: The key to be instantiated * @prep: The preparsed data to load. * * Instantiate a key from preparsed data. We assume we can just copy the data * in directly and clear the old pointers. * * This can be pointed to directly by the key type instantiate op pointer. 
*/ int generic_key_instantiate(struct key *key, struct key_preparsed_payload *prep) { int ret; pr_devel("==>%s()\n", __func__); ret = key_payload_reserve(key, prep->quotalen); if (ret == 0) { rcu_assign_keypointer(key, prep->payload.data[0]); key->payload.data[1] = prep->payload.data[1]; key->payload.data[2] = prep->payload.data[2]; key->payload.data[3] = prep->payload.data[3]; prep->payload.data[0] = NULL; prep->payload.data[1] = NULL; prep->payload.data[2] = NULL; prep->payload.data[3] = NULL; } pr_devel("<==%s() = %d\n", __func__, ret); return ret; } EXPORT_SYMBOL(generic_key_instantiate); /** * register_key_type - Register a type of key. * @ktype: The new key type. * * Register a new key type. * * Returns 0 on success or -EEXIST if a type of this name already exists. */ int register_key_type(struct key_type *ktype) { struct key_type *p; int ret; memset(&ktype->lock_class, 0, sizeof(ktype->lock_class)); ret = -EEXIST; down_write(&key_types_sem); /* disallow key types with the same name */ list_for_each_entry(p, &key_types_list, link) { if (strcmp(p->name, ktype->name) == 0) goto out; } /* store the type */ list_add(&ktype->link, &key_types_list); pr_notice("Key type %s registered\n", ktype->name); ret = 0; out: up_write(&key_types_sem); return ret; } EXPORT_SYMBOL(register_key_type); /** * unregister_key_type - Unregister a type of key. * @ktype: The key type. * * Unregister a key type and mark all the extant keys of this type as dead. * Those keys of this type are then destroyed to get rid of their payloads and * they and their links will be garbage collected as soon as possible. */ void unregister_key_type(struct key_type *ktype) { down_write(&key_types_sem); list_del_init(&ktype->link); downgrade_write(&key_types_sem); key_gc_keytype(ktype); pr_notice("Key type %s unregistered\n", ktype->name); up_read(&key_types_sem); } EXPORT_SYMBOL(unregister_key_type); /* * Initialise the key management state. 
 */
void __init key_init(void)
{
	/* allocate a slab in which we can store keys; SLAB_PANIC means boot
	 * fails outright if this cannot be created */
	key_jar = kmem_cache_create("key_jar", sizeof(struct key),
			0, SLAB_HWCACHE_ALIGN|SLAB_PANIC, NULL);

	/* add the special key types that are built in rather than registered
	 * through register_key_type() */
	list_add_tail(&key_type_keyring.link, &key_types_list);
	list_add_tail(&key_type_dead.link, &key_types_list);
	list_add_tail(&key_type_user.link, &key_types_list);
	list_add_tail(&key_type_logon.link, &key_types_list);

	/* record the root user tracking by seeding the quota tree with the
	 * statically-allocated root record */
	rb_link_node(&root_key_user.node, NULL, &key_user_tree.rb_node);
	rb_insert_color(&root_key_user.node, &key_user_tree);
}
/* Basic authentication token and access key management * * Copyright (C) 2004-2008 Red Hat, Inc. All Rights Reserved. * Written by David Howells (dhowells@redhat.com) * * This program is free software; you can redistribute it and/or * modify it under the terms of the GNU General Public License * as published by the Free Software Foundation; either version * 2 of the License, or (at your option) any later version. */ #include <linux/module.h> #include <linux/init.h> #include <linux/poison.h> #include <linux/sched.h> #include <linux/slab.h> #include <linux/security.h> #include <linux/workqueue.h> #include <linux/random.h> #include <linux/err.h> #include "internal.h" struct kmem_cache *key_jar; struct rb_root key_serial_tree; /* tree of keys indexed by serial */ DEFINE_SPINLOCK(key_serial_lock); struct rb_root key_user_tree; /* tree of quota records indexed by UID */ DEFINE_SPINLOCK(key_user_lock); unsigned int key_quota_root_maxkeys = 1000000; /* root's key count quota */ unsigned int key_quota_root_maxbytes = 25000000; /* root's key space quota */ unsigned int key_quota_maxkeys = 200; /* general key count quota */ unsigned int key_quota_maxbytes = 20000; /* general key space quota */ static LIST_HEAD(key_types_list); static DECLARE_RWSEM(key_types_sem); /* We serialise key instantiation and link */ DEFINE_MUTEX(key_construction_mutex); #ifdef KEY_DEBUGGING void __key_check(const struct key *key) { printk("__key_check: key %p {%08x} should be {%08x}\n", key, key->magic, KEY_DEBUG_MAGIC); BUG(); } #endif /* * Get the key quota record for a user, allocating a new record if one doesn't * already exist. 
*/ struct key_user *key_user_lookup(kuid_t uid) { struct key_user *candidate = NULL, *user; struct rb_node *parent, **p; try_again: parent = NULL; p = &key_user_tree.rb_node; spin_lock(&key_user_lock); /* search the tree for a user record with a matching UID */ while (*p) { parent = *p; user = rb_entry(parent, struct key_user, node); if (uid_lt(uid, user->uid)) p = &(*p)->rb_left; else if (uid_gt(uid, user->uid)) p = &(*p)->rb_right; else goto found; } /* if we get here, we failed to find a match in the tree */ if (!candidate) { /* allocate a candidate user record if we don't already have * one */ spin_unlock(&key_user_lock); user = NULL; candidate = kmalloc(sizeof(struct key_user), GFP_KERNEL); if (unlikely(!candidate)) goto out; /* the allocation may have scheduled, so we need to repeat the * search lest someone else added the record whilst we were * asleep */ goto try_again; } /* if we get here, then the user record still hadn't appeared on the * second pass - so we use the candidate record */ refcount_set(&candidate->usage, 1); atomic_set(&candidate->nkeys, 0); atomic_set(&candidate->nikeys, 0); candidate->uid = uid; candidate->qnkeys = 0; candidate->qnbytes = 0; spin_lock_init(&candidate->lock); mutex_init(&candidate->cons_lock); rb_link_node(&candidate->node, parent, p); rb_insert_color(&candidate->node, &key_user_tree); spin_unlock(&key_user_lock); user = candidate; goto out; /* okay - we found a user record for this UID */ found: refcount_inc(&user->usage); spin_unlock(&key_user_lock); kfree(candidate); out: return user; } /* * Dispose of a user structure */ void key_user_put(struct key_user *user) { if (refcount_dec_and_lock(&user->usage, &key_user_lock)) { rb_erase(&user->node, &key_user_tree); spin_unlock(&key_user_lock); kfree(user); } } /* * Allocate a serial number for a key. These are assigned randomly to avoid * security issues through covert channel problems. 
*/ static inline void key_alloc_serial(struct key *key) { struct rb_node *parent, **p; struct key *xkey; /* propose a random serial number and look for a hole for it in the * serial number tree */ do { get_random_bytes(&key->serial, sizeof(key->serial)); key->serial >>= 1; /* negative numbers are not permitted */ } while (key->serial < 3); spin_lock(&key_serial_lock); attempt_insertion: parent = NULL; p = &key_serial_tree.rb_node; while (*p) { parent = *p; xkey = rb_entry(parent, struct key, serial_node); if (key->serial < xkey->serial) p = &(*p)->rb_left; else if (key->serial > xkey->serial) p = &(*p)->rb_right; else goto serial_exists; } /* we've found a suitable hole - arrange for this key to occupy it */ rb_link_node(&key->serial_node, parent, p); rb_insert_color(&key->serial_node, &key_serial_tree); spin_unlock(&key_serial_lock); return; /* we found a key with the proposed serial number - walk the tree from * that point looking for the next unused serial number */ serial_exists: for (;;) { key->serial++; if (key->serial < 3) { key->serial = 3; goto attempt_insertion; } parent = rb_next(parent); if (!parent) goto attempt_insertion; xkey = rb_entry(parent, struct key, serial_node); if (key->serial < xkey->serial) goto attempt_insertion; } } /** * key_alloc - Allocate a key of the specified type. * @type: The type of key to allocate. * @desc: The key description to allow the key to be searched out. * @uid: The owner of the new key. * @gid: The group ID for the new key's group permissions. * @cred: The credentials specifying UID namespace. * @perm: The permissions mask of the new key. * @flags: Flags specifying quota properties. * @restrict_link: Optional link restriction for new keyrings. * * Allocate a key of the specified type with the attributes given. The key is * returned in an uninstantiated state and the caller needs to instantiate the * key before returning. 
* * The restrict_link structure (if not NULL) will be freed when the * keyring is destroyed, so it must be dynamically allocated. * * The user's key count quota is updated to reflect the creation of the key and * the user's key data quota has the default for the key type reserved. The * instantiation function should amend this as necessary. If insufficient * quota is available, -EDQUOT will be returned. * * The LSM security modules can prevent a key being created, in which case * -EACCES will be returned. * * Returns a pointer to the new key if successful and an error code otherwise. * * Note that the caller needs to ensure the key type isn't uninstantiated. * Internally this can be done by locking key_types_sem. Externally, this can * be done by either never unregistering the key type, or making sure * key_alloc() calls don't race with module unloading. */ struct key *key_alloc(struct key_type *type, const char *desc, kuid_t uid, kgid_t gid, const struct cred *cred, key_perm_t perm, unsigned long flags, struct key_restriction *restrict_link) { struct key_user *user = NULL; struct key *key; size_t desclen, quotalen; int ret; key = ERR_PTR(-EINVAL); if (!desc || !*desc) goto error; if (type->vet_description) { ret = type->vet_description(desc); if (ret < 0) { key = ERR_PTR(ret); goto error; } } desclen = strlen(desc); quotalen = desclen + 1 + type->def_datalen; /* get hold of the key tracking for this user */ user = key_user_lookup(uid); if (!user) goto no_memory_1; /* check that the user's quota permits allocation of another key and * its description */ if (!(flags & KEY_ALLOC_NOT_IN_QUOTA)) { unsigned maxkeys = uid_eq(uid, GLOBAL_ROOT_UID) ? key_quota_root_maxkeys : key_quota_maxkeys; unsigned maxbytes = uid_eq(uid, GLOBAL_ROOT_UID) ? 
key_quota_root_maxbytes : key_quota_maxbytes; spin_lock(&user->lock); if (!(flags & KEY_ALLOC_QUOTA_OVERRUN)) { if (user->qnkeys + 1 >= maxkeys || user->qnbytes + quotalen >= maxbytes || user->qnbytes + quotalen < user->qnbytes) goto no_quota; } user->qnkeys++; user->qnbytes += quotalen; spin_unlock(&user->lock); } /* allocate and initialise the key and its description */ key = kmem_cache_zalloc(key_jar, GFP_KERNEL); if (!key) goto no_memory_2; key->index_key.desc_len = desclen; key->index_key.description = kmemdup(desc, desclen + 1, GFP_KERNEL); if (!key->index_key.description) goto no_memory_3; refcount_set(&key->usage, 1); init_rwsem(&key->sem); lockdep_set_class(&key->sem, &type->lock_class); key->index_key.type = type; key->user = user; key->quotalen = quotalen; key->datalen = type->def_datalen; key->uid = uid; key->gid = gid; key->perm = perm; key->restrict_link = restrict_link; if (!(flags & KEY_ALLOC_NOT_IN_QUOTA)) key->flags |= 1 << KEY_FLAG_IN_QUOTA; if (flags & KEY_ALLOC_BUILT_IN) key->flags |= 1 << KEY_FLAG_BUILTIN; if (flags & KEY_ALLOC_UID_KEYRING) key->flags |= 1 << KEY_FLAG_UID_KEYRING; #ifdef KEY_DEBUGGING key->magic = KEY_DEBUG_MAGIC; #endif /* let the security module know about the key */ ret = security_key_alloc(key, cred, flags); if (ret < 0) goto security_error; /* publish the key by giving it a serial number */ atomic_inc(&user->nkeys); key_alloc_serial(key); error: return key; security_error: kfree(key->description); kmem_cache_free(key_jar, key); if (!(flags & KEY_ALLOC_NOT_IN_QUOTA)) { spin_lock(&user->lock); user->qnkeys--; user->qnbytes -= quotalen; spin_unlock(&user->lock); } key_user_put(user); key = ERR_PTR(ret); goto error; no_memory_3: kmem_cache_free(key_jar, key); no_memory_2: if (!(flags & KEY_ALLOC_NOT_IN_QUOTA)) { spin_lock(&user->lock); user->qnkeys--; user->qnbytes -= quotalen; spin_unlock(&user->lock); } key_user_put(user); no_memory_1: key = ERR_PTR(-ENOMEM); goto error; no_quota: spin_unlock(&user->lock); 
key_user_put(user); key = ERR_PTR(-EDQUOT); goto error; } EXPORT_SYMBOL(key_alloc); /** * key_payload_reserve - Adjust data quota reservation for the key's payload * @key: The key to make the reservation for. * @datalen: The amount of data payload the caller now wants. * * Adjust the amount of the owning user's key data quota that a key reserves. * If the amount is increased, then -EDQUOT may be returned if there isn't * enough free quota available. * * If successful, 0 is returned. */ int key_payload_reserve(struct key *key, size_t datalen) { int delta = (int)datalen - key->datalen; int ret = 0; key_check(key); /* contemplate the quota adjustment */ if (delta != 0 && test_bit(KEY_FLAG_IN_QUOTA, &key->flags)) { unsigned maxbytes = uid_eq(key->user->uid, GLOBAL_ROOT_UID) ? key_quota_root_maxbytes : key_quota_maxbytes; spin_lock(&key->user->lock); if (delta > 0 && (key->user->qnbytes + delta >= maxbytes || key->user->qnbytes + delta < key->user->qnbytes)) { ret = -EDQUOT; } else { key->user->qnbytes += delta; key->quotalen += delta; } spin_unlock(&key->user->lock); } /* change the recorded data length if that didn't generate an error */ if (ret == 0) key->datalen = datalen; return ret; } EXPORT_SYMBOL(key_payload_reserve); /* * Change the key state to being instantiated. */ static void mark_key_instantiated(struct key *key, int reject_error) { /* Commit the payload before setting the state; barrier versus * key_read_state(). */ smp_store_release(&key->state, (reject_error < 0) ? reject_error : KEY_IS_POSITIVE); } /* * Instantiate a key and link it into the target keyring atomically. Must be * called with the target keyring's semaphore writelocked. The target key's * semaphore need not be locked as instantiation is serialised by * key_construction_mutex. 
*/ static int __key_instantiate_and_link(struct key *key, struct key_preparsed_payload *prep, struct key *keyring, struct key *authkey, struct assoc_array_edit **_edit) { int ret, awaken; key_check(key); key_check(keyring); awaken = 0; ret = -EBUSY; mutex_lock(&key_construction_mutex); /* can't instantiate twice */ if (key->state == KEY_IS_UNINSTANTIATED) { /* instantiate the key */ ret = key->type->instantiate(key, prep); if (ret == 0) { /* mark the key as being instantiated */ atomic_inc(&key->user->nikeys); mark_key_instantiated(key, 0); if (test_and_clear_bit(KEY_FLAG_USER_CONSTRUCT, &key->flags)) awaken = 1; /* and link it into the destination keyring */ if (keyring) { if (test_bit(KEY_FLAG_KEEP, &keyring->flags)) set_bit(KEY_FLAG_KEEP, &key->flags); __key_link(key, _edit); } /* disable the authorisation key */ if (authkey) key_revoke(authkey); if (prep->expiry != TIME_T_MAX) { key->expiry = prep->expiry; key_schedule_gc(prep->expiry + key_gc_delay); } } } mutex_unlock(&key_construction_mutex); /* wake up anyone waiting for a key to be constructed */ if (awaken) wake_up_bit(&key->flags, KEY_FLAG_USER_CONSTRUCT); return ret; } /** * key_instantiate_and_link - Instantiate a key and link it into the keyring. * @key: The key to instantiate. * @data: The data to use to instantiate the keyring. * @datalen: The length of @data. * @keyring: Keyring to create a link in on success (or NULL). * @authkey: The authorisation token permitting instantiation. * * Instantiate a key that's in the uninstantiated state using the provided data * and, if successful, link it in to the destination keyring if one is * supplied. * * If successful, 0 is returned, the authorisation token is revoked and anyone * waiting for the key is woken up. If the key was already instantiated, * -EBUSY will be returned. 
*/ int key_instantiate_and_link(struct key *key, const void *data, size_t datalen, struct key *keyring, struct key *authkey) { struct key_preparsed_payload prep; struct assoc_array_edit *edit; int ret; memset(&prep, 0, sizeof(prep)); prep.data = data; prep.datalen = datalen; prep.quotalen = key->type->def_datalen; prep.expiry = TIME_T_MAX; if (key->type->preparse) { ret = key->type->preparse(&prep); if (ret < 0) goto error; } if (keyring) { ret = __key_link_begin(keyring, &key->index_key, &edit); if (ret < 0) goto error; if (keyring->restrict_link && keyring->restrict_link->check) { struct key_restriction *keyres = keyring->restrict_link; ret = keyres->check(keyring, key->type, &prep.payload, keyres->key); if (ret < 0) goto error_link_end; } } ret = __key_instantiate_and_link(key, &prep, keyring, authkey, &edit); error_link_end: if (keyring) __key_link_end(keyring, &key->index_key, edit); error: if (key->type->preparse) key->type->free_preparse(&prep); return ret; } EXPORT_SYMBOL(key_instantiate_and_link); /** * key_reject_and_link - Negatively instantiate a key and link it into the keyring. * @key: The key to instantiate. * @timeout: The timeout on the negative key. * @error: The error to return when the key is hit. * @keyring: Keyring to create a link in on success (or NULL). * @authkey: The authorisation token permitting instantiation. * * Negatively instantiate a key that's in the uninstantiated state and, if * successful, set its timeout and stored error and link it in to the * destination keyring if one is supplied. The key and any links to the key * will be automatically garbage collected after the timeout expires. * * Negative keys are used to rate limit repeated request_key() calls by causing * them to return the stored error code (typically ENOKEY) until the negative * key expires. * * If successful, 0 is returned, the authorisation token is revoked and anyone * waiting for the key is woken up. 
If the key was already instantiated, * -EBUSY will be returned. */ int key_reject_and_link(struct key *key, unsigned timeout, unsigned error, struct key *keyring, struct key *authkey) { struct assoc_array_edit *edit; struct timespec now; int ret, awaken, link_ret = 0; key_check(key); key_check(keyring); awaken = 0; ret = -EBUSY; if (keyring) { if (keyring->restrict_link) return -EPERM; link_ret = __key_link_begin(keyring, &key->index_key, &edit); } mutex_lock(&key_construction_mutex); /* can't instantiate twice */ if (key->state == KEY_IS_UNINSTANTIATED) { /* mark the key as being negatively instantiated */ atomic_inc(&key->user->nikeys); mark_key_instantiated(key, -error); now = current_kernel_time(); key->expiry = now.tv_sec + timeout; key_schedule_gc(key->expiry + key_gc_delay); if (test_and_clear_bit(KEY_FLAG_USER_CONSTRUCT, &key->flags)) awaken = 1; ret = 0; /* and link it into the destination keyring */ if (keyring && link_ret == 0) __key_link(key, &edit); /* disable the authorisation key */ if (authkey) key_revoke(authkey); } mutex_unlock(&key_construction_mutex); if (keyring && link_ret == 0) __key_link_end(keyring, &key->index_key, edit); /* wake up anyone waiting for a key to be constructed */ if (awaken) wake_up_bit(&key->flags, KEY_FLAG_USER_CONSTRUCT); return ret == 0 ? link_ret : ret; } EXPORT_SYMBOL(key_reject_and_link); /** * key_put - Discard a reference to a key. * @key: The key to discard a reference from. * * Discard a reference to a key, and when all the references are gone, we * schedule the cleanup task to come and pull it out of the tree in process * context at some later time. */ void key_put(struct key *key) { if (key) { key_check(key); if (refcount_dec_and_test(&key->usage)) schedule_work(&key_gc_work); } } EXPORT_SYMBOL(key_put); /* * Find a key by its serial number. 
*/ struct key *key_lookup(key_serial_t id) { struct rb_node *n; struct key *key; spin_lock(&key_serial_lock); /* search the tree for the specified key */ n = key_serial_tree.rb_node; while (n) { key = rb_entry(n, struct key, serial_node); if (id < key->serial) n = n->rb_left; else if (id > key->serial) n = n->rb_right; else goto found; } not_found: key = ERR_PTR(-ENOKEY); goto error; found: /* A key is allowed to be looked up only if someone still owns a * reference to it - otherwise it's awaiting the gc. */ if (!refcount_inc_not_zero(&key->usage)) goto not_found; error: spin_unlock(&key_serial_lock); return key; } /* * Find and lock the specified key type against removal. * * We return with the sem read-locked if successful. If the type wasn't * available -ENOKEY is returned instead. */ struct key_type *key_type_lookup(const char *type) { struct key_type *ktype; down_read(&key_types_sem); /* look up the key type to see if it's one of the registered kernel * types */ list_for_each_entry(ktype, &key_types_list, link) { if (strcmp(ktype->name, type) == 0) goto found_kernel_type; } up_read(&key_types_sem); ktype = ERR_PTR(-ENOKEY); found_kernel_type: return ktype; } void key_set_timeout(struct key *key, unsigned timeout) { struct timespec now; time_t expiry = 0; /* make the changes with the locks held to prevent races */ down_write(&key->sem); if (timeout > 0) { now = current_kernel_time(); expiry = now.tv_sec + timeout; } key->expiry = expiry; key_schedule_gc(key->expiry + key_gc_delay); up_write(&key->sem); } EXPORT_SYMBOL_GPL(key_set_timeout); /* * Unlock a key type locked by key_type_lookup(). */ void key_type_put(struct key_type *ktype) { up_read(&key_types_sem); } /* * Attempt to update an existing key. * * The key is given to us with an incremented refcount that we need to discard * if we get an error. 
*/ static inline key_ref_t __key_update(key_ref_t key_ref, struct key_preparsed_payload *prep) { struct key *key = key_ref_to_ptr(key_ref); int ret; /* need write permission on the key to update it */ ret = key_permission(key_ref, KEY_NEED_WRITE); if (ret < 0) goto error; ret = -EEXIST; if (!key->type->update) goto error; down_write(&key->sem); ret = key->type->update(key, prep); if (ret == 0) /* Updating a negative key positively instantiates it */ mark_key_instantiated(key, 0); up_write(&key->sem); if (ret < 0) goto error; out: return key_ref; error: key_put(key); key_ref = ERR_PTR(ret); goto out; } /** * key_create_or_update - Update or create and instantiate a key. * @keyring_ref: A pointer to the destination keyring with possession flag. * @type: The type of key. * @description: The searchable description for the key. * @payload: The data to use to instantiate or update the key. * @plen: The length of @payload. * @perm: The permissions mask for a new key. * @flags: The quota flags for a new key. * * Search the destination keyring for a key of the same description and if one * is found, update it, otherwise create and instantiate a new one and create a * link to it from that keyring. * * If perm is KEY_PERM_UNDEF then an appropriate key permissions mask will be * concocted. * * Returns a pointer to the new key if successful, -ENODEV if the key type * wasn't available, -ENOTDIR if the keyring wasn't a keyring, -EACCES if the * caller isn't permitted to modify the keyring or the LSM did not permit * creation of the key. * * On success, the possession flag from the keyring ref will be tacked on to * the key ref before it is returned. 
*/ key_ref_t key_create_or_update(key_ref_t keyring_ref, const char *type, const char *description, const void *payload, size_t plen, key_perm_t perm, unsigned long flags) { struct keyring_index_key index_key = { .description = description, }; struct key_preparsed_payload prep; struct assoc_array_edit *edit; const struct cred *cred = current_cred(); struct key *keyring, *key = NULL; key_ref_t key_ref; int ret; struct key_restriction *restrict_link = NULL; /* look up the key type to see if it's one of the registered kernel * types */ index_key.type = key_type_lookup(type); if (IS_ERR(index_key.type)) { key_ref = ERR_PTR(-ENODEV); goto error; } key_ref = ERR_PTR(-EINVAL); if (!index_key.type->instantiate || (!index_key.description && !index_key.type->preparse)) goto error_put_type; keyring = key_ref_to_ptr(keyring_ref); key_check(keyring); key_ref = ERR_PTR(-EPERM); if (!(flags & KEY_ALLOC_BYPASS_RESTRICTION)) restrict_link = keyring->restrict_link; key_ref = ERR_PTR(-ENOTDIR); if (keyring->type != &key_type_keyring) goto error_put_type; memset(&prep, 0, sizeof(prep)); prep.data = payload; prep.datalen = plen; prep.quotalen = index_key.type->def_datalen; prep.expiry = TIME_T_MAX; if (index_key.type->preparse) { ret = index_key.type->preparse(&prep); if (ret < 0) { key_ref = ERR_PTR(ret); goto error_free_prep; } if (!index_key.description) index_key.description = prep.description; key_ref = ERR_PTR(-EINVAL); if (!index_key.description) goto error_free_prep; } index_key.desc_len = strlen(index_key.description); ret = __key_link_begin(keyring, &index_key, &edit); if (ret < 0) { key_ref = ERR_PTR(ret); goto error_free_prep; } if (restrict_link && restrict_link->check) { ret = restrict_link->check(keyring, index_key.type, &prep.payload, restrict_link->key); if (ret < 0) { key_ref = ERR_PTR(ret); goto error_link_end; } } /* if we're going to allocate a new key, we're going to have * to modify the keyring */ ret = key_permission(keyring_ref, KEY_NEED_WRITE); if (ret < 0) { 
key_ref = ERR_PTR(ret); goto error_link_end; } /* if it's possible to update this type of key, search for an existing * key of the same type and description in the destination keyring and * update that instead if possible */ if (index_key.type->update) { key_ref = find_key_to_update(keyring_ref, &index_key); if (key_ref) goto found_matching_key; } /* if the client doesn't provide, decide on the permissions we want */ if (perm == KEY_PERM_UNDEF) { perm = KEY_POS_VIEW | KEY_POS_SEARCH | KEY_POS_LINK | KEY_POS_SETATTR; perm |= KEY_USR_VIEW; if (index_key.type->read) perm |= KEY_POS_READ; if (index_key.type == &key_type_keyring || index_key.type->update) perm |= KEY_POS_WRITE; } /* allocate a new key */ key = key_alloc(index_key.type, index_key.description, cred->fsuid, cred->fsgid, cred, perm, flags, NULL); if (IS_ERR(key)) { key_ref = ERR_CAST(key); goto error_link_end; } /* instantiate it and link it into the target keyring */ ret = __key_instantiate_and_link(key, &prep, keyring, NULL, &edit); if (ret < 0) { key_put(key); key_ref = ERR_PTR(ret); goto error_link_end; } key_ref = make_key_ref(key, is_key_possessed(keyring_ref)); error_link_end: __key_link_end(keyring, &index_key, edit); error_free_prep: if (index_key.type->preparse) index_key.type->free_preparse(&prep); error_put_type: key_type_put(index_key.type); error: return key_ref; found_matching_key: /* we found a matching key, so we're going to try to update it * - we can drop the locks first as we have the key pinned */ __key_link_end(keyring, &index_key, edit); key_ref = __key_update(key_ref, &prep); goto error_free_prep; } EXPORT_SYMBOL(key_create_or_update); /** * key_update - Update a key's contents. * @key_ref: The pointer (plus possession flag) to the key. * @payload: The data to be used to update the key. * @plen: The length of @payload. * * Attempt to update the contents of a key with the given payload data. The * caller must be granted Write permission on the key. 
Negative keys can be * instantiated by this method. * * Returns 0 on success, -EACCES if not permitted and -EOPNOTSUPP if the key * type does not support updating. The key type may return other errors. */ int key_update(key_ref_t key_ref, const void *payload, size_t plen) { struct key_preparsed_payload prep; struct key *key = key_ref_to_ptr(key_ref); int ret; key_check(key); /* the key must be writable */ ret = key_permission(key_ref, KEY_NEED_WRITE); if (ret < 0) return ret; /* attempt to update it if supported */ if (!key->type->update) return -EOPNOTSUPP; memset(&prep, 0, sizeof(prep)); prep.data = payload; prep.datalen = plen; prep.quotalen = key->type->def_datalen; prep.expiry = TIME_T_MAX; if (key->type->preparse) { ret = key->type->preparse(&prep); if (ret < 0) goto error; } down_write(&key->sem); ret = key->type->update(key, &prep); if (ret == 0) /* Updating a negative key positively instantiates it */ mark_key_instantiated(key, 0); up_write(&key->sem); error: if (key->type->preparse) key->type->free_preparse(&prep); return ret; } EXPORT_SYMBOL(key_update); /** * key_revoke - Revoke a key. * @key: The key to be revoked. * * Mark a key as being revoked and ask the type to free up its resources. The * revocation timeout is set and the key and all its links will be * automatically garbage collected after key_gc_delay amount of time if they * are not manually dealt with first. 
*/ void key_revoke(struct key *key) { struct timespec now; time_t time; key_check(key); /* make sure no one's trying to change or use the key when we mark it * - we tell lockdep that we might nest because we might be revoking an * authorisation key whilst holding the sem on a key we've just * instantiated */ down_write_nested(&key->sem, 1); if (!test_and_set_bit(KEY_FLAG_REVOKED, &key->flags) && key->type->revoke) key->type->revoke(key); /* set the death time to no more than the expiry time */ now = current_kernel_time(); time = now.tv_sec; if (key->revoked_at == 0 || key->revoked_at > time) { key->revoked_at = time; key_schedule_gc(key->revoked_at + key_gc_delay); } up_write(&key->sem); } EXPORT_SYMBOL(key_revoke); /** * key_invalidate - Invalidate a key. * @key: The key to be invalidated. * * Mark a key as being invalidated and have it cleaned up immediately. The key * is ignored by all searches and other operations from this point. */ void key_invalidate(struct key *key) { kenter("%d", key_serial(key)); key_check(key); if (!test_bit(KEY_FLAG_INVALIDATED, &key->flags)) { down_write_nested(&key->sem, 1); if (!test_and_set_bit(KEY_FLAG_INVALIDATED, &key->flags)) key_schedule_gc_links(); up_write(&key->sem); } } EXPORT_SYMBOL(key_invalidate); /** * generic_key_instantiate - Simple instantiation of a key from preparsed data * @key: The key to be instantiated * @prep: The preparsed data to load. * * Instantiate a key from preparsed data. We assume we can just copy the data * in directly and clear the old pointers. * * This can be pointed to directly by the key type instantiate op pointer. 
*/ int generic_key_instantiate(struct key *key, struct key_preparsed_payload *prep) { int ret; pr_devel("==>%s()\n", __func__); ret = key_payload_reserve(key, prep->quotalen); if (ret == 0) { rcu_assign_keypointer(key, prep->payload.data[0]); key->payload.data[1] = prep->payload.data[1]; key->payload.data[2] = prep->payload.data[2]; key->payload.data[3] = prep->payload.data[3]; prep->payload.data[0] = NULL; prep->payload.data[1] = NULL; prep->payload.data[2] = NULL; prep->payload.data[3] = NULL; } pr_devel("<==%s() = %d\n", __func__, ret); return ret; } EXPORT_SYMBOL(generic_key_instantiate); /** * register_key_type - Register a type of key. * @ktype: The new key type. * * Register a new key type. * * Returns 0 on success or -EEXIST if a type of this name already exists. */ int register_key_type(struct key_type *ktype) { struct key_type *p; int ret; memset(&ktype->lock_class, 0, sizeof(ktype->lock_class)); ret = -EEXIST; down_write(&key_types_sem); /* disallow key types with the same name */ list_for_each_entry(p, &key_types_list, link) { if (strcmp(p->name, ktype->name) == 0) goto out; } /* store the type */ list_add(&ktype->link, &key_types_list); pr_notice("Key type %s registered\n", ktype->name); ret = 0; out: up_write(&key_types_sem); return ret; } EXPORT_SYMBOL(register_key_type); /** * unregister_key_type - Unregister a type of key. * @ktype: The key type. * * Unregister a key type and mark all the extant keys of this type as dead. * Those keys of this type are then destroyed to get rid of their payloads and * they and their links will be garbage collected as soon as possible. */ void unregister_key_type(struct key_type *ktype) { down_write(&key_types_sem); list_del_init(&ktype->link); downgrade_write(&key_types_sem); key_gc_keytype(ktype); pr_notice("Key type %s unregistered\n", ktype->name); up_read(&key_types_sem); } EXPORT_SYMBOL(unregister_key_type); /* * Initialise the key management state. 
*/ void __init key_init(void) { /* allocate a slab in which we can store keys */ key_jar = kmem_cache_create("key_jar", sizeof(struct key), 0, SLAB_HWCACHE_ALIGN|SLAB_PANIC, NULL); /* add the special key types */ list_add_tail(&key_type_keyring.link, &key_types_list); list_add_tail(&key_type_dead.link, &key_types_list); list_add_tail(&key_type_user.link, &key_types_list); list_add_tail(&key_type_logon.link, &key_types_list); /* record the root user tracking */ rb_link_node(&root_key_user.node, NULL, &key_user_tree.rb_node); rb_insert_color(&root_key_user.node, &key_user_tree); }
int key_reject_and_link(struct key *key, unsigned timeout, unsigned error, struct key *keyring, struct key *authkey) { struct assoc_array_edit *edit; struct timespec now; int ret, awaken, link_ret = 0; key_check(key); key_check(keyring); awaken = 0; ret = -EBUSY; if (keyring) { if (keyring->restrict_link) return -EPERM; link_ret = __key_link_begin(keyring, &key->index_key, &edit); } mutex_lock(&key_construction_mutex); /* can't instantiate twice */ if (!test_bit(KEY_FLAG_INSTANTIATED, &key->flags)) { /* mark the key as being negatively instantiated */ atomic_inc(&key->user->nikeys); key->reject_error = -error; smp_wmb(); set_bit(KEY_FLAG_NEGATIVE, &key->flags); set_bit(KEY_FLAG_INSTANTIATED, &key->flags); now = current_kernel_time(); key->expiry = now.tv_sec + timeout; key_schedule_gc(key->expiry + key_gc_delay); if (test_and_clear_bit(KEY_FLAG_USER_CONSTRUCT, &key->flags)) awaken = 1; ret = 0; /* and link it into the destination keyring */ if (keyring && link_ret == 0) __key_link(key, &edit); /* disable the authorisation key */ if (authkey) key_revoke(authkey); } mutex_unlock(&key_construction_mutex); if (keyring && link_ret == 0) __key_link_end(keyring, &key->index_key, edit); /* wake up anyone waiting for a key to be constructed */ if (awaken) wake_up_bit(&key->flags, KEY_FLAG_USER_CONSTRUCT); return ret == 0 ? link_ret : ret; }
int key_reject_and_link(struct key *key, unsigned timeout, unsigned error, struct key *keyring, struct key *authkey) { struct assoc_array_edit *edit; struct timespec now; int ret, awaken, link_ret = 0; key_check(key); key_check(keyring); awaken = 0; ret = -EBUSY; if (keyring) { if (keyring->restrict_link) return -EPERM; link_ret = __key_link_begin(keyring, &key->index_key, &edit); } mutex_lock(&key_construction_mutex); /* can't instantiate twice */ if (key->state == KEY_IS_UNINSTANTIATED) { /* mark the key as being negatively instantiated */ atomic_inc(&key->user->nikeys); mark_key_instantiated(key, -error); now = current_kernel_time(); key->expiry = now.tv_sec + timeout; key_schedule_gc(key->expiry + key_gc_delay); if (test_and_clear_bit(KEY_FLAG_USER_CONSTRUCT, &key->flags)) awaken = 1; ret = 0; /* and link it into the destination keyring */ if (keyring && link_ret == 0) __key_link(key, &edit); /* disable the authorisation key */ if (authkey) key_revoke(authkey); } mutex_unlock(&key_construction_mutex); if (keyring && link_ret == 0) __key_link_end(keyring, &key->index_key, edit); /* wake up anyone waiting for a key to be constructed */ if (awaken) wake_up_bit(&key->flags, KEY_FLAG_USER_CONSTRUCT); return ret == 0 ? link_ret : ret; }
{'added': [(404, '/*'), (405, ' * Change the key state to being instantiated.'), (406, ' */'), (407, 'static void mark_key_instantiated(struct key *key, int reject_error)'), (408, '{'), (409, '\t/* Commit the payload before setting the state; barrier versus'), (410, '\t * key_read_state().'), (411, '\t */'), (412, '\tsmp_store_release(&key->state,'), (413, '\t\t\t (reject_error < 0) ? reject_error : KEY_IS_POSITIVE);'), (414, '}'), (415, ''), (439, '\tif (key->state == KEY_IS_UNINSTANTIATED) {'), (446, '\t\t\tmark_key_instantiated(key, 0);'), (592, '\tif (key->state == KEY_IS_UNINSTANTIATED) {'), (595, '\t\tmark_key_instantiated(key, -error);'), (764, '\t\t/* Updating a negative key positively instantiates it */'), (765, '\t\tmark_key_instantiated(key, 0);'), (998, '\t\t/* Updating a negative key positively instantiates it */'), (999, '\t\tmark_key_instantiated(key, 0);')], 'deleted': [(427, '\tif (!test_bit(KEY_FLAG_INSTANTIATED, &key->flags)) {'), (434, '\t\t\tset_bit(KEY_FLAG_INSTANTIATED, &key->flags);'), (580, '\tif (!test_bit(KEY_FLAG_INSTANTIATED, &key->flags)) {'), (583, '\t\tkey->reject_error = -error;'), (584, '\t\tsmp_wmb();'), (585, '\t\tset_bit(KEY_FLAG_NEGATIVE, &key->flags);'), (586, '\t\tset_bit(KEY_FLAG_INSTANTIATED, &key->flags);'), (755, '\t\t/* updating a negative key instantiates it */'), (756, '\t\tclear_bit(KEY_FLAG_NEGATIVE, &key->flags);'), (989, '\t\t/* updating a negative key instantiates it */'), (990, '\t\tclear_bit(KEY_FLAG_NEGATIVE, &key->flags);')]}
20
11
703
4,290
43
274
12
https://github.com/torvalds/linux
CVE-2017-15951
CWE-20
732
codegen.c
C
gen_assignment
/* ** codegen.c - mruby code generator ** ** See Copyright Notice in mruby.h */ #include <mruby.h> #include <mruby/compile.h> #include <mruby/proc.h> #include <mruby/dump.h> #include <mruby/numeric.h> #include <mruby/string.h> #include <mruby/debug.h> #include <mruby/presym.h> #include "node.h" #include <mruby/opcode.h> #include <mruby/re.h> #include <mruby/throw.h> #include <ctype.h> #include <string.h> #ifndef MRB_CODEGEN_LEVEL_MAX #define MRB_CODEGEN_LEVEL_MAX 256 #endif #define MAXARG_S (1<<16) typedef mrb_ast_node node; typedef struct mrb_parser_state parser_state; enum looptype { LOOP_NORMAL, LOOP_BLOCK, LOOP_FOR, LOOP_BEGIN, LOOP_RESCUE, }; struct loopinfo { enum looptype type; uint32_t pc0; /* `next` destination */ uint32_t pc1; /* `redo` destination */ uint32_t pc2; /* `break` destination */ int reg; /* destination register */ struct loopinfo *prev; }; typedef struct scope { mrb_state *mrb; mrb_pool *mpool; struct scope *prev; node *lv; uint16_t sp; uint32_t pc; uint32_t lastpc; uint32_t lastlabel; uint16_t ainfo:15; mrb_bool mscope:1; struct loopinfo *loop; mrb_sym filename_sym; uint16_t lineno; mrb_code *iseq; uint16_t *lines; uint32_t icapa; mrb_irep *irep; mrb_pool_value *pool; mrb_sym *syms; mrb_irep **reps; struct mrb_irep_catch_handler *catch_table; uint32_t pcapa, scapa, rcapa; uint16_t nlocals; uint16_t nregs; int ai; int debug_start_pos; uint16_t filename_index; parser_state* parser; int rlev; /* recursion levels */ } codegen_scope; static codegen_scope* scope_new(mrb_state *mrb, codegen_scope *prev, node *lv); static void scope_finish(codegen_scope *s); static struct loopinfo *loop_push(codegen_scope *s, enum looptype t); static void loop_break(codegen_scope *s, node *tree); static void loop_pop(codegen_scope *s, int val); /* * The search for catch handlers starts at the end of the table in mrb_vm_run(). * Therefore, the next handler to be added must meet one of the following conditions. 
* - Larger start position * - Same start position but smaller end position */ static int catch_handler_new(codegen_scope *s); static void catch_handler_set(codegen_scope *s, int ent, enum mrb_catch_type type, uint32_t begin, uint32_t end, uint32_t target); static void gen_assignment(codegen_scope *s, node *tree, node *rhs, int sp, int val); static void gen_vmassignment(codegen_scope *s, node *tree, int sp, int val); static void codegen(codegen_scope *s, node *tree, int val); static void raise_error(codegen_scope *s, const char *msg); static void codegen_error(codegen_scope *s, const char *message) { if (!s) return; #ifndef MRB_NO_STDIO if (s->filename_sym && s->lineno) { const char *filename = mrb_sym_name_len(s->mrb, s->filename_sym, NULL); fprintf(stderr, "%s:%d: %s\n", filename, s->lineno, message); } else { fprintf(stderr, "%s\n", message); } #endif while (s->prev) { codegen_scope *tmp = s->prev; if (s->irep) { mrb_free(s->mrb, s->iseq); for (int i=0; i<s->irep->plen; i++) { mrb_pool_value *pv = &s->pool[i]; if ((pv->tt & 0x3) == IREP_TT_STR || pv->tt == IREP_TT_BIGINT) { mrb_free(s->mrb, (void*)pv->u.str); } } mrb_free(s->mrb, s->pool); mrb_free(s->mrb, s->syms); mrb_free(s->mrb, s->catch_table); if (s->reps) { /* copied from mrb_irep_free() in state.c */ for (int i=0; i<s->irep->rlen; i++) { if (s->reps[i]) mrb_irep_decref(s->mrb, (mrb_irep*)s->reps[i]); } mrb_free(s->mrb, s->reps); } mrb_free(s->mrb, s->lines); } mrb_pool_close(s->mpool); s = tmp; } MRB_THROW(s->mrb->jmp); } static void* codegen_palloc(codegen_scope *s, size_t len) { void *p = mrb_pool_alloc(s->mpool, len); if (!p) codegen_error(s, "pool memory allocation"); return p; } static void* codegen_realloc(codegen_scope *s, void *p, size_t len) { p = mrb_realloc_simple(s->mrb, p, len); if (!p && len > 0) codegen_error(s, "mrb_realloc"); return p; } static void check_no_ext_ops(codegen_scope *s, uint16_t a, uint16_t b) { if (s->parser->no_ext_ops && (a | b) > 0xff) { codegen_error(s, "need OP_EXTs 
instruction (currently OP_EXTs are prohibited)"); } } static int new_label(codegen_scope *s) { return s->lastlabel = s->pc; } static void emit_B(codegen_scope *s, uint32_t pc, uint8_t i) { if (pc >= s->icapa) { if (pc == UINT32_MAX) { codegen_error(s, "too big code block"); } if (pc >= UINT32_MAX / 2) { pc = UINT32_MAX; } else { s->icapa *= 2; } s->iseq = (mrb_code *)codegen_realloc(s, s->iseq, sizeof(mrb_code)*s->icapa); if (s->lines) { s->lines = (uint16_t*)codegen_realloc(s, s->lines, sizeof(uint16_t)*s->icapa); } } if (s->lines) { if (s->lineno > 0 || pc == 0) s->lines[pc] = s->lineno; else s->lines[pc] = s->lines[pc-1]; } s->iseq[pc] = i; } static void emit_S(codegen_scope *s, int pc, uint16_t i) { uint8_t hi = i>>8; uint8_t lo = i&0xff; emit_B(s, pc, hi); emit_B(s, pc+1, lo); } static void gen_B(codegen_scope *s, uint8_t i) { emit_B(s, s->pc, i); s->pc++; } static void gen_S(codegen_scope *s, uint16_t i) { emit_S(s, s->pc, i); s->pc += 2; } static void genop_0(codegen_scope *s, mrb_code i) { s->lastpc = s->pc; gen_B(s, i); } static void genop_1(codegen_scope *s, mrb_code i, uint16_t a) { s->lastpc = s->pc; check_no_ext_ops(s, a, 0); if (a > 0xff) { gen_B(s, OP_EXT1); gen_B(s, i); gen_S(s, a); } else { gen_B(s, i); gen_B(s, (uint8_t)a); } } static void genop_2(codegen_scope *s, mrb_code i, uint16_t a, uint16_t b) { s->lastpc = s->pc; check_no_ext_ops(s, a, b); if (a > 0xff && b > 0xff) { gen_B(s, OP_EXT3); gen_B(s, i); gen_S(s, a); gen_S(s, b); } else if (b > 0xff) { gen_B(s, OP_EXT2); gen_B(s, i); gen_B(s, (uint8_t)a); gen_S(s, b); } else if (a > 0xff) { gen_B(s, OP_EXT1); gen_B(s, i); gen_S(s, a); gen_B(s, (uint8_t)b); } else { gen_B(s, i); gen_B(s, (uint8_t)a); gen_B(s, (uint8_t)b); } } static void genop_3(codegen_scope *s, mrb_code i, uint16_t a, uint16_t b, uint8_t c) { genop_2(s, i, a, b); gen_B(s, c); } static void genop_2S(codegen_scope *s, mrb_code i, uint16_t a, uint16_t b) { genop_1(s, i, a); gen_S(s, b); } static void genop_2SS(codegen_scope *s, 
mrb_code i, uint16_t a, uint32_t b) { genop_1(s, i, a); gen_S(s, b>>16); gen_S(s, b&0xffff); } static void genop_W(codegen_scope *s, mrb_code i, uint32_t a) { uint8_t a1 = (a>>16) & 0xff; uint8_t a2 = (a>>8) & 0xff; uint8_t a3 = a & 0xff; s->lastpc = s->pc; gen_B(s, i); gen_B(s, a1); gen_B(s, a2); gen_B(s, a3); } #define NOVAL 0 #define VAL 1 static mrb_bool no_optimize(codegen_scope *s) { if (s && s->parser && s->parser->no_optimize) return TRUE; return FALSE; } struct mrb_insn_data mrb_decode_insn(const mrb_code *pc) { struct mrb_insn_data data = { 0 }; if (pc == 0) return data; data.addr = pc; mrb_code insn = READ_B(); uint16_t a = 0; uint16_t b = 0; uint16_t c = 0; switch (insn) { #define FETCH_Z() /* empty */ #define OPCODE(i,x) case OP_ ## i: FETCH_ ## x (); break; #include "mruby/ops.h" #undef OPCODE } switch (insn) { case OP_EXT1: insn = READ_B(); switch (insn) { #define OPCODE(i,x) case OP_ ## i: FETCH_ ## x ## _1 (); break; #include "mruby/ops.h" #undef OPCODE } break; case OP_EXT2: insn = READ_B(); switch (insn) { #define OPCODE(i,x) case OP_ ## i: FETCH_ ## x ## _2 (); break; #include "mruby/ops.h" #undef OPCODE } break; case OP_EXT3: insn = READ_B(); switch (insn) { #define OPCODE(i,x) case OP_ ## i: FETCH_ ## x ## _3 (); break; #include "mruby/ops.h" #undef OPCODE } break; default: break; } data.insn = insn; data.a = a; data.b = b; data.c = c; return data; } #undef OPCODE #define Z 1 #define S 3 #define W 4 #define OPCODE(_,x) x, /* instruction sizes */ static uint8_t mrb_insn_size[] = { #define B 2 #define BB 3 #define BBB 4 #define BS 4 #define BSS 6 #include "mruby/ops.h" #undef B #undef BB #undef BBB #undef BS #undef BSS }; /* EXT1 instruction sizes */ static uint8_t mrb_insn_size1[] = { #define B 3 #define BB 4 #define BBB 5 #define BS 5 #define BSS 7 #include "mruby/ops.h" #undef B #undef BS #undef BSS }; /* EXT2 instruction sizes */ static uint8_t mrb_insn_size2[] = { #define B 2 #define BS 4 #define BSS 6 #include "mruby/ops.h" #undef B #undef 
BB #undef BBB #undef BS #undef BSS }; /* EXT3 instruction sizes */ #define B 3 #define BB 5 #define BBB 6 #define BS 5 #define BSS 7 static uint8_t mrb_insn_size3[] = { #include "mruby/ops.h" }; #undef B #undef BB #undef BBB #undef BS #undef BSS #undef OPCODE static const mrb_code* mrb_prev_pc(codegen_scope *s, const mrb_code *pc) { const mrb_code *prev_pc = NULL; const mrb_code *i = s->iseq; while (i<pc) { uint8_t insn = i[0]; prev_pc = i; switch (insn) { case OP_EXT1: i += mrb_insn_size1[i[1]] + 1; break; case OP_EXT2: i += mrb_insn_size2[i[1]] + 1; break; case OP_EXT3: i += mrb_insn_size3[i[1]] + 1; break; default: i += mrb_insn_size[insn]; break; } } return prev_pc; } #define pc_addr(s) &((s)->iseq[(s)->pc]) #define addr_pc(s, addr) (uint32_t)((addr) - s->iseq) #define rewind_pc(s) s->pc = s->lastpc static struct mrb_insn_data mrb_last_insn(codegen_scope *s) { if (s->pc == 0) { struct mrb_insn_data data = { OP_NOP, 0 }; return data; } return mrb_decode_insn(&s->iseq[s->lastpc]); } static mrb_bool no_peephole(codegen_scope *s) { return no_optimize(s) || s->lastlabel == s->pc || s->pc == 0 || s->pc == s->lastpc; } #define JMPLINK_START UINT32_MAX static void gen_jmpdst(codegen_scope *s, uint32_t pc) { if (pc == JMPLINK_START) { pc = 0; } uint32_t pos2 = s->pc+2; int32_t off = pc - pos2; if (off > INT16_MAX || INT16_MIN > off) { codegen_error(s, "too big jump offset"); } gen_S(s, (uint16_t)off); } static uint32_t genjmp(codegen_scope *s, mrb_code i, uint32_t pc) { uint32_t pos; genop_0(s, i); pos = s->pc; gen_jmpdst(s, pc); return pos; } #define genjmp_0(s,i) genjmp(s,i,JMPLINK_START) static uint32_t genjmp2(codegen_scope *s, mrb_code i, uint16_t a, uint32_t pc, int val) { uint32_t pos; if (!no_peephole(s) && !val) { struct mrb_insn_data data = mrb_last_insn(s); switch (data.insn) { case OP_MOVE: if (data.a == a && data.a > s->nlocals) { rewind_pc(s); a = data.b; } break; case OP_LOADNIL: case OP_LOADF: if (data.a == a || data.a > s->nlocals) { s->pc = addr_pc(s, 
data.addr); if (i == OP_JMPNOT || (i == OP_JMPNIL && data.insn == OP_LOADNIL)) { return genjmp(s, OP_JMP, pc); } else { /* OP_JMPIF */ return JMPLINK_START; } } break; case OP_LOADT: case OP_LOADI: case OP_LOADINEG: case OP_LOADI__1: case OP_LOADI_0: case OP_LOADI_1: case OP_LOADI_2: case OP_LOADI_3: case OP_LOADI_4: case OP_LOADI_5: case OP_LOADI_6: case OP_LOADI_7: if (data.a == a || data.a > s->nlocals) { s->pc = addr_pc(s, data.addr); if (i == OP_JMPIF) { return genjmp(s, OP_JMP, pc); } else { /* OP_JMPNOT and OP_JMPNIL */ return JMPLINK_START; } } break; } } if (a > 0xff) { check_no_ext_ops(s, a, 0); gen_B(s, OP_EXT1); genop_0(s, i); gen_S(s, a); } else { genop_0(s, i); gen_B(s, (uint8_t)a); } pos = s->pc; gen_jmpdst(s, pc); return pos; } #define genjmp2_0(s,i,a,val) genjmp2(s,i,a,JMPLINK_START,val) static mrb_bool get_int_operand(codegen_scope *s, struct mrb_insn_data *data, mrb_int *ns); static void gen_int(codegen_scope *s, uint16_t dst, mrb_int i); static void gen_move(codegen_scope *s, uint16_t dst, uint16_t src, int nopeep) { if (nopeep || no_peephole(s)) goto normal; else if (dst == src) return; else { struct mrb_insn_data data = mrb_last_insn(s); switch (data.insn) { case OP_MOVE: if (dst == src) return; /* remove useless MOVE */ if (data.b == dst && data.a == src) /* skip swapping MOVE */ return; goto normal; case OP_LOADNIL: case OP_LOADSELF: case OP_LOADT: case OP_LOADF: case OP_LOADI__1: case OP_LOADI_0: case OP_LOADI_1: case OP_LOADI_2: case OP_LOADI_3: case OP_LOADI_4: case OP_LOADI_5: case OP_LOADI_6: case OP_LOADI_7: if (data.a != src || data.a < s->nlocals) goto normal; rewind_pc(s); genop_1(s, data.insn, dst); return; case OP_HASH: case OP_ARRAY: if (data.b != 0) goto normal; /* fall through */ case OP_LOADI: case OP_LOADINEG: case OP_LOADL: case OP_LOADSYM: case OP_GETGV: case OP_GETSV: case OP_GETIV: case OP_GETCV: case OP_GETCONST: case OP_STRING: case OP_LAMBDA: case OP_BLOCK: case OP_METHOD: case OP_BLKPUSH: if (data.a != src || data.a < 
s->nlocals) goto normal; rewind_pc(s); genop_2(s, data.insn, dst, data.b); return; case OP_LOADI16: if (data.a != src || data.a < s->nlocals) goto normal; rewind_pc(s); genop_2S(s, data.insn, dst, data.b); return; case OP_LOADI32: if (data.a != src || data.a < s->nlocals) goto normal; else { uint32_t i = (uint32_t)data.b<<16|data.c; rewind_pc(s); genop_2SS(s, data.insn, dst, i); } return; case OP_AREF: case OP_GETUPVAR: if (data.a != src || data.a < s->nlocals) goto normal; rewind_pc(s); genop_3(s, data.insn, dst, data.b, data.c); return; case OP_ADDI: case OP_SUBI: if (addr_pc(s, data.addr) == s->lastlabel || data.a != src || data.a < s->nlocals) goto normal; else { struct mrb_insn_data data0 = mrb_decode_insn(mrb_prev_pc(s, data.addr)); if (data0.insn != OP_MOVE || data0.a != data.a || data0.b != dst) goto normal; s->pc = addr_pc(s, data0.addr); if (addr_pc(s, data0.addr) != s->lastlabel) { /* constant folding */ data0 = mrb_decode_insn(mrb_prev_pc(s, data0.addr)); mrb_int n; if (data0.a == dst && get_int_operand(s, &data0, &n)) { if ((data.insn == OP_ADDI && !mrb_int_add_overflow(n, data.b, &n)) || (data.insn == OP_SUBI && !mrb_int_sub_overflow(n, data.b, &n))) { s->pc = addr_pc(s, data0.addr); gen_int(s, dst, n); return; } } } } genop_2(s, data.insn, dst, data.b); return; default: break; } } normal: genop_2(s, OP_MOVE, dst, src); return; } static int search_upvar(codegen_scope *s, mrb_sym id, int *idx); static void gen_getupvar(codegen_scope *s, uint16_t dst, mrb_sym id) { int idx; int lv = search_upvar(s, id, &idx); if (!no_peephole(s)) { struct mrb_insn_data data = mrb_last_insn(s); if (data.insn == OP_SETUPVAR && data.a == dst && data.b == idx && data.c == lv) { /* skip GETUPVAR right after SETUPVAR */ return; } } genop_3(s, OP_GETUPVAR, dst, idx, lv); } static void gen_setupvar(codegen_scope *s, uint16_t dst, mrb_sym id) { int idx; int lv = search_upvar(s, id, &idx); if (!no_peephole(s)) { struct mrb_insn_data data = mrb_last_insn(s); if (data.insn == 
OP_MOVE && data.a == dst) {
      /* tail of gen_setupvar: fold a preceding OP_MOVE so the upvar
       * store reads straight from the move's source register */
      dst = data.b;
      rewind_pc(s);
    }
  }
  genop_3(s, OP_SETUPVAR, dst, idx, lv);
}

/* Emit a return-family opcode (OP_RETURN/OP_RETURN_BLK/OP_BREAK).
 * Peephole: if the value was just OP_MOVEd into `src`, return the move's
 * source directly; a return immediately after OP_RETURN is dropped. */
static void
gen_return(codegen_scope *s, uint8_t op, uint16_t src)
{
  if (no_peephole(s)) {
    genop_1(s, op, src);
  }
  else {
    struct mrb_insn_data data = mrb_last_insn(s);

    if (data.insn == OP_MOVE && src == data.a) {
      rewind_pc(s);
      genop_1(s, op, data.b);
    }
    else if (data.insn != OP_RETURN) {
      genop_1(s, op, src);
    }
  }
}

/* If `data` is an instruction that loads an integer constant, store that
 * constant in *n and return TRUE; otherwise return FALSE.
 * Used by the constant-folding peephole optimizers below. */
static mrb_bool
get_int_operand(codegen_scope *s, struct mrb_insn_data *data, mrb_int *n)
{
  switch (data->insn) {
  case OP_LOADI__1:
    *n = -1;
    return TRUE;

  case OP_LOADINEG:
    *n = -data->b;
    return TRUE;

  case OP_LOADI_0: case OP_LOADI_1: case OP_LOADI_2: case OP_LOADI_3:
  case OP_LOADI_4: case OP_LOADI_5: case OP_LOADI_6: case OP_LOADI_7:
    /* opcode itself encodes the value 0..7 */
    *n = data->insn - OP_LOADI_0;
    return TRUE;

  case OP_LOADI: case OP_LOADI16:
    *n = (int16_t)data->b;
    return TRUE;

  case OP_LOADI32:
    /* 32-bit value split across the b (high) and c (low) operands */
    *n = (mrb_int)((uint32_t)data->b<<16)+data->c;
    return TRUE;

  case OP_LOADL:
    /* integer stored in the literal pool */
    {
      mrb_pool_value *pv = &s->pool[data->b];
      if (pv->tt == IREP_TT_INT32) {
        *n = (mrb_int)pv->u.i32;
      }
#ifdef MRB_INT64
      else if (pv->tt == IREP_TT_INT64) {
        *n = (mrb_int)pv->u.i64;
      }
#endif
      else {
        return FALSE;
      }
    }
    return TRUE;

  default:
    return FALSE;
  }
}

/* Emit OP_ADD/OP_SUB with constant folding:
 * - both operands constant -> rewind and emit a single integer load
 * - one small constant (-128..127) -> OP_ADDI/OP_SUBI immediate form */
static void
gen_addsub(codegen_scope *s, uint8_t op, uint16_t dst)
{
  if (no_peephole(s)) {
  normal:
    genop_1(s, op, dst);
    return;
  }
  else {
    struct mrb_insn_data data = mrb_last_insn(s);
    mrb_int n;

    if (!get_int_operand(s, &data, &n)) {
      /* not integer immediate */
      goto normal;
    }
    struct mrb_insn_data data0 = mrb_decode_insn(mrb_prev_pc(s, data.addr));
    mrb_int n0;
    if (addr_pc(s, data.addr) == s->lastlabel || !get_int_operand(s, &data0, &n0)) {
      /* OP_ADDI/OP_SUBI takes upto 8bits */
      if (n > INT8_MAX || n < INT8_MIN) goto normal;
      rewind_pc(s);
      if (n == 0) return;       /* x+0 / x-0 is a no-op */
      if (n > 0) {
        if (op == OP_ADD) genop_2(s, OP_ADDI, dst, (uint16_t)n);
        else genop_2(s, OP_SUBI, dst, (uint16_t)n);
      }
      else {                    /* n < 0 */
        n = -n;
        if (op == OP_ADD) genop_2(s, OP_SUBI, dst, (uint16_t)n);
        else genop_2(s, OP_ADDI, dst, (uint16_t)n);
      }
      return;
    }
    /* fold the two constants, bailing out on signed overflow */
    if (op == OP_ADD) {
      if (mrb_int_add_overflow(n0, n, &n)) goto normal;
    }
    else {                      /* OP_SUB */
      if (mrb_int_sub_overflow(n0, n, &n)) goto normal;
    }
    s->pc = addr_pc(s, data0.addr);
    gen_int(s, dst, n);
  }
}

/* Emit OP_MUL/OP_DIV, folding constant*constant at compile time.
 * Bails out on overflow, division by zero and MRB_INT_MIN / -1. */
static void
gen_muldiv(codegen_scope *s, uint8_t op, uint16_t dst)
{
  if (no_peephole(s)) {
  normal:
    genop_1(s, op, dst);
    return;
  }
  else {
    struct mrb_insn_data data = mrb_last_insn(s);
    mrb_int n, n0;
    if (addr_pc(s, data.addr) == s->lastlabel || !get_int_operand(s, &data, &n)) {
      /* not integer immediate */
      goto normal;
    }
    struct mrb_insn_data data0 = mrb_decode_insn(mrb_prev_pc(s, data.addr));
    if (!get_int_operand(s, &data0, &n0) || n == 0) {
      goto normal;
    }
    if (op == OP_MUL) {
      if (mrb_int_mul_overflow(n0, n, &n)) goto normal;
    }
    else {                      /* OP_DIV */
      if (n0 == MRB_INT_MIN && n == -1) goto normal; /* would overflow */
      n = n0 / n;
    }
    s->pc = addr_pc(s, data0.addr);
    gen_int(s, dst, n);
  }
}

mrb_bool mrb_num_shift(mrb_state *mrb, mrb_int val, mrb_int width, mrb_int *num);

/* Constant-fold binary operators (<<, >>, %, &, |, ^) on two integer
 * literals, or emit OP_GETIDX for `[]`.  Returns TRUE when it emitted
 * code (caller must not emit the generic OP_SEND). */
static mrb_bool
gen_binop(codegen_scope *s, mrb_sym op, uint16_t dst)
{
  if (no_peephole(s)) return FALSE;
  else if (op == MRB_OPSYM_2(s->mrb, aref)) {
    genop_1(s, OP_GETIDX, dst);
    return TRUE;
  }
  else {
    struct mrb_insn_data data = mrb_last_insn(s);
    mrb_int n, n0;
    if (addr_pc(s, data.addr) == s->lastlabel || !get_int_operand(s, &data, &n)) {
      /* not integer immediate */
      return FALSE;
    }
    struct mrb_insn_data data0 = mrb_decode_insn(mrb_prev_pc(s, data.addr));
    if (!get_int_operand(s, &data0, &n0)) {
      return FALSE;
    }
    if (op == MRB_OPSYM_2(s->mrb, lshift)) {
      if (!mrb_num_shift(s->mrb, n0, n, &n)) return FALSE;
    }
    else if (op == MRB_OPSYM_2(s->mrb, rshift)) {
      if (n == MRB_INT_MIN) return FALSE; /* -n would overflow */
      if (!mrb_num_shift(s->mrb, n0, -n, &n)) return FALSE;
    }
    else if (op == MRB_OPSYM_2(s->mrb, mod) && n != 0) {
      if (n0 == MRB_INT_MIN && n == -1) {
        n = 0;
      }
      else {
        /* Ruby's modulo: result takes the sign of the divisor */
        mrb_int n1 = n0 % n;
        if ((n0 < 0) != (n < 0) && n1 != 0) {
          n1 += n;
        }
        n = n1;
      }
    }
    else if (op == MRB_OPSYM_2(s->mrb, and)) {
      n = n0 & n;
    }
    else if (op == MRB_OPSYM_2(s->mrb, or)) {
      n = n0 | n;
    }
    else if (op == MRB_OPSYM_2(s->mrb, xor)) {
      n = n0 ^ n;
    }
    else {
      return FALSE;
    }
    /* rewind over both literal loads and emit the folded constant */
    s->pc = addr_pc(s, data0.addr);
    gen_int(s, dst, n);
    return TRUE;
  }
}

/* Backpatch one pending forward jump at iseq offset pos0 so it targets the
 * current pc.  Pending jumps are chained through their own operand field;
 * returns the next chain entry, or 0 when the chain ends. */
static uint32_t
dispatch(codegen_scope *s, uint32_t pos0)
{
  int32_t pos1;
  int32_t offset;
  int16_t newpos;

  if (pos0 == JMPLINK_START) return 0;

  pos1 = pos0 + 2;
  offset = s->pc - pos1;
  if (offset > INT16_MAX) {
    codegen_error(s, "too big jmp offset");
  }
  s->lastlabel = s->pc;         /* pc is now a jump target: block peephole across it */
  newpos = (int16_t)PEEK_S(s->iseq+pos0);
  emit_S(s, pos0, (uint16_t)offset);
  if (newpos == 0) return 0;
  return pos1+newpos;
}

/* Backpatch an entire chain of pending jumps to the current pc. */
static void
dispatch_linked(codegen_scope *s, uint32_t pos)
{
  if (pos==JMPLINK_START) return;
  for (;;) {
    pos = dispatch(s, pos);
    if (pos==0) break;
  }
}

#define nregs_update do {if (s->sp > s->nregs) s->nregs = s->sp;} while (0)
/* Reserve n VM stack slots, tracking the high-water mark in s->nregs. */
static void
push_n_(codegen_scope *s, int n)
{
  if (s->sp+n >= 0xffff) {
    codegen_error(s, "too complex expression");
  }
  s->sp+=n;
  nregs_update;
}

/* Release n VM stack slots; underflow indicates a codegen bug. */
static void
pop_n_(codegen_scope *s, int n)
{
  if ((int)s->sp-n < 0) {
    codegen_error(s, "stack pointer underflow");
  }
  s->sp-=n;
}

#define push() push_n_(s,1)
#define push_n(n) push_n_(s,n)
#define pop() pop_n_(s,1)
#define pop_n(n) pop_n_(s,n)
#define cursp() (s->sp)

/* Intern a big-integer literal (decimal string p in the given base) into
 * the irep literal pool, deduplicating identical entries.  Pool entry
 * layout: [len][base/negflag][digits...][NUL].  Returns the pool index. */
static int
new_litbn(codegen_scope *s, const char *p, int base, mrb_bool neg)
{
  int i;
  size_t plen;
  mrb_pool_value *pv;

  plen = strlen(p);
  if (plen > 255) {
    codegen_error(s, "integer too big");
  }
  for (i=0; i<s->irep->plen; i++) {
    size_t len;
    pv = &s->pool[i];
    if (pv->tt != IREP_TT_BIGINT) continue;
    len = pv->u.str[0];
    if (len == plen && pv->u.str[1] == base && memcmp(pv->u.str+2, p, len) == 0) return i;
  }
  if (s->irep->plen == s->pcapa) {
    s->pcapa *= 2;
    s->pool = (mrb_pool_value*)codegen_realloc(s, s->pool, sizeof(mrb_pool_value)*s->pcapa);
  }
  pv = &s->pool[s->irep->plen];
  i = s->irep->plen++;
  {
    char *buf;
    pv->tt = IREP_TT_BIGINT;
    buf = (char*)codegen_realloc(s, NULL, plen+3);
    buf[0] = (char)plen;
    buf[1] = base;
    /* NOTE(review): a negative literal overwrites the base byte with 0x80,
     * so the dedupe test `pv->u.str[1] == base` above can never match a
     * negative bigint — confirm this is intentional for this version. */
    if (neg) buf[1] = 0x80;
    memcpy(buf+2, p, plen);
buf[plen+2] = '\0';           /* tail of new_litbn: NUL-terminate and publish */
    pv->u.str = buf;
  }
  return i;
}

/* Intern a literal value (string/float/integer) into the irep literal
 * pool, reusing an existing identical entry when possible.
 * Returns the pool index of the (possibly shared) entry. */
static int
new_lit(codegen_scope *s, mrb_value val)
{
  int i;
  mrb_pool_value *pv;

  switch (mrb_type(val)) {
  case MRB_TT_STRING:
    for (i=0; i<s->irep->plen; i++) {
      mrb_int len;
      pv = &s->pool[i];
      if (pv->tt & IREP_TT_NFLAG) continue;   /* non-string entry */
      len = pv->tt>>2;                        /* string length packed in tt */
      if (RSTRING_LEN(val) != len) continue;
      if (memcmp(pv->u.str, RSTRING_PTR(val), len) == 0) return i;
    }
    break;
#ifndef MRB_NO_FLOAT
  case MRB_TT_FLOAT:
    for (i=0; i<s->irep->plen; i++) {
      mrb_float f1, f2;
      pv = &s->pool[i];
      if (pv->tt != IREP_TT_FLOAT) continue;
      pv = &s->pool[i];
      f1 = pv->u.f;
      f2 = mrb_float(val);
      /* bitwise-aware compare: keep +0.0 and -0.0 as distinct literals */
      if (f1 == f2 && !signbit(f1) == !signbit(f2)) return i;
    }
    break;
#endif
  case MRB_TT_INTEGER:
    for (i=0; i<s->irep->plen; i++) {
      mrb_int v = mrb_integer(val);
      pv = &s->pool[i];
      if (pv->tt == IREP_TT_INT32) {
        if (v == pv->u.i32) return i;
      }
#ifdef MRB_64BIT
      else if (pv->tt == IREP_TT_INT64) {
        if (v == pv->u.i64) return i;
      }
      continue;
#endif
    }
    break;
  default:
    /* should not happen */
    return 0;
  }

  /* not found: grow the pool if full and append a new entry */
  if (s->irep->plen == s->pcapa) {
    s->pcapa *= 2;
    s->pool = (mrb_pool_value*)codegen_realloc(s, s->pool, sizeof(mrb_pool_value)*s->pcapa);
  }
  pv = &s->pool[s->irep->plen];
  i = s->irep->plen++;
  switch (mrb_type(val)) {
  case MRB_TT_STRING:
    if (RSTR_NOFREE_P(RSTRING(val))) {
      /* static string: share the pointer, no copy */
      pv->tt = (uint32_t)(RSTRING_LEN(val)<<2) | IREP_TT_SSTR;
      pv->u.str = RSTRING_PTR(val);
    }
    else {
      char *p;
      mrb_int len = RSTRING_LEN(val);
      pv->tt = (uint32_t)(len<<2) | IREP_TT_STR;
      p = (char*)codegen_realloc(s, NULL, len+1);
      memcpy(p, RSTRING_PTR(val), len);
      p[len] = '\0';
      pv->u.str = p;
    }
    break;
#ifndef MRB_NO_FLOAT
  case MRB_TT_FLOAT:
    pv->tt = IREP_TT_FLOAT;
    pv->u.f = mrb_float(val);
    break;
#endif
  case MRB_TT_INTEGER:
#ifdef MRB_INT64
    pv->tt = IREP_TT_INT64;
    pv->u.i64 = mrb_integer(val);
#else
    pv->tt = IREP_TT_INT32;
    pv->u.i32 = mrb_integer(val);
#endif
    break;
  default:
    /* should not happen */
    break;
  }
  return i;
}

/* Intern a symbol into the irep symbol table (deduplicated);
 * returns its index.  The table is capped at 0xffff entries. */
static int
new_sym(codegen_scope *s, mrb_sym sym)
{
  int i, len;

  mrb_assert(s->irep);
  len = s->irep->slen;
  for (i=0; i<len; i++) {
    if (s->syms[i] == sym) return i;
  }
  if (s->irep->slen >= s->scapa) {
    s->scapa *= 2;
    if (s->scapa > 0xffff) {
      codegen_error(s, "too many symbols");
    }
    s->syms = (mrb_sym*)codegen_realloc(s, s->syms, sizeof(mrb_sym)*s->scapa);
  }
  s->syms[s->irep->slen] = sym;
  return s->irep->slen++;
}

/* Emit a "set variable" opcode (OP_SETGV/OP_SETIV/...).  When the result
 * is unused, fold a preceding OP_MOVE so the store reads the move's
 * source register directly. */
static void
gen_setxv(codegen_scope *s, uint8_t op, uint16_t dst, mrb_sym sym, int val)
{
  int idx = new_sym(s, sym);
  if (!val && !no_peephole(s)) {
    struct mrb_insn_data data = mrb_last_insn(s);
    if (data.insn == OP_MOVE && data.a == dst) {
      dst = data.b;
      rewind_pc(s);
    }
  }
  genop_2(s, op, dst, idx);
}

/* Load integer constant i into register dst using the smallest encoding:
 * dedicated opcodes for -1..7, then 8/16/32-bit immediates, finally the
 * literal pool for anything wider. */
static void
gen_int(codegen_scope *s, uint16_t dst, mrb_int i)
{
  if (i < 0) {
    if (i == -1) genop_1(s, OP_LOADI__1, dst);
    else if (i >= -0xff) genop_2(s, OP_LOADINEG, dst, (uint16_t)-i);
    else if (i >= INT16_MIN) genop_2S(s, OP_LOADI16, dst, (uint16_t)i);
    else if (i >= INT32_MIN) genop_2SS(s, OP_LOADI32, dst, (uint32_t)i);
    else goto int_lit;
  }
  else if (i < 8) genop_1(s, OP_LOADI_0 + (uint8_t)i, dst);
  else if (i <= 0xff) genop_2(s, OP_LOADI, dst, (uint16_t)i);
  else if (i <= INT16_MAX) genop_2S(s, OP_LOADI16, dst, (uint16_t)i);
  else if (i <= INT32_MAX) genop_2SS(s, OP_LOADI32, dst, (uint32_t)i);
  else {
  int_lit:
    genop_2(s, OP_LOADL, dst, new_lit(s, mrb_int_value(s->mrb, i)));
  }
}

/* Constant-fold a unary operator (+, -, ~) applied to an integer literal
 * just emitted.  Returns TRUE when it replaced the load with the folded
 * constant (caller skips the generic method dispatch). */
static mrb_bool
gen_uniop(codegen_scope *s, mrb_sym sym, uint16_t dst)
{
  if (no_peephole(s)) return FALSE;
  struct mrb_insn_data data = mrb_last_insn(s);
  mrb_int n;

  if (!get_int_operand(s, &data, &n)) return FALSE;
  if (sym == MRB_OPSYM_2(s->mrb, plus)) {
    /* unary plus does nothing */
  }
  else if (sym == MRB_OPSYM_2(s->mrb, minus)) {
    if (n == MRB_INT_MIN) return FALSE; /* -n would overflow */
    n = -n;
  }
  else if (sym == MRB_OPSYM_2(s->mrb, neg)) {
    n = ~n;
  }
  else {
    return FALSE;
  }
  s->pc = addr_pc(s, data.addr);
  gen_int(s, dst, n);
  return TRUE;
}

/* Length of a parser cons-cell list. */
static int
node_len(node *tree)
{
  int n = 0;

  while (tree) {
    n++;
    tree = tree->cdr;
  }
  return n;
}

/* Extractors for values packed into node pointers by the parser. */
#define nint(x) ((int)(intptr_t)(x))
#define nchar(x) ((char)(intptr_t)(x))
#define nsym(x)
((mrb_sym)(intptr_t)(x))
#define lv_name(lv) nsym((lv)->car)

/* 1-based index of local variable id in this scope's lv list; 0 if absent. */
static int
lv_idx(codegen_scope *s, mrb_sym id)
{
  node *lv = s->lv;
  int n = 1;

  while (lv) {
    if (lv_name(lv) == id) return n;
    n++;
    lv = lv->cdr;
  }
  return 0;
}

/* Find local variable `id` in an enclosing scope.  Searches the scopes
 * being compiled first, then (for eval-style compilation) the chain of
 * already-compiled enclosing procs via s->parser->upper.
 * On success stores the variable index in *idx and returns the number of
 * scope levels up; raises a codegen error when the variable is missing. */
static int
search_upvar(codegen_scope *s, mrb_sym id, int *idx)
{
  const struct RProc *u;
  int lv = 0;
  codegen_scope *up = s->prev;

  while (up) {
    *idx = lv_idx(up, id);
    if (*idx > 0) {
      return lv;
    }
    lv ++;
    up = up->prev;
  }

  if (lv < 1) lv = 1;
  u = s->parser->upper;
  while (u && !MRB_PROC_CFUNC_P(u)) {
    const struct mrb_irep *ir = u->body.irep;
    uint_fast16_t n = ir->nlocals;
    int i;
    const mrb_sym *v = ir->lv;
    if (v) {
      for (i=1; n > 1; n--, v++, i++) {
        if (*v == id) {
          *idx = i;
          return lv - 1;
        }
      }
    }
    if (MRB_PROC_SCOPE_P(u)) break;
    u = u->upper;
    lv ++;
  }
  /* original mruby wording kept verbatim (runtime string) */
  codegen_error(s, "Can't found local variables");
  return -1;                    /* not reached */
}

/* Compile `for lhs in recv; body; end`: the body becomes a child block
 * irep (one block parameter), then we emit `recv.each(&block)`. */
static void
for_body(codegen_scope *s, node *tree)
{
  codegen_scope *prev = s;
  int idx;
  struct loopinfo *lp;
  node *n2;

  /* generate receiver */
  codegen(s, tree->cdr->car, VAL);
  /* generate loop-block */
  s = scope_new(s->mrb, s, NULL);

  push();                       /* push for a block parameter */

  /* generate loop variable */
  n2 = tree->car;
  genop_W(s, OP_ENTER, 0x40000); /* one optional-rest style block arg */
  if (n2->car && !n2->car->cdr && !n2->cdr) {
    gen_assignment(s, n2->car->car, NULL, 1, NOVAL);
  }
  else {
    gen_vmassignment(s, n2, 1, VAL);
  }
  /* construct loop */
  lp = loop_push(s, LOOP_FOR);
  lp->pc1 = new_label(s);
  /* loop body */
  codegen(s, tree->cdr->cdr->car, VAL);
  pop();
  gen_return(s, OP_RETURN, cursp());
  loop_pop(s, NOVAL);
  scope_finish(s);
  s = prev;
  genop_2(s, OP_BLOCK, cursp(), s->irep->rlen-1);
  push();pop();                 /* space for a block */
  pop();
  idx = new_sym(s, MRB_SYM_2(s->mrb, each));
  genop_3(s, OP_SENDB, cursp(), idx, 0);
}

/* Compile a lambda/block/method body into a child scope.
 * Emits OP_ENTER with the packed argument spec, the jump table for
 * optional-argument defaults, keyword-argument setup, destructuring of
 * masgn parameters, then the body.  Returns the child irep index in the
 * parent (completed by the fragment on the next source line). */
static int
lambda_body(codegen_scope *s, node *tree, int blk)
{
  codegen_scope *parent = s;
  s = scope_new(s->mrb, s, tree->car);

  s->mscope = !blk;

  if (blk) {
    struct loopinfo *lp = loop_push(s, LOOP_BLOCK);
    lp->pc0 = new_label(s);
  }
  tree = tree->cdr;
  if (tree->car == NULL) {
    genop_W(s, OP_ENTER, 0);
    s->ainfo = 0;
  }
  else {
    mrb_aspec a;
    int ma, oa, ra, pa, ka, kd, ba, i;
    uint32_t pos;
    node *opt;
    node *margs, *pargs;
    node *tail;

    /* mandatory arguments */
    ma = node_len(tree->car->car);
    margs = tree->car->car;
    tail = tree->car->cdr->cdr->cdr->cdr;

    /* optional arguments */
    oa = node_len(tree->car->cdr->car);
    /* rest argument? */
    ra = tree->car->cdr->cdr->car ? 1 : 0;
    /* mandatory arguments after rest argument */
    pa = node_len(tree->car->cdr->cdr->cdr->car);
    pargs = tree->car->cdr->cdr->cdr->car;
    /* keyword arguments */
    ka = tail? node_len(tail->cdr->car) : 0;
    /* keyword dictionary? */
    kd = tail && tail->cdr->cdr->car? 1 : 0;
    /* block argument? */
    ba = tail && tail->cdr->cdr->cdr->car ? 1 : 0;

    if (ma > 0x1f || oa > 0x1f || pa > 0x1f || ka > 0x1f) {
      codegen_error(s, "too many formal arguments");
    }
    /* (23bits = 5:5:1:5:5:1:1) */
    a = MRB_ARGS_REQ(ma)
      | MRB_ARGS_OPT(oa)
      | (ra? MRB_ARGS_REST() : 0)
      | MRB_ARGS_POST(pa)
      | MRB_ARGS_KEY(ka, kd)
      | (ba? MRB_ARGS_BLOCK() : 0);
    genop_W(s, OP_ENTER, a);
    /* (12bits = 5:1:5:1) */
    s->ainfo = (((ma+oa) & 0x3f) << 7)
      | ((ra & 0x1) << 6)
      | ((pa & 0x1f) << 1)
      | ((ka | kd) ? 1 : 0);
    /* generate jump table for optional arguments initializer */
    pos = new_label(s);
    for (i=0; i<oa; i++) {
      new_label(s);
      genjmp_0(s, OP_JMP);
    }
    if (oa > 0) {
      genjmp_0(s, OP_JMP);      /* all-supplied case jumps past the defaults */
    }
    opt = tree->car->cdr->car;
    i = 0;
    while (opt) {
      int idx;
      mrb_sym id = nsym(opt->car->car);

      dispatch(s, pos+i*3+1);   /* each table slot is one 3-byte OP_JMP */
      codegen(s, opt->car->cdr, VAL);
      pop();
      idx = lv_idx(s, id);
      if (idx > 0) {
        gen_move(s, idx, cursp(), 0);
      }
      else {
        gen_getupvar(s, cursp(), id);
      }
      i++;
      opt = opt->cdr;
    }
    if (oa > 0) {
      dispatch(s, pos+i*3+1);
    }
    /* keyword arguments */
    if (tail) {
      node *kwds = tail->cdr->car;
      int kwrest = 0;

      if (tail->cdr->cdr->car) {
        kwrest = 1;
      }
      mrb_assert(nint(tail->car) == NODE_ARGS_TAIL);
      mrb_assert(node_len(tail) == 4);

      while (kwds) {
        int jmpif_key_p, jmp_def_set = -1;
        node *kwd = kwds->car, *def_arg = kwd->cdr->cdr->car;
        mrb_sym kwd_sym = nsym(kwd->cdr->car);

        mrb_assert(nint(kwd->car) == NODE_KW_ARG);

        if (def_arg) {
          int idx;
          /* skip the default expression when the key was supplied */
          genop_2(s, OP_KEY_P, lv_idx(s, kwd_sym), new_sym(s, kwd_sym));
          jmpif_key_p = genjmp2_0(s, OP_JMPIF, lv_idx(s, kwd_sym), NOVAL);
          codegen(s, def_arg, VAL);
          pop();
          idx = lv_idx(s, kwd_sym);
          if (idx > 0) {
            gen_move(s, idx, cursp(), 0);
          }
          else {
            gen_getupvar(s, cursp(), kwd_sym);
          }
          jmp_def_set = genjmp_0(s, OP_JMP);
          dispatch(s, jmpif_key_p);
        }
        genop_2(s, OP_KARG, lv_idx(s, kwd_sym), new_sym(s, kwd_sym));
        if (jmp_def_set != -1) {
          dispatch(s, jmp_def_set);
        }
        i++;

        kwds = kwds->cdr;
      }
      if (tail->cdr->car && !kwrest) {
        genop_0(s, OP_KEYEND);  /* reject unknown keywords */
      }
    }

    /* argument destructuring */
    if (margs) {
      node *n = margs;

      pos = 1;
      while (n) {
        if (nint(n->car->car) == NODE_MASGN) {
          gen_vmassignment(s, n->car->cdr->car, pos, NOVAL);
        }
        pos++;
        n = n->cdr;
      }
    }
    if (pargs) {
      node *n = pargs;

      pos = ma+oa+ra+1;
      while (n) {
        if (nint(n->car->car) == NODE_MASGN) {
          gen_vmassignment(s, n->car->cdr->car, pos, NOVAL);
        }
        pos++;
        n = n->cdr;
      }
    }
  }

  codegen(s, tree->cdr->car, VAL);
  pop();
  if (s->pc > 0) {
    gen_return(s, OP_RETURN, cursp());
  }
  if (blk) {
    loop_pop(s, NOVAL);
  }
  scope_finish(s);
  return
parent->irep->rlen - 1;       /* tail of lambda_body: index of new child irep */
}

/* Compile a top-level scope (script body) into a child irep; returns its
 * index in the parent's child-irep table. */
static int
scope_body(codegen_scope *s, node *tree, int val)
{
  codegen_scope *scope = scope_new(s->mrb, s, tree->car);

  codegen(scope, tree->cdr, VAL);
  gen_return(scope, OP_RETURN, scope->sp-1);
  if (!s->iseq) {
    genop_0(scope, OP_STOP);    /* outermost scope ends with OP_STOP */
  }
  scope_finish(scope);
  if (!s->irep) {
    /* should not happen */
    return 0;
  }
  return s->irep->rlen - 1;
}

/* TRUE when the argument list contains no splat (`*a`) node. */
static mrb_bool
nosplat(node *t)
{
  while (t) {
    if (nint(t->car->car) == NODE_SPLAT) return FALSE;
    t = t->cdr;
  }
  return TRUE;
}

/* Build the attribute-writer symbol for `a`, i.e. :name -> :name= */
static mrb_sym
attrsym(codegen_scope *s, mrb_sym a)
{
  const char *name;
  mrb_int len;
  char *name2;

  name = mrb_sym_name_len(s->mrb, a, &len);
  name2 = (char *)codegen_palloc(s, (size_t)len + 1 /* '=' */ + 1 /* '\0' */ );
  mrb_assert_int_fit(mrb_int, len, size_t, SIZE_MAX);
  memcpy(name2, name, (size_t)len);
  name2[len] = '=';
  name2[len+1] = '\0';

  return mrb_intern(s->mrb, name2, len+1);
}

#define CALL_MAXARGS 15
#define GEN_LIT_ARY_MAX 64
#define GEN_VAL_STACK_MAX 99

/* Generate the values of list t onto the stack.  Returns the value count,
 * or -1 when a splat or the stack/length limits forced the values to be
 * collected into a single array register (variable-length form). */
static int
gen_values(codegen_scope *s, node *t, int val, int limit)
{
  int n = 0;
  int first = 1;
  int slimit = GEN_VAL_STACK_MAX;

  if (limit == 0) limit = GEN_LIT_ARY_MAX;
  if (cursp() >= slimit) slimit = INT16_MAX;

  if (!val) {
    /* value not needed: just evaluate for side effects */
    while (t) {
      codegen(s, t->car, NOVAL);
      n++;
      t = t->cdr;
    }
    return n;
  }

  while (t) {
    int is_splat = nint(t->car->car) == NODE_SPLAT;

    if (is_splat || n >= limit-1 || cursp() >= slimit) { /* flush stack */
      pop_n(n);
      if (first) {
        if (n == 0) {
          genop_1(s, OP_LOADNIL, cursp());
        }
        else {
          genop_2(s, OP_ARRAY, cursp(), n);
        }
        push();
        first = 0;
        limit = GEN_LIT_ARY_MAX;
      }
      else if (n > 0) {
        pop();
        genop_2(s, OP_ARYPUSH, cursp(), n);
        push();
      }
      n = 0;
    }
    codegen(s, t->car, val);
    if (is_splat) {
      pop(); pop();
      genop_1(s, OP_ARYCAT, cursp());
      push();
    }
    else {
      n++;
    }
    t = t->cdr;
  }
  if (!first) {
    pop();
    if (n > 0) {
      pop_n(n);
      genop_2(s, OP_ARYPUSH, cursp(), n);
    }
    return -1;                  /* variable length */
  }
  return n;
}

/* Generate a hash literal / keyword-argument list.  Returns the pair
 * count, or -1 when kw-rest splices or the limits forced the pairs to be
 * collected into a single hash register (variable-length form). */
static int
gen_hash(codegen_scope *s, node *tree, int val, int limit)
{
  int slimit = GEN_VAL_STACK_MAX;
  /* NOTE(review): the flush threshold here compares cursp() against
   * GEN_LIT_ARY_MAX while gen_values compares against slimit — confirm
   * this asymmetry is intentional in this version. */
  if (cursp() >= GEN_LIT_ARY_MAX) slimit = INT16_MAX;
  int len = 0;
  mrb_bool update = FALSE;

  while (tree) {
    if (nint(tree->car->car->car) == NODE_KW_REST_ARGS) {
      if (val && len > 0) {
        pop_n(len*2);
        if (!update) {
          genop_2(s, OP_HASH, cursp(), len);
        }
        else {
          pop();
          genop_2(s, OP_HASHADD, cursp(), len);
        }
        push();
      }
      codegen(s, tree->car->cdr, val);
      if (val && (len > 0 || update)) {
        pop(); pop();
        genop_1(s, OP_HASHCAT, cursp());
        push();
      }
      update = TRUE;
      len = 0;
    }
    else {
      codegen(s, tree->car->car, val); /* key */
      codegen(s, tree->car->cdr, val); /* value */
      len++;
    }
    tree = tree->cdr;
    if (val && cursp() >= slimit) {
      /* stack getting deep: fold pairs into the hash register now */
      pop_n(len*2);
      if (!update) {
        genop_2(s, OP_HASH, cursp(), len);
      }
      else {
        pop();
        genop_2(s, OP_HASHADD, cursp(), len);
      }
      push();
      update = TRUE;
      len = 0;
    }
  }
  if (val && len > limit) {
    pop_n(len*2);
    genop_2(s, OP_HASH, cursp(), len);
    push();
    return -1;
  }
  if (update) {
    if (val && len > 0) {
      pop_n(len*2+1);
      genop_2(s, OP_HASHADD, cursp(), len);
      push();
    }
    return -1;                  /* variable length */
  }
  return len;
}

/* Compile a method call (NODE_CALL/NODE_FCALL; safe!=0 for `&.`).
 * Evaluates receiver, positional args, keyword args and block, then tries
 * operator specializations and constant folding before falling back to
 * OP_SEND/OP_SSEND (with B suffix when a block is passed). */
static void
gen_call(codegen_scope *s, node *tree, int val, int safe)
{
  mrb_sym sym = nsym(tree->cdr->car);
  int skip = 0, n = 0, nk = 0, noop = 0, noself = 0, blk = 0, sp_save = cursp();

  if (!tree->car) {
    noself = noop = 1;
    push();
  }
  else {
    codegen(s, tree->car, VAL); /* receiver */
  }
  if (safe) {
    /* `&.`: duplicate receiver and jump past the call when nil */
    int recv = cursp()-1;
    gen_move(s, cursp(), recv, 1);
    skip = genjmp2_0(s, OP_JMPNIL, cursp(), val);
  }
  tree = tree->cdr->cdr->car;
  if (tree) {
    if (tree->car) {            /* positional arguments */
      n = gen_values(s, tree->car, VAL, 14);
      if (n < 0) {              /* variable length */
        noop = 1;               /* not operator */
        n = 15;
        push();
      }
    }
    if (tree->cdr->car) {       /* keyword arguments */
      noop = 1;
      nk = gen_hash(s, tree->cdr->car->cdr, VAL, 14);
      if (nk < 0) nk = 15;
    }
  }
  if (tree && tree->cdr && tree->cdr->cdr) {
    codegen(s, tree->cdr->cdr, VAL); /* block argument */
    pop();
    noop = 1;
    blk = 1;
  }
  push();pop();
  s->sp = sp_save;
  if (!noop && sym == MRB_OPSYM_2(s->mrb, add) && n == 1)  {
    gen_addsub(s, OP_ADD, cursp());
  }
  else if (!noop && sym == MRB_OPSYM_2(s->mrb, sub) && n == 1)  {
    gen_addsub(s, OP_SUB, cursp());
  }
  else if (!noop && sym == MRB_OPSYM_2(s->mrb, mul) && n == 1)  {
    gen_muldiv(s, OP_MUL, cursp());
  }
  else if (!noop && sym == MRB_OPSYM_2(s->mrb, div) && n == 1)  {
    gen_muldiv(s, OP_DIV, cursp());
  }
  else if (!noop && sym == MRB_OPSYM_2(s->mrb, lt) && n == 1)  {
    genop_1(s, OP_LT, cursp());
  }
  else if (!noop && sym == MRB_OPSYM_2(s->mrb, le) && n == 1)  {
    genop_1(s, OP_LE, cursp());
  }
  else if (!noop && sym == MRB_OPSYM_2(s->mrb, gt) && n == 1)  {
    genop_1(s, OP_GT, cursp());
  }
  else if (!noop && sym == MRB_OPSYM_2(s->mrb, ge) && n == 1)  {
    genop_1(s, OP_GE, cursp());
  }
  else if (!noop && sym == MRB_OPSYM_2(s->mrb, eq) && n == 1)  {
    genop_1(s, OP_EQ, cursp());
  }
  else if (!noop && sym == MRB_OPSYM_2(s->mrb, aset) && n == 2)  {
    genop_1(s, OP_SETIDX, cursp());
  }
  else if (!noop && n == 0 && gen_uniop(s, sym, cursp())) {
    /* constant folding succeeded */
  }
  else if (!noop && n == 1 && gen_binop(s, sym, cursp())) {
    /* constant folding succeeded */
  }
  else if (noself){
    genop_3(s, blk ? OP_SSENDB : OP_SSEND, cursp(), new_sym(s, sym), n|(nk<<4));
  }
  else {
    genop_3(s, blk ? OP_SENDB : OP_SEND, cursp(), new_sym(s, sym), n|(nk<<4));
  }
  if (safe) {
    dispatch(s, skip);
  }
  if (val) {
    push();
  }
}

/* Compile an assignment of rhs (or of register sp when rhs==NULL) to the
 * lhs described by tree: local/ivar/gvar/cvar/const, Foo::Bar, attribute
 * call (a.x = v, a[i] = v), or multiple assignment. */
static void
gen_assignment(codegen_scope *s, node *tree, node *rhs, int sp, int val)
{
  int idx;
  int type = nint(tree->car);

  switch (type) {
  case NODE_GVAR:
  case NODE_ARG:
  case NODE_LVAR:
  case NODE_IVAR:
  case NODE_CVAR:
  case NODE_CONST:
  case NODE_NIL:
  case NODE_MASGN:
    if (rhs) {
      codegen(s, rhs, VAL);
      pop();
      sp = cursp();
    }
    break;

  case NODE_COLON2:
  case NODE_CALL:
  case NODE_SCALL:
    /* keep evaluation order */
    break;

  case NODE_NVAR:
    codegen_error(s, "Can't assign to numbered parameter");
    break;

  default:
    codegen_error(s, "unknown lhs");
    break;
  }

  tree = tree->cdr;
  switch (type) {
  case NODE_GVAR:
    gen_setxv(s, OP_SETGV, sp, nsym(tree), val);
    break;
  case NODE_ARG:
  case NODE_LVAR:
    idx = lv_idx(s, nsym(tree));
    if (idx > 0) {
      if (idx != sp) {
        gen_move(s, idx, sp, val);
      }
      break;
    }
    else {                      /* upvar */
      gen_setupvar(s, sp, nsym(tree));
    }
    break;
  case NODE_IVAR:
    gen_setxv(s, OP_SETIV, sp, nsym(tree), val);
    break;
  case NODE_CVAR:
    gen_setxv(s, OP_SETCV, sp, nsym(tree), val);
    break;
  case NODE_CONST:
    gen_setxv(s, OP_SETCONST, sp, nsym(tree), val);
    break;
  case NODE_COLON2:
    if (sp) {
      gen_move(s, cursp(), sp, 0);
    }
    sp = cursp();
    push();
    codegen(s, tree->car, VAL); /* receiver module/class */
    if (rhs) {
      codegen(s, rhs, VAL); pop();
      gen_move(s, sp, cursp(), 0);
    }
    pop_n(2);
    idx = new_sym(s, nsym(tree->cdr));
    genop_2(s, OP_SETMCNST, sp, idx);
    break;

  case NODE_CALL:
  case NODE_SCALL:
    {
      int noself = 0, safe = (type == NODE_SCALL), skip = 0, top, call, n = 0;
      mrb_sym mid = nsym(tree->cdr->car);

      top = cursp();
      if (val || sp == cursp()) {
        push();                 /* room for retval */
      }
      call = cursp();
      if (!tree->car) {
        noself = 1;
        push();
      }
      else {
        codegen(s, tree->car, VAL); /* receiver */
      }
      if (safe) {
        int recv = cursp()-1;
        gen_move(s, cursp(), recv, 1);
        skip = genjmp2_0(s, OP_JMPNIL, cursp(), val);
      }
      tree = tree->cdr->cdr->car;
      if (tree) {
        if (tree->car) {            /* positional arguments */
          n = gen_values(s, tree->car, VAL,
(tree->cdr->car)?13:14);  /* tail of gen_assignment: arg limit depends on kwargs */
          if (n < 0) {              /* variable length */
            n = 15;
            push();
          }
        }
        if (tree->cdr->car) {       /* keyword arguments */
          gen_hash(s, tree->cdr->car->cdr, VAL, 0);
          if (n < 14) {
            n++;
            push();
          }
          else {
            pop();
            genop_2(s, OP_ARYPUSH, cursp(), 1);
          }
        }
      }
      if (rhs) {
        codegen(s, rhs, VAL);
        pop();
      }
      else {
        gen_move(s, cursp(), sp, 0);
      }
      if (val) {
        gen_move(s, top, cursp(), 1);
      }
      if (n < 14) {
        n++;
      }
      else {
        pop();
        genop_2(s, OP_ARYPUSH, cursp(), 1);
      }
      s->sp = call;
      if (mid == MRB_OPSYM_2(s->mrb, aref) && n == 2) {
        genop_1(s, OP_SETIDX, cursp());
      }
      else {
        genop_3(s, noself ? OP_SSEND : OP_SEND, cursp(), new_sym(s, attrsym(s, mid)), n);
      }
      if (safe) {
        dispatch(s, skip);
      }
      s->sp = top;
    }
    break;

  case NODE_MASGN:
    gen_vmassignment(s, tree->car, sp, val);
    break;

  /* splat without assignment */
  case NODE_NIL:
    break;

  default:
    codegen_error(s, "unknown lhs");
    break;
  }
  if (val) push();
}

/* Destructure the array in register rhs into a multiple-assignment lhs:
 * pre targets via OP_AREF, then OP_APOST for rest + post targets. */
static void
gen_vmassignment(codegen_scope *s, node *tree, int rhs, int val)
{
  int n = 0, post = 0;
  node *t, *p;

  if (tree->car) {              /* pre */
    t = tree->car;
    n = 0;
    while (t) {
      int sp = cursp();

      genop_3(s, OP_AREF, sp, rhs, n);
      push();
      gen_assignment(s, t->car, NULL, sp, NOVAL);
      pop();
      n++;
      t = t->cdr;
    }
  }
  t = tree->cdr;
  if (t) {
    if (t->cdr) {               /* post count */
      p = t->cdr->car;
      while (p) {
        post++;
        p = p->cdr;
      }
    }
    gen_move(s, cursp(), rhs, val);
    push_n(post+1);
    pop_n(post+1);
    genop_3(s, OP_APOST, cursp(), n, post);
    n = 1;
    if (t->car && t->car != (node*)-1) { /* rest */
      gen_assignment(s, t->car, NULL, cursp(), NOVAL);
    }
    if (t->cdr && t->cdr->car) {
      t = t->cdr->car;
      while (t) {
        gen_assignment(s, t->car, NULL, cursp()+n, NOVAL);
        t = t->cdr;
        n++;
      }
    }
    if (val) {
      gen_move(s, cursp(), rhs, 0);
    }
  }
}

/* Turn the string on top of the stack into a symbol.  Peephole: a string
 * built by OP_STRING becomes a direct OP_SYMBOL of the same literal. */
static void
gen_intern(codegen_scope *s)
{
  pop();
  if (!no_peephole(s)) {
    struct mrb_insn_data data = mrb_last_insn(s);

    if (data.insn == OP_STRING && data.a == cursp()) {
      rewind_pc(s);
      genop_2(s, OP_SYMBOL, data.a, data.b);
      push();
      return;
    }
  }
  genop_1(s, OP_INTERN, cursp());
  push();
}

/* Compile %w/%i style word/symbol array literals.  Adjacent string pieces
 * are concatenated with OP_STRCAT; when `sym` each element is interned;
 * elements are flushed into an array in GEN_LIT_ARY_MAX-sized batches. */
static void
gen_literal_array(codegen_scope *s, node *tree, mrb_bool sym, int val)
{
  if (val) {
    int i = 0, j = 0, gen = 0;

    while (tree) {
      switch (nint(tree->car->car)) {
      case NODE_STR:
        if ((tree->cdr == NULL) && (nint(tree->car->cdr->cdr) == 0))
          break;
        /* fall through */
      case NODE_BEGIN:
        codegen(s, tree->car, VAL);
        ++j;
        break;

      case NODE_LITERAL_DELIM:
        if (j > 0) {
          j = 0;
          ++i;
          if (sym)
            gen_intern(s);
        }
        break;
      }
      while (j >= 2) {
        pop(); pop();
        genop_1(s, OP_STRCAT, cursp());
        push();
        j--;
      }
      if (i > GEN_LIT_ARY_MAX) {
        pop_n(i);
        if (gen) {
          pop();
          genop_2(s, OP_ARYPUSH, cursp(), i);
        }
        else {
          genop_2(s, OP_ARRAY, cursp(), i);
          gen = 1;
        }
        push();
        i = 0;
      }
      tree = tree->cdr;
    }
    if (j > 0) {
      ++i;
      if (sym)
        gen_intern(s);
    }
    pop_n(i);
    if (gen) {
      pop();
      genop_2(s, OP_ARYPUSH, cursp(), i);
    }
    else {
      genop_2(s, OP_ARRAY, cursp(), i);
    }
    push();
  }
  else {
    /* value unused: only evaluate embedded expressions for side effects */
    while (tree) {
      switch (nint(tree->car->car)) {
      case NODE_BEGIN: case NODE_BLOCK:
        codegen(s, tree->car, NOVAL);
      }
      tree = tree->cdr;
    }
  }
}

/* Emit OP_ERR raising a runtime error with the given message literal. */
static void
raise_error(codegen_scope *s, const char *msg)
{
  int idx = new_lit(s, mrb_str_new_cstr(s->mrb, msg));

  genop_1(s, OP_ERR, idx);
}

/* Parse the NUL-terminated digit string p (base 2..16) into an mrb_int.
 * Sets *overflow on overflow (returning 0); recognizes MRB_INT_MIN for
 * negative literals exactly at the boundary. */
static mrb_int
readint(codegen_scope *s, const char *p, int base, mrb_bool neg, mrb_bool *overflow)
{
  const char *e = p + strlen(p);
  mrb_int result = 0;

  mrb_assert(base >= 2 && base <= 16);
  if (*p == '+') p++;
  while (p < e) {
    int n;
    char c = *p;
    switch (c) {
    case '0': case '1': case '2': case '3':
    case '4': case '5': case '6': case '7':
      n = c - '0'; break;
    case '8': case '9':
      n = c - '0'; break;
    case 'a': case 'b': case 'c': case 'd': case 'e': case 'f':
      n = c - 'a' + 10; break;
    case 'A': case 'B': case 'C': case 'D': case 'E': case 'F':
      n = c - 'A' + 10; break;
    default:
      codegen_error(s, "malformed readint input");
      *overflow = TRUE;
      /* not reached */
      return result;
    }
    if (mrb_int_mul_overflow(result, base, &result)) {
    overflow:
      *overflow = TRUE;
      return 0;
    }
    mrb_uint tmp = ((mrb_uint)result)+n;
    if (neg && tmp == (mrb_uint)MRB_INT_MAX+1) {
      /* -(-MRB_INT_MIN) == MRB_INT_MIN: representable only when negative */
      *overflow = FALSE;
      return MRB_INT_MIN;
    }
    if (tmp > MRB_INT_MAX) goto overflow;
    result = (mrb_int)tmp;
    p++;
  }
  *overflow = FALSE;
  if (neg) return -result;
  return result;
}

/* Generate a return value expression; a splat is duplicated with
 * OP_ARYDUP so the callee cannot mutate the caller's array. */
static void
gen_retval(codegen_scope *s, node *tree)
{
  if (nint(tree->car) == NODE_SPLAT) {
    codegen(s, tree, VAL);
    pop();
    genop_1(s, OP_ARYDUP, cursp());
  }
  else {
    codegen(s, tree, VAL);
    pop();
  }
}

/* TRUE when the node is a literal that is always truthy in Ruby. */
static mrb_bool
true_always(node *tree)
{
  switch (nint(tree->car)) {
  case NODE_TRUE:
  case NODE_INT:
  case NODE_STR:
  case NODE_SYM:
    return TRUE;
  default:
    return FALSE;
  }
}

/* TRUE when the node is a literal that is always falsy in Ruby. */
static mrb_bool
false_always(node *tree)
{
  switch (nint(tree->car)) {
  case NODE_FALSE:
  case NODE_NIL:
    return TRUE;
  default:
    return FALSE;
  }
}

/* Load the implicit block parameter of the enclosing method, unpacking
 * its register offset from the packed ainfo bits (see lambda_body). */
static void
gen_blkmove(codegen_scope *s, uint16_t ainfo, int lv)
{
  int m1 = (ainfo>>7)&0x3f;
  int r = (ainfo>>6)&0x1;
  int m2 = (ainfo>>1)&0x1f;
  int kd = (ainfo)&0x1;
  int off = m1+r+m2+kd+1;
  if (lv == 0) {
    gen_move(s, cursp(), off, 0);
  }
  else {
    genop_3(s, OP_GETUPVAR, cursp(), off, lv);
  }
  push();
}

/* Main AST-to-bytecode recursion.  `val`: whether the expression's value
 * is needed on the stack.  (Definition continues beyond this chunk.) */
static void
codegen(codegen_scope *s, node *tree, int val)
{
  int nt;
  int rlev = s->rlev;

  if (!tree) {
    if (val) {
      genop_1(s, OP_LOADNIL, cursp());
      push();
    }
    return;
  }

  s->rlev++;
  if (s->rlev > MRB_CODEGEN_LEVEL_MAX) {
    codegen_error(s, "too complex expression");
  }
  if (s->irep && s->filename_index != tree->filename_index) {
    /* source file changed (e.g. via eval): flush debug line info */
    mrb_sym fname = mrb_parser_get_filename(s->parser, s->filename_index);
    const char *filename = mrb_sym_name_len(s->mrb, fname, NULL);

    mrb_debug_info_append_file(s->mrb, s->irep->debug_info, filename, s->lines, s->debug_start_pos, s->pc);
    s->debug_start_pos = s->pc;
    s->filename_index = tree->filename_index;
    s->filename_sym = mrb_parser_get_filename(s->parser, tree->filename_index);
  }

  nt = nint(tree->car);
  s->lineno = tree->lineno;
  tree = tree->cdr;
  switch (nt) {
  case NODE_BEGIN:
    if (val && !tree) {
      genop_1(s, OP_LOADNIL, cursp());
      push();
    }
    while (tree) {
      codegen(s, tree->car, tree->cdr ?
NOVAL : val); tree = tree->cdr; } break; case NODE_RESCUE: { int noexc; uint32_t exend, pos1, pos2, tmp; struct loopinfo *lp; int catch_entry, begin, end; if (tree->car == NULL) goto exit; lp = loop_push(s, LOOP_BEGIN); lp->pc0 = new_label(s); catch_entry = catch_handler_new(s); begin = s->pc; codegen(s, tree->car, VAL); pop(); lp->type = LOOP_RESCUE; end = s->pc; noexc = genjmp_0(s, OP_JMP); catch_handler_set(s, catch_entry, MRB_CATCH_RESCUE, begin, end, s->pc); tree = tree->cdr; exend = JMPLINK_START; pos1 = JMPLINK_START; if (tree->car) { node *n2 = tree->car; int exc = cursp(); genop_1(s, OP_EXCEPT, exc); push(); while (n2) { node *n3 = n2->car; node *n4 = n3->car; dispatch(s, pos1); pos2 = JMPLINK_START; do { if (n4 && n4->car && nint(n4->car->car) == NODE_SPLAT) { codegen(s, n4->car, VAL); gen_move(s, cursp(), exc, 0); push_n(2); pop_n(2); /* space for one arg and a block */ pop(); genop_3(s, OP_SEND, cursp(), new_sym(s, MRB_SYM_2(s->mrb, __case_eqq)), 1); } else { if (n4) { codegen(s, n4->car, VAL); } else { genop_2(s, OP_GETCONST, cursp(), new_sym(s, MRB_SYM_2(s->mrb, StandardError))); push(); } pop(); genop_2(s, OP_RESCUE, exc, cursp()); } tmp = genjmp2(s, OP_JMPIF, cursp(), pos2, val); pos2 = tmp; if (n4) { n4 = n4->cdr; } } while (n4); pos1 = genjmp_0(s, OP_JMP); dispatch_linked(s, pos2); pop(); if (n3->cdr->car) { gen_assignment(s, n3->cdr->car, NULL, exc, NOVAL); } if (n3->cdr->cdr->car) { codegen(s, n3->cdr->cdr->car, val); if (val) pop(); } tmp = genjmp(s, OP_JMP, exend); exend = tmp; n2 = n2->cdr; push(); } if (pos1 != JMPLINK_START) { dispatch(s, pos1); genop_1(s, OP_RAISEIF, exc); } } pop(); tree = tree->cdr; dispatch(s, noexc); if (tree->car) { codegen(s, tree->car, val); } else if (val) { push(); } dispatch_linked(s, exend); loop_pop(s, NOVAL); } break; case NODE_ENSURE: if (!tree->cdr || !tree->cdr->cdr || (nint(tree->cdr->cdr->car) == NODE_BEGIN && tree->cdr->cdr->cdr)) { int catch_entry, begin, end, target; int idx; catch_entry = 
catch_handler_new(s); begin = s->pc; codegen(s, tree->car, val); end = target = s->pc; push(); idx = cursp(); genop_1(s, OP_EXCEPT, idx); push(); codegen(s, tree->cdr->cdr, NOVAL); pop(); genop_1(s, OP_RAISEIF, idx); pop(); catch_handler_set(s, catch_entry, MRB_CATCH_ENSURE, begin, end, target); } else { /* empty ensure ignored */ codegen(s, tree->car, val); } break; case NODE_LAMBDA: if (val) { int idx = lambda_body(s, tree, 1); genop_2(s, OP_LAMBDA, cursp(), idx); push(); } break; case NODE_BLOCK: if (val) { int idx = lambda_body(s, tree, 1); genop_2(s, OP_BLOCK, cursp(), idx); push(); } break; case NODE_IF: { uint32_t pos1, pos2; mrb_bool nil_p = FALSE; node *elsepart = tree->cdr->cdr->car; if (!tree->car) { codegen(s, elsepart, val); goto exit; } if (true_always(tree->car)) { codegen(s, tree->cdr->car, val); goto exit; } if (false_always(tree->car)) { codegen(s, elsepart, val); goto exit; } if (nint(tree->car->car) == NODE_CALL) { node *n = tree->car->cdr; mrb_sym mid = nsym(n->cdr->car); mrb_sym sym_nil_p = MRB_SYM_Q_2(s->mrb, nil); if (mid == sym_nil_p && n->cdr->cdr->car == NULL) { nil_p = TRUE; codegen(s, n->car, VAL); } } if (!nil_p) { codegen(s, tree->car, VAL); } pop(); if (val || tree->cdr->car) { if (nil_p) { pos2 = genjmp2_0(s, OP_JMPNIL, cursp(), val); pos1 = genjmp_0(s, OP_JMP); dispatch(s, pos2); } else { pos1 = genjmp2_0(s, OP_JMPNOT, cursp(), val); } codegen(s, tree->cdr->car, val); if (val) pop(); if (elsepart || val) { pos2 = genjmp_0(s, OP_JMP); dispatch(s, pos1); codegen(s, elsepart, val); dispatch(s, pos2); } else { dispatch(s, pos1); } } else { /* empty then-part */ if (elsepart) { if (nil_p) { pos1 = genjmp2_0(s, OP_JMPNIL, cursp(), val); } else { pos1 = genjmp2_0(s, OP_JMPIF, cursp(), val); } codegen(s, elsepart, val); dispatch(s, pos1); } else if (val && !nil_p) { genop_1(s, OP_LOADNIL, cursp()); push(); } } } break; case NODE_AND: { uint32_t pos; if (true_always(tree->car)) { codegen(s, tree->cdr, val); goto exit; } if 
(false_always(tree->car)) { codegen(s, tree->car, val); goto exit; } codegen(s, tree->car, VAL); pop(); pos = genjmp2_0(s, OP_JMPNOT, cursp(), val); codegen(s, tree->cdr, val); dispatch(s, pos); } break; case NODE_OR: { uint32_t pos; if (true_always(tree->car)) { codegen(s, tree->car, val); goto exit; } if (false_always(tree->car)) { codegen(s, tree->cdr, val); goto exit; } codegen(s, tree->car, VAL); pop(); pos = genjmp2_0(s, OP_JMPIF, cursp(), val); codegen(s, tree->cdr, val); dispatch(s, pos); } break; case NODE_WHILE: case NODE_UNTIL: { if (true_always(tree->car)) { if (nt == NODE_UNTIL) { if (val) { genop_1(s, OP_LOADNIL, cursp()); push(); } goto exit; } } else if (false_always(tree->car)) { if (nt == NODE_WHILE) { if (val) { genop_1(s, OP_LOADNIL, cursp()); push(); } goto exit; } } uint32_t pos = JMPLINK_START; struct loopinfo *lp = loop_push(s, LOOP_NORMAL); if (!val) lp->reg = -1; lp->pc0 = new_label(s); codegen(s, tree->car, VAL); pop(); if (nt == NODE_WHILE) { pos = genjmp2_0(s, OP_JMPNOT, cursp(), NOVAL); } else { pos = genjmp2_0(s, OP_JMPIF, cursp(), NOVAL); } lp->pc1 = new_label(s); codegen(s, tree->cdr, NOVAL); genjmp(s, OP_JMP, lp->pc0); dispatch(s, pos); loop_pop(s, val); } break; case NODE_FOR: for_body(s, tree); if (val) push(); break; case NODE_CASE: { int head = 0; uint32_t pos1, pos2, pos3, tmp; node *n; pos3 = JMPLINK_START; if (tree->car) { head = cursp(); codegen(s, tree->car, VAL); } tree = tree->cdr; while (tree) { n = tree->car->car; pos1 = pos2 = JMPLINK_START; while (n) { codegen(s, n->car, VAL); if (head) { gen_move(s, cursp(), head, 0); push(); push(); pop(); pop(); pop(); if (nint(n->car->car) == NODE_SPLAT) { genop_3(s, OP_SEND, cursp(), new_sym(s, MRB_SYM_2(s->mrb, __case_eqq)), 1); } else { genop_3(s, OP_SEND, cursp(), new_sym(s, MRB_OPSYM_2(s->mrb, eqq)), 1); } } else { pop(); } tmp = genjmp2(s, OP_JMPIF, cursp(), pos2, NOVAL); pos2 = tmp; n = n->cdr; } if (tree->car->car) { pos1 = genjmp_0(s, OP_JMP); dispatch_linked(s, pos2); } 
codegen(s, tree->car->cdr, val); if (val) pop(); tmp = genjmp(s, OP_JMP, pos3); pos3 = tmp; dispatch(s, pos1); tree = tree->cdr; } if (val) { uint32_t pos = cursp(); genop_1(s, OP_LOADNIL, cursp()); if (pos3 != JMPLINK_START) dispatch_linked(s, pos3); if (head) pop(); if (cursp() != pos) { gen_move(s, cursp(), pos, 0); } push(); } else { if (pos3 != JMPLINK_START) { dispatch_linked(s, pos3); } if (head) { pop(); } } } break; case NODE_SCOPE: scope_body(s, tree, NOVAL); break; case NODE_FCALL: case NODE_CALL: gen_call(s, tree, val, 0); break; case NODE_SCALL: gen_call(s, tree, val, 1); break; case NODE_DOT2: codegen(s, tree->car, val); codegen(s, tree->cdr, val); if (val) { pop(); pop(); genop_1(s, OP_RANGE_INC, cursp()); push(); } break; case NODE_DOT3: codegen(s, tree->car, val); codegen(s, tree->cdr, val); if (val) { pop(); pop(); genop_1(s, OP_RANGE_EXC, cursp()); push(); } break; case NODE_COLON2: { int sym = new_sym(s, nsym(tree->cdr)); codegen(s, tree->car, VAL); pop(); genop_2(s, OP_GETMCNST, cursp(), sym); if (val) push(); } break; case NODE_COLON3: { int sym = new_sym(s, nsym(tree)); genop_1(s, OP_OCLASS, cursp()); genop_2(s, OP_GETMCNST, cursp(), sym); if (val) push(); } break; case NODE_ARRAY: { int n; n = gen_values(s, tree, val, 0); if (val) { if (n >= 0) { pop_n(n); genop_2(s, OP_ARRAY, cursp(), n); } push(); } } break; case NODE_HASH: case NODE_KW_HASH: { int nk = gen_hash(s, tree, val, GEN_LIT_ARY_MAX); if (val && nk >= 0) { pop_n(nk*2); genop_2(s, OP_HASH, cursp(), nk); push(); } } break; case NODE_SPLAT: codegen(s, tree, val); break; case NODE_ASGN: gen_assignment(s, tree->car, tree->cdr, 0, val); break; case NODE_MASGN: { int len = 0, n = 0, post = 0; node *t = tree->cdr, *p; int rhs = cursp(); if (nint(t->car) == NODE_ARRAY && t->cdr && nosplat(t->cdr)) { /* fixed rhs */ t = t->cdr; while (t) { codegen(s, t->car, VAL); len++; t = t->cdr; } tree = tree->car; if (tree->car) { /* pre */ t = tree->car; n = 0; while (t) { if (n < len) { 
gen_assignment(s, t->car, NULL, rhs+n, NOVAL); n++; } else { genop_1(s, OP_LOADNIL, rhs+n); gen_assignment(s, t->car, NULL, rhs+n, NOVAL); } t = t->cdr; } } t = tree->cdr; if (t) { if (t->cdr) { /* post count */ p = t->cdr->car; while (p) { post++; p = p->cdr; } } if (t->car) { /* rest (len - pre - post) */ int rn; if (len < post + n) { rn = 0; } else { rn = len - post - n; } genop_3(s, OP_ARRAY2, cursp(), rhs+n, rn); gen_assignment(s, t->car, NULL, cursp(), NOVAL); n += rn; } if (t->cdr && t->cdr->car) { t = t->cdr->car; while (n<len) { gen_assignment(s, t->car, NULL, rhs+n, NOVAL); t = t->cdr; n++; } } } pop_n(len); if (val) { genop_2(s, OP_ARRAY, rhs, len); push(); } } else { /* variable rhs */ codegen(s, t, VAL); gen_vmassignment(s, tree->car, rhs, val); if (!val) { pop(); } } } break; case NODE_OP_ASGN: { mrb_sym sym = nsym(tree->cdr->car); mrb_int len; const char *name = mrb_sym_name_len(s->mrb, sym, &len); int idx, callargs = -1, vsp = -1; if ((len == 2 && name[0] == '|' && name[1] == '|') && (nint(tree->car->car) == NODE_CONST || nint(tree->car->car) == NODE_CVAR)) { int catch_entry, begin, end; int noexc, exc; struct loopinfo *lp; lp = loop_push(s, LOOP_BEGIN); lp->pc0 = new_label(s); catch_entry = catch_handler_new(s); begin = s->pc; exc = cursp(); codegen(s, tree->car, VAL); end = s->pc; noexc = genjmp_0(s, OP_JMP); lp->type = LOOP_RESCUE; catch_handler_set(s, catch_entry, MRB_CATCH_RESCUE, begin, end, s->pc); genop_1(s, OP_EXCEPT, exc); genop_1(s, OP_LOADF, exc); dispatch(s, noexc); loop_pop(s, NOVAL); } else if (nint(tree->car->car) == NODE_CALL) { node *n = tree->car->cdr; int base, i, nargs = 0; callargs = 0; if (val) { vsp = cursp(); push(); } codegen(s, n->car, VAL); /* receiver */ idx = new_sym(s, nsym(n->cdr->car)); base = cursp()-1; if (n->cdr->cdr->car) { nargs = gen_values(s, n->cdr->cdr->car->car, VAL, 13); if (nargs >= 0) { callargs = nargs; } else { /* varargs */ push(); nargs = 1; callargs = CALL_MAXARGS; } } /* copy receiver and arguments 
*/ gen_move(s, cursp(), base, 1); for (i=0; i<nargs; i++) { gen_move(s, cursp()+i+1, base+i+1, 1); } push_n(nargs+2);pop_n(nargs+2); /* space for receiver, arguments and a block */ genop_3(s, OP_SEND, cursp(), idx, callargs); push(); } else { codegen(s, tree->car, VAL); } if (len == 2 && ((name[0] == '|' && name[1] == '|') || (name[0] == '&' && name[1] == '&'))) { uint32_t pos; pop(); if (val) { if (vsp >= 0) { gen_move(s, vsp, cursp(), 1); } pos = genjmp2_0(s, name[0]=='|'?OP_JMPIF:OP_JMPNOT, cursp(), val); } else { pos = genjmp2_0(s, name[0]=='|'?OP_JMPIF:OP_JMPNOT, cursp(), val); } codegen(s, tree->cdr->cdr->car, VAL); pop(); if (val && vsp >= 0) { gen_move(s, vsp, cursp(), 1); } if (nint(tree->car->car) == NODE_CALL) { if (callargs == CALL_MAXARGS) { pop(); genop_2(s, OP_ARYPUSH, cursp(), 1); } else { pop_n(callargs); callargs++; } pop(); idx = new_sym(s, attrsym(s, nsym(tree->car->cdr->cdr->car))); genop_3(s, OP_SEND, cursp(), idx, callargs); } else { gen_assignment(s, tree->car, NULL, cursp(), val); } dispatch(s, pos); goto exit; } codegen(s, tree->cdr->cdr->car, VAL); push(); pop(); pop(); pop(); if (len == 1 && name[0] == '+') { gen_addsub(s, OP_ADD, cursp()); } else if (len == 1 && name[0] == '-') { gen_addsub(s, OP_SUB, cursp()); } else if (len == 1 && name[0] == '*') { genop_1(s, OP_MUL, cursp()); } else if (len == 1 && name[0] == '/') { genop_1(s, OP_DIV, cursp()); } else if (len == 1 && name[0] == '<') { genop_1(s, OP_LT, cursp()); } else if (len == 2 && name[0] == '<' && name[1] == '=') { genop_1(s, OP_LE, cursp()); } else if (len == 1 && name[0] == '>') { genop_1(s, OP_GT, cursp()); } else if (len == 2 && name[0] == '>' && name[1] == '=') { genop_1(s, OP_GE, cursp()); } else { idx = new_sym(s, sym); genop_3(s, OP_SEND, cursp(), idx, 1); } if (callargs < 0) { gen_assignment(s, tree->car, NULL, cursp(), val); } else { if (val && vsp >= 0) { gen_move(s, vsp, cursp(), 0); } if (callargs == CALL_MAXARGS) { pop(); genop_2(s, OP_ARYPUSH, cursp(), 1); } else 
{ pop_n(callargs); callargs++; } pop(); idx = new_sym(s, attrsym(s,nsym(tree->car->cdr->cdr->car))); genop_3(s, OP_SEND, cursp(), idx, callargs); } } break; case NODE_SUPER: { codegen_scope *s2 = s; int lv = 0; int n = 0, nk = 0, st = 0; push(); while (!s2->mscope) { lv++; s2 = s2->prev; if (!s2) break; } if (tree) { node *args = tree->car; if (args) { st = n = gen_values(s, args, VAL, 14); if (n < 0) { st = 1; n = 15; push(); } } /* keyword arguments */ if (s2 && (s2->ainfo & 0x1) && tree->cdr->car) { nk = gen_hash(s, tree->cdr->car->cdr, VAL, 14); if (nk < 0) {st++; nk = 15;} else st += nk*2; n |= nk<<4; } /* block arguments */ if (tree->cdr->cdr) { codegen(s, tree->cdr->cdr, VAL); } else if (!s2) {/* super at top-level */ push(); /* no need to push block */ } else { gen_blkmove(s, s2->ainfo, lv); } st++; } else { if (!s2) push(); else gen_blkmove(s, s2->ainfo, lv); st++; } pop_n(st+1); genop_2(s, OP_SUPER, cursp(), n); if (val) push(); } break; case NODE_ZSUPER: { codegen_scope *s2 = s; int lv = 0; uint16_t ainfo = 0; int n = CALL_MAXARGS; int sp = cursp(); push(); /* room for receiver */ while (!s2->mscope) { lv++; s2 = s2->prev; if (!s2) break; } if (s2 && s2->ainfo > 0) { ainfo = s2->ainfo; } if (ainfo > 0) { genop_2S(s, OP_ARGARY, cursp(), (ainfo<<4)|(lv & 0xf)); push(); push(); push(); /* ARGARY pushes 3 values at most */ pop(); pop(); pop(); /* keyword arguments */ if (ainfo & 0x1) { n |= CALL_MAXARGS<<4; push(); } /* block argument */ if (tree && tree->cdr && tree->cdr->cdr) { push(); codegen(s, tree->cdr->cdr, VAL); } } else { /* block argument */ if (tree && tree->cdr && tree->cdr->cdr) { codegen(s, tree->cdr->cdr, VAL); } else { gen_blkmove(s, 0, lv); } n = 0; } s->sp = sp; genop_2(s, OP_SUPER, cursp(), n); if (val) push(); } break; case NODE_RETURN: if (tree) { gen_retval(s, tree); } else { genop_1(s, OP_LOADNIL, cursp()); } if (s->loop) { gen_return(s, OP_RETURN_BLK, cursp()); } else { gen_return(s, OP_RETURN, cursp()); } if (val) push(); break; case 
NODE_YIELD: { codegen_scope *s2 = s; int lv = 0, ainfo = -1; int n = 0, sendv = 0; while (!s2->mscope) { lv++; s2 = s2->prev; if (!s2) break; } if (s2) { ainfo = (int)s2->ainfo; } if (ainfo < 0) codegen_error(s, "invalid yield (SyntaxError)"); push(); if (tree) { n = gen_values(s, tree, VAL, 14); if (n < 0) { n = sendv = 1; push(); } } push();pop(); /* space for a block */ pop_n(n+1); genop_2S(s, OP_BLKPUSH, cursp(), (ainfo<<4)|(lv & 0xf)); if (sendv) n = CALL_MAXARGS; genop_3(s, OP_SEND, cursp(), new_sym(s, MRB_SYM_2(s->mrb, call)), n); if (val) push(); } break; case NODE_BREAK: loop_break(s, tree); if (val) push(); break; case NODE_NEXT: if (!s->loop) { raise_error(s, "unexpected next"); } else if (s->loop->type == LOOP_NORMAL) { codegen(s, tree, NOVAL); genjmp(s, OP_JMPUW, s->loop->pc0); } else { if (tree) { codegen(s, tree, VAL); pop(); } else { genop_1(s, OP_LOADNIL, cursp()); } gen_return(s, OP_RETURN, cursp()); } if (val) push(); break; case NODE_REDO: if (!s->loop || s->loop->type == LOOP_BEGIN || s->loop->type == LOOP_RESCUE) { raise_error(s, "unexpected redo"); } else { genjmp(s, OP_JMPUW, s->loop->pc1); } if (val) push(); break; case NODE_RETRY: { const char *msg = "unexpected retry"; const struct loopinfo *lp = s->loop; while (lp && lp->type != LOOP_RESCUE) { lp = lp->prev; } if (!lp) { raise_error(s, msg); } else { genjmp(s, OP_JMPUW, lp->pc0); } if (val) push(); } break; case NODE_LVAR: if (val) { int idx = lv_idx(s, nsym(tree)); if (idx > 0) { gen_move(s, cursp(), idx, val); } else { gen_getupvar(s, cursp(), nsym(tree)); } push(); } break; case NODE_NVAR: if (val) { int idx = nint(tree); gen_move(s, cursp(), idx, val); push(); } break; case NODE_GVAR: { int sym = new_sym(s, nsym(tree)); genop_2(s, OP_GETGV, cursp(), sym); if (val) push(); } break; case NODE_IVAR: { int sym = new_sym(s, nsym(tree)); genop_2(s, OP_GETIV, cursp(), sym); if (val) push(); } break; case NODE_CVAR: { int sym = new_sym(s, nsym(tree)); genop_2(s, OP_GETCV, cursp(), sym); if 
(val) push(); } break; case NODE_CONST: { int sym = new_sym(s, nsym(tree)); genop_2(s, OP_GETCONST, cursp(), sym); if (val) push(); } break; case NODE_BACK_REF: if (val) { char buf[] = {'$', nchar(tree)}; int sym = new_sym(s, mrb_intern(s->mrb, buf, sizeof(buf))); genop_2(s, OP_GETGV, cursp(), sym); push(); } break; case NODE_NTH_REF: if (val) { mrb_state *mrb = s->mrb; mrb_value str; int sym; str = mrb_format(mrb, "$%d", nint(tree)); sym = new_sym(s, mrb_intern_str(mrb, str)); genop_2(s, OP_GETGV, cursp(), sym); push(); } break; case NODE_ARG: /* should not happen */ break; case NODE_BLOCK_ARG: if (!tree) { int idx = lv_idx(s, MRB_OPSYM_2(s->mrb, and)); if (idx == 0) { codegen_error(s, "no anonymous block argument"); } gen_move(s, cursp(), idx, val); } else { codegen(s, tree, val); } break; case NODE_INT: if (val) { char *p = (char*)tree->car; int base = nint(tree->cdr->car); mrb_int i; mrb_bool overflow; i = readint(s, p, base, FALSE, &overflow); if (overflow) { int off = new_litbn(s, p, base, FALSE); genop_2(s, OP_LOADL, cursp(), off); } else { gen_int(s, cursp(), i); } push(); } break; #ifndef MRB_NO_FLOAT case NODE_FLOAT: if (val) { char *p = (char*)tree; mrb_float f = mrb_float_read(p, NULL); int off = new_lit(s, mrb_float_value(s->mrb, f)); genop_2(s, OP_LOADL, cursp(), off); push(); } break; #endif case NODE_NEGATE: { nt = nint(tree->car); switch (nt) { #ifndef MRB_NO_FLOAT case NODE_FLOAT: if (val) { char *p = (char*)tree->cdr; mrb_float f = mrb_float_read(p, NULL); int off = new_lit(s, mrb_float_value(s->mrb, -f)); genop_2(s, OP_LOADL, cursp(), off); push(); } break; #endif case NODE_INT: if (val) { char *p = (char*)tree->cdr->car; int base = nint(tree->cdr->cdr->car); mrb_int i; mrb_bool overflow; i = readint(s, p, base, TRUE, &overflow); if (overflow) { int off = new_litbn(s, p, base, TRUE); genop_2(s, OP_LOADL, cursp(), off); } else { gen_int(s, cursp(), i); } push(); } break; default: if (val) { codegen(s, tree, VAL); pop(); push_n(2);pop_n(2); /* 
space for receiver&block */ mrb_sym minus = MRB_OPSYM_2(s->mrb, minus); if (!gen_uniop(s, minus, cursp())) { genop_3(s, OP_SEND, cursp(), new_sym(s, minus), 0); } push(); } else { codegen(s, tree, NOVAL); } break; } } break; case NODE_STR: if (val) { char *p = (char*)tree->car; size_t len = (intptr_t)tree->cdr; int ai = mrb_gc_arena_save(s->mrb); int off = new_lit(s, mrb_str_new(s->mrb, p, len)); mrb_gc_arena_restore(s->mrb, ai); genop_2(s, OP_STRING, cursp(), off); push(); } break; case NODE_HEREDOC: tree = ((struct mrb_parser_heredoc_info *)tree)->doc; /* fall through */ case NODE_DSTR: if (val) { node *n = tree; if (!n) { genop_1(s, OP_LOADNIL, cursp()); push(); break; } codegen(s, n->car, VAL); n = n->cdr; while (n) { codegen(s, n->car, VAL); pop(); pop(); genop_1(s, OP_STRCAT, cursp()); push(); n = n->cdr; } } else { node *n = tree; while (n) { if (nint(n->car->car) != NODE_STR) { codegen(s, n->car, NOVAL); } n = n->cdr; } } break; case NODE_WORDS: gen_literal_array(s, tree, FALSE, val); break; case NODE_SYMBOLS: gen_literal_array(s, tree, TRUE, val); break; case NODE_DXSTR: { node *n; int ai = mrb_gc_arena_save(s->mrb); int sym = new_sym(s, MRB_SYM_2(s->mrb, Kernel)); genop_1(s, OP_LOADSELF, cursp()); push(); codegen(s, tree->car, VAL); n = tree->cdr; while (n) { if (nint(n->car->car) == NODE_XSTR) { n->car->car = (struct mrb_ast_node*)(intptr_t)NODE_STR; mrb_assert(!n->cdr); /* must be the end */ } codegen(s, n->car, VAL); pop(); pop(); genop_1(s, OP_STRCAT, cursp()); push(); n = n->cdr; } push(); /* for block */ pop_n(3); sym = new_sym(s, MRB_OPSYM_2(s->mrb, tick)); /* ` */ genop_3(s, OP_SEND, cursp(), sym, 1); if (val) push(); mrb_gc_arena_restore(s->mrb, ai); } break; case NODE_XSTR: { char *p = (char*)tree->car; size_t len = (intptr_t)tree->cdr; int ai = mrb_gc_arena_save(s->mrb); int off = new_lit(s, mrb_str_new(s->mrb, p, len)); int sym; genop_1(s, OP_LOADSELF, cursp()); push(); genop_2(s, OP_STRING, cursp(), off); push(); push(); pop_n(3); sym = 
new_sym(s, MRB_OPSYM_2(s->mrb, tick)); /* ` */ genop_3(s, OP_SEND, cursp(), sym, 1); if (val) push(); mrb_gc_arena_restore(s->mrb, ai); } break; case NODE_REGX: if (val) { char *p1 = (char*)tree->car; char *p2 = (char*)tree->cdr->car; char *p3 = (char*)tree->cdr->cdr; int ai = mrb_gc_arena_save(s->mrb); int sym = new_sym(s, mrb_intern_lit(s->mrb, REGEXP_CLASS)); int off = new_lit(s, mrb_str_new_cstr(s->mrb, p1)); int argc = 1; genop_1(s, OP_OCLASS, cursp()); genop_2(s, OP_GETMCNST, cursp(), sym); push(); genop_2(s, OP_STRING, cursp(), off); push(); if (p2 || p3) { if (p2) { /* opt */ off = new_lit(s, mrb_str_new_cstr(s->mrb, p2)); genop_2(s, OP_STRING, cursp(), off); } else { genop_1(s, OP_LOADNIL, cursp()); } push(); argc++; if (p3) { /* enc */ off = new_lit(s, mrb_str_new(s->mrb, p3, 1)); genop_2(s, OP_STRING, cursp(), off); push(); argc++; } } push(); /* space for a block */ pop_n(argc+2); sym = new_sym(s, MRB_SYM_2(s->mrb, compile)); genop_3(s, OP_SEND, cursp(), sym, argc); mrb_gc_arena_restore(s->mrb, ai); push(); } break; case NODE_DREGX: if (val) { node *n = tree->car; int ai = mrb_gc_arena_save(s->mrb); int sym = new_sym(s, mrb_intern_lit(s->mrb, REGEXP_CLASS)); int argc = 1; int off; char *p; genop_1(s, OP_OCLASS, cursp()); genop_2(s, OP_GETMCNST, cursp(), sym); push(); codegen(s, n->car, VAL); n = n->cdr; while (n) { codegen(s, n->car, VAL); pop(); pop(); genop_1(s, OP_STRCAT, cursp()); push(); n = n->cdr; } n = tree->cdr->cdr; if (n->car) { /* tail */ p = (char*)n->car; off = new_lit(s, mrb_str_new_cstr(s->mrb, p)); codegen(s, tree->car, VAL); genop_2(s, OP_STRING, cursp(), off); pop(); genop_1(s, OP_STRCAT, cursp()); push(); } if (n->cdr->car) { /* opt */ char *p2 = (char*)n->cdr->car; off = new_lit(s, mrb_str_new_cstr(s->mrb, p2)); genop_2(s, OP_STRING, cursp(), off); push(); argc++; } if (n->cdr->cdr) { /* enc */ char *p2 = (char*)n->cdr->cdr; off = new_lit(s, mrb_str_new_cstr(s->mrb, p2)); genop_2(s, OP_STRING, cursp(), off); push(); argc++; } 
push(); /* space for a block */ pop_n(argc+2); sym = new_sym(s, MRB_SYM_2(s->mrb, compile)); genop_3(s, OP_SEND, cursp(), sym, argc); mrb_gc_arena_restore(s->mrb, ai); push(); } else { node *n = tree->car; while (n) { if (nint(n->car->car) != NODE_STR) { codegen(s, n->car, NOVAL); } n = n->cdr; } } break; case NODE_SYM: if (val) { int sym = new_sym(s, nsym(tree)); genop_2(s, OP_LOADSYM, cursp(), sym); push(); } break; case NODE_DSYM: codegen(s, tree, val); if (val) { gen_intern(s); } break; case NODE_SELF: if (val) { genop_1(s, OP_LOADSELF, cursp()); push(); } break; case NODE_NIL: if (val) { genop_1(s, OP_LOADNIL, cursp()); push(); } break; case NODE_TRUE: if (val) { genop_1(s, OP_LOADT, cursp()); push(); } break; case NODE_FALSE: if (val) { genop_1(s, OP_LOADF, cursp()); push(); } break; case NODE_ALIAS: { int a = new_sym(s, nsym(tree->car)); int b = new_sym(s, nsym(tree->cdr)); genop_2(s, OP_ALIAS, a, b); if (val) { genop_1(s, OP_LOADNIL, cursp()); push(); } } break; case NODE_UNDEF: { node *t = tree; while (t) { int symbol = new_sym(s, nsym(t->car)); genop_1(s, OP_UNDEF, symbol); t = t->cdr; } if (val) { genop_1(s, OP_LOADNIL, cursp()); push(); } } break; case NODE_CLASS: { int idx; node *body; if (tree->car->car == (node*)0) { genop_1(s, OP_LOADNIL, cursp()); push(); } else if (tree->car->car == (node*)1) { genop_1(s, OP_OCLASS, cursp()); push(); } else { codegen(s, tree->car->car, VAL); } if (tree->cdr->car) { codegen(s, tree->cdr->car, VAL); } else { genop_1(s, OP_LOADNIL, cursp()); push(); } pop(); pop(); idx = new_sym(s, nsym(tree->car->cdr)); genop_2(s, OP_CLASS, cursp(), idx); body = tree->cdr->cdr->car; if (nint(body->cdr->car) == NODE_BEGIN && body->cdr->cdr == NULL) { genop_1(s, OP_LOADNIL, cursp()); } else { idx = scope_body(s, body, val); genop_2(s, OP_EXEC, cursp(), idx); } if (val) { push(); } } break; case NODE_MODULE: { int idx; if (tree->car->car == (node*)0) { genop_1(s, OP_LOADNIL, cursp()); push(); } else if (tree->car->car == (node*)1) { 
genop_1(s, OP_OCLASS, cursp()); push();
      }
      else {
        codegen(s, tree->car->car, VAL);
      }
      pop();
      idx = new_sym(s, nsym(tree->car->cdr));
      genop_2(s, OP_MODULE, cursp(), idx);
      if (nint(tree->cdr->car->cdr->car) == NODE_BEGIN && tree->cdr->car->cdr->cdr == NULL) {
        /* empty module body: no child irep, just load nil as the result */
        genop_1(s, OP_LOADNIL, cursp());
      }
      else {
        idx = scope_body(s, tree->cdr->car, val);
        genop_2(s, OP_EXEC, cursp(), idx);
      }
      if (val) {
        push();
      }
    }
    break;

  case NODE_SCLASS:
    /* singleton class body: `class << obj ... end` */
    {
      int idx;

      codegen(s, tree->car, VAL);
      pop();
      genop_1(s, OP_SCLASS, cursp());
      if (nint(tree->cdr->car->cdr->car) == NODE_BEGIN && tree->cdr->car->cdr->cdr == NULL) {
        /* empty body: skip generating a child irep */
        genop_1(s, OP_LOADNIL, cursp());
      }
      else {
        idx = scope_body(s, tree->cdr->car, val);
        genop_2(s, OP_EXEC, cursp(), idx);
      }
      if (val) {
        push();
      }
    }
    break;

  case NODE_DEF:
    /* method definition: compile body to a child irep, bind it on the
       target class (OP_TCLASS) via OP_METHOD + OP_DEF */
    {
      int sym = new_sym(s, nsym(tree->car));
      int idx = lambda_body(s, tree->cdr, 0);

      genop_1(s, OP_TCLASS, cursp());
      push();
      genop_2(s, OP_METHOD, cursp(), idx);
      push(); pop();
      pop();
      genop_2(s, OP_DEF, cursp(), sym);
      if (val) push();
    }
    break;

  case NODE_SDEF:
    /* singleton method definition: `def obj.name ... end`;
       receiver's singleton class is the definition target */
    {
      node *recv = tree->car;
      int sym = new_sym(s, nsym(tree->cdr->car));
      int idx = lambda_body(s, tree->cdr->cdr, 0);

      codegen(s, recv, VAL);
      pop();
      genop_1(s, OP_SCLASS, cursp());
      push();
      genop_2(s, OP_METHOD, cursp(), idx);
      pop();
      genop_2(s, OP_DEF, cursp(), sym);
      if (val) push();
    }
    break;

  case NODE_POSTEXE:
    codegen(s, tree, NOVAL);
    break;

  default:
    break;
  }
 exit:
  /* restore recursion level saved at codegen() entry */
  s->rlev = rlev;
}

/*
 * Allocate a fresh irep for scope `s` and register it as a child of the
 * previous scope's irep, growing the parent's `reps` table as needed.
 * The very first scope becomes the toplevel irep itself.
 */
static void
scope_add_irep(codegen_scope *s)
{
  mrb_irep *irep;
  codegen_scope *prev = s->prev;

  if (prev->irep == NULL) {
    /* toplevel: parent and child share the first irep */
    irep = mrb_add_irep(s->mrb);
    prev->irep = s->irep = irep;
    return;
  }
  else {
    if (prev->irep->rlen == UINT16_MAX) {
      codegen_error(s, "too many nested blocks/methods");
    }
    s->irep = irep = mrb_add_irep(s->mrb);
    if (prev->irep->rlen == prev->rcapa) {
      /* double the parent's child-irep table */
      prev->rcapa *= 2;
      prev->reps = (mrb_irep**)codegen_realloc(s, prev->reps, sizeof(mrb_irep*)*prev->rcapa);
    }
    prev->reps[prev->irep->rlen] = irep;
    prev->irep->rlen++;
  }
}

/*
 * Create a new codegen scope chained under `prev`.
 * `nlv` is the list of local-variable names; slot 0 is reserved for self.
 * All scope bookkeeping lives in a memory pool released by scope_finish()
 * or codegen_error(); the iseq/pool/syms/reps buffers are heap-allocated
 * and handed to the irep at scope_finish().
 * Returns NULL only when pool allocation fails for the root scope.
 */
static codegen_scope*
scope_new(mrb_state *mrb, codegen_scope *prev, node *nlv)
{
  static const codegen_scope codegen_scope_zero = { 0 };
  mrb_pool *pool = mrb_pool_open(mrb);
  codegen_scope *s = (codegen_scope *)mrb_pool_alloc(pool, sizeof(codegen_scope));

  if (!s) {
    if (prev)
      codegen_error(prev, "unexpected scope");
    return NULL;
  }
  *s = codegen_scope_zero;
  s->mrb = mrb;
  s->mpool = pool;
  if (!prev) return s;          /* root scope: no irep/buffers yet */
  s->prev = prev;
  s->ainfo = 0;
  s->mscope = 0;

  scope_add_irep(s);

  /* initial buffer capacities; all grow on demand */
  s->rcapa = 8;
  s->reps = (mrb_irep**)mrb_malloc(mrb, sizeof(mrb_irep*)*s->rcapa);

  s->icapa = 1024;
  s->iseq = (mrb_code*)mrb_malloc(mrb, sizeof(mrb_code)*s->icapa);

  s->pcapa = 32;
  s->pool = (mrb_pool_value*)mrb_malloc(mrb, sizeof(mrb_pool_value)*s->pcapa);

  s->scapa = 256;
  s->syms = (mrb_sym*)mrb_malloc(mrb, sizeof(mrb_sym)*s->scapa);

  s->lv = nlv;
  s->sp += node_len(nlv)+1;     /* add self */
  s->nlocals = s->sp;
  if (nlv) {
    /* copy local-variable names into the irep's lv table (self excluded) */
    mrb_sym *lv;
    node *n = nlv;
    size_t i = 0;

    s->irep->lv = lv = (mrb_sym*)mrb_malloc(mrb, sizeof(mrb_sym)*(s->nlocals-1));
    for (i=0, n=nlv; n; i++,n=n->cdr) {
      lv[i] = lv_name(n);
    }
    mrb_assert(i + 1 == s->nlocals);
  }
  s->ai = mrb_gc_arena_save(mrb);

  s->filename_sym = prev->filename_sym;
  if (s->filename_sym) {
    /* NOTE(review): sizeof(short) vs the uint16_t element type used
       everywhere else (e.g. emit_B reallocs with sizeof(uint16_t)) —
       harmless where short is 16-bit but inconsistent; confirm intent */
    s->lines = (uint16_t*)mrb_malloc(mrb, sizeof(short)*s->icapa);
  }
  s->lineno = prev->lineno;

  /* debug setting */
  s->debug_start_pos = 0;
  if (s->filename_sym) {
    mrb_debug_info_alloc(mrb, s->irep);
  }
  else {
    s->irep->debug_info = NULL;
  }
  s->parser = prev->parser;
  s->filename_index = prev->filename_index;

  s->rlev = prev->rlev+1;

  return s;
}

/*
 * Finalize a scope: shrink the working buffers to their exact sizes,
 * transfer ownership to the irep, append the catch table directly after
 * the instruction sequence, attach debug line info, then release the
 * scope's pool.
 */
static void
scope_finish(codegen_scope *s)
{
  mrb_state *mrb = s->mrb;
  mrb_irep *irep = s->irep;

  if (s->nlocals > 0xff) {
    codegen_error(s, "too many local variables");
  }
  irep->flags = 0;
  if (s->iseq) {
    size_t catchsize = sizeof(struct mrb_irep_catch_handler) * irep->clen;
    irep->iseq = (const mrb_code *)codegen_realloc(s, s->iseq, sizeof(mrb_code)*s->pc + catchsize);
    irep->ilen = s->pc;
    if (irep->clen > 0) {
      /* catch table is stored in-line, right after the bytecode */
      memcpy((void *)(irep->iseq + irep->ilen), s->catch_table, catchsize);
    }
  }
  else {
    irep->clen = 0;
  }
  mrb_free(s->mrb,
s->catch_table); s->catch_table = NULL;  /* ownership of the catch table moved into iseq above */
  irep->pool = (const mrb_pool_value*)codegen_realloc(s, s->pool, sizeof(mrb_pool_value)*irep->plen);
  irep->syms = (const mrb_sym*)codegen_realloc(s, s->syms, sizeof(mrb_sym)*irep->slen);
  irep->reps = (const mrb_irep**)codegen_realloc(s, s->reps, sizeof(mrb_irep*)*irep->rlen);
  if (s->filename_sym) {
    /* attach accumulated per-byte line numbers to the irep's debug info */
    mrb_sym fname = mrb_parser_get_filename(s->parser, s->filename_index);
    const char *filename = mrb_sym_name_len(s->mrb, fname, NULL);

    mrb_debug_info_append_file(s->mrb, s->irep->debug_info,
                               filename, s->lines, s->debug_start_pos, s->pc);
  }
  mrb_free(s->mrb, s->lines);

  irep->nlocals = s->nlocals;
  irep->nregs = s->nregs;

  mrb_gc_arena_restore(mrb, s->ai);
  mrb_pool_close(s->mpool);
}

/*
 * Push a loop context of type `t` onto the scope's loop stack.
 * pc0/pc1/pc2 (next/redo/break jump anchors) start as unlinked
 * (JMPLINK_START); `reg` records the stack slot that receives the
 * loop's value.
 */
static struct loopinfo*
loop_push(codegen_scope *s, enum looptype t)
{
  struct loopinfo *p = (struct loopinfo *)codegen_palloc(s, sizeof(struct loopinfo));

  p->type = t;
  p->pc0 = p->pc1 = p->pc2 = JMPLINK_START;
  p->prev = s->loop;
  p->reg = cursp();
  s->loop = p;
  return p;
}

/*
 * Generate code for `break` (optionally with a value in `tree`).
 * Skips enclosing begin/rescue contexts to find the loop actually being
 * broken; a normal loop gets an unwinding jump chained on pc2, while a
 * block-style loop returns via OP_BREAK.
 */
static void
loop_break(codegen_scope *s, node *tree)
{
  if (!s->loop) {
    /* still evaluate the argument for its side effects */
    codegen(s, tree, NOVAL);
    raise_error(s, "unexpected break");
  }
  else {
    struct loopinfo *loop;

    loop = s->loop;
    if (tree) {
      if (loop->reg < 0) {
        /* loop result is discarded; evaluate for effect only */
        codegen(s, tree, NOVAL);
      }
      else {
        gen_retval(s, tree);
      }
    }
    /* break does not target begin/rescue contexts; walk past them */
    while (loop) {
      if (loop->type == LOOP_BEGIN) {
        loop = loop->prev;
      }
      else if (loop->type == LOOP_RESCUE) {
        loop = loop->prev;
      }
      else{
        break;
      }
    }
    if (!loop) {
      raise_error(s, "unexpected break");
      return;
    }

    if (loop->type == LOOP_NORMAL) {
      int tmp;

      if (loop->reg >= 0) {
        /* deposit the break value (or nil) in the loop's result slot */
        if (tree) {
          gen_move(s, loop->reg, cursp(), 0);
        }
        else {
          genop_1(s, OP_LOADNIL, loop->reg);
        }
      }
      /* chain this jump onto the loop's break list; patched in loop_pop */
      tmp = genjmp(s, OP_JMPUW, loop->pc2);
      loop->pc2 = tmp;
    }
    else {
      if (!tree) {
        genop_1(s, OP_LOADNIL, cursp());
      }
      gen_return(s, OP_BREAK, cursp());
    }
  }
}

/*
 * Pop the innermost loop context, resolving all pending break jumps
 * (the pc2 chain) to this point. When `val` is requested, nil is loaded
 * as the loop's fall-through value.
 */
static void
loop_pop(codegen_scope *s, int val)
{
  if (val) {
    genop_1(s, OP_LOADNIL, cursp());
  }
  dispatch_linked(s, s->loop->pc2);
  s->loop = s->loop->prev;
  if (val) push();
}

/*
 * Append one empty slot to the scope's catch-handler table and return
 * its index. The slot is filled in later by catch_handler_set().
 */
static int
catch_handler_new(codegen_scope *s)
{
  size_t newsize = sizeof(struct mrb_irep_catch_handler) * (s->irep->clen + 1);
  s->catch_table = (struct mrb_irep_catch_handler *)codegen_realloc(s, (void *)s->catch_table, newsize);
  return s->irep->clen ++;
}

/*
 * Fill catch-table entry `ent` with its type and the packed
 * begin/end/target pc values.
 */
static void
catch_handler_set(codegen_scope *s, int ent, enum mrb_catch_type type, uint32_t begin, uint32_t end, uint32_t target)
{
  struct mrb_irep_catch_handler *e;

  mrb_assert(ent >= 0 && ent < s->irep->clen);
  e = &s->catch_table[ent];
  uint8_to_bin(type, &e->type);
  mrb_irep_catch_handler_pack(begin, e->begin);
  mrb_irep_catch_handler_pack(end, e->end);
  mrb_irep_catch_handler_pack(target, e->target);
}

/*
 * Compile the parser result `p` into a proc.
 * Codegen errors unwind here via MRB_THROW (see codegen_error), in which
 * case the partially built irep is released and NULL is returned.
 * The caller's jmpbuf is restored on both paths.
 */
static struct RProc*
generate_code(mrb_state *mrb, parser_state *p, int val)
{
  codegen_scope *scope = scope_new(mrb, 0, 0);
  struct mrb_jmpbuf *prev_jmp = mrb->jmp;
  struct mrb_jmpbuf jmpbuf;
  struct RProc *proc;

  mrb->jmp = &jmpbuf;

  scope->mrb = mrb;
  scope->parser = p;
  scope->filename_sym = p->filename_sym;
  scope->filename_index = p->current_filename_index;

  MRB_TRY(mrb->jmp) {
    /* prepare irep */
    codegen(scope, p->tree, val);
    proc = mrb_proc_new(mrb, scope->irep);
    mrb_irep_decref(mrb, scope->irep);
    mrb_pool_close(scope->mpool);
    proc->c = NULL;
    if (mrb->c->cibase && mrb->c->cibase->proc == proc->upper) {
      proc->upper = NULL;
    }
    mrb->jmp = prev_jmp;
    return proc;
  }
  MRB_CATCH(mrb->jmp) {
    /* compilation failed: release what was built and report NULL */
    mrb_irep_decref(mrb, scope->irep);
    mrb_pool_close(scope->mpool);
    mrb->jmp = prev_jmp;
    return NULL;
  }
  MRB_END_EXC(mrb->jmp);
}

/* Public entry point: compile `p` keeping the toplevel value. */
MRB_API struct RProc*
mrb_generate_code(mrb_state *mrb, parser_state *p)
{
  return generate_code(mrb, p, VAL);
}

/*
 * Recursively drop local-variable name tables from `irep` and all of its
 * children (used when debug information is not wanted).
 */
void
mrb_irep_remove_lv(mrb_state *mrb, mrb_irep *irep)
{
  int i;

  if (irep->flags & MRB_IREP_NO_FREE) return;  /* static/ROM irep: nothing to free */
  if (irep->lv) {
    mrb_free(mrb, (void*)irep->lv);
    irep->lv = NULL;
  }
  if (!irep->reps) return;
  for (i = 0; i < irep->rlen; ++i) {
    mrb_irep_remove_lv(mrb, (mrb_irep*)irep->reps[i]);
  }
}
/* ** codegen.c - mruby code generator ** ** See Copyright Notice in mruby.h */ #include <mruby.h> #include <mruby/compile.h> #include <mruby/proc.h> #include <mruby/dump.h> #include <mruby/numeric.h> #include <mruby/string.h> #include <mruby/debug.h> #include <mruby/presym.h> #include "node.h" #include <mruby/opcode.h> #include <mruby/re.h> #include <mruby/throw.h> #include <ctype.h> #include <string.h> #ifndef MRB_CODEGEN_LEVEL_MAX #define MRB_CODEGEN_LEVEL_MAX 256 #endif #define MAXARG_S (1<<16) typedef mrb_ast_node node; typedef struct mrb_parser_state parser_state; enum looptype { LOOP_NORMAL, LOOP_BLOCK, LOOP_FOR, LOOP_BEGIN, LOOP_RESCUE, }; struct loopinfo { enum looptype type; uint32_t pc0; /* `next` destination */ uint32_t pc1; /* `redo` destination */ uint32_t pc2; /* `break` destination */ int reg; /* destination register */ struct loopinfo *prev; }; typedef struct scope { mrb_state *mrb; mrb_pool *mpool; struct scope *prev; node *lv; uint16_t sp; uint32_t pc; uint32_t lastpc; uint32_t lastlabel; uint16_t ainfo:15; mrb_bool mscope:1; struct loopinfo *loop; mrb_sym filename_sym; uint16_t lineno; mrb_code *iseq; uint16_t *lines; uint32_t icapa; mrb_irep *irep; mrb_pool_value *pool; mrb_sym *syms; mrb_irep **reps; struct mrb_irep_catch_handler *catch_table; uint32_t pcapa, scapa, rcapa; uint16_t nlocals; uint16_t nregs; int ai; int debug_start_pos; uint16_t filename_index; parser_state* parser; int rlev; /* recursion levels */ } codegen_scope; static codegen_scope* scope_new(mrb_state *mrb, codegen_scope *prev, node *lv); static void scope_finish(codegen_scope *s); static struct loopinfo *loop_push(codegen_scope *s, enum looptype t); static void loop_break(codegen_scope *s, node *tree); static void loop_pop(codegen_scope *s, int val); /* * The search for catch handlers starts at the end of the table in mrb_vm_run(). * Therefore, the next handler to be added must meet one of the following conditions. 
* - Larger start position
 * - Same start position but smaller end position
 */
static int catch_handler_new(codegen_scope *s);
static void catch_handler_set(codegen_scope *s, int ent, enum mrb_catch_type type, uint32_t begin, uint32_t end, uint32_t target);

static void gen_assignment(codegen_scope *s, node *tree, node *rhs, int sp, int val);
static void gen_vmassignment(codegen_scope *s, node *tree, int sp, int val);

static void codegen(codegen_scope *s, node *tree, int val);
static void raise_error(codegen_scope *s, const char *msg);

/*
 * Report a fatal code-generation error: print `message` (with file:line
 * when available), free every heap buffer owned by each scope on the
 * chain, then longjmp back to generate_code() via MRB_THROW.
 * This function does not return.
 */
static void
codegen_error(codegen_scope *s, const char *message)
{
  if (!s) return;
#ifndef MRB_NO_STDIO
  if (s->filename_sym && s->lineno) {
    const char *filename = mrb_sym_name_len(s->mrb, s->filename_sym, NULL);
    fprintf(stderr, "%s:%d: %s\n", filename, s->lineno, message);
  }
  else {
    fprintf(stderr, "%s\n", message);
  }
#endif
  /* unwind the scope chain, releasing per-scope heap allocations */
  while (s->prev) {
    codegen_scope *tmp = s->prev;
    if (s->irep) {
      mrb_free(s->mrb, s->iseq);
      /* free heap-backed literals (strings / bignums) in the pool */
      for (int i=0; i<s->irep->plen; i++) {
        mrb_pool_value *pv = &s->pool[i];
        if ((pv->tt & 0x3) == IREP_TT_STR || pv->tt == IREP_TT_BIGINT) {
          mrb_free(s->mrb, (void*)pv->u.str);
        }
      }
      mrb_free(s->mrb, s->pool);
      mrb_free(s->mrb, s->syms);
      mrb_free(s->mrb, s->catch_table);
      if (s->reps) {
        /* copied from mrb_irep_free() in state.c */
        for (int i=0; i<s->irep->rlen; i++) {
          if (s->reps[i])
            mrb_irep_decref(s->mrb, (mrb_irep*)s->reps[i]);
        }
        mrb_free(s->mrb, s->reps);
      }
      mrb_free(s->mrb, s->lines);
    }
    mrb_pool_close(s->mpool);
    s = tmp;
  }
  MRB_THROW(s->mrb->jmp);
}

/* Pool-backed allocation; freed wholesale when the scope's pool closes. */
static void*
codegen_palloc(codegen_scope *s, size_t len)
{
  void *p = mrb_pool_alloc(s->mpool, len);

  if (!p) codegen_error(s, "pool memory allocation");
  return p;
}

/* realloc wrapper that aborts codegen (longjmp) on failure. */
static void*
codegen_realloc(codegen_scope *s, void *p, size_t len)
{
  p = mrb_realloc_simple(s->mrb, p, len);

  if (!p && len > 0) codegen_error(s, "mrb_realloc");
  return p;
}

/*
 * When the parser forbids extended (OP_EXT*) instructions, reject any
 * operand pair that would not fit in 8 bits.
 */
static void
check_no_ext_ops(codegen_scope *s, uint16_t a, uint16_t b)
{
  if (s->parser->no_ext_ops && (a | b) > 0xff) {
    codegen_error(s, "need OP_EXTs instruction (currently OP_EXTs are prohibited)");
  }
}

/*
 * Mark the current pc as a label. no_peephole() refuses to rewrite
 * instructions across the most recent label position.
 */
static int
new_label(codegen_scope *s)
{
  return s->lastlabel = s->pc;
}

/*
 * Write one bytecode byte at position `pc`, growing the iseq buffer
 * (and the parallel line table) when needed, and record the current
 * source line for that byte.
 */
static void
emit_B(codegen_scope *s, uint32_t pc, uint8_t i)
{
  if (pc >= s->icapa) {
    if (pc == UINT32_MAX) {
      codegen_error(s, "too big code block");
    }
    if (pc >= UINT32_MAX / 2) {
      /* NOTE(review): this caps `pc`, not `s->icapa`, and no realloc to a
         larger capacity happens on this branch — looks suspicious; confirm
         against upstream before relying on behavior near the 2GB limit */
      pc = UINT32_MAX;
    }
    else {
      s->icapa *= 2;
    }
    s->iseq = (mrb_code *)codegen_realloc(s, s->iseq, sizeof(mrb_code)*s->icapa);
    if (s->lines) {
      s->lines = (uint16_t*)codegen_realloc(s, s->lines, sizeof(uint16_t)*s->icapa);
    }
  }
  if (s->lines) {
    /* lineno 0 means "unknown": inherit the previous byte's line */
    if (s->lineno > 0 || pc == 0)
      s->lines[pc] = s->lineno;
    else
      s->lines[pc] = s->lines[pc-1];
  }
  s->iseq[pc] = i;
}

/* Write a 16-bit value big-endian at `pc`/`pc+1`. */
static void
emit_S(codegen_scope *s, int pc, uint16_t i)
{
  uint8_t hi = i>>8;
  uint8_t lo = i&0xff;

  emit_B(s, pc, hi);
  emit_B(s, pc+1, lo);
}

/* Append one byte at the current pc and advance. */
static void
gen_B(codegen_scope *s, uint8_t i)
{
  emit_B(s, s->pc, i);
  s->pc++;
}

/* Append a big-endian 16-bit value at the current pc and advance. */
static void
gen_S(codegen_scope *s, uint16_t i)
{
  emit_S(s, s->pc, i);
  s->pc += 2;
}

/* Emit an operand-less instruction, remembering its pc in lastpc. */
static void
genop_0(codegen_scope *s, mrb_code i)
{
  s->lastpc = s->pc;
  gen_B(s, i);
}

/*
 * Emit a one-operand instruction; operands wider than 8 bits are encoded
 * with an OP_EXT1 prefix and a 16-bit operand.
 */
static void
genop_1(codegen_scope *s, mrb_code i, uint16_t a)
{
  s->lastpc = s->pc;
  check_no_ext_ops(s, a, 0);
  if (a > 0xff) {
    gen_B(s, OP_EXT1);
    gen_B(s, i);
    gen_S(s, a);
  }
  else {
    gen_B(s, i);
    gen_B(s, (uint8_t)a);
  }
}

/*
 * Emit a two-operand instruction, choosing the OP_EXT1/2/3 prefix that
 * widens exactly the operands exceeding 8 bits.
 */
static void
genop_2(codegen_scope *s, mrb_code i, uint16_t a, uint16_t b)
{
  s->lastpc = s->pc;
  check_no_ext_ops(s, a, b);
  if (a > 0xff && b > 0xff) {
    gen_B(s, OP_EXT3);
    gen_B(s, i);
    gen_S(s, a);
    gen_S(s, b);
  }
  else if (b > 0xff) {
    gen_B(s, OP_EXT2);
    gen_B(s, i);
    gen_B(s, (uint8_t)a);
    gen_S(s, b);
  }
  else if (a > 0xff) {
    gen_B(s, OP_EXT1);
    gen_B(s, i);
    gen_S(s, a);
    gen_B(s, (uint8_t)b);
  }
  else {
    gen_B(s, i);
    gen_B(s, (uint8_t)a);
    gen_B(s, (uint8_t)b);
  }
}

/* Two extendable operands plus a raw byte operand `c`. */
static void
genop_3(codegen_scope *s, mrb_code i, uint16_t a, uint16_t b, uint8_t c)
{
  genop_2(s, i, a, b);
  gen_B(s, c);
}

/* One extendable operand followed by a fixed 16-bit operand. */
static void
genop_2S(codegen_scope *s, mrb_code i, uint16_t a, uint16_t b)
{
  genop_1(s, i, a);
  gen_S(s, b);
}

static void
genop_2SS(codegen_scope *s,
mrb_code i, uint16_t a, uint32_t b)
{
  /* one extendable operand followed by a fixed 32-bit operand
     (emitted as two big-endian 16-bit halves) */
  genop_1(s, i, a);
  gen_S(s, b>>16);
  gen_S(s, b&0xffff);
}

/* Instruction with a single 24-bit operand, emitted big-endian. */
static void
genop_W(codegen_scope *s, mrb_code i, uint32_t a)
{
  uint8_t a1 = (a>>16) & 0xff;
  uint8_t a2 = (a>>8) & 0xff;
  uint8_t a3 = a & 0xff;

  s->lastpc = s->pc;
  gen_B(s, i);
  gen_B(s, a1);
  gen_B(s, a2);
  gen_B(s, a3);
}

#define NOVAL 0
#define VAL 1

/* TRUE when the parser asked for optimizations to be disabled. */
static mrb_bool
no_optimize(codegen_scope *s)
{
  if (s && s->parser && s->parser->no_optimize)
    return TRUE;
  return FALSE;
}

/*
 * Decode the instruction at `pc` into (insn, a, b, c), handling the
 * OP_EXT1/2/3 prefixes. The FETCH_* operand readers and the opcode list
 * come from mruby/ops.h via the OPCODE X-macro.
 */
struct mrb_insn_data
mrb_decode_insn(const mrb_code *pc)
{
  struct mrb_insn_data data = { 0 };

  if (pc == 0) return data;
  data.addr = pc;
  mrb_code insn = READ_B();
  uint16_t a = 0;
  uint16_t b = 0;
  uint16_t c = 0;

  switch (insn) {
#define FETCH_Z() /* empty */
#define OPCODE(i,x) case OP_ ## i: FETCH_ ## x (); break;
#include "mruby/ops.h"
#undef OPCODE
  }
  switch (insn) {
  case OP_EXT1:
    insn = READ_B();
    switch (insn) {
#define OPCODE(i,x) case OP_ ## i: FETCH_ ## x ## _1 (); break;
#include "mruby/ops.h"
#undef OPCODE
    }
    break;
  case OP_EXT2:
    insn = READ_B();
    switch (insn) {
#define OPCODE(i,x) case OP_ ## i: FETCH_ ## x ## _2 (); break;
#include "mruby/ops.h"
#undef OPCODE
    }
    break;
  case OP_EXT3:
    insn = READ_B();
    switch (insn) {
#define OPCODE(i,x) case OP_ ## i: FETCH_ ## x ## _3 (); break;
#include "mruby/ops.h"
#undef OPCODE
    }
    break;
  default:
    break;
  }
  data.insn = insn;
  data.a = a;
  data.b = b;
  data.c = c;
  return data;
}

#undef OPCODE
/* Per-opcode encoded sizes, built by expanding the operand-layout tag of
   each OPCODE entry in mruby/ops.h to a byte count. Z/S/W layouts have a
   fixed size; B* layouts vary with each OP_EXT prefix, hence one table
   per prefix below. */
#define Z 1
#define S 3
#define W 4
#define OPCODE(_,x) x,
/* instruction sizes */
static uint8_t mrb_insn_size[] = {
#define B 2
#define BB 3
#define BBB 4
#define BS 4
#define BSS 6
#include "mruby/ops.h"
#undef B
#undef BB
#undef BBB
#undef BS
#undef BSS
};
/* EXT1 instruction sizes */
static uint8_t mrb_insn_size1[] = {
#define B 3
#define BB 4
#define BBB 5
#define BS 5
#define BSS 7
#include "mruby/ops.h"
/* NOTE(review): BB/BBB deliberately stay defined for the EXT2 table below */
#undef B
#undef BS
#undef BSS
};
/* EXT2 instruction sizes */
static uint8_t mrb_insn_size2[] = {
#define B 2
#define BS 4
#define BSS 6
#include "mruby/ops.h"
#undef B
#undef BB
#undef BBB
#undef BS
#undef BSS
};
/* EXT3 instruction sizes */
#define B 3
#define BB 5
#define BBB 6
#define BS 5
#define BSS 7
static uint8_t mrb_insn_size3[] = {
#include "mruby/ops.h"
};
#undef B
#undef BB
#undef BBB
#undef BS
#undef BSS
#undef OPCODE

/*
 * Walk the instruction stream from the start of the iseq and return the
 * address of the instruction immediately preceding `pc` (NULL when `pc`
 * is the first instruction). Linear scan using the size tables above.
 */
static const mrb_code*
mrb_prev_pc(codegen_scope *s, const mrb_code *pc)
{
  const mrb_code *prev_pc = NULL;
  const mrb_code *i = s->iseq;

  while (i<pc) {
    uint8_t insn = i[0];
    prev_pc = i;
    switch (insn) {
    case OP_EXT1:
      i += mrb_insn_size1[i[1]] + 1;
      break;
    case OP_EXT2:
      i += mrb_insn_size2[i[1]] + 1;
      break;
    case OP_EXT3:
      i += mrb_insn_size3[i[1]] + 1;
      break;
    default:
      i += mrb_insn_size[insn];
      break;
    }
  }
  return prev_pc;
}

#define pc_addr(s) &((s)->iseq[(s)->pc])
#define addr_pc(s, addr) (uint32_t)((addr) - s->iseq)
/* drop the most recently emitted instruction */
#define rewind_pc(s) s->pc = s->lastpc

/* Decode the most recently emitted instruction (OP_NOP when none). */
static struct mrb_insn_data
mrb_last_insn(codegen_scope *s)
{
  if (s->pc == 0) {
    struct mrb_insn_data data = { OP_NOP, 0 };
    return data;
  }
  return mrb_decode_insn(&s->iseq[s->lastpc]);
}

/*
 * TRUE when peephole rewriting of the last instruction is unsafe:
 * optimization disabled, a label points at the current pc, or there is
 * no previous instruction to rewrite.
 */
static mrb_bool
no_peephole(codegen_scope *s)
{
  return no_optimize(s) || s->lastlabel == s->pc || s->pc == 0 || s->pc == s->lastpc;
}

/* sentinel marking the head of a to-be-patched jump chain */
#define JMPLINK_START UINT32_MAX

/*
 * Emit the 16-bit relative offset of a jump targeting `pc`.
 * JMPLINK_START is written as offset-to-0, i.e. an unresolved link to be
 * backpatched later.
 */
static void
gen_jmpdst(codegen_scope *s, uint32_t pc)
{

  if (pc == JMPLINK_START) {
    pc = 0;
  }
  uint32_t pos2 = s->pc+2;
  int32_t off = pc - pos2;

  if (off > INT16_MAX || INT16_MIN > off) {
    codegen_error(s, "too big jump offset");
  }
  gen_S(s, (uint16_t)off);
}

/*
 * Emit jump instruction `i` toward `pc`; returns the pc of the offset
 * field so callers can chain/backpatch it (see dispatch).
 */
static uint32_t
genjmp(codegen_scope *s, mrb_code i, uint32_t pc)
{
  uint32_t pos;

  genop_0(s, i);
  pos = s->pc;
  gen_jmpdst(s, pc);
  return pos;
}

#define genjmp_0(s,i) genjmp(s,i,JMPLINK_START)

/*
 * Emit a one-register conditional jump (JMPIF/JMPNOT/JMPNIL) with
 * peephole folding: when the tested register was just written by a MOVE
 * or a constant load, the test can reuse the source register or be
 * resolved at compile time into an unconditional jump / no jump.
 */
static uint32_t
genjmp2(codegen_scope *s, mrb_code i, uint16_t a, uint32_t pc, int val)
{
  uint32_t pos;

  if (!no_peephole(s) && !val) {
    struct mrb_insn_data data = mrb_last_insn(s);

    switch (data.insn) {
    case OP_MOVE:
      /* test the MOVE source directly; drop the MOVE into a temporary */
      if (data.a == a && data.a > s->nlocals) {
        rewind_pc(s);
        a = data.b;
      }
      break;
    case OP_LOADNIL:
    case OP_LOADF:
      /* condition is statically falsy */
      if (data.a == a || data.a > s->nlocals) {
        s->pc = addr_pc(s,
data.addr); if (i == OP_JMPNOT || (i == OP_JMPNIL && data.insn == OP_LOADNIL)) { return genjmp(s, OP_JMP, pc); } else { /* OP_JMPIF */ return JMPLINK_START; } } break; case OP_LOADT: case OP_LOADI: case OP_LOADINEG: case OP_LOADI__1: case OP_LOADI_0: case OP_LOADI_1: case OP_LOADI_2: case OP_LOADI_3: case OP_LOADI_4: case OP_LOADI_5: case OP_LOADI_6: case OP_LOADI_7: if (data.a == a || data.a > s->nlocals) { s->pc = addr_pc(s, data.addr); if (i == OP_JMPIF) { return genjmp(s, OP_JMP, pc); } else { /* OP_JMPNOT and OP_JMPNIL */ return JMPLINK_START; } } break; } } if (a > 0xff) { check_no_ext_ops(s, a, 0); gen_B(s, OP_EXT1); genop_0(s, i); gen_S(s, a); } else { genop_0(s, i); gen_B(s, (uint8_t)a); } pos = s->pc; gen_jmpdst(s, pc); return pos; } #define genjmp2_0(s,i,a,val) genjmp2(s,i,a,JMPLINK_START,val) static mrb_bool get_int_operand(codegen_scope *s, struct mrb_insn_data *data, mrb_int *ns); static void gen_int(codegen_scope *s, uint16_t dst, mrb_int i); static void gen_move(codegen_scope *s, uint16_t dst, uint16_t src, int nopeep) { if (nopeep || no_peephole(s)) goto normal; else if (dst == src) return; else { struct mrb_insn_data data = mrb_last_insn(s); switch (data.insn) { case OP_MOVE: if (dst == src) return; /* remove useless MOVE */ if (data.b == dst && data.a == src) /* skip swapping MOVE */ return; goto normal; case OP_LOADNIL: case OP_LOADSELF: case OP_LOADT: case OP_LOADF: case OP_LOADI__1: case OP_LOADI_0: case OP_LOADI_1: case OP_LOADI_2: case OP_LOADI_3: case OP_LOADI_4: case OP_LOADI_5: case OP_LOADI_6: case OP_LOADI_7: if (data.a != src || data.a < s->nlocals) goto normal; rewind_pc(s); genop_1(s, data.insn, dst); return; case OP_HASH: case OP_ARRAY: if (data.b != 0) goto normal; /* fall through */ case OP_LOADI: case OP_LOADINEG: case OP_LOADL: case OP_LOADSYM: case OP_GETGV: case OP_GETSV: case OP_GETIV: case OP_GETCV: case OP_GETCONST: case OP_STRING: case OP_LAMBDA: case OP_BLOCK: case OP_METHOD: case OP_BLKPUSH: if (data.a != src || data.a < 
s->nlocals) goto normal;
      rewind_pc(s);
      genop_2(s, data.insn, dst, data.b);
      return;
    case OP_LOADI16:
      if (data.a != src || data.a < s->nlocals) goto normal;
      rewind_pc(s);
      genop_2S(s, data.insn, dst, data.b);
      return;
    case OP_LOADI32:
      if (data.a != src || data.a < s->nlocals) goto normal;
      else {
        /* 32-bit immediate is split across the b (high) and c (low) operands */
        uint32_t i = (uint32_t)data.b<<16|data.c;
        rewind_pc(s);
        genop_2SS(s, data.insn, dst, i);
      }
      return;
    case OP_AREF:
    case OP_GETUPVAR:
      if (data.a != src || data.a < s->nlocals) goto normal;
      rewind_pc(s);
      genop_3(s, data.insn, dst, data.b, data.c);
      return;
    case OP_ADDI: case OP_SUBI:
      if (addr_pc(s, data.addr) == s->lastlabel || data.a != src || data.a < s->nlocals) goto normal;
      else {
        /* look one insn further back: only rewrite when the ADDI/SUBI operand
         * register was itself just MOVEd from dst */
        struct mrb_insn_data data0 = mrb_decode_insn(mrb_prev_pc(s, data.addr));
        if (data0.insn != OP_MOVE || data0.a != data.a || data0.b != dst) goto normal;
        s->pc = addr_pc(s, data0.addr);
        if (addr_pc(s, data0.addr) != s->lastlabel) {
          /* constant folding */
          data0 = mrb_decode_insn(mrb_prev_pc(s, data0.addr));
          mrb_int n;
          if (data0.a == dst && get_int_operand(s, &data0, &n)) {
            /* fold constant +/- immediate into a single integer load,
             * unless the addition/subtraction would overflow */
            if ((data.insn == OP_ADDI && !mrb_int_add_overflow(n, data.b, &n)) ||
                (data.insn == OP_SUBI && !mrb_int_sub_overflow(n, data.b, &n))) {
              s->pc = addr_pc(s, data0.addr);
              gen_int(s, dst, n);
              return;
            }
          }
        }
      }
      /* no folding possible: re-emit the ADDI/SUBI targeted at dst */
      genop_2(s, data.insn, dst, data.b);
      return;
    default:
      break;
    }
  }
 normal:
  genop_2(s, OP_MOVE, dst, src);
  return;
}

static int search_upvar(codegen_scope *s, mrb_sym id, int *idx);

/* Read upvalue `id` into register `dst`.
 * Peephole: a GETUPVAR immediately after a SETUPVAR of the same slot into
 * the same register is redundant and is skipped. */
static void
gen_getupvar(codegen_scope *s, uint16_t dst, mrb_sym id)
{
  int idx;
  int lv = search_upvar(s, id, &idx);

  if (!no_peephole(s)) {
    struct mrb_insn_data data = mrb_last_insn(s);
    if (data.insn == OP_SETUPVAR && data.a == dst && data.b == idx && data.c == lv) {
      /* skip GETUPVAR right after SETUPVAR */
      return;
    }
  }
  genop_3(s, OP_GETUPVAR, dst, idx, lv);
}

/* Store register `dst` into upvalue `id`.
 * Peephole: when the last insn was a MOVE into dst, store straight from
 * the MOVE's source register and drop the MOVE. */
static void
gen_setupvar(codegen_scope *s, uint16_t dst, mrb_sym id)
{
  int idx;
  int lv = search_upvar(s, id, &idx);

  if (!no_peephole(s)) {
    struct mrb_insn_data data = mrb_last_insn(s);
    if (data.insn ==
OP_MOVE && data.a == dst) {
      /* last insn was a MOVE into dst: store from the MOVE's source and
       * drop the MOVE */
      dst = data.b;
      rewind_pc(s);
    }
  }
  genop_3(s, OP_SETUPVAR, dst, idx, lv);
}

/* Emit a return-family instruction `op` returning register `src`.
 * Peephole: if the last insn was a MOVE into src, return the MOVE's
 * source directly; a return directly after another OP_RETURN is dropped. */
static void
gen_return(codegen_scope *s, uint8_t op, uint16_t src)
{
  if (no_peephole(s)) {
    genop_1(s, op, src);
  }
  else {
    struct mrb_insn_data data = mrb_last_insn(s);

    if (data.insn == OP_MOVE && src == data.a) {
      rewind_pc(s);
      genop_1(s, op, data.b);
    }
    else if (data.insn != OP_RETURN) {
      genop_1(s, op, src);
    }
  }
}

/* If `data` is an instruction that loads an integer constant, store that
 * constant into *n and return TRUE; otherwise return FALSE. */
static mrb_bool
get_int_operand(codegen_scope *s, struct mrb_insn_data *data, mrb_int *n)
{
  switch (data->insn) {
  case OP_LOADI__1:
    *n = -1;
    return TRUE;

  case OP_LOADINEG:
    *n = -data->b;
    return TRUE;

  case OP_LOADI_0: case OP_LOADI_1: case OP_LOADI_2: case OP_LOADI_3:
  case OP_LOADI_4: case OP_LOADI_5: case OP_LOADI_6: case OP_LOADI_7:
    /* opcodes are consecutive, so the constant is the opcode offset */
    *n = data->insn - OP_LOADI_0;
    return TRUE;

  case OP_LOADI: case OP_LOADI16:
    *n = (int16_t)data->b;
    return TRUE;

  case OP_LOADI32:
    /* 32-bit value split across the b (high) and c (low) operands */
    *n = (mrb_int)((uint32_t)data->b<<16)+data->c;
    return TRUE;

  case OP_LOADL:
    {
      /* literal-pool load: only integer pool entries qualify */
      mrb_pool_value *pv = &s->pool[data->b];
      if (pv->tt == IREP_TT_INT32) {
        *n = (mrb_int)pv->u.i32;
      }
#ifdef MRB_INT64
      else if (pv->tt == IREP_TT_INT64) {
        *n = (mrb_int)pv->u.i64;
      }
#endif
      else {
        return FALSE;
      }
    }
    return TRUE;

  default:
    return FALSE;
  }
}

/* Emit OP_ADD/OP_SUB for the value at `dst` with constant folding:
 * one constant operand folds into OP_ADDI/OP_SUBI; two constant operands
 * fold into a single integer load. */
static void
gen_addsub(codegen_scope *s, uint8_t op, uint16_t dst)
{
  if (no_peephole(s)) {
  normal:
    genop_1(s, op, dst);
    return;
  }
  else {
    struct mrb_insn_data data = mrb_last_insn(s);
    mrb_int n;

    if (!get_int_operand(s, &data, &n)) {
      /* not integer immediate */
      goto normal;
    }
    struct mrb_insn_data data0 = mrb_decode_insn(mrb_prev_pc(s, data.addr));
    mrb_int n0;
    if (addr_pc(s, data.addr) == s->lastlabel || !get_int_operand(s, &data0, &n0)) {
      /* OP_ADDI/OP_SUBI takes upto 8bits */
      if (n > INT8_MAX || n < INT8_MIN) goto normal;
      rewind_pc(s);
      if (n == 0) return;
      if (n > 0) {
        if (op == OP_ADD) genop_2(s, OP_ADDI, dst, (uint16_t)n);
        else genop_2(s, OP_SUBI, dst, (uint16_t)n);
      }
      else {
        /* n < 0 : flip the operation and use the magnitude */
        n = -n;
        if (op == OP_ADD) genop_2(s, OP_SUBI, dst, (uint16_t)n);
        else genop_2(s, OP_ADDI, dst,
(uint16_t)n);
      }
      return;
    }
    /* both operands are constants: fold them, bailing out to a real
     * ADD/SUB instruction on overflow */
    if (op == OP_ADD) {
      if (mrb_int_add_overflow(n0, n, &n)) goto normal;
    }
    else { /* OP_SUB */
      if (mrb_int_sub_overflow(n0, n, &n)) goto normal;
    }
    /* rewind over both operand loads and emit one integer load */
    s->pc = addr_pc(s, data0.addr);
    gen_int(s, dst, n);
  }
}

/* Emit OP_MUL/OP_DIV with constant folding of two integer immediates.
 * Falls back to the plain instruction on overflow, zero divisor, or
 * MRB_INT_MIN / -1 (which would overflow). */
static void
gen_muldiv(codegen_scope *s, uint8_t op, uint16_t dst)
{
  if (no_peephole(s)) {
  normal:
    genop_1(s, op, dst);
    return;
  }
  else {
    struct mrb_insn_data data = mrb_last_insn(s);
    mrb_int n, n0;
    if (addr_pc(s, data.addr) == s->lastlabel || !get_int_operand(s, &data, &n)) {
      /* not integer immediate */
      goto normal;
    }
    struct mrb_insn_data data0 = mrb_decode_insn(mrb_prev_pc(s, data.addr));
    if (!get_int_operand(s, &data0, &n0) || n == 0) {
      goto normal;
    }
    if (op == OP_MUL) {
      if (mrb_int_mul_overflow(n0, n, &n)) goto normal;
    }
    else { /* OP_DIV */
      if (n0 == MRB_INT_MIN && n == -1) goto normal;
      n = n0 / n;
    }
    /* rewind over both operand loads and emit the folded constant */
    s->pc = addr_pc(s, data0.addr);
    gen_int(s, dst, n);
  }
}

mrb_bool mrb_num_shift(mrb_state *mrb, mrb_int val, mrb_int width, mrb_int *num);

/* Constant-fold a binary operator call (<<, >>, %, &, |, ^, []) when both
 * operands are integer immediates just emitted.  Returns TRUE when code
 * was emitted/folded, FALSE to make the caller emit a normal dispatch. */
static mrb_bool
gen_binop(codegen_scope *s, mrb_sym op, uint16_t dst)
{
  if (no_peephole(s)) return FALSE;
  else if (op == MRB_OPSYM_2(s->mrb, aref)) {
    genop_1(s, OP_GETIDX, dst);
    return TRUE;
  }
  else {
    struct mrb_insn_data data = mrb_last_insn(s);
    mrb_int n, n0;
    if (addr_pc(s, data.addr) == s->lastlabel || !get_int_operand(s, &data, &n)) {
      /* not integer immediate */
      return FALSE;
    }
    struct mrb_insn_data data0 = mrb_decode_insn(mrb_prev_pc(s, data.addr));
    if (!get_int_operand(s, &data0, &n0)) {
      return FALSE;
    }
    if (op == MRB_OPSYM_2(s->mrb, lshift)) {
      if (!mrb_num_shift(s->mrb, n0, n, &n)) return FALSE;
    }
    else if (op == MRB_OPSYM_2(s->mrb, rshift)) {
      /* negating MRB_INT_MIN would overflow */
      if (n == MRB_INT_MIN) return FALSE;
      if (!mrb_num_shift(s->mrb, n0, -n, &n)) return FALSE;
    }
    else if (op == MRB_OPSYM_2(s->mrb, mod) && n != 0) {
      if (n0 == MRB_INT_MIN && n == -1) {
        n = 0;
      }
      else {
        /* floored modulo (Ruby semantics): result takes the sign of n */
        mrb_int n1 = n0 % n;
        if ((n0 < 0) != (n < 0) && n1 != 0) {
          n1 += n;
        }
        n = n1;
      }
    }
    else if (op == MRB_OPSYM_2(s->mrb, and)) {
      n = n0 & n;
    }
    else if (op ==
MRB_OPSYM_2(s->mrb, or)) {
      n = n0 | n;
    }
    else if (op == MRB_OPSYM_2(s->mrb, xor)) {
      n = n0 ^ n;
    }
    else {
      return FALSE;
    }
    /* rewind over both operand loads and emit the folded constant */
    s->pc = addr_pc(s, data0.addr);
    gen_int(s, dst, n);
    return TRUE;
  }
}

/* Resolve one pending jump in a linked chain: patch the jump operand at
 * pos0 to target the current pc and return the position of the next link
 * in the chain (0 when the chain ends).  Unresolved jump operands double
 * as relative links to the previous jump with the same target. */
static uint32_t
dispatch(codegen_scope *s, uint32_t pos0)
{
  int32_t pos1;
  int32_t offset;
  int16_t newpos;

  if (pos0 == JMPLINK_START) return 0;

  pos1 = pos0 + 2;
  offset = s->pc - pos1;
  if (offset > INT16_MAX) {
    codegen_error(s, "too big jmp offset");
  }
  s->lastlabel = s->pc;
  /* read the old operand (next link) before overwriting it with the
   * real offset */
  newpos = (int16_t)PEEK_S(s->iseq+pos0);
  emit_S(s, pos0, (uint16_t)offset);
  if (newpos == 0) return 0;
  return pos1+newpos;
}

/* Patch every jump in the chain starting at pos to target the current pc. */
static void
dispatch_linked(codegen_scope *s, uint32_t pos)
{
  if (pos==JMPLINK_START) return;
  for (;;) {
    pos = dispatch(s, pos);
    if (pos==0) break;
  }
}

/* Track the high-water mark of the virtual register stack. */
#define nregs_update do {if (s->sp > s->nregs) s->nregs = s->sp;} while (0)

/* Grow the virtual register stack by n slots; sp is 16-bit, so overflow
 * is reported as "too complex expression". */
static void
push_n_(codegen_scope *s, int n)
{
  if (s->sp+n >= 0xffff) {
    codegen_error(s, "too complex expression");
  }
  s->sp+=n;
  nregs_update;
}

/* Shrink the virtual register stack by n slots, guarding underflow. */
static void
pop_n_(codegen_scope *s, int n)
{
  if ((int)s->sp-n < 0) {
    codegen_error(s, "stack pointer underflow");
  }
  s->sp-=n;
}

#define push() push_n_(s,1)
#define push_n(n) push_n_(s,n)
#define pop() pop_n_(s,1)
#define pop_n(n) pop_n_(s,n)
#define cursp() (s->sp)

/* Intern a big-integer literal (digit string `p` in `base`, negative when
 * `neg`) into the irep literal pool, reusing an identical entry when one
 * exists; returns the pool index.
 * Entry layout: [len byte][base byte][digits...][NUL]. */
static int
new_litbn(codegen_scope *s, const char *p, int base, mrb_bool neg)
{
  int i;
  size_t plen;
  mrb_pool_value *pv;

  plen = strlen(p);
  if (plen > 255) {
    codegen_error(s, "integer too big");
  }
  /* search for an existing identical bigint literal */
  for (i=0; i<s->irep->plen; i++) {
    size_t len;
    pv = &s->pool[i];
    if (pv->tt != IREP_TT_BIGINT) continue;
    len = pv->u.str[0];
    if (len == plen && pv->u.str[1] == base && memcmp(pv->u.str+2, p, len) == 0) return i;
  }
  /* no match: grow the pool if full and append a new entry */
  if (s->irep->plen == s->pcapa) {
    s->pcapa *= 2;
    s->pool = (mrb_pool_value*)codegen_realloc(s, s->pool, sizeof(mrb_pool_value)*s->pcapa);
  }
  pv = &s->pool[s->irep->plen];
  i = s->irep->plen++;
  {
    char *buf;
    pv->tt = IREP_TT_BIGINT;
    buf = (char*)codegen_realloc(s, NULL, plen+3);
    buf[0] = (char)plen;
    buf[1] = base;
    /* NOTE(review): for a negative literal the base byte is overwritten
     * with 0x80, so the sign is recorded but the base is lost (and such
     * entries never match the dedup check above, which compares against
     * `base`) -- confirm against the bigint pool reader */
    if (neg) buf[1] = 0x80;
    memcpy(buf+2, p, plen);
buf[plen+2] = '\0'; pv->u.str = buf; } return i; } static int new_lit(codegen_scope *s, mrb_value val) { int i; mrb_pool_value *pv; switch (mrb_type(val)) { case MRB_TT_STRING: for (i=0; i<s->irep->plen; i++) { mrb_int len; pv = &s->pool[i]; if (pv->tt & IREP_TT_NFLAG) continue; len = pv->tt>>2; if (RSTRING_LEN(val) != len) continue; if (memcmp(pv->u.str, RSTRING_PTR(val), len) == 0) return i; } break; #ifndef MRB_NO_FLOAT case MRB_TT_FLOAT: for (i=0; i<s->irep->plen; i++) { mrb_float f1, f2; pv = &s->pool[i]; if (pv->tt != IREP_TT_FLOAT) continue; pv = &s->pool[i]; f1 = pv->u.f; f2 = mrb_float(val); if (f1 == f2 && !signbit(f1) == !signbit(f2)) return i; } break; #endif case MRB_TT_INTEGER: for (i=0; i<s->irep->plen; i++) { mrb_int v = mrb_integer(val); pv = &s->pool[i]; if (pv->tt == IREP_TT_INT32) { if (v == pv->u.i32) return i; } #ifdef MRB_64BIT else if (pv->tt == IREP_TT_INT64) { if (v == pv->u.i64) return i; } continue; #endif } break; default: /* should not happen */ return 0; } if (s->irep->plen == s->pcapa) { s->pcapa *= 2; s->pool = (mrb_pool_value*)codegen_realloc(s, s->pool, sizeof(mrb_pool_value)*s->pcapa); } pv = &s->pool[s->irep->plen]; i = s->irep->plen++; switch (mrb_type(val)) { case MRB_TT_STRING: if (RSTR_NOFREE_P(RSTRING(val))) { pv->tt = (uint32_t)(RSTRING_LEN(val)<<2) | IREP_TT_SSTR; pv->u.str = RSTRING_PTR(val); } else { char *p; mrb_int len = RSTRING_LEN(val); pv->tt = (uint32_t)(len<<2) | IREP_TT_STR; p = (char*)codegen_realloc(s, NULL, len+1); memcpy(p, RSTRING_PTR(val), len); p[len] = '\0'; pv->u.str = p; } break; #ifndef MRB_NO_FLOAT case MRB_TT_FLOAT: pv->tt = IREP_TT_FLOAT; pv->u.f = mrb_float(val); break; #endif case MRB_TT_INTEGER: #ifdef MRB_INT64 pv->tt = IREP_TT_INT64; pv->u.i64 = mrb_integer(val); #else pv->tt = IREP_TT_INT32; pv->u.i32 = mrb_integer(val); #endif break; default: /* should not happen */ break; } return i; } static int new_sym(codegen_scope *s, mrb_sym sym) { int i, len; mrb_assert(s->irep); len = s->irep->slen; 
for (i=0; i<len; i++) {
    if (s->syms[i] == sym) return i;
  }
  /* not found: append, growing the table as needed (16-bit index limit) */
  if (s->irep->slen >= s->scapa) {
    s->scapa *= 2;
    if (s->scapa > 0xffff) {
      codegen_error(s, "too many symbols");
    }
    s->syms = (mrb_sym*)codegen_realloc(s, s->syms, sizeof(mrb_sym)*s->scapa);
  }
  s->syms[s->irep->slen] = sym;
  return s->irep->slen++;
}

/* Emit a setter instruction `op` (e.g. OP_SETGV/OP_SETIV) storing register
 * `dst` under symbol `sym`.  When the value is not needed afterwards
 * (!val), a trailing MOVE into dst is elided and its source stored
 * directly. */
static void
gen_setxv(codegen_scope *s, uint8_t op, uint16_t dst, mrb_sym sym, int val)
{
  int idx = new_sym(s, sym);

  if (!val && !no_peephole(s)) {
    struct mrb_insn_data data = mrb_last_insn(s);
    if (data.insn == OP_MOVE && data.a == dst) {
      dst = data.b;
      rewind_pc(s);
    }
  }
  genop_2(s, op, dst, idx);
}

/* Load integer constant i into register dst using the shortest encoding:
 * dedicated opcodes for -1..7, then 8-bit, 16-bit and 32-bit immediates,
 * and finally a literal-pool load for anything wider. */
static void
gen_int(codegen_scope *s, uint16_t dst, mrb_int i)
{
  if (i < 0) {
    if (i == -1) genop_1(s, OP_LOADI__1, dst);
    else if (i >= -0xff) genop_2(s, OP_LOADINEG, dst, (uint16_t)-i);
    else if (i >= INT16_MIN) genop_2S(s, OP_LOADI16, dst, (uint16_t)i);
    else if (i >= INT32_MIN) genop_2SS(s, OP_LOADI32, dst, (uint32_t)i);
    else goto int_lit;
  }
  else if (i < 8) genop_1(s, OP_LOADI_0 + (uint8_t)i, dst);
  else if (i <= 0xff) genop_2(s, OP_LOADI, dst, (uint16_t)i);
  else if (i <= INT16_MAX) genop_2S(s, OP_LOADI16, dst, (uint16_t)i);
  else if (i <= INT32_MAX) genop_2SS(s, OP_LOADI32, dst, (uint32_t)i);
  else {
  int_lit:
    genop_2(s, OP_LOADL, dst, new_lit(s, mrb_int_value(s->mrb, i)));
  }
}

/* Constant-fold a unary operator (+ - ~) applied to an integer immediate
 * just emitted.  Returns TRUE when folded, FALSE to emit a method call. */
static mrb_bool
gen_uniop(codegen_scope *s, mrb_sym sym, uint16_t dst)
{
  if (no_peephole(s)) return FALSE;
  struct mrb_insn_data data = mrb_last_insn(s);
  mrb_int n;

  if (!get_int_operand(s, &data, &n)) return FALSE;
  if (sym == MRB_OPSYM_2(s->mrb, plus)) {
    /* unary plus does nothing */
  }
  else if (sym == MRB_OPSYM_2(s->mrb, minus)) {
    /* negating MRB_INT_MIN would overflow */
    if (n == MRB_INT_MIN) return FALSE;
    n = -n;
  }
  else if (sym == MRB_OPSYM_2(s->mrb, neg)) {
    n = ~n;
  }
  else {
    return FALSE;
  }
  /* rewind over the operand load and emit the folded constant */
  s->pc = addr_pc(s, data.addr);
  gen_int(s, dst, n);
  return TRUE;
}

/* Length of a parser cons-cell list. */
static int
node_len(node *tree)
{
  int n = 0;

  while (tree) {
    n++;
    tree = tree->cdr;
  }
  return n;
}

/* Extract raw values the parser packs into node pointers. */
#define nint(x) ((int)(intptr_t)(x))
#define nchar(x) ((char)(intptr_t)(x))
#define nsym(x)
((mrb_sym)(intptr_t)(x)) #define lv_name(lv) nsym((lv)->car) static int lv_idx(codegen_scope *s, mrb_sym id) { node *lv = s->lv; int n = 1; while (lv) { if (lv_name(lv) == id) return n; n++; lv = lv->cdr; } return 0; } static int search_upvar(codegen_scope *s, mrb_sym id, int *idx) { const struct RProc *u; int lv = 0; codegen_scope *up = s->prev; while (up) { *idx = lv_idx(up, id); if (*idx > 0) { return lv; } lv ++; up = up->prev; } if (lv < 1) lv = 1; u = s->parser->upper; while (u && !MRB_PROC_CFUNC_P(u)) { const struct mrb_irep *ir = u->body.irep; uint_fast16_t n = ir->nlocals; int i; const mrb_sym *v = ir->lv; if (v) { for (i=1; n > 1; n--, v++, i++) { if (*v == id) { *idx = i; return lv - 1; } } } if (MRB_PROC_SCOPE_P(u)) break; u = u->upper; lv ++; } codegen_error(s, "Can't found local variables"); return -1; /* not reached */ } static void for_body(codegen_scope *s, node *tree) { codegen_scope *prev = s; int idx; struct loopinfo *lp; node *n2; /* generate receiver */ codegen(s, tree->cdr->car, VAL); /* generate loop-block */ s = scope_new(s->mrb, s, NULL); push(); /* push for a block parameter */ /* generate loop variable */ n2 = tree->car; genop_W(s, OP_ENTER, 0x40000); if (n2->car && !n2->car->cdr && !n2->cdr) { gen_assignment(s, n2->car->car, NULL, 1, NOVAL); } else { gen_vmassignment(s, n2, 1, VAL); } /* construct loop */ lp = loop_push(s, LOOP_FOR); lp->pc1 = new_label(s); /* loop body */ codegen(s, tree->cdr->cdr->car, VAL); pop(); gen_return(s, OP_RETURN, cursp()); loop_pop(s, NOVAL); scope_finish(s); s = prev; genop_2(s, OP_BLOCK, cursp(), s->irep->rlen-1); push();pop(); /* space for a block */ pop(); idx = new_sym(s, MRB_SYM_2(s->mrb, each)); genop_3(s, OP_SENDB, cursp(), idx, 0); } static int lambda_body(codegen_scope *s, node *tree, int blk) { codegen_scope *parent = s; s = scope_new(s->mrb, s, tree->car); s->mscope = !blk; if (blk) { struct loopinfo *lp = loop_push(s, LOOP_BLOCK); lp->pc0 = new_label(s); } tree = tree->cdr; if (tree->car == 
NULL) { genop_W(s, OP_ENTER, 0); s->ainfo = 0; } else { mrb_aspec a; int ma, oa, ra, pa, ka, kd, ba, i; uint32_t pos; node *opt; node *margs, *pargs; node *tail; /* mandatory arguments */ ma = node_len(tree->car->car); margs = tree->car->car; tail = tree->car->cdr->cdr->cdr->cdr; /* optional arguments */ oa = node_len(tree->car->cdr->car); /* rest argument? */ ra = tree->car->cdr->cdr->car ? 1 : 0; /* mandatory arguments after rest argument */ pa = node_len(tree->car->cdr->cdr->cdr->car); pargs = tree->car->cdr->cdr->cdr->car; /* keyword arguments */ ka = tail? node_len(tail->cdr->car) : 0; /* keyword dictionary? */ kd = tail && tail->cdr->cdr->car? 1 : 0; /* block argument? */ ba = tail && tail->cdr->cdr->cdr->car ? 1 : 0; if (ma > 0x1f || oa > 0x1f || pa > 0x1f || ka > 0x1f) { codegen_error(s, "too many formal arguments"); } /* (23bits = 5:5:1:5:5:1:1) */ a = MRB_ARGS_REQ(ma) | MRB_ARGS_OPT(oa) | (ra? MRB_ARGS_REST() : 0) | MRB_ARGS_POST(pa) | MRB_ARGS_KEY(ka, kd) | (ba? MRB_ARGS_BLOCK() : 0); genop_W(s, OP_ENTER, a); /* (12bits = 5:1:5:1) */ s->ainfo = (((ma+oa) & 0x3f) << 7) | ((ra & 0x1) << 6) | ((pa & 0x1f) << 1) | ((ka | kd) ? 
1 : 0); /* generate jump table for optional arguments initializer */ pos = new_label(s); for (i=0; i<oa; i++) { new_label(s); genjmp_0(s, OP_JMP); } if (oa > 0) { genjmp_0(s, OP_JMP); } opt = tree->car->cdr->car; i = 0; while (opt) { int idx; mrb_sym id = nsym(opt->car->car); dispatch(s, pos+i*3+1); codegen(s, opt->car->cdr, VAL); pop(); idx = lv_idx(s, id); if (idx > 0) { gen_move(s, idx, cursp(), 0); } else { gen_getupvar(s, cursp(), id); } i++; opt = opt->cdr; } if (oa > 0) { dispatch(s, pos+i*3+1); } /* keyword arguments */ if (tail) { node *kwds = tail->cdr->car; int kwrest = 0; if (tail->cdr->cdr->car) { kwrest = 1; } mrb_assert(nint(tail->car) == NODE_ARGS_TAIL); mrb_assert(node_len(tail) == 4); while (kwds) { int jmpif_key_p, jmp_def_set = -1; node *kwd = kwds->car, *def_arg = kwd->cdr->cdr->car; mrb_sym kwd_sym = nsym(kwd->cdr->car); mrb_assert(nint(kwd->car) == NODE_KW_ARG); if (def_arg) { int idx; genop_2(s, OP_KEY_P, lv_idx(s, kwd_sym), new_sym(s, kwd_sym)); jmpif_key_p = genjmp2_0(s, OP_JMPIF, lv_idx(s, kwd_sym), NOVAL); codegen(s, def_arg, VAL); pop(); idx = lv_idx(s, kwd_sym); if (idx > 0) { gen_move(s, idx, cursp(), 0); } else { gen_getupvar(s, cursp(), kwd_sym); } jmp_def_set = genjmp_0(s, OP_JMP); dispatch(s, jmpif_key_p); } genop_2(s, OP_KARG, lv_idx(s, kwd_sym), new_sym(s, kwd_sym)); if (jmp_def_set != -1) { dispatch(s, jmp_def_set); } i++; kwds = kwds->cdr; } if (tail->cdr->car && !kwrest) { genop_0(s, OP_KEYEND); } } /* argument destructuring */ if (margs) { node *n = margs; pos = 1; while (n) { if (nint(n->car->car) == NODE_MASGN) { gen_vmassignment(s, n->car->cdr->car, pos, NOVAL); } pos++; n = n->cdr; } } if (pargs) { node *n = pargs; pos = ma+oa+ra+1; while (n) { if (nint(n->car->car) == NODE_MASGN) { gen_vmassignment(s, n->car->cdr->car, pos, NOVAL); } pos++; n = n->cdr; } } } codegen(s, tree->cdr->car, VAL); pop(); if (s->pc > 0) { gen_return(s, OP_RETURN, cursp()); } if (blk) { loop_pop(s, NOVAL); } scope_finish(s); return 
parent->irep->rlen - 1; } static int scope_body(codegen_scope *s, node *tree, int val) { codegen_scope *scope = scope_new(s->mrb, s, tree->car); codegen(scope, tree->cdr, VAL); gen_return(scope, OP_RETURN, scope->sp-1); if (!s->iseq) { genop_0(scope, OP_STOP); } scope_finish(scope); if (!s->irep) { /* should not happen */ return 0; } return s->irep->rlen - 1; } static mrb_bool nosplat(node *t) { while (t) { if (nint(t->car->car) == NODE_SPLAT) return FALSE; t = t->cdr; } return TRUE; } static mrb_sym attrsym(codegen_scope *s, mrb_sym a) { const char *name; mrb_int len; char *name2; name = mrb_sym_name_len(s->mrb, a, &len); name2 = (char *)codegen_palloc(s, (size_t)len + 1 /* '=' */ + 1 /* '\0' */ ); mrb_assert_int_fit(mrb_int, len, size_t, SIZE_MAX); memcpy(name2, name, (size_t)len); name2[len] = '='; name2[len+1] = '\0'; return mrb_intern(s->mrb, name2, len+1); } #define CALL_MAXARGS 15 #define GEN_LIT_ARY_MAX 64 #define GEN_VAL_STACK_MAX 99 static int gen_values(codegen_scope *s, node *t, int val, int limit) { int n = 0; int first = 1; int slimit = GEN_VAL_STACK_MAX; if (limit == 0) limit = GEN_LIT_ARY_MAX; if (cursp() >= slimit) slimit = INT16_MAX; if (!val) { while (t) { codegen(s, t->car, NOVAL); n++; t = t->cdr; } return n; } while (t) { int is_splat = nint(t->car->car) == NODE_SPLAT; if (is_splat || n >= limit-1 || cursp() >= slimit) { /* flush stack */ pop_n(n); if (first) { if (n == 0) { genop_1(s, OP_LOADNIL, cursp()); } else { genop_2(s, OP_ARRAY, cursp(), n); } push(); first = 0; limit = GEN_LIT_ARY_MAX; } else if (n > 0) { pop(); genop_2(s, OP_ARYPUSH, cursp(), n); push(); } n = 0; } codegen(s, t->car, val); if (is_splat) { pop(); pop(); genop_1(s, OP_ARYCAT, cursp()); push(); } else { n++; } t = t->cdr; } if (!first) { pop(); if (n > 0) { pop_n(n); genop_2(s, OP_ARYPUSH, cursp(), n); } return -1; /* variable length */ } return n; } static int gen_hash(codegen_scope *s, node *tree, int val, int limit) { int slimit = GEN_VAL_STACK_MAX; if (cursp() >= 
GEN_LIT_ARY_MAX) slimit = INT16_MAX; int len = 0; mrb_bool update = FALSE; while (tree) { if (nint(tree->car->car->car) == NODE_KW_REST_ARGS) { if (val && len > 0) { pop_n(len*2); if (!update) { genop_2(s, OP_HASH, cursp(), len); } else { pop(); genop_2(s, OP_HASHADD, cursp(), len); } push(); } codegen(s, tree->car->cdr, val); if (val && (len > 0 || update)) { pop(); pop(); genop_1(s, OP_HASHCAT, cursp()); push(); } update = TRUE; len = 0; } else { codegen(s, tree->car->car, val); codegen(s, tree->car->cdr, val); len++; } tree = tree->cdr; if (val && cursp() >= slimit) { pop_n(len*2); if (!update) { genop_2(s, OP_HASH, cursp(), len); } else { pop(); genop_2(s, OP_HASHADD, cursp(), len); } push(); update = TRUE; len = 0; } } if (val && len > limit) { pop_n(len*2); genop_2(s, OP_HASH, cursp(), len); push(); return -1; } if (update) { if (val && len > 0) { pop_n(len*2+1); genop_2(s, OP_HASHADD, cursp(), len); push(); } return -1; /* variable length */ } return len; } static void gen_call(codegen_scope *s, node *tree, int val, int safe) { mrb_sym sym = nsym(tree->cdr->car); int skip = 0, n = 0, nk = 0, noop = 0, noself = 0, blk = 0, sp_save = cursp(); if (!tree->car) { noself = noop = 1; push(); } else { codegen(s, tree->car, VAL); /* receiver */ } if (safe) { int recv = cursp()-1; gen_move(s, cursp(), recv, 1); skip = genjmp2_0(s, OP_JMPNIL, cursp(), val); } tree = tree->cdr->cdr->car; if (tree) { if (tree->car) { /* positional arguments */ n = gen_values(s, tree->car, VAL, 14); if (n < 0) { /* variable length */ noop = 1; /* not operator */ n = 15; push(); } } if (tree->cdr->car) { /* keyword arguments */ noop = 1; nk = gen_hash(s, tree->cdr->car->cdr, VAL, 14); if (nk < 0) nk = 15; } } if (tree && tree->cdr && tree->cdr->cdr) { codegen(s, tree->cdr->cdr, VAL); pop(); noop = 1; blk = 1; } push();pop(); s->sp = sp_save; if (!noop && sym == MRB_OPSYM_2(s->mrb, add) && n == 1) { gen_addsub(s, OP_ADD, cursp()); } else if (!noop && sym == MRB_OPSYM_2(s->mrb, sub) && n == 
1) { gen_addsub(s, OP_SUB, cursp()); } else if (!noop && sym == MRB_OPSYM_2(s->mrb, mul) && n == 1) { gen_muldiv(s, OP_MUL, cursp()); } else if (!noop && sym == MRB_OPSYM_2(s->mrb, div) && n == 1) { gen_muldiv(s, OP_DIV, cursp()); } else if (!noop && sym == MRB_OPSYM_2(s->mrb, lt) && n == 1) { genop_1(s, OP_LT, cursp()); } else if (!noop && sym == MRB_OPSYM_2(s->mrb, le) && n == 1) { genop_1(s, OP_LE, cursp()); } else if (!noop && sym == MRB_OPSYM_2(s->mrb, gt) && n == 1) { genop_1(s, OP_GT, cursp()); } else if (!noop && sym == MRB_OPSYM_2(s->mrb, ge) && n == 1) { genop_1(s, OP_GE, cursp()); } else if (!noop && sym == MRB_OPSYM_2(s->mrb, eq) && n == 1) { genop_1(s, OP_EQ, cursp()); } else if (!noop && sym == MRB_OPSYM_2(s->mrb, aset) && n == 2) { genop_1(s, OP_SETIDX, cursp()); } else if (!noop && n == 0 && gen_uniop(s, sym, cursp())) { /* constant folding succeeded */ } else if (!noop && n == 1 && gen_binop(s, sym, cursp())) { /* constant folding succeeded */ } else if (noself){ genop_3(s, blk ? OP_SSENDB : OP_SSEND, cursp(), new_sym(s, sym), n|(nk<<4)); } else { genop_3(s, blk ? 
OP_SENDB : OP_SEND, cursp(), new_sym(s, sym), n|(nk<<4)); } if (safe) { dispatch(s, skip); } if (val) { push(); } } static void gen_assignment(codegen_scope *s, node *tree, node *rhs, int sp, int val) { int idx; int type = nint(tree->car); switch (type) { case NODE_GVAR: case NODE_ARG: case NODE_LVAR: case NODE_IVAR: case NODE_CVAR: case NODE_CONST: case NODE_NIL: case NODE_MASGN: if (rhs) { codegen(s, rhs, VAL); pop(); sp = cursp(); } break; case NODE_COLON2: case NODE_CALL: case NODE_SCALL: /* keep evaluation order */ break; case NODE_NVAR: codegen_error(s, "Can't assign to numbered parameter"); break; default: codegen_error(s, "unknown lhs"); break; } tree = tree->cdr; switch (type) { case NODE_GVAR: gen_setxv(s, OP_SETGV, sp, nsym(tree), val); break; case NODE_ARG: case NODE_LVAR: idx = lv_idx(s, nsym(tree)); if (idx > 0) { if (idx != sp) { gen_move(s, idx, sp, val); } break; } else { /* upvar */ gen_setupvar(s, sp, nsym(tree)); } break; case NODE_IVAR: gen_setxv(s, OP_SETIV, sp, nsym(tree), val); break; case NODE_CVAR: gen_setxv(s, OP_SETCV, sp, nsym(tree), val); break; case NODE_CONST: gen_setxv(s, OP_SETCONST, sp, nsym(tree), val); break; case NODE_COLON2: if (sp) { gen_move(s, cursp(), sp, 0); } sp = cursp(); push(); codegen(s, tree->car, VAL); if (rhs) { codegen(s, rhs, VAL); pop(); gen_move(s, sp, cursp(), 0); } pop_n(2); idx = new_sym(s, nsym(tree->cdr)); genop_2(s, OP_SETMCNST, sp, idx); break; case NODE_CALL: case NODE_SCALL: { int noself = 0, safe = (type == NODE_SCALL), skip = 0, top, call, n = 0; mrb_sym mid = nsym(tree->cdr->car); top = cursp(); if (val || sp == cursp()) { push(); /* room for retval */ } call = cursp(); if (!tree->car) { noself = 1; push(); } else { codegen(s, tree->car, VAL); /* receiver */ } if (safe) { int recv = cursp()-1; gen_move(s, cursp(), recv, 1); skip = genjmp2_0(s, OP_JMPNIL, cursp(), val); } tree = tree->cdr->cdr->car; if (tree) { if (tree->car) { /* positional arguments */ n = gen_values(s, tree->car, VAL, 
(tree->cdr->car)?13:14); if (n < 0) { /* variable length */ n = 15; push(); } } if (tree->cdr->car) { /* keyword arguments */ if (n == 14) { pop_n(n); genop_2(s, OP_ARRAY, cursp(), n); push(); n = 15; } gen_hash(s, tree->cdr->car->cdr, VAL, 0); if (n < 14) { n++; } else { pop_n(2); genop_2(s, OP_ARYPUSH, cursp(), 1); } push(); } } if (rhs) { codegen(s, rhs, VAL); pop(); } else { gen_move(s, cursp(), sp, 0); } if (val) { gen_move(s, top, cursp(), 1); } if (n < 14) { n++; } else { pop(); genop_2(s, OP_ARYPUSH, cursp(), 1); } s->sp = call; if (mid == MRB_OPSYM_2(s->mrb, aref) && n == 2) { genop_1(s, OP_SETIDX, cursp()); } else { genop_3(s, noself ? OP_SSEND : OP_SEND, cursp(), new_sym(s, attrsym(s, mid)), n); } if (safe) { dispatch(s, skip); } s->sp = top; } break; case NODE_MASGN: gen_vmassignment(s, tree->car, sp, val); break; /* splat without assignment */ case NODE_NIL: break; default: codegen_error(s, "unknown lhs"); break; } if (val) push(); } static void gen_vmassignment(codegen_scope *s, node *tree, int rhs, int val) { int n = 0, post = 0; node *t, *p; if (tree->car) { /* pre */ t = tree->car; n = 0; while (t) { int sp = cursp(); genop_3(s, OP_AREF, sp, rhs, n); push(); gen_assignment(s, t->car, NULL, sp, NOVAL); pop(); n++; t = t->cdr; } } t = tree->cdr; if (t) { if (t->cdr) { /* post count */ p = t->cdr->car; while (p) { post++; p = p->cdr; } } gen_move(s, cursp(), rhs, val); push_n(post+1); pop_n(post+1); genop_3(s, OP_APOST, cursp(), n, post); n = 1; if (t->car && t->car != (node*)-1) { /* rest */ gen_assignment(s, t->car, NULL, cursp(), NOVAL); } if (t->cdr && t->cdr->car) { t = t->cdr->car; while (t) { gen_assignment(s, t->car, NULL, cursp()+n, NOVAL); t = t->cdr; n++; } } if (val) { gen_move(s, cursp(), rhs, 0); } } } static void gen_intern(codegen_scope *s) { pop(); if (!no_peephole(s)) { struct mrb_insn_data data = mrb_last_insn(s); if (data.insn == OP_STRING && data.a == cursp()) { rewind_pc(s); genop_2(s, OP_SYMBOL, data.a, data.b); push(); return; 
} } genop_1(s, OP_INTERN, cursp()); push(); } static void gen_literal_array(codegen_scope *s, node *tree, mrb_bool sym, int val) { if (val) { int i = 0, j = 0, gen = 0; while (tree) { switch (nint(tree->car->car)) { case NODE_STR: if ((tree->cdr == NULL) && (nint(tree->car->cdr->cdr) == 0)) break; /* fall through */ case NODE_BEGIN: codegen(s, tree->car, VAL); ++j; break; case NODE_LITERAL_DELIM: if (j > 0) { j = 0; ++i; if (sym) gen_intern(s); } break; } while (j >= 2) { pop(); pop(); genop_1(s, OP_STRCAT, cursp()); push(); j--; } if (i > GEN_LIT_ARY_MAX) { pop_n(i); if (gen) { pop(); genop_2(s, OP_ARYPUSH, cursp(), i); } else { genop_2(s, OP_ARRAY, cursp(), i); gen = 1; } push(); i = 0; } tree = tree->cdr; } if (j > 0) { ++i; if (sym) gen_intern(s); } pop_n(i); if (gen) { pop(); genop_2(s, OP_ARYPUSH, cursp(), i); } else { genop_2(s, OP_ARRAY, cursp(), i); } push(); } else { while (tree) { switch (nint(tree->car->car)) { case NODE_BEGIN: case NODE_BLOCK: codegen(s, tree->car, NOVAL); } tree = tree->cdr; } } } static void raise_error(codegen_scope *s, const char *msg) { int idx = new_lit(s, mrb_str_new_cstr(s->mrb, msg)); genop_1(s, OP_ERR, idx); } static mrb_int readint(codegen_scope *s, const char *p, int base, mrb_bool neg, mrb_bool *overflow) { const char *e = p + strlen(p); mrb_int result = 0; mrb_assert(base >= 2 && base <= 16); if (*p == '+') p++; while (p < e) { int n; char c = *p; switch (c) { case '0': case '1': case '2': case '3': case '4': case '5': case '6': case '7': n = c - '0'; break; case '8': case '9': n = c - '0'; break; case 'a': case 'b': case 'c': case 'd': case 'e': case 'f': n = c - 'a' + 10; break; case 'A': case 'B': case 'C': case 'D': case 'E': case 'F': n = c - 'A' + 10; break; default: codegen_error(s, "malformed readint input"); *overflow = TRUE; /* not reached */ return result; } if (mrb_int_mul_overflow(result, base, &result)) { overflow: *overflow = TRUE; return 0; } mrb_uint tmp = ((mrb_uint)result)+n; if (neg && tmp == 
(mrb_uint)MRB_INT_MAX+1) {
      /* exactly MRB_INT_MIN: representable only as a negative value, so
       * return it directly instead of negating later */
      *overflow = FALSE;
      return MRB_INT_MIN;
    }
    if (tmp > MRB_INT_MAX) goto overflow;
    result = (mrb_int)tmp;
    p++;
  }
  *overflow = FALSE;
  if (neg) return -result;
  return result;
}

/* Generate code leaving a return value on the stack; a splat expression
 * additionally gets OP_ARYDUP applied to the resulting array. */
static void
gen_retval(codegen_scope *s, node *tree)
{
  if (nint(tree->car) == NODE_SPLAT) {
    codegen(s, tree, VAL);
    pop();
    genop_1(s, OP_ARYDUP, cursp());
  }
  else {
    codegen(s, tree, VAL);
    pop();
  }
}

/* TRUE when the node is a literal that is always truthy. */
static mrb_bool
true_always(node *tree)
{
  switch (nint(tree->car)) {
  case NODE_TRUE:
  case NODE_INT:
  case NODE_STR:
  case NODE_SYM:
    return TRUE;
  default:
    return FALSE;
  }
}

/* TRUE when the node is a literal that is always falsy. */
static mrb_bool
false_always(node *tree)
{
  switch (nint(tree->car)) {
  case NODE_FALSE:
  case NODE_NIL:
    return TRUE;
  default:
    return FALSE;
  }
}

/* Copy what is presumably the block-argument slot to the stack top: its
 * register offset is recovered from the packed ainfo bitfield
 * (m1:6 r:1 m2:5 kd:1, see lambda_body); lv>0 fetches it from an outer
 * scope via OP_GETUPVAR. */
static void
gen_blkmove(codegen_scope *s, uint16_t ainfo, int lv)
{
  int m1 = (ainfo>>7)&0x3f;
  int r = (ainfo>>6)&0x1;
  int m2 = (ainfo>>1)&0x1f;
  int kd = (ainfo)&0x1;
  int off = m1+r+m2+kd+1;

  if (lv == 0) {
    gen_move(s, cursp(), off, 0);
  }
  else {
    genop_3(s, OP_GETUPVAR, cursp(), off, lv);
  }
  push();
}

/* Main recursive code generator: emit bytecode for parse-tree `tree`.
 * With val==VAL the result is left on the register stack; with NOVAL it
 * is discarded. */
static void
codegen(codegen_scope *s, node *tree, int val)
{
  int nt;
  int rlev = s->rlev;

  if (!tree) {
    if (val) {
      genop_1(s, OP_LOADNIL, cursp());
      push();
    }
    return;
  }

  /* guard against C-stack exhaustion from deeply nested expressions */
  s->rlev++;
  if (s->rlev > MRB_CODEGEN_LEVEL_MAX) {
    codegen_error(s, "too complex expression");
  }
  /* record a debug-info file boundary when this node comes from a
   * different source file than the previous one */
  if (s->irep && s->filename_index != tree->filename_index) {
    mrb_sym fname = mrb_parser_get_filename(s->parser, s->filename_index);
    const char *filename = mrb_sym_name_len(s->mrb, fname, NULL);

    mrb_debug_info_append_file(s->mrb, s->irep->debug_info, filename, s->lines, s->debug_start_pos, s->pc);
    s->debug_start_pos = s->pc;
    s->filename_index = tree->filename_index;
    s->filename_sym = mrb_parser_get_filename(s->parser, tree->filename_index);
  }

  nt = nint(tree->car);
  s->lineno = tree->lineno;
  tree = tree->cdr;
  switch (nt) {
  case NODE_BEGIN:
    if (val && !tree) {
      genop_1(s, OP_LOADNIL, cursp());
      push();
    }
    /* only the last expression of a begin-body keeps the caller's val */
    while (tree) {
      codegen(s, tree->car, tree->cdr ?
NOVAL : val); tree = tree->cdr; } break; case NODE_RESCUE: { int noexc; uint32_t exend, pos1, pos2, tmp; struct loopinfo *lp; int catch_entry, begin, end; if (tree->car == NULL) goto exit; lp = loop_push(s, LOOP_BEGIN); lp->pc0 = new_label(s); catch_entry = catch_handler_new(s); begin = s->pc; codegen(s, tree->car, VAL); pop(); lp->type = LOOP_RESCUE; end = s->pc; noexc = genjmp_0(s, OP_JMP); catch_handler_set(s, catch_entry, MRB_CATCH_RESCUE, begin, end, s->pc); tree = tree->cdr; exend = JMPLINK_START; pos1 = JMPLINK_START; if (tree->car) { node *n2 = tree->car; int exc = cursp(); genop_1(s, OP_EXCEPT, exc); push(); while (n2) { node *n3 = n2->car; node *n4 = n3->car; dispatch(s, pos1); pos2 = JMPLINK_START; do { if (n4 && n4->car && nint(n4->car->car) == NODE_SPLAT) { codegen(s, n4->car, VAL); gen_move(s, cursp(), exc, 0); push_n(2); pop_n(2); /* space for one arg and a block */ pop(); genop_3(s, OP_SEND, cursp(), new_sym(s, MRB_SYM_2(s->mrb, __case_eqq)), 1); } else { if (n4) { codegen(s, n4->car, VAL); } else { genop_2(s, OP_GETCONST, cursp(), new_sym(s, MRB_SYM_2(s->mrb, StandardError))); push(); } pop(); genop_2(s, OP_RESCUE, exc, cursp()); } tmp = genjmp2(s, OP_JMPIF, cursp(), pos2, val); pos2 = tmp; if (n4) { n4 = n4->cdr; } } while (n4); pos1 = genjmp_0(s, OP_JMP); dispatch_linked(s, pos2); pop(); if (n3->cdr->car) { gen_assignment(s, n3->cdr->car, NULL, exc, NOVAL); } if (n3->cdr->cdr->car) { codegen(s, n3->cdr->cdr->car, val); if (val) pop(); } tmp = genjmp(s, OP_JMP, exend); exend = tmp; n2 = n2->cdr; push(); } if (pos1 != JMPLINK_START) { dispatch(s, pos1); genop_1(s, OP_RAISEIF, exc); } } pop(); tree = tree->cdr; dispatch(s, noexc); if (tree->car) { codegen(s, tree->car, val); } else if (val) { push(); } dispatch_linked(s, exend); loop_pop(s, NOVAL); } break; case NODE_ENSURE: if (!tree->cdr || !tree->cdr->cdr || (nint(tree->cdr->cdr->car) == NODE_BEGIN && tree->cdr->cdr->cdr)) { int catch_entry, begin, end, target; int idx; catch_entry = 
catch_handler_new(s); begin = s->pc; codegen(s, tree->car, val); end = target = s->pc; push(); idx = cursp(); genop_1(s, OP_EXCEPT, idx); push(); codegen(s, tree->cdr->cdr, NOVAL); pop(); genop_1(s, OP_RAISEIF, idx); pop(); catch_handler_set(s, catch_entry, MRB_CATCH_ENSURE, begin, end, target); } else { /* empty ensure ignored */ codegen(s, tree->car, val); } break; case NODE_LAMBDA: if (val) { int idx = lambda_body(s, tree, 1); genop_2(s, OP_LAMBDA, cursp(), idx); push(); } break; case NODE_BLOCK: if (val) { int idx = lambda_body(s, tree, 1); genop_2(s, OP_BLOCK, cursp(), idx); push(); } break; case NODE_IF: { uint32_t pos1, pos2; mrb_bool nil_p = FALSE; node *elsepart = tree->cdr->cdr->car; if (!tree->car) { codegen(s, elsepart, val); goto exit; } if (true_always(tree->car)) { codegen(s, tree->cdr->car, val); goto exit; } if (false_always(tree->car)) { codegen(s, elsepart, val); goto exit; } if (nint(tree->car->car) == NODE_CALL) { node *n = tree->car->cdr; mrb_sym mid = nsym(n->cdr->car); mrb_sym sym_nil_p = MRB_SYM_Q_2(s->mrb, nil); if (mid == sym_nil_p && n->cdr->cdr->car == NULL) { nil_p = TRUE; codegen(s, n->car, VAL); } } if (!nil_p) { codegen(s, tree->car, VAL); } pop(); if (val || tree->cdr->car) { if (nil_p) { pos2 = genjmp2_0(s, OP_JMPNIL, cursp(), val); pos1 = genjmp_0(s, OP_JMP); dispatch(s, pos2); } else { pos1 = genjmp2_0(s, OP_JMPNOT, cursp(), val); } codegen(s, tree->cdr->car, val); if (val) pop(); if (elsepart || val) { pos2 = genjmp_0(s, OP_JMP); dispatch(s, pos1); codegen(s, elsepart, val); dispatch(s, pos2); } else { dispatch(s, pos1); } } else { /* empty then-part */ if (elsepart) { if (nil_p) { pos1 = genjmp2_0(s, OP_JMPNIL, cursp(), val); } else { pos1 = genjmp2_0(s, OP_JMPIF, cursp(), val); } codegen(s, elsepart, val); dispatch(s, pos1); } else if (val && !nil_p) { genop_1(s, OP_LOADNIL, cursp()); push(); } } } break; case NODE_AND: { uint32_t pos; if (true_always(tree->car)) { codegen(s, tree->cdr, val); goto exit; } if 
(false_always(tree->car)) { codegen(s, tree->car, val); goto exit; } codegen(s, tree->car, VAL); pop(); pos = genjmp2_0(s, OP_JMPNOT, cursp(), val); codegen(s, tree->cdr, val); dispatch(s, pos); } break; case NODE_OR: { uint32_t pos; if (true_always(tree->car)) { codegen(s, tree->car, val); goto exit; } if (false_always(tree->car)) { codegen(s, tree->cdr, val); goto exit; } codegen(s, tree->car, VAL); pop(); pos = genjmp2_0(s, OP_JMPIF, cursp(), val); codegen(s, tree->cdr, val); dispatch(s, pos); } break; case NODE_WHILE: case NODE_UNTIL: { if (true_always(tree->car)) { if (nt == NODE_UNTIL) { if (val) { genop_1(s, OP_LOADNIL, cursp()); push(); } goto exit; } } else if (false_always(tree->car)) { if (nt == NODE_WHILE) { if (val) { genop_1(s, OP_LOADNIL, cursp()); push(); } goto exit; } } uint32_t pos = JMPLINK_START; struct loopinfo *lp = loop_push(s, LOOP_NORMAL); if (!val) lp->reg = -1; lp->pc0 = new_label(s); codegen(s, tree->car, VAL); pop(); if (nt == NODE_WHILE) { pos = genjmp2_0(s, OP_JMPNOT, cursp(), NOVAL); } else { pos = genjmp2_0(s, OP_JMPIF, cursp(), NOVAL); } lp->pc1 = new_label(s); codegen(s, tree->cdr, NOVAL); genjmp(s, OP_JMP, lp->pc0); dispatch(s, pos); loop_pop(s, val); } break; case NODE_FOR: for_body(s, tree); if (val) push(); break; case NODE_CASE: { int head = 0; uint32_t pos1, pos2, pos3, tmp; node *n; pos3 = JMPLINK_START; if (tree->car) { head = cursp(); codegen(s, tree->car, VAL); } tree = tree->cdr; while (tree) { n = tree->car->car; pos1 = pos2 = JMPLINK_START; while (n) { codegen(s, n->car, VAL); if (head) { gen_move(s, cursp(), head, 0); push(); push(); pop(); pop(); pop(); if (nint(n->car->car) == NODE_SPLAT) { genop_3(s, OP_SEND, cursp(), new_sym(s, MRB_SYM_2(s->mrb, __case_eqq)), 1); } else { genop_3(s, OP_SEND, cursp(), new_sym(s, MRB_OPSYM_2(s->mrb, eqq)), 1); } } else { pop(); } tmp = genjmp2(s, OP_JMPIF, cursp(), pos2, NOVAL); pos2 = tmp; n = n->cdr; } if (tree->car->car) { pos1 = genjmp_0(s, OP_JMP); dispatch_linked(s, pos2); } 
codegen(s, tree->car->cdr, val); if (val) pop(); tmp = genjmp(s, OP_JMP, pos3); pos3 = tmp; dispatch(s, pos1); tree = tree->cdr; } if (val) { uint32_t pos = cursp(); genop_1(s, OP_LOADNIL, cursp()); if (pos3 != JMPLINK_START) dispatch_linked(s, pos3); if (head) pop(); if (cursp() != pos) { gen_move(s, cursp(), pos, 0); } push(); } else { if (pos3 != JMPLINK_START) { dispatch_linked(s, pos3); } if (head) { pop(); } } } break; case NODE_SCOPE: scope_body(s, tree, NOVAL); break; case NODE_FCALL: case NODE_CALL: gen_call(s, tree, val, 0); break; case NODE_SCALL: gen_call(s, tree, val, 1); break; case NODE_DOT2: codegen(s, tree->car, val); codegen(s, tree->cdr, val); if (val) { pop(); pop(); genop_1(s, OP_RANGE_INC, cursp()); push(); } break; case NODE_DOT3: codegen(s, tree->car, val); codegen(s, tree->cdr, val); if (val) { pop(); pop(); genop_1(s, OP_RANGE_EXC, cursp()); push(); } break; case NODE_COLON2: { int sym = new_sym(s, nsym(tree->cdr)); codegen(s, tree->car, VAL); pop(); genop_2(s, OP_GETMCNST, cursp(), sym); if (val) push(); } break; case NODE_COLON3: { int sym = new_sym(s, nsym(tree)); genop_1(s, OP_OCLASS, cursp()); genop_2(s, OP_GETMCNST, cursp(), sym); if (val) push(); } break; case NODE_ARRAY: { int n; n = gen_values(s, tree, val, 0); if (val) { if (n >= 0) { pop_n(n); genop_2(s, OP_ARRAY, cursp(), n); } push(); } } break; case NODE_HASH: case NODE_KW_HASH: { int nk = gen_hash(s, tree, val, GEN_LIT_ARY_MAX); if (val && nk >= 0) { pop_n(nk*2); genop_2(s, OP_HASH, cursp(), nk); push(); } } break; case NODE_SPLAT: codegen(s, tree, val); break; case NODE_ASGN: gen_assignment(s, tree->car, tree->cdr, 0, val); break; case NODE_MASGN: { int len = 0, n = 0, post = 0; node *t = tree->cdr, *p; int rhs = cursp(); if (nint(t->car) == NODE_ARRAY && t->cdr && nosplat(t->cdr)) { /* fixed rhs */ t = t->cdr; while (t) { codegen(s, t->car, VAL); len++; t = t->cdr; } tree = tree->car; if (tree->car) { /* pre */ t = tree->car; n = 0; while (t) { if (n < len) { 
gen_assignment(s, t->car, NULL, rhs+n, NOVAL); n++; } else { genop_1(s, OP_LOADNIL, rhs+n); gen_assignment(s, t->car, NULL, rhs+n, NOVAL); } t = t->cdr; } } t = tree->cdr; if (t) { if (t->cdr) { /* post count */ p = t->cdr->car; while (p) { post++; p = p->cdr; } } if (t->car) { /* rest (len - pre - post) */ int rn; if (len < post + n) { rn = 0; } else { rn = len - post - n; } genop_3(s, OP_ARRAY2, cursp(), rhs+n, rn); gen_assignment(s, t->car, NULL, cursp(), NOVAL); n += rn; } if (t->cdr && t->cdr->car) { t = t->cdr->car; while (n<len) { gen_assignment(s, t->car, NULL, rhs+n, NOVAL); t = t->cdr; n++; } } } pop_n(len); if (val) { genop_2(s, OP_ARRAY, rhs, len); push(); } } else { /* variable rhs */ codegen(s, t, VAL); gen_vmassignment(s, tree->car, rhs, val); if (!val) { pop(); } } } break; case NODE_OP_ASGN: { mrb_sym sym = nsym(tree->cdr->car); mrb_int len; const char *name = mrb_sym_name_len(s->mrb, sym, &len); int idx, callargs = -1, vsp = -1; if ((len == 2 && name[0] == '|' && name[1] == '|') && (nint(tree->car->car) == NODE_CONST || nint(tree->car->car) == NODE_CVAR)) { int catch_entry, begin, end; int noexc, exc; struct loopinfo *lp; lp = loop_push(s, LOOP_BEGIN); lp->pc0 = new_label(s); catch_entry = catch_handler_new(s); begin = s->pc; exc = cursp(); codegen(s, tree->car, VAL); end = s->pc; noexc = genjmp_0(s, OP_JMP); lp->type = LOOP_RESCUE; catch_handler_set(s, catch_entry, MRB_CATCH_RESCUE, begin, end, s->pc); genop_1(s, OP_EXCEPT, exc); genop_1(s, OP_LOADF, exc); dispatch(s, noexc); loop_pop(s, NOVAL); } else if (nint(tree->car->car) == NODE_CALL) { node *n = tree->car->cdr; int base, i, nargs = 0; callargs = 0; if (val) { vsp = cursp(); push(); } codegen(s, n->car, VAL); /* receiver */ idx = new_sym(s, nsym(n->cdr->car)); base = cursp()-1; if (n->cdr->cdr->car) { nargs = gen_values(s, n->cdr->cdr->car->car, VAL, 13); if (nargs >= 0) { callargs = nargs; } else { /* varargs */ push(); nargs = 1; callargs = CALL_MAXARGS; } } /* copy receiver and arguments 
*/ gen_move(s, cursp(), base, 1); for (i=0; i<nargs; i++) { gen_move(s, cursp()+i+1, base+i+1, 1); } push_n(nargs+2);pop_n(nargs+2); /* space for receiver, arguments and a block */ genop_3(s, OP_SEND, cursp(), idx, callargs); push(); } else { codegen(s, tree->car, VAL); } if (len == 2 && ((name[0] == '|' && name[1] == '|') || (name[0] == '&' && name[1] == '&'))) { uint32_t pos; pop(); if (val) { if (vsp >= 0) { gen_move(s, vsp, cursp(), 1); } pos = genjmp2_0(s, name[0]=='|'?OP_JMPIF:OP_JMPNOT, cursp(), val); } else { pos = genjmp2_0(s, name[0]=='|'?OP_JMPIF:OP_JMPNOT, cursp(), val); } codegen(s, tree->cdr->cdr->car, VAL); pop(); if (val && vsp >= 0) { gen_move(s, vsp, cursp(), 1); } if (nint(tree->car->car) == NODE_CALL) { if (callargs == CALL_MAXARGS) { pop(); genop_2(s, OP_ARYPUSH, cursp(), 1); } else { pop_n(callargs); callargs++; } pop(); idx = new_sym(s, attrsym(s, nsym(tree->car->cdr->cdr->car))); genop_3(s, OP_SEND, cursp(), idx, callargs); } else { gen_assignment(s, tree->car, NULL, cursp(), val); } dispatch(s, pos); goto exit; } codegen(s, tree->cdr->cdr->car, VAL); push(); pop(); pop(); pop(); if (len == 1 && name[0] == '+') { gen_addsub(s, OP_ADD, cursp()); } else if (len == 1 && name[0] == '-') { gen_addsub(s, OP_SUB, cursp()); } else if (len == 1 && name[0] == '*') { genop_1(s, OP_MUL, cursp()); } else if (len == 1 && name[0] == '/') { genop_1(s, OP_DIV, cursp()); } else if (len == 1 && name[0] == '<') { genop_1(s, OP_LT, cursp()); } else if (len == 2 && name[0] == '<' && name[1] == '=') { genop_1(s, OP_LE, cursp()); } else if (len == 1 && name[0] == '>') { genop_1(s, OP_GT, cursp()); } else if (len == 2 && name[0] == '>' && name[1] == '=') { genop_1(s, OP_GE, cursp()); } else { idx = new_sym(s, sym); genop_3(s, OP_SEND, cursp(), idx, 1); } if (callargs < 0) { gen_assignment(s, tree->car, NULL, cursp(), val); } else { if (val && vsp >= 0) { gen_move(s, vsp, cursp(), 0); } if (callargs == CALL_MAXARGS) { pop(); genop_2(s, OP_ARYPUSH, cursp(), 1); } else 
{ pop_n(callargs); callargs++; } pop(); idx = new_sym(s, attrsym(s,nsym(tree->car->cdr->cdr->car))); genop_3(s, OP_SEND, cursp(), idx, callargs); } } break; case NODE_SUPER: { codegen_scope *s2 = s; int lv = 0; int n = 0, nk = 0, st = 0; push(); while (!s2->mscope) { lv++; s2 = s2->prev; if (!s2) break; } if (tree) { node *args = tree->car; if (args) { st = n = gen_values(s, args, VAL, 14); if (n < 0) { st = 1; n = 15; push(); } } /* keyword arguments */ if (s2 && (s2->ainfo & 0x1) && tree->cdr->car) { nk = gen_hash(s, tree->cdr->car->cdr, VAL, 14); if (nk < 0) {st++; nk = 15;} else st += nk*2; n |= nk<<4; } /* block arguments */ if (tree->cdr->cdr) { codegen(s, tree->cdr->cdr, VAL); } else if (!s2) {/* super at top-level */ push(); /* no need to push block */ } else { gen_blkmove(s, s2->ainfo, lv); } st++; } else { if (!s2) push(); else gen_blkmove(s, s2->ainfo, lv); st++; } pop_n(st+1); genop_2(s, OP_SUPER, cursp(), n); if (val) push(); } break; case NODE_ZSUPER: { codegen_scope *s2 = s; int lv = 0; uint16_t ainfo = 0; int n = CALL_MAXARGS; int sp = cursp(); push(); /* room for receiver */ while (!s2->mscope) { lv++; s2 = s2->prev; if (!s2) break; } if (s2 && s2->ainfo > 0) { ainfo = s2->ainfo; } if (ainfo > 0) { genop_2S(s, OP_ARGARY, cursp(), (ainfo<<4)|(lv & 0xf)); push(); push(); push(); /* ARGARY pushes 3 values at most */ pop(); pop(); pop(); /* keyword arguments */ if (ainfo & 0x1) { n |= CALL_MAXARGS<<4; push(); } /* block argument */ if (tree && tree->cdr && tree->cdr->cdr) { push(); codegen(s, tree->cdr->cdr, VAL); } } else { /* block argument */ if (tree && tree->cdr && tree->cdr->cdr) { codegen(s, tree->cdr->cdr, VAL); } else { gen_blkmove(s, 0, lv); } n = 0; } s->sp = sp; genop_2(s, OP_SUPER, cursp(), n); if (val) push(); } break; case NODE_RETURN: if (tree) { gen_retval(s, tree); } else { genop_1(s, OP_LOADNIL, cursp()); } if (s->loop) { gen_return(s, OP_RETURN_BLK, cursp()); } else { gen_return(s, OP_RETURN, cursp()); } if (val) push(); break; case 
NODE_YIELD: { codegen_scope *s2 = s; int lv = 0, ainfo = -1; int n = 0, sendv = 0; while (!s2->mscope) { lv++; s2 = s2->prev; if (!s2) break; } if (s2) { ainfo = (int)s2->ainfo; } if (ainfo < 0) codegen_error(s, "invalid yield (SyntaxError)"); push(); if (tree) { n = gen_values(s, tree, VAL, 14); if (n < 0) { n = sendv = 1; push(); } } push();pop(); /* space for a block */ pop_n(n+1); genop_2S(s, OP_BLKPUSH, cursp(), (ainfo<<4)|(lv & 0xf)); if (sendv) n = CALL_MAXARGS; genop_3(s, OP_SEND, cursp(), new_sym(s, MRB_SYM_2(s->mrb, call)), n); if (val) push(); } break; case NODE_BREAK: loop_break(s, tree); if (val) push(); break; case NODE_NEXT: if (!s->loop) { raise_error(s, "unexpected next"); } else if (s->loop->type == LOOP_NORMAL) { codegen(s, tree, NOVAL); genjmp(s, OP_JMPUW, s->loop->pc0); } else { if (tree) { codegen(s, tree, VAL); pop(); } else { genop_1(s, OP_LOADNIL, cursp()); } gen_return(s, OP_RETURN, cursp()); } if (val) push(); break; case NODE_REDO: if (!s->loop || s->loop->type == LOOP_BEGIN || s->loop->type == LOOP_RESCUE) { raise_error(s, "unexpected redo"); } else { genjmp(s, OP_JMPUW, s->loop->pc1); } if (val) push(); break; case NODE_RETRY: { const char *msg = "unexpected retry"; const struct loopinfo *lp = s->loop; while (lp && lp->type != LOOP_RESCUE) { lp = lp->prev; } if (!lp) { raise_error(s, msg); } else { genjmp(s, OP_JMPUW, lp->pc0); } if (val) push(); } break; case NODE_LVAR: if (val) { int idx = lv_idx(s, nsym(tree)); if (idx > 0) { gen_move(s, cursp(), idx, val); } else { gen_getupvar(s, cursp(), nsym(tree)); } push(); } break; case NODE_NVAR: if (val) { int idx = nint(tree); gen_move(s, cursp(), idx, val); push(); } break; case NODE_GVAR: { int sym = new_sym(s, nsym(tree)); genop_2(s, OP_GETGV, cursp(), sym); if (val) push(); } break; case NODE_IVAR: { int sym = new_sym(s, nsym(tree)); genop_2(s, OP_GETIV, cursp(), sym); if (val) push(); } break; case NODE_CVAR: { int sym = new_sym(s, nsym(tree)); genop_2(s, OP_GETCV, cursp(), sym); if 
(val) push(); } break; case NODE_CONST: { int sym = new_sym(s, nsym(tree)); genop_2(s, OP_GETCONST, cursp(), sym); if (val) push(); } break; case NODE_BACK_REF: if (val) { char buf[] = {'$', nchar(tree)}; int sym = new_sym(s, mrb_intern(s->mrb, buf, sizeof(buf))); genop_2(s, OP_GETGV, cursp(), sym); push(); } break; case NODE_NTH_REF: if (val) { mrb_state *mrb = s->mrb; mrb_value str; int sym; str = mrb_format(mrb, "$%d", nint(tree)); sym = new_sym(s, mrb_intern_str(mrb, str)); genop_2(s, OP_GETGV, cursp(), sym); push(); } break; case NODE_ARG: /* should not happen */ break; case NODE_BLOCK_ARG: if (!tree) { int idx = lv_idx(s, MRB_OPSYM_2(s->mrb, and)); if (idx == 0) { codegen_error(s, "no anonymous block argument"); } gen_move(s, cursp(), idx, val); } else { codegen(s, tree, val); } break; case NODE_INT: if (val) { char *p = (char*)tree->car; int base = nint(tree->cdr->car); mrb_int i; mrb_bool overflow; i = readint(s, p, base, FALSE, &overflow); if (overflow) { int off = new_litbn(s, p, base, FALSE); genop_2(s, OP_LOADL, cursp(), off); } else { gen_int(s, cursp(), i); } push(); } break; #ifndef MRB_NO_FLOAT case NODE_FLOAT: if (val) { char *p = (char*)tree; mrb_float f = mrb_float_read(p, NULL); int off = new_lit(s, mrb_float_value(s->mrb, f)); genop_2(s, OP_LOADL, cursp(), off); push(); } break; #endif case NODE_NEGATE: { nt = nint(tree->car); switch (nt) { #ifndef MRB_NO_FLOAT case NODE_FLOAT: if (val) { char *p = (char*)tree->cdr; mrb_float f = mrb_float_read(p, NULL); int off = new_lit(s, mrb_float_value(s->mrb, -f)); genop_2(s, OP_LOADL, cursp(), off); push(); } break; #endif case NODE_INT: if (val) { char *p = (char*)tree->cdr->car; int base = nint(tree->cdr->cdr->car); mrb_int i; mrb_bool overflow; i = readint(s, p, base, TRUE, &overflow); if (overflow) { int off = new_litbn(s, p, base, TRUE); genop_2(s, OP_LOADL, cursp(), off); } else { gen_int(s, cursp(), i); } push(); } break; default: if (val) { codegen(s, tree, VAL); pop(); push_n(2);pop_n(2); /* 
space for receiver&block */ mrb_sym minus = MRB_OPSYM_2(s->mrb, minus); if (!gen_uniop(s, minus, cursp())) { genop_3(s, OP_SEND, cursp(), new_sym(s, minus), 0); } push(); } else { codegen(s, tree, NOVAL); } break; } } break; case NODE_STR: if (val) { char *p = (char*)tree->car; size_t len = (intptr_t)tree->cdr; int ai = mrb_gc_arena_save(s->mrb); int off = new_lit(s, mrb_str_new(s->mrb, p, len)); mrb_gc_arena_restore(s->mrb, ai); genop_2(s, OP_STRING, cursp(), off); push(); } break; case NODE_HEREDOC: tree = ((struct mrb_parser_heredoc_info *)tree)->doc; /* fall through */ case NODE_DSTR: if (val) { node *n = tree; if (!n) { genop_1(s, OP_LOADNIL, cursp()); push(); break; } codegen(s, n->car, VAL); n = n->cdr; while (n) { codegen(s, n->car, VAL); pop(); pop(); genop_1(s, OP_STRCAT, cursp()); push(); n = n->cdr; } } else { node *n = tree; while (n) { if (nint(n->car->car) != NODE_STR) { codegen(s, n->car, NOVAL); } n = n->cdr; } } break; case NODE_WORDS: gen_literal_array(s, tree, FALSE, val); break; case NODE_SYMBOLS: gen_literal_array(s, tree, TRUE, val); break; case NODE_DXSTR: { node *n; int ai = mrb_gc_arena_save(s->mrb); int sym = new_sym(s, MRB_SYM_2(s->mrb, Kernel)); genop_1(s, OP_LOADSELF, cursp()); push(); codegen(s, tree->car, VAL); n = tree->cdr; while (n) { if (nint(n->car->car) == NODE_XSTR) { n->car->car = (struct mrb_ast_node*)(intptr_t)NODE_STR; mrb_assert(!n->cdr); /* must be the end */ } codegen(s, n->car, VAL); pop(); pop(); genop_1(s, OP_STRCAT, cursp()); push(); n = n->cdr; } push(); /* for block */ pop_n(3); sym = new_sym(s, MRB_OPSYM_2(s->mrb, tick)); /* ` */ genop_3(s, OP_SEND, cursp(), sym, 1); if (val) push(); mrb_gc_arena_restore(s->mrb, ai); } break; case NODE_XSTR: { char *p = (char*)tree->car; size_t len = (intptr_t)tree->cdr; int ai = mrb_gc_arena_save(s->mrb); int off = new_lit(s, mrb_str_new(s->mrb, p, len)); int sym; genop_1(s, OP_LOADSELF, cursp()); push(); genop_2(s, OP_STRING, cursp(), off); push(); push(); pop_n(3); sym = 
new_sym(s, MRB_OPSYM_2(s->mrb, tick)); /* ` */ genop_3(s, OP_SEND, cursp(), sym, 1); if (val) push(); mrb_gc_arena_restore(s->mrb, ai); } break; case NODE_REGX: if (val) { char *p1 = (char*)tree->car; char *p2 = (char*)tree->cdr->car; char *p3 = (char*)tree->cdr->cdr; int ai = mrb_gc_arena_save(s->mrb); int sym = new_sym(s, mrb_intern_lit(s->mrb, REGEXP_CLASS)); int off = new_lit(s, mrb_str_new_cstr(s->mrb, p1)); int argc = 1; genop_1(s, OP_OCLASS, cursp()); genop_2(s, OP_GETMCNST, cursp(), sym); push(); genop_2(s, OP_STRING, cursp(), off); push(); if (p2 || p3) { if (p2) { /* opt */ off = new_lit(s, mrb_str_new_cstr(s->mrb, p2)); genop_2(s, OP_STRING, cursp(), off); } else { genop_1(s, OP_LOADNIL, cursp()); } push(); argc++; if (p3) { /* enc */ off = new_lit(s, mrb_str_new(s->mrb, p3, 1)); genop_2(s, OP_STRING, cursp(), off); push(); argc++; } } push(); /* space for a block */ pop_n(argc+2); sym = new_sym(s, MRB_SYM_2(s->mrb, compile)); genop_3(s, OP_SEND, cursp(), sym, argc); mrb_gc_arena_restore(s->mrb, ai); push(); } break; case NODE_DREGX: if (val) { node *n = tree->car; int ai = mrb_gc_arena_save(s->mrb); int sym = new_sym(s, mrb_intern_lit(s->mrb, REGEXP_CLASS)); int argc = 1; int off; char *p; genop_1(s, OP_OCLASS, cursp()); genop_2(s, OP_GETMCNST, cursp(), sym); push(); codegen(s, n->car, VAL); n = n->cdr; while (n) { codegen(s, n->car, VAL); pop(); pop(); genop_1(s, OP_STRCAT, cursp()); push(); n = n->cdr; } n = tree->cdr->cdr; if (n->car) { /* tail */ p = (char*)n->car; off = new_lit(s, mrb_str_new_cstr(s->mrb, p)); codegen(s, tree->car, VAL); genop_2(s, OP_STRING, cursp(), off); pop(); genop_1(s, OP_STRCAT, cursp()); push(); } if (n->cdr->car) { /* opt */ char *p2 = (char*)n->cdr->car; off = new_lit(s, mrb_str_new_cstr(s->mrb, p2)); genop_2(s, OP_STRING, cursp(), off); push(); argc++; } if (n->cdr->cdr) { /* enc */ char *p2 = (char*)n->cdr->cdr; off = new_lit(s, mrb_str_new_cstr(s->mrb, p2)); genop_2(s, OP_STRING, cursp(), off); push(); argc++; } 
push(); /* space for a block */ pop_n(argc+2); sym = new_sym(s, MRB_SYM_2(s->mrb, compile)); genop_3(s, OP_SEND, cursp(), sym, argc); mrb_gc_arena_restore(s->mrb, ai); push(); } else { node *n = tree->car; while (n) { if (nint(n->car->car) != NODE_STR) { codegen(s, n->car, NOVAL); } n = n->cdr; } } break; case NODE_SYM: if (val) { int sym = new_sym(s, nsym(tree)); genop_2(s, OP_LOADSYM, cursp(), sym); push(); } break; case NODE_DSYM: codegen(s, tree, val); if (val) { gen_intern(s); } break; case NODE_SELF: if (val) { genop_1(s, OP_LOADSELF, cursp()); push(); } break; case NODE_NIL: if (val) { genop_1(s, OP_LOADNIL, cursp()); push(); } break; case NODE_TRUE: if (val) { genop_1(s, OP_LOADT, cursp()); push(); } break; case NODE_FALSE: if (val) { genop_1(s, OP_LOADF, cursp()); push(); } break; case NODE_ALIAS: { int a = new_sym(s, nsym(tree->car)); int b = new_sym(s, nsym(tree->cdr)); genop_2(s, OP_ALIAS, a, b); if (val) { genop_1(s, OP_LOADNIL, cursp()); push(); } } break; case NODE_UNDEF: { node *t = tree; while (t) { int symbol = new_sym(s, nsym(t->car)); genop_1(s, OP_UNDEF, symbol); t = t->cdr; } if (val) { genop_1(s, OP_LOADNIL, cursp()); push(); } } break; case NODE_CLASS: { int idx; node *body; if (tree->car->car == (node*)0) { genop_1(s, OP_LOADNIL, cursp()); push(); } else if (tree->car->car == (node*)1) { genop_1(s, OP_OCLASS, cursp()); push(); } else { codegen(s, tree->car->car, VAL); } if (tree->cdr->car) { codegen(s, tree->cdr->car, VAL); } else { genop_1(s, OP_LOADNIL, cursp()); push(); } pop(); pop(); idx = new_sym(s, nsym(tree->car->cdr)); genop_2(s, OP_CLASS, cursp(), idx); body = tree->cdr->cdr->car; if (nint(body->cdr->car) == NODE_BEGIN && body->cdr->cdr == NULL) { genop_1(s, OP_LOADNIL, cursp()); } else { idx = scope_body(s, body, val); genop_2(s, OP_EXEC, cursp(), idx); } if (val) { push(); } } break; case NODE_MODULE: { int idx; if (tree->car->car == (node*)0) { genop_1(s, OP_LOADNIL, cursp()); push(); } else if (tree->car->car == (node*)1) { 
genop_1(s, OP_OCLASS, cursp()); push(); } else { codegen(s, tree->car->car, VAL); } pop(); idx = new_sym(s, nsym(tree->car->cdr)); genop_2(s, OP_MODULE, cursp(), idx); if (nint(tree->cdr->car->cdr->car) == NODE_BEGIN && tree->cdr->car->cdr->cdr == NULL) { genop_1(s, OP_LOADNIL, cursp()); } else { idx = scope_body(s, tree->cdr->car, val); genop_2(s, OP_EXEC, cursp(), idx); } if (val) { push(); } } break; case NODE_SCLASS: { int idx; codegen(s, tree->car, VAL); pop(); genop_1(s, OP_SCLASS, cursp()); if (nint(tree->cdr->car->cdr->car) == NODE_BEGIN && tree->cdr->car->cdr->cdr == NULL) { genop_1(s, OP_LOADNIL, cursp()); } else { idx = scope_body(s, tree->cdr->car, val); genop_2(s, OP_EXEC, cursp(), idx); } if (val) { push(); } } break; case NODE_DEF: { int sym = new_sym(s, nsym(tree->car)); int idx = lambda_body(s, tree->cdr, 0); genop_1(s, OP_TCLASS, cursp()); push(); genop_2(s, OP_METHOD, cursp(), idx); push(); pop(); pop(); genop_2(s, OP_DEF, cursp(), sym); if (val) push(); } break; case NODE_SDEF: { node *recv = tree->car; int sym = new_sym(s, nsym(tree->cdr->car)); int idx = lambda_body(s, tree->cdr->cdr, 0); codegen(s, recv, VAL); pop(); genop_1(s, OP_SCLASS, cursp()); push(); genop_2(s, OP_METHOD, cursp(), idx); pop(); genop_2(s, OP_DEF, cursp(), sym); if (val) push(); } break; case NODE_POSTEXE: codegen(s, tree, NOVAL); break; default: break; } exit: s->rlev = rlev; } static void scope_add_irep(codegen_scope *s) { mrb_irep *irep; codegen_scope *prev = s->prev; if (prev->irep == NULL) { irep = mrb_add_irep(s->mrb); prev->irep = s->irep = irep; return; } else { if (prev->irep->rlen == UINT16_MAX) { codegen_error(s, "too many nested blocks/methods"); } s->irep = irep = mrb_add_irep(s->mrb); if (prev->irep->rlen == prev->rcapa) { prev->rcapa *= 2; prev->reps = (mrb_irep**)codegen_realloc(s, prev->reps, sizeof(mrb_irep*)*prev->rcapa); } prev->reps[prev->irep->rlen] = irep; prev->irep->rlen++; } } static codegen_scope* scope_new(mrb_state *mrb, codegen_scope *prev, 
node *nlv) { static const codegen_scope codegen_scope_zero = { 0 }; mrb_pool *pool = mrb_pool_open(mrb); codegen_scope *s = (codegen_scope *)mrb_pool_alloc(pool, sizeof(codegen_scope)); if (!s) { if (prev) codegen_error(prev, "unexpected scope"); return NULL; } *s = codegen_scope_zero; s->mrb = mrb; s->mpool = pool; if (!prev) return s; s->prev = prev; s->ainfo = 0; s->mscope = 0; scope_add_irep(s); s->rcapa = 8; s->reps = (mrb_irep**)mrb_malloc(mrb, sizeof(mrb_irep*)*s->rcapa); s->icapa = 1024; s->iseq = (mrb_code*)mrb_malloc(mrb, sizeof(mrb_code)*s->icapa); s->pcapa = 32; s->pool = (mrb_pool_value*)mrb_malloc(mrb, sizeof(mrb_pool_value)*s->pcapa); s->scapa = 256; s->syms = (mrb_sym*)mrb_malloc(mrb, sizeof(mrb_sym)*s->scapa); s->lv = nlv; s->sp += node_len(nlv)+1; /* add self */ s->nlocals = s->sp; if (nlv) { mrb_sym *lv; node *n = nlv; size_t i = 0; s->irep->lv = lv = (mrb_sym*)mrb_malloc(mrb, sizeof(mrb_sym)*(s->nlocals-1)); for (i=0, n=nlv; n; i++,n=n->cdr) { lv[i] = lv_name(n); } mrb_assert(i + 1 == s->nlocals); } s->ai = mrb_gc_arena_save(mrb); s->filename_sym = prev->filename_sym; if (s->filename_sym) { s->lines = (uint16_t*)mrb_malloc(mrb, sizeof(short)*s->icapa); } s->lineno = prev->lineno; /* debug setting */ s->debug_start_pos = 0; if (s->filename_sym) { mrb_debug_info_alloc(mrb, s->irep); } else { s->irep->debug_info = NULL; } s->parser = prev->parser; s->filename_index = prev->filename_index; s->rlev = prev->rlev+1; return s; } static void scope_finish(codegen_scope *s) { mrb_state *mrb = s->mrb; mrb_irep *irep = s->irep; if (s->nlocals > 0xff) { codegen_error(s, "too many local variables"); } irep->flags = 0; if (s->iseq) { size_t catchsize = sizeof(struct mrb_irep_catch_handler) * irep->clen; irep->iseq = (const mrb_code *)codegen_realloc(s, s->iseq, sizeof(mrb_code)*s->pc + catchsize); irep->ilen = s->pc; if (irep->clen > 0) { memcpy((void *)(irep->iseq + irep->ilen), s->catch_table, catchsize); } } else { irep->clen = 0; } mrb_free(s->mrb, 
s->catch_table); s->catch_table = NULL; irep->pool = (const mrb_pool_value*)codegen_realloc(s, s->pool, sizeof(mrb_pool_value)*irep->plen); irep->syms = (const mrb_sym*)codegen_realloc(s, s->syms, sizeof(mrb_sym)*irep->slen); irep->reps = (const mrb_irep**)codegen_realloc(s, s->reps, sizeof(mrb_irep*)*irep->rlen); if (s->filename_sym) { mrb_sym fname = mrb_parser_get_filename(s->parser, s->filename_index); const char *filename = mrb_sym_name_len(s->mrb, fname, NULL); mrb_debug_info_append_file(s->mrb, s->irep->debug_info, filename, s->lines, s->debug_start_pos, s->pc); } mrb_free(s->mrb, s->lines); irep->nlocals = s->nlocals; irep->nregs = s->nregs; mrb_gc_arena_restore(mrb, s->ai); mrb_pool_close(s->mpool); } static struct loopinfo* loop_push(codegen_scope *s, enum looptype t) { struct loopinfo *p = (struct loopinfo *)codegen_palloc(s, sizeof(struct loopinfo)); p->type = t; p->pc0 = p->pc1 = p->pc2 = JMPLINK_START; p->prev = s->loop; p->reg = cursp(); s->loop = p; return p; } static void loop_break(codegen_scope *s, node *tree) { if (!s->loop) { codegen(s, tree, NOVAL); raise_error(s, "unexpected break"); } else { struct loopinfo *loop; loop = s->loop; if (tree) { if (loop->reg < 0) { codegen(s, tree, NOVAL); } else { gen_retval(s, tree); } } while (loop) { if (loop->type == LOOP_BEGIN) { loop = loop->prev; } else if (loop->type == LOOP_RESCUE) { loop = loop->prev; } else{ break; } } if (!loop) { raise_error(s, "unexpected break"); return; } if (loop->type == LOOP_NORMAL) { int tmp; if (loop->reg >= 0) { if (tree) { gen_move(s, loop->reg, cursp(), 0); } else { genop_1(s, OP_LOADNIL, loop->reg); } } tmp = genjmp(s, OP_JMPUW, loop->pc2); loop->pc2 = tmp; } else { if (!tree) { genop_1(s, OP_LOADNIL, cursp()); } gen_return(s, OP_BREAK, cursp()); } } } static void loop_pop(codegen_scope *s, int val) { if (val) { genop_1(s, OP_LOADNIL, cursp()); } dispatch_linked(s, s->loop->pc2); s->loop = s->loop->prev; if (val) push(); } static int catch_handler_new(codegen_scope *s) 
{ size_t newsize = sizeof(struct mrb_irep_catch_handler) * (s->irep->clen + 1); s->catch_table = (struct mrb_irep_catch_handler *)codegen_realloc(s, (void *)s->catch_table, newsize); return s->irep->clen ++; } static void catch_handler_set(codegen_scope *s, int ent, enum mrb_catch_type type, uint32_t begin, uint32_t end, uint32_t target) { struct mrb_irep_catch_handler *e; mrb_assert(ent >= 0 && ent < s->irep->clen); e = &s->catch_table[ent]; uint8_to_bin(type, &e->type); mrb_irep_catch_handler_pack(begin, e->begin); mrb_irep_catch_handler_pack(end, e->end); mrb_irep_catch_handler_pack(target, e->target); } static struct RProc* generate_code(mrb_state *mrb, parser_state *p, int val) { codegen_scope *scope = scope_new(mrb, 0, 0); struct mrb_jmpbuf *prev_jmp = mrb->jmp; struct mrb_jmpbuf jmpbuf; struct RProc *proc; mrb->jmp = &jmpbuf; scope->mrb = mrb; scope->parser = p; scope->filename_sym = p->filename_sym; scope->filename_index = p->current_filename_index; MRB_TRY(mrb->jmp) { /* prepare irep */ codegen(scope, p->tree, val); proc = mrb_proc_new(mrb, scope->irep); mrb_irep_decref(mrb, scope->irep); mrb_pool_close(scope->mpool); proc->c = NULL; if (mrb->c->cibase && mrb->c->cibase->proc == proc->upper) { proc->upper = NULL; } mrb->jmp = prev_jmp; return proc; } MRB_CATCH(mrb->jmp) { mrb_irep_decref(mrb, scope->irep); mrb_pool_close(scope->mpool); mrb->jmp = prev_jmp; return NULL; } MRB_END_EXC(mrb->jmp); } MRB_API struct RProc* mrb_generate_code(mrb_state *mrb, parser_state *p) { return generate_code(mrb, p, VAL); } void mrb_irep_remove_lv(mrb_state *mrb, mrb_irep *irep) { int i; if (irep->flags & MRB_IREP_NO_FREE) return; if (irep->lv) { mrb_free(mrb, (void*)irep->lv); irep->lv = NULL; } if (!irep->reps) return; for (i = 0; i < irep->rlen; ++i) { mrb_irep_remove_lv(mrb, (mrb_irep*)irep->reps[i]); } }
/*
 * gen_assignment: emit VM instructions that store a value into the lhs
 * described by `tree`.
 *
 *  s    - current codegen scope (holds virtual stack pointer, irep, etc.)
 *  tree - lhs node; tree->car is the node type, tree->cdr its payload
 *  rhs  - rhs expression to evaluate, or NULL when the value is already
 *         materialized in register `sp`
 *  sp   - register holding the value to assign when rhs == NULL
 *  val  - nonzero when the assignment itself is used as an expression
 *         (its value must remain on the virtual stack afterwards)
 *
 * NOTE(review): the `static void` return type is on the preceding source
 * line, outside this span.
 */
gen_assignment(codegen_scope *s, node *tree, node *rhs, int sp, int val)
{
  int idx;
  int type = nint(tree->car);

  /* Pre-pass: for simple lhs kinds the rhs can be evaluated up front and
   * its result register becomes `sp`.  For COLON2/CALL/SCALL the receiver
   * must be evaluated before the rhs, so rhs evaluation is deferred to the
   * per-kind code below ("keep evaluation order"). */
  switch (type) {
  case NODE_GVAR:
  case NODE_ARG:
  case NODE_LVAR:
  case NODE_IVAR:
  case NODE_CVAR:
  case NODE_CONST:
  case NODE_NIL:
  case NODE_MASGN:
    if (rhs) {
      codegen(s, rhs, VAL);
      pop();
      sp = cursp();
    }
    break;
  case NODE_COLON2:
  case NODE_CALL:
  case NODE_SCALL:
    /* keep evaluation order */
    break;
  case NODE_NVAR:
    /* numbered block parameters (_1, _2, ...) are read-only */
    codegen_error(s, "Can't assign to numbered parameter");
    break;
  default:
    codegen_error(s, "unknown lhs");
    break;
  }

  tree = tree->cdr;
  switch (type) {
  case NODE_GVAR:
    gen_setxv(s, OP_SETGV, sp, nsym(tree), val);
    break;
  case NODE_ARG:
  case NODE_LVAR:
    idx = lv_idx(s, nsym(tree));
    if (idx > 0) {
      /* local variable in this scope: plain register move (skipped when
       * source and destination registers coincide) */
      if (idx != sp) {
        gen_move(s, idx, sp, val);
      }
      break;
    }
    else {                      /* upvar */
      gen_setupvar(s, sp, nsym(tree));
    }
    break;
  case NODE_IVAR:
    gen_setxv(s, OP_SETIV, sp, nsym(tree), val);
    break;
  case NODE_CVAR:
    gen_setxv(s, OP_SETCV, sp, nsym(tree), val);
    break;
  case NODE_CONST:
    gen_setxv(s, OP_SETCONST, sp, nsym(tree), val);
    break;
  case NODE_COLON2:
    /* scoped constant assignment: Receiver::CONST = value */
    if (sp) {
      gen_move(s, cursp(), sp, 0);
    }
    sp = cursp();
    push();
    codegen(s, tree->car, VAL);   /* evaluate the receiver (module/class) */
    if (rhs) {
      codegen(s, rhs, VAL);
      pop();
      gen_move(s, sp, cursp(), 0);
    }
    pop_n(2);
    idx = new_sym(s, nsym(tree->cdr));
    genop_2(s, OP_SETMCNST, sp, idx);
    break;
  case NODE_CALL:
  case NODE_SCALL:
    /* attribute/element assignment: recv.attr = v, recv[i] = v, and the
     * safe-navigation form recv&.attr = v (NODE_SCALL). */
    {
      int noself = 0, safe = (type == NODE_SCALL), skip = 0, top, call, n = 0;
      mrb_sym mid = nsym(tree->cdr->car);

      top = cursp();
      if (val || sp == cursp()) {
        push();                 /* room for retval */
      }
      call = cursp();
      if (!tree->car) {
        /* no explicit receiver: call against self via OP_SSEND below */
        noself = 1;
        push();
      }
      else {
        codegen(s, tree->car, VAL); /* receiver */
      }
      if (safe) {
        /* &. form: duplicate the receiver and jump past the send when nil */
        int recv = cursp()-1;
        gen_move(s, cursp(), recv, 1);
        skip = genjmp2_0(s, OP_JMPNIL, cursp(), val);
      }
      tree = tree->cdr->cdr->car;
      if (tree) {
        if (tree->car) {            /* positional arguments */
          /* reserve one packing slot (13 instead of 14) when keyword
           * arguments follow */
          n = gen_values(s, tree->car, VAL, (tree->cdr->car)?13:14);
          if (n < 0) {              /* variable length */
            n = 15;                 /* argc==15 marks args packed in one array */
            push();
          }
        }
        if (tree->cdr->car) {       /* keyword arguments */
          gen_hash(s, tree->cdr->car->cdr, VAL, 0);
          if (n < 14) {
            n++;
            push();
          }
          else {
            /* already at the packing limit: append the kw hash to the
             * packed argument array instead of using a new stack slot */
            pop();
            genop_2(s, OP_ARYPUSH, cursp(), 1);
          }
        }
      }
      /* the assigned value becomes the final call argument */
      if (rhs) {
        codegen(s, rhs, VAL);
        pop();
      }
      else {
        gen_move(s, cursp(), sp, 0);
      }
      if (val) {
        /* keep a copy of the assigned value as the expression result */
        gen_move(s, top, cursp(), 1);
      }
      if (n < 14) {
        n++;
      }
      else {
        pop();
        genop_2(s, OP_ARYPUSH, cursp(), 1);
      }
      s->sp = call;               /* rewind stack to the call frame base */
      if (mid == MRB_OPSYM_2(s->mrb, aref) && n == 2) {
        /* recv[i] = v with exactly one index: dedicated OP_SETIDX */
        genop_1(s, OP_SETIDX, cursp());
      }
      else {
        /* attrsym() maps `attr` to the writer symbol (`attr=`) */
        genop_3(s, noself ? OP_SSEND : OP_SEND, cursp(), new_sym(s, attrsym(s, mid)), n);
      }
      if (safe) {
        dispatch(s, skip);        /* nil-receiver jump lands after the send */
      }
      s->sp = top;
    }
    break;
  case NODE_MASGN:
    gen_vmassignment(s, tree->car, sp, val);
    break;
  /* splat without assignment */
  case NODE_NIL:
    break;
  default:
    codegen_error(s, "unknown lhs");
    break;
  }
  if (val) push();
}
gen_assignment(codegen_scope *s, node *tree, node *rhs, int sp, int val) { int idx; int type = nint(tree->car); switch (type) { case NODE_GVAR: case NODE_ARG: case NODE_LVAR: case NODE_IVAR: case NODE_CVAR: case NODE_CONST: case NODE_NIL: case NODE_MASGN: if (rhs) { codegen(s, rhs, VAL); pop(); sp = cursp(); } break; case NODE_COLON2: case NODE_CALL: case NODE_SCALL: /* keep evaluation order */ break; case NODE_NVAR: codegen_error(s, "Can't assign to numbered parameter"); break; default: codegen_error(s, "unknown lhs"); break; } tree = tree->cdr; switch (type) { case NODE_GVAR: gen_setxv(s, OP_SETGV, sp, nsym(tree), val); break; case NODE_ARG: case NODE_LVAR: idx = lv_idx(s, nsym(tree)); if (idx > 0) { if (idx != sp) { gen_move(s, idx, sp, val); } break; } else { /* upvar */ gen_setupvar(s, sp, nsym(tree)); } break; case NODE_IVAR: gen_setxv(s, OP_SETIV, sp, nsym(tree), val); break; case NODE_CVAR: gen_setxv(s, OP_SETCV, sp, nsym(tree), val); break; case NODE_CONST: gen_setxv(s, OP_SETCONST, sp, nsym(tree), val); break; case NODE_COLON2: if (sp) { gen_move(s, cursp(), sp, 0); } sp = cursp(); push(); codegen(s, tree->car, VAL); if (rhs) { codegen(s, rhs, VAL); pop(); gen_move(s, sp, cursp(), 0); } pop_n(2); idx = new_sym(s, nsym(tree->cdr)); genop_2(s, OP_SETMCNST, sp, idx); break; case NODE_CALL: case NODE_SCALL: { int noself = 0, safe = (type == NODE_SCALL), skip = 0, top, call, n = 0; mrb_sym mid = nsym(tree->cdr->car); top = cursp(); if (val || sp == cursp()) { push(); /* room for retval */ } call = cursp(); if (!tree->car) { noself = 1; push(); } else { codegen(s, tree->car, VAL); /* receiver */ } if (safe) { int recv = cursp()-1; gen_move(s, cursp(), recv, 1); skip = genjmp2_0(s, OP_JMPNIL, cursp(), val); } tree = tree->cdr->cdr->car; if (tree) { if (tree->car) { /* positional arguments */ n = gen_values(s, tree->car, VAL, (tree->cdr->car)?13:14); if (n < 0) { /* variable length */ n = 15; push(); } } if (tree->cdr->car) { /* keyword arguments */ if (n == 14) 
{ pop_n(n); genop_2(s, OP_ARRAY, cursp(), n); push(); n = 15; } gen_hash(s, tree->cdr->car->cdr, VAL, 0); if (n < 14) { n++; } else { pop_n(2); genop_2(s, OP_ARYPUSH, cursp(), 1); } push(); } } if (rhs) { codegen(s, rhs, VAL); pop(); } else { gen_move(s, cursp(), sp, 0); } if (val) { gen_move(s, top, cursp(), 1); } if (n < 14) { n++; } else { pop(); genop_2(s, OP_ARYPUSH, cursp(), 1); } s->sp = call; if (mid == MRB_OPSYM_2(s->mrb, aref) && n == 2) { genop_1(s, OP_SETIDX, cursp()); } else { genop_3(s, noself ? OP_SSEND : OP_SEND, cursp(), new_sym(s, attrsym(s, mid)), n); } if (safe) { dispatch(s, skip); } s->sp = top; } break; case NODE_MASGN: gen_vmassignment(s, tree->car, sp, val); break; /* splat without assignment */ case NODE_NIL: break; default: codegen_error(s, "unknown lhs"); break; } if (val) push(); }
{'added': [(1868, ' if (n == 14) {'), (1869, ' pop_n(n);'), (1870, ' genop_2(s, OP_ARRAY, cursp(), n);'), (1871, ' push();'), (1872, ' n = 15;'), (1873, ' }'), (1879, ' pop_n(2);'), (1882, ' push();')], 'deleted': [(1871, ' push();'), (1874, ' pop();')]}
8
2
3,512
22,972
156
883
47
https://github.com/mruby/mruby
CVE-2022-0525
CWE-125
2,338
debug.c
C
dump_mm
// SPDX-License-Identifier: GPL-2.0 /* * mm/debug.c * * mm/ specific debug routines. * */ #include <linux/kernel.h> #include <linux/mm.h> #include <linux/trace_events.h> #include <linux/memcontrol.h> #include <trace/events/mmflags.h> #include <linux/migrate.h> #include <linux/page_owner.h> #include "internal.h" char *migrate_reason_names[MR_TYPES] = { "compaction", "memory_failure", "memory_hotplug", "syscall_or_cpuset", "mempolicy_mbind", "numa_misplaced", "cma", }; const struct trace_print_flags pageflag_names[] = { __def_pageflag_names, {0, NULL} }; const struct trace_print_flags gfpflag_names[] = { __def_gfpflag_names, {0, NULL} }; const struct trace_print_flags vmaflag_names[] = { __def_vmaflag_names, {0, NULL} }; void __dump_page(struct page *page, const char *reason) { bool page_poisoned = PagePoisoned(page); int mapcount; /* * If struct page is poisoned don't access Page*() functions as that * leads to recursive loop. Page*() check for poisoned pages, and calls * dump_page() when detected. */ if (page_poisoned) { pr_emerg("page:%px is uninitialized and poisoned", page); goto hex_only; } /* * Avoid VM_BUG_ON() in page_mapcount(). * page->_mapcount space in struct page is used by sl[aou]b pages to * encode own info. */ mapcount = PageSlab(page) ? 
0 : page_mapcount(page); pr_emerg("page:%px count:%d mapcount:%d mapping:%px index:%#lx", page, page_ref_count(page), mapcount, page->mapping, page_to_pgoff(page)); if (PageCompound(page)) pr_cont(" compound_mapcount: %d", compound_mapcount(page)); pr_cont("\n"); BUILD_BUG_ON(ARRAY_SIZE(pageflag_names) != __NR_PAGEFLAGS + 1); pr_emerg("flags: %#lx(%pGp)\n", page->flags, &page->flags); hex_only: print_hex_dump(KERN_ALERT, "raw: ", DUMP_PREFIX_NONE, 32, sizeof(unsigned long), page, sizeof(struct page), false); if (reason) pr_alert("page dumped because: %s\n", reason); #ifdef CONFIG_MEMCG if (!page_poisoned && page->mem_cgroup) pr_alert("page->mem_cgroup:%px\n", page->mem_cgroup); #endif } void dump_page(struct page *page, const char *reason) { __dump_page(page, reason); dump_page_owner(page); } EXPORT_SYMBOL(dump_page); #ifdef CONFIG_DEBUG_VM void dump_vma(const struct vm_area_struct *vma) { pr_emerg("vma %px start %px end %px\n" "next %px prev %px mm %px\n" "prot %lx anon_vma %px vm_ops %px\n" "pgoff %lx file %px private_data %px\n" "flags: %#lx(%pGv)\n", vma, (void *)vma->vm_start, (void *)vma->vm_end, vma->vm_next, vma->vm_prev, vma->vm_mm, (unsigned long)pgprot_val(vma->vm_page_prot), vma->anon_vma, vma->vm_ops, vma->vm_pgoff, vma->vm_file, vma->vm_private_data, vma->vm_flags, &vma->vm_flags); } EXPORT_SYMBOL(dump_vma); void dump_mm(const struct mm_struct *mm) { pr_emerg("mm %px mmap %px seqnum %d task_size %lu\n" #ifdef CONFIG_MMU "get_unmapped_area %px\n" #endif "mmap_base %lu mmap_legacy_base %lu highest_vm_end %lu\n" "pgd %px mm_users %d mm_count %d pgtables_bytes %lu map_count %d\n" "hiwater_rss %lx hiwater_vm %lx total_vm %lx locked_vm %lx\n" "pinned_vm %lx data_vm %lx exec_vm %lx stack_vm %lx\n" "start_code %lx end_code %lx start_data %lx end_data %lx\n" "start_brk %lx brk %lx start_stack %lx\n" "arg_start %lx arg_end %lx env_start %lx env_end %lx\n" "binfmt %px flags %lx core_state %px\n" #ifdef CONFIG_AIO "ioctx_table %px\n" #endif #ifdef CONFIG_MEMCG 
"owner %px " #endif "exe_file %px\n" #ifdef CONFIG_MMU_NOTIFIER "mmu_notifier_mm %px\n" #endif #ifdef CONFIG_NUMA_BALANCING "numa_next_scan %lu numa_scan_offset %lu numa_scan_seq %d\n" #endif "tlb_flush_pending %d\n" "def_flags: %#lx(%pGv)\n", mm, mm->mmap, mm->vmacache_seqnum, mm->task_size, #ifdef CONFIG_MMU mm->get_unmapped_area, #endif mm->mmap_base, mm->mmap_legacy_base, mm->highest_vm_end, mm->pgd, atomic_read(&mm->mm_users), atomic_read(&mm->mm_count), mm_pgtables_bytes(mm), mm->map_count, mm->hiwater_rss, mm->hiwater_vm, mm->total_vm, mm->locked_vm, mm->pinned_vm, mm->data_vm, mm->exec_vm, mm->stack_vm, mm->start_code, mm->end_code, mm->start_data, mm->end_data, mm->start_brk, mm->brk, mm->start_stack, mm->arg_start, mm->arg_end, mm->env_start, mm->env_end, mm->binfmt, mm->flags, mm->core_state, #ifdef CONFIG_AIO mm->ioctx_table, #endif #ifdef CONFIG_MEMCG mm->owner, #endif mm->exe_file, #ifdef CONFIG_MMU_NOTIFIER mm->mmu_notifier_mm, #endif #ifdef CONFIG_NUMA_BALANCING mm->numa_next_scan, mm->numa_scan_offset, mm->numa_scan_seq, #endif atomic_read(&mm->tlb_flush_pending), mm->def_flags, &mm->def_flags ); } #endif /* CONFIG_DEBUG_VM */
// SPDX-License-Identifier: GPL-2.0 /* * mm/debug.c * * mm/ specific debug routines. * */ #include <linux/kernel.h> #include <linux/mm.h> #include <linux/trace_events.h> #include <linux/memcontrol.h> #include <trace/events/mmflags.h> #include <linux/migrate.h> #include <linux/page_owner.h> #include "internal.h" char *migrate_reason_names[MR_TYPES] = { "compaction", "memory_failure", "memory_hotplug", "syscall_or_cpuset", "mempolicy_mbind", "numa_misplaced", "cma", }; const struct trace_print_flags pageflag_names[] = { __def_pageflag_names, {0, NULL} }; const struct trace_print_flags gfpflag_names[] = { __def_gfpflag_names, {0, NULL} }; const struct trace_print_flags vmaflag_names[] = { __def_vmaflag_names, {0, NULL} }; void __dump_page(struct page *page, const char *reason) { bool page_poisoned = PagePoisoned(page); int mapcount; /* * If struct page is poisoned don't access Page*() functions as that * leads to recursive loop. Page*() check for poisoned pages, and calls * dump_page() when detected. */ if (page_poisoned) { pr_emerg("page:%px is uninitialized and poisoned", page); goto hex_only; } /* * Avoid VM_BUG_ON() in page_mapcount(). * page->_mapcount space in struct page is used by sl[aou]b pages to * encode own info. */ mapcount = PageSlab(page) ? 
0 : page_mapcount(page); pr_emerg("page:%px count:%d mapcount:%d mapping:%px index:%#lx", page, page_ref_count(page), mapcount, page->mapping, page_to_pgoff(page)); if (PageCompound(page)) pr_cont(" compound_mapcount: %d", compound_mapcount(page)); pr_cont("\n"); BUILD_BUG_ON(ARRAY_SIZE(pageflag_names) != __NR_PAGEFLAGS + 1); pr_emerg("flags: %#lx(%pGp)\n", page->flags, &page->flags); hex_only: print_hex_dump(KERN_ALERT, "raw: ", DUMP_PREFIX_NONE, 32, sizeof(unsigned long), page, sizeof(struct page), false); if (reason) pr_alert("page dumped because: %s\n", reason); #ifdef CONFIG_MEMCG if (!page_poisoned && page->mem_cgroup) pr_alert("page->mem_cgroup:%px\n", page->mem_cgroup); #endif } void dump_page(struct page *page, const char *reason) { __dump_page(page, reason); dump_page_owner(page); } EXPORT_SYMBOL(dump_page); #ifdef CONFIG_DEBUG_VM void dump_vma(const struct vm_area_struct *vma) { pr_emerg("vma %px start %px end %px\n" "next %px prev %px mm %px\n" "prot %lx anon_vma %px vm_ops %px\n" "pgoff %lx file %px private_data %px\n" "flags: %#lx(%pGv)\n", vma, (void *)vma->vm_start, (void *)vma->vm_end, vma->vm_next, vma->vm_prev, vma->vm_mm, (unsigned long)pgprot_val(vma->vm_page_prot), vma->anon_vma, vma->vm_ops, vma->vm_pgoff, vma->vm_file, vma->vm_private_data, vma->vm_flags, &vma->vm_flags); } EXPORT_SYMBOL(dump_vma); void dump_mm(const struct mm_struct *mm) { pr_emerg("mm %px mmap %px seqnum %llu task_size %lu\n" #ifdef CONFIG_MMU "get_unmapped_area %px\n" #endif "mmap_base %lu mmap_legacy_base %lu highest_vm_end %lu\n" "pgd %px mm_users %d mm_count %d pgtables_bytes %lu map_count %d\n" "hiwater_rss %lx hiwater_vm %lx total_vm %lx locked_vm %lx\n" "pinned_vm %lx data_vm %lx exec_vm %lx stack_vm %lx\n" "start_code %lx end_code %lx start_data %lx end_data %lx\n" "start_brk %lx brk %lx start_stack %lx\n" "arg_start %lx arg_end %lx env_start %lx env_end %lx\n" "binfmt %px flags %lx core_state %px\n" #ifdef CONFIG_AIO "ioctx_table %px\n" #endif #ifdef CONFIG_MEMCG 
"owner %px " #endif "exe_file %px\n" #ifdef CONFIG_MMU_NOTIFIER "mmu_notifier_mm %px\n" #endif #ifdef CONFIG_NUMA_BALANCING "numa_next_scan %lu numa_scan_offset %lu numa_scan_seq %d\n" #endif "tlb_flush_pending %d\n" "def_flags: %#lx(%pGv)\n", mm, mm->mmap, (long long) mm->vmacache_seqnum, mm->task_size, #ifdef CONFIG_MMU mm->get_unmapped_area, #endif mm->mmap_base, mm->mmap_legacy_base, mm->highest_vm_end, mm->pgd, atomic_read(&mm->mm_users), atomic_read(&mm->mm_count), mm_pgtables_bytes(mm), mm->map_count, mm->hiwater_rss, mm->hiwater_vm, mm->total_vm, mm->locked_vm, mm->pinned_vm, mm->data_vm, mm->exec_vm, mm->stack_vm, mm->start_code, mm->end_code, mm->start_data, mm->end_data, mm->start_brk, mm->brk, mm->start_stack, mm->arg_start, mm->arg_end, mm->env_start, mm->env_end, mm->binfmt, mm->flags, mm->core_state, #ifdef CONFIG_AIO mm->ioctx_table, #endif #ifdef CONFIG_MEMCG mm->owner, #endif mm->exe_file, #ifdef CONFIG_MMU_NOTIFIER mm->mmu_notifier_mm, #endif #ifdef CONFIG_NUMA_BALANCING mm->numa_next_scan, mm->numa_scan_offset, mm->numa_scan_seq, #endif atomic_read(&mm->tlb_flush_pending), mm->def_flags, &mm->def_flags ); } #endif /* CONFIG_DEBUG_VM */
void dump_mm(const struct mm_struct *mm) { pr_emerg("mm %px mmap %px seqnum %d task_size %lu\n" #ifdef CONFIG_MMU "get_unmapped_area %px\n" #endif "mmap_base %lu mmap_legacy_base %lu highest_vm_end %lu\n" "pgd %px mm_users %d mm_count %d pgtables_bytes %lu map_count %d\n" "hiwater_rss %lx hiwater_vm %lx total_vm %lx locked_vm %lx\n" "pinned_vm %lx data_vm %lx exec_vm %lx stack_vm %lx\n" "start_code %lx end_code %lx start_data %lx end_data %lx\n" "start_brk %lx brk %lx start_stack %lx\n" "arg_start %lx arg_end %lx env_start %lx env_end %lx\n" "binfmt %px flags %lx core_state %px\n" #ifdef CONFIG_AIO "ioctx_table %px\n" #endif #ifdef CONFIG_MEMCG "owner %px " #endif "exe_file %px\n" #ifdef CONFIG_MMU_NOTIFIER "mmu_notifier_mm %px\n" #endif #ifdef CONFIG_NUMA_BALANCING "numa_next_scan %lu numa_scan_offset %lu numa_scan_seq %d\n" #endif "tlb_flush_pending %d\n" "def_flags: %#lx(%pGv)\n", mm, mm->mmap, mm->vmacache_seqnum, mm->task_size, #ifdef CONFIG_MMU mm->get_unmapped_area, #endif mm->mmap_base, mm->mmap_legacy_base, mm->highest_vm_end, mm->pgd, atomic_read(&mm->mm_users), atomic_read(&mm->mm_count), mm_pgtables_bytes(mm), mm->map_count, mm->hiwater_rss, mm->hiwater_vm, mm->total_vm, mm->locked_vm, mm->pinned_vm, mm->data_vm, mm->exec_vm, mm->stack_vm, mm->start_code, mm->end_code, mm->start_data, mm->end_data, mm->start_brk, mm->brk, mm->start_stack, mm->arg_start, mm->arg_end, mm->env_start, mm->env_end, mm->binfmt, mm->flags, mm->core_state, #ifdef CONFIG_AIO mm->ioctx_table, #endif #ifdef CONFIG_MEMCG mm->owner, #endif mm->exe_file, #ifdef CONFIG_MMU_NOTIFIER mm->mmu_notifier_mm, #endif #ifdef CONFIG_NUMA_BALANCING mm->numa_next_scan, mm->numa_scan_offset, mm->numa_scan_seq, #endif atomic_read(&mm->tlb_flush_pending), mm->def_flags, &mm->def_flags ); }
void dump_mm(const struct mm_struct *mm) { pr_emerg("mm %px mmap %px seqnum %llu task_size %lu\n" #ifdef CONFIG_MMU "get_unmapped_area %px\n" #endif "mmap_base %lu mmap_legacy_base %lu highest_vm_end %lu\n" "pgd %px mm_users %d mm_count %d pgtables_bytes %lu map_count %d\n" "hiwater_rss %lx hiwater_vm %lx total_vm %lx locked_vm %lx\n" "pinned_vm %lx data_vm %lx exec_vm %lx stack_vm %lx\n" "start_code %lx end_code %lx start_data %lx end_data %lx\n" "start_brk %lx brk %lx start_stack %lx\n" "arg_start %lx arg_end %lx env_start %lx env_end %lx\n" "binfmt %px flags %lx core_state %px\n" #ifdef CONFIG_AIO "ioctx_table %px\n" #endif #ifdef CONFIG_MEMCG "owner %px " #endif "exe_file %px\n" #ifdef CONFIG_MMU_NOTIFIER "mmu_notifier_mm %px\n" #endif #ifdef CONFIG_NUMA_BALANCING "numa_next_scan %lu numa_scan_offset %lu numa_scan_seq %d\n" #endif "tlb_flush_pending %d\n" "def_flags: %#lx(%pGv)\n", mm, mm->mmap, (long long) mm->vmacache_seqnum, mm->task_size, #ifdef CONFIG_MMU mm->get_unmapped_area, #endif mm->mmap_base, mm->mmap_legacy_base, mm->highest_vm_end, mm->pgd, atomic_read(&mm->mm_users), atomic_read(&mm->mm_count), mm_pgtables_bytes(mm), mm->map_count, mm->hiwater_rss, mm->hiwater_vm, mm->total_vm, mm->locked_vm, mm->pinned_vm, mm->data_vm, mm->exec_vm, mm->stack_vm, mm->start_code, mm->end_code, mm->start_data, mm->end_data, mm->start_brk, mm->brk, mm->start_stack, mm->arg_start, mm->arg_end, mm->env_start, mm->env_end, mm->binfmt, mm->flags, mm->core_state, #ifdef CONFIG_AIO mm->ioctx_table, #endif #ifdef CONFIG_MEMCG mm->owner, #endif mm->exe_file, #ifdef CONFIG_MMU_NOTIFIER mm->mmu_notifier_mm, #endif #ifdef CONFIG_NUMA_BALANCING mm->numa_next_scan, mm->numa_scan_offset, mm->numa_scan_seq, #endif atomic_read(&mm->tlb_flush_pending), mm->def_flags, &mm->def_flags ); }
{'added': [(117, '\tpr_emerg("mm %px mmap %px seqnum %llu task_size %lu\\n"'), (145, '\t\tmm, mm->mmap, (long long) mm->vmacache_seqnum, mm->task_size,')], 'deleted': [(117, '\tpr_emerg("mm %px mmap %px seqnum %d task_size %lu\\n"'), (145, '\t\tmm, mm->mmap, mm->vmacache_seqnum, mm->task_size,')]}
2
2
117
631
41
223
11
https://github.com/torvalds/linux
CVE-2018-17182
CWE-416
1,035
print-bgp.c
C++
decode_rt_routing_info
/* * Copyright (C) 1999 WIDE Project. * All rights reserved. * * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following conditions * are met: * 1. Redistributions of source code must retain the above copyright * notice, this list of conditions and the following disclaimer. * 2. Redistributions in binary form must reproduce the above copyright * notice, this list of conditions and the following disclaimer in the * documentation and/or other materials provided with the distribution. * 3. Neither the name of the project nor the names of its contributors * may be used to endorse or promote products derived from this software * without specific prior written permission. * * THIS SOFTWARE IS PROVIDED BY THE PROJECT AND CONTRIBUTORS ``AS IS'' AND * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE * ARE DISCLAIMED. IN NO EVENT SHALL THE PROJECT OR CONTRIBUTORS BE LIABLE * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF * SUCH DAMAGE. * * Extensively modified by Hannes Gredler (hannes@gredler.at) for more * complete BGP support. 
*/ /* \summary: Border Gateway Protocol (BGP) printer */ #ifdef HAVE_CONFIG_H #include "config.h" #endif #include <netdissect-stdinc.h> #include <stdio.h> #include <string.h> #include "netdissect.h" #include "addrtoname.h" #include "extract.h" #include "af.h" #include "l2vpn.h" struct bgp { uint8_t bgp_marker[16]; uint16_t bgp_len; uint8_t bgp_type; }; #define BGP_SIZE 19 /* unaligned */ #define BGP_OPEN 1 #define BGP_UPDATE 2 #define BGP_NOTIFICATION 3 #define BGP_KEEPALIVE 4 #define BGP_ROUTE_REFRESH 5 static const struct tok bgp_msg_values[] = { { BGP_OPEN, "Open"}, { BGP_UPDATE, "Update"}, { BGP_NOTIFICATION, "Notification"}, { BGP_KEEPALIVE, "Keepalive"}, { BGP_ROUTE_REFRESH, "Route Refresh"}, { 0, NULL} }; struct bgp_open { uint8_t bgpo_marker[16]; uint16_t bgpo_len; uint8_t bgpo_type; uint8_t bgpo_version; uint16_t bgpo_myas; uint16_t bgpo_holdtime; uint32_t bgpo_id; uint8_t bgpo_optlen; /* options should follow */ }; #define BGP_OPEN_SIZE 29 /* unaligned */ struct bgp_opt { uint8_t bgpopt_type; uint8_t bgpopt_len; /* variable length */ }; #define BGP_OPT_SIZE 2 /* some compilers may pad to 4 bytes */ #define BGP_CAP_HEADER_SIZE 2 /* some compilers may pad to 4 bytes */ struct bgp_notification { uint8_t bgpn_marker[16]; uint16_t bgpn_len; uint8_t bgpn_type; uint8_t bgpn_major; uint8_t bgpn_minor; }; #define BGP_NOTIFICATION_SIZE 21 /* unaligned */ struct bgp_route_refresh { uint8_t bgp_marker[16]; uint16_t len; uint8_t type; uint8_t afi[2]; /* the compiler messes this structure up */ uint8_t res; /* when doing misaligned sequences of int8 and int16 */ uint8_t safi; /* afi should be int16 - so we have to access it using */ }; /* EXTRACT_16BITS(&bgp_route_refresh->afi) (sigh) */ #define BGP_ROUTE_REFRESH_SIZE 23 #define bgp_attr_lenlen(flags, p) \ (((flags) & 0x10) ? 2 : 1) #define bgp_attr_len(flags, p) \ (((flags) & 0x10) ? 
EXTRACT_16BITS(p) : *(p)) #define BGPTYPE_ORIGIN 1 #define BGPTYPE_AS_PATH 2 #define BGPTYPE_NEXT_HOP 3 #define BGPTYPE_MULTI_EXIT_DISC 4 #define BGPTYPE_LOCAL_PREF 5 #define BGPTYPE_ATOMIC_AGGREGATE 6 #define BGPTYPE_AGGREGATOR 7 #define BGPTYPE_COMMUNITIES 8 /* RFC1997 */ #define BGPTYPE_ORIGINATOR_ID 9 /* RFC4456 */ #define BGPTYPE_CLUSTER_LIST 10 /* RFC4456 */ #define BGPTYPE_DPA 11 /* deprecated, draft-ietf-idr-bgp-dpa */ #define BGPTYPE_ADVERTISERS 12 /* deprecated RFC1863 */ #define BGPTYPE_RCID_PATH 13 /* deprecated RFC1863 */ #define BGPTYPE_MP_REACH_NLRI 14 /* RFC4760 */ #define BGPTYPE_MP_UNREACH_NLRI 15 /* RFC4760 */ #define BGPTYPE_EXTD_COMMUNITIES 16 /* RFC4360 */ #define BGPTYPE_AS4_PATH 17 /* RFC6793 */ #define BGPTYPE_AGGREGATOR4 18 /* RFC6793 */ #define BGPTYPE_PMSI_TUNNEL 22 /* RFC6514 */ #define BGPTYPE_TUNNEL_ENCAP 23 /* RFC5512 */ #define BGPTYPE_TRAFFIC_ENG 24 /* RFC5543 */ #define BGPTYPE_IPV6_EXTD_COMMUNITIES 25 /* RFC5701 */ #define BGPTYPE_AIGP 26 /* RFC7311 */ #define BGPTYPE_PE_DISTINGUISHER_LABEL 27 /* RFC6514 */ #define BGPTYPE_ENTROPY_LABEL 28 /* RFC6790 */ #define BGPTYPE_LARGE_COMMUNITY 32 /* draft-ietf-idr-large-community-05 */ #define BGPTYPE_ATTR_SET 128 /* RFC6368 */ #define BGP_MP_NLRI_MINSIZE 3 /* End of RIB Marker detection */ static const struct tok bgp_attr_values[] = { { BGPTYPE_ORIGIN, "Origin"}, { BGPTYPE_AS_PATH, "AS Path"}, { BGPTYPE_AS4_PATH, "AS4 Path"}, { BGPTYPE_NEXT_HOP, "Next Hop"}, { BGPTYPE_MULTI_EXIT_DISC, "Multi Exit Discriminator"}, { BGPTYPE_LOCAL_PREF, "Local Preference"}, { BGPTYPE_ATOMIC_AGGREGATE, "Atomic Aggregate"}, { BGPTYPE_AGGREGATOR, "Aggregator"}, { BGPTYPE_AGGREGATOR4, "Aggregator4"}, { BGPTYPE_COMMUNITIES, "Community"}, { BGPTYPE_ORIGINATOR_ID, "Originator ID"}, { BGPTYPE_CLUSTER_LIST, "Cluster List"}, { BGPTYPE_DPA, "DPA"}, { BGPTYPE_ADVERTISERS, "Advertisers"}, { BGPTYPE_RCID_PATH, "RCID Path / Cluster ID"}, { BGPTYPE_MP_REACH_NLRI, "Multi-Protocol Reach NLRI"}, { BGPTYPE_MP_UNREACH_NLRI, 
"Multi-Protocol Unreach NLRI"}, { BGPTYPE_EXTD_COMMUNITIES, "Extended Community"}, { BGPTYPE_PMSI_TUNNEL, "PMSI Tunnel"}, { BGPTYPE_TUNNEL_ENCAP, "Tunnel Encapsulation"}, { BGPTYPE_TRAFFIC_ENG, "Traffic Engineering"}, { BGPTYPE_IPV6_EXTD_COMMUNITIES, "IPv6 Extended Community"}, { BGPTYPE_AIGP, "Accumulated IGP Metric"}, { BGPTYPE_PE_DISTINGUISHER_LABEL, "PE Distinguisher Label"}, { BGPTYPE_ENTROPY_LABEL, "Entropy Label"}, { BGPTYPE_LARGE_COMMUNITY, "Large Community"}, { BGPTYPE_ATTR_SET, "Attribute Set"}, { 255, "Reserved for development"}, { 0, NULL} }; #define BGP_AS_SET 1 #define BGP_AS_SEQUENCE 2 #define BGP_CONFED_AS_SEQUENCE 3 /* draft-ietf-idr-rfc3065bis-01 */ #define BGP_CONFED_AS_SET 4 /* draft-ietf-idr-rfc3065bis-01 */ #define BGP_AS_SEG_TYPE_MIN BGP_AS_SET #define BGP_AS_SEG_TYPE_MAX BGP_CONFED_AS_SET static const struct tok bgp_as_path_segment_open_values[] = { { BGP_AS_SEQUENCE, ""}, { BGP_AS_SET, "{ "}, { BGP_CONFED_AS_SEQUENCE, "( "}, { BGP_CONFED_AS_SET, "({ "}, { 0, NULL} }; static const struct tok bgp_as_path_segment_close_values[] = { { BGP_AS_SEQUENCE, ""}, { BGP_AS_SET, "}"}, { BGP_CONFED_AS_SEQUENCE, ")"}, { BGP_CONFED_AS_SET, "})"}, { 0, NULL} }; #define BGP_OPT_AUTH 1 #define BGP_OPT_CAP 2 static const struct tok bgp_opt_values[] = { { BGP_OPT_AUTH, "Authentication Information"}, { BGP_OPT_CAP, "Capabilities Advertisement"}, { 0, NULL} }; #define BGP_CAPCODE_MP 1 /* RFC2858 */ #define BGP_CAPCODE_RR 2 /* RFC2918 */ #define BGP_CAPCODE_ORF 3 /* RFC5291 */ #define BGP_CAPCODE_MR 4 /* RFC3107 */ #define BGP_CAPCODE_EXT_NH 5 /* RFC5549 */ #define BGP_CAPCODE_RESTART 64 /* RFC4724 */ #define BGP_CAPCODE_AS_NEW 65 /* RFC6793 */ #define BGP_CAPCODE_DYN_CAP 67 /* draft-ietf-idr-dynamic-cap */ #define BGP_CAPCODE_MULTISESS 68 /* draft-ietf-idr-bgp-multisession */ #define BGP_CAPCODE_ADD_PATH 69 /* RFC7911 */ #define BGP_CAPCODE_ENH_RR 70 /* draft-keyur-bgp-enhanced-route-refresh */ #define BGP_CAPCODE_RR_CISCO 128 static const struct tok 
bgp_capcode_values[] = { { BGP_CAPCODE_MP, "Multiprotocol Extensions"}, { BGP_CAPCODE_RR, "Route Refresh"}, { BGP_CAPCODE_ORF, "Cooperative Route Filtering"}, { BGP_CAPCODE_MR, "Multiple Routes to a Destination"}, { BGP_CAPCODE_EXT_NH, "Extended Next Hop Encoding"}, { BGP_CAPCODE_RESTART, "Graceful Restart"}, { BGP_CAPCODE_AS_NEW, "32-Bit AS Number"}, { BGP_CAPCODE_DYN_CAP, "Dynamic Capability"}, { BGP_CAPCODE_MULTISESS, "Multisession BGP"}, { BGP_CAPCODE_ADD_PATH, "Multiple Paths"}, { BGP_CAPCODE_ENH_RR, "Enhanced Route Refresh"}, { BGP_CAPCODE_RR_CISCO, "Route Refresh (Cisco)"}, { 0, NULL} }; #define BGP_NOTIFY_MAJOR_MSG 1 #define BGP_NOTIFY_MAJOR_OPEN 2 #define BGP_NOTIFY_MAJOR_UPDATE 3 #define BGP_NOTIFY_MAJOR_HOLDTIME 4 #define BGP_NOTIFY_MAJOR_FSM 5 #define BGP_NOTIFY_MAJOR_CEASE 6 #define BGP_NOTIFY_MAJOR_CAP 7 static const struct tok bgp_notify_major_values[] = { { BGP_NOTIFY_MAJOR_MSG, "Message Header Error"}, { BGP_NOTIFY_MAJOR_OPEN, "OPEN Message Error"}, { BGP_NOTIFY_MAJOR_UPDATE, "UPDATE Message Error"}, { BGP_NOTIFY_MAJOR_HOLDTIME,"Hold Timer Expired"}, { BGP_NOTIFY_MAJOR_FSM, "Finite State Machine Error"}, { BGP_NOTIFY_MAJOR_CEASE, "Cease"}, { BGP_NOTIFY_MAJOR_CAP, "Capability Message Error"}, { 0, NULL} }; /* draft-ietf-idr-cease-subcode-02 */ #define BGP_NOTIFY_MINOR_CEASE_MAXPRFX 1 /* draft-ietf-idr-shutdown-07 */ #define BGP_NOTIFY_MINOR_CEASE_SHUT 2 #define BGP_NOTIFY_MINOR_CEASE_RESET 4 #define BGP_NOTIFY_MINOR_CEASE_ADMIN_SHUTDOWN_LEN 128 static const struct tok bgp_notify_minor_cease_values[] = { { BGP_NOTIFY_MINOR_CEASE_MAXPRFX, "Maximum Number of Prefixes Reached"}, { BGP_NOTIFY_MINOR_CEASE_SHUT, "Administrative Shutdown"}, { 3, "Peer Unconfigured"}, { BGP_NOTIFY_MINOR_CEASE_RESET, "Administrative Reset"}, { 5, "Connection Rejected"}, { 6, "Other Configuration Change"}, { 7, "Connection Collision Resolution"}, { 0, NULL} }; static const struct tok bgp_notify_minor_msg_values[] = { { 1, "Connection Not Synchronized"}, { 2, "Bad Message 
Length"}, { 3, "Bad Message Type"}, { 0, NULL} }; static const struct tok bgp_notify_minor_open_values[] = { { 1, "Unsupported Version Number"}, { 2, "Bad Peer AS"}, { 3, "Bad BGP Identifier"}, { 4, "Unsupported Optional Parameter"}, { 5, "Authentication Failure"}, { 6, "Unacceptable Hold Time"}, { 7, "Capability Message Error"}, { 0, NULL} }; static const struct tok bgp_notify_minor_update_values[] = { { 1, "Malformed Attribute List"}, { 2, "Unrecognized Well-known Attribute"}, { 3, "Missing Well-known Attribute"}, { 4, "Attribute Flags Error"}, { 5, "Attribute Length Error"}, { 6, "Invalid ORIGIN Attribute"}, { 7, "AS Routing Loop"}, { 8, "Invalid NEXT_HOP Attribute"}, { 9, "Optional Attribute Error"}, { 10, "Invalid Network Field"}, { 11, "Malformed AS_PATH"}, { 0, NULL} }; static const struct tok bgp_notify_minor_fsm_values[] = { { 0, "Unspecified Error"}, { 1, "In OpenSent State"}, { 2, "In OpenConfirm State"}, { 3, "In Established State"}, { 0, NULL } }; static const struct tok bgp_notify_minor_cap_values[] = { { 1, "Invalid Action Value" }, { 2, "Invalid Capability Length" }, { 3, "Malformed Capability Value" }, { 4, "Unsupported Capability Code" }, { 0, NULL } }; static const struct tok bgp_origin_values[] = { { 0, "IGP"}, { 1, "EGP"}, { 2, "Incomplete"}, { 0, NULL} }; #define BGP_PMSI_TUNNEL_RSVP_P2MP 1 #define BGP_PMSI_TUNNEL_LDP_P2MP 2 #define BGP_PMSI_TUNNEL_PIM_SSM 3 #define BGP_PMSI_TUNNEL_PIM_SM 4 #define BGP_PMSI_TUNNEL_PIM_BIDIR 5 #define BGP_PMSI_TUNNEL_INGRESS 6 #define BGP_PMSI_TUNNEL_LDP_MP2MP 7 static const struct tok bgp_pmsi_tunnel_values[] = { { BGP_PMSI_TUNNEL_RSVP_P2MP, "RSVP-TE P2MP LSP"}, { BGP_PMSI_TUNNEL_LDP_P2MP, "LDP P2MP LSP"}, { BGP_PMSI_TUNNEL_PIM_SSM, "PIM-SSM Tree"}, { BGP_PMSI_TUNNEL_PIM_SM, "PIM-SM Tree"}, { BGP_PMSI_TUNNEL_PIM_BIDIR, "PIM-Bidir Tree"}, { BGP_PMSI_TUNNEL_INGRESS, "Ingress Replication"}, { BGP_PMSI_TUNNEL_LDP_MP2MP, "LDP MP2MP LSP"}, { 0, NULL} }; static const struct tok bgp_pmsi_flag_values[] = { { 0x01, 
"Leaf Information required"}, { 0, NULL} }; #define BGP_AIGP_TLV 1 static const struct tok bgp_aigp_values[] = { { BGP_AIGP_TLV, "AIGP"}, { 0, NULL} }; /* Subsequent address family identifier, RFC2283 section 7 */ #define SAFNUM_RES 0 #define SAFNUM_UNICAST 1 #define SAFNUM_MULTICAST 2 #define SAFNUM_UNIMULTICAST 3 /* deprecated now */ /* labeled BGP RFC3107 */ #define SAFNUM_LABUNICAST 4 /* RFC6514 */ #define SAFNUM_MULTICAST_VPN 5 /* draft-nalawade-kapoor-tunnel-safi */ #define SAFNUM_TUNNEL 64 /* RFC4761 */ #define SAFNUM_VPLS 65 /* RFC6037 */ #define SAFNUM_MDT 66 /* RFC4364 */ #define SAFNUM_VPNUNICAST 128 /* RFC6513 */ #define SAFNUM_VPNMULTICAST 129 #define SAFNUM_VPNUNIMULTICAST 130 /* deprecated now */ /* RFC4684 */ #define SAFNUM_RT_ROUTING_INFO 132 #define BGP_VPN_RD_LEN 8 static const struct tok bgp_safi_values[] = { { SAFNUM_RES, "Reserved"}, { SAFNUM_UNICAST, "Unicast"}, { SAFNUM_MULTICAST, "Multicast"}, { SAFNUM_UNIMULTICAST, "Unicast+Multicast"}, { SAFNUM_LABUNICAST, "labeled Unicast"}, { SAFNUM_TUNNEL, "Tunnel"}, { SAFNUM_VPLS, "VPLS"}, { SAFNUM_MDT, "MDT"}, { SAFNUM_VPNUNICAST, "labeled VPN Unicast"}, { SAFNUM_VPNMULTICAST, "labeled VPN Multicast"}, { SAFNUM_VPNUNIMULTICAST, "labeled VPN Unicast+Multicast"}, { SAFNUM_RT_ROUTING_INFO, "Route Target Routing Information"}, { SAFNUM_MULTICAST_VPN, "Multicast VPN"}, { 0, NULL } }; /* well-known community */ #define BGP_COMMUNITY_NO_EXPORT 0xffffff01 #define BGP_COMMUNITY_NO_ADVERT 0xffffff02 #define BGP_COMMUNITY_NO_EXPORT_SUBCONFED 0xffffff03 /* Extended community type - draft-ietf-idr-bgp-ext-communities-05 */ #define BGP_EXT_COM_RT_0 0x0002 /* Route Target,Format AS(2bytes):AN(4bytes) */ #define BGP_EXT_COM_RT_1 0x0102 /* Route Target,Format IP address:AN(2bytes) */ #define BGP_EXT_COM_RT_2 0x0202 /* Route Target,Format AN(4bytes):local(2bytes) */ #define BGP_EXT_COM_RO_0 0x0003 /* Route Origin,Format AS(2bytes):AN(4bytes) */ #define BGP_EXT_COM_RO_1 0x0103 /* Route Origin,Format IP 
address:AN(2bytes) */ #define BGP_EXT_COM_RO_2 0x0203 /* Route Origin,Format AN(4bytes):local(2bytes) */ #define BGP_EXT_COM_LINKBAND 0x4004 /* Link Bandwidth,Format AS(2B):Bandwidth(4B) */ /* rfc2547 bgp-mpls-vpns */ #define BGP_EXT_COM_VPN_ORIGIN 0x0005 /* OSPF Domain ID / VPN of Origin - draft-rosen-vpns-ospf-bgp-mpls */ #define BGP_EXT_COM_VPN_ORIGIN2 0x0105 /* duplicate - keep for backwards compatability */ #define BGP_EXT_COM_VPN_ORIGIN3 0x0205 /* duplicate - keep for backwards compatability */ #define BGP_EXT_COM_VPN_ORIGIN4 0x8005 /* duplicate - keep for backwards compatability */ #define BGP_EXT_COM_OSPF_RTYPE 0x0306 /* OSPF Route Type,Format Area(4B):RouteType(1B):Options(1B) */ #define BGP_EXT_COM_OSPF_RTYPE2 0x8000 /* duplicate - keep for backwards compatability */ #define BGP_EXT_COM_OSPF_RID 0x0107 /* OSPF Router ID,Format RouterID(4B):Unused(2B) */ #define BGP_EXT_COM_OSPF_RID2 0x8001 /* duplicate - keep for backwards compatability */ #define BGP_EXT_COM_L2INFO 0x800a /* draft-kompella-ppvpn-l2vpn */ #define BGP_EXT_COM_SOURCE_AS 0x0009 /* RFC-ietf-l3vpn-2547bis-mcast-bgp-08.txt */ #define BGP_EXT_COM_VRF_RT_IMP 0x010b /* RFC-ietf-l3vpn-2547bis-mcast-bgp-08.txt */ #define BGP_EXT_COM_L2VPN_RT_0 0x000a /* L2VPN Identifier,Format AS(2bytes):AN(4bytes) */ #define BGP_EXT_COM_L2VPN_RT_1 0xF10a /* L2VPN Identifier,Format IP address:AN(2bytes) */ /* http://www.cisco.com/en/US/tech/tk436/tk428/technologies_tech_note09186a00801eb09a.shtml */ #define BGP_EXT_COM_EIGRP_GEN 0x8800 #define BGP_EXT_COM_EIGRP_METRIC_AS_DELAY 0x8801 #define BGP_EXT_COM_EIGRP_METRIC_REL_NH_BW 0x8802 #define BGP_EXT_COM_EIGRP_METRIC_LOAD_MTU 0x8803 #define BGP_EXT_COM_EIGRP_EXT_REMAS_REMID 0x8804 #define BGP_EXT_COM_EIGRP_EXT_REMPROTO_REMMETRIC 0x8805 static const struct tok bgp_extd_comm_flag_values[] = { { 0x8000, "vendor-specific"}, { 0x4000, "non-transitive"}, { 0, NULL}, }; static const struct tok bgp_extd_comm_subtype_values[] = { { BGP_EXT_COM_RT_0, "target"}, { 
BGP_EXT_COM_RT_1, "target"}, { BGP_EXT_COM_RT_2, "target"}, { BGP_EXT_COM_RO_0, "origin"}, { BGP_EXT_COM_RO_1, "origin"}, { BGP_EXT_COM_RO_2, "origin"}, { BGP_EXT_COM_LINKBAND, "link-BW"}, { BGP_EXT_COM_VPN_ORIGIN, "ospf-domain"}, { BGP_EXT_COM_VPN_ORIGIN2, "ospf-domain"}, { BGP_EXT_COM_VPN_ORIGIN3, "ospf-domain"}, { BGP_EXT_COM_VPN_ORIGIN4, "ospf-domain"}, { BGP_EXT_COM_OSPF_RTYPE, "ospf-route-type"}, { BGP_EXT_COM_OSPF_RTYPE2, "ospf-route-type"}, { BGP_EXT_COM_OSPF_RID, "ospf-router-id"}, { BGP_EXT_COM_OSPF_RID2, "ospf-router-id"}, { BGP_EXT_COM_L2INFO, "layer2-info"}, { BGP_EXT_COM_EIGRP_GEN , "eigrp-general-route (flag, tag)" }, { BGP_EXT_COM_EIGRP_METRIC_AS_DELAY , "eigrp-route-metric (AS, delay)" }, { BGP_EXT_COM_EIGRP_METRIC_REL_NH_BW , "eigrp-route-metric (reliability, nexthop, bandwidth)" }, { BGP_EXT_COM_EIGRP_METRIC_LOAD_MTU , "eigrp-route-metric (load, MTU)" }, { BGP_EXT_COM_EIGRP_EXT_REMAS_REMID , "eigrp-external-route (remote-AS, remote-ID)" }, { BGP_EXT_COM_EIGRP_EXT_REMPROTO_REMMETRIC , "eigrp-external-route (remote-proto, remote-metric)" }, { BGP_EXT_COM_SOURCE_AS, "source-AS" }, { BGP_EXT_COM_VRF_RT_IMP, "vrf-route-import"}, { BGP_EXT_COM_L2VPN_RT_0, "l2vpn-id"}, { BGP_EXT_COM_L2VPN_RT_1, "l2vpn-id"}, { 0, NULL}, }; /* OSPF codes for BGP_EXT_COM_OSPF_RTYPE draft-rosen-vpns-ospf-bgp-mpls */ #define BGP_OSPF_RTYPE_RTR 1 /* OSPF Router LSA */ #define BGP_OSPF_RTYPE_NET 2 /* OSPF Network LSA */ #define BGP_OSPF_RTYPE_SUM 3 /* OSPF Summary LSA */ #define BGP_OSPF_RTYPE_EXT 5 /* OSPF External LSA, note that ASBR doesn't apply to MPLS-VPN */ #define BGP_OSPF_RTYPE_NSSA 7 /* OSPF NSSA External*/ #define BGP_OSPF_RTYPE_SHAM 129 /* OSPF-MPLS-VPN Sham link */ #define BGP_OSPF_RTYPE_METRIC_TYPE 0x1 /* LSB of RTYPE Options Field */ static const struct tok bgp_extd_comm_ospf_rtype_values[] = { { BGP_OSPF_RTYPE_RTR, "Router" }, { BGP_OSPF_RTYPE_NET, "Network" }, { BGP_OSPF_RTYPE_SUM, "Summary" }, { BGP_OSPF_RTYPE_EXT, "External" }, { BGP_OSPF_RTYPE_NSSA,"NSSA 
External" }, { BGP_OSPF_RTYPE_SHAM,"MPLS-VPN Sham" }, { 0, NULL }, }; /* ADD-PATH Send/Receive field values */ static const struct tok bgp_add_path_recvsend[] = { { 1, "Receive" }, { 2, "Send" }, { 3, "Both" }, { 0, NULL }, }; static char astostr[20]; /* * as_printf * * Convert an AS number into a string and return string pointer. * * Depending on bflag is set or not, AS number is converted into ASDOT notation * or plain number notation. * */ static char * as_printf(netdissect_options *ndo, char *str, int size, u_int asnum) { if (!ndo->ndo_bflag || asnum <= 0xFFFF) { snprintf(str, size, "%u", asnum); } else { snprintf(str, size, "%u.%u", asnum >> 16, asnum & 0xFFFF); } return str; } #define ITEMCHECK(minlen) if (itemlen < minlen) goto badtlv; int decode_prefix4(netdissect_options *ndo, const u_char *pptr, u_int itemlen, char *buf, u_int buflen) { struct in_addr addr; u_int plen, plenbytes; ND_TCHECK(pptr[0]); ITEMCHECK(1); plen = pptr[0]; if (32 < plen) return -1; itemlen -= 1; memset(&addr, 0, sizeof(addr)); plenbytes = (plen + 7) / 8; ND_TCHECK2(pptr[1], plenbytes); ITEMCHECK(plenbytes); memcpy(&addr, &pptr[1], plenbytes); if (plen % 8) { ((u_char *)&addr)[plenbytes - 1] &= ((0xff00 >> (plen % 8)) & 0xff); } snprintf(buf, buflen, "%s/%d", ipaddr_string(ndo, &addr), plen); return 1 + plenbytes; trunc: return -2; badtlv: return -3; } static int decode_labeled_prefix4(netdissect_options *ndo, const u_char *pptr, u_int itemlen, char *buf, u_int buflen) { struct in_addr addr; u_int plen, plenbytes; /* prefix length and label = 4 bytes */ ND_TCHECK2(pptr[0], 4); ITEMCHECK(4); plen = pptr[0]; /* get prefix length */ /* this is one of the weirdnesses of rfc3107 the label length (actually the label + COS bits) is added to the prefix length; we also do only read out just one label - there is no real application for advertisement of stacked labels in a single BGP message */ if (24 > plen) return -1; plen-=24; /* adjust prefixlen - labellength */ if (32 < plen) return -1; 
itemlen -= 4; memset(&addr, 0, sizeof(addr)); plenbytes = (plen + 7) / 8; ND_TCHECK2(pptr[4], plenbytes); ITEMCHECK(plenbytes); memcpy(&addr, &pptr[4], plenbytes); if (plen % 8) { ((u_char *)&addr)[plenbytes - 1] &= ((0xff00 >> (plen % 8)) & 0xff); } /* the label may get offsetted by 4 bits so lets shift it right */ snprintf(buf, buflen, "%s/%d, label:%u %s", ipaddr_string(ndo, &addr), plen, EXTRACT_24BITS(pptr+1)>>4, ((pptr[3]&1)==0) ? "(BOGUS: Bottom of Stack NOT set!)" : "(bottom)" ); return 4 + plenbytes; trunc: return -2; badtlv: return -3; } /* * bgp_vpn_ip_print * * print an ipv4 or ipv6 address into a buffer dependend on address length. */ static char * bgp_vpn_ip_print(netdissect_options *ndo, const u_char *pptr, u_int addr_length) { /* worst case string is s fully formatted v6 address */ static char addr[sizeof("1234:5678:89ab:cdef:1234:5678:89ab:cdef")]; char *pos = addr; switch(addr_length) { case (sizeof(struct in_addr) << 3): /* 32 */ ND_TCHECK2(pptr[0], sizeof(struct in_addr)); snprintf(pos, sizeof(addr), "%s", ipaddr_string(ndo, pptr)); break; case (sizeof(struct in6_addr) << 3): /* 128 */ ND_TCHECK2(pptr[0], sizeof(struct in6_addr)); snprintf(pos, sizeof(addr), "%s", ip6addr_string(ndo, pptr)); break; default: snprintf(pos, sizeof(addr), "bogus address length %u", addr_length); break; } pos += strlen(pos); trunc: *(pos) = '\0'; return (addr); } /* * bgp_vpn_sg_print * * print an multicast s,g entry into a buffer. * the s,g entry is encoded like this. * * +-----------------------------------+ * | Multicast Source Length (1 octet) | * +-----------------------------------+ * | Multicast Source (Variable) | * +-----------------------------------+ * | Multicast Group Length (1 octet) | * +-----------------------------------+ * | Multicast Group (Variable) | * +-----------------------------------+ * * return the number of bytes read from the wire. 
*/ static int bgp_vpn_sg_print(netdissect_options *ndo, const u_char *pptr, char *buf, u_int buflen) { uint8_t addr_length; u_int total_length, offset; total_length = 0; /* Source address length, encoded in bits */ ND_TCHECK2(pptr[0], 1); addr_length = *pptr++; /* Source address */ ND_TCHECK2(pptr[0], (addr_length >> 3)); total_length += (addr_length >> 3) + 1; offset = strlen(buf); if (addr_length) { snprintf(buf + offset, buflen - offset, ", Source %s", bgp_vpn_ip_print(ndo, pptr, addr_length)); pptr += (addr_length >> 3); } /* Group address length, encoded in bits */ ND_TCHECK2(pptr[0], 1); addr_length = *pptr++; /* Group address */ ND_TCHECK2(pptr[0], (addr_length >> 3)); total_length += (addr_length >> 3) + 1; offset = strlen(buf); if (addr_length) { snprintf(buf + offset, buflen - offset, ", Group %s", bgp_vpn_ip_print(ndo, pptr, addr_length)); pptr += (addr_length >> 3); } trunc: return (total_length); } /* RDs and RTs share the same semantics * we use bgp_vpn_rd_print for * printing route targets inside a NLRI */ char * bgp_vpn_rd_print(netdissect_options *ndo, const u_char *pptr) { /* allocate space for the largest possible string */ static char rd[sizeof("xxxxxxxxxx:xxxxx (xxx.xxx.xxx.xxx:xxxxx)")]; char *pos = rd; /* ok lets load the RD format */ switch (EXTRACT_16BITS(pptr)) { /* 2-byte-AS:number fmt*/ case 0: snprintf(pos, sizeof(rd) - (pos - rd), "%u:%u (= %u.%u.%u.%u)", EXTRACT_16BITS(pptr+2), EXTRACT_32BITS(pptr+4), *(pptr+4), *(pptr+5), *(pptr+6), *(pptr+7)); break; /* IP-address:AS fmt*/ case 1: snprintf(pos, sizeof(rd) - (pos - rd), "%u.%u.%u.%u:%u", *(pptr+2), *(pptr+3), *(pptr+4), *(pptr+5), EXTRACT_16BITS(pptr+6)); break; /* 4-byte-AS:number fmt*/ case 2: snprintf(pos, sizeof(rd) - (pos - rd), "%s:%u (%u.%u.%u.%u:%u)", as_printf(ndo, astostr, sizeof(astostr), EXTRACT_32BITS(pptr+2)), EXTRACT_16BITS(pptr+6), *(pptr+2), *(pptr+3), *(pptr+4), *(pptr+5), EXTRACT_16BITS(pptr+6)); break; default: snprintf(pos, sizeof(rd) - (pos - rd), "unknown RD 
format"); break; } pos += strlen(pos); *(pos) = '\0'; return (rd); } static int decode_rt_routing_info(netdissect_options *ndo, const u_char *pptr, char *buf, u_int buflen) { uint8_t route_target[8]; u_int plen; ND_TCHECK(pptr[0]); plen = pptr[0]; /* get prefix length */ if (0 == plen) { snprintf(buf, buflen, "default route target"); return 1; } if (32 > plen) return -1; plen-=32; /* adjust prefix length */ if (64 < plen) return -1; memset(&route_target, 0, sizeof(route_target)); ND_TCHECK2(pptr[1], (plen + 7) / 8); memcpy(&route_target, &pptr[1], (plen + 7) / 8); if (plen % 8) { ((u_char *)&route_target)[(plen + 7) / 8 - 1] &= ((0xff00 >> (plen % 8)) & 0xff); } snprintf(buf, buflen, "origin AS: %s, route target %s", as_printf(ndo, astostr, sizeof(astostr), EXTRACT_32BITS(pptr+1)), bgp_vpn_rd_print(ndo, (u_char *)&route_target)); return 5 + (plen + 7) / 8; trunc: return -2; } static int decode_labeled_vpn_prefix4(netdissect_options *ndo, const u_char *pptr, char *buf, u_int buflen) { struct in_addr addr; u_int plen; ND_TCHECK(pptr[0]); plen = pptr[0]; /* get prefix length */ if ((24+64) > plen) return -1; plen-=(24+64); /* adjust prefixlen - labellength - RD len*/ if (32 < plen) return -1; memset(&addr, 0, sizeof(addr)); ND_TCHECK2(pptr[12], (plen + 7) / 8); memcpy(&addr, &pptr[12], (plen + 7) / 8); if (plen % 8) { ((u_char *)&addr)[(plen + 7) / 8 - 1] &= ((0xff00 >> (plen % 8)) & 0xff); } /* the label may get offsetted by 4 bits so lets shift it right */ snprintf(buf, buflen, "RD: %s, %s/%d, label:%u %s", bgp_vpn_rd_print(ndo, pptr+4), ipaddr_string(ndo, &addr), plen, EXTRACT_24BITS(pptr+1)>>4, ((pptr[3]&1)==0) ? 
"(BOGUS: Bottom of Stack NOT set!)" : "(bottom)" ); return 12 + (plen + 7) / 8; trunc: return -2; } /* * +-------------------------------+ * | | * | RD:IPv4-address (12 octets) | * | | * +-------------------------------+ * | MDT Group-address (4 octets) | * +-------------------------------+ */ #define MDT_VPN_NLRI_LEN 16 static int decode_mdt_vpn_nlri(netdissect_options *ndo, const u_char *pptr, char *buf, u_int buflen) { const u_char *rd; const u_char *vpn_ip; ND_TCHECK(pptr[0]); /* if the NLRI is not predefined length, quit.*/ if (*pptr != MDT_VPN_NLRI_LEN * 8) return -1; pptr++; /* RD */ ND_TCHECK2(pptr[0], 8); rd = pptr; pptr+=8; /* IPv4 address */ ND_TCHECK2(pptr[0], sizeof(struct in_addr)); vpn_ip = pptr; pptr+=sizeof(struct in_addr); /* MDT Group Address */ ND_TCHECK2(pptr[0], sizeof(struct in_addr)); snprintf(buf, buflen, "RD: %s, VPN IP Address: %s, MC Group Address: %s", bgp_vpn_rd_print(ndo, rd), ipaddr_string(ndo, vpn_ip), ipaddr_string(ndo, pptr)); return MDT_VPN_NLRI_LEN + 1; trunc: return -2; } #define BGP_MULTICAST_VPN_ROUTE_TYPE_INTRA_AS_I_PMSI 1 #define BGP_MULTICAST_VPN_ROUTE_TYPE_INTER_AS_I_PMSI 2 #define BGP_MULTICAST_VPN_ROUTE_TYPE_S_PMSI 3 #define BGP_MULTICAST_VPN_ROUTE_TYPE_INTRA_AS_SEG_LEAF 4 #define BGP_MULTICAST_VPN_ROUTE_TYPE_SOURCE_ACTIVE 5 #define BGP_MULTICAST_VPN_ROUTE_TYPE_SHARED_TREE_JOIN 6 #define BGP_MULTICAST_VPN_ROUTE_TYPE_SOURCE_TREE_JOIN 7 static const struct tok bgp_multicast_vpn_route_type_values[] = { { BGP_MULTICAST_VPN_ROUTE_TYPE_INTRA_AS_I_PMSI, "Intra-AS I-PMSI"}, { BGP_MULTICAST_VPN_ROUTE_TYPE_INTER_AS_I_PMSI, "Inter-AS I-PMSI"}, { BGP_MULTICAST_VPN_ROUTE_TYPE_S_PMSI, "S-PMSI"}, { BGP_MULTICAST_VPN_ROUTE_TYPE_INTRA_AS_SEG_LEAF, "Intra-AS Segment-Leaf"}, { BGP_MULTICAST_VPN_ROUTE_TYPE_SOURCE_ACTIVE, "Source-Active"}, { BGP_MULTICAST_VPN_ROUTE_TYPE_SHARED_TREE_JOIN, "Shared Tree Join"}, { BGP_MULTICAST_VPN_ROUTE_TYPE_SOURCE_TREE_JOIN, "Source Tree Join"}, { 0, NULL} }; static int decode_multicast_vpn(netdissect_options 
*ndo, const u_char *pptr, char *buf, u_int buflen) { uint8_t route_type, route_length, addr_length, sg_length; u_int offset; ND_TCHECK2(pptr[0], 2); route_type = *pptr++; route_length = *pptr++; snprintf(buf, buflen, "Route-Type: %s (%u), length: %u", tok2str(bgp_multicast_vpn_route_type_values, "Unknown", route_type), route_type, route_length); switch(route_type) { case BGP_MULTICAST_VPN_ROUTE_TYPE_INTRA_AS_I_PMSI: ND_TCHECK2(pptr[0], BGP_VPN_RD_LEN); offset = strlen(buf); snprintf(buf + offset, buflen - offset, ", RD: %s, Originator %s", bgp_vpn_rd_print(ndo, pptr), bgp_vpn_ip_print(ndo, pptr + BGP_VPN_RD_LEN, (route_length - BGP_VPN_RD_LEN) << 3)); break; case BGP_MULTICAST_VPN_ROUTE_TYPE_INTER_AS_I_PMSI: ND_TCHECK2(pptr[0], BGP_VPN_RD_LEN + 4); offset = strlen(buf); snprintf(buf + offset, buflen - offset, ", RD: %s, Source-AS %s", bgp_vpn_rd_print(ndo, pptr), as_printf(ndo, astostr, sizeof(astostr), EXTRACT_32BITS(pptr + BGP_VPN_RD_LEN))); break; case BGP_MULTICAST_VPN_ROUTE_TYPE_S_PMSI: ND_TCHECK2(pptr[0], BGP_VPN_RD_LEN); offset = strlen(buf); snprintf(buf + offset, buflen - offset, ", RD: %s", bgp_vpn_rd_print(ndo, pptr)); pptr += BGP_VPN_RD_LEN; sg_length = bgp_vpn_sg_print(ndo, pptr, buf, buflen); addr_length = route_length - sg_length; ND_TCHECK2(pptr[0], addr_length); offset = strlen(buf); snprintf(buf + offset, buflen - offset, ", Originator %s", bgp_vpn_ip_print(ndo, pptr, addr_length << 3)); break; case BGP_MULTICAST_VPN_ROUTE_TYPE_SOURCE_ACTIVE: ND_TCHECK2(pptr[0], BGP_VPN_RD_LEN); offset = strlen(buf); snprintf(buf + offset, buflen - offset, ", RD: %s", bgp_vpn_rd_print(ndo, pptr)); pptr += BGP_VPN_RD_LEN; bgp_vpn_sg_print(ndo, pptr, buf, buflen); break; case BGP_MULTICAST_VPN_ROUTE_TYPE_SHARED_TREE_JOIN: /* fall through */ case BGP_MULTICAST_VPN_ROUTE_TYPE_SOURCE_TREE_JOIN: ND_TCHECK2(pptr[0], BGP_VPN_RD_LEN + 4); offset = strlen(buf); snprintf(buf + offset, buflen - offset, ", RD: %s, Source-AS %s", bgp_vpn_rd_print(ndo, pptr), as_printf(ndo, 
astostr, sizeof(astostr), EXTRACT_32BITS(pptr + BGP_VPN_RD_LEN))); pptr += BGP_VPN_RD_LEN + 4; bgp_vpn_sg_print(ndo, pptr, buf, buflen); break; /* * no per route-type printing yet. */ case BGP_MULTICAST_VPN_ROUTE_TYPE_INTRA_AS_SEG_LEAF: default: break; } return route_length + 2; trunc: return -2; } /* * As I remember, some versions of systems have an snprintf() that * returns -1 if the buffer would have overflowed. If the return * value is negative, set buflen to 0, to indicate that we've filled * the buffer up. * * If the return value is greater than buflen, that means that * the buffer would have overflowed; again, set buflen to 0 in * that case. */ #define UPDATE_BUF_BUFLEN(buf, buflen, stringlen) \ if (stringlen<0) \ buflen=0; \ else if ((u_int)stringlen>buflen) \ buflen=0; \ else { \ buflen-=stringlen; \ buf+=stringlen; \ } static int decode_labeled_vpn_l2(netdissect_options *ndo, const u_char *pptr, char *buf, u_int buflen) { int plen,tlen,stringlen,tlv_type,tlv_len,ttlv_len; ND_TCHECK2(pptr[0], 2); plen=EXTRACT_16BITS(pptr); tlen=plen; pptr+=2; /* Old and new L2VPN NLRI share AFI/SAFI * -> Assume a 12 Byte-length NLRI is auto-discovery-only * and > 17 as old format. 
Complain for the middle case */ if (plen==12) { /* assume AD-only with RD, BGPNH */ ND_TCHECK2(pptr[0],12); buf[0]='\0'; stringlen=snprintf(buf, buflen, "RD: %s, BGPNH: %s", bgp_vpn_rd_print(ndo, pptr), ipaddr_string(ndo, pptr+8) ); UPDATE_BUF_BUFLEN(buf, buflen, stringlen); pptr+=12; tlen-=12; return plen; } else if (plen>17) { /* assume old format */ /* RD, ID, LBLKOFF, LBLBASE */ ND_TCHECK2(pptr[0],15); buf[0]='\0'; stringlen=snprintf(buf, buflen, "RD: %s, CE-ID: %u, Label-Block Offset: %u, Label Base %u", bgp_vpn_rd_print(ndo, pptr), EXTRACT_16BITS(pptr+8), EXTRACT_16BITS(pptr+10), EXTRACT_24BITS(pptr+12)>>4); /* the label is offsetted by 4 bits so lets shift it right */ UPDATE_BUF_BUFLEN(buf, buflen, stringlen); pptr+=15; tlen-=15; /* ok now the variable part - lets read out TLVs*/ while (tlen>0) { if (tlen < 3) return -1; ND_TCHECK2(pptr[0], 3); tlv_type=*pptr++; tlv_len=EXTRACT_16BITS(pptr); ttlv_len=tlv_len; pptr+=2; switch(tlv_type) { case 1: if (buflen!=0) { stringlen=snprintf(buf,buflen, "\n\t\tcircuit status vector (%u) length: %u: 0x", tlv_type, tlv_len); UPDATE_BUF_BUFLEN(buf, buflen, stringlen); } ttlv_len=ttlv_len/8+1; /* how many bytes do we need to read ? */ while (ttlv_len>0) { ND_TCHECK(pptr[0]); if (buflen!=0) { stringlen=snprintf(buf,buflen, "%02x",*pptr++); UPDATE_BUF_BUFLEN(buf, buflen, stringlen); } ttlv_len--; } break; default: if (buflen!=0) { stringlen=snprintf(buf,buflen, "\n\t\tunknown TLV #%u, length: %u", tlv_type, tlv_len); UPDATE_BUF_BUFLEN(buf, buflen, stringlen); } break; } tlen-=(tlv_len<<3); /* the tlv-length is expressed in bits so lets shift it right */ } return plen+2; } else { /* complain bitterly ? 
*/ /* fall through */ goto trunc; } trunc: return -2; } int decode_prefix6(netdissect_options *ndo, const u_char *pd, u_int itemlen, char *buf, u_int buflen) { struct in6_addr addr; u_int plen, plenbytes; ND_TCHECK(pd[0]); ITEMCHECK(1); plen = pd[0]; if (128 < plen) return -1; itemlen -= 1; memset(&addr, 0, sizeof(addr)); plenbytes = (plen + 7) / 8; ND_TCHECK2(pd[1], plenbytes); ITEMCHECK(plenbytes); memcpy(&addr, &pd[1], plenbytes); if (plen % 8) { addr.s6_addr[plenbytes - 1] &= ((0xff00 >> (plen % 8)) & 0xff); } snprintf(buf, buflen, "%s/%d", ip6addr_string(ndo, &addr), plen); return 1 + plenbytes; trunc: return -2; badtlv: return -3; } static int decode_labeled_prefix6(netdissect_options *ndo, const u_char *pptr, u_int itemlen, char *buf, u_int buflen) { struct in6_addr addr; u_int plen, plenbytes; /* prefix length and label = 4 bytes */ ND_TCHECK2(pptr[0], 4); ITEMCHECK(4); plen = pptr[0]; /* get prefix length */ if (24 > plen) return -1; plen-=24; /* adjust prefixlen - labellength */ if (128 < plen) return -1; itemlen -= 4; memset(&addr, 0, sizeof(addr)); plenbytes = (plen + 7) / 8; ND_TCHECK2(pptr[4], plenbytes); memcpy(&addr, &pptr[4], plenbytes); if (plen % 8) { addr.s6_addr[plenbytes - 1] &= ((0xff00 >> (plen % 8)) & 0xff); } /* the label may get offsetted by 4 bits so lets shift it right */ snprintf(buf, buflen, "%s/%d, label:%u %s", ip6addr_string(ndo, &addr), plen, EXTRACT_24BITS(pptr+1)>>4, ((pptr[3]&1)==0) ? 
"(BOGUS: Bottom of Stack NOT set!)" : "(bottom)" ); return 4 + plenbytes; trunc: return -2; badtlv: return -3; } static int decode_labeled_vpn_prefix6(netdissect_options *ndo, const u_char *pptr, char *buf, u_int buflen) { struct in6_addr addr; u_int plen; ND_TCHECK(pptr[0]); plen = pptr[0]; /* get prefix length */ if ((24+64) > plen) return -1; plen-=(24+64); /* adjust prefixlen - labellength - RD len*/ if (128 < plen) return -1; memset(&addr, 0, sizeof(addr)); ND_TCHECK2(pptr[12], (plen + 7) / 8); memcpy(&addr, &pptr[12], (plen + 7) / 8); if (plen % 8) { addr.s6_addr[(plen + 7) / 8 - 1] &= ((0xff00 >> (plen % 8)) & 0xff); } /* the label may get offsetted by 4 bits so lets shift it right */ snprintf(buf, buflen, "RD: %s, %s/%d, label:%u %s", bgp_vpn_rd_print(ndo, pptr+4), ip6addr_string(ndo, &addr), plen, EXTRACT_24BITS(pptr+1)>>4, ((pptr[3]&1)==0) ? "(BOGUS: Bottom of Stack NOT set!)" : "(bottom)" ); return 12 + (plen + 7) / 8; trunc: return -2; } static int decode_clnp_prefix(netdissect_options *ndo, const u_char *pptr, char *buf, u_int buflen) { uint8_t addr[19]; u_int plen; ND_TCHECK(pptr[0]); plen = pptr[0]; /* get prefix length */ if (152 < plen) return -1; memset(&addr, 0, sizeof(addr)); ND_TCHECK2(pptr[4], (plen + 7) / 8); memcpy(&addr, &pptr[4], (plen + 7) / 8); if (plen % 8) { addr[(plen + 7) / 8 - 1] &= ((0xff00 >> (plen % 8)) & 0xff); } snprintf(buf, buflen, "%s/%d", isonsap_string(ndo, addr,(plen + 7) / 8), plen); return 1 + (plen + 7) / 8; trunc: return -2; } static int decode_labeled_vpn_clnp_prefix(netdissect_options *ndo, const u_char *pptr, char *buf, u_int buflen) { uint8_t addr[19]; u_int plen; ND_TCHECK(pptr[0]); plen = pptr[0]; /* get prefix length */ if ((24+64) > plen) return -1; plen-=(24+64); /* adjust prefixlen - labellength - RD len*/ if (152 < plen) return -1; memset(&addr, 0, sizeof(addr)); ND_TCHECK2(pptr[12], (plen + 7) / 8); memcpy(&addr, &pptr[12], (plen + 7) / 8); if (plen % 8) { addr[(plen + 7) / 8 - 1] &= ((0xff00 >> (plen % 
8)) & 0xff); } /* the label may get offsetted by 4 bits so lets shift it right */ snprintf(buf, buflen, "RD: %s, %s/%d, label:%u %s", bgp_vpn_rd_print(ndo, pptr+4), isonsap_string(ndo, addr,(plen + 7) / 8), plen, EXTRACT_24BITS(pptr+1)>>4, ((pptr[3]&1)==0) ? "(BOGUS: Bottom of Stack NOT set!)" : "(bottom)" ); return 12 + (plen + 7) / 8; trunc: return -2; } /* * bgp_attr_get_as_size * * Try to find the size of the ASs encoded in an as-path. It is not obvious, as * both Old speakers that do not support 4 byte AS, and the new speakers that do * support, exchange AS-Path with the same path-attribute type value 0x02. */ static int bgp_attr_get_as_size(netdissect_options *ndo, uint8_t bgpa_type, const u_char *pptr, int len) { const u_char *tptr = pptr; /* * If the path attribute is the optional AS4 path type, then we already * know, that ASs must be encoded in 4 byte format. */ if (bgpa_type == BGPTYPE_AS4_PATH) { return 4; } /* * Let us assume that ASs are of 2 bytes in size, and check if the AS-Path * TLV is good. If not, ask the caller to try with AS encoded as 4 bytes * each. */ while (tptr < pptr + len) { ND_TCHECK(tptr[0]); /* * If we do not find a valid segment type, our guess might be wrong. */ if (tptr[0] < BGP_AS_SEG_TYPE_MIN || tptr[0] > BGP_AS_SEG_TYPE_MAX) { goto trunc; } ND_TCHECK(tptr[1]); tptr += 2 + tptr[1] * 2; } /* * If we correctly reached end of the AS path attribute data content, * then most likely ASs were indeed encoded as 2 bytes. */ if (tptr == pptr + len) { return 2; } trunc: /* * We can come here, either we did not have enough data, or if we * try to decode 4 byte ASs in 2 byte format. Either way, return 4, * so that calller can try to decode each AS as of 4 bytes. If indeed * there was not enough data, it will crib and end the parse anyways. 
*/ return 4; } static int bgp_attr_print(netdissect_options *ndo, u_int atype, const u_char *pptr, u_int len) { int i; uint16_t af; uint8_t safi, snpa, nhlen; union { /* copy buffer for bandwidth values */ float f; uint32_t i; } bw; int advance; u_int tlen; const u_char *tptr; char buf[MAXHOSTNAMELEN + 100]; int as_size; tptr = pptr; tlen=len; switch (atype) { case BGPTYPE_ORIGIN: if (len != 1) ND_PRINT((ndo, "invalid len")); else { ND_TCHECK(*tptr); ND_PRINT((ndo, "%s", tok2str(bgp_origin_values, "Unknown Origin Typecode", tptr[0]))); } break; /* * Process AS4 byte path and AS2 byte path attributes here. */ case BGPTYPE_AS4_PATH: case BGPTYPE_AS_PATH: if (len % 2) { ND_PRINT((ndo, "invalid len")); break; } if (!len) { ND_PRINT((ndo, "empty")); break; } /* * BGP updates exchanged between New speakers that support 4 * byte AS, ASs are always encoded in 4 bytes. There is no * definitive way to find this, just by the packet's * contents. So, check for packet's TLV's sanity assuming * 2 bytes first, and it does not pass, assume that ASs are * encoded in 4 bytes format and move on. */ as_size = bgp_attr_get_as_size(ndo, atype, pptr, len); while (tptr < pptr + len) { ND_TCHECK(tptr[0]); ND_PRINT((ndo, "%s", tok2str(bgp_as_path_segment_open_values, "?", tptr[0]))); ND_TCHECK(tptr[1]); for (i = 0; i < tptr[1] * as_size; i += as_size) { ND_TCHECK2(tptr[2 + i], as_size); ND_PRINT((ndo, "%s ", as_printf(ndo, astostr, sizeof(astostr), as_size == 2 ? 
EXTRACT_16BITS(&tptr[2 + i]) : EXTRACT_32BITS(&tptr[2 + i])))); } ND_TCHECK(tptr[0]); ND_PRINT((ndo, "%s", tok2str(bgp_as_path_segment_close_values, "?", tptr[0]))); ND_TCHECK(tptr[1]); tptr += 2 + tptr[1] * as_size; } break; case BGPTYPE_NEXT_HOP: if (len != 4) ND_PRINT((ndo, "invalid len")); else { ND_TCHECK2(tptr[0], 4); ND_PRINT((ndo, "%s", ipaddr_string(ndo, tptr))); } break; case BGPTYPE_MULTI_EXIT_DISC: case BGPTYPE_LOCAL_PREF: if (len != 4) ND_PRINT((ndo, "invalid len")); else { ND_TCHECK2(tptr[0], 4); ND_PRINT((ndo, "%u", EXTRACT_32BITS(tptr))); } break; case BGPTYPE_ATOMIC_AGGREGATE: if (len != 0) ND_PRINT((ndo, "invalid len")); break; case BGPTYPE_AGGREGATOR: /* * Depending on the AS encoded is of 2 bytes or of 4 bytes, * the length of this PA can be either 6 bytes or 8 bytes. */ if (len != 6 && len != 8) { ND_PRINT((ndo, "invalid len")); break; } ND_TCHECK2(tptr[0], len); if (len == 6) { ND_PRINT((ndo, " AS #%s, origin %s", as_printf(ndo, astostr, sizeof(astostr), EXTRACT_16BITS(tptr)), ipaddr_string(ndo, tptr + 2))); } else { ND_PRINT((ndo, " AS #%s, origin %s", as_printf(ndo, astostr, sizeof(astostr), EXTRACT_32BITS(tptr)), ipaddr_string(ndo, tptr + 4))); } break; case BGPTYPE_AGGREGATOR4: if (len != 8) { ND_PRINT((ndo, "invalid len")); break; } ND_TCHECK2(tptr[0], 8); ND_PRINT((ndo, " AS #%s, origin %s", as_printf(ndo, astostr, sizeof(astostr), EXTRACT_32BITS(tptr)), ipaddr_string(ndo, tptr + 4))); break; case BGPTYPE_COMMUNITIES: if (len % 4) { ND_PRINT((ndo, "invalid len")); break; } while (tlen>0) { uint32_t comm; ND_TCHECK2(tptr[0], 4); comm = EXTRACT_32BITS(tptr); switch (comm) { case BGP_COMMUNITY_NO_EXPORT: ND_PRINT((ndo, " NO_EXPORT")); break; case BGP_COMMUNITY_NO_ADVERT: ND_PRINT((ndo, " NO_ADVERTISE")); break; case BGP_COMMUNITY_NO_EXPORT_SUBCONFED: ND_PRINT((ndo, " NO_EXPORT_SUBCONFED")); break; default: ND_PRINT((ndo, "%u:%u%s", (comm >> 16) & 0xffff, comm & 0xffff, (tlen>4) ? 
", " : "")); break; } tlen -=4; tptr +=4; } break; case BGPTYPE_ORIGINATOR_ID: if (len != 4) { ND_PRINT((ndo, "invalid len")); break; } ND_TCHECK2(tptr[0], 4); ND_PRINT((ndo, "%s",ipaddr_string(ndo, tptr))); break; case BGPTYPE_CLUSTER_LIST: if (len % 4) { ND_PRINT((ndo, "invalid len")); break; } while (tlen>0) { ND_TCHECK2(tptr[0], 4); ND_PRINT((ndo, "%s%s", ipaddr_string(ndo, tptr), (tlen>4) ? ", " : "")); tlen -=4; tptr +=4; } break; case BGPTYPE_MP_REACH_NLRI: ND_TCHECK2(tptr[0], 3); af = EXTRACT_16BITS(tptr); safi = tptr[2]; ND_PRINT((ndo, "\n\t AFI: %s (%u), %sSAFI: %s (%u)", tok2str(af_values, "Unknown AFI", af), af, (safi>128) ? "vendor specific " : "", /* 128 is meanwhile wellknown */ tok2str(bgp_safi_values, "Unknown SAFI", safi), safi)); switch(af<<8 | safi) { case (AFNUM_INET<<8 | SAFNUM_UNICAST): case (AFNUM_INET<<8 | SAFNUM_MULTICAST): case (AFNUM_INET<<8 | SAFNUM_UNIMULTICAST): case (AFNUM_INET<<8 | SAFNUM_LABUNICAST): case (AFNUM_INET<<8 | SAFNUM_RT_ROUTING_INFO): case (AFNUM_INET<<8 | SAFNUM_VPNUNICAST): case (AFNUM_INET<<8 | SAFNUM_VPNMULTICAST): case (AFNUM_INET<<8 | SAFNUM_VPNUNIMULTICAST): case (AFNUM_INET<<8 | SAFNUM_MULTICAST_VPN): case (AFNUM_INET<<8 | SAFNUM_MDT): case (AFNUM_INET6<<8 | SAFNUM_UNICAST): case (AFNUM_INET6<<8 | SAFNUM_MULTICAST): case (AFNUM_INET6<<8 | SAFNUM_UNIMULTICAST): case (AFNUM_INET6<<8 | SAFNUM_LABUNICAST): case (AFNUM_INET6<<8 | SAFNUM_VPNUNICAST): case (AFNUM_INET6<<8 | SAFNUM_VPNMULTICAST): case (AFNUM_INET6<<8 | SAFNUM_VPNUNIMULTICAST): case (AFNUM_NSAP<<8 | SAFNUM_UNICAST): case (AFNUM_NSAP<<8 | SAFNUM_MULTICAST): case (AFNUM_NSAP<<8 | SAFNUM_UNIMULTICAST): case (AFNUM_NSAP<<8 | SAFNUM_VPNUNICAST): case (AFNUM_NSAP<<8 | SAFNUM_VPNMULTICAST): case (AFNUM_NSAP<<8 | SAFNUM_VPNUNIMULTICAST): case (AFNUM_L2VPN<<8 | SAFNUM_VPNUNICAST): case (AFNUM_L2VPN<<8 | SAFNUM_VPNMULTICAST): case (AFNUM_L2VPN<<8 | SAFNUM_VPNUNIMULTICAST): case (AFNUM_VPLS<<8 | SAFNUM_VPLS): break; default: ND_TCHECK2(tptr[0], tlen); 
ND_PRINT((ndo, "\n\t no AFI %u / SAFI %u decoder", af, safi)); if (ndo->ndo_vflag <= 1) print_unknown_data(ndo, tptr, "\n\t ", tlen); goto done; break; } tptr +=3; ND_TCHECK(tptr[0]); nhlen = tptr[0]; tlen = nhlen; tptr++; if (tlen) { int nnh = 0; ND_PRINT((ndo, "\n\t nexthop: ")); while (tlen > 0) { if ( nnh++ > 0 ) { ND_PRINT((ndo, ", " )); } switch(af<<8 | safi) { case (AFNUM_INET<<8 | SAFNUM_UNICAST): case (AFNUM_INET<<8 | SAFNUM_MULTICAST): case (AFNUM_INET<<8 | SAFNUM_UNIMULTICAST): case (AFNUM_INET<<8 | SAFNUM_LABUNICAST): case (AFNUM_INET<<8 | SAFNUM_RT_ROUTING_INFO): case (AFNUM_INET<<8 | SAFNUM_MULTICAST_VPN): case (AFNUM_INET<<8 | SAFNUM_MDT): if (tlen < (int)sizeof(struct in_addr)) { ND_PRINT((ndo, "invalid len")); tlen = 0; } else { ND_TCHECK2(tptr[0], sizeof(struct in_addr)); ND_PRINT((ndo, "%s",ipaddr_string(ndo, tptr))); tlen -= sizeof(struct in_addr); tptr += sizeof(struct in_addr); } break; case (AFNUM_INET<<8 | SAFNUM_VPNUNICAST): case (AFNUM_INET<<8 | SAFNUM_VPNMULTICAST): case (AFNUM_INET<<8 | SAFNUM_VPNUNIMULTICAST): if (tlen < (int)(sizeof(struct in_addr)+BGP_VPN_RD_LEN)) { ND_PRINT((ndo, "invalid len")); tlen = 0; } else { ND_TCHECK2(tptr[0], sizeof(struct in_addr)+BGP_VPN_RD_LEN); ND_PRINT((ndo, "RD: %s, %s", bgp_vpn_rd_print(ndo, tptr), ipaddr_string(ndo, tptr+BGP_VPN_RD_LEN))); tlen -= (sizeof(struct in_addr)+BGP_VPN_RD_LEN); tptr += (sizeof(struct in_addr)+BGP_VPN_RD_LEN); } break; case (AFNUM_INET6<<8 | SAFNUM_UNICAST): case (AFNUM_INET6<<8 | SAFNUM_MULTICAST): case (AFNUM_INET6<<8 | SAFNUM_UNIMULTICAST): case (AFNUM_INET6<<8 | SAFNUM_LABUNICAST): if (tlen < (int)sizeof(struct in6_addr)) { ND_PRINT((ndo, "invalid len")); tlen = 0; } else { ND_TCHECK2(tptr[0], sizeof(struct in6_addr)); ND_PRINT((ndo, "%s", ip6addr_string(ndo, tptr))); tlen -= sizeof(struct in6_addr); tptr += sizeof(struct in6_addr); } break; case (AFNUM_INET6<<8 | SAFNUM_VPNUNICAST): case (AFNUM_INET6<<8 | SAFNUM_VPNMULTICAST): case (AFNUM_INET6<<8 | 
SAFNUM_VPNUNIMULTICAST): if (tlen < (int)(sizeof(struct in6_addr)+BGP_VPN_RD_LEN)) { ND_PRINT((ndo, "invalid len")); tlen = 0; } else { ND_TCHECK2(tptr[0], sizeof(struct in6_addr)+BGP_VPN_RD_LEN); ND_PRINT((ndo, "RD: %s, %s", bgp_vpn_rd_print(ndo, tptr), ip6addr_string(ndo, tptr+BGP_VPN_RD_LEN))); tlen -= (sizeof(struct in6_addr)+BGP_VPN_RD_LEN); tptr += (sizeof(struct in6_addr)+BGP_VPN_RD_LEN); } break; case (AFNUM_VPLS<<8 | SAFNUM_VPLS): case (AFNUM_L2VPN<<8 | SAFNUM_VPNUNICAST): case (AFNUM_L2VPN<<8 | SAFNUM_VPNMULTICAST): case (AFNUM_L2VPN<<8 | SAFNUM_VPNUNIMULTICAST): if (tlen < (int)sizeof(struct in_addr)) { ND_PRINT((ndo, "invalid len")); tlen = 0; } else { ND_TCHECK2(tptr[0], sizeof(struct in_addr)); ND_PRINT((ndo, "%s", ipaddr_string(ndo, tptr))); tlen -= (sizeof(struct in_addr)); tptr += (sizeof(struct in_addr)); } break; case (AFNUM_NSAP<<8 | SAFNUM_UNICAST): case (AFNUM_NSAP<<8 | SAFNUM_MULTICAST): case (AFNUM_NSAP<<8 | SAFNUM_UNIMULTICAST): ND_TCHECK2(tptr[0], tlen); ND_PRINT((ndo, "%s", isonsap_string(ndo, tptr, tlen))); tptr += tlen; tlen = 0; break; case (AFNUM_NSAP<<8 | SAFNUM_VPNUNICAST): case (AFNUM_NSAP<<8 | SAFNUM_VPNMULTICAST): case (AFNUM_NSAP<<8 | SAFNUM_VPNUNIMULTICAST): if (tlen < BGP_VPN_RD_LEN+1) { ND_PRINT((ndo, "invalid len")); tlen = 0; } else { ND_TCHECK2(tptr[0], tlen); ND_PRINT((ndo, "RD: %s, %s", bgp_vpn_rd_print(ndo, tptr), isonsap_string(ndo, tptr+BGP_VPN_RD_LEN,tlen-BGP_VPN_RD_LEN))); /* rfc986 mapped IPv4 address ? */ if (EXTRACT_32BITS(tptr+BGP_VPN_RD_LEN) == 0x47000601) ND_PRINT((ndo, " = %s", ipaddr_string(ndo, tptr+BGP_VPN_RD_LEN+4))); /* rfc1888 mapped IPv6 address ? 
*/ else if (EXTRACT_24BITS(tptr+BGP_VPN_RD_LEN) == 0x350000) ND_PRINT((ndo, " = %s", ip6addr_string(ndo, tptr+BGP_VPN_RD_LEN+3))); tptr += tlen; tlen = 0; } break; default: ND_TCHECK2(tptr[0], tlen); ND_PRINT((ndo, "no AFI %u/SAFI %u decoder", af, safi)); if (ndo->ndo_vflag <= 1) print_unknown_data(ndo, tptr, "\n\t ", tlen); tptr += tlen; tlen = 0; goto done; break; } } } ND_PRINT((ndo, ", nh-length: %u", nhlen)); tptr += tlen; ND_TCHECK(tptr[0]); snpa = tptr[0]; tptr++; if (snpa) { ND_PRINT((ndo, "\n\t %u SNPA", snpa)); for (/*nothing*/; snpa > 0; snpa--) { ND_TCHECK(tptr[0]); ND_PRINT((ndo, "\n\t %d bytes", tptr[0])); tptr += tptr[0] + 1; } } else { ND_PRINT((ndo, ", no SNPA")); } while (tptr < pptr + len) { switch (af<<8 | safi) { case (AFNUM_INET<<8 | SAFNUM_UNICAST): case (AFNUM_INET<<8 | SAFNUM_MULTICAST): case (AFNUM_INET<<8 | SAFNUM_UNIMULTICAST): advance = decode_prefix4(ndo, tptr, len, buf, sizeof(buf)); if (advance == -1) ND_PRINT((ndo, "\n\t (illegal prefix length)")); else if (advance == -2) goto trunc; else if (advance == -3) break; /* bytes left, but not enough */ else ND_PRINT((ndo, "\n\t %s", buf)); break; case (AFNUM_INET<<8 | SAFNUM_LABUNICAST): advance = decode_labeled_prefix4(ndo, tptr, len, buf, sizeof(buf)); if (advance == -1) ND_PRINT((ndo, "\n\t (illegal prefix length)")); else if (advance == -2) goto trunc; else if (advance == -3) break; /* bytes left, but not enough */ else ND_PRINT((ndo, "\n\t %s", buf)); break; case (AFNUM_INET<<8 | SAFNUM_VPNUNICAST): case (AFNUM_INET<<8 | SAFNUM_VPNMULTICAST): case (AFNUM_INET<<8 | SAFNUM_VPNUNIMULTICAST): advance = decode_labeled_vpn_prefix4(ndo, tptr, buf, sizeof(buf)); if (advance == -1) ND_PRINT((ndo, "\n\t (illegal prefix length)")); else if (advance == -2) goto trunc; else ND_PRINT((ndo, "\n\t %s", buf)); break; case (AFNUM_INET<<8 | SAFNUM_RT_ROUTING_INFO): advance = decode_rt_routing_info(ndo, tptr, buf, sizeof(buf)); if (advance == -1) ND_PRINT((ndo, "\n\t (illegal prefix length)")); else if 
(advance == -2) goto trunc; else ND_PRINT((ndo, "\n\t %s", buf)); break; case (AFNUM_INET<<8 | SAFNUM_MULTICAST_VPN): /* fall through */ case (AFNUM_INET6<<8 | SAFNUM_MULTICAST_VPN): advance = decode_multicast_vpn(ndo, tptr, buf, sizeof(buf)); if (advance == -1) ND_PRINT((ndo, "\n\t (illegal prefix length)")); else if (advance == -2) goto trunc; else ND_PRINT((ndo, "\n\t %s", buf)); break; case (AFNUM_INET<<8 | SAFNUM_MDT): advance = decode_mdt_vpn_nlri(ndo, tptr, buf, sizeof(buf)); if (advance == -1) ND_PRINT((ndo, "\n\t (illegal prefix length)")); else if (advance == -2) goto trunc; else ND_PRINT((ndo, "\n\t %s", buf)); break; case (AFNUM_INET6<<8 | SAFNUM_UNICAST): case (AFNUM_INET6<<8 | SAFNUM_MULTICAST): case (AFNUM_INET6<<8 | SAFNUM_UNIMULTICAST): advance = decode_prefix6(ndo, tptr, len, buf, sizeof(buf)); if (advance == -1) ND_PRINT((ndo, "\n\t (illegal prefix length)")); else if (advance == -2) goto trunc; else if (advance == -3) break; /* bytes left, but not enough */ else ND_PRINT((ndo, "\n\t %s", buf)); break; case (AFNUM_INET6<<8 | SAFNUM_LABUNICAST): advance = decode_labeled_prefix6(ndo, tptr, len, buf, sizeof(buf)); if (advance == -1) ND_PRINT((ndo, "\n\t (illegal prefix length)")); else if (advance == -2) goto trunc; else if (advance == -3) break; /* bytes left, but not enough */ else ND_PRINT((ndo, "\n\t %s", buf)); break; case (AFNUM_INET6<<8 | SAFNUM_VPNUNICAST): case (AFNUM_INET6<<8 | SAFNUM_VPNMULTICAST): case (AFNUM_INET6<<8 | SAFNUM_VPNUNIMULTICAST): advance = decode_labeled_vpn_prefix6(ndo, tptr, buf, sizeof(buf)); if (advance == -1) ND_PRINT((ndo, "\n\t (illegal prefix length)")); else if (advance == -2) goto trunc; else ND_PRINT((ndo, "\n\t %s", buf)); break; case (AFNUM_VPLS<<8 | SAFNUM_VPLS): case (AFNUM_L2VPN<<8 | SAFNUM_VPNUNICAST): case (AFNUM_L2VPN<<8 | SAFNUM_VPNMULTICAST): case (AFNUM_L2VPN<<8 | SAFNUM_VPNUNIMULTICAST): advance = decode_labeled_vpn_l2(ndo, tptr, buf, sizeof(buf)); if (advance == -1) ND_PRINT((ndo, "\n\t (illegal 
length)")); else if (advance == -2) goto trunc; else ND_PRINT((ndo, "\n\t %s", buf)); break; case (AFNUM_NSAP<<8 | SAFNUM_UNICAST): case (AFNUM_NSAP<<8 | SAFNUM_MULTICAST): case (AFNUM_NSAP<<8 | SAFNUM_UNIMULTICAST): advance = decode_clnp_prefix(ndo, tptr, buf, sizeof(buf)); if (advance == -1) ND_PRINT((ndo, "\n\t (illegal prefix length)")); else if (advance == -2) goto trunc; else ND_PRINT((ndo, "\n\t %s", buf)); break; case (AFNUM_NSAP<<8 | SAFNUM_VPNUNICAST): case (AFNUM_NSAP<<8 | SAFNUM_VPNMULTICAST): case (AFNUM_NSAP<<8 | SAFNUM_VPNUNIMULTICAST): advance = decode_labeled_vpn_clnp_prefix(ndo, tptr, buf, sizeof(buf)); if (advance == -1) ND_PRINT((ndo, "\n\t (illegal prefix length)")); else if (advance == -2) goto trunc; else ND_PRINT((ndo, "\n\t %s", buf)); break; default: ND_TCHECK2(*tptr,tlen); ND_PRINT((ndo, "\n\t no AFI %u / SAFI %u decoder", af, safi)); if (ndo->ndo_vflag <= 1) print_unknown_data(ndo, tptr, "\n\t ", tlen); advance = 0; tptr = pptr + len; break; } if (advance < 0) break; tptr += advance; } done: break; case BGPTYPE_MP_UNREACH_NLRI: ND_TCHECK2(tptr[0], BGP_MP_NLRI_MINSIZE); af = EXTRACT_16BITS(tptr); safi = tptr[2]; ND_PRINT((ndo, "\n\t AFI: %s (%u), %sSAFI: %s (%u)", tok2str(af_values, "Unknown AFI", af), af, (safi>128) ? 
"vendor specific " : "", /* 128 is meanwhile wellknown */ tok2str(bgp_safi_values, "Unknown SAFI", safi), safi)); if (len == BGP_MP_NLRI_MINSIZE) ND_PRINT((ndo, "\n\t End-of-Rib Marker (empty NLRI)")); tptr += 3; while (tptr < pptr + len) { switch (af<<8 | safi) { case (AFNUM_INET<<8 | SAFNUM_UNICAST): case (AFNUM_INET<<8 | SAFNUM_MULTICAST): case (AFNUM_INET<<8 | SAFNUM_UNIMULTICAST): advance = decode_prefix4(ndo, tptr, len, buf, sizeof(buf)); if (advance == -1) ND_PRINT((ndo, "\n\t (illegal prefix length)")); else if (advance == -2) goto trunc; else if (advance == -3) break; /* bytes left, but not enough */ else ND_PRINT((ndo, "\n\t %s", buf)); break; case (AFNUM_INET<<8 | SAFNUM_LABUNICAST): advance = decode_labeled_prefix4(ndo, tptr, len, buf, sizeof(buf)); if (advance == -1) ND_PRINT((ndo, "\n\t (illegal prefix length)")); else if (advance == -2) goto trunc; else if (advance == -3) break; /* bytes left, but not enough */ else ND_PRINT((ndo, "\n\t %s", buf)); break; case (AFNUM_INET<<8 | SAFNUM_VPNUNICAST): case (AFNUM_INET<<8 | SAFNUM_VPNMULTICAST): case (AFNUM_INET<<8 | SAFNUM_VPNUNIMULTICAST): advance = decode_labeled_vpn_prefix4(ndo, tptr, buf, sizeof(buf)); if (advance == -1) ND_PRINT((ndo, "\n\t (illegal prefix length)")); else if (advance == -2) goto trunc; else ND_PRINT((ndo, "\n\t %s", buf)); break; case (AFNUM_INET6<<8 | SAFNUM_UNICAST): case (AFNUM_INET6<<8 | SAFNUM_MULTICAST): case (AFNUM_INET6<<8 | SAFNUM_UNIMULTICAST): advance = decode_prefix6(ndo, tptr, len, buf, sizeof(buf)); if (advance == -1) ND_PRINT((ndo, "\n\t (illegal prefix length)")); else if (advance == -2) goto trunc; else if (advance == -3) break; /* bytes left, but not enough */ else ND_PRINT((ndo, "\n\t %s", buf)); break; case (AFNUM_INET6<<8 | SAFNUM_LABUNICAST): advance = decode_labeled_prefix6(ndo, tptr, len, buf, sizeof(buf)); if (advance == -1) ND_PRINT((ndo, "\n\t (illegal prefix length)")); else if (advance == -2) goto trunc; else if (advance == -3) break; /* bytes left, but 
not enough */ else ND_PRINT((ndo, "\n\t %s", buf)); break; case (AFNUM_INET6<<8 | SAFNUM_VPNUNICAST): case (AFNUM_INET6<<8 | SAFNUM_VPNMULTICAST): case (AFNUM_INET6<<8 | SAFNUM_VPNUNIMULTICAST): advance = decode_labeled_vpn_prefix6(ndo, tptr, buf, sizeof(buf)); if (advance == -1) ND_PRINT((ndo, "\n\t (illegal prefix length)")); else if (advance == -2) goto trunc; else ND_PRINT((ndo, "\n\t %s", buf)); break; case (AFNUM_VPLS<<8 | SAFNUM_VPLS): case (AFNUM_L2VPN<<8 | SAFNUM_VPNUNICAST): case (AFNUM_L2VPN<<8 | SAFNUM_VPNMULTICAST): case (AFNUM_L2VPN<<8 | SAFNUM_VPNUNIMULTICAST): advance = decode_labeled_vpn_l2(ndo, tptr, buf, sizeof(buf)); if (advance == -1) ND_PRINT((ndo, "\n\t (illegal length)")); else if (advance == -2) goto trunc; else ND_PRINT((ndo, "\n\t %s", buf)); break; case (AFNUM_NSAP<<8 | SAFNUM_UNICAST): case (AFNUM_NSAP<<8 | SAFNUM_MULTICAST): case (AFNUM_NSAP<<8 | SAFNUM_UNIMULTICAST): advance = decode_clnp_prefix(ndo, tptr, buf, sizeof(buf)); if (advance == -1) ND_PRINT((ndo, "\n\t (illegal prefix length)")); else if (advance == -2) goto trunc; else ND_PRINT((ndo, "\n\t %s", buf)); break; case (AFNUM_NSAP<<8 | SAFNUM_VPNUNICAST): case (AFNUM_NSAP<<8 | SAFNUM_VPNMULTICAST): case (AFNUM_NSAP<<8 | SAFNUM_VPNUNIMULTICAST): advance = decode_labeled_vpn_clnp_prefix(ndo, tptr, buf, sizeof(buf)); if (advance == -1) ND_PRINT((ndo, "\n\t (illegal prefix length)")); else if (advance == -2) goto trunc; else ND_PRINT((ndo, "\n\t %s", buf)); break; case (AFNUM_INET<<8 | SAFNUM_MDT): advance = decode_mdt_vpn_nlri(ndo, tptr, buf, sizeof(buf)); if (advance == -1) ND_PRINT((ndo, "\n\t (illegal prefix length)")); else if (advance == -2) goto trunc; else ND_PRINT((ndo, "\n\t %s", buf)); break; case (AFNUM_INET<<8 | SAFNUM_MULTICAST_VPN): /* fall through */ case (AFNUM_INET6<<8 | SAFNUM_MULTICAST_VPN): advance = decode_multicast_vpn(ndo, tptr, buf, sizeof(buf)); if (advance == -1) ND_PRINT((ndo, "\n\t (illegal prefix length)")); else if (advance == -2) goto trunc; else 
ND_PRINT((ndo, "\n\t %s", buf)); break; default: ND_TCHECK2(*(tptr-3),tlen); ND_PRINT((ndo, "no AFI %u / SAFI %u decoder", af, safi)); if (ndo->ndo_vflag <= 1) print_unknown_data(ndo, tptr-3, "\n\t ", tlen); advance = 0; tptr = pptr + len; break; } if (advance < 0) break; tptr += advance; } break; case BGPTYPE_EXTD_COMMUNITIES: if (len % 8) { ND_PRINT((ndo, "invalid len")); break; } while (tlen>0) { uint16_t extd_comm; ND_TCHECK2(tptr[0], 2); extd_comm=EXTRACT_16BITS(tptr); ND_PRINT((ndo, "\n\t %s (0x%04x), Flags [%s]", tok2str(bgp_extd_comm_subtype_values, "unknown extd community typecode", extd_comm), extd_comm, bittok2str(bgp_extd_comm_flag_values, "none", extd_comm))); ND_TCHECK2(*(tptr+2), 6); switch(extd_comm) { case BGP_EXT_COM_RT_0: case BGP_EXT_COM_RO_0: case BGP_EXT_COM_L2VPN_RT_0: ND_PRINT((ndo, ": %u:%u (= %s)", EXTRACT_16BITS(tptr+2), EXTRACT_32BITS(tptr+4), ipaddr_string(ndo, tptr+4))); break; case BGP_EXT_COM_RT_1: case BGP_EXT_COM_RO_1: case BGP_EXT_COM_L2VPN_RT_1: case BGP_EXT_COM_VRF_RT_IMP: ND_PRINT((ndo, ": %s:%u", ipaddr_string(ndo, tptr+2), EXTRACT_16BITS(tptr+6))); break; case BGP_EXT_COM_RT_2: case BGP_EXT_COM_RO_2: ND_PRINT((ndo, ": %s:%u", as_printf(ndo, astostr, sizeof(astostr), EXTRACT_32BITS(tptr+2)), EXTRACT_16BITS(tptr+6))); break; case BGP_EXT_COM_LINKBAND: bw.i = EXTRACT_32BITS(tptr+2); ND_PRINT((ndo, ": bandwidth: %.3f Mbps", bw.f*8/1000000)); break; case BGP_EXT_COM_VPN_ORIGIN: case BGP_EXT_COM_VPN_ORIGIN2: case BGP_EXT_COM_VPN_ORIGIN3: case BGP_EXT_COM_VPN_ORIGIN4: case BGP_EXT_COM_OSPF_RID: case BGP_EXT_COM_OSPF_RID2: ND_PRINT((ndo, "%s", ipaddr_string(ndo, tptr+2))); break; case BGP_EXT_COM_OSPF_RTYPE: case BGP_EXT_COM_OSPF_RTYPE2: ND_PRINT((ndo, ": area:%s, router-type:%s, metric-type:%s%s", ipaddr_string(ndo, tptr+2), tok2str(bgp_extd_comm_ospf_rtype_values, "unknown (0x%02x)", *(tptr+6)), (*(tptr+7) & BGP_OSPF_RTYPE_METRIC_TYPE) ? "E2" : "", ((*(tptr+6) == BGP_OSPF_RTYPE_EXT) || (*(tptr+6) == BGP_OSPF_RTYPE_NSSA)) ? 
"E1" : "")); break; case BGP_EXT_COM_L2INFO: ND_PRINT((ndo, ": %s Control Flags [0x%02x]:MTU %u", tok2str(l2vpn_encaps_values, "unknown encaps", *(tptr+2)), *(tptr+3), EXTRACT_16BITS(tptr+4))); break; case BGP_EXT_COM_SOURCE_AS: ND_PRINT((ndo, ": AS %u", EXTRACT_16BITS(tptr+2))); break; default: ND_TCHECK2(*tptr,8); print_unknown_data(ndo, tptr, "\n\t ", 8); break; } tlen -=8; tptr +=8; } break; case BGPTYPE_PMSI_TUNNEL: { uint8_t tunnel_type, flags; ND_TCHECK2(tptr[0], 5); tunnel_type = *(tptr+1); flags = *tptr; tlen = len; ND_PRINT((ndo, "\n\t Tunnel-type %s (%u), Flags [%s], MPLS Label %u", tok2str(bgp_pmsi_tunnel_values, "Unknown", tunnel_type), tunnel_type, bittok2str(bgp_pmsi_flag_values, "none", flags), EXTRACT_24BITS(tptr+2)>>4)); tptr +=5; tlen -= 5; switch (tunnel_type) { case BGP_PMSI_TUNNEL_PIM_SM: /* fall through */ case BGP_PMSI_TUNNEL_PIM_BIDIR: ND_TCHECK2(tptr[0], 8); ND_PRINT((ndo, "\n\t Sender %s, P-Group %s", ipaddr_string(ndo, tptr), ipaddr_string(ndo, tptr+4))); break; case BGP_PMSI_TUNNEL_PIM_SSM: ND_TCHECK2(tptr[0], 8); ND_PRINT((ndo, "\n\t Root-Node %s, P-Group %s", ipaddr_string(ndo, tptr), ipaddr_string(ndo, tptr+4))); break; case BGP_PMSI_TUNNEL_INGRESS: ND_TCHECK2(tptr[0], 4); ND_PRINT((ndo, "\n\t Tunnel-Endpoint %s", ipaddr_string(ndo, tptr))); break; case BGP_PMSI_TUNNEL_LDP_P2MP: /* fall through */ case BGP_PMSI_TUNNEL_LDP_MP2MP: ND_TCHECK2(tptr[0], 8); ND_PRINT((ndo, "\n\t Root-Node %s, LSP-ID 0x%08x", ipaddr_string(ndo, tptr), EXTRACT_32BITS(tptr+4))); break; case BGP_PMSI_TUNNEL_RSVP_P2MP: ND_TCHECK2(tptr[0], 8); ND_PRINT((ndo, "\n\t Extended-Tunnel-ID %s, P2MP-ID 0x%08x", ipaddr_string(ndo, tptr), EXTRACT_32BITS(tptr+4))); break; default: if (ndo->ndo_vflag <= 1) { print_unknown_data(ndo, tptr, "\n\t ", tlen); } } break; } case BGPTYPE_AIGP: { uint8_t type; uint16_t length; tlen = len; while (tlen >= 3) { ND_TCHECK2(tptr[0], 3); type = *tptr; length = EXTRACT_16BITS(tptr+1); tptr += 3; tlen -= 3; ND_PRINT((ndo, "\n\t %s TLV (%u), 
length %u", tok2str(bgp_aigp_values, "Unknown", type), type, length)); if (length < 3) goto trunc; length -= 3; /* * Check if we can read the TLV data. */ ND_TCHECK2(tptr[3], length); switch (type) { case BGP_AIGP_TLV: if (length < 8) goto trunc; ND_PRINT((ndo, ", metric %" PRIu64, EXTRACT_64BITS(tptr))); break; default: if (ndo->ndo_vflag <= 1) { print_unknown_data(ndo, tptr,"\n\t ", length); } } tptr += length; tlen -= length; } break; } case BGPTYPE_ATTR_SET: ND_TCHECK2(tptr[0], 4); if (len < 4) goto trunc; ND_PRINT((ndo, "\n\t Origin AS: %s", as_printf(ndo, astostr, sizeof(astostr), EXTRACT_32BITS(tptr)))); tptr+=4; len -=4; while (len) { u_int aflags, alenlen, alen; ND_TCHECK2(tptr[0], 2); if (len < 2) goto trunc; aflags = *tptr; atype = *(tptr + 1); tptr += 2; len -= 2; alenlen = bgp_attr_lenlen(aflags, tptr); ND_TCHECK2(tptr[0], alenlen); if (len < alenlen) goto trunc; alen = bgp_attr_len(aflags, tptr); tptr += alenlen; len -= alenlen; ND_PRINT((ndo, "\n\t %s (%u), length: %u", tok2str(bgp_attr_values, "Unknown Attribute", atype), atype, alen)); if (aflags) { ND_PRINT((ndo, ", Flags [%s%s%s%s", aflags & 0x80 ? "O" : "", aflags & 0x40 ? "T" : "", aflags & 0x20 ? "P" : "", aflags & 0x10 ? "E" : "")); if (aflags & 0xf) ND_PRINT((ndo, "+%x", aflags & 0xf)); ND_PRINT((ndo, "]: ")); } /* FIXME check for recursion */ if (!bgp_attr_print(ndo, atype, tptr, alen)) return 0; tptr += alen; len -= alen; } break; case BGPTYPE_LARGE_COMMUNITY: if (len == 0 || len % 12) { ND_PRINT((ndo, "invalid len")); break; } ND_PRINT((ndo, "\n\t ")); while (len > 0) { ND_TCHECK2(*tptr, 12); ND_PRINT((ndo, "%u:%u:%u%s", EXTRACT_32BITS(tptr), EXTRACT_32BITS(tptr + 4), EXTRACT_32BITS(tptr + 8), (len > 12) ? 
", " : "")); tptr += 12; len -= 12; } break; default: ND_TCHECK2(*pptr,len); ND_PRINT((ndo, "\n\t no Attribute %u decoder", atype)); /* we have no decoder for the attribute */ if (ndo->ndo_vflag <= 1) print_unknown_data(ndo, pptr, "\n\t ", len); break; } if (ndo->ndo_vflag > 1 && len) { /* omit zero length attributes*/ ND_TCHECK2(*pptr,len); print_unknown_data(ndo, pptr, "\n\t ", len); } return 1; trunc: return 0; } static void bgp_capabilities_print(netdissect_options *ndo, const u_char *opt, int caps_len) { int cap_type, cap_len, tcap_len, cap_offset; int i = 0; while (i < caps_len) { ND_TCHECK2(opt[i], BGP_CAP_HEADER_SIZE); cap_type=opt[i]; cap_len=opt[i+1]; tcap_len=cap_len; ND_PRINT((ndo, "\n\t %s (%u), length: %u", tok2str(bgp_capcode_values, "Unknown", cap_type), cap_type, cap_len)); ND_TCHECK2(opt[i+2], cap_len); switch (cap_type) { case BGP_CAPCODE_MP: ND_PRINT((ndo, "\n\t\tAFI %s (%u), SAFI %s (%u)", tok2str(af_values, "Unknown", EXTRACT_16BITS(opt+i+2)), EXTRACT_16BITS(opt+i+2), tok2str(bgp_safi_values, "Unknown", opt[i+5]), opt[i+5])); break; case BGP_CAPCODE_RESTART: ND_PRINT((ndo, "\n\t\tRestart Flags: [%s], Restart Time %us", ((opt[i+2])&0x80) ? "R" : "none", EXTRACT_16BITS(opt+i+2)&0xfff)); tcap_len-=2; cap_offset=4; while(tcap_len>=4) { ND_PRINT((ndo, "\n\t\t AFI %s (%u), SAFI %s (%u), Forwarding state preserved: %s", tok2str(af_values,"Unknown", EXTRACT_16BITS(opt+i+cap_offset)), EXTRACT_16BITS(opt+i+cap_offset), tok2str(bgp_safi_values,"Unknown", opt[i+cap_offset+2]), opt[i+cap_offset+2], ((opt[i+cap_offset+3])&0x80) ? "yes" : "no" )); tcap_len-=4; cap_offset+=4; } break; case BGP_CAPCODE_RR: case BGP_CAPCODE_RR_CISCO: break; case BGP_CAPCODE_AS_NEW: /* * Extract the 4 byte AS number encoded. 
*/ if (cap_len == 4) { ND_PRINT((ndo, "\n\t\t 4 Byte AS %s", as_printf(ndo, astostr, sizeof(astostr), EXTRACT_32BITS(opt + i + 2)))); } break; case BGP_CAPCODE_ADD_PATH: cap_offset=2; if (tcap_len == 0) { ND_PRINT((ndo, " (bogus)")); /* length */ break; } while (tcap_len > 0) { if (tcap_len < 4) { ND_PRINT((ndo, "\n\t\t(invalid)")); break; } ND_PRINT((ndo, "\n\t\tAFI %s (%u), SAFI %s (%u), Send/Receive: %s", tok2str(af_values,"Unknown",EXTRACT_16BITS(opt+i+cap_offset)), EXTRACT_16BITS(opt+i+cap_offset), tok2str(bgp_safi_values,"Unknown",opt[i+cap_offset+2]), opt[i+cap_offset+2], tok2str(bgp_add_path_recvsend,"Bogus (0x%02x)",opt[i+cap_offset+3]) )); tcap_len-=4; cap_offset+=4; } break; default: ND_PRINT((ndo, "\n\t\tno decoder for Capability %u", cap_type)); if (ndo->ndo_vflag <= 1) print_unknown_data(ndo, &opt[i+2], "\n\t\t", cap_len); break; } if (ndo->ndo_vflag > 1 && cap_len > 0) { print_unknown_data(ndo, &opt[i+2], "\n\t\t", cap_len); } i += BGP_CAP_HEADER_SIZE + cap_len; } return; trunc: ND_PRINT((ndo, "[|BGP]")); } static void bgp_open_print(netdissect_options *ndo, const u_char *dat, int length) { struct bgp_open bgpo; struct bgp_opt bgpopt; const u_char *opt; int i; ND_TCHECK2(dat[0], BGP_OPEN_SIZE); memcpy(&bgpo, dat, BGP_OPEN_SIZE); ND_PRINT((ndo, "\n\t Version %d, ", bgpo.bgpo_version)); ND_PRINT((ndo, "my AS %s, ", as_printf(ndo, astostr, sizeof(astostr), ntohs(bgpo.bgpo_myas)))); ND_PRINT((ndo, "Holdtime %us, ", ntohs(bgpo.bgpo_holdtime))); ND_PRINT((ndo, "ID %s", ipaddr_string(ndo, &bgpo.bgpo_id))); ND_PRINT((ndo, "\n\t Optional parameters, length: %u", bgpo.bgpo_optlen)); /* some little sanity checking */ if (length < bgpo.bgpo_optlen+BGP_OPEN_SIZE) return; /* ugly! 
     */
    /* point 'opt' just past the bgpo_optlen byte: the first optional
     * parameter (avoids relying on the padded struct layout) */
    opt = &((const struct bgp_open *)dat)->bgpo_optlen;
    opt++;

    i = 0;
    while (i < bgpo.bgpo_optlen) {
        /* option header: 1 byte type, 1 byte length */
        ND_TCHECK2(opt[i], BGP_OPT_SIZE);
        memcpy(&bgpopt, &opt[i], BGP_OPT_SIZE);
        if (i + 2 + bgpopt.bgpopt_len > bgpo.bgpo_optlen) {
            /* option claims more bytes than the option list holds */
            ND_PRINT((ndo, "\n\t Option %d, length: %u", bgpopt.bgpopt_type, bgpopt.bgpopt_len));
            break;
        }
        ND_PRINT((ndo, "\n\t Option %s (%u), length: %u",
                  tok2str(bgp_opt_values,"Unknown",
                          bgpopt.bgpopt_type),
                  bgpopt.bgpopt_type,
                  bgpopt.bgpopt_len));

        /* now let's decode the options we know*/
        switch(bgpopt.bgpopt_type) {

        case BGP_OPT_CAP:
            bgp_capabilities_print(ndo, &opt[i+BGP_OPT_SIZE],
                                   bgpopt.bgpopt_len);
            break;

        case BGP_OPT_AUTH:
        default:
            ND_PRINT((ndo, "\n\t no decoder for option %u",
                      bgpopt.bgpopt_type));
            break;
        }
        i += BGP_OPT_SIZE + bgpopt.bgpopt_len;
    }
    return;
trunc:
    ND_PRINT((ndo, "[|BGP]"));
}

/*
 * Print a BGP UPDATE message: withdrawn routes, the path-attribute
 * list, then any trailing (IPv4) NLRI.
 */
static void
bgp_update_print(netdissect_options *ndo,
                 const u_char *dat, int length)
{
    struct bgp bgp;
    const u_char *p;
    int withdrawn_routes_len;
    int len;        /* total path attribute length */
    int i;

    ND_TCHECK2(dat[0], BGP_SIZE);
    if (length < BGP_SIZE)
        goto trunc;
    memcpy(&bgp, dat, BGP_SIZE);
    p = dat + BGP_SIZE;        /*XXX*/
    length -= BGP_SIZE;

    /* Unfeasible routes */
    ND_TCHECK2(p[0], 2);
    if (length < 2)
        goto trunc;
    withdrawn_routes_len = EXTRACT_16BITS(p);
    p += 2;
    length -= 2;
    if (withdrawn_routes_len) {
        /*
         * Without keeping state from the original NLRI message,
         * it's not possible to tell if this a v4 or v6 route,
         * so only try to decode it if we're not v6 enabled.
         */
        ND_TCHECK2(p[0], withdrawn_routes_len);
        if (length < withdrawn_routes_len)
            goto trunc;
        ND_PRINT((ndo, "\n\t Withdrawn routes: %d bytes", withdrawn_routes_len));
        p += withdrawn_routes_len;
        length -= withdrawn_routes_len;
    }

    /* total path attribute length */
    ND_TCHECK2(p[0], 2);
    if (length < 2)
        goto trunc;
    len = EXTRACT_16BITS(p);
    p += 2;
    length -= 2;

    if (withdrawn_routes_len == 0 && len == 0 && length == 0) {
        /* No withdrawn routes, no path attributes, no NLRI */
        ND_PRINT((ndo, "\n\t End-of-Rib Marker (empty NLRI)"));
        return;
    }

    if (len) {
        /* do something more useful!*/
        while (len) {
            int aflags, atype, alenlen, alen;

            /* attribute header: flags, type, then a 1- or 2-byte length
             * depending on the Extended Length flag (0x10) */
            ND_TCHECK2(p[0], 2);
            if (len < 2)
                goto trunc;
            if (length < 2)
                goto trunc;
            aflags = *p;
            atype = *(p + 1);
            p += 2;
            len -= 2;
            length -= 2;
            alenlen = bgp_attr_lenlen(aflags, p);
            ND_TCHECK2(p[0], alenlen);
            if (len < alenlen)
                goto trunc;
            if (length < alenlen)
                goto trunc;
            alen = bgp_attr_len(aflags, p);
            p += alenlen;
            len -= alenlen;
            length -= alenlen;

            ND_PRINT((ndo, "\n\t %s (%u), length: %u",
                      tok2str(bgp_attr_values, "Unknown Attribute", atype),
                      atype,
                      alen));

            if (aflags) {
                /* O=Optional, T=Transitive, P=Partial, E=Extended Length */
                ND_PRINT((ndo, ", Flags [%s%s%s%s",
                          aflags & 0x80 ? "O" : "",
                          aflags & 0x40 ? "T" : "",
                          aflags & 0x20 ? "P" : "",
                          aflags & 0x10 ?
"E" : "")); if (aflags & 0xf) ND_PRINT((ndo, "+%x", aflags & 0xf)); ND_PRINT((ndo, "]: ")); } if (len < alen) goto trunc; if (length < alen) goto trunc; if (!bgp_attr_print(ndo, atype, p, alen)) goto trunc; p += alen; len -= alen; length -= alen; } } if (length) { /* * XXX - what if they're using the "Advertisement of * Multiple Paths in BGP" feature: * * https://datatracker.ietf.org/doc/draft-ietf-idr-add-paths/ * * http://tools.ietf.org/html/draft-ietf-idr-add-paths-06 */ ND_PRINT((ndo, "\n\t Updated routes:")); while (length) { char buf[MAXHOSTNAMELEN + 100]; i = decode_prefix4(ndo, p, length, buf, sizeof(buf)); if (i == -1) { ND_PRINT((ndo, "\n\t (illegal prefix length)")); break; } else if (i == -2) goto trunc; else if (i == -3) goto trunc; /* bytes left, but not enough */ else { ND_PRINT((ndo, "\n\t %s", buf)); p += i; length -= i; } } } return; trunc: ND_PRINT((ndo, "[|BGP]")); } static void bgp_notification_print(netdissect_options *ndo, const u_char *dat, int length) { struct bgp_notification bgpn; const u_char *tptr; uint8_t shutdown_comm_length; uint8_t remainder_offset; ND_TCHECK2(dat[0], BGP_NOTIFICATION_SIZE); memcpy(&bgpn, dat, BGP_NOTIFICATION_SIZE); /* some little sanity checking */ if (length<BGP_NOTIFICATION_SIZE) return; ND_PRINT((ndo, ", %s (%u)", tok2str(bgp_notify_major_values, "Unknown Error", bgpn.bgpn_major), bgpn.bgpn_major)); switch (bgpn.bgpn_major) { case BGP_NOTIFY_MAJOR_MSG: ND_PRINT((ndo, ", subcode %s (%u)", tok2str(bgp_notify_minor_msg_values, "Unknown", bgpn.bgpn_minor), bgpn.bgpn_minor)); break; case BGP_NOTIFY_MAJOR_OPEN: ND_PRINT((ndo, ", subcode %s (%u)", tok2str(bgp_notify_minor_open_values, "Unknown", bgpn.bgpn_minor), bgpn.bgpn_minor)); break; case BGP_NOTIFY_MAJOR_UPDATE: ND_PRINT((ndo, ", subcode %s (%u)", tok2str(bgp_notify_minor_update_values, "Unknown", bgpn.bgpn_minor), bgpn.bgpn_minor)); break; case BGP_NOTIFY_MAJOR_FSM: ND_PRINT((ndo, " subcode %s (%u)", tok2str(bgp_notify_minor_fsm_values, "Unknown", 
bgpn.bgpn_minor), bgpn.bgpn_minor)); break; case BGP_NOTIFY_MAJOR_CAP: ND_PRINT((ndo, " subcode %s (%u)", tok2str(bgp_notify_minor_cap_values, "Unknown", bgpn.bgpn_minor), bgpn.bgpn_minor)); break; case BGP_NOTIFY_MAJOR_CEASE: ND_PRINT((ndo, ", subcode %s (%u)", tok2str(bgp_notify_minor_cease_values, "Unknown", bgpn.bgpn_minor), bgpn.bgpn_minor)); /* draft-ietf-idr-cease-subcode-02 mentions optionally 7 bytes * for the maxprefix subtype, which may contain AFI, SAFI and MAXPREFIXES */ if(bgpn.bgpn_minor == BGP_NOTIFY_MINOR_CEASE_MAXPRFX && length >= BGP_NOTIFICATION_SIZE + 7) { tptr = dat + BGP_NOTIFICATION_SIZE; ND_TCHECK2(*tptr, 7); ND_PRINT((ndo, ", AFI %s (%u), SAFI %s (%u), Max Prefixes: %u", tok2str(af_values, "Unknown", EXTRACT_16BITS(tptr)), EXTRACT_16BITS(tptr), tok2str(bgp_safi_values, "Unknown", *(tptr+2)), *(tptr+2), EXTRACT_32BITS(tptr+3))); } /* * draft-ietf-idr-shutdown describes a method to send a communication * intended for human consumption regarding the Administrative Shutdown */ if ((bgpn.bgpn_minor == BGP_NOTIFY_MINOR_CEASE_SHUT || bgpn.bgpn_minor == BGP_NOTIFY_MINOR_CEASE_RESET) && length >= BGP_NOTIFICATION_SIZE + 1) { tptr = dat + BGP_NOTIFICATION_SIZE; ND_TCHECK2(*tptr, 1); shutdown_comm_length = *(tptr); remainder_offset = 0; /* garbage, hexdump it all */ if (shutdown_comm_length > BGP_NOTIFY_MINOR_CEASE_ADMIN_SHUTDOWN_LEN || shutdown_comm_length > length - (BGP_NOTIFICATION_SIZE + 1)) { ND_PRINT((ndo, ", invalid Shutdown Communication length")); } else if (shutdown_comm_length == 0) { ND_PRINT((ndo, ", empty Shutdown Communication")); remainder_offset += 1; } /* a proper shutdown communication */ else { ND_TCHECK2(*(tptr+1), shutdown_comm_length); ND_PRINT((ndo, ", Shutdown Communication (length: %u): \"", shutdown_comm_length)); (void)fn_printn(ndo, tptr+1, shutdown_comm_length, NULL); ND_PRINT((ndo, "\"")); remainder_offset += shutdown_comm_length + 1; } /* if there is trailing data, hexdump it */ if(length - (remainder_offset + 
BGP_NOTIFICATION_SIZE) > 0) {
                ND_PRINT((ndo, ", Data: (length: %u)", length - (remainder_offset + BGP_NOTIFICATION_SIZE)));
                hex_print(ndo, "\n\t\t", tptr + remainder_offset, length - (remainder_offset + BGP_NOTIFICATION_SIZE));
            }
        }
        break;
    default:
        break;
    }

    return;
trunc:
    ND_PRINT((ndo, "[|BGP]"));
}

/*
 * Print a BGP ROUTE-REFRESH message: just the AFI/SAFI pair.
 */
static void
bgp_route_refresh_print(netdissect_options *ndo,
                        const u_char *pptr, int len)
{
    const struct bgp_route_refresh *bgp_route_refresh_header;

    ND_TCHECK2(pptr[0], BGP_ROUTE_REFRESH_SIZE);

    /* some little sanity checking */
    if (len<BGP_ROUTE_REFRESH_SIZE)
        return;

    bgp_route_refresh_header = (const struct bgp_route_refresh *)pptr;

    ND_PRINT((ndo, "\n\t AFI %s (%u), SAFI %s (%u)",
              tok2str(af_values,"Unknown",
                      /* this stinks but the compiler pads the structure
                       * weird */
                      EXTRACT_16BITS(&bgp_route_refresh_header->afi)),
              EXTRACT_16BITS(&bgp_route_refresh_header->afi),
              tok2str(bgp_safi_values,"Unknown",
                      bgp_route_refresh_header->safi),
              bgp_route_refresh_header->safi));

    if (ndo->ndo_vflag > 1) {
        ND_TCHECK2(*pptr, len);
        print_unknown_data(ndo, pptr, "\n\t ", len);
    }

    return;
trunc:
    ND_PRINT((ndo, "[|BGP]"));
}

/*
 * Print one BGP message starting at its fixed header; dispatch on the
 * message type.  Returns 0 on truncation, 1 otherwise.
 */
static int
bgp_header_print(netdissect_options *ndo,
                 const u_char *dat, int length)
{
    struct bgp bgp;

    ND_TCHECK2(dat[0], BGP_SIZE);
    memcpy(&bgp, dat, BGP_SIZE);
    ND_PRINT((ndo, "\n\t%s Message (%u), length: %u",
              tok2str(bgp_msg_values, "Unknown", bgp.bgp_type),
              bgp.bgp_type,
              length));

    switch (bgp.bgp_type) {
    case BGP_OPEN:
        bgp_open_print(ndo, dat, length);
        break;
    case BGP_UPDATE:
        bgp_update_print(ndo, dat, length);
        break;
    case BGP_NOTIFICATION:
        bgp_notification_print(ndo, dat, length);
        break;
    case BGP_KEEPALIVE:
        /* keepalive carries no body beyond the header */
        break;
    case BGP_ROUTE_REFRESH:
        bgp_route_refresh_print(ndo, dat, length);
        break;
    default:
        /* we have no decoder for the BGP message */
        ND_TCHECK2(*dat, length);
        ND_PRINT((ndo, "\n\t no Message %u decoder", bgp.bgp_type));
        print_unknown_data(ndo, dat, "\n\t ", length);
        break;
    }
    return 1;
trunc:
    ND_PRINT((ndo, "[|BGP]"));
    return 0;
}

/*
 * Public entry point: scan the buffer for the 16-byte all-ones BGP
 * marker and print each message found; resynchronizes on the marker
 * if a message boundary is lost.
 */
void
bgp_print(netdissect_options *ndo,
          const u_char *dat, int length)
{
    const u_char *p;
    const u_char *ep;
    const u_char *start;
    const u_char marker[] = {
        0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff,
        0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff,
    };
    struct bgp bgp;
    uint16_t hlen;

    /* clamp the end pointer to the captured data */
    ep = dat + length;
    if (ndo->ndo_snapend < dat + length)
        ep = ndo->ndo_snapend;

    ND_PRINT((ndo, ": BGP"));

    if (ndo->ndo_vflag < 1) /* lets be less chatty */
        return;

    p = dat;
    start = p;
    while (p < ep) {
        if (!ND_TTEST2(p[0], 1))
            break;
        if (p[0] != 0xff) {
            p++;
            continue;
        }

        if (!ND_TTEST2(p[0], sizeof(marker)))
            break;
        if (memcmp(p, marker, sizeof(marker)) != 0) {
            p++;
            continue;
        }

        /* found BGP header */
        ND_TCHECK2(p[0], BGP_SIZE); /*XXX*/
        memcpy(&bgp, p, BGP_SIZE);

        if (start != p)
            ND_PRINT((ndo, " [|BGP]"));

        hlen = ntohs(bgp.bgp_len);
        if (hlen < BGP_SIZE) {
            ND_PRINT((ndo, "\n[|BGP Bogus header length %u < %u]", hlen, BGP_SIZE));
            break;
        }

        if (ND_TTEST2(p[0], hlen)) {
            if (!bgp_header_print(ndo, p, hlen))
                return;
            p += hlen;
            start = p;
        } else {
            ND_PRINT((ndo, "\n[|BGP %s]",
                      tok2str(bgp_msg_values,
                              "Unknown Message Type",
                              bgp.bgp_type)));
            break;
        }
    }

    return;
trunc:
    ND_PRINT((ndo, " [|BGP]"));
}

/*
 * Local Variables:
 * c-style: whitesmith
 * c-basic-offset: 4
 * End:
 */
/* * Copyright (C) 1999 WIDE Project. * All rights reserved. * * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following conditions * are met: * 1. Redistributions of source code must retain the above copyright * notice, this list of conditions and the following disclaimer. * 2. Redistributions in binary form must reproduce the above copyright * notice, this list of conditions and the following disclaimer in the * documentation and/or other materials provided with the distribution. * 3. Neither the name of the project nor the names of its contributors * may be used to endorse or promote products derived from this software * without specific prior written permission. * * THIS SOFTWARE IS PROVIDED BY THE PROJECT AND CONTRIBUTORS ``AS IS'' AND * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE * ARE DISCLAIMED. IN NO EVENT SHALL THE PROJECT OR CONTRIBUTORS BE LIABLE * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF * SUCH DAMAGE. * * Extensively modified by Hannes Gredler (hannes@gredler.at) for more * complete BGP support. 
 */

/* \summary: Border Gateway Protocol (BGP) printer */

#ifdef HAVE_CONFIG_H
#include "config.h"
#endif

#include <netdissect-stdinc.h>

#include <stdio.h>
#include <string.h>

#include "netdissect.h"
#include "addrtoname.h"
#include "extract.h"
#include "af.h"
#include "l2vpn.h"

/* fixed BGP message header: 16-byte marker, length, type */
struct bgp {
    uint8_t bgp_marker[16];
    uint16_t bgp_len;
    uint8_t bgp_type;
};
#define BGP_SIZE        19    /* unaligned */

/* BGP message types */
#define BGP_OPEN                1
#define BGP_UPDATE              2
#define BGP_NOTIFICATION        3
#define BGP_KEEPALIVE           4
#define BGP_ROUTE_REFRESH       5

static const struct tok bgp_msg_values[] = {
    { BGP_OPEN,                 "Open"},
    { BGP_UPDATE,               "Update"},
    { BGP_NOTIFICATION,         "Notification"},
    { BGP_KEEPALIVE,            "Keepalive"},
    { BGP_ROUTE_REFRESH,        "Route Refresh"},
    { 0, NULL}
};

/* header plus the fixed part of an OPEN message body */
struct bgp_open {
    uint8_t bgpo_marker[16];
    uint16_t bgpo_len;
    uint8_t bgpo_type;
    uint8_t bgpo_version;
    uint16_t bgpo_myas;
    uint16_t bgpo_holdtime;
    uint32_t bgpo_id;
    uint8_t bgpo_optlen;
    /* options should follow */
};
#define BGP_OPEN_SIZE        29    /* unaligned */

/* OPEN optional-parameter header */
struct bgp_opt {
    uint8_t bgpopt_type;
    uint8_t bgpopt_len;
    /* variable length */
};
#define BGP_OPT_SIZE        2    /* some compilers may pad to 4 bytes */
#define BGP_CAP_HEADER_SIZE 2    /* some compilers may pad to 4 bytes */

/* header plus the fixed part of a NOTIFICATION message body */
struct bgp_notification {
    uint8_t bgpn_marker[16];
    uint16_t bgpn_len;
    uint8_t bgpn_type;
    uint8_t bgpn_major;
    uint8_t bgpn_minor;
};
#define BGP_NOTIFICATION_SIZE        21    /* unaligned */

struct bgp_route_refresh {
    uint8_t  bgp_marker[16];
    uint16_t len;
    uint8_t  type;
    uint8_t  afi[2]; /* the compiler messes this structure up */
    uint8_t  res;    /* when doing misaligned sequences of int8 and int16 */
    uint8_t  safi;   /* afi should be int16 - so we have to access it using */
};                   /* EXTRACT_16BITS(&bgp_route_refresh->afi) (sigh) */
#define BGP_ROUTE_REFRESH_SIZE          23

/* path-attribute length field is 1 or 2 bytes depending on the
 * Extended Length flag (0x10) in the attribute flags */
#define bgp_attr_lenlen(flags, p) \
    (((flags) & 0x10) ? 2 : 1)
#define bgp_attr_len(flags, p) \
    (((flags) & 0x10) ? EXTRACT_16BITS(p) : *(p))

/* path-attribute type codes */
#define BGPTYPE_ORIGIN                   1
#define BGPTYPE_AS_PATH                  2
#define BGPTYPE_NEXT_HOP                 3
#define BGPTYPE_MULTI_EXIT_DISC          4
#define BGPTYPE_LOCAL_PREF               5
#define BGPTYPE_ATOMIC_AGGREGATE         6
#define BGPTYPE_AGGREGATOR               7
#define BGPTYPE_COMMUNITIES              8    /* RFC1997 */
#define BGPTYPE_ORIGINATOR_ID            9    /* RFC4456 */
#define BGPTYPE_CLUSTER_LIST            10    /* RFC4456 */
#define BGPTYPE_DPA                     11    /* deprecated, draft-ietf-idr-bgp-dpa */
#define BGPTYPE_ADVERTISERS             12    /* deprecated RFC1863 */
#define BGPTYPE_RCID_PATH               13    /* deprecated RFC1863 */
#define BGPTYPE_MP_REACH_NLRI           14    /* RFC4760 */
#define BGPTYPE_MP_UNREACH_NLRI         15    /* RFC4760 */
#define BGPTYPE_EXTD_COMMUNITIES        16    /* RFC4360 */
#define BGPTYPE_AS4_PATH                17    /* RFC6793 */
#define BGPTYPE_AGGREGATOR4             18    /* RFC6793 */
#define BGPTYPE_PMSI_TUNNEL             22    /* RFC6514 */
#define BGPTYPE_TUNNEL_ENCAP            23    /* RFC5512 */
#define BGPTYPE_TRAFFIC_ENG             24    /* RFC5543 */
#define BGPTYPE_IPV6_EXTD_COMMUNITIES   25    /* RFC5701 */
#define BGPTYPE_AIGP                    26    /* RFC7311 */
#define BGPTYPE_PE_DISTINGUISHER_LABEL  27    /* RFC6514 */
#define BGPTYPE_ENTROPY_LABEL           28    /* RFC6790 */
#define BGPTYPE_LARGE_COMMUNITY         32    /* draft-ietf-idr-large-community-05 */
#define BGPTYPE_ATTR_SET               128    /* RFC6368 */

#define BGP_MP_NLRI_MINSIZE              3    /* End of RIB Marker detection */

static const struct tok bgp_attr_values[] = {
    { BGPTYPE_ORIGIN,           "Origin"},
    { BGPTYPE_AS_PATH,          "AS Path"},
    { BGPTYPE_AS4_PATH,         "AS4 Path"},
    { BGPTYPE_NEXT_HOP,         "Next Hop"},
    { BGPTYPE_MULTI_EXIT_DISC,  "Multi Exit Discriminator"},
    { BGPTYPE_LOCAL_PREF,       "Local Preference"},
    { BGPTYPE_ATOMIC_AGGREGATE, "Atomic Aggregate"},
    { BGPTYPE_AGGREGATOR,       "Aggregator"},
    { BGPTYPE_AGGREGATOR4,      "Aggregator4"},
    { BGPTYPE_COMMUNITIES,      "Community"},
    { BGPTYPE_ORIGINATOR_ID,    "Originator ID"},
    { BGPTYPE_CLUSTER_LIST,     "Cluster List"},
    { BGPTYPE_DPA,              "DPA"},
    { BGPTYPE_ADVERTISERS,      "Advertisers"},
    { BGPTYPE_RCID_PATH,        "RCID Path / Cluster ID"},
    { BGPTYPE_MP_REACH_NLRI,    "Multi-Protocol Reach NLRI"},
    { BGPTYPE_MP_UNREACH_NLRI,  "Multi-Protocol Unreach NLRI"},
    { BGPTYPE_EXTD_COMMUNITIES, "Extended Community"},
    { BGPTYPE_PMSI_TUNNEL,      "PMSI Tunnel"},
    { BGPTYPE_TUNNEL_ENCAP,     "Tunnel Encapsulation"},
    { BGPTYPE_TRAFFIC_ENG,      "Traffic Engineering"},
    { BGPTYPE_IPV6_EXTD_COMMUNITIES, "IPv6 Extended Community"},
    { BGPTYPE_AIGP,             "Accumulated IGP Metric"},
    { BGPTYPE_PE_DISTINGUISHER_LABEL, "PE Distinguisher Label"},
    { BGPTYPE_ENTROPY_LABEL,    "Entropy Label"},
    { BGPTYPE_LARGE_COMMUNITY,  "Large Community"},
    { BGPTYPE_ATTR_SET,         "Attribute Set"},
    { 255,                      "Reserved for development"},
    { 0, NULL}
};

/* AS_PATH segment types */
#define BGP_AS_SET             1
#define BGP_AS_SEQUENCE        2
#define BGP_CONFED_AS_SEQUENCE 3 /* draft-ietf-idr-rfc3065bis-01 */
#define BGP_CONFED_AS_SET      4 /* draft-ietf-idr-rfc3065bis-01 */

#define BGP_AS_SEG_TYPE_MIN    BGP_AS_SET
#define BGP_AS_SEG_TYPE_MAX    BGP_CONFED_AS_SET

/* opening/closing delimiters printed around each AS_PATH segment type */
static const struct tok bgp_as_path_segment_open_values[] = {
    { BGP_AS_SEQUENCE,         ""},
    { BGP_AS_SET,              "{ "},
    { BGP_CONFED_AS_SEQUENCE,  "( "},
    { BGP_CONFED_AS_SET,       "({ "},
    { 0, NULL}
};

static const struct tok bgp_as_path_segment_close_values[] = {
    { BGP_AS_SEQUENCE,         ""},
    { BGP_AS_SET,              "}"},
    { BGP_CONFED_AS_SEQUENCE,  ")"},
    { BGP_CONFED_AS_SET,       "})"},
    { 0, NULL}
};

/* OPEN optional-parameter types */
#define BGP_OPT_AUTH                    1
#define BGP_OPT_CAP                     2

static const struct tok bgp_opt_values[] = {
    { BGP_OPT_AUTH,             "Authentication Information"},
    { BGP_OPT_CAP,              "Capabilities Advertisement"},
    { 0, NULL}
};

/* capability codes */
#define BGP_CAPCODE_MP                  1 /* RFC2858 */
#define BGP_CAPCODE_RR                  2 /* RFC2918 */
#define BGP_CAPCODE_ORF                 3 /* RFC5291 */
#define BGP_CAPCODE_MR                  4 /* RFC3107 */
#define BGP_CAPCODE_EXT_NH              5 /* RFC5549 */
#define BGP_CAPCODE_RESTART            64 /* RFC4724 */
#define BGP_CAPCODE_AS_NEW             65 /* RFC6793 */
#define BGP_CAPCODE_DYN_CAP            67 /* draft-ietf-idr-dynamic-cap */
#define BGP_CAPCODE_MULTISESS          68 /* draft-ietf-idr-bgp-multisession */
#define BGP_CAPCODE_ADD_PATH           69 /* RFC7911 */
#define BGP_CAPCODE_ENH_RR             70 /* draft-keyur-bgp-enhanced-route-refresh */
#define BGP_CAPCODE_RR_CISCO          128

static const struct tok bgp_capcode_values[] = {
    { BGP_CAPCODE_MP,           "Multiprotocol Extensions"},
    { BGP_CAPCODE_RR,           "Route Refresh"},
    { BGP_CAPCODE_ORF,          "Cooperative Route Filtering"},
    { BGP_CAPCODE_MR,           "Multiple Routes to a Destination"},
    { BGP_CAPCODE_EXT_NH,       "Extended Next Hop Encoding"},
    { BGP_CAPCODE_RESTART,      "Graceful Restart"},
    { BGP_CAPCODE_AS_NEW,       "32-Bit AS Number"},
    { BGP_CAPCODE_DYN_CAP,      "Dynamic Capability"},
    { BGP_CAPCODE_MULTISESS,    "Multisession BGP"},
    { BGP_CAPCODE_ADD_PATH,     "Multiple Paths"},
    { BGP_CAPCODE_ENH_RR,       "Enhanced Route Refresh"},
    { BGP_CAPCODE_RR_CISCO,     "Route Refresh (Cisco)"},
    { 0, NULL}
};

/* NOTIFICATION major error codes */
#define BGP_NOTIFY_MAJOR_MSG            1
#define BGP_NOTIFY_MAJOR_OPEN           2
#define BGP_NOTIFY_MAJOR_UPDATE         3
#define BGP_NOTIFY_MAJOR_HOLDTIME       4
#define BGP_NOTIFY_MAJOR_FSM            5
#define BGP_NOTIFY_MAJOR_CEASE          6
#define BGP_NOTIFY_MAJOR_CAP            7

static const struct tok bgp_notify_major_values[] = {
    { BGP_NOTIFY_MAJOR_MSG,     "Message Header Error"},
    { BGP_NOTIFY_MAJOR_OPEN,    "OPEN Message Error"},
    { BGP_NOTIFY_MAJOR_UPDATE,  "UPDATE Message Error"},
    { BGP_NOTIFY_MAJOR_HOLDTIME,"Hold Timer Expired"},
    { BGP_NOTIFY_MAJOR_FSM,     "Finite State Machine Error"},
    { BGP_NOTIFY_MAJOR_CEASE,   "Cease"},
    { BGP_NOTIFY_MAJOR_CAP,     "Capability Message Error"},
    { 0, NULL}
};

/* draft-ietf-idr-cease-subcode-02 */
#define BGP_NOTIFY_MINOR_CEASE_MAXPRFX  1
/* draft-ietf-idr-shutdown-07 */
#define BGP_NOTIFY_MINOR_CEASE_SHUT     2
#define BGP_NOTIFY_MINOR_CEASE_RESET    4
#define BGP_NOTIFY_MINOR_CEASE_ADMIN_SHUTDOWN_LEN 128

static const struct tok bgp_notify_minor_cease_values[] = {
    { BGP_NOTIFY_MINOR_CEASE_MAXPRFX, "Maximum Number of Prefixes Reached"},
    { BGP_NOTIFY_MINOR_CEASE_SHUT,    "Administrative Shutdown"},
    { 3,                        "Peer Unconfigured"},
    { BGP_NOTIFY_MINOR_CEASE_RESET,   "Administrative Reset"},
    { 5,                        "Connection Rejected"},
    { 6,                        "Other Configuration Change"},
    { 7,                        "Connection Collision Resolution"},
    { 0, NULL}
};

static const struct tok bgp_notify_minor_msg_values[] = {
    { 1,                        "Connection Not Synchronized"},
    { 2,                        "Bad Message Length"},
    { 3,                        "Bad Message Type"},
    { 0, NULL}
};

static const struct tok bgp_notify_minor_open_values[] = {
    { 1,                        "Unsupported Version Number"},
    { 2,                        "Bad Peer AS"},
    { 3,                        "Bad BGP Identifier"},
    { 4,                        "Unsupported Optional Parameter"},
    { 5,                        "Authentication Failure"},
    { 6,                        "Unacceptable Hold Time"},
    { 7,                        "Capability Message Error"},
    { 0, NULL}
};

static const struct tok bgp_notify_minor_update_values[] = {
    { 1,                        "Malformed Attribute List"},
    { 2,                        "Unrecognized Well-known Attribute"},
    { 3,                        "Missing Well-known Attribute"},
    { 4,                        "Attribute Flags Error"},
    { 5,                        "Attribute Length Error"},
    { 6,                        "Invalid ORIGIN Attribute"},
    { 7,                        "AS Routing Loop"},
    { 8,                        "Invalid NEXT_HOP Attribute"},
    { 9,                        "Optional Attribute Error"},
    { 10,                       "Invalid Network Field"},
    { 11,                       "Malformed AS_PATH"},
    { 0, NULL}
};

static const struct tok bgp_notify_minor_fsm_values[] = {
    { 0,                        "Unspecified Error"},
    { 1,                        "In OpenSent State"},
    { 2,                        "In OpenConfirm State"},
    { 3,                        "In Established State"},
    { 0, NULL }
};

static const struct tok bgp_notify_minor_cap_values[] = {
    { 1,                        "Invalid Action Value" },
    { 2,                        "Invalid Capability Length" },
    { 3,                        "Malformed Capability Value" },
    { 4,                        "Unsupported Capability Code" },
    { 0, NULL }
};

static const struct tok bgp_origin_values[] = {
    { 0,                        "IGP"},
    { 1,                        "EGP"},
    { 2,                        "Incomplete"},
    { 0, NULL}
};

/* PMSI tunnel types (BGPTYPE_PMSI_TUNNEL attribute) */
#define BGP_PMSI_TUNNEL_RSVP_P2MP  1
#define BGP_PMSI_TUNNEL_LDP_P2MP   2
#define BGP_PMSI_TUNNEL_PIM_SSM    3
#define BGP_PMSI_TUNNEL_PIM_SM     4
#define BGP_PMSI_TUNNEL_PIM_BIDIR  5
#define BGP_PMSI_TUNNEL_INGRESS    6
#define BGP_PMSI_TUNNEL_LDP_MP2MP  7

static const struct tok bgp_pmsi_tunnel_values[] = {
    { BGP_PMSI_TUNNEL_RSVP_P2MP, "RSVP-TE P2MP LSP"},
    { BGP_PMSI_TUNNEL_LDP_P2MP,  "LDP P2MP LSP"},
    { BGP_PMSI_TUNNEL_PIM_SSM,   "PIM-SSM Tree"},
    { BGP_PMSI_TUNNEL_PIM_SM,    "PIM-SM Tree"},
    { BGP_PMSI_TUNNEL_PIM_BIDIR, "PIM-Bidir Tree"},
    { BGP_PMSI_TUNNEL_INGRESS,   "Ingress Replication"},
    { BGP_PMSI_TUNNEL_LDP_MP2MP, "LDP MP2MP LSP"},
    { 0, NULL}
};

static const struct tok bgp_pmsi_flag_values[] = {
    { 0x01,
"Leaf Information required"}, { 0, NULL} }; #define BGP_AIGP_TLV 1 static const struct tok bgp_aigp_values[] = { { BGP_AIGP_TLV, "AIGP"}, { 0, NULL} }; /* Subsequent address family identifier, RFC2283 section 7 */ #define SAFNUM_RES 0 #define SAFNUM_UNICAST 1 #define SAFNUM_MULTICAST 2 #define SAFNUM_UNIMULTICAST 3 /* deprecated now */ /* labeled BGP RFC3107 */ #define SAFNUM_LABUNICAST 4 /* RFC6514 */ #define SAFNUM_MULTICAST_VPN 5 /* draft-nalawade-kapoor-tunnel-safi */ #define SAFNUM_TUNNEL 64 /* RFC4761 */ #define SAFNUM_VPLS 65 /* RFC6037 */ #define SAFNUM_MDT 66 /* RFC4364 */ #define SAFNUM_VPNUNICAST 128 /* RFC6513 */ #define SAFNUM_VPNMULTICAST 129 #define SAFNUM_VPNUNIMULTICAST 130 /* deprecated now */ /* RFC4684 */ #define SAFNUM_RT_ROUTING_INFO 132 #define BGP_VPN_RD_LEN 8 static const struct tok bgp_safi_values[] = { { SAFNUM_RES, "Reserved"}, { SAFNUM_UNICAST, "Unicast"}, { SAFNUM_MULTICAST, "Multicast"}, { SAFNUM_UNIMULTICAST, "Unicast+Multicast"}, { SAFNUM_LABUNICAST, "labeled Unicast"}, { SAFNUM_TUNNEL, "Tunnel"}, { SAFNUM_VPLS, "VPLS"}, { SAFNUM_MDT, "MDT"}, { SAFNUM_VPNUNICAST, "labeled VPN Unicast"}, { SAFNUM_VPNMULTICAST, "labeled VPN Multicast"}, { SAFNUM_VPNUNIMULTICAST, "labeled VPN Unicast+Multicast"}, { SAFNUM_RT_ROUTING_INFO, "Route Target Routing Information"}, { SAFNUM_MULTICAST_VPN, "Multicast VPN"}, { 0, NULL } }; /* well-known community */ #define BGP_COMMUNITY_NO_EXPORT 0xffffff01 #define BGP_COMMUNITY_NO_ADVERT 0xffffff02 #define BGP_COMMUNITY_NO_EXPORT_SUBCONFED 0xffffff03 /* Extended community type - draft-ietf-idr-bgp-ext-communities-05 */ #define BGP_EXT_COM_RT_0 0x0002 /* Route Target,Format AS(2bytes):AN(4bytes) */ #define BGP_EXT_COM_RT_1 0x0102 /* Route Target,Format IP address:AN(2bytes) */ #define BGP_EXT_COM_RT_2 0x0202 /* Route Target,Format AN(4bytes):local(2bytes) */ #define BGP_EXT_COM_RO_0 0x0003 /* Route Origin,Format AS(2bytes):AN(4bytes) */ #define BGP_EXT_COM_RO_1 0x0103 /* Route Origin,Format IP 
address:AN(2bytes) */ #define BGP_EXT_COM_RO_2 0x0203 /* Route Origin,Format AN(4bytes):local(2bytes) */ #define BGP_EXT_COM_LINKBAND 0x4004 /* Link Bandwidth,Format AS(2B):Bandwidth(4B) */ /* rfc2547 bgp-mpls-vpns */ #define BGP_EXT_COM_VPN_ORIGIN 0x0005 /* OSPF Domain ID / VPN of Origin - draft-rosen-vpns-ospf-bgp-mpls */ #define BGP_EXT_COM_VPN_ORIGIN2 0x0105 /* duplicate - keep for backwards compatability */ #define BGP_EXT_COM_VPN_ORIGIN3 0x0205 /* duplicate - keep for backwards compatability */ #define BGP_EXT_COM_VPN_ORIGIN4 0x8005 /* duplicate - keep for backwards compatability */ #define BGP_EXT_COM_OSPF_RTYPE 0x0306 /* OSPF Route Type,Format Area(4B):RouteType(1B):Options(1B) */ #define BGP_EXT_COM_OSPF_RTYPE2 0x8000 /* duplicate - keep for backwards compatability */ #define BGP_EXT_COM_OSPF_RID 0x0107 /* OSPF Router ID,Format RouterID(4B):Unused(2B) */ #define BGP_EXT_COM_OSPF_RID2 0x8001 /* duplicate - keep for backwards compatability */ #define BGP_EXT_COM_L2INFO 0x800a /* draft-kompella-ppvpn-l2vpn */ #define BGP_EXT_COM_SOURCE_AS 0x0009 /* RFC-ietf-l3vpn-2547bis-mcast-bgp-08.txt */ #define BGP_EXT_COM_VRF_RT_IMP 0x010b /* RFC-ietf-l3vpn-2547bis-mcast-bgp-08.txt */ #define BGP_EXT_COM_L2VPN_RT_0 0x000a /* L2VPN Identifier,Format AS(2bytes):AN(4bytes) */ #define BGP_EXT_COM_L2VPN_RT_1 0xF10a /* L2VPN Identifier,Format IP address:AN(2bytes) */ /* http://www.cisco.com/en/US/tech/tk436/tk428/technologies_tech_note09186a00801eb09a.shtml */ #define BGP_EXT_COM_EIGRP_GEN 0x8800 #define BGP_EXT_COM_EIGRP_METRIC_AS_DELAY 0x8801 #define BGP_EXT_COM_EIGRP_METRIC_REL_NH_BW 0x8802 #define BGP_EXT_COM_EIGRP_METRIC_LOAD_MTU 0x8803 #define BGP_EXT_COM_EIGRP_EXT_REMAS_REMID 0x8804 #define BGP_EXT_COM_EIGRP_EXT_REMPROTO_REMMETRIC 0x8805 static const struct tok bgp_extd_comm_flag_values[] = { { 0x8000, "vendor-specific"}, { 0x4000, "non-transitive"}, { 0, NULL}, }; static const struct tok bgp_extd_comm_subtype_values[] = { { BGP_EXT_COM_RT_0, "target"}, { 
BGP_EXT_COM_RT_1, "target"}, { BGP_EXT_COM_RT_2, "target"}, { BGP_EXT_COM_RO_0, "origin"}, { BGP_EXT_COM_RO_1, "origin"}, { BGP_EXT_COM_RO_2, "origin"}, { BGP_EXT_COM_LINKBAND, "link-BW"}, { BGP_EXT_COM_VPN_ORIGIN, "ospf-domain"}, { BGP_EXT_COM_VPN_ORIGIN2, "ospf-domain"}, { BGP_EXT_COM_VPN_ORIGIN3, "ospf-domain"}, { BGP_EXT_COM_VPN_ORIGIN4, "ospf-domain"}, { BGP_EXT_COM_OSPF_RTYPE, "ospf-route-type"}, { BGP_EXT_COM_OSPF_RTYPE2, "ospf-route-type"}, { BGP_EXT_COM_OSPF_RID, "ospf-router-id"}, { BGP_EXT_COM_OSPF_RID2, "ospf-router-id"}, { BGP_EXT_COM_L2INFO, "layer2-info"}, { BGP_EXT_COM_EIGRP_GEN , "eigrp-general-route (flag, tag)" }, { BGP_EXT_COM_EIGRP_METRIC_AS_DELAY , "eigrp-route-metric (AS, delay)" }, { BGP_EXT_COM_EIGRP_METRIC_REL_NH_BW , "eigrp-route-metric (reliability, nexthop, bandwidth)" }, { BGP_EXT_COM_EIGRP_METRIC_LOAD_MTU , "eigrp-route-metric (load, MTU)" }, { BGP_EXT_COM_EIGRP_EXT_REMAS_REMID , "eigrp-external-route (remote-AS, remote-ID)" }, { BGP_EXT_COM_EIGRP_EXT_REMPROTO_REMMETRIC , "eigrp-external-route (remote-proto, remote-metric)" }, { BGP_EXT_COM_SOURCE_AS, "source-AS" }, { BGP_EXT_COM_VRF_RT_IMP, "vrf-route-import"}, { BGP_EXT_COM_L2VPN_RT_0, "l2vpn-id"}, { BGP_EXT_COM_L2VPN_RT_1, "l2vpn-id"}, { 0, NULL}, }; /* OSPF codes for BGP_EXT_COM_OSPF_RTYPE draft-rosen-vpns-ospf-bgp-mpls */ #define BGP_OSPF_RTYPE_RTR 1 /* OSPF Router LSA */ #define BGP_OSPF_RTYPE_NET 2 /* OSPF Network LSA */ #define BGP_OSPF_RTYPE_SUM 3 /* OSPF Summary LSA */ #define BGP_OSPF_RTYPE_EXT 5 /* OSPF External LSA, note that ASBR doesn't apply to MPLS-VPN */ #define BGP_OSPF_RTYPE_NSSA 7 /* OSPF NSSA External*/ #define BGP_OSPF_RTYPE_SHAM 129 /* OSPF-MPLS-VPN Sham link */ #define BGP_OSPF_RTYPE_METRIC_TYPE 0x1 /* LSB of RTYPE Options Field */ static const struct tok bgp_extd_comm_ospf_rtype_values[] = { { BGP_OSPF_RTYPE_RTR, "Router" }, { BGP_OSPF_RTYPE_NET, "Network" }, { BGP_OSPF_RTYPE_SUM, "Summary" }, { BGP_OSPF_RTYPE_EXT, "External" }, { BGP_OSPF_RTYPE_NSSA,"NSSA 
External" }, { BGP_OSPF_RTYPE_SHAM,"MPLS-VPN Sham" }, { 0, NULL }, };

/* ADD-PATH Send/Receive field values */
static const struct tok bgp_add_path_recvsend[] = {
    { 1, "Receive" },
    { 2, "Send" },
    { 3, "Both" },
    { 0, NULL },
};

/* Scratch buffer shared by the as_printf() callers below. */
static char astostr[20];

/*
 * as_printf
 *
 * Convert an AS number into a string and return string pointer.
 *
 * Depending on bflag is set or not, AS number is converted into ASDOT notation
 * or plain number notation.
 *
 */
static char *
as_printf(netdissect_options *ndo,
          char *str, int size, u_int asnum)
{
    if (!ndo->ndo_bflag || asnum <= 0xFFFF) {
        snprintf(str, size, "%u", asnum);
    } else {
        snprintf(str, size, "%u.%u", asnum >> 16, asnum & 0xFFFF);
    }
    return str;
}

/* Bail out to "badtlv" (return -3) when fewer than minlen bytes remain
 * in the current NLRI item. Relies on locals "itemlen" and label "badtlv". */
#define ITEMCHECK(minlen) if (itemlen < minlen) goto badtlv;

/*
 * Decode an IPv4 NLRI prefix into buf.
 * Returns bytes consumed, -1 on bogus prefix length, -2 on capture
 * truncation, -3 when the item is shorter than the prefix claims.
 */
int
decode_prefix4(netdissect_options *ndo,
               const u_char *pptr, u_int itemlen, char *buf, u_int buflen)
{
    struct in_addr addr;
    u_int plen, plenbytes;

    ND_TCHECK(pptr[0]);
    ITEMCHECK(1);
    plen = pptr[0];
    if (32 < plen)
        return -1;
    itemlen -= 1;

    memset(&addr, 0, sizeof(addr));
    plenbytes = (plen + 7) / 8;
    ND_TCHECK2(pptr[1], plenbytes);
    ITEMCHECK(plenbytes);
    memcpy(&addr, &pptr[1], plenbytes);
    if (plen % 8) {
        /* mask off the trailing bits beyond the prefix length */
        ((u_char *)&addr)[plenbytes - 1] &= ((0xff00 >> (plen % 8)) & 0xff);
    }
    snprintf(buf, buflen, "%s/%d", ipaddr_string(ndo, &addr), plen);
    return 1 + plenbytes;

trunc:
    return -2;

badtlv:
    return -3;
}

/*
 * Decode a labeled IPv4 NLRI prefix (RFC 3107) into buf.
 * Same return convention as decode_prefix4().
 */
static int
decode_labeled_prefix4(netdissect_options *ndo,
                       const u_char *pptr, u_int itemlen, char *buf, u_int buflen)
{
    struct in_addr addr;
    u_int plen, plenbytes;

    /* prefix length and label = 4 bytes */
    ND_TCHECK2(pptr[0], 4);
    ITEMCHECK(4);
    plen = pptr[0]; /* get prefix length */

    /* this is one of the weirdnesses of rfc3107
       the label length (actually the label + COS bits) is added
       to the prefix length;
       we also do only read out just one label -
       there is no real application for advertisement of
       stacked labels in a single BGP message
    */
    if (24 > plen)
        return -1;
    plen-=24; /* adjust prefixlen - labellength */

    if (32 < plen)
        return -1;
    itemlen -= 4;

    memset(&addr, 0, sizeof(addr));
    plenbytes = (plen + 7) / 8;
    ND_TCHECK2(pptr[4], plenbytes);
    ITEMCHECK(plenbytes);
    memcpy(&addr, &pptr[4], plenbytes);
    if (plen % 8) {
        ((u_char *)&addr)[plenbytes - 1] &= ((0xff00 >> (plen % 8)) & 0xff);
    }
    /* the label may get offsetted by 4 bits so lets shift it right */
    snprintf(buf, buflen, "%s/%d, label:%u %s",
             ipaddr_string(ndo, &addr),
             plen,
             EXTRACT_24BITS(pptr+1)>>4,
             ((pptr[3]&1)==0) ? "(BOGUS: Bottom of Stack NOT set!)" : "(bottom)" );

    return 4 + plenbytes;

trunc:
    return -2;

badtlv:
    return -3;
}

/*
 * bgp_vpn_ip_print
 *
 * print an ipv4 or ipv6 address into a buffer dependend on address length.
 */
static char *
bgp_vpn_ip_print(netdissect_options *ndo,
                 const u_char *pptr, u_int addr_length)
{
    /* worst case string is s fully formatted v6 address */
    static char addr[sizeof("1234:5678:89ab:cdef:1234:5678:89ab:cdef")];
    char *pos = addr;

    /* addr_length is in bits; only exactly-v4 or exactly-v6 are printable */
    switch(addr_length) {
    case (sizeof(struct in_addr) << 3): /* 32 */
        ND_TCHECK2(pptr[0], sizeof(struct in_addr));
        snprintf(pos, sizeof(addr), "%s", ipaddr_string(ndo, pptr));
        break;
    case (sizeof(struct in6_addr) << 3): /* 128 */
        ND_TCHECK2(pptr[0], sizeof(struct in6_addr));
        snprintf(pos, sizeof(addr), "%s", ip6addr_string(ndo, pptr));
        break;
    default:
        snprintf(pos, sizeof(addr), "bogus address length %u", addr_length);
        break;
    }
    pos += strlen(pos);

trunc:
    *(pos) = '\0';
    return (addr);
}

/*
 * bgp_vpn_sg_print
 *
 * print an multicast s,g entry into a buffer.
 * the s,g entry is encoded like this.
 *
 * +-----------------------------------+
 * | Multicast Source Length (1 octet) |
 * +-----------------------------------+
 * | Multicast Source (Variable)       |
 * +-----------------------------------+
 * | Multicast Group Length (1 octet)  |
 * +-----------------------------------+
 * | Multicast Group (Variable)        |
 * +-----------------------------------+
 *
 * return the number of bytes read from the wire.
 */
static int
bgp_vpn_sg_print(netdissect_options *ndo,
                 const u_char *pptr, char *buf, u_int buflen)
{
    uint8_t addr_length;
    u_int total_length, offset;

    total_length = 0;

    /* Source address length, encoded in bits */
    ND_TCHECK2(pptr[0], 1);
    addr_length = *pptr++;

    /* Source address */
    ND_TCHECK2(pptr[0], (addr_length >> 3));
    total_length += (addr_length >> 3) + 1;
    offset = strlen(buf);
    if (addr_length) {
        snprintf(buf + offset, buflen - offset, ", Source %s",
                 bgp_vpn_ip_print(ndo, pptr, addr_length));
        pptr += (addr_length >> 3);
    }

    /* Group address length, encoded in bits */
    ND_TCHECK2(pptr[0], 1);
    addr_length = *pptr++;

    /* Group address */
    ND_TCHECK2(pptr[0], (addr_length >> 3));
    total_length += (addr_length >> 3) + 1;
    offset = strlen(buf);
    if (addr_length) {
        snprintf(buf + offset, buflen - offset, ", Group %s",
                 bgp_vpn_ip_print(ndo, pptr, addr_length));
        pptr += (addr_length >> 3);
    }

trunc:
    /* NOTE(review): on truncation this falls through and returns the
     * partially-accumulated length rather than an error - callers that
     * subtract this from a route length should verify it; confirm. */
    return (total_length);
}

/* RDs and RTs share the same semantics
 * we use bgp_vpn_rd_print for
 * printing route targets inside a NLRI */
char *
bgp_vpn_rd_print(netdissect_options *ndo,
                 const u_char *pptr)
{
    /* allocate space for the largest possible string */
    static char rd[sizeof("xxxxxxxxxx:xxxxx (xxx.xxx.xxx.xxx:xxxxx)")];
    char *pos = rd;

    /* ok lets load the RD format */
    switch (EXTRACT_16BITS(pptr)) {

        /* 2-byte-AS:number fmt*/
    case 0:
        snprintf(pos, sizeof(rd) - (pos - rd), "%u:%u (= %u.%u.%u.%u)",
                 EXTRACT_16BITS(pptr+2),
                 EXTRACT_32BITS(pptr+4),
                 *(pptr+4), *(pptr+5), *(pptr+6), *(pptr+7));
        break;
        /* IP-address:AS fmt*/
    case 1:
        snprintf(pos, sizeof(rd) - (pos - rd), "%u.%u.%u.%u:%u",
                 *(pptr+2), *(pptr+3), *(pptr+4), *(pptr+5),
                 EXTRACT_16BITS(pptr+6));
        break;
        /* 4-byte-AS:number fmt*/
    case 2:
        snprintf(pos, sizeof(rd) - (pos - rd), "%s:%u (%u.%u.%u.%u:%u)",
                 as_printf(ndo, astostr, sizeof(astostr),
                           EXTRACT_32BITS(pptr+2)),
                 EXTRACT_16BITS(pptr+6),
                 *(pptr+2), *(pptr+3), *(pptr+4), *(pptr+5),
                 EXTRACT_16BITS(pptr+6));
        break;
    default:
        snprintf(pos, sizeof(rd) - (pos - rd), "unknown RD 
format");
        break;
    }
    pos += strlen(pos);
    *(pos) = '\0';
    return (rd);
}

/*
 * Decode an RFC 4684 Route Target membership NLRI into buf.
 * Returns bytes consumed, -1 on bogus length, -2 on capture truncation.
 */
static int
decode_rt_routing_info(netdissect_options *ndo,
                       const u_char *pptr, char *buf, u_int buflen)
{
    uint8_t route_target[8];
    u_int plen;
    char asbuf[sizeof(astostr)]; /* bgp_vpn_rd_print() overwrites astostr */

    /* NLRI "prefix length" from RFC 2858 Section 4. */
    ND_TCHECK(pptr[0]);
    plen = pptr[0];   /* get prefix length */

    /* NLRI "prefix" (ibid), valid lengths are { 0, 32, 33, ..., 96 } bits.
     * RFC 4684 Section 4 defines the layout of "origin AS" and "route
     * target" fields inside the "prefix" depending on its length. */
    if (0 == plen) {
        /* Without "origin AS", without "route target". */
        snprintf(buf, buflen, "default route target");
        return 1;
    }

    if (32 > plen)
        return -1;

    /* With at least "origin AS", possibly with "route target". */
    ND_TCHECK_32BITS(pptr + 1);
    as_printf(ndo, asbuf, sizeof(asbuf), EXTRACT_32BITS(pptr + 1));

    plen-=32; /* adjust prefix length */

    if (64 < plen)
        return -1;

    /* From now on (plen + 7) / 8 evaluates to { 0, 1, 2, ..., 8 }
     * and gives the number of octets in the variable-length "route
     * target" field inside this NLRI "prefix". Look for it. */
    memset(&route_target, 0, sizeof(route_target));
    ND_TCHECK2(pptr[5], (plen + 7) / 8);
    memcpy(&route_target, &pptr[5], (plen + 7) / 8);
    /* Which specification says to do this?
     */
    if (plen % 8) {
        ((u_char *)&route_target)[(plen + 7) / 8 - 1] &=
            ((0xff00 >> (plen % 8)) & 0xff);
    }
    snprintf(buf, buflen, "origin AS: %s, route target %s",
             asbuf,
             bgp_vpn_rd_print(ndo, (u_char *)&route_target));

    return 5 + (plen + 7) / 8;

trunc:
    return -2;
}

/*
 * Decode a labeled VPN IPv4 NLRI (RD + label + prefix) into buf.
 * Returns bytes consumed, -1 on bogus length, -2 on capture truncation.
 */
static int
decode_labeled_vpn_prefix4(netdissect_options *ndo,
                           const u_char *pptr, char *buf, u_int buflen)
{
    struct in_addr addr;
    u_int plen;

    ND_TCHECK(pptr[0]);
    plen = pptr[0];   /* get prefix length */

    if ((24+64) > plen)
        return -1;
    plen-=(24+64); /* adjust prefixlen - labellength - RD len*/

    if (32 < plen)
        return -1;

    memset(&addr, 0, sizeof(addr));
    ND_TCHECK2(pptr[12], (plen + 7) / 8);
    memcpy(&addr, &pptr[12], (plen + 7) / 8);
    if (plen % 8) {
        ((u_char *)&addr)[(plen + 7) / 8 - 1] &=
            ((0xff00 >> (plen % 8)) & 0xff);
    }
    /* the label may get offsetted by 4 bits so lets shift it right */
    snprintf(buf, buflen, "RD: %s, %s/%d, label:%u %s",
             bgp_vpn_rd_print(ndo, pptr+4),
             ipaddr_string(ndo, &addr),
             plen,
             EXTRACT_24BITS(pptr+1)>>4,
             ((pptr[3]&1)==0) ?
"(BOGUS: Bottom of Stack NOT set!)" : "(bottom)" );

    return 12 + (plen + 7) / 8;

trunc:
    return -2;
}

/*
 * +-------------------------------+
 * |                               |
 * |  RD:IPv4-address (12 octets)  |
 * |                               |
 * +-------------------------------+
 * |  MDT Group-address (4 octets) |
 * +-------------------------------+
 */

#define MDT_VPN_NLRI_LEN 16

/*
 * Decode an MDT-SAFI NLRI (RFC 6037): fixed-size RD + VPN IPv4 address
 * + multicast group address. Returns bytes consumed, -1 on bad length,
 * -2 on capture truncation.
 */
static int
decode_mdt_vpn_nlri(netdissect_options *ndo,
                    const u_char *pptr, char *buf, u_int buflen)
{
    const u_char *rd;
    const u_char *vpn_ip;

    ND_TCHECK(pptr[0]);

    /* if the NLRI is not predefined length, quit.*/
    if (*pptr != MDT_VPN_NLRI_LEN * 8)
        return -1;
    pptr++;

    /* RD */
    ND_TCHECK2(pptr[0], 8);
    rd = pptr;
    pptr+=8;

    /* IPv4 address */
    ND_TCHECK2(pptr[0], sizeof(struct in_addr));
    vpn_ip = pptr;
    pptr+=sizeof(struct in_addr);

    /* MDT Group Address */
    ND_TCHECK2(pptr[0], sizeof(struct in_addr));

    snprintf(buf, buflen, "RD: %s, VPN IP Address: %s, MC Group Address: %s",
             bgp_vpn_rd_print(ndo, rd), ipaddr_string(ndo, vpn_ip), ipaddr_string(ndo, pptr));

    return MDT_VPN_NLRI_LEN + 1;

trunc:
    return -2;
}

/* Multicast VPN route types (RFC 6514 section 4). */
#define BGP_MULTICAST_VPN_ROUTE_TYPE_INTRA_AS_I_PMSI 1
#define BGP_MULTICAST_VPN_ROUTE_TYPE_INTER_AS_I_PMSI 2
#define BGP_MULTICAST_VPN_ROUTE_TYPE_S_PMSI 3
#define BGP_MULTICAST_VPN_ROUTE_TYPE_INTRA_AS_SEG_LEAF 4
#define BGP_MULTICAST_VPN_ROUTE_TYPE_SOURCE_ACTIVE 5
#define BGP_MULTICAST_VPN_ROUTE_TYPE_SHARED_TREE_JOIN 6
#define BGP_MULTICAST_VPN_ROUTE_TYPE_SOURCE_TREE_JOIN 7

static const struct tok bgp_multicast_vpn_route_type_values[] = {
    { BGP_MULTICAST_VPN_ROUTE_TYPE_INTRA_AS_I_PMSI, "Intra-AS I-PMSI"},
    { BGP_MULTICAST_VPN_ROUTE_TYPE_INTER_AS_I_PMSI, "Inter-AS I-PMSI"},
    { BGP_MULTICAST_VPN_ROUTE_TYPE_S_PMSI, "S-PMSI"},
    { BGP_MULTICAST_VPN_ROUTE_TYPE_INTRA_AS_SEG_LEAF, "Intra-AS Segment-Leaf"},
    { BGP_MULTICAST_VPN_ROUTE_TYPE_SOURCE_ACTIVE, "Source-Active"},
    { BGP_MULTICAST_VPN_ROUTE_TYPE_SHARED_TREE_JOIN, "Shared Tree Join"},
    { BGP_MULTICAST_VPN_ROUTE_TYPE_SOURCE_TREE_JOIN, "Source Tree Join"},
    { 0, NULL}
};

/*
 * Decode a Multicast VPN NLRI (RFC 6514) into buf.
 * Returns bytes consumed (route_length + 2) or -2 on truncation.
 */
static int
decode_multicast_vpn(netdissect_options
*ndo, const u_char *pptr, char *buf, u_int buflen) { uint8_t route_type, route_length, addr_length, sg_length; u_int offset; ND_TCHECK2(pptr[0], 2); route_type = *pptr++; route_length = *pptr++; snprintf(buf, buflen, "Route-Type: %s (%u), length: %u", tok2str(bgp_multicast_vpn_route_type_values, "Unknown", route_type), route_type, route_length); switch(route_type) { case BGP_MULTICAST_VPN_ROUTE_TYPE_INTRA_AS_I_PMSI: ND_TCHECK2(pptr[0], BGP_VPN_RD_LEN); offset = strlen(buf); snprintf(buf + offset, buflen - offset, ", RD: %s, Originator %s", bgp_vpn_rd_print(ndo, pptr), bgp_vpn_ip_print(ndo, pptr + BGP_VPN_RD_LEN, (route_length - BGP_VPN_RD_LEN) << 3)); break; case BGP_MULTICAST_VPN_ROUTE_TYPE_INTER_AS_I_PMSI: ND_TCHECK2(pptr[0], BGP_VPN_RD_LEN + 4); offset = strlen(buf); snprintf(buf + offset, buflen - offset, ", RD: %s, Source-AS %s", bgp_vpn_rd_print(ndo, pptr), as_printf(ndo, astostr, sizeof(astostr), EXTRACT_32BITS(pptr + BGP_VPN_RD_LEN))); break; case BGP_MULTICAST_VPN_ROUTE_TYPE_S_PMSI: ND_TCHECK2(pptr[0], BGP_VPN_RD_LEN); offset = strlen(buf); snprintf(buf + offset, buflen - offset, ", RD: %s", bgp_vpn_rd_print(ndo, pptr)); pptr += BGP_VPN_RD_LEN; sg_length = bgp_vpn_sg_print(ndo, pptr, buf, buflen); addr_length = route_length - sg_length; ND_TCHECK2(pptr[0], addr_length); offset = strlen(buf); snprintf(buf + offset, buflen - offset, ", Originator %s", bgp_vpn_ip_print(ndo, pptr, addr_length << 3)); break; case BGP_MULTICAST_VPN_ROUTE_TYPE_SOURCE_ACTIVE: ND_TCHECK2(pptr[0], BGP_VPN_RD_LEN); offset = strlen(buf); snprintf(buf + offset, buflen - offset, ", RD: %s", bgp_vpn_rd_print(ndo, pptr)); pptr += BGP_VPN_RD_LEN; bgp_vpn_sg_print(ndo, pptr, buf, buflen); break; case BGP_MULTICAST_VPN_ROUTE_TYPE_SHARED_TREE_JOIN: /* fall through */ case BGP_MULTICAST_VPN_ROUTE_TYPE_SOURCE_TREE_JOIN: ND_TCHECK2(pptr[0], BGP_VPN_RD_LEN + 4); offset = strlen(buf); snprintf(buf + offset, buflen - offset, ", RD: %s, Source-AS %s", bgp_vpn_rd_print(ndo, pptr), as_printf(ndo, 
astostr, sizeof(astostr), EXTRACT_32BITS(pptr + BGP_VPN_RD_LEN))); pptr += BGP_VPN_RD_LEN + 4; bgp_vpn_sg_print(ndo, pptr, buf, buflen); break; /* * no per route-type printing yet. */ case BGP_MULTICAST_VPN_ROUTE_TYPE_INTRA_AS_SEG_LEAF: default: break; } return route_length + 2; trunc: return -2; } /* * As I remember, some versions of systems have an snprintf() that * returns -1 if the buffer would have overflowed. If the return * value is negative, set buflen to 0, to indicate that we've filled * the buffer up. * * If the return value is greater than buflen, that means that * the buffer would have overflowed; again, set buflen to 0 in * that case. */ #define UPDATE_BUF_BUFLEN(buf, buflen, stringlen) \ if (stringlen<0) \ buflen=0; \ else if ((u_int)stringlen>buflen) \ buflen=0; \ else { \ buflen-=stringlen; \ buf+=stringlen; \ } static int decode_labeled_vpn_l2(netdissect_options *ndo, const u_char *pptr, char *buf, u_int buflen) { int plen,tlen,stringlen,tlv_type,tlv_len,ttlv_len; ND_TCHECK2(pptr[0], 2); plen=EXTRACT_16BITS(pptr); tlen=plen; pptr+=2; /* Old and new L2VPN NLRI share AFI/SAFI * -> Assume a 12 Byte-length NLRI is auto-discovery-only * and > 17 as old format. 
Complain for the middle case */
    if (plen==12) {
        /* assume AD-only with RD, BGPNH */
        ND_TCHECK2(pptr[0],12);
        buf[0]='\0';
        stringlen=snprintf(buf, buflen, "RD: %s, BGPNH: %s",
                           bgp_vpn_rd_print(ndo, pptr),
                           ipaddr_string(ndo, pptr+8)
                           );
        UPDATE_BUF_BUFLEN(buf, buflen, stringlen);
        pptr+=12;
        tlen-=12;
        return plen;
    } else if (plen>17) {
        /* assume old format */
        /* RD, ID, LBLKOFF, LBLBASE */

        ND_TCHECK2(pptr[0],15);
        buf[0]='\0';
        stringlen=snprintf(buf, buflen, "RD: %s, CE-ID: %u, Label-Block Offset: %u, Label Base %u",
                           bgp_vpn_rd_print(ndo, pptr),
                           EXTRACT_16BITS(pptr+8),
                           EXTRACT_16BITS(pptr+10),
                           EXTRACT_24BITS(pptr+12)>>4); /* the label is offsetted by 4 bits so lets shift it right */
        UPDATE_BUF_BUFLEN(buf, buflen, stringlen);
        pptr+=15;
        tlen-=15;

        /* ok now the variable part - lets read out TLVs*/
        while (tlen>0) {
            if (tlen < 3)
                return -1;
            ND_TCHECK2(pptr[0], 3);
            tlv_type=*pptr++;
            tlv_len=EXTRACT_16BITS(pptr);
            ttlv_len=tlv_len;
            pptr+=2;

            switch(tlv_type) {
            case 1:
                if (buflen!=0) {
                    stringlen=snprintf(buf,buflen, "\n\t\tcircuit status vector (%u) length: %u: 0x",
                                       tlv_type,
                                       tlv_len);
                    UPDATE_BUF_BUFLEN(buf, buflen, stringlen);
                }
                ttlv_len=ttlv_len/8+1; /* how many bytes do we need to read ? */
                while (ttlv_len>0) {
                    ND_TCHECK(pptr[0]);
                    if (buflen!=0) {
                        stringlen=snprintf(buf,buflen, "%02x",*pptr++);
                        UPDATE_BUF_BUFLEN(buf, buflen, stringlen);
                    }
                    ttlv_len--;
                }
                break;
            default:
                if (buflen!=0) {
                    stringlen=snprintf(buf,buflen, "\n\t\tunknown TLV #%u, length: %u", tlv_type, tlv_len);
                    UPDATE_BUF_BUFLEN(buf, buflen, stringlen);
                }
                break;
            }
            /* NOTE(review): tlen is in bytes but tlv_len<<3 multiplies the
             * TLV length by 8, while the comment says "expressed in bits so
             * shift right" - this looks inverted; confirm against the spec
             * before changing. */
            tlen-=(tlv_len<<3); /* the tlv-length is expressed in bits so lets shift it right */
        }
        return plen+2;
    } else { /* complain bitterly ?
*/ /* fall through */ goto trunc; } trunc: return -2; } int decode_prefix6(netdissect_options *ndo, const u_char *pd, u_int itemlen, char *buf, u_int buflen) { struct in6_addr addr; u_int plen, plenbytes; ND_TCHECK(pd[0]); ITEMCHECK(1); plen = pd[0]; if (128 < plen) return -1; itemlen -= 1; memset(&addr, 0, sizeof(addr)); plenbytes = (plen + 7) / 8; ND_TCHECK2(pd[1], plenbytes); ITEMCHECK(plenbytes); memcpy(&addr, &pd[1], plenbytes); if (plen % 8) { addr.s6_addr[plenbytes - 1] &= ((0xff00 >> (plen % 8)) & 0xff); } snprintf(buf, buflen, "%s/%d", ip6addr_string(ndo, &addr), plen); return 1 + plenbytes; trunc: return -2; badtlv: return -3; } static int decode_labeled_prefix6(netdissect_options *ndo, const u_char *pptr, u_int itemlen, char *buf, u_int buflen) { struct in6_addr addr; u_int plen, plenbytes; /* prefix length and label = 4 bytes */ ND_TCHECK2(pptr[0], 4); ITEMCHECK(4); plen = pptr[0]; /* get prefix length */ if (24 > plen) return -1; plen-=24; /* adjust prefixlen - labellength */ if (128 < plen) return -1; itemlen -= 4; memset(&addr, 0, sizeof(addr)); plenbytes = (plen + 7) / 8; ND_TCHECK2(pptr[4], plenbytes); memcpy(&addr, &pptr[4], plenbytes); if (plen % 8) { addr.s6_addr[plenbytes - 1] &= ((0xff00 >> (plen % 8)) & 0xff); } /* the label may get offsetted by 4 bits so lets shift it right */ snprintf(buf, buflen, "%s/%d, label:%u %s", ip6addr_string(ndo, &addr), plen, EXTRACT_24BITS(pptr+1)>>4, ((pptr[3]&1)==0) ? 
"(BOGUS: Bottom of Stack NOT set!)" : "(bottom)" ); return 4 + plenbytes; trunc: return -2; badtlv: return -3; } static int decode_labeled_vpn_prefix6(netdissect_options *ndo, const u_char *pptr, char *buf, u_int buflen) { struct in6_addr addr; u_int plen; ND_TCHECK(pptr[0]); plen = pptr[0]; /* get prefix length */ if ((24+64) > plen) return -1; plen-=(24+64); /* adjust prefixlen - labellength - RD len*/ if (128 < plen) return -1; memset(&addr, 0, sizeof(addr)); ND_TCHECK2(pptr[12], (plen + 7) / 8); memcpy(&addr, &pptr[12], (plen + 7) / 8); if (plen % 8) { addr.s6_addr[(plen + 7) / 8 - 1] &= ((0xff00 >> (plen % 8)) & 0xff); } /* the label may get offsetted by 4 bits so lets shift it right */ snprintf(buf, buflen, "RD: %s, %s/%d, label:%u %s", bgp_vpn_rd_print(ndo, pptr+4), ip6addr_string(ndo, &addr), plen, EXTRACT_24BITS(pptr+1)>>4, ((pptr[3]&1)==0) ? "(BOGUS: Bottom of Stack NOT set!)" : "(bottom)" ); return 12 + (plen + 7) / 8; trunc: return -2; } static int decode_clnp_prefix(netdissect_options *ndo, const u_char *pptr, char *buf, u_int buflen) { uint8_t addr[19]; u_int plen; ND_TCHECK(pptr[0]); plen = pptr[0]; /* get prefix length */ if (152 < plen) return -1; memset(&addr, 0, sizeof(addr)); ND_TCHECK2(pptr[4], (plen + 7) / 8); memcpy(&addr, &pptr[4], (plen + 7) / 8); if (plen % 8) { addr[(plen + 7) / 8 - 1] &= ((0xff00 >> (plen % 8)) & 0xff); } snprintf(buf, buflen, "%s/%d", isonsap_string(ndo, addr,(plen + 7) / 8), plen); return 1 + (plen + 7) / 8; trunc: return -2; } static int decode_labeled_vpn_clnp_prefix(netdissect_options *ndo, const u_char *pptr, char *buf, u_int buflen) { uint8_t addr[19]; u_int plen; ND_TCHECK(pptr[0]); plen = pptr[0]; /* get prefix length */ if ((24+64) > plen) return -1; plen-=(24+64); /* adjust prefixlen - labellength - RD len*/ if (152 < plen) return -1; memset(&addr, 0, sizeof(addr)); ND_TCHECK2(pptr[12], (plen + 7) / 8); memcpy(&addr, &pptr[12], (plen + 7) / 8); if (plen % 8) { addr[(plen + 7) / 8 - 1] &= ((0xff00 >> (plen % 
8)) & 0xff);
    }
    /* the label may get offsetted by 4 bits so lets shift it right */
    snprintf(buf, buflen, "RD: %s, %s/%d, label:%u %s",
             bgp_vpn_rd_print(ndo, pptr+4),
             isonsap_string(ndo, addr,(plen + 7) / 8),
             plen,
             EXTRACT_24BITS(pptr+1)>>4,
             ((pptr[3]&1)==0) ? "(BOGUS: Bottom of Stack NOT set!)" : "(bottom)" );

    return 12 + (plen + 7) / 8;

trunc:
    return -2;
}

/*
 * bgp_attr_get_as_size
 *
 * Try to find the size of the ASs encoded in an as-path. It is not obvious, as
 * both Old speakers that do not support 4 byte AS, and the new speakers that do
 * support, exchange AS-Path with the same path-attribute type value 0x02.
 */
static int
bgp_attr_get_as_size(netdissect_options *ndo,
                     uint8_t bgpa_type, const u_char *pptr, int len)
{
    const u_char *tptr = pptr;

    /*
     * If the path attribute is the optional AS4 path type, then we already
     * know, that ASs must be encoded in 4 byte format.
     */
    if (bgpa_type == BGPTYPE_AS4_PATH) {
        return 4;
    }

    /*
     * Let us assume that ASs are of 2 bytes in size, and check if the AS-Path
     * TLV is good. If not, ask the caller to try with AS encoded as 4 bytes
     * each.
     */
    while (tptr < pptr + len) {
        ND_TCHECK(tptr[0]);

        /*
         * If we do not find a valid segment type, our guess might be wrong.
         */
        if (tptr[0] < BGP_AS_SEG_TYPE_MIN || tptr[0] > BGP_AS_SEG_TYPE_MAX) {
            goto trunc;
        }
        ND_TCHECK(tptr[1]);
        tptr += 2 + tptr[1] * 2;
    }

    /*
     * If we correctly reached end of the AS path attribute data content,
     * then most likely ASs were indeed encoded as 2 bytes.
     */
    if (tptr == pptr + len) {
        return 2;
    }

trunc:

    /*
     * We can come here, either we did not have enough data, or if we
     * try to decode 4 byte ASs in 2 byte format. Either way, return 4,
     * so that calller can try to decode each AS as of 4 bytes. If indeed
     * there was not enough data, it will crib and end the parse anyways.
*/ return 4; } static int bgp_attr_print(netdissect_options *ndo, u_int atype, const u_char *pptr, u_int len) { int i; uint16_t af; uint8_t safi, snpa, nhlen; union { /* copy buffer for bandwidth values */ float f; uint32_t i; } bw; int advance; u_int tlen; const u_char *tptr; char buf[MAXHOSTNAMELEN + 100]; int as_size; tptr = pptr; tlen=len; switch (atype) { case BGPTYPE_ORIGIN: if (len != 1) ND_PRINT((ndo, "invalid len")); else { ND_TCHECK(*tptr); ND_PRINT((ndo, "%s", tok2str(bgp_origin_values, "Unknown Origin Typecode", tptr[0]))); } break; /* * Process AS4 byte path and AS2 byte path attributes here. */ case BGPTYPE_AS4_PATH: case BGPTYPE_AS_PATH: if (len % 2) { ND_PRINT((ndo, "invalid len")); break; } if (!len) { ND_PRINT((ndo, "empty")); break; } /* * BGP updates exchanged between New speakers that support 4 * byte AS, ASs are always encoded in 4 bytes. There is no * definitive way to find this, just by the packet's * contents. So, check for packet's TLV's sanity assuming * 2 bytes first, and it does not pass, assume that ASs are * encoded in 4 bytes format and move on. */ as_size = bgp_attr_get_as_size(ndo, atype, pptr, len); while (tptr < pptr + len) { ND_TCHECK(tptr[0]); ND_PRINT((ndo, "%s", tok2str(bgp_as_path_segment_open_values, "?", tptr[0]))); ND_TCHECK(tptr[1]); for (i = 0; i < tptr[1] * as_size; i += as_size) { ND_TCHECK2(tptr[2 + i], as_size); ND_PRINT((ndo, "%s ", as_printf(ndo, astostr, sizeof(astostr), as_size == 2 ? 
EXTRACT_16BITS(&tptr[2 + i]) : EXTRACT_32BITS(&tptr[2 + i])))); } ND_TCHECK(tptr[0]); ND_PRINT((ndo, "%s", tok2str(bgp_as_path_segment_close_values, "?", tptr[0]))); ND_TCHECK(tptr[1]); tptr += 2 + tptr[1] * as_size; } break; case BGPTYPE_NEXT_HOP: if (len != 4) ND_PRINT((ndo, "invalid len")); else { ND_TCHECK2(tptr[0], 4); ND_PRINT((ndo, "%s", ipaddr_string(ndo, tptr))); } break; case BGPTYPE_MULTI_EXIT_DISC: case BGPTYPE_LOCAL_PREF: if (len != 4) ND_PRINT((ndo, "invalid len")); else { ND_TCHECK2(tptr[0], 4); ND_PRINT((ndo, "%u", EXTRACT_32BITS(tptr))); } break; case BGPTYPE_ATOMIC_AGGREGATE: if (len != 0) ND_PRINT((ndo, "invalid len")); break; case BGPTYPE_AGGREGATOR: /* * Depending on the AS encoded is of 2 bytes or of 4 bytes, * the length of this PA can be either 6 bytes or 8 bytes. */ if (len != 6 && len != 8) { ND_PRINT((ndo, "invalid len")); break; } ND_TCHECK2(tptr[0], len); if (len == 6) { ND_PRINT((ndo, " AS #%s, origin %s", as_printf(ndo, astostr, sizeof(astostr), EXTRACT_16BITS(tptr)), ipaddr_string(ndo, tptr + 2))); } else { ND_PRINT((ndo, " AS #%s, origin %s", as_printf(ndo, astostr, sizeof(astostr), EXTRACT_32BITS(tptr)), ipaddr_string(ndo, tptr + 4))); } break; case BGPTYPE_AGGREGATOR4: if (len != 8) { ND_PRINT((ndo, "invalid len")); break; } ND_TCHECK2(tptr[0], 8); ND_PRINT((ndo, " AS #%s, origin %s", as_printf(ndo, astostr, sizeof(astostr), EXTRACT_32BITS(tptr)), ipaddr_string(ndo, tptr + 4))); break; case BGPTYPE_COMMUNITIES: if (len % 4) { ND_PRINT((ndo, "invalid len")); break; } while (tlen>0) { uint32_t comm; ND_TCHECK2(tptr[0], 4); comm = EXTRACT_32BITS(tptr); switch (comm) { case BGP_COMMUNITY_NO_EXPORT: ND_PRINT((ndo, " NO_EXPORT")); break; case BGP_COMMUNITY_NO_ADVERT: ND_PRINT((ndo, " NO_ADVERTISE")); break; case BGP_COMMUNITY_NO_EXPORT_SUBCONFED: ND_PRINT((ndo, " NO_EXPORT_SUBCONFED")); break; default: ND_PRINT((ndo, "%u:%u%s", (comm >> 16) & 0xffff, comm & 0xffff, (tlen>4) ? 
", " : "")); break; } tlen -=4; tptr +=4; } break; case BGPTYPE_ORIGINATOR_ID: if (len != 4) { ND_PRINT((ndo, "invalid len")); break; } ND_TCHECK2(tptr[0], 4); ND_PRINT((ndo, "%s",ipaddr_string(ndo, tptr))); break; case BGPTYPE_CLUSTER_LIST: if (len % 4) { ND_PRINT((ndo, "invalid len")); break; } while (tlen>0) { ND_TCHECK2(tptr[0], 4); ND_PRINT((ndo, "%s%s", ipaddr_string(ndo, tptr), (tlen>4) ? ", " : "")); tlen -=4; tptr +=4; } break; case BGPTYPE_MP_REACH_NLRI: ND_TCHECK2(tptr[0], 3); af = EXTRACT_16BITS(tptr); safi = tptr[2]; ND_PRINT((ndo, "\n\t AFI: %s (%u), %sSAFI: %s (%u)", tok2str(af_values, "Unknown AFI", af), af, (safi>128) ? "vendor specific " : "", /* 128 is meanwhile wellknown */ tok2str(bgp_safi_values, "Unknown SAFI", safi), safi)); switch(af<<8 | safi) { case (AFNUM_INET<<8 | SAFNUM_UNICAST): case (AFNUM_INET<<8 | SAFNUM_MULTICAST): case (AFNUM_INET<<8 | SAFNUM_UNIMULTICAST): case (AFNUM_INET<<8 | SAFNUM_LABUNICAST): case (AFNUM_INET<<8 | SAFNUM_RT_ROUTING_INFO): case (AFNUM_INET<<8 | SAFNUM_VPNUNICAST): case (AFNUM_INET<<8 | SAFNUM_VPNMULTICAST): case (AFNUM_INET<<8 | SAFNUM_VPNUNIMULTICAST): case (AFNUM_INET<<8 | SAFNUM_MULTICAST_VPN): case (AFNUM_INET<<8 | SAFNUM_MDT): case (AFNUM_INET6<<8 | SAFNUM_UNICAST): case (AFNUM_INET6<<8 | SAFNUM_MULTICAST): case (AFNUM_INET6<<8 | SAFNUM_UNIMULTICAST): case (AFNUM_INET6<<8 | SAFNUM_LABUNICAST): case (AFNUM_INET6<<8 | SAFNUM_VPNUNICAST): case (AFNUM_INET6<<8 | SAFNUM_VPNMULTICAST): case (AFNUM_INET6<<8 | SAFNUM_VPNUNIMULTICAST): case (AFNUM_NSAP<<8 | SAFNUM_UNICAST): case (AFNUM_NSAP<<8 | SAFNUM_MULTICAST): case (AFNUM_NSAP<<8 | SAFNUM_UNIMULTICAST): case (AFNUM_NSAP<<8 | SAFNUM_VPNUNICAST): case (AFNUM_NSAP<<8 | SAFNUM_VPNMULTICAST): case (AFNUM_NSAP<<8 | SAFNUM_VPNUNIMULTICAST): case (AFNUM_L2VPN<<8 | SAFNUM_VPNUNICAST): case (AFNUM_L2VPN<<8 | SAFNUM_VPNMULTICAST): case (AFNUM_L2VPN<<8 | SAFNUM_VPNUNIMULTICAST): case (AFNUM_VPLS<<8 | SAFNUM_VPLS): break; default: ND_TCHECK2(tptr[0], tlen); 
ND_PRINT((ndo, "\n\t no AFI %u / SAFI %u decoder", af, safi)); if (ndo->ndo_vflag <= 1) print_unknown_data(ndo, tptr, "\n\t ", tlen); goto done; break; } tptr +=3; ND_TCHECK(tptr[0]); nhlen = tptr[0]; tlen = nhlen; tptr++; if (tlen) { int nnh = 0; ND_PRINT((ndo, "\n\t nexthop: ")); while (tlen > 0) { if ( nnh++ > 0 ) { ND_PRINT((ndo, ", " )); } switch(af<<8 | safi) { case (AFNUM_INET<<8 | SAFNUM_UNICAST): case (AFNUM_INET<<8 | SAFNUM_MULTICAST): case (AFNUM_INET<<8 | SAFNUM_UNIMULTICAST): case (AFNUM_INET<<8 | SAFNUM_LABUNICAST): case (AFNUM_INET<<8 | SAFNUM_RT_ROUTING_INFO): case (AFNUM_INET<<8 | SAFNUM_MULTICAST_VPN): case (AFNUM_INET<<8 | SAFNUM_MDT): if (tlen < (int)sizeof(struct in_addr)) { ND_PRINT((ndo, "invalid len")); tlen = 0; } else { ND_TCHECK2(tptr[0], sizeof(struct in_addr)); ND_PRINT((ndo, "%s",ipaddr_string(ndo, tptr))); tlen -= sizeof(struct in_addr); tptr += sizeof(struct in_addr); } break; case (AFNUM_INET<<8 | SAFNUM_VPNUNICAST): case (AFNUM_INET<<8 | SAFNUM_VPNMULTICAST): case (AFNUM_INET<<8 | SAFNUM_VPNUNIMULTICAST): if (tlen < (int)(sizeof(struct in_addr)+BGP_VPN_RD_LEN)) { ND_PRINT((ndo, "invalid len")); tlen = 0; } else { ND_TCHECK2(tptr[0], sizeof(struct in_addr)+BGP_VPN_RD_LEN); ND_PRINT((ndo, "RD: %s, %s", bgp_vpn_rd_print(ndo, tptr), ipaddr_string(ndo, tptr+BGP_VPN_RD_LEN))); tlen -= (sizeof(struct in_addr)+BGP_VPN_RD_LEN); tptr += (sizeof(struct in_addr)+BGP_VPN_RD_LEN); } break; case (AFNUM_INET6<<8 | SAFNUM_UNICAST): case (AFNUM_INET6<<8 | SAFNUM_MULTICAST): case (AFNUM_INET6<<8 | SAFNUM_UNIMULTICAST): case (AFNUM_INET6<<8 | SAFNUM_LABUNICAST): if (tlen < (int)sizeof(struct in6_addr)) { ND_PRINT((ndo, "invalid len")); tlen = 0; } else { ND_TCHECK2(tptr[0], sizeof(struct in6_addr)); ND_PRINT((ndo, "%s", ip6addr_string(ndo, tptr))); tlen -= sizeof(struct in6_addr); tptr += sizeof(struct in6_addr); } break; case (AFNUM_INET6<<8 | SAFNUM_VPNUNICAST): case (AFNUM_INET6<<8 | SAFNUM_VPNMULTICAST): case (AFNUM_INET6<<8 | 
SAFNUM_VPNUNIMULTICAST): if (tlen < (int)(sizeof(struct in6_addr)+BGP_VPN_RD_LEN)) { ND_PRINT((ndo, "invalid len")); tlen = 0; } else { ND_TCHECK2(tptr[0], sizeof(struct in6_addr)+BGP_VPN_RD_LEN); ND_PRINT((ndo, "RD: %s, %s", bgp_vpn_rd_print(ndo, tptr), ip6addr_string(ndo, tptr+BGP_VPN_RD_LEN))); tlen -= (sizeof(struct in6_addr)+BGP_VPN_RD_LEN); tptr += (sizeof(struct in6_addr)+BGP_VPN_RD_LEN); } break; case (AFNUM_VPLS<<8 | SAFNUM_VPLS): case (AFNUM_L2VPN<<8 | SAFNUM_VPNUNICAST): case (AFNUM_L2VPN<<8 | SAFNUM_VPNMULTICAST): case (AFNUM_L2VPN<<8 | SAFNUM_VPNUNIMULTICAST): if (tlen < (int)sizeof(struct in_addr)) { ND_PRINT((ndo, "invalid len")); tlen = 0; } else { ND_TCHECK2(tptr[0], sizeof(struct in_addr)); ND_PRINT((ndo, "%s", ipaddr_string(ndo, tptr))); tlen -= (sizeof(struct in_addr)); tptr += (sizeof(struct in_addr)); } break; case (AFNUM_NSAP<<8 | SAFNUM_UNICAST): case (AFNUM_NSAP<<8 | SAFNUM_MULTICAST): case (AFNUM_NSAP<<8 | SAFNUM_UNIMULTICAST): ND_TCHECK2(tptr[0], tlen); ND_PRINT((ndo, "%s", isonsap_string(ndo, tptr, tlen))); tptr += tlen; tlen = 0; break; case (AFNUM_NSAP<<8 | SAFNUM_VPNUNICAST): case (AFNUM_NSAP<<8 | SAFNUM_VPNMULTICAST): case (AFNUM_NSAP<<8 | SAFNUM_VPNUNIMULTICAST): if (tlen < BGP_VPN_RD_LEN+1) { ND_PRINT((ndo, "invalid len")); tlen = 0; } else { ND_TCHECK2(tptr[0], tlen); ND_PRINT((ndo, "RD: %s, %s", bgp_vpn_rd_print(ndo, tptr), isonsap_string(ndo, tptr+BGP_VPN_RD_LEN,tlen-BGP_VPN_RD_LEN))); /* rfc986 mapped IPv4 address ? */ if (EXTRACT_32BITS(tptr+BGP_VPN_RD_LEN) == 0x47000601) ND_PRINT((ndo, " = %s", ipaddr_string(ndo, tptr+BGP_VPN_RD_LEN+4))); /* rfc1888 mapped IPv6 address ? 
*/ else if (EXTRACT_24BITS(tptr+BGP_VPN_RD_LEN) == 0x350000) ND_PRINT((ndo, " = %s", ip6addr_string(ndo, tptr+BGP_VPN_RD_LEN+3))); tptr += tlen; tlen = 0; } break; default: ND_TCHECK2(tptr[0], tlen); ND_PRINT((ndo, "no AFI %u/SAFI %u decoder", af, safi)); if (ndo->ndo_vflag <= 1) print_unknown_data(ndo, tptr, "\n\t ", tlen); tptr += tlen; tlen = 0; goto done; break; } } } ND_PRINT((ndo, ", nh-length: %u", nhlen)); tptr += tlen; ND_TCHECK(tptr[0]); snpa = tptr[0]; tptr++; if (snpa) { ND_PRINT((ndo, "\n\t %u SNPA", snpa)); for (/*nothing*/; snpa > 0; snpa--) { ND_TCHECK(tptr[0]); ND_PRINT((ndo, "\n\t %d bytes", tptr[0])); tptr += tptr[0] + 1; } } else { ND_PRINT((ndo, ", no SNPA")); } while (tptr < pptr + len) { switch (af<<8 | safi) { case (AFNUM_INET<<8 | SAFNUM_UNICAST): case (AFNUM_INET<<8 | SAFNUM_MULTICAST): case (AFNUM_INET<<8 | SAFNUM_UNIMULTICAST): advance = decode_prefix4(ndo, tptr, len, buf, sizeof(buf)); if (advance == -1) ND_PRINT((ndo, "\n\t (illegal prefix length)")); else if (advance == -2) goto trunc; else if (advance == -3) break; /* bytes left, but not enough */ else ND_PRINT((ndo, "\n\t %s", buf)); break; case (AFNUM_INET<<8 | SAFNUM_LABUNICAST): advance = decode_labeled_prefix4(ndo, tptr, len, buf, sizeof(buf)); if (advance == -1) ND_PRINT((ndo, "\n\t (illegal prefix length)")); else if (advance == -2) goto trunc; else if (advance == -3) break; /* bytes left, but not enough */ else ND_PRINT((ndo, "\n\t %s", buf)); break; case (AFNUM_INET<<8 | SAFNUM_VPNUNICAST): case (AFNUM_INET<<8 | SAFNUM_VPNMULTICAST): case (AFNUM_INET<<8 | SAFNUM_VPNUNIMULTICAST): advance = decode_labeled_vpn_prefix4(ndo, tptr, buf, sizeof(buf)); if (advance == -1) ND_PRINT((ndo, "\n\t (illegal prefix length)")); else if (advance == -2) goto trunc; else ND_PRINT((ndo, "\n\t %s", buf)); break; case (AFNUM_INET<<8 | SAFNUM_RT_ROUTING_INFO): advance = decode_rt_routing_info(ndo, tptr, buf, sizeof(buf)); if (advance == -1) ND_PRINT((ndo, "\n\t (illegal prefix length)")); else if 
(advance == -2) goto trunc; else ND_PRINT((ndo, "\n\t %s", buf)); break; case (AFNUM_INET<<8 | SAFNUM_MULTICAST_VPN): /* fall through */ case (AFNUM_INET6<<8 | SAFNUM_MULTICAST_VPN): advance = decode_multicast_vpn(ndo, tptr, buf, sizeof(buf)); if (advance == -1) ND_PRINT((ndo, "\n\t (illegal prefix length)")); else if (advance == -2) goto trunc; else ND_PRINT((ndo, "\n\t %s", buf)); break; case (AFNUM_INET<<8 | SAFNUM_MDT): advance = decode_mdt_vpn_nlri(ndo, tptr, buf, sizeof(buf)); if (advance == -1) ND_PRINT((ndo, "\n\t (illegal prefix length)")); else if (advance == -2) goto trunc; else ND_PRINT((ndo, "\n\t %s", buf)); break; case (AFNUM_INET6<<8 | SAFNUM_UNICAST): case (AFNUM_INET6<<8 | SAFNUM_MULTICAST): case (AFNUM_INET6<<8 | SAFNUM_UNIMULTICAST): advance = decode_prefix6(ndo, tptr, len, buf, sizeof(buf)); if (advance == -1) ND_PRINT((ndo, "\n\t (illegal prefix length)")); else if (advance == -2) goto trunc; else if (advance == -3) break; /* bytes left, but not enough */ else ND_PRINT((ndo, "\n\t %s", buf)); break; case (AFNUM_INET6<<8 | SAFNUM_LABUNICAST): advance = decode_labeled_prefix6(ndo, tptr, len, buf, sizeof(buf)); if (advance == -1) ND_PRINT((ndo, "\n\t (illegal prefix length)")); else if (advance == -2) goto trunc; else if (advance == -3) break; /* bytes left, but not enough */ else ND_PRINT((ndo, "\n\t %s", buf)); break; case (AFNUM_INET6<<8 | SAFNUM_VPNUNICAST): case (AFNUM_INET6<<8 | SAFNUM_VPNMULTICAST): case (AFNUM_INET6<<8 | SAFNUM_VPNUNIMULTICAST): advance = decode_labeled_vpn_prefix6(ndo, tptr, buf, sizeof(buf)); if (advance == -1) ND_PRINT((ndo, "\n\t (illegal prefix length)")); else if (advance == -2) goto trunc; else ND_PRINT((ndo, "\n\t %s", buf)); break; case (AFNUM_VPLS<<8 | SAFNUM_VPLS): case (AFNUM_L2VPN<<8 | SAFNUM_VPNUNICAST): case (AFNUM_L2VPN<<8 | SAFNUM_VPNMULTICAST): case (AFNUM_L2VPN<<8 | SAFNUM_VPNUNIMULTICAST): advance = decode_labeled_vpn_l2(ndo, tptr, buf, sizeof(buf)); if (advance == -1) ND_PRINT((ndo, "\n\t (illegal 
length)")); else if (advance == -2) goto trunc; else ND_PRINT((ndo, "\n\t %s", buf)); break; case (AFNUM_NSAP<<8 | SAFNUM_UNICAST): case (AFNUM_NSAP<<8 | SAFNUM_MULTICAST): case (AFNUM_NSAP<<8 | SAFNUM_UNIMULTICAST): advance = decode_clnp_prefix(ndo, tptr, buf, sizeof(buf)); if (advance == -1) ND_PRINT((ndo, "\n\t (illegal prefix length)")); else if (advance == -2) goto trunc; else ND_PRINT((ndo, "\n\t %s", buf)); break; case (AFNUM_NSAP<<8 | SAFNUM_VPNUNICAST): case (AFNUM_NSAP<<8 | SAFNUM_VPNMULTICAST): case (AFNUM_NSAP<<8 | SAFNUM_VPNUNIMULTICAST): advance = decode_labeled_vpn_clnp_prefix(ndo, tptr, buf, sizeof(buf)); if (advance == -1) ND_PRINT((ndo, "\n\t (illegal prefix length)")); else if (advance == -2) goto trunc; else ND_PRINT((ndo, "\n\t %s", buf)); break; default: ND_TCHECK2(*tptr,tlen); ND_PRINT((ndo, "\n\t no AFI %u / SAFI %u decoder", af, safi)); if (ndo->ndo_vflag <= 1) print_unknown_data(ndo, tptr, "\n\t ", tlen); advance = 0; tptr = pptr + len; break; } if (advance < 0) break; tptr += advance; } done: break; case BGPTYPE_MP_UNREACH_NLRI: ND_TCHECK2(tptr[0], BGP_MP_NLRI_MINSIZE); af = EXTRACT_16BITS(tptr); safi = tptr[2]; ND_PRINT((ndo, "\n\t AFI: %s (%u), %sSAFI: %s (%u)", tok2str(af_values, "Unknown AFI", af), af, (safi>128) ? 
"vendor specific " : "", /* 128 is meanwhile wellknown */ tok2str(bgp_safi_values, "Unknown SAFI", safi), safi)); if (len == BGP_MP_NLRI_MINSIZE) ND_PRINT((ndo, "\n\t End-of-Rib Marker (empty NLRI)")); tptr += 3; while (tptr < pptr + len) { switch (af<<8 | safi) { case (AFNUM_INET<<8 | SAFNUM_UNICAST): case (AFNUM_INET<<8 | SAFNUM_MULTICAST): case (AFNUM_INET<<8 | SAFNUM_UNIMULTICAST): advance = decode_prefix4(ndo, tptr, len, buf, sizeof(buf)); if (advance == -1) ND_PRINT((ndo, "\n\t (illegal prefix length)")); else if (advance == -2) goto trunc; else if (advance == -3) break; /* bytes left, but not enough */ else ND_PRINT((ndo, "\n\t %s", buf)); break; case (AFNUM_INET<<8 | SAFNUM_LABUNICAST): advance = decode_labeled_prefix4(ndo, tptr, len, buf, sizeof(buf)); if (advance == -1) ND_PRINT((ndo, "\n\t (illegal prefix length)")); else if (advance == -2) goto trunc; else if (advance == -3) break; /* bytes left, but not enough */ else ND_PRINT((ndo, "\n\t %s", buf)); break; case (AFNUM_INET<<8 | SAFNUM_VPNUNICAST): case (AFNUM_INET<<8 | SAFNUM_VPNMULTICAST): case (AFNUM_INET<<8 | SAFNUM_VPNUNIMULTICAST): advance = decode_labeled_vpn_prefix4(ndo, tptr, buf, sizeof(buf)); if (advance == -1) ND_PRINT((ndo, "\n\t (illegal prefix length)")); else if (advance == -2) goto trunc; else ND_PRINT((ndo, "\n\t %s", buf)); break; case (AFNUM_INET6<<8 | SAFNUM_UNICAST): case (AFNUM_INET6<<8 | SAFNUM_MULTICAST): case (AFNUM_INET6<<8 | SAFNUM_UNIMULTICAST): advance = decode_prefix6(ndo, tptr, len, buf, sizeof(buf)); if (advance == -1) ND_PRINT((ndo, "\n\t (illegal prefix length)")); else if (advance == -2) goto trunc; else if (advance == -3) break; /* bytes left, but not enough */ else ND_PRINT((ndo, "\n\t %s", buf)); break; case (AFNUM_INET6<<8 | SAFNUM_LABUNICAST): advance = decode_labeled_prefix6(ndo, tptr, len, buf, sizeof(buf)); if (advance == -1) ND_PRINT((ndo, "\n\t (illegal prefix length)")); else if (advance == -2) goto trunc; else if (advance == -3) break; /* bytes left, but 
not enough */ else ND_PRINT((ndo, "\n\t %s", buf)); break; case (AFNUM_INET6<<8 | SAFNUM_VPNUNICAST): case (AFNUM_INET6<<8 | SAFNUM_VPNMULTICAST): case (AFNUM_INET6<<8 | SAFNUM_VPNUNIMULTICAST): advance = decode_labeled_vpn_prefix6(ndo, tptr, buf, sizeof(buf)); if (advance == -1) ND_PRINT((ndo, "\n\t (illegal prefix length)")); else if (advance == -2) goto trunc; else ND_PRINT((ndo, "\n\t %s", buf)); break; case (AFNUM_VPLS<<8 | SAFNUM_VPLS): case (AFNUM_L2VPN<<8 | SAFNUM_VPNUNICAST): case (AFNUM_L2VPN<<8 | SAFNUM_VPNMULTICAST): case (AFNUM_L2VPN<<8 | SAFNUM_VPNUNIMULTICAST): advance = decode_labeled_vpn_l2(ndo, tptr, buf, sizeof(buf)); if (advance == -1) ND_PRINT((ndo, "\n\t (illegal length)")); else if (advance == -2) goto trunc; else ND_PRINT((ndo, "\n\t %s", buf)); break; case (AFNUM_NSAP<<8 | SAFNUM_UNICAST): case (AFNUM_NSAP<<8 | SAFNUM_MULTICAST): case (AFNUM_NSAP<<8 | SAFNUM_UNIMULTICAST): advance = decode_clnp_prefix(ndo, tptr, buf, sizeof(buf)); if (advance == -1) ND_PRINT((ndo, "\n\t (illegal prefix length)")); else if (advance == -2) goto trunc; else ND_PRINT((ndo, "\n\t %s", buf)); break; case (AFNUM_NSAP<<8 | SAFNUM_VPNUNICAST): case (AFNUM_NSAP<<8 | SAFNUM_VPNMULTICAST): case (AFNUM_NSAP<<8 | SAFNUM_VPNUNIMULTICAST): advance = decode_labeled_vpn_clnp_prefix(ndo, tptr, buf, sizeof(buf)); if (advance == -1) ND_PRINT((ndo, "\n\t (illegal prefix length)")); else if (advance == -2) goto trunc; else ND_PRINT((ndo, "\n\t %s", buf)); break; case (AFNUM_INET<<8 | SAFNUM_MDT): advance = decode_mdt_vpn_nlri(ndo, tptr, buf, sizeof(buf)); if (advance == -1) ND_PRINT((ndo, "\n\t (illegal prefix length)")); else if (advance == -2) goto trunc; else ND_PRINT((ndo, "\n\t %s", buf)); break; case (AFNUM_INET<<8 | SAFNUM_MULTICAST_VPN): /* fall through */ case (AFNUM_INET6<<8 | SAFNUM_MULTICAST_VPN): advance = decode_multicast_vpn(ndo, tptr, buf, sizeof(buf)); if (advance == -1) ND_PRINT((ndo, "\n\t (illegal prefix length)")); else if (advance == -2) goto trunc; else 
ND_PRINT((ndo, "\n\t %s", buf)); break; default: ND_TCHECK2(*(tptr-3),tlen); ND_PRINT((ndo, "no AFI %u / SAFI %u decoder", af, safi)); if (ndo->ndo_vflag <= 1) print_unknown_data(ndo, tptr-3, "\n\t ", tlen); advance = 0; tptr = pptr + len; break; } if (advance < 0) break; tptr += advance; } break; case BGPTYPE_EXTD_COMMUNITIES: if (len % 8) { ND_PRINT((ndo, "invalid len")); break; } while (tlen>0) { uint16_t extd_comm; ND_TCHECK2(tptr[0], 2); extd_comm=EXTRACT_16BITS(tptr); ND_PRINT((ndo, "\n\t %s (0x%04x), Flags [%s]", tok2str(bgp_extd_comm_subtype_values, "unknown extd community typecode", extd_comm), extd_comm, bittok2str(bgp_extd_comm_flag_values, "none", extd_comm))); ND_TCHECK2(*(tptr+2), 6); switch(extd_comm) { case BGP_EXT_COM_RT_0: case BGP_EXT_COM_RO_0: case BGP_EXT_COM_L2VPN_RT_0: ND_PRINT((ndo, ": %u:%u (= %s)", EXTRACT_16BITS(tptr+2), EXTRACT_32BITS(tptr+4), ipaddr_string(ndo, tptr+4))); break; case BGP_EXT_COM_RT_1: case BGP_EXT_COM_RO_1: case BGP_EXT_COM_L2VPN_RT_1: case BGP_EXT_COM_VRF_RT_IMP: ND_PRINT((ndo, ": %s:%u", ipaddr_string(ndo, tptr+2), EXTRACT_16BITS(tptr+6))); break; case BGP_EXT_COM_RT_2: case BGP_EXT_COM_RO_2: ND_PRINT((ndo, ": %s:%u", as_printf(ndo, astostr, sizeof(astostr), EXTRACT_32BITS(tptr+2)), EXTRACT_16BITS(tptr+6))); break; case BGP_EXT_COM_LINKBAND: bw.i = EXTRACT_32BITS(tptr+2); ND_PRINT((ndo, ": bandwidth: %.3f Mbps", bw.f*8/1000000)); break; case BGP_EXT_COM_VPN_ORIGIN: case BGP_EXT_COM_VPN_ORIGIN2: case BGP_EXT_COM_VPN_ORIGIN3: case BGP_EXT_COM_VPN_ORIGIN4: case BGP_EXT_COM_OSPF_RID: case BGP_EXT_COM_OSPF_RID2: ND_PRINT((ndo, "%s", ipaddr_string(ndo, tptr+2))); break; case BGP_EXT_COM_OSPF_RTYPE: case BGP_EXT_COM_OSPF_RTYPE2: ND_PRINT((ndo, ": area:%s, router-type:%s, metric-type:%s%s", ipaddr_string(ndo, tptr+2), tok2str(bgp_extd_comm_ospf_rtype_values, "unknown (0x%02x)", *(tptr+6)), (*(tptr+7) & BGP_OSPF_RTYPE_METRIC_TYPE) ? "E2" : "", ((*(tptr+6) == BGP_OSPF_RTYPE_EXT) || (*(tptr+6) == BGP_OSPF_RTYPE_NSSA)) ? 
"E1" : "")); break; case BGP_EXT_COM_L2INFO: ND_PRINT((ndo, ": %s Control Flags [0x%02x]:MTU %u", tok2str(l2vpn_encaps_values, "unknown encaps", *(tptr+2)), *(tptr+3), EXTRACT_16BITS(tptr+4))); break; case BGP_EXT_COM_SOURCE_AS: ND_PRINT((ndo, ": AS %u", EXTRACT_16BITS(tptr+2))); break; default: ND_TCHECK2(*tptr,8); print_unknown_data(ndo, tptr, "\n\t ", 8); break; } tlen -=8; tptr +=8; } break; case BGPTYPE_PMSI_TUNNEL: { uint8_t tunnel_type, flags; ND_TCHECK2(tptr[0], 5); tunnel_type = *(tptr+1); flags = *tptr; tlen = len; ND_PRINT((ndo, "\n\t Tunnel-type %s (%u), Flags [%s], MPLS Label %u", tok2str(bgp_pmsi_tunnel_values, "Unknown", tunnel_type), tunnel_type, bittok2str(bgp_pmsi_flag_values, "none", flags), EXTRACT_24BITS(tptr+2)>>4)); tptr +=5; tlen -= 5; switch (tunnel_type) { case BGP_PMSI_TUNNEL_PIM_SM: /* fall through */ case BGP_PMSI_TUNNEL_PIM_BIDIR: ND_TCHECK2(tptr[0], 8); ND_PRINT((ndo, "\n\t Sender %s, P-Group %s", ipaddr_string(ndo, tptr), ipaddr_string(ndo, tptr+4))); break; case BGP_PMSI_TUNNEL_PIM_SSM: ND_TCHECK2(tptr[0], 8); ND_PRINT((ndo, "\n\t Root-Node %s, P-Group %s", ipaddr_string(ndo, tptr), ipaddr_string(ndo, tptr+4))); break; case BGP_PMSI_TUNNEL_INGRESS: ND_TCHECK2(tptr[0], 4); ND_PRINT((ndo, "\n\t Tunnel-Endpoint %s", ipaddr_string(ndo, tptr))); break; case BGP_PMSI_TUNNEL_LDP_P2MP: /* fall through */ case BGP_PMSI_TUNNEL_LDP_MP2MP: ND_TCHECK2(tptr[0], 8); ND_PRINT((ndo, "\n\t Root-Node %s, LSP-ID 0x%08x", ipaddr_string(ndo, tptr), EXTRACT_32BITS(tptr+4))); break; case BGP_PMSI_TUNNEL_RSVP_P2MP: ND_TCHECK2(tptr[0], 8); ND_PRINT((ndo, "\n\t Extended-Tunnel-ID %s, P2MP-ID 0x%08x", ipaddr_string(ndo, tptr), EXTRACT_32BITS(tptr+4))); break; default: if (ndo->ndo_vflag <= 1) { print_unknown_data(ndo, tptr, "\n\t ", tlen); } } break; } case BGPTYPE_AIGP: { uint8_t type; uint16_t length; tlen = len; while (tlen >= 3) { ND_TCHECK2(tptr[0], 3); type = *tptr; length = EXTRACT_16BITS(tptr+1); tptr += 3; tlen -= 3; ND_PRINT((ndo, "\n\t %s TLV (%u), 
length %u", tok2str(bgp_aigp_values, "Unknown", type), type, length)); if (length < 3) goto trunc; length -= 3; /* * Check if we can read the TLV data. */ ND_TCHECK2(tptr[3], length); switch (type) { case BGP_AIGP_TLV: if (length < 8) goto trunc; ND_PRINT((ndo, ", metric %" PRIu64, EXTRACT_64BITS(tptr))); break; default: if (ndo->ndo_vflag <= 1) { print_unknown_data(ndo, tptr,"\n\t ", length); } } tptr += length; tlen -= length; } break; } case BGPTYPE_ATTR_SET: ND_TCHECK2(tptr[0], 4); if (len < 4) goto trunc; ND_PRINT((ndo, "\n\t Origin AS: %s", as_printf(ndo, astostr, sizeof(astostr), EXTRACT_32BITS(tptr)))); tptr+=4; len -=4; while (len) { u_int aflags, alenlen, alen; ND_TCHECK2(tptr[0], 2); if (len < 2) goto trunc; aflags = *tptr; atype = *(tptr + 1); tptr += 2; len -= 2; alenlen = bgp_attr_lenlen(aflags, tptr); ND_TCHECK2(tptr[0], alenlen); if (len < alenlen) goto trunc; alen = bgp_attr_len(aflags, tptr); tptr += alenlen; len -= alenlen; ND_PRINT((ndo, "\n\t %s (%u), length: %u", tok2str(bgp_attr_values, "Unknown Attribute", atype), atype, alen)); if (aflags) { ND_PRINT((ndo, ", Flags [%s%s%s%s", aflags & 0x80 ? "O" : "", aflags & 0x40 ? "T" : "", aflags & 0x20 ? "P" : "", aflags & 0x10 ? "E" : "")); if (aflags & 0xf) ND_PRINT((ndo, "+%x", aflags & 0xf)); ND_PRINT((ndo, "]: ")); } /* FIXME check for recursion */ if (!bgp_attr_print(ndo, atype, tptr, alen)) return 0; tptr += alen; len -= alen; } break; case BGPTYPE_LARGE_COMMUNITY: if (len == 0 || len % 12) { ND_PRINT((ndo, "invalid len")); break; } ND_PRINT((ndo, "\n\t ")); while (len > 0) { ND_TCHECK2(*tptr, 12); ND_PRINT((ndo, "%u:%u:%u%s", EXTRACT_32BITS(tptr), EXTRACT_32BITS(tptr + 4), EXTRACT_32BITS(tptr + 8), (len > 12) ? 
", " : "")); tptr += 12; len -= 12; } break; default: ND_TCHECK2(*pptr,len); ND_PRINT((ndo, "\n\t no Attribute %u decoder", atype)); /* we have no decoder for the attribute */ if (ndo->ndo_vflag <= 1) print_unknown_data(ndo, pptr, "\n\t ", len); break; } if (ndo->ndo_vflag > 1 && len) { /* omit zero length attributes*/ ND_TCHECK2(*pptr,len); print_unknown_data(ndo, pptr, "\n\t ", len); } return 1; trunc: return 0; } static void bgp_capabilities_print(netdissect_options *ndo, const u_char *opt, int caps_len) { int cap_type, cap_len, tcap_len, cap_offset; int i = 0; while (i < caps_len) { ND_TCHECK2(opt[i], BGP_CAP_HEADER_SIZE); cap_type=opt[i]; cap_len=opt[i+1]; tcap_len=cap_len; ND_PRINT((ndo, "\n\t %s (%u), length: %u", tok2str(bgp_capcode_values, "Unknown", cap_type), cap_type, cap_len)); ND_TCHECK2(opt[i+2], cap_len); switch (cap_type) { case BGP_CAPCODE_MP: ND_PRINT((ndo, "\n\t\tAFI %s (%u), SAFI %s (%u)", tok2str(af_values, "Unknown", EXTRACT_16BITS(opt+i+2)), EXTRACT_16BITS(opt+i+2), tok2str(bgp_safi_values, "Unknown", opt[i+5]), opt[i+5])); break; case BGP_CAPCODE_RESTART: ND_PRINT((ndo, "\n\t\tRestart Flags: [%s], Restart Time %us", ((opt[i+2])&0x80) ? "R" : "none", EXTRACT_16BITS(opt+i+2)&0xfff)); tcap_len-=2; cap_offset=4; while(tcap_len>=4) { ND_PRINT((ndo, "\n\t\t AFI %s (%u), SAFI %s (%u), Forwarding state preserved: %s", tok2str(af_values,"Unknown", EXTRACT_16BITS(opt+i+cap_offset)), EXTRACT_16BITS(opt+i+cap_offset), tok2str(bgp_safi_values,"Unknown", opt[i+cap_offset+2]), opt[i+cap_offset+2], ((opt[i+cap_offset+3])&0x80) ? "yes" : "no" )); tcap_len-=4; cap_offset+=4; } break; case BGP_CAPCODE_RR: case BGP_CAPCODE_RR_CISCO: break; case BGP_CAPCODE_AS_NEW: /* * Extract the 4 byte AS number encoded. 
*/ if (cap_len == 4) { ND_PRINT((ndo, "\n\t\t 4 Byte AS %s", as_printf(ndo, astostr, sizeof(astostr), EXTRACT_32BITS(opt + i + 2)))); } break; case BGP_CAPCODE_ADD_PATH: cap_offset=2; if (tcap_len == 0) { ND_PRINT((ndo, " (bogus)")); /* length */ break; } while (tcap_len > 0) { if (tcap_len < 4) { ND_PRINT((ndo, "\n\t\t(invalid)")); break; } ND_PRINT((ndo, "\n\t\tAFI %s (%u), SAFI %s (%u), Send/Receive: %s", tok2str(af_values,"Unknown",EXTRACT_16BITS(opt+i+cap_offset)), EXTRACT_16BITS(opt+i+cap_offset), tok2str(bgp_safi_values,"Unknown",opt[i+cap_offset+2]), opt[i+cap_offset+2], tok2str(bgp_add_path_recvsend,"Bogus (0x%02x)",opt[i+cap_offset+3]) )); tcap_len-=4; cap_offset+=4; } break; default: ND_PRINT((ndo, "\n\t\tno decoder for Capability %u", cap_type)); if (ndo->ndo_vflag <= 1) print_unknown_data(ndo, &opt[i+2], "\n\t\t", cap_len); break; } if (ndo->ndo_vflag > 1 && cap_len > 0) { print_unknown_data(ndo, &opt[i+2], "\n\t\t", cap_len); } i += BGP_CAP_HEADER_SIZE + cap_len; } return; trunc: ND_PRINT((ndo, "[|BGP]")); } static void bgp_open_print(netdissect_options *ndo, const u_char *dat, int length) { struct bgp_open bgpo; struct bgp_opt bgpopt; const u_char *opt; int i; ND_TCHECK2(dat[0], BGP_OPEN_SIZE); memcpy(&bgpo, dat, BGP_OPEN_SIZE); ND_PRINT((ndo, "\n\t Version %d, ", bgpo.bgpo_version)); ND_PRINT((ndo, "my AS %s, ", as_printf(ndo, astostr, sizeof(astostr), ntohs(bgpo.bgpo_myas)))); ND_PRINT((ndo, "Holdtime %us, ", ntohs(bgpo.bgpo_holdtime))); ND_PRINT((ndo, "ID %s", ipaddr_string(ndo, &bgpo.bgpo_id))); ND_PRINT((ndo, "\n\t Optional parameters, length: %u", bgpo.bgpo_optlen)); /* some little sanity checking */ if (length < bgpo.bgpo_optlen+BGP_OPEN_SIZE) return; /* ugly! 
*/ opt = &((const struct bgp_open *)dat)->bgpo_optlen; opt++; i = 0; while (i < bgpo.bgpo_optlen) { ND_TCHECK2(opt[i], BGP_OPT_SIZE); memcpy(&bgpopt, &opt[i], BGP_OPT_SIZE); if (i + 2 + bgpopt.bgpopt_len > bgpo.bgpo_optlen) { ND_PRINT((ndo, "\n\t Option %d, length: %u", bgpopt.bgpopt_type, bgpopt.bgpopt_len)); break; } ND_PRINT((ndo, "\n\t Option %s (%u), length: %u", tok2str(bgp_opt_values,"Unknown", bgpopt.bgpopt_type), bgpopt.bgpopt_type, bgpopt.bgpopt_len)); /* now let's decode the options we know*/ switch(bgpopt.bgpopt_type) { case BGP_OPT_CAP: bgp_capabilities_print(ndo, &opt[i+BGP_OPT_SIZE], bgpopt.bgpopt_len); break; case BGP_OPT_AUTH: default: ND_PRINT((ndo, "\n\t no decoder for option %u", bgpopt.bgpopt_type)); break; } i += BGP_OPT_SIZE + bgpopt.bgpopt_len; } return; trunc: ND_PRINT((ndo, "[|BGP]")); } static void bgp_update_print(netdissect_options *ndo, const u_char *dat, int length) { struct bgp bgp; const u_char *p; int withdrawn_routes_len; int len; int i; ND_TCHECK2(dat[0], BGP_SIZE); if (length < BGP_SIZE) goto trunc; memcpy(&bgp, dat, BGP_SIZE); p = dat + BGP_SIZE; /*XXX*/ length -= BGP_SIZE; /* Unfeasible routes */ ND_TCHECK2(p[0], 2); if (length < 2) goto trunc; withdrawn_routes_len = EXTRACT_16BITS(p); p += 2; length -= 2; if (withdrawn_routes_len) { /* * Without keeping state from the original NLRI message, * it's not possible to tell if this a v4 or v6 route, * so only try to decode it if we're not v6 enabled. 
*/ ND_TCHECK2(p[0], withdrawn_routes_len); if (length < withdrawn_routes_len) goto trunc; ND_PRINT((ndo, "\n\t Withdrawn routes: %d bytes", withdrawn_routes_len)); p += withdrawn_routes_len; length -= withdrawn_routes_len; } ND_TCHECK2(p[0], 2); if (length < 2) goto trunc; len = EXTRACT_16BITS(p); p += 2; length -= 2; if (withdrawn_routes_len == 0 && len == 0 && length == 0) { /* No withdrawn routes, no path attributes, no NLRI */ ND_PRINT((ndo, "\n\t End-of-Rib Marker (empty NLRI)")); return; } if (len) { /* do something more useful!*/ while (len) { int aflags, atype, alenlen, alen; ND_TCHECK2(p[0], 2); if (len < 2) goto trunc; if (length < 2) goto trunc; aflags = *p; atype = *(p + 1); p += 2; len -= 2; length -= 2; alenlen = bgp_attr_lenlen(aflags, p); ND_TCHECK2(p[0], alenlen); if (len < alenlen) goto trunc; if (length < alenlen) goto trunc; alen = bgp_attr_len(aflags, p); p += alenlen; len -= alenlen; length -= alenlen; ND_PRINT((ndo, "\n\t %s (%u), length: %u", tok2str(bgp_attr_values, "Unknown Attribute", atype), atype, alen)); if (aflags) { ND_PRINT((ndo, ", Flags [%s%s%s%s", aflags & 0x80 ? "O" : "", aflags & 0x40 ? "T" : "", aflags & 0x20 ? "P" : "", aflags & 0x10 ? 
"E" : "")); if (aflags & 0xf) ND_PRINT((ndo, "+%x", aflags & 0xf)); ND_PRINT((ndo, "]: ")); } if (len < alen) goto trunc; if (length < alen) goto trunc; if (!bgp_attr_print(ndo, atype, p, alen)) goto trunc; p += alen; len -= alen; length -= alen; } } if (length) { /* * XXX - what if they're using the "Advertisement of * Multiple Paths in BGP" feature: * * https://datatracker.ietf.org/doc/draft-ietf-idr-add-paths/ * * http://tools.ietf.org/html/draft-ietf-idr-add-paths-06 */ ND_PRINT((ndo, "\n\t Updated routes:")); while (length) { char buf[MAXHOSTNAMELEN + 100]; i = decode_prefix4(ndo, p, length, buf, sizeof(buf)); if (i == -1) { ND_PRINT((ndo, "\n\t (illegal prefix length)")); break; } else if (i == -2) goto trunc; else if (i == -3) goto trunc; /* bytes left, but not enough */ else { ND_PRINT((ndo, "\n\t %s", buf)); p += i; length -= i; } } } return; trunc: ND_PRINT((ndo, "[|BGP]")); } static void bgp_notification_print(netdissect_options *ndo, const u_char *dat, int length) { struct bgp_notification bgpn; const u_char *tptr; uint8_t shutdown_comm_length; uint8_t remainder_offset; ND_TCHECK2(dat[0], BGP_NOTIFICATION_SIZE); memcpy(&bgpn, dat, BGP_NOTIFICATION_SIZE); /* some little sanity checking */ if (length<BGP_NOTIFICATION_SIZE) return; ND_PRINT((ndo, ", %s (%u)", tok2str(bgp_notify_major_values, "Unknown Error", bgpn.bgpn_major), bgpn.bgpn_major)); switch (bgpn.bgpn_major) { case BGP_NOTIFY_MAJOR_MSG: ND_PRINT((ndo, ", subcode %s (%u)", tok2str(bgp_notify_minor_msg_values, "Unknown", bgpn.bgpn_minor), bgpn.bgpn_minor)); break; case BGP_NOTIFY_MAJOR_OPEN: ND_PRINT((ndo, ", subcode %s (%u)", tok2str(bgp_notify_minor_open_values, "Unknown", bgpn.bgpn_minor), bgpn.bgpn_minor)); break; case BGP_NOTIFY_MAJOR_UPDATE: ND_PRINT((ndo, ", subcode %s (%u)", tok2str(bgp_notify_minor_update_values, "Unknown", bgpn.bgpn_minor), bgpn.bgpn_minor)); break; case BGP_NOTIFY_MAJOR_FSM: ND_PRINT((ndo, " subcode %s (%u)", tok2str(bgp_notify_minor_fsm_values, "Unknown", 
bgpn.bgpn_minor), bgpn.bgpn_minor)); break; case BGP_NOTIFY_MAJOR_CAP: ND_PRINT((ndo, " subcode %s (%u)", tok2str(bgp_notify_minor_cap_values, "Unknown", bgpn.bgpn_minor), bgpn.bgpn_minor)); break; case BGP_NOTIFY_MAJOR_CEASE: ND_PRINT((ndo, ", subcode %s (%u)", tok2str(bgp_notify_minor_cease_values, "Unknown", bgpn.bgpn_minor), bgpn.bgpn_minor)); /* draft-ietf-idr-cease-subcode-02 mentions optionally 7 bytes * for the maxprefix subtype, which may contain AFI, SAFI and MAXPREFIXES */ if(bgpn.bgpn_minor == BGP_NOTIFY_MINOR_CEASE_MAXPRFX && length >= BGP_NOTIFICATION_SIZE + 7) { tptr = dat + BGP_NOTIFICATION_SIZE; ND_TCHECK2(*tptr, 7); ND_PRINT((ndo, ", AFI %s (%u), SAFI %s (%u), Max Prefixes: %u", tok2str(af_values, "Unknown", EXTRACT_16BITS(tptr)), EXTRACT_16BITS(tptr), tok2str(bgp_safi_values, "Unknown", *(tptr+2)), *(tptr+2), EXTRACT_32BITS(tptr+3))); } /* * draft-ietf-idr-shutdown describes a method to send a communication * intended for human consumption regarding the Administrative Shutdown */ if ((bgpn.bgpn_minor == BGP_NOTIFY_MINOR_CEASE_SHUT || bgpn.bgpn_minor == BGP_NOTIFY_MINOR_CEASE_RESET) && length >= BGP_NOTIFICATION_SIZE + 1) { tptr = dat + BGP_NOTIFICATION_SIZE; ND_TCHECK2(*tptr, 1); shutdown_comm_length = *(tptr); remainder_offset = 0; /* garbage, hexdump it all */ if (shutdown_comm_length > BGP_NOTIFY_MINOR_CEASE_ADMIN_SHUTDOWN_LEN || shutdown_comm_length > length - (BGP_NOTIFICATION_SIZE + 1)) { ND_PRINT((ndo, ", invalid Shutdown Communication length")); } else if (shutdown_comm_length == 0) { ND_PRINT((ndo, ", empty Shutdown Communication")); remainder_offset += 1; } /* a proper shutdown communication */ else { ND_TCHECK2(*(tptr+1), shutdown_comm_length); ND_PRINT((ndo, ", Shutdown Communication (length: %u): \"", shutdown_comm_length)); (void)fn_printn(ndo, tptr+1, shutdown_comm_length, NULL); ND_PRINT((ndo, "\"")); remainder_offset += shutdown_comm_length + 1; } /* if there is trailing data, hexdump it */ if(length - (remainder_offset + 
BGP_NOTIFICATION_SIZE) > 0) { ND_PRINT((ndo, ", Data: (length: %u)", length - (remainder_offset + BGP_NOTIFICATION_SIZE))); hex_print(ndo, "\n\t\t", tptr + remainder_offset, length - (remainder_offset + BGP_NOTIFICATION_SIZE)); } } break; default: break; } return; trunc: ND_PRINT((ndo, "[|BGP]")); } static void bgp_route_refresh_print(netdissect_options *ndo, const u_char *pptr, int len) { const struct bgp_route_refresh *bgp_route_refresh_header; ND_TCHECK2(pptr[0], BGP_ROUTE_REFRESH_SIZE); /* some little sanity checking */ if (len<BGP_ROUTE_REFRESH_SIZE) return; bgp_route_refresh_header = (const struct bgp_route_refresh *)pptr; ND_PRINT((ndo, "\n\t AFI %s (%u), SAFI %s (%u)", tok2str(af_values,"Unknown", /* this stinks but the compiler pads the structure * weird */ EXTRACT_16BITS(&bgp_route_refresh_header->afi)), EXTRACT_16BITS(&bgp_route_refresh_header->afi), tok2str(bgp_safi_values,"Unknown", bgp_route_refresh_header->safi), bgp_route_refresh_header->safi)); if (ndo->ndo_vflag > 1) { ND_TCHECK2(*pptr, len); print_unknown_data(ndo, pptr, "\n\t ", len); } return; trunc: ND_PRINT((ndo, "[|BGP]")); } static int bgp_header_print(netdissect_options *ndo, const u_char *dat, int length) { struct bgp bgp; ND_TCHECK2(dat[0], BGP_SIZE); memcpy(&bgp, dat, BGP_SIZE); ND_PRINT((ndo, "\n\t%s Message (%u), length: %u", tok2str(bgp_msg_values, "Unknown", bgp.bgp_type), bgp.bgp_type, length)); switch (bgp.bgp_type) { case BGP_OPEN: bgp_open_print(ndo, dat, length); break; case BGP_UPDATE: bgp_update_print(ndo, dat, length); break; case BGP_NOTIFICATION: bgp_notification_print(ndo, dat, length); break; case BGP_KEEPALIVE: break; case BGP_ROUTE_REFRESH: bgp_route_refresh_print(ndo, dat, length); break; default: /* we have no decoder for the BGP message */ ND_TCHECK2(*dat, length); ND_PRINT((ndo, "\n\t no Message %u decoder", bgp.bgp_type)); print_unknown_data(ndo, dat, "\n\t ", length); break; } return 1; trunc: ND_PRINT((ndo, "[|BGP]")); return 0; } void 
bgp_print(netdissect_options *ndo, const u_char *dat, int length) { const u_char *p; const u_char *ep; const u_char *start; const u_char marker[] = { 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, }; struct bgp bgp; uint16_t hlen; ep = dat + length; if (ndo->ndo_snapend < dat + length) ep = ndo->ndo_snapend; ND_PRINT((ndo, ": BGP")); if (ndo->ndo_vflag < 1) /* lets be less chatty */ return; p = dat; start = p; while (p < ep) { if (!ND_TTEST2(p[0], 1)) break; if (p[0] != 0xff) { p++; continue; } if (!ND_TTEST2(p[0], sizeof(marker))) break; if (memcmp(p, marker, sizeof(marker)) != 0) { p++; continue; } /* found BGP header */ ND_TCHECK2(p[0], BGP_SIZE); /*XXX*/ memcpy(&bgp, p, BGP_SIZE); if (start != p) ND_PRINT((ndo, " [|BGP]")); hlen = ntohs(bgp.bgp_len); if (hlen < BGP_SIZE) { ND_PRINT((ndo, "\n[|BGP Bogus header length %u < %u]", hlen, BGP_SIZE)); break; } if (ND_TTEST2(p[0], hlen)) { if (!bgp_header_print(ndo, p, hlen)) return; p += hlen; start = p; } else { ND_PRINT((ndo, "\n[|BGP %s]", tok2str(bgp_msg_values, "Unknown Message Type", bgp.bgp_type))); break; } } return; trunc: ND_PRINT((ndo, " [|BGP]")); } /* * Local Variables: * c-style: whitesmith * c-basic-offset: 4 * End: */
decode_rt_routing_info(netdissect_options *ndo, const u_char *pptr, char *buf, u_int buflen) { uint8_t route_target[8]; u_int plen; ND_TCHECK(pptr[0]); plen = pptr[0]; /* get prefix length */ if (0 == plen) { snprintf(buf, buflen, "default route target"); return 1; } if (32 > plen) return -1; plen-=32; /* adjust prefix length */ if (64 < plen) return -1; memset(&route_target, 0, sizeof(route_target)); ND_TCHECK2(pptr[1], (plen + 7) / 8); memcpy(&route_target, &pptr[1], (plen + 7) / 8); if (plen % 8) { ((u_char *)&route_target)[(plen + 7) / 8 - 1] &= ((0xff00 >> (plen % 8)) & 0xff); } snprintf(buf, buflen, "origin AS: %s, route target %s", as_printf(ndo, astostr, sizeof(astostr), EXTRACT_32BITS(pptr+1)), bgp_vpn_rd_print(ndo, (u_char *)&route_target)); return 5 + (plen + 7) / 8; trunc: return -2; }
decode_rt_routing_info(netdissect_options *ndo, const u_char *pptr, char *buf, u_int buflen) { uint8_t route_target[8]; u_int plen; char asbuf[sizeof(astostr)]; /* bgp_vpn_rd_print() overwrites astostr */ /* NLRI "prefix length" from RFC 2858 Section 4. */ ND_TCHECK(pptr[0]); plen = pptr[0]; /* get prefix length */ /* NLRI "prefix" (ibid), valid lengths are { 0, 32, 33, ..., 96 } bits. * RFC 4684 Section 4 defines the layout of "origin AS" and "route * target" fields inside the "prefix" depending on its length. */ if (0 == plen) { /* Without "origin AS", without "route target". */ snprintf(buf, buflen, "default route target"); return 1; } if (32 > plen) return -1; /* With at least "origin AS", possibly with "route target". */ ND_TCHECK_32BITS(pptr + 1); as_printf(ndo, asbuf, sizeof(asbuf), EXTRACT_32BITS(pptr + 1)); plen-=32; /* adjust prefix length */ if (64 < plen) return -1; /* From now on (plen + 7) / 8 evaluates to { 0, 1, 2, ..., 8 } * and gives the number of octets in the variable-length "route * target" field inside this NLRI "prefix". Look for it. */ memset(&route_target, 0, sizeof(route_target)); ND_TCHECK2(pptr[5], (plen + 7) / 8); memcpy(&route_target, &pptr[5], (plen + 7) / 8); /* Which specification says to do this? */ if (plen % 8) { ((u_char *)&route_target)[(plen + 7) / 8 - 1] &= ((0xff00 >> (plen % 8)) & 0xff); } snprintf(buf, buflen, "origin AS: %s, route target %s", asbuf, bgp_vpn_rd_print(ndo, (u_char *)&route_target)); return 5 + (plen + 7) / 8; trunc: return -2; }
{'added': [(764, '\tchar asbuf[sizeof(astostr)]; /* bgp_vpn_rd_print() overwrites astostr */'), (766, '\t/* NLRI "prefix length" from RFC 2858 Section 4. */'), (770, '\t/* NLRI "prefix" (ibid), valid lengths are { 0, 32, 33, ..., 96 } bits.'), (771, '\t * RFC 4684 Section 4 defines the layout of "origin AS" and "route'), (772, '\t * target" fields inside the "prefix" depending on its length.'), (773, '\t */'), (775, '\t\t/* Without "origin AS", without "route target". */'), (783, '\t/* With at least "origin AS", possibly with "route target". */'), (784, '\tND_TCHECK_32BITS(pptr + 1);'), (785, '\tas_printf(ndo, asbuf, sizeof(asbuf), EXTRACT_32BITS(pptr + 1));'), (786, ''), (792, '\t/* From now on (plen + 7) / 8 evaluates to { 0, 1, 2, ..., 8 }'), (793, '\t * and gives the number of octets in the variable-length "route'), (794, '\t * target" field inside this NLRI "prefix". Look for it.'), (795, '\t */'), (797, '\tND_TCHECK2(pptr[5], (plen + 7) / 8);'), (798, '\tmemcpy(&route_target, &pptr[5], (plen + 7) / 8);'), (799, '\t/* Which specification says to do this? */'), (805, '\t asbuf,')], 'deleted': [(782, '\tND_TCHECK2(pptr[1], (plen + 7) / 8);'), (783, '\tmemcpy(&route_target, &pptr[1], (plen + 7) / 8);'), (789, '\t as_printf(ndo, astostr, sizeof(astostr), EXTRACT_32BITS(pptr+1)),')]}
19
3
2,210
14,348
30
237
5
https://github.com/the-tcpdump-group/tcpdump
CVE-2017-13053
CWE-125
995
QPDFObjectHandle.cc
C++
QPDFObjectHandle::parseContentStream_internal
#include <qpdf/QPDFObjectHandle.hh> #include <qpdf/QPDF.hh> #include <qpdf/QPDF_Bool.hh> #include <qpdf/QPDF_Null.hh> #include <qpdf/QPDF_Integer.hh> #include <qpdf/QPDF_Real.hh> #include <qpdf/QPDF_Name.hh> #include <qpdf/QPDF_String.hh> #include <qpdf/QPDF_Operator.hh> #include <qpdf/QPDF_InlineImage.hh> #include <qpdf/QPDF_Array.hh> #include <qpdf/QPDF_Dictionary.hh> #include <qpdf/QPDF_Stream.hh> #include <qpdf/QPDF_Reserved.hh> #include <qpdf/Pl_Buffer.hh> #include <qpdf/BufferInputSource.hh> #include <qpdf/QPDFExc.hh> #include <qpdf/QTC.hh> #include <qpdf/QUtil.hh> #include <stdexcept> #include <stdlib.h> #include <ctype.h> class TerminateParsing { }; void QPDFObjectHandle::ParserCallbacks::terminateParsing() { throw TerminateParsing(); } QPDFObjectHandle::QPDFObjectHandle() : initialized(false), qpdf(0), objid(0), generation(0), reserved(false) { } QPDFObjectHandle::QPDFObjectHandle(QPDF* qpdf, int objid, int generation) : initialized(true), qpdf(qpdf), objid(objid), generation(generation), reserved(false) { } QPDFObjectHandle::QPDFObjectHandle(QPDFObject* data) : initialized(true), qpdf(0), objid(0), generation(0), obj(data), reserved(false) { } void QPDFObjectHandle::releaseResolved() { // Recursively break any resolved references to indirect objects. // Do not cross over indirect object boundaries to avoid an // infinite loop. This method may only be called during final // destruction. See comments in QPDF::~QPDF(). 
if (isIndirect()) { if (this->obj.getPointer()) { this->obj = 0; } } else { QPDFObject::ObjAccessor::releaseResolved(this->obj.getPointer()); } } bool QPDFObjectHandle::isInitialized() const { return this->initialized; } QPDFObject::object_type_e QPDFObjectHandle::getTypeCode() { if (this->initialized) { dereference(); return obj->getTypeCode(); } else { return QPDFObject::ot_uninitialized; } } char const* QPDFObjectHandle::getTypeName() { if (this->initialized) { dereference(); return obj->getTypeName(); } else { return "uninitialized"; } } template <class T> class QPDFObjectTypeAccessor { public: static bool check(QPDFObject* o) { return (o && dynamic_cast<T*>(o)); } }; bool QPDFObjectHandle::isBool() { dereference(); return QPDFObjectTypeAccessor<QPDF_Bool>::check(obj.getPointer()); } bool QPDFObjectHandle::isNull() { dereference(); return QPDFObjectTypeAccessor<QPDF_Null>::check(obj.getPointer()); } bool QPDFObjectHandle::isInteger() { dereference(); return QPDFObjectTypeAccessor<QPDF_Integer>::check(obj.getPointer()); } bool QPDFObjectHandle::isReal() { dereference(); return QPDFObjectTypeAccessor<QPDF_Real>::check(obj.getPointer()); } bool QPDFObjectHandle::isNumber() { return (isInteger() || isReal()); } double QPDFObjectHandle::getNumericValue() { double result = 0.0; if (isInteger()) { result = static_cast<double>(getIntValue()); } else if (isReal()) { result = atof(getRealValue().c_str()); } else { throw std::logic_error("getNumericValue called for non-numeric object"); } return result; } bool QPDFObjectHandle::isName() { dereference(); return QPDFObjectTypeAccessor<QPDF_Name>::check(obj.getPointer()); } bool QPDFObjectHandle::isString() { dereference(); return QPDFObjectTypeAccessor<QPDF_String>::check(obj.getPointer()); } bool QPDFObjectHandle::isOperator() { dereference(); return QPDFObjectTypeAccessor<QPDF_Operator>::check(obj.getPointer()); } bool QPDFObjectHandle::isInlineImage() { dereference(); return 
QPDFObjectTypeAccessor<QPDF_InlineImage>::check(obj.getPointer()); } bool QPDFObjectHandle::isArray() { dereference(); return QPDFObjectTypeAccessor<QPDF_Array>::check(obj.getPointer()); } bool QPDFObjectHandle::isDictionary() { dereference(); return QPDFObjectTypeAccessor<QPDF_Dictionary>::check(obj.getPointer()); } bool QPDFObjectHandle::isStream() { dereference(); return QPDFObjectTypeAccessor<QPDF_Stream>::check(obj.getPointer()); } bool QPDFObjectHandle::isReserved() { // dereference will clear reserved if this has been replaced dereference(); return this->reserved; } bool QPDFObjectHandle::isIndirect() { assertInitialized(); return (this->objid != 0); } bool QPDFObjectHandle::isScalar() { return (! (isArray() || isDictionary() || isStream() || isOperator() || isInlineImage())); } // Bool accessors bool QPDFObjectHandle::getBoolValue() { assertBool(); return dynamic_cast<QPDF_Bool*>(obj.getPointer())->getVal(); } // Integer accessors long long QPDFObjectHandle::getIntValue() { assertInteger(); return dynamic_cast<QPDF_Integer*>(obj.getPointer())->getVal(); } // Real accessors std::string QPDFObjectHandle::getRealValue() { assertReal(); return dynamic_cast<QPDF_Real*>(obj.getPointer())->getVal(); } // Name accessors std::string QPDFObjectHandle::getName() { assertName(); return dynamic_cast<QPDF_Name*>(obj.getPointer())->getName(); } // String accessors std::string QPDFObjectHandle::getStringValue() { assertString(); return dynamic_cast<QPDF_String*>(obj.getPointer())->getVal(); } std::string QPDFObjectHandle::getUTF8Value() { assertString(); return dynamic_cast<QPDF_String*>(obj.getPointer())->getUTF8Val(); } // Operator and Inline Image accessors std::string QPDFObjectHandle::getOperatorValue() { assertOperator(); return dynamic_cast<QPDF_Operator*>(obj.getPointer())->getVal(); } std::string QPDFObjectHandle::getInlineImageValue() { assertInlineImage(); return dynamic_cast<QPDF_InlineImage*>(obj.getPointer())->getVal(); } // Array accessors int 
QPDFObjectHandle::getArrayNItems() { assertArray(); return dynamic_cast<QPDF_Array*>(obj.getPointer())->getNItems(); } QPDFObjectHandle QPDFObjectHandle::getArrayItem(int n) { assertArray(); return dynamic_cast<QPDF_Array*>(obj.getPointer())->getItem(n); } std::vector<QPDFObjectHandle> QPDFObjectHandle::getArrayAsVector() { assertArray(); return dynamic_cast<QPDF_Array*>(obj.getPointer())->getAsVector(); } // Array mutators void QPDFObjectHandle::setArrayItem(int n, QPDFObjectHandle const& item) { assertArray(); return dynamic_cast<QPDF_Array*>(obj.getPointer())->setItem(n, item); } void QPDFObjectHandle::setArrayFromVector(std::vector<QPDFObjectHandle> const& items) { assertArray(); return dynamic_cast<QPDF_Array*>(obj.getPointer())->setFromVector(items); } void QPDFObjectHandle::insertItem(int at, QPDFObjectHandle const& item) { assertArray(); return dynamic_cast<QPDF_Array*>(obj.getPointer())->insertItem(at, item); } void QPDFObjectHandle::appendItem(QPDFObjectHandle const& item) { assertArray(); return dynamic_cast<QPDF_Array*>(obj.getPointer())->appendItem(item); } void QPDFObjectHandle::eraseItem(int at) { assertArray(); return dynamic_cast<QPDF_Array*>(obj.getPointer())->eraseItem(at); } // Dictionary accessors bool QPDFObjectHandle::hasKey(std::string const& key) { assertDictionary(); return dynamic_cast<QPDF_Dictionary*>(obj.getPointer())->hasKey(key); } QPDFObjectHandle QPDFObjectHandle::getKey(std::string const& key) { assertDictionary(); return dynamic_cast<QPDF_Dictionary*>(obj.getPointer())->getKey(key); } std::set<std::string> QPDFObjectHandle::getKeys() { assertDictionary(); return dynamic_cast<QPDF_Dictionary*>(obj.getPointer())->getKeys(); } std::map<std::string, QPDFObjectHandle> QPDFObjectHandle::getDictAsMap() { assertDictionary(); return dynamic_cast<QPDF_Dictionary*>(obj.getPointer())->getAsMap(); } // Array and Name accessors bool QPDFObjectHandle::isOrHasName(std::string const& value) { if (isName() && (getName() == value)) { return true; } 
else if (isArray()) { int n = getArrayNItems(); for (int i = 0; i < n; ++i) { QPDFObjectHandle item = getArrayItem(0); if (item.isName() && (item.getName() == value)) { return true; } } } return false; } // Indirect object accessors QPDF* QPDFObjectHandle::getOwningQPDF() { // Will be null for direct objects return this->qpdf; } // Dictionary mutators void QPDFObjectHandle::replaceKey(std::string const& key, QPDFObjectHandle const& value) { assertDictionary(); return dynamic_cast<QPDF_Dictionary*>( obj.getPointer())->replaceKey(key, value); } void QPDFObjectHandle::removeKey(std::string const& key) { assertDictionary(); return dynamic_cast<QPDF_Dictionary*>(obj.getPointer())->removeKey(key); } void QPDFObjectHandle::replaceOrRemoveKey(std::string const& key, QPDFObjectHandle value) { assertDictionary(); return dynamic_cast<QPDF_Dictionary*>( obj.getPointer())->replaceOrRemoveKey(key, value); } // Stream accessors QPDFObjectHandle QPDFObjectHandle::getDict() { assertStream(); return dynamic_cast<QPDF_Stream*>(obj.getPointer())->getDict(); } void QPDFObjectHandle::replaceDict(QPDFObjectHandle new_dict) { assertStream(); dynamic_cast<QPDF_Stream*>(obj.getPointer())->replaceDict(new_dict); } PointerHolder<Buffer> QPDFObjectHandle::getStreamData(qpdf_stream_decode_level_e level) { assertStream(); return dynamic_cast<QPDF_Stream*>(obj.getPointer())->getStreamData(level); } PointerHolder<Buffer> QPDFObjectHandle::getRawStreamData() { assertStream(); return dynamic_cast<QPDF_Stream*>(obj.getPointer())->getRawStreamData(); } bool QPDFObjectHandle::pipeStreamData(Pipeline* p, unsigned long encode_flags, qpdf_stream_decode_level_e decode_level, bool suppress_warnings) { assertStream(); return dynamic_cast<QPDF_Stream*>(obj.getPointer())->pipeStreamData( p, encode_flags, decode_level, suppress_warnings); } bool QPDFObjectHandle::pipeStreamData(Pipeline* p, bool filter, bool normalize, bool compress) { unsigned long encode_flags = 0; qpdf_stream_decode_level_e decode_level = 
qpdf_dl_none; if (filter) { decode_level = qpdf_dl_generalized; if (normalize) { encode_flags |= qpdf_ef_normalize; } if (compress) { encode_flags |= qpdf_ef_compress; } } return pipeStreamData(p, encode_flags, decode_level, false); } void QPDFObjectHandle::replaceStreamData(PointerHolder<Buffer> data, QPDFObjectHandle const& filter, QPDFObjectHandle const& decode_parms) { assertStream(); dynamic_cast<QPDF_Stream*>(obj.getPointer())->replaceStreamData( data, filter, decode_parms); } void QPDFObjectHandle::replaceStreamData(std::string const& data, QPDFObjectHandle const& filter, QPDFObjectHandle const& decode_parms) { assertStream(); PointerHolder<Buffer> b = new Buffer(data.length()); unsigned char* bp = b->getBuffer(); memcpy(bp, data.c_str(), data.length()); dynamic_cast<QPDF_Stream*>(obj.getPointer())->replaceStreamData( b, filter, decode_parms); } void QPDFObjectHandle::replaceStreamData(PointerHolder<StreamDataProvider> provider, QPDFObjectHandle const& filter, QPDFObjectHandle const& decode_parms) { assertStream(); dynamic_cast<QPDF_Stream*>(obj.getPointer())->replaceStreamData( provider, filter, decode_parms); } QPDFObjGen QPDFObjectHandle::getObjGen() const { return QPDFObjGen(this->objid, this->generation); } int QPDFObjectHandle::getObjectID() const { return this->objid; } int QPDFObjectHandle::getGeneration() const { return this->generation; } std::map<std::string, QPDFObjectHandle> QPDFObjectHandle::getPageImages() { assertPageObject(); // Note: this code doesn't handle inherited resources. If this // page dictionary doesn't have a /Resources key or has one whose // value is null or an empty dictionary, you are supposed to walk // up the page tree until you find a /Resources dictionary. As of // this writing, I don't have any test files that use inherited // resources, and hand-generating one won't be a good test because // any mistakes in my understanding would be present in both the // code and the test file. 
// NOTE: If support of inherited resources (see above comment) is // implemented, edit comment in QPDFObjectHandle.hh for this // function. Also remove call to pushInheritedAttributesToPage // from qpdf.cc when show_page_images is true. std::map<std::string, QPDFObjectHandle> result; if (this->hasKey("/Resources")) { QPDFObjectHandle resources = this->getKey("/Resources"); if (resources.hasKey("/XObject")) { QPDFObjectHandle xobject = resources.getKey("/XObject"); std::set<std::string> keys = xobject.getKeys(); for (std::set<std::string>::iterator iter = keys.begin(); iter != keys.end(); ++iter) { std::string key = (*iter); QPDFObjectHandle value = xobject.getKey(key); if (value.isStream()) { QPDFObjectHandle dict = value.getDict(); if (dict.hasKey("/Subtype") && (dict.getKey("/Subtype").getName() == "/Image") && (! dict.hasKey("/ImageMask"))) { result[key] = value; } } } } } return result; } std::vector<QPDFObjectHandle> QPDFObjectHandle::getPageContents() { assertPageObject(); std::vector<QPDFObjectHandle> result; QPDFObjectHandle contents = this->getKey("/Contents"); if (contents.isArray()) { int n_items = contents.getArrayNItems(); for (int i = 0; i < n_items; ++i) { QPDFObjectHandle item = contents.getArrayItem(i); if (item.isStream()) { result.push_back(item); } else { throw std::runtime_error( "unknown item type while inspecting " "element of /Contents array in page " "dictionary"); } } } else if (contents.isStream()) { result.push_back(contents); } else if (! 
contents.isNull()) { throw std::runtime_error("unknown object type inspecting /Contents " "key in page dictionary"); } return result; } void QPDFObjectHandle::addPageContents(QPDFObjectHandle new_contents, bool first) { assertPageObject(); new_contents.assertStream(); std::vector<QPDFObjectHandle> orig_contents = getPageContents(); std::vector<QPDFObjectHandle> content_streams; if (first) { QTC::TC("qpdf", "QPDFObjectHandle prepend page contents"); content_streams.push_back(new_contents); } for (std::vector<QPDFObjectHandle>::iterator iter = orig_contents.begin(); iter != orig_contents.end(); ++iter) { QTC::TC("qpdf", "QPDFObjectHandle append page contents"); content_streams.push_back(*iter); } if (! first) { content_streams.push_back(new_contents); } QPDFObjectHandle contents = QPDFObjectHandle::newArray(content_streams); this->replaceKey("/Contents", contents); } void QPDFObjectHandle::rotatePage(int angle, bool relative) { assertPageObject(); if ((angle % 90) != 0) { throw std::runtime_error( "QPDF::rotatePage called with an" " angle that is not a multiple of 90"); } int new_angle = angle; if (relative) { int old_angle = 0; bool found_rotate = false; QPDFObjectHandle cur_obj = *this; bool searched_parent = false; std::set<QPDFObjGen> visited; while (! found_rotate) { if (visited.count(cur_obj.getObjGen())) { // Don't get stuck in an infinite loop break; } if (! visited.empty()) { searched_parent = true; } visited.insert(cur_obj.getObjGen()); if (cur_obj.getKey("/Rotate").isInteger()) { found_rotate = true; old_angle = cur_obj.getKey("/Rotate").getIntValue(); } else if (cur_obj.getKey("/Parent").isDictionary()) { cur_obj = cur_obj.getKey("/Parent"); } else { break; } } QTC::TC("qpdf", "QPDFObjectHandle found old angle", searched_parent ? 
0 : 1); if ((old_angle % 90) != 0) { old_angle = 0; } new_angle += old_angle; } new_angle = (new_angle + 360) % 360; replaceKey("/Rotate", QPDFObjectHandle::newInteger(new_angle)); } std::string QPDFObjectHandle::unparse() { std::string result; if (this->isIndirect()) { result = QUtil::int_to_string(this->objid) + " " + QUtil::int_to_string(this->generation) + " R"; } else { result = unparseResolved(); } return result; } std::string QPDFObjectHandle::unparseResolved() { if (this->reserved) { throw std::logic_error( "QPDFObjectHandle: attempting to unparse a reserved object"); } dereference(); return this->obj->unparse(); } QPDFObjectHandle QPDFObjectHandle::parse(std::string const& object_str, std::string const& object_description) { PointerHolder<InputSource> input = new BufferInputSource("parsed object", object_str); QPDFTokenizer tokenizer; bool empty = false; QPDFObjectHandle result = parse(input, object_description, tokenizer, empty, 0, 0); size_t offset = input->tell(); while (offset < object_str.length()) { if (! isspace(object_str.at(offset))) { QTC::TC("qpdf", "QPDFObjectHandle trailing data in parse"); throw QPDFExc(qpdf_e_damaged_pdf, input->getName(), object_description, input->getLastOffset(), "trailing data found parsing object from string"); } ++offset; } return result; } void QPDFObjectHandle::parseContentStream(QPDFObjectHandle stream_or_array, ParserCallbacks* callbacks) { std::vector<QPDFObjectHandle> streams; if (stream_or_array.isArray()) { streams = stream_or_array.getArrayAsVector(); } else { streams.push_back(stream_or_array); } Pl_Buffer buf("concatenated stream data buffer"); std::string all_description = "content stream objects"; bool first = true; for (std::vector<QPDFObjectHandle>::iterator iter = streams.begin(); iter != streams.end(); ++iter) { QPDFObjectHandle stream = *iter; if (! 
stream.isStream()) { QTC::TC("qpdf", "QPDFObjectHandle non-stream in parsecontent"); warn(stream.getOwningQPDF(), QPDFExc(qpdf_e_damaged_pdf, "content stream", "", 0, "ignoring non-stream while parsing content streams")); } else { std::string og = QUtil::int_to_string(stream.getObjectID()) + " " + QUtil::int_to_string(stream.getGeneration()); std::string description = "content stream object " + og; if (first) { first = false; } else { all_description += ","; } all_description += " " + og; if (! stream.pipeStreamData(&buf, 0, qpdf_dl_specialized)) { QTC::TC("qpdf", "QPDFObjectHandle errors in parsecontent"); warn(stream.getOwningQPDF(), QPDFExc(qpdf_e_damaged_pdf, "content stream", description, 0, "errors while decoding content stream")); } } } PointerHolder<Buffer> stream_data = buf.getBuffer(); try { parseContentStream_internal(stream_data, all_description, callbacks); } catch (TerminateParsing&) { return; } callbacks->handleEOF(); } void QPDFObjectHandle::parseContentStream_internal(PointerHolder<Buffer> stream_data, std::string const& description, ParserCallbacks* callbacks) { size_t length = stream_data->getSize(); PointerHolder<InputSource> input = new BufferInputSource(description, stream_data.getPointer()); QPDFTokenizer tokenizer; tokenizer.allowEOF(); bool empty = false; while (static_cast<size_t>(input->tell()) < length) { QPDFObjectHandle obj = parseInternal(input, "content", tokenizer, empty, 0, 0, false, false, true); if (! obj.isInitialized()) { // EOF break; } callbacks->handleObject(obj); if (obj.isOperator() && (obj.getOperatorValue() == "ID")) { // Discard next character; it is the space after ID that // terminated the token. Read until end of inline image. char ch; input->read(&ch, 1); char buf[4]; memset(buf, '\0', sizeof(buf)); bool done = false; std::string inline_image; while (! 
done) { if (input->read(&ch, 1) == 0) { QTC::TC("qpdf", "QPDFObjectHandle EOF in inline image"); throw QPDFExc(qpdf_e_damaged_pdf, input->getName(), "stream data", input->tell(), "EOF found while reading inline image"); } inline_image += ch; memmove(buf, buf + 1, sizeof(buf) - 1); buf[sizeof(buf) - 1] = ch; if (strchr(" \t\n\v\f\r", buf[0]) && (buf[1] == 'E') && (buf[2] == 'I') && strchr(" \t\n\v\f\r", buf[3])) { // We've found an EI operator. done = true; input->seek(-3, SEEK_CUR); for (int i = 0; i < 4; ++i) { if (inline_image.length() > 0) { inline_image.erase(inline_image.length() - 1); } } } } QTC::TC("qpdf", "QPDFObjectHandle inline image token"); callbacks->handleObject( QPDFObjectHandle::newInlineImage(inline_image)); } } } QPDFObjectHandle QPDFObjectHandle::parse(PointerHolder<InputSource> input, std::string const& object_description, QPDFTokenizer& tokenizer, bool& empty, StringDecrypter* decrypter, QPDF* context) { return parseInternal(input, object_description, tokenizer, empty, decrypter, context, false, false, false); } QPDFObjectHandle QPDFObjectHandle::parseInternal(PointerHolder<InputSource> input, std::string const& object_description, QPDFTokenizer& tokenizer, bool& empty, StringDecrypter* decrypter, QPDF* context, bool in_array, bool in_dictionary, bool content_stream) { // This method must take care not to resolve any objects. Don't // check the type of any object without first ensuring that it is // a direct object. Otherwise, doing so may have the side effect // of reading the object and changing the file pointer. empty = false; if (in_dictionary && in_array) { // Although dictionaries and arrays arbitrarily nest, these // variables indicate what is at the top of the stack right // now, so they can, by definition, never both be true. throw std::logic_error( "INTERNAL ERROR: parseInternal: in_dict && in_array"); } QPDFObjectHandle object; qpdf_offset_t offset = input->tell(); std::vector<QPDFObjectHandle> olist; bool done = false; while (! 
done) { object = QPDFObjectHandle(); QPDFTokenizer::Token token = tokenizer.readToken(input, object_description); switch (token.getType()) { case QPDFTokenizer::tt_eof: if (content_stream) { // Return uninitialized object to indicate EOF return object; } else { // When not in content stream mode, EOF is tt_bad and // throws an exception before we get here. throw std::logic_error( "EOF received while not in content stream mode"); } break; case QPDFTokenizer::tt_brace_open: case QPDFTokenizer::tt_brace_close: QTC::TC("qpdf", "QPDFObjectHandle bad brace"); warn(context, QPDFExc(qpdf_e_damaged_pdf, input->getName(), object_description, input->getLastOffset(), "treating unexpected brace token as null")); object = newNull(); break; case QPDFTokenizer::tt_array_close: if (in_array) { done = true; } else { QTC::TC("qpdf", "QPDFObjectHandle bad array close"); warn(context, QPDFExc(qpdf_e_damaged_pdf, input->getName(), object_description, input->getLastOffset(), "treating unexpected array close token as null")); object = newNull(); } break; case QPDFTokenizer::tt_dict_close: if (in_dictionary) { done = true; } else { QTC::TC("qpdf", "QPDFObjectHandle bad dictionary close"); warn(context, QPDFExc(qpdf_e_damaged_pdf, input->getName(), object_description, input->getLastOffset(), "unexpected dictionary close token")); object = newNull(); } break; case QPDFTokenizer::tt_array_open: object = parseInternal( input, object_description, tokenizer, empty, decrypter, context, true, false, content_stream); break; case QPDFTokenizer::tt_dict_open: object = parseInternal( input, object_description, tokenizer, empty, decrypter, context, false, true, content_stream); break; case QPDFTokenizer::tt_bool: object = newBool((token.getValue() == "true")); break; case QPDFTokenizer::tt_null: object = newNull(); break; case QPDFTokenizer::tt_integer: object = newInteger(QUtil::string_to_ll(token.getValue().c_str())); break; case QPDFTokenizer::tt_real: object = newReal(token.getValue()); break; case 
QPDFTokenizer::tt_name: object = newName(token.getValue()); break; case QPDFTokenizer::tt_word: { std::string const& value = token.getValue(); if (content_stream) { object = QPDFObjectHandle::newOperator(value); } else if ((value == "R") && (in_array || in_dictionary) && (olist.size() >= 2) && (! olist.at(olist.size() - 1).isIndirect()) && (olist.at(olist.size() - 1).isInteger()) && (! olist.at(olist.size() - 2).isIndirect()) && (olist.at(olist.size() - 2).isInteger())) { if (context == 0) { QTC::TC("qpdf", "QPDFObjectHandle indirect without context"); throw std::logic_error( "QPDFObjectHandle::parse called without context" " on an object with indirect references"); } // Try to resolve indirect objects object = newIndirect( context, olist.at(olist.size() - 2).getIntValue(), olist.at(olist.size() - 1).getIntValue()); olist.pop_back(); olist.pop_back(); } else if ((value == "endobj") && (! (in_array || in_dictionary))) { // We just saw endobj without having read // anything. Treat this as a null and do not move // the input source's offset. object = newNull(); input->seek(input->getLastOffset(), SEEK_SET); empty = true; } else { QTC::TC("qpdf", "QPDFObjectHandle treat word as string"); warn(context, QPDFExc(qpdf_e_damaged_pdf, input->getName(), object_description, input->getLastOffset(), "unknown token while reading object;" " treating as string")); object = newString(value); } } break; case QPDFTokenizer::tt_string: { std::string val = token.getValue(); if (decrypter) { decrypter->decryptString(val); } object = QPDFObjectHandle::newString(val); } break; default: warn(context, QPDFExc(qpdf_e_damaged_pdf, input->getName(), object_description, input->getLastOffset(), "treating unknown token type as null while " "reading object")); object = newNull(); break; } if (in_dictionary || in_array) { if (! done) { olist.push_back(object); } } else if (! 
object.isInitialized()) { warn(context, QPDFExc(qpdf_e_damaged_pdf, input->getName(), object_description, input->getLastOffset(), "parse error while reading object")); object = newNull(); } else { done = true; } } if (in_array) { object = newArray(olist); } else if (in_dictionary) { // Convert list to map. Alternating elements are keys. Attempt // to recover more or less gracefully from invalid // dictionaries. std::set<std::string> names; for (std::vector<QPDFObjectHandle>::iterator iter = olist.begin(); iter != olist.end(); ++iter) { if ((! (*iter).isIndirect()) && (*iter).isName()) { names.insert((*iter).getName()); } } std::map<std::string, QPDFObjectHandle> dict; int next_fake_key = 1; for (unsigned int i = 0; i < olist.size(); ++i) { QPDFObjectHandle key_obj = olist.at(i); QPDFObjectHandle val; if (key_obj.isIndirect() || (! key_obj.isName())) { bool found_fake = false; std::string candidate; while (! found_fake) { candidate = "/QPDFFake" + QUtil::int_to_string(next_fake_key++); found_fake = (names.count(candidate) == 0); QTC::TC("qpdf", "QPDFObjectHandle found fake", (found_fake ? 0 : 1)); } warn(context, QPDFExc( qpdf_e_damaged_pdf, input->getName(), object_description, offset, "expected dictionary key but found" " non-name object; inserting key " + candidate)); val = key_obj; key_obj = newName(candidate); } else if (i + 1 >= olist.size()) { QTC::TC("qpdf", "QPDFObjectHandle no val for last key"); warn(context, QPDFExc( qpdf_e_damaged_pdf, input->getName(), object_description, offset, "dictionary ended prematurely; using null as value" " for last key")); val = newNull(); } else { val = olist.at(++i); } dict[key_obj.getName()] = val; } object = newDictionary(dict); } return object; } QPDFObjectHandle QPDFObjectHandle::newIndirect(QPDF* qpdf, int objid, int generation) { if (objid == 0) { // Special case: QPDF uses objid 0 as a sentinel for direct // objects, and the PDF specification doesn't allow for object // 0. 
Treat indirect references to object 0 as null so that we // never create an indirect object with objid 0. QTC::TC("qpdf", "QPDFObjectHandle indirect with 0 objid"); return newNull(); } return QPDFObjectHandle(qpdf, objid, generation); } QPDFObjectHandle QPDFObjectHandle::newBool(bool value) { return QPDFObjectHandle(new QPDF_Bool(value)); } QPDFObjectHandle QPDFObjectHandle::newNull() { return QPDFObjectHandle(new QPDF_Null()); } QPDFObjectHandle QPDFObjectHandle::newInteger(long long value) { return QPDFObjectHandle(new QPDF_Integer(value)); } QPDFObjectHandle QPDFObjectHandle::newReal(std::string const& value) { return QPDFObjectHandle(new QPDF_Real(value)); } QPDFObjectHandle QPDFObjectHandle::newReal(double value, int decimal_places) { return QPDFObjectHandle(new QPDF_Real(value, decimal_places)); } QPDFObjectHandle QPDFObjectHandle::newName(std::string const& name) { return QPDFObjectHandle(new QPDF_Name(name)); } QPDFObjectHandle QPDFObjectHandle::newString(std::string const& str) { return QPDFObjectHandle(new QPDF_String(str)); } QPDFObjectHandle QPDFObjectHandle::newOperator(std::string const& value) { return QPDFObjectHandle(new QPDF_Operator(value)); } QPDFObjectHandle QPDFObjectHandle::newInlineImage(std::string const& value) { return QPDFObjectHandle(new QPDF_InlineImage(value)); } QPDFObjectHandle QPDFObjectHandle::newArray() { return newArray(std::vector<QPDFObjectHandle>()); } QPDFObjectHandle QPDFObjectHandle::newArray(std::vector<QPDFObjectHandle> const& items) { return QPDFObjectHandle(new QPDF_Array(items)); } QPDFObjectHandle QPDFObjectHandle::newDictionary() { return newDictionary(std::map<std::string, QPDFObjectHandle>()); } QPDFObjectHandle QPDFObjectHandle::newDictionary( std::map<std::string, QPDFObjectHandle> const& items) { return QPDFObjectHandle(new QPDF_Dictionary(items)); } QPDFObjectHandle QPDFObjectHandle::newStream(QPDF* qpdf, int objid, int generation, QPDFObjectHandle stream_dict, qpdf_offset_t offset, size_t length) { return 
QPDFObjectHandle(new QPDF_Stream( qpdf, objid, generation, stream_dict, offset, length)); } QPDFObjectHandle QPDFObjectHandle::newStream(QPDF* qpdf) { QTC::TC("qpdf", "QPDFObjectHandle newStream"); QPDFObjectHandle stream_dict = newDictionary(); QPDFObjectHandle result = qpdf->makeIndirectObject( QPDFObjectHandle( new QPDF_Stream(qpdf, 0, 0, stream_dict, 0, 0))); result.dereference(); QPDF_Stream* stream = dynamic_cast<QPDF_Stream*>(result.obj.getPointer()); stream->setObjGen(result.getObjectID(), result.getGeneration()); return result; } QPDFObjectHandle QPDFObjectHandle::newStream(QPDF* qpdf, PointerHolder<Buffer> data) { QTC::TC("qpdf", "QPDFObjectHandle newStream with data"); QPDFObjectHandle result = newStream(qpdf); result.replaceStreamData(data, newNull(), newNull()); return result; } QPDFObjectHandle QPDFObjectHandle::newStream(QPDF* qpdf, std::string const& data) { QTC::TC("qpdf", "QPDFObjectHandle newStream with string"); QPDFObjectHandle result = newStream(qpdf); result.replaceStreamData(data, newNull(), newNull()); return result; } QPDFObjectHandle QPDFObjectHandle::newReserved(QPDF* qpdf) { // Reserve a spot for this object by assigning it an object // number, but then return an unresolved handle to the object. 
QPDFObjectHandle reserved = qpdf->makeIndirectObject( QPDFObjectHandle(new QPDF_Reserved())); QPDFObjectHandle result = newIndirect(qpdf, reserved.objid, reserved.generation); result.reserved = true; return result; } QPDFObjectHandle QPDFObjectHandle::shallowCopy() { assertInitialized(); if (isStream()) { QTC::TC("qpdf", "QPDFObjectHandle ERR shallow copy stream"); throw std::runtime_error( "attempt to make a shallow copy of a stream"); } QPDFObjectHandle new_obj; if (isArray()) { QTC::TC("qpdf", "QPDFObjectHandle shallow copy array"); new_obj = newArray(getArrayAsVector()); } else if (isDictionary()) { QTC::TC("qpdf", "QPDFObjectHandle shallow copy dictionary"); new_obj = newDictionary(getDictAsMap()); } else { QTC::TC("qpdf", "QPDFObjectHandle shallow copy scalar"); new_obj = *this; } return new_obj; } void QPDFObjectHandle::makeDirectInternal(std::set<int>& visited) { assertInitialized(); if (isStream()) { QTC::TC("qpdf", "QPDFObjectHandle ERR clone stream"); throw std::runtime_error( "attempt to make a stream into a direct object"); } int cur_objid = this->objid; if (cur_objid != 0) { if (visited.count(cur_objid)) { QTC::TC("qpdf", "QPDFObjectHandle makeDirect loop"); throw std::runtime_error( "loop detected while converting object from " "indirect to direct"); } visited.insert(cur_objid); } if (isReserved()) { throw std::logic_error( "QPDFObjectHandle: attempting to make a" " reserved object handle direct"); } dereference(); this->qpdf = 0; this->objid = 0; this->generation = 0; PointerHolder<QPDFObject> new_obj; if (isBool()) { QTC::TC("qpdf", "QPDFObjectHandle clone bool"); new_obj = new QPDF_Bool(getBoolValue()); } else if (isNull()) { QTC::TC("qpdf", "QPDFObjectHandle clone null"); new_obj = new QPDF_Null(); } else if (isInteger()) { QTC::TC("qpdf", "QPDFObjectHandle clone integer"); new_obj = new QPDF_Integer(getIntValue()); } else if (isReal()) { QTC::TC("qpdf", "QPDFObjectHandle clone real"); new_obj = new QPDF_Real(getRealValue()); } else if (isName()) 
{ QTC::TC("qpdf", "QPDFObjectHandle clone name"); new_obj = new QPDF_Name(getName()); } else if (isString()) { QTC::TC("qpdf", "QPDFObjectHandle clone string"); new_obj = new QPDF_String(getStringValue()); } else if (isArray()) { QTC::TC("qpdf", "QPDFObjectHandle clone array"); std::vector<QPDFObjectHandle> items; int n = getArrayNItems(); for (int i = 0; i < n; ++i) { items.push_back(getArrayItem(i)); items.back().makeDirectInternal(visited); } new_obj = new QPDF_Array(items); } else if (isDictionary()) { QTC::TC("qpdf", "QPDFObjectHandle clone dictionary"); std::set<std::string> keys = getKeys(); std::map<std::string, QPDFObjectHandle> items; for (std::set<std::string>::iterator iter = keys.begin(); iter != keys.end(); ++iter) { items[*iter] = getKey(*iter); items[*iter].makeDirectInternal(visited); } new_obj = new QPDF_Dictionary(items); } else { throw std::logic_error("QPDFObjectHandle::makeDirectInternal: " "unknown object type"); } this->obj = new_obj; if (cur_objid) { visited.erase(cur_objid); } } void QPDFObjectHandle::makeDirect() { std::set<int> visited; makeDirectInternal(visited); } void QPDFObjectHandle::assertInitialized() const { if (! this->initialized) { throw std::logic_error("operation attempted on uninitialized " "QPDFObjectHandle"); } } void QPDFObjectHandle::assertType(char const* type_name, bool istype) const { if (! 
istype) { throw std::logic_error(std::string("operation for ") + type_name + " object attempted on object of wrong type"); } } void QPDFObjectHandle::assertNull() { assertType("Null", isNull()); } void QPDFObjectHandle::assertBool() { assertType("Boolean", isBool()); } void QPDFObjectHandle::assertInteger() { assertType("Integer", isInteger()); } void QPDFObjectHandle::assertReal() { assertType("Real", isReal()); } void QPDFObjectHandle::assertName() { assertType("Name", isName()); } void QPDFObjectHandle::assertString() { assertType("String", isString()); } void QPDFObjectHandle::assertOperator() { assertType("Operator", isOperator()); } void QPDFObjectHandle::assertInlineImage() { assertType("InlineImage", isInlineImage()); } void QPDFObjectHandle::assertArray() { assertType("Array", isArray()); } void QPDFObjectHandle::assertDictionary() { assertType("Dictionary", isDictionary()); } void QPDFObjectHandle::assertStream() { assertType("Stream", isStream()); } void QPDFObjectHandle::assertReserved() { assertType("Reserved", isReserved()); } void QPDFObjectHandle::assertIndirect() { if (! isIndirect()) { throw std::logic_error( "operation for indirect object attempted on direct object"); } } void QPDFObjectHandle::assertScalar() { assertType("Scalar", isScalar()); } void QPDFObjectHandle::assertNumber() { assertType("Number", isNumber()); } bool QPDFObjectHandle::isPageObject() { return (this->isDictionary() && this->hasKey("/Type") && (this->getKey("/Type").getName() == "/Page")); } bool QPDFObjectHandle::isPagesObject() { return (this->isDictionary() && this->hasKey("/Type") && (this->getKey("/Type").getName() == "/Pages")); } void QPDFObjectHandle::assertPageObject() { if (! 
isPageObject())
    {
	throw std::logic_error("page operation called on non-Page object");
    }
}

// Resolve this handle's underlying object if it has not been resolved
// yet.  Indirect handles start out with a null object pointer; the
// first access reads the object via QPDF::Resolver.  A reference that
// cannot be resolved becomes the null object; a reserved placeholder
// is deliberately left unresolved.
void
QPDFObjectHandle::dereference()
{
    if (this->obj.getPointer() == 0)
    {
	PointerHolder<QPDFObject> obj = QPDF::Resolver::resolve(
	    this->qpdf, this->objid, this->generation);
	if (obj.getPointer() == 0)
	{
	    // Dangling indirect reference: treat as the null object.
	    QTC::TC("qpdf", "QPDFObjectHandle indirect to unknown");
	    this->obj = new QPDF_Null();
	}
	else if (dynamic_cast<QPDF_Reserved*>(obj.getPointer()))
	{
	    // Do not resolve
	}
	else
	{
	    this->reserved = false;
	    this->obj = obj;
	}
    }
}

// Report a parse problem.  With a QPDF context the problem is recorded
// as a warning on that object; without one the exception is thrown.
void
QPDFObjectHandle::warn(QPDF* qpdf, QPDFExc const& e)
{
    // If parsing on behalf of a QPDF object and want to give a
    // warning, we can warn through the object.  If parsing for some
    // other reason, such as an explicit creation of an object from a
    // string, then just throw the exception.
    if (qpdf)
    {
	QPDF::Warner::warn(qpdf, e);
    }
    else
    {
	throw e;
    }
}
#include <qpdf/QPDFObjectHandle.hh> #include <qpdf/QPDF.hh> #include <qpdf/QPDF_Bool.hh> #include <qpdf/QPDF_Null.hh> #include <qpdf/QPDF_Integer.hh> #include <qpdf/QPDF_Real.hh> #include <qpdf/QPDF_Name.hh> #include <qpdf/QPDF_String.hh> #include <qpdf/QPDF_Operator.hh> #include <qpdf/QPDF_InlineImage.hh> #include <qpdf/QPDF_Array.hh> #include <qpdf/QPDF_Dictionary.hh> #include <qpdf/QPDF_Stream.hh> #include <qpdf/QPDF_Reserved.hh> #include <qpdf/Pl_Buffer.hh> #include <qpdf/BufferInputSource.hh> #include <qpdf/QPDFExc.hh> #include <qpdf/QTC.hh> #include <qpdf/QUtil.hh> #include <stdexcept> #include <stdlib.h> #include <ctype.h> class TerminateParsing { }; void QPDFObjectHandle::ParserCallbacks::terminateParsing() { throw TerminateParsing(); } QPDFObjectHandle::QPDFObjectHandle() : initialized(false), qpdf(0), objid(0), generation(0), reserved(false) { } QPDFObjectHandle::QPDFObjectHandle(QPDF* qpdf, int objid, int generation) : initialized(true), qpdf(qpdf), objid(objid), generation(generation), reserved(false) { } QPDFObjectHandle::QPDFObjectHandle(QPDFObject* data) : initialized(true), qpdf(0), objid(0), generation(0), obj(data), reserved(false) { } void QPDFObjectHandle::releaseResolved() { // Recursively break any resolved references to indirect objects. // Do not cross over indirect object boundaries to avoid an // infinite loop. This method may only be called during final // destruction. See comments in QPDF::~QPDF(). 
if (isIndirect()) { if (this->obj.getPointer()) { this->obj = 0; } } else { QPDFObject::ObjAccessor::releaseResolved(this->obj.getPointer()); } } bool QPDFObjectHandle::isInitialized() const { return this->initialized; } QPDFObject::object_type_e QPDFObjectHandle::getTypeCode() { if (this->initialized) { dereference(); return obj->getTypeCode(); } else { return QPDFObject::ot_uninitialized; } } char const* QPDFObjectHandle::getTypeName() { if (this->initialized) { dereference(); return obj->getTypeName(); } else { return "uninitialized"; } } template <class T> class QPDFObjectTypeAccessor { public: static bool check(QPDFObject* o) { return (o && dynamic_cast<T*>(o)); } }; bool QPDFObjectHandle::isBool() { dereference(); return QPDFObjectTypeAccessor<QPDF_Bool>::check(obj.getPointer()); } bool QPDFObjectHandle::isNull() { dereference(); return QPDFObjectTypeAccessor<QPDF_Null>::check(obj.getPointer()); } bool QPDFObjectHandle::isInteger() { dereference(); return QPDFObjectTypeAccessor<QPDF_Integer>::check(obj.getPointer()); } bool QPDFObjectHandle::isReal() { dereference(); return QPDFObjectTypeAccessor<QPDF_Real>::check(obj.getPointer()); } bool QPDFObjectHandle::isNumber() { return (isInteger() || isReal()); } double QPDFObjectHandle::getNumericValue() { double result = 0.0; if (isInteger()) { result = static_cast<double>(getIntValue()); } else if (isReal()) { result = atof(getRealValue().c_str()); } else { throw std::logic_error("getNumericValue called for non-numeric object"); } return result; } bool QPDFObjectHandle::isName() { dereference(); return QPDFObjectTypeAccessor<QPDF_Name>::check(obj.getPointer()); } bool QPDFObjectHandle::isString() { dereference(); return QPDFObjectTypeAccessor<QPDF_String>::check(obj.getPointer()); } bool QPDFObjectHandle::isOperator() { dereference(); return QPDFObjectTypeAccessor<QPDF_Operator>::check(obj.getPointer()); } bool QPDFObjectHandle::isInlineImage() { dereference(); return 
QPDFObjectTypeAccessor<QPDF_InlineImage>::check(obj.getPointer()); } bool QPDFObjectHandle::isArray() { dereference(); return QPDFObjectTypeAccessor<QPDF_Array>::check(obj.getPointer()); } bool QPDFObjectHandle::isDictionary() { dereference(); return QPDFObjectTypeAccessor<QPDF_Dictionary>::check(obj.getPointer()); } bool QPDFObjectHandle::isStream() { dereference(); return QPDFObjectTypeAccessor<QPDF_Stream>::check(obj.getPointer()); } bool QPDFObjectHandle::isReserved() { // dereference will clear reserved if this has been replaced dereference(); return this->reserved; } bool QPDFObjectHandle::isIndirect() { assertInitialized(); return (this->objid != 0); } bool QPDFObjectHandle::isScalar() { return (! (isArray() || isDictionary() || isStream() || isOperator() || isInlineImage())); } // Bool accessors bool QPDFObjectHandle::getBoolValue() { assertBool(); return dynamic_cast<QPDF_Bool*>(obj.getPointer())->getVal(); } // Integer accessors long long QPDFObjectHandle::getIntValue() { assertInteger(); return dynamic_cast<QPDF_Integer*>(obj.getPointer())->getVal(); } // Real accessors std::string QPDFObjectHandle::getRealValue() { assertReal(); return dynamic_cast<QPDF_Real*>(obj.getPointer())->getVal(); } // Name accessors std::string QPDFObjectHandle::getName() { assertName(); return dynamic_cast<QPDF_Name*>(obj.getPointer())->getName(); } // String accessors std::string QPDFObjectHandle::getStringValue() { assertString(); return dynamic_cast<QPDF_String*>(obj.getPointer())->getVal(); } std::string QPDFObjectHandle::getUTF8Value() { assertString(); return dynamic_cast<QPDF_String*>(obj.getPointer())->getUTF8Val(); } // Operator and Inline Image accessors std::string QPDFObjectHandle::getOperatorValue() { assertOperator(); return dynamic_cast<QPDF_Operator*>(obj.getPointer())->getVal(); } std::string QPDFObjectHandle::getInlineImageValue() { assertInlineImage(); return dynamic_cast<QPDF_InlineImage*>(obj.getPointer())->getVal(); } // Array accessors int 
QPDFObjectHandle::getArrayNItems() { assertArray(); return dynamic_cast<QPDF_Array*>(obj.getPointer())->getNItems(); } QPDFObjectHandle QPDFObjectHandle::getArrayItem(int n) { assertArray(); return dynamic_cast<QPDF_Array*>(obj.getPointer())->getItem(n); } std::vector<QPDFObjectHandle> QPDFObjectHandle::getArrayAsVector() { assertArray(); return dynamic_cast<QPDF_Array*>(obj.getPointer())->getAsVector(); } // Array mutators void QPDFObjectHandle::setArrayItem(int n, QPDFObjectHandle const& item) { assertArray(); return dynamic_cast<QPDF_Array*>(obj.getPointer())->setItem(n, item); } void QPDFObjectHandle::setArrayFromVector(std::vector<QPDFObjectHandle> const& items) { assertArray(); return dynamic_cast<QPDF_Array*>(obj.getPointer())->setFromVector(items); } void QPDFObjectHandle::insertItem(int at, QPDFObjectHandle const& item) { assertArray(); return dynamic_cast<QPDF_Array*>(obj.getPointer())->insertItem(at, item); } void QPDFObjectHandle::appendItem(QPDFObjectHandle const& item) { assertArray(); return dynamic_cast<QPDF_Array*>(obj.getPointer())->appendItem(item); } void QPDFObjectHandle::eraseItem(int at) { assertArray(); return dynamic_cast<QPDF_Array*>(obj.getPointer())->eraseItem(at); } // Dictionary accessors bool QPDFObjectHandle::hasKey(std::string const& key) { assertDictionary(); return dynamic_cast<QPDF_Dictionary*>(obj.getPointer())->hasKey(key); } QPDFObjectHandle QPDFObjectHandle::getKey(std::string const& key) { assertDictionary(); return dynamic_cast<QPDF_Dictionary*>(obj.getPointer())->getKey(key); } std::set<std::string> QPDFObjectHandle::getKeys() { assertDictionary(); return dynamic_cast<QPDF_Dictionary*>(obj.getPointer())->getKeys(); } std::map<std::string, QPDFObjectHandle> QPDFObjectHandle::getDictAsMap() { assertDictionary(); return dynamic_cast<QPDF_Dictionary*>(obj.getPointer())->getAsMap(); } // Array and Name accessors bool QPDFObjectHandle::isOrHasName(std::string const& value) { if (isName() && (getName() == value)) { return true; } 
else if (isArray()) { int n = getArrayNItems(); for (int i = 0; i < n; ++i) { QPDFObjectHandle item = getArrayItem(0); if (item.isName() && (item.getName() == value)) { return true; } } } return false; } // Indirect object accessors QPDF* QPDFObjectHandle::getOwningQPDF() { // Will be null for direct objects return this->qpdf; } // Dictionary mutators void QPDFObjectHandle::replaceKey(std::string const& key, QPDFObjectHandle const& value) { assertDictionary(); return dynamic_cast<QPDF_Dictionary*>( obj.getPointer())->replaceKey(key, value); } void QPDFObjectHandle::removeKey(std::string const& key) { assertDictionary(); return dynamic_cast<QPDF_Dictionary*>(obj.getPointer())->removeKey(key); } void QPDFObjectHandle::replaceOrRemoveKey(std::string const& key, QPDFObjectHandle value) { assertDictionary(); return dynamic_cast<QPDF_Dictionary*>( obj.getPointer())->replaceOrRemoveKey(key, value); } // Stream accessors QPDFObjectHandle QPDFObjectHandle::getDict() { assertStream(); return dynamic_cast<QPDF_Stream*>(obj.getPointer())->getDict(); } void QPDFObjectHandle::replaceDict(QPDFObjectHandle new_dict) { assertStream(); dynamic_cast<QPDF_Stream*>(obj.getPointer())->replaceDict(new_dict); } PointerHolder<Buffer> QPDFObjectHandle::getStreamData(qpdf_stream_decode_level_e level) { assertStream(); return dynamic_cast<QPDF_Stream*>(obj.getPointer())->getStreamData(level); } PointerHolder<Buffer> QPDFObjectHandle::getRawStreamData() { assertStream(); return dynamic_cast<QPDF_Stream*>(obj.getPointer())->getRawStreamData(); } bool QPDFObjectHandle::pipeStreamData(Pipeline* p, unsigned long encode_flags, qpdf_stream_decode_level_e decode_level, bool suppress_warnings) { assertStream(); return dynamic_cast<QPDF_Stream*>(obj.getPointer())->pipeStreamData( p, encode_flags, decode_level, suppress_warnings); } bool QPDFObjectHandle::pipeStreamData(Pipeline* p, bool filter, bool normalize, bool compress) { unsigned long encode_flags = 0; qpdf_stream_decode_level_e decode_level = 
qpdf_dl_none; if (filter) { decode_level = qpdf_dl_generalized; if (normalize) { encode_flags |= qpdf_ef_normalize; } if (compress) { encode_flags |= qpdf_ef_compress; } } return pipeStreamData(p, encode_flags, decode_level, false); } void QPDFObjectHandle::replaceStreamData(PointerHolder<Buffer> data, QPDFObjectHandle const& filter, QPDFObjectHandle const& decode_parms) { assertStream(); dynamic_cast<QPDF_Stream*>(obj.getPointer())->replaceStreamData( data, filter, decode_parms); } void QPDFObjectHandle::replaceStreamData(std::string const& data, QPDFObjectHandle const& filter, QPDFObjectHandle const& decode_parms) { assertStream(); PointerHolder<Buffer> b = new Buffer(data.length()); unsigned char* bp = b->getBuffer(); memcpy(bp, data.c_str(), data.length()); dynamic_cast<QPDF_Stream*>(obj.getPointer())->replaceStreamData( b, filter, decode_parms); } void QPDFObjectHandle::replaceStreamData(PointerHolder<StreamDataProvider> provider, QPDFObjectHandle const& filter, QPDFObjectHandle const& decode_parms) { assertStream(); dynamic_cast<QPDF_Stream*>(obj.getPointer())->replaceStreamData( provider, filter, decode_parms); } QPDFObjGen QPDFObjectHandle::getObjGen() const { return QPDFObjGen(this->objid, this->generation); } int QPDFObjectHandle::getObjectID() const { return this->objid; } int QPDFObjectHandle::getGeneration() const { return this->generation; } std::map<std::string, QPDFObjectHandle> QPDFObjectHandle::getPageImages() { assertPageObject(); // Note: this code doesn't handle inherited resources. If this // page dictionary doesn't have a /Resources key or has one whose // value is null or an empty dictionary, you are supposed to walk // up the page tree until you find a /Resources dictionary. As of // this writing, I don't have any test files that use inherited // resources, and hand-generating one won't be a good test because // any mistakes in my understanding would be present in both the // code and the test file. 
// NOTE: If support of inherited resources (see above comment) is // implemented, edit comment in QPDFObjectHandle.hh for this // function. Also remove call to pushInheritedAttributesToPage // from qpdf.cc when show_page_images is true. std::map<std::string, QPDFObjectHandle> result; if (this->hasKey("/Resources")) { QPDFObjectHandle resources = this->getKey("/Resources"); if (resources.hasKey("/XObject")) { QPDFObjectHandle xobject = resources.getKey("/XObject"); std::set<std::string> keys = xobject.getKeys(); for (std::set<std::string>::iterator iter = keys.begin(); iter != keys.end(); ++iter) { std::string key = (*iter); QPDFObjectHandle value = xobject.getKey(key); if (value.isStream()) { QPDFObjectHandle dict = value.getDict(); if (dict.hasKey("/Subtype") && (dict.getKey("/Subtype").getName() == "/Image") && (! dict.hasKey("/ImageMask"))) { result[key] = value; } } } } } return result; } std::vector<QPDFObjectHandle> QPDFObjectHandle::getPageContents() { assertPageObject(); std::vector<QPDFObjectHandle> result; QPDFObjectHandle contents = this->getKey("/Contents"); if (contents.isArray()) { int n_items = contents.getArrayNItems(); for (int i = 0; i < n_items; ++i) { QPDFObjectHandle item = contents.getArrayItem(i); if (item.isStream()) { result.push_back(item); } else { throw std::runtime_error( "unknown item type while inspecting " "element of /Contents array in page " "dictionary"); } } } else if (contents.isStream()) { result.push_back(contents); } else if (! 
contents.isNull()) { throw std::runtime_error("unknown object type inspecting /Contents " "key in page dictionary"); } return result; } void QPDFObjectHandle::addPageContents(QPDFObjectHandle new_contents, bool first) { assertPageObject(); new_contents.assertStream(); std::vector<QPDFObjectHandle> orig_contents = getPageContents(); std::vector<QPDFObjectHandle> content_streams; if (first) { QTC::TC("qpdf", "QPDFObjectHandle prepend page contents"); content_streams.push_back(new_contents); } for (std::vector<QPDFObjectHandle>::iterator iter = orig_contents.begin(); iter != orig_contents.end(); ++iter) { QTC::TC("qpdf", "QPDFObjectHandle append page contents"); content_streams.push_back(*iter); } if (! first) { content_streams.push_back(new_contents); } QPDFObjectHandle contents = QPDFObjectHandle::newArray(content_streams); this->replaceKey("/Contents", contents); } void QPDFObjectHandle::rotatePage(int angle, bool relative) { assertPageObject(); if ((angle % 90) != 0) { throw std::runtime_error( "QPDF::rotatePage called with an" " angle that is not a multiple of 90"); } int new_angle = angle; if (relative) { int old_angle = 0; bool found_rotate = false; QPDFObjectHandle cur_obj = *this; bool searched_parent = false; std::set<QPDFObjGen> visited; while (! found_rotate) { if (visited.count(cur_obj.getObjGen())) { // Don't get stuck in an infinite loop break; } if (! visited.empty()) { searched_parent = true; } visited.insert(cur_obj.getObjGen()); if (cur_obj.getKey("/Rotate").isInteger()) { found_rotate = true; old_angle = cur_obj.getKey("/Rotate").getIntValue(); } else if (cur_obj.getKey("/Parent").isDictionary()) { cur_obj = cur_obj.getKey("/Parent"); } else { break; } } QTC::TC("qpdf", "QPDFObjectHandle found old angle", searched_parent ? 
0 : 1); if ((old_angle % 90) != 0) { old_angle = 0; } new_angle += old_angle; } new_angle = (new_angle + 360) % 360; replaceKey("/Rotate", QPDFObjectHandle::newInteger(new_angle)); } std::string QPDFObjectHandle::unparse() { std::string result; if (this->isIndirect()) { result = QUtil::int_to_string(this->objid) + " " + QUtil::int_to_string(this->generation) + " R"; } else { result = unparseResolved(); } return result; } std::string QPDFObjectHandle::unparseResolved() { if (this->reserved) { throw std::logic_error( "QPDFObjectHandle: attempting to unparse a reserved object"); } dereference(); return this->obj->unparse(); } QPDFObjectHandle QPDFObjectHandle::parse(std::string const& object_str, std::string const& object_description) { PointerHolder<InputSource> input = new BufferInputSource("parsed object", object_str); QPDFTokenizer tokenizer; bool empty = false; QPDFObjectHandle result = parse(input, object_description, tokenizer, empty, 0, 0); size_t offset = input->tell(); while (offset < object_str.length()) { if (! isspace(object_str.at(offset))) { QTC::TC("qpdf", "QPDFObjectHandle trailing data in parse"); throw QPDFExc(qpdf_e_damaged_pdf, input->getName(), object_description, input->getLastOffset(), "trailing data found parsing object from string"); } ++offset; } return result; } void QPDFObjectHandle::parseContentStream(QPDFObjectHandle stream_or_array, ParserCallbacks* callbacks) { std::vector<QPDFObjectHandle> streams; if (stream_or_array.isArray()) { streams = stream_or_array.getArrayAsVector(); } else { streams.push_back(stream_or_array); } Pl_Buffer buf("concatenated stream data buffer"); std::string all_description = "content stream objects"; bool first = true; for (std::vector<QPDFObjectHandle>::iterator iter = streams.begin(); iter != streams.end(); ++iter) { QPDFObjectHandle stream = *iter; if (! 
stream.isStream()) { QTC::TC("qpdf", "QPDFObjectHandle non-stream in parsecontent"); warn(stream.getOwningQPDF(), QPDFExc(qpdf_e_damaged_pdf, "content stream", "", 0, "ignoring non-stream while parsing content streams")); } else { std::string og = QUtil::int_to_string(stream.getObjectID()) + " " + QUtil::int_to_string(stream.getGeneration()); std::string description = "content stream object " + og; if (first) { first = false; } else { all_description += ","; } all_description += " " + og; if (! stream.pipeStreamData(&buf, 0, qpdf_dl_specialized)) { QTC::TC("qpdf", "QPDFObjectHandle errors in parsecontent"); warn(stream.getOwningQPDF(), QPDFExc(qpdf_e_damaged_pdf, "content stream", description, 0, "errors while decoding content stream")); } } } PointerHolder<Buffer> stream_data = buf.getBuffer(); try { parseContentStream_internal(stream_data, all_description, callbacks); } catch (TerminateParsing&) { return; } callbacks->handleEOF(); } void QPDFObjectHandle::parseContentStream_internal(PointerHolder<Buffer> stream_data, std::string const& description, ParserCallbacks* callbacks) { size_t length = stream_data->getSize(); PointerHolder<InputSource> input = new BufferInputSource(description, stream_data.getPointer()); QPDFTokenizer tokenizer; tokenizer.allowEOF(); bool empty = false; while (static_cast<size_t>(input->tell()) < length) { QPDFObjectHandle obj = parseInternal(input, "content", tokenizer, empty, 0, 0, true); if (! obj.isInitialized()) { // EOF break; } callbacks->handleObject(obj); if (obj.isOperator() && (obj.getOperatorValue() == "ID")) { // Discard next character; it is the space after ID that // terminated the token. Read until end of inline image. char ch; input->read(&ch, 1); char buf[4]; memset(buf, '\0', sizeof(buf)); bool done = false; std::string inline_image; while (! 
done) { if (input->read(&ch, 1) == 0) { QTC::TC("qpdf", "QPDFObjectHandle EOF in inline image"); throw QPDFExc(qpdf_e_damaged_pdf, input->getName(), "stream data", input->tell(), "EOF found while reading inline image"); } inline_image += ch; memmove(buf, buf + 1, sizeof(buf) - 1); buf[sizeof(buf) - 1] = ch; if (strchr(" \t\n\v\f\r", buf[0]) && (buf[1] == 'E') && (buf[2] == 'I') && strchr(" \t\n\v\f\r", buf[3])) { // We've found an EI operator. done = true; input->seek(-3, SEEK_CUR); for (int i = 0; i < 4; ++i) { if (inline_image.length() > 0) { inline_image.erase(inline_image.length() - 1); } } } } QTC::TC("qpdf", "QPDFObjectHandle inline image token"); callbacks->handleObject( QPDFObjectHandle::newInlineImage(inline_image)); } } } QPDFObjectHandle QPDFObjectHandle::parse(PointerHolder<InputSource> input, std::string const& object_description, QPDFTokenizer& tokenizer, bool& empty, StringDecrypter* decrypter, QPDF* context) { return parseInternal(input, object_description, tokenizer, empty, decrypter, context, false); } QPDFObjectHandle QPDFObjectHandle::parseInternal(PointerHolder<InputSource> input, std::string const& object_description, QPDFTokenizer& tokenizer, bool& empty, StringDecrypter* decrypter, QPDF* context, bool content_stream) { // This method must take care not to resolve any objects. Don't // check the type of any object without first ensuring that it is // a direct object. Otherwise, doing so may have the side effect // of reading the object and changing the file pointer. empty = false; QPDFObjectHandle object; std::vector<std::vector<QPDFObjectHandle> > olist_stack; olist_stack.push_back(std::vector<QPDFObjectHandle>()); enum state_e { st_top, st_start, st_stop, st_eof, st_dictionary, st_array }; std::vector<state_e> state_stack; state_stack.push_back(st_top); std::vector<qpdf_offset_t> offset_stack; offset_stack.push_back(input->tell()); bool done = false; while (! 
done) { std::vector<QPDFObjectHandle>& olist = olist_stack.back(); state_e state = state_stack.back(); qpdf_offset_t offset = offset_stack.back(); object = QPDFObjectHandle(); QPDFTokenizer::Token token = tokenizer.readToken(input, object_description); switch (token.getType()) { case QPDFTokenizer::tt_eof: if (content_stream) { state = st_eof; } else { // When not in content stream mode, EOF is tt_bad and // throws an exception before we get here. throw std::logic_error( "EOF received while not in content stream mode"); } break; case QPDFTokenizer::tt_brace_open: case QPDFTokenizer::tt_brace_close: QTC::TC("qpdf", "QPDFObjectHandle bad brace"); warn(context, QPDFExc(qpdf_e_damaged_pdf, input->getName(), object_description, input->getLastOffset(), "treating unexpected brace token as null")); object = newNull(); break; case QPDFTokenizer::tt_array_close: if (state == st_array) { state = st_stop; } else { QTC::TC("qpdf", "QPDFObjectHandle bad array close"); warn(context, QPDFExc(qpdf_e_damaged_pdf, input->getName(), object_description, input->getLastOffset(), "treating unexpected array close token as null")); object = newNull(); } break; case QPDFTokenizer::tt_dict_close: if (state == st_dictionary) { state = st_stop; } else { QTC::TC("qpdf", "QPDFObjectHandle bad dictionary close"); warn(context, QPDFExc(qpdf_e_damaged_pdf, input->getName(), object_description, input->getLastOffset(), "unexpected dictionary close token")); object = newNull(); } break; case QPDFTokenizer::tt_array_open: case QPDFTokenizer::tt_dict_open: olist_stack.push_back(std::vector<QPDFObjectHandle>()); state = st_start; offset_stack.push_back(input->tell()); state_stack.push_back( (token.getType() == QPDFTokenizer::tt_array_open) ? 
st_array : st_dictionary); break; case QPDFTokenizer::tt_bool: object = newBool((token.getValue() == "true")); break; case QPDFTokenizer::tt_null: object = newNull(); break; case QPDFTokenizer::tt_integer: object = newInteger(QUtil::string_to_ll(token.getValue().c_str())); break; case QPDFTokenizer::tt_real: object = newReal(token.getValue()); break; case QPDFTokenizer::tt_name: object = newName(token.getValue()); break; case QPDFTokenizer::tt_word: { std::string const& value = token.getValue(); if (content_stream) { object = QPDFObjectHandle::newOperator(value); } else if ((value == "R") && (state != st_top) && (olist.size() >= 2) && (! olist.at(olist.size() - 1).isIndirect()) && (olist.at(olist.size() - 1).isInteger()) && (! olist.at(olist.size() - 2).isIndirect()) && (olist.at(olist.size() - 2).isInteger())) { if (context == 0) { QTC::TC("qpdf", "QPDFObjectHandle indirect without context"); throw std::logic_error( "QPDFObjectHandle::parse called without context" " on an object with indirect references"); } // Try to resolve indirect objects object = newIndirect( context, olist.at(olist.size() - 2).getIntValue(), olist.at(olist.size() - 1).getIntValue()); olist.pop_back(); olist.pop_back(); } else if ((value == "endobj") && (state == st_top)) { // We just saw endobj without having read // anything. Treat this as a null and do not move // the input source's offset. 
object = newNull(); input->seek(input->getLastOffset(), SEEK_SET); empty = true; } else { QTC::TC("qpdf", "QPDFObjectHandle treat word as string"); warn(context, QPDFExc(qpdf_e_damaged_pdf, input->getName(), object_description, input->getLastOffset(), "unknown token while reading object;" " treating as string")); object = newString(value); } } break; case QPDFTokenizer::tt_string: { std::string val = token.getValue(); if (decrypter) { decrypter->decryptString(val); } object = QPDFObjectHandle::newString(val); } break; default: warn(context, QPDFExc(qpdf_e_damaged_pdf, input->getName(), object_description, input->getLastOffset(), "treating unknown token type as null while " "reading object")); object = newNull(); break; } if ((! object.isInitialized()) && (! ((state == st_start) || (state == st_stop) || (state == st_eof)))) { throw std::logic_error( "QPDFObjectHandle::parseInternal: " "unexpected uninitialized object"); object = newNull(); } switch (state) { case st_eof: if (state_stack.size() > 1) { warn(context, QPDFExc(qpdf_e_damaged_pdf, input->getName(), object_description, input->getLastOffset(), "parse error while reading object")); } done = true; // Leave object uninitialized to indicate EOF break; case st_dictionary: case st_array: olist.push_back(object); break; case st_top: done = true; break; case st_start: break; case st_stop: if ((state_stack.size() < 2) || (olist_stack.size() < 2)) { throw std::logic_error( "QPDFObjectHandle::parseInternal: st_stop encountered" " with insufficient elements in stack"); } state_e old_state = state_stack.back(); state_stack.pop_back(); if (old_state == st_array) { object = newArray(olist); } else if (old_state == st_dictionary) { // Convert list to map. Alternating elements are keys. // Attempt to recover more or less gracefully from // invalid dictionaries. std::set<std::string> names; for (std::vector<QPDFObjectHandle>::iterator iter = olist.begin(); iter != olist.end(); ++iter) { if ((! 
(*iter).isIndirect()) && (*iter).isName()) { names.insert((*iter).getName()); } } std::map<std::string, QPDFObjectHandle> dict; int next_fake_key = 1; for (unsigned int i = 0; i < olist.size(); ++i) { QPDFObjectHandle key_obj = olist.at(i); QPDFObjectHandle val; if (key_obj.isIndirect() || (! key_obj.isName())) { bool found_fake = false; std::string candidate; while (! found_fake) { candidate = "/QPDFFake" + QUtil::int_to_string(next_fake_key++); found_fake = (names.count(candidate) == 0); QTC::TC("qpdf", "QPDFObjectHandle found fake", (found_fake ? 0 : 1)); } warn(context, QPDFExc( qpdf_e_damaged_pdf, input->getName(), object_description, offset, "expected dictionary key but found" " non-name object; inserting key " + candidate)); val = key_obj; key_obj = newName(candidate); } else if (i + 1 >= olist.size()) { QTC::TC("qpdf", "QPDFObjectHandle no val for last key"); warn(context, QPDFExc( qpdf_e_damaged_pdf, input->getName(), object_description, offset, "dictionary ended prematurely; " "using null as value for last key")); val = newNull(); } else { val = olist.at(++i); } dict[key_obj.getName()] = val; } object = newDictionary(dict); } olist_stack.pop_back(); offset_stack.pop_back(); if (state_stack.back() == st_top) { done = true; } else { olist_stack.back().push_back(object); } } } return object; } QPDFObjectHandle QPDFObjectHandle::newIndirect(QPDF* qpdf, int objid, int generation) { if (objid == 0) { // Special case: QPDF uses objid 0 as a sentinel for direct // objects, and the PDF specification doesn't allow for object // 0. Treat indirect references to object 0 as null so that we // never create an indirect object with objid 0. 
QTC::TC("qpdf", "QPDFObjectHandle indirect with 0 objid"); return newNull(); } return QPDFObjectHandle(qpdf, objid, generation); } QPDFObjectHandle QPDFObjectHandle::newBool(bool value) { return QPDFObjectHandle(new QPDF_Bool(value)); } QPDFObjectHandle QPDFObjectHandle::newNull() { return QPDFObjectHandle(new QPDF_Null()); } QPDFObjectHandle QPDFObjectHandle::newInteger(long long value) { return QPDFObjectHandle(new QPDF_Integer(value)); } QPDFObjectHandle QPDFObjectHandle::newReal(std::string const& value) { return QPDFObjectHandle(new QPDF_Real(value)); } QPDFObjectHandle QPDFObjectHandle::newReal(double value, int decimal_places) { return QPDFObjectHandle(new QPDF_Real(value, decimal_places)); } QPDFObjectHandle QPDFObjectHandle::newName(std::string const& name) { return QPDFObjectHandle(new QPDF_Name(name)); } QPDFObjectHandle QPDFObjectHandle::newString(std::string const& str) { return QPDFObjectHandle(new QPDF_String(str)); } QPDFObjectHandle QPDFObjectHandle::newOperator(std::string const& value) { return QPDFObjectHandle(new QPDF_Operator(value)); } QPDFObjectHandle QPDFObjectHandle::newInlineImage(std::string const& value) { return QPDFObjectHandle(new QPDF_InlineImage(value)); } QPDFObjectHandle QPDFObjectHandle::newArray() { return newArray(std::vector<QPDFObjectHandle>()); } QPDFObjectHandle QPDFObjectHandle::newArray(std::vector<QPDFObjectHandle> const& items) { return QPDFObjectHandle(new QPDF_Array(items)); } QPDFObjectHandle QPDFObjectHandle::newDictionary() { return newDictionary(std::map<std::string, QPDFObjectHandle>()); } QPDFObjectHandle QPDFObjectHandle::newDictionary( std::map<std::string, QPDFObjectHandle> const& items) { return QPDFObjectHandle(new QPDF_Dictionary(items)); } QPDFObjectHandle QPDFObjectHandle::newStream(QPDF* qpdf, int objid, int generation, QPDFObjectHandle stream_dict, qpdf_offset_t offset, size_t length) { return QPDFObjectHandle(new QPDF_Stream( qpdf, objid, generation, stream_dict, offset, length)); } QPDFObjectHandle 
QPDFObjectHandle::newStream(QPDF* qpdf) { QTC::TC("qpdf", "QPDFObjectHandle newStream"); QPDFObjectHandle stream_dict = newDictionary(); QPDFObjectHandle result = qpdf->makeIndirectObject( QPDFObjectHandle( new QPDF_Stream(qpdf, 0, 0, stream_dict, 0, 0))); result.dereference(); QPDF_Stream* stream = dynamic_cast<QPDF_Stream*>(result.obj.getPointer()); stream->setObjGen(result.getObjectID(), result.getGeneration()); return result; } QPDFObjectHandle QPDFObjectHandle::newStream(QPDF* qpdf, PointerHolder<Buffer> data) { QTC::TC("qpdf", "QPDFObjectHandle newStream with data"); QPDFObjectHandle result = newStream(qpdf); result.replaceStreamData(data, newNull(), newNull()); return result; } QPDFObjectHandle QPDFObjectHandle::newStream(QPDF* qpdf, std::string const& data) { QTC::TC("qpdf", "QPDFObjectHandle newStream with string"); QPDFObjectHandle result = newStream(qpdf); result.replaceStreamData(data, newNull(), newNull()); return result; } QPDFObjectHandle QPDFObjectHandle::newReserved(QPDF* qpdf) { // Reserve a spot for this object by assigning it an object // number, but then return an unresolved handle to the object. 
QPDFObjectHandle reserved = qpdf->makeIndirectObject( QPDFObjectHandle(new QPDF_Reserved())); QPDFObjectHandle result = newIndirect(qpdf, reserved.objid, reserved.generation); result.reserved = true; return result; } QPDFObjectHandle QPDFObjectHandle::shallowCopy() { assertInitialized(); if (isStream()) { QTC::TC("qpdf", "QPDFObjectHandle ERR shallow copy stream"); throw std::runtime_error( "attempt to make a shallow copy of a stream"); } QPDFObjectHandle new_obj; if (isArray()) { QTC::TC("qpdf", "QPDFObjectHandle shallow copy array"); new_obj = newArray(getArrayAsVector()); } else if (isDictionary()) { QTC::TC("qpdf", "QPDFObjectHandle shallow copy dictionary"); new_obj = newDictionary(getDictAsMap()); } else { QTC::TC("qpdf", "QPDFObjectHandle shallow copy scalar"); new_obj = *this; } return new_obj; } void QPDFObjectHandle::makeDirectInternal(std::set<int>& visited) { assertInitialized(); if (isStream()) { QTC::TC("qpdf", "QPDFObjectHandle ERR clone stream"); throw std::runtime_error( "attempt to make a stream into a direct object"); } int cur_objid = this->objid; if (cur_objid != 0) { if (visited.count(cur_objid)) { QTC::TC("qpdf", "QPDFObjectHandle makeDirect loop"); throw std::runtime_error( "loop detected while converting object from " "indirect to direct"); } visited.insert(cur_objid); } if (isReserved()) { throw std::logic_error( "QPDFObjectHandle: attempting to make a" " reserved object handle direct"); } dereference(); this->qpdf = 0; this->objid = 0; this->generation = 0; PointerHolder<QPDFObject> new_obj; if (isBool()) { QTC::TC("qpdf", "QPDFObjectHandle clone bool"); new_obj = new QPDF_Bool(getBoolValue()); } else if (isNull()) { QTC::TC("qpdf", "QPDFObjectHandle clone null"); new_obj = new QPDF_Null(); } else if (isInteger()) { QTC::TC("qpdf", "QPDFObjectHandle clone integer"); new_obj = new QPDF_Integer(getIntValue()); } else if (isReal()) { QTC::TC("qpdf", "QPDFObjectHandle clone real"); new_obj = new QPDF_Real(getRealValue()); } else if (isName()) 
{ QTC::TC("qpdf", "QPDFObjectHandle clone name"); new_obj = new QPDF_Name(getName()); } else if (isString()) { QTC::TC("qpdf", "QPDFObjectHandle clone string"); new_obj = new QPDF_String(getStringValue()); } else if (isArray()) { QTC::TC("qpdf", "QPDFObjectHandle clone array"); std::vector<QPDFObjectHandle> items; int n = getArrayNItems(); for (int i = 0; i < n; ++i) { items.push_back(getArrayItem(i)); items.back().makeDirectInternal(visited); } new_obj = new QPDF_Array(items); } else if (isDictionary()) { QTC::TC("qpdf", "QPDFObjectHandle clone dictionary"); std::set<std::string> keys = getKeys(); std::map<std::string, QPDFObjectHandle> items; for (std::set<std::string>::iterator iter = keys.begin(); iter != keys.end(); ++iter) { items[*iter] = getKey(*iter); items[*iter].makeDirectInternal(visited); } new_obj = new QPDF_Dictionary(items); } else { throw std::logic_error("QPDFObjectHandle::makeDirectInternal: " "unknown object type"); } this->obj = new_obj; if (cur_objid) { visited.erase(cur_objid); } } void QPDFObjectHandle::makeDirect() { std::set<int> visited; makeDirectInternal(visited); } void QPDFObjectHandle::assertInitialized() const { if (! this->initialized) { throw std::logic_error("operation attempted on uninitialized " "QPDFObjectHandle"); } } void QPDFObjectHandle::assertType(char const* type_name, bool istype) const { if (! 
istype) { throw std::logic_error(std::string("operation for ") + type_name + " object attempted on object of wrong type"); } } void QPDFObjectHandle::assertNull() { assertType("Null", isNull()); } void QPDFObjectHandle::assertBool() { assertType("Boolean", isBool()); } void QPDFObjectHandle::assertInteger() { assertType("Integer", isInteger()); } void QPDFObjectHandle::assertReal() { assertType("Real", isReal()); } void QPDFObjectHandle::assertName() { assertType("Name", isName()); } void QPDFObjectHandle::assertString() { assertType("String", isString()); } void QPDFObjectHandle::assertOperator() { assertType("Operator", isOperator()); } void QPDFObjectHandle::assertInlineImage() { assertType("InlineImage", isInlineImage()); } void QPDFObjectHandle::assertArray() { assertType("Array", isArray()); } void QPDFObjectHandle::assertDictionary() { assertType("Dictionary", isDictionary()); } void QPDFObjectHandle::assertStream() { assertType("Stream", isStream()); } void QPDFObjectHandle::assertReserved() { assertType("Reserved", isReserved()); } void QPDFObjectHandle::assertIndirect() { if (! isIndirect()) { throw std::logic_error( "operation for indirect object attempted on direct object"); } } void QPDFObjectHandle::assertScalar() { assertType("Scalar", isScalar()); } void QPDFObjectHandle::assertNumber() { assertType("Number", isNumber()); } bool QPDFObjectHandle::isPageObject() { return (this->isDictionary() && this->hasKey("/Type") && (this->getKey("/Type").getName() == "/Page")); } bool QPDFObjectHandle::isPagesObject() { return (this->isDictionary() && this->hasKey("/Type") && (this->getKey("/Type").getName() == "/Pages")); } void QPDFObjectHandle::assertPageObject() { if (! 
isPageObject()) { throw std::logic_error("page operation called on non-Page object"); } } void QPDFObjectHandle::dereference() { if (this->obj.getPointer() == 0) { PointerHolder<QPDFObject> obj = QPDF::Resolver::resolve( this->qpdf, this->objid, this->generation); if (obj.getPointer() == 0) { QTC::TC("qpdf", "QPDFObjectHandle indirect to unknown"); this->obj = new QPDF_Null(); } else if (dynamic_cast<QPDF_Reserved*>(obj.getPointer())) { // Do not resolve } else { this->reserved = false; this->obj = obj; } } } void QPDFObjectHandle::warn(QPDF* qpdf, QPDFExc const& e) { // If parsing on behalf of a QPDF object and want to give a // warning, we can warn through the object. If parsing for some // other reason, such as an explicit creation of an object from a // string, then just throw the exception. if (qpdf) { QPDF::Warner::warn(qpdf, e); } else { throw e; } }
QPDFObjectHandle::parseContentStream_internal(PointerHolder<Buffer> stream_data, std::string const& description, ParserCallbacks* callbacks) { size_t length = stream_data->getSize(); PointerHolder<InputSource> input = new BufferInputSource(description, stream_data.getPointer()); QPDFTokenizer tokenizer; tokenizer.allowEOF(); bool empty = false; while (static_cast<size_t>(input->tell()) < length) { QPDFObjectHandle obj = parseInternal(input, "content", tokenizer, empty, 0, 0, false, false, true); if (! obj.isInitialized()) { // EOF break; } callbacks->handleObject(obj); if (obj.isOperator() && (obj.getOperatorValue() == "ID")) { // Discard next character; it is the space after ID that // terminated the token. Read until end of inline image. char ch; input->read(&ch, 1); char buf[4]; memset(buf, '\0', sizeof(buf)); bool done = false; std::string inline_image; while (! done) { if (input->read(&ch, 1) == 0) { QTC::TC("qpdf", "QPDFObjectHandle EOF in inline image"); throw QPDFExc(qpdf_e_damaged_pdf, input->getName(), "stream data", input->tell(), "EOF found while reading inline image"); } inline_image += ch; memmove(buf, buf + 1, sizeof(buf) - 1); buf[sizeof(buf) - 1] = ch; if (strchr(" \t\n\v\f\r", buf[0]) && (buf[1] == 'E') && (buf[2] == 'I') && strchr(" \t\n\v\f\r", buf[3])) { // We've found an EI operator. done = true; input->seek(-3, SEEK_CUR); for (int i = 0; i < 4; ++i) { if (inline_image.length() > 0) { inline_image.erase(inline_image.length() - 1); } } } } QTC::TC("qpdf", "QPDFObjectHandle inline image token"); callbacks->handleObject( QPDFObjectHandle::newInlineImage(inline_image)); } } }
QPDFObjectHandle::parseContentStream_internal(PointerHolder<Buffer> stream_data, std::string const& description, ParserCallbacks* callbacks) { size_t length = stream_data->getSize(); PointerHolder<InputSource> input = new BufferInputSource(description, stream_data.getPointer()); QPDFTokenizer tokenizer; tokenizer.allowEOF(); bool empty = false; while (static_cast<size_t>(input->tell()) < length) { QPDFObjectHandle obj = parseInternal(input, "content", tokenizer, empty, 0, 0, true); if (! obj.isInitialized()) { // EOF break; } callbacks->handleObject(obj); if (obj.isOperator() && (obj.getOperatorValue() == "ID")) { // Discard next character; it is the space after ID that // terminated the token. Read until end of inline image. char ch; input->read(&ch, 1); char buf[4]; memset(buf, '\0', sizeof(buf)); bool done = false; std::string inline_image; while (! done) { if (input->read(&ch, 1) == 0) { QTC::TC("qpdf", "QPDFObjectHandle EOF in inline image"); throw QPDFExc(qpdf_e_damaged_pdf, input->getName(), "stream data", input->tell(), "EOF found while reading inline image"); } inline_image += ch; memmove(buf, buf + 1, sizeof(buf) - 1); buf[sizeof(buf) - 1] = ch; if (strchr(" \t\n\v\f\r", buf[0]) && (buf[1] == 'E') && (buf[2] == 'I') && strchr(" \t\n\v\f\r", buf[3])) { // We've found an EI operator. done = true; input->seek(-3, SEEK_CUR); for (int i = 0; i < 4; ++i) { if (inline_image.length() > 0) { inline_image.erase(inline_image.length() - 1); } } } } QTC::TC("qpdf", "QPDFObjectHandle inline image token"); callbacks->handleObject( QPDFObjectHandle::newInlineImage(inline_image)); } } }
{'added': [(886, ' parseInternal(input, "content", tokenizer, empty, 0, 0, true);'), (947, ' decrypter, context, false);'), (966, ' std::vector<std::vector<QPDFObjectHandle> > olist_stack;'), (967, ' olist_stack.push_back(std::vector<QPDFObjectHandle>());'), (968, ' enum state_e { st_top, st_start, st_stop, st_eof, st_dictionary, st_array };'), (969, ' std::vector<state_e> state_stack;'), (970, ' state_stack.push_back(st_top);'), (971, ' std::vector<qpdf_offset_t> offset_stack;'), (972, ' offset_stack.push_back(input->tell());'), (976, ' std::vector<QPDFObjectHandle>& olist = olist_stack.back();'), (977, ' state_e state = state_stack.back();'), (978, ' qpdf_offset_t offset = offset_stack.back();'), (979, ''), (990, ' state = st_eof;'), (1013, '\t if (state == st_array)'), (1015, ' state = st_stop;'), (1030, '\t if (state == st_dictionary)'), (1032, ' state = st_stop;'), (1048, ' olist_stack.push_back(std::vector<QPDFObjectHandle>());'), (1049, ' state = st_start;'), (1050, ' offset_stack.push_back(input->tell());'), (1051, ' state_stack.push_back('), (1052, ' (token.getType() == QPDFTokenizer::tt_array_open) ?'), (1053, ' st_array : st_dictionary);'), (1083, '\t\telse if ((value == "R") && (state != st_top) &&'), (1084, ' (olist.size() >= 2) &&'), (1085, ' (! olist.at(olist.size() - 1).isIndirect()) &&'), (1086, ' (olist.at(olist.size() - 1).isInteger()) &&'), (1087, ' (! olist.at(olist.size() - 2).isIndirect()) &&'), (1088, ' (olist.at(olist.size() - 2).isInteger()))'), (1105, '\t\telse if ((value == "endobj") && (state == st_top))'), (1151, ' if ((! object.isInitialized()) &&'), (1152, ' (! 
((state == st_start) ||'), (1153, ' (state == st_stop) ||'), (1154, ' (state == st_eof))))'), (1155, ' {'), (1156, ' throw std::logic_error('), (1157, ' "QPDFObjectHandle::parseInternal: "'), (1158, ' "unexpected uninitialized object");'), (1160, ' }'), (1162, ' switch (state)'), (1164, ' case st_eof:'), (1165, ' if (state_stack.size() > 1)'), (1167, ' warn(context,'), (1168, ' QPDFExc(qpdf_e_damaged_pdf, input->getName(),'), (1169, ' object_description,'), (1170, ' input->getLastOffset(),'), (1171, ' "parse error while reading object"));'), (1173, ' done = true;'), (1174, ' // Leave object uninitialized to indicate EOF'), (1175, ' break;'), (1177, ' case st_dictionary:'), (1178, ' case st_array:'), (1179, ' olist.push_back(object);'), (1180, ' break;'), (1181, ''), (1182, ' case st_top:'), (1183, ' done = true;'), (1184, ' break;'), (1185, ''), (1186, ' case st_start:'), (1187, ' break;'), (1188, ''), (1189, ' case st_stop:'), (1190, ' if ((state_stack.size() < 2) || (olist_stack.size() < 2))'), (1191, ' {'), (1192, ' throw std::logic_error('), (1193, ' "QPDFObjectHandle::parseInternal: st_stop encountered"'), (1194, ' " with insufficient elements in stack");'), (1195, ' }'), (1196, ' state_e old_state = state_stack.back();'), (1197, ' state_stack.pop_back();'), (1198, ' if (old_state == st_array)'), (1200, ' object = newArray(olist);'), (1201, ' }'), (1202, ' else if (old_state == st_dictionary)'), (1203, ' {'), (1204, ' // Convert list to map. Alternating elements are keys.'), (1205, ' // Attempt to recover more or less gracefully from'), (1206, ' // invalid dictionaries.'), (1207, ' std::set<std::string> names;'), (1208, ' for (std::vector<QPDFObjectHandle>::iterator iter ='), (1209, ' olist.begin();'), (1210, ' iter != olist.end(); ++iter)'), (1211, ' {'), (1212, ' if ((! 
(*iter).isIndirect()) && (*iter).isName())'), (1213, ' {'), (1214, ' names.insert((*iter).getName());'), (1215, ' }'), (1216, ' }'), (1217, ''), (1218, ' std::map<std::string, QPDFObjectHandle> dict;'), (1219, ' int next_fake_key = 1;'), (1220, ' for (unsigned int i = 0; i < olist.size(); ++i)'), (1222, ' QPDFObjectHandle key_obj = olist.at(i);'), (1223, ' QPDFObjectHandle val;'), (1224, ' if (key_obj.isIndirect() || (! key_obj.isName()))'), (1225, ' {'), (1226, ' bool found_fake = false;'), (1227, ' std::string candidate;'), (1228, ' while (! found_fake)'), (1229, ' {'), (1230, ' candidate ='), (1231, ' "/QPDFFake" +'), (1232, ' QUtil::int_to_string(next_fake_key++);'), (1233, ' found_fake = (names.count(candidate) == 0);'), (1234, ' QTC::TC("qpdf", "QPDFObjectHandle found fake",'), (1235, ' (found_fake ? 0 : 1));'), (1236, ' }'), (1237, ' warn(context,'), (1238, ' QPDFExc('), (1239, ' qpdf_e_damaged_pdf,'), (1240, ' input->getName(), object_description, offset,'), (1241, ' "expected dictionary key but found"'), (1242, ' " non-name object; inserting key " +'), (1243, ' candidate));'), (1244, ' val = key_obj;'), (1245, ' key_obj = newName(candidate);'), (1246, ' }'), (1247, ' else if (i + 1 >= olist.size())'), (1248, ' {'), (1249, ' QTC::TC("qpdf", "QPDFObjectHandle no val for last key");'), (1250, ' warn(context,'), (1251, ' QPDFExc('), (1252, ' qpdf_e_damaged_pdf,'), (1253, ' input->getName(), object_description, offset,'), (1254, ' "dictionary ended prematurely; "'), (1255, ' "using null as value for last key"));'), (1256, ' val = newNull();'), (1257, ' }'), (1258, ' else'), (1259, ' {'), (1260, ' val = olist.at(++i);'), (1261, ' }'), (1262, ' dict[key_obj.getName()] = val;'), (1264, ' object = newDictionary(dict);'), (1266, ' olist_stack.pop_back();'), (1267, ' offset_stack.pop_back();'), (1268, ' if (state_stack.back() == st_top)'), (1270, ' done = true;'), (1274, ' olist_stack.back().push_back(object);')], 'deleted': [(886, ' parseInternal(input, "content", 
tokenizer, empty,'), (887, ' 0, 0, false, false, true);'), (948, ' decrypter, context, false, false, false);'), (956, ' bool in_array, bool in_dictionary,'), (965, ' if (in_dictionary && in_array)'), (966, ' {'), (967, '\t// Although dictionaries and arrays arbitrarily nest, these'), (968, '\t// variables indicate what is at the top of the stack right'), (969, '\t// now, so they can, by definition, never both be true.'), (970, '\tthrow std::logic_error('), (971, '\t "INTERNAL ERROR: parseInternal: in_dict && in_array");'), (972, ' }'), (976, ' qpdf_offset_t offset = input->tell();'), (977, ' std::vector<QPDFObjectHandle> olist;'), (991, ' // Return uninitialized object to indicate EOF'), (992, ' return object;'), (1015, '\t if (in_array)'), (1017, '\t\tdone = true;'), (1032, '\t if (in_dictionary)'), (1034, '\t\tdone = true;'), (1049, '\t object = parseInternal('), (1050, '\t\tinput, object_description, tokenizer, empty,'), (1051, ' decrypter, context, true, false, content_stream);'), (1052, '\t break;'), (1053, ''), (1055, '\t object = parseInternal('), (1056, '\t\tinput, object_description, tokenizer, empty,'), (1057, ' decrypter, context, false, true, content_stream);'), (1087, '\t\telse if ((value == "R") && (in_array || in_dictionary) &&'), (1088, '\t\t (olist.size() >= 2) &&'), (1089, ' (! olist.at(olist.size() - 1).isIndirect()) &&'), (1090, '\t\t (olist.at(olist.size() - 1).isInteger()) &&'), (1091, ' (! olist.at(olist.size() - 2).isIndirect()) &&'), (1092, '\t\t (olist.at(olist.size() - 2).isInteger()))'), (1109, '\t\telse if ((value == "endobj") &&'), (1110, '\t\t\t (! (in_array || in_dictionary)))'), (1156, '\tif (in_dictionary || in_array)'), (1157, '\t{'), (1158, '\t if (! done)'), (1159, '\t {'), (1160, '\t\tolist.push_back(object);'), (1161, '\t }'), (1162, '\t}'), (1163, '\telse if (! 
object.isInitialized())'), (1164, '\t{'), (1165, ' warn(context,'), (1166, ' QPDFExc(qpdf_e_damaged_pdf, input->getName(),'), (1167, ' object_description,'), (1168, ' input->getLastOffset(),'), (1169, ' "parse error while reading object"));'), (1171, '\t}'), (1172, '\telse'), (1173, '\t{'), (1174, '\t done = true;'), (1175, '\t}'), (1176, ' }'), (1178, ' if (in_array)'), (1179, ' {'), (1180, '\tobject = newArray(olist);'), (1181, ' }'), (1182, ' else if (in_dictionary)'), (1183, ' {'), (1184, ' // Convert list to map. Alternating elements are keys. Attempt'), (1185, ' // to recover more or less gracefully from invalid'), (1186, ' // dictionaries.'), (1187, ' std::set<std::string> names;'), (1188, ' for (std::vector<QPDFObjectHandle>::iterator iter = olist.begin();'), (1189, ' iter != olist.end(); ++iter)'), (1191, ' if ((! (*iter).isIndirect()) && (*iter).isName())'), (1193, ' names.insert((*iter).getName());'), (1195, ' }'), (1197, ' std::map<std::string, QPDFObjectHandle> dict;'), (1198, ' int next_fake_key = 1;'), (1199, ' for (unsigned int i = 0; i < olist.size(); ++i)'), (1200, ' {'), (1201, ' QPDFObjectHandle key_obj = olist.at(i);'), (1202, ' QPDFObjectHandle val;'), (1203, ' if (key_obj.isIndirect() || (! key_obj.isName()))'), (1205, ' bool found_fake = false;'), (1206, ' std::string candidate;'), (1207, ' while (! found_fake)'), (1209, ' candidate ='), (1210, ' "/QPDFFake" + QUtil::int_to_string(next_fake_key++);'), (1211, ' found_fake = (names.count(candidate) == 0);'), (1212, ' QTC::TC("qpdf", "QPDFObjectHandle found fake",'), (1213, ' (found_fake ? 
0 : 1));'), (1215, ' warn(context,'), (1216, ' QPDFExc('), (1217, ' qpdf_e_damaged_pdf,'), (1218, ' input->getName(), object_description, offset,'), (1219, ' "expected dictionary key but found"'), (1220, ' " non-name object; inserting key " +'), (1221, ' candidate));'), (1222, ' val = key_obj;'), (1223, ' key_obj = newName(candidate);'), (1225, ' else if (i + 1 >= olist.size())'), (1227, ' QTC::TC("qpdf", "QPDFObjectHandle no val for last key");'), (1228, ' warn(context,'), (1229, ' QPDFExc('), (1230, ' qpdf_e_damaged_pdf,'), (1231, ' input->getName(), object_description, offset,'), (1232, ' "dictionary ended prematurely; using null as value"'), (1233, ' " for last key"));'), (1234, ' val = newNull();'), (1238, ' val = olist.at(++i);'), (1240, ' dict[key_obj.getName()] = val;'), (1242, ' object = newDictionary(dict);')]}
141
107
1,514
7,387
62
391
13
https://github.com/qpdf/qpdf
CVE-2017-12595
CWE-20
1,255
print-l2tp.c
C
l2tp_framing_type_print
/* * Copyright (c) 1991, 1993, 1994, 1995, 1996, 1997 * The Regents of the University of California. All rights reserved. * * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that: (1) source code distributions * retain the above copyright notice and this paragraph in its entirety, (2) * distributions including binary code include the above copyright notice and * this paragraph in its entirety in the documentation or other materials * provided with the distribution, and (3) all advertising materials mentioning * features or use of this software display the following acknowledgement: * ``This product includes software developed by the University of California, * Lawrence Berkeley Laboratory and its contributors.'' Neither the name of * the University nor the names of its contributors may be used to endorse * or promote products derived from this software without specific prior * written permission. * THIS SOFTWARE IS PROVIDED ``AS IS'' AND WITHOUT ANY EXPRESS OR IMPLIED * WARRANTIES, INCLUDING, WITHOUT LIMITATION, THE IMPLIED WARRANTIES OF * MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE. 
* * L2TP support contributed by Motonori Shindo (mshindo@mshindo.net) */ /* \summary: Layer Two Tunneling Protocol (L2TP) printer */ #ifdef HAVE_CONFIG_H #include "config.h" #endif #include <netdissect-stdinc.h> #include "netdissect.h" #include "extract.h" #define L2TP_FLAG_TYPE 0x8000 /* Type (0=Data, 1=Control) */ #define L2TP_FLAG_LENGTH 0x4000 /* Length */ #define L2TP_FLAG_SEQUENCE 0x0800 /* Sequence */ #define L2TP_FLAG_OFFSET 0x0200 /* Offset */ #define L2TP_FLAG_PRIORITY 0x0100 /* Priority */ #define L2TP_VERSION_MASK 0x000f /* Version Mask */ #define L2TP_VERSION_L2F 0x0001 /* L2F */ #define L2TP_VERSION_L2TP 0x0002 /* L2TP */ #define L2TP_AVP_HDR_FLAG_MANDATORY 0x8000 /* Mandatory Flag */ #define L2TP_AVP_HDR_FLAG_HIDDEN 0x4000 /* Hidden Flag */ #define L2TP_AVP_HDR_LEN_MASK 0x03ff /* Length Mask */ #define L2TP_FRAMING_CAP_SYNC_MASK 0x00000001 /* Synchronous */ #define L2TP_FRAMING_CAP_ASYNC_MASK 0x00000002 /* Asynchronous */ #define L2TP_FRAMING_TYPE_SYNC_MASK 0x00000001 /* Synchronous */ #define L2TP_FRAMING_TYPE_ASYNC_MASK 0x00000002 /* Asynchronous */ #define L2TP_BEARER_CAP_DIGITAL_MASK 0x00000001 /* Digital */ #define L2TP_BEARER_CAP_ANALOG_MASK 0x00000002 /* Analog */ #define L2TP_BEARER_TYPE_DIGITAL_MASK 0x00000001 /* Digital */ #define L2TP_BEARER_TYPE_ANALOG_MASK 0x00000002 /* Analog */ /* Authen Type */ #define L2TP_AUTHEN_TYPE_RESERVED 0x0000 /* Reserved */ #define L2TP_AUTHEN_TYPE_TEXTUAL 0x0001 /* Textual username/password exchange */ #define L2TP_AUTHEN_TYPE_CHAP 0x0002 /* PPP CHAP */ #define L2TP_AUTHEN_TYPE_PAP 0x0003 /* PPP PAP */ #define L2TP_AUTHEN_TYPE_NO_AUTH 0x0004 /* No Authentication */ #define L2TP_AUTHEN_TYPE_MSCHAPv1 0x0005 /* MSCHAPv1 */ #define L2TP_PROXY_AUTH_ID_MASK 0x00ff static const char tstr[] = " [|l2tp]"; #define L2TP_MSGTYPE_SCCRQ 1 /* Start-Control-Connection-Request */ #define L2TP_MSGTYPE_SCCRP 2 /* Start-Control-Connection-Reply */ #define L2TP_MSGTYPE_SCCCN 3 /* Start-Control-Connection-Connected */ #define 
L2TP_MSGTYPE_STOPCCN 4 /* Stop-Control-Connection-Notification */ #define L2TP_MSGTYPE_HELLO 6 /* Hello */ #define L2TP_MSGTYPE_OCRQ 7 /* Outgoing-Call-Request */ #define L2TP_MSGTYPE_OCRP 8 /* Outgoing-Call-Reply */ #define L2TP_MSGTYPE_OCCN 9 /* Outgoing-Call-Connected */ #define L2TP_MSGTYPE_ICRQ 10 /* Incoming-Call-Request */ #define L2TP_MSGTYPE_ICRP 11 /* Incoming-Call-Reply */ #define L2TP_MSGTYPE_ICCN 12 /* Incoming-Call-Connected */ #define L2TP_MSGTYPE_CDN 14 /* Call-Disconnect-Notify */ #define L2TP_MSGTYPE_WEN 15 /* WAN-Error-Notify */ #define L2TP_MSGTYPE_SLI 16 /* Set-Link-Info */ static const struct tok l2tp_msgtype2str[] = { { L2TP_MSGTYPE_SCCRQ, "SCCRQ" }, { L2TP_MSGTYPE_SCCRP, "SCCRP" }, { L2TP_MSGTYPE_SCCCN, "SCCCN" }, { L2TP_MSGTYPE_STOPCCN, "StopCCN" }, { L2TP_MSGTYPE_HELLO, "HELLO" }, { L2TP_MSGTYPE_OCRQ, "OCRQ" }, { L2TP_MSGTYPE_OCRP, "OCRP" }, { L2TP_MSGTYPE_OCCN, "OCCN" }, { L2TP_MSGTYPE_ICRQ, "ICRQ" }, { L2TP_MSGTYPE_ICRP, "ICRP" }, { L2TP_MSGTYPE_ICCN, "ICCN" }, { L2TP_MSGTYPE_CDN, "CDN" }, { L2TP_MSGTYPE_WEN, "WEN" }, { L2TP_MSGTYPE_SLI, "SLI" }, { 0, NULL } }; #define L2TP_AVP_MSGTYPE 0 /* Message Type */ #define L2TP_AVP_RESULT_CODE 1 /* Result Code */ #define L2TP_AVP_PROTO_VER 2 /* Protocol Version */ #define L2TP_AVP_FRAMING_CAP 3 /* Framing Capabilities */ #define L2TP_AVP_BEARER_CAP 4 /* Bearer Capabilities */ #define L2TP_AVP_TIE_BREAKER 5 /* Tie Breaker */ #define L2TP_AVP_FIRM_VER 6 /* Firmware Revision */ #define L2TP_AVP_HOST_NAME 7 /* Host Name */ #define L2TP_AVP_VENDOR_NAME 8 /* Vendor Name */ #define L2TP_AVP_ASSND_TUN_ID 9 /* Assigned Tunnel ID */ #define L2TP_AVP_RECV_WIN_SIZE 10 /* Receive Window Size */ #define L2TP_AVP_CHALLENGE 11 /* Challenge */ #define L2TP_AVP_Q931_CC 12 /* Q.931 Cause Code */ #define L2TP_AVP_CHALLENGE_RESP 13 /* Challenge Response */ #define L2TP_AVP_ASSND_SESS_ID 14 /* Assigned Session ID */ #define L2TP_AVP_CALL_SER_NUM 15 /* Call Serial Number */ #define L2TP_AVP_MINIMUM_BPS 16 /* Minimum 
BPS */ #define L2TP_AVP_MAXIMUM_BPS 17 /* Maximum BPS */ #define L2TP_AVP_BEARER_TYPE 18 /* Bearer Type */ #define L2TP_AVP_FRAMING_TYPE 19 /* Framing Type */ #define L2TP_AVP_PACKET_PROC_DELAY 20 /* Packet Processing Delay (OBSOLETE) */ #define L2TP_AVP_CALLED_NUMBER 21 /* Called Number */ #define L2TP_AVP_CALLING_NUMBER 22 /* Calling Number */ #define L2TP_AVP_SUB_ADDRESS 23 /* Sub-Address */ #define L2TP_AVP_TX_CONN_SPEED 24 /* (Tx) Connect Speed */ #define L2TP_AVP_PHY_CHANNEL_ID 25 /* Physical Channel ID */ #define L2TP_AVP_INI_RECV_LCP 26 /* Initial Received LCP CONFREQ */ #define L2TP_AVP_LAST_SENT_LCP 27 /* Last Sent LCP CONFREQ */ #define L2TP_AVP_LAST_RECV_LCP 28 /* Last Received LCP CONFREQ */ #define L2TP_AVP_PROXY_AUTH_TYPE 29 /* Proxy Authen Type */ #define L2TP_AVP_PROXY_AUTH_NAME 30 /* Proxy Authen Name */ #define L2TP_AVP_PROXY_AUTH_CHAL 31 /* Proxy Authen Challenge */ #define L2TP_AVP_PROXY_AUTH_ID 32 /* Proxy Authen ID */ #define L2TP_AVP_PROXY_AUTH_RESP 33 /* Proxy Authen Response */ #define L2TP_AVP_CALL_ERRORS 34 /* Call Errors */ #define L2TP_AVP_ACCM 35 /* ACCM */ #define L2TP_AVP_RANDOM_VECTOR 36 /* Random Vector */ #define L2TP_AVP_PRIVATE_GRP_ID 37 /* Private Group ID */ #define L2TP_AVP_RX_CONN_SPEED 38 /* (Rx) Connect Speed */ #define L2TP_AVP_SEQ_REQUIRED 39 /* Sequencing Required */ #define L2TP_AVP_PPP_DISCON_CC 46 /* PPP Disconnect Cause Code */ static const struct tok l2tp_avp2str[] = { { L2TP_AVP_MSGTYPE, "MSGTYPE" }, { L2TP_AVP_RESULT_CODE, "RESULT_CODE" }, { L2TP_AVP_PROTO_VER, "PROTO_VER" }, { L2TP_AVP_FRAMING_CAP, "FRAMING_CAP" }, { L2TP_AVP_BEARER_CAP, "BEARER_CAP" }, { L2TP_AVP_TIE_BREAKER, "TIE_BREAKER" }, { L2TP_AVP_FIRM_VER, "FIRM_VER" }, { L2TP_AVP_HOST_NAME, "HOST_NAME" }, { L2TP_AVP_VENDOR_NAME, "VENDOR_NAME" }, { L2TP_AVP_ASSND_TUN_ID, "ASSND_TUN_ID" }, { L2TP_AVP_RECV_WIN_SIZE, "RECV_WIN_SIZE" }, { L2TP_AVP_CHALLENGE, "CHALLENGE" }, { L2TP_AVP_Q931_CC, "Q931_CC", }, { L2TP_AVP_CHALLENGE_RESP, "CHALLENGE_RESP" }, { 
L2TP_AVP_ASSND_SESS_ID, "ASSND_SESS_ID" }, { L2TP_AVP_CALL_SER_NUM, "CALL_SER_NUM" }, { L2TP_AVP_MINIMUM_BPS, "MINIMUM_BPS" }, { L2TP_AVP_MAXIMUM_BPS, "MAXIMUM_BPS" }, { L2TP_AVP_BEARER_TYPE, "BEARER_TYPE" }, { L2TP_AVP_FRAMING_TYPE, "FRAMING_TYPE" }, { L2TP_AVP_PACKET_PROC_DELAY, "PACKET_PROC_DELAY" }, { L2TP_AVP_CALLED_NUMBER, "CALLED_NUMBER" }, { L2TP_AVP_CALLING_NUMBER, "CALLING_NUMBER" }, { L2TP_AVP_SUB_ADDRESS, "SUB_ADDRESS" }, { L2TP_AVP_TX_CONN_SPEED, "TX_CONN_SPEED" }, { L2TP_AVP_PHY_CHANNEL_ID, "PHY_CHANNEL_ID" }, { L2TP_AVP_INI_RECV_LCP, "INI_RECV_LCP" }, { L2TP_AVP_LAST_SENT_LCP, "LAST_SENT_LCP" }, { L2TP_AVP_LAST_RECV_LCP, "LAST_RECV_LCP" }, { L2TP_AVP_PROXY_AUTH_TYPE, "PROXY_AUTH_TYPE" }, { L2TP_AVP_PROXY_AUTH_NAME, "PROXY_AUTH_NAME" }, { L2TP_AVP_PROXY_AUTH_CHAL, "PROXY_AUTH_CHAL" }, { L2TP_AVP_PROXY_AUTH_ID, "PROXY_AUTH_ID" }, { L2TP_AVP_PROXY_AUTH_RESP, "PROXY_AUTH_RESP" }, { L2TP_AVP_CALL_ERRORS, "CALL_ERRORS" }, { L2TP_AVP_ACCM, "ACCM" }, { L2TP_AVP_RANDOM_VECTOR, "RANDOM_VECTOR" }, { L2TP_AVP_PRIVATE_GRP_ID, "PRIVATE_GRP_ID" }, { L2TP_AVP_RX_CONN_SPEED, "RX_CONN_SPEED" }, { L2TP_AVP_SEQ_REQUIRED, "SEQ_REQUIRED" }, { L2TP_AVP_PPP_DISCON_CC, "PPP_DISCON_CC" }, { 0, NULL } }; static const struct tok l2tp_authentype2str[] = { { L2TP_AUTHEN_TYPE_RESERVED, "Reserved" }, { L2TP_AUTHEN_TYPE_TEXTUAL, "Textual" }, { L2TP_AUTHEN_TYPE_CHAP, "CHAP" }, { L2TP_AUTHEN_TYPE_PAP, "PAP" }, { L2TP_AUTHEN_TYPE_NO_AUTH, "No Auth" }, { L2TP_AUTHEN_TYPE_MSCHAPv1, "MS-CHAPv1" }, { 0, NULL } }; #define L2TP_PPP_DISCON_CC_DIRECTION_GLOBAL 0 #define L2TP_PPP_DISCON_CC_DIRECTION_AT_PEER 1 #define L2TP_PPP_DISCON_CC_DIRECTION_AT_LOCAL 2 static const struct tok l2tp_cc_direction2str[] = { { L2TP_PPP_DISCON_CC_DIRECTION_GLOBAL, "global error" }, { L2TP_PPP_DISCON_CC_DIRECTION_AT_PEER, "at peer" }, { L2TP_PPP_DISCON_CC_DIRECTION_AT_LOCAL,"at local" }, { 0, NULL } }; #if 0 static char *l2tp_result_code_StopCCN[] = { "Reserved", "General request to clear control connection", 
"General error--Error Code indicates the problem", "Control channel already exists", "Requester is not authorized to establish a control channel", "The protocol version of the requester is not supported", "Requester is being shut down", "Finite State Machine error" #define L2TP_MAX_RESULT_CODE_STOPCC_INDEX 8 }; #endif #if 0 static char *l2tp_result_code_CDN[] = { "Reserved", "Call disconnected due to loss of carrier", "Call disconnected for the reason indicated in error code", "Call disconnected for administrative reasons", "Call failed due to lack of appropriate facilities being " \ "available (temporary condition)", "Call failed due to lack of appropriate facilities being " \ "available (permanent condition)", "Invalid destination", "Call failed due to no carrier detected", "Call failed due to detection of a busy signal", "Call failed due to lack of a dial tone", "Call was not established within time allotted by LAC", "Call was connected but no appropriate framing was detected" #define L2TP_MAX_RESULT_CODE_CDN_INDEX 12 }; #endif #if 0 static char *l2tp_error_code_general[] = { "No general error", "No control connection exists yet for this LAC-LNS pair", "Length is wrong", "One of the field values was out of range or " \ "reserved field was non-zero" "Insufficient resources to handle this operation now", "The Session ID is invalid in this context", "A generic vendor-specific error occurred in the LAC", "Try another" #define L2TP_MAX_ERROR_CODE_GENERAL_INDEX 8 }; #endif /******************************/ /* generic print out routines */ /******************************/ static void print_string(netdissect_options *ndo, const u_char *dat, u_int length) { u_int i; for (i=0; i<length; i++) { ND_PRINT((ndo, "%c", *dat++)); } } static void print_octets(netdissect_options *ndo, const u_char *dat, u_int length) { u_int i; for (i=0; i<length; i++) { ND_PRINT((ndo, "%02x", *dat++)); } } static void print_16bits_val(netdissect_options *ndo, const uint16_t *dat) { ND_PRINT((ndo, 
"%u", EXTRACT_16BITS(dat))); } static void print_32bits_val(netdissect_options *ndo, const uint32_t *dat) { ND_PRINT((ndo, "%lu", (u_long)EXTRACT_32BITS(dat))); } /***********************************/ /* AVP-specific print out routines */ /***********************************/ static void l2tp_msgtype_print(netdissect_options *ndo, const u_char *dat) { const uint16_t *ptr = (const uint16_t *)dat; ND_PRINT((ndo, "%s", tok2str(l2tp_msgtype2str, "MSGTYPE-#%u", EXTRACT_16BITS(ptr)))); } static void l2tp_result_code_print(netdissect_options *ndo, const u_char *dat, u_int length) { const uint16_t *ptr = (const uint16_t *)dat; ND_PRINT((ndo, "%u", EXTRACT_16BITS(ptr))); ptr++; /* Result Code */ if (length > 2) { /* Error Code (opt) */ ND_PRINT((ndo, "/%u", EXTRACT_16BITS(ptr))); ptr++; } if (length > 4) { /* Error Message (opt) */ ND_PRINT((ndo, " ")); print_string(ndo, (const u_char *)ptr, length - 4); } } static void l2tp_proto_ver_print(netdissect_options *ndo, const uint16_t *dat) { ND_PRINT((ndo, "%u.%u", (EXTRACT_16BITS(dat) >> 8), (EXTRACT_16BITS(dat) & 0xff))); } static void l2tp_framing_cap_print(netdissect_options *ndo, const u_char *dat) { const uint32_t *ptr = (const uint32_t *)dat; if (EXTRACT_32BITS(ptr) & L2TP_FRAMING_CAP_ASYNC_MASK) { ND_PRINT((ndo, "A")); } if (EXTRACT_32BITS(ptr) & L2TP_FRAMING_CAP_SYNC_MASK) { ND_PRINT((ndo, "S")); } } static void l2tp_bearer_cap_print(netdissect_options *ndo, const u_char *dat) { const uint32_t *ptr = (const uint32_t *)dat; if (EXTRACT_32BITS(ptr) & L2TP_BEARER_CAP_ANALOG_MASK) { ND_PRINT((ndo, "A")); } if (EXTRACT_32BITS(ptr) & L2TP_BEARER_CAP_DIGITAL_MASK) { ND_PRINT((ndo, "D")); } } static void l2tp_q931_cc_print(netdissect_options *ndo, const u_char *dat, u_int length) { print_16bits_val(ndo, (const uint16_t *)dat); ND_PRINT((ndo, ", %02x", dat[2])); if (length > 3) { ND_PRINT((ndo, " ")); print_string(ndo, dat+3, length-3); } } static void l2tp_bearer_type_print(netdissect_options *ndo, const u_char *dat) { const 
uint32_t *ptr = (const uint32_t *)dat; if (EXTRACT_32BITS(ptr) & L2TP_BEARER_TYPE_ANALOG_MASK) { ND_PRINT((ndo, "A")); } if (EXTRACT_32BITS(ptr) & L2TP_BEARER_TYPE_DIGITAL_MASK) { ND_PRINT((ndo, "D")); } } static void l2tp_framing_type_print(netdissect_options *ndo, const u_char *dat) { const uint32_t *ptr = (const uint32_t *)dat; if (EXTRACT_32BITS(ptr) & L2TP_FRAMING_TYPE_ASYNC_MASK) { ND_PRINT((ndo, "A")); } if (EXTRACT_32BITS(ptr) & L2TP_FRAMING_TYPE_SYNC_MASK) { ND_PRINT((ndo, "S")); } } static void l2tp_packet_proc_delay_print(netdissect_options *ndo) { ND_PRINT((ndo, "obsolete")); } static void l2tp_proxy_auth_type_print(netdissect_options *ndo, const u_char *dat) { const uint16_t *ptr = (const uint16_t *)dat; ND_PRINT((ndo, "%s", tok2str(l2tp_authentype2str, "AuthType-#%u", EXTRACT_16BITS(ptr)))); } static void l2tp_proxy_auth_id_print(netdissect_options *ndo, const u_char *dat) { const uint16_t *ptr = (const uint16_t *)dat; ND_PRINT((ndo, "%u", EXTRACT_16BITS(ptr) & L2TP_PROXY_AUTH_ID_MASK)); } static void l2tp_call_errors_print(netdissect_options *ndo, const u_char *dat) { const uint16_t *ptr = (const uint16_t *)dat; uint16_t val_h, val_l; ptr++; /* skip "Reserved" */ val_h = EXTRACT_16BITS(ptr); ptr++; val_l = EXTRACT_16BITS(ptr); ptr++; ND_PRINT((ndo, "CRCErr=%u ", (val_h<<16) + val_l)); val_h = EXTRACT_16BITS(ptr); ptr++; val_l = EXTRACT_16BITS(ptr); ptr++; ND_PRINT((ndo, "FrameErr=%u ", (val_h<<16) + val_l)); val_h = EXTRACT_16BITS(ptr); ptr++; val_l = EXTRACT_16BITS(ptr); ptr++; ND_PRINT((ndo, "HardOver=%u ", (val_h<<16) + val_l)); val_h = EXTRACT_16BITS(ptr); ptr++; val_l = EXTRACT_16BITS(ptr); ptr++; ND_PRINT((ndo, "BufOver=%u ", (val_h<<16) + val_l)); val_h = EXTRACT_16BITS(ptr); ptr++; val_l = EXTRACT_16BITS(ptr); ptr++; ND_PRINT((ndo, "Timeout=%u ", (val_h<<16) + val_l)); val_h = EXTRACT_16BITS(ptr); ptr++; val_l = EXTRACT_16BITS(ptr); ptr++; ND_PRINT((ndo, "AlignErr=%u ", (val_h<<16) + val_l)); } static void l2tp_accm_print(netdissect_options 
*ndo, const u_char *dat) { const uint16_t *ptr = (const uint16_t *)dat; uint16_t val_h, val_l; ptr++; /* skip "Reserved" */ val_h = EXTRACT_16BITS(ptr); ptr++; val_l = EXTRACT_16BITS(ptr); ptr++; ND_PRINT((ndo, "send=%08x ", (val_h<<16) + val_l)); val_h = EXTRACT_16BITS(ptr); ptr++; val_l = EXTRACT_16BITS(ptr); ptr++; ND_PRINT((ndo, "recv=%08x ", (val_h<<16) + val_l)); } static void l2tp_ppp_discon_cc_print(netdissect_options *ndo, const u_char *dat, u_int length) { const uint16_t *ptr = (const uint16_t *)dat; ND_PRINT((ndo, "%04x, ", EXTRACT_16BITS(ptr))); ptr++; /* Disconnect Code */ ND_PRINT((ndo, "%04x ", EXTRACT_16BITS(ptr))); ptr++; /* Control Protocol Number */ ND_PRINT((ndo, "%s", tok2str(l2tp_cc_direction2str, "Direction-#%u", *((const u_char *)ptr++)))); if (length > 5) { ND_PRINT((ndo, " ")); print_string(ndo, (const u_char *)ptr, length-5); } } static void l2tp_avp_print(netdissect_options *ndo, const u_char *dat, int length) { u_int len; const uint16_t *ptr = (const uint16_t *)dat; uint16_t attr_type; int hidden = FALSE; if (length <= 0) { return; } ND_PRINT((ndo, " ")); ND_TCHECK(*ptr); /* Flags & Length */ len = EXTRACT_16BITS(ptr) & L2TP_AVP_HDR_LEN_MASK; /* If it is not long enough to contain the header, we'll give up. */ if (len < 6) goto trunc; /* If it goes past the end of the remaining length of the packet, we'll give up. */ if (len > (u_int)length) goto trunc; /* If it goes past the end of the remaining length of the captured data, we'll give up. 
*/ ND_TCHECK2(*ptr, len); /* After this point, no need to worry about truncation */ if (EXTRACT_16BITS(ptr) & L2TP_AVP_HDR_FLAG_MANDATORY) { ND_PRINT((ndo, "*")); } if (EXTRACT_16BITS(ptr) & L2TP_AVP_HDR_FLAG_HIDDEN) { hidden = TRUE; ND_PRINT((ndo, "?")); } ptr++; if (EXTRACT_16BITS(ptr)) { /* Vendor Specific Attribute */ ND_PRINT((ndo, "VENDOR%04x:", EXTRACT_16BITS(ptr))); ptr++; ND_PRINT((ndo, "ATTR%04x", EXTRACT_16BITS(ptr))); ptr++; ND_PRINT((ndo, "(")); print_octets(ndo, (const u_char *)ptr, len-6); ND_PRINT((ndo, ")")); } else { /* IETF-defined Attributes */ ptr++; attr_type = EXTRACT_16BITS(ptr); ptr++; ND_PRINT((ndo, "%s", tok2str(l2tp_avp2str, "AVP-#%u", attr_type))); ND_PRINT((ndo, "(")); if (hidden) { ND_PRINT((ndo, "???")); } else { switch (attr_type) { case L2TP_AVP_MSGTYPE: l2tp_msgtype_print(ndo, (const u_char *)ptr); break; case L2TP_AVP_RESULT_CODE: l2tp_result_code_print(ndo, (const u_char *)ptr, len-6); break; case L2TP_AVP_PROTO_VER: l2tp_proto_ver_print(ndo, ptr); break; case L2TP_AVP_FRAMING_CAP: l2tp_framing_cap_print(ndo, (const u_char *)ptr); break; case L2TP_AVP_BEARER_CAP: l2tp_bearer_cap_print(ndo, (const u_char *)ptr); break; case L2TP_AVP_TIE_BREAKER: print_octets(ndo, (const u_char *)ptr, 8); break; case L2TP_AVP_FIRM_VER: case L2TP_AVP_ASSND_TUN_ID: case L2TP_AVP_RECV_WIN_SIZE: case L2TP_AVP_ASSND_SESS_ID: print_16bits_val(ndo, ptr); break; case L2TP_AVP_HOST_NAME: case L2TP_AVP_VENDOR_NAME: case L2TP_AVP_CALLING_NUMBER: case L2TP_AVP_CALLED_NUMBER: case L2TP_AVP_SUB_ADDRESS: case L2TP_AVP_PROXY_AUTH_NAME: case L2TP_AVP_PRIVATE_GRP_ID: print_string(ndo, (const u_char *)ptr, len-6); break; case L2TP_AVP_CHALLENGE: case L2TP_AVP_INI_RECV_LCP: case L2TP_AVP_LAST_SENT_LCP: case L2TP_AVP_LAST_RECV_LCP: case L2TP_AVP_PROXY_AUTH_CHAL: case L2TP_AVP_PROXY_AUTH_RESP: case L2TP_AVP_RANDOM_VECTOR: print_octets(ndo, (const u_char *)ptr, len-6); break; case L2TP_AVP_Q931_CC: l2tp_q931_cc_print(ndo, (const u_char *)ptr, len-6); break; case 
L2TP_AVP_CHALLENGE_RESP: print_octets(ndo, (const u_char *)ptr, 16); break; case L2TP_AVP_CALL_SER_NUM: case L2TP_AVP_MINIMUM_BPS: case L2TP_AVP_MAXIMUM_BPS: case L2TP_AVP_TX_CONN_SPEED: case L2TP_AVP_PHY_CHANNEL_ID: case L2TP_AVP_RX_CONN_SPEED: print_32bits_val(ndo, (const uint32_t *)ptr); break; case L2TP_AVP_BEARER_TYPE: l2tp_bearer_type_print(ndo, (const u_char *)ptr); break; case L2TP_AVP_FRAMING_TYPE: l2tp_framing_type_print(ndo, (const u_char *)ptr); break; case L2TP_AVP_PACKET_PROC_DELAY: l2tp_packet_proc_delay_print(ndo); break; case L2TP_AVP_PROXY_AUTH_TYPE: l2tp_proxy_auth_type_print(ndo, (const u_char *)ptr); break; case L2TP_AVP_PROXY_AUTH_ID: l2tp_proxy_auth_id_print(ndo, (const u_char *)ptr); break; case L2TP_AVP_CALL_ERRORS: l2tp_call_errors_print(ndo, (const u_char *)ptr); break; case L2TP_AVP_ACCM: l2tp_accm_print(ndo, (const u_char *)ptr); break; case L2TP_AVP_SEQ_REQUIRED: break; /* No Attribute Value */ case L2TP_AVP_PPP_DISCON_CC: l2tp_ppp_discon_cc_print(ndo, (const u_char *)ptr, len-6); break; default: break; } } ND_PRINT((ndo, ")")); } l2tp_avp_print(ndo, dat+len, length-len); return; trunc: ND_PRINT((ndo, "|...")); } void l2tp_print(netdissect_options *ndo, const u_char *dat, u_int length) { const u_char *ptr = dat; u_int cnt = 0; /* total octets consumed */ uint16_t pad; int flag_t, flag_l, flag_s, flag_o; uint16_t l2tp_len; flag_t = flag_l = flag_s = flag_o = FALSE; ND_TCHECK2(*ptr, 2); /* Flags & Version */ if ((EXTRACT_16BITS(ptr) & L2TP_VERSION_MASK) == L2TP_VERSION_L2TP) { ND_PRINT((ndo, " l2tp:")); } else if ((EXTRACT_16BITS(ptr) & L2TP_VERSION_MASK) == L2TP_VERSION_L2F) { ND_PRINT((ndo, " l2f:")); return; /* nothing to do */ } else { ND_PRINT((ndo, " Unknown Version, neither L2F(1) nor L2TP(2)")); return; /* nothing we can do */ } ND_PRINT((ndo, "[")); if (EXTRACT_16BITS(ptr) & L2TP_FLAG_TYPE) { flag_t = TRUE; ND_PRINT((ndo, "T")); } if (EXTRACT_16BITS(ptr) & L2TP_FLAG_LENGTH) { flag_l = TRUE; ND_PRINT((ndo, "L")); } if 
(EXTRACT_16BITS(ptr) & L2TP_FLAG_SEQUENCE) { flag_s = TRUE; ND_PRINT((ndo, "S")); } if (EXTRACT_16BITS(ptr) & L2TP_FLAG_OFFSET) { flag_o = TRUE; ND_PRINT((ndo, "O")); } if (EXTRACT_16BITS(ptr) & L2TP_FLAG_PRIORITY) ND_PRINT((ndo, "P")); ND_PRINT((ndo, "]")); ptr += 2; cnt += 2; if (flag_l) { ND_TCHECK2(*ptr, 2); /* Length */ l2tp_len = EXTRACT_16BITS(ptr); ptr += 2; cnt += 2; } else { l2tp_len = 0; } ND_TCHECK2(*ptr, 2); /* Tunnel ID */ ND_PRINT((ndo, "(%u/", EXTRACT_16BITS(ptr))); ptr += 2; cnt += 2; ND_TCHECK2(*ptr, 2); /* Session ID */ ND_PRINT((ndo, "%u)", EXTRACT_16BITS(ptr))); ptr += 2; cnt += 2; if (flag_s) { ND_TCHECK2(*ptr, 2); /* Ns */ ND_PRINT((ndo, "Ns=%u,", EXTRACT_16BITS(ptr))); ptr += 2; cnt += 2; ND_TCHECK2(*ptr, 2); /* Nr */ ND_PRINT((ndo, "Nr=%u", EXTRACT_16BITS(ptr))); ptr += 2; cnt += 2; } if (flag_o) { ND_TCHECK2(*ptr, 2); /* Offset Size */ pad = EXTRACT_16BITS(ptr); ptr += (2 + pad); cnt += (2 + pad); } if (flag_l) { if (length < l2tp_len) { ND_PRINT((ndo, " Length %u larger than packet", l2tp_len)); return; } length = l2tp_len; } if (length < cnt) { ND_PRINT((ndo, " Length %u smaller than header length", length)); return; } if (flag_t) { if (!flag_l) { ND_PRINT((ndo, " No length")); return; } if (length - cnt == 0) { ND_PRINT((ndo, " ZLB")); } else { l2tp_avp_print(ndo, ptr, length - cnt); } } else { ND_PRINT((ndo, " {")); ppp_print(ndo, ptr, length - cnt); ND_PRINT((ndo, "}")); } return; trunc: ND_PRINT((ndo, "%s", tstr)); }
/* * Copyright (c) 1991, 1993, 1994, 1995, 1996, 1997 * The Regents of the University of California. All rights reserved. * * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that: (1) source code distributions * retain the above copyright notice and this paragraph in its entirety, (2) * distributions including binary code include the above copyright notice and * this paragraph in its entirety in the documentation or other materials * provided with the distribution, and (3) all advertising materials mentioning * features or use of this software display the following acknowledgement: * ``This product includes software developed by the University of California, * Lawrence Berkeley Laboratory and its contributors.'' Neither the name of * the University nor the names of its contributors may be used to endorse * or promote products derived from this software without specific prior * written permission. * THIS SOFTWARE IS PROVIDED ``AS IS'' AND WITHOUT ANY EXPRESS OR IMPLIED * WARRANTIES, INCLUDING, WITHOUT LIMITATION, THE IMPLIED WARRANTIES OF * MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE. 
* * L2TP support contributed by Motonori Shindo (mshindo@mshindo.net) */ /* \summary: Layer Two Tunneling Protocol (L2TP) printer */ #ifdef HAVE_CONFIG_H #include "config.h" #endif #include <netdissect-stdinc.h> #include "netdissect.h" #include "extract.h" #define L2TP_FLAG_TYPE 0x8000 /* Type (0=Data, 1=Control) */ #define L2TP_FLAG_LENGTH 0x4000 /* Length */ #define L2TP_FLAG_SEQUENCE 0x0800 /* Sequence */ #define L2TP_FLAG_OFFSET 0x0200 /* Offset */ #define L2TP_FLAG_PRIORITY 0x0100 /* Priority */ #define L2TP_VERSION_MASK 0x000f /* Version Mask */ #define L2TP_VERSION_L2F 0x0001 /* L2F */ #define L2TP_VERSION_L2TP 0x0002 /* L2TP */ #define L2TP_AVP_HDR_FLAG_MANDATORY 0x8000 /* Mandatory Flag */ #define L2TP_AVP_HDR_FLAG_HIDDEN 0x4000 /* Hidden Flag */ #define L2TP_AVP_HDR_LEN_MASK 0x03ff /* Length Mask */ #define L2TP_FRAMING_CAP_SYNC_MASK 0x00000001 /* Synchronous */ #define L2TP_FRAMING_CAP_ASYNC_MASK 0x00000002 /* Asynchronous */ #define L2TP_FRAMING_TYPE_SYNC_MASK 0x00000001 /* Synchronous */ #define L2TP_FRAMING_TYPE_ASYNC_MASK 0x00000002 /* Asynchronous */ #define L2TP_BEARER_CAP_DIGITAL_MASK 0x00000001 /* Digital */ #define L2TP_BEARER_CAP_ANALOG_MASK 0x00000002 /* Analog */ #define L2TP_BEARER_TYPE_DIGITAL_MASK 0x00000001 /* Digital */ #define L2TP_BEARER_TYPE_ANALOG_MASK 0x00000002 /* Analog */ /* Authen Type */ #define L2TP_AUTHEN_TYPE_RESERVED 0x0000 /* Reserved */ #define L2TP_AUTHEN_TYPE_TEXTUAL 0x0001 /* Textual username/password exchange */ #define L2TP_AUTHEN_TYPE_CHAP 0x0002 /* PPP CHAP */ #define L2TP_AUTHEN_TYPE_PAP 0x0003 /* PPP PAP */ #define L2TP_AUTHEN_TYPE_NO_AUTH 0x0004 /* No Authentication */ #define L2TP_AUTHEN_TYPE_MSCHAPv1 0x0005 /* MSCHAPv1 */ #define L2TP_PROXY_AUTH_ID_MASK 0x00ff static const char tstr[] = " [|l2tp]"; #define L2TP_MSGTYPE_SCCRQ 1 /* Start-Control-Connection-Request */ #define L2TP_MSGTYPE_SCCRP 2 /* Start-Control-Connection-Reply */ #define L2TP_MSGTYPE_SCCCN 3 /* Start-Control-Connection-Connected */ #define 
L2TP_MSGTYPE_STOPCCN 4 /* Stop-Control-Connection-Notification */ #define L2TP_MSGTYPE_HELLO 6 /* Hello */ #define L2TP_MSGTYPE_OCRQ 7 /* Outgoing-Call-Request */ #define L2TP_MSGTYPE_OCRP 8 /* Outgoing-Call-Reply */ #define L2TP_MSGTYPE_OCCN 9 /* Outgoing-Call-Connected */ #define L2TP_MSGTYPE_ICRQ 10 /* Incoming-Call-Request */ #define L2TP_MSGTYPE_ICRP 11 /* Incoming-Call-Reply */ #define L2TP_MSGTYPE_ICCN 12 /* Incoming-Call-Connected */ #define L2TP_MSGTYPE_CDN 14 /* Call-Disconnect-Notify */ #define L2TP_MSGTYPE_WEN 15 /* WAN-Error-Notify */ #define L2TP_MSGTYPE_SLI 16 /* Set-Link-Info */ static const struct tok l2tp_msgtype2str[] = { { L2TP_MSGTYPE_SCCRQ, "SCCRQ" }, { L2TP_MSGTYPE_SCCRP, "SCCRP" }, { L2TP_MSGTYPE_SCCCN, "SCCCN" }, { L2TP_MSGTYPE_STOPCCN, "StopCCN" }, { L2TP_MSGTYPE_HELLO, "HELLO" }, { L2TP_MSGTYPE_OCRQ, "OCRQ" }, { L2TP_MSGTYPE_OCRP, "OCRP" }, { L2TP_MSGTYPE_OCCN, "OCCN" }, { L2TP_MSGTYPE_ICRQ, "ICRQ" }, { L2TP_MSGTYPE_ICRP, "ICRP" }, { L2TP_MSGTYPE_ICCN, "ICCN" }, { L2TP_MSGTYPE_CDN, "CDN" }, { L2TP_MSGTYPE_WEN, "WEN" }, { L2TP_MSGTYPE_SLI, "SLI" }, { 0, NULL } }; #define L2TP_AVP_MSGTYPE 0 /* Message Type */ #define L2TP_AVP_RESULT_CODE 1 /* Result Code */ #define L2TP_AVP_PROTO_VER 2 /* Protocol Version */ #define L2TP_AVP_FRAMING_CAP 3 /* Framing Capabilities */ #define L2TP_AVP_BEARER_CAP 4 /* Bearer Capabilities */ #define L2TP_AVP_TIE_BREAKER 5 /* Tie Breaker */ #define L2TP_AVP_FIRM_VER 6 /* Firmware Revision */ #define L2TP_AVP_HOST_NAME 7 /* Host Name */ #define L2TP_AVP_VENDOR_NAME 8 /* Vendor Name */ #define L2TP_AVP_ASSND_TUN_ID 9 /* Assigned Tunnel ID */ #define L2TP_AVP_RECV_WIN_SIZE 10 /* Receive Window Size */ #define L2TP_AVP_CHALLENGE 11 /* Challenge */ #define L2TP_AVP_Q931_CC 12 /* Q.931 Cause Code */ #define L2TP_AVP_CHALLENGE_RESP 13 /* Challenge Response */ #define L2TP_AVP_ASSND_SESS_ID 14 /* Assigned Session ID */ #define L2TP_AVP_CALL_SER_NUM 15 /* Call Serial Number */ #define L2TP_AVP_MINIMUM_BPS 16 /* Minimum 
BPS */ #define L2TP_AVP_MAXIMUM_BPS 17 /* Maximum BPS */ #define L2TP_AVP_BEARER_TYPE 18 /* Bearer Type */ #define L2TP_AVP_FRAMING_TYPE 19 /* Framing Type */ #define L2TP_AVP_PACKET_PROC_DELAY 20 /* Packet Processing Delay (OBSOLETE) */ #define L2TP_AVP_CALLED_NUMBER 21 /* Called Number */ #define L2TP_AVP_CALLING_NUMBER 22 /* Calling Number */ #define L2TP_AVP_SUB_ADDRESS 23 /* Sub-Address */ #define L2TP_AVP_TX_CONN_SPEED 24 /* (Tx) Connect Speed */ #define L2TP_AVP_PHY_CHANNEL_ID 25 /* Physical Channel ID */ #define L2TP_AVP_INI_RECV_LCP 26 /* Initial Received LCP CONFREQ */ #define L2TP_AVP_LAST_SENT_LCP 27 /* Last Sent LCP CONFREQ */ #define L2TP_AVP_LAST_RECV_LCP 28 /* Last Received LCP CONFREQ */ #define L2TP_AVP_PROXY_AUTH_TYPE 29 /* Proxy Authen Type */ #define L2TP_AVP_PROXY_AUTH_NAME 30 /* Proxy Authen Name */ #define L2TP_AVP_PROXY_AUTH_CHAL 31 /* Proxy Authen Challenge */ #define L2TP_AVP_PROXY_AUTH_ID 32 /* Proxy Authen ID */ #define L2TP_AVP_PROXY_AUTH_RESP 33 /* Proxy Authen Response */ #define L2TP_AVP_CALL_ERRORS 34 /* Call Errors */ #define L2TP_AVP_ACCM 35 /* ACCM */ #define L2TP_AVP_RANDOM_VECTOR 36 /* Random Vector */ #define L2TP_AVP_PRIVATE_GRP_ID 37 /* Private Group ID */ #define L2TP_AVP_RX_CONN_SPEED 38 /* (Rx) Connect Speed */ #define L2TP_AVP_SEQ_REQUIRED 39 /* Sequencing Required */ #define L2TP_AVP_PPP_DISCON_CC 46 /* PPP Disconnect Cause Code */ static const struct tok l2tp_avp2str[] = { { L2TP_AVP_MSGTYPE, "MSGTYPE" }, { L2TP_AVP_RESULT_CODE, "RESULT_CODE" }, { L2TP_AVP_PROTO_VER, "PROTO_VER" }, { L2TP_AVP_FRAMING_CAP, "FRAMING_CAP" }, { L2TP_AVP_BEARER_CAP, "BEARER_CAP" }, { L2TP_AVP_TIE_BREAKER, "TIE_BREAKER" }, { L2TP_AVP_FIRM_VER, "FIRM_VER" }, { L2TP_AVP_HOST_NAME, "HOST_NAME" }, { L2TP_AVP_VENDOR_NAME, "VENDOR_NAME" }, { L2TP_AVP_ASSND_TUN_ID, "ASSND_TUN_ID" }, { L2TP_AVP_RECV_WIN_SIZE, "RECV_WIN_SIZE" }, { L2TP_AVP_CHALLENGE, "CHALLENGE" }, { L2TP_AVP_Q931_CC, "Q931_CC", }, { L2TP_AVP_CHALLENGE_RESP, "CHALLENGE_RESP" }, { 
L2TP_AVP_ASSND_SESS_ID, "ASSND_SESS_ID" }, { L2TP_AVP_CALL_SER_NUM, "CALL_SER_NUM" }, { L2TP_AVP_MINIMUM_BPS, "MINIMUM_BPS" }, { L2TP_AVP_MAXIMUM_BPS, "MAXIMUM_BPS" }, { L2TP_AVP_BEARER_TYPE, "BEARER_TYPE" }, { L2TP_AVP_FRAMING_TYPE, "FRAMING_TYPE" }, { L2TP_AVP_PACKET_PROC_DELAY, "PACKET_PROC_DELAY" }, { L2TP_AVP_CALLED_NUMBER, "CALLED_NUMBER" }, { L2TP_AVP_CALLING_NUMBER, "CALLING_NUMBER" }, { L2TP_AVP_SUB_ADDRESS, "SUB_ADDRESS" }, { L2TP_AVP_TX_CONN_SPEED, "TX_CONN_SPEED" }, { L2TP_AVP_PHY_CHANNEL_ID, "PHY_CHANNEL_ID" }, { L2TP_AVP_INI_RECV_LCP, "INI_RECV_LCP" }, { L2TP_AVP_LAST_SENT_LCP, "LAST_SENT_LCP" }, { L2TP_AVP_LAST_RECV_LCP, "LAST_RECV_LCP" }, { L2TP_AVP_PROXY_AUTH_TYPE, "PROXY_AUTH_TYPE" }, { L2TP_AVP_PROXY_AUTH_NAME, "PROXY_AUTH_NAME" }, { L2TP_AVP_PROXY_AUTH_CHAL, "PROXY_AUTH_CHAL" }, { L2TP_AVP_PROXY_AUTH_ID, "PROXY_AUTH_ID" }, { L2TP_AVP_PROXY_AUTH_RESP, "PROXY_AUTH_RESP" }, { L2TP_AVP_CALL_ERRORS, "CALL_ERRORS" }, { L2TP_AVP_ACCM, "ACCM" }, { L2TP_AVP_RANDOM_VECTOR, "RANDOM_VECTOR" }, { L2TP_AVP_PRIVATE_GRP_ID, "PRIVATE_GRP_ID" }, { L2TP_AVP_RX_CONN_SPEED, "RX_CONN_SPEED" }, { L2TP_AVP_SEQ_REQUIRED, "SEQ_REQUIRED" }, { L2TP_AVP_PPP_DISCON_CC, "PPP_DISCON_CC" }, { 0, NULL } }; static const struct tok l2tp_authentype2str[] = { { L2TP_AUTHEN_TYPE_RESERVED, "Reserved" }, { L2TP_AUTHEN_TYPE_TEXTUAL, "Textual" }, { L2TP_AUTHEN_TYPE_CHAP, "CHAP" }, { L2TP_AUTHEN_TYPE_PAP, "PAP" }, { L2TP_AUTHEN_TYPE_NO_AUTH, "No Auth" }, { L2TP_AUTHEN_TYPE_MSCHAPv1, "MS-CHAPv1" }, { 0, NULL } }; #define L2TP_PPP_DISCON_CC_DIRECTION_GLOBAL 0 #define L2TP_PPP_DISCON_CC_DIRECTION_AT_PEER 1 #define L2TP_PPP_DISCON_CC_DIRECTION_AT_LOCAL 2 static const struct tok l2tp_cc_direction2str[] = { { L2TP_PPP_DISCON_CC_DIRECTION_GLOBAL, "global error" }, { L2TP_PPP_DISCON_CC_DIRECTION_AT_PEER, "at peer" }, { L2TP_PPP_DISCON_CC_DIRECTION_AT_LOCAL,"at local" }, { 0, NULL } }; #if 0 static char *l2tp_result_code_StopCCN[] = { "Reserved", "General request to clear control connection", 
"General error--Error Code indicates the problem", "Control channel already exists", "Requester is not authorized to establish a control channel", "The protocol version of the requester is not supported", "Requester is being shut down", "Finite State Machine error" #define L2TP_MAX_RESULT_CODE_STOPCC_INDEX 8 }; #endif #if 0 static char *l2tp_result_code_CDN[] = { "Reserved", "Call disconnected due to loss of carrier", "Call disconnected for the reason indicated in error code", "Call disconnected for administrative reasons", "Call failed due to lack of appropriate facilities being " \ "available (temporary condition)", "Call failed due to lack of appropriate facilities being " \ "available (permanent condition)", "Invalid destination", "Call failed due to no carrier detected", "Call failed due to detection of a busy signal", "Call failed due to lack of a dial tone", "Call was not established within time allotted by LAC", "Call was connected but no appropriate framing was detected" #define L2TP_MAX_RESULT_CODE_CDN_INDEX 12 }; #endif #if 0 static char *l2tp_error_code_general[] = { "No general error", "No control connection exists yet for this LAC-LNS pair", "Length is wrong", "One of the field values was out of range or " \ "reserved field was non-zero" "Insufficient resources to handle this operation now", "The Session ID is invalid in this context", "A generic vendor-specific error occurred in the LAC", "Try another" #define L2TP_MAX_ERROR_CODE_GENERAL_INDEX 8 }; #endif /******************************/ /* generic print out routines */ /******************************/ static void print_string(netdissect_options *ndo, const u_char *dat, u_int length) { u_int i; for (i=0; i<length; i++) { ND_PRINT((ndo, "%c", *dat++)); } } static void print_octets(netdissect_options *ndo, const u_char *dat, u_int length) { u_int i; for (i=0; i<length; i++) { ND_PRINT((ndo, "%02x", *dat++)); } } static void print_16bits_val(netdissect_options *ndo, const uint16_t *dat) { ND_PRINT((ndo, 
"%u", EXTRACT_16BITS(dat))); } static void print_32bits_val(netdissect_options *ndo, const uint32_t *dat) { ND_PRINT((ndo, "%lu", (u_long)EXTRACT_32BITS(dat))); } /***********************************/ /* AVP-specific print out routines */ /***********************************/ static void l2tp_msgtype_print(netdissect_options *ndo, const u_char *dat, u_int length) { const uint16_t *ptr = (const uint16_t *)dat; if (length < 2) { ND_PRINT((ndo, "AVP too short")); return; } ND_PRINT((ndo, "%s", tok2str(l2tp_msgtype2str, "MSGTYPE-#%u", EXTRACT_16BITS(ptr)))); } static void l2tp_result_code_print(netdissect_options *ndo, const u_char *dat, u_int length) { const uint16_t *ptr = (const uint16_t *)dat; /* Result Code */ if (length < 2) { ND_PRINT((ndo, "AVP too short")); return; } ND_PRINT((ndo, "%u", EXTRACT_16BITS(ptr))); ptr++; length -= 2; /* Error Code (opt) */ if (length == 0) return; if (length < 2) { ND_PRINT((ndo, " AVP too short")); return; } ND_PRINT((ndo, "/%u", EXTRACT_16BITS(ptr))); ptr++; length -= 2; /* Error Message (opt) */ if (length == 0) return; ND_PRINT((ndo, " ")); print_string(ndo, (const u_char *)ptr, length); } static void l2tp_proto_ver_print(netdissect_options *ndo, const uint16_t *dat, u_int length) { if (length < 2) { ND_PRINT((ndo, "AVP too short")); return; } ND_PRINT((ndo, "%u.%u", (EXTRACT_16BITS(dat) >> 8), (EXTRACT_16BITS(dat) & 0xff))); } static void l2tp_framing_cap_print(netdissect_options *ndo, const u_char *dat, u_int length) { const uint32_t *ptr = (const uint32_t *)dat; if (length < 4) { ND_PRINT((ndo, "AVP too short")); return; } if (EXTRACT_32BITS(ptr) & L2TP_FRAMING_CAP_ASYNC_MASK) { ND_PRINT((ndo, "A")); } if (EXTRACT_32BITS(ptr) & L2TP_FRAMING_CAP_SYNC_MASK) { ND_PRINT((ndo, "S")); } } static void l2tp_bearer_cap_print(netdissect_options *ndo, const u_char *dat, u_int length) { const uint32_t *ptr = (const uint32_t *)dat; if (length < 4) { ND_PRINT((ndo, "AVP too short")); return; } if (EXTRACT_32BITS(ptr) & 
L2TP_BEARER_CAP_ANALOG_MASK) { ND_PRINT((ndo, "A")); } if (EXTRACT_32BITS(ptr) & L2TP_BEARER_CAP_DIGITAL_MASK) { ND_PRINT((ndo, "D")); } } static void l2tp_q931_cc_print(netdissect_options *ndo, const u_char *dat, u_int length) { if (length < 3) { ND_PRINT((ndo, "AVP too short")); return; } print_16bits_val(ndo, (const uint16_t *)dat); ND_PRINT((ndo, ", %02x", dat[2])); dat += 3; length -= 3; if (length != 0) { ND_PRINT((ndo, " ")); print_string(ndo, dat, length); } } static void l2tp_bearer_type_print(netdissect_options *ndo, const u_char *dat, u_int length) { const uint32_t *ptr = (const uint32_t *)dat; if (length < 4) { ND_PRINT((ndo, "AVP too short")); return; } if (EXTRACT_32BITS(ptr) & L2TP_BEARER_TYPE_ANALOG_MASK) { ND_PRINT((ndo, "A")); } if (EXTRACT_32BITS(ptr) & L2TP_BEARER_TYPE_DIGITAL_MASK) { ND_PRINT((ndo, "D")); } } static void l2tp_framing_type_print(netdissect_options *ndo, const u_char *dat, u_int length) { const uint32_t *ptr = (const uint32_t *)dat; if (length < 4) { ND_PRINT((ndo, "AVP too short")); return; } if (EXTRACT_32BITS(ptr) & L2TP_FRAMING_TYPE_ASYNC_MASK) { ND_PRINT((ndo, "A")); } if (EXTRACT_32BITS(ptr) & L2TP_FRAMING_TYPE_SYNC_MASK) { ND_PRINT((ndo, "S")); } } static void l2tp_packet_proc_delay_print(netdissect_options *ndo) { ND_PRINT((ndo, "obsolete")); } static void l2tp_proxy_auth_type_print(netdissect_options *ndo, const u_char *dat, u_int length) { const uint16_t *ptr = (const uint16_t *)dat; if (length < 2) { ND_PRINT((ndo, "AVP too short")); return; } ND_PRINT((ndo, "%s", tok2str(l2tp_authentype2str, "AuthType-#%u", EXTRACT_16BITS(ptr)))); } static void l2tp_proxy_auth_id_print(netdissect_options *ndo, const u_char *dat, u_int length) { const uint16_t *ptr = (const uint16_t *)dat; if (length < 2) { ND_PRINT((ndo, "AVP too short")); return; } ND_PRINT((ndo, "%u", EXTRACT_16BITS(ptr) & L2TP_PROXY_AUTH_ID_MASK)); } static void l2tp_call_errors_print(netdissect_options *ndo, const u_char *dat, u_int length) { const uint16_t *ptr = 
(const uint16_t *)dat; uint16_t val_h, val_l; if (length < 2) { ND_PRINT((ndo, "AVP too short")); return; } ptr++; /* skip "Reserved" */ length -= 2; if (length < 4) { ND_PRINT((ndo, "AVP too short")); return; } val_h = EXTRACT_16BITS(ptr); ptr++; length -= 2; val_l = EXTRACT_16BITS(ptr); ptr++; length -= 2; ND_PRINT((ndo, "CRCErr=%u ", (val_h<<16) + val_l)); if (length < 4) { ND_PRINT((ndo, "AVP too short")); return; } val_h = EXTRACT_16BITS(ptr); ptr++; length -= 2; val_l = EXTRACT_16BITS(ptr); ptr++; length -= 2; ND_PRINT((ndo, "FrameErr=%u ", (val_h<<16) + val_l)); if (length < 4) { ND_PRINT((ndo, "AVP too short")); return; } val_h = EXTRACT_16BITS(ptr); ptr++; length -= 2; val_l = EXTRACT_16BITS(ptr); ptr++; length -= 2; ND_PRINT((ndo, "HardOver=%u ", (val_h<<16) + val_l)); if (length < 4) { ND_PRINT((ndo, "AVP too short")); return; } val_h = EXTRACT_16BITS(ptr); ptr++; length -= 2; val_l = EXTRACT_16BITS(ptr); ptr++; length -= 2; ND_PRINT((ndo, "BufOver=%u ", (val_h<<16) + val_l)); if (length < 4) { ND_PRINT((ndo, "AVP too short")); return; } val_h = EXTRACT_16BITS(ptr); ptr++; length -= 2; val_l = EXTRACT_16BITS(ptr); ptr++; length -= 2; ND_PRINT((ndo, "Timeout=%u ", (val_h<<16) + val_l)); if (length < 4) { ND_PRINT((ndo, "AVP too short")); return; } val_h = EXTRACT_16BITS(ptr); ptr++; val_l = EXTRACT_16BITS(ptr); ptr++; ND_PRINT((ndo, "AlignErr=%u ", (val_h<<16) + val_l)); } static void l2tp_accm_print(netdissect_options *ndo, const u_char *dat, u_int length) { const uint16_t *ptr = (const uint16_t *)dat; uint16_t val_h, val_l; if (length < 2) { ND_PRINT((ndo, "AVP too short")); return; } ptr++; /* skip "Reserved" */ length -= 2; if (length < 4) { ND_PRINT((ndo, "AVP too short")); return; } val_h = EXTRACT_16BITS(ptr); ptr++; length -= 2; val_l = EXTRACT_16BITS(ptr); ptr++; length -= 2; ND_PRINT((ndo, "send=%08x ", (val_h<<16) + val_l)); if (length < 4) { ND_PRINT((ndo, "AVP too short")); return; } val_h = EXTRACT_16BITS(ptr); ptr++; val_l = 
EXTRACT_16BITS(ptr); ptr++; ND_PRINT((ndo, "recv=%08x ", (val_h<<16) + val_l)); } static void l2tp_ppp_discon_cc_print(netdissect_options *ndo, const u_char *dat, u_int length) { const uint16_t *ptr = (const uint16_t *)dat; if (length < 5) { ND_PRINT((ndo, "AVP too short")); return; } /* Disconnect Code */ ND_PRINT((ndo, "%04x, ", EXTRACT_16BITS(dat))); dat += 2; length -= 2; /* Control Protocol Number */ ND_PRINT((ndo, "%04x ", EXTRACT_16BITS(dat))); dat += 2; length -= 2; /* Direction */ ND_PRINT((ndo, "%s", tok2str(l2tp_cc_direction2str, "Direction-#%u", EXTRACT_8BITS(ptr)))); ptr++; length--; if (length != 0) { ND_PRINT((ndo, " ")); print_string(ndo, (const u_char *)ptr, length); } } static void l2tp_avp_print(netdissect_options *ndo, const u_char *dat, int length) { u_int len; const uint16_t *ptr = (const uint16_t *)dat; uint16_t attr_type; int hidden = FALSE; if (length <= 0) { return; } ND_PRINT((ndo, " ")); ND_TCHECK(*ptr); /* Flags & Length */ len = EXTRACT_16BITS(ptr) & L2TP_AVP_HDR_LEN_MASK; /* If it is not long enough to contain the header, we'll give up. */ if (len < 6) goto trunc; /* If it goes past the end of the remaining length of the packet, we'll give up. */ if (len > (u_int)length) goto trunc; /* If it goes past the end of the remaining length of the captured data, we'll give up. */ ND_TCHECK2(*ptr, len); /* * After this point, we don't need to check whether we go past * the length of the captured data; however, we *do* need to * check whether we go past the end of the AVP. 
*/ if (EXTRACT_16BITS(ptr) & L2TP_AVP_HDR_FLAG_MANDATORY) { ND_PRINT((ndo, "*")); } if (EXTRACT_16BITS(ptr) & L2TP_AVP_HDR_FLAG_HIDDEN) { hidden = TRUE; ND_PRINT((ndo, "?")); } ptr++; if (EXTRACT_16BITS(ptr)) { /* Vendor Specific Attribute */ ND_PRINT((ndo, "VENDOR%04x:", EXTRACT_16BITS(ptr))); ptr++; ND_PRINT((ndo, "ATTR%04x", EXTRACT_16BITS(ptr))); ptr++; ND_PRINT((ndo, "(")); print_octets(ndo, (const u_char *)ptr, len-6); ND_PRINT((ndo, ")")); } else { /* IETF-defined Attributes */ ptr++; attr_type = EXTRACT_16BITS(ptr); ptr++; ND_PRINT((ndo, "%s", tok2str(l2tp_avp2str, "AVP-#%u", attr_type))); ND_PRINT((ndo, "(")); if (hidden) { ND_PRINT((ndo, "???")); } else { switch (attr_type) { case L2TP_AVP_MSGTYPE: l2tp_msgtype_print(ndo, (const u_char *)ptr, len-6); break; case L2TP_AVP_RESULT_CODE: l2tp_result_code_print(ndo, (const u_char *)ptr, len-6); break; case L2TP_AVP_PROTO_VER: l2tp_proto_ver_print(ndo, ptr, len-6); break; case L2TP_AVP_FRAMING_CAP: l2tp_framing_cap_print(ndo, (const u_char *)ptr, len-6); break; case L2TP_AVP_BEARER_CAP: l2tp_bearer_cap_print(ndo, (const u_char *)ptr, len-6); break; case L2TP_AVP_TIE_BREAKER: if (len-6 < 8) { ND_PRINT((ndo, "AVP too short")); break; } print_octets(ndo, (const u_char *)ptr, 8); break; case L2TP_AVP_FIRM_VER: case L2TP_AVP_ASSND_TUN_ID: case L2TP_AVP_RECV_WIN_SIZE: case L2TP_AVP_ASSND_SESS_ID: if (len-6 < 2) { ND_PRINT((ndo, "AVP too short")); break; } print_16bits_val(ndo, ptr); break; case L2TP_AVP_HOST_NAME: case L2TP_AVP_VENDOR_NAME: case L2TP_AVP_CALLING_NUMBER: case L2TP_AVP_CALLED_NUMBER: case L2TP_AVP_SUB_ADDRESS: case L2TP_AVP_PROXY_AUTH_NAME: case L2TP_AVP_PRIVATE_GRP_ID: print_string(ndo, (const u_char *)ptr, len-6); break; case L2TP_AVP_CHALLENGE: case L2TP_AVP_INI_RECV_LCP: case L2TP_AVP_LAST_SENT_LCP: case L2TP_AVP_LAST_RECV_LCP: case L2TP_AVP_PROXY_AUTH_CHAL: case L2TP_AVP_PROXY_AUTH_RESP: case L2TP_AVP_RANDOM_VECTOR: print_octets(ndo, (const u_char *)ptr, len-6); break; case L2TP_AVP_Q931_CC: 
l2tp_q931_cc_print(ndo, (const u_char *)ptr, len-6); break; case L2TP_AVP_CHALLENGE_RESP: if (len-6 < 16) { ND_PRINT((ndo, "AVP too short")); break; } print_octets(ndo, (const u_char *)ptr, 16); break; case L2TP_AVP_CALL_SER_NUM: case L2TP_AVP_MINIMUM_BPS: case L2TP_AVP_MAXIMUM_BPS: case L2TP_AVP_TX_CONN_SPEED: case L2TP_AVP_PHY_CHANNEL_ID: case L2TP_AVP_RX_CONN_SPEED: if (len-6 < 4) { ND_PRINT((ndo, "AVP too short")); break; } print_32bits_val(ndo, (const uint32_t *)ptr); break; case L2TP_AVP_BEARER_TYPE: l2tp_bearer_type_print(ndo, (const u_char *)ptr, len-6); break; case L2TP_AVP_FRAMING_TYPE: l2tp_framing_type_print(ndo, (const u_char *)ptr, len-6); break; case L2TP_AVP_PACKET_PROC_DELAY: l2tp_packet_proc_delay_print(ndo); break; case L2TP_AVP_PROXY_AUTH_TYPE: l2tp_proxy_auth_type_print(ndo, (const u_char *)ptr, len-6); break; case L2TP_AVP_PROXY_AUTH_ID: l2tp_proxy_auth_id_print(ndo, (const u_char *)ptr, len-6); break; case L2TP_AVP_CALL_ERRORS: l2tp_call_errors_print(ndo, (const u_char *)ptr, len-6); break; case L2TP_AVP_ACCM: l2tp_accm_print(ndo, (const u_char *)ptr, len-6); break; case L2TP_AVP_SEQ_REQUIRED: break; /* No Attribute Value */ case L2TP_AVP_PPP_DISCON_CC: l2tp_ppp_discon_cc_print(ndo, (const u_char *)ptr, len-6); break; default: break; } } ND_PRINT((ndo, ")")); } l2tp_avp_print(ndo, dat+len, length-len); return; trunc: ND_PRINT((ndo, "|...")); } void l2tp_print(netdissect_options *ndo, const u_char *dat, u_int length) { const u_char *ptr = dat; u_int cnt = 0; /* total octets consumed */ uint16_t pad; int flag_t, flag_l, flag_s, flag_o; uint16_t l2tp_len; flag_t = flag_l = flag_s = flag_o = FALSE; ND_TCHECK2(*ptr, 2); /* Flags & Version */ if ((EXTRACT_16BITS(ptr) & L2TP_VERSION_MASK) == L2TP_VERSION_L2TP) { ND_PRINT((ndo, " l2tp:")); } else if ((EXTRACT_16BITS(ptr) & L2TP_VERSION_MASK) == L2TP_VERSION_L2F) { ND_PRINT((ndo, " l2f:")); return; /* nothing to do */ } else { ND_PRINT((ndo, " Unknown Version, neither L2F(1) nor L2TP(2)")); return; /* 
nothing we can do */ } ND_PRINT((ndo, "[")); if (EXTRACT_16BITS(ptr) & L2TP_FLAG_TYPE) { flag_t = TRUE; ND_PRINT((ndo, "T")); } if (EXTRACT_16BITS(ptr) & L2TP_FLAG_LENGTH) { flag_l = TRUE; ND_PRINT((ndo, "L")); } if (EXTRACT_16BITS(ptr) & L2TP_FLAG_SEQUENCE) { flag_s = TRUE; ND_PRINT((ndo, "S")); } if (EXTRACT_16BITS(ptr) & L2TP_FLAG_OFFSET) { flag_o = TRUE; ND_PRINT((ndo, "O")); } if (EXTRACT_16BITS(ptr) & L2TP_FLAG_PRIORITY) ND_PRINT((ndo, "P")); ND_PRINT((ndo, "]")); ptr += 2; cnt += 2; if (flag_l) { ND_TCHECK2(*ptr, 2); /* Length */ l2tp_len = EXTRACT_16BITS(ptr); ptr += 2; cnt += 2; } else { l2tp_len = 0; } ND_TCHECK2(*ptr, 2); /* Tunnel ID */ ND_PRINT((ndo, "(%u/", EXTRACT_16BITS(ptr))); ptr += 2; cnt += 2; ND_TCHECK2(*ptr, 2); /* Session ID */ ND_PRINT((ndo, "%u)", EXTRACT_16BITS(ptr))); ptr += 2; cnt += 2; if (flag_s) { ND_TCHECK2(*ptr, 2); /* Ns */ ND_PRINT((ndo, "Ns=%u,", EXTRACT_16BITS(ptr))); ptr += 2; cnt += 2; ND_TCHECK2(*ptr, 2); /* Nr */ ND_PRINT((ndo, "Nr=%u", EXTRACT_16BITS(ptr))); ptr += 2; cnt += 2; } if (flag_o) { ND_TCHECK2(*ptr, 2); /* Offset Size */ pad = EXTRACT_16BITS(ptr); ptr += (2 + pad); cnt += (2 + pad); } if (flag_l) { if (length < l2tp_len) { ND_PRINT((ndo, " Length %u larger than packet", l2tp_len)); return; } length = l2tp_len; } if (length < cnt) { ND_PRINT((ndo, " Length %u smaller than header length", length)); return; } if (flag_t) { if (!flag_l) { ND_PRINT((ndo, " No length")); return; } if (length - cnt == 0) { ND_PRINT((ndo, " ZLB")); } else { l2tp_avp_print(ndo, ptr, length - cnt); } } else { ND_PRINT((ndo, " {")); ppp_print(ndo, ptr, length - cnt); ND_PRINT((ndo, "}")); } return; trunc: ND_PRINT((ndo, "%s", tstr)); }
l2tp_framing_type_print(netdissect_options *ndo, const u_char *dat) { const uint32_t *ptr = (const uint32_t *)dat; if (EXTRACT_32BITS(ptr) & L2TP_FRAMING_TYPE_ASYNC_MASK) { ND_PRINT((ndo, "A")); } if (EXTRACT_32BITS(ptr) & L2TP_FRAMING_TYPE_SYNC_MASK) { ND_PRINT((ndo, "S")); } }
l2tp_framing_type_print(netdissect_options *ndo, const u_char *dat, u_int length) { const uint32_t *ptr = (const uint32_t *)dat; if (length < 4) { ND_PRINT((ndo, "AVP too short")); return; } if (EXTRACT_32BITS(ptr) & L2TP_FRAMING_TYPE_ASYNC_MASK) { ND_PRINT((ndo, "A")); } if (EXTRACT_32BITS(ptr) & L2TP_FRAMING_TYPE_SYNC_MASK) { ND_PRINT((ndo, "S")); } }
{'added': [(300, 'l2tp_msgtype_print(netdissect_options *ndo, const u_char *dat, u_int length)'), (304, '\tif (length < 2) {'), (305, '\t\tND_PRINT((ndo, "AVP too short"));'), (306, '\t\treturn;'), (307, '\t}'), (317, '\t/* Result Code */'), (318, '\tif (length < 2) {'), (319, '\t\tND_PRINT((ndo, "AVP too short"));'), (320, '\t\treturn;'), (322, '\tND_PRINT((ndo, "%u", EXTRACT_16BITS(ptr)));'), (323, '\tptr++;'), (324, '\tlength -= 2;'), (325, ''), (326, '\t/* Error Code (opt) */'), (327, '\tif (length == 0)'), (328, '\t\treturn;'), (329, '\tif (length < 2) {'), (330, '\t\tND_PRINT((ndo, " AVP too short"));'), (331, '\t\treturn;'), (333, '\tND_PRINT((ndo, "/%u", EXTRACT_16BITS(ptr)));'), (334, '\tptr++;'), (335, '\tlength -= 2;'), (336, ''), (337, '\t/* Error Message (opt) */'), (338, '\tif (length == 0)'), (339, '\t\treturn;'), (340, '\tND_PRINT((ndo, " "));'), (341, '\tprint_string(ndo, (const u_char *)ptr, length);'), (345, 'l2tp_proto_ver_print(netdissect_options *ndo, const uint16_t *dat, u_int length)'), (347, '\tif (length < 2) {'), (348, '\t\tND_PRINT((ndo, "AVP too short"));'), (349, '\t\treturn;'), (350, '\t}'), (356, 'l2tp_framing_cap_print(netdissect_options *ndo, const u_char *dat, u_int length)'), (360, '\tif (length < 4) {'), (361, '\t\tND_PRINT((ndo, "AVP too short"));'), (362, '\t\treturn;'), (363, '\t}'), (373, 'l2tp_bearer_cap_print(netdissect_options *ndo, const u_char *dat, u_int length)'), (377, '\tif (length < 4) {'), (378, '\t\tND_PRINT((ndo, "AVP too short"));'), (379, '\t\treturn;'), (380, '\t}'), (392, '\tif (length < 3) {'), (393, '\t\tND_PRINT((ndo, "AVP too short"));'), (394, '\t\treturn;'), (395, '\t}'), (398, '\tdat += 3;'), (399, '\tlength -= 3;'), (400, '\tif (length != 0) {'), (402, '\t\tprint_string(ndo, dat, length);'), (407, 'l2tp_bearer_type_print(netdissect_options *ndo, const u_char *dat, u_int length)'), (411, '\tif (length < 4) {'), (412, '\t\tND_PRINT((ndo, "AVP too short"));'), (413, '\t\treturn;'), (414, '\t}'), (424, 
'l2tp_framing_type_print(netdissect_options *ndo, const u_char *dat, u_int length)'), (428, '\tif (length < 4) {'), (429, '\t\tND_PRINT((ndo, "AVP too short"));'), (430, '\t\treturn;'), (431, '\t}'), (447, 'l2tp_proxy_auth_type_print(netdissect_options *ndo, const u_char *dat, u_int length)'), (451, '\tif (length < 2) {'), (452, '\t\tND_PRINT((ndo, "AVP too short"));'), (453, '\t\treturn;'), (454, '\t}'), (460, 'l2tp_proxy_auth_id_print(netdissect_options *ndo, const u_char *dat, u_int length)'), (464, '\tif (length < 2) {'), (465, '\t\tND_PRINT((ndo, "AVP too short"));'), (466, '\t\treturn;'), (467, '\t}'), (472, 'l2tp_call_errors_print(netdissect_options *ndo, const u_char *dat, u_int length)'), (477, '\tif (length < 2) {'), (478, '\t\tND_PRINT((ndo, "AVP too short"));'), (479, '\t\treturn;'), (480, '\t}'), (482, '\tlength -= 2;'), (484, '\tif (length < 4) {'), (485, '\t\tND_PRINT((ndo, "AVP too short"));'), (486, '\t\treturn;'), (487, '\t}'), (488, '\tval_h = EXTRACT_16BITS(ptr); ptr++; length -= 2;'), (489, '\tval_l = EXTRACT_16BITS(ptr); ptr++; length -= 2;'), (492, '\tif (length < 4) {'), (493, '\t\tND_PRINT((ndo, "AVP too short"));'), (494, '\t\treturn;'), (495, '\t}'), (496, '\tval_h = EXTRACT_16BITS(ptr); ptr++; length -= 2;'), (497, '\tval_l = EXTRACT_16BITS(ptr); ptr++; length -= 2;'), (500, '\tif (length < 4) {'), (501, '\t\tND_PRINT((ndo, "AVP too short"));'), (502, '\t\treturn;'), (503, '\t}'), (504, '\tval_h = EXTRACT_16BITS(ptr); ptr++; length -= 2;'), (505, '\tval_l = EXTRACT_16BITS(ptr); ptr++; length -= 2;'), (508, '\tif (length < 4) {'), (509, '\t\tND_PRINT((ndo, "AVP too short"));'), (510, '\t\treturn;'), (511, '\t}'), (512, '\tval_h = EXTRACT_16BITS(ptr); ptr++; length -= 2;'), (513, '\tval_l = EXTRACT_16BITS(ptr); ptr++; length -= 2;'), (516, '\tif (length < 4) {'), (517, '\t\tND_PRINT((ndo, "AVP too short"));'), (518, '\t\treturn;'), (519, '\t}'), (520, '\tval_h = EXTRACT_16BITS(ptr); ptr++; length -= 2;'), (521, '\tval_l = 
EXTRACT_16BITS(ptr); ptr++; length -= 2;'), (524, '\tif (length < 4) {'), (525, '\t\tND_PRINT((ndo, "AVP too short"));'), (526, '\t\treturn;'), (527, '\t}'), (534, 'l2tp_accm_print(netdissect_options *ndo, const u_char *dat, u_int length)'), (539, '\tif (length < 2) {'), (540, '\t\tND_PRINT((ndo, "AVP too short"));'), (541, '\t\treturn;'), (542, '\t}'), (544, '\tlength -= 2;'), (546, '\tif (length < 4) {'), (547, '\t\tND_PRINT((ndo, "AVP too short"));'), (548, '\t\treturn;'), (549, '\t}'), (550, '\tval_h = EXTRACT_16BITS(ptr); ptr++; length -= 2;'), (551, '\tval_l = EXTRACT_16BITS(ptr); ptr++; length -= 2;'), (554, '\tif (length < 4) {'), (555, '\t\tND_PRINT((ndo, "AVP too short"));'), (556, '\t\treturn;'), (557, '\t}'), (568, '\tif (length < 5) {'), (569, '\t\tND_PRINT((ndo, "AVP too short"));'), (570, '\t\treturn;'), (571, '\t}'), (572, '\t/* Disconnect Code */'), (573, '\tND_PRINT((ndo, "%04x, ", EXTRACT_16BITS(dat)));'), (574, '\tdat += 2;'), (575, '\tlength -= 2;'), (576, '\t/* Control Protocol Number */'), (577, '\tND_PRINT((ndo, "%04x ", EXTRACT_16BITS(dat)));'), (578, '\tdat += 2;'), (579, '\tlength -= 2;'), (580, '\t/* Direction */'), (582, '\t\t\t "Direction-#%u", EXTRACT_8BITS(ptr))));'), (583, '\tptr++;'), (584, '\tlength--;'), (586, '\tif (length != 0) {'), (588, '\t\tprint_string(ndo, (const u_char *)ptr, length);'), (621, ''), (622, '\t/*'), (623, "\t * After this point, we don't need to check whether we go past"), (624, '\t * the length of the captured data; however, we *do* need to'), (625, '\t * check whether we go past the end of the AVP.'), (626, '\t */'), (655, '\t\t\t\tl2tp_msgtype_print(ndo, (const u_char *)ptr, len-6);'), (661, '\t\t\t\tl2tp_proto_ver_print(ndo, ptr, len-6);'), (664, '\t\t\t\tl2tp_framing_cap_print(ndo, (const u_char *)ptr, len-6);'), (667, '\t\t\t\tl2tp_bearer_cap_print(ndo, (const u_char *)ptr, len-6);'), (670, '\t\t\t\tif (len-6 < 8) {'), (671, '\t\t\t\t\tND_PRINT((ndo, "AVP too short"));'), (672, '\t\t\t\t\tbreak;'), 
(673, '\t\t\t\t}'), (680, '\t\t\t\tif (len-6 < 2) {'), (681, '\t\t\t\t\tND_PRINT((ndo, "AVP too short"));'), (682, '\t\t\t\t\tbreak;'), (683, '\t\t\t\t}'), (708, '\t\t\t\tif (len-6 < 16) {'), (709, '\t\t\t\t\tND_PRINT((ndo, "AVP too short"));'), (710, '\t\t\t\t\tbreak;'), (711, '\t\t\t\t}'), (720, '\t\t\t\tif (len-6 < 4) {'), (721, '\t\t\t\t\tND_PRINT((ndo, "AVP too short"));'), (722, '\t\t\t\t\tbreak;'), (723, '\t\t\t\t}'), (727, '\t\t\t\tl2tp_bearer_type_print(ndo, (const u_char *)ptr, len-6);'), (730, '\t\t\t\tl2tp_framing_type_print(ndo, (const u_char *)ptr, len-6);'), (736, '\t\t\t\tl2tp_proxy_auth_type_print(ndo, (const u_char *)ptr, len-6);'), (739, '\t\t\t\tl2tp_proxy_auth_id_print(ndo, (const u_char *)ptr, len-6);'), (742, '\t\t\t\tl2tp_call_errors_print(ndo, (const u_char *)ptr, len-6);'), (745, '\t\t\t\tl2tp_accm_print(ndo, (const u_char *)ptr, len-6);')], 'deleted': [(300, 'l2tp_msgtype_print(netdissect_options *ndo, const u_char *dat)'), (313, '\tND_PRINT((ndo, "%u", EXTRACT_16BITS(ptr))); ptr++;\t/* Result Code */'), (314, '\tif (length > 2) {\t\t\t\t/* Error Code (opt) */'), (315, '\t ND_PRINT((ndo, "/%u", EXTRACT_16BITS(ptr))); ptr++;'), (317, '\tif (length > 4) {\t\t\t\t/* Error Message (opt) */'), (318, '\t\tND_PRINT((ndo, " "));'), (319, '\t\tprint_string(ndo, (const u_char *)ptr, length - 4);'), (324, 'l2tp_proto_ver_print(netdissect_options *ndo, const uint16_t *dat)'), (331, 'l2tp_framing_cap_print(netdissect_options *ndo, const u_char *dat)'), (344, 'l2tp_bearer_cap_print(netdissect_options *ndo, const u_char *dat)'), (361, '\tif (length > 3) {'), (363, '\t\tprint_string(ndo, dat+3, length-3);'), (368, 'l2tp_bearer_type_print(netdissect_options *ndo, const u_char *dat)'), (381, 'l2tp_framing_type_print(netdissect_options *ndo, const u_char *dat)'), (400, 'l2tp_proxy_auth_type_print(netdissect_options *ndo, const u_char *dat)'), (409, 'l2tp_proxy_auth_id_print(netdissect_options *ndo, const u_char *dat)'), (417, 
'l2tp_call_errors_print(netdissect_options *ndo, const u_char *dat)'), (424, '\tval_h = EXTRACT_16BITS(ptr); ptr++;'), (425, '\tval_l = EXTRACT_16BITS(ptr); ptr++;'), (428, '\tval_h = EXTRACT_16BITS(ptr); ptr++;'), (429, '\tval_l = EXTRACT_16BITS(ptr); ptr++;'), (432, '\tval_h = EXTRACT_16BITS(ptr); ptr++;'), (433, '\tval_l = EXTRACT_16BITS(ptr); ptr++;'), (436, '\tval_h = EXTRACT_16BITS(ptr); ptr++;'), (437, '\tval_l = EXTRACT_16BITS(ptr); ptr++;'), (440, '\tval_h = EXTRACT_16BITS(ptr); ptr++;'), (441, '\tval_l = EXTRACT_16BITS(ptr); ptr++;'), (450, 'l2tp_accm_print(netdissect_options *ndo, const u_char *dat)'), (457, '\tval_h = EXTRACT_16BITS(ptr); ptr++;'), (458, '\tval_l = EXTRACT_16BITS(ptr); ptr++;'), (471, '\tND_PRINT((ndo, "%04x, ", EXTRACT_16BITS(ptr))); ptr++;\t/* Disconnect Code */'), (472, '\tND_PRINT((ndo, "%04x ", EXTRACT_16BITS(ptr))); ptr++;\t/* Control Protocol Number */'), (474, '\t\t\t "Direction-#%u", *((const u_char *)ptr++))));'), (476, '\tif (length > 5) {'), (478, '\t\tprint_string(ndo, (const u_char *)ptr, length-5);'), (511, '\t/* After this point, no need to worry about truncation */'), (540, '\t\t\t\tl2tp_msgtype_print(ndo, (const u_char *)ptr);'), (546, '\t\t\t\tl2tp_proto_ver_print(ndo, ptr);'), (549, '\t\t\t\tl2tp_framing_cap_print(ndo, (const u_char *)ptr);'), (552, '\t\t\t\tl2tp_bearer_cap_print(ndo, (const u_char *)ptr);'), (596, '\t\t\t\tl2tp_bearer_type_print(ndo, (const u_char *)ptr);'), (599, '\t\t\t\tl2tp_framing_type_print(ndo, (const u_char *)ptr);'), (605, '\t\t\t\tl2tp_proxy_auth_type_print(ndo, (const u_char *)ptr);'), (608, '\t\t\t\tl2tp_proxy_auth_id_print(ndo, (const u_char *)ptr);'), (611, '\t\t\t\tl2tp_call_errors_print(ndo, (const u_char *)ptr);'), (614, '\t\t\t\tl2tp_accm_print(ndo, (const u_char *)ptr);')]}
177
46
648
3,873
10
65
3
https://github.com/the-tcpdump-group/tcpdump
CVE-2017-13006
CWE-125
514
util.c
C
parse_reconnect
/* * Copyright 2011-2013 Con Kolivas * Copyright 2010 Jeff Garzik * * This program is free software; you can redistribute it and/or modify it * under the terms of the GNU General Public License as published by the Free * Software Foundation; either version 3 of the License, or (at your option) * any later version. See COPYING for more details. */ #include "config.h" #include <stdio.h> #include <stdlib.h> #include <ctype.h> #include <stdarg.h> #include <string.h> #include <jansson.h> #ifdef HAVE_LIBCURL #include <curl/curl.h> #endif #include <time.h> #include <errno.h> #include <unistd.h> #include <sys/types.h> #ifndef WIN32 #include <fcntl.h> # ifdef __linux # include <sys/prctl.h> # endif # include <sys/socket.h> # include <netinet/in.h> # include <netinet/tcp.h> # include <netdb.h> #else # include <winsock2.h> # include <ws2tcpip.h> # include <mmsystem.h> #endif #include "miner.h" #include "elist.h" #include "compat.h" #include "util.h" #define DEFAULT_SOCKWAIT 60 bool successful_connect = false; static void keep_sockalive(SOCKETTYPE fd) { const int tcp_one = 1; #ifndef WIN32 const int tcp_keepidle = 45; const int tcp_keepintvl = 30; int flags = fcntl(fd, F_GETFL, 0); fcntl(fd, F_SETFL, O_NONBLOCK | flags); #else u_long flags = 1; ioctlsocket(fd, FIONBIO, &flags); #endif setsockopt(fd, SOL_SOCKET, SO_KEEPALIVE, (const void *)&tcp_one, sizeof(tcp_one)); if (!opt_delaynet) #ifndef __linux setsockopt(fd, IPPROTO_TCP, TCP_NODELAY, (const void *)&tcp_one, sizeof(tcp_one)); #else /* __linux */ setsockopt(fd, SOL_TCP, TCP_NODELAY, (const void *)&tcp_one, sizeof(tcp_one)); setsockopt(fd, SOL_TCP, TCP_KEEPCNT, &tcp_one, sizeof(tcp_one)); setsockopt(fd, SOL_TCP, TCP_KEEPIDLE, &tcp_keepidle, sizeof(tcp_keepidle)); setsockopt(fd, SOL_TCP, TCP_KEEPINTVL, &tcp_keepintvl, sizeof(tcp_keepintvl)); #endif /* __linux */ #ifdef __APPLE_CC__ setsockopt(fd, IPPROTO_TCP, TCP_KEEPALIVE, &tcp_keepintvl, sizeof(tcp_keepintvl)); #endif /* __APPLE_CC__ */ } struct tq_ent { void *data; 
struct list_head q_node; }; #ifdef HAVE_LIBCURL struct timeval nettime; struct data_buffer { void *buf; size_t len; }; struct upload_buffer { const void *buf; size_t len; }; struct header_info { char *lp_path; int rolltime; char *reason; char *stratum_url; bool hadrolltime; bool canroll; bool hadexpire; }; static void databuf_free(struct data_buffer *db) { if (!db) return; free(db->buf); memset(db, 0, sizeof(*db)); } static size_t all_data_cb(const void *ptr, size_t size, size_t nmemb, void *user_data) { struct data_buffer *db = user_data; size_t len = size * nmemb; size_t oldlen, newlen; void *newmem; static const unsigned char zero = 0; oldlen = db->len; newlen = oldlen + len; newmem = realloc(db->buf, newlen + 1); if (!newmem) return 0; db->buf = newmem; db->len = newlen; memcpy(db->buf + oldlen, ptr, len); memcpy(db->buf + newlen, &zero, 1); /* null terminate */ return len; } static size_t upload_data_cb(void *ptr, size_t size, size_t nmemb, void *user_data) { struct upload_buffer *ub = user_data; unsigned int len = size * nmemb; if (len > ub->len) len = ub->len; if (len) { memcpy(ptr, ub->buf, len); ub->buf += len; ub->len -= len; } return len; } static size_t resp_hdr_cb(void *ptr, size_t size, size_t nmemb, void *user_data) { struct header_info *hi = user_data; size_t remlen, slen, ptrlen = size * nmemb; char *rem, *val = NULL, *key = NULL; void *tmp; val = calloc(1, ptrlen); key = calloc(1, ptrlen); if (!key || !val) goto out; tmp = memchr(ptr, ':', ptrlen); if (!tmp || (tmp == ptr)) /* skip empty keys / blanks */ goto out; slen = tmp - ptr; if ((slen + 1) == ptrlen) /* skip key w/ no value */ goto out; memcpy(key, ptr, slen); /* store & nul term key */ key[slen] = 0; rem = ptr + slen + 1; /* trim value's leading whitespace */ remlen = ptrlen - slen - 1; while ((remlen > 0) && (isspace(*rem))) { remlen--; rem++; } memcpy(val, rem, remlen); /* store value, trim trailing ws */ val[remlen] = 0; while ((*val) && (isspace(val[strlen(val) - 1]))) val[strlen(val) 
- 1] = 0; if (!*val) /* skip blank value */ goto out; if (opt_protocol) applog(LOG_DEBUG, "HTTP hdr(%s): %s", key, val); if (!strcasecmp("X-Roll-Ntime", key)) { hi->hadrolltime = true; if (!strncasecmp("N", val, 1)) applog(LOG_DEBUG, "X-Roll-Ntime: N found"); else { hi->canroll = true; /* Check to see if expire= is supported and if not, set * the rolltime to the default scantime */ if (strlen(val) > 7 && !strncasecmp("expire=", val, 7)) { sscanf(val + 7, "%d", &hi->rolltime); hi->hadexpire = true; } else hi->rolltime = opt_scantime; applog(LOG_DEBUG, "X-Roll-Ntime expiry set to %d", hi->rolltime); } } if (!strcasecmp("X-Long-Polling", key)) { hi->lp_path = val; /* steal memory reference */ val = NULL; } if (!strcasecmp("X-Reject-Reason", key)) { hi->reason = val; /* steal memory reference */ val = NULL; } if (!strcasecmp("X-Stratum", key)) { hi->stratum_url = val; val = NULL; } out: free(key); free(val); return ptrlen; } static void last_nettime(struct timeval *last) { rd_lock(&netacc_lock); last->tv_sec = nettime.tv_sec; last->tv_usec = nettime.tv_usec; rd_unlock(&netacc_lock); } static void set_nettime(void) { wr_lock(&netacc_lock); cgtime(&nettime); wr_unlock(&netacc_lock); } #if CURL_HAS_KEEPALIVE static void keep_curlalive(CURL *curl) { const int tcp_keepidle = 45; const int tcp_keepintvl = 30; const long int keepalive = 1; curl_easy_setopt(curl, CURLOPT_TCP_KEEPALIVE, keepalive); curl_easy_setopt(curl, CURLOPT_TCP_KEEPIDLE, tcp_keepidle); curl_easy_setopt(curl, CURLOPT_TCP_KEEPINTVL, tcp_keepintvl); } #else static void keep_curlalive(CURL *curl) { SOCKETTYPE sock; curl_easy_getinfo(curl, CURLINFO_LASTSOCKET, (long *)&sock); keep_sockalive(sock); } #endif static int curl_debug_cb(__maybe_unused CURL *handle, curl_infotype type, __maybe_unused char *data, size_t size, void *userdata) { struct pool *pool = (struct pool *)userdata; switch(type) { case CURLINFO_HEADER_IN: case CURLINFO_DATA_IN: case CURLINFO_SSL_DATA_IN: pool->cgminer_pool_stats.net_bytes_received 
+= size; break; case CURLINFO_HEADER_OUT: case CURLINFO_DATA_OUT: case CURLINFO_SSL_DATA_OUT: pool->cgminer_pool_stats.net_bytes_sent += size; break; case CURLINFO_TEXT: default: break; } return 0; } json_t *json_web_config(const char *url) { struct data_buffer all_data = {NULL, 0}; char curl_err_str[CURL_ERROR_SIZE]; long timeout = 60; json_error_t err; json_t *val; CURL *curl; int rc; memset(&err, 0, sizeof(err)); curl = curl_easy_init(); if (unlikely(!curl)) quithere(1, "CURL initialisation failed"); curl_easy_setopt(curl, CURLOPT_TIMEOUT, timeout); curl_easy_setopt(curl, CURLOPT_NOSIGNAL, 1); curl_easy_setopt(curl, CURLOPT_URL, url); curl_easy_setopt(curl, CURLOPT_ENCODING, ""); curl_easy_setopt(curl, CURLOPT_FAILONERROR, 1); curl_easy_setopt(curl, CURLOPT_WRITEFUNCTION, all_data_cb); curl_easy_setopt(curl, CURLOPT_WRITEDATA, &all_data); curl_easy_setopt(curl, CURLOPT_ERRORBUFFER, curl_err_str); curl_easy_setopt(curl, CURLOPT_FOLLOWLOCATION, 1); curl_easy_setopt(curl, CURLOPT_USE_SSL, CURLUSESSL_TRY); val = NULL; rc = curl_easy_perform(curl); curl_easy_cleanup(curl); if (rc) { applog(LOG_ERR, "HTTP config request of '%s' failed: %s", url, curl_err_str); goto c_out; } if (!all_data.buf) { applog(LOG_ERR, "Empty config data received from '%s'", url); goto c_out; } val = JSON_LOADS(all_data.buf, &err); if (!val) { applog(LOG_ERR, "JSON config decode of '%s' failed(%d): %s", url, err.line, err.text); } databuf_free(&all_data); c_out: return val; } json_t *json_rpc_call(CURL *curl, const char *url, const char *userpass, const char *rpc_req, bool probe, bool longpoll, int *rolltime, struct pool *pool, bool share) { long timeout = longpoll ? 
(60 * 60) : 60; struct data_buffer all_data = {NULL, 0}; struct header_info hi = {NULL, 0, NULL, NULL, false, false, false}; char len_hdr[64], user_agent_hdr[128]; char curl_err_str[CURL_ERROR_SIZE]; struct curl_slist *headers = NULL; struct upload_buffer upload_data; json_t *val, *err_val, *res_val; bool probing = false; double byte_count; json_error_t err; int rc; memset(&err, 0, sizeof(err)); /* it is assumed that 'curl' is freshly [re]initialized at this pt */ if (probe) probing = !pool->probed; curl_easy_setopt(curl, CURLOPT_TIMEOUT, timeout); // CURLOPT_VERBOSE won't write to stderr if we use CURLOPT_DEBUGFUNCTION curl_easy_setopt(curl, CURLOPT_DEBUGFUNCTION, curl_debug_cb); curl_easy_setopt(curl, CURLOPT_DEBUGDATA, (void *)pool); curl_easy_setopt(curl, CURLOPT_VERBOSE, 1); curl_easy_setopt(curl, CURLOPT_NOSIGNAL, 1); curl_easy_setopt(curl, CURLOPT_URL, url); curl_easy_setopt(curl, CURLOPT_ENCODING, ""); curl_easy_setopt(curl, CURLOPT_FAILONERROR, 1); /* Shares are staggered already and delays in submission can be costly * so do not delay them */ if (!opt_delaynet || share) curl_easy_setopt(curl, CURLOPT_TCP_NODELAY, 1); curl_easy_setopt(curl, CURLOPT_WRITEFUNCTION, all_data_cb); curl_easy_setopt(curl, CURLOPT_WRITEDATA, &all_data); curl_easy_setopt(curl, CURLOPT_READFUNCTION, upload_data_cb); curl_easy_setopt(curl, CURLOPT_READDATA, &upload_data); curl_easy_setopt(curl, CURLOPT_ERRORBUFFER, curl_err_str); curl_easy_setopt(curl, CURLOPT_FOLLOWLOCATION, 1); curl_easy_setopt(curl, CURLOPT_HEADERFUNCTION, resp_hdr_cb); curl_easy_setopt(curl, CURLOPT_HEADERDATA, &hi); curl_easy_setopt(curl, CURLOPT_USE_SSL, CURLUSESSL_TRY); if (pool->rpc_proxy) { curl_easy_setopt(curl, CURLOPT_PROXY, pool->rpc_proxy); curl_easy_setopt(curl, CURLOPT_PROXYTYPE, pool->rpc_proxytype); } else if (opt_socks_proxy) { curl_easy_setopt(curl, CURLOPT_PROXY, opt_socks_proxy); curl_easy_setopt(curl, CURLOPT_PROXYTYPE, CURLPROXY_SOCKS4); } if (userpass) { curl_easy_setopt(curl, 
CURLOPT_USERPWD, userpass); curl_easy_setopt(curl, CURLOPT_HTTPAUTH, CURLAUTH_BASIC); } if (longpoll) keep_curlalive(curl); curl_easy_setopt(curl, CURLOPT_POST, 1); if (opt_protocol) applog(LOG_DEBUG, "JSON protocol request:\n%s", rpc_req); upload_data.buf = rpc_req; upload_data.len = strlen(rpc_req); sprintf(len_hdr, "Content-Length: %lu", (unsigned long) upload_data.len); sprintf(user_agent_hdr, "User-Agent: %s", PACKAGE_STRING); headers = curl_slist_append(headers, "Content-type: application/json"); headers = curl_slist_append(headers, "X-Mining-Extensions: longpoll midstate rollntime submitold"); if (likely(global_hashrate)) { char ghashrate[255]; sprintf(ghashrate, "X-Mining-Hashrate: %llu", global_hashrate); headers = curl_slist_append(headers, ghashrate); } headers = curl_slist_append(headers, len_hdr); headers = curl_slist_append(headers, user_agent_hdr); headers = curl_slist_append(headers, "Expect:"); /* disable Expect hdr*/ curl_easy_setopt(curl, CURLOPT_HTTPHEADER, headers); if (opt_delaynet) { /* Don't delay share submission, but still track the nettime */ if (!share) { long long now_msecs, last_msecs; struct timeval now, last; cgtime(&now); last_nettime(&last); now_msecs = (long long)now.tv_sec * 1000; now_msecs += now.tv_usec / 1000; last_msecs = (long long)last.tv_sec * 1000; last_msecs += last.tv_usec / 1000; if (now_msecs > last_msecs && now_msecs - last_msecs < 250) { struct timespec rgtp; rgtp.tv_sec = 0; rgtp.tv_nsec = (250 - (now_msecs - last_msecs)) * 1000000; nanosleep(&rgtp, NULL); } } set_nettime(); } rc = curl_easy_perform(curl); if (rc) { applog(LOG_INFO, "HTTP request failed: %s", curl_err_str); goto err_out; } if (!all_data.buf) { applog(LOG_DEBUG, "Empty data received in json_rpc_call."); goto err_out; } pool->cgminer_pool_stats.times_sent++; if (curl_easy_getinfo(curl, CURLINFO_SIZE_UPLOAD, &byte_count) == CURLE_OK) pool->cgminer_pool_stats.bytes_sent += byte_count; pool->cgminer_pool_stats.times_received++; if 
(curl_easy_getinfo(curl, CURLINFO_SIZE_DOWNLOAD, &byte_count) == CURLE_OK) pool->cgminer_pool_stats.bytes_received += byte_count; if (probing) { pool->probed = true; /* If X-Long-Polling was found, activate long polling */ if (hi.lp_path) { if (pool->hdr_path != NULL) free(pool->hdr_path); pool->hdr_path = hi.lp_path; } else pool->hdr_path = NULL; if (hi.stratum_url) { pool->stratum_url = hi.stratum_url; hi.stratum_url = NULL; } } else { if (hi.lp_path) { free(hi.lp_path); hi.lp_path = NULL; } if (hi.stratum_url) { free(hi.stratum_url); hi.stratum_url = NULL; } } *rolltime = hi.rolltime; pool->cgminer_pool_stats.rolltime = hi.rolltime; pool->cgminer_pool_stats.hadrolltime = hi.hadrolltime; pool->cgminer_pool_stats.canroll = hi.canroll; pool->cgminer_pool_stats.hadexpire = hi.hadexpire; val = JSON_LOADS(all_data.buf, &err); if (!val) { applog(LOG_INFO, "JSON decode failed(%d): %s", err.line, err.text); if (opt_protocol) applog(LOG_DEBUG, "JSON protocol response:\n%s", (char *)(all_data.buf)); goto err_out; } if (opt_protocol) { char *s = json_dumps(val, JSON_INDENT(3)); applog(LOG_DEBUG, "JSON protocol response:\n%s", s); free(s); } /* JSON-RPC valid response returns a non-null 'result', * and a null 'error'. 
*/ res_val = json_object_get(val, "result"); err_val = json_object_get(val, "error"); if (!res_val ||(err_val && !json_is_null(err_val))) { char *s; if (err_val) s = json_dumps(err_val, JSON_INDENT(3)); else s = strdup("(unknown reason)"); applog(LOG_INFO, "JSON-RPC call failed: %s", s); free(s); goto err_out; } if (hi.reason) { json_object_set_new(val, "reject-reason", json_string(hi.reason)); free(hi.reason); hi.reason = NULL; } successful_connect = true; databuf_free(&all_data); curl_slist_free_all(headers); curl_easy_reset(curl); return val; err_out: databuf_free(&all_data); curl_slist_free_all(headers); curl_easy_reset(curl); if (!successful_connect) applog(LOG_DEBUG, "Failed to connect in json_rpc_call"); curl_easy_setopt(curl, CURLOPT_FRESH_CONNECT, 1); return NULL; } #define PROXY_HTTP CURLPROXY_HTTP #define PROXY_HTTP_1_0 CURLPROXY_HTTP_1_0 #define PROXY_SOCKS4 CURLPROXY_SOCKS4 #define PROXY_SOCKS5 CURLPROXY_SOCKS5 #define PROXY_SOCKS4A CURLPROXY_SOCKS4A #define PROXY_SOCKS5H CURLPROXY_SOCKS5_HOSTNAME #else /* HAVE_LIBCURL */ #define PROXY_HTTP 0 #define PROXY_HTTP_1_0 1 #define PROXY_SOCKS4 2 #define PROXY_SOCKS5 3 #define PROXY_SOCKS4A 4 #define PROXY_SOCKS5H 5 #endif /* HAVE_LIBCURL */ static struct { const char *name; proxytypes_t proxytype; } proxynames[] = { { "http:", PROXY_HTTP }, { "http0:", PROXY_HTTP_1_0 }, { "socks4:", PROXY_SOCKS4 }, { "socks5:", PROXY_SOCKS5 }, { "socks4a:", PROXY_SOCKS4A }, { "socks5h:", PROXY_SOCKS5H }, { NULL, 0 } }; const char *proxytype(proxytypes_t proxytype) { int i; for (i = 0; proxynames[i].name; i++) if (proxynames[i].proxytype == proxytype) return proxynames[i].name; return "invalid"; } char *get_proxy(char *url, struct pool *pool) { pool->rpc_proxy = NULL; char *split; int plen, len, i; for (i = 0; proxynames[i].name; i++) { plen = strlen(proxynames[i].name); if (strncmp(url, proxynames[i].name, plen) == 0) { if (!(split = strchr(url, '|'))) return url; *split = '\0'; len = split - url; pool->rpc_proxy = malloc(1 
+ len - plen); if (!(pool->rpc_proxy)) quithere(1, "Failed to malloc rpc_proxy"); strcpy(pool->rpc_proxy, url + plen); extract_sockaddr(pool->rpc_proxy, &pool->sockaddr_proxy_url, &pool->sockaddr_proxy_port); pool->rpc_proxytype = proxynames[i].proxytype; url = split + 1; break; } } return url; } /* Adequate size s==len*2 + 1 must be alloced to use this variant */ void __bin2hex(char *s, const unsigned char *p, size_t len) { int i; static const char hex[16] = {'0', '1', '2', '3', '4', '5', '6', '7', '8', '9', 'a', 'b', 'c', 'd', 'e', 'f'}; for (i = 0; i < (int)len; i++) { *s++ = hex[p[i] >> 4]; *s++ = hex[p[i] & 0xF]; } *s++ = '\0'; } /* Returns a malloced array string of a binary value of arbitrary length. The * array is rounded up to a 4 byte size to appease architectures that need * aligned array sizes */ char *bin2hex(const unsigned char *p, size_t len) { ssize_t slen; char *s; slen = len * 2 + 1; if (slen % 4) slen += 4 - (slen % 4); s = calloc(slen, 1); if (unlikely(!s)) quithere(1, "Failed to calloc"); __bin2hex(s, p, len); return s; } static const int hex2bin_tbl[256] = { -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, 0, 1, 2, 3, 4, 5, 6, 7, 8, 9, -1, -1, -1, -1, -1, -1, -1, 10, 11, 12, 13, 14, 15, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, 10, 11, 12, 13, 14, 15, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, 
-1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, }; /* Does the reverse of bin2hex but does not allocate any ram */ bool hex2bin(unsigned char *p, const char *hexstr, size_t len) { int nibble1, nibble2; unsigned char idx; bool ret = false; while (*hexstr && len) { if (unlikely(!hexstr[1])) { applog(LOG_ERR, "hex2bin str truncated"); return ret; } idx = *hexstr++; nibble1 = hex2bin_tbl[idx]; idx = *hexstr++; nibble2 = hex2bin_tbl[idx]; if (unlikely((nibble1 < 0) || (nibble2 < 0))) { applog(LOG_ERR, "hex2bin scan failed"); return ret; } *p++ = (((unsigned char)nibble1) << 4) | ((unsigned char)nibble2); --len; } if (likely(len == 0 && *hexstr == 0)) ret = true; return ret; } static const int b58tobin_tbl[] = { -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, 0, 1, 2, 3, 4, 5, 6, 7, 8, -1, -1, -1, -1, -1, -1, -1, 9, 10, 11, 12, 13, 14, 15, 16, -1, 17, 18, 19, 20, 21, -1, 22, 23, 24, 25, 26, 27, 28, 29, 30, 31, 32, -1, -1, -1, -1, -1, -1, 33, 34, 35, 36, 37, 38, 39, 40, 41, 42, 43, -1, 44, 45, 46, 47, 48, 49, 50, 51, 52, 53, 54, 55, 56, 57 }; /* b58bin should always be at least 25 bytes long and already checked to be * valid. 
*/
void b58tobin(unsigned char *b58bin, const char *b58)
{
	uint32_t c, bin32[7];
	int len, i, j;
	uint64_t t;

	memset(bin32, 0, 7 * sizeof(uint32_t));
	len = strlen(b58);
	/* Multiply-accumulate base58 digits into a 7-limb big-endian number,
	 * 58*x + digit per step, carrying 6 bits between 32-bit limbs. */
	for (i = 0; i < len; i++) {
		c = b58[i];
		c = b58tobin_tbl[c];
		for (j = 6; j >= 0; j--) {
			t = ((uint64_t)bin32[j]) * 58 + c;
			c = (t & 0x3f00000000ull) >> 32;
			bin32[j] = t & 0xffffffffull;
		}
	}
	/* Emit 25 bytes: low byte of limb 0, then limbs 1..6 big-endian.
	 * NOTE(review): the uint32_t store below is to a 1-byte-offset
	 * address - unaligned; works on x86/ARMv7+, confirm for other targets. */
	*(b58bin++) = bin32[0] & 0xff;
	for (i = 1; i < 7; i++) {
		*((uint32_t *)b58bin) = htobe32(bin32[i]);
		b58bin += sizeof(uint32_t);
	}
}

/* Builds the canonical 25-byte P2PKH output script
 * (OP_DUP OP_HASH160 <20-byte hash> OP_EQUALVERIFY OP_CHECKSIG)
 * from a base58 bitcoin address.  pkh must have room for 25 bytes. */
void address_to_pubkeyhash(unsigned char *pkh, const char *addr)
{
	unsigned char b58bin[25];

	memset(b58bin, 0, 25);
	b58tobin(b58bin, addr);
	pkh[0] = 0x76;
	pkh[1] = 0xa9;
	pkh[2] = 0x14;
	/* Skip the version byte; checksum bytes beyond [21] are ignored */
	memcpy(&pkh[3], &b58bin[1], 20);
	pkh[23] = 0x88;
	pkh[24] = 0xac;
}

/* For encoding nHeight into coinbase, return how many bytes were used */
int ser_number(unsigned char *s, int32_t val)
{
	int32_t *i32 = (int32_t *)&s[1];
	int len;

	if (val < 128)
		len = 1;
	else if (val < 16512)
		len = 2;
	else if (val < 2113664)
		len = 3;
	else
		len = 4;
	/* NOTE(review): always writes 4 bytes at s[1] even when len < 4, so
	 * the caller's buffer must have at least 5 bytes of room. */
	*i32 = htole32(val);
	s[0] = len++;
	return len;
}

/* For encoding variable length strings */
unsigned char *ser_string(char *s, int *slen)
{
	size_t len = strlen(s);
	unsigned char *ret;

	ret = malloc(1 + len + 8); // Leave room for largest size
	if (unlikely(!ret))
		quit(1, "Failed to malloc ret in ser_string");
	/* Bitcoin CompactSize encoding: <253 one byte, <2^16 0xfd + u16,
	 * otherwise 0xfe + u32 (size_t is only 32 bit on many platforms anyway) */
	if (len < 253) {
		ret[0] = len;
		memcpy(ret + 1, s, len);
		*slen = len + 1;
	} else if (len < 0x10000) {
		uint16_t *u16 = (uint16_t *)&ret[1];

		ret[0] = 253;
		*u16 = htobe16(len);
		memcpy(ret + 3, s, len);
		*slen = len + 3;
	} else {
		/* size_t is only 32 bit on many platforms anyway */
		uint32_t *u32 = (uint32_t *)&ret[1];

		ret[0] = 254;
		*u32 = htobe32(len);
		memcpy(ret + 5, s, len);
		*slen = len + 5;
	}
	return ret;
}

/* Returns true if hash <= target, comparing both as 256-bit little-endian
 * numbers from the most significant 32-bit word down. */
bool fulltest(const unsigned char *hash, const unsigned char *target)
{
	uint32_t *hash32 = (uint32_t *)hash;
	uint32_t *target32 = (uint32_t *)target;
	bool rc = true;
	int i;

	for (i = 28 / 4; i >= 0; i--) {
		uint32_t h32tmp = le32toh(hash32[i]);
		uint32_t t32tmp =
le32toh(target32[i]);

		if (h32tmp > t32tmp) {
			rc = false;
			break;
		}
		/* Strictly smaller word decides immediately; equal words fall
		 * through to the next less significant word. */
		if (h32tmp < t32tmp) {
			rc = true;
			break;
		}
	}

	if (opt_debug) {
		unsigned char hash_swap[32], target_swap[32];
		char *hash_str, *target_str;

		swab256(hash_swap, hash);
		swab256(target_swap, target);
		hash_str = bin2hex(hash_swap, 32);
		target_str = bin2hex(target_swap, 32);

		applog(LOG_DEBUG, " Proof: %s\nTarget: %s\nTrgVal? %s",
			hash_str, target_str, rc ? "YES (hash <= target)" : "no (false positive; hash > target)");

		free(hash_str);
		free(target_str);
	}
	return rc;
}

/* Allocate and initialise a thread queue; returns NULL on OOM. */
struct thread_q *tq_new(void)
{
	struct thread_q *tq;

	tq = calloc(1, sizeof(*tq));
	if (!tq)
		return NULL;

	INIT_LIST_HEAD(&tq->q);
	pthread_mutex_init(&tq->mutex, NULL);
	pthread_cond_init(&tq->cond, NULL);

	return tq;
}

/* Destroy a thread queue, discarding any queued entries. */
void tq_free(struct thread_q *tq)
{
	struct tq_ent *ent, *iter;

	if (!tq)
		return;

	list_for_each_entry_safe(ent, iter, &tq->q, q_node) {
		list_del(&ent->q_node);
		free(ent);
	}

	pthread_cond_destroy(&tq->cond);
	pthread_mutex_destroy(&tq->mutex);

	memset(tq, 0, sizeof(*tq));	/* poison */
	free(tq);
}

/* Set the frozen flag under lock and wake one waiter so it notices. */
static void tq_freezethaw(struct thread_q *tq, bool frozen)
{
	mutex_lock(&tq->mutex);
	tq->frozen = frozen;
	pthread_cond_signal(&tq->cond);
	mutex_unlock(&tq->mutex);
}

void tq_freeze(struct thread_q *tq)
{
	tq_freezethaw(tq, true);
}

void tq_thaw(struct thread_q *tq)
{
	tq_freezethaw(tq, false);
}

/* Append data to the queue; fails (returns false) when the queue is frozen
 * or on OOM.  Signals any waiting consumer. */
bool tq_push(struct thread_q *tq, void *data)
{
	struct tq_ent *ent;
	bool rc = true;

	ent = calloc(1, sizeof(*ent));
	if (!ent)
		return false;

	ent->data = data;
	INIT_LIST_HEAD(&ent->q_node);

	mutex_lock(&tq->mutex);
	if (!tq->frozen) {
		list_add_tail(&ent->q_node, &tq->q);
	} else {
		free(ent);
		rc = false;
	}
	pthread_cond_signal(&tq->cond);
	mutex_unlock(&tq->mutex);

	return rc;
}

/* Pop the oldest entry, optionally waiting until abstime; returns the entry's
 * data pointer or NULL on timeout/error. */
void *tq_pop(struct thread_q *tq, const struct timespec *abstime)
{
	struct tq_ent *ent;
	void *rval = NULL;
	int rc;

	mutex_lock(&tq->mutex);
	if (!list_empty(&tq->q))
		goto pop;

	if (abstime)
		rc = pthread_cond_timedwait(&tq->cond, &tq->mutex, abstime);
	else
		rc = pthread_cond_wait(&tq->cond,
&tq->mutex);
	if (rc)
		goto out;
	/* Spurious wakeup or freeze signal with nothing queued */
	if (list_empty(&tq->q))
		goto out;
pop:
	ent = list_entry(tq->q.next, struct tq_ent, q_node);
	rval = ent->data;

	list_del(&ent->q_node);
	free(ent);
out:
	mutex_unlock(&tq->mutex);

	return rval;
}

/* Wrapper around pthread_create that also initialises the thread's cgsem. */
int thr_info_create(struct thr_info *thr, pthread_attr_t *attr, void *(*start) (void *), void *arg)
{
	cgsem_init(&thr->sem);

	return pthread_create(&thr->pth, attr, start, arg);
}

/* Cancel a thread created with thr_info_create and release its cgsem. */
void thr_info_cancel(struct thr_info *thr)
{
	if (!thr)
		return;

	if (PTH(thr) != 0L) {
		pthread_cancel(thr->pth);
		PTH(thr) = 0L;
	}
	cgsem_destroy(&thr->sem);
}

/* b -= a (note: result is stored in the SECOND argument) */
void subtime(struct timeval *a, struct timeval *b)
{
	timersub(a, b, b);
}

/* b += a (result in the second argument) */
void addtime(struct timeval *a, struct timeval *b)
{
	timeradd(a, b, b);
}

bool time_more(struct timeval *a, struct timeval *b)
{
	return timercmp(a, b, >);
}

bool time_less(struct timeval *a, struct timeval *b)
{
	return timercmp(a, b, <);
}

void copy_time(struct timeval *dest, const struct timeval *src)
{
	memcpy(dest, src, sizeof(struct timeval));
}

/* timespec (ns) -> timeval (us), truncating sub-microsecond precision */
void timespec_to_val(struct timeval *val, const struct timespec *spec)
{
	val->tv_sec = spec->tv_sec;
	val->tv_usec = spec->tv_nsec / 1000;
}

void timeval_to_spec(struct timespec *spec, const struct timeval *val)
{
	spec->tv_sec = val->tv_sec;
	spec->tv_nsec = val->tv_usec * 1000;
}

/* Split a microsecond count into timeval seconds + microseconds */
void us_to_timeval(struct timeval *val, int64_t us)
{
	lldiv_t tvdiv = lldiv(us, 1000000);

	val->tv_sec = tvdiv.quot;
	val->tv_usec = tvdiv.rem;
}

void us_to_timespec(struct timespec *spec, int64_t us)
{
	lldiv_t tvdiv = lldiv(us, 1000000);

	spec->tv_sec = tvdiv.quot;
	spec->tv_nsec = tvdiv.rem * 1000;
}

void ms_to_timespec(struct timespec *spec, int64_t ms)
{
	lldiv_t tvdiv = lldiv(ms, 1000);

	spec->tv_sec = tvdiv.quot;
	spec->tv_nsec = tvdiv.rem * 1000000;
}

void ms_to_timeval(struct timeval *val, int64_t ms)
{
	lldiv_t tvdiv = lldiv(ms, 1000);

	val->tv_sec = tvdiv.quot;
	val->tv_usec = tvdiv.rem * 1000;
}

/* a += b, normalising tv_nsec into [0, 1e9) */
void timeraddspec(struct timespec *a, const struct timespec *b)
{
	a->tv_sec += b->tv_sec;
	a->tv_nsec += b->tv_nsec;
	if (a->tv_nsec >=
1000000000) {
		a->tv_nsec -= 1000000000;
		a->tv_sec++;
	}
}

static int __maybe_unused timespec_to_ms(struct timespec *ts)
{
	return ts->tv_sec * 1000 + ts->tv_nsec / 1000000;
}

/* Subtract b from a */
static void __maybe_unused timersubspec(struct timespec *a, const struct timespec *b)
{
	a->tv_sec -= b->tv_sec;
	a->tv_nsec -= b->tv_nsec;
	if (a->tv_nsec < 0) {
		a->tv_nsec += 1000000000;
		a->tv_sec--;
	}
}

/* These are cgminer specific sleep functions that use an absolute nanosecond
 * resolution timer to avoid poor usleep accuracy and overruns. */
#ifdef WIN32
/* Windows start time is since 1601 LOL so convert it to unix epoch 1970. */
#define EPOCHFILETIME (116444736000000000LL)

/* Return the system time as an lldiv_t in decimicroseconds. */
static void decius_time(lldiv_t *lidiv)
{
	FILETIME ft;
	LARGE_INTEGER li;

	GetSystemTimeAsFileTime(&ft);
	li.LowPart  = ft.dwLowDateTime;
	li.HighPart = ft.dwHighDateTime;
	li.QuadPart -= EPOCHFILETIME;

	/* SystemTime is in decimicroseconds so divide by an unusual number */
	*lidiv = lldiv(li.QuadPart, 10000000);
}

/* This is a cgminer gettimeofday wrapper. Since we always call gettimeofday
 * with tz set to NULL, and windows' default resolution is only 15ms, this
 * gives us higher resolution times on windows. */
void cgtime(struct timeval *tv)
{
	lldiv_t lidiv;

	decius_time(&lidiv);
	tv->tv_sec = lidiv.quot;
	/* quotient is seconds; remainder is decimicroseconds, so /10 -> us */
	tv->tv_usec = lidiv.rem / 10;
}
#else /* WIN32 */
void cgtime(struct timeval *tv)
{
	gettimeofday(tv, NULL);
}

int cgtimer_to_ms(cgtimer_t *cgt)
{
	return timespec_to_ms(cgt);
}

/* Subtracts b from a and stores it in res.
*/ void cgtimer_sub(cgtimer_t *a, cgtimer_t *b, cgtimer_t *res) { res->tv_sec = a->tv_sec - b->tv_sec; res->tv_nsec = a->tv_nsec - b->tv_nsec; if (res->tv_nsec < 0) { res->tv_nsec += 1000000000; res->tv_sec--; } } #endif /* WIN32 */ #ifdef CLOCK_MONOTONIC /* Essentially just linux */ void cgtimer_time(cgtimer_t *ts_start) { clock_gettime(CLOCK_MONOTONIC, ts_start); } static void nanosleep_abstime(struct timespec *ts_end) { int ret; do { ret = clock_nanosleep(CLOCK_MONOTONIC, TIMER_ABSTIME, ts_end, NULL); } while (ret == EINTR); } /* Reentrant version of cgsleep functions allow start time to be set separately * from the beginning of the actual sleep, allowing scheduling delays to be * counted in the sleep. */ void cgsleep_ms_r(cgtimer_t *ts_start, int ms) { struct timespec ts_end; ms_to_timespec(&ts_end, ms); timeraddspec(&ts_end, ts_start); nanosleep_abstime(&ts_end); } void cgsleep_us_r(cgtimer_t *ts_start, int64_t us) { struct timespec ts_end; us_to_timespec(&ts_end, us); timeraddspec(&ts_end, ts_start); nanosleep_abstime(&ts_end); } #else /* CLOCK_MONOTONIC */ #ifdef __MACH__ #include <mach/clock.h> #include <mach/mach.h> void cgtimer_time(cgtimer_t *ts_start) { clock_serv_t cclock; mach_timespec_t mts; host_get_clock_service(mach_host_self(), SYSTEM_CLOCK, &cclock); clock_get_time(cclock, &mts); mach_port_deallocate(mach_task_self(), cclock); ts_start->tv_sec = mts.tv_sec; ts_start->tv_nsec = mts.tv_nsec; } #elif !defined(WIN32) /* __MACH__ - Everything not linux/macosx/win32 */ void cgtimer_time(cgtimer_t *ts_start) { struct timeval tv; cgtime(&tv); ts_start->tv_sec = tv->tv_sec; ts_start->tv_nsec = tv->tv_usec * 1000; } #endif /* __MACH__ */ #ifdef WIN32 /* For windows we use the SystemTime stored as a LARGE_INTEGER as the cgtimer_t * typedef, allowing us to have sub-microsecond resolution for times, do simple * arithmetic for timer calculations, and use windows' own hTimers to get * accurate absolute timeouts. 
*/
int cgtimer_to_ms(cgtimer_t *cgt)
{
	/* QuadPart counts 100ns units; 10000 of them per millisecond */
	return (int)(cgt->QuadPart / 10000LL);
}

/* Subtracts b from a and stores it in res. */
void cgtimer_sub(cgtimer_t *a, cgtimer_t *b, cgtimer_t *res)
{
	res->QuadPart = a->QuadPart - b->QuadPart;
}

/* Note that cgtimer time is NOT offset by the unix epoch since we use absolute
 * timeouts with hTimers. */
void cgtimer_time(cgtimer_t *ts_start)
{
	FILETIME ft;

	GetSystemTimeAsFileTime(&ft);
	ts_start->LowPart = ft.dwLowDateTime;
	ts_start->HighPart = ft.dwHighDateTime;
}

/* Sleep until the absolute FILETIME deadline in li using a waitable timer. */
static void liSleep(LARGE_INTEGER *li, int timeout)
{
	HANDLE hTimer;
	DWORD ret;

	if (unlikely(timeout <= 0))
		return;

	hTimer = CreateWaitableTimer(NULL, TRUE, NULL);
	if (unlikely(!hTimer))
		quit(1, "Failed to create hTimer in liSleep");
	ret = SetWaitableTimer(hTimer, li, 0, NULL, NULL, 0);
	if (unlikely(!ret))
		quit(1, "Failed to SetWaitableTimer in liSleep");
	/* We still use a timeout as a sanity check in case the system time
	 * is changed while we're running */
	ret = WaitForSingleObject(hTimer, timeout);
	if (unlikely(ret != WAIT_OBJECT_0 && ret != WAIT_TIMEOUT))
		quit(1, "Failed to WaitForSingleObject in liSleep");
	CloseHandle(hTimer);
}

void cgsleep_ms_r(cgtimer_t *ts_start, int ms)
{
	LARGE_INTEGER li;

	/* Deadline = start + ms in 100ns units */
	li.QuadPart = ts_start->QuadPart + (int64_t)ms * 10000LL;
	liSleep(&li, ms);
}

void cgsleep_us_r(cgtimer_t *ts_start, int64_t us)
{
	LARGE_INTEGER li;
	int ms;

	li.QuadPart = ts_start->QuadPart + us * 10LL;
	ms = us / 1000;
	/* Round sub-millisecond sanity timeouts up to 1ms */
	if (!ms)
		ms = 1;
	liSleep(&li, ms);
}
#else /* WIN32 */
/* Convert an absolute start + relative diff into a remaining relative sleep,
 * skipping the sleep entirely if the deadline has already passed. */
static void cgsleep_spec(struct timespec *ts_diff, const struct timespec *ts_start)
{
	struct timespec now;

	timeraddspec(ts_diff, ts_start);
	cgtimer_time(&now);
	timersubspec(ts_diff, &now);
	if (unlikely(ts_diff->tv_sec < 0))
		return;
	nanosleep(ts_diff, NULL);
}

void cgsleep_ms_r(cgtimer_t *ts_start, int ms)
{
	struct timespec ts_diff;

	ms_to_timespec(&ts_diff, ms);
	cgsleep_spec(&ts_diff, ts_start);
}

void cgsleep_us_r(cgtimer_t *ts_start, int64_t us)
{
	struct timespec ts_diff;

	us_to_timespec(&ts_diff, us);
cgsleep_spec(&ts_diff, ts_start);
}
#endif /* WIN32 */
#endif /* CLOCK_MONOTONIC */

/* Non-reentrant convenience wrappers: start timing at the call itself. */
void cgsleep_ms(int ms)
{
	cgtimer_t ts_start;

	cgsleep_prepare_r(&ts_start);
	cgsleep_ms_r(&ts_start, ms);
}

void cgsleep_us(int64_t us)
{
	cgtimer_t ts_start;

	cgsleep_prepare_r(&ts_start);
	cgsleep_us_r(&ts_start, us);
}

/* Returns the microseconds difference between end and start times as a double */
double us_tdiff(struct timeval *end, struct timeval *start)
{
	/* Sanity check. We should only be using this for small differences so
	 * limit the max to 60 seconds. */
	if (unlikely(end->tv_sec - start->tv_sec > 60))
		return 60000000;
	return (end->tv_sec - start->tv_sec) * 1000000 + (end->tv_usec - start->tv_usec);
}

/* Returns the milliseconds difference between end and start times */
int ms_tdiff(struct timeval *end, struct timeval *start)
{
	/* Like us_tdiff, limit to 1 hour. */
	if (unlikely(end->tv_sec - start->tv_sec > 3600))
		return 3600000;
	return (end->tv_sec - start->tv_sec) * 1000 + (end->tv_usec - start->tv_usec) / 1000;
}

/* Returns the seconds difference between end and start times as a double */
double tdiff(struct timeval *end, struct timeval *start)
{
	return end->tv_sec - start->tv_sec + (end->tv_usec - start->tv_usec) / 1000000.0;
}

/* Split "scheme://host[:port][/path]" (including bracketed IPv6 hosts) into
 * freshly strdup'd host and port strings; port defaults to "80". */
bool extract_sockaddr(char *url, char **sockaddr_url, char **sockaddr_port)
{
	char *url_begin, *url_end, *ipv6_begin, *ipv6_end, *port_start = NULL;
	char url_address[256], port[6];
	int url_len, port_len = 0;

	*sockaddr_url = url;
	url_begin = strstr(url, "//");
	if (!url_begin)
		url_begin = url;
	else
		url_begin += 2;

	/* Look for numeric ipv6 entries */
	ipv6_begin = strstr(url_begin, "[");
	ipv6_end = strstr(url_begin, "]");
	/* For [addr]:port the port separator must come after the bracket */
	if (ipv6_begin && ipv6_end && ipv6_end > ipv6_begin)
		url_end = strstr(ipv6_end, ":");
	else
		url_end = strstr(url_begin, ":");
	if (url_end) {
		url_len = url_end - url_begin;
		port_len = strlen(url_begin) - url_len - 1;
		if (port_len < 1)
			return false;
		port_start = url_end + 1;
	} else
		url_len = strlen(url_begin);

	if (url_len < 1)
		return
false; sprintf(url_address, "%.*s", url_len, url_begin); if (port_len) { char *slash; snprintf(port, 6, "%.*s", port_len, port_start); slash = strchr(port, '/'); if (slash) *slash = '\0'; } else strcpy(port, "80"); *sockaddr_port = strdup(port); *sockaddr_url = strdup(url_address); return true; } enum send_ret { SEND_OK, SEND_SELECTFAIL, SEND_SENDFAIL, SEND_INACTIVE }; /* Send a single command across a socket, appending \n to it. This should all * be done under stratum lock except when first establishing the socket */ static enum send_ret __stratum_send(struct pool *pool, char *s, ssize_t len) { SOCKETTYPE sock = pool->sock; ssize_t ssent = 0; strcat(s, "\n"); len++; while (len > 0 ) { struct timeval timeout = {1, 0}; ssize_t sent; fd_set wd; retry: FD_ZERO(&wd); FD_SET(sock, &wd); if (select(sock + 1, NULL, &wd, NULL, &timeout) < 1) { if (interrupted()) goto retry; return SEND_SELECTFAIL; } #ifdef __APPLE__ sent = send(pool->sock, s + ssent, len, SO_NOSIGPIPE); #elif WIN32 sent = send(pool->sock, s + ssent, len, 0); #else sent = send(pool->sock, s + ssent, len, MSG_NOSIGNAL); #endif if (sent < 0) { if (!sock_blocks()) return SEND_SENDFAIL; sent = 0; } ssent += sent; len -= sent; } pool->cgminer_pool_stats.times_sent++; pool->cgminer_pool_stats.bytes_sent += ssent; pool->cgminer_pool_stats.net_bytes_sent += ssent; return SEND_OK; } bool stratum_send(struct pool *pool, char *s, ssize_t len) { enum send_ret ret = SEND_INACTIVE; if (opt_protocol) applog(LOG_DEBUG, "SEND: %s", s); mutex_lock(&pool->stratum_lock); if (pool->stratum_active) ret = __stratum_send(pool, s, len); mutex_unlock(&pool->stratum_lock); /* This is to avoid doing applog under stratum_lock */ switch (ret) { default: case SEND_OK: break; case SEND_SELECTFAIL: applog(LOG_DEBUG, "Write select failed on pool %d sock", pool->pool_no); suspend_stratum(pool); break; case SEND_SENDFAIL: applog(LOG_DEBUG, "Failed to send in stratum_send"); suspend_stratum(pool); break; case SEND_INACTIVE: applog(LOG_DEBUG, 
"Stratum send failed due to no pool stratum_active"); break; } return (ret == SEND_OK); } static bool socket_full(struct pool *pool, int wait) { SOCKETTYPE sock = pool->sock; struct timeval timeout; fd_set rd; if (unlikely(wait < 0)) wait = 0; FD_ZERO(&rd); FD_SET(sock, &rd); timeout.tv_usec = 0; timeout.tv_sec = wait; if (select(sock + 1, &rd, NULL, NULL, &timeout) > 0) return true; return false; } /* Check to see if Santa's been good to you */ bool sock_full(struct pool *pool) { if (strlen(pool->sockbuf)) return true; return (socket_full(pool, 0)); } static void clear_sockbuf(struct pool *pool) { strcpy(pool->sockbuf, ""); } static void clear_sock(struct pool *pool) { ssize_t n; mutex_lock(&pool->stratum_lock); do { if (pool->sock) n = recv(pool->sock, pool->sockbuf, RECVSIZE, 0); else n = 0; } while (n > 0); mutex_unlock(&pool->stratum_lock); clear_sockbuf(pool); } /* Realloc memory to new size and zero any extra memory added */ void _recalloc(void **ptr, size_t old, size_t new, const char *file, const char *func, const int line) { if (new == old) return; *ptr = realloc(*ptr, new); if (unlikely(!*ptr)) quitfrom(1, file, func, line, "Failed to realloc"); if (new > old) memset(*ptr + old, 0, new - old); } /* Make sure the pool sockbuf is large enough to cope with any coinbase size * by reallocing it to a large enough size rounded up to a multiple of RBUFSIZE * and zeroing the new memory */ static void recalloc_sock(struct pool *pool, size_t len) { size_t old, new; old = strlen(pool->sockbuf); new = old + len + 1; if (new < pool->sockbuf_size) return; new = new + (RBUFSIZE - (new % RBUFSIZE)); // Avoid potentially recursive locking // applog(LOG_DEBUG, "Recallocing pool sockbuf to %d", new); pool->sockbuf = realloc(pool->sockbuf, new); if (!pool->sockbuf) quithere(1, "Failed to realloc pool sockbuf"); memset(pool->sockbuf + old, 0, new - old); pool->sockbuf_size = new; } /* Peeks at a socket to find the first end of line and then reads just that * from the socket 
and returns that as a malloced char */
char *recv_line(struct pool *pool)
{
	char *tok, *sret = NULL;
	ssize_t len, buflen;
	int waited = 0;

	/* Accumulate socket data into pool->sockbuf until a newline appears
	 * or DEFAULT_SOCKWAIT seconds have elapsed. */
	if (!strstr(pool->sockbuf, "\n")) {
		struct timeval rstart, now;

		cgtime(&rstart);
		if (!socket_full(pool, DEFAULT_SOCKWAIT)) {
			applog(LOG_DEBUG, "Timed out waiting for data on socket_full");
			goto out;
		}

		do {
			char s[RBUFSIZE];
			size_t slen;
			ssize_t n;

			memset(s, 0, RBUFSIZE);
			n = recv(pool->sock, s, RECVSIZE, 0);
			/* recv() == 0 means the peer closed the connection */
			if (!n) {
				applog(LOG_DEBUG, "Socket closed waiting in recv_line");
				suspend_stratum(pool);
				break;
			}
			cgtime(&now);
			waited = tdiff(&now, &rstart);
			if (n < 0) {
				if (!sock_blocks() || !socket_full(pool, DEFAULT_SOCKWAIT - waited)) {
					applog(LOG_DEBUG, "Failed to recv sock in recv_line");
					suspend_stratum(pool);
					break;
				}
			} else {
				slen = strlen(s);
				recalloc_sock(pool, slen);
				strcat(pool->sockbuf, s);
			}
		} while (waited < DEFAULT_SOCKWAIT && !strstr(pool->sockbuf, "\n"));
	}

	buflen = strlen(pool->sockbuf);
	tok = strtok(pool->sockbuf, "\n");
	if (!tok) {
		applog(LOG_DEBUG, "Failed to parse a \\n terminated string in recv_line");
		goto out;
	}
	sret = strdup(tok);
	len = strlen(sret);

	/* Copy what's left in the buffer after the \n, including the
	 * terminating \0 */
	if (buflen > len + 1)
		memmove(pool->sockbuf, pool->sockbuf + len + 1, buflen - len + 1);
	else
		strcpy(pool->sockbuf, "");

	pool->cgminer_pool_stats.times_received++;
	pool->cgminer_pool_stats.bytes_received += len;
	pool->cgminer_pool_stats.net_bytes_received += len;
out:
	/* On failure, flush the socket so we resync at a line boundary */
	if (!sret)
		clear_sock(pool);
	else if (opt_protocol)
		applog(LOG_DEBUG, "RECVD: %s", sret);
	return sret;
}

/* Extracts a string value from a json array with error checking. To be used
 * when the value of the string returned is only examined and not to be stored.
* See json_array_string below */
static char *__json_array_string(json_t *val, unsigned int entry)
{
	json_t *arr_entry;

	if (json_is_null(val))
		return NULL;
	if (!json_is_array(val))
		return NULL;
	/* BUGFIX: was 'entry > json_array_size(val)', an off-by-one that let
	 * entry == size fall through to json_array_get.  jansson returns NULL
	 * for out-of-range indices so it was masked, but reject it here. */
	if (entry >= json_array_size(val))
		return NULL;
	arr_entry = json_array_get(val, entry);
	if (!json_is_string(arr_entry))
		return NULL;

	/* Returned pointer is owned by the json value - do not free */
	return (char *)json_string_value(arr_entry);
}

/* Creates a freshly malloced dup of __json_array_string */
static char *json_array_string(json_t *val, unsigned int entry)
{
	char *buf = __json_array_string(val, entry);

	if (buf)
		return strdup(buf);
	return NULL;
}

static char *blank_merkle = "0000000000000000000000000000000000000000000000000000000000000000";

/* Parse a stratum mining.notify message, updating the pool's work template
 * (job id, prev hash, coinbase halves, merkle branches, header fields).
 * Returns false on any malformed field.  Takes pool->data_lock. */
static bool parse_notify(struct pool *pool, json_t *val)
{
	char *job_id, *prev_hash, *coinbase1, *coinbase2, *bbversion, *nbit,
	     *ntime, header[228];
	unsigned char *cb1 = NULL, *cb2 = NULL;
	size_t cb1_len, cb2_len, alloc_len;
	bool clean, ret = false;
	int merkles, i;
	json_t *arr;

	arr = json_array_get(val, 4);
	if (!arr || !json_is_array(arr))
		goto out;

	merkles = json_array_size(arr);

	job_id = json_array_string(val, 0);
	prev_hash = __json_array_string(val, 1);
	coinbase1 = json_array_string(val, 2);
	coinbase2 = json_array_string(val, 3);
	bbversion = __json_array_string(val, 5);
	nbit = __json_array_string(val, 6);
	ntime = __json_array_string(val, 7);
	clean = json_is_true(json_array_get(val, 8));

	if (!job_id || !prev_hash || !coinbase1 || !coinbase2 || !bbversion || !nbit || !ntime) {
		/* Annoying but we must not leak memory */
		if (job_id)
			free(job_id);
		if (coinbase1)
			free(coinbase1);
		if (coinbase2)
			free(coinbase2);
		goto out;
	}

	cg_wlock(&pool->data_lock);
	free(pool->swork.job_id);
	pool->swork.job_id = job_id;
	snprintf(pool->prev_hash, 65, "%s", prev_hash);
	cb1_len = strlen(coinbase1) / 2;
	cb2_len = strlen(coinbase2) / 2;
	snprintf(pool->bbversion, 9, "%s", bbversion);
	snprintf(pool->nbit, 9, "%s", nbit);
	snprintf(pool->ntime, 9, "%s", ntime);
	pool->swork.clean = clean;
	alloc_len = pool->coinbase_len =
cb1_len + pool->n1_len + pool->n2size + cb2_len; pool->nonce2_offset = cb1_len + pool->n1_len; for (i = 0; i < pool->merkles; i++) free(pool->swork.merkle_bin[i]); if (merkles) { pool->swork.merkle_bin = realloc(pool->swork.merkle_bin, sizeof(char *) * merkles + 1); for (i = 0; i < merkles; i++) { char *merkle = json_array_string(arr, i); pool->swork.merkle_bin[i] = malloc(32); if (unlikely(!pool->swork.merkle_bin[i])) quit(1, "Failed to malloc pool swork merkle_bin"); if (opt_protocol) applog(LOG_DEBUG, "merkle %d: %s", i, merkle); ret = hex2bin(pool->swork.merkle_bin[i], merkle, 32); free(merkle); if (unlikely(!ret)) { applog(LOG_ERR, "Failed to convert merkle to merkle_bin in parse_notify"); goto out_unlock; } } } pool->merkles = merkles; if (clean) pool->nonce2 = 0; #if 0 header_len = strlen(pool->bbversion) + strlen(pool->prev_hash); /* merkle_hash */ 32 + strlen(pool->ntime) + strlen(pool->nbit) + /* nonce */ 8 + /* workpadding */ 96; #endif snprintf(header, 225, "%s%s%s%s%s%s%s", pool->bbversion, pool->prev_hash, blank_merkle, pool->ntime, pool->nbit, "00000000", /* nonce */ workpadding); ret = hex2bin(pool->header_bin, header, 112); if (unlikely(!ret)) { applog(LOG_ERR, "Failed to convert header to header_bin in parse_notify"); goto out_unlock; } cb1 = alloca(cb1_len); ret = hex2bin(cb1, coinbase1, cb1_len); if (unlikely(!ret)) { applog(LOG_ERR, "Failed to convert cb1 to cb1_bin in parse_notify"); goto out_unlock; } cb2 = alloca(cb2_len); ret = hex2bin(cb2, coinbase2, cb2_len); if (unlikely(!ret)) { applog(LOG_ERR, "Failed to convert cb2 to cb2_bin in parse_notify"); goto out_unlock; } free(pool->coinbase); align_len(&alloc_len); pool->coinbase = calloc(alloc_len, 1); if (unlikely(!pool->coinbase)) quit(1, "Failed to calloc pool coinbase in parse_notify"); memcpy(pool->coinbase, cb1, cb1_len); memcpy(pool->coinbase + cb1_len, pool->nonce1bin, pool->n1_len); memcpy(pool->coinbase + cb1_len + pool->n1_len + pool->n2size, cb2, cb2_len); if (opt_debug) { char 
*cb = bin2hex(pool->coinbase, pool->coinbase_len);

		applog(LOG_DEBUG, "Pool %d coinbase %s", pool->pool_no, cb);
		free(cb);
	}
out_unlock:
	cg_wunlock(&pool->data_lock);

	if (opt_protocol) {
		applog(LOG_DEBUG, "job_id: %s", job_id);
		applog(LOG_DEBUG, "prev_hash: %s", prev_hash);
		applog(LOG_DEBUG, "coinbase1: %s", coinbase1);
		applog(LOG_DEBUG, "coinbase2: %s", coinbase2);
		applog(LOG_DEBUG, "bbversion: %s", bbversion);
		applog(LOG_DEBUG, "nbit: %s", nbit);
		applog(LOG_DEBUG, "ntime: %s", ntime);
		applog(LOG_DEBUG, "clean: %s", clean ? "yes" : "no");
	}
	free(coinbase1);
	free(coinbase2);

	/* A notify message is the closest stratum gets to a getwork */
	pool->getwork_requested++;
	total_getworks++;
	if (pool == current_pool())
		opt_work_update = true;
out:
	return ret;
}

/* Parse a stratum mining.set_difficulty message and store the new share
 * difficulty under data_lock.  Rejects a zero difficulty. */
static bool parse_diff(struct pool *pool, json_t *val)
{
	double old_diff, diff;

	diff = json_number_value(json_array_get(val, 0));
	if (diff == 0)
		return false;

	cg_wlock(&pool->data_lock);
	old_diff = pool->sdiff;
	pool->sdiff = diff;
	cg_wunlock(&pool->data_lock);

	if (old_diff != diff) {
		int idiff = diff;

		/* Log integer difficulties without a decimal point */
		if ((double)idiff == diff)
			applog(LOG_NOTICE, "Pool %d difficulty changed to %d", pool->pool_no, idiff);
		else
			applog(LOG_NOTICE, "Pool %d difficulty changed to %.1f", pool->pool_no, diff);
	} else
		applog(LOG_DEBUG, "Pool %d difficulty set to %f", pool->pool_no, diff);

	return true;
}

/* Mark the stratum connection inactive and close its socket.  Caller must
 * hold stratum_lock (see suspend_stratum / parse_reconnect). */
static void __suspend_stratum(struct pool *pool)
{
	clear_sockbuf(pool);
	pool->stratum_active = pool->stratum_notify = false;
	if (pool->sock)
		CLOSESOCKET(pool->sock);
	pool->sock = 0;
}

/* Handle a server-initiated client.reconnect, only allowing redirects that
 * stay within the same domain as the configured pool. */
static bool parse_reconnect(struct pool *pool, json_t *val)
{
	char *sockaddr_url, *stratum_port, *tmp;
	char *url, *port, address[256];

	memset(address, 0, 255);
	url = (char *)json_string_value(json_array_get(val, 0));
	if (!url)
		url = pool->sockaddr_url;
	else {
		char *dot_pool, *dot_reconnect;

		dot_pool = strchr(pool->sockaddr_url, '.');
		if (!dot_pool) {
			applog(LOG_ERR, "Denied stratum reconnect request for pool without domain '%s'",
			       pool->sockaddr_url);
			return false;
} dot_reconnect = strchr(url, '.'); if (!dot_reconnect) { applog(LOG_ERR, "Denied stratum reconnect request to url without domain '%s'", url); return false; } if (strcmp(dot_pool, dot_reconnect)) { applog(LOG_ERR, "Denied stratum reconnect request to non-matching domain url '%s'", pool->sockaddr_url); return false; } } port = (char *)json_string_value(json_array_get(val, 1)); if (!port) port = pool->stratum_port; sprintf(address, "%s:%s", url, port); if (!extract_sockaddr(address, &sockaddr_url, &stratum_port)) return false; applog(LOG_WARNING, "Stratum reconnect requested from pool %d to %s", pool->pool_no, address); clear_pool_work(pool); mutex_lock(&pool->stratum_lock); __suspend_stratum(pool); tmp = pool->sockaddr_url; pool->sockaddr_url = sockaddr_url; pool->stratum_url = pool->sockaddr_url; free(tmp); tmp = pool->stratum_port; pool->stratum_port = stratum_port; free(tmp); mutex_unlock(&pool->stratum_lock); if (!restart_stratum(pool)) { pool_failed(pool); return false; } return true; } static bool send_version(struct pool *pool, json_t *val) { char s[RBUFSIZE]; int id = json_integer_value(json_object_get(val, "id")); if (!id) return false; sprintf(s, "{\"id\": %d, \"result\": \""PACKAGE"/"VERSION"\", \"error\": null}", id); if (!stratum_send(pool, s, strlen(s))) return false; return true; } static bool show_message(struct pool *pool, json_t *val) { char *msg; if (!json_is_array(val)) return false; msg = (char *)json_string_value(json_array_get(val, 0)); if (!msg) return false; applog(LOG_NOTICE, "Pool %d message: %s", pool->pool_no, msg); return true; } bool parse_method(struct pool *pool, char *s) { json_t *val = NULL, *method, *err_val, *params; json_error_t err; bool ret = false; char *buf; if (!s) goto out; val = JSON_LOADS(s, &err); if (!val) { applog(LOG_INFO, "JSON decode failed(%d): %s", err.line, err.text); goto out; } method = json_object_get(val, "method"); if (!method) goto out_decref; err_val = json_object_get(val, "error"); params = 
json_object_get(val, "params"); if (err_val && !json_is_null(err_val)) { char *ss; if (err_val) ss = json_dumps(err_val, JSON_INDENT(3)); else ss = strdup("(unknown reason)"); applog(LOG_INFO, "JSON-RPC method decode failed: %s", ss); free(ss); goto out_decref; } buf = (char *)json_string_value(method); if (!buf) goto out_decref; if (!strncasecmp(buf, "mining.notify", 13)) { if (parse_notify(pool, params)) pool->stratum_notify = ret = true; else pool->stratum_notify = ret = false; goto out_decref; } if (!strncasecmp(buf, "mining.set_difficulty", 21)) { ret = parse_diff(pool, params); goto out_decref; } if (!strncasecmp(buf, "client.reconnect", 16)) { ret = parse_reconnect(pool, params); goto out_decref; } if (!strncasecmp(buf, "client.get_version", 18)) { ret = send_version(pool, val); goto out_decref; } if (!strncasecmp(buf, "client.show_message", 19)) { ret = show_message(pool, params); goto out_decref; } out_decref: json_decref(val); out: return ret; } bool auth_stratum(struct pool *pool) { json_t *val = NULL, *res_val, *err_val; char s[RBUFSIZE], *sret = NULL; json_error_t err; bool ret = false; sprintf(s, "{\"id\": %d, \"method\": \"mining.authorize\", \"params\": [\"%s\", \"%s\"]}", swork_id++, pool->rpc_user, pool->rpc_pass); if (!stratum_send(pool, s, strlen(s))) return ret; /* Parse all data in the queue and anything left should be auth */ while (42) { sret = recv_line(pool); if (!sret) return ret; if (parse_method(pool, sret)) free(sret); else break; } val = JSON_LOADS(sret, &err); free(sret); res_val = json_object_get(val, "result"); err_val = json_object_get(val, "error"); if (!res_val || json_is_false(res_val) || (err_val && !json_is_null(err_val))) { char *ss; if (err_val) ss = json_dumps(err_val, JSON_INDENT(3)); else ss = strdup("(unknown reason)"); applog(LOG_INFO, "pool %d JSON stratum auth failed: %s", pool->pool_no, ss); free(ss); suspend_stratum(pool); goto out; } ret = true; applog(LOG_INFO, "Stratum authorisation success for pool %d", 
pool->pool_no);
	pool->probed = true;
	successful_connect = true;
out:
	json_decref(val);
	return ret;
}

/* Read a single byte from the socket, or -1 on error.
 * NOTE(review): recv() returning 0 (orderly shutdown) passes the != -1 test
 * and returns an uninitialized c - callers treat only (char)-1 as failure,
 * so a closed connection can yield a garbage byte.  Worth fixing upstream. */
static int recv_byte(int sockd)
{
	char c;

	if (recv(sockd, &c, 1, 0) != -1)
		return c;

	return -1;
}

/* Establish an HTTP CONNECT tunnel through the pool's HTTP proxy, consuming
 * response headers up to the blank line. */
static bool http_negotiate(struct pool *pool, int sockd, bool http0)
{
	char buf[1024];
	int i, len;

	if (http0) {
		snprintf(buf, 1024, "CONNECT %s:%s HTTP/1.0\r\n\r\n",
			pool->sockaddr_url, pool->stratum_port);
	} else {
		snprintf(buf, 1024, "CONNECT %s:%s HTTP/1.1\r\nHost: %s:%s\r\n\r\n",
			pool->sockaddr_url, pool->stratum_port, pool->sockaddr_url,
			pool->stratum_port);
	}
	applog(LOG_DEBUG, "Sending proxy %s:%s - %s",
	       pool->sockaddr_proxy_url, pool->sockaddr_proxy_port, buf);
	send(sockd, buf, strlen(buf), 0);
	/* 12 bytes is exactly "HTTP/1.x 200" */
	len = recv(sockd, buf, 12, 0);
	if (len <= 0) {
		applog(LOG_WARNING, "Couldn't read from proxy %s:%s after sending CONNECT",
		       pool->sockaddr_proxy_url, pool->sockaddr_proxy_port);
		return false;
	}
	buf[len] = '\0';
	applog(LOG_DEBUG, "Received from proxy %s:%s - %s",
	       pool->sockaddr_proxy_url, pool->sockaddr_proxy_port, buf);
	if (strcmp(buf, "HTTP/1.1 200") && strcmp(buf, "HTTP/1.0 200")) {
		applog(LOG_WARNING, "HTTP Error from proxy %s:%s - %s",
		       pool->sockaddr_proxy_url, pool->sockaddr_proxy_port, buf);
		return false;
	}

	/* Ignore unwanted headers till we get desired response */
	for (i = 0; i < 4; i++) {
		buf[i] = recv_byte(sockd);
		if (buf[i] == (char)-1) {
			applog(LOG_WARNING, "Couldn't read HTTP byte from proxy %s:%s",
			       pool->sockaddr_proxy_url, pool->sockaddr_proxy_port);
			return false;
		}
	}
	/* Slide a 4-byte window until the header-terminating CRLFCRLF is seen */
	while (strncmp(buf, "\r\n\r\n", 4)) {
		for (i = 0; i < 3; i++)
			buf[i] = buf[i + 1];
		buf[3] = recv_byte(sockd);
		if (buf[3] == (char)-1) {
			applog(LOG_WARNING, "Couldn't read HTTP byte from proxy %s:%s",
			       pool->sockaddr_proxy_url, pool->sockaddr_proxy_port);
			return false;
		}
	}

	applog(LOG_DEBUG, "Success negotiating with %s:%s HTTP proxy",
	       pool->sockaddr_proxy_url, pool->sockaddr_proxy_port);
	return true;
}

/* Negotiate a SOCKS5 tunnel (RFC 1928, no authentication) to the pool. */
static bool socks5_negotiate(struct pool *pool, int sockd)
{
	unsigned char atyp, uclen;
	unsigned
short port;
	char buf[515];
	int i, len;

	/* Greeting: version 5, one auth method, method 0 (no auth) */
	buf[0] = 0x05;
	buf[1] = 0x01;
	buf[2] = 0x00;
	applog(LOG_DEBUG, "Attempting to negotiate with %s:%s SOCKS5 proxy",
	       pool->sockaddr_proxy_url, pool->sockaddr_proxy_port );
	send(sockd, buf, 3, 0);
	if (recv_byte(sockd) != 0x05 || recv_byte(sockd) != buf[2]) {
		applog(LOG_WARNING, "Bad response from %s:%s SOCKS5 server",
		       pool->sockaddr_proxy_url, pool->sockaddr_proxy_port );
		return false;
	}

	/* CONNECT request: atyp 3 = domain name, length-prefixed */
	buf[0] = 0x05;
	buf[1] = 0x01;
	buf[2] = 0x00;
	buf[3] = 0x03;
	len = (strlen(pool->sockaddr_url));
	if (len > 255)
		len = 255;
	uclen = len;
	buf[4] = (uclen & 0xff);
	memcpy(buf + 5, pool->sockaddr_url, len);
	port = atoi(pool->stratum_port);
	buf[5 + len] = (port >> 8);
	buf[6 + len] = (port & 0xff);
	send(sockd, buf, (7 + len), 0);
	if (recv_byte(sockd) != 0x05 || recv_byte(sockd) != 0x00) {
		applog(LOG_WARNING, "Bad response from %s:%s SOCKS5 server",
		       pool->sockaddr_proxy_url, pool->sockaddr_proxy_port );
		return false;
	}

	/* Skip the reserved byte, then the bound address per its type */
	recv_byte(sockd);
	atyp = recv_byte(sockd);
	if (atyp == 0x01) {
		for (i = 0; i < 4; i++)
			recv_byte(sockd);
	} else if (atyp == 0x03) {
		len = recv_byte(sockd);
		for (i = 0; i < len; i++)
			recv_byte(sockd);
	} else {
		applog(LOG_WARNING, "Bad response from %s:%s SOCKS5 server",
		       pool->sockaddr_proxy_url, pool->sockaddr_proxy_port );
		return false;
	}
	/* Skip the two bound-port bytes */
	for (i = 0; i < 2; i++)
		recv_byte(sockd);

	applog(LOG_DEBUG, "Success negotiating with %s:%s SOCKS5 proxy",
	       pool->sockaddr_proxy_url, pool->sockaddr_proxy_port);
	return true;
}

/* Negotiate a SOCKS4 (or SOCKS4a) tunnel to the pool through the proxy. */
static bool socks4_negotiate(struct pool *pool, int sockd, bool socks4a)
{
	unsigned short port;
	in_addr_t inp;
	char buf[515];
	int i, len;

	buf[0] = 0x04;
	buf[1] = 0x01;
	port = atoi(pool->stratum_port);
	buf[2] = port >> 8;
	buf[3] = port & 0xff;
	/* Fixed user id field at offset 8 */
	sprintf(&buf[8], "CGMINER");

	/* See if we've been given an IP address directly to avoid needing to
	 * resolve it.
*/ inp = inet_addr(pool->sockaddr_url); inp = ntohl(inp); if ((int)inp != -1) socks4a = false; else { /* Try to extract the IP address ourselves first */ struct addrinfo servinfobase, *servinfo, hints; servinfo = &servinfobase; memset(&hints, 0, sizeof(struct addrinfo)); hints.ai_family = AF_INET; /* IPV4 only */ if (!getaddrinfo(pool->sockaddr_url, NULL, &hints, &servinfo)) { struct sockaddr_in *saddr_in = (struct sockaddr_in *)servinfo->ai_addr; inp = ntohl(saddr_in->sin_addr.s_addr); socks4a = false; freeaddrinfo(servinfo); } } if (!socks4a) { if ((int)inp == -1) { applog(LOG_WARNING, "Invalid IP address specified for socks4 proxy: %s", pool->sockaddr_url); return false; } buf[4] = (inp >> 24) & 0xFF; buf[5] = (inp >> 16) & 0xFF; buf[6] = (inp >> 8) & 0xFF; buf[7] = (inp >> 0) & 0xFF; send(sockd, buf, 16, 0); } else { /* This appears to not be working but hopefully most will be * able to resolve IP addresses themselves. */ buf[4] = 0; buf[5] = 0; buf[6] = 0; buf[7] = 1; len = strlen(pool->sockaddr_url); if (len > 255) len = 255; memcpy(&buf[16], pool->sockaddr_url, len); len += 16; buf[len++] = '\0'; send(sockd, buf, len, 0); } if (recv_byte(sockd) != 0x00 || recv_byte(sockd) != 0x5a) { applog(LOG_WARNING, "Bad response from %s:%s SOCKS4 server", pool->sockaddr_proxy_url, pool->sockaddr_proxy_port); return false; } for (i = 0; i < 6; i++) recv_byte(sockd); return true; } static void noblock_socket(SOCKETTYPE fd) { #ifndef WIN32 int flags = fcntl(fd, F_GETFL, 0); fcntl(fd, F_SETFL, O_NONBLOCK | flags); #else u_long flags = 1; ioctlsocket(fd, FIONBIO, &flags); #endif } static void block_socket(SOCKETTYPE fd) { #ifndef WIN32 int flags = fcntl(fd, F_GETFL, 0); fcntl(fd, F_SETFL, flags & ~O_NONBLOCK); #else u_long flags = 0; ioctlsocket(fd, FIONBIO, &flags); #endif } static bool sock_connecting(void) { #ifndef WIN32 return errno == EINPROGRESS; #else return WSAGetLastError() == WSAEWOULDBLOCK; #endif } static bool setup_stratum_socket(struct pool *pool) { struct 
addrinfo servinfobase, *servinfo, *hints, *p; char *sockaddr_url, *sockaddr_port; int sockd; mutex_lock(&pool->stratum_lock); pool->stratum_active = false; if (pool->sock) CLOSESOCKET(pool->sock); pool->sock = 0; mutex_unlock(&pool->stratum_lock); hints = &pool->stratum_hints; memset(hints, 0, sizeof(struct addrinfo)); hints->ai_family = AF_UNSPEC; hints->ai_socktype = SOCK_STREAM; servinfo = &servinfobase; if (!pool->rpc_proxy && opt_socks_proxy) { pool->rpc_proxy = opt_socks_proxy; extract_sockaddr(pool->rpc_proxy, &pool->sockaddr_proxy_url, &pool->sockaddr_proxy_port); pool->rpc_proxytype = PROXY_SOCKS5; } if (pool->rpc_proxy) { sockaddr_url = pool->sockaddr_proxy_url; sockaddr_port = pool->sockaddr_proxy_port; } else { sockaddr_url = pool->sockaddr_url; sockaddr_port = pool->stratum_port; } if (getaddrinfo(sockaddr_url, sockaddr_port, hints, &servinfo) != 0) { if (!pool->probed) { applog(LOG_WARNING, "Failed to resolve (?wrong URL) %s:%s", sockaddr_url, sockaddr_port); pool->probed = true; } else { applog(LOG_INFO, "Failed to getaddrinfo for %s:%s", sockaddr_url, sockaddr_port); } return false; } for (p = servinfo; p != NULL; p = p->ai_next) { sockd = socket(p->ai_family, p->ai_socktype, p->ai_protocol); if (sockd == -1) { applog(LOG_DEBUG, "Failed socket"); continue; } /* Iterate non blocking over entries returned by getaddrinfo * to cope with round robin DNS entries, finding the first one * we can connect to quickly. 
*/ noblock_socket(sockd); if (connect(sockd, p->ai_addr, p->ai_addrlen) == -1) { struct timeval tv_timeout = {1, 0}; int selret; fd_set rw; if (!sock_connecting()) { CLOSESOCKET(sockd); applog(LOG_DEBUG, "Failed sock connect"); continue; } retry: FD_ZERO(&rw); FD_SET(sockd, &rw); selret = select(sockd + 1, NULL, &rw, NULL, &tv_timeout); if (selret > 0 && FD_ISSET(sockd, &rw)) { socklen_t len; int err, n; len = sizeof(err); n = getsockopt(sockd, SOL_SOCKET, SO_ERROR, (void *)&err, &len); if (!n && !err) { applog(LOG_DEBUG, "Succeeded delayed connect"); block_socket(sockd); break; } } if (selret < 0 && interrupted()) goto retry; CLOSESOCKET(sockd); applog(LOG_DEBUG, "Select timeout/failed connect"); continue; } applog(LOG_WARNING, "Succeeded immediate connect"); block_socket(sockd); break; } if (p == NULL) { applog(LOG_INFO, "Failed to connect to stratum on %s:%s", sockaddr_url, sockaddr_port); freeaddrinfo(servinfo); return false; } freeaddrinfo(servinfo); if (pool->rpc_proxy) { switch (pool->rpc_proxytype) { case PROXY_HTTP_1_0: if (!http_negotiate(pool, sockd, true)) return false; break; case PROXY_HTTP: if (!http_negotiate(pool, sockd, false)) return false; break; case PROXY_SOCKS5: case PROXY_SOCKS5H: if (!socks5_negotiate(pool, sockd)) return false; break; case PROXY_SOCKS4: if (!socks4_negotiate(pool, sockd, false)) return false; break; case PROXY_SOCKS4A: if (!socks4_negotiate(pool, sockd, true)) return false; break; default: applog(LOG_WARNING, "Unsupported proxy type for %s:%s", pool->sockaddr_proxy_url, pool->sockaddr_proxy_port); return false; break; } } if (!pool->sockbuf) { pool->sockbuf = calloc(RBUFSIZE, 1); if (!pool->sockbuf) quithere(1, "Failed to calloc pool sockbuf"); pool->sockbuf_size = RBUFSIZE; } pool->sock = sockd; keep_sockalive(sockd); return true; } static char *get_sessionid(json_t *val) { char *ret = NULL; json_t *arr_val; int arrsize, i; arr_val = json_array_get(val, 0); if (!arr_val || !json_is_array(arr_val)) goto out; arrsize = 
json_array_size(arr_val); for (i = 0; i < arrsize; i++) { json_t *arr = json_array_get(arr_val, i); char *notify; if (!arr | !json_is_array(arr)) break; notify = __json_array_string(arr, 0); if (!notify) continue; if (!strncasecmp(notify, "mining.notify", 13)) { ret = json_array_string(arr, 1); break; } } out: return ret; } void suspend_stratum(struct pool *pool) { applog(LOG_INFO, "Closing socket for stratum pool %d", pool->pool_no); mutex_lock(&pool->stratum_lock); __suspend_stratum(pool); mutex_unlock(&pool->stratum_lock); } bool initiate_stratum(struct pool *pool) { bool ret = false, recvd = false, noresume = false, sockd = false; char s[RBUFSIZE], *sret = NULL, *nonce1, *sessionid; json_t *val = NULL, *res_val, *err_val; json_error_t err; int n2size; resend: if (!setup_stratum_socket(pool)) { sockd = false; goto out; } sockd = true; if (recvd) { /* Get rid of any crap lying around if we're resending */ clear_sock(pool); sprintf(s, "{\"id\": %d, \"method\": \"mining.subscribe\", \"params\": []}", swork_id++); } else { if (pool->sessionid) sprintf(s, "{\"id\": %d, \"method\": \"mining.subscribe\", \"params\": [\""PACKAGE"/"VERSION"\", \"%s\"]}", swork_id++, pool->sessionid); else sprintf(s, "{\"id\": %d, \"method\": \"mining.subscribe\", \"params\": [\""PACKAGE"/"VERSION"\"]}", swork_id++); } if (__stratum_send(pool, s, strlen(s)) != SEND_OK) { applog(LOG_DEBUG, "Failed to send s in initiate_stratum"); goto out; } if (!socket_full(pool, DEFAULT_SOCKWAIT)) { applog(LOG_DEBUG, "Timed out waiting for response in initiate_stratum"); goto out; } sret = recv_line(pool); if (!sret) goto out; recvd = true; val = JSON_LOADS(sret, &err); free(sret); if (!val) { applog(LOG_INFO, "JSON decode failed(%d): %s", err.line, err.text); goto out; } res_val = json_object_get(val, "result"); err_val = json_object_get(val, "error"); if (!res_val || json_is_null(res_val) || (err_val && !json_is_null(err_val))) { char *ss; if (err_val) ss = json_dumps(err_val, JSON_INDENT(3)); else ss 
= strdup("(unknown reason)"); applog(LOG_INFO, "JSON-RPC decode failed: %s", ss); free(ss); goto out; } sessionid = get_sessionid(res_val); if (!sessionid) applog(LOG_DEBUG, "Failed to get sessionid in initiate_stratum"); nonce1 = json_array_string(res_val, 1); if (!nonce1) { applog(LOG_INFO, "Failed to get nonce1 in initiate_stratum"); free(sessionid); goto out; } n2size = json_integer_value(json_array_get(res_val, 2)); if (!n2size) { applog(LOG_INFO, "Failed to get n2size in initiate_stratum"); free(sessionid); free(nonce1); goto out; } cg_wlock(&pool->data_lock); pool->sessionid = sessionid; pool->nonce1 = nonce1; pool->n1_len = strlen(nonce1) / 2; free(pool->nonce1bin); pool->nonce1bin = calloc(pool->n1_len, 1); if (unlikely(!pool->nonce1bin)) quithere(1, "Failed to calloc pool->nonce1bin"); hex2bin(pool->nonce1bin, pool->nonce1, pool->n1_len); pool->n2size = n2size; cg_wunlock(&pool->data_lock); if (sessionid) applog(LOG_DEBUG, "Pool %d stratum session id: %s", pool->pool_no, pool->sessionid); ret = true; out: if (ret) { if (!pool->stratum_url) pool->stratum_url = pool->sockaddr_url; pool->stratum_active = true; pool->sdiff = 1; if (opt_protocol) { applog(LOG_DEBUG, "Pool %d confirmed mining.subscribe with extranonce1 %s extran2size %d", pool->pool_no, pool->nonce1, pool->n2size); } } else { if (recvd && !noresume) { /* Reset the sessionid used for stratum resuming in case the pool * does not support it, or does not know how to respond to the * presence of the sessionid parameter. 
*/ cg_wlock(&pool->data_lock); free(pool->sessionid); free(pool->nonce1); pool->sessionid = pool->nonce1 = NULL; cg_wunlock(&pool->data_lock); applog(LOG_DEBUG, "Failed to resume stratum, trying afresh"); noresume = true; json_decref(val); goto resend; } applog(LOG_DEBUG, "Initiate stratum failed"); if (sockd) suspend_stratum(pool); } json_decref(val); return ret; } bool restart_stratum(struct pool *pool) { if (pool->stratum_active) suspend_stratum(pool); if (!initiate_stratum(pool)) return false; if (!auth_stratum(pool)) return false; return true; } void dev_error(struct cgpu_info *dev, enum dev_reason reason) { dev->device_last_not_well = time(NULL); dev->device_not_well_reason = reason; switch (reason) { case REASON_THREAD_FAIL_INIT: dev->thread_fail_init_count++; break; case REASON_THREAD_ZERO_HASH: dev->thread_zero_hash_count++; break; case REASON_THREAD_FAIL_QUEUE: dev->thread_fail_queue_count++; break; case REASON_DEV_SICK_IDLE_60: dev->dev_sick_idle_60_count++; break; case REASON_DEV_DEAD_IDLE_600: dev->dev_dead_idle_600_count++; break; case REASON_DEV_NOSTART: dev->dev_nostart_count++; break; case REASON_DEV_OVER_HEAT: dev->dev_over_heat_count++; break; case REASON_DEV_THERMAL_CUTOFF: dev->dev_thermal_cutoff_count++; break; case REASON_DEV_COMMS_ERROR: dev->dev_comms_error_count++; break; case REASON_DEV_THROTTLE: dev->dev_throttle_count++; break; } } /* Realloc an existing string to fit an extra string s, appending s to it. 
*/ void *realloc_strcat(char *ptr, char *s) { size_t old = 0, len = strlen(s); char *ret; if (!len) return ptr; if (ptr) old = strlen(ptr); len += old + 1; align_len(&len); ret = malloc(len); if (unlikely(!ret)) quithere(1, "Failed to malloc"); if (ptr) { sprintf(ret, "%s%s", ptr, s); free(ptr); } else sprintf(ret, "%s", s); return ret; } /* Make a text readable version of a string using 0xNN for < ' ' or > '~' * Including 0x00 at the end * You must free the result yourself */ void *str_text(char *ptr) { unsigned char *uptr; char *ret, *txt; if (ptr == NULL) { ret = strdup("(null)"); if (unlikely(!ret)) quithere(1, "Failed to malloc null"); } uptr = (unsigned char *)ptr; ret = txt = malloc(strlen(ptr)*4+5); // Guaranteed >= needed if (unlikely(!txt)) quithere(1, "Failed to malloc txt"); do { if (*uptr < ' ' || *uptr > '~') { sprintf(txt, "0x%02x", *uptr); txt += 4; } else *(txt++) = *uptr; } while (*(uptr++)); *txt = '\0'; return ret; } void RenameThread(const char* name) { char buf[16]; snprintf(buf, sizeof(buf), "cg@%s", name); #if defined(PR_SET_NAME) // Only the first 15 characters are used (16 - NUL terminator) prctl(PR_SET_NAME, buf, 0, 0, 0); #elif (defined(__FreeBSD__) || defined(__OpenBSD__)) pthread_set_name_np(pthread_self(), buf); #elif defined(MAC_OSX) pthread_setname_np(buf); #else // Prevent warnings (void)buf; #endif } /* cgminer specific wrappers for true unnamed semaphore usage on platforms * that support them and for apple which does not. We use a single byte across * a pipe to emulate semaphore behaviour there. */ #ifdef __APPLE__ void _cgsem_init(cgsem_t *cgsem, const char *file, const char *func, const int line) { int flags, fd, i; if (pipe(cgsem->pipefd) == -1) quitfrom(1, file, func, line, "Failed pipe errno=%d", errno); /* Make the pipes FD_CLOEXEC to allow them to close should we call * execv on restart. 
*/ for (i = 0; i < 2; i++) { fd = cgsem->pipefd[i]; flags = fcntl(fd, F_GETFD, 0); flags |= FD_CLOEXEC; if (fcntl(fd, F_SETFD, flags) == -1) quitfrom(1, file, func, line, "Failed to fcntl errno=%d", errno); } } void _cgsem_post(cgsem_t *cgsem, const char *file, const char *func, const int line) { const char buf = 1; int ret; retry: ret = write(cgsem->pipefd[1], &buf, 1); if (unlikely(ret == 0)) applog(LOG_WARNING, "Failed to write errno=%d" IN_FMT_FFL, errno, file, func, line); else if (unlikely(ret < 0 && interrupted)) goto retry; } void _cgsem_wait(cgsem_t *cgsem, const char *file, const char *func, const int line) { char buf; int ret; retry: ret = read(cgsem->pipefd[0], &buf, 1); if (unlikely(ret == 0)) applog(LOG_WARNING, "Failed to read errno=%d" IN_FMT_FFL, errno, file, func, line); else if (unlikely(ret < 0 && interrupted)) goto retry; } void cgsem_destroy(cgsem_t *cgsem) { close(cgsem->pipefd[1]); close(cgsem->pipefd[0]); } /* This is similar to sem_timedwait but takes a millisecond value */ int _cgsem_mswait(cgsem_t *cgsem, int ms, const char *file, const char *func, const int line) { struct timeval timeout; int ret, fd; fd_set rd; char buf; retry: fd = cgsem->pipefd[0]; FD_ZERO(&rd); FD_SET(fd, &rd); ms_to_timeval(&timeout, ms); ret = select(fd + 1, &rd, NULL, NULL, &timeout); if (ret > 0) { ret = read(fd, &buf, 1); return 0; } if (likely(!ret)) return ETIMEDOUT; if (interrupted()) goto retry; quitfrom(1, file, func, line, "Failed to sem_timedwait errno=%d cgsem=0x%p", errno, cgsem); /* We don't reach here */ return 0; } /* Reset semaphore count back to zero */ void cgsem_reset(cgsem_t *cgsem) { int ret, fd; fd_set rd; char buf; fd = cgsem->pipefd[0]; FD_ZERO(&rd); FD_SET(fd, &rd); do { struct timeval timeout = {0, 0}; ret = select(fd + 1, &rd, NULL, NULL, &timeout); if (ret > 0) ret = read(fd, &buf, 1); else if (unlikely(ret < 0 && interrupted())) ret = 1; } while (ret > 0); } #else void _cgsem_init(cgsem_t *cgsem, const char *file, const char *func, 
const int line) { int ret; if ((ret = sem_init(cgsem, 0, 0))) quitfrom(1, file, func, line, "Failed to sem_init ret=%d errno=%d", ret, errno); } void _cgsem_post(cgsem_t *cgsem, const char *file, const char *func, const int line) { if (unlikely(sem_post(cgsem))) quitfrom(1, file, func, line, "Failed to sem_post errno=%d cgsem=0x%p", errno, cgsem); } void _cgsem_wait(cgsem_t *cgsem, const char *file, const char *func, const int line) { retry: if (unlikely(sem_wait(cgsem))) { if (interrupted()) goto retry; quitfrom(1, file, func, line, "Failed to sem_wait errno=%d cgsem=0x%p", errno, cgsem); } } int _cgsem_mswait(cgsem_t *cgsem, int ms, const char *file, const char *func, const int line) { struct timespec abs_timeout, ts_now; struct timeval tv_now; int ret; cgtime(&tv_now); timeval_to_spec(&ts_now, &tv_now); ms_to_timespec(&abs_timeout, ms); retry: timeraddspec(&abs_timeout, &ts_now); ret = sem_timedwait(cgsem, &abs_timeout); if (ret) { if (likely(sock_timeout())) return ETIMEDOUT; if (interrupted()) goto retry; quitfrom(1, file, func, line, "Failed to sem_timedwait errno=%d cgsem=0x%p", errno, cgsem); } return 0; } void cgsem_reset(cgsem_t *cgsem) { int ret; do { ret = sem_trywait(cgsem); if (unlikely(ret < 0 && interrupted())) ret = 0; } while (!ret); } void cgsem_destroy(cgsem_t *cgsem) { sem_destroy(cgsem); } #endif /* Provide a completion_timeout helper function for unreliable functions that * may die due to driver issues etc that time out if the function fails and * can then reliably return. 
*/
/* State shared between cg_completion_timeout() and its worker thread:
 * the semaphore signalled on completion, plus the function and argument
 * to run. */
struct cg_completion {
	cgsem_t cgsem;
	void (*fn)(void *fnarg);
	void *fnarg;
};

/* Worker thread body: run the wrapped function, then post the semaphore so
 * the waiter knows it finished. Async cancellation is enabled so the waiter
 * can kill it immediately on timeout. */
void *completion_thread(void *arg)
{
	struct cg_completion *cgc = (struct cg_completion *)arg;

	pthread_setcanceltype(PTHREAD_CANCEL_ASYNCHRONOUS, NULL);
	cgc->fn(cgc->fnarg);
	cgsem_post(&cgc->cgsem);

	return NULL;
}

/* Run fn(fnarg) in a separate thread, waiting at most timeout milliseconds
 * for it to finish. Returns true if it completed in time, false on timeout
 * or allocation failure. NOTE: on timeout the thread is cancelled without a
 * join and cgc is deliberately NOT freed — the cancelled thread may still
 * touch it, so the allocation is leaked rather than risking use-after-free. */
bool cg_completion_timeout(void *fn, void *fnarg, int timeout)
{
	struct cg_completion *cgc;
	pthread_t pthread;
	bool ret = false;

	cgc = malloc(sizeof(struct cg_completion));
	if (unlikely(!cgc))
		return ret;
	cgsem_init(&cgc->cgsem);
	cgc->fn = fn;
	cgc->fnarg = fnarg;

	pthread_create(&pthread, NULL, completion_thread, (void *)cgc);

	/* cgsem_mswait returns 0 on success, ETIMEDOUT on timeout */
	ret = cgsem_mswait(&cgc->cgsem, timeout);
	if (!ret) {
		pthread_join(pthread, NULL);
		free(cgc);
	} else
		pthread_cancel(pthread);
	return !ret;
}

/* Bounds-checked memcpy wrapper: logs and refuses zero-length or absurdly
 * large (> 2GiB) copies, reporting the caller's file/function/line. */
void _cg_memcpy(void *dest, const void *src, unsigned int n, const char *file, const char *func, const int line)
{
	if (unlikely(n < 1 || n > (1ul << 31))) {
		applog(LOG_ERR, "ERR: Asked to memcpy %u bytes from %s %s():%d",
		       n, file, func, line);
		return;
	}
	memcpy(dest, src, n);
}
/* * Copyright 2011-2014 Con Kolivas * Copyright 2010 Jeff Garzik * * This program is free software; you can redistribute it and/or modify it * under the terms of the GNU General Public License as published by the Free * Software Foundation; either version 3 of the License, or (at your option) * any later version. See COPYING for more details. */ #include "config.h" #include <stdio.h> #include <stdlib.h> #include <ctype.h> #include <stdarg.h> #include <string.h> #include <jansson.h> #ifdef HAVE_LIBCURL #include <curl/curl.h> #endif #include <time.h> #include <errno.h> #include <unistd.h> #include <sys/types.h> #ifndef WIN32 #include <fcntl.h> # ifdef __linux # include <sys/prctl.h> # endif # include <sys/socket.h> # include <netinet/in.h> # include <netinet/tcp.h> # include <netdb.h> #else # include <winsock2.h> # include <ws2tcpip.h> # include <mmsystem.h> #endif #include "miner.h" #include "elist.h" #include "compat.h" #include "util.h" #define DEFAULT_SOCKWAIT 60 bool successful_connect = false; static void keep_sockalive(SOCKETTYPE fd) { const int tcp_one = 1; #ifndef WIN32 const int tcp_keepidle = 45; const int tcp_keepintvl = 30; int flags = fcntl(fd, F_GETFL, 0); fcntl(fd, F_SETFL, O_NONBLOCK | flags); #else u_long flags = 1; ioctlsocket(fd, FIONBIO, &flags); #endif setsockopt(fd, SOL_SOCKET, SO_KEEPALIVE, (const void *)&tcp_one, sizeof(tcp_one)); if (!opt_delaynet) #ifndef __linux setsockopt(fd, IPPROTO_TCP, TCP_NODELAY, (const void *)&tcp_one, sizeof(tcp_one)); #else /* __linux */ setsockopt(fd, SOL_TCP, TCP_NODELAY, (const void *)&tcp_one, sizeof(tcp_one)); setsockopt(fd, SOL_TCP, TCP_KEEPCNT, &tcp_one, sizeof(tcp_one)); setsockopt(fd, SOL_TCP, TCP_KEEPIDLE, &tcp_keepidle, sizeof(tcp_keepidle)); setsockopt(fd, SOL_TCP, TCP_KEEPINTVL, &tcp_keepintvl, sizeof(tcp_keepintvl)); #endif /* __linux */ #ifdef __APPLE_CC__ setsockopt(fd, IPPROTO_TCP, TCP_KEEPALIVE, &tcp_keepintvl, sizeof(tcp_keepintvl)); #endif /* __APPLE_CC__ */ } struct tq_ent { void *data; 
struct list_head q_node; }; #ifdef HAVE_LIBCURL struct timeval nettime; struct data_buffer { void *buf; size_t len; }; struct upload_buffer { const void *buf; size_t len; }; struct header_info { char *lp_path; int rolltime; char *reason; char *stratum_url; bool hadrolltime; bool canroll; bool hadexpire; }; static void databuf_free(struct data_buffer *db) { if (!db) return; free(db->buf); memset(db, 0, sizeof(*db)); } static size_t all_data_cb(const void *ptr, size_t size, size_t nmemb, void *user_data) { struct data_buffer *db = user_data; size_t len = size * nmemb; size_t oldlen, newlen; void *newmem; static const unsigned char zero = 0; oldlen = db->len; newlen = oldlen + len; newmem = realloc(db->buf, newlen + 1); if (!newmem) return 0; db->buf = newmem; db->len = newlen; memcpy(db->buf + oldlen, ptr, len); memcpy(db->buf + newlen, &zero, 1); /* null terminate */ return len; } static size_t upload_data_cb(void *ptr, size_t size, size_t nmemb, void *user_data) { struct upload_buffer *ub = user_data; unsigned int len = size * nmemb; if (len > ub->len) len = ub->len; if (len) { memcpy(ptr, ub->buf, len); ub->buf += len; ub->len -= len; } return len; } static size_t resp_hdr_cb(void *ptr, size_t size, size_t nmemb, void *user_data) { struct header_info *hi = user_data; size_t remlen, slen, ptrlen = size * nmemb; char *rem, *val = NULL, *key = NULL; void *tmp; val = calloc(1, ptrlen); key = calloc(1, ptrlen); if (!key || !val) goto out; tmp = memchr(ptr, ':', ptrlen); if (!tmp || (tmp == ptr)) /* skip empty keys / blanks */ goto out; slen = tmp - ptr; if ((slen + 1) == ptrlen) /* skip key w/ no value */ goto out; memcpy(key, ptr, slen); /* store & nul term key */ key[slen] = 0; rem = ptr + slen + 1; /* trim value's leading whitespace */ remlen = ptrlen - slen - 1; while ((remlen > 0) && (isspace(*rem))) { remlen--; rem++; } memcpy(val, rem, remlen); /* store value, trim trailing ws */ val[remlen] = 0; while ((*val) && (isspace(val[strlen(val) - 1]))) val[strlen(val) 
- 1] = 0; if (!*val) /* skip blank value */ goto out; if (opt_protocol) applog(LOG_DEBUG, "HTTP hdr(%s): %s", key, val); if (!strcasecmp("X-Roll-Ntime", key)) { hi->hadrolltime = true; if (!strncasecmp("N", val, 1)) applog(LOG_DEBUG, "X-Roll-Ntime: N found"); else { hi->canroll = true; /* Check to see if expire= is supported and if not, set * the rolltime to the default scantime */ if (strlen(val) > 7 && !strncasecmp("expire=", val, 7)) { sscanf(val + 7, "%d", &hi->rolltime); hi->hadexpire = true; } else hi->rolltime = opt_scantime; applog(LOG_DEBUG, "X-Roll-Ntime expiry set to %d", hi->rolltime); } } if (!strcasecmp("X-Long-Polling", key)) { hi->lp_path = val; /* steal memory reference */ val = NULL; } if (!strcasecmp("X-Reject-Reason", key)) { hi->reason = val; /* steal memory reference */ val = NULL; } if (!strcasecmp("X-Stratum", key)) { hi->stratum_url = val; val = NULL; } out: free(key); free(val); return ptrlen; } static void last_nettime(struct timeval *last) { rd_lock(&netacc_lock); last->tv_sec = nettime.tv_sec; last->tv_usec = nettime.tv_usec; rd_unlock(&netacc_lock); } static void set_nettime(void) { wr_lock(&netacc_lock); cgtime(&nettime); wr_unlock(&netacc_lock); } #if CURL_HAS_KEEPALIVE static void keep_curlalive(CURL *curl) { const int tcp_keepidle = 45; const int tcp_keepintvl = 30; const long int keepalive = 1; curl_easy_setopt(curl, CURLOPT_TCP_KEEPALIVE, keepalive); curl_easy_setopt(curl, CURLOPT_TCP_KEEPIDLE, tcp_keepidle); curl_easy_setopt(curl, CURLOPT_TCP_KEEPINTVL, tcp_keepintvl); } #else static void keep_curlalive(CURL *curl) { SOCKETTYPE sock; curl_easy_getinfo(curl, CURLINFO_LASTSOCKET, (long *)&sock); keep_sockalive(sock); } #endif static int curl_debug_cb(__maybe_unused CURL *handle, curl_infotype type, __maybe_unused char *data, size_t size, void *userdata) { struct pool *pool = (struct pool *)userdata; switch(type) { case CURLINFO_HEADER_IN: case CURLINFO_DATA_IN: case CURLINFO_SSL_DATA_IN: pool->cgminer_pool_stats.net_bytes_received 
+= size; break; case CURLINFO_HEADER_OUT: case CURLINFO_DATA_OUT: case CURLINFO_SSL_DATA_OUT: pool->cgminer_pool_stats.net_bytes_sent += size; break; case CURLINFO_TEXT: default: break; } return 0; } json_t *json_web_config(const char *url) { struct data_buffer all_data = {NULL, 0}; char curl_err_str[CURL_ERROR_SIZE]; long timeout = 60; json_error_t err; json_t *val; CURL *curl; int rc; memset(&err, 0, sizeof(err)); curl = curl_easy_init(); if (unlikely(!curl)) quithere(1, "CURL initialisation failed"); curl_easy_setopt(curl, CURLOPT_TIMEOUT, timeout); curl_easy_setopt(curl, CURLOPT_NOSIGNAL, 1); curl_easy_setopt(curl, CURLOPT_URL, url); curl_easy_setopt(curl, CURLOPT_ENCODING, ""); curl_easy_setopt(curl, CURLOPT_FAILONERROR, 1); curl_easy_setopt(curl, CURLOPT_WRITEFUNCTION, all_data_cb); curl_easy_setopt(curl, CURLOPT_WRITEDATA, &all_data); curl_easy_setopt(curl, CURLOPT_ERRORBUFFER, curl_err_str); curl_easy_setopt(curl, CURLOPT_FOLLOWLOCATION, 1); curl_easy_setopt(curl, CURLOPT_USE_SSL, CURLUSESSL_TRY); val = NULL; rc = curl_easy_perform(curl); curl_easy_cleanup(curl); if (rc) { applog(LOG_ERR, "HTTP config request of '%s' failed: %s", url, curl_err_str); goto c_out; } if (!all_data.buf) { applog(LOG_ERR, "Empty config data received from '%s'", url); goto c_out; } val = JSON_LOADS(all_data.buf, &err); if (!val) { applog(LOG_ERR, "JSON config decode of '%s' failed(%d): %s", url, err.line, err.text); } databuf_free(&all_data); c_out: return val; } json_t *json_rpc_call(CURL *curl, const char *url, const char *userpass, const char *rpc_req, bool probe, bool longpoll, int *rolltime, struct pool *pool, bool share) { long timeout = longpoll ? 
(60 * 60) : 60; struct data_buffer all_data = {NULL, 0}; struct header_info hi = {NULL, 0, NULL, NULL, false, false, false}; char len_hdr[64], user_agent_hdr[128]; char curl_err_str[CURL_ERROR_SIZE]; struct curl_slist *headers = NULL; struct upload_buffer upload_data; json_t *val, *err_val, *res_val; bool probing = false; double byte_count; json_error_t err; int rc; memset(&err, 0, sizeof(err)); /* it is assumed that 'curl' is freshly [re]initialized at this pt */ if (probe) probing = !pool->probed; curl_easy_setopt(curl, CURLOPT_TIMEOUT, timeout); // CURLOPT_VERBOSE won't write to stderr if we use CURLOPT_DEBUGFUNCTION curl_easy_setopt(curl, CURLOPT_DEBUGFUNCTION, curl_debug_cb); curl_easy_setopt(curl, CURLOPT_DEBUGDATA, (void *)pool); curl_easy_setopt(curl, CURLOPT_VERBOSE, 1); curl_easy_setopt(curl, CURLOPT_NOSIGNAL, 1); curl_easy_setopt(curl, CURLOPT_URL, url); curl_easy_setopt(curl, CURLOPT_ENCODING, ""); curl_easy_setopt(curl, CURLOPT_FAILONERROR, 1); /* Shares are staggered already and delays in submission can be costly * so do not delay them */ if (!opt_delaynet || share) curl_easy_setopt(curl, CURLOPT_TCP_NODELAY, 1); curl_easy_setopt(curl, CURLOPT_WRITEFUNCTION, all_data_cb); curl_easy_setopt(curl, CURLOPT_WRITEDATA, &all_data); curl_easy_setopt(curl, CURLOPT_READFUNCTION, upload_data_cb); curl_easy_setopt(curl, CURLOPT_READDATA, &upload_data); curl_easy_setopt(curl, CURLOPT_ERRORBUFFER, curl_err_str); curl_easy_setopt(curl, CURLOPT_FOLLOWLOCATION, 1); curl_easy_setopt(curl, CURLOPT_HEADERFUNCTION, resp_hdr_cb); curl_easy_setopt(curl, CURLOPT_HEADERDATA, &hi); curl_easy_setopt(curl, CURLOPT_USE_SSL, CURLUSESSL_TRY); if (pool->rpc_proxy) { curl_easy_setopt(curl, CURLOPT_PROXY, pool->rpc_proxy); curl_easy_setopt(curl, CURLOPT_PROXYTYPE, pool->rpc_proxytype); } else if (opt_socks_proxy) { curl_easy_setopt(curl, CURLOPT_PROXY, opt_socks_proxy); curl_easy_setopt(curl, CURLOPT_PROXYTYPE, CURLPROXY_SOCKS4); } if (userpass) { curl_easy_setopt(curl, 
CURLOPT_USERPWD, userpass); curl_easy_setopt(curl, CURLOPT_HTTPAUTH, CURLAUTH_BASIC); } if (longpoll) keep_curlalive(curl); curl_easy_setopt(curl, CURLOPT_POST, 1); if (opt_protocol) applog(LOG_DEBUG, "JSON protocol request:\n%s", rpc_req); upload_data.buf = rpc_req; upload_data.len = strlen(rpc_req); sprintf(len_hdr, "Content-Length: %lu", (unsigned long) upload_data.len); sprintf(user_agent_hdr, "User-Agent: %s", PACKAGE_STRING); headers = curl_slist_append(headers, "Content-type: application/json"); headers = curl_slist_append(headers, "X-Mining-Extensions: longpoll midstate rollntime submitold"); if (likely(global_hashrate)) { char ghashrate[255]; sprintf(ghashrate, "X-Mining-Hashrate: %llu", global_hashrate); headers = curl_slist_append(headers, ghashrate); } headers = curl_slist_append(headers, len_hdr); headers = curl_slist_append(headers, user_agent_hdr); headers = curl_slist_append(headers, "Expect:"); /* disable Expect hdr*/ curl_easy_setopt(curl, CURLOPT_HTTPHEADER, headers); if (opt_delaynet) { /* Don't delay share submission, but still track the nettime */ if (!share) { long long now_msecs, last_msecs; struct timeval now, last; cgtime(&now); last_nettime(&last); now_msecs = (long long)now.tv_sec * 1000; now_msecs += now.tv_usec / 1000; last_msecs = (long long)last.tv_sec * 1000; last_msecs += last.tv_usec / 1000; if (now_msecs > last_msecs && now_msecs - last_msecs < 250) { struct timespec rgtp; rgtp.tv_sec = 0; rgtp.tv_nsec = (250 - (now_msecs - last_msecs)) * 1000000; nanosleep(&rgtp, NULL); } } set_nettime(); } rc = curl_easy_perform(curl); if (rc) { applog(LOG_INFO, "HTTP request failed: %s", curl_err_str); goto err_out; } if (!all_data.buf) { applog(LOG_DEBUG, "Empty data received in json_rpc_call."); goto err_out; } pool->cgminer_pool_stats.times_sent++; if (curl_easy_getinfo(curl, CURLINFO_SIZE_UPLOAD, &byte_count) == CURLE_OK) pool->cgminer_pool_stats.bytes_sent += byte_count; pool->cgminer_pool_stats.times_received++; if 
(curl_easy_getinfo(curl, CURLINFO_SIZE_DOWNLOAD, &byte_count) == CURLE_OK) pool->cgminer_pool_stats.bytes_received += byte_count; if (probing) { pool->probed = true; /* If X-Long-Polling was found, activate long polling */ if (hi.lp_path) { if (pool->hdr_path != NULL) free(pool->hdr_path); pool->hdr_path = hi.lp_path; } else pool->hdr_path = NULL; if (hi.stratum_url) { pool->stratum_url = hi.stratum_url; hi.stratum_url = NULL; } } else { if (hi.lp_path) { free(hi.lp_path); hi.lp_path = NULL; } if (hi.stratum_url) { free(hi.stratum_url); hi.stratum_url = NULL; } } *rolltime = hi.rolltime; pool->cgminer_pool_stats.rolltime = hi.rolltime; pool->cgminer_pool_stats.hadrolltime = hi.hadrolltime; pool->cgminer_pool_stats.canroll = hi.canroll; pool->cgminer_pool_stats.hadexpire = hi.hadexpire; val = JSON_LOADS(all_data.buf, &err); if (!val) { applog(LOG_INFO, "JSON decode failed(%d): %s", err.line, err.text); if (opt_protocol) applog(LOG_DEBUG, "JSON protocol response:\n%s", (char *)(all_data.buf)); goto err_out; } if (opt_protocol) { char *s = json_dumps(val, JSON_INDENT(3)); applog(LOG_DEBUG, "JSON protocol response:\n%s", s); free(s); } /* JSON-RPC valid response returns a non-null 'result', * and a null 'error'. 
*/ res_val = json_object_get(val, "result"); err_val = json_object_get(val, "error"); if (!res_val ||(err_val && !json_is_null(err_val))) { char *s; if (err_val) s = json_dumps(err_val, JSON_INDENT(3)); else s = strdup("(unknown reason)"); applog(LOG_INFO, "JSON-RPC call failed: %s", s); free(s); goto err_out; } if (hi.reason) { json_object_set_new(val, "reject-reason", json_string(hi.reason)); free(hi.reason); hi.reason = NULL; } successful_connect = true; databuf_free(&all_data); curl_slist_free_all(headers); curl_easy_reset(curl); return val; err_out: databuf_free(&all_data); curl_slist_free_all(headers); curl_easy_reset(curl); if (!successful_connect) applog(LOG_DEBUG, "Failed to connect in json_rpc_call"); curl_easy_setopt(curl, CURLOPT_FRESH_CONNECT, 1); return NULL; } #define PROXY_HTTP CURLPROXY_HTTP #define PROXY_HTTP_1_0 CURLPROXY_HTTP_1_0 #define PROXY_SOCKS4 CURLPROXY_SOCKS4 #define PROXY_SOCKS5 CURLPROXY_SOCKS5 #define PROXY_SOCKS4A CURLPROXY_SOCKS4A #define PROXY_SOCKS5H CURLPROXY_SOCKS5_HOSTNAME #else /* HAVE_LIBCURL */ #define PROXY_HTTP 0 #define PROXY_HTTP_1_0 1 #define PROXY_SOCKS4 2 #define PROXY_SOCKS5 3 #define PROXY_SOCKS4A 4 #define PROXY_SOCKS5H 5 #endif /* HAVE_LIBCURL */ static struct { const char *name; proxytypes_t proxytype; } proxynames[] = { { "http:", PROXY_HTTP }, { "http0:", PROXY_HTTP_1_0 }, { "socks4:", PROXY_SOCKS4 }, { "socks5:", PROXY_SOCKS5 }, { "socks4a:", PROXY_SOCKS4A }, { "socks5h:", PROXY_SOCKS5H }, { NULL, 0 } }; const char *proxytype(proxytypes_t proxytype) { int i; for (i = 0; proxynames[i].name; i++) if (proxynames[i].proxytype == proxytype) return proxynames[i].name; return "invalid"; } char *get_proxy(char *url, struct pool *pool) { pool->rpc_proxy = NULL; char *split; int plen, len, i; for (i = 0; proxynames[i].name; i++) { plen = strlen(proxynames[i].name); if (strncmp(url, proxynames[i].name, plen) == 0) { if (!(split = strchr(url, '|'))) return url; *split = '\0'; len = split - url; pool->rpc_proxy = malloc(1 
+ len - plen); if (!(pool->rpc_proxy)) quithere(1, "Failed to malloc rpc_proxy"); strcpy(pool->rpc_proxy, url + plen); extract_sockaddr(pool->rpc_proxy, &pool->sockaddr_proxy_url, &pool->sockaddr_proxy_port); pool->rpc_proxytype = proxynames[i].proxytype; url = split + 1; break; } } return url; } /* Adequate size s==len*2 + 1 must be alloced to use this variant */ void __bin2hex(char *s, const unsigned char *p, size_t len) { int i; static const char hex[16] = {'0', '1', '2', '3', '4', '5', '6', '7', '8', '9', 'a', 'b', 'c', 'd', 'e', 'f'}; for (i = 0; i < (int)len; i++) { *s++ = hex[p[i] >> 4]; *s++ = hex[p[i] & 0xF]; } *s++ = '\0'; } /* Returns a malloced array string of a binary value of arbitrary length. The * array is rounded up to a 4 byte size to appease architectures that need * aligned array sizes */ char *bin2hex(const unsigned char *p, size_t len) { ssize_t slen; char *s; slen = len * 2 + 1; if (slen % 4) slen += 4 - (slen % 4); s = calloc(slen, 1); if (unlikely(!s)) quithere(1, "Failed to calloc"); __bin2hex(s, p, len); return s; } static const int hex2bin_tbl[256] = { -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, 0, 1, 2, 3, 4, 5, 6, 7, 8, 9, -1, -1, -1, -1, -1, -1, -1, 10, 11, 12, 13, 14, 15, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, 10, 11, 12, 13, 14, 15, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, 
-1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, }; /* Does the reverse of bin2hex but does not allocate any ram */ bool hex2bin(unsigned char *p, const char *hexstr, size_t len) { int nibble1, nibble2; unsigned char idx; bool ret = false; while (*hexstr && len) { if (unlikely(!hexstr[1])) { applog(LOG_ERR, "hex2bin str truncated"); return ret; } idx = *hexstr++; nibble1 = hex2bin_tbl[idx]; idx = *hexstr++; nibble2 = hex2bin_tbl[idx]; if (unlikely((nibble1 < 0) || (nibble2 < 0))) { applog(LOG_ERR, "hex2bin scan failed"); return ret; } *p++ = (((unsigned char)nibble1) << 4) | ((unsigned char)nibble2); --len; } if (likely(len == 0 && *hexstr == 0)) ret = true; return ret; } static bool _valid_hex(char *s, const char *file, const char *func, const int line) { bool ret = false; int i, len; if (unlikely(!s)) { applog(LOG_ERR, "Null string passed to valid_hex from"IN_FMT_FFL, file, func, line); return ret; } len = strlen(s); if (unlikely(!len)) { applog(LOG_ERR, "Zero length string passed to valid_hex from"IN_FMT_FFL, file, func, line); return ret; } for (i = 0; i < len; i++) { unsigned char idx = s[i]; if (unlikely(hex2bin_tbl[idx] < 0)) { applog(LOG_ERR, "Invalid char %x passed to valid_hex from"IN_FMT_FFL, idx, file, func, line); return ret; } } ret = true; return ret; } #define valid_hex(s) _valid_hex(s, __FILE__, __func__, __LINE__) static const int b58tobin_tbl[] = { -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, 0, 1, 2, 3, 4, 5, 6, 7, 8, -1, -1, -1, -1, -1, -1, -1, 9, 10, 11, 12, 13, 14, 15, 16, -1, 17, 18, 19, 20, 21, -1, 22, 23, 24, 25, 26, 27, 28, 29, 30, 31, 32, -1, -1, -1, -1, -1, -1, 33, 34, 35, 36, 37, 38, 39, 40, 41, 42, 43, -1, 44, 45, 46, 47, 48, 49, 50, 51, 52, 53, 54, 55, 56, 57 }; /* b58bin should always be at least 25 bytes long and already 
checked to be * valid. */ void b58tobin(unsigned char *b58bin, const char *b58) { uint32_t c, bin32[7]; int len, i, j; uint64_t t; memset(bin32, 0, 7 * sizeof(uint32_t)); len = strlen(b58); for (i = 0; i < len; i++) { c = b58[i]; c = b58tobin_tbl[c]; for (j = 6; j >= 0; j--) { t = ((uint64_t)bin32[j]) * 58 + c; c = (t & 0x3f00000000ull) >> 32; bin32[j] = t & 0xffffffffull; } } *(b58bin++) = bin32[0] & 0xff; for (i = 1; i < 7; i++) { *((uint32_t *)b58bin) = htobe32(bin32[i]); b58bin += sizeof(uint32_t); } } void address_to_pubkeyhash(unsigned char *pkh, const char *addr) { unsigned char b58bin[25]; memset(b58bin, 0, 25); b58tobin(b58bin, addr); pkh[0] = 0x76; pkh[1] = 0xa9; pkh[2] = 0x14; memcpy(&pkh[3], &b58bin[1], 20); pkh[23] = 0x88; pkh[24] = 0xac; } /* For encoding nHeight into coinbase, return how many bytes were used */ int ser_number(unsigned char *s, int32_t val) { int32_t *i32 = (int32_t *)&s[1]; int len; if (val < 128) len = 1; else if (val < 16512) len = 2; else if (val < 2113664) len = 3; else len = 4; *i32 = htole32(val); s[0] = len++; return len; } /* For encoding variable length strings */ unsigned char *ser_string(char *s, int *slen) { size_t len = strlen(s); unsigned char *ret; ret = malloc(1 + len + 8); // Leave room for largest size if (unlikely(!ret)) quit(1, "Failed to malloc ret in ser_string"); if (len < 253) { ret[0] = len; memcpy(ret + 1, s, len); *slen = len + 1; } else if (len < 0x10000) { uint16_t *u16 = (uint16_t *)&ret[1]; ret[0] = 253; *u16 = htobe16(len); memcpy(ret + 3, s, len); *slen = len + 3; } else { /* size_t is only 32 bit on many platforms anyway */ uint32_t *u32 = (uint32_t *)&ret[1]; ret[0] = 254; *u32 = htobe32(len); memcpy(ret + 5, s, len); *slen = len + 5; } return ret; } bool fulltest(const unsigned char *hash, const unsigned char *target) { uint32_t *hash32 = (uint32_t *)hash; uint32_t *target32 = (uint32_t *)target; bool rc = true; int i; for (i = 28 / 4; i >= 0; i--) { uint32_t h32tmp = le32toh(hash32[i]); uint32_t 
t32tmp = le32toh(target32[i]); if (h32tmp > t32tmp) { rc = false; break; } if (h32tmp < t32tmp) { rc = true; break; } } if (opt_debug) { unsigned char hash_swap[32], target_swap[32]; char *hash_str, *target_str; swab256(hash_swap, hash); swab256(target_swap, target); hash_str = bin2hex(hash_swap, 32); target_str = bin2hex(target_swap, 32); applog(LOG_DEBUG, " Proof: %s\nTarget: %s\nTrgVal? %s", hash_str, target_str, rc ? "YES (hash <= target)" : "no (false positive; hash > target)"); free(hash_str); free(target_str); } return rc; } struct thread_q *tq_new(void) { struct thread_q *tq; tq = calloc(1, sizeof(*tq)); if (!tq) return NULL; INIT_LIST_HEAD(&tq->q); pthread_mutex_init(&tq->mutex, NULL); pthread_cond_init(&tq->cond, NULL); return tq; } void tq_free(struct thread_q *tq) { struct tq_ent *ent, *iter; if (!tq) return; list_for_each_entry_safe(ent, iter, &tq->q, q_node) { list_del(&ent->q_node); free(ent); } pthread_cond_destroy(&tq->cond); pthread_mutex_destroy(&tq->mutex); memset(tq, 0, sizeof(*tq)); /* poison */ free(tq); } static void tq_freezethaw(struct thread_q *tq, bool frozen) { mutex_lock(&tq->mutex); tq->frozen = frozen; pthread_cond_signal(&tq->cond); mutex_unlock(&tq->mutex); } void tq_freeze(struct thread_q *tq) { tq_freezethaw(tq, true); } void tq_thaw(struct thread_q *tq) { tq_freezethaw(tq, false); } bool tq_push(struct thread_q *tq, void *data) { struct tq_ent *ent; bool rc = true; ent = calloc(1, sizeof(*ent)); if (!ent) return false; ent->data = data; INIT_LIST_HEAD(&ent->q_node); mutex_lock(&tq->mutex); if (!tq->frozen) { list_add_tail(&ent->q_node, &tq->q); } else { free(ent); rc = false; } pthread_cond_signal(&tq->cond); mutex_unlock(&tq->mutex); return rc; } void *tq_pop(struct thread_q *tq, const struct timespec *abstime) { struct tq_ent *ent; void *rval = NULL; int rc; mutex_lock(&tq->mutex); if (!list_empty(&tq->q)) goto pop; if (abstime) rc = pthread_cond_timedwait(&tq->cond, &tq->mutex, abstime); else rc = pthread_cond_wait(&tq->cond, 
&tq->mutex); if (rc) goto out; if (list_empty(&tq->q)) goto out; pop: ent = list_entry(tq->q.next, struct tq_ent, q_node); rval = ent->data; list_del(&ent->q_node); free(ent); out: mutex_unlock(&tq->mutex); return rval; } int thr_info_create(struct thr_info *thr, pthread_attr_t *attr, void *(*start) (void *), void *arg) { cgsem_init(&thr->sem); return pthread_create(&thr->pth, attr, start, arg); } void thr_info_cancel(struct thr_info *thr) { if (!thr) return; if (PTH(thr) != 0L) { pthread_cancel(thr->pth); PTH(thr) = 0L; } cgsem_destroy(&thr->sem); } void subtime(struct timeval *a, struct timeval *b) { timersub(a, b, b); } void addtime(struct timeval *a, struct timeval *b) { timeradd(a, b, b); } bool time_more(struct timeval *a, struct timeval *b) { return timercmp(a, b, >); } bool time_less(struct timeval *a, struct timeval *b) { return timercmp(a, b, <); } void copy_time(struct timeval *dest, const struct timeval *src) { memcpy(dest, src, sizeof(struct timeval)); } void timespec_to_val(struct timeval *val, const struct timespec *spec) { val->tv_sec = spec->tv_sec; val->tv_usec = spec->tv_nsec / 1000; } void timeval_to_spec(struct timespec *spec, const struct timeval *val) { spec->tv_sec = val->tv_sec; spec->tv_nsec = val->tv_usec * 1000; } void us_to_timeval(struct timeval *val, int64_t us) { lldiv_t tvdiv = lldiv(us, 1000000); val->tv_sec = tvdiv.quot; val->tv_usec = tvdiv.rem; } void us_to_timespec(struct timespec *spec, int64_t us) { lldiv_t tvdiv = lldiv(us, 1000000); spec->tv_sec = tvdiv.quot; spec->tv_nsec = tvdiv.rem * 1000; } void ms_to_timespec(struct timespec *spec, int64_t ms) { lldiv_t tvdiv = lldiv(ms, 1000); spec->tv_sec = tvdiv.quot; spec->tv_nsec = tvdiv.rem * 1000000; } void ms_to_timeval(struct timeval *val, int64_t ms) { lldiv_t tvdiv = lldiv(ms, 1000); val->tv_sec = tvdiv.quot; val->tv_usec = tvdiv.rem * 1000; } void timeraddspec(struct timespec *a, const struct timespec *b) { a->tv_sec += b->tv_sec; a->tv_nsec += b->tv_nsec; if (a->tv_nsec >= 
1000000000) { a->tv_nsec -= 1000000000; a->tv_sec++; } } static int __maybe_unused timespec_to_ms(struct timespec *ts) { return ts->tv_sec * 1000 + ts->tv_nsec / 1000000; } /* Subtract b from a */ static void __maybe_unused timersubspec(struct timespec *a, const struct timespec *b) { a->tv_sec -= b->tv_sec; a->tv_nsec -= b->tv_nsec; if (a->tv_nsec < 0) { a->tv_nsec += 1000000000; a->tv_sec--; } } /* These are cgminer specific sleep functions that use an absolute nanosecond * resolution timer to avoid poor usleep accuracy and overruns. */ #ifdef WIN32 /* Windows start time is since 1601 LOL so convert it to unix epoch 1970. */ #define EPOCHFILETIME (116444736000000000LL) /* Return the system time as an lldiv_t in decimicroseconds. */ static void decius_time(lldiv_t *lidiv) { FILETIME ft; LARGE_INTEGER li; GetSystemTimeAsFileTime(&ft); li.LowPart = ft.dwLowDateTime; li.HighPart = ft.dwHighDateTime; li.QuadPart -= EPOCHFILETIME; /* SystemTime is in decimicroseconds so divide by an unusual number */ *lidiv = lldiv(li.QuadPart, 10000000); } /* This is a cgminer gettimeofday wrapper. Since we always call gettimeofday * with tz set to NULL, and windows' default resolution is only 15ms, this * gives us higher resolution times on windows. */ void cgtime(struct timeval *tv) { lldiv_t lidiv; decius_time(&lidiv); tv->tv_sec = lidiv.quot; tv->tv_usec = lidiv.rem / 10; } #else /* WIN32 */ void cgtime(struct timeval *tv) { gettimeofday(tv, NULL); } int cgtimer_to_ms(cgtimer_t *cgt) { return timespec_to_ms(cgt); } /* Subtracts b from a and stores it in res. 
*/ void cgtimer_sub(cgtimer_t *a, cgtimer_t *b, cgtimer_t *res) { res->tv_sec = a->tv_sec - b->tv_sec; res->tv_nsec = a->tv_nsec - b->tv_nsec; if (res->tv_nsec < 0) { res->tv_nsec += 1000000000; res->tv_sec--; } } #endif /* WIN32 */ #ifdef CLOCK_MONOTONIC /* Essentially just linux */ void cgtimer_time(cgtimer_t *ts_start) { clock_gettime(CLOCK_MONOTONIC, ts_start); } static void nanosleep_abstime(struct timespec *ts_end) { int ret; do { ret = clock_nanosleep(CLOCK_MONOTONIC, TIMER_ABSTIME, ts_end, NULL); } while (ret == EINTR); } /* Reentrant version of cgsleep functions allow start time to be set separately * from the beginning of the actual sleep, allowing scheduling delays to be * counted in the sleep. */ void cgsleep_ms_r(cgtimer_t *ts_start, int ms) { struct timespec ts_end; ms_to_timespec(&ts_end, ms); timeraddspec(&ts_end, ts_start); nanosleep_abstime(&ts_end); } void cgsleep_us_r(cgtimer_t *ts_start, int64_t us) { struct timespec ts_end; us_to_timespec(&ts_end, us); timeraddspec(&ts_end, ts_start); nanosleep_abstime(&ts_end); } #else /* CLOCK_MONOTONIC */ #ifdef __MACH__ #include <mach/clock.h> #include <mach/mach.h> void cgtimer_time(cgtimer_t *ts_start) { clock_serv_t cclock; mach_timespec_t mts; host_get_clock_service(mach_host_self(), SYSTEM_CLOCK, &cclock); clock_get_time(cclock, &mts); mach_port_deallocate(mach_task_self(), cclock); ts_start->tv_sec = mts.tv_sec; ts_start->tv_nsec = mts.tv_nsec; } #elif !defined(WIN32) /* __MACH__ - Everything not linux/macosx/win32 */ void cgtimer_time(cgtimer_t *ts_start) { struct timeval tv; cgtime(&tv); ts_start->tv_sec = tv->tv_sec; ts_start->tv_nsec = tv->tv_usec * 1000; } #endif /* __MACH__ */ #ifdef WIN32 /* For windows we use the SystemTime stored as a LARGE_INTEGER as the cgtimer_t * typedef, allowing us to have sub-microsecond resolution for times, do simple * arithmetic for timer calculations, and use windows' own hTimers to get * accurate absolute timeouts. 
*/ int cgtimer_to_ms(cgtimer_t *cgt) { return (int)(cgt->QuadPart / 10000LL); } /* Subtracts b from a and stores it in res. */ void cgtimer_sub(cgtimer_t *a, cgtimer_t *b, cgtimer_t *res) { res->QuadPart = a->QuadPart - b->QuadPart; } /* Note that cgtimer time is NOT offset by the unix epoch since we use absolute * timeouts with hTimers. */ void cgtimer_time(cgtimer_t *ts_start) { FILETIME ft; GetSystemTimeAsFileTime(&ft); ts_start->LowPart = ft.dwLowDateTime; ts_start->HighPart = ft.dwHighDateTime; } static void liSleep(LARGE_INTEGER *li, int timeout) { HANDLE hTimer; DWORD ret; if (unlikely(timeout <= 0)) return; hTimer = CreateWaitableTimer(NULL, TRUE, NULL); if (unlikely(!hTimer)) quit(1, "Failed to create hTimer in liSleep"); ret = SetWaitableTimer(hTimer, li, 0, NULL, NULL, 0); if (unlikely(!ret)) quit(1, "Failed to SetWaitableTimer in liSleep"); /* We still use a timeout as a sanity check in case the system time * is changed while we're running */ ret = WaitForSingleObject(hTimer, timeout); if (unlikely(ret != WAIT_OBJECT_0 && ret != WAIT_TIMEOUT)) quit(1, "Failed to WaitForSingleObject in liSleep"); CloseHandle(hTimer); } void cgsleep_ms_r(cgtimer_t *ts_start, int ms) { LARGE_INTEGER li; li.QuadPart = ts_start->QuadPart + (int64_t)ms * 10000LL; liSleep(&li, ms); } void cgsleep_us_r(cgtimer_t *ts_start, int64_t us) { LARGE_INTEGER li; int ms; li.QuadPart = ts_start->QuadPart + us * 10LL; ms = us / 1000; if (!ms) ms = 1; liSleep(&li, ms); } #else /* WIN32 */ static void cgsleep_spec(struct timespec *ts_diff, const struct timespec *ts_start) { struct timespec now; timeraddspec(ts_diff, ts_start); cgtimer_time(&now); timersubspec(ts_diff, &now); if (unlikely(ts_diff->tv_sec < 0)) return; nanosleep(ts_diff, NULL); } void cgsleep_ms_r(cgtimer_t *ts_start, int ms) { struct timespec ts_diff; ms_to_timespec(&ts_diff, ms); cgsleep_spec(&ts_diff, ts_start); } void cgsleep_us_r(cgtimer_t *ts_start, int64_t us) { struct timespec ts_diff; us_to_timespec(&ts_diff, us); 
cgsleep_spec(&ts_diff, ts_start); } #endif /* WIN32 */ #endif /* CLOCK_MONOTONIC */ void cgsleep_ms(int ms) { cgtimer_t ts_start; cgsleep_prepare_r(&ts_start); cgsleep_ms_r(&ts_start, ms); } void cgsleep_us(int64_t us) { cgtimer_t ts_start; cgsleep_prepare_r(&ts_start); cgsleep_us_r(&ts_start, us); } /* Returns the microseconds difference between end and start times as a double */ double us_tdiff(struct timeval *end, struct timeval *start) { /* Sanity check. We should only be using this for small differences so * limit the max to 60 seconds. */ if (unlikely(end->tv_sec - start->tv_sec > 60)) return 60000000; return (end->tv_sec - start->tv_sec) * 1000000 + (end->tv_usec - start->tv_usec); } /* Returns the milliseconds difference between end and start times */ int ms_tdiff(struct timeval *end, struct timeval *start) { /* Like us_tdiff, limit to 1 hour. */ if (unlikely(end->tv_sec - start->tv_sec > 3600)) return 3600000; return (end->tv_sec - start->tv_sec) * 1000 + (end->tv_usec - start->tv_usec) / 1000; } /* Returns the seconds difference between end and start times as a double */ double tdiff(struct timeval *end, struct timeval *start) { return end->tv_sec - start->tv_sec + (end->tv_usec - start->tv_usec) / 1000000.0; } bool extract_sockaddr(char *url, char **sockaddr_url, char **sockaddr_port) { char *url_begin, *url_end, *ipv6_begin, *ipv6_end, *port_start = NULL; char url_address[256], port[6]; int url_len, port_len = 0; *sockaddr_url = url; url_begin = strstr(url, "//"); if (!url_begin) url_begin = url; else url_begin += 2; /* Look for numeric ipv6 entries */ ipv6_begin = strstr(url_begin, "["); ipv6_end = strstr(url_begin, "]"); if (ipv6_begin && ipv6_end && ipv6_end > ipv6_begin) url_end = strstr(ipv6_end, ":"); else url_end = strstr(url_begin, ":"); if (url_end) { url_len = url_end - url_begin; port_len = strlen(url_begin) - url_len - 1; if (port_len < 1) return false; port_start = url_end + 1; } else url_len = strlen(url_begin); if (url_len < 1) return 
false; snprintf(url_address, 254, "%.*s", url_len, url_begin); if (port_len) { char *slash; snprintf(port, 6, "%.*s", port_len, port_start); slash = strchr(port, '/'); if (slash) *slash = '\0'; } else strcpy(port, "80"); *sockaddr_port = strdup(port); *sockaddr_url = strdup(url_address); return true; } enum send_ret { SEND_OK, SEND_SELECTFAIL, SEND_SENDFAIL, SEND_INACTIVE }; /* Send a single command across a socket, appending \n to it. This should all * be done under stratum lock except when first establishing the socket */ static enum send_ret __stratum_send(struct pool *pool, char *s, ssize_t len) { SOCKETTYPE sock = pool->sock; ssize_t ssent = 0; strcat(s, "\n"); len++; while (len > 0 ) { struct timeval timeout = {1, 0}; ssize_t sent; fd_set wd; retry: FD_ZERO(&wd); FD_SET(sock, &wd); if (select(sock + 1, NULL, &wd, NULL, &timeout) < 1) { if (interrupted()) goto retry; return SEND_SELECTFAIL; } #ifdef __APPLE__ sent = send(pool->sock, s + ssent, len, SO_NOSIGPIPE); #elif WIN32 sent = send(pool->sock, s + ssent, len, 0); #else sent = send(pool->sock, s + ssent, len, MSG_NOSIGNAL); #endif if (sent < 0) { if (!sock_blocks()) return SEND_SENDFAIL; sent = 0; } ssent += sent; len -= sent; } pool->cgminer_pool_stats.times_sent++; pool->cgminer_pool_stats.bytes_sent += ssent; pool->cgminer_pool_stats.net_bytes_sent += ssent; return SEND_OK; } bool stratum_send(struct pool *pool, char *s, ssize_t len) { enum send_ret ret = SEND_INACTIVE; if (opt_protocol) applog(LOG_DEBUG, "SEND: %s", s); mutex_lock(&pool->stratum_lock); if (pool->stratum_active) ret = __stratum_send(pool, s, len); mutex_unlock(&pool->stratum_lock); /* This is to avoid doing applog under stratum_lock */ switch (ret) { default: case SEND_OK: break; case SEND_SELECTFAIL: applog(LOG_DEBUG, "Write select failed on pool %d sock", pool->pool_no); suspend_stratum(pool); break; case SEND_SENDFAIL: applog(LOG_DEBUG, "Failed to send in stratum_send"); suspend_stratum(pool); break; case SEND_INACTIVE: 
applog(LOG_DEBUG, "Stratum send failed due to no pool stratum_active"); break; } return (ret == SEND_OK); } static bool socket_full(struct pool *pool, int wait) { SOCKETTYPE sock = pool->sock; struct timeval timeout; fd_set rd; if (unlikely(wait < 0)) wait = 0; FD_ZERO(&rd); FD_SET(sock, &rd); timeout.tv_usec = 0; timeout.tv_sec = wait; if (select(sock + 1, &rd, NULL, NULL, &timeout) > 0) return true; return false; } /* Check to see if Santa's been good to you */ bool sock_full(struct pool *pool) { if (strlen(pool->sockbuf)) return true; return (socket_full(pool, 0)); } static void clear_sockbuf(struct pool *pool) { strcpy(pool->sockbuf, ""); } static void clear_sock(struct pool *pool) { ssize_t n; mutex_lock(&pool->stratum_lock); do { if (pool->sock) n = recv(pool->sock, pool->sockbuf, RECVSIZE, 0); else n = 0; } while (n > 0); mutex_unlock(&pool->stratum_lock); clear_sockbuf(pool); } /* Realloc memory to new size and zero any extra memory added */ void _recalloc(void **ptr, size_t old, size_t new, const char *file, const char *func, const int line) { if (new == old) return; *ptr = realloc(*ptr, new); if (unlikely(!*ptr)) quitfrom(1, file, func, line, "Failed to realloc"); if (new > old) memset(*ptr + old, 0, new - old); } /* Make sure the pool sockbuf is large enough to cope with any coinbase size * by reallocing it to a large enough size rounded up to a multiple of RBUFSIZE * and zeroing the new memory */ static void recalloc_sock(struct pool *pool, size_t len) { size_t old, new; old = strlen(pool->sockbuf); new = old + len + 1; if (new < pool->sockbuf_size) return; new = new + (RBUFSIZE - (new % RBUFSIZE)); // Avoid potentially recursive locking // applog(LOG_DEBUG, "Recallocing pool sockbuf to %d", new); pool->sockbuf = realloc(pool->sockbuf, new); if (!pool->sockbuf) quithere(1, "Failed to realloc pool sockbuf"); memset(pool->sockbuf + old, 0, new - old); pool->sockbuf_size = new; } /* Peeks at a socket to find the first end of line and then reads just that * 
from the socket and returns that as a malloced char */ char *recv_line(struct pool *pool) { char *tok, *sret = NULL; ssize_t len, buflen; int waited = 0; if (!strstr(pool->sockbuf, "\n")) { struct timeval rstart, now; cgtime(&rstart); if (!socket_full(pool, DEFAULT_SOCKWAIT)) { applog(LOG_DEBUG, "Timed out waiting for data on socket_full"); goto out; } do { char s[RBUFSIZE]; size_t slen; ssize_t n; memset(s, 0, RBUFSIZE); n = recv(pool->sock, s, RECVSIZE, 0); if (!n) { applog(LOG_DEBUG, "Socket closed waiting in recv_line"); suspend_stratum(pool); break; } cgtime(&now); waited = tdiff(&now, &rstart); if (n < 0) { if (!sock_blocks() || !socket_full(pool, DEFAULT_SOCKWAIT - waited)) { applog(LOG_DEBUG, "Failed to recv sock in recv_line"); suspend_stratum(pool); break; } } else { slen = strlen(s); recalloc_sock(pool, slen); strcat(pool->sockbuf, s); } } while (waited < DEFAULT_SOCKWAIT && !strstr(pool->sockbuf, "\n")); } buflen = strlen(pool->sockbuf); tok = strtok(pool->sockbuf, "\n"); if (!tok) { applog(LOG_DEBUG, "Failed to parse a \\n terminated string in recv_line"); goto out; } sret = strdup(tok); len = strlen(sret); /* Copy what's left in the buffer after the \n, including the * terminating \0 */ if (buflen > len + 1) memmove(pool->sockbuf, pool->sockbuf + len + 1, buflen - len + 1); else strcpy(pool->sockbuf, ""); pool->cgminer_pool_stats.times_received++; pool->cgminer_pool_stats.bytes_received += len; pool->cgminer_pool_stats.net_bytes_received += len; out: if (!sret) clear_sock(pool); else if (opt_protocol) applog(LOG_DEBUG, "RECVD: %s", sret); return sret; } /* Extracts a string value from a json array with error checking. To be used * when the value of the string returned is only examined and not to be stored. 
* See json_array_string below */ static char *__json_array_string(json_t *val, unsigned int entry) { json_t *arr_entry; if (json_is_null(val)) return NULL; if (!json_is_array(val)) return NULL; if (entry > json_array_size(val)) return NULL; arr_entry = json_array_get(val, entry); if (!json_is_string(arr_entry)) return NULL; return (char *)json_string_value(arr_entry); } /* Creates a freshly malloced dup of __json_array_string */ static char *json_array_string(json_t *val, unsigned int entry) { char *buf = __json_array_string(val, entry); if (buf) return strdup(buf); return NULL; } static char *blank_merkle = "0000000000000000000000000000000000000000000000000000000000000000"; static bool parse_notify(struct pool *pool, json_t *val) { char *job_id, *prev_hash, *coinbase1, *coinbase2, *bbversion, *nbit, *ntime, header[228]; unsigned char *cb1 = NULL, *cb2 = NULL; size_t cb1_len, cb2_len, alloc_len; bool clean, ret = false; int merkles, i; json_t *arr; arr = json_array_get(val, 4); if (!arr || !json_is_array(arr)) goto out; merkles = json_array_size(arr); job_id = json_array_string(val, 0); prev_hash = __json_array_string(val, 1); coinbase1 = json_array_string(val, 2); coinbase2 = json_array_string(val, 3); bbversion = __json_array_string(val, 5); nbit = __json_array_string(val, 6); ntime = __json_array_string(val, 7); clean = json_is_true(json_array_get(val, 8)); if (!valid_hex(job_id) || !valid_hex(prev_hash) || !valid_hex(coinbase1) || !valid_hex(coinbase2) || !valid_hex(bbversion) || !valid_hex(nbit) || !valid_hex(ntime)) { /* Annoying but we must not leak memory */ free(job_id); free(coinbase1); free(coinbase2); goto out; } cg_wlock(&pool->data_lock); free(pool->swork.job_id); pool->swork.job_id = job_id; snprintf(pool->prev_hash, 65, "%s", prev_hash); cb1_len = strlen(coinbase1) / 2; cb2_len = strlen(coinbase2) / 2; snprintf(pool->bbversion, 9, "%s", bbversion); snprintf(pool->nbit, 9, "%s", nbit); snprintf(pool->ntime, 9, "%s", ntime); pool->swork.clean = clean; 
alloc_len = pool->coinbase_len = cb1_len + pool->n1_len + pool->n2size + cb2_len; pool->nonce2_offset = cb1_len + pool->n1_len; for (i = 0; i < pool->merkles; i++) free(pool->swork.merkle_bin[i]); if (merkles) { pool->swork.merkle_bin = realloc(pool->swork.merkle_bin, sizeof(char *) * merkles + 1); for (i = 0; i < merkles; i++) { char *merkle = json_array_string(arr, i); pool->swork.merkle_bin[i] = malloc(32); if (unlikely(!pool->swork.merkle_bin[i])) quit(1, "Failed to malloc pool swork merkle_bin"); if (opt_protocol) applog(LOG_DEBUG, "merkle %d: %s", i, merkle); ret = hex2bin(pool->swork.merkle_bin[i], merkle, 32); free(merkle); if (unlikely(!ret)) { applog(LOG_ERR, "Failed to convert merkle to merkle_bin in parse_notify"); goto out_unlock; } } } pool->merkles = merkles; if (clean) pool->nonce2 = 0; #if 0 header_len = strlen(pool->bbversion) + strlen(pool->prev_hash); /* merkle_hash */ 32 + strlen(pool->ntime) + strlen(pool->nbit) + /* nonce */ 8 + /* workpadding */ 96; #endif snprintf(header, 225, "%s%s%s%s%s%s%s", pool->bbversion, pool->prev_hash, blank_merkle, pool->ntime, pool->nbit, "00000000", /* nonce */ workpadding); ret = hex2bin(pool->header_bin, header, 112); if (unlikely(!ret)) { applog(LOG_ERR, "Failed to convert header to header_bin in parse_notify"); goto out_unlock; } cb1 = alloca(cb1_len); ret = hex2bin(cb1, coinbase1, cb1_len); if (unlikely(!ret)) { applog(LOG_ERR, "Failed to convert cb1 to cb1_bin in parse_notify"); goto out_unlock; } cb2 = alloca(cb2_len); ret = hex2bin(cb2, coinbase2, cb2_len); if (unlikely(!ret)) { applog(LOG_ERR, "Failed to convert cb2 to cb2_bin in parse_notify"); goto out_unlock; } free(pool->coinbase); align_len(&alloc_len); pool->coinbase = calloc(alloc_len, 1); if (unlikely(!pool->coinbase)) quit(1, "Failed to calloc pool coinbase in parse_notify"); memcpy(pool->coinbase, cb1, cb1_len); memcpy(pool->coinbase + cb1_len, pool->nonce1bin, pool->n1_len); memcpy(pool->coinbase + cb1_len + pool->n1_len + pool->n2size, cb2, 
cb2_len); if (opt_debug) { char *cb = bin2hex(pool->coinbase, pool->coinbase_len); applog(LOG_DEBUG, "Pool %d coinbase %s", pool->pool_no, cb); free(cb); } out_unlock: cg_wunlock(&pool->data_lock); if (opt_protocol) { applog(LOG_DEBUG, "job_id: %s", job_id); applog(LOG_DEBUG, "prev_hash: %s", prev_hash); applog(LOG_DEBUG, "coinbase1: %s", coinbase1); applog(LOG_DEBUG, "coinbase2: %s", coinbase2); applog(LOG_DEBUG, "bbversion: %s", bbversion); applog(LOG_DEBUG, "nbit: %s", nbit); applog(LOG_DEBUG, "ntime: %s", ntime); applog(LOG_DEBUG, "clean: %s", clean ? "yes" : "no"); } free(coinbase1); free(coinbase2); /* A notify message is the closest stratum gets to a getwork */ pool->getwork_requested++; total_getworks++; if (pool == current_pool()) opt_work_update = true; out: return ret; } static bool parse_diff(struct pool *pool, json_t *val) { double old_diff, diff; diff = json_number_value(json_array_get(val, 0)); if (diff == 0) return false; cg_wlock(&pool->data_lock); old_diff = pool->sdiff; pool->sdiff = diff; cg_wunlock(&pool->data_lock); if (old_diff != diff) { int idiff = diff; if ((double)idiff == diff) applog(LOG_NOTICE, "Pool %d difficulty changed to %d", pool->pool_no, idiff); else applog(LOG_NOTICE, "Pool %d difficulty changed to %.1f", pool->pool_no, diff); } else applog(LOG_DEBUG, "Pool %d difficulty set to %f", pool->pool_no, diff); return true; } static void __suspend_stratum(struct pool *pool) { clear_sockbuf(pool); pool->stratum_active = pool->stratum_notify = false; if (pool->sock) CLOSESOCKET(pool->sock); pool->sock = 0; } static bool parse_reconnect(struct pool *pool, json_t *val) { char *sockaddr_url, *stratum_port, *tmp; char *url, *port, address[256]; memset(address, 0, 255); url = (char *)json_string_value(json_array_get(val, 0)); if (!url) url = pool->sockaddr_url; else { char *dot_pool, *dot_reconnect; dot_pool = strchr(pool->sockaddr_url, '.'); if (!dot_pool) { applog(LOG_ERR, "Denied stratum reconnect request for pool without domain '%s'", 
pool->sockaddr_url); return false; } dot_reconnect = strchr(url, '.'); if (!dot_reconnect) { applog(LOG_ERR, "Denied stratum reconnect request to url without domain '%s'", url); return false; } if (strcmp(dot_pool, dot_reconnect)) { applog(LOG_ERR, "Denied stratum reconnect request to non-matching domain url '%s'", pool->sockaddr_url); return false; } } port = (char *)json_string_value(json_array_get(val, 1)); if (!port) port = pool->stratum_port; snprintf(address, 254, "%s:%s", url, port); if (!extract_sockaddr(address, &sockaddr_url, &stratum_port)) return false; applog(LOG_WARNING, "Stratum reconnect requested from pool %d to %s", pool->pool_no, address); clear_pool_work(pool); mutex_lock(&pool->stratum_lock); __suspend_stratum(pool); tmp = pool->sockaddr_url; pool->sockaddr_url = sockaddr_url; pool->stratum_url = pool->sockaddr_url; free(tmp); tmp = pool->stratum_port; pool->stratum_port = stratum_port; free(tmp); mutex_unlock(&pool->stratum_lock); if (!restart_stratum(pool)) { pool_failed(pool); return false; } return true; } static bool send_version(struct pool *pool, json_t *val) { char s[RBUFSIZE]; int id = json_integer_value(json_object_get(val, "id")); if (!id) return false; sprintf(s, "{\"id\": %d, \"result\": \""PACKAGE"/"VERSION"\", \"error\": null}", id); if (!stratum_send(pool, s, strlen(s))) return false; return true; } static bool show_message(struct pool *pool, json_t *val) { char *msg; if (!json_is_array(val)) return false; msg = (char *)json_string_value(json_array_get(val, 0)); if (!msg) return false; applog(LOG_NOTICE, "Pool %d message: %s", pool->pool_no, msg); return true; } bool parse_method(struct pool *pool, char *s) { json_t *val = NULL, *method, *err_val, *params; json_error_t err; bool ret = false; char *buf; if (!s) goto out; val = JSON_LOADS(s, &err); if (!val) { applog(LOG_INFO, "JSON decode failed(%d): %s", err.line, err.text); goto out; } method = json_object_get(val, "method"); if (!method) goto out_decref; err_val = 
json_object_get(val, "error"); params = json_object_get(val, "params"); if (err_val && !json_is_null(err_val)) { char *ss; if (err_val) ss = json_dumps(err_val, JSON_INDENT(3)); else ss = strdup("(unknown reason)"); applog(LOG_INFO, "JSON-RPC method decode failed: %s", ss); free(ss); goto out_decref; } buf = (char *)json_string_value(method); if (!buf) goto out_decref; if (!strncasecmp(buf, "mining.notify", 13)) { if (parse_notify(pool, params)) pool->stratum_notify = ret = true; else pool->stratum_notify = ret = false; goto out_decref; } if (!strncasecmp(buf, "mining.set_difficulty", 21)) { ret = parse_diff(pool, params); goto out_decref; } if (!strncasecmp(buf, "client.reconnect", 16)) { ret = parse_reconnect(pool, params); goto out_decref; } if (!strncasecmp(buf, "client.get_version", 18)) { ret = send_version(pool, val); goto out_decref; } if (!strncasecmp(buf, "client.show_message", 19)) { ret = show_message(pool, params); goto out_decref; } out_decref: json_decref(val); out: return ret; } bool auth_stratum(struct pool *pool) { json_t *val = NULL, *res_val, *err_val; char s[RBUFSIZE], *sret = NULL; json_error_t err; bool ret = false; sprintf(s, "{\"id\": %d, \"method\": \"mining.authorize\", \"params\": [\"%s\", \"%s\"]}", swork_id++, pool->rpc_user, pool->rpc_pass); if (!stratum_send(pool, s, strlen(s))) return ret; /* Parse all data in the queue and anything left should be auth */ while (42) { sret = recv_line(pool); if (!sret) return ret; if (parse_method(pool, sret)) free(sret); else break; } val = JSON_LOADS(sret, &err); free(sret); res_val = json_object_get(val, "result"); err_val = json_object_get(val, "error"); if (!res_val || json_is_false(res_val) || (err_val && !json_is_null(err_val))) { char *ss; if (err_val) ss = json_dumps(err_val, JSON_INDENT(3)); else ss = strdup("(unknown reason)"); applog(LOG_INFO, "pool %d JSON stratum auth failed: %s", pool->pool_no, ss); free(ss); suspend_stratum(pool); goto out; } ret = true; applog(LOG_INFO, "Stratum 
authorisation success for pool %d", pool->pool_no); pool->probed = true; successful_connect = true; out: json_decref(val); return ret; } static int recv_byte(int sockd) { char c; if (recv(sockd, &c, 1, 0) != -1) return c; return -1; } static bool http_negotiate(struct pool *pool, int sockd, bool http0) { char buf[1024]; int i, len; if (http0) { snprintf(buf, 1024, "CONNECT %s:%s HTTP/1.0\r\n\r\n", pool->sockaddr_url, pool->stratum_port); } else { snprintf(buf, 1024, "CONNECT %s:%s HTTP/1.1\r\nHost: %s:%s\r\n\r\n", pool->sockaddr_url, pool->stratum_port, pool->sockaddr_url, pool->stratum_port); } applog(LOG_DEBUG, "Sending proxy %s:%s - %s", pool->sockaddr_proxy_url, pool->sockaddr_proxy_port, buf); send(sockd, buf, strlen(buf), 0); len = recv(sockd, buf, 12, 0); if (len <= 0) { applog(LOG_WARNING, "Couldn't read from proxy %s:%s after sending CONNECT", pool->sockaddr_proxy_url, pool->sockaddr_proxy_port); return false; } buf[len] = '\0'; applog(LOG_DEBUG, "Received from proxy %s:%s - %s", pool->sockaddr_proxy_url, pool->sockaddr_proxy_port, buf); if (strcmp(buf, "HTTP/1.1 200") && strcmp(buf, "HTTP/1.0 200")) { applog(LOG_WARNING, "HTTP Error from proxy %s:%s - %s", pool->sockaddr_proxy_url, pool->sockaddr_proxy_port, buf); return false; } /* Ignore unwanted headers till we get desired response */ for (i = 0; i < 4; i++) { buf[i] = recv_byte(sockd); if (buf[i] == (char)-1) { applog(LOG_WARNING, "Couldn't read HTTP byte from proxy %s:%s", pool->sockaddr_proxy_url, pool->sockaddr_proxy_port); return false; } } while (strncmp(buf, "\r\n\r\n", 4)) { for (i = 0; i < 3; i++) buf[i] = buf[i + 1]; buf[3] = recv_byte(sockd); if (buf[3] == (char)-1) { applog(LOG_WARNING, "Couldn't read HTTP byte from proxy %s:%s", pool->sockaddr_proxy_url, pool->sockaddr_proxy_port); return false; } } applog(LOG_DEBUG, "Success negotiating with %s:%s HTTP proxy", pool->sockaddr_proxy_url, pool->sockaddr_proxy_port); return true; } static bool socks5_negotiate(struct pool *pool, int sockd) { 
unsigned char atyp, uclen; unsigned short port; char buf[515]; int i, len; buf[0] = 0x05; buf[1] = 0x01; buf[2] = 0x00; applog(LOG_DEBUG, "Attempting to negotiate with %s:%s SOCKS5 proxy", pool->sockaddr_proxy_url, pool->sockaddr_proxy_port ); send(sockd, buf, 3, 0); if (recv_byte(sockd) != 0x05 || recv_byte(sockd) != buf[2]) { applog(LOG_WARNING, "Bad response from %s:%s SOCKS5 server", pool->sockaddr_proxy_url, pool->sockaddr_proxy_port ); return false; } buf[0] = 0x05; buf[1] = 0x01; buf[2] = 0x00; buf[3] = 0x03; len = (strlen(pool->sockaddr_url)); if (len > 255) len = 255; uclen = len; buf[4] = (uclen & 0xff); memcpy(buf + 5, pool->sockaddr_url, len); port = atoi(pool->stratum_port); buf[5 + len] = (port >> 8); buf[6 + len] = (port & 0xff); send(sockd, buf, (7 + len), 0); if (recv_byte(sockd) != 0x05 || recv_byte(sockd) != 0x00) { applog(LOG_WARNING, "Bad response from %s:%s SOCKS5 server", pool->sockaddr_proxy_url, pool->sockaddr_proxy_port ); return false; } recv_byte(sockd); atyp = recv_byte(sockd); if (atyp == 0x01) { for (i = 0; i < 4; i++) recv_byte(sockd); } else if (atyp == 0x03) { len = recv_byte(sockd); for (i = 0; i < len; i++) recv_byte(sockd); } else { applog(LOG_WARNING, "Bad response from %s:%s SOCKS5 server", pool->sockaddr_proxy_url, pool->sockaddr_proxy_port ); return false; } for (i = 0; i < 2; i++) recv_byte(sockd); applog(LOG_DEBUG, "Success negotiating with %s:%s SOCKS5 proxy", pool->sockaddr_proxy_url, pool->sockaddr_proxy_port); return true; } static bool socks4_negotiate(struct pool *pool, int sockd, bool socks4a) { unsigned short port; in_addr_t inp; char buf[515]; int i, len; buf[0] = 0x04; buf[1] = 0x01; port = atoi(pool->stratum_port); buf[2] = port >> 8; buf[3] = port & 0xff; sprintf(&buf[8], "CGMINER"); /* See if we've been given an IP address directly to avoid needing to * resolve it. 
*/ inp = inet_addr(pool->sockaddr_url); inp = ntohl(inp); if ((int)inp != -1) socks4a = false; else { /* Try to extract the IP address ourselves first */ struct addrinfo servinfobase, *servinfo, hints; servinfo = &servinfobase; memset(&hints, 0, sizeof(struct addrinfo)); hints.ai_family = AF_INET; /* IPV4 only */ if (!getaddrinfo(pool->sockaddr_url, NULL, &hints, &servinfo)) { struct sockaddr_in *saddr_in = (struct sockaddr_in *)servinfo->ai_addr; inp = ntohl(saddr_in->sin_addr.s_addr); socks4a = false; freeaddrinfo(servinfo); } } if (!socks4a) { if ((int)inp == -1) { applog(LOG_WARNING, "Invalid IP address specified for socks4 proxy: %s", pool->sockaddr_url); return false; } buf[4] = (inp >> 24) & 0xFF; buf[5] = (inp >> 16) & 0xFF; buf[6] = (inp >> 8) & 0xFF; buf[7] = (inp >> 0) & 0xFF; send(sockd, buf, 16, 0); } else { /* This appears to not be working but hopefully most will be * able to resolve IP addresses themselves. */ buf[4] = 0; buf[5] = 0; buf[6] = 0; buf[7] = 1; len = strlen(pool->sockaddr_url); if (len > 255) len = 255; memcpy(&buf[16], pool->sockaddr_url, len); len += 16; buf[len++] = '\0'; send(sockd, buf, len, 0); } if (recv_byte(sockd) != 0x00 || recv_byte(sockd) != 0x5a) { applog(LOG_WARNING, "Bad response from %s:%s SOCKS4 server", pool->sockaddr_proxy_url, pool->sockaddr_proxy_port); return false; } for (i = 0; i < 6; i++) recv_byte(sockd); return true; } static void noblock_socket(SOCKETTYPE fd) { #ifndef WIN32 int flags = fcntl(fd, F_GETFL, 0); fcntl(fd, F_SETFL, O_NONBLOCK | flags); #else u_long flags = 1; ioctlsocket(fd, FIONBIO, &flags); #endif } static void block_socket(SOCKETTYPE fd) { #ifndef WIN32 int flags = fcntl(fd, F_GETFL, 0); fcntl(fd, F_SETFL, flags & ~O_NONBLOCK); #else u_long flags = 0; ioctlsocket(fd, FIONBIO, &flags); #endif } static bool sock_connecting(void) { #ifndef WIN32 return errno == EINPROGRESS; #else return WSAGetLastError() == WSAEWOULDBLOCK; #endif } static bool setup_stratum_socket(struct pool *pool) { struct 
addrinfo servinfobase, *servinfo, *hints, *p; char *sockaddr_url, *sockaddr_port; int sockd; mutex_lock(&pool->stratum_lock); pool->stratum_active = false; if (pool->sock) CLOSESOCKET(pool->sock); pool->sock = 0; mutex_unlock(&pool->stratum_lock); hints = &pool->stratum_hints; memset(hints, 0, sizeof(struct addrinfo)); hints->ai_family = AF_UNSPEC; hints->ai_socktype = SOCK_STREAM; servinfo = &servinfobase; if (!pool->rpc_proxy && opt_socks_proxy) { pool->rpc_proxy = opt_socks_proxy; extract_sockaddr(pool->rpc_proxy, &pool->sockaddr_proxy_url, &pool->sockaddr_proxy_port); pool->rpc_proxytype = PROXY_SOCKS5; } if (pool->rpc_proxy) { sockaddr_url = pool->sockaddr_proxy_url; sockaddr_port = pool->sockaddr_proxy_port; } else { sockaddr_url = pool->sockaddr_url; sockaddr_port = pool->stratum_port; } if (getaddrinfo(sockaddr_url, sockaddr_port, hints, &servinfo) != 0) { if (!pool->probed) { applog(LOG_WARNING, "Failed to resolve (?wrong URL) %s:%s", sockaddr_url, sockaddr_port); pool->probed = true; } else { applog(LOG_INFO, "Failed to getaddrinfo for %s:%s", sockaddr_url, sockaddr_port); } return false; } for (p = servinfo; p != NULL; p = p->ai_next) { sockd = socket(p->ai_family, p->ai_socktype, p->ai_protocol); if (sockd == -1) { applog(LOG_DEBUG, "Failed socket"); continue; } /* Iterate non blocking over entries returned by getaddrinfo * to cope with round robin DNS entries, finding the first one * we can connect to quickly. 
*/ noblock_socket(sockd); if (connect(sockd, p->ai_addr, p->ai_addrlen) == -1) { struct timeval tv_timeout = {1, 0}; int selret; fd_set rw; if (!sock_connecting()) { CLOSESOCKET(sockd); applog(LOG_DEBUG, "Failed sock connect"); continue; } retry: FD_ZERO(&rw); FD_SET(sockd, &rw); selret = select(sockd + 1, NULL, &rw, NULL, &tv_timeout); if (selret > 0 && FD_ISSET(sockd, &rw)) { socklen_t len; int err, n; len = sizeof(err); n = getsockopt(sockd, SOL_SOCKET, SO_ERROR, (void *)&err, &len); if (!n && !err) { applog(LOG_DEBUG, "Succeeded delayed connect"); block_socket(sockd); break; } } if (selret < 0 && interrupted()) goto retry; CLOSESOCKET(sockd); applog(LOG_DEBUG, "Select timeout/failed connect"); continue; } applog(LOG_WARNING, "Succeeded immediate connect"); block_socket(sockd); break; } if (p == NULL) { applog(LOG_INFO, "Failed to connect to stratum on %s:%s", sockaddr_url, sockaddr_port); freeaddrinfo(servinfo); return false; } freeaddrinfo(servinfo); if (pool->rpc_proxy) { switch (pool->rpc_proxytype) { case PROXY_HTTP_1_0: if (!http_negotiate(pool, sockd, true)) return false; break; case PROXY_HTTP: if (!http_negotiate(pool, sockd, false)) return false; break; case PROXY_SOCKS5: case PROXY_SOCKS5H: if (!socks5_negotiate(pool, sockd)) return false; break; case PROXY_SOCKS4: if (!socks4_negotiate(pool, sockd, false)) return false; break; case PROXY_SOCKS4A: if (!socks4_negotiate(pool, sockd, true)) return false; break; default: applog(LOG_WARNING, "Unsupported proxy type for %s:%s", pool->sockaddr_proxy_url, pool->sockaddr_proxy_port); return false; break; } } if (!pool->sockbuf) { pool->sockbuf = calloc(RBUFSIZE, 1); if (!pool->sockbuf) quithere(1, "Failed to calloc pool sockbuf"); pool->sockbuf_size = RBUFSIZE; } pool->sock = sockd; keep_sockalive(sockd); return true; } static char *get_sessionid(json_t *val) { char *ret = NULL; json_t *arr_val; int arrsize, i; arr_val = json_array_get(val, 0); if (!arr_val || !json_is_array(arr_val)) goto out; arrsize = 
json_array_size(arr_val); for (i = 0; i < arrsize; i++) { json_t *arr = json_array_get(arr_val, i); char *notify; if (!arr | !json_is_array(arr)) break; notify = __json_array_string(arr, 0); if (!notify) continue; if (!strncasecmp(notify, "mining.notify", 13)) { ret = json_array_string(arr, 1); break; } } out: return ret; } void suspend_stratum(struct pool *pool) { applog(LOG_INFO, "Closing socket for stratum pool %d", pool->pool_no); mutex_lock(&pool->stratum_lock); __suspend_stratum(pool); mutex_unlock(&pool->stratum_lock); } bool initiate_stratum(struct pool *pool) { bool ret = false, recvd = false, noresume = false, sockd = false; char s[RBUFSIZE], *sret = NULL, *nonce1, *sessionid; json_t *val = NULL, *res_val, *err_val; json_error_t err; int n2size; resend: if (!setup_stratum_socket(pool)) { sockd = false; goto out; } sockd = true; if (recvd) { /* Get rid of any crap lying around if we're resending */ clear_sock(pool); sprintf(s, "{\"id\": %d, \"method\": \"mining.subscribe\", \"params\": []}", swork_id++); } else { if (pool->sessionid) sprintf(s, "{\"id\": %d, \"method\": \"mining.subscribe\", \"params\": [\""PACKAGE"/"VERSION"\", \"%s\"]}", swork_id++, pool->sessionid); else sprintf(s, "{\"id\": %d, \"method\": \"mining.subscribe\", \"params\": [\""PACKAGE"/"VERSION"\"]}", swork_id++); } if (__stratum_send(pool, s, strlen(s)) != SEND_OK) { applog(LOG_DEBUG, "Failed to send s in initiate_stratum"); goto out; } if (!socket_full(pool, DEFAULT_SOCKWAIT)) { applog(LOG_DEBUG, "Timed out waiting for response in initiate_stratum"); goto out; } sret = recv_line(pool); if (!sret) goto out; recvd = true; val = JSON_LOADS(sret, &err); free(sret); if (!val) { applog(LOG_INFO, "JSON decode failed(%d): %s", err.line, err.text); goto out; } res_val = json_object_get(val, "result"); err_val = json_object_get(val, "error"); if (!res_val || json_is_null(res_val) || (err_val && !json_is_null(err_val))) { char *ss; if (err_val) ss = json_dumps(err_val, JSON_INDENT(3)); else ss 
= strdup("(unknown reason)"); applog(LOG_INFO, "JSON-RPC decode failed: %s", ss); free(ss); goto out; } sessionid = get_sessionid(res_val); if (!sessionid) applog(LOG_DEBUG, "Failed to get sessionid in initiate_stratum"); nonce1 = json_array_string(res_val, 1); if (!valid_hex(nonce1)) { applog(LOG_INFO, "Failed to get valid nonce1 in initiate_stratum"); free(sessionid); goto out; } n2size = json_integer_value(json_array_get(res_val, 2)); if (n2size < 2 || n2size > 16) { applog(LOG_INFO, "Failed to get valid n2size in initiate_stratum"); free(sessionid); free(nonce1); goto out; } cg_wlock(&pool->data_lock); pool->sessionid = sessionid; pool->nonce1 = nonce1; pool->n1_len = strlen(nonce1) / 2; free(pool->nonce1bin); pool->nonce1bin = calloc(pool->n1_len, 1); if (unlikely(!pool->nonce1bin)) quithere(1, "Failed to calloc pool->nonce1bin"); hex2bin(pool->nonce1bin, pool->nonce1, pool->n1_len); pool->n2size = n2size; cg_wunlock(&pool->data_lock); if (sessionid) applog(LOG_DEBUG, "Pool %d stratum session id: %s", pool->pool_no, pool->sessionid); ret = true; out: if (ret) { if (!pool->stratum_url) pool->stratum_url = pool->sockaddr_url; pool->stratum_active = true; pool->sdiff = 1; if (opt_protocol) { applog(LOG_DEBUG, "Pool %d confirmed mining.subscribe with extranonce1 %s extran2size %d", pool->pool_no, pool->nonce1, pool->n2size); } } else { if (recvd && !noresume) { /* Reset the sessionid used for stratum resuming in case the pool * does not support it, or does not know how to respond to the * presence of the sessionid parameter. 
*/ cg_wlock(&pool->data_lock); free(pool->sessionid); free(pool->nonce1); pool->sessionid = pool->nonce1 = NULL; cg_wunlock(&pool->data_lock); applog(LOG_DEBUG, "Failed to resume stratum, trying afresh"); noresume = true; json_decref(val); goto resend; } applog(LOG_DEBUG, "Initiate stratum failed"); if (sockd) suspend_stratum(pool); } json_decref(val); return ret; } bool restart_stratum(struct pool *pool) { if (pool->stratum_active) suspend_stratum(pool); if (!initiate_stratum(pool)) return false; if (!auth_stratum(pool)) return false; return true; } void dev_error(struct cgpu_info *dev, enum dev_reason reason) { dev->device_last_not_well = time(NULL); dev->device_not_well_reason = reason; switch (reason) { case REASON_THREAD_FAIL_INIT: dev->thread_fail_init_count++; break; case REASON_THREAD_ZERO_HASH: dev->thread_zero_hash_count++; break; case REASON_THREAD_FAIL_QUEUE: dev->thread_fail_queue_count++; break; case REASON_DEV_SICK_IDLE_60: dev->dev_sick_idle_60_count++; break; case REASON_DEV_DEAD_IDLE_600: dev->dev_dead_idle_600_count++; break; case REASON_DEV_NOSTART: dev->dev_nostart_count++; break; case REASON_DEV_OVER_HEAT: dev->dev_over_heat_count++; break; case REASON_DEV_THERMAL_CUTOFF: dev->dev_thermal_cutoff_count++; break; case REASON_DEV_COMMS_ERROR: dev->dev_comms_error_count++; break; case REASON_DEV_THROTTLE: dev->dev_throttle_count++; break; } } /* Realloc an existing string to fit an extra string s, appending s to it. 
*/ void *realloc_strcat(char *ptr, char *s) { size_t old = 0, len = strlen(s); char *ret; if (!len) return ptr; if (ptr) old = strlen(ptr); len += old + 1; align_len(&len); ret = malloc(len); if (unlikely(!ret)) quithere(1, "Failed to malloc"); if (ptr) { sprintf(ret, "%s%s", ptr, s); free(ptr); } else sprintf(ret, "%s", s); return ret; } /* Make a text readable version of a string using 0xNN for < ' ' or > '~' * Including 0x00 at the end * You must free the result yourself */ void *str_text(char *ptr) { unsigned char *uptr; char *ret, *txt; if (ptr == NULL) { ret = strdup("(null)"); if (unlikely(!ret)) quithere(1, "Failed to malloc null"); } uptr = (unsigned char *)ptr; ret = txt = malloc(strlen(ptr)*4+5); // Guaranteed >= needed if (unlikely(!txt)) quithere(1, "Failed to malloc txt"); do { if (*uptr < ' ' || *uptr > '~') { sprintf(txt, "0x%02x", *uptr); txt += 4; } else *(txt++) = *uptr; } while (*(uptr++)); *txt = '\0'; return ret; } void RenameThread(const char* name) { char buf[16]; snprintf(buf, sizeof(buf), "cg@%s", name); #if defined(PR_SET_NAME) // Only the first 15 characters are used (16 - NUL terminator) prctl(PR_SET_NAME, buf, 0, 0, 0); #elif (defined(__FreeBSD__) || defined(__OpenBSD__)) pthread_set_name_np(pthread_self(), buf); #elif defined(MAC_OSX) pthread_setname_np(buf); #else // Prevent warnings (void)buf; #endif } /* cgminer specific wrappers for true unnamed semaphore usage on platforms * that support them and for apple which does not. We use a single byte across * a pipe to emulate semaphore behaviour there. */ #ifdef __APPLE__ void _cgsem_init(cgsem_t *cgsem, const char *file, const char *func, const int line) { int flags, fd, i; if (pipe(cgsem->pipefd) == -1) quitfrom(1, file, func, line, "Failed pipe errno=%d", errno); /* Make the pipes FD_CLOEXEC to allow them to close should we call * execv on restart. 
*/ for (i = 0; i < 2; i++) { fd = cgsem->pipefd[i]; flags = fcntl(fd, F_GETFD, 0); flags |= FD_CLOEXEC; if (fcntl(fd, F_SETFD, flags) == -1) quitfrom(1, file, func, line, "Failed to fcntl errno=%d", errno); } } void _cgsem_post(cgsem_t *cgsem, const char *file, const char *func, const int line) { const char buf = 1; int ret; retry: ret = write(cgsem->pipefd[1], &buf, 1); if (unlikely(ret == 0)) applog(LOG_WARNING, "Failed to write errno=%d" IN_FMT_FFL, errno, file, func, line); else if (unlikely(ret < 0 && interrupted)) goto retry; } void _cgsem_wait(cgsem_t *cgsem, const char *file, const char *func, const int line) { char buf; int ret; retry: ret = read(cgsem->pipefd[0], &buf, 1); if (unlikely(ret == 0)) applog(LOG_WARNING, "Failed to read errno=%d" IN_FMT_FFL, errno, file, func, line); else if (unlikely(ret < 0 && interrupted)) goto retry; } void cgsem_destroy(cgsem_t *cgsem) { close(cgsem->pipefd[1]); close(cgsem->pipefd[0]); } /* This is similar to sem_timedwait but takes a millisecond value */ int _cgsem_mswait(cgsem_t *cgsem, int ms, const char *file, const char *func, const int line) { struct timeval timeout; int ret, fd; fd_set rd; char buf; retry: fd = cgsem->pipefd[0]; FD_ZERO(&rd); FD_SET(fd, &rd); ms_to_timeval(&timeout, ms); ret = select(fd + 1, &rd, NULL, NULL, &timeout); if (ret > 0) { ret = read(fd, &buf, 1); return 0; } if (likely(!ret)) return ETIMEDOUT; if (interrupted()) goto retry; quitfrom(1, file, func, line, "Failed to sem_timedwait errno=%d cgsem=0x%p", errno, cgsem); /* We don't reach here */ return 0; } /* Reset semaphore count back to zero */ void cgsem_reset(cgsem_t *cgsem) { int ret, fd; fd_set rd; char buf; fd = cgsem->pipefd[0]; FD_ZERO(&rd); FD_SET(fd, &rd); do { struct timeval timeout = {0, 0}; ret = select(fd + 1, &rd, NULL, NULL, &timeout); if (ret > 0) ret = read(fd, &buf, 1); else if (unlikely(ret < 0 && interrupted())) ret = 1; } while (ret > 0); } #else void _cgsem_init(cgsem_t *cgsem, const char *file, const char *func, 
const int line) { int ret; if ((ret = sem_init(cgsem, 0, 0))) quitfrom(1, file, func, line, "Failed to sem_init ret=%d errno=%d", ret, errno); } void _cgsem_post(cgsem_t *cgsem, const char *file, const char *func, const int line) { if (unlikely(sem_post(cgsem))) quitfrom(1, file, func, line, "Failed to sem_post errno=%d cgsem=0x%p", errno, cgsem); } void _cgsem_wait(cgsem_t *cgsem, const char *file, const char *func, const int line) { retry: if (unlikely(sem_wait(cgsem))) { if (interrupted()) goto retry; quitfrom(1, file, func, line, "Failed to sem_wait errno=%d cgsem=0x%p", errno, cgsem); } } int _cgsem_mswait(cgsem_t *cgsem, int ms, const char *file, const char *func, const int line) { struct timespec abs_timeout, ts_now; struct timeval tv_now; int ret; cgtime(&tv_now); timeval_to_spec(&ts_now, &tv_now); ms_to_timespec(&abs_timeout, ms); retry: timeraddspec(&abs_timeout, &ts_now); ret = sem_timedwait(cgsem, &abs_timeout); if (ret) { if (likely(sock_timeout())) return ETIMEDOUT; if (interrupted()) goto retry; quitfrom(1, file, func, line, "Failed to sem_timedwait errno=%d cgsem=0x%p", errno, cgsem); } return 0; } void cgsem_reset(cgsem_t *cgsem) { int ret; do { ret = sem_trywait(cgsem); if (unlikely(ret < 0 && interrupted())) ret = 0; } while (!ret); } void cgsem_destroy(cgsem_t *cgsem) { sem_destroy(cgsem); } #endif /* Provide a completion_timeout helper function for unreliable functions that * may die due to driver issues etc that time out if the function fails and * can then reliably return. 
*/ struct cg_completion { cgsem_t cgsem; void (*fn)(void *fnarg); void *fnarg; }; void *completion_thread(void *arg) { struct cg_completion *cgc = (struct cg_completion *)arg; pthread_setcanceltype(PTHREAD_CANCEL_ASYNCHRONOUS, NULL); cgc->fn(cgc->fnarg); cgsem_post(&cgc->cgsem); return NULL; } bool cg_completion_timeout(void *fn, void *fnarg, int timeout) { struct cg_completion *cgc; pthread_t pthread; bool ret = false; cgc = malloc(sizeof(struct cg_completion)); if (unlikely(!cgc)) return ret; cgsem_init(&cgc->cgsem); cgc->fn = fn; cgc->fnarg = fnarg; pthread_create(&pthread, NULL, completion_thread, (void *)cgc); ret = cgsem_mswait(&cgc->cgsem, timeout); if (!ret) { pthread_join(pthread, NULL); free(cgc); } else pthread_cancel(pthread); return !ret; } void _cg_memcpy(void *dest, const void *src, unsigned int n, const char *file, const char *func, const int line) { if (unlikely(n < 1 || n > (1ul << 31))) { applog(LOG_ERR, "ERR: Asked to memcpy %u bytes from %s %s():%d", n, file, func, line); return; } memcpy(dest, src, n); }
static bool parse_reconnect(struct pool *pool, json_t *val) { char *sockaddr_url, *stratum_port, *tmp; char *url, *port, address[256]; memset(address, 0, 255); url = (char *)json_string_value(json_array_get(val, 0)); if (!url) url = pool->sockaddr_url; else { char *dot_pool, *dot_reconnect; dot_pool = strchr(pool->sockaddr_url, '.'); if (!dot_pool) { applog(LOG_ERR, "Denied stratum reconnect request for pool without domain '%s'", pool->sockaddr_url); return false; } dot_reconnect = strchr(url, '.'); if (!dot_reconnect) { applog(LOG_ERR, "Denied stratum reconnect request to url without domain '%s'", url); return false; } if (strcmp(dot_pool, dot_reconnect)) { applog(LOG_ERR, "Denied stratum reconnect request to non-matching domain url '%s'", pool->sockaddr_url); return false; } } port = (char *)json_string_value(json_array_get(val, 1)); if (!port) port = pool->stratum_port; sprintf(address, "%s:%s", url, port); if (!extract_sockaddr(address, &sockaddr_url, &stratum_port)) return false; applog(LOG_WARNING, "Stratum reconnect requested from pool %d to %s", pool->pool_no, address); clear_pool_work(pool); mutex_lock(&pool->stratum_lock); __suspend_stratum(pool); tmp = pool->sockaddr_url; pool->sockaddr_url = sockaddr_url; pool->stratum_url = pool->sockaddr_url; free(tmp); tmp = pool->stratum_port; pool->stratum_port = stratum_port; free(tmp); mutex_unlock(&pool->stratum_lock); if (!restart_stratum(pool)) { pool_failed(pool); return false; } return true; }
static bool parse_reconnect(struct pool *pool, json_t *val) { char *sockaddr_url, *stratum_port, *tmp; char *url, *port, address[256]; memset(address, 0, 255); url = (char *)json_string_value(json_array_get(val, 0)); if (!url) url = pool->sockaddr_url; else { char *dot_pool, *dot_reconnect; dot_pool = strchr(pool->sockaddr_url, '.'); if (!dot_pool) { applog(LOG_ERR, "Denied stratum reconnect request for pool without domain '%s'", pool->sockaddr_url); return false; } dot_reconnect = strchr(url, '.'); if (!dot_reconnect) { applog(LOG_ERR, "Denied stratum reconnect request to url without domain '%s'", url); return false; } if (strcmp(dot_pool, dot_reconnect)) { applog(LOG_ERR, "Denied stratum reconnect request to non-matching domain url '%s'", pool->sockaddr_url); return false; } } port = (char *)json_string_value(json_array_get(val, 1)); if (!port) port = pool->stratum_port; snprintf(address, 254, "%s:%s", url, port); if (!extract_sockaddr(address, &sockaddr_url, &stratum_port)) return false; applog(LOG_WARNING, "Stratum reconnect requested from pool %d to %s", pool->pool_no, address); clear_pool_work(pool); mutex_lock(&pool->stratum_lock); __suspend_stratum(pool); tmp = pool->sockaddr_url; pool->sockaddr_url = sockaddr_url; pool->stratum_url = pool->sockaddr_url; free(tmp); tmp = pool->stratum_port; pool->stratum_port = stratum_port; free(tmp); mutex_unlock(&pool->stratum_lock); if (!restart_stratum(pool)) { pool_failed(pool); return false; } return true; }
{'added': [(2, ' * Copyright 2011-2014 Con Kolivas'), (723, 'static bool _valid_hex(char *s, const char *file, const char *func, const int line)'), (724, '{'), (725, '\tbool ret = false;'), (726, '\tint i, len;'), (727, ''), (728, '\tif (unlikely(!s)) {'), (729, '\t\tapplog(LOG_ERR, "Null string passed to valid_hex from"IN_FMT_FFL, file, func, line);'), (730, '\t\treturn ret;'), (731, '\t}'), (732, '\tlen = strlen(s);'), (733, '\tif (unlikely(!len)) {'), (734, '\t\tapplog(LOG_ERR, "Zero length string passed to valid_hex from"IN_FMT_FFL, file, func, line);'), (735, '\t\treturn ret;'), (736, '\t}'), (737, '\tfor (i = 0; i < len; i++) {'), (738, '\t\tunsigned char idx = s[i];'), (739, ''), (740, '\t\tif (unlikely(hex2bin_tbl[idx] < 0)) {'), (741, '\t\t\tapplog(LOG_ERR, "Invalid char %x passed to valid_hex from"IN_FMT_FFL, idx, file, func, line);'), (742, '\t\t\treturn ret;'), (743, '\t\t}'), (744, '\t}'), (745, '\tret = true;'), (746, '\treturn ret;'), (747, '}'), (748, ''), (749, '#define valid_hex(s) _valid_hex(s, __FILE__, __func__, __LINE__)'), (750, ''), (1406, '\tsnprintf(url_address, 254, "%.*s", url_len, url_begin);'), (1716, '\tif (!valid_hex(job_id) || !valid_hex(prev_hash) || !valid_hex(coinbase1) ||'), (1717, '\t !valid_hex(coinbase2) || !valid_hex(bbversion) || !valid_hex(nbit) ||'), (1718, '\t !valid_hex(ntime)) {'), (1720, '\t\tfree(job_id);'), (1721, '\t\tfree(coinbase1);'), (1722, '\t\tfree(coinbase2);'), (1910, '\tsnprintf(address, 254, "%s:%s", url, port);'), (2581, '\tif (!valid_hex(nonce1)) {'), (2582, '\t\tapplog(LOG_INFO, "Failed to get valid nonce1 in initiate_stratum");'), (2587, '\tif (n2size < 2 || n2size > 16) {'), (2588, '\t\tapplog(LOG_INFO, "Failed to get valid n2size in initiate_stratum");')], 'deleted': [(2, ' * Copyright 2011-2013 Con Kolivas'), (1378, '\tsprintf(url_address, "%.*s", url_len, url_begin);'), (1688, '\tif (!job_id || !prev_hash || !coinbase1 || !coinbase2 || !bbversion || !nbit || !ntime) {'), (1690, '\t\tif (job_id)'), 
(1691, '\t\t\tfree(job_id);'), (1692, '\t\tif (coinbase1)'), (1693, '\t\t\tfree(coinbase1);'), (1694, '\t\tif (coinbase2)'), (1695, '\t\t\tfree(coinbase2);'), (1883, '\tsprintf(address, "%s:%s", url, port);'), (2554, '\tif (!nonce1) {'), (2555, '\t\tapplog(LOG_INFO, "Failed to get nonce1 in initiate_stratum");'), (2560, '\tif (!n2size) {'), (2561, '\t\tapplog(LOG_INFO, "Failed to get n2size in initiate_stratum");')]}
41
14
2,371
16,204
52
323
8
https://github.com/ckolivas/cgminer
CVE-2014-4501
CWE-119
2,656
http.c
C
http_connect
/* * HTTP protocol for ffmpeg client * Copyright (c) 2000, 2001 Fabrice Bellard * * This file is part of FFmpeg. * * FFmpeg is free software; you can redistribute it and/or * modify it under the terms of the GNU Lesser General Public * License as published by the Free Software Foundation; either * version 2.1 of the License, or (at your option) any later version. * * FFmpeg is distributed in the hope that it will be useful, * but WITHOUT ANY WARRANTY; without even the implied warranty of * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU * Lesser General Public License for more details. * * You should have received a copy of the GNU Lesser General Public * License along with FFmpeg; if not, write to the Free Software * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA */ #include "config.h" #if CONFIG_ZLIB #include <zlib.h> #endif /* CONFIG_ZLIB */ #include "libavutil/avassert.h" #include "libavutil/avstring.h" #include "libavutil/opt.h" #include "libavutil/time.h" #include "avformat.h" #include "http.h" #include "httpauth.h" #include "internal.h" #include "network.h" #include "os_support.h" #include "url.h" /* XXX: POST protocol is not completely implemented because ffmpeg uses * only a subset of it. */ /* The IO buffer size is unrelated to the max URL size in itself, but needs * to be large enough to fit the full request headers (including long * path names). */ #define BUFFER_SIZE MAX_URL_SIZE #define MAX_REDIRECTS 8 #define HTTP_SINGLE 1 #define HTTP_MUTLI 2 typedef enum { LOWER_PROTO, READ_HEADERS, WRITE_REPLY_HEADERS, FINISH }HandshakeState; typedef struct HTTPContext { const AVClass *class; URLContext *hd; unsigned char buffer[BUFFER_SIZE], *buf_ptr, *buf_end; int line_count; int http_code; /* Used if "Transfer-Encoding: chunked" otherwise -1. 
*/ int64_t chunksize; int64_t off, end_off, filesize; char *location; HTTPAuthState auth_state; HTTPAuthState proxy_auth_state; char *http_proxy; char *headers; char *mime_type; char *user_agent; #if FF_API_HTTP_USER_AGENT char *user_agent_deprecated; #endif char *content_type; /* Set if the server correctly handles Connection: close and will close * the connection after feeding us the content. */ int willclose; int seekable; /**< Control seekability, 0 = disable, 1 = enable, -1 = probe. */ int chunked_post; /* A flag which indicates if the end of chunked encoding has been sent. */ int end_chunked_post; /* A flag which indicates we have finished to read POST reply. */ int end_header; /* A flag which indicates if we use persistent connections. */ int multiple_requests; uint8_t *post_data; int post_datalen; int is_akamai; int is_mediagateway; char *cookies; ///< holds newline (\n) delimited Set-Cookie header field values (without the "Set-Cookie: " field name) /* A dictionary containing cookies keyed by cookie name */ AVDictionary *cookie_dict; int icy; /* how much data was read since the last ICY metadata packet */ int icy_data_read; /* after how many bytes of read data a new metadata packet will be found */ int icy_metaint; char *icy_metadata_headers; char *icy_metadata_packet; AVDictionary *metadata; #if CONFIG_ZLIB int compressed; z_stream inflate_stream; uint8_t *inflate_buffer; #endif /* CONFIG_ZLIB */ AVDictionary *chained_options; int send_expect_100; char *method; int reconnect; int reconnect_at_eof; int reconnect_streamed; int reconnect_delay; int reconnect_delay_max; int listen; char *resource; int reply_code; int is_multi_client; HandshakeState handshake_step; int is_connected_server; } HTTPContext; #define OFFSET(x) offsetof(HTTPContext, x) #define D AV_OPT_FLAG_DECODING_PARAM #define E AV_OPT_FLAG_ENCODING_PARAM #define DEFAULT_USER_AGENT "Lavf/" AV_STRINGIFY(LIBAVFORMAT_VERSION) static const AVOption options[] = { { "seekable", "control seekability of 
connection", OFFSET(seekable), AV_OPT_TYPE_BOOL, { .i64 = -1 }, -1, 1, D }, { "chunked_post", "use chunked transfer-encoding for posts", OFFSET(chunked_post), AV_OPT_TYPE_BOOL, { .i64 = 1 }, 0, 1, E }, { "http_proxy", "set HTTP proxy to tunnel through", OFFSET(http_proxy), AV_OPT_TYPE_STRING, { .str = NULL }, 0, 0, D | E }, { "headers", "set custom HTTP headers, can override built in default headers", OFFSET(headers), AV_OPT_TYPE_STRING, { .str = NULL }, 0, 0, D | E }, { "content_type", "set a specific content type for the POST messages", OFFSET(content_type), AV_OPT_TYPE_STRING, { .str = NULL }, 0, 0, D | E }, { "user_agent", "override User-Agent header", OFFSET(user_agent), AV_OPT_TYPE_STRING, { .str = DEFAULT_USER_AGENT }, 0, 0, D }, #if FF_API_HTTP_USER_AGENT { "user-agent", "override User-Agent header", OFFSET(user_agent_deprecated), AV_OPT_TYPE_STRING, { .str = DEFAULT_USER_AGENT }, 0, 0, D }, #endif { "multiple_requests", "use persistent connections", OFFSET(multiple_requests), AV_OPT_TYPE_BOOL, { .i64 = 0 }, 0, 1, D | E }, { "post_data", "set custom HTTP post data", OFFSET(post_data), AV_OPT_TYPE_BINARY, .flags = D | E }, { "mime_type", "export the MIME type", OFFSET(mime_type), AV_OPT_TYPE_STRING, { .str = NULL }, 0, 0, AV_OPT_FLAG_EXPORT | AV_OPT_FLAG_READONLY }, { "cookies", "set cookies to be sent in applicable future requests, use newline delimited Set-Cookie HTTP field value syntax", OFFSET(cookies), AV_OPT_TYPE_STRING, { .str = NULL }, 0, 0, D }, { "icy", "request ICY metadata", OFFSET(icy), AV_OPT_TYPE_BOOL, { .i64 = 1 }, 0, 1, D }, { "icy_metadata_headers", "return ICY metadata headers", OFFSET(icy_metadata_headers), AV_OPT_TYPE_STRING, { .str = NULL }, 0, 0, AV_OPT_FLAG_EXPORT }, { "icy_metadata_packet", "return current ICY metadata packet", OFFSET(icy_metadata_packet), AV_OPT_TYPE_STRING, { .str = NULL }, 0, 0, AV_OPT_FLAG_EXPORT }, { "metadata", "metadata read from the bitstream", OFFSET(metadata), AV_OPT_TYPE_DICT, {0}, 0, 0, AV_OPT_FLAG_EXPORT 
}, { "auth_type", "HTTP authentication type", OFFSET(auth_state.auth_type), AV_OPT_TYPE_INT, { .i64 = HTTP_AUTH_NONE }, HTTP_AUTH_NONE, HTTP_AUTH_BASIC, D | E, "auth_type"}, { "none", "No auth method set, autodetect", 0, AV_OPT_TYPE_CONST, { .i64 = HTTP_AUTH_NONE }, 0, 0, D | E, "auth_type"}, { "basic", "HTTP basic authentication", 0, AV_OPT_TYPE_CONST, { .i64 = HTTP_AUTH_BASIC }, 0, 0, D | E, "auth_type"}, { "send_expect_100", "Force sending an Expect: 100-continue header for POST", OFFSET(send_expect_100), AV_OPT_TYPE_BOOL, { .i64 = 0 }, 0, 1, E }, { "location", "The actual location of the data received", OFFSET(location), AV_OPT_TYPE_STRING, { .str = NULL }, 0, 0, D | E }, { "offset", "initial byte offset", OFFSET(off), AV_OPT_TYPE_INT64, { .i64 = 0 }, 0, INT64_MAX, D }, { "end_offset", "try to limit the request to bytes preceding this offset", OFFSET(end_off), AV_OPT_TYPE_INT64, { .i64 = 0 }, 0, INT64_MAX, D }, { "method", "Override the HTTP method or set the expected HTTP method from a client", OFFSET(method), AV_OPT_TYPE_STRING, { .str = NULL }, 0, 0, D | E }, { "reconnect", "auto reconnect after disconnect before EOF", OFFSET(reconnect), AV_OPT_TYPE_BOOL, { .i64 = 0 }, 0, 1, D }, { "reconnect_at_eof", "auto reconnect at EOF", OFFSET(reconnect_at_eof), AV_OPT_TYPE_BOOL, { .i64 = 0 }, 0, 1, D }, { "reconnect_streamed", "auto reconnect streamed / non seekable streams", OFFSET(reconnect_streamed), AV_OPT_TYPE_BOOL, { .i64 = 0 }, 0, 1, D }, { "reconnect_delay_max", "max reconnect delay in seconds after which to give up", OFFSET(reconnect_delay_max), AV_OPT_TYPE_INT, { .i64 = 120 }, 0, UINT_MAX/1000/1000, D }, { "listen", "listen on HTTP", OFFSET(listen), AV_OPT_TYPE_INT, { .i64 = 0 }, 0, 2, D | E }, { "resource", "The resource requested by a client", OFFSET(resource), AV_OPT_TYPE_STRING, { .str = NULL }, 0, 0, E }, { "reply_code", "The http status code to return to a client", OFFSET(reply_code), AV_OPT_TYPE_INT, { .i64 = 200}, INT_MIN, 599, E}, { NULL } }; static 
int http_connect(URLContext *h, const char *path, const char *local_path, const char *hoststr, const char *auth, const char *proxyauth, int *new_location); static int http_read_header(URLContext *h, int *new_location); void ff_http_init_auth_state(URLContext *dest, const URLContext *src) { memcpy(&((HTTPContext *)dest->priv_data)->auth_state, &((HTTPContext *)src->priv_data)->auth_state, sizeof(HTTPAuthState)); memcpy(&((HTTPContext *)dest->priv_data)->proxy_auth_state, &((HTTPContext *)src->priv_data)->proxy_auth_state, sizeof(HTTPAuthState)); } static int http_open_cnx_internal(URLContext *h, AVDictionary **options) { const char *path, *proxy_path, *lower_proto = "tcp", *local_path; char hostname[1024], hoststr[1024], proto[10]; char auth[1024], proxyauth[1024] = ""; char path1[MAX_URL_SIZE]; char buf[1024], urlbuf[MAX_URL_SIZE]; int port, use_proxy, err, location_changed = 0; HTTPContext *s = h->priv_data; av_url_split(proto, sizeof(proto), auth, sizeof(auth), hostname, sizeof(hostname), &port, path1, sizeof(path1), s->location); ff_url_join(hoststr, sizeof(hoststr), NULL, NULL, hostname, port, NULL); proxy_path = s->http_proxy ? s->http_proxy : getenv("http_proxy"); use_proxy = !ff_http_match_no_proxy(getenv("no_proxy"), hostname) && proxy_path && av_strstart(proxy_path, "http://", NULL); if (!strcmp(proto, "https")) { lower_proto = "tls"; use_proxy = 0; if (port < 0) port = 443; } if (port < 0) port = 80; if (path1[0] == '\0') path = "/"; else path = path1; local_path = path; if (use_proxy) { /* Reassemble the request URL without auth string - we don't * want to leak the auth to the proxy. 
*/ ff_url_join(urlbuf, sizeof(urlbuf), proto, NULL, hostname, port, "%s", path1); path = urlbuf; av_url_split(NULL, 0, proxyauth, sizeof(proxyauth), hostname, sizeof(hostname), &port, NULL, 0, proxy_path); } ff_url_join(buf, sizeof(buf), lower_proto, NULL, hostname, port, NULL); if (!s->hd) { err = ffurl_open_whitelist(&s->hd, buf, AVIO_FLAG_READ_WRITE, &h->interrupt_callback, options, h->protocol_whitelist, h->protocol_blacklist, h); if (err < 0) return err; } err = http_connect(h, path, local_path, hoststr, auth, proxyauth, &location_changed); if (err < 0) return err; return location_changed; } /* return non zero if error */ static int http_open_cnx(URLContext *h, AVDictionary **options) { HTTPAuthType cur_auth_type, cur_proxy_auth_type; HTTPContext *s = h->priv_data; int location_changed, attempts = 0, redirects = 0; redo: av_dict_copy(options, s->chained_options, 0); cur_auth_type = s->auth_state.auth_type; cur_proxy_auth_type = s->auth_state.auth_type; location_changed = http_open_cnx_internal(h, options); if (location_changed < 0) goto fail; attempts++; if (s->http_code == 401) { if ((cur_auth_type == HTTP_AUTH_NONE || s->auth_state.stale) && s->auth_state.auth_type != HTTP_AUTH_NONE && attempts < 4) { ffurl_closep(&s->hd); goto redo; } else goto fail; } if (s->http_code == 407) { if ((cur_proxy_auth_type == HTTP_AUTH_NONE || s->proxy_auth_state.stale) && s->proxy_auth_state.auth_type != HTTP_AUTH_NONE && attempts < 4) { ffurl_closep(&s->hd); goto redo; } else goto fail; } if ((s->http_code == 301 || s->http_code == 302 || s->http_code == 303 || s->http_code == 307) && location_changed == 1) { /* url moved, get next */ ffurl_closep(&s->hd); if (redirects++ >= MAX_REDIRECTS) return AVERROR(EIO); /* Restart the authentication process with the new target, which * might use a different auth mechanism. 
*/ memset(&s->auth_state, 0, sizeof(s->auth_state)); attempts = 0; location_changed = 0; goto redo; } return 0; fail: if (s->hd) ffurl_closep(&s->hd); if (location_changed < 0) return location_changed; return ff_http_averror(s->http_code, AVERROR(EIO)); } int ff_http_do_new_request(URLContext *h, const char *uri) { HTTPContext *s = h->priv_data; AVDictionary *options = NULL; int ret; s->off = 0; s->icy_data_read = 0; av_free(s->location); s->location = av_strdup(uri); if (!s->location) return AVERROR(ENOMEM); ret = http_open_cnx(h, &options); av_dict_free(&options); return ret; } int ff_http_averror(int status_code, int default_averror) { switch (status_code) { case 400: return AVERROR_HTTP_BAD_REQUEST; case 401: return AVERROR_HTTP_UNAUTHORIZED; case 403: return AVERROR_HTTP_FORBIDDEN; case 404: return AVERROR_HTTP_NOT_FOUND; default: break; } if (status_code >= 400 && status_code <= 499) return AVERROR_HTTP_OTHER_4XX; else if (status_code >= 500) return AVERROR_HTTP_SERVER_ERROR; else return default_averror; } static int http_write_reply(URLContext* h, int status_code) { int ret, body = 0, reply_code, message_len; const char *reply_text, *content_type; HTTPContext *s = h->priv_data; char message[BUFFER_SIZE]; content_type = "text/plain"; if (status_code < 0) body = 1; switch (status_code) { case AVERROR_HTTP_BAD_REQUEST: case 400: reply_code = 400; reply_text = "Bad Request"; break; case AVERROR_HTTP_FORBIDDEN: case 403: reply_code = 403; reply_text = "Forbidden"; break; case AVERROR_HTTP_NOT_FOUND: case 404: reply_code = 404; reply_text = "Not Found"; break; case 200: reply_code = 200; reply_text = "OK"; content_type = s->content_type ? 
s->content_type : "application/octet-stream"; break; case AVERROR_HTTP_SERVER_ERROR: case 500: reply_code = 500; reply_text = "Internal server error"; break; default: return AVERROR(EINVAL); } if (body) { s->chunked_post = 0; message_len = snprintf(message, sizeof(message), "HTTP/1.1 %03d %s\r\n" "Content-Type: %s\r\n" "Content-Length: %"SIZE_SPECIFIER"\r\n" "%s" "\r\n" "%03d %s\r\n", reply_code, reply_text, content_type, strlen(reply_text) + 6, // 3 digit status code + space + \r\n s->headers ? s->headers : "", reply_code, reply_text); } else { s->chunked_post = 1; message_len = snprintf(message, sizeof(message), "HTTP/1.1 %03d %s\r\n" "Content-Type: %s\r\n" "Transfer-Encoding: chunked\r\n" "%s" "\r\n", reply_code, reply_text, content_type, s->headers ? s->headers : ""); } av_log(h, AV_LOG_TRACE, "HTTP reply header: \n%s----\n", message); if ((ret = ffurl_write(s->hd, message, message_len)) < 0) return ret; return 0; } static void handle_http_errors(URLContext *h, int error) { av_assert0(error < 0); http_write_reply(h, error); } static int http_handshake(URLContext *c) { int ret, err, new_location; HTTPContext *ch = c->priv_data; URLContext *cl = ch->hd; switch (ch->handshake_step) { case LOWER_PROTO: av_log(c, AV_LOG_TRACE, "Lower protocol\n"); if ((ret = ffurl_handshake(cl)) > 0) return 2 + ret; if (ret < 0) return ret; ch->handshake_step = READ_HEADERS; ch->is_connected_server = 1; return 2; case READ_HEADERS: av_log(c, AV_LOG_TRACE, "Read headers\n"); if ((err = http_read_header(c, &new_location)) < 0) { handle_http_errors(c, err); return err; } ch->handshake_step = WRITE_REPLY_HEADERS; return 1; case WRITE_REPLY_HEADERS: av_log(c, AV_LOG_TRACE, "Reply code: %d\n", ch->reply_code); if ((err = http_write_reply(c, ch->reply_code)) < 0) return err; ch->handshake_step = FINISH; return 1; case FINISH: return 0; } // this should never be reached. 
return AVERROR(EINVAL); } static int http_listen(URLContext *h, const char *uri, int flags, AVDictionary **options) { HTTPContext *s = h->priv_data; int ret; char hostname[1024], proto[10]; char lower_url[100]; const char *lower_proto = "tcp"; int port; av_url_split(proto, sizeof(proto), NULL, 0, hostname, sizeof(hostname), &port, NULL, 0, uri); if (!strcmp(proto, "https")) lower_proto = "tls"; ff_url_join(lower_url, sizeof(lower_url), lower_proto, NULL, hostname, port, NULL); if ((ret = av_dict_set_int(options, "listen", s->listen, 0)) < 0) goto fail; if ((ret = ffurl_open_whitelist(&s->hd, lower_url, AVIO_FLAG_READ_WRITE, &h->interrupt_callback, options, h->protocol_whitelist, h->protocol_blacklist, h )) < 0) goto fail; s->handshake_step = LOWER_PROTO; if (s->listen == HTTP_SINGLE) { /* single client */ s->reply_code = 200; while ((ret = http_handshake(h)) > 0); } fail: av_dict_free(&s->chained_options); return ret; } static int http_open(URLContext *h, const char *uri, int flags, AVDictionary **options) { HTTPContext *s = h->priv_data; int ret; if( s->seekable == 1 ) h->is_streamed = 0; else h->is_streamed = 1; s->filesize = -1; s->location = av_strdup(uri); if (!s->location) return AVERROR(ENOMEM); if (options) av_dict_copy(&s->chained_options, *options, 0); if (s->headers) { int len = strlen(s->headers); if (len < 2 || strcmp("\r\n", s->headers + len - 2)) { av_log(h, AV_LOG_WARNING, "No trailing CRLF found in HTTP header.\n"); ret = av_reallocp(&s->headers, len + 3); if (ret < 0) return ret; s->headers[len] = '\r'; s->headers[len + 1] = '\n'; s->headers[len + 2] = '\0'; } } if (s->listen) { return http_listen(h, uri, flags, options); } ret = http_open_cnx(h, options); if (ret < 0) av_dict_free(&s->chained_options); return ret; } static int http_accept(URLContext *s, URLContext **c) { int ret; HTTPContext *sc = s->priv_data; HTTPContext *cc; URLContext *sl = sc->hd; URLContext *cl = NULL; av_assert0(sc->listen); if ((ret = ffurl_alloc(c, s->filename, s->flags, 
&sl->interrupt_callback)) < 0) goto fail; cc = (*c)->priv_data; if ((ret = ffurl_accept(sl, &cl)) < 0) goto fail; cc->hd = cl; cc->is_multi_client = 1; fail: return ret; } static int http_getc(HTTPContext *s) { int len; if (s->buf_ptr >= s->buf_end) { len = ffurl_read(s->hd, s->buffer, BUFFER_SIZE); if (len < 0) { return len; } else if (len == 0) { return AVERROR_EOF; } else { s->buf_ptr = s->buffer; s->buf_end = s->buffer + len; } } return *s->buf_ptr++; } static int http_get_line(HTTPContext *s, char *line, int line_size) { int ch; char *q; q = line; for (;;) { ch = http_getc(s); if (ch < 0) return ch; if (ch == '\n') { /* process line */ if (q > line && q[-1] == '\r') q--; *q = '\0'; return 0; } else { if ((q - line) < line_size - 1) *q++ = ch; } } } static int check_http_code(URLContext *h, int http_code, const char *end) { HTTPContext *s = h->priv_data; /* error codes are 4xx and 5xx, but regard 401 as a success, so we * don't abort until all headers have been parsed. */ if (http_code >= 400 && http_code < 600 && (http_code != 401 || s->auth_state.auth_type != HTTP_AUTH_NONE) && (http_code != 407 || s->proxy_auth_state.auth_type != HTTP_AUTH_NONE)) { end += strspn(end, SPACE_CHARS); av_log(h, AV_LOG_WARNING, "HTTP error %d %s\n", http_code, end); return ff_http_averror(http_code, AVERROR(EIO)); } return 0; } static int parse_location(HTTPContext *s, const char *p) { char redirected_location[MAX_URL_SIZE], *new_loc; ff_make_absolute_url(redirected_location, sizeof(redirected_location), s->location, p); new_loc = av_strdup(redirected_location); if (!new_loc) return AVERROR(ENOMEM); av_free(s->location); s->location = new_loc; return 0; } /* "bytes $from-$to/$document_size" */ static void parse_content_range(URLContext *h, const char *p) { HTTPContext *s = h->priv_data; const char *slash; if (!strncmp(p, "bytes ", 6)) { p += 6; s->off = strtoll(p, NULL, 10); if ((slash = strchr(p, '/')) && strlen(slash) > 0) s->filesize = strtoll(slash + 1, NULL, 10); } if 
(s->seekable == -1 && (!s->is_akamai || s->filesize != 2147483647)) h->is_streamed = 0; /* we _can_ in fact seek */ } static int parse_content_encoding(URLContext *h, const char *p) { if (!av_strncasecmp(p, "gzip", 4) || !av_strncasecmp(p, "deflate", 7)) { #if CONFIG_ZLIB HTTPContext *s = h->priv_data; s->compressed = 1; inflateEnd(&s->inflate_stream); if (inflateInit2(&s->inflate_stream, 32 + 15) != Z_OK) { av_log(h, AV_LOG_WARNING, "Error during zlib initialisation: %s\n", s->inflate_stream.msg); return AVERROR(ENOSYS); } if (zlibCompileFlags() & (1 << 17)) { av_log(h, AV_LOG_WARNING, "Your zlib was compiled without gzip support.\n"); return AVERROR(ENOSYS); } #else av_log(h, AV_LOG_WARNING, "Compressed (%s) content, need zlib with gzip support\n", p); return AVERROR(ENOSYS); #endif /* CONFIG_ZLIB */ } else if (!av_strncasecmp(p, "identity", 8)) { // The normal, no-encoding case (although servers shouldn't include // the header at all if this is the case). } else { av_log(h, AV_LOG_WARNING, "Unknown content coding: %s\n", p); } return 0; } // Concat all Icy- header lines static int parse_icy(HTTPContext *s, const char *tag, const char *p) { int len = 4 + strlen(p) + strlen(tag); int is_first = !s->icy_metadata_headers; int ret; av_dict_set(&s->metadata, tag, p, 0); if (s->icy_metadata_headers) len += strlen(s->icy_metadata_headers); if ((ret = av_reallocp(&s->icy_metadata_headers, len)) < 0) return ret; if (is_first) *s->icy_metadata_headers = '\0'; av_strlcatf(s->icy_metadata_headers, len, "%s: %s\n", tag, p); return 0; } static int parse_cookie(HTTPContext *s, const char *p, AVDictionary **cookies) { char *eql, *name; // duplicate the cookie name (dict will dupe the value) if (!(eql = strchr(p, '='))) return AVERROR(EINVAL); if (!(name = av_strndup(p, eql - p))) return AVERROR(ENOMEM); // add the cookie to the dictionary av_dict_set(cookies, name, eql, AV_DICT_DONT_STRDUP_KEY); return 0; } static int cookie_string(AVDictionary *dict, char **cookies) { 
AVDictionaryEntry *e = NULL; int len = 1; // determine how much memory is needed for the cookies string while (e = av_dict_get(dict, "", e, AV_DICT_IGNORE_SUFFIX)) len += strlen(e->key) + strlen(e->value) + 1; // reallocate the cookies e = NULL; if (*cookies) av_free(*cookies); *cookies = av_malloc(len); if (!*cookies) return AVERROR(ENOMEM); *cookies[0] = '\0'; // write out the cookies while (e = av_dict_get(dict, "", e, AV_DICT_IGNORE_SUFFIX)) av_strlcatf(*cookies, len, "%s%s\n", e->key, e->value); return 0; } static int process_line(URLContext *h, char *line, int line_count, int *new_location) { HTTPContext *s = h->priv_data; const char *auto_method = h->flags & AVIO_FLAG_READ ? "POST" : "GET"; char *tag, *p, *end, *method, *resource, *version; int ret; /* end of header */ if (line[0] == '\0') { s->end_header = 1; return 0; } p = line; if (line_count == 0) { if (s->is_connected_server) { // HTTP method method = p; while (*p && !av_isspace(*p)) p++; *(p++) = '\0'; av_log(h, AV_LOG_TRACE, "Received method: %s\n", method); if (s->method) { if (av_strcasecmp(s->method, method)) { av_log(h, AV_LOG_ERROR, "Received and expected HTTP method do not match. 
(%s expected, %s received)\n", s->method, method); return ff_http_averror(400, AVERROR(EIO)); } } else { // use autodetected HTTP method to expect av_log(h, AV_LOG_TRACE, "Autodetected %s HTTP method\n", auto_method); if (av_strcasecmp(auto_method, method)) { av_log(h, AV_LOG_ERROR, "Received and autodetected HTTP method did not match " "(%s autodetected %s received)\n", auto_method, method); return ff_http_averror(400, AVERROR(EIO)); } if (!(s->method = av_strdup(method))) return AVERROR(ENOMEM); } // HTTP resource while (av_isspace(*p)) p++; resource = p; while (!av_isspace(*p)) p++; *(p++) = '\0'; av_log(h, AV_LOG_TRACE, "Requested resource: %s\n", resource); if (!(s->resource = av_strdup(resource))) return AVERROR(ENOMEM); // HTTP version while (av_isspace(*p)) p++; version = p; while (*p && !av_isspace(*p)) p++; *p = '\0'; if (av_strncasecmp(version, "HTTP/", 5)) { av_log(h, AV_LOG_ERROR, "Malformed HTTP version string.\n"); return ff_http_averror(400, AVERROR(EIO)); } av_log(h, AV_LOG_TRACE, "HTTP version string: %s\n", version); } else { while (!av_isspace(*p) && *p != '\0') p++; while (av_isspace(*p)) p++; s->http_code = strtol(p, &end, 10); av_log(h, AV_LOG_TRACE, "http_code=%d\n", s->http_code); if ((ret = check_http_code(h, s->http_code, end)) < 0) return ret; } } else { while (*p != '\0' && *p != ':') p++; if (*p != ':') return 1; *p = '\0'; tag = line; p++; while (av_isspace(*p)) p++; if (!av_strcasecmp(tag, "Location")) { if ((ret = parse_location(s, p)) < 0) return ret; *new_location = 1; } else if (!av_strcasecmp(tag, "Content-Length") && s->filesize == -1) { s->filesize = strtoll(p, NULL, 10); } else if (!av_strcasecmp(tag, "Content-Range")) { parse_content_range(h, p); } else if (!av_strcasecmp(tag, "Accept-Ranges") && !strncmp(p, "bytes", 5) && s->seekable == -1) { h->is_streamed = 0; } else if (!av_strcasecmp(tag, "Transfer-Encoding") && !av_strncasecmp(p, "chunked", 7)) { s->filesize = -1; s->chunksize = 0; } else if (!av_strcasecmp(tag, 
"WWW-Authenticate")) { ff_http_auth_handle_header(&s->auth_state, tag, p); } else if (!av_strcasecmp(tag, "Authentication-Info")) { ff_http_auth_handle_header(&s->auth_state, tag, p); } else if (!av_strcasecmp(tag, "Proxy-Authenticate")) { ff_http_auth_handle_header(&s->proxy_auth_state, tag, p); } else if (!av_strcasecmp(tag, "Connection")) { if (!strcmp(p, "close")) s->willclose = 1; } else if (!av_strcasecmp(tag, "Server")) { if (!av_strcasecmp(p, "AkamaiGHost")) { s->is_akamai = 1; } else if (!av_strncasecmp(p, "MediaGateway", 12)) { s->is_mediagateway = 1; } } else if (!av_strcasecmp(tag, "Content-Type")) { av_free(s->mime_type); s->mime_type = av_strdup(p); } else if (!av_strcasecmp(tag, "Set-Cookie")) { if (parse_cookie(s, p, &s->cookie_dict)) av_log(h, AV_LOG_WARNING, "Unable to parse '%s'\n", p); } else if (!av_strcasecmp(tag, "Icy-MetaInt")) { s->icy_metaint = strtoll(p, NULL, 10); } else if (!av_strncasecmp(tag, "Icy-", 4)) { if ((ret = parse_icy(s, tag, p)) < 0) return ret; } else if (!av_strcasecmp(tag, "Content-Encoding")) { if ((ret = parse_content_encoding(h, p)) < 0) return ret; } } return 1; } /** * Create a string containing cookie values for use as a HTTP cookie header * field value for a particular path and domain from the cookie values stored in * the HTTP protocol context. The cookie string is stored in *cookies. * * @return a negative value if an error condition occurred, 0 otherwise */ static int get_cookies(HTTPContext *s, char **cookies, const char *path, const char *domain) { // cookie strings will look like Set-Cookie header field values. Multiple // Set-Cookie fields will result in multiple values delimited by a newline int ret = 0; char *next, *cookie, *set_cookies = av_strdup(s->cookies), *cset_cookies = set_cookies; if (!set_cookies) return AVERROR(EINVAL); // destroy any cookies in the dictionary. 
av_dict_free(&s->cookie_dict); *cookies = NULL; while ((cookie = av_strtok(set_cookies, "\n", &next))) { int domain_offset = 0; char *param, *next_param, *cdomain = NULL, *cpath = NULL, *cvalue = NULL; set_cookies = NULL; // store the cookie in a dict in case it is updated in the response if (parse_cookie(s, cookie, &s->cookie_dict)) av_log(s, AV_LOG_WARNING, "Unable to parse '%s'\n", cookie); while ((param = av_strtok(cookie, "; ", &next_param))) { if (cookie) { // first key-value pair is the actual cookie value cvalue = av_strdup(param); cookie = NULL; } else if (!av_strncasecmp("path=", param, 5)) { av_free(cpath); cpath = av_strdup(&param[5]); } else if (!av_strncasecmp("domain=", param, 7)) { // if the cookie specifies a sub-domain, skip the leading dot thereby // supporting URLs that point to sub-domains and the master domain int leading_dot = (param[7] == '.'); av_free(cdomain); cdomain = av_strdup(&param[7+leading_dot]); } else { // ignore unknown attributes } } if (!cdomain) cdomain = av_strdup(domain); // ensure all of the necessary values are valid if (!cdomain || !cpath || !cvalue) { av_log(s, AV_LOG_WARNING, "Invalid cookie found, no value, path or domain specified\n"); goto done_cookie; } // check if the request path matches the cookie path if (av_strncasecmp(path, cpath, strlen(cpath))) goto done_cookie; // the domain should be at least the size of our cookie domain domain_offset = strlen(domain) - strlen(cdomain); if (domain_offset < 0) goto done_cookie; // match the cookie domain if (av_strcasecmp(&domain[domain_offset], cdomain)) goto done_cookie; // cookie parameters match, so copy the value if (!*cookies) { if (!(*cookies = av_strdup(cvalue))) { ret = AVERROR(ENOMEM); goto done_cookie; } } else { char *tmp = *cookies; size_t str_size = strlen(cvalue) + strlen(*cookies) + 3; if (!(*cookies = av_malloc(str_size))) { ret = AVERROR(ENOMEM); goto done_cookie; } snprintf(*cookies, str_size, "%s; %s", tmp, cvalue); av_free(tmp); } done_cookie: 
av_freep(&cdomain); av_freep(&cpath); av_freep(&cvalue); if (ret < 0) { if (*cookies) av_freep(cookies); av_free(cset_cookies); return ret; } } av_free(cset_cookies); return 0; } static inline int has_header(const char *str, const char *header) { /* header + 2 to skip over CRLF prefix. (make sure you have one!) */ if (!str) return 0; return av_stristart(str, header + 2, NULL) || av_stristr(str, header); } static int http_read_header(URLContext *h, int *new_location) { HTTPContext *s = h->priv_data; char line[MAX_URL_SIZE]; int err = 0; s->chunksize = -1; for (;;) { if ((err = http_get_line(s, line, sizeof(line))) < 0) return err; av_log(h, AV_LOG_TRACE, "header='%s'\n", line); err = process_line(h, line, s->line_count, new_location); if (err < 0) return err; if (err == 0) break; s->line_count++; } if (s->seekable == -1 && s->is_mediagateway && s->filesize == 2000000000) h->is_streamed = 1; /* we can in fact _not_ seek */ // add any new cookies into the existing cookie string cookie_string(s->cookie_dict, &s->cookies); av_dict_free(&s->cookie_dict); return err; } static int http_connect(URLContext *h, const char *path, const char *local_path, const char *hoststr, const char *auth, const char *proxyauth, int *new_location) { HTTPContext *s = h->priv_data; int post, err; char headers[HTTP_HEADERS_SIZE] = ""; char *authstr = NULL, *proxyauthstr = NULL; int64_t off = s->off; int len = 0; const char *method; int send_expect_100 = 0; /* send http header */ post = h->flags & AVIO_FLAG_WRITE; if (s->post_data) { /* force POST method and disable chunked encoding when * custom HTTP post data is set */ post = 1; s->chunked_post = 0; } if (s->method) method = s->method; else method = post ? 
"POST" : "GET"; authstr = ff_http_auth_create_response(&s->auth_state, auth, local_path, method); proxyauthstr = ff_http_auth_create_response(&s->proxy_auth_state, proxyauth, local_path, method); if (post && !s->post_data) { send_expect_100 = s->send_expect_100; /* The user has supplied authentication but we don't know the auth type, * send Expect: 100-continue to get the 401 response including the * WWW-Authenticate header, or an 100 continue if no auth actually * is needed. */ if (auth && *auth && s->auth_state.auth_type == HTTP_AUTH_NONE && s->http_code != 401) send_expect_100 = 1; } #if FF_API_HTTP_USER_AGENT if (strcmp(s->user_agent_deprecated, DEFAULT_USER_AGENT)) { av_log(s, AV_LOG_WARNING, "the user-agent option is deprecated, please use user_agent option\n"); s->user_agent = av_strdup(s->user_agent_deprecated); } #endif /* set default headers if needed */ if (!has_header(s->headers, "\r\nUser-Agent: ")) len += av_strlcatf(headers + len, sizeof(headers) - len, "User-Agent: %s\r\n", s->user_agent); if (!has_header(s->headers, "\r\nAccept: ")) len += av_strlcpy(headers + len, "Accept: */*\r\n", sizeof(headers) - len); // Note: we send this on purpose even when s->off is 0 when we're probing, // since it allows us to detect more reliably if a (non-conforming) // server supports seeking by analysing the reply headers. 
if (!has_header(s->headers, "\r\nRange: ") && !post && (s->off > 0 || s->end_off || s->seekable == -1)) { len += av_strlcatf(headers + len, sizeof(headers) - len, "Range: bytes=%"PRId64"-", s->off); if (s->end_off) len += av_strlcatf(headers + len, sizeof(headers) - len, "%"PRId64, s->end_off - 1); len += av_strlcpy(headers + len, "\r\n", sizeof(headers) - len); } if (send_expect_100 && !has_header(s->headers, "\r\nExpect: ")) len += av_strlcatf(headers + len, sizeof(headers) - len, "Expect: 100-continue\r\n"); if (!has_header(s->headers, "\r\nConnection: ")) { if (s->multiple_requests) len += av_strlcpy(headers + len, "Connection: keep-alive\r\n", sizeof(headers) - len); else len += av_strlcpy(headers + len, "Connection: close\r\n", sizeof(headers) - len); } if (!has_header(s->headers, "\r\nHost: ")) len += av_strlcatf(headers + len, sizeof(headers) - len, "Host: %s\r\n", hoststr); if (!has_header(s->headers, "\r\nContent-Length: ") && s->post_data) len += av_strlcatf(headers + len, sizeof(headers) - len, "Content-Length: %d\r\n", s->post_datalen); if (!has_header(s->headers, "\r\nContent-Type: ") && s->content_type) len += av_strlcatf(headers + len, sizeof(headers) - len, "Content-Type: %s\r\n", s->content_type); if (!has_header(s->headers, "\r\nCookie: ") && s->cookies) { char *cookies = NULL; if (!get_cookies(s, &cookies, path, hoststr) && cookies) { len += av_strlcatf(headers + len, sizeof(headers) - len, "Cookie: %s\r\n", cookies); av_free(cookies); } } if (!has_header(s->headers, "\r\nIcy-MetaData: ") && s->icy) len += av_strlcatf(headers + len, sizeof(headers) - len, "Icy-MetaData: %d\r\n", 1); /* now add in custom headers */ if (s->headers) av_strlcpy(headers + len, s->headers, sizeof(headers) - len); snprintf(s->buffer, sizeof(s->buffer), "%s %s HTTP/1.1\r\n" "%s" "%s" "%s" "%s%s" "\r\n", method, path, post && s->chunked_post ? "Transfer-Encoding: chunked\r\n" : "", headers, authstr ? authstr : "", proxyauthstr ? "Proxy-" : "", proxyauthstr ? 
proxyauthstr : ""); av_log(h, AV_LOG_DEBUG, "request: %s\n", s->buffer); if ((err = ffurl_write(s->hd, s->buffer, strlen(s->buffer))) < 0) goto done; if (s->post_data) if ((err = ffurl_write(s->hd, s->post_data, s->post_datalen)) < 0) goto done; /* init input buffer */ s->buf_ptr = s->buffer; s->buf_end = s->buffer; s->line_count = 0; s->off = 0; s->icy_data_read = 0; s->filesize = -1; s->willclose = 0; s->end_chunked_post = 0; s->end_header = 0; if (post && !s->post_data && !send_expect_100) { /* Pretend that it did work. We didn't read any header yet, since * we've still to send the POST data, but the code calling this * function will check http_code after we return. */ s->http_code = 200; err = 0; goto done; } /* wait for header */ err = http_read_header(h, new_location); if (err < 0) goto done; if (*new_location) s->off = off; err = (off == s->off) ? 0 : -1; done: av_freep(&authstr); av_freep(&proxyauthstr); return err; } static int http_buf_read(URLContext *h, uint8_t *buf, int size) { HTTPContext *s = h->priv_data; int len; /* read bytes from input buffer first */ len = s->buf_end - s->buf_ptr; if (len > 0) { if (len > size) len = size; memcpy(buf, s->buf_ptr, len); s->buf_ptr += len; } else { int64_t target_end = s->end_off ? 
s->end_off : s->filesize; if ((!s->willclose || s->chunksize < 0) && target_end >= 0 && s->off >= target_end) return AVERROR_EOF; len = ffurl_read(s->hd, buf, size); if (!len && (!s->willclose || s->chunksize < 0) && target_end >= 0 && s->off < target_end) { av_log(h, AV_LOG_ERROR, "Stream ends prematurely at %"PRId64", should be %"PRId64"\n", s->off, target_end ); return AVERROR(EIO); } } if (len > 0) { s->off += len; if (s->chunksize > 0) s->chunksize -= len; } return len; } #if CONFIG_ZLIB #define DECOMPRESS_BUF_SIZE (256 * 1024) static int http_buf_read_compressed(URLContext *h, uint8_t *buf, int size) { HTTPContext *s = h->priv_data; int ret; if (!s->inflate_buffer) { s->inflate_buffer = av_malloc(DECOMPRESS_BUF_SIZE); if (!s->inflate_buffer) return AVERROR(ENOMEM); } if (s->inflate_stream.avail_in == 0) { int read = http_buf_read(h, s->inflate_buffer, DECOMPRESS_BUF_SIZE); if (read <= 0) return read; s->inflate_stream.next_in = s->inflate_buffer; s->inflate_stream.avail_in = read; } s->inflate_stream.avail_out = size; s->inflate_stream.next_out = buf; ret = inflate(&s->inflate_stream, Z_SYNC_FLUSH); if (ret != Z_OK && ret != Z_STREAM_END) av_log(h, AV_LOG_WARNING, "inflate return value: %d, %s\n", ret, s->inflate_stream.msg); return size - s->inflate_stream.avail_out; } #endif /* CONFIG_ZLIB */ static int64_t http_seek_internal(URLContext *h, int64_t off, int whence, int force_reconnect); static int http_read_stream(URLContext *h, uint8_t *buf, int size) { HTTPContext *s = h->priv_data; int err, new_location, read_ret; int64_t seek_ret; if (!s->hd) return AVERROR_EOF; if (s->end_chunked_post && !s->end_header) { err = http_read_header(h, &new_location); if (err < 0) return err; } if (s->chunksize >= 0) { if (!s->chunksize) { char line[32]; do { if ((err = http_get_line(s, line, sizeof(line))) < 0) return err; } while (!*line); /* skip CR LF from last chunk */ s->chunksize = strtoll(line, NULL, 16); av_log(NULL, AV_LOG_TRACE, "Chunked encoding data size: 
%"PRId64"'\n", s->chunksize); if (!s->chunksize) return 0; } size = FFMIN(size, s->chunksize); } #if CONFIG_ZLIB if (s->compressed) return http_buf_read_compressed(h, buf, size); #endif /* CONFIG_ZLIB */ read_ret = http_buf_read(h, buf, size); if ( (read_ret < 0 && s->reconnect && (!h->is_streamed || s->reconnect_streamed) && s->filesize > 0 && s->off < s->filesize) || (read_ret == 0 && s->reconnect_at_eof && (!h->is_streamed || s->reconnect_streamed))) { int64_t target = h->is_streamed ? 0 : s->off; if (s->reconnect_delay > s->reconnect_delay_max) return AVERROR(EIO); av_log(h, AV_LOG_INFO, "Will reconnect at %"PRId64" error=%s.\n", s->off, av_err2str(read_ret)); av_usleep(1000U*1000*s->reconnect_delay); s->reconnect_delay = 1 + 2*s->reconnect_delay; seek_ret = http_seek_internal(h, target, SEEK_SET, 1); if (seek_ret != target) { av_log(h, AV_LOG_ERROR, "Failed to reconnect at %"PRId64".\n", target); return read_ret; } read_ret = http_buf_read(h, buf, size); } else s->reconnect_delay = 0; return read_ret; } // Like http_read_stream(), but no short reads. // Assumes partial reads are an error. static int http_read_stream_all(URLContext *h, uint8_t *buf, int size) { int pos = 0; while (pos < size) { int len = http_read_stream(h, buf + pos, size - pos); if (len < 0) return len; pos += len; } return pos; } static void update_metadata(HTTPContext *s, char *data) { char *key; char *val; char *end; char *next = data; while (*next) { key = next; val = strstr(key, "='"); if (!val) break; end = strstr(val, "';"); if (!end) break; *val = '\0'; *end = '\0'; val += 2; av_dict_set(&s->metadata, key, val, 0); next = end + 2; } } static int store_icy(URLContext *h, int size) { HTTPContext *s = h->priv_data; /* until next metadata packet */ int remaining = s->icy_metaint - s->icy_data_read; if (remaining < 0) return AVERROR_INVALIDDATA; if (!remaining) { /* The metadata packet is variable sized. It has a 1 byte header * which sets the length of the packet (divided by 16). 
If it's 0, * the metadata doesn't change. After the packet, icy_metaint bytes * of normal data follows. */ uint8_t ch; int len = http_read_stream_all(h, &ch, 1); if (len < 0) return len; if (ch > 0) { char data[255 * 16 + 1]; int ret; len = ch * 16; ret = http_read_stream_all(h, data, len); if (ret < 0) return ret; data[len + 1] = 0; if ((ret = av_opt_set(s, "icy_metadata_packet", data, 0)) < 0) return ret; update_metadata(s, data); } s->icy_data_read = 0; remaining = s->icy_metaint; } return FFMIN(size, remaining); } static int http_read(URLContext *h, uint8_t *buf, int size) { HTTPContext *s = h->priv_data; if (s->icy_metaint > 0) { size = store_icy(h, size); if (size < 0) return size; } size = http_read_stream(h, buf, size); if (size > 0) s->icy_data_read += size; return size; } /* used only when posting data */ static int http_write(URLContext *h, const uint8_t *buf, int size) { char temp[11] = ""; /* 32-bit hex + CRLF + nul */ int ret; char crlf[] = "\r\n"; HTTPContext *s = h->priv_data; if (!s->chunked_post) { /* non-chunked data is sent without any special encoding */ return ffurl_write(s->hd, buf, size); } /* silently ignore zero-size data since chunk encoding that would * signal EOF */ if (size > 0) { /* upload data using chunked encoding */ snprintf(temp, sizeof(temp), "%x\r\n", size); if ((ret = ffurl_write(s->hd, temp, strlen(temp))) < 0 || (ret = ffurl_write(s->hd, buf, size)) < 0 || (ret = ffurl_write(s->hd, crlf, sizeof(crlf) - 1)) < 0) return ret; } return size; } static int http_shutdown(URLContext *h, int flags) { int ret = 0; char footer[] = "0\r\n\r\n"; HTTPContext *s = h->priv_data; /* signal end of chunked encoding if used */ if (((flags & AVIO_FLAG_WRITE) && s->chunked_post) || ((flags & AVIO_FLAG_READ) && s->chunked_post && s->listen)) { ret = ffurl_write(s->hd, footer, sizeof(footer) - 1); ret = ret > 0 ? 
0 : ret; s->end_chunked_post = 1; } return ret; } static int http_close(URLContext *h) { int ret = 0; HTTPContext *s = h->priv_data; #if CONFIG_ZLIB inflateEnd(&s->inflate_stream); av_freep(&s->inflate_buffer); #endif /* CONFIG_ZLIB */ if (!s->end_chunked_post) /* Close the write direction by sending the end of chunked encoding. */ ret = http_shutdown(h, h->flags); if (s->hd) ffurl_closep(&s->hd); av_dict_free(&s->chained_options); return ret; } static int64_t http_seek_internal(URLContext *h, int64_t off, int whence, int force_reconnect) { HTTPContext *s = h->priv_data; URLContext *old_hd = s->hd; int64_t old_off = s->off; uint8_t old_buf[BUFFER_SIZE]; int old_buf_size, ret; AVDictionary *options = NULL; if (whence == AVSEEK_SIZE) return s->filesize; else if (!force_reconnect && ((whence == SEEK_CUR && off == 0) || (whence == SEEK_SET && off == s->off))) return s->off; else if ((s->filesize == -1 && whence == SEEK_END)) return AVERROR(ENOSYS); if (whence == SEEK_CUR) off += s->off; else if (whence == SEEK_END) off += s->filesize; else if (whence != SEEK_SET) return AVERROR(EINVAL); if (off < 0) return AVERROR(EINVAL); s->off = off; if (s->off && h->is_streamed) return AVERROR(ENOSYS); /* we save the old context in case the seek fails */ old_buf_size = s->buf_end - s->buf_ptr; memcpy(old_buf, s->buf_ptr, old_buf_size); s->hd = NULL; /* if it fails, continue on old connection */ if ((ret = http_open_cnx(h, &options)) < 0) { av_dict_free(&options); memcpy(s->buffer, old_buf, old_buf_size); s->buf_ptr = s->buffer; s->buf_end = s->buffer + old_buf_size; s->hd = old_hd; s->off = old_off; return ret; } av_dict_free(&options); ffurl_close(old_hd); return off; } static int64_t http_seek(URLContext *h, int64_t off, int whence) { return http_seek_internal(h, off, whence, 0); } static int http_get_file_handle(URLContext *h) { HTTPContext *s = h->priv_data; return ffurl_get_file_handle(s->hd); } #define HTTP_CLASS(flavor) \ static const AVClass flavor ## _context_class = { \ 
.class_name = # flavor, \ .item_name = av_default_item_name, \ .option = options, \ .version = LIBAVUTIL_VERSION_INT, \ } #if CONFIG_HTTP_PROTOCOL HTTP_CLASS(http); const URLProtocol ff_http_protocol = { .name = "http", .url_open2 = http_open, .url_accept = http_accept, .url_handshake = http_handshake, .url_read = http_read, .url_write = http_write, .url_seek = http_seek, .url_close = http_close, .url_get_file_handle = http_get_file_handle, .url_shutdown = http_shutdown, .priv_data_size = sizeof(HTTPContext), .priv_data_class = &http_context_class, .flags = URL_PROTOCOL_FLAG_NETWORK, .default_whitelist = "http,https,tls,rtp,tcp,udp,crypto,httpproxy" }; #endif /* CONFIG_HTTP_PROTOCOL */ #if CONFIG_HTTPS_PROTOCOL HTTP_CLASS(https); const URLProtocol ff_https_protocol = { .name = "https", .url_open2 = http_open, .url_read = http_read, .url_write = http_write, .url_seek = http_seek, .url_close = http_close, .url_get_file_handle = http_get_file_handle, .url_shutdown = http_shutdown, .priv_data_size = sizeof(HTTPContext), .priv_data_class = &https_context_class, .flags = URL_PROTOCOL_FLAG_NETWORK, .default_whitelist = "http,https,tls,rtp,tcp,udp,crypto,httpproxy" }; #endif /* CONFIG_HTTPS_PROTOCOL */ #if CONFIG_HTTPPROXY_PROTOCOL static int http_proxy_close(URLContext *h) { HTTPContext *s = h->priv_data; if (s->hd) ffurl_closep(&s->hd); return 0; } static int http_proxy_open(URLContext *h, const char *uri, int flags) { HTTPContext *s = h->priv_data; char hostname[1024], hoststr[1024]; char auth[1024], pathbuf[1024], *path; char lower_url[100]; int port, ret = 0, attempts = 0; HTTPAuthType cur_auth_type; char *authstr; int new_loc; if( s->seekable == 1 ) h->is_streamed = 0; else h->is_streamed = 1; av_url_split(NULL, 0, auth, sizeof(auth), hostname, sizeof(hostname), &port, pathbuf, sizeof(pathbuf), uri); ff_url_join(hoststr, sizeof(hoststr), NULL, NULL, hostname, port, NULL); path = pathbuf; if (*path == '/') path++; ff_url_join(lower_url, sizeof(lower_url), "tcp", NULL, 
hostname, port, NULL); redo: ret = ffurl_open_whitelist(&s->hd, lower_url, AVIO_FLAG_READ_WRITE, &h->interrupt_callback, NULL, h->protocol_whitelist, h->protocol_blacklist, h); if (ret < 0) return ret; authstr = ff_http_auth_create_response(&s->proxy_auth_state, auth, path, "CONNECT"); snprintf(s->buffer, sizeof(s->buffer), "CONNECT %s HTTP/1.1\r\n" "Host: %s\r\n" "Connection: close\r\n" "%s%s" "\r\n", path, hoststr, authstr ? "Proxy-" : "", authstr ? authstr : ""); av_freep(&authstr); if ((ret = ffurl_write(s->hd, s->buffer, strlen(s->buffer))) < 0) goto fail; s->buf_ptr = s->buffer; s->buf_end = s->buffer; s->line_count = 0; s->filesize = -1; cur_auth_type = s->proxy_auth_state.auth_type; /* Note: This uses buffering, potentially reading more than the * HTTP header. If tunneling a protocol where the server starts * the conversation, we might buffer part of that here, too. * Reading that requires using the proper ffurl_read() function * on this URLContext, not using the fd directly (as the tls * protocol does). This shouldn't be an issue for tls though, * since the client starts the conversation there, so there * is no extra data that we might buffer up here. 
*/ ret = http_read_header(h, &new_loc); if (ret < 0) goto fail; attempts++; if (s->http_code == 407 && (cur_auth_type == HTTP_AUTH_NONE || s->proxy_auth_state.stale) && s->proxy_auth_state.auth_type != HTTP_AUTH_NONE && attempts < 2) { ffurl_closep(&s->hd); goto redo; } if (s->http_code < 400) return 0; ret = ff_http_averror(s->http_code, AVERROR(EIO)); fail: http_proxy_close(h); return ret; } static int http_proxy_write(URLContext *h, const uint8_t *buf, int size) { HTTPContext *s = h->priv_data; return ffurl_write(s->hd, buf, size); } const URLProtocol ff_httpproxy_protocol = { .name = "httpproxy", .url_open = http_proxy_open, .url_read = http_buf_read, .url_write = http_proxy_write, .url_close = http_proxy_close, .url_get_file_handle = http_get_file_handle, .priv_data_size = sizeof(HTTPContext), .flags = URL_PROTOCOL_FLAG_NETWORK, }; #endif /* CONFIG_HTTPPROXY_PROTOCOL */
/* * HTTP protocol for ffmpeg client * Copyright (c) 2000, 2001 Fabrice Bellard * * This file is part of FFmpeg. * * FFmpeg is free software; you can redistribute it and/or * modify it under the terms of the GNU Lesser General Public * License as published by the Free Software Foundation; either * version 2.1 of the License, or (at your option) any later version. * * FFmpeg is distributed in the hope that it will be useful, * but WITHOUT ANY WARRANTY; without even the implied warranty of * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU * Lesser General Public License for more details. * * You should have received a copy of the GNU Lesser General Public * License along with FFmpeg; if not, write to the Free Software * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA */ #include "config.h" #if CONFIG_ZLIB #include <zlib.h> #endif /* CONFIG_ZLIB */ #include "libavutil/avassert.h" #include "libavutil/avstring.h" #include "libavutil/opt.h" #include "libavutil/time.h" #include "avformat.h" #include "http.h" #include "httpauth.h" #include "internal.h" #include "network.h" #include "os_support.h" #include "url.h" /* XXX: POST protocol is not completely implemented because ffmpeg uses * only a subset of it. */ /* The IO buffer size is unrelated to the max URL size in itself, but needs * to be large enough to fit the full request headers (including long * path names). */ #define BUFFER_SIZE MAX_URL_SIZE #define MAX_REDIRECTS 8 #define HTTP_SINGLE 1 #define HTTP_MUTLI 2 typedef enum { LOWER_PROTO, READ_HEADERS, WRITE_REPLY_HEADERS, FINISH }HandshakeState; typedef struct HTTPContext { const AVClass *class; URLContext *hd; unsigned char buffer[BUFFER_SIZE], *buf_ptr, *buf_end; int line_count; int http_code; /* Used if "Transfer-Encoding: chunked" otherwise -1. 
*/ uint64_t chunksize; uint64_t off, end_off, filesize; char *location; HTTPAuthState auth_state; HTTPAuthState proxy_auth_state; char *http_proxy; char *headers; char *mime_type; char *user_agent; #if FF_API_HTTP_USER_AGENT char *user_agent_deprecated; #endif char *content_type; /* Set if the server correctly handles Connection: close and will close * the connection after feeding us the content. */ int willclose; int seekable; /**< Control seekability, 0 = disable, 1 = enable, -1 = probe. */ int chunked_post; /* A flag which indicates if the end of chunked encoding has been sent. */ int end_chunked_post; /* A flag which indicates we have finished to read POST reply. */ int end_header; /* A flag which indicates if we use persistent connections. */ int multiple_requests; uint8_t *post_data; int post_datalen; int is_akamai; int is_mediagateway; char *cookies; ///< holds newline (\n) delimited Set-Cookie header field values (without the "Set-Cookie: " field name) /* A dictionary containing cookies keyed by cookie name */ AVDictionary *cookie_dict; int icy; /* how much data was read since the last ICY metadata packet */ uint64_t icy_data_read; /* after how many bytes of read data a new metadata packet will be found */ uint64_t icy_metaint; char *icy_metadata_headers; char *icy_metadata_packet; AVDictionary *metadata; #if CONFIG_ZLIB int compressed; z_stream inflate_stream; uint8_t *inflate_buffer; #endif /* CONFIG_ZLIB */ AVDictionary *chained_options; int send_expect_100; char *method; int reconnect; int reconnect_at_eof; int reconnect_streamed; int reconnect_delay; int reconnect_delay_max; int listen; char *resource; int reply_code; int is_multi_client; HandshakeState handshake_step; int is_connected_server; } HTTPContext; #define OFFSET(x) offsetof(HTTPContext, x) #define D AV_OPT_FLAG_DECODING_PARAM #define E AV_OPT_FLAG_ENCODING_PARAM #define DEFAULT_USER_AGENT "Lavf/" AV_STRINGIFY(LIBAVFORMAT_VERSION) static const AVOption options[] = { { "seekable", "control 
seekability of connection", OFFSET(seekable), AV_OPT_TYPE_BOOL, { .i64 = -1 }, -1, 1, D }, { "chunked_post", "use chunked transfer-encoding for posts", OFFSET(chunked_post), AV_OPT_TYPE_BOOL, { .i64 = 1 }, 0, 1, E }, { "http_proxy", "set HTTP proxy to tunnel through", OFFSET(http_proxy), AV_OPT_TYPE_STRING, { .str = NULL }, 0, 0, D | E }, { "headers", "set custom HTTP headers, can override built in default headers", OFFSET(headers), AV_OPT_TYPE_STRING, { .str = NULL }, 0, 0, D | E }, { "content_type", "set a specific content type for the POST messages", OFFSET(content_type), AV_OPT_TYPE_STRING, { .str = NULL }, 0, 0, D | E }, { "user_agent", "override User-Agent header", OFFSET(user_agent), AV_OPT_TYPE_STRING, { .str = DEFAULT_USER_AGENT }, 0, 0, D }, #if FF_API_HTTP_USER_AGENT { "user-agent", "override User-Agent header", OFFSET(user_agent_deprecated), AV_OPT_TYPE_STRING, { .str = DEFAULT_USER_AGENT }, 0, 0, D }, #endif { "multiple_requests", "use persistent connections", OFFSET(multiple_requests), AV_OPT_TYPE_BOOL, { .i64 = 0 }, 0, 1, D | E }, { "post_data", "set custom HTTP post data", OFFSET(post_data), AV_OPT_TYPE_BINARY, .flags = D | E }, { "mime_type", "export the MIME type", OFFSET(mime_type), AV_OPT_TYPE_STRING, { .str = NULL }, 0, 0, AV_OPT_FLAG_EXPORT | AV_OPT_FLAG_READONLY }, { "cookies", "set cookies to be sent in applicable future requests, use newline delimited Set-Cookie HTTP field value syntax", OFFSET(cookies), AV_OPT_TYPE_STRING, { .str = NULL }, 0, 0, D }, { "icy", "request ICY metadata", OFFSET(icy), AV_OPT_TYPE_BOOL, { .i64 = 1 }, 0, 1, D }, { "icy_metadata_headers", "return ICY metadata headers", OFFSET(icy_metadata_headers), AV_OPT_TYPE_STRING, { .str = NULL }, 0, 0, AV_OPT_FLAG_EXPORT }, { "icy_metadata_packet", "return current ICY metadata packet", OFFSET(icy_metadata_packet), AV_OPT_TYPE_STRING, { .str = NULL }, 0, 0, AV_OPT_FLAG_EXPORT }, { "metadata", "metadata read from the bitstream", OFFSET(metadata), AV_OPT_TYPE_DICT, {0}, 0, 0, 
AV_OPT_FLAG_EXPORT }, { "auth_type", "HTTP authentication type", OFFSET(auth_state.auth_type), AV_OPT_TYPE_INT, { .i64 = HTTP_AUTH_NONE }, HTTP_AUTH_NONE, HTTP_AUTH_BASIC, D | E, "auth_type"}, { "none", "No auth method set, autodetect", 0, AV_OPT_TYPE_CONST, { .i64 = HTTP_AUTH_NONE }, 0, 0, D | E, "auth_type"}, { "basic", "HTTP basic authentication", 0, AV_OPT_TYPE_CONST, { .i64 = HTTP_AUTH_BASIC }, 0, 0, D | E, "auth_type"}, { "send_expect_100", "Force sending an Expect: 100-continue header for POST", OFFSET(send_expect_100), AV_OPT_TYPE_BOOL, { .i64 = 0 }, 0, 1, E }, { "location", "The actual location of the data received", OFFSET(location), AV_OPT_TYPE_STRING, { .str = NULL }, 0, 0, D | E }, { "offset", "initial byte offset", OFFSET(off), AV_OPT_TYPE_INT64, { .i64 = 0 }, 0, INT64_MAX, D }, { "end_offset", "try to limit the request to bytes preceding this offset", OFFSET(end_off), AV_OPT_TYPE_INT64, { .i64 = 0 }, 0, INT64_MAX, D }, { "method", "Override the HTTP method or set the expected HTTP method from a client", OFFSET(method), AV_OPT_TYPE_STRING, { .str = NULL }, 0, 0, D | E }, { "reconnect", "auto reconnect after disconnect before EOF", OFFSET(reconnect), AV_OPT_TYPE_BOOL, { .i64 = 0 }, 0, 1, D }, { "reconnect_at_eof", "auto reconnect at EOF", OFFSET(reconnect_at_eof), AV_OPT_TYPE_BOOL, { .i64 = 0 }, 0, 1, D }, { "reconnect_streamed", "auto reconnect streamed / non seekable streams", OFFSET(reconnect_streamed), AV_OPT_TYPE_BOOL, { .i64 = 0 }, 0, 1, D }, { "reconnect_delay_max", "max reconnect delay in seconds after which to give up", OFFSET(reconnect_delay_max), AV_OPT_TYPE_INT, { .i64 = 120 }, 0, UINT_MAX/1000/1000, D }, { "listen", "listen on HTTP", OFFSET(listen), AV_OPT_TYPE_INT, { .i64 = 0 }, 0, 2, D | E }, { "resource", "The resource requested by a client", OFFSET(resource), AV_OPT_TYPE_STRING, { .str = NULL }, 0, 0, E }, { "reply_code", "The http status code to return to a client", OFFSET(reply_code), AV_OPT_TYPE_INT, { .i64 = 200}, INT_MIN, 599, E}, 
{ NULL } }; static int http_connect(URLContext *h, const char *path, const char *local_path, const char *hoststr, const char *auth, const char *proxyauth, int *new_location); static int http_read_header(URLContext *h, int *new_location); void ff_http_init_auth_state(URLContext *dest, const URLContext *src) { memcpy(&((HTTPContext *)dest->priv_data)->auth_state, &((HTTPContext *)src->priv_data)->auth_state, sizeof(HTTPAuthState)); memcpy(&((HTTPContext *)dest->priv_data)->proxy_auth_state, &((HTTPContext *)src->priv_data)->proxy_auth_state, sizeof(HTTPAuthState)); } static int http_open_cnx_internal(URLContext *h, AVDictionary **options) { const char *path, *proxy_path, *lower_proto = "tcp", *local_path; char hostname[1024], hoststr[1024], proto[10]; char auth[1024], proxyauth[1024] = ""; char path1[MAX_URL_SIZE]; char buf[1024], urlbuf[MAX_URL_SIZE]; int port, use_proxy, err, location_changed = 0; HTTPContext *s = h->priv_data; av_url_split(proto, sizeof(proto), auth, sizeof(auth), hostname, sizeof(hostname), &port, path1, sizeof(path1), s->location); ff_url_join(hoststr, sizeof(hoststr), NULL, NULL, hostname, port, NULL); proxy_path = s->http_proxy ? s->http_proxy : getenv("http_proxy"); use_proxy = !ff_http_match_no_proxy(getenv("no_proxy"), hostname) && proxy_path && av_strstart(proxy_path, "http://", NULL); if (!strcmp(proto, "https")) { lower_proto = "tls"; use_proxy = 0; if (port < 0) port = 443; } if (port < 0) port = 80; if (path1[0] == '\0') path = "/"; else path = path1; local_path = path; if (use_proxy) { /* Reassemble the request URL without auth string - we don't * want to leak the auth to the proxy. 
*/ ff_url_join(urlbuf, sizeof(urlbuf), proto, NULL, hostname, port, "%s", path1); path = urlbuf; av_url_split(NULL, 0, proxyauth, sizeof(proxyauth), hostname, sizeof(hostname), &port, NULL, 0, proxy_path); } ff_url_join(buf, sizeof(buf), lower_proto, NULL, hostname, port, NULL); if (!s->hd) { err = ffurl_open_whitelist(&s->hd, buf, AVIO_FLAG_READ_WRITE, &h->interrupt_callback, options, h->protocol_whitelist, h->protocol_blacklist, h); if (err < 0) return err; } err = http_connect(h, path, local_path, hoststr, auth, proxyauth, &location_changed); if (err < 0) return err; return location_changed; } /* return non zero if error */ static int http_open_cnx(URLContext *h, AVDictionary **options) { HTTPAuthType cur_auth_type, cur_proxy_auth_type; HTTPContext *s = h->priv_data; int location_changed, attempts = 0, redirects = 0; redo: av_dict_copy(options, s->chained_options, 0); cur_auth_type = s->auth_state.auth_type; cur_proxy_auth_type = s->auth_state.auth_type; location_changed = http_open_cnx_internal(h, options); if (location_changed < 0) goto fail; attempts++; if (s->http_code == 401) { if ((cur_auth_type == HTTP_AUTH_NONE || s->auth_state.stale) && s->auth_state.auth_type != HTTP_AUTH_NONE && attempts < 4) { ffurl_closep(&s->hd); goto redo; } else goto fail; } if (s->http_code == 407) { if ((cur_proxy_auth_type == HTTP_AUTH_NONE || s->proxy_auth_state.stale) && s->proxy_auth_state.auth_type != HTTP_AUTH_NONE && attempts < 4) { ffurl_closep(&s->hd); goto redo; } else goto fail; } if ((s->http_code == 301 || s->http_code == 302 || s->http_code == 303 || s->http_code == 307) && location_changed == 1) { /* url moved, get next */ ffurl_closep(&s->hd); if (redirects++ >= MAX_REDIRECTS) return AVERROR(EIO); /* Restart the authentication process with the new target, which * might use a different auth mechanism. 
*/ memset(&s->auth_state, 0, sizeof(s->auth_state)); attempts = 0; location_changed = 0; goto redo; } return 0; fail: if (s->hd) ffurl_closep(&s->hd); if (location_changed < 0) return location_changed; return ff_http_averror(s->http_code, AVERROR(EIO)); } int ff_http_do_new_request(URLContext *h, const char *uri) { HTTPContext *s = h->priv_data; AVDictionary *options = NULL; int ret; s->off = 0; s->icy_data_read = 0; av_free(s->location); s->location = av_strdup(uri); if (!s->location) return AVERROR(ENOMEM); ret = http_open_cnx(h, &options); av_dict_free(&options); return ret; } int ff_http_averror(int status_code, int default_averror) { switch (status_code) { case 400: return AVERROR_HTTP_BAD_REQUEST; case 401: return AVERROR_HTTP_UNAUTHORIZED; case 403: return AVERROR_HTTP_FORBIDDEN; case 404: return AVERROR_HTTP_NOT_FOUND; default: break; } if (status_code >= 400 && status_code <= 499) return AVERROR_HTTP_OTHER_4XX; else if (status_code >= 500) return AVERROR_HTTP_SERVER_ERROR; else return default_averror; } static int http_write_reply(URLContext* h, int status_code) { int ret, body = 0, reply_code, message_len; const char *reply_text, *content_type; HTTPContext *s = h->priv_data; char message[BUFFER_SIZE]; content_type = "text/plain"; if (status_code < 0) body = 1; switch (status_code) { case AVERROR_HTTP_BAD_REQUEST: case 400: reply_code = 400; reply_text = "Bad Request"; break; case AVERROR_HTTP_FORBIDDEN: case 403: reply_code = 403; reply_text = "Forbidden"; break; case AVERROR_HTTP_NOT_FOUND: case 404: reply_code = 404; reply_text = "Not Found"; break; case 200: reply_code = 200; reply_text = "OK"; content_type = s->content_type ? 
s->content_type : "application/octet-stream"; break; case AVERROR_HTTP_SERVER_ERROR: case 500: reply_code = 500; reply_text = "Internal server error"; break; default: return AVERROR(EINVAL); } if (body) { s->chunked_post = 0; message_len = snprintf(message, sizeof(message), "HTTP/1.1 %03d %s\r\n" "Content-Type: %s\r\n" "Content-Length: %"SIZE_SPECIFIER"\r\n" "%s" "\r\n" "%03d %s\r\n", reply_code, reply_text, content_type, strlen(reply_text) + 6, // 3 digit status code + space + \r\n s->headers ? s->headers : "", reply_code, reply_text); } else { s->chunked_post = 1; message_len = snprintf(message, sizeof(message), "HTTP/1.1 %03d %s\r\n" "Content-Type: %s\r\n" "Transfer-Encoding: chunked\r\n" "%s" "\r\n", reply_code, reply_text, content_type, s->headers ? s->headers : ""); } av_log(h, AV_LOG_TRACE, "HTTP reply header: \n%s----\n", message); if ((ret = ffurl_write(s->hd, message, message_len)) < 0) return ret; return 0; } static void handle_http_errors(URLContext *h, int error) { av_assert0(error < 0); http_write_reply(h, error); } static int http_handshake(URLContext *c) { int ret, err, new_location; HTTPContext *ch = c->priv_data; URLContext *cl = ch->hd; switch (ch->handshake_step) { case LOWER_PROTO: av_log(c, AV_LOG_TRACE, "Lower protocol\n"); if ((ret = ffurl_handshake(cl)) > 0) return 2 + ret; if (ret < 0) return ret; ch->handshake_step = READ_HEADERS; ch->is_connected_server = 1; return 2; case READ_HEADERS: av_log(c, AV_LOG_TRACE, "Read headers\n"); if ((err = http_read_header(c, &new_location)) < 0) { handle_http_errors(c, err); return err; } ch->handshake_step = WRITE_REPLY_HEADERS; return 1; case WRITE_REPLY_HEADERS: av_log(c, AV_LOG_TRACE, "Reply code: %d\n", ch->reply_code); if ((err = http_write_reply(c, ch->reply_code)) < 0) return err; ch->handshake_step = FINISH; return 1; case FINISH: return 0; } // this should never be reached. 
return AVERROR(EINVAL); } static int http_listen(URLContext *h, const char *uri, int flags, AVDictionary **options) { HTTPContext *s = h->priv_data; int ret; char hostname[1024], proto[10]; char lower_url[100]; const char *lower_proto = "tcp"; int port; av_url_split(proto, sizeof(proto), NULL, 0, hostname, sizeof(hostname), &port, NULL, 0, uri); if (!strcmp(proto, "https")) lower_proto = "tls"; ff_url_join(lower_url, sizeof(lower_url), lower_proto, NULL, hostname, port, NULL); if ((ret = av_dict_set_int(options, "listen", s->listen, 0)) < 0) goto fail; if ((ret = ffurl_open_whitelist(&s->hd, lower_url, AVIO_FLAG_READ_WRITE, &h->interrupt_callback, options, h->protocol_whitelist, h->protocol_blacklist, h )) < 0) goto fail; s->handshake_step = LOWER_PROTO; if (s->listen == HTTP_SINGLE) { /* single client */ s->reply_code = 200; while ((ret = http_handshake(h)) > 0); } fail: av_dict_free(&s->chained_options); return ret; } static int http_open(URLContext *h, const char *uri, int flags, AVDictionary **options) { HTTPContext *s = h->priv_data; int ret; if( s->seekable == 1 ) h->is_streamed = 0; else h->is_streamed = 1; s->filesize = UINT64_MAX; s->location = av_strdup(uri); if (!s->location) return AVERROR(ENOMEM); if (options) av_dict_copy(&s->chained_options, *options, 0); if (s->headers) { int len = strlen(s->headers); if (len < 2 || strcmp("\r\n", s->headers + len - 2)) { av_log(h, AV_LOG_WARNING, "No trailing CRLF found in HTTP header.\n"); ret = av_reallocp(&s->headers, len + 3); if (ret < 0) return ret; s->headers[len] = '\r'; s->headers[len + 1] = '\n'; s->headers[len + 2] = '\0'; } } if (s->listen) { return http_listen(h, uri, flags, options); } ret = http_open_cnx(h, options); if (ret < 0) av_dict_free(&s->chained_options); return ret; } static int http_accept(URLContext *s, URLContext **c) { int ret; HTTPContext *sc = s->priv_data; HTTPContext *cc; URLContext *sl = sc->hd; URLContext *cl = NULL; av_assert0(sc->listen); if ((ret = ffurl_alloc(c, s->filename, 
s->flags, &sl->interrupt_callback)) < 0) goto fail; cc = (*c)->priv_data; if ((ret = ffurl_accept(sl, &cl)) < 0) goto fail; cc->hd = cl; cc->is_multi_client = 1; fail: return ret; } static int http_getc(HTTPContext *s) { int len; if (s->buf_ptr >= s->buf_end) { len = ffurl_read(s->hd, s->buffer, BUFFER_SIZE); if (len < 0) { return len; } else if (len == 0) { return AVERROR_EOF; } else { s->buf_ptr = s->buffer; s->buf_end = s->buffer + len; } } return *s->buf_ptr++; } static int http_get_line(HTTPContext *s, char *line, int line_size) { int ch; char *q; q = line; for (;;) { ch = http_getc(s); if (ch < 0) return ch; if (ch == '\n') { /* process line */ if (q > line && q[-1] == '\r') q--; *q = '\0'; return 0; } else { if ((q - line) < line_size - 1) *q++ = ch; } } } static int check_http_code(URLContext *h, int http_code, const char *end) { HTTPContext *s = h->priv_data; /* error codes are 4xx and 5xx, but regard 401 as a success, so we * don't abort until all headers have been parsed. */ if (http_code >= 400 && http_code < 600 && (http_code != 401 || s->auth_state.auth_type != HTTP_AUTH_NONE) && (http_code != 407 || s->proxy_auth_state.auth_type != HTTP_AUTH_NONE)) { end += strspn(end, SPACE_CHARS); av_log(h, AV_LOG_WARNING, "HTTP error %d %s\n", http_code, end); return ff_http_averror(http_code, AVERROR(EIO)); } return 0; } static int parse_location(HTTPContext *s, const char *p) { char redirected_location[MAX_URL_SIZE], *new_loc; ff_make_absolute_url(redirected_location, sizeof(redirected_location), s->location, p); new_loc = av_strdup(redirected_location); if (!new_loc) return AVERROR(ENOMEM); av_free(s->location); s->location = new_loc; return 0; } /* "bytes $from-$to/$document_size" */ static void parse_content_range(URLContext *h, const char *p) { HTTPContext *s = h->priv_data; const char *slash; if (!strncmp(p, "bytes ", 6)) { p += 6; s->off = strtoull(p, NULL, 10); if ((slash = strchr(p, '/')) && strlen(slash) > 0) s->filesize = strtoull(slash + 1, NULL, 10); 
} if (s->seekable == -1 && (!s->is_akamai || s->filesize != 2147483647)) h->is_streamed = 0; /* we _can_ in fact seek */ } static int parse_content_encoding(URLContext *h, const char *p) { if (!av_strncasecmp(p, "gzip", 4) || !av_strncasecmp(p, "deflate", 7)) { #if CONFIG_ZLIB HTTPContext *s = h->priv_data; s->compressed = 1; inflateEnd(&s->inflate_stream); if (inflateInit2(&s->inflate_stream, 32 + 15) != Z_OK) { av_log(h, AV_LOG_WARNING, "Error during zlib initialisation: %s\n", s->inflate_stream.msg); return AVERROR(ENOSYS); } if (zlibCompileFlags() & (1 << 17)) { av_log(h, AV_LOG_WARNING, "Your zlib was compiled without gzip support.\n"); return AVERROR(ENOSYS); } #else av_log(h, AV_LOG_WARNING, "Compressed (%s) content, need zlib with gzip support\n", p); return AVERROR(ENOSYS); #endif /* CONFIG_ZLIB */ } else if (!av_strncasecmp(p, "identity", 8)) { // The normal, no-encoding case (although servers shouldn't include // the header at all if this is the case). } else { av_log(h, AV_LOG_WARNING, "Unknown content coding: %s\n", p); } return 0; } // Concat all Icy- header lines static int parse_icy(HTTPContext *s, const char *tag, const char *p) { int len = 4 + strlen(p) + strlen(tag); int is_first = !s->icy_metadata_headers; int ret; av_dict_set(&s->metadata, tag, p, 0); if (s->icy_metadata_headers) len += strlen(s->icy_metadata_headers); if ((ret = av_reallocp(&s->icy_metadata_headers, len)) < 0) return ret; if (is_first) *s->icy_metadata_headers = '\0'; av_strlcatf(s->icy_metadata_headers, len, "%s: %s\n", tag, p); return 0; } static int parse_cookie(HTTPContext *s, const char *p, AVDictionary **cookies) { char *eql, *name; // duplicate the cookie name (dict will dupe the value) if (!(eql = strchr(p, '='))) return AVERROR(EINVAL); if (!(name = av_strndup(p, eql - p))) return AVERROR(ENOMEM); // add the cookie to the dictionary av_dict_set(cookies, name, eql, AV_DICT_DONT_STRDUP_KEY); return 0; } static int cookie_string(AVDictionary *dict, char **cookies) { 
AVDictionaryEntry *e = NULL; int len = 1; // determine how much memory is needed for the cookies string while (e = av_dict_get(dict, "", e, AV_DICT_IGNORE_SUFFIX)) len += strlen(e->key) + strlen(e->value) + 1; // reallocate the cookies e = NULL; if (*cookies) av_free(*cookies); *cookies = av_malloc(len); if (!*cookies) return AVERROR(ENOMEM); *cookies[0] = '\0'; // write out the cookies while (e = av_dict_get(dict, "", e, AV_DICT_IGNORE_SUFFIX)) av_strlcatf(*cookies, len, "%s%s\n", e->key, e->value); return 0; } static int process_line(URLContext *h, char *line, int line_count, int *new_location) { HTTPContext *s = h->priv_data; const char *auto_method = h->flags & AVIO_FLAG_READ ? "POST" : "GET"; char *tag, *p, *end, *method, *resource, *version; int ret; /* end of header */ if (line[0] == '\0') { s->end_header = 1; return 0; } p = line; if (line_count == 0) { if (s->is_connected_server) { // HTTP method method = p; while (*p && !av_isspace(*p)) p++; *(p++) = '\0'; av_log(h, AV_LOG_TRACE, "Received method: %s\n", method); if (s->method) { if (av_strcasecmp(s->method, method)) { av_log(h, AV_LOG_ERROR, "Received and expected HTTP method do not match. 
(%s expected, %s received)\n", s->method, method); return ff_http_averror(400, AVERROR(EIO)); } } else { // use autodetected HTTP method to expect av_log(h, AV_LOG_TRACE, "Autodetected %s HTTP method\n", auto_method); if (av_strcasecmp(auto_method, method)) { av_log(h, AV_LOG_ERROR, "Received and autodetected HTTP method did not match " "(%s autodetected %s received)\n", auto_method, method); return ff_http_averror(400, AVERROR(EIO)); } if (!(s->method = av_strdup(method))) return AVERROR(ENOMEM); } // HTTP resource while (av_isspace(*p)) p++; resource = p; while (!av_isspace(*p)) p++; *(p++) = '\0'; av_log(h, AV_LOG_TRACE, "Requested resource: %s\n", resource); if (!(s->resource = av_strdup(resource))) return AVERROR(ENOMEM); // HTTP version while (av_isspace(*p)) p++; version = p; while (*p && !av_isspace(*p)) p++; *p = '\0'; if (av_strncasecmp(version, "HTTP/", 5)) { av_log(h, AV_LOG_ERROR, "Malformed HTTP version string.\n"); return ff_http_averror(400, AVERROR(EIO)); } av_log(h, AV_LOG_TRACE, "HTTP version string: %s\n", version); } else { while (!av_isspace(*p) && *p != '\0') p++; while (av_isspace(*p)) p++; s->http_code = strtol(p, &end, 10); av_log(h, AV_LOG_TRACE, "http_code=%d\n", s->http_code); if ((ret = check_http_code(h, s->http_code, end)) < 0) return ret; } } else { while (*p != '\0' && *p != ':') p++; if (*p != ':') return 1; *p = '\0'; tag = line; p++; while (av_isspace(*p)) p++; if (!av_strcasecmp(tag, "Location")) { if ((ret = parse_location(s, p)) < 0) return ret; *new_location = 1; } else if (!av_strcasecmp(tag, "Content-Length") && s->filesize == UINT64_MAX) { s->filesize = strtoull(p, NULL, 10); } else if (!av_strcasecmp(tag, "Content-Range")) { parse_content_range(h, p); } else if (!av_strcasecmp(tag, "Accept-Ranges") && !strncmp(p, "bytes", 5) && s->seekable == -1) { h->is_streamed = 0; } else if (!av_strcasecmp(tag, "Transfer-Encoding") && !av_strncasecmp(p, "chunked", 7)) { s->filesize = UINT64_MAX; s->chunksize = 0; } else if 
(!av_strcasecmp(tag, "WWW-Authenticate")) { ff_http_auth_handle_header(&s->auth_state, tag, p); } else if (!av_strcasecmp(tag, "Authentication-Info")) { ff_http_auth_handle_header(&s->auth_state, tag, p); } else if (!av_strcasecmp(tag, "Proxy-Authenticate")) { ff_http_auth_handle_header(&s->proxy_auth_state, tag, p); } else if (!av_strcasecmp(tag, "Connection")) { if (!strcmp(p, "close")) s->willclose = 1; } else if (!av_strcasecmp(tag, "Server")) { if (!av_strcasecmp(p, "AkamaiGHost")) { s->is_akamai = 1; } else if (!av_strncasecmp(p, "MediaGateway", 12)) { s->is_mediagateway = 1; } } else if (!av_strcasecmp(tag, "Content-Type")) { av_free(s->mime_type); s->mime_type = av_strdup(p); } else if (!av_strcasecmp(tag, "Set-Cookie")) { if (parse_cookie(s, p, &s->cookie_dict)) av_log(h, AV_LOG_WARNING, "Unable to parse '%s'\n", p); } else if (!av_strcasecmp(tag, "Icy-MetaInt")) { s->icy_metaint = strtoull(p, NULL, 10); } else if (!av_strncasecmp(tag, "Icy-", 4)) { if ((ret = parse_icy(s, tag, p)) < 0) return ret; } else if (!av_strcasecmp(tag, "Content-Encoding")) { if ((ret = parse_content_encoding(h, p)) < 0) return ret; } } return 1; } /** * Create a string containing cookie values for use as a HTTP cookie header * field value for a particular path and domain from the cookie values stored in * the HTTP protocol context. The cookie string is stored in *cookies. * * @return a negative value if an error condition occurred, 0 otherwise */ static int get_cookies(HTTPContext *s, char **cookies, const char *path, const char *domain) { // cookie strings will look like Set-Cookie header field values. Multiple // Set-Cookie fields will result in multiple values delimited by a newline int ret = 0; char *next, *cookie, *set_cookies = av_strdup(s->cookies), *cset_cookies = set_cookies; if (!set_cookies) return AVERROR(EINVAL); // destroy any cookies in the dictionary. 
av_dict_free(&s->cookie_dict); *cookies = NULL; while ((cookie = av_strtok(set_cookies, "\n", &next))) { int domain_offset = 0; char *param, *next_param, *cdomain = NULL, *cpath = NULL, *cvalue = NULL; set_cookies = NULL; // store the cookie in a dict in case it is updated in the response if (parse_cookie(s, cookie, &s->cookie_dict)) av_log(s, AV_LOG_WARNING, "Unable to parse '%s'\n", cookie); while ((param = av_strtok(cookie, "; ", &next_param))) { if (cookie) { // first key-value pair is the actual cookie value cvalue = av_strdup(param); cookie = NULL; } else if (!av_strncasecmp("path=", param, 5)) { av_free(cpath); cpath = av_strdup(&param[5]); } else if (!av_strncasecmp("domain=", param, 7)) { // if the cookie specifies a sub-domain, skip the leading dot thereby // supporting URLs that point to sub-domains and the master domain int leading_dot = (param[7] == '.'); av_free(cdomain); cdomain = av_strdup(&param[7+leading_dot]); } else { // ignore unknown attributes } } if (!cdomain) cdomain = av_strdup(domain); // ensure all of the necessary values are valid if (!cdomain || !cpath || !cvalue) { av_log(s, AV_LOG_WARNING, "Invalid cookie found, no value, path or domain specified\n"); goto done_cookie; } // check if the request path matches the cookie path if (av_strncasecmp(path, cpath, strlen(cpath))) goto done_cookie; // the domain should be at least the size of our cookie domain domain_offset = strlen(domain) - strlen(cdomain); if (domain_offset < 0) goto done_cookie; // match the cookie domain if (av_strcasecmp(&domain[domain_offset], cdomain)) goto done_cookie; // cookie parameters match, so copy the value if (!*cookies) { if (!(*cookies = av_strdup(cvalue))) { ret = AVERROR(ENOMEM); goto done_cookie; } } else { char *tmp = *cookies; size_t str_size = strlen(cvalue) + strlen(*cookies) + 3; if (!(*cookies = av_malloc(str_size))) { ret = AVERROR(ENOMEM); goto done_cookie; } snprintf(*cookies, str_size, "%s; %s", tmp, cvalue); av_free(tmp); } done_cookie: 
av_freep(&cdomain); av_freep(&cpath); av_freep(&cvalue); if (ret < 0) { if (*cookies) av_freep(cookies); av_free(cset_cookies); return ret; } } av_free(cset_cookies); return 0; } static inline int has_header(const char *str, const char *header) { /* header + 2 to skip over CRLF prefix. (make sure you have one!) */ if (!str) return 0; return av_stristart(str, header + 2, NULL) || av_stristr(str, header); } static int http_read_header(URLContext *h, int *new_location) { HTTPContext *s = h->priv_data; char line[MAX_URL_SIZE]; int err = 0; s->chunksize = UINT64_MAX; for (;;) { if ((err = http_get_line(s, line, sizeof(line))) < 0) return err; av_log(h, AV_LOG_TRACE, "header='%s'\n", line); err = process_line(h, line, s->line_count, new_location); if (err < 0) return err; if (err == 0) break; s->line_count++; } if (s->seekable == -1 && s->is_mediagateway && s->filesize == 2000000000) h->is_streamed = 1; /* we can in fact _not_ seek */ // add any new cookies into the existing cookie string cookie_string(s->cookie_dict, &s->cookies); av_dict_free(&s->cookie_dict); return err; } static int http_connect(URLContext *h, const char *path, const char *local_path, const char *hoststr, const char *auth, const char *proxyauth, int *new_location) { HTTPContext *s = h->priv_data; int post, err; char headers[HTTP_HEADERS_SIZE] = ""; char *authstr = NULL, *proxyauthstr = NULL; uint64_t off = s->off; int len = 0; const char *method; int send_expect_100 = 0; /* send http header */ post = h->flags & AVIO_FLAG_WRITE; if (s->post_data) { /* force POST method and disable chunked encoding when * custom HTTP post data is set */ post = 1; s->chunked_post = 0; } if (s->method) method = s->method; else method = post ? 
"POST" : "GET"; authstr = ff_http_auth_create_response(&s->auth_state, auth, local_path, method); proxyauthstr = ff_http_auth_create_response(&s->proxy_auth_state, proxyauth, local_path, method); if (post && !s->post_data) { send_expect_100 = s->send_expect_100; /* The user has supplied authentication but we don't know the auth type, * send Expect: 100-continue to get the 401 response including the * WWW-Authenticate header, or an 100 continue if no auth actually * is needed. */ if (auth && *auth && s->auth_state.auth_type == HTTP_AUTH_NONE && s->http_code != 401) send_expect_100 = 1; } #if FF_API_HTTP_USER_AGENT if (strcmp(s->user_agent_deprecated, DEFAULT_USER_AGENT)) { av_log(s, AV_LOG_WARNING, "the user-agent option is deprecated, please use user_agent option\n"); s->user_agent = av_strdup(s->user_agent_deprecated); } #endif /* set default headers if needed */ if (!has_header(s->headers, "\r\nUser-Agent: ")) len += av_strlcatf(headers + len, sizeof(headers) - len, "User-Agent: %s\r\n", s->user_agent); if (!has_header(s->headers, "\r\nAccept: ")) len += av_strlcpy(headers + len, "Accept: */*\r\n", sizeof(headers) - len); // Note: we send this on purpose even when s->off is 0 when we're probing, // since it allows us to detect more reliably if a (non-conforming) // server supports seeking by analysing the reply headers. 
if (!has_header(s->headers, "\r\nRange: ") && !post && (s->off > 0 || s->end_off || s->seekable == -1)) { len += av_strlcatf(headers + len, sizeof(headers) - len, "Range: bytes=%"PRIu64"-", s->off); if (s->end_off) len += av_strlcatf(headers + len, sizeof(headers) - len, "%"PRId64, s->end_off - 1); len += av_strlcpy(headers + len, "\r\n", sizeof(headers) - len); } if (send_expect_100 && !has_header(s->headers, "\r\nExpect: ")) len += av_strlcatf(headers + len, sizeof(headers) - len, "Expect: 100-continue\r\n"); if (!has_header(s->headers, "\r\nConnection: ")) { if (s->multiple_requests) len += av_strlcpy(headers + len, "Connection: keep-alive\r\n", sizeof(headers) - len); else len += av_strlcpy(headers + len, "Connection: close\r\n", sizeof(headers) - len); } if (!has_header(s->headers, "\r\nHost: ")) len += av_strlcatf(headers + len, sizeof(headers) - len, "Host: %s\r\n", hoststr); if (!has_header(s->headers, "\r\nContent-Length: ") && s->post_data) len += av_strlcatf(headers + len, sizeof(headers) - len, "Content-Length: %d\r\n", s->post_datalen); if (!has_header(s->headers, "\r\nContent-Type: ") && s->content_type) len += av_strlcatf(headers + len, sizeof(headers) - len, "Content-Type: %s\r\n", s->content_type); if (!has_header(s->headers, "\r\nCookie: ") && s->cookies) { char *cookies = NULL; if (!get_cookies(s, &cookies, path, hoststr) && cookies) { len += av_strlcatf(headers + len, sizeof(headers) - len, "Cookie: %s\r\n", cookies); av_free(cookies); } } if (!has_header(s->headers, "\r\nIcy-MetaData: ") && s->icy) len += av_strlcatf(headers + len, sizeof(headers) - len, "Icy-MetaData: %d\r\n", 1); /* now add in custom headers */ if (s->headers) av_strlcpy(headers + len, s->headers, sizeof(headers) - len); snprintf(s->buffer, sizeof(s->buffer), "%s %s HTTP/1.1\r\n" "%s" "%s" "%s" "%s%s" "\r\n", method, path, post && s->chunked_post ? "Transfer-Encoding: chunked\r\n" : "", headers, authstr ? authstr : "", proxyauthstr ? "Proxy-" : "", proxyauthstr ? 
proxyauthstr : ""); av_log(h, AV_LOG_DEBUG, "request: %s\n", s->buffer); if ((err = ffurl_write(s->hd, s->buffer, strlen(s->buffer))) < 0) goto done; if (s->post_data) if ((err = ffurl_write(s->hd, s->post_data, s->post_datalen)) < 0) goto done; /* init input buffer */ s->buf_ptr = s->buffer; s->buf_end = s->buffer; s->line_count = 0; s->off = 0; s->icy_data_read = 0; s->filesize = UINT64_MAX; s->willclose = 0; s->end_chunked_post = 0; s->end_header = 0; if (post && !s->post_data && !send_expect_100) { /* Pretend that it did work. We didn't read any header yet, since * we've still to send the POST data, but the code calling this * function will check http_code after we return. */ s->http_code = 200; err = 0; goto done; } /* wait for header */ err = http_read_header(h, new_location); if (err < 0) goto done; if (*new_location) s->off = off; err = (off == s->off) ? 0 : -1; done: av_freep(&authstr); av_freep(&proxyauthstr); return err; } static int http_buf_read(URLContext *h, uint8_t *buf, int size) { HTTPContext *s = h->priv_data; int len; /* read bytes from input buffer first */ len = s->buf_end - s->buf_ptr; if (len > 0) { if (len > size) len = size; memcpy(buf, s->buf_ptr, len); s->buf_ptr += len; } else { uint64_t target_end = s->end_off ? 
s->end_off : s->filesize; if ((!s->willclose || s->chunksize == UINT64_MAX) && s->off >= target_end) return AVERROR_EOF; len = ffurl_read(s->hd, buf, size); if (!len && (!s->willclose || s->chunksize == UINT64_MAX) && s->off < target_end) { av_log(h, AV_LOG_ERROR, "Stream ends prematurely at %"PRIu64", should be %"PRIu64"\n", s->off, target_end ); return AVERROR(EIO); } } if (len > 0) { s->off += len; if (s->chunksize > 0) s->chunksize -= len; } return len; } #if CONFIG_ZLIB #define DECOMPRESS_BUF_SIZE (256 * 1024) static int http_buf_read_compressed(URLContext *h, uint8_t *buf, int size) { HTTPContext *s = h->priv_data; int ret; if (!s->inflate_buffer) { s->inflate_buffer = av_malloc(DECOMPRESS_BUF_SIZE); if (!s->inflate_buffer) return AVERROR(ENOMEM); } if (s->inflate_stream.avail_in == 0) { int read = http_buf_read(h, s->inflate_buffer, DECOMPRESS_BUF_SIZE); if (read <= 0) return read; s->inflate_stream.next_in = s->inflate_buffer; s->inflate_stream.avail_in = read; } s->inflate_stream.avail_out = size; s->inflate_stream.next_out = buf; ret = inflate(&s->inflate_stream, Z_SYNC_FLUSH); if (ret != Z_OK && ret != Z_STREAM_END) av_log(h, AV_LOG_WARNING, "inflate return value: %d, %s\n", ret, s->inflate_stream.msg); return size - s->inflate_stream.avail_out; } #endif /* CONFIG_ZLIB */ static int64_t http_seek_internal(URLContext *h, int64_t off, int whence, int force_reconnect); static int http_read_stream(URLContext *h, uint8_t *buf, int size) { HTTPContext *s = h->priv_data; int err, new_location, read_ret; int64_t seek_ret; if (!s->hd) return AVERROR_EOF; if (s->end_chunked_post && !s->end_header) { err = http_read_header(h, &new_location); if (err < 0) return err; } if (s->chunksize != UINT64_MAX) { if (!s->chunksize) { char line[32]; do { if ((err = http_get_line(s, line, sizeof(line))) < 0) return err; } while (!*line); /* skip CR LF from last chunk */ s->chunksize = strtoull(line, NULL, 16); av_log(h, AV_LOG_TRACE, "Chunked encoding data size: %"PRIu64"'\n", 
s->chunksize); if (!s->chunksize) return 0; else if (s->chunksize == UINT64_MAX) { av_log(h, AV_LOG_ERROR, "Invalid chunk size %"PRIu64"\n", s->chunksize); return AVERROR(EINVAL); } } size = FFMIN(size, s->chunksize); } #if CONFIG_ZLIB if (s->compressed) return http_buf_read_compressed(h, buf, size); #endif /* CONFIG_ZLIB */ read_ret = http_buf_read(h, buf, size); if ( (read_ret < 0 && s->reconnect && (!h->is_streamed || s->reconnect_streamed) && s->filesize > 0 && s->off < s->filesize) || (read_ret == 0 && s->reconnect_at_eof && (!h->is_streamed || s->reconnect_streamed))) { uint64_t target = h->is_streamed ? 0 : s->off; if (s->reconnect_delay > s->reconnect_delay_max) return AVERROR(EIO); av_log(h, AV_LOG_INFO, "Will reconnect at %"PRIu64" error=%s.\n", s->off, av_err2str(read_ret)); av_usleep(1000U*1000*s->reconnect_delay); s->reconnect_delay = 1 + 2*s->reconnect_delay; seek_ret = http_seek_internal(h, target, SEEK_SET, 1); if (seek_ret != target) { av_log(h, AV_LOG_ERROR, "Failed to reconnect at %"PRIu64".\n", target); return read_ret; } read_ret = http_buf_read(h, buf, size); } else s->reconnect_delay = 0; return read_ret; } // Like http_read_stream(), but no short reads. // Assumes partial reads are an error. 
static int http_read_stream_all(URLContext *h, uint8_t *buf, int size) { int pos = 0; while (pos < size) { int len = http_read_stream(h, buf + pos, size - pos); if (len < 0) return len; pos += len; } return pos; } static void update_metadata(HTTPContext *s, char *data) { char *key; char *val; char *end; char *next = data; while (*next) { key = next; val = strstr(key, "='"); if (!val) break; end = strstr(val, "';"); if (!end) break; *val = '\0'; *end = '\0'; val += 2; av_dict_set(&s->metadata, key, val, 0); next = end + 2; } } static int store_icy(URLContext *h, int size) { HTTPContext *s = h->priv_data; /* until next metadata packet */ uint64_t remaining; if (s->icy_metaint < s->icy_data_read) return AVERROR_INVALIDDATA; remaining = s->icy_metaint - s->icy_data_read; if (!remaining) { /* The metadata packet is variable sized. It has a 1 byte header * which sets the length of the packet (divided by 16). If it's 0, * the metadata doesn't change. After the packet, icy_metaint bytes * of normal data follows. 
*/ uint8_t ch; int len = http_read_stream_all(h, &ch, 1); if (len < 0) return len; if (ch > 0) { char data[255 * 16 + 1]; int ret; len = ch * 16; ret = http_read_stream_all(h, data, len); if (ret < 0) return ret; data[len + 1] = 0; if ((ret = av_opt_set(s, "icy_metadata_packet", data, 0)) < 0) return ret; update_metadata(s, data); } s->icy_data_read = 0; remaining = s->icy_metaint; } return FFMIN(size, remaining); } static int http_read(URLContext *h, uint8_t *buf, int size) { HTTPContext *s = h->priv_data; if (s->icy_metaint > 0) { size = store_icy(h, size); if (size < 0) return size; } size = http_read_stream(h, buf, size); if (size > 0) s->icy_data_read += size; return size; } /* used only when posting data */ static int http_write(URLContext *h, const uint8_t *buf, int size) { char temp[11] = ""; /* 32-bit hex + CRLF + nul */ int ret; char crlf[] = "\r\n"; HTTPContext *s = h->priv_data; if (!s->chunked_post) { /* non-chunked data is sent without any special encoding */ return ffurl_write(s->hd, buf, size); } /* silently ignore zero-size data since chunk encoding that would * signal EOF */ if (size > 0) { /* upload data using chunked encoding */ snprintf(temp, sizeof(temp), "%x\r\n", size); if ((ret = ffurl_write(s->hd, temp, strlen(temp))) < 0 || (ret = ffurl_write(s->hd, buf, size)) < 0 || (ret = ffurl_write(s->hd, crlf, sizeof(crlf) - 1)) < 0) return ret; } return size; } static int http_shutdown(URLContext *h, int flags) { int ret = 0; char footer[] = "0\r\n\r\n"; HTTPContext *s = h->priv_data; /* signal end of chunked encoding if used */ if (((flags & AVIO_FLAG_WRITE) && s->chunked_post) || ((flags & AVIO_FLAG_READ) && s->chunked_post && s->listen)) { ret = ffurl_write(s->hd, footer, sizeof(footer) - 1); ret = ret > 0 ? 
0 : ret; s->end_chunked_post = 1; } return ret; } static int http_close(URLContext *h) { int ret = 0; HTTPContext *s = h->priv_data; #if CONFIG_ZLIB inflateEnd(&s->inflate_stream); av_freep(&s->inflate_buffer); #endif /* CONFIG_ZLIB */ if (!s->end_chunked_post) /* Close the write direction by sending the end of chunked encoding. */ ret = http_shutdown(h, h->flags); if (s->hd) ffurl_closep(&s->hd); av_dict_free(&s->chained_options); return ret; } static int64_t http_seek_internal(URLContext *h, int64_t off, int whence, int force_reconnect) { HTTPContext *s = h->priv_data; URLContext *old_hd = s->hd; uint64_t old_off = s->off; uint8_t old_buf[BUFFER_SIZE]; int old_buf_size, ret; AVDictionary *options = NULL; if (whence == AVSEEK_SIZE) return s->filesize; else if (!force_reconnect && ((whence == SEEK_CUR && off == 0) || (whence == SEEK_SET && off == s->off))) return s->off; else if ((s->filesize == UINT64_MAX && whence == SEEK_END)) return AVERROR(ENOSYS); if (whence == SEEK_CUR) off += s->off; else if (whence == SEEK_END) off += s->filesize; else if (whence != SEEK_SET) return AVERROR(EINVAL); if (off < 0) return AVERROR(EINVAL); s->off = off; if (s->off && h->is_streamed) return AVERROR(ENOSYS); /* we save the old context in case the seek fails */ old_buf_size = s->buf_end - s->buf_ptr; memcpy(old_buf, s->buf_ptr, old_buf_size); s->hd = NULL; /* if it fails, continue on old connection */ if ((ret = http_open_cnx(h, &options)) < 0) { av_dict_free(&options); memcpy(s->buffer, old_buf, old_buf_size); s->buf_ptr = s->buffer; s->buf_end = s->buffer + old_buf_size; s->hd = old_hd; s->off = old_off; return ret; } av_dict_free(&options); ffurl_close(old_hd); return off; } static int64_t http_seek(URLContext *h, int64_t off, int whence) { return http_seek_internal(h, off, whence, 0); } static int http_get_file_handle(URLContext *h) { HTTPContext *s = h->priv_data; return ffurl_get_file_handle(s->hd); } #define HTTP_CLASS(flavor) \ static const AVClass flavor ## 
_context_class = { \ .class_name = # flavor, \ .item_name = av_default_item_name, \ .option = options, \ .version = LIBAVUTIL_VERSION_INT, \ } #if CONFIG_HTTP_PROTOCOL HTTP_CLASS(http); const URLProtocol ff_http_protocol = { .name = "http", .url_open2 = http_open, .url_accept = http_accept, .url_handshake = http_handshake, .url_read = http_read, .url_write = http_write, .url_seek = http_seek, .url_close = http_close, .url_get_file_handle = http_get_file_handle, .url_shutdown = http_shutdown, .priv_data_size = sizeof(HTTPContext), .priv_data_class = &http_context_class, .flags = URL_PROTOCOL_FLAG_NETWORK, .default_whitelist = "http,https,tls,rtp,tcp,udp,crypto,httpproxy" }; #endif /* CONFIG_HTTP_PROTOCOL */ #if CONFIG_HTTPS_PROTOCOL HTTP_CLASS(https); const URLProtocol ff_https_protocol = { .name = "https", .url_open2 = http_open, .url_read = http_read, .url_write = http_write, .url_seek = http_seek, .url_close = http_close, .url_get_file_handle = http_get_file_handle, .url_shutdown = http_shutdown, .priv_data_size = sizeof(HTTPContext), .priv_data_class = &https_context_class, .flags = URL_PROTOCOL_FLAG_NETWORK, .default_whitelist = "http,https,tls,rtp,tcp,udp,crypto,httpproxy" }; #endif /* CONFIG_HTTPS_PROTOCOL */ #if CONFIG_HTTPPROXY_PROTOCOL static int http_proxy_close(URLContext *h) { HTTPContext *s = h->priv_data; if (s->hd) ffurl_closep(&s->hd); return 0; } static int http_proxy_open(URLContext *h, const char *uri, int flags) { HTTPContext *s = h->priv_data; char hostname[1024], hoststr[1024]; char auth[1024], pathbuf[1024], *path; char lower_url[100]; int port, ret = 0, attempts = 0; HTTPAuthType cur_auth_type; char *authstr; int new_loc; if( s->seekable == 1 ) h->is_streamed = 0; else h->is_streamed = 1; av_url_split(NULL, 0, auth, sizeof(auth), hostname, sizeof(hostname), &port, pathbuf, sizeof(pathbuf), uri); ff_url_join(hoststr, sizeof(hoststr), NULL, NULL, hostname, port, NULL); path = pathbuf; if (*path == '/') path++; ff_url_join(lower_url, 
sizeof(lower_url), "tcp", NULL, hostname, port, NULL); redo: ret = ffurl_open_whitelist(&s->hd, lower_url, AVIO_FLAG_READ_WRITE, &h->interrupt_callback, NULL, h->protocol_whitelist, h->protocol_blacklist, h); if (ret < 0) return ret; authstr = ff_http_auth_create_response(&s->proxy_auth_state, auth, path, "CONNECT"); snprintf(s->buffer, sizeof(s->buffer), "CONNECT %s HTTP/1.1\r\n" "Host: %s\r\n" "Connection: close\r\n" "%s%s" "\r\n", path, hoststr, authstr ? "Proxy-" : "", authstr ? authstr : ""); av_freep(&authstr); if ((ret = ffurl_write(s->hd, s->buffer, strlen(s->buffer))) < 0) goto fail; s->buf_ptr = s->buffer; s->buf_end = s->buffer; s->line_count = 0; s->filesize = UINT64_MAX; cur_auth_type = s->proxy_auth_state.auth_type; /* Note: This uses buffering, potentially reading more than the * HTTP header. If tunneling a protocol where the server starts * the conversation, we might buffer part of that here, too. * Reading that requires using the proper ffurl_read() function * on this URLContext, not using the fd directly (as the tls * protocol does). This shouldn't be an issue for tls though, * since the client starts the conversation there, so there * is no extra data that we might buffer up here. 
*/ ret = http_read_header(h, &new_loc); if (ret < 0) goto fail; attempts++; if (s->http_code == 407 && (cur_auth_type == HTTP_AUTH_NONE || s->proxy_auth_state.stale) && s->proxy_auth_state.auth_type != HTTP_AUTH_NONE && attempts < 2) { ffurl_closep(&s->hd); goto redo; } if (s->http_code < 400) return 0; ret = ff_http_averror(s->http_code, AVERROR(EIO)); fail: http_proxy_close(h); return ret; } static int http_proxy_write(URLContext *h, const uint8_t *buf, int size) { HTTPContext *s = h->priv_data; return ffurl_write(s->hd, buf, size); } const URLProtocol ff_httpproxy_protocol = { .name = "httpproxy", .url_open = http_proxy_open, .url_read = http_buf_read, .url_write = http_proxy_write, .url_close = http_proxy_close, .url_get_file_handle = http_get_file_handle, .priv_data_size = sizeof(HTTPContext), .flags = URL_PROTOCOL_FLAG_NETWORK, }; #endif /* CONFIG_HTTPPROXY_PROTOCOL */
static int http_connect(URLContext *h, const char *path, const char *local_path, const char *hoststr, const char *auth, const char *proxyauth, int *new_location) { HTTPContext *s = h->priv_data; int post, err; char headers[HTTP_HEADERS_SIZE] = ""; char *authstr = NULL, *proxyauthstr = NULL; int64_t off = s->off; int len = 0; const char *method; int send_expect_100 = 0; /* send http header */ post = h->flags & AVIO_FLAG_WRITE; if (s->post_data) { /* force POST method and disable chunked encoding when * custom HTTP post data is set */ post = 1; s->chunked_post = 0; } if (s->method) method = s->method; else method = post ? "POST" : "GET"; authstr = ff_http_auth_create_response(&s->auth_state, auth, local_path, method); proxyauthstr = ff_http_auth_create_response(&s->proxy_auth_state, proxyauth, local_path, method); if (post && !s->post_data) { send_expect_100 = s->send_expect_100; /* The user has supplied authentication but we don't know the auth type, * send Expect: 100-continue to get the 401 response including the * WWW-Authenticate header, or an 100 continue if no auth actually * is needed. */ if (auth && *auth && s->auth_state.auth_type == HTTP_AUTH_NONE && s->http_code != 401) send_expect_100 = 1; } #if FF_API_HTTP_USER_AGENT if (strcmp(s->user_agent_deprecated, DEFAULT_USER_AGENT)) { av_log(s, AV_LOG_WARNING, "the user-agent option is deprecated, please use user_agent option\n"); s->user_agent = av_strdup(s->user_agent_deprecated); } #endif /* set default headers if needed */ if (!has_header(s->headers, "\r\nUser-Agent: ")) len += av_strlcatf(headers + len, sizeof(headers) - len, "User-Agent: %s\r\n", s->user_agent); if (!has_header(s->headers, "\r\nAccept: ")) len += av_strlcpy(headers + len, "Accept: */*\r\n", sizeof(headers) - len); // Note: we send this on purpose even when s->off is 0 when we're probing, // since it allows us to detect more reliably if a (non-conforming) // server supports seeking by analysing the reply headers. 
if (!has_header(s->headers, "\r\nRange: ") && !post && (s->off > 0 || s->end_off || s->seekable == -1)) { len += av_strlcatf(headers + len, sizeof(headers) - len, "Range: bytes=%"PRId64"-", s->off); if (s->end_off) len += av_strlcatf(headers + len, sizeof(headers) - len, "%"PRId64, s->end_off - 1); len += av_strlcpy(headers + len, "\r\n", sizeof(headers) - len); } if (send_expect_100 && !has_header(s->headers, "\r\nExpect: ")) len += av_strlcatf(headers + len, sizeof(headers) - len, "Expect: 100-continue\r\n"); if (!has_header(s->headers, "\r\nConnection: ")) { if (s->multiple_requests) len += av_strlcpy(headers + len, "Connection: keep-alive\r\n", sizeof(headers) - len); else len += av_strlcpy(headers + len, "Connection: close\r\n", sizeof(headers) - len); } if (!has_header(s->headers, "\r\nHost: ")) len += av_strlcatf(headers + len, sizeof(headers) - len, "Host: %s\r\n", hoststr); if (!has_header(s->headers, "\r\nContent-Length: ") && s->post_data) len += av_strlcatf(headers + len, sizeof(headers) - len, "Content-Length: %d\r\n", s->post_datalen); if (!has_header(s->headers, "\r\nContent-Type: ") && s->content_type) len += av_strlcatf(headers + len, sizeof(headers) - len, "Content-Type: %s\r\n", s->content_type); if (!has_header(s->headers, "\r\nCookie: ") && s->cookies) { char *cookies = NULL; if (!get_cookies(s, &cookies, path, hoststr) && cookies) { len += av_strlcatf(headers + len, sizeof(headers) - len, "Cookie: %s\r\n", cookies); av_free(cookies); } } if (!has_header(s->headers, "\r\nIcy-MetaData: ") && s->icy) len += av_strlcatf(headers + len, sizeof(headers) - len, "Icy-MetaData: %d\r\n", 1); /* now add in custom headers */ if (s->headers) av_strlcpy(headers + len, s->headers, sizeof(headers) - len); snprintf(s->buffer, sizeof(s->buffer), "%s %s HTTP/1.1\r\n" "%s" "%s" "%s" "%s%s" "\r\n", method, path, post && s->chunked_post ? "Transfer-Encoding: chunked\r\n" : "", headers, authstr ? authstr : "", proxyauthstr ? "Proxy-" : "", proxyauthstr ? 
proxyauthstr : ""); av_log(h, AV_LOG_DEBUG, "request: %s\n", s->buffer); if ((err = ffurl_write(s->hd, s->buffer, strlen(s->buffer))) < 0) goto done; if (s->post_data) if ((err = ffurl_write(s->hd, s->post_data, s->post_datalen)) < 0) goto done; /* init input buffer */ s->buf_ptr = s->buffer; s->buf_end = s->buffer; s->line_count = 0; s->off = 0; s->icy_data_read = 0; s->filesize = -1; s->willclose = 0; s->end_chunked_post = 0; s->end_header = 0; if (post && !s->post_data && !send_expect_100) { /* Pretend that it did work. We didn't read any header yet, since * we've still to send the POST data, but the code calling this * function will check http_code after we return. */ s->http_code = 200; err = 0; goto done; } /* wait for header */ err = http_read_header(h, new_location); if (err < 0) goto done; if (*new_location) s->off = off; err = (off == s->off) ? 0 : -1; done: av_freep(&authstr); av_freep(&proxyauthstr); return err; }
static int http_connect(URLContext *h, const char *path, const char *local_path, const char *hoststr, const char *auth, const char *proxyauth, int *new_location) { HTTPContext *s = h->priv_data; int post, err; char headers[HTTP_HEADERS_SIZE] = ""; char *authstr = NULL, *proxyauthstr = NULL; uint64_t off = s->off; int len = 0; const char *method; int send_expect_100 = 0; /* send http header */ post = h->flags & AVIO_FLAG_WRITE; if (s->post_data) { /* force POST method and disable chunked encoding when * custom HTTP post data is set */ post = 1; s->chunked_post = 0; } if (s->method) method = s->method; else method = post ? "POST" : "GET"; authstr = ff_http_auth_create_response(&s->auth_state, auth, local_path, method); proxyauthstr = ff_http_auth_create_response(&s->proxy_auth_state, proxyauth, local_path, method); if (post && !s->post_data) { send_expect_100 = s->send_expect_100; /* The user has supplied authentication but we don't know the auth type, * send Expect: 100-continue to get the 401 response including the * WWW-Authenticate header, or an 100 continue if no auth actually * is needed. */ if (auth && *auth && s->auth_state.auth_type == HTTP_AUTH_NONE && s->http_code != 401) send_expect_100 = 1; } #if FF_API_HTTP_USER_AGENT if (strcmp(s->user_agent_deprecated, DEFAULT_USER_AGENT)) { av_log(s, AV_LOG_WARNING, "the user-agent option is deprecated, please use user_agent option\n"); s->user_agent = av_strdup(s->user_agent_deprecated); } #endif /* set default headers if needed */ if (!has_header(s->headers, "\r\nUser-Agent: ")) len += av_strlcatf(headers + len, sizeof(headers) - len, "User-Agent: %s\r\n", s->user_agent); if (!has_header(s->headers, "\r\nAccept: ")) len += av_strlcpy(headers + len, "Accept: */*\r\n", sizeof(headers) - len); // Note: we send this on purpose even when s->off is 0 when we're probing, // since it allows us to detect more reliably if a (non-conforming) // server supports seeking by analysing the reply headers. 
if (!has_header(s->headers, "\r\nRange: ") && !post && (s->off > 0 || s->end_off || s->seekable == -1)) { len += av_strlcatf(headers + len, sizeof(headers) - len, "Range: bytes=%"PRIu64"-", s->off); if (s->end_off) len += av_strlcatf(headers + len, sizeof(headers) - len, "%"PRId64, s->end_off - 1); len += av_strlcpy(headers + len, "\r\n", sizeof(headers) - len); } if (send_expect_100 && !has_header(s->headers, "\r\nExpect: ")) len += av_strlcatf(headers + len, sizeof(headers) - len, "Expect: 100-continue\r\n"); if (!has_header(s->headers, "\r\nConnection: ")) { if (s->multiple_requests) len += av_strlcpy(headers + len, "Connection: keep-alive\r\n", sizeof(headers) - len); else len += av_strlcpy(headers + len, "Connection: close\r\n", sizeof(headers) - len); } if (!has_header(s->headers, "\r\nHost: ")) len += av_strlcatf(headers + len, sizeof(headers) - len, "Host: %s\r\n", hoststr); if (!has_header(s->headers, "\r\nContent-Length: ") && s->post_data) len += av_strlcatf(headers + len, sizeof(headers) - len, "Content-Length: %d\r\n", s->post_datalen); if (!has_header(s->headers, "\r\nContent-Type: ") && s->content_type) len += av_strlcatf(headers + len, sizeof(headers) - len, "Content-Type: %s\r\n", s->content_type); if (!has_header(s->headers, "\r\nCookie: ") && s->cookies) { char *cookies = NULL; if (!get_cookies(s, &cookies, path, hoststr) && cookies) { len += av_strlcatf(headers + len, sizeof(headers) - len, "Cookie: %s\r\n", cookies); av_free(cookies); } } if (!has_header(s->headers, "\r\nIcy-MetaData: ") && s->icy) len += av_strlcatf(headers + len, sizeof(headers) - len, "Icy-MetaData: %d\r\n", 1); /* now add in custom headers */ if (s->headers) av_strlcpy(headers + len, s->headers, sizeof(headers) - len); snprintf(s->buffer, sizeof(s->buffer), "%s %s HTTP/1.1\r\n" "%s" "%s" "%s" "%s%s" "\r\n", method, path, post && s->chunked_post ? "Transfer-Encoding: chunked\r\n" : "", headers, authstr ? authstr : "", proxyauthstr ? "Proxy-" : "", proxyauthstr ? 
proxyauthstr : ""); av_log(h, AV_LOG_DEBUG, "request: %s\n", s->buffer); if ((err = ffurl_write(s->hd, s->buffer, strlen(s->buffer))) < 0) goto done; if (s->post_data) if ((err = ffurl_write(s->hd, s->post_data, s->post_datalen)) < 0) goto done; /* init input buffer */ s->buf_ptr = s->buffer; s->buf_end = s->buffer; s->line_count = 0; s->off = 0; s->icy_data_read = 0; s->filesize = UINT64_MAX; s->willclose = 0; s->end_chunked_post = 0; s->end_header = 0; if (post && !s->post_data && !send_expect_100) { /* Pretend that it did work. We didn't read any header yet, since * we've still to send the POST data, but the code calling this * function will check http_code after we return. */ s->http_code = 200; err = 0; goto done; } /* wait for header */ err = http_read_header(h, new_location); if (err < 0) goto done; if (*new_location) s->off = off; err = (off == s->off) ? 0 : -1; done: av_freep(&authstr); av_freep(&proxyauthstr); return err; }
{'added': [(65, ' uint64_t chunksize;'), (66, ' uint64_t off, end_off, filesize;'), (98, ' uint64_t icy_data_read;'), (100, ' uint64_t icy_metaint;'), (492, ' s->filesize = UINT64_MAX;'), (619, ' s->off = strtoull(p, NULL, 10);'), (621, ' s->filesize = strtoull(slash + 1, NULL, 10);'), (811, ' } else if (!av_strcasecmp(tag, "Content-Length") &&'), (812, ' s->filesize == UINT64_MAX) {'), (813, ' s->filesize = strtoull(p, NULL, 10);'), (822, ' s->filesize = UINT64_MAX;'), (846, ' s->icy_metaint = strtoull(p, NULL, 10);'), (976, ' s->chunksize = UINT64_MAX;'), (1010, ' uint64_t off = s->off;'), (1064, ' "Range: bytes=%"PRIu64"-", s->off);'), (1139, ' s->filesize = UINT64_MAX;'), (1179, ' uint64_t target_end = s->end_off ? s->end_off : s->filesize;'), (1180, ' if ((!s->willclose || s->chunksize == UINT64_MAX) && s->off >= target_end)'), (1183, ' if (!len && (!s->willclose || s->chunksize == UINT64_MAX) && s->off < target_end) {'), (1185, ' "Stream ends prematurely at %"PRIu64", should be %"PRIu64"\\n",'), (1249, ' if (s->chunksize != UINT64_MAX) {'), (1258, ' s->chunksize = strtoull(line, NULL, 16);'), (1260, ' av_log(h, AV_LOG_TRACE,'), (1261, ' "Chunked encoding data size: %"PRIu64"\'\\n",'), (1266, ' else if (s->chunksize == UINT64_MAX) {'), (1267, ' av_log(h, AV_LOG_ERROR, "Invalid chunk size %"PRIu64"\\n",'), (1268, ' s->chunksize);'), (1269, ' return AVERROR(EINVAL);'), (1270, ' }'), (1281, ' uint64_t target = h->is_streamed ? 
0 : s->off;'), (1286, ' av_log(h, AV_LOG_INFO, "Will reconnect at %"PRIu64" error=%s.\\n", s->off, av_err2str(read_ret));'), (1291, ' av_log(h, AV_LOG_ERROR, "Failed to reconnect at %"PRIu64".\\n", target);'), (1346, ' uint64_t remaining;'), (1348, ' if (s->icy_metaint < s->icy_data_read)'), (1350, ' remaining = s->icy_metaint - s->icy_data_read;'), (1464, ' uint64_t old_off = s->off;'), (1475, ' else if ((s->filesize == UINT64_MAX && whence == SEEK_END))'), (1630, ' s->filesize = UINT64_MAX;')], 'deleted': [(65, ' int64_t chunksize;'), (66, ' int64_t off, end_off, filesize;'), (98, ' int icy_data_read;'), (100, ' int icy_metaint;'), (492, ' s->filesize = -1;'), (619, ' s->off = strtoll(p, NULL, 10);'), (621, ' s->filesize = strtoll(slash + 1, NULL, 10);'), (811, ' } else if (!av_strcasecmp(tag, "Content-Length") && s->filesize == -1) {'), (812, ' s->filesize = strtoll(p, NULL, 10);'), (821, ' s->filesize = -1;'), (845, ' s->icy_metaint = strtoll(p, NULL, 10);'), (975, ' s->chunksize = -1;'), (1009, ' int64_t off = s->off;'), (1063, ' "Range: bytes=%"PRId64"-", s->off);'), (1138, ' s->filesize = -1;'), (1178, ' int64_t target_end = s->end_off ? s->end_off : s->filesize;'), (1179, ' if ((!s->willclose || s->chunksize < 0) &&'), (1180, ' target_end >= 0 && s->off >= target_end)'), (1183, ' if (!len && (!s->willclose || s->chunksize < 0) &&'), (1184, ' target_end >= 0 && s->off < target_end) {'), (1186, ' "Stream ends prematurely at %"PRId64", should be %"PRId64"\\n",'), (1250, ' if (s->chunksize >= 0) {'), (1259, ' s->chunksize = strtoll(line, NULL, 16);'), (1261, ' av_log(NULL, AV_LOG_TRACE, "Chunked encoding data size: %"PRId64"\'\\n",'), (1276, ' int64_t target = h->is_streamed ? 
0 : s->off;'), (1281, ' av_log(h, AV_LOG_INFO, "Will reconnect at %"PRId64" error=%s.\\n", s->off, av_err2str(read_ret));'), (1286, ' av_log(h, AV_LOG_ERROR, "Failed to reconnect at %"PRId64".\\n", target);'), (1341, ' int remaining = s->icy_metaint - s->icy_data_read;'), (1343, ' if (remaining < 0)'), (1458, ' int64_t old_off = s->off;'), (1469, ' else if ((s->filesize == -1 && whence == SEEK_END))'), (1624, ' s->filesize = -1;')]}
38
32
1,328
9,692
128
1,019
50
https://github.com/FFmpeg/FFmpeg
CVE-2016-10190
CWE-119
1,184
main.cpp
C++
main
/* * Copyright (C) 2017 ~ 2017 Deepin Technology Co., Ltd. * * Author: zccrs <zccrs@live.com> * * Maintainer: zccrs <zhangjide@deepin.com> * * This program is free software: you can redistribute it and/or modify * it under the terms of the GNU General Public License as published by * the Free Software Foundation, either version 3 of the License, or * any later version. * * This program is distributed in the hope that it will be useful, * but WITHOUT ANY WARRANTY; without even the implied warranty of * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the * GNU General Public License for more details. * * You should have received a copy of the GNU General Public License * along with this program. If not, see <http://www.gnu.org/licenses/>. */ #include <QDebug> #include <DLog> #ifdef ENABLE_GUI #include <DApplication> #include <DTitlebar> #include <DThemeManager> #include <QDesktopServices> #include "mainwindow.h" #include "dvirtualimagefileio.h" #include <pwd.h> #include <unistd.h> DWIDGET_USE_NAMESPACE #else #include <QCoreApplication> #endif #include "helper.h" #include "dglobal.h" #include "clonejob.h" #include "commandlineparser.h" bool Global::isOverride = true; bool Global::disableMD5CheckForDimFile = false; bool Global::disableLoopDevice = true; bool Global::fixBoot = false; #ifdef ENABLE_GUI bool Global::isTUIMode = false; #else bool Global::isTUIMode = true; #endif int Global::bufferSize = 1024 * 1024; int Global::compressionLevel = 0; int Global::debugLevel = 1; DCORE_USE_NAMESPACE inline static bool isTUIMode(int argc, char *argv[]) { #ifndef ENABLE_GUI Q_UNUSED(argc) Q_UNUSED(argv) return true; #endif if (qEnvironmentVariableIsEmpty("DISPLAY")) return true; const QByteArrayList in_tui_args = { "--tui", "-i", "--info", "--dim-info", "--to-serial-url", "--from-serial-url", "-f", "--fix-boot", "-v", "--version", "-h", "--help", "--re-checksum" }; for (int i = 1; i < argc; ++i) if (in_tui_args.contains(argv[i])) return true; return false; } static 
QString logFormat = "[%{time}{yyyy-MM-dd, HH:mm:ss.zzz}] [%{type:-7}] [%{file}=>%{function}: %{line}] %{message}\n"; int main(int argc, char *argv[]) { QCoreApplication *a; if (isTUIMode(argc, argv)) { Global::isTUIMode = true; a = new QCoreApplication(argc, argv); } #ifdef ENABLE_GUI else { ConsoleAppender *consoleAppender = new ConsoleAppender; consoleAppender->setFormat(logFormat); RollingFileAppender *rollingFileAppender = new RollingFileAppender("/tmp/.deepin-clone.log"); rollingFileAppender->setFormat(logFormat); rollingFileAppender->setLogFilesLimit(5); rollingFileAppender->setDatePattern(RollingFileAppender::DailyRollover); logger->registerAppender(consoleAppender); logger->registerAppender(rollingFileAppender); if (qEnvironmentVariableIsSet("PKEXEC_UID")) { const quint32 pkexec_uid = qgetenv("PKEXEC_UID").toUInt(); const QDir user_home(getpwuid(pkexec_uid)->pw_dir); QFile pam_file(user_home.absoluteFilePath(".pam_environment")); if (pam_file.open(QIODevice::ReadOnly)) { while (!pam_file.atEnd()) { const QByteArray &line = pam_file.readLine().simplified(); if (line.startsWith("QT_SCALE_FACTOR")) { const QByteArrayList &list = line.split('='); if (list.count() == 2) { qputenv("QT_SCALE_FACTOR", list.last()); break; } } } pam_file.close(); } } DApplication::loadDXcbPlugin(); DApplication *app = new DApplication(argc, argv); app->setAttribute(Qt::AA_UseHighDpiPixmaps); if (!qApp->setSingleInstance("_deepin_clone_")) { qCritical() << "As well as the process is running"; return -1; } if (!app->loadTranslator()) { dError("Load translator failed"); } app->setApplicationDisplayName(QObject::tr("Deepin Clone")); app->setApplicationDescription(QObject::tr("Deepin Clone is a backup and restore tool in deepin. 
" "It supports disk or partition clone, backup and restore, and other functions.")); app->setApplicationAcknowledgementPage("https://www.deepin.org/acknowledgments/deepin-clone/"); app->setTheme("light"); a = app; } #endif a->setApplicationName("deepin-clone"); #ifdef ENABLE_GUI a->setApplicationVersion(DApplication::buildVersion("1.0.0.1")); #else a->setApplicationVersion("1.0.0.1"); #endif a->setOrganizationName("deepin"); CommandLineParser parser; QFile arguments_file("/lib/live/mount/medium/.tmp/deepin-clone.arguments"); QStringList arguments; bool load_arg_from_file = arguments_file.exists() && !Global::isTUIMode && !a->arguments().contains("--tui"); if (load_arg_from_file) { arguments.append(a->arguments().first()); if (!arguments_file.open(QIODevice::ReadOnly)) { qCritical() << "Open \"/lib/live/mount/medium/.tmp/deepin-clone.arguments\" failed, error:" << arguments_file.errorString(); } else { while (!arguments_file.atEnd()) { const QString &arg = QString::fromUtf8(arguments_file.readLine().trimmed()); if (!arg.isEmpty()) arguments.append(arg); } arguments_file.close(); arguments_file.remove(); } qDebug() << arguments; } else { arguments = a->arguments(); } parser.process(arguments); ConsoleAppender *consoleAppender = new ConsoleAppender; consoleAppender->setFormat(logFormat); RollingFileAppender *rollingFileAppender = new RollingFileAppender(parser.logFile()); rollingFileAppender->setFormat(logFormat); rollingFileAppender->setLogFilesLimit(5); rollingFileAppender->setDatePattern(RollingFileAppender::DailyRollover); logger->registerCategoryAppender("deepin.ghost", consoleAppender); logger->registerCategoryAppender("deepin.ghost", rollingFileAppender); parser.parse(); if (load_arg_from_file) { dCDebug("Load arguments from \"%s\"", qPrintable(arguments_file.fileName())); } dCInfo("Application command line: %s", qPrintable(arguments.join(' '))); if (Global::debugLevel == 0) { QLoggingCategory::setFilterRules("deepin.ghost.debug=false"); } if 
(Global::isTUIMode) { if (!parser.target().isEmpty()) { CloneJob *job = new CloneJob; QObject::connect(job, &QThread::finished, a, &QCoreApplication::quit); job->start(parser.source(), parser.target()); } } #ifdef ENABLE_GUI else { if (!parser.isSetOverride()) Global::isOverride = true; if (!parser.isSetDebug()) Global::debugLevel = 2; MainWindow *window = new MainWindow; window->setFixedSize(860, 660); window->setStyleSheet(DThemeManager::instance()->getQssForWidget("main", window)); window->setWindowIcon(QIcon::fromTheme("deepin-clone")); window->setWindowFlags(Qt::WindowCloseButtonHint | Qt::WindowMinimizeButtonHint | Qt::WindowSystemMenuHint); window->titlebar()->setIcon(window->windowIcon()); window->titlebar()->setTitle(QString()); #if DTK_VERSION > DTK_VERSION_CHECK(2, 0, 6, 0) window->titlebar()->setBackgroundTransparent(true); #endif window->show(); qApp->setProductIcon(window->windowIcon()); if (!parser.source().isEmpty()) { window->startWithFile(parser.source(), parser.target()); } QObject::connect(a, &QCoreApplication::aboutToQuit, window, &MainWindow::deleteLater); QDesktopServices::setUrlHandler("https", window, "openUrl"); } #endif int exitCode = Global::isTUIMode ? a->exec() : qApp->exec(); QString log_backup_file = parser.logBackupFile(); if (log_backup_file.startsWith("serial://")) { log_backup_file = Helper::parseSerialUrl(log_backup_file); } if (log_backup_file.isEmpty()) { return exitCode; } if (!QFile::copy(parser.logFile(), log_backup_file)) { dCWarning("failed to copy log file to \"%s\"", qPrintable(log_backup_file)); } return exitCode; }
/* * Copyright (C) 2017 ~ 2017 Deepin Technology Co., Ltd. * * Author: zccrs <zccrs@live.com> * * Maintainer: zccrs <zhangjide@deepin.com> * * This program is free software: you can redistribute it and/or modify * it under the terms of the GNU General Public License as published by * the Free Software Foundation, either version 3 of the License, or * any later version. * * This program is distributed in the hope that it will be useful, * but WITHOUT ANY WARRANTY; without even the implied warranty of * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the * GNU General Public License for more details. * * You should have received a copy of the GNU General Public License * along with this program. If not, see <http://www.gnu.org/licenses/>. */ #include <QDebug> #include <DLog> #ifdef ENABLE_GUI #include <DApplication> #include <DTitlebar> #include <DThemeManager> #include <QDesktopServices> #include "mainwindow.h" #include "dvirtualimagefileio.h" #include <pwd.h> #include <unistd.h> DWIDGET_USE_NAMESPACE #else #include <QCoreApplication> #endif #include "helper.h" #include "dglobal.h" #include "clonejob.h" #include "commandlineparser.h" bool Global::isOverride = true; bool Global::disableMD5CheckForDimFile = false; bool Global::disableLoopDevice = true; bool Global::fixBoot = false; #ifdef ENABLE_GUI bool Global::isTUIMode = false; #else bool Global::isTUIMode = true; #endif int Global::bufferSize = 1024 * 1024; int Global::compressionLevel = 0; int Global::debugLevel = 1; DCORE_USE_NAMESPACE inline static bool isTUIMode(int argc, char *argv[]) { #ifndef ENABLE_GUI Q_UNUSED(argc) Q_UNUSED(argv) return true; #endif if (qEnvironmentVariableIsEmpty("DISPLAY")) return true; const QByteArrayList in_tui_args = { "--tui", "-i", "--info", "--dim-info", "--to-serial-url", "--from-serial-url", "-f", "--fix-boot", "-v", "--version", "-h", "--help", "--re-checksum" }; for (int i = 1; i < argc; ++i) if (in_tui_args.contains(argv[i])) return true; return false; } static 
QString logFormat = "[%{time}{yyyy-MM-dd, HH:mm:ss.zzz}] [%{type:-7}] [%{file}=>%{function}: %{line}] %{message}\n"; int main(int argc, char *argv[]) { QCoreApplication *a; if (isTUIMode(argc, argv)) { Global::isTUIMode = true; a = new QCoreApplication(argc, argv); } #ifdef ENABLE_GUI else { ConsoleAppender *consoleAppender = new ConsoleAppender; consoleAppender->setFormat(logFormat); const QString log_file("/var/log/deepin-clone.log"); RollingFileAppender *rollingFileAppender = new RollingFileAppender(log_file); rollingFileAppender->setFormat(logFormat); rollingFileAppender->setLogFilesLimit(5); rollingFileAppender->setDatePattern(RollingFileAppender::DailyRollover); logger->registerAppender(rollingFileAppender); logger->registerAppender(consoleAppender); if (qEnvironmentVariableIsSet("PKEXEC_UID")) { const quint32 pkexec_uid = qgetenv("PKEXEC_UID").toUInt(); DApplication::customQtThemeConfigPathByUserHome(getpwuid(pkexec_uid)->pw_dir); } DApplication::loadDXcbPlugin(); DApplication *app = new DApplication(argc, argv); app->setAttribute(Qt::AA_UseHighDpiPixmaps); if (!qApp->setSingleInstance("_deepin_clone_")) { qCritical() << "As well as the process is running"; return -1; } if (!app->loadTranslator()) { dError("Load translator failed"); } app->setApplicationDisplayName(QObject::tr("Deepin Clone")); app->setApplicationDescription(QObject::tr("Deepin Clone is a backup and restore tool in deepin. 
" "It supports disk or partition clone, backup and restore, and other functions.")); app->setApplicationAcknowledgementPage("https://www.deepin.org/acknowledgments/deepin-clone/"); app->setTheme("light"); a = app; } #endif a->setApplicationName("deepin-clone"); #ifdef ENABLE_GUI a->setApplicationVersion(DApplication::buildVersion("1.0.0.1")); #else a->setApplicationVersion("1.0.0.1"); #endif a->setOrganizationName("deepin"); CommandLineParser parser; QFile arguments_file("/lib/live/mount/medium/.tmp/deepin-clone.arguments"); QStringList arguments; bool load_arg_from_file = arguments_file.exists() && !Global::isTUIMode && !a->arguments().contains("--tui"); if (load_arg_from_file) { arguments.append(a->arguments().first()); if (!arguments_file.open(QIODevice::ReadOnly)) { qCritical() << "Open \"/lib/live/mount/medium/.tmp/deepin-clone.arguments\" failed, error:" << arguments_file.errorString(); } else { while (!arguments_file.atEnd()) { const QString &arg = QString::fromUtf8(arguments_file.readLine().trimmed()); if (!arg.isEmpty()) arguments.append(arg); } arguments_file.close(); arguments_file.remove(); } qDebug() << arguments; } else { arguments = a->arguments(); } parser.process(arguments); ConsoleAppender *consoleAppender = new ConsoleAppender; consoleAppender->setFormat(logFormat); RollingFileAppender *rollingFileAppender = new RollingFileAppender(parser.logFile()); rollingFileAppender->setFormat(logFormat); rollingFileAppender->setLogFilesLimit(5); rollingFileAppender->setDatePattern(RollingFileAppender::DailyRollover); logger->registerCategoryAppender("deepin.ghost", consoleAppender); logger->registerCategoryAppender("deepin.ghost", rollingFileAppender); parser.parse(); if (load_arg_from_file) { dCDebug("Load arguments from \"%s\"", qPrintable(arguments_file.fileName())); } dCInfo("Application command line: %s", qPrintable(arguments.join(' '))); if (Global::debugLevel == 0) { QLoggingCategory::setFilterRules("deepin.ghost.debug=false"); } if 
(Global::isTUIMode) { if (!parser.target().isEmpty()) { CloneJob *job = new CloneJob; QObject::connect(job, &QThread::finished, a, &QCoreApplication::quit); job->start(parser.source(), parser.target()); } } #ifdef ENABLE_GUI else { if (!parser.isSetOverride()) Global::isOverride = true; if (!parser.isSetDebug()) Global::debugLevel = 2; MainWindow *window = new MainWindow; window->setFixedSize(860, 660); window->setStyleSheet(DThemeManager::instance()->getQssForWidget("main", window)); window->setWindowIcon(QIcon::fromTheme("deepin-clone")); window->setWindowFlags(Qt::WindowCloseButtonHint | Qt::WindowMinimizeButtonHint | Qt::WindowSystemMenuHint); window->titlebar()->setIcon(window->windowIcon()); window->titlebar()->setTitle(QString()); #if DTK_VERSION > DTK_VERSION_CHECK(2, 0, 6, 0) window->titlebar()->setBackgroundTransparent(true); #endif window->show(); qApp->setProductIcon(window->windowIcon()); if (!parser.source().isEmpty()) { window->startWithFile(parser.source(), parser.target()); } QObject::connect(a, &QCoreApplication::aboutToQuit, window, &MainWindow::deleteLater); QDesktopServices::setUrlHandler("https", window, "openUrl"); } #endif int exitCode = Global::isTUIMode ? a->exec() : qApp->exec(); QString log_backup_file = parser.logBackupFile(); if (log_backup_file.startsWith("serial://")) { log_backup_file = Helper::parseSerialUrl(log_backup_file); } if (log_backup_file.isEmpty()) { return exitCode; } if (!QFile::copy(parser.logFile(), log_backup_file)) { dCWarning("failed to copy log file to \"%s\"", qPrintable(log_backup_file)); } return exitCode; }
int main(int argc, char *argv[]) { QCoreApplication *a; if (isTUIMode(argc, argv)) { Global::isTUIMode = true; a = new QCoreApplication(argc, argv); } #ifdef ENABLE_GUI else { ConsoleAppender *consoleAppender = new ConsoleAppender; consoleAppender->setFormat(logFormat); RollingFileAppender *rollingFileAppender = new RollingFileAppender("/tmp/.deepin-clone.log"); rollingFileAppender->setFormat(logFormat); rollingFileAppender->setLogFilesLimit(5); rollingFileAppender->setDatePattern(RollingFileAppender::DailyRollover); logger->registerAppender(consoleAppender); logger->registerAppender(rollingFileAppender); if (qEnvironmentVariableIsSet("PKEXEC_UID")) { const quint32 pkexec_uid = qgetenv("PKEXEC_UID").toUInt(); const QDir user_home(getpwuid(pkexec_uid)->pw_dir); QFile pam_file(user_home.absoluteFilePath(".pam_environment")); if (pam_file.open(QIODevice::ReadOnly)) { while (!pam_file.atEnd()) { const QByteArray &line = pam_file.readLine().simplified(); if (line.startsWith("QT_SCALE_FACTOR")) { const QByteArrayList &list = line.split('='); if (list.count() == 2) { qputenv("QT_SCALE_FACTOR", list.last()); break; } } } pam_file.close(); } } DApplication::loadDXcbPlugin(); DApplication *app = new DApplication(argc, argv); app->setAttribute(Qt::AA_UseHighDpiPixmaps); if (!qApp->setSingleInstance("_deepin_clone_")) { qCritical() << "As well as the process is running"; return -1; } if (!app->loadTranslator()) { dError("Load translator failed"); } app->setApplicationDisplayName(QObject::tr("Deepin Clone")); app->setApplicationDescription(QObject::tr("Deepin Clone is a backup and restore tool in deepin. 
" "It supports disk or partition clone, backup and restore, and other functions.")); app->setApplicationAcknowledgementPage("https://www.deepin.org/acknowledgments/deepin-clone/"); app->setTheme("light"); a = app; } #endif a->setApplicationName("deepin-clone"); #ifdef ENABLE_GUI a->setApplicationVersion(DApplication::buildVersion("1.0.0.1")); #else a->setApplicationVersion("1.0.0.1"); #endif a->setOrganizationName("deepin"); CommandLineParser parser; QFile arguments_file("/lib/live/mount/medium/.tmp/deepin-clone.arguments"); QStringList arguments; bool load_arg_from_file = arguments_file.exists() && !Global::isTUIMode && !a->arguments().contains("--tui"); if (load_arg_from_file) { arguments.append(a->arguments().first()); if (!arguments_file.open(QIODevice::ReadOnly)) { qCritical() << "Open \"/lib/live/mount/medium/.tmp/deepin-clone.arguments\" failed, error:" << arguments_file.errorString(); } else { while (!arguments_file.atEnd()) { const QString &arg = QString::fromUtf8(arguments_file.readLine().trimmed()); if (!arg.isEmpty()) arguments.append(arg); } arguments_file.close(); arguments_file.remove(); } qDebug() << arguments; } else { arguments = a->arguments(); } parser.process(arguments); ConsoleAppender *consoleAppender = new ConsoleAppender; consoleAppender->setFormat(logFormat); RollingFileAppender *rollingFileAppender = new RollingFileAppender(parser.logFile()); rollingFileAppender->setFormat(logFormat); rollingFileAppender->setLogFilesLimit(5); rollingFileAppender->setDatePattern(RollingFileAppender::DailyRollover); logger->registerCategoryAppender("deepin.ghost", consoleAppender); logger->registerCategoryAppender("deepin.ghost", rollingFileAppender); parser.parse(); if (load_arg_from_file) { dCDebug("Load arguments from \"%s\"", qPrintable(arguments_file.fileName())); } dCInfo("Application command line: %s", qPrintable(arguments.join(' '))); if (Global::debugLevel == 0) { QLoggingCategory::setFilterRules("deepin.ghost.debug=false"); } if 
(Global::isTUIMode) { if (!parser.target().isEmpty()) { CloneJob *job = new CloneJob; QObject::connect(job, &QThread::finished, a, &QCoreApplication::quit); job->start(parser.source(), parser.target()); } } #ifdef ENABLE_GUI else { if (!parser.isSetOverride()) Global::isOverride = true; if (!parser.isSetDebug()) Global::debugLevel = 2; MainWindow *window = new MainWindow; window->setFixedSize(860, 660); window->setStyleSheet(DThemeManager::instance()->getQssForWidget("main", window)); window->setWindowIcon(QIcon::fromTheme("deepin-clone")); window->setWindowFlags(Qt::WindowCloseButtonHint | Qt::WindowMinimizeButtonHint | Qt::WindowSystemMenuHint); window->titlebar()->setIcon(window->windowIcon()); window->titlebar()->setTitle(QString()); #if DTK_VERSION > DTK_VERSION_CHECK(2, 0, 6, 0) window->titlebar()->setBackgroundTransparent(true); #endif window->show(); qApp->setProductIcon(window->windowIcon()); if (!parser.source().isEmpty()) { window->startWithFile(parser.source(), parser.target()); } QObject::connect(a, &QCoreApplication::aboutToQuit, window, &MainWindow::deleteLater); QDesktopServices::setUrlHandler("https", window, "openUrl"); } #endif int exitCode = Global::isTUIMode ? a->exec() : qApp->exec(); QString log_backup_file = parser.logBackupFile(); if (log_backup_file.startsWith("serial://")) { log_backup_file = Helper::parseSerialUrl(log_backup_file); } if (log_backup_file.isEmpty()) { return exitCode; } if (!QFile::copy(parser.logFile(), log_backup_file)) { dCWarning("failed to copy log file to \"%s\"", qPrintable(log_backup_file)); } return exitCode; }
int main(int argc, char *argv[]) { QCoreApplication *a; if (isTUIMode(argc, argv)) { Global::isTUIMode = true; a = new QCoreApplication(argc, argv); } #ifdef ENABLE_GUI else { ConsoleAppender *consoleAppender = new ConsoleAppender; consoleAppender->setFormat(logFormat); const QString log_file("/var/log/deepin-clone.log"); RollingFileAppender *rollingFileAppender = new RollingFileAppender(log_file); rollingFileAppender->setFormat(logFormat); rollingFileAppender->setLogFilesLimit(5); rollingFileAppender->setDatePattern(RollingFileAppender::DailyRollover); logger->registerAppender(rollingFileAppender); logger->registerAppender(consoleAppender); if (qEnvironmentVariableIsSet("PKEXEC_UID")) { const quint32 pkexec_uid = qgetenv("PKEXEC_UID").toUInt(); DApplication::customQtThemeConfigPathByUserHome(getpwuid(pkexec_uid)->pw_dir); } DApplication::loadDXcbPlugin(); DApplication *app = new DApplication(argc, argv); app->setAttribute(Qt::AA_UseHighDpiPixmaps); if (!qApp->setSingleInstance("_deepin_clone_")) { qCritical() << "As well as the process is running"; return -1; } if (!app->loadTranslator()) { dError("Load translator failed"); } app->setApplicationDisplayName(QObject::tr("Deepin Clone")); app->setApplicationDescription(QObject::tr("Deepin Clone is a backup and restore tool in deepin. 
" "It supports disk or partition clone, backup and restore, and other functions.")); app->setApplicationAcknowledgementPage("https://www.deepin.org/acknowledgments/deepin-clone/"); app->setTheme("light"); a = app; } #endif a->setApplicationName("deepin-clone"); #ifdef ENABLE_GUI a->setApplicationVersion(DApplication::buildVersion("1.0.0.1")); #else a->setApplicationVersion("1.0.0.1"); #endif a->setOrganizationName("deepin"); CommandLineParser parser; QFile arguments_file("/lib/live/mount/medium/.tmp/deepin-clone.arguments"); QStringList arguments; bool load_arg_from_file = arguments_file.exists() && !Global::isTUIMode && !a->arguments().contains("--tui"); if (load_arg_from_file) { arguments.append(a->arguments().first()); if (!arguments_file.open(QIODevice::ReadOnly)) { qCritical() << "Open \"/lib/live/mount/medium/.tmp/deepin-clone.arguments\" failed, error:" << arguments_file.errorString(); } else { while (!arguments_file.atEnd()) { const QString &arg = QString::fromUtf8(arguments_file.readLine().trimmed()); if (!arg.isEmpty()) arguments.append(arg); } arguments_file.close(); arguments_file.remove(); } qDebug() << arguments; } else { arguments = a->arguments(); } parser.process(arguments); ConsoleAppender *consoleAppender = new ConsoleAppender; consoleAppender->setFormat(logFormat); RollingFileAppender *rollingFileAppender = new RollingFileAppender(parser.logFile()); rollingFileAppender->setFormat(logFormat); rollingFileAppender->setLogFilesLimit(5); rollingFileAppender->setDatePattern(RollingFileAppender::DailyRollover); logger->registerCategoryAppender("deepin.ghost", consoleAppender); logger->registerCategoryAppender("deepin.ghost", rollingFileAppender); parser.parse(); if (load_arg_from_file) { dCDebug("Load arguments from \"%s\"", qPrintable(arguments_file.fileName())); } dCInfo("Application command line: %s", qPrintable(arguments.join(' '))); if (Global::debugLevel == 0) { QLoggingCategory::setFilterRules("deepin.ghost.debug=false"); } if 
(Global::isTUIMode) { if (!parser.target().isEmpty()) { CloneJob *job = new CloneJob; QObject::connect(job, &QThread::finished, a, &QCoreApplication::quit); job->start(parser.source(), parser.target()); } } #ifdef ENABLE_GUI else { if (!parser.isSetOverride()) Global::isOverride = true; if (!parser.isSetDebug()) Global::debugLevel = 2; MainWindow *window = new MainWindow; window->setFixedSize(860, 660); window->setStyleSheet(DThemeManager::instance()->getQssForWidget("main", window)); window->setWindowIcon(QIcon::fromTheme("deepin-clone")); window->setWindowFlags(Qt::WindowCloseButtonHint | Qt::WindowMinimizeButtonHint | Qt::WindowSystemMenuHint); window->titlebar()->setIcon(window->windowIcon()); window->titlebar()->setTitle(QString()); #if DTK_VERSION > DTK_VERSION_CHECK(2, 0, 6, 0) window->titlebar()->setBackgroundTransparent(true); #endif window->show(); qApp->setProductIcon(window->windowIcon()); if (!parser.source().isEmpty()) { window->startWithFile(parser.source(), parser.target()); } QObject::connect(a, &QCoreApplication::aboutToQuit, window, &MainWindow::deleteLater); QDesktopServices::setUrlHandler("https", window, "openUrl"); } #endif int exitCode = Global::isTUIMode ? a->exec() : qApp->exec(); QString log_backup_file = parser.logBackupFile(); if (log_backup_file.startsWith("serial://")) { log_backup_file = Helper::parseSerialUrl(log_backup_file); } if (log_backup_file.isEmpty()) { return exitCode; } if (!QFile::copy(parser.logFile(), log_backup_file)) { dCWarning("failed to copy log file to \"%s\"", qPrintable(log_backup_file)); } return exitCode; }
{'added': [(105, ' const QString log_file("/var/log/deepin-clone.log");'), (106, ''), (107, ' RollingFileAppender *rollingFileAppender = new RollingFileAppender(log_file);'), (113, ' logger->registerAppender(consoleAppender);'), (118, ' DApplication::customQtThemeConfigPathByUserHome(getpwuid(pkexec_uid)->pw_dir);')], 'deleted': [(105, ' RollingFileAppender *rollingFileAppender = new RollingFileAppender("/tmp/.deepin-clone.log");'), (110, ' logger->registerAppender(consoleAppender);'), (115, ' const QDir user_home(getpwuid(pkexec_uid)->pw_dir);'), (116, ''), (117, ' QFile pam_file(user_home.absoluteFilePath(".pam_environment"));'), (119, ' if (pam_file.open(QIODevice::ReadOnly)) {'), (120, ' while (!pam_file.atEnd()) {'), (121, ' const QByteArray &line = pam_file.readLine().simplified();'), (122, ''), (123, ' if (line.startsWith("QT_SCALE_FACTOR")) {'), (124, " const QByteArrayList &list = line.split('=');"), (125, ''), (126, ' if (list.count() == 2) {'), (127, ' qputenv("QT_SCALE_FACTOR", list.last());'), (128, ' break;'), (129, ' }'), (130, ' }'), (131, ' }'), (132, ''), (133, ' pam_file.close();'), (134, ' }')]}
5
21
165
1,169
134
1,061
30
https://github.com/linuxdeepin/deepin-clone
CVE-2019-13226
CWE-362
2,512
evalvars.c
C
get_user_var_name
/* vi:set ts=8 sts=4 sw=4 noet: * * VIM - Vi IMproved by Bram Moolenaar * * Do ":help uganda" in Vim to read copying and usage conditions. * Do ":help credits" in Vim to see a list of people who contributed. * See README.txt for an overview of the Vim source code. */ /* * evalvars.c: functions for dealing with variables */ #include "vim.h" #if defined(FEAT_EVAL) || defined(PROTO) static dictitem_T globvars_var; // variable used for g: static dict_T globvardict; // Dictionary with g: variables #define globvarht globvardict.dv_hashtab /* * Old Vim variables such as "v:version" are also available without the "v:". * Also in functions. We need a special hashtable for them. */ static hashtab_T compat_hashtab; /* * Array to hold the value of v: variables. * The value is in a dictitem, so that it can also be used in the v: scope. * The reason to use this table anyway is for very quick access to the * variables with the VV_ defines. */ // values for vv_flags: #define VV_COMPAT 1 // compatible, also used without "v:" #define VV_RO 2 // read-only #define VV_RO_SBX 4 // read-only in the sandbox #define VV_NAME(s, t) s, {{t, 0, {0}}, 0, {0}} typedef struct vimvar vimvar_T; static struct vimvar { char *vv_name; // name of variable, without v: dictitem16_T vv_di; // value and name for key (max 16 chars!) type_T *vv_type; // type or NULL char vv_flags; // VV_COMPAT, VV_RO, VV_RO_SBX } vimvars[VV_LEN] = { // The order here must match the VV_ defines in vim.h! // Initializing a union does not work, leave tv.vval empty to get zero's. 
{VV_NAME("count", VAR_NUMBER), NULL, VV_COMPAT+VV_RO}, {VV_NAME("count1", VAR_NUMBER), NULL, VV_RO}, {VV_NAME("prevcount", VAR_NUMBER), NULL, VV_RO}, {VV_NAME("errmsg", VAR_STRING), NULL, VV_COMPAT}, {VV_NAME("warningmsg", VAR_STRING), NULL, 0}, {VV_NAME("statusmsg", VAR_STRING), NULL, 0}, {VV_NAME("shell_error", VAR_NUMBER), NULL, VV_COMPAT+VV_RO}, {VV_NAME("this_session", VAR_STRING), NULL, VV_COMPAT}, {VV_NAME("version", VAR_NUMBER), NULL, VV_COMPAT+VV_RO}, {VV_NAME("lnum", VAR_NUMBER), NULL, VV_RO_SBX}, {VV_NAME("termresponse", VAR_STRING), NULL, VV_RO}, {VV_NAME("fname", VAR_STRING), NULL, VV_RO}, {VV_NAME("lang", VAR_STRING), NULL, VV_RO}, {VV_NAME("lc_time", VAR_STRING), NULL, VV_RO}, {VV_NAME("ctype", VAR_STRING), NULL, VV_RO}, {VV_NAME("charconvert_from", VAR_STRING), NULL, VV_RO}, {VV_NAME("charconvert_to", VAR_STRING), NULL, VV_RO}, {VV_NAME("fname_in", VAR_STRING), NULL, VV_RO}, {VV_NAME("fname_out", VAR_STRING), NULL, VV_RO}, {VV_NAME("fname_new", VAR_STRING), NULL, VV_RO}, {VV_NAME("fname_diff", VAR_STRING), NULL, VV_RO}, {VV_NAME("cmdarg", VAR_STRING), NULL, VV_RO}, {VV_NAME("foldstart", VAR_NUMBER), NULL, VV_RO_SBX}, {VV_NAME("foldend", VAR_NUMBER), NULL, VV_RO_SBX}, {VV_NAME("folddashes", VAR_STRING), NULL, VV_RO_SBX}, {VV_NAME("foldlevel", VAR_NUMBER), NULL, VV_RO_SBX}, {VV_NAME("progname", VAR_STRING), NULL, VV_RO}, {VV_NAME("servername", VAR_STRING), NULL, VV_RO}, {VV_NAME("dying", VAR_NUMBER), NULL, VV_RO}, {VV_NAME("exception", VAR_STRING), NULL, VV_RO}, {VV_NAME("throwpoint", VAR_STRING), NULL, VV_RO}, {VV_NAME("register", VAR_STRING), NULL, VV_RO}, {VV_NAME("cmdbang", VAR_NUMBER), NULL, VV_RO}, {VV_NAME("insertmode", VAR_STRING), NULL, VV_RO}, {VV_NAME("val", VAR_UNKNOWN), NULL, VV_RO}, {VV_NAME("key", VAR_UNKNOWN), NULL, VV_RO}, {VV_NAME("profiling", VAR_NUMBER), NULL, VV_RO}, {VV_NAME("fcs_reason", VAR_STRING), NULL, VV_RO}, {VV_NAME("fcs_choice", VAR_STRING), NULL, 0}, {VV_NAME("beval_bufnr", VAR_NUMBER), NULL, VV_RO}, 
{VV_NAME("beval_winnr", VAR_NUMBER), NULL, VV_RO}, {VV_NAME("beval_winid", VAR_NUMBER), NULL, VV_RO}, {VV_NAME("beval_lnum", VAR_NUMBER), NULL, VV_RO}, {VV_NAME("beval_col", VAR_NUMBER), NULL, VV_RO}, {VV_NAME("beval_text", VAR_STRING), NULL, VV_RO}, {VV_NAME("scrollstart", VAR_STRING), NULL, 0}, {VV_NAME("swapname", VAR_STRING), NULL, VV_RO}, {VV_NAME("swapchoice", VAR_STRING), NULL, 0}, {VV_NAME("swapcommand", VAR_STRING), NULL, VV_RO}, {VV_NAME("char", VAR_STRING), NULL, 0}, {VV_NAME("mouse_win", VAR_NUMBER), NULL, 0}, {VV_NAME("mouse_winid", VAR_NUMBER), NULL, 0}, {VV_NAME("mouse_lnum", VAR_NUMBER), NULL, 0}, {VV_NAME("mouse_col", VAR_NUMBER), NULL, 0}, {VV_NAME("operator", VAR_STRING), NULL, VV_RO}, {VV_NAME("searchforward", VAR_NUMBER), NULL, 0}, {VV_NAME("hlsearch", VAR_NUMBER), NULL, 0}, {VV_NAME("oldfiles", VAR_LIST), &t_list_string, 0}, {VV_NAME("windowid", VAR_NUMBER), NULL, VV_RO}, {VV_NAME("progpath", VAR_STRING), NULL, VV_RO}, {VV_NAME("completed_item", VAR_DICT), &t_dict_string, VV_RO}, {VV_NAME("option_new", VAR_STRING), NULL, VV_RO}, {VV_NAME("option_old", VAR_STRING), NULL, VV_RO}, {VV_NAME("option_oldlocal", VAR_STRING), NULL, VV_RO}, {VV_NAME("option_oldglobal", VAR_STRING), NULL, VV_RO}, {VV_NAME("option_command", VAR_STRING), NULL, VV_RO}, {VV_NAME("option_type", VAR_STRING), NULL, VV_RO}, {VV_NAME("errors", VAR_LIST), &t_list_string, 0}, {VV_NAME("false", VAR_BOOL), NULL, VV_RO}, {VV_NAME("true", VAR_BOOL), NULL, VV_RO}, {VV_NAME("none", VAR_SPECIAL), NULL, VV_RO}, {VV_NAME("null", VAR_SPECIAL), NULL, VV_RO}, {VV_NAME("numbermax", VAR_NUMBER), NULL, VV_RO}, {VV_NAME("numbermin", VAR_NUMBER), NULL, VV_RO}, {VV_NAME("numbersize", VAR_NUMBER), NULL, VV_RO}, {VV_NAME("vim_did_enter", VAR_NUMBER), NULL, VV_RO}, {VV_NAME("testing", VAR_NUMBER), NULL, 0}, {VV_NAME("t_number", VAR_NUMBER), NULL, VV_RO}, {VV_NAME("t_string", VAR_NUMBER), NULL, VV_RO}, {VV_NAME("t_func", VAR_NUMBER), NULL, VV_RO}, {VV_NAME("t_list", VAR_NUMBER), NULL, VV_RO}, 
{VV_NAME("t_dict", VAR_NUMBER), NULL, VV_RO}, {VV_NAME("t_float", VAR_NUMBER), NULL, VV_RO}, {VV_NAME("t_bool", VAR_NUMBER), NULL, VV_RO}, {VV_NAME("t_none", VAR_NUMBER), NULL, VV_RO}, {VV_NAME("t_job", VAR_NUMBER), NULL, VV_RO}, {VV_NAME("t_channel", VAR_NUMBER), NULL, VV_RO}, {VV_NAME("t_blob", VAR_NUMBER), NULL, VV_RO}, {VV_NAME("termrfgresp", VAR_STRING), NULL, VV_RO}, {VV_NAME("termrbgresp", VAR_STRING), NULL, VV_RO}, {VV_NAME("termu7resp", VAR_STRING), NULL, VV_RO}, {VV_NAME("termstyleresp", VAR_STRING), NULL, VV_RO}, {VV_NAME("termblinkresp", VAR_STRING), NULL, VV_RO}, {VV_NAME("event", VAR_DICT), NULL, VV_RO}, {VV_NAME("versionlong", VAR_NUMBER), NULL, VV_RO}, {VV_NAME("echospace", VAR_NUMBER), NULL, VV_RO}, {VV_NAME("argv", VAR_LIST), &t_list_string, VV_RO}, {VV_NAME("collate", VAR_STRING), NULL, VV_RO}, {VV_NAME("exiting", VAR_SPECIAL), NULL, VV_RO}, {VV_NAME("colornames", VAR_DICT), &t_dict_string, VV_RO}, {VV_NAME("sizeofint", VAR_NUMBER), NULL, VV_RO}, {VV_NAME("sizeoflong", VAR_NUMBER), NULL, VV_RO}, {VV_NAME("sizeofpointer", VAR_NUMBER), NULL, VV_RO}, {VV_NAME("maxcol", VAR_NUMBER), NULL, VV_RO}, }; // shorthand #define vv_tv_type vv_di.di_tv.v_type #define vv_nr vv_di.di_tv.vval.v_number #define vv_float vv_di.di_tv.vval.v_float #define vv_str vv_di.di_tv.vval.v_string #define vv_list vv_di.di_tv.vval.v_list #define vv_dict vv_di.di_tv.vval.v_dict #define vv_blob vv_di.di_tv.vval.v_blob #define vv_tv vv_di.di_tv static dictitem_T vimvars_var; // variable used for v: static dict_T vimvardict; // Dictionary with v: variables #define vimvarht vimvardict.dv_hashtab // for VIM_VERSION_ defines #include "version.h" static void list_glob_vars(int *first); static void list_buf_vars(int *first); static void list_win_vars(int *first); static void list_tab_vars(int *first); static char_u *list_arg_vars(exarg_T *eap, char_u *arg, int *first); static char_u *ex_let_one(char_u *arg, typval_T *tv, int copy, int flags, char_u *endchars, char_u *op, int var_idx); 
static int do_unlet_var(lval_T *lp, char_u *name_end, exarg_T *eap, int deep, void *cookie); static int do_lock_var(lval_T *lp, char_u *name_end, exarg_T *eap, int deep, void *cookie); static void list_one_var(dictitem_T *v, char *prefix, int *first); static void list_one_var_a(char *prefix, char_u *name, int type, char_u *string, int *first); /* * Initialize global and vim special variables */ void evalvars_init(void) { int i; struct vimvar *p; init_var_dict(&globvardict, &globvars_var, VAR_DEF_SCOPE); init_var_dict(&vimvardict, &vimvars_var, VAR_SCOPE); vimvardict.dv_lock = VAR_FIXED; hash_init(&compat_hashtab); for (i = 0; i < VV_LEN; ++i) { p = &vimvars[i]; if (STRLEN(p->vv_name) > DICTITEM16_KEY_LEN) { iemsg("INTERNAL: name too long, increase size of dictitem16_T"); getout(1); } STRCPY(p->vv_di.di_key, p->vv_name); if (p->vv_flags & VV_RO) p->vv_di.di_flags = DI_FLAGS_RO | DI_FLAGS_FIX; else if (p->vv_flags & VV_RO_SBX) p->vv_di.di_flags = DI_FLAGS_RO_SBX | DI_FLAGS_FIX; else p->vv_di.di_flags = DI_FLAGS_FIX; // add to v: scope dict, unless the value is not always available if (p->vv_tv_type != VAR_UNKNOWN) hash_add(&vimvarht, p->vv_di.di_key); if (p->vv_flags & VV_COMPAT) // add to compat scope dict hash_add(&compat_hashtab, p->vv_di.di_key); } set_vim_var_nr(VV_VERSION, VIM_VERSION_100); set_vim_var_nr(VV_VERSIONLONG, VIM_VERSION_100 * 10000 + highest_patch()); set_vim_var_nr(VV_SEARCHFORWARD, 1L); set_vim_var_nr(VV_HLSEARCH, 1L); set_vim_var_nr(VV_EXITING, VVAL_NULL); set_vim_var_dict(VV_COMPLETED_ITEM, dict_alloc_lock(VAR_FIXED)); set_vim_var_list(VV_ERRORS, list_alloc()); set_vim_var_dict(VV_EVENT, dict_alloc_lock(VAR_FIXED)); set_vim_var_nr(VV_FALSE, VVAL_FALSE); set_vim_var_nr(VV_TRUE, VVAL_TRUE); set_vim_var_nr(VV_NONE, VVAL_NONE); set_vim_var_nr(VV_NULL, VVAL_NULL); set_vim_var_nr(VV_NUMBERMAX, VARNUM_MAX); set_vim_var_nr(VV_NUMBERMIN, VARNUM_MIN); set_vim_var_nr(VV_NUMBERSIZE, sizeof(varnumber_T) * 8); set_vim_var_nr(VV_SIZEOFINT, sizeof(int)); 
set_vim_var_nr(VV_SIZEOFLONG, sizeof(long)); set_vim_var_nr(VV_SIZEOFPOINTER, sizeof(char *)); set_vim_var_nr(VV_MAXCOL, MAXCOL); set_vim_var_nr(VV_TYPE_NUMBER, VAR_TYPE_NUMBER); set_vim_var_nr(VV_TYPE_STRING, VAR_TYPE_STRING); set_vim_var_nr(VV_TYPE_FUNC, VAR_TYPE_FUNC); set_vim_var_nr(VV_TYPE_LIST, VAR_TYPE_LIST); set_vim_var_nr(VV_TYPE_DICT, VAR_TYPE_DICT); set_vim_var_nr(VV_TYPE_FLOAT, VAR_TYPE_FLOAT); set_vim_var_nr(VV_TYPE_BOOL, VAR_TYPE_BOOL); set_vim_var_nr(VV_TYPE_NONE, VAR_TYPE_NONE); set_vim_var_nr(VV_TYPE_JOB, VAR_TYPE_JOB); set_vim_var_nr(VV_TYPE_CHANNEL, VAR_TYPE_CHANNEL); set_vim_var_nr(VV_TYPE_BLOB, VAR_TYPE_BLOB); set_vim_var_nr(VV_ECHOSPACE, sc_col - 1); set_vim_var_dict(VV_COLORNAMES, dict_alloc()); // Default for v:register is not 0 but '"'. This is adjusted once the // clipboard has been setup by calling reset_reg_var(). set_reg_var(0); } #if defined(EXITFREE) || defined(PROTO) /* * Free all vim variables information on exit */ void evalvars_clear(void) { int i; struct vimvar *p; for (i = 0; i < VV_LEN; ++i) { p = &vimvars[i]; if (p->vv_di.di_tv.v_type == VAR_STRING) VIM_CLEAR(p->vv_str); else if (p->vv_di.di_tv.v_type == VAR_LIST) { list_unref(p->vv_list); p->vv_list = NULL; } } hash_clear(&vimvarht); hash_init(&vimvarht); // garbage_collect() will access it hash_clear(&compat_hashtab); // global variables vars_clear(&globvarht); // Script-local variables. Clear all the variables here. // The scriptvar_T is cleared later in free_scriptnames(), because a // variable in one script might hold a reference to the whole scope of // another script. 
for (i = 1; i <= script_items.ga_len; ++i) vars_clear(&SCRIPT_VARS(i)); } #endif int garbage_collect_globvars(int copyID) { return set_ref_in_ht(&globvarht, copyID, NULL); } int garbage_collect_vimvars(int copyID) { return set_ref_in_ht(&vimvarht, copyID, NULL); } int garbage_collect_scriptvars(int copyID) { int i; int idx; int abort = FALSE; scriptitem_T *si; for (i = 1; i <= script_items.ga_len; ++i) { abort = abort || set_ref_in_ht(&SCRIPT_VARS(i), copyID, NULL); si = SCRIPT_ITEM(i); for (idx = 0; idx < si->sn_var_vals.ga_len; ++idx) { svar_T *sv = ((svar_T *)si->sn_var_vals.ga_data) + idx; if (sv->sv_name != NULL) abort = abort || set_ref_in_item(sv->sv_tv, copyID, NULL, NULL); } } return abort; } /* * Set an internal variable to a string value. Creates the variable if it does * not already exist. */ void set_internal_string_var(char_u *name, char_u *value) { char_u *val; typval_T *tvp; val = vim_strsave(value); if (val != NULL) { tvp = alloc_string_tv(val); if (tvp != NULL) { set_var(name, tvp, FALSE); free_tv(tvp); } } } int eval_charconvert( char_u *enc_from, char_u *enc_to, char_u *fname_from, char_u *fname_to) { int err = FALSE; sctx_T saved_sctx = current_sctx; sctx_T *ctx; set_vim_var_string(VV_CC_FROM, enc_from, -1); set_vim_var_string(VV_CC_TO, enc_to, -1); set_vim_var_string(VV_FNAME_IN, fname_from, -1); set_vim_var_string(VV_FNAME_OUT, fname_to, -1); ctx = get_option_sctx("charconvert"); if (ctx != NULL) current_sctx = *ctx; if (eval_to_bool(p_ccv, &err, NULL, FALSE)) err = TRUE; set_vim_var_string(VV_CC_FROM, NULL, -1); set_vim_var_string(VV_CC_TO, NULL, -1); set_vim_var_string(VV_FNAME_IN, NULL, -1); set_vim_var_string(VV_FNAME_OUT, NULL, -1); current_sctx = saved_sctx; if (err) return FAIL; return OK; } # if defined(FEAT_POSTSCRIPT) || defined(PROTO) int eval_printexpr(char_u *fname, char_u *args) { int err = FALSE; sctx_T saved_sctx = current_sctx; sctx_T *ctx; set_vim_var_string(VV_FNAME_IN, fname, -1); set_vim_var_string(VV_CMDARG, args, -1); 
ctx = get_option_sctx("printexpr"); if (ctx != NULL) current_sctx = *ctx; if (eval_to_bool(p_pexpr, &err, NULL, FALSE)) err = TRUE; set_vim_var_string(VV_FNAME_IN, NULL, -1); set_vim_var_string(VV_CMDARG, NULL, -1); current_sctx = saved_sctx; if (err) { mch_remove(fname); return FAIL; } return OK; } # endif # if defined(FEAT_DIFF) || defined(PROTO) void eval_diff( char_u *origfile, char_u *newfile, char_u *outfile) { sctx_T saved_sctx = current_sctx; sctx_T *ctx; typval_T *tv; set_vim_var_string(VV_FNAME_IN, origfile, -1); set_vim_var_string(VV_FNAME_NEW, newfile, -1); set_vim_var_string(VV_FNAME_OUT, outfile, -1); ctx = get_option_sctx("diffexpr"); if (ctx != NULL) current_sctx = *ctx; // errors are ignored tv = eval_expr(p_dex, NULL); free_tv(tv); set_vim_var_string(VV_FNAME_IN, NULL, -1); set_vim_var_string(VV_FNAME_NEW, NULL, -1); set_vim_var_string(VV_FNAME_OUT, NULL, -1); current_sctx = saved_sctx; } void eval_patch( char_u *origfile, char_u *difffile, char_u *outfile) { sctx_T saved_sctx = current_sctx; sctx_T *ctx; typval_T *tv; set_vim_var_string(VV_FNAME_IN, origfile, -1); set_vim_var_string(VV_FNAME_DIFF, difffile, -1); set_vim_var_string(VV_FNAME_OUT, outfile, -1); ctx = get_option_sctx("patchexpr"); if (ctx != NULL) current_sctx = *ctx; // errors are ignored tv = eval_expr(p_pex, NULL); free_tv(tv); set_vim_var_string(VV_FNAME_IN, NULL, -1); set_vim_var_string(VV_FNAME_DIFF, NULL, -1); set_vim_var_string(VV_FNAME_OUT, NULL, -1); current_sctx = saved_sctx; } # endif #if defined(FEAT_SPELL) || defined(PROTO) /* * Evaluate an expression to a list with suggestions. * For the "expr:" part of 'spellsuggest'. * Returns NULL when there is an error. */ list_T * eval_spell_expr(char_u *badword, char_u *expr) { typval_T save_val; typval_T rettv; list_T *list = NULL; char_u *p = skipwhite(expr); sctx_T saved_sctx = current_sctx; sctx_T *ctx; // Set "v:val" to the bad word. 
prepare_vimvar(VV_VAL, &save_val); set_vim_var_string(VV_VAL, badword, -1); if (p_verbose == 0) ++emsg_off; ctx = get_option_sctx("spellsuggest"); if (ctx != NULL) current_sctx = *ctx; if (eval1(&p, &rettv, &EVALARG_EVALUATE) == OK) { if (rettv.v_type != VAR_LIST) clear_tv(&rettv); else list = rettv.vval.v_list; } if (p_verbose == 0) --emsg_off; clear_tv(get_vim_var_tv(VV_VAL)); restore_vimvar(VV_VAL, &save_val); current_sctx = saved_sctx; return list; } /* * "list" is supposed to contain two items: a word and a number. Return the * word in "pp" and the number as the return value. * Return -1 if anything isn't right. * Used to get the good word and score from the eval_spell_expr() result. */ int get_spellword(list_T *list, char_u **pp) { listitem_T *li; li = list->lv_first; if (li == NULL) return -1; *pp = tv_get_string(&li->li_tv); li = li->li_next; if (li == NULL) return -1; return (int)tv_get_number(&li->li_tv); } #endif /* * Prepare v: variable "idx" to be used. * Save the current typeval in "save_tv" and clear it. * When not used yet add the variable to the v: hashtable. */ void prepare_vimvar(int idx, typval_T *save_tv) { *save_tv = vimvars[idx].vv_tv; vimvars[idx].vv_str = NULL; // don't free it now if (vimvars[idx].vv_tv_type == VAR_UNKNOWN) hash_add(&vimvarht, vimvars[idx].vv_di.di_key); } /* * Restore v: variable "idx" to typeval "save_tv". * Note that the v: variable must have been cleared already. * When no longer defined, remove the variable from the v: hashtable. */ void restore_vimvar(int idx, typval_T *save_tv) { hashitem_T *hi; vimvars[idx].vv_tv = *save_tv; if (vimvars[idx].vv_tv_type == VAR_UNKNOWN) { hi = hash_find(&vimvarht, vimvars[idx].vv_di.di_key); if (HASHITEM_EMPTY(hi)) internal_error("restore_vimvar()"); else hash_remove(&vimvarht, hi); } } /* * List Vim variables. */ static void list_vim_vars(int *first) { list_hashtable_vars(&vimvarht, "v:", FALSE, first); } /* * List script-local variables, if there is a script. 
*/ static void list_script_vars(int *first) { if (SCRIPT_ID_VALID(current_sctx.sc_sid)) list_hashtable_vars(&SCRIPT_VARS(current_sctx.sc_sid), "s:", FALSE, first); } /* * Get a list of lines from a HERE document. The here document is a list of * lines surrounded by a marker. * cmd << {marker} * {line1} * {line2} * .... * {marker} * * The {marker} is a string. If the optional 'trim' word is supplied before the * marker, then the leading indentation before the lines (matching the * indentation in the 'cmd' line) is stripped. * * When getting lines for an embedded script (e.g. python, lua, perl, ruby, * tcl, mzscheme), script_get is set to TRUE. In this case, if the marker is * missing, then '.' is accepted as a marker. * * Returns a List with {lines} or NULL. */ list_T * heredoc_get(exarg_T *eap, char_u *cmd, int script_get) { char_u *theline; char_u *marker; list_T *l; char_u *p; int marker_indent_len = 0; int text_indent_len = 0; char_u *text_indent = NULL; char_u dot[] = "."; int comment_char = in_vim9script() ? '#' : '"'; if (eap->getline == NULL) { emsg(_(e_cannot_use_heredoc_here)); return NULL; } // Check for the optional 'trim' word before the marker cmd = skipwhite(cmd); if (STRNCMP(cmd, "trim", 4) == 0 && (cmd[4] == NUL || VIM_ISWHITE(cmd[4]))) { cmd = skipwhite(cmd + 4); // Trim the indentation from all the lines in the here document. // The amount of indentation trimmed is the same as the indentation of // the first line after the :let command line. To find the end marker // the indent of the :let command line is trimmed. p = *eap->cmdlinep; while (VIM_ISWHITE(*p)) { p++; marker_indent_len++; } text_indent_len = -1; } // The marker is the next word. 
if (*cmd != NUL && *cmd != comment_char) { marker = skipwhite(cmd); p = skiptowhite(marker); if (*skipwhite(p) != NUL && *skipwhite(p) != comment_char) { semsg(_(e_trailing_characters_str), p); return NULL; } *p = NUL; if (!script_get && vim_islower(*marker)) { emsg(_(e_marker_cannot_start_with_lower_case_letter)); return NULL; } } else { // When getting lines for an embedded script, if the marker is missing, // accept '.' as the marker. if (script_get) marker = dot; else { emsg(_(e_missing_marker)); return NULL; } } l = list_alloc(); if (l == NULL) return NULL; for (;;) { int mi = 0; int ti = 0; theline = eap->getline(NUL, eap->cookie, 0, FALSE); if (theline == NULL) { semsg(_(e_missing_end_marker_str), marker); break; } // with "trim": skip the indent matching the :let line to find the // marker if (marker_indent_len > 0 && STRNCMP(theline, *eap->cmdlinep, marker_indent_len) == 0) mi = marker_indent_len; if (STRCMP(marker, theline + mi) == 0) { vim_free(theline); break; } if (text_indent_len == -1 && *theline != NUL) { // set the text indent from the first line. p = theline; text_indent_len = 0; while (VIM_ISWHITE(*p)) { p++; text_indent_len++; } text_indent = vim_strnsave(theline, text_indent_len); } // with "trim": skip the indent matching the first line if (text_indent != NULL) for (ti = 0; ti < text_indent_len; ++ti) if (theline[ti] != text_indent[ti]) break; if (list_append_string(l, theline + ti, -1) == FAIL) break; vim_free(theline); } vim_free(text_indent); return l; } /* * Vim9 variable declaration: * ":var name" * ":var name: type" * ":var name = expr" * ":var name: type = expr" * etc. */ void ex_var(exarg_T *eap) { if (!in_vim9script()) { semsg(_(e_str_cannot_be_used_in_legacy_vim_script), ":var"); return; } ex_let(eap); } /* * ":let" list all variable values * ":let var1 var2" list variable values * ":let var = expr" assignment command. * ":let var += expr" assignment command. * ":let var -= expr" assignment command. 
* ":let var *= expr" assignment command. * ":let var /= expr" assignment command. * ":let var %= expr" assignment command. * ":let var .= expr" assignment command. * ":let var ..= expr" assignment command. * ":let [var1, var2] = expr" unpack list. * ":let var =<< ..." heredoc * ":let var: string" Vim9 declaration * * ":final var = expr" assignment command. * ":final [var1, var2] = expr" unpack list. * * ":const" list all variable values * ":const var1 var2" list variable values * ":const var = expr" assignment command. * ":const [var1, var2] = expr" unpack list. */ void ex_let(exarg_T *eap) { char_u *arg = eap->arg; char_u *expr = NULL; typval_T rettv; int i; int var_count = 0; int semicolon = 0; char_u op[4]; char_u *argend; int first = TRUE; int concat; int has_assign; int flags = 0; int vim9script = in_vim9script(); if (eap->cmdidx == CMD_final && !vim9script) { // In legacy Vim script ":final" is short for ":finally". ex_finally(eap); return; } if (eap->cmdidx == CMD_let && vim9script) { emsg(_(e_cannot_use_let_in_vim9_script)); return; } if (eap->cmdidx == CMD_const) flags |= ASSIGN_CONST; else if (eap->cmdidx == CMD_final) flags |= ASSIGN_FINAL; // Vim9 assignment without ":let", ":const" or ":final" if (eap->arg == eap->cmd) flags |= ASSIGN_NO_DECL; argend = skip_var_list(arg, TRUE, &var_count, &semicolon, FALSE); if (argend == NULL) return; if (argend > arg && argend[-1] == '.') // for var.='str' --argend; expr = skipwhite(argend); concat = expr[0] == '.' && ((expr[1] == '=' && in_old_script(2)) || (expr[1] == '.' && expr[2] == '=')); has_assign = *expr == '=' || (vim_strchr((char_u *)"+-*/%", *expr) != NULL && expr[1] == '='); if (!has_assign && !concat) { // ":let" without "=": list variables if (*arg == '[') emsg(_(e_invalid_argument)); else if (expr[0] == '.' 
&& expr[1] == '=') emsg(_(e_dot_equal_not_supported_with_script_version_two)); else if (!ends_excmd2(eap->cmd, arg)) { if (vim9script) { if (!ends_excmd2(eap->cmd, skipwhite(argend))) semsg(_(e_trailing_characters_str), argend); else // Vim9 declaration ":var name: type" arg = vim9_declare_scriptvar(eap, arg); } else { // ":let var1 var2" - list values arg = list_arg_vars(eap, arg, &first); } } else if (!eap->skip) { // ":let" list_glob_vars(&first); list_buf_vars(&first); list_win_vars(&first); list_tab_vars(&first); list_script_vars(&first); list_func_vars(&first); list_vim_vars(&first); } set_nextcmd(eap, arg); } else if (expr[0] == '=' && expr[1] == '<' && expr[2] == '<') { list_T *l; long cur_lnum = SOURCING_LNUM; // HERE document l = heredoc_get(eap, expr + 3, FALSE); if (l != NULL) { rettv_list_set(&rettv, l); if (!eap->skip) { // errors are for the assignment, not the end marker SOURCING_LNUM = cur_lnum; op[0] = '='; op[1] = NUL; (void)ex_let_vars(eap->arg, &rettv, FALSE, semicolon, var_count, flags, op); } clear_tv(&rettv); } } else { evalarg_T evalarg; int len = 1; CLEAR_FIELD(rettv); i = FAIL; if (has_assign || concat) { int cur_lnum; op[0] = '='; op[1] = NUL; if (*expr != '=') { if (vim9script && (flags & ASSIGN_NO_DECL) == 0) { // +=, /=, etc. require an existing variable semsg(_(e_cannot_use_operator_on_new_variable), eap->arg); i = FAIL; } else if (vim_strchr((char_u *)"+-*/%.", *expr) != NULL) { op[0] = *expr; // +=, -=, *=, /=, %= or .= ++len; if (expr[0] == '.' 
&& expr[1] == '.') // ..= { ++expr; ++len; } } expr += 2; } else ++expr; if (vim9script && !eap->skip && (!VIM_ISWHITE(*argend) || !IS_WHITE_OR_NUL(*expr))) { vim_strncpy(op, expr - len, len); semsg(_(e_white_space_required_before_and_after_str_at_str), op, argend); i = FAIL; } if (eap->skip) ++emsg_skip; fill_evalarg_from_eap(&evalarg, eap, eap->skip); expr = skipwhite_and_linebreak(expr, &evalarg); cur_lnum = SOURCING_LNUM; i = eval0(expr, &rettv, eap, &evalarg); if (eap->skip) --emsg_skip; clear_evalarg(&evalarg, eap); // Restore the line number so that any type error is given for the // declaration, not the expression. SOURCING_LNUM = cur_lnum; } if (eap->skip) { if (i != FAIL) clear_tv(&rettv); } else if (i != FAIL) { (void)ex_let_vars(eap->arg, &rettv, FALSE, semicolon, var_count, flags, op); clear_tv(&rettv); } } } /* * Assign the typeval "tv" to the variable or variables at "arg_start". * Handles both "var" with any type and "[var, var; var]" with a list type. * When "op" is not NULL it points to a string with characters that * must appear after the variable(s). Use "+", "-" or "." for add, subtract * or concatenate. * Returns OK or FAIL; */ int ex_let_vars( char_u *arg_start, typval_T *tv, int copy, // copy values from "tv", don't move int semicolon, // from skip_var_list() int var_count, // from skip_var_list() int flags, // ASSIGN_FINAL, ASSIGN_CONST, etc. 
char_u *op) { char_u *arg = arg_start; list_T *l; int i; int var_idx = 0; listitem_T *item; typval_T ltv; if (*arg != '[') { // ":let var = expr" or ":for var in list" if (ex_let_one(arg, tv, copy, flags, op, op, var_idx) == NULL) return FAIL; return OK; } // ":let [v1, v2] = list" or ":for [v1, v2] in listlist" if (tv->v_type != VAR_LIST || (l = tv->vval.v_list) == NULL) { emsg(_(e_list_required)); return FAIL; } i = list_len(l); if (semicolon == 0 && var_count < i) { emsg(_(e_less_targets_than_list_items)); return FAIL; } if (var_count - semicolon > i) { emsg(_(e_more_targets_than_list_items)); return FAIL; } CHECK_LIST_MATERIALIZE(l); item = l->lv_first; while (*arg != ']') { arg = skipwhite(arg + 1); ++var_idx; arg = ex_let_one(arg, &item->li_tv, TRUE, flags | ASSIGN_UNPACK, (char_u *)",;]", op, var_idx); item = item->li_next; if (arg == NULL) return FAIL; arg = skipwhite(arg); if (*arg == ';') { // Put the rest of the list (may be empty) in the var after ';'. // Create a new list for this. l = list_alloc(); if (l == NULL) return FAIL; while (item != NULL) { list_append_tv(l, &item->li_tv); item = item->li_next; } ltv.v_type = VAR_LIST; ltv.v_lock = 0; ltv.vval.v_list = l; l->lv_refcount = 1; ++var_idx; arg = ex_let_one(skipwhite(arg + 1), &ltv, FALSE, flags | ASSIGN_UNPACK, (char_u *)"]", op, var_idx); clear_tv(&ltv); if (arg == NULL) return FAIL; break; } else if (*arg != ',' && *arg != ']') { internal_error("ex_let_vars()"); return FAIL; } } return OK; } /* * Skip over assignable variable "var" or list of variables "[var, var]". * Used for ":let varvar = expr" and ":for varvar in expr". * For "[var, var]" increment "*var_count" for each variable. * for "[var, var; var]" set "semicolon" to 1. * If "silent" is TRUE do not give an "invalid argument" error message. * Return NULL for an error. 
*/ char_u * skip_var_list( char_u *arg, int include_type, int *var_count, int *semicolon, int silent) { char_u *p, *s; if (*arg == '[') { // "[var, var]": find the matching ']'. p = arg; for (;;) { p = skipwhite(p + 1); // skip whites after '[', ';' or ',' s = skip_var_one(p, include_type); if (s == p) { if (!silent) semsg(_(e_invalid_argument_str), p); return NULL; } ++*var_count; p = skipwhite(s); if (*p == ']') break; else if (*p == ';') { if (*semicolon == 1) { if (!silent) emsg(_(e_double_semicolon_in_list_of_variables)); return NULL; } *semicolon = 1; } else if (*p != ',') { if (!silent) semsg(_(e_invalid_argument_str), p); return NULL; } } return p + 1; } else return skip_var_one(arg, include_type); } /* * Skip one (assignable) variable name, including @r, $VAR, &option, d.key, * l[idx]. * In Vim9 script also skip over ": type" if "include_type" is TRUE. */ char_u * skip_var_one(char_u *arg, int include_type) { char_u *end; int vim9 = in_vim9script(); if (*arg == '@' && arg[1] != NUL) return arg + 2; end = find_name_end(*arg == '$' || *arg == '&' ? arg + 1 : arg, NULL, NULL, FNE_INCL_BR | FNE_CHECK_START); // "a: type" is declaring variable "a" with a type, not "a:". // Same for "s: type". if (vim9 && end == arg + 2 && end[-1] == ':') --end; if (include_type && vim9) { if (*end == ':') end = skip_type(skipwhite(end + 1), FALSE); } return end; } /* * List variables for hashtab "ht" with prefix "prefix". * If "empty" is TRUE also list NULL strings as empty strings. 
*/ void list_hashtable_vars( hashtab_T *ht, char *prefix, int empty, int *first) { hashitem_T *hi; dictitem_T *di; int todo; char_u buf[IOSIZE]; todo = (int)ht->ht_used; for (hi = ht->ht_array; todo > 0 && !got_int; ++hi) { if (!HASHITEM_EMPTY(hi)) { --todo; di = HI2DI(hi); // apply :filter /pat/ to variable name vim_strncpy((char_u *)buf, (char_u *)prefix, IOSIZE - 1); vim_strcat((char_u *)buf, di->di_key, IOSIZE); if (message_filtered(buf)) continue; if (empty || di->di_tv.v_type != VAR_STRING || di->di_tv.vval.v_string != NULL) list_one_var(di, prefix, first); } } } /* * List global variables. */ static void list_glob_vars(int *first) { list_hashtable_vars(&globvarht, "", TRUE, first); } /* * List buffer variables. */ static void list_buf_vars(int *first) { list_hashtable_vars(&curbuf->b_vars->dv_hashtab, "b:", TRUE, first); } /* * List window variables. */ static void list_win_vars(int *first) { list_hashtable_vars(&curwin->w_vars->dv_hashtab, "w:", TRUE, first); } /* * List tab page variables. */ static void list_tab_vars(int *first) { list_hashtable_vars(&curtab->tp_vars->dv_hashtab, "t:", TRUE, first); } /* * List variables in "arg". */ static char_u * list_arg_vars(exarg_T *eap, char_u *arg, int *first) { int error = FALSE; int len; char_u *name; char_u *name_start; char_u *arg_subsc; char_u *tofree; typval_T tv; while (!ends_excmd2(eap->cmd, arg) && !got_int) { if (error || eap->skip) { arg = find_name_end(arg, NULL, NULL, FNE_INCL_BR | FNE_CHECK_START); if (!VIM_ISWHITE(*arg) && !ends_excmd(*arg)) { emsg_severe = TRUE; if (!did_emsg) semsg(_(e_trailing_characters_str), arg); break; } } else { // get_name_len() takes care of expanding curly braces name_start = name = arg; len = get_name_len(&arg, &tofree, TRUE, TRUE); if (len <= 0) { // This is mainly to keep test 49 working: when expanding // curly braces fails overrule the exception error message. 
if (len < 0 && !aborting()) { emsg_severe = TRUE; semsg(_(e_invalid_argument_str), arg); break; } error = TRUE; } else { arg = skipwhite(arg); if (tofree != NULL) name = tofree; if (eval_variable(name, len, 0, &tv, NULL, EVAL_VAR_VERBOSE) == FAIL) error = TRUE; else { // handle d.key, l[idx], f(expr) arg_subsc = arg; if (handle_subscript(&arg, name_start, &tv, &EVALARG_EVALUATE, TRUE) == FAIL) error = TRUE; else { if (arg == arg_subsc && len == 2 && name[1] == ':') { switch (*name) { case 'g': list_glob_vars(first); break; case 'b': list_buf_vars(first); break; case 'w': list_win_vars(first); break; case 't': list_tab_vars(first); break; case 'v': list_vim_vars(first); break; case 's': list_script_vars(first); break; case 'l': list_func_vars(first); break; default: semsg(_(e_cant_list_variables_for_str), name); } } else { char_u numbuf[NUMBUFLEN]; char_u *tf; int c; char_u *s; s = echo_string(&tv, &tf, numbuf, 0); c = *arg; *arg = NUL; list_one_var_a("", arg == arg_subsc ? name : name_start, tv.v_type, s == NULL ? (char_u *)"" : s, first); *arg = c; vim_free(tf); } clear_tv(&tv); } } } vim_free(tofree); } arg = skipwhite(arg); } return arg; } /* * Set an environment variable, part of ex_let_one(). */ static char_u * ex_let_env( char_u *arg, typval_T *tv, int flags, char_u *endchars, char_u *op) { char_u *arg_end = NULL; char_u *name; int len; if ((flags & (ASSIGN_CONST | ASSIGN_FINAL)) && (flags & ASSIGN_FOR_LOOP) == 0) { emsg(_(e_cannot_lock_environment_variable)); return NULL; } // Find the end of the name. 
++arg; name = arg; len = get_env_len(&arg); if (len == 0) semsg(_(e_invalid_argument_str), name - 1); else { if (op != NULL && vim_strchr((char_u *)"+-*/%", *op) != NULL) semsg(_(e_wrong_variable_type_for_str_equal), op); else if (endchars != NULL && vim_strchr(endchars, *skipwhite(arg)) == NULL) emsg(_(e_unexpected_characters_in_let)); else if (!check_secure()) { char_u *tofree = NULL; int c1 = name[len]; char_u *p; name[len] = NUL; p = tv_get_string_chk(tv); if (p != NULL && op != NULL && *op == '.') { int mustfree = FALSE; char_u *s = vim_getenv(name, &mustfree); if (s != NULL) { p = tofree = concat_str(s, p); if (mustfree) vim_free(s); } } if (p != NULL) { vim_setenv_ext(name, p); arg_end = arg; } name[len] = c1; vim_free(tofree); } } return arg_end; } /* * Set an option, part of ex_let_one(). */ static char_u * ex_let_option( char_u *arg, typval_T *tv, int flags, char_u *endchars, char_u *op) { char_u *p; int scope; char_u *arg_end = NULL; if ((flags & (ASSIGN_CONST | ASSIGN_FINAL)) && (flags & ASSIGN_FOR_LOOP) == 0) { emsg(_(e_cannot_lock_option)); return NULL; } // Find the end of the name. 
p = find_option_end(&arg, &scope); if (p == NULL || (endchars != NULL && vim_strchr(endchars, *skipwhite(p)) == NULL)) emsg(_(e_unexpected_characters_in_let)); else { int c1; long n = 0; getoption_T opt_type; long numval; char_u *stringval = NULL; char_u *s = NULL; int failed = FALSE; int opt_p_flags; char_u *tofree = NULL; char_u numbuf[NUMBUFLEN]; c1 = *p; *p = NUL; opt_type = get_option_value(arg, &numval, &stringval, &opt_p_flags, scope); if ((opt_type == gov_bool || opt_type == gov_number || opt_type == gov_hidden_bool || opt_type == gov_hidden_number) && (tv->v_type != VAR_STRING || !in_vim9script())) { if (opt_type == gov_bool || opt_type == gov_hidden_bool) // bool, possibly hidden n = (long)tv_get_bool(tv); else // number, possibly hidden n = (long)tv_get_number(tv); } if ((opt_p_flags & P_FUNC) && (tv->v_type == VAR_PARTIAL || tv->v_type == VAR_FUNC)) { // If the option can be set to a function reference or a lambda // and the passed value is a function reference, then convert it to // the name (string) of the function reference. s = tv2string(tv, &tofree, numbuf, 0); } // Avoid setting a string option to the text "v:false" or similar. // In Vim9 script also don't convert a number to string. 
else if (tv->v_type != VAR_BOOL && tv->v_type != VAR_SPECIAL && (!in_vim9script() || tv->v_type != VAR_NUMBER)) s = tv_get_string_chk(tv); if (op != NULL && *op != '=') { if (((opt_type == gov_bool || opt_type == gov_number) && *op == '.') || (opt_type == gov_string && *op != '.')) { semsg(_(e_wrong_variable_type_for_str_equal), op); failed = TRUE; // don't set the value } else { // number, in legacy script also bool if (opt_type == gov_number || (opt_type == gov_bool && !in_vim9script())) { switch (*op) { case '+': n = numval + n; break; case '-': n = numval - n; break; case '*': n = numval * n; break; case '/': n = (long)num_divide(numval, n, &failed); break; case '%': n = (long)num_modulus(numval, n, &failed); break; } s = NULL; } else if (opt_type == gov_string && stringval != NULL && s != NULL) { // string s = concat_str(stringval, s); vim_free(stringval); stringval = s; } } } if (!failed) { if (opt_type != gov_string || s != NULL) { set_option_value(arg, n, s, scope); arg_end = p; } else emsg(_(e_string_required)); } *p = c1; vim_free(stringval); vim_free(tofree); } return arg_end; } /* * Set a register, part of ex_let_one(). */ static char_u * ex_let_register( char_u *arg, typval_T *tv, int flags, char_u *endchars, char_u *op) { char_u *arg_end = NULL; if ((flags & (ASSIGN_CONST | ASSIGN_FINAL)) && (flags & ASSIGN_FOR_LOOP) == 0) { emsg(_(e_cannot_lock_register)); return NULL; } ++arg; if (op != NULL && vim_strchr((char_u *)"+-*/%", *op) != NULL) semsg(_(e_wrong_variable_type_for_str_equal), op); else if (endchars != NULL && vim_strchr(endchars, *skipwhite(arg + 1)) == NULL) emsg(_(e_unexpected_characters_in_let)); else { char_u *ptofree = NULL; char_u *p; p = tv_get_string_chk(tv); if (p != NULL && op != NULL && *op == '.') { char_u *s = get_reg_contents(*arg == '@' ? '"' : *arg, GREG_EXPR_SRC); if (s != NULL) { p = ptofree = concat_str(s, p); vim_free(s); } } if (p != NULL) { write_reg_contents(*arg == '@' ? 
'"' : *arg, p, -1, FALSE); arg_end = arg + 1; } vim_free(ptofree); } return arg_end; } /* * Set one item of ":let var = expr" or ":let [v1, v2] = list" to its value. * Returns a pointer to the char just after the var name. * Returns NULL if there is an error. */ static char_u * ex_let_one( char_u *arg, // points to variable name typval_T *tv, // value to assign to variable int copy, // copy value from "tv" int flags, // ASSIGN_CONST, ASSIGN_FINAL, etc. char_u *endchars, // valid chars after variable name or NULL char_u *op, // "+", "-", "." or NULL int var_idx) // variable index for "let [a, b] = list" { char_u *arg_end = NULL; if (in_vim9script() && (flags & (ASSIGN_NO_DECL | ASSIGN_DECL)) == 0 && (flags & (ASSIGN_CONST | ASSIGN_FINAL)) == 0 && vim_strchr((char_u *)"$@&", *arg) != NULL) { vim9_declare_error(arg); return NULL; } if (*arg == '$') { // ":let $VAR = expr": Set environment variable. return ex_let_env(arg, tv, flags, endchars, op); } else if (*arg == '&') { // ":let &option = expr": Set option value. // ":let &l:option = expr": Set local option value. // ":let &g:option = expr": Set global option value. // ":for &ts in range(8)": Set option value for for loop return ex_let_option(arg, tv, flags, endchars, op); } else if (*arg == '@') { // ":let @r = expr": Set register contents. return ex_let_register(arg, tv, flags, endchars, op); } else if (eval_isnamec1(*arg) || *arg == '{') { lval_T lv; char_u *p; // ":let var = expr": Set internal variable. // ":let var: type = expr": Set internal variable with type. // ":let {expr} = expr": Idem, name made with curly braces p = get_lval(arg, tv, &lv, FALSE, FALSE, (flags & (ASSIGN_NO_DECL | ASSIGN_DECL)) ? 
GLV_NO_DECL : 0, FNE_CHECK_START); if (p != NULL && lv.ll_name != NULL) { if (endchars != NULL && vim_strchr(endchars, *skipwhite(lv.ll_name_end)) == NULL) { emsg(_(e_unexpected_characters_in_let)); } else { set_var_lval(&lv, p, tv, copy, flags, op, var_idx); arg_end = lv.ll_name_end; } } clear_lval(&lv); } else semsg(_(e_invalid_argument_str), arg); return arg_end; } /* * ":unlet[!] var1 ... " command. */ void ex_unlet(exarg_T *eap) { ex_unletlock(eap, eap->arg, 0, 0, do_unlet_var, NULL); } /* * ":lockvar" and ":unlockvar" commands */ void ex_lockvar(exarg_T *eap) { char_u *arg = eap->arg; int deep = 2; if (eap->forceit) deep = -1; else if (vim_isdigit(*arg)) { deep = getdigits(&arg); arg = skipwhite(arg); } ex_unletlock(eap, arg, deep, 0, do_lock_var, NULL); } /* * ":unlet", ":lockvar" and ":unlockvar" are quite similar. * Also used for Vim9 script. "callback" is invoked as: * callback(&lv, name_end, eap, deep, cookie) */ void ex_unletlock( exarg_T *eap, char_u *argstart, int deep, int glv_flags, int (*callback)(lval_T *, char_u *, exarg_T *, int, void *), void *cookie) { char_u *arg = argstart; char_u *name_end; int error = FALSE; lval_T lv; do { if (*arg == '$') { lv.ll_name = arg; lv.ll_tv = NULL; ++arg; if (get_env_len(&arg) == 0) { semsg(_(e_invalid_argument_str), arg - 1); return; } if (!error && !eap->skip && callback(&lv, arg, eap, deep, cookie) == FAIL) error = TRUE; name_end = arg; } else { // Parse the name and find the end. 
name_end = get_lval(arg, NULL, &lv, TRUE, eap->skip || error, glv_flags | GLV_NO_DECL, FNE_CHECK_START); if (lv.ll_name == NULL) error = TRUE; // error but continue parsing if (name_end == NULL || (!VIM_ISWHITE(*name_end) && !ends_excmd(*name_end))) { if (name_end != NULL) { emsg_severe = TRUE; semsg(_(e_trailing_characters_str), name_end); } if (!(eap->skip || error)) clear_lval(&lv); break; } if (!error && !eap->skip && callback(&lv, name_end, eap, deep, cookie) == FAIL) error = TRUE; if (!eap->skip) clear_lval(&lv); } arg = skipwhite(name_end); } while (!ends_excmd2(name_end, arg)); set_nextcmd(eap, arg); } static int do_unlet_var( lval_T *lp, char_u *name_end, exarg_T *eap, int deep UNUSED, void *cookie UNUSED) { int forceit = eap->forceit; int ret = OK; int cc; if (lp->ll_tv == NULL) { cc = *name_end; *name_end = NUL; // Environment variable, normal name or expanded name. if (*lp->ll_name == '$') vim_unsetenv(lp->ll_name + 1); else if (do_unlet(lp->ll_name, forceit) == FAIL) ret = FAIL; *name_end = cc; } else if ((lp->ll_list != NULL && value_check_lock(lp->ll_list->lv_lock, lp->ll_name, FALSE)) || (lp->ll_dict != NULL && value_check_lock(lp->ll_dict->dv_lock, lp->ll_name, FALSE))) return FAIL; else if (lp->ll_range) { if (list_unlet_range(lp->ll_list, lp->ll_li, lp->ll_name, lp->ll_n1, !lp->ll_empty2, lp->ll_n2) == FAIL) return FAIL; } else { if (lp->ll_list != NULL) // unlet a List item. listitem_remove(lp->ll_list, lp->ll_li); else // unlet a Dictionary item. dictitem_remove(lp->ll_dict, lp->ll_di); } return ret; } /* * Unlet one item or a range of items from a list. * Return OK or FAIL. */ int list_unlet_range( list_T *l, listitem_T *li_first, char_u *name, long n1_arg, int has_n2, long n2) { listitem_T *li = li_first; int n1 = n1_arg; while (li != NULL && (!has_n2 || n2 >= n1)) { if (value_check_lock(li->li_tv.v_lock, name, FALSE)) return FAIL; li = li->li_next; ++n1; } // Delete a range of List items. 
    // Second pass: the whole range is known to be unlocked, delete it.
    li = li_first;
    n1 = n1_arg;
    while (li != NULL && (!has_n2 || n2 >= n1))
    {
	listitem_T *next = li->li_next;

	listitem_remove(l, li);
	li = next;
	++n1;
    }
    return OK;
}

/*
 * "unlet" a variable.  Return OK if it existed, FAIL if not.
 * When "forceit" is TRUE don't complain if the variable doesn't exist.
 */
    int
do_unlet(char_u *name, int forceit)
{
    hashtab_T	*ht;
    hashitem_T	*hi;
    char_u	*varname;	// "name" without the scope prefix
    dict_T	*d;		// scope dict that owns the variable
    dictitem_T	*di;

    // can't :unlet a script variable in Vim9 script
    if (in_vim9script() && check_vim9_unlet(name) == FAIL)
	return FAIL;

    ht = find_var_ht(name, &varname);

    // can't :unlet a script variable in Vim9 script from a function
    if (ht == get_script_local_ht()
	    && SCRIPT_ID_VALID(current_sctx.sc_sid)
	    && SCRIPT_ITEM(current_sctx.sc_sid)->sn_version
							 == SCRIPT_VERSION_VIM9
	    && check_vim9_unlet(name) == FAIL)
	return FAIL;

    if (ht != NULL && *varname != NUL)
    {
	// Find the dict whose lock flag applies: the funccal l:/a: dict, a
	// well-known scope dict, or the dict looked up from the scope char.
	d = get_current_funccal_dict(ht);
	if (d == NULL)
	{
	    if (ht == &globvarht)
		d = &globvardict;
	    else if (ht == &compat_hashtab)
		d = &vimvardict;
	    else
	    {
		di = find_var_in_ht(ht, *name, (char_u *)"", FALSE);
		d = di == NULL ? NULL : di->di_tv.vval.v_dict;
	    }
	    if (d == NULL)
	    {
		internal_error("do_unlet()");
		return FAIL;
	    }
	}

	hi = hash_find(ht, varname);
	if (HASHITEM_EMPTY(hi))
	    // not found here, it may be in a parent scope (lambda/closure)
	    hi = find_hi_in_scoped_ht(name, &ht);
	if (hi != NULL && !HASHITEM_EMPTY(hi))
	{
	    di = HI2DI(hi);
	    // Refuse for fixed or read-only items and for a locked scope.
	    if (var_check_fixed(di->di_flags, name, FALSE)
		    || var_check_ro(di->di_flags, name, FALSE)
		    || value_check_lock(d->dv_lock, name, FALSE))
		return FAIL;

	    delete_var(ht, hi);
	    return OK;
	}
    }
    if (forceit)
	return OK;
    semsg(_(e_no_such_variable_str), name);
    return FAIL;
}

/*
 * Lock or unlock variable indicated by "lp".
 * "deep" is the levels to go (-1 for unlimited);
 * "lock" is TRUE for ":lockvar", FALSE for ":unlockvar".
 */
    static int
do_lock_var(
    lval_T	*lp,
    char_u	*name_end,
    exarg_T	*eap,
    int		deep,
    void	*cookie UNUSED)
{
    int		lock = eap->cmdidx == CMD_lockvar;
    int		ret = OK;
    int		cc;
    dictitem_T	*di;

    if (lp->ll_tv == NULL)
    {
	// Temporarily NUL-terminate the name for the lookups/messages below;
	// restored before returning.
	cc = *name_end;
	*name_end = NUL;
	if (*lp->ll_name == '$')
	{
	    // Environment variables cannot be (un)locked.
	    semsg(_(e_cannot_lock_or_unlock_variable_str), lp->ll_name);
	    ret = FAIL;
	}
	else
	{
	    // Normal name or expanded name.
	    di = find_var(lp->ll_name, NULL, TRUE);
	    if (di == NULL)
	    {
		if (in_vim9script())
		    semsg(_(e_cannot_find_variable_to_unlock_str),
								  lp->ll_name);
		ret = FAIL;
	    }
	    else if ((di->di_flags & DI_FLAGS_FIX)
			    && di->di_tv.v_type != VAR_DICT
			    && di->di_tv.v_type != VAR_LIST)
	    {
		// For historic reasons this error is not given for a list or
		// dict.  E.g., the b: dict could be locked/unlocked.
		semsg(_(e_cannot_lock_or_unlock_variable_str), lp->ll_name);
		ret = FAIL;
	    }
	    else
	    {
		// Toggle the lock flag on the variable itself, then recurse
		// into the value when a depth was requested.
		if (lock)
		    di->di_flags |= DI_FLAGS_LOCK;
		else
		    di->di_flags &= ~DI_FLAGS_LOCK;
		if (deep != 0)
		    item_lock(&di->di_tv, deep, lock, FALSE);
	    }
	}
	*name_end = cc;
    }
    else if (deep == 0)
    {
	// nothing to do
    }
    else if (lp->ll_range)
    {
	listitem_T    *li = lp->ll_li;

	// (un)lock a range of List items.
	while (li != NULL && (lp->ll_empty2 || lp->ll_n2 >= lp->ll_n1))
	{
	    item_lock(&li->li_tv, deep, lock, FALSE);
	    li = li->li_next;
	    ++lp->ll_n1;
	}
    }
    else if (lp->ll_list != NULL)
	// (un)lock a List item.
	item_lock(&lp->ll_li->li_tv, deep, lock, FALSE);
    else
	// (un)lock a Dictionary item.
	item_lock(&lp->ll_di->di_tv, deep, lock, FALSE);

    return ret;
}

/*
 * Lock or unlock an item.  "deep" is nr of levels to go.
 * When "check_refcount" is TRUE do not lock a list or dict with a reference
 * count larger than 1.
 */
    void
item_lock(typval_T *tv, int deep, int lock, int check_refcount)
{
    static int	recurse = 0;	// guards against deeply nested containers
    list_T	*l;
    listitem_T	*li;
    dict_T	*d;
    blob_T	*b;
    hashitem_T	*hi;
    int		todo;

    if (recurse >= DICT_MAXNEST)
    {
	emsg(_(e_variable_nested_too_deep_for_unlock));
	return;
    }
    if (deep == 0)
	return;
    ++recurse;

    // lock/unlock the item itself
    if (lock)
	tv->v_lock |= VAR_LOCKED;
    else
	tv->v_lock &= ~VAR_LOCKED;

    switch (tv->v_type)
    {
	// Scalar-like types have no contained items; nothing more to do.
	case VAR_UNKNOWN:
	case VAR_ANY:
	case VAR_VOID:
	case VAR_NUMBER:
	case VAR_BOOL:
	case VAR_STRING:
	case VAR_FUNC:
	case VAR_PARTIAL:
	case VAR_FLOAT:
	case VAR_SPECIAL:
	case VAR_JOB:
	case VAR_CHANNEL:
	case VAR_INSTR:
	    break;

	case VAR_BLOB:
	    if ((b = tv->vval.v_blob) != NULL
		    && !(check_refcount && b->bv_refcount > 1))
	    {
		if (lock)
		    b->bv_lock |= VAR_LOCKED;
		else
		    b->bv_lock &= ~VAR_LOCKED;
	    }
	    break;

	case VAR_LIST:
	    if ((l = tv->vval.v_list) != NULL
		    && !(check_refcount && l->lv_refcount > 1))
	    {
		if (lock)
		    l->lv_lock |= VAR_LOCKED;
		else
		    l->lv_lock &= ~VAR_LOCKED;
		if (deep < 0 || deep > 1)
		{
		    if (l->lv_first == &range_list_item)
			// A non-materialized range() list has no real items;
			// a flag on the list marks them all locked instead.
			l->lv_lock |= VAR_ITEMS_LOCKED;
		    else
		    {
			// recursive: lock/unlock the items the List contains
			CHECK_LIST_MATERIALIZE(l);
			FOR_ALL_LIST_ITEMS(l, li) item_lock(&li->li_tv,
					      deep - 1, lock, check_refcount);
		    }
		}
	    }
	    break;
	case VAR_DICT:
	    if ((d = tv->vval.v_dict) != NULL
		    && !(check_refcount && d->dv_refcount > 1))
	    {
		if (lock)
		    d->dv_lock |= VAR_LOCKED;
		else
		    d->dv_lock &= ~VAR_LOCKED;
		if (deep < 0 || deep > 1)
		{
		    // recursive: lock/unlock the items the List contains
		    todo = (int)d->dv_hashtab.ht_used;
		    for (hi = d->dv_hashtab.ht_array; todo > 0; ++hi)
		    {
			if (!HASHITEM_EMPTY(hi))
			{
			    --todo;
			    item_lock(&HI2DI(hi)->di_tv, deep - 1, lock,
							       check_refcount);
			}
		    }
		}
	    }
    }
    --recurse;
}

#if (defined(FEAT_MENU) && defined(FEAT_MULTI_LANG)) || defined(PROTO)
/*
 * Delete all "menutrans_" variables.
*/ void del_menutrans_vars(void) { hashitem_T *hi; int todo; hash_lock(&globvarht); todo = (int)globvarht.ht_used; for (hi = globvarht.ht_array; todo > 0 && !got_int; ++hi) { if (!HASHITEM_EMPTY(hi)) { --todo; if (STRNCMP(HI2DI(hi)->di_key, "menutrans_", 10) == 0) delete_var(&globvarht, hi); } } hash_unlock(&globvarht); } #endif /* * Local string buffer for the next two functions to store a variable name * with its prefix. Allocated in cat_prefix_varname(), freed later in * get_user_var_name(). */ static char_u *varnamebuf = NULL; static int varnamebuflen = 0; /* * Function to concatenate a prefix and a variable name. */ char_u * cat_prefix_varname(int prefix, char_u *name) { int len; len = (int)STRLEN(name) + 3; if (len > varnamebuflen) { vim_free(varnamebuf); len += 10; // some additional space varnamebuf = alloc(len); if (varnamebuf == NULL) { varnamebuflen = 0; return NULL; } varnamebuflen = len; } *varnamebuf = prefix; varnamebuf[1] = ':'; STRCPY(varnamebuf + 2, name); return varnamebuf; } /* * Function given to ExpandGeneric() to obtain the list of user defined * (global/buffer/window/built-in) variable names. */ char_u * get_user_var_name(expand_T *xp, int idx) { static long_u gdone; static long_u bdone; static long_u wdone; static long_u tdone; static int vidx; static hashitem_T *hi; hashtab_T *ht; if (idx == 0) { gdone = bdone = wdone = vidx = 0; tdone = 0; } // Global variables if (gdone < globvarht.ht_used) { if (gdone++ == 0) hi = globvarht.ht_array; else ++hi; while (HASHITEM_EMPTY(hi)) ++hi; if (STRNCMP("g:", xp->xp_pattern, 2) == 0) return cat_prefix_varname('g', hi->hi_key); return hi->hi_key; } // b: variables ht = #ifdef FEAT_CMDWIN // In cmdwin, the alternative buffer should be used. is_in_cmdwin() ? 
&prevwin->w_buffer->b_vars->dv_hashtab : #endif &curbuf->b_vars->dv_hashtab; if (bdone < ht->ht_used) { if (bdone++ == 0) hi = ht->ht_array; else ++hi; while (HASHITEM_EMPTY(hi)) ++hi; return cat_prefix_varname('b', hi->hi_key); } // w: variables ht = #ifdef FEAT_CMDWIN // In cmdwin, the alternative window should be used. is_in_cmdwin() ? &prevwin->w_vars->dv_hashtab : #endif &curwin->w_vars->dv_hashtab; if (wdone < ht->ht_used) { if (wdone++ == 0) hi = ht->ht_array; else ++hi; while (HASHITEM_EMPTY(hi)) ++hi; return cat_prefix_varname('w', hi->hi_key); } // t: variables ht = &curtab->tp_vars->dv_hashtab; if (tdone < ht->ht_used) { if (tdone++ == 0) hi = ht->ht_array; else ++hi; while (HASHITEM_EMPTY(hi)) ++hi; return cat_prefix_varname('t', hi->hi_key); } // v: variables if (vidx < VV_LEN) return cat_prefix_varname('v', (char_u *)vimvars[vidx++].vv_name); VIM_CLEAR(varnamebuf); varnamebuflen = 0; return NULL; } char * get_var_special_name(int nr) { switch (nr) { case VVAL_FALSE: return in_vim9script() ? "false" : "v:false"; case VVAL_TRUE: return in_vim9script() ? "true" : "v:true"; case VVAL_NULL: return in_vim9script() ? "null" : "v:null"; case VVAL_NONE: return "v:none"; } internal_error("get_var_special_name()"); return "42"; } /* * Returns the global variable dictionary */ dict_T * get_globvar_dict(void) { return &globvardict; } /* * Returns the global variable hash table */ hashtab_T * get_globvar_ht(void) { return &globvarht; } /* * Returns the v: variable dictionary */ dict_T * get_vimvar_dict(void) { return &vimvardict; } /* * Returns the index of a v:variable. Negative if not found. * Returns DI_ flags in "di_flags". */ int find_vim_var(char_u *name, int *di_flags) { dictitem_T *di = find_var_in_ht(&vimvarht, 0, name, TRUE); struct vimvar *vv; if (di == NULL) return -1; *di_flags = di->di_flags; vv = (struct vimvar *)((char *)di - offsetof(vimvar_T, vv_di)); return (int)(vv - vimvars); } /* * Set type of v: variable to "type". 
*/ void set_vim_var_type(int idx, vartype_T type) { vimvars[idx].vv_tv_type = type; } /* * Set number v: variable to "val". * Note that this does not set the type, use set_vim_var_type() for that. */ void set_vim_var_nr(int idx, varnumber_T val) { vimvars[idx].vv_nr = val; } char * get_vim_var_name(int idx) { return vimvars[idx].vv_name; } /* * Get typval_T v: variable value. */ typval_T * get_vim_var_tv(int idx) { return &vimvars[idx].vv_tv; } type_T * get_vim_var_type(int idx, garray_T *type_list) { if (vimvars[idx].vv_type != NULL) return vimvars[idx].vv_type; return typval2type_vimvar(&vimvars[idx].vv_tv, type_list); } /* * Set v: variable to "tv". Only accepts the same type. * Takes over the value of "tv". */ int set_vim_var_tv(int idx, typval_T *tv) { if (vimvars[idx].vv_tv_type != tv->v_type) { emsg(_(e_type_mismatch_for_v_variable)); clear_tv(tv); return FAIL; } // VV_RO is also checked when compiling, but let's check here as well. if (vimvars[idx].vv_flags & VV_RO) { semsg(_(e_cannot_change_readonly_variable_str), vimvars[idx].vv_name); return FAIL; } if (sandbox && (vimvars[idx].vv_flags & VV_RO_SBX)) { semsg(_(e_cannot_set_variable_in_sandbox_str), vimvars[idx].vv_name); return FAIL; } clear_tv(&vimvars[idx].vv_di.di_tv); vimvars[idx].vv_di.di_tv = *tv; return OK; } /* * Get number v: variable value. */ varnumber_T get_vim_var_nr(int idx) { return vimvars[idx].vv_nr; } /* * Get string v: variable value. Uses a static buffer, can only be used once. * If the String variable has never been set, return an empty string. * Never returns NULL; */ char_u * get_vim_var_str(int idx) { return tv_get_string(&vimvars[idx].vv_tv); } /* * Get List v: variable value. Caller must take care of reference count when * needed. */ list_T * get_vim_var_list(int idx) { return vimvars[idx].vv_list; } /* * Get Dict v: variable value. Caller must take care of reference count when * needed. 
*/ dict_T * get_vim_var_dict(int idx) { return vimvars[idx].vv_dict; } /* * Set v:char to character "c". */ void set_vim_var_char(int c) { char_u buf[MB_MAXBYTES + 1]; if (has_mbyte) buf[(*mb_char2bytes)(c, buf)] = NUL; else { buf[0] = c; buf[1] = NUL; } set_vim_var_string(VV_CHAR, buf, -1); } /* * Set v:count to "count" and v:count1 to "count1". * When "set_prevcount" is TRUE first set v:prevcount from v:count. */ void set_vcount( long count, long count1, int set_prevcount) { if (set_prevcount) vimvars[VV_PREVCOUNT].vv_nr = vimvars[VV_COUNT].vv_nr; vimvars[VV_COUNT].vv_nr = count; vimvars[VV_COUNT1].vv_nr = count1; } /* * Save variables that might be changed as a side effect. Used when executing * a timer callback. */ void save_vimvars(vimvars_save_T *vvsave) { vvsave->vv_prevcount = vimvars[VV_PREVCOUNT].vv_nr; vvsave->vv_count = vimvars[VV_COUNT].vv_nr; vvsave->vv_count1 = vimvars[VV_COUNT1].vv_nr; } /* * Restore variables saved by save_vimvars(). */ void restore_vimvars(vimvars_save_T *vvsave) { vimvars[VV_PREVCOUNT].vv_nr = vvsave->vv_prevcount; vimvars[VV_COUNT].vv_nr = vvsave->vv_count; vimvars[VV_COUNT1].vv_nr = vvsave->vv_count1; } /* * Set string v: variable to a copy of "val". If 'copy' is FALSE, then set the * value. */ void set_vim_var_string( int idx, char_u *val, int len) // length of "val" to use or -1 (whole string) { clear_tv(&vimvars[idx].vv_di.di_tv); vimvars[idx].vv_tv_type = VAR_STRING; if (val == NULL) vimvars[idx].vv_str = NULL; else if (len == -1) vimvars[idx].vv_str = vim_strsave(val); else vimvars[idx].vv_str = vim_strnsave(val, len); } /* * Set List v: variable to "val". */ void set_vim_var_list(int idx, list_T *val) { clear_tv(&vimvars[idx].vv_di.di_tv); vimvars[idx].vv_tv_type = VAR_LIST; vimvars[idx].vv_list = val; if (val != NULL) ++val->lv_refcount; } /* * Set Dictionary v: variable to "val". 
*/ void set_vim_var_dict(int idx, dict_T *val) { clear_tv(&vimvars[idx].vv_di.di_tv); vimvars[idx].vv_tv_type = VAR_DICT; vimvars[idx].vv_dict = val; if (val != NULL) { ++val->dv_refcount; dict_set_items_ro(val); } } /* * Set the v:argv list. */ void set_argv_var(char **argv, int argc) { list_T *l = list_alloc(); int i; if (l == NULL) getout(1); l->lv_lock = VAR_FIXED; for (i = 0; i < argc; ++i) { if (list_append_string(l, (char_u *)argv[i], -1) == FAIL) getout(1); l->lv_u.mat.lv_last->li_tv.v_lock = VAR_FIXED; } set_vim_var_list(VV_ARGV, l); } /* * Reset v:register, taking the 'clipboard' setting into account. */ void reset_reg_var(void) { int regname = 0; // Adjust the register according to 'clipboard', so that when // "unnamed" is present it becomes '*' or '+' instead of '"'. #ifdef FEAT_CLIPBOARD adjust_clip_reg(&regname); #endif set_reg_var(regname); } /* * Set v:register if needed. */ void set_reg_var(int c) { char_u regname; if (c == 0 || c == ' ') regname = '"'; else regname = c; // Avoid free/alloc when the value is already right. if (vimvars[VV_REG].vv_str == NULL || vimvars[VV_REG].vv_str[0] != c) set_vim_var_string(VV_REG, &regname, 1); } /* * Get or set v:exception. If "oldval" == NULL, return the current value. * Otherwise, restore the value to "oldval" and return NULL. * Must always be called in pairs to save and restore v:exception! Does not * take care of memory allocations. */ char_u * v_exception(char_u *oldval) { if (oldval == NULL) return vimvars[VV_EXCEPTION].vv_str; vimvars[VV_EXCEPTION].vv_str = oldval; return NULL; } /* * Get or set v:throwpoint. If "oldval" == NULL, return the current value. * Otherwise, restore the value to "oldval" and return NULL. * Must always be called in pairs to save and restore v:throwpoint! Does not * take care of memory allocations. */ char_u * v_throwpoint(char_u *oldval) { if (oldval == NULL) return vimvars[VV_THROWPOINT].vv_str; vimvars[VV_THROWPOINT].vv_str = oldval; return NULL; } /* * Set v:cmdarg. 
* If "eap" != NULL, use "eap" to generate the value and return the old value. * If "oldarg" != NULL, restore the value to "oldarg" and return NULL. * Must always be called in pairs! */ char_u * set_cmdarg(exarg_T *eap, char_u *oldarg) { char_u *oldval; char_u *newval; unsigned len; oldval = vimvars[VV_CMDARG].vv_str; if (eap == NULL) { vim_free(oldval); vimvars[VV_CMDARG].vv_str = oldarg; return NULL; } if (eap->force_bin == FORCE_BIN) len = 6; else if (eap->force_bin == FORCE_NOBIN) len = 8; else len = 0; if (eap->read_edit) len += 7; if (eap->force_ff != 0) len += 10; // " ++ff=unix" if (eap->force_enc != 0) len += (unsigned)STRLEN(eap->cmd + eap->force_enc) + 7; if (eap->bad_char != 0) len += 7 + 4; // " ++bad=" + "keep" or "drop" newval = alloc(len + 1); if (newval == NULL) return NULL; if (eap->force_bin == FORCE_BIN) sprintf((char *)newval, " ++bin"); else if (eap->force_bin == FORCE_NOBIN) sprintf((char *)newval, " ++nobin"); else *newval = NUL; if (eap->read_edit) STRCAT(newval, " ++edit"); if (eap->force_ff != 0) sprintf((char *)newval + STRLEN(newval), " ++ff=%s", eap->force_ff == 'u' ? "unix" : eap->force_ff == 'd' ? "dos" : "mac"); if (eap->force_enc != 0) sprintf((char *)newval + STRLEN(newval), " ++enc=%s", eap->cmd + eap->force_enc); if (eap->bad_char == BAD_KEEP) STRCPY(newval + STRLEN(newval), " ++bad=keep"); else if (eap->bad_char == BAD_DROP) STRCPY(newval + STRLEN(newval), " ++bad=drop"); else if (eap->bad_char != 0) sprintf((char *)newval + STRLEN(newval), " ++bad=%c", eap->bad_char); vimvars[VV_CMDARG].vv_str = newval; return oldval; } /* * Get the value of internal variable "name". * If "flags" has EVAL_VAR_IMPORT may return a VAR_ANY with v_number set to the * imported script ID. * Return OK or FAIL. If OK is returned "rettv" must be cleared. 
*/ int eval_variable( char_u *name, int len, // length of "name" scid_T sid, // script ID for imported item or zero typval_T *rettv, // NULL when only checking existence dictitem_T **dip, // non-NULL when typval's dict item is needed int flags) // EVAL_VAR_ flags { int ret = OK; typval_T *tv = NULL; int found = FALSE; hashtab_T *ht = NULL; int cc; type_T *type = NULL; // truncate the name, so that we can use strcmp() cc = name[len]; name[len] = NUL; // Check for local variable when debugging. if ((tv = lookup_debug_var(name)) == NULL) { // Check for user-defined variables. dictitem_T *v = find_var(name, &ht, flags & EVAL_VAR_NOAUTOLOAD); if (v != NULL) { tv = &v->di_tv; if (dip != NULL) *dip = v; } else ht = NULL; } if (tv == NULL && (in_vim9script() || STRNCMP(name, "s:", 2) == 0)) { imported_T *import = NULL; char_u *p = STRNCMP(name, "s:", 2) == 0 ? name + 2 : name; if (sid == 0) import = find_imported(p, 0, TRUE); // imported variable from another script if (import != NULL || sid != 0) { if ((flags & EVAL_VAR_IMPORT) == 0) { if (SCRIPT_ID_VALID(sid)) { ht = &SCRIPT_VARS(sid); if (ht != NULL) { dictitem_T *v = find_var_in_ht(ht, 0, name, flags & EVAL_VAR_NOAUTOLOAD); if (v != NULL) { tv = &v->di_tv; if (dip != NULL) *dip = v; } else ht = NULL; } } else { if (flags & EVAL_VAR_VERBOSE) semsg(_(e_expected_dot_after_name_str), name); ret = FAIL; } } else { if (rettv != NULL) { rettv->v_type = VAR_ANY; rettv->vval.v_number = sid != 0 ? sid : import->imp_sid; } found = TRUE; } } else if (in_vim9script() && (flags & EVAL_VAR_NO_FUNC) == 0) { int has_g_prefix = STRNCMP(name, "g:", 2) == 0; ufunc_T *ufunc = find_func(name, FALSE); // In Vim9 script we can get a function reference by using the // function name. For a global non-autoload function "g:" is // required. 
if (ufunc != NULL && (has_g_prefix || !func_requires_g_prefix(ufunc))) { found = TRUE; if (rettv != NULL) { rettv->v_type = VAR_FUNC; if (has_g_prefix) // Keep the "g:", otherwise script-local may be // assumed. rettv->vval.v_string = vim_strsave(name); else rettv->vval.v_string = vim_strsave(ufunc->uf_name); if (rettv->vval.v_string != NULL) func_ref(ufunc->uf_name); } } } } if (!found) { if (tv == NULL) { if (rettv != NULL && (flags & EVAL_VAR_VERBOSE)) semsg(_(e_undefined_variable_str), name); ret = FAIL; } else if (rettv != NULL) { if (ht != NULL && ht == get_script_local_ht() && tv != &SCRIPT_SV(current_sctx.sc_sid)->sv_var.di_tv) { svar_T *sv = find_typval_in_script(tv, 0); if (sv != NULL) type = sv->sv_type; } // If a list or dict variable wasn't initialized, do it now. if (tv->v_type == VAR_DICT && tv->vval.v_dict == NULL) { tv->vval.v_dict = dict_alloc(); if (tv->vval.v_dict != NULL) { ++tv->vval.v_dict->dv_refcount; tv->vval.v_dict->dv_type = alloc_type(type); } } else if (tv->v_type == VAR_LIST && tv->vval.v_list == NULL) { tv->vval.v_list = list_alloc(); if (tv->vval.v_list != NULL) { ++tv->vval.v_list->lv_refcount; tv->vval.v_list->lv_type = alloc_type(type); } } else if (tv->v_type == VAR_BLOB && tv->vval.v_blob == NULL) { tv->vval.v_blob = blob_alloc(); if (tv->vval.v_blob != NULL) ++tv->vval.v_blob->bv_refcount; } copy_tv(tv, rettv); } } name[len] = cc; return ret; } /* * Check if variable "name[len]" is a local variable or an argument. * If so, "*eval_lavars_used" is set to TRUE. */ void check_vars(char_u *name, int len) { int cc; char_u *varname; hashtab_T *ht; if (eval_lavars_used == NULL) return; // truncate the name, so that we can use strcmp() cc = name[len]; name[len] = NUL; ht = find_var_ht(name, &varname); if (ht == get_funccal_local_ht() || ht == get_funccal_args_ht()) { if (find_var(name, NULL, TRUE) != NULL) *eval_lavars_used = TRUE; } name[len] = cc; } /* * Find variable "name" in the list of variables. 
 * Return a pointer to it if found, NULL if not found.
 * Careful: "a:0" variables don't have a name.
 * When "htp" is not NULL  set "htp" to the hashtab_T used.
 * Lookup order: scope hashtab -> enclosing lambda/closure scopes -> Vim9
 * script-local -> "vim9script autoload" prefixed global.
 */
    dictitem_T *
find_var(char_u *name, hashtab_T **htp, int no_autoload)
{
    char_u	*varname;
    hashtab_T	*ht;
    dictitem_T	*ret = NULL;

    ht = find_var_ht(name, &varname);
    if (htp != NULL)
	*htp = ht;
    if (ht == NULL)
	return NULL;
    ret = find_var_in_ht(ht, *name, varname, no_autoload);
    if (ret != NULL)
	return ret;

    // Search in parent scope for lambda
    ret = find_var_in_scoped_ht(name, no_autoload);
    if (ret != NULL)
	return ret;

    // in Vim9 script items without a scope can be script-local
    if (in_vim9script() && name[0] != NUL && name[1] != ':')
    {
	ht = get_script_local_ht();
	if (ht != NULL)
	{
	    ret = find_var_in_ht(ht, *name, varname, no_autoload);
	    if (ret != NULL)
	    {
		if (htp != NULL)
		    *htp = ht;
		return ret;
	    }
	}
    }

    // When using "vim9script autoload" script-local items are prefixed but can
    // be used with s:name.
    if (SCRIPT_ID_VALID(current_sctx.sc_sid)
		   && name[0] == 's' && name[1] == ':')
    {
	scriptitem_T *si = SCRIPT_ITEM(current_sctx.sc_sid);

	if (si->sn_autoload_prefix != NULL)
	{
	    // Look up "prefix#name" in the global scope; the concatenated
	    // name is only needed for the lookup and freed right after.
	    char_u *auto_name = concat_str(si->sn_autoload_prefix, name + 2);

	    if (auto_name != NULL)
	    {
		ht = &globvarht;
		ret = find_var_in_ht(ht, *name, auto_name, TRUE);
		vim_free(auto_name);
		if (ret != NULL)
		{
		    if (htp != NULL)
			*htp = ht;
		    return ret;
		}
	    }
	}
    }

    return NULL;
}

/*
 * Like find_var() but if the name starts with <SNR>99_ then look in the
 * referenced script (used for a funcref).
 */
    dictitem_T *
find_var_also_in_script(char_u *name, hashtab_T **htp, int no_autoload)
{
    // "<SNR>99_name": strip the prefix and look up "name" in the variables
    // of script 99 (the script the funcref was defined in).
    if (STRNCMP(name, "<SNR>", 5) == 0 && isdigit(name[5]))
    {
	char_u	    *p = name + 5;
	int	    sid = getdigits(&p);

	if (SCRIPT_ID_VALID(sid) && *p == '_')
	{
	    hashtab_T	*ht = &SCRIPT_VARS(sid);

	    if (ht != NULL)
	    {
		dictitem_T *di = find_var_in_ht(ht, 0, p + 1, no_autoload);

		if (di != NULL)
		{
		    if (htp != NULL)
			*htp = ht;
		    return di;
		}
	    }
	}
    }

    return find_var(name, htp, no_autoload);
}

/*
 * Find variable "varname" in hashtab "ht" with name "htname".
 * When "varname" is empty returns curwin/curtab/etc vars dictionary.
 * Returns NULL if not found.
 */
    dictitem_T *
find_var_in_ht(
    hashtab_T	*ht,
    int		htname,		// scope character: 's', 'g', 'b', etc.
    char_u	*varname,
    int		no_autoload)
{
    hashitem_T	*hi;

    if (*varname == NUL)
    {
	// Must be something like "s:", otherwise "ht" would be NULL.
	// Return the dictitem holding the whole scope dictionary.
	switch (htname)
	{
	    case 's': return &SCRIPT_SV(current_sctx.sc_sid)->sv_var;
	    case 'g': return &globvars_var;
	    case 'v': return &vimvars_var;
	    case 'b': return &curbuf->b_bufvar;
	    case 'w': return &curwin->w_winvar;
	    case 't': return &curtab->tp_winvar;
	    case 'l': return get_funccal_local_var();
	    case 'a': return get_funccal_args_var();
	}
	return NULL;
    }

    hi = hash_find(ht, varname);
    if (HASHITEM_EMPTY(hi))
    {
	// For global variables we may try auto-loading the script.  If it
	// worked find the variable again.  Don't auto-load a script if it was
	// loaded already, otherwise it would be loaded every time when
	// checking if a function name is a Funcref variable.
	if (ht == &globvarht && !no_autoload)
	{
	    // Note: script_autoload() may make "hi" invalid.  It must either
	    // be obtained again or not used.
	    if (!script_autoload(varname, FALSE) || aborting())
		return NULL;
	    hi = hash_find(ht, varname);
	}
	if (HASHITEM_EMPTY(hi))
	    return NULL;
    }
    return HI2DI(hi);
}

/*
 * Get the script-local hashtab.  NULL if not in a script context.
*/ hashtab_T * get_script_local_ht(void) { scid_T sid = current_sctx.sc_sid; if (SCRIPT_ID_VALID(sid)) return &SCRIPT_VARS(sid); return NULL; } /* * Look for "name[len]" in script-local variables and functions. * When "cmd" is TRUE it must look like a command, a function must be followed * by "(" or "->". * Return OK when found, FAIL when not found. */ int lookup_scriptitem( char_u *name, size_t len, int cmd, cctx_T *dummy UNUSED) { hashtab_T *ht = get_script_local_ht(); char_u buffer[30]; char_u *p; int res; hashitem_T *hi; int is_global = FALSE; char_u *fname = name; if (ht == NULL) return FAIL; if (len < sizeof(buffer) - 1) { // avoid an alloc/free for short names vim_strncpy(buffer, name, len); p = buffer; } else { p = vim_strnsave(name, len); if (p == NULL) return FAIL; } hi = hash_find(ht, p); res = HASHITEM_EMPTY(hi) ? FAIL : OK; // if not script-local, then perhaps imported if (res == FAIL && find_imported(p, 0, FALSE) != NULL) res = OK; if (p != buffer) vim_free(p); // Find a function, so that a following "->" works. // When used as a command require "(" or "->" to follow, "Cmd" is a user // command while "Cmd()" is a function call. if (res != OK) { p = skipwhite(name + len); if (!cmd || name[len] == '(' || (p[0] == '-' && p[1] == '>')) { // Do not check for an internal function, since it might also be a // valid command, such as ":split" versus "split()". // Skip "g:" before a function name. if (name[0] == 'g' && name[1] == ':') { is_global = TRUE; fname = name + 2; } if (find_func(fname, is_global) != NULL) res = OK; } } return res; } /* * Find the hashtab used for a variable name. * Return NULL if the name is not valid. * Set "varname" to the start of name without ':'. */ hashtab_T * find_var_ht(char_u *name, char_u **varname) { hashitem_T *hi; hashtab_T *ht; if (name[0] == NUL) return NULL; if (name[1] != ':') { // The name must not start with a colon or #. 
if (name[0] == ':' || name[0] == AUTOLOAD_CHAR) return NULL; *varname = name; // "version" is "v:version" in all scopes if scriptversion < 3. // Same for a few other variables marked with VV_COMPAT. if (in_old_script(3)) { hi = hash_find(&compat_hashtab, name); if (!HASHITEM_EMPTY(hi)) return &compat_hashtab; } ht = get_funccal_local_ht(); if (ht != NULL) return ht; // local variable // In Vim9 script items at the script level are script-local, except // for autoload names. if (in_vim9script() && vim_strchr(name, AUTOLOAD_CHAR) == NULL) { ht = get_script_local_ht(); if (ht != NULL) return ht; } return &globvarht; // global variable } *varname = name + 2; if (*name == 'g') // global variable return &globvarht; // There must be no ':' or '#' in the rest of the name, unless g: is used if (vim_strchr(name + 2, ':') != NULL || vim_strchr(name + 2, AUTOLOAD_CHAR) != NULL) return NULL; if (*name == 'b') // buffer variable return &curbuf->b_vars->dv_hashtab; if (*name == 'w') // window variable return &curwin->w_vars->dv_hashtab; if (*name == 't') // tab page variable return &curtab->tp_vars->dv_hashtab; if (*name == 'v') // v: variable return &vimvarht; if (get_current_funccal() != NULL && get_current_funccal()->func->uf_def_status == UF_NOT_COMPILED) { // a: and l: are only used in functions defined with ":function" if (*name == 'a') // a: function argument return get_funccal_args_ht(); if (*name == 'l') // l: local function variable return get_funccal_local_ht(); } if (*name == 's') // script variable { ht = get_script_local_ht(); if (ht != NULL) return ht; } return NULL; } /* * Get the string value of a (global/local) variable. * Note: see tv_get_string() for how long the pointer remains valid. * Returns NULL when it doesn't exist. */ char_u * get_var_value(char_u *name) { dictitem_T *v; v = find_var(name, NULL, FALSE); if (v == NULL) return NULL; return tv_get_string(&v->di_tv); } /* * Allocate a new hashtab for a sourced script. 
 It will be used while
 * sourcing this script and when executing functions defined in the script.
 */
    void
new_script_vars(scid_T id)
{
    scriptvar_T *sv;

    sv = ALLOC_CLEAR_ONE(scriptvar_T);
    if (sv == NULL)
	return;
    init_var_dict(&sv->sv_dict, &sv->sv_var, VAR_SCOPE);
    SCRIPT_ITEM(id)->sn_vars = sv;
}

/*
 * Initialize dictionary "dict" as a scope and set variable "dict_var" to
 * point to it.
 */
    void
init_var_dict(dict_T *dict, dictitem_T *dict_var, int scope)
{
    hash_init(&dict->dv_hashtab);
    dict->dv_lock = 0;
    dict->dv_scope = scope;
    // DO_NOT_FREE_CNT keeps the dict alive independently of user references;
    // see unref_var_dict() for the matching teardown.
    dict->dv_refcount = DO_NOT_FREE_CNT;
    dict->dv_copyID = 0;
    dict_var->di_tv.vval.v_dict = dict;
    dict_var->di_tv.v_type = VAR_DICT;
    dict_var->di_tv.v_lock = VAR_FIXED;
    dict_var->di_flags = DI_FLAGS_RO | DI_FLAGS_FIX;
    dict_var->di_key[0] = NUL;
}

/*
 * Unreference a dictionary initialized by init_var_dict().
 */
    void
unref_var_dict(dict_T *dict)
{
    // Now the dict needs to be freed if no one else is using it, go back to
    // normal reference counting.
    dict->dv_refcount -= DO_NOT_FREE_CNT - 1;
    dict_unref(dict);
}

/*
 * Clean up a list of internal variables.
 * Frees all allocated variables and the value they contain.
 * Clears hashtab "ht", does not free it.
 */
    void
vars_clear(hashtab_T *ht)
{
    vars_clear_ext(ht, TRUE);
}

/*
 * Like vars_clear(), but only free the value if "free_val" is TRUE.
 */
    void
vars_clear_ext(hashtab_T *ht, int free_val)
{
    int		todo;
    hashitem_T	*hi;
    dictitem_T	*v;

    hash_lock(ht);
    todo = (int)ht->ht_used;
    for (hi = ht->ht_array; todo > 0; ++hi)
    {
	if (!HASHITEM_EMPTY(hi))
	{
	    --todo;

	    // Free the variable.  Don't remove it from the hashtab,
	    // ht_array might change then.  hash_clear() takes care of it
	    // later.
	    v = HI2DI(hi);
	    if (free_val)
		clear_tv(&v->di_tv);
	    if (v->di_flags & DI_FLAGS_ALLOC)
		vim_free(v);
	}
    }
    hash_clear(ht);
    hash_init(ht);
}

/*
 * Delete a variable from hashtab "ht" at item "hi".
 * Clear the variable value and free the dictitem.
*/ void delete_var(hashtab_T *ht, hashitem_T *hi) { dictitem_T *di = HI2DI(hi); hash_remove(ht, hi); clear_tv(&di->di_tv); vim_free(di); } /* * List the value of one internal variable. */ static void list_one_var(dictitem_T *v, char *prefix, int *first) { char_u *tofree; char_u *s; char_u numbuf[NUMBUFLEN]; s = echo_string(&v->di_tv, &tofree, numbuf, get_copyID()); list_one_var_a(prefix, v->di_key, v->di_tv.v_type, s == NULL ? (char_u *)"" : s, first); vim_free(tofree); } static void list_one_var_a( char *prefix, char_u *name, int type, char_u *string, int *first) // when TRUE clear rest of screen and set to FALSE { // don't use msg() or msg_attr() to avoid overwriting "v:statusmsg" msg_start(); msg_puts(prefix); if (name != NULL) // "a:" vars don't have a name stored msg_puts((char *)name); msg_putchar(' '); msg_advance(22); if (type == VAR_NUMBER) msg_putchar('#'); else if (type == VAR_FUNC || type == VAR_PARTIAL) msg_putchar('*'); else if (type == VAR_LIST) { msg_putchar('['); if (*string == '[') ++string; } else if (type == VAR_DICT) { msg_putchar('{'); if (*string == '{') ++string; } else msg_putchar(' '); msg_outtrans(string); if (type == VAR_FUNC || type == VAR_PARTIAL) msg_puts("()"); if (*first) { msg_clr_eos(); *first = FALSE; } } /* * Set variable "name" to value in "tv". * If the variable already exists, the value is updated. * Otherwise the variable is created. */ void set_var( char_u *name, typval_T *tv, int copy) // make copy of value in "tv" { set_var_const(name, 0, NULL, tv, copy, ASSIGN_DECL, 0); } /* * Set variable "name" to value in "tv_arg". * When "sid" is non-zero "name" is in the script with this ID. * If the variable already exists and "is_const" is FALSE the value is updated. * Otherwise the variable is created. */ void set_var_const( char_u *name, scid_T sid, type_T *type_arg, typval_T *tv_arg, int copy, // make copy of value in "tv" int flags_arg, // ASSIGN_CONST, ASSIGN_FINAL, etc. 
int var_idx)	    // index for ":let [a, b] = list"
{
    typval_T	*tv = tv_arg;
    type_T	*type = type_arg;
    typval_T	bool_tv;
    dictitem_T	*di;
    typval_T	*dest_tv = NULL;
    char_u	*varname;
    char_u	*name_tofree = NULL;
    hashtab_T	*ht = NULL;
    int		is_script_local;
    int		vim9script = in_vim9script();
    int		var_in_vim9script;
    int		var_in_autoload = FALSE;
    int		flags = flags_arg;
    int		free_tv_arg = !copy;	// free tv_arg if not used

    // Determine the hashtab the variable goes into and the variable name
    // without the scope prefix.
    if (sid != 0)
    {
	if (SCRIPT_ID_VALID(sid))
	    ht = &SCRIPT_VARS(sid);
	varname = name;
    }
    else
    {
	scriptitem_T *si;

	if (in_vim9script() && is_export
		&& SCRIPT_ID_VALID(current_sctx.sc_sid)
		&& (si = SCRIPT_ITEM(current_sctx.sc_sid))
						 ->sn_autoload_prefix != NULL)
	{
	    // In a vim9 autoload script an exported variable is put in the
	    // global namespace with the autoload prefix.
	    var_in_autoload = TRUE;
	    varname = concat_str(si->sn_autoload_prefix, name);
	    if (varname == NULL)
		goto failed;
	    name_tofree = varname;
	    ht = &globvarht;
	}
	else
	    ht = find_var_ht(name, &varname);
    }
    if (ht == NULL || *varname == NUL)
    {
	semsg(_(e_illegal_variable_name_str), name);
	goto failed;
    }
    is_script_local = ht == get_script_local_ht() || sid != 0
							   || var_in_autoload;

    if (vim9script
	    && !is_script_local
	    && (flags & (ASSIGN_NO_DECL | ASSIGN_DECL)) == 0
	    && (flags & (ASSIGN_CONST | ASSIGN_FINAL)) == 0
	    && name[1] == ':')
    {
	// In Vim9 script a scoped variable (other than script-local) cannot
	// be declared; it needs ":let"/assignment semantics instead.
	vim9_declare_error(name);
	goto failed;
    }
    if ((flags & ASSIGN_FOR_LOOP) && name[1] == ':'
			  && vim_strchr((char_u *)"gwbt", name[0]) != NULL)
	// Do not make g:var, w:var, b:var or t:var final.
	flags &= ~ASSIGN_FINAL;

    var_in_vim9script = is_script_local && current_script_is_vim9();
    if (var_in_vim9script && name[0] == '_' && name[1] == NUL)
    {
	// For "[a, _] = list" the underscore is ignored.
	if ((flags & ASSIGN_UNPACK) == 0)
	    emsg(_(e_cannot_use_underscore_here));
	goto failed;
    }

    di = find_var_in_ht(ht, 0, varname, TRUE);

    if (di == NULL && var_in_vim9script)
    {
	imported_T  *import = find_imported(varname, 0, FALSE);

	if (import != NULL)
	{
	    // imported name space cannot be used
	    if ((flags & ASSIGN_NO_DECL) == 0)
	    {
		semsg(_(e_redefining_imported_item_str), name);
		goto failed;
	    }
	    semsg(_(e_cannot_use_str_itself_it_is_imported), name);
	    goto failed;
	}
	if (!in_vim9script())
	{
	    semsg(_(e_cannot_create_vim9_script_variable_in_function_str),
									name);
	    goto failed;
	}
    }

    if (dest_tv == NULL)
    {
	// Search in parent scope which is possible to reference from lambda
	if (di == NULL)
	    di = find_var_in_scoped_ht(name, TRUE);

	if ((tv->v_type == VAR_FUNC || tv->v_type == VAR_PARTIAL)
				  && var_wrong_func_name(name, di == NULL))
	    goto failed;

	if (need_convert_to_bool(type, tv))
	{
	    // Destination is a bool and the value is not, but it can be
	    // converted.
	    CLEAR_FIELD(bool_tv);
	    bool_tv.v_type = VAR_BOOL;
	    bool_tv.vval.v_number = tv2bool(tv) ? VVAL_TRUE : VVAL_FALSE;
	    tv = &bool_tv;
	}

	if (di != NULL)
	{
	    // Item already exists.  Allowed to replace when reloading.
	    if ((di->di_flags & DI_FLAGS_RELOAD) == 0)
	    {
		if ((flags & (ASSIGN_CONST | ASSIGN_FINAL))
					  && (flags & ASSIGN_FOR_LOOP) == 0)
		{
		    emsg(_(e_cannot_modify_existing_variable));
		    goto failed;
		}

		if (is_script_local && vim9script
			     && (flags & (ASSIGN_NO_DECL | ASSIGN_DECL)) == 0)
		{
		    semsg(_(e_redefining_script_item_str), name);
		    goto failed;
		}

		if (var_in_vim9script && (flags & ASSIGN_FOR_LOOP) == 0)
		{
		    where_T where = WHERE_INIT;
		    svar_T  *sv = find_typval_in_script(&di->di_tv, sid);

		    if (sv != NULL)
		    {
			// check the type and adjust to bool if needed
			where.wt_index = var_idx;
			where.wt_variable = TRUE;
			if (check_script_var_type(sv, tv, name, where) == FAIL)
			    goto failed;
			if (type == NULL)
			    type = sv->sv_type;
		    }
		}

		if ((flags & ASSIGN_FOR_LOOP) == 0
				&& var_check_permission(di, name) == FAIL)
		    goto failed;
	    }
	    else
	    {
		// can only redefine once
		di->di_flags &= ~DI_FLAGS_RELOAD;

		// A Vim9 script-local variable is also present in sn_all_vars
		// and sn_var_vals.  It may set "type" from "tv".
		if (var_in_vim9script || var_in_autoload)
		    update_vim9_script_var(FALSE, di,
			    var_in_autoload ? name : di->di_key, flags,
			    tv, &type, (flags & ASSIGN_NO_MEMBER_TYPE) == 0);
	    }

	    // existing variable, need to clear the value

	    // Handle setting internal di: variables separately where needed to
	    // prevent changing the type.
	    if (ht == &vimvarht)
	    {
		if (di->di_tv.v_type == VAR_STRING)
		{
		    VIM_CLEAR(di->di_tv.vval.v_string);
		    if (copy || tv->v_type != VAR_STRING)
		    {
			char_u	*val = tv_get_string(tv);

			// Careful: when assigning to v:errmsg and
			// tv_get_string() causes an error message the variable
			// will already be set.
			if (di->di_tv.vval.v_string == NULL)
			    di->di_tv.vval.v_string = vim_strsave(val);
		    }
		    else
		    {
			// Take over the string to avoid an extra alloc/free.
			di->di_tv.vval.v_string = tv->vval.v_string;
			tv->vval.v_string = NULL;
		    }
		    goto failed;
		}
		else if (di->di_tv.v_type == VAR_NUMBER)
		{
		    di->di_tv.vval.v_number = tv_get_number(tv);
		    // v:searchforward and v:hlsearch have side effects.
		    if (STRCMP(varname, "searchforward") == 0)
			set_search_direction(di->di_tv.vval.v_number
								? '/' : '?');
#ifdef FEAT_SEARCH_EXTRA
		    else if (STRCMP(varname, "hlsearch") == 0)
		    {
			no_hlsearch = !di->di_tv.vval.v_number;
			redraw_all_later(SOME_VALID);
		    }
#endif
		    goto failed;
		}
		else if (di->di_tv.v_type != tv->v_type)
		{
		    semsg(_(e_setting_str_to_value_with_wrong_type), name);
		    goto failed;
		}
	    }

	    clear_tv(&di->di_tv);
	}
	else
	{
	    // Item not found, check if a function already exists.
	    if (is_script_local
			  && (flags & (ASSIGN_NO_DECL | ASSIGN_DECL)) == 0
		  && lookup_scriptitem(name, STRLEN(name), FALSE, NULL) == OK)
	    {
		semsg(_(e_redefining_script_item_str), name);
		goto failed;
	    }

	    // add a new variable
	    if (var_in_vim9script && (flags & ASSIGN_NO_DECL))
	    {
		semsg(_(e_unknown_variable_str), name);
		goto failed;
	    }

	    // Can't add "v:" or "a:" variable.
	    if (ht == &vimvarht || ht == get_funccal_args_ht())
	    {
		semsg(_(e_illegal_variable_name_str), name);
		goto failed;
	    }

	    // Make sure the variable name is valid.  In Vim9 script an
	    // autoload variable must be prefixed with "g:" unless in an
	    // autoload script.
	    if (!valid_varname(varname, -1, !vim9script
			|| STRNCMP(name, "g:", 2) == 0 || var_in_autoload))
		goto failed;

	    di = alloc(sizeof(dictitem_T) + STRLEN(varname));
	    if (di == NULL)
		goto failed;
	    STRCPY(di->di_key, varname);
	    if (hash_add(ht, DI2HIKEY(di)) == FAIL)
	    {
		vim_free(di);
		goto failed;
	    }
	    di->di_flags = DI_FLAGS_ALLOC;
	    if (flags & (ASSIGN_CONST | ASSIGN_FINAL))
		di->di_flags |= DI_FLAGS_LOCK;

	    // A Vim9 script-local variable is also added to sn_all_vars and
	    // sn_var_vals.  It may set "type" from "tv".
	    if (var_in_vim9script || var_in_autoload)
		update_vim9_script_var(TRUE, di,
			var_in_autoload ? name : di->di_key, flags,
			tv, &type, (flags & ASSIGN_NO_MEMBER_TYPE) == 0);
	}

	dest_tv = &di->di_tv;
    }

    // Numbers and floats are cheap to copy; for other types only copy when
    // requested, otherwise move the value out of "tv".
    if (copy || tv->v_type == VAR_NUMBER || tv->v_type == VAR_FLOAT)
	copy_tv(tv, dest_tv);
    else
    {
	*dest_tv = *tv;
	dest_tv->v_lock = 0;
	init_tv(tv);
    }
    free_tv_arg = FALSE;

    if (vim9script && type != NULL)
	set_tv_type(dest_tv, type);

    // ":const var = value" locks the value
    // ":final var = value" locks "var"
    if (flags & ASSIGN_CONST)
	// Like :lockvar! name: lock the value and what it contains, but only
	// if the reference count is up to one.  That locks only literal
	// values.
	item_lock(dest_tv, DICT_MAXNEST, TRUE, TRUE);

failed:
    vim_free(name_tofree);
    if (free_tv_arg)
	clear_tv(tv_arg);
}

/*
 * Check in this order for backwards compatibility:
 * - Whether the variable is read-only
 * - Whether the variable value is locked
 * - Whether the variable is locked
 */
    int
var_check_permission(dictitem_T *di, char_u *name)
{
    if (var_check_ro(di->di_flags, name, FALSE)
		    || value_check_lock(di->di_tv.v_lock, name, FALSE)
		    || var_check_lock(di->di_flags, name, FALSE))
	return FAIL;
    return OK;
}

/*
 * Return TRUE if di_flags "flags" indicates variable "name" is read-only.
 * Also give an error message.
 */
    int
var_check_ro(int flags, char_u *name, int use_gettext)
{
    if (flags & DI_FLAGS_RO)
    {
	if (name == NULL)
	    emsg(_(e_cannot_change_readonly_variable));
	else
	    semsg(_(e_cannot_change_readonly_variable_str),
				      use_gettext ? (char_u *)_(name) : name);
	return TRUE;
    }
    if ((flags & DI_FLAGS_RO_SBX) && sandbox)
    {
	if (name == NULL)
	    emsg(_(e_cannot_set_variable_in_sandbox));
	else
	    semsg(_(e_cannot_set_variable_in_sandbox_str),
				      use_gettext ? (char_u *)_(name) : name);
	return TRUE;
    }
    return FALSE;
}

/*
 * Return TRUE if di_flags "flags" indicates variable "name" is locked.
 * Also give an error message.
 */
    int
var_check_lock(int flags, char_u *name, int use_gettext)
{
    if (flags & DI_FLAGS_LOCK)
    {
	semsg(_(e_variable_is_locked_str),
				      use_gettext ?
(char_u *)_(name) : name);
	return TRUE;
    }
    return FALSE;
}

/*
 * Return TRUE if di_flags "flags" indicates variable "name" is fixed.
 * Also give an error message.
 */
    int
var_check_fixed(int flags, char_u *name, int use_gettext)
{
    if (flags & DI_FLAGS_FIX)
    {
	if (name == NULL)
	    emsg(_(e_cannot_delete_variable));
	else
	    semsg(_(e_cannot_delete_variable_str),
				      use_gettext ? (char_u *)_(name) : name);
	return TRUE;
    }
    return FALSE;
}

/*
 * Check if a funcref is assigned to a valid variable name.
 * Return TRUE and give an error if not.
 */
    int
var_wrong_func_name(
    char_u *name,    // points to start of variable name
    int    new_var)  // TRUE when creating the variable
{
    // Allow for w: b: s: and t:.  In Vim9 script s: is not allowed, because
    // the name can be used without the s: prefix.
    if (!((vim_strchr((char_u *)"wbt", name[0]) != NULL
		|| (!in_vim9script() && name[0] == 's')) && name[1] == ':')
	    && !ASCII_ISUPPER((name[0] != NUL && name[1] == ':')
						     ? name[2] : name[0]))
    {
	semsg(_(e_funcref_variable_name_must_start_with_capital_str), name);
	return TRUE;
    }
    // Don't allow hiding a function.  When "v" is not NULL we might be
    // assigning another function to the same var, the type is checked
    // below.
    if (new_var && function_exists(name, FALSE))
    {
	semsg(_(e_variable_name_conflicts_with_existing_function_str), name);
	return TRUE;
    }
    return FALSE;
}

/*
 * Return TRUE if "flags" indicates variable "name" has a locked (immutable)
 * value.  Also give an error message, using "name" or _("name") when
 * "use_gettext" is TRUE.
 */
    int
value_check_lock(int lock, char_u *name, int use_gettext)
{
    if (lock & VAR_LOCKED)
    {
	if (name == NULL)
	    emsg(_(e_value_is_locked));
	else
	    semsg(_(e_value_is_locked_str),
				      use_gettext ? (char_u *)_(name) : name);
	return TRUE;
    }
    if (lock & VAR_FIXED)
    {
	if (name == NULL)
	    emsg(_(e_cannot_change_value));
	else
	    semsg(_(e_cannot_change_value_of_str),
				      use_gettext ? (char_u *)_(name) : name);
	return TRUE;
    }
    return FALSE;
}

/*
 * Check if a variable name is valid.
When "autoload" is true "#" is allowed. * If "len" is -1 use all of "varname", otherwise up to "varname[len]". * Return FALSE and give an error if not. */ int valid_varname(char_u *varname, int len, int autoload) { char_u *p; for (p = varname; len < 0 ? *p != NUL : p < varname + len; ++p) if (!eval_isnamec1(*p) && (p == varname || !VIM_ISDIGIT(*p)) && !(autoload && *p == AUTOLOAD_CHAR)) { semsg(_(e_illegal_variable_name_str), varname); return FALSE; } return TRUE; } /* * getwinvar() and gettabwinvar() */ static void getwinvar( typval_T *argvars, typval_T *rettv, int off) // 1 for gettabwinvar() { win_T *win; char_u *varname; dictitem_T *v; tabpage_T *tp = NULL; int done = FALSE; switchwin_T switchwin; int need_switch_win; if (off == 1) tp = find_tabpage((int)tv_get_number_chk(&argvars[0], NULL)); else tp = curtab; win = find_win_by_nr(&argvars[off], tp); varname = tv_get_string_chk(&argvars[off + 1]); ++emsg_off; rettv->v_type = VAR_STRING; rettv->vval.v_string = NULL; if (win != NULL && varname != NULL) { // Set curwin to be our win, temporarily. Also set the tabpage, // otherwise the window is not valid. Only do this when needed, // autocommands get blocked. need_switch_win = !(tp == curtab && win == curwin); if (!need_switch_win || switch_win(&switchwin, win, tp, TRUE) == OK) { if (*varname == '&') { if (varname[1] == NUL) { // get all window-local options in a dict dict_T *opts = get_winbuf_options(FALSE); if (opts != NULL) { rettv_dict_set(rettv, opts); done = TRUE; } } else if (eval_option(&varname, rettv, 1) == OK) // window-local-option done = TRUE; } else { // Look up the variable. // Let getwinvar({nr}, "") return the "w:" dictionary. 
v = find_var_in_ht(&win->w_vars->dv_hashtab, 'w', varname, FALSE); if (v != NULL) { copy_tv(&v->di_tv, rettv); done = TRUE; } } } if (need_switch_win) // restore previous notion of curwin restore_win(&switchwin, TRUE); } if (!done && argvars[off + 2].v_type != VAR_UNKNOWN) // use the default return value copy_tv(&argvars[off + 2], rettv); --emsg_off; } /* * Set option "varname" to the value of "varp" for the current buffer/window. */ static void set_option_from_tv(char_u *varname, typval_T *varp) { long numval = 0; char_u *strval; char_u nbuf[NUMBUFLEN]; int error = FALSE; if (varp->v_type == VAR_BOOL) { numval = (long)varp->vval.v_number; strval = (char_u *)"0"; // avoid using "false" } else { if (!in_vim9script() || varp->v_type != VAR_STRING) numval = (long)tv_get_number_chk(varp, &error); strval = tv_get_string_buf_chk(varp, nbuf); } if (!error && strval != NULL) set_option_value(varname, numval, strval, OPT_LOCAL); } /* * "setwinvar()" and "settabwinvar()" functions */ static void setwinvar(typval_T *argvars, int off) { win_T *win; switchwin_T switchwin; int need_switch_win; char_u *varname, *winvarname; typval_T *varp; tabpage_T *tp = NULL; if (check_secure()) return; if (off == 1) tp = find_tabpage((int)tv_get_number_chk(&argvars[0], NULL)); else tp = curtab; win = find_win_by_nr(&argvars[off], tp); varname = tv_get_string_chk(&argvars[off + 1]); varp = &argvars[off + 2]; if (win != NULL && varname != NULL && varp != NULL) { need_switch_win = !(tp == curtab && win == curwin); if (!need_switch_win || switch_win(&switchwin, win, tp, TRUE) == OK) { if (*varname == '&') set_option_from_tv(varname + 1, varp); else { winvarname = alloc(STRLEN(varname) + 3); if (winvarname != NULL) { STRCPY(winvarname, "w:"); STRCPY(winvarname + 2, varname); set_var(winvarname, varp, TRUE); vim_free(winvarname); } } } if (need_switch_win) restore_win(&switchwin, TRUE); } } /* * reset v:option_new, v:option_old, v:option_oldlocal, v:option_oldglobal, * v:option_type, and 
v:option_command.
 */
    void
reset_v_option_vars(void)
{
    set_vim_var_string(VV_OPTION_NEW,  NULL, -1);
    set_vim_var_string(VV_OPTION_OLD,  NULL, -1);
    set_vim_var_string(VV_OPTION_OLDLOCAL, NULL, -1);
    set_vim_var_string(VV_OPTION_OLDGLOBAL, NULL, -1);
    set_vim_var_string(VV_OPTION_TYPE, NULL, -1);
    set_vim_var_string(VV_OPTION_COMMAND, NULL, -1);
}

/*
 * Add an assert error to v:errors.
 */
    void
assert_error(garray_T *gap)
{
    struct vimvar   *vp = &vimvars[VV_ERRORS];

    if (vp->vv_tv_type != VAR_LIST || vimvars[VV_ERRORS].vv_list == NULL)
	// Make sure v:errors is a list.
	set_vim_var_list(VV_ERRORS, list_alloc());
    list_append_string(vimvars[VV_ERRORS].vv_list, gap->ga_data, gap->ga_len);
}

/*
 * Return non-zero when variable "var" exists.  Handles curly-braces names
 * and trailing subscripts like "d.key" and "l[idx]".
 */
    int
var_exists(char_u *var)
{
    char_u	*arg = var;
    char_u	*name;
    char_u	*tofree;
    typval_T    tv;
    int		len = 0;
    int		n = FALSE;

    // get_name_len() takes care of expanding curly braces
    name = var;
    len = get_name_len(&arg, &tofree, TRUE, FALSE);
    if (len > 0)
    {
	if (tofree != NULL)
	    name = tofree;
	n = (eval_variable(name, len, 0, &tv, NULL,
				 EVAL_VAR_NOAUTOLOAD + EVAL_VAR_IMPORT) == OK);
	if (n)
	{
	    // handle d.key, l[idx], f(expr)
	    arg = skipwhite(arg);
	    n = (handle_subscript(&arg, name, &tv, &EVALARG_EVALUATE,
								FALSE) == OK);
	    if (n)
		clear_tv(&tv);
	}
    }
    // Any leftover characters mean the name was not a plain variable.
    if (*arg != NUL)
	n = FALSE;

    vim_free(tofree);
    return n;
}

// State for ":redir => var": target lval, collected output, parse position
// and a copy of the variable name.
static lval_T	*redir_lval = NULL;
#define EVALCMD_BUSY (redir_lval == (lval_T *)&redir_lval)
static garray_T redir_ga;	// only valid when redir_lval is not NULL
static char_u	*redir_endp = NULL;
static char_u	*redir_varname = NULL;

/*
 * Allocate "redir_lval".  Returns OK or FAIL (out of memory).
 */
    int
alloc_redir_lval(void)
{
    redir_lval = ALLOC_CLEAR_ONE(lval_T);
    if (redir_lval == NULL)
	return FAIL;
    return OK;
}

/*
 * Free "redir_lval" and reset the pointer.
 */
    void
clear_redir_lval(void)
{
    VIM_CLEAR(redir_lval);
}

/*
 * Initialize the growarray that collects redirected output.
 */
    void
init_redir_ga(void)
{
    ga_init2(&redir_ga, sizeof(char), 500);
}

/*
 * Start recording command output to a variable
 * When "append" is TRUE append to an existing variable.
 * Returns OK if successfully completed the setup.  FAIL otherwise.
*/ int var_redir_start(char_u *name, int append) { int called_emsg_before; typval_T tv; // Catch a bad name early. if (!eval_isnamec1(*name)) { emsg(_(e_invalid_argument)); return FAIL; } // Make a copy of the name, it is used in redir_lval until redir ends. redir_varname = vim_strsave(name); if (redir_varname == NULL) return FAIL; if (alloc_redir_lval() == FAIL) { var_redir_stop(); return FAIL; } // The output is stored in growarray "redir_ga" until redirection ends. init_redir_ga(); // Parse the variable name (can be a dict or list entry). redir_endp = get_lval(redir_varname, NULL, redir_lval, FALSE, FALSE, 0, FNE_CHECK_START); if (redir_endp == NULL || redir_lval->ll_name == NULL || *redir_endp != NUL) { clear_lval(redir_lval); if (redir_endp != NULL && *redir_endp != NUL) // Trailing characters are present after the variable name semsg(_(e_trailing_characters_str), redir_endp); else semsg(_(e_invalid_argument_str), name); redir_endp = NULL; // don't store a value, only cleanup var_redir_stop(); return FAIL; } // check if we can write to the variable: set it to or append an empty // string called_emsg_before = called_emsg; tv.v_type = VAR_STRING; tv.vval.v_string = (char_u *)""; if (append) set_var_lval(redir_lval, redir_endp, &tv, TRUE, ASSIGN_NO_DECL, (char_u *)".", 0); else set_var_lval(redir_lval, redir_endp, &tv, TRUE, ASSIGN_NO_DECL, (char_u *)"=", 0); clear_lval(redir_lval); if (called_emsg > called_emsg_before) { redir_endp = NULL; // don't store a value, only cleanup var_redir_stop(); return FAIL; } return OK; } /* * Append "value[value_len]" to the variable set by var_redir_start(). 
 * The actual appending is postponed until redirection ends, because the value
 * appended may in fact be the string we write to, changing it may cause freed
 * memory to be used:
 *   :redir => foo
 *   :let foo
 *   :redir END
 */
    void
var_redir_str(char_u *value, int value_len)
{
    int		len;

    if (redir_lval == NULL)
	return;		// not redirecting to a variable

    if (value_len == -1)
	len = (int)STRLEN(value);	// Append the entire string
    else
	len = value_len;		// Append only "value_len" characters

    if (ga_grow(&redir_ga, len) == OK)
    {
	mch_memmove((char *)redir_ga.ga_data + redir_ga.ga_len, value, len);
	redir_ga.ga_len += len;
    }
    else
	// Out of memory: give up on the redirection entirely.
	var_redir_stop();
}

/*
 * Stop redirecting command output to a variable.
 * Frees the allocated memory.
 */
    void
var_redir_stop(void)
{
    typval_T	tv;

    if (EVALCMD_BUSY)
    {
	redir_lval = NULL;
	return;
    }

    if (redir_lval != NULL)
    {
	// If there was no error: assign the text to the variable.
	if (redir_endp != NULL)
	{
	    ga_append(&redir_ga, NUL);	// Append the trailing NUL.
	    tv.v_type = VAR_STRING;
	    tv.vval.v_string = redir_ga.ga_data;
	    // Call get_lval() again, if it's inside a Dict or List it may
	    // have changed.
	    redir_endp = get_lval(redir_varname, NULL, redir_lval,
					FALSE, FALSE, 0, FNE_CHECK_START);
	    if (redir_endp != NULL && redir_lval->ll_name != NULL)
		set_var_lval(redir_lval, redir_endp, &tv, FALSE, 0,
							  (char_u *)".", 0);
	    clear_lval(redir_lval);
	}

	// free the collected output
	VIM_CLEAR(redir_ga.ga_data);

	VIM_CLEAR(redir_lval);
    }
    VIM_CLEAR(redir_varname);
}

/*
 * Get the collected redirected text and clear redir_ga.
 */
    char_u *
get_clear_redir_ga(void)
{
    char_u	*res;

    ga_append(&redir_ga, NUL);	// Append the trailing NUL.
res = redir_ga.ga_data; redir_ga.ga_data = NULL; return res; } /* * "gettabvar()" function */ void f_gettabvar(typval_T *argvars, typval_T *rettv) { switchwin_T switchwin; tabpage_T *tp; dictitem_T *v; char_u *varname; int done = FALSE; rettv->v_type = VAR_STRING; rettv->vval.v_string = NULL; if (in_vim9script() && (check_for_number_arg(argvars, 0) == FAIL || check_for_string_arg(argvars, 1) == FAIL)) return; varname = tv_get_string_chk(&argvars[1]); tp = find_tabpage((int)tv_get_number_chk(&argvars[0], NULL)); if (tp != NULL && varname != NULL) { // Set tp to be our tabpage, temporarily. Also set the window to the // first window in the tabpage, otherwise the window is not valid. if (switch_win(&switchwin, tp == curtab || tp->tp_firstwin == NULL ? firstwin : tp->tp_firstwin, tp, TRUE) == OK) { // look up the variable // Let gettabvar({nr}, "") return the "t:" dictionary. v = find_var_in_ht(&tp->tp_vars->dv_hashtab, 't', varname, FALSE); if (v != NULL) { copy_tv(&v->di_tv, rettv); done = TRUE; } } // restore previous notion of curwin restore_win(&switchwin, TRUE); } if (!done && argvars[2].v_type != VAR_UNKNOWN) copy_tv(&argvars[2], rettv); } /* * "gettabwinvar()" function */ void f_gettabwinvar(typval_T *argvars, typval_T *rettv) { if (in_vim9script() && (check_for_number_arg(argvars, 0) == FAIL || check_for_number_arg(argvars, 1) == FAIL || check_for_string_arg(argvars, 2) == FAIL)) return; getwinvar(argvars, rettv, 1); } /* * "getwinvar()" function */ void f_getwinvar(typval_T *argvars, typval_T *rettv) { if (in_vim9script() && (check_for_number_arg(argvars, 0) == FAIL || check_for_string_arg(argvars, 1) == FAIL)) return; getwinvar(argvars, rettv, 0); } /* * "getbufvar()" function */ void f_getbufvar(typval_T *argvars, typval_T *rettv) { buf_T *buf; char_u *varname; dictitem_T *v; int done = FALSE; if (in_vim9script() && (check_for_buffer_arg(argvars, 0) == FAIL || check_for_string_arg(argvars, 1) == FAIL)) return; varname = tv_get_string_chk(&argvars[1]); buf = 
tv_get_buf_from_arg(&argvars[0]); rettv->v_type = VAR_STRING; rettv->vval.v_string = NULL; if (buf != NULL && varname != NULL) { if (*varname == '&') { buf_T *save_curbuf = curbuf; // set curbuf to be our buf, temporarily curbuf = buf; if (varname[1] == NUL) { // get all buffer-local options in a dict dict_T *opts = get_winbuf_options(TRUE); if (opts != NULL) { rettv_dict_set(rettv, opts); done = TRUE; } } else if (eval_option(&varname, rettv, TRUE) == OK) // buffer-local-option done = TRUE; // restore previous notion of curbuf curbuf = save_curbuf; } else { // Look up the variable. if (*varname == NUL) // Let getbufvar({nr}, "") return the "b:" dictionary. v = &buf->b_bufvar; else v = find_var_in_ht(&buf->b_vars->dv_hashtab, 'b', varname, FALSE); if (v != NULL) { copy_tv(&v->di_tv, rettv); done = TRUE; } } } if (!done && argvars[2].v_type != VAR_UNKNOWN) // use the default value copy_tv(&argvars[2], rettv); } /* * "settabvar()" function */ void f_settabvar(typval_T *argvars, typval_T *rettv UNUSED) { tabpage_T *save_curtab; tabpage_T *tp; char_u *varname, *tabvarname; typval_T *varp; if (check_secure()) return; if (in_vim9script() && (check_for_number_arg(argvars, 0) == FAIL || check_for_string_arg(argvars, 1) == FAIL)) return; tp = find_tabpage((int)tv_get_number_chk(&argvars[0], NULL)); varname = tv_get_string_chk(&argvars[1]); varp = &argvars[2]; if (varname != NULL && varp != NULL && tp != NULL) { save_curtab = curtab; goto_tabpage_tp(tp, FALSE, FALSE); tabvarname = alloc(STRLEN(varname) + 3); if (tabvarname != NULL) { STRCPY(tabvarname, "t:"); STRCPY(tabvarname + 2, varname); set_var(tabvarname, varp, TRUE); vim_free(tabvarname); } // Restore current tabpage if (valid_tabpage(save_curtab)) goto_tabpage_tp(save_curtab, FALSE, FALSE); } } /* * "settabwinvar()" function */ void f_settabwinvar(typval_T *argvars, typval_T *rettv UNUSED) { if (in_vim9script() && (check_for_number_arg(argvars, 0) == FAIL || check_for_number_arg(argvars, 1) == FAIL || 
check_for_string_arg(argvars, 2) == FAIL)) return; setwinvar(argvars, 1); } /* * "setwinvar()" function */ void f_setwinvar(typval_T *argvars, typval_T *rettv UNUSED) { if (in_vim9script() && (check_for_number_arg(argvars, 0) == FAIL || check_for_string_arg(argvars, 1) == FAIL)) return; setwinvar(argvars, 0); } /* * "setbufvar()" function */ void f_setbufvar(typval_T *argvars, typval_T *rettv UNUSED) { buf_T *buf; char_u *varname, *bufvarname; typval_T *varp; if (check_secure()) return; if (in_vim9script() && (check_for_buffer_arg(argvars, 0) == FAIL || check_for_string_arg(argvars, 1) == FAIL)) return; varname = tv_get_string_chk(&argvars[1]); buf = tv_get_buf_from_arg(&argvars[0]); varp = &argvars[2]; if (buf != NULL && varname != NULL && varp != NULL) { if (*varname == '&') { aco_save_T aco; // set curbuf to be our buf, temporarily aucmd_prepbuf(&aco, buf); set_option_from_tv(varname + 1, varp); // reset notion of buffer aucmd_restbuf(&aco); } else { bufvarname = alloc(STRLEN(varname) + 3); if (bufvarname != NULL) { buf_T *save_curbuf = curbuf; curbuf = buf; STRCPY(bufvarname, "b:"); STRCPY(bufvarname + 2, varname); set_var(bufvarname, varp, TRUE); vim_free(bufvarname); curbuf = save_curbuf; } } } } /* * Get a callback from "arg". It can be a Funcref or a function name. * When "arg" is zero return an empty string. * "cb_name" is not allocated. * "cb_name" is set to NULL for an invalid argument. 
 */
    callback_T
get_callback(typval_T *arg)
{
    callback_T  res;
    int		r = OK;

    res.cb_free_name = FALSE;
    if (arg->v_type == VAR_PARTIAL && arg->vval.v_partial != NULL)
    {
	// Partial: share it and use its function name.
	res.cb_partial = arg->vval.v_partial;
	++res.cb_partial->pt_refcount;
	res.cb_name = partial_name(res.cb_partial);
    }
    else
    {
	res.cb_partial = NULL;
	// A string starting with a digit cannot be a function name.
	if (arg->v_type == VAR_STRING && arg->vval.v_string != NULL
					&& isdigit(*arg->vval.v_string))
	    r = FAIL;
	else if (arg->v_type == VAR_FUNC || arg->v_type == VAR_STRING)
	{
	    if (arg->v_type == VAR_STRING)
	    {
		char_u *name;

		// Translate "s:Func" to the script-local function name.
		name = get_scriptlocal_funcname(arg->vval.v_string);
		if (name != NULL)
		{
		    vim_free(arg->vval.v_string);
		    arg->vval.v_string = name;
		}
	    }

	    res.cb_name = arg->vval.v_string;
	    func_ref(res.cb_name);
	}
	else if (arg->v_type == VAR_NUMBER && arg->vval.v_number == 0)
	    // Number zero means "no callback".
	    res.cb_name = (char_u *)"";
	else
	    r = FAIL;

	if (r == FAIL)
	{
	    emsg(_(e_invalid_callback_argument));
	    res.cb_name = NULL;
	}
    }
    return res;
}

/*
 * Copy a callback into a typval_T.
 */
    void
put_callback(callback_T *cb, typval_T *tv)
{
    if (cb->cb_partial != NULL)
    {
	tv->v_type = VAR_PARTIAL;
	tv->vval.v_partial = cb->cb_partial;
	++tv->vval.v_partial->pt_refcount;
    }
    else
    {
	tv->v_type = VAR_FUNC;
	tv->vval.v_string = vim_strsave(cb->cb_name);
	func_ref(cb->cb_name);
    }
}

/*
 * Make a copy of "src" into "dest", allocating the function name if needed,
 * without incrementing the refcount.
 */
    void
set_callback(callback_T *dest, callback_T *src)
{
    if (src->cb_partial == NULL)
    {
	// just a function name, make a copy
	dest->cb_name = vim_strsave(src->cb_name);
	dest->cb_free_name = TRUE;
    }
    else
    {
	// cb_name is a pointer into cb_partial
	dest->cb_name = src->cb_name;
	dest->cb_free_name = FALSE;
    }
    dest->cb_partial = src->cb_partial;
}

/*
 * Copy callback from "src" to "dest", incrementing the refcounts.
 */
    void
copy_callback(callback_T *dest, callback_T *src)
{
    dest->cb_partial = src->cb_partial;
    if (dest->cb_partial != NULL)
    {
	// The partial owns the name; share the pointer and bump the
	// partial's refcount.
	dest->cb_name = src->cb_name;
	dest->cb_free_name = FALSE;
	++dest->cb_partial->pt_refcount;
    }
    else
    {
	// Plain function name: duplicate it and add a function reference.
	dest->cb_name = vim_strsave(src->cb_name);
	dest->cb_free_name = TRUE;
	func_ref(src->cb_name);
    }
}

/*
 * When a callback refers to an autoload import, change the function name to
 * the "path#name" form.  Uses the current script context.
 * Only works when the name is allocated.
 */
    void
expand_autload_callback(callback_T *cb)
{
    char_u	*name;
    char_u	*p;
    imported_T	*import;

    if (!in_vim9script() || cb->cb_name == NULL
	    || (!cb->cb_free_name
		 && (cb->cb_partial == NULL
					|| cb->cb_partial->pt_name == NULL)))
	return;
    if (cb->cb_partial != NULL)
	name = cb->cb_partial->pt_name;
    else
	name = cb->cb_name;
    p = vim_strchr(name, '.');
    if (p == NULL)
	return;		// not an imported "module.Func" name

    import = find_imported(name, p - name, FALSE);
    if (import != NULL && SCRIPT_ID_VALID(import->imp_sid))
    {
	scriptitem_T *si = SCRIPT_ITEM(import->imp_sid);

	if (si->sn_autoload_prefix != NULL)
	{
	    char_u *newname = concat_str(si->sn_autoload_prefix, p + 1);

	    if (newname != NULL)
	    {
		if (cb->cb_partial != NULL)
		{
		    // Keep cb_name pointing at the partial's name when it
		    // did so before the replacement.
		    if (cb->cb_name == cb->cb_partial->pt_name)
			cb->cb_name = newname;
		    vim_free(cb->cb_partial->pt_name);
		    cb->cb_partial->pt_name = newname;
		}
		else
		{
		    vim_free(cb->cb_name);
		    cb->cb_name = newname;
		}
	    }
	}
    }
}

/*
 * Unref/free "callback" returned by get_callback() or set_callback().
 */
    void
free_callback(callback_T *callback)
{
    if (callback->cb_partial != NULL)
    {
	partial_unref(callback->cb_partial);
	callback->cb_partial = NULL;
    }
    else if (callback->cb_name != NULL)
	func_unref(callback->cb_name);
    if (callback->cb_free_name)
    {
	vim_free(callback->cb_name);
	callback->cb_free_name = FALSE;
    }
    callback->cb_name = NULL;
}

#endif // FEAT_EVAL
/* vi:set ts=8 sts=4 sw=4 noet: * * VIM - Vi IMproved by Bram Moolenaar * * Do ":help uganda" in Vim to read copying and usage conditions. * Do ":help credits" in Vim to see a list of people who contributed. * See README.txt for an overview of the Vim source code. */ /* * evalvars.c: functions for dealing with variables */ #include "vim.h" #if defined(FEAT_EVAL) || defined(PROTO) static dictitem_T globvars_var; // variable used for g: static dict_T globvardict; // Dictionary with g: variables #define globvarht globvardict.dv_hashtab /* * Old Vim variables such as "v:version" are also available without the "v:". * Also in functions. We need a special hashtable for them. */ static hashtab_T compat_hashtab; /* * Array to hold the value of v: variables. * The value is in a dictitem, so that it can also be used in the v: scope. * The reason to use this table anyway is for very quick access to the * variables with the VV_ defines. */ // values for vv_flags: #define VV_COMPAT 1 // compatible, also used without "v:" #define VV_RO 2 // read-only #define VV_RO_SBX 4 // read-only in the sandbox #define VV_NAME(s, t) s, {{t, 0, {0}}, 0, {0}} typedef struct vimvar vimvar_T; static struct vimvar { char *vv_name; // name of variable, without v: dictitem16_T vv_di; // value and name for key (max 16 chars!) type_T *vv_type; // type or NULL char vv_flags; // VV_COMPAT, VV_RO, VV_RO_SBX } vimvars[VV_LEN] = { // The order here must match the VV_ defines in vim.h! // Initializing a union does not work, leave tv.vval empty to get zero's. 
{VV_NAME("count", VAR_NUMBER), NULL, VV_COMPAT+VV_RO}, {VV_NAME("count1", VAR_NUMBER), NULL, VV_RO}, {VV_NAME("prevcount", VAR_NUMBER), NULL, VV_RO}, {VV_NAME("errmsg", VAR_STRING), NULL, VV_COMPAT}, {VV_NAME("warningmsg", VAR_STRING), NULL, 0}, {VV_NAME("statusmsg", VAR_STRING), NULL, 0}, {VV_NAME("shell_error", VAR_NUMBER), NULL, VV_COMPAT+VV_RO}, {VV_NAME("this_session", VAR_STRING), NULL, VV_COMPAT}, {VV_NAME("version", VAR_NUMBER), NULL, VV_COMPAT+VV_RO}, {VV_NAME("lnum", VAR_NUMBER), NULL, VV_RO_SBX}, {VV_NAME("termresponse", VAR_STRING), NULL, VV_RO}, {VV_NAME("fname", VAR_STRING), NULL, VV_RO}, {VV_NAME("lang", VAR_STRING), NULL, VV_RO}, {VV_NAME("lc_time", VAR_STRING), NULL, VV_RO}, {VV_NAME("ctype", VAR_STRING), NULL, VV_RO}, {VV_NAME("charconvert_from", VAR_STRING), NULL, VV_RO}, {VV_NAME("charconvert_to", VAR_STRING), NULL, VV_RO}, {VV_NAME("fname_in", VAR_STRING), NULL, VV_RO}, {VV_NAME("fname_out", VAR_STRING), NULL, VV_RO}, {VV_NAME("fname_new", VAR_STRING), NULL, VV_RO}, {VV_NAME("fname_diff", VAR_STRING), NULL, VV_RO}, {VV_NAME("cmdarg", VAR_STRING), NULL, VV_RO}, {VV_NAME("foldstart", VAR_NUMBER), NULL, VV_RO_SBX}, {VV_NAME("foldend", VAR_NUMBER), NULL, VV_RO_SBX}, {VV_NAME("folddashes", VAR_STRING), NULL, VV_RO_SBX}, {VV_NAME("foldlevel", VAR_NUMBER), NULL, VV_RO_SBX}, {VV_NAME("progname", VAR_STRING), NULL, VV_RO}, {VV_NAME("servername", VAR_STRING), NULL, VV_RO}, {VV_NAME("dying", VAR_NUMBER), NULL, VV_RO}, {VV_NAME("exception", VAR_STRING), NULL, VV_RO}, {VV_NAME("throwpoint", VAR_STRING), NULL, VV_RO}, {VV_NAME("register", VAR_STRING), NULL, VV_RO}, {VV_NAME("cmdbang", VAR_NUMBER), NULL, VV_RO}, {VV_NAME("insertmode", VAR_STRING), NULL, VV_RO}, {VV_NAME("val", VAR_UNKNOWN), NULL, VV_RO}, {VV_NAME("key", VAR_UNKNOWN), NULL, VV_RO}, {VV_NAME("profiling", VAR_NUMBER), NULL, VV_RO}, {VV_NAME("fcs_reason", VAR_STRING), NULL, VV_RO}, {VV_NAME("fcs_choice", VAR_STRING), NULL, 0}, {VV_NAME("beval_bufnr", VAR_NUMBER), NULL, VV_RO}, 
{VV_NAME("beval_winnr", VAR_NUMBER), NULL, VV_RO}, {VV_NAME("beval_winid", VAR_NUMBER), NULL, VV_RO}, {VV_NAME("beval_lnum", VAR_NUMBER), NULL, VV_RO}, {VV_NAME("beval_col", VAR_NUMBER), NULL, VV_RO}, {VV_NAME("beval_text", VAR_STRING), NULL, VV_RO}, {VV_NAME("scrollstart", VAR_STRING), NULL, 0}, {VV_NAME("swapname", VAR_STRING), NULL, VV_RO}, {VV_NAME("swapchoice", VAR_STRING), NULL, 0}, {VV_NAME("swapcommand", VAR_STRING), NULL, VV_RO}, {VV_NAME("char", VAR_STRING), NULL, 0}, {VV_NAME("mouse_win", VAR_NUMBER), NULL, 0}, {VV_NAME("mouse_winid", VAR_NUMBER), NULL, 0}, {VV_NAME("mouse_lnum", VAR_NUMBER), NULL, 0}, {VV_NAME("mouse_col", VAR_NUMBER), NULL, 0}, {VV_NAME("operator", VAR_STRING), NULL, VV_RO}, {VV_NAME("searchforward", VAR_NUMBER), NULL, 0}, {VV_NAME("hlsearch", VAR_NUMBER), NULL, 0}, {VV_NAME("oldfiles", VAR_LIST), &t_list_string, 0}, {VV_NAME("windowid", VAR_NUMBER), NULL, VV_RO}, {VV_NAME("progpath", VAR_STRING), NULL, VV_RO}, {VV_NAME("completed_item", VAR_DICT), &t_dict_string, VV_RO}, {VV_NAME("option_new", VAR_STRING), NULL, VV_RO}, {VV_NAME("option_old", VAR_STRING), NULL, VV_RO}, {VV_NAME("option_oldlocal", VAR_STRING), NULL, VV_RO}, {VV_NAME("option_oldglobal", VAR_STRING), NULL, VV_RO}, {VV_NAME("option_command", VAR_STRING), NULL, VV_RO}, {VV_NAME("option_type", VAR_STRING), NULL, VV_RO}, {VV_NAME("errors", VAR_LIST), &t_list_string, 0}, {VV_NAME("false", VAR_BOOL), NULL, VV_RO}, {VV_NAME("true", VAR_BOOL), NULL, VV_RO}, {VV_NAME("none", VAR_SPECIAL), NULL, VV_RO}, {VV_NAME("null", VAR_SPECIAL), NULL, VV_RO}, {VV_NAME("numbermax", VAR_NUMBER), NULL, VV_RO}, {VV_NAME("numbermin", VAR_NUMBER), NULL, VV_RO}, {VV_NAME("numbersize", VAR_NUMBER), NULL, VV_RO}, {VV_NAME("vim_did_enter", VAR_NUMBER), NULL, VV_RO}, {VV_NAME("testing", VAR_NUMBER), NULL, 0}, {VV_NAME("t_number", VAR_NUMBER), NULL, VV_RO}, {VV_NAME("t_string", VAR_NUMBER), NULL, VV_RO}, {VV_NAME("t_func", VAR_NUMBER), NULL, VV_RO}, {VV_NAME("t_list", VAR_NUMBER), NULL, VV_RO}, 
{VV_NAME("t_dict", VAR_NUMBER), NULL, VV_RO}, {VV_NAME("t_float", VAR_NUMBER), NULL, VV_RO}, {VV_NAME("t_bool", VAR_NUMBER), NULL, VV_RO}, {VV_NAME("t_none", VAR_NUMBER), NULL, VV_RO}, {VV_NAME("t_job", VAR_NUMBER), NULL, VV_RO}, {VV_NAME("t_channel", VAR_NUMBER), NULL, VV_RO}, {VV_NAME("t_blob", VAR_NUMBER), NULL, VV_RO}, {VV_NAME("termrfgresp", VAR_STRING), NULL, VV_RO}, {VV_NAME("termrbgresp", VAR_STRING), NULL, VV_RO}, {VV_NAME("termu7resp", VAR_STRING), NULL, VV_RO}, {VV_NAME("termstyleresp", VAR_STRING), NULL, VV_RO}, {VV_NAME("termblinkresp", VAR_STRING), NULL, VV_RO}, {VV_NAME("event", VAR_DICT), NULL, VV_RO}, {VV_NAME("versionlong", VAR_NUMBER), NULL, VV_RO}, {VV_NAME("echospace", VAR_NUMBER), NULL, VV_RO}, {VV_NAME("argv", VAR_LIST), &t_list_string, VV_RO}, {VV_NAME("collate", VAR_STRING), NULL, VV_RO}, {VV_NAME("exiting", VAR_SPECIAL), NULL, VV_RO}, {VV_NAME("colornames", VAR_DICT), &t_dict_string, VV_RO}, {VV_NAME("sizeofint", VAR_NUMBER), NULL, VV_RO}, {VV_NAME("sizeoflong", VAR_NUMBER), NULL, VV_RO}, {VV_NAME("sizeofpointer", VAR_NUMBER), NULL, VV_RO}, {VV_NAME("maxcol", VAR_NUMBER), NULL, VV_RO}, }; // shorthand #define vv_tv_type vv_di.di_tv.v_type #define vv_nr vv_di.di_tv.vval.v_number #define vv_float vv_di.di_tv.vval.v_float #define vv_str vv_di.di_tv.vval.v_string #define vv_list vv_di.di_tv.vval.v_list #define vv_dict vv_di.di_tv.vval.v_dict #define vv_blob vv_di.di_tv.vval.v_blob #define vv_tv vv_di.di_tv static dictitem_T vimvars_var; // variable used for v: static dict_T vimvardict; // Dictionary with v: variables #define vimvarht vimvardict.dv_hashtab // for VIM_VERSION_ defines #include "version.h" static void list_glob_vars(int *first); static void list_buf_vars(int *first); static void list_win_vars(int *first); static void list_tab_vars(int *first); static char_u *list_arg_vars(exarg_T *eap, char_u *arg, int *first); static char_u *ex_let_one(char_u *arg, typval_T *tv, int copy, int flags, char_u *endchars, char_u *op, int var_idx); 
static int do_unlet_var(lval_T *lp, char_u *name_end, exarg_T *eap, int deep, void *cookie); static int do_lock_var(lval_T *lp, char_u *name_end, exarg_T *eap, int deep, void *cookie); static void list_one_var(dictitem_T *v, char *prefix, int *first); static void list_one_var_a(char *prefix, char_u *name, int type, char_u *string, int *first); /* * Initialize global and vim special variables */ void evalvars_init(void) { int i; struct vimvar *p; init_var_dict(&globvardict, &globvars_var, VAR_DEF_SCOPE); init_var_dict(&vimvardict, &vimvars_var, VAR_SCOPE); vimvardict.dv_lock = VAR_FIXED; hash_init(&compat_hashtab); for (i = 0; i < VV_LEN; ++i) { p = &vimvars[i]; if (STRLEN(p->vv_name) > DICTITEM16_KEY_LEN) { iemsg("INTERNAL: name too long, increase size of dictitem16_T"); getout(1); } STRCPY(p->vv_di.di_key, p->vv_name); if (p->vv_flags & VV_RO) p->vv_di.di_flags = DI_FLAGS_RO | DI_FLAGS_FIX; else if (p->vv_flags & VV_RO_SBX) p->vv_di.di_flags = DI_FLAGS_RO_SBX | DI_FLAGS_FIX; else p->vv_di.di_flags = DI_FLAGS_FIX; // add to v: scope dict, unless the value is not always available if (p->vv_tv_type != VAR_UNKNOWN) hash_add(&vimvarht, p->vv_di.di_key); if (p->vv_flags & VV_COMPAT) // add to compat scope dict hash_add(&compat_hashtab, p->vv_di.di_key); } set_vim_var_nr(VV_VERSION, VIM_VERSION_100); set_vim_var_nr(VV_VERSIONLONG, VIM_VERSION_100 * 10000 + highest_patch()); set_vim_var_nr(VV_SEARCHFORWARD, 1L); set_vim_var_nr(VV_HLSEARCH, 1L); set_vim_var_nr(VV_EXITING, VVAL_NULL); set_vim_var_dict(VV_COMPLETED_ITEM, dict_alloc_lock(VAR_FIXED)); set_vim_var_list(VV_ERRORS, list_alloc()); set_vim_var_dict(VV_EVENT, dict_alloc_lock(VAR_FIXED)); set_vim_var_nr(VV_FALSE, VVAL_FALSE); set_vim_var_nr(VV_TRUE, VVAL_TRUE); set_vim_var_nr(VV_NONE, VVAL_NONE); set_vim_var_nr(VV_NULL, VVAL_NULL); set_vim_var_nr(VV_NUMBERMAX, VARNUM_MAX); set_vim_var_nr(VV_NUMBERMIN, VARNUM_MIN); set_vim_var_nr(VV_NUMBERSIZE, sizeof(varnumber_T) * 8); set_vim_var_nr(VV_SIZEOFINT, sizeof(int)); 
set_vim_var_nr(VV_SIZEOFLONG, sizeof(long)); set_vim_var_nr(VV_SIZEOFPOINTER, sizeof(char *)); set_vim_var_nr(VV_MAXCOL, MAXCOL); set_vim_var_nr(VV_TYPE_NUMBER, VAR_TYPE_NUMBER); set_vim_var_nr(VV_TYPE_STRING, VAR_TYPE_STRING); set_vim_var_nr(VV_TYPE_FUNC, VAR_TYPE_FUNC); set_vim_var_nr(VV_TYPE_LIST, VAR_TYPE_LIST); set_vim_var_nr(VV_TYPE_DICT, VAR_TYPE_DICT); set_vim_var_nr(VV_TYPE_FLOAT, VAR_TYPE_FLOAT); set_vim_var_nr(VV_TYPE_BOOL, VAR_TYPE_BOOL); set_vim_var_nr(VV_TYPE_NONE, VAR_TYPE_NONE); set_vim_var_nr(VV_TYPE_JOB, VAR_TYPE_JOB); set_vim_var_nr(VV_TYPE_CHANNEL, VAR_TYPE_CHANNEL); set_vim_var_nr(VV_TYPE_BLOB, VAR_TYPE_BLOB); set_vim_var_nr(VV_ECHOSPACE, sc_col - 1); set_vim_var_dict(VV_COLORNAMES, dict_alloc()); // Default for v:register is not 0 but '"'. This is adjusted once the // clipboard has been setup by calling reset_reg_var(). set_reg_var(0); } #if defined(EXITFREE) || defined(PROTO) /* * Free all vim variables information on exit */ void evalvars_clear(void) { int i; struct vimvar *p; for (i = 0; i < VV_LEN; ++i) { p = &vimvars[i]; if (p->vv_di.di_tv.v_type == VAR_STRING) VIM_CLEAR(p->vv_str); else if (p->vv_di.di_tv.v_type == VAR_LIST) { list_unref(p->vv_list); p->vv_list = NULL; } } hash_clear(&vimvarht); hash_init(&vimvarht); // garbage_collect() will access it hash_clear(&compat_hashtab); // global variables vars_clear(&globvarht); // Script-local variables. Clear all the variables here. // The scriptvar_T is cleared later in free_scriptnames(), because a // variable in one script might hold a reference to the whole scope of // another script. 
for (i = 1; i <= script_items.ga_len; ++i) vars_clear(&SCRIPT_VARS(i)); } #endif int garbage_collect_globvars(int copyID) { return set_ref_in_ht(&globvarht, copyID, NULL); } int garbage_collect_vimvars(int copyID) { return set_ref_in_ht(&vimvarht, copyID, NULL); } int garbage_collect_scriptvars(int copyID) { int i; int idx; int abort = FALSE; scriptitem_T *si; for (i = 1; i <= script_items.ga_len; ++i) { abort = abort || set_ref_in_ht(&SCRIPT_VARS(i), copyID, NULL); si = SCRIPT_ITEM(i); for (idx = 0; idx < si->sn_var_vals.ga_len; ++idx) { svar_T *sv = ((svar_T *)si->sn_var_vals.ga_data) + idx; if (sv->sv_name != NULL) abort = abort || set_ref_in_item(sv->sv_tv, copyID, NULL, NULL); } } return abort; } /* * Set an internal variable to a string value. Creates the variable if it does * not already exist. */ void set_internal_string_var(char_u *name, char_u *value) { char_u *val; typval_T *tvp; val = vim_strsave(value); if (val != NULL) { tvp = alloc_string_tv(val); if (tvp != NULL) { set_var(name, tvp, FALSE); free_tv(tvp); } } } int eval_charconvert( char_u *enc_from, char_u *enc_to, char_u *fname_from, char_u *fname_to) { int err = FALSE; sctx_T saved_sctx = current_sctx; sctx_T *ctx; set_vim_var_string(VV_CC_FROM, enc_from, -1); set_vim_var_string(VV_CC_TO, enc_to, -1); set_vim_var_string(VV_FNAME_IN, fname_from, -1); set_vim_var_string(VV_FNAME_OUT, fname_to, -1); ctx = get_option_sctx("charconvert"); if (ctx != NULL) current_sctx = *ctx; if (eval_to_bool(p_ccv, &err, NULL, FALSE)) err = TRUE; set_vim_var_string(VV_CC_FROM, NULL, -1); set_vim_var_string(VV_CC_TO, NULL, -1); set_vim_var_string(VV_FNAME_IN, NULL, -1); set_vim_var_string(VV_FNAME_OUT, NULL, -1); current_sctx = saved_sctx; if (err) return FAIL; return OK; } # if defined(FEAT_POSTSCRIPT) || defined(PROTO) int eval_printexpr(char_u *fname, char_u *args) { int err = FALSE; sctx_T saved_sctx = current_sctx; sctx_T *ctx; set_vim_var_string(VV_FNAME_IN, fname, -1); set_vim_var_string(VV_CMDARG, args, -1); 
ctx = get_option_sctx("printexpr"); if (ctx != NULL) current_sctx = *ctx; if (eval_to_bool(p_pexpr, &err, NULL, FALSE)) err = TRUE; set_vim_var_string(VV_FNAME_IN, NULL, -1); set_vim_var_string(VV_CMDARG, NULL, -1); current_sctx = saved_sctx; if (err) { mch_remove(fname); return FAIL; } return OK; } # endif # if defined(FEAT_DIFF) || defined(PROTO) void eval_diff( char_u *origfile, char_u *newfile, char_u *outfile) { sctx_T saved_sctx = current_sctx; sctx_T *ctx; typval_T *tv; set_vim_var_string(VV_FNAME_IN, origfile, -1); set_vim_var_string(VV_FNAME_NEW, newfile, -1); set_vim_var_string(VV_FNAME_OUT, outfile, -1); ctx = get_option_sctx("diffexpr"); if (ctx != NULL) current_sctx = *ctx; // errors are ignored tv = eval_expr(p_dex, NULL); free_tv(tv); set_vim_var_string(VV_FNAME_IN, NULL, -1); set_vim_var_string(VV_FNAME_NEW, NULL, -1); set_vim_var_string(VV_FNAME_OUT, NULL, -1); current_sctx = saved_sctx; } void eval_patch( char_u *origfile, char_u *difffile, char_u *outfile) { sctx_T saved_sctx = current_sctx; sctx_T *ctx; typval_T *tv; set_vim_var_string(VV_FNAME_IN, origfile, -1); set_vim_var_string(VV_FNAME_DIFF, difffile, -1); set_vim_var_string(VV_FNAME_OUT, outfile, -1); ctx = get_option_sctx("patchexpr"); if (ctx != NULL) current_sctx = *ctx; // errors are ignored tv = eval_expr(p_pex, NULL); free_tv(tv); set_vim_var_string(VV_FNAME_IN, NULL, -1); set_vim_var_string(VV_FNAME_DIFF, NULL, -1); set_vim_var_string(VV_FNAME_OUT, NULL, -1); current_sctx = saved_sctx; } # endif #if defined(FEAT_SPELL) || defined(PROTO) /* * Evaluate an expression to a list with suggestions. * For the "expr:" part of 'spellsuggest'. * Returns NULL when there is an error. */ list_T * eval_spell_expr(char_u *badword, char_u *expr) { typval_T save_val; typval_T rettv; list_T *list = NULL; char_u *p = skipwhite(expr); sctx_T saved_sctx = current_sctx; sctx_T *ctx; // Set "v:val" to the bad word. 
prepare_vimvar(VV_VAL, &save_val); set_vim_var_string(VV_VAL, badword, -1); if (p_verbose == 0) ++emsg_off; ctx = get_option_sctx("spellsuggest"); if (ctx != NULL) current_sctx = *ctx; if (eval1(&p, &rettv, &EVALARG_EVALUATE) == OK) { if (rettv.v_type != VAR_LIST) clear_tv(&rettv); else list = rettv.vval.v_list; } if (p_verbose == 0) --emsg_off; clear_tv(get_vim_var_tv(VV_VAL)); restore_vimvar(VV_VAL, &save_val); current_sctx = saved_sctx; return list; } /* * "list" is supposed to contain two items: a word and a number. Return the * word in "pp" and the number as the return value. * Return -1 if anything isn't right. * Used to get the good word and score from the eval_spell_expr() result. */ int get_spellword(list_T *list, char_u **pp) { listitem_T *li; li = list->lv_first; if (li == NULL) return -1; *pp = tv_get_string(&li->li_tv); li = li->li_next; if (li == NULL) return -1; return (int)tv_get_number(&li->li_tv); } #endif /* * Prepare v: variable "idx" to be used. * Save the current typeval in "save_tv" and clear it. * When not used yet add the variable to the v: hashtable. */ void prepare_vimvar(int idx, typval_T *save_tv) { *save_tv = vimvars[idx].vv_tv; vimvars[idx].vv_str = NULL; // don't free it now if (vimvars[idx].vv_tv_type == VAR_UNKNOWN) hash_add(&vimvarht, vimvars[idx].vv_di.di_key); } /* * Restore v: variable "idx" to typeval "save_tv". * Note that the v: variable must have been cleared already. * When no longer defined, remove the variable from the v: hashtable. */ void restore_vimvar(int idx, typval_T *save_tv) { hashitem_T *hi; vimvars[idx].vv_tv = *save_tv; if (vimvars[idx].vv_tv_type == VAR_UNKNOWN) { hi = hash_find(&vimvarht, vimvars[idx].vv_di.di_key); if (HASHITEM_EMPTY(hi)) internal_error("restore_vimvar()"); else hash_remove(&vimvarht, hi); } } /* * List Vim variables. */ static void list_vim_vars(int *first) { list_hashtable_vars(&vimvarht, "v:", FALSE, first); } /* * List script-local variables, if there is a script. 
*/ static void list_script_vars(int *first) { if (SCRIPT_ID_VALID(current_sctx.sc_sid)) list_hashtable_vars(&SCRIPT_VARS(current_sctx.sc_sid), "s:", FALSE, first); } /* * Get a list of lines from a HERE document. The here document is a list of * lines surrounded by a marker. * cmd << {marker} * {line1} * {line2} * .... * {marker} * * The {marker} is a string. If the optional 'trim' word is supplied before the * marker, then the leading indentation before the lines (matching the * indentation in the 'cmd' line) is stripped. * * When getting lines for an embedded script (e.g. python, lua, perl, ruby, * tcl, mzscheme), script_get is set to TRUE. In this case, if the marker is * missing, then '.' is accepted as a marker. * * Returns a List with {lines} or NULL. */ list_T * heredoc_get(exarg_T *eap, char_u *cmd, int script_get) { char_u *theline; char_u *marker; list_T *l; char_u *p; int marker_indent_len = 0; int text_indent_len = 0; char_u *text_indent = NULL; char_u dot[] = "."; int comment_char = in_vim9script() ? '#' : '"'; if (eap->getline == NULL) { emsg(_(e_cannot_use_heredoc_here)); return NULL; } // Check for the optional 'trim' word before the marker cmd = skipwhite(cmd); if (STRNCMP(cmd, "trim", 4) == 0 && (cmd[4] == NUL || VIM_ISWHITE(cmd[4]))) { cmd = skipwhite(cmd + 4); // Trim the indentation from all the lines in the here document. // The amount of indentation trimmed is the same as the indentation of // the first line after the :let command line. To find the end marker // the indent of the :let command line is trimmed. p = *eap->cmdlinep; while (VIM_ISWHITE(*p)) { p++; marker_indent_len++; } text_indent_len = -1; } // The marker is the next word. 
if (*cmd != NUL && *cmd != comment_char) { marker = skipwhite(cmd); p = skiptowhite(marker); if (*skipwhite(p) != NUL && *skipwhite(p) != comment_char) { semsg(_(e_trailing_characters_str), p); return NULL; } *p = NUL; if (!script_get && vim_islower(*marker)) { emsg(_(e_marker_cannot_start_with_lower_case_letter)); return NULL; } } else { // When getting lines for an embedded script, if the marker is missing, // accept '.' as the marker. if (script_get) marker = dot; else { emsg(_(e_missing_marker)); return NULL; } } l = list_alloc(); if (l == NULL) return NULL; for (;;) { int mi = 0; int ti = 0; theline = eap->getline(NUL, eap->cookie, 0, FALSE); if (theline == NULL) { semsg(_(e_missing_end_marker_str), marker); break; } // with "trim": skip the indent matching the :let line to find the // marker if (marker_indent_len > 0 && STRNCMP(theline, *eap->cmdlinep, marker_indent_len) == 0) mi = marker_indent_len; if (STRCMP(marker, theline + mi) == 0) { vim_free(theline); break; } if (text_indent_len == -1 && *theline != NUL) { // set the text indent from the first line. p = theline; text_indent_len = 0; while (VIM_ISWHITE(*p)) { p++; text_indent_len++; } text_indent = vim_strnsave(theline, text_indent_len); } // with "trim": skip the indent matching the first line if (text_indent != NULL) for (ti = 0; ti < text_indent_len; ++ti) if (theline[ti] != text_indent[ti]) break; if (list_append_string(l, theline + ti, -1) == FAIL) break; vim_free(theline); } vim_free(text_indent); return l; } /* * Vim9 variable declaration: * ":var name" * ":var name: type" * ":var name = expr" * ":var name: type = expr" * etc. */ void ex_var(exarg_T *eap) { if (!in_vim9script()) { semsg(_(e_str_cannot_be_used_in_legacy_vim_script), ":var"); return; } ex_let(eap); } /* * ":let" list all variable values * ":let var1 var2" list variable values * ":let var = expr" assignment command. * ":let var += expr" assignment command. * ":let var -= expr" assignment command. 
* ":let var *= expr" assignment command. * ":let var /= expr" assignment command. * ":let var %= expr" assignment command. * ":let var .= expr" assignment command. * ":let var ..= expr" assignment command. * ":let [var1, var2] = expr" unpack list. * ":let var =<< ..." heredoc * ":let var: string" Vim9 declaration * * ":final var = expr" assignment command. * ":final [var1, var2] = expr" unpack list. * * ":const" list all variable values * ":const var1 var2" list variable values * ":const var = expr" assignment command. * ":const [var1, var2] = expr" unpack list. */ void ex_let(exarg_T *eap) { char_u *arg = eap->arg; char_u *expr = NULL; typval_T rettv; int i; int var_count = 0; int semicolon = 0; char_u op[4]; char_u *argend; int first = TRUE; int concat; int has_assign; int flags = 0; int vim9script = in_vim9script(); if (eap->cmdidx == CMD_final && !vim9script) { // In legacy Vim script ":final" is short for ":finally". ex_finally(eap); return; } if (eap->cmdidx == CMD_let && vim9script) { emsg(_(e_cannot_use_let_in_vim9_script)); return; } if (eap->cmdidx == CMD_const) flags |= ASSIGN_CONST; else if (eap->cmdidx == CMD_final) flags |= ASSIGN_FINAL; // Vim9 assignment without ":let", ":const" or ":final" if (eap->arg == eap->cmd) flags |= ASSIGN_NO_DECL; argend = skip_var_list(arg, TRUE, &var_count, &semicolon, FALSE); if (argend == NULL) return; if (argend > arg && argend[-1] == '.') // for var.='str' --argend; expr = skipwhite(argend); concat = expr[0] == '.' && ((expr[1] == '=' && in_old_script(2)) || (expr[1] == '.' && expr[2] == '=')); has_assign = *expr == '=' || (vim_strchr((char_u *)"+-*/%", *expr) != NULL && expr[1] == '='); if (!has_assign && !concat) { // ":let" without "=": list variables if (*arg == '[') emsg(_(e_invalid_argument)); else if (expr[0] == '.' 
&& expr[1] == '=') emsg(_(e_dot_equal_not_supported_with_script_version_two)); else if (!ends_excmd2(eap->cmd, arg)) { if (vim9script) { if (!ends_excmd2(eap->cmd, skipwhite(argend))) semsg(_(e_trailing_characters_str), argend); else // Vim9 declaration ":var name: type" arg = vim9_declare_scriptvar(eap, arg); } else { // ":let var1 var2" - list values arg = list_arg_vars(eap, arg, &first); } } else if (!eap->skip) { // ":let" list_glob_vars(&first); list_buf_vars(&first); list_win_vars(&first); list_tab_vars(&first); list_script_vars(&first); list_func_vars(&first); list_vim_vars(&first); } set_nextcmd(eap, arg); } else if (expr[0] == '=' && expr[1] == '<' && expr[2] == '<') { list_T *l; long cur_lnum = SOURCING_LNUM; // HERE document l = heredoc_get(eap, expr + 3, FALSE); if (l != NULL) { rettv_list_set(&rettv, l); if (!eap->skip) { // errors are for the assignment, not the end marker SOURCING_LNUM = cur_lnum; op[0] = '='; op[1] = NUL; (void)ex_let_vars(eap->arg, &rettv, FALSE, semicolon, var_count, flags, op); } clear_tv(&rettv); } } else { evalarg_T evalarg; int len = 1; CLEAR_FIELD(rettv); i = FAIL; if (has_assign || concat) { int cur_lnum; op[0] = '='; op[1] = NUL; if (*expr != '=') { if (vim9script && (flags & ASSIGN_NO_DECL) == 0) { // +=, /=, etc. require an existing variable semsg(_(e_cannot_use_operator_on_new_variable), eap->arg); i = FAIL; } else if (vim_strchr((char_u *)"+-*/%.", *expr) != NULL) { op[0] = *expr; // +=, -=, *=, /=, %= or .= ++len; if (expr[0] == '.' 
&& expr[1] == '.') // ..= { ++expr; ++len; } } expr += 2; } else ++expr; if (vim9script && !eap->skip && (!VIM_ISWHITE(*argend) || !IS_WHITE_OR_NUL(*expr))) { vim_strncpy(op, expr - len, len); semsg(_(e_white_space_required_before_and_after_str_at_str), op, argend); i = FAIL; } if (eap->skip) ++emsg_skip; fill_evalarg_from_eap(&evalarg, eap, eap->skip); expr = skipwhite_and_linebreak(expr, &evalarg); cur_lnum = SOURCING_LNUM; i = eval0(expr, &rettv, eap, &evalarg); if (eap->skip) --emsg_skip; clear_evalarg(&evalarg, eap); // Restore the line number so that any type error is given for the // declaration, not the expression. SOURCING_LNUM = cur_lnum; } if (eap->skip) { if (i != FAIL) clear_tv(&rettv); } else if (i != FAIL) { (void)ex_let_vars(eap->arg, &rettv, FALSE, semicolon, var_count, flags, op); clear_tv(&rettv); } } } /* * Assign the typeval "tv" to the variable or variables at "arg_start". * Handles both "var" with any type and "[var, var; var]" with a list type. * When "op" is not NULL it points to a string with characters that * must appear after the variable(s). Use "+", "-" or "." for add, subtract * or concatenate. * Returns OK or FAIL; */ int ex_let_vars( char_u *arg_start, typval_T *tv, int copy, // copy values from "tv", don't move int semicolon, // from skip_var_list() int var_count, // from skip_var_list() int flags, // ASSIGN_FINAL, ASSIGN_CONST, etc. 
char_u *op) { char_u *arg = arg_start; list_T *l; int i; int var_idx = 0; listitem_T *item; typval_T ltv; if (*arg != '[') { // ":let var = expr" or ":for var in list" if (ex_let_one(arg, tv, copy, flags, op, op, var_idx) == NULL) return FAIL; return OK; } // ":let [v1, v2] = list" or ":for [v1, v2] in listlist" if (tv->v_type != VAR_LIST || (l = tv->vval.v_list) == NULL) { emsg(_(e_list_required)); return FAIL; } i = list_len(l); if (semicolon == 0 && var_count < i) { emsg(_(e_less_targets_than_list_items)); return FAIL; } if (var_count - semicolon > i) { emsg(_(e_more_targets_than_list_items)); return FAIL; } CHECK_LIST_MATERIALIZE(l); item = l->lv_first; while (*arg != ']') { arg = skipwhite(arg + 1); ++var_idx; arg = ex_let_one(arg, &item->li_tv, TRUE, flags | ASSIGN_UNPACK, (char_u *)",;]", op, var_idx); item = item->li_next; if (arg == NULL) return FAIL; arg = skipwhite(arg); if (*arg == ';') { // Put the rest of the list (may be empty) in the var after ';'. // Create a new list for this. l = list_alloc(); if (l == NULL) return FAIL; while (item != NULL) { list_append_tv(l, &item->li_tv); item = item->li_next; } ltv.v_type = VAR_LIST; ltv.v_lock = 0; ltv.vval.v_list = l; l->lv_refcount = 1; ++var_idx; arg = ex_let_one(skipwhite(arg + 1), &ltv, FALSE, flags | ASSIGN_UNPACK, (char_u *)"]", op, var_idx); clear_tv(&ltv); if (arg == NULL) return FAIL; break; } else if (*arg != ',' && *arg != ']') { internal_error("ex_let_vars()"); return FAIL; } } return OK; } /* * Skip over assignable variable "var" or list of variables "[var, var]". * Used for ":let varvar = expr" and ":for varvar in expr". * For "[var, var]" increment "*var_count" for each variable. * for "[var, var; var]" set "semicolon" to 1. * If "silent" is TRUE do not give an "invalid argument" error message. * Return NULL for an error. 
*/ char_u * skip_var_list( char_u *arg, int include_type, int *var_count, int *semicolon, int silent) { char_u *p, *s; if (*arg == '[') { // "[var, var]": find the matching ']'. p = arg; for (;;) { p = skipwhite(p + 1); // skip whites after '[', ';' or ',' s = skip_var_one(p, include_type); if (s == p) { if (!silent) semsg(_(e_invalid_argument_str), p); return NULL; } ++*var_count; p = skipwhite(s); if (*p == ']') break; else if (*p == ';') { if (*semicolon == 1) { if (!silent) emsg(_(e_double_semicolon_in_list_of_variables)); return NULL; } *semicolon = 1; } else if (*p != ',') { if (!silent) semsg(_(e_invalid_argument_str), p); return NULL; } } return p + 1; } else return skip_var_one(arg, include_type); } /* * Skip one (assignable) variable name, including @r, $VAR, &option, d.key, * l[idx]. * In Vim9 script also skip over ": type" if "include_type" is TRUE. */ char_u * skip_var_one(char_u *arg, int include_type) { char_u *end; int vim9 = in_vim9script(); if (*arg == '@' && arg[1] != NUL) return arg + 2; end = find_name_end(*arg == '$' || *arg == '&' ? arg + 1 : arg, NULL, NULL, FNE_INCL_BR | FNE_CHECK_START); // "a: type" is declaring variable "a" with a type, not "a:". // Same for "s: type". if (vim9 && end == arg + 2 && end[-1] == ':') --end; if (include_type && vim9) { if (*end == ':') end = skip_type(skipwhite(end + 1), FALSE); } return end; } /* * List variables for hashtab "ht" with prefix "prefix". * If "empty" is TRUE also list NULL strings as empty strings. 
*/ void list_hashtable_vars( hashtab_T *ht, char *prefix, int empty, int *first) { hashitem_T *hi; dictitem_T *di; int todo; char_u buf[IOSIZE]; todo = (int)ht->ht_used; for (hi = ht->ht_array; todo > 0 && !got_int; ++hi) { if (!HASHITEM_EMPTY(hi)) { --todo; di = HI2DI(hi); // apply :filter /pat/ to variable name vim_strncpy((char_u *)buf, (char_u *)prefix, IOSIZE - 1); vim_strcat((char_u *)buf, di->di_key, IOSIZE); if (message_filtered(buf)) continue; if (empty || di->di_tv.v_type != VAR_STRING || di->di_tv.vval.v_string != NULL) list_one_var(di, prefix, first); } } } /* * List global variables. */ static void list_glob_vars(int *first) { list_hashtable_vars(&globvarht, "", TRUE, first); } /* * List buffer variables. */ static void list_buf_vars(int *first) { list_hashtable_vars(&curbuf->b_vars->dv_hashtab, "b:", TRUE, first); } /* * List window variables. */ static void list_win_vars(int *first) { list_hashtable_vars(&curwin->w_vars->dv_hashtab, "w:", TRUE, first); } /* * List tab page variables. */ static void list_tab_vars(int *first) { list_hashtable_vars(&curtab->tp_vars->dv_hashtab, "t:", TRUE, first); } /* * List variables in "arg". */ static char_u * list_arg_vars(exarg_T *eap, char_u *arg, int *first) { int error = FALSE; int len; char_u *name; char_u *name_start; char_u *arg_subsc; char_u *tofree; typval_T tv; while (!ends_excmd2(eap->cmd, arg) && !got_int) { if (error || eap->skip) { arg = find_name_end(arg, NULL, NULL, FNE_INCL_BR | FNE_CHECK_START); if (!VIM_ISWHITE(*arg) && !ends_excmd(*arg)) { emsg_severe = TRUE; if (!did_emsg) semsg(_(e_trailing_characters_str), arg); break; } } else { // get_name_len() takes care of expanding curly braces name_start = name = arg; len = get_name_len(&arg, &tofree, TRUE, TRUE); if (len <= 0) { // This is mainly to keep test 49 working: when expanding // curly braces fails overrule the exception error message. 
if (len < 0 && !aborting()) { emsg_severe = TRUE; semsg(_(e_invalid_argument_str), arg); break; } error = TRUE; } else { arg = skipwhite(arg); if (tofree != NULL) name = tofree; if (eval_variable(name, len, 0, &tv, NULL, EVAL_VAR_VERBOSE) == FAIL) error = TRUE; else { // handle d.key, l[idx], f(expr) arg_subsc = arg; if (handle_subscript(&arg, name_start, &tv, &EVALARG_EVALUATE, TRUE) == FAIL) error = TRUE; else { if (arg == arg_subsc && len == 2 && name[1] == ':') { switch (*name) { case 'g': list_glob_vars(first); break; case 'b': list_buf_vars(first); break; case 'w': list_win_vars(first); break; case 't': list_tab_vars(first); break; case 'v': list_vim_vars(first); break; case 's': list_script_vars(first); break; case 'l': list_func_vars(first); break; default: semsg(_(e_cant_list_variables_for_str), name); } } else { char_u numbuf[NUMBUFLEN]; char_u *tf; int c; char_u *s; s = echo_string(&tv, &tf, numbuf, 0); c = *arg; *arg = NUL; list_one_var_a("", arg == arg_subsc ? name : name_start, tv.v_type, s == NULL ? (char_u *)"" : s, first); *arg = c; vim_free(tf); } clear_tv(&tv); } } } vim_free(tofree); } arg = skipwhite(arg); } return arg; } /* * Set an environment variable, part of ex_let_one(). */ static char_u * ex_let_env( char_u *arg, typval_T *tv, int flags, char_u *endchars, char_u *op) { char_u *arg_end = NULL; char_u *name; int len; if ((flags & (ASSIGN_CONST | ASSIGN_FINAL)) && (flags & ASSIGN_FOR_LOOP) == 0) { emsg(_(e_cannot_lock_environment_variable)); return NULL; } // Find the end of the name. 
++arg; name = arg; len = get_env_len(&arg); if (len == 0) semsg(_(e_invalid_argument_str), name - 1); else { if (op != NULL && vim_strchr((char_u *)"+-*/%", *op) != NULL) semsg(_(e_wrong_variable_type_for_str_equal), op); else if (endchars != NULL && vim_strchr(endchars, *skipwhite(arg)) == NULL) emsg(_(e_unexpected_characters_in_let)); else if (!check_secure()) { char_u *tofree = NULL; int c1 = name[len]; char_u *p; name[len] = NUL; p = tv_get_string_chk(tv); if (p != NULL && op != NULL && *op == '.') { int mustfree = FALSE; char_u *s = vim_getenv(name, &mustfree); if (s != NULL) { p = tofree = concat_str(s, p); if (mustfree) vim_free(s); } } if (p != NULL) { vim_setenv_ext(name, p); arg_end = arg; } name[len] = c1; vim_free(tofree); } } return arg_end; } /* * Set an option, part of ex_let_one(). */ static char_u * ex_let_option( char_u *arg, typval_T *tv, int flags, char_u *endchars, char_u *op) { char_u *p; int scope; char_u *arg_end = NULL; if ((flags & (ASSIGN_CONST | ASSIGN_FINAL)) && (flags & ASSIGN_FOR_LOOP) == 0) { emsg(_(e_cannot_lock_option)); return NULL; } // Find the end of the name. 
p = find_option_end(&arg, &scope); if (p == NULL || (endchars != NULL && vim_strchr(endchars, *skipwhite(p)) == NULL)) emsg(_(e_unexpected_characters_in_let)); else { int c1; long n = 0; getoption_T opt_type; long numval; char_u *stringval = NULL; char_u *s = NULL; int failed = FALSE; int opt_p_flags; char_u *tofree = NULL; char_u numbuf[NUMBUFLEN]; c1 = *p; *p = NUL; opt_type = get_option_value(arg, &numval, &stringval, &opt_p_flags, scope); if ((opt_type == gov_bool || opt_type == gov_number || opt_type == gov_hidden_bool || opt_type == gov_hidden_number) && (tv->v_type != VAR_STRING || !in_vim9script())) { if (opt_type == gov_bool || opt_type == gov_hidden_bool) // bool, possibly hidden n = (long)tv_get_bool(tv); else // number, possibly hidden n = (long)tv_get_number(tv); } if ((opt_p_flags & P_FUNC) && (tv->v_type == VAR_PARTIAL || tv->v_type == VAR_FUNC)) { // If the option can be set to a function reference or a lambda // and the passed value is a function reference, then convert it to // the name (string) of the function reference. s = tv2string(tv, &tofree, numbuf, 0); } // Avoid setting a string option to the text "v:false" or similar. // In Vim9 script also don't convert a number to string. 
else if (tv->v_type != VAR_BOOL && tv->v_type != VAR_SPECIAL && (!in_vim9script() || tv->v_type != VAR_NUMBER)) s = tv_get_string_chk(tv); if (op != NULL && *op != '=') { if (((opt_type == gov_bool || opt_type == gov_number) && *op == '.') || (opt_type == gov_string && *op != '.')) { semsg(_(e_wrong_variable_type_for_str_equal), op); failed = TRUE; // don't set the value } else { // number, in legacy script also bool if (opt_type == gov_number || (opt_type == gov_bool && !in_vim9script())) { switch (*op) { case '+': n = numval + n; break; case '-': n = numval - n; break; case '*': n = numval * n; break; case '/': n = (long)num_divide(numval, n, &failed); break; case '%': n = (long)num_modulus(numval, n, &failed); break; } s = NULL; } else if (opt_type == gov_string && stringval != NULL && s != NULL) { // string s = concat_str(stringval, s); vim_free(stringval); stringval = s; } } } if (!failed) { if (opt_type != gov_string || s != NULL) { set_option_value(arg, n, s, scope); arg_end = p; } else emsg(_(e_string_required)); } *p = c1; vim_free(stringval); vim_free(tofree); } return arg_end; } /* * Set a register, part of ex_let_one(). */ static char_u * ex_let_register( char_u *arg, typval_T *tv, int flags, char_u *endchars, char_u *op) { char_u *arg_end = NULL; if ((flags & (ASSIGN_CONST | ASSIGN_FINAL)) && (flags & ASSIGN_FOR_LOOP) == 0) { emsg(_(e_cannot_lock_register)); return NULL; } ++arg; if (op != NULL && vim_strchr((char_u *)"+-*/%", *op) != NULL) semsg(_(e_wrong_variable_type_for_str_equal), op); else if (endchars != NULL && vim_strchr(endchars, *skipwhite(arg + 1)) == NULL) emsg(_(e_unexpected_characters_in_let)); else { char_u *ptofree = NULL; char_u *p; p = tv_get_string_chk(tv); if (p != NULL && op != NULL && *op == '.') { char_u *s = get_reg_contents(*arg == '@' ? '"' : *arg, GREG_EXPR_SRC); if (s != NULL) { p = ptofree = concat_str(s, p); vim_free(s); } } if (p != NULL) { write_reg_contents(*arg == '@' ? 
'"' : *arg, p, -1, FALSE); arg_end = arg + 1; } vim_free(ptofree); } return arg_end; } /* * Set one item of ":let var = expr" or ":let [v1, v2] = list" to its value. * Returns a pointer to the char just after the var name. * Returns NULL if there is an error. */ static char_u * ex_let_one( char_u *arg, // points to variable name typval_T *tv, // value to assign to variable int copy, // copy value from "tv" int flags, // ASSIGN_CONST, ASSIGN_FINAL, etc. char_u *endchars, // valid chars after variable name or NULL char_u *op, // "+", "-", "." or NULL int var_idx) // variable index for "let [a, b] = list" { char_u *arg_end = NULL; if (in_vim9script() && (flags & (ASSIGN_NO_DECL | ASSIGN_DECL)) == 0 && (flags & (ASSIGN_CONST | ASSIGN_FINAL)) == 0 && vim_strchr((char_u *)"$@&", *arg) != NULL) { vim9_declare_error(arg); return NULL; } if (*arg == '$') { // ":let $VAR = expr": Set environment variable. return ex_let_env(arg, tv, flags, endchars, op); } else if (*arg == '&') { // ":let &option = expr": Set option value. // ":let &l:option = expr": Set local option value. // ":let &g:option = expr": Set global option value. // ":for &ts in range(8)": Set option value for for loop return ex_let_option(arg, tv, flags, endchars, op); } else if (*arg == '@') { // ":let @r = expr": Set register contents. return ex_let_register(arg, tv, flags, endchars, op); } else if (eval_isnamec1(*arg) || *arg == '{') { lval_T lv; char_u *p; // ":let var = expr": Set internal variable. // ":let var: type = expr": Set internal variable with type. // ":let {expr} = expr": Idem, name made with curly braces p = get_lval(arg, tv, &lv, FALSE, FALSE, (flags & (ASSIGN_NO_DECL | ASSIGN_DECL)) ? 
GLV_NO_DECL : 0, FNE_CHECK_START); if (p != NULL && lv.ll_name != NULL) { if (endchars != NULL && vim_strchr(endchars, *skipwhite(lv.ll_name_end)) == NULL) { emsg(_(e_unexpected_characters_in_let)); } else { set_var_lval(&lv, p, tv, copy, flags, op, var_idx); arg_end = lv.ll_name_end; } } clear_lval(&lv); } else semsg(_(e_invalid_argument_str), arg); return arg_end; } /* * ":unlet[!] var1 ... " command. */ void ex_unlet(exarg_T *eap) { ex_unletlock(eap, eap->arg, 0, 0, do_unlet_var, NULL); } /* * ":lockvar" and ":unlockvar" commands */ void ex_lockvar(exarg_T *eap) { char_u *arg = eap->arg; int deep = 2; if (eap->forceit) deep = -1; else if (vim_isdigit(*arg)) { deep = getdigits(&arg); arg = skipwhite(arg); } ex_unletlock(eap, arg, deep, 0, do_lock_var, NULL); } /* * ":unlet", ":lockvar" and ":unlockvar" are quite similar. * Also used for Vim9 script. "callback" is invoked as: * callback(&lv, name_end, eap, deep, cookie) */ void ex_unletlock( exarg_T *eap, char_u *argstart, int deep, int glv_flags, int (*callback)(lval_T *, char_u *, exarg_T *, int, void *), void *cookie) { char_u *arg = argstart; char_u *name_end; int error = FALSE; lval_T lv; do { if (*arg == '$') { lv.ll_name = arg; lv.ll_tv = NULL; ++arg; if (get_env_len(&arg) == 0) { semsg(_(e_invalid_argument_str), arg - 1); return; } if (!error && !eap->skip && callback(&lv, arg, eap, deep, cookie) == FAIL) error = TRUE; name_end = arg; } else { // Parse the name and find the end. 
name_end = get_lval(arg, NULL, &lv, TRUE, eap->skip || error, glv_flags | GLV_NO_DECL, FNE_CHECK_START); if (lv.ll_name == NULL) error = TRUE; // error but continue parsing if (name_end == NULL || (!VIM_ISWHITE(*name_end) && !ends_excmd(*name_end))) { if (name_end != NULL) { emsg_severe = TRUE; semsg(_(e_trailing_characters_str), name_end); } if (!(eap->skip || error)) clear_lval(&lv); break; } if (!error && !eap->skip && callback(&lv, name_end, eap, deep, cookie) == FAIL) error = TRUE; if (!eap->skip) clear_lval(&lv); } arg = skipwhite(name_end); } while (!ends_excmd2(name_end, arg)); set_nextcmd(eap, arg); } static int do_unlet_var( lval_T *lp, char_u *name_end, exarg_T *eap, int deep UNUSED, void *cookie UNUSED) { int forceit = eap->forceit; int ret = OK; int cc; if (lp->ll_tv == NULL) { cc = *name_end; *name_end = NUL; // Environment variable, normal name or expanded name. if (*lp->ll_name == '$') vim_unsetenv(lp->ll_name + 1); else if (do_unlet(lp->ll_name, forceit) == FAIL) ret = FAIL; *name_end = cc; } else if ((lp->ll_list != NULL && value_check_lock(lp->ll_list->lv_lock, lp->ll_name, FALSE)) || (lp->ll_dict != NULL && value_check_lock(lp->ll_dict->dv_lock, lp->ll_name, FALSE))) return FAIL; else if (lp->ll_range) { if (list_unlet_range(lp->ll_list, lp->ll_li, lp->ll_name, lp->ll_n1, !lp->ll_empty2, lp->ll_n2) == FAIL) return FAIL; } else { if (lp->ll_list != NULL) // unlet a List item. listitem_remove(lp->ll_list, lp->ll_li); else // unlet a Dictionary item. dictitem_remove(lp->ll_dict, lp->ll_di); } return ret; } /* * Unlet one item or a range of items from a list. * Return OK or FAIL. */ int list_unlet_range( list_T *l, listitem_T *li_first, char_u *name, long n1_arg, int has_n2, long n2) { listitem_T *li = li_first; int n1 = n1_arg; while (li != NULL && (!has_n2 || n2 >= n1)) { if (value_check_lock(li->li_tv.v_lock, name, FALSE)) return FAIL; li = li->li_next; ++n1; } // Delete a range of List items. 
li = li_first; n1 = n1_arg; while (li != NULL && (!has_n2 || n2 >= n1)) { listitem_T *next = li->li_next; listitem_remove(l, li); li = next; ++n1; } return OK; } /* * "unlet" a variable. Return OK if it existed, FAIL if not. * When "forceit" is TRUE don't complain if the variable doesn't exist. */ int do_unlet(char_u *name, int forceit) { hashtab_T *ht; hashitem_T *hi; char_u *varname; dict_T *d; dictitem_T *di; // can't :unlet a script variable in Vim9 script if (in_vim9script() && check_vim9_unlet(name) == FAIL) return FAIL; ht = find_var_ht(name, &varname); // can't :unlet a script variable in Vim9 script from a function if (ht == get_script_local_ht() && SCRIPT_ID_VALID(current_sctx.sc_sid) && SCRIPT_ITEM(current_sctx.sc_sid)->sn_version == SCRIPT_VERSION_VIM9 && check_vim9_unlet(name) == FAIL) return FAIL; if (ht != NULL && *varname != NUL) { d = get_current_funccal_dict(ht); if (d == NULL) { if (ht == &globvarht) d = &globvardict; else if (ht == &compat_hashtab) d = &vimvardict; else { di = find_var_in_ht(ht, *name, (char_u *)"", FALSE); d = di == NULL ? NULL : di->di_tv.vval.v_dict; } if (d == NULL) { internal_error("do_unlet()"); return FAIL; } } hi = hash_find(ht, varname); if (HASHITEM_EMPTY(hi)) hi = find_hi_in_scoped_ht(name, &ht); if (hi != NULL && !HASHITEM_EMPTY(hi)) { di = HI2DI(hi); if (var_check_fixed(di->di_flags, name, FALSE) || var_check_ro(di->di_flags, name, FALSE) || value_check_lock(d->dv_lock, name, FALSE)) return FAIL; delete_var(ht, hi); return OK; } } if (forceit) return OK; semsg(_(e_no_such_variable_str), name); return FAIL; } /* * Lock or unlock variable indicated by "lp". * "deep" is the levels to go (-1 for unlimited); * "lock" is TRUE for ":lockvar", FALSE for ":unlockvar". 
 */
// Callback for ex_unletlock() implementing ":lockvar" / ":unlockvar" for one
// parsed lval.  "deep" is the number of levels to (un)lock (-1 = unlimited,
// 0 = only the variable flag itself); "cookie" is unused.
// Returns OK or FAIL.
    static int
do_lock_var(
    lval_T	*lp,
    char_u	*name_end,
    exarg_T	*eap,
    int		deep,
    void	*cookie UNUSED)
{
    int		lock = eap->cmdidx == CMD_lockvar;
    int		ret = OK;
    int		cc;
    dictitem_T	*di;

    if (lp->ll_tv == NULL)
    {
	// Whole variable (no index/member): temporarily NUL-terminate the
	// name so it can be used as a C string; restored below.
	cc = *name_end;
	*name_end = NUL;
	if (*lp->ll_name == '$')
	{
	    // Environment variables cannot be locked.
	    semsg(_(e_cannot_lock_or_unlock_variable_str), lp->ll_name);
	    ret = FAIL;
	}
	else
	{
	    // Normal name or expanded name.
	    di = find_var(lp->ll_name, NULL, TRUE);
	    if (di == NULL)
	    {
		// Only Vim9 script reports a missing variable here.
		if (in_vim9script())
		    semsg(_(e_cannot_find_variable_to_unlock_str),
								  lp->ll_name);
		ret = FAIL;
	    }
	    else if ((di->di_flags & DI_FLAGS_FIX)
			    && di->di_tv.v_type != VAR_DICT
			    && di->di_tv.v_type != VAR_LIST)
	    {
		// For historic reasons this error is not given for a list or
		// dict.  E.g., the b: dict could be locked/unlocked.
		semsg(_(e_cannot_lock_or_unlock_variable_str), lp->ll_name);
		ret = FAIL;
	    }
	    else
	    {
		// Toggle the lock flag on the dictitem, then recurse into the
		// value if more depth was requested.
		if (lock)
		    di->di_flags |= DI_FLAGS_LOCK;
		else
		    di->di_flags &= ~DI_FLAGS_LOCK;
		if (deep != 0)
		    item_lock(&di->di_tv, deep, lock, FALSE);
	    }
	}
	*name_end = cc;
    }
    else if (deep == 0)
    {
	// nothing to do
    }
    else if (lp->ll_range)
    {
	listitem_T    *li = lp->ll_li;

	// (un)lock a range of List items.
	while (li != NULL && (lp->ll_empty2 || lp->ll_n2 >= lp->ll_n1))
	{
	    item_lock(&li->li_tv, deep, lock, FALSE);
	    li = li->li_next;
	    ++lp->ll_n1;
	}
    }
    else if (lp->ll_list != NULL)
	// (un)lock a List item.
	item_lock(&lp->ll_li->li_tv, deep, lock, FALSE);
    else
	// (un)lock a Dictionary item.
	// NOTE(review): assumes lp->ll_di is non-NULL here -- presumably
	// guaranteed by get_lval() for a non-range dict lval; confirm.
	item_lock(&lp->ll_di->di_tv, deep, lock, FALSE);

    return ret;
}

/*
 * Lock or unlock an item. "deep" is nr of levels to go.
 * When "check_refcount" is TRUE do not lock a list or dict with a reference
 * count larger than 1.
 */
    void
item_lock(typval_T *tv, int deep, int lock, int check_refcount)
{
    // Recursion depth guard shared across all calls; containers can nest.
    static int	recurse = 0;
    list_T	*l;
    listitem_T	*li;
    dict_T	*d;
    blob_T	*b;
    hashitem_T	*hi;
    int		todo;

    if (recurse >= DICT_MAXNEST)
    {
	emsg(_(e_variable_nested_too_deep_for_unlock));
	return;
    }
    if (deep == 0)
	return;
    ++recurse;

    // lock/unlock the item itself
    if (lock)
	tv->v_lock |= VAR_LOCKED;
    else
	tv->v_lock &= ~VAR_LOCKED;

    switch (tv->v_type)
    {
	// Scalar-like types carry no contained items; only the v_lock flag
	// set above applies.
	case VAR_UNKNOWN:
	case VAR_ANY:
	case VAR_VOID:
	case VAR_NUMBER:
	case VAR_BOOL:
	case VAR_STRING:
	case VAR_FUNC:
	case VAR_PARTIAL:
	case VAR_FLOAT:
	case VAR_SPECIAL:
	case VAR_JOB:
	case VAR_CHANNEL:
	case VAR_INSTR:
	    break;

	case VAR_BLOB:
	    // Blob has a single lock flag; no per-item recursion.
	    if ((b = tv->vval.v_blob) != NULL
				    && !(check_refcount && b->bv_refcount > 1))
	    {
		if (lock)
		    b->bv_lock |= VAR_LOCKED;
		else
		    b->bv_lock &= ~VAR_LOCKED;
	    }
	    break;
	case VAR_LIST:
	    if ((l = tv->vval.v_list) != NULL
				    && !(check_refcount && l->lv_refcount > 1))
	    {
		if (lock)
		    l->lv_lock |= VAR_LOCKED;
		else
		    l->lv_lock &= ~VAR_LOCKED;
		if (deep < 0 || deep > 1)
		{
		    if (l->lv_first == &range_list_item)
			// A range() list is not materialized; mark items
			// locked via a flag instead of walking them.
			l->lv_lock |= VAR_ITEMS_LOCKED;
		    else
		    {
			// recursive: lock/unlock the items the List contains
			CHECK_LIST_MATERIALIZE(l);
			FOR_ALL_LIST_ITEMS(l, li)
			    item_lock(&li->li_tv, deep - 1, lock,
							       check_refcount);
		    }
		}
	    }
	    break;
	case VAR_DICT:
	    if ((d = tv->vval.v_dict) != NULL
				    && !(check_refcount && d->dv_refcount > 1))
	    {
		if (lock)
		    d->dv_lock |= VAR_LOCKED;
		else
		    d->dv_lock &= ~VAR_LOCKED;
		if (deep < 0 || deep > 1)
		{
		    // recursive: lock/unlock the items the List contains
		    todo = (int)d->dv_hashtab.ht_used;
		    for (hi = d->dv_hashtab.ht_array; todo > 0; ++hi)
		    {
			if (!HASHITEM_EMPTY(hi))
			{
			    --todo;
			    item_lock(&HI2DI(hi)->di_tv, deep - 1, lock,
							       check_refcount);
			}
		    }
		}
	    }
    }
    --recurse;
}

#if (defined(FEAT_MENU) && defined(FEAT_MULTI_LANG)) || defined(PROTO)
/*
 * Delete all "menutrans_" variables.
 */
    void
del_menutrans_vars(void)
{
    hashitem_T	*hi;
    int		todo;

    // Lock the hashtab while iterating: delete_var() removes entries and
    // the table must not be resized under the loop.
    hash_lock(&globvarht);
    todo = (int)globvarht.ht_used;
    for (hi = globvarht.ht_array; todo > 0 && !got_int; ++hi)
    {
	if (!HASHITEM_EMPTY(hi))
	{
	    --todo;
	    if (STRNCMP(HI2DI(hi)->di_key, "menutrans_", 10) == 0)
		delete_var(&globvarht, hi);
	}
    }
    hash_unlock(&globvarht);
}
#endif

/*
 * Local string buffer for the next two functions to store a variable name
 * with its prefix. Allocated in cat_prefix_varname(), freed later in
 * get_user_var_name().
 */
static char_u	*varnamebuf = NULL;
static int	varnamebuflen = 0;

/*
 * Function to concatenate a prefix and a variable name.
 * Produces "<prefix>:<name>" in the shared static buffer "varnamebuf",
 * growing it when needed.  Returns NULL on allocation failure.
 * Not reentrant: the returned pointer is invalidated by the next call.
 */
    char_u *
cat_prefix_varname(int prefix, char_u *name)
{
    int		len;

    // +3: prefix char, ':' and the terminating NUL.
    len = (int)STRLEN(name) + 3;
    if (len > varnamebuflen)
    {
	vim_free(varnamebuf);
	len += 10;			// some additional space
	varnamebuf = alloc(len);
	if (varnamebuf == NULL)
	{
	    varnamebuflen = 0;
	    return NULL;
	}
	varnamebuflen = len;
    }
    *varnamebuf = prefix;
    varnamebuf[1] = ':';
    STRCPY(varnamebuf + 2, name);
    return varnamebuf;
}

/*
 * Function given to ExpandGeneric() to obtain the list of user defined
 * (global/buffer/window/built-in) variable names.
 */
    char_u *
get_user_var_name(expand_T *xp, int idx)
{
    // Iteration state kept across calls; reset when idx == 0 (first call of
    // a completion run).  "hi" walks the hashtab of the scope currently
    // being enumerated.
    static long_u	gdone;
    static long_u	bdone;
    static long_u	wdone;
    static long_u	tdone;
    static int		vidx;
    static hashitem_T	*hi;
    hashtab_T		*ht;

    if (idx == 0)
    {
	gdone = bdone = wdone = vidx = 0;
	tdone = 0;
    }

    // Global variables
    if (gdone < globvarht.ht_used)
    {
	if (gdone++ == 0)
	    hi = globvarht.ht_array;
	else
	    ++hi;
	while (HASHITEM_EMPTY(hi))
	    ++hi;
	// Only add the "g:" prefix when the user typed it.
	if (STRNCMP("g:", xp->xp_pattern, 2) == 0)
	    return cat_prefix_varname('g', hi->hi_key);
	return hi->hi_key;
    }

    // b: variables
    // NOTE(review): uses prevwin_curwin() -- presumably the window to
    // complete for when the command line is active; confirm against callers.
    ht = &prevwin_curwin()->w_buffer->b_vars->dv_hashtab;
    if (bdone < ht->ht_used)
    {
	if (bdone++ == 0)
	    hi = ht->ht_array;
	else
	    ++hi;
	while (HASHITEM_EMPTY(hi))
	    ++hi;
	return cat_prefix_varname('b', hi->hi_key);
    }

    // w: variables
    ht = &prevwin_curwin()->w_vars->dv_hashtab;
    if (wdone < ht->ht_used)
    {
	if (wdone++ == 0)
	    hi = ht->ht_array;
	else
	    ++hi;
	while (HASHITEM_EMPTY(hi))
	    ++hi;
	return cat_prefix_varname('w', hi->hi_key);
    }

    // t: variables
    ht = &curtab->tp_vars->dv_hashtab;
    if (tdone < ht->ht_used)
    {
	if (tdone++ == 0)
	    hi = ht->ht_array;
	else
	    ++hi;
	while (HASHITEM_EMPTY(hi))
	    ++hi;
	return cat_prefix_varname('t', hi->hi_key);
    }

    // v: variables
    if (vidx < VV_LEN)
	return cat_prefix_varname('v', (char_u *)vimvars[vidx++].vv_name);

    // Enumeration finished: release the shared name buffer.
    VIM_CLEAR(varnamebuf);
    varnamebuflen = 0;
    return NULL;
}

    char *
get_var_special_name(int nr)
{
    switch (nr)
    {
	case VVAL_FALSE: return in_vim9script() ? "false" : "v:false";
	case VVAL_TRUE:  return in_vim9script() ? "true" : "v:true";
	case VVAL_NULL:  return in_vim9script() ? "null" : "v:null";
	case VVAL_NONE:  return "v:none";
    }
    internal_error("get_var_special_name()");
    return "42";
}

/*
 * Returns the global variable dictionary
 */
    dict_T *
get_globvar_dict(void)
{
    return &globvardict;
}

/*
 * Returns the global variable hash table
 */
    hashtab_T *
get_globvar_ht(void)
{
    return &globvarht;
}

/*
 * Returns the v: variable dictionary
 */
    dict_T *
get_vimvar_dict(void)
{
    return &vimvardict;
}

/*
 * Returns the index of a v:variable.
Negative if not found. * Returns DI_ flags in "di_flags". */ int find_vim_var(char_u *name, int *di_flags) { dictitem_T *di = find_var_in_ht(&vimvarht, 0, name, TRUE); struct vimvar *vv; if (di == NULL) return -1; *di_flags = di->di_flags; vv = (struct vimvar *)((char *)di - offsetof(vimvar_T, vv_di)); return (int)(vv - vimvars); } /* * Set type of v: variable to "type". */ void set_vim_var_type(int idx, vartype_T type) { vimvars[idx].vv_tv_type = type; } /* * Set number v: variable to "val". * Note that this does not set the type, use set_vim_var_type() for that. */ void set_vim_var_nr(int idx, varnumber_T val) { vimvars[idx].vv_nr = val; } char * get_vim_var_name(int idx) { return vimvars[idx].vv_name; } /* * Get typval_T v: variable value. */ typval_T * get_vim_var_tv(int idx) { return &vimvars[idx].vv_tv; } type_T * get_vim_var_type(int idx, garray_T *type_list) { if (vimvars[idx].vv_type != NULL) return vimvars[idx].vv_type; return typval2type_vimvar(&vimvars[idx].vv_tv, type_list); } /* * Set v: variable to "tv". Only accepts the same type. * Takes over the value of "tv". */ int set_vim_var_tv(int idx, typval_T *tv) { if (vimvars[idx].vv_tv_type != tv->v_type) { emsg(_(e_type_mismatch_for_v_variable)); clear_tv(tv); return FAIL; } // VV_RO is also checked when compiling, but let's check here as well. if (vimvars[idx].vv_flags & VV_RO) { semsg(_(e_cannot_change_readonly_variable_str), vimvars[idx].vv_name); return FAIL; } if (sandbox && (vimvars[idx].vv_flags & VV_RO_SBX)) { semsg(_(e_cannot_set_variable_in_sandbox_str), vimvars[idx].vv_name); return FAIL; } clear_tv(&vimvars[idx].vv_di.di_tv); vimvars[idx].vv_di.di_tv = *tv; return OK; } /* * Get number v: variable value. */ varnumber_T get_vim_var_nr(int idx) { return vimvars[idx].vv_nr; } /* * Get string v: variable value. Uses a static buffer, can only be used once. * If the String variable has never been set, return an empty string. 
* Never returns NULL; */ char_u * get_vim_var_str(int idx) { return tv_get_string(&vimvars[idx].vv_tv); } /* * Get List v: variable value. Caller must take care of reference count when * needed. */ list_T * get_vim_var_list(int idx) { return vimvars[idx].vv_list; } /* * Get Dict v: variable value. Caller must take care of reference count when * needed. */ dict_T * get_vim_var_dict(int idx) { return vimvars[idx].vv_dict; } /* * Set v:char to character "c". */ void set_vim_var_char(int c) { char_u buf[MB_MAXBYTES + 1]; if (has_mbyte) buf[(*mb_char2bytes)(c, buf)] = NUL; else { buf[0] = c; buf[1] = NUL; } set_vim_var_string(VV_CHAR, buf, -1); } /* * Set v:count to "count" and v:count1 to "count1". * When "set_prevcount" is TRUE first set v:prevcount from v:count. */ void set_vcount( long count, long count1, int set_prevcount) { if (set_prevcount) vimvars[VV_PREVCOUNT].vv_nr = vimvars[VV_COUNT].vv_nr; vimvars[VV_COUNT].vv_nr = count; vimvars[VV_COUNT1].vv_nr = count1; } /* * Save variables that might be changed as a side effect. Used when executing * a timer callback. */ void save_vimvars(vimvars_save_T *vvsave) { vvsave->vv_prevcount = vimvars[VV_PREVCOUNT].vv_nr; vvsave->vv_count = vimvars[VV_COUNT].vv_nr; vvsave->vv_count1 = vimvars[VV_COUNT1].vv_nr; } /* * Restore variables saved by save_vimvars(). */ void restore_vimvars(vimvars_save_T *vvsave) { vimvars[VV_PREVCOUNT].vv_nr = vvsave->vv_prevcount; vimvars[VV_COUNT].vv_nr = vvsave->vv_count; vimvars[VV_COUNT1].vv_nr = vvsave->vv_count1; } /* * Set string v: variable to a copy of "val". If 'copy' is FALSE, then set the * value. */ void set_vim_var_string( int idx, char_u *val, int len) // length of "val" to use or -1 (whole string) { clear_tv(&vimvars[idx].vv_di.di_tv); vimvars[idx].vv_tv_type = VAR_STRING; if (val == NULL) vimvars[idx].vv_str = NULL; else if (len == -1) vimvars[idx].vv_str = vim_strsave(val); else vimvars[idx].vv_str = vim_strnsave(val, len); } /* * Set List v: variable to "val". 
*/ void set_vim_var_list(int idx, list_T *val) { clear_tv(&vimvars[idx].vv_di.di_tv); vimvars[idx].vv_tv_type = VAR_LIST; vimvars[idx].vv_list = val; if (val != NULL) ++val->lv_refcount; } /* * Set Dictionary v: variable to "val". */ void set_vim_var_dict(int idx, dict_T *val) { clear_tv(&vimvars[idx].vv_di.di_tv); vimvars[idx].vv_tv_type = VAR_DICT; vimvars[idx].vv_dict = val; if (val != NULL) { ++val->dv_refcount; dict_set_items_ro(val); } } /* * Set the v:argv list. */ void set_argv_var(char **argv, int argc) { list_T *l = list_alloc(); int i; if (l == NULL) getout(1); l->lv_lock = VAR_FIXED; for (i = 0; i < argc; ++i) { if (list_append_string(l, (char_u *)argv[i], -1) == FAIL) getout(1); l->lv_u.mat.lv_last->li_tv.v_lock = VAR_FIXED; } set_vim_var_list(VV_ARGV, l); } /* * Reset v:register, taking the 'clipboard' setting into account. */ void reset_reg_var(void) { int regname = 0; // Adjust the register according to 'clipboard', so that when // "unnamed" is present it becomes '*' or '+' instead of '"'. #ifdef FEAT_CLIPBOARD adjust_clip_reg(&regname); #endif set_reg_var(regname); } /* * Set v:register if needed. */ void set_reg_var(int c) { char_u regname; if (c == 0 || c == ' ') regname = '"'; else regname = c; // Avoid free/alloc when the value is already right. if (vimvars[VV_REG].vv_str == NULL || vimvars[VV_REG].vv_str[0] != c) set_vim_var_string(VV_REG, &regname, 1); } /* * Get or set v:exception. If "oldval" == NULL, return the current value. * Otherwise, restore the value to "oldval" and return NULL. * Must always be called in pairs to save and restore v:exception! Does not * take care of memory allocations. */ char_u * v_exception(char_u *oldval) { if (oldval == NULL) return vimvars[VV_EXCEPTION].vv_str; vimvars[VV_EXCEPTION].vv_str = oldval; return NULL; } /* * Get or set v:throwpoint. If "oldval" == NULL, return the current value. * Otherwise, restore the value to "oldval" and return NULL. 
* Must always be called in pairs to save and restore v:throwpoint! Does not * take care of memory allocations. */ char_u * v_throwpoint(char_u *oldval) { if (oldval == NULL) return vimvars[VV_THROWPOINT].vv_str; vimvars[VV_THROWPOINT].vv_str = oldval; return NULL; } /* * Set v:cmdarg. * If "eap" != NULL, use "eap" to generate the value and return the old value. * If "oldarg" != NULL, restore the value to "oldarg" and return NULL. * Must always be called in pairs! */ char_u * set_cmdarg(exarg_T *eap, char_u *oldarg) { char_u *oldval; char_u *newval; unsigned len; oldval = vimvars[VV_CMDARG].vv_str; if (eap == NULL) { vim_free(oldval); vimvars[VV_CMDARG].vv_str = oldarg; return NULL; } if (eap->force_bin == FORCE_BIN) len = 6; else if (eap->force_bin == FORCE_NOBIN) len = 8; else len = 0; if (eap->read_edit) len += 7; if (eap->force_ff != 0) len += 10; // " ++ff=unix" if (eap->force_enc != 0) len += (unsigned)STRLEN(eap->cmd + eap->force_enc) + 7; if (eap->bad_char != 0) len += 7 + 4; // " ++bad=" + "keep" or "drop" newval = alloc(len + 1); if (newval == NULL) return NULL; if (eap->force_bin == FORCE_BIN) sprintf((char *)newval, " ++bin"); else if (eap->force_bin == FORCE_NOBIN) sprintf((char *)newval, " ++nobin"); else *newval = NUL; if (eap->read_edit) STRCAT(newval, " ++edit"); if (eap->force_ff != 0) sprintf((char *)newval + STRLEN(newval), " ++ff=%s", eap->force_ff == 'u' ? "unix" : eap->force_ff == 'd' ? "dos" : "mac"); if (eap->force_enc != 0) sprintf((char *)newval + STRLEN(newval), " ++enc=%s", eap->cmd + eap->force_enc); if (eap->bad_char == BAD_KEEP) STRCPY(newval + STRLEN(newval), " ++bad=keep"); else if (eap->bad_char == BAD_DROP) STRCPY(newval + STRLEN(newval), " ++bad=drop"); else if (eap->bad_char != 0) sprintf((char *)newval + STRLEN(newval), " ++bad=%c", eap->bad_char); vimvars[VV_CMDARG].vv_str = newval; return oldval; } /* * Get the value of internal variable "name". 
* If "flags" has EVAL_VAR_IMPORT may return a VAR_ANY with v_number set to the * imported script ID. * Return OK or FAIL. If OK is returned "rettv" must be cleared. */ int eval_variable( char_u *name, int len, // length of "name" scid_T sid, // script ID for imported item or zero typval_T *rettv, // NULL when only checking existence dictitem_T **dip, // non-NULL when typval's dict item is needed int flags) // EVAL_VAR_ flags { int ret = OK; typval_T *tv = NULL; int found = FALSE; hashtab_T *ht = NULL; int cc; type_T *type = NULL; // truncate the name, so that we can use strcmp() cc = name[len]; name[len] = NUL; // Check for local variable when debugging. if ((tv = lookup_debug_var(name)) == NULL) { // Check for user-defined variables. dictitem_T *v = find_var(name, &ht, flags & EVAL_VAR_NOAUTOLOAD); if (v != NULL) { tv = &v->di_tv; if (dip != NULL) *dip = v; } else ht = NULL; } if (tv == NULL && (in_vim9script() || STRNCMP(name, "s:", 2) == 0)) { imported_T *import = NULL; char_u *p = STRNCMP(name, "s:", 2) == 0 ? name + 2 : name; if (sid == 0) import = find_imported(p, 0, TRUE); // imported variable from another script if (import != NULL || sid != 0) { if ((flags & EVAL_VAR_IMPORT) == 0) { if (SCRIPT_ID_VALID(sid)) { ht = &SCRIPT_VARS(sid); if (ht != NULL) { dictitem_T *v = find_var_in_ht(ht, 0, name, flags & EVAL_VAR_NOAUTOLOAD); if (v != NULL) { tv = &v->di_tv; if (dip != NULL) *dip = v; } else ht = NULL; } } else { if (flags & EVAL_VAR_VERBOSE) semsg(_(e_expected_dot_after_name_str), name); ret = FAIL; } } else { if (rettv != NULL) { rettv->v_type = VAR_ANY; rettv->vval.v_number = sid != 0 ? sid : import->imp_sid; } found = TRUE; } } else if (in_vim9script() && (flags & EVAL_VAR_NO_FUNC) == 0) { int has_g_prefix = STRNCMP(name, "g:", 2) == 0; ufunc_T *ufunc = find_func(name, FALSE); // In Vim9 script we can get a function reference by using the // function name. For a global non-autoload function "g:" is // required. 
if (ufunc != NULL && (has_g_prefix || !func_requires_g_prefix(ufunc))) { found = TRUE; if (rettv != NULL) { rettv->v_type = VAR_FUNC; if (has_g_prefix) // Keep the "g:", otherwise script-local may be // assumed. rettv->vval.v_string = vim_strsave(name); else rettv->vval.v_string = vim_strsave(ufunc->uf_name); if (rettv->vval.v_string != NULL) func_ref(ufunc->uf_name); } } } } if (!found) { if (tv == NULL) { if (rettv != NULL && (flags & EVAL_VAR_VERBOSE)) semsg(_(e_undefined_variable_str), name); ret = FAIL; } else if (rettv != NULL) { if (ht != NULL && ht == get_script_local_ht() && tv != &SCRIPT_SV(current_sctx.sc_sid)->sv_var.di_tv) { svar_T *sv = find_typval_in_script(tv, 0); if (sv != NULL) type = sv->sv_type; } // If a list or dict variable wasn't initialized, do it now. if (tv->v_type == VAR_DICT && tv->vval.v_dict == NULL) { tv->vval.v_dict = dict_alloc(); if (tv->vval.v_dict != NULL) { ++tv->vval.v_dict->dv_refcount; tv->vval.v_dict->dv_type = alloc_type(type); } } else if (tv->v_type == VAR_LIST && tv->vval.v_list == NULL) { tv->vval.v_list = list_alloc(); if (tv->vval.v_list != NULL) { ++tv->vval.v_list->lv_refcount; tv->vval.v_list->lv_type = alloc_type(type); } } else if (tv->v_type == VAR_BLOB && tv->vval.v_blob == NULL) { tv->vval.v_blob = blob_alloc(); if (tv->vval.v_blob != NULL) ++tv->vval.v_blob->bv_refcount; } copy_tv(tv, rettv); } } name[len] = cc; return ret; } /* * Check if variable "name[len]" is a local variable or an argument. * If so, "*eval_lavars_used" is set to TRUE. */ void check_vars(char_u *name, int len) { int cc; char_u *varname; hashtab_T *ht; if (eval_lavars_used == NULL) return; // truncate the name, so that we can use strcmp() cc = name[len]; name[len] = NUL; ht = find_var_ht(name, &varname); if (ht == get_funccal_local_ht() || ht == get_funccal_args_ht()) { if (find_var(name, NULL, TRUE) != NULL) *eval_lavars_used = TRUE; } name[len] = cc; } /* * Find variable "name" in the list of variables. 
 * Return a pointer to it if found, NULL if not found.
 * Careful: "a:0" variables don't have a name.
 * When "htp" is not NULL set "htp" to the hashtab_T used.
 */
    dictitem_T *
find_var(char_u *name, hashtab_T **htp, int no_autoload)
{
    char_u	*varname;
    hashtab_T	*ht;
    dictitem_T	*ret = NULL;

    // Resolve the scope ("g:", "b:", "l:", ...) and strip the prefix.
    ht = find_var_ht(name, &varname);
    if (htp != NULL)
	*htp = ht;
    if (ht == NULL)
	return NULL;
    ret = find_var_in_ht(ht, *name, varname, no_autoload);
    if (ret != NULL)
	return ret;

    // Search in parent scope for lambda
    ret = find_var_in_scoped_ht(name, no_autoload);
    if (ret != NULL)
	return ret;

    // in Vim9 script items without a scope can be script-local
    if (in_vim9script() && name[0] != NUL && name[1] != ':')
    {
	ht = get_script_local_ht();
	if (ht != NULL)
	{
	    ret = find_var_in_ht(ht, *name, varname, no_autoload);
	    if (ret != NULL)
	    {
		if (htp != NULL)
		    *htp = ht;
		return ret;
	    }
	}
    }

    // When using "vim9script autoload" script-local items are prefixed but can
    // be used with s:name.
    if (SCRIPT_ID_VALID(current_sctx.sc_sid)
				   && name[0] == 's' && name[1] == ':')
    {
	scriptitem_T *si = SCRIPT_ITEM(current_sctx.sc_sid);

	if (si->sn_autoload_prefix != NULL)
	{
	    // Look for "<prefix>name" in the global scope instead of
	    // "s:name".
	    char_u *auto_name = concat_str(si->sn_autoload_prefix, name + 2);

	    if (auto_name != NULL)
	    {
		ht = &globvarht;
		ret = find_var_in_ht(ht, *name, auto_name, TRUE);
		vim_free(auto_name);
		if (ret != NULL)
		{
		    if (htp != NULL)
			*htp = ht;
		    return ret;
		}
	    }
	}
    }

    return NULL;
}

/*
 * Like find_var() but if the name starts with <SNR>99_ then look in the
 * referenced script (used for a funcref).
 */
    dictitem_T *
find_var_also_in_script(char_u *name, hashtab_T **htp, int no_autoload)
{
    if (STRNCMP(name, "<SNR>", 5) == 0 && isdigit(name[5]))
    {
	// Parse the script ID out of "<SNR>99_name".
	char_u	    *p = name + 5;
	int	    sid = getdigits(&p);

	if (SCRIPT_ID_VALID(sid) && *p == '_')
	{
	    hashtab_T	*ht = &SCRIPT_VARS(sid);

	    if (ht != NULL)
	    {
		dictitem_T  *di = find_var_in_ht(ht, 0, p + 1, no_autoload);

		if (di != NULL)
		{
		    if (htp != NULL)
			*htp = ht;
		    return di;
		}
	    }
	}
    }

    // Fall back to the normal scope search.
    return find_var(name, htp, no_autoload);
}

/*
 * Find variable "varname" in hashtab "ht" with name "htname".
 * When "varname" is empty returns curwin/curtab/etc vars dictionary.
 * Returns NULL if not found.
 */
    dictitem_T *
find_var_in_ht(
    hashtab_T	*ht,
    int		htname,
    char_u	*varname,
    int		no_autoload)
{
    hashitem_T	*hi;

    if (*varname == NUL)
    {
	// Must be something like "s:", otherwise "ht" would be NULL.
	switch (htname)
	{
	    case 's': return &SCRIPT_SV(current_sctx.sc_sid)->sv_var;
	    case 'g': return &globvars_var;
	    case 'v': return &vimvars_var;
	    case 'b': return &curbuf->b_bufvar;
	    case 'w': return &curwin->w_winvar;
	    // NOTE(review): 't' maps to tp_winvar -- field name suggests a
	    // window variable but it is the tab scope dict item; confirm
	    // against structs.h.
	    case 't': return &curtab->tp_winvar;
	    case 'l': return get_funccal_local_var();
	    case 'a': return get_funccal_args_var();
	}
	return NULL;
    }

    hi = hash_find(ht, varname);
    if (HASHITEM_EMPTY(hi))
    {
	// For global variables we may try auto-loading the script.  If it
	// worked find the variable again.  Don't auto-load a script if it was
	// loaded already, otherwise it would be loaded every time when
	// checking if a function name is a Funcref variable.
	if (ht == &globvarht && !no_autoload)
	{
	    // Note: script_autoload() may make "hi" invalid.  It must either
	    // be obtained again or not used.
	    if (!script_autoload(varname, FALSE) || aborting())
		return NULL;
	    hi = hash_find(ht, varname);
	}
	if (HASHITEM_EMPTY(hi))
	    return NULL;
    }
    return HI2DI(hi);
}

/*
 * Get the script-local hashtab. NULL if not in a script context.
*/ hashtab_T * get_script_local_ht(void) { scid_T sid = current_sctx.sc_sid; if (SCRIPT_ID_VALID(sid)) return &SCRIPT_VARS(sid); return NULL; } /* * Look for "name[len]" in script-local variables and functions. * When "cmd" is TRUE it must look like a command, a function must be followed * by "(" or "->". * Return OK when found, FAIL when not found. */ int lookup_scriptitem( char_u *name, size_t len, int cmd, cctx_T *dummy UNUSED) { hashtab_T *ht = get_script_local_ht(); char_u buffer[30]; char_u *p; int res; hashitem_T *hi; int is_global = FALSE; char_u *fname = name; if (ht == NULL) return FAIL; if (len < sizeof(buffer) - 1) { // avoid an alloc/free for short names vim_strncpy(buffer, name, len); p = buffer; } else { p = vim_strnsave(name, len); if (p == NULL) return FAIL; } hi = hash_find(ht, p); res = HASHITEM_EMPTY(hi) ? FAIL : OK; // if not script-local, then perhaps imported if (res == FAIL && find_imported(p, 0, FALSE) != NULL) res = OK; if (p != buffer) vim_free(p); // Find a function, so that a following "->" works. // When used as a command require "(" or "->" to follow, "Cmd" is a user // command while "Cmd()" is a function call. if (res != OK) { p = skipwhite(name + len); if (!cmd || name[len] == '(' || (p[0] == '-' && p[1] == '>')) { // Do not check for an internal function, since it might also be a // valid command, such as ":split" versus "split()". // Skip "g:" before a function name. if (name[0] == 'g' && name[1] == ':') { is_global = TRUE; fname = name + 2; } if (find_func(fname, is_global) != NULL) res = OK; } } return res; } /* * Find the hashtab used for a variable name. * Return NULL if the name is not valid. * Set "varname" to the start of name without ':'. */ hashtab_T * find_var_ht(char_u *name, char_u **varname) { hashitem_T *hi; hashtab_T *ht; if (name[0] == NUL) return NULL; if (name[1] != ':') { // The name must not start with a colon or #. 
if (name[0] == ':' || name[0] == AUTOLOAD_CHAR) return NULL; *varname = name; // "version" is "v:version" in all scopes if scriptversion < 3. // Same for a few other variables marked with VV_COMPAT. if (in_old_script(3)) { hi = hash_find(&compat_hashtab, name); if (!HASHITEM_EMPTY(hi)) return &compat_hashtab; } ht = get_funccal_local_ht(); if (ht != NULL) return ht; // local variable // In Vim9 script items at the script level are script-local, except // for autoload names. if (in_vim9script() && vim_strchr(name, AUTOLOAD_CHAR) == NULL) { ht = get_script_local_ht(); if (ht != NULL) return ht; } return &globvarht; // global variable } *varname = name + 2; if (*name == 'g') // global variable return &globvarht; // There must be no ':' or '#' in the rest of the name, unless g: is used if (vim_strchr(name + 2, ':') != NULL || vim_strchr(name + 2, AUTOLOAD_CHAR) != NULL) return NULL; if (*name == 'b') // buffer variable return &curbuf->b_vars->dv_hashtab; if (*name == 'w') // window variable return &curwin->w_vars->dv_hashtab; if (*name == 't') // tab page variable return &curtab->tp_vars->dv_hashtab; if (*name == 'v') // v: variable return &vimvarht; if (get_current_funccal() != NULL && get_current_funccal()->func->uf_def_status == UF_NOT_COMPILED) { // a: and l: are only used in functions defined with ":function" if (*name == 'a') // a: function argument return get_funccal_args_ht(); if (*name == 'l') // l: local function variable return get_funccal_local_ht(); } if (*name == 's') // script variable { ht = get_script_local_ht(); if (ht != NULL) return ht; } return NULL; } /* * Get the string value of a (global/local) variable. * Note: see tv_get_string() for how long the pointer remains valid. * Returns NULL when it doesn't exist. */ char_u * get_var_value(char_u *name) { dictitem_T *v; v = find_var(name, NULL, FALSE); if (v == NULL) return NULL; return tv_get_string(&v->di_tv); } /* * Allocate a new hashtab for a sourced script. 
 It will be used while
 * sourcing this script and when executing functions defined in the script.
 */
    void
new_script_vars(scid_T id)
{
    scriptvar_T *sv;

    sv = ALLOC_CLEAR_ONE(scriptvar_T);
    if (sv == NULL)
	return;
    init_var_dict(&sv->sv_dict, &sv->sv_var, VAR_SCOPE);
    SCRIPT_ITEM(id)->sn_vars = sv;
}

/*
 * Initialize dictionary "dict" as a scope and set variable "dict_var" to
 * point to it.
 */
    void
init_var_dict(dict_T *dict, dictitem_T *dict_var, int scope)
{
    hash_init(&dict->dv_hashtab);
    dict->dv_lock = 0;
    dict->dv_scope = scope;
    // DO_NOT_FREE_CNT keeps the scope dict alive for normal unrefs; see
    // unref_var_dict() below for the matching teardown.
    dict->dv_refcount = DO_NOT_FREE_CNT;
    dict->dv_copyID = 0;
    dict_var->di_tv.vval.v_dict = dict;
    dict_var->di_tv.v_type = VAR_DICT;
    dict_var->di_tv.v_lock = VAR_FIXED;
    dict_var->di_flags = DI_FLAGS_RO | DI_FLAGS_FIX;
    dict_var->di_key[0] = NUL;
}

/*
 * Unreference a dictionary initialized by init_var_dict().
 */
    void
unref_var_dict(dict_T *dict)
{
    // Now the dict needs to be freed if no one else is using it, go back to
    // normal reference counting.
    dict->dv_refcount -= DO_NOT_FREE_CNT - 1;
    dict_unref(dict);
}

/*
 * Clean up a list of internal variables.
 * Frees all allocated variables and the value they contain.
 * Clears hashtab "ht", does not free it.
 */
    void
vars_clear(hashtab_T *ht)
{
    vars_clear_ext(ht, TRUE);
}

/*
 * Like vars_clear(), but only free the value if "free_val" is TRUE.
 */
    void
vars_clear_ext(hashtab_T *ht, int free_val)
{
    int		todo;
    hashitem_T	*hi;
    dictitem_T	*v;

    hash_lock(ht);
    todo = (int)ht->ht_used;
    for (hi = ht->ht_array; todo > 0; ++hi)
    {
	if (!HASHITEM_EMPTY(hi))
	{
	    --todo;

	    // Free the variable.  Don't remove it from the hashtab,
	    // ht_array might change then.  hash_clear() takes care of it
	    // later.
	    v = HI2DI(hi);
	    if (free_val)
		clear_tv(&v->di_tv);
	    if (v->di_flags & DI_FLAGS_ALLOC)
		vim_free(v);
	}
    }
    hash_clear(ht);
    hash_init(ht);
}

/*
 * Delete a variable from hashtab "ht" at item "hi".
 * Clear the variable value and free the dictitem.
*/ void delete_var(hashtab_T *ht, hashitem_T *hi) { dictitem_T *di = HI2DI(hi); hash_remove(ht, hi); clear_tv(&di->di_tv); vim_free(di); } /* * List the value of one internal variable. */ static void list_one_var(dictitem_T *v, char *prefix, int *first) { char_u *tofree; char_u *s; char_u numbuf[NUMBUFLEN]; s = echo_string(&v->di_tv, &tofree, numbuf, get_copyID()); list_one_var_a(prefix, v->di_key, v->di_tv.v_type, s == NULL ? (char_u *)"" : s, first); vim_free(tofree); } static void list_one_var_a( char *prefix, char_u *name, int type, char_u *string, int *first) // when TRUE clear rest of screen and set to FALSE { // don't use msg() or msg_attr() to avoid overwriting "v:statusmsg" msg_start(); msg_puts(prefix); if (name != NULL) // "a:" vars don't have a name stored msg_puts((char *)name); msg_putchar(' '); msg_advance(22); if (type == VAR_NUMBER) msg_putchar('#'); else if (type == VAR_FUNC || type == VAR_PARTIAL) msg_putchar('*'); else if (type == VAR_LIST) { msg_putchar('['); if (*string == '[') ++string; } else if (type == VAR_DICT) { msg_putchar('{'); if (*string == '{') ++string; } else msg_putchar(' '); msg_outtrans(string); if (type == VAR_FUNC || type == VAR_PARTIAL) msg_puts("()"); if (*first) { msg_clr_eos(); *first = FALSE; } } /* * Set variable "name" to value in "tv". * If the variable already exists, the value is updated. * Otherwise the variable is created. */ void set_var( char_u *name, typval_T *tv, int copy) // make copy of value in "tv" { set_var_const(name, 0, NULL, tv, copy, ASSIGN_DECL, 0); } /* * Set variable "name" to value in "tv_arg". * When "sid" is non-zero "name" is in the script with this ID. * If the variable already exists and "is_const" is FALSE the value is updated. * Otherwise the variable is created. */ void set_var_const( char_u *name, scid_T sid, type_T *type_arg, typval_T *tv_arg, int copy, // make copy of value in "tv" int flags_arg, // ASSIGN_CONST, ASSIGN_FINAL, etc. 
int var_idx) // index for ":let [a, b] = list" { typval_T *tv = tv_arg; type_T *type = type_arg; typval_T bool_tv; dictitem_T *di; typval_T *dest_tv = NULL; char_u *varname; char_u *name_tofree = NULL; hashtab_T *ht = NULL; int is_script_local; int vim9script = in_vim9script(); int var_in_vim9script; int var_in_autoload = FALSE; int flags = flags_arg; int free_tv_arg = !copy; // free tv_arg if not used if (sid != 0) { if (SCRIPT_ID_VALID(sid)) ht = &SCRIPT_VARS(sid); varname = name; } else { scriptitem_T *si; if (in_vim9script() && is_export && SCRIPT_ID_VALID(current_sctx.sc_sid) && (si = SCRIPT_ITEM(current_sctx.sc_sid)) ->sn_autoload_prefix != NULL) { // In a vim9 autoload script an exported variable is put in the // global namespace with the autoload prefix. var_in_autoload = TRUE; varname = concat_str(si->sn_autoload_prefix, name); if (varname == NULL) goto failed; name_tofree = varname; ht = &globvarht; } else ht = find_var_ht(name, &varname); } if (ht == NULL || *varname == NUL) { semsg(_(e_illegal_variable_name_str), name); goto failed; } is_script_local = ht == get_script_local_ht() || sid != 0 || var_in_autoload; if (vim9script && !is_script_local && (flags & (ASSIGN_NO_DECL | ASSIGN_DECL)) == 0 && (flags & (ASSIGN_CONST | ASSIGN_FINAL)) == 0 && name[1] == ':') { vim9_declare_error(name); goto failed; } if ((flags & ASSIGN_FOR_LOOP) && name[1] == ':' && vim_strchr((char_u *)"gwbt", name[0]) != NULL) // Do not make g:var, w:var, b:var or t:var final. flags &= ~ASSIGN_FINAL; var_in_vim9script = is_script_local && current_script_is_vim9(); if (var_in_vim9script && name[0] == '_' && name[1] == NUL) { // For "[a, _] = list" the underscore is ignored. 
if ((flags & ASSIGN_UNPACK) == 0) emsg(_(e_cannot_use_underscore_here)); goto failed; } di = find_var_in_ht(ht, 0, varname, TRUE); if (di == NULL && var_in_vim9script) { imported_T *import = find_imported(varname, 0, FALSE); if (import != NULL) { // imported name space cannot be used if ((flags & ASSIGN_NO_DECL) == 0) { semsg(_(e_redefining_imported_item_str), name); goto failed; } semsg(_(e_cannot_use_str_itself_it_is_imported), name); goto failed; } if (!in_vim9script()) { semsg(_(e_cannot_create_vim9_script_variable_in_function_str), name); goto failed; } } if (dest_tv == NULL) { // Search in parent scope which is possible to reference from lambda if (di == NULL) di = find_var_in_scoped_ht(name, TRUE); if ((tv->v_type == VAR_FUNC || tv->v_type == VAR_PARTIAL) && var_wrong_func_name(name, di == NULL)) goto failed; if (need_convert_to_bool(type, tv)) { // Destination is a bool and the value is not, but it can be // converted. CLEAR_FIELD(bool_tv); bool_tv.v_type = VAR_BOOL; bool_tv.vval.v_number = tv2bool(tv) ? VVAL_TRUE : VVAL_FALSE; tv = &bool_tv; } if (di != NULL) { // Item already exists. Allowed to replace when reloading. 
if ((di->di_flags & DI_FLAGS_RELOAD) == 0) { if ((flags & (ASSIGN_CONST | ASSIGN_FINAL)) && (flags & ASSIGN_FOR_LOOP) == 0) { emsg(_(e_cannot_modify_existing_variable)); goto failed; } if (is_script_local && vim9script && (flags & (ASSIGN_NO_DECL | ASSIGN_DECL)) == 0) { semsg(_(e_redefining_script_item_str), name); goto failed; } if (var_in_vim9script && (flags & ASSIGN_FOR_LOOP) == 0) { where_T where = WHERE_INIT; svar_T *sv = find_typval_in_script(&di->di_tv, sid); if (sv != NULL) { // check the type and adjust to bool if needed where.wt_index = var_idx; where.wt_variable = TRUE; if (check_script_var_type(sv, tv, name, where) == FAIL) goto failed; if (type == NULL) type = sv->sv_type; } } if ((flags & ASSIGN_FOR_LOOP) == 0 && var_check_permission(di, name) == FAIL) goto failed; } else { // can only redefine once di->di_flags &= ~DI_FLAGS_RELOAD; // A Vim9 script-local variable is also present in sn_all_vars // and sn_var_vals. It may set "type" from "tv". if (var_in_vim9script || var_in_autoload) update_vim9_script_var(FALSE, di, var_in_autoload ? name : di->di_key, flags, tv, &type, (flags & ASSIGN_NO_MEMBER_TYPE) == 0); } // existing variable, need to clear the value // Handle setting internal di: variables separately where needed to // prevent changing the type. if (ht == &vimvarht) { if (di->di_tv.v_type == VAR_STRING) { VIM_CLEAR(di->di_tv.vval.v_string); if (copy || tv->v_type != VAR_STRING) { char_u *val = tv_get_string(tv); // Careful: when assigning to v:errmsg and // tv_get_string() causes an error message the variable // will already be set. if (di->di_tv.vval.v_string == NULL) di->di_tv.vval.v_string = vim_strsave(val); } else { // Take over the string to avoid an extra alloc/free. di->di_tv.vval.v_string = tv->vval.v_string; tv->vval.v_string = NULL; } goto failed; } else if (di->di_tv.v_type == VAR_NUMBER) { di->di_tv.vval.v_number = tv_get_number(tv); if (STRCMP(varname, "searchforward") == 0) set_search_direction(di->di_tv.vval.v_number ? 
'/' : '?'); #ifdef FEAT_SEARCH_EXTRA else if (STRCMP(varname, "hlsearch") == 0) { no_hlsearch = !di->di_tv.vval.v_number; redraw_all_later(SOME_VALID); } #endif goto failed; } else if (di->di_tv.v_type != tv->v_type) { semsg(_(e_setting_str_to_value_with_wrong_type), name); goto failed; } } clear_tv(&di->di_tv); } else { // Item not found, check if a function already exists. if (is_script_local && (flags & (ASSIGN_NO_DECL | ASSIGN_DECL)) == 0 && lookup_scriptitem(name, STRLEN(name), FALSE, NULL) == OK) { semsg(_(e_redefining_script_item_str), name); goto failed; } // add a new variable if (var_in_vim9script && (flags & ASSIGN_NO_DECL)) { semsg(_(e_unknown_variable_str), name); goto failed; } // Can't add "v:" or "a:" variable. if (ht == &vimvarht || ht == get_funccal_args_ht()) { semsg(_(e_illegal_variable_name_str), name); goto failed; } // Make sure the variable name is valid. In Vim9 script an // autoload variable must be prefixed with "g:" unless in an // autoload script. if (!valid_varname(varname, -1, !vim9script || STRNCMP(name, "g:", 2) == 0 || var_in_autoload)) goto failed; di = alloc(sizeof(dictitem_T) + STRLEN(varname)); if (di == NULL) goto failed; STRCPY(di->di_key, varname); if (hash_add(ht, DI2HIKEY(di)) == FAIL) { vim_free(di); goto failed; } di->di_flags = DI_FLAGS_ALLOC; if (flags & (ASSIGN_CONST | ASSIGN_FINAL)) di->di_flags |= DI_FLAGS_LOCK; // A Vim9 script-local variable is also added to sn_all_vars and // sn_var_vals. It may set "type" from "tv". if (var_in_vim9script || var_in_autoload) update_vim9_script_var(TRUE, di, var_in_autoload ? 
name : di->di_key, flags, tv, &type, (flags & ASSIGN_NO_MEMBER_TYPE) == 0); } dest_tv = &di->di_tv; } if (copy || tv->v_type == VAR_NUMBER || tv->v_type == VAR_FLOAT) copy_tv(tv, dest_tv); else { *dest_tv = *tv; dest_tv->v_lock = 0; init_tv(tv); } free_tv_arg = FALSE; if (vim9script && type != NULL) set_tv_type(dest_tv, type); // ":const var = value" locks the value // ":final var = value" locks "var" if (flags & ASSIGN_CONST) // Like :lockvar! name: lock the value and what it contains, but only // if the reference count is up to one. That locks only literal // values. item_lock(dest_tv, DICT_MAXNEST, TRUE, TRUE); failed: vim_free(name_tofree); if (free_tv_arg) clear_tv(tv_arg); } /* * Check in this order for backwards compatibility: * - Whether the variable is read-only * - Whether the variable value is locked * - Whether the variable is locked */ int var_check_permission(dictitem_T *di, char_u *name) { if (var_check_ro(di->di_flags, name, FALSE) || value_check_lock(di->di_tv.v_lock, name, FALSE) || var_check_lock(di->di_flags, name, FALSE)) return FAIL; return OK; } /* * Return TRUE if di_flags "flags" indicates variable "name" is read-only. * Also give an error message. */ int var_check_ro(int flags, char_u *name, int use_gettext) { if (flags & DI_FLAGS_RO) { if (name == NULL) emsg(_(e_cannot_change_readonly_variable)); else semsg(_(e_cannot_change_readonly_variable_str), use_gettext ? (char_u *)_(name) : name); return TRUE; } if ((flags & DI_FLAGS_RO_SBX) && sandbox) { if (name == NULL) emsg(_(e_cannot_set_variable_in_sandbox)); else semsg(_(e_cannot_set_variable_in_sandbox_str), use_gettext ? (char_u *)_(name) : name); return TRUE; } return FALSE; } /* * Return TRUE if di_flags "flags" indicates variable "name" is locked. * Also give an error message. */ int var_check_lock(int flags, char_u *name, int use_gettext) { if (flags & DI_FLAGS_LOCK) { semsg(_(e_variable_is_locked_str), use_gettext ? 
(char_u *)_(name) : name); return TRUE; } return FALSE; } /* * Return TRUE if di_flags "flags" indicates variable "name" is fixed. * Also give an error message. */ int var_check_fixed(int flags, char_u *name, int use_gettext) { if (flags & DI_FLAGS_FIX) { if (name == NULL) emsg(_(e_cannot_delete_variable)); else semsg(_(e_cannot_delete_variable_str), use_gettext ? (char_u *)_(name) : name); return TRUE; } return FALSE; } /* * Check if a funcref is assigned to a valid variable name. * Return TRUE and give an error if not. */ int var_wrong_func_name( char_u *name, // points to start of variable name int new_var) // TRUE when creating the variable { // Allow for w: b: s: and t:. In Vim9 script s: is not allowed, because // the name can be used without the s: prefix. if (!((vim_strchr((char_u *)"wbt", name[0]) != NULL || (!in_vim9script() && name[0] == 's')) && name[1] == ':') && !ASCII_ISUPPER((name[0] != NUL && name[1] == ':') ? name[2] : name[0])) { semsg(_(e_funcref_variable_name_must_start_with_capital_str), name); return TRUE; } // Don't allow hiding a function. When "v" is not NULL we might be // assigning another function to the same var, the type is checked // below. if (new_var && function_exists(name, FALSE)) { semsg(_(e_variable_name_conflicts_with_existing_function_str), name); return TRUE; } return FALSE; } /* * Return TRUE if "flags" indicates variable "name" has a locked (immutable) * value. Also give an error message, using "name" or _("name") when * "use_gettext" is TRUE. */ int value_check_lock(int lock, char_u *name, int use_gettext) { if (lock & VAR_LOCKED) { if (name == NULL) emsg(_(e_value_is_locked)); else semsg(_(e_value_is_locked_str), use_gettext ? (char_u *)_(name) : name); return TRUE; } if (lock & VAR_FIXED) { if (name == NULL) emsg(_(e_cannot_change_value)); else semsg(_(e_cannot_change_value_of_str), use_gettext ? (char_u *)_(name) : name); return TRUE; } return FALSE; } /* * Check if a variable name is valid. 
When "autoload" is true "#" is allowed. * If "len" is -1 use all of "varname", otherwise up to "varname[len]". * Return FALSE and give an error if not. */ int valid_varname(char_u *varname, int len, int autoload) { char_u *p; for (p = varname; len < 0 ? *p != NUL : p < varname + len; ++p) if (!eval_isnamec1(*p) && (p == varname || !VIM_ISDIGIT(*p)) && !(autoload && *p == AUTOLOAD_CHAR)) { semsg(_(e_illegal_variable_name_str), varname); return FALSE; } return TRUE; } /* * getwinvar() and gettabwinvar() */ static void getwinvar( typval_T *argvars, typval_T *rettv, int off) // 1 for gettabwinvar() { win_T *win; char_u *varname; dictitem_T *v; tabpage_T *tp = NULL; int done = FALSE; switchwin_T switchwin; int need_switch_win; if (off == 1) tp = find_tabpage((int)tv_get_number_chk(&argvars[0], NULL)); else tp = curtab; win = find_win_by_nr(&argvars[off], tp); varname = tv_get_string_chk(&argvars[off + 1]); ++emsg_off; rettv->v_type = VAR_STRING; rettv->vval.v_string = NULL; if (win != NULL && varname != NULL) { // Set curwin to be our win, temporarily. Also set the tabpage, // otherwise the window is not valid. Only do this when needed, // autocommands get blocked. need_switch_win = !(tp == curtab && win == curwin); if (!need_switch_win || switch_win(&switchwin, win, tp, TRUE) == OK) { if (*varname == '&') { if (varname[1] == NUL) { // get all window-local options in a dict dict_T *opts = get_winbuf_options(FALSE); if (opts != NULL) { rettv_dict_set(rettv, opts); done = TRUE; } } else if (eval_option(&varname, rettv, 1) == OK) // window-local-option done = TRUE; } else { // Look up the variable. // Let getwinvar({nr}, "") return the "w:" dictionary. 
v = find_var_in_ht(&win->w_vars->dv_hashtab, 'w', varname, FALSE); if (v != NULL) { copy_tv(&v->di_tv, rettv); done = TRUE; } } } if (need_switch_win) // restore previous notion of curwin restore_win(&switchwin, TRUE); } if (!done && argvars[off + 2].v_type != VAR_UNKNOWN) // use the default return value copy_tv(&argvars[off + 2], rettv); --emsg_off; } /* * Set option "varname" to the value of "varp" for the current buffer/window. */ static void set_option_from_tv(char_u *varname, typval_T *varp) { long numval = 0; char_u *strval; char_u nbuf[NUMBUFLEN]; int error = FALSE; if (varp->v_type == VAR_BOOL) { numval = (long)varp->vval.v_number; strval = (char_u *)"0"; // avoid using "false" } else { if (!in_vim9script() || varp->v_type != VAR_STRING) numval = (long)tv_get_number_chk(varp, &error); strval = tv_get_string_buf_chk(varp, nbuf); } if (!error && strval != NULL) set_option_value(varname, numval, strval, OPT_LOCAL); } /* * "setwinvar()" and "settabwinvar()" functions */ static void setwinvar(typval_T *argvars, int off) { win_T *win; switchwin_T switchwin; int need_switch_win; char_u *varname, *winvarname; typval_T *varp; tabpage_T *tp = NULL; if (check_secure()) return; if (off == 1) tp = find_tabpage((int)tv_get_number_chk(&argvars[0], NULL)); else tp = curtab; win = find_win_by_nr(&argvars[off], tp); varname = tv_get_string_chk(&argvars[off + 1]); varp = &argvars[off + 2]; if (win != NULL && varname != NULL && varp != NULL) { need_switch_win = !(tp == curtab && win == curwin); if (!need_switch_win || switch_win(&switchwin, win, tp, TRUE) == OK) { if (*varname == '&') set_option_from_tv(varname + 1, varp); else { winvarname = alloc(STRLEN(varname) + 3); if (winvarname != NULL) { STRCPY(winvarname, "w:"); STRCPY(winvarname + 2, varname); set_var(winvarname, varp, TRUE); vim_free(winvarname); } } } if (need_switch_win) restore_win(&switchwin, TRUE); } } /* * reset v:option_new, v:option_old, v:option_oldlocal, v:option_oldglobal, * v:option_type, and 
v:option_command. */ void reset_v_option_vars(void) { set_vim_var_string(VV_OPTION_NEW, NULL, -1); set_vim_var_string(VV_OPTION_OLD, NULL, -1); set_vim_var_string(VV_OPTION_OLDLOCAL, NULL, -1); set_vim_var_string(VV_OPTION_OLDGLOBAL, NULL, -1); set_vim_var_string(VV_OPTION_TYPE, NULL, -1); set_vim_var_string(VV_OPTION_COMMAND, NULL, -1); } /* * Add an assert error to v:errors. */ void assert_error(garray_T *gap) { struct vimvar *vp = &vimvars[VV_ERRORS]; if (vp->vv_tv_type != VAR_LIST || vimvars[VV_ERRORS].vv_list == NULL) // Make sure v:errors is a list. set_vim_var_list(VV_ERRORS, list_alloc()); list_append_string(vimvars[VV_ERRORS].vv_list, gap->ga_data, gap->ga_len); } int var_exists(char_u *var) { char_u *arg = var; char_u *name; char_u *tofree; typval_T tv; int len = 0; int n = FALSE; // get_name_len() takes care of expanding curly braces name = var; len = get_name_len(&arg, &tofree, TRUE, FALSE); if (len > 0) { if (tofree != NULL) name = tofree; n = (eval_variable(name, len, 0, &tv, NULL, EVAL_VAR_NOAUTOLOAD + EVAL_VAR_IMPORT) == OK); if (n) { // handle d.key, l[idx], f(expr) arg = skipwhite(arg); n = (handle_subscript(&arg, name, &tv, &EVALARG_EVALUATE, FALSE) == OK); if (n) clear_tv(&tv); } } if (*arg != NUL) n = FALSE; vim_free(tofree); return n; } static lval_T *redir_lval = NULL; #define EVALCMD_BUSY (redir_lval == (lval_T *)&redir_lval) static garray_T redir_ga; // only valid when redir_lval is not NULL static char_u *redir_endp = NULL; static char_u *redir_varname = NULL; int alloc_redir_lval(void) { redir_lval = ALLOC_CLEAR_ONE(lval_T); if (redir_lval == NULL) return FAIL; return OK; } void clear_redir_lval(void) { VIM_CLEAR(redir_lval); } void init_redir_ga(void) { ga_init2(&redir_ga, sizeof(char), 500); } /* * Start recording command output to a variable * When "append" is TRUE append to an existing variable. * Returns OK if successfully completed the setup. FAIL otherwise. 
*/ int var_redir_start(char_u *name, int append) { int called_emsg_before; typval_T tv; // Catch a bad name early. if (!eval_isnamec1(*name)) { emsg(_(e_invalid_argument)); return FAIL; } // Make a copy of the name, it is used in redir_lval until redir ends. redir_varname = vim_strsave(name); if (redir_varname == NULL) return FAIL; if (alloc_redir_lval() == FAIL) { var_redir_stop(); return FAIL; } // The output is stored in growarray "redir_ga" until redirection ends. init_redir_ga(); // Parse the variable name (can be a dict or list entry). redir_endp = get_lval(redir_varname, NULL, redir_lval, FALSE, FALSE, 0, FNE_CHECK_START); if (redir_endp == NULL || redir_lval->ll_name == NULL || *redir_endp != NUL) { clear_lval(redir_lval); if (redir_endp != NULL && *redir_endp != NUL) // Trailing characters are present after the variable name semsg(_(e_trailing_characters_str), redir_endp); else semsg(_(e_invalid_argument_str), name); redir_endp = NULL; // don't store a value, only cleanup var_redir_stop(); return FAIL; } // check if we can write to the variable: set it to or append an empty // string called_emsg_before = called_emsg; tv.v_type = VAR_STRING; tv.vval.v_string = (char_u *)""; if (append) set_var_lval(redir_lval, redir_endp, &tv, TRUE, ASSIGN_NO_DECL, (char_u *)".", 0); else set_var_lval(redir_lval, redir_endp, &tv, TRUE, ASSIGN_NO_DECL, (char_u *)"=", 0); clear_lval(redir_lval); if (called_emsg > called_emsg_before) { redir_endp = NULL; // don't store a value, only cleanup var_redir_stop(); return FAIL; } return OK; } /* * Append "value[value_len]" to the variable set by var_redir_start(). 
* The actual appending is postponed until redirection ends, because the value * appended may in fact be the string we write to, changing it may cause freed * memory to be used: * :redir => foo * :let foo * :redir END */ void var_redir_str(char_u *value, int value_len) { int len; if (redir_lval == NULL) return; if (value_len == -1) len = (int)STRLEN(value); // Append the entire string else len = value_len; // Append only "value_len" characters if (ga_grow(&redir_ga, len) == OK) { mch_memmove((char *)redir_ga.ga_data + redir_ga.ga_len, value, len); redir_ga.ga_len += len; } else var_redir_stop(); } /* * Stop redirecting command output to a variable. * Frees the allocated memory. */ void var_redir_stop(void) { typval_T tv; if (EVALCMD_BUSY) { redir_lval = NULL; return; } if (redir_lval != NULL) { // If there was no error: assign the text to the variable. if (redir_endp != NULL) { ga_append(&redir_ga, NUL); // Append the trailing NUL. tv.v_type = VAR_STRING; tv.vval.v_string = redir_ga.ga_data; // Call get_lval() again, if it's inside a Dict or List it may // have changed. redir_endp = get_lval(redir_varname, NULL, redir_lval, FALSE, FALSE, 0, FNE_CHECK_START); if (redir_endp != NULL && redir_lval->ll_name != NULL) set_var_lval(redir_lval, redir_endp, &tv, FALSE, 0, (char_u *)".", 0); clear_lval(redir_lval); } // free the collected output VIM_CLEAR(redir_ga.ga_data); VIM_CLEAR(redir_lval); } VIM_CLEAR(redir_varname); } /* * Get the collected redirected text and clear redir_ga. */ char_u * get_clear_redir_ga(void) { char_u *res; ga_append(&redir_ga, NUL); // Append the trailing NUL. 
res = redir_ga.ga_data; redir_ga.ga_data = NULL; return res; } /* * "gettabvar()" function */ void f_gettabvar(typval_T *argvars, typval_T *rettv) { switchwin_T switchwin; tabpage_T *tp; dictitem_T *v; char_u *varname; int done = FALSE; rettv->v_type = VAR_STRING; rettv->vval.v_string = NULL; if (in_vim9script() && (check_for_number_arg(argvars, 0) == FAIL || check_for_string_arg(argvars, 1) == FAIL)) return; varname = tv_get_string_chk(&argvars[1]); tp = find_tabpage((int)tv_get_number_chk(&argvars[0], NULL)); if (tp != NULL && varname != NULL) { // Set tp to be our tabpage, temporarily. Also set the window to the // first window in the tabpage, otherwise the window is not valid. if (switch_win(&switchwin, tp == curtab || tp->tp_firstwin == NULL ? firstwin : tp->tp_firstwin, tp, TRUE) == OK) { // look up the variable // Let gettabvar({nr}, "") return the "t:" dictionary. v = find_var_in_ht(&tp->tp_vars->dv_hashtab, 't', varname, FALSE); if (v != NULL) { copy_tv(&v->di_tv, rettv); done = TRUE; } } // restore previous notion of curwin restore_win(&switchwin, TRUE); } if (!done && argvars[2].v_type != VAR_UNKNOWN) copy_tv(&argvars[2], rettv); } /* * "gettabwinvar()" function */ void f_gettabwinvar(typval_T *argvars, typval_T *rettv) { if (in_vim9script() && (check_for_number_arg(argvars, 0) == FAIL || check_for_number_arg(argvars, 1) == FAIL || check_for_string_arg(argvars, 2) == FAIL)) return; getwinvar(argvars, rettv, 1); } /* * "getwinvar()" function */ void f_getwinvar(typval_T *argvars, typval_T *rettv) { if (in_vim9script() && (check_for_number_arg(argvars, 0) == FAIL || check_for_string_arg(argvars, 1) == FAIL)) return; getwinvar(argvars, rettv, 0); } /* * "getbufvar()" function */ void f_getbufvar(typval_T *argvars, typval_T *rettv) { buf_T *buf; char_u *varname; dictitem_T *v; int done = FALSE; if (in_vim9script() && (check_for_buffer_arg(argvars, 0) == FAIL || check_for_string_arg(argvars, 1) == FAIL)) return; varname = tv_get_string_chk(&argvars[1]); buf = 
tv_get_buf_from_arg(&argvars[0]); rettv->v_type = VAR_STRING; rettv->vval.v_string = NULL; if (buf != NULL && varname != NULL) { if (*varname == '&') { buf_T *save_curbuf = curbuf; // set curbuf to be our buf, temporarily curbuf = buf; if (varname[1] == NUL) { // get all buffer-local options in a dict dict_T *opts = get_winbuf_options(TRUE); if (opts != NULL) { rettv_dict_set(rettv, opts); done = TRUE; } } else if (eval_option(&varname, rettv, TRUE) == OK) // buffer-local-option done = TRUE; // restore previous notion of curbuf curbuf = save_curbuf; } else { // Look up the variable. if (*varname == NUL) // Let getbufvar({nr}, "") return the "b:" dictionary. v = &buf->b_bufvar; else v = find_var_in_ht(&buf->b_vars->dv_hashtab, 'b', varname, FALSE); if (v != NULL) { copy_tv(&v->di_tv, rettv); done = TRUE; } } } if (!done && argvars[2].v_type != VAR_UNKNOWN) // use the default value copy_tv(&argvars[2], rettv); } /* * "settabvar()" function */ void f_settabvar(typval_T *argvars, typval_T *rettv UNUSED) { tabpage_T *save_curtab; tabpage_T *tp; char_u *varname, *tabvarname; typval_T *varp; if (check_secure()) return; if (in_vim9script() && (check_for_number_arg(argvars, 0) == FAIL || check_for_string_arg(argvars, 1) == FAIL)) return; tp = find_tabpage((int)tv_get_number_chk(&argvars[0], NULL)); varname = tv_get_string_chk(&argvars[1]); varp = &argvars[2]; if (varname != NULL && varp != NULL && tp != NULL) { save_curtab = curtab; goto_tabpage_tp(tp, FALSE, FALSE); tabvarname = alloc(STRLEN(varname) + 3); if (tabvarname != NULL) { STRCPY(tabvarname, "t:"); STRCPY(tabvarname + 2, varname); set_var(tabvarname, varp, TRUE); vim_free(tabvarname); } // Restore current tabpage if (valid_tabpage(save_curtab)) goto_tabpage_tp(save_curtab, FALSE, FALSE); } } /* * "settabwinvar()" function */ void f_settabwinvar(typval_T *argvars, typval_T *rettv UNUSED) { if (in_vim9script() && (check_for_number_arg(argvars, 0) == FAIL || check_for_number_arg(argvars, 1) == FAIL || 
check_for_string_arg(argvars, 2) == FAIL)) return; setwinvar(argvars, 1); } /* * "setwinvar()" function */ void f_setwinvar(typval_T *argvars, typval_T *rettv UNUSED) { if (in_vim9script() && (check_for_number_arg(argvars, 0) == FAIL || check_for_string_arg(argvars, 1) == FAIL)) return; setwinvar(argvars, 0); } /* * "setbufvar()" function */ void f_setbufvar(typval_T *argvars, typval_T *rettv UNUSED) { buf_T *buf; char_u *varname, *bufvarname; typval_T *varp; if (check_secure()) return; if (in_vim9script() && (check_for_buffer_arg(argvars, 0) == FAIL || check_for_string_arg(argvars, 1) == FAIL)) return; varname = tv_get_string_chk(&argvars[1]); buf = tv_get_buf_from_arg(&argvars[0]); varp = &argvars[2]; if (buf != NULL && varname != NULL && varp != NULL) { if (*varname == '&') { aco_save_T aco; // set curbuf to be our buf, temporarily aucmd_prepbuf(&aco, buf); set_option_from_tv(varname + 1, varp); // reset notion of buffer aucmd_restbuf(&aco); } else { bufvarname = alloc(STRLEN(varname) + 3); if (bufvarname != NULL) { buf_T *save_curbuf = curbuf; curbuf = buf; STRCPY(bufvarname, "b:"); STRCPY(bufvarname + 2, varname); set_var(bufvarname, varp, TRUE); vim_free(bufvarname); curbuf = save_curbuf; } } } } /* * Get a callback from "arg". It can be a Funcref or a function name. * When "arg" is zero return an empty string. * "cb_name" is not allocated. * "cb_name" is set to NULL for an invalid argument. 
*/ callback_T get_callback(typval_T *arg) { callback_T res; int r = OK; res.cb_free_name = FALSE; if (arg->v_type == VAR_PARTIAL && arg->vval.v_partial != NULL) { res.cb_partial = arg->vval.v_partial; ++res.cb_partial->pt_refcount; res.cb_name = partial_name(res.cb_partial); } else { res.cb_partial = NULL; if (arg->v_type == VAR_STRING && arg->vval.v_string != NULL && isdigit(*arg->vval.v_string)) r = FAIL; else if (arg->v_type == VAR_FUNC || arg->v_type == VAR_STRING) { if (arg->v_type == VAR_STRING) { char_u *name; name = get_scriptlocal_funcname(arg->vval.v_string); if (name != NULL) { vim_free(arg->vval.v_string); arg->vval.v_string = name; } } res.cb_name = arg->vval.v_string; func_ref(res.cb_name); } else if (arg->v_type == VAR_NUMBER && arg->vval.v_number == 0) res.cb_name = (char_u *)""; else r = FAIL; if (r == FAIL) { emsg(_(e_invalid_callback_argument)); res.cb_name = NULL; } } return res; } /* * Copy a callback into a typval_T. */ void put_callback(callback_T *cb, typval_T *tv) { if (cb->cb_partial != NULL) { tv->v_type = VAR_PARTIAL; tv->vval.v_partial = cb->cb_partial; ++tv->vval.v_partial->pt_refcount; } else { tv->v_type = VAR_FUNC; tv->vval.v_string = vim_strsave(cb->cb_name); func_ref(cb->cb_name); } } /* * Make a copy of "src" into "dest", allocating the function name if needed, * without incrementing the refcount. */ void set_callback(callback_T *dest, callback_T *src) { if (src->cb_partial == NULL) { // just a function name, make a copy dest->cb_name = vim_strsave(src->cb_name); dest->cb_free_name = TRUE; } else { // cb_name is a pointer into cb_partial dest->cb_name = src->cb_name; dest->cb_free_name = FALSE; } dest->cb_partial = src->cb_partial; } /* * Copy callback from "src" to "dest", incrementing the refcounts. 
*/ void copy_callback(callback_T *dest, callback_T *src) { dest->cb_partial = src->cb_partial; if (dest->cb_partial != NULL) { dest->cb_name = src->cb_name; dest->cb_free_name = FALSE; ++dest->cb_partial->pt_refcount; } else { dest->cb_name = vim_strsave(src->cb_name); dest->cb_free_name = TRUE; func_ref(src->cb_name); } } /* * When a callback refers to an autoload import, change the function name to * the "path#name" form. Uses the current script context. * Only works when the name is allocated. */ void expand_autload_callback(callback_T *cb) { char_u *name; char_u *p; imported_T *import; if (!in_vim9script() || cb->cb_name == NULL || (!cb->cb_free_name && (cb->cb_partial == NULL || cb->cb_partial->pt_name == NULL))) return; if (cb->cb_partial != NULL) name = cb->cb_partial->pt_name; else name = cb->cb_name; p = vim_strchr(name, '.'); if (p == NULL) return; import = find_imported(name, p - name, FALSE); if (import != NULL && SCRIPT_ID_VALID(import->imp_sid)) { scriptitem_T *si = SCRIPT_ITEM(import->imp_sid); if (si->sn_autoload_prefix != NULL) { char_u *newname = concat_str(si->sn_autoload_prefix, p + 1); if (newname != NULL) { if (cb->cb_partial != NULL) { if (cb->cb_name == cb->cb_partial->pt_name) cb->cb_name = newname; vim_free(cb->cb_partial->pt_name); cb->cb_partial->pt_name = newname; } else { vim_free(cb->cb_name); cb->cb_name = newname; } } } } } /* * Unref/free "callback" returned by get_callback() or set_callback(). */ void free_callback(callback_T *callback) { if (callback->cb_partial != NULL) { partial_unref(callback->cb_partial); callback->cb_partial = NULL; } else if (callback->cb_name != NULL) func_unref(callback->cb_name); if (callback->cb_free_name) { vim_free(callback->cb_name); callback->cb_free_name = FALSE; } callback->cb_name = NULL; } #endif // FEAT_EVAL
get_user_var_name(expand_T *xp, int idx) { static long_u gdone; static long_u bdone; static long_u wdone; static long_u tdone; static int vidx; static hashitem_T *hi; hashtab_T *ht; if (idx == 0) { gdone = bdone = wdone = vidx = 0; tdone = 0; } // Global variables if (gdone < globvarht.ht_used) { if (gdone++ == 0) hi = globvarht.ht_array; else ++hi; while (HASHITEM_EMPTY(hi)) ++hi; if (STRNCMP("g:", xp->xp_pattern, 2) == 0) return cat_prefix_varname('g', hi->hi_key); return hi->hi_key; } // b: variables ht = #ifdef FEAT_CMDWIN // In cmdwin, the alternative buffer should be used. is_in_cmdwin() ? &prevwin->w_buffer->b_vars->dv_hashtab : #endif &curbuf->b_vars->dv_hashtab; if (bdone < ht->ht_used) { if (bdone++ == 0) hi = ht->ht_array; else ++hi; while (HASHITEM_EMPTY(hi)) ++hi; return cat_prefix_varname('b', hi->hi_key); } // w: variables ht = #ifdef FEAT_CMDWIN // In cmdwin, the alternative window should be used. is_in_cmdwin() ? &prevwin->w_vars->dv_hashtab : #endif &curwin->w_vars->dv_hashtab; if (wdone < ht->ht_used) { if (wdone++ == 0) hi = ht->ht_array; else ++hi; while (HASHITEM_EMPTY(hi)) ++hi; return cat_prefix_varname('w', hi->hi_key); } // t: variables ht = &curtab->tp_vars->dv_hashtab; if (tdone < ht->ht_used) { if (tdone++ == 0) hi = ht->ht_array; else ++hi; while (HASHITEM_EMPTY(hi)) ++hi; return cat_prefix_varname('t', hi->hi_key); } // v: variables if (vidx < VV_LEN) return cat_prefix_varname('v', (char_u *)vimvars[vidx++].vv_name); VIM_CLEAR(varnamebuf); varnamebuflen = 0; return NULL; }
get_user_var_name(expand_T *xp, int idx) { static long_u gdone; static long_u bdone; static long_u wdone; static long_u tdone; static int vidx; static hashitem_T *hi; hashtab_T *ht; if (idx == 0) { gdone = bdone = wdone = vidx = 0; tdone = 0; } // Global variables if (gdone < globvarht.ht_used) { if (gdone++ == 0) hi = globvarht.ht_array; else ++hi; while (HASHITEM_EMPTY(hi)) ++hi; if (STRNCMP("g:", xp->xp_pattern, 2) == 0) return cat_prefix_varname('g', hi->hi_key); return hi->hi_key; } // b: variables ht = &prevwin_curwin()->w_buffer->b_vars->dv_hashtab; if (bdone < ht->ht_used) { if (bdone++ == 0) hi = ht->ht_array; else ++hi; while (HASHITEM_EMPTY(hi)) ++hi; return cat_prefix_varname('b', hi->hi_key); } // w: variables ht = &prevwin_curwin()->w_vars->dv_hashtab; if (wdone < ht->ht_used) { if (wdone++ == 0) hi = ht->ht_array; else ++hi; while (HASHITEM_EMPTY(hi)) ++hi; return cat_prefix_varname('w', hi->hi_key); } // t: variables ht = &curtab->tp_vars->dv_hashtab; if (tdone < ht->ht_used) { if (tdone++ == 0) hi = ht->ht_array; else ++hi; while (HASHITEM_EMPTY(hi)) ++hi; return cat_prefix_varname('t', hi->hi_key); } // v: variables if (vidx < VV_LEN) return cat_prefix_varname('v', (char_u *)vimvars[vidx++].vv_name); VIM_CLEAR(varnamebuf); varnamebuflen = 0; return NULL; }
{'added': [(2202, ' ht = &prevwin_curwin()->w_buffer->b_vars->dv_hashtab;'), (2215, ' ht = &prevwin_curwin()->w_vars->dv_hashtab;')], 'deleted': [(2202, ' ht ='), (2203, '#ifdef FEAT_CMDWIN'), (2204, '\t// In cmdwin, the alternative buffer should be used.'), (2205, '\tis_in_cmdwin() ? &prevwin->w_buffer->b_vars->dv_hashtab :'), (2206, '#endif'), (2207, '\t&curbuf->b_vars->dv_hashtab;'), (2220, ' ht ='), (2221, '#ifdef FEAT_CMDWIN'), (2222, '\t// In cmdwin, the alternative window should be used.'), (2223, '\tis_in_cmdwin() ? &prevwin->w_vars->dv_hashtab :'), (2224, '#endif'), (2225, '\t&curwin->w_vars->dv_hashtab;')]}
2
12
3,550
19,987
69
357
20
https://github.com/vim/vim
CVE-2022-0696
CWE-476
375
sysctl_net.c
C
net_ctl_permissions
/* -*- linux-c -*- * sysctl_net.c: sysctl interface to net subsystem. * * Begun April 1, 1996, Mike Shaver. * Added /proc/sys/net directories for each protocol family. [MS] * * Revision 1.2 1996/05/08 20:24:40 shaver * Added bits for NET_BRIDGE and the NET_IPV4_ARP stuff and * NET_IPV4_IP_FORWARD. * * */ #include <linux/mm.h> #include <linux/export.h> #include <linux/sysctl.h> #include <linux/nsproxy.h> #include <net/sock.h> #ifdef CONFIG_INET #include <net/ip.h> #endif #ifdef CONFIG_NET #include <linux/if_ether.h> #endif static struct ctl_table_set * net_ctl_header_lookup(struct ctl_table_root *root, struct nsproxy *namespaces) { return &namespaces->net_ns->sysctls; } static int is_seen(struct ctl_table_set *set) { return &current->nsproxy->net_ns->sysctls == set; } /* Return standard mode bits for table entry. */ static int net_ctl_permissions(struct ctl_table_header *head, struct ctl_table *table) { struct net *net = container_of(head->set, struct net, sysctls); kuid_t root_uid = make_kuid(net->user_ns, 0); kgid_t root_gid = make_kgid(net->user_ns, 0); /* Allow network administrator to have same access as root. 
*/ if (ns_capable(net->user_ns, CAP_NET_ADMIN) || uid_eq(root_uid, current_uid())) { int mode = (table->mode >> 6) & 7; return (mode << 6) | (mode << 3) | mode; } /* Allow netns root group to have the same access as the root group */ if (gid_eq(root_gid, current_gid())) { int mode = (table->mode >> 3) & 7; return (mode << 3) | mode; } return table->mode; } static struct ctl_table_root net_sysctl_root = { .lookup = net_ctl_header_lookup, .permissions = net_ctl_permissions, }; static int __net_init sysctl_net_init(struct net *net) { setup_sysctl_set(&net->sysctls, &net_sysctl_root, is_seen); return 0; } static void __net_exit sysctl_net_exit(struct net *net) { retire_sysctl_set(&net->sysctls); } static struct pernet_operations sysctl_pernet_ops = { .init = sysctl_net_init, .exit = sysctl_net_exit, }; static struct ctl_table_header *net_header; __init int net_sysctl_init(void) { static struct ctl_table empty[1]; int ret = -ENOMEM; /* Avoid limitations in the sysctl implementation by * registering "/proc/sys/net" as an empty directory not in a * network namespace. */ net_header = register_sysctl("net", empty); if (!net_header) goto out; ret = register_pernet_subsys(&sysctl_pernet_ops); if (ret) goto out; register_sysctl_root(&net_sysctl_root); out: return ret; } struct ctl_table_header *register_net_sysctl(struct net *net, const char *path, struct ctl_table *table) { return __register_sysctl_table(&net->sysctls, path, table); } EXPORT_SYMBOL_GPL(register_net_sysctl); void unregister_net_sysctl_table(struct ctl_table_header *header) { unregister_sysctl_table(header); } EXPORT_SYMBOL_GPL(unregister_net_sysctl_table);
/* -*- linux-c -*- * sysctl_net.c: sysctl interface to net subsystem. * * Begun April 1, 1996, Mike Shaver. * Added /proc/sys/net directories for each protocol family. [MS] * * Revision 1.2 1996/05/08 20:24:40 shaver * Added bits for NET_BRIDGE and the NET_IPV4_ARP stuff and * NET_IPV4_IP_FORWARD. * * */ #include <linux/mm.h> #include <linux/export.h> #include <linux/sysctl.h> #include <linux/nsproxy.h> #include <net/sock.h> #ifdef CONFIG_INET #include <net/ip.h> #endif #ifdef CONFIG_NET #include <linux/if_ether.h> #endif static struct ctl_table_set * net_ctl_header_lookup(struct ctl_table_root *root, struct nsproxy *namespaces) { return &namespaces->net_ns->sysctls; } static int is_seen(struct ctl_table_set *set) { return &current->nsproxy->net_ns->sysctls == set; } /* Return standard mode bits for table entry. */ static int net_ctl_permissions(struct ctl_table_header *head, struct ctl_table *table) { struct net *net = container_of(head->set, struct net, sysctls); kuid_t root_uid = make_kuid(net->user_ns, 0); kgid_t root_gid = make_kgid(net->user_ns, 0); /* Allow network administrator to have same access as root. 
*/ if (ns_capable(net->user_ns, CAP_NET_ADMIN) || uid_eq(root_uid, current_euid())) { int mode = (table->mode >> 6) & 7; return (mode << 6) | (mode << 3) | mode; } /* Allow netns root group to have the same access as the root group */ if (in_egroup_p(root_gid)) { int mode = (table->mode >> 3) & 7; return (mode << 3) | mode; } return table->mode; } static struct ctl_table_root net_sysctl_root = { .lookup = net_ctl_header_lookup, .permissions = net_ctl_permissions, }; static int __net_init sysctl_net_init(struct net *net) { setup_sysctl_set(&net->sysctls, &net_sysctl_root, is_seen); return 0; } static void __net_exit sysctl_net_exit(struct net *net) { retire_sysctl_set(&net->sysctls); } static struct pernet_operations sysctl_pernet_ops = { .init = sysctl_net_init, .exit = sysctl_net_exit, }; static struct ctl_table_header *net_header; __init int net_sysctl_init(void) { static struct ctl_table empty[1]; int ret = -ENOMEM; /* Avoid limitations in the sysctl implementation by * registering "/proc/sys/net" as an empty directory not in a * network namespace. */ net_header = register_sysctl("net", empty); if (!net_header) goto out; ret = register_pernet_subsys(&sysctl_pernet_ops); if (ret) goto out; register_sysctl_root(&net_sysctl_root); out: return ret; } struct ctl_table_header *register_net_sysctl(struct net *net, const char *path, struct ctl_table *table) { return __register_sysctl_table(&net->sysctls, path, table); } EXPORT_SYMBOL_GPL(register_net_sysctl); void unregister_net_sysctl_table(struct ctl_table_header *header) { unregister_sysctl_table(header); } EXPORT_SYMBOL_GPL(unregister_net_sysctl_table);
static int net_ctl_permissions(struct ctl_table_header *head, struct ctl_table *table) { struct net *net = container_of(head->set, struct net, sysctls); kuid_t root_uid = make_kuid(net->user_ns, 0); kgid_t root_gid = make_kgid(net->user_ns, 0); /* Allow network administrator to have same access as root. */ if (ns_capable(net->user_ns, CAP_NET_ADMIN) || uid_eq(root_uid, current_uid())) { int mode = (table->mode >> 6) & 7; return (mode << 6) | (mode << 3) | mode; } /* Allow netns root group to have the same access as the root group */ if (gid_eq(root_gid, current_gid())) { int mode = (table->mode >> 3) & 7; return (mode << 3) | mode; } return table->mode; }
static int net_ctl_permissions(struct ctl_table_header *head, struct ctl_table *table) { struct net *net = container_of(head->set, struct net, sysctls); kuid_t root_uid = make_kuid(net->user_ns, 0); kgid_t root_gid = make_kgid(net->user_ns, 0); /* Allow network administrator to have same access as root. */ if (ns_capable(net->user_ns, CAP_NET_ADMIN) || uid_eq(root_uid, current_euid())) { int mode = (table->mode >> 6) & 7; return (mode << 6) | (mode << 3) | mode; } /* Allow netns root group to have the same access as the root group */ if (in_egroup_p(root_gid)) { int mode = (table->mode >> 3) & 7; return (mode << 3) | mode; } return table->mode; }
{'added': [(50, '\t uid_eq(root_uid, current_euid())) {'), (55, '\tif (in_egroup_p(root_gid)) {')], 'deleted': [(50, '\t uid_eq(root_uid, current_uid())) {'), (55, '\tif (gid_eq(root_gid, current_gid())) {')]}
2
2
76
426
17
150
4
https://github.com/torvalds/linux
CVE-2013-4270
CWE-20
745
zeros_like.cc
C++
tflite::ops::builtin::zeros_like::Eval
/* Copyright 2017 The TensorFlow Authors. All Rights Reserved. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. You may obtain a copy of the License at http://www.apache.org/licenses/LICENSE-2.0 Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License. ==============================================================================*/ #include <stdint.h> #include <string.h> #include "tensorflow/lite/c/common.h" #include "tensorflow/lite/kernels/internal/tensor.h" #include "tensorflow/lite/kernels/internal/tensor_ctypes.h" #include "tensorflow/lite/kernels/kernel_util.h" namespace tflite { namespace ops { namespace builtin { namespace zeros_like { constexpr int kInputTensor = 0; constexpr int kOutputTensor = 0; TfLiteStatus Prepare(TfLiteContext* context, TfLiteNode* node) { TF_LITE_ENSURE_EQ(context, NumInputs(node), 1); TF_LITE_ENSURE_EQ(context, NumOutputs(node), 1); const TfLiteTensor* input = GetInput(context, node, kInputTensor); TfLiteTensor* output = GetOutput(context, node, kOutputTensor); output->type = input->type; return context->ResizeTensor(context, output, TfLiteIntArrayCopy(input->dims)); } TfLiteStatus Eval(TfLiteContext* context, TfLiteNode* node) { const TfLiteTensor* input = GetInput(context, node, kInputTensor); TfLiteTensor* output = GetOutput(context, node, kOutputTensor); const int num_elements = NumElements(input); switch (input->type) { case kTfLiteInt64: memset(GetTensorData<int64_t>(output), 0, num_elements * sizeof(int64_t)); break; case kTfLiteInt32: memset(GetTensorData<int32_t>(output), 0, num_elements * sizeof(int32_t)); break; case kTfLiteFloat32: memset(GetTensorData<float>(output), 0, num_elements * 
sizeof(float)); break; default: context->ReportError(context, "ZerosLike only currently supports int64, int32, " "and float32, got %d.", input->type); return kTfLiteError; } return kTfLiteOk; } } // namespace zeros_like TfLiteRegistration* Register_ZEROS_LIKE() { static TfLiteRegistration r = {/*init=*/nullptr, /*free=*/nullptr, zeros_like::Prepare, zeros_like::Eval}; return &r; } } // namespace builtin } // namespace ops } // namespace tflite
/* Copyright 2017 The TensorFlow Authors. All Rights Reserved. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. You may obtain a copy of the License at http://www.apache.org/licenses/LICENSE-2.0 Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License. ==============================================================================*/ #include <stdint.h> #include <string.h> #include "tensorflow/lite/c/common.h" #include "tensorflow/lite/kernels/internal/tensor.h" #include "tensorflow/lite/kernels/internal/tensor_ctypes.h" #include "tensorflow/lite/kernels/kernel_util.h" namespace tflite { namespace ops { namespace builtin { namespace zeros_like { constexpr int kInputTensor = 0; constexpr int kOutputTensor = 0; TfLiteStatus Prepare(TfLiteContext* context, TfLiteNode* node) { TF_LITE_ENSURE_EQ(context, NumInputs(node), 1); TF_LITE_ENSURE_EQ(context, NumOutputs(node), 1); const TfLiteTensor* input; TF_LITE_ENSURE_OK(context, GetInputSafe(context, node, kInputTensor, &input)); TfLiteTensor* output; TF_LITE_ENSURE_OK(context, GetOutputSafe(context, node, kOutputTensor, &output)); output->type = input->type; return context->ResizeTensor(context, output, TfLiteIntArrayCopy(input->dims)); } TfLiteStatus Eval(TfLiteContext* context, TfLiteNode* node) { const TfLiteTensor* input; TF_LITE_ENSURE_OK(context, GetInputSafe(context, node, kInputTensor, &input)); TfLiteTensor* output; TF_LITE_ENSURE_OK(context, GetOutputSafe(context, node, kOutputTensor, &output)); const int num_elements = NumElements(input); switch (input->type) { case kTfLiteInt64: memset(GetTensorData<int64_t>(output), 0, num_elements * sizeof(int64_t)); break; case kTfLiteInt32: 
memset(GetTensorData<int32_t>(output), 0, num_elements * sizeof(int32_t)); break; case kTfLiteFloat32: memset(GetTensorData<float>(output), 0, num_elements * sizeof(float)); break; default: context->ReportError(context, "ZerosLike only currently supports int64, int32, " "and float32, got %d.", input->type); return kTfLiteError; } return kTfLiteOk; } } // namespace zeros_like TfLiteRegistration* Register_ZEROS_LIKE() { static TfLiteRegistration r = {/*init=*/nullptr, /*free=*/nullptr, zeros_like::Prepare, zeros_like::Eval}; return &r; } } // namespace builtin } // namespace ops } // namespace tflite
TfLiteStatus Eval(TfLiteContext* context, TfLiteNode* node) { const TfLiteTensor* input = GetInput(context, node, kInputTensor); TfLiteTensor* output = GetOutput(context, node, kOutputTensor); const int num_elements = NumElements(input); switch (input->type) { case kTfLiteInt64: memset(GetTensorData<int64_t>(output), 0, num_elements * sizeof(int64_t)); break; case kTfLiteInt32: memset(GetTensorData<int32_t>(output), 0, num_elements * sizeof(int32_t)); break; case kTfLiteFloat32: memset(GetTensorData<float>(output), 0, num_elements * sizeof(float)); break; default: context->ReportError(context, "ZerosLike only currently supports int64, int32, " "and float32, got %d.", input->type); return kTfLiteError; } return kTfLiteOk; }
TfLiteStatus Eval(TfLiteContext* context, TfLiteNode* node) { const TfLiteTensor* input; TF_LITE_ENSURE_OK(context, GetInputSafe(context, node, kInputTensor, &input)); TfLiteTensor* output; TF_LITE_ENSURE_OK(context, GetOutputSafe(context, node, kOutputTensor, &output)); const int num_elements = NumElements(input); switch (input->type) { case kTfLiteInt64: memset(GetTensorData<int64_t>(output), 0, num_elements * sizeof(int64_t)); break; case kTfLiteInt32: memset(GetTensorData<int32_t>(output), 0, num_elements * sizeof(int32_t)); break; case kTfLiteFloat32: memset(GetTensorData<float>(output), 0, num_elements * sizeof(float)); break; default: context->ReportError(context, "ZerosLike only currently supports int64, int32, " "and float32, got %d.", input->type); return kTfLiteError; } return kTfLiteOk; }
{'added': [(35, ' const TfLiteTensor* input;'), (36, ' TF_LITE_ENSURE_OK(context, GetInputSafe(context, node, kInputTensor, &input));'), (37, ' TfLiteTensor* output;'), (38, ' TF_LITE_ENSURE_OK(context,'), (39, ' GetOutputSafe(context, node, kOutputTensor, &output));'), (47, ' const TfLiteTensor* input;'), (48, ' TF_LITE_ENSURE_OK(context, GetInputSafe(context, node, kInputTensor, &input));'), (49, ' TfLiteTensor* output;'), (50, ' TF_LITE_ENSURE_OK(context,'), (51, ' GetOutputSafe(context, node, kOutputTensor, &output));')], 'deleted': [(35, ' const TfLiteTensor* input = GetInput(context, node, kInputTensor);'), (36, ' TfLiteTensor* output = GetOutput(context, node, kOutputTensor);'), (44, ' const TfLiteTensor* input = GetInput(context, node, kInputTensor);'), (45, ' TfLiteTensor* output = GetOutput(context, node, kOutputTensor);')]}
10
4
59
344
23
153
4
https://github.com/tensorflow/tensorflow
CVE-2020-15211
CWE-125
3,161
print-null.c
C
null_if_print
/* * Copyright (c) 1991, 1993, 1994, 1995, 1996, 1997 * The Regents of the University of California. All rights reserved. * * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that: (1) source code distributions * retain the above copyright notice and this paragraph in its entirety, (2) * distributions including binary code include the above copyright notice and * this paragraph in its entirety in the documentation or other materials * provided with the distribution, and (3) all advertising materials mentioning * features or use of this software display the following acknowledgement: * ``This product includes software developed by the University of California, * Lawrence Berkeley Laboratory and its contributors.'' Neither the name of * the University nor the names of its contributors may be used to endorse * or promote products derived from this software without specific prior * written permission. * THIS SOFTWARE IS PROVIDED ``AS IS'' AND WITHOUT ANY EXPRESS OR IMPLIED * WARRANTIES, INCLUDING, WITHOUT LIMITATION, THE IMPLIED WARRANTIES OF * MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE. */ /* \summary: BSD loopback device printer */ #ifdef HAVE_CONFIG_H #include "config.h" #endif #include <netdissect-stdinc.h> #include <string.h> #include "netdissect.h" #include "af.h" /* * The DLT_NULL packet header is 4 bytes long. It contains a host-byte-order * 32-bit integer that specifies the family, e.g. AF_INET. * * Note here that "host" refers to the host on which the packets were * captured; that isn't necessarily *this* host. * * The OpenBSD DLT_LOOP packet header is the same, except that the integer * is in network byte order. */ #define NULL_HDRLEN 4 /* * Byte-swap a 32-bit number. * ("htonl()" or "ntohl()" won't work - we want to byte-swap even on * big-endian platforms.) 
*/ #define SWAPLONG(y) \ ((((y)&0xff)<<24) | (((y)&0xff00)<<8) | (((y)&0xff0000)>>8) | (((y)>>24)&0xff)) static inline void null_hdr_print(netdissect_options *ndo, u_int family, u_int length) { if (!ndo->ndo_qflag) { ND_PRINT((ndo, "AF %s (%u)", tok2str(bsd_af_values,"Unknown",family),family)); } else { ND_PRINT((ndo, "%s", tok2str(bsd_af_values,"Unknown AF %u",family))); } ND_PRINT((ndo, ", length %u: ", length)); } /* * This is the top level routine of the printer. 'p' points * to the ether header of the packet, 'h->ts' is the timestamp, * 'h->len' is the length of the packet off the wire, and 'h->caplen' * is the number of bytes actually captured. */ u_int null_if_print(netdissect_options *ndo, const struct pcap_pkthdr *h, const u_char *p) { u_int length = h->len; u_int caplen = h->caplen; u_int family; if (caplen < NULL_HDRLEN) { ND_PRINT((ndo, "[|null]")); return (NULL_HDRLEN); } memcpy((char *)&family, (const char *)p, sizeof(family)); /* * This isn't necessarily in our host byte order; if this is * a DLT_LOOP capture, it's in network byte order, and if * this is a DLT_NULL capture from a machine with the opposite * byte-order, it's in the opposite byte order from ours. * * If the upper 16 bits aren't all zero, assume it's byte-swapped. 
*/ if ((family & 0xFFFF0000) != 0) family = SWAPLONG(family); if (ndo->ndo_eflag) null_hdr_print(ndo, family, length); length -= NULL_HDRLEN; caplen -= NULL_HDRLEN; p += NULL_HDRLEN; switch (family) { case BSD_AFNUM_INET: ip_print(ndo, p, length); break; case BSD_AFNUM_INET6_BSD: case BSD_AFNUM_INET6_FREEBSD: case BSD_AFNUM_INET6_DARWIN: ip6_print(ndo, p, length); break; case BSD_AFNUM_ISO: isoclns_print(ndo, p, length, caplen); break; case BSD_AFNUM_APPLETALK: atalk_print(ndo, p, length); break; case BSD_AFNUM_IPX: ipx_print(ndo, p, length); break; default: /* unknown AF_ value */ if (!ndo->ndo_eflag) null_hdr_print(ndo, family, length + NULL_HDRLEN); if (!ndo->ndo_suppress_default_print) ND_DEFAULTPRINT(p, caplen); } return (NULL_HDRLEN); } /* * Local Variables: * c-style: whitesmith * c-basic-offset: 8 * End: */
/* * Copyright (c) 1991, 1993, 1994, 1995, 1996, 1997 * The Regents of the University of California. All rights reserved. * * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that: (1) source code distributions * retain the above copyright notice and this paragraph in its entirety, (2) * distributions including binary code include the above copyright notice and * this paragraph in its entirety in the documentation or other materials * provided with the distribution, and (3) all advertising materials mentioning * features or use of this software display the following acknowledgement: * ``This product includes software developed by the University of California, * Lawrence Berkeley Laboratory and its contributors.'' Neither the name of * the University nor the names of its contributors may be used to endorse * or promote products derived from this software without specific prior * written permission. * THIS SOFTWARE IS PROVIDED ``AS IS'' AND WITHOUT ANY EXPRESS OR IMPLIED * WARRANTIES, INCLUDING, WITHOUT LIMITATION, THE IMPLIED WARRANTIES OF * MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE. */ /* \summary: BSD loopback device printer */ #ifdef HAVE_CONFIG_H #include "config.h" #endif #include <netdissect-stdinc.h> #include <string.h> #include "netdissect.h" #include "af.h" /* * The DLT_NULL packet header is 4 bytes long. It contains a host-byte-order * 32-bit integer that specifies the family, e.g. AF_INET. * * Note here that "host" refers to the host on which the packets were * captured; that isn't necessarily *this* host. * * The OpenBSD DLT_LOOP packet header is the same, except that the integer * is in network byte order. */ #define NULL_HDRLEN 4 /* * Byte-swap a 32-bit number. * ("htonl()" or "ntohl()" won't work - we want to byte-swap even on * big-endian platforms.) 
*/ #define SWAPLONG(y) \ ((((y)&0xff)<<24) | (((y)&0xff00)<<8) | (((y)&0xff0000)>>8) | (((y)>>24)&0xff)) static inline void null_hdr_print(netdissect_options *ndo, u_int family, u_int length) { if (!ndo->ndo_qflag) { ND_PRINT((ndo, "AF %s (%u)", tok2str(bsd_af_values,"Unknown",family),family)); } else { ND_PRINT((ndo, "%s", tok2str(bsd_af_values,"Unknown AF %u",family))); } ND_PRINT((ndo, ", length %u: ", length)); } /* * This is the top level routine of the printer. 'p' points * to the ether header of the packet, 'h->ts' is the timestamp, * 'h->len' is the length of the packet off the wire, and 'h->caplen' * is the number of bytes actually captured. */ u_int null_if_print(netdissect_options *ndo, const struct pcap_pkthdr *h, const u_char *p) { u_int length = h->len; u_int caplen = h->caplen; u_int family; if (caplen < NULL_HDRLEN) { ND_PRINT((ndo, "[|null]")); return (NULL_HDRLEN); } memcpy((char *)&family, (const char *)p, sizeof(family)); /* * This isn't necessarily in our host byte order; if this is * a DLT_LOOP capture, it's in network byte order, and if * this is a DLT_NULL capture from a machine with the opposite * byte-order, it's in the opposite byte order from ours. * * If the upper 16 bits aren't all zero, assume it's byte-swapped. 
*/ if ((family & 0xFFFF0000) != 0) family = SWAPLONG(family); if (ndo->ndo_eflag) null_hdr_print(ndo, family, length); length -= NULL_HDRLEN; caplen -= NULL_HDRLEN; p += NULL_HDRLEN; switch (family) { case BSD_AFNUM_INET: ip_print(ndo, p, length); break; case BSD_AFNUM_INET6_BSD: case BSD_AFNUM_INET6_FREEBSD: case BSD_AFNUM_INET6_DARWIN: ip6_print(ndo, p, length); break; case BSD_AFNUM_ISO: isoclns_print(ndo, p, length); break; case BSD_AFNUM_APPLETALK: atalk_print(ndo, p, length); break; case BSD_AFNUM_IPX: ipx_print(ndo, p, length); break; default: /* unknown AF_ value */ if (!ndo->ndo_eflag) null_hdr_print(ndo, family, length + NULL_HDRLEN); if (!ndo->ndo_suppress_default_print) ND_DEFAULTPRINT(p, caplen); } return (NULL_HDRLEN); } /* * Local Variables: * c-style: whitesmith * c-basic-offset: 8 * End: */
null_if_print(netdissect_options *ndo, const struct pcap_pkthdr *h, const u_char *p) { u_int length = h->len; u_int caplen = h->caplen; u_int family; if (caplen < NULL_HDRLEN) { ND_PRINT((ndo, "[|null]")); return (NULL_HDRLEN); } memcpy((char *)&family, (const char *)p, sizeof(family)); /* * This isn't necessarily in our host byte order; if this is * a DLT_LOOP capture, it's in network byte order, and if * this is a DLT_NULL capture from a machine with the opposite * byte-order, it's in the opposite byte order from ours. * * If the upper 16 bits aren't all zero, assume it's byte-swapped. */ if ((family & 0xFFFF0000) != 0) family = SWAPLONG(family); if (ndo->ndo_eflag) null_hdr_print(ndo, family, length); length -= NULL_HDRLEN; caplen -= NULL_HDRLEN; p += NULL_HDRLEN; switch (family) { case BSD_AFNUM_INET: ip_print(ndo, p, length); break; case BSD_AFNUM_INET6_BSD: case BSD_AFNUM_INET6_FREEBSD: case BSD_AFNUM_INET6_DARWIN: ip6_print(ndo, p, length); break; case BSD_AFNUM_ISO: isoclns_print(ndo, p, length, caplen); break; case BSD_AFNUM_APPLETALK: atalk_print(ndo, p, length); break; case BSD_AFNUM_IPX: ipx_print(ndo, p, length); break; default: /* unknown AF_ value */ if (!ndo->ndo_eflag) null_hdr_print(ndo, family, length + NULL_HDRLEN); if (!ndo->ndo_suppress_default_print) ND_DEFAULTPRINT(p, caplen); } return (NULL_HDRLEN); }
null_if_print(netdissect_options *ndo, const struct pcap_pkthdr *h, const u_char *p) { u_int length = h->len; u_int caplen = h->caplen; u_int family; if (caplen < NULL_HDRLEN) { ND_PRINT((ndo, "[|null]")); return (NULL_HDRLEN); } memcpy((char *)&family, (const char *)p, sizeof(family)); /* * This isn't necessarily in our host byte order; if this is * a DLT_LOOP capture, it's in network byte order, and if * this is a DLT_NULL capture from a machine with the opposite * byte-order, it's in the opposite byte order from ours. * * If the upper 16 bits aren't all zero, assume it's byte-swapped. */ if ((family & 0xFFFF0000) != 0) family = SWAPLONG(family); if (ndo->ndo_eflag) null_hdr_print(ndo, family, length); length -= NULL_HDRLEN; caplen -= NULL_HDRLEN; p += NULL_HDRLEN; switch (family) { case BSD_AFNUM_INET: ip_print(ndo, p, length); break; case BSD_AFNUM_INET6_BSD: case BSD_AFNUM_INET6_FREEBSD: case BSD_AFNUM_INET6_DARWIN: ip6_print(ndo, p, length); break; case BSD_AFNUM_ISO: isoclns_print(ndo, p, length); break; case BSD_AFNUM_APPLETALK: atalk_print(ndo, p, length); break; case BSD_AFNUM_IPX: ipx_print(ndo, p, length); break; default: /* unknown AF_ value */ if (!ndo->ndo_eflag) null_hdr_print(ndo, family, length + NULL_HDRLEN); if (!ndo->ndo_suppress_default_print) ND_DEFAULTPRINT(p, caplen); } return (NULL_HDRLEN); }
{'added': [(120, '\t\tisoclns_print(ndo, p, length);')], 'deleted': [(120, '\t\tisoclns_print(ndo, p, length, caplen);')]}
1
1
61
334
43
247
13
https://github.com/the-tcpdump-group/tcpdump
CVE-2017-12897
CWE-125
852
tiffcp.c
C
pickCopyFunc
/* $Id$ */ /* * Copyright (c) 1988-1997 Sam Leffler * Copyright (c) 1991-1997 Silicon Graphics, Inc. * * Revised: 2/18/01 BAR -- added syntax for extracting single images from * multi-image TIFF files. * * New syntax is: sourceFileName,image# * * image# ranges from 0..<n-1> where n is the # of images in the file. * There may be no white space between the comma and the filename or * image number. * * Example: tiffcp source.tif,1 destination.tif * * Copies the 2nd image in source.tif to the destination. * ***** * Permission to use, copy, modify, distribute, and sell this software and * its documentation for any purpose is hereby granted without fee, provided * that (i) the above copyright notices and this permission notice appear in * all copies of the software and related documentation, and (ii) the names of * Sam Leffler and Silicon Graphics may not be used in any advertising or * publicity relating to the software without the specific, prior written * permission of Sam Leffler and Silicon Graphics. * * THE SOFTWARE IS PROVIDED "AS-IS" AND WITHOUT WARRANTY OF ANY KIND, * EXPRESS, IMPLIED OR OTHERWISE, INCLUDING WITHOUT LIMITATION, ANY * WARRANTY OF MERCHANTABILITY OR FITNESS FOR A PARTICULAR PURPOSE. * * IN NO EVENT SHALL SAM LEFFLER OR SILICON GRAPHICS BE LIABLE FOR * ANY SPECIAL, INCIDENTAL, INDIRECT OR CONSEQUENTIAL DAMAGES OF ANY KIND, * OR ANY DAMAGES WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, * WHETHER OR NOT ADVISED OF THE POSSIBILITY OF DAMAGE, AND ON ANY THEORY OF * LIABILITY, ARISING OUT OF OR IN CONNECTION WITH THE USE OR PERFORMANCE * OF THIS SOFTWARE. 
*/ #include "tif_config.h" #include <stdio.h> #include <stdlib.h> #include <string.h> #include <ctype.h> #ifdef HAVE_UNISTD_H # include <unistd.h> #endif #include "tiffio.h" #ifndef HAVE_GETOPT extern int getopt(int, char**, char*); #endif #if defined(VMS) # define unlink delete #endif #define streq(a,b) (strcmp(a,b) == 0) #define strneq(a,b,n) (strncmp(a,b,n) == 0) #define TRUE 1 #define FALSE 0 static int outtiled = -1; static uint32 tilewidth; static uint32 tilelength; static uint16 config; static uint16 compression; static uint16 predictor; static int preset; static uint16 fillorder; static uint16 orientation; static uint32 rowsperstrip; static uint32 g3opts; static int ignore = FALSE; /* if true, ignore read errors */ static uint32 defg3opts = (uint32) -1; static int quality = 75; /* JPEG quality */ static int jpegcolormode = JPEGCOLORMODE_RGB; static uint16 defcompression = (uint16) -1; static uint16 defpredictor = (uint16) -1; static int defpreset = -1; static int tiffcp(TIFF*, TIFF*); static int processCompressOptions(char*); static void usage(void); static char comma = ','; /* (default) comma separator character */ static TIFF* bias = NULL; static int pageNum = 0; static int pageInSeq = 0; static int nextSrcImage (TIFF *tif, char **imageSpec) /* seek to the next image specified in *imageSpec returns 1 if success, 0 if no more images to process *imageSpec=NULL if subsequent images should be processed in sequence */ { if (**imageSpec == comma) { /* if not @comma, we've done all images */ char *start = *imageSpec + 1; tdir_t nextImage = (tdir_t)strtol(start, imageSpec, 0); if (start == *imageSpec) nextImage = TIFFCurrentDirectory (tif); if (**imageSpec) { if (**imageSpec == comma) { /* a trailing comma denotes remaining images in sequence */ if ((*imageSpec)[1] == '\0') *imageSpec = NULL; }else{ fprintf (stderr, "Expected a %c separated image # list after %s\n", comma, TIFFFileName (tif)); exit (-4); /* syntax error */ } } if (TIFFSetDirectory (tif, 
nextImage)) return 1; fprintf (stderr, "%s%c%d not found!\n", TIFFFileName(tif), comma, (int) nextImage); } return 0; } static TIFF* openSrcImage (char **imageSpec) /* imageSpec points to a pointer to a filename followed by optional ,image#'s Open the TIFF file and assign *imageSpec to either NULL if there are no images specified, or a pointer to the next image number text */ { TIFF *tif; char *fn = *imageSpec; *imageSpec = strchr (fn, comma); if (*imageSpec) { /* there is at least one image number specifier */ **imageSpec = '\0'; tif = TIFFOpen (fn, "r"); /* but, ignore any single trailing comma */ if (!(*imageSpec)[1]) {*imageSpec = NULL; return tif;} if (tif) { **imageSpec = comma; /* replace the comma */ if (!nextSrcImage(tif, imageSpec)) { TIFFClose (tif); tif = NULL; } } }else tif = TIFFOpen (fn, "r"); return tif; } int main(int argc, char* argv[]) { uint16 defconfig = (uint16) -1; uint16 deffillorder = 0; uint32 deftilewidth = (uint32) -1; uint32 deftilelength = (uint32) -1; uint32 defrowsperstrip = (uint32) 0; uint64 diroff = 0; TIFF* in; TIFF* out; char mode[10]; char* mp = mode; int c; #if !HAVE_DECL_OPTARG extern int optind; extern char* optarg; #endif *mp++ = 'w'; *mp = '\0'; while ((c = getopt(argc, argv, ",:b:c:f:l:o:p:r:w:aistBLMC8x")) != -1) switch (c) { case ',': if (optarg[0] != '=') usage(); comma = optarg[1]; break; case 'b': /* this file is bias image subtracted from others */ if (bias) { fputs ("Only 1 bias image may be specified\n", stderr); exit (-2); } { uint16 samples = (uint16) -1; char **biasFn = &optarg; bias = openSrcImage (biasFn); if (!bias) exit (-5); if (TIFFIsTiled (bias)) { fputs ("Bias image must be organized in strips\n", stderr); exit (-7); } TIFFGetField(bias, TIFFTAG_SAMPLESPERPIXEL, &samples); if (samples != 1) { fputs ("Bias image must be monochrome\n", stderr); exit (-7); } } break; case 'a': /* append to output */ mode[0] = 'a'; break; case 'c': /* compression scheme */ if (!processCompressOptions(optarg)) usage(); 
break; case 'f': /* fill order */ if (streq(optarg, "lsb2msb")) deffillorder = FILLORDER_LSB2MSB; else if (streq(optarg, "msb2lsb")) deffillorder = FILLORDER_MSB2LSB; else usage(); break; case 'i': /* ignore errors */ ignore = TRUE; break; case 'l': /* tile length */ outtiled = TRUE; deftilelength = atoi(optarg); break; case 'o': /* initial directory offset */ diroff = strtoul(optarg, NULL, 0); break; case 'p': /* planar configuration */ if (streq(optarg, "separate")) defconfig = PLANARCONFIG_SEPARATE; else if (streq(optarg, "contig")) defconfig = PLANARCONFIG_CONTIG; else usage(); break; case 'r': /* rows/strip */ defrowsperstrip = atol(optarg); break; case 's': /* generate stripped output */ outtiled = FALSE; break; case 't': /* generate tiled output */ outtiled = TRUE; break; case 'w': /* tile width */ outtiled = TRUE; deftilewidth = atoi(optarg); break; case 'B': *mp++ = 'b'; *mp = '\0'; break; case 'L': *mp++ = 'l'; *mp = '\0'; break; case 'M': *mp++ = 'm'; *mp = '\0'; break; case 'C': *mp++ = 'c'; *mp = '\0'; break; case '8': *mp++ = '8'; *mp = '\0'; break; case 'x': pageInSeq = 1; break; case '?': usage(); /*NOTREACHED*/ } if (argc - optind < 2) usage(); out = TIFFOpen(argv[argc-1], mode); if (out == NULL) return (-2); if ((argc - optind) == 2) pageNum = -1; for (; optind < argc-1 ; optind++) { char *imageCursor = argv[optind]; in = openSrcImage (&imageCursor); if (in == NULL) { (void) TIFFClose(out); return (-3); } if (diroff != 0 && !TIFFSetSubDirectory(in, diroff)) { TIFFError(TIFFFileName(in), "Error, setting subdirectory at " TIFF_UINT64_FORMAT, diroff); (void) TIFFClose(in); (void) TIFFClose(out); return (1); } for (;;) { config = defconfig; compression = defcompression; predictor = defpredictor; preset = defpreset; fillorder = deffillorder; rowsperstrip = defrowsperstrip; tilewidth = deftilewidth; tilelength = deftilelength; g3opts = defg3opts; if (!tiffcp(in, out) || !TIFFWriteDirectory(out)) { (void) TIFFClose(in); (void) TIFFClose(out); return (1); 
} if (imageCursor) { /* seek next image directory */ if (!nextSrcImage(in, &imageCursor)) break; }else if (!TIFFReadDirectory(in)) break; } (void) TIFFClose(in); } (void) TIFFClose(out); return (0); } static void processZIPOptions(char* cp) { if ( (cp = strchr(cp, ':')) ) { do { cp++; if (isdigit((int)*cp)) defpredictor = atoi(cp); else if (*cp == 'p') defpreset = atoi(++cp); else usage(); } while( (cp = strchr(cp, ':')) ); } } static void processG3Options(char* cp) { if( (cp = strchr(cp, ':')) ) { if (defg3opts == (uint32) -1) defg3opts = 0; do { cp++; if (strneq(cp, "1d", 2)) defg3opts &= ~GROUP3OPT_2DENCODING; else if (strneq(cp, "2d", 2)) defg3opts |= GROUP3OPT_2DENCODING; else if (strneq(cp, "fill", 4)) defg3opts |= GROUP3OPT_FILLBITS; else usage(); } while( (cp = strchr(cp, ':')) ); } } static int processCompressOptions(char* opt) { if (streq(opt, "none")) { defcompression = COMPRESSION_NONE; } else if (streq(opt, "packbits")) { defcompression = COMPRESSION_PACKBITS; } else if (strneq(opt, "jpeg", 4)) { char* cp = strchr(opt, ':'); defcompression = COMPRESSION_JPEG; while( cp ) { if (isdigit((int)cp[1])) quality = atoi(cp+1); else if (cp[1] == 'r' ) jpegcolormode = JPEGCOLORMODE_RAW; else usage(); cp = strchr(cp+1,':'); } } else if (strneq(opt, "g3", 2)) { processG3Options(opt); defcompression = COMPRESSION_CCITTFAX3; } else if (streq(opt, "g4")) { defcompression = COMPRESSION_CCITTFAX4; } else if (strneq(opt, "lzw", 3)) { char* cp = strchr(opt, ':'); if (cp) defpredictor = atoi(cp+1); defcompression = COMPRESSION_LZW; } else if (strneq(opt, "zip", 3)) { processZIPOptions(opt); defcompression = COMPRESSION_ADOBE_DEFLATE; } else if (strneq(opt, "lzma", 4)) { processZIPOptions(opt); defcompression = COMPRESSION_LZMA; } else if (strneq(opt, "jbig", 4)) { defcompression = COMPRESSION_JBIG; } else if (strneq(opt, "sgilog", 6)) { defcompression = COMPRESSION_SGILOG; } else return (0); return (1); } char* stuff[] = { "usage: tiffcp [options] input... 
output", "where options are:", " -a append to output instead of overwriting", " -o offset set initial directory offset", " -p contig pack samples contiguously (e.g. RGBRGB...)", " -p separate store samples separately (e.g. RRR...GGG...BBB...)", " -s write output in strips", " -t write output in tiles", " -x force the merged tiff pages in sequence", " -8 write BigTIFF instead of default ClassicTIFF", " -B write big-endian instead of native byte order", " -L write little-endian instead of native byte order", " -M disable use of memory-mapped files", " -C disable strip chopping", " -i ignore read errors", " -b file[,#] bias (dark) monochrome image to be subtracted from all others", " -,=% use % rather than , to separate image #'s (per Note below)", "", " -r # make each strip have no more than # rows", " -w # set output tile width (pixels)", " -l # set output tile length (pixels)", "", " -f lsb2msb force lsb-to-msb FillOrder for output", " -f msb2lsb force msb-to-lsb FillOrder for output", "", " -c lzw[:opts] compress output with Lempel-Ziv & Welch encoding", " -c zip[:opts] compress output with deflate encoding", " -c lzma[:opts] compress output with LZMA2 encoding", " -c jpeg[:opts] compress output with JPEG encoding", " -c jbig compress output with ISO JBIG encoding", " -c packbits compress output with packbits encoding", " -c g3[:opts] compress output with CCITT Group 3 encoding", " -c g4 compress output with CCITT Group 4 encoding", " -c sgilog compress output with SGILOG encoding", " -c none use no compression algorithm on output", "", "Group 3 options:", " 1d use default CCITT Group 3 1D-encoding", " 2d use optional CCITT Group 3 2D-encoding", " fill byte-align EOL codes", "For example, -c g3:2d:fill to get G3-2D-encoded data with byte-aligned EOLs", "", "JPEG options:", " # set compression quality level (0-100, default 75)", " r output color image as RGB rather than YCbCr", "For example, -c jpeg:r:50 to get JPEG-encoded RGB data with 50% comp. 
quality", "", "LZW, Deflate (ZIP) and LZMA2 options:", " # set predictor value", " p# set compression level (preset)", "For example, -c lzw:2 to get LZW-encoded data with horizontal differencing,", "-c zip:3:p9 for Deflate encoding with maximum compression level and floating", "point predictor.", "", "Note that input filenames may be of the form filename,x,y,z", "where x, y, and z specify image numbers in the filename to copy.", "example: tiffcp -c none -b esp.tif,1 esp.tif,0 test.tif", " subtract 2nd image in esp.tif from 1st yielding uncompressed result test.tif", NULL }; static void usage(void) { char buf[BUFSIZ]; int i; setbuf(stderr, buf); fprintf(stderr, "%s\n\n", TIFFGetVersion()); for (i = 0; stuff[i] != NULL; i++) fprintf(stderr, "%s\n", stuff[i]); exit(-1); } #define CopyField(tag, v) \ if (TIFFGetField(in, tag, &v)) TIFFSetField(out, tag, v) #define CopyField2(tag, v1, v2) \ if (TIFFGetField(in, tag, &v1, &v2)) TIFFSetField(out, tag, v1, v2) #define CopyField3(tag, v1, v2, v3) \ if (TIFFGetField(in, tag, &v1, &v2, &v3)) TIFFSetField(out, tag, v1, v2, v3) #define CopyField4(tag, v1, v2, v3, v4) \ if (TIFFGetField(in, tag, &v1, &v2, &v3, &v4)) TIFFSetField(out, tag, v1, v2, v3, v4) static void cpTag(TIFF* in, TIFF* out, uint16 tag, uint16 count, TIFFDataType type) { switch (type) { case TIFF_SHORT: if (count == 1) { uint16 shortv; CopyField(tag, shortv); } else if (count == 2) { uint16 shortv1, shortv2; CopyField2(tag, shortv1, shortv2); } else if (count == 4) { uint16 *tr, *tg, *tb, *ta; CopyField4(tag, tr, tg, tb, ta); } else if (count == (uint16) -1) { uint16 shortv1; uint16* shortav; CopyField2(tag, shortv1, shortav); } break; case TIFF_LONG: { uint32 longv; CopyField(tag, longv); } break; case TIFF_RATIONAL: if (count == 1) { float floatv; CopyField(tag, floatv); } else if (count == (uint16) -1) { float* floatav; CopyField(tag, floatav); } break; case TIFF_ASCII: { char* stringv; CopyField(tag, stringv); } break; case TIFF_DOUBLE: if (count == 1) { 
double doublev; CopyField(tag, doublev); } else if (count == (uint16) -1) { double* doubleav; CopyField(tag, doubleav); } break; default: TIFFError(TIFFFileName(in), "Data type %d is not supported, tag %d skipped.", tag, type); } } static struct cpTag { uint16 tag; uint16 count; TIFFDataType type; } tags[] = { { TIFFTAG_SUBFILETYPE, 1, TIFF_LONG }, { TIFFTAG_THRESHHOLDING, 1, TIFF_SHORT }, { TIFFTAG_DOCUMENTNAME, 1, TIFF_ASCII }, { TIFFTAG_IMAGEDESCRIPTION, 1, TIFF_ASCII }, { TIFFTAG_MAKE, 1, TIFF_ASCII }, { TIFFTAG_MODEL, 1, TIFF_ASCII }, { TIFFTAG_MINSAMPLEVALUE, 1, TIFF_SHORT }, { TIFFTAG_MAXSAMPLEVALUE, 1, TIFF_SHORT }, { TIFFTAG_XRESOLUTION, 1, TIFF_RATIONAL }, { TIFFTAG_YRESOLUTION, 1, TIFF_RATIONAL }, { TIFFTAG_PAGENAME, 1, TIFF_ASCII }, { TIFFTAG_XPOSITION, 1, TIFF_RATIONAL }, { TIFFTAG_YPOSITION, 1, TIFF_RATIONAL }, { TIFFTAG_RESOLUTIONUNIT, 1, TIFF_SHORT }, { TIFFTAG_SOFTWARE, 1, TIFF_ASCII }, { TIFFTAG_DATETIME, 1, TIFF_ASCII }, { TIFFTAG_ARTIST, 1, TIFF_ASCII }, { TIFFTAG_HOSTCOMPUTER, 1, TIFF_ASCII }, { TIFFTAG_WHITEPOINT, (uint16) -1, TIFF_RATIONAL }, { TIFFTAG_PRIMARYCHROMATICITIES,(uint16) -1,TIFF_RATIONAL }, { TIFFTAG_HALFTONEHINTS, 2, TIFF_SHORT }, { TIFFTAG_INKSET, 1, TIFF_SHORT }, { TIFFTAG_DOTRANGE, 2, TIFF_SHORT }, { TIFFTAG_TARGETPRINTER, 1, TIFF_ASCII }, { TIFFTAG_SAMPLEFORMAT, 1, TIFF_SHORT }, { TIFFTAG_YCBCRCOEFFICIENTS, (uint16) -1,TIFF_RATIONAL }, { TIFFTAG_YCBCRSUBSAMPLING, 2, TIFF_SHORT }, { TIFFTAG_YCBCRPOSITIONING, 1, TIFF_SHORT }, { TIFFTAG_REFERENCEBLACKWHITE, (uint16) -1,TIFF_RATIONAL }, { TIFFTAG_EXTRASAMPLES, (uint16) -1, TIFF_SHORT }, { TIFFTAG_SMINSAMPLEVALUE, 1, TIFF_DOUBLE }, { TIFFTAG_SMAXSAMPLEVALUE, 1, TIFF_DOUBLE }, { TIFFTAG_STONITS, 1, TIFF_DOUBLE }, }; #define NTAGS (sizeof (tags) / sizeof (tags[0])) #define CopyTag(tag, count, type) cpTag(in, out, tag, count, type) typedef int (*copyFunc) (TIFF* in, TIFF* out, uint32 l, uint32 w, uint16 samplesperpixel); static copyFunc pickCopyFunc(TIFF*, TIFF*, uint16, uint16); /* 
PODD */ static int tiffcp(TIFF* in, TIFF* out) { uint16 bitspersample, samplesperpixel = 1; uint16 input_compression, input_photometric = PHOTOMETRIC_MINISBLACK; copyFunc cf; uint32 width, length; struct cpTag* p; CopyField(TIFFTAG_IMAGEWIDTH, width); CopyField(TIFFTAG_IMAGELENGTH, length); CopyField(TIFFTAG_BITSPERSAMPLE, bitspersample); CopyField(TIFFTAG_SAMPLESPERPIXEL, samplesperpixel); if (compression != (uint16)-1) TIFFSetField(out, TIFFTAG_COMPRESSION, compression); else CopyField(TIFFTAG_COMPRESSION, compression); TIFFGetFieldDefaulted(in, TIFFTAG_COMPRESSION, &input_compression); TIFFGetFieldDefaulted(in, TIFFTAG_PHOTOMETRIC, &input_photometric); if (input_compression == COMPRESSION_JPEG) { /* Force conversion to RGB */ TIFFSetField(in, TIFFTAG_JPEGCOLORMODE, JPEGCOLORMODE_RGB); } else if (input_photometric == PHOTOMETRIC_YCBCR) { /* Otherwise, can't handle subsampled input */ uint16 subsamplinghor,subsamplingver; TIFFGetFieldDefaulted(in, TIFFTAG_YCBCRSUBSAMPLING, &subsamplinghor, &subsamplingver); if (subsamplinghor!=1 || subsamplingver!=1) { fprintf(stderr, "tiffcp: %s: Can't copy/convert subsampled image.\n", TIFFFileName(in)); return FALSE; } } if (compression == COMPRESSION_JPEG) { if (input_photometric == PHOTOMETRIC_RGB && jpegcolormode == JPEGCOLORMODE_RGB) TIFFSetField(out, TIFFTAG_PHOTOMETRIC, PHOTOMETRIC_YCBCR); else TIFFSetField(out, TIFFTAG_PHOTOMETRIC, input_photometric); } else if (compression == COMPRESSION_SGILOG || compression == COMPRESSION_SGILOG24) TIFFSetField(out, TIFFTAG_PHOTOMETRIC, samplesperpixel == 1 ? 
PHOTOMETRIC_LOGL : PHOTOMETRIC_LOGLUV); else if (input_compression == COMPRESSION_JPEG && samplesperpixel == 3 ) { /* RGB conversion was forced above hence the output will be of the same type */ TIFFSetField(out, TIFFTAG_PHOTOMETRIC, PHOTOMETRIC_RGB); } else CopyTag(TIFFTAG_PHOTOMETRIC, 1, TIFF_SHORT); if (fillorder != 0) TIFFSetField(out, TIFFTAG_FILLORDER, fillorder); else CopyTag(TIFFTAG_FILLORDER, 1, TIFF_SHORT); /* * Will copy `Orientation' tag from input image */ TIFFGetFieldDefaulted(in, TIFFTAG_ORIENTATION, &orientation); switch (orientation) { case ORIENTATION_BOTRIGHT: case ORIENTATION_RIGHTBOT: /* XXX */ TIFFWarning(TIFFFileName(in), "using bottom-left orientation"); orientation = ORIENTATION_BOTLEFT; /* fall thru... */ case ORIENTATION_LEFTBOT: /* XXX */ case ORIENTATION_BOTLEFT: break; case ORIENTATION_TOPRIGHT: case ORIENTATION_RIGHTTOP: /* XXX */ default: TIFFWarning(TIFFFileName(in), "using top-left orientation"); orientation = ORIENTATION_TOPLEFT; /* fall thru... */ case ORIENTATION_LEFTTOP: /* XXX */ case ORIENTATION_TOPLEFT: break; } TIFFSetField(out, TIFFTAG_ORIENTATION, orientation); /* * Choose tiles/strip for the output image according to * the command line arguments (-tiles, -strips) and the * structure of the input image. */ if (outtiled == -1) outtiled = TIFFIsTiled(in); if (outtiled) { /* * Setup output file's tile width&height. If either * is not specified, use either the value from the * input image or, if nothing is defined, use the * library default. */ if (tilewidth == (uint32) -1) TIFFGetField(in, TIFFTAG_TILEWIDTH, &tilewidth); if (tilelength == (uint32) -1) TIFFGetField(in, TIFFTAG_TILELENGTH, &tilelength); TIFFDefaultTileSize(out, &tilewidth, &tilelength); TIFFSetField(out, TIFFTAG_TILEWIDTH, tilewidth); TIFFSetField(out, TIFFTAG_TILELENGTH, tilelength); } else { /* * RowsPerStrip is left unspecified: use either the * value from the input image or, if nothing is defined, * use the library default. 
*/ if (rowsperstrip == (uint32) 0) { if (!TIFFGetField(in, TIFFTAG_ROWSPERSTRIP, &rowsperstrip)) { rowsperstrip = TIFFDefaultStripSize(out, rowsperstrip); } if (rowsperstrip > length && rowsperstrip != (uint32)-1) rowsperstrip = length; } else if (rowsperstrip == (uint32) -1) rowsperstrip = length; TIFFSetField(out, TIFFTAG_ROWSPERSTRIP, rowsperstrip); } if (config != (uint16) -1) TIFFSetField(out, TIFFTAG_PLANARCONFIG, config); else CopyField(TIFFTAG_PLANARCONFIG, config); if (samplesperpixel <= 4) CopyTag(TIFFTAG_TRANSFERFUNCTION, 4, TIFF_SHORT); CopyTag(TIFFTAG_COLORMAP, 4, TIFF_SHORT); /* SMinSampleValue & SMaxSampleValue */ switch (compression) { case COMPRESSION_JPEG: TIFFSetField(out, TIFFTAG_JPEGQUALITY, quality); TIFFSetField(out, TIFFTAG_JPEGCOLORMODE, jpegcolormode); break; case COMPRESSION_JBIG: CopyTag(TIFFTAG_FAXRECVPARAMS, 1, TIFF_LONG); CopyTag(TIFFTAG_FAXRECVTIME, 1, TIFF_LONG); CopyTag(TIFFTAG_FAXSUBADDRESS, 1, TIFF_ASCII); CopyTag(TIFFTAG_FAXDCS, 1, TIFF_ASCII); break; case COMPRESSION_LZW: case COMPRESSION_ADOBE_DEFLATE: case COMPRESSION_DEFLATE: case COMPRESSION_LZMA: if (predictor != (uint16)-1) TIFFSetField(out, TIFFTAG_PREDICTOR, predictor); else CopyField(TIFFTAG_PREDICTOR, predictor); if (preset != -1) { if (compression == COMPRESSION_ADOBE_DEFLATE || compression == COMPRESSION_DEFLATE) TIFFSetField(out, TIFFTAG_ZIPQUALITY, preset); else if (compression == COMPRESSION_LZMA) TIFFSetField(out, TIFFTAG_LZMAPRESET, preset); } break; case COMPRESSION_CCITTFAX3: case COMPRESSION_CCITTFAX4: if (compression == COMPRESSION_CCITTFAX3) { if (g3opts != (uint32) -1) TIFFSetField(out, TIFFTAG_GROUP3OPTIONS, g3opts); else CopyField(TIFFTAG_GROUP3OPTIONS, g3opts); } else CopyTag(TIFFTAG_GROUP4OPTIONS, 1, TIFF_LONG); CopyTag(TIFFTAG_BADFAXLINES, 1, TIFF_LONG); CopyTag(TIFFTAG_CLEANFAXDATA, 1, TIFF_LONG); CopyTag(TIFFTAG_CONSECUTIVEBADFAXLINES, 1, TIFF_LONG); CopyTag(TIFFTAG_FAXRECVPARAMS, 1, TIFF_LONG); CopyTag(TIFFTAG_FAXRECVTIME, 1, TIFF_LONG); 
CopyTag(TIFFTAG_FAXSUBADDRESS, 1, TIFF_ASCII); break; } { uint32 len32; void** data; if (TIFFGetField(in, TIFFTAG_ICCPROFILE, &len32, &data)) TIFFSetField(out, TIFFTAG_ICCPROFILE, len32, data); } { uint16 ninks; const char* inknames; if (TIFFGetField(in, TIFFTAG_NUMBEROFINKS, &ninks)) { TIFFSetField(out, TIFFTAG_NUMBEROFINKS, ninks); if (TIFFGetField(in, TIFFTAG_INKNAMES, &inknames)) { int inknameslen = strlen(inknames) + 1; const char* cp = inknames; while (ninks > 1) { cp = strchr(cp, '\0'); cp++; inknameslen += (strlen(cp) + 1); ninks--; } TIFFSetField(out, TIFFTAG_INKNAMES, inknameslen, inknames); } } } { unsigned short pg0, pg1; if (pageInSeq == 1) { if (pageNum < 0) /* only one input file */ { if (TIFFGetField(in, TIFFTAG_PAGENUMBER, &pg0, &pg1)) TIFFSetField(out, TIFFTAG_PAGENUMBER, pg0, pg1); } else TIFFSetField(out, TIFFTAG_PAGENUMBER, pageNum++, 0); } else { if (TIFFGetField(in, TIFFTAG_PAGENUMBER, &pg0, &pg1)) { if (pageNum < 0) /* only one input file */ TIFFSetField(out, TIFFTAG_PAGENUMBER, pg0, pg1); else TIFFSetField(out, TIFFTAG_PAGENUMBER, pageNum++, 0); } } } for (p = tags; p < &tags[NTAGS]; p++) CopyTag(p->tag, p->count, p->type); cf = pickCopyFunc(in, out, bitspersample, samplesperpixel); return (cf ? (*cf)(in, out, length, width, samplesperpixel) : FALSE); } /* * Copy Functions. */ #define DECLAREcpFunc(x) \ static int x(TIFF* in, TIFF* out, \ uint32 imagelength, uint32 imagewidth, tsample_t spp) #define DECLAREreadFunc(x) \ static int x(TIFF* in, \ uint8* buf, uint32 imagelength, uint32 imagewidth, tsample_t spp) typedef int (*readFunc)(TIFF*, uint8*, uint32, uint32, tsample_t); #define DECLAREwriteFunc(x) \ static int x(TIFF* out, \ uint8* buf, uint32 imagelength, uint32 imagewidth, tsample_t spp) typedef int (*writeFunc)(TIFF*, uint8*, uint32, uint32, tsample_t); /* * Contig -> contig by scanline for rows/strip change. 
*/
/*
 * Copy a contig image scanline-by-scanline; used when only the
 * rows-per-strip layout changes between input and output.
 * Returns 1 on success, 0 on failure (read errors are tolerated
 * when the global `ignore` flag is set).
 */
DECLAREcpFunc(cpContig2ContigByRow)
{
	tsize_t scanlinesize = TIFFScanlineSize(in);
	tdata_t buf;
	uint32 row;

	buf = _TIFFmalloc(scanlinesize);
	if (!buf)
		return 0;
	_TIFFmemset(buf, 0, scanlinesize);
	(void) imagewidth; (void) spp;
	for (row = 0; row < imagelength; row++) {
		/* a failed read is fatal unless -i (ignore errors) was given */
		if (TIFFReadScanline(in, buf, row, 0) < 0 && !ignore) {
			TIFFError(TIFFFileName(in),
				  "Error, can't read scanline %lu",
				  (unsigned long) row);
			goto bad;
		}
		if (TIFFWriteScanline(out, buf, row, 0) < 0) {
			TIFFError(TIFFFileName(out),
				  "Error, can't write scanline %lu",
				  (unsigned long) row);
			goto bad;
		}
	}
	_TIFFfree(buf);
	return 1;
bad:
	_TIFFfree(buf);
	return 0;
}

/* Signature of a per-scanline bias-subtraction routine. */
typedef void biasFn (void *image, void *bias, uint32 pixels);

/*
 * Generate saturating (clamped-at-zero) per-pixel subtraction
 * routines for 8-, 16- and 32-bit samples: image[i] -= bias[i].
 */
#define subtract(bits) \
static void subtract##bits (void *i, void *b, uint32 pixels)\
{\
   uint##bits *image = i;\
   uint##bits *bias = b;\
   while (pixels--) {\
     *image = *image > *bias ? *image-*bias : 0;\
     image++, bias++; \
   } \
}

subtract(8)
subtract(16)
subtract(32)

/*
 * Map a BitsPerSample value to the matching subtract routine;
 * NULL when the sample depth is unsupported.
 */
static biasFn *lineSubtractFn (unsigned bits)
{
	switch (bits) {
		case  8:  return subtract8;
		case 16:  return subtract16;
		case 32:  return subtract32;
	}
	return NULL;
}

/*
 * Contig -> contig by scanline while subtracting a bias image.
*/
/*
 * Copy a single-sample contig image scanline-by-scanline while
 * subtracting the global `bias` image (the -b option) from it.
 * The bias image must match the input in scanline size, width and
 * length, and BitsPerSample must be 8, 16 or 32.
 * Returns 1 on success, 0 on failure.
 */
DECLAREcpFunc(cpBiasedContig2Contig)
{
	if (spp == 1) {
		tsize_t biasSize = TIFFScanlineSize(bias);
		tsize_t bufSize = TIFFScanlineSize(in);
		tdata_t buf, biasBuf;
		uint32 biasWidth = 0, biasLength = 0;
		TIFFGetField(bias, TIFFTAG_IMAGEWIDTH, &biasWidth);
		TIFFGetField(bias, TIFFTAG_IMAGELENGTH, &biasLength);
		if (biasSize == bufSize &&
		    imagelength == biasLength && imagewidth == biasWidth) {
			uint16 sampleBits = 0;
			biasFn *subtractLine;
			TIFFGetField(in, TIFFTAG_BITSPERSAMPLE, &sampleBits);
			subtractLine = lineSubtractFn (sampleBits);
			if (subtractLine) {
				uint32 row;
				buf = _TIFFmalloc(bufSize);
				biasBuf = _TIFFmalloc(bufSize);
				/*
				 * Check both allocations before use; previously
				 * a failed _TIFFmalloc() was dereferenced by
				 * TIFFReadScanline().  _TIFFfree(NULL) is a
				 * no-op, so the shared cleanup path is safe.
				 */
				if (!buf || !biasBuf) {
					TIFFError(TIFFFileName(in),
						  "Error, can't allocate scanline buffers");
					goto bad;
				}
				for (row = 0; row < imagelength; row++) {
					if (TIFFReadScanline(in, buf, row, 0) < 0
					    && !ignore) {
						TIFFError(TIFFFileName(in),
						    "Error, can't read scanline %lu",
						    (unsigned long) row);
						goto bad;
					}
					if (TIFFReadScanline(bias, biasBuf, row, 0) < 0
					    && !ignore) {
						TIFFError(TIFFFileName(in),
						    "Error, can't read biased scanline %lu",
						    (unsigned long) row);
						goto bad;
					}
					subtractLine (buf, biasBuf, imagewidth);
					if (TIFFWriteScanline(out, buf, row, 0) < 0) {
						TIFFError(TIFFFileName(out),
						    "Error, can't write scanline %lu",
						    (unsigned long) row);
						goto bad;
					}
				}

				_TIFFfree(buf);
				_TIFFfree(biasBuf);
				TIFFSetDirectory(bias,
				    TIFFCurrentDirectory(bias)); /* rewind */
				return 1;
bad:
				_TIFFfree(buf);
				_TIFFfree(biasBuf);
				return 0;
			} else {
				TIFFError(TIFFFileName(in),
				    "No support for biasing %d bit pixels\n",
				    sampleBits);
				return 0;
			}
		}
		TIFFError(TIFFFileName(in),
		    "Bias image %s,%d\nis not the same size as %s,%d\n",
		    TIFFFileName(bias), TIFFCurrentDirectory(bias),
		    TIFFFileName(in), TIFFCurrentDirectory(in));
		return 0;
	} else {
		TIFFError(TIFFFileName(in),
		    "Can't bias %s,%d as it has >1 Sample/Pixel\n",
		    TIFFFileName(in), TIFFCurrentDirectory(in));
		return 0;
	}
}

/*
 * Strip -> strip for change in encoding.
*/ DECLAREcpFunc(cpDecodedStrips) { tsize_t stripsize = TIFFStripSize(in); tdata_t buf = _TIFFmalloc(stripsize); (void) imagewidth; (void) spp; if (buf) { tstrip_t s, ns = TIFFNumberOfStrips(in); uint32 row = 0; _TIFFmemset(buf, 0, stripsize); for (s = 0; s < ns && row < imagelength; s++) { tsize_t cc = (row + rowsperstrip > imagelength) ? TIFFVStripSize(in, imagelength - row) : stripsize; if (TIFFReadEncodedStrip(in, s, buf, cc) < 0 && !ignore) { TIFFError(TIFFFileName(in), "Error, can't read strip %lu", (unsigned long) s); goto bad; } if (TIFFWriteEncodedStrip(out, s, buf, cc) < 0) { TIFFError(TIFFFileName(out), "Error, can't write strip %lu", (unsigned long) s); goto bad; } row += rowsperstrip; } _TIFFfree(buf); return 1; } else { TIFFError(TIFFFileName(in), "Error, can't allocate memory buffer of size %lu " "to read strips", (unsigned long) stripsize); return 0; } bad: _TIFFfree(buf); return 0; } /* * Separate -> separate by row for rows/strip change. */ DECLAREcpFunc(cpSeparate2SeparateByRow) { tsize_t scanlinesize = TIFFScanlineSize(in); tdata_t buf; uint32 row; tsample_t s; (void) imagewidth; buf = _TIFFmalloc(scanlinesize); if (!buf) return 0; _TIFFmemset(buf, 0, scanlinesize); for (s = 0; s < spp; s++) { for (row = 0; row < imagelength; row++) { if (TIFFReadScanline(in, buf, row, s) < 0 && !ignore) { TIFFError(TIFFFileName(in), "Error, can't read scanline %lu", (unsigned long) row); goto bad; } if (TIFFWriteScanline(out, buf, row, s) < 0) { TIFFError(TIFFFileName(out), "Error, can't write scanline %lu", (unsigned long) row); goto bad; } } } _TIFFfree(buf); return 1; bad: _TIFFfree(buf); return 0; } /* * Contig -> separate by row. 
*/ DECLAREcpFunc(cpContig2SeparateByRow) { tsize_t scanlinesizein = TIFFScanlineSize(in); tsize_t scanlinesizeout = TIFFScanlineSize(out); tdata_t inbuf; tdata_t outbuf; register uint8 *inp, *outp; register uint32 n; uint32 row; tsample_t s; inbuf = _TIFFmalloc(scanlinesizein); outbuf = _TIFFmalloc(scanlinesizeout); if (!inbuf || !outbuf) goto bad; _TIFFmemset(inbuf, 0, scanlinesizein); _TIFFmemset(outbuf, 0, scanlinesizeout); /* unpack channels */ for (s = 0; s < spp; s++) { for (row = 0; row < imagelength; row++) { if (TIFFReadScanline(in, inbuf, row, 0) < 0 && !ignore) { TIFFError(TIFFFileName(in), "Error, can't read scanline %lu", (unsigned long) row); goto bad; } inp = ((uint8*)inbuf) + s; outp = (uint8*)outbuf; for (n = imagewidth; n-- > 0;) { *outp++ = *inp; inp += spp; } if (TIFFWriteScanline(out, outbuf, row, s) < 0) { TIFFError(TIFFFileName(out), "Error, can't write scanline %lu", (unsigned long) row); goto bad; } } } if (inbuf) _TIFFfree(inbuf); if (outbuf) _TIFFfree(outbuf); return 1; bad: if (inbuf) _TIFFfree(inbuf); if (outbuf) _TIFFfree(outbuf); return 0; } /* * Separate -> contig by row. 
*/ DECLAREcpFunc(cpSeparate2ContigByRow) { tsize_t scanlinesizein = TIFFScanlineSize(in); tsize_t scanlinesizeout = TIFFScanlineSize(out); tdata_t inbuf; tdata_t outbuf; register uint8 *inp, *outp; register uint32 n; uint32 row; tsample_t s; inbuf = _TIFFmalloc(scanlinesizein); outbuf = _TIFFmalloc(scanlinesizeout); if (!inbuf || !outbuf) goto bad; _TIFFmemset(inbuf, 0, scanlinesizein); _TIFFmemset(outbuf, 0, scanlinesizeout); for (row = 0; row < imagelength; row++) { /* merge channels */ for (s = 0; s < spp; s++) { if (TIFFReadScanline(in, inbuf, row, s) < 0 && !ignore) { TIFFError(TIFFFileName(in), "Error, can't read scanline %lu", (unsigned long) row); goto bad; } inp = (uint8*)inbuf; outp = ((uint8*)outbuf) + s; for (n = imagewidth; n-- > 0;) { *outp = *inp++; outp += spp; } } if (TIFFWriteScanline(out, outbuf, row, 0) < 0) { TIFFError(TIFFFileName(out), "Error, can't write scanline %lu", (unsigned long) row); goto bad; } } if (inbuf) _TIFFfree(inbuf); if (outbuf) _TIFFfree(outbuf); return 1; bad: if (inbuf) _TIFFfree(inbuf); if (outbuf) _TIFFfree(outbuf); return 0; } static void cpStripToTile(uint8* out, uint8* in, uint32 rows, uint32 cols, int outskew, int64 inskew) { while (rows-- > 0) { uint32 j = cols; while (j-- > 0) *out++ = *in++; out += outskew; in += inskew; } } static void cpContigBufToSeparateBuf(uint8* out, uint8* in, uint32 rows, uint32 cols, int outskew, int inskew, tsample_t spp, int bytes_per_sample ) { while (rows-- > 0) { uint32 j = cols; while (j-- > 0) { int n = bytes_per_sample; while( n-- ) { *out++ = *in++; } in += (spp-1) * bytes_per_sample; } out += outskew; in += inskew; } } static void cpSeparateBufToContigBuf(uint8* out, uint8* in, uint32 rows, uint32 cols, int outskew, int inskew, tsample_t spp, int bytes_per_sample) { while (rows-- > 0) { uint32 j = cols; while (j-- > 0) { int n = bytes_per_sample; while( n-- ) { *out++ = *in++; } out += (spp-1)*bytes_per_sample; } out += outskew; in += inskew; } } static int cpImage(TIFF* in, 
TIFF* out, readFunc fin, writeFunc fout, uint32 imagelength, uint32 imagewidth, tsample_t spp) { int status = 0; tdata_t buf = NULL; tsize_t scanlinesize = TIFFRasterScanlineSize(in); tsize_t bytes = scanlinesize * (tsize_t)imagelength; /* * XXX: Check for integer overflow. */ if (scanlinesize && imagelength && bytes / (tsize_t)imagelength == scanlinesize) { buf = _TIFFmalloc(bytes); if (buf) { if ((*fin)(in, (uint8*)buf, imagelength, imagewidth, spp)) { status = (*fout)(out, (uint8*)buf, imagelength, imagewidth, spp); } _TIFFfree(buf); } else { TIFFError(TIFFFileName(in), "Error, can't allocate space for image buffer"); } } else { TIFFError(TIFFFileName(in), "Error, no space for image buffer"); } return status; } DECLAREreadFunc(readContigStripsIntoBuffer) { tsize_t scanlinesize = TIFFScanlineSize(in); uint8* bufp = buf; uint32 row; (void) imagewidth; (void) spp; for (row = 0; row < imagelength; row++) { if (TIFFReadScanline(in, (tdata_t) bufp, row, 0) < 0 && !ignore) { TIFFError(TIFFFileName(in), "Error, can't read scanline %lu", (unsigned long) row); return 0; } bufp += scanlinesize; } return 1; } DECLAREreadFunc(readSeparateStripsIntoBuffer) { int status = 1; tsize_t scanlinesize = TIFFScanlineSize(in); tdata_t scanline; if (!scanlinesize) return 0; scanline = _TIFFmalloc(scanlinesize); if (!scanline) return 0; _TIFFmemset(scanline, 0, scanlinesize); (void) imagewidth; if (scanline) { uint8* bufp = (uint8*) buf; uint32 row; tsample_t s; for (row = 0; row < imagelength; row++) { /* merge channels */ for (s = 0; s < spp; s++) { uint8* bp = bufp + s; tsize_t n = scanlinesize; uint8* sbuf = scanline; if (TIFFReadScanline(in, scanline, row, s) < 0 && !ignore) { TIFFError(TIFFFileName(in), "Error, can't read scanline %lu", (unsigned long) row); status = 0; goto done; } while (n-- > 0) *bp = *sbuf++, bp += spp; } bufp += scanlinesize * spp; } } done: _TIFFfree(scanline); return status; } DECLAREreadFunc(readContigTilesIntoBuffer) { int status = 1; tsize_t tilesize = 
TIFFTileSize(in); tdata_t tilebuf; uint32 imagew = TIFFScanlineSize(in); uint32 tilew = TIFFTileRowSize(in); int64 iskew = (int64)imagew - (int64)tilew; uint8* bufp = (uint8*) buf; uint32 tw, tl; uint32 row; (void) spp; tilebuf = _TIFFmalloc(tilesize); if (tilebuf == 0) return 0; _TIFFmemset(tilebuf, 0, tilesize); (void) TIFFGetField(in, TIFFTAG_TILEWIDTH, &tw); (void) TIFFGetField(in, TIFFTAG_TILELENGTH, &tl); for (row = 0; row < imagelength; row += tl) { uint32 nrow = (row+tl > imagelength) ? imagelength-row : tl; uint32 colb = 0; uint32 col; for (col = 0; col < imagewidth && colb < imagew; col += tw) { if (TIFFReadTile(in, tilebuf, col, row, 0, 0) < 0 && !ignore) { TIFFError(TIFFFileName(in), "Error, can't read tile at %lu %lu", (unsigned long) col, (unsigned long) row); status = 0; goto done; } if (colb > iskew) { uint32 width = imagew - colb; uint32 oskew = tilew - width; cpStripToTile(bufp + colb, tilebuf, nrow, width, oskew + iskew, oskew ); } else cpStripToTile(bufp + colb, tilebuf, nrow, tilew, iskew, 0); colb += tilew; } bufp += imagew * nrow; } done: _TIFFfree(tilebuf); return status; } DECLAREreadFunc(readSeparateTilesIntoBuffer) { int status = 1; uint32 imagew = TIFFRasterScanlineSize(in); uint32 tilew = TIFFTileRowSize(in); int iskew = imagew - tilew*spp; tsize_t tilesize = TIFFTileSize(in); tdata_t tilebuf; uint8* bufp = (uint8*) buf; uint32 tw, tl; uint32 row; uint16 bps = 0, bytes_per_sample; tilebuf = _TIFFmalloc(tilesize); if (tilebuf == 0) return 0; _TIFFmemset(tilebuf, 0, tilesize); (void) TIFFGetField(in, TIFFTAG_TILEWIDTH, &tw); (void) TIFFGetField(in, TIFFTAG_TILELENGTH, &tl); (void) TIFFGetField(in, TIFFTAG_BITSPERSAMPLE, &bps); if( bps == 0 ) { TIFFError(TIFFFileName(in), "Error, cannot read BitsPerSample"); status = 0; goto done; } if( (bps % 8) != 0 ) { TIFFError(TIFFFileName(in), "Error, cannot handle BitsPerSample that is not a multiple of 8"); status = 0; goto done; } bytes_per_sample = bps/8; for (row = 0; row < imagelength; row += 
tl) { uint32 nrow = (row+tl > imagelength) ? imagelength-row : tl; uint32 colb = 0; uint32 col; for (col = 0; col < imagewidth; col += tw) { tsample_t s; for (s = 0; s < spp; s++) { if (TIFFReadTile(in, tilebuf, col, row, 0, s) < 0 && !ignore) { TIFFError(TIFFFileName(in), "Error, can't read tile at %lu %lu, " "sample %lu", (unsigned long) col, (unsigned long) row, (unsigned long) s); status = 0; goto done; } /* * Tile is clipped horizontally. Calculate * visible portion and skewing factors. */ if (colb + tilew*spp > imagew) { uint32 width = imagew - colb; int oskew = tilew*spp - width; cpSeparateBufToContigBuf( bufp+colb+s*bytes_per_sample, tilebuf, nrow, width/(spp*bytes_per_sample), oskew + iskew, oskew/spp, spp, bytes_per_sample); } else cpSeparateBufToContigBuf( bufp+colb+s*bytes_per_sample, tilebuf, nrow, tw, iskew, 0, spp, bytes_per_sample); } colb += tilew*spp; } bufp += imagew * nrow; } done: _TIFFfree(tilebuf); return status; } DECLAREwriteFunc(writeBufferToContigStrips) { uint32 row, rowsperstrip; tstrip_t strip = 0; (void) imagewidth; (void) spp; (void) TIFFGetFieldDefaulted(out, TIFFTAG_ROWSPERSTRIP, &rowsperstrip); for (row = 0; row < imagelength; row += rowsperstrip) { uint32 nrows = (row+rowsperstrip > imagelength) ? 
imagelength-row : rowsperstrip; tsize_t stripsize = TIFFVStripSize(out, nrows); if (TIFFWriteEncodedStrip(out, strip++, buf, stripsize) < 0) { TIFFError(TIFFFileName(out), "Error, can't write strip %u", strip - 1); return 0; } buf += stripsize; } return 1; } DECLAREwriteFunc(writeBufferToSeparateStrips) { uint32 rowsize = imagewidth * spp; uint32 rowsperstrip; tsize_t stripsize = TIFFStripSize(out); tdata_t obuf; tstrip_t strip = 0; tsample_t s; obuf = _TIFFmalloc(stripsize); if (obuf == NULL) return (0); _TIFFmemset(obuf, 0, stripsize); (void) TIFFGetFieldDefaulted(out, TIFFTAG_ROWSPERSTRIP, &rowsperstrip); for (s = 0; s < spp; s++) { uint32 row; for (row = 0; row < imagelength; row += rowsperstrip) { uint32 nrows = (row+rowsperstrip > imagelength) ? imagelength-row : rowsperstrip; tsize_t stripsize = TIFFVStripSize(out, nrows); cpContigBufToSeparateBuf( obuf, (uint8*) buf + row*rowsize + s, nrows, imagewidth, 0, 0, spp, 1); if (TIFFWriteEncodedStrip(out, strip++, obuf, stripsize) < 0) { TIFFError(TIFFFileName(out), "Error, can't write strip %u", strip - 1); _TIFFfree(obuf); return 0; } } } _TIFFfree(obuf); return 1; } DECLAREwriteFunc(writeBufferToContigTiles) { uint32 imagew = TIFFScanlineSize(out); uint32 tilew = TIFFTileRowSize(out); int iskew = imagew - tilew; tsize_t tilesize = TIFFTileSize(out); tdata_t obuf; uint8* bufp = (uint8*) buf; uint32 tl, tw; uint32 row; (void) spp; obuf = _TIFFmalloc(TIFFTileSize(out)); if (obuf == NULL) return 0; _TIFFmemset(obuf, 0, tilesize); (void) TIFFGetField(out, TIFFTAG_TILELENGTH, &tl); (void) TIFFGetField(out, TIFFTAG_TILEWIDTH, &tw); for (row = 0; row < imagelength; row += tilelength) { uint32 nrow = (row+tl > imagelength) ? imagelength-row : tl; uint32 colb = 0; uint32 col; for (col = 0; col < imagewidth && colb < imagew; col += tw) { /* * Tile is clipped horizontally. Calculate * visible portion and skewing factors. 
*/ if (colb + tilew > imagew) { uint32 width = imagew - colb; int oskew = tilew - width; cpStripToTile(obuf, bufp + colb, nrow, width, oskew, oskew + iskew); } else cpStripToTile(obuf, bufp + colb, nrow, tilew, 0, iskew); if (TIFFWriteTile(out, obuf, col, row, 0, 0) < 0) { TIFFError(TIFFFileName(out), "Error, can't write tile at %lu %lu", (unsigned long) col, (unsigned long) row); _TIFFfree(obuf); return 0; } colb += tilew; } bufp += nrow * imagew; } _TIFFfree(obuf); return 1; } DECLAREwriteFunc(writeBufferToSeparateTiles) { uint32 imagew = TIFFScanlineSize(out); tsize_t tilew = TIFFTileRowSize(out); uint32 iimagew = TIFFRasterScanlineSize(out); int iskew = iimagew - tilew*spp; tsize_t tilesize = TIFFTileSize(out); tdata_t obuf; uint8* bufp = (uint8*) buf; uint32 tl, tw; uint32 row; uint16 bps = 0, bytes_per_sample; obuf = _TIFFmalloc(TIFFTileSize(out)); if (obuf == NULL) return 0; _TIFFmemset(obuf, 0, tilesize); (void) TIFFGetField(out, TIFFTAG_TILELENGTH, &tl); (void) TIFFGetField(out, TIFFTAG_TILEWIDTH, &tw); (void) TIFFGetField(out, TIFFTAG_BITSPERSAMPLE, &bps); if( bps == 0 ) { TIFFError(TIFFFileName(out), "Error, cannot read BitsPerSample"); _TIFFfree(obuf); return 0; } if( (bps % 8) != 0 ) { TIFFError(TIFFFileName(out), "Error, cannot handle BitsPerSample that is not a multiple of 8"); _TIFFfree(obuf); return 0; } bytes_per_sample = bps/8; for (row = 0; row < imagelength; row += tl) { uint32 nrow = (row+tl > imagelength) ? imagelength-row : tl; uint32 colb = 0; uint32 col; for (col = 0; col < imagewidth; col += tw) { tsample_t s; for (s = 0; s < spp; s++) { /* * Tile is clipped horizontally. Calculate * visible portion and skewing factors. 
*/ if (colb + tilew > imagew) { uint32 width = (imagew - colb); int oskew = tilew - width; cpContigBufToSeparateBuf(obuf, bufp + (colb*spp) + s, nrow, width/bytes_per_sample, oskew, (oskew*spp)+iskew, spp, bytes_per_sample); } else cpContigBufToSeparateBuf(obuf, bufp + (colb*spp) + s, nrow, tilewidth, 0, iskew, spp, bytes_per_sample); if (TIFFWriteTile(out, obuf, col, row, 0, s) < 0) { TIFFError(TIFFFileName(out), "Error, can't write tile at %lu %lu " "sample %lu", (unsigned long) col, (unsigned long) row, (unsigned long) s); _TIFFfree(obuf); return 0; } } colb += tilew; } bufp += nrow * iimagew; } _TIFFfree(obuf); return 1; } /* * Contig strips -> contig tiles. */ DECLAREcpFunc(cpContigStrips2ContigTiles) { return cpImage(in, out, readContigStripsIntoBuffer, writeBufferToContigTiles, imagelength, imagewidth, spp); } /* * Contig strips -> separate tiles. */ DECLAREcpFunc(cpContigStrips2SeparateTiles) { return cpImage(in, out, readContigStripsIntoBuffer, writeBufferToSeparateTiles, imagelength, imagewidth, spp); } /* * Separate strips -> contig tiles. */ DECLAREcpFunc(cpSeparateStrips2ContigTiles) { return cpImage(in, out, readSeparateStripsIntoBuffer, writeBufferToContigTiles, imagelength, imagewidth, spp); } /* * Separate strips -> separate tiles. */ DECLAREcpFunc(cpSeparateStrips2SeparateTiles) { return cpImage(in, out, readSeparateStripsIntoBuffer, writeBufferToSeparateTiles, imagelength, imagewidth, spp); } /* * Contig strips -> contig tiles. */ DECLAREcpFunc(cpContigTiles2ContigTiles) { return cpImage(in, out, readContigTilesIntoBuffer, writeBufferToContigTiles, imagelength, imagewidth, spp); } /* * Contig tiles -> separate tiles. */ DECLAREcpFunc(cpContigTiles2SeparateTiles) { return cpImage(in, out, readContigTilesIntoBuffer, writeBufferToSeparateTiles, imagelength, imagewidth, spp); } /* * Separate tiles -> contig tiles. 
*/
DECLAREcpFunc(cpSeparateTiles2ContigTiles)
{
	/* Thin adapter: pairs a reader and a writer and delegates to cpImage(). */
	return cpImage(in, out,
	    readSeparateTilesIntoBuffer,
	    writeBufferToContigTiles,
	    imagelength, imagewidth, spp);
}

/*
 * Separate tiles -> separate tiles (tile dimension change).
 */
DECLAREcpFunc(cpSeparateTiles2SeparateTiles)
{
	return cpImage(in, out,
	    readSeparateTilesIntoBuffer,
	    writeBufferToSeparateTiles,
	    imagelength, imagewidth, spp);
}

/*
 * Contig tiles -> contig strips.
 * (Comment fixed: this wrapper writes STRIPS, not tiles; the previous
 * text was a copy-paste of the tile-dimension-change case above.)
 */
DECLAREcpFunc(cpContigTiles2ContigStrips)
{
	return cpImage(in, out,
	    readContigTilesIntoBuffer,
	    writeBufferToContigStrips,
	    imagelength, imagewidth, spp);
}

/*
 * Contig tiles -> separate strips.
 */
DECLAREcpFunc(cpContigTiles2SeparateStrips)
{
	return cpImage(in, out,
	    readContigTilesIntoBuffer,
	    writeBufferToSeparateStrips,
	    imagelength, imagewidth, spp);
}

/*
 * Separate tiles -> contig strips.
 */
DECLAREcpFunc(cpSeparateTiles2ContigStrips)
{
	return cpImage(in, out,
	    readSeparateTilesIntoBuffer,
	    writeBufferToContigStrips,
	    imagelength, imagewidth, spp);
}

/*
 * Separate tiles -> separate strips.
 */
DECLAREcpFunc(cpSeparateTiles2SeparateStrips)
{
	return cpImage(in, out,
	    readSeparateTilesIntoBuffer,
	    writeBufferToSeparateStrips,
	    imagelength, imagewidth, spp);
}

/*
 * Select the appropriate copy function to use.
*/
/*
 * Examine the source image's planar configuration and tiling layout,
 * compare them against the requested output layout (globals `config',
 * `tilewidth', `tilelength', `rowsperstrip', `bias'), and return the
 * copy function that handles that combination, or NULL when the
 * combination is unsupported.
 */
static copyFunc
pickCopyFunc(TIFF* in, TIFF* out, uint16 bitspersample, uint16 samplesperpixel)
{
	uint16 shortv;
	uint32 w, l, tw, tl;
	int bychunk;

	/*
	 * FIX: use the defaulted getter so `shortv' is always written.
	 * Plain TIFFGetField() leaves it uninitialized when the source
	 * image carries no PlanarConfiguration tag, yet the value is
	 * read below (comparison and pack() switch) -- undefined
	 * behavior. TIFFGetFieldDefaulted() supplies the TIFF default
	 * (PLANARCONFIG_CONTIG) instead; the file already uses it for
	 * defaulted tags in tiffcp().
	 */
	(void) TIFFGetFieldDefaulted(in, TIFFTAG_PLANARCONFIG, &shortv);
	if (shortv != config && bitspersample != 8 && samplesperpixel > 1) {
		fprintf(stderr,
		    "%s: Cannot handle different planar configuration w/ bits/sample != 8\n",
		    TIFFFileName(in));
		return (NULL);
	}
	TIFFGetField(in, TIFFTAG_IMAGEWIDTH, &w);
	TIFFGetField(in, TIFFTAG_IMAGELENGTH, &l);
	if (!(TIFFIsTiled(out) || TIFFIsTiled(in))) {
		uint32 irps = (uint32) -1L;
		TIFFGetField(in, TIFFTAG_ROWSPERSTRIP, &irps);
		/* if biased, force decoded copying to allow image subtraction */
		bychunk = !bias && (rowsperstrip == irps);
	} else {	/* either in or out is tiled */
		if (bias) {
			fprintf(stderr,
			    "%s: Cannot handle tiled configuration w/bias image\n",
			    TIFFFileName(in));
			return (NULL);
		}
		if (TIFFIsTiled(out)) {
			if (!TIFFGetField(in, TIFFTAG_TILEWIDTH, &tw))
				tw = w;
			if (!TIFFGetField(in, TIFFTAG_TILELENGTH, &tl))
				tl = l;
			bychunk = (tw == tilewidth && tl == tilelength);
		} else {	/* out's not, so in must be tiled */
			TIFFGetField(in, TIFFTAG_TILEWIDTH, &tw);
			TIFFGetField(in, TIFFTAG_TILELENGTH, &tl);
			bychunk = (tw == w && tl == rowsperstrip);
		}
	}
#define	T 1
#define	F 0
/* pack() encodes (in-config, out-config, in-tiled, out-tiled, bychunk)
 * into a single switchable integer. */
#define pack(a,b,c,d,e)	((long)(((a)<<11)|((b)<<3)|((c)<<2)|((d)<<1)|(e)))
	switch(pack(shortv,config,TIFFIsTiled(in),TIFFIsTiled(out),bychunk)) {
		/* Strips -> Tiles */
	case pack(PLANARCONFIG_CONTIG, PLANARCONFIG_CONTIG, F,T,F):
	case pack(PLANARCONFIG_CONTIG, PLANARCONFIG_CONTIG, F,T,T):
		return cpContigStrips2ContigTiles;
	case pack(PLANARCONFIG_CONTIG, PLANARCONFIG_SEPARATE, F,T,F):
	case pack(PLANARCONFIG_CONTIG, PLANARCONFIG_SEPARATE, F,T,T):
		return cpContigStrips2SeparateTiles;
	case pack(PLANARCONFIG_SEPARATE, PLANARCONFIG_CONTIG, F,T,F):
	case pack(PLANARCONFIG_SEPARATE, PLANARCONFIG_CONTIG, F,T,T):
		return cpSeparateStrips2ContigTiles;
	case pack(PLANARCONFIG_SEPARATE, PLANARCONFIG_SEPARATE, F,T,F):
	case pack(PLANARCONFIG_SEPARATE, PLANARCONFIG_SEPARATE, F,T,T):
		return cpSeparateStrips2SeparateTiles;
		/* Tiles -> Tiles */
	case pack(PLANARCONFIG_CONTIG, PLANARCONFIG_CONTIG, T,T,F):
	case pack(PLANARCONFIG_CONTIG, PLANARCONFIG_CONTIG, T,T,T):
		return cpContigTiles2ContigTiles;
	case pack(PLANARCONFIG_CONTIG, PLANARCONFIG_SEPARATE, T,T,F):
	case pack(PLANARCONFIG_CONTIG, PLANARCONFIG_SEPARATE, T,T,T):
		return cpContigTiles2SeparateTiles;
	case pack(PLANARCONFIG_SEPARATE, PLANARCONFIG_CONTIG, T,T,F):
	case pack(PLANARCONFIG_SEPARATE, PLANARCONFIG_CONTIG, T,T,T):
		return cpSeparateTiles2ContigTiles;
	case pack(PLANARCONFIG_SEPARATE, PLANARCONFIG_SEPARATE, T,T,F):
	case pack(PLANARCONFIG_SEPARATE, PLANARCONFIG_SEPARATE, T,T,T):
		return cpSeparateTiles2SeparateTiles;
		/* Tiles -> Strips */
	case pack(PLANARCONFIG_CONTIG, PLANARCONFIG_CONTIG, T,F,F):
	case pack(PLANARCONFIG_CONTIG, PLANARCONFIG_CONTIG, T,F,T):
		return cpContigTiles2ContigStrips;
	case pack(PLANARCONFIG_CONTIG, PLANARCONFIG_SEPARATE, T,F,F):
	case pack(PLANARCONFIG_CONTIG, PLANARCONFIG_SEPARATE, T,F,T):
		return cpContigTiles2SeparateStrips;
	case pack(PLANARCONFIG_SEPARATE, PLANARCONFIG_CONTIG, T,F,F):
	case pack(PLANARCONFIG_SEPARATE, PLANARCONFIG_CONTIG, T,F,T):
		return cpSeparateTiles2ContigStrips;
	case pack(PLANARCONFIG_SEPARATE, PLANARCONFIG_SEPARATE, T,F,F):
	case pack(PLANARCONFIG_SEPARATE, PLANARCONFIG_SEPARATE, T,F,T):
		return cpSeparateTiles2SeparateStrips;
		/* Strips -> Strips */
	case pack(PLANARCONFIG_CONTIG, PLANARCONFIG_CONTIG, F,F,F):
		return bias ? cpBiasedContig2Contig : cpContig2ContigByRow;
	case pack(PLANARCONFIG_CONTIG, PLANARCONFIG_CONTIG, F,F,T):
		return cpDecodedStrips;
	case pack(PLANARCONFIG_CONTIG, PLANARCONFIG_SEPARATE, F,F,F):
	case pack(PLANARCONFIG_CONTIG, PLANARCONFIG_SEPARATE, F,F,T):
		return cpContig2SeparateByRow;
	case pack(PLANARCONFIG_SEPARATE, PLANARCONFIG_CONTIG, F,F,F):
	case pack(PLANARCONFIG_SEPARATE, PLANARCONFIG_CONTIG, F,F,T):
		return cpSeparate2ContigByRow;
	case pack(PLANARCONFIG_SEPARATE, PLANARCONFIG_SEPARATE, F,F,F):
	case pack(PLANARCONFIG_SEPARATE, PLANARCONFIG_SEPARATE, F,F,T):
		return cpSeparate2SeparateByRow;
	}
#undef pack
#undef F
#undef T
	fprintf(stderr, "tiffcp: %s: Don't know how to copy/convert image.\n",
	    TIFFFileName(in));
	return (NULL);
}

/* vim: set ts=8 sts=8 sw=8 noet: */
/*
 * Local Variables:
 * mode: c
 * c-basic-offset: 8
 * fill-column: 78
 * End:
 */
/* $Id$ */ /* * Copyright (c) 1988-1997 Sam Leffler * Copyright (c) 1991-1997 Silicon Graphics, Inc. * * Revised: 2/18/01 BAR -- added syntax for extracting single images from * multi-image TIFF files. * * New syntax is: sourceFileName,image# * * image# ranges from 0..<n-1> where n is the # of images in the file. * There may be no white space between the comma and the filename or * image number. * * Example: tiffcp source.tif,1 destination.tif * * Copies the 2nd image in source.tif to the destination. * ***** * Permission to use, copy, modify, distribute, and sell this software and * its documentation for any purpose is hereby granted without fee, provided * that (i) the above copyright notices and this permission notice appear in * all copies of the software and related documentation, and (ii) the names of * Sam Leffler and Silicon Graphics may not be used in any advertising or * publicity relating to the software without the specific, prior written * permission of Sam Leffler and Silicon Graphics. * * THE SOFTWARE IS PROVIDED "AS-IS" AND WITHOUT WARRANTY OF ANY KIND, * EXPRESS, IMPLIED OR OTHERWISE, INCLUDING WITHOUT LIMITATION, ANY * WARRANTY OF MERCHANTABILITY OR FITNESS FOR A PARTICULAR PURPOSE. * * IN NO EVENT SHALL SAM LEFFLER OR SILICON GRAPHICS BE LIABLE FOR * ANY SPECIAL, INCIDENTAL, INDIRECT OR CONSEQUENTIAL DAMAGES OF ANY KIND, * OR ANY DAMAGES WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, * WHETHER OR NOT ADVISED OF THE POSSIBILITY OF DAMAGE, AND ON ANY THEORY OF * LIABILITY, ARISING OUT OF OR IN CONNECTION WITH THE USE OR PERFORMANCE * OF THIS SOFTWARE. 
*/ #include "tif_config.h" #include <stdio.h> #include <stdlib.h> #include <string.h> #include <ctype.h> #ifdef HAVE_UNISTD_H # include <unistd.h> #endif #include "tiffio.h" #ifndef HAVE_GETOPT extern int getopt(int, char**, char*); #endif #if defined(VMS) # define unlink delete #endif #define streq(a,b) (strcmp(a,b) == 0) #define strneq(a,b,n) (strncmp(a,b,n) == 0) #define TRUE 1 #define FALSE 0 static int outtiled = -1; static uint32 tilewidth; static uint32 tilelength; static uint16 config; static uint16 compression; static uint16 predictor; static int preset; static uint16 fillorder; static uint16 orientation; static uint32 rowsperstrip; static uint32 g3opts; static int ignore = FALSE; /* if true, ignore read errors */ static uint32 defg3opts = (uint32) -1; static int quality = 75; /* JPEG quality */ static int jpegcolormode = JPEGCOLORMODE_RGB; static uint16 defcompression = (uint16) -1; static uint16 defpredictor = (uint16) -1; static int defpreset = -1; static int tiffcp(TIFF*, TIFF*); static int processCompressOptions(char*); static void usage(void); static char comma = ','; /* (default) comma separator character */ static TIFF* bias = NULL; static int pageNum = 0; static int pageInSeq = 0; static int nextSrcImage (TIFF *tif, char **imageSpec) /* seek to the next image specified in *imageSpec returns 1 if success, 0 if no more images to process *imageSpec=NULL if subsequent images should be processed in sequence */ { if (**imageSpec == comma) { /* if not @comma, we've done all images */ char *start = *imageSpec + 1; tdir_t nextImage = (tdir_t)strtol(start, imageSpec, 0); if (start == *imageSpec) nextImage = TIFFCurrentDirectory (tif); if (**imageSpec) { if (**imageSpec == comma) { /* a trailing comma denotes remaining images in sequence */ if ((*imageSpec)[1] == '\0') *imageSpec = NULL; }else{ fprintf (stderr, "Expected a %c separated image # list after %s\n", comma, TIFFFileName (tif)); exit (-4); /* syntax error */ } } if (TIFFSetDirectory (tif, 
nextImage)) return 1; fprintf (stderr, "%s%c%d not found!\n", TIFFFileName(tif), comma, (int) nextImage); } return 0; } static TIFF* openSrcImage (char **imageSpec) /* imageSpec points to a pointer to a filename followed by optional ,image#'s Open the TIFF file and assign *imageSpec to either NULL if there are no images specified, or a pointer to the next image number text */ { TIFF *tif; char *fn = *imageSpec; *imageSpec = strchr (fn, comma); if (*imageSpec) { /* there is at least one image number specifier */ **imageSpec = '\0'; tif = TIFFOpen (fn, "r"); /* but, ignore any single trailing comma */ if (!(*imageSpec)[1]) {*imageSpec = NULL; return tif;} if (tif) { **imageSpec = comma; /* replace the comma */ if (!nextSrcImage(tif, imageSpec)) { TIFFClose (tif); tif = NULL; } } }else tif = TIFFOpen (fn, "r"); return tif; } int main(int argc, char* argv[]) { uint16 defconfig = (uint16) -1; uint16 deffillorder = 0; uint32 deftilewidth = (uint32) -1; uint32 deftilelength = (uint32) -1; uint32 defrowsperstrip = (uint32) 0; uint64 diroff = 0; TIFF* in; TIFF* out; char mode[10]; char* mp = mode; int c; #if !HAVE_DECL_OPTARG extern int optind; extern char* optarg; #endif *mp++ = 'w'; *mp = '\0'; while ((c = getopt(argc, argv, ",:b:c:f:l:o:p:r:w:aistBLMC8x")) != -1) switch (c) { case ',': if (optarg[0] != '=') usage(); comma = optarg[1]; break; case 'b': /* this file is bias image subtracted from others */ if (bias) { fputs ("Only 1 bias image may be specified\n", stderr); exit (-2); } { uint16 samples = (uint16) -1; char **biasFn = &optarg; bias = openSrcImage (biasFn); if (!bias) exit (-5); if (TIFFIsTiled (bias)) { fputs ("Bias image must be organized in strips\n", stderr); exit (-7); } TIFFGetField(bias, TIFFTAG_SAMPLESPERPIXEL, &samples); if (samples != 1) { fputs ("Bias image must be monochrome\n", stderr); exit (-7); } } break; case 'a': /* append to output */ mode[0] = 'a'; break; case 'c': /* compression scheme */ if (!processCompressOptions(optarg)) usage(); 
break; case 'f': /* fill order */ if (streq(optarg, "lsb2msb")) deffillorder = FILLORDER_LSB2MSB; else if (streq(optarg, "msb2lsb")) deffillorder = FILLORDER_MSB2LSB; else usage(); break; case 'i': /* ignore errors */ ignore = TRUE; break; case 'l': /* tile length */ outtiled = TRUE; deftilelength = atoi(optarg); break; case 'o': /* initial directory offset */ diroff = strtoul(optarg, NULL, 0); break; case 'p': /* planar configuration */ if (streq(optarg, "separate")) defconfig = PLANARCONFIG_SEPARATE; else if (streq(optarg, "contig")) defconfig = PLANARCONFIG_CONTIG; else usage(); break; case 'r': /* rows/strip */ defrowsperstrip = atol(optarg); break; case 's': /* generate stripped output */ outtiled = FALSE; break; case 't': /* generate tiled output */ outtiled = TRUE; break; case 'w': /* tile width */ outtiled = TRUE; deftilewidth = atoi(optarg); break; case 'B': *mp++ = 'b'; *mp = '\0'; break; case 'L': *mp++ = 'l'; *mp = '\0'; break; case 'M': *mp++ = 'm'; *mp = '\0'; break; case 'C': *mp++ = 'c'; *mp = '\0'; break; case '8': *mp++ = '8'; *mp = '\0'; break; case 'x': pageInSeq = 1; break; case '?': usage(); /*NOTREACHED*/ } if (argc - optind < 2) usage(); out = TIFFOpen(argv[argc-1], mode); if (out == NULL) return (-2); if ((argc - optind) == 2) pageNum = -1; for (; optind < argc-1 ; optind++) { char *imageCursor = argv[optind]; in = openSrcImage (&imageCursor); if (in == NULL) { (void) TIFFClose(out); return (-3); } if (diroff != 0 && !TIFFSetSubDirectory(in, diroff)) { TIFFError(TIFFFileName(in), "Error, setting subdirectory at " TIFF_UINT64_FORMAT, diroff); (void) TIFFClose(in); (void) TIFFClose(out); return (1); } for (;;) { config = defconfig; compression = defcompression; predictor = defpredictor; preset = defpreset; fillorder = deffillorder; rowsperstrip = defrowsperstrip; tilewidth = deftilewidth; tilelength = deftilelength; g3opts = defg3opts; if (!tiffcp(in, out) || !TIFFWriteDirectory(out)) { (void) TIFFClose(in); (void) TIFFClose(out); return (1); 
} if (imageCursor) { /* seek next image directory */ if (!nextSrcImage(in, &imageCursor)) break; }else if (!TIFFReadDirectory(in)) break; } (void) TIFFClose(in); } (void) TIFFClose(out); return (0); } static void processZIPOptions(char* cp) { if ( (cp = strchr(cp, ':')) ) { do { cp++; if (isdigit((int)*cp)) defpredictor = atoi(cp); else if (*cp == 'p') defpreset = atoi(++cp); else usage(); } while( (cp = strchr(cp, ':')) ); } } static void processG3Options(char* cp) { if( (cp = strchr(cp, ':')) ) { if (defg3opts == (uint32) -1) defg3opts = 0; do { cp++; if (strneq(cp, "1d", 2)) defg3opts &= ~GROUP3OPT_2DENCODING; else if (strneq(cp, "2d", 2)) defg3opts |= GROUP3OPT_2DENCODING; else if (strneq(cp, "fill", 4)) defg3opts |= GROUP3OPT_FILLBITS; else usage(); } while( (cp = strchr(cp, ':')) ); } } static int processCompressOptions(char* opt) { if (streq(opt, "none")) { defcompression = COMPRESSION_NONE; } else if (streq(opt, "packbits")) { defcompression = COMPRESSION_PACKBITS; } else if (strneq(opt, "jpeg", 4)) { char* cp = strchr(opt, ':'); defcompression = COMPRESSION_JPEG; while( cp ) { if (isdigit((int)cp[1])) quality = atoi(cp+1); else if (cp[1] == 'r' ) jpegcolormode = JPEGCOLORMODE_RAW; else usage(); cp = strchr(cp+1,':'); } } else if (strneq(opt, "g3", 2)) { processG3Options(opt); defcompression = COMPRESSION_CCITTFAX3; } else if (streq(opt, "g4")) { defcompression = COMPRESSION_CCITTFAX4; } else if (strneq(opt, "lzw", 3)) { char* cp = strchr(opt, ':'); if (cp) defpredictor = atoi(cp+1); defcompression = COMPRESSION_LZW; } else if (strneq(opt, "zip", 3)) { processZIPOptions(opt); defcompression = COMPRESSION_ADOBE_DEFLATE; } else if (strneq(opt, "lzma", 4)) { processZIPOptions(opt); defcompression = COMPRESSION_LZMA; } else if (strneq(opt, "jbig", 4)) { defcompression = COMPRESSION_JBIG; } else if (strneq(opt, "sgilog", 6)) { defcompression = COMPRESSION_SGILOG; } else return (0); return (1); } char* stuff[] = { "usage: tiffcp [options] input... 
output", "where options are:", " -a append to output instead of overwriting", " -o offset set initial directory offset", " -p contig pack samples contiguously (e.g. RGBRGB...)", " -p separate store samples separately (e.g. RRR...GGG...BBB...)", " -s write output in strips", " -t write output in tiles", " -x force the merged tiff pages in sequence", " -8 write BigTIFF instead of default ClassicTIFF", " -B write big-endian instead of native byte order", " -L write little-endian instead of native byte order", " -M disable use of memory-mapped files", " -C disable strip chopping", " -i ignore read errors", " -b file[,#] bias (dark) monochrome image to be subtracted from all others", " -,=% use % rather than , to separate image #'s (per Note below)", "", " -r # make each strip have no more than # rows", " -w # set output tile width (pixels)", " -l # set output tile length (pixels)", "", " -f lsb2msb force lsb-to-msb FillOrder for output", " -f msb2lsb force msb-to-lsb FillOrder for output", "", " -c lzw[:opts] compress output with Lempel-Ziv & Welch encoding", " -c zip[:opts] compress output with deflate encoding", " -c lzma[:opts] compress output with LZMA2 encoding", " -c jpeg[:opts] compress output with JPEG encoding", " -c jbig compress output with ISO JBIG encoding", " -c packbits compress output with packbits encoding", " -c g3[:opts] compress output with CCITT Group 3 encoding", " -c g4 compress output with CCITT Group 4 encoding", " -c sgilog compress output with SGILOG encoding", " -c none use no compression algorithm on output", "", "Group 3 options:", " 1d use default CCITT Group 3 1D-encoding", " 2d use optional CCITT Group 3 2D-encoding", " fill byte-align EOL codes", "For example, -c g3:2d:fill to get G3-2D-encoded data with byte-aligned EOLs", "", "JPEG options:", " # set compression quality level (0-100, default 75)", " r output color image as RGB rather than YCbCr", "For example, -c jpeg:r:50 to get JPEG-encoded RGB data with 50% comp. 
quality", "", "LZW, Deflate (ZIP) and LZMA2 options:", " # set predictor value", " p# set compression level (preset)", "For example, -c lzw:2 to get LZW-encoded data with horizontal differencing,", "-c zip:3:p9 for Deflate encoding with maximum compression level and floating", "point predictor.", "", "Note that input filenames may be of the form filename,x,y,z", "where x, y, and z specify image numbers in the filename to copy.", "example: tiffcp -c none -b esp.tif,1 esp.tif,0 test.tif", " subtract 2nd image in esp.tif from 1st yielding uncompressed result test.tif", NULL }; static void usage(void) { char buf[BUFSIZ]; int i; setbuf(stderr, buf); fprintf(stderr, "%s\n\n", TIFFGetVersion()); for (i = 0; stuff[i] != NULL; i++) fprintf(stderr, "%s\n", stuff[i]); exit(-1); } #define CopyField(tag, v) \ if (TIFFGetField(in, tag, &v)) TIFFSetField(out, tag, v) #define CopyField2(tag, v1, v2) \ if (TIFFGetField(in, tag, &v1, &v2)) TIFFSetField(out, tag, v1, v2) #define CopyField3(tag, v1, v2, v3) \ if (TIFFGetField(in, tag, &v1, &v2, &v3)) TIFFSetField(out, tag, v1, v2, v3) #define CopyField4(tag, v1, v2, v3, v4) \ if (TIFFGetField(in, tag, &v1, &v2, &v3, &v4)) TIFFSetField(out, tag, v1, v2, v3, v4) static void cpTag(TIFF* in, TIFF* out, uint16 tag, uint16 count, TIFFDataType type) { switch (type) { case TIFF_SHORT: if (count == 1) { uint16 shortv; CopyField(tag, shortv); } else if (count == 2) { uint16 shortv1, shortv2; CopyField2(tag, shortv1, shortv2); } else if (count == 4) { uint16 *tr, *tg, *tb, *ta; CopyField4(tag, tr, tg, tb, ta); } else if (count == (uint16) -1) { uint16 shortv1; uint16* shortav; CopyField2(tag, shortv1, shortav); } break; case TIFF_LONG: { uint32 longv; CopyField(tag, longv); } break; case TIFF_RATIONAL: if (count == 1) { float floatv; CopyField(tag, floatv); } else if (count == (uint16) -1) { float* floatav; CopyField(tag, floatav); } break; case TIFF_ASCII: { char* stringv; CopyField(tag, stringv); } break; case TIFF_DOUBLE: if (count == 1) { 
double doublev; CopyField(tag, doublev); } else if (count == (uint16) -1) { double* doubleav; CopyField(tag, doubleav); } break; default: TIFFError(TIFFFileName(in), "Data type %d is not supported, tag %d skipped.", tag, type); } } static struct cpTag { uint16 tag; uint16 count; TIFFDataType type; } tags[] = { { TIFFTAG_SUBFILETYPE, 1, TIFF_LONG }, { TIFFTAG_THRESHHOLDING, 1, TIFF_SHORT }, { TIFFTAG_DOCUMENTNAME, 1, TIFF_ASCII }, { TIFFTAG_IMAGEDESCRIPTION, 1, TIFF_ASCII }, { TIFFTAG_MAKE, 1, TIFF_ASCII }, { TIFFTAG_MODEL, 1, TIFF_ASCII }, { TIFFTAG_MINSAMPLEVALUE, 1, TIFF_SHORT }, { TIFFTAG_MAXSAMPLEVALUE, 1, TIFF_SHORT }, { TIFFTAG_XRESOLUTION, 1, TIFF_RATIONAL }, { TIFFTAG_YRESOLUTION, 1, TIFF_RATIONAL }, { TIFFTAG_PAGENAME, 1, TIFF_ASCII }, { TIFFTAG_XPOSITION, 1, TIFF_RATIONAL }, { TIFFTAG_YPOSITION, 1, TIFF_RATIONAL }, { TIFFTAG_RESOLUTIONUNIT, 1, TIFF_SHORT }, { TIFFTAG_SOFTWARE, 1, TIFF_ASCII }, { TIFFTAG_DATETIME, 1, TIFF_ASCII }, { TIFFTAG_ARTIST, 1, TIFF_ASCII }, { TIFFTAG_HOSTCOMPUTER, 1, TIFF_ASCII }, { TIFFTAG_WHITEPOINT, (uint16) -1, TIFF_RATIONAL }, { TIFFTAG_PRIMARYCHROMATICITIES,(uint16) -1,TIFF_RATIONAL }, { TIFFTAG_HALFTONEHINTS, 2, TIFF_SHORT }, { TIFFTAG_INKSET, 1, TIFF_SHORT }, { TIFFTAG_DOTRANGE, 2, TIFF_SHORT }, { TIFFTAG_TARGETPRINTER, 1, TIFF_ASCII }, { TIFFTAG_SAMPLEFORMAT, 1, TIFF_SHORT }, { TIFFTAG_YCBCRCOEFFICIENTS, (uint16) -1,TIFF_RATIONAL }, { TIFFTAG_YCBCRSUBSAMPLING, 2, TIFF_SHORT }, { TIFFTAG_YCBCRPOSITIONING, 1, TIFF_SHORT }, { TIFFTAG_REFERENCEBLACKWHITE, (uint16) -1,TIFF_RATIONAL }, { TIFFTAG_EXTRASAMPLES, (uint16) -1, TIFF_SHORT }, { TIFFTAG_SMINSAMPLEVALUE, 1, TIFF_DOUBLE }, { TIFFTAG_SMAXSAMPLEVALUE, 1, TIFF_DOUBLE }, { TIFFTAG_STONITS, 1, TIFF_DOUBLE }, }; #define NTAGS (sizeof (tags) / sizeof (tags[0])) #define CopyTag(tag, count, type) cpTag(in, out, tag, count, type) typedef int (*copyFunc) (TIFF* in, TIFF* out, uint32 l, uint32 w, uint16 samplesperpixel); static copyFunc pickCopyFunc(TIFF*, TIFF*, uint16, uint16); /* 
PODD */ static int tiffcp(TIFF* in, TIFF* out) { uint16 bitspersample = 1, samplesperpixel = 1; uint16 input_compression, input_photometric = PHOTOMETRIC_MINISBLACK; copyFunc cf; uint32 width, length; struct cpTag* p; CopyField(TIFFTAG_IMAGEWIDTH, width); CopyField(TIFFTAG_IMAGELENGTH, length); CopyField(TIFFTAG_BITSPERSAMPLE, bitspersample); CopyField(TIFFTAG_SAMPLESPERPIXEL, samplesperpixel); if (compression != (uint16)-1) TIFFSetField(out, TIFFTAG_COMPRESSION, compression); else CopyField(TIFFTAG_COMPRESSION, compression); TIFFGetFieldDefaulted(in, TIFFTAG_COMPRESSION, &input_compression); TIFFGetFieldDefaulted(in, TIFFTAG_PHOTOMETRIC, &input_photometric); if (input_compression == COMPRESSION_JPEG) { /* Force conversion to RGB */ TIFFSetField(in, TIFFTAG_JPEGCOLORMODE, JPEGCOLORMODE_RGB); } else if (input_photometric == PHOTOMETRIC_YCBCR) { /* Otherwise, can't handle subsampled input */ uint16 subsamplinghor,subsamplingver; TIFFGetFieldDefaulted(in, TIFFTAG_YCBCRSUBSAMPLING, &subsamplinghor, &subsamplingver); if (subsamplinghor!=1 || subsamplingver!=1) { fprintf(stderr, "tiffcp: %s: Can't copy/convert subsampled image.\n", TIFFFileName(in)); return FALSE; } } if (compression == COMPRESSION_JPEG) { if (input_photometric == PHOTOMETRIC_RGB && jpegcolormode == JPEGCOLORMODE_RGB) TIFFSetField(out, TIFFTAG_PHOTOMETRIC, PHOTOMETRIC_YCBCR); else TIFFSetField(out, TIFFTAG_PHOTOMETRIC, input_photometric); } else if (compression == COMPRESSION_SGILOG || compression == COMPRESSION_SGILOG24) TIFFSetField(out, TIFFTAG_PHOTOMETRIC, samplesperpixel == 1 ? 
PHOTOMETRIC_LOGL : PHOTOMETRIC_LOGLUV); else if (input_compression == COMPRESSION_JPEG && samplesperpixel == 3 ) { /* RGB conversion was forced above hence the output will be of the same type */ TIFFSetField(out, TIFFTAG_PHOTOMETRIC, PHOTOMETRIC_RGB); } else CopyTag(TIFFTAG_PHOTOMETRIC, 1, TIFF_SHORT); if (fillorder != 0) TIFFSetField(out, TIFFTAG_FILLORDER, fillorder); else CopyTag(TIFFTAG_FILLORDER, 1, TIFF_SHORT); /* * Will copy `Orientation' tag from input image */ TIFFGetFieldDefaulted(in, TIFFTAG_ORIENTATION, &orientation); switch (orientation) { case ORIENTATION_BOTRIGHT: case ORIENTATION_RIGHTBOT: /* XXX */ TIFFWarning(TIFFFileName(in), "using bottom-left orientation"); orientation = ORIENTATION_BOTLEFT; /* fall thru... */ case ORIENTATION_LEFTBOT: /* XXX */ case ORIENTATION_BOTLEFT: break; case ORIENTATION_TOPRIGHT: case ORIENTATION_RIGHTTOP: /* XXX */ default: TIFFWarning(TIFFFileName(in), "using top-left orientation"); orientation = ORIENTATION_TOPLEFT; /* fall thru... */ case ORIENTATION_LEFTTOP: /* XXX */ case ORIENTATION_TOPLEFT: break; } TIFFSetField(out, TIFFTAG_ORIENTATION, orientation); /* * Choose tiles/strip for the output image according to * the command line arguments (-tiles, -strips) and the * structure of the input image. */ if (outtiled == -1) outtiled = TIFFIsTiled(in); if (outtiled) { /* * Setup output file's tile width&height. If either * is not specified, use either the value from the * input image or, if nothing is defined, use the * library default. */ if (tilewidth == (uint32) -1) TIFFGetField(in, TIFFTAG_TILEWIDTH, &tilewidth); if (tilelength == (uint32) -1) TIFFGetField(in, TIFFTAG_TILELENGTH, &tilelength); TIFFDefaultTileSize(out, &tilewidth, &tilelength); TIFFSetField(out, TIFFTAG_TILEWIDTH, tilewidth); TIFFSetField(out, TIFFTAG_TILELENGTH, tilelength); } else { /* * RowsPerStrip is left unspecified: use either the * value from the input image or, if nothing is defined, * use the library default. 
*/ if (rowsperstrip == (uint32) 0) { if (!TIFFGetField(in, TIFFTAG_ROWSPERSTRIP, &rowsperstrip)) { rowsperstrip = TIFFDefaultStripSize(out, rowsperstrip); } if (rowsperstrip > length && rowsperstrip != (uint32)-1) rowsperstrip = length; } else if (rowsperstrip == (uint32) -1) rowsperstrip = length; TIFFSetField(out, TIFFTAG_ROWSPERSTRIP, rowsperstrip); } if (config != (uint16) -1) TIFFSetField(out, TIFFTAG_PLANARCONFIG, config); else CopyField(TIFFTAG_PLANARCONFIG, config); if (samplesperpixel <= 4) CopyTag(TIFFTAG_TRANSFERFUNCTION, 4, TIFF_SHORT); CopyTag(TIFFTAG_COLORMAP, 4, TIFF_SHORT); /* SMinSampleValue & SMaxSampleValue */ switch (compression) { case COMPRESSION_JPEG: TIFFSetField(out, TIFFTAG_JPEGQUALITY, quality); TIFFSetField(out, TIFFTAG_JPEGCOLORMODE, jpegcolormode); break; case COMPRESSION_JBIG: CopyTag(TIFFTAG_FAXRECVPARAMS, 1, TIFF_LONG); CopyTag(TIFFTAG_FAXRECVTIME, 1, TIFF_LONG); CopyTag(TIFFTAG_FAXSUBADDRESS, 1, TIFF_ASCII); CopyTag(TIFFTAG_FAXDCS, 1, TIFF_ASCII); break; case COMPRESSION_LZW: case COMPRESSION_ADOBE_DEFLATE: case COMPRESSION_DEFLATE: case COMPRESSION_LZMA: if (predictor != (uint16)-1) TIFFSetField(out, TIFFTAG_PREDICTOR, predictor); else CopyField(TIFFTAG_PREDICTOR, predictor); if (preset != -1) { if (compression == COMPRESSION_ADOBE_DEFLATE || compression == COMPRESSION_DEFLATE) TIFFSetField(out, TIFFTAG_ZIPQUALITY, preset); else if (compression == COMPRESSION_LZMA) TIFFSetField(out, TIFFTAG_LZMAPRESET, preset); } break; case COMPRESSION_CCITTFAX3: case COMPRESSION_CCITTFAX4: if (compression == COMPRESSION_CCITTFAX3) { if (g3opts != (uint32) -1) TIFFSetField(out, TIFFTAG_GROUP3OPTIONS, g3opts); else CopyField(TIFFTAG_GROUP3OPTIONS, g3opts); } else CopyTag(TIFFTAG_GROUP4OPTIONS, 1, TIFF_LONG); CopyTag(TIFFTAG_BADFAXLINES, 1, TIFF_LONG); CopyTag(TIFFTAG_CLEANFAXDATA, 1, TIFF_LONG); CopyTag(TIFFTAG_CONSECUTIVEBADFAXLINES, 1, TIFF_LONG); CopyTag(TIFFTAG_FAXRECVPARAMS, 1, TIFF_LONG); CopyTag(TIFFTAG_FAXRECVTIME, 1, TIFF_LONG); 
CopyTag(TIFFTAG_FAXSUBADDRESS, 1, TIFF_ASCII); break; } { uint32 len32; void** data; if (TIFFGetField(in, TIFFTAG_ICCPROFILE, &len32, &data)) TIFFSetField(out, TIFFTAG_ICCPROFILE, len32, data); } { uint16 ninks; const char* inknames; if (TIFFGetField(in, TIFFTAG_NUMBEROFINKS, &ninks)) { TIFFSetField(out, TIFFTAG_NUMBEROFINKS, ninks); if (TIFFGetField(in, TIFFTAG_INKNAMES, &inknames)) { int inknameslen = strlen(inknames) + 1; const char* cp = inknames; while (ninks > 1) { cp = strchr(cp, '\0'); cp++; inknameslen += (strlen(cp) + 1); ninks--; } TIFFSetField(out, TIFFTAG_INKNAMES, inknameslen, inknames); } } } { unsigned short pg0, pg1; if (pageInSeq == 1) { if (pageNum < 0) /* only one input file */ { if (TIFFGetField(in, TIFFTAG_PAGENUMBER, &pg0, &pg1)) TIFFSetField(out, TIFFTAG_PAGENUMBER, pg0, pg1); } else TIFFSetField(out, TIFFTAG_PAGENUMBER, pageNum++, 0); } else { if (TIFFGetField(in, TIFFTAG_PAGENUMBER, &pg0, &pg1)) { if (pageNum < 0) /* only one input file */ TIFFSetField(out, TIFFTAG_PAGENUMBER, pg0, pg1); else TIFFSetField(out, TIFFTAG_PAGENUMBER, pageNum++, 0); } } } for (p = tags; p < &tags[NTAGS]; p++) CopyTag(p->tag, p->count, p->type); cf = pickCopyFunc(in, out, bitspersample, samplesperpixel); return (cf ? (*cf)(in, out, length, width, samplesperpixel) : FALSE); } /* * Copy Functions. */ #define DECLAREcpFunc(x) \ static int x(TIFF* in, TIFF* out, \ uint32 imagelength, uint32 imagewidth, tsample_t spp) #define DECLAREreadFunc(x) \ static int x(TIFF* in, \ uint8* buf, uint32 imagelength, uint32 imagewidth, tsample_t spp) typedef int (*readFunc)(TIFF*, uint8*, uint32, uint32, tsample_t); #define DECLAREwriteFunc(x) \ static int x(TIFF* out, \ uint8* buf, uint32 imagelength, uint32 imagewidth, tsample_t spp) typedef int (*writeFunc)(TIFF*, uint8*, uint32, uint32, tsample_t); /* * Contig -> contig by scanline for rows/strip change. 
*/
DECLAREcpFunc(cpContig2ContigByRow)
{
	tsize_t scanlinesize = TIFFScanlineSize(in);
	tdata_t buf;
	uint32 row;

	buf = _TIFFmalloc(scanlinesize);
	if (!buf)
		return 0;
	_TIFFmemset(buf, 0, scanlinesize);
	(void) imagewidth; (void) spp;	/* unused in the contig/contig row copy */
	for (row = 0; row < imagelength; row++) {
		/* `ignore' (global, -i flag) lets read errors pass so partially
		 * damaged images can still be copied. */
		if (TIFFReadScanline(in, buf, row, 0) < 0 && !ignore) {
			TIFFError(TIFFFileName(in),
			    "Error, can't read scanline %lu",
			    (unsigned long) row);
			goto bad;
		}
		if (TIFFWriteScanline(out, buf, row, 0) < 0) {
			TIFFError(TIFFFileName(out),
			    "Error, can't write scanline %lu",
			    (unsigned long) row);
			goto bad;
		}
	}
	_TIFFfree(buf);
	return 1;
bad:
	_TIFFfree(buf);
	return 0;
}

/* Signature of a per-scanline bias-subtraction routine: subtracts `bias'
 * from `image' element-wise, clamping at zero (see subtract##bits below). */
typedef void biasFn (void *image, void *bias, uint32 pixels);

/* Generates subtract8/subtract16/subtract32: saturating (floor-at-zero)
 * per-pixel subtraction for the given unsigned sample width. */
#define subtract(bits) \
static void subtract##bits (void *i, void *b, uint32 pixels)\
{\
   uint##bits *image = i;\
   uint##bits *bias = b;\
   while (pixels--) {\
     *image = *image > *bias ? *image-*bias : 0;\
     image++, bias++; \
   } \
}

subtract(8)
subtract(16)
subtract(32)

/* Map a BitsPerSample value to the matching subtract routine;
 * NULL when the sample width is unsupported. */
static biasFn *lineSubtractFn (unsigned bits)
{
	switch (bits) {
	case 8:  return subtract8;
	case 16: return subtract16;
	case 32: return subtract32;
	}
	return NULL;
}

/*
 * Contig -> contig by scanline while subtracting a bias image.
*/
/*
 * Copy `in' to `out' scanline by scanline while subtracting the global
 * `bias' image (the -b option). Requires a single sample per pixel and
 * a bias image whose dimensions and scanline size match the input.
 * Returns 1 on success, 0 on any error.
 */
DECLAREcpFunc(cpBiasedContig2Contig)
{
	if (spp == 1) {
		tsize_t biasSize = TIFFScanlineSize(bias);
		tsize_t bufSize = TIFFScanlineSize(in);
		tdata_t buf, biasBuf;
		uint32 biasWidth = 0, biasLength = 0;
		TIFFGetField(bias, TIFFTAG_IMAGEWIDTH, &biasWidth);
		TIFFGetField(bias, TIFFTAG_IMAGELENGTH, &biasLength);
		if (biasSize == bufSize &&
		    imagelength == biasLength && imagewidth == biasWidth) {
			uint16 sampleBits = 0;
			biasFn *subtractLine;
			TIFFGetField(in, TIFFTAG_BITSPERSAMPLE, &sampleBits);
			subtractLine = lineSubtractFn (sampleBits);
			if (subtractLine) {
				uint32 row;
				buf = _TIFFmalloc(bufSize);
				biasBuf = _TIFFmalloc(bufSize);
				/*
				 * FIX: the allocations were previously used
				 * unchecked; a failed _TIFFmalloc() led to a
				 * NULL dereference inside TIFFReadScanline().
				 */
				if (!buf || !biasBuf) {
					TIFFError(TIFFFileName(in),
					    "Error, can't allocate scanline buffers");
					if (buf) _TIFFfree(buf);
					if (biasBuf) _TIFFfree(biasBuf);
					return 0;
				}
				for (row = 0; row < imagelength; row++) {
					if (TIFFReadScanline(in, buf, row, 0) < 0
					    && !ignore) {
						TIFFError(TIFFFileName(in),
						    "Error, can't read scanline %lu",
						    (unsigned long) row);
						goto bad;
					}
					if (TIFFReadScanline(bias, biasBuf, row, 0) < 0
					    && !ignore) {
						TIFFError(TIFFFileName(in),
						    "Error, can't read biased scanline %lu",
						    (unsigned long) row);
						goto bad;
					}
					subtractLine (buf, biasBuf, imagewidth);
					if (TIFFWriteScanline(out, buf, row, 0) < 0) {
						TIFFError(TIFFFileName(out),
						    "Error, can't write scanline %lu",
						    (unsigned long) row);
						goto bad;
					}
				}

				_TIFFfree(buf);
				_TIFFfree(biasBuf);
				/* rewind so the bias image can be reused for
				 * the next input image */
				TIFFSetDirectory(bias,
				    TIFFCurrentDirectory(bias));
				return 1;
bad:
				_TIFFfree(buf);
				_TIFFfree(biasBuf);
				return 0;
			} else {
				TIFFError(TIFFFileName(in),
				    "No support for biasing %d bit pixels\n",
				    sampleBits);
				return 0;
			}
		}
		TIFFError(TIFFFileName(in),
		    "Bias image %s,%d\nis not the same size as %s,%d\n",
		    TIFFFileName(bias), TIFFCurrentDirectory(bias),
		    TIFFFileName(in), TIFFCurrentDirectory(in));
		return 0;
	} else {
		TIFFError(TIFFFileName(in),
		    "Can't bias %s,%d as it has >1 Sample/Pixel\n",
		    TIFFFileName(in), TIFFCurrentDirectory(in));
		return 0;
	}
}

/*
 * Strip -> strip for change in encoding.
*/
DECLAREcpFunc(cpDecodedStrips)
{
	tsize_t stripsize = TIFFStripSize(in);
	tdata_t buf = _TIFFmalloc(stripsize);

	(void) imagewidth; (void) spp;	/* unused for whole-strip copies */
	if (buf) {
		tstrip_t s, ns = TIFFNumberOfStrips(in);
		uint32 row = 0;
		_TIFFmemset(buf, 0, stripsize);
		for (s = 0; s < ns && row < imagelength; s++) {
			/* last strip may hold fewer than rowsperstrip rows;
			 * NOTE(review): this relies on the caller only picking
			 * this path when input and output strip layouts match
			 * (see pickCopyFunc's `bychunk') -- confirm. */
			tsize_t cc = (row + rowsperstrip > imagelength) ?
			    TIFFVStripSize(in, imagelength - row) : stripsize;
			if (TIFFReadEncodedStrip(in, s, buf, cc) < 0
			    && !ignore) {
				TIFFError(TIFFFileName(in),
				    "Error, can't read strip %lu",
				    (unsigned long) s);
				goto bad;
			}
			if (TIFFWriteEncodedStrip(out, s, buf, cc) < 0) {
				TIFFError(TIFFFileName(out),
				    "Error, can't write strip %lu",
				    (unsigned long) s);
				goto bad;
			}
			row += rowsperstrip;
		}
		_TIFFfree(buf);
		return 1;
	} else {
		TIFFError(TIFFFileName(in),
		    "Error, can't allocate memory buffer of size %lu "
		    "to read strips", (unsigned long) stripsize);
		return 0;
	}

bad:
	_TIFFfree(buf);
	return 0;
}

/*
 * Separate -> separate by row for rows/strip change.
 */
DECLAREcpFunc(cpSeparate2SeparateByRow)
{
	tsize_t scanlinesize = TIFFScanlineSize(in);
	tdata_t buf;
	uint32 row;
	tsample_t s;

	(void) imagewidth;	/* scanline size already accounts for width */
	buf = _TIFFmalloc(scanlinesize);
	if (!buf)
		return 0;
	_TIFFmemset(buf, 0, scanlinesize);
	/* copy plane by plane (separate planar config): all rows of
	 * sample s before moving to sample s+1 */
	for (s = 0; s < spp; s++) {
		for (row = 0; row < imagelength; row++) {
			if (TIFFReadScanline(in, buf, row, s) < 0 && !ignore) {
				TIFFError(TIFFFileName(in),
				    "Error, can't read scanline %lu",
				    (unsigned long) row);
				goto bad;
			}
			if (TIFFWriteScanline(out, buf, row, s) < 0) {
				TIFFError(TIFFFileName(out),
				    "Error, can't write scanline %lu",
				    (unsigned long) row);
				goto bad;
			}
		}
	}
	_TIFFfree(buf);
	return 1;
bad:
	_TIFFfree(buf);
	return 0;
}

/*
 * Contig -> separate by row.
*/ DECLAREcpFunc(cpContig2SeparateByRow) { tsize_t scanlinesizein = TIFFScanlineSize(in); tsize_t scanlinesizeout = TIFFScanlineSize(out); tdata_t inbuf; tdata_t outbuf; register uint8 *inp, *outp; register uint32 n; uint32 row; tsample_t s; uint16 bps = 0; (void) TIFFGetField(in, TIFFTAG_BITSPERSAMPLE, &bps); if( bps != 8 ) { TIFFError(TIFFFileName(in), "Error, can only handle BitsPerSample=8 in %s", "cpContig2SeparateByRow"); return 0; } inbuf = _TIFFmalloc(scanlinesizein); outbuf = _TIFFmalloc(scanlinesizeout); if (!inbuf || !outbuf) goto bad; _TIFFmemset(inbuf, 0, scanlinesizein); _TIFFmemset(outbuf, 0, scanlinesizeout); /* unpack channels */ for (s = 0; s < spp; s++) { for (row = 0; row < imagelength; row++) { if (TIFFReadScanline(in, inbuf, row, 0) < 0 && !ignore) { TIFFError(TIFFFileName(in), "Error, can't read scanline %lu", (unsigned long) row); goto bad; } inp = ((uint8*)inbuf) + s; outp = (uint8*)outbuf; for (n = imagewidth; n-- > 0;) { *outp++ = *inp; inp += spp; } if (TIFFWriteScanline(out, outbuf, row, s) < 0) { TIFFError(TIFFFileName(out), "Error, can't write scanline %lu", (unsigned long) row); goto bad; } } } if (inbuf) _TIFFfree(inbuf); if (outbuf) _TIFFfree(outbuf); return 1; bad: if (inbuf) _TIFFfree(inbuf); if (outbuf) _TIFFfree(outbuf); return 0; } /* * Separate -> contig by row. 
*/ DECLAREcpFunc(cpSeparate2ContigByRow) { tsize_t scanlinesizein = TIFFScanlineSize(in); tsize_t scanlinesizeout = TIFFScanlineSize(out); tdata_t inbuf; tdata_t outbuf; register uint8 *inp, *outp; register uint32 n; uint32 row; tsample_t s; uint16 bps = 0; (void) TIFFGetField(in, TIFFTAG_BITSPERSAMPLE, &bps); if( bps != 8 ) { TIFFError(TIFFFileName(in), "Error, can only handle BitsPerSample=8 in %s", "cpSeparate2ContigByRow"); return 0; } inbuf = _TIFFmalloc(scanlinesizein); outbuf = _TIFFmalloc(scanlinesizeout); if (!inbuf || !outbuf) goto bad; _TIFFmemset(inbuf, 0, scanlinesizein); _TIFFmemset(outbuf, 0, scanlinesizeout); for (row = 0; row < imagelength; row++) { /* merge channels */ for (s = 0; s < spp; s++) { if (TIFFReadScanline(in, inbuf, row, s) < 0 && !ignore) { TIFFError(TIFFFileName(in), "Error, can't read scanline %lu", (unsigned long) row); goto bad; } inp = (uint8*)inbuf; outp = ((uint8*)outbuf) + s; for (n = imagewidth; n-- > 0;) { *outp = *inp++; outp += spp; } } if (TIFFWriteScanline(out, outbuf, row, 0) < 0) { TIFFError(TIFFFileName(out), "Error, can't write scanline %lu", (unsigned long) row); goto bad; } } if (inbuf) _TIFFfree(inbuf); if (outbuf) _TIFFfree(outbuf); return 1; bad: if (inbuf) _TIFFfree(inbuf); if (outbuf) _TIFFfree(outbuf); return 0; } static void cpStripToTile(uint8* out, uint8* in, uint32 rows, uint32 cols, int outskew, int64 inskew) { while (rows-- > 0) { uint32 j = cols; while (j-- > 0) *out++ = *in++; out += outskew; in += inskew; } } static void cpContigBufToSeparateBuf(uint8* out, uint8* in, uint32 rows, uint32 cols, int outskew, int inskew, tsample_t spp, int bytes_per_sample ) { while (rows-- > 0) { uint32 j = cols; while (j-- > 0) { int n = bytes_per_sample; while( n-- ) { *out++ = *in++; } in += (spp-1) * bytes_per_sample; } out += outskew; in += inskew; } } static void cpSeparateBufToContigBuf(uint8* out, uint8* in, uint32 rows, uint32 cols, int outskew, int inskew, tsample_t spp, int bytes_per_sample) { while (rows-- 
> 0) { uint32 j = cols; while (j-- > 0) { int n = bytes_per_sample; while( n-- ) { *out++ = *in++; } out += (spp-1)*bytes_per_sample; } out += outskew; in += inskew; } } static int cpImage(TIFF* in, TIFF* out, readFunc fin, writeFunc fout, uint32 imagelength, uint32 imagewidth, tsample_t spp) { int status = 0; tdata_t buf = NULL; tsize_t scanlinesize = TIFFRasterScanlineSize(in); tsize_t bytes = scanlinesize * (tsize_t)imagelength; /* * XXX: Check for integer overflow. */ if (scanlinesize && imagelength && bytes / (tsize_t)imagelength == scanlinesize) { buf = _TIFFmalloc(bytes); if (buf) { if ((*fin)(in, (uint8*)buf, imagelength, imagewidth, spp)) { status = (*fout)(out, (uint8*)buf, imagelength, imagewidth, spp); } _TIFFfree(buf); } else { TIFFError(TIFFFileName(in), "Error, can't allocate space for image buffer"); } } else { TIFFError(TIFFFileName(in), "Error, no space for image buffer"); } return status; } DECLAREreadFunc(readContigStripsIntoBuffer) { tsize_t scanlinesize = TIFFScanlineSize(in); uint8* bufp = buf; uint32 row; (void) imagewidth; (void) spp; for (row = 0; row < imagelength; row++) { if (TIFFReadScanline(in, (tdata_t) bufp, row, 0) < 0 && !ignore) { TIFFError(TIFFFileName(in), "Error, can't read scanline %lu", (unsigned long) row); return 0; } bufp += scanlinesize; } return 1; } DECLAREreadFunc(readSeparateStripsIntoBuffer) { int status = 1; tsize_t scanlinesize = TIFFScanlineSize(in); tdata_t scanline; if (!scanlinesize) return 0; scanline = _TIFFmalloc(scanlinesize); if (!scanline) return 0; _TIFFmemset(scanline, 0, scanlinesize); (void) imagewidth; if (scanline) { uint8* bufp = (uint8*) buf; uint32 row; tsample_t s; for (row = 0; row < imagelength; row++) { /* merge channels */ for (s = 0; s < spp; s++) { uint8* bp = bufp + s; tsize_t n = scanlinesize; uint8* sbuf = scanline; if (TIFFReadScanline(in, scanline, row, s) < 0 && !ignore) { TIFFError(TIFFFileName(in), "Error, can't read scanline %lu", (unsigned long) row); status = 0; goto done; } 
while (n-- > 0) *bp = *sbuf++, bp += spp; } bufp += scanlinesize * spp; } } done: _TIFFfree(scanline); return status; } DECLAREreadFunc(readContigTilesIntoBuffer) { int status = 1; tsize_t tilesize = TIFFTileSize(in); tdata_t tilebuf; uint32 imagew = TIFFScanlineSize(in); uint32 tilew = TIFFTileRowSize(in); int64 iskew = (int64)imagew - (int64)tilew; uint8* bufp = (uint8*) buf; uint32 tw, tl; uint32 row; (void) spp; tilebuf = _TIFFmalloc(tilesize); if (tilebuf == 0) return 0; _TIFFmemset(tilebuf, 0, tilesize); (void) TIFFGetField(in, TIFFTAG_TILEWIDTH, &tw); (void) TIFFGetField(in, TIFFTAG_TILELENGTH, &tl); for (row = 0; row < imagelength; row += tl) { uint32 nrow = (row+tl > imagelength) ? imagelength-row : tl; uint32 colb = 0; uint32 col; for (col = 0; col < imagewidth && colb < imagew; col += tw) { if (TIFFReadTile(in, tilebuf, col, row, 0, 0) < 0 && !ignore) { TIFFError(TIFFFileName(in), "Error, can't read tile at %lu %lu", (unsigned long) col, (unsigned long) row); status = 0; goto done; } if (colb > iskew) { uint32 width = imagew - colb; uint32 oskew = tilew - width; cpStripToTile(bufp + colb, tilebuf, nrow, width, oskew + iskew, oskew ); } else cpStripToTile(bufp + colb, tilebuf, nrow, tilew, iskew, 0); colb += tilew; } bufp += imagew * nrow; } done: _TIFFfree(tilebuf); return status; } DECLAREreadFunc(readSeparateTilesIntoBuffer) { int status = 1; uint32 imagew = TIFFRasterScanlineSize(in); uint32 tilew = TIFFTileRowSize(in); int iskew = imagew - tilew*spp; tsize_t tilesize = TIFFTileSize(in); tdata_t tilebuf; uint8* bufp = (uint8*) buf; uint32 tw, tl; uint32 row; uint16 bps = 0, bytes_per_sample; tilebuf = _TIFFmalloc(tilesize); if (tilebuf == 0) return 0; _TIFFmemset(tilebuf, 0, tilesize); (void) TIFFGetField(in, TIFFTAG_TILEWIDTH, &tw); (void) TIFFGetField(in, TIFFTAG_TILELENGTH, &tl); (void) TIFFGetField(in, TIFFTAG_BITSPERSAMPLE, &bps); if( bps == 0 ) { TIFFError(TIFFFileName(in), "Error, cannot read BitsPerSample"); status = 0; goto done; } if( (bps % 
8) != 0 ) { TIFFError(TIFFFileName(in), "Error, cannot handle BitsPerSample that is not a multiple of 8"); status = 0; goto done; } bytes_per_sample = bps/8; for (row = 0; row < imagelength; row += tl) { uint32 nrow = (row+tl > imagelength) ? imagelength-row : tl; uint32 colb = 0; uint32 col; for (col = 0; col < imagewidth; col += tw) { tsample_t s; for (s = 0; s < spp; s++) { if (TIFFReadTile(in, tilebuf, col, row, 0, s) < 0 && !ignore) { TIFFError(TIFFFileName(in), "Error, can't read tile at %lu %lu, " "sample %lu", (unsigned long) col, (unsigned long) row, (unsigned long) s); status = 0; goto done; } /* * Tile is clipped horizontally. Calculate * visible portion and skewing factors. */ if (colb + tilew*spp > imagew) { uint32 width = imagew - colb; int oskew = tilew*spp - width; cpSeparateBufToContigBuf( bufp+colb+s*bytes_per_sample, tilebuf, nrow, width/(spp*bytes_per_sample), oskew + iskew, oskew/spp, spp, bytes_per_sample); } else cpSeparateBufToContigBuf( bufp+colb+s*bytes_per_sample, tilebuf, nrow, tw, iskew, 0, spp, bytes_per_sample); } colb += tilew*spp; } bufp += imagew * nrow; } done: _TIFFfree(tilebuf); return status; } DECLAREwriteFunc(writeBufferToContigStrips) { uint32 row, rowsperstrip; tstrip_t strip = 0; (void) imagewidth; (void) spp; (void) TIFFGetFieldDefaulted(out, TIFFTAG_ROWSPERSTRIP, &rowsperstrip); for (row = 0; row < imagelength; row += rowsperstrip) { uint32 nrows = (row+rowsperstrip > imagelength) ? 
imagelength-row : rowsperstrip; tsize_t stripsize = TIFFVStripSize(out, nrows); if (TIFFWriteEncodedStrip(out, strip++, buf, stripsize) < 0) { TIFFError(TIFFFileName(out), "Error, can't write strip %u", strip - 1); return 0; } buf += stripsize; } return 1; } DECLAREwriteFunc(writeBufferToSeparateStrips) { uint32 rowsize = imagewidth * spp; uint32 rowsperstrip; tsize_t stripsize = TIFFStripSize(out); tdata_t obuf; tstrip_t strip = 0; tsample_t s; obuf = _TIFFmalloc(stripsize); if (obuf == NULL) return (0); _TIFFmemset(obuf, 0, stripsize); (void) TIFFGetFieldDefaulted(out, TIFFTAG_ROWSPERSTRIP, &rowsperstrip); for (s = 0; s < spp; s++) { uint32 row; for (row = 0; row < imagelength; row += rowsperstrip) { uint32 nrows = (row+rowsperstrip > imagelength) ? imagelength-row : rowsperstrip; tsize_t stripsize = TIFFVStripSize(out, nrows); cpContigBufToSeparateBuf( obuf, (uint8*) buf + row*rowsize + s, nrows, imagewidth, 0, 0, spp, 1); if (TIFFWriteEncodedStrip(out, strip++, obuf, stripsize) < 0) { TIFFError(TIFFFileName(out), "Error, can't write strip %u", strip - 1); _TIFFfree(obuf); return 0; } } } _TIFFfree(obuf); return 1; } DECLAREwriteFunc(writeBufferToContigTiles) { uint32 imagew = TIFFScanlineSize(out); uint32 tilew = TIFFTileRowSize(out); int iskew = imagew - tilew; tsize_t tilesize = TIFFTileSize(out); tdata_t obuf; uint8* bufp = (uint8*) buf; uint32 tl, tw; uint32 row; (void) spp; obuf = _TIFFmalloc(TIFFTileSize(out)); if (obuf == NULL) return 0; _TIFFmemset(obuf, 0, tilesize); (void) TIFFGetField(out, TIFFTAG_TILELENGTH, &tl); (void) TIFFGetField(out, TIFFTAG_TILEWIDTH, &tw); for (row = 0; row < imagelength; row += tilelength) { uint32 nrow = (row+tl > imagelength) ? imagelength-row : tl; uint32 colb = 0; uint32 col; for (col = 0; col < imagewidth && colb < imagew; col += tw) { /* * Tile is clipped horizontally. Calculate * visible portion and skewing factors. 
*/ if (colb + tilew > imagew) { uint32 width = imagew - colb; int oskew = tilew - width; cpStripToTile(obuf, bufp + colb, nrow, width, oskew, oskew + iskew); } else cpStripToTile(obuf, bufp + colb, nrow, tilew, 0, iskew); if (TIFFWriteTile(out, obuf, col, row, 0, 0) < 0) { TIFFError(TIFFFileName(out), "Error, can't write tile at %lu %lu", (unsigned long) col, (unsigned long) row); _TIFFfree(obuf); return 0; } colb += tilew; } bufp += nrow * imagew; } _TIFFfree(obuf); return 1; } DECLAREwriteFunc(writeBufferToSeparateTiles) { uint32 imagew = TIFFScanlineSize(out); tsize_t tilew = TIFFTileRowSize(out); uint32 iimagew = TIFFRasterScanlineSize(out); int iskew = iimagew - tilew*spp; tsize_t tilesize = TIFFTileSize(out); tdata_t obuf; uint8* bufp = (uint8*) buf; uint32 tl, tw; uint32 row; uint16 bps = 0, bytes_per_sample; obuf = _TIFFmalloc(TIFFTileSize(out)); if (obuf == NULL) return 0; _TIFFmemset(obuf, 0, tilesize); (void) TIFFGetField(out, TIFFTAG_TILELENGTH, &tl); (void) TIFFGetField(out, TIFFTAG_TILEWIDTH, &tw); (void) TIFFGetField(out, TIFFTAG_BITSPERSAMPLE, &bps); if( bps == 0 ) { TIFFError(TIFFFileName(out), "Error, cannot read BitsPerSample"); _TIFFfree(obuf); return 0; } if( (bps % 8) != 0 ) { TIFFError(TIFFFileName(out), "Error, cannot handle BitsPerSample that is not a multiple of 8"); _TIFFfree(obuf); return 0; } bytes_per_sample = bps/8; for (row = 0; row < imagelength; row += tl) { uint32 nrow = (row+tl > imagelength) ? imagelength-row : tl; uint32 colb = 0; uint32 col; for (col = 0; col < imagewidth; col += tw) { tsample_t s; for (s = 0; s < spp; s++) { /* * Tile is clipped horizontally. Calculate * visible portion and skewing factors. 
*/ if (colb + tilew > imagew) { uint32 width = (imagew - colb); int oskew = tilew - width; cpContigBufToSeparateBuf(obuf, bufp + (colb*spp) + s, nrow, width/bytes_per_sample, oskew, (oskew*spp)+iskew, spp, bytes_per_sample); } else cpContigBufToSeparateBuf(obuf, bufp + (colb*spp) + s, nrow, tilewidth, 0, iskew, spp, bytes_per_sample); if (TIFFWriteTile(out, obuf, col, row, 0, s) < 0) { TIFFError(TIFFFileName(out), "Error, can't write tile at %lu %lu " "sample %lu", (unsigned long) col, (unsigned long) row, (unsigned long) s); _TIFFfree(obuf); return 0; } } colb += tilew; } bufp += nrow * iimagew; } _TIFFfree(obuf); return 1; } /* * Contig strips -> contig tiles. */ DECLAREcpFunc(cpContigStrips2ContigTiles) { return cpImage(in, out, readContigStripsIntoBuffer, writeBufferToContigTiles, imagelength, imagewidth, spp); } /* * Contig strips -> separate tiles. */ DECLAREcpFunc(cpContigStrips2SeparateTiles) { return cpImage(in, out, readContigStripsIntoBuffer, writeBufferToSeparateTiles, imagelength, imagewidth, spp); } /* * Separate strips -> contig tiles. */ DECLAREcpFunc(cpSeparateStrips2ContigTiles) { return cpImage(in, out, readSeparateStripsIntoBuffer, writeBufferToContigTiles, imagelength, imagewidth, spp); } /* * Separate strips -> separate tiles. */ DECLAREcpFunc(cpSeparateStrips2SeparateTiles) { return cpImage(in, out, readSeparateStripsIntoBuffer, writeBufferToSeparateTiles, imagelength, imagewidth, spp); } /* * Contig strips -> contig tiles. */ DECLAREcpFunc(cpContigTiles2ContigTiles) { return cpImage(in, out, readContigTilesIntoBuffer, writeBufferToContigTiles, imagelength, imagewidth, spp); } /* * Contig tiles -> separate tiles. */ DECLAREcpFunc(cpContigTiles2SeparateTiles) { return cpImage(in, out, readContigTilesIntoBuffer, writeBufferToSeparateTiles, imagelength, imagewidth, spp); } /* * Separate tiles -> contig tiles. 
*/ DECLAREcpFunc(cpSeparateTiles2ContigTiles) { return cpImage(in, out, readSeparateTilesIntoBuffer, writeBufferToContigTiles, imagelength, imagewidth, spp); } /* * Separate tiles -> separate tiles (tile dimension change). */ DECLAREcpFunc(cpSeparateTiles2SeparateTiles) { return cpImage(in, out, readSeparateTilesIntoBuffer, writeBufferToSeparateTiles, imagelength, imagewidth, spp); } /* * Contig tiles -> contig tiles (tile dimension change). */ DECLAREcpFunc(cpContigTiles2ContigStrips) { return cpImage(in, out, readContigTilesIntoBuffer, writeBufferToContigStrips, imagelength, imagewidth, spp); } /* * Contig tiles -> separate strips. */ DECLAREcpFunc(cpContigTiles2SeparateStrips) { return cpImage(in, out, readContigTilesIntoBuffer, writeBufferToSeparateStrips, imagelength, imagewidth, spp); } /* * Separate tiles -> contig strips. */ DECLAREcpFunc(cpSeparateTiles2ContigStrips) { return cpImage(in, out, readSeparateTilesIntoBuffer, writeBufferToContigStrips, imagelength, imagewidth, spp); } /* * Separate tiles -> separate strips. */ DECLAREcpFunc(cpSeparateTiles2SeparateStrips) { return cpImage(in, out, readSeparateTilesIntoBuffer, writeBufferToSeparateStrips, imagelength, imagewidth, spp); } /* * Select the appropriate copy function to use. 
*/ static copyFunc pickCopyFunc(TIFF* in, TIFF* out, uint16 bitspersample, uint16 samplesperpixel) { uint16 shortv; uint32 w, l, tw, tl; int bychunk; (void) TIFFGetFieldDefaulted(in, TIFFTAG_PLANARCONFIG, &shortv); if (shortv != config && bitspersample != 8 && samplesperpixel > 1) { fprintf(stderr, "%s: Cannot handle different planar configuration w/ bits/sample != 8\n", TIFFFileName(in)); return (NULL); } TIFFGetField(in, TIFFTAG_IMAGEWIDTH, &w); TIFFGetField(in, TIFFTAG_IMAGELENGTH, &l); if (!(TIFFIsTiled(out) || TIFFIsTiled(in))) { uint32 irps = (uint32) -1L; TIFFGetField(in, TIFFTAG_ROWSPERSTRIP, &irps); /* if biased, force decoded copying to allow image subtraction */ bychunk = !bias && (rowsperstrip == irps); }else{ /* either in or out is tiled */ if (bias) { fprintf(stderr, "%s: Cannot handle tiled configuration w/bias image\n", TIFFFileName(in)); return (NULL); } if (TIFFIsTiled(out)) { if (!TIFFGetField(in, TIFFTAG_TILEWIDTH, &tw)) tw = w; if (!TIFFGetField(in, TIFFTAG_TILELENGTH, &tl)) tl = l; bychunk = (tw == tilewidth && tl == tilelength); } else { /* out's not, so in must be tiled */ TIFFGetField(in, TIFFTAG_TILEWIDTH, &tw); TIFFGetField(in, TIFFTAG_TILELENGTH, &tl); bychunk = (tw == w && tl == rowsperstrip); } } #define T 1 #define F 0 #define pack(a,b,c,d,e) ((long)(((a)<<11)|((b)<<3)|((c)<<2)|((d)<<1)|(e))) switch(pack(shortv,config,TIFFIsTiled(in),TIFFIsTiled(out),bychunk)) { /* Strips -> Tiles */ case pack(PLANARCONFIG_CONTIG, PLANARCONFIG_CONTIG, F,T,F): case pack(PLANARCONFIG_CONTIG, PLANARCONFIG_CONTIG, F,T,T): return cpContigStrips2ContigTiles; case pack(PLANARCONFIG_CONTIG, PLANARCONFIG_SEPARATE, F,T,F): case pack(PLANARCONFIG_CONTIG, PLANARCONFIG_SEPARATE, F,T,T): return cpContigStrips2SeparateTiles; case pack(PLANARCONFIG_SEPARATE, PLANARCONFIG_CONTIG, F,T,F): case pack(PLANARCONFIG_SEPARATE, PLANARCONFIG_CONTIG, F,T,T): return cpSeparateStrips2ContigTiles; case pack(PLANARCONFIG_SEPARATE, PLANARCONFIG_SEPARATE, F,T,F): case 
pack(PLANARCONFIG_SEPARATE, PLANARCONFIG_SEPARATE, F,T,T): return cpSeparateStrips2SeparateTiles; /* Tiles -> Tiles */ case pack(PLANARCONFIG_CONTIG, PLANARCONFIG_CONTIG, T,T,F): case pack(PLANARCONFIG_CONTIG, PLANARCONFIG_CONTIG, T,T,T): return cpContigTiles2ContigTiles; case pack(PLANARCONFIG_CONTIG, PLANARCONFIG_SEPARATE, T,T,F): case pack(PLANARCONFIG_CONTIG, PLANARCONFIG_SEPARATE, T,T,T): return cpContigTiles2SeparateTiles; case pack(PLANARCONFIG_SEPARATE, PLANARCONFIG_CONTIG, T,T,F): case pack(PLANARCONFIG_SEPARATE, PLANARCONFIG_CONTIG, T,T,T): return cpSeparateTiles2ContigTiles; case pack(PLANARCONFIG_SEPARATE, PLANARCONFIG_SEPARATE, T,T,F): case pack(PLANARCONFIG_SEPARATE, PLANARCONFIG_SEPARATE, T,T,T): return cpSeparateTiles2SeparateTiles; /* Tiles -> Strips */ case pack(PLANARCONFIG_CONTIG, PLANARCONFIG_CONTIG, T,F,F): case pack(PLANARCONFIG_CONTIG, PLANARCONFIG_CONTIG, T,F,T): return cpContigTiles2ContigStrips; case pack(PLANARCONFIG_CONTIG, PLANARCONFIG_SEPARATE, T,F,F): case pack(PLANARCONFIG_CONTIG, PLANARCONFIG_SEPARATE, T,F,T): return cpContigTiles2SeparateStrips; case pack(PLANARCONFIG_SEPARATE, PLANARCONFIG_CONTIG, T,F,F): case pack(PLANARCONFIG_SEPARATE, PLANARCONFIG_CONTIG, T,F,T): return cpSeparateTiles2ContigStrips; case pack(PLANARCONFIG_SEPARATE, PLANARCONFIG_SEPARATE, T,F,F): case pack(PLANARCONFIG_SEPARATE, PLANARCONFIG_SEPARATE, T,F,T): return cpSeparateTiles2SeparateStrips; /* Strips -> Strips */ case pack(PLANARCONFIG_CONTIG, PLANARCONFIG_CONTIG, F,F,F): return bias ? 
cpBiasedContig2Contig : cpContig2ContigByRow; case pack(PLANARCONFIG_CONTIG, PLANARCONFIG_CONTIG, F,F,T): return cpDecodedStrips; case pack(PLANARCONFIG_CONTIG, PLANARCONFIG_SEPARATE, F,F,F): case pack(PLANARCONFIG_CONTIG, PLANARCONFIG_SEPARATE, F,F,T): return cpContig2SeparateByRow; case pack(PLANARCONFIG_SEPARATE, PLANARCONFIG_CONTIG, F,F,F): case pack(PLANARCONFIG_SEPARATE, PLANARCONFIG_CONTIG, F,F,T): return cpSeparate2ContigByRow; case pack(PLANARCONFIG_SEPARATE, PLANARCONFIG_SEPARATE, F,F,F): case pack(PLANARCONFIG_SEPARATE, PLANARCONFIG_SEPARATE, F,F,T): return cpSeparate2SeparateByRow; } #undef pack #undef F #undef T fprintf(stderr, "tiffcp: %s: Don't know how to copy/convert image.\n", TIFFFileName(in)); return (NULL); } /* vim: set ts=8 sts=8 sw=8 noet: */ /* * Local Variables: * mode: c * c-basic-offset: 8 * fill-column: 78 * End: */
pickCopyFunc(TIFF* in, TIFF* out, uint16 bitspersample, uint16 samplesperpixel) { uint16 shortv; uint32 w, l, tw, tl; int bychunk; (void) TIFFGetField(in, TIFFTAG_PLANARCONFIG, &shortv); if (shortv != config && bitspersample != 8 && samplesperpixel > 1) { fprintf(stderr, "%s: Cannot handle different planar configuration w/ bits/sample != 8\n", TIFFFileName(in)); return (NULL); } TIFFGetField(in, TIFFTAG_IMAGEWIDTH, &w); TIFFGetField(in, TIFFTAG_IMAGELENGTH, &l); if (!(TIFFIsTiled(out) || TIFFIsTiled(in))) { uint32 irps = (uint32) -1L; TIFFGetField(in, TIFFTAG_ROWSPERSTRIP, &irps); /* if biased, force decoded copying to allow image subtraction */ bychunk = !bias && (rowsperstrip == irps); }else{ /* either in or out is tiled */ if (bias) { fprintf(stderr, "%s: Cannot handle tiled configuration w/bias image\n", TIFFFileName(in)); return (NULL); } if (TIFFIsTiled(out)) { if (!TIFFGetField(in, TIFFTAG_TILEWIDTH, &tw)) tw = w; if (!TIFFGetField(in, TIFFTAG_TILELENGTH, &tl)) tl = l; bychunk = (tw == tilewidth && tl == tilelength); } else { /* out's not, so in must be tiled */ TIFFGetField(in, TIFFTAG_TILEWIDTH, &tw); TIFFGetField(in, TIFFTAG_TILELENGTH, &tl); bychunk = (tw == w && tl == rowsperstrip); } } #define T 1 #define F 0 #define pack(a,b,c,d,e) ((long)(((a)<<11)|((b)<<3)|((c)<<2)|((d)<<1)|(e))) switch(pack(shortv,config,TIFFIsTiled(in),TIFFIsTiled(out),bychunk)) { /* Strips -> Tiles */ case pack(PLANARCONFIG_CONTIG, PLANARCONFIG_CONTIG, F,T,F): case pack(PLANARCONFIG_CONTIG, PLANARCONFIG_CONTIG, F,T,T): return cpContigStrips2ContigTiles; case pack(PLANARCONFIG_CONTIG, PLANARCONFIG_SEPARATE, F,T,F): case pack(PLANARCONFIG_CONTIG, PLANARCONFIG_SEPARATE, F,T,T): return cpContigStrips2SeparateTiles; case pack(PLANARCONFIG_SEPARATE, PLANARCONFIG_CONTIG, F,T,F): case pack(PLANARCONFIG_SEPARATE, PLANARCONFIG_CONTIG, F,T,T): return cpSeparateStrips2ContigTiles; case pack(PLANARCONFIG_SEPARATE, PLANARCONFIG_SEPARATE, F,T,F): case pack(PLANARCONFIG_SEPARATE, 
PLANARCONFIG_SEPARATE, F,T,T): return cpSeparateStrips2SeparateTiles; /* Tiles -> Tiles */ case pack(PLANARCONFIG_CONTIG, PLANARCONFIG_CONTIG, T,T,F): case pack(PLANARCONFIG_CONTIG, PLANARCONFIG_CONTIG, T,T,T): return cpContigTiles2ContigTiles; case pack(PLANARCONFIG_CONTIG, PLANARCONFIG_SEPARATE, T,T,F): case pack(PLANARCONFIG_CONTIG, PLANARCONFIG_SEPARATE, T,T,T): return cpContigTiles2SeparateTiles; case pack(PLANARCONFIG_SEPARATE, PLANARCONFIG_CONTIG, T,T,F): case pack(PLANARCONFIG_SEPARATE, PLANARCONFIG_CONTIG, T,T,T): return cpSeparateTiles2ContigTiles; case pack(PLANARCONFIG_SEPARATE, PLANARCONFIG_SEPARATE, T,T,F): case pack(PLANARCONFIG_SEPARATE, PLANARCONFIG_SEPARATE, T,T,T): return cpSeparateTiles2SeparateTiles; /* Tiles -> Strips */ case pack(PLANARCONFIG_CONTIG, PLANARCONFIG_CONTIG, T,F,F): case pack(PLANARCONFIG_CONTIG, PLANARCONFIG_CONTIG, T,F,T): return cpContigTiles2ContigStrips; case pack(PLANARCONFIG_CONTIG, PLANARCONFIG_SEPARATE, T,F,F): case pack(PLANARCONFIG_CONTIG, PLANARCONFIG_SEPARATE, T,F,T): return cpContigTiles2SeparateStrips; case pack(PLANARCONFIG_SEPARATE, PLANARCONFIG_CONTIG, T,F,F): case pack(PLANARCONFIG_SEPARATE, PLANARCONFIG_CONTIG, T,F,T): return cpSeparateTiles2ContigStrips; case pack(PLANARCONFIG_SEPARATE, PLANARCONFIG_SEPARATE, T,F,F): case pack(PLANARCONFIG_SEPARATE, PLANARCONFIG_SEPARATE, T,F,T): return cpSeparateTiles2SeparateStrips; /* Strips -> Strips */ case pack(PLANARCONFIG_CONTIG, PLANARCONFIG_CONTIG, F,F,F): return bias ? 
cpBiasedContig2Contig : cpContig2ContigByRow; case pack(PLANARCONFIG_CONTIG, PLANARCONFIG_CONTIG, F,F,T): return cpDecodedStrips; case pack(PLANARCONFIG_CONTIG, PLANARCONFIG_SEPARATE, F,F,F): case pack(PLANARCONFIG_CONTIG, PLANARCONFIG_SEPARATE, F,F,T): return cpContig2SeparateByRow; case pack(PLANARCONFIG_SEPARATE, PLANARCONFIG_CONTIG, F,F,F): case pack(PLANARCONFIG_SEPARATE, PLANARCONFIG_CONTIG, F,F,T): return cpSeparate2ContigByRow; case pack(PLANARCONFIG_SEPARATE, PLANARCONFIG_SEPARATE, F,F,F): case pack(PLANARCONFIG_SEPARATE, PLANARCONFIG_SEPARATE, F,F,T): return cpSeparate2SeparateByRow; } #undef pack #undef F #undef T fprintf(stderr, "tiffcp: %s: Don't know how to copy/convert image.\n", TIFFFileName(in)); return (NULL); }
pickCopyFunc(TIFF* in, TIFF* out, uint16 bitspersample, uint16 samplesperpixel) { uint16 shortv; uint32 w, l, tw, tl; int bychunk; (void) TIFFGetFieldDefaulted(in, TIFFTAG_PLANARCONFIG, &shortv); if (shortv != config && bitspersample != 8 && samplesperpixel > 1) { fprintf(stderr, "%s: Cannot handle different planar configuration w/ bits/sample != 8\n", TIFFFileName(in)); return (NULL); } TIFFGetField(in, TIFFTAG_IMAGEWIDTH, &w); TIFFGetField(in, TIFFTAG_IMAGELENGTH, &l); if (!(TIFFIsTiled(out) || TIFFIsTiled(in))) { uint32 irps = (uint32) -1L; TIFFGetField(in, TIFFTAG_ROWSPERSTRIP, &irps); /* if biased, force decoded copying to allow image subtraction */ bychunk = !bias && (rowsperstrip == irps); }else{ /* either in or out is tiled */ if (bias) { fprintf(stderr, "%s: Cannot handle tiled configuration w/bias image\n", TIFFFileName(in)); return (NULL); } if (TIFFIsTiled(out)) { if (!TIFFGetField(in, TIFFTAG_TILEWIDTH, &tw)) tw = w; if (!TIFFGetField(in, TIFFTAG_TILELENGTH, &tl)) tl = l; bychunk = (tw == tilewidth && tl == tilelength); } else { /* out's not, so in must be tiled */ TIFFGetField(in, TIFFTAG_TILEWIDTH, &tw); TIFFGetField(in, TIFFTAG_TILELENGTH, &tl); bychunk = (tw == w && tl == rowsperstrip); } } #define T 1 #define F 0 #define pack(a,b,c,d,e) ((long)(((a)<<11)|((b)<<3)|((c)<<2)|((d)<<1)|(e))) switch(pack(shortv,config,TIFFIsTiled(in),TIFFIsTiled(out),bychunk)) { /* Strips -> Tiles */ case pack(PLANARCONFIG_CONTIG, PLANARCONFIG_CONTIG, F,T,F): case pack(PLANARCONFIG_CONTIG, PLANARCONFIG_CONTIG, F,T,T): return cpContigStrips2ContigTiles; case pack(PLANARCONFIG_CONTIG, PLANARCONFIG_SEPARATE, F,T,F): case pack(PLANARCONFIG_CONTIG, PLANARCONFIG_SEPARATE, F,T,T): return cpContigStrips2SeparateTiles; case pack(PLANARCONFIG_SEPARATE, PLANARCONFIG_CONTIG, F,T,F): case pack(PLANARCONFIG_SEPARATE, PLANARCONFIG_CONTIG, F,T,T): return cpSeparateStrips2ContigTiles; case pack(PLANARCONFIG_SEPARATE, PLANARCONFIG_SEPARATE, F,T,F): case pack(PLANARCONFIG_SEPARATE, 
PLANARCONFIG_SEPARATE, F,T,T): return cpSeparateStrips2SeparateTiles; /* Tiles -> Tiles */ case pack(PLANARCONFIG_CONTIG, PLANARCONFIG_CONTIG, T,T,F): case pack(PLANARCONFIG_CONTIG, PLANARCONFIG_CONTIG, T,T,T): return cpContigTiles2ContigTiles; case pack(PLANARCONFIG_CONTIG, PLANARCONFIG_SEPARATE, T,T,F): case pack(PLANARCONFIG_CONTIG, PLANARCONFIG_SEPARATE, T,T,T): return cpContigTiles2SeparateTiles; case pack(PLANARCONFIG_SEPARATE, PLANARCONFIG_CONTIG, T,T,F): case pack(PLANARCONFIG_SEPARATE, PLANARCONFIG_CONTIG, T,T,T): return cpSeparateTiles2ContigTiles; case pack(PLANARCONFIG_SEPARATE, PLANARCONFIG_SEPARATE, T,T,F): case pack(PLANARCONFIG_SEPARATE, PLANARCONFIG_SEPARATE, T,T,T): return cpSeparateTiles2SeparateTiles; /* Tiles -> Strips */ case pack(PLANARCONFIG_CONTIG, PLANARCONFIG_CONTIG, T,F,F): case pack(PLANARCONFIG_CONTIG, PLANARCONFIG_CONTIG, T,F,T): return cpContigTiles2ContigStrips; case pack(PLANARCONFIG_CONTIG, PLANARCONFIG_SEPARATE, T,F,F): case pack(PLANARCONFIG_CONTIG, PLANARCONFIG_SEPARATE, T,F,T): return cpContigTiles2SeparateStrips; case pack(PLANARCONFIG_SEPARATE, PLANARCONFIG_CONTIG, T,F,F): case pack(PLANARCONFIG_SEPARATE, PLANARCONFIG_CONTIG, T,F,T): return cpSeparateTiles2ContigStrips; case pack(PLANARCONFIG_SEPARATE, PLANARCONFIG_SEPARATE, T,F,F): case pack(PLANARCONFIG_SEPARATE, PLANARCONFIG_SEPARATE, T,F,T): return cpSeparateTiles2SeparateStrips; /* Strips -> Strips */ case pack(PLANARCONFIG_CONTIG, PLANARCONFIG_CONTIG, F,F,F): return bias ? 
cpBiasedContig2Contig : cpContig2ContigByRow; case pack(PLANARCONFIG_CONTIG, PLANARCONFIG_CONTIG, F,F,T): return cpDecodedStrips; case pack(PLANARCONFIG_CONTIG, PLANARCONFIG_SEPARATE, F,F,F): case pack(PLANARCONFIG_CONTIG, PLANARCONFIG_SEPARATE, F,F,T): return cpContig2SeparateByRow; case pack(PLANARCONFIG_SEPARATE, PLANARCONFIG_CONTIG, F,F,F): case pack(PLANARCONFIG_SEPARATE, PLANARCONFIG_CONTIG, F,F,T): return cpSeparate2ContigByRow; case pack(PLANARCONFIG_SEPARATE, PLANARCONFIG_SEPARATE, F,F,F): case pack(PLANARCONFIG_SEPARATE, PLANARCONFIG_SEPARATE, F,F,T): return cpSeparate2SeparateByRow; } #undef pack #undef F #undef T fprintf(stderr, "tiffcp: %s: Don't know how to copy/convert image.\n", TIFFFileName(in)); return (NULL); }
{'added': [(594, '\tuint16 bitspersample = 1, samplesperpixel = 1;'), (1070, ' uint16 bps = 0;'), (1071, ''), (1072, ' (void) TIFFGetField(in, TIFFTAG_BITSPERSAMPLE, &bps);'), (1073, ' if( bps != 8 )'), (1074, ' {'), (1075, ' TIFFError(TIFFFileName(in),'), (1076, ' "Error, can only handle BitsPerSample=8 in %s",'), (1077, ' "cpContig2SeparateByRow");'), (1078, ' return 0;'), (1079, ' }'), (1133, ' uint16 bps = 0;'), (1134, ''), (1135, ' (void) TIFFGetField(in, TIFFTAG_BITSPERSAMPLE, &bps);'), (1136, ' if( bps != 8 )'), (1137, ' {'), (1138, ' TIFFError(TIFFFileName(in),'), (1139, ' "Error, can only handle BitsPerSample=8 in %s",'), (1140, ' "cpSeparate2ContigByRow");'), (1141, ' return 0;'), (1142, ' }'), (1807, '\t(void) TIFFGetFieldDefaulted(in, TIFFTAG_PLANARCONFIG, &shortv);')], 'deleted': [(594, '\tuint16 bitspersample, samplesperpixel = 1;'), (1787, '\t(void) TIFFGetField(in, TIFFTAG_PLANARCONFIG, &shortv);')]}
22
2
1,577
9,168
92
805
46
https://github.com/vadz/libtiff
CVE-2017-5225
CWE-119
2,273
xmlparse.c
C
externalParEntProcessor
/* 69df5be70289a11fb834869ce4a91c23c1d9dd04baffcbd10e86742d149a080c (2.2.7+) __ __ _ ___\ \/ /_ __ __ _| |_ / _ \\ /| '_ \ / _` | __| | __// \| |_) | (_| | |_ \___/_/\_\ .__/ \__,_|\__| |_| XML parser Copyright (c) 1997-2000 Thai Open Source Software Center Ltd Copyright (c) 2000-2017 Expat development team Licensed under the MIT license: Permission is hereby granted, free of charge, to any person obtaining a copy of this software and associated documentation files (the "Software"), to deal in the Software without restriction, including without limitation the rights to use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies of the Software, and to permit persons to whom the Software is furnished to do so, subject to the following conditions: The above copyright notice and this permission notice shall be included in all copies or substantial portions of the Software. THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. */ #if ! 
defined(_GNU_SOURCE) # define _GNU_SOURCE 1 /* syscall prototype */ #endif #ifdef _WIN32 /* force stdlib to define rand_s() */ # define _CRT_RAND_S #endif #include <stddef.h> #include <string.h> /* memset(), memcpy() */ #include <assert.h> #include <limits.h> /* UINT_MAX */ #include <stdio.h> /* fprintf */ #include <stdlib.h> /* getenv, rand_s */ #ifdef _WIN32 # define getpid GetCurrentProcessId #else # include <sys/time.h> /* gettimeofday() */ # include <sys/types.h> /* getpid() */ # include <unistd.h> /* getpid() */ # include <fcntl.h> /* O_RDONLY */ # include <errno.h> #endif #define XML_BUILDING_EXPAT 1 #ifdef _WIN32 # include "winconfig.h" #elif defined(HAVE_EXPAT_CONFIG_H) # include <expat_config.h> #endif /* ndef _WIN32 */ #include "ascii.h" #include "expat.h" #include "siphash.h" #if defined(HAVE_GETRANDOM) || defined(HAVE_SYSCALL_GETRANDOM) # if defined(HAVE_GETRANDOM) # include <sys/random.h> /* getrandom */ # else # include <unistd.h> /* syscall */ # include <sys/syscall.h> /* SYS_getrandom */ # endif # if ! defined(GRND_NONBLOCK) # define GRND_NONBLOCK 0x0001 # endif /* defined(GRND_NONBLOCK) */ #endif /* defined(HAVE_GETRANDOM) || defined(HAVE_SYSCALL_GETRANDOM) */ #if defined(HAVE_LIBBSD) \ && (defined(HAVE_ARC4RANDOM_BUF) || defined(HAVE_ARC4RANDOM)) # include <bsd/stdlib.h> #endif #if defined(_WIN32) && ! defined(LOAD_LIBRARY_SEARCH_SYSTEM32) # define LOAD_LIBRARY_SEARCH_SYSTEM32 0x00000800 #endif #if ! defined(HAVE_GETRANDOM) && ! defined(HAVE_SYSCALL_GETRANDOM) \ && ! defined(HAVE_ARC4RANDOM_BUF) && ! defined(HAVE_ARC4RANDOM) \ && ! defined(XML_DEV_URANDOM) && ! defined(_WIN32) \ && ! defined(XML_POOR_ENTROPY) # error You do not have support for any sources of high quality entropy \ enabled. For end user security, that is probably not what you want. 
\ \ Your options include: \ * Linux + glibc >=2.25 (getrandom): HAVE_GETRANDOM, \ * Linux + glibc <2.25 (syscall SYS_getrandom): HAVE_SYSCALL_GETRANDOM, \ * BSD / macOS >=10.7 (arc4random_buf): HAVE_ARC4RANDOM_BUF, \ * BSD / macOS <10.7 (arc4random): HAVE_ARC4RANDOM, \ * libbsd (arc4random_buf): HAVE_ARC4RANDOM_BUF + HAVE_LIBBSD, \ * libbsd (arc4random): HAVE_ARC4RANDOM + HAVE_LIBBSD, \ * Linux / BSD / macOS (/dev/urandom): XML_DEV_URANDOM \ * Windows (rand_s): _WIN32. \ \ If insist on not using any of these, bypass this error by defining \ XML_POOR_ENTROPY; you have been warned. \ \ If you have reasons to patch this detection code away or need changes \ to the build system, please open a bug. Thank you! #endif #ifdef XML_UNICODE # define XML_ENCODE_MAX XML_UTF16_ENCODE_MAX # define XmlConvert XmlUtf16Convert # define XmlGetInternalEncoding XmlGetUtf16InternalEncoding # define XmlGetInternalEncodingNS XmlGetUtf16InternalEncodingNS # define XmlEncode XmlUtf16Encode /* Using pointer subtraction to convert to integer type. */ # define MUST_CONVERT(enc, s) \ (! (enc)->isUtf16 || (((char *)(s) - (char *)NULL) & 1)) typedef unsigned short ICHAR; #else # define XML_ENCODE_MAX XML_UTF8_ENCODE_MAX # define XmlConvert XmlUtf8Convert # define XmlGetInternalEncoding XmlGetUtf8InternalEncoding # define XmlGetInternalEncodingNS XmlGetUtf8InternalEncodingNS # define XmlEncode XmlUtf8Encode # define MUST_CONVERT(enc, s) (! 
(enc)->isUtf8) typedef char ICHAR; #endif #ifndef XML_NS # define XmlInitEncodingNS XmlInitEncoding # define XmlInitUnknownEncodingNS XmlInitUnknownEncoding # undef XmlGetInternalEncodingNS # define XmlGetInternalEncodingNS XmlGetInternalEncoding # define XmlParseXmlDeclNS XmlParseXmlDecl #endif #ifdef XML_UNICODE # ifdef XML_UNICODE_WCHAR_T # define XML_T(x) (const wchar_t) x # define XML_L(x) L##x # else # define XML_T(x) (const unsigned short)x # define XML_L(x) x # endif #else # define XML_T(x) x # define XML_L(x) x #endif /* Round up n to be a multiple of sz, where sz is a power of 2. */ #define ROUND_UP(n, sz) (((n) + ((sz)-1)) & ~((sz)-1)) /* Do safe (NULL-aware) pointer arithmetic */ #define EXPAT_SAFE_PTR_DIFF(p, q) (((p) && (q)) ? ((p) - (q)) : 0) #include "internal.h" #include "xmltok.h" #include "xmlrole.h" typedef const XML_Char *KEY; typedef struct { KEY name; } NAMED; typedef struct { NAMED **v; unsigned char power; size_t size; size_t used; const XML_Memory_Handling_Suite *mem; } HASH_TABLE; static size_t keylen(KEY s); static void copy_salt_to_sipkey(XML_Parser parser, struct sipkey *key); /* For probing (after a collision) we need a step size relative prime to the hash table size, which is a power of 2. We use double-hashing, since we can calculate a second hash value cheaply by taking those bits of the first hash value that were discarded (masked out) when the table index was calculated: index = hash & mask, where mask = table->size - 1. We limit the maximum step size to table->size / 4 (mask >> 2) and make it odd, since odd numbers are always relative prime to a power of 2. 
*/ #define SECOND_HASH(hash, mask, power) \ ((((hash) & ~(mask)) >> ((power)-1)) & ((mask) >> 2)) #define PROBE_STEP(hash, mask, power) \ ((unsigned char)((SECOND_HASH(hash, mask, power)) | 1)) typedef struct { NAMED **p; NAMED **end; } HASH_TABLE_ITER; #define INIT_TAG_BUF_SIZE 32 /* must be a multiple of sizeof(XML_Char) */ #define INIT_DATA_BUF_SIZE 1024 #define INIT_ATTS_SIZE 16 #define INIT_ATTS_VERSION 0xFFFFFFFF #define INIT_BLOCK_SIZE 1024 #define INIT_BUFFER_SIZE 1024 #define EXPAND_SPARE 24 typedef struct binding { struct prefix *prefix; struct binding *nextTagBinding; struct binding *prevPrefixBinding; const struct attribute_id *attId; XML_Char *uri; int uriLen; int uriAlloc; } BINDING; typedef struct prefix { const XML_Char *name; BINDING *binding; } PREFIX; typedef struct { const XML_Char *str; const XML_Char *localPart; const XML_Char *prefix; int strLen; int uriLen; int prefixLen; } TAG_NAME; /* TAG represents an open element. The name of the element is stored in both the document and API encodings. The memory buffer 'buf' is a separately-allocated memory area which stores the name. During the XML_Parse()/ XMLParseBuffer() when the element is open, the memory for the 'raw' version of the name (in the document encoding) is shared with the document buffer. If the element is open across calls to XML_Parse()/XML_ParseBuffer(), the buffer is re-allocated to contain the 'raw' name as well. A parser re-uses these structures, maintaining a list of allocated TAG objects in a free list. 
*/ typedef struct tag { struct tag *parent; /* parent of this element */ const char *rawName; /* tagName in the original encoding */ int rawNameLength; TAG_NAME name; /* tagName in the API encoding */ char *buf; /* buffer for name components */ char *bufEnd; /* end of the buffer */ BINDING *bindings; } TAG; typedef struct { const XML_Char *name; const XML_Char *textPtr; int textLen; /* length in XML_Chars */ int processed; /* # of processed bytes - when suspended */ const XML_Char *systemId; const XML_Char *base; const XML_Char *publicId; const XML_Char *notation; XML_Bool open; XML_Bool is_param; XML_Bool is_internal; /* true if declared in internal subset outside PE */ } ENTITY; typedef struct { enum XML_Content_Type type; enum XML_Content_Quant quant; const XML_Char *name; int firstchild; int lastchild; int childcnt; int nextsib; } CONTENT_SCAFFOLD; #define INIT_SCAFFOLD_ELEMENTS 32 typedef struct block { struct block *next; int size; XML_Char s[1]; } BLOCK; typedef struct { BLOCK *blocks; BLOCK *freeBlocks; const XML_Char *end; XML_Char *ptr; XML_Char *start; const XML_Memory_Handling_Suite *mem; } STRING_POOL; /* The XML_Char before the name is used to determine whether an attribute has been specified. 
*/ typedef struct attribute_id { XML_Char *name; PREFIX *prefix; XML_Bool maybeTokenized; XML_Bool xmlns; } ATTRIBUTE_ID; typedef struct { const ATTRIBUTE_ID *id; XML_Bool isCdata; const XML_Char *value; } DEFAULT_ATTRIBUTE; typedef struct { unsigned long version; unsigned long hash; const XML_Char *uriName; } NS_ATT; typedef struct { const XML_Char *name; PREFIX *prefix; const ATTRIBUTE_ID *idAtt; int nDefaultAtts; int allocDefaultAtts; DEFAULT_ATTRIBUTE *defaultAtts; } ELEMENT_TYPE; typedef struct { HASH_TABLE generalEntities; HASH_TABLE elementTypes; HASH_TABLE attributeIds; HASH_TABLE prefixes; STRING_POOL pool; STRING_POOL entityValuePool; /* false once a parameter entity reference has been skipped */ XML_Bool keepProcessing; /* true once an internal or external PE reference has been encountered; this includes the reference to an external subset */ XML_Bool hasParamEntityRefs; XML_Bool standalone; #ifdef XML_DTD /* indicates if external PE has been read */ XML_Bool paramEntityRead; HASH_TABLE paramEntities; #endif /* XML_DTD */ PREFIX defaultPrefix; /* === scaffolding for building content model === */ XML_Bool in_eldecl; CONTENT_SCAFFOLD *scaffold; unsigned contentStringLen; unsigned scaffSize; unsigned scaffCount; int scaffLevel; int *scaffIndex; } DTD; typedef struct open_internal_entity { const char *internalEventPtr; const char *internalEventEndPtr; struct open_internal_entity *next; ENTITY *entity; int startTagLevel; XML_Bool betweenDecl; /* WFC: PE Between Declarations */ } OPEN_INTERNAL_ENTITY; typedef enum XML_Error PTRCALL Processor(XML_Parser parser, const char *start, const char *end, const char **endPtr); static Processor prologProcessor; static Processor prologInitProcessor; static Processor contentProcessor; static Processor cdataSectionProcessor; #ifdef XML_DTD static Processor ignoreSectionProcessor; static Processor externalParEntProcessor; static Processor externalParEntInitProcessor; static Processor entityValueProcessor; static Processor 
entityValueInitProcessor; #endif /* XML_DTD */ static Processor epilogProcessor; static Processor errorProcessor; static Processor externalEntityInitProcessor; static Processor externalEntityInitProcessor2; static Processor externalEntityInitProcessor3; static Processor externalEntityContentProcessor; static Processor internalEntityProcessor; static enum XML_Error handleUnknownEncoding(XML_Parser parser, const XML_Char *encodingName); static enum XML_Error processXmlDecl(XML_Parser parser, int isGeneralTextEntity, const char *s, const char *next); static enum XML_Error initializeEncoding(XML_Parser parser); static enum XML_Error doProlog(XML_Parser parser, const ENCODING *enc, const char *s, const char *end, int tok, const char *next, const char **nextPtr, XML_Bool haveMore); static enum XML_Error processInternalEntity(XML_Parser parser, ENTITY *entity, XML_Bool betweenDecl); static enum XML_Error doContent(XML_Parser parser, int startTagLevel, const ENCODING *enc, const char *start, const char *end, const char **endPtr, XML_Bool haveMore); static enum XML_Error doCdataSection(XML_Parser parser, const ENCODING *, const char **startPtr, const char *end, const char **nextPtr, XML_Bool haveMore); #ifdef XML_DTD static enum XML_Error doIgnoreSection(XML_Parser parser, const ENCODING *, const char **startPtr, const char *end, const char **nextPtr, XML_Bool haveMore); #endif /* XML_DTD */ static void freeBindings(XML_Parser parser, BINDING *bindings); static enum XML_Error storeAtts(XML_Parser parser, const ENCODING *, const char *s, TAG_NAME *tagNamePtr, BINDING **bindingsPtr); static enum XML_Error addBinding(XML_Parser parser, PREFIX *prefix, const ATTRIBUTE_ID *attId, const XML_Char *uri, BINDING **bindingsPtr); static int defineAttribute(ELEMENT_TYPE *type, ATTRIBUTE_ID *, XML_Bool isCdata, XML_Bool isId, const XML_Char *dfltValue, XML_Parser parser); static enum XML_Error storeAttributeValue(XML_Parser parser, const ENCODING *, XML_Bool isCdata, const char *, const 
char *, STRING_POOL *); static enum XML_Error appendAttributeValue(XML_Parser parser, const ENCODING *, XML_Bool isCdata, const char *, const char *, STRING_POOL *); static ATTRIBUTE_ID *getAttributeId(XML_Parser parser, const ENCODING *enc, const char *start, const char *end); static int setElementTypePrefix(XML_Parser parser, ELEMENT_TYPE *); static enum XML_Error storeEntityValue(XML_Parser parser, const ENCODING *enc, const char *start, const char *end); static int reportProcessingInstruction(XML_Parser parser, const ENCODING *enc, const char *start, const char *end); static int reportComment(XML_Parser parser, const ENCODING *enc, const char *start, const char *end); static void reportDefault(XML_Parser parser, const ENCODING *enc, const char *start, const char *end); static const XML_Char *getContext(XML_Parser parser); static XML_Bool setContext(XML_Parser parser, const XML_Char *context); static void FASTCALL normalizePublicId(XML_Char *s); static DTD *dtdCreate(const XML_Memory_Handling_Suite *ms); /* do not call if m_parentParser != NULL */ static void dtdReset(DTD *p, const XML_Memory_Handling_Suite *ms); static void dtdDestroy(DTD *p, XML_Bool isDocEntity, const XML_Memory_Handling_Suite *ms); static int dtdCopy(XML_Parser oldParser, DTD *newDtd, const DTD *oldDtd, const XML_Memory_Handling_Suite *ms); static int copyEntityTable(XML_Parser oldParser, HASH_TABLE *, STRING_POOL *, const HASH_TABLE *); static NAMED *lookup(XML_Parser parser, HASH_TABLE *table, KEY name, size_t createSize); static void FASTCALL hashTableInit(HASH_TABLE *, const XML_Memory_Handling_Suite *ms); static void FASTCALL hashTableClear(HASH_TABLE *); static void FASTCALL hashTableDestroy(HASH_TABLE *); static void FASTCALL hashTableIterInit(HASH_TABLE_ITER *, const HASH_TABLE *); static NAMED *FASTCALL hashTableIterNext(HASH_TABLE_ITER *); static void FASTCALL poolInit(STRING_POOL *, const XML_Memory_Handling_Suite *ms); static void FASTCALL poolClear(STRING_POOL *); static void 
FASTCALL poolDestroy(STRING_POOL *); static XML_Char *poolAppend(STRING_POOL *pool, const ENCODING *enc, const char *ptr, const char *end); static XML_Char *poolStoreString(STRING_POOL *pool, const ENCODING *enc, const char *ptr, const char *end); static XML_Bool FASTCALL poolGrow(STRING_POOL *pool); static const XML_Char *FASTCALL poolCopyString(STRING_POOL *pool, const XML_Char *s); static const XML_Char *poolCopyStringN(STRING_POOL *pool, const XML_Char *s, int n); static const XML_Char *FASTCALL poolAppendString(STRING_POOL *pool, const XML_Char *s); static int FASTCALL nextScaffoldPart(XML_Parser parser); static XML_Content *build_model(XML_Parser parser); static ELEMENT_TYPE *getElementType(XML_Parser parser, const ENCODING *enc, const char *ptr, const char *end); static XML_Char *copyString(const XML_Char *s, const XML_Memory_Handling_Suite *memsuite); static unsigned long generate_hash_secret_salt(XML_Parser parser); static XML_Bool startParsing(XML_Parser parser); static XML_Parser parserCreate(const XML_Char *encodingName, const XML_Memory_Handling_Suite *memsuite, const XML_Char *nameSep, DTD *dtd); static void parserInit(XML_Parser parser, const XML_Char *encodingName); #define poolStart(pool) ((pool)->start) #define poolEnd(pool) ((pool)->ptr) #define poolLength(pool) ((pool)->ptr - (pool)->start) #define poolChop(pool) ((void)--(pool->ptr)) #define poolLastChar(pool) (((pool)->ptr)[-1]) #define poolDiscard(pool) ((pool)->ptr = (pool)->start) #define poolFinish(pool) ((pool)->start = (pool)->ptr) #define poolAppendChar(pool, c) \ (((pool)->ptr == (pool)->end && ! poolGrow(pool)) \ ? 0 \ : ((*((pool)->ptr)++ = c), 1)) struct XML_ParserStruct { /* The first member must be m_userData so that the XML_GetUserData macro works. 
*/ void *m_userData; void *m_handlerArg; char *m_buffer; const XML_Memory_Handling_Suite m_mem; /* first character to be parsed */ const char *m_bufferPtr; /* past last character to be parsed */ char *m_bufferEnd; /* allocated end of m_buffer */ const char *m_bufferLim; XML_Index m_parseEndByteIndex; const char *m_parseEndPtr; XML_Char *m_dataBuf; XML_Char *m_dataBufEnd; XML_StartElementHandler m_startElementHandler; XML_EndElementHandler m_endElementHandler; XML_CharacterDataHandler m_characterDataHandler; XML_ProcessingInstructionHandler m_processingInstructionHandler; XML_CommentHandler m_commentHandler; XML_StartCdataSectionHandler m_startCdataSectionHandler; XML_EndCdataSectionHandler m_endCdataSectionHandler; XML_DefaultHandler m_defaultHandler; XML_StartDoctypeDeclHandler m_startDoctypeDeclHandler; XML_EndDoctypeDeclHandler m_endDoctypeDeclHandler; XML_UnparsedEntityDeclHandler m_unparsedEntityDeclHandler; XML_NotationDeclHandler m_notationDeclHandler; XML_StartNamespaceDeclHandler m_startNamespaceDeclHandler; XML_EndNamespaceDeclHandler m_endNamespaceDeclHandler; XML_NotStandaloneHandler m_notStandaloneHandler; XML_ExternalEntityRefHandler m_externalEntityRefHandler; XML_Parser m_externalEntityRefHandlerArg; XML_SkippedEntityHandler m_skippedEntityHandler; XML_UnknownEncodingHandler m_unknownEncodingHandler; XML_ElementDeclHandler m_elementDeclHandler; XML_AttlistDeclHandler m_attlistDeclHandler; XML_EntityDeclHandler m_entityDeclHandler; XML_XmlDeclHandler m_xmlDeclHandler; const ENCODING *m_encoding; INIT_ENCODING m_initEncoding; const ENCODING *m_internalEncoding; const XML_Char *m_protocolEncodingName; XML_Bool m_ns; XML_Bool m_ns_triplets; void *m_unknownEncodingMem; void *m_unknownEncodingData; void *m_unknownEncodingHandlerData; void(XMLCALL *m_unknownEncodingRelease)(void *); PROLOG_STATE m_prologState; Processor *m_processor; enum XML_Error m_errorCode; const char *m_eventPtr; const char *m_eventEndPtr; const char *m_positionPtr; 
  /* Remaining runtime state of the parser: open/free entity lists,
     current DTD declaration scratch, tag stack and free lists,
     attribute arrays, string pools, namespace configuration, and the
     per-root-parser hash salt. */
  OPEN_INTERNAL_ENTITY *m_openInternalEntities;
  OPEN_INTERNAL_ENTITY *m_freeInternalEntities;
  XML_Bool m_defaultExpandInternalEntities;
  int m_tagLevel;
  ENTITY *m_declEntity;
  const XML_Char *m_doctypeName;
  const XML_Char *m_doctypeSysid;
  const XML_Char *m_doctypePubid;
  const XML_Char *m_declAttributeType;
  const XML_Char *m_declNotationName;
  const XML_Char *m_declNotationPublicId;
  ELEMENT_TYPE *m_declElementType;
  ATTRIBUTE_ID *m_declAttributeId;
  XML_Bool m_declAttributeIsCdata;
  XML_Bool m_declAttributeIsId;
  DTD *m_dtd;
  const XML_Char *m_curBase;
  TAG *m_tagStack;
  TAG *m_freeTagList;
  BINDING *m_inheritedBindings;
  BINDING *m_freeBindingList;
  int m_attsSize;
  int m_nSpecifiedAtts;
  int m_idAttIndex;
  ATTRIBUTE *m_atts;
  NS_ATT *m_nsAtts;
  unsigned long m_nsAttsVersion;
  unsigned char m_nsAttsPower;
#ifdef XML_ATTR_INFO
  XML_AttrInfo *m_attInfo;
#endif
  POSITION m_position;
  STRING_POOL m_tempPool;
  STRING_POOL m_temp2Pool;
  char *m_groupConnector;
  unsigned int m_groupSize;
  XML_Char m_namespaceSeparator;
  XML_Parser m_parentParser;
  XML_ParsingStatus m_parsingStatus;
#ifdef XML_DTD
  XML_Bool m_isParamEntity;
  XML_Bool m_useForeignDTD;
  enum XML_ParamEntityParsing m_paramEntityParsing;
#endif
  unsigned long m_hash_secret_salt;
};

/* Allocation shorthands routed through the parser's memory suite. */
#define MALLOC(parser, s) (parser->m_mem.malloc_fcn((s)))
#define REALLOC(parser, p, s) (parser->m_mem.realloc_fcn((p), (s)))
#define FREE(parser, p) (parser->m_mem.free_fcn((p)))

/* Create a parser with default memory handling and no namespace
   processing; thin wrapper over XML_ParserCreate_MM(). */
XML_Parser XMLCALL
XML_ParserCreate(const XML_Char *encodingName) {
  return XML_ParserCreate_MM(encodingName, NULL, NULL);
}

/* Create a namespace-aware parser; nsSep separates URI and local part
   in reported names.  Thin wrapper over XML_ParserCreate_MM(). */
XML_Parser XMLCALL
XML_ParserCreateNS(const XML_Char *encodingName, XML_Char nsSep) {
  XML_Char tmp[2];
  *tmp = nsSep;
  return XML_ParserCreate_MM(encodingName, NULL, tmp);
}

/* "xml=http://www.w3.org/XML/1998/namespace" spelled as ASCII_* tokens,
   bound implicitly on root parsers by startParsing(). */
static const XML_Char implicitContext[]
    = {ASCII_x,     ASCII_m,     ASCII_l,      ASCII_EQUALS, ASCII_h,
       ASCII_t,     ASCII_t,     ASCII_p,      ASCII_COLON,  ASCII_SLASH,
       ASCII_SLASH, ASCII_w,     ASCII_w,      ASCII_w,      ASCII_PERIOD,
       ASCII_w,     ASCII_3,     ASCII_PERIOD, ASCII_o,      ASCII_r,
       ASCII_g,     ASCII_SLASH, ASCII_X,      ASCII_M,      ASCII_L,
       ASCII_SLASH, ASCII_1,     ASCII_9,      ASCII_9,      ASCII_8,
       ASCII_SLASH, ASCII_n,     ASCII_a,      ASCII_m,      ASCII_e,
       ASCII_s,     ASCII_p,     ASCII_a,      ASCII_c,      ASCII_e,
       '\0'};

/* To avoid warnings about unused functions: */
#if ! defined(HAVE_ARC4RANDOM_BUF) && ! defined(HAVE_ARC4RANDOM)

# if defined(HAVE_GETRANDOM) || defined(HAVE_SYSCALL_GETRANDOM)

/* Obtain entropy on Linux 3.17+ */
/* Fill `target` with `count` random bytes via getrandom() (or the raw
   syscall); retries on EINTR, returns 1 on full success, 0 otherwise. */
static int
writeRandomBytes_getrandom_nonblock(void *target, size_t count) {
  int success = 0; /* full count bytes written? */
  size_t bytesWrittenTotal = 0;
  const unsigned int getrandomFlags = GRND_NONBLOCK;

  do {
    void *const currentTarget = (void *)((char *)target + bytesWrittenTotal);
    const size_t bytesToWrite = count - bytesWrittenTotal;

    const int bytesWrittenMore =
#  if defined(HAVE_GETRANDOM)
        getrandom(currentTarget, bytesToWrite, getrandomFlags);
#  else
        syscall(SYS_getrandom, currentTarget, bytesToWrite, getrandomFlags);
#  endif

    if (bytesWrittenMore > 0) {
      bytesWrittenTotal += bytesWrittenMore;
      if (bytesWrittenTotal >= count)
        success = 1;
    }
  } while (! success && (errno == EINTR));

  return success;
}

# endif /* defined(HAVE_GETRANDOM) || defined(HAVE_SYSCALL_GETRANDOM) */

# if ! defined(_WIN32) && defined(XML_DEV_URANDOM)

/* Extract entropy from /dev/urandom */
/* Same contract as the getrandom variant: 1 on full success, 0 on any
   failure (including failure to open the device). */
static int
writeRandomBytes_dev_urandom(void *target, size_t count) {
  int success = 0; /* full count bytes written? */
  size_t bytesWrittenTotal = 0;

  const int fd = open("/dev/urandom", O_RDONLY);
  if (fd < 0) {
    return 0;
  }

  do {
    void *const currentTarget = (void *)((char *)target + bytesWrittenTotal);
    const size_t bytesToWrite = count - bytesWrittenTotal;

    const ssize_t bytesWrittenMore = read(fd, currentTarget, bytesToWrite);

    if (bytesWrittenMore > 0) {
      bytesWrittenTotal += bytesWrittenMore;
      if (bytesWrittenTotal >= count)
        success = 1;
    }
  } while (! success && (errno == EINTR));

  close(fd);
  return success;
}

# endif /* ! defined(_WIN32) && defined(XML_DEV_URANDOM) */

#endif /* ! defined(HAVE_ARC4RANDOM_BUF) && !
defined(HAVE_ARC4RANDOM) */

#if defined(HAVE_ARC4RANDOM) && ! defined(HAVE_ARC4RANDOM_BUF)

/* Fill `target` with `count` random bytes, 4 bytes at a time, on
   platforms that have arc4random() but not arc4random_buf(). */
static void
writeRandomBytes_arc4random(void *target, size_t count) {
  size_t bytesWrittenTotal = 0;

  while (bytesWrittenTotal < count) {
    const uint32_t random32 = arc4random();
    size_t i = 0;

    for (; (i < sizeof(random32)) && (bytesWrittenTotal < count);
         i++, bytesWrittenTotal++) {
      const uint8_t random8 = (uint8_t)(random32 >> (i * 8));

      ((uint8_t *)target)[bytesWrittenTotal] = random8;
    }
  }
}

#endif /* defined(HAVE_ARC4RANDOM) && ! defined(HAVE_ARC4RANDOM_BUF) */

#ifdef _WIN32

/* Obtain entropy on Windows using the rand_s() function which
 * generates cryptographically secure random numbers.  Internally it
 * uses RtlGenRandom API which is present in Windows XP and later.
 */
static int
writeRandomBytes_rand_s(void *target, size_t count) {
  size_t bytesWrittenTotal = 0;

  while (bytesWrittenTotal < count) {
    unsigned int random32 = 0;
    size_t i = 0;

    if (rand_s(&random32))
      return 0; /* failure */

    for (; (i < sizeof(random32)) && (bytesWrittenTotal < count);
         i++, bytesWrittenTotal++) {
      const uint8_t random8 = (uint8_t)(random32 >> (i * 8));

      ((uint8_t *)target)[bytesWrittenTotal] = random8;
    }
  }
  return 1; /* success */
}

#endif /* _WIN32 */

#if ! defined(HAVE_ARC4RANDOM_BUF) && ! defined(HAVE_ARC4RANDOM)

/* Low-quality fallback entropy from the system clock; only used when no
   real randomness source is available (see generate_hash_secret_salt). */
static unsigned long
gather_time_entropy(void) {
# ifdef _WIN32
  FILETIME ft;
  GetSystemTimeAsFileTime(&ft); /* never fails */
  return ft.dwHighDateTime ^ ft.dwLowDateTime;
# else
  struct timeval tv;
  int gettimeofday_res;

  gettimeofday_res = gettimeofday(&tv, NULL);

#  if defined(NDEBUG)
  (void)gettimeofday_res;
#  else
  assert(gettimeofday_res == 0);
#  endif /* defined(NDEBUG) */

  /* Microseconds time is <20 bits entropy */
  return tv.tv_usec;
# endif
}

#endif /* ! defined(HAVE_ARC4RANDOM_BUF) && ! defined(HAVE_ARC4RANDOM) */

/* Pass-through that logs `entropy` and its source `label` to stderr when
   environment variable EXPAT_ENTROPY_DEBUG is set to "1". */
static unsigned long
ENTROPY_DEBUG(const char *label, unsigned long entropy) {
  const char *const EXPAT_ENTROPY_DEBUG = getenv("EXPAT_ENTROPY_DEBUG");
  if (EXPAT_ENTROPY_DEBUG && ! strcmp(EXPAT_ENTROPY_DEBUG, "1")) {
    fprintf(stderr, "Entropy: %s --> 0x%0*lx (%lu bytes)\n", label,
            (int)sizeof(entropy) * 2, entropy, (unsigned long)sizeof(entropy));
  }
  return entropy;
}

/* Produce the salt that keys the parser's hash tables (anti hash-flooding).
   Tries the compiled-in entropy sources from strongest to weakest. */
static unsigned long
generate_hash_secret_salt(XML_Parser parser) {
  unsigned long entropy;
  (void)parser;

  /* "Failproof" high quality providers: */
#if defined(HAVE_ARC4RANDOM_BUF)
  arc4random_buf(&entropy, sizeof(entropy));
  return ENTROPY_DEBUG("arc4random_buf", entropy);
#elif defined(HAVE_ARC4RANDOM)
  writeRandomBytes_arc4random((void *)&entropy, sizeof(entropy));
  return ENTROPY_DEBUG("arc4random", entropy);
#else
  /* Try high quality providers first .. */
# ifdef _WIN32
  if (writeRandomBytes_rand_s((void *)&entropy, sizeof(entropy))) {
    return ENTROPY_DEBUG("rand_s", entropy);
  }
# elif defined(HAVE_GETRANDOM) || defined(HAVE_SYSCALL_GETRANDOM)
  if (writeRandomBytes_getrandom_nonblock((void *)&entropy, sizeof(entropy))) {
    return ENTROPY_DEBUG("getrandom", entropy);
  }
# endif
# if ! defined(_WIN32) && defined(XML_DEV_URANDOM)
  if (writeRandomBytes_dev_urandom((void *)&entropy, sizeof(entropy))) {
    return ENTROPY_DEBUG("/dev/urandom", entropy);
  }
# endif /* ! defined(_WIN32) && defined(XML_DEV_URANDOM) */
  /* ..
     and self-made low quality for backup: */

  /* Process ID is 0 bits entropy if attacker has local access */
  entropy = gather_time_entropy() ^ getpid();

  /* Factors are 2^31-1 and 2^61-1 (Mersenne primes M31 and M61) */
  if (sizeof(unsigned long) == 4) {
    return ENTROPY_DEBUG("fallback(4)", entropy * 2147483647);
  } else {
    return ENTROPY_DEBUG("fallback(8)",
                         entropy * (unsigned long)2305843009213693951ULL);
  }
#endif
}

/* The root parser owns the hash salt; child (external-entity) parsers
   walk up via m_parentParser to reach it. */
static unsigned long
get_hash_secret_salt(XML_Parser parser) {
  if (parser->m_parentParser != NULL)
    return get_hash_secret_salt(parser->m_parentParser);
  return parser->m_hash_secret_salt;
}

static XML_Bool /* only valid for root parser */
startParsing(XML_Parser parser) {
  /* hash functions must be initialized before setContext() is called */
  if (parser->m_hash_secret_salt == 0)
    parser->m_hash_secret_salt = generate_hash_secret_salt(parser);
  if (parser->m_ns) {
    /* implicit context only set for root parser, since child
       parsers (i.e. external entity parsers) will inherit it
    */
    return setContext(parser, implicitContext);
  }
  return XML_TRUE;
}

/* Public constructor taking an optional custom memory suite and optional
   namespace separator; delegates to parserCreate() with no shared DTD. */
XML_Parser XMLCALL
XML_ParserCreate_MM(const XML_Char *encodingName,
                    const XML_Memory_Handling_Suite *memsuite,
                    const XML_Char *nameSep) {
  return parserCreate(encodingName, memsuite, nameSep, NULL);
}

/* Internal constructor shared by all public create paths.  `dtd` is
   non-NULL only for external-entity parsers that reuse the parent's DTD.
   Returns NULL on any allocation failure (everything already allocated
   is released first). */
static XML_Parser
parserCreate(const XML_Char *encodingName,
             const XML_Memory_Handling_Suite *memsuite, const XML_Char *nameSep,
             DTD *dtd) {
  XML_Parser parser;

  if (memsuite) {
    XML_Memory_Handling_Suite *mtemp;
    parser = (XML_Parser)memsuite->malloc_fcn(sizeof(struct XML_ParserStruct));
    if (parser != NULL) {
      /* m_mem is const in the struct; cast away to initialize it once. */
      mtemp = (XML_Memory_Handling_Suite *)&(parser->m_mem);
      mtemp->malloc_fcn = memsuite->malloc_fcn;
      mtemp->realloc_fcn = memsuite->realloc_fcn;
      mtemp->free_fcn = memsuite->free_fcn;
    }
  } else {
    XML_Memory_Handling_Suite *mtemp;
    parser = (XML_Parser)malloc(sizeof(struct XML_ParserStruct));
    if (parser != NULL) {
      mtemp = (XML_Memory_Handling_Suite *)&(parser->m_mem);
      mtemp->malloc_fcn = malloc;
      mtemp->realloc_fcn = realloc;
      mtemp->free_fcn = free;
    }
  }

  if (! parser)
    return parser;

  parser->m_buffer = NULL;
  parser->m_bufferLim = NULL;

  parser->m_attsSize = INIT_ATTS_SIZE;
  parser->m_atts
      = (ATTRIBUTE *)MALLOC(parser, parser->m_attsSize * sizeof(ATTRIBUTE));
  if (parser->m_atts == NULL) {
    FREE(parser, parser);
    return NULL;
  }
#ifdef XML_ATTR_INFO
  parser->m_attInfo = (XML_AttrInfo *)MALLOC(
      parser, parser->m_attsSize * sizeof(XML_AttrInfo));
  if (parser->m_attInfo == NULL) {
    /* Unwind the allocations made so far before failing. */
    FREE(parser, parser->m_atts);
    FREE(parser, parser);
    return NULL;
  }
#endif
  parser->m_dataBuf
      = (XML_Char *)MALLOC(parser, INIT_DATA_BUF_SIZE * sizeof(XML_Char));
  if (parser->m_dataBuf == NULL) {
    FREE(parser, parser->m_atts);
#ifdef XML_ATTR_INFO
    FREE(parser, parser->m_attInfo);
#endif
    FREE(parser, parser);
    return NULL;
  }
  parser->m_dataBufEnd = parser->m_dataBuf + INIT_DATA_BUF_SIZE;

  /* External-entity parsers share the parent's DTD; root parsers own one. */
  if (dtd)
    parser->m_dtd = dtd;
  else {
    parser->m_dtd = dtdCreate(&parser->m_mem);
    if (parser->m_dtd == NULL) {
      FREE(parser, parser->m_dataBuf);
      FREE(parser, parser->m_atts);
#ifdef XML_ATTR_INFO
      FREE(parser, parser->m_attInfo);
#endif
      FREE(parser, parser);
      return NULL;
    }
  }

  parser->m_freeBindingList = NULL;
  parser->m_freeTagList = NULL;
  parser->m_freeInternalEntities = NULL;

  parser->m_groupSize = 0;
  parser->m_groupConnector = NULL;

  parser->m_unknownEncodingHandler = NULL;
  parser->m_unknownEncodingHandlerData = NULL;

  parser->m_namespaceSeparator = ASCII_EXCL;
  parser->m_ns = XML_FALSE;
  parser->m_ns_triplets = XML_FALSE;

  parser->m_nsAtts = NULL;
  parser->m_nsAttsVersion = 0;
  parser->m_nsAttsPower = 0;

  parser->m_protocolEncodingName = NULL;

  poolInit(&parser->m_tempPool, &(parser->m_mem));
  poolInit(&parser->m_temp2Pool, &(parser->m_mem));
  parserInit(parser, encodingName);

  /* parserInit copies encodingName; a NULL copy here means OOM. */
  if (encodingName && ! parser->m_protocolEncodingName) {
    XML_ParserFree(parser);
    return NULL;
  }

  if (nameSep) {
    parser->m_ns = XML_TRUE;
    parser->m_internalEncoding = XmlGetInternalEncodingNS();
    parser->m_namespaceSeparator = *nameSep;
  } else {
    parser->m_internalEncoding = XmlGetInternalEncoding();
  }

  return parser;
}

/* Reset every per-parse field to its initial state; called from
   parserCreate() and XML_ParserReset().  Does not touch the memory
   suite, the DTD, or the allocated buffers themselves. */
static void
parserInit(XML_Parser parser, const XML_Char *encodingName) {
  parser->m_processor = prologInitProcessor;
  XmlPrologStateInit(&parser->m_prologState);
  if (encodingName != NULL) {
    parser->m_protocolEncodingName = copyString(encodingName, &(parser->m_mem));
  }
  parser->m_curBase = NULL;
  XmlInitEncoding(&parser->m_initEncoding, &parser->m_encoding, 0);
  parser->m_userData = NULL;
  parser->m_handlerArg = NULL;
  parser->m_startElementHandler = NULL;
  parser->m_endElementHandler = NULL;
  parser->m_characterDataHandler = NULL;
  parser->m_processingInstructionHandler = NULL;
  parser->m_commentHandler = NULL;
  parser->m_startCdataSectionHandler = NULL;
  parser->m_endCdataSectionHandler = NULL;
  parser->m_defaultHandler = NULL;
  parser->m_startDoctypeDeclHandler = NULL;
  parser->m_endDoctypeDeclHandler = NULL;
  parser->m_unparsedEntityDeclHandler = NULL;
  parser->m_notationDeclHandler = NULL;
  parser->m_startNamespaceDeclHandler = NULL;
  parser->m_endNamespaceDeclHandler = NULL;
  parser->m_notStandaloneHandler = NULL;
  parser->m_externalEntityRefHandler = NULL;
  parser->m_externalEntityRefHandlerArg = parser;
  parser->m_skippedEntityHandler = NULL;
  parser->m_elementDeclHandler = NULL;
  parser->m_attlistDeclHandler = NULL;
  parser->m_entityDeclHandler = NULL;
  parser->m_xmlDeclHandler = NULL;
  parser->m_bufferPtr = parser->m_buffer;
  parser->m_bufferEnd = parser->m_buffer;
  parser->m_parseEndByteIndex = 0;
  parser->m_parseEndPtr = NULL;
  parser->m_declElementType = NULL;
  parser->m_declAttributeId = NULL;
  parser->m_declEntity = NULL;
  parser->m_doctypeName = NULL;
  parser->m_doctypeSysid = NULL;
  parser->m_doctypePubid = NULL;
  parser->m_declAttributeType = NULL;
  parser->m_declNotationName = NULL;
  parser->m_declNotationPublicId = NULL;
  parser->m_declAttributeIsCdata = XML_FALSE;
  parser->m_declAttributeIsId = XML_FALSE;
  memset(&parser->m_position, 0, sizeof(POSITION));
  parser->m_errorCode = XML_ERROR_NONE;
  parser->m_eventPtr = NULL;
  parser->m_eventEndPtr = NULL;
  parser->m_positionPtr = NULL;
  parser->m_openInternalEntities = NULL;
  parser->m_defaultExpandInternalEntities = XML_TRUE;
  parser->m_tagLevel = 0;
  parser->m_tagStack = NULL;
  parser->m_inheritedBindings = NULL;
  parser->m_nSpecifiedAtts = 0;
  parser->m_unknownEncodingMem = NULL;
  parser->m_unknownEncodingRelease = NULL;
  parser->m_unknownEncodingData = NULL;
  parser->m_parentParser = NULL;
  parser->m_parsingStatus.parsing = XML_INITIALIZED;
#ifdef XML_DTD
  parser->m_isParamEntity = XML_FALSE;
  parser->m_useForeignDTD = XML_FALSE;
  parser->m_paramEntityParsing = XML_PARAM_ENTITY_PARSING_NEVER;
#endif
  parser->m_hash_secret_salt = 0;
}

/* moves list of bindings to m_freeBindingList */
static void FASTCALL
moveToFreeBindingList(XML_Parser parser, BINDING *bindings) {
  while (bindings) {
    BINDING *b = bindings;
    bindings = bindings->nextTagBinding;
    /* Push onto the free list for reuse instead of freeing. */
    b->nextTagBinding = parser->m_freeBindingList;
    parser->m_freeBindingList = b;
  }
}

/* Reset `parser` for reuse with a possibly different encoding.  Only
   valid on a root parser (returns XML_FALSE for NULL or child parsers).
   Recycles tags, bindings, and open entities onto free lists rather
   than freeing them. */
XML_Bool XMLCALL
XML_ParserReset(XML_Parser parser, const XML_Char *encodingName) {
  TAG *tStk;
  OPEN_INTERNAL_ENTITY *openEntityList;
  if (parser == NULL)
    return XML_FALSE;
  if (parser->m_parentParser)
    return XML_FALSE;
  /* move m_tagStack to m_freeTagList */
  tStk = parser->m_tagStack;
  while (tStk) {
    TAG *tag = tStk;
    tStk = tStk->parent;
    tag->parent = parser->m_freeTagList;
    moveToFreeBindingList(parser, tag->bindings);
    tag->bindings = NULL;
    parser->m_freeTagList = tag;
  }
  /* move m_openInternalEntities to m_freeInternalEntities */
  openEntityList = parser->m_openInternalEntities;
  while (openEntityList) {
    OPEN_INTERNAL_ENTITY *openEntity = openEntityList;
    openEntityList = openEntity->next;
    openEntity->next = parser->m_freeInternalEntities;
    parser->m_freeInternalEntities = openEntity;
  }
  moveToFreeBindingList(parser, parser->m_inheritedBindings);
  FREE(parser, parser->m_unknownEncodingMem);
  if (parser->m_unknownEncodingRelease)
    parser->m_unknownEncodingRelease(parser->m_unknownEncodingData);
  poolClear(&parser->m_tempPool);
  poolClear(&parser->m_temp2Pool);
  FREE(parser, (void *)parser->m_protocolEncodingName);
  parser->m_protocolEncodingName = NULL;
  parserInit(parser, encodingName);
  dtdReset(parser->m_dtd, &parser->m_mem);
  return XML_TRUE;
}

/* Replace the externally-supplied encoding name.  Fails if called while
   a parse is in progress or suspended, or on copy failure. */
enum XML_Status XMLCALL
XML_SetEncoding(XML_Parser parser, const XML_Char *encodingName) {
  if (parser == NULL)
    return XML_STATUS_ERROR;
  /* Block after XML_Parse()/XML_ParseBuffer() has been called.
     XXX There's no way for the caller to determine which of the
     XXX possible error cases caused the XML_STATUS_ERROR return.
  */
  if (parser->m_parsingStatus.parsing == XML_PARSING
      || parser->m_parsingStatus.parsing == XML_SUSPENDED)
    return XML_STATUS_ERROR;

  /* Get rid of any previous encoding name */
  FREE(parser, (void *)parser->m_protocolEncodingName);
  if (encodingName == NULL)
    /* No new encoding name */
    parser->m_protocolEncodingName = NULL;
  else {
    /* Copy the new encoding name into allocated memory */
    parser->m_protocolEncodingName = copyString(encodingName, &(parser->m_mem));
    if (!
parser->m_protocolEncodingName) return XML_STATUS_ERROR; } return XML_STATUS_OK; } XML_Parser XMLCALL XML_ExternalEntityParserCreate(XML_Parser oldParser, const XML_Char *context, const XML_Char *encodingName) { XML_Parser parser = oldParser; DTD *newDtd = NULL; DTD *oldDtd; XML_StartElementHandler oldStartElementHandler; XML_EndElementHandler oldEndElementHandler; XML_CharacterDataHandler oldCharacterDataHandler; XML_ProcessingInstructionHandler oldProcessingInstructionHandler; XML_CommentHandler oldCommentHandler; XML_StartCdataSectionHandler oldStartCdataSectionHandler; XML_EndCdataSectionHandler oldEndCdataSectionHandler; XML_DefaultHandler oldDefaultHandler; XML_UnparsedEntityDeclHandler oldUnparsedEntityDeclHandler; XML_NotationDeclHandler oldNotationDeclHandler; XML_StartNamespaceDeclHandler oldStartNamespaceDeclHandler; XML_EndNamespaceDeclHandler oldEndNamespaceDeclHandler; XML_NotStandaloneHandler oldNotStandaloneHandler; XML_ExternalEntityRefHandler oldExternalEntityRefHandler; XML_SkippedEntityHandler oldSkippedEntityHandler; XML_UnknownEncodingHandler oldUnknownEncodingHandler; XML_ElementDeclHandler oldElementDeclHandler; XML_AttlistDeclHandler oldAttlistDeclHandler; XML_EntityDeclHandler oldEntityDeclHandler; XML_XmlDeclHandler oldXmlDeclHandler; ELEMENT_TYPE *oldDeclElementType; void *oldUserData; void *oldHandlerArg; XML_Bool oldDefaultExpandInternalEntities; XML_Parser oldExternalEntityRefHandlerArg; #ifdef XML_DTD enum XML_ParamEntityParsing oldParamEntityParsing; int oldInEntityValue; #endif XML_Bool oldns_triplets; /* Note that the new parser shares the same hash secret as the old parser, so that dtdCopy and copyEntityTable can lookup values from hash tables associated with either parser without us having to worry which hash secrets each table has. 
*/ unsigned long oldhash_secret_salt; /* Validate the oldParser parameter before we pull everything out of it */ if (oldParser == NULL) return NULL; /* Stash the original parser contents on the stack */ oldDtd = parser->m_dtd; oldStartElementHandler = parser->m_startElementHandler; oldEndElementHandler = parser->m_endElementHandler; oldCharacterDataHandler = parser->m_characterDataHandler; oldProcessingInstructionHandler = parser->m_processingInstructionHandler; oldCommentHandler = parser->m_commentHandler; oldStartCdataSectionHandler = parser->m_startCdataSectionHandler; oldEndCdataSectionHandler = parser->m_endCdataSectionHandler; oldDefaultHandler = parser->m_defaultHandler; oldUnparsedEntityDeclHandler = parser->m_unparsedEntityDeclHandler; oldNotationDeclHandler = parser->m_notationDeclHandler; oldStartNamespaceDeclHandler = parser->m_startNamespaceDeclHandler; oldEndNamespaceDeclHandler = parser->m_endNamespaceDeclHandler; oldNotStandaloneHandler = parser->m_notStandaloneHandler; oldExternalEntityRefHandler = parser->m_externalEntityRefHandler; oldSkippedEntityHandler = parser->m_skippedEntityHandler; oldUnknownEncodingHandler = parser->m_unknownEncodingHandler; oldElementDeclHandler = parser->m_elementDeclHandler; oldAttlistDeclHandler = parser->m_attlistDeclHandler; oldEntityDeclHandler = parser->m_entityDeclHandler; oldXmlDeclHandler = parser->m_xmlDeclHandler; oldDeclElementType = parser->m_declElementType; oldUserData = parser->m_userData; oldHandlerArg = parser->m_handlerArg; oldDefaultExpandInternalEntities = parser->m_defaultExpandInternalEntities; oldExternalEntityRefHandlerArg = parser->m_externalEntityRefHandlerArg; #ifdef XML_DTD oldParamEntityParsing = parser->m_paramEntityParsing; oldInEntityValue = parser->m_prologState.inEntityValue; #endif oldns_triplets = parser->m_ns_triplets; /* Note that the new parser shares the same hash secret as the old parser, so that dtdCopy and copyEntityTable can lookup values from hash tables associated with 
either parser without us having to worry which hash secrets each table has. */ oldhash_secret_salt = parser->m_hash_secret_salt; #ifdef XML_DTD if (! context) newDtd = oldDtd; #endif /* XML_DTD */ /* Note that the magical uses of the pre-processor to make field access look more like C++ require that `parser' be overwritten here. This makes this function more painful to follow than it would be otherwise. */ if (parser->m_ns) { XML_Char tmp[2]; *tmp = parser->m_namespaceSeparator; parser = parserCreate(encodingName, &parser->m_mem, tmp, newDtd); } else { parser = parserCreate(encodingName, &parser->m_mem, NULL, newDtd); } if (! parser) return NULL; parser->m_startElementHandler = oldStartElementHandler; parser->m_endElementHandler = oldEndElementHandler; parser->m_characterDataHandler = oldCharacterDataHandler; parser->m_processingInstructionHandler = oldProcessingInstructionHandler; parser->m_commentHandler = oldCommentHandler; parser->m_startCdataSectionHandler = oldStartCdataSectionHandler; parser->m_endCdataSectionHandler = oldEndCdataSectionHandler; parser->m_defaultHandler = oldDefaultHandler; parser->m_unparsedEntityDeclHandler = oldUnparsedEntityDeclHandler; parser->m_notationDeclHandler = oldNotationDeclHandler; parser->m_startNamespaceDeclHandler = oldStartNamespaceDeclHandler; parser->m_endNamespaceDeclHandler = oldEndNamespaceDeclHandler; parser->m_notStandaloneHandler = oldNotStandaloneHandler; parser->m_externalEntityRefHandler = oldExternalEntityRefHandler; parser->m_skippedEntityHandler = oldSkippedEntityHandler; parser->m_unknownEncodingHandler = oldUnknownEncodingHandler; parser->m_elementDeclHandler = oldElementDeclHandler; parser->m_attlistDeclHandler = oldAttlistDeclHandler; parser->m_entityDeclHandler = oldEntityDeclHandler; parser->m_xmlDeclHandler = oldXmlDeclHandler; parser->m_declElementType = oldDeclElementType; parser->m_userData = oldUserData; if (oldUserData == oldHandlerArg) parser->m_handlerArg = parser->m_userData; else 
parser->m_handlerArg = parser; if (oldExternalEntityRefHandlerArg != oldParser) parser->m_externalEntityRefHandlerArg = oldExternalEntityRefHandlerArg; parser->m_defaultExpandInternalEntities = oldDefaultExpandInternalEntities; parser->m_ns_triplets = oldns_triplets; parser->m_hash_secret_salt = oldhash_secret_salt; parser->m_parentParser = oldParser; #ifdef XML_DTD parser->m_paramEntityParsing = oldParamEntityParsing; parser->m_prologState.inEntityValue = oldInEntityValue; if (context) { #endif /* XML_DTD */ if (! dtdCopy(oldParser, parser->m_dtd, oldDtd, &parser->m_mem) || ! setContext(parser, context)) { XML_ParserFree(parser); return NULL; } parser->m_processor = externalEntityInitProcessor; #ifdef XML_DTD } else { /* The DTD instance referenced by parser->m_dtd is shared between the document's root parser and external PE parsers, therefore one does not need to call setContext. In addition, one also *must* not call setContext, because this would overwrite existing prefix->binding pointers in parser->m_dtd with ones that get destroyed with the external PE parser. This would leave those prefixes with dangling pointers. */ parser->m_isParamEntity = XML_TRUE; XmlPrologStateInitExternalEntity(&parser->m_prologState); parser->m_processor = externalParEntInitProcessor; } #endif /* XML_DTD */ return parser; } static void FASTCALL destroyBindings(BINDING *bindings, XML_Parser parser) { for (;;) { BINDING *b = bindings; if (! 
 b)
      break;
    bindings = b->nextTagBinding;
    FREE(parser, b->uri);
    FREE(parser, b);
  }
}

/* Destroy a parser and every resource it owns: tag stacks, open-entity
   lists, binding lists, string pools, the DTD (unless shared with a parent
   parameter-entity parser), and all auxiliary buffers. */
void XMLCALL
XML_ParserFree(XML_Parser parser) {
  TAG *tagList;
  OPEN_INTERNAL_ENTITY *entityList;
  if (parser == NULL)
    return;
  /* free m_tagStack and m_freeTagList */
  tagList = parser->m_tagStack;
  for (;;) {
    TAG *p;
    if (tagList == NULL) {
      if (parser->m_freeTagList == NULL)
        break;
      /* Once the active stack is drained, drain the free list too. */
      tagList = parser->m_freeTagList;
      parser->m_freeTagList = NULL;
    }
    p = tagList;
    tagList = tagList->parent;
    FREE(parser, p->buf);
    destroyBindings(p->bindings, parser);
    FREE(parser, p);
  }
  /* free m_openInternalEntities and m_freeInternalEntities */
  entityList = parser->m_openInternalEntities;
  for (;;) {
    OPEN_INTERNAL_ENTITY *openEntity;
    if (entityList == NULL) {
      if (parser->m_freeInternalEntities == NULL)
        break;
      entityList = parser->m_freeInternalEntities;
      parser->m_freeInternalEntities = NULL;
    }
    openEntity = entityList;
    entityList = entityList->next;
    FREE(parser, openEntity);
  }
  destroyBindings(parser->m_freeBindingList, parser);
  destroyBindings(parser->m_inheritedBindings, parser);
  poolDestroy(&parser->m_tempPool);
  poolDestroy(&parser->m_temp2Pool);
  FREE(parser, (void *)parser->m_protocolEncodingName);
#ifdef XML_DTD
  /* external parameter entity parsers share the DTD structure
     parser->m_dtd with the root parser, so we must not destroy it
  */
  if (! parser->m_isParamEntity && parser->m_dtd)
#else
  if (parser->m_dtd)
#endif /* XML_DTD */
    dtdDestroy(parser->m_dtd, (XML_Bool)! parser->m_parentParser,
               &parser->m_mem);
  FREE(parser, (void *)parser->m_atts);
#ifdef XML_ATTR_INFO
  FREE(parser, (void *)parser->m_attInfo);
#endif
  FREE(parser, parser->m_groupConnector);
  FREE(parser, parser->m_buffer);
  FREE(parser, parser->m_dataBuf);
  FREE(parser, parser->m_nsAtts);
  FREE(parser, parser->m_unknownEncodingMem);
  if (parser->m_unknownEncodingRelease)
    parser->m_unknownEncodingRelease(parser->m_unknownEncodingData);
  FREE(parser, parser);
}

/* Make the parser itself the first argument passed to handlers. */
void XMLCALL
XML_UseParserAsHandlerArg(XML_Parser parser) {
  if (parser != NULL)
    parser->m_handlerArg = parser;
}

/* Enable/disable use of a foreign (caller-supplied) DTD; requires XML_DTD
   support and must be called before parsing starts. */
enum XML_Error XMLCALL
XML_UseForeignDTD(XML_Parser parser, XML_Bool useDTD) {
  if (parser == NULL)
    return XML_ERROR_INVALID_ARGUMENT;
#ifdef XML_DTD
  /* block after XML_Parse()/XML_ParseBuffer() has been called */
  if (parser->m_parsingStatus.parsing == XML_PARSING
      || parser->m_parsingStatus.parsing == XML_SUSPENDED)
    return XML_ERROR_CANT_CHANGE_FEATURE_ONCE_PARSING;
  parser->m_useForeignDTD = useDTD;
  return XML_ERROR_NONE;
#else
  return XML_ERROR_FEATURE_REQUIRES_XML_DTD;
#endif
}

/* Choose whether namespace-qualified names are reported as URI+local+prefix
   triplets; must be set before parsing starts. */
void XMLCALL
XML_SetReturnNSTriplet(XML_Parser parser, int do_nst) {
  if (parser == NULL)
    return;
  /* block after XML_Parse()/XML_ParseBuffer() has been called */
  if (parser->m_parsingStatus.parsing == XML_PARSING
      || parser->m_parsingStatus.parsing == XML_SUSPENDED)
    return;
  parser->m_ns_triplets = do_nst ? XML_TRUE : XML_FALSE;
}

/* Replace the user-data pointer, keeping the handler arg aliased to it if
   it was aliased before. */
void XMLCALL
XML_SetUserData(XML_Parser parser, void *p) {
  if (parser == NULL)
    return;
  if (parser->m_handlerArg == parser->m_userData)
    parser->m_handlerArg = parser->m_userData = p;
  else
    parser->m_userData = p;
}

/* Set (or clear) the base URI used for resolving relative system ids; the
   string is copied into the DTD pool. */
enum XML_Status XMLCALL
XML_SetBase(XML_Parser parser, const XML_Char *p) {
  if (parser == NULL)
    return XML_STATUS_ERROR;
  if (p) {
    p = poolCopyString(&parser->m_dtd->pool, p);
    if (!
 p)
      return XML_STATUS_ERROR;
    parser->m_curBase = p;
  } else
    parser->m_curBase = NULL;
  return XML_STATUS_OK;
}

/* Return the current base URI (may be NULL). */
const XML_Char *XMLCALL
XML_GetBase(XML_Parser parser) {
  if (parser == NULL)
    return NULL;
  return parser->m_curBase;
}

/* Number of attributes in the last start tag that were explicitly
   specified (as opposed to defaulted from the DTD); -1 on bad parser. */
int XMLCALL
XML_GetSpecifiedAttributeCount(XML_Parser parser) {
  if (parser == NULL)
    return -1;
  return parser->m_nSpecifiedAtts;
}

/* Index of the ID attribute in the last start tag, or -1. */
int XMLCALL
XML_GetIdAttributeIndex(XML_Parser parser) {
  if (parser == NULL)
    return -1;
  return parser->m_idAttIndex;
}

#ifdef XML_ATTR_INFO
/* Byte-offset info for the attributes of the last start tag. */
const XML_AttrInfo *XMLCALL
XML_GetAttributeInfo(XML_Parser parser) {
  if (parser == NULL)
    return NULL;
  return parser->m_attInfo;
}
#endif

/* The following functions are simple handler-registration setters; each
   stores the supplied callback(s) on the parser, ignoring NULL parsers. */

void XMLCALL
XML_SetElementHandler(XML_Parser parser, XML_StartElementHandler start,
                      XML_EndElementHandler end) {
  if (parser == NULL)
    return;
  parser->m_startElementHandler = start;
  parser->m_endElementHandler = end;
}

void XMLCALL
XML_SetStartElementHandler(XML_Parser parser, XML_StartElementHandler start) {
  if (parser != NULL)
    parser->m_startElementHandler = start;
}

void XMLCALL
XML_SetEndElementHandler(XML_Parser parser, XML_EndElementHandler end) {
  if (parser != NULL)
    parser->m_endElementHandler = end;
}

void XMLCALL
XML_SetCharacterDataHandler(XML_Parser parser,
                            XML_CharacterDataHandler handler) {
  if (parser != NULL)
    parser->m_characterDataHandler = handler;
}

void XMLCALL
XML_SetProcessingInstructionHandler(XML_Parser parser,
                                    XML_ProcessingInstructionHandler handler) {
  if (parser != NULL)
    parser->m_processingInstructionHandler = handler;
}

void XMLCALL
XML_SetCommentHandler(XML_Parser parser, XML_CommentHandler handler) {
  if (parser != NULL)
    parser->m_commentHandler = handler;
}

void XMLCALL
XML_SetCdataSectionHandler(XML_Parser parser,
                           XML_StartCdataSectionHandler start,
                           XML_EndCdataSectionHandler end) {
  if (parser == NULL)
    return;
  parser->m_startCdataSectionHandler = start;
  parser->m_endCdataSectionHandler = end;
}

void XMLCALL
XML_SetStartCdataSectionHandler(XML_Parser parser,
                                XML_StartCdataSectionHandler start) {
  if (parser != NULL)
    parser->m_startCdataSectionHandler = start;
}

void XMLCALL
XML_SetEndCdataSectionHandler(XML_Parser parser,
                              XML_EndCdataSectionHandler end) {
  if (parser != NULL)
    parser->m_endCdataSectionHandler = end;
}

/* Default handler variant that does NOT expand internal entities. */
void XMLCALL
XML_SetDefaultHandler(XML_Parser parser, XML_DefaultHandler handler) {
  if (parser == NULL)
    return;
  parser->m_defaultHandler = handler;
  parser->m_defaultExpandInternalEntities = XML_FALSE;
}

/* Default handler variant that DOES expand internal entities. */
void XMLCALL
XML_SetDefaultHandlerExpand(XML_Parser parser, XML_DefaultHandler handler) {
  if (parser == NULL)
    return;
  parser->m_defaultHandler = handler;
  parser->m_defaultExpandInternalEntities = XML_TRUE;
}

void XMLCALL
XML_SetDoctypeDeclHandler(XML_Parser parser, XML_StartDoctypeDeclHandler start,
                          XML_EndDoctypeDeclHandler end) {
  if (parser == NULL)
    return;
  parser->m_startDoctypeDeclHandler = start;
  parser->m_endDoctypeDeclHandler = end;
}

void XMLCALL
XML_SetStartDoctypeDeclHandler(XML_Parser parser,
                               XML_StartDoctypeDeclHandler start) {
  if (parser != NULL)
    parser->m_startDoctypeDeclHandler = start;
}

void XMLCALL
XML_SetEndDoctypeDeclHandler(XML_Parser parser,
                             XML_EndDoctypeDeclHandler end) {
  if (parser != NULL)
    parser->m_endDoctypeDeclHandler = end;
}

void XMLCALL
XML_SetUnparsedEntityDeclHandler(XML_Parser parser,
                                 XML_UnparsedEntityDeclHandler handler) {
  if (parser != NULL)
    parser->m_unparsedEntityDeclHandler = handler;
}

void XMLCALL
XML_SetNotationDeclHandler(XML_Parser parser,
                           XML_NotationDeclHandler handler) {
  if (parser != NULL)
    parser->m_notationDeclHandler = handler;
}

void XMLCALL
XML_SetNamespaceDeclHandler(XML_Parser parser,
                            XML_StartNamespaceDeclHandler start,
                            XML_EndNamespaceDeclHandler end) {
  if (parser == NULL)
    return;
  parser->m_startNamespaceDeclHandler = start;
  parser->m_endNamespaceDeclHandler = end;
}

void XMLCALL
XML_SetStartNamespaceDeclHandler(XML_Parser parser,
                                 XML_StartNamespaceDeclHandler start) {
  if (parser != NULL)
    parser->m_startNamespaceDeclHandler = start;
}

void XMLCALL
XML_SetEndNamespaceDeclHandler(XML_Parser parser,
                               XML_EndNamespaceDeclHandler end) {
  if (parser != NULL)
    parser->m_endNamespaceDeclHandler = end;
}

void XMLCALL
XML_SetNotStandaloneHandler(XML_Parser parser,
                            XML_NotStandaloneHandler handler) {
  if (parser != NULL)
    parser->m_notStandaloneHandler = handler;
}

void XMLCALL
XML_SetExternalEntityRefHandler(XML_Parser parser,
                                XML_ExternalEntityRefHandler handler) {
  if (parser != NULL)
    parser->m_externalEntityRefHandler = handler;
}

/* A NULL arg means "pass the parser itself" to the external-entity-ref
   handler. */
void XMLCALL
XML_SetExternalEntityRefHandlerArg(XML_Parser parser, void *arg) {
  if (parser == NULL)
    return;
  if (arg)
    parser->m_externalEntityRefHandlerArg = (XML_Parser)arg;
  else
    parser->m_externalEntityRefHandlerArg = parser;
}

void XMLCALL
XML_SetSkippedEntityHandler(XML_Parser parser,
                            XML_SkippedEntityHandler handler) {
  if (parser != NULL)
    parser->m_skippedEntityHandler = handler;
}

void XMLCALL
XML_SetUnknownEncodingHandler(XML_Parser parser,
                              XML_UnknownEncodingHandler handler, void *data) {
  if (parser == NULL)
    return;
  parser->m_unknownEncodingHandler = handler;
  parser->m_unknownEncodingHandlerData = data;
}

void XMLCALL
XML_SetElementDeclHandler(XML_Parser parser, XML_ElementDeclHandler eldecl) {
  if (parser != NULL)
    parser->m_elementDeclHandler = eldecl;
}

void XMLCALL
XML_SetAttlistDeclHandler(XML_Parser parser, XML_AttlistDeclHandler attdecl) {
  if (parser != NULL)
    parser->m_attlistDeclHandler = attdecl;
}

void XMLCALL
XML_SetEntityDeclHandler(XML_Parser parser, XML_EntityDeclHandler handler) {
  if (parser != NULL)
    parser->m_entityDeclHandler = handler;
}

void XMLCALL
XML_SetXmlDeclHandler(XML_Parser parser, XML_XmlDeclHandler handler) {
  if (parser != NULL)
    parser->m_xmlDeclHandler = handler;
}

/* Control parsing of parameter entities; returns non-zero on success.
   Must be called before parsing starts. */
int XMLCALL
XML_SetParamEntityParsing(XML_Parser parser,
                          enum XML_ParamEntityParsing peParsing) {
  if (parser == NULL)
    return 0;
  /* block after XML_Parse()/XML_ParseBuffer() has been called */
  if (parser->m_parsingStatus.parsing == XML_PARSING
      || parser->m_parsingStatus.parsing == XML_SUSPENDED)
    return 0;
#ifdef XML_DTD
  parser->m_paramEntityParsing
 = peParsing;
  return 1;
#else
  return peParsing == XML_PARAM_ENTITY_PARSING_NEVER;
#endif
}

/* Override the random hash salt used against hash-flooding attacks.
   Delegates to the root parser; must be called before parsing starts. */
int XMLCALL
XML_SetHashSalt(XML_Parser parser, unsigned long hash_salt) {
  if (parser == NULL)
    return 0;
  if (parser->m_parentParser)
    return XML_SetHashSalt(parser->m_parentParser, hash_salt);
  /* block after XML_Parse()/XML_ParseBuffer() has been called */
  if (parser->m_parsingStatus.parsing == XML_PARSING
      || parser->m_parsingStatus.parsing == XML_SUSPENDED)
    return 0;
  parser->m_hash_secret_salt = hash_salt;
  return 1;
}

/* Parse len bytes of input from s; isFinal marks the last chunk of the
   document.  Three paths: len == 0 (finalize-only), a direct-parse
   fast path when XML_CONTEXT_BYTES is disabled, and the general path
   that copies into the internal buffer and calls XML_ParseBuffer. */
enum XML_Status XMLCALL
XML_Parse(XML_Parser parser, const char *s, int len, int isFinal) {
  if ((parser == NULL) || (len < 0) || ((s == NULL) && (len != 0))) {
    if (parser != NULL)
      parser->m_errorCode = XML_ERROR_INVALID_ARGUMENT;
    return XML_STATUS_ERROR;
  }
  switch (parser->m_parsingStatus.parsing) {
  case XML_SUSPENDED:
    parser->m_errorCode = XML_ERROR_SUSPENDED;
    return XML_STATUS_ERROR;
  case XML_FINISHED:
    parser->m_errorCode = XML_ERROR_FINISHED;
    return XML_STATUS_ERROR;
  case XML_INITIALIZED:
    if (parser->m_parentParser == NULL && ! startParsing(parser)) {
      parser->m_errorCode = XML_ERROR_NO_MEMORY;
      return XML_STATUS_ERROR;
    }
    /* fall through */
  default:
    parser->m_parsingStatus.parsing = XML_PARSING;
  }

  if (len == 0) {
    parser->m_parsingStatus.finalBuffer = (XML_Bool)isFinal;
    if (! isFinal)
      return XML_STATUS_OK;
    parser->m_positionPtr = parser->m_bufferPtr;
    parser->m_parseEndPtr = parser->m_bufferEnd;

    /* If data are left over from last buffer, and we now know that these
       data are the final chunk of input, then we have to check them again
       to detect errors based on that fact.
    */
    parser->m_errorCode
        = parser->m_processor(parser, parser->m_bufferPtr,
                              parser->m_parseEndPtr, &parser->m_bufferPtr);

    if (parser->m_errorCode == XML_ERROR_NONE) {
      switch (parser->m_parsingStatus.parsing) {
      case XML_SUSPENDED:
        /* It is hard to be certain, but it seems that this case
         * cannot occur.  This code is cleaning up a previous parse
         * with no new data (since len == 0).  Changing the parsing
         * state requires getting to execute a handler function, and
         * there doesn't seem to be an opportunity for that while in
         * this circumstance.
         *
         * Given the uncertainty, we retain the code but exclude it
         * from coverage tests.
         *
         * LCOV_EXCL_START
         */
        XmlUpdatePosition(parser->m_encoding, parser->m_positionPtr,
                          parser->m_bufferPtr, &parser->m_position);
        parser->m_positionPtr = parser->m_bufferPtr;
        return XML_STATUS_SUSPENDED;
        /* LCOV_EXCL_STOP */
      case XML_INITIALIZED:
      case XML_PARSING:
        parser->m_parsingStatus.parsing = XML_FINISHED;
        /* fall through */
      default:
        return XML_STATUS_OK;
      }
    }
    parser->m_eventEndPtr = parser->m_eventPtr;
    parser->m_processor = errorProcessor;
    return XML_STATUS_ERROR;
  }
#ifndef XML_CONTEXT_BYTES
  else if (parser->m_bufferPtr == parser->m_bufferEnd) {
    /* Fast path: parse directly from the caller's buffer, only copying
       any unconsumed tail into the internal buffer afterwards. */
    const char *end;
    int nLeftOver;
    enum XML_Status result;
    /* Detect overflow (a+b > MAX <==> b > MAX-a) */
    if (len > ((XML_Size)-1) / 2 - parser->m_parseEndByteIndex) {
      parser->m_errorCode = XML_ERROR_NO_MEMORY;
      parser->m_eventPtr = parser->m_eventEndPtr = NULL;
      parser->m_processor = errorProcessor;
      return XML_STATUS_ERROR;
    }
    parser->m_parseEndByteIndex += len;
    parser->m_positionPtr = s;
    parser->m_parsingStatus.finalBuffer = (XML_Bool)isFinal;

    parser->m_errorCode
        = parser->m_processor(parser, s, parser->m_parseEndPtr = s + len, &end);

    if (parser->m_errorCode != XML_ERROR_NONE) {
      parser->m_eventEndPtr = parser->m_eventPtr;
      parser->m_processor = errorProcessor;
      return XML_STATUS_ERROR;
    } else {
      switch (parser->m_parsingStatus.parsing) {
      case XML_SUSPENDED:
        result = XML_STATUS_SUSPENDED;
        break;
      case XML_INITIALIZED:
      case XML_PARSING:
        if (isFinal) {
          parser->m_parsingStatus.parsing = XML_FINISHED;
          return XML_STATUS_OK;
        }
        /* fall through */
      default:
        result = XML_STATUS_OK;
      }
    }

    XmlUpdatePosition(parser->m_encoding, parser->m_positionPtr, end,
                      &parser->m_position);
    nLeftOver = s + len - end;
    if (nLeftOver) {
      if (parser->m_buffer == NULL
          || nLeftOver > parser->m_bufferLim - parser->m_buffer) {
        /* avoid _signed_ integer overflow */
        char *temp = NULL;
        const int bytesToAllocate = (int)((unsigned)len * 2U);
        if (bytesToAllocate > 0) {
          temp = (char *)REALLOC(parser, parser->m_buffer, bytesToAllocate);
        }
        if (temp == NULL) {
          parser->m_errorCode = XML_ERROR_NO_MEMORY;
          parser->m_eventPtr = parser->m_eventEndPtr = NULL;
          parser->m_processor = errorProcessor;
          return XML_STATUS_ERROR;
        }
        parser->m_buffer = temp;
        parser->m_bufferLim = parser->m_buffer + bytesToAllocate;
      }
      memcpy(parser->m_buffer, end, nLeftOver);
    }
    parser->m_bufferPtr = parser->m_buffer;
    parser->m_bufferEnd = parser->m_buffer + nLeftOver;
    parser->m_positionPtr = parser->m_bufferPtr;
    parser->m_parseEndPtr = parser->m_bufferEnd;
    parser->m_eventPtr = parser->m_bufferPtr;
    parser->m_eventEndPtr = parser->m_bufferPtr;
    return result;
  }
#endif /* not defined XML_CONTEXT_BYTES */
  else {
    /* General path: copy input into the internal buffer and parse there. */
    void *buff = XML_GetBuffer(parser, len);
    if (buff == NULL)
      return XML_STATUS_ERROR;
    else {
      memcpy(buff, s, len);
      return XML_ParseBuffer(parser, len, isFinal);
    }
  }
}

/* Parse len bytes previously written into the buffer obtained from
   XML_GetBuffer; isFinal marks the last chunk of the document. */
enum XML_Status XMLCALL
XML_ParseBuffer(XML_Parser parser, int len, int isFinal) {
  const char *start;
  enum XML_Status result = XML_STATUS_OK;

  if (parser == NULL)
    return XML_STATUS_ERROR;
  switch (parser->m_parsingStatus.parsing) {
  case XML_SUSPENDED:
    parser->m_errorCode = XML_ERROR_SUSPENDED;
    return XML_STATUS_ERROR;
  case XML_FINISHED:
    parser->m_errorCode = XML_ERROR_FINISHED;
    return XML_STATUS_ERROR;
  case XML_INITIALIZED:
    if (parser->m_parentParser == NULL && !
 startParsing(parser)) {
      parser->m_errorCode = XML_ERROR_NO_MEMORY;
      return XML_STATUS_ERROR;
    }
    /* fall through */
  default:
    parser->m_parsingStatus.parsing = XML_PARSING;
  }

  start = parser->m_bufferPtr;
  parser->m_positionPtr = start;
  parser->m_bufferEnd += len;
  parser->m_parseEndPtr = parser->m_bufferEnd;
  parser->m_parseEndByteIndex += len;
  parser->m_parsingStatus.finalBuffer = (XML_Bool)isFinal;

  parser->m_errorCode = parser->m_processor(
      parser, start, parser->m_parseEndPtr, &parser->m_bufferPtr);

  if (parser->m_errorCode != XML_ERROR_NONE) {
    parser->m_eventEndPtr = parser->m_eventPtr;
    parser->m_processor = errorProcessor;
    return XML_STATUS_ERROR;
  } else {
    switch (parser->m_parsingStatus.parsing) {
    case XML_SUSPENDED:
      result = XML_STATUS_SUSPENDED;
      break;
    case XML_INITIALIZED:
    case XML_PARSING:
      if (isFinal) {
        parser->m_parsingStatus.parsing = XML_FINISHED;
        return result;
      }
    default:; /* should not happen */
    }
  }

  XmlUpdatePosition(parser->m_encoding, parser->m_positionPtr,
                    parser->m_bufferPtr, &parser->m_position);
  parser->m_positionPtr = parser->m_bufferPtr;
  return result;
}

/* Return a writable buffer of at least len bytes for the caller to fill
   before XML_ParseBuffer.  May shift unconsumed data to the front of the
   internal buffer or grow it (doubling until it fits), keeping up to
   XML_CONTEXT_BYTES of already-parsed context when that feature is on.
   Returns NULL (and sets m_errorCode) on bad state or allocation failure. */
void *XMLCALL
XML_GetBuffer(XML_Parser parser, int len) {
  if (parser == NULL)
    return NULL;
  if (len < 0) {
    parser->m_errorCode = XML_ERROR_NO_MEMORY;
    return NULL;
  }
  switch (parser->m_parsingStatus.parsing) {
  case XML_SUSPENDED:
    parser->m_errorCode = XML_ERROR_SUSPENDED;
    return NULL;
  case XML_FINISHED:
    parser->m_errorCode = XML_ERROR_FINISHED;
    return NULL;
  default:;
  }

  if (len > EXPAT_SAFE_PTR_DIFF(parser->m_bufferLim, parser->m_bufferEnd)) {
#ifdef XML_CONTEXT_BYTES
    int keep;
#endif /* defined XML_CONTEXT_BYTES */
    /* Do not invoke signed arithmetic overflow: */
    int neededSize = (int)((unsigned)len
                           + (unsigned)EXPAT_SAFE_PTR_DIFF(
                               parser->m_bufferEnd, parser->m_bufferPtr));
    if (neededSize < 0) {
      parser->m_errorCode = XML_ERROR_NO_MEMORY;
      return NULL;
    }
#ifdef XML_CONTEXT_BYTES
    /* keep = number of already-parsed bytes to retain as error context */
    keep = (int)EXPAT_SAFE_PTR_DIFF(parser->m_bufferPtr, parser->m_buffer);
    if (keep > XML_CONTEXT_BYTES)
      keep = XML_CONTEXT_BYTES;
    neededSize += keep;
#endif /* defined XML_CONTEXT_BYTES */
    if (neededSize
        <= EXPAT_SAFE_PTR_DIFF(parser->m_bufferLim, parser->m_buffer)) {
      /* The existing allocation is big enough: compact in place. */
#ifdef XML_CONTEXT_BYTES
      if (keep < EXPAT_SAFE_PTR_DIFF(parser->m_bufferPtr, parser->m_buffer)) {
        int offset
            = (int)EXPAT_SAFE_PTR_DIFF(parser->m_bufferPtr, parser->m_buffer)
              - keep;
        /* The buffer pointers cannot be NULL here; we have at least some bytes
         * in the buffer */
        memmove(parser->m_buffer, &parser->m_buffer[offset],
                parser->m_bufferEnd - parser->m_bufferPtr + keep);
        parser->m_bufferEnd -= offset;
        parser->m_bufferPtr -= offset;
      }
#else
      if (parser->m_buffer && parser->m_bufferPtr) {
        memmove(parser->m_buffer, parser->m_bufferPtr,
                EXPAT_SAFE_PTR_DIFF(parser->m_bufferEnd, parser->m_bufferPtr));
        parser->m_bufferEnd
            = parser->m_buffer
              + EXPAT_SAFE_PTR_DIFF(parser->m_bufferEnd, parser->m_bufferPtr);
        parser->m_bufferPtr = parser->m_buffer;
      }
#endif /* not defined XML_CONTEXT_BYTES */
    } else {
      /* Need a bigger allocation: double until it fits, then copy over. */
      char *newBuf;
      int bufferSize
          = (int)EXPAT_SAFE_PTR_DIFF(parser->m_bufferLim, parser->m_bufferPtr);
      if (bufferSize == 0)
        bufferSize = INIT_BUFFER_SIZE;
      do {
        /* Do not invoke signed arithmetic overflow: */
        bufferSize = (int)(2U * (unsigned)bufferSize);
      } while (bufferSize < neededSize && bufferSize > 0);
      if (bufferSize <= 0) {
        parser->m_errorCode = XML_ERROR_NO_MEMORY;
        return NULL;
      }
      newBuf = (char *)MALLOC(parser, bufferSize);
      if (newBuf == 0) {
        parser->m_errorCode = XML_ERROR_NO_MEMORY;
        return NULL;
      }
      parser->m_bufferLim = newBuf + bufferSize;
#ifdef XML_CONTEXT_BYTES
      if (parser->m_bufferPtr) {
        memcpy(newBuf, &parser->m_bufferPtr[-keep],
               EXPAT_SAFE_PTR_DIFF(parser->m_bufferEnd, parser->m_bufferPtr)
                   + keep);
        FREE(parser, parser->m_buffer);
        parser->m_buffer = newBuf;
        parser->m_bufferEnd
            = parser->m_buffer
              + EXPAT_SAFE_PTR_DIFF(parser->m_bufferEnd, parser->m_bufferPtr)
              + keep;
        parser->m_bufferPtr = parser->m_buffer + keep;
      } else {
        /* This must be a brand new buffer with no data in it yet */
        parser->m_bufferEnd = newBuf;
        parser->m_bufferPtr = parser->m_buffer = newBuf;
      }
#else
      if (parser->m_bufferPtr) {
        memcpy(newBuf, parser->m_bufferPtr,
               EXPAT_SAFE_PTR_DIFF(parser->m_bufferEnd, parser->m_bufferPtr));
        FREE(parser, parser->m_buffer);
        parser->m_bufferEnd
            = newBuf
              + EXPAT_SAFE_PTR_DIFF(parser->m_bufferEnd, parser->m_bufferPtr);
      } else {
        /* This must be a brand new buffer with no data in it yet */
        parser->m_bufferEnd = newBuf;
      }
      parser->m_bufferPtr = parser->m_buffer = newBuf;
#endif /* not defined XML_CONTEXT_BYTES */
    }
    parser->m_eventPtr = parser->m_eventEndPtr = NULL;
    parser->m_positionPtr = NULL;
  }
  return parser->m_bufferEnd;
}

/* Stop parsing, either resumably (suspend) or permanently (finish).
   Suspension inside an external parameter entity is not supported. */
enum XML_Status XMLCALL
XML_StopParser(XML_Parser parser, XML_Bool resumable) {
  if (parser == NULL)
    return XML_STATUS_ERROR;
  switch (parser->m_parsingStatus.parsing) {
  case XML_SUSPENDED:
    if (resumable) {
      parser->m_errorCode = XML_ERROR_SUSPENDED;
      return XML_STATUS_ERROR;
    }
    parser->m_parsingStatus.parsing = XML_FINISHED;
    break;
  case XML_FINISHED:
    parser->m_errorCode = XML_ERROR_FINISHED;
    return XML_STATUS_ERROR;
  default:
    if (resumable) {
#ifdef XML_DTD
      if (parser->m_isParamEntity) {
        parser->m_errorCode = XML_ERROR_SUSPEND_PE;
        return XML_STATUS_ERROR;
      }
#endif
      parser->m_parsingStatus.parsing = XML_SUSPENDED;
    } else
      parser->m_parsingStatus.parsing = XML_FINISHED;
  }
  return XML_STATUS_OK;
}

/* Resume a parser previously suspended by XML_StopParser. */
enum XML_Status XMLCALL
XML_ResumeParser(XML_Parser parser) {
  enum XML_Status result = XML_STATUS_OK;

  if (parser == NULL)
    return XML_STATUS_ERROR;
  if (parser->m_parsingStatus.parsing != XML_SUSPENDED) {
    parser->m_errorCode = XML_ERROR_NOT_SUSPENDED;
    return XML_STATUS_ERROR;
  }
  parser->m_parsingStatus.parsing = XML_PARSING;

  parser->m_errorCode = parser->m_processor(
      parser, parser->m_bufferPtr, parser->m_parseEndPtr, &parser->m_bufferPtr);

  if (parser->m_errorCode != XML_ERROR_NONE) {
    parser->m_eventEndPtr = parser->m_eventPtr;
    parser->m_processor = errorProcessor;
    return XML_STATUS_ERROR;
  } else {
    switch (parser->m_parsingStatus.parsing) {
    case XML_SUSPENDED:
      result = XML_STATUS_SUSPENDED;
      break;
 case XML_INITIALIZED:
    case XML_PARSING:
      if (parser->m_parsingStatus.finalBuffer) {
        parser->m_parsingStatus.parsing = XML_FINISHED;
        return result;
      }
    default:;
    }
  }

  XmlUpdatePosition(parser->m_encoding, parser->m_positionPtr,
                    parser->m_bufferPtr, &parser->m_position);
  parser->m_positionPtr = parser->m_bufferPtr;
  return result;
}

/* Copy the current parsing status (parsing state + finalBuffer flag). */
void XMLCALL
XML_GetParsingStatus(XML_Parser parser, XML_ParsingStatus *status) {
  if (parser == NULL)
    return;
  assert(status != NULL);
  *status = parser->m_parsingStatus;
}

/* Return the last error code recorded on the parser. */
enum XML_Error XMLCALL
XML_GetErrorCode(XML_Parser parser) {
  if (parser == NULL)
    return XML_ERROR_INVALID_ARGUMENT;
  return parser->m_errorCode;
}

/* Byte offset of the current event within the overall input, or -1. */
XML_Index XMLCALL
XML_GetCurrentByteIndex(XML_Parser parser) {
  if (parser == NULL)
    return -1;
  if (parser->m_eventPtr)
    return (XML_Index)(parser->m_parseEndByteIndex
                       - (parser->m_parseEndPtr - parser->m_eventPtr));
  return -1;
}

/* Length in bytes of the current event, or 0. */
int XMLCALL
XML_GetCurrentByteCount(XML_Parser parser) {
  if (parser == NULL)
    return 0;
  if (parser->m_eventEndPtr && parser->m_eventPtr)
    return (int)(parser->m_eventEndPtr - parser->m_eventPtr);
  return 0;
}

/* Expose the raw input buffer around the current event (only when built
   with XML_CONTEXT_BYTES); offset/size are optional out-parameters. */
const char *XMLCALL
XML_GetInputContext(XML_Parser parser, int *offset, int *size) {
#ifdef XML_CONTEXT_BYTES
  if (parser == NULL)
    return NULL;
  if (parser->m_eventPtr && parser->m_buffer) {
    if (offset != NULL)
      *offset = (int)(parser->m_eventPtr - parser->m_buffer);
    if (size != NULL)
      *size = (int)(parser->m_bufferEnd - parser->m_buffer);
    return parser->m_buffer;
  }
#else
  (void)parser;
  (void)offset;
  (void)size;
#endif /* defined XML_CONTEXT_BYTES */
  return (char *)0;
}

/* 1-based line number of the current event; updates the cached position
   lazily before reading it. */
XML_Size XMLCALL
XML_GetCurrentLineNumber(XML_Parser parser) {
  if (parser == NULL)
    return 0;
  if (parser->m_eventPtr && parser->m_eventPtr >= parser->m_positionPtr) {
    XmlUpdatePosition(parser->m_encoding, parser->m_positionPtr,
                      parser->m_eventPtr, &parser->m_position);
    parser->m_positionPtr = parser->m_eventPtr;
  }
  return parser->m_position.lineNumber + 1;
}

/* 0-based column number of the current event (lazy position update). */
XML_Size XMLCALL
XML_GetCurrentColumnNumber(XML_Parser parser) {
  if (parser == NULL)
    return 0;
  if (parser->m_eventPtr && parser->m_eventPtr >= parser->m_positionPtr) {
    XmlUpdatePosition(parser->m_encoding, parser->m_positionPtr,
                      parser->m_eventPtr, &parser->m_position);
    parser->m_positionPtr = parser->m_eventPtr;
  }
  return parser->m_position.columnNumber;
}

/* Free a content model allocated for an element-declaration handler. */
void XMLCALL
XML_FreeContentModel(XML_Parser parser, XML_Content *model) {
  if (parser != NULL)
    FREE(parser, model);
}

/* Allocation helpers that use the parser's memory-handling suite. */
void *XMLCALL
XML_MemMalloc(XML_Parser parser, size_t size) {
  if (parser == NULL)
    return NULL;
  return MALLOC(parser, size);
}

void *XMLCALL
XML_MemRealloc(XML_Parser parser, void *ptr, size_t size) {
  if (parser == NULL)
    return NULL;
  return REALLOC(parser, ptr, size);
}

void XMLCALL
XML_MemFree(XML_Parser parser, void *ptr) {
  if (parser != NULL)
    FREE(parser, ptr);
}

/* Pass the current event through to the default handler; picks the
   internal encoding/pointers when inside an internal entity. */
void XMLCALL
XML_DefaultCurrent(XML_Parser parser) {
  if (parser == NULL)
    return;
  if (parser->m_defaultHandler) {
    if (parser->m_openInternalEntities)
      reportDefault(parser, parser->m_internalEncoding,
                    parser->m_openInternalEntities->internalEventPtr,
                    parser->m_openInternalEntities->internalEventEndPtr);
    else
      reportDefault(parser, parser->m_encoding, parser->m_eventPtr,
                    parser->m_eventEndPtr);
  }
}

/* Map an XML_Error code to a human-readable message (NULL for
   XML_ERROR_NONE or unknown codes). */
const XML_LChar *XMLCALL
XML_ErrorString(enum XML_Error code) {
  switch (code) {
  case XML_ERROR_NONE:
    return NULL;
  case XML_ERROR_NO_MEMORY:
    return XML_L("out of memory");
  case XML_ERROR_SYNTAX:
    return XML_L("syntax error");
  case XML_ERROR_NO_ELEMENTS:
    return XML_L("no element found");
  case XML_ERROR_INVALID_TOKEN:
    return XML_L("not well-formed (invalid token)");
  case XML_ERROR_UNCLOSED_TOKEN:
    return XML_L("unclosed token");
  case XML_ERROR_PARTIAL_CHAR:
    return XML_L("partial character");
  case XML_ERROR_TAG_MISMATCH:
    return XML_L("mismatched tag");
  case XML_ERROR_DUPLICATE_ATTRIBUTE:
    return XML_L("duplicate attribute");
  case XML_ERROR_JUNK_AFTER_DOC_ELEMENT:
    return XML_L("junk after document element");
  case XML_ERROR_PARAM_ENTITY_REF:
    return XML_L("illegal parameter entity reference");
  case XML_ERROR_UNDEFINED_ENTITY:
    return XML_L("undefined entity");
  case XML_ERROR_RECURSIVE_ENTITY_REF:
    return XML_L("recursive entity reference");
  case XML_ERROR_ASYNC_ENTITY:
    return XML_L("asynchronous entity");
  case XML_ERROR_BAD_CHAR_REF:
    return XML_L("reference to invalid character number");
  case XML_ERROR_BINARY_ENTITY_REF:
    return XML_L("reference to binary entity");
  case XML_ERROR_ATTRIBUTE_EXTERNAL_ENTITY_REF:
    return XML_L("reference to external entity in attribute");
  case XML_ERROR_MISPLACED_XML_PI:
    return XML_L("XML or text declaration not at start of entity");
  case XML_ERROR_UNKNOWN_ENCODING:
    return XML_L("unknown encoding");
  case XML_ERROR_INCORRECT_ENCODING:
    return XML_L("encoding specified in XML declaration is incorrect");
  case XML_ERROR_UNCLOSED_CDATA_SECTION:
    return XML_L("unclosed CDATA section");
  case XML_ERROR_EXTERNAL_ENTITY_HANDLING:
    return XML_L("error in processing external entity reference");
  case XML_ERROR_NOT_STANDALONE:
    return XML_L("document is not standalone");
  case XML_ERROR_UNEXPECTED_STATE:
    return XML_L("unexpected parser state - please send a bug report");
  case XML_ERROR_ENTITY_DECLARED_IN_PE:
    return XML_L("entity declared in parameter entity");
  case XML_ERROR_FEATURE_REQUIRES_XML_DTD:
    return XML_L("requested feature requires XML_DTD support in Expat");
  case XML_ERROR_CANT_CHANGE_FEATURE_ONCE_PARSING:
    return XML_L("cannot change setting once parsing has begun");
  /* Added in 1.95.7. */
  case XML_ERROR_UNBOUND_PREFIX:
    return XML_L("unbound prefix");
  /* Added in 1.95.8. */
  case XML_ERROR_UNDECLARING_PREFIX:
    return XML_L("must not undeclare prefix");
  case XML_ERROR_INCOMPLETE_PE:
    return XML_L("incomplete markup in parameter entity");
  case XML_ERROR_XML_DECL:
    return XML_L("XML declaration not well-formed");
  case XML_ERROR_TEXT_DECL:
    return XML_L("text declaration not well-formed");
  case XML_ERROR_PUBLICID:
    return XML_L("illegal character(s) in public id");
  case XML_ERROR_SUSPENDED:
    return XML_L("parser suspended");
  case XML_ERROR_NOT_SUSPENDED:
    return XML_L("parser not suspended");
  case XML_ERROR_ABORTED:
    return XML_L("parsing aborted");
  case XML_ERROR_FINISHED:
    return XML_L("parsing finished");
  case XML_ERROR_SUSPEND_PE:
    return XML_L("cannot suspend in external parameter entity");
  /* Added in 2.0.0. */
  case XML_ERROR_RESERVED_PREFIX_XML:
    return XML_L(
        "reserved prefix (xml) must not be undeclared or bound to another namespace name");
  case XML_ERROR_RESERVED_PREFIX_XMLNS:
    return XML_L("reserved prefix (xmlns) must not be declared or undeclared");
  case XML_ERROR_RESERVED_NAMESPACE_URI:
    return XML_L(
        "prefix must not be bound to one of the reserved namespace names");
  /* Added in 2.2.5. */
  case XML_ERROR_INVALID_ARGUMENT: /* Constant added in 2.2.1, already */
    return XML_L("invalid argument");
  }
  return NULL;
}

/* Return the library version as a string, e.g. "expat_2.2.x". */
const XML_LChar *XMLCALL
XML_ExpatVersion(void) {
  /* V1 is used to string-ize the version number. However, it would
     string-ize the actual version macro *names* unless we get them
     substituted before being passed to V1. CPP is defined to expand
     a macro, then rescan for more expansions. Thus, we use V2 to expand
     the version macros, then CPP will expand the resulting V1() macro
     with the correct numerals. */
  /* ### I'm assuming cpp is portable in this respect...
*/ #define V1(a, b, c) XML_L(#a) XML_L(".") XML_L(#b) XML_L(".") XML_L(#c) #define V2(a, b, c) XML_L("expat_") V1(a, b, c) return V2(XML_MAJOR_VERSION, XML_MINOR_VERSION, XML_MICRO_VERSION); #undef V1 #undef V2 } XML_Expat_Version XMLCALL XML_ExpatVersionInfo(void) { XML_Expat_Version version; version.major = XML_MAJOR_VERSION; version.minor = XML_MINOR_VERSION; version.micro = XML_MICRO_VERSION; return version; } const XML_Feature *XMLCALL XML_GetFeatureList(void) { static const XML_Feature features[] = {{XML_FEATURE_SIZEOF_XML_CHAR, XML_L("sizeof(XML_Char)"), sizeof(XML_Char)}, {XML_FEATURE_SIZEOF_XML_LCHAR, XML_L("sizeof(XML_LChar)"), sizeof(XML_LChar)}, #ifdef XML_UNICODE {XML_FEATURE_UNICODE, XML_L("XML_UNICODE"), 0}, #endif #ifdef XML_UNICODE_WCHAR_T {XML_FEATURE_UNICODE_WCHAR_T, XML_L("XML_UNICODE_WCHAR_T"), 0}, #endif #ifdef XML_DTD {XML_FEATURE_DTD, XML_L("XML_DTD"), 0}, #endif #ifdef XML_CONTEXT_BYTES {XML_FEATURE_CONTEXT_BYTES, XML_L("XML_CONTEXT_BYTES"), XML_CONTEXT_BYTES}, #endif #ifdef XML_MIN_SIZE {XML_FEATURE_MIN_SIZE, XML_L("XML_MIN_SIZE"), 0}, #endif #ifdef XML_NS {XML_FEATURE_NS, XML_L("XML_NS"), 0}, #endif #ifdef XML_LARGE_SIZE {XML_FEATURE_LARGE_SIZE, XML_L("XML_LARGE_SIZE"), 0}, #endif #ifdef XML_ATTR_INFO {XML_FEATURE_ATTR_INFO, XML_L("XML_ATTR_INFO"), 0}, #endif {XML_FEATURE_END, NULL, 0}}; return features; } /* Initially tag->rawName always points into the parse buffer; for those TAG instances opened while the current parse buffer was processed, and not yet closed, we need to store tag->rawName in a more permanent location, since the parse buffer is about to be discarded. */ static XML_Bool storeRawNames(XML_Parser parser) { TAG *tag = parser->m_tagStack; while (tag) { int bufSize; int nameLen = sizeof(XML_Char) * (tag->name.strLen + 1); char *rawNameBuf = tag->buf + nameLen; /* Stop if already stored. 
Since m_tagStack is a stack, we can stop at the first entry that has already been copied; everything below it in the stack is already been accounted for in a previous call to this function. */ if (tag->rawName == rawNameBuf) break; /* For re-use purposes we need to ensure that the size of tag->buf is a multiple of sizeof(XML_Char). */ bufSize = nameLen + ROUND_UP(tag->rawNameLength, sizeof(XML_Char)); if (bufSize > tag->bufEnd - tag->buf) { char *temp = (char *)REALLOC(parser, tag->buf, bufSize); if (temp == NULL) return XML_FALSE; /* if tag->name.str points to tag->buf (only when namespace processing is off) then we have to update it */ if (tag->name.str == (XML_Char *)tag->buf) tag->name.str = (XML_Char *)temp; /* if tag->name.localPart is set (when namespace processing is on) then update it as well, since it will always point into tag->buf */ if (tag->name.localPart) tag->name.localPart = (XML_Char *)temp + (tag->name.localPart - (XML_Char *)tag->buf); tag->buf = temp; tag->bufEnd = temp + bufSize; rawNameBuf = temp + nameLen; } memcpy(rawNameBuf, tag->rawName, tag->rawNameLength); tag->rawName = rawNameBuf; tag = tag->parent; } return XML_TRUE; } static enum XML_Error PTRCALL contentProcessor(XML_Parser parser, const char *start, const char *end, const char **endPtr) { enum XML_Error result = doContent(parser, 0, parser->m_encoding, start, end, endPtr, (XML_Bool)! parser->m_parsingStatus.finalBuffer); if (result == XML_ERROR_NONE) { if (! 
storeRawNames(parser))
      return XML_ERROR_NO_MEMORY;
  }
  return result;
}

/* First processor for an external entity: establish the entity's
   encoding, then hand off to externalEntityInitProcessor2. */
static enum XML_Error PTRCALL
externalEntityInitProcessor(XML_Parser parser, const char *start,
                            const char *end, const char **endPtr) {
  enum XML_Error result = initializeEncoding(parser);
  if (result != XML_ERROR_NONE)
    return result;
  parser->m_processor = externalEntityInitProcessor2;
  return externalEntityInitProcessor2(parser, start, end, endPtr);
}

/* Second stage: consume a leading byte-order mark, if present, before
   externalEntityInitProcessor3 looks for the text declaration.  On a
   partial token with more input pending, record the resume point in
   *endPtr and return success so the caller can refill the buffer. */
static enum XML_Error PTRCALL
externalEntityInitProcessor2(XML_Parser parser, const char *start,
                             const char *end, const char **endPtr) {
  const char *next = start; /* XmlContentTok doesn't always set the last arg */
  int tok = XmlContentTok(parser->m_encoding, start, end, &next);
  switch (tok) {
  case XML_TOK_BOM:
    /* If we are at the end of the buffer, this would cause the next stage,
       i.e. externalEntityInitProcessor3, to pass control directly to
       doContent (by detecting XML_TOK_NONE) without processing any xml text
       declaration - causing the error XML_ERROR_MISPLACED_XML_PI in doContent.
    */
    if (next == end && ! parser->m_parsingStatus.finalBuffer) {
      *endPtr = next;
      return XML_ERROR_NONE;
    }
    start = next;
    break;
  case XML_TOK_PARTIAL:
    if (! parser->m_parsingStatus.finalBuffer) {
      *endPtr = start;
      return XML_ERROR_NONE;
    }
    parser->m_eventPtr = start;
    return XML_ERROR_UNCLOSED_TOKEN;
  case XML_TOK_PARTIAL_CHAR:
    if (! parser->m_parsingStatus.finalBuffer) {
      *endPtr = start;
      return XML_ERROR_NONE;
    }
    parser->m_eventPtr = start;
    return XML_ERROR_PARTIAL_CHAR;
  }
  parser->m_processor = externalEntityInitProcessor3;
  return externalEntityInitProcessor3(parser, start, end, endPtr);
}

/* Third stage: process an optional text declaration (<?xml ...?>) and
   honor any suspend/abort requested from its handler, then switch to
   the regular external-entity content processor at tag level 1. */
static enum XML_Error PTRCALL
externalEntityInitProcessor3(XML_Parser parser, const char *start,
                             const char *end, const char **endPtr) {
  int tok;
  const char *next = start; /* XmlContentTok doesn't always set the last arg */
  parser->m_eventPtr = start;
  tok = XmlContentTok(parser->m_encoding, start, end, &next);
  parser->m_eventEndPtr = next;

  switch (tok) {
  case XML_TOK_XML_DECL: {
    enum XML_Error result;
    result = processXmlDecl(parser, 1, start, next);
    if (result != XML_ERROR_NONE)
      return result;
    switch (parser->m_parsingStatus.parsing) {
    case XML_SUSPENDED:
      *endPtr = next;
      return XML_ERROR_NONE;
    case XML_FINISHED:
      return XML_ERROR_ABORTED;
    default:
      start = next;
    }
  } break;
  case XML_TOK_PARTIAL:
    if (! parser->m_parsingStatus.finalBuffer) {
      *endPtr = start;
      return XML_ERROR_NONE;
    }
    return XML_ERROR_UNCLOSED_TOKEN;
  case XML_TOK_PARTIAL_CHAR:
    if (! parser->m_parsingStatus.finalBuffer) {
      *endPtr = start;
      return XML_ERROR_NONE;
    }
    return XML_ERROR_PARTIAL_CHAR;
  }
  parser->m_processor = externalEntityContentProcessor;
  parser->m_tagLevel = 1;
  return externalEntityContentProcessor(parser, start, end, endPtr);
}

/* Drives doContent() for an external entity (startTagLevel == 1) and
   persists raw tag names before the input buffer is recycled. */
static enum XML_Error PTRCALL
externalEntityContentProcessor(XML_Parser parser, const char *start,
                               const char *end, const char **endPtr) {
  enum XML_Error result
      = doContent(parser, 1, parser->m_encoding, start, end, endPtr,
                  (XML_Bool)! parser->m_parsingStatus.finalBuffer);
  if (result == XML_ERROR_NONE) {
    if (!
storeRawNames(parser))
      return XML_ERROR_NO_MEMORY;
  }
  return result;
}

/* Core content dispatcher: repeatedly pulls one token from the input
   with XmlContentTok() and fires the matching handler callbacks,
   maintaining the open-tag stack (parser->m_tagStack) and tag level.
   startTagLevel is 0 for the document entity, non-zero inside entities;
   haveMore says whether more input may follow this buffer, in which
   case partial tokens cause a clean return with *nextPtr set to the
   resume point. */
static enum XML_Error
doContent(XML_Parser parser, int startTagLevel, const ENCODING *enc,
          const char *s, const char *end, const char **nextPtr,
          XML_Bool haveMore) {
  /* save one level of indirection */
  DTD *const dtd = parser->m_dtd;

  const char **eventPP;
  const char **eventEndPP;
  if (enc == parser->m_encoding) {
    eventPP = &parser->m_eventPtr;
    eventEndPP = &parser->m_eventEndPtr;
  } else {
    /* parsing an internal entity: event pointers live in its record */
    eventPP = &(parser->m_openInternalEntities->internalEventPtr);
    eventEndPP = &(parser->m_openInternalEntities->internalEventEndPtr);
  }
  *eventPP = s;

  for (;;) {
    const char *next = s; /* XmlContentTok doesn't always set the last arg */
    int tok = XmlContentTok(enc, s, end, &next);
    *eventEndPP = next;
    switch (tok) {
    case XML_TOK_TRAILING_CR:
      if (haveMore) {
        *nextPtr = s;
        return XML_ERROR_NONE;
      }
      *eventEndPP = end;
      if (parser->m_characterDataHandler) {
        XML_Char c = 0xA;
        parser->m_characterDataHandler(parser->m_handlerArg, &c, 1);
      } else if (parser->m_defaultHandler)
        reportDefault(parser, enc, s, end);
      /* We are at the end of the final buffer, should we check for
         XML_SUSPENDED, XML_FINISHED?
      */
      if (startTagLevel == 0)
        return XML_ERROR_NO_ELEMENTS;
      if (parser->m_tagLevel != startTagLevel)
        return XML_ERROR_ASYNC_ENTITY;
      *nextPtr = end;
      return XML_ERROR_NONE;
    case XML_TOK_NONE:
      if (haveMore) {
        *nextPtr = s;
        return XML_ERROR_NONE;
      }
      if (startTagLevel > 0) {
        if (parser->m_tagLevel != startTagLevel)
          return XML_ERROR_ASYNC_ENTITY;
        *nextPtr = s;
        return XML_ERROR_NONE;
      }
      return XML_ERROR_NO_ELEMENTS;
    case XML_TOK_INVALID:
      *eventPP = next;
      return XML_ERROR_INVALID_TOKEN;
    case XML_TOK_PARTIAL:
      if (haveMore) {
        *nextPtr = s;
        return XML_ERROR_NONE;
      }
      return XML_ERROR_UNCLOSED_TOKEN;
    case XML_TOK_PARTIAL_CHAR:
      if (haveMore) {
        *nextPtr = s;
        return XML_ERROR_NONE;
      }
      return XML_ERROR_PARTIAL_CHAR;
    case XML_TOK_ENTITY_REF: {
      const XML_Char *name;
      ENTITY *entity;
      /* the five built-in entities (&amp; &lt; &gt; &apos; &quot;)
         map directly to a single character */
      XML_Char ch = (XML_Char)XmlPredefinedEntityName(
          enc, s + enc->minBytesPerChar, next - enc->minBytesPerChar);
      if (ch) {
        if (parser->m_characterDataHandler)
          parser->m_characterDataHandler(parser->m_handlerArg, &ch, 1);
        else if (parser->m_defaultHandler)
          reportDefault(parser, enc, s, next);
        break;
      }
      name = poolStoreString(&dtd->pool, enc, s + enc->minBytesPerChar,
                             next - enc->minBytesPerChar);
      if (! name)
        return XML_ERROR_NO_MEMORY;
      entity = (ENTITY *)lookup(parser, &dtd->generalEntities, name, 0);
      poolDiscard(&dtd->pool);
      /* First, determine if a check for an existing declaration is needed;
         if yes, check that the entity exists, and that it is internal,
         otherwise call the skipped entity or default handler.
      */
      if (! dtd->hasParamEntityRefs || dtd->standalone) {
        if (! entity)
          return XML_ERROR_UNDEFINED_ENTITY;
        else if (! entity->is_internal)
          return XML_ERROR_ENTITY_DECLARED_IN_PE;
      } else if (! entity) {
        if (parser->m_skippedEntityHandler)
          parser->m_skippedEntityHandler(parser->m_handlerArg, name, 0);
        else if (parser->m_defaultHandler)
          reportDefault(parser, enc, s, next);
        break;
      }
      if (entity->open)
        return XML_ERROR_RECURSIVE_ENTITY_REF;
      if (entity->notation)
        return XML_ERROR_BINARY_ENTITY_REF;
      if (entity->textPtr) {
        enum XML_Error result;
        if (! parser->m_defaultExpandInternalEntities) {
          if (parser->m_skippedEntityHandler)
            parser->m_skippedEntityHandler(parser->m_handlerArg, entity->name,
                                           0);
          else if (parser->m_defaultHandler)
            reportDefault(parser, enc, s, next);
          break;
        }
        result = processInternalEntity(parser, entity, XML_FALSE);
        if (result != XML_ERROR_NONE)
          return result;
      } else if (parser->m_externalEntityRefHandler) {
        const XML_Char *context;
        /* mark open while building context to avoid self-reference */
        entity->open = XML_TRUE;
        context = getContext(parser);
        entity->open = XML_FALSE;
        if (! context)
          return XML_ERROR_NO_MEMORY;
        if (! parser->m_externalEntityRefHandler(
                parser->m_externalEntityRefHandlerArg, context, entity->base,
                entity->systemId, entity->publicId))
          return XML_ERROR_EXTERNAL_ENTITY_HANDLING;
        poolDiscard(&parser->m_tempPool);
      } else if (parser->m_defaultHandler)
        reportDefault(parser, enc, s, next);
      break;
    }
    case XML_TOK_START_TAG_NO_ATTS:
      /* fall through */
    case XML_TOK_START_TAG_WITH_ATTS: {
      TAG *tag;
      enum XML_Error result;
      XML_Char *toPtr;
      if (parser->m_freeTagList) {
        /* reuse a previously closed TAG (and its name buffer) */
        tag = parser->m_freeTagList;
        parser->m_freeTagList = parser->m_freeTagList->parent;
      } else {
        tag = (TAG *)MALLOC(parser, sizeof(TAG));
        if (! tag)
          return XML_ERROR_NO_MEMORY;
        tag->buf = (char *)MALLOC(parser, INIT_TAG_BUF_SIZE);
        if (! tag->buf) {
          FREE(parser, tag);
          return XML_ERROR_NO_MEMORY;
        }
        tag->bufEnd = tag->buf + INIT_TAG_BUF_SIZE;
      }
      tag->bindings = NULL;
      tag->parent = parser->m_tagStack;
      parser->m_tagStack = tag;
      tag->name.localPart = NULL;
      tag->name.prefix = NULL;
      tag->rawName = s + enc->minBytesPerChar;
      tag->rawNameLength = XmlNameLength(enc, tag->rawName);
      ++parser->m_tagLevel;
      {
        /* convert the raw name into tag->buf, doubling the buffer
           until the converted name fits */
        const char *rawNameEnd = tag->rawName + tag->rawNameLength;
        const char *fromPtr = tag->rawName;
        toPtr = (XML_Char *)tag->buf;
        for (;;) {
          int bufSize;
          int convLen;
          const enum XML_Convert_Result convert_res
              = XmlConvert(enc, &fromPtr, rawNameEnd, (ICHAR **)&toPtr,
                           (ICHAR *)tag->bufEnd - 1);
          convLen = (int)(toPtr - (XML_Char *)tag->buf);
          if ((fromPtr >= rawNameEnd)
              || (convert_res == XML_CONVERT_INPUT_INCOMPLETE)) {
            tag->name.strLen = convLen;
            break;
          }
          bufSize = (int)(tag->bufEnd - tag->buf) << 1;
          {
            char *temp = (char *)REALLOC(parser, tag->buf, bufSize);
            if (temp == NULL)
              return XML_ERROR_NO_MEMORY;
            tag->buf = temp;
            tag->bufEnd = temp + bufSize;
            toPtr = (XML_Char *)temp + convLen;
          }
        }
      }
      tag->name.str = (XML_Char *)tag->buf;
      *toPtr = XML_T('\0');
      result = storeAtts(parser, enc, s, &(tag->name), &(tag->bindings));
      if (result)
        return result;
      if (parser->m_startElementHandler)
        parser->m_startElementHandler(parser->m_handlerArg, tag->name.str,
                                      (const XML_Char **)parser->m_atts);
      else if (parser->m_defaultHandler)
        reportDefault(parser, enc, s, next);
      poolClear(&parser->m_tempPool);
      break;
    }
    case XML_TOK_EMPTY_ELEMENT_NO_ATTS:
      /* fall through */
    case XML_TOK_EMPTY_ELEMENT_WITH_ATTS: {
      /* <elem/> : start and end are reported without pushing a TAG */
      const char *rawName = s + enc->minBytesPerChar;
      enum XML_Error result;
      BINDING *bindings = NULL;
      XML_Bool noElmHandlers = XML_TRUE;
      TAG_NAME name;
      name.str = poolStoreString(&parser->m_tempPool, enc, rawName,
                                 rawName + XmlNameLength(enc, rawName));
      if (! name.str)
        return XML_ERROR_NO_MEMORY;
      poolFinish(&parser->m_tempPool);
      result = storeAtts(parser, enc, s, &name, &bindings);
      if (result != XML_ERROR_NONE) {
        freeBindings(parser, bindings);
        return result;
      }
      poolFinish(&parser->m_tempPool);
      if (parser->m_startElementHandler) {
        parser->m_startElementHandler(parser->m_handlerArg, name.str,
                                      (const XML_Char **)parser->m_atts);
        noElmHandlers = XML_FALSE;
      }
      if (parser->m_endElementHandler) {
        if (parser->m_startElementHandler)
          *eventPP = *eventEndPP;
        parser->m_endElementHandler(parser->m_handlerArg, name.str);
        noElmHandlers = XML_FALSE;
      }
      if (noElmHandlers && parser->m_defaultHandler)
        reportDefault(parser, enc, s, next);
      poolClear(&parser->m_tempPool);
      freeBindings(parser, bindings);
    }
      if ((parser->m_tagLevel == 0)
          && (parser->m_parsingStatus.parsing != XML_FINISHED)) {
        if (parser->m_parsingStatus.parsing == XML_SUSPENDED)
          parser->m_processor = epilogProcessor;
        else
          return epilogProcessor(parser, next, end, nextPtr);
      }
      break;
    case XML_TOK_END_TAG:
      if (parser->m_tagLevel == startTagLevel)
        return XML_ERROR_ASYNC_ENTITY;
      else {
        int len;
        const char *rawName;
        /* pop the tag stack onto the free list */
        TAG *tag = parser->m_tagStack;
        parser->m_tagStack = tag->parent;
        tag->parent = parser->m_freeTagList;
        parser->m_freeTagList = tag;
        rawName = s + enc->minBytesPerChar * 2;
        len = XmlNameLength(enc, rawName);
        if (len != tag->rawNameLength
            || memcmp(tag->rawName, rawName, len) != 0) {
          *eventPP = rawName;
          return XML_ERROR_TAG_MISMATCH;
        }
        --parser->m_tagLevel;
        if (parser->m_endElementHandler) {
          const XML_Char *localPart;
          const XML_Char *prefix;
          XML_Char *uri;
          localPart = tag->name.localPart;
          if (parser->m_ns && localPart) {
            /* localPart and prefix may have been overwritten in
               tag->name.str, since this points to the binding->uri
               buffer which gets re-used; so we have to add them again
            */
            uri = (XML_Char *)tag->name.str + tag->name.uriLen;
            /* don't need to check for space - already done in storeAtts() */
            while (*localPart)
              *uri++ = *localPart++;
            prefix = (XML_Char *)tag->name.prefix;
            if (parser->m_ns_triplets && prefix) {
              *uri++ = parser->m_namespaceSeparator;
              while (*prefix)
                *uri++ = *prefix++;
            }
            *uri = XML_T('\0');
          }
          parser->m_endElementHandler(parser->m_handlerArg, tag->name.str);
        } else if (parser->m_defaultHandler)
          reportDefault(parser, enc, s, next);
        /* unwind namespace bindings opened by this element */
        while (tag->bindings) {
          BINDING *b = tag->bindings;
          if (parser->m_endNamespaceDeclHandler)
            parser->m_endNamespaceDeclHandler(parser->m_handlerArg,
                                              b->prefix->name);
          tag->bindings = tag->bindings->nextTagBinding;
          b->nextTagBinding = parser->m_freeBindingList;
          parser->m_freeBindingList = b;
          b->prefix->binding = b->prevPrefixBinding;
        }
        if ((parser->m_tagLevel == 0)
            && (parser->m_parsingStatus.parsing != XML_FINISHED)) {
          if (parser->m_parsingStatus.parsing == XML_SUSPENDED)
            parser->m_processor = epilogProcessor;
          else
            return epilogProcessor(parser, next, end, nextPtr);
        }
      }
      break;
    case XML_TOK_CHAR_REF: {
      int n = XmlCharRefNumber(enc, s);
      if (n < 0)
        return XML_ERROR_BAD_CHAR_REF;
      if (parser->m_characterDataHandler) {
        XML_Char buf[XML_ENCODE_MAX];
        parser->m_characterDataHandler(parser->m_handlerArg, buf,
                                       XmlEncode(n, (ICHAR *)buf));
      } else if (parser->m_defaultHandler)
        reportDefault(parser, enc, s, next);
    } break;
    case XML_TOK_XML_DECL:
      return XML_ERROR_MISPLACED_XML_PI;
    case XML_TOK_DATA_NEWLINE:
      if (parser->m_characterDataHandler) {
        XML_Char c = 0xA;
        parser->m_characterDataHandler(parser->m_handlerArg, &c, 1);
      } else if (parser->m_defaultHandler)
        reportDefault(parser, enc, s, next);
      break;
    case XML_TOK_CDATA_SECT_OPEN: {
      enum XML_Error result;
      if (parser->m_startCdataSectionHandler)
        parser->m_startCdataSectionHandler(parser->m_handlerArg);
      /* BEGIN disabled code */
      /* Suppose you doing a transformation on a document that involves
         changing only the character data.  You set up a defaultHandler
         and a characterDataHandler.  The defaultHandler simply copies
         characters through.  The characterDataHandler does the
         transformation and writes the characters out escaping them as
         necessary.  This case will fail to work if we leave out the
         following two lines (because & and < inside CDATA sections will
         be incorrectly escaped).

         However, now we have a start/endCdataSectionHandler, so it seems
         easier to let the user deal with this.
      */
      else if (0 && parser->m_characterDataHandler)
        parser->m_characterDataHandler(parser->m_handlerArg,
                                       parser->m_dataBuf, 0);
      /* END disabled code */
      else if (parser->m_defaultHandler)
        reportDefault(parser, enc, s, next);
      result = doCdataSection(parser, enc, &next, end, nextPtr, haveMore);
      if (result != XML_ERROR_NONE)
        return result;
      else if (! next) {
        /* section spans the buffer end: resume via the CDATA processor */
        parser->m_processor = cdataSectionProcessor;
        return result;
      }
    } break;
    case XML_TOK_TRAILING_RSQB:
      if (haveMore) {
        *nextPtr = s;
        return XML_ERROR_NONE;
      }
      if (parser->m_characterDataHandler) {
        if (MUST_CONVERT(enc, s)) {
          ICHAR *dataPtr = (ICHAR *)parser->m_dataBuf;
          XmlConvert(enc, &s, end, &dataPtr, (ICHAR *)parser->m_dataBufEnd);
          parser->m_characterDataHandler(
              parser->m_handlerArg, parser->m_dataBuf,
              (int)(dataPtr - (ICHAR *)parser->m_dataBuf));
        } else
          parser->m_characterDataHandler(
              parser->m_handlerArg, (XML_Char *)s,
              (int)((XML_Char *)end - (XML_Char *)s));
      } else if (parser->m_defaultHandler)
        reportDefault(parser, enc, s, end);
      /* We are at the end of the final buffer, should we check for
         XML_SUSPENDED, XML_FINISHED?
      */
      if (startTagLevel == 0) {
        *eventPP = end;
        return XML_ERROR_NO_ELEMENTS;
      }
      if (parser->m_tagLevel != startTagLevel) {
        *eventPP = end;
        return XML_ERROR_ASYNC_ENTITY;
      }
      *nextPtr = end;
      return XML_ERROR_NONE;
    case XML_TOK_DATA_CHARS: {
      XML_CharacterDataHandler charDataHandler = parser->m_characterDataHandler;
      if (charDataHandler) {
        if (MUST_CONVERT(enc, s)) {
          /* the encoding differs from XML_Char: convert through
             m_dataBuf in chunks, reporting each chunk */
          for (;;) {
            ICHAR *dataPtr = (ICHAR *)parser->m_dataBuf;
            const enum XML_Convert_Result convert_res = XmlConvert(
                enc, &s, next, &dataPtr, (ICHAR *)parser->m_dataBufEnd);
            *eventEndPP = s;
            charDataHandler(parser->m_handlerArg, parser->m_dataBuf,
                            (int)(dataPtr - (ICHAR *)parser->m_dataBuf));
            if ((convert_res == XML_CONVERT_COMPLETED)
                || (convert_res == XML_CONVERT_INPUT_INCOMPLETE))
              break;
            *eventPP = s;
          }
        } else
          charDataHandler(parser->m_handlerArg, (XML_Char *)s,
                          (int)((XML_Char *)next - (XML_Char *)s));
      } else if (parser->m_defaultHandler)
        reportDefault(parser, enc, s, next);
    } break;
    case XML_TOK_PI:
      if (! reportProcessingInstruction(parser, enc, s, next))
        return XML_ERROR_NO_MEMORY;
      break;
    case XML_TOK_COMMENT:
      if (! reportComment(parser, enc, s, next))
        return XML_ERROR_NO_MEMORY;
      break;
    default:
      /* All of the tokens produced by XmlContentTok() have their own
       * explicit cases, so this default is not strictly necessary.
       * However it is a useful safety net, so we retain the code and
       * simply exclude it from the coverage tests.
       *
       * LCOV_EXCL_START */
      if (parser->m_defaultHandler)
        reportDefault(parser, enc, s, next);
      break;
      /* LCOV_EXCL_STOP */
    }
    *eventPP = s = next;
    switch (parser->m_parsingStatus.parsing) {
    case XML_SUSPENDED:
      *nextPtr = next;
      return XML_ERROR_NONE;
    case XML_FINISHED:
      return XML_ERROR_ABORTED;
    default:;
    }
  }
  /* not reached */
}

/* This function does not call free() on the allocated memory, merely
 * moving it to the parser's m_freeBindingList where it can be freed or
 * reused as appropriate.
 */
static void
freeBindings(XML_Parser parser, BINDING *bindings) {
  while (bindings) {
    BINDING *b = bindings;

    /* m_startNamespaceDeclHandler will have been called for this
     * binding in addBindings(), so call the end handler now.
     */
    if (parser->m_endNamespaceDeclHandler)
      parser->m_endNamespaceDeclHandler(parser->m_handlerArg, b->prefix->name);

    bindings = bindings->nextTagBinding;
    b->nextTagBinding = parser->m_freeBindingList;
    parser->m_freeBindingList = b;
    b->prefix->binding = b->prevPrefixBinding;
  }
}

/* Precondition: all arguments must be non-NULL;
   Purpose:
   - normalize attributes
   - check attributes for well-formedness
   - generate namespace aware attribute names (URI, prefix)
   - build list of attributes for startElementHandler
   - default attributes
   - process namespace declarations (check and report them)
   - generate namespace aware element name (URI, prefix)

   Note: the byte at name[-1] of each ATTRIBUTE_ID name is used as a
   scratch flag (0 = unseen, 1 = seen, 2 = seen and prefixed) to detect
   duplicates; all flags are cleared again before returning. */
static enum XML_Error
storeAtts(XML_Parser parser, const ENCODING *enc, const char *attStr,
          TAG_NAME *tagNamePtr, BINDING **bindingsPtr) {
  DTD *const dtd = parser->m_dtd; /* save one level of indirection */
  ELEMENT_TYPE *elementType;
  int nDefaultAtts;
  const XML_Char **appAtts; /* the attribute list for the application */
  int attIndex = 0;
  int prefixLen;
  int i;
  int n;
  XML_Char *uri;
  int nPrefixes = 0;
  BINDING *binding;
  const XML_Char *localPart;

  /* lookup the element type name */
  elementType
      = (ELEMENT_TYPE *)lookup(parser, &dtd->elementTypes, tagNamePtr->str, 0);
  if (! elementType) {
    const XML_Char *name = poolCopyString(&dtd->pool, tagNamePtr->str);
    if (! name)
      return XML_ERROR_NO_MEMORY;
    elementType = (ELEMENT_TYPE *)lookup(parser, &dtd->elementTypes, name,
                                         sizeof(ELEMENT_TYPE));
    if (! elementType)
      return XML_ERROR_NO_MEMORY;
    if (parser->m_ns && ! setElementTypePrefix(parser, elementType))
      return XML_ERROR_NO_MEMORY;
  }
  nDefaultAtts = elementType->nDefaultAtts;

  /* get the attributes from the tokenizer */
  n = XmlGetAttributes(enc, attStr, parser->m_attsSize, parser->m_atts);
  if (n + nDefaultAtts > parser->m_attsSize) {
    /* grow m_atts (and m_attInfo) then re-fetch the attributes */
    int oldAttsSize = parser->m_attsSize;
    ATTRIBUTE *temp;
#ifdef XML_ATTR_INFO
    XML_AttrInfo *temp2;
#endif
    parser->m_attsSize = n + nDefaultAtts + INIT_ATTS_SIZE;
    temp = (ATTRIBUTE *)REALLOC(parser, (void *)parser->m_atts,
                                parser->m_attsSize * sizeof(ATTRIBUTE));
    if (temp == NULL) {
      parser->m_attsSize = oldAttsSize;
      return XML_ERROR_NO_MEMORY;
    }
    parser->m_atts = temp;
#ifdef XML_ATTR_INFO
    temp2 = (XML_AttrInfo *)REALLOC(parser, (void *)parser->m_attInfo,
                                    parser->m_attsSize * sizeof(XML_AttrInfo));
    if (temp2 == NULL) {
      parser->m_attsSize = oldAttsSize;
      return XML_ERROR_NO_MEMORY;
    }
    parser->m_attInfo = temp2;
#endif
    if (n > oldAttsSize)
      XmlGetAttributes(enc, attStr, n, parser->m_atts);
  }

  appAtts = (const XML_Char **)parser->m_atts;
  for (i = 0; i < n; i++) {
    ATTRIBUTE *currAtt = &parser->m_atts[i];
#ifdef XML_ATTR_INFO
    XML_AttrInfo *currAttInfo = &parser->m_attInfo[i];
#endif
    /* add the name and value to the attribute list */
    ATTRIBUTE_ID *attId
        = getAttributeId(parser, enc, currAtt->name,
                         currAtt->name + XmlNameLength(enc, currAtt->name));
    if (! attId)
      return XML_ERROR_NO_MEMORY;
#ifdef XML_ATTR_INFO
    currAttInfo->nameStart
        = parser->m_parseEndByteIndex - (parser->m_parseEndPtr - currAtt->name);
    currAttInfo->nameEnd
        = currAttInfo->nameStart + XmlNameLength(enc, currAtt->name);
    currAttInfo->valueStart = parser->m_parseEndByteIndex
                              - (parser->m_parseEndPtr - currAtt->valuePtr);
    currAttInfo->valueEnd = parser->m_parseEndByteIndex
                            - (parser->m_parseEndPtr - currAtt->valueEnd);
#endif
    /* Detect duplicate attributes by their QNames. This does not work when
       namespace processing is turned on and different prefixes for the same
       namespace are used. For this case we have a check further down.
    */
    if ((attId->name)[-1]) {
      if (enc == parser->m_encoding)
        parser->m_eventPtr = parser->m_atts[i].name;
      return XML_ERROR_DUPLICATE_ATTRIBUTE;
    }
    (attId->name)[-1] = 1;
    appAtts[attIndex++] = attId->name;
    if (! parser->m_atts[i].normalized) {
      enum XML_Error result;
      XML_Bool isCdata = XML_TRUE;

      /* figure out whether declared as other than CDATA */
      if (attId->maybeTokenized) {
        int j;
        for (j = 0; j < nDefaultAtts; j++) {
          if (attId == elementType->defaultAtts[j].id) {
            isCdata = elementType->defaultAtts[j].isCdata;
            break;
          }
        }
      }

      /* normalize the attribute value */
      result = storeAttributeValue(
          parser, enc, isCdata, parser->m_atts[i].valuePtr,
          parser->m_atts[i].valueEnd, &parser->m_tempPool);
      if (result)
        return result;
      appAtts[attIndex] = poolStart(&parser->m_tempPool);
      poolFinish(&parser->m_tempPool);
    } else {
      /* the value did not need normalizing */
      appAtts[attIndex]
          = poolStoreString(&parser->m_tempPool, enc,
                            parser->m_atts[i].valuePtr,
                            parser->m_atts[i].valueEnd);
      if (appAtts[attIndex] == 0)
        return XML_ERROR_NO_MEMORY;
      poolFinish(&parser->m_tempPool);
    }
    /* handle prefixed attribute names */
    if (attId->prefix) {
      if (attId->xmlns) {
        /* deal with namespace declarations here */
        enum XML_Error result = addBinding(parser, attId->prefix, attId,
                                           appAtts[attIndex], bindingsPtr);
        if (result)
          return result;
        --attIndex;
      } else {
        /* deal with other prefixed names later */
        attIndex++;
        nPrefixes++;
        (attId->name)[-1] = 2;
      }
    } else
      attIndex++;
  }

  /* set-up for XML_GetSpecifiedAttributeCount and XML_GetIdAttributeIndex */
  parser->m_nSpecifiedAtts = attIndex;
  if (elementType->idAtt && (elementType->idAtt->name)[-1]) {
    for (i = 0; i < attIndex; i += 2)
      if (appAtts[i] == elementType->idAtt->name) {
        parser->m_idAttIndex = i;
        break;
      }
  } else
    parser->m_idAttIndex = -1;

  /* do attribute defaulting */
  for (i = 0; i < nDefaultAtts; i++) {
    const DEFAULT_ATTRIBUTE *da = elementType->defaultAtts + i;
    if (! (da->id->name)[-1] && da->value) {
      if (da->id->prefix) {
        if (da->id->xmlns) {
          enum XML_Error result = addBinding(parser, da->id->prefix, da->id,
                                             da->value, bindingsPtr);
          if (result)
            return result;
        } else {
          (da->id->name)[-1] = 2;
          nPrefixes++;
          appAtts[attIndex++] = da->id->name;
          appAtts[attIndex++] = da->value;
        }
      } else {
        (da->id->name)[-1] = 1;
        appAtts[attIndex++] = da->id->name;
        appAtts[attIndex++] = da->value;
      }
    }
  }
  appAtts[attIndex] = 0;

  /* expand prefixed attribute names, check for duplicates,
     and clear flags that say whether attributes were specified */
  i = 0;
  if (nPrefixes) {
    int j; /* hash table index */
    unsigned long version = parser->m_nsAttsVersion;
    int nsAttsSize = (int)1 << parser->m_nsAttsPower;
    unsigned char oldNsAttsPower = parser->m_nsAttsPower;
    /* size of hash table must be at least 2 * (# of prefixed attributes) */
    if ((nPrefixes << 1)
        >> parser->m_nsAttsPower) { /* true for m_nsAttsPower = 0 */
      NS_ATT *temp;
      /* hash table size must also be a power of 2 and >= 8 */
      while (nPrefixes >> parser->m_nsAttsPower++)
        ;
      if (parser->m_nsAttsPower < 3)
        parser->m_nsAttsPower = 3;
      nsAttsSize = (int)1 << parser->m_nsAttsPower;
      temp = (NS_ATT *)REALLOC(parser, parser->m_nsAtts,
                               nsAttsSize * sizeof(NS_ATT));
      if (! temp) {
        /* Restore actual size of memory in m_nsAtts */
        parser->m_nsAttsPower = oldNsAttsPower;
        return XML_ERROR_NO_MEMORY;
      }
      parser->m_nsAtts = temp;
      version = 0; /* force re-initialization of m_nsAtts hash table */
    }
    /* using a version flag saves us from initializing m_nsAtts every time */
    if (! version) { /* initialize version flags when version wraps around */
      version = INIT_ATTS_VERSION;
      for (j = nsAttsSize; j != 0;)
        parser->m_nsAtts[--j].version = version;
    }
    parser->m_nsAttsVersion = --version;

    /* expand prefixed names and check for duplicates */
    for (; i < attIndex; i += 2) {
      const XML_Char *s = appAtts[i];
      if (s[-1] == 2) { /* prefixed */
        ATTRIBUTE_ID *id;
        const BINDING *b;
        unsigned long uriHash;
        struct siphash sip_state;
        struct sipkey sip_key;

        copy_salt_to_sipkey(parser, &sip_key);
        sip24_init(&sip_state, &sip_key);

        ((XML_Char *)s)[-1] = 0; /* clear flag */
        id = (ATTRIBUTE_ID *)lookup(parser, &dtd->attributeIds, s, 0);
        if (! id || ! id->prefix) {
          /* This code is walking through the appAtts array, dealing
           * with (in this case) a prefixed attribute name.  To be in
           * the array, the attribute must have already been bound, so
           * has to have passed through the hash table lookup once
           * already.  That implies that an entry for it already
           * exists, so the lookup above will return a pointer to
           * already allocated memory.  There is no opportunaity for
           * the allocator to fail, so the condition above cannot be
           * fulfilled.
           *
           * Since it is difficult to be certain that the above
           * analysis is complete, we retain the test and merely
           * remove the code from coverage tests.
           */
          return XML_ERROR_NO_MEMORY; /* LCOV_EXCL_LINE */
        }
        b = id->prefix->binding;
        if (! b)
          return XML_ERROR_UNBOUND_PREFIX;

        /* build uri + localname in the temp pool while hashing it */
        for (j = 0; j < b->uriLen; j++) {
          const XML_Char c = b->uri[j];
          if (! poolAppendChar(&parser->m_tempPool, c))
            return XML_ERROR_NO_MEMORY;
        }

        sip24_update(&sip_state, b->uri, b->uriLen * sizeof(XML_Char));

        while (*s++ != XML_T(ASCII_COLON))
          ;

        sip24_update(&sip_state, s, keylen(s) * sizeof(XML_Char));

        do { /* copies null terminator */
          if (! poolAppendChar(&parser->m_tempPool, *s))
            return XML_ERROR_NO_MEMORY;
        } while (*s++);

        uriHash = (unsigned long)sip24_final(&sip_state);

        { /* Check hash table for duplicate of expanded name (uriName).
             Derived from code in lookup(parser, HASH_TABLE *table, ...).
          */
          unsigned char step = 0;
          unsigned long mask = nsAttsSize - 1;
          j = uriHash & mask; /* index into hash table */
          while (parser->m_nsAtts[j].version == version) {
            /* for speed we compare stored hash values first */
            if (uriHash == parser->m_nsAtts[j].hash) {
              const XML_Char *s1 = poolStart(&parser->m_tempPool);
              const XML_Char *s2 = parser->m_nsAtts[j].uriName;
              /* s1 is null terminated, but not s2 */
              for (; *s1 == *s2 && *s1 != 0; s1++, s2++)
                ;
              if (*s1 == 0)
                return XML_ERROR_DUPLICATE_ATTRIBUTE;
            }
            if (! step)
              step = PROBE_STEP(uriHash, mask, parser->m_nsAttsPower);
            j < step ? (j += nsAttsSize - step) : (j -= step);
          }
        }

        if (parser->m_ns_triplets) { /* append namespace separator and prefix */
          parser->m_tempPool.ptr[-1] = parser->m_namespaceSeparator;
          s = b->prefix->name;
          do {
            if (! poolAppendChar(&parser->m_tempPool, *s))
              return XML_ERROR_NO_MEMORY;
          } while (*s++);
        }

        /* store expanded name in attribute list */
        s = poolStart(&parser->m_tempPool);
        poolFinish(&parser->m_tempPool);
        appAtts[i] = s;

        /* fill empty slot with new version, uriName and hash value */
        parser->m_nsAtts[j].version = version;
        parser->m_nsAtts[j].hash = uriHash;
        parser->m_nsAtts[j].uriName = s;

        if (! --nPrefixes) {
          i += 2;
          break;
        }
      } else /* not prefixed */
        ((XML_Char *)s)[-1] = 0; /* clear flag */
    }
  }
  /* clear flags for the remaining attributes */
  for (; i < attIndex; i += 2)
    ((XML_Char *)(appAtts[i]))[-1] = 0;
  for (binding = *bindingsPtr; binding; binding = binding->nextTagBinding)
    binding->attId->name[-1] = 0;

  if (! parser->m_ns)
    return XML_ERROR_NONE;

  /* expand the element type name */
  if (elementType->prefix) {
    binding = elementType->prefix->binding;
    if (! binding)
      return XML_ERROR_UNBOUND_PREFIX;
    localPart = tagNamePtr->str;
    while (*localPart++ != XML_T(ASCII_COLON))
      ;
  } else if (dtd->defaultPrefix.binding) {
    binding = dtd->defaultPrefix.binding;
    localPart = tagNamePtr->str;
  } else
    return XML_ERROR_NONE;
  prefixLen = 0;
  if (parser->m_ns_triplets && binding->prefix->name) {
    for (; binding->prefix->name[prefixLen++];)
      ; /* prefixLen includes null terminator */
  }
  tagNamePtr->localPart = localPart;
  tagNamePtr->uriLen = binding->uriLen;
  tagNamePtr->prefix = binding->prefix->name;
  tagNamePtr->prefixLen = prefixLen;
  for (i = 0; localPart[i++];)
    ; /* i includes null terminator */
  n = i + binding->uriLen + prefixLen;
  if (n > binding->uriAlloc) {
    /* grow the binding's uri buffer and repoint any tag names that
       aliased the old buffer */
    TAG *p;
    uri = (XML_Char *)MALLOC(parser, (n + EXPAND_SPARE) * sizeof(XML_Char));
    if (! uri)
      return XML_ERROR_NO_MEMORY;
    binding->uriAlloc = n + EXPAND_SPARE;
    memcpy(uri, binding->uri, binding->uriLen * sizeof(XML_Char));
    for (p = parser->m_tagStack; p; p = p->parent)
      if (p->name.str == binding->uri)
        p->name.str = uri;
    FREE(parser, binding->uri);
    binding->uri = uri;
  }
  /* if m_namespaceSeparator != '\0' then uri includes it already */
  uri = binding->uri + binding->uriLen;
  memcpy(uri, localPart, i * sizeof(XML_Char));
  /* we always have a namespace separator between localPart and prefix */
  if (prefixLen) {
    uri += i - 1;
    *uri = parser->m_namespaceSeparator; /* replace null terminator */
    memcpy(uri + 1, binding->prefix->name, prefixLen * sizeof(XML_Char));
  }
  tagNamePtr->str = binding->uri;
  return XML_ERROR_NONE;
}

/* addBinding() overwrites the value of prefix->binding without checking.
   Therefore one must keep track of the old value outside of addBinding().
*/
/* Create (or recycle from the free list) a BINDING that maps `prefix` to
   namespace `uri`, push it onto *bindingsPtr and install it as the prefix's
   current binding.  Enforces the reservation rules for the "xml"/"xmlns"
   prefixes and their fixed namespace URIs, and fires the
   startNamespaceDeclHandler when this binding opens a namespace scope
   (attId != NULL).  Returns an XML_Error code. */
static enum XML_Error
addBinding(XML_Parser parser, PREFIX *prefix, const ATTRIBUTE_ID *attId,
           const XML_Char *uri, BINDING **bindingsPtr) {
  /* The two reserved namespace URIs, spelled with ASCII_* constants so the
     tables are correct for both narrow and XML_UNICODE builds. */
  static const XML_Char xmlNamespace[]
      = {ASCII_h, ASCII_t, ASCII_t, ASCII_p, ASCII_COLON, ASCII_SLASH,
         ASCII_SLASH, ASCII_w, ASCII_w, ASCII_w, ASCII_PERIOD, ASCII_w,
         ASCII_3, ASCII_PERIOD, ASCII_o, ASCII_r, ASCII_g, ASCII_SLASH,
         ASCII_X, ASCII_M, ASCII_L, ASCII_SLASH, ASCII_1, ASCII_9, ASCII_9,
         ASCII_8, ASCII_SLASH, ASCII_n, ASCII_a, ASCII_m, ASCII_e, ASCII_s,
         ASCII_p, ASCII_a, ASCII_c, ASCII_e, '\0'};
  static const int xmlLen = (int)sizeof(xmlNamespace) / sizeof(XML_Char) - 1;
  static const XML_Char xmlnsNamespace[]
      = {ASCII_h, ASCII_t, ASCII_t, ASCII_p, ASCII_COLON, ASCII_SLASH,
         ASCII_SLASH, ASCII_w, ASCII_w, ASCII_w, ASCII_PERIOD, ASCII_w,
         ASCII_3, ASCII_PERIOD, ASCII_o, ASCII_r, ASCII_g, ASCII_SLASH,
         ASCII_2, ASCII_0, ASCII_0, ASCII_0, ASCII_SLASH, ASCII_x, ASCII_m,
         ASCII_l, ASCII_n, ASCII_s, ASCII_SLASH, '\0'};
  static const int xmlnsLen
      = (int)sizeof(xmlnsNamespace) / sizeof(XML_Char) - 1;
  XML_Bool mustBeXML = XML_FALSE;
  XML_Bool isXML = XML_TRUE;
  XML_Bool isXMLNS = XML_TRUE;
  BINDING *b;
  int len;

  /* empty URI is only valid for default namespace per XML NS 1.0 (not 1.1) */
  if (*uri == XML_T('\0') && prefix->name)
    return XML_ERROR_UNDECLARING_PREFIX;

  if (prefix->name && prefix->name[0] == XML_T(ASCII_x)
      && prefix->name[1] == XML_T(ASCII_m)
      && prefix->name[2] == XML_T(ASCII_l)) {
    /* Not allowed to bind xmlns */
    if (prefix->name[3] == XML_T(ASCII_n) && prefix->name[4] == XML_T(ASCII_s)
        && prefix->name[5] == XML_T('\0'))
      return XML_ERROR_RESERVED_PREFIX_XMLNS;

    /* exact prefix "xml": its URI must be the reserved xmlNamespace below */
    if (prefix->name[3] == XML_T('\0'))
      mustBeXML = XML_TRUE;
  }

  /* single pass: compute len and compare uri against both reserved URIs */
  for (len = 0; uri[len]; len++) {
    if (isXML && (len > xmlLen || uri[len] != xmlNamespace[len]))
      isXML = XML_FALSE;

    if (! mustBeXML && isXMLNS
        && (len > xmlnsLen || uri[len] != xmlnsNamespace[len]))
      isXMLNS = XML_FALSE;
  }
  isXML = isXML && len == xmlLen;
  isXMLNS = isXMLNS && len == xmlnsLen;

  /* "xml" must be bound to its URI and nothing else may be */
  if (mustBeXML != isXML)
    return mustBeXML ? XML_ERROR_RESERVED_PREFIX_XML
                     : XML_ERROR_RESERVED_NAMESPACE_URI;

  /* the xmlns namespace URI may never be bound explicitly */
  if (isXMLNS)
    return XML_ERROR_RESERVED_NAMESPACE_URI;

  /* reserve room for the separator that is appended after the URI */
  if (parser->m_namespaceSeparator)
    len++;
  if (parser->m_freeBindingList) {
    /* reuse a previously released BINDING, growing its buffer if needed */
    b = parser->m_freeBindingList;
    if (len > b->uriAlloc) {
      XML_Char *temp = (XML_Char *)REALLOC(
          parser, b->uri, sizeof(XML_Char) * (len + EXPAND_SPARE));
      if (temp == NULL)
        return XML_ERROR_NO_MEMORY;
      b->uri = temp;
      b->uriAlloc = len + EXPAND_SPARE;
    }
    parser->m_freeBindingList = b->nextTagBinding;
  } else {
    b = (BINDING *)MALLOC(parser, sizeof(BINDING));
    if (! b)
      return XML_ERROR_NO_MEMORY;
    b->uri
        = (XML_Char *)MALLOC(parser, sizeof(XML_Char) * (len + EXPAND_SPARE));
    if (! b->uri) {
      FREE(parser, b);
      return XML_ERROR_NO_MEMORY;
    }
    b->uriAlloc = len + EXPAND_SPARE;
  }
  b->uriLen = len;
  memcpy(b->uri, uri, len * sizeof(XML_Char));
  if (parser->m_namespaceSeparator)
    b->uri[len - 1] = parser->m_namespaceSeparator;
  b->prefix = prefix;
  b->attId = attId;
  /* remember the shadowed binding so it can be restored when the scope ends */
  b->prevPrefixBinding = prefix->binding;
  /* NULL binding when default namespace undeclared */
  if (*uri == XML_T('\0') && prefix == &parser->m_dtd->defaultPrefix)
    prefix->binding = NULL;
  else
    prefix->binding = b;
  b->nextTagBinding = *bindingsPtr;
  *bindingsPtr = b;
  /* if attId == NULL then we are not starting a namespace scope */
  if (attId && parser->m_startNamespaceDeclHandler)
    parser->m_startNamespaceDeclHandler(parser->m_handlerArg, prefix->name,
                                        prefix->binding ? uri : 0);
  return XML_ERROR_NONE;
}

/* The idea here is to avoid using stack for each CDATA section when
   the whole file is parsed with one call.
*/
/* Processor installed while inside a CDATA section: run doCdataSection and,
   once the section closes (start != NULL), hand control back to the content
   processor appropriate for the current (external-entity or main) context. */
static enum XML_Error PTRCALL
cdataSectionProcessor(XML_Parser parser, const char *start, const char *end,
                      const char **endPtr) {
  enum XML_Error result
      = doCdataSection(parser, parser->m_encoding, &start, end, endPtr,
                       (XML_Bool)! parser->m_parsingStatus.finalBuffer);
  if (result != XML_ERROR_NONE)
    return result;
  if (start) {
    if (parser->m_parentParser) { /* we are parsing an external entity */
      parser->m_processor = externalEntityContentProcessor;
      return externalEntityContentProcessor(parser, start, end, endPtr);
    } else {
      parser->m_processor = contentProcessor;
      return contentProcessor(parser, start, end, endPtr);
    }
  }
  return result;
}

/* startPtr gets set to non-null if the section is closed, and to null if
   the section is not yet closed.
*/
static enum XML_Error
doCdataSection(XML_Parser parser, const ENCODING *enc, const char **startPtr,
               const char *end, const char **nextPtr, XML_Bool haveMore) {
  const char *s = *startPtr;
  const char **eventPP;
  const char **eventEndPP;
  /* event pointers live on the parser for the document encoding, otherwise
     on the currently open internal entity */
  if (enc == parser->m_encoding) {
    eventPP = &parser->m_eventPtr;
    *eventPP = s;
    eventEndPP = &parser->m_eventEndPtr;
  } else {
    eventPP = &(parser->m_openInternalEntities->internalEventPtr);
    eventEndPP = &(parser->m_openInternalEntities->internalEventEndPtr);
  }
  *eventPP = s;
  *startPtr = NULL;

  for (;;) {
    const char *next;
    int tok = XmlCdataSectionTok(enc, s, end, &next);
    *eventEndPP = next;
    switch (tok) {
    case XML_TOK_CDATA_SECT_CLOSE:
      if (parser->m_endCdataSectionHandler)
        parser->m_endCdataSectionHandler(parser->m_handlerArg);
      /* BEGIN disabled code */
      /* see comment under XML_TOK_CDATA_SECT_OPEN */
      else if (0 && parser->m_characterDataHandler)
        parser->m_characterDataHandler(parser->m_handlerArg, parser->m_dataBuf,
                                       0);
      /* END disabled code */
      else if (parser->m_defaultHandler)
        reportDefault(parser, enc, s, next);
      *startPtr = next;
      *nextPtr = next;
      if (parser->m_parsingStatus.parsing == XML_FINISHED)
        return XML_ERROR_ABORTED;
      else
        return XML_ERROR_NONE;
    case XML_TOK_DATA_NEWLINE:
      if (parser->m_characterDataHandler) {
        /* report a normalized line feed */
        XML_Char c = 0xA;
        parser->m_characterDataHandler(parser->m_handlerArg, &c, 1);
      } else if (parser->m_defaultHandler)
        reportDefault(parser, enc, s, next);
      break;
    case XML_TOK_DATA_CHARS: {
      XML_CharacterDataHandler charDataHandler = parser->m_characterDataHandler;
      if (charDataHandler) {
        if (MUST_CONVERT(enc, s)) {
          /* convert through m_dataBuf in chunks, invoking the handler once
             per filled buffer until the token is fully consumed */
          for (;;) {
            ICHAR *dataPtr = (ICHAR *)parser->m_dataBuf;
            const enum XML_Convert_Result convert_res = XmlConvert(
                enc, &s, next, &dataPtr, (ICHAR *)parser->m_dataBufEnd);
            *eventEndPP = next;
            charDataHandler(parser->m_handlerArg, parser->m_dataBuf,
                            (int)(dataPtr - (ICHAR *)parser->m_dataBuf));
            if ((convert_res == XML_CONVERT_COMPLETED)
                || (convert_res == XML_CONVERT_INPUT_INCOMPLETE))
              break;
            *eventPP = s;
          }
        } else
          /* input already in internal encoding: report it in place */
          charDataHandler(parser->m_handlerArg, (XML_Char *)s,
                          (int)((XML_Char *)next - (XML_Char *)s));
      } else if (parser->m_defaultHandler)
        reportDefault(parser, enc, s, next);
    } break;
    case XML_TOK_INVALID:
      *eventPP = next;
      return XML_ERROR_INVALID_TOKEN;
    case XML_TOK_PARTIAL_CHAR:
      if (haveMore) {
        *nextPtr = s;
        return XML_ERROR_NONE;
      }
      return XML_ERROR_PARTIAL_CHAR;
    case XML_TOK_PARTIAL:
    case XML_TOK_NONE:
      if (haveMore) {
        *nextPtr = s;
        return XML_ERROR_NONE;
      }
      return XML_ERROR_UNCLOSED_CDATA_SECTION;
    default:
      /* Every token returned by XmlCdataSectionTok() has its own
       * explicit case, so this default case will never be executed.
       * We retain it as a safety net and exclude it from the coverage
       * statistics.
       *
       * LCOV_EXCL_START */
      *eventPP = next;
      return XML_ERROR_UNEXPECTED_STATE; /* LCOV_EXCL_STOP */
    }

    *eventPP = s = next;
    /* honour suspend/abort requested by a handler during this token */
    switch (parser->m_parsingStatus.parsing) {
    case XML_SUSPENDED:
      *nextPtr = next;
      return XML_ERROR_NONE;
    case XML_FINISHED:
      return XML_ERROR_ABORTED;
    default:;
    }
  }
  /* not reached */
}

#ifdef XML_DTD

/* The idea here is to avoid using stack for each IGNORE section when
   the whole file is parsed with one call.
*/
/* Processor installed while inside a DTD IGNORE conditional section:
   run doIgnoreSection and, once the section closes, return to the
   prolog processor. */
static enum XML_Error PTRCALL
ignoreSectionProcessor(XML_Parser parser, const char *start, const char *end,
                       const char **endPtr) {
  enum XML_Error result
      = doIgnoreSection(parser, parser->m_encoding, &start, end, endPtr,
                        (XML_Bool)! parser->m_parsingStatus.finalBuffer);
  if (result != XML_ERROR_NONE)
    return result;
  if (start) {
    parser->m_processor = prologProcessor;
    return prologProcessor(parser, start, end, endPtr);
  }
  return result;
}

/* startPtr gets set to non-null if the section is closed, and to null
   if the section is not yet closed.
*/
static enum XML_Error
doIgnoreSection(XML_Parser parser, const ENCODING *enc, const char **startPtr,
                const char *end, const char **nextPtr, XML_Bool haveMore) {
  const char *next;
  int tok;
  const char *s = *startPtr;
  const char **eventPP;
  const char **eventEndPP;
  if (enc == parser->m_encoding) {
    eventPP = &parser->m_eventPtr;
    *eventPP = s;
    eventEndPP = &parser->m_eventEndPtr;
  } else {
    /* It's not entirely clear, but it seems the following two lines
     * of code cannot be executed.  The only occasions on which 'enc'
     * is not 'encoding' are when this function is called
     * from the internal entity processing, and IGNORE sections are an
     * error in internal entities.
     *
     * Since it really isn't clear that this is true, we keep the code
     * and just remove it from our coverage tests.
     *
     * LCOV_EXCL_START */
    eventPP = &(parser->m_openInternalEntities->internalEventPtr);
    eventEndPP = &(parser->m_openInternalEntities->internalEventEndPtr);
    /* LCOV_EXCL_STOP */
  }
  *eventPP = s;
  *startPtr = NULL;
  tok = XmlIgnoreSectionTok(enc, s, end, &next);
  *eventEndPP = next;
  switch (tok) {
  case XML_TOK_IGNORE_SECT:
    if (parser->m_defaultHandler)
      reportDefault(parser, enc, s, next);
    *startPtr = next;
    *nextPtr = next;
    if (parser->m_parsingStatus.parsing == XML_FINISHED)
      return XML_ERROR_ABORTED;
    else
      return XML_ERROR_NONE;
  case XML_TOK_INVALID:
    *eventPP = next;
    return XML_ERROR_INVALID_TOKEN;
  case XML_TOK_PARTIAL_CHAR:
    if (haveMore) {
      *nextPtr = s;
      return XML_ERROR_NONE;
    }
    return XML_ERROR_PARTIAL_CHAR;
  case XML_TOK_PARTIAL:
  case XML_TOK_NONE:
    if (haveMore) {
      *nextPtr = s;
      return XML_ERROR_NONE;
    }
    return XML_ERROR_SYNTAX; /* XML_ERROR_UNCLOSED_IGNORE_SECTION */
  default:
    /* All of the tokens that XmlIgnoreSectionTok() returns have
     * explicit cases to handle them, so this default case is never
     * executed.  We keep it as a safety net anyway, and remove it
     * from our test coverage statistics.
     *
     * LCOV_EXCL_START */
    *eventPP = next;
    return XML_ERROR_UNEXPECTED_STATE; /* LCOV_EXCL_STOP */
  }
  /* not reached */
}

#endif /* XML_DTD */

/* Initialize parser->m_initEncoding / m_encoding from the externally
   specified protocol encoding name, falling back to the unknown-encoding
   handler if the name is not built in. */
static enum XML_Error
initializeEncoding(XML_Parser parser) {
  const char *s;
#ifdef XML_UNICODE
  char encodingBuf[128];
  /* See comments about `protocolEncodingName` in parserInit() */
  /* narrow the wide-char encoding name to ASCII; give up on overflow or
     non-ASCII characters */
  if (! parser->m_protocolEncodingName)
    s = NULL;
  else {
    int i;
    for (i = 0; parser->m_protocolEncodingName[i]; i++) {
      if (i == sizeof(encodingBuf) - 1
          || (parser->m_protocolEncodingName[i] & ~0x7f) != 0) {
        encodingBuf[0] = '\0';
        break;
      }
      encodingBuf[i] = (char)parser->m_protocolEncodingName[i];
    }
    encodingBuf[i] = '\0';
    s = encodingBuf;
  }
#else
  s = parser->m_protocolEncodingName;
#endif
  if ((parser->m_ns ? XmlInitEncodingNS : XmlInitEncoding)(
          &parser->m_initEncoding, &parser->m_encoding, s))
    return XML_ERROR_NONE;
  return handleUnknownEncoding(parser, parser->m_protocolEncodingName);
}

/* Parse an XML or text declaration at [s, next), report it through the
   xmlDecl (or default) handler, and switch parser->m_encoding to the
   declared encoding when no protocol-level encoding overrides it. */
static enum XML_Error
processXmlDecl(XML_Parser parser, int isGeneralTextEntity, const char *s,
               const char *next) {
  const char *encodingName = NULL;
  const XML_Char *storedEncName = NULL;
  const ENCODING *newEncoding = NULL;
  const char *version = NULL;
  const char *versionend;
  const XML_Char *storedversion = NULL;
  int standalone = -1;

  if (! (parser->m_ns ? XmlParseXmlDeclNS : XmlParseXmlDecl)(
          isGeneralTextEntity, parser->m_encoding, s, next, &parser->m_eventPtr,
          &version, &versionend, &encodingName, &newEncoding, &standalone)) {
    if (isGeneralTextEntity)
      return XML_ERROR_TEXT_DECL;
    else
      return XML_ERROR_XML_DECL;
  }
  if (! isGeneralTextEntity && standalone == 1) {
    parser->m_dtd->standalone = XML_TRUE;
#ifdef XML_DTD
    if (parser->m_paramEntityParsing
        == XML_PARAM_ENTITY_PARSING_UNLESS_STANDALONE)
      parser->m_paramEntityParsing = XML_PARAM_ENTITY_PARSING_NEVER;
#endif /* XML_DTD */
  }
  if (parser->m_xmlDeclHandler) {
    /* copy encoding name and version into the temp pool so they can be
       passed to the handler as XML_Char strings */
    if (encodingName != NULL) {
      storedEncName = poolStoreString(
          &parser->m_temp2Pool, parser->m_encoding, encodingName,
          encodingName + XmlNameLength(parser->m_encoding, encodingName));
      if (! storedEncName)
        return XML_ERROR_NO_MEMORY;
      poolFinish(&parser->m_temp2Pool);
    }
    if (version) {
      storedversion
          = poolStoreString(&parser->m_temp2Pool, parser->m_encoding, version,
                            versionend - parser->m_encoding->minBytesPerChar);
      if (! storedversion)
        return XML_ERROR_NO_MEMORY;
    }
    parser->m_xmlDeclHandler(parser->m_handlerArg, storedversion, storedEncName,
                             standalone);
  } else if (parser->m_defaultHandler)
    reportDefault(parser, parser->m_encoding, s, next);
  if (parser->m_protocolEncodingName == NULL) {
    if (newEncoding) {
      /* Check that the specified encoding does not conflict with what
       * the parser has already deduced.  Do we have the same number
       * of bytes in the smallest representation of a character?  If
       * this is UTF-16, is it the same endianness?
       */
      if (newEncoding->minBytesPerChar != parser->m_encoding->minBytesPerChar
          || (newEncoding->minBytesPerChar == 2
              && newEncoding != parser->m_encoding)) {
        parser->m_eventPtr = encodingName;
        return XML_ERROR_INCORRECT_ENCODING;
      }
      parser->m_encoding = newEncoding;
    } else if (encodingName) {
      enum XML_Error result;
      if (! storedEncName) {
        storedEncName = poolStoreString(
            &parser->m_temp2Pool, parser->m_encoding, encodingName,
            encodingName + XmlNameLength(parser->m_encoding, encodingName));
        if (! storedEncName)
          return XML_ERROR_NO_MEMORY;
      }
      result = handleUnknownEncoding(parser, storedEncName);
      poolClear(&parser->m_temp2Pool);
      if (result == XML_ERROR_UNKNOWN_ENCODING)
        parser->m_eventPtr = encodingName;
      return result;
    }
  }
  if (storedEncName || storedversion)
    poolClear(&parser->m_temp2Pool);

  return XML_ERROR_NONE;
}

/* Ask the application's unknown-encoding handler to describe the encoding
   named `encodingName`; on success install the resulting ENCODING object
   as parser->m_encoding. */
static enum XML_Error
handleUnknownEncoding(XML_Parser parser, const XML_Char *encodingName) {
  if (parser->m_unknownEncodingHandler) {
    XML_Encoding info;
    int i;
    /* start with an all-invalid byte map for the application to fill in */
    for (i = 0; i < 256; i++)
      info.map[i] = -1;
    info.convert = NULL;
    info.data = NULL;
    info.release = NULL;
    if (parser->m_unknownEncodingHandler(parser->m_unknownEncodingHandlerData,
                                         encodingName, &info)) {
      ENCODING *enc;
      parser->m_unknownEncodingMem = MALLOC(parser, XmlSizeOfUnknownEncoding());
      if (! parser->m_unknownEncodingMem) {
        if (info.release)
          info.release(info.data);
        return XML_ERROR_NO_MEMORY;
      }
      enc = (parser->m_ns ?
                 XmlInitUnknownEncodingNS : XmlInitUnknownEncoding)(
          parser->m_unknownEncodingMem, info.map, info.convert, info.data);
      if (enc) {
        parser->m_unknownEncodingData = info.data;
        parser->m_unknownEncodingRelease = info.release;
        parser->m_encoding = enc;
        return XML_ERROR_NONE;
      }
    }
    /* handler declined or the encoding table was unusable: release any
       application data before reporting failure */
    if (info.release != NULL)
      info.release(info.data);
  }
  return XML_ERROR_UNKNOWN_ENCODING;
}

/* First processor for a fresh document: establish the encoding, then
   switch to (and run) the prolog processor. */
static enum XML_Error PTRCALL
prologInitProcessor(XML_Parser parser, const char *s, const char *end,
                    const char **nextPtr) {
  enum XML_Error result = initializeEncoding(parser);
  if (result != XML_ERROR_NONE)
    return result;
  parser->m_processor = prologProcessor;
  return prologProcessor(parser, s, end, nextPtr);
}

#ifdef XML_DTD

/* First processor for an external parameter entity: establish its
   encoding, then dispatch to the entity-value or external-par-ent
   processor depending on the reference context. */
static enum XML_Error PTRCALL
externalParEntInitProcessor(XML_Parser parser, const char *s, const char *end,
                            const char **nextPtr) {
  enum XML_Error result = initializeEncoding(parser);
  if (result != XML_ERROR_NONE)
    return result;

  /* we know now that XML_Parse(Buffer) has been called,
     so we consider the external parameter entity read */
  parser->m_dtd->paramEntityRead = XML_TRUE;

  if (parser->m_prologState.inEntityValue) {
    parser->m_processor = entityValueInitProcessor;
    return entityValueInitProcessor(parser, s, end, nextPtr);
  } else {
    parser->m_processor = externalParEntProcessor;
    return externalParEntProcessor(parser, s, end, nextPtr);
  }
}

/* Scan the start of an external entity used in an entity value for an
   optional text declaration, then store the entity value. */
static enum XML_Error PTRCALL
entityValueInitProcessor(XML_Parser parser, const char *s, const char *end,
                         const char **nextPtr) {
  int tok;
  const char *start = s;
  const char *next = start;
  parser->m_eventPtr = start;

  for (;;) {
    tok = XmlPrologTok(parser->m_encoding, start, end, &next);
    parser->m_eventEndPtr = next;
    if (tok <= 0) {
      /* incomplete token: wait for more input unless this is the final
         buffer or the token is outright invalid */
      if (! parser->m_parsingStatus.finalBuffer && tok != XML_TOK_INVALID) {
        *nextPtr = s;
        return XML_ERROR_NONE;
      }
      switch (tok) {
      case XML_TOK_INVALID:
        return XML_ERROR_INVALID_TOKEN;
      case XML_TOK_PARTIAL:
        return XML_ERROR_UNCLOSED_TOKEN;
      case XML_TOK_PARTIAL_CHAR:
        return XML_ERROR_PARTIAL_CHAR;
      case XML_TOK_NONE: /* start == end */
      default:
        break;
      }
      /* found end of entity value - can store it now */
      return storeEntityValue(parser, parser->m_encoding, s, end);
    } else if (tok == XML_TOK_XML_DECL) {
      enum XML_Error result;
      result = processXmlDecl(parser, 0, start, next);
      if (result != XML_ERROR_NONE)
        return result;
      /* At this point, m_parsingStatus.parsing cannot be XML_SUSPENDED.  For
       * that to happen, a parameter entity parsing handler must have attempted
       * to suspend the parser, which fails and raises an error.  The parser
       * can be aborted, but can't be suspended.
       */
      if (parser->m_parsingStatus.parsing == XML_FINISHED)
        return XML_ERROR_ABORTED;
      *nextPtr = next;
      /* stop scanning for text declaration - we found one */
      parser->m_processor = entityValueProcessor;
      return entityValueProcessor(parser, next, end, nextPtr);
    }
    /* If we are at the end of the buffer, this would cause XmlPrologTok to
       return XML_TOK_NONE on the next call, which would then cause the
       function to exit with *nextPtr set to s - that is what we want for other
       tokens, but not for the BOM - we would rather like to skip it;
       then, when this routine is entered the next time, XmlPrologTok will
       return XML_TOK_INVALID, since the BOM is still in the buffer
    */
    else if (tok == XML_TOK_BOM && next == end
             && ! parser->m_parsingStatus.finalBuffer) {
      *nextPtr = next;
      return XML_ERROR_NONE;
    }
    /* If we get this token, we have the start of what might be a
       normal tag, but not a declaration (i.e. it doesn't begin with
       "<!").  In a DTD context, that isn't legal.
    */
    else if (tok == XML_TOK_INSTANCE_START) {
      *nextPtr = next;
      return XML_ERROR_SYNTAX;
    }
    start = next;
    parser->m_eventPtr = start;
  }
}

/* Processor for the body of an external parameter entity referenced
   between declarations: skip a BOM if present and feed tokens to
   doProlog. */
static enum XML_Error PTRCALL
externalParEntProcessor(XML_Parser parser, const char *s, const char *end,
                        const char **nextPtr) {
  const char *next = s;
  int tok;

  tok = XmlPrologTok(parser->m_encoding, s, end, &next);
  if (tok <= 0) {
    if (! parser->m_parsingStatus.finalBuffer && tok != XML_TOK_INVALID) {
      *nextPtr = s;
      return XML_ERROR_NONE;
    }
    switch (tok) {
    case XML_TOK_INVALID:
      return XML_ERROR_INVALID_TOKEN;
    case XML_TOK_PARTIAL:
      return XML_ERROR_UNCLOSED_TOKEN;
    case XML_TOK_PARTIAL_CHAR:
      return XML_ERROR_PARTIAL_CHAR;
    case XML_TOK_NONE: /* start == end */
    default:
      break;
    }
  }
  /* This would cause the next stage, i.e. doProlog to be passed XML_TOK_BOM.
     However, when parsing an external subset, doProlog will not accept a BOM
     as valid, and report a syntax error, so we have to skip the BOM
  */
  else if (tok == XML_TOK_BOM) {
    s = next;
    tok = XmlPrologTok(parser->m_encoding, s, end, &next);
  }

  parser->m_processor = prologProcessor;
  return doProlog(parser, parser->m_encoding, s, end, tok, next, nextPtr,
                  (XML_Bool)! parser->m_parsingStatus.finalBuffer);
}

/* Processor for the remainder of an entity value (after any text
   declaration has been handled): scan to the end of the value and
   store it. */
static enum XML_Error PTRCALL
entityValueProcessor(XML_Parser parser, const char *s, const char *end,
                     const char **nextPtr) {
  const char *start = s;
  const char *next = s;
  const ENCODING *enc = parser->m_encoding;
  int tok;

  for (;;) {
    tok = XmlPrologTok(enc, start, end, &next);
    if (tok <= 0) {
      if (!
          parser->m_parsingStatus.finalBuffer && tok != XML_TOK_INVALID) {
        *nextPtr = s;
        return XML_ERROR_NONE;
      }
      switch (tok) {
      case XML_TOK_INVALID:
        return XML_ERROR_INVALID_TOKEN;
      case XML_TOK_PARTIAL:
        return XML_ERROR_UNCLOSED_TOKEN;
      case XML_TOK_PARTIAL_CHAR:
        return XML_ERROR_PARTIAL_CHAR;
      case XML_TOK_NONE: /* start == end */
      default:
        break;
      }
      /* found end of entity value - can store it now */
      return storeEntityValue(parser, enc, s, end);
    }
    start = next;
  }
}

#endif /* XML_DTD */

/* Tokenize the next prolog token and hand it, with the current encoding,
   to the doProlog state machine. */
static enum XML_Error PTRCALL
prologProcessor(XML_Parser parser, const char *s, const char *end,
                const char **nextPtr) {
  const char *next = s;
  int tok = XmlPrologTok(parser->m_encoding, s, end, &next);
  return doProlog(parser, parser->m_encoding, s, end, tok, next, nextPtr,
                  (XML_Bool)! parser->m_parsingStatus.finalBuffer);
}

/* Core prolog/DTD processing state machine, driven by XmlTokenRole().
   (Only the head of this function is visible in this chunk; its body
   continues below.) */
static enum XML_Error
doProlog(XML_Parser parser, const ENCODING *enc, const char *s, const char *end,
         int tok, const char *next, const char **nextPtr, XML_Bool haveMore) {
#ifdef XML_DTD
  static const XML_Char externalSubsetName[] = {ASCII_HASH, '\0'};
#endif /* XML_DTD */
  /* canonical attribute-type name strings reported to attlistDeclHandler,
     spelled with ASCII_* constants for narrow and wide-char builds */
  static const XML_Char atypeCDATA[]
      = {ASCII_C, ASCII_D, ASCII_A, ASCII_T, ASCII_A, '\0'};
  static const XML_Char atypeID[] = {ASCII_I, ASCII_D, '\0'};
  static const XML_Char atypeIDREF[]
      = {ASCII_I, ASCII_D, ASCII_R, ASCII_E, ASCII_F, '\0'};
  static const XML_Char atypeIDREFS[]
      = {ASCII_I, ASCII_D, ASCII_R, ASCII_E, ASCII_F, ASCII_S, '\0'};
  static const XML_Char atypeENTITY[]
      = {ASCII_E, ASCII_N, ASCII_T, ASCII_I, ASCII_T, ASCII_Y, '\0'};
  static const XML_Char atypeENTITIES[]
      = {ASCII_E, ASCII_N, ASCII_T, ASCII_I,
         ASCII_T, ASCII_I, ASCII_E, ASCII_S, '\0'};
  static const XML_Char atypeNMTOKEN[]
      = {ASCII_N, ASCII_M, ASCII_T, ASCII_O, ASCII_K, ASCII_E, ASCII_N, '\0'};
  static const XML_Char atypeNMTOKENS[]
      = {ASCII_N, ASCII_M, ASCII_T, ASCII_O,
         ASCII_K, ASCII_E, ASCII_N, ASCII_S, '\0'};
  static const XML_Char notationPrefix[]
      = {ASCII_N, ASCII_O, ASCII_T, ASCII_A, ASCII_T,
         ASCII_I, ASCII_O, ASCII_N, ASCII_LPAREN,
'\0'}; static const XML_Char enumValueSep[] = {ASCII_PIPE, '\0'}; static const XML_Char enumValueStart[] = {ASCII_LPAREN, '\0'}; /* save one level of indirection */ DTD *const dtd = parser->m_dtd; const char **eventPP; const char **eventEndPP; enum XML_Content_Quant quant; if (enc == parser->m_encoding) { eventPP = &parser->m_eventPtr; eventEndPP = &parser->m_eventEndPtr; } else { eventPP = &(parser->m_openInternalEntities->internalEventPtr); eventEndPP = &(parser->m_openInternalEntities->internalEventEndPtr); } for (;;) { int role; XML_Bool handleDefault = XML_TRUE; *eventPP = s; *eventEndPP = next; if (tok <= 0) { if (haveMore && tok != XML_TOK_INVALID) { *nextPtr = s; return XML_ERROR_NONE; } switch (tok) { case XML_TOK_INVALID: *eventPP = next; return XML_ERROR_INVALID_TOKEN; case XML_TOK_PARTIAL: return XML_ERROR_UNCLOSED_TOKEN; case XML_TOK_PARTIAL_CHAR: return XML_ERROR_PARTIAL_CHAR; case -XML_TOK_PROLOG_S: tok = -tok; break; case XML_TOK_NONE: #ifdef XML_DTD /* for internal PE NOT referenced between declarations */ if (enc != parser->m_encoding && ! parser->m_openInternalEntities->betweenDecl) { *nextPtr = s; return XML_ERROR_NONE; } /* WFC: PE Between Declarations - must check that PE contains complete markup, not only for external PEs, but also for internal PEs if the reference occurs between declarations. 
*/ if (parser->m_isParamEntity || enc != parser->m_encoding) { if (XmlTokenRole(&parser->m_prologState, XML_TOK_NONE, end, end, enc) == XML_ROLE_ERROR) return XML_ERROR_INCOMPLETE_PE; *nextPtr = s; return XML_ERROR_NONE; } #endif /* XML_DTD */ return XML_ERROR_NO_ELEMENTS; default: tok = -tok; next = end; break; } } role = XmlTokenRole(&parser->m_prologState, tok, s, next, enc); switch (role) { case XML_ROLE_XML_DECL: { enum XML_Error result = processXmlDecl(parser, 0, s, next); if (result != XML_ERROR_NONE) return result; enc = parser->m_encoding; handleDefault = XML_FALSE; } break; case XML_ROLE_DOCTYPE_NAME: if (parser->m_startDoctypeDeclHandler) { parser->m_doctypeName = poolStoreString(&parser->m_tempPool, enc, s, next); if (! parser->m_doctypeName) return XML_ERROR_NO_MEMORY; poolFinish(&parser->m_tempPool); parser->m_doctypePubid = NULL; handleDefault = XML_FALSE; } parser->m_doctypeSysid = NULL; /* always initialize to NULL */ break; case XML_ROLE_DOCTYPE_INTERNAL_SUBSET: if (parser->m_startDoctypeDeclHandler) { parser->m_startDoctypeDeclHandler( parser->m_handlerArg, parser->m_doctypeName, parser->m_doctypeSysid, parser->m_doctypePubid, 1); parser->m_doctypeName = NULL; poolClear(&parser->m_tempPool); handleDefault = XML_FALSE; } break; #ifdef XML_DTD case XML_ROLE_TEXT_DECL: { enum XML_Error result = processXmlDecl(parser, 1, s, next); if (result != XML_ERROR_NONE) return result; enc = parser->m_encoding; handleDefault = XML_FALSE; } break; #endif /* XML_DTD */ case XML_ROLE_DOCTYPE_PUBLIC_ID: #ifdef XML_DTD parser->m_useForeignDTD = XML_FALSE; parser->m_declEntity = (ENTITY *)lookup( parser, &dtd->paramEntities, externalSubsetName, sizeof(ENTITY)); if (! parser->m_declEntity) return XML_ERROR_NO_MEMORY; #endif /* XML_DTD */ dtd->hasParamEntityRefs = XML_TRUE; if (parser->m_startDoctypeDeclHandler) { XML_Char *pubId; if (! 
XmlIsPublicId(enc, s, next, eventPP)) return XML_ERROR_PUBLICID; pubId = poolStoreString(&parser->m_tempPool, enc, s + enc->minBytesPerChar, next - enc->minBytesPerChar); if (! pubId) return XML_ERROR_NO_MEMORY; normalizePublicId(pubId); poolFinish(&parser->m_tempPool); parser->m_doctypePubid = pubId; handleDefault = XML_FALSE; goto alreadyChecked; } /* fall through */ case XML_ROLE_ENTITY_PUBLIC_ID: if (! XmlIsPublicId(enc, s, next, eventPP)) return XML_ERROR_PUBLICID; alreadyChecked: if (dtd->keepProcessing && parser->m_declEntity) { XML_Char *tem = poolStoreString(&dtd->pool, enc, s + enc->minBytesPerChar, next - enc->minBytesPerChar); if (! tem) return XML_ERROR_NO_MEMORY; normalizePublicId(tem); parser->m_declEntity->publicId = tem; poolFinish(&dtd->pool); /* Don't suppress the default handler if we fell through from * the XML_ROLE_DOCTYPE_PUBLIC_ID case. */ if (parser->m_entityDeclHandler && role == XML_ROLE_ENTITY_PUBLIC_ID) handleDefault = XML_FALSE; } break; case XML_ROLE_DOCTYPE_CLOSE: if (parser->m_doctypeName) { parser->m_startDoctypeDeclHandler( parser->m_handlerArg, parser->m_doctypeName, parser->m_doctypeSysid, parser->m_doctypePubid, 0); poolClear(&parser->m_tempPool); handleDefault = XML_FALSE; } /* parser->m_doctypeSysid will be non-NULL in the case of a previous XML_ROLE_DOCTYPE_SYSTEM_ID, even if parser->m_startDoctypeDeclHandler was not set, indicating an external subset */ #ifdef XML_DTD if (parser->m_doctypeSysid || parser->m_useForeignDTD) { XML_Bool hadParamEntityRefs = dtd->hasParamEntityRefs; dtd->hasParamEntityRefs = XML_TRUE; if (parser->m_paramEntityParsing && parser->m_externalEntityRefHandler) { ENTITY *entity = (ENTITY *)lookup(parser, &dtd->paramEntities, externalSubsetName, sizeof(ENTITY)); if (! entity) { /* The external subset name "#" will have already been * inserted into the hash table at the start of the * external entity parsing, so no allocation will happen * and lookup() cannot fail. 
*/ return XML_ERROR_NO_MEMORY; /* LCOV_EXCL_LINE */ } if (parser->m_useForeignDTD) entity->base = parser->m_curBase; dtd->paramEntityRead = XML_FALSE; if (! parser->m_externalEntityRefHandler( parser->m_externalEntityRefHandlerArg, 0, entity->base, entity->systemId, entity->publicId)) return XML_ERROR_EXTERNAL_ENTITY_HANDLING; if (dtd->paramEntityRead) { if (! dtd->standalone && parser->m_notStandaloneHandler && ! parser->m_notStandaloneHandler(parser->m_handlerArg)) return XML_ERROR_NOT_STANDALONE; } /* if we didn't read the foreign DTD then this means that there is no external subset and we must reset dtd->hasParamEntityRefs */ else if (! parser->m_doctypeSysid) dtd->hasParamEntityRefs = hadParamEntityRefs; /* end of DTD - no need to update dtd->keepProcessing */ } parser->m_useForeignDTD = XML_FALSE; } #endif /* XML_DTD */ if (parser->m_endDoctypeDeclHandler) { parser->m_endDoctypeDeclHandler(parser->m_handlerArg); handleDefault = XML_FALSE; } break; case XML_ROLE_INSTANCE_START: #ifdef XML_DTD /* if there is no DOCTYPE declaration then now is the last chance to read the foreign DTD */ if (parser->m_useForeignDTD) { XML_Bool hadParamEntityRefs = dtd->hasParamEntityRefs; dtd->hasParamEntityRefs = XML_TRUE; if (parser->m_paramEntityParsing && parser->m_externalEntityRefHandler) { ENTITY *entity = (ENTITY *)lookup(parser, &dtd->paramEntities, externalSubsetName, sizeof(ENTITY)); if (! entity) return XML_ERROR_NO_MEMORY; entity->base = parser->m_curBase; dtd->paramEntityRead = XML_FALSE; if (! parser->m_externalEntityRefHandler( parser->m_externalEntityRefHandlerArg, 0, entity->base, entity->systemId, entity->publicId)) return XML_ERROR_EXTERNAL_ENTITY_HANDLING; if (dtd->paramEntityRead) { if (! dtd->standalone && parser->m_notStandaloneHandler && ! 
parser->m_notStandaloneHandler(parser->m_handlerArg)) return XML_ERROR_NOT_STANDALONE; } /* if we didn't read the foreign DTD then this means that there is no external subset and we must reset dtd->hasParamEntityRefs */ else dtd->hasParamEntityRefs = hadParamEntityRefs; /* end of DTD - no need to update dtd->keepProcessing */ } } #endif /* XML_DTD */ parser->m_processor = contentProcessor; return contentProcessor(parser, s, end, nextPtr); case XML_ROLE_ATTLIST_ELEMENT_NAME: parser->m_declElementType = getElementType(parser, enc, s, next); if (! parser->m_declElementType) return XML_ERROR_NO_MEMORY; goto checkAttListDeclHandler; case XML_ROLE_ATTRIBUTE_NAME: parser->m_declAttributeId = getAttributeId(parser, enc, s, next); if (! parser->m_declAttributeId) return XML_ERROR_NO_MEMORY; parser->m_declAttributeIsCdata = XML_FALSE; parser->m_declAttributeType = NULL; parser->m_declAttributeIsId = XML_FALSE; goto checkAttListDeclHandler; case XML_ROLE_ATTRIBUTE_TYPE_CDATA: parser->m_declAttributeIsCdata = XML_TRUE; parser->m_declAttributeType = atypeCDATA; goto checkAttListDeclHandler; case XML_ROLE_ATTRIBUTE_TYPE_ID: parser->m_declAttributeIsId = XML_TRUE; parser->m_declAttributeType = atypeID; goto checkAttListDeclHandler; case XML_ROLE_ATTRIBUTE_TYPE_IDREF: parser->m_declAttributeType = atypeIDREF; goto checkAttListDeclHandler; case XML_ROLE_ATTRIBUTE_TYPE_IDREFS: parser->m_declAttributeType = atypeIDREFS; goto checkAttListDeclHandler; case XML_ROLE_ATTRIBUTE_TYPE_ENTITY: parser->m_declAttributeType = atypeENTITY; goto checkAttListDeclHandler; case XML_ROLE_ATTRIBUTE_TYPE_ENTITIES: parser->m_declAttributeType = atypeENTITIES; goto checkAttListDeclHandler; case XML_ROLE_ATTRIBUTE_TYPE_NMTOKEN: parser->m_declAttributeType = atypeNMTOKEN; goto checkAttListDeclHandler; case XML_ROLE_ATTRIBUTE_TYPE_NMTOKENS: parser->m_declAttributeType = atypeNMTOKENS; checkAttListDeclHandler: if (dtd->keepProcessing && parser->m_attlistDeclHandler) handleDefault = XML_FALSE; break; case 
XML_ROLE_ATTRIBUTE_ENUM_VALUE: case XML_ROLE_ATTRIBUTE_NOTATION_VALUE: if (dtd->keepProcessing && parser->m_attlistDeclHandler) { const XML_Char *prefix; if (parser->m_declAttributeType) { prefix = enumValueSep; } else { prefix = (role == XML_ROLE_ATTRIBUTE_NOTATION_VALUE ? notationPrefix : enumValueStart); } if (! poolAppendString(&parser->m_tempPool, prefix)) return XML_ERROR_NO_MEMORY; if (! poolAppend(&parser->m_tempPool, enc, s, next)) return XML_ERROR_NO_MEMORY; parser->m_declAttributeType = parser->m_tempPool.start; handleDefault = XML_FALSE; } break; case XML_ROLE_IMPLIED_ATTRIBUTE_VALUE: case XML_ROLE_REQUIRED_ATTRIBUTE_VALUE: if (dtd->keepProcessing) { if (! defineAttribute(parser->m_declElementType, parser->m_declAttributeId, parser->m_declAttributeIsCdata, parser->m_declAttributeIsId, 0, parser)) return XML_ERROR_NO_MEMORY; if (parser->m_attlistDeclHandler && parser->m_declAttributeType) { if (*parser->m_declAttributeType == XML_T(ASCII_LPAREN) || (*parser->m_declAttributeType == XML_T(ASCII_N) && parser->m_declAttributeType[1] == XML_T(ASCII_O))) { /* Enumerated or Notation type */ if (! poolAppendChar(&parser->m_tempPool, XML_T(ASCII_RPAREN)) || ! 
poolAppendChar(&parser->m_tempPool, XML_T('\0'))) return XML_ERROR_NO_MEMORY; parser->m_declAttributeType = parser->m_tempPool.start; poolFinish(&parser->m_tempPool); } *eventEndPP = s; parser->m_attlistDeclHandler( parser->m_handlerArg, parser->m_declElementType->name, parser->m_declAttributeId->name, parser->m_declAttributeType, 0, role == XML_ROLE_REQUIRED_ATTRIBUTE_VALUE); poolClear(&parser->m_tempPool); handleDefault = XML_FALSE; } } break; case XML_ROLE_DEFAULT_ATTRIBUTE_VALUE: case XML_ROLE_FIXED_ATTRIBUTE_VALUE: if (dtd->keepProcessing) { const XML_Char *attVal; enum XML_Error result = storeAttributeValue( parser, enc, parser->m_declAttributeIsCdata, s + enc->minBytesPerChar, next - enc->minBytesPerChar, &dtd->pool); if (result) return result; attVal = poolStart(&dtd->pool); poolFinish(&dtd->pool); /* ID attributes aren't allowed to have a default */ if (! defineAttribute( parser->m_declElementType, parser->m_declAttributeId, parser->m_declAttributeIsCdata, XML_FALSE, attVal, parser)) return XML_ERROR_NO_MEMORY; if (parser->m_attlistDeclHandler && parser->m_declAttributeType) { if (*parser->m_declAttributeType == XML_T(ASCII_LPAREN) || (*parser->m_declAttributeType == XML_T(ASCII_N) && parser->m_declAttributeType[1] == XML_T(ASCII_O))) { /* Enumerated or Notation type */ if (! poolAppendChar(&parser->m_tempPool, XML_T(ASCII_RPAREN)) || ! 
poolAppendChar(&parser->m_tempPool, XML_T('\0'))) return XML_ERROR_NO_MEMORY; parser->m_declAttributeType = parser->m_tempPool.start; poolFinish(&parser->m_tempPool); } *eventEndPP = s; parser->m_attlistDeclHandler( parser->m_handlerArg, parser->m_declElementType->name, parser->m_declAttributeId->name, parser->m_declAttributeType, attVal, role == XML_ROLE_FIXED_ATTRIBUTE_VALUE); poolClear(&parser->m_tempPool); handleDefault = XML_FALSE; } } break; case XML_ROLE_ENTITY_VALUE: if (dtd->keepProcessing) { enum XML_Error result = storeEntityValue( parser, enc, s + enc->minBytesPerChar, next - enc->minBytesPerChar); if (parser->m_declEntity) { parser->m_declEntity->textPtr = poolStart(&dtd->entityValuePool); parser->m_declEntity->textLen = (int)(poolLength(&dtd->entityValuePool)); poolFinish(&dtd->entityValuePool); if (parser->m_entityDeclHandler) { *eventEndPP = s; parser->m_entityDeclHandler( parser->m_handlerArg, parser->m_declEntity->name, parser->m_declEntity->is_param, parser->m_declEntity->textPtr, parser->m_declEntity->textLen, parser->m_curBase, 0, 0, 0); handleDefault = XML_FALSE; } } else poolDiscard(&dtd->entityValuePool); if (result != XML_ERROR_NONE) return result; } break; case XML_ROLE_DOCTYPE_SYSTEM_ID: #ifdef XML_DTD parser->m_useForeignDTD = XML_FALSE; #endif /* XML_DTD */ dtd->hasParamEntityRefs = XML_TRUE; if (parser->m_startDoctypeDeclHandler) { parser->m_doctypeSysid = poolStoreString(&parser->m_tempPool, enc, s + enc->minBytesPerChar, next - enc->minBytesPerChar); if (parser->m_doctypeSysid == NULL) return XML_ERROR_NO_MEMORY; poolFinish(&parser->m_tempPool); handleDefault = XML_FALSE; } #ifdef XML_DTD else /* use externalSubsetName to make parser->m_doctypeSysid non-NULL for the case where no parser->m_startDoctypeDeclHandler is set */ parser->m_doctypeSysid = externalSubsetName; #endif /* XML_DTD */ if (! dtd->standalone #ifdef XML_DTD && ! parser->m_paramEntityParsing #endif /* XML_DTD */ && parser->m_notStandaloneHandler && ! 
parser->m_notStandaloneHandler(parser->m_handlerArg)) return XML_ERROR_NOT_STANDALONE; #ifndef XML_DTD break; #else /* XML_DTD */ if (! parser->m_declEntity) { parser->m_declEntity = (ENTITY *)lookup( parser, &dtd->paramEntities, externalSubsetName, sizeof(ENTITY)); if (! parser->m_declEntity) return XML_ERROR_NO_MEMORY; parser->m_declEntity->publicId = NULL; } #endif /* XML_DTD */ /* fall through */ case XML_ROLE_ENTITY_SYSTEM_ID: if (dtd->keepProcessing && parser->m_declEntity) { parser->m_declEntity->systemId = poolStoreString(&dtd->pool, enc, s + enc->minBytesPerChar, next - enc->minBytesPerChar); if (! parser->m_declEntity->systemId) return XML_ERROR_NO_MEMORY; parser->m_declEntity->base = parser->m_curBase; poolFinish(&dtd->pool); /* Don't suppress the default handler if we fell through from * the XML_ROLE_DOCTYPE_SYSTEM_ID case. */ if (parser->m_entityDeclHandler && role == XML_ROLE_ENTITY_SYSTEM_ID) handleDefault = XML_FALSE; } break; case XML_ROLE_ENTITY_COMPLETE: if (dtd->keepProcessing && parser->m_declEntity && parser->m_entityDeclHandler) { *eventEndPP = s; parser->m_entityDeclHandler( parser->m_handlerArg, parser->m_declEntity->name, parser->m_declEntity->is_param, 0, 0, parser->m_declEntity->base, parser->m_declEntity->systemId, parser->m_declEntity->publicId, 0); handleDefault = XML_FALSE; } break; case XML_ROLE_ENTITY_NOTATION_NAME: if (dtd->keepProcessing && parser->m_declEntity) { parser->m_declEntity->notation = poolStoreString(&dtd->pool, enc, s, next); if (! 
parser->m_declEntity->notation) return XML_ERROR_NO_MEMORY; poolFinish(&dtd->pool); if (parser->m_unparsedEntityDeclHandler) { *eventEndPP = s; parser->m_unparsedEntityDeclHandler( parser->m_handlerArg, parser->m_declEntity->name, parser->m_declEntity->base, parser->m_declEntity->systemId, parser->m_declEntity->publicId, parser->m_declEntity->notation); handleDefault = XML_FALSE; } else if (parser->m_entityDeclHandler) { *eventEndPP = s; parser->m_entityDeclHandler( parser->m_handlerArg, parser->m_declEntity->name, 0, 0, 0, parser->m_declEntity->base, parser->m_declEntity->systemId, parser->m_declEntity->publicId, parser->m_declEntity->notation); handleDefault = XML_FALSE; } } break; case XML_ROLE_GENERAL_ENTITY_NAME: { if (XmlPredefinedEntityName(enc, s, next)) { parser->m_declEntity = NULL; break; } if (dtd->keepProcessing) { const XML_Char *name = poolStoreString(&dtd->pool, enc, s, next); if (! name) return XML_ERROR_NO_MEMORY; parser->m_declEntity = (ENTITY *)lookup(parser, &dtd->generalEntities, name, sizeof(ENTITY)); if (! parser->m_declEntity) return XML_ERROR_NO_MEMORY; if (parser->m_declEntity->name != name) { poolDiscard(&dtd->pool); parser->m_declEntity = NULL; } else { poolFinish(&dtd->pool); parser->m_declEntity->publicId = NULL; parser->m_declEntity->is_param = XML_FALSE; /* if we have a parent parser or are reading an internal parameter entity, then the entity declaration is not considered "internal" */ parser->m_declEntity->is_internal = ! (parser->m_parentParser || parser->m_openInternalEntities); if (parser->m_entityDeclHandler) handleDefault = XML_FALSE; } } else { poolDiscard(&dtd->pool); parser->m_declEntity = NULL; } } break; case XML_ROLE_PARAM_ENTITY_NAME: #ifdef XML_DTD if (dtd->keepProcessing) { const XML_Char *name = poolStoreString(&dtd->pool, enc, s, next); if (! name) return XML_ERROR_NO_MEMORY; parser->m_declEntity = (ENTITY *)lookup(parser, &dtd->paramEntities, name, sizeof(ENTITY)); if (! 
parser->m_declEntity) return XML_ERROR_NO_MEMORY; if (parser->m_declEntity->name != name) { poolDiscard(&dtd->pool); parser->m_declEntity = NULL; } else { poolFinish(&dtd->pool); parser->m_declEntity->publicId = NULL; parser->m_declEntity->is_param = XML_TRUE; /* if we have a parent parser or are reading an internal parameter entity, then the entity declaration is not considered "internal" */ parser->m_declEntity->is_internal = ! (parser->m_parentParser || parser->m_openInternalEntities); if (parser->m_entityDeclHandler) handleDefault = XML_FALSE; } } else { poolDiscard(&dtd->pool); parser->m_declEntity = NULL; } #else /* not XML_DTD */ parser->m_declEntity = NULL; #endif /* XML_DTD */ break; case XML_ROLE_NOTATION_NAME: parser->m_declNotationPublicId = NULL; parser->m_declNotationName = NULL; if (parser->m_notationDeclHandler) { parser->m_declNotationName = poolStoreString(&parser->m_tempPool, enc, s, next); if (! parser->m_declNotationName) return XML_ERROR_NO_MEMORY; poolFinish(&parser->m_tempPool); handleDefault = XML_FALSE; } break; case XML_ROLE_NOTATION_PUBLIC_ID: if (! XmlIsPublicId(enc, s, next, eventPP)) return XML_ERROR_PUBLICID; if (parser ->m_declNotationName) { /* means m_notationDeclHandler != NULL */ XML_Char *tem = poolStoreString(&parser->m_tempPool, enc, s + enc->minBytesPerChar, next - enc->minBytesPerChar); if (! tem) return XML_ERROR_NO_MEMORY; normalizePublicId(tem); parser->m_declNotationPublicId = tem; poolFinish(&parser->m_tempPool); handleDefault = XML_FALSE; } break; case XML_ROLE_NOTATION_SYSTEM_ID: if (parser->m_declNotationName && parser->m_notationDeclHandler) { const XML_Char *systemId = poolStoreString(&parser->m_tempPool, enc, s + enc->minBytesPerChar, next - enc->minBytesPerChar); if (! 
systemId) return XML_ERROR_NO_MEMORY; *eventEndPP = s; parser->m_notationDeclHandler( parser->m_handlerArg, parser->m_declNotationName, parser->m_curBase, systemId, parser->m_declNotationPublicId); handleDefault = XML_FALSE; } poolClear(&parser->m_tempPool); break; case XML_ROLE_NOTATION_NO_SYSTEM_ID: if (parser->m_declNotationPublicId && parser->m_notationDeclHandler) { *eventEndPP = s; parser->m_notationDeclHandler( parser->m_handlerArg, parser->m_declNotationName, parser->m_curBase, 0, parser->m_declNotationPublicId); handleDefault = XML_FALSE; } poolClear(&parser->m_tempPool); break; case XML_ROLE_ERROR: switch (tok) { case XML_TOK_PARAM_ENTITY_REF: /* PE references in internal subset are not allowed within declarations. */ return XML_ERROR_PARAM_ENTITY_REF; case XML_TOK_XML_DECL: return XML_ERROR_MISPLACED_XML_PI; default: return XML_ERROR_SYNTAX; } #ifdef XML_DTD case XML_ROLE_IGNORE_SECT: { enum XML_Error result; if (parser->m_defaultHandler) reportDefault(parser, enc, s, next); handleDefault = XML_FALSE; result = doIgnoreSection(parser, enc, &next, end, nextPtr, haveMore); if (result != XML_ERROR_NONE) return result; else if (! next) { parser->m_processor = ignoreSectionProcessor; return result; } } break; #endif /* XML_DTD */ case XML_ROLE_GROUP_OPEN: if (parser->m_prologState.level >= parser->m_groupSize) { if (parser->m_groupSize) { { char *const new_connector = (char *)REALLOC( parser, parser->m_groupConnector, parser->m_groupSize *= 2); if (new_connector == NULL) { parser->m_groupSize /= 2; return XML_ERROR_NO_MEMORY; } parser->m_groupConnector = new_connector; } if (dtd->scaffIndex) { int *const new_scaff_index = (int *)REALLOC( parser, dtd->scaffIndex, parser->m_groupSize * sizeof(int)); if (new_scaff_index == NULL) return XML_ERROR_NO_MEMORY; dtd->scaffIndex = new_scaff_index; } } else { parser->m_groupConnector = (char *)MALLOC(parser, parser->m_groupSize = 32); if (! 
parser->m_groupConnector) { parser->m_groupSize = 0; return XML_ERROR_NO_MEMORY; } } } parser->m_groupConnector[parser->m_prologState.level] = 0; if (dtd->in_eldecl) { int myindex = nextScaffoldPart(parser); if (myindex < 0) return XML_ERROR_NO_MEMORY; assert(dtd->scaffIndex != NULL); dtd->scaffIndex[dtd->scaffLevel] = myindex; dtd->scaffLevel++; dtd->scaffold[myindex].type = XML_CTYPE_SEQ; if (parser->m_elementDeclHandler) handleDefault = XML_FALSE; } break; case XML_ROLE_GROUP_SEQUENCE: if (parser->m_groupConnector[parser->m_prologState.level] == ASCII_PIPE) return XML_ERROR_SYNTAX; parser->m_groupConnector[parser->m_prologState.level] = ASCII_COMMA; if (dtd->in_eldecl && parser->m_elementDeclHandler) handleDefault = XML_FALSE; break; case XML_ROLE_GROUP_CHOICE: if (parser->m_groupConnector[parser->m_prologState.level] == ASCII_COMMA) return XML_ERROR_SYNTAX; if (dtd->in_eldecl && ! parser->m_groupConnector[parser->m_prologState.level] && (dtd->scaffold[dtd->scaffIndex[dtd->scaffLevel - 1]].type != XML_CTYPE_MIXED)) { dtd->scaffold[dtd->scaffIndex[dtd->scaffLevel - 1]].type = XML_CTYPE_CHOICE; if (parser->m_elementDeclHandler) handleDefault = XML_FALSE; } parser->m_groupConnector[parser->m_prologState.level] = ASCII_PIPE; break; case XML_ROLE_PARAM_ENTITY_REF: #ifdef XML_DTD case XML_ROLE_INNER_PARAM_ENTITY_REF: dtd->hasParamEntityRefs = XML_TRUE; if (! parser->m_paramEntityParsing) dtd->keepProcessing = dtd->standalone; else { const XML_Char *name; ENTITY *entity; name = poolStoreString(&dtd->pool, enc, s + enc->minBytesPerChar, next - enc->minBytesPerChar); if (! name) return XML_ERROR_NO_MEMORY; entity = (ENTITY *)lookup(parser, &dtd->paramEntities, name, 0); poolDiscard(&dtd->pool); /* first, determine if a check for an existing declaration is needed; if yes, check that the entity exists, and that it is internal, otherwise call the skipped entity handler */ if (parser->m_prologState.documentEntity && (dtd->standalone ? ! parser->m_openInternalEntities : ! 
dtd->hasParamEntityRefs)) { if (! entity) return XML_ERROR_UNDEFINED_ENTITY; else if (! entity->is_internal) { /* It's hard to exhaustively search the code to be sure, * but there doesn't seem to be a way of executing the * following line. There are two cases: * * If 'standalone' is false, the DTD must have no * parameter entities or we wouldn't have passed the outer * 'if' statement. That measn the only entity in the hash * table is the external subset name "#" which cannot be * given as a parameter entity name in XML syntax, so the * lookup must have returned NULL and we don't even reach * the test for an internal entity. * * If 'standalone' is true, it does not seem to be * possible to create entities taking this code path that * are not internal entities, so fail the test above. * * Because this analysis is very uncertain, the code is * being left in place and merely removed from the * coverage test statistics. */ return XML_ERROR_ENTITY_DECLARED_IN_PE; /* LCOV_EXCL_LINE */ } } else if (! entity) { dtd->keepProcessing = dtd->standalone; /* cannot report skipped entities in declarations */ if ((role == XML_ROLE_PARAM_ENTITY_REF) && parser->m_skippedEntityHandler) { parser->m_skippedEntityHandler(parser->m_handlerArg, name, 1); handleDefault = XML_FALSE; } break; } if (entity->open) return XML_ERROR_RECURSIVE_ENTITY_REF; if (entity->textPtr) { enum XML_Error result; XML_Bool betweenDecl = (role == XML_ROLE_PARAM_ENTITY_REF ? XML_TRUE : XML_FALSE); result = processInternalEntity(parser, entity, betweenDecl); if (result != XML_ERROR_NONE) return result; handleDefault = XML_FALSE; break; } if (parser->m_externalEntityRefHandler) { dtd->paramEntityRead = XML_FALSE; entity->open = XML_TRUE; if (! parser->m_externalEntityRefHandler( parser->m_externalEntityRefHandlerArg, 0, entity->base, entity->systemId, entity->publicId)) { entity->open = XML_FALSE; return XML_ERROR_EXTERNAL_ENTITY_HANDLING; } entity->open = XML_FALSE; handleDefault = XML_FALSE; if (! 
dtd->paramEntityRead) { dtd->keepProcessing = dtd->standalone; break; } } else { dtd->keepProcessing = dtd->standalone; break; } } #endif /* XML_DTD */ if (! dtd->standalone && parser->m_notStandaloneHandler && ! parser->m_notStandaloneHandler(parser->m_handlerArg)) return XML_ERROR_NOT_STANDALONE; break; /* Element declaration stuff */ case XML_ROLE_ELEMENT_NAME: if (parser->m_elementDeclHandler) { parser->m_declElementType = getElementType(parser, enc, s, next); if (! parser->m_declElementType) return XML_ERROR_NO_MEMORY; dtd->scaffLevel = 0; dtd->scaffCount = 0; dtd->in_eldecl = XML_TRUE; handleDefault = XML_FALSE; } break; case XML_ROLE_CONTENT_ANY: case XML_ROLE_CONTENT_EMPTY: if (dtd->in_eldecl) { if (parser->m_elementDeclHandler) { XML_Content *content = (XML_Content *)MALLOC(parser, sizeof(XML_Content)); if (! content) return XML_ERROR_NO_MEMORY; content->quant = XML_CQUANT_NONE; content->name = NULL; content->numchildren = 0; content->children = NULL; content->type = ((role == XML_ROLE_CONTENT_ANY) ? XML_CTYPE_ANY : XML_CTYPE_EMPTY); *eventEndPP = s; parser->m_elementDeclHandler( parser->m_handlerArg, parser->m_declElementType->name, content); handleDefault = XML_FALSE; } dtd->in_eldecl = XML_FALSE; } break; case XML_ROLE_CONTENT_PCDATA: if (dtd->in_eldecl) { dtd->scaffold[dtd->scaffIndex[dtd->scaffLevel - 1]].type = XML_CTYPE_MIXED; if (parser->m_elementDeclHandler) handleDefault = XML_FALSE; } break; case XML_ROLE_CONTENT_ELEMENT: quant = XML_CQUANT_NONE; goto elementContent; case XML_ROLE_CONTENT_ELEMENT_OPT: quant = XML_CQUANT_OPT; goto elementContent; case XML_ROLE_CONTENT_ELEMENT_REP: quant = XML_CQUANT_REP; goto elementContent; case XML_ROLE_CONTENT_ELEMENT_PLUS: quant = XML_CQUANT_PLUS; elementContent: if (dtd->in_eldecl) { ELEMENT_TYPE *el; const XML_Char *name; int nameLen; const char *nxt = (quant == XML_CQUANT_NONE ? 
next : next - enc->minBytesPerChar); int myindex = nextScaffoldPart(parser); if (myindex < 0) return XML_ERROR_NO_MEMORY; dtd->scaffold[myindex].type = XML_CTYPE_NAME; dtd->scaffold[myindex].quant = quant; el = getElementType(parser, enc, s, nxt); if (! el) return XML_ERROR_NO_MEMORY; name = el->name; dtd->scaffold[myindex].name = name; nameLen = 0; for (; name[nameLen++];) ; dtd->contentStringLen += nameLen; if (parser->m_elementDeclHandler) handleDefault = XML_FALSE; } break; case XML_ROLE_GROUP_CLOSE: quant = XML_CQUANT_NONE; goto closeGroup; case XML_ROLE_GROUP_CLOSE_OPT: quant = XML_CQUANT_OPT; goto closeGroup; case XML_ROLE_GROUP_CLOSE_REP: quant = XML_CQUANT_REP; goto closeGroup; case XML_ROLE_GROUP_CLOSE_PLUS: quant = XML_CQUANT_PLUS; closeGroup: if (dtd->in_eldecl) { if (parser->m_elementDeclHandler) handleDefault = XML_FALSE; dtd->scaffLevel--; dtd->scaffold[dtd->scaffIndex[dtd->scaffLevel]].quant = quant; if (dtd->scaffLevel == 0) { if (! handleDefault) { XML_Content *model = build_model(parser); if (! model) return XML_ERROR_NO_MEMORY; *eventEndPP = s; parser->m_elementDeclHandler( parser->m_handlerArg, parser->m_declElementType->name, model); } dtd->in_eldecl = XML_FALSE; dtd->contentStringLen = 0; } } break; /* End element declaration stuff */ case XML_ROLE_PI: if (! reportProcessingInstruction(parser, enc, s, next)) return XML_ERROR_NO_MEMORY; handleDefault = XML_FALSE; break; case XML_ROLE_COMMENT: if (! 
reportComment(parser, enc, s, next))
        return XML_ERROR_NO_MEMORY;
      handleDefault = XML_FALSE;
      break;
    case XML_ROLE_NONE:
      switch (tok) {
      case XML_TOK_BOM:
        handleDefault = XML_FALSE;
        break;
      }
      break;
    case XML_ROLE_DOCTYPE_NONE:
      if (parser->m_startDoctypeDeclHandler)
        handleDefault = XML_FALSE;
      break;
    case XML_ROLE_ENTITY_NONE:
      if (dtd->keepProcessing && parser->m_entityDeclHandler)
        handleDefault = XML_FALSE;
      break;
    case XML_ROLE_NOTATION_NONE:
      if (parser->m_notationDeclHandler)
        handleDefault = XML_FALSE;
      break;
    case XML_ROLE_ATTLIST_NONE:
      if (dtd->keepProcessing && parser->m_attlistDeclHandler)
        handleDefault = XML_FALSE;
      break;
    case XML_ROLE_ELEMENT_NONE:
      if (parser->m_elementDeclHandler)
        handleDefault = XML_FALSE;
      break;
    } /* end of big switch */

    /* Pass the token to the default handler unless a more specific
     * handler above already consumed it. */
    if (handleDefault && parser->m_defaultHandler)
      reportDefault(parser, enc, s, next);

    /* Honour suspend/abort requests made by a handler during this
     * token; otherwise advance to the next prolog token. */
    switch (parser->m_parsingStatus.parsing) {
    case XML_SUSPENDED:
      *nextPtr = next;
      return XML_ERROR_NONE;
    case XML_FINISHED:
      return XML_ERROR_ABORTED;
    default:
      s = next;
      tok = XmlPrologTok(enc, s, end, &next);
    }
  }
  /* not reached */
}

/* Processor for the epilog, i.e. content after the document element.
 * Only whitespace, comments and processing instructions are accepted;
 * anything else yields XML_ERROR_JUNK_AFTER_DOC_ELEMENT. */
static enum XML_Error PTRCALL
epilogProcessor(XML_Parser parser, const char *s, const char *end,
                const char **nextPtr) {
  parser->m_processor = epilogProcessor;
  parser->m_eventPtr = s;
  for (;;) {
    const char *next = NULL;
    int tok = XmlPrologTok(parser->m_encoding, s, end, &next);
    parser->m_eventEndPtr = next;
    switch (tok) {
    /* report partial linebreak - it might be the last token */
    case -XML_TOK_PROLOG_S:
      if (parser->m_defaultHandler) {
        reportDefault(parser, parser->m_encoding, s, next);
        if (parser->m_parsingStatus.parsing == XML_FINISHED)
          return XML_ERROR_ABORTED;
      }
      *nextPtr = next;
      return XML_ERROR_NONE;
    case XML_TOK_NONE:
      *nextPtr = s;
      return XML_ERROR_NONE;
    case XML_TOK_PROLOG_S:
      if (parser->m_defaultHandler)
        reportDefault(parser, parser->m_encoding, s, next);
      break;
    case XML_TOK_PI:
      if (! reportProcessingInstruction(parser, parser->m_encoding, s, next))
        return XML_ERROR_NO_MEMORY;
      break;
    case XML_TOK_COMMENT:
      if (! reportComment(parser, parser->m_encoding, s, next))
        return XML_ERROR_NO_MEMORY;
      break;
    case XML_TOK_INVALID:
      parser->m_eventPtr = next;
      return XML_ERROR_INVALID_TOKEN;
    case XML_TOK_PARTIAL:
      /* Token ran off the end of this buffer: OK unless it was the
       * final buffer, in which case the token is unclosed. */
      if (! parser->m_parsingStatus.finalBuffer) {
        *nextPtr = s;
        return XML_ERROR_NONE;
      }
      return XML_ERROR_UNCLOSED_TOKEN;
    case XML_TOK_PARTIAL_CHAR:
      if (! parser->m_parsingStatus.finalBuffer) {
        *nextPtr = s;
        return XML_ERROR_NONE;
      }
      return XML_ERROR_PARTIAL_CHAR;
    default:
      return XML_ERROR_JUNK_AFTER_DOC_ELEMENT;
    }
    parser->m_eventPtr = s = next;
    /* Honour suspend/abort requested by a handler. */
    switch (parser->m_parsingStatus.parsing) {
    case XML_SUSPENDED:
      *nextPtr = next;
      return XML_ERROR_NONE;
    case XML_FINISHED:
      return XML_ERROR_ABORTED;
    default:;
    }
  }
}

/* Parse the replacement text of an internal entity in place (no child
 * parser is created).  'betweenDecl' records whether the reference
 * occurred between declarations (XML_ROLE_PARAM_ENTITY_REF in the
 * caller); it is stored on the open-entity record for use when
 * resuming.  Returns XML_ERROR_NONE on success. */
static enum XML_Error
processInternalEntity(XML_Parser parser, ENTITY *entity, XML_Bool betweenDecl) {
  const char *textStart, *textEnd;
  const char *next;
  enum XML_Error result;
  OPEN_INTERNAL_ENTITY *openEntity;

  /* Reuse a free OPEN_INTERNAL_ENTITY record if available, otherwise
   * allocate a fresh one. */
  if (parser->m_freeInternalEntities) {
    openEntity = parser->m_freeInternalEntities;
    parser->m_freeInternalEntities = openEntity->next;
  } else {
    openEntity
        = (OPEN_INTERNAL_ENTITY *)MALLOC(parser, sizeof(OPEN_INTERNAL_ENTITY));
    if (!
openEntity)
      return XML_ERROR_NO_MEMORY;
  }
  entity->open = XML_TRUE; /* guards against recursive references */
  entity->processed = 0;
  /* Push onto the stack of currently open internal entities. */
  openEntity->next = parser->m_openInternalEntities;
  parser->m_openInternalEntities = openEntity;
  openEntity->entity = entity;
  openEntity->startTagLevel = parser->m_tagLevel;
  openEntity->betweenDecl = betweenDecl;
  openEntity->internalEventPtr = NULL;
  openEntity->internalEventEndPtr = NULL;
  textStart = (char *)entity->textPtr;
  textEnd = (char *)(entity->textPtr + entity->textLen);
  /* Set a safe default value in case 'next' does not get set */
  next = textStart;

#ifdef XML_DTD
  if (entity->is_param) {
    int tok
        = XmlPrologTok(parser->m_internalEncoding, textStart, textEnd, &next);
    result = doProlog(parser, parser->m_internalEncoding, textStart, textEnd,
                      tok, next, &next, XML_FALSE);
  } else
#endif /* XML_DTD */
    result = doContent(parser, parser->m_tagLevel, parser->m_internalEncoding,
                       textStart, textEnd, &next, XML_FALSE);

  if (result == XML_ERROR_NONE) {
    if (textEnd != next && parser->m_parsingStatus.parsing == XML_SUSPENDED) {
      /* Suspended mid-entity: remember how far we got so that
       * internalEntityProcessor can resume from there. */
      entity->processed = (int)(next - textStart);
      parser->m_processor = internalEntityProcessor;
    } else {
      /* Entity fully consumed: pop it and recycle the record. */
      entity->open = XML_FALSE;
      parser->m_openInternalEntities = openEntity->next;
      /* put openEntity back in list of free instances */
      openEntity->next = parser->m_freeInternalEntities;
      parser->m_freeInternalEntities = openEntity;
    }
  }
  return result;
}

/* Processor used to resume parsing of a partially-consumed internal
 * entity after a suspend, then to continue with the enclosing
 * document text. */
static enum XML_Error PTRCALL
internalEntityProcessor(XML_Parser parser, const char *s, const char *end,
                        const char **nextPtr) {
  ENTITY *entity;
  const char *textStart, *textEnd;
  const char *next;
  enum XML_Error result;
  OPEN_INTERNAL_ENTITY *openEntity = parser->m_openInternalEntities;
  if (!
openEntity)
    return XML_ERROR_UNEXPECTED_STATE;
  entity = openEntity->entity;
  /* Resume from the offset consumed before the suspend. */
  textStart = ((char *)entity->textPtr) + entity->processed;
  textEnd = (char *)(entity->textPtr + entity->textLen);
  /* Set a safe default value in case 'next' does not get set */
  next = textStart;

#ifdef XML_DTD
  if (entity->is_param) {
    int tok
        = XmlPrologTok(parser->m_internalEncoding, textStart, textEnd, &next);
    result = doProlog(parser, parser->m_internalEncoding, textStart, textEnd,
                      tok, next, &next, XML_FALSE);
  } else
#endif /* XML_DTD */
    result = doContent(parser, openEntity->startTagLevel,
                       parser->m_internalEncoding, textStart, textEnd, &next,
                       XML_FALSE);

  if (result != XML_ERROR_NONE)
    return result;
  else if (textEnd != next
           && parser->m_parsingStatus.parsing == XML_SUSPENDED) {
    /* Suspended again: record the new resume offset. */
    entity->processed = (int)(next - (char *)entity->textPtr);
    return result;
  } else {
    /* Entity fully consumed: pop it and recycle the record. */
    entity->open = XML_FALSE;
    parser->m_openInternalEntities = openEntity->next;
    /* put openEntity back in list of free instances */
    openEntity->next = parser->m_freeInternalEntities;
    parser->m_freeInternalEntities = openEntity;
  }

  /* Continue parsing the enclosing text with the regular processor. */
#ifdef XML_DTD
  if (entity->is_param) {
    int tok;
    parser->m_processor = prologProcessor;
    tok = XmlPrologTok(parser->m_encoding, s, end, &next);
    return doProlog(parser, parser->m_encoding, s, end, tok, next, nextPtr,
                    (XML_Bool)! parser->m_parsingStatus.finalBuffer);
  } else
#endif /* XML_DTD */
  {
    parser->m_processor = contentProcessor;
    /* see externalEntityContentProcessor vs contentProcessor */
    return doContent(parser, parser->m_parentParser ? 1 : 0,
                     parser->m_encoding, s, end, nextPtr,
                     (XML_Bool)! parser->m_parsingStatus.finalBuffer);
  }
}

/* Processor installed after a fatal error: any further parse attempt
 * simply returns the stored error code. */
static enum XML_Error PTRCALL
errorProcessor(XML_Parser parser, const char *s, const char *end,
               const char **nextPtr) {
  UNUSED_P(s);
  UNUSED_P(end);
  UNUSED_P(nextPtr);
  return parser->m_errorCode;
}

/* Normalize and store an attribute value into 'pool': append the
 * (possibly entity-expanded) value via appendAttributeValue(), strip a
 * single trailing space for non-CDATA attributes, and NUL-terminate. */
static enum XML_Error
storeAttributeValue(XML_Parser parser, const ENCODING *enc, XML_Bool isCdata,
                    const char *ptr, const char *end, STRING_POOL *pool) {
  enum XML_Error result
      = appendAttributeValue(parser, enc, isCdata, ptr, end, pool);
  if (result)
    return result;
  /* Non-CDATA attribute values have a trailing space removed. */
  if (! isCdata && poolLength(pool) && poolLastChar(pool) == 0x20)
    poolChop(pool);
  if (! poolAppendChar(pool, XML_T('\0')))
    return XML_ERROR_NO_MEMORY;
  return XML_ERROR_NONE;
}

/* Tokenize an attribute value, expanding character references and
 * internal general entity references (recursively) and, for non-CDATA
 * attributes, collapsing whitespace; the result is appended to 'pool'.
 * 'pool' also tells us the calling context: &dtd->pool means we were
 * called from the prolog, otherwise from content. */
static enum XML_Error
appendAttributeValue(XML_Parser parser, const ENCODING *enc, XML_Bool isCdata,
                     const char *ptr, const char *end, STRING_POOL *pool) {
  DTD *const dtd = parser->m_dtd; /* save one level of indirection */
  for (;;) {
    const char *next;
    int tok = XmlAttributeValueTok(enc, ptr, end, &next);
    switch (tok) {
    case XML_TOK_NONE:
      return XML_ERROR_NONE;
    case XML_TOK_INVALID:
      if (enc == parser->m_encoding)
        parser->m_eventPtr = next;
      return XML_ERROR_INVALID_TOKEN;
    case XML_TOK_PARTIAL:
      if (enc == parser->m_encoding)
        parser->m_eventPtr = ptr;
      return XML_ERROR_INVALID_TOKEN;
    case XML_TOK_CHAR_REF: {
      XML_Char buf[XML_ENCODE_MAX];
      int i;
      int n = XmlCharRefNumber(enc, ptr);
      if (n < 0) {
        if (enc == parser->m_encoding)
          parser->m_eventPtr = ptr;
        return XML_ERROR_BAD_CHAR_REF;
      }
      /* For non-CDATA attributes, drop a space that would be leading
       * or doubled. */
      if (! isCdata && n == 0x20 /* space */
          && (poolLength(pool) == 0 || poolLastChar(pool) == 0x20))
        break;
      n = XmlEncode(n, (ICHAR *)buf);
      /* The XmlEncode() functions can never return 0 here.  That
       * error return happens if the code point passed in is either
       * negative or greater than or equal to 0x110000.  The
       * XmlCharRefNumber() functions will all return a number
       * strictly less than 0x110000 or a negative value if an error
       * occurred.
The negative value is intercepted above, so
       * XmlEncode() is never passed a value it might return an
       * error for. */
      for (i = 0; i < n; i++) {
        if (! poolAppendChar(pool, buf[i]))
          return XML_ERROR_NO_MEMORY;
      }
    } break;
    case XML_TOK_DATA_CHARS:
      if (! poolAppend(pool, enc, ptr, next))
        return XML_ERROR_NO_MEMORY;
      break;
    case XML_TOK_TRAILING_CR:
      next = ptr + enc->minBytesPerChar;
      /* fall through */
    case XML_TOK_ATTRIBUTE_VALUE_S:
    case XML_TOK_DATA_NEWLINE:
      /* Whitespace becomes a single 0x20; for non-CDATA attributes a
       * leading or doubled space is dropped entirely. */
      if (! isCdata && (poolLength(pool) == 0 || poolLastChar(pool) == 0x20))
        break;
      if (! poolAppendChar(pool, 0x20))
        return XML_ERROR_NO_MEMORY;
      break;
    case XML_TOK_ENTITY_REF: {
      const XML_Char *name;
      ENTITY *entity;
      char checkEntityDecl;
      /* Predefined entities expand directly to a single character. */
      XML_Char ch = (XML_Char)XmlPredefinedEntityName(
          enc, ptr + enc->minBytesPerChar, next - enc->minBytesPerChar);
      if (ch) {
        if (! poolAppendChar(pool, ch))
          return XML_ERROR_NO_MEMORY;
        break;
      }
      name = poolStoreString(&parser->m_temp2Pool, enc,
                             ptr + enc->minBytesPerChar,
                             next - enc->minBytesPerChar);
      if (! name)
        return XML_ERROR_NO_MEMORY;
      entity = (ENTITY *)lookup(parser, &dtd->generalEntities, name, 0);
      poolDiscard(&parser->m_temp2Pool);
      /* First, determine if a check for an existing declaration is
         needed; if yes, check that the entity exists, and that it
         is internal. */
      if (pool == &dtd->pool) /* are we called from prolog? */
        checkEntityDecl =
#ifdef XML_DTD
            parser->m_prologState.documentEntity &&
#endif /* XML_DTD */
            (dtd->standalone ? ! parser->m_openInternalEntities
                             : ! dtd->hasParamEntityRefs);
      else /* if (pool == &parser->m_tempPool): we are called from content */
        checkEntityDecl = ! dtd->hasParamEntityRefs || dtd->standalone;
      if (checkEntityDecl) {
        if (! entity)
          return XML_ERROR_UNDEFINED_ENTITY;
        else if (! entity->is_internal)
          return XML_ERROR_ENTITY_DECLARED_IN_PE;
      } else if (! entity) {
        /* Cannot report skipped entity here - see comments on
           parser->m_skippedEntityHandler.
           if (parser->m_skippedEntityHandler)
             parser->m_skippedEntityHandler(parser->m_handlerArg, name, 0);
        */
        /* Cannot call the default handler because this would be
           out of sync with the call to the startElementHandler.
           if ((pool == &parser->m_tempPool) && parser->m_defaultHandler)
             reportDefault(parser, enc, ptr, next);
        */
        break;
      }
      if (entity->open) {
        if (enc == parser->m_encoding) {
          /* It does not appear that this line can be executed.
           *
           * The "if (entity->open)" check catches recursive entity
           * definitions.  In order to be called with an open
           * entity, it must have gone through this code before and
           * been through the recursive call to
           * appendAttributeValue() some lines below.  That call
           * sets the local encoding ("enc") to the parser's
           * internal encoding (internal_utf8 or internal_utf16),
           * which can never be the same as the principle encoding.
           * It doesn't appear there is another code path that gets
           * here with entity->open being TRUE.
           *
           * Since it is not certain that this logic is watertight,
           * we keep the line and merely exclude it from coverage
           * tests. */
          parser->m_eventPtr = ptr; /* LCOV_EXCL_LINE */
        }
        return XML_ERROR_RECURSIVE_ENTITY_REF;
      }
      if (entity->notation) {
        /* Unparsed (notation) entities are not allowed here. */
        if (enc == parser->m_encoding)
          parser->m_eventPtr = ptr;
        return XML_ERROR_BINARY_ENTITY_REF;
      }
      if (! entity->textPtr) {
        /* External entities are not allowed in attribute values. */
        if (enc == parser->m_encoding)
          parser->m_eventPtr = ptr;
        return XML_ERROR_ATTRIBUTE_EXTERNAL_ENTITY_REF;
      } else {
        /* Internal entity: recursively append its replacement text,
         * with 'open' set to catch recursion. */
        enum XML_Error result;
        const XML_Char *textEnd = entity->textPtr + entity->textLen;
        entity->open = XML_TRUE;
        result = appendAttributeValue(parser, parser->m_internalEncoding,
                                      isCdata, (char *)entity->textPtr,
                                      (char *)textEnd, pool);
        entity->open = XML_FALSE;
        if (result)
          return result;
      }
    } break;
    default:
      /* The only token returned by XmlAttributeValueTok() that does
       * not have an explicit case here is XML_TOK_PARTIAL_CHAR.
       * Getting that would require an entity name to contain an
       * incomplete XML character (e.g.
\xE2\x82); however previous
       * tokenisers will have already recognised and rejected such
       * names before XmlAttributeValueTok() gets a look-in.  This
       * default case should be retained as a safety net, but the code
       * excluded from coverage tests.
       *
       * LCOV_EXCL_START */
      if (enc == parser->m_encoding)
        parser->m_eventPtr = ptr;
      return XML_ERROR_UNEXPECTED_STATE;
      /* LCOV_EXCL_STOP */
    }
    ptr = next;
  }
  /* not reached */
}

/* Store the replacement text of an entity declaration into
 * dtd->entityValuePool, expanding character references; parameter
 * entity references are expanded only when reading a parameter entity
 * or a foreign-encoded (external) source, otherwise they are a
 * well-formedness error.  General entity references are copied
 * through literally.  Returns XML_ERROR_NONE on success. */
static enum XML_Error
storeEntityValue(XML_Parser parser, const ENCODING *enc,
                 const char *entityTextPtr, const char *entityTextEnd) {
  DTD *const dtd = parser->m_dtd; /* save one level of indirection */
  STRING_POOL *pool = &(dtd->entityValuePool);
  enum XML_Error result = XML_ERROR_NONE;
#ifdef XML_DTD
  int oldInEntityValue = parser->m_prologState.inEntityValue;
  parser->m_prologState.inEntityValue = 1;
#endif /* XML_DTD */
  /* never return Null for the value argument in EntityDeclHandler,
     since this would indicate an external entity; therefore we
     have to make sure that entityValuePool.start is not null */
  if (! pool->blocks) {
    if (! poolGrow(pool))
      return XML_ERROR_NO_MEMORY;
  }

  for (;;) {
    const char *next;
    int tok = XmlEntityValueTok(enc, entityTextPtr, entityTextEnd, &next);
    switch (tok) {
    case XML_TOK_PARAM_ENTITY_REF:
#ifdef XML_DTD
      if (parser->m_isParamEntity || enc != parser->m_encoding) {
        const XML_Char *name;
        ENTITY *entity;
        name = poolStoreString(&parser->m_tempPool, enc,
                               entityTextPtr + enc->minBytesPerChar,
                               next - enc->minBytesPerChar);
        if (! name) {
          result = XML_ERROR_NO_MEMORY;
          goto endEntityValue;
        }
        entity = (ENTITY *)lookup(parser, &dtd->paramEntities, name, 0);
        poolDiscard(&parser->m_tempPool);
        if (! entity) {
          /* not a well-formedness error - see XML 1.0: WFC
             Entity Declared */
          /* cannot report skipped entity here - see comments on
             parser->m_skippedEntityHandler
             if (parser->m_skippedEntityHandler)
               parser->m_skippedEntityHandler(parser->m_handlerArg, name, 0);
          */
          dtd->keepProcessing = dtd->standalone;
          goto endEntityValue;
        }
        if (entity->open) {
          /* Entity refers to itself, directly or indirectly. */
          if (enc == parser->m_encoding)
            parser->m_eventPtr = entityTextPtr;
          result = XML_ERROR_RECURSIVE_ENTITY_REF;
          goto endEntityValue;
        }
        if (entity->systemId) {
          /* External parameter entity: delegate to the external
           * entity handler, if any. */
          if (parser->m_externalEntityRefHandler) {
            dtd->paramEntityRead = XML_FALSE;
            entity->open = XML_TRUE;
            if (! parser->m_externalEntityRefHandler(
                    parser->m_externalEntityRefHandlerArg, 0, entity->base,
                    entity->systemId, entity->publicId)) {
              entity->open = XML_FALSE;
              result = XML_ERROR_EXTERNAL_ENTITY_HANDLING;
              goto endEntityValue;
            }
            entity->open = XML_FALSE;
            if (! dtd->paramEntityRead)
              dtd->keepProcessing = dtd->standalone;
          } else
            dtd->keepProcessing = dtd->standalone;
        } else {
          /* Internal parameter entity: expand recursively, with
           * 'open' set to catch recursion. */
          entity->open = XML_TRUE;
          result = storeEntityValue(
              parser, parser->m_internalEncoding, (char *)entity->textPtr,
              (char *)(entity->textPtr + entity->textLen));
          entity->open = XML_FALSE;
          if (result)
            goto endEntityValue;
        }
        break;
      }
#endif /* XML_DTD */
      /* In the internal subset, PE references are not legal
         within markup declarations, e.g entity values in this case. */
      parser->m_eventPtr = entityTextPtr;
      result = XML_ERROR_PARAM_ENTITY_REF;
      goto endEntityValue;
    case XML_TOK_NONE:
      result = XML_ERROR_NONE;
      goto endEntityValue;
    case XML_TOK_ENTITY_REF:
    case XML_TOK_DATA_CHARS:
      /* General entity references are stored unexpanded, as raw
       * text. */
      if (! poolAppend(pool, enc, entityTextPtr, next)) {
        result = XML_ERROR_NO_MEMORY;
        goto endEntityValue;
      }
      break;
    case XML_TOK_TRAILING_CR:
      next = entityTextPtr + enc->minBytesPerChar;
      /* fall through */
    case XML_TOK_DATA_NEWLINE:
      /* Line ends are normalized to a single 0xA. */
      if (pool->end == pool->ptr && ! poolGrow(pool)) {
        result = XML_ERROR_NO_MEMORY;
        goto endEntityValue;
      }
      *(pool->ptr)++ = 0xA;
      break;
    case XML_TOK_CHAR_REF: {
      XML_Char buf[XML_ENCODE_MAX];
      int i;
      int n = XmlCharRefNumber(enc, entityTextPtr);
      if (n < 0) {
        if (enc == parser->m_encoding)
          parser->m_eventPtr = entityTextPtr;
        result = XML_ERROR_BAD_CHAR_REF;
        goto endEntityValue;
      }
      n = XmlEncode(n, (ICHAR *)buf);
      /* The XmlEncode() functions can never return 0 here.  That
       * error return happens if the code point passed in is either
       * negative or greater than or equal to 0x110000.  The
       * XmlCharRefNumber() functions will all return a number
       * strictly less than 0x110000 or a negative value if an error
       * occurred.  The negative value is intercepted above, so
       * XmlEncode() is never passed a value it might return an
       * error for. */
      for (i = 0; i < n; i++) {
        if (pool->end == pool->ptr && ! poolGrow(pool)) {
          result = XML_ERROR_NO_MEMORY;
          goto endEntityValue;
        }
        *(pool->ptr)++ = buf[i];
      }
    } break;
    case XML_TOK_PARTIAL:
      if (enc == parser->m_encoding)
        parser->m_eventPtr = entityTextPtr;
      result = XML_ERROR_INVALID_TOKEN;
      goto endEntityValue;
    case XML_TOK_INVALID:
      if (enc == parser->m_encoding)
        parser->m_eventPtr = next;
      result = XML_ERROR_INVALID_TOKEN;
      goto endEntityValue;
    default:
      /* This default case should be unnecessary -- all the tokens
       * that XmlEntityValueTok() can return have their own explicit
       * cases -- but should be retained for safety.  We do however
       * exclude it from the coverage statistics.
* * LCOV_EXCL_START */ if (enc == parser->m_encoding) parser->m_eventPtr = entityTextPtr; result = XML_ERROR_UNEXPECTED_STATE; goto endEntityValue; /* LCOV_EXCL_STOP */ } entityTextPtr = next; } endEntityValue: #ifdef XML_DTD parser->m_prologState.inEntityValue = oldInEntityValue; #endif /* XML_DTD */ return result; } static void FASTCALL normalizeLines(XML_Char *s) { XML_Char *p; for (;; s++) { if (*s == XML_T('\0')) return; if (*s == 0xD) break; } p = s; do { if (*s == 0xD) { *p++ = 0xA; if (*++s == 0xA) s++; } else *p++ = *s++; } while (*s); *p = XML_T('\0'); } static int reportProcessingInstruction(XML_Parser parser, const ENCODING *enc, const char *start, const char *end) { const XML_Char *target; XML_Char *data; const char *tem; if (! parser->m_processingInstructionHandler) { if (parser->m_defaultHandler) reportDefault(parser, enc, start, end); return 1; } start += enc->minBytesPerChar * 2; tem = start + XmlNameLength(enc, start); target = poolStoreString(&parser->m_tempPool, enc, start, tem); if (! target) return 0; poolFinish(&parser->m_tempPool); data = poolStoreString(&parser->m_tempPool, enc, XmlSkipS(enc, tem), end - enc->minBytesPerChar * 2); if (! data) return 0; normalizeLines(data); parser->m_processingInstructionHandler(parser->m_handlerArg, target, data); poolClear(&parser->m_tempPool); return 1; } static int reportComment(XML_Parser parser, const ENCODING *enc, const char *start, const char *end) { XML_Char *data; if (! parser->m_commentHandler) { if (parser->m_defaultHandler) reportDefault(parser, enc, start, end); return 1; } data = poolStoreString(&parser->m_tempPool, enc, start + enc->minBytesPerChar * 4, end - enc->minBytesPerChar * 3); if (! 
data) return 0; normalizeLines(data); parser->m_commentHandler(parser->m_handlerArg, data); poolClear(&parser->m_tempPool); return 1; } static void reportDefault(XML_Parser parser, const ENCODING *enc, const char *s, const char *end) { if (MUST_CONVERT(enc, s)) { enum XML_Convert_Result convert_res; const char **eventPP; const char **eventEndPP; if (enc == parser->m_encoding) { eventPP = &parser->m_eventPtr; eventEndPP = &parser->m_eventEndPtr; } else { /* To get here, two things must be true; the parser must be * using a character encoding that is not the same as the * encoding passed in, and the encoding passed in must need * conversion to the internal format (UTF-8 unless XML_UNICODE * is defined). The only occasions on which the encoding passed * in is not the same as the parser's encoding are when it is * the internal encoding (e.g. a previously defined parameter * entity, already converted to internal format). This by * definition doesn't need conversion, so the whole branch never * gets executed. * * For safety's sake we don't delete these lines and merely * exclude them from coverage statistics. 
* * LCOV_EXCL_START */ eventPP = &(parser->m_openInternalEntities->internalEventPtr); eventEndPP = &(parser->m_openInternalEntities->internalEventEndPtr); /* LCOV_EXCL_STOP */ } do { ICHAR *dataPtr = (ICHAR *)parser->m_dataBuf; convert_res = XmlConvert(enc, &s, end, &dataPtr, (ICHAR *)parser->m_dataBufEnd); *eventEndPP = s; parser->m_defaultHandler(parser->m_handlerArg, parser->m_dataBuf, (int)(dataPtr - (ICHAR *)parser->m_dataBuf)); *eventPP = s; } while ((convert_res != XML_CONVERT_COMPLETED) && (convert_res != XML_CONVERT_INPUT_INCOMPLETE)); } else parser->m_defaultHandler(parser->m_handlerArg, (XML_Char *)s, (int)((XML_Char *)end - (XML_Char *)s)); } static int defineAttribute(ELEMENT_TYPE *type, ATTRIBUTE_ID *attId, XML_Bool isCdata, XML_Bool isId, const XML_Char *value, XML_Parser parser) { DEFAULT_ATTRIBUTE *att; if (value || isId) { /* The handling of default attributes gets messed up if we have a default which duplicates a non-default. */ int i; for (i = 0; i < type->nDefaultAtts; i++) if (attId == type->defaultAtts[i].id) return 1; if (isId && ! type->idAtt && ! attId->xmlns) type->idAtt = attId; } if (type->nDefaultAtts == type->allocDefaultAtts) { if (type->allocDefaultAtts == 0) { type->allocDefaultAtts = 8; type->defaultAtts = (DEFAULT_ATTRIBUTE *)MALLOC( parser, type->allocDefaultAtts * sizeof(DEFAULT_ATTRIBUTE)); if (! type->defaultAtts) { type->allocDefaultAtts = 0; return 0; } } else { DEFAULT_ATTRIBUTE *temp; int count = type->allocDefaultAtts * 2; temp = (DEFAULT_ATTRIBUTE *)REALLOC(parser, type->defaultAtts, (count * sizeof(DEFAULT_ATTRIBUTE))); if (temp == NULL) return 0; type->allocDefaultAtts = count; type->defaultAtts = temp; } } att = type->defaultAtts + type->nDefaultAtts; att->id = attId; att->value = value; att->isCdata = isCdata; if (! 
isCdata) attId->maybeTokenized = XML_TRUE; type->nDefaultAtts += 1; return 1; } static int setElementTypePrefix(XML_Parser parser, ELEMENT_TYPE *elementType) { DTD *const dtd = parser->m_dtd; /* save one level of indirection */ const XML_Char *name; for (name = elementType->name; *name; name++) { if (*name == XML_T(ASCII_COLON)) { PREFIX *prefix; const XML_Char *s; for (s = elementType->name; s != name; s++) { if (! poolAppendChar(&dtd->pool, *s)) return 0; } if (! poolAppendChar(&dtd->pool, XML_T('\0'))) return 0; prefix = (PREFIX *)lookup(parser, &dtd->prefixes, poolStart(&dtd->pool), sizeof(PREFIX)); if (! prefix) return 0; if (prefix->name == poolStart(&dtd->pool)) poolFinish(&dtd->pool); else poolDiscard(&dtd->pool); elementType->prefix = prefix; break; } } return 1; } static ATTRIBUTE_ID * getAttributeId(XML_Parser parser, const ENCODING *enc, const char *start, const char *end) { DTD *const dtd = parser->m_dtd; /* save one level of indirection */ ATTRIBUTE_ID *id; const XML_Char *name; if (! poolAppendChar(&dtd->pool, XML_T('\0'))) return NULL; name = poolStoreString(&dtd->pool, enc, start, end); if (! name) return NULL; /* skip quotation mark - its storage will be re-used (like in name[-1]) */ ++name; id = (ATTRIBUTE_ID *)lookup(parser, &dtd->attributeIds, name, sizeof(ATTRIBUTE_ID)); if (! id) return NULL; if (id->name != name) poolDiscard(&dtd->pool); else { poolFinish(&dtd->pool); if (! 
parser->m_ns) ; else if (name[0] == XML_T(ASCII_x) && name[1] == XML_T(ASCII_m) && name[2] == XML_T(ASCII_l) && name[3] == XML_T(ASCII_n) && name[4] == XML_T(ASCII_s) && (name[5] == XML_T('\0') || name[5] == XML_T(ASCII_COLON))) { if (name[5] == XML_T('\0')) id->prefix = &dtd->defaultPrefix; else id->prefix = (PREFIX *)lookup(parser, &dtd->prefixes, name + 6, sizeof(PREFIX)); id->xmlns = XML_TRUE; } else { int i; for (i = 0; name[i]; i++) { /* attributes without prefix are *not* in the default namespace */ if (name[i] == XML_T(ASCII_COLON)) { int j; for (j = 0; j < i; j++) { if (! poolAppendChar(&dtd->pool, name[j])) return NULL; } if (! poolAppendChar(&dtd->pool, XML_T('\0'))) return NULL; id->prefix = (PREFIX *)lookup(parser, &dtd->prefixes, poolStart(&dtd->pool), sizeof(PREFIX)); if (! id->prefix) return NULL; if (id->prefix->name == poolStart(&dtd->pool)) poolFinish(&dtd->pool); else poolDiscard(&dtd->pool); break; } } } } return id; } #define CONTEXT_SEP XML_T(ASCII_FF) static const XML_Char * getContext(XML_Parser parser) { DTD *const dtd = parser->m_dtd; /* save one level of indirection */ HASH_TABLE_ITER iter; XML_Bool needSep = XML_FALSE; if (dtd->defaultPrefix.binding) { int i; int len; if (! poolAppendChar(&parser->m_tempPool, XML_T(ASCII_EQUALS))) return NULL; len = dtd->defaultPrefix.binding->uriLen; if (parser->m_namespaceSeparator) len--; for (i = 0; i < len; i++) { if (! poolAppendChar(&parser->m_tempPool, dtd->defaultPrefix.binding->uri[i])) { /* Because of memory caching, I don't believe this line can be * executed. * * This is part of a loop copying the default prefix binding * URI into the parser's temporary string pool. Previously, * that URI was copied into the same string pool, with a * terminating NUL character, as part of setContext(). When * the pool was cleared, that leaves a block definitely big * enough to hold the URI on the free block list of the pool. * The URI copy in getContext() therefore cannot run out of * memory. 
* * If the pool is used between the setContext() and * getContext() calls, the worst it can do is leave a bigger * block on the front of the free list. Given that this is * all somewhat inobvious and program logic can be changed, we * don't delete the line but we do exclude it from the test * coverage statistics. */ return NULL; /* LCOV_EXCL_LINE */ } } needSep = XML_TRUE; } hashTableIterInit(&iter, &(dtd->prefixes)); for (;;) { int i; int len; const XML_Char *s; PREFIX *prefix = (PREFIX *)hashTableIterNext(&iter); if (! prefix) break; if (! prefix->binding) { /* This test appears to be (justifiable) paranoia. There does * not seem to be a way of injecting a prefix without a binding * that doesn't get errored long before this function is called. * The test should remain for safety's sake, so we instead * exclude the following line from the coverage statistics. */ continue; /* LCOV_EXCL_LINE */ } if (needSep && ! poolAppendChar(&parser->m_tempPool, CONTEXT_SEP)) return NULL; for (s = prefix->name; *s; s++) if (! poolAppendChar(&parser->m_tempPool, *s)) return NULL; if (! poolAppendChar(&parser->m_tempPool, XML_T(ASCII_EQUALS))) return NULL; len = prefix->binding->uriLen; if (parser->m_namespaceSeparator) len--; for (i = 0; i < len; i++) if (! poolAppendChar(&parser->m_tempPool, prefix->binding->uri[i])) return NULL; needSep = XML_TRUE; } hashTableIterInit(&iter, &(dtd->generalEntities)); for (;;) { const XML_Char *s; ENTITY *e = (ENTITY *)hashTableIterNext(&iter); if (! e) break; if (! e->open) continue; if (needSep && ! poolAppendChar(&parser->m_tempPool, CONTEXT_SEP)) return NULL; for (s = e->name; *s; s++) if (! poolAppendChar(&parser->m_tempPool, *s)) return 0; needSep = XML_TRUE; } if (! 
poolAppendChar(&parser->m_tempPool, XML_T('\0'))) return NULL; return parser->m_tempPool.start; } static XML_Bool setContext(XML_Parser parser, const XML_Char *context) { DTD *const dtd = parser->m_dtd; /* save one level of indirection */ const XML_Char *s = context; while (*context != XML_T('\0')) { if (*s == CONTEXT_SEP || *s == XML_T('\0')) { ENTITY *e; if (! poolAppendChar(&parser->m_tempPool, XML_T('\0'))) return XML_FALSE; e = (ENTITY *)lookup(parser, &dtd->generalEntities, poolStart(&parser->m_tempPool), 0); if (e) e->open = XML_TRUE; if (*s != XML_T('\0')) s++; context = s; poolDiscard(&parser->m_tempPool); } else if (*s == XML_T(ASCII_EQUALS)) { PREFIX *prefix; if (poolLength(&parser->m_tempPool) == 0) prefix = &dtd->defaultPrefix; else { if (! poolAppendChar(&parser->m_tempPool, XML_T('\0'))) return XML_FALSE; prefix = (PREFIX *)lookup(parser, &dtd->prefixes, poolStart(&parser->m_tempPool), sizeof(PREFIX)); if (! prefix) return XML_FALSE; if (prefix->name == poolStart(&parser->m_tempPool)) { prefix->name = poolCopyString(&dtd->pool, prefix->name); if (! prefix->name) return XML_FALSE; } poolDiscard(&parser->m_tempPool); } for (context = s + 1; *context != CONTEXT_SEP && *context != XML_T('\0'); context++) if (! poolAppendChar(&parser->m_tempPool, *context)) return XML_FALSE; if (! poolAppendChar(&parser->m_tempPool, XML_T('\0'))) return XML_FALSE; if (addBinding(parser, prefix, NULL, poolStart(&parser->m_tempPool), &parser->m_inheritedBindings) != XML_ERROR_NONE) return XML_FALSE; poolDiscard(&parser->m_tempPool); if (*context != XML_T('\0')) ++context; s = context; } else { if (! 
poolAppendChar(&parser->m_tempPool, *s)) return XML_FALSE; s++; } } return XML_TRUE; } static void FASTCALL normalizePublicId(XML_Char *publicId) { XML_Char *p = publicId; XML_Char *s; for (s = publicId; *s; s++) { switch (*s) { case 0x20: case 0xD: case 0xA: if (p != publicId && p[-1] != 0x20) *p++ = 0x20; break; default: *p++ = *s; } } if (p != publicId && p[-1] == 0x20) --p; *p = XML_T('\0'); } static DTD * dtdCreate(const XML_Memory_Handling_Suite *ms) { DTD *p = (DTD *)ms->malloc_fcn(sizeof(DTD)); if (p == NULL) return p; poolInit(&(p->pool), ms); poolInit(&(p->entityValuePool), ms); hashTableInit(&(p->generalEntities), ms); hashTableInit(&(p->elementTypes), ms); hashTableInit(&(p->attributeIds), ms); hashTableInit(&(p->prefixes), ms); #ifdef XML_DTD p->paramEntityRead = XML_FALSE; hashTableInit(&(p->paramEntities), ms); #endif /* XML_DTD */ p->defaultPrefix.name = NULL; p->defaultPrefix.binding = NULL; p->in_eldecl = XML_FALSE; p->scaffIndex = NULL; p->scaffold = NULL; p->scaffLevel = 0; p->scaffSize = 0; p->scaffCount = 0; p->contentStringLen = 0; p->keepProcessing = XML_TRUE; p->hasParamEntityRefs = XML_FALSE; p->standalone = XML_FALSE; return p; } static void dtdReset(DTD *p, const XML_Memory_Handling_Suite *ms) { HASH_TABLE_ITER iter; hashTableIterInit(&iter, &(p->elementTypes)); for (;;) { ELEMENT_TYPE *e = (ELEMENT_TYPE *)hashTableIterNext(&iter); if (! 
e) break; if (e->allocDefaultAtts != 0) ms->free_fcn(e->defaultAtts); } hashTableClear(&(p->generalEntities)); #ifdef XML_DTD p->paramEntityRead = XML_FALSE; hashTableClear(&(p->paramEntities)); #endif /* XML_DTD */ hashTableClear(&(p->elementTypes)); hashTableClear(&(p->attributeIds)); hashTableClear(&(p->prefixes)); poolClear(&(p->pool)); poolClear(&(p->entityValuePool)); p->defaultPrefix.name = NULL; p->defaultPrefix.binding = NULL; p->in_eldecl = XML_FALSE; ms->free_fcn(p->scaffIndex); p->scaffIndex = NULL; ms->free_fcn(p->scaffold); p->scaffold = NULL; p->scaffLevel = 0; p->scaffSize = 0; p->scaffCount = 0; p->contentStringLen = 0; p->keepProcessing = XML_TRUE; p->hasParamEntityRefs = XML_FALSE; p->standalone = XML_FALSE; } static void dtdDestroy(DTD *p, XML_Bool isDocEntity, const XML_Memory_Handling_Suite *ms) { HASH_TABLE_ITER iter; hashTableIterInit(&iter, &(p->elementTypes)); for (;;) { ELEMENT_TYPE *e = (ELEMENT_TYPE *)hashTableIterNext(&iter); if (! e) break; if (e->allocDefaultAtts != 0) ms->free_fcn(e->defaultAtts); } hashTableDestroy(&(p->generalEntities)); #ifdef XML_DTD hashTableDestroy(&(p->paramEntities)); #endif /* XML_DTD */ hashTableDestroy(&(p->elementTypes)); hashTableDestroy(&(p->attributeIds)); hashTableDestroy(&(p->prefixes)); poolDestroy(&(p->pool)); poolDestroy(&(p->entityValuePool)); if (isDocEntity) { ms->free_fcn(p->scaffIndex); ms->free_fcn(p->scaffold); } ms->free_fcn(p); } /* Do a deep copy of the DTD. Return 0 for out of memory, non-zero otherwise. The new DTD has already been initialized. */ static int dtdCopy(XML_Parser oldParser, DTD *newDtd, const DTD *oldDtd, const XML_Memory_Handling_Suite *ms) { HASH_TABLE_ITER iter; /* Copy the prefix table. */ hashTableIterInit(&iter, &(oldDtd->prefixes)); for (;;) { const XML_Char *name; const PREFIX *oldP = (PREFIX *)hashTableIterNext(&iter); if (! oldP) break; name = poolCopyString(&(newDtd->pool), oldP->name); if (! name) return 0; if (! 
lookup(oldParser, &(newDtd->prefixes), name, sizeof(PREFIX))) return 0; } hashTableIterInit(&iter, &(oldDtd->attributeIds)); /* Copy the attribute id table. */ for (;;) { ATTRIBUTE_ID *newA; const XML_Char *name; const ATTRIBUTE_ID *oldA = (ATTRIBUTE_ID *)hashTableIterNext(&iter); if (! oldA) break; /* Remember to allocate the scratch byte before the name. */ if (! poolAppendChar(&(newDtd->pool), XML_T('\0'))) return 0; name = poolCopyString(&(newDtd->pool), oldA->name); if (! name) return 0; ++name; newA = (ATTRIBUTE_ID *)lookup(oldParser, &(newDtd->attributeIds), name, sizeof(ATTRIBUTE_ID)); if (! newA) return 0; newA->maybeTokenized = oldA->maybeTokenized; if (oldA->prefix) { newA->xmlns = oldA->xmlns; if (oldA->prefix == &oldDtd->defaultPrefix) newA->prefix = &newDtd->defaultPrefix; else newA->prefix = (PREFIX *)lookup(oldParser, &(newDtd->prefixes), oldA->prefix->name, 0); } } /* Copy the element type table. */ hashTableIterInit(&iter, &(oldDtd->elementTypes)); for (;;) { int i; ELEMENT_TYPE *newE; const XML_Char *name; const ELEMENT_TYPE *oldE = (ELEMENT_TYPE *)hashTableIterNext(&iter); if (! oldE) break; name = poolCopyString(&(newDtd->pool), oldE->name); if (! name) return 0; newE = (ELEMENT_TYPE *)lookup(oldParser, &(newDtd->elementTypes), name, sizeof(ELEMENT_TYPE)); if (! newE) return 0; if (oldE->nDefaultAtts) { newE->defaultAtts = (DEFAULT_ATTRIBUTE *)ms->malloc_fcn( oldE->nDefaultAtts * sizeof(DEFAULT_ATTRIBUTE)); if (! 
newE->defaultAtts) { return 0; } } if (oldE->idAtt) newE->idAtt = (ATTRIBUTE_ID *)lookup(oldParser, &(newDtd->attributeIds), oldE->idAtt->name, 0); newE->allocDefaultAtts = newE->nDefaultAtts = oldE->nDefaultAtts; if (oldE->prefix) newE->prefix = (PREFIX *)lookup(oldParser, &(newDtd->prefixes), oldE->prefix->name, 0); for (i = 0; i < newE->nDefaultAtts; i++) { newE->defaultAtts[i].id = (ATTRIBUTE_ID *)lookup( oldParser, &(newDtd->attributeIds), oldE->defaultAtts[i].id->name, 0); newE->defaultAtts[i].isCdata = oldE->defaultAtts[i].isCdata; if (oldE->defaultAtts[i].value) { newE->defaultAtts[i].value = poolCopyString(&(newDtd->pool), oldE->defaultAtts[i].value); if (! newE->defaultAtts[i].value) return 0; } else newE->defaultAtts[i].value = NULL; } } /* Copy the entity tables. */ if (! copyEntityTable(oldParser, &(newDtd->generalEntities), &(newDtd->pool), &(oldDtd->generalEntities))) return 0; #ifdef XML_DTD if (! copyEntityTable(oldParser, &(newDtd->paramEntities), &(newDtd->pool), &(oldDtd->paramEntities))) return 0; newDtd->paramEntityRead = oldDtd->paramEntityRead; #endif /* XML_DTD */ newDtd->keepProcessing = oldDtd->keepProcessing; newDtd->hasParamEntityRefs = oldDtd->hasParamEntityRefs; newDtd->standalone = oldDtd->standalone; /* Don't want deep copying for scaffolding */ newDtd->in_eldecl = oldDtd->in_eldecl; newDtd->scaffold = oldDtd->scaffold; newDtd->contentStringLen = oldDtd->contentStringLen; newDtd->scaffSize = oldDtd->scaffSize; newDtd->scaffLevel = oldDtd->scaffLevel; newDtd->scaffIndex = oldDtd->scaffIndex; return 1; } /* End dtdCopy */ static int copyEntityTable(XML_Parser oldParser, HASH_TABLE *newTable, STRING_POOL *newPool, const HASH_TABLE *oldTable) { HASH_TABLE_ITER iter; const XML_Char *cachedOldBase = NULL; const XML_Char *cachedNewBase = NULL; hashTableIterInit(&iter, oldTable); for (;;) { ENTITY *newE; const XML_Char *name; const ENTITY *oldE = (ENTITY *)hashTableIterNext(&iter); if (! 
oldE) break; name = poolCopyString(newPool, oldE->name); if (! name) return 0; newE = (ENTITY *)lookup(oldParser, newTable, name, sizeof(ENTITY)); if (! newE) return 0; if (oldE->systemId) { const XML_Char *tem = poolCopyString(newPool, oldE->systemId); if (! tem) return 0; newE->systemId = tem; if (oldE->base) { if (oldE->base == cachedOldBase) newE->base = cachedNewBase; else { cachedOldBase = oldE->base; tem = poolCopyString(newPool, cachedOldBase); if (! tem) return 0; cachedNewBase = newE->base = tem; } } if (oldE->publicId) { tem = poolCopyString(newPool, oldE->publicId); if (! tem) return 0; newE->publicId = tem; } } else { const XML_Char *tem = poolCopyStringN(newPool, oldE->textPtr, oldE->textLen); if (! tem) return 0; newE->textPtr = tem; newE->textLen = oldE->textLen; } if (oldE->notation) { const XML_Char *tem = poolCopyString(newPool, oldE->notation); if (! tem) return 0; newE->notation = tem; } newE->is_param = oldE->is_param; newE->is_internal = oldE->is_internal; } return 1; } #define INIT_POWER 6 static XML_Bool FASTCALL keyeq(KEY s1, KEY s2) { for (; *s1 == *s2; s1++, s2++) if (*s1 == 0) return XML_TRUE; return XML_FALSE; } static size_t keylen(KEY s) { size_t len = 0; for (; *s; s++, len++) ; return len; } static void copy_salt_to_sipkey(XML_Parser parser, struct sipkey *key) { key->k[0] = 0; key->k[1] = get_hash_secret_salt(parser); } static unsigned long FASTCALL hash(XML_Parser parser, KEY s) { struct siphash state; struct sipkey key; (void)sip24_valid; copy_salt_to_sipkey(parser, &key); sip24_init(&state, &key); sip24_update(&state, s, keylen(s) * sizeof(XML_Char)); return (unsigned long)sip24_final(&state); } static NAMED * lookup(XML_Parser parser, HASH_TABLE *table, KEY name, size_t createSize) { size_t i; if (table->size == 0) { size_t tsize; if (! 
createSize) return NULL; table->power = INIT_POWER; /* table->size is a power of 2 */ table->size = (size_t)1 << INIT_POWER; tsize = table->size * sizeof(NAMED *); table->v = (NAMED **)table->mem->malloc_fcn(tsize); if (! table->v) { table->size = 0; return NULL; } memset(table->v, 0, tsize); i = hash(parser, name) & ((unsigned long)table->size - 1); } else { unsigned long h = hash(parser, name); unsigned long mask = (unsigned long)table->size - 1; unsigned char step = 0; i = h & mask; while (table->v[i]) { if (keyeq(name, table->v[i]->name)) return table->v[i]; if (! step) step = PROBE_STEP(h, mask, table->power); i < step ? (i += table->size - step) : (i -= step); } if (! createSize) return NULL; /* check for overflow (table is half full) */ if (table->used >> (table->power - 1)) { unsigned char newPower = table->power + 1; size_t newSize = (size_t)1 << newPower; unsigned long newMask = (unsigned long)newSize - 1; size_t tsize = newSize * sizeof(NAMED *); NAMED **newV = (NAMED **)table->mem->malloc_fcn(tsize); if (! newV) return NULL; memset(newV, 0, tsize); for (i = 0; i < table->size; i++) if (table->v[i]) { unsigned long newHash = hash(parser, table->v[i]->name); size_t j = newHash & newMask; step = 0; while (newV[j]) { if (! step) step = PROBE_STEP(newHash, newMask, newPower); j < step ? (j += newSize - step) : (j -= step); } newV[j] = table->v[i]; } table->mem->free_fcn(table->v); table->v = newV; table->power = newPower; table->size = newSize; i = h & newMask; step = 0; while (table->v[i]) { if (! step) step = PROBE_STEP(h, newMask, newPower); i < step ? (i += newSize - step) : (i -= step); } } } table->v[i] = (NAMED *)table->mem->malloc_fcn(createSize); if (! 
table->v[i]) return NULL; memset(table->v[i], 0, createSize); table->v[i]->name = name; (table->used)++; return table->v[i]; } static void FASTCALL hashTableClear(HASH_TABLE *table) { size_t i; for (i = 0; i < table->size; i++) { table->mem->free_fcn(table->v[i]); table->v[i] = NULL; } table->used = 0; } static void FASTCALL hashTableDestroy(HASH_TABLE *table) { size_t i; for (i = 0; i < table->size; i++) table->mem->free_fcn(table->v[i]); table->mem->free_fcn(table->v); } static void FASTCALL hashTableInit(HASH_TABLE *p, const XML_Memory_Handling_Suite *ms) { p->power = 0; p->size = 0; p->used = 0; p->v = NULL; p->mem = ms; } static void FASTCALL hashTableIterInit(HASH_TABLE_ITER *iter, const HASH_TABLE *table) { iter->p = table->v; iter->end = iter->p + table->size; } static NAMED *FASTCALL hashTableIterNext(HASH_TABLE_ITER *iter) { while (iter->p != iter->end) { NAMED *tem = *(iter->p)++; if (tem) return tem; } return NULL; } static void FASTCALL poolInit(STRING_POOL *pool, const XML_Memory_Handling_Suite *ms) { pool->blocks = NULL; pool->freeBlocks = NULL; pool->start = NULL; pool->ptr = NULL; pool->end = NULL; pool->mem = ms; } static void FASTCALL poolClear(STRING_POOL *pool) { if (! pool->freeBlocks) pool->freeBlocks = pool->blocks; else { BLOCK *p = pool->blocks; while (p) { BLOCK *tem = p->next; p->next = pool->freeBlocks; pool->freeBlocks = p; p = tem; } } pool->blocks = NULL; pool->start = NULL; pool->ptr = NULL; pool->end = NULL; } static void FASTCALL poolDestroy(STRING_POOL *pool) { BLOCK *p = pool->blocks; while (p) { BLOCK *tem = p->next; pool->mem->free_fcn(p); p = tem; } p = pool->freeBlocks; while (p) { BLOCK *tem = p->next; pool->mem->free_fcn(p); p = tem; } } static XML_Char * poolAppend(STRING_POOL *pool, const ENCODING *enc, const char *ptr, const char *end) { if (! pool->ptr && ! 
poolGrow(pool)) return NULL; for (;;) { const enum XML_Convert_Result convert_res = XmlConvert( enc, &ptr, end, (ICHAR **)&(pool->ptr), (ICHAR *)pool->end); if ((convert_res == XML_CONVERT_COMPLETED) || (convert_res == XML_CONVERT_INPUT_INCOMPLETE)) break; if (! poolGrow(pool)) return NULL; } return pool->start; } static const XML_Char *FASTCALL poolCopyString(STRING_POOL *pool, const XML_Char *s) { do { if (! poolAppendChar(pool, *s)) return NULL; } while (*s++); s = pool->start; poolFinish(pool); return s; } static const XML_Char * poolCopyStringN(STRING_POOL *pool, const XML_Char *s, int n) { if (! pool->ptr && ! poolGrow(pool)) { /* The following line is unreachable given the current usage of * poolCopyStringN(). Currently it is called from exactly one * place to copy the text of a simple general entity. By that * point, the name of the entity is already stored in the pool, so * pool->ptr cannot be NULL. * * If poolCopyStringN() is used elsewhere as it well might be, * this line may well become executable again. Regardless, this * sort of check shouldn't be removed lightly, so we just exclude * it from the coverage statistics. */ return NULL; /* LCOV_EXCL_LINE */ } for (; n > 0; --n, s++) { if (! poolAppendChar(pool, *s)) return NULL; } s = pool->start; poolFinish(pool); return s; } static const XML_Char *FASTCALL poolAppendString(STRING_POOL *pool, const XML_Char *s) { while (*s) { if (! poolAppendChar(pool, *s)) return NULL; s++; } return pool->start; } static XML_Char * poolStoreString(STRING_POOL *pool, const ENCODING *enc, const char *ptr, const char *end) { if (! poolAppend(pool, enc, ptr, end)) return NULL; if (pool->ptr == pool->end && ! 
poolGrow(pool)) return NULL; *(pool->ptr)++ = 0; return pool->start; } static size_t poolBytesToAllocateFor(int blockSize) { /* Unprotected math would be: ** return offsetof(BLOCK, s) + blockSize * sizeof(XML_Char); ** ** Detect overflow, avoiding _signed_ overflow undefined behavior ** For a + b * c we check b * c in isolation first, so that addition of a ** on top has no chance of making us accept a small non-negative number */ const size_t stretch = sizeof(XML_Char); /* can be 4 bytes */ if (blockSize <= 0) return 0; if (blockSize > (int)(INT_MAX / stretch)) return 0; { const int stretchedBlockSize = blockSize * (int)stretch; const int bytesToAllocate = (int)(offsetof(BLOCK, s) + (unsigned)stretchedBlockSize); if (bytesToAllocate < 0) return 0; return (size_t)bytesToAllocate; } } static XML_Bool FASTCALL poolGrow(STRING_POOL *pool) { if (pool->freeBlocks) { if (pool->start == 0) { pool->blocks = pool->freeBlocks; pool->freeBlocks = pool->freeBlocks->next; pool->blocks->next = NULL; pool->start = pool->blocks->s; pool->end = pool->start + pool->blocks->size; pool->ptr = pool->start; return XML_TRUE; } if (pool->end - pool->start < pool->freeBlocks->size) { BLOCK *tem = pool->freeBlocks->next; pool->freeBlocks->next = pool->blocks; pool->blocks = pool->freeBlocks; pool->freeBlocks = tem; memcpy(pool->blocks->s, pool->start, (pool->end - pool->start) * sizeof(XML_Char)); pool->ptr = pool->blocks->s + (pool->ptr - pool->start); pool->start = pool->blocks->s; pool->end = pool->start + pool->blocks->size; return XML_TRUE; } } if (pool->blocks && pool->start == pool->blocks->s) { BLOCK *temp; int blockSize = (int)((unsigned)(pool->end - pool->start) * 2U); size_t bytesToAllocate; /* NOTE: Needs to be calculated prior to calling `realloc` to avoid dangling pointers: */ const ptrdiff_t offsetInsideBlock = pool->ptr - pool->start; if (blockSize < 0) { /* This condition traps a situation where either more than * INT_MAX/2 bytes have already been allocated. 
This isn't
   * readily testable, since it is unlikely that an average
   * machine will have that much memory, so we exclude it from the
   * coverage statistics. */
      return XML_FALSE; /* LCOV_EXCL_LINE */
    }
    /* (tail of poolGrow, whose head is above this chunk) Grow the pool's
       current block in place via realloc, keeping the in-progress string
       at the same offset inside the block. */
    bytesToAllocate = poolBytesToAllocateFor(blockSize);
    if (bytesToAllocate == 0)
      return XML_FALSE;
    temp = (BLOCK *)pool->mem->realloc_fcn(pool->blocks,
                                           (unsigned)bytesToAllocate);
    if (temp == NULL)
      return XML_FALSE;
    pool->blocks = temp;
    pool->blocks->size = blockSize;
    pool->ptr = pool->blocks->s + offsetInsideBlock;
    pool->start = pool->blocks->s;
    pool->end = pool->start + blockSize;
  } else {
    /* No block that can be grown in place: allocate a fresh, larger block
       and copy the partially-built string into it. */
    BLOCK *tem;
    int blockSize = (int)(pool->end - pool->start);
    size_t bytesToAllocate;

    if (blockSize < 0) {
      /* This condition traps a situation where either more than
       * INT_MAX bytes have already been allocated (which is prevented
       * by various pieces of program logic, not least this one, never
       * mind the unlikelihood of actually having that much memory) or
       * the pool control fields have been corrupted (which could
       * conceivably happen in an extremely buggy user handler
       * function). Either way it isn't readily testable, so we
       * exclude it from the coverage statistics. */
      return XML_FALSE; /* LCOV_EXCL_LINE */
    }

    if (blockSize < INIT_BLOCK_SIZE)
      blockSize = INIT_BLOCK_SIZE;
    else {
      /* Detect overflow, avoiding _signed_ overflow undefined behavior */
      if ((int)((unsigned)blockSize * 2U) < 0) {
        return XML_FALSE;
      }
      blockSize *= 2;
    }

    bytesToAllocate = poolBytesToAllocateFor(blockSize);
    if (bytesToAllocate == 0)
      return XML_FALSE;

    tem = (BLOCK *)pool->mem->malloc_fcn(bytesToAllocate);
    if (! tem)
      return XML_FALSE;
    tem->size = blockSize;
    tem->next = pool->blocks;
    pool->blocks = tem;
    /* Carry the unfinished string over to the new block so callers'
       saved (start, ptr) window stays valid. */
    if (pool->ptr != pool->start)
      memcpy(tem->s, pool->start, (pool->ptr - pool->start) * sizeof(XML_Char));
    pool->ptr = tem->s + (pool->ptr - pool->start);
    pool->start = tem->s;
    pool->end = tem->s + blockSize;
  }
  return XML_TRUE;
}

/* Append a new node to the DTD's content-model scaffold array, growing the
 * array (doubling) as needed, and link it as the last child of the node on
 * top of the scaffold level stack. Returns the new node's index, or -1 on
 * allocation failure.
 *
 * NOTE(review): `dtd->scaffSize * 2 * sizeof(CONTENT_SCAFFOLD)` and
 * `parser->m_groupSize * sizeof(int)` are unchecked multiplications; for
 * pathologically large DTDs these could wrap — TODO confirm against the
 * upstream hardening released in later Expat versions (2.4.3).
 */
static int FASTCALL
nextScaffoldPart(XML_Parser parser) {
  DTD *const dtd = parser->m_dtd; /* save one level of indirection */
  CONTENT_SCAFFOLD *me;
  int next;

  if (! dtd->scaffIndex) {
    /* Level-index stack is allocated lazily; m_groupSize tracks the
       deepest group nesting seen so far. */
    dtd->scaffIndex = (int *)MALLOC(parser, parser->m_groupSize * sizeof(int));
    if (! dtd->scaffIndex)
      return -1;
    dtd->scaffIndex[0] = 0;
  }

  if (dtd->scaffCount >= dtd->scaffSize) {
    CONTENT_SCAFFOLD *temp;
    if (dtd->scaffold) {
      temp = (CONTENT_SCAFFOLD *)REALLOC(
          parser, dtd->scaffold, dtd->scaffSize * 2 * sizeof(CONTENT_SCAFFOLD));
      if (temp == NULL)
        return -1;
      dtd->scaffSize *= 2;
    } else {
      temp = (CONTENT_SCAFFOLD *)MALLOC(parser, INIT_SCAFFOLD_ELEMENTS
                                                    * sizeof(CONTENT_SCAFFOLD));
      if (temp == NULL)
        return -1;
      dtd->scaffSize = INIT_SCAFFOLD_ELEMENTS;
    }
    dtd->scaffold = temp;
  }
  next = dtd->scaffCount++;
  me = &dtd->scaffold[next];
  if (dtd->scaffLevel) {
    /* Attach as youngest sibling under the current open group. */
    CONTENT_SCAFFOLD *parent
        = &dtd->scaffold[dtd->scaffIndex[dtd->scaffLevel - 1]];
    if (parent->lastchild) {
      dtd->scaffold[parent->lastchild].nextsib = next;
    }
    if (! parent->childcnt)
      parent->firstchild = next;
    parent->lastchild = next;
    parent->childcnt++;
  }
  me->firstchild = me->lastchild = me->childcnt = me->nextsib = 0;
  return next;
}

/* Recursively serialize scaffold node `src_node` into the single flat
 * XML_Content allocation produced by build_model(). `contpos` and `strpos`
 * are bump pointers into that allocation for child slots and name strings
 * respectively; both advance as nodes are emitted. */
static void
build_node(XML_Parser parser, int src_node, XML_Content *dest,
           XML_Content **contpos, XML_Char **strpos) {
  DTD *const dtd = parser->m_dtd; /* save one level of indirection */
  dest->type = dtd->scaffold[src_node].type;
  dest->quant = dtd->scaffold[src_node].quant;
  if (dest->type == XML_CTYPE_NAME) {
    const XML_Char *src;
    dest->name = *strpos;
    src = dtd->scaffold[src_node].name;
    /* Copy the name including its terminating 0 into the string area. */
    for (;;) {
      *(*strpos)++ = *src;
      if (! *src)
        break;
      src++;
    }
    dest->numchildren = 0;
    dest->children = NULL;
  } else {
    unsigned int i;
    int cn;
    dest->numchildren = dtd->scaffold[src_node].childcnt;
    dest->children = *contpos;
    /* Reserve this node's child slots before recursing, so grandchildren
       land after them. */
    *contpos += dest->numchildren;
    for (i = 0, cn = dtd->scaffold[src_node].firstchild; i < dest->numchildren;
         i++, cn = dtd->scaffold[cn].nextsib) {
      build_node(parser, cn, &(dest->children[i]), contpos, strpos);
    }
    dest->name = NULL;
  }
}

/* Convert the completed scaffold into the XML_Content tree handed to the
 * application's element-declaration handler. The whole tree (all nodes plus
 * all name strings) lives in ONE allocation, freeable via XML_FreeContentModel.
 *
 * NOTE(review): `allocsize` is computed in a plain int with unchecked
 * multiplications/addition; extremely large scaffCount/contentStringLen
 * could overflow — TODO confirm against later upstream overflow checks. */
static XML_Content *
build_model(XML_Parser parser) {
  DTD *const dtd = parser->m_dtd; /* save one level of indirection */
  XML_Content *ret;
  XML_Content *cpos;
  XML_Char *str;
  int allocsize = (dtd->scaffCount * sizeof(XML_Content)
                   + (dtd->contentStringLen * sizeof(XML_Char)));

  ret = (XML_Content *)MALLOC(parser, allocsize);
  if (! ret)
    return NULL;

  /* Strings go after the node array; nodes after the root start at [1]. */
  str = (XML_Char *)(&ret[dtd->scaffCount]);
  cpos = &ret[1];

  build_node(parser, 0, ret, &cpos, &str);
  return ret;
}

/* Look up (or create) the ELEMENT_TYPE for the name in [ptr, end), interned
 * through the DTD string pool. Returns NULL on allocation failure. */
static ELEMENT_TYPE *
getElementType(XML_Parser parser, const ENCODING *enc, const char *ptr,
               const char *end) {
  DTD *const dtd = parser->m_dtd; /* save one level of indirection */
  const XML_Char *name = poolStoreString(&dtd->pool, enc, ptr, end);
  ELEMENT_TYPE *ret;

  if (! name)
    return NULL;
  ret = (ELEMENT_TYPE *)lookup(parser, &dtd->elementTypes, name,
                               sizeof(ELEMENT_TYPE));
  if (! ret)
    return NULL;
  if (ret->name != name)
    /* Entry already existed; drop the duplicate pooled name. */
    poolDiscard(&dtd->pool);
  else {
    /* Fresh entry owns the pooled name; commit it. */
    poolFinish(&dtd->pool);
    if (! setElementTypePrefix(parser, ret))
      return NULL;
  }
  return ret;
}

/* Duplicate a 0-terminated XML_Char string using the caller-supplied
 * allocator. Returns NULL on allocation failure; caller frees. */
static XML_Char *
copyString(const XML_Char *s, const XML_Memory_Handling_Suite *memsuite) {
  int charsRequired = 0;
  XML_Char *result;

  /* First determine how long the string is */
  while (s[charsRequired] != 0) {
    charsRequired++;
  }
  /* Include the terminator */
  charsRequired++;

  /* Now allocate space for the copy */
  result = memsuite->malloc_fcn(charsRequired * sizeof(XML_Char));
  if (result == NULL)
    return NULL;
  /* Copy the original into place */
  memcpy(result, s, charsRequired * sizeof(XML_Char));
  return result;
}
/* 69df5be70289a11fb834869ce4a91c23c1d9dd04baffcbd10e86742d149a080c (2.2.7+) __ __ _ ___\ \/ /_ __ __ _| |_ / _ \\ /| '_ \ / _` | __| | __// \| |_) | (_| | |_ \___/_/\_\ .__/ \__,_|\__| |_| XML parser Copyright (c) 1997-2000 Thai Open Source Software Center Ltd Copyright (c) 2000-2017 Expat development team Licensed under the MIT license: Permission is hereby granted, free of charge, to any person obtaining a copy of this software and associated documentation files (the "Software"), to deal in the Software without restriction, including without limitation the rights to use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies of the Software, and to permit persons to whom the Software is furnished to do so, subject to the following conditions: The above copyright notice and this permission notice shall be included in all copies or substantial portions of the Software. THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. */ #if ! 
defined(_GNU_SOURCE) # define _GNU_SOURCE 1 /* syscall prototype */ #endif #ifdef _WIN32 /* force stdlib to define rand_s() */ # define _CRT_RAND_S #endif #include <stddef.h> #include <string.h> /* memset(), memcpy() */ #include <assert.h> #include <limits.h> /* UINT_MAX */ #include <stdio.h> /* fprintf */ #include <stdlib.h> /* getenv, rand_s */ #ifdef _WIN32 # define getpid GetCurrentProcessId #else # include <sys/time.h> /* gettimeofday() */ # include <sys/types.h> /* getpid() */ # include <unistd.h> /* getpid() */ # include <fcntl.h> /* O_RDONLY */ # include <errno.h> #endif #define XML_BUILDING_EXPAT 1 #ifdef _WIN32 # include "winconfig.h" #elif defined(HAVE_EXPAT_CONFIG_H) # include <expat_config.h> #endif /* ndef _WIN32 */ #include "ascii.h" #include "expat.h" #include "siphash.h" #if defined(HAVE_GETRANDOM) || defined(HAVE_SYSCALL_GETRANDOM) # if defined(HAVE_GETRANDOM) # include <sys/random.h> /* getrandom */ # else # include <unistd.h> /* syscall */ # include <sys/syscall.h> /* SYS_getrandom */ # endif # if ! defined(GRND_NONBLOCK) # define GRND_NONBLOCK 0x0001 # endif /* defined(GRND_NONBLOCK) */ #endif /* defined(HAVE_GETRANDOM) || defined(HAVE_SYSCALL_GETRANDOM) */ #if defined(HAVE_LIBBSD) \ && (defined(HAVE_ARC4RANDOM_BUF) || defined(HAVE_ARC4RANDOM)) # include <bsd/stdlib.h> #endif #if defined(_WIN32) && ! defined(LOAD_LIBRARY_SEARCH_SYSTEM32) # define LOAD_LIBRARY_SEARCH_SYSTEM32 0x00000800 #endif #if ! defined(HAVE_GETRANDOM) && ! defined(HAVE_SYSCALL_GETRANDOM) \ && ! defined(HAVE_ARC4RANDOM_BUF) && ! defined(HAVE_ARC4RANDOM) \ && ! defined(XML_DEV_URANDOM) && ! defined(_WIN32) \ && ! defined(XML_POOR_ENTROPY) # error You do not have support for any sources of high quality entropy \ enabled. For end user security, that is probably not what you want. 
\ \ Your options include: \ * Linux + glibc >=2.25 (getrandom): HAVE_GETRANDOM, \ * Linux + glibc <2.25 (syscall SYS_getrandom): HAVE_SYSCALL_GETRANDOM, \ * BSD / macOS >=10.7 (arc4random_buf): HAVE_ARC4RANDOM_BUF, \ * BSD / macOS <10.7 (arc4random): HAVE_ARC4RANDOM, \ * libbsd (arc4random_buf): HAVE_ARC4RANDOM_BUF + HAVE_LIBBSD, \ * libbsd (arc4random): HAVE_ARC4RANDOM + HAVE_LIBBSD, \ * Linux / BSD / macOS (/dev/urandom): XML_DEV_URANDOM \ * Windows (rand_s): _WIN32. \ \ If insist on not using any of these, bypass this error by defining \ XML_POOR_ENTROPY; you have been warned. \ \ If you have reasons to patch this detection code away or need changes \ to the build system, please open a bug. Thank you! #endif #ifdef XML_UNICODE # define XML_ENCODE_MAX XML_UTF16_ENCODE_MAX # define XmlConvert XmlUtf16Convert # define XmlGetInternalEncoding XmlGetUtf16InternalEncoding # define XmlGetInternalEncodingNS XmlGetUtf16InternalEncodingNS # define XmlEncode XmlUtf16Encode /* Using pointer subtraction to convert to integer type. */ # define MUST_CONVERT(enc, s) \ (! (enc)->isUtf16 || (((char *)(s) - (char *)NULL) & 1)) typedef unsigned short ICHAR; #else # define XML_ENCODE_MAX XML_UTF8_ENCODE_MAX # define XmlConvert XmlUtf8Convert # define XmlGetInternalEncoding XmlGetUtf8InternalEncoding # define XmlGetInternalEncodingNS XmlGetUtf8InternalEncodingNS # define XmlEncode XmlUtf8Encode # define MUST_CONVERT(enc, s) (! 
(enc)->isUtf8)
typedef char ICHAR; /* internal character type: bytes (UTF-8 build) */
#endif

#ifndef XML_NS
/* Non-namespace builds alias the NS entry points to the plain ones. */
# define XmlInitEncodingNS XmlInitEncoding
# define XmlInitUnknownEncodingNS XmlInitUnknownEncoding
# undef XmlGetInternalEncodingNS
# define XmlGetInternalEncodingNS XmlGetInternalEncoding
# define XmlParseXmlDeclNS XmlParseXmlDecl
#endif

#ifdef XML_UNICODE
# ifdef XML_UNICODE_WCHAR_T
# define XML_T(x) (const wchar_t) x
# define XML_L(x) L##x
# else
# define XML_T(x) (const unsigned short)x
# define XML_L(x) x
# endif
#else
# define XML_T(x) x
# define XML_L(x) x
#endif

/* Round up n to be a multiple of sz, where sz is a power of 2. */
#define ROUND_UP(n, sz) (((n) + ((sz)-1)) & ~((sz)-1))

/* Do safe (NULL-aware) pointer arithmetic */
#define EXPAT_SAFE_PTR_DIFF(p, q) (((p) && (q)) ? ((p) - (q)) : 0)

#include "internal.h"
#include "xmltok.h"
#include "xmlrole.h"

typedef const XML_Char *KEY;

/* Base "record" for hash table entries: the key is the interned name. */
typedef struct {
  KEY name;
} NAMED;

/* Open-addressed hash table; size is always a power of 2 (2^power). */
typedef struct {
  NAMED **v;
  unsigned char power;
  size_t size;
  size_t used;
  const XML_Memory_Handling_Suite *mem;
} HASH_TABLE;

static size_t keylen(KEY s);

static void copy_salt_to_sipkey(XML_Parser parser, struct sipkey *key);

/* For probing (after a collision) we need a step size relative prime
   to the hash table size, which is a power of 2. We use double-hashing,
   since we can calculate a second hash value cheaply by taking those bits
   of the first hash value that were discarded (masked out) when the table
   index was calculated: index = hash & mask, where mask = table->size - 1.
   We limit the maximum step size to table->size / 4 (mask >> 2) and make
   it odd, since odd numbers are always relative prime to a power of 2.
*/
#define SECOND_HASH(hash, mask, power) \
  ((((hash) & ~(mask)) >> ((power)-1)) & ((mask) >> 2))
#define PROBE_STEP(hash, mask, power) \
  ((unsigned char)((SECOND_HASH(hash, mask, power)) | 1))

typedef struct {
  NAMED **p;
  NAMED **end;
} HASH_TABLE_ITER;

#define INIT_TAG_BUF_SIZE 32 /* must be a multiple of sizeof(XML_Char) */
#define INIT_DATA_BUF_SIZE 1024
#define INIT_ATTS_SIZE 16
#define INIT_ATTS_VERSION 0xFFFFFFFF
#define INIT_BLOCK_SIZE 1024
#define INIT_BUFFER_SIZE 1024

#define EXPAND_SPARE 24

/* One namespace prefix -> URI binding; bindings are kept on per-tag and
   per-prefix linked lists and recycled through m_freeBindingList. */
typedef struct binding {
  struct prefix *prefix;
  struct binding *nextTagBinding;
  struct binding *prevPrefixBinding;
  const struct attribute_id *attId;
  XML_Char *uri;
  int uriLen;
  int uriAlloc;
} BINDING;

typedef struct prefix {
  const XML_Char *name;
  BINDING *binding; /* innermost active binding for this prefix */
} PREFIX;

typedef struct {
  const XML_Char *str;
  const XML_Char *localPart;
  const XML_Char *prefix;
  int strLen;
  int uriLen;
  int prefixLen;
} TAG_NAME;

/* TAG represents an open element.
   The name of the element is stored in both the document and API
   encodings. The memory buffer 'buf' is a separately-allocated
   memory area which stores the name. During the XML_Parse()/
   XMLParseBuffer() when the element is open, the memory for the 'raw'
   version of the name (in the document encoding) is shared with the
   document buffer. If the element is open across calls to
   XML_Parse()/XML_ParseBuffer(), the buffer is re-allocated to
   contain the 'raw' name as well.

   A parser re-uses these structures, maintaining a list of allocated
   TAG objects in a free list.
*/
typedef struct tag {
  struct tag *parent;  /* parent of this element */
  const char *rawName; /* tagName in the original encoding */
  int rawNameLength;
  TAG_NAME name; /* tagName in the API encoding */
  char *buf;     /* buffer for name components */
  char *bufEnd;  /* end of the buffer */
  BINDING *bindings;
} TAG;

typedef struct {
  const XML_Char *name;
  const XML_Char *textPtr;
  int textLen;   /* length in XML_Chars */
  int processed; /* # of processed bytes - when suspended */
  const XML_Char *systemId;
  const XML_Char *base;
  const XML_Char *publicId;
  const XML_Char *notation;
  XML_Bool open;
  XML_Bool is_param;
  XML_Bool is_internal; /* true if declared in internal subset outside PE */
} ENTITY;

/* Temporary tree node used while parsing an element content model;
   children are linked by index into the DTD's scaffold array. */
typedef struct {
  enum XML_Content_Type type;
  enum XML_Content_Quant quant;
  const XML_Char *name;
  int firstchild;
  int lastchild;
  int childcnt;
  int nextsib;
} CONTENT_SCAFFOLD;

#define INIT_SCAFFOLD_ELEMENTS 32

typedef struct block {
  struct block *next;
  int size;
  XML_Char s[1]; /* actually variable-length; block is over-allocated */
} BLOCK;

/* Growable string pool: strings under construction live in [start, ptr)
   of the current block; end marks the block's capacity. */
typedef struct {
  BLOCK *blocks;
  BLOCK *freeBlocks;
  const XML_Char *end;
  XML_Char *ptr;
  XML_Char *start;
  const XML_Memory_Handling_Suite *mem;
} STRING_POOL;

/* The XML_Char before the name is used to determine whether
   an attribute has been specified. */
typedef struct attribute_id {
  XML_Char *name;
  PREFIX *prefix;
  XML_Bool maybeTokenized;
  XML_Bool xmlns;
} ATTRIBUTE_ID;

typedef struct {
  const ATTRIBUTE_ID *id;
  XML_Bool isCdata;
  const XML_Char *value;
} DEFAULT_ATTRIBUTE;

/* Cache entry for namespace-attribute uniqueness checking. */
typedef struct {
  unsigned long version;
  unsigned long hash;
  const XML_Char *uriName;
} NS_ATT;

typedef struct {
  const XML_Char *name;
  PREFIX *prefix;
  const ATTRIBUTE_ID *idAtt;
  int nDefaultAtts;
  int allocDefaultAtts;
  DEFAULT_ATTRIBUTE *defaultAtts;
} ELEMENT_TYPE;

/* All DTD state; shared (copied) between a parser and its external-entity
   child parsers. */
typedef struct {
  HASH_TABLE generalEntities;
  HASH_TABLE elementTypes;
  HASH_TABLE attributeIds;
  HASH_TABLE prefixes;
  STRING_POOL pool;
  STRING_POOL entityValuePool;
  /* false once a parameter entity reference has been skipped */
  XML_Bool keepProcessing;
  /* true once an internal or external PE reference has been encountered;
     this includes the reference to an external subset */
  XML_Bool hasParamEntityRefs;
  XML_Bool standalone;
#ifdef XML_DTD
  /* indicates if external PE has been read */
  XML_Bool paramEntityRead;
  HASH_TABLE paramEntities;
#endif /* XML_DTD */
  PREFIX defaultPrefix;
  /* === scaffolding for building content model === */
  XML_Bool in_eldecl;
  CONTENT_SCAFFOLD *scaffold;
  unsigned contentStringLen;
  unsigned scaffSize;
  unsigned scaffCount;
  int scaffLevel;
  int *scaffIndex;
} DTD;

typedef struct open_internal_entity {
  const char *internalEventPtr;
  const char *internalEventEndPtr;
  struct open_internal_entity *next;
  ENTITY *entity;
  int startTagLevel;
  XML_Bool betweenDecl; /* WFC: PE Between Declarations */
} OPEN_INTERNAL_ENTITY;

/* A Processor consumes input and may hand off to another processor;
   the parser's current state machine phase is m_processor. */
typedef enum XML_Error PTRCALL Processor(XML_Parser parser, const char *start,
                                         const char *end, const char **endPtr);

static Processor prologProcessor;
static Processor prologInitProcessor;
static Processor contentProcessor;
static Processor cdataSectionProcessor;
#ifdef XML_DTD
static Processor ignoreSectionProcessor;
static Processor externalParEntProcessor;
static Processor externalParEntInitProcessor;
static Processor entityValueProcessor;
static Processor
entityValueInitProcessor; #endif /* XML_DTD */ static Processor epilogProcessor; static Processor errorProcessor; static Processor externalEntityInitProcessor; static Processor externalEntityInitProcessor2; static Processor externalEntityInitProcessor3; static Processor externalEntityContentProcessor; static Processor internalEntityProcessor; static enum XML_Error handleUnknownEncoding(XML_Parser parser, const XML_Char *encodingName); static enum XML_Error processXmlDecl(XML_Parser parser, int isGeneralTextEntity, const char *s, const char *next); static enum XML_Error initializeEncoding(XML_Parser parser); static enum XML_Error doProlog(XML_Parser parser, const ENCODING *enc, const char *s, const char *end, int tok, const char *next, const char **nextPtr, XML_Bool haveMore, XML_Bool allowClosingDoctype); static enum XML_Error processInternalEntity(XML_Parser parser, ENTITY *entity, XML_Bool betweenDecl); static enum XML_Error doContent(XML_Parser parser, int startTagLevel, const ENCODING *enc, const char *start, const char *end, const char **endPtr, XML_Bool haveMore); static enum XML_Error doCdataSection(XML_Parser parser, const ENCODING *, const char **startPtr, const char *end, const char **nextPtr, XML_Bool haveMore); #ifdef XML_DTD static enum XML_Error doIgnoreSection(XML_Parser parser, const ENCODING *, const char **startPtr, const char *end, const char **nextPtr, XML_Bool haveMore); #endif /* XML_DTD */ static void freeBindings(XML_Parser parser, BINDING *bindings); static enum XML_Error storeAtts(XML_Parser parser, const ENCODING *, const char *s, TAG_NAME *tagNamePtr, BINDING **bindingsPtr); static enum XML_Error addBinding(XML_Parser parser, PREFIX *prefix, const ATTRIBUTE_ID *attId, const XML_Char *uri, BINDING **bindingsPtr); static int defineAttribute(ELEMENT_TYPE *type, ATTRIBUTE_ID *, XML_Bool isCdata, XML_Bool isId, const XML_Char *dfltValue, XML_Parser parser); static enum XML_Error storeAttributeValue(XML_Parser parser, const ENCODING *, 
XML_Bool isCdata, const char *, const char *, STRING_POOL *); static enum XML_Error appendAttributeValue(XML_Parser parser, const ENCODING *, XML_Bool isCdata, const char *, const char *, STRING_POOL *); static ATTRIBUTE_ID *getAttributeId(XML_Parser parser, const ENCODING *enc, const char *start, const char *end); static int setElementTypePrefix(XML_Parser parser, ELEMENT_TYPE *); static enum XML_Error storeEntityValue(XML_Parser parser, const ENCODING *enc, const char *start, const char *end); static int reportProcessingInstruction(XML_Parser parser, const ENCODING *enc, const char *start, const char *end); static int reportComment(XML_Parser parser, const ENCODING *enc, const char *start, const char *end); static void reportDefault(XML_Parser parser, const ENCODING *enc, const char *start, const char *end); static const XML_Char *getContext(XML_Parser parser); static XML_Bool setContext(XML_Parser parser, const XML_Char *context); static void FASTCALL normalizePublicId(XML_Char *s); static DTD *dtdCreate(const XML_Memory_Handling_Suite *ms); /* do not call if m_parentParser != NULL */ static void dtdReset(DTD *p, const XML_Memory_Handling_Suite *ms); static void dtdDestroy(DTD *p, XML_Bool isDocEntity, const XML_Memory_Handling_Suite *ms); static int dtdCopy(XML_Parser oldParser, DTD *newDtd, const DTD *oldDtd, const XML_Memory_Handling_Suite *ms); static int copyEntityTable(XML_Parser oldParser, HASH_TABLE *, STRING_POOL *, const HASH_TABLE *); static NAMED *lookup(XML_Parser parser, HASH_TABLE *table, KEY name, size_t createSize); static void FASTCALL hashTableInit(HASH_TABLE *, const XML_Memory_Handling_Suite *ms); static void FASTCALL hashTableClear(HASH_TABLE *); static void FASTCALL hashTableDestroy(HASH_TABLE *); static void FASTCALL hashTableIterInit(HASH_TABLE_ITER *, const HASH_TABLE *); static NAMED *FASTCALL hashTableIterNext(HASH_TABLE_ITER *); static void FASTCALL poolInit(STRING_POOL *, const XML_Memory_Handling_Suite *ms); static void FASTCALL 
poolClear(STRING_POOL *);
static void FASTCALL poolDestroy(STRING_POOL *);
static XML_Char *poolAppend(STRING_POOL *pool, const ENCODING *enc,
                            const char *ptr, const char *end);
static XML_Char *poolStoreString(STRING_POOL *pool, const ENCODING *enc,
                                 const char *ptr, const char *end);
static XML_Bool FASTCALL poolGrow(STRING_POOL *pool);
static const XML_Char *FASTCALL poolCopyString(STRING_POOL *pool,
                                               const XML_Char *s);
static const XML_Char *poolCopyStringN(STRING_POOL *pool, const XML_Char *s,
                                       int n);
static const XML_Char *FASTCALL poolAppendString(STRING_POOL *pool,
                                                 const XML_Char *s);

static int FASTCALL nextScaffoldPart(XML_Parser parser);
static XML_Content *build_model(XML_Parser parser);
static ELEMENT_TYPE *getElementType(XML_Parser parser, const ENCODING *enc,
                                    const char *ptr, const char *end);

static XML_Char *copyString(const XML_Char *s,
                            const XML_Memory_Handling_Suite *memsuite);

static unsigned long generate_hash_secret_salt(XML_Parser parser);
static XML_Bool startParsing(XML_Parser parser);

static XML_Parser parserCreate(const XML_Char *encodingName,
                               const XML_Memory_Handling_Suite *memsuite,
                               const XML_Char *nameSep, DTD *dtd);

static void parserInit(XML_Parser parser, const XML_Char *encodingName);

/* String-pool accessors: the string under construction is [start, ptr). */
#define poolStart(pool) ((pool)->start)
#define poolEnd(pool) ((pool)->ptr)
#define poolLength(pool) ((pool)->ptr - (pool)->start)
#define poolChop(pool) ((void)--(pool->ptr))
#define poolLastChar(pool) (((pool)->ptr)[-1])
/* Discard abandons the in-progress string; Finish commits it. */
#define poolDiscard(pool) ((pool)->ptr = (pool)->start)
#define poolFinish(pool) ((pool)->start = (pool)->ptr)
/* Append one char, growing the pool on demand; yields 0 on OOM. */
#define poolAppendChar(pool, c) \
  (((pool)->ptr == (pool)->end && ! poolGrow(pool)) \
       ? 0 \
       : ((*((pool)->ptr)++ = c), 1))

struct XML_ParserStruct {
  /* The first member must be m_userData so that the XML_GetUserData
     macro works. */
  void *m_userData;
  void *m_handlerArg;
  char *m_buffer;
  const XML_Memory_Handling_Suite m_mem;
  /* first character to be parsed */
  const char *m_bufferPtr;
  /* past last character to be parsed */
  char *m_bufferEnd;
  /* allocated end of m_buffer */
  const char *m_bufferLim;
  XML_Index m_parseEndByteIndex;
  const char *m_parseEndPtr;
  XML_Char *m_dataBuf;
  XML_Char *m_dataBufEnd;
  /* registered application handlers (all optional, NULL when unset) */
  XML_StartElementHandler m_startElementHandler;
  XML_EndElementHandler m_endElementHandler;
  XML_CharacterDataHandler m_characterDataHandler;
  XML_ProcessingInstructionHandler m_processingInstructionHandler;
  XML_CommentHandler m_commentHandler;
  XML_StartCdataSectionHandler m_startCdataSectionHandler;
  XML_EndCdataSectionHandler m_endCdataSectionHandler;
  XML_DefaultHandler m_defaultHandler;
  XML_StartDoctypeDeclHandler m_startDoctypeDeclHandler;
  XML_EndDoctypeDeclHandler m_endDoctypeDeclHandler;
  XML_UnparsedEntityDeclHandler m_unparsedEntityDeclHandler;
  XML_NotationDeclHandler m_notationDeclHandler;
  XML_StartNamespaceDeclHandler m_startNamespaceDeclHandler;
  XML_EndNamespaceDeclHandler m_endNamespaceDeclHandler;
  XML_NotStandaloneHandler m_notStandaloneHandler;
  XML_ExternalEntityRefHandler m_externalEntityRefHandler;
  XML_Parser m_externalEntityRefHandlerArg;
  XML_SkippedEntityHandler m_skippedEntityHandler;
  XML_UnknownEncodingHandler m_unknownEncodingHandler;
  XML_ElementDeclHandler m_elementDeclHandler;
  XML_AttlistDeclHandler m_attlistDeclHandler;
  XML_EntityDeclHandler m_entityDeclHandler;
  XML_XmlDeclHandler m_xmlDeclHandler;
  const ENCODING *m_encoding;
  INIT_ENCODING m_initEncoding;
  const ENCODING *m_internalEncoding;
  const XML_Char *m_protocolEncodingName;
  XML_Bool m_ns;
  XML_Bool m_ns_triplets;
  void *m_unknownEncodingMem;
  void *m_unknownEncodingData;
  void *m_unknownEncodingHandlerData;
  void(XMLCALL *m_unknownEncodingRelease)(void *);
  PROLOG_STATE m_prologState;
  Processor *m_processor;
  enum XML_Error m_errorCode;
  const char *m_eventPtr;
  const char *m_eventEndPtr;
  const char *m_positionPtr;
  OPEN_INTERNAL_ENTITY *m_openInternalEntities;
  OPEN_INTERNAL_ENTITY *m_freeInternalEntities;
  XML_Bool m_defaultExpandInternalEntities;
  int m_tagLevel;
  ENTITY *m_declEntity;
  const XML_Char *m_doctypeName;
  const XML_Char *m_doctypeSysid;
  const XML_Char *m_doctypePubid;
  const XML_Char *m_declAttributeType;
  const XML_Char *m_declNotationName;
  const XML_Char *m_declNotationPublicId;
  ELEMENT_TYPE *m_declElementType;
  ATTRIBUTE_ID *m_declAttributeId;
  XML_Bool m_declAttributeIsCdata;
  XML_Bool m_declAttributeIsId;
  DTD *m_dtd;
  const XML_Char *m_curBase;
  TAG *m_tagStack;
  TAG *m_freeTagList;
  BINDING *m_inheritedBindings;
  BINDING *m_freeBindingList;
  int m_attsSize;
  int m_nSpecifiedAtts;
  int m_idAttIndex;
  ATTRIBUTE *m_atts;
  NS_ATT *m_nsAtts;
  unsigned long m_nsAttsVersion;
  unsigned char m_nsAttsPower;
#ifdef XML_ATTR_INFO
  XML_AttrInfo *m_attInfo;
#endif
  POSITION m_position;
  STRING_POOL m_tempPool;
  STRING_POOL m_temp2Pool;
  char *m_groupConnector;
  unsigned int m_groupSize;
  XML_Char m_namespaceSeparator;
  XML_Parser m_parentParser;
  XML_ParsingStatus m_parsingStatus;
#ifdef XML_DTD
  XML_Bool m_isParamEntity;
  XML_Bool m_useForeignDTD;
  enum XML_ParamEntityParsing m_paramEntityParsing;
#endif
  unsigned long m_hash_secret_salt;
};

/* Allocation shorthands routed through the parser's memory suite. */
#define MALLOC(parser, s) (parser->m_mem.malloc_fcn((s)))
#define REALLOC(parser, p, s) (parser->m_mem.realloc_fcn((p), (s)))
#define FREE(parser, p) (parser->m_mem.free_fcn((p)))

/* Public constructor: default allocator, no namespace processing. */
XML_Parser XMLCALL
XML_ParserCreate(const XML_Char *encodingName) {
  return XML_ParserCreate_MM(encodingName, NULL, NULL);
}

/* Public constructor with namespace processing using separator nsSep. */
XML_Parser XMLCALL
XML_ParserCreateNS(const XML_Char *encodingName, XML_Char nsSep) {
  XML_Char tmp[2];
  *tmp = nsSep;
  return XML_ParserCreate_MM(encodingName, NULL, tmp);
}

/* "xml=http://www.w3.org/XML/1998/namespace", spelled in ASCII codepoints
   so the constant is correct in both UTF-8 and UTF-16 builds. */
static const XML_Char implicitContext[]
    = {ASCII_x,     ASCII_m,     ASCII_l,      ASCII_EQUALS, ASCII_h,
       ASCII_t,     ASCII_t,     ASCII_p,      ASCII_COLON,  ASCII_SLASH,
       ASCII_SLASH, ASCII_w,     ASCII_w,      ASCII_w,      ASCII_PERIOD,
       ASCII_w,     ASCII_3,     ASCII_PERIOD, ASCII_o,      ASCII_r,
       ASCII_g,     ASCII_SLASH, ASCII_X,      ASCII_M,      ASCII_L,
       ASCII_SLASH, ASCII_1,     ASCII_9,      ASCII_9,      ASCII_8,
       ASCII_SLASH, ASCII_n,     ASCII_a,      ASCII_m,      ASCII_e,
       ASCII_s,     ASCII_p,     ASCII_a,      ASCII_c,      ASCII_e,
       '\0'};

/* To avoid warnings about unused functions: */
#if ! defined(HAVE_ARC4RANDOM_BUF) && ! defined(HAVE_ARC4RANDOM)

# if defined(HAVE_GETRANDOM) || defined(HAVE_SYSCALL_GETRANDOM)

/* Obtain entropy on Linux 3.17+ */
/* Fills `target` with `count` random bytes via getrandom(2) (or the raw
   syscall); non-blocking, retried on EINTR. Returns 1 on full success. */
static int
writeRandomBytes_getrandom_nonblock(void *target, size_t count) {
  int success = 0; /* full count bytes written? */
  size_t bytesWrittenTotal = 0;
  const unsigned int getrandomFlags = GRND_NONBLOCK;

  do {
    void *const currentTarget = (void *)((char *)target + bytesWrittenTotal);
    const size_t bytesToWrite = count - bytesWrittenTotal;

    const int bytesWrittenMore =
# if defined(HAVE_GETRANDOM)
        getrandom(currentTarget, bytesToWrite, getrandomFlags);
# else
        syscall(SYS_getrandom, currentTarget, bytesToWrite, getrandomFlags);
# endif

    if (bytesWrittenMore > 0) {
      bytesWrittenTotal += bytesWrittenMore;
      if (bytesWrittenTotal >= count)
        success = 1;
    }
  } while (! success && (errno == EINTR));

  return success;
}

# endif /* defined(HAVE_GETRANDOM) || defined(HAVE_SYSCALL_GETRANDOM) */

# if ! defined(_WIN32) && defined(XML_DEV_URANDOM)

/* Extract entropy from /dev/urandom */
/* Same contract as above but reading from /dev/urandom; returns 0 if the
   device cannot be opened or a read fails for a reason other than EINTR. */
static int
writeRandomBytes_dev_urandom(void *target, size_t count) {
  int success = 0; /* full count bytes written? */
  size_t bytesWrittenTotal = 0;

  const int fd = open("/dev/urandom", O_RDONLY);
  if (fd < 0) {
    return 0;
  }

  do {
    void *const currentTarget = (void *)((char *)target + bytesWrittenTotal);
    const size_t bytesToWrite = count - bytesWrittenTotal;

    const ssize_t bytesWrittenMore = read(fd, currentTarget, bytesToWrite);

    if (bytesWrittenMore > 0) {
      bytesWrittenTotal += bytesWrittenMore;
      if (bytesWrittenTotal >= count)
        success = 1;
    }
  } while (! success && (errno == EINTR));

  close(fd);
  return success;
}

# endif /* ! defined(_WIN32) && defined(XML_DEV_URANDOM) */

#endif /* ! defined(HAVE_ARC4RANDOM_BUF) && ! defined(HAVE_ARC4RANDOM) */

#if defined(HAVE_ARC4RANDOM) && ! defined(HAVE_ARC4RANDOM_BUF)

/* Fill `target` with `count` bytes, 32 bits at a time, from arc4random();
   cannot fail. */
static void
writeRandomBytes_arc4random(void *target, size_t count) {
  size_t bytesWrittenTotal = 0;

  while (bytesWrittenTotal < count) {
    const uint32_t random32 = arc4random();
    size_t i = 0;

    for (; (i < sizeof(random32)) && (bytesWrittenTotal < count);
         i++, bytesWrittenTotal++) {
      const uint8_t random8 = (uint8_t)(random32 >> (i * 8));
      ((uint8_t *)target)[bytesWrittenTotal] = random8;
    }
  }
}

#endif /* defined(HAVE_ARC4RANDOM) && ! defined(HAVE_ARC4RANDOM_BUF) */

#ifdef _WIN32

/* Obtain entropy on Windows using the rand_s() function which
 * generates cryptographically secure random numbers.  Internally it
 * uses RtlGenRandom API which is present in Windows XP and later.
 */
static int
writeRandomBytes_rand_s(void *target, size_t count) {
  size_t bytesWrittenTotal = 0;

  while (bytesWrittenTotal < count) {
    unsigned int random32 = 0;
    size_t i = 0;

    if (rand_s(&random32))
      return 0; /* failure */

    for (; (i < sizeof(random32)) && (bytesWrittenTotal < count);
         i++, bytesWrittenTotal++) {
      const uint8_t random8 = (uint8_t)(random32 >> (i * 8));
      ((uint8_t *)target)[bytesWrittenTotal] = random8;
    }
  }
  return 1; /* success */
}

#endif /* _WIN32 */

#if ! defined(HAVE_ARC4RANDOM_BUF) && ! defined(HAVE_ARC4RANDOM)

/* Low-quality fallback entropy from the clock; last resort only. */
static unsigned long
gather_time_entropy(void) {
# ifdef _WIN32
  FILETIME ft;
  GetSystemTimeAsFileTime(&ft); /* never fails */
  return ft.dwHighDateTime ^ ft.dwLowDateTime;
# else
  struct timeval tv;
  int gettimeofday_res;

  gettimeofday_res = gettimeofday(&tv, NULL);

# if defined(NDEBUG)
  (void)gettimeofday_res;
# else
  assert(gettimeofday_res == 0);
# endif /* defined(NDEBUG) */

  /* Microseconds time is <20 bits entropy */
  return tv.tv_usec;
# endif
}

#endif /* ! defined(HAVE_ARC4RANDOM_BUF) && ! defined(HAVE_ARC4RANDOM) */

/* Pass-through that logs the chosen entropy source when the
   EXPAT_ENTROPY_DEBUG=1 environment variable is set. */
static unsigned long
ENTROPY_DEBUG(const char *label, unsigned long entropy) {
  const char *const EXPAT_ENTROPY_DEBUG = getenv("EXPAT_ENTROPY_DEBUG");

  if (EXPAT_ENTROPY_DEBUG && !
strcmp(EXPAT_ENTROPY_DEBUG, "1")) {
    fprintf(stderr, "Entropy: %s --> 0x%0*lx (%lu bytes)\n", label,
            (int)sizeof(entropy) * 2, entropy, (unsigned long)sizeof(entropy));
  }
  return entropy;
}

/* Produce the salt for the SipHash-based hash tables (defense against
   hash-flooding DoS). Tries the best compile-time-available source first,
   falling back to time^pid mixed with a Mersenne-prime factor. */
static unsigned long
generate_hash_secret_salt(XML_Parser parser) {
  unsigned long entropy;
  (void)parser;

  /* "Failproof" high quality providers: */
#if defined(HAVE_ARC4RANDOM_BUF)
  arc4random_buf(&entropy, sizeof(entropy));
  return ENTROPY_DEBUG("arc4random_buf", entropy);
#elif defined(HAVE_ARC4RANDOM)
  writeRandomBytes_arc4random((void *)&entropy, sizeof(entropy));
  return ENTROPY_DEBUG("arc4random", entropy);
#else
  /* Try high quality providers first .. */
# ifdef _WIN32
  if (writeRandomBytes_rand_s((void *)&entropy, sizeof(entropy))) {
    return ENTROPY_DEBUG("rand_s", entropy);
  }
# elif defined(HAVE_GETRANDOM) || defined(HAVE_SYSCALL_GETRANDOM)
  if (writeRandomBytes_getrandom_nonblock((void *)&entropy, sizeof(entropy))) {
    return ENTROPY_DEBUG("getrandom", entropy);
  }
# endif
# if ! defined(_WIN32) && defined(XML_DEV_URANDOM)
  if (writeRandomBytes_dev_urandom((void *)&entropy, sizeof(entropy))) {
    return ENTROPY_DEBUG("/dev/urandom", entropy);
  }
# endif /* ! defined(_WIN32) && defined(XML_DEV_URANDOM) */
  /* .. and self-made low quality for backup: */

  /* Process ID is 0 bits entropy if attacker has local access */
  entropy = gather_time_entropy() ^ getpid();

  /* Factors are 2^31-1 and 2^61-1 (Mersenne primes M31 and M61) */
  if (sizeof(unsigned long) == 4) {
    return ENTROPY_DEBUG("fallback(4)", entropy * 2147483647);
  } else {
    return ENTROPY_DEBUG("fallback(8)",
                         entropy * (unsigned long)2305843009213693951ULL);
  }
#endif
}

/* The salt lives on the root parser; child (external entity) parsers
   walk up to it. */
static unsigned long
get_hash_secret_salt(XML_Parser parser) {
  if (parser->m_parentParser != NULL)
    return get_hash_secret_salt(parser->m_parentParser);
  return parser->m_hash_secret_salt;
}

static XML_Bool /* only valid for root parser */
startParsing(XML_Parser parser) {
  /* hash functions must be initialized before setContext() is called */
  if (parser->m_hash_secret_salt == 0)
    parser->m_hash_secret_salt = generate_hash_secret_salt(parser);
  if (parser->m_ns) {
    /* implicit context only set for root parser, since child
       parsers (i.e. external entity parsers) will inherit it
    */
    return setContext(parser, implicitContext);
  }
  return XML_TRUE;
}

/* Fully-general public constructor: optional custom allocator and optional
   namespace separator. */
XML_Parser XMLCALL
XML_ParserCreate_MM(const XML_Char *encodingName,
                    const XML_Memory_Handling_Suite *memsuite,
                    const XML_Char *nameSep) {
  return parserCreate(encodingName, memsuite, nameSep, NULL);
}

/* Internal constructor shared by all public create functions; `dtd` is
   non-NULL only for external-entity child parsers that reuse the parent's
   DTD. Returns NULL on any allocation failure (all partial allocations
   are released). */
static XML_Parser
parserCreate(const XML_Char *encodingName,
             const XML_Memory_Handling_Suite *memsuite, const XML_Char *nameSep,
             DTD *dtd) {
  XML_Parser parser;

  if (memsuite) {
    XML_Memory_Handling_Suite *mtemp;
    parser = (XML_Parser)memsuite->malloc_fcn(sizeof(struct XML_ParserStruct));
    if (parser != NULL) {
      /* m_mem is const in the struct; cast to install the suite once. */
      mtemp = (XML_Memory_Handling_Suite *)&(parser->m_mem);
      mtemp->malloc_fcn = memsuite->malloc_fcn;
      mtemp->realloc_fcn = memsuite->realloc_fcn;
      mtemp->free_fcn = memsuite->free_fcn;
    }
  } else {
    XML_Memory_Handling_Suite *mtemp;
    parser = (XML_Parser)malloc(sizeof(struct XML_ParserStruct));
    if (parser != NULL) {
      mtemp = (XML_Memory_Handling_Suite *)&(parser->m_mem);
      mtemp->malloc_fcn = malloc;
      mtemp->realloc_fcn = realloc;
      mtemp->free_fcn = free;
    }
  }

  if (! parser)
    return parser;

  parser->m_buffer = NULL;
  parser->m_bufferLim = NULL;

  parser->m_attsSize = INIT_ATTS_SIZE;
  parser->m_atts
      = (ATTRIBUTE *)MALLOC(parser, parser->m_attsSize * sizeof(ATTRIBUTE));
  if (parser->m_atts == NULL) {
    FREE(parser, parser);
    return NULL;
  }
#ifdef XML_ATTR_INFO
  parser->m_attInfo = (XML_AttrInfo *)MALLOC(
      parser, parser->m_attsSize * sizeof(XML_AttrInfo));
  if (parser->m_attInfo == NULL) {
    FREE(parser, parser->m_atts);
    FREE(parser, parser);
    return NULL;
  }
#endif
  parser->m_dataBuf
      = (XML_Char *)MALLOC(parser, INIT_DATA_BUF_SIZE * sizeof(XML_Char));
  if (parser->m_dataBuf == NULL) {
    /* Unwind everything allocated so far before failing. */
    FREE(parser, parser->m_atts);
#ifdef XML_ATTR_INFO
    FREE(parser, parser->m_attInfo);
#endif
    FREE(parser, parser);
    return NULL;
  }
  parser->m_dataBufEnd = parser->m_dataBuf + INIT_DATA_BUF_SIZE;

  if (dtd)
    /* Child parser shares the parent's DTD. */
    parser->m_dtd = dtd;
  else {
    parser->m_dtd = dtdCreate(&parser->m_mem);
    if (parser->m_dtd == NULL) {
      FREE(parser, parser->m_dataBuf);
      FREE(parser, parser->m_atts);
#ifdef XML_ATTR_INFO
      FREE(parser, parser->m_attInfo);
#endif
      FREE(parser, parser);
      return NULL;
    }
  }

  parser->m_freeBindingList = NULL;
  parser->m_freeTagList = NULL;
  parser->m_freeInternalEntities = NULL;

  parser->m_groupSize = 0;
  parser->m_groupConnector = NULL;

  parser->m_unknownEncodingHandler = NULL;
  parser->m_unknownEncodingHandlerData = NULL;

  parser->m_namespaceSeparator = ASCII_EXCL;
  parser->m_ns = XML_FALSE;
  parser->m_ns_triplets = XML_FALSE;

  parser->m_nsAtts = NULL;
  parser->m_nsAttsVersion = 0;
  parser->m_nsAttsPower = 0;

  parser->m_protocolEncodingName = NULL;

  poolInit(&parser->m_tempPool, &(parser->m_mem));
  poolInit(&parser->m_temp2Pool, &(parser->m_mem));
  parserInit(parser, encodingName);

  /* parserInit() sets m_protocolEncodingName by copying encodingName;
     a NULL copy here means that allocation failed. */
  if (encodingName && ! parser->m_protocolEncodingName) {
    XML_ParserFree(parser);
    return NULL;
  }

  if (nameSep) {
    parser->m_ns = XML_TRUE;
    parser->m_internalEncoding = XmlGetInternalEncodingNS();
    parser->m_namespaceSeparator = *nameSep;
  } else {
    parser->m_internalEncoding = XmlGetInternalEncoding();
  }

  return parser;
}

/* Reset every per-parse field to its initial state; used both by
   parserCreate() and XML_ParserReset(). Does NOT touch the DTD or the
   input buffer allocation itself. */
static void
parserInit(XML_Parser parser, const XML_Char *encodingName) {
  parser->m_processor = prologInitProcessor;
  XmlPrologStateInit(&parser->m_prologState);
  if (encodingName != NULL) {
    parser->m_protocolEncodingName = copyString(encodingName, &(parser->m_mem));
  }
  parser->m_curBase = NULL;
  XmlInitEncoding(&parser->m_initEncoding, &parser->m_encoding, 0);
  parser->m_userData = NULL;
  parser->m_handlerArg = NULL;
  parser->m_startElementHandler = NULL;
  parser->m_endElementHandler = NULL;
  parser->m_characterDataHandler = NULL;
  parser->m_processingInstructionHandler = NULL;
  parser->m_commentHandler = NULL;
  parser->m_startCdataSectionHandler = NULL;
  parser->m_endCdataSectionHandler = NULL;
  parser->m_defaultHandler = NULL;
  parser->m_startDoctypeDeclHandler = NULL;
  parser->m_endDoctypeDeclHandler = NULL;
  parser->m_unparsedEntityDeclHandler = NULL;
  parser->m_notationDeclHandler = NULL;
  parser->m_startNamespaceDeclHandler = NULL;
  parser->m_endNamespaceDeclHandler = NULL;
  parser->m_notStandaloneHandler = NULL;
  parser->m_externalEntityRefHandler = NULL;
  parser->m_externalEntityRefHandlerArg = parser;
  parser->m_skippedEntityHandler = NULL;
  parser->m_elementDeclHandler = NULL;
  parser->m_attlistDeclHandler = NULL;
  parser->m_entityDeclHandler = NULL;
  parser->m_xmlDeclHandler = NULL;
  parser->m_bufferPtr = parser->m_buffer;
  parser->m_bufferEnd = parser->m_buffer;
  parser->m_parseEndByteIndex = 0;
  parser->m_parseEndPtr = NULL;
  parser->m_declElementType = NULL;
  parser->m_declAttributeId = NULL;
  parser->m_declEntity = NULL;
  parser->m_doctypeName = NULL;
  parser->m_doctypeSysid = NULL;
  parser->m_doctypePubid = NULL;
  parser->m_declAttributeType = NULL;
  parser->m_declNotationName = NULL;
parser->m_declNotationPublicId = NULL; parser->m_declAttributeIsCdata = XML_FALSE; parser->m_declAttributeIsId = XML_FALSE; memset(&parser->m_position, 0, sizeof(POSITION)); parser->m_errorCode = XML_ERROR_NONE; parser->m_eventPtr = NULL; parser->m_eventEndPtr = NULL; parser->m_positionPtr = NULL; parser->m_openInternalEntities = NULL; parser->m_defaultExpandInternalEntities = XML_TRUE; parser->m_tagLevel = 0; parser->m_tagStack = NULL; parser->m_inheritedBindings = NULL; parser->m_nSpecifiedAtts = 0; parser->m_unknownEncodingMem = NULL; parser->m_unknownEncodingRelease = NULL; parser->m_unknownEncodingData = NULL; parser->m_parentParser = NULL; parser->m_parsingStatus.parsing = XML_INITIALIZED; #ifdef XML_DTD parser->m_isParamEntity = XML_FALSE; parser->m_useForeignDTD = XML_FALSE; parser->m_paramEntityParsing = XML_PARAM_ENTITY_PARSING_NEVER; #endif parser->m_hash_secret_salt = 0; } /* moves list of bindings to m_freeBindingList */ static void FASTCALL moveToFreeBindingList(XML_Parser parser, BINDING *bindings) { while (bindings) { BINDING *b = bindings; bindings = bindings->nextTagBinding; b->nextTagBinding = parser->m_freeBindingList; parser->m_freeBindingList = b; } } XML_Bool XMLCALL XML_ParserReset(XML_Parser parser, const XML_Char *encodingName) { TAG *tStk; OPEN_INTERNAL_ENTITY *openEntityList; if (parser == NULL) return XML_FALSE; if (parser->m_parentParser) return XML_FALSE; /* move m_tagStack to m_freeTagList */ tStk = parser->m_tagStack; while (tStk) { TAG *tag = tStk; tStk = tStk->parent; tag->parent = parser->m_freeTagList; moveToFreeBindingList(parser, tag->bindings); tag->bindings = NULL; parser->m_freeTagList = tag; } /* move m_openInternalEntities to m_freeInternalEntities */ openEntityList = parser->m_openInternalEntities; while (openEntityList) { OPEN_INTERNAL_ENTITY *openEntity = openEntityList; openEntityList = openEntity->next; openEntity->next = parser->m_freeInternalEntities; parser->m_freeInternalEntities = openEntity; } 
moveToFreeBindingList(parser, parser->m_inheritedBindings); FREE(parser, parser->m_unknownEncodingMem); if (parser->m_unknownEncodingRelease) parser->m_unknownEncodingRelease(parser->m_unknownEncodingData); poolClear(&parser->m_tempPool); poolClear(&parser->m_temp2Pool); FREE(parser, (void *)parser->m_protocolEncodingName); parser->m_protocolEncodingName = NULL; parserInit(parser, encodingName); dtdReset(parser->m_dtd, &parser->m_mem); return XML_TRUE; } enum XML_Status XMLCALL XML_SetEncoding(XML_Parser parser, const XML_Char *encodingName) { if (parser == NULL) return XML_STATUS_ERROR; /* Block after XML_Parse()/XML_ParseBuffer() has been called. XXX There's no way for the caller to determine which of the XXX possible error cases caused the XML_STATUS_ERROR return. */ if (parser->m_parsingStatus.parsing == XML_PARSING || parser->m_parsingStatus.parsing == XML_SUSPENDED) return XML_STATUS_ERROR; /* Get rid of any previous encoding name */ FREE(parser, (void *)parser->m_protocolEncodingName); if (encodingName == NULL) /* No new encoding name */ parser->m_protocolEncodingName = NULL; else { /* Copy the new encoding name into allocated memory */ parser->m_protocolEncodingName = copyString(encodingName, &(parser->m_mem)); if (! 
parser->m_protocolEncodingName) return XML_STATUS_ERROR; } return XML_STATUS_OK; } XML_Parser XMLCALL XML_ExternalEntityParserCreate(XML_Parser oldParser, const XML_Char *context, const XML_Char *encodingName) { XML_Parser parser = oldParser; DTD *newDtd = NULL; DTD *oldDtd; XML_StartElementHandler oldStartElementHandler; XML_EndElementHandler oldEndElementHandler; XML_CharacterDataHandler oldCharacterDataHandler; XML_ProcessingInstructionHandler oldProcessingInstructionHandler; XML_CommentHandler oldCommentHandler; XML_StartCdataSectionHandler oldStartCdataSectionHandler; XML_EndCdataSectionHandler oldEndCdataSectionHandler; XML_DefaultHandler oldDefaultHandler; XML_UnparsedEntityDeclHandler oldUnparsedEntityDeclHandler; XML_NotationDeclHandler oldNotationDeclHandler; XML_StartNamespaceDeclHandler oldStartNamespaceDeclHandler; XML_EndNamespaceDeclHandler oldEndNamespaceDeclHandler; XML_NotStandaloneHandler oldNotStandaloneHandler; XML_ExternalEntityRefHandler oldExternalEntityRefHandler; XML_SkippedEntityHandler oldSkippedEntityHandler; XML_UnknownEncodingHandler oldUnknownEncodingHandler; XML_ElementDeclHandler oldElementDeclHandler; XML_AttlistDeclHandler oldAttlistDeclHandler; XML_EntityDeclHandler oldEntityDeclHandler; XML_XmlDeclHandler oldXmlDeclHandler; ELEMENT_TYPE *oldDeclElementType; void *oldUserData; void *oldHandlerArg; XML_Bool oldDefaultExpandInternalEntities; XML_Parser oldExternalEntityRefHandlerArg; #ifdef XML_DTD enum XML_ParamEntityParsing oldParamEntityParsing; int oldInEntityValue; #endif XML_Bool oldns_triplets; /* Note that the new parser shares the same hash secret as the old parser, so that dtdCopy and copyEntityTable can lookup values from hash tables associated with either parser without us having to worry which hash secrets each table has. 
*/ unsigned long oldhash_secret_salt; /* Validate the oldParser parameter before we pull everything out of it */ if (oldParser == NULL) return NULL; /* Stash the original parser contents on the stack */ oldDtd = parser->m_dtd; oldStartElementHandler = parser->m_startElementHandler; oldEndElementHandler = parser->m_endElementHandler; oldCharacterDataHandler = parser->m_characterDataHandler; oldProcessingInstructionHandler = parser->m_processingInstructionHandler; oldCommentHandler = parser->m_commentHandler; oldStartCdataSectionHandler = parser->m_startCdataSectionHandler; oldEndCdataSectionHandler = parser->m_endCdataSectionHandler; oldDefaultHandler = parser->m_defaultHandler; oldUnparsedEntityDeclHandler = parser->m_unparsedEntityDeclHandler; oldNotationDeclHandler = parser->m_notationDeclHandler; oldStartNamespaceDeclHandler = parser->m_startNamespaceDeclHandler; oldEndNamespaceDeclHandler = parser->m_endNamespaceDeclHandler; oldNotStandaloneHandler = parser->m_notStandaloneHandler; oldExternalEntityRefHandler = parser->m_externalEntityRefHandler; oldSkippedEntityHandler = parser->m_skippedEntityHandler; oldUnknownEncodingHandler = parser->m_unknownEncodingHandler; oldElementDeclHandler = parser->m_elementDeclHandler; oldAttlistDeclHandler = parser->m_attlistDeclHandler; oldEntityDeclHandler = parser->m_entityDeclHandler; oldXmlDeclHandler = parser->m_xmlDeclHandler; oldDeclElementType = parser->m_declElementType; oldUserData = parser->m_userData; oldHandlerArg = parser->m_handlerArg; oldDefaultExpandInternalEntities = parser->m_defaultExpandInternalEntities; oldExternalEntityRefHandlerArg = parser->m_externalEntityRefHandlerArg; #ifdef XML_DTD oldParamEntityParsing = parser->m_paramEntityParsing; oldInEntityValue = parser->m_prologState.inEntityValue; #endif oldns_triplets = parser->m_ns_triplets; /* Note that the new parser shares the same hash secret as the old parser, so that dtdCopy and copyEntityTable can lookup values from hash tables associated with 
either parser without us having to worry which hash secrets each table has. */ oldhash_secret_salt = parser->m_hash_secret_salt; #ifdef XML_DTD if (! context) newDtd = oldDtd; #endif /* XML_DTD */ /* Note that the magical uses of the pre-processor to make field access look more like C++ require that `parser' be overwritten here. This makes this function more painful to follow than it would be otherwise. */ if (parser->m_ns) { XML_Char tmp[2]; *tmp = parser->m_namespaceSeparator; parser = parserCreate(encodingName, &parser->m_mem, tmp, newDtd); } else { parser = parserCreate(encodingName, &parser->m_mem, NULL, newDtd); } if (! parser) return NULL; parser->m_startElementHandler = oldStartElementHandler; parser->m_endElementHandler = oldEndElementHandler; parser->m_characterDataHandler = oldCharacterDataHandler; parser->m_processingInstructionHandler = oldProcessingInstructionHandler; parser->m_commentHandler = oldCommentHandler; parser->m_startCdataSectionHandler = oldStartCdataSectionHandler; parser->m_endCdataSectionHandler = oldEndCdataSectionHandler; parser->m_defaultHandler = oldDefaultHandler; parser->m_unparsedEntityDeclHandler = oldUnparsedEntityDeclHandler; parser->m_notationDeclHandler = oldNotationDeclHandler; parser->m_startNamespaceDeclHandler = oldStartNamespaceDeclHandler; parser->m_endNamespaceDeclHandler = oldEndNamespaceDeclHandler; parser->m_notStandaloneHandler = oldNotStandaloneHandler; parser->m_externalEntityRefHandler = oldExternalEntityRefHandler; parser->m_skippedEntityHandler = oldSkippedEntityHandler; parser->m_unknownEncodingHandler = oldUnknownEncodingHandler; parser->m_elementDeclHandler = oldElementDeclHandler; parser->m_attlistDeclHandler = oldAttlistDeclHandler; parser->m_entityDeclHandler = oldEntityDeclHandler; parser->m_xmlDeclHandler = oldXmlDeclHandler; parser->m_declElementType = oldDeclElementType; parser->m_userData = oldUserData; if (oldUserData == oldHandlerArg) parser->m_handlerArg = parser->m_userData; else 
parser->m_handlerArg = parser; if (oldExternalEntityRefHandlerArg != oldParser) parser->m_externalEntityRefHandlerArg = oldExternalEntityRefHandlerArg; parser->m_defaultExpandInternalEntities = oldDefaultExpandInternalEntities; parser->m_ns_triplets = oldns_triplets; parser->m_hash_secret_salt = oldhash_secret_salt; parser->m_parentParser = oldParser; #ifdef XML_DTD parser->m_paramEntityParsing = oldParamEntityParsing; parser->m_prologState.inEntityValue = oldInEntityValue; if (context) { #endif /* XML_DTD */ if (! dtdCopy(oldParser, parser->m_dtd, oldDtd, &parser->m_mem) || ! setContext(parser, context)) { XML_ParserFree(parser); return NULL; } parser->m_processor = externalEntityInitProcessor; #ifdef XML_DTD } else { /* The DTD instance referenced by parser->m_dtd is shared between the document's root parser and external PE parsers, therefore one does not need to call setContext. In addition, one also *must* not call setContext, because this would overwrite existing prefix->binding pointers in parser->m_dtd with ones that get destroyed with the external PE parser. This would leave those prefixes with dangling pointers. */ parser->m_isParamEntity = XML_TRUE; XmlPrologStateInitExternalEntity(&parser->m_prologState); parser->m_processor = externalParEntInitProcessor; } #endif /* XML_DTD */ return parser; } static void FASTCALL destroyBindings(BINDING *bindings, XML_Parser parser) { for (;;) { BINDING *b = bindings; if (! 
/* NOTE(review): this chunk begins mid-expression — the "if (! b)" test of
 * destroyBindings() starts on the previous line. */
b)
      break;
    bindings = b->nextTagBinding;
    FREE(parser, b->uri); /* free the URI buffer owned by the binding */
    FREE(parser, b);
  }
}

/* Free all memory owned by the parser: the tag stack and free-tag list,
 * open/free internal-entity lists, inherited bindings, string pools, the
 * stored encoding name and — unless this is a parameter-entity parser that
 * shares it with a parent — the DTD itself. */
void XMLCALL
XML_ParserFree(XML_Parser parser) {
  TAG *tagList;
  OPEN_INTERNAL_ENTITY *entityList;
  if (parser == NULL)
    return;
  /* free m_tagStack and m_freeTagList */
  tagList = parser->m_tagStack;
  for (;;) {
    TAG *p;
    if (tagList == NULL) {
      if (parser->m_freeTagList == NULL)
        break;
      /* finished the live stack; walk the recycled-tag list next */
      tagList = parser->m_freeTagList;
      parser->m_freeTagList = NULL;
    }
    p = tagList;
    tagList = tagList->parent;
    FREE(parser, p->buf);
    destroyBindings(p->bindings, parser);
    FREE(parser, p);
  }
  /* free m_openInternalEntities and m_freeInternalEntities */
  entityList = parser->m_openInternalEntities;
  for (;;) {
    OPEN_INTERNAL_ENTITY *openEntity;
    if (entityList == NULL) {
      if (parser->m_freeInternalEntities == NULL)
        break;
      entityList = parser->m_freeInternalEntities;
      parser->m_freeInternalEntities = NULL;
    }
    openEntity = entityList;
    entityList = entityList->next;
    FREE(parser, openEntity);
  }
  destroyBindings(parser->m_freeBindingList, parser);
  destroyBindings(parser->m_inheritedBindings, parser);
  poolDestroy(&parser->m_tempPool);
  poolDestroy(&parser->m_temp2Pool);
  FREE(parser, (void *)parser->m_protocolEncodingName);
#ifdef XML_DTD
  /* external parameter entity parsers share the DTD structure
     parser->m_dtd with the root parser, so we must not destroy it */
  if (! parser->m_isParamEntity && parser->m_dtd)
#else
  if (parser->m_dtd)
#endif /* XML_DTD */
    /* NOTE(review): call continues on the next chunk line */
    dtdDestroy(parser->m_dtd, (XML_Bool)!
/* NOTE(review): chunk begins mid-call — these are the final arguments of the
 * dtdDestroy() invocation started on the previous line (inside
 * XML_ParserFree). */
parser->m_parentParser, &parser->m_mem);
  FREE(parser, (void *)parser->m_atts);
#ifdef XML_ATTR_INFO
  FREE(parser, (void *)parser->m_attInfo);
#endif
  FREE(parser, parser->m_groupConnector);
  FREE(parser, parser->m_buffer);
  FREE(parser, parser->m_dataBuf);
  FREE(parser, parser->m_nsAtts);
  FREE(parser, parser->m_unknownEncodingMem);
  if (parser->m_unknownEncodingRelease)
    /* let the unknown-encoding handler release its own data first */
    parser->m_unknownEncodingRelease(parser->m_unknownEncodingData);
  FREE(parser, parser);
}

/* Make the parser itself the argument that is passed to handlers. */
void XMLCALL
XML_UseParserAsHandlerArg(XML_Parser parser) {
  if (parser != NULL)
    parser->m_handlerArg = parser;
}

/* Enable/disable use of a foreign (application-supplied) DTD; rejected once
 * parsing has begun, and unsupported without XML_DTD compiled in. */
enum XML_Error XMLCALL
XML_UseForeignDTD(XML_Parser parser, XML_Bool useDTD) {
  if (parser == NULL)
    return XML_ERROR_INVALID_ARGUMENT;
#ifdef XML_DTD
  /* block after XML_Parse()/XML_ParseBuffer() has been called */
  if (parser->m_parsingStatus.parsing == XML_PARSING
      || parser->m_parsingStatus.parsing == XML_SUSPENDED)
    return XML_ERROR_CANT_CHANGE_FEATURE_ONCE_PARSING;
  parser->m_useForeignDTD = useDTD;
  return XML_ERROR_NONE;
#else
  return XML_ERROR_FEATURE_REQUIRES_XML_DTD;
#endif
}

/* Choose whether namespace-qualified names are reported as URI+local+prefix
 * triplets; only changeable before parsing starts. */
void XMLCALL
XML_SetReturnNSTriplet(XML_Parser parser, int do_nst) {
  if (parser == NULL)
    return;
  /* block after XML_Parse()/XML_ParseBuffer() has been called */
  if (parser->m_parsingStatus.parsing == XML_PARSING
      || parser->m_parsingStatus.parsing == XML_SUSPENDED)
    return;
  parser->m_ns_triplets = do_nst ? XML_TRUE : XML_FALSE;
}

/* Set the user-data pointer handed to handlers; keeps m_handlerArg in sync
 * when it currently aliases m_userData. */
void XMLCALL
XML_SetUserData(XML_Parser parser, void *p) {
  if (parser == NULL)
    return;
  if (parser->m_handlerArg == parser->m_userData)
    parser->m_handlerArg = parser->m_userData = p;
  else
    parser->m_userData = p;
}

/* Set the base URI used to resolve relative system identifiers; the string
 * is copied into the DTD's string pool.  (Body continues on the next chunk
 * line.) */
enum XML_Status XMLCALL
XML_SetBase(XML_Parser parser, const XML_Char *p) {
  if (parser == NULL)
    return XML_STATUS_ERROR;
  if (p) {
    p = poolCopyString(&parser->m_dtd->pool, p);
    if (!
p) return XML_STATUS_ERROR; parser->m_curBase = p; } else parser->m_curBase = NULL; return XML_STATUS_OK; } const XML_Char *XMLCALL XML_GetBase(XML_Parser parser) { if (parser == NULL) return NULL; return parser->m_curBase; } int XMLCALL XML_GetSpecifiedAttributeCount(XML_Parser parser) { if (parser == NULL) return -1; return parser->m_nSpecifiedAtts; } int XMLCALL XML_GetIdAttributeIndex(XML_Parser parser) { if (parser == NULL) return -1; return parser->m_idAttIndex; } #ifdef XML_ATTR_INFO const XML_AttrInfo *XMLCALL XML_GetAttributeInfo(XML_Parser parser) { if (parser == NULL) return NULL; return parser->m_attInfo; } #endif void XMLCALL XML_SetElementHandler(XML_Parser parser, XML_StartElementHandler start, XML_EndElementHandler end) { if (parser == NULL) return; parser->m_startElementHandler = start; parser->m_endElementHandler = end; } void XMLCALL XML_SetStartElementHandler(XML_Parser parser, XML_StartElementHandler start) { if (parser != NULL) parser->m_startElementHandler = start; } void XMLCALL XML_SetEndElementHandler(XML_Parser parser, XML_EndElementHandler end) { if (parser != NULL) parser->m_endElementHandler = end; } void XMLCALL XML_SetCharacterDataHandler(XML_Parser parser, XML_CharacterDataHandler handler) { if (parser != NULL) parser->m_characterDataHandler = handler; } void XMLCALL XML_SetProcessingInstructionHandler(XML_Parser parser, XML_ProcessingInstructionHandler handler) { if (parser != NULL) parser->m_processingInstructionHandler = handler; } void XMLCALL XML_SetCommentHandler(XML_Parser parser, XML_CommentHandler handler) { if (parser != NULL) parser->m_commentHandler = handler; } void XMLCALL XML_SetCdataSectionHandler(XML_Parser parser, XML_StartCdataSectionHandler start, XML_EndCdataSectionHandler end) { if (parser == NULL) return; parser->m_startCdataSectionHandler = start; parser->m_endCdataSectionHandler = end; } void XMLCALL XML_SetStartCdataSectionHandler(XML_Parser parser, XML_StartCdataSectionHandler start) { if (parser != NULL) 
parser->m_startCdataSectionHandler = start; } void XMLCALL XML_SetEndCdataSectionHandler(XML_Parser parser, XML_EndCdataSectionHandler end) { if (parser != NULL) parser->m_endCdataSectionHandler = end; } void XMLCALL XML_SetDefaultHandler(XML_Parser parser, XML_DefaultHandler handler) { if (parser == NULL) return; parser->m_defaultHandler = handler; parser->m_defaultExpandInternalEntities = XML_FALSE; } void XMLCALL XML_SetDefaultHandlerExpand(XML_Parser parser, XML_DefaultHandler handler) { if (parser == NULL) return; parser->m_defaultHandler = handler; parser->m_defaultExpandInternalEntities = XML_TRUE; } void XMLCALL XML_SetDoctypeDeclHandler(XML_Parser parser, XML_StartDoctypeDeclHandler start, XML_EndDoctypeDeclHandler end) { if (parser == NULL) return; parser->m_startDoctypeDeclHandler = start; parser->m_endDoctypeDeclHandler = end; } void XMLCALL XML_SetStartDoctypeDeclHandler(XML_Parser parser, XML_StartDoctypeDeclHandler start) { if (parser != NULL) parser->m_startDoctypeDeclHandler = start; } void XMLCALL XML_SetEndDoctypeDeclHandler(XML_Parser parser, XML_EndDoctypeDeclHandler end) { if (parser != NULL) parser->m_endDoctypeDeclHandler = end; } void XMLCALL XML_SetUnparsedEntityDeclHandler(XML_Parser parser, XML_UnparsedEntityDeclHandler handler) { if (parser != NULL) parser->m_unparsedEntityDeclHandler = handler; } void XMLCALL XML_SetNotationDeclHandler(XML_Parser parser, XML_NotationDeclHandler handler) { if (parser != NULL) parser->m_notationDeclHandler = handler; } void XMLCALL XML_SetNamespaceDeclHandler(XML_Parser parser, XML_StartNamespaceDeclHandler start, XML_EndNamespaceDeclHandler end) { if (parser == NULL) return; parser->m_startNamespaceDeclHandler = start; parser->m_endNamespaceDeclHandler = end; } void XMLCALL XML_SetStartNamespaceDeclHandler(XML_Parser parser, XML_StartNamespaceDeclHandler start) { if (parser != NULL) parser->m_startNamespaceDeclHandler = start; } void XMLCALL XML_SetEndNamespaceDeclHandler(XML_Parser parser, 
XML_EndNamespaceDeclHandler end) { if (parser != NULL) parser->m_endNamespaceDeclHandler = end; } void XMLCALL XML_SetNotStandaloneHandler(XML_Parser parser, XML_NotStandaloneHandler handler) { if (parser != NULL) parser->m_notStandaloneHandler = handler; } void XMLCALL XML_SetExternalEntityRefHandler(XML_Parser parser, XML_ExternalEntityRefHandler handler) { if (parser != NULL) parser->m_externalEntityRefHandler = handler; } void XMLCALL XML_SetExternalEntityRefHandlerArg(XML_Parser parser, void *arg) { if (parser == NULL) return; if (arg) parser->m_externalEntityRefHandlerArg = (XML_Parser)arg; else parser->m_externalEntityRefHandlerArg = parser; } void XMLCALL XML_SetSkippedEntityHandler(XML_Parser parser, XML_SkippedEntityHandler handler) { if (parser != NULL) parser->m_skippedEntityHandler = handler; } void XMLCALL XML_SetUnknownEncodingHandler(XML_Parser parser, XML_UnknownEncodingHandler handler, void *data) { if (parser == NULL) return; parser->m_unknownEncodingHandler = handler; parser->m_unknownEncodingHandlerData = data; } void XMLCALL XML_SetElementDeclHandler(XML_Parser parser, XML_ElementDeclHandler eldecl) { if (parser != NULL) parser->m_elementDeclHandler = eldecl; } void XMLCALL XML_SetAttlistDeclHandler(XML_Parser parser, XML_AttlistDeclHandler attdecl) { if (parser != NULL) parser->m_attlistDeclHandler = attdecl; } void XMLCALL XML_SetEntityDeclHandler(XML_Parser parser, XML_EntityDeclHandler handler) { if (parser != NULL) parser->m_entityDeclHandler = handler; } void XMLCALL XML_SetXmlDeclHandler(XML_Parser parser, XML_XmlDeclHandler handler) { if (parser != NULL) parser->m_xmlDeclHandler = handler; } int XMLCALL XML_SetParamEntityParsing(XML_Parser parser, enum XML_ParamEntityParsing peParsing) { if (parser == NULL) return 0; /* block after XML_Parse()/XML_ParseBuffer() has been called */ if (parser->m_parsingStatus.parsing == XML_PARSING || parser->m_parsingStatus.parsing == XML_SUSPENDED) return 0; #ifdef XML_DTD parser->m_paramEntityParsing 
/* NOTE(review): chunk begins mid-statement — completes the assignment
 * "parser->m_paramEntityParsing =" started on the previous line (inside
 * XML_SetParamEntityParsing). */
= peParsing;
  return 1;
#else
  return peParsing == XML_PARAM_ENTITY_PARSING_NEVER;
#endif
}

/* Set the salt used by the internal hash tables to resist collision
 * attacks; forwarded to the root parser, and rejected once parsing has
 * begun.  Returns 1 on success, 0 on failure. */
int XMLCALL
XML_SetHashSalt(XML_Parser parser, unsigned long hash_salt) {
  if (parser == NULL)
    return 0;
  if (parser->m_parentParser)
    return XML_SetHashSalt(parser->m_parentParser, hash_salt);
  /* block after XML_Parse()/XML_ParseBuffer() has been called */
  if (parser->m_parsingStatus.parsing == XML_PARSING
      || parser->m_parsingStatus.parsing == XML_SUSPENDED)
    return 0;
  parser->m_hash_secret_salt = hash_salt;
  return 1;
}

/* Parse len bytes at s; isFinal marks the last chunk of the document.
 * (Function continues past the end of this chunk line.) */
enum XML_Status XMLCALL
XML_Parse(XML_Parser parser, const char *s, int len, int isFinal) {
  /* reject NULL parser, negative length, and NULL data with nonzero length */
  if ((parser == NULL) || (len < 0) || ((s == NULL) && (len != 0))) {
    if (parser != NULL)
      parser->m_errorCode = XML_ERROR_INVALID_ARGUMENT;
    return XML_STATUS_ERROR;
  }
  switch (parser->m_parsingStatus.parsing) {
  case XML_SUSPENDED:
    parser->m_errorCode = XML_ERROR_SUSPENDED;
    return XML_STATUS_ERROR;
  case XML_FINISHED:
    parser->m_errorCode = XML_ERROR_FINISHED;
    return XML_STATUS_ERROR;
  case XML_INITIALIZED:
    /* first call on the root parser: finish one-time start-up work */
    if (parser->m_parentParser == NULL && ! startParsing(parser)) {
      parser->m_errorCode = XML_ERROR_NO_MEMORY;
      return XML_STATUS_ERROR;
    }
    /* fall through */
  default:
    parser->m_parsingStatus.parsing = XML_PARSING;
  }
  if (len == 0) {
    parser->m_parsingStatus.finalBuffer = (XML_Bool)isFinal;
    if (! isFinal)
      return XML_STATUS_OK;
    parser->m_positionPtr = parser->m_bufferPtr;
    parser->m_parseEndPtr = parser->m_bufferEnd;
    /* If data are left over from last buffer, and we now know that these
       data are the final chunk of input, then we have to check them again
       to detect errors based on that fact. */
    parser->m_errorCode
        = parser->m_processor(parser, parser->m_bufferPtr,
                              parser->m_parseEndPtr, &parser->m_bufferPtr);
    if (parser->m_errorCode == XML_ERROR_NONE) {
      switch (parser->m_parsingStatus.parsing) {
      case XML_SUSPENDED:
        /* It is hard to be certain, but it seems that this case
         * cannot occur.  This code is cleaning up a previous parse
         * with no new data (since len == 0).
Changing the parsing * state requires getting to execute a handler function, and * there doesn't seem to be an opportunity for that while in * this circumstance. * * Given the uncertainty, we retain the code but exclude it * from coverage tests. * * LCOV_EXCL_START */ XmlUpdatePosition(parser->m_encoding, parser->m_positionPtr, parser->m_bufferPtr, &parser->m_position); parser->m_positionPtr = parser->m_bufferPtr; return XML_STATUS_SUSPENDED; /* LCOV_EXCL_STOP */ case XML_INITIALIZED: case XML_PARSING: parser->m_parsingStatus.parsing = XML_FINISHED; /* fall through */ default: return XML_STATUS_OK; } } parser->m_eventEndPtr = parser->m_eventPtr; parser->m_processor = errorProcessor; return XML_STATUS_ERROR; } #ifndef XML_CONTEXT_BYTES else if (parser->m_bufferPtr == parser->m_bufferEnd) { const char *end; int nLeftOver; enum XML_Status result; /* Detect overflow (a+b > MAX <==> b > MAX-a) */ if (len > ((XML_Size)-1) / 2 - parser->m_parseEndByteIndex) { parser->m_errorCode = XML_ERROR_NO_MEMORY; parser->m_eventPtr = parser->m_eventEndPtr = NULL; parser->m_processor = errorProcessor; return XML_STATUS_ERROR; } parser->m_parseEndByteIndex += len; parser->m_positionPtr = s; parser->m_parsingStatus.finalBuffer = (XML_Bool)isFinal; parser->m_errorCode = parser->m_processor(parser, s, parser->m_parseEndPtr = s + len, &end); if (parser->m_errorCode != XML_ERROR_NONE) { parser->m_eventEndPtr = parser->m_eventPtr; parser->m_processor = errorProcessor; return XML_STATUS_ERROR; } else { switch (parser->m_parsingStatus.parsing) { case XML_SUSPENDED: result = XML_STATUS_SUSPENDED; break; case XML_INITIALIZED: case XML_PARSING: if (isFinal) { parser->m_parsingStatus.parsing = XML_FINISHED; return XML_STATUS_OK; } /* fall through */ default: result = XML_STATUS_OK; } } XmlUpdatePosition(parser->m_encoding, parser->m_positionPtr, end, &parser->m_position); nLeftOver = s + len - end; if (nLeftOver) { if (parser->m_buffer == NULL || nLeftOver > parser->m_bufferLim - parser->m_buffer) 
{ /* avoid _signed_ integer overflow */ char *temp = NULL; const int bytesToAllocate = (int)((unsigned)len * 2U); if (bytesToAllocate > 0) { temp = (char *)REALLOC(parser, parser->m_buffer, bytesToAllocate); } if (temp == NULL) { parser->m_errorCode = XML_ERROR_NO_MEMORY; parser->m_eventPtr = parser->m_eventEndPtr = NULL; parser->m_processor = errorProcessor; return XML_STATUS_ERROR; } parser->m_buffer = temp; parser->m_bufferLim = parser->m_buffer + bytesToAllocate; } memcpy(parser->m_buffer, end, nLeftOver); } parser->m_bufferPtr = parser->m_buffer; parser->m_bufferEnd = parser->m_buffer + nLeftOver; parser->m_positionPtr = parser->m_bufferPtr; parser->m_parseEndPtr = parser->m_bufferEnd; parser->m_eventPtr = parser->m_bufferPtr; parser->m_eventEndPtr = parser->m_bufferPtr; return result; } #endif /* not defined XML_CONTEXT_BYTES */ else { void *buff = XML_GetBuffer(parser, len); if (buff == NULL) return XML_STATUS_ERROR; else { memcpy(buff, s, len); return XML_ParseBuffer(parser, len, isFinal); } } } enum XML_Status XMLCALL XML_ParseBuffer(XML_Parser parser, int len, int isFinal) { const char *start; enum XML_Status result = XML_STATUS_OK; if (parser == NULL) return XML_STATUS_ERROR; switch (parser->m_parsingStatus.parsing) { case XML_SUSPENDED: parser->m_errorCode = XML_ERROR_SUSPENDED; return XML_STATUS_ERROR; case XML_FINISHED: parser->m_errorCode = XML_ERROR_FINISHED; return XML_STATUS_ERROR; case XML_INITIALIZED: if (parser->m_parentParser == NULL && ! 
startParsing(parser)) { parser->m_errorCode = XML_ERROR_NO_MEMORY; return XML_STATUS_ERROR; } /* fall through */ default: parser->m_parsingStatus.parsing = XML_PARSING; } start = parser->m_bufferPtr; parser->m_positionPtr = start; parser->m_bufferEnd += len; parser->m_parseEndPtr = parser->m_bufferEnd; parser->m_parseEndByteIndex += len; parser->m_parsingStatus.finalBuffer = (XML_Bool)isFinal; parser->m_errorCode = parser->m_processor( parser, start, parser->m_parseEndPtr, &parser->m_bufferPtr); if (parser->m_errorCode != XML_ERROR_NONE) { parser->m_eventEndPtr = parser->m_eventPtr; parser->m_processor = errorProcessor; return XML_STATUS_ERROR; } else { switch (parser->m_parsingStatus.parsing) { case XML_SUSPENDED: result = XML_STATUS_SUSPENDED; break; case XML_INITIALIZED: case XML_PARSING: if (isFinal) { parser->m_parsingStatus.parsing = XML_FINISHED; return result; } default:; /* should not happen */ } } XmlUpdatePosition(parser->m_encoding, parser->m_positionPtr, parser->m_bufferPtr, &parser->m_position); parser->m_positionPtr = parser->m_bufferPtr; return result; } void *XMLCALL XML_GetBuffer(XML_Parser parser, int len) { if (parser == NULL) return NULL; if (len < 0) { parser->m_errorCode = XML_ERROR_NO_MEMORY; return NULL; } switch (parser->m_parsingStatus.parsing) { case XML_SUSPENDED: parser->m_errorCode = XML_ERROR_SUSPENDED; return NULL; case XML_FINISHED: parser->m_errorCode = XML_ERROR_FINISHED; return NULL; default:; } if (len > EXPAT_SAFE_PTR_DIFF(parser->m_bufferLim, parser->m_bufferEnd)) { #ifdef XML_CONTEXT_BYTES int keep; #endif /* defined XML_CONTEXT_BYTES */ /* Do not invoke signed arithmetic overflow: */ int neededSize = (int)((unsigned)len + (unsigned)EXPAT_SAFE_PTR_DIFF( parser->m_bufferEnd, parser->m_bufferPtr)); if (neededSize < 0) { parser->m_errorCode = XML_ERROR_NO_MEMORY; return NULL; } #ifdef XML_CONTEXT_BYTES keep = (int)EXPAT_SAFE_PTR_DIFF(parser->m_bufferPtr, parser->m_buffer); if (keep > XML_CONTEXT_BYTES) keep = XML_CONTEXT_BYTES; 
neededSize += keep; #endif /* defined XML_CONTEXT_BYTES */ if (neededSize <= EXPAT_SAFE_PTR_DIFF(parser->m_bufferLim, parser->m_buffer)) { #ifdef XML_CONTEXT_BYTES if (keep < EXPAT_SAFE_PTR_DIFF(parser->m_bufferPtr, parser->m_buffer)) { int offset = (int)EXPAT_SAFE_PTR_DIFF(parser->m_bufferPtr, parser->m_buffer) - keep; /* The buffer pointers cannot be NULL here; we have at least some bytes * in the buffer */ memmove(parser->m_buffer, &parser->m_buffer[offset], parser->m_bufferEnd - parser->m_bufferPtr + keep); parser->m_bufferEnd -= offset; parser->m_bufferPtr -= offset; } #else if (parser->m_buffer && parser->m_bufferPtr) { memmove(parser->m_buffer, parser->m_bufferPtr, EXPAT_SAFE_PTR_DIFF(parser->m_bufferEnd, parser->m_bufferPtr)); parser->m_bufferEnd = parser->m_buffer + EXPAT_SAFE_PTR_DIFF(parser->m_bufferEnd, parser->m_bufferPtr); parser->m_bufferPtr = parser->m_buffer; } #endif /* not defined XML_CONTEXT_BYTES */ } else { char *newBuf; int bufferSize = (int)EXPAT_SAFE_PTR_DIFF(parser->m_bufferLim, parser->m_bufferPtr); if (bufferSize == 0) bufferSize = INIT_BUFFER_SIZE; do { /* Do not invoke signed arithmetic overflow: */ bufferSize = (int)(2U * (unsigned)bufferSize); } while (bufferSize < neededSize && bufferSize > 0); if (bufferSize <= 0) { parser->m_errorCode = XML_ERROR_NO_MEMORY; return NULL; } newBuf = (char *)MALLOC(parser, bufferSize); if (newBuf == 0) { parser->m_errorCode = XML_ERROR_NO_MEMORY; return NULL; } parser->m_bufferLim = newBuf + bufferSize; #ifdef XML_CONTEXT_BYTES if (parser->m_bufferPtr) { memcpy(newBuf, &parser->m_bufferPtr[-keep], EXPAT_SAFE_PTR_DIFF(parser->m_bufferEnd, parser->m_bufferPtr) + keep); FREE(parser, parser->m_buffer); parser->m_buffer = newBuf; parser->m_bufferEnd = parser->m_buffer + EXPAT_SAFE_PTR_DIFF(parser->m_bufferEnd, parser->m_bufferPtr) + keep; parser->m_bufferPtr = parser->m_buffer + keep; } else { /* This must be a brand new buffer with no data in it yet */ parser->m_bufferEnd = newBuf; parser->m_bufferPtr = 
/* NOTE(review): chunk begins mid-statement — completes the
 * "parser->m_bufferPtr =" assignment started on the previous line (inside
 * the buffer-reallocation path of XML_GetBuffer). */
parser->m_buffer = newBuf;
      }
#else
      if (parser->m_bufferPtr) {
        /* copy the unconsumed bytes into the fresh buffer */
        memcpy(newBuf, parser->m_bufferPtr,
               EXPAT_SAFE_PTR_DIFF(parser->m_bufferEnd, parser->m_bufferPtr));
        FREE(parser, parser->m_buffer);
        parser->m_bufferEnd
            = newBuf
              + EXPAT_SAFE_PTR_DIFF(parser->m_bufferEnd, parser->m_bufferPtr);
      } else {
        /* This must be a brand new buffer with no data in it yet */
        parser->m_bufferEnd = newBuf;
      }
      parser->m_bufferPtr = parser->m_buffer = newBuf;
#endif /* not defined XML_CONTEXT_BYTES */
    }
    parser->m_eventPtr = parser->m_eventEndPtr = NULL;
    parser->m_positionPtr = NULL;
  }
  return parser->m_bufferEnd;
}

/* Stop the current parse: suspend it if resumable is XML_TRUE, otherwise
 * finish it outright. */
enum XML_Status XMLCALL
XML_StopParser(XML_Parser parser, XML_Bool resumable) {
  if (parser == NULL)
    return XML_STATUS_ERROR;
  switch (parser->m_parsingStatus.parsing) {
  case XML_SUSPENDED:
    if (resumable) {
      /* already suspended — suspending again is an error */
      parser->m_errorCode = XML_ERROR_SUSPENDED;
      return XML_STATUS_ERROR;
    }
    parser->m_parsingStatus.parsing = XML_FINISHED;
    break;
  case XML_FINISHED:
    parser->m_errorCode = XML_ERROR_FINISHED;
    return XML_STATUS_ERROR;
  default:
    if (resumable) {
#ifdef XML_DTD
      if (parser->m_isParamEntity) {
        /* parameter-entity parsers cannot be suspended */
        parser->m_errorCode = XML_ERROR_SUSPEND_PE;
        return XML_STATUS_ERROR;
      }
#endif
      parser->m_parsingStatus.parsing = XML_SUSPENDED;
    } else
      parser->m_parsingStatus.parsing = XML_FINISHED;
  }
  return XML_STATUS_OK;
}

/* Resume a parse suspended by XML_StopParser(parser, XML_TRUE).
 * (Function continues past the end of this chunk line.) */
enum XML_Status XMLCALL
XML_ResumeParser(XML_Parser parser) {
  enum XML_Status result = XML_STATUS_OK;
  if (parser == NULL)
    return XML_STATUS_ERROR;
  if (parser->m_parsingStatus.parsing != XML_SUSPENDED) {
    parser->m_errorCode = XML_ERROR_NOT_SUSPENDED;
    return XML_STATUS_ERROR;
  }
  parser->m_parsingStatus.parsing = XML_PARSING;
  /* re-enter the processor where the suspended parse left off */
  parser->m_errorCode = parser->m_processor(
      parser, parser->m_bufferPtr, parser->m_parseEndPtr, &parser->m_bufferPtr);
  if (parser->m_errorCode != XML_ERROR_NONE) {
    parser->m_eventEndPtr = parser->m_eventPtr;
    parser->m_processor = errorProcessor;
    return XML_STATUS_ERROR;
  } else {
    switch (parser->m_parsingStatus.parsing) {
    case XML_SUSPENDED:
      result = XML_STATUS_SUSPENDED;
      break;
/* NOTE(review): tail of a function that begins before this chunk —
 * presumably the resume/parse-status switch; confirm against the full file. */
case XML_INITIALIZED:
  case XML_PARSING:
    if (parser->m_parsingStatus.finalBuffer) {
      parser->m_parsingStatus.parsing = XML_FINISHED;
      return result;
    }
  default:;
  }
  XmlUpdatePosition(parser->m_encoding, parser->m_positionPtr,
                    parser->m_bufferPtr, &parser->m_position);
  parser->m_positionPtr = parser->m_bufferPtr;
  return result;
}

/* Copy the parser's current parsing status into *status.
   A NULL parser is tolerated; a NULL status is a caller bug (assert). */
void XMLCALL
XML_GetParsingStatus(XML_Parser parser, XML_ParsingStatus *status) {
  if (parser == NULL)
    return;
  assert(status != NULL);
  *status = parser->m_parsingStatus;
}

/* Return the error code recorded on the parser by the last parse call. */
enum XML_Error XMLCALL
XML_GetErrorCode(XML_Parser parser) {
  if (parser == NULL)
    return XML_ERROR_INVALID_ARGUMENT;
  return parser->m_errorCode;
}

/* Byte offset of the current event within the overall input stream,
   computed from the running end-of-parse byte index; -1 when there is
   no active event (or no parser). */
XML_Index XMLCALL
XML_GetCurrentByteIndex(XML_Parser parser) {
  if (parser == NULL)
    return -1;
  if (parser->m_eventPtr)
    return (XML_Index)(parser->m_parseEndByteIndex
                       - (parser->m_parseEndPtr - parser->m_eventPtr));
  return -1;
}

/* Length in bytes of the current event's token (eventEndPtr - eventPtr),
   or 0 when no event is active. */
int XMLCALL
XML_GetCurrentByteCount(XML_Parser parser) {
  if (parser == NULL)
    return 0;
  if (parser->m_eventEndPtr && parser->m_eventPtr)
    return (int)(parser->m_eventEndPtr - parser->m_eventPtr);
  return 0;
}

/* Expose the raw input buffer around the current event.  Only available
   when compiled with XML_CONTEXT_BYTES; otherwise always returns NULL.
   *offset is the current event's offset into the returned buffer,
   *size the number of valid bytes in it. */
const char *XMLCALL
XML_GetInputContext(XML_Parser parser, int *offset, int *size) {
#ifdef XML_CONTEXT_BYTES
  if (parser == NULL)
    return NULL;
  if (parser->m_eventPtr && parser->m_buffer) {
    if (offset != NULL)
      *offset = (int)(parser->m_eventPtr - parser->m_buffer);
    if (size != NULL)
      *size = (int)(parser->m_bufferEnd - parser->m_buffer);
    return parser->m_buffer;
  }
#else
  (void)parser;
  (void)offset;
  (void)size;
#endif /* defined XML_CONTEXT_BYTES */
  return (char *)0;
}

/* Line number of the current event.  The cached position is lazily
   advanced from m_positionPtr up to the event pointer before reporting.
   m_position.lineNumber is stored 0-based; the API reports it 1-based. */
XML_Size XMLCALL
XML_GetCurrentLineNumber(XML_Parser parser) {
  if (parser == NULL)
    return 0;
  if (parser->m_eventPtr && parser->m_eventPtr >= parser->m_positionPtr) {
    XmlUpdatePosition(parser->m_encoding, parser->m_positionPtr,
                      parser->m_eventPtr, &parser->m_position);
    parser->m_positionPtr = parser->m_eventPtr;
  }
  return parser->m_position.lineNumber + 1;
}

/* Column number of the current event; same lazy position update as the
   line-number accessor, but the stored value is reported unchanged. */
XML_Size XMLCALL
XML_GetCurrentColumnNumber(XML_Parser parser) {
  if (parser == NULL)
    return 0;
  if (parser->m_eventPtr && parser->m_eventPtr >= parser->m_positionPtr) {
    XmlUpdatePosition(parser->m_encoding, parser->m_positionPtr,
                      parser->m_eventPtr, &parser->m_position);
    parser->m_positionPtr = parser->m_eventPtr;
  }
  return parser->m_position.columnNumber;
}

/* Release a content model previously handed to the element-decl handler. */
void XMLCALL
XML_FreeContentModel(XML_Parser parser, XML_Content *model) {
  if (parser != NULL)
    FREE(parser, model);
}

/* Allocate through the parser's registered memory suite. */
void *XMLCALL
XML_MemMalloc(XML_Parser parser, size_t size) {
  if (parser == NULL)
    return NULL;
  return MALLOC(parser, size);
}

/* Reallocate through the parser's registered memory suite. */
void *XMLCALL
XML_MemRealloc(XML_Parser parser, void *ptr, size_t size) {
  if (parser == NULL)
    return NULL;
  return REALLOC(parser, ptr, size);
}

/* Free through the parser's registered memory suite. */
void XMLCALL
XML_MemFree(XML_Parser parser, void *ptr) {
  if (parser != NULL)
    FREE(parser, ptr);
}

/* Pass the current event through to the default handler, using the
   internal encoding/event pointers when we are inside an expanded
   internal entity. */
void XMLCALL
XML_DefaultCurrent(XML_Parser parser) {
  if (parser == NULL)
    return;
  if (parser->m_defaultHandler) {
    if (parser->m_openInternalEntities)
      reportDefault(parser, parser->m_internalEncoding,
                    parser->m_openInternalEntities->internalEventPtr,
                    parser->m_openInternalEntities->internalEventEndPtr);
    else
      reportDefault(parser, parser->m_encoding, parser->m_eventPtr,
                    parser->m_eventEndPtr);
  }
}

/* Map an XML_Error code to a human-readable message (NULL for
   XML_ERROR_NONE or an unknown code). */
const XML_LChar *XMLCALL
XML_ErrorString(enum XML_Error code) {
  switch (code) {
  case XML_ERROR_NONE:
    return NULL;
  case XML_ERROR_NO_MEMORY:
    return XML_L("out of memory");
  case XML_ERROR_SYNTAX:
    return XML_L("syntax error");
  case XML_ERROR_NO_ELEMENTS:
    return XML_L("no element found");
  case XML_ERROR_INVALID_TOKEN:
    return XML_L("not well-formed (invalid token)");
  case XML_ERROR_UNCLOSED_TOKEN:
    return XML_L("unclosed token");
  case XML_ERROR_PARTIAL_CHAR:
    return XML_L("partial character");
  case XML_ERROR_TAG_MISMATCH:
    return XML_L("mismatched tag");
  case XML_ERROR_DUPLICATE_ATTRIBUTE:
    return XML_L("duplicate attribute");
  case XML_ERROR_JUNK_AFTER_DOC_ELEMENT:
    return XML_L("junk after document element");
  case XML_ERROR_PARAM_ENTITY_REF:
    return XML_L("illegal parameter entity reference");
  case XML_ERROR_UNDEFINED_ENTITY:
    return
XML_L("undefined entity");
  case XML_ERROR_RECURSIVE_ENTITY_REF:
    return XML_L("recursive entity reference");
  case XML_ERROR_ASYNC_ENTITY:
    return XML_L("asynchronous entity");
  case XML_ERROR_BAD_CHAR_REF:
    return XML_L("reference to invalid character number");
  case XML_ERROR_BINARY_ENTITY_REF:
    return XML_L("reference to binary entity");
  case XML_ERROR_ATTRIBUTE_EXTERNAL_ENTITY_REF:
    return XML_L("reference to external entity in attribute");
  case XML_ERROR_MISPLACED_XML_PI:
    return XML_L("XML or text declaration not at start of entity");
  case XML_ERROR_UNKNOWN_ENCODING:
    return XML_L("unknown encoding");
  case XML_ERROR_INCORRECT_ENCODING:
    return XML_L("encoding specified in XML declaration is incorrect");
  case XML_ERROR_UNCLOSED_CDATA_SECTION:
    return XML_L("unclosed CDATA section");
  case XML_ERROR_EXTERNAL_ENTITY_HANDLING:
    return XML_L("error in processing external entity reference");
  case XML_ERROR_NOT_STANDALONE:
    return XML_L("document is not standalone");
  case XML_ERROR_UNEXPECTED_STATE:
    return XML_L("unexpected parser state - please send a bug report");
  case XML_ERROR_ENTITY_DECLARED_IN_PE:
    return XML_L("entity declared in parameter entity");
  case XML_ERROR_FEATURE_REQUIRES_XML_DTD:
    return XML_L("requested feature requires XML_DTD support in Expat");
  case XML_ERROR_CANT_CHANGE_FEATURE_ONCE_PARSING:
    return XML_L("cannot change setting once parsing has begun");
  /* Added in 1.95.7. */
  case XML_ERROR_UNBOUND_PREFIX:
    return XML_L("unbound prefix");
  /* Added in 1.95.8. */
  case XML_ERROR_UNDECLARING_PREFIX:
    return XML_L("must not undeclare prefix");
  case XML_ERROR_INCOMPLETE_PE:
    return XML_L("incomplete markup in parameter entity");
  case XML_ERROR_XML_DECL:
    return XML_L("XML declaration not well-formed");
  case XML_ERROR_TEXT_DECL:
    return XML_L("text declaration not well-formed");
  case XML_ERROR_PUBLICID:
    return XML_L("illegal character(s) in public id");
  case XML_ERROR_SUSPENDED:
    return XML_L("parser suspended");
  case XML_ERROR_NOT_SUSPENDED:
    return XML_L("parser not suspended");
  case XML_ERROR_ABORTED:
    return XML_L("parsing aborted");
  case XML_ERROR_FINISHED:
    return XML_L("parsing finished");
  case XML_ERROR_SUSPEND_PE:
    return XML_L("cannot suspend in external parameter entity");
  /* Added in 2.0.0. */
  case XML_ERROR_RESERVED_PREFIX_XML:
    return XML_L(
        "reserved prefix (xml) must not be undeclared or bound to another namespace name");
  case XML_ERROR_RESERVED_PREFIX_XMLNS:
    return XML_L("reserved prefix (xmlns) must not be declared or undeclared");
  case XML_ERROR_RESERVED_NAMESPACE_URI:
    return XML_L(
        "prefix must not be bound to one of the reserved namespace names");
  /* Added in 2.2.5. */
  case XML_ERROR_INVALID_ARGUMENT: /* Constant added in 2.2.1, already */
    return XML_L("invalid argument");
  }
  /* Unknown/future code: no message. */
  return NULL;
}

/* Return the library version as a string, e.g. "expat_X.Y.Z". */
const XML_LChar *XMLCALL
XML_ExpatVersion(void) {
  /* V1 is used to string-ize the version number. However, it would
     string-ize the actual version macro *names* unless we get them
     substituted before being passed to V1. CPP is defined to expand
     a macro, then rescan for more expansions. Thus, we use V2 to expand
     the version macros, then CPP will expand the resulting V1() macro
     with the correct numerals. */
  /* ### I'm assuming cpp is portable in this respect... */
#define V1(a, b, c) XML_L(#a) XML_L(".") XML_L(#b) XML_L(".") XML_L(#c)
#define V2(a, b, c) XML_L("expat_") V1(a, b, c)

  return V2(XML_MAJOR_VERSION, XML_MINOR_VERSION, XML_MICRO_VERSION);

#undef V1
#undef V2
}

/* Return the library version as a struct of major/minor/micro numbers. */
XML_Expat_Version XMLCALL
XML_ExpatVersionInfo(void) {
  XML_Expat_Version version;

  version.major = XML_MAJOR_VERSION;
  version.minor = XML_MINOR_VERSION;
  version.micro = XML_MICRO_VERSION;

  return version;
}

/* Return the compile-time feature table, terminated by XML_FEATURE_END.
   Entries are selected by the same macros that configured the build. */
const XML_Feature *XMLCALL
XML_GetFeatureList(void) {
  static const XML_Feature features[]
      = {{XML_FEATURE_SIZEOF_XML_CHAR, XML_L("sizeof(XML_Char)"),
          sizeof(XML_Char)},
         {XML_FEATURE_SIZEOF_XML_LCHAR, XML_L("sizeof(XML_LChar)"),
          sizeof(XML_LChar)},
#ifdef XML_UNICODE
         {XML_FEATURE_UNICODE, XML_L("XML_UNICODE"), 0},
#endif
#ifdef XML_UNICODE_WCHAR_T
         {XML_FEATURE_UNICODE_WCHAR_T, XML_L("XML_UNICODE_WCHAR_T"), 0},
#endif
#ifdef XML_DTD
         {XML_FEATURE_DTD, XML_L("XML_DTD"), 0},
#endif
#ifdef XML_CONTEXT_BYTES
         {XML_FEATURE_CONTEXT_BYTES, XML_L("XML_CONTEXT_BYTES"),
          XML_CONTEXT_BYTES},
#endif
#ifdef XML_MIN_SIZE
         {XML_FEATURE_MIN_SIZE, XML_L("XML_MIN_SIZE"), 0},
#endif
#ifdef XML_NS
         {XML_FEATURE_NS, XML_L("XML_NS"), 0},
#endif
#ifdef XML_LARGE_SIZE
         {XML_FEATURE_LARGE_SIZE, XML_L("XML_LARGE_SIZE"), 0},
#endif
#ifdef XML_ATTR_INFO
         {XML_FEATURE_ATTR_INFO, XML_L("XML_ATTR_INFO"), 0},
#endif
         {XML_FEATURE_END, NULL, 0}};

  return features;
}

/* Initially tag->rawName always points into the parse buffer;
   for those TAG instances opened while the current parse buffer was
   processed, and not yet closed, we need to store tag->rawName in a more
   permanent location, since the parse buffer is about to be discarded.
*/
static XML_Bool
storeRawNames(XML_Parser parser) {
  TAG *tag = parser->m_tagStack;
  while (tag) {
    int bufSize;
    int nameLen = sizeof(XML_Char) * (tag->name.strLen + 1);
    /* raw name is stored in tag->buf immediately after the converted name */
    char *rawNameBuf = tag->buf + nameLen;
    /* Stop if already stored.
       Since m_tagStack is a stack, we can stop at the first entry
       that has already been copied; everything below it in the stack
       is already been accounted for in a previous call to this
       function.
    */
    if (tag->rawName == rawNameBuf)
      break;
    /* For re-use purposes we need to ensure that the
       size of tag->buf is a multiple of sizeof(XML_Char).
    */
    bufSize = nameLen + ROUND_UP(tag->rawNameLength, sizeof(XML_Char));
    if (bufSize > tag->bufEnd - tag->buf) {
      char *temp = (char *)REALLOC(parser, tag->buf, bufSize);
      if (temp == NULL)
        return XML_FALSE;
      /* if tag->name.str points to tag->buf (only when namespace
         processing is off) then we have to update it
      */
      if (tag->name.str == (XML_Char *)tag->buf)
        tag->name.str = (XML_Char *)temp;
      /* if tag->name.localPart is set (when namespace processing is on)
         then update it as well, since it will always point into tag->buf
      */
      if (tag->name.localPart)
        tag->name.localPart
            = (XML_Char *)temp + (tag->name.localPart - (XML_Char *)tag->buf);
      tag->buf = temp;
      tag->bufEnd = temp + bufSize;
      rawNameBuf = temp + nameLen;
    }
    /* Copy the raw name out of the (soon to be invalid) parse buffer. */
    memcpy(rawNameBuf, tag->rawName, tag->rawNameLength);
    tag->rawName = rawNameBuf;
    tag = tag->parent;
  }
  return XML_TRUE;
}

/* Top-level processor for document content: run doContent over the
   buffer, then persist any raw tag names still pointing into it. */
static enum XML_Error PTRCALL
contentProcessor(XML_Parser parser, const char *start, const char *end,
                 const char **endPtr) {
  enum XML_Error result
      = doContent(parser, 0, parser->m_encoding, start, end, endPtr,
                  (XML_Bool)! parser->m_parsingStatus.finalBuffer);
  if (result == XML_ERROR_NONE) {
    if (!
storeRawNames(parser))
      return XML_ERROR_NO_MEMORY;
  }
  return result;
}

/* Stage 1 for external entities: establish the entity's encoding, then
   hand off to stage 2. */
static enum XML_Error PTRCALL
externalEntityInitProcessor(XML_Parser parser, const char *start,
                            const char *end, const char **endPtr) {
  enum XML_Error result = initializeEncoding(parser);
  if (result != XML_ERROR_NONE)
    return result;
  parser->m_processor = externalEntityInitProcessor2;
  return externalEntityInitProcessor2(parser, start, end, endPtr);
}

/* Stage 2 for external entities: skip an optional BOM and handle
   partial tokens, then hand off to stage 3. */
static enum XML_Error PTRCALL
externalEntityInitProcessor2(XML_Parser parser, const char *start,
                             const char *end, const char **endPtr) {
  const char *next = start; /* XmlContentTok doesn't always set the last arg */
  int tok = XmlContentTok(parser->m_encoding, start, end, &next);
  switch (tok) {
  case XML_TOK_BOM:
    /* If we are at the end of the buffer, this would cause the next stage,
       i.e. externalEntityInitProcessor3, to pass control directly to
       doContent (by detecting XML_TOK_NONE) without processing any xml text
       declaration - causing the error XML_ERROR_MISPLACED_XML_PI in doContent.
    */
    if (next == end && ! parser->m_parsingStatus.finalBuffer) {
      *endPtr = next;
      return XML_ERROR_NONE;
    }
    start = next;
    break;
  case XML_TOK_PARTIAL:
    if (! parser->m_parsingStatus.finalBuffer) {
      *endPtr = start;
      return XML_ERROR_NONE;
    }
    parser->m_eventPtr = start;
    return XML_ERROR_UNCLOSED_TOKEN;
  case XML_TOK_PARTIAL_CHAR:
    if (! parser->m_parsingStatus.finalBuffer) {
      *endPtr = start;
      return XML_ERROR_NONE;
    }
    parser->m_eventPtr = start;
    return XML_ERROR_PARTIAL_CHAR;
  }
  parser->m_processor = externalEntityInitProcessor3;
  return externalEntityInitProcessor3(parser, start, end, endPtr);
}

/* Stage 3 for external entities: process an optional XML/text
   declaration, then switch to the regular content processor. */
static enum XML_Error PTRCALL
externalEntityInitProcessor3(XML_Parser parser, const char *start,
                             const char *end, const char **endPtr) {
  int tok;
  const char *next = start; /* XmlContentTok doesn't always set the last arg */
  parser->m_eventPtr = start;
  tok = XmlContentTok(parser->m_encoding, start, end, &next);
  parser->m_eventEndPtr = next;
  switch (tok) {
  case XML_TOK_XML_DECL: {
    enum XML_Error result;
    result = processXmlDecl(parser, 1, start, next);
    if (result != XML_ERROR_NONE)
      return result;
    /* The XML-decl handler may have suspended or aborted the parse. */
    switch (parser->m_parsingStatus.parsing) {
    case XML_SUSPENDED:
      *endPtr = next;
      return XML_ERROR_NONE;
    case XML_FINISHED:
      return XML_ERROR_ABORTED;
    default:
      start = next;
    }
  } break;
  case XML_TOK_PARTIAL:
    if (! parser->m_parsingStatus.finalBuffer) {
      *endPtr = start;
      return XML_ERROR_NONE;
    }
    return XML_ERROR_UNCLOSED_TOKEN;
  case XML_TOK_PARTIAL_CHAR:
    if (! parser->m_parsingStatus.finalBuffer) {
      *endPtr = start;
      return XML_ERROR_NONE;
    }
    return XML_ERROR_PARTIAL_CHAR;
  }
  parser->m_processor = externalEntityContentProcessor;
  parser->m_tagLevel = 1;
  return externalEntityContentProcessor(parser, start, end, endPtr);
}

/* Content processor for external entities: like contentProcessor but with
   startTagLevel 1, so the entity must be self-contained. */
static enum XML_Error PTRCALL
externalEntityContentProcessor(XML_Parser parser, const char *start,
                               const char *end, const char **endPtr) {
  enum XML_Error result
      = doContent(parser, 1, parser->m_encoding, start, end, endPtr,
                  (XML_Bool)! parser->m_parsingStatus.finalBuffer);
  if (result == XML_ERROR_NONE) {
    if (!
storeRawNames(parser))
      return XML_ERROR_NO_MEMORY;
  }
  return result;
}

/* The main content-parsing loop.  Tokenizes [s, end) with XmlContentTok
   and dispatches each token to the registered handlers, maintaining the
   tag stack (m_tagStack / m_freeTagList), tag level and namespace
   bindings.  On success with more input expected, *nextPtr is set to the
   resume point and XML_ERROR_NONE is returned; otherwise an XML_ERROR_*
   code describes the failure.  haveMore is true when this is not the
   final buffer, so partial tokens cause a clean return instead of an
   error. */
static enum XML_Error
doContent(XML_Parser parser, int startTagLevel, const ENCODING *enc,
          const char *s, const char *end, const char **nextPtr,
          XML_Bool haveMore) {
  /* save one level of indirection */
  DTD *const dtd = parser->m_dtd;

  /* Event pointers live on the parser for the document encoding, but on
     the open-internal-entity record when expanding an internal entity. */
  const char **eventPP;
  const char **eventEndPP;
  if (enc == parser->m_encoding) {
    eventPP = &parser->m_eventPtr;
    eventEndPP = &parser->m_eventEndPtr;
  } else {
    eventPP = &(parser->m_openInternalEntities->internalEventPtr);
    eventEndPP = &(parser->m_openInternalEntities->internalEventEndPtr);
  }
  *eventPP = s;

  for (;;) {
    const char *next = s; /* XmlContentTok doesn't always set the last arg */
    int tok = XmlContentTok(enc, s, end, &next);
    *eventEndPP = next;
    switch (tok) {
    case XML_TOK_TRAILING_CR:
      if (haveMore) {
        *nextPtr = s;
        return XML_ERROR_NONE;
      }
      *eventEndPP = end;
      if (parser->m_characterDataHandler) {
        XML_Char c = 0xA;
        parser->m_characterDataHandler(parser->m_handlerArg, &c, 1);
      } else if (parser->m_defaultHandler)
        reportDefault(parser, enc, s, end);
      /* We are at the end of the final buffer, should we check for
         XML_SUSPENDED, XML_FINISHED?
      */
      if (startTagLevel == 0)
        return XML_ERROR_NO_ELEMENTS;
      if (parser->m_tagLevel != startTagLevel)
        return XML_ERROR_ASYNC_ENTITY;
      *nextPtr = end;
      return XML_ERROR_NONE;
    case XML_TOK_NONE:
      if (haveMore) {
        *nextPtr = s;
        return XML_ERROR_NONE;
      }
      if (startTagLevel > 0) {
        if (parser->m_tagLevel != startTagLevel)
          return XML_ERROR_ASYNC_ENTITY;
        *nextPtr = s;
        return XML_ERROR_NONE;
      }
      return XML_ERROR_NO_ELEMENTS;
    case XML_TOK_INVALID:
      *eventPP = next;
      return XML_ERROR_INVALID_TOKEN;
    case XML_TOK_PARTIAL:
      if (haveMore) {
        *nextPtr = s;
        return XML_ERROR_NONE;
      }
      return XML_ERROR_UNCLOSED_TOKEN;
    case XML_TOK_PARTIAL_CHAR:
      if (haveMore) {
        *nextPtr = s;
        return XML_ERROR_NONE;
      }
      return XML_ERROR_PARTIAL_CHAR;
    case XML_TOK_ENTITY_REF: {
      const XML_Char *name;
      ENTITY *entity;
      /* The five predefined entities (&amp; etc.) expand to a single
         character and bypass the entity tables entirely. */
      XML_Char ch = (XML_Char)XmlPredefinedEntityName(
          enc, s + enc->minBytesPerChar, next - enc->minBytesPerChar);
      if (ch) {
        if (parser->m_characterDataHandler)
          parser->m_characterDataHandler(parser->m_handlerArg, &ch, 1);
        else if (parser->m_defaultHandler)
          reportDefault(parser, enc, s, next);
        break;
      }
      name = poolStoreString(&dtd->pool, enc, s + enc->minBytesPerChar,
                             next - enc->minBytesPerChar);
      if (! name)
        return XML_ERROR_NO_MEMORY;
      entity = (ENTITY *)lookup(parser, &dtd->generalEntities, name, 0);
      poolDiscard(&dtd->pool);
      /* First, determine if a check for an existing declaration is needed;
         if yes, check that the entity exists, and that it is internal,
         otherwise call the skipped entity or default handler.
      */
      if (! dtd->hasParamEntityRefs || dtd->standalone) {
        if (! entity)
          return XML_ERROR_UNDEFINED_ENTITY;
        else if (! entity->is_internal)
          return XML_ERROR_ENTITY_DECLARED_IN_PE;
      } else if (! entity) {
        if (parser->m_skippedEntityHandler)
          parser->m_skippedEntityHandler(parser->m_handlerArg, name, 0);
        else if (parser->m_defaultHandler)
          reportDefault(parser, enc, s, next);
        break;
      }
      if (entity->open)
        return XML_ERROR_RECURSIVE_ENTITY_REF;
      if (entity->notation)
        return XML_ERROR_BINARY_ENTITY_REF;
      if (entity->textPtr) {
        enum XML_Error result;
        if (! parser->m_defaultExpandInternalEntities) {
          if (parser->m_skippedEntityHandler)
            parser->m_skippedEntityHandler(parser->m_handlerArg, entity->name,
                                           0);
          else if (parser->m_defaultHandler)
            reportDefault(parser, enc, s, next);
          break;
        }
        result = processInternalEntity(parser, entity, XML_FALSE);
        if (result != XML_ERROR_NONE)
          return result;
      } else if (parser->m_externalEntityRefHandler) {
        const XML_Char *context;
        /* Mark the entity open while building the context string so the
           entity cannot recursively reference itself via getContext(). */
        entity->open = XML_TRUE;
        context = getContext(parser);
        entity->open = XML_FALSE;
        if (! context)
          return XML_ERROR_NO_MEMORY;
        if (! parser->m_externalEntityRefHandler(
                parser->m_externalEntityRefHandlerArg, context, entity->base,
                entity->systemId, entity->publicId))
          return XML_ERROR_EXTERNAL_ENTITY_HANDLING;
        poolDiscard(&parser->m_tempPool);
      } else if (parser->m_defaultHandler)
        reportDefault(parser, enc, s, next);
      break;
    }
    case XML_TOK_START_TAG_NO_ATTS:
      /* fall through */
    case XML_TOK_START_TAG_WITH_ATTS: {
      TAG *tag;
      enum XML_Error result;
      XML_Char *toPtr;
      /* Reuse a TAG from the free list when possible. */
      if (parser->m_freeTagList) {
        tag = parser->m_freeTagList;
        parser->m_freeTagList = parser->m_freeTagList->parent;
      } else {
        tag = (TAG *)MALLOC(parser, sizeof(TAG));
        if (! tag)
          return XML_ERROR_NO_MEMORY;
        tag->buf = (char *)MALLOC(parser, INIT_TAG_BUF_SIZE);
        if (! tag->buf) {
          FREE(parser, tag);
          return XML_ERROR_NO_MEMORY;
        }
        tag->bufEnd = tag->buf + INIT_TAG_BUF_SIZE;
      }
      tag->bindings = NULL;
      tag->parent = parser->m_tagStack;
      parser->m_tagStack = tag;
      tag->name.localPart = NULL;
      tag->name.prefix = NULL;
      tag->rawName = s + enc->minBytesPerChar;
      tag->rawNameLength = XmlNameLength(enc, tag->rawName);
      ++parser->m_tagLevel;
      {
        /* Convert the raw tag name into tag->buf, doubling the buffer
           until the converted name fits. */
        const char *rawNameEnd = tag->rawName + tag->rawNameLength;
        const char *fromPtr = tag->rawName;
        toPtr = (XML_Char *)tag->buf;
        for (;;) {
          int bufSize;
          int convLen;
          const enum XML_Convert_Result convert_res
              = XmlConvert(enc, &fromPtr, rawNameEnd, (ICHAR **)&toPtr,
                           (ICHAR *)tag->bufEnd - 1);
          convLen = (int)(toPtr - (XML_Char *)tag->buf);
          if ((fromPtr >= rawNameEnd)
              || (convert_res == XML_CONVERT_INPUT_INCOMPLETE)) {
            tag->name.strLen = convLen;
            break;
          }
          bufSize = (int)(tag->bufEnd - tag->buf) << 1;
          {
            char *temp = (char *)REALLOC(parser, tag->buf, bufSize);
            if (temp == NULL)
              return XML_ERROR_NO_MEMORY;
            tag->buf = temp;
            tag->bufEnd = temp + bufSize;
            toPtr = (XML_Char *)temp + convLen;
          }
        }
      }
      tag->name.str = (XML_Char *)tag->buf;
      *toPtr = XML_T('\0');
      result = storeAtts(parser, enc, s, &(tag->name), &(tag->bindings));
      if (result)
        return result;
      if (parser->m_startElementHandler)
        parser->m_startElementHandler(parser->m_handlerArg, tag->name.str,
                                      (const XML_Char **)parser->m_atts);
      else if (parser->m_defaultHandler)
        reportDefault(parser, enc, s, next);
      poolClear(&parser->m_tempPool);
      break;
    }
    case XML_TOK_EMPTY_ELEMENT_NO_ATTS:
      /* fall through */
    case XML_TOK_EMPTY_ELEMENT_WITH_ATTS: {
      /* Empty element (<x/>): report start then end without pushing a TAG. */
      const char *rawName = s + enc->minBytesPerChar;
      enum XML_Error result;
      BINDING *bindings = NULL;
      XML_Bool noElmHandlers = XML_TRUE;
      TAG_NAME name;
      name.str = poolStoreString(&parser->m_tempPool, enc, rawName,
                                 rawName + XmlNameLength(enc, rawName));
      if (! name.str)
        return XML_ERROR_NO_MEMORY;
      poolFinish(&parser->m_tempPool);
      result = storeAtts(parser, enc, s, &name, &bindings);
      if (result != XML_ERROR_NONE) {
        freeBindings(parser, bindings);
        return result;
      }
      poolFinish(&parser->m_tempPool);
      if (parser->m_startElementHandler) {
        parser->m_startElementHandler(parser->m_handlerArg, name.str,
                                      (const XML_Char **)parser->m_atts);
        noElmHandlers = XML_FALSE;
      }
      if (parser->m_endElementHandler) {
        if (parser->m_startElementHandler)
          *eventPP = *eventEndPP;
        parser->m_endElementHandler(parser->m_handlerArg, name.str);
        noElmHandlers = XML_FALSE;
      }
      if (noElmHandlers && parser->m_defaultHandler)
        reportDefault(parser, enc, s, next);
      poolClear(&parser->m_tempPool);
      freeBindings(parser, bindings);
    }
      /* If this closed the document element, move on to the epilog. */
      if ((parser->m_tagLevel == 0)
          && (parser->m_parsingStatus.parsing != XML_FINISHED)) {
        if (parser->m_parsingStatus.parsing == XML_SUSPENDED)
          parser->m_processor = epilogProcessor;
        else
          return epilogProcessor(parser, next, end, nextPtr);
      }
      break;
    case XML_TOK_END_TAG:
      if (parser->m_tagLevel == startTagLevel)
        return XML_ERROR_ASYNC_ENTITY;
      else {
        int len;
        const char *rawName;
        TAG *tag = parser->m_tagStack;
        parser->m_tagStack = tag->parent;
        tag->parent = parser->m_freeTagList;
        parser->m_freeTagList = tag;
        rawName = s + enc->minBytesPerChar * 2;
        len = XmlNameLength(enc, rawName);
        if (len != tag->rawNameLength
            || memcmp(tag->rawName, rawName, len) != 0) {
          *eventPP = rawName;
          return XML_ERROR_TAG_MISMATCH;
        }
        --parser->m_tagLevel;
        if (parser->m_endElementHandler) {
          const XML_Char *localPart;
          const XML_Char *prefix;
          XML_Char *uri;
          localPart = tag->name.localPart;
          if (parser->m_ns && localPart) {
            /* localPart and prefix may have been overwritten in
               tag->name.str, since this points to the binding->uri
               buffer which gets re-used; so we have to add them again
            */
            uri = (XML_Char *)tag->name.str + tag->name.uriLen;
            /* don't need to check for space - already done in storeAtts() */
            while (*localPart)
              *uri++ = *localPart++;
            prefix = (XML_Char *)tag->name.prefix;
            if (parser->m_ns_triplets && prefix) {
              *uri++ = parser->m_namespaceSeparator;
              while (*prefix)
                *uri++ = *prefix++;
            }
            *uri = XML_T('\0');
          }
          parser->m_endElementHandler(parser->m_handlerArg, tag->name.str);
        } else if (parser->m_defaultHandler)
          reportDefault(parser, enc, s, next);
        /* Unwind this tag's namespace bindings back onto the free list. */
        while (tag->bindings) {
          BINDING *b = tag->bindings;
          if (parser->m_endNamespaceDeclHandler)
            parser->m_endNamespaceDeclHandler(parser->m_handlerArg,
                                              b->prefix->name);
          tag->bindings = tag->bindings->nextTagBinding;
          b->nextTagBinding = parser->m_freeBindingList;
          parser->m_freeBindingList = b;
          b->prefix->binding = b->prevPrefixBinding;
        }
        if ((parser->m_tagLevel == 0)
            && (parser->m_parsingStatus.parsing != XML_FINISHED)) {
          if (parser->m_parsingStatus.parsing == XML_SUSPENDED)
            parser->m_processor = epilogProcessor;
          else
            return epilogProcessor(parser, next, end, nextPtr);
        }
      }
      break;
    case XML_TOK_CHAR_REF: {
      int n = XmlCharRefNumber(enc, s);
      if (n < 0)
        return XML_ERROR_BAD_CHAR_REF;
      if (parser->m_characterDataHandler) {
        XML_Char buf[XML_ENCODE_MAX];
        parser->m_characterDataHandler(parser->m_handlerArg, buf,
                                       XmlEncode(n, (ICHAR *)buf));
      } else if (parser->m_defaultHandler)
        reportDefault(parser, enc, s, next);
    } break;
    case XML_TOK_XML_DECL:
      return XML_ERROR_MISPLACED_XML_PI;
    case XML_TOK_DATA_NEWLINE:
      if (parser->m_characterDataHandler) {
        XML_Char c = 0xA;
        parser->m_characterDataHandler(parser->m_handlerArg, &c, 1);
      } else if (parser->m_defaultHandler)
        reportDefault(parser, enc, s, next);
      break;
    case XML_TOK_CDATA_SECT_OPEN: {
      enum XML_Error result;
      if (parser->m_startCdataSectionHandler)
        parser->m_startCdataSectionHandler(parser->m_handlerArg);
      /* BEGIN disabled code */
      /* Suppose you doing a transformation on a document that involves
         changing only the character data.  You set up a defaultHandler
         and a characterDataHandler.  The defaultHandler simply copies
         characters through.  The characterDataHandler does the
         transformation and writes the characters out escaping them as
         necessary.  This case will fail to work if we leave out the
         following two lines (because & and < inside CDATA sections will
         be incorrectly escaped).

         However, now we have a start/endCdataSectionHandler, so it seems
         easier to let the user deal with this.
      */
      else if (0 && parser->m_characterDataHandler)
        parser->m_characterDataHandler(parser->m_handlerArg,
                                       parser->m_dataBuf, 0);
      /* END disabled code */
      else if (parser->m_defaultHandler)
        reportDefault(parser, enc, s, next);
      result = doCdataSection(parser, enc, &next, end, nextPtr, haveMore);
      if (result != XML_ERROR_NONE)
        return result;
      else if (! next) {
        /* Section spans the buffer end: resume inside the CDATA section. */
        parser->m_processor = cdataSectionProcessor;
        return result;
      }
    } break;
    case XML_TOK_TRAILING_RSQB:
      if (haveMore) {
        *nextPtr = s;
        return XML_ERROR_NONE;
      }
      if (parser->m_characterDataHandler) {
        if (MUST_CONVERT(enc, s)) {
          ICHAR *dataPtr = (ICHAR *)parser->m_dataBuf;
          XmlConvert(enc, &s, end, &dataPtr, (ICHAR *)parser->m_dataBufEnd);
          parser->m_characterDataHandler(
              parser->m_handlerArg, parser->m_dataBuf,
              (int)(dataPtr - (ICHAR *)parser->m_dataBuf));
        } else
          parser->m_characterDataHandler(
              parser->m_handlerArg, (XML_Char *)s,
              (int)((XML_Char *)end - (XML_Char *)s));
      } else if (parser->m_defaultHandler)
        reportDefault(parser, enc, s, end);
      /* We are at the end of the final buffer, should we check for
         XML_SUSPENDED, XML_FINISHED?
      */
      if (startTagLevel == 0) {
        *eventPP = end;
        return XML_ERROR_NO_ELEMENTS;
      }
      if (parser->m_tagLevel != startTagLevel) {
        *eventPP = end;
        return XML_ERROR_ASYNC_ENTITY;
      }
      *nextPtr = end;
      return XML_ERROR_NONE;
    case XML_TOK_DATA_CHARS: {
      XML_CharacterDataHandler charDataHandler = parser->m_characterDataHandler;
      if (charDataHandler) {
        if (MUST_CONVERT(enc, s)) {
          /* Convert through m_dataBuf in chunks; the handler may be
             invoked several times for one token. */
          for (;;) {
            ICHAR *dataPtr = (ICHAR *)parser->m_dataBuf;
            const enum XML_Convert_Result convert_res = XmlConvert(
                enc, &s, next, &dataPtr, (ICHAR *)parser->m_dataBufEnd);
            *eventEndPP = s;
            charDataHandler(parser->m_handlerArg, parser->m_dataBuf,
                            (int)(dataPtr - (ICHAR *)parser->m_dataBuf));
            if ((convert_res == XML_CONVERT_COMPLETED)
                || (convert_res == XML_CONVERT_INPUT_INCOMPLETE))
              break;
            *eventPP = s;
          }
        } else
          charDataHandler(parser->m_handlerArg, (XML_Char *)s,
                          (int)((XML_Char *)next - (XML_Char *)s));
      } else if (parser->m_defaultHandler)
        reportDefault(parser, enc, s, next);
    } break;
    case XML_TOK_PI:
      if (! reportProcessingInstruction(parser, enc, s, next))
        return XML_ERROR_NO_MEMORY;
      break;
    case XML_TOK_COMMENT:
      if (! reportComment(parser, enc, s, next))
        return XML_ERROR_NO_MEMORY;
      break;
    default:
      /* All of the tokens produced by XmlContentTok() have their own
       * explicit cases, so this default is not strictly necessary.
       * However it is a useful safety net, so we retain the code and
       * simply exclude it from the coverage tests.
       *
       * LCOV_EXCL_START */
      if (parser->m_defaultHandler)
        reportDefault(parser, enc, s, next);
      break;
      /* LCOV_EXCL_STOP */
    }
    *eventPP = s = next;
    /* A handler may have suspended or finished the parse mid-token. */
    switch (parser->m_parsingStatus.parsing) {
    case XML_SUSPENDED:
      *nextPtr = next;
      return XML_ERROR_NONE;
    case XML_FINISHED:
      return XML_ERROR_ABORTED;
    default:;
    }
  }
  /* not reached */
}

/* This function does not call free() on the allocated memory, merely
 * moving it to the parser's m_freeBindingList where it can be freed or
 * reused as appropriate.
*/
static void
freeBindings(XML_Parser parser, BINDING *bindings) {
  while (bindings) {
    BINDING *b = bindings;

    /* m_startNamespaceDeclHandler will have been called for this
     * binding in addBindings(), so call the end handler now.
     */
    if (parser->m_endNamespaceDeclHandler)
      parser->m_endNamespaceDeclHandler(parser->m_handlerArg, b->prefix->name);

    bindings = bindings->nextTagBinding;
    b->nextTagBinding = parser->m_freeBindingList;
    parser->m_freeBindingList = b;
    /* Restore the prefix's previous binding (pop this one). */
    b->prefix->binding = b->prevPrefixBinding;
  }
}

/* Precondition: all arguments must be non-NULL;
   Purpose:
   - normalize attributes
   - check attributes for well-formedness
   - generate namespace aware attribute names (URI, prefix)
   - build list of attributes for startElementHandler
   - default attributes
   - process namespace declarations (check and report them)
   - generate namespace aware element name (URI, prefix)
*/
static enum XML_Error
storeAtts(XML_Parser parser, const ENCODING *enc, const char *attStr,
          TAG_NAME *tagNamePtr, BINDING **bindingsPtr) {
  DTD *const dtd = parser->m_dtd; /* save one level of indirection */
  ELEMENT_TYPE *elementType;
  int nDefaultAtts;
  const XML_Char **appAtts; /* the attribute list for the application */
  int attIndex = 0;
  int prefixLen;
  int i;
  int n;
  XML_Char *uri;
  int nPrefixes = 0;
  BINDING *binding;
  const XML_Char *localPart;

  /* lookup the element type name */
  elementType
      = (ELEMENT_TYPE *)lookup(parser, &dtd->elementTypes, tagNamePtr->str, 0);
  if (! elementType) {
    /* First sighting of this element type: create a table entry for it. */
    const XML_Char *name = poolCopyString(&dtd->pool, tagNamePtr->str);
    if (! name)
      return XML_ERROR_NO_MEMORY;
    elementType = (ELEMENT_TYPE *)lookup(parser, &dtd->elementTypes, name,
                                         sizeof(ELEMENT_TYPE));
    if (! elementType)
      return XML_ERROR_NO_MEMORY;
    if (parser->m_ns && !
setElementTypePrefix(parser, elementType)) return XML_ERROR_NO_MEMORY; } nDefaultAtts = elementType->nDefaultAtts; /* get the attributes from the tokenizer */ n = XmlGetAttributes(enc, attStr, parser->m_attsSize, parser->m_atts); if (n + nDefaultAtts > parser->m_attsSize) { int oldAttsSize = parser->m_attsSize; ATTRIBUTE *temp; #ifdef XML_ATTR_INFO XML_AttrInfo *temp2; #endif parser->m_attsSize = n + nDefaultAtts + INIT_ATTS_SIZE; temp = (ATTRIBUTE *)REALLOC(parser, (void *)parser->m_atts, parser->m_attsSize * sizeof(ATTRIBUTE)); if (temp == NULL) { parser->m_attsSize = oldAttsSize; return XML_ERROR_NO_MEMORY; } parser->m_atts = temp; #ifdef XML_ATTR_INFO temp2 = (XML_AttrInfo *)REALLOC(parser, (void *)parser->m_attInfo, parser->m_attsSize * sizeof(XML_AttrInfo)); if (temp2 == NULL) { parser->m_attsSize = oldAttsSize; return XML_ERROR_NO_MEMORY; } parser->m_attInfo = temp2; #endif if (n > oldAttsSize) XmlGetAttributes(enc, attStr, n, parser->m_atts); } appAtts = (const XML_Char **)parser->m_atts; for (i = 0; i < n; i++) { ATTRIBUTE *currAtt = &parser->m_atts[i]; #ifdef XML_ATTR_INFO XML_AttrInfo *currAttInfo = &parser->m_attInfo[i]; #endif /* add the name and value to the attribute list */ ATTRIBUTE_ID *attId = getAttributeId(parser, enc, currAtt->name, currAtt->name + XmlNameLength(enc, currAtt->name)); if (! attId) return XML_ERROR_NO_MEMORY; #ifdef XML_ATTR_INFO currAttInfo->nameStart = parser->m_parseEndByteIndex - (parser->m_parseEndPtr - currAtt->name); currAttInfo->nameEnd = currAttInfo->nameStart + XmlNameLength(enc, currAtt->name); currAttInfo->valueStart = parser->m_parseEndByteIndex - (parser->m_parseEndPtr - currAtt->valuePtr); currAttInfo->valueEnd = parser->m_parseEndByteIndex - (parser->m_parseEndPtr - currAtt->valueEnd); #endif /* Detect duplicate attributes by their QNames. This does not work when namespace processing is turned on and different prefixes for the same namespace are used. For this case we have a check further down. 
*/ if ((attId->name)[-1]) { if (enc == parser->m_encoding) parser->m_eventPtr = parser->m_atts[i].name; return XML_ERROR_DUPLICATE_ATTRIBUTE; } (attId->name)[-1] = 1; appAtts[attIndex++] = attId->name; if (! parser->m_atts[i].normalized) { enum XML_Error result; XML_Bool isCdata = XML_TRUE; /* figure out whether declared as other than CDATA */ if (attId->maybeTokenized) { int j; for (j = 0; j < nDefaultAtts; j++) { if (attId == elementType->defaultAtts[j].id) { isCdata = elementType->defaultAtts[j].isCdata; break; } } } /* normalize the attribute value */ result = storeAttributeValue( parser, enc, isCdata, parser->m_atts[i].valuePtr, parser->m_atts[i].valueEnd, &parser->m_tempPool); if (result) return result; appAtts[attIndex] = poolStart(&parser->m_tempPool); poolFinish(&parser->m_tempPool); } else { /* the value did not need normalizing */ appAtts[attIndex] = poolStoreString(&parser->m_tempPool, enc, parser->m_atts[i].valuePtr, parser->m_atts[i].valueEnd); if (appAtts[attIndex] == 0) return XML_ERROR_NO_MEMORY; poolFinish(&parser->m_tempPool); } /* handle prefixed attribute names */ if (attId->prefix) { if (attId->xmlns) { /* deal with namespace declarations here */ enum XML_Error result = addBinding(parser, attId->prefix, attId, appAtts[attIndex], bindingsPtr); if (result) return result; --attIndex; } else { /* deal with other prefixed names later */ attIndex++; nPrefixes++; (attId->name)[-1] = 2; } } else attIndex++; } /* set-up for XML_GetSpecifiedAttributeCount and XML_GetIdAttributeIndex */ parser->m_nSpecifiedAtts = attIndex; if (elementType->idAtt && (elementType->idAtt->name)[-1]) { for (i = 0; i < attIndex; i += 2) if (appAtts[i] == elementType->idAtt->name) { parser->m_idAttIndex = i; break; } } else parser->m_idAttIndex = -1; /* do attribute defaulting */ for (i = 0; i < nDefaultAtts; i++) { const DEFAULT_ATTRIBUTE *da = elementType->defaultAtts + i; if (! 
(da->id->name)[-1] && da->value) { if (da->id->prefix) { if (da->id->xmlns) { enum XML_Error result = addBinding(parser, da->id->prefix, da->id, da->value, bindingsPtr); if (result) return result; } else { (da->id->name)[-1] = 2; nPrefixes++; appAtts[attIndex++] = da->id->name; appAtts[attIndex++] = da->value; } } else { (da->id->name)[-1] = 1; appAtts[attIndex++] = da->id->name; appAtts[attIndex++] = da->value; } } } appAtts[attIndex] = 0; /* expand prefixed attribute names, check for duplicates, and clear flags that say whether attributes were specified */ i = 0; if (nPrefixes) { int j; /* hash table index */ unsigned long version = parser->m_nsAttsVersion; int nsAttsSize = (int)1 << parser->m_nsAttsPower; unsigned char oldNsAttsPower = parser->m_nsAttsPower; /* size of hash table must be at least 2 * (# of prefixed attributes) */ if ((nPrefixes << 1) >> parser->m_nsAttsPower) { /* true for m_nsAttsPower = 0 */ NS_ATT *temp; /* hash table size must also be a power of 2 and >= 8 */ while (nPrefixes >> parser->m_nsAttsPower++) ; if (parser->m_nsAttsPower < 3) parser->m_nsAttsPower = 3; nsAttsSize = (int)1 << parser->m_nsAttsPower; temp = (NS_ATT *)REALLOC(parser, parser->m_nsAtts, nsAttsSize * sizeof(NS_ATT)); if (! temp) { /* Restore actual size of memory in m_nsAtts */ parser->m_nsAttsPower = oldNsAttsPower; return XML_ERROR_NO_MEMORY; } parser->m_nsAtts = temp; version = 0; /* force re-initialization of m_nsAtts hash table */ } /* using a version flag saves us from initializing m_nsAtts every time */ if (! 
version) { /* initialize version flags when version wraps around */ version = INIT_ATTS_VERSION; for (j = nsAttsSize; j != 0;) parser->m_nsAtts[--j].version = version; } parser->m_nsAttsVersion = --version; /* expand prefixed names and check for duplicates */ for (; i < attIndex; i += 2) { const XML_Char *s = appAtts[i]; if (s[-1] == 2) { /* prefixed */ ATTRIBUTE_ID *id; const BINDING *b; unsigned long uriHash; struct siphash sip_state; struct sipkey sip_key; copy_salt_to_sipkey(parser, &sip_key); sip24_init(&sip_state, &sip_key); ((XML_Char *)s)[-1] = 0; /* clear flag */ id = (ATTRIBUTE_ID *)lookup(parser, &dtd->attributeIds, s, 0); if (! id || ! id->prefix) { /* This code is walking through the appAtts array, dealing * with (in this case) a prefixed attribute name. To be in * the array, the attribute must have already been bound, so * has to have passed through the hash table lookup once * already. That implies that an entry for it already * exists, so the lookup above will return a pointer to * already allocated memory. There is no opportunaity for * the allocator to fail, so the condition above cannot be * fulfilled. * * Since it is difficult to be certain that the above * analysis is complete, we retain the test and merely * remove the code from coverage tests. */ return XML_ERROR_NO_MEMORY; /* LCOV_EXCL_LINE */ } b = id->prefix->binding; if (! b) return XML_ERROR_UNBOUND_PREFIX; for (j = 0; j < b->uriLen; j++) { const XML_Char c = b->uri[j]; if (! poolAppendChar(&parser->m_tempPool, c)) return XML_ERROR_NO_MEMORY; } sip24_update(&sip_state, b->uri, b->uriLen * sizeof(XML_Char)); while (*s++ != XML_T(ASCII_COLON)) ; sip24_update(&sip_state, s, keylen(s) * sizeof(XML_Char)); do { /* copies null terminator */ if (! poolAppendChar(&parser->m_tempPool, *s)) return XML_ERROR_NO_MEMORY; } while (*s++); uriHash = (unsigned long)sip24_final(&sip_state); { /* Check hash table for duplicate of expanded name (uriName). 
Derived from code in lookup(parser, HASH_TABLE *table, ...). */ unsigned char step = 0; unsigned long mask = nsAttsSize - 1; j = uriHash & mask; /* index into hash table */ while (parser->m_nsAtts[j].version == version) { /* for speed we compare stored hash values first */ if (uriHash == parser->m_nsAtts[j].hash) { const XML_Char *s1 = poolStart(&parser->m_tempPool); const XML_Char *s2 = parser->m_nsAtts[j].uriName; /* s1 is null terminated, but not s2 */ for (; *s1 == *s2 && *s1 != 0; s1++, s2++) ; if (*s1 == 0) return XML_ERROR_DUPLICATE_ATTRIBUTE; } if (! step) step = PROBE_STEP(uriHash, mask, parser->m_nsAttsPower); j < step ? (j += nsAttsSize - step) : (j -= step); } } if (parser->m_ns_triplets) { /* append namespace separator and prefix */ parser->m_tempPool.ptr[-1] = parser->m_namespaceSeparator; s = b->prefix->name; do { if (! poolAppendChar(&parser->m_tempPool, *s)) return XML_ERROR_NO_MEMORY; } while (*s++); } /* store expanded name in attribute list */ s = poolStart(&parser->m_tempPool); poolFinish(&parser->m_tempPool); appAtts[i] = s; /* fill empty slot with new version, uriName and hash value */ parser->m_nsAtts[j].version = version; parser->m_nsAtts[j].hash = uriHash; parser->m_nsAtts[j].uriName = s; if (! --nPrefixes) { i += 2; break; } } else /* not prefixed */ ((XML_Char *)s)[-1] = 0; /* clear flag */ } } /* clear flags for the remaining attributes */ for (; i < attIndex; i += 2) ((XML_Char *)(appAtts[i]))[-1] = 0; for (binding = *bindingsPtr; binding; binding = binding->nextTagBinding) binding->attId->name[-1] = 0; if (! parser->m_ns) return XML_ERROR_NONE; /* expand the element type name */ if (elementType->prefix) { binding = elementType->prefix->binding; if (! 
binding) return XML_ERROR_UNBOUND_PREFIX; localPart = tagNamePtr->str; while (*localPart++ != XML_T(ASCII_COLON)) ; } else if (dtd->defaultPrefix.binding) { binding = dtd->defaultPrefix.binding; localPart = tagNamePtr->str; } else return XML_ERROR_NONE; prefixLen = 0; if (parser->m_ns_triplets && binding->prefix->name) { for (; binding->prefix->name[prefixLen++];) ; /* prefixLen includes null terminator */ } tagNamePtr->localPart = localPart; tagNamePtr->uriLen = binding->uriLen; tagNamePtr->prefix = binding->prefix->name; tagNamePtr->prefixLen = prefixLen; for (i = 0; localPart[i++];) ; /* i includes null terminator */ n = i + binding->uriLen + prefixLen; if (n > binding->uriAlloc) { TAG *p; uri = (XML_Char *)MALLOC(parser, (n + EXPAND_SPARE) * sizeof(XML_Char)); if (! uri) return XML_ERROR_NO_MEMORY; binding->uriAlloc = n + EXPAND_SPARE; memcpy(uri, binding->uri, binding->uriLen * sizeof(XML_Char)); for (p = parser->m_tagStack; p; p = p->parent) if (p->name.str == binding->uri) p->name.str = uri; FREE(parser, binding->uri); binding->uri = uri; } /* if m_namespaceSeparator != '\0' then uri includes it already */ uri = binding->uri + binding->uriLen; memcpy(uri, localPart, i * sizeof(XML_Char)); /* we always have a namespace separator between localPart and prefix */ if (prefixLen) { uri += i - 1; *uri = parser->m_namespaceSeparator; /* replace null terminator */ memcpy(uri + 1, binding->prefix->name, prefixLen * sizeof(XML_Char)); } tagNamePtr->str = binding->uri; return XML_ERROR_NONE; } /* addBinding() overwrites the value of prefix->binding without checking. Therefore one must keep track of the old value outside of addBinding(). 
*/
/* Establish a namespace binding for `prefix` (or for the default namespace
 * when prefix->name is NULL), mapping it to `uri`.  The new BINDING is
 * pushed onto *bindingsPtr so the caller can undo it when the element
 * scope closes; the previous binding is preserved in prevPrefixBinding.
 * Enforces the reserved "xml"/"xmlns" prefix and namespace-URI rules,
 * and fires the start-namespace handler when attId is non-NULL.
 * Returns XML_ERROR_NONE on success or a specific XML_Error code.
 */
static enum XML_Error
addBinding(XML_Parser parser, PREFIX *prefix, const ATTRIBUTE_ID *attId,
           const XML_Char *uri, BINDING **bindingsPtr) {
  /* http://www.w3.org/XML/1998/namespace spelled out character by
     character (XML_Char may be a wide type, so no plain string literal). */
  static const XML_Char xmlNamespace[]
      = {ASCII_h,      ASCII_t,     ASCII_t,      ASCII_p,     ASCII_COLON,
         ASCII_SLASH,  ASCII_SLASH, ASCII_w,      ASCII_w,     ASCII_w,
         ASCII_PERIOD, ASCII_w,     ASCII_3,      ASCII_PERIOD, ASCII_o,
         ASCII_r,      ASCII_g,     ASCII_SLASH,  ASCII_X,     ASCII_M,
         ASCII_L,      ASCII_SLASH, ASCII_1,      ASCII_9,     ASCII_9,
         ASCII_8,      ASCII_SLASH, ASCII_n,      ASCII_a,     ASCII_m,
         ASCII_e,      ASCII_s,     ASCII_p,      ASCII_a,     ASCII_c,
         ASCII_e,      '\0'};
  static const int xmlLen = (int)sizeof(xmlNamespace) / sizeof(XML_Char) - 1;
  /* http://www.w3.org/2000/xmlns/ */
  static const XML_Char xmlnsNamespace[]
      = {ASCII_h,     ASCII_t,      ASCII_t, ASCII_t == 0 ? 0 : ASCII_p, ASCII_COLON,
         ASCII_SLASH, ASCII_SLASH,  ASCII_w, ASCII_w,     ASCII_w,
         ASCII_PERIOD, ASCII_w,     ASCII_3, ASCII_PERIOD, ASCII_o,
         ASCII_r,     ASCII_g,      ASCII_SLASH, ASCII_2, ASCII_0,
         ASCII_0,     ASCII_0,      ASCII_SLASH, ASCII_x, ASCII_m,
         ASCII_l,     ASCII_n,      ASCII_s, ASCII_SLASH, '\0'};
  static const int xmlnsLen
      = (int)sizeof(xmlnsNamespace) / sizeof(XML_Char) - 1;
  XML_Bool mustBeXML = XML_FALSE;
  XML_Bool isXML = XML_TRUE;
  XML_Bool isXMLNS = XML_TRUE;
  BINDING *b;
  int len;

  /* empty URI is only valid for default namespace per XML NS 1.0 (not 1.1) */
  if (*uri == XML_T('\0') && prefix->name)
    return XML_ERROR_UNDECLARING_PREFIX;

  if (prefix->name && prefix->name[0] == XML_T(ASCII_x)
      && prefix->name[1] == XML_T(ASCII_m)
      && prefix->name[2] == XML_T(ASCII_l)) {
    /* Not allowed to bind xmlns */
    if (prefix->name[3] == XML_T(ASCII_n) && prefix->name[4] == XML_T(ASCII_s)
        && prefix->name[5] == XML_T('\0'))
      return XML_ERROR_RESERVED_PREFIX_XMLNS;

    if (prefix->name[3] == XML_T('\0'))
      mustBeXML = XML_TRUE;
  }

  /* One pass over the URI: measure its length while simultaneously
     comparing it against both reserved namespace URIs. */
  for (len = 0; uri[len]; len++) {
    if (isXML && (len > xmlLen || uri[len] != xmlNamespace[len]))
      isXML = XML_FALSE;

    if (! mustBeXML && isXMLNS
        && (len > xmlnsLen || uri[len] != xmlnsNamespace[len]))
      isXMLNS = XML_FALSE;
  }
  isXML = isXML && len == xmlLen;
  isXMLNS = isXMLNS && len == xmlnsLen;

  /* The "xml" prefix and its reserved URI must go together; no other
     prefix may claim either reserved URI. */
  if (mustBeXML != isXML)
    return mustBeXML ? XML_ERROR_RESERVED_PREFIX_XML
                     : XML_ERROR_RESERVED_NAMESPACE_URI;

  if (isXMLNS)
    return XML_ERROR_RESERVED_NAMESPACE_URI;

  if (parser->m_namespaceSeparator)
    len++; /* reserve room for the separator appended below */
  if (parser->m_freeBindingList) {
    /* Reuse a binding from the free list, growing its URI buffer if the
       existing allocation is too small. */
    b = parser->m_freeBindingList;
    if (len > b->uriAlloc) {
      XML_Char *temp = (XML_Char *)REALLOC(
          parser, b->uri, sizeof(XML_Char) * (len + EXPAND_SPARE));
      if (temp == NULL)
        return XML_ERROR_NO_MEMORY;
      b->uri = temp;
      b->uriAlloc = len + EXPAND_SPARE;
    }
    parser->m_freeBindingList = b->nextTagBinding;
  } else {
    b = (BINDING *)MALLOC(parser, sizeof(BINDING));
    if (! b)
      return XML_ERROR_NO_MEMORY;
    b->uri
        = (XML_Char *)MALLOC(parser, sizeof(XML_Char) * (len + EXPAND_SPARE));
    if (! b->uri) {
      FREE(parser, b);
      return XML_ERROR_NO_MEMORY;
    }
    b->uriAlloc = len + EXPAND_SPARE;
  }
  b->uriLen = len;
  memcpy(b->uri, uri, len * sizeof(XML_Char));
  if (parser->m_namespaceSeparator)
    b->uri[len - 1] = parser->m_namespaceSeparator;
  b->prefix = prefix;
  b->attId = attId;
  b->prevPrefixBinding = prefix->binding;
  /* NULL binding when default namespace undeclared */
  if (*uri == XML_T('\0') && prefix == &parser->m_dtd->defaultPrefix)
    prefix->binding = NULL;
  else
    prefix->binding = b;
  b->nextTagBinding = *bindingsPtr;
  *bindingsPtr = b;
  /* if attId == NULL then we are not starting a namespace scope */
  if (attId && parser->m_startNamespaceDeclHandler)
    parser->m_startNamespaceDeclHandler(parser->m_handlerArg, prefix->name,
                                        prefix->binding ? uri : 0);
  return XML_ERROR_NONE;
}

/* The idea here is to avoid using stack for each CDATA section when
   the whole file is parsed with one call.
*/
static enum XML_Error PTRCALL
cdataSectionProcessor(XML_Parser parser, const char *start, const char *end,
                      const char **endPtr) {
  /* Processor installed while the parser is inside a CDATA section;
     delegates to doCdataSection and, once the section closes, switches
     back to the appropriate content processor and continues there. */
  enum XML_Error result
      = doCdataSection(parser, parser->m_encoding, &start, end, endPtr,
                       (XML_Bool)! parser->m_parsingStatus.finalBuffer);
  if (result != XML_ERROR_NONE)
    return result;
  if (start) { /* non-NULL start means the section was closed */
    if (parser->m_parentParser) { /* we are parsing an external entity */
      parser->m_processor = externalEntityContentProcessor;
      return externalEntityContentProcessor(parser, start, end, endPtr);
    } else {
      parser->m_processor = contentProcessor;
      return contentProcessor(parser, start, end, endPtr);
    }
  }
  return result;
}

/* startPtr gets set to non-null if the section is closed, and to null if
   the section is not yet closed.
*/
static enum XML_Error
doCdataSection(XML_Parser parser, const ENCODING *enc, const char **startPtr,
               const char *end, const char **nextPtr, XML_Bool haveMore) {
  const char *s = *startPtr;
  const char **eventPP;
  const char **eventEndPP;
  /* Report event positions against the document proper or against the
     internal entity currently being expanded, depending on `enc`. */
  if (enc == parser->m_encoding) {
    eventPP = &parser->m_eventPtr;
    *eventPP = s;
    eventEndPP = &parser->m_eventEndPtr;
  } else {
    eventPP = &(parser->m_openInternalEntities->internalEventPtr);
    eventEndPP = &(parser->m_openInternalEntities->internalEventEndPtr);
  }
  *eventPP = s;
  *startPtr = NULL;

  for (;;) {
    const char *next;
    int tok = XmlCdataSectionTok(enc, s, end, &next);
    *eventEndPP = next;
    switch (tok) {
    case XML_TOK_CDATA_SECT_CLOSE:
      if (parser->m_endCdataSectionHandler)
        parser->m_endCdataSectionHandler(parser->m_handlerArg);
      /* BEGIN disabled code */
      /* see comment under XML_TOK_CDATA_SECT_OPEN */
      else if (0 && parser->m_characterDataHandler)
        parser->m_characterDataHandler(parser->m_handlerArg, parser->m_dataBuf,
                                       0);
      /* END disabled code */
      else if (parser->m_defaultHandler)
        reportDefault(parser, enc, s, next);
      *startPtr = next;
      *nextPtr = next;
      if (parser->m_parsingStatus.parsing == XML_FINISHED)
        return XML_ERROR_ABORTED;
      else
        return XML_ERROR_NONE;
    case XML_TOK_DATA_NEWLINE:
      if (parser->m_characterDataHandler) {
        XML_Char c = 0xA; /* line breaks are reported as a single LF */
        parser->m_characterDataHandler(parser->m_handlerArg, &c, 1);
      } else if (parser->m_defaultHandler)
        reportDefault(parser, enc, s, next);
      break;
    case XML_TOK_DATA_CHARS: {
      XML_CharacterDataHandler charDataHandler = parser->m_characterDataHandler;
      if (charDataHandler) {
        if (MUST_CONVERT(enc, s)) {
          /* Convert in m_dataBuf-sized chunks, reporting each chunk to
             the character-data handler as it is produced. */
          for (;;) {
            ICHAR *dataPtr = (ICHAR *)parser->m_dataBuf;
            const enum XML_Convert_Result convert_res = XmlConvert(
                enc, &s, next, &dataPtr, (ICHAR *)parser->m_dataBufEnd);
            *eventEndPP = next;
            charDataHandler(parser->m_handlerArg, parser->m_dataBuf,
                            (int)(dataPtr - (ICHAR *)parser->m_dataBuf));
            if ((convert_res == XML_CONVERT_COMPLETED)
                || (convert_res == XML_CONVERT_INPUT_INCOMPLETE))
              break;
            *eventPP = s;
          }
        } else
          charDataHandler(parser->m_handlerArg, (XML_Char *)s,
                          (int)((XML_Char *)next - (XML_Char *)s));
      } else if (parser->m_defaultHandler)
        reportDefault(parser, enc, s, next);
    } break;
    case XML_TOK_INVALID:
      *eventPP = next;
      return XML_ERROR_INVALID_TOKEN;
    case XML_TOK_PARTIAL_CHAR:
      if (haveMore) {
        *nextPtr = s;
        return XML_ERROR_NONE;
      }
      return XML_ERROR_PARTIAL_CHAR;
    case XML_TOK_PARTIAL:
    case XML_TOK_NONE:
      if (haveMore) {
        *nextPtr = s;
        return XML_ERROR_NONE;
      }
      return XML_ERROR_UNCLOSED_CDATA_SECTION;
    default:
      /* Every token returned by XmlCdataSectionTok() has its own
       * explicit case, so this default case will never be executed.
       * We retain it as a safety net and exclude it from the coverage
       * statistics.
       *
       * LCOV_EXCL_START */
      *eventPP = next;
      return XML_ERROR_UNEXPECTED_STATE;
      /* LCOV_EXCL_STOP */
    }

    *eventPP = s = next;
    switch (parser->m_parsingStatus.parsing) {
    case XML_SUSPENDED:
      *nextPtr = next;
      return XML_ERROR_NONE;
    case XML_FINISHED:
      return XML_ERROR_ABORTED;
    default:;
    }
  }
  /* not reached */
}

#ifdef XML_DTD

/* The idea here is to avoid using stack for each IGNORE section when
   the whole file is parsed with one call.
*/
static enum XML_Error PTRCALL
ignoreSectionProcessor(XML_Parser parser, const char *start, const char *end,
                       const char **endPtr) {
  /* Processor installed while the parser is inside an IGNORE section of
     the external DTD subset; resumes the prolog processor once the
     section has been consumed. */
  enum XML_Error result
      = doIgnoreSection(parser, parser->m_encoding, &start, end, endPtr,
                        (XML_Bool)! parser->m_parsingStatus.finalBuffer);
  if (result != XML_ERROR_NONE)
    return result;
  if (start) { /* non-NULL start means the section was closed */
    parser->m_processor = prologProcessor;
    return prologProcessor(parser, start, end, endPtr);
  }
  return result;
}

/* startPtr gets set to non-null if the section is closed, and to null
   if the section is not yet closed.
*/
static enum XML_Error
doIgnoreSection(XML_Parser parser, const ENCODING *enc, const char **startPtr,
                const char *end, const char **nextPtr, XML_Bool haveMore) {
  const char *next;
  int tok;
  const char *s = *startPtr;
  const char **eventPP;
  const char **eventEndPP;
  if (enc == parser->m_encoding) {
    eventPP = &parser->m_eventPtr;
    *eventPP = s;
    eventEndPP = &parser->m_eventEndPtr;
  } else {
    /* It's not entirely clear, but it seems the following two lines
     * of code cannot be executed. The only occasions on which 'enc'
     * is not 'encoding' are when this function is called
     * from the internal entity processing, and IGNORE sections are an
     * error in internal entities.
     *
     * Since it really isn't clear that this is true, we keep the code
     * and just remove it from our coverage tests.
     *
     * LCOV_EXCL_START */
    eventPP = &(parser->m_openInternalEntities->internalEventPtr);
    eventEndPP = &(parser->m_openInternalEntities->internalEventEndPtr);
    /* LCOV_EXCL_STOP */
  }
  *eventPP = s;
  *startPtr = NULL;
  tok = XmlIgnoreSectionTok(enc, s, end, &next);
  *eventEndPP = next;
  switch (tok) {
  case XML_TOK_IGNORE_SECT:
    if (parser->m_defaultHandler)
      reportDefault(parser, enc, s, next);
    *startPtr = next;
    *nextPtr = next;
    if (parser->m_parsingStatus.parsing == XML_FINISHED)
      return XML_ERROR_ABORTED;
    else
      return XML_ERROR_NONE;
  case XML_TOK_INVALID:
    *eventPP = next;
    return XML_ERROR_INVALID_TOKEN;
  case XML_TOK_PARTIAL_CHAR:
    if (haveMore) {
      *nextPtr = s;
      return XML_ERROR_NONE;
    }
    return XML_ERROR_PARTIAL_CHAR;
  case XML_TOK_PARTIAL:
  case XML_TOK_NONE:
    if (haveMore) {
      *nextPtr = s;
      return XML_ERROR_NONE;
    }
    return XML_ERROR_SYNTAX; /* XML_ERROR_UNCLOSED_IGNORE_SECTION */
  default:
    /* All of the tokens that XmlIgnoreSectionTok() returns have
     * explicit cases to handle them, so this default case is never
     * executed. We keep it as a safety net anyway, and remove it
     * from our test coverage statistics.
     *
     * LCOV_EXCL_START */
    *eventPP = next;
    return XML_ERROR_UNEXPECTED_STATE;
    /* LCOV_EXCL_STOP */
  }
  /* not reached */
}

#endif /* XML_DTD */

/* Pick the parser's initial input encoding: either the encoding name the
   application supplied up front, or auto-detection from the document. */
static enum XML_Error
initializeEncoding(XML_Parser parser) {
  const char *s;
#ifdef XML_UNICODE
  char encodingBuf[128];
  /* See comments about `protocolEncodingName` in parserInit() */
  if (! parser->m_protocolEncodingName)
    s = NULL;
  else {
    int i;
    /* Narrow the wide encoding name to ASCII; a name that is too long or
       contains non-ASCII characters becomes the empty string, which makes
       the XmlInitEncoding call below fail and fall through to the
       unknown-encoding path. */
    for (i = 0; parser->m_protocolEncodingName[i]; i++) {
      if (i == sizeof(encodingBuf) - 1
          || (parser->m_protocolEncodingName[i] & ~0x7f) != 0) {
        encodingBuf[0] = '\0';
        break;
      }
      encodingBuf[i] = (char)parser->m_protocolEncodingName[i];
    }
    encodingBuf[i] = '\0';
    s = encodingBuf;
  }
#else
  s = parser->m_protocolEncodingName;
#endif
  if ((parser->m_ns ?
      XmlInitEncodingNS : XmlInitEncoding)(
          &parser->m_initEncoding, &parser->m_encoding, s))
    return XML_ERROR_NONE;
  return handleUnknownEncoding(parser, parser->m_protocolEncodingName);
}

/* Handle an XML declaration (or, when isGeneralTextEntity is non-zero, a
 * text declaration in an external entity) spanning s..next: parse out
 * version/encoding/standalone, record standalone="yes" in the DTD, invoke
 * the XML-declaration handler (or the default handler), and switch the
 * parser's encoding when the declaration names one that is compatible
 * with what has already been deduced from the input. */
static enum XML_Error
processXmlDecl(XML_Parser parser, int isGeneralTextEntity, const char *s,
               const char *next) {
  const char *encodingName = NULL;
  const XML_Char *storedEncName = NULL;
  const ENCODING *newEncoding = NULL;
  const char *version = NULL;
  const char *versionend;
  const XML_Char *storedversion = NULL;
  int standalone = -1;

  if (! (parser->m_ns ? XmlParseXmlDeclNS : XmlParseXmlDecl)(
          isGeneralTextEntity, parser->m_encoding, s, next, &parser->m_eventPtr,
          &version, &versionend, &encodingName, &newEncoding, &standalone)) {
    if (isGeneralTextEntity)
      return XML_ERROR_TEXT_DECL;
    else
      return XML_ERROR_XML_DECL;
  }
  if (! isGeneralTextEntity && standalone == 1) {
    parser->m_dtd->standalone = XML_TRUE;
#ifdef XML_DTD
    if (parser->m_paramEntityParsing
        == XML_PARAM_ENTITY_PARSING_UNLESS_STANDALONE)
      parser->m_paramEntityParsing = XML_PARAM_ENTITY_PARSING_NEVER;
#endif /* XML_DTD */
  }
  if (parser->m_xmlDeclHandler) {
    /* Copy the version and encoding names into temp2Pool so the handler
       receives them as XML_Char strings. */
    if (encodingName != NULL) {
      storedEncName = poolStoreString(
          &parser->m_temp2Pool, parser->m_encoding, encodingName,
          encodingName + XmlNameLength(parser->m_encoding, encodingName));
      if (! storedEncName)
        return XML_ERROR_NO_MEMORY;
      poolFinish(&parser->m_temp2Pool);
    }
    if (version) {
      storedversion
          = poolStoreString(&parser->m_temp2Pool, parser->m_encoding, version,
                            versionend - parser->m_encoding->minBytesPerChar);
      if (! storedversion)
        return XML_ERROR_NO_MEMORY;
    }
    parser->m_xmlDeclHandler(parser->m_handlerArg, storedversion, storedEncName,
                             standalone);
  } else if (parser->m_defaultHandler)
    reportDefault(parser, parser->m_encoding, s, next);
  /* An application-supplied protocol encoding overrides the declared one. */
  if (parser->m_protocolEncodingName == NULL) {
    if (newEncoding) {
      /* Check that the specified encoding does not conflict with what
       * the parser has already deduced. Do we have the same number
       * of bytes in the smallest representation of a character? If
       * this is UTF-16, is it the same endianness? */
      if (newEncoding->minBytesPerChar != parser->m_encoding->minBytesPerChar
          || (newEncoding->minBytesPerChar == 2
              && newEncoding != parser->m_encoding)) {
        parser->m_eventPtr = encodingName;
        return XML_ERROR_INCORRECT_ENCODING;
      }
      parser->m_encoding = newEncoding;
    } else if (encodingName) {
      enum XML_Error result;
      if (! storedEncName) {
        storedEncName = poolStoreString(
            &parser->m_temp2Pool, parser->m_encoding, encodingName,
            encodingName + XmlNameLength(parser->m_encoding, encodingName));
        if (! storedEncName)
          return XML_ERROR_NO_MEMORY;
      }
      result = handleUnknownEncoding(parser, storedEncName);
      poolClear(&parser->m_temp2Pool);
      if (result == XML_ERROR_UNKNOWN_ENCODING)
        parser->m_eventPtr = encodingName;
      return result;
    }
  }

  if (storedEncName || storedversion)
    poolClear(&parser->m_temp2Pool);

  return XML_ERROR_NONE;
}

/* Ask the application's unknown-encoding handler to supply a conversion
 * table for `encodingName`; on success installs the resulting encoding as
 * the parser's current one.  Returns XML_ERROR_UNKNOWN_ENCODING when no
 * handler is registered or the handler declines. */
static enum XML_Error
handleUnknownEncoding(XML_Parser parser, const XML_Char *encodingName) {
  if (parser->m_unknownEncodingHandler) {
    XML_Encoding info;
    int i;
    /* -1 marks byte values the handler leaves unmapped */
    for (i = 0; i < 256; i++)
      info.map[i] = -1;
    info.convert = NULL;
    info.data = NULL;
    info.release = NULL;
    if (parser->m_unknownEncodingHandler(parser->m_unknownEncodingHandlerData,
                                         encodingName, &info)) {
      ENCODING *enc;
      parser->m_unknownEncodingMem = MALLOC(parser, XmlSizeOfUnknownEncoding());
      if (! parser->m_unknownEncodingMem) {
        if (info.release)
          info.release(info.data);
        return XML_ERROR_NO_MEMORY;
      }
      enc = (parser->m_ns ?
             XmlInitUnknownEncodingNS : XmlInitUnknownEncoding)(
          parser->m_unknownEncodingMem, info.map, info.convert, info.data);
      if (enc) {
        parser->m_unknownEncodingData = info.data;
        parser->m_unknownEncodingRelease = info.release;
        parser->m_encoding = enc;
        return XML_ERROR_NONE;
      }
    }
    if (info.release != NULL)
      info.release(info.data);
  }
  return XML_ERROR_UNKNOWN_ENCODING;
}

/* First processor for a fresh document: establish the encoding, then hand
   over to the prolog processor. */
static enum XML_Error PTRCALL
prologInitProcessor(XML_Parser parser, const char *s, const char *end,
                    const char **nextPtr) {
  enum XML_Error result = initializeEncoding(parser);
  if (result != XML_ERROR_NONE)
    return result;
  parser->m_processor = prologProcessor;
  return prologProcessor(parser, s, end, nextPtr);
}

#ifdef XML_DTD

/* First processor for an external parameter entity: establish the
   encoding, then dispatch to either entity-value or external-subset
   processing depending on where the reference occurred. */
static enum XML_Error PTRCALL
externalParEntInitProcessor(XML_Parser parser, const char *s, const char *end,
                            const char **nextPtr) {
  enum XML_Error result = initializeEncoding(parser);
  if (result != XML_ERROR_NONE)
    return result;

  /* we know now that XML_Parse(Buffer) has been called,
     so we consider the external parameter entity read */
  parser->m_dtd->paramEntityRead = XML_TRUE;

  if (parser->m_prologState.inEntityValue) {
    parser->m_processor = entityValueInitProcessor;
    return entityValueInitProcessor(parser, s, end, nextPtr);
  } else {
    parser->m_processor = externalParEntProcessor;
    return externalParEntProcessor(parser, s, end, nextPtr);
  }
}

/* Scan an external parameter entity that was referenced inside an entity
   value, looking for an optional text declaration before storing the
   entity value itself. */
static enum XML_Error PTRCALL
entityValueInitProcessor(XML_Parser parser, const char *s, const char *end,
                         const char **nextPtr) {
  int tok;
  const char *start = s;
  const char *next = start;
  parser->m_eventPtr = start;

  for (;;) {
    tok = XmlPrologTok(parser->m_encoding, start, end, &next);
    parser->m_eventEndPtr = next;
    if (tok <= 0) {
      if (! parser->m_parsingStatus.finalBuffer && tok != XML_TOK_INVALID) {
        *nextPtr = s;
        return XML_ERROR_NONE;
      }
      switch (tok) {
      case XML_TOK_INVALID:
        return XML_ERROR_INVALID_TOKEN;
      case XML_TOK_PARTIAL:
        return XML_ERROR_UNCLOSED_TOKEN;
      case XML_TOK_PARTIAL_CHAR:
        return XML_ERROR_PARTIAL_CHAR;
      case XML_TOK_NONE: /* start == end */
      default:
        break;
      }
      /* found end of entity value - can store it now */
      return storeEntityValue(parser, parser->m_encoding, s, end);
    } else if (tok == XML_TOK_XML_DECL) {
      enum XML_Error result;
      result = processXmlDecl(parser, 0, start, next);
      if (result != XML_ERROR_NONE)
        return result;
      /* At this point, m_parsingStatus.parsing cannot be XML_SUSPENDED. For
       * that to happen, a parameter entity parsing handler must have attempted
       * to suspend the parser, which fails and raises an error. The parser can
       * be aborted, but can't be suspended. */
      if (parser->m_parsingStatus.parsing == XML_FINISHED)
        return XML_ERROR_ABORTED;
      *nextPtr = next;
      /* stop scanning for text declaration - we found one */
      parser->m_processor = entityValueProcessor;
      return entityValueProcessor(parser, next, end, nextPtr);
    }
    /* If we are at the end of the buffer, this would cause XmlPrologTok to
       return XML_TOK_NONE on the next call, which would then cause the
       function to exit with *nextPtr set to s - that is what we want for other
       tokens, but not for the BOM - we would rather like to skip it;
       then, when this routine is entered the next time, XmlPrologTok will
       return XML_TOK_INVALID, since the BOM is still in the buffer */
    else if (tok == XML_TOK_BOM && next == end
             && ! parser->m_parsingStatus.finalBuffer) {
      *nextPtr = next;
      return XML_ERROR_NONE;
    }
    /* If we get this token, we have the start of what might be a
       normal tag, but not a declaration (i.e. it doesn't begin with
       "<!"). In a DTD context, that isn't legal.
    */
    else if (tok == XML_TOK_INSTANCE_START) {
      *nextPtr = next;
      return XML_ERROR_SYNTAX;
    }
    start = next;
    parser->m_eventPtr = start;
  }
}

/* Process the body of an external parameter entity referenced between
   declarations (the external DTD subset case): skip a leading BOM if
   present, then feed the tokens to doProlog. */
static enum XML_Error PTRCALL
externalParEntProcessor(XML_Parser parser, const char *s, const char *end,
                        const char **nextPtr) {
  const char *next = s;
  int tok;

  tok = XmlPrologTok(parser->m_encoding, s, end, &next);
  if (tok <= 0) {
    if (! parser->m_parsingStatus.finalBuffer && tok != XML_TOK_INVALID) {
      *nextPtr = s;
      return XML_ERROR_NONE;
    }
    switch (tok) {
    case XML_TOK_INVALID:
      return XML_ERROR_INVALID_TOKEN;
    case XML_TOK_PARTIAL:
      return XML_ERROR_UNCLOSED_TOKEN;
    case XML_TOK_PARTIAL_CHAR:
      return XML_ERROR_PARTIAL_CHAR;
    case XML_TOK_NONE: /* start == end */
    default:
      break;
    }
  }
  /* This would cause the next stage, i.e. doProlog to be passed XML_TOK_BOM.
     However, when parsing an external subset, doProlog will not accept a BOM
     as valid, and report a syntax error, so we have to skip the BOM
  */
  else if (tok == XML_TOK_BOM) {
    s = next;
    tok = XmlPrologTok(parser->m_encoding, s, end, &next);
  }
  parser->m_processor = prologProcessor;
  return doProlog(parser, parser->m_encoding, s, end, tok, next, nextPtr,
                  (XML_Bool)! parser->m_parsingStatus.finalBuffer, XML_TRUE);
}

/* Accumulate the replacement text of an entity value from an external
   parameter entity once any text declaration has been consumed. */
static enum XML_Error PTRCALL
entityValueProcessor(XML_Parser parser, const char *s, const char *end,
                     const char **nextPtr) {
  const char *start = s;
  const char *next = s;
  const ENCODING *enc = parser->m_encoding;
  int tok;

  for (;;) {
    tok = XmlPrologTok(enc, start, end, &next);
    if (tok <= 0) {
      if (!
parser->m_parsingStatus.finalBuffer && tok != XML_TOK_INVALID) { *nextPtr = s; return XML_ERROR_NONE; } switch (tok) { case XML_TOK_INVALID: return XML_ERROR_INVALID_TOKEN; case XML_TOK_PARTIAL: return XML_ERROR_UNCLOSED_TOKEN; case XML_TOK_PARTIAL_CHAR: return XML_ERROR_PARTIAL_CHAR; case XML_TOK_NONE: /* start == end */ default: break; } /* found end of entity value - can store it now */ return storeEntityValue(parser, enc, s, end); } start = next; } } #endif /* XML_DTD */ static enum XML_Error PTRCALL prologProcessor(XML_Parser parser, const char *s, const char *end, const char **nextPtr) { const char *next = s; int tok = XmlPrologTok(parser->m_encoding, s, end, &next); return doProlog(parser, parser->m_encoding, s, end, tok, next, nextPtr, (XML_Bool)! parser->m_parsingStatus.finalBuffer, XML_TRUE); } static enum XML_Error doProlog(XML_Parser parser, const ENCODING *enc, const char *s, const char *end, int tok, const char *next, const char **nextPtr, XML_Bool haveMore, XML_Bool allowClosingDoctype) { #ifdef XML_DTD static const XML_Char externalSubsetName[] = {ASCII_HASH, '\0'}; #endif /* XML_DTD */ static const XML_Char atypeCDATA[] = {ASCII_C, ASCII_D, ASCII_A, ASCII_T, ASCII_A, '\0'}; static const XML_Char atypeID[] = {ASCII_I, ASCII_D, '\0'}; static const XML_Char atypeIDREF[] = {ASCII_I, ASCII_D, ASCII_R, ASCII_E, ASCII_F, '\0'}; static const XML_Char atypeIDREFS[] = {ASCII_I, ASCII_D, ASCII_R, ASCII_E, ASCII_F, ASCII_S, '\0'}; static const XML_Char atypeENTITY[] = {ASCII_E, ASCII_N, ASCII_T, ASCII_I, ASCII_T, ASCII_Y, '\0'}; static const XML_Char atypeENTITIES[] = {ASCII_E, ASCII_N, ASCII_T, ASCII_I, ASCII_T, ASCII_I, ASCII_E, ASCII_S, '\0'}; static const XML_Char atypeNMTOKEN[] = {ASCII_N, ASCII_M, ASCII_T, ASCII_O, ASCII_K, ASCII_E, ASCII_N, '\0'}; static const XML_Char atypeNMTOKENS[] = {ASCII_N, ASCII_M, ASCII_T, ASCII_O, ASCII_K, ASCII_E, ASCII_N, ASCII_S, '\0'}; static const XML_Char notationPrefix[] = {ASCII_N, ASCII_O, ASCII_T, ASCII_A, ASCII_T, 
ASCII_I, ASCII_O, ASCII_N, ASCII_LPAREN, '\0'}; static const XML_Char enumValueSep[] = {ASCII_PIPE, '\0'}; static const XML_Char enumValueStart[] = {ASCII_LPAREN, '\0'}; /* save one level of indirection */ DTD *const dtd = parser->m_dtd; const char **eventPP; const char **eventEndPP; enum XML_Content_Quant quant; if (enc == parser->m_encoding) { eventPP = &parser->m_eventPtr; eventEndPP = &parser->m_eventEndPtr; } else { eventPP = &(parser->m_openInternalEntities->internalEventPtr); eventEndPP = &(parser->m_openInternalEntities->internalEventEndPtr); } for (;;) { int role; XML_Bool handleDefault = XML_TRUE; *eventPP = s; *eventEndPP = next; if (tok <= 0) { if (haveMore && tok != XML_TOK_INVALID) { *nextPtr = s; return XML_ERROR_NONE; } switch (tok) { case XML_TOK_INVALID: *eventPP = next; return XML_ERROR_INVALID_TOKEN; case XML_TOK_PARTIAL: return XML_ERROR_UNCLOSED_TOKEN; case XML_TOK_PARTIAL_CHAR: return XML_ERROR_PARTIAL_CHAR; case -XML_TOK_PROLOG_S: tok = -tok; break; case XML_TOK_NONE: #ifdef XML_DTD /* for internal PE NOT referenced between declarations */ if (enc != parser->m_encoding && ! parser->m_openInternalEntities->betweenDecl) { *nextPtr = s; return XML_ERROR_NONE; } /* WFC: PE Between Declarations - must check that PE contains complete markup, not only for external PEs, but also for internal PEs if the reference occurs between declarations. 
*/ if (parser->m_isParamEntity || enc != parser->m_encoding) { if (XmlTokenRole(&parser->m_prologState, XML_TOK_NONE, end, end, enc) == XML_ROLE_ERROR) return XML_ERROR_INCOMPLETE_PE; *nextPtr = s; return XML_ERROR_NONE; } #endif /* XML_DTD */ return XML_ERROR_NO_ELEMENTS; default: tok = -tok; next = end; break; } } role = XmlTokenRole(&parser->m_prologState, tok, s, next, enc); switch (role) { case XML_ROLE_XML_DECL: { enum XML_Error result = processXmlDecl(parser, 0, s, next); if (result != XML_ERROR_NONE) return result; enc = parser->m_encoding; handleDefault = XML_FALSE; } break; case XML_ROLE_DOCTYPE_NAME: if (parser->m_startDoctypeDeclHandler) { parser->m_doctypeName = poolStoreString(&parser->m_tempPool, enc, s, next); if (! parser->m_doctypeName) return XML_ERROR_NO_MEMORY; poolFinish(&parser->m_tempPool); parser->m_doctypePubid = NULL; handleDefault = XML_FALSE; } parser->m_doctypeSysid = NULL; /* always initialize to NULL */ break; case XML_ROLE_DOCTYPE_INTERNAL_SUBSET: if (parser->m_startDoctypeDeclHandler) { parser->m_startDoctypeDeclHandler( parser->m_handlerArg, parser->m_doctypeName, parser->m_doctypeSysid, parser->m_doctypePubid, 1); parser->m_doctypeName = NULL; poolClear(&parser->m_tempPool); handleDefault = XML_FALSE; } break; #ifdef XML_DTD case XML_ROLE_TEXT_DECL: { enum XML_Error result = processXmlDecl(parser, 1, s, next); if (result != XML_ERROR_NONE) return result; enc = parser->m_encoding; handleDefault = XML_FALSE; } break; #endif /* XML_DTD */ case XML_ROLE_DOCTYPE_PUBLIC_ID: #ifdef XML_DTD parser->m_useForeignDTD = XML_FALSE; parser->m_declEntity = (ENTITY *)lookup( parser, &dtd->paramEntities, externalSubsetName, sizeof(ENTITY)); if (! parser->m_declEntity) return XML_ERROR_NO_MEMORY; #endif /* XML_DTD */ dtd->hasParamEntityRefs = XML_TRUE; if (parser->m_startDoctypeDeclHandler) { XML_Char *pubId; if (! 
XmlIsPublicId(enc, s, next, eventPP)) return XML_ERROR_PUBLICID; pubId = poolStoreString(&parser->m_tempPool, enc, s + enc->minBytesPerChar, next - enc->minBytesPerChar); if (! pubId) return XML_ERROR_NO_MEMORY; normalizePublicId(pubId); poolFinish(&parser->m_tempPool); parser->m_doctypePubid = pubId; handleDefault = XML_FALSE; goto alreadyChecked; } /* fall through */ case XML_ROLE_ENTITY_PUBLIC_ID: if (! XmlIsPublicId(enc, s, next, eventPP)) return XML_ERROR_PUBLICID; alreadyChecked: if (dtd->keepProcessing && parser->m_declEntity) { XML_Char *tem = poolStoreString(&dtd->pool, enc, s + enc->minBytesPerChar, next - enc->minBytesPerChar); if (! tem) return XML_ERROR_NO_MEMORY; normalizePublicId(tem); parser->m_declEntity->publicId = tem; poolFinish(&dtd->pool); /* Don't suppress the default handler if we fell through from * the XML_ROLE_DOCTYPE_PUBLIC_ID case. */ if (parser->m_entityDeclHandler && role == XML_ROLE_ENTITY_PUBLIC_ID) handleDefault = XML_FALSE; } break; case XML_ROLE_DOCTYPE_CLOSE: if (allowClosingDoctype != XML_TRUE) { /* Must not close doctype from within expanded parameter entities */ return XML_ERROR_INVALID_TOKEN; } if (parser->m_doctypeName) { parser->m_startDoctypeDeclHandler( parser->m_handlerArg, parser->m_doctypeName, parser->m_doctypeSysid, parser->m_doctypePubid, 0); poolClear(&parser->m_tempPool); handleDefault = XML_FALSE; } /* parser->m_doctypeSysid will be non-NULL in the case of a previous XML_ROLE_DOCTYPE_SYSTEM_ID, even if parser->m_startDoctypeDeclHandler was not set, indicating an external subset */ #ifdef XML_DTD if (parser->m_doctypeSysid || parser->m_useForeignDTD) { XML_Bool hadParamEntityRefs = dtd->hasParamEntityRefs; dtd->hasParamEntityRefs = XML_TRUE; if (parser->m_paramEntityParsing && parser->m_externalEntityRefHandler) { ENTITY *entity = (ENTITY *)lookup(parser, &dtd->paramEntities, externalSubsetName, sizeof(ENTITY)); if (! 
entity) { /* The external subset name "#" will have already been * inserted into the hash table at the start of the * external entity parsing, so no allocation will happen * and lookup() cannot fail. */ return XML_ERROR_NO_MEMORY; /* LCOV_EXCL_LINE */ } if (parser->m_useForeignDTD) entity->base = parser->m_curBase; dtd->paramEntityRead = XML_FALSE; if (! parser->m_externalEntityRefHandler( parser->m_externalEntityRefHandlerArg, 0, entity->base, entity->systemId, entity->publicId)) return XML_ERROR_EXTERNAL_ENTITY_HANDLING; if (dtd->paramEntityRead) { if (! dtd->standalone && parser->m_notStandaloneHandler && ! parser->m_notStandaloneHandler(parser->m_handlerArg)) return XML_ERROR_NOT_STANDALONE; } /* if we didn't read the foreign DTD then this means that there is no external subset and we must reset dtd->hasParamEntityRefs */ else if (! parser->m_doctypeSysid) dtd->hasParamEntityRefs = hadParamEntityRefs; /* end of DTD - no need to update dtd->keepProcessing */ } parser->m_useForeignDTD = XML_FALSE; } #endif /* XML_DTD */ if (parser->m_endDoctypeDeclHandler) { parser->m_endDoctypeDeclHandler(parser->m_handlerArg); handleDefault = XML_FALSE; } break; case XML_ROLE_INSTANCE_START: #ifdef XML_DTD /* if there is no DOCTYPE declaration then now is the last chance to read the foreign DTD */ if (parser->m_useForeignDTD) { XML_Bool hadParamEntityRefs = dtd->hasParamEntityRefs; dtd->hasParamEntityRefs = XML_TRUE; if (parser->m_paramEntityParsing && parser->m_externalEntityRefHandler) { ENTITY *entity = (ENTITY *)lookup(parser, &dtd->paramEntities, externalSubsetName, sizeof(ENTITY)); if (! entity) return XML_ERROR_NO_MEMORY; entity->base = parser->m_curBase; dtd->paramEntityRead = XML_FALSE; if (! parser->m_externalEntityRefHandler( parser->m_externalEntityRefHandlerArg, 0, entity->base, entity->systemId, entity->publicId)) return XML_ERROR_EXTERNAL_ENTITY_HANDLING; if (dtd->paramEntityRead) { if (! dtd->standalone && parser->m_notStandaloneHandler && ! 
parser->m_notStandaloneHandler(parser->m_handlerArg)) return XML_ERROR_NOT_STANDALONE; } /* if we didn't read the foreign DTD then this means that there is no external subset and we must reset dtd->hasParamEntityRefs */ else dtd->hasParamEntityRefs = hadParamEntityRefs; /* end of DTD - no need to update dtd->keepProcessing */ } } #endif /* XML_DTD */ parser->m_processor = contentProcessor; return contentProcessor(parser, s, end, nextPtr); case XML_ROLE_ATTLIST_ELEMENT_NAME: parser->m_declElementType = getElementType(parser, enc, s, next); if (! parser->m_declElementType) return XML_ERROR_NO_MEMORY; goto checkAttListDeclHandler; case XML_ROLE_ATTRIBUTE_NAME: parser->m_declAttributeId = getAttributeId(parser, enc, s, next); if (! parser->m_declAttributeId) return XML_ERROR_NO_MEMORY; parser->m_declAttributeIsCdata = XML_FALSE; parser->m_declAttributeType = NULL; parser->m_declAttributeIsId = XML_FALSE; goto checkAttListDeclHandler; case XML_ROLE_ATTRIBUTE_TYPE_CDATA: parser->m_declAttributeIsCdata = XML_TRUE; parser->m_declAttributeType = atypeCDATA; goto checkAttListDeclHandler; case XML_ROLE_ATTRIBUTE_TYPE_ID: parser->m_declAttributeIsId = XML_TRUE; parser->m_declAttributeType = atypeID; goto checkAttListDeclHandler; case XML_ROLE_ATTRIBUTE_TYPE_IDREF: parser->m_declAttributeType = atypeIDREF; goto checkAttListDeclHandler; case XML_ROLE_ATTRIBUTE_TYPE_IDREFS: parser->m_declAttributeType = atypeIDREFS; goto checkAttListDeclHandler; case XML_ROLE_ATTRIBUTE_TYPE_ENTITY: parser->m_declAttributeType = atypeENTITY; goto checkAttListDeclHandler; case XML_ROLE_ATTRIBUTE_TYPE_ENTITIES: parser->m_declAttributeType = atypeENTITIES; goto checkAttListDeclHandler; case XML_ROLE_ATTRIBUTE_TYPE_NMTOKEN: parser->m_declAttributeType = atypeNMTOKEN; goto checkAttListDeclHandler; case XML_ROLE_ATTRIBUTE_TYPE_NMTOKENS: parser->m_declAttributeType = atypeNMTOKENS; checkAttListDeclHandler: if (dtd->keepProcessing && parser->m_attlistDeclHandler) handleDefault = XML_FALSE; break; case 
XML_ROLE_ATTRIBUTE_ENUM_VALUE: case XML_ROLE_ATTRIBUTE_NOTATION_VALUE: if (dtd->keepProcessing && parser->m_attlistDeclHandler) { const XML_Char *prefix; if (parser->m_declAttributeType) { prefix = enumValueSep; } else { prefix = (role == XML_ROLE_ATTRIBUTE_NOTATION_VALUE ? notationPrefix : enumValueStart); } if (! poolAppendString(&parser->m_tempPool, prefix)) return XML_ERROR_NO_MEMORY; if (! poolAppend(&parser->m_tempPool, enc, s, next)) return XML_ERROR_NO_MEMORY; parser->m_declAttributeType = parser->m_tempPool.start; handleDefault = XML_FALSE; } break; case XML_ROLE_IMPLIED_ATTRIBUTE_VALUE: case XML_ROLE_REQUIRED_ATTRIBUTE_VALUE: if (dtd->keepProcessing) { if (! defineAttribute(parser->m_declElementType, parser->m_declAttributeId, parser->m_declAttributeIsCdata, parser->m_declAttributeIsId, 0, parser)) return XML_ERROR_NO_MEMORY; if (parser->m_attlistDeclHandler && parser->m_declAttributeType) { if (*parser->m_declAttributeType == XML_T(ASCII_LPAREN) || (*parser->m_declAttributeType == XML_T(ASCII_N) && parser->m_declAttributeType[1] == XML_T(ASCII_O))) { /* Enumerated or Notation type */ if (! poolAppendChar(&parser->m_tempPool, XML_T(ASCII_RPAREN)) || ! 
poolAppendChar(&parser->m_tempPool, XML_T('\0'))) return XML_ERROR_NO_MEMORY; parser->m_declAttributeType = parser->m_tempPool.start; poolFinish(&parser->m_tempPool); } *eventEndPP = s; parser->m_attlistDeclHandler( parser->m_handlerArg, parser->m_declElementType->name, parser->m_declAttributeId->name, parser->m_declAttributeType, 0, role == XML_ROLE_REQUIRED_ATTRIBUTE_VALUE); poolClear(&parser->m_tempPool); handleDefault = XML_FALSE; } } break; case XML_ROLE_DEFAULT_ATTRIBUTE_VALUE: case XML_ROLE_FIXED_ATTRIBUTE_VALUE: if (dtd->keepProcessing) { const XML_Char *attVal; enum XML_Error result = storeAttributeValue( parser, enc, parser->m_declAttributeIsCdata, s + enc->minBytesPerChar, next - enc->minBytesPerChar, &dtd->pool); if (result) return result; attVal = poolStart(&dtd->pool); poolFinish(&dtd->pool); /* ID attributes aren't allowed to have a default */ if (! defineAttribute( parser->m_declElementType, parser->m_declAttributeId, parser->m_declAttributeIsCdata, XML_FALSE, attVal, parser)) return XML_ERROR_NO_MEMORY; if (parser->m_attlistDeclHandler && parser->m_declAttributeType) { if (*parser->m_declAttributeType == XML_T(ASCII_LPAREN) || (*parser->m_declAttributeType == XML_T(ASCII_N) && parser->m_declAttributeType[1] == XML_T(ASCII_O))) { /* Enumerated or Notation type */ if (! poolAppendChar(&parser->m_tempPool, XML_T(ASCII_RPAREN)) || ! 
poolAppendChar(&parser->m_tempPool, XML_T('\0'))) return XML_ERROR_NO_MEMORY; parser->m_declAttributeType = parser->m_tempPool.start; poolFinish(&parser->m_tempPool); } *eventEndPP = s; parser->m_attlistDeclHandler( parser->m_handlerArg, parser->m_declElementType->name, parser->m_declAttributeId->name, parser->m_declAttributeType, attVal, role == XML_ROLE_FIXED_ATTRIBUTE_VALUE); poolClear(&parser->m_tempPool); handleDefault = XML_FALSE; } } break; case XML_ROLE_ENTITY_VALUE: if (dtd->keepProcessing) { enum XML_Error result = storeEntityValue( parser, enc, s + enc->minBytesPerChar, next - enc->minBytesPerChar); if (parser->m_declEntity) { parser->m_declEntity->textPtr = poolStart(&dtd->entityValuePool); parser->m_declEntity->textLen = (int)(poolLength(&dtd->entityValuePool)); poolFinish(&dtd->entityValuePool); if (parser->m_entityDeclHandler) { *eventEndPP = s; parser->m_entityDeclHandler( parser->m_handlerArg, parser->m_declEntity->name, parser->m_declEntity->is_param, parser->m_declEntity->textPtr, parser->m_declEntity->textLen, parser->m_curBase, 0, 0, 0); handleDefault = XML_FALSE; } } else poolDiscard(&dtd->entityValuePool); if (result != XML_ERROR_NONE) return result; } break; case XML_ROLE_DOCTYPE_SYSTEM_ID: #ifdef XML_DTD parser->m_useForeignDTD = XML_FALSE; #endif /* XML_DTD */ dtd->hasParamEntityRefs = XML_TRUE; if (parser->m_startDoctypeDeclHandler) { parser->m_doctypeSysid = poolStoreString(&parser->m_tempPool, enc, s + enc->minBytesPerChar, next - enc->minBytesPerChar); if (parser->m_doctypeSysid == NULL) return XML_ERROR_NO_MEMORY; poolFinish(&parser->m_tempPool); handleDefault = XML_FALSE; } #ifdef XML_DTD else /* use externalSubsetName to make parser->m_doctypeSysid non-NULL for the case where no parser->m_startDoctypeDeclHandler is set */ parser->m_doctypeSysid = externalSubsetName; #endif /* XML_DTD */ if (! dtd->standalone #ifdef XML_DTD && ! parser->m_paramEntityParsing #endif /* XML_DTD */ && parser->m_notStandaloneHandler && ! 
parser->m_notStandaloneHandler(parser->m_handlerArg)) return XML_ERROR_NOT_STANDALONE; #ifndef XML_DTD break; #else /* XML_DTD */ if (! parser->m_declEntity) { parser->m_declEntity = (ENTITY *)lookup( parser, &dtd->paramEntities, externalSubsetName, sizeof(ENTITY)); if (! parser->m_declEntity) return XML_ERROR_NO_MEMORY; parser->m_declEntity->publicId = NULL; } #endif /* XML_DTD */ /* fall through */ case XML_ROLE_ENTITY_SYSTEM_ID: if (dtd->keepProcessing && parser->m_declEntity) { parser->m_declEntity->systemId = poolStoreString(&dtd->pool, enc, s + enc->minBytesPerChar, next - enc->minBytesPerChar); if (! parser->m_declEntity->systemId) return XML_ERROR_NO_MEMORY; parser->m_declEntity->base = parser->m_curBase; poolFinish(&dtd->pool); /* Don't suppress the default handler if we fell through from * the XML_ROLE_DOCTYPE_SYSTEM_ID case. */ if (parser->m_entityDeclHandler && role == XML_ROLE_ENTITY_SYSTEM_ID) handleDefault = XML_FALSE; } break; case XML_ROLE_ENTITY_COMPLETE: if (dtd->keepProcessing && parser->m_declEntity && parser->m_entityDeclHandler) { *eventEndPP = s; parser->m_entityDeclHandler( parser->m_handlerArg, parser->m_declEntity->name, parser->m_declEntity->is_param, 0, 0, parser->m_declEntity->base, parser->m_declEntity->systemId, parser->m_declEntity->publicId, 0); handleDefault = XML_FALSE; } break; case XML_ROLE_ENTITY_NOTATION_NAME: if (dtd->keepProcessing && parser->m_declEntity) { parser->m_declEntity->notation = poolStoreString(&dtd->pool, enc, s, next); if (! 
parser->m_declEntity->notation) return XML_ERROR_NO_MEMORY; poolFinish(&dtd->pool); if (parser->m_unparsedEntityDeclHandler) { *eventEndPP = s; parser->m_unparsedEntityDeclHandler( parser->m_handlerArg, parser->m_declEntity->name, parser->m_declEntity->base, parser->m_declEntity->systemId, parser->m_declEntity->publicId, parser->m_declEntity->notation); handleDefault = XML_FALSE; } else if (parser->m_entityDeclHandler) { *eventEndPP = s; parser->m_entityDeclHandler( parser->m_handlerArg, parser->m_declEntity->name, 0, 0, 0, parser->m_declEntity->base, parser->m_declEntity->systemId, parser->m_declEntity->publicId, parser->m_declEntity->notation); handleDefault = XML_FALSE; } } break; case XML_ROLE_GENERAL_ENTITY_NAME: { if (XmlPredefinedEntityName(enc, s, next)) { parser->m_declEntity = NULL; break; } if (dtd->keepProcessing) { const XML_Char *name = poolStoreString(&dtd->pool, enc, s, next); if (! name) return XML_ERROR_NO_MEMORY; parser->m_declEntity = (ENTITY *)lookup(parser, &dtd->generalEntities, name, sizeof(ENTITY)); if (! parser->m_declEntity) return XML_ERROR_NO_MEMORY; if (parser->m_declEntity->name != name) { poolDiscard(&dtd->pool); parser->m_declEntity = NULL; } else { poolFinish(&dtd->pool); parser->m_declEntity->publicId = NULL; parser->m_declEntity->is_param = XML_FALSE; /* if we have a parent parser or are reading an internal parameter entity, then the entity declaration is not considered "internal" */ parser->m_declEntity->is_internal = ! (parser->m_parentParser || parser->m_openInternalEntities); if (parser->m_entityDeclHandler) handleDefault = XML_FALSE; } } else { poolDiscard(&dtd->pool); parser->m_declEntity = NULL; } } break; case XML_ROLE_PARAM_ENTITY_NAME: #ifdef XML_DTD if (dtd->keepProcessing) { const XML_Char *name = poolStoreString(&dtd->pool, enc, s, next); if (! name) return XML_ERROR_NO_MEMORY; parser->m_declEntity = (ENTITY *)lookup(parser, &dtd->paramEntities, name, sizeof(ENTITY)); if (! 
parser->m_declEntity) return XML_ERROR_NO_MEMORY; if (parser->m_declEntity->name != name) { poolDiscard(&dtd->pool); parser->m_declEntity = NULL; } else { poolFinish(&dtd->pool); parser->m_declEntity->publicId = NULL; parser->m_declEntity->is_param = XML_TRUE; /* if we have a parent parser or are reading an internal parameter entity, then the entity declaration is not considered "internal" */ parser->m_declEntity->is_internal = ! (parser->m_parentParser || parser->m_openInternalEntities); if (parser->m_entityDeclHandler) handleDefault = XML_FALSE; } } else { poolDiscard(&dtd->pool); parser->m_declEntity = NULL; } #else /* not XML_DTD */ parser->m_declEntity = NULL; #endif /* XML_DTD */ break; case XML_ROLE_NOTATION_NAME: parser->m_declNotationPublicId = NULL; parser->m_declNotationName = NULL; if (parser->m_notationDeclHandler) { parser->m_declNotationName = poolStoreString(&parser->m_tempPool, enc, s, next); if (! parser->m_declNotationName) return XML_ERROR_NO_MEMORY; poolFinish(&parser->m_tempPool); handleDefault = XML_FALSE; } break; case XML_ROLE_NOTATION_PUBLIC_ID: if (! XmlIsPublicId(enc, s, next, eventPP)) return XML_ERROR_PUBLICID; if (parser ->m_declNotationName) { /* means m_notationDeclHandler != NULL */ XML_Char *tem = poolStoreString(&parser->m_tempPool, enc, s + enc->minBytesPerChar, next - enc->minBytesPerChar); if (! tem) return XML_ERROR_NO_MEMORY; normalizePublicId(tem); parser->m_declNotationPublicId = tem; poolFinish(&parser->m_tempPool); handleDefault = XML_FALSE; } break; case XML_ROLE_NOTATION_SYSTEM_ID: if (parser->m_declNotationName && parser->m_notationDeclHandler) { const XML_Char *systemId = poolStoreString(&parser->m_tempPool, enc, s + enc->minBytesPerChar, next - enc->minBytesPerChar); if (! 
systemId) return XML_ERROR_NO_MEMORY; *eventEndPP = s; parser->m_notationDeclHandler( parser->m_handlerArg, parser->m_declNotationName, parser->m_curBase, systemId, parser->m_declNotationPublicId); handleDefault = XML_FALSE; } poolClear(&parser->m_tempPool); break; case XML_ROLE_NOTATION_NO_SYSTEM_ID: if (parser->m_declNotationPublicId && parser->m_notationDeclHandler) { *eventEndPP = s; parser->m_notationDeclHandler( parser->m_handlerArg, parser->m_declNotationName, parser->m_curBase, 0, parser->m_declNotationPublicId); handleDefault = XML_FALSE; } poolClear(&parser->m_tempPool); break; case XML_ROLE_ERROR: switch (tok) { case XML_TOK_PARAM_ENTITY_REF: /* PE references in internal subset are not allowed within declarations. */ return XML_ERROR_PARAM_ENTITY_REF; case XML_TOK_XML_DECL: return XML_ERROR_MISPLACED_XML_PI; default: return XML_ERROR_SYNTAX; } #ifdef XML_DTD case XML_ROLE_IGNORE_SECT: { enum XML_Error result; if (parser->m_defaultHandler) reportDefault(parser, enc, s, next); handleDefault = XML_FALSE; result = doIgnoreSection(parser, enc, &next, end, nextPtr, haveMore); if (result != XML_ERROR_NONE) return result; else if (! next) { parser->m_processor = ignoreSectionProcessor; return result; } } break; #endif /* XML_DTD */ case XML_ROLE_GROUP_OPEN: if (parser->m_prologState.level >= parser->m_groupSize) { if (parser->m_groupSize) { { char *const new_connector = (char *)REALLOC( parser, parser->m_groupConnector, parser->m_groupSize *= 2); if (new_connector == NULL) { parser->m_groupSize /= 2; return XML_ERROR_NO_MEMORY; } parser->m_groupConnector = new_connector; } if (dtd->scaffIndex) { int *const new_scaff_index = (int *)REALLOC( parser, dtd->scaffIndex, parser->m_groupSize * sizeof(int)); if (new_scaff_index == NULL) return XML_ERROR_NO_MEMORY; dtd->scaffIndex = new_scaff_index; } } else { parser->m_groupConnector = (char *)MALLOC(parser, parser->m_groupSize = 32); if (! 
parser->m_groupConnector) { parser->m_groupSize = 0; return XML_ERROR_NO_MEMORY; } } } parser->m_groupConnector[parser->m_prologState.level] = 0; if (dtd->in_eldecl) { int myindex = nextScaffoldPart(parser); if (myindex < 0) return XML_ERROR_NO_MEMORY; assert(dtd->scaffIndex != NULL); dtd->scaffIndex[dtd->scaffLevel] = myindex; dtd->scaffLevel++; dtd->scaffold[myindex].type = XML_CTYPE_SEQ; if (parser->m_elementDeclHandler) handleDefault = XML_FALSE; } break; case XML_ROLE_GROUP_SEQUENCE: if (parser->m_groupConnector[parser->m_prologState.level] == ASCII_PIPE) return XML_ERROR_SYNTAX; parser->m_groupConnector[parser->m_prologState.level] = ASCII_COMMA; if (dtd->in_eldecl && parser->m_elementDeclHandler) handleDefault = XML_FALSE; break; case XML_ROLE_GROUP_CHOICE: if (parser->m_groupConnector[parser->m_prologState.level] == ASCII_COMMA) return XML_ERROR_SYNTAX; if (dtd->in_eldecl && ! parser->m_groupConnector[parser->m_prologState.level] && (dtd->scaffold[dtd->scaffIndex[dtd->scaffLevel - 1]].type != XML_CTYPE_MIXED)) { dtd->scaffold[dtd->scaffIndex[dtd->scaffLevel - 1]].type = XML_CTYPE_CHOICE; if (parser->m_elementDeclHandler) handleDefault = XML_FALSE; } parser->m_groupConnector[parser->m_prologState.level] = ASCII_PIPE; break; case XML_ROLE_PARAM_ENTITY_REF: #ifdef XML_DTD case XML_ROLE_INNER_PARAM_ENTITY_REF: dtd->hasParamEntityRefs = XML_TRUE; if (! parser->m_paramEntityParsing) dtd->keepProcessing = dtd->standalone; else { const XML_Char *name; ENTITY *entity; name = poolStoreString(&dtd->pool, enc, s + enc->minBytesPerChar, next - enc->minBytesPerChar); if (! name) return XML_ERROR_NO_MEMORY; entity = (ENTITY *)lookup(parser, &dtd->paramEntities, name, 0); poolDiscard(&dtd->pool); /* first, determine if a check for an existing declaration is needed; if yes, check that the entity exists, and that it is internal, otherwise call the skipped entity handler */ if (parser->m_prologState.documentEntity && (dtd->standalone ? ! parser->m_openInternalEntities : ! 
dtd->hasParamEntityRefs)) { if (! entity) return XML_ERROR_UNDEFINED_ENTITY; else if (! entity->is_internal) { /* It's hard to exhaustively search the code to be sure, * but there doesn't seem to be a way of executing the * following line. There are two cases: * * If 'standalone' is false, the DTD must have no * parameter entities or we wouldn't have passed the outer * 'if' statement. That measn the only entity in the hash * table is the external subset name "#" which cannot be * given as a parameter entity name in XML syntax, so the * lookup must have returned NULL and we don't even reach * the test for an internal entity. * * If 'standalone' is true, it does not seem to be * possible to create entities taking this code path that * are not internal entities, so fail the test above. * * Because this analysis is very uncertain, the code is * being left in place and merely removed from the * coverage test statistics. */ return XML_ERROR_ENTITY_DECLARED_IN_PE; /* LCOV_EXCL_LINE */ } } else if (! entity) { dtd->keepProcessing = dtd->standalone; /* cannot report skipped entities in declarations */ if ((role == XML_ROLE_PARAM_ENTITY_REF) && parser->m_skippedEntityHandler) { parser->m_skippedEntityHandler(parser->m_handlerArg, name, 1); handleDefault = XML_FALSE; } break; } if (entity->open) return XML_ERROR_RECURSIVE_ENTITY_REF; if (entity->textPtr) { enum XML_Error result; XML_Bool betweenDecl = (role == XML_ROLE_PARAM_ENTITY_REF ? XML_TRUE : XML_FALSE); result = processInternalEntity(parser, entity, betweenDecl); if (result != XML_ERROR_NONE) return result; handleDefault = XML_FALSE; break; } if (parser->m_externalEntityRefHandler) { dtd->paramEntityRead = XML_FALSE; entity->open = XML_TRUE; if (! parser->m_externalEntityRefHandler( parser->m_externalEntityRefHandlerArg, 0, entity->base, entity->systemId, entity->publicId)) { entity->open = XML_FALSE; return XML_ERROR_EXTERNAL_ENTITY_HANDLING; } entity->open = XML_FALSE; handleDefault = XML_FALSE; if (! 
dtd->paramEntityRead) { dtd->keepProcessing = dtd->standalone; break; } } else { dtd->keepProcessing = dtd->standalone; break; } } #endif /* XML_DTD */ if (! dtd->standalone && parser->m_notStandaloneHandler && ! parser->m_notStandaloneHandler(parser->m_handlerArg)) return XML_ERROR_NOT_STANDALONE; break; /* Element declaration stuff */ case XML_ROLE_ELEMENT_NAME: if (parser->m_elementDeclHandler) { parser->m_declElementType = getElementType(parser, enc, s, next); if (! parser->m_declElementType) return XML_ERROR_NO_MEMORY; dtd->scaffLevel = 0; dtd->scaffCount = 0; dtd->in_eldecl = XML_TRUE; handleDefault = XML_FALSE; } break; case XML_ROLE_CONTENT_ANY: case XML_ROLE_CONTENT_EMPTY: if (dtd->in_eldecl) { if (parser->m_elementDeclHandler) { XML_Content *content = (XML_Content *)MALLOC(parser, sizeof(XML_Content)); if (! content) return XML_ERROR_NO_MEMORY; content->quant = XML_CQUANT_NONE; content->name = NULL; content->numchildren = 0; content->children = NULL; content->type = ((role == XML_ROLE_CONTENT_ANY) ? XML_CTYPE_ANY : XML_CTYPE_EMPTY); *eventEndPP = s; parser->m_elementDeclHandler( parser->m_handlerArg, parser->m_declElementType->name, content); handleDefault = XML_FALSE; } dtd->in_eldecl = XML_FALSE; } break; case XML_ROLE_CONTENT_PCDATA: if (dtd->in_eldecl) { dtd->scaffold[dtd->scaffIndex[dtd->scaffLevel - 1]].type = XML_CTYPE_MIXED; if (parser->m_elementDeclHandler) handleDefault = XML_FALSE; } break; case XML_ROLE_CONTENT_ELEMENT: quant = XML_CQUANT_NONE; goto elementContent; case XML_ROLE_CONTENT_ELEMENT_OPT: quant = XML_CQUANT_OPT; goto elementContent; case XML_ROLE_CONTENT_ELEMENT_REP: quant = XML_CQUANT_REP; goto elementContent; case XML_ROLE_CONTENT_ELEMENT_PLUS: quant = XML_CQUANT_PLUS; elementContent: if (dtd->in_eldecl) { ELEMENT_TYPE *el; const XML_Char *name; int nameLen; const char *nxt = (quant == XML_CQUANT_NONE ? 
next : next - enc->minBytesPerChar); int myindex = nextScaffoldPart(parser); if (myindex < 0) return XML_ERROR_NO_MEMORY; dtd->scaffold[myindex].type = XML_CTYPE_NAME; dtd->scaffold[myindex].quant = quant; el = getElementType(parser, enc, s, nxt); if (! el) return XML_ERROR_NO_MEMORY; name = el->name; dtd->scaffold[myindex].name = name; nameLen = 0; for (; name[nameLen++];) ; dtd->contentStringLen += nameLen; if (parser->m_elementDeclHandler) handleDefault = XML_FALSE; } break; case XML_ROLE_GROUP_CLOSE: quant = XML_CQUANT_NONE; goto closeGroup; case XML_ROLE_GROUP_CLOSE_OPT: quant = XML_CQUANT_OPT; goto closeGroup; case XML_ROLE_GROUP_CLOSE_REP: quant = XML_CQUANT_REP; goto closeGroup; case XML_ROLE_GROUP_CLOSE_PLUS: quant = XML_CQUANT_PLUS; closeGroup: if (dtd->in_eldecl) { if (parser->m_elementDeclHandler) handleDefault = XML_FALSE; dtd->scaffLevel--; dtd->scaffold[dtd->scaffIndex[dtd->scaffLevel]].quant = quant; if (dtd->scaffLevel == 0) { if (! handleDefault) { XML_Content *model = build_model(parser); if (! model) return XML_ERROR_NO_MEMORY; *eventEndPP = s; parser->m_elementDeclHandler( parser->m_handlerArg, parser->m_declElementType->name, model); } dtd->in_eldecl = XML_FALSE; dtd->contentStringLen = 0; } } break; /* End element declaration stuff */ case XML_ROLE_PI: if (! reportProcessingInstruction(parser, enc, s, next)) return XML_ERROR_NO_MEMORY; handleDefault = XML_FALSE; break; case XML_ROLE_COMMENT: if (! 
reportComment(parser, enc, s, next)) return XML_ERROR_NO_MEMORY; handleDefault = XML_FALSE; break; case XML_ROLE_NONE: switch (tok) { case XML_TOK_BOM: handleDefault = XML_FALSE; break; } break; case XML_ROLE_DOCTYPE_NONE: if (parser->m_startDoctypeDeclHandler) handleDefault = XML_FALSE; break; case XML_ROLE_ENTITY_NONE: if (dtd->keepProcessing && parser->m_entityDeclHandler) handleDefault = XML_FALSE; break; case XML_ROLE_NOTATION_NONE: if (parser->m_notationDeclHandler) handleDefault = XML_FALSE; break; case XML_ROLE_ATTLIST_NONE: if (dtd->keepProcessing && parser->m_attlistDeclHandler) handleDefault = XML_FALSE; break; case XML_ROLE_ELEMENT_NONE: if (parser->m_elementDeclHandler) handleDefault = XML_FALSE; break; } /* end of big switch */ if (handleDefault && parser->m_defaultHandler) reportDefault(parser, enc, s, next); switch (parser->m_parsingStatus.parsing) { case XML_SUSPENDED: *nextPtr = next; return XML_ERROR_NONE; case XML_FINISHED: return XML_ERROR_ABORTED; default: s = next; tok = XmlPrologTok(enc, s, end, &next); } } /* not reached */ } static enum XML_Error PTRCALL epilogProcessor(XML_Parser parser, const char *s, const char *end, const char **nextPtr) { parser->m_processor = epilogProcessor; parser->m_eventPtr = s; for (;;) { const char *next = NULL; int tok = XmlPrologTok(parser->m_encoding, s, end, &next); parser->m_eventEndPtr = next; switch (tok) { /* report partial linebreak - it might be the last token */ case -XML_TOK_PROLOG_S: if (parser->m_defaultHandler) { reportDefault(parser, parser->m_encoding, s, next); if (parser->m_parsingStatus.parsing == XML_FINISHED) return XML_ERROR_ABORTED; } *nextPtr = next; return XML_ERROR_NONE; case XML_TOK_NONE: *nextPtr = s; return XML_ERROR_NONE; case XML_TOK_PROLOG_S: if (parser->m_defaultHandler) reportDefault(parser, parser->m_encoding, s, next); break; case XML_TOK_PI: if (! reportProcessingInstruction(parser, parser->m_encoding, s, next)) return XML_ERROR_NO_MEMORY; break; case XML_TOK_COMMENT: if (! 
reportComment(parser, parser->m_encoding, s, next)) return XML_ERROR_NO_MEMORY; break; case XML_TOK_INVALID: parser->m_eventPtr = next; return XML_ERROR_INVALID_TOKEN; case XML_TOK_PARTIAL: if (! parser->m_parsingStatus.finalBuffer) { *nextPtr = s; return XML_ERROR_NONE; } return XML_ERROR_UNCLOSED_TOKEN; case XML_TOK_PARTIAL_CHAR: if (! parser->m_parsingStatus.finalBuffer) { *nextPtr = s; return XML_ERROR_NONE; } return XML_ERROR_PARTIAL_CHAR; default: return XML_ERROR_JUNK_AFTER_DOC_ELEMENT; } parser->m_eventPtr = s = next; switch (parser->m_parsingStatus.parsing) { case XML_SUSPENDED: *nextPtr = next; return XML_ERROR_NONE; case XML_FINISHED: return XML_ERROR_ABORTED; default:; } } } static enum XML_Error processInternalEntity(XML_Parser parser, ENTITY *entity, XML_Bool betweenDecl) { const char *textStart, *textEnd; const char *next; enum XML_Error result; OPEN_INTERNAL_ENTITY *openEntity; if (parser->m_freeInternalEntities) { openEntity = parser->m_freeInternalEntities; parser->m_freeInternalEntities = openEntity->next; } else { openEntity = (OPEN_INTERNAL_ENTITY *)MALLOC(parser, sizeof(OPEN_INTERNAL_ENTITY)); if (! 
openEntity) return XML_ERROR_NO_MEMORY; } entity->open = XML_TRUE; entity->processed = 0; openEntity->next = parser->m_openInternalEntities; parser->m_openInternalEntities = openEntity; openEntity->entity = entity; openEntity->startTagLevel = parser->m_tagLevel; openEntity->betweenDecl = betweenDecl; openEntity->internalEventPtr = NULL; openEntity->internalEventEndPtr = NULL; textStart = (char *)entity->textPtr; textEnd = (char *)(entity->textPtr + entity->textLen); /* Set a safe default value in case 'next' does not get set */ next = textStart; #ifdef XML_DTD if (entity->is_param) { int tok = XmlPrologTok(parser->m_internalEncoding, textStart, textEnd, &next); result = doProlog(parser, parser->m_internalEncoding, textStart, textEnd, tok, next, &next, XML_FALSE, XML_FALSE); } else #endif /* XML_DTD */ result = doContent(parser, parser->m_tagLevel, parser->m_internalEncoding, textStart, textEnd, &next, XML_FALSE); if (result == XML_ERROR_NONE) { if (textEnd != next && parser->m_parsingStatus.parsing == XML_SUSPENDED) { entity->processed = (int)(next - textStart); parser->m_processor = internalEntityProcessor; } else { entity->open = XML_FALSE; parser->m_openInternalEntities = openEntity->next; /* put openEntity back in list of free instances */ openEntity->next = parser->m_freeInternalEntities; parser->m_freeInternalEntities = openEntity; } } return result; } static enum XML_Error PTRCALL internalEntityProcessor(XML_Parser parser, const char *s, const char *end, const char **nextPtr) { ENTITY *entity; const char *textStart, *textEnd; const char *next; enum XML_Error result; OPEN_INTERNAL_ENTITY *openEntity = parser->m_openInternalEntities; if (! 
openEntity) return XML_ERROR_UNEXPECTED_STATE; entity = openEntity->entity; textStart = ((char *)entity->textPtr) + entity->processed; textEnd = (char *)(entity->textPtr + entity->textLen); /* Set a safe default value in case 'next' does not get set */ next = textStart; #ifdef XML_DTD if (entity->is_param) { int tok = XmlPrologTok(parser->m_internalEncoding, textStart, textEnd, &next); result = doProlog(parser, parser->m_internalEncoding, textStart, textEnd, tok, next, &next, XML_FALSE, XML_TRUE); } else #endif /* XML_DTD */ result = doContent(parser, openEntity->startTagLevel, parser->m_internalEncoding, textStart, textEnd, &next, XML_FALSE); if (result != XML_ERROR_NONE) return result; else if (textEnd != next && parser->m_parsingStatus.parsing == XML_SUSPENDED) { entity->processed = (int)(next - (char *)entity->textPtr); return result; } else { entity->open = XML_FALSE; parser->m_openInternalEntities = openEntity->next; /* put openEntity back in list of free instances */ openEntity->next = parser->m_freeInternalEntities; parser->m_freeInternalEntities = openEntity; } #ifdef XML_DTD if (entity->is_param) { int tok; parser->m_processor = prologProcessor; tok = XmlPrologTok(parser->m_encoding, s, end, &next); return doProlog(parser, parser->m_encoding, s, end, tok, next, nextPtr, (XML_Bool)! parser->m_parsingStatus.finalBuffer, XML_TRUE); } else #endif /* XML_DTD */ { parser->m_processor = contentProcessor; /* see externalEntityContentProcessor vs contentProcessor */ return doContent(parser, parser->m_parentParser ? 1 : 0, parser->m_encoding, s, end, nextPtr, (XML_Bool)! 
                         parser->m_parsingStatus.finalBuffer);
  }
}

/* errorProcessor - terminal processor installed once a fatal error has
 * been reported; every subsequent parse call just returns the stored
 * error code. */
static enum XML_Error PTRCALL
errorProcessor(XML_Parser parser, const char *s, const char *end,
               const char **nextPtr) {
  UNUSED_P(s);
  UNUSED_P(end);
  UNUSED_P(nextPtr);
  return parser->m_errorCode;
}

/* storeAttributeValue - appends a complete attribute value to 'pool'
 * via appendAttributeValue(), then finishes normalization: for
 * non-CDATA attributes a single trailing space left over from
 * whitespace collapsing is chopped, and the value is NUL-terminated. */
static enum XML_Error
storeAttributeValue(XML_Parser parser, const ENCODING *enc, XML_Bool isCdata,
                    const char *ptr, const char *end, STRING_POOL *pool) {
  enum XML_Error result
      = appendAttributeValue(parser, enc, isCdata, ptr, end, pool);
  if (result)
    return result;
  if (! isCdata && poolLength(pool) && poolLastChar(pool) == 0x20)
    poolChop(pool);
  if (! poolAppendChar(pool, XML_T('\0')))
    return XML_ERROR_NO_MEMORY;
  return XML_ERROR_NONE;
}

/* appendAttributeValue - tokenises the raw attribute value between
 * 'ptr' and 'end', expanding character and entity references and
 * (for non-CDATA attributes) collapsing whitespace, appending the
 * result to 'pool'.  Recurses into itself to expand internal general
 * entities. */
static enum XML_Error
appendAttributeValue(XML_Parser parser, const ENCODING *enc, XML_Bool isCdata,
                     const char *ptr, const char *end, STRING_POOL *pool) {
  DTD *const dtd = parser->m_dtd; /* save one level of indirection */
  for (;;) {
    const char *next;
    int tok = XmlAttributeValueTok(enc, ptr, end, &next);
    switch (tok) {
    case XML_TOK_NONE:
      return XML_ERROR_NONE;
    case XML_TOK_INVALID:
      if (enc == parser->m_encoding)
        parser->m_eventPtr = next;
      return XML_ERROR_INVALID_TOKEN;
    case XML_TOK_PARTIAL:
      if (enc == parser->m_encoding)
        parser->m_eventPtr = ptr;
      return XML_ERROR_INVALID_TOKEN;
    case XML_TOK_CHAR_REF: {
      XML_Char buf[XML_ENCODE_MAX];
      int i;
      int n = XmlCharRefNumber(enc, ptr);
      if (n < 0) {
        if (enc == parser->m_encoding)
          parser->m_eventPtr = ptr;
        return XML_ERROR_BAD_CHAR_REF;
      }
      /* Whitespace collapsing: drop a referenced space that would
       * start the value or follow another space. */
      if (! isCdata && n == 0x20 /* space */
          && (poolLength(pool) == 0 || poolLastChar(pool) == 0x20))
        break;
      n = XmlEncode(n, (ICHAR *)buf);
      /* The XmlEncode() functions can never return 0 here.  That
       * error return happens if the code point passed in is either
       * negative or greater than or equal to 0x110000.  The
       * XmlCharRefNumber() functions will all return a number
       * strictly less than 0x110000 or a negative value if an error
       * occurred.  The negative value is intercepted above, so
       * XmlEncode() is never passed a value it might return an
       * error for. */
      for (i = 0; i < n; i++) {
        if (! poolAppendChar(pool, buf[i]))
          return XML_ERROR_NO_MEMORY;
      }
    } break;
    case XML_TOK_DATA_CHARS:
      if (! poolAppend(pool, enc, ptr, next))
        return XML_ERROR_NO_MEMORY;
      break;
    case XML_TOK_TRAILING_CR:
      next = ptr + enc->minBytesPerChar;
      /* fall through */
    case XML_TOK_ATTRIBUTE_VALUE_S:
    case XML_TOK_DATA_NEWLINE:
      /* All literal whitespace becomes a single 0x20; for non-CDATA
       * attributes, leading spaces and runs of spaces collapse. */
      if (! isCdata && (poolLength(pool) == 0 || poolLastChar(pool) == 0x20))
        break;
      if (! poolAppendChar(pool, 0x20))
        return XML_ERROR_NO_MEMORY;
      break;
    case XML_TOK_ENTITY_REF: {
      const XML_Char *name;
      ENTITY *entity;
      char checkEntityDecl;
      /* Predefined entities (&amp; &lt; ...) expand to one character. */
      XML_Char ch = (XML_Char)XmlPredefinedEntityName(
          enc, ptr + enc->minBytesPerChar, next - enc->minBytesPerChar);
      if (ch) {
        if (! poolAppendChar(pool, ch))
          return XML_ERROR_NO_MEMORY;
        break;
      }
      name = poolStoreString(&parser->m_temp2Pool, enc,
                             ptr + enc->minBytesPerChar,
                             next - enc->minBytesPerChar);
      if (! name)
        return XML_ERROR_NO_MEMORY;
      entity = (ENTITY *)lookup(parser, &dtd->generalEntities, name, 0);
      poolDiscard(&parser->m_temp2Pool);
      /* First, determine if a check for an existing declaration is
         needed; if yes, check that the entity exists, and that it
         is internal.
      */
      if (pool == &dtd->pool) /* are we called from prolog? */
        checkEntityDecl =
#ifdef XML_DTD
            parser->m_prologState.documentEntity &&
#endif /* XML_DTD */
            (dtd->standalone ? ! parser->m_openInternalEntities
                             : ! dtd->hasParamEntityRefs);
      else /* if (pool == &parser->m_tempPool): we are called from content */
        checkEntityDecl = ! dtd->hasParamEntityRefs || dtd->standalone;
      if (checkEntityDecl) {
        if (! entity)
          return XML_ERROR_UNDEFINED_ENTITY;
        else if (! entity->is_internal)
          return XML_ERROR_ENTITY_DECLARED_IN_PE;
      } else if (! entity) {
        /* Cannot report skipped entity here - see comments on
           parser->m_skippedEntityHandler.
           if (parser->m_skippedEntityHandler)
             parser->m_skippedEntityHandler(parser->m_handlerArg, name, 0);
        */
        /* Cannot call the default handler because this would be
           out of sync with the call to the startElementHandler.
           if ((pool == &parser->m_tempPool) && parser->m_defaultHandler)
             reportDefault(parser, enc, ptr, next);
        */
        break;
      }
      if (entity->open) {
        if (enc == parser->m_encoding) {
          /* It does not appear that this line can be executed.
           *
           * The "if (entity->open)" check catches recursive entity
           * definitions.  In order to be called with an open
           * entity, it must have gone through this code before and
           * been through the recursive call to
           * appendAttributeValue() some lines below.  That call
           * sets the local encoding ("enc") to the parser's
           * internal encoding (internal_utf8 or internal_utf16),
           * which can never be the same as the principle encoding.
           * It doesn't appear there is another code path that gets
           * here with entity->open being TRUE.
           *
           * Since it is not certain that this logic is watertight,
           * we keep the line and merely exclude it from coverage
           * tests.
           */
          parser->m_eventPtr = ptr; /* LCOV_EXCL_LINE */
        }
        return XML_ERROR_RECURSIVE_ENTITY_REF;
      }
      if (entity->notation) {
        /* Unparsed (binary) entities are not allowed in attribute
         * values. */
        if (enc == parser->m_encoding)
          parser->m_eventPtr = ptr;
        return XML_ERROR_BINARY_ENTITY_REF;
      }
      if (! entity->textPtr) {
        /* External entities are not allowed in attribute values. */
        if (enc == parser->m_encoding)
          parser->m_eventPtr = ptr;
        return XML_ERROR_ATTRIBUTE_EXTERNAL_ENTITY_REF;
      } else {
        /* Internal entity: recurse over its replacement text, using
         * entity->open as the recursion guard. */
        enum XML_Error result;
        const XML_Char *textEnd = entity->textPtr + entity->textLen;
        entity->open = XML_TRUE;
        result = appendAttributeValue(parser, parser->m_internalEncoding,
                                      isCdata, (char *)entity->textPtr,
                                      (char *)textEnd, pool);
        entity->open = XML_FALSE;
        if (result)
          return result;
      }
    } break;
    default:
      /* The only token returned by XmlAttributeValueTok() that does
       * not have an explicit case here is XML_TOK_PARTIAL_CHAR.
       * Getting that would require an entity name to contain an
       * incomplete XML character (e.g. \xE2\x82); however previous
       * tokenisers will have already recognised and rejected such
       * names before XmlAttributeValueTok() gets a look-in.  This
       * default case should be retained as a safety net, but the code
       * excluded from coverage tests.
       *
       * LCOV_EXCL_START */
      if (enc == parser->m_encoding)
        parser->m_eventPtr = ptr;
      return XML_ERROR_UNEXPECTED_STATE;
      /* LCOV_EXCL_STOP */
    }
    ptr = next;
  }
  /* not reached */
}

/* storeEntityValue - stores the literal value of an entity declaration
 * into dtd->entityValuePool, expanding parameter-entity references
 * where the current context permits them. */
static enum XML_Error
storeEntityValue(XML_Parser parser, const ENCODING *enc,
                 const char *entityTextPtr, const char *entityTextEnd) {
  DTD *const dtd = parser->m_dtd; /* save one level of indirection */
  STRING_POOL *pool = &(dtd->entityValuePool);
  enum XML_Error result = XML_ERROR_NONE;
#ifdef XML_DTD
  int oldInEntityValue = parser->m_prologState.inEntityValue;
  parser->m_prologState.inEntityValue = 1;
#endif /* XML_DTD */
  /* never return Null for the value
     argument in EntityDeclHandler, since this would indicate an
     external entity; therefore we have to make sure that
     entityValuePool.start is not null */
  if (! pool->blocks) {
    if (! poolGrow(pool))
      return XML_ERROR_NO_MEMORY;
  }

  for (;;) {
    const char *next;
    int tok = XmlEntityValueTok(enc, entityTextPtr, entityTextEnd, &next);
    switch (tok) {
    case XML_TOK_PARAM_ENTITY_REF:
#ifdef XML_DTD
      /* PE references are only legal here when we are inside a
       * parameter entity or an external (differently-encoded) DTD. */
      if (parser->m_isParamEntity || enc != parser->m_encoding) {
        const XML_Char *name;
        ENTITY *entity;
        name = poolStoreString(&parser->m_tempPool, enc,
                               entityTextPtr + enc->minBytesPerChar,
                               next - enc->minBytesPerChar);
        if (! name) {
          result = XML_ERROR_NO_MEMORY;
          goto endEntityValue;
        }
        entity = (ENTITY *)lookup(parser, &dtd->paramEntities, name, 0);
        poolDiscard(&parser->m_tempPool);
        if (!
entity) {
          /* not a well-formedness error - see XML 1.0: WFC Entity Declared */
          /* cannot report skipped entity here - see comments on
             parser->m_skippedEntityHandler
             if (parser->m_skippedEntityHandler)
               parser->m_skippedEntityHandler(parser->m_handlerArg, name, 0);
          */
          /* Unknown parameter entity: keep processing only if the document
             is declared standalone. */
          dtd->keepProcessing = dtd->standalone;
          goto endEntityValue;
        }
        if (entity->open) {
          /* The entity is already being expanded: recursive reference. */
          if (enc == parser->m_encoding)
            parser->m_eventPtr = entityTextPtr;
          result = XML_ERROR_RECURSIVE_ENTITY_REF;
          goto endEntityValue;
        }
        if (entity->systemId) {
          /* External parameter entity: delegate loading to the
             application's external-entity-ref handler, if any. */
          if (parser->m_externalEntityRefHandler) {
            dtd->paramEntityRead = XML_FALSE;
            entity->open = XML_TRUE;
            if (! parser->m_externalEntityRefHandler(
                    parser->m_externalEntityRefHandlerArg, 0, entity->base,
                    entity->systemId, entity->publicId)) {
              entity->open = XML_FALSE;
              result = XML_ERROR_EXTERNAL_ENTITY_HANDLING;
              goto endEntityValue;
            }
            entity->open = XML_FALSE;
            if (! dtd->paramEntityRead)
              dtd->keepProcessing = dtd->standalone;
          } else
            dtd->keepProcessing = dtd->standalone;
        } else {
          /* Internal parameter entity: recurse over its stored text,
             guarding with entity->open against recursive references. */
          entity->open = XML_TRUE;
          result = storeEntityValue(
              parser, parser->m_internalEncoding, (char *)entity->textPtr,
              (char *)(entity->textPtr + entity->textLen));
          entity->open = XML_FALSE;
          if (result)
            goto endEntityValue;
        }
        break;
      }
#endif /* XML_DTD */
      /* In the internal subset, PE references are not legal within markup
         declarations, e.g entity values in this case. */
      parser->m_eventPtr = entityTextPtr;
      result = XML_ERROR_PARAM_ENTITY_REF;
      goto endEntityValue;
    case XML_TOK_NONE:
      result = XML_ERROR_NONE;
      goto endEntityValue;
    case XML_TOK_ENTITY_REF:
    case XML_TOK_DATA_CHARS:
      /* General entity references are stored literally (not expanded). */
      if (! poolAppend(pool, enc, entityTextPtr, next)) {
        result = XML_ERROR_NO_MEMORY;
        goto endEntityValue;
      }
      break;
    case XML_TOK_TRAILING_CR:
      next = entityTextPtr + enc->minBytesPerChar;
      /* fall through */
    case XML_TOK_DATA_NEWLINE:
      /* Line ends are normalised to a single 0xA in the stored value. */
      if (pool->end == pool->ptr && ! poolGrow(pool)) {
        result = XML_ERROR_NO_MEMORY;
        goto endEntityValue;
      }
      *(pool->ptr)++ = 0xA;
      break;
    case XML_TOK_CHAR_REF: {
      XML_Char buf[XML_ENCODE_MAX];
      int i;
      int n = XmlCharRefNumber(enc, entityTextPtr);
      if (n < 0) {
        if (enc == parser->m_encoding)
          parser->m_eventPtr = entityTextPtr;
        result = XML_ERROR_BAD_CHAR_REF;
        goto endEntityValue;
      }
      n = XmlEncode(n, (ICHAR *)buf);
      /* The XmlEncode() functions can never return 0 here.  That
       * error return happens if the code point passed in is either
       * negative or greater than or equal to 0x110000.  The
       * XmlCharRefNumber() functions will all return a number
       * strictly less than 0x110000 or a negative value if an error
       * occurred.  The negative value is intercepted above, so
       * XmlEncode() is never passed a value it might return an
       * error for.
       */
      for (i = 0; i < n; i++) {
        if (pool->end == pool->ptr && ! poolGrow(pool)) {
          result = XML_ERROR_NO_MEMORY;
          goto endEntityValue;
        }
        *(pool->ptr)++ = buf[i];
      }
    } break;
    case XML_TOK_PARTIAL:
      if (enc == parser->m_encoding)
        parser->m_eventPtr = entityTextPtr;
      result = XML_ERROR_INVALID_TOKEN;
      goto endEntityValue;
    case XML_TOK_INVALID:
      if (enc == parser->m_encoding)
        parser->m_eventPtr = next;
      result = XML_ERROR_INVALID_TOKEN;
      goto endEntityValue;
    default:
      /* This default case should be unnecessary -- all the tokens
       * that XmlEntityValueTok() can return have their own explicit
       * cases -- but should be retained for safety.  We do however
       * exclude it from the coverage statistics.
       *
       * LCOV_EXCL_START */
      if (enc == parser->m_encoding)
        parser->m_eventPtr = entityTextPtr;
      result = XML_ERROR_UNEXPECTED_STATE;
      goto endEntityValue;
      /* LCOV_EXCL_STOP */
    }
    entityTextPtr = next;
  }
endEntityValue:
#ifdef XML_DTD
  parser->m_prologState.inEntityValue = oldInEntityValue;
#endif /* XML_DTD */
  return result;
}

/* Normalize line endings in s in place: 0xD and 0xD 0xA both become a
   single 0xA.  The scan loop first skips the (common) prefix that
   contains no carriage return at all. */
static void FASTCALL
normalizeLines(XML_Char *s) {
  XML_Char *p;
  for (;; s++) {
    if (*s == XML_T('\0'))
      return; /* no carriage return anywhere - nothing to rewrite */
    if (*s == 0xD)
      break;
  }
  p = s;
  do {
    if (*s == 0xD) {
      *p++ = 0xA;
      if (*++s == 0xA)
        s++; /* collapse the CR LF pair */
    } else
      *p++ = *s++;
  } while (*s);
  *p = XML_T('\0');
}

/* Report a processing instruction in [start, end) to the registered PI
   handler, or to the default handler if no PI handler is set.
   Returns 0 on out-of-memory, 1 otherwise. */
static int
reportProcessingInstruction(XML_Parser parser, const ENCODING *enc,
                            const char *start, const char *end) {
  const XML_Char *target;
  XML_Char *data;
  const char *tem;
  if (! parser->m_processingInstructionHandler) {
    if (parser->m_defaultHandler)
      reportDefault(parser, enc, start, end);
    return 1;
  }
  start += enc->minBytesPerChar * 2; /* skip past the "<?" */
  tem = start + XmlNameLength(enc, start);
  target = poolStoreString(&parser->m_tempPool, enc, start, tem);
  if (! target)
    return 0;
  poolFinish(&parser->m_tempPool);
  data = poolStoreString(&parser->m_tempPool, enc, XmlSkipS(enc, tem),
                         end - enc->minBytesPerChar * 2); /* drop "?>" */
  if (! data)
    return 0;
  normalizeLines(data);
  parser->m_processingInstructionHandler(parser->m_handlerArg, target, data);
  poolClear(&parser->m_tempPool);
  return 1;
}

/* Report a comment in [start, end) to the registered comment handler,
   or to the default handler if none is set.
   Returns 0 on out-of-memory, 1 otherwise. */
static int
reportComment(XML_Parser parser, const ENCODING *enc, const char *start,
              const char *end) {
  XML_Char *data;
  if (! parser->m_commentHandler) {
    if (parser->m_defaultHandler)
      reportDefault(parser, enc, start, end);
    return 1;
  }
  data = poolStoreString(&parser->m_tempPool, enc,
                         start + enc->minBytesPerChar * 4, /* skip "<!--" */
                         end - enc->minBytesPerChar * 3);  /* drop "-->" */
  if (! data)
    return 0;
  normalizeLines(data);
  parser->m_commentHandler(parser->m_handlerArg, data);
  poolClear(&parser->m_tempPool);
  return 1;
}

/* Pass raw input [s, end) through to the default handler, converting
   to the internal encoding in m_dataBuf-sized pieces when the source
   encoding requires conversion. */
static void
reportDefault(XML_Parser parser, const ENCODING *enc, const char *s,
              const char *end) {
  if (MUST_CONVERT(enc, s)) {
    enum XML_Convert_Result convert_res;
    const char **eventPP;
    const char **eventEndPP;
    if (enc == parser->m_encoding) {
      eventPP = &parser->m_eventPtr;
      eventEndPP = &parser->m_eventEndPtr;
    } else {
      /* To get here, two things must be true; the parser must be
       * using a character encoding that is not the same as the
       * encoding passed in, and the encoding passed in must need
       * conversion to the internal format (UTF-8 unless XML_UNICODE
       * is defined).  The only occasions on which the encoding passed
       * in is not the same as the parser's encoding are when it is
       * the internal encoding (e.g. a previously defined parameter
       * entity, already converted to internal format).  This by
       * definition doesn't need conversion, so the whole branch never
       * gets executed.
       *
       * For safety's sake we don't delete these lines and merely
       * exclude them from coverage statistics.
       *
       * LCOV_EXCL_START */
      eventPP = &(parser->m_openInternalEntities->internalEventPtr);
      eventEndPP = &(parser->m_openInternalEntities->internalEventEndPtr);
      /* LCOV_EXCL_STOP */
    }
    do {
      ICHAR *dataPtr = (ICHAR *)parser->m_dataBuf;
      convert_res
          = XmlConvert(enc, &s, end, &dataPtr, (ICHAR *)parser->m_dataBufEnd);
      *eventEndPP = s;
      parser->m_defaultHandler(parser->m_handlerArg, parser->m_dataBuf,
                               (int)(dataPtr - (ICHAR *)parser->m_dataBuf));
      *eventPP = s;
    } while ((convert_res != XML_CONVERT_COMPLETED)
             && (convert_res != XML_CONVERT_INPUT_INCOMPLETE));
  } else
    parser->m_defaultHandler(parser->m_handlerArg, (XML_Char *)s,
                             (int)((XML_Char *)end - (XML_Char *)s));
}

/* Record a default attribute (and/or the ID attribute) for an element
   type, growing the defaultAtts array as needed (8 slots initially,
   doubling thereafter).  Returns 0 on out-of-memory, 1 otherwise. */
static int
defineAttribute(ELEMENT_TYPE *type, ATTRIBUTE_ID *attId, XML_Bool isCdata,
                XML_Bool isId, const XML_Char *value, XML_Parser parser) {
  DEFAULT_ATTRIBUTE *att;
  if (value || isId) {
    /* The handling of default attributes gets messed up if we have
       a default which duplicates a non-default. */
    int i;
    for (i = 0; i < type->nDefaultAtts; i++)
      if (attId == type->defaultAtts[i].id)
        return 1;
    if (isId && ! type->idAtt && ! attId->xmlns)
      type->idAtt = attId;
  }
  if (type->nDefaultAtts == type->allocDefaultAtts) {
    if (type->allocDefaultAtts == 0) {
      type->allocDefaultAtts = 8;
      type->defaultAtts = (DEFAULT_ATTRIBUTE *)MALLOC(
          parser, type->allocDefaultAtts * sizeof(DEFAULT_ATTRIBUTE));
      if (! type->defaultAtts) {
        type->allocDefaultAtts = 0;
        return 0;
      }
    } else {
      DEFAULT_ATTRIBUTE *temp;
      int count = type->allocDefaultAtts * 2;
      temp = (DEFAULT_ATTRIBUTE *)REALLOC(parser, type->defaultAtts,
                                          (count * sizeof(DEFAULT_ATTRIBUTE)));
      if (temp == NULL)
        return 0;
      type->allocDefaultAtts = count;
      type->defaultAtts = temp;
    }
  }
  att = type->defaultAtts + type->nDefaultAtts;
  att->id = attId;
  att->value = value;
  att->isCdata = isCdata;
  if (!
isCdata)
    attId->maybeTokenized = XML_TRUE;
  type->nDefaultAtts += 1;
  return 1;
}

/* Attach the namespace PREFIX (the text before the first colon, if
   any) to an element type.  Returns 0 on out-of-memory, 1 otherwise. */
static int
setElementTypePrefix(XML_Parser parser, ELEMENT_TYPE *elementType) {
  DTD *const dtd = parser->m_dtd; /* save one level of indirection */
  const XML_Char *name;
  for (name = elementType->name; *name; name++) {
    if (*name == XML_T(ASCII_COLON)) {
      PREFIX *prefix;
      const XML_Char *s;
      for (s = elementType->name; s != name; s++) {
        if (! poolAppendChar(&dtd->pool, *s))
          return 0;
      }
      if (! poolAppendChar(&dtd->pool, XML_T('\0')))
        return 0;
      prefix = (PREFIX *)lookup(parser, &dtd->prefixes, poolStart(&dtd->pool),
                                sizeof(PREFIX));
      if (! prefix)
        return 0;
      /* Only keep the pooled copy of the prefix name if lookup()
         actually created a new entry with it. */
      if (prefix->name == poolStart(&dtd->pool))
        poolFinish(&dtd->pool);
      else
        poolDiscard(&dtd->pool);
      elementType->prefix = prefix;
      break;
    }
  }
  return 1;
}

/* Look up (or create) the ATTRIBUTE_ID for the attribute name in
   [start, end), resolving its namespace prefix when namespace
   processing is enabled.  Returns NULL on out-of-memory. */
static ATTRIBUTE_ID *
getAttributeId(XML_Parser parser, const ENCODING *enc, const char *start,
               const char *end) {
  DTD *const dtd = parser->m_dtd; /* save one level of indirection */
  ATTRIBUTE_ID *id;
  const XML_Char *name;
  if (! poolAppendChar(&dtd->pool, XML_T('\0')))
    return NULL;
  name = poolStoreString(&dtd->pool, enc, start, end);
  if (! name)
    return NULL;
  /* skip quotation mark - its storage will be re-used (like in name[-1]) */
  ++name;
  id = (ATTRIBUTE_ID *)lookup(parser, &dtd->attributeIds, name,
                              sizeof(ATTRIBUTE_ID));
  if (! id)
    return NULL;
  if (id->name != name)
    poolDiscard(&dtd->pool); /* already known: drop the duplicate copy */
  else {
    poolFinish(&dtd->pool);
    if (! parser->m_ns)
      ; /* no namespace processing: leave the id's prefix unset */
    else if (name[0] == XML_T(ASCII_x) && name[1] == XML_T(ASCII_m)
             && name[2] == XML_T(ASCII_l) && name[3] == XML_T(ASCII_n)
             && name[4] == XML_T(ASCII_s)
             && (name[5] == XML_T('\0') || name[5] == XML_T(ASCII_COLON))) {
      /* "xmlns" or "xmlns:..." - a namespace declaration attribute. */
      if (name[5] == XML_T('\0'))
        id->prefix = &dtd->defaultPrefix;
      else
        id->prefix = (PREFIX *)lookup(parser, &dtd->prefixes, name + 6,
                                      sizeof(PREFIX));
      id->xmlns = XML_TRUE;
    } else {
      int i;
      for (i = 0; name[i]; i++) {
        /* attributes without prefix are *not* in the default namespace */
        if (name[i] == XML_T(ASCII_COLON)) {
          int j;
          for (j = 0; j < i; j++) {
            if (! poolAppendChar(&dtd->pool, name[j]))
              return NULL;
          }
          if (! poolAppendChar(&dtd->pool, XML_T('\0')))
            return NULL;
          id->prefix = (PREFIX *)lookup(parser, &dtd->prefixes,
                                        poolStart(&dtd->pool), sizeof(PREFIX));
          if (! id->prefix)
            return NULL;
          if (id->prefix->name == poolStart(&dtd->pool))
            poolFinish(&dtd->pool);
          else
            poolDiscard(&dtd->pool);
          break;
        }
      }
    }
  }
  return id;
}

#define CONTEXT_SEP XML_T(ASCII_FF)

/* Serialise the current namespace bindings and open general entities
   into the temporary pool (separated by CONTEXT_SEP), for handing to
   an external entity parser.  Returns NULL on out-of-memory. */
static const XML_Char *
getContext(XML_Parser parser) {
  DTD *const dtd = parser->m_dtd; /* save one level of indirection */
  HASH_TABLE_ITER iter;
  XML_Bool needSep = XML_FALSE;

  if (dtd->defaultPrefix.binding) {
    int i;
    int len;
    if (! poolAppendChar(&parser->m_tempPool, XML_T(ASCII_EQUALS)))
      return NULL;
    len = dtd->defaultPrefix.binding->uriLen;
    if (parser->m_namespaceSeparator)
      len--;
    for (i = 0; i < len; i++) {
      if (! poolAppendChar(&parser->m_tempPool,
                           dtd->defaultPrefix.binding->uri[i])) {
        /* Because of memory caching, I don't believe this line can be
         * executed.
         *
         * This is part of a loop copying the default prefix binding
         * URI into the parser's temporary string pool.  Previously,
         * that URI was copied into the same string pool, with a
         * terminating NUL character, as part of setContext().  When
         * the pool was cleared, that leaves a block definitely big
         * enough to hold the URI on the free block list of the pool.
         * The URI copy in getContext() therefore cannot run out of
         * memory.
         *
         * If the pool is used between the setContext() and
         * getContext() calls, the worst it can do is leave a bigger
         * block on the front of the free list.  Given that this is
         * all somewhat inobvious and program logic can be changed, we
         * don't delete the line but we do exclude it from the test
         * coverage statistics.
         */
        return NULL; /* LCOV_EXCL_LINE */
      }
    }
    needSep = XML_TRUE;
  }

  hashTableIterInit(&iter, &(dtd->prefixes));
  for (;;) {
    int i;
    int len;
    const XML_Char *s;
    PREFIX *prefix = (PREFIX *)hashTableIterNext(&iter);
    if (! prefix)
      break;
    if (! prefix->binding) {
      /* This test appears to be (justifiable) paranoia.  There does
       * not seem to be a way of injecting a prefix without a binding
       * that doesn't get errored long before this function is called.
       * The test should remain for safety's sake, so we instead
       * exclude the following line from the coverage statistics.
       */
      continue; /* LCOV_EXCL_LINE */
    }
    if (needSep && ! poolAppendChar(&parser->m_tempPool, CONTEXT_SEP))
      return NULL;
    for (s = prefix->name; *s; s++)
      if (! poolAppendChar(&parser->m_tempPool, *s))
        return NULL;
    if (! poolAppendChar(&parser->m_tempPool, XML_T(ASCII_EQUALS)))
      return NULL;
    len = prefix->binding->uriLen;
    if (parser->m_namespaceSeparator)
      len--;
    for (i = 0; i < len; i++)
      if (! poolAppendChar(&parser->m_tempPool, prefix->binding->uri[i]))
        return NULL;
    needSep = XML_TRUE;
  }

  hashTableIterInit(&iter, &(dtd->generalEntities));
  for (;;) {
    const XML_Char *s;
    ENTITY *e = (ENTITY *)hashTableIterNext(&iter);
    if (! e)
      break;
    if (! e->open)
      continue; /* only currently-open entities belong in the context */
    if (needSep && ! poolAppendChar(&parser->m_tempPool, CONTEXT_SEP))
      return NULL;
    for (s = e->name; *s; s++)
      if (! poolAppendChar(&parser->m_tempPool, *s))
        return 0;
    needSep = XML_TRUE;
  }

  if (! poolAppendChar(&parser->m_tempPool, XML_T('\0')))
    return NULL;
  return parser->m_tempPool.start;
}

/* Re-establish a context previously produced by getContext(): re-open
   the listed general entities and re-create the namespace bindings.
   Returns XML_TRUE on success, XML_FALSE on out-of-memory. */
static XML_Bool
setContext(XML_Parser parser, const XML_Char *context) {
  DTD *const dtd = parser->m_dtd; /* save one level of indirection */
  const XML_Char *s = context;

  while (*context != XML_T('\0')) {
    if (*s == CONTEXT_SEP || *s == XML_T('\0')) {
      /* Accumulated token is an entity name: mark it open if known. */
      ENTITY *e;
      if (! poolAppendChar(&parser->m_tempPool, XML_T('\0')))
        return XML_FALSE;
      e = (ENTITY *)lookup(parser, &dtd->generalEntities,
                           poolStart(&parser->m_tempPool), 0);
      if (e)
        e->open = XML_TRUE;
      if (*s != XML_T('\0'))
        s++;
      context = s;
      poolDiscard(&parser->m_tempPool);
    } else if (*s == XML_T(ASCII_EQUALS)) {
      /* Accumulated token is a prefix; what follows "=" is its URI. */
      PREFIX *prefix;
      if (poolLength(&parser->m_tempPool) == 0)
        prefix = &dtd->defaultPrefix;
      else {
        if (! poolAppendChar(&parser->m_tempPool, XML_T('\0')))
          return XML_FALSE;
        prefix
            = (PREFIX *)lookup(parser, &dtd->prefixes,
                               poolStart(&parser->m_tempPool), sizeof(PREFIX));
        if (! prefix)
          return XML_FALSE;
        if (prefix->name == poolStart(&parser->m_tempPool)) {
          /* lookup() created the entry: move its name into the
             longer-lived DTD pool. */
          prefix->name = poolCopyString(&dtd->pool, prefix->name);
          if (! prefix->name)
            return XML_FALSE;
        }
        poolDiscard(&parser->m_tempPool);
      }
      for (context = s + 1; *context != CONTEXT_SEP && *context != XML_T('\0');
           context++)
        if (! poolAppendChar(&parser->m_tempPool, *context))
          return XML_FALSE;
      if (! poolAppendChar(&parser->m_tempPool, XML_T('\0')))
        return XML_FALSE;
      if (addBinding(parser, prefix, NULL, poolStart(&parser->m_tempPool),
                     &parser->m_inheritedBindings)
          != XML_ERROR_NONE)
        return XML_FALSE;
      poolDiscard(&parser->m_tempPool);
      if (*context != XML_T('\0'))
        ++context;
      s = context;
    } else {
      if (!
poolAppendChar(&parser->m_tempPool, *s))
        return XML_FALSE;
      s++;
    }
  }
  return XML_TRUE;
}

/* Collapse runs of whitespace (space, CR, LF) in a public identifier
   to single spaces and trim leading/trailing space, in place. */
static void FASTCALL
normalizePublicId(XML_Char *publicId) {
  XML_Char *p = publicId;
  XML_Char *s;
  for (s = publicId; *s; s++) {
    switch (*s) {
    case 0x20:
    case 0xD:
    case 0xA:
      if (p != publicId && p[-1] != 0x20)
        *p++ = 0x20;
      break;
    default:
      *p++ = *s;
    }
  }
  if (p != publicId && p[-1] == 0x20)
    --p;
  *p = XML_T('\0');
}

/* Allocate and initialise an empty DTD using the given memory suite.
   Returns NULL on out-of-memory. */
static DTD *
dtdCreate(const XML_Memory_Handling_Suite *ms) {
  DTD *p = (DTD *)ms->malloc_fcn(sizeof(DTD));
  if (p == NULL)
    return p;
  poolInit(&(p->pool), ms);
  poolInit(&(p->entityValuePool), ms);
  hashTableInit(&(p->generalEntities), ms);
  hashTableInit(&(p->elementTypes), ms);
  hashTableInit(&(p->attributeIds), ms);
  hashTableInit(&(p->prefixes), ms);
#ifdef XML_DTD
  p->paramEntityRead = XML_FALSE;
  hashTableInit(&(p->paramEntities), ms);
#endif /* XML_DTD */
  p->defaultPrefix.name = NULL;
  p->defaultPrefix.binding = NULL;

  p->in_eldecl = XML_FALSE;
  p->scaffIndex = NULL;
  p->scaffold = NULL;
  p->scaffLevel = 0;
  p->scaffSize = 0;
  p->scaffCount = 0;
  p->contentStringLen = 0;

  p->keepProcessing = XML_TRUE;
  p->hasParamEntityRefs = XML_FALSE;
  p->standalone = XML_FALSE;
  return p;
}

/* Reset a DTD to its freshly-created state: release per-element
   default attribute arrays and scaffolding, clear all tables and
   pools (pool memory is kept on the free lists for reuse). */
static void
dtdReset(DTD *p, const XML_Memory_Handling_Suite *ms) {
  HASH_TABLE_ITER iter;
  hashTableIterInit(&iter, &(p->elementTypes));
  for (;;) {
    ELEMENT_TYPE *e = (ELEMENT_TYPE *)hashTableIterNext(&iter);
    if (! e)
      break;
    if (e->allocDefaultAtts != 0)
      ms->free_fcn(e->defaultAtts);
  }
  hashTableClear(&(p->generalEntities));
#ifdef XML_DTD
  p->paramEntityRead = XML_FALSE;
  hashTableClear(&(p->paramEntities));
#endif /* XML_DTD */
  hashTableClear(&(p->elementTypes));
  hashTableClear(&(p->attributeIds));
  hashTableClear(&(p->prefixes));
  poolClear(&(p->pool));
  poolClear(&(p->entityValuePool));
  p->defaultPrefix.name = NULL;
  p->defaultPrefix.binding = NULL;

  p->in_eldecl = XML_FALSE;
  ms->free_fcn(p->scaffIndex);
  p->scaffIndex = NULL;
  ms->free_fcn(p->scaffold);
  p->scaffold = NULL;
  p->scaffLevel = 0;
  p->scaffSize = 0;
  p->scaffCount = 0;
  p->contentStringLen = 0;

  p->keepProcessing = XML_TRUE;
  p->hasParamEntityRefs = XML_FALSE;
  p->standalone = XML_FALSE;
}

/* Free all storage owned by a DTD.  Scaffolding is shared with copies
   (see dtdCopy), so it is only freed for the document entity's DTD. */
static void
dtdDestroy(DTD *p, XML_Bool isDocEntity, const XML_Memory_Handling_Suite *ms) {
  HASH_TABLE_ITER iter;
  hashTableIterInit(&iter, &(p->elementTypes));
  for (;;) {
    ELEMENT_TYPE *e = (ELEMENT_TYPE *)hashTableIterNext(&iter);
    if (! e)
      break;
    if (e->allocDefaultAtts != 0)
      ms->free_fcn(e->defaultAtts);
  }
  hashTableDestroy(&(p->generalEntities));
#ifdef XML_DTD
  hashTableDestroy(&(p->paramEntities));
#endif /* XML_DTD */
  hashTableDestroy(&(p->elementTypes));
  hashTableDestroy(&(p->attributeIds));
  hashTableDestroy(&(p->prefixes));
  poolDestroy(&(p->pool));
  poolDestroy(&(p->entityValuePool));
  if (isDocEntity) {
    ms->free_fcn(p->scaffIndex);
    ms->free_fcn(p->scaffold);
  }
  ms->free_fcn(p);
}

/* Do a deep copy of the DTD.  Return 0 for out of memory,
   non-zero otherwise.  The new DTD has already been initialized.
*/
static int
dtdCopy(XML_Parser oldParser, DTD *newDtd, const DTD *oldDtd,
        const XML_Memory_Handling_Suite *ms) {
  HASH_TABLE_ITER iter;

  /* Copy the prefix table. */
  hashTableIterInit(&iter, &(oldDtd->prefixes));
  for (;;) {
    const XML_Char *name;
    const PREFIX *oldP = (PREFIX *)hashTableIterNext(&iter);
    if (! oldP)
      break;
    name = poolCopyString(&(newDtd->pool), oldP->name);
    if (! name)
      return 0;
    if (!
lookup(oldParser, &(newDtd->prefixes), name, sizeof(PREFIX)))
      return 0;
  }

  hashTableIterInit(&iter, &(oldDtd->attributeIds));

  /* Copy the attribute id table. */
  for (;;) {
    ATTRIBUTE_ID *newA;
    const XML_Char *name;
    const ATTRIBUTE_ID *oldA = (ATTRIBUTE_ID *)hashTableIterNext(&iter);

    if (! oldA)
      break;
    /* Remember to allocate the scratch byte before the name. */
    if (! poolAppendChar(&(newDtd->pool), XML_T('\0')))
      return 0;
    name = poolCopyString(&(newDtd->pool), oldA->name);
    if (! name)
      return 0;
    ++name;
    newA = (ATTRIBUTE_ID *)lookup(oldParser, &(newDtd->attributeIds), name,
                                  sizeof(ATTRIBUTE_ID));
    if (! newA)
      return 0;
    newA->maybeTokenized = oldA->maybeTokenized;
    if (oldA->prefix) {
      newA->xmlns = oldA->xmlns;
      if (oldA->prefix == &oldDtd->defaultPrefix)
        newA->prefix = &newDtd->defaultPrefix;
      else
        /* Prefix table was copied above, so this lookup cannot create. */
        newA->prefix = (PREFIX *)lookup(oldParser, &(newDtd->prefixes),
                                        oldA->prefix->name, 0);
    }
  }

  /* Copy the element type table. */
  hashTableIterInit(&iter, &(oldDtd->elementTypes));

  for (;;) {
    int i;
    ELEMENT_TYPE *newE;
    const XML_Char *name;
    const ELEMENT_TYPE *oldE = (ELEMENT_TYPE *)hashTableIterNext(&iter);
    if (! oldE)
      break;
    name = poolCopyString(&(newDtd->pool), oldE->name);
    if (! name)
      return 0;
    newE = (ELEMENT_TYPE *)lookup(oldParser, &(newDtd->elementTypes), name,
                                  sizeof(ELEMENT_TYPE));
    if (! newE)
      return 0;
    if (oldE->nDefaultAtts) {
      newE->defaultAtts = (DEFAULT_ATTRIBUTE *)ms->malloc_fcn(
          oldE->nDefaultAtts * sizeof(DEFAULT_ATTRIBUTE));
      if (! newE->defaultAtts) {
        return 0;
      }
    }
    if (oldE->idAtt)
      newE->idAtt = (ATTRIBUTE_ID *)lookup(oldParser, &(newDtd->attributeIds),
                                           oldE->idAtt->name, 0);
    newE->allocDefaultAtts = newE->nDefaultAtts = oldE->nDefaultAtts;
    if (oldE->prefix)
      newE->prefix = (PREFIX *)lookup(oldParser, &(newDtd->prefixes),
                                      oldE->prefix->name, 0);
    for (i = 0; i < newE->nDefaultAtts; i++) {
      newE->defaultAtts[i].id = (ATTRIBUTE_ID *)lookup(
          oldParser, &(newDtd->attributeIds), oldE->defaultAtts[i].id->name, 0);
      newE->defaultAtts[i].isCdata = oldE->defaultAtts[i].isCdata;
      if (oldE->defaultAtts[i].value) {
        newE->defaultAtts[i].value
            = poolCopyString(&(newDtd->pool), oldE->defaultAtts[i].value);
        if (! newE->defaultAtts[i].value)
          return 0;
      } else
        newE->defaultAtts[i].value = NULL;
    }
  }

  /* Copy the entity tables. */
  if (! copyEntityTable(oldParser, &(newDtd->generalEntities), &(newDtd->pool),
                        &(oldDtd->generalEntities)))
    return 0;

#ifdef XML_DTD
  if (! copyEntityTable(oldParser, &(newDtd->paramEntities), &(newDtd->pool),
                        &(oldDtd->paramEntities)))
    return 0;
  newDtd->paramEntityRead = oldDtd->paramEntityRead;
#endif /* XML_DTD */

  newDtd->keepProcessing = oldDtd->keepProcessing;
  newDtd->hasParamEntityRefs = oldDtd->hasParamEntityRefs;
  newDtd->standalone = oldDtd->standalone;

  /* Don't want deep copying for scaffolding */
  newDtd->in_eldecl = oldDtd->in_eldecl;
  newDtd->scaffold = oldDtd->scaffold;
  newDtd->contentStringLen = oldDtd->contentStringLen;
  newDtd->scaffSize = oldDtd->scaffSize;
  newDtd->scaffLevel = oldDtd->scaffLevel;
  newDtd->scaffIndex = oldDtd->scaffIndex;

  return 1;
} /* End dtdCopy */

/* Deep-copy an entity hash table, interning all strings into newPool.
   Returns 0 on out-of-memory, non-zero otherwise. */
static int
copyEntityTable(XML_Parser oldParser, HASH_TABLE *newTable,
                STRING_POOL *newPool, const HASH_TABLE *oldTable) {
  HASH_TABLE_ITER iter;
  const XML_Char *cachedOldBase = NULL;
  const XML_Char *cachedNewBase = NULL;

  hashTableIterInit(&iter, oldTable);

  for (;;) {
    ENTITY *newE;
    const XML_Char *name;
    const ENTITY *oldE = (ENTITY *)hashTableIterNext(&iter);
    if (!
oldE)
      break;
    name = poolCopyString(newPool, oldE->name);
    if (! name)
      return 0;
    newE = (ENTITY *)lookup(oldParser, newTable, name, sizeof(ENTITY));
    if (! newE)
      return 0;
    if (oldE->systemId) {
      const XML_Char *tem = poolCopyString(newPool, oldE->systemId);
      if (! tem)
        return 0;
      newE->systemId = tem;
      if (oldE->base) {
        /* The same base URI tends to repeat across consecutive
           entities; cache the last copy to avoid re-storing it. */
        if (oldE->base == cachedOldBase)
          newE->base = cachedNewBase;
        else {
          cachedOldBase = oldE->base;
          tem = poolCopyString(newPool, cachedOldBase);
          if (! tem)
            return 0;
          cachedNewBase = newE->base = tem;
        }
      }
      if (oldE->publicId) {
        tem = poolCopyString(newPool, oldE->publicId);
        if (! tem)
          return 0;
        newE->publicId = tem;
      }
    } else {
      /* Internal entity: copy its replacement text verbatim. */
      const XML_Char *tem
          = poolCopyStringN(newPool, oldE->textPtr, oldE->textLen);
      if (! tem)
        return 0;
      newE->textPtr = tem;
      newE->textLen = oldE->textLen;
    }
    if (oldE->notation) {
      const XML_Char *tem = poolCopyString(newPool, oldE->notation);
      if (! tem)
        return 0;
      newE->notation = tem;
    }
    newE->is_param = oldE->is_param;
    newE->is_internal = oldE->is_internal;
  }
  return 1;
}

#define INIT_POWER 6

/* Return XML_TRUE iff the two NUL-terminated keys are equal. */
static XML_Bool FASTCALL
keyeq(KEY s1, KEY s2) {
  for (; *s1 == *s2; s1++, s2++)
    if (*s1 == 0)
      return XML_TRUE;
  return XML_FALSE;
}

/* Length of a key in XML_Char units, excluding the terminator. */
static size_t
keylen(KEY s) {
  size_t len = 0;
  for (; *s; s++, len++)
    ;
  return len;
}

static void
copy_salt_to_sipkey(XML_Parser parser, struct sipkey *key) {
  key->k[0] = 0;
  key->k[1] = get_hash_secret_salt(parser);
}

/* Salted SipHash-2-4 of a key; the per-parser salt defends the hash
   tables against collision (hash-flooding) attacks. */
static unsigned long FASTCALL
hash(XML_Parser parser, KEY s) {
  struct siphash state;
  struct sipkey key;
  (void)sip24_valid;
  copy_salt_to_sipkey(parser, &key);
  sip24_init(&state, &key);
  sip24_update(&state, s, keylen(s) * sizeof(XML_Char));
  return (unsigned long)sip24_final(&state);
}

/* Find name in an open-addressed hash table.  If absent and createSize
   is non-zero, insert a zero-filled entry of createSize bytes (keeping
   a pointer to name, which must outlive the table).  The table is
   doubled once it becomes half full.  Returns NULL when the name is
   not found and createSize is 0, or on out-of-memory. */
static NAMED *
lookup(XML_Parser parser, HASH_TABLE *table, KEY name, size_t createSize) {
  size_t i;
  if (table->size == 0) {
    /* First insertion: allocate the initial bucket array. */
    size_t tsize;
    if (! createSize)
      return NULL;
    table->power = INIT_POWER;
    /* table->size is a power of 2 */
    table->size = (size_t)1 << INIT_POWER;
    tsize = table->size * sizeof(NAMED *);
    table->v = (NAMED **)table->mem->malloc_fcn(tsize);
    if (! table->v) {
      table->size = 0;
      return NULL;
    }
    memset(table->v, 0, tsize);
    i = hash(parser, name) & ((unsigned long)table->size - 1);
  } else {
    unsigned long h = hash(parser, name);
    unsigned long mask = (unsigned long)table->size - 1;
    unsigned char step = 0;
    i = h & mask;
    while (table->v[i]) {
      if (keyeq(name, table->v[i]->name))
        return table->v[i];
      if (! step)
        step = PROBE_STEP(h, mask, table->power);
      /* Step backwards through the table, wrapping at index 0. */
      i < step ? (i += table->size - step) : (i -= step);
    }
    if (! createSize)
      return NULL;

    /* check for overflow (table is half full) */
    if (table->used >> (table->power - 1)) {
      unsigned char newPower = table->power + 1;
      size_t newSize = (size_t)1 << newPower;
      unsigned long newMask = (unsigned long)newSize - 1;
      size_t tsize = newSize * sizeof(NAMED *);
      NAMED **newV = (NAMED **)table->mem->malloc_fcn(tsize);
      if (! newV)
        return NULL;
      memset(newV, 0, tsize);
      /* Rehash every existing entry into the doubled table. */
      for (i = 0; i < table->size; i++)
        if (table->v[i]) {
          unsigned long newHash = hash(parser, table->v[i]->name);
          size_t j = newHash & newMask;
          step = 0;
          while (newV[j]) {
            if (! step)
              step = PROBE_STEP(newHash, newMask, newPower);
            j < step ? (j += newSize - step) : (j -= step);
          }
          newV[j] = table->v[i];
        }
      table->mem->free_fcn(table->v);
      table->v = newV;
      table->power = newPower;
      table->size = newSize;
      /* Re-probe for the insertion slot in the resized table. */
      i = h & newMask;
      step = 0;
      while (table->v[i]) {
        if (! step)
          step = PROBE_STEP(h, newMask, newPower);
        i < step ? (i += newSize - step) : (i -= step);
      }
    }
  }
  table->v[i] = (NAMED *)table->mem->malloc_fcn(createSize);
  if (! table->v[i])
    return NULL;
  memset(table->v[i], 0, createSize);
  table->v[i]->name = name;
  (table->used)++;
  return table->v[i];
}

/* Free all entries but keep the bucket array for reuse. */
static void FASTCALL
hashTableClear(HASH_TABLE *table) {
  size_t i;
  for (i = 0; i < table->size; i++) {
    table->mem->free_fcn(table->v[i]);
    table->v[i] = NULL;
  }
  table->used = 0;
}

/* Free all entries and the bucket array itself. */
static void FASTCALL
hashTableDestroy(HASH_TABLE *table) {
  size_t i;
  for (i = 0; i < table->size; i++)
    table->mem->free_fcn(table->v[i]);
  table->mem->free_fcn(table->v);
}

static void FASTCALL
hashTableInit(HASH_TABLE *p, const XML_Memory_Handling_Suite *ms) {
  p->power = 0;
  p->size = 0;
  p->used = 0;
  p->v = NULL;
  p->mem = ms;
}

static void FASTCALL
hashTableIterInit(HASH_TABLE_ITER *iter, const HASH_TABLE *table) {
  iter->p = table->v;
  iter->end = iter->p + table->size;
}

/* Return the next occupied entry, or NULL when the table is exhausted. */
static NAMED *FASTCALL
hashTableIterNext(HASH_TABLE_ITER *iter) {
  while (iter->p != iter->end) {
    NAMED *tem = *(iter->p)++;
    if (tem)
      return tem;
  }
  return NULL;
}

static void FASTCALL
poolInit(STRING_POOL *pool, const XML_Memory_Handling_Suite *ms) {
  pool->blocks = NULL;
  pool->freeBlocks = NULL;
  pool->start = NULL;
  pool->ptr = NULL;
  pool->end = NULL;
  pool->mem = ms;
}

/* Empty the pool, moving all of its blocks onto the free list so the
   memory can be reused without reallocation. */
static void FASTCALL
poolClear(STRING_POOL *pool) {
  if (! pool->freeBlocks)
    pool->freeBlocks = pool->blocks;
  else {
    BLOCK *p = pool->blocks;
    while (p) {
      BLOCK *tem = p->next;
      p->next = pool->freeBlocks;
      pool->freeBlocks = p;
      p = tem;
    }
  }
  pool->blocks = NULL;
  pool->start = NULL;
  pool->ptr = NULL;
  pool->end = NULL;
}

/* Release every block on both the live and free lists. */
static void FASTCALL
poolDestroy(STRING_POOL *pool) {
  BLOCK *p = pool->blocks;
  while (p) {
    BLOCK *tem = p->next;
    pool->mem->free_fcn(p);
    p = tem;
  }
  p = pool->freeBlocks;
  while (p) {
    BLOCK *tem = p->next;
    pool->mem->free_fcn(p);
    p = tem;
  }
}

/* Convert [ptr, end) from enc to the internal encoding, appending to
   the pool's current (unfinished) string and growing the pool as
   needed.  Returns the string start, or NULL on out-of-memory. */
static XML_Char *
poolAppend(STRING_POOL *pool, const ENCODING *enc, const char *ptr,
           const char *end) {
  if (! pool->ptr && !
poolGrow(pool))
    return NULL;
  for (;;) {
    const enum XML_Convert_Result convert_res = XmlConvert(
        enc, &ptr, end, (ICHAR **)&(pool->ptr), (ICHAR *)pool->end);
    if ((convert_res == XML_CONVERT_COMPLETED)
        || (convert_res == XML_CONVERT_INPUT_INCOMPLETE))
      break;
    if (! poolGrow(pool))
      return NULL;
  }
  return pool->start;
}

/* Copy the NUL-terminated string s (terminator included) into the pool
   and finish the string.  Returns the copy, or NULL on out-of-memory. */
static const XML_Char *FASTCALL
poolCopyString(STRING_POOL *pool, const XML_Char *s) {
  do {
    if (! poolAppendChar(pool, *s))
      return NULL;
  } while (*s++);
  s = pool->start;
  poolFinish(pool);
  return s;
}

/* Copy exactly n XML_Chars from s into the pool (no terminator added)
   and finish the string.  Returns the copy, or NULL on out-of-memory. */
static const XML_Char *
poolCopyStringN(STRING_POOL *pool, const XML_Char *s, int n) {
  if (! pool->ptr && ! poolGrow(pool)) {
    /* The following line is unreachable given the current usage of
     * poolCopyStringN().  Currently it is called from exactly one
     * place to copy the text of a simple general entity.  By that
     * point, the name of the entity is already stored in the pool, so
     * pool->ptr cannot be NULL.
     *
     * If poolCopyStringN() is used elsewhere as it well might be,
     * this line may well become executable again.  Regardless, this
     * sort of check shouldn't be removed lightly, so we just exclude
     * it from the coverage statistics.
     */
    return NULL; /* LCOV_EXCL_LINE */
  }
  for (; n > 0; --n, s++) {
    if (! poolAppendChar(pool, *s))
      return NULL;
  }
  s = pool->start;
  poolFinish(pool);
  return s;
}

/* Append the NUL-terminated string s (terminator excluded) to the
   pool's current string, which is left unfinished.  Returns the pool
   start, or NULL on out-of-memory. */
static const XML_Char *FASTCALL
poolAppendString(STRING_POOL *pool, const XML_Char *s) {
  while (*s) {
    if (! poolAppendChar(pool, *s))
      return NULL;
    s++;
  }
  return pool->start;
}

/* Convert and store [ptr, end) as a NUL-terminated string.  Returns
   the string, or NULL on out-of-memory. */
static XML_Char *
poolStoreString(STRING_POOL *pool, const ENCODING *enc, const char *ptr,
                const char *end) {
  if (! poolAppend(pool, enc, ptr, end))
    return NULL;
  if (pool->ptr == pool->end && ! poolGrow(pool))
    return NULL;
  *(pool->ptr)++ = 0;
  return pool->start;
}

/* Bytes needed for a BLOCK holding blockSize XML_Chars, or 0 if the
   computation would overflow a signed int. */
static size_t
poolBytesToAllocateFor(int blockSize) {
  /* Unprotected math would be:
  ** return offsetof(BLOCK, s) + blockSize * sizeof(XML_Char);
  **
  ** Detect overflow, avoiding _signed_ overflow undefined behavior
  ** For a + b * c we check b * c in isolation first, so that addition of a
  ** on top has no chance of making us accept a small non-negative number
  */
  const size_t stretch = sizeof(XML_Char); /* can be 4 bytes */

  if (blockSize <= 0)
    return 0;

  if (blockSize > (int)(INT_MAX / stretch))
    return 0;

  {
    const int stretchedBlockSize = blockSize * (int)stretch;
    const int bytesToAllocate
        = (int)(offsetof(BLOCK, s) + (unsigned)stretchedBlockSize);
    if (bytesToAllocate < 0)
      return 0;

    return (size_t)bytesToAllocate;
  }
}

/* Ensure the pool's current block has room for at least one more
   XML_Char: reuse a free block, grow the current block in place, or
   allocate a new (doubled) block and move the in-progress string into
   it.  Returns XML_FALSE on overflow or out-of-memory. */
static XML_Bool FASTCALL
poolGrow(STRING_POOL *pool) {
  if (pool->freeBlocks) {
    if (pool->start == 0) {
      /* Nothing in progress: just adopt the first free block. */
      pool->blocks = pool->freeBlocks;
      pool->freeBlocks = pool->freeBlocks->next;
      pool->blocks->next = NULL;
      pool->start = pool->blocks->s;
      pool->end = pool->start + pool->blocks->size;
      pool->ptr = pool->start;
      return XML_TRUE;
    }
    if (pool->end - pool->start < pool->freeBlocks->size) {
      /* The free block is bigger: move the current string into it. */
      BLOCK *tem = pool->freeBlocks->next;
      pool->freeBlocks->next = pool->blocks;
      pool->blocks = pool->freeBlocks;
      pool->freeBlocks = tem;
      memcpy(pool->blocks->s, pool->start,
             (pool->end - pool->start) * sizeof(XML_Char));
      pool->ptr = pool->blocks->s + (pool->ptr - pool->start);
      pool->start = pool->blocks->s;
      pool->end = pool->start + pool->blocks->size;
      return XML_TRUE;
    }
  }
  if (pool->blocks && pool->start == pool->blocks->s) {
    /* The in-progress string occupies the whole head block: grow that
       block in place with realloc. */
    BLOCK *temp;
    int blockSize = (int)((unsigned)(pool->end - pool->start) * 2U);
    size_t bytesToAllocate;

    /* NOTE: Needs to be calculated prior to calling `realloc`
             to avoid dangling pointers: */
    const ptrdiff_t offsetInsideBlock = pool->ptr - pool->start;

    if (blockSize < 0) {
      /* This condition traps a situation where either more than
       * INT_MAX/2 bytes have already been allocated.
This isn't * readily testable, since it is unlikely that an average * machine will have that much memory, so we exclude it from the * coverage statistics. */ return XML_FALSE; /* LCOV_EXCL_LINE */ } bytesToAllocate = poolBytesToAllocateFor(blockSize); if (bytesToAllocate == 0) return XML_FALSE; temp = (BLOCK *)pool->mem->realloc_fcn(pool->blocks, (unsigned)bytesToAllocate); if (temp == NULL) return XML_FALSE; pool->blocks = temp; pool->blocks->size = blockSize; pool->ptr = pool->blocks->s + offsetInsideBlock; pool->start = pool->blocks->s; pool->end = pool->start + blockSize; } else { BLOCK *tem; int blockSize = (int)(pool->end - pool->start); size_t bytesToAllocate; if (blockSize < 0) { /* This condition traps a situation where either more than * INT_MAX bytes have already been allocated (which is prevented * by various pieces of program logic, not least this one, never * mind the unlikelihood of actually having that much memory) or * the pool control fields have been corrupted (which could * conceivably happen in an extremely buggy user handler * function). Either way it isn't readily testable, so we * exclude it from the coverage statistics. */ return XML_FALSE; /* LCOV_EXCL_LINE */ } if (blockSize < INIT_BLOCK_SIZE) blockSize = INIT_BLOCK_SIZE; else { /* Detect overflow, avoiding _signed_ overflow undefined behavior */ if ((int)((unsigned)blockSize * 2U) < 0) { return XML_FALSE; } blockSize *= 2; } bytesToAllocate = poolBytesToAllocateFor(blockSize); if (bytesToAllocate == 0) return XML_FALSE; tem = (BLOCK *)pool->mem->malloc_fcn(bytesToAllocate); if (! 
tem) return XML_FALSE; tem->size = blockSize; tem->next = pool->blocks; pool->blocks = tem; if (pool->ptr != pool->start) memcpy(tem->s, pool->start, (pool->ptr - pool->start) * sizeof(XML_Char)); pool->ptr = tem->s + (pool->ptr - pool->start); pool->start = tem->s; pool->end = tem->s + blockSize; } return XML_TRUE; } static int FASTCALL nextScaffoldPart(XML_Parser parser) { DTD *const dtd = parser->m_dtd; /* save one level of indirection */ CONTENT_SCAFFOLD *me; int next; if (! dtd->scaffIndex) { dtd->scaffIndex = (int *)MALLOC(parser, parser->m_groupSize * sizeof(int)); if (! dtd->scaffIndex) return -1; dtd->scaffIndex[0] = 0; } if (dtd->scaffCount >= dtd->scaffSize) { CONTENT_SCAFFOLD *temp; if (dtd->scaffold) { temp = (CONTENT_SCAFFOLD *)REALLOC( parser, dtd->scaffold, dtd->scaffSize * 2 * sizeof(CONTENT_SCAFFOLD)); if (temp == NULL) return -1; dtd->scaffSize *= 2; } else { temp = (CONTENT_SCAFFOLD *)MALLOC(parser, INIT_SCAFFOLD_ELEMENTS * sizeof(CONTENT_SCAFFOLD)); if (temp == NULL) return -1; dtd->scaffSize = INIT_SCAFFOLD_ELEMENTS; } dtd->scaffold = temp; } next = dtd->scaffCount++; me = &dtd->scaffold[next]; if (dtd->scaffLevel) { CONTENT_SCAFFOLD *parent = &dtd->scaffold[dtd->scaffIndex[dtd->scaffLevel - 1]]; if (parent->lastchild) { dtd->scaffold[parent->lastchild].nextsib = next; } if (! parent->childcnt) parent->firstchild = next; parent->lastchild = next; parent->childcnt++; } me->firstchild = me->lastchild = me->childcnt = me->nextsib = 0; return next; } static void build_node(XML_Parser parser, int src_node, XML_Content *dest, XML_Content **contpos, XML_Char **strpos) { DTD *const dtd = parser->m_dtd; /* save one level of indirection */ dest->type = dtd->scaffold[src_node].type; dest->quant = dtd->scaffold[src_node].quant; if (dest->type == XML_CTYPE_NAME) { const XML_Char *src; dest->name = *strpos; src = dtd->scaffold[src_node].name; for (;;) { *(*strpos)++ = *src; if (! 
*src) break; src++; } dest->numchildren = 0; dest->children = NULL; } else { unsigned int i; int cn; dest->numchildren = dtd->scaffold[src_node].childcnt; dest->children = *contpos; *contpos += dest->numchildren; for (i = 0, cn = dtd->scaffold[src_node].firstchild; i < dest->numchildren; i++, cn = dtd->scaffold[cn].nextsib) { build_node(parser, cn, &(dest->children[i]), contpos, strpos); } dest->name = NULL; } } static XML_Content * build_model(XML_Parser parser) { DTD *const dtd = parser->m_dtd; /* save one level of indirection */ XML_Content *ret; XML_Content *cpos; XML_Char *str; int allocsize = (dtd->scaffCount * sizeof(XML_Content) + (dtd->contentStringLen * sizeof(XML_Char))); ret = (XML_Content *)MALLOC(parser, allocsize); if (! ret) return NULL; str = (XML_Char *)(&ret[dtd->scaffCount]); cpos = &ret[1]; build_node(parser, 0, ret, &cpos, &str); return ret; } static ELEMENT_TYPE * getElementType(XML_Parser parser, const ENCODING *enc, const char *ptr, const char *end) { DTD *const dtd = parser->m_dtd; /* save one level of indirection */ const XML_Char *name = poolStoreString(&dtd->pool, enc, ptr, end); ELEMENT_TYPE *ret; if (! name) return NULL; ret = (ELEMENT_TYPE *)lookup(parser, &dtd->elementTypes, name, sizeof(ELEMENT_TYPE)); if (! ret) return NULL; if (ret->name != name) poolDiscard(&dtd->pool); else { poolFinish(&dtd->pool); if (! setElementTypePrefix(parser, ret)) return NULL; } return ret; } static XML_Char * copyString(const XML_Char *s, const XML_Memory_Handling_Suite *memsuite) { int charsRequired = 0; XML_Char *result; /* First determine how long the string is */ while (s[charsRequired] != 0) { charsRequired++; } /* Include the terminator */ charsRequired++; /* Now allocate space for the copy */ result = memsuite->malloc_fcn(charsRequired * sizeof(XML_Char)); if (result == NULL) return NULL; /* Copy the original into place */ memcpy(result, s, charsRequired * sizeof(XML_Char)); return result; }
externalParEntProcessor(XML_Parser parser, const char *s, const char *end, const char **nextPtr) { const char *next = s; int tok; tok = XmlPrologTok(parser->m_encoding, s, end, &next); if (tok <= 0) { if (! parser->m_parsingStatus.finalBuffer && tok != XML_TOK_INVALID) { *nextPtr = s; return XML_ERROR_NONE; } switch (tok) { case XML_TOK_INVALID: return XML_ERROR_INVALID_TOKEN; case XML_TOK_PARTIAL: return XML_ERROR_UNCLOSED_TOKEN; case XML_TOK_PARTIAL_CHAR: return XML_ERROR_PARTIAL_CHAR; case XML_TOK_NONE: /* start == end */ default: break; } } /* This would cause the next stage, i.e. doProlog to be passed XML_TOK_BOM. However, when parsing an external subset, doProlog will not accept a BOM as valid, and report a syntax error, so we have to skip the BOM */ else if (tok == XML_TOK_BOM) { s = next; tok = XmlPrologTok(parser->m_encoding, s, end, &next); } parser->m_processor = prologProcessor; return doProlog(parser, parser->m_encoding, s, end, tok, next, nextPtr, (XML_Bool)! parser->m_parsingStatus.finalBuffer); }
externalParEntProcessor(XML_Parser parser, const char *s, const char *end, const char **nextPtr) { const char *next = s; int tok; tok = XmlPrologTok(parser->m_encoding, s, end, &next); if (tok <= 0) { if (! parser->m_parsingStatus.finalBuffer && tok != XML_TOK_INVALID) { *nextPtr = s; return XML_ERROR_NONE; } switch (tok) { case XML_TOK_INVALID: return XML_ERROR_INVALID_TOKEN; case XML_TOK_PARTIAL: return XML_ERROR_UNCLOSED_TOKEN; case XML_TOK_PARTIAL_CHAR: return XML_ERROR_PARTIAL_CHAR; case XML_TOK_NONE: /* start == end */ default: break; } } /* This would cause the next stage, i.e. doProlog to be passed XML_TOK_BOM. However, when parsing an external subset, doProlog will not accept a BOM as valid, and report a syntax error, so we have to skip the BOM */ else if (tok == XML_TOK_BOM) { s = next; tok = XmlPrologTok(parser->m_encoding, s, end, &next); } parser->m_processor = prologProcessor; return doProlog(parser, parser->m_encoding, s, end, tok, next, nextPtr, (XML_Bool)! parser->m_parsingStatus.finalBuffer, XML_TRUE); }
{'added': [(404, ' XML_Bool haveMore, XML_Bool allowClosingDoctype);'), (4049, ' (XML_Bool)! parser->m_parsingStatus.finalBuffer, XML_TRUE);'), (4093, ' (XML_Bool)! parser->m_parsingStatus.finalBuffer, XML_TRUE);'), (4098, ' int tok, const char *next, const char **nextPtr, XML_Bool haveMore,'), (4099, ' XML_Bool allowClosingDoctype) {'), (4275, ' if (allowClosingDoctype != XML_TRUE) {'), (4276, ' /* Must not close doctype from within expanded parameter entities */'), (4277, ' return XML_ERROR_INVALID_TOKEN;'), (4278, ' }'), (4279, ''), (5183, ' tok, next, &next, XML_FALSE, XML_FALSE);'), (5226, ' tok, next, &next, XML_FALSE, XML_TRUE);'), (5253, ' (XML_Bool)! parser->m_parsingStatus.finalBuffer, XML_TRUE);')], 'deleted': [(404, ' XML_Bool haveMore);'), (4049, ' (XML_Bool)! parser->m_parsingStatus.finalBuffer);'), (4093, ' (XML_Bool)! parser->m_parsingStatus.finalBuffer);'), (4098, ' int tok, const char *next, const char **nextPtr, XML_Bool haveMore) {'), (5177, ' tok, next, &next, XML_FALSE);'), (5220, ' tok, next, &next, XML_FALSE);'), (5247, ' (XML_Bool)! parser->m_parsingStatus.finalBuffer);')]}
13
7
5,538
34,488
30
175
9
https://github.com/libexpat/libexpat
CVE-2019-15903
CWE-125
3,096
terminal.c
C
create_pty_only
/* vi:set ts=8 sts=4 sw=4 noet: * * VIM - Vi IMproved by Bram Moolenaar * * Do ":help uganda" in Vim to read copying and usage conditions. * Do ":help credits" in Vim to see a list of people who contributed. * See README.txt for an overview of the Vim source code. */ /* * Terminal window support, see ":help :terminal". * * There are three parts: * 1. Generic code for all systems. * Uses libvterm for the terminal emulator. * 2. The MS-Windows implementation. * Uses winpty. * 3. The Unix-like implementation. * Uses pseudo-tty's (pty's). * * For each terminal one VTerm is constructed. This uses libvterm. A copy of * this library is in the libvterm directory. * * When a terminal window is opened, a job is started that will be connected to * the terminal emulator. * * If the terminal window has keyboard focus, typed keys are converted to the * terminal encoding and writing to the job over a channel. * * If the job produces output, it is written to the terminal emulator. The * terminal emulator invokes callbacks when its screen content changes. The * line range is stored in tl_dirty_row_start and tl_dirty_row_end. Once in a * while, if the terminal window is visible, the screen contents is drawn. * * When the job ends the text is put in a buffer. Redrawing then happens from * that buffer, attributes come from the scrollback buffer tl_scrollback. * When the buffer is changed it is turned into a normal buffer, the attributes * in tl_scrollback are no longer used. */ #include "vim.h" #if defined(FEAT_TERMINAL) || defined(PROTO) #ifndef MIN # define MIN(x,y) ((x) < (y) ? (x) : (y)) #endif #ifndef MAX # define MAX(x,y) ((x) > (y) ? (x) : (y)) #endif #include "libvterm/include/vterm.h" /* This is VTermScreenCell without the characters, thus much smaller. 
*/ typedef struct { VTermScreenCellAttrs attrs; char width; VTermColor fg; VTermColor bg; } cellattr_T; typedef struct sb_line_S { int sb_cols; /* can differ per line */ cellattr_T *sb_cells; /* allocated */ cellattr_T sb_fill_attr; /* for short line */ } sb_line_T; /* typedef term_T in structs.h */ struct terminal_S { term_T *tl_next; VTerm *tl_vterm; job_T *tl_job; buf_T *tl_buffer; #if defined(FEAT_GUI) int tl_system; /* when non-zero used for :!cmd output */ int tl_toprow; /* row with first line of system terminal */ #endif /* Set when setting the size of a vterm, reset after redrawing. */ int tl_vterm_size_changed; int tl_normal_mode; /* TRUE: Terminal-Normal mode */ int tl_channel_closed; int tl_channel_recently_closed; // still need to handle tl_finish int tl_finish; #define TL_FINISH_UNSET NUL #define TL_FINISH_CLOSE 'c' /* ++close or :terminal without argument */ #define TL_FINISH_NOCLOSE 'n' /* ++noclose */ #define TL_FINISH_OPEN 'o' /* ++open */ char_u *tl_opencmd; char_u *tl_eof_chars; #ifdef WIN3264 void *tl_winpty_config; void *tl_winpty; FILE *tl_out_fd; #endif #if defined(FEAT_SESSION) char_u *tl_command; #endif char_u *tl_kill; /* last known vterm size */ int tl_rows; int tl_cols; char_u *tl_title; /* NULL or allocated */ char_u *tl_status_text; /* NULL or allocated */ /* Range of screen rows to update. Zero based. 
*/ int tl_dirty_row_start; /* MAX_ROW if nothing dirty */ int tl_dirty_row_end; /* row below last one to update */ int tl_dirty_snapshot; /* text updated after making snapshot */ #ifdef FEAT_TIMERS int tl_timer_set; proftime_T tl_timer_due; #endif int tl_postponed_scroll; /* to be scrolled up */ garray_T tl_scrollback; int tl_scrollback_scrolled; cellattr_T tl_default_color; linenr_T tl_top_diff_rows; /* rows of top diff file or zero */ linenr_T tl_bot_diff_rows; /* rows of bottom diff file */ VTermPos tl_cursor_pos; int tl_cursor_visible; int tl_cursor_blink; int tl_cursor_shape; /* 1: block, 2: underline, 3: bar */ char_u *tl_cursor_color; /* NULL or allocated */ int tl_using_altscreen; }; #define TMODE_ONCE 1 /* CTRL-\ CTRL-N used */ #define TMODE_LOOP 2 /* CTRL-W N used */ /* * List of all active terminals. */ static term_T *first_term = NULL; /* Terminal active in terminal_loop(). */ static term_T *in_terminal_loop = NULL; #define MAX_ROW 999999 /* used for tl_dirty_row_end to update all rows */ #define KEY_BUF_LEN 200 /* * Functions with separate implementation for MS-Windows and Unix-like systems. */ static int term_and_job_init(term_T *term, typval_T *argvar, char **argv, jobopt_T *opt, jobopt_T *orig_opt); static int create_pty_only(term_T *term, jobopt_T *opt); static void term_report_winsize(term_T *term, int rows, int cols); static void term_free_vterm(term_T *term); #ifdef FEAT_GUI static void update_system_term(term_T *term); #endif /* The character that we know (or assume) that the terminal expects for the * backspace key. */ static int term_backspace_char = BS; /* "Terminal" highlight group colors. */ static int term_default_cterm_fg = -1; static int term_default_cterm_bg = -1; /* Store the last set and the desired cursor properties, so that we only update * them when needed. Doing it unnecessary may result in flicker. 
*/ static char_u *last_set_cursor_color = NULL; static char_u *desired_cursor_color = NULL; static int last_set_cursor_shape = -1; static int desired_cursor_shape = -1; static int last_set_cursor_blink = -1; static int desired_cursor_blink = -1; /************************************** * 1. Generic code for all systems. */ static int cursor_color_equal(char_u *lhs_color, char_u *rhs_color) { if (lhs_color != NULL && rhs_color != NULL) return STRCMP(lhs_color, rhs_color) == 0; return lhs_color == NULL && rhs_color == NULL; } static void cursor_color_copy(char_u **to_color, char_u *from_color) { // Avoid a free & alloc if the value is already right. if (cursor_color_equal(*to_color, from_color)) return; vim_free(*to_color); *to_color = (from_color == NULL) ? NULL : vim_strsave(from_color); } static char_u * cursor_color_get(char_u *color) { return (color == NULL) ? (char_u *)"" : color; } /* * Parse 'termwinsize' and set "rows" and "cols" for the terminal size in the * current window. * Sets "rows" and/or "cols" to zero when it should follow the window size. * Return TRUE if the size is the minimum size: "24*80". */ static int parse_termwinsize(win_T *wp, int *rows, int *cols) { int minsize = FALSE; *rows = 0; *cols = 0; if (*wp->w_p_tws != NUL) { char_u *p = vim_strchr(wp->w_p_tws, 'x'); /* Syntax of value was already checked when it's set. */ if (p == NULL) { minsize = TRUE; p = vim_strchr(wp->w_p_tws, '*'); } *rows = atoi((char *)wp->w_p_tws); *cols = atoi((char *)p + 1); } return minsize; } /* * Determine the terminal size from 'termwinsize' and the current window. */ static void set_term_and_win_size(term_T *term) { #ifdef FEAT_GUI if (term->tl_system) { /* Use the whole screen for the system command. However, it will start * at the command line and scroll up as needed, using tl_toprow. 
*/ term->tl_rows = Rows; term->tl_cols = Columns; return; } #endif if (parse_termwinsize(curwin, &term->tl_rows, &term->tl_cols)) { if (term->tl_rows != 0) term->tl_rows = MAX(term->tl_rows, curwin->w_height); if (term->tl_cols != 0) term->tl_cols = MAX(term->tl_cols, curwin->w_width); } if (term->tl_rows == 0) term->tl_rows = curwin->w_height; else win_setheight_win(term->tl_rows, curwin); if (term->tl_cols == 0) term->tl_cols = curwin->w_width; else win_setwidth_win(term->tl_cols, curwin); } /* * Initialize job options for a terminal job. * Caller may overrule some of them. */ void init_job_options(jobopt_T *opt) { clear_job_options(opt); opt->jo_mode = MODE_RAW; opt->jo_out_mode = MODE_RAW; opt->jo_err_mode = MODE_RAW; opt->jo_set = JO_MODE | JO_OUT_MODE | JO_ERR_MODE; } /* * Set job options mandatory for a terminal job. */ static void setup_job_options(jobopt_T *opt, int rows, int cols) { #ifndef WIN3264 /* Win32: Redirecting the job output won't work, thus always connect stdout * here. */ if (!(opt->jo_set & JO_OUT_IO)) #endif { /* Connect stdout to the terminal. */ opt->jo_io[PART_OUT] = JIO_BUFFER; opt->jo_io_buf[PART_OUT] = curbuf->b_fnum; opt->jo_modifiable[PART_OUT] = 0; opt->jo_set |= JO_OUT_IO + JO_OUT_BUF + JO_OUT_MODIFIABLE; } #ifndef WIN3264 /* Win32: Redirecting the job output won't work, thus always connect stderr * here. */ if (!(opt->jo_set & JO_ERR_IO)) #endif { /* Connect stderr to the terminal. */ opt->jo_io[PART_ERR] = JIO_BUFFER; opt->jo_io_buf[PART_ERR] = curbuf->b_fnum; opt->jo_modifiable[PART_ERR] = 0; opt->jo_set |= JO_ERR_IO + JO_ERR_BUF + JO_ERR_MODIFIABLE; } opt->jo_pty = TRUE; if ((opt->jo_set2 & JO2_TERM_ROWS) == 0) opt->jo_term_rows = rows; if ((opt->jo_set2 & JO2_TERM_COLS) == 0) opt->jo_term_cols = cols; } /* * Close a terminal buffer (and its window). Used when creating the terminal * fails. 
*/ static void term_close_buffer(buf_T *buf, buf_T *old_curbuf) { free_terminal(buf); if (old_curbuf != NULL) { --curbuf->b_nwindows; curbuf = old_curbuf; curwin->w_buffer = curbuf; ++curbuf->b_nwindows; } /* Wiping out the buffer will also close the window and call * free_terminal(). */ do_buffer(DOBUF_WIPE, DOBUF_FIRST, FORWARD, buf->b_fnum, TRUE); } /* * Start a terminal window and return its buffer. * Use either "argvar" or "argv", the other must be NULL. * When "flags" has TERM_START_NOJOB only create the buffer, b_term and open * the window. * Returns NULL when failed. */ buf_T * term_start( typval_T *argvar, char **argv, jobopt_T *opt, int flags) { exarg_T split_ea; win_T *old_curwin = curwin; term_T *term; buf_T *old_curbuf = NULL; int res; buf_T *newbuf; int vertical = opt->jo_vertical || (cmdmod.split & WSP_VERT); jobopt_T orig_opt; // only partly filled if (check_restricted() || check_secure()) return NULL; if ((opt->jo_set & (JO_IN_IO + JO_OUT_IO + JO_ERR_IO)) == (JO_IN_IO + JO_OUT_IO + JO_ERR_IO) || (!(opt->jo_set & JO_OUT_IO) && (opt->jo_set & JO_OUT_BUF)) || (!(opt->jo_set & JO_ERR_IO) && (opt->jo_set & JO_ERR_BUF))) { EMSG(_(e_invarg)); return NULL; } term = (term_T *)alloc_clear(sizeof(term_T)); if (term == NULL) return NULL; term->tl_dirty_row_end = MAX_ROW; term->tl_cursor_visible = TRUE; term->tl_cursor_shape = VTERM_PROP_CURSORSHAPE_BLOCK; term->tl_finish = opt->jo_term_finish; #ifdef FEAT_GUI term->tl_system = (flags & TERM_START_SYSTEM); #endif ga_init2(&term->tl_scrollback, sizeof(sb_line_T), 300); vim_memset(&split_ea, 0, sizeof(split_ea)); if (opt->jo_curwin) { /* Create a new buffer in the current window. */ if (!can_abandon(curbuf, flags & TERM_START_FORCEIT)) { no_write_message(); vim_free(term); return NULL; } if (do_ecmd(0, NULL, NULL, &split_ea, ECMD_ONE, ECMD_HIDE + ((flags & TERM_START_FORCEIT) ? 
ECMD_FORCEIT : 0), curwin) == FAIL) { vim_free(term); return NULL; } } else if (opt->jo_hidden || (flags & TERM_START_SYSTEM)) { buf_T *buf; /* Create a new buffer without a window. Make it the current buffer for * a moment to be able to do the initialisations. */ buf = buflist_new((char_u *)"", NULL, (linenr_T)0, BLN_NEW | BLN_LISTED); if (buf == NULL || ml_open(buf) == FAIL) { vim_free(term); return NULL; } old_curbuf = curbuf; --curbuf->b_nwindows; curbuf = buf; curwin->w_buffer = buf; ++curbuf->b_nwindows; } else { /* Open a new window or tab. */ split_ea.cmdidx = CMD_new; split_ea.cmd = (char_u *)"new"; split_ea.arg = (char_u *)""; if (opt->jo_term_rows > 0 && !vertical) { split_ea.line2 = opt->jo_term_rows; split_ea.addr_count = 1; } if (opt->jo_term_cols > 0 && vertical) { split_ea.line2 = opt->jo_term_cols; split_ea.addr_count = 1; } if (vertical) cmdmod.split |= WSP_VERT; ex_splitview(&split_ea); if (curwin == old_curwin) { /* split failed */ vim_free(term); return NULL; } } term->tl_buffer = curbuf; curbuf->b_term = term; if (!opt->jo_hidden) { /* Only one size was taken care of with :new, do the other one. With * "curwin" both need to be done. */ if (opt->jo_term_rows > 0 && (opt->jo_curwin || vertical)) win_setheight(opt->jo_term_rows); if (opt->jo_term_cols > 0 && (opt->jo_curwin || !vertical)) win_setwidth(opt->jo_term_cols); } /* Link the new terminal in the list of active terminals. 
*/ term->tl_next = first_term; first_term = term; if (opt->jo_term_name != NULL) curbuf->b_ffname = vim_strsave(opt->jo_term_name); else if (argv != NULL) curbuf->b_ffname = vim_strsave((char_u *)"!system"); else { int i; size_t len; char_u *cmd, *p; if (argvar->v_type == VAR_STRING) { cmd = argvar->vval.v_string; if (cmd == NULL) cmd = (char_u *)""; else if (STRCMP(cmd, "NONE") == 0) cmd = (char_u *)"pty"; } else if (argvar->v_type != VAR_LIST || argvar->vval.v_list == NULL || argvar->vval.v_list->lv_len < 1 || (cmd = tv_get_string_chk( &argvar->vval.v_list->lv_first->li_tv)) == NULL) cmd = (char_u*)""; len = STRLEN(cmd) + 10; p = alloc((int)len); for (i = 0; p != NULL; ++i) { /* Prepend a ! to the command name to avoid the buffer name equals * the executable, otherwise ":w!" would overwrite it. */ if (i == 0) vim_snprintf((char *)p, len, "!%s", cmd); else vim_snprintf((char *)p, len, "!%s (%d)", cmd, i); if (buflist_findname(p) == NULL) { vim_free(curbuf->b_ffname); curbuf->b_ffname = p; break; } } } curbuf->b_fname = curbuf->b_ffname; if (opt->jo_term_opencmd != NULL) term->tl_opencmd = vim_strsave(opt->jo_term_opencmd); if (opt->jo_eof_chars != NULL) term->tl_eof_chars = vim_strsave(opt->jo_eof_chars); set_string_option_direct((char_u *)"buftype", -1, (char_u *)"terminal", OPT_FREE|OPT_LOCAL, 0); // Avoid that 'buftype' is reset when this buffer is entered. curbuf->b_p_initialized = TRUE; /* Mark the buffer as not modifiable. It can only be made modifiable after * the job finished. */ curbuf->b_p_ma = FALSE; set_term_and_win_size(term); #ifdef WIN3264 mch_memmove(orig_opt.jo_io, opt->jo_io, sizeof(orig_opt.jo_io)); #endif setup_job_options(opt, term->tl_rows, term->tl_cols); if (flags & TERM_START_NOJOB) return curbuf; #if defined(FEAT_SESSION) /* Remember the command for the session file. 
*/ if (opt->jo_term_norestore || argv != NULL) { term->tl_command = vim_strsave((char_u *)"NONE"); } else if (argvar->v_type == VAR_STRING) { char_u *cmd = argvar->vval.v_string; if (cmd != NULL && STRCMP(cmd, p_sh) != 0) term->tl_command = vim_strsave(cmd); } else if (argvar->v_type == VAR_LIST && argvar->vval.v_list != NULL && argvar->vval.v_list->lv_len > 0) { garray_T ga; listitem_T *item; ga_init2(&ga, 1, 100); for (item = argvar->vval.v_list->lv_first; item != NULL; item = item->li_next) { char_u *s = tv_get_string_chk(&item->li_tv); char_u *p; if (s == NULL) break; p = vim_strsave_fnameescape(s, FALSE); if (p == NULL) break; ga_concat(&ga, p); vim_free(p); ga_append(&ga, ' '); } if (item == NULL) { ga_append(&ga, NUL); term->tl_command = ga.ga_data; } else ga_clear(&ga); } #endif if (opt->jo_term_kill != NULL) { char_u *p = skiptowhite(opt->jo_term_kill); term->tl_kill = vim_strnsave(opt->jo_term_kill, p - opt->jo_term_kill); } /* System dependent: setup the vterm and maybe start the job in it. */ if (argv == NULL && argvar->v_type == VAR_STRING && argvar->vval.v_string != NULL && STRCMP(argvar->vval.v_string, "NONE") == 0) res = create_pty_only(term, opt); else res = term_and_job_init(term, argvar, argv, opt, &orig_opt); newbuf = curbuf; if (res == OK) { /* Get and remember the size we ended up with. Update the pty. */ vterm_get_size(term->tl_vterm, &term->tl_rows, &term->tl_cols); term_report_winsize(term, term->tl_rows, term->tl_cols); #ifdef FEAT_GUI if (term->tl_system) { /* display first line below typed command */ term->tl_toprow = msg_row + 1; term->tl_dirty_row_end = 0; } #endif /* Make sure we don't get stuck on sending keys to the job, it leads to * a deadlock if the job is waiting for Vim to read. 
*/ channel_set_nonblock(term->tl_job->jv_channel, PART_IN); if (old_curbuf != NULL) { --curbuf->b_nwindows; curbuf = old_curbuf; curwin->w_buffer = curbuf; ++curbuf->b_nwindows; } } else { term_close_buffer(curbuf, old_curbuf); return NULL; } apply_autocmds(EVENT_TERMINALOPEN, NULL, NULL, FALSE, newbuf); return newbuf; } /* * ":terminal": open a terminal window and execute a job in it. */ void ex_terminal(exarg_T *eap) { typval_T argvar[2]; jobopt_T opt; char_u *cmd; char_u *tofree = NULL; init_job_options(&opt); cmd = eap->arg; while (*cmd == '+' && *(cmd + 1) == '+') { char_u *p, *ep; cmd += 2; p = skiptowhite(cmd); ep = vim_strchr(cmd, '='); if (ep != NULL && ep < p) p = ep; if ((int)(p - cmd) == 5 && STRNICMP(cmd, "close", 5) == 0) opt.jo_term_finish = 'c'; else if ((int)(p - cmd) == 7 && STRNICMP(cmd, "noclose", 7) == 0) opt.jo_term_finish = 'n'; else if ((int)(p - cmd) == 4 && STRNICMP(cmd, "open", 4) == 0) opt.jo_term_finish = 'o'; else if ((int)(p - cmd) == 6 && STRNICMP(cmd, "curwin", 6) == 0) opt.jo_curwin = 1; else if ((int)(p - cmd) == 6 && STRNICMP(cmd, "hidden", 6) == 0) opt.jo_hidden = 1; else if ((int)(p - cmd) == 9 && STRNICMP(cmd, "norestore", 9) == 0) opt.jo_term_norestore = 1; else if ((int)(p - cmd) == 4 && STRNICMP(cmd, "kill", 4) == 0 && ep != NULL) { opt.jo_set2 |= JO2_TERM_KILL; opt.jo_term_kill = ep + 1; p = skiptowhite(cmd); } else if ((int)(p - cmd) == 4 && STRNICMP(cmd, "rows", 4) == 0 && ep != NULL && isdigit(ep[1])) { opt.jo_set2 |= JO2_TERM_ROWS; opt.jo_term_rows = atoi((char *)ep + 1); p = skiptowhite(cmd); } else if ((int)(p - cmd) == 4 && STRNICMP(cmd, "cols", 4) == 0 && ep != NULL && isdigit(ep[1])) { opt.jo_set2 |= JO2_TERM_COLS; opt.jo_term_cols = atoi((char *)ep + 1); p = skiptowhite(cmd); } else if ((int)(p - cmd) == 3 && STRNICMP(cmd, "eof", 3) == 0 && ep != NULL) { char_u *buf = NULL; char_u *keys; p = skiptowhite(cmd); *p = NUL; keys = replace_termcodes(ep + 1, &buf, TRUE, TRUE, TRUE); opt.jo_set2 |= JO2_EOF_CHARS; 
opt.jo_eof_chars = vim_strsave(keys); vim_free(buf); *p = ' '; } else { if (*p) *p = NUL; EMSG2(_("E181: Invalid attribute: %s"), cmd); goto theend; } cmd = skipwhite(p); } if (*cmd == NUL) { /* Make a copy of 'shell', an autocommand may change the option. */ tofree = cmd = vim_strsave(p_sh); /* default to close when the shell exits */ if (opt.jo_term_finish == NUL) opt.jo_term_finish = 'c'; } if (eap->addr_count > 0) { /* Write lines from current buffer to the job. */ opt.jo_set |= JO_IN_IO | JO_IN_BUF | JO_IN_TOP | JO_IN_BOT; opt.jo_io[PART_IN] = JIO_BUFFER; opt.jo_io_buf[PART_IN] = curbuf->b_fnum; opt.jo_in_top = eap->line1; opt.jo_in_bot = eap->line2; } argvar[0].v_type = VAR_STRING; argvar[0].vval.v_string = cmd; argvar[1].v_type = VAR_UNKNOWN; term_start(argvar, NULL, &opt, eap->forceit ? TERM_START_FORCEIT : 0); vim_free(tofree); theend: vim_free(opt.jo_eof_chars); } #if defined(FEAT_SESSION) || defined(PROTO) /* * Write a :terminal command to the session file to restore the terminal in * window "wp". * Return FAIL if writing fails. */ int term_write_session(FILE *fd, win_T *wp) { term_T *term = wp->w_buffer->b_term; /* Create the terminal and run the command. This is not without * risk, but let's assume the user only creates a session when this * will be OK. */ if (fprintf(fd, "terminal ++curwin ++cols=%d ++rows=%d ", term->tl_cols, term->tl_rows) < 0) return FAIL; if (term->tl_command != NULL && fputs((char *)term->tl_command, fd) < 0) return FAIL; return put_eol(fd); } /* * Return TRUE if "buf" has a terminal that should be restored. */ int term_should_restore(buf_T *buf) { term_T *term = buf->b_term; return term != NULL && (term->tl_command == NULL || STRCMP(term->tl_command, "NONE") != 0); } #endif /* * Free the scrollback buffer for "term". 
*/ static void free_scrollback(term_T *term) { int i; for (i = 0; i < term->tl_scrollback.ga_len; ++i) vim_free(((sb_line_T *)term->tl_scrollback.ga_data + i)->sb_cells); ga_clear(&term->tl_scrollback); } /* * Free a terminal and everything it refers to. * Kills the job if there is one. * Called when wiping out a buffer. */ void free_terminal(buf_T *buf) { term_T *term = buf->b_term; term_T *tp; if (term == NULL) return; if (first_term == term) first_term = term->tl_next; else for (tp = first_term; tp->tl_next != NULL; tp = tp->tl_next) if (tp->tl_next == term) { tp->tl_next = term->tl_next; break; } if (term->tl_job != NULL) { if (term->tl_job->jv_status != JOB_ENDED && term->tl_job->jv_status != JOB_FINISHED && term->tl_job->jv_status != JOB_FAILED) job_stop(term->tl_job, NULL, "kill"); job_unref(term->tl_job); } free_scrollback(term); term_free_vterm(term); vim_free(term->tl_title); #ifdef FEAT_SESSION vim_free(term->tl_command); #endif vim_free(term->tl_kill); vim_free(term->tl_status_text); vim_free(term->tl_opencmd); vim_free(term->tl_eof_chars); #ifdef WIN3264 if (term->tl_out_fd != NULL) fclose(term->tl_out_fd); #endif vim_free(term->tl_cursor_color); vim_free(term); buf->b_term = NULL; if (in_terminal_loop == term) in_terminal_loop = NULL; } /* * Get the part that is connected to the tty. Normally this is PART_IN, but * when writing buffer lines to the job it can be another. This makes it * possible to do "1,5term vim -". */ static ch_part_T get_tty_part(term_T *term) { #ifdef UNIX ch_part_T parts[3] = {PART_IN, PART_OUT, PART_ERR}; int i; for (i = 0; i < 3; ++i) { int fd = term->tl_job->jv_channel->ch_part[parts[i]].ch_fd; if (isatty(fd)) return parts[i]; } #endif return PART_IN; } /* * Write job output "msg[len]" to the vterm. 
*/ static void term_write_job_output(term_T *term, char_u *msg, size_t len) { VTerm *vterm = term->tl_vterm; size_t prevlen = vterm_output_get_buffer_current(vterm); vterm_input_write(vterm, (char *)msg, len); /* flush vterm buffer when vterm responded to control sequence */ if (prevlen != vterm_output_get_buffer_current(vterm)) { char buf[KEY_BUF_LEN]; size_t curlen = vterm_output_read(vterm, buf, KEY_BUF_LEN); if (curlen > 0) channel_send(term->tl_job->jv_channel, get_tty_part(term), (char_u *)buf, (int)curlen, NULL); } /* this invokes the damage callbacks */ vterm_screen_flush_damage(vterm_obtain_screen(vterm)); } static void update_cursor(term_T *term, int redraw) { if (term->tl_normal_mode) return; #ifdef FEAT_GUI if (term->tl_system) windgoto(term->tl_cursor_pos.row + term->tl_toprow, term->tl_cursor_pos.col); else #endif setcursor(); if (redraw) { if (term->tl_buffer == curbuf && term->tl_cursor_visible) cursor_on(); out_flush(); #ifdef FEAT_GUI if (gui.in_use) { gui_update_cursor(FALSE, FALSE); gui_mch_flush(); } #endif } } /* * Invoked when "msg" output from a job was received. Write it to the terminal * of "buffer". */ void write_to_term(buf_T *buffer, char_u *msg, channel_T *channel) { size_t len = STRLEN(msg); term_T *term = buffer->b_term; #ifdef WIN3264 /* Win32: Cannot redirect output of the job, intercept it here and write to * the file. 
*/ if (term->tl_out_fd != NULL) { ch_log(channel, "Writing %d bytes to output file", (int)len); fwrite(msg, len, 1, term->tl_out_fd); return; } #endif if (term->tl_vterm == NULL) { ch_log(channel, "NOT writing %d bytes to terminal", (int)len); return; } ch_log(channel, "writing %d bytes to terminal", (int)len); term_write_job_output(term, msg, len); #ifdef FEAT_GUI if (term->tl_system) { /* show system output, scrolling up the screen as needed */ update_system_term(term); update_cursor(term, TRUE); } else #endif /* In Terminal-Normal mode we are displaying the buffer, not the terminal * contents, thus no screen update is needed. */ if (!term->tl_normal_mode) { // Don't use update_screen() when editing the command line, it gets // cleared. // TODO: only update once in a while. ch_log(term->tl_job->jv_channel, "updating screen"); if (buffer == curbuf && (State & CMDLINE) == 0) { update_screen(VALID_NO_UPDATE); /* update_screen() can be slow, check the terminal wasn't closed * already */ if (buffer == curbuf && curbuf->b_term != NULL) update_cursor(curbuf->b_term, TRUE); } else redraw_after_callback(TRUE); } } /* * Send a mouse position and click to the vterm */ static int term_send_mouse(VTerm *vterm, int button, int pressed) { VTermModifier mod = VTERM_MOD_NONE; vterm_mouse_move(vterm, mouse_row - W_WINROW(curwin), mouse_col - curwin->w_wincol, mod); if (button != 0) vterm_mouse_button(vterm, button, pressed, mod); return TRUE; } static int enter_mouse_col = -1; static int enter_mouse_row = -1; /* * Handle a mouse click, drag or release. * Return TRUE when a mouse event is sent to the terminal. 
 */
    static int
term_mouse_click(VTerm *vterm, int key)
{
#if defined(FEAT_CLIPBOARD)
    /* For modeless selection mouse drag and release events are ignored, unless
     * they are preceded with a mouse down event */
    static int	    ignore_drag_release = TRUE;
    VTermMouseState mouse_state;

    vterm_state_get_mousestate(vterm_obtain_state(vterm), &mouse_state);
    if (mouse_state.flags == 0)
    {
	/* Terminal is not using the mouse, use modeless selection. */
	switch (key)
	{
	case K_LEFTDRAG:
	case K_LEFTRELEASE:
	case K_RIGHTDRAG:
	case K_RIGHTRELEASE:
		/* Ignore drag and release events when the button-down wasn't
		 * seen before. */
		if (ignore_drag_release)
		{
		    int save_mouse_col, save_mouse_row;

		    if (enter_mouse_col < 0)
			break;

		    /* mouse click in the window gave us focus, handle that
		     * click now */
		    save_mouse_col = mouse_col;
		    save_mouse_row = mouse_row;
		    mouse_col = enter_mouse_col;
		    mouse_row = enter_mouse_row;
		    clip_modeless(MOUSE_LEFT, TRUE, FALSE);
		    mouse_col = save_mouse_col;
		    mouse_row = save_mouse_row;
		}
		/* FALLTHROUGH */
	case K_LEFTMOUSE:
	case K_RIGHTMOUSE:
		if (key == K_LEFTRELEASE || key == K_RIGHTRELEASE)
		    ignore_drag_release = TRUE;
		else
		    ignore_drag_release = FALSE;
		/* Should we call mouse_has() here? */
		if (clip_star.available)
		{
		    int	    button, is_click, is_drag;

		    button = get_mouse_button(KEY2TERMCAP1(key),
							 &is_click, &is_drag);
		    if (mouse_model_popup() && button == MOUSE_LEFT
					       && (mod_mask & MOD_MASK_SHIFT))
		    {
			/* Translate shift-left to right button. */
			button = MOUSE_RIGHT;
			mod_mask &= ~MOD_MASK_SHIFT;
		    }
		    clip_modeless(button, is_click, is_drag);
		}
		break;

	case K_MIDDLEMOUSE:
		if (clip_star.available)
		    insert_reg('*', TRUE);
		break;
	}
	enter_mouse_col = -1;
	return FALSE;
    }
#endif
    enter_mouse_col = -1;

    /* Terminal wants the mouse: forward the event. */
    switch (key)
    {
	case K_LEFTMOUSE:
	case K_LEFTMOUSE_NM:	term_send_mouse(vterm, 1, 1); break;
	case K_LEFTDRAG:	term_send_mouse(vterm, 1, 1); break;
	case K_LEFTRELEASE:
	case K_LEFTRELEASE_NM:	term_send_mouse(vterm, 1, 0); break;
	case K_MOUSEMOVE:	term_send_mouse(vterm, 0, 0); break;
	case K_MIDDLEMOUSE:	term_send_mouse(vterm, 2, 1); break;
	case K_MIDDLEDRAG:	term_send_mouse(vterm, 2, 1); break;
	case K_MIDDLERELEASE:	term_send_mouse(vterm, 2, 0); break;
	case K_RIGHTMOUSE:	term_send_mouse(vterm, 3, 1); break;
	case K_RIGHTDRAG:	term_send_mouse(vterm, 3, 1); break;
	case K_RIGHTRELEASE:	term_send_mouse(vterm, 3, 0); break;
    }
    return TRUE;
}

/*
 * Convert typed key "c" into bytes to send to the job.
 * Return the number of bytes in "buf".
 */
    static int
term_convert_key(term_T *term, int c, char *buf)
{
    VTerm	    *vterm = term->tl_vterm;
    VTermKey	    key = VTERM_KEY_NONE;
    VTermModifier   mod = VTERM_MOD_NONE;
    int		    other = FALSE;    /* TRUE when already handled (mouse etc.) */

    switch (c)
    {
	/* don't use VTERM_KEY_ENTER, it may do an unwanted conversion */

				/* don't use VTERM_KEY_BACKSPACE, it always
				 * becomes 0x7f DEL */
	case K_BS:		c = term_backspace_char; break;

	case ESC:		key = VTERM_KEY_ESCAPE; break;
	case K_DEL:		key = VTERM_KEY_DEL; break;
	case K_DOWN:		key = VTERM_KEY_DOWN; break;
	case K_S_DOWN:		mod = VTERM_MOD_SHIFT;
				key = VTERM_KEY_DOWN; break;
	case K_END:		key = VTERM_KEY_END; break;
	case K_S_END:		mod = VTERM_MOD_SHIFT;
				key = VTERM_KEY_END; break;
	case K_C_END:		mod = VTERM_MOD_CTRL;
				key = VTERM_KEY_END; break;
	case K_F10:		key = VTERM_KEY_FUNCTION(10); break;
	case K_F11:		key = VTERM_KEY_FUNCTION(11); break;
	case K_F12:		key = VTERM_KEY_FUNCTION(12); break;
	case K_F1:		key = VTERM_KEY_FUNCTION(1); break;
	case K_F2:		key = VTERM_KEY_FUNCTION(2); break;
	case K_F3:		key = VTERM_KEY_FUNCTION(3); break;
	case K_F4:		key = VTERM_KEY_FUNCTION(4); break;
	case K_F5:		key = VTERM_KEY_FUNCTION(5); break;
	case K_F6:		key = VTERM_KEY_FUNCTION(6); break;
	case K_F7:		key = VTERM_KEY_FUNCTION(7); break;
	case K_F8:		key = VTERM_KEY_FUNCTION(8); break;
	case K_F9:		key = VTERM_KEY_FUNCTION(9); break;
	case K_HOME:		key = VTERM_KEY_HOME; break;
	case K_S_HOME:		mod = VTERM_MOD_SHIFT;
				key = VTERM_KEY_HOME; break;
	case K_C_HOME:		mod = VTERM_MOD_CTRL;
				key = VTERM_KEY_HOME; break;
	case K_INS:		key = VTERM_KEY_INS; break;
	case K_K0:		key = VTERM_KEY_KP_0; break;
	case K_K1:		key = VTERM_KEY_KP_1; break;
	case K_K2:		key = VTERM_KEY_KP_2; break;
	case K_K3:		key = VTERM_KEY_KP_3; break;
	case K_K4:		key = VTERM_KEY_KP_4; break;
	case K_K5:		key = VTERM_KEY_KP_5; break;
	case K_K6:		key = VTERM_KEY_KP_6; break;
	case K_K7:		key = VTERM_KEY_KP_7; break;
	case K_K8:		key = VTERM_KEY_KP_8; break;
	case K_K9:		key = VTERM_KEY_KP_9; break;
	case K_KDEL:		key = VTERM_KEY_DEL; break; /* TODO */
	case K_KDIVIDE:		key = VTERM_KEY_KP_DIVIDE; break;
	case K_KEND:		key = VTERM_KEY_KP_1; break; /* TODO */
	case K_KENTER:		key = VTERM_KEY_KP_ENTER; break;
	case K_KHOME:		key = VTERM_KEY_KP_7; break; /* TODO */
	case K_KINS:		key = VTERM_KEY_KP_0; break; /* TODO */
	case K_KMINUS:		key = VTERM_KEY_KP_MINUS; break;
	case K_KMULTIPLY:	key = VTERM_KEY_KP_MULT; break;
	case K_KPAGEDOWN:	key = VTERM_KEY_KP_3; break; /* TODO */
	case K_KPAGEUP:		key = VTERM_KEY_KP_9; break; /* TODO */
	case K_KPLUS:		key = VTERM_KEY_KP_PLUS; break;
	case K_KPOINT:		key = VTERM_KEY_KP_PERIOD; break;
	case K_LEFT:		key = VTERM_KEY_LEFT; break;
	case K_S_LEFT:		mod = VTERM_MOD_SHIFT;
				key = VTERM_KEY_LEFT; break;
	case K_C_LEFT:		mod = VTERM_MOD_CTRL;
				key = VTERM_KEY_LEFT; break;
	case K_PAGEDOWN:	key = VTERM_KEY_PAGEDOWN; break;
	case K_PAGEUP:		key = VTERM_KEY_PAGEUP; break;
	case K_RIGHT:		key = VTERM_KEY_RIGHT; break;
	case K_S_RIGHT:		mod = VTERM_MOD_SHIFT;
				key = VTERM_KEY_RIGHT; break;
	case K_C_RIGHT:		mod = VTERM_MOD_CTRL;
				key = VTERM_KEY_RIGHT; break;
	case K_UP:		key = VTERM_KEY_UP; break;
	case K_S_UP:		mod = VTERM_MOD_SHIFT;
				key = VTERM_KEY_UP; break;
	case TAB:		key = VTERM_KEY_TAB; break;
	case K_S_TAB:		mod = VTERM_MOD_SHIFT;
				key = VTERM_KEY_TAB; break;

	case K_MOUSEUP:		other = term_send_mouse(vterm, 5, 1); break;
	case K_MOUSEDOWN:	other = term_send_mouse(vterm, 4, 1); break;
	case K_MOUSELEFT:	/* TODO */ return 0;
	case K_MOUSERIGHT:	/* TODO */ return 0;

	case K_LEFTMOUSE:
	case K_LEFTMOUSE_NM:
	case K_LEFTDRAG:
	case K_LEFTRELEASE:
	case K_LEFTRELEASE_NM:
	case K_MOUSEMOVE:
	case K_MIDDLEMOUSE:
	case K_MIDDLEDRAG:
	case K_MIDDLERELEASE:
	case K_RIGHTMOUSE:
	case K_RIGHTDRAG:
	case K_RIGHTRELEASE:	if (!term_mouse_click(vterm, c))
				    return 0;
				other = TRUE;
				break;

	case K_X1MOUSE:		/* TODO */ return 0;
	case K_X1DRAG:		/* TODO */ return 0;
	case K_X1RELEASE:	/* TODO */ return 0;
	case K_X2MOUSE:		/* TODO */ return 0;
	case K_X2DRAG:		/* TODO */ return 0;
	case K_X2RELEASE:	/* TODO */ return 0;

	case K_IGNORE:		return 0;
	case K_NOP:		return 0;
	case K_UNDO:		return 0;
	case K_HELP:		return 0;
	case K_XF1:		key = VTERM_KEY_FUNCTION(1); break;
	case K_XF2:		key = VTERM_KEY_FUNCTION(2); break;
	case K_XF3:		key = VTERM_KEY_FUNCTION(3); break;
	case K_XF4:		key = VTERM_KEY_FUNCTION(4); break;
	case K_SELECT:		return 0;
#ifdef FEAT_GUI
	case K_VER_SCROLLBAR:	return 0;
	case K_HOR_SCROLLBAR:	return 0;
#endif
#ifdef FEAT_GUI_TABLINE
	case K_TABLINE:		return 0;
	case K_TABMENU:		return 0;
#endif
#ifdef FEAT_NETBEANS_INTG
	case K_F21:		key = VTERM_KEY_FUNCTION(21); break;
#endif
#ifdef FEAT_DND
	case K_DROP:		return 0;
#endif
	case K_CURSORHOLD:	return 0;
	case K_PS:		vterm_keyboard_start_paste(vterm);
				other = TRUE;
				break;
	case K_PE:		vterm_keyboard_end_paste(vterm);
				other = TRUE;
				break;
    }

    /*
     * Convert special keys to vterm keys:
     * - Write keys to vterm: vterm_keyboard_key()
     * - Write output to channel.
     * TODO: use mod_mask
     */
    if (key != VTERM_KEY_NONE)
	/* Special key, let vterm convert it. */
	vterm_keyboard_key(vterm, key, mod);
    else if (!other)
	/* Normal character, let vterm convert it. */
	vterm_keyboard_unichar(vterm, c, mod);

    /* Read back the converted escape sequence. */
    return (int)vterm_output_read(vterm, buf, KEY_BUF_LEN);
}

/*
 * Return TRUE if the job for "term" is still running.
 * If "check_job_status" is TRUE update the job status.
 */
    static int
term_job_running_check(term_T *term, int check_job_status)
{
    /* Also consider the job finished when the channel is closed, to avoid a
     * race condition when updating the title. */
    if (term != NULL
	&& term->tl_job != NULL
	&& channel_is_open(term->tl_job->jv_channel))
    {
	if (check_job_status)
	    job_status(term->tl_job);
	return (term->tl_job->jv_status == JOB_STARTED
		|| term->tl_job->jv_channel->ch_keep_open);
    }
    return FALSE;
}

/*
 * Return TRUE if the job for "term" is still running.
 */
    int
term_job_running(term_T *term)
{
    return term_job_running_check(term, FALSE);
}

/*
 * Return TRUE if "term" has an active channel and used ":term NONE".
 */
    int
term_none_open(term_T *term)
{
    /* Also consider the job finished when the channel is closed, to avoid a
     * race condition when updating the title. */
    return term != NULL
	&& term->tl_job != NULL
	&& channel_is_open(term->tl_job->jv_channel)
	&& term->tl_job->jv_channel->ch_keep_open;
}

/*
 * Used when exiting: kill the job in "buf" if so desired.
 * Return OK when the job finished.
 * Return FAIL when the job is still running.
 */
    int
term_try_stop_job(buf_T *buf)
{
    int	    count;
    char    *how = (char *)buf->b_term->tl_kill;

#if defined(FEAT_GUI_DIALOG) || defined(FEAT_CON_DIALOG)
    /* No kill method configured: ask the user when 'confirm' is set. */
    if ((how == NULL || *how == NUL) && (p_confirm || cmdmod.confirm))
    {
	char_u	buff[DIALOG_MSG_SIZE];
	int	ret;

	dialog_msg(buff, _("Kill job in \"%s\"?"), buf->b_fname);
	ret = vim_dialog_yesnocancel(VIM_QUESTION, NULL, buff, 1);
	if (ret == VIM_YES)
	    how = "kill";
	else if (ret == VIM_CANCEL)
	    return FAIL;
    }
#endif
    if (how == NULL || *how == NUL)
	return FAIL;

    job_stop(buf->b_term->tl_job, NULL, how);

    /* wait for up to a second for the job to die */
    for (count = 0; count < 100; ++count)
    {
	/* buffer, terminal and job may be cleaned up while waiting */
	if (!buf_valid(buf)
		|| buf->b_term == NULL
		|| buf->b_term->tl_job == NULL)
	    return OK;

	/* call job_status() to update jv_status */
	job_status(buf->b_term->tl_job);
	if (buf->b_term->tl_job->jv_status >= JOB_ENDED)
	    return OK;
	ui_delay(10L, FALSE);
	mch_check_messages();
	parse_queued_messages();
    }
    return FAIL;
}

/*
 * Add the last line of the scrollback buffer to the buffer in the window.
 */
    static void
add_scrollback_line_to_buffer(term_T *term, char_u *text, int len)
{
    buf_T	*buf = term->tl_buffer;
    int		empty = (buf->b_ml.ml_flags & ML_EMPTY);
    linenr_T	lnum = buf->b_ml.ml_line_count;

#ifdef WIN3264
    /* Convert the UTF-8 text to the active codepage when needed. */
    if (!enc_utf8 && enc_codepage > 0)
    {
	WCHAR   *ret = NULL;
	int	length = 0;

	MultiByteToWideChar_alloc(CP_UTF8, 0, (char*)text, len + 1,
							       &ret, &length);
	if (ret != NULL)
	{
	    WideCharToMultiByte_alloc(enc_codepage, 0,
				      ret, length, (char **)&text, &len, 0, 0);
	    vim_free(ret);
	    ml_append_buf(term->tl_buffer, lnum, text, len, FALSE);
	    vim_free(text);
	}
    }
    else
#endif
	ml_append_buf(term->tl_buffer, lnum, text, len + 1, FALSE);
    if (empty)
    {
	/* Delete the empty line that was in the empty buffer. */
	curbuf = buf;
	ml_delete(1, FALSE);
	curbuf = curwin->w_buffer;
    }
}

/*
 * Copy the attributes of vterm cell "cell" into "attr".
 */
    static void
cell2cellattr(const VTermScreenCell *cell, cellattr_T *attr)
{
    attr->width = cell->width;
    attr->attrs = cell->attrs;
    attr->fg = cell->fg;
    attr->bg = cell->bg;
}

/*
 * Return TRUE when cell attributes "a" and "b" have equal colors.
 */
    static int
equal_celattr(cellattr_T *a, cellattr_T *b)
{
    /* Comparing the colors should be sufficient. */
    return a->fg.red == b->fg.red
	&& a->fg.green == b->fg.green
	&& a->fg.blue == b->fg.blue
	&& a->bg.red == b->bg.red
	&& a->bg.green == b->bg.green
	&& a->bg.blue == b->bg.blue;
}

/*
 * Add an empty scrollback line to "term".  When "lnum" is not zero, add the
 * line at this position.  Otherwise at the end.
 */
    static int
add_empty_scrollback(term_T *term, cellattr_T *fill_attr, int lnum)
{
    if (ga_grow(&term->tl_scrollback, 1) == OK)
    {
	sb_line_T *line = (sb_line_T *)term->tl_scrollback.ga_data
				      + term->tl_scrollback.ga_len;

	if (lnum > 0)
	{
	    int i;

	    /* Shift following lines up to make room at "lnum". */
	    for (i = 0; i < term->tl_scrollback.ga_len - lnum; ++i)
	    {
		*line = *(line - 1);
		--line;
	    }
	}
	line->sb_cols = 0;
	line->sb_cells = NULL;
	line->sb_fill_attr = *fill_attr;
	++term->tl_scrollback.ga_len;
	return OK;
    }
    return FALSE;
}

/*
 * Remove the terminal contents from the scrollback and the buffer.
 * Used before adding a new scrollback line or updating the buffer for lines
 * displayed in the terminal.
 */
    static void
cleanup_scrollback(term_T *term)
{
    sb_line_T	*line;
    garray_T	*gap;

    curbuf = term->tl_buffer;
    gap = &term->tl_scrollback;
    while (curbuf->b_ml.ml_line_count > term->tl_scrollback_scrolled
							    && gap->ga_len > 0)
    {
	ml_delete(curbuf->b_ml.ml_line_count, FALSE);
	line = (sb_line_T *)gap->ga_data + gap->ga_len - 1;
	vim_free(line->sb_cells);
	--gap->ga_len;
    }
    curbuf = curwin->w_buffer;
    if (curbuf == term->tl_buffer)
	check_cursor();
}

/*
 * Add the current lines of the terminal to scrollback and to the buffer.
 */
    static void
update_snapshot(term_T *term)
{
    VTermScreen	    *screen;
    int		    len;
    int		    lines_skipped = 0;
    VTermPos	    pos;
    VTermScreenCell cell;
    cellattr_T	    fill_attr, new_fill_attr;
    cellattr_T	    *p;

    ch_log(term->tl_job == NULL ? NULL : term->tl_job->jv_channel,
				  "Adding terminal window snapshot to buffer");

    /* First remove the lines that were appended before, they might be
     * outdated. */
    cleanup_scrollback(term);

    screen = vterm_obtain_screen(term->tl_vterm);
    fill_attr = new_fill_attr = term->tl_default_color;
    for (pos.row = 0; pos.row < term->tl_rows; ++pos.row)
    {
	/* Find the length of the line by scanning for the last non-empty
	 * cell; remember the trailing (filler) attributes. */
	len = 0;
	for (pos.col = 0; pos.col < term->tl_cols; ++pos.col)
	    if (vterm_screen_get_cell(screen, pos, &cell) != 0
						       && cell.chars[0] != NUL)
	    {
		len = pos.col + 1;
		new_fill_attr = term->tl_default_color;
	    }
	    else
		/* Assume the last attr is the filler attr. */
		cell2cellattr(&cell, &new_fill_attr);

	if (len == 0 && equal_celattr(&new_fill_attr, &fill_attr))
	    ++lines_skipped;
	else
	{
	    while (lines_skipped > 0)
	    {
		/* Line was skipped, add an empty line. */
		--lines_skipped;
		if (add_empty_scrollback(term, &fill_attr, 0) == OK)
		    add_scrollback_line_to_buffer(term, (char_u *)"", 0);
	    }

	    if (len == 0)
		p = NULL;
	    else
		p = (cellattr_T *)alloc((int)sizeof(cellattr_T) * len);
	    if ((p != NULL || len == 0)
				     && ga_grow(&term->tl_scrollback, 1) == OK)
	    {
		garray_T    ga;
		int	    width;
		sb_line_T   *line = (sb_line_T *)term->tl_scrollback.ga_data
						  + term->tl_scrollback.ga_len;

		ga_init2(&ga, 1, 100);
		for (pos.col = 0; pos.col < len; pos.col += width)
		{
		    if (vterm_screen_get_cell(screen, pos, &cell) == 0)
		    {
			width = 1;
			vim_memset(p + pos.col, 0, sizeof(cellattr_T));
			if (ga_grow(&ga, 1) == OK)
			    ga.ga_len += utf_char2bytes(' ',
					     (char_u *)ga.ga_data + ga.ga_len);
		    }
		    else
		    {
			width = cell.width;

			cell2cellattr(&cell, &p[pos.col]);

			// Each character can be up to 6 bytes.
			if (ga_grow(&ga, VTERM_MAX_CHARS_PER_CELL * 6) == OK)
			{
			    int	    i;
			    int	    c;

			    for (i = 0; (c = cell.chars[i]) > 0 || i == 0; ++i)
				ga.ga_len += utf_char2bytes(c == NUL ? ' ' : c,
					     (char_u *)ga.ga_data + ga.ga_len);
			}
		    }
		}
		line->sb_cols = len;
		line->sb_cells = p;
		line->sb_fill_attr = new_fill_attr;
		fill_attr = new_fill_attr;
		++term->tl_scrollback.ga_len;

		if (ga_grow(&ga, 1) == FAIL)
		    add_scrollback_line_to_buffer(term, (char_u *)"", 0);
		else
		{
		    *((char_u *)ga.ga_data + ga.ga_len) = NUL;
		    add_scrollback_line_to_buffer(term, ga.ga_data, ga.ga_len);
		}
		ga_clear(&ga);
	    }
	    else
		vim_free(p);
	}
    }

    // Add trailing empty lines.
    for (pos.row = term->tl_scrollback.ga_len;
	    pos.row < term->tl_scrollback_scrolled + term->tl_cursor_pos.row;
	    ++pos.row)
    {
	if (add_empty_scrollback(term, &fill_attr, 0) == OK)
	    add_scrollback_line_to_buffer(term, (char_u *)"", 0);
    }

    term->tl_dirty_snapshot = FALSE;
#ifdef FEAT_TIMERS
    term->tl_timer_set = FALSE;
#endif
}

/*
 * If needed, add the current lines of the terminal to scrollback and to the
 * buffer.  Called after the job has ended and when switching to
 * Terminal-Normal mode.
 * When "redraw" is TRUE redraw the windows that show the terminal.
 */
    static void
may_move_terminal_to_buffer(term_T *term, int redraw)
{
    win_T	    *wp;

    if (term->tl_vterm == NULL)
	return;

    /* Update the snapshot only if something changes or the buffer does not
     * have all the lines. */
    if (term->tl_dirty_snapshot || term->tl_buffer->b_ml.ml_line_count
					       <= term->tl_scrollback_scrolled)
	update_snapshot(term);

    /* Obtain the current background color. */
    vterm_state_get_default_colors(vterm_obtain_state(term->tl_vterm),
		       &term->tl_default_color.fg, &term->tl_default_color.bg);

    if (redraw)
	FOR_ALL_WINDOWS(wp)
	{
	    if (wp->w_buffer == term->tl_buffer)
	    {
		wp->w_cursor.lnum = term->tl_buffer->b_ml.ml_line_count;
		wp->w_cursor.col = 0;
		wp->w_valid = 0;
		if (wp->w_cursor.lnum >= wp->w_height)
		{
		    /* Scroll so the cursor line is visible. */
		    linenr_T min_topline = wp->w_cursor.lnum - wp->w_height + 1;

		    if (wp->w_topline < min_topline)
			wp->w_topline = min_topline;
		}
		redraw_win_later(wp, NOT_VALID);
	    }
	}
}

#if defined(FEAT_TIMERS) || defined(PROTO)
/*
 * Check if any terminal timer expired.
If so, copy text from the terminal to * the buffer. * Return the time until the next timer will expire. */ int term_check_timers(int next_due_arg, proftime_T *now) { term_T *term; int next_due = next_due_arg; for (term = first_term; term != NULL; term = term->tl_next) { if (term->tl_timer_set && !term->tl_normal_mode) { long this_due = proftime_time_left(&term->tl_timer_due, now); if (this_due <= 1) { term->tl_timer_set = FALSE; may_move_terminal_to_buffer(term, FALSE); } else if (next_due == -1 || next_due > this_due) next_due = this_due; } } return next_due; } #endif static void set_terminal_mode(term_T *term, int normal_mode) { term->tl_normal_mode = normal_mode; VIM_CLEAR(term->tl_status_text); if (term->tl_buffer == curbuf) maketitle(); } /* * Called after the job if finished and Terminal mode is not active: * Move the vterm contents into the scrollback buffer and free the vterm. */ static void cleanup_vterm(term_T *term) { if (term->tl_finish != TL_FINISH_CLOSE) may_move_terminal_to_buffer(term, TRUE); term_free_vterm(term); set_terminal_mode(term, FALSE); } /* * Switch from Terminal-Job mode to Terminal-Normal mode. * Suspends updating the terminal window. */ static void term_enter_normal_mode(void) { term_T *term = curbuf->b_term; set_terminal_mode(term, TRUE); /* Append the current terminal contents to the buffer. */ may_move_terminal_to_buffer(term, TRUE); /* Move the window cursor to the position of the cursor in the * terminal. */ curwin->w_cursor.lnum = term->tl_scrollback_scrolled + term->tl_cursor_pos.row + 1; check_cursor(); if (coladvance(term->tl_cursor_pos.col) == FAIL) coladvance(MAXCOL); /* Display the same lines as in the terminal. */ curwin->w_topline = term->tl_scrollback_scrolled + 1; } /* * Returns TRUE if the current window contains a terminal and we are in * Terminal-Normal mode. 
 */
    int
term_in_normal_mode(void)
{
    term_T *term = curbuf->b_term;

    return term != NULL && term->tl_normal_mode;
}

/*
 * Switch from Terminal-Normal mode to Terminal-Job mode.
 * Restores updating the terminal window.
 */
    void
term_enter_job_mode()
{
    term_T	*term = curbuf->b_term;

    set_terminal_mode(term, FALSE);

    if (term->tl_channel_closed)
	cleanup_vterm(term);
    redraw_buf_and_status_later(curbuf, NOT_VALID);
}

/*
 * Get a key from the user with terminal mode mappings.
 * Note: while waiting a terminal may be closed and freed if the channel is
 * closed and ++close was used.
 */
    static int
term_vgetc()
{
    int c;
    int save_State = State;

    State = TERMINAL;
    got_int = FALSE;
#ifdef WIN3264
    ctrl_break_was_pressed = FALSE;
#endif
    c = vgetc();
    got_int = FALSE;
    State = save_State;
    return c;
}

/* TRUE when the last mouse click/drag was outside the terminal window. */
static int	mouse_was_outside = FALSE;

/*
 * Send keys to terminal.
 * Return FAIL when the key needs to be handled in Normal mode.
 * Return OK when the key was dropped or sent to the terminal.
 */
    int
send_keys_to_term(term_T *term, int c, int typed)
{
    char	msg[KEY_BUF_LEN];
    size_t	len;
    int		dragging_outside = FALSE;

    /* Catch keys that need to be handled as in Normal mode. */
    switch (c)
    {
	case NUL:
	case K_ZERO:
	    if (typed)
		stuffcharReadbuff(c);
	    return FAIL;

	case K_TABLINE:
	    stuffcharReadbuff(c);
	    return FAIL;

	case K_IGNORE:
	case K_CANCEL:  // used for :normal when running out of chars
	    return FAIL;

	case K_LEFTDRAG:
	case K_MIDDLEDRAG:
	case K_RIGHTDRAG:
	case K_X1DRAG:
	case K_X2DRAG:
	    dragging_outside = mouse_was_outside;
	    /* FALLTHROUGH */
	case K_LEFTMOUSE:
	case K_LEFTMOUSE_NM:
	case K_LEFTRELEASE:
	case K_LEFTRELEASE_NM:
	case K_MOUSEMOVE:
	case K_MIDDLEMOUSE:
	case K_MIDDLERELEASE:
	case K_RIGHTMOUSE:
	case K_RIGHTRELEASE:
	case K_X1MOUSE:
	case K_X1RELEASE:
	case K_X2MOUSE:
	case K_X2RELEASE:

	case K_MOUSEUP:
	case K_MOUSEDOWN:
	case K_MOUSELEFT:
	case K_MOUSERIGHT:
	    if (mouse_row < W_WINROW(curwin)
		    || mouse_row >= (W_WINROW(curwin) + curwin->w_height)
		    || mouse_col < curwin->w_wincol
		    || mouse_col >= W_ENDCOL(curwin)
		    || dragging_outside)
	    {
		/* click or scroll outside the current window or on status line
		 * or vertical separator */
		if (typed)
		{
		    stuffcharReadbuff(c);
		    mouse_was_outside = TRUE;
		}
		return FAIL;
	    }
    }
    if (typed)
	mouse_was_outside = FALSE;

    /* Convert the typed key to a sequence of bytes for the job. */
    len = term_convert_key(term, c, msg);
    if (len > 0)
	/* TODO: if FAIL is returned, stop? */
	channel_send(term->tl_job->jv_channel, get_tty_part(term),
						(char_u *)msg, (int)len, NULL);

    return OK;
}

/*
 * Set the window cursor to the vterm cursor position, clipped to the window
 * size.
 */
    static void
position_cursor(win_T *wp, VTermPos *pos)
{
    wp->w_wrow = MIN(pos->row, MAX(0, wp->w_height - 1));
    wp->w_wcol = MIN(pos->col, MAX(0, wp->w_width - 1));
    wp->w_valid |= (VALID_WCOL|VALID_WROW);
}

/*
 * Handle CTRL-W "": send register contents to the job.
 */
    static void
term_paste_register(int prev_c UNUSED)
{
    int		c;
    list_T	*l;
    listitem_T	*item;
    long	reglen = 0;
    int		type;

#ifdef FEAT_CMDL_INFO
    if (add_to_showcmd(prev_c))
    if (add_to_showcmd('"'))
	out_flush();
#endif
    c = term_vgetc();
#ifdef FEAT_CMDL_INFO
    clear_showcmd();
#endif
    if (!term_use_loop())
	/* job finished while waiting for a character */
	return;

    /* CTRL-W "= prompt for expression to evaluate. */
    if (c == '=' && get_expr_register() != '=')
	return;
    if (!term_use_loop())
	/* job finished while waiting for a character */
	return;

    l = (list_T *)get_reg_contents(c, GREG_LIST);
    if (l != NULL)
    {
	type = get_reg_type(c, &reglen);
	for (item = l->lv_first; item != NULL; item = item->li_next)
	{
	    char_u *s = tv_get_string(&item->li_tv);
#ifdef WIN3264
	    char_u *tmp = s;

	    /* Convert the text from UTF-8 to the active codepage. */
	    if (!enc_utf8 && enc_codepage > 0)
	    {
		WCHAR   *ret = NULL;
		int	length = 0;

		MultiByteToWideChar_alloc(enc_codepage, 0, (char *)s,
						(int)STRLEN(s), &ret, &length);
		if (ret != NULL)
		{
		    WideCharToMultiByte_alloc(CP_UTF8, 0,
				    ret, length, (char **)&s, &length, 0, 0);
		    vim_free(ret);
		}
	    }
#endif
	    channel_send(curbuf->b_term->tl_job->jv_channel, PART_IN,
						      s, (int)STRLEN(s), NULL);
#ifdef WIN3264
	    if (tmp != s)
		vim_free(s);
#endif

	    if (item->li_next != NULL || type == MLINE)
		channel_send(curbuf->b_term->tl_job->jv_channel, PART_IN,
						      (char_u *)"\r", 1, NULL);
	}
	list_free(l);
    }
}

/*
 * Return TRUE when waiting for a character in the terminal, the cursor of the
 * terminal should be displayed.
 */
    int
terminal_is_active()
{
    return in_terminal_loop != NULL;
}

#if defined(FEAT_GUI) || defined(PROTO)
    cursorentry_T *
term_get_cursor_shape(guicolor_T *fg, guicolor_T *bg)
{
    term_T		 *term = in_terminal_loop;
    static cursorentry_T entry;
    int			 id;
    guicolor_T		 term_fg, term_bg;

    vim_memset(&entry, 0, sizeof(entry));
    entry.shape = entry.mshape =
	term->tl_cursor_shape == VTERM_PROP_CURSORSHAPE_UNDERLINE ? SHAPE_HOR :
	term->tl_cursor_shape == VTERM_PROP_CURSORSHAPE_BAR_LEFT ? SHAPE_VER :
	SHAPE_BLOCK;
    entry.percentage = 20;
    if (term->tl_cursor_blink)
    {
	entry.blinkwait = 700;
	entry.blinkon = 400;
	entry.blinkoff = 250;
    }
    /* The "Terminal" highlight group overrules the defaults. */
    id = syn_name2id((char_u *)"Terminal");

    if (id != 0)
    {
	syn_id2colors(id, &term_fg, &term_bg);
	*fg = term_bg;
    }
    else
	*fg = gui.back_pixel;

    if (term->tl_cursor_color == NULL)
    {
	if (id != 0)
	    *bg = term_fg;
	else
	    *bg = gui.norm_pixel;
    }
    else
	*bg = color_name2handle(term->tl_cursor_color);
    entry.name = "n";
    entry.used_for = SHAPE_CURSOR;

    return &entry;
}
#endif

/*
 * Send the cursor color/shape/blink to the terminal when they differ from the
 * last values that were set.
 */
    static void
may_output_cursor_props(void)
{
    if (!cursor_color_equal(last_set_cursor_color, desired_cursor_color)
	    || last_set_cursor_shape != desired_cursor_shape
	    || last_set_cursor_blink != desired_cursor_blink)
    {
	cursor_color_copy(&last_set_cursor_color, desired_cursor_color);
	last_set_cursor_shape = desired_cursor_shape;
	last_set_cursor_blink = desired_cursor_blink;
	term_cursor_color(cursor_color_get(desired_cursor_color));
	if (desired_cursor_shape == -1 || desired_cursor_blink == -1)
	    /* this will restore the initial cursor style, if possible */
	    ui_cursor_shape_forced(TRUE);
	else
	    term_cursor_shape(desired_cursor_shape, desired_cursor_blink);
    }
}

/*
 * Set the cursor color and shape, if not last set to these.
 */
    static void
may_set_cursor_props(term_T *term)
{
#ifdef FEAT_GUI
    /* For the GUI the cursor properties are obtained with
     * term_get_cursor_shape(). */
    if (gui.in_use)
	return;
#endif
    if (in_terminal_loop == term)
    {
	cursor_color_copy(&desired_cursor_color, term->tl_cursor_color);
	desired_cursor_shape = term->tl_cursor_shape;
	desired_cursor_blink = term->tl_cursor_blink;
	may_output_cursor_props();
    }
}

/*
 * Reset the desired cursor properties and restore them when needed.
 */
    static void
prepare_restore_cursor_props(void)
{
#ifdef FEAT_GUI
    if (gui.in_use)
	return;
#endif
    cursor_color_copy(&desired_cursor_color, NULL);
    desired_cursor_shape = -1;
    desired_cursor_blink = -1;
    may_output_cursor_props();
}

/*
 * Returns TRUE if the current window contains a terminal and we are sending
 * keys to the job.
 * If "check_job_status" is TRUE update the job status.
 */
    static int
term_use_loop_check(int check_job_status)
{
    term_T *term = curbuf->b_term;

    return term != NULL
	&& !term->tl_normal_mode
	&& term->tl_vterm != NULL
	&& term_job_running_check(term, check_job_status);
}

/*
 * Returns TRUE if the current window contains a terminal and we are sending
 * keys to the job.
 */
    int
term_use_loop(void)
{
    return term_use_loop_check(FALSE);
}

/*
 * Called when entering a window with the mouse.  If this is a terminal window
 * we may want to change state.
 */
    void
term_win_entered()
{
    term_T *term = curbuf->b_term;

    if (term != NULL)
    {
	if (term_use_loop_check(TRUE))
	{
	    reset_VIsual_and_resel();
	    if (State & INSERT)
		stop_insert_mode = TRUE;
	}
	mouse_was_outside = FALSE;
	/* Remember the click position to replay it later, see
	 * term_mouse_click(). */
	enter_mouse_col = mouse_col;
	enter_mouse_row = mouse_row;
    }
}

/*
 * Wait for input and send it to the job.
 * When "blocking" is TRUE wait for a character to be typed.  Otherwise return
 * when there is no more typahead.
 * Return when the start of a CTRL-W command is typed or anything else that
 * should be handled as a Normal mode command.
 * Returns OK if a typed character is to be handled in Normal mode, FAIL if
 * the terminal was closed.
 */
    int
terminal_loop(int blocking)
{
    int		c;
    int		termwinkey = 0;
    int		ret;
#ifdef UNIX
    int		tty_fd = curbuf->b_term->tl_job->jv_channel
				 ->ch_part[get_tty_part(curbuf->b_term)].ch_fd;
#endif
    int		restore_cursor = FALSE;

    /* Remember the terminal we are sending keys to.  However, the terminal
     * might be closed while waiting for a character, e.g. typing "exit" in a
     * shell and ++close was used.  Therefore use curbuf->b_term instead of a
     * stored reference. */
    in_terminal_loop = curbuf->b_term;

    if (*curwin->w_p_twk != NUL)
    {
	termwinkey = string_to_key(curwin->w_p_twk, TRUE);
	if (termwinkey == Ctrl_W)
	    termwinkey = 0;
    }
    position_cursor(curwin, &curbuf->b_term->tl_cursor_pos);
    may_set_cursor_props(curbuf->b_term);

    while (blocking || vpeekc_nomap() != NUL)
    {
#ifdef FEAT_GUI
	if (!curbuf->b_term->tl_system)
#endif
	    /* TODO: skip screen update when handling a sequence of keys. */
	    /* Repeat redrawing in case a message is received while redrawing.
	     */
	    while (must_redraw != 0)
		if (update_screen(0) == FAIL)
		    break;
	if (!term_use_loop_check(TRUE) || in_terminal_loop != curbuf->b_term)
	    /* job finished while redrawing */
	    break;

	update_cursor(curbuf->b_term, FALSE);
	restore_cursor = TRUE;

	c = term_vgetc();
	if (!term_use_loop_check(TRUE) || in_terminal_loop != curbuf->b_term)
	{
	    /* Job finished while waiting for a character.  Push back the
	     * received character. */
	    if (c != K_IGNORE)
		vungetc(c);
	    break;
	}
	if (c == K_IGNORE)
	    continue;

#ifdef UNIX
	/*
	 * The shell or another program may change the tty settings.  Getting
	 * them for every typed character is a bit of overhead, but it's needed
	 * for the first character typed, e.g. when Vim starts in a shell.
	 */
	if (isatty(tty_fd))
	{
	    ttyinfo_T info;

	    /* Get the current backspace character of the pty. */
	    if (get_tty_info(tty_fd, &info) == OK)
		term_backspace_char = info.backspace;
	}
#endif

#ifdef WIN3264
	/* On Windows winpty handles CTRL-C, don't send a CTRL_C_EVENT.
	 * Use CTRL-BREAK to kill the job. */
	if (ctrl_break_was_pressed)
	    mch_signal_job(curbuf->b_term->tl_job, (char_u *)"kill");
#endif
	/* Was either CTRL-W (termwinkey) or CTRL-\ pressed?
	 * Not in a system terminal. */
	if ((c == (termwinkey == 0 ? Ctrl_W : termwinkey) || c == Ctrl_BSL)
#ifdef FEAT_GUI
		&& !curbuf->b_term->tl_system
#endif
		)
	{
	    int prev_c = c;

#ifdef FEAT_CMDL_INFO
	    if (add_to_showcmd(c))
		out_flush();
#endif
	    c = term_vgetc();
#ifdef FEAT_CMDL_INFO
	    clear_showcmd();
#endif
	    if (!term_use_loop_check(TRUE)
					 || in_terminal_loop != curbuf->b_term)
		/* job finished while waiting for a character */
		break;

	    if (prev_c == Ctrl_BSL)
	    {
		if (c == Ctrl_N)
		{
		    /* CTRL-\ CTRL-N : go to Terminal-Normal mode. */
		    term_enter_normal_mode();
		    ret = FAIL;
		    goto theend;
		}
		/* Send both keys to the terminal. */
		send_keys_to_term(curbuf->b_term, prev_c, TRUE);
	    }
	    else if (c == Ctrl_C)
	    {
		/* "CTRL-W CTRL-C" or 'termwinkey' CTRL-C: end the job */
		mch_signal_job(curbuf->b_term->tl_job, (char_u *)"kill");
	    }
	    else if (c == '.')
	    {
		/* "CTRL-W .": send CTRL-W to the job */
		/* "'termwinkey' .": send 'termwinkey' to the job */
		c = termwinkey == 0 ? Ctrl_W : termwinkey;
	    }
	    else if (c == Ctrl_BSL)
	    {
		/* "CTRL-W CTRL-\": send CTRL-\ to the job */
		c = Ctrl_BSL;
	    }
	    else if (c == 'N')
	    {
		/* CTRL-W N : go to Terminal-Normal mode. */
		term_enter_normal_mode();
		ret = FAIL;
		goto theend;
	    }
	    else if (c == '"')
	    {
		term_paste_register(prev_c);
		continue;
	    }
	    else if (termwinkey == 0 || c != termwinkey)
	    {
		/* Not a terminal key: handle CTRL-W plus key in Normal mode. */
		stuffcharReadbuff(Ctrl_W);
		stuffcharReadbuff(c);
		ret = OK;
		goto theend;
	    }
	}
# ifdef WIN3264
	if (!enc_utf8 && has_mbyte && c >= 0x80)
	{
	    WCHAR   wc;
	    char_u  mb[3];

	    mb[0] = (unsigned)c >> 8;
	    mb[1] = c;
	    if (MultiByteToWideChar(GetACP(), 0, (char*)mb, 2, &wc, 1) > 0)
		c = wc;
	}
# endif
	if (send_keys_to_term(curbuf->b_term, c, TRUE) != OK)
	{
	    if (c == K_MOUSEMOVE)
		/* We are sure to come back here, don't reset the cursor color
		 * and shape to avoid flickering. */
		restore_cursor = FALSE;

	    ret = OK;
	    goto theend;
	}
    }
    ret = FAIL;

theend:
    in_terminal_loop = NULL;
    if (restore_cursor)
	prepare_restore_cursor_props();

    /* Move a snapshot of the screen contents to the buffer, so that completion
     * works in other buffers. */
    if (curbuf->b_term != NULL && !curbuf->b_term->tl_normal_mode)
	may_move_terminal_to_buffer(curbuf->b_term, FALSE);

    return ret;
}

/*
 * Called when a job has finished.
 * This updates the title and status, but does not close the vterm, because
 * there might still be pending output in the channel.
 */
    void
term_job_ended(job_T *job)
{
    term_T *term;
    int	    did_one = FALSE;

    for (term = first_term; term != NULL; term = term->tl_next)
	if (term->tl_job == job)
	{
	    VIM_CLEAR(term->tl_title);
	    VIM_CLEAR(term->tl_status_text);
	    redraw_buf_and_status_later(term->tl_buffer, VALID);
	    did_one = TRUE;
	}
    if (did_one)
	redraw_statuslines();
    if (curbuf->b_term != NULL)
    {
	if (curbuf->b_term->tl_job == job)
	    maketitle();
	update_cursor(curbuf->b_term, TRUE);
    }
}

/*
 * Show or hide the cursor of "term", when it is the terminal currently
 * receiving keys.
 */
    static void
may_toggle_cursor(term_T *term)
{
    if (in_terminal_loop == term)
    {
	if (term->tl_cursor_visible)
	    cursor_on();
	else
	    cursor_off();
    }
}

/*
 * Reverse engineer the RGB value into a cterm color index.
 * First color is 1.  Return 0 if no match found (default color).
 */
    static int
color2index(VTermColor *color, int fg, int *boldp)
{
    int red = color->red;
    int blue = color->blue;
    int green = color->green;

    if (color->ansi_index != VTERM_ANSI_INDEX_NONE)
    {
	/* First 16 colors and default: use the ANSI index, because these
	 * colors can be redefined. */
	if (t_colors >= 16)
	    return color->ansi_index;
	switch (color->ansi_index)
	{
	    case  0: return 0;
	    case  1: return lookup_color( 0, fg, boldp) + 1; /* black */
	    case  2: return lookup_color( 4, fg, boldp) + 1; /* dark red */
	    case  3: return lookup_color( 2, fg, boldp) + 1; /* dark green */
	    case  4: return lookup_color( 6, fg, boldp) + 1; /* brown */
	    case  5: return lookup_color( 1, fg, boldp) + 1; /* dark blue */
	    case  6: return lookup_color( 5, fg, boldp) + 1; /* dark magenta */
	    case  7: return lookup_color( 3, fg, boldp) + 1; /* dark cyan */
	    case  8: return lookup_color( 8, fg, boldp) + 1; /* light grey */
	    case  9: return lookup_color(12, fg, boldp) + 1; /* dark grey */
	    case 10: return lookup_color(20, fg, boldp) + 1; /* red */
	    case 11: return lookup_color(16, fg, boldp) + 1; /* green */
	    case 12: return lookup_color(24, fg, boldp) + 1; /* yellow */
	    case 13: return lookup_color(14, fg, boldp) + 1; /* blue */
	    case 14: return lookup_color(22, fg, boldp) + 1; /* magenta */
	    case 15: return lookup_color(18, fg, boldp) + 1; /* cyan */
	    case 16: return lookup_color(26, fg, boldp) + 1; /* white */
	}
    }

    if (t_colors >= 256)
    {
	if (red == blue && red == green)
	{
	    /* 24-color greyscale plus white and black */
	    static int cutoff[23] = {
		    0x0D, 0x17, 0x21, 0x2B, 0x35, 0x3F,
		    0x49, 0x53, 0x5D, 0x67, 0x71, 0x7B,
		    0x85, 0x8F, 0x99, 0xA3, 0xAD, 0xB7,
		    0xC1, 0xCB, 0xD5, 0xDF, 0xE9};
	    int i;

	    if (red < 5)
		return 17; /* 00/00/00 */
	    if (red > 245) /* ff/ff/ff */
		return 232;
	    for (i = 0; i < 23; ++i)
		if (red < cutoff[i])
		    return i + 233;
	    return 256;
	}
	{
	    static int cutoff[5] = {0x2F, 0x73, 0x9B, 0xC3, 0xEB};
	    int ri, gi, bi;

	    /* 216-color cube */
	    for (ri = 0; ri < 5; ++ri)
		if (red < cutoff[ri])
		    break;
	    for (gi = 0; gi < 5; ++gi)
		if (green < cutoff[gi])
		    break;
	    for (bi = 0; bi < 5; ++bi)
		if (blue < cutoff[bi])
		    break;
	    return 17 + ri * 36 + gi * 6 + bi;
	}
    }
    return 0;
}

/*
 * Convert Vterm attributes to highlight flags.
 */
    static int
vtermAttr2hl(VTermScreenCellAttrs cellattrs)
{
    int attr = 0;

    /* Map each vterm cell attribute bit onto the corresponding Vim HL_ flag.
     * Inverse of hl2vtermAttr() below. */
    if (cellattrs.bold)
	attr |= HL_BOLD;
    if (cellattrs.underline)
	attr |= HL_UNDERLINE;
    if (cellattrs.italic)
	attr |= HL_ITALIC;
    if (cellattrs.strike)
	attr |= HL_STRIKETHROUGH;
    if (cellattrs.reverse)
	attr |= HL_INVERSE;
    return attr;
}

/*
 * Store Vterm attributes in "cell" from highlight flags.
 * Clears all attribute bits first, then sets only those present in "attr".
 * Inverse of vtermAttr2hl() above.
 */
    static void
hl2vtermAttr(int attr, cellattr_T *cell)
{
    vim_memset(&cell->attrs, 0, sizeof(VTermScreenCellAttrs));
    if (attr & HL_BOLD)
	cell->attrs.bold = 1;
    if (attr & HL_UNDERLINE)
	cell->attrs.underline = 1;
    if (attr & HL_ITALIC)
	cell->attrs.italic = 1;
    if (attr & HL_STRIKETHROUGH)
	cell->attrs.strike = 1;
    if (attr & HL_INVERSE)
	cell->attrs.reverse = 1;
}

/*
 * Convert the attributes of a vterm cell into an attribute index.
 * Chooses the lookup appropriate for the display: GUI colors, 'termguicolors'
 * RGB colors, or cterm color indexes (via color2index()).
 */
    static int
cell2attr(VTermScreenCellAttrs cellattrs, VTermColor cellfg, VTermColor cellbg)
{
    int attr = vtermAttr2hl(cellattrs);

#ifdef FEAT_GUI
    if (gui.in_use)
    {
	guicolor_T fg, bg;

	fg = gui_mch_get_rgb_color(cellfg.red, cellfg.green, cellfg.blue);
	bg = gui_mch_get_rgb_color(cellbg.red, cellbg.green, cellbg.blue);
	return get_gui_attr_idx(attr, fg, bg);
    }
    else
#endif
#ifdef FEAT_TERMGUICOLORS
    if (p_tgc)
    {
	guicolor_T fg, bg;

	fg = gui_get_rgb_color_cmn(cellfg.red, cellfg.green, cellfg.blue);
	bg = gui_get_rgb_color_cmn(cellbg.red, cellbg.green, cellbg.blue);
	return get_tgc_attr_idx(attr, fg, bg);
    }
    else
#endif
    {
	int bold = MAYBE;
	int fg = color2index(&cellfg, TRUE, &bold);
	int bg = color2index(&cellbg, FALSE, &bold);

	/* Use the "Terminal" highlighting for the default colors.
*/ if ((fg == 0 || bg == 0) && t_colors >= 16) { if (fg == 0 && term_default_cterm_fg >= 0) fg = term_default_cterm_fg + 1; if (bg == 0 && term_default_cterm_bg >= 0) bg = term_default_cterm_bg + 1; } /* with 8 colors set the bold attribute to get a bright foreground */ if (bold == TRUE) attr |= HL_BOLD; return get_cterm_attr_idx(attr, fg, bg); } return 0; } static void set_dirty_snapshot(term_T *term) { term->tl_dirty_snapshot = TRUE; #ifdef FEAT_TIMERS if (!term->tl_normal_mode) { /* Update the snapshot after 100 msec of not getting updates. */ profile_setlimit(100L, &term->tl_timer_due); term->tl_timer_set = TRUE; } #endif } static int handle_damage(VTermRect rect, void *user) { term_T *term = (term_T *)user; term->tl_dirty_row_start = MIN(term->tl_dirty_row_start, rect.start_row); term->tl_dirty_row_end = MAX(term->tl_dirty_row_end, rect.end_row); set_dirty_snapshot(term); redraw_buf_later(term->tl_buffer, SOME_VALID); return 1; } static void term_scroll_up(term_T *term, int start_row, int count) { win_T *wp; VTermColor fg, bg; VTermScreenCellAttrs attr; int clear_attr; /* Set the color to clear lines with. */ vterm_state_get_default_colors(vterm_obtain_state(term->tl_vterm), &fg, &bg); vim_memset(&attr, 0, sizeof(attr)); clear_attr = cell2attr(attr, fg, bg); FOR_ALL_WINDOWS(wp) { if (wp->w_buffer == term->tl_buffer) win_del_lines(wp, start_row, count, FALSE, FALSE, clear_attr); } } static int handle_moverect(VTermRect dest, VTermRect src, void *user) { term_T *term = (term_T *)user; int count = src.start_row - dest.start_row; /* Scrolling up is done much more efficiently by deleting lines instead of * redrawing the text. But avoid doing this multiple times, postpone until * the redraw happens. 
*/ if (dest.start_col == src.start_col && dest.end_col == src.end_col && dest.start_row < src.start_row) { if (dest.start_row == 0) term->tl_postponed_scroll += count; else term_scroll_up(term, dest.start_row, count); } term->tl_dirty_row_start = MIN(term->tl_dirty_row_start, dest.start_row); term->tl_dirty_row_end = MIN(term->tl_dirty_row_end, dest.end_row); set_dirty_snapshot(term); /* Note sure if the scrolling will work correctly, let's do a complete * redraw later. */ redraw_buf_later(term->tl_buffer, NOT_VALID); return 1; } static int handle_movecursor( VTermPos pos, VTermPos oldpos UNUSED, int visible, void *user) { term_T *term = (term_T *)user; win_T *wp; term->tl_cursor_pos = pos; term->tl_cursor_visible = visible; FOR_ALL_WINDOWS(wp) { if (wp->w_buffer == term->tl_buffer) position_cursor(wp, &pos); } if (term->tl_buffer == curbuf && !term->tl_normal_mode) { may_toggle_cursor(term); update_cursor(term, term->tl_cursor_visible); } return 1; } static int handle_settermprop( VTermProp prop, VTermValue *value, void *user) { term_T *term = (term_T *)user; switch (prop) { case VTERM_PROP_TITLE: vim_free(term->tl_title); /* a blank title isn't useful, make it empty, so that "running" is * displayed */ if (*skipwhite((char_u *)value->string) == NUL) term->tl_title = NULL; #ifdef WIN3264 else if (!enc_utf8 && enc_codepage > 0) { WCHAR *ret = NULL; int length = 0; MultiByteToWideChar_alloc(CP_UTF8, 0, (char*)value->string, (int)STRLEN(value->string), &ret, &length); if (ret != NULL) { WideCharToMultiByte_alloc(enc_codepage, 0, ret, length, (char**)&term->tl_title, &length, 0, 0); vim_free(ret); } } #endif else term->tl_title = vim_strsave((char_u *)value->string); VIM_CLEAR(term->tl_status_text); if (term == curbuf->b_term) maketitle(); break; case VTERM_PROP_CURSORVISIBLE: term->tl_cursor_visible = value->boolean; may_toggle_cursor(term); out_flush(); break; case VTERM_PROP_CURSORBLINK: term->tl_cursor_blink = value->boolean; may_set_cursor_props(term); break; 
case VTERM_PROP_CURSORSHAPE: term->tl_cursor_shape = value->number; may_set_cursor_props(term); break; case VTERM_PROP_CURSORCOLOR: cursor_color_copy(&term->tl_cursor_color, (char_u*)value->string); may_set_cursor_props(term); break; case VTERM_PROP_ALTSCREEN: /* TODO: do anything else? */ term->tl_using_altscreen = value->boolean; break; default: break; } /* Always return 1, otherwise vterm doesn't store the value internally. */ return 1; } /* * The job running in the terminal resized the terminal. */ static int handle_resize(int rows, int cols, void *user) { term_T *term = (term_T *)user; win_T *wp; term->tl_rows = rows; term->tl_cols = cols; if (term->tl_vterm_size_changed) /* Size was set by vterm_set_size(), don't set the window size. */ term->tl_vterm_size_changed = FALSE; else { FOR_ALL_WINDOWS(wp) { if (wp->w_buffer == term->tl_buffer) { win_setheight_win(rows, wp); win_setwidth_win(cols, wp); } } redraw_buf_later(term->tl_buffer, NOT_VALID); } return 1; } /* * Handle a line that is pushed off the top of the screen. */ static int handle_pushline(int cols, const VTermScreenCell *cells, void *user) { term_T *term = (term_T *)user; /* First remove the lines that were appended before, the pushed line goes * above it. */ cleanup_scrollback(term); /* If the number of lines that are stored goes over 'termscrollback' then * delete the first 10%. 
*/ if (term->tl_scrollback.ga_len >= term->tl_buffer->b_p_twsl) { int todo = term->tl_buffer->b_p_twsl / 10; int i; curbuf = term->tl_buffer; for (i = 0; i < todo; ++i) { vim_free(((sb_line_T *)term->tl_scrollback.ga_data + i)->sb_cells); ml_delete(1, FALSE); } curbuf = curwin->w_buffer; term->tl_scrollback.ga_len -= todo; mch_memmove(term->tl_scrollback.ga_data, (sb_line_T *)term->tl_scrollback.ga_data + todo, sizeof(sb_line_T) * term->tl_scrollback.ga_len); term->tl_scrollback_scrolled -= todo; } if (ga_grow(&term->tl_scrollback, 1) == OK) { cellattr_T *p = NULL; int len = 0; int i; int c; int col; sb_line_T *line; garray_T ga; cellattr_T fill_attr = term->tl_default_color; /* do not store empty cells at the end */ for (i = 0; i < cols; ++i) if (cells[i].chars[0] != 0) len = i + 1; else cell2cellattr(&cells[i], &fill_attr); ga_init2(&ga, 1, 100); if (len > 0) p = (cellattr_T *)alloc((int)sizeof(cellattr_T) * len); if (p != NULL) { for (col = 0; col < len; col += cells[col].width) { if (ga_grow(&ga, MB_MAXBYTES) == FAIL) { ga.ga_len = 0; break; } for (i = 0; (c = cells[col].chars[i]) > 0 || i == 0; ++i) ga.ga_len += utf_char2bytes(c == NUL ? 
' ' : c, (char_u *)ga.ga_data + ga.ga_len); cell2cellattr(&cells[col], &p[col]); } } if (ga_grow(&ga, 1) == FAIL) add_scrollback_line_to_buffer(term, (char_u *)"", 0); else { *((char_u *)ga.ga_data + ga.ga_len) = NUL; add_scrollback_line_to_buffer(term, ga.ga_data, ga.ga_len); } ga_clear(&ga); line = (sb_line_T *)term->tl_scrollback.ga_data + term->tl_scrollback.ga_len; line->sb_cols = len; line->sb_cells = p; line->sb_fill_attr = fill_attr; ++term->tl_scrollback.ga_len; ++term->tl_scrollback_scrolled; } return 0; /* ignored */ } static VTermScreenCallbacks screen_callbacks = { handle_damage, /* damage */ handle_moverect, /* moverect */ handle_movecursor, /* movecursor */ handle_settermprop, /* settermprop */ NULL, /* bell */ handle_resize, /* resize */ handle_pushline, /* sb_pushline */ NULL /* sb_popline */ }; /* * Do the work after the channel of a terminal was closed. * Must be called only when updating_screen is FALSE. * Returns TRUE when a buffer was closed (list of terminals may have changed). */ static int term_after_channel_closed(term_T *term) { /* Unless in Terminal-Normal mode: clear the vterm. */ if (!term->tl_normal_mode) { int fnum = term->tl_buffer->b_fnum; cleanup_vterm(term); if (term->tl_finish == TL_FINISH_CLOSE) { aco_save_T aco; int do_set_w_closing = term->tl_buffer->b_nwindows == 0; // ++close or term_finish == "close" ch_log(NULL, "terminal job finished, closing window"); aucmd_prepbuf(&aco, term->tl_buffer); // Avoid closing the window if we temporarily use it. if (do_set_w_closing) curwin->w_closing = TRUE; do_bufdel(DOBUF_WIPE, (char_u *)"", 1, fnum, fnum, FALSE); if (do_set_w_closing) curwin->w_closing = FALSE; aucmd_restbuf(&aco); return TRUE; } if (term->tl_finish == TL_FINISH_OPEN && term->tl_buffer->b_nwindows == 0) { char buf[50]; /* TODO: use term_opencmd */ ch_log(NULL, "terminal job finished, opening window"); vim_snprintf(buf, sizeof(buf), term->tl_opencmd == NULL ? 
"botright sbuf %d" : (char *)term->tl_opencmd, fnum); do_cmdline_cmd((char_u *)buf); } else ch_log(NULL, "terminal job finished"); } redraw_buf_and_status_later(term->tl_buffer, NOT_VALID); return FALSE; } /* * Called when a channel has been closed. * If this was a channel for a terminal window then finish it up. */ void term_channel_closed(channel_T *ch) { term_T *term; term_T *next_term; int did_one = FALSE; for (term = first_term; term != NULL; term = next_term) { next_term = term->tl_next; if (term->tl_job == ch->ch_job) { term->tl_channel_closed = TRUE; did_one = TRUE; VIM_CLEAR(term->tl_title); VIM_CLEAR(term->tl_status_text); #ifdef WIN3264 if (term->tl_out_fd != NULL) { fclose(term->tl_out_fd); term->tl_out_fd = NULL; } #endif if (updating_screen) { /* Cannot open or close windows now. Can happen when * 'lazyredraw' is set. */ term->tl_channel_recently_closed = TRUE; continue; } if (term_after_channel_closed(term)) next_term = first_term; } } if (did_one) { redraw_statuslines(); /* Need to break out of vgetc(). */ ins_char_typebuf(K_IGNORE); typebuf_was_filled = TRUE; term = curbuf->b_term; if (term != NULL) { if (term->tl_job == ch->ch_job) maketitle(); update_cursor(term, term->tl_cursor_visible); } } } /* * To be called after resetting updating_screen: handle any terminal where the * channel was closed. */ void term_check_channel_closed_recently() { term_T *term; term_T *next_term; for (term = first_term; term != NULL; term = next_term) { next_term = term->tl_next; if (term->tl_channel_recently_closed) { term->tl_channel_recently_closed = FALSE; if (term_after_channel_closed(term)) // start over, the list may have changed next_term = first_term; } } } /* * Fill one screen line from a line of the terminal. * Advances "pos" to past the last column. 
*/ static void term_line2screenline(VTermScreen *screen, VTermPos *pos, int max_col) { int off = screen_get_current_line_off(); for (pos->col = 0; pos->col < max_col; ) { VTermScreenCell cell; int c; if (vterm_screen_get_cell(screen, *pos, &cell) == 0) vim_memset(&cell, 0, sizeof(cell)); c = cell.chars[0]; if (c == NUL) { ScreenLines[off] = ' '; if (enc_utf8) ScreenLinesUC[off] = NUL; } else { if (enc_utf8) { int i; /* composing chars */ for (i = 0; i < Screen_mco && i + 1 < VTERM_MAX_CHARS_PER_CELL; ++i) { ScreenLinesC[i][off] = cell.chars[i + 1]; if (cell.chars[i + 1] == 0) break; } if (c >= 0x80 || (Screen_mco > 0 && ScreenLinesC[0][off] != 0)) { ScreenLines[off] = ' '; ScreenLinesUC[off] = c; } else { ScreenLines[off] = c; ScreenLinesUC[off] = NUL; } } #ifdef WIN3264 else if (has_mbyte && c >= 0x80) { char_u mb[MB_MAXBYTES+1]; WCHAR wc = c; if (WideCharToMultiByte(GetACP(), 0, &wc, 1, (char*)mb, 2, 0, 0) > 1) { ScreenLines[off] = mb[0]; ScreenLines[off + 1] = mb[1]; cell.width = mb_ptr2cells(mb); } else ScreenLines[off] = c; } #endif else ScreenLines[off] = c; } ScreenAttrs[off] = cell2attr(cell.attrs, cell.fg, cell.bg); ++pos->col; ++off; if (cell.width == 2) { if (enc_utf8) ScreenLinesUC[off] = NUL; /* don't set the second byte to NUL for a DBCS encoding, it * has been set above */ if (enc_utf8 || !has_mbyte) ScreenLines[off] = NUL; ++pos->col; ++off; } } } #if defined(FEAT_GUI) static void update_system_term(term_T *term) { VTermPos pos; VTermScreen *screen; if (term->tl_vterm == NULL) return; screen = vterm_obtain_screen(term->tl_vterm); /* Scroll up to make more room for terminal lines if needed. 
*/ while (term->tl_toprow > 0 && (Rows - term->tl_toprow) < term->tl_dirty_row_end) { int save_p_more = p_more; p_more = FALSE; msg_row = Rows - 1; msg_puts((char_u *)"\n"); p_more = save_p_more; --term->tl_toprow; } for (pos.row = term->tl_dirty_row_start; pos.row < term->tl_dirty_row_end && pos.row < Rows; ++pos.row) { if (pos.row < term->tl_rows) { int max_col = MIN(Columns, term->tl_cols); term_line2screenline(screen, &pos, max_col); } else pos.col = 0; screen_line(term->tl_toprow + pos.row, 0, pos.col, Columns, FALSE); } term->tl_dirty_row_start = MAX_ROW; term->tl_dirty_row_end = 0; update_cursor(term, TRUE); } #endif /* * Return TRUE if window "wp" is to be redrawn with term_update_window(). * Returns FALSE when there is no terminal running in this window or it is in * Terminal-Normal mode. */ int term_do_update_window(win_T *wp) { term_T *term = wp->w_buffer->b_term; return term != NULL && term->tl_vterm != NULL && !term->tl_normal_mode; } /* * Called to update a window that contains an active terminal. */ void term_update_window(win_T *wp) { term_T *term = wp->w_buffer->b_term; VTerm *vterm; VTermScreen *screen; VTermState *state; VTermPos pos; int rows, cols; int newrows, newcols; int minsize; win_T *twp; vterm = term->tl_vterm; screen = vterm_obtain_screen(vterm); state = vterm_obtain_state(vterm); /* We use NOT_VALID on a resize or scroll, redraw everything then. With * SOME_VALID only redraw what was marked dirty. */ if (wp->w_redr_type > SOME_VALID) { term->tl_dirty_row_start = 0; term->tl_dirty_row_end = MAX_ROW; if (term->tl_postponed_scroll > 0 && term->tl_postponed_scroll < term->tl_rows / 3) /* Scrolling is usually faster than redrawing, when there are only * a few lines to scroll. */ term_scroll_up(term, 0, term->tl_postponed_scroll); term->tl_postponed_scroll = 0; } /* * If the window was resized a redraw will be triggered and we get here. * Adjust the size of the vterm unless 'termwinsize' specifies a fixed size. 
*/ minsize = parse_termwinsize(wp, &rows, &cols); newrows = 99999; newcols = 99999; FOR_ALL_WINDOWS(twp) { /* When more than one window shows the same terminal, use the * smallest size. */ if (twp->w_buffer == term->tl_buffer) { newrows = MIN(newrows, twp->w_height); newcols = MIN(newcols, twp->w_width); } } newrows = rows == 0 ? newrows : minsize ? MAX(rows, newrows) : rows; newcols = cols == 0 ? newcols : minsize ? MAX(cols, newcols) : cols; if (term->tl_rows != newrows || term->tl_cols != newcols) { term->tl_vterm_size_changed = TRUE; vterm_set_size(vterm, newrows, newcols); ch_log(term->tl_job->jv_channel, "Resizing terminal to %d lines", newrows); term_report_winsize(term, newrows, newcols); // Updating the terminal size will cause the snapshot to be cleared. // When not in terminal_loop() we need to restore it. if (term != in_terminal_loop) may_move_terminal_to_buffer(term, FALSE); } /* The cursor may have been moved when resizing. */ vterm_state_get_cursorpos(state, &pos); position_cursor(wp, &pos); for (pos.row = term->tl_dirty_row_start; pos.row < term->tl_dirty_row_end && pos.row < wp->w_height; ++pos.row) { if (pos.row < term->tl_rows) { int max_col = MIN(wp->w_width, term->tl_cols); term_line2screenline(screen, &pos, max_col); } else pos.col = 0; screen_line(wp->w_winrow + pos.row #ifdef FEAT_MENU + winbar_height(wp) #endif , wp->w_wincol, pos.col, wp->w_width, FALSE); } term->tl_dirty_row_start = MAX_ROW; term->tl_dirty_row_end = 0; } /* * Return TRUE if "wp" is a terminal window where the job has finished. */ int term_is_finished(buf_T *buf) { return buf->b_term != NULL && buf->b_term->tl_vterm == NULL; } /* * Return TRUE if "wp" is a terminal window where the job has finished or we * are in Terminal-Normal mode, thus we show the buffer contents. */ int term_show_buffer(buf_T *buf) { term_T *term = buf->b_term; return term != NULL && (term->tl_vterm == NULL || term->tl_normal_mode); } /* * The current buffer is going to be changed. 
If there is terminal * highlighting remove it now. */ void term_change_in_curbuf(void) { term_T *term = curbuf->b_term; if (term_is_finished(curbuf) && term->tl_scrollback.ga_len > 0) { free_scrollback(term); redraw_buf_later(term->tl_buffer, NOT_VALID); /* The buffer is now like a normal buffer, it cannot be easily * abandoned when changed. */ set_string_option_direct((char_u *)"buftype", -1, (char_u *)"", OPT_FREE|OPT_LOCAL, 0); } } /* * Get the screen attribute for a position in the buffer. * Use a negative "col" to get the filler background color. */ int term_get_attr(buf_T *buf, linenr_T lnum, int col) { term_T *term = buf->b_term; sb_line_T *line; cellattr_T *cellattr; if (lnum > term->tl_scrollback.ga_len) cellattr = &term->tl_default_color; else { line = (sb_line_T *)term->tl_scrollback.ga_data + lnum - 1; if (col < 0 || col >= line->sb_cols) cellattr = &line->sb_fill_attr; else cellattr = line->sb_cells + col; } return cell2attr(cellattr->attrs, cellattr->fg, cellattr->bg); } /* * Convert a cterm color number 0 - 255 to RGB. * This is compatible with xterm. */ static void cterm_color2vterm(int nr, VTermColor *rgb) { cterm_color2rgb(nr, &rgb->red, &rgb->green, &rgb->blue, &rgb->ansi_index); } /* * Initialize term->tl_default_color from the environment. */ static void init_default_colors(term_T *term) { VTermColor *fg, *bg; int fgval, bgval; int id; vim_memset(&term->tl_default_color.attrs, 0, sizeof(VTermScreenCellAttrs)); term->tl_default_color.width = 1; fg = &term->tl_default_color.fg; bg = &term->tl_default_color.bg; /* Vterm uses a default black background. Set it to white when * 'background' is "light". */ if (*p_bg == 'l') { fgval = 0; bgval = 255; } else { fgval = 255; bgval = 0; } fg->red = fg->green = fg->blue = fgval; bg->red = bg->green = bg->blue = bgval; fg->ansi_index = bg->ansi_index = VTERM_ANSI_INDEX_DEFAULT; /* The "Terminal" highlight group overrules the defaults. 
*/ id = syn_name2id((char_u *)"Terminal"); /* Use the actual color for the GUI and when 'termguicolors' is set. */ #if defined(FEAT_GUI) || defined(FEAT_TERMGUICOLORS) if (0 # ifdef FEAT_GUI || gui.in_use # endif # ifdef FEAT_TERMGUICOLORS || p_tgc # ifdef FEAT_VTP /* Finally get INVALCOLOR on this execution path */ || (!p_tgc && t_colors >= 256) # endif # endif ) { guicolor_T fg_rgb = INVALCOLOR; guicolor_T bg_rgb = INVALCOLOR; if (id != 0) syn_id2colors(id, &fg_rgb, &bg_rgb); # ifdef FEAT_GUI if (gui.in_use) { if (fg_rgb == INVALCOLOR) fg_rgb = gui.norm_pixel; if (bg_rgb == INVALCOLOR) bg_rgb = gui.back_pixel; } # ifdef FEAT_TERMGUICOLORS else # endif # endif # ifdef FEAT_TERMGUICOLORS { if (fg_rgb == INVALCOLOR) fg_rgb = cterm_normal_fg_gui_color; if (bg_rgb == INVALCOLOR) bg_rgb = cterm_normal_bg_gui_color; } # endif if (fg_rgb != INVALCOLOR) { long_u rgb = GUI_MCH_GET_RGB(fg_rgb); fg->red = (unsigned)(rgb >> 16); fg->green = (unsigned)(rgb >> 8) & 255; fg->blue = (unsigned)rgb & 255; } if (bg_rgb != INVALCOLOR) { long_u rgb = GUI_MCH_GET_RGB(bg_rgb); bg->red = (unsigned)(rgb >> 16); bg->green = (unsigned)(rgb >> 8) & 255; bg->blue = (unsigned)rgb & 255; } } else #endif if (id != 0 && t_colors >= 16) { if (term_default_cterm_fg >= 0) cterm_color2vterm(term_default_cterm_fg, fg); if (term_default_cterm_bg >= 0) cterm_color2vterm(term_default_cterm_bg, bg); } else { #if defined(WIN3264) && !defined(FEAT_GUI_W32) int tmp; #endif /* In an MS-Windows console we know the normal colors. 
*/ if (cterm_normal_fg_color > 0) { cterm_color2vterm(cterm_normal_fg_color - 1, fg); # if defined(WIN3264) && !defined(FEAT_GUI_W32) tmp = fg->red; fg->red = fg->blue; fg->blue = tmp; # endif } # ifdef FEAT_TERMRESPONSE else term_get_fg_color(&fg->red, &fg->green, &fg->blue); # endif if (cterm_normal_bg_color > 0) { cterm_color2vterm(cterm_normal_bg_color - 1, bg); # if defined(WIN3264) && !defined(FEAT_GUI_W32) tmp = bg->red; bg->red = bg->blue; bg->blue = tmp; # endif } # ifdef FEAT_TERMRESPONSE else term_get_bg_color(&bg->red, &bg->green, &bg->blue); # endif } } #if defined(FEAT_GUI) || defined(FEAT_TERMGUICOLORS) /* * Set the 16 ANSI colors from array of RGB values */ static void set_vterm_palette(VTerm *vterm, long_u *rgb) { int index = 0; VTermState *state = vterm_obtain_state(vterm); for (; index < 16; index++) { VTermColor color; color.red = (unsigned)(rgb[index] >> 16); color.green = (unsigned)(rgb[index] >> 8) & 255; color.blue = (unsigned)rgb[index] & 255; vterm_state_set_palette_color(state, index, &color); } } /* * Set the ANSI color palette from a list of colors */ static int set_ansi_colors_list(VTerm *vterm, list_T *list) { int n = 0; long_u rgb[16]; listitem_T *li = list->lv_first; for (; li != NULL && n < 16; li = li->li_next, n++) { char_u *color_name; guicolor_T guicolor; color_name = tv_get_string_chk(&li->li_tv); if (color_name == NULL) return FAIL; guicolor = GUI_GET_COLOR(color_name); if (guicolor == INVALCOLOR) return FAIL; rgb[n] = GUI_MCH_GET_RGB(guicolor); } if (n != 16 || li != NULL) return FAIL; set_vterm_palette(vterm, rgb); return OK; } /* * Initialize the ANSI color palette from g:terminal_ansi_colors[0:15] */ static void init_vterm_ansi_colors(VTerm *vterm) { dictitem_T *var = find_var((char_u *)"g:terminal_ansi_colors", NULL, TRUE); if (var != NULL && (var->di_tv.v_type != VAR_LIST || var->di_tv.vval.v_list == NULL || set_ansi_colors_list(vterm, var->di_tv.vval.v_list) == FAIL)) EMSG2(_(e_invarg2), "g:terminal_ansi_colors"); } 
#endif /* * Handles a "drop" command from the job in the terminal. * "item" is the file name, "item->li_next" may have options. */ static void handle_drop_command(listitem_T *item) { char_u *fname = tv_get_string(&item->li_tv); listitem_T *opt_item = item->li_next; int bufnr; win_T *wp; tabpage_T *tp; exarg_T ea; char_u *tofree = NULL; bufnr = buflist_add(fname, BLN_LISTED | BLN_NOOPT); FOR_ALL_TAB_WINDOWS(tp, wp) { if (wp->w_buffer->b_fnum == bufnr) { /* buffer is in a window already, go there */ goto_tabpage_win(tp, wp); return; } } vim_memset(&ea, 0, sizeof(ea)); if (opt_item != NULL && opt_item->li_tv.v_type == VAR_DICT && opt_item->li_tv.vval.v_dict != NULL) { dict_T *dict = opt_item->li_tv.vval.v_dict; char_u *p; p = dict_get_string(dict, (char_u *)"ff", FALSE); if (p == NULL) p = dict_get_string(dict, (char_u *)"fileformat", FALSE); if (p != NULL) { if (check_ff_value(p) == FAIL) ch_log(NULL, "Invalid ff argument to drop: %s", p); else ea.force_ff = *p; } p = dict_get_string(dict, (char_u *)"enc", FALSE); if (p == NULL) p = dict_get_string(dict, (char_u *)"encoding", FALSE); if (p != NULL) { ea.cmd = alloc((int)STRLEN(p) + 12); if (ea.cmd != NULL) { sprintf((char *)ea.cmd, "sbuf ++enc=%s", p); ea.force_enc = 11; tofree = ea.cmd; } } p = dict_get_string(dict, (char_u *)"bad", FALSE); if (p != NULL) get_bad_opt(p, &ea); if (dict_find(dict, (char_u *)"bin", -1) != NULL) ea.force_bin = FORCE_BIN; if (dict_find(dict, (char_u *)"binary", -1) != NULL) ea.force_bin = FORCE_BIN; if (dict_find(dict, (char_u *)"nobin", -1) != NULL) ea.force_bin = FORCE_NOBIN; if (dict_find(dict, (char_u *)"nobinary", -1) != NULL) ea.force_bin = FORCE_NOBIN; } /* open in new window, like ":split fname" */ if (ea.cmd == NULL) ea.cmd = (char_u *)"split"; ea.arg = fname; ea.cmdidx = CMD_split; ex_splitview(&ea); vim_free(tofree); } /* * Handles a function call from the job running in a terminal. * "item" is the function name, "item->li_next" has the arguments. 
*/ static void handle_call_command(term_T *term, channel_T *channel, listitem_T *item) { char_u *func; typval_T argvars[2]; typval_T rettv; int doesrange; if (item->li_next == NULL) { ch_log(channel, "Missing function arguments for call"); return; } func = tv_get_string(&item->li_tv); if (STRNCMP(func, "Tapi_", 5) != 0) { ch_log(channel, "Invalid function name: %s", func); return; } argvars[0].v_type = VAR_NUMBER; argvars[0].vval.v_number = term->tl_buffer->b_fnum; argvars[1] = item->li_next->li_tv; if (call_func(func, (int)STRLEN(func), &rettv, 2, argvars, /* argv_func */ NULL, /* firstline */ 1, /* lastline */ 1, &doesrange, /* evaluate */ TRUE, /* partial */ NULL, /* selfdict */ NULL) == OK) { clear_tv(&rettv); ch_log(channel, "Function %s called", func); } else ch_log(channel, "Calling function %s failed", func); } /* * Called by libvterm when it cannot recognize an OSC sequence. * We recognize a terminal API command. */ static int parse_osc(const char *command, size_t cmdlen, void *user) { term_T *term = (term_T *)user; js_read_T reader; typval_T tv; channel_T *channel = term->tl_job == NULL ? NULL : term->tl_job->jv_channel; /* We recognize only OSC 5 1 ; {command} */ if (cmdlen < 3 || STRNCMP(command, "51;", 3) != 0) return 0; /* not handled */ reader.js_buf = vim_strnsave((char_u *)command + 3, (int)(cmdlen - 3)); if (reader.js_buf == NULL) return 1; reader.js_fill = NULL; reader.js_used = 0; if (json_decode(&reader, &tv, 0) == OK && tv.v_type == VAR_LIST && tv.vval.v_list != NULL) { listitem_T *item = tv.vval.v_list->lv_first; if (item == NULL) ch_log(channel, "Missing command"); else { char_u *cmd = tv_get_string(&item->li_tv); /* Make sure an invoked command doesn't delete the buffer (and the * terminal) under our fingers. 
*/ ++term->tl_buffer->b_locked; item = item->li_next; if (item == NULL) ch_log(channel, "Missing argument for %s", cmd); else if (STRCMP(cmd, "drop") == 0) handle_drop_command(item); else if (STRCMP(cmd, "call") == 0) handle_call_command(term, channel, item); else ch_log(channel, "Invalid command received: %s", cmd); --term->tl_buffer->b_locked; } } else ch_log(channel, "Invalid JSON received"); vim_free(reader.js_buf); clear_tv(&tv); return 1; } static VTermParserCallbacks parser_fallbacks = { NULL, /* text */ NULL, /* control */ NULL, /* escape */ NULL, /* csi */ parse_osc, /* osc */ NULL, /* dcs */ NULL /* resize */ }; /* * Use Vim's allocation functions for vterm so profiling works. */ static void * vterm_malloc(size_t size, void *data UNUSED) { return alloc_clear((unsigned) size); } static void vterm_memfree(void *ptr, void *data UNUSED) { vim_free(ptr); } static VTermAllocatorFunctions vterm_allocator = { &vterm_malloc, &vterm_memfree }; /* * Create a new vterm and initialize it. */ static void create_vterm(term_T *term, int rows, int cols) { VTerm *vterm; VTermScreen *screen; VTermState *state; VTermValue value; vterm = vterm_new_with_allocator(rows, cols, &vterm_allocator, NULL); term->tl_vterm = vterm; screen = vterm_obtain_screen(vterm); vterm_screen_set_callbacks(screen, &screen_callbacks, term); /* TODO: depends on 'encoding'. */ vterm_set_utf8(vterm, 1); init_default_colors(term); vterm_state_set_default_colors( vterm_obtain_state(vterm), &term->tl_default_color.fg, &term->tl_default_color.bg); if (t_colors >= 16) vterm_state_set_bold_highbright(vterm_obtain_state(vterm), 1); /* Required to initialize most things. */ vterm_screen_reset(screen, 1 /* hard */); /* Allow using alternate screen. */ vterm_screen_enable_altscreen(screen, 1); /* For unix do not use a blinking cursor. In an xterm this causes the * cursor to blink if it's blinking in the xterm. * For Windows we respect the system wide setting. 
*/ #ifdef WIN3264 if (GetCaretBlinkTime() == INFINITE) value.boolean = 0; else value.boolean = 1; #else value.boolean = 0; #endif state = vterm_obtain_state(vterm); vterm_state_set_termprop(state, VTERM_PROP_CURSORBLINK, &value); vterm_state_set_unrecognised_fallbacks(state, &parser_fallbacks, term); } /* * Return the text to show for the buffer name and status. */ char_u * term_get_status_text(term_T *term) { if (term->tl_status_text == NULL) { char_u *txt; size_t len; if (term->tl_normal_mode) { if (term_job_running(term)) txt = (char_u *)_("Terminal"); else txt = (char_u *)_("Terminal-finished"); } else if (term->tl_title != NULL) txt = term->tl_title; else if (term_none_open(term)) txt = (char_u *)_("active"); else if (term_job_running(term)) txt = (char_u *)_("running"); else txt = (char_u *)_("finished"); len = 9 + STRLEN(term->tl_buffer->b_fname) + STRLEN(txt); term->tl_status_text = alloc((int)len); if (term->tl_status_text != NULL) vim_snprintf((char *)term->tl_status_text, len, "%s [%s]", term->tl_buffer->b_fname, txt); } return term->tl_status_text; } /* * Mark references in jobs of terminals. */ int set_ref_in_term(int copyID) { int abort = FALSE; term_T *term; typval_T tv; for (term = first_term; term != NULL; term = term->tl_next) if (term->tl_job != NULL) { tv.v_type = VAR_JOB; tv.vval.v_job = term->tl_job; abort = abort || set_ref_in_item(&tv, copyID, NULL, NULL); } return abort; } /* * Cache "Terminal" highlight group colors. */ void set_terminal_default_colors(int cterm_fg, int cterm_bg) { term_default_cterm_fg = cterm_fg - 1; term_default_cterm_bg = cterm_bg - 1; } /* * Get the buffer from the first argument in "argvars". * Returns NULL when the buffer is not for a terminal window and logs a message * with "where". 
*/ static buf_T * term_get_buf(typval_T *argvars, char *where) { buf_T *buf; (void)tv_get_number(&argvars[0]); /* issue errmsg if type error */ ++emsg_off; buf = get_buf_tv(&argvars[0], FALSE); --emsg_off; if (buf == NULL || buf->b_term == NULL) { ch_log(NULL, "%s: invalid buffer argument", where); return NULL; } return buf; } static int same_color(VTermColor *a, VTermColor *b) { return a->red == b->red && a->green == b->green && a->blue == b->blue && a->ansi_index == b->ansi_index; } static void dump_term_color(FILE *fd, VTermColor *color) { fprintf(fd, "%02x%02x%02x%d", (int)color->red, (int)color->green, (int)color->blue, (int)color->ansi_index); } /* * "term_dumpwrite(buf, filename, options)" function * * Each screen cell in full is: * |{characters}+{attributes}#{fg-color}{color-idx}#{bg-color}{color-idx} * {characters} is a space for an empty cell * For a double-width character "+" is changed to "*" and the next cell is * skipped. * {attributes} is the decimal value of HL_BOLD + HL_UNDERLINE, etc. * when "&" use the same as the previous cell. * {fg-color} is hex RGB, when "&" use the same as the previous cell. * {bg-color} is hex RGB, when "&" use the same as the previous cell. * {color-idx} is a number from 0 to 255 * * Screen cell with same width, attributes and color as the previous one: * |{characters} * * To use the color of the previous cell, use "&" instead of {color}-{idx}. 
 *
 * Repeating the previous screen cell:
 *    @{count}
 */
    void
f_term_dumpwrite(typval_T *argvars, typval_T *rettv UNUSED)
{
    buf_T	    *buf = term_get_buf(argvars, "term_dumpwrite()");
    term_T	    *term;
    char_u	    *fname;
    int		    max_height = 0;
    int		    max_width = 0;
    stat_T	    st;
    FILE	    *fd;
    VTermPos	    pos;
    VTermScreen	    *screen;
    VTermScreenCell prev_cell;
    VTermState	    *state;
    VTermPos	    cursor_pos;

    /* Writing files is not allowed in restricted/secure mode. */
    if (check_restricted() || check_secure())
	return;
    if (buf == NULL)
	return;
    term = buf->b_term;
    if (term->tl_vterm == NULL)
    {
	EMSG(_("E958: Job already finished"));
	return;
    }

    /* Optional third argument: dict limiting the dumped area. */
    if (argvars[2].v_type != VAR_UNKNOWN)
    {
	dict_T *d;

	if (argvars[2].v_type != VAR_DICT)
	{
	    EMSG(_(e_dictreq));
	    return;
	}
	d = argvars[2].vval.v_dict;
	if (d != NULL)
	{
	    max_height = dict_get_number(d, (char_u *)"rows");
	    max_width = dict_get_number(d, (char_u *)"columns");
	}
    }

    fname = tv_get_string_chk(&argvars[1]);
    if (fname == NULL)
	return;
    /* Refuse to overwrite an existing file. */
    if (mch_stat((char *)fname, &st) >= 0)
    {
	EMSG2(_("E953: File exists: %s"), fname);
	return;
    }

    if (*fname == NUL || (fd = mch_fopen((char *)fname, WRITEBIN)) == NULL)
    {
	EMSG2(_(e_notcreate), *fname == NUL ? (char_u *)_("<empty>") : fname);
	return;
    }

    vim_memset(&prev_cell, 0, sizeof(prev_cell));

    screen = vterm_obtain_screen(term->tl_vterm);
    state = vterm_obtain_state(term->tl_vterm);
    vterm_state_get_cursorpos(state, &cursor_pos);

    for (pos.row = 0; (max_height == 0 || pos.row < max_height)
					 && pos.row < term->tl_rows; ++pos.row)
    {
	int		repeat = 0;

	for (pos.col = 0; (max_width == 0 || pos.col < max_width)
					 && pos.col < term->tl_cols; ++pos.col)
	{
	    VTermScreenCell cell;
	    int		    same_attr;
	    int		    same_chars = TRUE;
	    int		    i;
	    int		    is_cursor_pos = (pos.col == cursor_pos.col
						 && pos.row == cursor_pos.row);

	    if (vterm_screen_get_cell(screen, pos, &cell) == 0)
		vim_memset(&cell, 0, sizeof(cell));

	    /* Compare the cell's characters with the previous cell. */
	    for (i = 0; i < VTERM_MAX_CHARS_PER_CELL; ++i)
	    {
		int c = cell.chars[i];
		int pc = prev_cell.chars[i];

		/* For the first character NUL is the same as space. */
		if (i == 0)
		{
		    c = (c == NUL) ? ' ' : c;
		    pc = (pc == NUL) ? ' ' : pc;
		}
		if (c != pc)
		    same_chars = FALSE;
		if (c == NUL || pc == NUL)
		    break;
	    }
	    same_attr = vtermAttr2hl(cell.attrs)
					       == vtermAttr2hl(prev_cell.attrs)
			&& same_color(&cell.fg, &prev_cell.fg)
			&& same_color(&cell.bg, &prev_cell.bg);
	    if (same_chars && cell.width == prev_cell.width && same_attr
							     && !is_cursor_pos)
	    {
		/* Fully identical cell: accumulate into an "@{count}" run. */
		++repeat;
	    }
	    else
	    {
		if (repeat > 0)
		{
		    fprintf(fd, "@%d", repeat);
		    repeat = 0;
		}
		/* ">" marks the cursor cell, "|" any other cell. */
		fputs(is_cursor_pos ? ">" : "|", fd);

		if (cell.chars[0] == NUL)
		    fputs(" ", fd);
		else
		{
		    char_u	charbuf[10];
		    int		len;

		    for (i = 0; i < VTERM_MAX_CHARS_PER_CELL
						  && cell.chars[i] != NUL; ++i)
		    {
			len = utf_char2bytes(cell.chars[i], charbuf);
			fwrite(charbuf, len, 1, fd);
		    }
		}

		/* When only the characters differ we don't write anything, the
		 * following "|", "@" or NL will indicate using the same
		 * attributes. */
		if (cell.width != prev_cell.width || !same_attr)
		{
		    if (cell.width == 2)
		    {
			fputs("*", fd);
			++pos.col;
		    }
		    else
			fputs("+", fd);

		    if (same_attr)
		    {
			fputs("&", fd);
		    }
		    else
		    {
			fprintf(fd, "%d", vtermAttr2hl(cell.attrs));
			if (same_color(&cell.fg, &prev_cell.fg))
			    fputs("&", fd);
			else
			{
			    fputs("#", fd);
			    dump_term_color(fd, &cell.fg);
			}
			if (same_color(&cell.bg, &prev_cell.bg))
			    fputs("&", fd);
			else
			{
			    fputs("#", fd);
			    dump_term_color(fd, &cell.bg);
			}
		    }
		}

		prev_cell = cell;
	    }
	}
	if (repeat > 0)
	    fprintf(fd, "@%d", repeat);
	fputs("\n", fd);
    }

    fclose(fd);
}

/*
 * Called when a dump is corrupted.  Put a breakpoint here when debugging.
 * Appends "CORRUPT" to the text being collected so the problem is visible in
 * the loaded buffer.
 */
    static void
dump_is_corrupt(garray_T *gap)
{
    ga_concat(gap, (char_u *)"CORRUPT");
}

/*
 * Append a copy of "cell" to the grow array "gap".
 * Silently drops the cell when growing the array fails.
 */
    static void
append_cell(garray_T *gap, cellattr_T *cell)
{
    if (ga_grow(gap, 1) == OK)
    {
	*(((cellattr_T *)gap->ga_data) + gap->ga_len) = *cell;
	++gap->ga_len;
    }
}

/*
 * Read the dump file from "fd" and append lines to the current buffer.
 * Return the cell width of the longest line.
*/
    static int
read_dump_file(FILE *fd, VTermPos *cursor_pos)
{
    int		c;
    garray_T	ga_text;	/* text of the line being collected */
    garray_T	ga_cell;	/* cell attributes of the line */
    char_u	*prev_char = NULL;  /* last cell's characters, for "@" repeat */
    int		attr = 0;
    cellattr_T	cell;		/* carries over between cells ("&" reuse) */
    term_T	*term = curbuf->b_term;
    int		max_cells = 0;
    int		start_row = term->tl_scrollback.ga_len;

    ga_init2(&ga_text, 1, 90);
    ga_init2(&ga_cell, sizeof(cellattr_T), 90);
    vim_memset(&cell, 0, sizeof(cell));
    cursor_pos->row = -1;
    cursor_pos->col = -1;

    c = fgetc(fd);
    for (;;)
    {
	if (c == EOF)
	    break;
	if (c == '\r')
	{
	    // DOS line endings?  Ignore.
	    c = fgetc(fd);
	}
	else if (c == '\n')
	{
	    /* End of a line: append it to the buffer. */
	    if (ga_text.ga_data == NULL)
		dump_is_corrupt(&ga_text);
	    if (ga_grow(&term->tl_scrollback, 1) == OK)
	    {
		sb_line_T   *line = (sb_line_T *)term->tl_scrollback.ga_data
						  + term->tl_scrollback.ga_len;

		if (max_cells < ga_cell.ga_len)
		    max_cells = ga_cell.ga_len;
		line->sb_cols = ga_cell.ga_len;
		/* Ownership of the cell array moves to the scrollback line;
		 * re-init instead of clearing. */
		line->sb_cells = ga_cell.ga_data;
		line->sb_fill_attr = term->tl_default_color;
		++term->tl_scrollback.ga_len;
		ga_init(&ga_cell);

		ga_append(&ga_text, NUL);
		ml_append(curbuf->b_ml.ml_line_count, ga_text.ga_data,
							ga_text.ga_len, FALSE);
	    }
	    else
		ga_clear(&ga_cell);
	    ga_text.ga_len = 0;

	    c = fgetc(fd);
	}
	else if (c == '|' || c == '>')
	{
	    int prev_len = ga_text.ga_len;

	    if (c == '>')
	    {
		if (cursor_pos->row != -1)
		    dump_is_corrupt(&ga_text);	/* duplicate cursor */
		cursor_pos->row = term->tl_scrollback.ga_len - start_row;
		cursor_pos->col = ga_cell.ga_len;
	    }

	    /* normal character(s) followed by "+", "*", "|", "@" or NL */
	    c = fgetc(fd);
	    if (c != EOF)
		ga_append(&ga_text, c);
	    for (;;)
	    {
		c = fgetc(fd);
		if (c == '+' || c == '*' || c == '|' || c == '>' || c == '@'
						      || c == EOF || c == '\n')
		    break;
		ga_append(&ga_text, c);
	    }

	    /* save the character for repeating it */
	    vim_free(prev_char);
	    if (ga_text.ga_data != NULL)
		prev_char = vim_strnsave(((char_u *)ga_text.ga_data) + prev_len,
						    ga_text.ga_len - prev_len);

	    if (c == '@' || c == '|' || c == '>' || c == '\n')
	    {
		/* use all attributes from previous cell */
	    }
	    else if (c == '+' || c == '*')
	    {
		int is_bg;

		cell.width = c == '+' ? 1 : 2;

		c = fgetc(fd);
		if (c == '&')
		{
		    /* use same attr as previous cell */
		    c = fgetc(fd);
		}
		else if (isdigit(c))
		{
		    /* get the decimal attribute */
		    attr = 0;
		    while (isdigit(c))
		    {
			attr = attr * 10 + (c - '0');
			c = fgetc(fd);
		    }
		    hl2vtermAttr(attr, &cell);
		}
		else
		    dump_is_corrupt(&ga_text);

		/* is_bg == 0: fg, is_bg == 1: bg */
		for (is_bg = 0; is_bg <= 1; ++is_bg)
		{
		    if (c == '&')
		    {
			/* use same color as previous cell */
			c = fgetc(fd);
		    }
		    else if (c == '#')
		    {
			int red, green, blue, index = 0;

			/* NOTE(review): a truncated file makes fgetc() return
			 * EOF here, which is passed to hex2nr(); confirm
			 * hex2nr() tolerates EOF (isdigit(EOF) is defined). */
			c = fgetc(fd);
			red = hex2nr(c);
			c = fgetc(fd);
			red = (red << 4) + hex2nr(c);
			c = fgetc(fd);
			green = hex2nr(c);
			c = fgetc(fd);
			green = (green << 4) + hex2nr(c);
			c = fgetc(fd);
			blue = hex2nr(c);
			c = fgetc(fd);
			blue = (blue << 4) + hex2nr(c);
			c = fgetc(fd);
			if (!isdigit(c))
			    dump_is_corrupt(&ga_text);
			while (isdigit(c))
			{
			    index = index * 10 + (c - '0');
			    c = fgetc(fd);
			}

			if (is_bg)
			{
			    cell.bg.red = red;
			    cell.bg.green = green;
			    cell.bg.blue = blue;
			    cell.bg.ansi_index = index;
			}
			else
			{
			    cell.fg.red = red;
			    cell.fg.green = green;
			    cell.fg.blue = blue;
			    cell.fg.ansi_index = index;
			}
		    }
		    else
			dump_is_corrupt(&ga_text);
		}
	    }
	    else
		dump_is_corrupt(&ga_text);

	    append_cell(&ga_cell, &cell);
	}
	else if (c == '@')
	{
	    if (prev_char == NULL)
		dump_is_corrupt(&ga_text);
	    else
	    {
		int count = 0;

		/* repeat previous character, get the count */
		for (;;)
		{
		    c = fgetc(fd);
		    if (!isdigit(c))
			break;
		    count = count * 10 + (c - '0');
		}

		while (count-- > 0)
		{
		    ga_concat(&ga_text, prev_char);
		    append_cell(&ga_cell, &cell);
		}
	    }
	}
	else
	{
	    dump_is_corrupt(&ga_text);
	    c = fgetc(fd);
	}
    }

    if (ga_text.ga_len > 0)
    {
	/* trailing characters after last NL */
	dump_is_corrupt(&ga_text);
	ga_append(&ga_text, NUL);
	ml_append(curbuf->b_ml.ml_line_count, ga_text.ga_data,
							ga_text.ga_len, FALSE);
    }

    ga_clear(&ga_text);
    vim_free(prev_char);

    return max_cells;
}

/*
 * Return an allocated string with at least "text_width" "=" characters and
 * "fname" inserted in the middle.
*/
    static char_u *
get_separator(int text_width, char_u *fname)
{
    int	    width = MAX(text_width, curwin->w_width);
    char_u  *textline;
    int	    fname_size;
    char_u  *p = fname;
    int	    i;
    size_t  off;

    textline = alloc(width + (int)STRLEN(fname) + 1);
    if (textline == NULL)
	return NULL;

    fname_size = vim_strsize(fname);
    if (fname_size < width - 8)
    {
	/* enough room, don't use the full window width */
	width = MAX(text_width, fname_size + 8);
    }
    else if (fname_size > width - 8)
    {
	/* full name doesn't fit, use only the tail */
	p = gettail(fname);
	fname_size = vim_strsize(p);
    }
    /* skip characters until the name fits */
    while (fname_size > width - 8)
    {
	p += (*mb_ptr2len)(p);
	fname_size = vim_strsize(p);
    }

    for (i = 0; i < (width - fname_size) / 2 - 1; ++i)
	textline[i] = '=';
    textline[i++] = ' ';

    STRCPY(textline + i, p);
    off = STRLEN(textline);
    textline[off] = ' ';
    for (i = 1; i < (width - fname_size) / 2; ++i)
	textline[off + i] = '=';
    textline[off + i] = NUL;

    return textline;
}

/*
 * Common for "term_dumpdiff()" and "term_dumpload()".
 * Opens one dump file (or two for a diff), creates a finished terminal
 * buffer, loads the dump(s) into it and, for a diff, appends a line-by-line
 * comparison of the two dumps.
 */
    static void
term_load_dump(typval_T *argvars, typval_T *rettv, int do_diff)
{
    jobopt_T	opt;
    buf_T	*buf;
    char_u	buf1[NUMBUFLEN];
    char_u	buf2[NUMBUFLEN];
    char_u	*fname1;
    char_u	*fname2 = NULL;
    char_u	*fname_tofree = NULL;
    FILE	*fd1;
    FILE	*fd2 = NULL;
    char_u	*textline = NULL;

    /* First open the files.  If this fails bail out. */
    fname1 = tv_get_string_buf_chk(&argvars[0], buf1);
    if (do_diff)
	fname2 = tv_get_string_buf_chk(&argvars[1], buf2);
    if (fname1 == NULL || (do_diff && fname2 == NULL))
    {
	EMSG(_(e_invarg));
	return;
    }
    fd1 = mch_fopen((char *)fname1, READBIN);
    if (fd1 == NULL)
    {
	EMSG2(_(e_notread), fname1);
	return;
    }
    if (do_diff)
    {
	fd2 = mch_fopen((char *)fname2, READBIN);
	if (fd2 == NULL)
	{
	    fclose(fd1);
	    EMSG2(_(e_notread), fname2);
	    return;
	}
    }

    init_job_options(&opt);
    /* The options dict is the 3rd argument for a diff, the 2nd for a load. */
    if (argvars[do_diff ? 2 : 1].v_type != VAR_UNKNOWN
	    && get_job_options(&argvars[do_diff ? 2 : 1], &opt, 0,
		    JO2_TERM_NAME + JO2_TERM_COLS + JO2_TERM_ROWS
		    + JO2_VERTICAL + JO2_CURWIN + JO2_NORESTORE) == FAIL)
	goto theend;

    if (opt.jo_term_name == NULL)
    {
	/* Default buffer name: "dump diff {fname1}". */
	size_t len = STRLEN(fname1) + 12;

	fname_tofree = alloc((int)len);
	if (fname_tofree != NULL)
	{
	    vim_snprintf((char *)fname_tofree, len, "dump diff %s", fname1);
	    opt.jo_term_name = fname_tofree;
	}
    }

    buf = term_start(&argvars[0], NULL, &opt, TERM_START_NOJOB);
    if (buf != NULL && buf->b_term != NULL)
    {
	int		i;
	linenr_T	bot_lnum;
	linenr_T	lnum;
	term_T		*term = buf->b_term;
	int		width;
	int		width2;
	VTermPos	cursor_pos1;
	VTermPos	cursor_pos2;

	init_default_colors(term);

	rettv->vval.v_number = buf->b_fnum;

	/* read the files, fill the buffer with the diff */
	width = read_dump_file(fd1, &cursor_pos1);

	/* position the cursor */
	if (cursor_pos1.row >= 0)
	{
	    curwin->w_cursor.lnum = cursor_pos1.row + 1;
	    coladvance(cursor_pos1.col);
	}

	/* Delete the empty line that was in the empty buffer. */
	ml_delete(1, FALSE);

	/* For term_dumpload() we are done here. */
	if (!do_diff)
	    goto theend;

	term->tl_top_diff_rows = curbuf->b_ml.ml_line_count;

	/* Separator line with the first file name. */
	textline = get_separator(width, fname1);
	if (textline == NULL)
	    goto theend;
	if (add_empty_scrollback(term, &term->tl_default_color, 0) == OK)
	    ml_append(curbuf->b_ml.ml_line_count, textline, 0, FALSE);
	vim_free(textline);

	/* Separator line with the second file name. */
	textline = get_separator(width, fname2);
	if (textline == NULL)
	    goto theend;
	if (add_empty_scrollback(term, &term->tl_default_color, 0) == OK)
	    ml_append(curbuf->b_ml.ml_line_count, textline, 0, FALSE);
	textline[width] = NUL;

	bot_lnum = curbuf->b_ml.ml_line_count;
	width2 = read_dump_file(fd2, &cursor_pos2);
	if (width2 > width)
	{
	    /* Second dump is wider: need a wider work line. */
	    vim_free(textline);
	    textline = alloc(width2 + 1);
	    if (textline == NULL)
		goto theend;
	    width = width2;
	    textline[width] = NUL;
	}
	term->tl_bot_diff_rows = curbuf->b_ml.ml_line_count - bot_lnum;

	for (lnum = 1; lnum <= term->tl_top_diff_rows; ++lnum)
	{
	    if (lnum + bot_lnum > curbuf->b_ml.ml_line_count)
	    {
		/* bottom part has fewer rows, fill with "-" */
		for (i = 0; i < width; ++i)
		    textline[i] = '-';
	    }
	    else
	    {
		char_u *line1;
		char_u *line2;
		char_u *p1;
		char_u *p2;
		int	col;
		sb_line_T   *sb_line = (sb_line_T *)term->tl_scrollback.ga_data;
		cellattr_T *cellattr1 = (sb_line + lnum - 1)->sb_cells;
		cellattr_T *cellattr2 = (sb_line + lnum + bot_lnum - 1)
								    ->sb_cells;

		/* Make a copy, getting the second line will invalidate it. */
		line1 = vim_strsave(ml_get(lnum));
		if (line1 == NULL)
		    break;
		p1 = line1;

		line2 = ml_get(lnum + bot_lnum);
		p2 = line2;
		for (col = 0; col < width && *p1 != NUL && *p2 != NUL; ++col)
		{
		    int len1 = utfc_ptr2len(p1);
		    int len2 = utfc_ptr2len(p2);

		    textline[col] = ' ';
		    if (len1 != len2 || STRNCMP(p1, p2, len1) != 0)
			/* text differs */
			textline[col] = 'X';
		    else if (lnum == cursor_pos1.row + 1
			    && col == cursor_pos1.col
			    && (cursor_pos1.row != cursor_pos2.row
					|| cursor_pos1.col != cursor_pos2.col))
			/* cursor in first but not in second */
			textline[col] = '>';
		    else if (lnum == cursor_pos2.row + 1
			    && col == cursor_pos2.col
			    && (cursor_pos1.row != cursor_pos2.row
					|| cursor_pos1.col != cursor_pos2.col))
			/* cursor in second but not in first */
			textline[col] = '<';
		    else if (cellattr1 != NULL && cellattr2 != NULL)
		    {
			if ((cellattr1 + col)->width
						   != (cellattr2 + col)->width)
			    textline[col] = 'w';
			else if (!same_color(&(cellattr1 + col)->fg,
						   &(cellattr2 + col)->fg))
			    textline[col] = 'f';
			else if (!same_color(&(cellattr1 + col)->bg,
						   &(cellattr2 + col)->bg))
			    textline[col] = 'b';
			else if (vtermAttr2hl((cellattr1 + col)->attrs)
				   != vtermAttr2hl(((cellattr2 + col)->attrs)))
			    textline[col] = 'a';
		    }
		    p1 += len1;
		    p2 += len2;
		    /* TODO: handle different width */
		}
		vim_free(line1);

		while (col < width)
		{
		    if (*p1 == NUL && *p2 == NUL)
			textline[col] = '?';
		    else if (*p1 == NUL)
		    {
			textline[col] = '+';
			p2 += utfc_ptr2len(p2);
		    }
		    else
		    {
			textline[col] = '-';
			p1 += utfc_ptr2len(p1);
		    }
		    ++col;
		}
	    }
	    if (add_empty_scrollback(term, &term->tl_default_color,
						 term->tl_top_diff_rows) == OK)
		ml_append(term->tl_top_diff_rows + lnum, textline, 0, FALSE);
	    ++bot_lnum;
	}

	while (lnum + bot_lnum <= curbuf->b_ml.ml_line_count)
	{
	    /* bottom part has more rows, fill with "+" */
	    for (i = 0; i < width; ++i)
		textline[i] = '+';
	    if (add_empty_scrollback(term, &term->tl_default_color,
						 term->tl_top_diff_rows) == OK)
		ml_append(term->tl_top_diff_rows + lnum, textline, 0, FALSE);
	    ++lnum;
	    ++bot_lnum;
	}

	term->tl_cols = width;
	/* looks better without wrapping */
	curwin->w_p_wrap = 0;
    }

theend:
    vim_free(textline);
    vim_free(fname_tofree);
    fclose(fd1);
    if (fd2 != NULL)
	fclose(fd2);
}

/*
 * If the current buffer shows the output of term_dumpdiff(), swap the top and
 * bottom files.
 * Return FAIL when this is not possible.
 */
    int
term_swap_diff()
{
    term_T	*term = curbuf->b_term;
    linenr_T	line_count;
    linenr_T	top_rows;
    linenr_T	bot_rows;
    linenr_T	bot_start;
    linenr_T	lnum;
    char_u	*p;
    sb_line_T	*sb_line;

    if (term == NULL
	    || !term_is_finished(curbuf)
	    || term->tl_top_diff_rows == 0
	    || term->tl_scrollback.ga_len == 0)
	return FAIL;

    line_count = curbuf->b_ml.ml_line_count;
    top_rows = term->tl_top_diff_rows;
    bot_rows = term->tl_bot_diff_rows;
    bot_start = line_count - bot_rows;
    sb_line = (sb_line_T *)term->tl_scrollback.ga_data;

    /* move lines from top to above the bottom part */
    for (lnum = 1; lnum <= top_rows; ++lnum)
    {
	p = vim_strsave(ml_get(1));
	if (p == NULL)
	    return OK;
	ml_append(bot_start, p, 0, FALSE);
	ml_delete(1, FALSE);
	vim_free(p);
    }

    /* move lines from bottom to the top */
    for (lnum = 1; lnum <= bot_rows; ++lnum)
    {
	p = vim_strsave(ml_get(bot_start + lnum));
	if (p == NULL)
	    return OK;
	ml_delete(bot_start + lnum, FALSE);
	ml_append(lnum - 1, p, 0, FALSE);
	vim_free(p);
    }

    if (top_rows == bot_rows)
    {
	/* rows counts are equal, can swap cell properties */
	for (lnum = 0; lnum < top_rows; ++lnum)
	{
	    sb_line_T	temp;

	    temp = *(sb_line + lnum);
	    *(sb_line + lnum) = *(sb_line + bot_start + lnum);
	    *(sb_line + bot_start + lnum) = temp;
	}
    }
    else
    {
	size_t	    size = sizeof(sb_line_T) * term->tl_scrollback.ga_len;
	sb_line_T   *temp = (sb_line_T *)alloc((int)size);

	/* need to copy cell properties into temp memory */
	if (temp != NULL)
	{
	    mch_memmove(temp, term->tl_scrollback.ga_data, size);
	    mch_memmove(term->tl_scrollback.ga_data,
		    temp + bot_start,
		    sizeof(sb_line_T) * bot_rows);
	    mch_memmove((sb_line_T *)term->tl_scrollback.ga_data + bot_rows,
		    temp + top_rows,
		    sizeof(sb_line_T) * (line_count - top_rows - bot_rows));
	    mch_memmove((sb_line_T
*)term->tl_scrollback.ga_data
						      + line_count - top_rows,
		    temp,
		    sizeof(sb_line_T) * top_rows);
	    vim_free(temp);
	}
    }

    term->tl_top_diff_rows = bot_rows;
    term->tl_bot_diff_rows = top_rows;

    update_screen(NOT_VALID);
    return OK;
}

/*
 * "term_dumpdiff(filename, filename, options)" function
 */
    void
f_term_dumpdiff(typval_T *argvars, typval_T *rettv)
{
    term_load_dump(argvars, rettv, TRUE);
}

/*
 * "term_dumpload(filename, options)" function
 */
    void
f_term_dumpload(typval_T *argvars, typval_T *rettv)
{
    term_load_dump(argvars, rettv, FALSE);
}

/*
 * "term_getaltscreen(buf)" function
 */
    void
f_term_getaltscreen(typval_T *argvars, typval_T *rettv)
{
    buf_T	*buf = term_get_buf(argvars, "term_getaltscreen()");

    if (buf == NULL)
	return;
    rettv->vval.v_number = buf->b_term->tl_using_altscreen;
}

/*
 * "term_getattr(attr, name)" function
 * Returns non-zero when attribute "name" is set in the numeric "attr".
 */
    void
f_term_getattr(typval_T *argvars, typval_T *rettv)
{
    int	    attr;
    size_t  i;
    char_u  *name;

    /* Mapping from attribute name to HL_ flag bit. */
    static struct {
	char	    *name;
	int	    attr;
    } attrs[] = {
	{"bold",      HL_BOLD},
	{"italic",    HL_ITALIC},
	{"underline", HL_UNDERLINE},
	{"strike",    HL_STRIKETHROUGH},
	{"reverse",   HL_INVERSE},
    };

    attr = tv_get_number(&argvars[0]);
    name = tv_get_string_chk(&argvars[1]);
    if (name == NULL)
	return;

    for (i = 0; i < sizeof(attrs)/sizeof(attrs[0]); ++i)
	if (STRCMP(name, attrs[i].name) == 0)
	{
	    rettv->vval.v_number = (attr & attrs[i].attr) != 0 ? 1 : 0;
	    break;
	}
}

/*
 * "term_getcursor(buf)" function
 * Returns [row, col, {dict with visible/blink/shape/color}].
 */
    void
f_term_getcursor(typval_T *argvars, typval_T *rettv)
{
    buf_T	*buf = term_get_buf(argvars, "term_getcursor()");
    term_T	*term;
    list_T	*l;
    dict_T	*d;

    if (rettv_list_alloc(rettv) == FAIL)
	return;
    if (buf == NULL)
	return;
    term = buf->b_term;

    l = rettv->vval.v_list;
    list_append_number(l, term->tl_cursor_pos.row + 1);
    list_append_number(l, term->tl_cursor_pos.col + 1);

    d = dict_alloc();
    if (d != NULL)
    {
	dict_add_number(d, "visible", term->tl_cursor_visible);
	/* When the blink state is inverted the reported flag is flipped. */
	dict_add_number(d, "blink", blink_state_is_inverted()
			    ? !term->tl_cursor_blink : term->tl_cursor_blink);
	dict_add_number(d, "shape", term->tl_cursor_shape);
	dict_add_string(d, "color", cursor_color_get(term->tl_cursor_color));
	list_append_dict(l, d);
    }
}

/*
 * "term_getjob(buf)" function
 */
    void
f_term_getjob(typval_T *argvars, typval_T *rettv)
{
    buf_T	*buf = term_get_buf(argvars, "term_getjob()");

    if (buf == NULL)
    {
	rettv->v_type = VAR_SPECIAL;
	rettv->vval.v_number = VVAL_NULL;
	return;
    }
    rettv->v_type = VAR_JOB;
    rettv->vval.v_job = buf->b_term->tl_job;
    if (rettv->vval.v_job != NULL)
	++rettv->vval.v_job->jv_refcount;
}

/*
 * Turn the row argument of a term_ function into a zero-based row number.
 * "." means the current cursor row.
 */
    static int
get_row_number(typval_T *tv, term_T *term)
{
    if (tv->v_type == VAR_STRING
	    && tv->vval.v_string != NULL
	    && STRCMP(tv->vval.v_string, ".") == 0)
	return term->tl_cursor_pos.row;
    return (int)tv_get_number(tv) - 1;
}

/*
 * "term_getline(buf, row)" function
 */
    void
f_term_getline(typval_T *argvars, typval_T *rettv)
{
    buf_T	    *buf = term_get_buf(argvars, "term_getline()");
    term_T	    *term;
    int		    row;

    rettv->v_type = VAR_STRING;
    if (buf == NULL)
	return;
    term = buf->b_term;
    row = get_row_number(&argvars[1], term);

    if (term->tl_vterm == NULL)
    {
	linenr_T lnum = row + term->tl_scrollback_scrolled + 1;

	/* vterm is finished, get the text from the buffer */
	if (lnum > 0 && lnum <= buf->b_ml.ml_line_count)
	    rettv->vval.v_string = vim_strsave(ml_get_buf(buf, lnum, FALSE));
    }
    else
    {
	VTermScreen	*screen = vterm_obtain_screen(term->tl_vterm);
	VTermRect	rect;
	int		len;
	char_u		*p;

	if (row < 0 || row >= term->tl_rows)
	    return;
	/* Worst case: every column holds a maximum-size multi-byte char. */
	len = term->tl_cols * MB_MAXBYTES + 1;
	p = alloc(len);
	if (p == NULL)
	    return;
	rettv->vval.v_string = p;

	rect.start_col = 0;
	rect.end_col = term->tl_cols;
	rect.start_row = row;
	rect.end_row = row + 1;
	p[vterm_screen_get_text(screen, (char *)p, len, rect)] = NUL;
    }
}

/*
 * "term_getscrolled(buf)" function
 */
    void
f_term_getscrolled(typval_T *argvars, typval_T *rettv)
{
    buf_T	*buf = term_get_buf(argvars, "term_getscrolled()");

    if (buf == NULL)
	return;
    rettv->vval.v_number = buf->b_term->tl_scrollback_scrolled;
}

/*
 * "term_getsize(buf)" function
 * Returns [rows, cols] of the terminal.
 */
    void
f_term_getsize(typval_T *argvars, typval_T *rettv)
{
    buf_T	*buf = term_get_buf(argvars, "term_getsize()");
    list_T	*l;

    if (rettv_list_alloc(rettv) == FAIL)
	return;
    if (buf == NULL)
	return;

    l = rettv->vval.v_list;
    list_append_number(l, buf->b_term->tl_rows);
    list_append_number(l, buf->b_term->tl_cols);
}

/*
 * "term_setsize(buf, rows, cols)" function
 * A zero or negative rows/cols argument keeps the current value.
 */
    void
f_term_setsize(typval_T *argvars UNUSED, typval_T *rettv UNUSED)
{
    buf_T	*buf = term_get_buf(argvars, "term_setsize()");
    term_T	*term;
    varnumber_T rows, cols;

    if (buf == NULL)
    {
	EMSG(_("E955: Not a terminal buffer"));
	return;
    }
    if (buf->b_term->tl_vterm == NULL)
	return;
    term = buf->b_term;
    rows = tv_get_number(&argvars[1]);
    rows = rows <= 0 ? term->tl_rows : rows;
    cols = tv_get_number(&argvars[2]);
    cols = cols <= 0 ? term->tl_cols : cols;
    vterm_set_size(term->tl_vterm, rows, cols);
    /* handle_resize() will resize the windows */

    /* Get and remember the size we ended up with.  Update the pty. */
    vterm_get_size(term->tl_vterm, &term->tl_rows, &term->tl_cols);
    term_report_winsize(term, term->tl_rows, term->tl_cols);
}

/*
 * "term_getstatus(buf)" function
 * Returns "running" or "finished", with ",normal" appended in Terminal
 * Normal mode.
 */
    void
f_term_getstatus(typval_T *argvars, typval_T *rettv)
{
    buf_T	*buf = term_get_buf(argvars, "term_getstatus()");
    term_T	*term;
    char_u	val[100];

    rettv->v_type = VAR_STRING;
    if (buf == NULL)
	return;
    term = buf->b_term;

    if (term_job_running(term))
	STRCPY(val, "running");
    else
	STRCPY(val, "finished");
    if (term->tl_normal_mode)
	STRCAT(val, ",normal");
    rettv->vval.v_string = vim_strsave(val);
}

/*
 * "term_gettitle(buf)" function
 */
    void
f_term_gettitle(typval_T *argvars, typval_T *rettv)
{
    buf_T	*buf = term_get_buf(argvars, "term_gettitle()");

    rettv->v_type = VAR_STRING;
    if (buf == NULL)
	return;

    if (buf->b_term->tl_title != NULL)
	rettv->vval.v_string = vim_strsave(buf->b_term->tl_title);
}

/*
 * "term_gettty(buf)" function
 */
    void
f_term_gettty(typval_T *argvars, typval_T *rettv)
{
    buf_T	*buf = term_get_buf(argvars,
"term_gettty()");
    char_u	*p = NULL;
    int		num = 0;

    rettv->v_type = VAR_STRING;
    if (buf == NULL)
	return;
    if (argvars[1].v_type != VAR_UNKNOWN)
	num = tv_get_number(&argvars[1]);

    /* 0 (default): the pty Vim writes to; 1: the pty the job reads from. */
    switch (num)
    {
	case 0:
	    if (buf->b_term->tl_job != NULL)
		p = buf->b_term->tl_job->jv_tty_out;
	    break;
	case 1:
	    if (buf->b_term->tl_job != NULL)
		p = buf->b_term->tl_job->jv_tty_in;
	    break;
	default:
	    EMSG2(_(e_invarg2), tv_get_string(&argvars[1]));
	    return;
    }
    if (p != NULL)
	rettv->vval.v_string = vim_strsave(p);
}

/*
 * "term_list()" function
 * Returns a list with the buffer numbers of all terminal buffers.
 */
    void
f_term_list(typval_T *argvars UNUSED, typval_T *rettv)
{
    term_T	*tp;
    list_T	*l;

    if (rettv_list_alloc(rettv) == FAIL || first_term == NULL)
	return;

    l = rettv->vval.v_list;
    for (tp = first_term; tp != NULL; tp = tp->tl_next)
	if (tp != NULL && tp->tl_buffer != NULL)
	    if (list_append_number(l,
				   (varnumber_T)tp->tl_buffer->b_fnum) == FAIL)
		return;
}

/*
 * "term_scrape(buf, row)" function
 * Returns a list with one dict per screen cell: chars, fg, bg, attr, width.
 */
    void
f_term_scrape(typval_T *argvars, typval_T *rettv)
{
    buf_T	    *buf = term_get_buf(argvars, "term_scrape()");
    VTermScreen	    *screen = NULL;
    VTermPos	    pos;
    list_T	    *l;
    term_T	    *term;
    char_u	    *p;
    sb_line_T	    *line;

    if (rettv_list_alloc(rettv) == FAIL)
	return;
    if (buf == NULL)
	return;
    term = buf->b_term;

    l = rettv->vval.v_list;
    pos.row = get_row_number(&argvars[1], term);
    if (term->tl_vterm != NULL)
    {
	/* Terminal still running: read cells from the live vterm screen. */
	screen = vterm_obtain_screen(term->tl_vterm);
	p = NULL;
	line = NULL;
    }
    else
    {
	/* Terminal finished: read cells from the saved scrollback. */
	linenr_T lnum = pos.row + term->tl_scrollback_scrolled;

	if (lnum < 0 || lnum >= term->tl_scrollback.ga_len)
	    return;
	p = ml_get_buf(buf, lnum + 1, FALSE);
	line = (sb_line_T *)term->tl_scrollback.ga_data + lnum;
    }

    for (pos.col = 0; pos.col < term->tl_cols; )
    {
	dict_T		*dcell;
	int		width;
	VTermScreenCellAttrs attrs;
	VTermColor	fg, bg;
	char_u		rgb[8];
	char_u		mbs[MB_MAXBYTES * VTERM_MAX_CHARS_PER_CELL + 1];
	int		off = 0;
	int		i;

	if (screen == NULL)
	{
	    cellattr_T	*cellattr;
	    int		len;

	    /* vterm has finished, get the cell from scrollback */
	    if (pos.col >= line->sb_cols)
		break;
	    cellattr = line->sb_cells + pos.col;
	    width = cellattr->width;
	    attrs = cellattr->attrs;
	    fg = cellattr->fg;
	    bg = cellattr->bg;
	    len = MB_PTR2LEN(p);
	    mch_memmove(mbs, p, len);
	    mbs[len] = NUL;
	    p += len;
	}
	else
	{
	    VTermScreenCell cell;
	    if (vterm_screen_get_cell(screen, pos, &cell) == 0)
		break;
	    for (i = 0; i < VTERM_MAX_CHARS_PER_CELL; ++i)
	    {
		if (cell.chars[i] == 0)
		    break;
		off += (*utf_char2bytes)((int)cell.chars[i], mbs + off);
	    }
	    mbs[off] = NUL;
	    width = cell.width;
	    attrs = cell.attrs;
	    fg = cell.fg;
	    bg = cell.bg;
	}
	dcell = dict_alloc();
	if (dcell == NULL)
	    break;
	list_append_dict(l, dcell);

	dict_add_string(dcell, "chars", mbs);

	/* "#rrggbb" needs exactly 8 bytes including the NUL. */
	vim_snprintf((char *)rgb, 8, "#%02x%02x%02x",
				     fg.red, fg.green, fg.blue);
	dict_add_string(dcell, "fg", rgb);
	vim_snprintf((char *)rgb, 8, "#%02x%02x%02x",
				     bg.red, bg.green, bg.blue);
	dict_add_string(dcell, "bg", rgb);

	dict_add_number(dcell, "attr", cell2attr(attrs, fg, bg));
	dict_add_number(dcell, "width", width);

	++pos.col;
	if (width == 2)
	    ++pos.col;
    }
}

/*
 * "term_sendkeys(buf, keys)" function
 * Feeds each character of "keys" to the terminal job; K_SPECIAL sequences
 * are decoded into special keys.
 */
    void
f_term_sendkeys(typval_T *argvars, typval_T *rettv)
{
    buf_T	*buf = term_get_buf(argvars, "term_sendkeys()");
    char_u	*msg;
    term_T	*term;

    rettv->v_type = VAR_UNKNOWN;
    if (buf == NULL)
	return;

    msg = tv_get_string_chk(&argvars[1]);
    if (msg == NULL)
	return;
    term = buf->b_term;
    if (term->tl_vterm == NULL)
	return;

    while (*msg != NUL)
    {
	int	c;

	if (*msg == K_SPECIAL && msg[1] != NUL && msg[2] != NUL)
	{
	    c = TO_SPECIAL(msg[1], msg[2]);
	    msg += 3;
	}
	else
	{
	    c = PTR2CHAR(msg);
	    msg += MB_CPTR2LEN(msg);
	}
	send_keys_to_term(term, c, FALSE);
    }
}

#if defined(FEAT_GUI) || defined(FEAT_TERMGUICOLORS) || defined(PROTO)
/*
 * "term_getansicolors(buf)" function
 * Returns a list of 16 "#rrggbb" strings for the terminal's ANSI palette.
 */
    void
f_term_getansicolors(typval_T *argvars, typval_T *rettv)
{
    buf_T	*buf = term_get_buf(argvars, "term_getansicolors()");
    term_T	*term;
    VTermState	*state;
    VTermColor  color;
    char_u	hexbuf[10];
    int		index;
    list_T	*list;

    if (rettv_list_alloc(rettv) == FAIL)
	return;

    if (buf == NULL)
	return;
    term = buf->b_term;
    if (term->tl_vterm == NULL)
	return;

    list =
rettv->vval.v_list;
    state = vterm_obtain_state(term->tl_vterm);
    for (index = 0; index < 16; index++)
    {
	vterm_state_get_palette_color(state, index, &color);
	sprintf((char *)hexbuf, "#%02x%02x%02x",
		color.red, color.green, color.blue);
	if (list_append_string(list, hexbuf, 7) == FAIL)
	    return;
    }
}

/*
 * "term_setansicolors(buf, list)" function
 * Sets the 16-entry ANSI palette of the terminal from a list of colors.
 */
    void
f_term_setansicolors(typval_T *argvars, typval_T *rettv UNUSED)
{
    buf_T	*buf = term_get_buf(argvars, "term_setansicolors()");
    term_T	*term;

    if (buf == NULL)
	return;
    term = buf->b_term;
    if (term->tl_vterm == NULL)
	return;

    if (argvars[1].v_type != VAR_LIST || argvars[1].vval.v_list == NULL)
    {
	EMSG(_(e_listreq));
	return;
    }

    if (set_ansi_colors_list(term->tl_vterm, argvars[1].vval.v_list) == FAIL)
	EMSG(_(e_invarg));
}
#endif

/*
 * "term_setrestore(buf, command)" function
 * Stores the command used to restore this terminal in a session file.
 */
    void
f_term_setrestore(typval_T *argvars UNUSED, typval_T *rettv UNUSED)
{
#if defined(FEAT_SESSION)
    buf_T	*buf = term_get_buf(argvars, "term_setrestore()");
    term_T	*term;
    char_u	*cmd;

    if (buf == NULL)
	return;
    term = buf->b_term;
    vim_free(term->tl_command);
    cmd = tv_get_string_chk(&argvars[1]);
    if (cmd != NULL)
	term->tl_command = vim_strsave(cmd);
    else
	term->tl_command = NULL;
#endif
}

/*
 * "term_setkill(buf, how)" function
 * Stores how the job is to be killed when the buffer is closed.
 */
    void
f_term_setkill(typval_T *argvars UNUSED, typval_T *rettv UNUSED)
{
    buf_T	*buf = term_get_buf(argvars, "term_setkill()");
    term_T	*term;
    char_u	*how;

    if (buf == NULL)
	return;
    term = buf->b_term;
    vim_free(term->tl_kill);
    how = tv_get_string_chk(&argvars[1]);
    if (how != NULL)
	term->tl_kill = vim_strsave(how);
    else
	term->tl_kill = NULL;
}

/*
 * "term_start(command, options)" function
 * Returns the buffer number of the new terminal, or 0 on failure.
 */
    void
f_term_start(typval_T *argvars, typval_T *rettv)
{
    jobopt_T	opt;
    buf_T	*buf;

    init_job_options(&opt);
    if (argvars[1].v_type != VAR_UNKNOWN
	    && get_job_options(&argvars[1], &opt,
		JO_TIMEOUT_ALL + JO_STOPONEXIT
		    + JO_CALLBACK + JO_OUT_CALLBACK + JO_ERR_CALLBACK
		    + JO_EXIT_CB + JO_CLOSE_CALLBACK + JO_OUT_IO,
		JO2_TERM_NAME + JO2_TERM_FINISH + JO2_HIDDEN
		    + JO2_TERM_OPENCMD + JO2_TERM_COLS + JO2_TERM_ROWS
		    + JO2_VERTICAL + JO2_CURWIN + JO2_CWD + JO2_ENV
		    + JO2_EOF_CHARS + JO2_NORESTORE + JO2_TERM_KILL
		    + JO2_ANSI_COLORS) == FAIL)
	return;

    buf = term_start(&argvars[0], NULL, &opt, 0);

    if (buf != NULL && buf->b_term != NULL)
	rettv->vval.v_number = buf->b_fnum;
}

/*
 * "term_wait" function
 * Waits for pending output of the job to be processed; with a dead job keeps
 * reading channel I/O until the channel is closed.
 */
    void
f_term_wait(typval_T *argvars, typval_T *rettv UNUSED)
{
    buf_T	*buf = term_get_buf(argvars, "term_wait()");

    if (buf == NULL)
	return;
    if (buf->b_term->tl_job == NULL)
    {
	ch_log(NULL, "term_wait(): no job to wait for");
	return;
    }
    if (buf->b_term->tl_job->jv_channel == NULL)
	/* channel is closed, nothing to do */
	return;

    /* Get the job status, this will detect a job that finished. */
    if (!buf->b_term->tl_job->jv_channel->ch_keep_open
	    && STRCMP(job_status(buf->b_term->tl_job), "dead") == 0)
    {
	/* The job is dead, keep reading channel I/O until the channel is
	 * closed.  buf->b_term may become NULL if the terminal was closed
	 * while waiting. */
	ch_log(NULL, "term_wait(): waiting for channel to close");
	while (buf->b_term != NULL && !buf->b_term->tl_channel_closed)
	{
	    mch_check_messages();
	    parse_queued_messages();
	    ui_delay(10L, FALSE);
	    if (!buf_valid(buf))
		/* If the terminal is closed when the channel is closed the
		 * buffer disappears. */
		break;
	}
	mch_check_messages();
	parse_queued_messages();
    }
    else
    {
	long wait = 10L;

	mch_check_messages();
	parse_queued_messages();

	/* Wait for some time for any channel I/O. */
	if (argvars[1].v_type != VAR_UNKNOWN)
	    wait = tv_get_number(&argvars[1]);
	ui_delay(wait, TRUE);
	mch_check_messages();

	/* Flushing messages on channels is hopefully sufficient.
	 * TODO: is there a better way? */
	parse_queued_messages();
    }
}

/*
 * Called when a channel has sent all the lines to a terminal.
 * Send a CTRL-D to mark the end of the text.
*/
    void
term_send_eof(channel_T *ch)
{
    term_T	*term;

    for (term = first_term; term != NULL; term = term->tl_next)
	if (term->tl_job == ch->ch_job)
	{
	    if (term->tl_eof_chars != NULL)
	    {
		/* Use the 'eof_chars' option value instead of CTRL-D. */
		channel_send(ch, PART_IN, term->tl_eof_chars,
					(int)STRLEN(term->tl_eof_chars), NULL);
		channel_send(ch, PART_IN, (char_u *)"\r", 1, NULL);
	    }
# ifdef WIN3264
	    else
		/* Default: CTRL-D */
		channel_send(ch, PART_IN, (char_u *)"\004\r", 2, NULL);
# endif
	}
}

/*
 * Return the job of "term", or NULL when there is none.
 */
    job_T *
term_getjob(term_T *term)
{
    return term != NULL ? term->tl_job : NULL;
}

# if defined(WIN3264) || defined(PROTO)

/**************************************
 * 2. MS-Windows implementation.
 */

#  ifndef PROTO

#define WINPTY_SPAWN_FLAG_AUTO_SHUTDOWN 1ul
#define WINPTY_SPAWN_FLAG_EXIT_AFTER_SHUTDOWN 2ull
#define WINPTY_MOUSE_MODE_FORCE		2

/* Pointers to the winpty.dll functions, resolved at runtime by
 * dyn_winpty_init(). */
void* (*winpty_config_new)(UINT64, void*);
void* (*winpty_open)(void*, void*);
void* (*winpty_spawn_config_new)(UINT64, void*, LPCWSTR, void*, void*, void*);
BOOL (*winpty_spawn)(void*, void*, HANDLE*, HANDLE*, DWORD*, void*);
void (*winpty_config_set_mouse_mode)(void*, int);
void (*winpty_config_set_initial_size)(void*, int, int);
LPCWSTR (*winpty_conin_name)(void*);
LPCWSTR (*winpty_conout_name)(void*);
LPCWSTR (*winpty_conerr_name)(void*);
void (*winpty_free)(void*);
void (*winpty_config_free)(void*);
void (*winpty_spawn_config_free)(void*);
void (*winpty_error_free)(void*);
LPCWSTR (*winpty_error_msg)(void*);
BOOL (*winpty_set_size)(void*, int, int, void*);
HANDLE (*winpty_agent_process)(void*);

#define WINPTY_DLL "winpty.dll"

static HINSTANCE hWinPtyDLL = NULL;
#  endif

/*
 * Load winpty.dll (using 'winptydll' when set) and resolve all used entry
 * points.  Return OK when the library is usable, FAIL otherwise; with
 * "verbose" set an error message is given on failure.
 */
    static int
dyn_winpty_init(int verbose)
{
    int i;
    static struct
    {
	char	    *name;
	FARPROC	    *ptr;
    } winpty_entry[] =
    {
	{"winpty_conerr_name", (FARPROC*)&winpty_conerr_name},
	{"winpty_config_free", (FARPROC*)&winpty_config_free},
	{"winpty_config_new", (FARPROC*)&winpty_config_new},
	{"winpty_config_set_mouse_mode",
				      (FARPROC*)&winpty_config_set_mouse_mode},
	{"winpty_config_set_initial_size",
				    (FARPROC*)&winpty_config_set_initial_size},
	{"winpty_conin_name", (FARPROC*)&winpty_conin_name},
	{"winpty_conout_name", (FARPROC*)&winpty_conout_name},
	{"winpty_error_free", (FARPROC*)&winpty_error_free},
	{"winpty_free", (FARPROC*)&winpty_free},
	{"winpty_open", (FARPROC*)&winpty_open},
	{"winpty_spawn", (FARPROC*)&winpty_spawn},
	{"winpty_spawn_config_free", (FARPROC*)&winpty_spawn_config_free},
	{"winpty_spawn_config_new", (FARPROC*)&winpty_spawn_config_new},
	{"winpty_error_msg", (FARPROC*)&winpty_error_msg},
	{"winpty_set_size", (FARPROC*)&winpty_set_size},
	{"winpty_agent_process", (FARPROC*)&winpty_agent_process},
	{NULL, NULL}
    };

    /* No need to initialize twice. */
    if (hWinPtyDLL)
	return OK;
    /* Load winpty.dll, prefer using the 'winptydll' option, fall back to just
     * winpty.dll. */
    if (*p_winptydll != NUL)
	hWinPtyDLL = vimLoadLib((char *)p_winptydll);
    if (!hWinPtyDLL)
	hWinPtyDLL = vimLoadLib(WINPTY_DLL);
    if (!hWinPtyDLL)
    {
	if (verbose)
	    EMSG2(_(e_loadlib), *p_winptydll != NUL ? p_winptydll
						       : (char_u *)WINPTY_DLL);
	return FAIL;
    }
    for (i = 0; winpty_entry[i].name != NULL
					 && winpty_entry[i].ptr != NULL; ++i)
    {
	if ((*winpty_entry[i].ptr = (FARPROC)GetProcAddress(hWinPtyDLL,
					      winpty_entry[i].name)) == NULL)
	{
	    if (verbose)
		EMSG2(_(e_loadfunc), winpty_entry[i].name);
	    return FAIL;
	}
    }

    return OK;
}

/*
 * Create a new terminal of "rows" by "cols" cells.
 * Store a reference in "term".
 * Return OK or FAIL.
*/
    static int
term_and_job_init(
	term_T	    *term,
	typval_T    *argvar,
	char	    **argv UNUSED,
	jobopt_T    *opt,
	jobopt_T    *orig_opt)
{
    WCHAR	    *cmd_wchar = NULL;
    WCHAR	    *cwd_wchar = NULL;
    WCHAR	    *env_wchar = NULL;
    channel_T	    *channel = NULL;
    job_T	    *job = NULL;
    DWORD	    error;
    HANDLE	    jo = NULL;
    HANDLE	    child_process_handle;
    HANDLE	    child_thread_handle;
    void	    *winpty_err = NULL;
    void	    *spawn_config = NULL;
    garray_T	    ga_cmd, ga_env;
    char_u	    *cmd = NULL;

    if (dyn_winpty_init(TRUE) == FAIL)
	return FAIL;
    ga_init2(&ga_cmd, (int)sizeof(char*), 20);
    ga_init2(&ga_env, (int)sizeof(char*), 20);

    /* The command may be given as a string or as a list of arguments. */
    if (argvar->v_type == VAR_STRING)
    {
	cmd = argvar->vval.v_string;
    }
    else if (argvar->v_type == VAR_LIST)
    {
	if (win32_build_cmd(argvar->vval.v_list, &ga_cmd) == FAIL)
	    goto failed;
	cmd = ga_cmd.ga_data;
    }
    if (cmd == NULL || *cmd == NUL)
    {
	EMSG(_(e_invarg));
	goto failed;
    }

    cmd_wchar = enc_to_utf16(cmd, NULL);
    ga_clear(&ga_cmd);
    if (cmd_wchar == NULL)
	goto failed;
    if (opt->jo_cwd != NULL)
	cwd_wchar = enc_to_utf16(opt->jo_cwd, NULL);

    win32_build_env(opt->jo_env, &ga_env, TRUE);
    env_wchar = ga_env.ga_data;

    term->tl_winpty_config = winpty_config_new(0, &winpty_err);
    if (term->tl_winpty_config == NULL)
	goto failed;

    winpty_config_set_mouse_mode(term->tl_winpty_config,
						    WINPTY_MOUSE_MODE_FORCE);
    winpty_config_set_initial_size(term->tl_winpty_config,
						 term->tl_cols, term->tl_rows);
    term->tl_winpty = winpty_open(term->tl_winpty_config, &winpty_err);
    if (term->tl_winpty == NULL)
	goto failed;

    spawn_config = winpty_spawn_config_new(
	    WINPTY_SPAWN_FLAG_AUTO_SHUTDOWN |
		WINPTY_SPAWN_FLAG_EXIT_AFTER_SHUTDOWN,
	    NULL,
	    cmd_wchar,
	    cwd_wchar,
	    env_wchar,
	    &winpty_err);
    if (spawn_config == NULL)
	goto failed;

    channel = add_channel();
    if (channel == NULL)
	goto failed;

    job = job_alloc();
    if (job == NULL)
	goto failed;
    if (argvar->v_type == VAR_STRING)
    {
	int argc;

	build_argv_from_string(cmd, &job->jv_argv, &argc);
    }
    else
    {
	int argc;

	build_argv_from_list(argvar->vval.v_list, &job->jv_argv, &argc);
    }

    if (opt->jo_set & JO_IN_BUF)
	job->jv_in_buf = buflist_findnr(opt->jo_io_buf[PART_IN]);

    if (!winpty_spawn(term->tl_winpty, spawn_config, &child_process_handle,
	    &child_thread_handle, &error, &winpty_err))
	goto failed;

    /* Connect the channel to the winpty console pipes. */
    channel_set_pipes(channel,
	(sock_T)CreateFileW(
	    winpty_conin_name(term->tl_winpty),
	    GENERIC_WRITE, 0, NULL,
	    OPEN_EXISTING, 0, NULL),
	(sock_T)CreateFileW(
	    winpty_conout_name(term->tl_winpty),
	    GENERIC_READ, 0, NULL,
	    OPEN_EXISTING, 0, NULL),
	(sock_T)CreateFileW(
	    winpty_conerr_name(term->tl_winpty),
	    GENERIC_READ, 0, NULL,
	    OPEN_EXISTING, 0, NULL));

    /* Write lines with CR instead of NL. */
    channel->ch_write_text_mode = TRUE;

    jo = CreateJobObject(NULL, NULL);
    if (jo == NULL)
	goto failed;

    if (!AssignProcessToJobObject(jo, child_process_handle))
    {
	/* Failed, switch the way to terminate process with TerminateProcess. */
	CloseHandle(jo);
	jo = NULL;
    }

    winpty_spawn_config_free(spawn_config);
    vim_free(cmd_wchar);
    vim_free(cwd_wchar);
    vim_free(env_wchar);

    create_vterm(term, term->tl_rows, term->tl_cols);

#if defined(FEAT_GUI) || defined(FEAT_TERMGUICOLORS)
    if (opt->jo_set2 & JO2_ANSI_COLORS)
	set_vterm_palette(term->tl_vterm, opt->jo_ansi_colors);
    else
	init_vterm_ansi_colors(term->tl_vterm);
#endif

    channel_set_job(channel, job, opt);
    job_set_options(job, opt);

    job->jv_channel = channel;
    job->jv_proc_info.hProcess = child_process_handle;
    job->jv_proc_info.dwProcessId = GetProcessId(child_process_handle);
    job->jv_job_object = jo;
    job->jv_status = JOB_STARTED;
    job->jv_tty_in = utf16_to_enc(
	    (short_u*)winpty_conin_name(term->tl_winpty), NULL);
    job->jv_tty_out = utf16_to_enc(
	    (short_u*)winpty_conout_name(term->tl_winpty), NULL);
    ++job->jv_refcount;
    term->tl_job = job;

    /* Redirecting stdout and stderr doesn't work at the job level.  Instead
     * open the file here and handle it in.  opt->jo_io was changed in
     * setup_job_options(), use the original flags here. */
    if (orig_opt->jo_io[PART_OUT] == JIO_FILE)
    {
	char_u *fname = opt->jo_io_name[PART_OUT];

	ch_log(channel, "Opening output file %s", fname);
	term->tl_out_fd = mch_fopen((char *)fname, WRITEBIN);
	if (term->tl_out_fd == NULL)
	    EMSG2(_(e_notopen), fname);
    }

    return OK;

failed:
    /* Cleanup: release everything acquired so far; ga_clear() and the
     * winpty/job teardown calls are safe on partially-initialized state. */
    ga_clear(&ga_cmd);
    ga_clear(&ga_env);
    vim_free(cmd_wchar);
    vim_free(cwd_wchar);
    if (spawn_config != NULL)
	winpty_spawn_config_free(spawn_config);
    if (channel != NULL)
	channel_clear(channel);
    if (job != NULL)
    {
	job->jv_channel = NULL;
	job_cleanup(job);
    }
    term->tl_job = NULL;
    if (jo != NULL)
	CloseHandle(jo);
    if (term->tl_winpty != NULL)
	winpty_free(term->tl_winpty);
    term->tl_winpty = NULL;
    if (term->tl_winpty_config != NULL)
	winpty_config_free(term->tl_winpty_config);
    term->tl_winpty_config = NULL;
    if (winpty_err != NULL)
    {
	char_u *msg = utf16_to_enc(
				(short_u *)winpty_error_msg(winpty_err), NULL);

	EMSG(msg);
	winpty_error_free(winpty_err);
    }
    return FAIL;
}

    static int
create_pty_only(term_T *term, jobopt_T *options)
{
    HANDLE	    hPipeIn = INVALID_HANDLE_VALUE;
    HANDLE	    hPipeOut = INVALID_HANDLE_VALUE;
    char	    in_name[80], out_name[80];
    channel_T	    *channel = NULL;

    create_vterm(term, term->tl_rows, term->tl_cols);

    vim_snprintf(in_name, sizeof(in_name), "\\\\.\\pipe\\vim-%d-in-%d",
	    GetCurrentProcessId(),
	    curbuf->b_fnum);
    hPipeIn = CreateNamedPipe(in_name, PIPE_ACCESS_OUTBOUND,
	    PIPE_TYPE_MESSAGE | PIPE_NOWAIT,
	    PIPE_UNLIMITED_INSTANCES,
	    0, 0, NMPWAIT_NOWAIT, NULL);
    if (hPipeIn == INVALID_HANDLE_VALUE)
	goto failed;

    vim_snprintf(out_name, sizeof(out_name), "\\\\.\\pipe\\vim-%d-out-%d",
	    GetCurrentProcessId(),
	    curbuf->b_fnum);
    hPipeOut = CreateNamedPipe(out_name, PIPE_ACCESS_INBOUND,
	    PIPE_TYPE_MESSAGE | PIPE_NOWAIT,
	    PIPE_UNLIMITED_INSTANCES,
	    0, 0, 0, NULL);
    if (hPipeOut == INVALID_HANDLE_VALUE)
	goto failed;

    ConnectNamedPipe(hPipeIn, NULL);
    ConnectNamedPipe(hPipeOut, NULL);

    term->tl_job = job_alloc();
    if (term->tl_job == NULL)
	goto failed;
    ++term->tl_job->jv_refcount;

    /* behave like the job is already finished */
term->tl_job->jv_status = JOB_FINISHED; channel = add_channel(); if (channel == NULL) goto failed; term->tl_job->jv_channel = channel; channel->ch_keep_open = TRUE; channel->ch_named_pipe = TRUE; channel_set_pipes(channel, (sock_T)hPipeIn, (sock_T)hPipeOut, (sock_T)hPipeOut); channel_set_job(channel, term->tl_job, options); term->tl_job->jv_tty_in = vim_strsave((char_u*)in_name); term->tl_job->jv_tty_out = vim_strsave((char_u*)out_name); return OK; failed: if (hPipeIn != NULL) CloseHandle(hPipeIn); if (hPipeOut != NULL) CloseHandle(hPipeOut); return FAIL; } /* * Free the terminal emulator part of "term". */ static void term_free_vterm(term_T *term) { if (term->tl_winpty != NULL) winpty_free(term->tl_winpty); term->tl_winpty = NULL; if (term->tl_winpty_config != NULL) winpty_config_free(term->tl_winpty_config); term->tl_winpty_config = NULL; if (term->tl_vterm != NULL) vterm_free(term->tl_vterm); term->tl_vterm = NULL; } /* * Report the size to the terminal. */ static void term_report_winsize(term_T *term, int rows, int cols) { if (term->tl_winpty) winpty_set_size(term->tl_winpty, cols, rows, NULL); } int terminal_enabled(void) { return dyn_winpty_init(FALSE) == OK; } # else /************************************** * 3. Unix-like implementation. */ /* * Create a new terminal of "rows" by "cols" cells. * Start job for "cmd". * Store the pointers in "term". * When "argv" is not NULL then "argvar" is not used. * Return OK or FAIL. */ static int term_and_job_init( term_T *term, typval_T *argvar, char **argv, jobopt_T *opt, jobopt_T *orig_opt UNUSED) { create_vterm(term, term->tl_rows, term->tl_cols); #if defined(FEAT_GUI) || defined(FEAT_TERMGUICOLORS) if (opt->jo_set2 & JO2_ANSI_COLORS) set_vterm_palette(term->tl_vterm, opt->jo_ansi_colors); else init_vterm_ansi_colors(term->tl_vterm); #endif /* This may change a string in "argvar". 
*/ term->tl_job = job_start(argvar, argv, opt, TRUE); if (term->tl_job != NULL) ++term->tl_job->jv_refcount; return term->tl_job != NULL && term->tl_job->jv_channel != NULL && term->tl_job->jv_status != JOB_FAILED ? OK : FAIL; } static int create_pty_only(term_T *term, jobopt_T *opt) { create_vterm(term, term->tl_rows, term->tl_cols); term->tl_job = job_alloc(); if (term->tl_job == NULL) return FAIL; ++term->tl_job->jv_refcount; /* behave like the job is already finished */ term->tl_job->jv_status = JOB_FINISHED; return mch_create_pty_channel(term->tl_job, opt); } /* * Free the terminal emulator part of "term". */ static void term_free_vterm(term_T *term) { if (term->tl_vterm != NULL) vterm_free(term->tl_vterm); term->tl_vterm = NULL; } /* * Report the size to the terminal. */ static void term_report_winsize(term_T *term, int rows, int cols) { /* Use an ioctl() to report the new window size to the job. */ if (term->tl_job != NULL && term->tl_job->jv_channel != NULL) { int fd = -1; int part; for (part = PART_OUT; part < PART_COUNT; ++part) { fd = term->tl_job->jv_channel->ch_part[part].ch_fd; if (isatty(fd)) break; } if (part < PART_COUNT && mch_report_winsize(fd, rows, cols) == OK) mch_signal_job(term->tl_job, (char_u *)"winch"); } } # endif #endif /* FEAT_TERMINAL */
/* vi:set ts=8 sts=4 sw=4 noet: * * VIM - Vi IMproved by Bram Moolenaar * * Do ":help uganda" in Vim to read copying and usage conditions. * Do ":help credits" in Vim to see a list of people who contributed. * See README.txt for an overview of the Vim source code. */ /* * Terminal window support, see ":help :terminal". * * There are three parts: * 1. Generic code for all systems. * Uses libvterm for the terminal emulator. * 2. The MS-Windows implementation. * Uses winpty. * 3. The Unix-like implementation. * Uses pseudo-tty's (pty's). * * For each terminal one VTerm is constructed. This uses libvterm. A copy of * this library is in the libvterm directory. * * When a terminal window is opened, a job is started that will be connected to * the terminal emulator. * * If the terminal window has keyboard focus, typed keys are converted to the * terminal encoding and writing to the job over a channel. * * If the job produces output, it is written to the terminal emulator. The * terminal emulator invokes callbacks when its screen content changes. The * line range is stored in tl_dirty_row_start and tl_dirty_row_end. Once in a * while, if the terminal window is visible, the screen contents is drawn. * * When the job ends the text is put in a buffer. Redrawing then happens from * that buffer, attributes come from the scrollback buffer tl_scrollback. * When the buffer is changed it is turned into a normal buffer, the attributes * in tl_scrollback are no longer used. */ #include "vim.h" #if defined(FEAT_TERMINAL) || defined(PROTO) #ifndef MIN # define MIN(x,y) ((x) < (y) ? (x) : (y)) #endif #ifndef MAX # define MAX(x,y) ((x) > (y) ? (x) : (y)) #endif #include "libvterm/include/vterm.h" /* This is VTermScreenCell without the characters, thus much smaller. 
*/ typedef struct { VTermScreenCellAttrs attrs; char width; VTermColor fg; VTermColor bg; } cellattr_T; typedef struct sb_line_S { int sb_cols; /* can differ per line */ cellattr_T *sb_cells; /* allocated */ cellattr_T sb_fill_attr; /* for short line */ } sb_line_T; /* typedef term_T in structs.h */ struct terminal_S { term_T *tl_next; VTerm *tl_vterm; job_T *tl_job; buf_T *tl_buffer; #if defined(FEAT_GUI) int tl_system; /* when non-zero used for :!cmd output */ int tl_toprow; /* row with first line of system terminal */ #endif /* Set when setting the size of a vterm, reset after redrawing. */ int tl_vterm_size_changed; int tl_normal_mode; /* TRUE: Terminal-Normal mode */ int tl_channel_closed; int tl_channel_recently_closed; // still need to handle tl_finish int tl_finish; #define TL_FINISH_UNSET NUL #define TL_FINISH_CLOSE 'c' /* ++close or :terminal without argument */ #define TL_FINISH_NOCLOSE 'n' /* ++noclose */ #define TL_FINISH_OPEN 'o' /* ++open */ char_u *tl_opencmd; char_u *tl_eof_chars; #ifdef WIN3264 void *tl_winpty_config; void *tl_winpty; FILE *tl_out_fd; #endif #if defined(FEAT_SESSION) char_u *tl_command; #endif char_u *tl_kill; /* last known vterm size */ int tl_rows; int tl_cols; char_u *tl_title; /* NULL or allocated */ char_u *tl_status_text; /* NULL or allocated */ /* Range of screen rows to update. Zero based. 
*/ int tl_dirty_row_start; /* MAX_ROW if nothing dirty */ int tl_dirty_row_end; /* row below last one to update */ int tl_dirty_snapshot; /* text updated after making snapshot */ #ifdef FEAT_TIMERS int tl_timer_set; proftime_T tl_timer_due; #endif int tl_postponed_scroll; /* to be scrolled up */ garray_T tl_scrollback; int tl_scrollback_scrolled; cellattr_T tl_default_color; linenr_T tl_top_diff_rows; /* rows of top diff file or zero */ linenr_T tl_bot_diff_rows; /* rows of bottom diff file */ VTermPos tl_cursor_pos; int tl_cursor_visible; int tl_cursor_blink; int tl_cursor_shape; /* 1: block, 2: underline, 3: bar */ char_u *tl_cursor_color; /* NULL or allocated */ int tl_using_altscreen; }; #define TMODE_ONCE 1 /* CTRL-\ CTRL-N used */ #define TMODE_LOOP 2 /* CTRL-W N used */ /* * List of all active terminals. */ static term_T *first_term = NULL; /* Terminal active in terminal_loop(). */ static term_T *in_terminal_loop = NULL; #define MAX_ROW 999999 /* used for tl_dirty_row_end to update all rows */ #define KEY_BUF_LEN 200 /* * Functions with separate implementation for MS-Windows and Unix-like systems. */ static int term_and_job_init(term_T *term, typval_T *argvar, char **argv, jobopt_T *opt, jobopt_T *orig_opt); static int create_pty_only(term_T *term, jobopt_T *opt); static void term_report_winsize(term_T *term, int rows, int cols); static void term_free_vterm(term_T *term); #ifdef FEAT_GUI static void update_system_term(term_T *term); #endif /* The character that we know (or assume) that the terminal expects for the * backspace key. */ static int term_backspace_char = BS; /* "Terminal" highlight group colors. */ static int term_default_cterm_fg = -1; static int term_default_cterm_bg = -1; /* Store the last set and the desired cursor properties, so that we only update * them when needed. Doing it unnecessary may result in flicker. 
 */
/* Cursor properties most recently sent to the real terminal. */
static char_u	*last_set_cursor_color = NULL;
/* Cursor properties the active terminal job asked for. */
static char_u	*desired_cursor_color = NULL;
static int	last_set_cursor_shape = -1;
static int	desired_cursor_shape = -1;
static int	last_set_cursor_blink = -1;
static int	desired_cursor_blink = -1;


/**************************************
 * 1. Generic code for all systems.
 */

/*
 * Return TRUE when "lhs_color" and "rhs_color" are equal: both NULL, or both
 * non-NULL with the same string content.
 */
    static int
cursor_color_equal(char_u *lhs_color, char_u *rhs_color)
{
    if (lhs_color != NULL && rhs_color != NULL)
	return STRCMP(lhs_color, rhs_color) == 0;
    return lhs_color == NULL && rhs_color == NULL;
}

/*
 * Replace "*to_color" with an allocated copy of "from_color" (or NULL).
 * The previous value of "*to_color" is freed.
 */
    static void
cursor_color_copy(char_u **to_color, char_u *from_color)
{
    // Avoid a free & alloc if the value is already right.
    if (cursor_color_equal(*to_color, from_color))
	return;
    vim_free(*to_color);
    *to_color = (from_color == NULL) ? NULL : vim_strsave(from_color);
}

/*
 * Return "color", or an empty string when "color" is NULL.  Never NULL.
 */
    static char_u *
cursor_color_get(char_u *color)
{
    return (color == NULL) ? (char_u *)"" : color;
}


/*
 * Parse 'termwinsize' and set "rows" and "cols" for the terminal size in the
 * current window.
 * Sets "rows" and/or "cols" to zero when it should follow the window size.
 * Return TRUE if the size is the minimum size: "24*80".
 */
    static int
parse_termwinsize(win_T *wp, int *rows, int *cols)
{
    int	minsize = FALSE;

    *rows = 0;
    *cols = 0;

    if (*wp->w_p_tws != NUL)
    {
	/* The value is "{rows}x{cols}" or "{rows}*{cols}"; 'x' means exact
	 * size, '*' means minimal size. */
	char_u *p = vim_strchr(wp->w_p_tws, 'x');

	/* Syntax of value was already checked when it's set. */
	if (p == NULL)
	{
	    minsize = TRUE;
	    p = vim_strchr(wp->w_p_tws, '*');
	}
	*rows = atoi((char *)wp->w_p_tws);
	*cols = atoi((char *)p + 1);
    }
    return minsize;
}

/*
 * Determine the terminal size from 'termwinsize' and the current window.
 */
    static void
set_term_and_win_size(term_T *term)
{
#ifdef FEAT_GUI
    if (term->tl_system)
    {
	/* Use the whole screen for the system command.  However, it will start
	 * at the command line and scroll up as needed, using tl_toprow. */
	term->tl_rows = Rows;
	term->tl_cols = Columns;
	return;
    }
#endif
    if (parse_termwinsize(curwin, &term->tl_rows, &term->tl_cols))
    {
	/* Minimal size: never go below the current window size. */
	if (term->tl_rows != 0)
	    term->tl_rows = MAX(term->tl_rows, curwin->w_height);
	if (term->tl_cols != 0)
	    term->tl_cols = MAX(term->tl_cols, curwin->w_width);
    }

    /* A zero dimension follows the window; a non-zero one resizes the window
     * to match. */
    if (term->tl_rows == 0)
	term->tl_rows = curwin->w_height;
    else
	win_setheight_win(term->tl_rows, curwin);
    if (term->tl_cols == 0)
	term->tl_cols = curwin->w_width;
    else
	win_setwidth_win(term->tl_cols, curwin);
}

/*
 * Initialize job options for a terminal job.
 * Caller may overrule some of them.
 */
    void
init_job_options(jobopt_T *opt)
{
    clear_job_options(opt);
    opt->jo_mode = MODE_RAW;
    opt->jo_out_mode = MODE_RAW;
    opt->jo_err_mode = MODE_RAW;
    opt->jo_set = JO_MODE | JO_OUT_MODE | JO_ERR_MODE;
}

/*
 * Set job options mandatory for a terminal job.
 */
    static void
setup_job_options(jobopt_T *opt, int rows, int cols)
{
#ifndef WIN3264
    /* Win32: Redirecting the job output won't work, thus always connect stdout
     * here. */
    if (!(opt->jo_set & JO_OUT_IO))
#endif
    {
	/* Connect stdout to the terminal. */
	opt->jo_io[PART_OUT] = JIO_BUFFER;
	opt->jo_io_buf[PART_OUT] = curbuf->b_fnum;
	opt->jo_modifiable[PART_OUT] = 0;
	opt->jo_set |= JO_OUT_IO + JO_OUT_BUF + JO_OUT_MODIFIABLE;
    }

#ifndef WIN3264
    /* Win32: Redirecting the job output won't work, thus always connect stderr
     * here. */
    if (!(opt->jo_set & JO_ERR_IO))
#endif
    {
	/* Connect stderr to the terminal. */
	opt->jo_io[PART_ERR] = JIO_BUFFER;
	opt->jo_io_buf[PART_ERR] = curbuf->b_fnum;
	opt->jo_modifiable[PART_ERR] = 0;
	opt->jo_set |= JO_ERR_IO + JO_ERR_BUF + JO_ERR_MODIFIABLE;
    }

    opt->jo_pty = TRUE;
    if ((opt->jo_set2 & JO2_TERM_ROWS) == 0)
	opt->jo_term_rows = rows;
    if ((opt->jo_set2 & JO2_TERM_COLS) == 0)
	opt->jo_term_cols = cols;
}

/*
 * Close a terminal buffer (and its window).  Used when creating the terminal
 * fails.
*/ static void term_close_buffer(buf_T *buf, buf_T *old_curbuf) { free_terminal(buf); if (old_curbuf != NULL) { --curbuf->b_nwindows; curbuf = old_curbuf; curwin->w_buffer = curbuf; ++curbuf->b_nwindows; } /* Wiping out the buffer will also close the window and call * free_terminal(). */ do_buffer(DOBUF_WIPE, DOBUF_FIRST, FORWARD, buf->b_fnum, TRUE); } /* * Start a terminal window and return its buffer. * Use either "argvar" or "argv", the other must be NULL. * When "flags" has TERM_START_NOJOB only create the buffer, b_term and open * the window. * Returns NULL when failed. */ buf_T * term_start( typval_T *argvar, char **argv, jobopt_T *opt, int flags) { exarg_T split_ea; win_T *old_curwin = curwin; term_T *term; buf_T *old_curbuf = NULL; int res; buf_T *newbuf; int vertical = opt->jo_vertical || (cmdmod.split & WSP_VERT); jobopt_T orig_opt; // only partly filled if (check_restricted() || check_secure()) return NULL; if ((opt->jo_set & (JO_IN_IO + JO_OUT_IO + JO_ERR_IO)) == (JO_IN_IO + JO_OUT_IO + JO_ERR_IO) || (!(opt->jo_set & JO_OUT_IO) && (opt->jo_set & JO_OUT_BUF)) || (!(opt->jo_set & JO_ERR_IO) && (opt->jo_set & JO_ERR_BUF))) { EMSG(_(e_invarg)); return NULL; } term = (term_T *)alloc_clear(sizeof(term_T)); if (term == NULL) return NULL; term->tl_dirty_row_end = MAX_ROW; term->tl_cursor_visible = TRUE; term->tl_cursor_shape = VTERM_PROP_CURSORSHAPE_BLOCK; term->tl_finish = opt->jo_term_finish; #ifdef FEAT_GUI term->tl_system = (flags & TERM_START_SYSTEM); #endif ga_init2(&term->tl_scrollback, sizeof(sb_line_T), 300); vim_memset(&split_ea, 0, sizeof(split_ea)); if (opt->jo_curwin) { /* Create a new buffer in the current window. */ if (!can_abandon(curbuf, flags & TERM_START_FORCEIT)) { no_write_message(); vim_free(term); return NULL; } if (do_ecmd(0, NULL, NULL, &split_ea, ECMD_ONE, ECMD_HIDE + ((flags & TERM_START_FORCEIT) ? 
ECMD_FORCEIT : 0), curwin) == FAIL) { vim_free(term); return NULL; } } else if (opt->jo_hidden || (flags & TERM_START_SYSTEM)) { buf_T *buf; /* Create a new buffer without a window. Make it the current buffer for * a moment to be able to do the initialisations. */ buf = buflist_new((char_u *)"", NULL, (linenr_T)0, BLN_NEW | BLN_LISTED); if (buf == NULL || ml_open(buf) == FAIL) { vim_free(term); return NULL; } old_curbuf = curbuf; --curbuf->b_nwindows; curbuf = buf; curwin->w_buffer = buf; ++curbuf->b_nwindows; } else { /* Open a new window or tab. */ split_ea.cmdidx = CMD_new; split_ea.cmd = (char_u *)"new"; split_ea.arg = (char_u *)""; if (opt->jo_term_rows > 0 && !vertical) { split_ea.line2 = opt->jo_term_rows; split_ea.addr_count = 1; } if (opt->jo_term_cols > 0 && vertical) { split_ea.line2 = opt->jo_term_cols; split_ea.addr_count = 1; } if (vertical) cmdmod.split |= WSP_VERT; ex_splitview(&split_ea); if (curwin == old_curwin) { /* split failed */ vim_free(term); return NULL; } } term->tl_buffer = curbuf; curbuf->b_term = term; if (!opt->jo_hidden) { /* Only one size was taken care of with :new, do the other one. With * "curwin" both need to be done. */ if (opt->jo_term_rows > 0 && (opt->jo_curwin || vertical)) win_setheight(opt->jo_term_rows); if (opt->jo_term_cols > 0 && (opt->jo_curwin || !vertical)) win_setwidth(opt->jo_term_cols); } /* Link the new terminal in the list of active terminals. 
*/ term->tl_next = first_term; first_term = term; if (opt->jo_term_name != NULL) curbuf->b_ffname = vim_strsave(opt->jo_term_name); else if (argv != NULL) curbuf->b_ffname = vim_strsave((char_u *)"!system"); else { int i; size_t len; char_u *cmd, *p; if (argvar->v_type == VAR_STRING) { cmd = argvar->vval.v_string; if (cmd == NULL) cmd = (char_u *)""; else if (STRCMP(cmd, "NONE") == 0) cmd = (char_u *)"pty"; } else if (argvar->v_type != VAR_LIST || argvar->vval.v_list == NULL || argvar->vval.v_list->lv_len < 1 || (cmd = tv_get_string_chk( &argvar->vval.v_list->lv_first->li_tv)) == NULL) cmd = (char_u*)""; len = STRLEN(cmd) + 10; p = alloc((int)len); for (i = 0; p != NULL; ++i) { /* Prepend a ! to the command name to avoid the buffer name equals * the executable, otherwise ":w!" would overwrite it. */ if (i == 0) vim_snprintf((char *)p, len, "!%s", cmd); else vim_snprintf((char *)p, len, "!%s (%d)", cmd, i); if (buflist_findname(p) == NULL) { vim_free(curbuf->b_ffname); curbuf->b_ffname = p; break; } } } curbuf->b_fname = curbuf->b_ffname; if (opt->jo_term_opencmd != NULL) term->tl_opencmd = vim_strsave(opt->jo_term_opencmd); if (opt->jo_eof_chars != NULL) term->tl_eof_chars = vim_strsave(opt->jo_eof_chars); set_string_option_direct((char_u *)"buftype", -1, (char_u *)"terminal", OPT_FREE|OPT_LOCAL, 0); // Avoid that 'buftype' is reset when this buffer is entered. curbuf->b_p_initialized = TRUE; /* Mark the buffer as not modifiable. It can only be made modifiable after * the job finished. */ curbuf->b_p_ma = FALSE; set_term_and_win_size(term); #ifdef WIN3264 mch_memmove(orig_opt.jo_io, opt->jo_io, sizeof(orig_opt.jo_io)); #endif setup_job_options(opt, term->tl_rows, term->tl_cols); if (flags & TERM_START_NOJOB) return curbuf; #if defined(FEAT_SESSION) /* Remember the command for the session file. 
*/ if (opt->jo_term_norestore || argv != NULL) { term->tl_command = vim_strsave((char_u *)"NONE"); } else if (argvar->v_type == VAR_STRING) { char_u *cmd = argvar->vval.v_string; if (cmd != NULL && STRCMP(cmd, p_sh) != 0) term->tl_command = vim_strsave(cmd); } else if (argvar->v_type == VAR_LIST && argvar->vval.v_list != NULL && argvar->vval.v_list->lv_len > 0) { garray_T ga; listitem_T *item; ga_init2(&ga, 1, 100); for (item = argvar->vval.v_list->lv_first; item != NULL; item = item->li_next) { char_u *s = tv_get_string_chk(&item->li_tv); char_u *p; if (s == NULL) break; p = vim_strsave_fnameescape(s, FALSE); if (p == NULL) break; ga_concat(&ga, p); vim_free(p); ga_append(&ga, ' '); } if (item == NULL) { ga_append(&ga, NUL); term->tl_command = ga.ga_data; } else ga_clear(&ga); } #endif if (opt->jo_term_kill != NULL) { char_u *p = skiptowhite(opt->jo_term_kill); term->tl_kill = vim_strnsave(opt->jo_term_kill, p - opt->jo_term_kill); } /* System dependent: setup the vterm and maybe start the job in it. */ if (argv == NULL && argvar->v_type == VAR_STRING && argvar->vval.v_string != NULL && STRCMP(argvar->vval.v_string, "NONE") == 0) res = create_pty_only(term, opt); else res = term_and_job_init(term, argvar, argv, opt, &orig_opt); newbuf = curbuf; if (res == OK) { /* Get and remember the size we ended up with. Update the pty. */ vterm_get_size(term->tl_vterm, &term->tl_rows, &term->tl_cols); term_report_winsize(term, term->tl_rows, term->tl_cols); #ifdef FEAT_GUI if (term->tl_system) { /* display first line below typed command */ term->tl_toprow = msg_row + 1; term->tl_dirty_row_end = 0; } #endif /* Make sure we don't get stuck on sending keys to the job, it leads to * a deadlock if the job is waiting for Vim to read. 
*/ channel_set_nonblock(term->tl_job->jv_channel, PART_IN); if (old_curbuf != NULL) { --curbuf->b_nwindows; curbuf = old_curbuf; curwin->w_buffer = curbuf; ++curbuf->b_nwindows; } } else { term_close_buffer(curbuf, old_curbuf); return NULL; } apply_autocmds(EVENT_TERMINALOPEN, NULL, NULL, FALSE, newbuf); return newbuf; } /* * ":terminal": open a terminal window and execute a job in it. */ void ex_terminal(exarg_T *eap) { typval_T argvar[2]; jobopt_T opt; char_u *cmd; char_u *tofree = NULL; init_job_options(&opt); cmd = eap->arg; while (*cmd == '+' && *(cmd + 1) == '+') { char_u *p, *ep; cmd += 2; p = skiptowhite(cmd); ep = vim_strchr(cmd, '='); if (ep != NULL && ep < p) p = ep; if ((int)(p - cmd) == 5 && STRNICMP(cmd, "close", 5) == 0) opt.jo_term_finish = 'c'; else if ((int)(p - cmd) == 7 && STRNICMP(cmd, "noclose", 7) == 0) opt.jo_term_finish = 'n'; else if ((int)(p - cmd) == 4 && STRNICMP(cmd, "open", 4) == 0) opt.jo_term_finish = 'o'; else if ((int)(p - cmd) == 6 && STRNICMP(cmd, "curwin", 6) == 0) opt.jo_curwin = 1; else if ((int)(p - cmd) == 6 && STRNICMP(cmd, "hidden", 6) == 0) opt.jo_hidden = 1; else if ((int)(p - cmd) == 9 && STRNICMP(cmd, "norestore", 9) == 0) opt.jo_term_norestore = 1; else if ((int)(p - cmd) == 4 && STRNICMP(cmd, "kill", 4) == 0 && ep != NULL) { opt.jo_set2 |= JO2_TERM_KILL; opt.jo_term_kill = ep + 1; p = skiptowhite(cmd); } else if ((int)(p - cmd) == 4 && STRNICMP(cmd, "rows", 4) == 0 && ep != NULL && isdigit(ep[1])) { opt.jo_set2 |= JO2_TERM_ROWS; opt.jo_term_rows = atoi((char *)ep + 1); p = skiptowhite(cmd); } else if ((int)(p - cmd) == 4 && STRNICMP(cmd, "cols", 4) == 0 && ep != NULL && isdigit(ep[1])) { opt.jo_set2 |= JO2_TERM_COLS; opt.jo_term_cols = atoi((char *)ep + 1); p = skiptowhite(cmd); } else if ((int)(p - cmd) == 3 && STRNICMP(cmd, "eof", 3) == 0 && ep != NULL) { char_u *buf = NULL; char_u *keys; p = skiptowhite(cmd); *p = NUL; keys = replace_termcodes(ep + 1, &buf, TRUE, TRUE, TRUE); opt.jo_set2 |= JO2_EOF_CHARS; 
opt.jo_eof_chars = vim_strsave(keys); vim_free(buf); *p = ' '; } else { if (*p) *p = NUL; EMSG2(_("E181: Invalid attribute: %s"), cmd); goto theend; } cmd = skipwhite(p); } if (*cmd == NUL) { /* Make a copy of 'shell', an autocommand may change the option. */ tofree = cmd = vim_strsave(p_sh); /* default to close when the shell exits */ if (opt.jo_term_finish == NUL) opt.jo_term_finish = 'c'; } if (eap->addr_count > 0) { /* Write lines from current buffer to the job. */ opt.jo_set |= JO_IN_IO | JO_IN_BUF | JO_IN_TOP | JO_IN_BOT; opt.jo_io[PART_IN] = JIO_BUFFER; opt.jo_io_buf[PART_IN] = curbuf->b_fnum; opt.jo_in_top = eap->line1; opt.jo_in_bot = eap->line2; } argvar[0].v_type = VAR_STRING; argvar[0].vval.v_string = cmd; argvar[1].v_type = VAR_UNKNOWN; term_start(argvar, NULL, &opt, eap->forceit ? TERM_START_FORCEIT : 0); vim_free(tofree); theend: vim_free(opt.jo_eof_chars); } #if defined(FEAT_SESSION) || defined(PROTO) /* * Write a :terminal command to the session file to restore the terminal in * window "wp". * Return FAIL if writing fails. */ int term_write_session(FILE *fd, win_T *wp) { term_T *term = wp->w_buffer->b_term; /* Create the terminal and run the command. This is not without * risk, but let's assume the user only creates a session when this * will be OK. */ if (fprintf(fd, "terminal ++curwin ++cols=%d ++rows=%d ", term->tl_cols, term->tl_rows) < 0) return FAIL; if (term->tl_command != NULL && fputs((char *)term->tl_command, fd) < 0) return FAIL; return put_eol(fd); } /* * Return TRUE if "buf" has a terminal that should be restored. */ int term_should_restore(buf_T *buf) { term_T *term = buf->b_term; return term != NULL && (term->tl_command == NULL || STRCMP(term->tl_command, "NONE") != 0); } #endif /* * Free the scrollback buffer for "term". 
 */
    static void
free_scrollback(term_T *term)
{
    int i;

    /* Each scrollback line owns an allocated cell array. */
    for (i = 0; i < term->tl_scrollback.ga_len; ++i)
	vim_free(((sb_line_T *)term->tl_scrollback.ga_data + i)->sb_cells);
    ga_clear(&term->tl_scrollback);
}

/*
 * Free a terminal and everything it refers to.
 * Kills the job if there is one.
 * Called when wiping out a buffer.
 */
    void
free_terminal(buf_T *buf)
{
    term_T	*term = buf->b_term;
    term_T	*tp;

    if (term == NULL)
	return;

    /* Unlink "term" from the list of active terminals. */
    if (first_term == term)
	first_term = term->tl_next;
    else
	for (tp = first_term; tp->tl_next != NULL; tp = tp->tl_next)
	    if (tp->tl_next == term)
	    {
		tp->tl_next = term->tl_next;
		break;
	    }

    if (term->tl_job != NULL)
    {
	/* Only kill the job when it is still running. */
	if (term->tl_job->jv_status != JOB_ENDED
		&& term->tl_job->jv_status != JOB_FINISHED
		&& term->tl_job->jv_status != JOB_FAILED)
	    job_stop(term->tl_job, NULL, "kill");
	job_unref(term->tl_job);
    }

    free_scrollback(term);

    term_free_vterm(term);
    vim_free(term->tl_title);
#ifdef FEAT_SESSION
    vim_free(term->tl_command);
#endif
    vim_free(term->tl_kill);
    vim_free(term->tl_status_text);
    vim_free(term->tl_opencmd);
    vim_free(term->tl_eof_chars);
#ifdef WIN3264
    if (term->tl_out_fd != NULL)
	fclose(term->tl_out_fd);
#endif
    vim_free(term->tl_cursor_color);
    vim_free(term);
    buf->b_term = NULL;
    if (in_terminal_loop == term)
	in_terminal_loop = NULL;
}

/*
 * Get the part that is connected to the tty. Normally this is PART_IN, but
 * when writing buffer lines to the job it can be another.  This makes it
 * possible to do "1,5term vim -".
 */
    static ch_part_T
get_tty_part(term_T *term)
{
#ifdef UNIX
    ch_part_T	parts[3] = {PART_IN, PART_OUT, PART_ERR};
    int		i;

    /* Return the first part whose fd is a tty. */
    for (i = 0; i < 3; ++i)
    {
	int fd = term->tl_job->jv_channel->ch_part[parts[i]].ch_fd;

	if (isatty(fd))
	    return parts[i];
    }
#endif
    return PART_IN;
}

/*
 * Write job output "msg[len]" to the vterm.
 */
    static void
term_write_job_output(term_T *term, char_u *msg, size_t len)
{
    VTerm	*vterm = term->tl_vterm;
    size_t	prevlen = vterm_output_get_buffer_current(vterm);

    vterm_input_write(vterm, (char *)msg, len);

    /* flush vterm buffer when vterm responded to control sequence */
    if (prevlen != vterm_output_get_buffer_current(vterm))
    {
	char   buf[KEY_BUF_LEN];
	size_t curlen = vterm_output_read(vterm, buf, KEY_BUF_LEN);

	if (curlen > 0)
	    channel_send(term->tl_job->jv_channel, get_tty_part(term),
					     (char_u *)buf, (int)curlen, NULL);
    }

    /* this invokes the damage callbacks */
    vterm_screen_flush_damage(vterm_obtain_screen(vterm));
}

/*
 * Position the cursor for the active terminal and optionally redraw.
 * Does nothing in Terminal-Normal mode, where the buffer cursor is used.
 */
    static void
update_cursor(term_T *term, int redraw)
{
    if (term->tl_normal_mode)
	return;
#ifdef FEAT_GUI
    if (term->tl_system)
	windgoto(term->tl_cursor_pos.row + term->tl_toprow,
						      term->tl_cursor_pos.col);
    else
#endif
	setcursor();
    if (redraw)
    {
	if (term->tl_buffer == curbuf && term->tl_cursor_visible)
	    cursor_on();
	out_flush();
#ifdef FEAT_GUI
	if (gui.in_use)
	{
	    gui_update_cursor(FALSE, FALSE);
	    gui_mch_flush();
	}
#endif
    }
}

/*
 * Invoked when "msg" output from a job was received.  Write it to the terminal
 * of "buffer".
 */
    void
write_to_term(buf_T *buffer, char_u *msg, channel_T *channel)
{
    size_t	len = STRLEN(msg);
    term_T	*term = buffer->b_term;

#ifdef WIN3264
    /* Win32: Cannot redirect output of the job, intercept it here and write to
     * the file. */
    if (term->tl_out_fd != NULL)
    {
	ch_log(channel, "Writing %d bytes to output file", (int)len);
	/* NOTE(review): fwrite() result is ignored; a full disk goes
	 * unnoticed here -- TODO confirm whether that is acceptable. */
	fwrite(msg, len, 1, term->tl_out_fd);
	return;
    }
#endif

    if (term->tl_vterm == NULL)
    {
	ch_log(channel, "NOT writing %d bytes to terminal", (int)len);
	return;
    }
    ch_log(channel, "writing %d bytes to terminal", (int)len);
    term_write_job_output(term, msg, len);

#ifdef FEAT_GUI
    if (term->tl_system)
    {
	/* show system output, scrolling up the screen as needed */
	update_system_term(term);
	update_cursor(term, TRUE);
    }
    else
#endif
    /* In Terminal-Normal mode we are displaying the buffer, not the terminal
     * contents, thus no screen update is needed. */
    if (!term->tl_normal_mode)
    {
	// Don't use update_screen() when editing the command line, it gets
	// cleared.
	// TODO: only update once in a while.
	ch_log(term->tl_job->jv_channel, "updating screen");
	if (buffer == curbuf && (State & CMDLINE) == 0)
	{
	    update_screen(VALID_NO_UPDATE);
	    /* update_screen() can be slow, check the terminal wasn't closed
	     * already */
	    if (buffer == curbuf && curbuf->b_term != NULL)
		update_cursor(curbuf->b_term, TRUE);
	}
	else
	    redraw_after_callback(TRUE);
    }
}

/*
 * Send a mouse position and click to the vterm
 */
    static int
term_send_mouse(VTerm *vterm, int button, int pressed)
{
    VTermModifier   mod = VTERM_MOD_NONE;

    /* Translate Vim's mouse position (global) to window-relative cells. */
    vterm_mouse_move(vterm, mouse_row - W_WINROW(curwin),
					    mouse_col - curwin->w_wincol, mod);
    if (button != 0)
	vterm_mouse_button(vterm, button, pressed, mod);
    return TRUE;
}

/* Mouse position where the terminal window got focus; -1 when unset. */
static int enter_mouse_col = -1;
static int enter_mouse_row = -1;

/*
 * Handle a mouse click, drag or release.
 * Return TRUE when a mouse event is sent to the terminal.
 */
    static int
term_mouse_click(VTerm *vterm, int key)
{
#if defined(FEAT_CLIPBOARD)
    /* For modeless selection mouse drag and release events are ignored, unless
     * they are preceded with a mouse down event */
    static int	    ignore_drag_release = TRUE;
    VTermMouseState mouse_state;

    vterm_state_get_mousestate(vterm_obtain_state(vterm), &mouse_state);
    if (mouse_state.flags == 0)
    {
	/* Terminal is not using the mouse, use modeless selection. */
	switch (key)
	{
	case K_LEFTDRAG:
	case K_LEFTRELEASE:
	case K_RIGHTDRAG:
	case K_RIGHTRELEASE:
		/* Ignore drag and release events when the button-down wasn't
		 * seen before. */
		if (ignore_drag_release)
		{
		    int save_mouse_col, save_mouse_row;

		    if (enter_mouse_col < 0)
			break;

		    /* mouse click in the window gave us focus, handle that
		     * click now */
		    save_mouse_col = mouse_col;
		    save_mouse_row = mouse_row;
		    mouse_col = enter_mouse_col;
		    mouse_row = enter_mouse_row;
		    clip_modeless(MOUSE_LEFT, TRUE, FALSE);
		    mouse_col = save_mouse_col;
		    mouse_row = save_mouse_row;
		}
		/* FALLTHROUGH */
	case K_LEFTMOUSE:
	case K_RIGHTMOUSE:
		if (key == K_LEFTRELEASE || key == K_RIGHTRELEASE)
		    ignore_drag_release = TRUE;
		else
		    ignore_drag_release = FALSE;
		/* Should we call mouse_has() here? */
		if (clip_star.available)
		{
		    int	    button, is_click, is_drag;

		    button = get_mouse_button(KEY2TERMCAP1(key),
							&is_click, &is_drag);
		    if (mouse_model_popup() && button == MOUSE_LEFT
					      && (mod_mask & MOD_MASK_SHIFT))
		    {
			/* Translate shift-left to right button. */
			button = MOUSE_RIGHT;
			mod_mask &= ~MOD_MASK_SHIFT;
		    }
		    clip_modeless(button, is_click, is_drag);
		}
		break;

	case K_MIDDLEMOUSE:
		if (clip_star.available)
		    insert_reg('*', TRUE);
		break;
	}
	enter_mouse_col = -1;
	return FALSE;
    }
#endif
    enter_mouse_col = -1;

    /* Terminal wants the mouse: forward the event as vterm button numbers
     * (1 = left, 2 = middle, 3 = right). */
    switch (key)
    {
	case K_LEFTMOUSE:
	case K_LEFTMOUSE_NM:	term_send_mouse(vterm, 1, 1); break;
	case K_LEFTDRAG:	term_send_mouse(vterm, 1, 1); break;
	case K_LEFTRELEASE:
	case K_LEFTRELEASE_NM:	term_send_mouse(vterm, 1, 0); break;
	case K_MOUSEMOVE:	term_send_mouse(vterm, 0, 0); break;
	case K_MIDDLEMOUSE:	term_send_mouse(vterm, 2, 1); break;
	case K_MIDDLEDRAG:	term_send_mouse(vterm, 2, 1); break;
	case K_MIDDLERELEASE:	term_send_mouse(vterm, 2, 0); break;
	case K_RIGHTMOUSE:	term_send_mouse(vterm, 3, 1); break;
	case K_RIGHTDRAG:	term_send_mouse(vterm, 3, 1); break;
	case K_RIGHTRELEASE:	term_send_mouse(vterm, 3, 0); break;
    }
    return TRUE;
}

/*
 * Convert typed key "c" into bytes to send to the job.
 * Return the number of bytes in "buf".
 */
    static int
term_convert_key(term_T *term, int c, char *buf)
{
    VTerm	    *vterm = term->tl_vterm;
    VTermKey	    key = VTERM_KEY_NONE;
    VTermModifier   mod = VTERM_MOD_NONE;
    int		    other = FALSE;	/* key already handled (mouse/paste) */

    switch (c)
    {
	/* don't use VTERM_KEY_ENTER, it may do an unwanted conversion */

				/* don't use VTERM_KEY_BACKSPACE, it always
				 * becomes 0x7f DEL */
	case K_BS:		c = term_backspace_char; break;

	case ESC:		key = VTERM_KEY_ESCAPE; break;
	case K_DEL:		key = VTERM_KEY_DEL; break;
	case K_DOWN:		key = VTERM_KEY_DOWN; break;
	case K_S_DOWN:		mod = VTERM_MOD_SHIFT;
				key = VTERM_KEY_DOWN; break;
	case K_END:		key = VTERM_KEY_END; break;
	case K_S_END:		mod = VTERM_MOD_SHIFT;
				key = VTERM_KEY_END; break;
	case K_C_END:		mod = VTERM_MOD_CTRL;
				key = VTERM_KEY_END; break;
	case K_F10:		key = VTERM_KEY_FUNCTION(10); break;
	case K_F11:		key = VTERM_KEY_FUNCTION(11); break;
	case K_F12:		key = VTERM_KEY_FUNCTION(12); break;
	case K_F1:		key = VTERM_KEY_FUNCTION(1); break;
	case K_F2:		key = VTERM_KEY_FUNCTION(2); break;
	case K_F3:		key = VTERM_KEY_FUNCTION(3); break;
	case K_F4:		key = VTERM_KEY_FUNCTION(4); break;
	case K_F5:		key = VTERM_KEY_FUNCTION(5); break;
	case K_F6:		key = VTERM_KEY_FUNCTION(6); break;
	case K_F7:		key = VTERM_KEY_FUNCTION(7); break;
	case K_F8:		key = VTERM_KEY_FUNCTION(8); break;
	case K_F9:		key = VTERM_KEY_FUNCTION(9); break;
	case K_HOME:		key = VTERM_KEY_HOME; break;
	case K_S_HOME:		mod = VTERM_MOD_SHIFT;
				key = VTERM_KEY_HOME; break;
	case K_C_HOME:		mod = VTERM_MOD_CTRL;
				key = VTERM_KEY_HOME; break;
	case K_INS:		key = VTERM_KEY_INS; break;
	case K_K0:		key = VTERM_KEY_KP_0; break;
	case K_K1:		key = VTERM_KEY_KP_1; break;
	case K_K2:		key = VTERM_KEY_KP_2; break;
	case K_K3:		key = VTERM_KEY_KP_3; break;
	case K_K4:		key = VTERM_KEY_KP_4; break;
	case K_K5:		key = VTERM_KEY_KP_5; break;
	case K_K6:		key = VTERM_KEY_KP_6; break;
	case K_K7:		key = VTERM_KEY_KP_7; break;
	case K_K8:		key = VTERM_KEY_KP_8; break;
	case K_K9:		key = VTERM_KEY_KP_9; break;
	case K_KDEL:		key = VTERM_KEY_DEL; break; /* TODO */
	case K_KDIVIDE:		key = VTERM_KEY_KP_DIVIDE; break;
	case K_KEND:		key = VTERM_KEY_KP_1; break; /* TODO */
	case K_KENTER:		key = VTERM_KEY_KP_ENTER; break;
	case K_KHOME:		key = VTERM_KEY_KP_7; break; /* TODO */
	case K_KINS:		key = VTERM_KEY_KP_0; break; /* TODO */
	case K_KMINUS:		key = VTERM_KEY_KP_MINUS; break;
	case K_KMULTIPLY:	key = VTERM_KEY_KP_MULT; break;
	case K_KPAGEDOWN:	key = VTERM_KEY_KP_3; break; /* TODO */
	case K_KPAGEUP:		key = VTERM_KEY_KP_9; break; /* TODO */
	case K_KPLUS:		key = VTERM_KEY_KP_PLUS; break;
	case K_KPOINT:		key = VTERM_KEY_KP_PERIOD; break;
	case K_LEFT:		key = VTERM_KEY_LEFT; break;
	case K_S_LEFT:		mod = VTERM_MOD_SHIFT;
				key = VTERM_KEY_LEFT; break;
	case K_C_LEFT:		mod = VTERM_MOD_CTRL;
				key = VTERM_KEY_LEFT; break;
	case K_PAGEDOWN:	key = VTERM_KEY_PAGEDOWN; break;
	case K_PAGEUP:		key = VTERM_KEY_PAGEUP; break;
	case K_RIGHT:		key = VTERM_KEY_RIGHT; break;
	case K_S_RIGHT:		mod = VTERM_MOD_SHIFT;
				key = VTERM_KEY_RIGHT; break;
	case K_C_RIGHT:		mod = VTERM_MOD_CTRL;
				key = VTERM_KEY_RIGHT; break;
	case K_UP:		key = VTERM_KEY_UP; break;
	case K_S_UP:		mod = VTERM_MOD_SHIFT;
				key = VTERM_KEY_UP; break;
	case TAB:		key = VTERM_KEY_TAB; break;
	case K_S_TAB:		mod = VTERM_MOD_SHIFT;
				key = VTERM_KEY_TAB; break;

	case K_MOUSEUP:		other = term_send_mouse(vterm, 5, 1); break;
	case K_MOUSEDOWN:	other = term_send_mouse(vterm, 4, 1); break;
	case K_MOUSELEFT:	/* TODO */ return 0;
	case K_MOUSERIGHT:	/* TODO */ return 0;

	case K_LEFTMOUSE:
	case K_LEFTMOUSE_NM:
	case K_LEFTDRAG:
	case K_LEFTRELEASE:
	case K_LEFTRELEASE_NM:
	case K_MOUSEMOVE:
	case K_MIDDLEMOUSE:
	case K_MIDDLEDRAG:
	case K_MIDDLERELEASE:
	case K_RIGHTMOUSE:
	case K_RIGHTDRAG:
	case K_RIGHTRELEASE:	if (!term_mouse_click(vterm, c))
				    return 0;
				other = TRUE;
				break;

	case K_X1MOUSE:		/* TODO */ return 0;
	case K_X1DRAG:		/* TODO */ return 0;
	case K_X1RELEASE:	/* TODO */ return 0;
	case K_X2MOUSE:		/* TODO */ return 0;
	case K_X2DRAG:		/* TODO */ return 0;
	case K_X2RELEASE:	/* TODO */ return 0;

	case K_IGNORE:		return 0;
	case K_NOP:		return 0;
	case K_UNDO:		return 0;
	case K_HELP:		return 0;
	case K_XF1:		key = VTERM_KEY_FUNCTION(1); break;
	case K_XF2:		key = VTERM_KEY_FUNCTION(2); break;
	case K_XF3:		key = VTERM_KEY_FUNCTION(3); break;
	case K_XF4:		key = VTERM_KEY_FUNCTION(4); break;
	case K_SELECT:		return 0;
#ifdef FEAT_GUI
	case K_VER_SCROLLBAR:	return 0;
	case K_HOR_SCROLLBAR:	return 0;
#endif
#ifdef FEAT_GUI_TABLINE
	case K_TABLINE:		return 0;
	case K_TABMENU:		return 0;
#endif
#ifdef FEAT_NETBEANS_INTG
	case K_F21:		key = VTERM_KEY_FUNCTION(21); break;
#endif
#ifdef FEAT_DND
	case K_DROP:		return 0;
#endif
	case K_CURSORHOLD:	return 0;
	case K_PS:		vterm_keyboard_start_paste(vterm);
				other = TRUE;
				break;
	case K_PE:		vterm_keyboard_end_paste(vterm);
				other = TRUE;
				break;
    }

    /*
     * Convert special keys to vterm keys:
     * - Write keys to vterm: vterm_keyboard_key()
     * - Write output to channel.
     * TODO: use mod_mask
     */
    if (key != VTERM_KEY_NONE)
	/* Special key, let vterm convert it. */
	vterm_keyboard_key(vterm, key, mod);
    else if (!other)
	/* Normal character, let vterm convert it. */
	vterm_keyboard_unichar(vterm, c, mod);

    /* Read back the converted escape sequence. */
    return (int)vterm_output_read(vterm, buf, KEY_BUF_LEN);
}

/*
 * Return TRUE if the job for "term" is still running.
 * If "check_job_status" is TRUE update the job status.
 */
    static int
term_job_running_check(term_T *term, int check_job_status)
{
    /* Also consider the job finished when the channel is closed, to avoid a
     * race condition when updating the title. */
    if (term != NULL
	&& term->tl_job != NULL
	&& channel_is_open(term->tl_job->jv_channel))
    {
	if (check_job_status)
	    job_status(term->tl_job);
	return (term->tl_job->jv_status == JOB_STARTED
		|| term->tl_job->jv_channel->ch_keep_open);
    }
    return FALSE;
}

/*
 * Return TRUE if the job for "term" is still running.
 */
    int
term_job_running(term_T *term)
{
    return term_job_running_check(term, FALSE);
}

/*
 * Return TRUE if "term" has an active channel and used ":term NONE".
 */
    int
term_none_open(term_T *term)
{
    /* Also consider the job finished when the channel is closed, to avoid a
     * race condition when updating the title. */
    return term != NULL
	&& term->tl_job != NULL
	&& channel_is_open(term->tl_job->jv_channel)
	&& term->tl_job->jv_channel->ch_keep_open;
}

/*
 * Used when exiting: kill the job in "buf" if so desired.
 * Return OK when the job finished.
 * Return FAIL when the job is still running.
 */
    int
term_try_stop_job(buf_T *buf)
{
    int	    count;
    char    *how = (char *)buf->b_term->tl_kill;

#if defined(FEAT_GUI_DIALOG) || defined(FEAT_CON_DIALOG)
    /* No kill method configured: ask the user when 'confirm' is set. */
    if ((how == NULL || *how == NUL) && (p_confirm || cmdmod.confirm))
    {
	char_u	buff[DIALOG_MSG_SIZE];
	int	ret;

	dialog_msg(buff, _("Kill job in \"%s\"?"), buf->b_fname);
	ret = vim_dialog_yesnocancel(VIM_QUESTION, NULL, buff, 1);
	if (ret == VIM_YES)
	    how = "kill";
	else if (ret == VIM_CANCEL)
	    return FAIL;
    }
#endif
    if (how == NULL || *how == NUL)
	return FAIL;

    job_stop(buf->b_term->tl_job, NULL, how);

    /* wait for up to a second for the job to die */
    for (count = 0; count < 100; ++count)
    {
	/* buffer, terminal and job may be cleaned up while waiting */
	if (!buf_valid(buf)
		|| buf->b_term == NULL
		|| buf->b_term->tl_job == NULL)
	    return OK;

	/* call job_status() to update jv_status */
	job_status(buf->b_term->tl_job);
	if (buf->b_term->tl_job->jv_status >= JOB_ENDED)
	    return OK;
	ui_delay(10L, FALSE);
	mch_check_messages();
	parse_queued_messages();
    }
    return FAIL;
}

/*
 * Add the last line of the scrollback buffer to the buffer in the window.
 * "text" is the line text, "len" its length excluding the NUL.
 */
    static void
add_scrollback_line_to_buffer(term_T *term, char_u *text, int len)
{
    buf_T	*buf = term->tl_buffer;
    int		empty = (buf->b_ml.ml_flags & ML_EMPTY);
    linenr_T	lnum = buf->b_ml.ml_line_count;

#ifdef WIN3264
    /* Convert from UTF-8 to 'encoding' when they differ. */
    if (!enc_utf8 && enc_codepage > 0)
    {
	WCHAR   *ret = NULL;
	int	length = 0;

	MultiByteToWideChar_alloc(CP_UTF8, 0, (char*)text, len + 1,
							&ret, &length);
	if (ret != NULL)
	{
	    WideCharToMultiByte_alloc(enc_codepage, 0,
				ret, length, (char **)&text, &len, 0, 0);
	    vim_free(ret);
	    ml_append_buf(term->tl_buffer, lnum, text, len, FALSE);
	    vim_free(text);
	}
    }
    else
#endif
	ml_append_buf(term->tl_buffer, lnum, text, len + 1, FALSE);
    if (empty)
    {
	/* Delete the empty line that was in the empty buffer. */
	curbuf = buf;
	ml_delete(1, FALSE);
	curbuf = curwin->w_buffer;
    }
}

/*
 * Copy the attributes of a vterm screen cell into a cellattr_T.
 */
    static void
cell2cellattr(const VTermScreenCell *cell, cellattr_T *attr)
{
    attr->width = cell->width;
    attr->attrs = cell->attrs;
    attr->fg = cell->fg;
    attr->bg = cell->bg;
}

/*
 * Return TRUE when the fg and bg colors of "a" and "b" are equal.
 */
    static int
equal_celattr(cellattr_T *a, cellattr_T *b)
{
    /* Comparing the colors should be sufficient. */
    return a->fg.red == b->fg.red
	&& a->fg.green == b->fg.green
	&& a->fg.blue == b->fg.blue
	&& a->bg.red == b->bg.red
	&& a->bg.green == b->bg.green
	&& a->bg.blue == b->bg.blue;
}

/*
 * Add an empty scrollback line to "term".  When "lnum" is not zero, add the
 * line at this position.  Otherwise at the end.
 */
    static int
add_empty_scrollback(term_T *term, cellattr_T *fill_attr, int lnum)
{
    if (ga_grow(&term->tl_scrollback, 1) == OK)
    {
	sb_line_T *line = (sb_line_T *)term->tl_scrollback.ga_data
				      + term->tl_scrollback.ga_len;

	if (lnum > 0)
	{
	    int i;

	    /* Shift the existing lines up to make room at "lnum". */
	    for (i = 0; i < term->tl_scrollback.ga_len - lnum; ++i)
	    {
		*line = *(line - 1);
		--line;
	    }
	}
	line->sb_cols = 0;
	line->sb_cells = NULL;
	line->sb_fill_attr = *fill_attr;
	++term->tl_scrollback.ga_len;
	return OK;
    }
    return FALSE;
}

/*
 * Remove the terminal contents from the scrollback and the buffer.
 * Used before adding a new scrollback line or updating the buffer for lines
 * displayed in the terminal.
 */
    static void
cleanup_scrollback(term_T *term)
{
    sb_line_T	*line;
    garray_T	*gap;

    curbuf = term->tl_buffer;
    gap = &term->tl_scrollback;
    while (curbuf->b_ml.ml_line_count > term->tl_scrollback_scrolled
							    && gap->ga_len > 0)
    {
	ml_delete(curbuf->b_ml.ml_line_count, FALSE);
	line = (sb_line_T *)gap->ga_data + gap->ga_len - 1;
	vim_free(line->sb_cells);
	--gap->ga_len;
    }
    curbuf = curwin->w_buffer;
    if (curbuf == term->tl_buffer)
	check_cursor();
}

/*
 * Add the current lines of the terminal to scrollback and to the buffer.
 */
    static void
update_snapshot(term_T *term)
{
    VTermScreen	    *screen;
    int		    len;
    int		    lines_skipped = 0;
    VTermPos	    pos;
    VTermScreenCell cell;
    cellattr_T	    fill_attr, new_fill_attr;
    cellattr_T	    *p;

    ch_log(term->tl_job == NULL ? NULL : term->tl_job->jv_channel,
				"Adding terminal window snapshot to buffer");

    /* First remove the lines that were appended before, they might be
     * outdated. */
    cleanup_scrollback(term);

    screen = vterm_obtain_screen(term->tl_vterm);
    fill_attr = new_fill_attr = term->tl_default_color;
    for (pos.row = 0; pos.row < term->tl_rows; ++pos.row)
    {
	/* Find the length of the line: the last column with a character. */
	len = 0;
	for (pos.col = 0; pos.col < term->tl_cols; ++pos.col)
	    if (vterm_screen_get_cell(screen, pos, &cell) != 0
						&& cell.chars[0] != NUL)
	    {
		len = pos.col + 1;
		new_fill_attr = term->tl_default_color;
	    }
	    else
		/* Assume the last attr is the filler attr. */
		cell2cellattr(&cell, &new_fill_attr);

	if (len == 0 && equal_celattr(&new_fill_attr, &fill_attr))
	    /* Empty line with unchanged fill attr: defer adding it, so runs
	     * of trailing empty lines are not stored unnecessarily. */
	    ++lines_skipped;
	else
	{
	    while (lines_skipped > 0)
	    {
		/* Line was skipped, add an empty line. */
		--lines_skipped;
		if (add_empty_scrollback(term, &fill_attr, 0) == OK)
		    add_scrollback_line_to_buffer(term, (char_u *)"", 0);
	    }

	    if (len == 0)
		p = NULL;
	    else
		p = (cellattr_T *)alloc((int)sizeof(cellattr_T) * len);
	    if ((p != NULL || len == 0)
				&& ga_grow(&term->tl_scrollback, 1) == OK)
	    {
		garray_T    ga;
		int	    width;
		sb_line_T   *line = (sb_line_T *)term->tl_scrollback.ga_data
						+ term->tl_scrollback.ga_len;

		ga_init2(&ga, 1, 100);
		for (pos.col = 0; pos.col < len; pos.col += width)
		{
		    if (vterm_screen_get_cell(screen, pos, &cell) == 0)
		    {
			/* Cannot get the cell: store a blank. */
			width = 1;
			vim_memset(p + pos.col, 0, sizeof(cellattr_T));
			if (ga_grow(&ga, 1) == OK)
			    ga.ga_len += utf_char2bytes(' ',
					(char_u *)ga.ga_data + ga.ga_len);
		    }
		    else
		    {
			width = cell.width;

			cell2cellattr(&cell, &p[pos.col]);

			// Each character can be up to 6 bytes.
			if (ga_grow(&ga, VTERM_MAX_CHARS_PER_CELL * 6) == OK)
			{
			    int	    i;
			    int	    c;

			    for (i = 0; (c = cell.chars[i]) > 0 || i == 0; ++i)
				ga.ga_len += utf_char2bytes(c == NUL ? ' ' : c,
					(char_u *)ga.ga_data + ga.ga_len);
			}
		    }
		}
		line->sb_cols = len;
		line->sb_cells = p;
		line->sb_fill_attr = new_fill_attr;
		fill_attr = new_fill_attr;
		++term->tl_scrollback.ga_len;

		if (ga_grow(&ga, 1) == FAIL)
		    add_scrollback_line_to_buffer(term, (char_u *)"", 0);
		else
		{
		    *((char_u *)ga.ga_data + ga.ga_len) = NUL;
		    add_scrollback_line_to_buffer(term, ga.ga_data, ga.ga_len);
		}
		ga_clear(&ga);
	    }
	    else
		vim_free(p);
	}
    }

    // Add trailing empty lines.
    for (pos.row = term->tl_scrollback.ga_len;
	    pos.row < term->tl_scrollback_scrolled + term->tl_cursor_pos.row;
	    ++pos.row)
    {
	if (add_empty_scrollback(term, &fill_attr, 0) == OK)
	    add_scrollback_line_to_buffer(term, (char_u *)"", 0);
    }

    term->tl_dirty_snapshot = FALSE;
#ifdef FEAT_TIMERS
    term->tl_timer_set = FALSE;
#endif
}

/*
 * If needed, add the current lines of the terminal to scrollback and to the
 * buffer.  Called after the job has ended and when switching to
 * Terminal-Normal mode.
 * When "redraw" is TRUE redraw the windows that show the terminal.
 */
    static void
may_move_terminal_to_buffer(term_T *term, int redraw)
{
    win_T	    *wp;

    if (term->tl_vterm == NULL)
	return;

    /* Update the snapshot only if something changes or the buffer does not
     * have all the lines. */
    if (term->tl_dirty_snapshot || term->tl_buffer->b_ml.ml_line_count
					    <= term->tl_scrollback_scrolled)
	update_snapshot(term);

    /* Obtain the current background color. */
    vterm_state_get_default_colors(vterm_obtain_state(term->tl_vterm),
		&term->tl_default_color.fg, &term->tl_default_color.bg);

    if (redraw)
	FOR_ALL_WINDOWS(wp)
	{
	    if (wp->w_buffer == term->tl_buffer)
	    {
		wp->w_cursor.lnum = term->tl_buffer->b_ml.ml_line_count;
		wp->w_cursor.col = 0;
		wp->w_valid = 0;
		if (wp->w_cursor.lnum >= wp->w_height)
		{
		    linenr_T min_topline = wp->w_cursor.lnum - wp->w_height + 1;

		    if (wp->w_topline < min_topline)
			wp->w_topline = min_topline;
		}
		redraw_win_later(wp, NOT_VALID);
	    }
	}
}

#if defined(FEAT_TIMERS) || defined(PROTO)
/*
 * Check if any terminal timer expired.
If so, copy text from the terminal to * the buffer. * Return the time until the next timer will expire. */ int term_check_timers(int next_due_arg, proftime_T *now) { term_T *term; int next_due = next_due_arg; for (term = first_term; term != NULL; term = term->tl_next) { if (term->tl_timer_set && !term->tl_normal_mode) { long this_due = proftime_time_left(&term->tl_timer_due, now); if (this_due <= 1) { term->tl_timer_set = FALSE; may_move_terminal_to_buffer(term, FALSE); } else if (next_due == -1 || next_due > this_due) next_due = this_due; } } return next_due; } #endif static void set_terminal_mode(term_T *term, int normal_mode) { term->tl_normal_mode = normal_mode; VIM_CLEAR(term->tl_status_text); if (term->tl_buffer == curbuf) maketitle(); } /* * Called after the job if finished and Terminal mode is not active: * Move the vterm contents into the scrollback buffer and free the vterm. */ static void cleanup_vterm(term_T *term) { if (term->tl_finish != TL_FINISH_CLOSE) may_move_terminal_to_buffer(term, TRUE); term_free_vterm(term); set_terminal_mode(term, FALSE); } /* * Switch from Terminal-Job mode to Terminal-Normal mode. * Suspends updating the terminal window. */ static void term_enter_normal_mode(void) { term_T *term = curbuf->b_term; set_terminal_mode(term, TRUE); /* Append the current terminal contents to the buffer. */ may_move_terminal_to_buffer(term, TRUE); /* Move the window cursor to the position of the cursor in the * terminal. */ curwin->w_cursor.lnum = term->tl_scrollback_scrolled + term->tl_cursor_pos.row + 1; check_cursor(); if (coladvance(term->tl_cursor_pos.col) == FAIL) coladvance(MAXCOL); /* Display the same lines as in the terminal. */ curwin->w_topline = term->tl_scrollback_scrolled + 1; } /* * Returns TRUE if the current window contains a terminal and we are in * Terminal-Normal mode. 
 */
    int
term_in_normal_mode(void)
{
    term_T *term = curbuf->b_term;

    return term != NULL && term->tl_normal_mode;
}

/*
 * Switch from Terminal-Normal mode to Terminal-Job mode.
 * Restores updating the terminal window.
 */
    void
term_enter_job_mode()
{
    term_T	*term = curbuf->b_term;

    set_terminal_mode(term, FALSE);

    if (term->tl_channel_closed)
	cleanup_vterm(term);
    redraw_buf_and_status_later(curbuf, NOT_VALID);
}

/*
 * Get a key from the user with terminal mode mappings.
 * Note: while waiting a terminal may be closed and freed if the channel is
 * closed and ++close was used.
 */
    static int
term_vgetc()
{
    int c;
    int save_State = State;

    State = TERMINAL;
    got_int = FALSE;
#ifdef WIN3264
    ctrl_break_was_pressed = FALSE;
#endif
    c = vgetc();
    got_int = FALSE;
    State = save_State;
    return c;
}

/* TRUE when the last mouse event was outside the terminal window. */
static int	mouse_was_outside = FALSE;

/*
 * Send keys to terminal.
 * Return FAIL when the key needs to be handled in Normal mode.
 * Return OK when the key was dropped or sent to the terminal.
 */
    int
send_keys_to_term(term_T *term, int c, int typed)
{
    char	msg[KEY_BUF_LEN];
    size_t	len;
    int		dragging_outside = FALSE;

    /* Catch keys that need to be handled as in Normal mode. */
    switch (c)
    {
	case NUL:
	case K_ZERO:
	    if (typed)
		stuffcharReadbuff(c);
	    return FAIL;

	case K_TABLINE:
	    stuffcharReadbuff(c);
	    return FAIL;

	case K_IGNORE:
	case K_CANCEL:  // used for :normal when running out of chars
	    return FAIL;

	case K_LEFTDRAG:
	case K_MIDDLEDRAG:
	case K_RIGHTDRAG:
	case K_X1DRAG:
	case K_X2DRAG:
	    dragging_outside = mouse_was_outside;
	    /* FALLTHROUGH */
	case K_LEFTMOUSE:
	case K_LEFTMOUSE_NM:
	case K_LEFTRELEASE:
	case K_LEFTRELEASE_NM:
	case K_MOUSEMOVE:
	case K_MIDDLEMOUSE:
	case K_MIDDLERELEASE:
	case K_RIGHTMOUSE:
	case K_RIGHTRELEASE:
	case K_X1MOUSE:
	case K_X1RELEASE:
	case K_X2MOUSE:
	case K_X2RELEASE:

	case K_MOUSEUP:
	case K_MOUSEDOWN:
	case K_MOUSELEFT:
	case K_MOUSERIGHT:
	    if (mouse_row < W_WINROW(curwin)
		    || mouse_row >= (W_WINROW(curwin) + curwin->w_height)
		    || mouse_col < curwin->w_wincol
		    || mouse_col >= W_ENDCOL(curwin)
		    || dragging_outside)
	    {
		/* click or scroll outside the current window or on status line
		 * or vertical separator */
		if (typed)
		{
		    stuffcharReadbuff(c);
		    mouse_was_outside = TRUE;
		}
		return FAIL;
	    }
    }
    if (typed)
	mouse_was_outside = FALSE;

    /* Convert the typed key to a sequence of bytes for the job. */
    len = term_convert_key(term, c, msg);
    if (len > 0)
	/* TODO: if FAIL is returned, stop? */
	channel_send(term->tl_job->jv_channel, get_tty_part(term),
						(char_u *)msg, (int)len, NULL);

    return OK;
}

/*
 * Place the window cursor at the terminal cursor position "pos", clamped to
 * the window size.
 */
    static void
position_cursor(win_T *wp, VTermPos *pos)
{
    wp->w_wrow = MIN(pos->row, MAX(0, wp->w_height - 1));
    wp->w_wcol = MIN(pos->col, MAX(0, wp->w_width - 1));
    wp->w_valid |= (VALID_WCOL|VALID_WROW);
}

/*
 * Handle CTRL-W "": send register contents to the job.
 */
    static void
term_paste_register(int prev_c UNUSED)
{
    int		c;
    list_T	*l;
    listitem_T	*item;
    long	reglen = 0;
    int		type;

#ifdef FEAT_CMDL_INFO
    if (add_to_showcmd(prev_c))
    if (add_to_showcmd('"'))
	out_flush();
#endif
    c = term_vgetc();
#ifdef FEAT_CMDL_INFO
    clear_showcmd();
#endif
    if (!term_use_loop())
	/* job finished while waiting for a character */
	return;

    /* CTRL-W "= prompt for expression to evaluate. */
    if (c == '=' && get_expr_register() != '=')
	return;
    if (!term_use_loop())
	/* job finished while waiting for a character */
	return;

    l = (list_T *)get_reg_contents(c, GREG_LIST);
    if (l != NULL)
    {
	type = get_reg_type(c, &reglen);
	for (item = l->lv_first; item != NULL; item = item->li_next)
	{
	    char_u *s = tv_get_string(&item->li_tv);
#ifdef WIN3264
	    char_u *tmp = s;

	    /* Convert the text from 'encoding' to UTF-8 for the job. */
	    if (!enc_utf8 && enc_codepage > 0)
	    {
		WCHAR   *ret = NULL;
		int	length = 0;

		MultiByteToWideChar_alloc(enc_codepage, 0, (char *)s,
						(int)STRLEN(s), &ret, &length);
		if (ret != NULL)
		{
		    WideCharToMultiByte_alloc(CP_UTF8, 0,
				    ret, length, (char **)&s, &length, 0, 0);
		    vim_free(ret);
		}
	    }
#endif
	    channel_send(curbuf->b_term->tl_job->jv_channel, PART_IN,
						s, (int)STRLEN(s), NULL);
#ifdef WIN3264
	    if (tmp != s)
		vim_free(s);
#endif

	    if (item->li_next != NULL || type == MLINE)
		channel_send(curbuf->b_term->tl_job->jv_channel, PART_IN,
						(char_u *)"\r", 1, NULL);
	}
	list_free(l);
    }
}

/*
 * Return TRUE when waiting for a character in the terminal, the cursor of the
 * terminal should be displayed.
 */
    int
terminal_is_active()
{
    return in_terminal_loop != NULL;
}

#if defined(FEAT_GUI) || defined(PROTO)
/*
 * Return the cursor shape/color to use for the active terminal.  "fg" and
 * "bg" are set to the colors to use.  The returned entry is static.
 */
    cursorentry_T *
term_get_cursor_shape(guicolor_T *fg, guicolor_T *bg)
{
    term_T		 *term = in_terminal_loop;
    static cursorentry_T entry;
    int			 id;
    guicolor_T		 term_fg, term_bg;

    vim_memset(&entry, 0, sizeof(entry));
    entry.shape = entry.mshape =
	term->tl_cursor_shape == VTERM_PROP_CURSORSHAPE_UNDERLINE ? SHAPE_HOR :
	term->tl_cursor_shape == VTERM_PROP_CURSORSHAPE_BAR_LEFT ? SHAPE_VER :
	SHAPE_BLOCK;
    entry.percentage = 20;
    if (term->tl_cursor_blink)
    {
	entry.blinkwait = 700;
	entry.blinkon = 400;
	entry.blinkoff = 250;
    }
    /* The "Terminal" highlight group overrules the defaults. */
    id = syn_name2id((char_u *)"Terminal");
    if (id != 0)
    {
	syn_id2colors(id, &term_fg, &term_bg);
	*fg = term_bg;
    }
    else
	*fg = gui.back_pixel;

    if (term->tl_cursor_color == NULL)
    {
	if (id != 0)
	    *bg = term_fg;
	else
	    *bg = gui.norm_pixel;
    }
    else
	*bg = color_name2handle(term->tl_cursor_color);
    entry.name = "n";
    entry.used_for = SHAPE_CURSOR;

    return &entry;
}
#endif

/*
 * Emit terminal escape sequences for the desired cursor color/shape/blink
 * when they differ from what was last set.
 */
    static void
may_output_cursor_props(void)
{
    if (!cursor_color_equal(last_set_cursor_color, desired_cursor_color)
	    || last_set_cursor_shape != desired_cursor_shape
	    || last_set_cursor_blink != desired_cursor_blink)
    {
	cursor_color_copy(&last_set_cursor_color, desired_cursor_color);
	last_set_cursor_shape = desired_cursor_shape;
	last_set_cursor_blink = desired_cursor_blink;
	term_cursor_color(cursor_color_get(desired_cursor_color));
	if (desired_cursor_shape == -1 || desired_cursor_blink == -1)
	    /* this will restore the initial cursor style, if possible */
	    ui_cursor_shape_forced(TRUE);
	else
	    term_cursor_shape(desired_cursor_shape, desired_cursor_blink);
    }
}

/*
 * Set the cursor color and shape, if not last set to these.
 */
    static void
may_set_cursor_props(term_T *term)
{
#ifdef FEAT_GUI
    /* For the GUI the cursor properties are obtained with
     * term_get_cursor_shape(). */
    if (gui.in_use)
	return;
#endif
    if (in_terminal_loop == term)
    {
	cursor_color_copy(&desired_cursor_color, term->tl_cursor_color);
	desired_cursor_shape = term->tl_cursor_shape;
	desired_cursor_blink = term->tl_cursor_blink;
	may_output_cursor_props();
    }
}

/*
 * Reset the desired cursor properties and restore them when needed.
 */
    static void
prepare_restore_cursor_props(void)
{
#ifdef FEAT_GUI
    if (gui.in_use)
	return;
#endif
    cursor_color_copy(&desired_cursor_color, NULL);
    desired_cursor_shape = -1;
    desired_cursor_blink = -1;
    may_output_cursor_props();
}

/*
 * Returns TRUE if the current window contains a terminal and we are sending
 * keys to the job.
 * If "check_job_status" is TRUE update the job status.
 */
    static int
term_use_loop_check(int check_job_status)
{
    term_T *term = curbuf->b_term;

    return term != NULL
	&& !term->tl_normal_mode
	&& term->tl_vterm != NULL
	&& term_job_running_check(term, check_job_status);
}

/*
 * Returns TRUE if the current window contains a terminal and we are sending
 * keys to the job.
 */
    int
term_use_loop(void)
{
    return term_use_loop_check(FALSE);
}

/*
 * Called when entering a window with the mouse.  If this is a terminal window
 * we may want to change state.
 */
    void
term_win_entered()
{
    term_T *term = curbuf->b_term;

    if (term != NULL)
    {
	if (term_use_loop_check(TRUE))
	{
	    reset_VIsual_and_resel();
	    if (State & INSERT)
		stop_insert_mode = TRUE;
	}
	mouse_was_outside = FALSE;
	enter_mouse_col = mouse_col;
	enter_mouse_row = mouse_row;
    }
}

/*
 * Wait for input and send it to the job.
 * When "blocking" is TRUE wait for a character to be typed.  Otherwise return
 * when there is no more typahead.
 * Return when the start of a CTRL-W command is typed or anything else that
 * should be handled as a Normal mode command.
 * Returns OK if a typed character is to be handled in Normal mode, FAIL if
 * the terminal was closed.
 */
    int
terminal_loop(int blocking)
{
    int		c;
    int		termwinkey = 0;
    int		ret;
#ifdef UNIX
    /* NOTE(review): assumes tl_job and jv_channel are non-NULL here;
     * verify callers guarantee this. */
    int		tty_fd = curbuf->b_term->tl_job->jv_channel
				->ch_part[get_tty_part(curbuf->b_term)].ch_fd;
#endif
    int		restore_cursor = FALSE;

    /* Remember the terminal we are sending keys to.  However, the terminal
     * might be closed while waiting for a character, e.g. typing "exit" in a
     * shell and ++close was used.  Therefore use curbuf->b_term instead of a
     * stored reference. */
    in_terminal_loop = curbuf->b_term;

    if (*curwin->w_p_twk != NUL)
    {
	termwinkey = string_to_key(curwin->w_p_twk, TRUE);
	if (termwinkey == Ctrl_W)
	    termwinkey = 0;
    }
    position_cursor(curwin, &curbuf->b_term->tl_cursor_pos);
    may_set_cursor_props(curbuf->b_term);

    while (blocking || vpeekc_nomap() != NUL)
    {
#ifdef FEAT_GUI
	if (!curbuf->b_term->tl_system)
#endif
	    /* TODO: skip screen update when handling a sequence of keys. */
	    /* Repeat redrawing in case a message is received while redrawing.
	     */
	    while (must_redraw != 0)
		if (update_screen(0) == FAIL)
		    break;
	if (!term_use_loop_check(TRUE) || in_terminal_loop != curbuf->b_term)
	    /* job finished while redrawing */
	    break;

	update_cursor(curbuf->b_term, FALSE);
	restore_cursor = TRUE;

	c = term_vgetc();
	if (!term_use_loop_check(TRUE) || in_terminal_loop != curbuf->b_term)
	{
	    /* Job finished while waiting for a character.  Push back the
	     * received character. */
	    if (c != K_IGNORE)
		vungetc(c);
	    break;
	}
	if (c == K_IGNORE)
	    continue;

#ifdef UNIX
	/*
	 * The shell or another program may change the tty settings.  Getting
	 * them for every typed character is a bit of overhead, but it's needed
	 * for the first character typed, e.g. when Vim starts in a shell.
	 */
	if (isatty(tty_fd))
	{
	    ttyinfo_T info;

	    /* Get the current backspace character of the pty. */
	    if (get_tty_info(tty_fd, &info) == OK)
		term_backspace_char = info.backspace;
	}
#endif

#ifdef WIN3264
	/* On Windows winpty handles CTRL-C, don't send a CTRL_C_EVENT.
	 * Use CTRL-BREAK to kill the job. */
	if (ctrl_break_was_pressed)
	    mch_signal_job(curbuf->b_term->tl_job, (char_u *)"kill");
#endif
	/* Was either CTRL-W (termwinkey) or CTRL-\ pressed?
	 * Not in a system terminal. */
	if ((c == (termwinkey == 0 ? Ctrl_W : termwinkey) || c == Ctrl_BSL)
#ifdef FEAT_GUI
		&& !curbuf->b_term->tl_system
#endif
		)
	{
	    int prev_c = c;

#ifdef FEAT_CMDL_INFO
	    if (add_to_showcmd(c))
		out_flush();
#endif
	    c = term_vgetc();
#ifdef FEAT_CMDL_INFO
	    clear_showcmd();
#endif
	    if (!term_use_loop_check(TRUE)
					|| in_terminal_loop != curbuf->b_term)
		/* job finished while waiting for a character */
		break;

	    if (prev_c == Ctrl_BSL)
	    {
		if (c == Ctrl_N)
		{
		    /* CTRL-\ CTRL-N : go to Terminal-Normal mode. */
		    term_enter_normal_mode();
		    ret = FAIL;
		    goto theend;
		}
		/* Send both keys to the terminal. */
		send_keys_to_term(curbuf->b_term, prev_c, TRUE);
	    }
	    else if (c == Ctrl_C)
	    {
		/* "CTRL-W CTRL-C" or 'termwinkey' CTRL-C: end the job */
		mch_signal_job(curbuf->b_term->tl_job, (char_u *)"kill");
	    }
	    else if (c == '.')
	    {
		/* "CTRL-W .": send CTRL-W to the job */
		/* "'termwinkey' .": send 'termwinkey' to the job */
		c = termwinkey == 0 ? Ctrl_W : termwinkey;
	    }
	    else if (c == Ctrl_BSL)
	    {
		/* "CTRL-W CTRL-\": send CTRL-\ to the job */
		c = Ctrl_BSL;
	    }
	    else if (c == 'N')
	    {
		/* CTRL-W N : go to Terminal-Normal mode. */
		term_enter_normal_mode();
		ret = FAIL;
		goto theend;
	    }
	    else if (c == '"')
	    {
		term_paste_register(prev_c);
		continue;
	    }
	    else if (termwinkey == 0 || c != termwinkey)
	    {
		/* Not a terminal-window command: handle as Normal mode
		 * CTRL-W command. */
		stuffcharReadbuff(Ctrl_W);
		stuffcharReadbuff(c);
		ret = OK;
		goto theend;
	    }
	}
# ifdef WIN3264
	if (!enc_utf8 && has_mbyte && c >= 0x80)
	{
	    WCHAR   wc;
	    char_u  mb[3];

	    mb[0] = (unsigned)c >> 8;
	    mb[1] = c;
	    if (MultiByteToWideChar(GetACP(), 0, (char*)mb, 2, &wc, 1) > 0)
		c = wc;
	}
# endif
	if (send_keys_to_term(curbuf->b_term, c, TRUE) != OK)
	{
	    if (c == K_MOUSEMOVE)
		/* We are sure to come back here, don't reset the cursor color
		 * and shape to avoid flickering. */
		restore_cursor = FALSE;

	    ret = OK;
	    goto theend;
	}
    }
    ret = FAIL;

theend:
    in_terminal_loop = NULL;
    if (restore_cursor)
	prepare_restore_cursor_props();

    /* Move a snapshot of the screen contents to the buffer, so that completion
     * works in other buffers. */
    if (curbuf->b_term != NULL && !curbuf->b_term->tl_normal_mode)
	may_move_terminal_to_buffer(curbuf->b_term, FALSE);

    return ret;
}

/*
 * Called when a job has finished.
 * This updates the title and status, but does not close the vterm, because
 * there might still be pending output in the channel.
 */
    void
term_job_ended(job_T *job)
{
    term_T *term;
    int	    did_one = FALSE;

    for (term = first_term; term != NULL; term = term->tl_next)
	if (term->tl_job == job)
	{
	    VIM_CLEAR(term->tl_title);
	    VIM_CLEAR(term->tl_status_text);
	    redraw_buf_and_status_later(term->tl_buffer, VALID);
	    did_one = TRUE;
	}
    if (did_one)
	redraw_statuslines();
    if (curbuf->b_term != NULL)
    {
	if (curbuf->b_term->tl_job == job)
	    maketitle();
	update_cursor(curbuf->b_term, TRUE);
    }
}

/*
 * Show or hide the cursor when "term" is the terminal receiving keys.
 */
    static void
may_toggle_cursor(term_T *term)
{
    if (in_terminal_loop == term)
    {
	if (term->tl_cursor_visible)
	    cursor_on();
	else
	    cursor_off();
    }
}

/*
 * Reverse engineer the RGB value into a cterm color index.
 * First color is 1.  Return 0 if no match found (default color).
 */
    static int
color2index(VTermColor *color, int fg, int *boldp)
{
    int red = color->red;
    int blue = color->blue;
    int green = color->green;

    if (color->ansi_index != VTERM_ANSI_INDEX_NONE)
    {
	/* First 16 colors and default: use the ANSI index, because these
	 * colors can be redefined. */
	if (t_colors >= 16)
	    return color->ansi_index;
	switch (color->ansi_index)
	{
	    case  0: return 0;
	    case  1: return lookup_color( 0, fg, boldp) + 1; /* black */
	    case  2: return lookup_color( 4, fg, boldp) + 1; /* dark red */
	    case  3: return lookup_color( 2, fg, boldp) + 1; /* dark green */
	    case  4: return lookup_color( 6, fg, boldp) + 1; /* brown */
	    case  5: return lookup_color( 1, fg, boldp) + 1; /* dark blue */
	    case  6: return lookup_color( 5, fg, boldp) + 1; /* dark magenta */
	    case  7: return lookup_color( 3, fg, boldp) + 1; /* dark cyan */
	    case  8: return lookup_color( 8, fg, boldp) + 1; /* light grey */
	    case  9: return lookup_color(12, fg, boldp) + 1; /* dark grey */
	    case 10: return lookup_color(20, fg, boldp) + 1; /* red */
	    case 11: return lookup_color(16, fg, boldp) + 1; /* green */
	    case 12: return lookup_color(24, fg, boldp) + 1; /* yellow */
	    case 13: return lookup_color(14, fg, boldp) + 1; /* blue */
	    case 14: return lookup_color(22, fg, boldp) + 1; /* magenta */
	    case 15: return lookup_color(18, fg, boldp) + 1; /* cyan */
	    case 16: return lookup_color(26, fg, boldp) + 1; /* white */
	}
    }

    if (t_colors >= 256)
    {
	if (red == blue && red == green)
	{
	    /* 24-color greyscale plus white and black */
	    static int cutoff[23] = {
		    0x0D, 0x17, 0x21, 0x2B, 0x35, 0x3F, 0x49, 0x53, 0x5D, 0x67,
		    0x71, 0x7B, 0x85, 0x8F, 0x99, 0xA3, 0xAD, 0xB7, 0xC1, 0xCB,
		    0xD5, 0xDF, 0xE9};
	    int i;

	    if (red < 5)
		return 17; /* 00/00/00 */
	    if (red > 245) /* ff/ff/ff */
		return 232;
	    for (i = 0; i < 23; ++i)
		if (red < cutoff[i])
		    return i + 233;
	    return 256;
	}
	{
	    static int cutoff[5] = {0x2F, 0x73, 0x9B, 0xC3, 0xEB};
	    int ri, gi, bi;

	    /* 216-color cube */
	    for (ri = 0; ri < 5; ++ri)
		if (red < cutoff[ri])
		    break;
	    for (gi = 0; gi < 5; ++gi)
		if (green < cutoff[gi])
		    break;
	    for (bi = 0; bi < 5; ++bi)
		if (blue < cutoff[bi])
		    break;
	    return 17 + ri * 36 + gi * 6 + bi;
	}
    }
    return 0;
}

/*
 * Convert Vterm attributes to highlight flags.
 */
    static int
vtermAttr2hl(VTermScreenCellAttrs cellattrs)
{
    int attr = 0;

    if (cellattrs.bold)
        attr |= HL_BOLD;
    if (cellattrs.underline)
        attr |= HL_UNDERLINE;
    if (cellattrs.italic)
        attr |= HL_ITALIC;
    if (cellattrs.strike)
        attr |= HL_STRIKETHROUGH;
    if (cellattrs.reverse)
        attr |= HL_INVERSE;
    return attr;
}

/*
 * Store Vterm attributes in "cell" from highlight flags.
 * Inverse of vtermAttr2hl(); clears all attribute bits first.
 */
    static void
hl2vtermAttr(int attr, cellattr_T *cell)
{
    vim_memset(&cell->attrs, 0, sizeof(VTermScreenCellAttrs));
    if (attr & HL_BOLD)
        cell->attrs.bold = 1;
    if (attr & HL_UNDERLINE)
        cell->attrs.underline = 1;
    if (attr & HL_ITALIC)
        cell->attrs.italic = 1;
    if (attr & HL_STRIKETHROUGH)
        cell->attrs.strike = 1;
    if (attr & HL_INVERSE)
        cell->attrs.reverse = 1;
}

/*
 * Convert the attributes of a vterm cell into an attribute index.
 * Dispatches to the GUI, 'termguicolors' or cterm attribute tables depending
 * on how Vim was built and configured.
 */
    static int
cell2attr(VTermScreenCellAttrs cellattrs, VTermColor cellfg, VTermColor cellbg)
{
    int attr = vtermAttr2hl(cellattrs);

#ifdef FEAT_GUI
    if (gui.in_use)
    {
        guicolor_T fg, bg;

        fg = gui_mch_get_rgb_color(cellfg.red, cellfg.green, cellfg.blue);
        bg = gui_mch_get_rgb_color(cellbg.red, cellbg.green, cellbg.blue);
        return get_gui_attr_idx(attr, fg, bg);
    }
    else
#endif
#ifdef FEAT_TERMGUICOLORS
    if (p_tgc)
    {
        guicolor_T fg, bg;

        fg = gui_get_rgb_color_cmn(cellfg.red, cellfg.green, cellfg.blue);
        bg = gui_get_rgb_color_cmn(cellbg.red, cellbg.green, cellbg.blue);
        return get_tgc_attr_idx(attr, fg, bg);
    }
    else
#endif
    {
        int bold = MAYBE;
        int fg = color2index(&cellfg, TRUE, &bold);
        int bg = color2index(&cellbg, FALSE, &bold);

        /* Use the "Terminal" highlighting for the default colors. */
        if ((fg == 0 || bg == 0) && t_colors >= 16)
        {
            if (fg == 0 && term_default_cterm_fg >= 0)
                fg = term_default_cterm_fg + 1;
            if (bg == 0 && term_default_cterm_bg >= 0)
                bg = term_default_cterm_bg + 1;
        }

        /* with 8 colors set the bold attribute to get a bright foreground */
        if (bold == TRUE)
            attr |= HL_BOLD;
        return get_cterm_attr_idx(attr, fg, bg);
    }
    return 0;
}

/*
 * Mark the terminal's snapshot as out of date and, with timers, schedule a
 * snapshot update after a quiet period.
 */
    static void
set_dirty_snapshot(term_T *term)
{
    term->tl_dirty_snapshot = TRUE;
#ifdef FEAT_TIMERS
    if (!term->tl_normal_mode)
    {
        /* Update the snapshot after 100 msec of not getting updates. */
        profile_setlimit(100L, &term->tl_timer_due);
        term->tl_timer_set = TRUE;
    }
#endif
}

/*
 * libvterm callback: a screen rectangle was changed; widen the dirty row
 * range and ask for a partial redraw.
 */
    static int
handle_damage(VTermRect rect, void *user)
{
    term_T *term = (term_T *)user;

    term->tl_dirty_row_start = MIN(term->tl_dirty_row_start, rect.start_row);
    term->tl_dirty_row_end = MAX(term->tl_dirty_row_end, rect.end_row);
    set_dirty_snapshot(term);
    redraw_buf_later(term->tl_buffer, SOME_VALID);
    return 1;
}

/*
 * Scroll "term" up by deleting "count" window lines starting at "start_row"
 * in every window showing this terminal buffer.
 */
    static void
term_scroll_up(term_T *term, int start_row, int count)
{
    win_T                *wp;
    VTermColor           fg, bg;
    VTermScreenCellAttrs attr;
    int                  clear_attr;

    /* Set the color to clear lines with. */
    vterm_state_get_default_colors(vterm_obtain_state(term->tl_vterm),
                                                                 &fg, &bg);
    vim_memset(&attr, 0, sizeof(attr));
    clear_attr = cell2attr(attr, fg, bg);

    FOR_ALL_WINDOWS(wp)
    {
        if (wp->w_buffer == term->tl_buffer)
            win_del_lines(wp, start_row, count, FALSE, FALSE, clear_attr);
    }
}

/*
 * libvterm callback: a rectangle was moved (scrolled).
 */
    static int
handle_moverect(VTermRect dest, VTermRect src, void *user)
{
    term_T      *term = (term_T *)user;
    int         count = src.start_row - dest.start_row;

    /* Scrolling up is done much more efficiently by deleting lines instead of
     * redrawing the text. But avoid doing this multiple times, postpone until
     * the redraw happens. */
    if (dest.start_col == src.start_col
            && dest.end_col == src.end_col
            && dest.start_row < src.start_row)
    {
        if (dest.start_row == 0)
            term->tl_postponed_scroll += count;
        else
            term_scroll_up(term, dest.start_row, count);
    }

    /* NOTE(review): handle_damage() uses MAX for tl_dirty_row_end but MIN is
     * used here; looks inconsistent, though harmless because NOT_VALID below
     * forces a full redraw anyway — confirm before changing. */
    term->tl_dirty_row_start = MIN(term->tl_dirty_row_start, dest.start_row);
    term->tl_dirty_row_end = MIN(term->tl_dirty_row_end, dest.end_row);
    set_dirty_snapshot(term);

    /* Not sure if the scrolling will work correctly, let's do a complete
     * redraw later. */
    redraw_buf_later(term->tl_buffer, NOT_VALID);
    return 1;
}

/*
 * libvterm callback: the cursor moved and/or its visibility changed.
 */
    static int
handle_movecursor(
        VTermPos pos,
        VTermPos oldpos UNUSED,
        int visible,
        void *user)
{
    term_T      *term = (term_T *)user;
    win_T       *wp;

    term->tl_cursor_pos = pos;
    term->tl_cursor_visible = visible;

    FOR_ALL_WINDOWS(wp)
    {
        if (wp->w_buffer == term->tl_buffer)
            position_cursor(wp, &pos);
    }
    if (term->tl_buffer == curbuf && !term->tl_normal_mode)
    {
        may_toggle_cursor(term);
        update_cursor(term, term->tl_cursor_visible);
    }
    return 1;
}

/*
 * libvterm callback: a terminal property (title, cursor shape/color/blink,
 * altscreen) was set by the job.
 */
    static int
handle_settermprop(
        VTermProp prop,
        VTermValue *value,
        void *user)
{
    term_T *term = (term_T *)user;

    switch (prop)
    {
        case VTERM_PROP_TITLE:
            vim_free(term->tl_title);
            /* a blank title isn't useful, make it empty, so that "running" is
             * displayed */
            if (*skipwhite((char_u *)value->string) == NUL)
                term->tl_title = NULL;
#ifdef WIN3264
            else if (!enc_utf8 && enc_codepage > 0)
            {
                WCHAR   *ret = NULL;
                int     length = 0;

                MultiByteToWideChar_alloc(CP_UTF8, 0,
                        (char*)value->string, (int)STRLEN(value->string),
                                                               &ret, &length);
                if (ret != NULL)
                {
                    WideCharToMultiByte_alloc(enc_codepage, 0,
                                        ret, length, (char**)&term->tl_title,
                                        &length, 0, 0);
                    vim_free(ret);
                }
            }
#endif
            else
                term->tl_title = vim_strsave((char_u *)value->string);
            VIM_CLEAR(term->tl_status_text);
            if (term == curbuf->b_term)
                maketitle();
            break;

        case VTERM_PROP_CURSORVISIBLE:
            term->tl_cursor_visible = value->boolean;
            may_toggle_cursor(term);
            out_flush();
            break;

        case VTERM_PROP_CURSORBLINK:
            term->tl_cursor_blink = value->boolean;
            may_set_cursor_props(term);
            break;

        case VTERM_PROP_CURSORSHAPE:
            term->tl_cursor_shape = value->number;
            may_set_cursor_props(term);
            break;

        case VTERM_PROP_CURSORCOLOR:
            cursor_color_copy(&term->tl_cursor_color, (char_u*)value->string);
            may_set_cursor_props(term);
            break;

        case VTERM_PROP_ALTSCREEN:
            /* TODO: do anything else? */
            term->tl_using_altscreen = value->boolean;
            break;

        default:
            break;
    }
    /* Always return 1, otherwise vterm doesn't store the value internally. */
    return 1;
}

/*
 * The job running in the terminal resized the terminal.
 */
    static int
handle_resize(int rows, int cols, void *user)
{
    term_T      *term = (term_T *)user;
    win_T       *wp;

    term->tl_rows = rows;
    term->tl_cols = cols;
    if (term->tl_vterm_size_changed)
        /* Size was set by vterm_set_size(), don't set the window size. */
        term->tl_vterm_size_changed = FALSE;
    else
    {
        FOR_ALL_WINDOWS(wp)
        {
            if (wp->w_buffer == term->tl_buffer)
            {
                win_setheight_win(rows, wp);
                win_setwidth_win(cols, wp);
            }
        }
        redraw_buf_later(term->tl_buffer, NOT_VALID);
    }
    return 1;
}

/*
 * Handle a line that is pushed off the top of the screen.
 */
    static int
handle_pushline(int cols, const VTermScreenCell *cells, void *user)
{
    term_T *term = (term_T *)user;

    /* First remove the lines that were appended before, the pushed line goes
     * above it. */
    cleanup_scrollback(term);

    /* If the number of lines that are stored goes over 'termscrollback' then
     * delete the first 10%. */
    if (term->tl_scrollback.ga_len >= term->tl_buffer->b_p_twsl)
    {
        int todo = term->tl_buffer->b_p_twsl / 10;
        int i;

        /* Temporarily switch curbuf so ml_delete() works on the terminal
         * buffer. */
        curbuf = term->tl_buffer;
        for (i = 0; i < todo; ++i)
        {
            vim_free(((sb_line_T *)term->tl_scrollback.ga_data + i)->sb_cells);
            ml_delete(1, FALSE);
        }
        curbuf = curwin->w_buffer;
        term->tl_scrollback.ga_len -= todo;
        mch_memmove(term->tl_scrollback.ga_data,
            (sb_line_T *)term->tl_scrollback.ga_data + todo,
            sizeof(sb_line_T) * term->tl_scrollback.ga_len);
        term->tl_scrollback_scrolled -= todo;
    }

    if (ga_grow(&term->tl_scrollback, 1) == OK)
    {
        cellattr_T      *p = NULL;
        int             len = 0;
        int             i;
        int             c;
        int             col;
        sb_line_T       *line;
        garray_T        ga;
        cellattr_T      fill_attr = term->tl_default_color;

        /* do not store empty cells at the end */
        for (i = 0; i < cols; ++i)
            if (cells[i].chars[0] != 0)
                len = i + 1;
            else
                cell2cellattr(&cells[i], &fill_attr);

        ga_init2(&ga, 1, 100);
        if (len > 0)
            p = (cellattr_T *)alloc((int)sizeof(cellattr_T) * len);
        if (p != NULL)
        {
            for (col = 0; col < len; col += cells[col].width)
            {
                if (ga_grow(&ga, MB_MAXBYTES) == FAIL)
                {
                    ga.ga_len = 0;
                    break;
                }
                for (i = 0; (c = cells[col].chars[i]) > 0 || i == 0; ++i)
                    ga.ga_len += utf_char2bytes(c == NUL ? ' ' : c,
                                         (char_u *)ga.ga_data + ga.ga_len);
                cell2cellattr(&cells[col], &p[col]);
            }
        }
        if (ga_grow(&ga, 1) == FAIL)
            add_scrollback_line_to_buffer(term, (char_u *)"", 0);
        else
        {
            *((char_u *)ga.ga_data + ga.ga_len) = NUL;
            add_scrollback_line_to_buffer(term, ga.ga_data, ga.ga_len);
        }
        ga_clear(&ga);

        line = (sb_line_T *)term->tl_scrollback.ga_data
                                                  + term->tl_scrollback.ga_len;
        line->sb_cols = len;
        line->sb_cells = p;
        line->sb_fill_attr = fill_attr;
        ++term->tl_scrollback.ga_len;
        ++term->tl_scrollback_scrolled;
    }
    return 0; /* ignored */
}

/* Callbacks that libvterm invokes for screen updates. */
static VTermScreenCallbacks screen_callbacks = {
  handle_damage,        /* damage */
  handle_moverect,      /* moverect */
  handle_movecursor,    /* movecursor */
  handle_settermprop,   /* settermprop */
  NULL,                 /* bell */
  handle_resize,        /* resize */
  handle_pushline,      /* sb_pushline */
  NULL                  /* sb_popline */
};

/*
 * Do the work after the channel of a terminal was closed.
 * Must be called only when updating_screen is FALSE.
 * Returns TRUE when a buffer was closed (list of terminals may have changed).
 */
    static int
term_after_channel_closed(term_T *term)
{
    /* Unless in Terminal-Normal mode: clear the vterm. */
    if (!term->tl_normal_mode)
    {
        int     fnum = term->tl_buffer->b_fnum;

        cleanup_vterm(term);

        if (term->tl_finish == TL_FINISH_CLOSE)
        {
            aco_save_T  aco;
            int         do_set_w_closing = term->tl_buffer->b_nwindows == 0;

            // ++close or term_finish == "close"
            ch_log(NULL, "terminal job finished, closing window");
            aucmd_prepbuf(&aco, term->tl_buffer);
            // Avoid closing the window if we temporarily use it.
            if (do_set_w_closing)
                curwin->w_closing = TRUE;
            do_bufdel(DOBUF_WIPE, (char_u *)"", 1, fnum, fnum, FALSE);
            if (do_set_w_closing)
                curwin->w_closing = FALSE;
            aucmd_restbuf(&aco);
            return TRUE;
        }
        if (term->tl_finish == TL_FINISH_OPEN
                                   && term->tl_buffer->b_nwindows == 0)
        {
            char buf[50];

            /* TODO: use term_opencmd */
            ch_log(NULL, "terminal job finished, opening window");
            vim_snprintf(buf, sizeof(buf),
                    term->tl_opencmd == NULL
                            ? "botright sbuf %d"
                            : (char *)term->tl_opencmd, fnum);
            do_cmdline_cmd((char_u *)buf);
        }
        else
            ch_log(NULL, "terminal job finished");
    }

    redraw_buf_and_status_later(term->tl_buffer, NOT_VALID);
    return FALSE;
}

/*
 * Called when a channel has been closed.
 * If this was a channel for a terminal window then finish it up.
 */
    void
term_channel_closed(channel_T *ch)
{
    term_T *term;
    term_T *next_term;
    int     did_one = FALSE;

    for (term = first_term; term != NULL; term = next_term)
    {
        next_term = term->tl_next;
        if (term->tl_job == ch->ch_job)
        {
            term->tl_channel_closed = TRUE;
            did_one = TRUE;

            VIM_CLEAR(term->tl_title);
            VIM_CLEAR(term->tl_status_text);
#ifdef WIN3264
            if (term->tl_out_fd != NULL)
            {
                fclose(term->tl_out_fd);
                term->tl_out_fd = NULL;
            }
#endif

            if (updating_screen)
            {
                /* Cannot open or close windows now. Can happen when
                 * 'lazyredraw' is set. */
                term->tl_channel_recently_closed = TRUE;
                continue;
            }

            if (term_after_channel_closed(term))
                next_term = first_term;
        }
    }

    if (did_one)
    {
        redraw_statuslines();

        /* Need to break out of vgetc(). */
        ins_char_typebuf(K_IGNORE);
        typebuf_was_filled = TRUE;

        term = curbuf->b_term;
        if (term != NULL)
        {
            if (term->tl_job == ch->ch_job)
                maketitle();
            update_cursor(term, term->tl_cursor_visible);
        }
    }
}

/*
 * To be called after resetting updating_screen: handle any terminal where the
 * channel was closed.
 */
    void
term_check_channel_closed_recently()
{
    term_T *term;
    term_T *next_term;

    for (term = first_term; term != NULL; term = next_term)
    {
        next_term = term->tl_next;
        if (term->tl_channel_recently_closed)
        {
            term->tl_channel_recently_closed = FALSE;
            if (term_after_channel_closed(term))
                // start over, the list may have changed
                next_term = first_term;
        }
    }
}

/*
 * Fill one screen line from a line of the terminal.
 * Advances "pos" to past the last column.
 */
    static void
term_line2screenline(VTermScreen *screen, VTermPos *pos, int max_col)
{
    int off = screen_get_current_line_off();

    for (pos->col = 0; pos->col < max_col; )
    {
        VTermScreenCell cell;
        int             c;

        if (vterm_screen_get_cell(screen, *pos, &cell) == 0)
            vim_memset(&cell, 0, sizeof(cell));

        c = cell.chars[0];
        if (c == NUL)
        {
            ScreenLines[off] = ' ';
            if (enc_utf8)
                ScreenLinesUC[off] = NUL;
        }
        else
        {
            if (enc_utf8)
            {
                int i;

                /* composing chars */
                for (i = 0; i < Screen_mco
                              && i + 1 < VTERM_MAX_CHARS_PER_CELL; ++i)
                {
                    ScreenLinesC[i][off] = cell.chars[i + 1];
                    if (cell.chars[i + 1] == 0)
                        break;
                }
                if (c >= 0x80 || (Screen_mco > 0
                                         && ScreenLinesC[0][off] != 0))
                {
                    ScreenLines[off] = ' ';
                    ScreenLinesUC[off] = c;
                }
                else
                {
                    ScreenLines[off] = c;
                    ScreenLinesUC[off] = NUL;
                }
            }
#ifdef WIN3264
            else if (has_mbyte && c >= 0x80)
            {
                char_u  mb[MB_MAXBYTES+1];
                WCHAR   wc = c;

                if (WideCharToMultiByte(GetACP(), 0, &wc, 1,
                                               (char*)mb, 2, 0, 0) > 1)
                {
                    ScreenLines[off] = mb[0];
                    ScreenLines[off + 1] = mb[1];
                    cell.width = mb_ptr2cells(mb);
                }
                else
                    ScreenLines[off] = c;
            }
#endif
            else
                ScreenLines[off] = c;
        }
        ScreenAttrs[off] = cell2attr(cell.attrs, cell.fg, cell.bg);

        ++pos->col;
        ++off;
        if (cell.width == 2)
        {
            if (enc_utf8)
                ScreenLinesUC[off] = NUL;

            /* don't set the second byte to NUL for a DBCS encoding, it
             * has been set above */
            if (enc_utf8 || !has_mbyte)
                ScreenLines[off] = NUL;

            ++pos->col;
            ++off;
        }
    }
}

#if defined(FEAT_GUI)
/*
 * Update the screen for a "system" terminal (one drawn directly in the Vim
 * message area rather than in a window).
 */
    static void
update_system_term(term_T *term)
{
    VTermPos     pos;
    VTermScreen *screen;

    if (term->tl_vterm == NULL)
        return;
    screen = vterm_obtain_screen(term->tl_vterm);

    /* Scroll up to make more room for terminal lines if needed. */
    while (term->tl_toprow > 0
            && (Rows - term->tl_toprow) < term->tl_dirty_row_end)
    {
        int save_p_more = p_more;

        p_more = FALSE;
        msg_row = Rows - 1;
        msg_puts((char_u *)"\n");
        p_more = save_p_more;
        --term->tl_toprow;
    }

    for (pos.row = term->tl_dirty_row_start; pos.row < term->tl_dirty_row_end
                                              && pos.row < Rows; ++pos.row)
    {
        if (pos.row < term->tl_rows)
        {
            int max_col = MIN(Columns, term->tl_cols);

            term_line2screenline(screen, &pos, max_col);
        }
        else
            pos.col = 0;

        screen_line(term->tl_toprow + pos.row, 0, pos.col, Columns, FALSE);
    }

    term->tl_dirty_row_start = MAX_ROW;
    term->tl_dirty_row_end = 0;
    update_cursor(term, TRUE);
}
#endif

/*
 * Return TRUE if window "wp" is to be redrawn with term_update_window().
 * Returns FALSE when there is no terminal running in this window or it is in
 * Terminal-Normal mode.
 */
    int
term_do_update_window(win_T *wp)
{
    term_T      *term = wp->w_buffer->b_term;

    return term != NULL && term->tl_vterm != NULL && !term->tl_normal_mode;
}

/*
 * Called to update a window that contains an active terminal.
 */
    void
term_update_window(win_T *wp)
{
    term_T      *term = wp->w_buffer->b_term;
    VTerm       *vterm;
    VTermScreen *screen;
    VTermState  *state;
    VTermPos     pos;
    int          rows, cols;
    int          newrows, newcols;
    int          minsize;
    win_T       *twp;

    vterm = term->tl_vterm;
    screen = vterm_obtain_screen(vterm);
    state = vterm_obtain_state(vterm);

    /* We use NOT_VALID on a resize or scroll, redraw everything then. With
     * SOME_VALID only redraw what was marked dirty. */
    if (wp->w_redr_type > SOME_VALID)
    {
        term->tl_dirty_row_start = 0;
        term->tl_dirty_row_end = MAX_ROW;

        if (term->tl_postponed_scroll > 0
                && term->tl_postponed_scroll < term->tl_rows / 3)
            /* Scrolling is usually faster than redrawing, when there are only
             * a few lines to scroll. */
            term_scroll_up(term, 0, term->tl_postponed_scroll);
        term->tl_postponed_scroll = 0;
    }

    /*
     * If the window was resized a redraw will be triggered and we get here.
     * Adjust the size of the vterm unless 'termwinsize' specifies a fixed size.
     */
    minsize = parse_termwinsize(wp, &rows, &cols);

    newrows = 99999;
    newcols = 99999;
    FOR_ALL_WINDOWS(twp)
    {
        /* When more than one window shows the same terminal, use the
         * smallest size. */
        if (twp->w_buffer == term->tl_buffer)
        {
            newrows = MIN(newrows, twp->w_height);
            newcols = MIN(newcols, twp->w_width);
        }
    }
    newrows = rows == 0 ? newrows : minsize ? MAX(rows, newrows) : rows;
    newcols = cols == 0 ? newcols : minsize ? MAX(cols, newcols) : cols;

    if (term->tl_rows != newrows || term->tl_cols != newcols)
    {
        term->tl_vterm_size_changed = TRUE;
        vterm_set_size(vterm, newrows, newcols);
        ch_log(term->tl_job->jv_channel, "Resizing terminal to %d lines",
                                                                      newrows);
        term_report_winsize(term, newrows, newcols);

        // Updating the terminal size will cause the snapshot to be cleared.
        // When not in terminal_loop() we need to restore it.
        if (term != in_terminal_loop)
            may_move_terminal_to_buffer(term, FALSE);
    }

    /* The cursor may have been moved when resizing. */
    vterm_state_get_cursorpos(state, &pos);
    position_cursor(wp, &pos);

    for (pos.row = term->tl_dirty_row_start; pos.row < term->tl_dirty_row_end
                                          && pos.row < wp->w_height; ++pos.row)
    {
        if (pos.row < term->tl_rows)
        {
            int max_col = MIN(wp->w_width, term->tl_cols);

            term_line2screenline(screen, &pos, max_col);
        }
        else
            pos.col = 0;

        screen_line(wp->w_winrow + pos.row
#ifdef FEAT_MENU
                                + winbar_height(wp)
#endif
                                , wp->w_wincol, pos.col, wp->w_width, FALSE);
    }
    term->tl_dirty_row_start = MAX_ROW;
    term->tl_dirty_row_end = 0;
}

/*
 * Return TRUE if "wp" is a terminal window where the job has finished.
 */
    int
term_is_finished(buf_T *buf)
{
    return buf->b_term != NULL && buf->b_term->tl_vterm == NULL;
}

/*
 * Return TRUE if "wp" is a terminal window where the job has finished or we
 * are in Terminal-Normal mode, thus we show the buffer contents.
 */
    int
term_show_buffer(buf_T *buf)
{
    term_T *term = buf->b_term;

    return term != NULL && (term->tl_vterm == NULL || term->tl_normal_mode);
}

/*
 * The current buffer is going to be changed.
   If there is terminal
 * highlighting remove it now.
 */
    void
term_change_in_curbuf(void)
{
    term_T *term = curbuf->b_term;

    if (term_is_finished(curbuf) && term->tl_scrollback.ga_len > 0)
    {
        free_scrollback(term);
        redraw_buf_later(term->tl_buffer, NOT_VALID);

        /* The buffer is now like a normal buffer, it cannot be easily
         * abandoned when changed. */
        set_string_option_direct((char_u *)"buftype", -1,
                                  (char_u *)"", OPT_FREE|OPT_LOCAL, 0);
    }
}

/*
 * Get the screen attribute for a position in the buffer.
 * Use a negative "col" to get the filler background color.
 */
    int
term_get_attr(buf_T *buf, linenr_T lnum, int col)
{
    term_T      *term = buf->b_term;
    sb_line_T   *line;
    cellattr_T  *cellattr;

    if (lnum > term->tl_scrollback.ga_len)
        cellattr = &term->tl_default_color;
    else
    {
        line = (sb_line_T *)term->tl_scrollback.ga_data + lnum - 1;
        if (col < 0 || col >= line->sb_cols)
            cellattr = &line->sb_fill_attr;
        else
            cellattr = line->sb_cells + col;
    }
    return cell2attr(cellattr->attrs, cellattr->fg, cellattr->bg);
}

/*
 * Convert a cterm color number 0 - 255 to RGB.
 * This is compatible with xterm.
 */
    static void
cterm_color2vterm(int nr, VTermColor *rgb)
{
    cterm_color2rgb(nr, &rgb->red, &rgb->green, &rgb->blue, &rgb->ansi_index);
}

/*
 * Initialize term->tl_default_color from the environment.
 */
    static void
init_default_colors(term_T *term)
{
    VTermColor      *fg, *bg;
    int             fgval, bgval;
    int             id;

    vim_memset(&term->tl_default_color.attrs, 0, sizeof(VTermScreenCellAttrs));
    term->tl_default_color.width = 1;
    fg = &term->tl_default_color.fg;
    bg = &term->tl_default_color.bg;

    /* Vterm uses a default black background. Set it to white when
     * 'background' is "light". */
    if (*p_bg == 'l')
    {
        fgval = 0;
        bgval = 255;
    }
    else
    {
        fgval = 255;
        bgval = 0;
    }
    fg->red = fg->green = fg->blue = fgval;
    bg->red = bg->green = bg->blue = bgval;
    fg->ansi_index = bg->ansi_index = VTERM_ANSI_INDEX_DEFAULT;

    /* The "Terminal" highlight group overrules the defaults. */
    id = syn_name2id((char_u *)"Terminal");

    /* Use the actual color for the GUI and when 'termguicolors' is set. */
#if defined(FEAT_GUI) || defined(FEAT_TERMGUICOLORS)
    if (0
# ifdef FEAT_GUI
            || gui.in_use
# endif
# ifdef FEAT_TERMGUICOLORS
            || p_tgc
#  ifdef FEAT_VTP
            /* Finally get INVALCOLOR on this execution path */
            || (!p_tgc && t_colors >= 256)
#  endif
# endif
       )
    {
        guicolor_T      fg_rgb = INVALCOLOR;
        guicolor_T      bg_rgb = INVALCOLOR;

        if (id != 0)
            syn_id2colors(id, &fg_rgb, &bg_rgb);

# ifdef FEAT_GUI
        if (gui.in_use)
        {
            if (fg_rgb == INVALCOLOR)
                fg_rgb = gui.norm_pixel;
            if (bg_rgb == INVALCOLOR)
                bg_rgb = gui.back_pixel;
        }
#  ifdef FEAT_TERMGUICOLORS
        else
#  endif
# endif
# ifdef FEAT_TERMGUICOLORS
        {
            if (fg_rgb == INVALCOLOR)
                fg_rgb = cterm_normal_fg_gui_color;
            if (bg_rgb == INVALCOLOR)
                bg_rgb = cterm_normal_bg_gui_color;
        }
# endif
        if (fg_rgb != INVALCOLOR)
        {
            long_u rgb = GUI_MCH_GET_RGB(fg_rgb);

            fg->red = (unsigned)(rgb >> 16);
            fg->green = (unsigned)(rgb >> 8) & 255;
            fg->blue = (unsigned)rgb & 255;
        }
        if (bg_rgb != INVALCOLOR)
        {
            long_u rgb = GUI_MCH_GET_RGB(bg_rgb);

            bg->red = (unsigned)(rgb >> 16);
            bg->green = (unsigned)(rgb >> 8) & 255;
            bg->blue = (unsigned)rgb & 255;
        }
    }
    else
#endif
    if (id != 0 && t_colors >= 16)
    {
        if (term_default_cterm_fg >= 0)
            cterm_color2vterm(term_default_cterm_fg, fg);
        if (term_default_cterm_bg >= 0)
            cterm_color2vterm(term_default_cterm_bg, bg);
    }
    else
    {
#if defined(WIN3264) && !defined(FEAT_GUI_W32)
        int tmp;
#endif

        /* In an MS-Windows console we know the normal colors. */
        if (cterm_normal_fg_color > 0)
        {
            cterm_color2vterm(cterm_normal_fg_color - 1, fg);
# if defined(WIN3264) && !defined(FEAT_GUI_W32)
            /* The console RGB order is reversed, swap red and blue. */
            tmp = fg->red;
            fg->red = fg->blue;
            fg->blue = tmp;
# endif
        }
# ifdef FEAT_TERMRESPONSE
        else
            term_get_fg_color(&fg->red, &fg->green, &fg->blue);
# endif

        if (cterm_normal_bg_color > 0)
        {
            cterm_color2vterm(cterm_normal_bg_color - 1, bg);
# if defined(WIN3264) && !defined(FEAT_GUI_W32)
            tmp = bg->red;
            bg->red = bg->blue;
            bg->blue = tmp;
# endif
        }
# ifdef FEAT_TERMRESPONSE
        else
            term_get_bg_color(&bg->red, &bg->green, &bg->blue);
# endif
    }
}

#if defined(FEAT_GUI) || defined(FEAT_TERMGUICOLORS)
/*
 * Set the 16 ANSI colors from array of RGB values
 */
    static void
set_vterm_palette(VTerm *vterm, long_u *rgb)
{
    int         index = 0;
    VTermState  *state = vterm_obtain_state(vterm);

    for (; index < 16; index++)
    {
        VTermColor      color;

        color.red = (unsigned)(rgb[index] >> 16);
        color.green = (unsigned)(rgb[index] >> 8) & 255;
        color.blue = (unsigned)rgb[index] & 255;
        vterm_state_set_palette_color(state, index, &color);
    }
}

/*
 * Set the ANSI color palette from a list of colors
 */
    static int
set_ansi_colors_list(VTerm *vterm, list_T *list)
{
    int         n = 0;
    long_u      rgb[16];
    listitem_T  *li = list->lv_first;

    for (; li != NULL && n < 16; li = li->li_next, n++)
    {
        char_u          *color_name;
        guicolor_T      guicolor;

        color_name = tv_get_string_chk(&li->li_tv);
        if (color_name == NULL)
            return FAIL;

        guicolor = GUI_GET_COLOR(color_name);
        if (guicolor == INVALCOLOR)
            return FAIL;

        rgb[n] = GUI_MCH_GET_RGB(guicolor);
    }

    /* Exactly 16 colors are required. */
    if (n != 16 || li != NULL)
        return FAIL;

    set_vterm_palette(vterm, rgb);

    return OK;
}

/*
 * Initialize the ANSI color palette from g:terminal_ansi_colors[0:15]
 */
    static void
init_vterm_ansi_colors(VTerm *vterm)
{
    dictitem_T *var = find_var((char_u *)"g:terminal_ansi_colors", NULL, TRUE);

    if (var != NULL
            && (var->di_tv.v_type != VAR_LIST
                || var->di_tv.vval.v_list == NULL
                || set_ansi_colors_list(vterm, var->di_tv.vval.v_list) == FAIL))
        EMSG2(_(e_invarg2), "g:terminal_ansi_colors");
}
#endif

/*
 * Handles a "drop" command from the job in the terminal.
 * "item" is the file name, "item->li_next" may have options.
 */
    static void
handle_drop_command(listitem_T *item)
{
    char_u      *fname = tv_get_string(&item->li_tv);
    listitem_T  *opt_item = item->li_next;
    int         bufnr;
    win_T       *wp;
    tabpage_T   *tp;
    exarg_T     ea;
    char_u      *tofree = NULL;

    bufnr = buflist_add(fname, BLN_LISTED | BLN_NOOPT);
    FOR_ALL_TAB_WINDOWS(tp, wp)
    {
        if (wp->w_buffer->b_fnum == bufnr)
        {
            /* buffer is in a window already, go there */
            goto_tabpage_win(tp, wp);
            return;
        }
    }

    vim_memset(&ea, 0, sizeof(ea));

    if (opt_item != NULL && opt_item->li_tv.v_type == VAR_DICT
                                        && opt_item->li_tv.vval.v_dict != NULL)
    {
        dict_T *dict = opt_item->li_tv.vval.v_dict;
        char_u *p;

        /* "ff"/"fileformat" option */
        p = dict_get_string(dict, (char_u *)"ff", FALSE);
        if (p == NULL)
            p = dict_get_string(dict, (char_u *)"fileformat", FALSE);
        if (p != NULL)
        {
            if (check_ff_value(p) == FAIL)
                ch_log(NULL, "Invalid ff argument to drop: %s", p);
            else
                ea.force_ff = *p;
        }
        /* "enc"/"encoding" option: build a ":sbuf ++enc=..." command */
        p = dict_get_string(dict, (char_u *)"enc", FALSE);
        if (p == NULL)
            p = dict_get_string(dict, (char_u *)"encoding", FALSE);
        if (p != NULL)
        {
            ea.cmd = alloc((int)STRLEN(p) + 12);
            if (ea.cmd != NULL)
            {
                sprintf((char *)ea.cmd, "sbuf ++enc=%s", p);
                /* offset of the encoding value in ea.cmd */
                ea.force_enc = 11;
                tofree = ea.cmd;
            }
        }

        p = dict_get_string(dict, (char_u *)"bad", FALSE);
        if (p != NULL)
            get_bad_opt(p, &ea);

        if (dict_find(dict, (char_u *)"bin", -1) != NULL)
            ea.force_bin = FORCE_BIN;
        if (dict_find(dict, (char_u *)"binary", -1) != NULL)
            ea.force_bin = FORCE_BIN;
        if (dict_find(dict, (char_u *)"nobin", -1) != NULL)
            ea.force_bin = FORCE_NOBIN;
        if (dict_find(dict, (char_u *)"nobinary", -1) != NULL)
            ea.force_bin = FORCE_NOBIN;
    }

    /* open in new window, like ":split fname" */
    if (ea.cmd == NULL)
        ea.cmd = (char_u *)"split";
    ea.arg = fname;
    ea.cmdidx = CMD_split;
    ex_splitview(&ea);

    vim_free(tofree);
}

/*
 * Handles a function call from the job running in a terminal.
 * "item" is the function name, "item->li_next" has the arguments.
 */
    static void
handle_call_command(term_T *term, channel_T *channel, listitem_T *item)
{
    char_u      *func;
    typval_T    argvars[2];
    typval_T    rettv;
    int         doesrange;

    if (item->li_next == NULL)
    {
        ch_log(channel, "Missing function arguments for call");
        return;
    }
    func = tv_get_string(&item->li_tv);

    /* Only functions with the "Tapi_" prefix may be called from a terminal
     * job, to limit what an untrusted job can invoke. */
    if (STRNCMP(func, "Tapi_", 5) != 0)
    {
        ch_log(channel, "Invalid function name: %s", func);
        return;
    }

    argvars[0].v_type = VAR_NUMBER;
    argvars[0].vval.v_number = term->tl_buffer->b_fnum;
    argvars[1] = item->li_next->li_tv;
    if (call_func(func, (int)STRLEN(func), &rettv,
                2, argvars, /* argv_func */ NULL,
                /* firstline */ 1, /* lastline */ 1,
                &doesrange, /* evaluate */ TRUE,
                /* partial */ NULL, /* selfdict */ NULL) == OK)
    {
        clear_tv(&rettv);
        ch_log(channel, "Function %s called", func);
    }
    else
        ch_log(channel, "Calling function %s failed", func);
}

/*
 * Called by libvterm when it cannot recognize an OSC sequence.
 * We recognize a terminal API command.
 */
    static int
parse_osc(const char *command, size_t cmdlen, void *user)
{
    term_T      *term = (term_T *)user;
    js_read_T   reader;
    typval_T    tv;
    channel_T   *channel = term->tl_job == NULL ? NULL
                                                    : term->tl_job->jv_channel;

    /* We recognize only OSC 5 1 ; {command} */
    if (cmdlen < 3 || STRNCMP(command, "51;", 3) != 0)
        return 0; /* not handled */

    reader.js_buf = vim_strnsave((char_u *)command + 3, (int)(cmdlen - 3));
    if (reader.js_buf == NULL)
        return 1;
    reader.js_fill = NULL;
    reader.js_used = 0;
    if (json_decode(&reader, &tv, 0) == OK
            && tv.v_type == VAR_LIST
            && tv.vval.v_list != NULL)
    {
        listitem_T *item = tv.vval.v_list->lv_first;

        if (item == NULL)
            ch_log(channel, "Missing command");
        else
        {
            char_u      *cmd = tv_get_string(&item->li_tv);

            /* Make sure an invoked command doesn't delete the buffer (and the
             * terminal) under our fingers. */
            ++term->tl_buffer->b_locked;

            item = item->li_next;
            if (item == NULL)
                ch_log(channel, "Missing argument for %s", cmd);
            else if (STRCMP(cmd, "drop") == 0)
                handle_drop_command(item);
            else if (STRCMP(cmd, "call") == 0)
                handle_call_command(term, channel, item);
            else
                ch_log(channel, "Invalid command received: %s", cmd);
            --term->tl_buffer->b_locked;
        }
    }
    else
        ch_log(channel, "Invalid JSON received");

    vim_free(reader.js_buf);
    clear_tv(&tv);
    return 1;
}

/* Fallbacks for escape sequences libvterm does not handle itself. */
static VTermParserCallbacks parser_fallbacks = {
  NULL,         /* text */
  NULL,         /* control */
  NULL,         /* escape */
  NULL,         /* csi */
  parse_osc,    /* osc */
  NULL,         /* dcs */
  NULL          /* resize */
};

/*
 * Use Vim's allocation functions for vterm so profiling works.
 */
    static void *
vterm_malloc(size_t size, void *data UNUSED)
{
    return alloc_clear((unsigned) size);
}

    static void
vterm_memfree(void *ptr, void *data UNUSED)
{
    vim_free(ptr);
}

static VTermAllocatorFunctions vterm_allocator = {
  &vterm_malloc,
  &vterm_memfree
};

/*
 * Create a new vterm and initialize it.
 * Return FAIL when out of memory.
 */
    static int
create_vterm(term_T *term, int rows, int cols)
{
    VTerm           *vterm;
    VTermScreen     *screen;
    VTermState      *state;
    VTermValue      value;

    vterm = vterm_new_with_allocator(rows, cols, &vterm_allocator, NULL);
    term->tl_vterm = vterm;
    if (vterm == NULL)
        return FAIL;

    // Allocate screen and state here, so we can bail out if that fails.
    state = vterm_obtain_state(vterm);
    screen = vterm_obtain_screen(vterm);
    if (state == NULL || screen == NULL)
    {
        vterm_free(vterm);
        return FAIL;
    }

    vterm_screen_set_callbacks(screen, &screen_callbacks, term);
    /* TODO: depends on 'encoding'. */
    vterm_set_utf8(vterm, 1);

    init_default_colors(term);

    vterm_state_set_default_colors(
            state,
            &term->tl_default_color.fg,
            &term->tl_default_color.bg);

    if (t_colors >= 16)
        vterm_state_set_bold_highbright(vterm_obtain_state(vterm), 1);

    /* Required to initialize most things. */
    vterm_screen_reset(screen, 1 /* hard */);

    /* Allow using alternate screen. */
    vterm_screen_enable_altscreen(screen, 1);

    /* For unix do not use a blinking cursor. In an xterm this causes the
     * cursor to blink if it's blinking in the xterm.
     * For Windows we respect the system wide setting. */
#ifdef WIN3264
    if (GetCaretBlinkTime() == INFINITE)
        value.boolean = 0;
    else
        value.boolean = 1;
#else
    value.boolean = 0;
#endif
    vterm_state_set_termprop(state, VTERM_PROP_CURSORBLINK, &value);
    vterm_state_set_unrecognised_fallbacks(state, &parser_fallbacks, term);

    return OK;
}

/*
 * Return the text to show for the buffer name and status.
 */
    char_u *
term_get_status_text(term_T *term)
{
    if (term->tl_status_text == NULL)
    {
        char_u *txt;
        size_t len;

        if (term->tl_normal_mode)
        {
            if (term_job_running(term))
                txt = (char_u *)_("Terminal");
            else
                txt = (char_u *)_("Terminal-finished");
        }
        else if (term->tl_title != NULL)
            txt = term->tl_title;
        else if (term_none_open(term))
            txt = (char_u *)_("active");
        else if (term_job_running(term))
            txt = (char_u *)_("running");
        else
            txt = (char_u *)_("finished");
        len = 9 + STRLEN(term->tl_buffer->b_fname) + STRLEN(txt);
        term->tl_status_text = alloc((int)len);
        if (term->tl_status_text != NULL)
            vim_snprintf((char *)term->tl_status_text, len, "%s [%s]",
                    term->tl_buffer->b_fname, txt);
    }
    return term->tl_status_text;
}

/*
 * Mark references in jobs of terminals.
 */
    int
set_ref_in_term(int copyID)
{
    int         abort = FALSE;
    term_T      *term;
    typval_T    tv;

    for (term = first_term; term != NULL; term = term->tl_next)
        if (term->tl_job != NULL)
        {
            tv.v_type = VAR_JOB;
            tv.vval.v_job = term->tl_job;
            abort = abort || set_ref_in_item(&tv, copyID, NULL, NULL);
        }
    return abort;
}

/*
 * Cache "Terminal" highlight group colors.
 */
    void
set_terminal_default_colors(int cterm_fg, int cterm_bg)
{
    term_default_cterm_fg = cterm_fg - 1;
    term_default_cterm_bg = cterm_bg - 1;
}

/*
 * Get the buffer from the first argument in "argvars".
 * Returns NULL when the buffer is not for a terminal window and logs a message
 * with "where".
 */
    static buf_T *
term_get_buf(typval_T *argvars, char *where)
{
    buf_T *buf;

    (void)tv_get_number(&argvars[0]);       /* issue errmsg if type error */
    ++emsg_off;
    buf = get_buf_tv(&argvars[0], FALSE);
    --emsg_off;
    if (buf == NULL || buf->b_term == NULL)
    {
        ch_log(NULL, "%s: invalid buffer argument", where);
        return NULL;
    }
    return buf;
}

/* Return TRUE when two vterm colors are identical (RGB and ANSI index). */
    static int
same_color(VTermColor *a, VTermColor *b)
{
    return a->red == b->red
        && a->green == b->green
        && a->blue == b->blue
        && a->ansi_index == b->ansi_index;
}

/* Write one color to the dump file as hex RGB plus the ANSI index. */
    static void
dump_term_color(FILE *fd, VTermColor *color)
{
    fprintf(fd, "%02x%02x%02x%d",
            (int)color->red, (int)color->green, (int)color->blue,
            (int)color->ansi_index);
}

/*
 * "term_dumpwrite(buf, filename, options)" function
 *
 * Each screen cell in full is:
 *    |{characters}+{attributes}#{fg-color}{color-idx}#{bg-color}{color-idx}
 * {characters} is a space for an empty cell
 * For a double-width character "+" is changed to "*" and the next cell is
 * skipped.
 * {attributes} is the decimal value of HL_BOLD + HL_UNDERLINE, etc.
 *              when "&" use the same as the previous cell.
 * {fg-color} is hex RGB, when "&" use the same as the previous cell.
 * {bg-color} is hex RGB, when "&" use the same as the previous cell.
 * {color-idx} is a number from 0 to 255
 *
 * Screen cell with same width, attributes and color as the previous one:
 *    |{characters}
 *
 * To use the color of the previous cell, use "&" instead of {color}-{idx}.
 *
 * Repeating the previous screen cell:
 *    @{count}
 */
    void
f_term_dumpwrite(typval_T *argvars, typval_T *rettv UNUSED)
{
    buf_T       *buf = term_get_buf(argvars, "term_dumpwrite()");
    term_T      *term;
    char_u      *fname;
    int         max_height = 0;
    int         max_width = 0;
    stat_T      st;
    FILE        *fd;
    VTermPos    pos;
    VTermScreen *screen;
    VTermScreenCell prev_cell;
    VTermState  *state;
    VTermPos    cursor_pos;

    if (check_restricted() || check_secure())
        return;
    if (buf == NULL)
        return;
    term = buf->b_term;
    if (term->tl_vterm == NULL)
    {
        EMSG(_("E958: Job already finished"));
        return;
    }

    if (argvars[2].v_type != VAR_UNKNOWN)
    {
        dict_T *d;

        if (argvars[2].v_type != VAR_DICT)
        {
            EMSG(_(e_dictreq));
            return;
        }
        d = argvars[2].vval.v_dict;
        if (d != NULL)
        {
            max_height = dict_get_number(d, (char_u *)"rows");
            max_width = dict_get_number(d, (char_u *)"columns");
        }
    }

    fname = tv_get_string_chk(&argvars[1]);
    if (fname == NULL)
        return;
    /* Refuse to overwrite an existing file. */
    if (mch_stat((char *)fname, &st) >= 0)
    {
        EMSG2(_("E953: File exists: %s"), fname);
        return;
    }

    if (*fname == NUL || (fd = mch_fopen((char *)fname, WRITEBIN)) == NULL)
    {
        EMSG2(_(e_notcreate), *fname == NUL ? (char_u *)_("<empty>") : fname);
        return;
    }

    vim_memset(&prev_cell, 0, sizeof(prev_cell));

    screen = vterm_obtain_screen(term->tl_vterm);
    state = vterm_obtain_state(term->tl_vterm);
    vterm_state_get_cursorpos(state, &cursor_pos);

    for (pos.row = 0; (max_height == 0 || pos.row < max_height)
                                         && pos.row < term->tl_rows; ++pos.row)
    {
        int             repeat = 0;

        for (pos.col = 0; (max_width == 0 || pos.col < max_width)
                                         && pos.col < term->tl_cols; ++pos.col)
        {
            VTermScreenCell cell;
            int             same_attr;
            int             same_chars = TRUE;
            int             i;
            int             is_cursor_pos = (pos.col == cursor_pos.col
                                             && pos.row == cursor_pos.row);

            if (vterm_screen_get_cell(screen, pos, &cell) == 0)
                vim_memset(&cell, 0, sizeof(cell));

            for (i = 0; i < VTERM_MAX_CHARS_PER_CELL; ++i)
            {
                int c = cell.chars[i];
                int pc = prev_cell.chars[i];

                /* For the first character NUL is the same as space. */
                if (i == 0)
                {
                    c = (c == NUL) ? ' ' : c;
                    pc = (pc == NUL) ? ' ' : pc;
                }
                if (c != pc)
                    same_chars = FALSE;
                if (c == NUL || pc == NUL)
                    break;
            }
            same_attr = vtermAttr2hl(cell.attrs)
                                               == vtermAttr2hl(prev_cell.attrs)
                    && same_color(&cell.fg, &prev_cell.fg)
                    && same_color(&cell.bg, &prev_cell.bg);
            if (same_chars && cell.width == prev_cell.width && same_attr
                                                             && !is_cursor_pos)
            {
                ++repeat;
            }
            else
            {
                if (repeat > 0)
                {
                    fprintf(fd, "@%d", repeat);
                    repeat = 0;
                }
                fputs(is_cursor_pos ? ">" : "|", fd);

                if (cell.chars[0] == NUL)
                    fputs(" ", fd);
                else
                {
                    char_u      charbuf[10];
                    int         len;

                    for (i = 0; i < VTERM_MAX_CHARS_PER_CELL
                                                  && cell.chars[i] != NUL; ++i)
                    {
                        len = utf_char2bytes(cell.chars[i], charbuf);
                        fwrite(charbuf, len, 1, fd);
                    }
                }

                /* When only the characters differ we don't write anything, the
                 * following "|", "@" or NL will indicate using the same
                 * attributes. */
                if (cell.width != prev_cell.width || !same_attr)
                {
                    if (cell.width == 2)
                    {
                        fputs("*", fd);
                        ++pos.col;
                    }
                    else
                        fputs("+", fd);

                    if (same_attr)
                    {
                        fputs("&", fd);
                    }
                    else
                    {
                        fprintf(fd, "%d", vtermAttr2hl(cell.attrs));
                        if (same_color(&cell.fg, &prev_cell.fg))
                            fputs("&", fd);
                        else
                        {
                            fputs("#", fd);
                            dump_term_color(fd, &cell.fg);
                        }
                        if (same_color(&cell.bg, &prev_cell.bg))
                            fputs("&", fd);
                        else
                        {
                            fputs("#", fd);
                            dump_term_color(fd, &cell.bg);
                        }
                    }
                }

                prev_cell = cell;
            }
        }
        if (repeat > 0)
            fprintf(fd, "@%d", repeat);
        fputs("\n", fd);
    }

    fclose(fd);
}

/*
 * Called when a dump is corrupted. Put a breakpoint here when debugging.
 */
    static void
dump_is_corrupt(garray_T *gap)
{
    ga_concat(gap, (char_u *)"CORRUPT");
}

/* Append a copy of "cell" to the cell attribute growarray "gap". */
    static void
append_cell(garray_T *gap, cellattr_T *cell)
{
    if (ga_grow(gap, 1) == OK)
    {
        *(((cellattr_T *)gap->ga_data) + gap->ga_len) = *cell;
        ++gap->ga_len;
    }
}

/*
 * Read the dump file from "fd" and append lines to the current buffer.
 * Return the cell width of the longest line.
 */
    static int
read_dump_file(FILE *fd, VTermPos *cursor_pos)
{
    int		c;
    garray_T	ga_text;
    garray_T	ga_cell;
    char_u	*prev_char = NULL;
    int		attr = 0;
    cellattr_T	cell;
    term_T	*term = curbuf->b_term;
    int		max_cells = 0;
    int		start_row = term->tl_scrollback.ga_len;

    ga_init2(&ga_text, 1, 90);
    ga_init2(&ga_cell, sizeof(cellattr_T), 90);
    vim_memset(&cell, 0, sizeof(cell));
    /* no cursor position seen yet */
    cursor_pos->row = -1;
    cursor_pos->col = -1;

    c = fgetc(fd);
    for (;;)
    {
	if (c == EOF)
	    break;
	if (c == '\r')
	{
	    // DOS line endings?  Ignore.
	    c = fgetc(fd);
	}
	else if (c == '\n')
	{
	    /* End of a line: append it to the buffer. */
	    if (ga_text.ga_data == NULL)
		dump_is_corrupt(&ga_text);
	    if (ga_grow(&term->tl_scrollback, 1) == OK)
	    {
		sb_line_T *line = (sb_line_T *)term->tl_scrollback.ga_data
						  + term->tl_scrollback.ga_len;

		if (max_cells < ga_cell.ga_len)
		    max_cells = ga_cell.ga_len;
		/* ownership of ga_cell.ga_data moves to the scrollback line */
		line->sb_cols = ga_cell.ga_len;
		line->sb_cells = ga_cell.ga_data;
		line->sb_fill_attr = term->tl_default_color;
		++term->tl_scrollback.ga_len;
		ga_init(&ga_cell);

		ga_append(&ga_text, NUL);
		ml_append(curbuf->b_ml.ml_line_count, ga_text.ga_data,
							ga_text.ga_len, FALSE);
	    }
	    else
		ga_clear(&ga_cell);
	    ga_text.ga_len = 0;

	    c = fgetc(fd);
	}
	else if (c == '|' || c == '>')
	{
	    int prev_len = ga_text.ga_len;

	    if (c == '>')
	    {
		if (cursor_pos->row != -1)
		    dump_is_corrupt(&ga_text);	/* duplicate cursor */
		cursor_pos->row = term->tl_scrollback.ga_len - start_row;
		cursor_pos->col = ga_cell.ga_len;
	    }

	    /* normal character(s) followed by "+", "*", "|", "@" or NL */
	    c = fgetc(fd);
	    if (c != EOF)
		ga_append(&ga_text, c);

	    /* collect the remaining bytes of the cell text */
	    for (;;)
	    {
		c = fgetc(fd);
		if (c == '+' || c == '*' || c == '|' || c == '>' || c == '@'
						      || c == EOF || c == '\n')
		    break;
		ga_append(&ga_text, c);
	    }

	    /* save the character for repeating it */
	    vim_free(prev_char);
	    if (ga_text.ga_data != NULL)
		prev_char = vim_strnsave(((char_u *)ga_text.ga_data) + prev_len,
						    ga_text.ga_len - prev_len);

	    if (c == '@' || c == '|' || c == '>' || c == '\n')
	    {
		/* use all attributes from previous cell */
	    }
	    else if (c == '+' || c == '*')
	    {
		int is_bg;

		cell.width = c == '+' ? 1 : 2;

		c = fgetc(fd);
		if (c == '&')
		{
		    /* use same attr as previous cell */
		    c = fgetc(fd);
		}
		else if (isdigit(c))
		{
		    /* get the decimal attribute */
		    attr = 0;
		    while (isdigit(c))
		    {
			attr = attr * 10 + (c - '0');
			c = fgetc(fd);
		    }
		    hl2vtermAttr(attr, &cell);
		}
		else
		    dump_is_corrupt(&ga_text);

		/* is_bg == 0: fg, is_bg == 1: bg */
		for (is_bg = 0; is_bg <= 1; ++is_bg)
		{
		    if (c == '&')
		    {
			/* use same color as previous cell */
			c = fgetc(fd);
		    }
		    else if (c == '#')
		    {
			/* "#rrggbb" hex color followed by a decimal index */
			int red, green, blue, index = 0;

			c = fgetc(fd);
			red = hex2nr(c);
			c = fgetc(fd);
			red = (red << 4) + hex2nr(c);
			c = fgetc(fd);
			green = hex2nr(c);
			c = fgetc(fd);
			green = (green << 4) + hex2nr(c);
			c = fgetc(fd);
			blue = hex2nr(c);
			c = fgetc(fd);
			blue = (blue << 4) + hex2nr(c);
			c = fgetc(fd);
			if (!isdigit(c))
			    dump_is_corrupt(&ga_text);
			while (isdigit(c))
			{
			    index = index * 10 + (c - '0');
			    c = fgetc(fd);
			}

			if (is_bg)
			{
			    cell.bg.red = red;
			    cell.bg.green = green;
			    cell.bg.blue = blue;
			    cell.bg.ansi_index = index;
			}
			else
			{
			    cell.fg.red = red;
			    cell.fg.green = green;
			    cell.fg.blue = blue;
			    cell.fg.ansi_index = index;
			}
		    }
		    else
			dump_is_corrupt(&ga_text);
		}
	    }
	    else
		dump_is_corrupt(&ga_text);

	    append_cell(&ga_cell, &cell);
	}
	else if (c == '@')
	{
	    if (prev_char == NULL)
		dump_is_corrupt(&ga_text);
	    else
	    {
		int count = 0;

		/* repeat previous character, get the count */
		for (;;)
		{
		    c = fgetc(fd);
		    if (!isdigit(c))
			break;
		    count = count * 10 + (c - '0');
		}

		while (count-- > 0)
		{
		    ga_concat(&ga_text, prev_char);
		    append_cell(&ga_cell, &cell);
		}
	    }
	}
	else
	{
	    dump_is_corrupt(&ga_text);
	    c = fgetc(fd);
	}
    }

    if (ga_text.ga_len > 0)
    {
	/* trailing characters after last NL */
	dump_is_corrupt(&ga_text);
	ga_append(&ga_text, NUL);
	ml_append(curbuf->b_ml.ml_line_count, ga_text.ga_data,
							ga_text.ga_len, FALSE);
    }

    ga_clear(&ga_text);
    vim_free(prev_char);

    return max_cells;
}

/*
 * Return an allocated string with at least "text_width" "=" characters and
 * "fname" inserted in the middle.
 */
    static char_u *
get_separator(int text_width, char_u *fname)
{
    int		width = MAX(text_width, curwin->w_width);
    char_u	*textline;
    int		fname_size;
    char_u	*p = fname;
    int		i;
    size_t	off;

    /* room for the separator width plus the full name plus NUL */
    textline = alloc(width + (int)STRLEN(fname) + 1);
    if (textline == NULL)
	return NULL;

    fname_size = vim_strsize(fname);
    if (fname_size < width - 8)
    {
	/* enough room, don't use the full window width */
	width = MAX(text_width, fname_size + 8);
    }
    else if (fname_size > width - 8)
    {
	/* full name doesn't fit, use only the tail */
	p = gettail(fname);
	fname_size = vim_strsize(p);
    }
    /* skip characters until the name fits */
    while (fname_size > width - 8)
    {
	p += (*mb_ptr2len)(p);
	fname_size = vim_strsize(p);
    }

    for (i = 0; i < (width - fname_size) / 2 - 1; ++i)
	textline[i] = '=';
    textline[i++] = ' ';

    STRCPY(textline + i, p);
    off = STRLEN(textline);
    textline[off] = ' ';
    for (i = 1; i < (width - fname_size) / 2; ++i)
	textline[off + i] = '=';
    textline[off + i] = NUL;

    return textline;
}

/*
 * Common for "term_dumpdiff()" and "term_dumpload()".
 */
    static void
term_load_dump(typval_T *argvars, typval_T *rettv, int do_diff)
{
    jobopt_T	opt;
    buf_T	*buf;
    char_u	buf1[NUMBUFLEN];
    char_u	buf2[NUMBUFLEN];
    char_u	*fname1;
    char_u	*fname2 = NULL;
    char_u	*fname_tofree = NULL;
    FILE	*fd1;
    FILE	*fd2 = NULL;
    char_u	*textline = NULL;

    /* First open the files.  If this fails bail out. */
    fname1 = tv_get_string_buf_chk(&argvars[0], buf1);
    if (do_diff)
	fname2 = tv_get_string_buf_chk(&argvars[1], buf2);
    if (fname1 == NULL || (do_diff && fname2 == NULL))
    {
	EMSG(_(e_invarg));
	return;
    }
    fd1 = mch_fopen((char *)fname1, READBIN);
    if (fd1 == NULL)
    {
	EMSG2(_(e_notread), fname1);
	return;
    }
    if (do_diff)
    {
	fd2 = mch_fopen((char *)fname2, READBIN);
	if (fd2 == NULL)
	{
	    fclose(fd1);
	    EMSG2(_(e_notread), fname2);
	    return;
	}
    }

    init_job_options(&opt);
    if (argvars[do_diff ? 2 : 1].v_type != VAR_UNKNOWN
	&& get_job_options(&argvars[do_diff ? 2 : 1], &opt, 0,
		JO2_TERM_NAME + JO2_TERM_COLS + JO2_TERM_ROWS
		    + JO2_VERTICAL + JO2_CURWIN + JO2_NORESTORE) == FAIL)
	goto theend;

    if (opt.jo_term_name == NULL)
    {
	/* default buffer name: "dump diff {fname1}"; 12 covers the
	 * "dump diff " prefix plus NUL */
	size_t len = STRLEN(fname1) + 12;

	fname_tofree = alloc((int)len);
	if (fname_tofree != NULL)
	{
	    vim_snprintf((char *)fname_tofree, len, "dump diff %s", fname1);
	    opt.jo_term_name = fname_tofree;
	}
    }

    buf = term_start(&argvars[0], NULL, &opt, TERM_START_NOJOB);
    if (buf != NULL && buf->b_term != NULL)
    {
	int		i;
	linenr_T	bot_lnum;
	linenr_T	lnum;
	term_T		*term = buf->b_term;
	int		width;
	int		width2;
	VTermPos	cursor_pos1;
	VTermPos	cursor_pos2;

	init_default_colors(term);

	rettv->vval.v_number = buf->b_fnum;

	/* read the files, fill the buffer with the diff */
	width = read_dump_file(fd1, &cursor_pos1);

	/* position the cursor */
	if (cursor_pos1.row >= 0)
	{
	    curwin->w_cursor.lnum = cursor_pos1.row + 1;
	    coladvance(cursor_pos1.col);
	}

	/* Delete the empty line that was in the empty buffer. */
	ml_delete(1, FALSE);

	/* For term_dumpload() we are done here.
*/ if (!do_diff) goto theend; term->tl_top_diff_rows = curbuf->b_ml.ml_line_count; textline = get_separator(width, fname1); if (textline == NULL) goto theend; if (add_empty_scrollback(term, &term->tl_default_color, 0) == OK) ml_append(curbuf->b_ml.ml_line_count, textline, 0, FALSE); vim_free(textline); textline = get_separator(width, fname2); if (textline == NULL) goto theend; if (add_empty_scrollback(term, &term->tl_default_color, 0) == OK) ml_append(curbuf->b_ml.ml_line_count, textline, 0, FALSE); textline[width] = NUL; bot_lnum = curbuf->b_ml.ml_line_count; width2 = read_dump_file(fd2, &cursor_pos2); if (width2 > width) { vim_free(textline); textline = alloc(width2 + 1); if (textline == NULL) goto theend; width = width2; textline[width] = NUL; } term->tl_bot_diff_rows = curbuf->b_ml.ml_line_count - bot_lnum; for (lnum = 1; lnum <= term->tl_top_diff_rows; ++lnum) { if (lnum + bot_lnum > curbuf->b_ml.ml_line_count) { /* bottom part has fewer rows, fill with "-" */ for (i = 0; i < width; ++i) textline[i] = '-'; } else { char_u *line1; char_u *line2; char_u *p1; char_u *p2; int col; sb_line_T *sb_line = (sb_line_T *)term->tl_scrollback.ga_data; cellattr_T *cellattr1 = (sb_line + lnum - 1)->sb_cells; cellattr_T *cellattr2 = (sb_line + lnum + bot_lnum - 1) ->sb_cells; /* Make a copy, getting the second line will invalidate it. 
*/ line1 = vim_strsave(ml_get(lnum)); if (line1 == NULL) break; p1 = line1; line2 = ml_get(lnum + bot_lnum); p2 = line2; for (col = 0; col < width && *p1 != NUL && *p2 != NUL; ++col) { int len1 = utfc_ptr2len(p1); int len2 = utfc_ptr2len(p2); textline[col] = ' '; if (len1 != len2 || STRNCMP(p1, p2, len1) != 0) /* text differs */ textline[col] = 'X'; else if (lnum == cursor_pos1.row + 1 && col == cursor_pos1.col && (cursor_pos1.row != cursor_pos2.row || cursor_pos1.col != cursor_pos2.col)) /* cursor in first but not in second */ textline[col] = '>'; else if (lnum == cursor_pos2.row + 1 && col == cursor_pos2.col && (cursor_pos1.row != cursor_pos2.row || cursor_pos1.col != cursor_pos2.col)) /* cursor in second but not in first */ textline[col] = '<'; else if (cellattr1 != NULL && cellattr2 != NULL) { if ((cellattr1 + col)->width != (cellattr2 + col)->width) textline[col] = 'w'; else if (!same_color(&(cellattr1 + col)->fg, &(cellattr2 + col)->fg)) textline[col] = 'f'; else if (!same_color(&(cellattr1 + col)->bg, &(cellattr2 + col)->bg)) textline[col] = 'b'; else if (vtermAttr2hl((cellattr1 + col)->attrs) != vtermAttr2hl(((cellattr2 + col)->attrs))) textline[col] = 'a'; } p1 += len1; p2 += len2; /* TODO: handle different width */ } vim_free(line1); while (col < width) { if (*p1 == NUL && *p2 == NUL) textline[col] = '?'; else if (*p1 == NUL) { textline[col] = '+'; p2 += utfc_ptr2len(p2); } else { textline[col] = '-'; p1 += utfc_ptr2len(p1); } ++col; } } if (add_empty_scrollback(term, &term->tl_default_color, term->tl_top_diff_rows) == OK) ml_append(term->tl_top_diff_rows + lnum, textline, 0, FALSE); ++bot_lnum; } while (lnum + bot_lnum <= curbuf->b_ml.ml_line_count) { /* bottom part has more rows, fill with "+" */ for (i = 0; i < width; ++i) textline[i] = '+'; if (add_empty_scrollback(term, &term->tl_default_color, term->tl_top_diff_rows) == OK) ml_append(term->tl_top_diff_rows + lnum, textline, 0, FALSE); ++lnum; ++bot_lnum; } term->tl_cols = width; /* looks better 
without wrapping */
	curwin->w_p_wrap = 0;
    }

theend:
    vim_free(textline);
    vim_free(fname_tofree);
    fclose(fd1);
    if (fd2 != NULL)
	fclose(fd2);
}

/*
 * If the current buffer shows the output of term_dumpdiff(), swap the top and
 * bottom files.
 * Return FAIL when this is not possible.
 */
    int
term_swap_diff()
{
    term_T	*term = curbuf->b_term;
    linenr_T	line_count;
    linenr_T	top_rows;
    linenr_T	bot_rows;
    linenr_T	bot_start;
    linenr_T	lnum;
    char_u	*p;
    sb_line_T	*sb_line;

    if (term == NULL
	    || !term_is_finished(curbuf)
	    || term->tl_top_diff_rows == 0
	    || term->tl_scrollback.ga_len == 0)
	return FAIL;

    line_count = curbuf->b_ml.ml_line_count;
    top_rows = term->tl_top_diff_rows;
    bot_rows = term->tl_bot_diff_rows;
    bot_start = line_count - bot_rows;
    sb_line = (sb_line_T *)term->tl_scrollback.ga_data;

    /* move lines from top to above the bottom part */
    for (lnum = 1; lnum <= top_rows; ++lnum)
    {
	p = vim_strsave(ml_get(1));
	if (p == NULL)
	    return OK;
	ml_append(bot_start, p, 0, FALSE);
	ml_delete(1, FALSE);
	vim_free(p);
    }

    /* move lines from bottom to the top */
    for (lnum = 1; lnum <= bot_rows; ++lnum)
    {
	p = vim_strsave(ml_get(bot_start + lnum));
	if (p == NULL)
	    return OK;
	ml_delete(bot_start + lnum, FALSE);
	ml_append(lnum - 1, p, 0, FALSE);
	vim_free(p);
    }

    if (top_rows == bot_rows)
    {
	/* rows counts are equal, can swap cell properties */
	for (lnum = 0; lnum < top_rows; ++lnum)
	{
	    sb_line_T	temp;

	    temp = *(sb_line + lnum);
	    *(sb_line + lnum) = *(sb_line + bot_start + lnum);
	    *(sb_line + bot_start + lnum) = temp;
	}
    }
    else
    {
	/* NOTE(review): indexing below assumes tl_scrollback.ga_len matches
	 * the buffer line count -- confirm against the dump-diff setup. */
	size_t		size = sizeof(sb_line_T) * term->tl_scrollback.ga_len;
	sb_line_T	*temp = (sb_line_T *)alloc((int)size);

	/* need to copy cell properties into temp memory */
	if (temp != NULL)
	{
	    mch_memmove(temp, term->tl_scrollback.ga_data, size);
	    mch_memmove(term->tl_scrollback.ga_data,
		    temp + bot_start,
		    sizeof(sb_line_T) * bot_rows);
	    mch_memmove((sb_line_T *)term->tl_scrollback.ga_data + bot_rows,
		    temp + top_rows,
		    sizeof(sb_line_T) * (line_count - top_rows - bot_rows));
	    mch_memmove((sb_line_T *)term->tl_scrollback.ga_data
						       + line_count - top_rows,
		    temp,
		    sizeof(sb_line_T) * top_rows);
	    vim_free(temp);
	}
    }

    term->tl_top_diff_rows = bot_rows;
    term->tl_bot_diff_rows = top_rows;

    update_screen(NOT_VALID);
    return OK;
}

/*
 * "term_dumpdiff(filename, filename, options)" function
 */
    void
f_term_dumpdiff(typval_T *argvars, typval_T *rettv)
{
    term_load_dump(argvars, rettv, TRUE);
}

/*
 * "term_dumpload(filename, options)" function
 */
    void
f_term_dumpload(typval_T *argvars, typval_T *rettv)
{
    term_load_dump(argvars, rettv, FALSE);
}

/*
 * "term_getaltscreen(buf)" function
 */
    void
f_term_getaltscreen(typval_T *argvars, typval_T *rettv)
{
    buf_T	*buf = term_get_buf(argvars, "term_getaltscreen()");

    if (buf == NULL)
	return;
    rettv->vval.v_number = buf->b_term->tl_using_altscreen;
}

/*
 * "term_getattr(attr, name)" function
 */
    void
f_term_getattr(typval_T *argvars, typval_T *rettv)
{
    int	    attr;
    size_t  i;
    char_u  *name;

    /* mapping from attribute name to HL_ flag */
    static struct {
	char	    *name;
	int	    attr;
    } attrs[] = {
	{"bold",      HL_BOLD},
	{"italic",    HL_ITALIC},
	{"underline", HL_UNDERLINE},
	{"strike",    HL_STRIKETHROUGH},
	{"reverse",   HL_INVERSE},
    };

    attr = tv_get_number(&argvars[0]);
    name = tv_get_string_chk(&argvars[1]);
    if (name == NULL)
	return;

    for (i = 0; i < sizeof(attrs)/sizeof(attrs[0]); ++i)
	if (STRCMP(name, attrs[i].name) == 0)
	{
	    rettv->vval.v_number = (attr & attrs[i].attr) != 0 ? 1 : 0;
	    break;
	}
}

/*
 * "term_getcursor(buf)" function
 */
    void
f_term_getcursor(typval_T *argvars, typval_T *rettv)
{
    buf_T	*buf = term_get_buf(argvars, "term_getcursor()");
    term_T	*term;
    list_T	*l;
    dict_T	*d;

    if (rettv_list_alloc(rettv) == FAIL)
	return;
    if (buf == NULL)
	return;
    term = buf->b_term;

    l = rettv->vval.v_list;
    list_append_number(l, term->tl_cursor_pos.row + 1);
    list_append_number(l, term->tl_cursor_pos.col + 1);

    d = dict_alloc();
    if (d != NULL)
    {
	dict_add_number(d, "visible", term->tl_cursor_visible);
	dict_add_number(d, "blink", blink_state_is_inverted() ?
!term->tl_cursor_blink
					       : term->tl_cursor_blink);
	dict_add_number(d, "shape", term->tl_cursor_shape);
	dict_add_string(d, "color", cursor_color_get(term->tl_cursor_color));
	list_append_dict(l, d);
    }
}

/*
 * "term_getjob(buf)" function
 */
    void
f_term_getjob(typval_T *argvars, typval_T *rettv)
{
    buf_T	*buf = term_get_buf(argvars, "term_getjob()");

    if (buf == NULL)
    {
	rettv->v_type = VAR_SPECIAL;
	rettv->vval.v_number = VVAL_NULL;
	return;
    }

    rettv->v_type = VAR_JOB;
    rettv->vval.v_job = buf->b_term->tl_job;
    if (rettv->vval.v_job != NULL)
	++rettv->vval.v_job->jv_refcount;
}

/*
 * Get a 0-based row number from "tv".  The string "." means the current
 * cursor row of "term".
 */
    static int
get_row_number(typval_T *tv, term_T *term)
{
    if (tv->v_type == VAR_STRING
	    && tv->vval.v_string != NULL
	    && STRCMP(tv->vval.v_string, ".") == 0)
	return term->tl_cursor_pos.row;
    return (int)tv_get_number(tv) - 1;
}

/*
 * "term_getline(buf, row)" function
 */
    void
f_term_getline(typval_T *argvars, typval_T *rettv)
{
    buf_T	    *buf = term_get_buf(argvars, "term_getline()");
    term_T	    *term;
    int		    row;

    rettv->v_type = VAR_STRING;
    if (buf == NULL)
	return;
    term = buf->b_term;
    row = get_row_number(&argvars[1], term);

    if (term->tl_vterm == NULL)
    {
	linenr_T lnum = row + term->tl_scrollback_scrolled + 1;

	/* vterm is finished, get the text from the buffer */
	if (lnum > 0 && lnum <= buf->b_ml.ml_line_count)
	    rettv->vval.v_string = vim_strsave(ml_get_buf(buf, lnum, FALSE));
    }
    else
    {
	VTermScreen	*screen = vterm_obtain_screen(term->tl_vterm);
	VTermRect	rect;
	int		len;
	char_u		*p;

	if (row < 0 || row >= term->tl_rows)
	    return;
	/* worst case: every cell a multi-byte character */
	len = term->tl_cols * MB_MAXBYTES + 1;
	p = alloc(len);
	if (p == NULL)
	    return;
	rettv->vval.v_string = p;

	rect.start_col = 0;
	rect.end_col = term->tl_cols;
	rect.start_row = row;
	rect.end_row = row + 1;
	p[vterm_screen_get_text(screen, (char *)p, len, rect)] = NUL;
    }
}

/*
 * "term_getscrolled(buf)" function
 */
    void
f_term_getscrolled(typval_T *argvars, typval_T *rettv)
{
    buf_T	*buf = term_get_buf(argvars, "term_getscrolled()");

    if (buf == NULL)
	return;
    rettv->vval.v_number = buf->b_term->tl_scrollback_scrolled;
}

/*
 * "term_getsize(buf)" function
 */
    void
f_term_getsize(typval_T *argvars, typval_T *rettv)
{
    buf_T	*buf = term_get_buf(argvars, "term_getsize()");
    list_T	*l;

    if (rettv_list_alloc(rettv) == FAIL)
	return;
    if (buf == NULL)
	return;

    l = rettv->vval.v_list;
    list_append_number(l, buf->b_term->tl_rows);
    list_append_number(l, buf->b_term->tl_cols);
}

/*
 * "term_setsize(buf, rows, cols)" function
 */
    void
f_term_setsize(typval_T *argvars UNUSED, typval_T *rettv UNUSED)
{
    buf_T	*buf = term_get_buf(argvars, "term_setsize()");
    term_T	*term;
    varnumber_T rows, cols;

    if (buf == NULL)
    {
	EMSG(_("E955: Not a terminal buffer"));
	return;
    }
    if (buf->b_term->tl_vterm == NULL)
	return;
    term = buf->b_term;
    /* zero or negative means: keep the current size */
    rows = tv_get_number(&argvars[1]);
    rows = rows <= 0 ? term->tl_rows : rows;
    cols = tv_get_number(&argvars[2]);
    cols = cols <= 0 ? term->tl_cols : cols;
    vterm_set_size(term->tl_vterm, rows, cols);
    /* handle_resize() will resize the windows */

    /* Get and remember the size we ended up with.  Update the pty. */
    vterm_get_size(term->tl_vterm, &term->tl_rows, &term->tl_cols);
    term_report_winsize(term, term->tl_rows, term->tl_cols);
}

/*
 * "term_getstatus(buf)" function
 */
    void
f_term_getstatus(typval_T *argvars, typval_T *rettv)
{
    buf_T	*buf = term_get_buf(argvars, "term_getstatus()");
    term_T	*term;
    char_u	val[100];

    rettv->v_type = VAR_STRING;
    if (buf == NULL)
	return;
    term = buf->b_term;

    if (term_job_running(term))
	STRCPY(val, "running");
    else
	STRCPY(val, "finished");
    if (term->tl_normal_mode)
	STRCAT(val, ",normal");
    rettv->vval.v_string = vim_strsave(val);
}

/*
 * "term_gettitle(buf)" function
 */
    void
f_term_gettitle(typval_T *argvars, typval_T *rettv)
{
    buf_T	*buf = term_get_buf(argvars, "term_gettitle()");

    rettv->v_type = VAR_STRING;
    if (buf == NULL)
	return;

    if (buf->b_term->tl_title != NULL)
	rettv->vval.v_string = vim_strsave(buf->b_term->tl_title);
}

/*
 * "term_gettty(buf)" function
 */
    void
f_term_gettty(typval_T *argvars, typval_T *rettv)
{
    buf_T	*buf = term_get_buf(argvars,
"term_gettty()");
    char_u	*p = NULL;
    int		num = 0;

    rettv->v_type = VAR_STRING;
    if (buf == NULL)
	return;
    if (argvars[1].v_type != VAR_UNKNOWN)
	num = tv_get_number(&argvars[1]);

    /* 0: tty the job writes to, 1: tty the job reads from */
    switch (num)
    {
	case 0:
	    if (buf->b_term->tl_job != NULL)
		p = buf->b_term->tl_job->jv_tty_out;
	    break;
	case 1:
	    if (buf->b_term->tl_job != NULL)
		p = buf->b_term->tl_job->jv_tty_in;
	    break;
	default:
	    EMSG2(_(e_invarg2), tv_get_string(&argvars[1]));
	    return;
    }
    if (p != NULL)
	rettv->vval.v_string = vim_strsave(p);
}

/*
 * "term_list()" function
 */
    void
f_term_list(typval_T *argvars UNUSED, typval_T *rettv)
{
    term_T	*tp;
    list_T	*l;

    if (rettv_list_alloc(rettv) == FAIL || first_term == NULL)
	return;

    l = rettv->vval.v_list;
    for (tp = first_term; tp != NULL; tp = tp->tl_next)
	if (tp != NULL && tp->tl_buffer != NULL)
	    if (list_append_number(l,
				   (varnumber_T)tp->tl_buffer->b_fnum) == FAIL)
		return;
}

/*
 * "term_scrape(buf, row)" function
 */
    void
f_term_scrape(typval_T *argvars, typval_T *rettv)
{
    buf_T	    *buf = term_get_buf(argvars, "term_scrape()");
    VTermScreen	    *screen = NULL;
    VTermPos	    pos;
    list_T	    *l;
    term_T	    *term;
    char_u	    *p;
    sb_line_T	    *line;

    if (rettv_list_alloc(rettv) == FAIL)
	return;
    if (buf == NULL)
	return;
    term = buf->b_term;

    l = rettv->vval.v_list;
    pos.row = get_row_number(&argvars[1], term);

    if (term->tl_vterm != NULL)
    {
	screen = vterm_obtain_screen(term->tl_vterm);
	p = NULL;
	line = NULL;
    }
    else
    {
	linenr_T lnum = pos.row + term->tl_scrollback_scrolled;

	if (lnum < 0 || lnum >= term->tl_scrollback.ga_len)
	    return;
	p = ml_get_buf(buf, lnum + 1, FALSE);
	line = (sb_line_T *)term->tl_scrollback.ga_data + lnum;
    }

    for (pos.col = 0; pos.col < term->tl_cols; )
    {
	dict_T		*dcell;
	int		width;
	VTermScreenCellAttrs attrs;
	VTermColor	fg, bg;
	char_u		rgb[8];	/* "#rrggbb" plus NUL */
	char_u		mbs[MB_MAXBYTES * VTERM_MAX_CHARS_PER_CELL + 1];
	int		off = 0;
	int		i;

	if (screen == NULL)
	{
	    cellattr_T *cellattr;
	    int	       len;

	    /* vterm has finished, get the cell from scrollback */
	    if (pos.col >= line->sb_cols)
		break;
	    cellattr = line->sb_cells + pos.col;
	    width = cellattr->width;
	    attrs = cellattr->attrs;
	    fg = cellattr->fg;
	    bg = cellattr->bg;
	    len = MB_PTR2LEN(p);
	    mch_memmove(mbs, p, len);
	    mbs[len] = NUL;
	    p += len;
	}
	else
	{
	    VTermScreenCell cell;

	    if (vterm_screen_get_cell(screen, pos, &cell) == 0)
		break;
	    for (i = 0; i < VTERM_MAX_CHARS_PER_CELL; ++i)
	    {
		if (cell.chars[i] == 0)
		    break;
		off += (*utf_char2bytes)((int)cell.chars[i], mbs + off);
	    }
	    mbs[off] = NUL;
	    width = cell.width;
	    attrs = cell.attrs;
	    fg = cell.fg;
	    bg = cell.bg;
	}
	dcell = dict_alloc();
	if (dcell == NULL)
	    break;
	list_append_dict(l, dcell);

	dict_add_string(dcell, "chars", mbs);

	vim_snprintf((char *)rgb, 8, "#%02x%02x%02x",
				     fg.red, fg.green, fg.blue);
	dict_add_string(dcell, "fg", rgb);
	vim_snprintf((char *)rgb, 8, "#%02x%02x%02x",
				     bg.red, bg.green, bg.blue);
	dict_add_string(dcell, "bg", rgb);

	dict_add_number(dcell, "attr", cell2attr(attrs, fg, bg));
	dict_add_number(dcell, "width", width);

	++pos.col;
	if (width == 2)
	    ++pos.col;
    }
}

/*
 * "term_sendkeys(buf, keys)" function
 */
    void
f_term_sendkeys(typval_T *argvars, typval_T *rettv)
{
    buf_T	*buf = term_get_buf(argvars, "term_sendkeys()");
    char_u	*msg;
    term_T	*term;

    rettv->v_type = VAR_UNKNOWN;
    if (buf == NULL)
	return;

    msg = tv_get_string_chk(&argvars[1]);
    if (msg == NULL)
	return;
    term = buf->b_term;
    if (term->tl_vterm == NULL)
	return;

    while (*msg != NUL)
    {
	int c;

	/* K_SPECIAL introduces a 3-byte special key code */
	if (*msg == K_SPECIAL && msg[1] != NUL && msg[2] != NUL)
	{
	    c = TO_SPECIAL(msg[1], msg[2]);
	    msg += 3;
	}
	else
	{
	    c = PTR2CHAR(msg);
	    msg += MB_CPTR2LEN(msg);
	}
	send_keys_to_term(term, c, FALSE);
    }
}

#if defined(FEAT_GUI) || defined(FEAT_TERMGUICOLORS) || defined(PROTO)
/*
 * "term_getansicolors(buf)" function
 */
    void
f_term_getansicolors(typval_T *argvars, typval_T *rettv)
{
    buf_T	*buf = term_get_buf(argvars, "term_getansicolors()");
    term_T	*term;
    VTermState	*state;
    VTermColor  color;
    char_u	hexbuf[10];
    int		index;
    list_T	*list;

    if (rettv_list_alloc(rettv) == FAIL)
	return;

    if (buf == NULL)
	return;
    term = buf->b_term;
    if (term->tl_vterm == NULL)
	return;

    list =
rettv->vval.v_list;
    state = vterm_obtain_state(term->tl_vterm);
    /* return the 16 palette entries as "#rrggbb" strings */
    for (index = 0; index < 16; index++)
    {
	vterm_state_get_palette_color(state, index, &color);
	sprintf((char *)hexbuf, "#%02x%02x%02x",
		color.red, color.green, color.blue);
	if (list_append_string(list, hexbuf, 7) == FAIL)
	    return;
    }
}

/*
 * "term_setansicolors(buf, list)" function
 */
    void
f_term_setansicolors(typval_T *argvars, typval_T *rettv UNUSED)
{
    buf_T	*buf = term_get_buf(argvars, "term_setansicolors()");
    term_T	*term;

    if (buf == NULL)
	return;
    term = buf->b_term;
    if (term->tl_vterm == NULL)
	return;

    if (argvars[1].v_type != VAR_LIST || argvars[1].vval.v_list == NULL)
    {
	EMSG(_(e_listreq));
	return;
    }

    if (set_ansi_colors_list(term->tl_vterm, argvars[1].vval.v_list) == FAIL)
	EMSG(_(e_invarg));
}
#endif

/*
 * "term_setrestore(buf, command)" function
 */
    void
f_term_setrestore(typval_T *argvars UNUSED, typval_T *rettv UNUSED)
{
#if defined(FEAT_SESSION)
    buf_T	*buf = term_get_buf(argvars, "term_setrestore()");
    term_T	*term;
    char_u	*cmd;

    if (buf == NULL)
	return;
    term = buf->b_term;
    vim_free(term->tl_command);
    cmd = tv_get_string_chk(&argvars[1]);
    if (cmd != NULL)
	term->tl_command = vim_strsave(cmd);
    else
	term->tl_command = NULL;
#endif
}

/*
 * "term_setkill(buf, how)" function
 */
    void
f_term_setkill(typval_T *argvars UNUSED, typval_T *rettv UNUSED)
{
    buf_T	*buf = term_get_buf(argvars, "term_setkill()");
    term_T	*term;
    char_u	*how;

    if (buf == NULL)
	return;
    term = buf->b_term;
    vim_free(term->tl_kill);
    how = tv_get_string_chk(&argvars[1]);
    if (how != NULL)
	term->tl_kill = vim_strsave(how);
    else
	term->tl_kill = NULL;
}

/*
 * "term_start(command, options)" function
 */
    void
f_term_start(typval_T *argvars, typval_T *rettv)
{
    jobopt_T	opt;
    buf_T	*buf;

    init_job_options(&opt);
    if (argvars[1].v_type != VAR_UNKNOWN
	    && get_job_options(&argvars[1], &opt,
		JO_TIMEOUT_ALL + JO_STOPONEXIT
		    + JO_CALLBACK + JO_OUT_CALLBACK + JO_ERR_CALLBACK
		    + JO_EXIT_CB + JO_CLOSE_CALLBACK + JO_OUT_IO,
		JO2_TERM_NAME + JO2_TERM_FINISH + JO2_HIDDEN
		    + JO2_TERM_OPENCMD + JO2_TERM_COLS + JO2_TERM_ROWS
		    + JO2_VERTICAL + JO2_CURWIN + JO2_CWD + JO2_ENV
		    + JO2_EOF_CHARS + JO2_NORESTORE + JO2_TERM_KILL
		    + JO2_ANSI_COLORS) == FAIL)
	return;

    buf = term_start(&argvars[0], NULL, &opt, 0);

    if (buf != NULL && buf->b_term != NULL)
	rettv->vval.v_number = buf->b_fnum;
}

/*
 * "term_wait" function
 */
    void
f_term_wait(typval_T *argvars, typval_T *rettv UNUSED)
{
    buf_T	*buf = term_get_buf(argvars, "term_wait()");

    if (buf == NULL)
	return;
    if (buf->b_term->tl_job == NULL)
    {
	ch_log(NULL, "term_wait(): no job to wait for");
	return;
    }
    if (buf->b_term->tl_job->jv_channel == NULL)
	/* channel is closed, nothing to do */
	return;

    /* Get the job status, this will detect a job that finished. */
    if (!buf->b_term->tl_job->jv_channel->ch_keep_open
	    && STRCMP(job_status(buf->b_term->tl_job), "dead") == 0)
    {
	/* The job is dead, keep reading channel I/O until the channel is
	 * closed. buf->b_term may become NULL if the terminal was closed while
	 * waiting. */
	ch_log(NULL, "term_wait(): waiting for channel to close");
	while (buf->b_term != NULL && !buf->b_term->tl_channel_closed)
	{
	    mch_check_messages();
	    parse_queued_messages();
	    ui_delay(10L, FALSE);
	    if (!buf_valid(buf))
		/* If the terminal is closed when the channel is closed the
		 * buffer disappears. */
		break;
	}
	mch_check_messages();
	parse_queued_messages();
    }
    else
    {
	long wait = 10L;

	mch_check_messages();
	parse_queued_messages();

	/* Wait for some time for any channel I/O. */
	if (argvars[1].v_type != VAR_UNKNOWN)
	    wait = tv_get_number(&argvars[1]);
	ui_delay(wait, TRUE);
	mch_check_messages();

	/* Flushing messages on channels is hopefully sufficient.
	 * TODO: is there a better way? */
	parse_queued_messages();
    }
}

/*
 * Called when a channel has sent all the lines to a terminal.
 * Send a CTRL-D to mark the end of the text.
 */
    void
term_send_eof(channel_T *ch)
{
    term_T	*term;

    for (term = first_term; term != NULL; term = term->tl_next)
	if (term->tl_job == ch->ch_job)
	{
	    if (term->tl_eof_chars != NULL)
	    {
		/* user-configured EOF characters, followed by CR */
		channel_send(ch, PART_IN, term->tl_eof_chars,
					(int)STRLEN(term->tl_eof_chars), NULL);
		channel_send(ch, PART_IN, (char_u *)"\r", 1, NULL);
	    }
# ifdef WIN3264
	    else
		/* Default: CTRL-D */
		channel_send(ch, PART_IN, (char_u *)"\004\r", 2, NULL);
# endif
	}
}

/*
 * Return the job associated with terminal "term", or NULL.
 */
    job_T *
term_getjob(term_T *term)
{
    return term != NULL ? term->tl_job : NULL;
}

# if defined(WIN3264) || defined(PROTO)

/**************************************
 * 2. MS-Windows implementation.
 */

# ifndef PROTO

#define WINPTY_SPAWN_FLAG_AUTO_SHUTDOWN 1ul
#define WINPTY_SPAWN_FLAG_EXIT_AFTER_SHUTDOWN 2ull
#define WINPTY_MOUSE_MODE_FORCE		2

/* Function pointers resolved from winpty.dll at runtime. */
void* (*winpty_config_new)(UINT64, void*);
void* (*winpty_open)(void*, void*);
void* (*winpty_spawn_config_new)(UINT64, void*, LPCWSTR, void*, void*, void*);
BOOL (*winpty_spawn)(void*, void*, HANDLE*, HANDLE*, DWORD*, void*);
void (*winpty_config_set_mouse_mode)(void*, int);
void (*winpty_config_set_initial_size)(void*, int, int);
LPCWSTR (*winpty_conin_name)(void*);
LPCWSTR (*winpty_conout_name)(void*);
LPCWSTR (*winpty_conerr_name)(void*);
void (*winpty_free)(void*);
void (*winpty_config_free)(void*);
void (*winpty_spawn_config_free)(void*);
void (*winpty_error_free)(void*);
LPCWSTR (*winpty_error_msg)(void*);
BOOL (*winpty_set_size)(void*, int, int, void*);
HANDLE (*winpty_agent_process)(void*);

#define WINPTY_DLL "winpty.dll"

static HINSTANCE hWinPtyDLL = NULL;
# endif

/*
 * Dynamically load winpty.dll and resolve the entry points above.
 * Return OK when loaded, FAIL otherwise (with error messages if "verbose").
 */
    static int
dyn_winpty_init(int verbose)
{
    int i;
    static struct
    {
	char	    *name;
	FARPROC	    *ptr;
    } winpty_entry[] =
    {
	{"winpty_conerr_name", (FARPROC*)&winpty_conerr_name},
	{"winpty_config_free", (FARPROC*)&winpty_config_free},
	{"winpty_config_new", (FARPROC*)&winpty_config_new},
	{"winpty_config_set_mouse_mode",
				      (FARPROC*)&winpty_config_set_mouse_mode},
	{"winpty_config_set_initial_size",
				    (FARPROC*)&winpty_config_set_initial_size},
	{"winpty_conin_name", (FARPROC*)&winpty_conin_name},
	{"winpty_conout_name", (FARPROC*)&winpty_conout_name},
	{"winpty_error_free", (FARPROC*)&winpty_error_free},
	{"winpty_free", (FARPROC*)&winpty_free},
	{"winpty_open", (FARPROC*)&winpty_open},
	{"winpty_spawn", (FARPROC*)&winpty_spawn},
	{"winpty_spawn_config_free", (FARPROC*)&winpty_spawn_config_free},
	{"winpty_spawn_config_new", (FARPROC*)&winpty_spawn_config_new},
	{"winpty_error_msg", (FARPROC*)&winpty_error_msg},
	{"winpty_set_size", (FARPROC*)&winpty_set_size},
	{"winpty_agent_process", (FARPROC*)&winpty_agent_process},
	{NULL, NULL}
    };

    /* No need to initialize twice. */
    if (hWinPtyDLL)
	return OK;
    /* Load winpty.dll, prefer using the 'winptydll' option, fall back to just
     * winpty.dll. */
    if (*p_winptydll != NUL)
	hWinPtyDLL = vimLoadLib((char *)p_winptydll);
    if (!hWinPtyDLL)
	hWinPtyDLL = vimLoadLib(WINPTY_DLL);
    if (!hWinPtyDLL)
    {
	if (verbose)
	    EMSG2(_(e_loadlib), *p_winptydll != NUL ? p_winptydll
						       : (char_u *)WINPTY_DLL);
	return FAIL;
    }
    for (i = 0; winpty_entry[i].name != NULL
					 && winpty_entry[i].ptr != NULL; ++i)
    {
	if ((*winpty_entry[i].ptr = (FARPROC)GetProcAddress(hWinPtyDLL,
					      winpty_entry[i].name)) == NULL)
	{
	    if (verbose)
		EMSG2(_(e_loadfunc), winpty_entry[i].name);
	    return FAIL;
	}
    }

    return OK;
}

/*
 * Create a new terminal of "rows" by "cols" cells.
 * Store a reference in "term".
 * Return OK or FAIL.
 */
    static int
term_and_job_init(
	term_T	    *term,
	typval_T    *argvar,
	char	    **argv UNUSED,
	jobopt_T    *opt,
	jobopt_T    *orig_opt)
{
    WCHAR	    *cmd_wchar = NULL;
    WCHAR	    *cwd_wchar = NULL;
    WCHAR	    *env_wchar = NULL;
    channel_T	    *channel = NULL;
    job_T	    *job = NULL;
    DWORD	    error;
    HANDLE	    jo = NULL;
    HANDLE	    child_process_handle;
    HANDLE	    child_thread_handle;
    void	    *winpty_err = NULL;
    void	    *spawn_config = NULL;
    garray_T	    ga_cmd, ga_env;
    char_u	    *cmd = NULL;

    if (dyn_winpty_init(TRUE) == FAIL)
	return FAIL;
    ga_init2(&ga_cmd, (int)sizeof(char*), 20);
    ga_init2(&ga_env, (int)sizeof(char*), 20);

    if (argvar->v_type == VAR_STRING)
    {
	cmd = argvar->vval.v_string;
    }
    else if (argvar->v_type == VAR_LIST)
    {
	if (win32_build_cmd(argvar->vval.v_list, &ga_cmd) == FAIL)
	    goto failed;
	cmd = ga_cmd.ga_data;
    }
    if (cmd == NULL || *cmd == NUL)
    {
	EMSG(_(e_invarg));
	goto failed;
    }

    /* winpty takes UTF-16 command, cwd and environment */
    cmd_wchar = enc_to_utf16(cmd, NULL);
    ga_clear(&ga_cmd);
    if (cmd_wchar == NULL)
	goto failed;
    if (opt->jo_cwd != NULL)
	cwd_wchar = enc_to_utf16(opt->jo_cwd, NULL);

    win32_build_env(opt->jo_env, &ga_env, TRUE);
    env_wchar = ga_env.ga_data;

    term->tl_winpty_config = winpty_config_new(0, &winpty_err);
    if (term->tl_winpty_config == NULL)
	goto failed;

    winpty_config_set_mouse_mode(term->tl_winpty_config,
						    WINPTY_MOUSE_MODE_FORCE);
    winpty_config_set_initial_size(term->tl_winpty_config,
						 term->tl_cols, term->tl_rows);
    term->tl_winpty = winpty_open(term->tl_winpty_config, &winpty_err);
    if (term->tl_winpty == NULL)
	goto failed;

    spawn_config = winpty_spawn_config_new(
	    WINPTY_SPAWN_FLAG_AUTO_SHUTDOWN |
		WINPTY_SPAWN_FLAG_EXIT_AFTER_SHUTDOWN,
	    NULL,
	    cmd_wchar,
	    cwd_wchar,
	    env_wchar,
	    &winpty_err);
    if (spawn_config == NULL)
	goto failed;

    channel = add_channel();
    if (channel == NULL)
	goto failed;

    job = job_alloc();
    if (job == NULL)
	goto failed;
    if (argvar->v_type == VAR_STRING)
    {
	int argc;

	build_argv_from_string(cmd, &job->jv_argv, &argc);
    }
    else
    {
	int argc;

	build_argv_from_list(argvar->vval.v_list, &job->jv_argv, &argc);
    }

    if (opt->jo_set & JO_IN_BUF)
	job->jv_in_buf = buflist_findnr(opt->jo_io_buf[PART_IN]);

    if (!winpty_spawn(term->tl_winpty, spawn_config, &child_process_handle,
	    &child_thread_handle, &error, &winpty_err))
	goto failed;

    channel_set_pipes(channel,
	(sock_T)CreateFileW(
	    winpty_conin_name(term->tl_winpty),
	    GENERIC_WRITE, 0, NULL,
	    OPEN_EXISTING, 0, NULL),
	(sock_T)CreateFileW(
	    winpty_conout_name(term->tl_winpty),
	    GENERIC_READ, 0, NULL,
	    OPEN_EXISTING, 0, NULL),
	(sock_T)CreateFileW(
	    winpty_conerr_name(term->tl_winpty),
	    GENERIC_READ, 0, NULL,
	    OPEN_EXISTING, 0, NULL));

    /* Write lines with CR instead of NL. */
    channel->ch_write_text_mode = TRUE;

    jo = CreateJobObject(NULL, NULL);
    if (jo == NULL)
	goto failed;

    if (!AssignProcessToJobObject(jo, child_process_handle))
    {
	/* Failed, switch the way to terminate process with TerminateProcess. */
	CloseHandle(jo);
	jo = NULL;
    }

    winpty_spawn_config_free(spawn_config);
    vim_free(cmd_wchar);
    vim_free(cwd_wchar);
    vim_free(env_wchar);

    if (create_vterm(term, term->tl_rows, term->tl_cols) == FAIL)
	goto failed;

#if defined(FEAT_GUI) || defined(FEAT_TERMGUICOLORS)
    if (opt->jo_set2 & JO2_ANSI_COLORS)
	set_vterm_palette(term->tl_vterm, opt->jo_ansi_colors);
    else
	init_vterm_ansi_colors(term->tl_vterm);
#endif

    channel_set_job(channel, job, opt);
    job_set_options(job, opt);

    job->jv_channel = channel;
    job->jv_proc_info.hProcess = child_process_handle;
    job->jv_proc_info.dwProcessId = GetProcessId(child_process_handle);
    job->jv_job_object = jo;
    job->jv_status = JOB_STARTED;
    job->jv_tty_in = utf16_to_enc(
	    (short_u*)winpty_conin_name(term->tl_winpty), NULL);
    job->jv_tty_out = utf16_to_enc(
	    (short_u*)winpty_conout_name(term->tl_winpty), NULL);
    ++job->jv_refcount;
    term->tl_job = job;

    /* Redirecting stdout and stderr doesn't work at the job level.  Instead
     * open the file here and handle it in.  opt->jo_io was changed in
     * setup_job_options(), use the original flags here.
     */
    if (orig_opt->jo_io[PART_OUT] == JIO_FILE)
    {
	char_u *fname = opt->jo_io_name[PART_OUT];

	ch_log(channel, "Opening output file %s", fname);
	term->tl_out_fd = mch_fopen((char *)fname, WRITEBIN);
	if (term->tl_out_fd == NULL)
	    EMSG2(_(e_notopen), fname);
    }

    return OK;

failed:
    /* Release everything created so far; pointers are NULL until set, so
     * each check below is safe regardless of which step failed. */
    ga_clear(&ga_cmd);
    ga_clear(&ga_env);
    vim_free(cmd_wchar);
    vim_free(cwd_wchar);
    if (spawn_config != NULL)
	winpty_spawn_config_free(spawn_config);
    if (channel != NULL)
	channel_clear(channel);
    if (job != NULL)
    {
	job->jv_channel = NULL;
	job_cleanup(job);
    }
    term->tl_job = NULL;
    if (jo != NULL)
	CloseHandle(jo);
    if (term->tl_winpty != NULL)
	winpty_free(term->tl_winpty);
    term->tl_winpty = NULL;
    if (term->tl_winpty_config != NULL)
	winpty_config_free(term->tl_winpty_config);
    term->tl_winpty_config = NULL;
    if (winpty_err != NULL)
    {
	char_u *msg = utf16_to_enc(
				(short_u *)winpty_error_msg(winpty_err), NULL);

	EMSG(msg);
	winpty_error_free(winpty_err);
    }
    return FAIL;
}

    static int
create_pty_only(term_T *term, jobopt_T *options)
{
    HANDLE	    hPipeIn = INVALID_HANDLE_VALUE;
    HANDLE	    hPipeOut = INVALID_HANDLE_VALUE;
    char	    in_name[80], out_name[80];
    channel_T	    *channel = NULL;

    if (create_vterm(term, term->tl_rows, term->tl_cols) == FAIL)
	return FAIL;

    vim_snprintf(in_name, sizeof(in_name), "\\\\.\\pipe\\vim-%d-in-%d",
	    GetCurrentProcessId(),
	    curbuf->b_fnum);
    hPipeIn = CreateNamedPipe(in_name, PIPE_ACCESS_OUTBOUND,
	    PIPE_TYPE_MESSAGE | PIPE_NOWAIT,
	    PIPE_UNLIMITED_INSTANCES,
	    0, 0, NMPWAIT_NOWAIT, NULL);
    if (hPipeIn == INVALID_HANDLE_VALUE)
	goto failed;

    vim_snprintf(out_name, sizeof(out_name), "\\\\.\\pipe\\vim-%d-out-%d",
	    GetCurrentProcessId(),
	    curbuf->b_fnum);
    hPipeOut = CreateNamedPipe(out_name, PIPE_ACCESS_INBOUND,
	    PIPE_TYPE_MESSAGE | PIPE_NOWAIT,
	    PIPE_UNLIMITED_INSTANCES,
	    0, 0, 0, NULL);
    if (hPipeOut == INVALID_HANDLE_VALUE)
	goto failed;

    ConnectNamedPipe(hPipeIn, NULL);
    ConnectNamedPipe(hPipeOut, NULL);

    term->tl_job = job_alloc();
    if (term->tl_job == NULL)
	goto failed;
    ++term->tl_job->jv_refcount;

    /* behave like the job is
already finished */ term->tl_job->jv_status = JOB_FINISHED; channel = add_channel(); if (channel == NULL) goto failed; term->tl_job->jv_channel = channel; channel->ch_keep_open = TRUE; channel->ch_named_pipe = TRUE; channel_set_pipes(channel, (sock_T)hPipeIn, (sock_T)hPipeOut, (sock_T)hPipeOut); channel_set_job(channel, term->tl_job, options); term->tl_job->jv_tty_in = vim_strsave((char_u*)in_name); term->tl_job->jv_tty_out = vim_strsave((char_u*)out_name); return OK; failed: if (hPipeIn != NULL) CloseHandle(hPipeIn); if (hPipeOut != NULL) CloseHandle(hPipeOut); return FAIL; } /* * Free the terminal emulator part of "term". */ static void term_free_vterm(term_T *term) { if (term->tl_winpty != NULL) winpty_free(term->tl_winpty); term->tl_winpty = NULL; if (term->tl_winpty_config != NULL) winpty_config_free(term->tl_winpty_config); term->tl_winpty_config = NULL; if (term->tl_vterm != NULL) vterm_free(term->tl_vterm); term->tl_vterm = NULL; } /* * Report the size to the terminal. */ static void term_report_winsize(term_T *term, int rows, int cols) { if (term->tl_winpty) winpty_set_size(term->tl_winpty, cols, rows, NULL); } int terminal_enabled(void) { return dyn_winpty_init(FALSE) == OK; } # else /************************************** * 3. Unix-like implementation. */ /* * Create a new terminal of "rows" by "cols" cells. * Start job for "cmd". * Store the pointers in "term". * When "argv" is not NULL then "argvar" is not used. * Return OK or FAIL. */ static int term_and_job_init( term_T *term, typval_T *argvar, char **argv, jobopt_T *opt, jobopt_T *orig_opt UNUSED) { if (create_vterm(term, term->tl_rows, term->tl_cols) == FAIL) return FAIL; #if defined(FEAT_GUI) || defined(FEAT_TERMGUICOLORS) if (opt->jo_set2 & JO2_ANSI_COLORS) set_vterm_palette(term->tl_vterm, opt->jo_ansi_colors); else init_vterm_ansi_colors(term->tl_vterm); #endif /* This may change a string in "argvar". 
*/ term->tl_job = job_start(argvar, argv, opt, TRUE); if (term->tl_job != NULL) ++term->tl_job->jv_refcount; return term->tl_job != NULL && term->tl_job->jv_channel != NULL && term->tl_job->jv_status != JOB_FAILED ? OK : FAIL; } static int create_pty_only(term_T *term, jobopt_T *opt) { if (create_vterm(term, term->tl_rows, term->tl_cols) == FAIL) return FAIL; term->tl_job = job_alloc(); if (term->tl_job == NULL) return FAIL; ++term->tl_job->jv_refcount; /* behave like the job is already finished */ term->tl_job->jv_status = JOB_FINISHED; return mch_create_pty_channel(term->tl_job, opt); } /* * Free the terminal emulator part of "term". */ static void term_free_vterm(term_T *term) { if (term->tl_vterm != NULL) vterm_free(term->tl_vterm); term->tl_vterm = NULL; } /* * Report the size to the terminal. */ static void term_report_winsize(term_T *term, int rows, int cols) { /* Use an ioctl() to report the new window size to the job. */ if (term->tl_job != NULL && term->tl_job->jv_channel != NULL) { int fd = -1; int part; for (part = PART_OUT; part < PART_COUNT; ++part) { fd = term->tl_job->jv_channel->ch_part[part].ch_fd; if (isatty(fd)) break; } if (part < PART_COUNT && mch_report_winsize(fd, rows, cols) == OK) mch_signal_job(term->tl_job, (char_u *)"winch"); } } # endif #endif /* FEAT_TERMINAL */
create_pty_only(term_T *term, jobopt_T *options) { HANDLE hPipeIn = INVALID_HANDLE_VALUE; HANDLE hPipeOut = INVALID_HANDLE_VALUE; char in_name[80], out_name[80]; channel_T *channel = NULL; create_vterm(term, term->tl_rows, term->tl_cols); vim_snprintf(in_name, sizeof(in_name), "\\\\.\\pipe\\vim-%d-in-%d", GetCurrentProcessId(), curbuf->b_fnum); hPipeIn = CreateNamedPipe(in_name, PIPE_ACCESS_OUTBOUND, PIPE_TYPE_MESSAGE | PIPE_NOWAIT, PIPE_UNLIMITED_INSTANCES, 0, 0, NMPWAIT_NOWAIT, NULL); if (hPipeIn == INVALID_HANDLE_VALUE) goto failed; vim_snprintf(out_name, sizeof(out_name), "\\\\.\\pipe\\vim-%d-out-%d", GetCurrentProcessId(), curbuf->b_fnum); hPipeOut = CreateNamedPipe(out_name, PIPE_ACCESS_INBOUND, PIPE_TYPE_MESSAGE | PIPE_NOWAIT, PIPE_UNLIMITED_INSTANCES, 0, 0, 0, NULL); if (hPipeOut == INVALID_HANDLE_VALUE) goto failed; ConnectNamedPipe(hPipeIn, NULL); ConnectNamedPipe(hPipeOut, NULL); term->tl_job = job_alloc(); if (term->tl_job == NULL) goto failed; ++term->tl_job->jv_refcount; /* behave like the job is already finished */ term->tl_job->jv_status = JOB_FINISHED; channel = add_channel(); if (channel == NULL) goto failed; term->tl_job->jv_channel = channel; channel->ch_keep_open = TRUE; channel->ch_named_pipe = TRUE; channel_set_pipes(channel, (sock_T)hPipeIn, (sock_T)hPipeOut, (sock_T)hPipeOut); channel_set_job(channel, term->tl_job, options); term->tl_job->jv_tty_in = vim_strsave((char_u*)in_name); term->tl_job->jv_tty_out = vim_strsave((char_u*)out_name); return OK; failed: if (hPipeIn != NULL) CloseHandle(hPipeIn); if (hPipeOut != NULL) CloseHandle(hPipeOut); return FAIL; }
create_pty_only(term_T *term, jobopt_T *options) { HANDLE hPipeIn = INVALID_HANDLE_VALUE; HANDLE hPipeOut = INVALID_HANDLE_VALUE; char in_name[80], out_name[80]; channel_T *channel = NULL; if (create_vterm(term, term->tl_rows, term->tl_cols) == FAIL) return FAIL; vim_snprintf(in_name, sizeof(in_name), "\\\\.\\pipe\\vim-%d-in-%d", GetCurrentProcessId(), curbuf->b_fnum); hPipeIn = CreateNamedPipe(in_name, PIPE_ACCESS_OUTBOUND, PIPE_TYPE_MESSAGE | PIPE_NOWAIT, PIPE_UNLIMITED_INSTANCES, 0, 0, NMPWAIT_NOWAIT, NULL); if (hPipeIn == INVALID_HANDLE_VALUE) goto failed; vim_snprintf(out_name, sizeof(out_name), "\\\\.\\pipe\\vim-%d-out-%d", GetCurrentProcessId(), curbuf->b_fnum); hPipeOut = CreateNamedPipe(out_name, PIPE_ACCESS_INBOUND, PIPE_TYPE_MESSAGE | PIPE_NOWAIT, PIPE_UNLIMITED_INSTANCES, 0, 0, 0, NULL); if (hPipeOut == INVALID_HANDLE_VALUE) goto failed; ConnectNamedPipe(hPipeIn, NULL); ConnectNamedPipe(hPipeOut, NULL); term->tl_job = job_alloc(); if (term->tl_job == NULL) goto failed; ++term->tl_job->jv_refcount; /* behave like the job is already finished */ term->tl_job->jv_status = JOB_FINISHED; channel = add_channel(); if (channel == NULL) goto failed; term->tl_job->jv_channel = channel; channel->ch_keep_open = TRUE; channel->ch_named_pipe = TRUE; channel_set_pipes(channel, (sock_T)hPipeIn, (sock_T)hPipeOut, (sock_T)hPipeOut); channel_set_job(channel, term->tl_job, options); term->tl_job->jv_tty_in = vim_strsave((char_u*)in_name); term->tl_job->jv_tty_out = vim_strsave((char_u*)out_name); return OK; failed: if (hPipeIn != NULL) CloseHandle(hPipeIn); if (hPipeOut != NULL) CloseHandle(hPipeOut); return FAIL; }
{'added': [(3433, ''), (3707, ' * Return FAIL when out of memory.'), (3709, ' static int'), (3719, ' if (vterm == NULL)'), (3720, '\treturn FAIL;'), (3721, ''), (3722, ' // Allocate screen and state here, so we can bail out if that fails.'), (3723, ' state = vterm_obtain_state(vterm);'), (3725, ' if (state == NULL || screen == NULL)'), (3726, ' {'), (3727, '\tvterm_free(vterm);'), (3728, '\treturn FAIL;'), (3729, ' }'), (3730, ''), (3738, '\t state,'), (3764, ''), (3765, ' return OK;'), (5646, ' if (create_vterm(term, term->tl_rows, term->tl_cols) == FAIL)'), (5647, '\tgoto failed;'), (5728, ' if (create_vterm(term, term->tl_rows, term->tl_cols) == FAIL)'), (5729, '\treturn FAIL;'), (5841, ' if (create_vterm(term, term->tl_rows, term->tl_cols) == FAIL)'), (5842, '\treturn FAIL;'), (5864, ' if (create_vterm(term, term->tl_rows, term->tl_cols) == FAIL)'), (5865, '\treturn FAIL;')], 'deleted': [(3707, ' static void'), (3725, '\t vterm_obtain_state(vterm),'), (3749, ' state = vterm_obtain_state(vterm);'), (5632, ' create_vterm(term, term->tl_rows, term->tl_cols);'), (5713, ' create_vterm(term, term->tl_rows, term->tl_cols);'), (5825, ' create_vterm(term, term->tl_rows, term->tl_cols);'), (5847, ' create_vterm(term, term->tl_rows, term->tl_cols);')]}
25
7
4,348
25,983
53
330
7
https://github.com/vim/vim
CVE-2018-20786
CWE-476
3,149
commonio.c
C
commonio_sort
/* * Copyright (c) 1990 - 1994, Julianne Frances Haugh * Copyright (c) 1996 - 2001, Marek Michałkiewicz * Copyright (c) 2001 - 2006, Tomasz Kłoczko * Copyright (c) 2007 - 2011, Nicolas François * All rights reserved. * * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following conditions * are met: * 1. Redistributions of source code must retain the above copyright * notice, this list of conditions and the following disclaimer. * 2. Redistributions in binary form must reproduce the above copyright * notice, this list of conditions and the following disclaimer in the * documentation and/or other materials provided with the distribution. * 3. The name of the copyright holders or contributors may not be used to * endorse or promote products derived from this software without * specific prior written permission. * * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS * ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A * PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT * HOLDERS OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 
*/ #include <config.h> #ident "$Id$" #include "defines.h" #include <assert.h> #include <sys/stat.h> #include <stdlib.h> #include <limits.h> #include <utime.h> #include <fcntl.h> #include <errno.h> #include <stdio.h> #include <signal.h> #include "nscd.h" #ifdef WITH_TCB #include <tcb.h> #endif /* WITH_TCB */ #include "prototypes.h" #include "commonio.h" /* local function prototypes */ static int lrename (const char *, const char *); static int check_link_count (const char *file); static int do_lock_file (const char *file, const char *lock, bool log); static /*@null@*/ /*@dependent@*/FILE *fopen_set_perms ( const char *name, const char *mode, const struct stat *sb); static int create_backup (const char *, FILE *); static void free_linked_list (struct commonio_db *); static void add_one_entry ( struct commonio_db *db, /*@owned@*/struct commonio_entry *p); static bool name_is_nis (const char *name); static int write_all (const struct commonio_db *); static /*@dependent@*/ /*@null@*/struct commonio_entry *find_entry_by_name ( struct commonio_db *, const char *); static /*@dependent@*/ /*@null@*/struct commonio_entry *next_entry_by_name ( struct commonio_db *, /*@null@*/struct commonio_entry *pos, const char *); static int lock_count = 0; static bool nscd_need_reload = false; /* * Simple rename(P) alternative that attempts to rename to symlink * target. 
*/ int lrename (const char *old, const char *new) { int res; char *r = NULL; #if defined(S_ISLNK) #ifndef __GLIBC__ char resolved_path[PATH_MAX]; #endif /* !__GLIBC__ */ struct stat sb; if (lstat (new, &sb) == 0 && S_ISLNK (sb.st_mode)) { #ifdef __GLIBC__ /* now a POSIX.1-2008 feature */ r = realpath (new, NULL); #else /* !__GLIBC__ */ r = realpath (new, resolved_path); #endif /* !__GLIBC__ */ if (NULL == r) { perror ("realpath in lrename()"); } else { new = r; } } #endif /* S_ISLNK */ res = rename (old, new); #ifdef __GLIBC__ if (NULL != r) { free (r); } #endif /* __GLIBC__ */ return res; } static int check_link_count (const char *file) { struct stat sb; if (stat (file, &sb) != 0) { return 0; } if (sb.st_nlink != 2) { return 0; } return 1; } static int do_lock_file (const char *file, const char *lock, bool log) { int fd; pid_t pid; ssize_t len; int retval; char buf[32]; fd = open (file, O_CREAT | O_EXCL | O_WRONLY, 0600); if (-1 == fd) { if (log) { (void) fprintf (stderr, "%s: %s: %s\n", Prog, file, strerror (errno)); } return 0; } pid = getpid (); snprintf (buf, sizeof buf, "%lu", (unsigned long) pid); len = (ssize_t) strlen (buf) + 1; if (write (fd, buf, (size_t) len) != len) { if (log) { (void) fprintf (stderr, "%s: %s: %s\n", Prog, file, strerror (errno)); } (void) close (fd); unlink (file); return 0; } close (fd); if (link (file, lock) == 0) { retval = check_link_count (file); if ((0==retval) && log) { (void) fprintf (stderr, "%s: %s: lock file already used\n", Prog, file); } unlink (file); return retval; } fd = open (lock, O_RDWR); if (-1 == fd) { if (log) { (void) fprintf (stderr, "%s: %s: %s\n", Prog, lock, strerror (errno)); } unlink (file); errno = EINVAL; return 0; } len = read (fd, buf, sizeof (buf) - 1); close (fd); if (len <= 0) { if (log) { (void) fprintf (stderr, "%s: existing lock file %s without a PID\n", Prog, lock); } unlink (file); errno = EINVAL; return 0; } buf[len] = '\0'; if (get_pid (buf, &pid) == 0) { if (log) { (void) fprintf (stderr, 
"%s: existing lock file %s with an invalid PID '%s'\n", Prog, lock, buf); } unlink (file); errno = EINVAL; return 0; } if (kill (pid, 0) == 0) { if (log) { (void) fprintf (stderr, "%s: lock %s already used by PID %lu\n", Prog, lock, (unsigned long) pid); } unlink (file); errno = EEXIST; return 0; } if (unlink (lock) != 0) { if (log) { (void) fprintf (stderr, "%s: cannot get lock %s: %s\n", Prog, lock, strerror (errno)); } unlink (file); return 0; } retval = 0; if (link (file, lock) == 0) { retval = check_link_count (file); if ((0==retval) && log) { (void) fprintf (stderr, "%s: %s: lock file already used\n", Prog, file); } } else { if (log) { (void) fprintf (stderr, "%s: cannot get lock %s: %s\n", Prog, lock, strerror (errno)); } } unlink (file); return retval; } static /*@null@*/ /*@dependent@*/FILE *fopen_set_perms ( const char *name, const char *mode, const struct stat *sb) { FILE *fp; mode_t mask; mask = umask (0777); fp = fopen (name, mode); (void) umask (mask); if (NULL == fp) { return NULL; } #ifdef HAVE_FCHOWN if (fchown (fileno (fp), sb->st_uid, sb->st_gid) != 0) { goto fail; } #else /* !HAVE_FCHOWN */ if (chown (name, sb->st_mode) != 0) { goto fail; } #endif /* !HAVE_FCHOWN */ #ifdef HAVE_FCHMOD if (fchmod (fileno (fp), sb->st_mode & 0664) != 0) { goto fail; } #else /* !HAVE_FCHMOD */ if (chmod (name, sb->st_mode & 0664) != 0) { goto fail; } #endif /* !HAVE_FCHMOD */ return fp; fail: (void) fclose (fp); /* fopen_set_perms is used for intermediate files */ (void) unlink (name); return NULL; } static int create_backup (const char *backup, FILE * fp) { struct stat sb; struct utimbuf ub; FILE *bkfp; int c; if (fstat (fileno (fp), &sb) != 0) { return -1; } bkfp = fopen_set_perms (backup, "w", &sb); if (NULL == bkfp) { return -1; } /* TODO: faster copy, not one-char-at-a-time. 
--marekm */ c = 0; if (fseek (fp, 0, SEEK_SET) == 0) { while ((c = getc (fp)) != EOF) { if (putc (c, bkfp) == EOF) { break; } } } if ((c != EOF) || (ferror (fp) != 0) || (fflush (bkfp) != 0)) { (void) fclose (bkfp); /* FIXME: unlink the backup file? */ return -1; } if ( (fsync (fileno (bkfp)) != 0) || (fclose (bkfp) != 0)) { /* FIXME: unlink the backup file? */ return -1; } ub.actime = sb.st_atime; ub.modtime = sb.st_mtime; (void) utime (backup, &ub); return 0; } static void free_linked_list (struct commonio_db *db) { struct commonio_entry *p; while (NULL != db->head) { p = db->head; db->head = p->next; if (NULL != p->line) { free (p->line); } if (NULL != p->eptr) { db->ops->free (p->eptr); } free (p); } db->tail = NULL; } int commonio_setname (struct commonio_db *db, const char *name) { snprintf (db->filename, sizeof (db->filename), "%s", name); return 1; } bool commonio_present (const struct commonio_db *db) { return (access (db->filename, F_OK) == 0); } int commonio_lock_nowait (struct commonio_db *db, bool log) { char file[1024]; char lock[1024]; if (db->locked) { return 1; } snprintf (file, sizeof file, "%s.%lu", db->filename, (unsigned long) getpid ()); snprintf (lock, sizeof lock, "%s.lock", db->filename); if (do_lock_file (file, lock, log) != 0) { db->locked = true; lock_count++; return 1; } return 0; } int commonio_lock (struct commonio_db *db) { #ifdef HAVE_LCKPWDF /* * only if the system libc has a real lckpwdf() - the one from * lockpw.c calls us and would cause infinite recursion! */ /* * Call lckpwdf() on the first lock. * If it succeeds, call *_lock() only once * (no retries, it should always succeed). */ if (0 == lock_count) { if (lckpwdf () == -1) { if (geteuid () != 0) { (void) fprintf (stderr, "%s: Permission denied.\n", Prog); } return 0; /* failure */ } } if (commonio_lock_nowait (db, true) != 0) { return 1; /* success */ } ulckpwdf (); return 0; /* failure */ #else /* !HAVE_LCKPWDF */ int i; /* * lckpwdf() not used - do it the old way. 
*/ #ifndef LOCK_TRIES #define LOCK_TRIES 15 #endif #ifndef LOCK_SLEEP #define LOCK_SLEEP 1 #endif for (i = 0; i < LOCK_TRIES; i++) { if (i > 0) { sleep (LOCK_SLEEP); /* delay between retries */ } if (commonio_lock_nowait (db, i==LOCK_TRIES-1) != 0) { return 1; /* success */ } /* no unnecessary retries on "permission denied" errors */ if (geteuid () != 0) { (void) fprintf (stderr, "%s: Permission denied.\n", Prog); return 0; } } return 0; /* failure */ #endif /* !HAVE_LCKPWDF */ } static void dec_lock_count (void) { if (lock_count > 0) { lock_count--; if (lock_count == 0) { /* Tell nscd when lock count goes to zero, if any of the files were changed. */ if (nscd_need_reload) { nscd_flush_cache ("passwd"); nscd_flush_cache ("group"); nscd_need_reload = false; } #ifdef HAVE_LCKPWDF ulckpwdf (); #endif /* HAVE_LCKPWDF */ } } } int commonio_unlock (struct commonio_db *db) { char lock[1024]; if (db->isopen) { db->readonly = true; if (commonio_close (db) == 0) { if (db->locked) { dec_lock_count (); } return 0; } } if (db->locked) { /* * Unlock in reverse order: remove the lock file, * then call ulckpwdf() (if used) on last unlock. */ db->locked = false; snprintf (lock, sizeof lock, "%s.lock", db->filename); unlink (lock); dec_lock_count (); return 1; } return 0; } /* * Add an entry at the end. * * defines p->next, p->prev * (unfortunately, owned special are not supported) */ static void add_one_entry (struct commonio_db *db, /*@owned@*/struct commonio_entry *p) { /*@-mustfreeonly@*/ p->next = NULL; p->prev = db->tail; /*@=mustfreeonly@*/ if (NULL == db->head) { db->head = p; } if (NULL != db->tail) { db->tail->next = p; } db->tail = p; } static bool name_is_nis (const char *name) { return (('+' == name[0]) || ('-' == name[0])); } /* * New entries are inserted before the first NIS entry. Order is preserved * when db is written out. 
*/ #ifndef KEEP_NIS_AT_END #define KEEP_NIS_AT_END 1 #endif #if KEEP_NIS_AT_END static void add_one_entry_nis (struct commonio_db *db, /*@owned@*/struct commonio_entry *newp); /* * Insert an entry between the regular entries, and the NIS entries. * * defines newp->next, newp->prev * (unfortunately, owned special are not supported) */ static void add_one_entry_nis (struct commonio_db *db, /*@owned@*/struct commonio_entry *newp) { struct commonio_entry *p; for (p = db->head; NULL != p; p = p->next) { if (name_is_nis (p->eptr ? db->ops->getname (p->eptr) : p->line)) { /*@-mustfreeonly@*/ newp->next = p; newp->prev = p->prev; /*@=mustfreeonly@*/ if (NULL != p->prev) { p->prev->next = newp; } else { db->head = newp; } p->prev = newp; return; } } add_one_entry (db, newp); } #endif /* KEEP_NIS_AT_END */ /* Initial buffer size, as well as increment if not sufficient (for reading very long lines in group files). */ #define BUFLEN 4096 int commonio_open (struct commonio_db *db, int mode) { char *buf; char *cp; char *line; struct commonio_entry *p; void *eptr = NULL; int flags = mode; size_t buflen; int fd; int saved_errno; mode &= ~O_CREAT; if ( db->isopen || ( (O_RDONLY != mode) && (O_RDWR != mode))) { errno = EINVAL; return 0; } db->readonly = (mode == O_RDONLY); if (!db->readonly && !db->locked) { errno = EACCES; return 0; } db->head = NULL; db->tail = NULL; db->cursor = NULL; db->changed = false; fd = open (db->filename, (db->readonly ? O_RDONLY : O_RDWR) | O_NOCTTY | O_NONBLOCK | O_NOFOLLOW); saved_errno = errno; db->fp = NULL; if (fd >= 0) { #ifdef WITH_TCB if (tcb_is_suspect (fd) != 0) { (void) close (fd); errno = EINVAL; return 0; } #endif /* WITH_TCB */ db->fp = fdopen (fd, db->readonly ? "r" : "r+"); saved_errno = errno; if (NULL == db->fp) { (void) close (fd); } } errno = saved_errno; /* * If O_CREAT was specified and the file didn't exist, it will be * created by commonio_close(). We have no entries to read yet. 
--marekm */ if (NULL == db->fp) { if (((flags & O_CREAT) != 0) && (ENOENT == errno)) { db->isopen = true; return 1; } return 0; } /* Do not inherit fd in spawned processes (e.g. nscd) */ fcntl (fileno (db->fp), F_SETFD, FD_CLOEXEC); buflen = BUFLEN; buf = (char *) malloc (buflen); if (NULL == buf) { goto cleanup_ENOMEM; } while (db->ops->fgets (buf, (int) buflen, db->fp) == buf) { while ( ((cp = strrchr (buf, '\n')) == NULL) && (feof (db->fp) == 0)) { size_t len; buflen += BUFLEN; cp = (char *) realloc (buf, buflen); if (NULL == cp) { goto cleanup_buf; } buf = cp; len = strlen (buf); if (db->ops->fgets (buf + len, (int) (buflen - len), db->fp) == NULL) { goto cleanup_buf; } } cp = strrchr (buf, '\n'); if (NULL != cp) { *cp = '\0'; } line = strdup (buf); if (NULL == line) { goto cleanup_buf; } if (name_is_nis (line)) { eptr = NULL; } else { eptr = db->ops->parse (line); if (NULL != eptr) { eptr = db->ops->dup (eptr); if (NULL == eptr) { goto cleanup_line; } } } p = (struct commonio_entry *) malloc (sizeof *p); if (NULL == p) { goto cleanup_entry; } p->eptr = eptr; p->line = line; p->changed = false; add_one_entry (db, p); } free (buf); if (ferror (db->fp) != 0) { goto cleanup_errno; } if ((NULL != db->ops->open_hook) && (db->ops->open_hook () == 0)) { goto cleanup_errno; } db->isopen = true; return 1; cleanup_entry: if (NULL != eptr) { db->ops->free (eptr); } cleanup_line: free (line); cleanup_buf: free (buf); cleanup_ENOMEM: errno = ENOMEM; cleanup_errno: saved_errno = errno; free_linked_list (db); fclose (db->fp); db->fp = NULL; errno = saved_errno; return 0; } /* * Sort given db according to cmp function (usually compares uids) */ int commonio_sort (struct commonio_db *db, int (*cmp) (const void *, const void *)) { struct commonio_entry **entries, *ptr; size_t n = 0, i; #if KEEP_NIS_AT_END struct commonio_entry *nis = NULL; #endif for (ptr = db->head; (NULL != ptr) #if KEEP_NIS_AT_END && (NULL != ptr->line) && ( ('+' != ptr->line[0]) && ('-' != ptr->line[0])) 
#endif ; ptr = ptr->next) { n++; } #if KEEP_NIS_AT_END if ((NULL != ptr) && (NULL != ptr->line)) { nis = ptr; } #endif if (n <= 1) { return 0; } entries = malloc (n * sizeof (struct commonio_entry *)); if (entries == NULL) { return -1; } n = 0; for (ptr = db->head; #if KEEP_NIS_AT_END nis != ptr; #else NULL != ptr; #endif /*@ -nullderef @*/ ptr = ptr->next /*@ +nullderef @*/ ) { entries[n] = ptr; n++; } qsort (entries, n, sizeof (struct commonio_entry *), cmp); /* Take care of the head and tail separately */ db->head = entries[0]; n--; #if KEEP_NIS_AT_END if (NULL == nis) #endif { db->tail = entries[n]; } db->head->prev = NULL; db->head->next = entries[1]; entries[n]->prev = entries[n - 1]; #if KEEP_NIS_AT_END entries[n]->next = nis; #else entries[n]->next = NULL; #endif /* Now other elements have prev and next entries */ for (i = 1; i < n; i++) { entries[i]->prev = entries[i - 1]; entries[i]->next = entries[i + 1]; } free (entries); db->changed = true; return 0; } /* * Sort entries in db according to order in another. 
*/ int commonio_sort_wrt (struct commonio_db *shadow, const struct commonio_db *passwd) { struct commonio_entry *head = NULL, *pw_ptr, *spw_ptr; const char *name; if ((NULL == shadow) || (NULL == shadow->head)) { return 0; } for (pw_ptr = passwd->head; NULL != pw_ptr; pw_ptr = pw_ptr->next) { if (NULL == pw_ptr->eptr) { continue; } name = passwd->ops->getname (pw_ptr->eptr); for (spw_ptr = shadow->head; NULL != spw_ptr; spw_ptr = spw_ptr->next) { if (NULL == spw_ptr->eptr) { continue; } if (strcmp (name, shadow->ops->getname (spw_ptr->eptr)) == 0) { break; } } if (NULL == spw_ptr) { continue; } commonio_del_entry (shadow, spw_ptr); spw_ptr->next = head; head = spw_ptr; } for (spw_ptr = head; NULL != spw_ptr; spw_ptr = head) { head = head->next; if (NULL != shadow->head) { shadow->head->prev = spw_ptr; } spw_ptr->next = shadow->head; shadow->head = spw_ptr; } shadow->head->prev = NULL; shadow->changed = true; return 0; } /* * write_all - Write the database to its file. * * It returns 0 if all the entries could be written correctly. 
*/ static int write_all (const struct commonio_db *db) /*@requires notnull db->fp@*/ { const struct commonio_entry *p; void *eptr; for (p = db->head; NULL != p; p = p->next) { if (p->changed) { eptr = p->eptr; assert (NULL != eptr); if (db->ops->put (eptr, db->fp) != 0) { return -1; } } else if (NULL != p->line) { if (db->ops->fputs (p->line, db->fp) == EOF) { return -1; } if (putc ('\n', db->fp) == EOF) { return -1; } } } return 0; } int commonio_close (struct commonio_db *db) /*@requires notnull db->fp@*/ { char buf[1024]; int errors = 0; struct stat sb; if (!db->isopen) { errno = EINVAL; return 0; } db->isopen = false; if (!db->changed || db->readonly) { (void) fclose (db->fp); db->fp = NULL; goto success; } if ((NULL != db->ops->close_hook) && (db->ops->close_hook () == 0)) { goto fail; } memzero (&sb, sizeof sb); if (NULL != db->fp) { if (fstat (fileno (db->fp), &sb) != 0) { (void) fclose (db->fp); db->fp = NULL; goto fail; } /* * Create backup file. */ snprintf (buf, sizeof buf, "%s-", db->filename); #ifdef WITH_SELINUX if (set_selinux_file_context (buf) != 0) { errors++; } #endif if (create_backup (buf, db->fp) != 0) { errors++; } if (fclose (db->fp) != 0) { errors++; } #ifdef WITH_SELINUX if (reset_selinux_file_context () != 0) { errors++; } #endif if (errors != 0) { db->fp = NULL; goto fail; } } else { /* * Default permissions for new [g]shadow files. 
*/ sb.st_mode = db->st_mode; sb.st_uid = db->st_uid; sb.st_gid = db->st_gid; } snprintf (buf, sizeof buf, "%s+", db->filename); #ifdef WITH_SELINUX if (set_selinux_file_context (buf) != 0) { errors++; } #endif db->fp = fopen_set_perms (buf, "w", &sb); if (NULL == db->fp) { goto fail; } if (write_all (db) != 0) { errors++; } if (fflush (db->fp) != 0) { errors++; } #ifdef HAVE_FSYNC if (fsync (fileno (db->fp)) != 0) { errors++; } #else /* !HAVE_FSYNC */ sync (); #endif /* !HAVE_FSYNC */ if (fclose (db->fp) != 0) { errors++; } db->fp = NULL; if (errors != 0) { unlink (buf); goto fail; } if (lrename (buf, db->filename) != 0) { goto fail; } #ifdef WITH_SELINUX if (reset_selinux_file_context () != 0) { goto fail; } #endif nscd_need_reload = true; goto success; fail: errors++; success: free_linked_list (db); return errors == 0; } static /*@dependent@*/ /*@null@*/struct commonio_entry *next_entry_by_name ( struct commonio_db *db, /*@null@*/struct commonio_entry *pos, const char *name) { struct commonio_entry *p; void *ep; if (NULL == pos) { return NULL; } for (p = pos; NULL != p; p = p->next) { ep = p->eptr; if ( (NULL != ep) && (strcmp (db->ops->getname (ep), name) == 0)) { break; } } return p; } static /*@dependent@*/ /*@null@*/struct commonio_entry *find_entry_by_name ( struct commonio_db *db, const char *name) { return next_entry_by_name (db, db->head, name); } int commonio_update (struct commonio_db *db, const void *eptr) { struct commonio_entry *p; void *nentry; if (!db->isopen || db->readonly) { errno = EINVAL; return 0; } nentry = db->ops->dup (eptr); if (NULL == nentry) { errno = ENOMEM; return 0; } p = find_entry_by_name (db, db->ops->getname (eptr)); if (NULL != p) { if (next_entry_by_name (db, p->next, db->ops->getname (eptr)) != NULL) { fprintf (stderr, _("Multiple entries named '%s' in %s. 
Please fix this with pwck or grpck.\n"), db->ops->getname (eptr), db->filename); db->ops->free (nentry); return 0; } db->ops->free (p->eptr); p->eptr = nentry; p->changed = true; db->cursor = p; db->changed = true; return 1; } /* not found, new entry */ p = (struct commonio_entry *) malloc (sizeof *p); if (NULL == p) { db->ops->free (nentry); errno = ENOMEM; return 0; } p->eptr = nentry; p->line = NULL; p->changed = true; #if KEEP_NIS_AT_END add_one_entry_nis (db, p); #else /* !KEEP_NIS_AT_END */ add_one_entry (db, p); #endif /* !KEEP_NIS_AT_END */ db->changed = true; return 1; } #ifdef ENABLE_SUBIDS int commonio_append (struct commonio_db *db, const void *eptr) { struct commonio_entry *p; void *nentry; if (!db->isopen || db->readonly) { errno = EINVAL; return 0; } nentry = db->ops->dup (eptr); if (NULL == nentry) { errno = ENOMEM; return 0; } /* new entry */ p = (struct commonio_entry *) malloc (sizeof *p); if (NULL == p) { db->ops->free (nentry); errno = ENOMEM; return 0; } p->eptr = nentry; p->line = NULL; p->changed = true; add_one_entry (db, p); db->changed = true; return 1; } #endif /* ENABLE_SUBIDS */ void commonio_del_entry (struct commonio_db *db, const struct commonio_entry *p) { if (p == db->cursor) { db->cursor = p->next; } if (NULL != p->prev) { p->prev->next = p->next; } else { db->head = p->next; } if (NULL != p->next) { p->next->prev = p->prev; } else { db->tail = p->prev; } db->changed = true; } /* * commonio_remove - Remove the entry of the given name from the database. */ int commonio_remove (struct commonio_db *db, const char *name) { struct commonio_entry *p; if (!db->isopen || db->readonly) { errno = EINVAL; return 0; } p = find_entry_by_name (db, name); if (NULL == p) { errno = ENOENT; return 0; } if (next_entry_by_name (db, p->next, name) != NULL) { fprintf (stderr, _("Multiple entries named '%s' in %s. 
Please fix this with pwck or grpck.\n"), name, db->filename); return 0; } commonio_del_entry (db, p); if (NULL != p->line) { free (p->line); } if (NULL != p->eptr) { db->ops->free (p->eptr); } return 1; } /* * commonio_locate - Find the first entry with the specified name in * the database. * * If found, it returns the entry and set the cursor of the database to * that entry. * * Otherwise, it returns NULL. */ /*@observer@*/ /*@null@*/const void *commonio_locate (struct commonio_db *db, const char *name) { struct commonio_entry *p; if (!db->isopen) { errno = EINVAL; return NULL; } p = find_entry_by_name (db, name); if (NULL == p) { errno = ENOENT; return NULL; } db->cursor = p; return p->eptr; } /* * commonio_rewind - Restore the database cursor to the first entry. * * It returns 0 on error, 1 on success. */ int commonio_rewind (struct commonio_db *db) { if (!db->isopen) { errno = EINVAL; return 0; } db->cursor = NULL; return 1; } /* * commonio_next - Return the next entry of the specified database * * It returns the next entry, or NULL if no other entries could be found. */ /*@observer@*/ /*@null@*/const void *commonio_next (struct commonio_db *db) { void *eptr; if (!db->isopen) { errno = EINVAL; return 0; } if (NULL == db->cursor) { db->cursor = db->head; } else { db->cursor = db->cursor->next; } while (NULL != db->cursor) { eptr = db->cursor->eptr; if (NULL != eptr) { return eptr; } db->cursor = db->cursor->next; } return NULL; }
/* * Copyright (c) 1990 - 1994, Julianne Frances Haugh * Copyright (c) 1996 - 2001, Marek Michałkiewicz * Copyright (c) 2001 - 2006, Tomasz Kłoczko * Copyright (c) 2007 - 2011, Nicolas François * All rights reserved. * * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following conditions * are met: * 1. Redistributions of source code must retain the above copyright * notice, this list of conditions and the following disclaimer. * 2. Redistributions in binary form must reproduce the above copyright * notice, this list of conditions and the following disclaimer in the * documentation and/or other materials provided with the distribution. * 3. The name of the copyright holders or contributors may not be used to * endorse or promote products derived from this software without * specific prior written permission. * * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS * ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A * PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT * HOLDERS OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 
*/ #include <config.h> #ident "$Id$" #include "defines.h" #include <assert.h> #include <sys/stat.h> #include <stdlib.h> #include <limits.h> #include <utime.h> #include <fcntl.h> #include <errno.h> #include <stdio.h> #include <signal.h> #include "nscd.h" #ifdef WITH_TCB #include <tcb.h> #endif /* WITH_TCB */ #include "prototypes.h" #include "commonio.h" /* local function prototypes */ static int lrename (const char *, const char *); static int check_link_count (const char *file); static int do_lock_file (const char *file, const char *lock, bool log); static /*@null@*/ /*@dependent@*/FILE *fopen_set_perms ( const char *name, const char *mode, const struct stat *sb); static int create_backup (const char *, FILE *); static void free_linked_list (struct commonio_db *); static void add_one_entry ( struct commonio_db *db, /*@owned@*/struct commonio_entry *p); static bool name_is_nis (const char *name); static int write_all (const struct commonio_db *); static /*@dependent@*/ /*@null@*/struct commonio_entry *find_entry_by_name ( struct commonio_db *, const char *); static /*@dependent@*/ /*@null@*/struct commonio_entry *next_entry_by_name ( struct commonio_db *, /*@null@*/struct commonio_entry *pos, const char *); static int lock_count = 0; static bool nscd_need_reload = false; /* * Simple rename(P) alternative that attempts to rename to symlink * target. 
*/ int lrename (const char *old, const char *new) { int res; char *r = NULL; #if defined(S_ISLNK) #ifndef __GLIBC__ char resolved_path[PATH_MAX]; #endif /* !__GLIBC__ */ struct stat sb; if (lstat (new, &sb) == 0 && S_ISLNK (sb.st_mode)) { #ifdef __GLIBC__ /* now a POSIX.1-2008 feature */ r = realpath (new, NULL); #else /* !__GLIBC__ */ r = realpath (new, resolved_path); #endif /* !__GLIBC__ */ if (NULL == r) { perror ("realpath in lrename()"); } else { new = r; } } #endif /* S_ISLNK */ res = rename (old, new); #ifdef __GLIBC__ if (NULL != r) { free (r); } #endif /* __GLIBC__ */ return res; } static int check_link_count (const char *file) { struct stat sb; if (stat (file, &sb) != 0) { return 0; } if (sb.st_nlink != 2) { return 0; } return 1; } static int do_lock_file (const char *file, const char *lock, bool log) { int fd; pid_t pid; ssize_t len; int retval; char buf[32]; fd = open (file, O_CREAT | O_EXCL | O_WRONLY, 0600); if (-1 == fd) { if (log) { (void) fprintf (stderr, "%s: %s: %s\n", Prog, file, strerror (errno)); } return 0; } pid = getpid (); snprintf (buf, sizeof buf, "%lu", (unsigned long) pid); len = (ssize_t) strlen (buf) + 1; if (write (fd, buf, (size_t) len) != len) { if (log) { (void) fprintf (stderr, "%s: %s: %s\n", Prog, file, strerror (errno)); } (void) close (fd); unlink (file); return 0; } close (fd); if (link (file, lock) == 0) { retval = check_link_count (file); if ((0==retval) && log) { (void) fprintf (stderr, "%s: %s: lock file already used\n", Prog, file); } unlink (file); return retval; } fd = open (lock, O_RDWR); if (-1 == fd) { if (log) { (void) fprintf (stderr, "%s: %s: %s\n", Prog, lock, strerror (errno)); } unlink (file); errno = EINVAL; return 0; } len = read (fd, buf, sizeof (buf) - 1); close (fd); if (len <= 0) { if (log) { (void) fprintf (stderr, "%s: existing lock file %s without a PID\n", Prog, lock); } unlink (file); errno = EINVAL; return 0; } buf[len] = '\0'; if (get_pid (buf, &pid) == 0) { if (log) { (void) fprintf (stderr, 
"%s: existing lock file %s with an invalid PID '%s'\n", Prog, lock, buf); } unlink (file); errno = EINVAL; return 0; } if (kill (pid, 0) == 0) { if (log) { (void) fprintf (stderr, "%s: lock %s already used by PID %lu\n", Prog, lock, (unsigned long) pid); } unlink (file); errno = EEXIST; return 0; } if (unlink (lock) != 0) { if (log) { (void) fprintf (stderr, "%s: cannot get lock %s: %s\n", Prog, lock, strerror (errno)); } unlink (file); return 0; } retval = 0; if (link (file, lock) == 0) { retval = check_link_count (file); if ((0==retval) && log) { (void) fprintf (stderr, "%s: %s: lock file already used\n", Prog, file); } } else { if (log) { (void) fprintf (stderr, "%s: cannot get lock %s: %s\n", Prog, lock, strerror (errno)); } } unlink (file); return retval; } static /*@null@*/ /*@dependent@*/FILE *fopen_set_perms ( const char *name, const char *mode, const struct stat *sb) { FILE *fp; mode_t mask; mask = umask (0777); fp = fopen (name, mode); (void) umask (mask); if (NULL == fp) { return NULL; } #ifdef HAVE_FCHOWN if (fchown (fileno (fp), sb->st_uid, sb->st_gid) != 0) { goto fail; } #else /* !HAVE_FCHOWN */ if (chown (name, sb->st_mode) != 0) { goto fail; } #endif /* !HAVE_FCHOWN */ #ifdef HAVE_FCHMOD if (fchmod (fileno (fp), sb->st_mode & 0664) != 0) { goto fail; } #else /* !HAVE_FCHMOD */ if (chmod (name, sb->st_mode & 0664) != 0) { goto fail; } #endif /* !HAVE_FCHMOD */ return fp; fail: (void) fclose (fp); /* fopen_set_perms is used for intermediate files */ (void) unlink (name); return NULL; } static int create_backup (const char *backup, FILE * fp) { struct stat sb; struct utimbuf ub; FILE *bkfp; int c; if (fstat (fileno (fp), &sb) != 0) { return -1; } bkfp = fopen_set_perms (backup, "w", &sb); if (NULL == bkfp) { return -1; } /* TODO: faster copy, not one-char-at-a-time. 
--marekm */ c = 0; if (fseek (fp, 0, SEEK_SET) == 0) { while ((c = getc (fp)) != EOF) { if (putc (c, bkfp) == EOF) { break; } } } if ((c != EOF) || (ferror (fp) != 0) || (fflush (bkfp) != 0)) { (void) fclose (bkfp); /* FIXME: unlink the backup file? */ return -1; } if ( (fsync (fileno (bkfp)) != 0) || (fclose (bkfp) != 0)) { /* FIXME: unlink the backup file? */ return -1; } ub.actime = sb.st_atime; ub.modtime = sb.st_mtime; (void) utime (backup, &ub); return 0; } static void free_linked_list (struct commonio_db *db) { struct commonio_entry *p; while (NULL != db->head) { p = db->head; db->head = p->next; if (NULL != p->line) { free (p->line); } if (NULL != p->eptr) { db->ops->free (p->eptr); } free (p); } db->tail = NULL; } int commonio_setname (struct commonio_db *db, const char *name) { snprintf (db->filename, sizeof (db->filename), "%s", name); return 1; } bool commonio_present (const struct commonio_db *db) { return (access (db->filename, F_OK) == 0); } int commonio_lock_nowait (struct commonio_db *db, bool log) { char file[1024]; char lock[1024]; if (db->locked) { return 1; } snprintf (file, sizeof file, "%s.%lu", db->filename, (unsigned long) getpid ()); snprintf (lock, sizeof lock, "%s.lock", db->filename); if (do_lock_file (file, lock, log) != 0) { db->locked = true; lock_count++; return 1; } return 0; } int commonio_lock (struct commonio_db *db) { #ifdef HAVE_LCKPWDF /* * only if the system libc has a real lckpwdf() - the one from * lockpw.c calls us and would cause infinite recursion! */ /* * Call lckpwdf() on the first lock. * If it succeeds, call *_lock() only once * (no retries, it should always succeed). */ if (0 == lock_count) { if (lckpwdf () == -1) { if (geteuid () != 0) { (void) fprintf (stderr, "%s: Permission denied.\n", Prog); } return 0; /* failure */ } } if (commonio_lock_nowait (db, true) != 0) { return 1; /* success */ } ulckpwdf (); return 0; /* failure */ #else /* !HAVE_LCKPWDF */ int i; /* * lckpwdf() not used - do it the old way. 
*/ #ifndef LOCK_TRIES #define LOCK_TRIES 15 #endif #ifndef LOCK_SLEEP #define LOCK_SLEEP 1 #endif for (i = 0; i < LOCK_TRIES; i++) { if (i > 0) { sleep (LOCK_SLEEP); /* delay between retries */ } if (commonio_lock_nowait (db, i==LOCK_TRIES-1) != 0) { return 1; /* success */ } /* no unnecessary retries on "permission denied" errors */ if (geteuid () != 0) { (void) fprintf (stderr, "%s: Permission denied.\n", Prog); return 0; } } return 0; /* failure */ #endif /* !HAVE_LCKPWDF */ } static void dec_lock_count (void) { if (lock_count > 0) { lock_count--; if (lock_count == 0) { /* Tell nscd when lock count goes to zero, if any of the files were changed. */ if (nscd_need_reload) { nscd_flush_cache ("passwd"); nscd_flush_cache ("group"); nscd_need_reload = false; } #ifdef HAVE_LCKPWDF ulckpwdf (); #endif /* HAVE_LCKPWDF */ } } } int commonio_unlock (struct commonio_db *db) { char lock[1024]; if (db->isopen) { db->readonly = true; if (commonio_close (db) == 0) { if (db->locked) { dec_lock_count (); } return 0; } } if (db->locked) { /* * Unlock in reverse order: remove the lock file, * then call ulckpwdf() (if used) on last unlock. */ db->locked = false; snprintf (lock, sizeof lock, "%s.lock", db->filename); unlink (lock); dec_lock_count (); return 1; } return 0; } /* * Add an entry at the end. * * defines p->next, p->prev * (unfortunately, owned special are not supported) */ static void add_one_entry (struct commonio_db *db, /*@owned@*/struct commonio_entry *p) { /*@-mustfreeonly@*/ p->next = NULL; p->prev = db->tail; /*@=mustfreeonly@*/ if (NULL == db->head) { db->head = p; } if (NULL != db->tail) { db->tail->next = p; } db->tail = p; } static bool name_is_nis (const char *name) { return (('+' == name[0]) || ('-' == name[0])); } /* * New entries are inserted before the first NIS entry. Order is preserved * when db is written out. 
*/ #ifndef KEEP_NIS_AT_END #define KEEP_NIS_AT_END 1 #endif #if KEEP_NIS_AT_END static void add_one_entry_nis (struct commonio_db *db, /*@owned@*/struct commonio_entry *newp); /* * Insert an entry between the regular entries, and the NIS entries. * * defines newp->next, newp->prev * (unfortunately, owned special are not supported) */ static void add_one_entry_nis (struct commonio_db *db, /*@owned@*/struct commonio_entry *newp) { struct commonio_entry *p; for (p = db->head; NULL != p; p = p->next) { if (name_is_nis (p->eptr ? db->ops->getname (p->eptr) : p->line)) { /*@-mustfreeonly@*/ newp->next = p; newp->prev = p->prev; /*@=mustfreeonly@*/ if (NULL != p->prev) { p->prev->next = newp; } else { db->head = newp; } p->prev = newp; return; } } add_one_entry (db, newp); } #endif /* KEEP_NIS_AT_END */ /* Initial buffer size, as well as increment if not sufficient (for reading very long lines in group files). */ #define BUFLEN 4096 int commonio_open (struct commonio_db *db, int mode) { char *buf; char *cp; char *line; struct commonio_entry *p; void *eptr = NULL; int flags = mode; size_t buflen; int fd; int saved_errno; mode &= ~O_CREAT; if ( db->isopen || ( (O_RDONLY != mode) && (O_RDWR != mode))) { errno = EINVAL; return 0; } db->readonly = (mode == O_RDONLY); if (!db->readonly && !db->locked) { errno = EACCES; return 0; } db->head = NULL; db->tail = NULL; db->cursor = NULL; db->changed = false; fd = open (db->filename, (db->readonly ? O_RDONLY : O_RDWR) | O_NOCTTY | O_NONBLOCK | O_NOFOLLOW); saved_errno = errno; db->fp = NULL; if (fd >= 0) { #ifdef WITH_TCB if (tcb_is_suspect (fd) != 0) { (void) close (fd); errno = EINVAL; return 0; } #endif /* WITH_TCB */ db->fp = fdopen (fd, db->readonly ? "r" : "r+"); saved_errno = errno; if (NULL == db->fp) { (void) close (fd); } } errno = saved_errno; /* * If O_CREAT was specified and the file didn't exist, it will be * created by commonio_close(). We have no entries to read yet. 
--marekm */ if (NULL == db->fp) { if (((flags & O_CREAT) != 0) && (ENOENT == errno)) { db->isopen = true; return 1; } return 0; } /* Do not inherit fd in spawned processes (e.g. nscd) */ fcntl (fileno (db->fp), F_SETFD, FD_CLOEXEC); buflen = BUFLEN; buf = (char *) malloc (buflen); if (NULL == buf) { goto cleanup_ENOMEM; } while (db->ops->fgets (buf, (int) buflen, db->fp) == buf) { while ( ((cp = strrchr (buf, '\n')) == NULL) && (feof (db->fp) == 0)) { size_t len; buflen += BUFLEN; cp = (char *) realloc (buf, buflen); if (NULL == cp) { goto cleanup_buf; } buf = cp; len = strlen (buf); if (db->ops->fgets (buf + len, (int) (buflen - len), db->fp) == NULL) { goto cleanup_buf; } } cp = strrchr (buf, '\n'); if (NULL != cp) { *cp = '\0'; } line = strdup (buf); if (NULL == line) { goto cleanup_buf; } if (name_is_nis (line)) { eptr = NULL; } else { eptr = db->ops->parse (line); if (NULL != eptr) { eptr = db->ops->dup (eptr); if (NULL == eptr) { goto cleanup_line; } } } p = (struct commonio_entry *) malloc (sizeof *p); if (NULL == p) { goto cleanup_entry; } p->eptr = eptr; p->line = line; p->changed = false; add_one_entry (db, p); } free (buf); if (ferror (db->fp) != 0) { goto cleanup_errno; } if ((NULL != db->ops->open_hook) && (db->ops->open_hook () == 0)) { goto cleanup_errno; } db->isopen = true; return 1; cleanup_entry: if (NULL != eptr) { db->ops->free (eptr); } cleanup_line: free (line); cleanup_buf: free (buf); cleanup_ENOMEM: errno = ENOMEM; cleanup_errno: saved_errno = errno; free_linked_list (db); fclose (db->fp); db->fp = NULL; errno = saved_errno; return 0; } /* * Sort given db according to cmp function (usually compares uids) */ int commonio_sort (struct commonio_db *db, int (*cmp) (const void *, const void *)) { struct commonio_entry **entries, *ptr; size_t n = 0, i; #if KEEP_NIS_AT_END struct commonio_entry *nis = NULL; #endif for (ptr = db->head; (NULL != ptr) #if KEEP_NIS_AT_END && ((NULL == ptr->line) || (('+' != ptr->line[0]) && ('-' != ptr->line[0]))) 
#endif ; ptr = ptr->next) { n++; } #if KEEP_NIS_AT_END if (NULL != ptr) { nis = ptr; } #endif if (n <= 1) { return 0; } entries = malloc (n * sizeof (struct commonio_entry *)); if (entries == NULL) { return -1; } n = 0; for (ptr = db->head; #if KEEP_NIS_AT_END nis != ptr; #else NULL != ptr; #endif /*@ -nullderef @*/ ptr = ptr->next /*@ +nullderef @*/ ) { entries[n] = ptr; n++; } qsort (entries, n, sizeof (struct commonio_entry *), cmp); /* Take care of the head and tail separately */ db->head = entries[0]; n--; #if KEEP_NIS_AT_END if (NULL == nis) #endif { db->tail = entries[n]; } db->head->prev = NULL; db->head->next = entries[1]; entries[n]->prev = entries[n - 1]; #if KEEP_NIS_AT_END entries[n]->next = nis; #else entries[n]->next = NULL; #endif /* Now other elements have prev and next entries */ for (i = 1; i < n; i++) { entries[i]->prev = entries[i - 1]; entries[i]->next = entries[i + 1]; } free (entries); db->changed = true; return 0; } /* * Sort entries in db according to order in another. */ int commonio_sort_wrt (struct commonio_db *shadow, const struct commonio_db *passwd) { struct commonio_entry *head = NULL, *pw_ptr, *spw_ptr; const char *name; if ((NULL == shadow) || (NULL == shadow->head)) { return 0; } for (pw_ptr = passwd->head; NULL != pw_ptr; pw_ptr = pw_ptr->next) { if (NULL == pw_ptr->eptr) { continue; } name = passwd->ops->getname (pw_ptr->eptr); for (spw_ptr = shadow->head; NULL != spw_ptr; spw_ptr = spw_ptr->next) { if (NULL == spw_ptr->eptr) { continue; } if (strcmp (name, shadow->ops->getname (spw_ptr->eptr)) == 0) { break; } } if (NULL == spw_ptr) { continue; } commonio_del_entry (shadow, spw_ptr); spw_ptr->next = head; head = spw_ptr; } for (spw_ptr = head; NULL != spw_ptr; spw_ptr = head) { head = head->next; if (NULL != shadow->head) { shadow->head->prev = spw_ptr; } spw_ptr->next = shadow->head; shadow->head = spw_ptr; } shadow->head->prev = NULL; shadow->changed = true; return 0; } /* * write_all - Write the database to its file. 
* * It returns 0 if all the entries could be written correctly. */ static int write_all (const struct commonio_db *db) /*@requires notnull db->fp@*/ { const struct commonio_entry *p; void *eptr; for (p = db->head; NULL != p; p = p->next) { if (p->changed) { eptr = p->eptr; assert (NULL != eptr); if (db->ops->put (eptr, db->fp) != 0) { return -1; } } else if (NULL != p->line) { if (db->ops->fputs (p->line, db->fp) == EOF) { return -1; } if (putc ('\n', db->fp) == EOF) { return -1; } } } return 0; } int commonio_close (struct commonio_db *db) /*@requires notnull db->fp@*/ { char buf[1024]; int errors = 0; struct stat sb; if (!db->isopen) { errno = EINVAL; return 0; } db->isopen = false; if (!db->changed || db->readonly) { (void) fclose (db->fp); db->fp = NULL; goto success; } if ((NULL != db->ops->close_hook) && (db->ops->close_hook () == 0)) { goto fail; } memzero (&sb, sizeof sb); if (NULL != db->fp) { if (fstat (fileno (db->fp), &sb) != 0) { (void) fclose (db->fp); db->fp = NULL; goto fail; } /* * Create backup file. */ snprintf (buf, sizeof buf, "%s-", db->filename); #ifdef WITH_SELINUX if (set_selinux_file_context (buf) != 0) { errors++; } #endif if (create_backup (buf, db->fp) != 0) { errors++; } if (fclose (db->fp) != 0) { errors++; } #ifdef WITH_SELINUX if (reset_selinux_file_context () != 0) { errors++; } #endif if (errors != 0) { db->fp = NULL; goto fail; } } else { /* * Default permissions for new [g]shadow files. 
*/ sb.st_mode = db->st_mode; sb.st_uid = db->st_uid; sb.st_gid = db->st_gid; } snprintf (buf, sizeof buf, "%s+", db->filename); #ifdef WITH_SELINUX if (set_selinux_file_context (buf) != 0) { errors++; } #endif db->fp = fopen_set_perms (buf, "w", &sb); if (NULL == db->fp) { goto fail; } if (write_all (db) != 0) { errors++; } if (fflush (db->fp) != 0) { errors++; } #ifdef HAVE_FSYNC if (fsync (fileno (db->fp)) != 0) { errors++; } #else /* !HAVE_FSYNC */ sync (); #endif /* !HAVE_FSYNC */ if (fclose (db->fp) != 0) { errors++; } db->fp = NULL; if (errors != 0) { unlink (buf); goto fail; } if (lrename (buf, db->filename) != 0) { goto fail; } #ifdef WITH_SELINUX if (reset_selinux_file_context () != 0) { goto fail; } #endif nscd_need_reload = true; goto success; fail: errors++; success: free_linked_list (db); return errors == 0; } static /*@dependent@*/ /*@null@*/struct commonio_entry *next_entry_by_name ( struct commonio_db *db, /*@null@*/struct commonio_entry *pos, const char *name) { struct commonio_entry *p; void *ep; if (NULL == pos) { return NULL; } for (p = pos; NULL != p; p = p->next) { ep = p->eptr; if ( (NULL != ep) && (strcmp (db->ops->getname (ep), name) == 0)) { break; } } return p; } static /*@dependent@*/ /*@null@*/struct commonio_entry *find_entry_by_name ( struct commonio_db *db, const char *name) { return next_entry_by_name (db, db->head, name); } int commonio_update (struct commonio_db *db, const void *eptr) { struct commonio_entry *p; void *nentry; if (!db->isopen || db->readonly) { errno = EINVAL; return 0; } nentry = db->ops->dup (eptr); if (NULL == nentry) { errno = ENOMEM; return 0; } p = find_entry_by_name (db, db->ops->getname (eptr)); if (NULL != p) { if (next_entry_by_name (db, p->next, db->ops->getname (eptr)) != NULL) { fprintf (stderr, _("Multiple entries named '%s' in %s. 
Please fix this with pwck or grpck.\n"), db->ops->getname (eptr), db->filename); db->ops->free (nentry); return 0; } db->ops->free (p->eptr); p->eptr = nentry; p->changed = true; db->cursor = p; db->changed = true; return 1; } /* not found, new entry */ p = (struct commonio_entry *) malloc (sizeof *p); if (NULL == p) { db->ops->free (nentry); errno = ENOMEM; return 0; } p->eptr = nentry; p->line = NULL; p->changed = true; #if KEEP_NIS_AT_END add_one_entry_nis (db, p); #else /* !KEEP_NIS_AT_END */ add_one_entry (db, p); #endif /* !KEEP_NIS_AT_END */ db->changed = true; return 1; } #ifdef ENABLE_SUBIDS int commonio_append (struct commonio_db *db, const void *eptr) { struct commonio_entry *p; void *nentry; if (!db->isopen || db->readonly) { errno = EINVAL; return 0; } nentry = db->ops->dup (eptr); if (NULL == nentry) { errno = ENOMEM; return 0; } /* new entry */ p = (struct commonio_entry *) malloc (sizeof *p); if (NULL == p) { db->ops->free (nentry); errno = ENOMEM; return 0; } p->eptr = nentry; p->line = NULL; p->changed = true; add_one_entry (db, p); db->changed = true; return 1; } #endif /* ENABLE_SUBIDS */ void commonio_del_entry (struct commonio_db *db, const struct commonio_entry *p) { if (p == db->cursor) { db->cursor = p->next; } if (NULL != p->prev) { p->prev->next = p->next; } else { db->head = p->next; } if (NULL != p->next) { p->next->prev = p->prev; } else { db->tail = p->prev; } db->changed = true; } /* * commonio_remove - Remove the entry of the given name from the database. */ int commonio_remove (struct commonio_db *db, const char *name) { struct commonio_entry *p; if (!db->isopen || db->readonly) { errno = EINVAL; return 0; } p = find_entry_by_name (db, name); if (NULL == p) { errno = ENOENT; return 0; } if (next_entry_by_name (db, p->next, name) != NULL) { fprintf (stderr, _("Multiple entries named '%s' in %s. 
Please fix this with pwck or grpck.\n"), name, db->filename); return 0; } commonio_del_entry (db, p); if (NULL != p->line) { free (p->line); } if (NULL != p->eptr) { db->ops->free (p->eptr); } return 1; } /* * commonio_locate - Find the first entry with the specified name in * the database. * * If found, it returns the entry and set the cursor of the database to * that entry. * * Otherwise, it returns NULL. */ /*@observer@*/ /*@null@*/const void *commonio_locate (struct commonio_db *db, const char *name) { struct commonio_entry *p; if (!db->isopen) { errno = EINVAL; return NULL; } p = find_entry_by_name (db, name); if (NULL == p) { errno = ENOENT; return NULL; } db->cursor = p; return p->eptr; } /* * commonio_rewind - Restore the database cursor to the first entry. * * It returns 0 on error, 1 on success. */ int commonio_rewind (struct commonio_db *db) { if (!db->isopen) { errno = EINVAL; return 0; } db->cursor = NULL; return 1; } /* * commonio_next - Return the next entry of the specified database * * It returns the next entry, or NULL if no other entries could be found. */ /*@observer@*/ /*@null@*/const void *commonio_next (struct commonio_db *db) { void *eptr; if (!db->isopen) { errno = EINVAL; return 0; } if (NULL == db->cursor) { db->cursor = db->head; } else { db->cursor = db->cursor->next; } while (NULL != db->cursor) { eptr = db->cursor->eptr; if (NULL != eptr) { return eptr; } db->cursor = db->cursor->next; } return NULL; }
commonio_sort (struct commonio_db *db, int (*cmp) (const void *, const void *)) { struct commonio_entry **entries, *ptr; size_t n = 0, i; #if KEEP_NIS_AT_END struct commonio_entry *nis = NULL; #endif for (ptr = db->head; (NULL != ptr) #if KEEP_NIS_AT_END && (NULL != ptr->line) && ( ('+' != ptr->line[0]) && ('-' != ptr->line[0])) #endif ; ptr = ptr->next) { n++; } #if KEEP_NIS_AT_END if ((NULL != ptr) && (NULL != ptr->line)) { nis = ptr; } #endif if (n <= 1) { return 0; } entries = malloc (n * sizeof (struct commonio_entry *)); if (entries == NULL) { return -1; } n = 0; for (ptr = db->head; #if KEEP_NIS_AT_END nis != ptr; #else NULL != ptr; #endif /*@ -nullderef @*/ ptr = ptr->next /*@ +nullderef @*/ ) { entries[n] = ptr; n++; } qsort (entries, n, sizeof (struct commonio_entry *), cmp); /* Take care of the head and tail separately */ db->head = entries[0]; n--; #if KEEP_NIS_AT_END if (NULL == nis) #endif { db->tail = entries[n]; } db->head->prev = NULL; db->head->next = entries[1]; entries[n]->prev = entries[n - 1]; #if KEEP_NIS_AT_END entries[n]->next = nis; #else entries[n]->next = NULL; #endif /* Now other elements have prev and next entries */ for (i = 1; i < n; i++) { entries[i]->prev = entries[i - 1]; entries[i]->next = entries[i + 1]; } free (entries); db->changed = true; return 0; }
commonio_sort (struct commonio_db *db, int (*cmp) (const void *, const void *)) { struct commonio_entry **entries, *ptr; size_t n = 0, i; #if KEEP_NIS_AT_END struct commonio_entry *nis = NULL; #endif for (ptr = db->head; (NULL != ptr) #if KEEP_NIS_AT_END && ((NULL == ptr->line) || (('+' != ptr->line[0]) && ('-' != ptr->line[0]))) #endif ; ptr = ptr->next) { n++; } #if KEEP_NIS_AT_END if (NULL != ptr) { nis = ptr; } #endif if (n <= 1) { return 0; } entries = malloc (n * sizeof (struct commonio_entry *)); if (entries == NULL) { return -1; } n = 0; for (ptr = db->head; #if KEEP_NIS_AT_END nis != ptr; #else NULL != ptr; #endif /*@ -nullderef @*/ ptr = ptr->next /*@ +nullderef @*/ ) { entries[n] = ptr; n++; } qsort (entries, n, sizeof (struct commonio_entry *), cmp); /* Take care of the head and tail separately */ db->head = entries[0]; n--; #if KEEP_NIS_AT_END if (NULL == nis) #endif { db->tail = entries[n]; } db->head->prev = NULL; db->head->next = entries[1]; entries[n]->prev = entries[n - 1]; #if KEEP_NIS_AT_END entries[n]->next = nis; #else entries[n]->next = NULL; #endif /* Now other elements have prev and next entries */ for (i = 1; i < n; i++) { entries[i]->prev = entries[i - 1]; entries[i]->next = entries[i + 1]; } free (entries); db->changed = true; return 0; }
{'added': [(754, '\t && ((NULL == ptr->line)'), (755, "\t || (('+' != ptr->line[0])"), (756, "\t && ('-' != ptr->line[0])))"), (763, '\tif (NULL != ptr) {')], 'deleted': [(754, '\t && (NULL != ptr->line)'), (755, "\t && ( ('+' != ptr->line[0])"), (756, "\t && ('-' != ptr->line[0]))"), (763, '\tif ((NULL != ptr) && (NULL != ptr->line)) {')]}
4
4
910
5,122
53
353
18
https://github.com/shadow-maint/shadow
CVE-2017-12424
CWE-119
2,786
jsiArray.c
C
jsi_ArrayShiftCmd
#ifndef JSI_LITE_ONLY #ifndef JSI_AMALGAMATION #include "jsiInt.h" #endif #if JSI__MUSL==1 || defined(__FreeBSD__) #define NO_QSORT_R 1 #endif static Jsi_RC jsi_ArrayPushCmd(Jsi_Interp *interp, Jsi_Value *args, Jsi_Value *_this, Jsi_Value **ret, Jsi_Func *funcPtr) { Jsi_Obj *obj; if (_this->vt != JSI_VT_OBJECT || !Jsi_ObjIsArray(interp, _this->d.obj)) { Jsi_ValueMakeNumber(interp, ret, 0); return JSI_OK; } obj = _this->d.obj; int argc = Jsi_ValueGetLength(interp, args); int curlen = Jsi_ObjGetLength(interp, obj); if (curlen < 0) { Jsi_ObjSetLength(interp, obj, 0); } int i; for (i = 0; i < argc; ++i) { Jsi_Value *ov = Jsi_ValueArrayIndex(interp, args, i); if (!ov) { Jsi_LogBug("Arguments Error"); ov = Jsi_ValueNew(interp); } Jsi_ValueInsertArray(interp, _this, curlen + i, ov, 0); } Jsi_ValueMakeNumber(interp, ret, Jsi_ObjGetLength(interp, obj)); return JSI_OK; } static Jsi_RC jsi_ArrayPopCmd(Jsi_Interp *interp, Jsi_Value *args, Jsi_Value *_this, Jsi_Value **ret, Jsi_Func *funcPtr) { if (_this->vt != JSI_VT_OBJECT || !Jsi_ObjIsArray(interp, _this->d.obj)) { Jsi_ValueMakeNumber(interp, ret, 0); return JSI_OK; } Jsi_Value *v; Jsi_Obj *obj; obj = _this->d.obj; int i = Jsi_ObjGetLength(interp, obj) - 1; if (i < 0) { Jsi_ValueMakeUndef(interp, ret); return JSI_OK; } if (obj->arr) { if ((v = obj->arr[i])) { obj->arr[i] = NULL; obj->arrCnt--; } } else { v = Jsi_ValueArrayIndex(interp, _this, i); } if (v) { Jsi_DecrRefCount(interp, *ret); *ret = v; } Jsi_ObjSetLength(interp, obj, i); return JSI_OK; } static Jsi_RC jsi_ArrayJoinCmd(Jsi_Interp *interp, Jsi_Value *args, Jsi_Value *_this, Jsi_Value **ret, Jsi_Func *funcPtr) { if (_this->vt != JSI_VT_OBJECT || !Jsi_ObjIsArray(interp, _this->d.obj)) return Jsi_LogError("expected array object"); const char *jstr = ""; int argc, curlen; Jsi_DString dStr = {}; curlen = Jsi_ObjGetLength(interp, _this->d.obj); if (curlen == 0) { goto bail; } if (Jsi_ValueGetLength(interp, args) >= 1) { Jsi_Value *sc = Jsi_ValueArrayIndex(interp, args, 
0); if (sc != NULL) jstr = Jsi_ValueToString(interp, sc, NULL); } if (0 == (argc=Jsi_ObjGetLength(interp, _this->d.obj))) { goto bail; } int i; for (i = 0; i < argc; ++i) { const char *cp; Jsi_Value *ov = Jsi_ValueArrayIndex(interp, _this, i); if (!ov) { /* TODO: are NULL args ok? */ continue; cp = ""; } else cp = Jsi_ValueToString(interp, ov, NULL); if (i && jstr[0]) Jsi_DSAppend(&dStr, jstr, NULL); Jsi_DSAppend(&dStr, cp, NULL); } Jsi_ValueMakeStringDup(interp, ret, Jsi_DSValue(&dStr)); Jsi_DSFree(&dStr); return JSI_OK; bail: Jsi_ValueMakeStringDup(interp, ret, ""); return JSI_OK; } Jsi_Value* Jsi_ValueArrayConcat(Jsi_Interp *interp, Jsi_Value *arg1, Jsi_Value *arg2) { Jsi_Value *va; Jsi_Obj *obj; if (arg1->vt != JSI_VT_OBJECT || !Jsi_ObjIsArray(interp, arg1->d.obj)) { return NULL; } if (arg2->vt != JSI_VT_OBJECT || !Jsi_ObjIsArray(interp, arg2->d.obj)) { return NULL; } int len1 = arg1->d.obj->arrCnt; int len2 = arg2->d.obj->arrCnt; Jsi_Obj *nobj = Jsi_ObjNewType(interp, JSI_OT_ARRAY); Jsi_ObjArraySizer(interp, nobj, len1+len2); int i, j = 0; obj = arg1->d.obj; for (i = 0; i<len1; i++, j++) { if (!obj->arr[i]) continue; nobj->arr[j] = NULL; Jsi_ValueDup2(interp, nobj->arr+j, obj->arr[i]); } obj = arg2->d.obj; for (i = 0; i<len2; i++, j++) { if (!obj->arr[i]) continue; nobj->arr[j] = NULL; Jsi_ValueDup2(interp, nobj->arr+j, obj->arr[i]); } Jsi_ObjSetLength(interp, nobj, len1+len2); va = Jsi_ValueMakeArrayObject(interp, NULL, nobj); return va; } Jsi_RC Jsi_ValueArrayPush(Jsi_Interp *interp, Jsi_Value *arg1, Jsi_Value *arg2) { Jsi_Obj *obj; if (arg1->vt != JSI_VT_OBJECT || !Jsi_ObjIsArray(interp, arg1->d.obj)) return JSI_ERROR; if (!arg2) return JSI_ERROR; int len1 = arg1->d.obj->arrCnt; obj = arg1->d.obj; Jsi_ObjArraySizer(interp, obj, len1); obj->arr[len1] = arg2; Jsi_IncrRefCount(interp, arg2); obj->arrCnt++; return JSI_OK; } Jsi_Value *Jsi_ValueArrayPop(Jsi_Interp *interp, Jsi_Value *v) { if (v->vt != JSI_VT_OBJECT) { Jsi_LogBug("Jsi_ValueArrayPop, target is not 
object"); return NULL; } Jsi_Obj *o = v->d.obj; if (!o->isarrlist) { Jsi_LogBug("Jsi_ValueArrayPop, target is not array"); return NULL; } if (o->arrCnt<=0) return NULL; int idx = o->arrCnt-1; if (!o->arr[idx]) return NULL; Jsi_DecrRefCount(interp, o->arr[idx]); Jsi_Value *ret = o->arr[idx]; o->arr[idx] = NULL; o->arrCnt--; return ret; } Jsi_Value *Jsi_ValueArrayUnshift(Jsi_Interp *interp, Jsi_Value *v) { if (v->vt != JSI_VT_OBJECT) { Jsi_LogBug("Jsi_ValueArrayUnshift, target is not object"); return NULL; } Jsi_Obj *o = v->d.obj; if (!o->isarrlist) { Jsi_LogBug("Jsi_ValueArrayUnshift, target is not array"); return NULL; } if (o->arrCnt<=0) return NULL; if (!o->arr[0]) return NULL; Jsi_DecrRefCount(interp, o->arr[0]); Jsi_Value *ret = o->arr[0]; o->arr[0] = NULL; o->arrCnt--; return ret; } /* delete array[0], array[1]->array[0] */ void Jsi_ValueArrayShift(Jsi_Interp *interp, Jsi_Value *v) { if (v->vt != JSI_VT_OBJECT) { Jsi_LogBug("Jsi_ValueArrayShift, target is not object"); return; } Jsi_Obj *o = v->d.obj; if (o->isarrlist) { uint i; if (!o->arrCnt) return; if (o->arr[0]) Jsi_DecrRefCount(interp, o->arr[0]); for (i=1; i<o->arrCnt; i++) { o->arr[i-1] = o->arr[i]; } o->arr[o->arrCnt--] = NULL; return; } int len = Jsi_ObjGetLength(interp, v->d.obj); if (len <= 0) return; Jsi_Value *v0 = Jsi_ValueArrayIndex(interp, v, 0); if (!v0) return; Jsi_ValueReset(interp, &v0); int i; Jsi_Value *last = v0; for (i = 1; i < len; ++i) { Jsi_Value *t = Jsi_ValueArrayIndex(interp, v, i); if (!t) return; Jsi_ValueCopy(interp, last, t); Jsi_ValueReset(interp, &t); last = t; } Jsi_ObjSetLength(interp, v->d.obj, len - 1); } static Jsi_RC jsi_ArrayFlatSub(Jsi_Interp *interp, Jsi_Obj* nobj, Jsi_Value *arr, int depth) { int i, n = 0, len = Jsi_ObjGetLength(interp, arr->d.obj); if (len <= 0) return JSI_OK; Jsi_RC rc = JSI_OK; int clen = Jsi_ObjGetLength(interp, nobj); for (i = 0; i < len && rc == JSI_OK; i++) { Jsi_Value *t = Jsi_ValueArrayIndex(interp, arr, i); if (t && depth>0 && 
Jsi_ValueIsArray(interp, t)) rc = jsi_ArrayFlatSub(interp, nobj, t , depth-1); else if (!Jsi_ValueIsUndef(interp, t)) Jsi_ObjArrayAdd(interp, nobj, t); if ((++n + clen)>interp->maxArrayList) return Jsi_LogError("array size exceeded"); } return rc; } static Jsi_RC jsi_ArrayFlatCmd(Jsi_Interp *interp, Jsi_Value *args, Jsi_Value *_this,Jsi_Value **ret, Jsi_Func *funcPtr) { if (_this->vt != JSI_VT_OBJECT || !Jsi_ObjIsArray(interp, _this->d.obj)) return Jsi_LogError("expected array object"); Jsi_Number ndepth = 1; Jsi_Obj *nobj; Jsi_Value *depth = Jsi_ValueArrayIndex(interp, args, 0); if (depth && Jsi_GetNumberFromValue(interp,depth, &ndepth) != JSI_OK) return JSI_ERROR; if (ndepth < 0 || ndepth>1000) return Jsi_LogError("bad depth: %d", (int)ndepth); nobj = Jsi_ObjNewType(interp, JSI_OT_ARRAY); Jsi_ValueMakeArrayObject(interp, ret, nobj ); if (ndepth>0) return jsi_ArrayFlatSub(interp, nobj, _this, ndepth); return JSI_OK; } static Jsi_RC jsi_ArrayConcatCmd(Jsi_Interp *interp, Jsi_Value *args, Jsi_Value *_this,Jsi_Value **ret, Jsi_Func *funcPtr) { if (_this->vt != JSI_VT_OBJECT || !Jsi_ObjIsArray(interp, _this->d.obj)) return Jsi_LogError("expected array object"); Jsi_RC rc = JSI_OK; int curlen, argc, nsiz; Jsi_Obj *obj, *nobj; Jsi_Value *va; obj = _this->d.obj; argc = Jsi_ValueGetLength(interp, args); curlen = Jsi_ObjGetLength(interp, obj); if (curlen < 0) { Jsi_ObjSetLength(interp, obj, 0); } Jsi_ObjListifyArray(interp, obj); nobj = Jsi_ObjNewType(interp, JSI_OT_ARRAY); nsiz = obj->arrMaxSize; if (nsiz<=0) nsiz = 100; if (Jsi_ObjArraySizer(interp, nobj, nsiz+1) <= 0) { rc = JSI_ERROR; Jsi_LogError("index too large: %d", nsiz+1); goto bail; } int i, j, m; for (i = 0; i<curlen; i++) { if (!obj->arr[i]) continue; nobj->arr[i] = NULL; Jsi_ValueDup2(interp, nobj->arr+i, obj->arr[i]); } m = i; for (i = 0; i < argc; i++) { va = Jsi_ValueArrayIndex(interp, args, i); if (va->vt == JSI_VT_OBJECT && Jsi_ObjIsArray(interp, va->d.obj)) { int margc = Jsi_ValueGetLength(interp, va); 
Jsi_Obj *mobj = va->d.obj; Jsi_ObjListifyArray(interp, mobj); if (Jsi_ObjArraySizer(interp, nobj, curlen += margc) <= 0) { rc = JSI_ERROR; Jsi_LogError("index too large: %d", curlen); goto bail; } for (j = 0; j<margc; j++, m++) { if (!mobj->arr[j]) continue; nobj->arr[m] = NULL; Jsi_ValueDup2(interp, nobj->arr+m, mobj->arr[j]); } } else { if (Jsi_ObjArraySizer(interp, nobj, ++curlen) <= 0) { rc = JSI_ERROR; Jsi_LogError("index too large: %d", curlen); goto bail; } nobj->arr[m] = NULL; Jsi_ValueDup2(interp, nobj->arr+m++, va); } } Jsi_ObjSetLength(interp, nobj, curlen); Jsi_ValueMakeArrayObject(interp, ret, nobj); return JSI_OK; bail: Jsi_ValueMakeNull(interp, ret); return rc; } static Jsi_RC jsi_ArrayMapCmd(Jsi_Interp *interp, Jsi_Value *args, Jsi_Value *_this,Jsi_Value **ret, Jsi_Func *funcPtr) { if (_this->vt != JSI_VT_OBJECT || !Jsi_ObjIsArray(interp, _this->d.obj)) return Jsi_LogError("expected array object"); Jsi_RC rc = JSI_OK; int curlen, nsiz, i, maa = 0; Jsi_Obj *obj, *nobj; Jsi_Value *func, *vpargs, *nthis = NULL, *sthis; Jsi_Func *fptr = NULL; func = Jsi_ValueArrayIndex(interp, args, 0); if (!Jsi_ValueIsFunction(interp, func)) return Jsi_LogError("expected function"); sthis = Jsi_ValueArrayIndex(interp, args, 1); if (!sthis) sthis = nthis = Jsi_ValueNew1(interp); obj = _this->d.obj; curlen = Jsi_ObjGetLength(interp, obj); if (curlen < 0) { Jsi_ObjSetLength(interp, obj, 0); } Jsi_ObjListifyArray(interp, obj); nobj = Jsi_ObjNewType(interp, JSI_OT_ARRAY); nsiz = obj->arrCnt; if (nsiz<=0) nsiz = 1; if (Jsi_ObjArraySizer(interp, nobj, nsiz) <= 0) { Jsi_LogError("index too large: %d", nsiz); rc = JSI_ERROR; goto bail; } Jsi_ValueMakeArrayObject(interp, ret, nobj); Jsi_Value *vobjs[3]; fptr = func->d.obj->d.fobj->func; maa = (fptr->argnames?fptr->argnames->argCnt:0); if (maa>3) maa = 3; for (i = 0; i < curlen; i++) { if (!obj->arr[i]) continue; vobjs[0] = obj->arr[i]; vobjs[1] = (maa>1?Jsi_ValueNewNumber(interp, i):NULL); vobjs[2] = _this; vpargs = 
Jsi_ValueMakeObject(interp, NULL, Jsi_ObjNewArray(interp, vobjs, maa, 0)); Jsi_IncrRefCount(interp, vpargs); nobj->arr[i] = Jsi_ValueNew1(interp); rc = Jsi_FunctionInvoke(interp, func, vpargs, nobj->arr+i, sthis); Jsi_DecrRefCount(interp, vpargs); if( JSI_OK!=rc ) { goto bail; } } Jsi_ObjSetLength(interp, nobj, curlen); if (nthis) Jsi_DecrRefCount(interp, nthis); return JSI_OK; bail: Jsi_ValueMakeNull(interp, ret); if (nthis) Jsi_DecrRefCount(interp, nthis); return rc; } static Jsi_RC jsi_ArrayFilterCmd(Jsi_Interp *interp, Jsi_Value *args, Jsi_Value *_this,Jsi_Value **ret, Jsi_Func *funcPtr) { if (_this->vt != JSI_VT_OBJECT || !Jsi_ObjIsArray(interp, _this->d.obj)) return Jsi_LogError("expected array object"); Jsi_RC rc = JSI_OK; int curlen, nsiz, i, fval, n = 0, maa = 0; Jsi_Obj *obj, *nobj; Jsi_Value *func, *vpargs, *nthis = NULL, *sthis, *nrPtr = NULL; Jsi_Func *fptr = NULL; func = Jsi_ValueArrayIndex(interp, args, 0); if (!Jsi_ValueIsFunction(interp, func)) return Jsi_LogError("expected function"); sthis = Jsi_ValueArrayIndex(interp, args, 1); if (!sthis) sthis = nthis = Jsi_ValueNew1(interp); obj = _this->d.obj; curlen = Jsi_ObjGetLength(interp, obj); if (curlen < 0) { Jsi_ObjSetLength(interp, obj, 0); } Jsi_ObjListifyArray(interp, obj); nobj = Jsi_ObjNewType(interp, JSI_OT_ARRAY); nsiz = obj->arrCnt; if (nsiz<=0) nsiz = 1; if (Jsi_ObjArraySizer(interp, nobj, nsiz) <= 0) { Jsi_LogError("index too large: %d", nsiz); rc = JSI_ERROR; goto bail; } Jsi_ValueMakeArrayObject(interp, ret, nobj); nrPtr = Jsi_ValueNew1(interp); Jsi_Value *vobjs[4]; fptr = func->d.obj->d.fobj->func; maa = (fptr->argnames?fptr->argnames->argCnt:0); if (maa>3) maa = 3; for (i = 0; i < curlen; i++) { if (!obj->arr[i]) continue; vobjs[0] = obj->arr[i]; vobjs[1] = (maa>1?Jsi_ValueNewNumber(interp, i):NULL); vobjs[2] = _this; vpargs = Jsi_ValueMakeObject(interp, NULL, Jsi_ObjNewArray(interp, vobjs, maa, 0)); Jsi_IncrRefCount(interp, vpargs); rc = Jsi_FunctionInvoke(interp, func, vpargs, 
&nrPtr, sthis); Jsi_DecrRefCount(interp, vpargs); fval = Jsi_ValueIsTrue(interp, nrPtr); Jsi_ValueMakeUndef(interp, &nrPtr); if( JSI_OK!=rc ) { goto bail; } if (fval) { nobj->arr[n++] = obj->arr[i]; Jsi_IncrRefCount(interp, obj->arr[i]); } } if (nthis) Jsi_DecrRefCount(interp, nthis); Jsi_DecrRefCount(interp, nrPtr); Jsi_ObjSetLength(interp, nobj, n); return JSI_OK; bail: if (nthis) Jsi_DecrRefCount(interp, nthis); if (nrPtr) Jsi_DecrRefCount(interp, nrPtr); Jsi_ValueMakeNull(interp, ret); return rc; } static Jsi_RC jsi_ArrayReverseCmd(Jsi_Interp *interp, Jsi_Value *args, Jsi_Value *_this, Jsi_Value **ret, Jsi_Func *funcPtr) { if (_this->vt != JSI_VT_OBJECT || !Jsi_ObjIsArray(interp, _this->d.obj)) return Jsi_LogError("expected array"); int i, n, m; Jsi_Obj *obj; Jsi_Value *tval, *nthis = NULL, *sthis = Jsi_ValueArrayIndex(interp, args, 1); if (!sthis) sthis = nthis = Jsi_ValueNew1(interp); obj = _this->d.obj; Jsi_ObjListifyArray(interp, obj); m = obj->arrCnt/2; for (i = 0, n=obj->arrCnt-1; i < m; i++, n--) { tval = obj->arr[i]; obj->arr[i] = obj->arr[n]; obj->arr[n] = tval; } Jsi_ValueDup2(interp, ret, _this); if (nthis) Jsi_DecrRefCount(interp, nthis); return JSI_OK; } static Jsi_RC jsi_ArrayForeachCmd(Jsi_Interp *interp, Jsi_Value *args, Jsi_Value *_this,Jsi_Value **ret, Jsi_Func *funcPtr) { if (_this->vt != JSI_VT_OBJECT || !Jsi_ObjIsArray(interp, _this->d.obj)) return Jsi_LogError("expected array object"); Jsi_Obj *obj; int curlen; uint i; Jsi_Value *func, *vpargs; func = Jsi_ValueArrayIndex(interp, args, 0); if (!Jsi_ValueIsFunction(interp, func)) return Jsi_LogError("expected function"); Jsi_Value *sthis = Jsi_ValueArrayIndex(interp, args, 1); Jsi_Value *nthis = NULL; if (!sthis) sthis = nthis = Jsi_ValueNew1(interp); obj = _this->d.obj; curlen = Jsi_ObjGetLength(interp, obj); if (curlen < 0) { Jsi_ObjSetLength(interp, obj, 0); } Jsi_ObjListifyArray(interp, obj); Jsi_RC rc = JSI_OK; Jsi_Value *vobjs[3]; Jsi_Func *fptr = func->d.obj->d.fobj->func; int maa = 
(fptr->argnames?fptr->argnames->argCnt:0); if (maa>3) maa = 3; for (i = 0; i < obj->arrCnt && rc == JSI_OK; i++) { if (!obj->arr[i]) continue; vobjs[0] = obj->arr[i]; vobjs[1] = (maa>1?Jsi_ValueNewNumber(interp, i):NULL); vobjs[2] = _this; vpargs = Jsi_ValueMakeObject(interp, NULL, Jsi_ObjNewArray(interp, vobjs, maa, 0)); Jsi_IncrRefCount(interp, vpargs); rc = Jsi_FunctionInvoke(interp, func, vpargs, ret, sthis); Jsi_DecrRefCount(interp, vpargs); } if (nthis) Jsi_DecrRefCount(interp, nthis); return rc; } static Jsi_RC jsi_ArrayFindSubCmd(Jsi_Interp *interp, Jsi_Value *args, Jsi_Value *_this,Jsi_Value **ret, Jsi_Func *funcPtr, int op) { if (_this->vt != JSI_VT_OBJECT || !Jsi_ObjIsArray(interp, _this->d.obj)) return Jsi_LogError("expected array"); Jsi_Obj *obj; int curlen; uint i; Jsi_RC rc = JSI_OK; Jsi_Value *func, *vpargs, *sthis = Jsi_ValueArrayIndex(interp, args, 1); func = Jsi_ValueArrayIndex(interp, args, 0); if (!Jsi_ValueIsFunction(interp, func)) return Jsi_LogError("expected function"); Jsi_Value *nthis = NULL; if (!sthis) sthis = nthis = Jsi_ValueNew1(interp); obj = _this->d.obj; curlen = Jsi_ObjGetLength(interp, obj); if (curlen < 0) { Jsi_ObjSetLength(interp, obj, 0); } Jsi_ObjListifyArray(interp, obj); int fval = 0; Jsi_Value *nrPtr = Jsi_ValueNew1(interp); Jsi_Value *vobjs[3]; Jsi_Func *fptr = func->d.obj->d.fobj->func; int maa = (fptr->argnames?fptr->argnames->argCnt:0); if (maa>3) maa = 3; for (i = 0; i < obj->arrCnt && rc == JSI_OK; i++) { if (!obj->arr[i]) continue; vobjs[0] = obj->arr[i]; vobjs[1] = (maa>1?Jsi_ValueNewNumber(interp, i):NULL); vobjs[2] = _this; vpargs = Jsi_ValueMakeObject(interp, NULL, Jsi_ObjNewArray(interp, vobjs, maa, 0)); Jsi_IncrRefCount(interp, vpargs); rc = Jsi_FunctionInvoke(interp, func, vpargs, &nrPtr, sthis); Jsi_DecrRefCount(interp, vpargs); if (rc != JSI_OK) break; fval = Jsi_ValueIsTrue(interp, nrPtr); Jsi_ValueMakeUndef(interp, &nrPtr); if (op == 3) { if (!fval) break; } else if (fval) break; } if (rc == JSI_OK) { 
if (op == 1 && fval) // Find Jsi_ValueCopy(interp, *ret, obj->arr[i]); else if (op == 2 || op == 3) // Some/Every Jsi_ValueMakeBool(interp, ret, fval); else if (op == 4) Jsi_ValueMakeNumber(interp, ret, (Jsi_Number)(fval?(int)i:-1)); } if (nthis) Jsi_DecrRefCount(interp, nthis); Jsi_DecrRefCount(interp, nrPtr); return rc; } static Jsi_RC jsi_ArrayReduceSubCmd(Jsi_Interp *interp, Jsi_Value *args, Jsi_Value *_this,Jsi_Value **ret, Jsi_Func *funcPtr, int op) { if (_this->vt != JSI_VT_OBJECT || !Jsi_ObjIsArray(interp, _this->d.obj)) return Jsi_LogError("expected array"); Jsi_RC rc = JSI_OK; int curlen, i; Jsi_Obj *obj; Jsi_Value *func, *vpargs, *ini = Jsi_ValueArrayIndex(interp, args, 1); func = Jsi_ValueArrayIndex(interp, args, 0); if (!Jsi_ValueIsFunction(interp, func)) return Jsi_LogError("expected function"); Jsi_Value *nrPtr = Jsi_ValueNew1(interp); obj = _this->d.obj; curlen = Jsi_ObjGetLength(interp, obj); if (curlen < 0) Jsi_ObjSetLength(interp, obj, 0); Jsi_ObjListifyArray(interp, obj); Jsi_Value *vobjs[4]; int n, rev = (op==2); Jsi_Func *fptr = func->d.obj->d.fobj->func; int maa = (fptr->argnames?fptr->argnames->argCnt:0); if (maa>4) maa = 4; for (n = 0, i = (rev?obj->arrCnt-1:0); (rev?i>=0:i < (int)obj->arrCnt) && rc == JSI_OK; n++, i = (rev?i-1:i+1)) { if (!obj->arr[i]) continue; if (n==0 && !ini) { ini = obj->arr[i]; continue; } vobjs[0] = ini; vobjs[1] = obj->arr[i]; vobjs[2] = (maa>2?Jsi_ValueNewNumber(interp, i):NULL); vobjs[3] = _this; vpargs = Jsi_ValueMakeObject(interp, NULL, Jsi_ObjNewArray(interp, vobjs, maa, 0)); Jsi_IncrRefCount(interp, vpargs); rc = Jsi_FunctionInvoke(interp, func, vpargs, &nrPtr, NULL); Jsi_DecrRefCount(interp, vpargs); if (rc != JSI_OK) break; ini = nrPtr; } if (rc == JSI_OK && ini) Jsi_ValueCopy(interp, *ret, ini); Jsi_DecrRefCount(interp, nrPtr); return rc; } static Jsi_RC jsi_ArrayFindCmd(Jsi_Interp *interp, Jsi_Value *args, Jsi_Value *_this,Jsi_Value **ret, Jsi_Func *funcPtr) { return jsi_ArrayFindSubCmd(interp, args, 
_this, ret, funcPtr, 1); } static Jsi_RC jsi_ArraySomeCmd(Jsi_Interp *interp, Jsi_Value *args, Jsi_Value *_this,Jsi_Value **ret, Jsi_Func *funcPtr) { return jsi_ArrayFindSubCmd(interp, args, _this, ret, funcPtr, 2); } static Jsi_RC jsi_ArrayEveryCmd(Jsi_Interp *interp, Jsi_Value *args, Jsi_Value *_this,Jsi_Value **ret, Jsi_Func *funcPtr) { return jsi_ArrayFindSubCmd(interp, args, _this, ret, funcPtr, 3); } static Jsi_RC jsi_ArrayFindIndexCmd(Jsi_Interp *interp, Jsi_Value *args, Jsi_Value *_this,Jsi_Value **ret, Jsi_Func *funcPtr) { return jsi_ArrayFindSubCmd(interp, args, _this, ret, funcPtr, 4); } static Jsi_RC jsi_ArrayReduceCmd(Jsi_Interp *interp, Jsi_Value *args, Jsi_Value *_this,Jsi_Value **ret, Jsi_Func *funcPtr) { return jsi_ArrayReduceSubCmd(interp, args, _this, ret, funcPtr, 1); } static Jsi_RC jsi_ArrayReduceRightCmd(Jsi_Interp *interp, Jsi_Value *args, Jsi_Value *_this,Jsi_Value **ret, Jsi_Func *funcPtr) { return jsi_ArrayReduceSubCmd(interp, args, _this, ret, funcPtr, 2); } static Jsi_RC jsi_ArrayIsArrayCmd(Jsi_Interp *interp, Jsi_Value *args, Jsi_Value *_this,Jsi_Value **ret, Jsi_Func *funcPtr) { bool b = 0; Jsi_Value *sthis = _this; if (_this->vt == JSI_VT_OBJECT && _this->d.obj->ot == JSI_OT_FUNCTION && _this->d.obj->__proto__ == interp->Array_prototype->d.obj->__proto__ ) sthis = Jsi_ValueArrayIndex(interp, args, 0); if (sthis && sthis->vt == JSI_VT_OBJECT && Jsi_ObjIsArray(interp, sthis->d.obj)) b = 1; Jsi_ValueMakeBool(interp, ret, b); return JSI_OK; } static Jsi_RC jsi_ArrayIndexSubCmd(Jsi_Interp *interp, Jsi_Value *args, Jsi_Value *_this,Jsi_Value **ret, Jsi_Func *funcPtr, int op) { int istart = 0, n, i = 0, dir=1, idx=-1; Jsi_Value *seq = Jsi_ValueArrayIndex(interp, args, 0), *start = Jsi_ValueArrayIndex(interp, args, 1); Jsi_Obj *obj = _this->d.obj; if (_this->vt != JSI_VT_OBJECT || !Jsi_ObjIsArray(interp, _this->d.obj)) return Jsi_LogError("expected array object"); if (!seq) { goto bail; } n = Jsi_ObjGetLength(interp, obj); if (n == 0) { goto 
bail; } Jsi_Number nstart; if (op == 2) { istart = n-1; } if (start && Jsi_GetNumberFromValue(interp,start, &nstart)==JSI_OK) { istart = (int)nstart; if (istart > n) goto bail; if (istart < 0) istart = (n+istart); if (istart<0) goto bail; } if (op == 2) { istart = n-1; dir = -1; } Jsi_ObjListifyArray(interp, obj); for (i = istart; ; i+=dir) { if ((dir>0 && i>=n) || (dir<0 && i<0) || i>=(int)obj->arrCnt) break; if (obj->arr[i] && Jsi_ValueCmp(interp, obj->arr[i], seq, JSI_CMP_EXACT)==0) { idx = i; break; } } bail: if (op == 3) Jsi_ValueMakeBool(interp, ret, (idx!=-1)); else Jsi_ValueMakeNumber(interp, ret, idx); return JSI_OK; } static Jsi_RC jsi_ArrayIndexOfCmd(Jsi_Interp *interp, Jsi_Value *args, Jsi_Value *_this,Jsi_Value **ret, Jsi_Func *funcPtr) { return jsi_ArrayIndexSubCmd(interp, args, _this, ret, funcPtr, 1); } static Jsi_RC jsi_ArrayLastindexOfCmd(Jsi_Interp *interp, Jsi_Value *args, Jsi_Value *_this,Jsi_Value **ret, Jsi_Func *funcPtr) { return jsi_ArrayIndexSubCmd(interp, args, _this, ret, funcPtr, 2); } static Jsi_RC jsi_ArrayIncludesCmd(Jsi_Interp *interp, Jsi_Value *args, Jsi_Value *_this,Jsi_Value **ret, Jsi_Func *funcPtr) { return jsi_ArrayIndexSubCmd(interp, args, _this, ret, funcPtr, 3); } static Jsi_RC jsi_ArraySizeOfCmd(Jsi_Interp *interp, Jsi_Value *args, Jsi_Value *_this,Jsi_Value **ret, Jsi_Func *funcPtr) { if (_this->vt != JSI_VT_OBJECT || !Jsi_ObjIsArray(interp, _this->d.obj)) return Jsi_LogError("expected array object"); int i = Jsi_ObjGetLength(interp, _this->d.obj); Jsi_ValueMakeNumber(interp, ret, i); return JSI_OK; } static Jsi_RC jsi_ArrayShiftCmd(Jsi_Interp *interp, Jsi_Value *args, Jsi_Value *_this,Jsi_Value **ret, Jsi_Func *funcPtr) { if (_this->vt != JSI_VT_OBJECT || !Jsi_ObjIsArray(interp, _this->d.obj)) return Jsi_LogError("expected array object"); Jsi_Value *v; Jsi_Obj *obj = _this->d.obj; Jsi_ObjListifyArray(interp, obj); uint n = Jsi_ObjGetLength(interp, obj); assert(n <= obj->arrCnt); if (n<=0) { Jsi_ValueMakeUndef(interp, 
ret); } else { n--; v = obj->arr[0]; memmove(obj->arr, obj->arr+1, n*sizeof(Jsi_Value*)); obj->arr[n] = NULL; Jsi_ValueDup2(interp, ret, v); Jsi_DecrRefCount(interp, v); Jsi_ObjSetLength(interp, obj, n); } return JSI_OK; } static Jsi_RC jsi_ArrayUnshiftCmd(Jsi_Interp *interp, Jsi_Value *args, Jsi_Value *_this,Jsi_Value **ret, Jsi_Func *funcPtr) { if (_this->vt != JSI_VT_OBJECT || !Jsi_ObjIsArray(interp, _this->d.obj)) return Jsi_LogError("expected array object"); Jsi_Obj *obj = _this->d.obj; int argc = Jsi_ValueGetLength(interp, args); int curlen = Jsi_ObjGetLength(interp, obj); if (curlen < 0) { Jsi_ObjSetLength(interp, obj, 0); } if (argc <= 0) { Jsi_ValueMakeNumber(interp, ret, 0); return JSI_OK; } Jsi_ObjListifyArray(interp, obj); if (Jsi_ObjArraySizer(interp, obj, curlen+argc)<=0) return Jsi_LogError("too long"); memmove(obj->arr+argc, obj->arr, (curlen)*sizeof(Jsi_Value*)); obj->arrCnt += argc; int i; for (i = 0; i < argc; ++i) { Jsi_Value *ov = Jsi_ValueArrayIndex(interp, args, i); obj->arr[i] = NULL; if (!ov) { Jsi_LogBug("Arguments Error"); continue; } obj->arr[i] = ov; Jsi_IncrRefCount(interp, ov); } Jsi_ObjSetLength(interp, obj, curlen+argc); Jsi_ValueMakeNumber(interp, ret, Jsi_ObjGetLength(interp, obj)); return JSI_OK; } static Jsi_RC jsi_ArrayFillCmd(Jsi_Interp *interp, Jsi_Value *args, Jsi_Value *_this,Jsi_Value **ret, Jsi_Func *funcPtr) { if (_this->vt != JSI_VT_OBJECT || !Jsi_ObjIsArray(interp, _this->d.obj)) return Jsi_LogError("expected array object"); Jsi_RC rc = JSI_OK; int istart = 0, iend, n, nsiz; Jsi_Number nstart = 0, nend = 0; // TODO: merge with code in ArraySliceCmd. 
Jsi_Value *value = Jsi_ValueArrayIndex(interp, args, 0), *start = Jsi_ValueArrayIndex(interp, args, 1), *end = Jsi_ValueArrayIndex(interp, args, 2); Jsi_Obj *obj = _this->d.obj; n = Jsi_ObjGetLength(interp, obj); if (start && Jsi_GetNumberFromValue(interp, start, &nstart) == JSI_OK) { istart = (int)nstart; if (istart > n) goto bail; if (istart < 0) istart = (n+istart); if (istart<0) goto bail; } if (n == 0) { goto bail; } iend = n-1; if (end && Jsi_GetNumberFromValue(interp,end, &nend) == JSI_OK) { iend = (int) nend; if (iend >= n) iend = n; if (iend < 0) iend = (n+iend); if (iend<0) goto bail; } nsiz = iend-istart+1; if (nsiz<=0) goto bail; int i; for (i = istart; i <= iend; i++) { if (obj->arr[i]) Jsi_ValueCopy(interp, obj->arr[i], value); else obj->arr[i] = Jsi_ValueDup(interp, value); } bail: if (_this != *ret) { Jsi_ValueMove(interp, *ret, _this); /*if (*ret) Jsi_DecrRefCount(interp, *ret); *ret = _this; Jsi_IncrRefCount(interp, *ret);*/ } return rc; } static Jsi_RC jsi_ArraySliceCmd(Jsi_Interp *interp, Jsi_Value *args, Jsi_Value *_this,Jsi_Value **ret, Jsi_Func *funcPtr) { if (_this->vt != JSI_VT_OBJECT || !Jsi_ObjIsArray(interp, _this->d.obj)) return Jsi_LogError("expected array object"); Jsi_RC rc = JSI_OK; int istart = 0, iend, n, nsiz; Jsi_Number nstart; Jsi_Obj *nobj, *obj; Jsi_Value *start = Jsi_ValueArrayIndex(interp, args, 0), *end = Jsi_ValueArrayIndex(interp, args, 1); if (!start) { goto bail; } obj = _this->d.obj; n = Jsi_ObjGetLength(interp, obj); if (Jsi_GetNumberFromValue(interp,start, &nstart) == JSI_OK) { istart = (int)nstart; if (istart > n) goto done; if (istart < 0) istart = (n+istart); if (istart<0) goto bail; } if (n == 0) { done: Jsi_ValueMakeArrayObject(interp, ret, Jsi_ObjNewType(interp, JSI_OT_ARRAY)); return JSI_OK; } Jsi_Number nend; iend = n-1; if (end && Jsi_GetNumberFromValue(interp,end, &nend) == JSI_OK) { iend = (int) nend; if (iend >= n) iend = n; if (iend < 0) iend = (n+iend); if (iend<0) goto bail; } nsiz = iend-istart+1; if 
(nsiz<=0) goto done; Jsi_ObjListifyArray(interp, obj); nobj = Jsi_ObjNewType(interp, JSI_OT_ARRAY); if (Jsi_ObjArraySizer(interp, nobj, nsiz) <= 0) { rc = Jsi_LogError("index too large: %d", nsiz); goto bail; } int i, m; for (m = 0, i = istart; i <= iend; i++, m++) { if (!obj->arr[i]) continue; nobj->arr[m] = NULL; Jsi_ValueDup2(interp, nobj->arr+m, obj->arr[i]); } Jsi_ObjSetLength(interp, nobj, nsiz); Jsi_ValueMakeArrayObject(interp, ret, nobj); return JSI_OK; bail: Jsi_ValueMakeNull(interp, ret); return rc; } typedef struct { Jsi_Interp *interp; int flags; int mode; bool unique; Jsi_Value *compare; int errCnt; } SortInfo; static const char *sortArrayStrs[] = {"default", "desc", "dict", "nocase", 0}; static Jsi_OptionSpec jsi_ArraySortOptions[] = { JSI_OPT(CUSTOM, SortInfo, mode, .help="Mode to sort by", .flags=0, .custom=Jsi_Opt_SwitchEnum, .data=sortArrayStrs), JSI_OPT(FUNC, SortInfo, compare, .help="Function to do comparison", .flags=0, .custom=0, .data=(void*)"val1,val2"), JSI_OPT(BOOL, SortInfo, unique, .help="Eliminate duplicate items"), JSI_OPT_END(SortInfo) }; #ifdef NO_QSORT_R SortInfo *curSortInfo = NULL; static int SortSubCmd(const void *p1, const void *p2) { SortInfo *si = curSortInfo; #else #ifdef __WIN32 static int SortSubCmd(void *thunk, const void *p1, const void *p2) #else static int SortSubCmd(const void *p1, const void *p2, void *thunk) #endif { SortInfo *si = (SortInfo *)thunk; #endif Jsi_Interp *interp = si->interp; int sortFlags = si->flags; if (interp == NULL || interp->deleting) return 0; Jsi_Value *v1 = *(Jsi_Value**)p1, *v2 = *(Jsi_Value**)p2; int rc = 0; if (v1 != NULL && v2 != NULL) { VALCHK(v1); VALCHK(v2); if (!si->compare) rc = Jsi_ValueCmp(interp, v1, v2, sortFlags); else { Jsi_Value *vv[2] = {v1, v2}; Jsi_Value *retP = Jsi_ValueNew1(interp); Jsi_Value *vpargs = Jsi_ValueMakeObject(interp, NULL, Jsi_ObjNewArray(interp, vv, 2, 0)); Jsi_IncrRefCount(interp, vpargs); rc = Jsi_FunctionInvoke(interp, si->compare, vpargs, &retP, NULL); 
Jsi_DecrRefCount(interp, vpargs); if (rc == JSI_OK) { Jsi_Number d = 0; if (Jsi_ValueGetNumber(interp, retP, &d) == JSI_OK) rc = -(int)d; else { if (!si->errCnt) Jsi_LogWarn("invalid function return"); si->errCnt++; } } Jsi_DecrRefCount(interp, retP); } } else { if (v1 == v2) rc = 0; else if (v1 == NULL) rc = 1; else rc = -1; } if ((sortFlags&JSI_SORT_DESCEND)) return rc; return -rc; } Jsi_RC Jsi_ValueArraySort(Jsi_Interp *interp, Jsi_Value *val, int flags) { if (val->vt != JSI_VT_OBJECT || !Jsi_ObjIsArray(interp, val->d.obj)) { return JSI_ERROR; } Jsi_Obj *obj = val->d.obj; Jsi_ObjListifyArray(interp, obj); if (obj->arrCnt <= 0) { return JSI_OK; } #ifdef __WIN32 #define qsort_r qsort_s #endif SortInfo si = {}; si.interp = interp; si.flags = flags; #ifdef NO_QSORT_R curSortInfo = &si; qsort(obj->arr, obj->arrCnt, sizeof(Jsi_Value*), SortSubCmd); curSortInfo = NULL; #else qsort_r(obj->arr, obj->arrCnt, sizeof(Jsi_Value*), SortSubCmd, &si); #endif return JSI_OK; } static Jsi_RC jsi_ArraySortCmd(Jsi_Interp *interp, Jsi_Value *args, Jsi_Value *_this,Jsi_Value **ret, Jsi_Func *funcPtr) { if (_this->vt != JSI_VT_OBJECT || !Jsi_ObjIsArray(interp, _this->d.obj)) return Jsi_LogError("expected array object"); int flags = 0, i, curlen, hasopt = 0; Jsi_Value *v, *arg = NULL; SortInfo si = {}; si.interp = interp; Jsi_Obj *obj = _this->d.obj; curlen = obj->arrCnt; if (curlen <= 1) { goto done; } arg = Jsi_ValueArrayIndex(interp, args, 0); if (arg) { if (Jsi_ValueIsObjType(interp, arg, JSI_OT_OBJECT)) { if (Jsi_OptionsProcess(interp, jsi_ArraySortOptions, &si, arg, 0) < 0) return JSI_ERROR; hasopt = 1; switch (si.mode) { case 1: flags |= JSI_SORT_DESCEND; break; case 2: flags |= JSI_SORT_DICT; break; case 3: flags |= JSI_SORT_NOCASE; break; } } else if (Jsi_ValueIsObjType(interp, arg, JSI_OT_FUNCTION)) si.compare = arg; else return Jsi_LogError("expected object or function"); } si.flags = flags; Jsi_ObjListifyArray(interp, obj); #ifdef NO_QSORT_R /* TODO: mutex. 
*/ curSortInfo = &si; qsort(obj->arr, curlen, sizeof(Jsi_Value*), SortSubCmd); #else qsort_r(obj->arr, curlen, sizeof(Jsi_Value*), SortSubCmd, &si); #endif if (interp->deleting) { #ifdef NO_QSORT_R curSortInfo = NULL; #endif return JSI_ERROR; } if (si.unique) { int n, diff = 1, dupCnt=0; for (n=0, i=1; i<(int)obj->arrCnt; i++) { if (obj->arr[n] == obj->arr[i]) diff = 1; else #ifdef NO_QSORT_R diff = SortSubCmd(&obj->arr[n], &obj->arr[i]); #else #ifdef __WIN32 diff = SortSubCmd(&si, &obj->arr[n], &obj->arr[i]); #else diff = SortSubCmd(&obj->arr[n], &obj->arr[i], &si); #endif #endif if (diff) { n++; if (n!=i) obj->arr[n] = obj->arr[i]; } else { dupCnt++; if (obj->arr[i]) Jsi_DecrRefCount(interp, obj->arr[i]); obj->arr[i] = 0; } } obj->arrCnt -= dupCnt; } #ifdef NO_QSORT_R curSortInfo = NULL; #endif if (hasopt) Jsi_OptionsFree(interp, jsi_ArraySortOptions, &si, 0); done: v = Jsi_ValueMakeObject(interp, NULL, obj); Jsi_ValueReplace(interp, ret, v); return JSI_OK; Jsi_ValueMakeNull(interp, ret); return JSI_OK; } static Jsi_RC jsi_ArraySpliceCmd(Jsi_Interp *interp, Jsi_Value *args, Jsi_Value *_this,Jsi_Value **ret, Jsi_Func *funcPtr) { if (_this->vt != JSI_VT_OBJECT || !Jsi_ObjIsArray(interp, _this->d.obj)) return Jsi_LogError("expected array object"); int newlen, argc, istart, n, rhowmany, ilen, curlen; Jsi_Value *va, *start, *howmany; Jsi_Obj *nobj, *obj = _this->d.obj; start = Jsi_ValueArrayIndex(interp, args, 0); howmany = Jsi_ValueArrayIndex(interp, args, 1); argc = Jsi_ValueGetLength(interp, args); istart = 0; ilen = (argc>=2 ? argc - 2 : 0); n = Jsi_ObjGetLength(interp, obj); curlen = n; if (!start) { goto bail2; } nobj = Jsi_ObjNewType(interp, JSI_OT_ARRAY); Jsi_ValueMakeArrayObject(interp, ret, nobj); Jsi_ObjSetLength(interp, nobj, 0); /* Determine start index. 
*/ Jsi_Number nstart; if (Jsi_GetNumberFromValue(interp, start, &nstart) == JSI_OK) { istart = (int)nstart; if (istart > n) goto bail; if (istart < 0) istart = (n+istart); if (istart<0) istart=0; } Jsi_Number nhow; rhowmany = n-istart; if (howmany && Jsi_GetNumberFromValue(interp, howmany, &nhow) == JSI_OK) { rhowmany = (int)nhow; if (rhowmany >= (n-istart)) rhowmany = n-istart; if (rhowmany < 0) rhowmany = (n-istart); if (rhowmany<0) goto bail; } if (curlen < 0) { Jsi_ObjSetLength(interp, obj, 0); } Jsi_ObjListifyArray(interp, obj); Jsi_ObjArraySizer(interp, nobj, rhowmany); /* Move elements to return object. */ int i, j, m; for (m=0, j = 0, i = istart; m<rhowmany && m<curlen; m++, i++, j++) { if (!obj->arr[i]) continue; nobj->arr[m] = obj->arr[i]; obj->arr[i] = NULL; } Jsi_ObjSetLength(interp, nobj, m); /* Shift remaining down. */ for (; rhowmany && i<curlen; i++) { obj->arr[i-rhowmany] = obj->arr[i]; obj->arr[i] = NULL; } curlen -= j; /* Add elements. */ newlen = curlen + argc - (argc>=2?2:1); if (Jsi_ObjArraySizer(interp, obj, newlen+3) <= 0) { Jsi_LogError("too long"); Jsi_ValueMakeUndef(interp, ret); return JSI_ERROR; } if (ilen>0) { for (i = curlen-1; i>=istart; i--) { obj->arr[i+ilen] = obj->arr[i]; obj->arr[i] = NULL; } for (m=istart, i = 2; i<argc; m++,i++) { va = Jsi_ValueArrayIndex(interp, args, i); if (!va) continue; obj->arr[m] = NULL; Jsi_ValueDup2(interp, obj->arr+m, va); } } Jsi_ObjSetLength(interp, obj, newlen); bail: return JSI_OK; bail2: Jsi_ValueMakeNull(interp, ret); return JSI_OK; } static Jsi_RC jsi_ArrayConstructor(Jsi_Interp *interp, Jsi_Value *args, Jsi_Value *_this, Jsi_Value **ret, Jsi_Func *funcPtr) { int argc = Jsi_ValueGetLength(interp, args), iscons = Jsi_FunctionIsConstructor(funcPtr); Jsi_Value *target; Jsi_Value *v = Jsi_ValueArrayIndex(interp, args, 0); if (iscons) { target = _this; Jsi_ValueMakeArrayObject(interp, &_this, Jsi_ObjNewArray(interp, NULL, 0, 0)); } else { Jsi_Obj *o = Jsi_ObjNewType(interp, JSI_OT_ARRAY); 
 /* Non-constructor call: fabricate a fresh array object and return it. */
        o->__proto__ = interp->Array_prototype;
        Jsi_ValueMakeObject(interp, ret, o);
        target = *ret;
    }

    if (argc == 1 && v && Jsi_ValueIsNumber(interp, v)) {
        /* new Array(n): pre-size to n empty slots; n must be a non-negative integer. */
        Jsi_Number nv;
        Jsi_GetNumberFromValue(interp,v, &nv);
        int len = (int)nv;
        if (!Jsi_NumberIsInteger(v->d.num) || len < 0)
            return Jsi_LogError("Invalid array length");
        target->d.obj->isarrlist = 1;
        if (Jsi_ObjArraySizer(interp, target->d.obj, len) <= 0)
            return JSI_ERROR;
    } else {
        /* new Array(a, b, ...): populate from the argument list. */
        int i;
        target->d.obj->isarrlist = 1;
        if (Jsi_ObjArraySizer(interp, target->d.obj, 0) <= 0)
            return JSI_ERROR;

        for (i = 0; i < argc; ++i) {
            Jsi_Value *argv = Jsi_ValueArrayIndex(interp, args, i);   ;
            Jsi_ValueInsertArray(interp, _this, i, argv, 0);
        }
    }
    if (iscons)
        Jsi_ValueDup2(interp, ret, target);
    return JSI_OK;
}

/* Dispatch table binding Array.prototype method names to their C
 * implementations; signature strings drive Jsi's arg checking. */
static Jsi_CmdSpec arrayCmds[] = {
    { "Array",      jsi_ArrayConstructor,   0,-1, "...", .help="jsi_Array constructor", .retType=(uint)JSI_TT_ARRAY, .flags=JSI_CMD_IS_CONSTRUCTOR },
    { "concat",     jsi_ArrayConcatCmd,     0,-1, "...", .help="Return array with args appended", .retType=(uint)JSI_TT_ARRAY },
    { "every",      jsi_ArrayEveryCmd,      1, 1, "callback:function", .help="Returns true if every value in array satisfies the test", .retType=(uint)JSI_TT_ANY },
    { "fill",       jsi_ArrayFillCmd,       1, 3, "value:any, start:number=0, end:number=-1", .help="Fill an array with values", .retType=(uint)JSI_TT_ARRAY },
    { "filter",     jsi_ArrayFilterCmd,     1, 2, "callback:function, this:object=void", .help="Return a filtered array", .retType=(uint)JSI_TT_ARRAY },
    { "find",       jsi_ArrayFindCmd,       1, 1, "callback:function", .help="Returns the value of the first element in the array that satisfies the test", .retType=(uint)JSI_TT_ANY },
    { "findIndex",  jsi_ArrayFindIndexCmd,  1, 1, "callback:function", .help="Returns the index of the first element in the array that satisfies the test", .retType=(uint)JSI_TT_ANY },
    { "flat",       jsi_ArrayFlatCmd,       0, 1, "depth:number=1", .help="Flatten an arra", .retType=(uint)JSI_TT_ARRAY },
    { "forEach",    jsi_ArrayForeachCmd,    1, 2, "callback:function, this:object=void", .help="Invoke function with each item in object", .retType=(uint)JSI_TT_VOID },
    { "includes",   jsi_ArrayIncludesCmd,   1, 1, "val:any", .help="Returns true if array contains value", .retType=(uint)JSI_TT_ANY },
    { "indexOf",    jsi_ArrayIndexOfCmd,    1, 2, "str:any, startIdx:number=0", .help="Return index of first occurrance in array", .retType=(uint)JSI_TT_NUMBER },
    { "isArray",    jsi_ArrayIsArrayCmd,    0, 0, "", .help="True if val array", .retType=(uint)JSI_TT_BOOLEAN },
    { "join",       jsi_ArrayJoinCmd,       0, 1, "sep:string=''", .help="Return elements joined by char", .retType=(uint)JSI_TT_STRING },
    { "lastIndexOf",jsi_ArrayLastindexOfCmd,1, 2, "val:any, start:number=0", .help="Return index of last occurence in array", .retType=(uint)JSI_TT_NUMBER },
    { "map",        jsi_ArrayMapCmd,        1, 2, "callback:function, this:object=void", .help="Creates a new array with the results of calling a provided function on every element in this array", .retType=(uint)JSI_TT_ARRAY },
    { "pop",        jsi_ArrayPopCmd,        0, 0, "", .help="Remove and return last element of array", .retType=(uint)JSI_TT_ANY },
    { "push",       jsi_ArrayPushCmd,       1,-1, "val:any, ...", .help="Push one or more elements onto array and return size", .retType=(uint)JSI_TT_NUMBER },
    { "reduce",     jsi_ArrayReduceCmd,     1, 2, "callback:function, initial:any", .help="Return a reduced array", .retType=(uint)JSI_TT_ANY },
    { "reduceRight",jsi_ArrayReduceRightCmd,1, 2, "callback:function, initial:any", .help="Return a reduced array", .retType=(uint)JSI_TT_ANY },
    { "shift",      jsi_ArrayShiftCmd,      0, 0, "", .help="Remove first element and shift downwards", .retType=(uint)JSI_TT_ANY },
    { "sizeOf",     jsi_ArraySizeOfCmd,     0, 0, "", .help="Return size of array", .retType=(uint)JSI_TT_NUMBER },
    { "slice",      jsi_ArraySliceCmd,      1, 2, "start:number, end:number=void", .help="Return sub-array", .retType=(uint)JSI_TT_ARRAY },
    { "some",       jsi_ArraySomeCmd,       1, 2, "callback:function, this:object=void", .help="Return true if function returns true some element", .retType=(uint)JSI_TT_BOOLEAN },
    { "sort",       jsi_ArraySortCmd,       0, 1, "options:function|object=void", .help="Sort an array", .retType=(uint)JSI_TT_ARRAY, .flags=0, .info=0, .opts=jsi_ArraySortOptions },
    { "splice",     jsi_ArraySpliceCmd,     1,-1, "start:number, howmany:number=void, ...", .help="Change the content of an array, adding new elements while removing old elements", .retType=(uint)JSI_TT_ARRAY },
    { "reverse",    jsi_ArrayReverseCmd,    0, 0, "", .help="Reverse order of all elements in an array", .retType=(uint)JSI_TT_ARRAY },
    { "unshift",    jsi_ArrayUnshiftCmd,    0,-1, "...", .help="Add new elements to start of array and return size", .retType=(uint)JSI_TT_NUMBER },
    { NULL, 0,0,0,0, .help="Provide access to array objects" }
};

/* Register the Array command specs on the interp; "release" teardown is a no-op
 * because the specs are static data. */
Jsi_RC jsi_InitArray(Jsi_Interp *interp, int release)
{
    if (release) return JSI_OK;
    interp->Array_prototype = Jsi_CommandCreateSpecs(interp, "Array", arrayCmds, NULL, JSI_CMDSPEC_ISOBJ);
    return JSI_OK;
}
#endif
#ifndef JSI_LITE_ONLY #ifndef JSI_AMALGAMATION #include "jsiInt.h" #endif #if JSI__MUSL==1 || defined(__FreeBSD__) #define NO_QSORT_R 1 #endif static uint jsi_SizeOfArray(Jsi_Interp *interp, Jsi_Obj *obj) { if (!obj || !obj->arr) return 0; return obj->arrCnt; } static Jsi_RC jsi_ArrayPushCmd(Jsi_Interp *interp, Jsi_Value *args, Jsi_Value *_this, Jsi_Value **ret, Jsi_Func *funcPtr) { Jsi_Obj *obj; if (_this->vt != JSI_VT_OBJECT || !Jsi_ObjIsArray(interp, _this->d.obj)) { Jsi_ValueMakeNumber(interp, ret, 0); return JSI_OK; } obj = _this->d.obj; int argc = Jsi_ValueGetLength(interp, args); int curlen = jsi_SizeOfArray(interp, obj); int i; for (i = 0; i < argc; ++i) { Jsi_Value *ov = Jsi_ValueArrayIndex(interp, args, i); if (!ov) { Jsi_LogBug("Arguments Error"); ov = Jsi_ValueNew(interp); } Jsi_ValueInsertArray(interp, _this, curlen + i, ov, 0); } Jsi_ValueMakeNumber(interp, ret, jsi_SizeOfArray(interp, obj)); return JSI_OK; } static Jsi_RC jsi_ArrayPopCmd(Jsi_Interp *interp, Jsi_Value *args, Jsi_Value *_this, Jsi_Value **ret, Jsi_Func *funcPtr) { if (_this->vt != JSI_VT_OBJECT || !Jsi_ObjIsArray(interp, _this->d.obj)) { Jsi_ValueMakeNumber(interp, ret, 0); return JSI_OK; } Jsi_Value *v; Jsi_Obj *obj; obj = _this->d.obj; int i = jsi_SizeOfArray(interp, obj) - 1; if (i < 0) { Jsi_ValueMakeUndef(interp, ret); return JSI_OK; } if (obj->arr) { if ((v = obj->arr[i])) { obj->arr[i] = NULL; obj->arrCnt--; } } else { v = Jsi_ValueArrayIndex(interp, _this, i); } if (v) { Jsi_DecrRefCount(interp, *ret); *ret = v; } Jsi_ObjSetLength(interp, obj, i); return JSI_OK; } static Jsi_RC jsi_ArrayJoinCmd(Jsi_Interp *interp, Jsi_Value *args, Jsi_Value *_this, Jsi_Value **ret, Jsi_Func *funcPtr) { if (_this->vt != JSI_VT_OBJECT || !Jsi_ObjIsArray(interp, _this->d.obj)) return Jsi_LogError("expected array object"); const char *jstr = ""; int argc, curlen; Jsi_DString dStr = {}; curlen = jsi_SizeOfArray(interp, _this->d.obj); if (curlen == 0) { goto bail; } if (Jsi_ValueGetLength(interp, 
args) >= 1) { Jsi_Value *sc = Jsi_ValueArrayIndex(interp, args, 0); if (sc != NULL) jstr = Jsi_ValueToString(interp, sc, NULL); } if (0 == (argc=jsi_SizeOfArray(interp, _this->d.obj))) { goto bail; } int i; for (i = 0; i < argc; ++i) { const char *cp; Jsi_Value *ov = Jsi_ValueArrayIndex(interp, _this, i); if (!ov) { /* TODO: are NULL args ok? */ continue; cp = ""; } else cp = Jsi_ValueToString(interp, ov, NULL); if (i && jstr[0]) Jsi_DSAppend(&dStr, jstr, NULL); Jsi_DSAppend(&dStr, cp, NULL); } Jsi_ValueMakeStringDup(interp, ret, Jsi_DSValue(&dStr)); Jsi_DSFree(&dStr); return JSI_OK; bail: Jsi_ValueMakeStringDup(interp, ret, ""); return JSI_OK; } Jsi_Value* Jsi_ValueArrayConcat(Jsi_Interp *interp, Jsi_Value *arg1, Jsi_Value *arg2) { Jsi_Value *va; Jsi_Obj *obj; if (arg1->vt != JSI_VT_OBJECT || !Jsi_ObjIsArray(interp, arg1->d.obj)) { return NULL; } if (arg2->vt != JSI_VT_OBJECT || !Jsi_ObjIsArray(interp, arg2->d.obj)) { return NULL; } int len1 = arg1->d.obj->arrCnt; int len2 = arg2->d.obj->arrCnt; Jsi_Obj *nobj = Jsi_ObjNewType(interp, JSI_OT_ARRAY); Jsi_ObjArraySizer(interp, nobj, len1+len2); int i, j = 0; obj = arg1->d.obj; for (i = 0; i<len1; i++, j++) { if (!obj->arr[i]) continue; nobj->arr[j] = NULL; Jsi_ValueDup2(interp, nobj->arr+j, obj->arr[i]); } obj = arg2->d.obj; for (i = 0; i<len2; i++, j++) { if (!obj->arr[i]) continue; nobj->arr[j] = NULL; Jsi_ValueDup2(interp, nobj->arr+j, obj->arr[i]); } Jsi_ObjSetLength(interp, nobj, len1+len2); va = Jsi_ValueMakeArrayObject(interp, NULL, nobj); return va; } Jsi_RC Jsi_ValueArrayPush(Jsi_Interp *interp, Jsi_Value *arg1, Jsi_Value *arg2) { Jsi_Obj *obj; if (arg1->vt != JSI_VT_OBJECT || !Jsi_ObjIsArray(interp, arg1->d.obj)) return JSI_ERROR; if (!arg2) return JSI_ERROR; int len1 = arg1->d.obj->arrCnt; obj = arg1->d.obj; Jsi_ObjArraySizer(interp, obj, len1); obj->arr[len1] = arg2; Jsi_IncrRefCount(interp, arg2); obj->arrCnt++; return JSI_OK; } Jsi_Value *Jsi_ValueArrayPop(Jsi_Interp *interp, Jsi_Value *v) { if (v->vt 
!= JSI_VT_OBJECT) { Jsi_LogBug("Jsi_ValueArrayPop, target is not object"); return NULL; } Jsi_Obj *o = v->d.obj; if (!o->isarrlist) { Jsi_LogBug("Jsi_ValueArrayPop, target is not array"); return NULL; } if (o->arrCnt<=0) return NULL; int idx = o->arrCnt-1; if (!o->arr[idx]) return NULL; Jsi_DecrRefCount(interp, o->arr[idx]); Jsi_Value *ret = o->arr[idx]; o->arr[idx] = NULL; o->arrCnt--; return ret; } Jsi_Value *Jsi_ValueArrayUnshift(Jsi_Interp *interp, Jsi_Value *v) { if (v->vt != JSI_VT_OBJECT) { Jsi_LogBug("Jsi_ValueArrayUnshift, target is not object"); return NULL; } Jsi_Obj *o = v->d.obj; if (!o->isarrlist) { Jsi_LogBug("Jsi_ValueArrayUnshift, target is not array"); return NULL; } if (o->arrCnt<=0) return NULL; if (!o->arr[0]) return NULL; Jsi_DecrRefCount(interp, o->arr[0]); Jsi_Value *ret = o->arr[0]; o->arr[0] = NULL; o->arrCnt--; return ret; } /* delete array[0], array[1]->array[0] */ void Jsi_ValueArrayShift(Jsi_Interp *interp, Jsi_Value *v) { if (v->vt != JSI_VT_OBJECT) { Jsi_LogBug("Jsi_ValueArrayShift, target is not object"); return; } Jsi_Obj *o = v->d.obj; if (o->isarrlist) { uint i; if (!o->arrCnt) return; if (o->arr[0]) Jsi_DecrRefCount(interp, o->arr[0]); for (i=1; i<o->arrCnt; i++) { o->arr[i-1] = o->arr[i]; } o->arr[o->arrCnt--] = NULL; return; } int len = jsi_SizeOfArray(interp, v->d.obj); if (len <= 0) return; Jsi_Value *v0 = Jsi_ValueArrayIndex(interp, v, 0); if (!v0) return; Jsi_ValueReset(interp, &v0); int i; Jsi_Value *last = v0; for (i = 1; i < len; ++i) { Jsi_Value *t = Jsi_ValueArrayIndex(interp, v, i); if (!t) return; Jsi_ValueCopy(interp, last, t); Jsi_ValueReset(interp, &t); last = t; } Jsi_ObjSetLength(interp, v->d.obj, len - 1); } static Jsi_RC jsi_ArrayFlatSub(Jsi_Interp *interp, Jsi_Obj* nobj, Jsi_Value *arr, int depth) { int i, n = 0, len = jsi_SizeOfArray(interp, arr->d.obj); if (len <= 0) return JSI_OK; Jsi_RC rc = JSI_OK; int clen = jsi_SizeOfArray(interp, nobj); for (i = 0; i < len && rc == JSI_OK; i++) { Jsi_Value *t = 
Jsi_ValueArrayIndex(interp, arr, i); if (t && depth>0 && Jsi_ValueIsArray(interp, t)) rc = jsi_ArrayFlatSub(interp, nobj, t , depth-1); else if (!Jsi_ValueIsUndef(interp, t)) Jsi_ObjArrayAdd(interp, nobj, t); if ((++n + clen)>interp->maxArrayList) return Jsi_LogError("array size exceeded"); } return rc; } static Jsi_RC jsi_ArrayFlatCmd(Jsi_Interp *interp, Jsi_Value *args, Jsi_Value *_this,Jsi_Value **ret, Jsi_Func *funcPtr) { if (_this->vt != JSI_VT_OBJECT || !Jsi_ObjIsArray(interp, _this->d.obj)) return Jsi_LogError("expected array object"); Jsi_Number ndepth = 1; Jsi_Obj *nobj; Jsi_Value *depth = Jsi_ValueArrayIndex(interp, args, 0); if (depth && Jsi_GetNumberFromValue(interp,depth, &ndepth) != JSI_OK) return JSI_ERROR; if (ndepth < 0 || ndepth>1000) return Jsi_LogError("bad depth: %d", (int)ndepth); nobj = Jsi_ObjNewType(interp, JSI_OT_ARRAY); Jsi_ValueMakeArrayObject(interp, ret, nobj ); if (ndepth>0) return jsi_ArrayFlatSub(interp, nobj, _this, ndepth); return JSI_OK; } static Jsi_RC jsi_ArrayConcatCmd(Jsi_Interp *interp, Jsi_Value *args, Jsi_Value *_this,Jsi_Value **ret, Jsi_Func *funcPtr) { if (_this->vt != JSI_VT_OBJECT || !Jsi_ObjIsArray(interp, _this->d.obj)) return Jsi_LogError("expected array object"); Jsi_RC rc = JSI_OK; int curlen, argc, nsiz; Jsi_Obj *obj, *nobj; Jsi_Value *va; obj = _this->d.obj; argc = Jsi_ValueGetLength(interp, args); curlen = jsi_SizeOfArray(interp, obj); Jsi_ObjListifyArray(interp, obj); nobj = Jsi_ObjNewType(interp, JSI_OT_ARRAY); nsiz = obj->arrMaxSize; if (nsiz<=0) nsiz = 100; if (Jsi_ObjArraySizer(interp, nobj, nsiz+1) <= 0) { rc = JSI_ERROR; Jsi_LogError("index too large: %d", nsiz+1); goto bail; } int i, j, m; for (i = 0; i<curlen; i++) { if (!obj->arr[i]) continue; nobj->arr[i] = NULL; Jsi_ValueDup2(interp, nobj->arr+i, obj->arr[i]); } m = i; for (i = 0; i < argc; i++) { va = Jsi_ValueArrayIndex(interp, args, i); if (va->vt == JSI_VT_OBJECT && Jsi_ObjIsArray(interp, va->d.obj)) { int margc = Jsi_ValueGetLength(interp, 
va); Jsi_Obj *mobj = va->d.obj; Jsi_ObjListifyArray(interp, mobj); if (Jsi_ObjArraySizer(interp, nobj, curlen += margc) <= 0) { rc = JSI_ERROR; Jsi_LogError("index too large: %d", curlen); goto bail; } for (j = 0; j<margc; j++, m++) { if (!mobj->arr[j]) continue; nobj->arr[m] = NULL; Jsi_ValueDup2(interp, nobj->arr+m, mobj->arr[j]); } } else { if (Jsi_ObjArraySizer(interp, nobj, ++curlen) <= 0) { rc = JSI_ERROR; Jsi_LogError("index too large: %d", curlen); goto bail; } nobj->arr[m] = NULL; Jsi_ValueDup2(interp, nobj->arr+m++, va); } } Jsi_ObjSetLength(interp, nobj, curlen); Jsi_ValueMakeArrayObject(interp, ret, nobj); return JSI_OK; bail: Jsi_ValueMakeNull(interp, ret); return rc; } static Jsi_RC jsi_ArrayMapCmd(Jsi_Interp *interp, Jsi_Value *args, Jsi_Value *_this,Jsi_Value **ret, Jsi_Func *funcPtr) { if (_this->vt != JSI_VT_OBJECT || !Jsi_ObjIsArray(interp, _this->d.obj)) return Jsi_LogError("expected array object"); Jsi_RC rc = JSI_OK; int curlen, nsiz, i, maa = 0; Jsi_Obj *obj, *nobj; Jsi_Value *func, *vpargs, *nthis = NULL, *sthis; Jsi_Func *fptr = NULL; func = Jsi_ValueArrayIndex(interp, args, 0); if (!Jsi_ValueIsFunction(interp, func)) return Jsi_LogError("expected function"); sthis = Jsi_ValueArrayIndex(interp, args, 1); if (!sthis) sthis = nthis = Jsi_ValueNew1(interp); obj = _this->d.obj; curlen = jsi_SizeOfArray(interp, obj); Jsi_ObjListifyArray(interp, obj); nobj = Jsi_ObjNewType(interp, JSI_OT_ARRAY); nsiz = obj->arrCnt; if (nsiz<=0) nsiz = 1; if (Jsi_ObjArraySizer(interp, nobj, nsiz) <= 0) { Jsi_LogError("index too large: %d", nsiz); rc = JSI_ERROR; goto bail; } Jsi_ValueMakeArrayObject(interp, ret, nobj); Jsi_Value *vobjs[3]; fptr = func->d.obj->d.fobj->func; maa = (fptr->argnames?fptr->argnames->argCnt:0); if (maa>3) maa = 3; for (i = 0; i < curlen; i++) { if (!obj->arr[i]) continue; vobjs[0] = obj->arr[i]; vobjs[1] = (maa>1?Jsi_ValueNewNumber(interp, i):NULL); vobjs[2] = _this; vpargs = Jsi_ValueMakeObject(interp, NULL, Jsi_ObjNewArray(interp, 
vobjs, maa, 0)); Jsi_IncrRefCount(interp, vpargs); nobj->arr[i] = Jsi_ValueNew1(interp); rc = Jsi_FunctionInvoke(interp, func, vpargs, nobj->arr+i, sthis); Jsi_DecrRefCount(interp, vpargs); if( JSI_OK!=rc ) { goto bail; } } Jsi_ObjSetLength(interp, nobj, curlen); if (nthis) Jsi_DecrRefCount(interp, nthis); return JSI_OK; bail: Jsi_ValueMakeNull(interp, ret); if (nthis) Jsi_DecrRefCount(interp, nthis); return rc; } static Jsi_RC jsi_ArrayFilterCmd(Jsi_Interp *interp, Jsi_Value *args, Jsi_Value *_this,Jsi_Value **ret, Jsi_Func *funcPtr) { if (_this->vt != JSI_VT_OBJECT || !Jsi_ObjIsArray(interp, _this->d.obj)) return Jsi_LogError("expected array object"); Jsi_RC rc = JSI_OK; int curlen, nsiz, i, fval, n = 0, maa = 0; Jsi_Obj *obj, *nobj; Jsi_Value *func, *vpargs, *nthis = NULL, *sthis, *nrPtr = NULL; Jsi_Func *fptr = NULL; func = Jsi_ValueArrayIndex(interp, args, 0); if (!Jsi_ValueIsFunction(interp, func)) return Jsi_LogError("expected function"); sthis = Jsi_ValueArrayIndex(interp, args, 1); if (!sthis) sthis = nthis = Jsi_ValueNew1(interp); obj = _this->d.obj; curlen = jsi_SizeOfArray(interp, obj); Jsi_ObjListifyArray(interp, obj); nobj = Jsi_ObjNewType(interp, JSI_OT_ARRAY); nsiz = obj->arrCnt; if (nsiz<=0) nsiz = 1; if (Jsi_ObjArraySizer(interp, nobj, nsiz) <= 0) { Jsi_LogError("index too large: %d", nsiz); rc = JSI_ERROR; goto bail; } Jsi_ValueMakeArrayObject(interp, ret, nobj); nrPtr = Jsi_ValueNew1(interp); Jsi_Value *vobjs[4]; fptr = func->d.obj->d.fobj->func; maa = (fptr->argnames?fptr->argnames->argCnt:0); if (maa>3) maa = 3; for (i = 0; i < curlen; i++) { if (!obj->arr[i]) continue; vobjs[0] = obj->arr[i]; vobjs[1] = (maa>1?Jsi_ValueNewNumber(interp, i):NULL); vobjs[2] = _this; vpargs = Jsi_ValueMakeObject(interp, NULL, Jsi_ObjNewArray(interp, vobjs, maa, 0)); Jsi_IncrRefCount(interp, vpargs); rc = Jsi_FunctionInvoke(interp, func, vpargs, &nrPtr, sthis); Jsi_DecrRefCount(interp, vpargs); fval = Jsi_ValueIsTrue(interp, nrPtr); Jsi_ValueMakeUndef(interp, 
&nrPtr); if( JSI_OK!=rc ) { goto bail; } if (fval) { nobj->arr[n++] = obj->arr[i]; Jsi_IncrRefCount(interp, obj->arr[i]); } } if (nthis) Jsi_DecrRefCount(interp, nthis); Jsi_DecrRefCount(interp, nrPtr); Jsi_ObjSetLength(interp, nobj, n); return JSI_OK; bail: if (nthis) Jsi_DecrRefCount(interp, nthis); if (nrPtr) Jsi_DecrRefCount(interp, nrPtr); Jsi_ValueMakeNull(interp, ret); return rc; } static Jsi_RC jsi_ArrayReverseCmd(Jsi_Interp *interp, Jsi_Value *args, Jsi_Value *_this, Jsi_Value **ret, Jsi_Func *funcPtr) { if (_this->vt != JSI_VT_OBJECT || !Jsi_ObjIsArray(interp, _this->d.obj)) return Jsi_LogError("expected array"); int i, n, m; Jsi_Obj *obj; Jsi_Value *tval, *nthis = NULL, *sthis = Jsi_ValueArrayIndex(interp, args, 1); if (!sthis) sthis = nthis = Jsi_ValueNew1(interp); obj = _this->d.obj; Jsi_ObjListifyArray(interp, obj); m = obj->arrCnt/2; for (i = 0, n=obj->arrCnt-1; i < m; i++, n--) { tval = obj->arr[i]; obj->arr[i] = obj->arr[n]; obj->arr[n] = tval; } Jsi_ValueDup2(interp, ret, _this); if (nthis) Jsi_DecrRefCount(interp, nthis); return JSI_OK; } static Jsi_RC jsi_ArrayForeachCmd(Jsi_Interp *interp, Jsi_Value *args, Jsi_Value *_this,Jsi_Value **ret, Jsi_Func *funcPtr) { if (_this->vt != JSI_VT_OBJECT || !Jsi_ObjIsArray(interp, _this->d.obj)) return Jsi_LogError("expected array object"); Jsi_Obj *obj; uint i; Jsi_Value *func, *vpargs; func = Jsi_ValueArrayIndex(interp, args, 0); if (!Jsi_ValueIsFunction(interp, func)) return Jsi_LogError("expected function"); Jsi_Value *sthis = Jsi_ValueArrayIndex(interp, args, 1); Jsi_Value *nthis = NULL; if (!sthis) sthis = nthis = Jsi_ValueNew1(interp); obj = _this->d.obj; Jsi_ObjListifyArray(interp, obj); Jsi_RC rc = JSI_OK; Jsi_Value *vobjs[3]; Jsi_Func *fptr = func->d.obj->d.fobj->func; int maa = (fptr->argnames?fptr->argnames->argCnt:0); if (maa>3) maa = 3; for (i = 0; i < obj->arrCnt && rc == JSI_OK; i++) { if (!obj->arr[i]) continue; vobjs[0] = obj->arr[i]; vobjs[1] = (maa>1?Jsi_ValueNewNumber(interp, i):NULL); 
vobjs[2] = _this; vpargs = Jsi_ValueMakeObject(interp, NULL, Jsi_ObjNewArray(interp, vobjs, maa, 0)); Jsi_IncrRefCount(interp, vpargs); rc = Jsi_FunctionInvoke(interp, func, vpargs, ret, sthis); Jsi_DecrRefCount(interp, vpargs); } if (nthis) Jsi_DecrRefCount(interp, nthis); return rc; } static Jsi_RC jsi_ArrayFindSubCmd(Jsi_Interp *interp, Jsi_Value *args, Jsi_Value *_this,Jsi_Value **ret, Jsi_Func *funcPtr, int op) { if (_this->vt != JSI_VT_OBJECT || !Jsi_ObjIsArray(interp, _this->d.obj)) return Jsi_LogError("expected array"); Jsi_Obj *obj; uint i; Jsi_RC rc = JSI_OK; Jsi_Value *func, *vpargs, *sthis = Jsi_ValueArrayIndex(interp, args, 1); func = Jsi_ValueArrayIndex(interp, args, 0); if (!Jsi_ValueIsFunction(interp, func)) return Jsi_LogError("expected function"); Jsi_Value *nthis = NULL; if (!sthis) sthis = nthis = Jsi_ValueNew1(interp); obj = _this->d.obj; Jsi_ObjListifyArray(interp, obj); int fval = 0; Jsi_Value *nrPtr = Jsi_ValueNew1(interp); Jsi_Value *vobjs[3]; Jsi_Func *fptr = func->d.obj->d.fobj->func; int maa = (fptr->argnames?fptr->argnames->argCnt:0); if (maa>3) maa = 3; for (i = 0; i < obj->arrCnt && rc == JSI_OK; i++) { if (!obj->arr[i]) continue; vobjs[0] = obj->arr[i]; vobjs[1] = (maa>1?Jsi_ValueNewNumber(interp, i):NULL); vobjs[2] = _this; vpargs = Jsi_ValueMakeObject(interp, NULL, Jsi_ObjNewArray(interp, vobjs, maa, 0)); Jsi_IncrRefCount(interp, vpargs); rc = Jsi_FunctionInvoke(interp, func, vpargs, &nrPtr, sthis); Jsi_DecrRefCount(interp, vpargs); if (rc != JSI_OK) break; fval = Jsi_ValueIsTrue(interp, nrPtr); Jsi_ValueMakeUndef(interp, &nrPtr); if (op == 3) { if (!fval) break; } else if (fval) break; } if (rc == JSI_OK) { if (op == 1 && fval) // Find Jsi_ValueCopy(interp, *ret, obj->arr[i]); else if (op == 2 || op == 3) // Some/Every Jsi_ValueMakeBool(interp, ret, fval); else if (op == 4) Jsi_ValueMakeNumber(interp, ret, (Jsi_Number)(fval?(int)i:-1)); } if (nthis) Jsi_DecrRefCount(interp, nthis); Jsi_DecrRefCount(interp, nrPtr); return rc; } 
static Jsi_RC jsi_ArrayReduceSubCmd(Jsi_Interp *interp, Jsi_Value *args, Jsi_Value *_this,Jsi_Value **ret, Jsi_Func *funcPtr, int op) { if (_this->vt != JSI_VT_OBJECT || !Jsi_ObjIsArray(interp, _this->d.obj)) return Jsi_LogError("expected array"); Jsi_RC rc = JSI_OK; int i; Jsi_Obj *obj; Jsi_Value *func, *vpargs, *ini = Jsi_ValueArrayIndex(interp, args, 1); func = Jsi_ValueArrayIndex(interp, args, 0); if (!Jsi_ValueIsFunction(interp, func)) return Jsi_LogError("expected function"); Jsi_Value *nrPtr = Jsi_ValueNew1(interp); obj = _this->d.obj; Jsi_ObjListifyArray(interp, obj); Jsi_Value *vobjs[4]; int n, rev = (op==2); Jsi_Func *fptr = func->d.obj->d.fobj->func; int maa = (fptr->argnames?fptr->argnames->argCnt:0); if (maa>4) maa = 4; for (n = 0, i = (rev?obj->arrCnt-1:0); (rev?i>=0:i < (int)obj->arrCnt) && rc == JSI_OK; n++, i = (rev?i-1:i+1)) { if (!obj->arr[i]) continue; if (n==0 && !ini) { ini = obj->arr[i]; continue; } vobjs[0] = ini; vobjs[1] = obj->arr[i]; vobjs[2] = (maa>2?Jsi_ValueNewNumber(interp, i):NULL); vobjs[3] = _this; vpargs = Jsi_ValueMakeObject(interp, NULL, Jsi_ObjNewArray(interp, vobjs, maa, 0)); Jsi_IncrRefCount(interp, vpargs); rc = Jsi_FunctionInvoke(interp, func, vpargs, &nrPtr, NULL); Jsi_DecrRefCount(interp, vpargs); if (rc != JSI_OK) break; ini = nrPtr; } if (rc == JSI_OK && ini) Jsi_ValueCopy(interp, *ret, ini); Jsi_DecrRefCount(interp, nrPtr); return rc; } static Jsi_RC jsi_ArrayFindCmd(Jsi_Interp *interp, Jsi_Value *args, Jsi_Value *_this,Jsi_Value **ret, Jsi_Func *funcPtr) { return jsi_ArrayFindSubCmd(interp, args, _this, ret, funcPtr, 1); } static Jsi_RC jsi_ArraySomeCmd(Jsi_Interp *interp, Jsi_Value *args, Jsi_Value *_this,Jsi_Value **ret, Jsi_Func *funcPtr) { return jsi_ArrayFindSubCmd(interp, args, _this, ret, funcPtr, 2); } static Jsi_RC jsi_ArrayEveryCmd(Jsi_Interp *interp, Jsi_Value *args, Jsi_Value *_this,Jsi_Value **ret, Jsi_Func *funcPtr) { return jsi_ArrayFindSubCmd(interp, args, _this, ret, funcPtr, 3); } static Jsi_RC 
jsi_ArrayFindIndexCmd(Jsi_Interp *interp, Jsi_Value *args, Jsi_Value *_this,Jsi_Value **ret, Jsi_Func *funcPtr) { return jsi_ArrayFindSubCmd(interp, args, _this, ret, funcPtr, 4); } static Jsi_RC jsi_ArrayReduceCmd(Jsi_Interp *interp, Jsi_Value *args, Jsi_Value *_this,Jsi_Value **ret, Jsi_Func *funcPtr) { return jsi_ArrayReduceSubCmd(interp, args, _this, ret, funcPtr, 1); } static Jsi_RC jsi_ArrayReduceRightCmd(Jsi_Interp *interp, Jsi_Value *args, Jsi_Value *_this,Jsi_Value **ret, Jsi_Func *funcPtr) { return jsi_ArrayReduceSubCmd(interp, args, _this, ret, funcPtr, 2); } static Jsi_RC jsi_ArrayIsArrayCmd(Jsi_Interp *interp, Jsi_Value *args, Jsi_Value *_this,Jsi_Value **ret, Jsi_Func *funcPtr) { bool b = 0; Jsi_Value *sthis = _this; if (_this->vt == JSI_VT_OBJECT && _this->d.obj->ot == JSI_OT_FUNCTION && _this->d.obj->__proto__ == interp->Array_prototype->d.obj->__proto__ ) sthis = Jsi_ValueArrayIndex(interp, args, 0); if (sthis && sthis->vt == JSI_VT_OBJECT && Jsi_ObjIsArray(interp, sthis->d.obj)) b = 1; Jsi_ValueMakeBool(interp, ret, b); return JSI_OK; } static Jsi_RC jsi_ArrayIndexSubCmd(Jsi_Interp *interp, Jsi_Value *args, Jsi_Value *_this,Jsi_Value **ret, Jsi_Func *funcPtr, int op) { int istart = 0, n, i = 0, dir=1, idx=-1; Jsi_Value *seq = Jsi_ValueArrayIndex(interp, args, 0), *start = Jsi_ValueArrayIndex(interp, args, 1); Jsi_Obj *obj = _this->d.obj; if (_this->vt != JSI_VT_OBJECT || !Jsi_ObjIsArray(interp, _this->d.obj)) return Jsi_LogError("expected array object"); if (!seq) { goto bail; } n = jsi_SizeOfArray(interp, obj); if (n == 0) { goto bail; } Jsi_Number nstart; if (op == 2) { istart = n-1; } if (start && Jsi_GetNumberFromValue(interp,start, &nstart)==JSI_OK) { istart = (int)nstart; if (istart > n) goto bail; if (istart < 0) istart = (n+istart); if (istart<0) goto bail; } if (op == 2) { istart = n-1; dir = -1; } Jsi_ObjListifyArray(interp, obj); for (i = istart; ; i+=dir) { if ((dir>0 && i>=n) || (dir<0 && i<0) || i>=(int)obj->arrCnt) break; if 
(obj->arr[i] && Jsi_ValueCmp(interp, obj->arr[i], seq, JSI_CMP_EXACT)==0) { idx = i; break; } } bail: if (op == 3) Jsi_ValueMakeBool(interp, ret, (idx!=-1)); else Jsi_ValueMakeNumber(interp, ret, idx); return JSI_OK; } static Jsi_RC jsi_ArrayIndexOfCmd(Jsi_Interp *interp, Jsi_Value *args, Jsi_Value *_this,Jsi_Value **ret, Jsi_Func *funcPtr) { return jsi_ArrayIndexSubCmd(interp, args, _this, ret, funcPtr, 1); } static Jsi_RC jsi_ArrayLastindexOfCmd(Jsi_Interp *interp, Jsi_Value *args, Jsi_Value *_this,Jsi_Value **ret, Jsi_Func *funcPtr) { return jsi_ArrayIndexSubCmd(interp, args, _this, ret, funcPtr, 2); } static Jsi_RC jsi_ArrayIncludesCmd(Jsi_Interp *interp, Jsi_Value *args, Jsi_Value *_this,Jsi_Value **ret, Jsi_Func *funcPtr) { return jsi_ArrayIndexSubCmd(interp, args, _this, ret, funcPtr, 3); } static Jsi_RC jsi_ArraySizeOfCmd(Jsi_Interp *interp, Jsi_Value *args, Jsi_Value *_this,Jsi_Value **ret, Jsi_Func *funcPtr) { if (_this->vt != JSI_VT_OBJECT || !Jsi_ObjIsArray(interp, _this->d.obj)) return Jsi_LogError("expected array object"); int i = jsi_SizeOfArray(interp, _this->d.obj); Jsi_ValueMakeNumber(interp, ret, i); return JSI_OK; } static Jsi_RC jsi_ArrayShiftCmd(Jsi_Interp *interp, Jsi_Value *args, Jsi_Value *_this,Jsi_Value **ret, Jsi_Func *funcPtr) { if (_this->vt != JSI_VT_OBJECT || !Jsi_ObjIsArray(interp, _this->d.obj)) return Jsi_LogError("expected array object"); Jsi_Value *v; Jsi_Obj *obj = _this->d.obj; Jsi_ObjListifyArray(interp, obj); uint n = jsi_SizeOfArray(interp, obj); if (n<=0) { Jsi_ValueMakeUndef(interp, ret); } else { n--; v = obj->arr[0]; memmove(obj->arr, obj->arr+1, n*sizeof(Jsi_Value*)); obj->arr[n] = NULL; Jsi_ValueDup2(interp, ret, v); Jsi_DecrRefCount(interp, v); Jsi_ObjSetLength(interp, obj, n); } return JSI_OK; } static Jsi_RC jsi_ArrayUnshiftCmd(Jsi_Interp *interp, Jsi_Value *args, Jsi_Value *_this,Jsi_Value **ret, Jsi_Func *funcPtr) { if (_this->vt != JSI_VT_OBJECT || !Jsi_ObjIsArray(interp, _this->d.obj)) return 
Jsi_LogError("expected array object"); Jsi_Obj *obj = _this->d.obj; int argc = Jsi_ValueGetLength(interp, args); int curlen = jsi_SizeOfArray(interp, obj); if (argc <= 0) { Jsi_ValueMakeNumber(interp, ret, 0); return JSI_OK; } Jsi_ObjListifyArray(interp, obj); if (Jsi_ObjArraySizer(interp, obj, curlen+argc)<=0) return Jsi_LogError("too long"); memmove(obj->arr+argc, obj->arr, (curlen)*sizeof(Jsi_Value*)); obj->arrCnt += argc; int i; for (i = 0; i < argc; ++i) { Jsi_Value *ov = Jsi_ValueArrayIndex(interp, args, i); obj->arr[i] = NULL; if (!ov) { Jsi_LogBug("Arguments Error"); continue; } obj->arr[i] = ov; Jsi_IncrRefCount(interp, ov); } Jsi_ObjSetLength(interp, obj, curlen+argc); Jsi_ValueMakeNumber(interp, ret, jsi_SizeOfArray(interp, obj)); return JSI_OK; } static Jsi_RC jsi_ArrayFillCmd(Jsi_Interp *interp, Jsi_Value *args, Jsi_Value *_this,Jsi_Value **ret, Jsi_Func *funcPtr) { if (_this->vt != JSI_VT_OBJECT || !Jsi_ObjIsArray(interp, _this->d.obj)) return Jsi_LogError("expected array object"); Jsi_RC rc = JSI_OK; int istart = 0, iend, n, nsiz; Jsi_Number nstart = 0, nend = 0; // TODO: merge with code in ArraySliceCmd. 
Jsi_Value *value = Jsi_ValueArrayIndex(interp, args, 0), *start = Jsi_ValueArrayIndex(interp, args, 1), *end = Jsi_ValueArrayIndex(interp, args, 2); Jsi_Obj *obj = _this->d.obj; n = jsi_SizeOfArray(interp, obj); if (start && Jsi_GetNumberFromValue(interp, start, &nstart) == JSI_OK) { istart = (int)nstart; if (istart > n) goto bail; if (istart < 0) istart = (n+istart); if (istart<0) goto bail; } if (n == 0) { goto bail; } iend = n-1; if (end && Jsi_GetNumberFromValue(interp,end, &nend) == JSI_OK) { iend = (int) nend; if (iend >= n) iend = n; if (iend < 0) iend = (n+iend); if (iend<0) goto bail; } nsiz = iend-istart+1; if (nsiz<=0) goto bail; int i; for (i = istart; i <= iend; i++) { if (obj->arr[i]) Jsi_ValueCopy(interp, obj->arr[i], value); else obj->arr[i] = Jsi_ValueDup(interp, value); } bail: if (_this != *ret) { Jsi_ValueMove(interp, *ret, _this); /*if (*ret) Jsi_DecrRefCount(interp, *ret); *ret = _this; Jsi_IncrRefCount(interp, *ret);*/ } return rc; } static Jsi_RC jsi_ArraySliceCmd(Jsi_Interp *interp, Jsi_Value *args, Jsi_Value *_this,Jsi_Value **ret, Jsi_Func *funcPtr) { if (_this->vt != JSI_VT_OBJECT || !Jsi_ObjIsArray(interp, _this->d.obj)) return Jsi_LogError("expected array object"); Jsi_RC rc = JSI_OK; int istart = 0, iend, n, nsiz; Jsi_Number nstart; Jsi_Obj *nobj, *obj; Jsi_Value *start = Jsi_ValueArrayIndex(interp, args, 0), *end = Jsi_ValueArrayIndex(interp, args, 1); if (!start) { goto bail; } obj = _this->d.obj; n = jsi_SizeOfArray(interp, obj); if (Jsi_GetNumberFromValue(interp,start, &nstart) == JSI_OK) { istart = (int)nstart; if (istart > n) goto done; if (istart < 0) istart = (n+istart); if (istart<0) goto bail; } if (n == 0) { done: Jsi_ValueMakeArrayObject(interp, ret, Jsi_ObjNewType(interp, JSI_OT_ARRAY)); return JSI_OK; } Jsi_Number nend; iend = n-1; if (end && Jsi_GetNumberFromValue(interp,end, &nend) == JSI_OK) { iend = (int) nend; if (iend >= n) iend = n; if (iend < 0) iend = (n+iend); if (iend<0) goto bail; } nsiz = iend-istart+1; if 
(nsiz<=0) goto done; Jsi_ObjListifyArray(interp, obj); nobj = Jsi_ObjNewType(interp, JSI_OT_ARRAY); if (Jsi_ObjArraySizer(interp, nobj, nsiz) <= 0) { rc = Jsi_LogError("index too large: %d", nsiz); goto bail; } int i, m; for (m = 0, i = istart; i <= iend; i++, m++) { if (!obj->arr[i]) continue; nobj->arr[m] = NULL; Jsi_ValueDup2(interp, nobj->arr+m, obj->arr[i]); } Jsi_ObjSetLength(interp, nobj, nsiz); Jsi_ValueMakeArrayObject(interp, ret, nobj); return JSI_OK; bail: Jsi_ValueMakeNull(interp, ret); return rc; } typedef struct { Jsi_Interp *interp; int flags; int mode; bool unique; Jsi_Value *compare; int errCnt; } SortInfo; static const char *sortArrayStrs[] = {"default", "desc", "dict", "nocase", 0}; static Jsi_OptionSpec jsi_ArraySortOptions[] = { JSI_OPT(CUSTOM, SortInfo, mode, .help="Mode to sort by", .flags=0, .custom=Jsi_Opt_SwitchEnum, .data=sortArrayStrs), JSI_OPT(FUNC, SortInfo, compare, .help="Function to do comparison", .flags=0, .custom=0, .data=(void*)"val1,val2"), JSI_OPT(BOOL, SortInfo, unique, .help="Eliminate duplicate items"), JSI_OPT_END(SortInfo) }; #ifdef NO_QSORT_R SortInfo *curSortInfo = NULL; static int SortSubCmd(const void *p1, const void *p2) { SortInfo *si = curSortInfo; #else #ifdef __WIN32 static int SortSubCmd(void *thunk, const void *p1, const void *p2) #else static int SortSubCmd(const void *p1, const void *p2, void *thunk) #endif { SortInfo *si = (SortInfo *)thunk; #endif Jsi_Interp *interp = si->interp; int sortFlags = si->flags; if (interp == NULL || interp->deleting) return 0; Jsi_Value *v1 = *(Jsi_Value**)p1, *v2 = *(Jsi_Value**)p2; int rc = 0; if (v1 != NULL && v2 != NULL) { VALCHK(v1); VALCHK(v2); if (!si->compare) rc = Jsi_ValueCmp(interp, v1, v2, sortFlags); else { Jsi_Value *vv[2] = {v1, v2}; Jsi_Value *retP = Jsi_ValueNew1(interp); Jsi_Value *vpargs = Jsi_ValueMakeObject(interp, NULL, Jsi_ObjNewArray(interp, vv, 2, 0)); Jsi_IncrRefCount(interp, vpargs); rc = Jsi_FunctionInvoke(interp, si->compare, vpargs, &retP, NULL); 
Jsi_DecrRefCount(interp, vpargs); if (rc == JSI_OK) { Jsi_Number d = 0; if (Jsi_ValueGetNumber(interp, retP, &d) == JSI_OK) rc = -(int)d; else { if (!si->errCnt) Jsi_LogWarn("invalid function return"); si->errCnt++; } } Jsi_DecrRefCount(interp, retP); } } else { if (v1 == v2) rc = 0; else if (v1 == NULL) rc = 1; else rc = -1; } if ((sortFlags&JSI_SORT_DESCEND)) return rc; return -rc; } Jsi_RC Jsi_ValueArraySort(Jsi_Interp *interp, Jsi_Value *val, int flags) { if (val->vt != JSI_VT_OBJECT || !Jsi_ObjIsArray(interp, val->d.obj)) { return JSI_ERROR; } Jsi_Obj *obj = val->d.obj; Jsi_ObjListifyArray(interp, obj); if (obj->arrCnt <= 0) { return JSI_OK; } #ifdef __WIN32 #define qsort_r qsort_s #endif SortInfo si = {}; si.interp = interp; si.flags = flags; #ifdef NO_QSORT_R curSortInfo = &si; qsort(obj->arr, obj->arrCnt, sizeof(Jsi_Value*), SortSubCmd); curSortInfo = NULL; #else qsort_r(obj->arr, obj->arrCnt, sizeof(Jsi_Value*), SortSubCmd, &si); #endif return JSI_OK; } static Jsi_RC jsi_ArraySortCmd(Jsi_Interp *interp, Jsi_Value *args, Jsi_Value *_this,Jsi_Value **ret, Jsi_Func *funcPtr) { if (_this->vt != JSI_VT_OBJECT || !Jsi_ObjIsArray(interp, _this->d.obj)) return Jsi_LogError("expected array object"); int flags = 0, i, curlen, hasopt = 0; Jsi_Value *v, *arg = NULL; SortInfo si = {}; si.interp = interp; Jsi_Obj *obj = _this->d.obj; curlen = obj->arrCnt; if (curlen <= 1) { goto done; } arg = Jsi_ValueArrayIndex(interp, args, 0); if (arg) { if (Jsi_ValueIsObjType(interp, arg, JSI_OT_OBJECT)) { if (Jsi_OptionsProcess(interp, jsi_ArraySortOptions, &si, arg, 0) < 0) return JSI_ERROR; hasopt = 1; switch (si.mode) { case 1: flags |= JSI_SORT_DESCEND; break; case 2: flags |= JSI_SORT_DICT; break; case 3: flags |= JSI_SORT_NOCASE; break; } } else if (Jsi_ValueIsObjType(interp, arg, JSI_OT_FUNCTION)) si.compare = arg; else return Jsi_LogError("expected object or function"); } si.flags = flags; Jsi_ObjListifyArray(interp, obj); #ifdef NO_QSORT_R /* TODO: mutex. 
*/ curSortInfo = &si; qsort(obj->arr, curlen, sizeof(Jsi_Value*), SortSubCmd); #else qsort_r(obj->arr, curlen, sizeof(Jsi_Value*), SortSubCmd, &si); #endif if (interp->deleting) { #ifdef NO_QSORT_R curSortInfo = NULL; #endif return JSI_ERROR; } if (si.unique) { int n, diff = 1, dupCnt=0; for (n=0, i=1; i<(int)obj->arrCnt; i++) { if (obj->arr[n] == obj->arr[i]) diff = 1; else #ifdef NO_QSORT_R diff = SortSubCmd(&obj->arr[n], &obj->arr[i]); #else #ifdef __WIN32 diff = SortSubCmd(&si, &obj->arr[n], &obj->arr[i]); #else diff = SortSubCmd(&obj->arr[n], &obj->arr[i], &si); #endif #endif if (diff) { n++; if (n!=i) obj->arr[n] = obj->arr[i]; } else { dupCnt++; if (obj->arr[i]) Jsi_DecrRefCount(interp, obj->arr[i]); obj->arr[i] = 0; } } obj->arrCnt -= dupCnt; } #ifdef NO_QSORT_R curSortInfo = NULL; #endif if (hasopt) Jsi_OptionsFree(interp, jsi_ArraySortOptions, &si, 0); done: v = Jsi_ValueMakeObject(interp, NULL, obj); Jsi_ValueReplace(interp, ret, v); return JSI_OK; Jsi_ValueMakeNull(interp, ret); return JSI_OK; } static Jsi_RC jsi_ArraySpliceCmd(Jsi_Interp *interp, Jsi_Value *args, Jsi_Value *_this,Jsi_Value **ret, Jsi_Func *funcPtr) { if (_this->vt != JSI_VT_OBJECT || !Jsi_ObjIsArray(interp, _this->d.obj)) return Jsi_LogError("expected array object"); int newlen, argc, istart, n, rhowmany, ilen, curlen; Jsi_Value *va, *start, *howmany; Jsi_Obj *nobj, *obj = _this->d.obj; start = Jsi_ValueArrayIndex(interp, args, 0); howmany = Jsi_ValueArrayIndex(interp, args, 1); argc = Jsi_ValueGetLength(interp, args); istart = 0; ilen = (argc>=2 ? argc - 2 : 0); n = jsi_SizeOfArray(interp, obj); curlen = n; if (!start) { goto bail2; } nobj = Jsi_ObjNewType(interp, JSI_OT_ARRAY); Jsi_ValueMakeArrayObject(interp, ret, nobj); Jsi_ObjSetLength(interp, nobj, 0); /* Determine start index. 
*/ Jsi_Number nstart; if (Jsi_GetNumberFromValue(interp, start, &nstart) == JSI_OK) { istart = (int)nstart; if (istart > n) goto bail; if (istart < 0) istart = (n+istart); if (istart<0) istart=0; } Jsi_Number nhow; rhowmany = n-istart; if (howmany && Jsi_GetNumberFromValue(interp, howmany, &nhow) == JSI_OK) { rhowmany = (int)nhow; if (rhowmany >= (n-istart)) rhowmany = n-istart; if (rhowmany < 0) rhowmany = (n-istart); if (rhowmany<0) goto bail; } if (curlen < 0) { Jsi_ObjSetLength(interp, obj, curlen=0); } Jsi_ObjListifyArray(interp, obj); Jsi_ObjArraySizer(interp, nobj, rhowmany); /* Move elements to return object. */ int i, j, m; for (m=0, j = 0, i = istart; m<rhowmany && m<curlen; m++, i++, j++) { if (!obj->arr[i]) continue; nobj->arr[m] = obj->arr[i]; obj->arr[i] = NULL; } Jsi_ObjSetLength(interp, nobj, m); /* Shift remaining down. */ for (; rhowmany && i<curlen; i++) { obj->arr[i-rhowmany] = obj->arr[i]; obj->arr[i] = NULL; } curlen -= j; /* Add elements. */ newlen = curlen + argc - (argc>=2?2:1); if (Jsi_ObjArraySizer(interp, obj, newlen+3) <= 0) { Jsi_LogError("too long"); Jsi_ValueMakeUndef(interp, ret); return JSI_ERROR; } if (ilen>0) { for (i = curlen-1; i>=istart; i--) { obj->arr[i+ilen] = obj->arr[i]; obj->arr[i] = NULL; } for (m=istart, i = 2; i<argc; m++,i++) { va = Jsi_ValueArrayIndex(interp, args, i); if (!va) continue; obj->arr[m] = NULL; Jsi_ValueDup2(interp, obj->arr+m, va); } } Jsi_ObjSetLength(interp, obj, newlen); bail: return JSI_OK; bail2: Jsi_ValueMakeNull(interp, ret); return JSI_OK; } static Jsi_RC jsi_ArrayConstructor(Jsi_Interp *interp, Jsi_Value *args, Jsi_Value *_this, Jsi_Value **ret, Jsi_Func *funcPtr) { int argc = Jsi_ValueGetLength(interp, args), iscons = Jsi_FunctionIsConstructor(funcPtr); Jsi_Value *target; Jsi_Value *v = Jsi_ValueArrayIndex(interp, args, 0); if (iscons) { target = _this; Jsi_ValueMakeArrayObject(interp, &_this, Jsi_ObjNewArray(interp, NULL, 0, 0)); } else { Jsi_Obj *o = Jsi_ObjNewType(interp, JSI_OT_ARRAY); 
o->__proto__ = interp->Array_prototype; Jsi_ValueMakeObject(interp, ret, o); target = *ret; } if (argc == 1 && v && Jsi_ValueIsNumber(interp, v)) { Jsi_Number nv; Jsi_GetNumberFromValue(interp,v, &nv); int len = (int)nv; if (!Jsi_NumberIsInteger(v->d.num) || len < 0) return Jsi_LogError("Invalid array length"); target->d.obj->isarrlist = 1; if (Jsi_ObjArraySizer(interp, target->d.obj, len) <= 0) return JSI_ERROR; } else { int i; target->d.obj->isarrlist = 1; if (Jsi_ObjArraySizer(interp, target->d.obj, 0) <= 0) return JSI_ERROR; for (i = 0; i < argc; ++i) { Jsi_Value *argv = Jsi_ValueArrayIndex(interp, args, i); ; Jsi_ValueInsertArray(interp, _this, i, argv, 0); } } if (iscons) Jsi_ValueDup2(interp, ret, target); return JSI_OK; } static Jsi_CmdSpec arrayCmds[] = { { "Array", jsi_ArrayConstructor, 0,-1, "...", .help="jsi_Array constructor", .retType=(uint)JSI_TT_ARRAY, .flags=JSI_CMD_IS_CONSTRUCTOR }, { "concat", jsi_ArrayConcatCmd, 0,-1, "...", .help="Return array with args appended", .retType=(uint)JSI_TT_ARRAY }, { "every", jsi_ArrayEveryCmd, 1, 1, "callback:function", .help="Returns true if every value in array satisfies the test", .retType=(uint)JSI_TT_ANY }, { "fill", jsi_ArrayFillCmd, 1, 3, "value:any, start:number=0, end:number=-1", .help="Fill an array with values", .retType=(uint)JSI_TT_ARRAY }, { "filter", jsi_ArrayFilterCmd, 1, 2, "callback:function, this:object=void", .help="Return a filtered array", .retType=(uint)JSI_TT_ARRAY }, { "find", jsi_ArrayFindCmd, 1, 1, "callback:function", .help="Returns the value of the first element in the array that satisfies the test", .retType=(uint)JSI_TT_ANY }, { "findIndex", jsi_ArrayFindIndexCmd, 1, 1, "callback:function", .help="Returns the index of the first element in the array that satisfies the test", .retType=(uint)JSI_TT_ANY }, { "flat", jsi_ArrayFlatCmd, 0, 1, "depth:number=1", .help="Flatten an arra", .retType=(uint)JSI_TT_ARRAY }, { "forEach", jsi_ArrayForeachCmd, 1, 2, "callback:function, 
this:object=void", .help="Invoke function with each item in object", .retType=(uint)JSI_TT_VOID }, { "includes", jsi_ArrayIncludesCmd, 1, 1, "val:any", .help="Returns true if array contains value", .retType=(uint)JSI_TT_ANY }, { "indexOf", jsi_ArrayIndexOfCmd, 1, 2, "str:any, startIdx:number=0", .help="Return index of first occurrance in array", .retType=(uint)JSI_TT_NUMBER }, { "isArray", jsi_ArrayIsArrayCmd, 0, 0, "", .help="True if val array", .retType=(uint)JSI_TT_BOOLEAN }, { "join", jsi_ArrayJoinCmd, 0, 1, "sep:string=''", .help="Return elements joined by char", .retType=(uint)JSI_TT_STRING }, { "lastIndexOf",jsi_ArrayLastindexOfCmd,1, 2, "val:any, start:number=0", .help="Return index of last occurence in array", .retType=(uint)JSI_TT_NUMBER }, { "map", jsi_ArrayMapCmd, 1, 2, "callback:function, this:object=void", .help="Creates a new array with the results of calling a provided function on every element in this array", .retType=(uint)JSI_TT_ARRAY }, { "pop", jsi_ArrayPopCmd, 0, 0, "", .help="Remove and return last element of array", .retType=(uint)JSI_TT_ANY }, { "push", jsi_ArrayPushCmd, 1,-1, "val:any, ...", .help="Push one or more elements onto array and return size", .retType=(uint)JSI_TT_NUMBER }, { "reduce", jsi_ArrayReduceCmd, 1, 2, "callback:function, initial:any", .help="Return a reduced array", .retType=(uint)JSI_TT_ANY }, { "reduceRight",jsi_ArrayReduceRightCmd,1, 2, "callback:function, initial:any", .help="Return a reduced array", .retType=(uint)JSI_TT_ANY }, { "shift", jsi_ArrayShiftCmd, 0, 0, "", .help="Remove first element and shift downwards", .retType=(uint)JSI_TT_ANY }, { "sizeOf", jsi_ArraySizeOfCmd, 0, 0, "", .help="Return size of array", .retType=(uint)JSI_TT_NUMBER }, { "slice", jsi_ArraySliceCmd, 1, 2, "start:number, end:number=void", .help="Return sub-array", .retType=(uint)JSI_TT_ARRAY }, { "some", jsi_ArraySomeCmd, 1, 2, "callback:function, this:object=void", .help="Return true if function returns true some element", 
.retType=(uint)JSI_TT_BOOLEAN }, { "sort", jsi_ArraySortCmd, 0, 1, "options:function|object=void", .help="Sort an array", .retType=(uint)JSI_TT_ARRAY, .flags=0, .info=0, .opts=jsi_ArraySortOptions }, { "splice", jsi_ArraySpliceCmd, 1,-1, "start:number, howmany:number=void, ...", .help="Change the content of an array, adding new elements while removing old elements", .retType=(uint)JSI_TT_ARRAY }, { "reverse", jsi_ArrayReverseCmd, 0, 0, "", .help="Reverse order of all elements in an array", .retType=(uint)JSI_TT_ARRAY }, { "unshift", jsi_ArrayUnshiftCmd, 0,-1, "...", .help="Add new elements to start of array and return size", .retType=(uint)JSI_TT_NUMBER }, { NULL, 0,0,0,0, .help="Provide access to array objects" } }; Jsi_RC jsi_InitArray(Jsi_Interp *interp, int release) { if (release) return JSI_OK; interp->Array_prototype = Jsi_CommandCreateSpecs(interp, "Array", arrayCmds, NULL, JSI_CMDSPEC_ISOBJ); return JSI_OK; } #endif
static Jsi_RC jsi_ArrayShiftCmd(Jsi_Interp *interp, Jsi_Value *args, Jsi_Value *_this,Jsi_Value **ret, Jsi_Func *funcPtr) { if (_this->vt != JSI_VT_OBJECT || !Jsi_ObjIsArray(interp, _this->d.obj)) return Jsi_LogError("expected array object"); Jsi_Value *v; Jsi_Obj *obj = _this->d.obj; Jsi_ObjListifyArray(interp, obj); uint n = Jsi_ObjGetLength(interp, obj); assert(n <= obj->arrCnt); if (n<=0) { Jsi_ValueMakeUndef(interp, ret); } else { n--; v = obj->arr[0]; memmove(obj->arr, obj->arr+1, n*sizeof(Jsi_Value*)); obj->arr[n] = NULL; Jsi_ValueDup2(interp, ret, v); Jsi_DecrRefCount(interp, v); Jsi_ObjSetLength(interp, obj, n); } return JSI_OK; }
static Jsi_RC jsi_ArrayShiftCmd(Jsi_Interp *interp, Jsi_Value *args, Jsi_Value *_this,Jsi_Value **ret, Jsi_Func *funcPtr) { if (_this->vt != JSI_VT_OBJECT || !Jsi_ObjIsArray(interp, _this->d.obj)) return Jsi_LogError("expected array object"); Jsi_Value *v; Jsi_Obj *obj = _this->d.obj; Jsi_ObjListifyArray(interp, obj); uint n = jsi_SizeOfArray(interp, obj); if (n<=0) { Jsi_ValueMakeUndef(interp, ret); } else { n--; v = obj->arr[0]; memmove(obj->arr, obj->arr+1, n*sizeof(Jsi_Value*)); obj->arr[n] = NULL; Jsi_ValueDup2(interp, ret, v); Jsi_DecrRefCount(interp, v); Jsi_ObjSetLength(interp, obj, n); } return JSI_OK; }
{'added': [(10, 'static uint jsi_SizeOfArray(Jsi_Interp *interp, Jsi_Obj *obj) {'), (11, ' if (!obj || !obj->arr)'), (12, ' return 0;'), (13, ' return obj->arrCnt;'), (14, '}'), (15, ''), (28, ' int curlen = jsi_SizeOfArray(interp, obj);'), (36, ' Jsi_ValueMakeNumber(interp, ret, jsi_SizeOfArray(interp, obj));'), (50, ' int i = jsi_SizeOfArray(interp, obj) - 1;'), (83, ' curlen = jsi_SizeOfArray(interp, _this->d.obj);'), (94, ' if (0 == (argc=jsi_SizeOfArray(interp, _this->d.obj))) {'), (238, ' int len = jsi_SizeOfArray(interp, v->d.obj);'), (260, ' int i, n = 0, len = jsi_SizeOfArray(interp, arr->d.obj);'), (263, ' int clen = jsi_SizeOfArray(interp, nobj);'), (307, ' curlen = jsi_SizeOfArray(interp, obj);'), (379, ' curlen = jsi_SizeOfArray(interp, obj);'), (438, ' curlen = jsi_SizeOfArray(interp, obj);'), (614, ' int i;'), (700, ' n = jsi_SizeOfArray(interp, obj);'), (752, ' int i = jsi_SizeOfArray(interp, _this->d.obj);'), (763, ' uint n = jsi_SizeOfArray(interp, obj);'), (784, ' int curlen = jsi_SizeOfArray(interp, obj);'), (804, ' Jsi_ValueMakeNumber(interp, ret, jsi_SizeOfArray(interp, obj));'), (818, ' n = jsi_SizeOfArray(interp, obj);'), (881, ' n = jsi_SizeOfArray(interp, obj);'), (1145, ' n = jsi_SizeOfArray(interp, obj);'), (1181, ' Jsi_ObjSetLength(interp, obj, curlen=0);')], 'deleted': [(22, ' int curlen = Jsi_ObjGetLength(interp, obj);'), (23, ' if (curlen < 0) {'), (24, ' Jsi_ObjSetLength(interp, obj, 0);'), (25, ' }'), (26, ''), (34, ' Jsi_ValueMakeNumber(interp, ret, Jsi_ObjGetLength(interp, obj));'), (48, ' int i = Jsi_ObjGetLength(interp, obj) - 1;'), (81, ' curlen = Jsi_ObjGetLength(interp, _this->d.obj);'), (92, ' if (0 == (argc=Jsi_ObjGetLength(interp, _this->d.obj))) {'), (236, ' int len = Jsi_ObjGetLength(interp, v->d.obj);'), (258, ' int i, n = 0, len = Jsi_ObjGetLength(interp, arr->d.obj);'), (261, ' int clen = Jsi_ObjGetLength(interp, nobj);'), (305, ' curlen = Jsi_ObjGetLength(interp, obj);'), (306, ' if (curlen < 0) {'), (307, ' 
Jsi_ObjSetLength(interp, obj, 0);'), (308, ' }'), (380, ' curlen = Jsi_ObjGetLength(interp, obj);'), (381, ' if (curlen < 0) {'), (382, ' Jsi_ObjSetLength(interp, obj, 0);'), (383, ' }'), (442, ' curlen = Jsi_ObjGetLength(interp, obj);'), (443, ' if (curlen < 0) {'), (444, ' Jsi_ObjSetLength(interp, obj, 0);'), (445, ' }'), (525, ' int curlen;'), (538, ' curlen = Jsi_ObjGetLength(interp, obj);'), (539, ' if (curlen < 0) {'), (540, ' Jsi_ObjSetLength(interp, obj, 0);'), (541, ' }'), (569, ' int curlen;'), (582, ' curlen = Jsi_ObjGetLength(interp, obj);'), (583, ' if (curlen < 0) {'), (584, ' Jsi_ObjSetLength(interp, obj, 0);'), (585, ' }'), (631, ' int curlen, i;'), (641, ' curlen = Jsi_ObjGetLength(interp, obj);'), (642, ' if (curlen < 0)'), (643, ' Jsi_ObjSetLength(interp, obj, 0);'), (720, ' n = Jsi_ObjGetLength(interp, obj);'), (772, ' int i = Jsi_ObjGetLength(interp, _this->d.obj);'), (783, ' uint n = Jsi_ObjGetLength(interp, obj);'), (784, ' assert(n <= obj->arrCnt);'), (805, ' int curlen = Jsi_ObjGetLength(interp, obj);'), (806, ' if (curlen < 0) {'), (807, ' Jsi_ObjSetLength(interp, obj, 0);'), (808, ' }'), (828, ' Jsi_ValueMakeNumber(interp, ret, Jsi_ObjGetLength(interp, obj));'), (842, ' n = Jsi_ObjGetLength(interp, obj);'), (905, ' n = Jsi_ObjGetLength(interp, obj);'), (1169, ' n = Jsi_ObjGetLength(interp, obj);'), (1205, ' Jsi_ObjSetLength(interp, obj, 0);')]}
27
51
1,148
9,934
21
178
4
https://github.com/pcmacdon/jsish
CVE-2020-22875
CWE-190
735
ne.c
C
r_bin_ne_get_entrypoints
/* radare - LGPL - Copyright 2019-2022 - GustavoLCR */ #include "ne.h" static char *__get_target_os(r_bin_ne_obj_t *bin) { switch (bin->ne_header->targOS) { case 1: return "OS/2"; case 2: return "Windows"; case 3: return "European MS-DOS 4.x"; case 4: return "Windows 386"; case 5: return "BOSS (Borland Operating System Services)"; default: return "Unknown"; } } static int __translate_perms(int flags) { int perms = 0; if (flags & IS_RX) { if (flags & IS_DATA) { perms = R_PERM_R; } else { perms = R_PERM_X; } } if (!perms) { perms = R_PERM_RWX; } return perms; } static char *__read_nonnull_str_at(RBuffer *buf, ut64 offset) { ut8 sz = r_buf_read8_at (buf, offset); if (!sz) { return NULL; } char *str = malloc ((ut64)sz + 1); if (!str) { return NULL; } r_buf_read_at (buf, offset + 1, (ut8 *)str, sz); str[sz] = '\0'; return str; } static char *__func_name_from_ord(const char *module, ut16 ordinal) { if (!module) { return NULL; } char *lower_module = strdup (module); r_str_case (lower_module, false); char *path = r_str_newf (R_JOIN_4_PATHS ("%s", R2_SDB_FORMAT, "dll", "%s.sdb"), r_sys_prefix (NULL), lower_module); free (lower_module); char *ord = r_str_newf ("%d", ordinal); char *name; if (r_file_exists (path)) { Sdb *sdb = sdb_new (NULL, path, 0); name = sdb_get (sdb, ord, NULL); if (!name) { name = ord; } else { free (ord); } sdb_close (sdb); free (sdb); } else { name = ord; } free (path); return name; } RList *r_bin_ne_get_segments(r_bin_ne_obj_t *bin) { int i; if (!bin) { return NULL; } RList *segments = r_list_newf (free); for (i = 0; i < bin->ne_header->SegCount; i++) { RBinSection *bs = R_NEW0 (RBinSection); if (!bs) { return segments; } NE_image_segment_entry *se = &bin->segment_entries[i]; bs->size = se->length; bs->vsize = se->minAllocSz ? 
se->minAllocSz : 64000; bs->bits = R_SYS_BITS_16; bs->is_data = se->flags & IS_DATA; bs->perm = __translate_perms (se->flags); bs->paddr = (ut64)se->offset * bin->alignment; bs->name = r_str_newf ("%s.%" PFMT64d, se->flags & IS_MOVEABLE ? "MOVEABLE" : "FIXED", bs->paddr); bs->is_segment = true; r_list_append (segments, bs); } bin->segments = segments; return segments; } static int __find_symbol_by_paddr(const void *paddr, const void *sym) { return (int)!(*(ut64 *)paddr == ((RBinSymbol *)sym)->paddr); } RList *r_bin_ne_get_symbols(r_bin_ne_obj_t *bin) { RBinSymbol *sym; ut16 off = bin->ne_header->ResidNamTable + bin->header_offset; RList *symbols = r_list_newf (free); if (!symbols) { return NULL; } RList *entries = r_bin_ne_get_entrypoints (bin); bool resident = true, first = true; while (entries) { ut8 sz = r_buf_read8_at (bin->buf, off); if (!sz) { first = true; if (resident) { resident = false; off = bin->ne_header->OffStartNonResTab; sz = r_buf_read8_at (bin->buf, off); if (!sz) { break; } } else { break; } } char *name = malloc ((ut64)sz + 1); if (!name) { break; } off++; r_buf_read_at (bin->buf, off, (ut8 *)name, sz); name[sz] = '\0'; off += sz; sym = R_NEW0 (RBinSymbol); if (!sym) { break; } sym->name = name; if (!first) { sym->bind = R_BIN_BIND_GLOBAL_STR; } ut16 entry_off = r_buf_read_le16_at (bin->buf, off); off += 2; RBinAddr *entry = r_list_get_n (entries, entry_off); if (entry) { sym->paddr = entry->paddr; } else { sym->paddr = -1; } sym->ordinal = entry_off; r_list_append (symbols, sym); first = false; } RListIter *it; RBinAddr *en; int i = 1; r_list_foreach (entries, it, en) { if (!r_list_find (symbols, &en->paddr, __find_symbol_by_paddr)) { sym = R_NEW0 (RBinSymbol); if (!sym) { break; } sym->name = r_str_newf ("entry%d", i - 1); sym->paddr = en->paddr; sym->bind = R_BIN_BIND_GLOBAL_STR; sym->ordinal = i; r_list_append (symbols, sym); } i++; } bin->symbols = symbols; return symbols; } static char *__resource_type_str(int type) { char *typeName; 
switch (type) { case 1: typeName = "CURSOR"; break; case 2: typeName = "BITMAP"; break; case 3: typeName = "ICON"; break; case 4: typeName = "MENU"; break; case 5: typeName = "DIALOG"; break; case 6: typeName = "STRING"; break; case 7: typeName = "FONTDIR"; break; case 8: typeName = "FONT"; break; case 9: typeName = "ACCELERATOR"; break; case 10: typeName = "RCDATA"; break; case 11: typeName = "MESSAGETABLE"; break; case 12: typeName = "GROUP_CURSOR"; break; case 14: typeName = "GROUP_ICON"; break; case 15: typeName = "NAMETABLE"; break; case 16: typeName = "VERSION"; break; case 17: typeName = "DLGINCLUDE"; break; case 19: typeName = "PLUGPLAY"; break; case 20: typeName = "VXD"; break; case 21: typeName = "ANICURSOR"; break; case 22: typeName = "ANIICON"; break; case 23: typeName = "HTML"; break; case 24: typeName = "MANIFEST"; break; default: return r_str_newf ("UNKNOWN (%d)", type); } return strdup (typeName); } static void __free_resource_entry(void *entry) { r_ne_resource_entry *en = (r_ne_resource_entry *)entry; free (en->name); free (en); } static void __free_resource(void *resource) { r_ne_resource *res = (r_ne_resource *)resource; free (res->name); r_list_free (res->entry); free (res); } static bool __ne_get_resources(r_bin_ne_obj_t *bin) { if (!bin->resources) { bin->resources = r_list_newf (__free_resource); } ut16 resoff = bin->ne_header->ResTableOffset + bin->header_offset; ut16 alignment = r_buf_read_le16_at (bin->buf, resoff); ut32 off = resoff + 2; while (true) { NE_image_typeinfo_entry ti = {0}; r_ne_resource *res = R_NEW0 (r_ne_resource); if (!res) { break; } res->entry = r_list_newf (__free_resource_entry); if (!res->entry) { break; } r_buf_read_at (bin->buf, off, (ut8 *)&ti, sizeof (ti)); if (!ti.rtTypeID) { break; } else if (ti.rtTypeID & 0x8000) { res->name = __resource_type_str (ti.rtTypeID & ~0x8000); } else { // Offset to resident name table res->name = __read_nonnull_str_at (bin->buf, (ut64)resoff + ti.rtTypeID); } off += sizeof 
(NE_image_typeinfo_entry); int i; for (i = 0; i < ti.rtResourceCount; i++) { NE_image_nameinfo_entry ni; r_ne_resource_entry *ren = R_NEW0 (r_ne_resource_entry); if (!ren) { break; } r_buf_read_at (bin->buf, off, (ut8 *)&ni, sizeof (NE_image_nameinfo_entry)); ren->offset = ni.rnOffset << alignment; ren->size = ni.rnLength; if (ni.rnID & 0x8000) { ren->name = r_str_newf ("%d", ni.rnID & ~0x8000); } else { // Offset to resident name table ren->name = __read_nonnull_str_at (bin->buf, (ut64)resoff + ni.rnID); } r_list_append (res->entry, ren); off += sizeof (NE_image_nameinfo_entry); } r_list_append (bin->resources, res); } return true; } RList *r_bin_ne_get_imports(r_bin_ne_obj_t *bin) { RList *imports = r_list_newf ((RListFree)r_bin_import_free); if (!imports) { return NULL; } ut16 off = bin->ne_header->ImportNameTable + bin->header_offset + 1; int i; for (i = 0; i < bin->ne_header->ModRefs; i++) { RBinImport *imp = R_NEW0 (RBinImport); if (!imp) { break; } ut8 sz = r_buf_read8_at (bin->buf, off); if (!sz) { r_bin_import_free (imp); break; } off++; char *name = malloc ((ut64)sz + 1); if (!name) { break; } r_buf_read_at (bin->buf, off, (ut8 *)name, sz); name[sz] = '\0'; imp->name = name; imp->ordinal = i + 1; r_list_append (imports, imp); off += sz; } bin->imports = imports; return imports; } RList *r_bin_ne_get_entrypoints(r_bin_ne_obj_t *bin) { if (!bin->entry_table) { return NULL; } RList *entries = r_list_newf (free); if (!entries) { return NULL; } RList *segments = r_bin_ne_get_segments (bin); if (!segments) { r_list_free (entries); return NULL; } if (bin->ne_header->csEntryPoint) { RBinAddr *entry = R_NEW0 (RBinAddr); if (!entry) { r_list_free (entries); return NULL; } entry->bits = 16; ut32 entry_cs = bin->ne_header->csEntryPoint; RBinSection *s = r_list_get_n (segments, entry_cs - 1); entry->paddr = bin->ne_header->ipEntryPoint + (s? 
s->paddr: 0); r_list_append (entries, entry); } int off = 0; size_t tableat = bin->header_offset + bin->ne_header->EntryTableOffset; while (off < bin->ne_header->EntryTableLength) { if (tableat + off >= r_buf_size (bin->buf)) { break; } ut8 bundle_length = *(ut8 *)(bin->entry_table + off); if (!bundle_length) { break; } off++; ut8 bundle_type = *(ut8 *)(bin->entry_table + off); off++; int i; for (i = 0; i < bundle_length; i++) { if (tableat + off + 4 >= r_buf_size (bin->buf)) { break; } RBinAddr *entry = R_NEW0 (RBinAddr); if (!entry) { r_list_free (entries); return NULL; } off++; if (!bundle_type) { // Skip off--; free (entry); break; } else if (bundle_type == 0xff) { // moveable off += 2; ut8 segnum = *(bin->entry_table + off); off++; ut16 segoff = *(ut16 *)(bin->entry_table + off); if (segnum > 0) { entry->paddr = (ut64)bin->segment_entries[segnum - 1].offset * bin->alignment + segoff; } } else { // Fixed if (bundle_type < bin->ne_header->SegCount) { entry->paddr = (ut64)bin->segment_entries[bundle_type - 1].offset * bin->alignment + *(ut16 *)(bin->entry_table + off); } } off += 2; r_list_append (entries, entry); } } r_list_free (segments); bin->entries = entries; return entries; } RList *r_bin_ne_get_relocs(r_bin_ne_obj_t *bin) { RList *segments = bin->segments; if (!segments) { return NULL; } RList *entries = bin->entries; if (!entries) { return NULL; } RList *symbols = bin->symbols; if (!symbols) { return NULL; } ut16 *modref = calloc (bin->ne_header->ModRefs, sizeof (ut16)); if (!modref) { return NULL; } r_buf_read_at (bin->buf, (ut64)bin->ne_header->ModRefTable + bin->header_offset, (ut8 *)modref, bin->ne_header->ModRefs * sizeof (ut16)); RList *relocs = r_list_newf (free); if (!relocs) { free (modref); return NULL; } RListIter *it; RBinSection *seg; int index = -1; r_list_foreach (segments, it, seg) { index++; if (!(bin->segment_entries[index].flags & RELOCINFO)) { continue; } ut32 off = seg->paddr + seg->size; ut32 start = off; ut16 length = 
r_buf_read_le16_at (bin->buf, off); if (!length) { continue; } off += 2; // size_t buf_size = r_buf_size (bin->buf); while (off < start + length * sizeof (NE_image_reloc_item)) { // && off + sizeof (NE_image_reloc_item) < buf_size) NE_image_reloc_item rel = {0}; if (r_buf_read_at (bin->buf, off, (ut8 *)&rel, sizeof (rel)) < 1) { return NULL; } RBinReloc *reloc = R_NEW0 (RBinReloc); if (!reloc) { return NULL; } reloc->paddr = seg->paddr + rel.offset; switch (rel.type) { case LOBYTE: reloc->type = R_BIN_RELOC_8; break; case SEL_16: case OFF_16: reloc->type = R_BIN_RELOC_16; break; case POI_32: case OFF_32: reloc->type = R_BIN_RELOC_32; break; case POI_48: reloc->type = R_BIN_RELOC_64; break; } ut32 offset; if (rel.flags & (IMPORTED_ORD | IMPORTED_NAME)) { RBinImport *imp = R_NEW0 (RBinImport); if (!imp) { free (reloc); break; } char *name = NULL; if (rel.index > bin->ne_header->ModRefs) { name = r_str_newf ("UnknownModule%d_%x", rel.index, off); // ???? } else if (rel.index > 0) { offset = modref[rel.index - 1] + bin->header_offset + bin->ne_header->ImportNameTable; name = __read_nonnull_str_at (bin->buf, offset); } if (rel.flags & IMPORTED_ORD) { imp->ordinal = rel.func_ord; char *fname = __func_name_from_ord (name, rel.func_ord); imp->name = r_str_newf ("%s.%s", name, fname); free (fname); } else { offset = bin->header_offset + bin->ne_header->ImportNameTable + rel.name_off; char *func = __read_nonnull_str_at (bin->buf, offset); imp->name = r_str_newf ("%s.%s", name, func); free (func); } free (name); reloc->import = imp; } else if (rel.flags & OSFIXUP) { // TODO } else { if (strstr (seg->name, "FIXED")) { RBinSection *s = r_list_get_n (segments, rel.segnum - 1); if (s) { offset = s->paddr + rel.segoff; } else { offset = -1; } } else { RBinAddr *entry = r_list_get_n (entries, rel.entry_ordinal - 1); if (entry) { offset = entry->paddr; } else { offset = -1; } } reloc->addend = offset; RBinSymbol *sym = NULL; RListIter *sit; r_list_foreach (symbols, sit, sym) { if 
(sym->paddr == reloc->addend) { reloc->symbol = sym; break; } } } if (rel.flags & ADDITIVE) { reloc->additive = 1; r_list_append (relocs, reloc); } else { do { #define NE_BUG 0 #if NE_BUG if (reloc->paddr + 4 < r_buf_size (bin->buf)) { break; } #endif r_list_append (relocs, reloc); offset = r_buf_read_le16_at (bin->buf, reloc->paddr); RBinReloc *tmp = reloc; reloc = R_NEW0 (RBinReloc); if (!reloc) { break; } *reloc = *tmp; reloc->paddr = seg->paddr + offset; } while (offset != 0xFFFF); free (reloc); } off += sizeof (NE_image_reloc_item); } } free (modref); return relocs; } void __init(RBuffer *buf, r_bin_ne_obj_t *bin) { bin->header_offset = r_buf_read_le16_at (buf, 0x3c); bin->ne_header = R_NEW0 (NE_image_header); if (!bin->ne_header) { return; } bin->buf = buf; // XXX this is endian unsafe if (r_buf_read_at (buf, bin->header_offset, (ut8 *)bin->ne_header, sizeof (NE_image_header)) < 1) { R_FREE (bin->ne_header); return; } if (bin->ne_header->FileAlnSzShftCnt > 15) { bin->ne_header->FileAlnSzShftCnt = 15; } ut64 from = bin->ne_header->ModRefTable + bin->header_offset; ut64 left = r_buf_size (bin->buf) - from; if (from + bin->ne_header->ModRefs * sizeof (ut16) >= left) { bin->ne_header->ModRefs = left / sizeof (ut16); } bin->alignment = 1 << bin->ne_header->FileAlnSzShftCnt; if (!bin->alignment) { bin->alignment = 1 << 9; } bin->os = __get_target_os (bin); ut16 offset = bin->ne_header->SegTableOffset + bin->header_offset; size_t size = bin->ne_header->SegCount * sizeof (NE_image_segment_entry); if (offset >= r_buf_size (bin->buf)) { return; } size_t remaining = r_buf_size (bin->buf) - offset; size = R_MIN (remaining, size); bin->ne_header->SegCount = size / sizeof (NE_image_segment_entry); // * sizeof (NE_image_segment_entry); bin->segment_entries = calloc (1, size); if (size >= remaining) { bin->ne_header->SegCount = size / sizeof (NE_image_segment_entry); } if (!bin->segment_entries) { return; } r_buf_read_at (buf, offset, (ut8 *)bin->segment_entries, size); 
bin->entry_table = calloc (4, bin->ne_header->EntryTableLength); if (!bin->entry_table) { R_FREE (bin->segment_entries); return; } r_buf_read_at (buf, (ut64)bin->header_offset + bin->ne_header->EntryTableOffset, bin->entry_table, bin->ne_header->EntryTableLength); bin->imports = r_bin_ne_get_imports (bin); __ne_get_resources (bin); } void r_bin_ne_free(r_bin_ne_obj_t *bin) { // r_list_free (bin->imports); // double free r_list_free (bin->resources); free (bin->entry_table); free (bin->ne_header); free (bin->resident_name_table); free (bin->segment_entries); free (bin); } r_bin_ne_obj_t *r_bin_ne_new_buf(RBuffer *buf, bool verbose) { r_bin_ne_obj_t *bin = R_NEW0 (r_bin_ne_obj_t); if (!bin) { return NULL; } __init(buf, bin); return bin; }
/* radare - LGPL - Copyright 2019-2022 - GustavoLCR */ #include "ne.h" static char *__get_target_os(r_bin_ne_obj_t *bin) { switch (bin->ne_header->targOS) { case 1: return "OS/2"; case 2: return "Windows"; case 3: return "European MS-DOS 4.x"; case 4: return "Windows 386"; case 5: return "BOSS (Borland Operating System Services)"; default: return "Unknown"; } } static int __translate_perms(int flags) { int perms = 0; if (flags & IS_RX) { if (flags & IS_DATA) { perms = R_PERM_R; } else { perms = R_PERM_X; } } if (!perms) { perms = R_PERM_RWX; } return perms; } static char *__read_nonnull_str_at(RBuffer *buf, ut64 offset) { ut8 sz = r_buf_read8_at (buf, offset); if (!sz) { return NULL; } char *str = malloc ((ut64)sz + 1); if (!str) { return NULL; } r_buf_read_at (buf, offset + 1, (ut8 *)str, sz); str[sz] = '\0'; return str; } static char *__func_name_from_ord(const char *module, ut16 ordinal) { if (!module) { return NULL; } char *lower_module = strdup (module); r_str_case (lower_module, false); char *path = r_str_newf (R_JOIN_4_PATHS ("%s", R2_SDB_FORMAT, "dll", "%s.sdb"), r_sys_prefix (NULL), lower_module); free (lower_module); char *ord = r_str_newf ("%d", ordinal); char *name; if (r_file_exists (path)) { Sdb *sdb = sdb_new (NULL, path, 0); name = sdb_get (sdb, ord, NULL); if (!name) { name = ord; } else { free (ord); } sdb_close (sdb); free (sdb); } else { name = ord; } free (path); return name; } RList *r_bin_ne_get_segments(r_bin_ne_obj_t *bin) { int i; if (!bin) { return NULL; } RList *segments = r_list_newf (free); for (i = 0; i < bin->ne_header->SegCount; i++) { RBinSection *bs = R_NEW0 (RBinSection); if (!bs) { return segments; } NE_image_segment_entry *se = &bin->segment_entries[i]; bs->size = se->length; bs->vsize = se->minAllocSz ? 
se->minAllocSz : 64000; bs->bits = R_SYS_BITS_16; bs->is_data = se->flags & IS_DATA; bs->perm = __translate_perms (se->flags); bs->paddr = (ut64)se->offset * bin->alignment; bs->name = r_str_newf ("%s.%" PFMT64d, se->flags & IS_MOVEABLE ? "MOVEABLE" : "FIXED", bs->paddr); bs->is_segment = true; r_list_append (segments, bs); } bin->segments = segments; return segments; } static int __find_symbol_by_paddr(const void *paddr, const void *sym) { return (int)!(*(ut64 *)paddr == ((RBinSymbol *)sym)->paddr); } RList *r_bin_ne_get_symbols(r_bin_ne_obj_t *bin) { RBinSymbol *sym; ut16 off = bin->ne_header->ResidNamTable + bin->header_offset; RList *symbols = r_list_newf (free); if (!symbols) { return NULL; } RList *entries = r_bin_ne_get_entrypoints (bin); bool resident = true, first = true; while (entries) { ut8 sz = r_buf_read8_at (bin->buf, off); if (!sz) { first = true; if (resident) { resident = false; off = bin->ne_header->OffStartNonResTab; sz = r_buf_read8_at (bin->buf, off); if (!sz) { break; } } else { break; } } char *name = malloc ((ut64)sz + 1); if (!name) { break; } off++; r_buf_read_at (bin->buf, off, (ut8 *)name, sz); name[sz] = '\0'; off += sz; sym = R_NEW0 (RBinSymbol); if (!sym) { break; } sym->name = name; if (!first) { sym->bind = R_BIN_BIND_GLOBAL_STR; } ut16 entry_off = r_buf_read_le16_at (bin->buf, off); off += 2; RBinAddr *entry = r_list_get_n (entries, entry_off); if (entry) { sym->paddr = entry->paddr; } else { sym->paddr = -1; } sym->ordinal = entry_off; r_list_append (symbols, sym); first = false; } RListIter *it; RBinAddr *en; int i = 1; r_list_foreach (entries, it, en) { if (!r_list_find (symbols, &en->paddr, __find_symbol_by_paddr)) { sym = R_NEW0 (RBinSymbol); if (!sym) { break; } sym->name = r_str_newf ("entry%d", i - 1); sym->paddr = en->paddr; sym->bind = R_BIN_BIND_GLOBAL_STR; sym->ordinal = i; r_list_append (symbols, sym); } i++; } bin->symbols = symbols; return symbols; } static char *__resource_type_str(int type) { char *typeName; 
switch (type) { case 1: typeName = "CURSOR"; break; case 2: typeName = "BITMAP"; break; case 3: typeName = "ICON"; break; case 4: typeName = "MENU"; break; case 5: typeName = "DIALOG"; break; case 6: typeName = "STRING"; break; case 7: typeName = "FONTDIR"; break; case 8: typeName = "FONT"; break; case 9: typeName = "ACCELERATOR"; break; case 10: typeName = "RCDATA"; break; case 11: typeName = "MESSAGETABLE"; break; case 12: typeName = "GROUP_CURSOR"; break; case 14: typeName = "GROUP_ICON"; break; case 15: typeName = "NAMETABLE"; break; case 16: typeName = "VERSION"; break; case 17: typeName = "DLGINCLUDE"; break; case 19: typeName = "PLUGPLAY"; break; case 20: typeName = "VXD"; break; case 21: typeName = "ANICURSOR"; break; case 22: typeName = "ANIICON"; break; case 23: typeName = "HTML"; break; case 24: typeName = "MANIFEST"; break; default: return r_str_newf ("UNKNOWN (%d)", type); } return strdup (typeName); } static void __free_resource_entry(void *entry) { r_ne_resource_entry *en = (r_ne_resource_entry *)entry; free (en->name); free (en); } static void __free_resource(void *resource) { r_ne_resource *res = (r_ne_resource *)resource; free (res->name); r_list_free (res->entry); free (res); } static bool __ne_get_resources(r_bin_ne_obj_t *bin) { if (!bin->resources) { bin->resources = r_list_newf (__free_resource); } ut16 resoff = bin->ne_header->ResTableOffset + bin->header_offset; ut16 alignment = r_buf_read_le16_at (bin->buf, resoff); ut32 off = resoff + 2; while (true) { NE_image_typeinfo_entry ti = {0}; r_ne_resource *res = R_NEW0 (r_ne_resource); if (!res) { break; } res->entry = r_list_newf (__free_resource_entry); if (!res->entry) { break; } r_buf_read_at (bin->buf, off, (ut8 *)&ti, sizeof (ti)); if (!ti.rtTypeID) { break; } else if (ti.rtTypeID & 0x8000) { res->name = __resource_type_str (ti.rtTypeID & ~0x8000); } else { // Offset to resident name table res->name = __read_nonnull_str_at (bin->buf, (ut64)resoff + ti.rtTypeID); } off += sizeof 
(NE_image_typeinfo_entry); int i; for (i = 0; i < ti.rtResourceCount; i++) { NE_image_nameinfo_entry ni; r_ne_resource_entry *ren = R_NEW0 (r_ne_resource_entry); if (!ren) { break; } r_buf_read_at (bin->buf, off, (ut8 *)&ni, sizeof (NE_image_nameinfo_entry)); ren->offset = ni.rnOffset << alignment; ren->size = ni.rnLength; if (ni.rnID & 0x8000) { ren->name = r_str_newf ("%d", ni.rnID & ~0x8000); } else { // Offset to resident name table ren->name = __read_nonnull_str_at (bin->buf, (ut64)resoff + ni.rnID); } r_list_append (res->entry, ren); off += sizeof (NE_image_nameinfo_entry); } r_list_append (bin->resources, res); } return true; } RList *r_bin_ne_get_imports(r_bin_ne_obj_t *bin) { RList *imports = r_list_newf ((RListFree)r_bin_import_free); if (!imports) { return NULL; } ut16 off = bin->ne_header->ImportNameTable + bin->header_offset + 1; int i; for (i = 0; i < bin->ne_header->ModRefs; i++) { RBinImport *imp = R_NEW0 (RBinImport); if (!imp) { break; } ut8 sz = r_buf_read8_at (bin->buf, off); if (!sz) { r_bin_import_free (imp); break; } off++; char *name = malloc ((ut64)sz + 1); if (!name) { break; } r_buf_read_at (bin->buf, off, (ut8 *)name, sz); name[sz] = '\0'; imp->name = name; imp->ordinal = i + 1; r_list_append (imports, imp); off += sz; } bin->imports = imports; return imports; } RList *r_bin_ne_get_entrypoints(r_bin_ne_obj_t *bin) { if (!bin->entry_table) { return NULL; } RList *entries = r_list_newf (free); if (!entries) { return NULL; } RList *segments = r_bin_ne_get_segments (bin); if (!segments) { r_list_free (entries); return NULL; } if (bin->ne_header->csEntryPoint) { RBinAddr *entry = R_NEW0 (RBinAddr); if (!entry) { r_list_free (entries); return NULL; } entry->bits = 16; ut32 entry_cs = bin->ne_header->csEntryPoint; RBinSection *s = r_list_get_n (segments, entry_cs - 1); entry->paddr = bin->ne_header->ipEntryPoint + (s? 
s->paddr: 0); r_list_append (entries, entry); } int off = 0; size_t tableat = bin->header_offset + bin->ne_header->EntryTableOffset; while (off < bin->ne_header->EntryTableLength) { if (tableat + off >= r_buf_size (bin->buf)) { break; } ut8 bundle_length = *(ut8 *)(bin->entry_table + off); if (!bundle_length) { break; } off++; ut8 bundle_type = *(ut8 *)(bin->entry_table + off); off++; int i; for (i = 0; i < bundle_length; i++) { if (tableat + off + 4 >= r_buf_size (bin->buf)) { break; } RBinAddr *entry = R_NEW0 (RBinAddr); if (!entry) { r_list_free (entries); return NULL; } off++; if (!bundle_type) { // Skip off--; free (entry); break; } else if (bundle_type == 0xff) { // moveable off += 2; ut8 segnum = *(bin->entry_table + off); off++; if (off > bin->ne_header->EntryTableLength) { break; } ut16 segoff = r_read_le16 (bin->entry_table + off); if (segnum > 0 && segnum < bin->ne_header->SegCount) { entry->paddr = (ut64)bin->segment_entries[segnum - 1].offset * bin->alignment + segoff; } } else { // Fixed if (off + 2 >= bin->ne_header->EntryTableLength) { break; } ut16 delta = r_read_le16 (bin->entry_table + off); if (bundle_type < bin->ne_header->SegCount) { entry->paddr = (ut64)bin->segment_entries[bundle_type - 1].offset * bin->alignment + delta; } } off += 2; r_list_append (entries, entry); } } r_list_free (segments); bin->entries = entries; return entries; } RList *r_bin_ne_get_relocs(r_bin_ne_obj_t *bin) { RList *segments = bin->segments; if (!segments) { return NULL; } RList *entries = bin->entries; if (!entries) { return NULL; } RList *symbols = bin->symbols; if (!symbols) { return NULL; } ut16 *modref = calloc (bin->ne_header->ModRefs, sizeof (ut16)); if (!modref) { return NULL; } r_buf_read_at (bin->buf, (ut64)bin->ne_header->ModRefTable + bin->header_offset, (ut8 *)modref, bin->ne_header->ModRefs * sizeof (ut16)); RList *relocs = r_list_newf (free); if (!relocs) { free (modref); return NULL; } RListIter *it; RBinSection *seg; int index = -1; r_list_foreach 
(segments, it, seg) { index++; if (!(bin->segment_entries[index].flags & RELOCINFO)) { continue; } ut32 off = seg->paddr + seg->size; ut32 start = off; ut16 length = r_buf_read_le16_at (bin->buf, off); if (!length) { continue; } off += 2; // size_t buf_size = r_buf_size (bin->buf); while (off < start + length * sizeof (NE_image_reloc_item)) { // && off + sizeof (NE_image_reloc_item) < buf_size) NE_image_reloc_item rel = {0}; if (r_buf_read_at (bin->buf, off, (ut8 *)&rel, sizeof (rel)) < 1) { return NULL; } RBinReloc *reloc = R_NEW0 (RBinReloc); if (!reloc) { return NULL; } reloc->paddr = seg->paddr + rel.offset; switch (rel.type) { case LOBYTE: reloc->type = R_BIN_RELOC_8; break; case SEL_16: case OFF_16: reloc->type = R_BIN_RELOC_16; break; case POI_32: case OFF_32: reloc->type = R_BIN_RELOC_32; break; case POI_48: reloc->type = R_BIN_RELOC_64; break; } ut32 offset; if (rel.flags & (IMPORTED_ORD | IMPORTED_NAME)) { RBinImport *imp = R_NEW0 (RBinImport); if (!imp) { free (reloc); break; } char *name = NULL; if (rel.index > bin->ne_header->ModRefs) { name = r_str_newf ("UnknownModule%d_%x", rel.index, off); // ???? 
} else if (rel.index > 0) { offset = modref[rel.index - 1] + bin->header_offset + bin->ne_header->ImportNameTable; name = __read_nonnull_str_at (bin->buf, offset); } if (rel.flags & IMPORTED_ORD) { imp->ordinal = rel.func_ord; char *fname = __func_name_from_ord (name, rel.func_ord); imp->name = r_str_newf ("%s.%s", name, fname); free (fname); } else { offset = bin->header_offset + bin->ne_header->ImportNameTable + rel.name_off; char *func = __read_nonnull_str_at (bin->buf, offset); imp->name = r_str_newf ("%s.%s", name, func); free (func); } free (name); reloc->import = imp; } else if (rel.flags & OSFIXUP) { // TODO } else { if (strstr (seg->name, "FIXED")) { RBinSection *s = r_list_get_n (segments, rel.segnum - 1); if (s) { offset = s->paddr + rel.segoff; } else { offset = -1; } } else { RBinAddr *entry = r_list_get_n (entries, rel.entry_ordinal - 1); if (entry) { offset = entry->paddr; } else { offset = -1; } } reloc->addend = offset; RBinSymbol *sym = NULL; RListIter *sit; r_list_foreach (symbols, sit, sym) { if (sym->paddr == reloc->addend) { reloc->symbol = sym; break; } } } if (rel.flags & ADDITIVE) { reloc->additive = 1; r_list_append (relocs, reloc); } else { do { #define NE_BUG 0 #if NE_BUG if (reloc->paddr + 4 < r_buf_size (bin->buf)) { break; } #endif r_list_append (relocs, reloc); offset = r_buf_read_le16_at (bin->buf, reloc->paddr); RBinReloc *tmp = reloc; reloc = R_NEW0 (RBinReloc); if (!reloc) { break; } *reloc = *tmp; reloc->paddr = seg->paddr + offset; } while (offset != 0xFFFF); free (reloc); } off += sizeof (NE_image_reloc_item); } } free (modref); return relocs; } void __init(RBuffer *buf, r_bin_ne_obj_t *bin) { bin->header_offset = r_buf_read_le16_at (buf, 0x3c); bin->ne_header = R_NEW0 (NE_image_header); if (!bin->ne_header) { return; } bin->buf = buf; // XXX this is endian unsafe if (r_buf_read_at (buf, bin->header_offset, (ut8 *)bin->ne_header, sizeof (NE_image_header)) < 1) { R_FREE (bin->ne_header); return; } if 
(bin->ne_header->FileAlnSzShftCnt > 15) { bin->ne_header->FileAlnSzShftCnt = 15; } ut64 from = bin->ne_header->ModRefTable + bin->header_offset; ut64 left = r_buf_size (bin->buf) - from; if (from + bin->ne_header->ModRefs * sizeof (ut16) >= left) { bin->ne_header->ModRefs = left / sizeof (ut16); } bin->alignment = 1 << bin->ne_header->FileAlnSzShftCnt; if (!bin->alignment) { bin->alignment = 1 << 9; } bin->os = __get_target_os (bin); ut16 offset = bin->ne_header->SegTableOffset + bin->header_offset; size_t size = bin->ne_header->SegCount * sizeof (NE_image_segment_entry); if (offset >= r_buf_size (bin->buf)) { return; } size_t remaining = r_buf_size (bin->buf) - offset; size = R_MIN (remaining, size); bin->ne_header->SegCount = size / sizeof (NE_image_segment_entry); // * sizeof (NE_image_segment_entry); bin->segment_entries = calloc (1, size); if (size >= remaining) { bin->ne_header->SegCount = size / sizeof (NE_image_segment_entry); } if (!bin->segment_entries) { return; } r_buf_read_at (buf, offset, (ut8 *)bin->segment_entries, size); bin->entry_table = calloc (4, bin->ne_header->EntryTableLength); if (!bin->entry_table) { R_FREE (bin->segment_entries); return; } r_buf_read_at (buf, (ut64)bin->header_offset + bin->ne_header->EntryTableOffset, bin->entry_table, bin->ne_header->EntryTableLength); bin->imports = r_bin_ne_get_imports (bin); __ne_get_resources (bin); } void r_bin_ne_free(r_bin_ne_obj_t *bin) { // r_list_free (bin->imports); // double free r_list_free (bin->resources); free (bin->entry_table); free (bin->ne_header); free (bin->resident_name_table); free (bin->segment_entries); free (bin); } r_bin_ne_obj_t *r_bin_ne_new_buf(RBuffer *buf, bool verbose) { r_bin_ne_obj_t *bin = R_NEW0 (r_bin_ne_obj_t); if (!bin) { return NULL; } __init(buf, bin); return bin; }
RList *r_bin_ne_get_entrypoints(r_bin_ne_obj_t *bin) { if (!bin->entry_table) { return NULL; } RList *entries = r_list_newf (free); if (!entries) { return NULL; } RList *segments = r_bin_ne_get_segments (bin); if (!segments) { r_list_free (entries); return NULL; } if (bin->ne_header->csEntryPoint) { RBinAddr *entry = R_NEW0 (RBinAddr); if (!entry) { r_list_free (entries); return NULL; } entry->bits = 16; ut32 entry_cs = bin->ne_header->csEntryPoint; RBinSection *s = r_list_get_n (segments, entry_cs - 1); entry->paddr = bin->ne_header->ipEntryPoint + (s? s->paddr: 0); r_list_append (entries, entry); } int off = 0; size_t tableat = bin->header_offset + bin->ne_header->EntryTableOffset; while (off < bin->ne_header->EntryTableLength) { if (tableat + off >= r_buf_size (bin->buf)) { break; } ut8 bundle_length = *(ut8 *)(bin->entry_table + off); if (!bundle_length) { break; } off++; ut8 bundle_type = *(ut8 *)(bin->entry_table + off); off++; int i; for (i = 0; i < bundle_length; i++) { if (tableat + off + 4 >= r_buf_size (bin->buf)) { break; } RBinAddr *entry = R_NEW0 (RBinAddr); if (!entry) { r_list_free (entries); return NULL; } off++; if (!bundle_type) { // Skip off--; free (entry); break; } else if (bundle_type == 0xff) { // moveable off += 2; ut8 segnum = *(bin->entry_table + off); off++; ut16 segoff = *(ut16 *)(bin->entry_table + off); if (segnum > 0) { entry->paddr = (ut64)bin->segment_entries[segnum - 1].offset * bin->alignment + segoff; } } else { // Fixed if (bundle_type < bin->ne_header->SegCount) { entry->paddr = (ut64)bin->segment_entries[bundle_type - 1].offset * bin->alignment + *(ut16 *)(bin->entry_table + off); } } off += 2; r_list_append (entries, entry); } } r_list_free (segments); bin->entries = entries; return entries; }
RList *r_bin_ne_get_entrypoints(r_bin_ne_obj_t *bin) { if (!bin->entry_table) { return NULL; } RList *entries = r_list_newf (free); if (!entries) { return NULL; } RList *segments = r_bin_ne_get_segments (bin); if (!segments) { r_list_free (entries); return NULL; } if (bin->ne_header->csEntryPoint) { RBinAddr *entry = R_NEW0 (RBinAddr); if (!entry) { r_list_free (entries); return NULL; } entry->bits = 16; ut32 entry_cs = bin->ne_header->csEntryPoint; RBinSection *s = r_list_get_n (segments, entry_cs - 1); entry->paddr = bin->ne_header->ipEntryPoint + (s? s->paddr: 0); r_list_append (entries, entry); } int off = 0; size_t tableat = bin->header_offset + bin->ne_header->EntryTableOffset; while (off < bin->ne_header->EntryTableLength) { if (tableat + off >= r_buf_size (bin->buf)) { break; } ut8 bundle_length = *(ut8 *)(bin->entry_table + off); if (!bundle_length) { break; } off++; ut8 bundle_type = *(ut8 *)(bin->entry_table + off); off++; int i; for (i = 0; i < bundle_length; i++) { if (tableat + off + 4 >= r_buf_size (bin->buf)) { break; } RBinAddr *entry = R_NEW0 (RBinAddr); if (!entry) { r_list_free (entries); return NULL; } off++; if (!bundle_type) { // Skip off--; free (entry); break; } else if (bundle_type == 0xff) { // moveable off += 2; ut8 segnum = *(bin->entry_table + off); off++; if (off > bin->ne_header->EntryTableLength) { break; } ut16 segoff = r_read_le16 (bin->entry_table + off); if (segnum > 0 && segnum < bin->ne_header->SegCount) { entry->paddr = (ut64)bin->segment_entries[segnum - 1].offset * bin->alignment + segoff; } } else { // Fixed if (off + 2 >= bin->ne_header->EntryTableLength) { break; } ut16 delta = r_read_le16 (bin->entry_table + off); if (bundle_type < bin->ne_header->SegCount) { entry->paddr = (ut64)bin->segment_entries[bundle_type - 1].offset * bin->alignment + delta; } } off += 2; r_list_append (entries, entry); } } r_list_free (segments); bin->entries = entries; return entries; }
{'added': [(411, '\t\t\t\tif (off > bin->ne_header->EntryTableLength) {'), (412, '\t\t\t\t\tbreak;'), (413, '\t\t\t\t}'), (414, '\t\t\t\tut16 segoff = r_read_le16 (bin->entry_table + off);'), (415, '\t\t\t\tif (segnum > 0 && segnum < bin->ne_header->SegCount) {'), (419, '\t\t\t\tif (off + 2 >= bin->ne_header->EntryTableLength) {'), (420, '\t\t\t\t\tbreak;'), (421, '\t\t\t\t}'), (422, '\t\t\t\tut16 delta = r_read_le16 (bin->entry_table + off);'), (425, '\t\t\t\t\t\t* bin->alignment + delta;')], 'deleted': [(411, '\t\t\t\tut16 segoff = *(ut16 *)(bin->entry_table + off);'), (412, '\t\t\t\tif (segnum > 0) {'), (418, '\t\t\t\t\t\t* bin->alignment + *(ut16 *)(bin->entry_table + off);')]}
10
3
626
3,762
75
478
17
https://github.com/radareorg/radare2
CVE-2022-1297
CWE-125
1,532
rose_timer.c
C
rose_stop_heartbeat
// SPDX-License-Identifier: GPL-2.0-or-later /* * * Copyright (C) Jonathan Naylor G4KLX (g4klx@g4klx.demon.co.uk) * Copyright (C) 2002 Ralf Baechle DO1GRB (ralf@gnu.org) */ #include <linux/errno.h> #include <linux/types.h> #include <linux/socket.h> #include <linux/in.h> #include <linux/kernel.h> #include <linux/jiffies.h> #include <linux/timer.h> #include <linux/string.h> #include <linux/sockios.h> #include <linux/net.h> #include <net/ax25.h> #include <linux/inet.h> #include <linux/netdevice.h> #include <linux/skbuff.h> #include <net/sock.h> #include <net/tcp_states.h> #include <linux/fcntl.h> #include <linux/mm.h> #include <linux/interrupt.h> #include <net/rose.h> static void rose_heartbeat_expiry(struct timer_list *t); static void rose_timer_expiry(struct timer_list *); static void rose_idletimer_expiry(struct timer_list *); void rose_start_heartbeat(struct sock *sk) { del_timer(&sk->sk_timer); sk->sk_timer.function = rose_heartbeat_expiry; sk->sk_timer.expires = jiffies + 5 * HZ; add_timer(&sk->sk_timer); } void rose_start_t1timer(struct sock *sk) { struct rose_sock *rose = rose_sk(sk); del_timer(&rose->timer); rose->timer.function = rose_timer_expiry; rose->timer.expires = jiffies + rose->t1; add_timer(&rose->timer); } void rose_start_t2timer(struct sock *sk) { struct rose_sock *rose = rose_sk(sk); del_timer(&rose->timer); rose->timer.function = rose_timer_expiry; rose->timer.expires = jiffies + rose->t2; add_timer(&rose->timer); } void rose_start_t3timer(struct sock *sk) { struct rose_sock *rose = rose_sk(sk); del_timer(&rose->timer); rose->timer.function = rose_timer_expiry; rose->timer.expires = jiffies + rose->t3; add_timer(&rose->timer); } void rose_start_hbtimer(struct sock *sk) { struct rose_sock *rose = rose_sk(sk); del_timer(&rose->timer); rose->timer.function = rose_timer_expiry; rose->timer.expires = jiffies + rose->hb; add_timer(&rose->timer); } void rose_start_idletimer(struct sock *sk) { struct rose_sock *rose = rose_sk(sk); 
del_timer(&rose->idletimer); if (rose->idle > 0) { rose->idletimer.function = rose_idletimer_expiry; rose->idletimer.expires = jiffies + rose->idle; add_timer(&rose->idletimer); } } void rose_stop_heartbeat(struct sock *sk) { del_timer(&sk->sk_timer); } void rose_stop_timer(struct sock *sk) { del_timer(&rose_sk(sk)->timer); } void rose_stop_idletimer(struct sock *sk) { del_timer(&rose_sk(sk)->idletimer); } static void rose_heartbeat_expiry(struct timer_list *t) { struct sock *sk = from_timer(sk, t, sk_timer); struct rose_sock *rose = rose_sk(sk); bh_lock_sock(sk); switch (rose->state) { case ROSE_STATE_0: /* Magic here: If we listen() and a new link dies before it is accepted() it isn't 'dead' so doesn't get removed. */ if (sock_flag(sk, SOCK_DESTROY) || (sk->sk_state == TCP_LISTEN && sock_flag(sk, SOCK_DEAD))) { bh_unlock_sock(sk); rose_destroy_socket(sk); return; } break; case ROSE_STATE_3: /* * Check for the state of the receive buffer. */ if (atomic_read(&sk->sk_rmem_alloc) < (sk->sk_rcvbuf / 2) && (rose->condition & ROSE_COND_OWN_RX_BUSY)) { rose->condition &= ~ROSE_COND_OWN_RX_BUSY; rose->condition &= ~ROSE_COND_ACK_PENDING; rose->vl = rose->vr; rose_write_internal(sk, ROSE_RR); rose_stop_timer(sk); /* HB */ break; } break; } rose_start_heartbeat(sk); bh_unlock_sock(sk); } static void rose_timer_expiry(struct timer_list *t) { struct rose_sock *rose = from_timer(rose, t, timer); struct sock *sk = &rose->sock; bh_lock_sock(sk); switch (rose->state) { case ROSE_STATE_1: /* T1 */ case ROSE_STATE_4: /* T2 */ rose_write_internal(sk, ROSE_CLEAR_REQUEST); rose->state = ROSE_STATE_2; rose_start_t3timer(sk); break; case ROSE_STATE_2: /* T3 */ rose->neighbour->use--; rose_disconnect(sk, ETIMEDOUT, -1, -1); break; case ROSE_STATE_3: /* HB */ if (rose->condition & ROSE_COND_ACK_PENDING) { rose->condition &= ~ROSE_COND_ACK_PENDING; rose_enquiry_response(sk); } break; } bh_unlock_sock(sk); } static void rose_idletimer_expiry(struct timer_list *t) { struct rose_sock *rose = 
from_timer(rose, t, idletimer); struct sock *sk = &rose->sock; bh_lock_sock(sk); rose_clear_queues(sk); rose_write_internal(sk, ROSE_CLEAR_REQUEST); rose_sk(sk)->state = ROSE_STATE_2; rose_start_t3timer(sk); sk->sk_state = TCP_CLOSE; sk->sk_err = 0; sk->sk_shutdown |= SEND_SHUTDOWN; if (!sock_flag(sk, SOCK_DEAD)) { sk->sk_state_change(sk); sock_set_flag(sk, SOCK_DEAD); } bh_unlock_sock(sk); }
// SPDX-License-Identifier: GPL-2.0-or-later /* * * Copyright (C) Jonathan Naylor G4KLX (g4klx@g4klx.demon.co.uk) * Copyright (C) 2002 Ralf Baechle DO1GRB (ralf@gnu.org) */ #include <linux/errno.h> #include <linux/types.h> #include <linux/socket.h> #include <linux/in.h> #include <linux/kernel.h> #include <linux/jiffies.h> #include <linux/timer.h> #include <linux/string.h> #include <linux/sockios.h> #include <linux/net.h> #include <net/ax25.h> #include <linux/inet.h> #include <linux/netdevice.h> #include <linux/skbuff.h> #include <net/sock.h> #include <net/tcp_states.h> #include <linux/fcntl.h> #include <linux/mm.h> #include <linux/interrupt.h> #include <net/rose.h> static void rose_heartbeat_expiry(struct timer_list *t); static void rose_timer_expiry(struct timer_list *); static void rose_idletimer_expiry(struct timer_list *); void rose_start_heartbeat(struct sock *sk) { sk_stop_timer(sk, &sk->sk_timer); sk->sk_timer.function = rose_heartbeat_expiry; sk->sk_timer.expires = jiffies + 5 * HZ; sk_reset_timer(sk, &sk->sk_timer, sk->sk_timer.expires); } void rose_start_t1timer(struct sock *sk) { struct rose_sock *rose = rose_sk(sk); sk_stop_timer(sk, &rose->timer); rose->timer.function = rose_timer_expiry; rose->timer.expires = jiffies + rose->t1; sk_reset_timer(sk, &rose->timer, rose->timer.expires); } void rose_start_t2timer(struct sock *sk) { struct rose_sock *rose = rose_sk(sk); sk_stop_timer(sk, &rose->timer); rose->timer.function = rose_timer_expiry; rose->timer.expires = jiffies + rose->t2; sk_reset_timer(sk, &rose->timer, rose->timer.expires); } void rose_start_t3timer(struct sock *sk) { struct rose_sock *rose = rose_sk(sk); sk_stop_timer(sk, &rose->timer); rose->timer.function = rose_timer_expiry; rose->timer.expires = jiffies + rose->t3; sk_reset_timer(sk, &rose->timer, rose->timer.expires); } void rose_start_hbtimer(struct sock *sk) { struct rose_sock *rose = rose_sk(sk); sk_stop_timer(sk, &rose->timer); rose->timer.function = rose_timer_expiry; 
rose->timer.expires = jiffies + rose->hb; sk_reset_timer(sk, &rose->timer, rose->timer.expires); } void rose_start_idletimer(struct sock *sk) { struct rose_sock *rose = rose_sk(sk); sk_stop_timer(sk, &rose->idletimer); if (rose->idle > 0) { rose->idletimer.function = rose_idletimer_expiry; rose->idletimer.expires = jiffies + rose->idle; sk_reset_timer(sk, &rose->idletimer, rose->idletimer.expires); } } void rose_stop_heartbeat(struct sock *sk) { sk_stop_timer(sk, &sk->sk_timer); } void rose_stop_timer(struct sock *sk) { sk_stop_timer(sk, &rose_sk(sk)->timer); } void rose_stop_idletimer(struct sock *sk) { sk_stop_timer(sk, &rose_sk(sk)->idletimer); } static void rose_heartbeat_expiry(struct timer_list *t) { struct sock *sk = from_timer(sk, t, sk_timer); struct rose_sock *rose = rose_sk(sk); bh_lock_sock(sk); switch (rose->state) { case ROSE_STATE_0: /* Magic here: If we listen() and a new link dies before it is accepted() it isn't 'dead' so doesn't get removed. */ if (sock_flag(sk, SOCK_DESTROY) || (sk->sk_state == TCP_LISTEN && sock_flag(sk, SOCK_DEAD))) { bh_unlock_sock(sk); rose_destroy_socket(sk); sock_put(sk); return; } break; case ROSE_STATE_3: /* * Check for the state of the receive buffer. 
*/ if (atomic_read(&sk->sk_rmem_alloc) < (sk->sk_rcvbuf / 2) && (rose->condition & ROSE_COND_OWN_RX_BUSY)) { rose->condition &= ~ROSE_COND_OWN_RX_BUSY; rose->condition &= ~ROSE_COND_ACK_PENDING; rose->vl = rose->vr; rose_write_internal(sk, ROSE_RR); rose_stop_timer(sk); /* HB */ break; } break; } rose_start_heartbeat(sk); bh_unlock_sock(sk); sock_put(sk); } static void rose_timer_expiry(struct timer_list *t) { struct rose_sock *rose = from_timer(rose, t, timer); struct sock *sk = &rose->sock; bh_lock_sock(sk); switch (rose->state) { case ROSE_STATE_1: /* T1 */ case ROSE_STATE_4: /* T2 */ rose_write_internal(sk, ROSE_CLEAR_REQUEST); rose->state = ROSE_STATE_2; rose_start_t3timer(sk); break; case ROSE_STATE_2: /* T3 */ rose->neighbour->use--; rose_disconnect(sk, ETIMEDOUT, -1, -1); break; case ROSE_STATE_3: /* HB */ if (rose->condition & ROSE_COND_ACK_PENDING) { rose->condition &= ~ROSE_COND_ACK_PENDING; rose_enquiry_response(sk); } break; } bh_unlock_sock(sk); sock_put(sk); } static void rose_idletimer_expiry(struct timer_list *t) { struct rose_sock *rose = from_timer(rose, t, idletimer); struct sock *sk = &rose->sock; bh_lock_sock(sk); rose_clear_queues(sk); rose_write_internal(sk, ROSE_CLEAR_REQUEST); rose_sk(sk)->state = ROSE_STATE_2; rose_start_t3timer(sk); sk->sk_state = TCP_CLOSE; sk->sk_err = 0; sk->sk_shutdown |= SEND_SHUTDOWN; if (!sock_flag(sk, SOCK_DEAD)) { sk->sk_state_change(sk); sock_set_flag(sk, SOCK_DEAD); } bh_unlock_sock(sk); sock_put(sk); }
void rose_stop_heartbeat(struct sock *sk) { del_timer(&sk->sk_timer); }
void rose_stop_heartbeat(struct sock *sk) { sk_stop_timer(sk, &sk->sk_timer); }
{'added': [(34, '\tsk_stop_timer(sk, &sk->sk_timer);'), (39, '\tsk_reset_timer(sk, &sk->sk_timer, sk->sk_timer.expires);'), (46, '\tsk_stop_timer(sk, &rose->timer);'), (51, '\tsk_reset_timer(sk, &rose->timer, rose->timer.expires);'), (58, '\tsk_stop_timer(sk, &rose->timer);'), (63, '\tsk_reset_timer(sk, &rose->timer, rose->timer.expires);'), (70, '\tsk_stop_timer(sk, &rose->timer);'), (75, '\tsk_reset_timer(sk, &rose->timer, rose->timer.expires);'), (82, '\tsk_stop_timer(sk, &rose->timer);'), (87, '\tsk_reset_timer(sk, &rose->timer, rose->timer.expires);'), (94, '\tsk_stop_timer(sk, &rose->idletimer);'), (100, '\t\tsk_reset_timer(sk, &rose->idletimer, rose->idletimer.expires);'), (106, '\tsk_stop_timer(sk, &sk->sk_timer);'), (111, '\tsk_stop_timer(sk, &rose_sk(sk)->timer);'), (116, '\tsk_stop_timer(sk, &rose_sk(sk)->idletimer);'), (133, '\t\t\tsock_put(sk);'), (156, '\tsock_put(sk);'), (186, '\tsock_put(sk);'), (211, '\tsock_put(sk);')], 'deleted': [(34, '\tdel_timer(&sk->sk_timer);'), (39, '\tadd_timer(&sk->sk_timer);'), (46, '\tdel_timer(&rose->timer);'), (51, '\tadd_timer(&rose->timer);'), (58, '\tdel_timer(&rose->timer);'), (63, '\tadd_timer(&rose->timer);'), (70, '\tdel_timer(&rose->timer);'), (75, '\tadd_timer(&rose->timer);'), (82, '\tdel_timer(&rose->timer);'), (87, '\tadd_timer(&rose->timer);'), (94, '\tdel_timer(&rose->idletimer);'), (100, '\t\tadd_timer(&rose->idletimer);'), (106, '\tdel_timer(&sk->sk_timer);'), (111, '\tdel_timer(&rose_sk(sk)->timer);'), (116, '\tdel_timer(&rose_sk(sk)->idletimer);')]}
19
15
160
963
4
17
1
https://github.com/torvalds/linux
CVE-2022-2318
CWE-416
3,132
oom_kill.c
C
__oom_reap_task_mm
/* * linux/mm/oom_kill.c * * Copyright (C) 1998,2000 Rik van Riel * Thanks go out to Claus Fischer for some serious inspiration and * for goading me into coding this file... * Copyright (C) 2010 Google, Inc. * Rewritten by David Rientjes * * The routines in this file are used to kill a process when * we're seriously out of memory. This gets called from __alloc_pages() * in mm/page_alloc.c when we really run out of memory. * * Since we won't call these routines often (on a well-configured * machine) this file will double as a 'coding guide' and a signpost * for newbie kernel hackers. It features several pointers to major * kernel subsystems and hints as to where to find out what things do. */ #include <linux/oom.h> #include <linux/mm.h> #include <linux/err.h> #include <linux/gfp.h> #include <linux/sched.h> #include <linux/sched/mm.h> #include <linux/sched/coredump.h> #include <linux/sched/task.h> #include <linux/swap.h> #include <linux/timex.h> #include <linux/jiffies.h> #include <linux/cpuset.h> #include <linux/export.h> #include <linux/notifier.h> #include <linux/memcontrol.h> #include <linux/mempolicy.h> #include <linux/security.h> #include <linux/ptrace.h> #include <linux/freezer.h> #include <linux/ftrace.h> #include <linux/ratelimit.h> #include <linux/kthread.h> #include <linux/init.h> #include <linux/mmu_notifier.h> #include <asm/tlb.h> #include "internal.h" #include "slab.h" #define CREATE_TRACE_POINTS #include <trace/events/oom.h> int sysctl_panic_on_oom; int sysctl_oom_kill_allocating_task; int sysctl_oom_dump_tasks = 1; DEFINE_MUTEX(oom_lock); #ifdef CONFIG_NUMA /** * has_intersects_mems_allowed() - check task eligiblity for kill * @start: task struct of which task to consider * @mask: nodemask passed to page allocator for mempolicy ooms * * Task eligibility is determined by whether or not a candidate task, @tsk, * shares the same mempolicy nodes as current if it is bound by such a policy * and whether or not it has the same set of allowed cpuset nodes. 
*/ static bool has_intersects_mems_allowed(struct task_struct *start, const nodemask_t *mask) { struct task_struct *tsk; bool ret = false; rcu_read_lock(); for_each_thread(start, tsk) { if (mask) { /* * If this is a mempolicy constrained oom, tsk's * cpuset is irrelevant. Only return true if its * mempolicy intersects current, otherwise it may be * needlessly killed. */ ret = mempolicy_nodemask_intersects(tsk, mask); } else { /* * This is not a mempolicy constrained oom, so only * check the mems of tsk's cpuset. */ ret = cpuset_mems_allowed_intersects(current, tsk); } if (ret) break; } rcu_read_unlock(); return ret; } #else static bool has_intersects_mems_allowed(struct task_struct *tsk, const nodemask_t *mask) { return true; } #endif /* CONFIG_NUMA */ /* * The process p may have detached its own ->mm while exiting or through * use_mm(), but one or more of its subthreads may still have a valid * pointer. Return p, or any of its subthreads with a valid ->mm, with * task_lock() held. */ struct task_struct *find_lock_task_mm(struct task_struct *p) { struct task_struct *t; rcu_read_lock(); for_each_thread(p, t) { task_lock(t); if (likely(t->mm)) goto found; task_unlock(t); } t = NULL; found: rcu_read_unlock(); return t; } /* * order == -1 means the oom kill is required by sysrq, otherwise only * for display purposes. */ static inline bool is_sysrq_oom(struct oom_control *oc) { return oc->order == -1; } static inline bool is_memcg_oom(struct oom_control *oc) { return oc->memcg != NULL; } /* return true if the task is not adequate as candidate victim task. 
*/ static bool oom_unkillable_task(struct task_struct *p, struct mem_cgroup *memcg, const nodemask_t *nodemask) { if (is_global_init(p)) return true; if (p->flags & PF_KTHREAD) return true; /* When mem_cgroup_out_of_memory() and p is not member of the group */ if (memcg && !task_in_mem_cgroup(p, memcg)) return true; /* p may not have freeable memory in nodemask */ if (!has_intersects_mems_allowed(p, nodemask)) return true; return false; } /* * Print out unreclaimble slabs info when unreclaimable slabs amount is greater * than all user memory (LRU pages) */ static bool is_dump_unreclaim_slabs(void) { unsigned long nr_lru; nr_lru = global_node_page_state(NR_ACTIVE_ANON) + global_node_page_state(NR_INACTIVE_ANON) + global_node_page_state(NR_ACTIVE_FILE) + global_node_page_state(NR_INACTIVE_FILE) + global_node_page_state(NR_ISOLATED_ANON) + global_node_page_state(NR_ISOLATED_FILE) + global_node_page_state(NR_UNEVICTABLE); return (global_node_page_state(NR_SLAB_UNRECLAIMABLE) > nr_lru); } /** * oom_badness - heuristic function to determine which candidate task to kill * @p: task struct of which task we should calculate * @totalpages: total present RAM allowed for page allocation * * The heuristic for determining which task to kill is made to be as simple and * predictable as possible. The goal is to return the highest value for the * task consuming the most memory to avoid subsequent oom failures. 
*/ unsigned long oom_badness(struct task_struct *p, struct mem_cgroup *memcg, const nodemask_t *nodemask, unsigned long totalpages) { long points; long adj; if (oom_unkillable_task(p, memcg, nodemask)) return 0; p = find_lock_task_mm(p); if (!p) return 0; /* * Do not even consider tasks which are explicitly marked oom * unkillable or have been already oom reaped or the are in * the middle of vfork */ adj = (long)p->signal->oom_score_adj; if (adj == OOM_SCORE_ADJ_MIN || test_bit(MMF_OOM_SKIP, &p->mm->flags) || in_vfork(p)) { task_unlock(p); return 0; } /* * The baseline for the badness score is the proportion of RAM that each * task's rss, pagetable and swap space use. */ points = get_mm_rss(p->mm) + get_mm_counter(p->mm, MM_SWAPENTS) + mm_pgtables_bytes(p->mm) / PAGE_SIZE; task_unlock(p); /* * Root processes get 3% bonus, just like the __vm_enough_memory() * implementation used by LSMs. */ if (has_capability_noaudit(p, CAP_SYS_ADMIN)) points -= (points * 3) / 100; /* Normalize to oom_score_adj units */ adj *= totalpages / 1000; points += adj; /* * Never return 0 for an eligible task regardless of the root bonus and * oom_score_adj (oom_score_adj can't be OOM_SCORE_ADJ_MIN here). */ return points > 0 ? points : 1; } enum oom_constraint { CONSTRAINT_NONE, CONSTRAINT_CPUSET, CONSTRAINT_MEMORY_POLICY, CONSTRAINT_MEMCG, }; /* * Determine the type of allocation constraint. */ static enum oom_constraint constrained_alloc(struct oom_control *oc) { struct zone *zone; struct zoneref *z; enum zone_type high_zoneidx = gfp_zone(oc->gfp_mask); bool cpuset_limited = false; int nid; if (is_memcg_oom(oc)) { oc->totalpages = mem_cgroup_get_limit(oc->memcg) ?: 1; return CONSTRAINT_MEMCG; } /* Default to all available memory */ oc->totalpages = totalram_pages + total_swap_pages; if (!IS_ENABLED(CONFIG_NUMA)) return CONSTRAINT_NONE; if (!oc->zonelist) return CONSTRAINT_NONE; /* * Reach here only when __GFP_NOFAIL is used. 
So, we should avoid * to kill current.We have to random task kill in this case. * Hopefully, CONSTRAINT_THISNODE...but no way to handle it, now. */ if (oc->gfp_mask & __GFP_THISNODE) return CONSTRAINT_NONE; /* * This is not a __GFP_THISNODE allocation, so a truncated nodemask in * the page allocator means a mempolicy is in effect. Cpuset policy * is enforced in get_page_from_freelist(). */ if (oc->nodemask && !nodes_subset(node_states[N_MEMORY], *oc->nodemask)) { oc->totalpages = total_swap_pages; for_each_node_mask(nid, *oc->nodemask) oc->totalpages += node_spanned_pages(nid); return CONSTRAINT_MEMORY_POLICY; } /* Check this allocation failure is caused by cpuset's wall function */ for_each_zone_zonelist_nodemask(zone, z, oc->zonelist, high_zoneidx, oc->nodemask) if (!cpuset_zone_allowed(zone, oc->gfp_mask)) cpuset_limited = true; if (cpuset_limited) { oc->totalpages = total_swap_pages; for_each_node_mask(nid, cpuset_current_mems_allowed) oc->totalpages += node_spanned_pages(nid); return CONSTRAINT_CPUSET; } return CONSTRAINT_NONE; } static int oom_evaluate_task(struct task_struct *task, void *arg) { struct oom_control *oc = arg; unsigned long points; if (oom_unkillable_task(task, NULL, oc->nodemask)) goto next; /* * This task already has access to memory reserves and is being killed. * Don't allow any other task to have access to the reserves unless * the task has MMF_OOM_SKIP because chances that it would release * any memory is quite low. */ if (!is_sysrq_oom(oc) && tsk_is_oom_victim(task)) { if (test_bit(MMF_OOM_SKIP, &task->signal->oom_mm->flags)) goto next; goto abort; } /* * If task is allocating a lot of memory and has been marked to be * killed first if it triggers an oom, then select it. 
*/ if (oom_task_origin(task)) { points = ULONG_MAX; goto select; } points = oom_badness(task, NULL, oc->nodemask, oc->totalpages); if (!points || points < oc->chosen_points) goto next; /* Prefer thread group leaders for display purposes */ if (points == oc->chosen_points && thread_group_leader(oc->chosen)) goto next; select: if (oc->chosen) put_task_struct(oc->chosen); get_task_struct(task); oc->chosen = task; oc->chosen_points = points; next: return 0; abort: if (oc->chosen) put_task_struct(oc->chosen); oc->chosen = (void *)-1UL; return 1; } /* * Simple selection loop. We choose the process with the highest number of * 'points'. In case scan was aborted, oc->chosen is set to -1. */ static void select_bad_process(struct oom_control *oc) { if (is_memcg_oom(oc)) mem_cgroup_scan_tasks(oc->memcg, oom_evaluate_task, oc); else { struct task_struct *p; rcu_read_lock(); for_each_process(p) if (oom_evaluate_task(p, oc)) break; rcu_read_unlock(); } oc->chosen_points = oc->chosen_points * 1000 / oc->totalpages; } /** * dump_tasks - dump current memory state of all system tasks * @memcg: current's memory controller, if constrained * @nodemask: nodemask passed to page allocator for mempolicy ooms * * Dumps the current memory state of all eligible tasks. Tasks not in the same * memcg, not in the same cpuset, or bound to a disjoint set of mempolicy nodes * are not shown. * State information includes task's pid, uid, tgid, vm size, rss, * pgtables_bytes, swapents, oom_score_adj value, and name. */ static void dump_tasks(struct mem_cgroup *memcg, const nodemask_t *nodemask) { struct task_struct *p; struct task_struct *task; pr_info("[ pid ] uid tgid total_vm rss pgtables_bytes swapents oom_score_adj name\n"); rcu_read_lock(); for_each_process(p) { if (oom_unkillable_task(p, memcg, nodemask)) continue; task = find_lock_task_mm(p); if (!task) { /* * This is a kthread or all of p's threads have already * detached their mm's. 
There's no need to report * them; they can't be oom killed anyway. */ continue; } pr_info("[%5d] %5d %5d %8lu %8lu %8ld %8lu %5hd %s\n", task->pid, from_kuid(&init_user_ns, task_uid(task)), task->tgid, task->mm->total_vm, get_mm_rss(task->mm), mm_pgtables_bytes(task->mm), get_mm_counter(task->mm, MM_SWAPENTS), task->signal->oom_score_adj, task->comm); task_unlock(task); } rcu_read_unlock(); } static void dump_header(struct oom_control *oc, struct task_struct *p) { pr_warn("%s invoked oom-killer: gfp_mask=%#x(%pGg), nodemask=%*pbl, order=%d, oom_score_adj=%hd\n", current->comm, oc->gfp_mask, &oc->gfp_mask, nodemask_pr_args(oc->nodemask), oc->order, current->signal->oom_score_adj); if (!IS_ENABLED(CONFIG_COMPACTION) && oc->order) pr_warn("COMPACTION is disabled!!!\n"); cpuset_print_current_mems_allowed(); dump_stack(); if (is_memcg_oom(oc)) mem_cgroup_print_oom_info(oc->memcg, p); else { show_mem(SHOW_MEM_FILTER_NODES, oc->nodemask); if (is_dump_unreclaim_slabs()) dump_unreclaimable_slab(); } if (sysctl_oom_dump_tasks) dump_tasks(oc->memcg, oc->nodemask); } /* * Number of OOM victims in flight */ static atomic_t oom_victims = ATOMIC_INIT(0); static DECLARE_WAIT_QUEUE_HEAD(oom_victims_wait); static bool oom_killer_disabled __read_mostly; #define K(x) ((x) << (PAGE_SHIFT-10)) /* * task->mm can be NULL if the task is the exited group leader. So to * determine whether the task is using a particular mm, we examine all the * task's threads: if one of those is using this mm then this task was also * using it. */ bool process_shares_mm(struct task_struct *p, struct mm_struct *mm) { struct task_struct *t; for_each_thread(p, t) { struct mm_struct *t_mm = READ_ONCE(t->mm); if (t_mm) return t_mm == mm; } return false; } #ifdef CONFIG_MMU /* * OOM Reaper kernel thread which tries to reap the memory used by the OOM * victim (if that is possible) to help the OOM killer to move on. 
*/ static struct task_struct *oom_reaper_th; static DECLARE_WAIT_QUEUE_HEAD(oom_reaper_wait); static struct task_struct *oom_reaper_list; static DEFINE_SPINLOCK(oom_reaper_lock); static bool __oom_reap_task_mm(struct task_struct *tsk, struct mm_struct *mm) { struct mmu_gather tlb; struct vm_area_struct *vma; bool ret = true; /* * We have to make sure to not race with the victim exit path * and cause premature new oom victim selection: * __oom_reap_task_mm exit_mm * mmget_not_zero * mmput * atomic_dec_and_test * exit_oom_victim * [...] * out_of_memory * select_bad_process * # no TIF_MEMDIE task selects new victim * unmap_page_range # frees some memory */ mutex_lock(&oom_lock); if (!down_read_trylock(&mm->mmap_sem)) { ret = false; trace_skip_task_reaping(tsk->pid); goto unlock_oom; } /* * If the mm has notifiers then we would need to invalidate them around * unmap_page_range and that is risky because notifiers can sleep and * what they do is basically undeterministic. So let's have a short * sleep to give the oom victim some more time. * TODO: we really want to get rid of this ugly hack and make sure that * notifiers cannot block for unbounded amount of time and add * mmu_notifier_invalidate_range_{start,end} around unmap_page_range */ if (mm_has_notifiers(mm)) { up_read(&mm->mmap_sem); schedule_timeout_idle(HZ); goto unlock_oom; } /* * MMF_OOM_SKIP is set by exit_mmap when the OOM reaper can't * work on the mm anymore. The check for MMF_OOM_SKIP must run * under mmap_sem for reading because it serializes against the * down_write();up_write() cycle in exit_mmap(). */ if (test_bit(MMF_OOM_SKIP, &mm->flags)) { up_read(&mm->mmap_sem); trace_skip_task_reaping(tsk->pid); goto unlock_oom; } trace_start_task_reaping(tsk->pid); /* * Tell all users of get_user/copy_from_user etc... that the content * is no longer stable. No barriers really needed because unmapping * should imply barriers already and the reader would hit a page fault * if it stumbled over a reaped memory. 
*/ set_bit(MMF_UNSTABLE, &mm->flags); tlb_gather_mmu(&tlb, mm, 0, -1); for (vma = mm->mmap ; vma; vma = vma->vm_next) { if (!can_madv_dontneed_vma(vma)) continue; /* * Only anonymous pages have a good chance to be dropped * without additional steps which we cannot afford as we * are OOM already. * * We do not even care about fs backed pages because all * which are reclaimable have already been reclaimed and * we do not want to block exit_mmap by keeping mm ref * count elevated without a good reason. */ if (vma_is_anonymous(vma) || !(vma->vm_flags & VM_SHARED)) unmap_page_range(&tlb, vma, vma->vm_start, vma->vm_end, NULL); } tlb_finish_mmu(&tlb, 0, -1); pr_info("oom_reaper: reaped process %d (%s), now anon-rss:%lukB, file-rss:%lukB, shmem-rss:%lukB\n", task_pid_nr(tsk), tsk->comm, K(get_mm_counter(mm, MM_ANONPAGES)), K(get_mm_counter(mm, MM_FILEPAGES)), K(get_mm_counter(mm, MM_SHMEMPAGES))); up_read(&mm->mmap_sem); trace_finish_task_reaping(tsk->pid); unlock_oom: mutex_unlock(&oom_lock); return ret; } #define MAX_OOM_REAP_RETRIES 10 static void oom_reap_task(struct task_struct *tsk) { int attempts = 0; struct mm_struct *mm = tsk->signal->oom_mm; /* Retry the down_read_trylock(mmap_sem) a few times */ while (attempts++ < MAX_OOM_REAP_RETRIES && !__oom_reap_task_mm(tsk, mm)) schedule_timeout_idle(HZ/10); if (attempts <= MAX_OOM_REAP_RETRIES) goto done; pr_info("oom_reaper: unable to reap pid:%d (%s)\n", task_pid_nr(tsk), tsk->comm); debug_show_all_locks(); done: tsk->oom_reaper_list = NULL; /* * Hide this mm from OOM killer because it has been either reaped or * somebody can't call up_write(mmap_sem). 
*/ set_bit(MMF_OOM_SKIP, &mm->flags); /* Drop a reference taken by wake_oom_reaper */ put_task_struct(tsk); } static int oom_reaper(void *unused) { while (true) { struct task_struct *tsk = NULL; wait_event_freezable(oom_reaper_wait, oom_reaper_list != NULL); spin_lock(&oom_reaper_lock); if (oom_reaper_list != NULL) { tsk = oom_reaper_list; oom_reaper_list = tsk->oom_reaper_list; } spin_unlock(&oom_reaper_lock); if (tsk) oom_reap_task(tsk); } return 0; } static void wake_oom_reaper(struct task_struct *tsk) { /* tsk is already queued? */ if (tsk == oom_reaper_list || tsk->oom_reaper_list) return; get_task_struct(tsk); spin_lock(&oom_reaper_lock); tsk->oom_reaper_list = oom_reaper_list; oom_reaper_list = tsk; spin_unlock(&oom_reaper_lock); trace_wake_reaper(tsk->pid); wake_up(&oom_reaper_wait); } static int __init oom_init(void) { oom_reaper_th = kthread_run(oom_reaper, NULL, "oom_reaper"); return 0; } subsys_initcall(oom_init) #else static inline void wake_oom_reaper(struct task_struct *tsk) { } #endif /* CONFIG_MMU */ /** * mark_oom_victim - mark the given task as OOM victim * @tsk: task to mark * * Has to be called with oom_lock held and never after * oom has been disabled already. * * tsk->mm has to be non NULL and caller has to guarantee it is stable (either * under task_lock or operate on the current). */ static void mark_oom_victim(struct task_struct *tsk) { struct mm_struct *mm = tsk->mm; WARN_ON(oom_killer_disabled); /* OOM killer might race with memcg OOM */ if (test_and_set_tsk_thread_flag(tsk, TIF_MEMDIE)) return; /* oom_mm is bound to the signal struct life time. */ if (!cmpxchg(&tsk->signal->oom_mm, NULL, mm)) mmgrab(tsk->signal->oom_mm); /* * Make sure that the task is woken up from uninterruptible sleep * if it is frozen because OOM killer wouldn't be able to free * any memory and livelock. freezing_slow_path will tell the freezer * that TIF_MEMDIE tasks should be ignored. 
*/ __thaw_task(tsk); atomic_inc(&oom_victims); trace_mark_victim(tsk->pid); } /** * exit_oom_victim - note the exit of an OOM victim */ void exit_oom_victim(void) { clear_thread_flag(TIF_MEMDIE); if (!atomic_dec_return(&oom_victims)) wake_up_all(&oom_victims_wait); } /** * oom_killer_enable - enable OOM killer */ void oom_killer_enable(void) { oom_killer_disabled = false; pr_info("OOM killer enabled.\n"); } /** * oom_killer_disable - disable OOM killer * @timeout: maximum timeout to wait for oom victims in jiffies * * Forces all page allocations to fail rather than trigger OOM killer. * Will block and wait until all OOM victims are killed or the given * timeout expires. * * The function cannot be called when there are runnable user tasks because * the userspace would see unexpected allocation failures as a result. Any * new usage of this function should be consulted with MM people. * * Returns true if successful and false if the OOM killer cannot be * disabled. */ bool oom_killer_disable(signed long timeout) { signed long ret; /* * Make sure to not race with an ongoing OOM killer. Check that the * current is not killed (possibly due to sharing the victim's memory). */ if (mutex_lock_killable(&oom_lock)) return false; oom_killer_disabled = true; mutex_unlock(&oom_lock); ret = wait_event_interruptible_timeout(oom_victims_wait, !atomic_read(&oom_victims), timeout); if (ret <= 0) { oom_killer_enable(); return false; } pr_info("OOM killer disabled.\n"); return true; } static inline bool __task_will_free_mem(struct task_struct *task) { struct signal_struct *sig = task->signal; /* * A coredumping process may sleep for an extended period in exit_mm(), * so the oom killer cannot assume that the process will promptly exit * and release memory. 
*/ if (sig->flags & SIGNAL_GROUP_COREDUMP) return false; if (sig->flags & SIGNAL_GROUP_EXIT) return true; if (thread_group_empty(task) && (task->flags & PF_EXITING)) return true; return false; } /* * Checks whether the given task is dying or exiting and likely to * release its address space. This means that all threads and processes * sharing the same mm have to be killed or exiting. * Caller has to make sure that task->mm is stable (hold task_lock or * it operates on the current). */ static bool task_will_free_mem(struct task_struct *task) { struct mm_struct *mm = task->mm; struct task_struct *p; bool ret = true; /* * Skip tasks without mm because it might have passed its exit_mm and * exit_oom_victim. oom_reaper could have rescued that but do not rely * on that for now. We can consider find_lock_task_mm in future. */ if (!mm) return false; if (!__task_will_free_mem(task)) return false; /* * This task has already been drained by the oom reaper so there are * only small chances it will free some more */ if (test_bit(MMF_OOM_SKIP, &mm->flags)) return false; if (atomic_read(&mm->mm_users) <= 1) return true; /* * Make sure that all tasks which share the mm with the given tasks * are dying as well to make sure that a) nobody pins its mm and * b) the task is also reapable by the oom reaper. 
*/ rcu_read_lock(); for_each_process(p) { if (!process_shares_mm(p, mm)) continue; if (same_thread_group(task, p)) continue; ret = __task_will_free_mem(p); if (!ret) break; } rcu_read_unlock(); return ret; } static void oom_kill_process(struct oom_control *oc, const char *message) { struct task_struct *p = oc->chosen; unsigned int points = oc->chosen_points; struct task_struct *victim = p; struct task_struct *child; struct task_struct *t; struct mm_struct *mm; unsigned int victim_points = 0; static DEFINE_RATELIMIT_STATE(oom_rs, DEFAULT_RATELIMIT_INTERVAL, DEFAULT_RATELIMIT_BURST); bool can_oom_reap = true; /* * If the task is already exiting, don't alarm the sysadmin or kill * its children or threads, just give it access to memory reserves * so it can die quickly */ task_lock(p); if (task_will_free_mem(p)) { mark_oom_victim(p); wake_oom_reaper(p); task_unlock(p); put_task_struct(p); return; } task_unlock(p); if (__ratelimit(&oom_rs)) dump_header(oc, p); pr_err("%s: Kill process %d (%s) score %u or sacrifice child\n", message, task_pid_nr(p), p->comm, points); /* * If any of p's children has a different mm and is eligible for kill, * the one with the highest oom_badness() score is sacrificed for its * parent. This attempts to lose the minimal amount of work done while * still freeing memory. 
*/ read_lock(&tasklist_lock); for_each_thread(p, t) { list_for_each_entry(child, &t->children, sibling) { unsigned int child_points; if (process_shares_mm(child, p->mm)) continue; /* * oom_badness() returns 0 if the thread is unkillable */ child_points = oom_badness(child, oc->memcg, oc->nodemask, oc->totalpages); if (child_points > victim_points) { put_task_struct(victim); victim = child; victim_points = child_points; get_task_struct(victim); } } } read_unlock(&tasklist_lock); p = find_lock_task_mm(victim); if (!p) { put_task_struct(victim); return; } else if (victim != p) { get_task_struct(p); put_task_struct(victim); victim = p; } /* Get a reference to safely compare mm after task_unlock(victim) */ mm = victim->mm; mmgrab(mm); /* Raise event before sending signal: task reaper must see this */ count_vm_event(OOM_KILL); count_memcg_event_mm(mm, OOM_KILL); /* * We should send SIGKILL before granting access to memory reserves * in order to prevent the OOM victim from depleting the memory * reserves from the user space under its control. */ do_send_sig_info(SIGKILL, SEND_SIG_FORCED, victim, true); mark_oom_victim(victim); pr_err("Killed process %d (%s) total-vm:%lukB, anon-rss:%lukB, file-rss:%lukB, shmem-rss:%lukB\n", task_pid_nr(victim), victim->comm, K(victim->mm->total_vm), K(get_mm_counter(victim->mm, MM_ANONPAGES)), K(get_mm_counter(victim->mm, MM_FILEPAGES)), K(get_mm_counter(victim->mm, MM_SHMEMPAGES))); task_unlock(victim); /* * Kill all user processes sharing victim->mm in other thread groups, if * any. They don't get access to memory reserves, though, to avoid * depletion of all memory. This prevents mm->mmap_sem livelock when an * oom killed thread cannot exit because it requires the semaphore and * its contended by another thread trying to allocate memory itself. * That thread will now get access to memory reserves since it has a * pending fatal signal. 
*/ rcu_read_lock(); for_each_process(p) { if (!process_shares_mm(p, mm)) continue; if (same_thread_group(p, victim)) continue; if (is_global_init(p)) { can_oom_reap = false; set_bit(MMF_OOM_SKIP, &mm->flags); pr_info("oom killer %d (%s) has mm pinned by %d (%s)\n", task_pid_nr(victim), victim->comm, task_pid_nr(p), p->comm); continue; } /* * No use_mm() user needs to read from the userspace so we are * ok to reap it. */ if (unlikely(p->flags & PF_KTHREAD)) continue; do_send_sig_info(SIGKILL, SEND_SIG_FORCED, p, true); } rcu_read_unlock(); if (can_oom_reap) wake_oom_reaper(victim); mmdrop(mm); put_task_struct(victim); } #undef K /* * Determines whether the kernel must panic because of the panic_on_oom sysctl. */ static void check_panic_on_oom(struct oom_control *oc, enum oom_constraint constraint) { if (likely(!sysctl_panic_on_oom)) return; if (sysctl_panic_on_oom != 2) { /* * panic_on_oom == 1 only affects CONSTRAINT_NONE, the kernel * does not panic for cpuset, mempolicy, or memcg allocation * failures. */ if (constraint != CONSTRAINT_NONE) return; } /* Do not panic for oom kills triggered by sysrq */ if (is_sysrq_oom(oc)) return; dump_header(oc, NULL); panic("Out of memory: %s panic_on_oom is enabled\n", sysctl_panic_on_oom == 2 ? "compulsory" : "system-wide"); } static BLOCKING_NOTIFIER_HEAD(oom_notify_list); int register_oom_notifier(struct notifier_block *nb) { return blocking_notifier_chain_register(&oom_notify_list, nb); } EXPORT_SYMBOL_GPL(register_oom_notifier); int unregister_oom_notifier(struct notifier_block *nb) { return blocking_notifier_chain_unregister(&oom_notify_list, nb); } EXPORT_SYMBOL_GPL(unregister_oom_notifier); /** * out_of_memory - kill the "best" process when we run out of memory * @oc: pointer to struct oom_control * * If we run out of memory, we have the choice between either * killing a random task (bad), letting the system crash (worse) * OR try to be smart about which process to kill. 
Note that we * don't have to be perfect here, we just have to be good. */ bool out_of_memory(struct oom_control *oc) { unsigned long freed = 0; enum oom_constraint constraint = CONSTRAINT_NONE; if (oom_killer_disabled) return false; if (!is_memcg_oom(oc)) { blocking_notifier_call_chain(&oom_notify_list, 0, &freed); if (freed > 0) /* Got some memory back in the last second. */ return true; } /* * If current has a pending SIGKILL or is exiting, then automatically * select it. The goal is to allow it to allocate so that it may * quickly exit and free its memory. */ if (task_will_free_mem(current)) { mark_oom_victim(current); wake_oom_reaper(current); return true; } /* * The OOM killer does not compensate for IO-less reclaim. * pagefault_out_of_memory lost its gfp context so we have to * make sure exclude 0 mask - all other users should have at least * ___GFP_DIRECT_RECLAIM to get here. */ if (oc->gfp_mask && !(oc->gfp_mask & __GFP_FS)) return true; /* * Check if there were limitations on the allocation (only relevant for * NUMA and memcg) that may require different handling. */ constraint = constrained_alloc(oc); if (constraint != CONSTRAINT_MEMORY_POLICY) oc->nodemask = NULL; check_panic_on_oom(oc, constraint); if (!is_memcg_oom(oc) && sysctl_oom_kill_allocating_task && current->mm && !oom_unkillable_task(current, NULL, oc->nodemask) && current->signal->oom_score_adj != OOM_SCORE_ADJ_MIN) { get_task_struct(current); oc->chosen = current; oom_kill_process(oc, "Out of memory (oom_kill_allocating_task)"); return true; } select_bad_process(oc); /* Found nothing?!?! Either we hang forever, or we panic. */ if (!oc->chosen && !is_sysrq_oom(oc) && !is_memcg_oom(oc)) { dump_header(oc, NULL); panic("Out of memory and no killable processes...\n"); } if (oc->chosen && oc->chosen != (void *)-1UL) { oom_kill_process(oc, !is_memcg_oom(oc) ? "Out of memory" : "Memory cgroup out of memory"); /* * Give the killed process a good chance to exit before trying * to allocate memory again. 
*/ schedule_timeout_killable(1); } return !!oc->chosen; } /* * The pagefault handler calls here because it is out of memory, so kill a * memory-hogging task. If oom_lock is held by somebody else, a parallel oom * killing is already in progress so do nothing. */ void pagefault_out_of_memory(void) { struct oom_control oc = { .zonelist = NULL, .nodemask = NULL, .memcg = NULL, .gfp_mask = 0, .order = 0, }; if (mem_cgroup_oom_synchronize(true)) return; if (!mutex_trylock(&oom_lock)) return; out_of_memory(&oc); mutex_unlock(&oom_lock); }
/* * linux/mm/oom_kill.c * * Copyright (C) 1998,2000 Rik van Riel * Thanks go out to Claus Fischer for some serious inspiration and * for goading me into coding this file... * Copyright (C) 2010 Google, Inc. * Rewritten by David Rientjes * * The routines in this file are used to kill a process when * we're seriously out of memory. This gets called from __alloc_pages() * in mm/page_alloc.c when we really run out of memory. * * Since we won't call these routines often (on a well-configured * machine) this file will double as a 'coding guide' and a signpost * for newbie kernel hackers. It features several pointers to major * kernel subsystems and hints as to where to find out what things do. */ #include <linux/oom.h> #include <linux/mm.h> #include <linux/err.h> #include <linux/gfp.h> #include <linux/sched.h> #include <linux/sched/mm.h> #include <linux/sched/coredump.h> #include <linux/sched/task.h> #include <linux/swap.h> #include <linux/timex.h> #include <linux/jiffies.h> #include <linux/cpuset.h> #include <linux/export.h> #include <linux/notifier.h> #include <linux/memcontrol.h> #include <linux/mempolicy.h> #include <linux/security.h> #include <linux/ptrace.h> #include <linux/freezer.h> #include <linux/ftrace.h> #include <linux/ratelimit.h> #include <linux/kthread.h> #include <linux/init.h> #include <linux/mmu_notifier.h> #include <asm/tlb.h> #include "internal.h" #include "slab.h" #define CREATE_TRACE_POINTS #include <trace/events/oom.h> int sysctl_panic_on_oom; int sysctl_oom_kill_allocating_task; int sysctl_oom_dump_tasks = 1; DEFINE_MUTEX(oom_lock); #ifdef CONFIG_NUMA /** * has_intersects_mems_allowed() - check task eligiblity for kill * @start: task struct of which task to consider * @mask: nodemask passed to page allocator for mempolicy ooms * * Task eligibility is determined by whether or not a candidate task, @tsk, * shares the same mempolicy nodes as current if it is bound by such a policy * and whether or not it has the same set of allowed cpuset nodes. 
*/ static bool has_intersects_mems_allowed(struct task_struct *start, const nodemask_t *mask) { struct task_struct *tsk; bool ret = false; rcu_read_lock(); for_each_thread(start, tsk) { if (mask) { /* * If this is a mempolicy constrained oom, tsk's * cpuset is irrelevant. Only return true if its * mempolicy intersects current, otherwise it may be * needlessly killed. */ ret = mempolicy_nodemask_intersects(tsk, mask); } else { /* * This is not a mempolicy constrained oom, so only * check the mems of tsk's cpuset. */ ret = cpuset_mems_allowed_intersects(current, tsk); } if (ret) break; } rcu_read_unlock(); return ret; } #else static bool has_intersects_mems_allowed(struct task_struct *tsk, const nodemask_t *mask) { return true; } #endif /* CONFIG_NUMA */ /* * The process p may have detached its own ->mm while exiting or through * use_mm(), but one or more of its subthreads may still have a valid * pointer. Return p, or any of its subthreads with a valid ->mm, with * task_lock() held. */ struct task_struct *find_lock_task_mm(struct task_struct *p) { struct task_struct *t; rcu_read_lock(); for_each_thread(p, t) { task_lock(t); if (likely(t->mm)) goto found; task_unlock(t); } t = NULL; found: rcu_read_unlock(); return t; } /* * order == -1 means the oom kill is required by sysrq, otherwise only * for display purposes. */ static inline bool is_sysrq_oom(struct oom_control *oc) { return oc->order == -1; } static inline bool is_memcg_oom(struct oom_control *oc) { return oc->memcg != NULL; } /* return true if the task is not adequate as candidate victim task. 
*/ static bool oom_unkillable_task(struct task_struct *p, struct mem_cgroup *memcg, const nodemask_t *nodemask) { if (is_global_init(p)) return true; if (p->flags & PF_KTHREAD) return true; /* When mem_cgroup_out_of_memory() and p is not member of the group */ if (memcg && !task_in_mem_cgroup(p, memcg)) return true; /* p may not have freeable memory in nodemask */ if (!has_intersects_mems_allowed(p, nodemask)) return true; return false; } /* * Print out unreclaimble slabs info when unreclaimable slabs amount is greater * than all user memory (LRU pages) */ static bool is_dump_unreclaim_slabs(void) { unsigned long nr_lru; nr_lru = global_node_page_state(NR_ACTIVE_ANON) + global_node_page_state(NR_INACTIVE_ANON) + global_node_page_state(NR_ACTIVE_FILE) + global_node_page_state(NR_INACTIVE_FILE) + global_node_page_state(NR_ISOLATED_ANON) + global_node_page_state(NR_ISOLATED_FILE) + global_node_page_state(NR_UNEVICTABLE); return (global_node_page_state(NR_SLAB_UNRECLAIMABLE) > nr_lru); } /** * oom_badness - heuristic function to determine which candidate task to kill * @p: task struct of which task we should calculate * @totalpages: total present RAM allowed for page allocation * * The heuristic for determining which task to kill is made to be as simple and * predictable as possible. The goal is to return the highest value for the * task consuming the most memory to avoid subsequent oom failures. 
*/ unsigned long oom_badness(struct task_struct *p, struct mem_cgroup *memcg, const nodemask_t *nodemask, unsigned long totalpages) { long points; long adj; if (oom_unkillable_task(p, memcg, nodemask)) return 0; p = find_lock_task_mm(p); if (!p) return 0; /* * Do not even consider tasks which are explicitly marked oom * unkillable or have been already oom reaped or the are in * the middle of vfork */ adj = (long)p->signal->oom_score_adj; if (adj == OOM_SCORE_ADJ_MIN || test_bit(MMF_OOM_SKIP, &p->mm->flags) || in_vfork(p)) { task_unlock(p); return 0; } /* * The baseline for the badness score is the proportion of RAM that each * task's rss, pagetable and swap space use. */ points = get_mm_rss(p->mm) + get_mm_counter(p->mm, MM_SWAPENTS) + mm_pgtables_bytes(p->mm) / PAGE_SIZE; task_unlock(p); /* * Root processes get 3% bonus, just like the __vm_enough_memory() * implementation used by LSMs. */ if (has_capability_noaudit(p, CAP_SYS_ADMIN)) points -= (points * 3) / 100; /* Normalize to oom_score_adj units */ adj *= totalpages / 1000; points += adj; /* * Never return 0 for an eligible task regardless of the root bonus and * oom_score_adj (oom_score_adj can't be OOM_SCORE_ADJ_MIN here). */ return points > 0 ? points : 1; } enum oom_constraint { CONSTRAINT_NONE, CONSTRAINT_CPUSET, CONSTRAINT_MEMORY_POLICY, CONSTRAINT_MEMCG, }; /* * Determine the type of allocation constraint. */ static enum oom_constraint constrained_alloc(struct oom_control *oc) { struct zone *zone; struct zoneref *z; enum zone_type high_zoneidx = gfp_zone(oc->gfp_mask); bool cpuset_limited = false; int nid; if (is_memcg_oom(oc)) { oc->totalpages = mem_cgroup_get_limit(oc->memcg) ?: 1; return CONSTRAINT_MEMCG; } /* Default to all available memory */ oc->totalpages = totalram_pages + total_swap_pages; if (!IS_ENABLED(CONFIG_NUMA)) return CONSTRAINT_NONE; if (!oc->zonelist) return CONSTRAINT_NONE; /* * Reach here only when __GFP_NOFAIL is used. 
So, we should avoid * to kill current.We have to random task kill in this case. * Hopefully, CONSTRAINT_THISNODE...but no way to handle it, now. */ if (oc->gfp_mask & __GFP_THISNODE) return CONSTRAINT_NONE; /* * This is not a __GFP_THISNODE allocation, so a truncated nodemask in * the page allocator means a mempolicy is in effect. Cpuset policy * is enforced in get_page_from_freelist(). */ if (oc->nodemask && !nodes_subset(node_states[N_MEMORY], *oc->nodemask)) { oc->totalpages = total_swap_pages; for_each_node_mask(nid, *oc->nodemask) oc->totalpages += node_spanned_pages(nid); return CONSTRAINT_MEMORY_POLICY; } /* Check this allocation failure is caused by cpuset's wall function */ for_each_zone_zonelist_nodemask(zone, z, oc->zonelist, high_zoneidx, oc->nodemask) if (!cpuset_zone_allowed(zone, oc->gfp_mask)) cpuset_limited = true; if (cpuset_limited) { oc->totalpages = total_swap_pages; for_each_node_mask(nid, cpuset_current_mems_allowed) oc->totalpages += node_spanned_pages(nid); return CONSTRAINT_CPUSET; } return CONSTRAINT_NONE; } static int oom_evaluate_task(struct task_struct *task, void *arg) { struct oom_control *oc = arg; unsigned long points; if (oom_unkillable_task(task, NULL, oc->nodemask)) goto next; /* * This task already has access to memory reserves and is being killed. * Don't allow any other task to have access to the reserves unless * the task has MMF_OOM_SKIP because chances that it would release * any memory is quite low. */ if (!is_sysrq_oom(oc) && tsk_is_oom_victim(task)) { if (test_bit(MMF_OOM_SKIP, &task->signal->oom_mm->flags)) goto next; goto abort; } /* * If task is allocating a lot of memory and has been marked to be * killed first if it triggers an oom, then select it. 
*/ if (oom_task_origin(task)) { points = ULONG_MAX; goto select; } points = oom_badness(task, NULL, oc->nodemask, oc->totalpages); if (!points || points < oc->chosen_points) goto next; /* Prefer thread group leaders for display purposes */ if (points == oc->chosen_points && thread_group_leader(oc->chosen)) goto next; select: if (oc->chosen) put_task_struct(oc->chosen); get_task_struct(task); oc->chosen = task; oc->chosen_points = points; next: return 0; abort: if (oc->chosen) put_task_struct(oc->chosen); oc->chosen = (void *)-1UL; return 1; } /* * Simple selection loop. We choose the process with the highest number of * 'points'. In case scan was aborted, oc->chosen is set to -1. */ static void select_bad_process(struct oom_control *oc) { if (is_memcg_oom(oc)) mem_cgroup_scan_tasks(oc->memcg, oom_evaluate_task, oc); else { struct task_struct *p; rcu_read_lock(); for_each_process(p) if (oom_evaluate_task(p, oc)) break; rcu_read_unlock(); } oc->chosen_points = oc->chosen_points * 1000 / oc->totalpages; } /** * dump_tasks - dump current memory state of all system tasks * @memcg: current's memory controller, if constrained * @nodemask: nodemask passed to page allocator for mempolicy ooms * * Dumps the current memory state of all eligible tasks. Tasks not in the same * memcg, not in the same cpuset, or bound to a disjoint set of mempolicy nodes * are not shown. * State information includes task's pid, uid, tgid, vm size, rss, * pgtables_bytes, swapents, oom_score_adj value, and name. */ static void dump_tasks(struct mem_cgroup *memcg, const nodemask_t *nodemask) { struct task_struct *p; struct task_struct *task; pr_info("[ pid ] uid tgid total_vm rss pgtables_bytes swapents oom_score_adj name\n"); rcu_read_lock(); for_each_process(p) { if (oom_unkillable_task(p, memcg, nodemask)) continue; task = find_lock_task_mm(p); if (!task) { /* * This is a kthread or all of p's threads have already * detached their mm's. 
There's no need to report * them; they can't be oom killed anyway. */ continue; } pr_info("[%5d] %5d %5d %8lu %8lu %8ld %8lu %5hd %s\n", task->pid, from_kuid(&init_user_ns, task_uid(task)), task->tgid, task->mm->total_vm, get_mm_rss(task->mm), mm_pgtables_bytes(task->mm), get_mm_counter(task->mm, MM_SWAPENTS), task->signal->oom_score_adj, task->comm); task_unlock(task); } rcu_read_unlock(); } static void dump_header(struct oom_control *oc, struct task_struct *p) { pr_warn("%s invoked oom-killer: gfp_mask=%#x(%pGg), nodemask=%*pbl, order=%d, oom_score_adj=%hd\n", current->comm, oc->gfp_mask, &oc->gfp_mask, nodemask_pr_args(oc->nodemask), oc->order, current->signal->oom_score_adj); if (!IS_ENABLED(CONFIG_COMPACTION) && oc->order) pr_warn("COMPACTION is disabled!!!\n"); cpuset_print_current_mems_allowed(); dump_stack(); if (is_memcg_oom(oc)) mem_cgroup_print_oom_info(oc->memcg, p); else { show_mem(SHOW_MEM_FILTER_NODES, oc->nodemask); if (is_dump_unreclaim_slabs()) dump_unreclaimable_slab(); } if (sysctl_oom_dump_tasks) dump_tasks(oc->memcg, oc->nodemask); } /* * Number of OOM victims in flight */ static atomic_t oom_victims = ATOMIC_INIT(0); static DECLARE_WAIT_QUEUE_HEAD(oom_victims_wait); static bool oom_killer_disabled __read_mostly; #define K(x) ((x) << (PAGE_SHIFT-10)) /* * task->mm can be NULL if the task is the exited group leader. So to * determine whether the task is using a particular mm, we examine all the * task's threads: if one of those is using this mm then this task was also * using it. */ bool process_shares_mm(struct task_struct *p, struct mm_struct *mm) { struct task_struct *t; for_each_thread(p, t) { struct mm_struct *t_mm = READ_ONCE(t->mm); if (t_mm) return t_mm == mm; } return false; } #ifdef CONFIG_MMU /* * OOM Reaper kernel thread which tries to reap the memory used by the OOM * victim (if that is possible) to help the OOM killer to move on. 
*/ static struct task_struct *oom_reaper_th; static DECLARE_WAIT_QUEUE_HEAD(oom_reaper_wait); static struct task_struct *oom_reaper_list; static DEFINE_SPINLOCK(oom_reaper_lock); static bool __oom_reap_task_mm(struct task_struct *tsk, struct mm_struct *mm) { struct mmu_gather tlb; struct vm_area_struct *vma; bool ret = true; /* * We have to make sure to not race with the victim exit path * and cause premature new oom victim selection: * __oom_reap_task_mm exit_mm * mmget_not_zero * mmput * atomic_dec_and_test * exit_oom_victim * [...] * out_of_memory * select_bad_process * # no TIF_MEMDIE task selects new victim * unmap_page_range # frees some memory */ mutex_lock(&oom_lock); if (!down_read_trylock(&mm->mmap_sem)) { ret = false; trace_skip_task_reaping(tsk->pid); goto unlock_oom; } /* * If the mm has notifiers then we would need to invalidate them around * unmap_page_range and that is risky because notifiers can sleep and * what they do is basically undeterministic. So let's have a short * sleep to give the oom victim some more time. * TODO: we really want to get rid of this ugly hack and make sure that * notifiers cannot block for unbounded amount of time and add * mmu_notifier_invalidate_range_{start,end} around unmap_page_range */ if (mm_has_notifiers(mm)) { up_read(&mm->mmap_sem); schedule_timeout_idle(HZ); goto unlock_oom; } /* * MMF_OOM_SKIP is set by exit_mmap when the OOM reaper can't * work on the mm anymore. The check for MMF_OOM_SKIP must run * under mmap_sem for reading because it serializes against the * down_write();up_write() cycle in exit_mmap(). */ if (test_bit(MMF_OOM_SKIP, &mm->flags)) { up_read(&mm->mmap_sem); trace_skip_task_reaping(tsk->pid); goto unlock_oom; } trace_start_task_reaping(tsk->pid); /* * Tell all users of get_user/copy_from_user etc... that the content * is no longer stable. No barriers really needed because unmapping * should imply barriers already and the reader would hit a page fault * if it stumbled over a reaped memory. 
*/ set_bit(MMF_UNSTABLE, &mm->flags); for (vma = mm->mmap ; vma; vma = vma->vm_next) { if (!can_madv_dontneed_vma(vma)) continue; /* * Only anonymous pages have a good chance to be dropped * without additional steps which we cannot afford as we * are OOM already. * * We do not even care about fs backed pages because all * which are reclaimable have already been reclaimed and * we do not want to block exit_mmap by keeping mm ref * count elevated without a good reason. */ if (vma_is_anonymous(vma) || !(vma->vm_flags & VM_SHARED)) { tlb_gather_mmu(&tlb, mm, vma->vm_start, vma->vm_end); unmap_page_range(&tlb, vma, vma->vm_start, vma->vm_end, NULL); tlb_finish_mmu(&tlb, vma->vm_start, vma->vm_end); } } pr_info("oom_reaper: reaped process %d (%s), now anon-rss:%lukB, file-rss:%lukB, shmem-rss:%lukB\n", task_pid_nr(tsk), tsk->comm, K(get_mm_counter(mm, MM_ANONPAGES)), K(get_mm_counter(mm, MM_FILEPAGES)), K(get_mm_counter(mm, MM_SHMEMPAGES))); up_read(&mm->mmap_sem); trace_finish_task_reaping(tsk->pid); unlock_oom: mutex_unlock(&oom_lock); return ret; } #define MAX_OOM_REAP_RETRIES 10 static void oom_reap_task(struct task_struct *tsk) { int attempts = 0; struct mm_struct *mm = tsk->signal->oom_mm; /* Retry the down_read_trylock(mmap_sem) a few times */ while (attempts++ < MAX_OOM_REAP_RETRIES && !__oom_reap_task_mm(tsk, mm)) schedule_timeout_idle(HZ/10); if (attempts <= MAX_OOM_REAP_RETRIES) goto done; pr_info("oom_reaper: unable to reap pid:%d (%s)\n", task_pid_nr(tsk), tsk->comm); debug_show_all_locks(); done: tsk->oom_reaper_list = NULL; /* * Hide this mm from OOM killer because it has been either reaped or * somebody can't call up_write(mmap_sem). 
*/ set_bit(MMF_OOM_SKIP, &mm->flags); /* Drop a reference taken by wake_oom_reaper */ put_task_struct(tsk); } static int oom_reaper(void *unused) { while (true) { struct task_struct *tsk = NULL; wait_event_freezable(oom_reaper_wait, oom_reaper_list != NULL); spin_lock(&oom_reaper_lock); if (oom_reaper_list != NULL) { tsk = oom_reaper_list; oom_reaper_list = tsk->oom_reaper_list; } spin_unlock(&oom_reaper_lock); if (tsk) oom_reap_task(tsk); } return 0; } static void wake_oom_reaper(struct task_struct *tsk) { /* tsk is already queued? */ if (tsk == oom_reaper_list || tsk->oom_reaper_list) return; get_task_struct(tsk); spin_lock(&oom_reaper_lock); tsk->oom_reaper_list = oom_reaper_list; oom_reaper_list = tsk; spin_unlock(&oom_reaper_lock); trace_wake_reaper(tsk->pid); wake_up(&oom_reaper_wait); } static int __init oom_init(void) { oom_reaper_th = kthread_run(oom_reaper, NULL, "oom_reaper"); return 0; } subsys_initcall(oom_init) #else static inline void wake_oom_reaper(struct task_struct *tsk) { } #endif /* CONFIG_MMU */ /** * mark_oom_victim - mark the given task as OOM victim * @tsk: task to mark * * Has to be called with oom_lock held and never after * oom has been disabled already. * * tsk->mm has to be non NULL and caller has to guarantee it is stable (either * under task_lock or operate on the current). */ static void mark_oom_victim(struct task_struct *tsk) { struct mm_struct *mm = tsk->mm; WARN_ON(oom_killer_disabled); /* OOM killer might race with memcg OOM */ if (test_and_set_tsk_thread_flag(tsk, TIF_MEMDIE)) return; /* oom_mm is bound to the signal struct life time. */ if (!cmpxchg(&tsk->signal->oom_mm, NULL, mm)) mmgrab(tsk->signal->oom_mm); /* * Make sure that the task is woken up from uninterruptible sleep * if it is frozen because OOM killer wouldn't be able to free * any memory and livelock. freezing_slow_path will tell the freezer * that TIF_MEMDIE tasks should be ignored. 
*/ __thaw_task(tsk); atomic_inc(&oom_victims); trace_mark_victim(tsk->pid); } /** * exit_oom_victim - note the exit of an OOM victim */ void exit_oom_victim(void) { clear_thread_flag(TIF_MEMDIE); if (!atomic_dec_return(&oom_victims)) wake_up_all(&oom_victims_wait); } /** * oom_killer_enable - enable OOM killer */ void oom_killer_enable(void) { oom_killer_disabled = false; pr_info("OOM killer enabled.\n"); } /** * oom_killer_disable - disable OOM killer * @timeout: maximum timeout to wait for oom victims in jiffies * * Forces all page allocations to fail rather than trigger OOM killer. * Will block and wait until all OOM victims are killed or the given * timeout expires. * * The function cannot be called when there are runnable user tasks because * the userspace would see unexpected allocation failures as a result. Any * new usage of this function should be consulted with MM people. * * Returns true if successful and false if the OOM killer cannot be * disabled. */ bool oom_killer_disable(signed long timeout) { signed long ret; /* * Make sure to not race with an ongoing OOM killer. Check that the * current is not killed (possibly due to sharing the victim's memory). */ if (mutex_lock_killable(&oom_lock)) return false; oom_killer_disabled = true; mutex_unlock(&oom_lock); ret = wait_event_interruptible_timeout(oom_victims_wait, !atomic_read(&oom_victims), timeout); if (ret <= 0) { oom_killer_enable(); return false; } pr_info("OOM killer disabled.\n"); return true; } static inline bool __task_will_free_mem(struct task_struct *task) { struct signal_struct *sig = task->signal; /* * A coredumping process may sleep for an extended period in exit_mm(), * so the oom killer cannot assume that the process will promptly exit * and release memory. 
*/ if (sig->flags & SIGNAL_GROUP_COREDUMP) return false; if (sig->flags & SIGNAL_GROUP_EXIT) return true; if (thread_group_empty(task) && (task->flags & PF_EXITING)) return true; return false; } /* * Checks whether the given task is dying or exiting and likely to * release its address space. This means that all threads and processes * sharing the same mm have to be killed or exiting. * Caller has to make sure that task->mm is stable (hold task_lock or * it operates on the current). */ static bool task_will_free_mem(struct task_struct *task) { struct mm_struct *mm = task->mm; struct task_struct *p; bool ret = true; /* * Skip tasks without mm because it might have passed its exit_mm and * exit_oom_victim. oom_reaper could have rescued that but do not rely * on that for now. We can consider find_lock_task_mm in future. */ if (!mm) return false; if (!__task_will_free_mem(task)) return false; /* * This task has already been drained by the oom reaper so there are * only small chances it will free some more */ if (test_bit(MMF_OOM_SKIP, &mm->flags)) return false; if (atomic_read(&mm->mm_users) <= 1) return true; /* * Make sure that all tasks which share the mm with the given tasks * are dying as well to make sure that a) nobody pins its mm and * b) the task is also reapable by the oom reaper. 
*/ rcu_read_lock(); for_each_process(p) { if (!process_shares_mm(p, mm)) continue; if (same_thread_group(task, p)) continue; ret = __task_will_free_mem(p); if (!ret) break; } rcu_read_unlock(); return ret; } static void oom_kill_process(struct oom_control *oc, const char *message) { struct task_struct *p = oc->chosen; unsigned int points = oc->chosen_points; struct task_struct *victim = p; struct task_struct *child; struct task_struct *t; struct mm_struct *mm; unsigned int victim_points = 0; static DEFINE_RATELIMIT_STATE(oom_rs, DEFAULT_RATELIMIT_INTERVAL, DEFAULT_RATELIMIT_BURST); bool can_oom_reap = true; /* * If the task is already exiting, don't alarm the sysadmin or kill * its children or threads, just give it access to memory reserves * so it can die quickly */ task_lock(p); if (task_will_free_mem(p)) { mark_oom_victim(p); wake_oom_reaper(p); task_unlock(p); put_task_struct(p); return; } task_unlock(p); if (__ratelimit(&oom_rs)) dump_header(oc, p); pr_err("%s: Kill process %d (%s) score %u or sacrifice child\n", message, task_pid_nr(p), p->comm, points); /* * If any of p's children has a different mm and is eligible for kill, * the one with the highest oom_badness() score is sacrificed for its * parent. This attempts to lose the minimal amount of work done while * still freeing memory. 
*/ read_lock(&tasklist_lock); for_each_thread(p, t) { list_for_each_entry(child, &t->children, sibling) { unsigned int child_points; if (process_shares_mm(child, p->mm)) continue; /* * oom_badness() returns 0 if the thread is unkillable */ child_points = oom_badness(child, oc->memcg, oc->nodemask, oc->totalpages); if (child_points > victim_points) { put_task_struct(victim); victim = child; victim_points = child_points; get_task_struct(victim); } } } read_unlock(&tasklist_lock); p = find_lock_task_mm(victim); if (!p) { put_task_struct(victim); return; } else if (victim != p) { get_task_struct(p); put_task_struct(victim); victim = p; } /* Get a reference to safely compare mm after task_unlock(victim) */ mm = victim->mm; mmgrab(mm); /* Raise event before sending signal: task reaper must see this */ count_vm_event(OOM_KILL); count_memcg_event_mm(mm, OOM_KILL); /* * We should send SIGKILL before granting access to memory reserves * in order to prevent the OOM victim from depleting the memory * reserves from the user space under its control. */ do_send_sig_info(SIGKILL, SEND_SIG_FORCED, victim, true); mark_oom_victim(victim); pr_err("Killed process %d (%s) total-vm:%lukB, anon-rss:%lukB, file-rss:%lukB, shmem-rss:%lukB\n", task_pid_nr(victim), victim->comm, K(victim->mm->total_vm), K(get_mm_counter(victim->mm, MM_ANONPAGES)), K(get_mm_counter(victim->mm, MM_FILEPAGES)), K(get_mm_counter(victim->mm, MM_SHMEMPAGES))); task_unlock(victim); /* * Kill all user processes sharing victim->mm in other thread groups, if * any. They don't get access to memory reserves, though, to avoid * depletion of all memory. This prevents mm->mmap_sem livelock when an * oom killed thread cannot exit because it requires the semaphore and * its contended by another thread trying to allocate memory itself. * That thread will now get access to memory reserves since it has a * pending fatal signal. 
*/ rcu_read_lock(); for_each_process(p) { if (!process_shares_mm(p, mm)) continue; if (same_thread_group(p, victim)) continue; if (is_global_init(p)) { can_oom_reap = false; set_bit(MMF_OOM_SKIP, &mm->flags); pr_info("oom killer %d (%s) has mm pinned by %d (%s)\n", task_pid_nr(victim), victim->comm, task_pid_nr(p), p->comm); continue; } /* * No use_mm() user needs to read from the userspace so we are * ok to reap it. */ if (unlikely(p->flags & PF_KTHREAD)) continue; do_send_sig_info(SIGKILL, SEND_SIG_FORCED, p, true); } rcu_read_unlock(); if (can_oom_reap) wake_oom_reaper(victim); mmdrop(mm); put_task_struct(victim); } #undef K /* * Determines whether the kernel must panic because of the panic_on_oom sysctl. */ static void check_panic_on_oom(struct oom_control *oc, enum oom_constraint constraint) { if (likely(!sysctl_panic_on_oom)) return; if (sysctl_panic_on_oom != 2) { /* * panic_on_oom == 1 only affects CONSTRAINT_NONE, the kernel * does not panic for cpuset, mempolicy, or memcg allocation * failures. */ if (constraint != CONSTRAINT_NONE) return; } /* Do not panic for oom kills triggered by sysrq */ if (is_sysrq_oom(oc)) return; dump_header(oc, NULL); panic("Out of memory: %s panic_on_oom is enabled\n", sysctl_panic_on_oom == 2 ? "compulsory" : "system-wide"); } static BLOCKING_NOTIFIER_HEAD(oom_notify_list); int register_oom_notifier(struct notifier_block *nb) { return blocking_notifier_chain_register(&oom_notify_list, nb); } EXPORT_SYMBOL_GPL(register_oom_notifier); int unregister_oom_notifier(struct notifier_block *nb) { return blocking_notifier_chain_unregister(&oom_notify_list, nb); } EXPORT_SYMBOL_GPL(unregister_oom_notifier); /** * out_of_memory - kill the "best" process when we run out of memory * @oc: pointer to struct oom_control * * If we run out of memory, we have the choice between either * killing a random task (bad), letting the system crash (worse) * OR try to be smart about which process to kill. 
Note that we * don't have to be perfect here, we just have to be good. */ bool out_of_memory(struct oom_control *oc) { unsigned long freed = 0; enum oom_constraint constraint = CONSTRAINT_NONE; if (oom_killer_disabled) return false; if (!is_memcg_oom(oc)) { blocking_notifier_call_chain(&oom_notify_list, 0, &freed); if (freed > 0) /* Got some memory back in the last second. */ return true; } /* * If current has a pending SIGKILL or is exiting, then automatically * select it. The goal is to allow it to allocate so that it may * quickly exit and free its memory. */ if (task_will_free_mem(current)) { mark_oom_victim(current); wake_oom_reaper(current); return true; } /* * The OOM killer does not compensate for IO-less reclaim. * pagefault_out_of_memory lost its gfp context so we have to * make sure exclude 0 mask - all other users should have at least * ___GFP_DIRECT_RECLAIM to get here. */ if (oc->gfp_mask && !(oc->gfp_mask & __GFP_FS)) return true; /* * Check if there were limitations on the allocation (only relevant for * NUMA and memcg) that may require different handling. */ constraint = constrained_alloc(oc); if (constraint != CONSTRAINT_MEMORY_POLICY) oc->nodemask = NULL; check_panic_on_oom(oc, constraint); if (!is_memcg_oom(oc) && sysctl_oom_kill_allocating_task && current->mm && !oom_unkillable_task(current, NULL, oc->nodemask) && current->signal->oom_score_adj != OOM_SCORE_ADJ_MIN) { get_task_struct(current); oc->chosen = current; oom_kill_process(oc, "Out of memory (oom_kill_allocating_task)"); return true; } select_bad_process(oc); /* Found nothing?!?! Either we hang forever, or we panic. */ if (!oc->chosen && !is_sysrq_oom(oc) && !is_memcg_oom(oc)) { dump_header(oc, NULL); panic("Out of memory and no killable processes...\n"); } if (oc->chosen && oc->chosen != (void *)-1UL) { oom_kill_process(oc, !is_memcg_oom(oc) ? "Out of memory" : "Memory cgroup out of memory"); /* * Give the killed process a good chance to exit before trying * to allocate memory again. 
*/ schedule_timeout_killable(1); } return !!oc->chosen; } /* * The pagefault handler calls here because it is out of memory, so kill a * memory-hogging task. If oom_lock is held by somebody else, a parallel oom * killing is already in progress so do nothing. */ void pagefault_out_of_memory(void) { struct oom_control oc = { .zonelist = NULL, .nodemask = NULL, .memcg = NULL, .gfp_mask = 0, .order = 0, }; if (mem_cgroup_oom_synchronize(true)) return; if (!mutex_trylock(&oom_lock)) return; out_of_memory(&oc); mutex_unlock(&oom_lock); }
static bool __oom_reap_task_mm(struct task_struct *tsk, struct mm_struct *mm) { struct mmu_gather tlb; struct vm_area_struct *vma; bool ret = true; /* * We have to make sure to not race with the victim exit path * and cause premature new oom victim selection: * __oom_reap_task_mm exit_mm * mmget_not_zero * mmput * atomic_dec_and_test * exit_oom_victim * [...] * out_of_memory * select_bad_process * # no TIF_MEMDIE task selects new victim * unmap_page_range # frees some memory */ mutex_lock(&oom_lock); if (!down_read_trylock(&mm->mmap_sem)) { ret = false; trace_skip_task_reaping(tsk->pid); goto unlock_oom; } /* * If the mm has notifiers then we would need to invalidate them around * unmap_page_range and that is risky because notifiers can sleep and * what they do is basically undeterministic. So let's have a short * sleep to give the oom victim some more time. * TODO: we really want to get rid of this ugly hack and make sure that * notifiers cannot block for unbounded amount of time and add * mmu_notifier_invalidate_range_{start,end} around unmap_page_range */ if (mm_has_notifiers(mm)) { up_read(&mm->mmap_sem); schedule_timeout_idle(HZ); goto unlock_oom; } /* * MMF_OOM_SKIP is set by exit_mmap when the OOM reaper can't * work on the mm anymore. The check for MMF_OOM_SKIP must run * under mmap_sem for reading because it serializes against the * down_write();up_write() cycle in exit_mmap(). */ if (test_bit(MMF_OOM_SKIP, &mm->flags)) { up_read(&mm->mmap_sem); trace_skip_task_reaping(tsk->pid); goto unlock_oom; } trace_start_task_reaping(tsk->pid); /* * Tell all users of get_user/copy_from_user etc... that the content * is no longer stable. No barriers really needed because unmapping * should imply barriers already and the reader would hit a page fault * if it stumbled over a reaped memory. 
*/ set_bit(MMF_UNSTABLE, &mm->flags); tlb_gather_mmu(&tlb, mm, 0, -1); for (vma = mm->mmap ; vma; vma = vma->vm_next) { if (!can_madv_dontneed_vma(vma)) continue; /* * Only anonymous pages have a good chance to be dropped * without additional steps which we cannot afford as we * are OOM already. * * We do not even care about fs backed pages because all * which are reclaimable have already been reclaimed and * we do not want to block exit_mmap by keeping mm ref * count elevated without a good reason. */ if (vma_is_anonymous(vma) || !(vma->vm_flags & VM_SHARED)) unmap_page_range(&tlb, vma, vma->vm_start, vma->vm_end, NULL); } tlb_finish_mmu(&tlb, 0, -1); pr_info("oom_reaper: reaped process %d (%s), now anon-rss:%lukB, file-rss:%lukB, shmem-rss:%lukB\n", task_pid_nr(tsk), tsk->comm, K(get_mm_counter(mm, MM_ANONPAGES)), K(get_mm_counter(mm, MM_FILEPAGES)), K(get_mm_counter(mm, MM_SHMEMPAGES))); up_read(&mm->mmap_sem); trace_finish_task_reaping(tsk->pid); unlock_oom: mutex_unlock(&oom_lock); return ret; }
static bool __oom_reap_task_mm(struct task_struct *tsk, struct mm_struct *mm) { struct mmu_gather tlb; struct vm_area_struct *vma; bool ret = true; /* * We have to make sure to not race with the victim exit path * and cause premature new oom victim selection: * __oom_reap_task_mm exit_mm * mmget_not_zero * mmput * atomic_dec_and_test * exit_oom_victim * [...] * out_of_memory * select_bad_process * # no TIF_MEMDIE task selects new victim * unmap_page_range # frees some memory */ mutex_lock(&oom_lock); if (!down_read_trylock(&mm->mmap_sem)) { ret = false; trace_skip_task_reaping(tsk->pid); goto unlock_oom; } /* * If the mm has notifiers then we would need to invalidate them around * unmap_page_range and that is risky because notifiers can sleep and * what they do is basically undeterministic. So let's have a short * sleep to give the oom victim some more time. * TODO: we really want to get rid of this ugly hack and make sure that * notifiers cannot block for unbounded amount of time and add * mmu_notifier_invalidate_range_{start,end} around unmap_page_range */ if (mm_has_notifiers(mm)) { up_read(&mm->mmap_sem); schedule_timeout_idle(HZ); goto unlock_oom; } /* * MMF_OOM_SKIP is set by exit_mmap when the OOM reaper can't * work on the mm anymore. The check for MMF_OOM_SKIP must run * under mmap_sem for reading because it serializes against the * down_write();up_write() cycle in exit_mmap(). */ if (test_bit(MMF_OOM_SKIP, &mm->flags)) { up_read(&mm->mmap_sem); trace_skip_task_reaping(tsk->pid); goto unlock_oom; } trace_start_task_reaping(tsk->pid); /* * Tell all users of get_user/copy_from_user etc... that the content * is no longer stable. No barriers really needed because unmapping * should imply barriers already and the reader would hit a page fault * if it stumbled over a reaped memory. 
*/ set_bit(MMF_UNSTABLE, &mm->flags); for (vma = mm->mmap ; vma; vma = vma->vm_next) { if (!can_madv_dontneed_vma(vma)) continue; /* * Only anonymous pages have a good chance to be dropped * without additional steps which we cannot afford as we * are OOM already. * * We do not even care about fs backed pages because all * which are reclaimable have already been reclaimed and * we do not want to block exit_mmap by keeping mm ref * count elevated without a good reason. */ if (vma_is_anonymous(vma) || !(vma->vm_flags & VM_SHARED)) { tlb_gather_mmu(&tlb, mm, vma->vm_start, vma->vm_end); unmap_page_range(&tlb, vma, vma->vm_start, vma->vm_end, NULL); tlb_finish_mmu(&tlb, vma->vm_start, vma->vm_end); } } pr_info("oom_reaper: reaped process %d (%s), now anon-rss:%lukB, file-rss:%lukB, shmem-rss:%lukB\n", task_pid_nr(tsk), tsk->comm, K(get_mm_counter(mm, MM_ANONPAGES)), K(get_mm_counter(mm, MM_FILEPAGES)), K(get_mm_counter(mm, MM_SHMEMPAGES))); up_read(&mm->mmap_sem); trace_finish_task_reaping(tsk->pid); unlock_oom: mutex_unlock(&oom_lock); return ret; }
{'added': [(567, '\t\tif (vma_is_anonymous(vma) || !(vma->vm_flags & VM_SHARED)) {'), (568, '\t\t\ttlb_gather_mmu(&tlb, mm, vma->vm_start, vma->vm_end);'), (571, '\t\t\ttlb_finish_mmu(&tlb, vma->vm_start, vma->vm_end);'), (572, '\t\t}')], 'deleted': [(553, '\ttlb_gather_mmu(&tlb, mm, 0, -1);'), (568, '\t\tif (vma_is_anonymous(vma) || !(vma->vm_flags & VM_SHARED))'), (572, '\ttlb_finish_mmu(&tlb, 0, -1);')]}
4
3
624
3,414
43
291
8
https://github.com/torvalds/linux
CVE-2017-18202
CWE-416
3,174
adaptmap_reg.c
C
main
/*====================================================================* - Copyright (C) 2001 Leptonica. All rights reserved. - - Redistribution and use in source and binary forms, with or without - modification, are permitted provided that the following conditions - are met: - 1. Redistributions of source code must retain the above copyright - notice, this list of conditions and the following disclaimer. - 2. Redistributions in binary form must reproduce the above - copyright notice, this list of conditions and the following - disclaimer in the documentation and/or other materials - provided with the distribution. - - THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS - ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT - LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR - A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL ANY - CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, - EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, - PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR - PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY - OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING - NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS - SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 
*====================================================================*/ /* * adaptmap_reg.c * * Regression test demonstrating adaptive mappings in both gray and color */ #ifdef HAVE_CONFIG_H #include <config_auto.h> #endif /* HAVE_CONFIG_H */ #include "allheaders.h" /* Location of image region in wet-day.jpg */ static const l_int32 XS = 151; static const l_int32 YS = 225; static const l_int32 WS = 913; static const l_int32 HS = 1285; static const l_int32 SIZE_X = 10; static const l_int32 SIZE_Y = 30; static const l_int32 BINTHRESH = 50; static const l_int32 MINCOUNT = 30; static const l_int32 BGVAL = 200; static const l_int32 SMOOTH_X = 2; static const l_int32 SMOOTH_Y = 1; int main(int argc, char **argv) { l_int32 w, h; PIX *pixs, *pixg, *pixim, *pixgm, *pixmi, *pix1, *pix2; PIX *pixmr, *pixmg, *pixmb, *pixmri, *pixmgi, *pixmbi; PIXA *pixa; L_REGPARAMS *rp; if (regTestSetup(argc, argv, &rp)) return 1; lept_mkdir("lept/adapt"); // REMOVE? pixs = pixRead("wet-day.jpg"); pixa = pixaCreate(0); pixg = pixConvertRGBToGray(pixs, 0.33, 0.34, 0.33); pixaAddPix(pixa, pixs, L_INSERT); pixaAddPix(pixa, pixg, L_INSERT); pixGetDimensions(pixs, &w, &h, NULL); /* Process in grayscale */ startTimer(); pixim = pixCreate(w, h, 1); pixRasterop(pixim, XS, YS, WS, HS, PIX_SET, NULL, 0, 0); pixGetBackgroundGrayMap(pixg, pixim, SIZE_X, SIZE_Y, BINTHRESH, MINCOUNT, &pixgm); fprintf(stderr, "Time for gray adaptmap gen: %7.3f\n", stopTimer()); regTestWritePixAndCheck(rp, pixgm, IFF_PNG); /* 0 */ pixaAddPix(pixa, pixgm, L_INSERT); startTimer(); pixmi = pixGetInvBackgroundMap(pixgm, BGVAL, SMOOTH_X, SMOOTH_Y); fprintf(stderr, "Time for gray inv map generation: %7.3f\n", stopTimer()); regTestWritePixAndCheck(rp, pixmi, IFF_PNG); /* 1 */ pixaAddPix(pixa, pixmi, L_INSERT); startTimer(); pix1 = pixApplyInvBackgroundGrayMap(pixg, pixmi, SIZE_X, SIZE_Y); fprintf(stderr, "Time to apply gray inv map: %7.3f\n", stopTimer()); regTestWritePixAndCheck(rp, pix1, IFF_JFIF_JPEG); /* 2 */ pixaAddPix(pixa, 
pix1, L_INSERT); pix2 = pixGammaTRCMasked(NULL, pix1, pixim, 1.0, 0, 190); pixInvert(pixim, pixim); pixGammaTRCMasked(pix2, pix2, pixim, 1.0, 60, 190); regTestWritePixAndCheck(rp, pix2, IFF_JFIF_JPEG); /* 3 */ pixaAddPix(pixa, pix2, L_INSERT); pixDestroy(&pixim); /* Process in color */ startTimer(); pixim = pixCreate(w, h, 1); pixRasterop(pixim, XS, YS, WS, HS, PIX_SET, NULL, 0, 0); pixGetBackgroundRGBMap(pixs, pixim, NULL, SIZE_X, SIZE_Y, BINTHRESH, MINCOUNT, &pixmr, &pixmg, &pixmb); fprintf(stderr, "Time for color adaptmap gen: %7.3f\n", stopTimer()); regTestWritePixAndCheck(rp, pixmr, IFF_PNG); /* 4 */ regTestWritePixAndCheck(rp, pixmg, IFF_PNG); /* 5 */ regTestWritePixAndCheck(rp, pixmb, IFF_PNG); /* 6 */ pixaAddPix(pixa, pixmr, L_INSERT); pixaAddPix(pixa, pixmg, L_INSERT); pixaAddPix(pixa, pixmb, L_INSERT); startTimer(); pixmri = pixGetInvBackgroundMap(pixmr, BGVAL, SMOOTH_X, SMOOTH_Y); pixmgi = pixGetInvBackgroundMap(pixmg, BGVAL, SMOOTH_X, SMOOTH_Y); pixmbi = pixGetInvBackgroundMap(pixmb, BGVAL, SMOOTH_X, SMOOTH_Y); fprintf(stderr, "Time for color inv map generation: %7.3f\n", stopTimer()); regTestWritePixAndCheck(rp, pixmri, IFF_PNG); /* 7 */ regTestWritePixAndCheck(rp, pixmgi, IFF_PNG); /* 8 */ regTestWritePixAndCheck(rp, pixmbi, IFF_PNG); /* 9 */ pixaAddPix(pixa, pixmri, L_INSERT); pixaAddPix(pixa, pixmgi, L_INSERT); pixaAddPix(pixa, pixmbi, L_INSERT); startTimer(); pix1 = pixApplyInvBackgroundRGBMap(pixs, pixmri, pixmgi, pixmbi, SIZE_X, SIZE_Y); fprintf(stderr, "Time to apply color inv maps: %7.3f\n", stopTimer()); regTestWritePixAndCheck(rp, pix1, IFF_JFIF_JPEG); /* 10 */ pixaAddPix(pixa, pix1, L_INSERT); pix2 = pixGammaTRCMasked(NULL, pix1, pixim, 1.0, 0, 190); pixInvert(pixim, pixim); pixGammaTRCMasked(pix2, pix2, pixim, 1.0, 60, 190); regTestWritePixAndCheck(rp, pix2, IFF_JFIF_JPEG); /* 11 */ pixaAddPix(pixa, pix2, L_INSERT); pixDestroy(&pixim); /* Process at higher level in color */ startTimer(); pixim = pixCreate(w, h, 1); pixRasterop(pixim, XS, 
YS, WS, HS, PIX_SET, NULL, 0, 0); pix1 = pixBackgroundNorm(pixs, pixim, NULL, 5, 10, BINTHRESH, 20, BGVAL, SMOOTH_X, SMOOTH_Y); fprintf(stderr, "Time for bg normalization: %7.3f\n", stopTimer()); regTestWritePixAndCheck(rp, pix1, IFF_JFIF_JPEG); /* 12 */ pixaAddPix(pixa, pix1, L_INSERT); pix2 = pixGammaTRCMasked(NULL, pix1, pixim, 1.0, 0, 190); pixInvert(pixim, pixim); pixGammaTRCMasked(pix2, pix2, pixim, 1.0, 60, 190); regTestWritePixAndCheck(rp, pix2, IFF_JFIF_JPEG); /* 13 */ pixaAddPix(pixa, pix2, L_INSERT); pixDestroy(&pixim); /* Display results */ pix1 = pixaDisplayTiledAndScaled(pixa, 32, 400, 4, 0, 20, 2); pixWrite("/tmp/lept/adapt/results.jpg", pix1, IFF_JFIF_JPEG); pixDisplayWithTitle(pix1, 100, 0, NULL, rp->display); pixDestroy(&pix1); pixaDestroy(&pixa); return regTestCleanup(rp); }
/*====================================================================* - Copyright (C) 2001 Leptonica. All rights reserved. - - Redistribution and use in source and binary forms, with or without - modification, are permitted provided that the following conditions - are met: - 1. Redistributions of source code must retain the above copyright - notice, this list of conditions and the following disclaimer. - 2. Redistributions in binary form must reproduce the above - copyright notice, this list of conditions and the following - disclaimer in the documentation and/or other materials - provided with the distribution. - - THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS - ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT - LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR - A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL ANY - CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, - EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, - PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR - PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY - OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING - NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS - SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 
*====================================================================*/ /* * adaptmap_reg.c * * Regression test demonstrating adaptive mappings in both gray and color */ #ifdef HAVE_CONFIG_H #include <config_auto.h> #endif /* HAVE_CONFIG_H */ #include "allheaders.h" /* Location of image region in wet-day.jpg */ static const l_int32 XS = 151; static const l_int32 YS = 225; static const l_int32 WS = 913; static const l_int32 HS = 1285; static const l_int32 SIZE_X = 10; static const l_int32 SIZE_Y = 30; static const l_int32 BINTHRESH = 50; static const l_int32 MINCOUNT = 30; static const l_int32 BGVAL = 200; static const l_int32 SMOOTH_X = 2; static const l_int32 SMOOTH_Y = 1; int main(int argc, char **argv) { l_int32 w, h; PIX *pixs, *pixg, *pixim, *pixgm, *pixmi, *pix1, *pix2; PIX *pixmr, *pixmg, *pixmb, *pixmri, *pixmgi, *pixmbi; PIXA *pixa, *pixa2; L_REGPARAMS *rp; if (regTestSetup(argc, argv, &rp)) return 1; lept_mkdir("lept/adapt"); // REMOVE? pixs = pixRead("wet-day.jpg"); pixa = pixaCreate(0); pixg = pixConvertRGBToGray(pixs, 0.33, 0.34, 0.33); pixaAddPix(pixa, pixs, L_INSERT); pixaAddPix(pixa, pixg, L_INSERT); pixGetDimensions(pixs, &w, &h, NULL); /* Process in grayscale */ startTimer(); pixim = pixCreate(w, h, 1); pixRasterop(pixim, XS, YS, WS, HS, PIX_SET, NULL, 0, 0); pixGetBackgroundGrayMap(pixg, pixim, SIZE_X, SIZE_Y, BINTHRESH, MINCOUNT, &pixgm); fprintf(stderr, "Time for gray adaptmap gen: %7.3f\n", stopTimer()); regTestWritePixAndCheck(rp, pixgm, IFF_PNG); /* 0 */ pixaAddPix(pixa, pixgm, L_INSERT); startTimer(); pixmi = pixGetInvBackgroundMap(pixgm, BGVAL, SMOOTH_X, SMOOTH_Y); fprintf(stderr, "Time for gray inv map generation: %7.3f\n", stopTimer()); regTestWritePixAndCheck(rp, pixmi, IFF_PNG); /* 1 */ pixaAddPix(pixa, pixmi, L_INSERT); startTimer(); pix1 = pixApplyInvBackgroundGrayMap(pixg, pixmi, SIZE_X, SIZE_Y); fprintf(stderr, "Time to apply gray inv map: %7.3f\n", stopTimer()); regTestWritePixAndCheck(rp, pix1, IFF_JFIF_JPEG); /* 2 */ 
pixaAddPix(pixa, pix1, L_INSERT); pix2 = pixGammaTRCMasked(NULL, pix1, pixim, 1.0, 0, 190); pixInvert(pixim, pixim); pixGammaTRCMasked(pix2, pix2, pixim, 1.0, 60, 190); regTestWritePixAndCheck(rp, pix2, IFF_JFIF_JPEG); /* 3 */ pixaAddPix(pixa, pix2, L_INSERT); pixDestroy(&pixim); /* Process in color */ startTimer(); pixim = pixCreate(w, h, 1); pixRasterop(pixim, XS, YS, WS, HS, PIX_SET, NULL, 0, 0); pixGetBackgroundRGBMap(pixs, pixim, NULL, SIZE_X, SIZE_Y, BINTHRESH, MINCOUNT, &pixmr, &pixmg, &pixmb); fprintf(stderr, "Time for color adaptmap gen: %7.3f\n", stopTimer()); regTestWritePixAndCheck(rp, pixmr, IFF_PNG); /* 4 */ regTestWritePixAndCheck(rp, pixmg, IFF_PNG); /* 5 */ regTestWritePixAndCheck(rp, pixmb, IFF_PNG); /* 6 */ pixaAddPix(pixa, pixmr, L_INSERT); pixaAddPix(pixa, pixmg, L_INSERT); pixaAddPix(pixa, pixmb, L_INSERT); startTimer(); pixmri = pixGetInvBackgroundMap(pixmr, BGVAL, SMOOTH_X, SMOOTH_Y); pixmgi = pixGetInvBackgroundMap(pixmg, BGVAL, SMOOTH_X, SMOOTH_Y); pixmbi = pixGetInvBackgroundMap(pixmb, BGVAL, SMOOTH_X, SMOOTH_Y); fprintf(stderr, "Time for color inv map generation: %7.3f\n", stopTimer()); regTestWritePixAndCheck(rp, pixmri, IFF_PNG); /* 7 */ regTestWritePixAndCheck(rp, pixmgi, IFF_PNG); /* 8 */ regTestWritePixAndCheck(rp, pixmbi, IFF_PNG); /* 9 */ pixaAddPix(pixa, pixmri, L_INSERT); pixaAddPix(pixa, pixmgi, L_INSERT); pixaAddPix(pixa, pixmbi, L_INSERT); startTimer(); pix1 = pixApplyInvBackgroundRGBMap(pixs, pixmri, pixmgi, pixmbi, SIZE_X, SIZE_Y); fprintf(stderr, "Time to apply color inv maps: %7.3f\n", stopTimer()); regTestWritePixAndCheck(rp, pix1, IFF_JFIF_JPEG); /* 10 */ pixaAddPix(pixa, pix1, L_INSERT); pix2 = pixGammaTRCMasked(NULL, pix1, pixim, 1.0, 0, 190); pixInvert(pixim, pixim); pixGammaTRCMasked(pix2, pix2, pixim, 1.0, 60, 190); regTestWritePixAndCheck(rp, pix2, IFF_JFIF_JPEG); /* 11 */ pixaAddPix(pixa, pix2, L_INSERT); pixDestroy(&pixim); /* Process at higher level in color */ startTimer(); pixim = pixCreate(w, h, 1); 
pixRasterop(pixim, XS, YS, WS, HS, PIX_SET, NULL, 0, 0); pix1 = pixBackgroundNorm(pixs, pixim, NULL, 5, 10, BINTHRESH, 20, BGVAL, SMOOTH_X, SMOOTH_Y); fprintf(stderr, "Time for bg normalization: %7.3f\n", stopTimer()); regTestWritePixAndCheck(rp, pix1, IFF_JFIF_JPEG); /* 12 */ pixaAddPix(pixa, pix1, L_INSERT); pix2 = pixGammaTRCMasked(NULL, pix1, pixim, 1.0, 0, 190); pixInvert(pixim, pixim); pixGammaTRCMasked(pix2, pix2, pixim, 1.0, 60, 190); regTestWritePixAndCheck(rp, pix2, IFF_JFIF_JPEG); /* 13 */ pixaAddPix(pixa, pix2, L_INSERT); pixDestroy(&pixim); /* Check pixFillMapHoles() */ pixa2 = pixaCreate(3); pix1 = pixRead("weasel8.png"); /* use this as the map */ pixGammaTRC(pix1, pix1, 1.0, 0, 270); /* darken white pixels */ pixaAddPix(pixa2, pix1, L_COPY); pixGetDimensions(pix1, &w, &h, NULL); pixRasterop(pix1, 0, 0, 5, h, PIX_SET, NULL, 0, 0); /* add white holes */ pixRasterop(pix1, 20, 0, 2, h, PIX_SET, NULL, 0, 0); pixRasterop(pix1, 40, 0, 3, h, PIX_SET, NULL, 0, 0); pixRasterop(pix1, 0, 0, w, 3, PIX_SET, NULL, 0, 0); pixRasterop(pix1, 0, 15, w, 3, PIX_SET, NULL, 0, 0); pixRasterop(pix1, 0, 35, w, 2, PIX_SET, NULL, 0, 0); pixaAddPix(pixa2, pix1, L_COPY); pixFillMapHoles(pix1, w, h, L_FILL_WHITE); pixaAddPix(pixa2, pix1, L_INSERT); pix2 = pixaDisplayTiledInColumns(pixa2, 3, 1.0, 20, 1); regTestWritePixAndCheck(rp, pix2, IFF_PNG); /* 14 */ pixDisplayWithTitle(pix2, 50, 850, NULL, rp->display); pixaDestroy(&pixa2); pixDestroy(&pix2); /* Display results */ pix1 = pixaDisplayTiledAndScaled(pixa, 32, 400, 4, 0, 20, 2); pixWrite("/tmp/lept/adapt/results.jpg", pix1, IFF_JFIF_JPEG); pixDisplayWithTitle(pix1, 50, 0, NULL, rp->display); pixDestroy(&pix1); pixaDestroy(&pixa); return regTestCleanup(rp); }
int main(int argc, char **argv) { l_int32 w, h; PIX *pixs, *pixg, *pixim, *pixgm, *pixmi, *pix1, *pix2; PIX *pixmr, *pixmg, *pixmb, *pixmri, *pixmgi, *pixmbi; PIXA *pixa; L_REGPARAMS *rp; if (regTestSetup(argc, argv, &rp)) return 1; lept_mkdir("lept/adapt"); // REMOVE? pixs = pixRead("wet-day.jpg"); pixa = pixaCreate(0); pixg = pixConvertRGBToGray(pixs, 0.33, 0.34, 0.33); pixaAddPix(pixa, pixs, L_INSERT); pixaAddPix(pixa, pixg, L_INSERT); pixGetDimensions(pixs, &w, &h, NULL); /* Process in grayscale */ startTimer(); pixim = pixCreate(w, h, 1); pixRasterop(pixim, XS, YS, WS, HS, PIX_SET, NULL, 0, 0); pixGetBackgroundGrayMap(pixg, pixim, SIZE_X, SIZE_Y, BINTHRESH, MINCOUNT, &pixgm); fprintf(stderr, "Time for gray adaptmap gen: %7.3f\n", stopTimer()); regTestWritePixAndCheck(rp, pixgm, IFF_PNG); /* 0 */ pixaAddPix(pixa, pixgm, L_INSERT); startTimer(); pixmi = pixGetInvBackgroundMap(pixgm, BGVAL, SMOOTH_X, SMOOTH_Y); fprintf(stderr, "Time for gray inv map generation: %7.3f\n", stopTimer()); regTestWritePixAndCheck(rp, pixmi, IFF_PNG); /* 1 */ pixaAddPix(pixa, pixmi, L_INSERT); startTimer(); pix1 = pixApplyInvBackgroundGrayMap(pixg, pixmi, SIZE_X, SIZE_Y); fprintf(stderr, "Time to apply gray inv map: %7.3f\n", stopTimer()); regTestWritePixAndCheck(rp, pix1, IFF_JFIF_JPEG); /* 2 */ pixaAddPix(pixa, pix1, L_INSERT); pix2 = pixGammaTRCMasked(NULL, pix1, pixim, 1.0, 0, 190); pixInvert(pixim, pixim); pixGammaTRCMasked(pix2, pix2, pixim, 1.0, 60, 190); regTestWritePixAndCheck(rp, pix2, IFF_JFIF_JPEG); /* 3 */ pixaAddPix(pixa, pix2, L_INSERT); pixDestroy(&pixim); /* Process in color */ startTimer(); pixim = pixCreate(w, h, 1); pixRasterop(pixim, XS, YS, WS, HS, PIX_SET, NULL, 0, 0); pixGetBackgroundRGBMap(pixs, pixim, NULL, SIZE_X, SIZE_Y, BINTHRESH, MINCOUNT, &pixmr, &pixmg, &pixmb); fprintf(stderr, "Time for color adaptmap gen: %7.3f\n", stopTimer()); regTestWritePixAndCheck(rp, pixmr, IFF_PNG); /* 4 */ regTestWritePixAndCheck(rp, pixmg, IFF_PNG); /* 5 */ 
regTestWritePixAndCheck(rp, pixmb, IFF_PNG); /* 6 */ pixaAddPix(pixa, pixmr, L_INSERT); pixaAddPix(pixa, pixmg, L_INSERT); pixaAddPix(pixa, pixmb, L_INSERT); startTimer(); pixmri = pixGetInvBackgroundMap(pixmr, BGVAL, SMOOTH_X, SMOOTH_Y); pixmgi = pixGetInvBackgroundMap(pixmg, BGVAL, SMOOTH_X, SMOOTH_Y); pixmbi = pixGetInvBackgroundMap(pixmb, BGVAL, SMOOTH_X, SMOOTH_Y); fprintf(stderr, "Time for color inv map generation: %7.3f\n", stopTimer()); regTestWritePixAndCheck(rp, pixmri, IFF_PNG); /* 7 */ regTestWritePixAndCheck(rp, pixmgi, IFF_PNG); /* 8 */ regTestWritePixAndCheck(rp, pixmbi, IFF_PNG); /* 9 */ pixaAddPix(pixa, pixmri, L_INSERT); pixaAddPix(pixa, pixmgi, L_INSERT); pixaAddPix(pixa, pixmbi, L_INSERT); startTimer(); pix1 = pixApplyInvBackgroundRGBMap(pixs, pixmri, pixmgi, pixmbi, SIZE_X, SIZE_Y); fprintf(stderr, "Time to apply color inv maps: %7.3f\n", stopTimer()); regTestWritePixAndCheck(rp, pix1, IFF_JFIF_JPEG); /* 10 */ pixaAddPix(pixa, pix1, L_INSERT); pix2 = pixGammaTRCMasked(NULL, pix1, pixim, 1.0, 0, 190); pixInvert(pixim, pixim); pixGammaTRCMasked(pix2, pix2, pixim, 1.0, 60, 190); regTestWritePixAndCheck(rp, pix2, IFF_JFIF_JPEG); /* 11 */ pixaAddPix(pixa, pix2, L_INSERT); pixDestroy(&pixim); /* Process at higher level in color */ startTimer(); pixim = pixCreate(w, h, 1); pixRasterop(pixim, XS, YS, WS, HS, PIX_SET, NULL, 0, 0); pix1 = pixBackgroundNorm(pixs, pixim, NULL, 5, 10, BINTHRESH, 20, BGVAL, SMOOTH_X, SMOOTH_Y); fprintf(stderr, "Time for bg normalization: %7.3f\n", stopTimer()); regTestWritePixAndCheck(rp, pix1, IFF_JFIF_JPEG); /* 12 */ pixaAddPix(pixa, pix1, L_INSERT); pix2 = pixGammaTRCMasked(NULL, pix1, pixim, 1.0, 0, 190); pixInvert(pixim, pixim); pixGammaTRCMasked(pix2, pix2, pixim, 1.0, 60, 190); regTestWritePixAndCheck(rp, pix2, IFF_JFIF_JPEG); /* 13 */ pixaAddPix(pixa, pix2, L_INSERT); pixDestroy(&pixim); /* Display results */ pix1 = pixaDisplayTiledAndScaled(pixa, 32, 400, 4, 0, 20, 2); pixWrite("/tmp/lept/adapt/results.jpg", pix1, 
IFF_JFIF_JPEG); pixDisplayWithTitle(pix1, 100, 0, NULL, rp->display); pixDestroy(&pix1); pixaDestroy(&pixa); return regTestCleanup(rp); }
int main(int argc, char **argv) { l_int32 w, h; PIX *pixs, *pixg, *pixim, *pixgm, *pixmi, *pix1, *pix2; PIX *pixmr, *pixmg, *pixmb, *pixmri, *pixmgi, *pixmbi; PIXA *pixa, *pixa2; L_REGPARAMS *rp; if (regTestSetup(argc, argv, &rp)) return 1; lept_mkdir("lept/adapt"); // REMOVE? pixs = pixRead("wet-day.jpg"); pixa = pixaCreate(0); pixg = pixConvertRGBToGray(pixs, 0.33, 0.34, 0.33); pixaAddPix(pixa, pixs, L_INSERT); pixaAddPix(pixa, pixg, L_INSERT); pixGetDimensions(pixs, &w, &h, NULL); /* Process in grayscale */ startTimer(); pixim = pixCreate(w, h, 1); pixRasterop(pixim, XS, YS, WS, HS, PIX_SET, NULL, 0, 0); pixGetBackgroundGrayMap(pixg, pixim, SIZE_X, SIZE_Y, BINTHRESH, MINCOUNT, &pixgm); fprintf(stderr, "Time for gray adaptmap gen: %7.3f\n", stopTimer()); regTestWritePixAndCheck(rp, pixgm, IFF_PNG); /* 0 */ pixaAddPix(pixa, pixgm, L_INSERT); startTimer(); pixmi = pixGetInvBackgroundMap(pixgm, BGVAL, SMOOTH_X, SMOOTH_Y); fprintf(stderr, "Time for gray inv map generation: %7.3f\n", stopTimer()); regTestWritePixAndCheck(rp, pixmi, IFF_PNG); /* 1 */ pixaAddPix(pixa, pixmi, L_INSERT); startTimer(); pix1 = pixApplyInvBackgroundGrayMap(pixg, pixmi, SIZE_X, SIZE_Y); fprintf(stderr, "Time to apply gray inv map: %7.3f\n", stopTimer()); regTestWritePixAndCheck(rp, pix1, IFF_JFIF_JPEG); /* 2 */ pixaAddPix(pixa, pix1, L_INSERT); pix2 = pixGammaTRCMasked(NULL, pix1, pixim, 1.0, 0, 190); pixInvert(pixim, pixim); pixGammaTRCMasked(pix2, pix2, pixim, 1.0, 60, 190); regTestWritePixAndCheck(rp, pix2, IFF_JFIF_JPEG); /* 3 */ pixaAddPix(pixa, pix2, L_INSERT); pixDestroy(&pixim); /* Process in color */ startTimer(); pixim = pixCreate(w, h, 1); pixRasterop(pixim, XS, YS, WS, HS, PIX_SET, NULL, 0, 0); pixGetBackgroundRGBMap(pixs, pixim, NULL, SIZE_X, SIZE_Y, BINTHRESH, MINCOUNT, &pixmr, &pixmg, &pixmb); fprintf(stderr, "Time for color adaptmap gen: %7.3f\n", stopTimer()); regTestWritePixAndCheck(rp, pixmr, IFF_PNG); /* 4 */ regTestWritePixAndCheck(rp, pixmg, IFF_PNG); /* 5 */ 
regTestWritePixAndCheck(rp, pixmb, IFF_PNG); /* 6 */ pixaAddPix(pixa, pixmr, L_INSERT); pixaAddPix(pixa, pixmg, L_INSERT); pixaAddPix(pixa, pixmb, L_INSERT); startTimer(); pixmri = pixGetInvBackgroundMap(pixmr, BGVAL, SMOOTH_X, SMOOTH_Y); pixmgi = pixGetInvBackgroundMap(pixmg, BGVAL, SMOOTH_X, SMOOTH_Y); pixmbi = pixGetInvBackgroundMap(pixmb, BGVAL, SMOOTH_X, SMOOTH_Y); fprintf(stderr, "Time for color inv map generation: %7.3f\n", stopTimer()); regTestWritePixAndCheck(rp, pixmri, IFF_PNG); /* 7 */ regTestWritePixAndCheck(rp, pixmgi, IFF_PNG); /* 8 */ regTestWritePixAndCheck(rp, pixmbi, IFF_PNG); /* 9 */ pixaAddPix(pixa, pixmri, L_INSERT); pixaAddPix(pixa, pixmgi, L_INSERT); pixaAddPix(pixa, pixmbi, L_INSERT); startTimer(); pix1 = pixApplyInvBackgroundRGBMap(pixs, pixmri, pixmgi, pixmbi, SIZE_X, SIZE_Y); fprintf(stderr, "Time to apply color inv maps: %7.3f\n", stopTimer()); regTestWritePixAndCheck(rp, pix1, IFF_JFIF_JPEG); /* 10 */ pixaAddPix(pixa, pix1, L_INSERT); pix2 = pixGammaTRCMasked(NULL, pix1, pixim, 1.0, 0, 190); pixInvert(pixim, pixim); pixGammaTRCMasked(pix2, pix2, pixim, 1.0, 60, 190); regTestWritePixAndCheck(rp, pix2, IFF_JFIF_JPEG); /* 11 */ pixaAddPix(pixa, pix2, L_INSERT); pixDestroy(&pixim); /* Process at higher level in color */ startTimer(); pixim = pixCreate(w, h, 1); pixRasterop(pixim, XS, YS, WS, HS, PIX_SET, NULL, 0, 0); pix1 = pixBackgroundNorm(pixs, pixim, NULL, 5, 10, BINTHRESH, 20, BGVAL, SMOOTH_X, SMOOTH_Y); fprintf(stderr, "Time for bg normalization: %7.3f\n", stopTimer()); regTestWritePixAndCheck(rp, pix1, IFF_JFIF_JPEG); /* 12 */ pixaAddPix(pixa, pix1, L_INSERT); pix2 = pixGammaTRCMasked(NULL, pix1, pixim, 1.0, 0, 190); pixInvert(pixim, pixim); pixGammaTRCMasked(pix2, pix2, pixim, 1.0, 60, 190); regTestWritePixAndCheck(rp, pix2, IFF_JFIF_JPEG); /* 13 */ pixaAddPix(pixa, pix2, L_INSERT); pixDestroy(&pixim); /* Check pixFillMapHoles() */ pixa2 = pixaCreate(3); pix1 = pixRead("weasel8.png"); /* use this as the map */ pixGammaTRC(pix1, 
pix1, 1.0, 0, 270); /* darken white pixels */ pixaAddPix(pixa2, pix1, L_COPY); pixGetDimensions(pix1, &w, &h, NULL); pixRasterop(pix1, 0, 0, 5, h, PIX_SET, NULL, 0, 0); /* add white holes */ pixRasterop(pix1, 20, 0, 2, h, PIX_SET, NULL, 0, 0); pixRasterop(pix1, 40, 0, 3, h, PIX_SET, NULL, 0, 0); pixRasterop(pix1, 0, 0, w, 3, PIX_SET, NULL, 0, 0); pixRasterop(pix1, 0, 15, w, 3, PIX_SET, NULL, 0, 0); pixRasterop(pix1, 0, 35, w, 2, PIX_SET, NULL, 0, 0); pixaAddPix(pixa2, pix1, L_COPY); pixFillMapHoles(pix1, w, h, L_FILL_WHITE); pixaAddPix(pixa2, pix1, L_INSERT); pix2 = pixaDisplayTiledInColumns(pixa2, 3, 1.0, 20, 1); regTestWritePixAndCheck(rp, pix2, IFF_PNG); /* 14 */ pixDisplayWithTitle(pix2, 50, 850, NULL, rp->display); pixaDestroy(&pixa2); pixDestroy(&pix2); /* Display results */ pix1 = pixaDisplayTiledAndScaled(pixa, 32, 400, 4, 0, 20, 2); pixWrite("/tmp/lept/adapt/results.jpg", pix1, IFF_JFIF_JPEG); pixDisplayWithTitle(pix1, 50, 0, NULL, rp->display); pixDestroy(&pix1); pixaDestroy(&pixa); return regTestCleanup(rp); }
{'added': [(60, 'PIXA *pixa, *pixa2;'), (162, ' /* Check pixFillMapHoles() */'), (163, ' pixa2 = pixaCreate(3);'), (164, ' pix1 = pixRead("weasel8.png"); /* use this as the map */'), (165, ' pixGammaTRC(pix1, pix1, 1.0, 0, 270); /* darken white pixels */'), (166, ' pixaAddPix(pixa2, pix1, L_COPY);'), (167, ' pixGetDimensions(pix1, &w, &h, NULL);'), (168, ' pixRasterop(pix1, 0, 0, 5, h, PIX_SET, NULL, 0, 0); /* add white holes */'), (169, ' pixRasterop(pix1, 20, 0, 2, h, PIX_SET, NULL, 0, 0);'), (170, ' pixRasterop(pix1, 40, 0, 3, h, PIX_SET, NULL, 0, 0);'), (171, ' pixRasterop(pix1, 0, 0, w, 3, PIX_SET, NULL, 0, 0);'), (172, ' pixRasterop(pix1, 0, 15, w, 3, PIX_SET, NULL, 0, 0);'), (173, ' pixRasterop(pix1, 0, 35, w, 2, PIX_SET, NULL, 0, 0);'), (174, ' pixaAddPix(pixa2, pix1, L_COPY);'), (175, ' pixFillMapHoles(pix1, w, h, L_FILL_WHITE);'), (176, ' pixaAddPix(pixa2, pix1, L_INSERT);'), (177, ' pix2 = pixaDisplayTiledInColumns(pixa2, 3, 1.0, 20, 1);'), (178, ' regTestWritePixAndCheck(rp, pix2, IFF_PNG); /* 14 */'), (179, ' pixDisplayWithTitle(pix2, 50, 850, NULL, rp->display);'), (180, ' pixaDestroy(&pixa2);'), (181, ' pixDestroy(&pix2);'), (182, ''), (186, ' pixDisplayWithTitle(pix1, 50, 0, NULL, rp->display);')], 'deleted': [(60, 'PIXA *pixa;'), (165, ' pixDisplayWithTitle(pix1, 100, 0, NULL, rp->display);')]}
23
2
130
1,305
98
961
2
https://github.com/DanBloomberg/leptonica
CVE-2020-36279
CWE-125
1,881
elf.c
C
init_shdr
/* radare - LGPL - Copyright 2008-2022 - nibble, pancake, alvaro_fe */ #define R_LOG_ORIGIN "elf" #include <r_types.h> #include <r_util.h> #include "elf.h" #define MIPS_PLT_OFFSET 0x20 #define RISCV_PLT_OFFSET 0x20 #define LOONGARCH_PLT_OFFSET 0x20 #define RISCV_PLT_ENTRY_SIZE 0x10 #define LOONGARCH_PLT_ENTRY_SIZE 0x10 #define X86_PLT_ENTRY_SIZE 0x10 #define SPARC_OFFSET_PLT_ENTRY_FROM_GOT_ADDR -0x6 #define X86_OFFSET_PLT_ENTRY_FROM_GOT_ADDR -0x6 #define ELF_PAGE_MASK 0xFFFFFFFFFFFFF000LL #define ELF_PAGE_SIZE 12 #define R_ELF_NO_RELRO 0 #define R_ELF_PART_RELRO 1 #define R_ELF_FULL_RELRO 2 #define MAX_REL_RELA_SZ (sizeof (Elf_(Rel)) > sizeof (Elf_(Rela))? sizeof (Elf_(Rel)): sizeof (Elf_(Rela))) #define READ8(x, i) r_read_ble8((x) + (i)); (i) += 1 #define READ16(x, i) r_read_ble16((x) + (i), bin->endian); (i) += 2 #define READ32(x, i) r_read_ble32((x) + (i), bin->endian); (i) += 4 #define READ64(x, i) r_read_ble64((x) + (i), bin->endian); (i) += 8 #define BREAD8(x, i) r_buf_read_ble8_at (x, i); (i) += 1 #define BREAD16(x, i) r_buf_read_ble16_at (x, i, bin->endian); (i) += 2 #define BREAD32(x, i) r_buf_read_ble32_at (x, i, bin->endian); (i) += 4 #define BREAD64(x, i) r_buf_read_ble64_at (x, i, bin->endian); (i) += 8 #define NUMENTRIES_ROUNDUP(sectionsize, entrysize) (((sectionsize) + (entrysize)-1) / (entrysize)) #define COMPUTE_PLTGOT_POSITION(rel, pltgot_addr, n_initial_unused_entries) \ ((rel->rva - pltgot_addr - n_initial_unused_entries * R_BIN_ELF_WORDSIZE) / R_BIN_ELF_WORDSIZE) #define GROWTH_FACTOR (1.5) #define round_up(a) ((((a) + (4) - (1)) / (4)) * (4)) #define EF_MIPS_ABI_O32 0x00001000 /* O32 ABI. */ #define EF_MIPS_ABI_O64 0x00002000 /* O32 extended for 64 bit. 
*/ #define EF_MIPS_ABI 0x0000f000 static inline bool is_elfclass64(Elf_(Ehdr) *h) { return h->e_ident[EI_CLASS] == ELFCLASS64; } static bool is_mips_o32(Elf_(Ehdr) *h) { if (h->e_ident[EI_CLASS] != ELFCLASS32) { return false; } if ((h->e_flags & EF_MIPS_ABI2) != 0) { return false; } if (((h->e_flags & EF_MIPS_ABI) != 0) && ((h->e_flags & EF_MIPS_ABI) != EF_MIPS_ABI_O32)) { return false; } return true; } static bool is_mips_n32(Elf_(Ehdr) *h) { if (h->e_ident[EI_CLASS] != ELFCLASS32) { return false; } if (((h->e_flags & EF_MIPS_ABI2) == 0) || ((h->e_flags & EF_MIPS_ABI) != 0)) { return false; } return true; } enum { X86, X86_64, ARM, AARCH64, RCE, ARCH_LEN }; typedef struct reginfo { ut32 regsize; ut32 regdelta; } reginfo_t; static reginfo_t reginf[ARCH_LEN] = { { 160, 0x5c }, { 216, 0x84 }, { 72, 0x5c }, { 272, 0x84 }, { 272, 0x84 } }; static inline int __strnlen(const char *str, int len) { int l = 0; while (IS_PRINTABLE (*str) && --len) { if (((ut8)*str) == 0xff) { break; } str++; l++; } return l + 1; } static bool is_bin_etrel(ELFOBJ *bin) { return bin->ehdr.e_type == ET_REL; } static bool __is_valid_ident(ELFOBJ *bin) { return !strncmp ((char *)bin->ehdr.e_ident, ELFMAG, SELFMAG) || !strncmp ((char *)bin->ehdr.e_ident, CGCMAG, SCGCMAG); } static bool init_ehdr(ELFOBJ *bin) { ut8 e_ident[EI_NIDENT]; ut8 ehdr[sizeof (Elf_(Ehdr))] = {0}; int i, len; if (r_buf_read_at (bin->b, 0, e_ident, EI_NIDENT) == -1) { R_LOG_ERROR ("read (magic)"); return false; } sdb_set (bin->kv, "elf_type.cparse", "enum elf_type { ET_NONE=0, ET_REL=1," " ET_EXEC=2, ET_DYN=3, ET_CORE=4, ET_LOOS=0xfe00, ET_HIOS=0xfeff," " ET_LOPROC=0xff00, ET_HIPROC=0xffff };", 0); sdb_set (bin->kv, "elf_machine.cparse", "enum elf_machine {EM_NONE=0, EM_M32=1," " EM_SPARC=2, EM_386=3, EM_68K=4, EM_88K=5, EM_IAMCU=6, EM_860=7, EM_MIPS=8," " EM_S370=9, EM_MIPS_RS3_LE=10, EM_RS6000=11, EM_PARISC=15, EM_nCUBE=16," " EM_VPP500=17, EM_SPARC32PLUS=18, EM_960=19, EM_PPC=20, EM_PPC64=21, EM_S390=22," " EM_SPU=23, 
EM_V800=36, EM_FR20=37, EM_RH32=38, EM_RCE=39, EM_ARM=40," " EM_ALPHA=41, EM_SH=42, EM_SPARCV9=43, EM_TRICORE=44, EM_ARC=45, EM_H8_300=46," " EM_H8_300H=47, EM_H8S=48, EM_H8_500=49, EM_IA_64=50, EM_MIPS_X=51," " EM_COLDFIRE=52, EM_68HC12=53, EM_MMA=54, EM_PCP=55, EM_NCPU=56, EM_NDR1=57," " EM_STARCORE=58, EM_ME16=59, EM_ST100=60, EM_TINYJ=61, EM_X86_64=62, EM_PDSP=63," " EM_PDP10=64, EM_PDP11=65, EM_FX66=66, EM_ST9PLUS=67, EM_ST7=68, EM_68HC16=69," " EM_68HC11=70, EM_68HC08=71, EM_68HC05=72, EM_SVX=73, EM_ST19=74, EM_VAX=75," " EM_CRIS=76, EM_JAVELIN=77, EM_FIREPATH=78, EM_ZSP=79, EM_MMIX=80, EM_HUANY=81," " EM_PRISM=82, EM_AVR=83, EM_FR30=84, EM_D10V=85, EM_D30V=86, EM_V850=87," " EM_M32R=88, EM_MN10300=89, EM_MN10200=90, EM_PJ=91, EM_OPENRISC=92," " EM_ARC_COMPACT=93, EM_XTENSA=94, EM_VIDEOCORE=95, EM_TMM_GPP=96, EM_NS32K=97," " EM_TPC=98, EM_SNP1K=99, EM_ST200=100, EM_IP2K=101, EM_MAX=102, EM_CR=103," " EM_F2MC16=104, EM_MSP430=105, EM_BLACKFIN=106, EM_SE_C33=107, EM_SEP=108," " EM_ARCA=109, EM_UNICORE=110, EM_EXCESS=111, EM_DXP=112, EM_ALTERA_NIOS2=113," " EM_CRX=114, EM_XGATE=115, EM_C166=116, EM_M16C=117, EM_DSPIC30F=118, EM_CE=119," " EM_M32C=120, EM_TSK3000=131, EM_RS08=132, EM_SHARC=133, EM_ECOG2=134," " EM_SCORE7=135, EM_DSP24=136, EM_VIDEOCORE3=137, EM_LATTICEMICO32=138," " EM_SE_C17=139, EM_TI_C6000=140, EM_TI_C2000=141, EM_TI_C5500=142," " EM_TI_ARP32=143, EM_TI_PRU=144," " EM_MMDSP_PLUS=160, EM_CYPRESS_M8C=161, EM_R32C=162, EM_TRIMEDIA=163," " EM_QDSP6=164, EM_8051=165, EM_STXP7X=166, EM_NDS32=167," " EM_ECOG1X=168, EM_MAXQ30=169, EM_XIMO16=170, EM_MANIK=171, EM_CRAYNV2=172," " EM_RX=173, EM_METAG=174, EM_MCST_ELBRUS=175, EM_ECOG16=176, EM_CR16=177," " EM_ETPU=178, EM_SLE9X=179, EM_L10M=180, EM_K10M=181, EM_AARCH64=183," " EM_AVR32=185, EM_STM8=186, EM_TILE64=187, EM_TILEPRO=188, EM_CUDA=190," " EM_TILEGX=191, EM_CLOUDSHIELD=192, EM_COREA_1ST=193, EM_COREA_2ND=194," " EM_ARC_COMPACT2=195, EM_OPEN8=196, EM_RL78=197, EM_VIDEOCORE5=198," " EM_78KOR=199, 
EM_56800EX=200, EM_BA1=201, EM_BA2=202, EM_XCORE=203," " EM_MCHP_PIC=204, EM_INTEL205=205, EM_INTEL206=206, EM_INTEL207=207," " EM_INTEL208=208, EM_INTEL209=209, EM_KM32=210, EM_KMX32=211, EM_KMX16=212," " EM_KMX8=213, EM_KVARC=214, EM_CDP=215, EM_COGE=216, EM_COOL=217, EM_NORC=218," " EM_CSR_KALIMBA=219, EM_AMDGPU=224, EM_RISCV=243, EM_LANAI=244, EM_BPF=247," " EM_CSKY=252, EM_KVX=256, EM_LOONGARCH=258}", 0); sdb_set (bin->kv, "elf_class.cparse", "enum elf_class {ELFCLASSNONE=0, ELFCLASS32=1, ELFCLASS64=2};", 0); sdb_set (bin->kv, "elf_data.cparse", "enum elf_data {ELFDATANONE=0, ELFDATA2LSB=1, ELFDATA2MSB=2};", 0); sdb_set (bin->kv, "elf_hdr_version.cparse", "enum elf_hdr_version {EV_NONE=0, EV_CURRENT=1};", 0); sdb_set (bin->kv, "elf_obj_version.cparse", "enum elf_obj_version {EV_NONE=0, EV_CURRENT=1};", 0); sdb_num_set (bin->kv, "elf_header.offset", 0, 0); sdb_num_set (bin->kv, "elf_header.size", sizeof (Elf_(Ehdr)), 0); sdb_set (bin->kv, "elf_ident.format", "[4]z[1]E[1]E[1]E.::" " magic (elf_class)class (elf_data)data (elf_hdr_version)version", 0); #if R_BIN_ELF64 sdb_set (bin->kv, "elf_header.format", "?[2]E[2]E[4]EqqqxN2N2N2N2N2N2" " (elf_ident)ident (elf_type)type (elf_machine)machine (elf_obj_version)version" " entry phoff shoff flags ehsize phentsize phnum shentsize shnum shstrndx", 0); #else sdb_set (bin->kv, "elf_header.format", "?[2]E[2]E[4]ExxxxN2N2N2N2N2N2" " (elf_ident)ident (elf_type)type (elf_machine)machine (elf_obj_version)version" " entry phoff shoff flags ehsize phentsize phnum shentsize shnum shstrndx", 0); #endif bin->endian = (e_ident[EI_DATA] == ELFDATA2MSB)? 
1: 0; memset (&bin->ehdr, 0, sizeof (Elf_(Ehdr))); len = r_buf_read_at (bin->b, 0, ehdr, sizeof (ehdr)); if (len < 32) { // tinyelf != sizeof (Elf_(Ehdr))) { R_LOG_ERROR ("read (ehdr)"); return false; } // XXX no need to check twice memcpy (&bin->ehdr.e_ident, ehdr, 16); if (!__is_valid_ident (bin)) { return false; } i = 16; // TODO: use r_read or r_buf_read_ apis instead bin->ehdr.e_type = READ16 (ehdr, i); bin->ehdr.e_machine = READ16 (ehdr, i); bin->ehdr.e_version = READ32 (ehdr, i); #if R_BIN_ELF64 bin->ehdr.e_entry = READ64 (ehdr, i); bin->ehdr.e_phoff = READ64 (ehdr, i); bin->ehdr.e_shoff = READ64 (ehdr, i); #else bin->ehdr.e_entry = READ32 (ehdr, i); bin->ehdr.e_phoff = READ32 (ehdr, i); bin->ehdr.e_shoff = READ32 (ehdr, i); #endif bin->ehdr.e_flags = READ32 (ehdr, i); bin->ehdr.e_ehsize = READ16 (ehdr, i); bin->ehdr.e_phentsize = READ16 (ehdr, i); bin->ehdr.e_phnum = READ16 (ehdr, i); bin->ehdr.e_shentsize = READ16 (ehdr, i); bin->ehdr.e_shnum = READ16 (ehdr, i); bin->ehdr.e_shstrndx = READ16 (ehdr, i); return true; // [Outdated] Usage example: // > td `k bin/cur/info/elf_type.cparse`; td `k bin/cur/info/elf_machine.cparse` // > pf `k bin/cur/info/elf_header.format` @ `k bin/cur/info/elf_header.offset` } ut64 Elf_(r_bin_elf_get_phnum)(ELFOBJ *obj) { r_return_val_if_fail (obj, 0); ut64 num = obj->ehdr.e_phnum & UT16_MAX; if (obj->ehdr.e_phnum == 0xffff) { ut32 shnum = obj->ehdr.e_shnum; // sh_info member of the initial entry in section header table. 
if (shnum > 0) { ut32 shoff = obj->ehdr.e_shoff; Elf_(Shdr) shdr = {0}; (void)r_buf_read_at (obj->b, shoff, (ut8 *)&shdr, sizeof (shdr)); num = shdr.sh_info; if ((int)(shdr.sh_info) < 1) { return UT16_MAX; } } } return num; } static bool read_phdr(ELFOBJ *bin, bool linux_kernel_hack) { bool phdr_found = false; int i; #if R_BIN_ELF64 const bool is_elf64 = true; #else const bool is_elf64 = false; #endif ut64 phnum = Elf_(r_bin_elf_get_phnum) (bin); for (i = 0; i < phnum; i++) { ut8 phdr[sizeof (Elf_(Phdr))] = {0}; int j = 0; const size_t rsize = bin->ehdr.e_phoff + i * sizeof (Elf_(Phdr)); int len = r_buf_read_at (bin->b, rsize, phdr, sizeof (Elf_(Phdr))); if (len < 1) { R_LOG_ERROR ("read (phdr)"); R_FREE (bin->phdr); return false; } bin->phdr[i].p_type = READ32 (phdr, j); if (bin->phdr[i].p_type == PT_PHDR) { phdr_found = true; } if (is_elf64) { bin->phdr[i].p_flags = READ32 (phdr, j); } bin->phdr[i].p_offset = R_BIN_ELF_READWORD (phdr, j); bin->phdr[i].p_vaddr = R_BIN_ELF_READWORD (phdr, j); bin->phdr[i].p_paddr = R_BIN_ELF_READWORD (phdr, j); bin->phdr[i].p_filesz = R_BIN_ELF_READWORD (phdr, j); bin->phdr[i].p_memsz = R_BIN_ELF_READWORD (phdr, j); if (!is_elf64) { bin->phdr[i].p_flags = READ32 (phdr, j); // bin->phdr[i].p_flags |= 1; tiny.elf needs this somehow :? LOAD0 is always +x for linux? } bin->phdr[i].p_align = R_BIN_ELF_READWORD (phdr, j); } /* Here is the where all the fun starts. * Linux kernel since 2005 calculates phdr offset wrongly * adding it to the load address (va of the LOAD0). 
* See `fs/binfmt_elf.c` file this line: * NEW_AUX_ENT(AT_PHDR, load_addr + exec->e_phoff); * So after the first read, we fix the address and read it again */ if (linux_kernel_hack && phdr_found) { ut64 load_addr = Elf_(r_bin_elf_get_baddr) (bin); bin->ehdr.e_phoff = Elf_(r_bin_elf_v2p) (bin, load_addr + bin->ehdr.e_phoff); return read_phdr (bin, false); } return true; } static int init_phdr(ELFOBJ *bin) { ut32 phdr_size; r_return_val_if_fail (bin && !bin->phdr, false); if (!bin->ehdr.e_phnum) { return false; } if (!UT32_MUL (&phdr_size, (ut32)bin->ehdr.e_phnum, sizeof (Elf_(Phdr)))) { return false; } if (!phdr_size) { return false; } if (phdr_size > bin->size) { return false; } if (phdr_size > (ut32)bin->size) { return false; } if (bin->ehdr.e_phoff > bin->size) { return false; } if (bin->ehdr.e_phoff + phdr_size > bin->size) { return false; } ut64 phnum = Elf_(r_bin_elf_get_phnum) (bin); if (!(bin->phdr = R_NEWS0 (Elf_(Phdr), phnum))) { r_sys_perror ("malloc (phdr)"); return false; } bool linux_kern_hack = false; /* Enable this hack only for the X86 64bit ELFs */ const int _128K = 1024 * 128; if (r_buf_size (bin->b) > _128K && (bin->ehdr.e_machine == EM_X86_64 || bin->ehdr.e_machine == EM_386)) { linux_kern_hack = true; } if (!read_phdr (bin, linux_kern_hack)) { return false; } sdb_num_set (bin->kv, "elf_phdr.offset", bin->ehdr.e_phoff, 0); sdb_num_set (bin->kv, "elf_phdr.size", sizeof (Elf_(Phdr)), 0); sdb_set (bin->kv, "elf_p_type.cparse", "enum elf_p_type {PT_NULL=0,PT_LOAD=1,PT_DYNAMIC=2," "PT_INTERP=3,PT_NOTE=4,PT_SHLIB=5,PT_PHDR=6,PT_LOOS=0x60000000," "PT_HIOS=0x6fffffff,PT_LOPROC=0x70000000,PT_HIPROC=0x7fffffff};", 0); sdb_set (bin->kv, "elf_p_flags.cparse", "enum elf_p_flags {PF_None=0,PF_Exec=1," "PF_Write=2,PF_Write_Exec=3,PF_Read=4,PF_Read_Exec=5,PF_Read_Write=6," "PF_Read_Write_Exec=7};", 0); #if R_BIN_ELF64 sdb_set (bin->kv, "elf_phdr.format", "[4]E[4]Eqqqqqq (elf_p_type)type (elf_p_flags)flags" " offset vaddr paddr filesz memsz align", 0); #else 
sdb_set (bin->kv, "elf_phdr.format", "[4]Exxxxx[4]Ex (elf_p_type)type offset vaddr paddr" " filesz memsz (elf_p_flags)flags align", 0); #endif return true; // Usage example: // > td `k bin/cur/info/elf_p_type.cparse`; td `k bin/cur/info/elf_p_flags.cparse` // > pf `k bin/cur/info/elf_phdr.format` @ `k bin/cur/info/elf_phdr.offset` } static int init_shdr(ELFOBJ *bin) { ut32 shdr_size; ut8 shdr[sizeof (Elf_(Shdr))] = {0}; size_t i, j, len; r_return_val_if_fail (bin && !bin->shdr, false); if (!UT32_MUL (&shdr_size, bin->ehdr.e_shnum, sizeof (Elf_(Shdr)))) { return false; } if (shdr_size < 1) { return false; } if (shdr_size > bin->size) { return false; } if (bin->ehdr.e_shoff > bin->size) { return false; } if (bin->ehdr.e_shoff + shdr_size > bin->size) { return false; } if (!(bin->shdr = R_NEWS0 (Elf_(Shdr), bin->ehdr.e_shnum))) { r_sys_perror ("malloc (shdr)"); return false; } sdb_num_set (bin->kv, "elf_shdr.offset", bin->ehdr.e_shoff, 0); sdb_num_set (bin->kv, "elf_shdr.size", sizeof (Elf_(Shdr)), 0); sdb_set (bin->kv, "elf_s_type.cparse", "enum elf_s_type {SHT_NULL=0,SHT_PROGBITS=1," "SHT_SYMTAB=2,SHT_STRTAB=3,SHT_RELA=4,SHT_HASH=5,SHT_DYNAMIC=6,SHT_NOTE=7," "SHT_NOBITS=8,SHT_REL=9,SHT_SHLIB=10,SHT_DYNSYM=11,SHT_LOOS=0x60000000," "SHT_HIOS=0x6fffffff,SHT_LOPROC=0x70000000,SHT_HIPROC=0x7fffffff};", 0); for (i = 0; i < bin->ehdr.e_shnum; i++) { j = 0; len = r_buf_read_at (bin->b, bin->ehdr.e_shoff + i * sizeof (Elf_(Shdr)), shdr, sizeof (Elf_(Shdr))); if (len < 1) { R_LOG_ERROR ("read (shdr) at 0x%" PFMT64x, (ut64) bin->ehdr.e_shoff); R_FREE (bin->shdr); return false; } bin->shdr[i].sh_name = READ32 (shdr, j); bin->shdr[i].sh_type = READ32 (shdr, j); bin->shdr[i].sh_flags = R_BIN_ELF_READWORD (shdr, j); bin->shdr[i].sh_addr = R_BIN_ELF_READWORD (shdr, j); bin->shdr[i].sh_offset = R_BIN_ELF_READWORD (shdr, j); bin->shdr[i].sh_size = R_BIN_ELF_READWORD (shdr, j); bin->shdr[i].sh_link = READ32 (shdr, j); bin->shdr[i].sh_info = READ32 (shdr, j); bin->shdr[i].sh_addralign 
= R_BIN_ELF_READWORD (shdr, j); bin->shdr[i].sh_entsize = R_BIN_ELF_READWORD (shdr, j); } #if R_BIN_ELF64 sdb_set (bin->kv, "elf_s_flags_64.cparse", "enum elf_s_flags_64 {SF64_None=0,SF64_Exec=1," "SF64_Alloc=2,SF64_Alloc_Exec=3,SF64_Write=4,SF64_Write_Exec=5," "SF64_Write_Alloc=6,SF64_Write_Alloc_Exec=7};", 0); sdb_set (bin->kv, "elf_shdr.format", "x[4]E[8]Eqqqxxqq name (elf_s_type)type" " (elf_s_flags_64)flags addr offset size link info addralign entsize", 0); #else sdb_set (bin->kv, "elf_s_flags_32.cparse", "enum elf_s_flags_32 {SF32_None=0,SF32_Exec=1," "SF32_Alloc=2,SF32_Alloc_Exec=3,SF32_Write=4,SF32_Write_Exec=5," "SF32_Write_Alloc=6,SF32_Write_Alloc_Exec=7};", 0); sdb_set (bin->kv, "elf_shdr.format", "x[4]E[4]Exxxxxxx name (elf_s_type)type" " (elf_s_flags_32)flags addr offset size link info addralign entsize", 0); #endif return true; // Usage example: // > td `k bin/cur/info/elf_s_type.cparse`; td `k bin/cur/info/elf_s_flags_64.cparse` // > pf `k bin/cur/info/elf_shdr.format` @ `k bin/cur/info/elf_shdr.offset` } static bool is_shidx_valid(ELFOBJ *bin, Elf_(Half) value) { return value < bin->ehdr.e_shnum && !R_BETWEEN (SHN_LORESERVE, value, SHN_HIRESERVE); } static int init_strtab(ELFOBJ *bin) { r_return_val_if_fail (!bin->strtab, false); if (!bin->shdr) { return false; } Elf_(Half) shstrndx = bin->ehdr.e_shstrndx; if (shstrndx != SHN_UNDEF && !is_shidx_valid (bin, shstrndx)) { return false; } /* sh_size must be lower than UT32_MAX and not equal to zero, to avoid bugs on malloc() */ if (bin->shdr[shstrndx].sh_size > UT32_MAX) { return false; } if (!bin->shdr[shstrndx].sh_size) { return false; } bin->shstrtab_section = bin->strtab_section = &bin->shdr[shstrndx]; bin->shstrtab_size = bin->shstrtab_section->sh_size; if (bin->shstrtab_size > bin->size) { return false; } if (bin->shstrtab_section->sh_offset > bin->size) { return false; } if (bin->shstrtab_section->sh_offset + bin->shstrtab_section->sh_size > bin->size) { return false; } if (!(bin->shstrtab = 
calloc (1, bin->shstrtab_size + 1))) { r_sys_perror ("malloc"); bin->shstrtab = NULL; return false; } int res = r_buf_read_at (bin->b, bin->shstrtab_section->sh_offset, (ut8*)bin->shstrtab, bin->shstrtab_section->sh_size); if (res < 1) { R_LOG_ERROR ("read (shstrtab) at 0x%" PFMT64x, (ut64) bin->shstrtab_section->sh_offset); R_FREE (bin->shstrtab); return false; } bin->shstrtab[bin->shstrtab_section->sh_size] = '\0'; sdb_num_set (bin->kv, "elf_shstrtab.offset", bin->shstrtab_section->sh_offset, 0); sdb_num_set (bin->kv, "elf_shstrtab.size", bin->shstrtab_section->sh_size, 0); return true; } static Elf_(Phdr) *get_dynamic_segment(ELFOBJ *bin) { int i; for (i = 0; i < bin->ehdr.e_phnum; i++) { if (bin->phdr[i].p_type == PT_DYNAMIC) { if (bin->phdr[i].p_filesz > bin->size) { return NULL; } if (bin->phdr[i].p_offset > bin->size) { return NULL; } if (bin->phdr[i].p_offset + sizeof (Elf_(Dyn)) > bin->size) { return NULL; } return &bin->phdr[i]; } } return NULL; } static void init_dynamic_section_sdb(ELFOBJ *bin, Elf_(Addr) strtabaddr, size_t strsize) { int r = Elf_(r_bin_elf_has_relro) (bin); switch (r) { case R_ELF_FULL_RELRO: sdb_set (bin->kv, "elf.relro", "full", 0); break; case R_ELF_PART_RELRO: sdb_set (bin->kv, "elf.relro", "partial", 0); break; default: sdb_set (bin->kv, "elf.relro", "no", 0); break; } sdb_num_set (bin->kv, "elf_strtab.offset", strtabaddr, 0); sdb_num_set (bin->kv, "elf_strtab.size", strsize, 0); } static void set_default_value_dynamic_info(ELFOBJ *bin) { bin->dyn_info.dt_pltrelsz = 0; bin->dyn_info.dt_pltgot = R_BIN_ELF_ADDR_MAX; bin->dyn_info.dt_hash = R_BIN_ELF_ADDR_MAX; bin->dyn_info.dt_strtab = R_BIN_ELF_ADDR_MAX; bin->dyn_info.dt_symtab = R_BIN_ELF_ADDR_MAX; bin->dyn_info.dt_rela = R_BIN_ELF_ADDR_MAX; bin->dyn_info.dt_relasz = 0; bin->dyn_info.dt_relaent = 0; bin->dyn_info.dt_strsz = 0; bin->dyn_info.dt_syment = 0; bin->dyn_info.dt_rel = R_BIN_ELF_ADDR_MAX; bin->dyn_info.dt_relsz = 0; bin->dyn_info.dt_relent = 0; bin->dyn_info.dt_pltrel = 
R_BIN_ELF_XWORD_MAX; bin->dyn_info.dt_jmprel = R_BIN_ELF_ADDR_MAX; bin->dyn_info.dt_pltgot = R_BIN_ELF_ADDR_MAX; bin->dyn_info.dt_mips_pltgot = R_BIN_ELF_ADDR_MAX; bin->dyn_info.dt_bind_now = false; bin->dyn_info.dt_flags = R_BIN_ELF_XWORD_MAX; bin->dyn_info.dt_flags_1 = R_BIN_ELF_XWORD_MAX; bin->dyn_info.dt_rpath = R_BIN_ELF_XWORD_MAX; bin->dyn_info.dt_runpath = R_BIN_ELF_XWORD_MAX; r_vector_init(&bin->dyn_info.dt_needed, sizeof (Elf_(Off)), NULL, NULL); } static size_t get_maximum_number_of_dynamic_entries(ut64 dyn_size) { return dyn_size / sizeof (Elf_(Dyn)); } static bool fill_dynamic_entry(ELFOBJ *bin, ut64 entry_offset, Elf_(Dyn) *d) { ut8 sdyn[sizeof (Elf_(Dyn))] = {0}; int j = 0; int len = r_buf_read_at (bin->b, entry_offset, sdyn, sizeof (Elf_(Dyn))); if (len < 1) { return false; } d->d_tag = R_BIN_ELF_READWORD (sdyn, j); d->d_un.d_ptr = R_BIN_ELF_READWORD (sdyn, j); return true; } static void fill_dynamic_entries(ELFOBJ *bin, ut64 loaded_offset, ut64 dyn_size) { Elf_(Dyn) d = {0}; size_t i; size_t number_of_entries = get_maximum_number_of_dynamic_entries(dyn_size); for (i = 0; i < number_of_entries; i++) { ut64 entry_offset = loaded_offset + i * sizeof (Elf_(Dyn)); if (!fill_dynamic_entry (bin, entry_offset, &d)) { break; } switch (d.d_tag) { case DT_NULL: break; case DT_PLTRELSZ: bin->dyn_info.dt_pltrelsz = d.d_un.d_val; break; case DT_PLTGOT: bin->dyn_info.dt_pltgot = d.d_un.d_ptr; break; case DT_HASH: bin->dyn_info.dt_hash = d.d_un.d_ptr; break; case DT_STRTAB: bin->dyn_info.dt_strtab = d.d_un.d_ptr; break; case DT_SYMTAB: bin->dyn_info.dt_symtab = d.d_un.d_ptr; break; case DT_RELA: bin->dyn_info.dt_rela = d.d_un.d_ptr; break; case DT_RELASZ: bin->dyn_info.dt_relasz = d.d_un.d_val; break; case DT_RELAENT: bin->dyn_info.dt_relaent = d.d_un.d_val; break; case DT_STRSZ: bin->dyn_info.dt_strsz = d.d_un.d_val; break; case DT_SYMENT: bin->dyn_info.dt_syment = d.d_un.d_val; break; case DT_REL: bin->dyn_info.dt_rel = d.d_un.d_ptr; break; case DT_RELSZ: 
bin->dyn_info.dt_relsz = d.d_un.d_val; break; case DT_RELENT: bin->dyn_info.dt_relent = d.d_un.d_val; break; case DT_PLTREL: bin->dyn_info.dt_pltrel = d.d_un.d_val; break; case DT_JMPREL: bin->dyn_info.dt_jmprel = d.d_un.d_ptr; break; case DT_MIPS_PLTGOT: bin->dyn_info.dt_mips_pltgot = d.d_un.d_ptr; break; case DT_BIND_NOW: bin->dyn_info.dt_bind_now = true; break; case DT_FLAGS: bin->dyn_info.dt_flags = d.d_un.d_val; break; case DT_FLAGS_1: bin->dyn_info.dt_flags_1 = d.d_un.d_val; break; case DT_RPATH: bin->dyn_info.dt_rpath = d.d_un.d_val; break; case DT_RUNPATH: bin->dyn_info.dt_runpath = d.d_un.d_val; break; case DT_NEEDED: r_vector_push (&bin->dyn_info.dt_needed, &d.d_un.d_val); break; case DT_INIT: case DT_FINI: case DT_DEBUG: case DT_INIT_ARRAY: case DT_FINI_ARRAY: case DT_INIT_ARRAYSZ: case DT_FINI_ARRAYSZ: case DT_PREINIT_ARRAY: case DT_PREINIT_ARRAYSZ: case DT_SONAME: case DT_GNU_HASH: // common dynamic entries in ELF, but we don't need to // do anything with them. break; default: if ((d.d_tag >= DT_VERSYM) && (d.d_tag <= DT_VERNEEDNUM)) { bin->version_info[DT_VERSIONTAGIDX (d.d_tag)] = d.d_un.d_val; } else { R_LOG_DEBUG ("Dynamic tag %" PFMT64d " not handled", (ut64) d.d_tag); } break; } if (d.d_tag == DT_NULL) { break; } } } static int init_dynamic_section(ELFOBJ *bin) { ut64 strtabaddr = 0; char *strtab = NULL; size_t strsize = 0; int r; ut64 dyn_size = 0, loaded_offset; set_default_value_dynamic_info(bin); r_return_val_if_fail (bin, false); if (!bin->phdr || !bin->ehdr.e_phnum) { return false; } Elf_(Phdr) *dyn_phdr = get_dynamic_segment (bin); if (!dyn_phdr) { return false; } dyn_size = dyn_phdr->p_filesz; loaded_offset = Elf_(r_bin_elf_v2p_new) (bin, dyn_phdr->p_vaddr); if (loaded_offset == UT64_MAX) { return false; } if (!dyn_size || loaded_offset + dyn_size > bin->size) { return false; } fill_dynamic_entries (bin, loaded_offset, dyn_size); if (bin->dyn_info.dt_strtab != R_BIN_ELF_ADDR_MAX) { strtabaddr = Elf_(r_bin_elf_v2p_new) (bin, 
bin->dyn_info.dt_strtab); } if (bin->dyn_info.dt_strsz > 0) { strsize = bin->dyn_info.dt_strsz; } if (strtabaddr == UT64_MAX || strtabaddr > bin->size || strsize > ST32_MAX || !strsize || strsize > bin->size || strtabaddr + strsize > bin->size) { if (!strtabaddr) { R_LOG_DEBUG ("DT_STRTAB not found or invalid"); } return false; } strtab = (char *)calloc (1, strsize + 1); if (!strtab) { return false; } r = r_buf_read_at (bin->b, strtabaddr, (ut8 *)strtab, strsize); if (r < 1) { free (strtab); return false; } bin->strtab = strtab; bin->strtab_size = strsize; init_dynamic_section_sdb (bin, strtabaddr, strsize); return true; } static RBinElfSection* get_section_by_name(ELFOBJ *bin, const char *section_name) { if (bin->g_sections) { size_t i; for (i = 0; !bin->g_sections[i].last; i++) { if (!strncmp (bin->g_sections[i].name, section_name, ELF_STRING_LENGTH - 1)) { return &bin->g_sections[i]; } } } return NULL; } static char *get_ver_flags(ut32 flags) { if (!flags) { return "none"; } static char buff[32]; buff[0] = 0; if (flags & VER_FLG_BASE) { strcpy (buff, "BASE "); } if (flags & VER_FLG_WEAK) { if (flags & VER_FLG_BASE) { strcat (buff, "| "); } strcat (buff, "WEAK "); } if (flags & ~(VER_FLG_BASE | VER_FLG_WEAK)) { strcat (buff, "| <unknown>"); } return buff; } static Sdb *store_versioninfo_gnu_versym(ELFOBJ *bin, Elf_(Shdr) *shdr, int sz) { size_t i; const ut64 num_entries = sz / sizeof (Elf_(Versym)); const char *section_name = ""; const char *link_section_name = ""; Sdb *sdb = sdb_new0 (); if (!sdb) { return NULL; } if (!bin->version_info[DT_VERSIONTAGIDX (DT_VERSYM)]) { sdb_free (sdb); return NULL; } if (shdr->sh_link >= bin->ehdr.e_shnum) { sdb_free (sdb); return NULL; } Elf_(Shdr) *link_shdr = &bin->shdr[shdr->sh_link]; ut8 *edata = (ut8*) calloc (R_MAX (1, num_entries), 2 * sizeof (ut8)); if (!edata) { sdb_free (sdb); return NULL; } ut16 *data = (ut16 *)calloc (R_MAX (1, num_entries), sizeof (ut16)); if (!data) { free (edata); sdb_free (sdb); return NULL; } 
ut64 off = Elf_(r_bin_elf_v2p) (bin, bin->version_info[DT_VERSIONTAGIDX (DT_VERSYM)]); if (bin->shstrtab && shdr->sh_name < bin->shstrtab_size) { section_name = &bin->shstrtab[shdr->sh_name]; } if (bin->shstrtab && link_shdr->sh_name < bin->shstrtab_size) { link_section_name = &bin->shstrtab[link_shdr->sh_name]; } r_buf_read_at (bin->b, off, edata, sizeof (ut16) * num_entries); sdb_set (sdb, "section_name", section_name, 0); sdb_num_set (sdb, "num_entries", num_entries, 0); sdb_num_set (sdb, "addr", shdr->sh_addr, 0); sdb_num_set (sdb, "offset", shdr->sh_offset, 0); sdb_num_set (sdb, "link", shdr->sh_link, 0); sdb_set (sdb, "link_section_name", link_section_name, 0); for (i = num_entries; i--;) { data[i] = r_read_ble16 (&edata[i * sizeof (ut16)], bin->endian); } R_FREE (edata); char *tmp_val = NULL; for (i = 0; i < num_entries; i += 4) { size_t j; int check_def; char key[32] = {0}; for (j = 0; (j < 4) && (i + j) < num_entries; j++) { int k; snprintf (key, sizeof (key), "entry%d", (int)(i + j)); switch (data[i + j]) { case 0: sdb_set (sdb, key, "0 (*local*)", 0); break; case 1: sdb_set (sdb, key, "1 (*global*)", 0); break; default: free (tmp_val); tmp_val = r_str_newf ("%x ", data[i+j] & 0x7FFF); check_def = true; if (bin->version_info[DT_VERSIONTAGIDX (DT_VERNEED)]) { Elf_(Verneed) vn; ut8 svn[sizeof (Elf_(Verneed))] = {0}; ut64 offset = Elf_(r_bin_elf_v2p) (bin, bin->version_info[DT_VERSIONTAGIDX (DT_VERNEED)]); do { Elf_(Vernaux) vna; ut8 svna[sizeof (Elf_(Vernaux))] = {0}; ut64 a_off; if (offset > bin->size || offset + sizeof (vn) > bin->size) { goto beach; } if (r_buf_read_at (bin->b, offset, svn, sizeof (svn)) < 0) { R_LOG_DEBUG ("Cannot read Verneed for Versym"); goto beach; } k = 0; vn.vn_version = READ16 (svn, k); vn.vn_cnt = READ16 (svn, k); vn.vn_file = READ32 (svn, k); vn.vn_aux = READ32 (svn, k); vn.vn_next = READ32 (svn, k); a_off = offset + vn.vn_aux; do { if (a_off > bin->size || a_off + sizeof (vna) > bin->size) { goto beach; } if (r_buf_read_at 
(bin->b, a_off, svna, sizeof (svna)) < 0) { R_LOG_DEBUG ("Cannot read Vernaux for Versym"); goto beach; } k = 0; vna.vna_hash = READ32 (svna, k); vna.vna_flags = READ16 (svna, k); vna.vna_other = READ16 (svna, k); vna.vna_name = READ32 (svna, k); vna.vna_next = READ32 (svna, k); a_off += vna.vna_next; } while (vna.vna_other != data[i + j] && vna.vna_next != 0); if (vna.vna_other == data[i + j]) { if (vna.vna_name > bin->strtab_size) { goto beach; } char *val = r_str_newf ("%s(%s)", tmp_val, bin->strtab + vna.vna_name); sdb_set (sdb, key, val, 0); free (val); check_def = false; break; } offset += vn.vn_next; } while (vn.vn_next); } ut64 vinfoaddr = bin->version_info[DT_VERSIONTAGIDX (DT_VERDEF)]; if (check_def && data[i + j] != 0x8001 && vinfoaddr) { Elf_(Verdef) vd; ut8 svd[sizeof (Elf_(Verdef))] = {0}; ut64 offset = Elf_(r_bin_elf_v2p) (bin, vinfoaddr); if (offset > bin->size || offset + sizeof (vd) > bin->size) { goto beach; } do { if (r_buf_read_at (bin->b, offset, svd, sizeof (svd)) < 0) { R_LOG_DEBUG ("Cannot read Verdef for Versym"); goto beach; } k = 0; vd.vd_version = READ16 (svd, k); vd.vd_flags = READ16 (svd, k); vd.vd_ndx = READ16 (svd, k); vd.vd_cnt = READ16 (svd, k); vd.vd_hash = READ32 (svd, k); vd.vd_aux = READ32 (svd, k); vd.vd_next = READ32 (svd, k); offset += vd.vd_next; } while (vd.vd_ndx != (data[i + j] & 0x7FFF) && vd.vd_next != 0); if (vd.vd_ndx == (data[i + j] & 0x7FFF)) { Elf_(Verdaux) vda; ut8 svda[sizeof (Elf_(Verdaux))] = {0}; ut64 off_vda = offset - vd.vd_next + vd.vd_aux; if (off_vda > bin->size || off_vda + sizeof (vda) > bin->size) { goto beach; } if (r_buf_read_at (bin->b, off_vda, svda, sizeof (svda)) < 0) { R_LOG_DEBUG ("Cannot read Verdaux for Versym"); goto beach; } k = 0; vda.vda_name = READ32 (svda, k); vda.vda_next = READ32 (svda, k); if (vda.vda_name > bin->strtab_size) { goto beach; } const char *name = bin->strtab + vda.vda_name; if (name) { char *fname = r_str_newf ("%s(%s%-*s)", tmp_val, name, (int)(12 - strlen 
(name)),")"); sdb_set (sdb, key, fname, 0); free (fname); } } } } } R_FREE (tmp_val); } beach: R_FREE (tmp_val); free (data); return sdb; } static Sdb *store_versioninfo_gnu_verdef(ELFOBJ *bin, Elf_(Shdr) *shdr, int sz) { const char *section_name = ""; const char *link_section_name = ""; char *end = NULL; ut8 dfs[sizeof (Elf_(Verdef))] = {0}; ut32 cnt; size_t i; if (shdr->sh_link >= bin->ehdr.e_shnum) { return false; } Elf_(Shdr) *link_shdr = &bin->shdr[shdr->sh_link]; #ifdef R_BIN_ELF64 if ((int)shdr->sh_size < 1 || shdr->sh_size > SIZE_MAX) { #else if ((int)shdr->sh_size < 1) { #endif return false; } if (shdr->sh_size < sizeof (Elf_(Verdef)) || shdr->sh_size < sizeof (Elf_(Verdaux))) { return false; } Elf_(Verdef) *defs = calloc (shdr->sh_size, 1); if (!defs) { R_LOG_ERROR ("Cannot allocate memory (Check Elf_(Verdef))"); return false; } if (bin->shstrtab && shdr->sh_name < bin->shstrtab_size) { section_name = &bin->shstrtab[shdr->sh_name]; } if (link_shdr && bin->shstrtab && link_shdr->sh_name < bin->shstrtab_size) { link_section_name = &bin->shstrtab[link_shdr->sh_name]; } Sdb *sdb = sdb_new0 (); if (!sdb) { free (defs); return false; } size_t shsize = shdr->sh_size; if (shdr->sh_size > bin->size) { if (bin->verbose) { eprintf ("Truncating shsize from %d to %d\n", (int)shdr->sh_size, (int)bin->size); } if (bin->size > shdr->sh_offset) { shsize = bin->size - shdr->sh_offset; } else { shsize = bin->size; } } end = (char *)defs + shsize; //& shdr->sh_size; sdb_set (sdb, "section_name", section_name, 0); sdb_num_set (sdb, "entries", shdr->sh_info, 0); sdb_num_set (sdb, "addr", shdr->sh_addr, 0); sdb_num_set (sdb, "offset", shdr->sh_offset, 0); sdb_num_set (sdb, "link", shdr->sh_link, 0); sdb_set (sdb, "link_section_name", link_section_name, 0); for (cnt = 0, i = 0; cnt < shdr->sh_info && i < shdr->sh_size; cnt++) { Sdb *sdb_verdef = sdb_new0 (); char *vstart = ((char*)defs) + i; size_t vstart_off = i; char key[32] = {0}; Elf_(Verdef) *verdef = (Elf_(Verdef)*)vstart; 
Elf_(Verdaux) aux = {0}; int j = 0; int isum = 0; if (vstart + sizeof (*verdef) > end) { break; } r_buf_read_at (bin->b, shdr->sh_offset + i, dfs, sizeof (Elf_(Verdef))); verdef->vd_version = READ16 (dfs, j); verdef->vd_flags = READ16 (dfs, j); verdef->vd_ndx = READ16 (dfs, j); verdef->vd_cnt = READ16 (dfs, j); verdef->vd_hash = READ32 (dfs, j); verdef->vd_aux = READ32 (dfs, j); verdef->vd_next = READ32 (dfs, j); int vdaux = verdef->vd_aux; if (vdaux < 1 || shdr->sh_size - vstart_off < vdaux) { sdb_free (sdb_verdef); goto out_error; } vstart += vdaux; vstart_off += vdaux; if (vstart > end || shdr->sh_size - sizeof (Elf_(Verdaux)) < vstart_off) { sdb_free (sdb_verdef); goto out_error; } j = 0; aux.vda_name = READ32 (vstart, j); aux.vda_next = READ32 (vstart, j); isum = i + verdef->vd_aux; if (aux.vda_name > bin->dynstr_size) { sdb_free (sdb_verdef); goto out_error; } sdb_num_set (sdb_verdef, "idx", i, 0); sdb_num_set (sdb_verdef, "vd_version", verdef->vd_version, 0); sdb_num_set (sdb_verdef, "vd_ndx", verdef->vd_ndx, 0); sdb_num_set (sdb_verdef, "vd_cnt", verdef->vd_cnt, 0); sdb_set (sdb_verdef, "vda_name", &bin->dynstr[aux.vda_name], 0); sdb_set (sdb_verdef, "flags", get_ver_flags (verdef->vd_flags), 0); for (j = 1; j < verdef->vd_cnt; j++) { int k; Sdb *sdb_parent = sdb_new0 (); if (shdr->sh_size - vstart_off < aux.vda_next) { sdb_free (sdb_verdef); sdb_free (sdb_parent); goto out_error; } isum += aux.vda_next; vstart += aux.vda_next; vstart_off += aux.vda_next; if (vstart > end || shdr->sh_size - sizeof (Elf_(Verdaux)) < vstart_off) { sdb_free (sdb_verdef); sdb_free (sdb_parent); goto out_error; } k = 0; aux.vda_name = READ32 (vstart, k); aux.vda_next = READ32 (vstart, k); if (aux.vda_name > bin->dynstr_size) { sdb_free (sdb_verdef); sdb_free (sdb_parent); goto out_error; } sdb_num_set (sdb_parent, "idx", isum, 0); sdb_num_set (sdb_parent, "parent", j, 0); sdb_set (sdb_parent, "vda_name", &bin->dynstr[aux.vda_name], 0); snprintf (key, sizeof (key), "parent%d", j 
- 1); sdb_ns_set (sdb_verdef, key, sdb_parent); } snprintf (key, sizeof (key), "verdef%u", cnt); sdb_ns_set (sdb, key, sdb_verdef); if (!verdef->vd_next || shdr->sh_size - i < verdef->vd_next) { sdb_free (sdb_verdef); goto out_error; } if ((st32)verdef->vd_next < 1) { R_LOG_DEBUG ("Invalid vd_next in the ELF version"); break; } i += verdef->vd_next; } free (defs); return sdb; out_error: free (defs); sdb_free (sdb); return NULL; } static Sdb *store_versioninfo_gnu_verneed(ELFOBJ *bin, Elf_(Shdr) *shdr, int sz) { ut8 *end, *need = NULL; const char *section_name = ""; Elf_(Shdr) *link_shdr = NULL; const char *link_section_name = ""; Sdb *sdb_vernaux = NULL; Sdb *sdb_version = NULL; Sdb *sdb = NULL; ut64 i; int cnt; if (!bin || !bin->dynstr) { return NULL; } if (shdr->sh_link >= bin->ehdr.e_shnum) { return NULL; } #ifdef R_BIN_ELF64 if ((int)shdr->sh_size < 1 || shdr->sh_size > SIZE_MAX) { #else if ((int)shdr->sh_size < 1) { #endif return NULL; } sdb = sdb_new0 (); if (!sdb) { return NULL; } link_shdr = &bin->shdr[shdr->sh_link]; if (bin->shstrtab && shdr->sh_name < bin->shstrtab_size) { section_name = &bin->shstrtab[shdr->sh_name]; } if (bin->shstrtab && link_shdr->sh_name < bin->shstrtab_size) { link_section_name = &bin->shstrtab[link_shdr->sh_name]; } if (!(need = (ut8*) calloc (R_MAX (1, shdr->sh_size), sizeof (ut8)))) { R_LOG_ERROR ("Cannot allocate memory for Elf_(Verneed)"); goto beach; } end = need + shdr->sh_size; sdb_set (sdb, "section_name", section_name, 0); sdb_num_set (sdb, "num_entries", shdr->sh_info, 0); sdb_num_set (sdb, "addr", shdr->sh_addr, 0); sdb_num_set (sdb, "offset", shdr->sh_offset, 0); sdb_num_set (sdb, "link", shdr->sh_link, 0); sdb_set (sdb, "link_section_name", link_section_name, 0); if (shdr->sh_offset > bin->size || shdr->sh_offset + shdr->sh_size > bin->size) { goto beach; } if (shdr->sh_offset + shdr->sh_size < shdr->sh_size) { goto beach; } i = r_buf_read_at (bin->b, shdr->sh_offset, need, shdr->sh_size); if (i < 1) { goto beach; } 
//XXX we should use DT_VERNEEDNUM instead of sh_info //TODO https://sourceware.org/ml/binutils/2014-11/msg00353.html for (i = 0, cnt = 0; cnt < shdr->sh_info; cnt++) { int j, isum; ut8 *vstart = need + i; Elf_(Verneed) vvn = {0}; if (vstart + sizeof (Elf_(Verneed)) > end) { goto beach; } Elf_(Verneed) *entry = &vvn; char key[32] = {0}; sdb_version = sdb_new0 (); if (!sdb_version) { goto beach; } j = 0; vvn.vn_version = READ16 (vstart, j); vvn.vn_cnt = READ16 (vstart, j); vvn.vn_file = READ32 (vstart, j); vvn.vn_aux = READ32 (vstart, j); vvn.vn_next = READ32 (vstart, j); sdb_num_set (sdb_version, "vn_version", entry->vn_version, 0); sdb_num_set (sdb_version, "idx", i, 0); if (entry->vn_file > bin->dynstr_size) { goto beach; } { char *s = r_str_ndup (&bin->dynstr[entry->vn_file], 16); sdb_set (sdb_version, "file_name", s, 0); free (s); } sdb_num_set (sdb_version, "cnt", entry->vn_cnt, 0); st32 vnaux = entry->vn_aux; if (vnaux < 1) { goto beach; } vstart += vnaux; ut32 vn_cnt = entry->vn_cnt; for (j = 0, isum = i + entry->vn_aux; j < vn_cnt && vstart + sizeof (Elf_(Vernaux)) <= end; j++) { int k; Elf_(Vernaux) *aux = NULL; Elf_(Vernaux) vaux = {0}; aux = (Elf_(Vernaux)*)&vaux; k = 0; vaux.vna_hash = READ32 (vstart, k); vaux.vna_flags = READ16 (vstart, k); vaux.vna_other = READ16 (vstart, k); vaux.vna_name = READ32 (vstart, k); vaux.vna_next = READ32 (vstart, k); if (aux->vna_name > bin->dynstr_size) { goto beach; } #if 1 sdb_vernaux = sdb_new0 (); if (!sdb_vernaux) { goto beach; } sdb_num_set (sdb_vernaux, "idx", isum, 0); if (aux->vna_name > 0 && aux->vna_name + 8 < bin->dynstr_size) { char name [16]; strncpy (name, &bin->dynstr[aux->vna_name], sizeof (name)-1); name[sizeof (name)-1] = 0; sdb_set (sdb_vernaux, "name", name, 0); } sdb_set (sdb_vernaux, "flags", get_ver_flags (aux->vna_flags), 0); sdb_num_set (sdb_vernaux, "version", aux->vna_other, 0); isum += aux->vna_next; vstart += aux->vna_next; snprintf (key, sizeof (key), "vernaux%d", j); sdb_ns_set 
(sdb_version, key, sdb_vernaux); #else char *key = r_str_newf ("vernaux%d", j); char *val = r_str_newf ("%d,%s", isum, get_ver_flags (aux->vna_flags)); sdb_set (sdb_version, key, val, 0); free (key); free (val); #endif } if ((int)entry->vn_next < 0) { R_LOG_DEBUG ("Invalid vn_next at 0x%08" PFMT64x, (ut64)shdr->sh_offset); break; } i += entry->vn_next; snprintf (key, sizeof (key), "version%d", cnt ); sdb_ns_set (sdb, key, sdb_version); //if entry->vn_next is 0 it iterate infinitely if (!entry->vn_next) { break; } } free (need); return sdb; beach: free (need); sdb_free (sdb_vernaux); sdb_free (sdb_version); sdb_free (sdb); return NULL; } static Sdb *store_versioninfo(ELFOBJ *bin) { Sdb *sdb_versioninfo = NULL; int num_verdef = 0; int num_verneed = 0; int num_versym = 0; size_t i; if (!bin || !bin->shdr) { return NULL; } if (!(sdb_versioninfo = sdb_new0 ())) { return NULL; } for (i = 0; i < bin->ehdr.e_shnum; i++) { Sdb *sdb = NULL; char key[32] = {0}; int size = bin->shdr[i].sh_size; if (size - (i * sizeof (Elf_(Shdr)) > bin->size)) { size = bin->size - (i*sizeof (Elf_(Shdr))); } int left = size - (i * sizeof (Elf_(Shdr))); left = R_MIN (left, bin->shdr[i].sh_size); if (left < 0) { break; } switch (bin->shdr[i].sh_type) { case SHT_GNU_verdef: sdb = store_versioninfo_gnu_verdef (bin, &bin->shdr[i], left); snprintf (key, sizeof (key), "verdef%d", num_verdef++); sdb_ns_set (sdb_versioninfo, key, sdb); break; case SHT_GNU_verneed: sdb = store_versioninfo_gnu_verneed (bin, &bin->shdr[i], left); snprintf (key, sizeof (key), "verneed%d", num_verneed++); sdb_ns_set (sdb_versioninfo, key, sdb); break; case SHT_GNU_versym: sdb = store_versioninfo_gnu_versym (bin, &bin->shdr[i], left); snprintf (key, sizeof (key), "versym%d", num_versym++); sdb_ns_set (sdb_versioninfo, key, sdb); break; } } return sdb_versioninfo; } static bool init_dynstr(ELFOBJ *bin) { int i, r; const char *section_name = NULL; if (!bin || !bin->shdr) { return false; } if (!bin->shstrtab) { return false; } 
for (i = 0; i < bin->ehdr.e_shnum; i++) { if (bin->shdr[i].sh_name > bin->shstrtab_size) { return false; } section_name = &bin->shstrtab[bin->shdr[i].sh_name]; if (bin->shdr[i].sh_type == SHT_STRTAB && !strcmp (section_name, ".dynstr")) { if (!(bin->dynstr = (char*) calloc (bin->shdr[i].sh_size + 1, sizeof (char)))) { R_LOG_ERROR ("Cannot allocate memory for dynamic strings\n"); return false; } if (bin->shdr[i].sh_offset > bin->size) { return false; } if (bin->shdr[i].sh_offset + bin->shdr[i].sh_size > bin->size) { return false; } if (bin->shdr[i].sh_offset + bin->shdr[i].sh_size < bin->shdr[i].sh_size) { return false; } r = r_buf_read_at (bin->b, bin->shdr[i].sh_offset, (ut8*)bin->dynstr, bin->shdr[i].sh_size); if (r < 1) { R_FREE (bin->dynstr); bin->dynstr_size = 0; return false; } bin->dynstr_size = bin->shdr[i].sh_size; return true; } } return false; } static HtUP *rel_cache_new(RBinElfReloc *relocs, ut32 reloc_num) { if (!relocs || reloc_num == 0) { return NULL; } const int htsize = R_MIN (reloc_num, 1024); HtUP *rel_cache = ht_up_new_size (htsize, NULL, NULL, NULL); if (rel_cache) { size_t i; for (i = 0; i < reloc_num; i++) { RBinElfReloc *tmp = relocs + i; ht_up_insert (rel_cache, tmp->sym, tmp); } } return rel_cache; } static bool elf_init(ELFOBJ *bin) { /* bin is not an ELF */ if (!init_ehdr (bin)) { return false; } if (!init_phdr (bin) && !is_bin_etrel (bin)) { R_LOG_DEBUG ("Cannot initialize program headers"); } if (bin->ehdr.e_type != ET_CORE) { if (!init_shdr (bin)) { R_LOG_DEBUG ("Cannot initialize section headers"); } if (!init_strtab (bin)) { R_LOG_DEBUG ("Cannot initialize strings table"); } if (!init_dynstr (bin) && !is_bin_etrel (bin)) { R_LOG_DEBUG ("Cannot initialize dynamic strings"); } bin->baddr = Elf_(r_bin_elf_get_baddr) (bin); if (!init_dynamic_section (bin) && !Elf_(r_bin_elf_is_static) (bin) && !is_bin_etrel (bin)) { R_LOG_DEBUG ("Cannot initialize dynamic section"); } } bin->imports_by_ord_size = 0; bin->imports_by_ord = NULL; 
bin->symbols_by_ord_size = 0; bin->symbols_by_ord = NULL; bin->g_sections = Elf_(r_bin_elf_get_sections) (bin); bin->boffset = Elf_(r_bin_elf_get_boffset) (bin); bin->g_relocs = Elf_(r_bin_elf_get_relocs) (bin); bin->rel_cache = rel_cache_new (bin->g_relocs, bin->g_reloc_num); sdb_ns_set (bin->kv, "versioninfo", store_versioninfo (bin)); return true; } ut64 Elf_(r_bin_elf_get_section_offset)(ELFOBJ *bin, const char *section_name) { RBinElfSection *section = get_section_by_name (bin, section_name); return section? section->offset: UT64_MAX; } ut64 Elf_(r_bin_elf_get_section_addr)(ELFOBJ *bin, const char *section_name) { RBinElfSection *section = get_section_by_name (bin, section_name); return section? section->rva: UT64_MAX; } ut64 Elf_(r_bin_elf_get_section_addr_end)(ELFOBJ *bin, const char *section_name) { RBinElfSection *section = get_section_by_name (bin, section_name); return section? section->rva + section->size: UT64_MAX; } static ut64 get_got_entry(ELFOBJ *bin, RBinElfReloc *rel) { if (!rel->rva) { return UT64_MAX; } ut64 p_sym_got_addr = Elf_(r_bin_elf_v2p_new) (bin, rel->rva); ut64 addr = R_BIN_ELF_BREADWORD (bin->b, p_sym_got_addr); return (!addr || addr == R_BIN_ELF_WORD_MAX) ? 
UT64_MAX : addr; } static bool is_thumb_symbol(ut64 plt_addr) { return plt_addr & 1; } static ut64 get_import_addr_arm(ELFOBJ *bin, RBinElfReloc *rel) { ut64 got_addr = bin->dyn_info.dt_pltgot; if (got_addr == R_BIN_ELF_ADDR_MAX) { return UT64_MAX; } ut64 plt_addr = get_got_entry (bin, rel); if (plt_addr == UT64_MAX) { return UT64_MAX; } ut64 pos = COMPUTE_PLTGOT_POSITION (rel, got_addr, 0x3); switch (rel->type) { case R_ARM_JUMP_SLOT: plt_addr += pos * 12 + 20; if (is_thumb_symbol (plt_addr)) { plt_addr--; } return plt_addr; case R_AARCH64_RELATIVE: R_LOG_WARN ("Unsupported relocation type for imports %d", rel->type); return UT64_MAX; case R_AARCH64_IRELATIVE: if (rel->addend > plt_addr) { // start return (plt_addr + pos * 16 + 32) + rel->addend; } // same as fallback to JUMP_SLOT return plt_addr + pos * 16 + 32; case R_AARCH64_JUMP_SLOT: return plt_addr + pos * 16 + 32; default: R_LOG_WARN ("Unsupported relocation type for imports %d", rel->type); return UT64_MAX; } return UT64_MAX; } static ut64 get_import_addr_mips(ELFOBJ *bin, RBinElfReloc *rel) { ut64 jmprel_addr = bin->dyn_info.dt_jmprel; ut64 got_addr = bin->dyn_info.dt_mips_pltgot; if (jmprel_addr == R_BIN_ELF_ADDR_MAX || got_addr == R_BIN_ELF_ADDR_MAX) { return UT64_MAX; } ut64 pos = COMPUTE_PLTGOT_POSITION(rel, got_addr, 0x2); ut8 buf[1024]; ut64 plt_addr = jmprel_addr + bin->dyn_info.dt_pltrelsz; ut64 p_plt_addr = Elf_(r_bin_elf_v2p_new) (bin, plt_addr); int res = r_buf_read_at (bin->b, p_plt_addr, buf, sizeof (buf)); if (res != sizeof (buf)) { return UT64_MAX; } const ut8 *base = r_mem_mem_aligned (buf, sizeof (buf), (const ut8 *)"\x3c\x0f\x00", 3, 4); plt_addr += base? (int)(size_t) (base - buf): MIPS_PLT_OFFSET + 8; // HARDCODED HACK plt_addr += pos * 16; return plt_addr; } static size_t get_size_rel_mode(Elf_(Xword) mode) { return mode == DT_RELA? 
sizeof (Elf_(Rela)): sizeof (Elf_(Rel)); } static ut64 get_num_relocs_dynamic_plt(ELFOBJ *bin) { if (bin->dyn_info.dt_pltrelsz) { const ut64 size = bin->dyn_info.dt_pltrelsz; const ut64 relsize = get_size_rel_mode (bin->dyn_info.dt_pltrel); return size / relsize; } return 0; } static ut64 get_import_addr_riscv(ELFOBJ *bin, RBinElfReloc *rel) { ut64 got_addr = bin->dyn_info.dt_pltgot; if (got_addr == R_BIN_ELF_ADDR_MAX) { return UT64_MAX; } ut64 plt_addr = get_got_entry (bin, rel); if (plt_addr == UT64_MAX) { return UT64_MAX; } ut64 pos = COMPUTE_PLTGOT_POSITION(rel, got_addr, 0x2); return plt_addr + RISCV_PLT_OFFSET + pos * RISCV_PLT_ENTRY_SIZE; } static ut64 get_import_addr_loongarch(ELFOBJ *bin, RBinElfReloc *rel) { ut64 got_addr = bin->dyn_info.dt_pltgot; if (got_addr == R_BIN_ELF_ADDR_MAX) { return UT64_MAX; } ut64 plt_addr = get_got_entry (bin, rel); if (plt_addr == UT64_MAX) { return UT64_MAX; } ut64 pos = COMPUTE_PLTGOT_POSITION(rel, got_addr, 0x2); return plt_addr + LOONGARCH_PLT_OFFSET + pos * LOONGARCH_PLT_ENTRY_SIZE; } static ut64 get_import_addr_sparc(ELFOBJ *bin, RBinElfReloc *rel) { if (rel->type != R_SPARC_JMP_SLOT) { R_LOG_DEBUG ("Unknown sparc reloc type %d", rel->type); return UT64_MAX; } ut64 tmp = get_got_entry (bin, rel); return (tmp == UT64_MAX) ? 
UT64_MAX : tmp + SPARC_OFFSET_PLT_ENTRY_FROM_GOT_ADDR; } static ut64 get_import_addr_ppc(ELFOBJ *bin, RBinElfReloc *rel) { ut64 plt_addr = bin->dyn_info.dt_pltgot; if (plt_addr == R_BIN_ELF_ADDR_MAX) { return UT64_MAX; } ut64 p_plt_addr = Elf_(r_bin_elf_v2p_new) (bin, plt_addr); if (p_plt_addr == UT64_MAX) { return UT64_MAX; } ut64 base = r_buf_read_ble32_at (bin->b, p_plt_addr, bin->endian); if (base == UT32_MAX) { return UT64_MAX; } ut64 nrel = get_num_relocs_dynamic_plt (bin); ut64 pos = COMPUTE_PLTGOT_POSITION(rel, plt_addr, 0x0); if (bin->endian) { base -= (nrel * 16); base += (pos * 16); return base; } base -= (nrel * 12) + 20; base += (pos * 8); return base; } static ut64 get_import_addr_x86_manual(ELFOBJ *bin, RBinElfReloc *rel) { ut64 got_addr = bin->dyn_info.dt_pltgot; if (got_addr == R_BIN_ELF_ADDR_MAX) { return UT64_MAX; } ut64 got_offset = Elf_(r_bin_elf_v2p_new) (bin, got_addr); if (got_offset == UT64_MAX) { return UT64_MAX; } //XXX HACK ALERT!!!! full relro?? try to fix it //will there always be .plt.got, what would happen if is .got.plt? RBinElfSection *s = get_section_by_name (bin, ".plt.got"); if (Elf_(r_bin_elf_has_relro) (bin) < R_ELF_PART_RELRO || !s) { return UT64_MAX; } ut8 buf[sizeof (Elf_(Addr))] = {0}; ut64 plt_addr = s->offset; ut64 plt_sym_addr; while (plt_addr + 2 + 4 < s->offset + s->size) { /*we try to locate the plt entry that correspond with the relocation since got does not point back to .plt. 
In this case it has the following form ff253a152000 JMP QWORD [RIP + 0x20153A] 6690 NOP ---- ff25ec9f0408 JMP DWORD [reloc.puts_236] plt_addr + 2 to remove jmp opcode and get the imm reading 4 and if RIP (plt_addr + 6) + imm == rel->offset return plt_addr, that will be our sym addr perhaps this hack doesn't work on 32 bits */ int res = r_buf_read_at (bin->b, plt_addr + 2, buf, sizeof (ut32)); if (res < 0) { return UT64_MAX; } size_t i = 0; plt_sym_addr = R_BIN_ELF_READWORD (buf, i); //relative address if ((plt_addr + 6 + Elf_(r_bin_elf_v2p) (bin, plt_sym_addr)) == rel->rva) { return plt_addr; } if (plt_sym_addr == rel->rva) { return plt_addr; } plt_addr += 8; } return UT64_MAX; } static ut64 get_import_addr_x86(ELFOBJ *bin, RBinElfReloc *rel) { ut64 tmp = get_got_entry (bin, rel); if (tmp == UT64_MAX) { return get_import_addr_x86_manual (bin, rel); } RBinElfSection *pltsec_section = get_section_by_name (bin, ".plt.sec"); if (pltsec_section) { ut64 got_addr = bin->dyn_info.dt_pltgot; ut64 pos = COMPUTE_PLTGOT_POSITION (rel, got_addr, 0x3); return pltsec_section->rva + pos * X86_PLT_ENTRY_SIZE; } return tmp + X86_OFFSET_PLT_ENTRY_FROM_GOT_ADDR; } static ut64 get_import_addr(ELFOBJ *bin, int sym) { if ((!bin->shdr || !bin->strtab) && !bin->phdr) { return UT64_MAX; } if (!bin->rel_cache) { return UT64_MAX; } // lookup the right rel/rela entry RBinElfReloc *rel = ht_up_find (bin->rel_cache, sym, NULL); if (!rel) { return UT64_MAX; } switch (bin->ehdr.e_machine) { case EM_ARM: case EM_AARCH64: return get_import_addr_arm (bin, rel); case EM_MIPS: // MIPS32 BIG ENDIAN relocs return get_import_addr_mips (bin, rel); case EM_VAX: // as beautiful as riscv <3 return get_import_addr_riscv (bin, rel); case EM_RISCV: return get_import_addr_riscv (bin, rel); case EM_SPARC: case EM_SPARCV9: case EM_SPARC32PLUS: return get_import_addr_sparc (bin, rel); case EM_PPC: case EM_PPC64: return get_import_addr_ppc (bin, rel); case EM_386: case EM_X86_64: return get_import_addr_x86 (bin, 
rel); case EM_LOONGARCH: return get_import_addr_loongarch(bin, rel); default: eprintf ("Unsupported relocs type %" PFMT64u " for arch %d\n", (ut64) rel->type, bin->ehdr.e_machine); return UT64_MAX; } } int Elf_(r_bin_elf_has_nx)(ELFOBJ *bin) { r_return_val_if_fail (bin, 0); int i; if (bin && bin->phdr) { for (i = 0; i < bin->ehdr.e_phnum; i++) { if (bin->phdr[i].p_type == PT_GNU_STACK) { return (!(bin->phdr[i].p_flags & 1))? 1: 0; } } } return 0; } int Elf_(r_bin_elf_has_relro)(ELFOBJ *bin) { r_return_val_if_fail (bin, R_ELF_NO_RELRO); bool haveBindNow = false; bool haveGnuRelro = false; if (bin->dyn_info.dt_bind_now) { haveBindNow = true; } else if (bin->dyn_info.dt_flags != R_BIN_ELF_XWORD_MAX && bin->dyn_info.dt_flags != R_BIN_ELF_XWORD_MAX) { haveBindNow = bin->dyn_info.dt_flags_1 & DF_1_NOW; } if (bin->phdr) { size_t i; for (i = 0; i < bin->ehdr.e_phnum; i++) { if (bin->phdr[i].p_type == PT_GNU_RELRO) { haveGnuRelro = true; break; } } } if (haveGnuRelro) { if (haveBindNow) { return R_ELF_FULL_RELRO; } return R_ELF_PART_RELRO; } return R_ELF_NO_RELRO; } /* To compute the base address, one determines the memory address associated with the lowest p_vaddr value for a PT_LOAD segment. One then obtains the base address by truncating the memory address to the nearest multiple of the maximum page size */ ut64 Elf_(r_bin_elf_get_baddr)(ELFOBJ *bin) { ut64 tmp, base = UT64_MAX; if (!bin) { return 0; } if (bin->phdr) { size_t i; for (i = 0; i < bin->ehdr.e_phnum; i++) { if (bin->phdr[i].p_type == PT_LOAD) { tmp = (ut64)bin->phdr[i].p_vaddr & ELF_PAGE_MASK; tmp = tmp - (tmp % (1 << ELF_PAGE_SIZE)); if (tmp < base) { base = tmp; } } } } if (base == UT64_MAX && is_bin_etrel (bin)) { //we return our own base address for ET_REL type //we act as a loader for ELF return 0x08000000; } return base == UT64_MAX? 
0: base; } ut64 Elf_(r_bin_elf_get_boffset)(ELFOBJ *bin) { ut64 tmp, base = UT64_MAX; r_return_val_if_fail (bin, 0); if (!bin->phdr) { return 0; // TODO: should return ut64.max } size_t i; for (i = 0; i < bin->ehdr.e_phnum; i++) { if (bin->phdr[i].p_type == PT_LOAD) { tmp = (ut64)bin->phdr[i].p_offset & ELF_PAGE_MASK; tmp = tmp - (tmp % (1 << ELF_PAGE_SIZE)); if (tmp < base) { base = tmp; } } } return base == UT64_MAX? 0: base; } ut64 Elf_(r_bin_elf_get_init_offset)(ELFOBJ *bin) { ut64 entry = Elf_(r_bin_elf_get_entry_offset) (bin); ut8 buf[128]; if (!bin || entry == UT64_MAX) { return UT64_MAX; } if (r_buf_read_at (bin->b, entry + 16, buf, sizeof (buf)) < 1) { R_LOG_ERROR ("read (init_offset)"); return 0; } if (buf[0] == 0x68) { // push // x86 only ut64 addr; memmove (buf, buf + 1, 4); addr = (ut64)r_read_le32 (buf); return Elf_(r_bin_elf_v2p) (bin, addr); } return 0; } ut64 Elf_(r_bin_elf_get_fini_offset)(ELFOBJ *bin) { r_return_val_if_fail (bin, UT64_MAX); ut64 entry = Elf_(r_bin_elf_get_entry_offset) (bin); if (entry == UT64_MAX) { return UT64_MAX; } ut8 buf[512]; if (r_buf_read_at (bin->b, entry + 11, buf, sizeof (buf)) == -1) { R_LOG_ERROR ("read (get_fini)"); return 0; } if (*buf == 0x68) { // push // x86/32 only memmove (buf, buf + 1, 4); ut64 addr = (ut64)r_read_le32 (buf); return Elf_(r_bin_elf_v2p) (bin, addr); } return 0; } ut64 Elf_(r_bin_elf_get_entry_offset)(ELFOBJ *bin) { r_return_val_if_fail (bin, UT64_MAX); ut64 entry = bin->ehdr.e_entry; if (!entry) { if (!Elf_(r_bin_elf_is_executable) (bin)) { return UT64_MAX; } entry = Elf_(r_bin_elf_get_section_offset)(bin, ".init.text"); if (entry != UT64_MAX) { return entry; } entry = Elf_(r_bin_elf_get_section_offset)(bin, ".text"); if (entry != UT64_MAX) { return entry; } return Elf_(r_bin_elf_get_section_offset)(bin, ".init"); } return Elf_(r_bin_elf_v2p) (bin, entry); } static ut64 getmainsymbol(ELFOBJ *bin) { struct r_bin_elf_symbol_t *symbol = Elf_(r_bin_elf_get_symbols) (bin); if (symbol) { size_t i; 
for (i = 0; !symbol[i].last; i++) { if (!strcmp (symbol[i].name, "main")) { return symbol[i].offset; } } } return UT64_MAX; } ut64 Elf_(r_bin_elf_get_main_offset)(ELFOBJ *bin) { r_return_val_if_fail (bin, UT64_MAX); ut64 entry = Elf_(r_bin_elf_get_entry_offset) (bin); if (entry == UT64_MAX) { return UT64_MAX; } ut8 buf[256]; if (entry > bin->size || (entry + sizeof (buf)) > bin->size) { return UT64_MAX; } // unnecessary to read 512 bytes imho if (r_buf_read_at (bin->b, entry, buf, sizeof (buf)) < 1) { R_LOG_ERROR ("read (main)"); return UT64_MAX; } // ARM64 if (buf[0x18 + 3] == 0x58 && buf[0x2f] == 0x00) { ut32 entry_vaddr = Elf_(r_bin_elf_p2v) (bin, entry); ut32 main_addr = r_read_le32 (&buf[0x30]); if ((main_addr >> 16) == (entry_vaddr >> 16)) { return Elf_(r_bin_elf_v2p) (bin, main_addr); } } // TODO: Use arch to identify arch before memcmp's // ARM Glibc if (entry & 1) { int delta = 0; /* thumb entry points */ if (!memcmp (buf, "\xf0\x00\x0b\x4f\xf0\x00\x0e\x02\xbc\x6a\x46", 11)) { /* newer versions of gcc use push/pop */ delta = 0x28; } else if (!memcmp (buf, "\xf0\x00\x0b\x4f\xf0\x00\x0e\x5d\xf8\x04\x1b", 11)) { /* older versions of gcc (4.5.x) use ldr/str */ delta = 0x30; } if (delta) { ut64 pa = Elf_(r_bin_elf_v2p) (bin, r_read_le32 (&buf[delta-1]) & ~1); if (pa < r_buf_size (bin->b)) { return pa; } } } else { /* non-thumb entry points */ if (!memcmp (buf, "\x00\xb0\xa0\xe3\x00\xe0\xa0\xe3", 8)) { return Elf_(r_bin_elf_v2p) (bin, r_read_le32 (&buf[0x34]) & ~1); } if (!memcmp (buf, "\x24\xc0\x9f\xe5\x00\xb0\xa0\xe3", 8)) { return Elf_(r_bin_elf_v2p) (bin, r_read_le32 (&buf[0x30]) & ~1); } } // MIPS /* get .got, calculate offset of main symbol */ if (!memcmp (buf, "\x21\x00\xe0\x03\x01\x00\x11\x04", 8)) { /* assuming the startup code looks like got = gp-0x7ff0 got[index__libc_start_main] ( got[index_main] ); looking for the instruction generating the first argument to find main lw a0, offset(gp) */ ut64 got_offset; if ((got_offset = 
Elf_(r_bin_elf_get_section_offset) (bin, ".got")) != -1 || (got_offset = Elf_(r_bin_elf_get_section_offset) (bin, ".got.plt")) != -1) { const ut64 gp = got_offset + 0x7ff0; size_t i, len = sizeof (buf) / sizeof (buf[0]); for (i = 0; i < len; i += 4) { const ut32 instr = r_read_le32 (&buf[i]); if ((instr & 0xffff0000) == 0x8f840000) { // lw a0, offset(gp) const short delta = instr & 0x0000ffff; r_buf_read_at (bin->b, /* got_entry_offset = */ gp + delta, buf, 4); return Elf_(r_bin_elf_v2p) (bin, r_read_le32 (&buf[0])); } } } return 0; } // X86-CGC if (buf[0] == 0xe8 && !memcmp (buf + 5, "\x50\xe8\x00\x00\x00\x00\xb8\x01\x00\x00\x00\x53", 12)) { size_t SIZEOF_CALL = 5; ut64 rel_addr = (ut64)((int)(buf[1] + (buf[2] << 8) + (buf[3] << 16) + (buf[4] << 24))); ut64 addr = Elf_(r_bin_elf_p2v)(bin, entry + SIZEOF_CALL); addr += rel_addr; return Elf_(r_bin_elf_v2p) (bin, addr); } // X86-PIE if (buf[0x00] == 0x48 && buf[0x1e] == 0x8d && buf[0x11] == 0xe8) { ut32 *pmain = (ut32*)(buf + 0x30); ut64 vmain = Elf_(r_bin_elf_p2v) (bin, (ut64)*pmain); ut64 ventry = Elf_(r_bin_elf_p2v) (bin, entry); if (vmain >> 16 == ventry >> 16) { return (ut64)vmain; } } // X86-PIE if (buf[0x1d] == 0x48 && buf[0x1e] == 0x8b) { if (!memcmp (buf, "\x31\xed\x49\x89", 4)) {// linux ut64 maddr, baddr; ut8 n32s[sizeof (ut32)] = {0}; maddr = entry + 0x24 + r_read_le32 (buf + 0x20); if (r_buf_read_at (bin->b, maddr, n32s, sizeof (ut32)) == -1) { R_LOG_ERROR ("read (maddr) 2"); return 0; } maddr = (ut64)r_read_le32 (&n32s[0]); baddr = (bin->ehdr.e_entry >> 16) << 16; if (bin->phdr) { baddr = Elf_(r_bin_elf_get_baddr) (bin); } maddr += baddr; return maddr; } } // X86-NONPIE #if R_BIN_ELF64 if (!memcmp (buf, "\x49\x89\xd9", 3) && buf[156] == 0xe8) { // openbsd return r_read_le32 (&buf[157]) + entry + 156 + 5; } if (!memcmp (buf+29, "\x48\xc7\xc7", 3)) { // linux ut64 addr = (ut64)r_read_le32 (&buf[29 + 3]); return Elf_(r_bin_elf_v2p) (bin, addr); } #else if (buf[23] == '\x68') { ut64 addr = (ut64)r_read_le32 
(&buf[23 + 1]);
		return Elf_(r_bin_elf_v2p) (bin, addr);
	}
#endif
	/* linux64 pie main -- probably buggy in some cases */
	// Begin offset may vary depending on the entry prelude
	int bo = 29;
	// endbr64 - fedora bins have this
	if (buf[0] == 0xf3 && buf[1] == 0x0f && buf[2] == 0x1e && buf[3] == 0xfa) {
		// Change begin offset if binary starts with 'endbr64'
		bo = 33;
	}
	if (buf[bo] == 0x48) {
		ut8 ch = buf[bo + 1];
		if (ch == 0x8d) { // lea rdi, qword [rip-0x21c4]
			ut8 *p = buf + bo + 3;
			st32 maindelta = (st32)r_read_le32 (p);
			ut64 vmain = (ut64)(entry + bo + maindelta) + 7;
			ut64 ventry = Elf_(r_bin_elf_p2v) (bin, entry);
			// Sanity heuristic: candidate main must share the upper address
			// bits (same 64K region) with the entrypoint
			if ((vmain >> 16) == (ventry >> 16)) {
				return (ut64)vmain;
			}
		} else if (ch == 0xc7) { // mov rdi, 0xADDR
			ut8 *p = buf + bo + 3;
			return (ut64)(ut32)r_read_le32 (p);
		}
	}
	/* find sym.main if possible */
	{
		ut64 m = getmainsymbol (bin);
		if (m != UT64_MAX) {
			return m;
		}
	}
	return UT64_MAX;
}

// Returns true when the binary is "stripped": it has section headers but
// carries neither a SHT_SYMTAB symbol table nor an embedded .gnu_debugdata
// (MiniDebugInfo) section.
bool Elf_(r_bin_elf_get_stripped)(ELFOBJ *bin) {
	if (!bin->shdr) {
		// no section headers at all -> cannot claim it is stripped
		return false;
	}
	if (bin->g_sections) {
		size_t i;
		// .gnu_debugdata holds a compressed symbol table, so debugging
		// information is still available
		for (i = 0; !bin->g_sections[i].last; i++) {
			if (!strcmp (bin->g_sections[i].name, ".gnu_debugdata")) {
				return false;
			}
		}
	}
	size_t i;
	for (i = 0; i < bin->ehdr.e_shnum; i++) {
		if (bin->shdr[i].sh_type == SHT_SYMTAB) {
			return false;
		}
	}
	return true;
}

// Reads the program interpreter path from the PT_INTERP segment (typically
// the dynamic linker path) and records it in bin->kv under
// "elf_header.intrp*". Returns a heap-allocated string owned by the caller,
// or NULL when absent or invalid.
char *Elf_(r_bin_elf_intrp)(ELFOBJ *bin) {
	int i;
	if (!bin || !bin->phdr) {
		return NULL;
	}
	for (i = 0; i < bin->ehdr.e_phnum; i++) {
		if (bin->phdr[i].p_type == PT_INTERP) {
			ut64 addr = bin->phdr[i].p_offset;
			int sz = bin->phdr[i].p_filesz;
			sdb_num_set (bin->kv, "elf_header.intrp_addr", addr, 0);
			sdb_num_set (bin->kv, "elf_header.intrp_size", sz, 0);
			// reject empty or file-overflowing interpreter strings
			if (sz < 1 || sz > r_buf_size (bin->b)) {
				return NULL;
			}
			char *str = malloc (sz + 1);
			if (!str) {
				return NULL;
			}
			if (r_buf_read_at (bin->b, addr, (ut8*)str, sz) < 1) {
				R_LOG_ERROR ("read (main)");
				free (str);
				return 0;
			}
			str[sz] = 0; // ensure NUL termination
			sdb_set (bin->kv, "elf_header.intrp", str, 0);
			return str;
		}
	}
	return NULL;
}

// A binary is statically linked when it has neither an interpreter
// (PT_INTERP) nor a dynamic segment (PT_DYNAMIC).
bool Elf_(r_bin_elf_is_static)(ELFOBJ *bin) {
	size_t i;
	if
(!bin->phdr) { return false; } for (i = 0; i < bin->ehdr.e_phnum; i++) { if (bin->phdr[i].p_type == PT_INTERP || bin->phdr[i].p_type == PT_DYNAMIC) { return false; } } return true; } char* Elf_(r_bin_elf_get_data_encoding)(ELFOBJ *bin) { switch (bin->ehdr.e_ident[EI_DATA]) { case ELFDATANONE: return strdup ("none"); case ELFDATA2LSB: return strdup ("2's complement, little endian"); case ELFDATA2MSB: return strdup ("2's complement, big endian"); default: return r_str_newf ("<unknown: %x>", bin->ehdr.e_ident[EI_DATA]); } } int Elf_(r_bin_elf_has_va)(ELFOBJ *bin) { return true; } char* Elf_(r_bin_elf_get_arch)(ELFOBJ *bin) { switch (bin->ehdr.e_machine) { case EM_ARC: case EM_ARC_A5: return strdup ("arc"); case EM_AVR: return strdup ("avr"); case EM_BA2_NON_STANDARD: case EM_BA2: return strdup ("ba2"); case EM_CRIS: return strdup ("cris"); case EM_68K: return strdup ("m68k"); case EM_MIPS: case EM_MIPS_RS3_LE: case EM_MIPS_X: return strdup ("mips"); case EM_MCST_ELBRUS: return strdup ("elbrus"); case EM_TRICORE: return strdup ("tricore"); case EM_RCE: return strdup ("mcore"); case EM_ARM: case EM_AARCH64: return strdup ("arm"); case EM_QDSP6: // EM_HEXAGON return strdup ("hexagon"); case EM_BLACKFIN: return strdup ("blackfin"); case EM_SPARC: case EM_SPARC32PLUS: case EM_SPARCV9: return strdup ("sparc"); case EM_PPC: case EM_PPC64: return strdup ("ppc"); case EM_PARISC: return strdup ("hppa"); case EM_PROPELLER: return strdup ("propeller"); case EM_MICROBLAZE: return strdup ("microblaze.gnu"); case EM_RISCV: return strdup ("riscv"); case EM_VAX: return strdup ("vax"); case EM_XTENSA: return strdup ("xtensa"); case EM_LANAI: return strdup ("lanai"); case EM_VIDEOCORE3: case EM_VIDEOCORE4: return strdup ("vc4"); case EM_MSP430: return strdup ("msp430"); case EM_SH: return strdup ("sh"); case EM_V800: return strdup ("v850"); case EM_V850: return strdup ("v850"); case EM_IA_64: return strdup ("ia64"); case EM_S390: return strdup ("s390"); case EM_KVX: return 
strdup("kvx"); case EM_LOONGARCH: return strdup ("loongarch"); default: return strdup ("x86"); } } char* Elf_(r_bin_elf_get_abi)(ELFOBJ *bin) { Elf_(Ehdr)* ehdr = (Elf_(Ehdr) *) &bin->ehdr; if (ehdr->e_machine == EM_MIPS) { if (is_elfclass64 (ehdr)) { return strdup ("n64"); } if (is_mips_n32 (ehdr)) { return strdup ("n32"); } if (is_mips_o32 (ehdr)) { return strdup ("o32"); } } return NULL; } char* Elf_(r_bin_elf_get_cpu)(ELFOBJ *bin) { if (bin->phdr && bin->ehdr.e_machine == EM_MIPS) { const ut32 mipsType = bin->ehdr.e_flags & EF_MIPS_ARCH; switch (mipsType) { case EF_MIPS_ARCH_1: return strdup ("mips1"); case EF_MIPS_ARCH_2: return strdup ("mips2"); case EF_MIPS_ARCH_3: return strdup ("mips3"); case EF_MIPS_ARCH_4: return strdup ("mips4"); case EF_MIPS_ARCH_5: return strdup ("mips5"); case EF_MIPS_ARCH_32: return strdup ("mips32"); case EF_MIPS_ARCH_64: return strdup ("mips64"); case EF_MIPS_ARCH_32R2: return strdup ("mips32r2"); case EF_MIPS_ARCH_64R2: return strdup ("mips64r2"); default : return strdup (" Unknown mips ISA"); } } return NULL; } char* Elf_(r_bin_elf_get_head_flag)(ELFOBJ *bin) { char *head_flag = NULL; char *str = Elf_(r_bin_elf_get_cpu) (bin); if (str) { head_flag = r_str_append (head_flag, str); free (str); } str = Elf_(r_bin_elf_get_abi) (bin); if (str) { head_flag = r_str_appendf (head_flag, " %s", str); free (str); } if (R_STR_ISEMPTY (head_flag)) { head_flag = r_str_append (head_flag, "unknown_flag"); } return head_flag; } // http://www.sco.com/developers/gabi/latest/ch4.eheader.html char* Elf_(r_bin_elf_get_machine_name)(ELFOBJ *bin) { switch (bin->ehdr.e_machine) { case EM_NONE: return strdup ("No machine"); case EM_M32: return strdup ("AT&T WE 32100"); case EM_SPARC: return strdup ("SUN SPARC"); case EM_386: return strdup ("Intel 80386"); case EM_68K: return strdup ("Motorola m68k family"); case EM_88K: return strdup ("Motorola m88k family"); case EM_860: return strdup ("Intel 80860"); case EM_MIPS: return strdup ("MIPS R3000"); case 
EM_S370: return strdup ("IBM System/370"); case EM_MIPS_RS3_LE: return strdup ("MIPS R3000 little-endian"); case EM_PARISC: return strdup ("HPPA"); case EM_VPP500: return strdup ("Fujitsu VPP500"); case EM_SPARC32PLUS: return strdup ("Sun's \"v8plus\""); case EM_960: return strdup ("Intel 80960"); case EM_PPC: return strdup ("PowerPC"); case EM_PPC64: return strdup ("PowerPC 64-bit"); case EM_S390: return strdup ("IBM S390"); case EM_V800: return strdup ("NEC V800 series"); case EM_FR20: return strdup ("Fujitsu FR20"); case EM_RH32: return strdup ("TRW RH-32"); case EM_RCE: return strdup ("Motorola RCE"); case EM_ARM: return strdup ("ARM"); case EM_BLACKFIN: return strdup ("Analog Devices Blackfin"); case EM_FAKE_ALPHA: return strdup ("Digital Alpha"); case EM_SH: return strdup ("Hitachi SH"); case EM_SPARCV9: return strdup ("SPARC v9 64-bit"); case EM_TRICORE: return strdup ("Siemens Tricore"); case EM_ARC: return strdup ("Argonaut RISC Core"); case EM_H8_300: return strdup ("Hitachi H8/300"); case EM_H8_300H: return strdup ("Hitachi H8/300H"); case EM_H8S: return strdup ("Hitachi H8S"); case EM_H8_500: return strdup ("Hitachi H8/500"); case EM_IA_64: return strdup ("Intel Merced"); case EM_MIPS_X: return strdup ("Stanford MIPS-X"); case EM_COLDFIRE: return strdup ("Motorola Coldfire"); case EM_68HC12: return strdup ("Motorola M68HC12"); case EM_MMA: return strdup ("Fujitsu MMA Multimedia Accelerator"); case EM_PCP: return strdup ("Siemens PCP"); case EM_NCPU: return strdup ("Sony nCPU embeeded RISC"); case EM_NDR1: return strdup ("Denso NDR1 microprocessor"); case EM_STARCORE: return strdup ("Motorola Start*Core processor"); case EM_ME16: return strdup ("Toyota ME16 processor"); case EM_ST100: return strdup ("STMicroelectronic ST100 processor"); case EM_TINYJ: return strdup ("Advanced Logic Corp. 
Tinyj emb.fam"); case EM_X86_64: return strdup ("AMD x86-64 architecture"); case EM_LANAI: return strdup ("32bit LANAI architecture"); case EM_PDSP: return strdup ("Sony DSP Processor"); case EM_PDP10: return strdup ("Digital Equipment Corp. PDP-10"); case EM_PDP11: return strdup ("Digital Equipment Corp. PDP-11"); case EM_FX66: return strdup ("Siemens FX66 microcontroller"); case EM_ST9PLUS: return strdup ("STMicroelectronics ST9+ 8/16 mc"); case EM_ST7: return strdup ("STmicroelectronics ST7 8 bit mc"); case EM_68HC16: return strdup ("Motorola MC68HC16 microcontroller"); case EM_68HC11: return strdup ("Motorola MC68HC11 microcontroller"); case EM_68HC08: return strdup ("Motorola MC68HC08 microcontroller"); case EM_68HC05: return strdup ("Motorola MC68HC05 microcontroller"); case EM_SVX: return strdup ("Silicon Graphics SVx"); case EM_ST19: return strdup ("STMicroelectronics ST19 8 bit mc"); case EM_VAX: return strdup ("Digital VAX"); case EM_CRIS: return strdup ("Axis Communications 32-bit embedded processor"); case EM_JAVELIN: return strdup ("Infineon Technologies 32-bit embedded processor"); case EM_FIREPATH: return strdup ("Element 14 64-bit DSP Processor"); case EM_ZSP: return strdup ("LSI Logic 16-bit DSP Processor"); case EM_MMIX: return strdup ("Donald Knuth's educational 64-bit processor"); case EM_HUANY: return strdup ("Harvard University machine-independent object files"); case EM_PRISM: return strdup ("SiTera Prism"); case EM_AVR: return strdup ("Atmel AVR 8-bit microcontroller"); case EM_FR30: return strdup ("Fujitsu FR30"); case EM_D10V: return strdup ("Mitsubishi D10V"); case EM_D30V: return strdup ("Mitsubishi D30V"); case EM_V850: return strdup ("NEC v850"); case EM_M32R: return strdup ("Mitsubishi M32R"); case EM_MN10300: return strdup ("Matsushita MN10300"); case EM_MN10200: return strdup ("Matsushita MN10200"); case EM_PJ: return strdup ("picoJava"); case EM_OPENRISC: return strdup ("OpenRISC 32-bit embedded processor"); case EM_ARC_A5: return 
strdup ("ARC Cores Tangent-A5"); case EM_XTENSA: return strdup ("Tensilica Xtensa Architecture"); case EM_AARCH64: return strdup ("ARM aarch64"); case EM_PROPELLER: return strdup ("Parallax Propeller"); case EM_MICROBLAZE: return strdup ("Xilinx MicroBlaze"); case EM_RISCV: return strdup ("RISC V"); case EM_VIDEOCORE3: return strdup ("VideoCore III"); case EM_VIDEOCORE4: return strdup ("VideoCore IV"); case EM_LATTICEMICO32: return strdup ("RISC processor for Lattice FPGA architecture"); case EM_SE_C17: return strdup ("Seiko Epson C17 family"); case EM_TI_C6000: return strdup ("The Texas Instruments TMS320C6000 DSP family"); case EM_TI_C2000: return strdup ("The Texas Instruments TMS320C2000 DSP family"); case EM_TI_C5500: return strdup ("The Texas Instruments TMS320C55x DSP family"); case EM_TI_ARP32: return strdup ("Texas Instruments Application Specific RISC Processor, 32bit fetch"); case EM_TI_PRU: return strdup ("Texas Instruments Programmable Realtime Unit"); case EM_MMDSP_PLUS: return strdup ("STMicroelectronics 64bit VLIW Data Signal Processor"); case EM_CYPRESS_M8C: return strdup ("Cypress M8C microprocessor"); case EM_R32C: return strdup ("Renesas R32C series microprocessors"); case EM_TRIMEDIA: return strdup ("NXP Semiconductors TriMedia architecture family"); case EM_QDSP6: return strdup ("QUALCOMM DSP6 Processor"); // Nonstandard case EM_8051: return strdup ("Intel 8051 and variants"); case EM_STXP7X: return strdup ("STMicroelectronics STxP7x family of configurable and extensible RISC processors"); case EM_NDS32: return strdup ("Andes Technology compact code size embedded RISC processor family"); case EM_ECOG1: return strdup ("Cyan Technology eCOG1X family"); // case EM_ECOG1X: return strdup ("Cyan Technology eCOG1X family"); // Nonstandard case EM_MAXQ30: return strdup ("Dallas Semiconductor MAXQ30 Core Micro-controllers"); case EM_XIMO16: return strdup ("New Japan Radio (NJR) 16-bit DSP Processor"); case EM_MANIK: return strdup ("M2000 Reconfigurable 
RISC Microprocessor"); case EM_CRAYNV2: return strdup ("Cray Inc. NV2 vector architecture"); case EM_RX: return strdup ("Renesas RX family"); case EM_METAG: return strdup ("Imagination Technologies META processor architecture"); case EM_MCST_ELBRUS: return strdup ("MCST Elbrus general purpose hardware architecture"); case EM_ECOG16: return strdup ("Cyan Technology eCOG16 family"); case EM_CR16: return strdup ("National Semiconductor CompactRISC CR16 16-bit microprocessor"); case EM_ETPU: return strdup ("Freescale Extended Time Processing Unit"); case EM_SLE9X: return strdup ("Infineon Technologies SLE9X core"); case EM_L10M: return strdup ("Intel L10M"); case EM_K10M: return strdup ("Intel K10M"); // case EM_AARCH64: return strdup ("ARM 64-bit architecture (AARCH64)"); // Nonstandard case EM_AVR32: return strdup ("Atmel Corporation 32-bit microprocessor family"); case EM_STM8: return strdup ("STMicroeletronics STM8 8-bit microcontroller"); case EM_TILE64: return strdup ("Tilera TILE64 multicore architecture family"); case EM_TILEPRO: return strdup ("Tilera TILEPro multicore architecture family"); // case EM_MICROBLAZE: return strdup ("Xilinx MicroBlaze 32-bit RISC soft processor core"); // Nonstandard case EM_CUDA: return strdup ("NVIDIA CUDA architecture"); case EM_TILEGX: return strdup ("Tilera TILE-Gx multicore architecture family"); case EM_CLOUDSHIELD: return strdup ("CloudShield architecture family"); case EM_COREA_1ST: return strdup ("KIPO-KAIST Core-A 1st generation processor family"); case EM_COREA_2ND: return strdup ("KIPO-KAIST Core-A 2nd generation processor family"); case EM_ARC_COMPACT2: return strdup ("Synopsys ARCompact V2"); case EM_OPEN8: return strdup ("Open8 8-bit RISC soft processor core"); case EM_RL78: return strdup ("Renesas RL78 family"); case EM_VIDEOCORE5: return strdup ("Broadcom VideoCore V processor"); case EM_78KOR: return strdup ("Renesas 78KOR family"); // case EM_56800EX: return strdup ("Freescale 56800EX Digital Signal Controller 
(DSC)"); // Nonstandard case EM_BA1: return strdup ("Beyond BA1 CPU architecture"); case EM_BA2_NON_STANDARD: case EM_BA2: return strdup ("Beyond BA2 CPU architecture"); case EM_XCORE: return strdup ("XMOS xCORE processor family"); case EM_MCHP_PIC: return strdup ("Microchip 8-bit PIC(r) family"); case EM_INTEL205: return strdup ("Reserved by Intel"); case EM_INTEL206: return strdup ("Reserved by Intel"); case EM_INTEL207: return strdup ("Reserved by Intel"); case EM_INTEL208: return strdup ("Reserved by Intel"); case EM_INTEL209: return strdup ("Reserved by Intel"); case EM_KM32: return strdup ("KM211 KM32 32-bit processor"); case EM_KMX32: return strdup ("KM211 KMX32 32-bit processor"); case EM_KMX16: return strdup ("KM211 KMX16 16-bit processor"); case EM_KMX8: return strdup ("KM211 KMX8 8-bit processor"); case EM_KVARC: return strdup ("KM211 KVARC processor"); case EM_CDP: return strdup ("Paneve CDP architecture family"); case EM_COGE: return strdup ("Cognitive Smart Memory Processor"); case EM_COOL: return strdup ("Bluechip Systems CoolEngine"); case EM_NORC: return strdup ("Nanoradio Optimized RISC"); case EM_CSR_KALIMBA: return strdup ("CSR Kalimba architecture family"); case EM_Z80: return strdup ("Zilog Z80"); case EM_VISIUM: return strdup ("Controls and Data Services VISIUMcore processor"); case EM_FT32: return strdup ("FTDI Chip FT32 high performance 32-bit RISC architecture"); case EM_MOXIE: return strdup ("Moxie processor family"); case EM_AMDGPU: return strdup ("AMD GPU architecture"); case EM_LOONGARCH: return strdup ("Loongson Loongarch"); default: return r_str_newf ("<unknown>: 0x%x", bin->ehdr.e_machine); } } char* Elf_(r_bin_elf_get_file_type)(ELFOBJ *bin) { r_return_val_if_fail (bin, NULL); ut32 e_type = (ut32)bin->ehdr.e_type; // cast to avoid warn in iphone-gcc, must be ut16 switch (e_type) { case ET_NONE: return strdup ("NONE (None)"); case ET_REL: return strdup ("REL (Relocatable file)"); case ET_EXEC: return strdup ("EXEC (Executable 
file)"); case ET_DYN: return strdup ("DYN (Shared object file)"); case ET_CORE: return strdup ("CORE (Core file)"); } if ((e_type >= ET_LOPROC) && (e_type <= ET_HIPROC)) { return r_str_newf ("Processor Specific: %x", e_type); } if ((e_type >= ET_LOOS) && (e_type <= ET_HIOS)) { return r_str_newf ("OS Specific: %x", e_type); } return r_str_newf ("<unknown>: %x", e_type); } char* Elf_(r_bin_elf_get_elf_class)(ELFOBJ *bin) { switch (bin->ehdr.e_ident[EI_CLASS]) { case ELFCLASSNONE: return strdup ("none"); case ELFCLASS32: return strdup ("ELF32"); case ELFCLASS64: return strdup ("ELF64"); default: return r_str_newf ("<unknown: %x>", bin->ehdr.e_ident[EI_CLASS]); } } int Elf_(r_bin_elf_get_bits)(ELFOBJ *bin) { /* Hack for ARCompact */ if (bin->ehdr.e_machine == EM_ARC_A5) { return 16; } /* Hack for Ps2 */ if (bin->phdr && bin->ehdr.e_machine == EM_MIPS) { const ut32 mipsType = bin->ehdr.e_flags & EF_MIPS_ARCH; if (bin->ehdr.e_type == ET_EXEC) { int i; bool haveInterp = false; for (i = 0; i < bin->ehdr.e_phnum; i++) { if (bin->phdr[i].p_type == PT_INTERP) { haveInterp = true; } } if (!haveInterp && mipsType == EF_MIPS_ARCH_3) { // Playstation2 Hack return 64; } } // TODO: show this specific asm.cpu somewhere in bininfo (mips1, mips2, mips3, mips32r2, ...) 
switch (mipsType) {
	// 32-bit MIPS ISAs
	case EF_MIPS_ARCH_1:
	case EF_MIPS_ARCH_2:
	case EF_MIPS_ARCH_3:
	case EF_MIPS_ARCH_4:
	case EF_MIPS_ARCH_5:
	case EF_MIPS_ARCH_32:
		return 32;
	case EF_MIPS_ARCH_64:
		return 64;
	case EF_MIPS_ARCH_32R2:
		return 32;
	case EF_MIPS_ARCH_64R2:
		return 64;
		break;
	}
	return 32;
	}
	/* Hack for Thumb */
	if (bin->ehdr.e_machine == EM_ARM) {
		if (bin->ehdr.e_type != ET_EXEC) {
			struct r_bin_elf_symbol_t *symbol;
			if ((symbol = Elf_(r_bin_elf_get_symbols) (bin))) {
				int i = 0;
				for (i = 0; !symbol[i].last; i++) {
					ut64 paddr = symbol[i].offset;
					// odd symbol address marks Thumb (16-bit) code
					if (paddr & 1) {
						return 16;
					}
				}
			}
		}
		ut64 entry = Elf_(r_bin_elf_get_entry_offset) (bin);
		// odd entrypoint also marks Thumb mode
		if (entry & 1) {
			return 16;
		}
	}
	// fall back to the ELF class from the ident header
	switch (bin->ehdr.e_ident[EI_CLASS]) {
	case ELFCLASS32: return 32;
	case ELFCLASS64: return 64;
	case ELFCLASSNONE:
	default: return 32; // defaults
	}
}

// Scans the last 64 bytes of the file for the string `s`.
// Used as an OS-identification heuristic (e.g. BeOS app tail marker).
static inline int noodle(ELFOBJ *bin, const char *s) {
	if (r_buf_size (bin->b) >= 64) {
		ut8 tmp[64] = {0};
		if (r_buf_read_at (bin->b, r_buf_size (bin->b) - 64, tmp, 64) == 64) {
			return (bool)r_mem_mem (tmp, 64, (const ut8 *)s, strlen (s));
		}
	}
	return false;
}

// Searches the section-header string table for the string `s`.
// Another OS-identification heuristic (e.g. "freebsd", "GNU").
static inline bool needle(ELFOBJ *bin, const char *s) {
	if (bin->shstrtab) {
		ut32 len = bin->shstrtab_size;
		if (len > 4096) {
			len = 4096; // avoid slow loading .. can be buggy?
} return (bool)r_mem_mem ((const ut8*)bin->shstrtab, len, (const ut8*)s, strlen (s)); } return false; } // TODO: must return const char * all those strings must be const char os[LINUX] or so char* Elf_(r_bin_elf_get_osabi_name)(ELFOBJ *bin) { size_t i; size_t num = bin->ehdr.e_shnum; const char *section_name = NULL; switch (bin->ehdr.e_ident[EI_OSABI]) { case ELFOSABI_LINUX: return strdup("linux"); case ELFOSABI_SOLARIS: return strdup("solaris"); case ELFOSABI_FREEBSD: return strdup("freebsd"); case ELFOSABI_HPUX: return strdup("hpux"); } if (bin->shdr && bin->shstrtab) { for (i = 0; i < num; i++) { if (bin->shdr[i].sh_type == SHT_NOTE && bin->shdr[i].sh_name < bin->shstrtab_size) { section_name = &bin->shstrtab[bin->shdr[i].sh_name]; if (!strcmp (section_name, ".note.openbsd.ident")) { return strdup ("openbsd"); } if (!strcmp (section_name, ".note.minix.ident")) { return strdup ("minix"); } if (!strcmp (section_name, ".note.netbsd.ident")) { return strdup ("netbsd"); } if (!strcmp (section_name, ".note.android.ident")) { return strdup ("android"); } } } } /* Hack to identify OS */ if (needle (bin, "freebsd")) { return strdup ("freebsd"); } if (noodle (bin, "BEOS:APP_VERSION")) { return strdup ("beos"); } if (needle (bin, "GNU")) { return strdup ("linux"); } return strdup ("linux"); } ut8 *Elf_(r_bin_elf_grab_regstate)(ELFOBJ *bin, int *len) { if (bin->phdr) { size_t i; int num = bin->ehdr.e_phnum; for (i = 0; i < num; i++) { if (bin->phdr[i].p_type != PT_NOTE) { continue; } int bits = Elf_(r_bin_elf_get_bits)(bin); int elf_nhdr_size = (bits == 64) ? 
sizeof (Elf64_Nhdr) : sizeof (Elf32_Nhdr); void *elf_nhdr = calloc (elf_nhdr_size, 1); bool regs_found = false; ut64 offset = 0; while (!regs_found) { ut32 n_descsz, n_namesz, n_type; int ret; ret = r_buf_read_at (bin->b, bin->phdr[i].p_offset + offset, elf_nhdr, elf_nhdr_size); if (ret != elf_nhdr_size) { R_LOG_DEBUG ("Cannot read NOTES hdr from CORE file"); free (elf_nhdr); return NULL; } if (bits == 64) { n_descsz = round_up (((Elf64_Nhdr *)elf_nhdr)->n_descsz); n_namesz = round_up (((Elf64_Nhdr *)elf_nhdr)->n_namesz); n_type = ((Elf64_Nhdr *)elf_nhdr)->n_type; } else { n_descsz = round_up (((Elf32_Nhdr *)elf_nhdr)->n_descsz); n_namesz = round_up (((Elf32_Nhdr *)elf_nhdr)->n_namesz); n_type = ((Elf32_Nhdr *)elf_nhdr)->n_type; } if (n_type == NT_PRSTATUS) { regs_found = true; free (elf_nhdr); } else { offset += elf_nhdr_size + n_descsz + n_namesz; } } int regdelta = 0; int regsize = 0; switch (bin->ehdr.e_machine) { case EM_AARCH64: regsize = reginf[AARCH64].regsize; regdelta = reginf[AARCH64].regdelta; break; case EM_ARM: regsize = reginf[ARM].regsize; regdelta = reginf[ARM].regdelta; break; case EM_386: regsize = reginf[X86].regsize; regdelta = reginf[X86].regdelta; break; case EM_X86_64: regsize = reginf[X86_64].regsize; regdelta = reginf[X86_64].regdelta; break; } ut8 *buf = malloc (regsize); if (r_buf_read_at (bin->b, bin->phdr[i].p_offset + offset + regdelta, buf, regsize) != regsize) { free (buf); R_LOG_DEBUG ("Cannot read register state from CORE file"); return NULL; } if (len) { *len = regsize; } return buf; } } R_LOG_DEBUG ("Cannot find NOTE section."); return NULL; } int Elf_(r_bin_elf_is_big_endian)(ELFOBJ *bin) { return (bin->ehdr.e_ident[EI_DATA] == ELFDATA2MSB); } /* XXX Init dt_strtab? 
*/
// Returns the DT_RPATH or DT_RUNPATH string (run-time library search path)
// as a heap string owned by the caller, or NULL when absent or out of range.
char *Elf_(r_bin_elf_get_rpath)(ELFOBJ *bin) {
	r_return_val_if_fail (bin, NULL);
	Elf_(Xword) val;
	if (!bin->phdr || !bin->strtab) {
		return NULL;
	}
	// DT_RPATH takes precedence over DT_RUNPATH when both are present
	if (bin->dyn_info.dt_rpath != R_BIN_ELF_XWORD_MAX) {
		val = bin->dyn_info.dt_rpath;
	} else if (bin->dyn_info.dt_runpath != R_BIN_ELF_XWORD_MAX) {
		val = bin->dyn_info.dt_runpath;
	} else {
		return NULL;
	}
	if (val > bin->strtab_size) {
		return NULL;
	}
	return r_str_ndup (bin->strtab + val, ELF_STRING_LENGTH);
}

// True when section `pos` references a valid target-section index and
// section headers are available.
static bool has_valid_section_header(ELFOBJ *bin, size_t pos) {
	return bin->g_sections[pos].info < bin->ehdr.e_shnum && bin->shdr;
}

// In relocatable files (ET_REL) a reloc offset is relative to the section
// it patches; rebase it onto that section's file offset and derive the rva.
static void fix_rva_and_offset_relocable_file(ELFOBJ *bin, RBinElfReloc *r, size_t pos) {
	if (has_valid_section_header (bin, pos)) {
		size_t idx = bin->g_sections[pos].info;
		if (idx < bin->ehdr.e_shnum) {
			ut64 pa = bin->shdr[idx].sh_offset + r->offset;
			r->offset = pa;
			r->rva = Elf_(r_bin_elf_p2v) (bin, pa);
		} else {
			eprintf ("fix_rva_and_offset_reloc..: invalid index\n");
		}
	} else {
		r->rva = r->offset;
	}
}

// In executables the parsed reloc offset is already a virtual address:
// keep it as the rva and translate to a physical file offset.
static void fix_rva_and_offset_exec_file(ELFOBJ *bin, RBinElfReloc *r) {
	r->rva = r->offset;
	r->offset = Elf_(r_bin_elf_v2p) (bin, r->offset);
}

// Dispatches to the ET_REL or executable flavour of offset fixing.
static void fix_rva_and_offset(ELFOBJ *bin, RBinElfReloc *r, size_t pos) {
	if (is_bin_etrel (bin)) {
		fix_rva_and_offset_relocable_file (bin, r, pos);
	} else {
		fix_rva_and_offset_exec_file (bin, r);
	}
}

// Reads one Elf_(Rel)/Elf_(Rela) record located at virtual address `vaddr`
// and decodes it into `r`. `rel_mode` (DT_REL/DT_RELA) selects whether an
// explicit addend field follows. Returns false when the address cannot be
// mapped or the record cannot be read in full.
static bool read_reloc(ELFOBJ *bin, RBinElfReloc *r, Elf_(Xword) rel_mode, ut64 vaddr) {
	ut64 offset = Elf_(r_bin_elf_v2p_new) (bin, vaddr);
	if (offset == UT64_MAX) {
		return false;
	}
	size_t size_struct = get_size_rel_mode (rel_mode);
	// buffer sized for the larger Rela record; Rel reads use a prefix of it
	ut8 buf[sizeof (Elf_(Rela))] = {0};
	int res = r_buf_read_at (bin->b, offset, buf, size_struct);
	if (res != size_struct) {
		return false;
	}
	size_t i = 0;
	Elf_(Rela) reloc_info;
	reloc_info.r_offset = R_BIN_ELF_READWORD (buf, i);
	reloc_info.r_info = R_BIN_ELF_READWORD (buf, i);
	if (rel_mode == DT_RELA) {
		reloc_info.r_addend = R_BIN_ELF_READWORD (buf, i);
		r->addend = reloc_info.r_addend;
	}
	r->mode = rel_mode;
	r->last = 0;
r->offset = reloc_info.r_offset;
	r->sym = ELF_R_SYM (reloc_info.r_info);
	r->type = ELF_R_TYPE (reloc_info.r_info);
	return true;
}

// Number of relocation entries advertised by the dynamic segment
// (DT_RELASZ/DT_RELAENT and DT_RELSZ/DT_RELENT), plus the PLT relocations.
static size_t get_num_relocs_dynamic(ELFOBJ *bin) {
	size_t res = 0;
	if (bin->dyn_info.dt_relaent) {
		res += bin->dyn_info.dt_relasz / bin->dyn_info.dt_relaent;
	}
	if (bin->dyn_info.dt_relent) {
		res += bin->dyn_info.dt_relsz / bin->dyn_info.dt_relent;
	}
	return res + get_num_relocs_dynamic_plt (bin);
}

// A section is usable only when it lies entirely inside the file buffer.
static bool sectionIsValid(ELFOBJ *bin, RBinElfSection *sect) {
	return (sect->offset + sect->size <= bin->size);
}

// Maps a section name prefix to its relocation flavour:
// ".rela." -> DT_RELA (explicit addend), ".rel." -> DT_REL, otherwise 0.
static Elf_(Xword) get_section_mode(ELFOBJ *bin, size_t pos) {
	if (r_str_startswith (bin->g_sections[pos].name, ".rela.")) {
		return DT_RELA;
	}
	if (r_str_startswith (bin->g_sections[pos].name, ".rel.")) {
		return DT_REL;
	}
	return 0;
}

static bool is_reloc_section(Elf_(Xword) rel_mode) {
	return rel_mode == DT_REL || rel_mode == DT_RELA;
}

// Upper bound of relocation entries contained in .rel*/.rela* sections.
static size_t get_num_relocs_sections(ELFOBJ *bin) {
	size_t i, size, ret = 0;
	Elf_(Xword) rel_mode;
	if (!bin->g_sections) {
		return 0;
	}
	for (i = 0; !bin->g_sections[i].last; i++) {
		if (!sectionIsValid (bin, &bin->g_sections[i])) {
			continue;
		}
		rel_mode = get_section_mode (bin, i);
		if (!is_reloc_section (rel_mode)) {
			continue;
		}
		size = get_size_rel_mode (rel_mode);
		ret += NUMENTRIES_ROUNDUP (bin->g_sections[i].size, size);
	}
	return ret;
}

// Approximate total relocation count (dynamic + sections), clamped so a
// corrupted header cannot request an absurdly large allocation.
static size_t get_num_relocs_approx(ELFOBJ *bin) {
	size_t total = get_num_relocs_dynamic (bin) + get_num_relocs_sections (bin);
	if (total > bin->size) {
		return bin->size / 2;
	}
	return total;
}

// Parses the relocations referenced by the dynamic segment (DT_JMPREL/PLT,
// DT_RELA and DT_REL tables) into `relocs` starting at index `pos`.
// Returns the next free slot index.
static size_t populate_relocs_record_from_dynamic(ELFOBJ *bin, RBinElfReloc *relocs, size_t pos, size_t num_relocs) {
	size_t offset;
	size_t size = get_size_rel_mode (bin->dyn_info.dt_pltrel);
	for (offset = 0; offset < bin->dyn_info.dt_pltrelsz && pos < num_relocs; offset += size, pos++) {
		if (!read_reloc (bin, relocs + pos, bin->dyn_info.dt_pltrel, bin->dyn_info.dt_jmprel + offset)) {
			break;
		}
		fix_rva_and_offset_exec_file (bin, relocs + pos);
	}
	for (offset = 0; offset <
bin->dyn_info.dt_relasz && pos < num_relocs; offset += bin->dyn_info.dt_relaent, pos++) { if (!read_reloc (bin, relocs + pos, DT_RELA, bin->dyn_info.dt_rela + offset)) { break; } fix_rva_and_offset_exec_file (bin, relocs + pos); } for (offset = 0; offset < bin->dyn_info.dt_relsz && pos < num_relocs; offset += bin->dyn_info.dt_relent, pos++) { if (!read_reloc (bin, relocs + pos, DT_REL, bin->dyn_info.dt_rel + offset)) { break; } fix_rva_and_offset_exec_file (bin, relocs + pos); } return pos; } static size_t get_next_not_analysed_offset(ELFOBJ *bin, size_t section_vaddr, size_t offset) { size_t gvaddr = section_vaddr + offset; if (bin->dyn_info.dt_rela != R_BIN_ELF_ADDR_MAX && bin->dyn_info.dt_rela <= gvaddr && gvaddr < bin->dyn_info.dt_rela + bin->dyn_info.dt_relasz) { return bin->dyn_info.dt_rela + bin->dyn_info.dt_relasz - section_vaddr; } if (bin->dyn_info.dt_rel != R_BIN_ELF_ADDR_MAX && bin->dyn_info.dt_rel <= gvaddr && gvaddr < bin->dyn_info.dt_rel + bin->dyn_info.dt_relsz) { return bin->dyn_info.dt_rel + bin->dyn_info.dt_relsz - section_vaddr; } if (bin->dyn_info.dt_jmprel != R_BIN_ELF_ADDR_MAX && bin->dyn_info.dt_jmprel <= gvaddr && gvaddr < bin->dyn_info.dt_jmprel + bin->dyn_info.dt_pltrelsz) { return bin->dyn_info.dt_jmprel + bin->dyn_info.dt_pltrelsz - section_vaddr; } return offset; } static size_t populate_relocs_record_from_section(ELFOBJ *bin, RBinElfReloc *relocs, size_t pos, size_t num_relocs) { size_t size, i, j; Elf_(Xword) rel_mode; if (!bin->g_sections) { return pos; } for (i = 0; !bin->g_sections[i].last; i++) { rel_mode = get_section_mode (bin, i); if (!is_reloc_section (rel_mode) || bin->g_sections[i].size > bin->size || bin->g_sections[i].offset > bin->size) { continue; } size = get_size_rel_mode (rel_mode); for (j = get_next_not_analysed_offset (bin, bin->g_sections[i].rva, 0); j < bin->g_sections[i].size && pos < num_relocs; j = get_next_not_analysed_offset (bin, bin->g_sections[i].rva, j + size)) { if (!read_reloc (bin, relocs + pos, 
rel_mode, bin->g_sections[i].rva + j)) {
				break;
			}
			fix_rva_and_offset (bin, relocs + pos, i);
			pos++;
		}
	}
	return pos;
}

// Builds the global relocation table: allocates an upper-bound array and
// fills it from the dynamic segment and the .rel*/.rela* sections.
static RBinElfReloc *populate_relocs_record(ELFOBJ *bin) {
	size_t i = 0;
	size_t num_relocs = get_num_relocs_approx (bin);
	RBinElfReloc *relocs = R_NEWS0 (RBinElfReloc, num_relocs + 1);
	if (!relocs) {
		// In case we can't allocate enough memory for all the claimed
		// relocation entries, try to parse only the ones specified in
		// the dynamic segment.
		num_relocs = get_num_relocs_dynamic (bin);
		relocs = R_NEWS0 (RBinElfReloc, num_relocs + 1);
		if (!relocs) {
			return NULL;
		}
	}
	i = populate_relocs_record_from_dynamic (bin, relocs, i, num_relocs);
	i = populate_relocs_record_from_section (bin, relocs, i, num_relocs);
	relocs[i].last = 1; // sentinel terminator
	bin->g_reloc_num = i;
	return relocs;
}

// Lazily computes and caches the relocation table on the bin object.
RBinElfReloc* Elf_(r_bin_elf_get_relocs) (ELFOBJ *bin) {
	if (!bin) {
		return NULL;
	}
	if (!bin->g_relocs) {
		bin->g_relocs = populate_relocs_record (bin);
	}
	return bin->g_relocs;
}

// Returns the DT_NEEDED shared-library names as an array terminated by an
// entry with .last set. Caller owns the array.
RBinElfLib* Elf_(r_bin_elf_get_libs)(ELFOBJ *bin) {
	RBinElfLib *ret = NULL;
	Elf_(Off) *it = NULL;
	size_t k = 0;
	// NOTE(review): `== '0'` compares against the ASCII digit zero, not the
	// NUL terminator — looks like a typo for 0, but is long-standing;
	// confirm intent upstream before changing.
	if (!bin || !bin->phdr || !bin->strtab || *(bin->strtab+1) == '0') {
		return NULL;
	}
	r_vector_foreach(&bin->dyn_info.dt_needed, it) {
		Elf_(Off) val = *it;
		RBinElfLib *r = realloc (ret, (k + 1) * sizeof (RBinElfLib));
		if (!r) {
			r_sys_perror ("realloc (libs)");
			free (ret);
			return NULL;
		}
		ret = r;
		if (val > bin->strtab_size) {
			free (ret);
			return NULL;
		}
		strncpy (ret[k].name, bin->strtab + val, ELF_STRING_LENGTH);
		ret[k].name[ELF_STRING_LENGTH - 1] = '\0'; // strncpy may not terminate
		ret[k].last = 0;
		if (ret[k].name[0]) {
			k++; // skip empty names
		}
	}
	RBinElfLib *r = realloc (ret, (k + 1) * sizeof (RBinElfLib));
	if (!r) {
		r_sys_perror ("realloc (libs)");
		free (ret);
		return NULL;
	}
	ret = r;
	ret[k].last = 1; // sentinel terminator
	return ret;
}

// Appends one synthetic section entry derived from dynamic-segment info
// (used when section headers are missing). Skips null/invalid addresses.
static void create_section_from_phdr(ELFOBJ *bin, RBinElfSection *ret, size_t *i, const char *name, ut64 addr, ut64 sz) {
	r_return_if_fail (bin && ret && i);
	if (!addr || addr == UT64_MAX) {
		return;
	}
	ret[*i].offset = Elf_(r_bin_elf_v2p_new)
(bin, addr); ret[*i].rva = addr; ret[*i].size = sz; r_str_ncpy (ret[*i].name, name, R_ARRAY_SIZE (ret[*i].name) - 1); ret[*i].last = 0; *i = *i + 1; } static RBinElfSection *get_sections_from_phdr(ELFOBJ *bin) { RBinElfSection *ret; size_t num_sections = 0; ut64 reldyn = 0, relava = 0, pltgotva = 0, relva = 0; ut64 reldynsz = 0, relasz = 0, pltgotsz = 0; r_return_val_if_fail (bin && bin->phdr, NULL); if (!bin->ehdr.e_phnum) { return NULL; } if (bin->dyn_info.dt_rel != R_BIN_ELF_ADDR_MAX) { reldyn = bin->dyn_info.dt_rel; num_sections++; } if (bin->dyn_info.dt_rela != R_BIN_ELF_ADDR_MAX) { relva = bin->dyn_info.dt_rela; num_sections++; } if (bin->dyn_info.dt_relsz) { reldynsz = bin->dyn_info.dt_relsz; } if (bin->dyn_info.dt_relasz) { relasz = bin->dyn_info.dt_relasz; } if (bin->dyn_info.dt_pltgot != R_BIN_ELF_ADDR_MAX) { pltgotva = bin->dyn_info.dt_pltgot; num_sections++; } if (bin->dyn_info.dt_pltrelsz) { pltgotsz = bin->dyn_info.dt_pltrelsz; } if (bin->dyn_info.dt_jmprel != R_BIN_ELF_ADDR_MAX) { relava = bin->dyn_info.dt_jmprel; num_sections++; } ret = calloc (num_sections + 1, sizeof (RBinElfSection)); if (!ret) { return NULL; } size_t i = 0; create_section_from_phdr (bin, ret, &i, ".rel.dyn", reldyn, reldynsz); create_section_from_phdr (bin, ret, &i, ".rela.plt", relava, pltgotsz); create_section_from_phdr (bin, ret, &i, ".rel.plt", relva, relasz); create_section_from_phdr (bin, ret, &i, ".got.plt", pltgotva, pltgotsz); ret[i].last = 1; return ret; } RBinElfSection* Elf_(r_bin_elf_get_sections)(ELFOBJ *bin) { RBinElfSection *ret = NULL; char unknown_s[32], invalid_s[32]; int i, nidx, unknown_c=0, invalid_c=0; r_return_val_if_fail (bin, NULL); if (bin->g_sections) { return bin->g_sections; } if (!bin->shdr && bin->phdr) { //we don't give up search in phdr section return get_sections_from_phdr (bin); } if (!bin->shdr) { return NULL; } ut32 count = bin->ehdr.e_shnum; if (!(ret = calloc ((count + 1), sizeof (RBinElfSection)))) { return NULL; } for (i = 0; i < count; 
i++) { ret[i].offset = bin->shdr[i].sh_offset; ret[i].size = bin->shdr[i].sh_size; ret[i].align = bin->shdr[i].sh_addralign; ret[i].flags = bin->shdr[i].sh_flags; ret[i].link = bin->shdr[i].sh_link; ret[i].info = bin->shdr[i].sh_info; ret[i].type = bin->shdr[i].sh_type; if (is_bin_etrel (bin)) { ret[i].rva = bin->baddr + bin->shdr[i].sh_offset; } else { ret[i].rva = bin->shdr[i].sh_addr; } const int SHNAME = (int)bin->shdr[i].sh_name; const int SHSIZE = (int)bin->shstrtab_size; nidx = SHNAME; if (nidx < 0 || !bin->shstrtab_section || !bin->shstrtab_size || nidx > bin->shstrtab_size) { snprintf (invalid_s, sizeof (invalid_s), "invalid%d", invalid_c); strncpy (ret[i].name, invalid_s, sizeof (ret[i].name) - 1); invalid_c++; } else if (bin->shstrtab && (SHNAME > 0) && (SHNAME < SHSIZE)) { strncpy (ret[i].name, &bin->shstrtab[SHNAME], sizeof (ret[i].name) - 1); } else if (bin->shdr[i].sh_type == SHT_NULL) { //to follow the same behaviour as readelf ret[i].name[0] = '\0'; } else { snprintf (unknown_s, sizeof (unknown_s), "unknown%d", unknown_c); strncpy (ret[i].name, unknown_s, sizeof (ret[i].name) - 1); unknown_c++; } ret[i].name[ELF_STRING_LENGTH - 1] = '\0'; ret[i].last = 0; } ret[i].last = 1; return ret; } static bool is_special_arm_symbol(ELFOBJ *bin, Elf_(Sym) *sym, const char *name) { if (name[0] != '$') { return false; } switch (name[1]) { case 'a': case 't': case 'd': case 'x': return (name[2] == '\0' || name[2] == '.') && ELF_ST_TYPE (sym->st_info) == STT_NOTYPE && ELF_ST_BIND (sym->st_info) == STB_LOCAL && ELF_ST_VISIBILITY (sym->st_info) == STV_DEFAULT; default: return false; } } static bool is_special_symbol(ELFOBJ *bin, Elf_(Sym) *sym, const char *name) { switch (bin->ehdr.e_machine) { case EM_ARM: case EM_AARCH64: return is_special_arm_symbol (bin, sym, name); default: return false; } } static const char *bind2str(Elf_(Sym) *sym) { switch (ELF_ST_BIND (sym->st_info)) { case STB_LOCAL: return R_BIN_BIND_LOCAL_STR; case STB_GLOBAL: return 
R_BIN_BIND_GLOBAL_STR; case STB_WEAK: return R_BIN_BIND_WEAK_STR; case STB_NUM: return R_BIN_BIND_NUM_STR; case STB_LOOS: return R_BIN_BIND_LOOS_STR; case STB_HIOS: return R_BIN_BIND_HIOS_STR; case STB_LOPROC: return R_BIN_BIND_LOPROC_STR; case STB_HIPROC: return R_BIN_BIND_HIPROC_STR; default: return R_BIN_BIND_UNKNOWN_STR; } } static const char *type2str(ELFOBJ *bin, struct r_bin_elf_symbol_t *ret, Elf_(Sym) *sym) { if (bin && ret && is_special_symbol (bin, sym, ret->name)) { return R_BIN_TYPE_SPECIAL_SYM_STR; } switch (ELF_ST_TYPE (sym->st_info)) { case STT_NOTYPE: return R_BIN_TYPE_NOTYPE_STR; case STT_OBJECT: return R_BIN_TYPE_OBJECT_STR; case STT_FUNC: return R_BIN_TYPE_FUNC_STR; case STT_SECTION: return R_BIN_TYPE_SECTION_STR; case STT_FILE: return R_BIN_TYPE_FILE_STR; case STT_COMMON: return R_BIN_TYPE_COMMON_STR; case STT_TLS: return R_BIN_TYPE_TLS_STR; case STT_NUM: return R_BIN_TYPE_NUM_STR; case STT_LOOS: return R_BIN_TYPE_LOOS_STR; case STT_HIOS: return R_BIN_TYPE_HIOS_STR; case STT_LOPROC: return R_BIN_TYPE_LOPROC_STR; case STT_HIPROC: return R_BIN_TYPE_HIPROC_STR; default: return R_BIN_TYPE_UNKNOWN_STR; } } static void fill_symbol_bind_and_type(ELFOBJ *bin, struct r_bin_elf_symbol_t *ret, Elf_(Sym) *sym) { ret->bind = bind2str (sym); ret->type = type2str (bin, ret, sym); } static RBinElfSymbol* get_symbols_from_phdr(ELFOBJ *bin, int type) { Elf_(Sym) *sym = NULL; Elf_(Addr) addr_sym_table = 0; ut8 s[sizeof (Elf_(Sym))] = {0}; RBinElfSymbol *ret = NULL; int i, r, tsize, nsym, ret_ctr; ut64 toffset = 0, tmp_offset; ut32 size, sym_size = 0; if (!bin || !bin->phdr || !bin->ehdr.e_phnum) { return NULL; } if (bin->dyn_info.dt_symtab == R_BIN_ELF_ADDR_MAX || !bin->dyn_info.dt_syment) { return NULL; } addr_sym_table = Elf_(r_bin_elf_v2p) (bin, bin->dyn_info.dt_symtab); sym_size = bin->dyn_info.dt_syment; if (!sym_size) { goto beach; } //since ELF doesn't specify the symbol table size we may read until the end of the buffer nsym = (bin->size - addr_sym_table) 
/ sym_size; if (!UT32_MUL (&size, nsym, sizeof (Elf_ (Sym)))) { goto beach; } if (size < 1) { goto beach; } if (addr_sym_table > bin->size || addr_sym_table + size > bin->size) { goto beach; } if (nsym < 1) { return NULL; } // we reserve room for 4096 and grow as needed. size_t capacity1 = 4096; size_t capacity2 = 4096; sym = (Elf_(Sym)*) calloc (capacity1, sym_size); ret = (RBinElfSymbol *) calloc (capacity2, sizeof (struct r_bin_elf_symbol_t)); if (!sym || !ret) { goto beach; } for (i = 1, ret_ctr = 0; i < nsym; i++) { if (i >= capacity1) { // maybe grow // You take what you want, but you eat what you take. Elf_(Sym)* temp_sym = (Elf_(Sym)*) realloc (sym, (size_t)(capacity1 * GROWTH_FACTOR) * sym_size); if (!temp_sym) { goto beach; } sym = temp_sym; capacity1 = (size_t)(capacity1 * GROWTH_FACTOR); } if (ret_ctr >= capacity2) { // maybe grow RBinElfSymbol *temp_ret = realloc (ret, (size_t)(capacity2 * GROWTH_FACTOR) * sizeof (struct r_bin_elf_symbol_t)); if (!temp_ret) { goto beach; } ret = temp_ret; capacity2 = (size_t)(capacity2 * GROWTH_FACTOR); } // read in one entry r = r_buf_read_at (bin->b, addr_sym_table + i * sizeof (Elf_ (Sym)), s, sizeof (Elf_ (Sym))); if (r < 1) { goto beach; } int j = 0; #if R_BIN_ELF64 sym[i].st_name = READ32 (s, j); sym[i].st_info = READ8 (s, j); sym[i].st_other = READ8 (s, j); sym[i].st_shndx = READ16 (s, j); sym[i].st_value = READ64 (s, j); sym[i].st_size = READ64 (s, j); #else sym[i].st_name = READ32 (s, j); sym[i].st_value = READ32 (s, j); sym[i].st_size = READ32 (s, j); sym[i].st_info = READ8 (s, j); sym[i].st_other = READ8 (s, j); sym[i].st_shndx = READ16 (s, j); #endif bool is_sht_null = false; bool is_vaddr = false; // zero symbol is always empty // Examine entry and maybe store if (type == R_BIN_ELF_IMPORT_SYMBOLS && sym[i].st_shndx == SHT_NULL) { if (sym[i].st_value) { toffset = sym[i].st_value; } else if ((toffset = get_import_addr (bin, i)) == -1){ toffset = 0; } tsize = 16; } else if (type == R_BIN_ELF_ALL_SYMBOLS) { 
tsize = sym[i].st_size; toffset = (ut64) sym[i].st_value; is_sht_null = sym[i].st_shndx == SHT_NULL; } else { continue; } // since we don't know the size of the sym table in this case, // let's stop at the first invalid entry if (!strcmp (bind2str (&sym[i]), R_BIN_BIND_UNKNOWN_STR) || !strcmp (type2str (NULL, NULL, &sym[i]), R_BIN_TYPE_UNKNOWN_STR)) { goto done; } tmp_offset = Elf_(r_bin_elf_v2p_new) (bin, toffset); if (tmp_offset == UT64_MAX) { tmp_offset = toffset; is_vaddr = true; } if (sym[i].st_name + 2 > bin->strtab_size) { // Since we are reading beyond the symbol table what's happening // is that some entry is trying to dereference the strtab beyond its capacity // is not a symbol so is the end goto done; } ret[ret_ctr].offset = tmp_offset; ret[ret_ctr].size = tsize; { int rest = ELF_STRING_LENGTH - 1; int st_name = sym[i].st_name; int maxsize = R_MIN (bin->size, bin->strtab_size); if (st_name < 0 || st_name >= maxsize) { ret[ret_ctr].name[0] = 0; } else { const int len = __strnlen (bin->strtab + st_name, rest); memcpy (ret[ret_ctr].name, &bin->strtab[st_name], len); } } ret[ret_ctr].ordinal = i; ret[ret_ctr].in_shdr = false; ret[ret_ctr].name[ELF_STRING_LENGTH - 2] = '\0'; fill_symbol_bind_and_type (bin, &ret[ret_ctr], &sym[i]); ret[ret_ctr].is_sht_null = is_sht_null; ret[ret_ctr].is_vaddr = is_vaddr; ret[ret_ctr].last = 0; ret_ctr++; } done: // Size everything down to only what is used { nsym = i > 0? i: 1; Elf_(Sym) *temp_sym = (Elf_(Sym) *)realloc (sym, (size_t)(nsym * GROWTH_FACTOR) * sym_size); if (!temp_sym) { goto beach; } sym = temp_sym; } { ret_ctr = ret_ctr > 0? 
ret_ctr: 1; // (cont. of get_symbols_from_phdr: clamp so the terminator entry below fits)
		RBinElfSymbol *p = (RBinElfSymbol *)realloc (ret, (ret_ctr + 1) * sizeof (RBinElfSymbol));
		if (!p) {
			goto beach;
		}
		ret = p;
	}
	// sentinel entry marks the end of the array
	ret[ret_ctr].last = 1;
	// first call only: publish ordinal-indexed lookup tables on the ELFOBJ
	if (type == R_BIN_ELF_IMPORT_SYMBOLS && !bin->imports_by_ord_size) {
		bin->imports_by_ord_size = ret_ctr + 1;
		if (ret_ctr > 0) {
			bin->imports_by_ord = (RBinImport * *) calloc (ret_ctr + 1, sizeof (RBinImport*));
		} else {
			bin->imports_by_ord = NULL;
		}
	} else if (type == R_BIN_ELF_ALL_SYMBOLS && !bin->symbols_by_ord_size && ret_ctr) {
		bin->symbols_by_ord_size = ret_ctr + 1;
		if (ret_ctr > 0) {
			bin->symbols_by_ord = (RBinSymbol **) calloc (ret_ctr + 1, sizeof (RBinSymbol*));
		} else {
			bin->symbols_by_ord = NULL;
		}
	}
	free (sym);
	return ret;
beach:
	free (sym);
	free (ret);
	return NULL;
}

/* Cached accessor: all symbols reconstructed from the program headers
 * (used when the binary has no usable section header table). */
static RBinElfSymbol *Elf_(r_bin_elf_get_phdr_symbols)(ELFOBJ *bin) {
	if (!bin) {
		return NULL;
	}
	if (bin->phdr_symbols) {
		return bin->phdr_symbols;
	}
	bin->phdr_symbols = get_symbols_from_phdr (bin, R_BIN_ELF_ALL_SYMBOLS);
	return bin->phdr_symbols;
}

/* Cached accessor: import symbols reconstructed from the program headers. */
static RBinElfSymbol *Elf_(r_bin_elf_get_phdr_imports)(ELFOBJ *bin) {
	r_return_val_if_fail (bin, NULL);
	if (!bin->phdr_imports) {
		bin->phdr_imports = get_symbols_from_phdr (bin, R_BIN_ELF_IMPORT_SYMBOLS);
	}
	return bin->phdr_imports;
}

/* Dispatch on `type` to the matching cached phdr-based symbol list. */
static RBinElfSymbol *Elf_(get_phdr_symbols)(ELFOBJ *bin, int type) {
	return (type != R_BIN_ELF_IMPORT_SYMBOLS) ?
Elf_(r_bin_elf_get_phdr_symbols) (bin) : Elf_(r_bin_elf_get_phdr_imports) (bin); } static int Elf_(fix_symbols)(ELFOBJ *bin, int nsym, int type, RBinElfSymbol **sym) { int count = 0; int result = -1; RBinElfSymbol *ret = *sym; RBinElfSymbol *phdr_symbols = Elf_(get_phdr_symbols) (bin, type); RBinElfSymbol *tmp, *p; HtUP *phd_offset_map = ht_up_new0 (); HtUP *phd_ordinal_map = ht_up_new0 (); if (phdr_symbols) { RBinElfSymbol *d = ret; while (!d->last) { ht_up_insert (phd_offset_map, d->offset, d); ht_up_insert (phd_ordinal_map, d->ordinal, d); d++; } p = phdr_symbols; while (!p->last) { /* find match in phdr */ d = ht_up_find (phd_offset_map, p->offset, NULL); if (!d) { d = ht_up_find (phd_ordinal_map, p->ordinal, NULL); } if (d) { p->in_shdr = true; if (*p->name && *d->name && r_str_startswith (d->name, "$")) { strcpy (d->name, p->name); } } p++; } p = phdr_symbols; while (!p->last) { if (!p->in_shdr) { count++; } p++; } /*Take those symbols that are not present in the shdr but yes in phdr*/ /*This should only should happen with fucked up binaries*/ if (count > 0) { /*what happens if a shdr says it has only one symbol? 
we should look anyway into phdr*/ tmp = (RBinElfSymbol*)realloc (ret, (nsym + count + 1) * sizeof (RBinElfSymbol)); if (!tmp) { result = -1; goto done; } ret = tmp; ret[nsym--].last = 0; p = phdr_symbols; while (!p->last) { if (!p->in_shdr) { memcpy (&ret[++nsym], p, sizeof (RBinElfSymbol)); } p++; } ret[nsym + 1].last = 1; } *sym = ret; result = nsym + 1; goto done; } result = nsym; done: ht_up_free (phd_offset_map); ht_up_free (phd_ordinal_map); return result; } static bool is_section_local_sym(ELFOBJ *bin, Elf_(Sym) *sym) { if (sym->st_name != 0) { return false; } if (ELF_ST_TYPE (sym->st_info) != STT_SECTION) { return false; } if (ELF_ST_BIND (sym->st_info) != STB_LOCAL) { return false; } if (!is_shidx_valid (bin, sym->st_shndx)) { return false; } Elf_(Word) sh_name = bin->shdr[sym->st_shndx].sh_name; return bin->shstrtab && sh_name < bin->shstrtab_size; } static void setsymord(ELFOBJ* eobj, ut32 ord, RBinSymbol *ptr) { if (!eobj->symbols_by_ord || ord >= eobj->symbols_by_ord_size) { return; } r_bin_symbol_free (eobj->symbols_by_ord[ord]); eobj->symbols_by_ord[ord] = ptr; } static void _set_arm_thumb_bits(struct Elf_(r_bin_elf_obj_t) *bin, RBinSymbol **sym) { int bin_bits = Elf_(r_bin_elf_get_bits) (bin); RBinSymbol *ptr = *sym; int len = strlen (ptr->name); if (ptr->name[0] == '$' && (len >= 2 && !ptr->name[2])) { switch (ptr->name[1]) { case 'a' : //arm ptr->bits = 32; break; case 't': //thumb ptr->bits = 16; if (ptr->vaddr & 1) { ptr->vaddr--; } if (ptr->paddr & 1) { ptr->paddr--; } break; case 'd': //data break; default: goto arm_symbol; } } else { arm_symbol: ptr->bits = bin_bits; if (bin_bits != 64) { ptr->bits = 32; if (ptr->paddr != UT64_MAX) { if (ptr->vaddr & 1) { ptr->vaddr--; ptr->bits = 16; } if (ptr->paddr & 1) { ptr->paddr--; ptr->bits = 16; } } } } } RBinSymbol *Elf_(_r_bin_elf_convert_symbol)(struct Elf_(r_bin_elf_obj_t) *bin, struct r_bin_elf_symbol_t *symbol, const char *namefmt) { ut64 paddr, vaddr; RBinSymbol *ptr = NULL; if 
(symbol->is_vaddr) { // (cont. of Elf_(_r_bin_elf_convert_symbol))
		// offset already holds a virtual address that could not be mapped to a file offset
		paddr = UT64_MAX;
		vaddr = symbol->offset;
	} else {
		paddr = symbol->offset;
		vaddr = Elf_(r_bin_elf_p2v_new) (bin, paddr);
	}
	if (!(ptr = R_NEW0 (RBinSymbol))) {
		return NULL;
	}
	ptr->name = symbol->name[0] ? r_str_newf (namefmt, &symbol->name[0]) : strdup ("");
	ptr->forwarder = "NONE";
	ptr->bind = symbol->bind;
	ptr->type = symbol->type;
	ptr->is_imported = symbol->is_imported;
	ptr->paddr = paddr;
	ptr->vaddr = vaddr;
	ptr->size = symbol->size;
	ptr->ordinal = symbol->ordinal;
	// detect thumb
	if (bin->ehdr.e_machine == EM_ARM && *ptr->name) {
		_set_arm_thumb_bits (bin, &ptr);
	}
	return ptr;
}

/* Hash for the symbol de-duplication hashtable: mixes name, type and offset. */
static ut32 hashRBinElfSymbol(const void *obj) {
	const RBinElfSymbol *symbol = (const RBinElfSymbol *)obj;
	if (!symbol || !*symbol->name) {
		return 0;
	}
	int hash = sdb_hash (symbol->name);
	hash ^= sdb_hash (symbol->type);
	hash ^= (symbol->offset >> 32);
	hash ^= (symbol->offset & 0xffffffff);
	return hash;
}

/* Equality for the de-dup hashtable: equal (0) only when offset, name and type all match. */
static int cmp_RBinElfSymbol(const RBinElfSymbol *a, const RBinElfSymbol *b) {
	if (a->offset != b->offset) {
		return 1;
	}
	int result = strcmp (a->name, b->name);
	if (result != 0) {
		return result;
	}
	return strcmp (a->type, b->type);
}

/* Extract symbols from an embedded ".gnu_debugdata" section (xz-compressed
 * MiniDebugInfo ELF). On success *ret_size receives the index of the section
 * that was used; the returned list is owned by the caller. */
static RBinElfSymbol* parse_gnu_debugdata(ELFOBJ *bin, size_t *ret_size) {
	if (ret_size) {
		*ret_size = 0;
	}
	if (bin->g_sections) {
		size_t i;
		for (i = 0; !bin->g_sections[i].last; i++) {
			if (!strcmp (bin->g_sections[i].name, ".gnu_debugdata")) {
				ut64 addr = bin->g_sections[i].offset;
				ut64 size = bin->g_sections[i].size;
				if (size < 10) {
					// NOTE(review): `false` from a pointer-returning function; behaves as NULL but reads oddly
					return false;
				}
				// NOTE(review): malloc result is not NULL-checked before the read below
				ut8 *data = malloc (size + 1);
				// NOTE(review): a failed read only warns; decompression still proceeds on `data`
				if (r_buf_read_at (bin->b, addr, data, size) == -1) {
					eprintf ("Cannot read%c\n", 10);
				}
				size_t osize;
				ut8 *odata = r_sys_unxz (data, size, &osize);
				if (odata) {
					RBuffer *newelf = r_buf_new_with_pointers (odata, osize, false);
					ELFOBJ* newobj = Elf_(r_bin_elf_new_buf) (newelf, false);
					RBinElfSymbol *symbol = NULL;
					if (newobj) {
						symbol = Elf_(r_bin_elf_get_symbols) (newobj);
						// steal the symbol list so r_bin_elf_free does not release it
						newobj->g_symbols = NULL;
						Elf_(r_bin_elf_free)(newobj);
					}
					r_buf_free (newelf);
					free
(odata); if (ret_size) { *ret_size = i; } free (data); return symbol; } free (data); return NULL; } } } return NULL; } // TODO: return RList<RBinSymbol*> .. or run a callback with that symbol constructed, so we don't have to do it twice static RBinElfSymbol* Elf_(_r_bin_elf_get_symbols_imports)(ELFOBJ *bin, int type) { ut32 shdr_size; int tsize, nsym, ret_ctr = 0, i, j, r, k, newsize; ut64 toffset; ut32 size = 0; RBinElfSymbol *ret = NULL, *import_ret = NULL; RBinSymbol *import_sym_ptr = NULL; size_t ret_size = 0, prev_ret_size = 0, import_ret_ctr = 0; Elf_(Shdr) *strtab_section = NULL; Elf_(Sym) *sym = NULL; ut8 s[sizeof (Elf_(Sym))] = {0}; char *strtab = NULL; HtPP *symbol_map = NULL; HtPPOptions symbol_map_options = { .cmp = (HtPPListComparator)cmp_RBinElfSymbol, .hashfn = hashRBinElfSymbol, .dupkey = NULL, .calcsizeK = NULL, .calcsizeV = NULL, .freefn = NULL, .elem_size = sizeof (HtPPKv), }; if (!bin || !bin->shdr || !bin->ehdr.e_shnum || bin->ehdr.e_shnum == 0xffff) { return Elf_(get_phdr_symbols) (bin, type); } if (!UT32_MUL (&shdr_size, bin->ehdr.e_shnum, sizeof (Elf_(Shdr)))) { return NULL; } if (shdr_size + 8 > bin->size) { return NULL; } RBinElfSymbol *dbgsyms = parse_gnu_debugdata (bin, &ret_size); if (dbgsyms) { ret = dbgsyms; ret_ctr = ret_size; } else { ret_ctr = 0; ret_size = 0; } for (i = 0; i < bin->ehdr.e_shnum; i++) { if (((type & R_BIN_ELF_SYMTAB_SYMBOLS) && bin->shdr[i].sh_type == SHT_SYMTAB) || ((type & R_BIN_ELF_DYNSYM_SYMBOLS) && bin->shdr[i].sh_type == SHT_DYNSYM)) { if (bin->shdr[i].sh_link < 1) { /* oops. fix out of range pointers */ continue; } // hack to avoid asan cry if ((bin->shdr[i].sh_link * sizeof (Elf_(Shdr))) >= shdr_size) { /* oops. 
fix out of range pointers */ continue; } strtab_section = &bin->shdr[bin->shdr[i].sh_link]; if (strtab_section->sh_size > ST32_MAX || strtab_section->sh_size+8 > bin->size) { R_LOG_ERROR ("size (syms strtab)"); free (ret); free (strtab); return NULL; } if (!strtab) { if (!(strtab = (char *)calloc (1, 8 + strtab_section->sh_size))) { R_LOG_ERROR ("malloc (syms strtab)"); goto beach; } if (strtab_section->sh_offset > bin->size || strtab_section->sh_offset + strtab_section->sh_size > bin->size) { goto beach; } if (r_buf_read_at (bin->b, strtab_section->sh_offset, (ut8*)strtab, strtab_section->sh_size) == -1) { R_LOG_ERROR ("read (syms strtab)"); goto beach; } } newsize = 1 + bin->shdr[i].sh_size; if (newsize < 0 || newsize > bin->size) { R_LOG_ERROR ("invalid shdr %d size", i); goto beach; } nsym = (int)(bin->shdr[i].sh_size / sizeof (Elf_(Sym))); if (nsym < 0) { goto beach; } { ut64 sh_begin = bin->shdr[i].sh_offset; ut64 sh_end = sh_begin + bin->shdr[i].sh_size; if (sh_begin > bin->size) { goto beach; } if (sh_end > bin->size) { st64 newshsize = bin->size - sh_begin; nsym = (int)(newshsize / sizeof (Elf_(Sym))); } } if (!(sym = (Elf_(Sym) *)calloc (nsym, sizeof (Elf_(Sym))))) { R_LOG_ERROR ("calloc (syms)"); goto beach; } if (!UT32_MUL (&size, nsym, sizeof (Elf_(Sym)))) { goto beach; } if (size < 1 || size > bin->size) { goto beach; } if (bin->shdr[i].sh_offset > bin->size) { goto beach; } if (bin->shdr[i].sh_offset + size > bin->size) { goto beach; } for (j = 0; j < nsym; j++) { int k = 0; r = r_buf_read_at (bin->b, bin->shdr[i].sh_offset + j * sizeof (Elf_(Sym)), s, sizeof (Elf_(Sym))); if (r < 1) { R_LOG_ERROR ("read (sym)"); goto beach; } #if R_BIN_ELF64 sym[j].st_name = READ32 (s, k); sym[j].st_info = READ8 (s, k); sym[j].st_other = READ8 (s, k); sym[j].st_shndx = READ16 (s, k); sym[j].st_value = READ64 (s, k); sym[j].st_size = READ64 (s, k); #else sym[j].st_name = READ32 (s, k); sym[j].st_value = READ32 (s, k); sym[j].st_size = READ32 (s, k); sym[j].st_info = 
READ8 (s, k); sym[j].st_other = READ8 (s, k); sym[j].st_shndx = READ16 (s, k); #endif } void *rett = realloc (ret, (ret_size + nsym) * sizeof (RBinElfSymbol)); if (!rett) { R_LOG_ERROR ("Cannot allocate %d symbols.", (int)(nsym + ret_size)); goto beach; } ret = rett; memset (ret + ret_size, 0, nsym * sizeof (RBinElfSymbol)); prev_ret_size = ret_size; ret_size += nsym; symbol_map = ht_pp_new_opt (&symbol_map_options); for (k = 0; k < prev_ret_size; k++) { if (ret[k].name[0]) { ht_pp_insert (symbol_map, ret + k, ret + k); } } for (k = 1; k < nsym; k++) { bool is_sht_null = false; bool is_vaddr = false; bool is_imported = false; if (type == R_BIN_ELF_IMPORT_SYMBOLS) { if (sym[k].st_value) { toffset = sym[k].st_value; } else if ((toffset = get_import_addr (bin, k)) == -1) { toffset = 0; } tsize = 16; is_imported = sym[k].st_shndx == STN_UNDEF; } else { tsize = sym[k].st_size; toffset = (ut64)sym[k].st_value; is_sht_null = sym[k].st_shndx == SHT_NULL; } if (is_bin_etrel (bin)) { if (sym[k].st_shndx < bin->ehdr.e_shnum) { ret[ret_ctr].offset = sym[k].st_value + bin->shdr[sym[k].st_shndx].sh_offset; } } else { ret[ret_ctr].offset = Elf_(r_bin_elf_v2p_new) (bin, toffset); if (ret[ret_ctr].offset == UT64_MAX) { ret[ret_ctr].offset = toffset; is_vaddr = true; } } ret[ret_ctr].size = tsize; if (sym[k].st_name + 1 > strtab_section->sh_size) { R_LOG_DEBUG ("index out of strtab range (%"PFMT64d" / %"PFMT64d")\n", (ut64)sym[k].st_name, (ut64)strtab_section->sh_size); continue; } { int st_name = sym[k].st_name; int maxsize = R_MIN (r_buf_size (bin->b), strtab_section->sh_size); if (is_section_local_sym (bin, &sym[k])) { const char *shname = &bin->shstrtab[bin->shdr[sym[k].st_shndx].sh_name]; r_str_ncpy (ret[ret_ctr].name, shname, ELF_STRING_LENGTH - 1); } else if (st_name <= 0 || st_name >= maxsize) { ret[ret_ctr].name[0] = 0; } else { r_str_ncpy (ret[ret_ctr].name, &strtab[st_name], ELF_STRING_LENGTH - 1); ret[ret_ctr].type = type2str (bin, &ret[ret_ctr], &sym[k]); if (ht_pp_find 
(symbol_map, &ret[ret_ctr], NULL)) { memset (ret + ret_ctr, 0, sizeof (RBinElfSymbol)); continue; } } } ret[ret_ctr].ordinal = k; ret[ret_ctr].name[ELF_STRING_LENGTH - 1] = '\0'; fill_symbol_bind_and_type (bin, &ret[ret_ctr], &sym[k]); ret[ret_ctr].is_sht_null = is_sht_null; ret[ret_ctr].is_vaddr = is_vaddr; ret[ret_ctr].last = 0; ret[ret_ctr].is_imported = is_imported; ret_ctr++; if (type == R_BIN_ELF_IMPORT_SYMBOLS && is_imported) { import_ret_ctr++; } } R_FREE (strtab); R_FREE (sym); ht_pp_free (symbol_map); symbol_map = NULL; if (type == R_BIN_ELF_IMPORT_SYMBOLS) { break; } } } if (!ret) { return Elf_(get_phdr_symbols) (bin, type); } ret[ret_ctr].last = 1; // ugly dirty hack :D int max = -1; RBinElfSymbol *aux = NULL; nsym = Elf_(fix_symbols) (bin, ret_ctr, type, &ret); if (nsym == -1) { goto beach; } // Elf_(fix_symbols) may find additional symbols, some of which could be // imported symbols. Let's reserve additional space for them. r_warn_if_fail (nsym >= ret_ctr); import_ret_ctr += nsym - ret_ctr; aux = ret; while (!aux->last) { if ((int)aux->ordinal > max) { max = aux->ordinal; } aux++; } nsym = max; if (type == R_BIN_ELF_IMPORT_SYMBOLS) { R_FREE (bin->imports_by_ord); bin->imports_by_ord_size = nsym + 1; bin->imports_by_ord = (RBinImport**)calloc (R_MAX (1, nsym + 1), sizeof (RBinImport*)); R_FREE (bin->symbols_by_ord); bin->symbols_by_ord_size = nsym + 1; bin->symbols_by_ord = (RBinSymbol**)calloc (R_MAX (1, nsym + 1), sizeof (RBinSymbol*)); import_ret = calloc (import_ret_ctr + 1, sizeof (RBinElfSymbol)); if (!import_ret) { R_LOG_DEBUG ("Cannot allocate %d symbols", nsym); goto beach; } import_ret_ctr = 0; i = -1; while (!ret[++i].last) { if (!(import_sym_ptr = Elf_(_r_bin_elf_convert_symbol) (bin, &ret[i], "%s"))) { continue; } setsymord (bin, import_sym_ptr->ordinal, import_sym_ptr); if (ret[i].is_imported) { memcpy (&import_ret[import_ret_ctr], &ret[i], sizeof (RBinElfSymbol)); ++import_ret_ctr; } } import_ret[import_ret_ctr].last = 1; R_FREE (ret); 
return import_ret; } return ret; beach: free (ret); free (sym); free (strtab); ht_pp_free (symbol_map); return NULL; } RBinElfSymbol *Elf_(r_bin_elf_get_symbols)(ELFOBJ *bin) { if (!bin->g_symbols) { bin->g_symbols = Elf_(_r_bin_elf_get_symbols_imports) (bin, R_BIN_ELF_ALL_SYMBOLS); } return bin->g_symbols; } RBinElfSymbol *Elf_(r_bin_elf_get_imports)(ELFOBJ *bin) { if (!bin->g_imports) { bin->g_imports = Elf_(_r_bin_elf_get_symbols_imports) (bin, R_BIN_ELF_IMPORT_SYMBOLS); } return bin->g_imports; } RBinElfField* Elf_(r_bin_elf_get_fields)(ELFOBJ *bin) { RBinElfField *ret = NULL; int i = 0, j; if (!bin || !(ret = calloc ((bin->ehdr.e_phnum + 3 + 1), sizeof (RBinElfField)))) { return NULL; } strncpy (ret[i].name, "ehdr", ELF_STRING_LENGTH); ret[i].offset = 0; ret[i++].last = 0; strncpy (ret[i].name, "shoff", ELF_STRING_LENGTH); ret[i].offset = bin->ehdr.e_shoff; ret[i++].last = 0; strncpy (ret[i].name, "phoff", ELF_STRING_LENGTH); ret[i].offset = bin->ehdr.e_phoff; ret[i++].last = 0; for (j = 0; bin->phdr && j < bin->ehdr.e_phnum; i++, j++) { snprintf (ret[i].name, ELF_STRING_LENGTH, "phdr_%i", j); ret[i].offset = bin->phdr[j].p_offset; ret[i].last = 0; } ret[i].last = 1; return ret; } void Elf_(r_bin_elf_free)(ELFOBJ* bin) { if (!bin) { return; } free (bin->phdr); free (bin->shdr); free (bin->strtab); free (bin->shstrtab); free (bin->dynstr); r_vector_fini (&bin->dyn_info.dt_needed); //free (bin->strtab_section); size_t i; if (bin->imports_by_ord) { for (i = 0; i<bin->imports_by_ord_size; i++) { free (bin->imports_by_ord[i]); } free (bin->imports_by_ord); } if (bin->symbols_by_ord) { for (i = 0; i<bin->symbols_by_ord_size; i++) { r_bin_symbol_free (bin->symbols_by_ord[i]); } free (bin->symbols_by_ord); } r_buf_free (bin->b); if (bin->g_symbols != bin->phdr_symbols) { R_FREE (bin->phdr_symbols); } if (bin->g_imports != bin->phdr_imports) { R_FREE (bin->phdr_imports); } R_FREE (bin->g_sections); R_FREE (bin->g_symbols); R_FREE (bin->g_imports); R_FREE 
(bin->g_relocs); // (cont. of Elf_(r_bin_elf_free))
	ht_up_free (bin->rel_cache);
	bin->rel_cache = NULL;
	sdb_free (bin->kv);
	free (bin);
}

/* Allocate and initialize an ELFOBJ from a buffer; returns NULL on parse failure. */
ELFOBJ* Elf_(r_bin_elf_new_buf)(RBuffer *buf, bool verbose) {
	ELFOBJ *bin = R_NEW0 (ELFOBJ);
	if (bin) {
		bin->kv = sdb_new0 ();
		bin->size = r_buf_size (buf);
		bin->verbose = verbose;
		bin->b = r_buf_ref (buf); // take a reference; released by r_bin_elf_free
		if (!elf_init (bin)) {
			Elf_(r_bin_elf_free) (bin);
			return NULL;
		}
	}
	return bin;
}

/* Does file offset `addr` fall inside program header `p`'s file image? */
static int is_in_pphdr(Elf_(Phdr) *p, ut64 addr) {
	return addr >= p->p_offset && addr < p->p_offset + p->p_filesz;
}

/* Does virtual address `addr` fall inside program header `p`'s file-backed mapping? */
static int is_in_vphdr(Elf_(Phdr) *p, ut64 addr) {
	return addr >= p->p_vaddr && addr < p->p_vaddr + p->p_filesz;
}

/* Deprecated temporarily. Use r_bin_elf_p2v_new in new code for now. */
/* Physical (file) to virtual address; echoes the input back when no PT_LOAD matches. */
ut64 Elf_(r_bin_elf_p2v) (ELFOBJ *bin, ut64 paddr) {
	size_t i;
	r_return_val_if_fail (bin, 0);
	if (!bin->phdr) {
		if (is_bin_etrel (bin)) {
			// relocatable objects have no phdrs: rebase against baddr
			return bin->baddr + paddr;
		}
		return paddr;
	}
	for (i = 0; i < bin->ehdr.e_phnum; i++) {
		Elf_(Phdr) *p = &bin->phdr[i];
		if (p->p_type == PT_LOAD && is_in_pphdr (p, paddr)) {
			// skip the degenerate all-zero mapping
			if (!p->p_vaddr && !p->p_offset) {
				continue;
			}
			return p->p_vaddr + paddr - p->p_offset;
		}
	}
	return paddr;
}

/* Deprecated temporarily. Use r_bin_elf_v2p_new in new code for now. */
ut64 Elf_(r_bin_elf_v2p)(ELFOBJ *bin, ut64 vaddr) {
	r_return_val_if_fail (bin, 0);
	// UT64_MAX or vaddr?
// r_return_val_if_fail (bin, UT64_MAX); if (!bin->phdr) { if (is_bin_etrel (bin)) { return vaddr - bin->baddr; } return vaddr; } size_t i; for (i = 0; i < bin->ehdr.e_phnum; i++) { Elf_(Phdr) *p = &bin->phdr[i]; if (p->p_type == PT_LOAD && is_in_vphdr (p, vaddr)) { if (!p->p_offset && !p->p_vaddr) { continue; } return p->p_offset + vaddr - p->p_vaddr; } } return vaddr; } /* converts a physical address to the virtual address, looking * at the program headers in the binary bin */ ut64 Elf_(r_bin_elf_p2v_new) (ELFOBJ *bin, ut64 paddr) { size_t i; r_return_val_if_fail (bin, UT64_MAX); if (!bin->phdr) { if (is_bin_etrel (bin)) { return bin->baddr + paddr; } return UT64_MAX; } for (i = 0; i < bin->ehdr.e_phnum; i++) { Elf_(Phdr) *p = &bin->phdr[i]; if (p->p_type == PT_LOAD && is_in_pphdr (p, paddr)) { return p->p_vaddr + paddr - p->p_offset; } } return UT64_MAX; } /* converts a virtual address to the relative physical address, looking * at the program headers in the binary bin */ ut64 Elf_(r_bin_elf_v2p_new) (ELFOBJ *bin, ut64 vaddr) { size_t i; r_return_val_if_fail (bin, UT64_MAX); if (!bin->phdr) { if (is_bin_etrel (bin)) { return vaddr - bin->baddr; } return UT64_MAX; } for (i = 0; i < bin->ehdr.e_phnum; i++) { Elf_(Phdr) *p = &bin->phdr[i]; if (p->p_type == PT_LOAD && is_in_vphdr (p, vaddr)) { return p->p_offset + vaddr - p->p_vaddr; } } return UT64_MAX; } static bool get_nt_file_maps(ELFOBJ *bin, RList *core_maps) { ut16 ph, ph_num = bin->ehdr.e_phnum; for (ph = 0; ph < ph_num; ph++) { Elf_(Phdr) *p = &bin->phdr[ph]; if (p->p_type == PT_NOTE) { int bits = Elf_(r_bin_elf_get_bits)(bin); int elf_nhdr_size = (bits == 64) ? sizeof (Elf64_Nhdr) : sizeof (Elf32_Nhdr); int size_of = (bits == 64) ? 
sizeof (ut64) : sizeof (ut32); void *elf_nhdr = calloc (elf_nhdr_size, 1); ut64 offset = 0; bool found = false; while (!found) { int ret; ut32 n_descsz, n_namesz, n_type; ret = r_buf_read_at (bin->b, bin->phdr[ph].p_offset + offset, elf_nhdr, elf_nhdr_size); if (ret != elf_nhdr_size) { eprintf ("Cannot read more NOTES header from CORE\n"); free (elf_nhdr); goto fail; } if (bits == 64) { n_descsz = round_up (((Elf64_Nhdr *)elf_nhdr)->n_descsz); n_namesz = round_up (((Elf64_Nhdr *)elf_nhdr)->n_namesz); n_type = ((Elf64_Nhdr *)elf_nhdr)->n_type; } else { n_descsz = round_up (((Elf32_Nhdr *)elf_nhdr)->n_descsz); n_namesz = round_up (((Elf32_Nhdr *)elf_nhdr)->n_namesz); n_type = ((Elf32_Nhdr *)elf_nhdr)->n_type; } if (n_type == NT_FILE) { found = true; offset += elf_nhdr_size + n_namesz; free (elf_nhdr); } else { offset += elf_nhdr_size + n_descsz + n_namesz; } } ut64 i = bin->phdr[ph].p_offset + offset; ut64 n_maps; if (bits == 64) { n_maps = BREAD64 (bin->b, i); (void)BREAD64 (bin->b, i); } else { n_maps = BREAD32 (bin->b, i); (void)BREAD32 (bin->b, i); } ut64 jump = ((size_of * 3) * n_maps) + i; int len_str = 0; while (n_maps > 0) { ut64 addr; if (bits == 64) { addr = BREAD64 (bin->b, i); } else { addr = BREAD32 (bin->b, i); } if (addr == UT64_MAX) { break; } char str[512] = {0}; r_buf_read_at (bin->b, jump + len_str, (ut8*)str, sizeof (str) - 1); str[sizeof (str) - 1] = 0; // null terminate string RListIter *iter; RBinMap *p; r_list_foreach (core_maps, iter, p) { if (p->addr == addr) { p->file = strdup (str); } } len_str += strlen (str) + 1; n_maps--; i += (size_of * 2); } } } return true; fail: return false; } static void r_bin_elf_map_free(RBinMap *map) { if (map) { free (map->file); free (map); } } RList *Elf_(r_bin_elf_get_maps)(ELFOBJ *bin) { ut16 ph, ph_num = bin->ehdr.e_phnum; //Skip PT_NOTE if (!bin->phdr) { return NULL; } RList *maps = r_list_newf ((RListFree)r_bin_elf_map_free); for (ph = 0; ph < ph_num; ph++) { Elf_(Phdr) *p = &bin->phdr[ph]; if 
(p->p_type == PT_LOAD) { // (cont. of Elf_(r_bin_elf_get_maps): one RBinMap per PT_LOAD)
			RBinMap *map = R_NEW0 (RBinMap);
			if (map) {
				map->addr = p->p_vaddr;
				map->size = p->p_memsz;
				map->perms = p->p_flags;
				map->offset = p->p_offset;
				map->file = NULL;
				r_list_append (maps, map);
			}
		}
	}
	if (!r_list_empty (maps)) {
		// best effort: core files carry map file names in NT_FILE notes
		if (!get_nt_file_maps (bin, maps)) {
			eprintf ("Could not retrieve the names of all maps from NT_FILE\n");
		}
	}
	return maps;
}

/* Return an escaped copy of (at most the first 128 bytes of) the ".comment"
 * section, where toolchains record the compiler version string; NULL when the
 * section is absent or unreadable. Caller frees the result. */
char *Elf_(r_bin_elf_compiler)(ELFOBJ *bin) {
	RBinElfSection *section = get_section_by_name (bin, ".comment");
	if (!section) {
		return NULL;
	}
	ut64 off = section->offset;
	ut32 sz = R_MIN (section->size, 128);
	if (sz < 1) {
		return NULL;
	}
	char *buf = malloc (sz + 1);
	if (!buf) {
		return NULL;
	}
	if (r_buf_read_at (bin->b, off, (ut8*)buf, sz) < 1) {
		free (buf);
		return NULL;
	}
	buf[sz] = 0;
	const size_t buflen = strlen (buf);
	char *nullbyte = buf + buflen;
	// the section may hold several NUL-separated strings: join the first two with a space
	if (buflen != sz && nullbyte[1] && buflen < sz) {
		nullbyte[0] = ' ';
	}
	buf[sz] = 0;
	r_str_trim (buf);
	char * res = r_str_escape (buf);
	free (buf);
	return res;
}

/* True when the ELF type is an executable or a shared object (covers PIE binaries). */
bool Elf_(r_bin_elf_is_executable)(ELFOBJ *bin) {
	const int t = bin->ehdr.e_type;
	return t == ET_EXEC || t == ET_DYN;
}
/* radare - LGPL - Copyright 2008-2022 - nibble, pancake, alvaro_fe */ #define R_LOG_ORIGIN "elf" #include <r_types.h> #include <r_util.h> #include "elf.h" #define MIPS_PLT_OFFSET 0x20 #define RISCV_PLT_OFFSET 0x20 #define LOONGARCH_PLT_OFFSET 0x20 #define RISCV_PLT_ENTRY_SIZE 0x10 #define LOONGARCH_PLT_ENTRY_SIZE 0x10 #define X86_PLT_ENTRY_SIZE 0x10 #define SPARC_OFFSET_PLT_ENTRY_FROM_GOT_ADDR -0x6 #define X86_OFFSET_PLT_ENTRY_FROM_GOT_ADDR -0x6 #define ELF_PAGE_MASK 0xFFFFFFFFFFFFF000LL #define ELF_PAGE_SIZE 12 #define R_ELF_NO_RELRO 0 #define R_ELF_PART_RELRO 1 #define R_ELF_FULL_RELRO 2 #define MAX_REL_RELA_SZ (sizeof (Elf_(Rel)) > sizeof (Elf_(Rela))? sizeof (Elf_(Rel)): sizeof (Elf_(Rela))) #define READ8(x, i) r_read_ble8((x) + (i)); (i) += 1 #define READ16(x, i) r_read_ble16((x) + (i), bin->endian); (i) += 2 #define READ32(x, i) r_read_ble32((x) + (i), bin->endian); (i) += 4 #define READ64(x, i) r_read_ble64((x) + (i), bin->endian); (i) += 8 #define BREAD8(x, i) r_buf_read_ble8_at (x, i); (i) += 1 #define BREAD16(x, i) r_buf_read_ble16_at (x, i, bin->endian); (i) += 2 #define BREAD32(x, i) r_buf_read_ble32_at (x, i, bin->endian); (i) += 4 #define BREAD64(x, i) r_buf_read_ble64_at (x, i, bin->endian); (i) += 8 #define NUMENTRIES_ROUNDUP(sectionsize, entrysize) (((sectionsize) + (entrysize)-1) / (entrysize)) #define COMPUTE_PLTGOT_POSITION(rel, pltgot_addr, n_initial_unused_entries) \ ((rel->rva - pltgot_addr - n_initial_unused_entries * R_BIN_ELF_WORDSIZE) / R_BIN_ELF_WORDSIZE) #define GROWTH_FACTOR (1.5) #define round_up(a) ((((a) + (4) - (1)) / (4)) * (4)) #define EF_MIPS_ABI_O32 0x00001000 /* O32 ABI. */ #define EF_MIPS_ABI_O64 0x00002000 /* O32 extended for 64 bit. 
*/ #define EF_MIPS_ABI 0x0000f000 static inline bool is_elfclass64(Elf_(Ehdr) *h) { return h->e_ident[EI_CLASS] == ELFCLASS64; } static bool is_mips_o32(Elf_(Ehdr) *h) { if (h->e_ident[EI_CLASS] != ELFCLASS32) { return false; } if ((h->e_flags & EF_MIPS_ABI2) != 0) { return false; } if (((h->e_flags & EF_MIPS_ABI) != 0) && ((h->e_flags & EF_MIPS_ABI) != EF_MIPS_ABI_O32)) { return false; } return true; } static bool is_mips_n32(Elf_(Ehdr) *h) { if (h->e_ident[EI_CLASS] != ELFCLASS32) { return false; } if (((h->e_flags & EF_MIPS_ABI2) == 0) || ((h->e_flags & EF_MIPS_ABI) != 0)) { return false; } return true; } enum { X86, X86_64, ARM, AARCH64, RCE, ARCH_LEN }; typedef struct reginfo { ut32 regsize; ut32 regdelta; } reginfo_t; static reginfo_t reginf[ARCH_LEN] = { { 160, 0x5c }, { 216, 0x84 }, { 72, 0x5c }, { 272, 0x84 }, { 272, 0x84 } }; static inline int __strnlen(const char *str, int len) { int l = 0; while (IS_PRINTABLE (*str) && --len) { if (((ut8)*str) == 0xff) { break; } str++; l++; } return l + 1; } static bool is_bin_etrel(ELFOBJ *bin) { return bin->ehdr.e_type == ET_REL; } static bool __is_valid_ident(ELFOBJ *bin) { return !strncmp ((char *)bin->ehdr.e_ident, ELFMAG, SELFMAG) || !strncmp ((char *)bin->ehdr.e_ident, CGCMAG, SCGCMAG); } static bool init_ehdr(ELFOBJ *bin) { ut8 e_ident[EI_NIDENT]; ut8 ehdr[sizeof (Elf_(Ehdr))] = {0}; int i, len; if (r_buf_read_at (bin->b, 0, e_ident, EI_NIDENT) == -1) { R_LOG_DEBUG ("read (magic)"); return false; } sdb_set (bin->kv, "elf_type.cparse", "enum elf_type { ET_NONE=0, ET_REL=1," " ET_EXEC=2, ET_DYN=3, ET_CORE=4, ET_LOOS=0xfe00, ET_HIOS=0xfeff," " ET_LOPROC=0xff00, ET_HIPROC=0xffff };", 0); sdb_set (bin->kv, "elf_machine.cparse", "enum elf_machine {EM_NONE=0, EM_M32=1," " EM_SPARC=2, EM_386=3, EM_68K=4, EM_88K=5, EM_IAMCU=6, EM_860=7, EM_MIPS=8," " EM_S370=9, EM_MIPS_RS3_LE=10, EM_RS6000=11, EM_PARISC=15, EM_nCUBE=16," " EM_VPP500=17, EM_SPARC32PLUS=18, EM_960=19, EM_PPC=20, EM_PPC64=21, EM_S390=22," " EM_SPU=23, 
EM_V800=36, EM_FR20=37, EM_RH32=38, EM_RCE=39, EM_ARM=40," " EM_ALPHA=41, EM_SH=42, EM_SPARCV9=43, EM_TRICORE=44, EM_ARC=45, EM_H8_300=46," " EM_H8_300H=47, EM_H8S=48, EM_H8_500=49, EM_IA_64=50, EM_MIPS_X=51," " EM_COLDFIRE=52, EM_68HC12=53, EM_MMA=54, EM_PCP=55, EM_NCPU=56, EM_NDR1=57," " EM_STARCORE=58, EM_ME16=59, EM_ST100=60, EM_TINYJ=61, EM_X86_64=62, EM_PDSP=63," " EM_PDP10=64, EM_PDP11=65, EM_FX66=66, EM_ST9PLUS=67, EM_ST7=68, EM_68HC16=69," " EM_68HC11=70, EM_68HC08=71, EM_68HC05=72, EM_SVX=73, EM_ST19=74, EM_VAX=75," " EM_CRIS=76, EM_JAVELIN=77, EM_FIREPATH=78, EM_ZSP=79, EM_MMIX=80, EM_HUANY=81," " EM_PRISM=82, EM_AVR=83, EM_FR30=84, EM_D10V=85, EM_D30V=86, EM_V850=87," " EM_M32R=88, EM_MN10300=89, EM_MN10200=90, EM_PJ=91, EM_OPENRISC=92," " EM_ARC_COMPACT=93, EM_XTENSA=94, EM_VIDEOCORE=95, EM_TMM_GPP=96, EM_NS32K=97," " EM_TPC=98, EM_SNP1K=99, EM_ST200=100, EM_IP2K=101, EM_MAX=102, EM_CR=103," " EM_F2MC16=104, EM_MSP430=105, EM_BLACKFIN=106, EM_SE_C33=107, EM_SEP=108," " EM_ARCA=109, EM_UNICORE=110, EM_EXCESS=111, EM_DXP=112, EM_ALTERA_NIOS2=113," " EM_CRX=114, EM_XGATE=115, EM_C166=116, EM_M16C=117, EM_DSPIC30F=118, EM_CE=119," " EM_M32C=120, EM_TSK3000=131, EM_RS08=132, EM_SHARC=133, EM_ECOG2=134," " EM_SCORE7=135, EM_DSP24=136, EM_VIDEOCORE3=137, EM_LATTICEMICO32=138," " EM_SE_C17=139, EM_TI_C6000=140, EM_TI_C2000=141, EM_TI_C5500=142," " EM_TI_ARP32=143, EM_TI_PRU=144," " EM_MMDSP_PLUS=160, EM_CYPRESS_M8C=161, EM_R32C=162, EM_TRIMEDIA=163," " EM_QDSP6=164, EM_8051=165, EM_STXP7X=166, EM_NDS32=167," " EM_ECOG1X=168, EM_MAXQ30=169, EM_XIMO16=170, EM_MANIK=171, EM_CRAYNV2=172," " EM_RX=173, EM_METAG=174, EM_MCST_ELBRUS=175, EM_ECOG16=176, EM_CR16=177," " EM_ETPU=178, EM_SLE9X=179, EM_L10M=180, EM_K10M=181, EM_AARCH64=183," " EM_AVR32=185, EM_STM8=186, EM_TILE64=187, EM_TILEPRO=188, EM_CUDA=190," " EM_TILEGX=191, EM_CLOUDSHIELD=192, EM_COREA_1ST=193, EM_COREA_2ND=194," " EM_ARC_COMPACT2=195, EM_OPEN8=196, EM_RL78=197, EM_VIDEOCORE5=198," " EM_78KOR=199, 
EM_56800EX=200, EM_BA1=201, EM_BA2=202, EM_XCORE=203," " EM_MCHP_PIC=204, EM_INTEL205=205, EM_INTEL206=206, EM_INTEL207=207," " EM_INTEL208=208, EM_INTEL209=209, EM_KM32=210, EM_KMX32=211, EM_KMX16=212," " EM_KMX8=213, EM_KVARC=214, EM_CDP=215, EM_COGE=216, EM_COOL=217, EM_NORC=218," " EM_CSR_KALIMBA=219, EM_AMDGPU=224, EM_RISCV=243, EM_LANAI=244, EM_BPF=247," " EM_CSKY=252, EM_KVX=256, EM_LOONGARCH=258}", 0); sdb_set (bin->kv, "elf_class.cparse", "enum elf_class {ELFCLASSNONE=0, ELFCLASS32=1, ELFCLASS64=2};", 0); sdb_set (bin->kv, "elf_data.cparse", "enum elf_data {ELFDATANONE=0, ELFDATA2LSB=1, ELFDATA2MSB=2};", 0); sdb_set (bin->kv, "elf_hdr_version.cparse", "enum elf_hdr_version {EV_NONE=0, EV_CURRENT=1};", 0); sdb_set (bin->kv, "elf_obj_version.cparse", "enum elf_obj_version {EV_NONE=0, EV_CURRENT=1};", 0); sdb_num_set (bin->kv, "elf_header.offset", 0, 0); sdb_num_set (bin->kv, "elf_header.size", sizeof (Elf_(Ehdr)), 0); sdb_set (bin->kv, "elf_ident.format", "[4]z[1]E[1]E[1]E.::" " magic (elf_class)class (elf_data)data (elf_hdr_version)version", 0); #if R_BIN_ELF64 sdb_set (bin->kv, "elf_header.format", "?[2]E[2]E[4]EqqqxN2N2N2N2N2N2" " (elf_ident)ident (elf_type)type (elf_machine)machine (elf_obj_version)version" " entry phoff shoff flags ehsize phentsize phnum shentsize shnum shstrndx", 0); #else sdb_set (bin->kv, "elf_header.format", "?[2]E[2]E[4]ExxxxN2N2N2N2N2N2" " (elf_ident)ident (elf_type)type (elf_machine)machine (elf_obj_version)version" " entry phoff shoff flags ehsize phentsize phnum shentsize shnum shstrndx", 0); #endif bin->endian = (e_ident[EI_DATA] == ELFDATA2MSB)? 
1: 0; memset (&bin->ehdr, 0, sizeof (Elf_(Ehdr))); len = r_buf_read_at (bin->b, 0, ehdr, sizeof (ehdr)); if (len < 32) { // tinyelf != sizeof (Elf_(Ehdr))) { R_LOG_DEBUG ("read (ehdr)"); return false; } // XXX no need to check twice memcpy (&bin->ehdr.e_ident, ehdr, 16); if (!__is_valid_ident (bin)) { return false; } i = 16; // TODO: use r_read or r_buf_read_ apis instead bin->ehdr.e_type = READ16 (ehdr, i); bin->ehdr.e_machine = READ16 (ehdr, i); bin->ehdr.e_version = READ32 (ehdr, i); #if R_BIN_ELF64 bin->ehdr.e_entry = READ64 (ehdr, i); bin->ehdr.e_phoff = READ64 (ehdr, i); bin->ehdr.e_shoff = READ64 (ehdr, i); #else bin->ehdr.e_entry = READ32 (ehdr, i); bin->ehdr.e_phoff = READ32 (ehdr, i); bin->ehdr.e_shoff = READ32 (ehdr, i); #endif bin->ehdr.e_flags = READ32 (ehdr, i); bin->ehdr.e_ehsize = READ16 (ehdr, i); bin->ehdr.e_phentsize = READ16 (ehdr, i); bin->ehdr.e_phnum = READ16 (ehdr, i); bin->ehdr.e_shentsize = READ16 (ehdr, i); bin->ehdr.e_shnum = READ16 (ehdr, i); bin->ehdr.e_shstrndx = READ16 (ehdr, i); return true; // [Outdated] Usage example: // > td `k bin/cur/info/elf_type.cparse`; td `k bin/cur/info/elf_machine.cparse` // > pf `k bin/cur/info/elf_header.format` @ `k bin/cur/info/elf_header.offset` } ut64 Elf_(r_bin_elf_get_phnum)(ELFOBJ *obj) { r_return_val_if_fail (obj, 0); ut64 num = obj->ehdr.e_phnum & UT16_MAX; if (obj->ehdr.e_phnum == 0xffff) { ut32 shnum = obj->ehdr.e_shnum; // sh_info member of the initial entry in section header table. 
if (shnum > 0) {
			ut32 shoff = obj->ehdr.e_shoff;
			Elf_(Shdr) shdr = {0};
			(void)r_buf_read_at (obj->b, shoff, (ut8 *)&shdr, sizeof (shdr));
			// extended numbering: real phnum lives in sh_info of section header 0
			num = shdr.sh_info;
			if ((int)(shdr.sh_info) < 1) {
				return UT16_MAX;
			}
		}
	}
	return num;
}

/* Read the program header table into bin->phdr (array allocated by init_phdr).
 * When linux_kernel_hack is set and a PT_PHDR entry is present, e_phoff is
 * re-derived from the load address and the table is re-read exactly once. */
static bool read_phdr(ELFOBJ *bin, bool linux_kernel_hack) {
	bool phdr_found = false;
	int i;
#if R_BIN_ELF64
	const bool is_elf64 = true;
#else
	const bool is_elf64 = false;
#endif
	ut64 phnum = Elf_(r_bin_elf_get_phnum) (bin);
	for (i = 0; i < phnum; i++) {
		ut8 phdr[sizeof (Elf_(Phdr))] = {0};
		int j = 0;
		const size_t rsize = bin->ehdr.e_phoff + i * sizeof (Elf_(Phdr));
		int len = r_buf_read_at (bin->b, rsize, phdr, sizeof (Elf_(Phdr)));
		if (len < 1) {
			R_LOG_DEBUG ("read (phdr)");
			R_FREE (bin->phdr);
			return false;
		}
		bin->phdr[i].p_type = READ32 (phdr, j);
		if (bin->phdr[i].p_type == PT_PHDR) {
			phdr_found = true;
		}
		// ELF64 stores p_flags right after p_type; ELF32 stores it after p_memsz
		if (is_elf64) {
			bin->phdr[i].p_flags = READ32 (phdr, j);
		}
		bin->phdr[i].p_offset = R_BIN_ELF_READWORD (phdr, j);
		bin->phdr[i].p_vaddr = R_BIN_ELF_READWORD (phdr, j);
		bin->phdr[i].p_paddr = R_BIN_ELF_READWORD (phdr, j);
		bin->phdr[i].p_filesz = R_BIN_ELF_READWORD (phdr, j);
		bin->phdr[i].p_memsz = R_BIN_ELF_READWORD (phdr, j);
		if (!is_elf64) {
			bin->phdr[i].p_flags = READ32 (phdr, j);
			// bin->phdr[i].p_flags |= 1; tiny.elf needs this somehow :? LOAD0 is always +x for linux?
		}
		bin->phdr[i].p_align = R_BIN_ELF_READWORD (phdr, j);
	}
	/* Here is the where all the fun starts.
	 * Linux kernel since 2005 calculates phdr offset wrongly
	 * adding it to the load address (va of the LOAD0).
	 * See `fs/binfmt_elf.c` file this line:
	 * NEW_AUX_ENT(AT_PHDR, load_addr + exec->e_phoff);
	 * So after the first read, we fix the address and read it again */
	if (linux_kernel_hack && phdr_found) {
		ut64 load_addr = Elf_(r_bin_elf_get_baddr) (bin);
		bin->ehdr.e_phoff = Elf_(r_bin_elf_v2p) (bin, load_addr + bin->ehdr.e_phoff);
		return read_phdr (bin, false);
	}
	return true;
}

/* Validate e_phnum/e_phoff against the file size, allocate bin->phdr and
 * populate it via read_phdr, then record cparse/format metadata in the sdb. */
static int init_phdr(ELFOBJ *bin) {
	ut32 phdr_size;
	r_return_val_if_fail (bin && !bin->phdr, false);
	if (!bin->ehdr.e_phnum) {
		return false;
	}
	// overflow-checked multiplication of entry count by entry size
	if (!UT32_MUL (&phdr_size, (ut32)bin->ehdr.e_phnum, sizeof (Elf_(Phdr)))) {
		return false;
	}
	if (!phdr_size) {
		return false;
	}
	if (phdr_size > bin->size) {
		return false;
	}
	// NOTE(review): repeats the previous check with a ut32 cast — looks redundant
	if (phdr_size > (ut32)bin->size) {
		return false;
	}
	if (bin->ehdr.e_phoff > bin->size) {
		return false;
	}
	if (bin->ehdr.e_phoff + phdr_size > bin->size) {
		return false;
	}
	ut64 phnum = Elf_(r_bin_elf_get_phnum) (bin);
	if (!(bin->phdr = R_NEWS0 (Elf_(Phdr), phnum))) {
		r_sys_perror ("malloc (phdr)");
		return false;
	}
	bool linux_kern_hack = false;
	/* Enable this hack only for the X86 64bit ELFs */
	const int _128K = 1024 * 128;
	if (r_buf_size (bin->b) > _128K && (bin->ehdr.e_machine == EM_X86_64 || bin->ehdr.e_machine == EM_386)) {
		linux_kern_hack = true;
	}
	if (!read_phdr (bin, linux_kern_hack)) {
		return false;
	}
	sdb_num_set (bin->kv, "elf_phdr.offset", bin->ehdr.e_phoff, 0);
	sdb_num_set (bin->kv, "elf_phdr.size", sizeof (Elf_(Phdr)), 0);
	sdb_set (bin->kv, "elf_p_type.cparse", "enum elf_p_type {PT_NULL=0,PT_LOAD=1,PT_DYNAMIC=2,"
		"PT_INTERP=3,PT_NOTE=4,PT_SHLIB=5,PT_PHDR=6,PT_LOOS=0x60000000,"
		"PT_HIOS=0x6fffffff,PT_LOPROC=0x70000000,PT_HIPROC=0x7fffffff};", 0);
	sdb_set (bin->kv, "elf_p_flags.cparse", "enum elf_p_flags {PF_None=0,PF_Exec=1,"
		"PF_Write=2,PF_Write_Exec=3,PF_Read=4,PF_Read_Exec=5,PF_Read_Write=6,"
		"PF_Read_Write_Exec=7};", 0);
#if R_BIN_ELF64
	sdb_set (bin->kv, "elf_phdr.format", "[4]E[4]Eqqqqqq (elf_p_type)type (elf_p_flags)flags"
		" offset vaddr paddr filesz memsz align", 0);
#else
sdb_set (bin->kv, "elf_phdr.format", "[4]Exxxxx[4]Ex (elf_p_type)type offset vaddr paddr"
		" filesz memsz (elf_p_flags)flags align", 0);
#endif
	return true;
	// Usage example:
	// > td `k bin/cur/info/elf_p_type.cparse`; td `k bin/cur/info/elf_p_flags.cparse`
	// > pf `k bin/cur/info/elf_phdr.format` @ `k bin/cur/info/elf_phdr.offset`
}

/* Validate e_shnum/e_shoff against the file size, allocate bin->shdr and
 * read every section header, then record cparse/format metadata in the sdb. */
static int init_shdr(ELFOBJ *bin) {
	ut32 shdr_size;
	ut8 shdr[sizeof (Elf_(Shdr))] = {0};
	size_t i, j, len;
	r_return_val_if_fail (bin && !bin->shdr, false);
	// overflow-checked multiplication of entry count by entry size
	if (!UT32_MUL (&shdr_size, bin->ehdr.e_shnum, sizeof (Elf_(Shdr)))) {
		return false;
	}
	if (shdr_size < 1) {
		return false;
	}
	if (shdr_size > bin->size) {
		return false;
	}
	if (bin->ehdr.e_shoff > bin->size) {
		return false;
	}
	if (bin->ehdr.e_shoff + shdr_size > bin->size) {
		return false;
	}
	if (!(bin->shdr = R_NEWS0 (Elf_(Shdr), bin->ehdr.e_shnum))) {
		r_sys_perror ("malloc (shdr)");
		return false;
	}
	sdb_num_set (bin->kv, "elf_shdr.offset", bin->ehdr.e_shoff, 0);
	sdb_num_set (bin->kv, "elf_shdr.size", sizeof (Elf_(Shdr)), 0);
	sdb_set (bin->kv, "elf_s_type.cparse", "enum elf_s_type {SHT_NULL=0,SHT_PROGBITS=1,"
		"SHT_SYMTAB=2,SHT_STRTAB=3,SHT_RELA=4,SHT_HASH=5,SHT_DYNAMIC=6,SHT_NOTE=7,"
		"SHT_NOBITS=8,SHT_REL=9,SHT_SHLIB=10,SHT_DYNSYM=11,SHT_LOOS=0x60000000,"
		"SHT_HIOS=0x6fffffff,SHT_LOPROC=0x70000000,SHT_HIPROC=0x7fffffff};", 0);
	for (i = 0; i < bin->ehdr.e_shnum; i++) {
		j = 0;
		len = r_buf_read_at (bin->b, bin->ehdr.e_shoff + i * sizeof (Elf_(Shdr)), shdr, sizeof (Elf_(Shdr)));
		if (len < 1) {
			R_LOG_DEBUG ("read (shdr) at 0x%" PFMT64x, (ut64) bin->ehdr.e_shoff);
			R_FREE (bin->shdr);
			return false;
		}
		// field order matters: READ32/R_BIN_ELF_READWORD advance j as they decode
		bin->shdr[i].sh_name = READ32 (shdr, j);
		bin->shdr[i].sh_type = READ32 (shdr, j);
		bin->shdr[i].sh_flags = R_BIN_ELF_READWORD (shdr, j);
		bin->shdr[i].sh_addr = R_BIN_ELF_READWORD (shdr, j);
		bin->shdr[i].sh_offset = R_BIN_ELF_READWORD (shdr, j);
		bin->shdr[i].sh_size = R_BIN_ELF_READWORD (shdr, j);
		bin->shdr[i].sh_link = READ32 (shdr, j);
		bin->shdr[i].sh_info = READ32 (shdr, j);
		bin->shdr[i].sh_addralign = R_BIN_ELF_READWORD (shdr, j);
		bin->shdr[i].sh_entsize = R_BIN_ELF_READWORD (shdr, j);
	}
#if R_BIN_ELF64
	sdb_set (bin->kv, "elf_s_flags_64.cparse", "enum elf_s_flags_64 {SF64_None=0,SF64_Exec=1,"
		"SF64_Alloc=2,SF64_Alloc_Exec=3,SF64_Write=4,SF64_Write_Exec=5,"
		"SF64_Write_Alloc=6,SF64_Write_Alloc_Exec=7};", 0);
	sdb_set (bin->kv, "elf_shdr.format", "x[4]E[8]Eqqqxxqq name (elf_s_type)type"
		" (elf_s_flags_64)flags addr offset size link info addralign entsize", 0);
#else
	sdb_set (bin->kv, "elf_s_flags_32.cparse", "enum elf_s_flags_32 {SF32_None=0,SF32_Exec=1,"
		"SF32_Alloc=2,SF32_Alloc_Exec=3,SF32_Write=4,SF32_Write_Exec=5,"
		"SF32_Write_Alloc=6,SF32_Write_Alloc_Exec=7};", 0);
	sdb_set (bin->kv, "elf_shdr.format", "x[4]E[4]Exxxxxxx name (elf_s_type)type"
		" (elf_s_flags_32)flags addr offset size link info addralign entsize", 0);
#endif
	return true;
	// Usage example:
	// > td `k bin/cur/info/elf_s_type.cparse`; td `k bin/cur/info/elf_s_flags_64.cparse`
	// > pf `k bin/cur/info/elf_shdr.format` @ `k bin/cur/info/elf_shdr.offset`
}

/* A section index is usable when below e_shnum and outside the reserved range. */
static bool is_shidx_valid(ELFOBJ *bin, Elf_(Half) value) {
	return value < bin->ehdr.e_shnum && !R_BETWEEN (SHN_LORESERVE, value, SHN_HIRESERVE);
}

/* Load the section-header string table (e_shstrndx) into bin->shstrtab,
 * NUL-terminated, after bounds-checking offset and size against the file. */
static int init_strtab(ELFOBJ *bin) {
	r_return_val_if_fail (!bin->strtab, false);
	if (!bin->shdr) {
		return false;
	}
	Elf_(Half) shstrndx = bin->ehdr.e_shstrndx;
	if (shstrndx != SHN_UNDEF && !is_shidx_valid (bin, shstrndx)) {
		return false;
	}
	/* sh_size must be lower than UT32_MAX and not equal to zero, to avoid bugs on malloc() */
	if (bin->shdr[shstrndx].sh_size > UT32_MAX) {
		return false;
	}
	if (!bin->shdr[shstrndx].sh_size) {
		return false;
	}
	bin->shstrtab_section = bin->strtab_section = &bin->shdr[shstrndx];
	bin->shstrtab_size = bin->shstrtab_section->sh_size;
	if (bin->shstrtab_size > bin->size) {
		return false;
	}
	if (bin->shstrtab_section->sh_offset > bin->size) {
		return false;
	}
	if (bin->shstrtab_section->sh_offset + bin->shstrtab_section->sh_size > bin->size) {
		return false;
	}
	if (!(bin->shstrtab =
calloc (1, bin->shstrtab_size + 1))) {
		r_sys_perror ("malloc");
		bin->shstrtab = NULL;
		return false;
	}
	int res = r_buf_read_at (bin->b, bin->shstrtab_section->sh_offset, (ut8*)bin->shstrtab, bin->shstrtab_section->sh_size);
	if (res < 1) {
		R_LOG_DEBUG ("read (shstrtab) at 0x%" PFMT64x, (ut64) bin->shstrtab_section->sh_offset);
		R_FREE (bin->shstrtab);
		return false;
	}
	// the +1 in the calloc above guarantees room for this terminator
	bin->shstrtab[bin->shstrtab_section->sh_size] = '\0';
	sdb_num_set (bin->kv, "elf_shstrtab.offset", bin->shstrtab_section->sh_offset, 0);
	sdb_num_set (bin->kv, "elf_shstrtab.size", bin->shstrtab_section->sh_size, 0);
	return true;
}

/* Return the PT_DYNAMIC program header, or NULL when absent or out of bounds. */
static Elf_(Phdr) *get_dynamic_segment(ELFOBJ *bin) {
	int i;
	for (i = 0; i < bin->ehdr.e_phnum; i++) {
		if (bin->phdr[i].p_type == PT_DYNAMIC) {
			if (bin->phdr[i].p_filesz > bin->size) {
				return NULL;
			}
			if (bin->phdr[i].p_offset > bin->size) {
				return NULL;
			}
			if (bin->phdr[i].p_offset + sizeof (Elf_(Dyn)) > bin->size) {
				return NULL;
			}
			return &bin->phdr[i];
		}
	}
	return NULL;
}

/* Record the RELRO protection level and the dynamic string table location. */
static void init_dynamic_section_sdb(ELFOBJ *bin, Elf_(Addr) strtabaddr, size_t strsize) {
	int r = Elf_(r_bin_elf_has_relro) (bin);
	switch (r) {
	case R_ELF_FULL_RELRO:
		sdb_set (bin->kv, "elf.relro", "full", 0);
		break;
	case R_ELF_PART_RELRO:
		sdb_set (bin->kv, "elf.relro", "partial", 0);
		break;
	default:
		sdb_set (bin->kv, "elf.relro", "no", 0);
		break;
	}
	sdb_num_set (bin->kv, "elf_strtab.offset", strtabaddr, 0);
	sdb_num_set (bin->kv, "elf_strtab.size", strsize, 0);
}

/* Reset every cached DT_* value to its "absent" sentinel before parsing. */
static void set_default_value_dynamic_info(ELFOBJ *bin) {
	bin->dyn_info.dt_pltrelsz = 0;
	bin->dyn_info.dt_pltgot = R_BIN_ELF_ADDR_MAX;
	bin->dyn_info.dt_hash = R_BIN_ELF_ADDR_MAX;
	bin->dyn_info.dt_strtab = R_BIN_ELF_ADDR_MAX;
	bin->dyn_info.dt_symtab = R_BIN_ELF_ADDR_MAX;
	bin->dyn_info.dt_rela = R_BIN_ELF_ADDR_MAX;
	bin->dyn_info.dt_relasz = 0;
	bin->dyn_info.dt_relaent = 0;
	bin->dyn_info.dt_strsz = 0;
	bin->dyn_info.dt_syment = 0;
	bin->dyn_info.dt_rel = R_BIN_ELF_ADDR_MAX;
	bin->dyn_info.dt_relsz = 0;
	bin->dyn_info.dt_relent = 0;
	bin->dyn_info.dt_pltrel = R_BIN_ELF_XWORD_MAX;
	bin->dyn_info.dt_jmprel = R_BIN_ELF_ADDR_MAX;
	// NOTE(review): dt_pltgot is assigned twice in this function — harmless but redundant
	bin->dyn_info.dt_pltgot = R_BIN_ELF_ADDR_MAX;
	bin->dyn_info.dt_mips_pltgot = R_BIN_ELF_ADDR_MAX;
	bin->dyn_info.dt_bind_now = false;
	bin->dyn_info.dt_flags = R_BIN_ELF_XWORD_MAX;
	bin->dyn_info.dt_flags_1 = R_BIN_ELF_XWORD_MAX;
	bin->dyn_info.dt_rpath = R_BIN_ELF_XWORD_MAX;
	bin->dyn_info.dt_runpath = R_BIN_ELF_XWORD_MAX;
	r_vector_init(&bin->dyn_info.dt_needed, sizeof (Elf_(Off)), NULL, NULL);
}

/* How many complete Elf_(Dyn) entries fit in dyn_size bytes. */
static size_t get_maximum_number_of_dynamic_entries(ut64 dyn_size) {
	return dyn_size / sizeof (Elf_(Dyn));
}

/* Read one dynamic entry at entry_offset into *d; false on a short read. */
static bool fill_dynamic_entry(ELFOBJ *bin, ut64 entry_offset, Elf_(Dyn) *d) {
	ut8 sdyn[sizeof (Elf_(Dyn))] = {0};
	int j = 0;
	int len = r_buf_read_at (bin->b, entry_offset, sdyn, sizeof (Elf_(Dyn)));
	if (len < 1) {
		return false;
	}
	d->d_tag = R_BIN_ELF_READWORD (sdyn, j);
	d->d_un.d_ptr = R_BIN_ELF_READWORD (sdyn, j);
	return true;
}

/* Walk the PT_DYNAMIC table and cache the tags of interest in bin->dyn_info;
 * stops at the first DT_NULL entry or short read. */
static void fill_dynamic_entries(ELFOBJ *bin, ut64 loaded_offset, ut64 dyn_size) {
	Elf_(Dyn) d = {0};
	size_t i;
	size_t number_of_entries = get_maximum_number_of_dynamic_entries(dyn_size);
	for (i = 0; i < number_of_entries; i++) {
		ut64 entry_offset = loaded_offset + i * sizeof (Elf_(Dyn));
		if (!fill_dynamic_entry (bin, entry_offset, &d)) {
			break;
		}
		switch (d.d_tag) {
		case DT_NULL:
			break;
		case DT_PLTRELSZ:
			bin->dyn_info.dt_pltrelsz = d.d_un.d_val;
			break;
		case DT_PLTGOT:
			bin->dyn_info.dt_pltgot = d.d_un.d_ptr;
			break;
		case DT_HASH:
			bin->dyn_info.dt_hash = d.d_un.d_ptr;
			break;
		case DT_STRTAB:
			bin->dyn_info.dt_strtab = d.d_un.d_ptr;
			break;
		case DT_SYMTAB:
			bin->dyn_info.dt_symtab = d.d_un.d_ptr;
			break;
		case DT_RELA:
			bin->dyn_info.dt_rela = d.d_un.d_ptr;
			break;
		case DT_RELASZ:
			bin->dyn_info.dt_relasz = d.d_un.d_val;
			break;
		case DT_RELAENT:
			bin->dyn_info.dt_relaent = d.d_un.d_val;
			break;
		case DT_STRSZ:
			bin->dyn_info.dt_strsz = d.d_un.d_val;
			break;
		case DT_SYMENT:
			bin->dyn_info.dt_syment = d.d_un.d_val;
			break;
		case DT_REL:
			bin->dyn_info.dt_rel = d.d_un.d_ptr;
			break;
		case DT_RELSZ:
bin->dyn_info.dt_relsz = d.d_un.d_val;
			break;
		case DT_RELENT:
			bin->dyn_info.dt_relent = d.d_un.d_val;
			break;
		case DT_PLTREL:
			bin->dyn_info.dt_pltrel = d.d_un.d_val;
			break;
		case DT_JMPREL:
			bin->dyn_info.dt_jmprel = d.d_un.d_ptr;
			break;
		case DT_MIPS_PLTGOT:
			bin->dyn_info.dt_mips_pltgot = d.d_un.d_ptr;
			break;
		case DT_BIND_NOW:
			bin->dyn_info.dt_bind_now = true;
			break;
		case DT_FLAGS:
			bin->dyn_info.dt_flags = d.d_un.d_val;
			break;
		case DT_FLAGS_1:
			bin->dyn_info.dt_flags_1 = d.d_un.d_val;
			break;
		case DT_RPATH:
			bin->dyn_info.dt_rpath = d.d_un.d_val;
			break;
		case DT_RUNPATH:
			bin->dyn_info.dt_runpath = d.d_un.d_val;
			break;
		case DT_NEEDED:
			r_vector_push (&bin->dyn_info.dt_needed, &d.d_un.d_val);
			break;
		case DT_INIT:
		case DT_FINI:
		case DT_DEBUG:
		case DT_INIT_ARRAY:
		case DT_FINI_ARRAY:
		case DT_INIT_ARRAYSZ:
		case DT_FINI_ARRAYSZ:
		case DT_PREINIT_ARRAY:
		case DT_PREINIT_ARRAYSZ:
		case DT_SONAME:
		case DT_GNU_HASH:
			// common dynamic entries in ELF, but we don't need to
			// do anything with them.
			break;
		default:
			// the version tags share a contiguous range; everything else is ignored
			if ((d.d_tag >= DT_VERSYM) && (d.d_tag <= DT_VERNEEDNUM)) {
				bin->version_info[DT_VERSIONTAGIDX (d.d_tag)] = d.d_un.d_val;
			} else {
				R_LOG_DEBUG ("Dynamic tag %" PFMT64d " not handled", (ut64) d.d_tag);
			}
			break;
		}
		if (d.d_tag == DT_NULL) {
			break;
		}
	}
}

/* Locate the PT_DYNAMIC segment, cache its entries, and load the dynamic
 * string table (DT_STRTAB/DT_STRSZ) into bin->strtab. */
static int init_dynamic_section(ELFOBJ *bin) {
	ut64 strtabaddr = 0;
	char *strtab = NULL;
	size_t strsize = 0;
	int r;
	ut64 dyn_size = 0, loaded_offset;
	set_default_value_dynamic_info(bin);
	r_return_val_if_fail (bin, false);
	if (!bin->phdr || !bin->ehdr.e_phnum) {
		return false;
	}
	Elf_(Phdr) *dyn_phdr = get_dynamic_segment (bin);
	if (!dyn_phdr) {
		return false;
	}
	dyn_size = dyn_phdr->p_filesz;
	loaded_offset = Elf_(r_bin_elf_v2p_new) (bin, dyn_phdr->p_vaddr);
	if (loaded_offset == UT64_MAX) {
		return false;
	}
	if (!dyn_size || loaded_offset + dyn_size > bin->size) {
		return false;
	}
	fill_dynamic_entries (bin, loaded_offset, dyn_size);
	if (bin->dyn_info.dt_strtab != R_BIN_ELF_ADDR_MAX) {
		strtabaddr = Elf_(r_bin_elf_v2p_new) (bin, bin->dyn_info.dt_strtab);
	}
	if (bin->dyn_info.dt_strsz > 0) {
		strsize = bin->dyn_info.dt_strsz;
	}
	// reject missing, oversized or out-of-file string tables in one shot
	if (strtabaddr == UT64_MAX || strtabaddr > bin->size || strsize > ST32_MAX ||
			!strsize || strsize > bin->size || strtabaddr + strsize > bin->size) {
		if (!strtabaddr) {
			R_LOG_DEBUG ("DT_STRTAB not found or invalid");
		}
		return false;
	}
	strtab = (char *)calloc (1, strsize + 1);
	if (!strtab) {
		return false;
	}
	r = r_buf_read_at (bin->b, strtabaddr, (ut8 *)strtab, strsize);
	if (r < 1) {
		free (strtab);
		return false;
	}
	bin->strtab = strtab;
	bin->strtab_size = strsize;
	init_dynamic_section_sdb (bin, strtabaddr, strsize);
	return true;
}

/* Linear lookup of a section by name in the cached g_sections array. */
static RBinElfSection* get_section_by_name(ELFOBJ *bin, const char *section_name) {
	if (bin->g_sections) {
		size_t i;
		for (i = 0; !bin->g_sections[i].last; i++) {
			if (!strncmp (bin->g_sections[i].name, section_name, ELF_STRING_LENGTH - 1)) {
				return &bin->g_sections[i];
			}
		}
	}
	return NULL;
}

/* Render VER_FLG_* bits as a short human-readable string.
 * NOTE(review): returns a static buffer (not thread-safe) or a string
 * literal for the zero case — callers must not free or mutate the result. */
static char *get_ver_flags(ut32 flags) {
	if (!flags) {
		return "none";
	}
	static char buff[32];
	buff[0] = 0;
	if (flags & VER_FLG_BASE) {
		strcpy (buff, "BASE ");
	}
	if (flags & VER_FLG_WEAK) {
		if (flags & VER_FLG_BASE) {
			strcat (buff, "| ");
		}
		strcat (buff, "WEAK ");
	}
	if (flags & ~(VER_FLG_BASE | VER_FLG_WEAK)) {
		strcat (buff, "| <unknown>");
	}
	return buff;
}

/* Decode a SHT_GNU_versym section: one 16-bit version index per dynamic
 * symbol, resolved against the verneed/verdef tables, stored in a new Sdb. */
static Sdb *store_versioninfo_gnu_versym(ELFOBJ *bin, Elf_(Shdr) *shdr, int sz) {
	size_t i;
	const ut64 num_entries = sz / sizeof (Elf_(Versym));
	const char *section_name = "";
	const char *link_section_name = "";
	Sdb *sdb = sdb_new0 ();
	if (!sdb) {
		return NULL;
	}
	if (!bin->version_info[DT_VERSIONTAGIDX (DT_VERSYM)]) {
		sdb_free (sdb);
		return NULL;
	}
	if (shdr->sh_link >= bin->ehdr.e_shnum) {
		sdb_free (sdb);
		return NULL;
	}
	Elf_(Shdr) *link_shdr = &bin->shdr[shdr->sh_link];
	ut8 *edata = (ut8*) calloc (R_MAX (1, num_entries), 2 * sizeof (ut8));
	if (!edata) {
		sdb_free (sdb);
		return NULL;
	}
	ut16 *data = (ut16 *)calloc (R_MAX (1, num_entries), sizeof (ut16));
	if (!data) {
		free (edata);
		sdb_free (sdb);
		return NULL;
	}
ut64 off = Elf_(r_bin_elf_v2p) (bin, bin->version_info[DT_VERSIONTAGIDX (DT_VERSYM)]);
	// resolve printable names for this section and the one it links to
	if (bin->shstrtab && shdr->sh_name < bin->shstrtab_size) {
		section_name = &bin->shstrtab[shdr->sh_name];
	}
	if (bin->shstrtab && link_shdr->sh_name < bin->shstrtab_size) {
		link_section_name = &bin->shstrtab[link_shdr->sh_name];
	}
	// NOTE(review): return value of this read is unchecked — TODO confirm intended
	r_buf_read_at (bin->b, off, edata, sizeof (ut16) * num_entries);
	sdb_set (sdb, "section_name", section_name, 0);
	sdb_num_set (sdb, "num_entries", num_entries, 0);
	sdb_num_set (sdb, "addr", shdr->sh_addr, 0);
	sdb_num_set (sdb, "offset", shdr->sh_offset, 0);
	sdb_num_set (sdb, "link", shdr->sh_link, 0);
	sdb_set (sdb, "link_section_name", link_section_name, 0);
	// convert the raw on-disk entries into host-order ut16 values
	for (i = num_entries; i--;) {
		data[i] = r_read_ble16 (&edata[i * sizeof (ut16)], bin->endian);
	}
	R_FREE (edata);
	char *tmp_val = NULL;
	// entries are emitted in groups of four ("entryN" keys)
	for (i = 0; i < num_entries; i += 4) {
		size_t j;
		int check_def;
		char key[32] = {0};
		for (j = 0; (j < 4) && (i + j) < num_entries; j++) {
			int k;
			snprintf (key, sizeof (key), "entry%d", (int)(i + j));
			switch (data[i + j]) {
			case 0:
				sdb_set (sdb, key, "0 (*local*)", 0);
				break;
			case 1:
				sdb_set (sdb, key, "1 (*global*)", 0);
				break;
			default:
				free (tmp_val);
				// upper bit flags "hidden"; mask it off for the printable index
				tmp_val = r_str_newf ("%x ", data[i+j] & 0x7FFF);
				check_def = true;
				// first try to resolve the index against the verneed chain
				if (bin->version_info[DT_VERSIONTAGIDX (DT_VERNEED)]) {
					Elf_(Verneed) vn;
					ut8 svn[sizeof (Elf_(Verneed))] = {0};
					ut64 offset = Elf_(r_bin_elf_v2p) (bin, bin->version_info[DT_VERSIONTAGIDX (DT_VERNEED)]);
					do {
						Elf_(Vernaux) vna;
						ut8 svna[sizeof (Elf_(Vernaux))] = {0};
						ut64 a_off;
						if (offset > bin->size || offset + sizeof (vn) > bin->size) {
							goto beach;
						}
						if (r_buf_read_at (bin->b, offset, svn, sizeof (svn)) < 0) {
							R_LOG_DEBUG ("Cannot read Verneed for Versym");
							goto beach;
						}
						k = 0;
						vn.vn_version = READ16 (svn, k);
						vn.vn_cnt = READ16 (svn, k);
						vn.vn_file = READ32 (svn, k);
						vn.vn_aux = READ32 (svn, k);
						vn.vn_next = READ32 (svn, k);
						a_off = offset + vn.vn_aux;
						// walk the vernaux list hanging off this verneed entry
						do {
							if (a_off > bin->size || a_off + sizeof (vna) > bin->size) {
								goto beach;
							}
							if (r_buf_read_at (bin->b, a_off, svna, sizeof (svna)) < 0) {
								R_LOG_DEBUG ("Cannot read Vernaux for Versym");
								goto beach;
							}
							k = 0;
							vna.vna_hash = READ32 (svna, k);
							vna.vna_flags = READ16 (svna, k);
							vna.vna_other = READ16 (svna, k);
							vna.vna_name = READ32 (svna, k);
							vna.vna_next = READ32 (svna, k);
							a_off += vna.vna_next;
						} while (vna.vna_other != data[i + j] && vna.vna_next != 0);
						if (vna.vna_other == data[i + j]) {
							if (vna.vna_name > bin->strtab_size) {
								goto beach;
							}
							char *val = r_str_newf ("%s(%s)", tmp_val, bin->strtab + vna.vna_name);
							sdb_set (sdb, key, val, 0);
							free (val);
							check_def = false;
							break;
						}
						offset += vn.vn_next;
					} while (vn.vn_next);
				}
				// not a needed version: try the verdef table next
				ut64 vinfoaddr = bin->version_info[DT_VERSIONTAGIDX (DT_VERDEF)];
				if (check_def && data[i + j] != 0x8001 && vinfoaddr) {
					Elf_(Verdef) vd;
					ut8 svd[sizeof (Elf_(Verdef))] = {0};
					ut64 offset = Elf_(r_bin_elf_v2p) (bin, vinfoaddr);
					if (offset > bin->size || offset + sizeof (vd) > bin->size) {
						goto beach;
					}
					do {
						if (r_buf_read_at (bin->b, offset, svd, sizeof (svd)) < 0) {
							R_LOG_DEBUG ("Cannot read Verdef for Versym");
							goto beach;
						}
						k = 0;
						vd.vd_version = READ16 (svd, k);
						vd.vd_flags = READ16 (svd, k);
						vd.vd_ndx = READ16 (svd, k);
						vd.vd_cnt = READ16 (svd, k);
						vd.vd_hash = READ32 (svd, k);
						vd.vd_aux = READ32 (svd, k);
						vd.vd_next = READ32 (svd, k);
						offset += vd.vd_next;
					} while (vd.vd_ndx != (data[i + j] & 0x7FFF) && vd.vd_next != 0);
					if (vd.vd_ndx == (data[i + j] & 0x7FFF)) {
						Elf_(Verdaux) vda;
						ut8 svda[sizeof (Elf_(Verdaux))] = {0};
						// offset already advanced past the match; step back to its aux
						ut64 off_vda = offset - vd.vd_next + vd.vd_aux;
						if (off_vda > bin->size || off_vda + sizeof (vda) > bin->size) {
							goto beach;
						}
						if (r_buf_read_at (bin->b, off_vda, svda, sizeof (svda)) < 0) {
							R_LOG_DEBUG ("Cannot read Verdaux for Versym");
							goto beach;
						}
						k = 0;
						vda.vda_name = READ32 (svda, k);
						vda.vda_next = READ32 (svda, k);
						if (vda.vda_name > bin->strtab_size) {
							goto beach;
						}
						const char *name = bin->strtab + vda.vda_name;
						if (name) {
							char *fname = r_str_newf ("%s(%s%-*s)", tmp_val, name, (int)(12 - strlen
(name)),")");
							sdb_set (sdb, key, fname, 0);
							free (fname);
						}
					}
				}
			}
		}
		R_FREE (tmp_val);
	}
beach:
	R_FREE (tmp_val);
	free (data);
	return sdb;
}

/* Decode a SHT_GNU_verdef section (versions this object defines) into an Sdb
 * tree: one "verdefN" namespace per entry, "parentN" children for its auxes. */
static Sdb *store_versioninfo_gnu_verdef(ELFOBJ *bin, Elf_(Shdr) *shdr, int sz) {
	const char *section_name = "";
	const char *link_section_name = "";
	char *end = NULL;
	ut8 dfs[sizeof (Elf_(Verdef))] = {0};
	ut32 cnt;
	size_t i;
	if (shdr->sh_link >= bin->ehdr.e_shnum) {
		return false;
	}
	Elf_(Shdr) *link_shdr = &bin->shdr[shdr->sh_link];
#ifdef R_BIN_ELF64
	if ((int)shdr->sh_size < 1 || shdr->sh_size > SIZE_MAX) {
#else
	if ((int)shdr->sh_size < 1) {
#endif
		return false;
	}
	if (shdr->sh_size < sizeof (Elf_(Verdef)) || shdr->sh_size < sizeof (Elf_(Verdaux))) {
		return false;
	}
	Elf_(Verdef) *defs = calloc (shdr->sh_size, 1);
	if (!defs) {
		R_LOG_DEBUG ("Cannot allocate memory (Check Elf_(Verdef))");
		return false;
	}
	if (bin->shstrtab && shdr->sh_name < bin->shstrtab_size) {
		section_name = &bin->shstrtab[shdr->sh_name];
	}
	if (link_shdr && bin->shstrtab && link_shdr->sh_name < bin->shstrtab_size) {
		link_section_name = &bin->shstrtab[link_shdr->sh_name];
	}
	Sdb *sdb = sdb_new0 ();
	if (!sdb) {
		free (defs);
		return false;
	}
	// clamp the walk to the part of the section actually inside the file
	size_t shsize = shdr->sh_size;
	if (shdr->sh_size > bin->size) {
		if (bin->verbose) {
			eprintf ("Truncating shsize from %d to %d\n", (int)shdr->sh_size, (int)bin->size);
		}
		if (bin->size > shdr->sh_offset) {
			shsize = bin->size - shdr->sh_offset;
		} else {
			shsize = bin->size;
		}
	}
	end = (char *)defs + shsize; //& shdr->sh_size;
	sdb_set (sdb, "section_name", section_name, 0);
	sdb_num_set (sdb, "entries", shdr->sh_info, 0);
	sdb_num_set (sdb, "addr", shdr->sh_addr, 0);
	sdb_num_set (sdb, "offset", shdr->sh_offset, 0);
	sdb_num_set (sdb, "link", shdr->sh_link, 0);
	sdb_set (sdb, "link_section_name", link_section_name, 0);
	for (cnt = 0, i = 0; cnt < shdr->sh_info && i < shdr->sh_size; cnt++) {
		Sdb *sdb_verdef = sdb_new0 ();
		char *vstart = ((char*)defs) + i;
		size_t vstart_off = i;
		char key[32] = {0};
		Elf_(Verdef) *verdef = (Elf_(Verdef)*)vstart;
		Elf_(Verdaux) aux = {0};
		int j = 0;
		int isum = 0;
		if (vstart + sizeof (*verdef) > end) {
			break;
		}
		r_buf_read_at (bin->b, shdr->sh_offset + i, dfs, sizeof (Elf_(Verdef)));
		verdef->vd_version = READ16 (dfs, j);
		verdef->vd_flags = READ16 (dfs, j);
		verdef->vd_ndx = READ16 (dfs, j);
		verdef->vd_cnt = READ16 (dfs, j);
		verdef->vd_hash = READ32 (dfs, j);
		verdef->vd_aux = READ32 (dfs, j);
		verdef->vd_next = READ32 (dfs, j);
		int vdaux = verdef->vd_aux;
		if (vdaux < 1 || shdr->sh_size - vstart_off < vdaux) {
			sdb_free (sdb_verdef);
			goto out_error;
		}
		vstart += vdaux;
		vstart_off += vdaux;
		if (vstart > end || shdr->sh_size - sizeof (Elf_(Verdaux)) < vstart_off) {
			sdb_free (sdb_verdef);
			goto out_error;
		}
		j = 0;
		aux.vda_name = READ32 (vstart, j);
		aux.vda_next = READ32 (vstart, j);
		isum = i + verdef->vd_aux;
		if (aux.vda_name > bin->dynstr_size) {
			sdb_free (sdb_verdef);
			goto out_error;
		}
		sdb_num_set (sdb_verdef, "idx", i, 0);
		sdb_num_set (sdb_verdef, "vd_version", verdef->vd_version, 0);
		sdb_num_set (sdb_verdef, "vd_ndx", verdef->vd_ndx, 0);
		sdb_num_set (sdb_verdef, "vd_cnt", verdef->vd_cnt, 0);
		sdb_set (sdb_verdef, "vda_name", &bin->dynstr[aux.vda_name], 0);
		sdb_set (sdb_verdef, "flags", get_ver_flags (verdef->vd_flags), 0);
		// remaining verdaux entries become "parentN" child namespaces
		for (j = 1; j < verdef->vd_cnt; j++) {
			int k;
			Sdb *sdb_parent = sdb_new0 ();
			if (shdr->sh_size - vstart_off < aux.vda_next) {
				sdb_free (sdb_verdef);
				sdb_free (sdb_parent);
				goto out_error;
			}
			isum += aux.vda_next;
			vstart += aux.vda_next;
			vstart_off += aux.vda_next;
			if (vstart > end || shdr->sh_size - sizeof (Elf_(Verdaux)) < vstart_off) {
				sdb_free (sdb_verdef);
				sdb_free (sdb_parent);
				goto out_error;
			}
			k = 0;
			aux.vda_name = READ32 (vstart, k);
			aux.vda_next = READ32 (vstart, k);
			if (aux.vda_name > bin->dynstr_size) {
				sdb_free (sdb_verdef);
				sdb_free (sdb_parent);
				goto out_error;
			}
			sdb_num_set (sdb_parent, "idx", isum, 0);
			sdb_num_set (sdb_parent, "parent", j, 0);
			sdb_set (sdb_parent, "vda_name", &bin->dynstr[aux.vda_name], 0);
			snprintf (key, sizeof (key), "parent%d", j - 1);
			sdb_ns_set (sdb_verdef, key, sdb_parent);
		}
		snprintf (key, sizeof (key), "verdef%u", cnt);
		sdb_ns_set (sdb, key, sdb_verdef);
		// NOTE(review): sdb_verdef was just handed to sdb via sdb_ns_set; freeing it
		// here and then sdb at out_error looks like a potential double-free — verify
		// sdb_ns_set/sdb_free ownership semantics before touching this path
		if (!verdef->vd_next || shdr->sh_size - i < verdef->vd_next) {
			sdb_free (sdb_verdef);
			goto out_error;
		}
		if ((st32)verdef->vd_next < 1) {
			R_LOG_DEBUG ("Invalid vd_next in the ELF version");
			break;
		}
		i += verdef->vd_next;
	}
	free (defs);
	return sdb;
out_error:
	free (defs);
	sdb_free (sdb);
	return NULL;
}

/* Decode a SHT_GNU_verneed section (versions this object requires) into an
 * Sdb tree: one "versionN" namespace per file, "vernauxN" children per aux. */
static Sdb *store_versioninfo_gnu_verneed(ELFOBJ *bin, Elf_(Shdr) *shdr, int sz) {
	ut8 *end, *need = NULL;
	const char *section_name = "";
	Elf_(Shdr) *link_shdr = NULL;
	const char *link_section_name = "";
	Sdb *sdb_vernaux = NULL;
	Sdb *sdb_version = NULL;
	Sdb *sdb = NULL;
	ut64 i;
	int cnt;
	if (!bin || !bin->dynstr) {
		return NULL;
	}
	if (shdr->sh_link >= bin->ehdr.e_shnum) {
		return NULL;
	}
#ifdef R_BIN_ELF64
	if ((int)shdr->sh_size < 1 || shdr->sh_size > SIZE_MAX) {
#else
	if ((int)shdr->sh_size < 1) {
#endif
		return NULL;
	}
	sdb = sdb_new0 ();
	if (!sdb) {
		return NULL;
	}
	link_shdr = &bin->shdr[shdr->sh_link];
	if (bin->shstrtab && shdr->sh_name < bin->shstrtab_size) {
		section_name = &bin->shstrtab[shdr->sh_name];
	}
	if (bin->shstrtab && link_shdr->sh_name < bin->shstrtab_size) {
		link_section_name = &bin->shstrtab[link_shdr->sh_name];
	}
	if (!(need = (ut8*) calloc (R_MAX (1, shdr->sh_size), sizeof (ut8)))) {
		R_LOG_ERROR ("Cannot allocate memory for Elf_(Verneed)");
		goto beach;
	}
	end = need + shdr->sh_size;
	sdb_set (sdb, "section_name", section_name, 0);
	sdb_num_set (sdb, "num_entries", shdr->sh_info, 0);
	sdb_num_set (sdb, "addr", shdr->sh_addr, 0);
	sdb_num_set (sdb, "offset", shdr->sh_offset, 0);
	sdb_num_set (sdb, "link", shdr->sh_link, 0);
	sdb_set (sdb, "link_section_name", link_section_name, 0);
	if (shdr->sh_offset > bin->size || shdr->sh_offset + shdr->sh_size > bin->size) {
		goto beach;
	}
	// guard against sh_offset + sh_size overflowing
	if (shdr->sh_offset + shdr->sh_size < shdr->sh_size) {
		goto beach;
	}
	// NOTE(review): i is ut64 but r_buf_read_at can return a negative st64,
	// which would wrap to a huge value and pass this check — TODO confirm
	i = r_buf_read_at (bin->b, shdr->sh_offset, need, shdr->sh_size);
	if (i < 1) {
		goto beach;
	}
/* Main loop of store_versioninfo_gnu_verneed(): iterates sh_info Elf_(Verneed)
 * records, each with a chain of Elf_(Vernaux) entries, validating every index
 * against dynstr_size and every pointer advance against the buffer end before
 * storing the decoded fields into nested sdb namespaces. */
//XXX we should use DT_VERNEEDNUM instead of sh_info //TODO https://sourceware.org/ml/binutils/2014-11/msg00353.html for (i = 0, cnt = 0; cnt < shdr->sh_info; cnt++) { int j, isum; ut8 *vstart = need + i; Elf_(Verneed) vvn = {0}; if (vstart + sizeof (Elf_(Verneed)) > end) { goto beach; } Elf_(Verneed) *entry = &vvn; char key[32] = {0}; sdb_version = sdb_new0 (); if (!sdb_version) { goto beach; } j = 0; vvn.vn_version = READ16 (vstart, j); vvn.vn_cnt = READ16 (vstart, j); vvn.vn_file = READ32 (vstart, j); vvn.vn_aux = READ32 (vstart, j); vvn.vn_next = READ32 (vstart, j); sdb_num_set (sdb_version, "vn_version", entry->vn_version, 0); sdb_num_set (sdb_version, "idx", i, 0); if (entry->vn_file > bin->dynstr_size) { goto beach; } { char *s = r_str_ndup (&bin->dynstr[entry->vn_file], 16); sdb_set (sdb_version, "file_name", s, 0); free (s); } sdb_num_set (sdb_version, "cnt", entry->vn_cnt, 0); st32 vnaux = entry->vn_aux; if (vnaux < 1) { goto beach; } vstart += vnaux; ut32 vn_cnt = entry->vn_cnt; for (j = 0, isum = i + entry->vn_aux; j < vn_cnt && vstart + sizeof (Elf_(Vernaux)) <= end; j++) { int k; Elf_(Vernaux) *aux = NULL; Elf_(Vernaux) vaux = {0}; aux = (Elf_(Vernaux)*)&vaux; k = 0; vaux.vna_hash = READ32 (vstart, k); vaux.vna_flags = READ16 (vstart, k); vaux.vna_other = READ16 (vstart, k); vaux.vna_name = READ32 (vstart, k); vaux.vna_next = READ32 (vstart, k); if (aux->vna_name > bin->dynstr_size) { goto beach; } #if 1 sdb_vernaux = sdb_new0 (); if (!sdb_vernaux) { goto beach; } sdb_num_set (sdb_vernaux, "idx", isum, 0); if (aux->vna_name > 0 && aux->vna_name + 8 < bin->dynstr_size) { char name [16]; strncpy (name, &bin->dynstr[aux->vna_name], sizeof (name)-1); name[sizeof (name)-1] = 0; sdb_set (sdb_vernaux, "name", name, 0); } sdb_set (sdb_vernaux, "flags", get_ver_flags (aux->vna_flags), 0); sdb_num_set (sdb_vernaux, "version", aux->vna_other, 0); isum += aux->vna_next; vstart += aux->vna_next; snprintf (key, sizeof (key), "vernaux%d", j); sdb_ns_set
/* End of store_versioninfo_gnu_verneed() (vn_next<=0 terminates the walk to avoid
 * infinite loops), followed by store_versioninfo() which dispatches each section
 * header to the verdef/verneed/versym parser, and the opening checks of
 * init_dynstr() which loads the .dynstr table. */
(sdb_version, key, sdb_vernaux); #else char *key = r_str_newf ("vernaux%d", j); char *val = r_str_newf ("%d,%s", isum, get_ver_flags (aux->vna_flags)); sdb_set (sdb_version, key, val, 0); free (key); free (val); #endif } if ((int)entry->vn_next < 0) { R_LOG_DEBUG ("Invalid vn_next at 0x%08" PFMT64x, (ut64)shdr->sh_offset); break; } i += entry->vn_next; snprintf (key, sizeof (key), "version%d", cnt ); sdb_ns_set (sdb, key, sdb_version); //if entry->vn_next is 0 it iterate infinitely if (!entry->vn_next) { break; } } free (need); return sdb; beach: free (need); sdb_free (sdb_vernaux); sdb_free (sdb_version); sdb_free (sdb); return NULL; } static Sdb *store_versioninfo(ELFOBJ *bin) { Sdb *sdb_versioninfo = NULL; int num_verdef = 0; int num_verneed = 0; int num_versym = 0; size_t i; if (!bin || !bin->shdr) { return NULL; } if (!(sdb_versioninfo = sdb_new0 ())) { return NULL; } for (i = 0; i < bin->ehdr.e_shnum; i++) { Sdb *sdb = NULL; char key[32] = {0}; int size = bin->shdr[i].sh_size; if (size - (i * sizeof (Elf_(Shdr)) > bin->size)) { size = bin->size - (i*sizeof (Elf_(Shdr))); } int left = size - (i * sizeof (Elf_(Shdr))); left = R_MIN (left, bin->shdr[i].sh_size); if (left < 0) { break; } switch (bin->shdr[i].sh_type) { case SHT_GNU_verdef: sdb = store_versioninfo_gnu_verdef (bin, &bin->shdr[i], left); snprintf (key, sizeof (key), "verdef%d", num_verdef++); sdb_ns_set (sdb_versioninfo, key, sdb); break; case SHT_GNU_verneed: sdb = store_versioninfo_gnu_verneed (bin, &bin->shdr[i], left); snprintf (key, sizeof (key), "verneed%d", num_verneed++); sdb_ns_set (sdb_versioninfo, key, sdb); break; case SHT_GNU_versym: sdb = store_versioninfo_gnu_versym (bin, &bin->shdr[i], left); snprintf (key, sizeof (key), "versym%d", num_versym++); sdb_ns_set (sdb_versioninfo, key, sdb); break; } } return sdb_versioninfo; } static bool init_dynstr(ELFOBJ *bin) { int i, r; const char *section_name = NULL; if (!bin || !bin->shdr) { return false; } if (!bin->shstrtab) { return false; }
/* Body of init_dynstr(): scans section headers for a SHT_STRTAB named ".dynstr",
 * validates its offset/size against the file (including wrap-around), loads it into
 * bin->dynstr (NUL-padded via calloc of size+1) and records dynstr_size.
 * Followed by rel_cache_new(), which builds a symbol-index -> reloc hashtable,
 * and elf_init(), the top-level initializer that parses headers, string tables,
 * the dynamic section, sections and relocs. */
for (i = 0; i < bin->ehdr.e_shnum; i++) { if (bin->shdr[i].sh_name > bin->shstrtab_size) { return false; } section_name = &bin->shstrtab[bin->shdr[i].sh_name]; if (bin->shdr[i].sh_type == SHT_STRTAB && !strcmp (section_name, ".dynstr")) { if (!(bin->dynstr = (char*) calloc (bin->shdr[i].sh_size + 1, sizeof (char)))) { R_LOG_ERROR ("Cannot allocate memory for dynamic strings\n"); return false; } if (bin->shdr[i].sh_offset > bin->size) { return false; } if (bin->shdr[i].sh_offset + bin->shdr[i].sh_size > bin->size) { return false; } if (bin->shdr[i].sh_offset + bin->shdr[i].sh_size < bin->shdr[i].sh_size) { return false; } r = r_buf_read_at (bin->b, bin->shdr[i].sh_offset, (ut8*)bin->dynstr, bin->shdr[i].sh_size); if (r < 1) { R_FREE (bin->dynstr); bin->dynstr_size = 0; return false; } bin->dynstr_size = bin->shdr[i].sh_size; return true; } } return false; } static HtUP *rel_cache_new(RBinElfReloc *relocs, ut32 reloc_num) { if (!relocs || reloc_num == 0) { return NULL; } const int htsize = R_MIN (reloc_num, 1024); HtUP *rel_cache = ht_up_new_size (htsize, NULL, NULL, NULL); if (rel_cache) { size_t i; for (i = 0; i < reloc_num; i++) { RBinElfReloc *tmp = relocs + i; ht_up_insert (rel_cache, tmp->sym, tmp); } } return rel_cache; } static bool elf_init(ELFOBJ *bin) { /* bin is not an ELF */ if (!init_ehdr (bin)) { return false; } if (!init_phdr (bin) && !is_bin_etrel (bin)) { R_LOG_DEBUG ("Cannot initialize program headers"); } if (bin->ehdr.e_type != ET_CORE) { if (!init_shdr (bin)) { R_LOG_DEBUG ("Cannot initialize section headers"); } if (!init_strtab (bin)) { R_LOG_DEBUG ("Cannot initialize strings table"); } if (!init_dynstr (bin) && !is_bin_etrel (bin)) { R_LOG_DEBUG ("Cannot initialize dynamic strings"); } bin->baddr = Elf_(r_bin_elf_get_baddr) (bin); if (!init_dynamic_section (bin) && !Elf_(r_bin_elf_is_static) (bin) && !is_bin_etrel (bin)) { R_LOG_DEBUG ("Cannot initialize dynamic section"); } } bin->imports_by_ord_size = 0; bin->imports_by_ord = NULL;
/* Tail of elf_init() (caches sections, relocs and the reloc hashtable), the
 * section offset/addr lookup helpers, and the head of get_got_entry(), which
 * dereferences a GOT slot through the virtual->physical translation. */
bin->symbols_by_ord_size = 0; bin->symbols_by_ord = NULL; bin->g_sections = Elf_(r_bin_elf_get_sections) (bin); bin->boffset = Elf_(r_bin_elf_get_boffset) (bin); bin->g_relocs = Elf_(r_bin_elf_get_relocs) (bin); bin->rel_cache = rel_cache_new (bin->g_relocs, bin->g_reloc_num); sdb_ns_set (bin->kv, "versioninfo", store_versioninfo (bin)); return true; } ut64 Elf_(r_bin_elf_get_section_offset)(ELFOBJ *bin, const char *section_name) { RBinElfSection *section = get_section_by_name (bin, section_name); return section? section->offset: UT64_MAX; } ut64 Elf_(r_bin_elf_get_section_addr)(ELFOBJ *bin, const char *section_name) { RBinElfSection *section = get_section_by_name (bin, section_name); return section? section->rva: UT64_MAX; } ut64 Elf_(r_bin_elf_get_section_addr_end)(ELFOBJ *bin, const char *section_name) { RBinElfSection *section = get_section_by_name (bin, section_name); return section? section->rva + section->size: UT64_MAX; } static ut64 get_got_entry(ELFOBJ *bin, RBinElfReloc *rel) { if (!rel->rva) { return UT64_MAX; } ut64 p_sym_got_addr = Elf_(r_bin_elf_v2p_new) (bin, rel->rva); ut64 addr = R_BIN_ELF_BREADWORD (bin->b, p_sym_got_addr); return (!addr || addr == R_BIN_ELF_WORD_MAX) ?
/* Per-architecture PLT/GOT import-address resolvers. Each helper maps a reloc to
 * the physical address of its PLT stub using arch-specific entry sizes and header
 * offsets (the magic constants below are PLT-layout specific; do not "simplify"). */
UT64_MAX : addr; } static bool is_thumb_symbol(ut64 plt_addr) { return plt_addr & 1; } static ut64 get_import_addr_arm(ELFOBJ *bin, RBinElfReloc *rel) { ut64 got_addr = bin->dyn_info.dt_pltgot; if (got_addr == R_BIN_ELF_ADDR_MAX) { return UT64_MAX; } ut64 plt_addr = get_got_entry (bin, rel); if (plt_addr == UT64_MAX) { return UT64_MAX; } ut64 pos = COMPUTE_PLTGOT_POSITION (rel, got_addr, 0x3); switch (rel->type) { case R_ARM_JUMP_SLOT: plt_addr += pos * 12 + 20; if (is_thumb_symbol (plt_addr)) { plt_addr--; } return plt_addr; case R_AARCH64_RELATIVE: R_LOG_WARN ("Unsupported relocation type for imports %d", rel->type); return UT64_MAX; case R_AARCH64_IRELATIVE: if (rel->addend > plt_addr) { // start return (plt_addr + pos * 16 + 32) + rel->addend; } // same as fallback to JUMP_SLOT return plt_addr + pos * 16 + 32; case R_AARCH64_JUMP_SLOT: return plt_addr + pos * 16 + 32; default: R_LOG_WARN ("Unsupported relocation type for imports %d", rel->type); return UT64_MAX; } return UT64_MAX; } static ut64 get_import_addr_mips(ELFOBJ *bin, RBinElfReloc *rel) { ut64 jmprel_addr = bin->dyn_info.dt_jmprel; ut64 got_addr = bin->dyn_info.dt_mips_pltgot; if (jmprel_addr == R_BIN_ELF_ADDR_MAX || got_addr == R_BIN_ELF_ADDR_MAX) { return UT64_MAX; } ut64 pos = COMPUTE_PLTGOT_POSITION(rel, got_addr, 0x2); ut8 buf[1024]; ut64 plt_addr = jmprel_addr + bin->dyn_info.dt_pltrelsz; ut64 p_plt_addr = Elf_(r_bin_elf_v2p_new) (bin, plt_addr); int res = r_buf_read_at (bin->b, p_plt_addr, buf, sizeof (buf)); if (res != sizeof (buf)) { return UT64_MAX; } const ut8 *base = r_mem_mem_aligned (buf, sizeof (buf), (const ut8 *)"\x3c\x0f\x00", 3, 4); plt_addr += base? (int)(size_t) (base - buf): MIPS_PLT_OFFSET + 8; // HARDCODED HACK plt_addr += pos * 16; return plt_addr; } static size_t get_size_rel_mode(Elf_(Xword) mode) { return mode == DT_RELA?
/* get_size_rel_mode() tail: Rela entries carry an explicit addend, Rel do not.
 * Then riscv/loongarch/sparc resolvers, all derived from the shared GOT entry. */
sizeof (Elf_(Rela)): sizeof (Elf_(Rel)); } static ut64 get_num_relocs_dynamic_plt(ELFOBJ *bin) { if (bin->dyn_info.dt_pltrelsz) { const ut64 size = bin->dyn_info.dt_pltrelsz; const ut64 relsize = get_size_rel_mode (bin->dyn_info.dt_pltrel); return size / relsize; } return 0; } static ut64 get_import_addr_riscv(ELFOBJ *bin, RBinElfReloc *rel) { ut64 got_addr = bin->dyn_info.dt_pltgot; if (got_addr == R_BIN_ELF_ADDR_MAX) { return UT64_MAX; } ut64 plt_addr = get_got_entry (bin, rel); if (plt_addr == UT64_MAX) { return UT64_MAX; } ut64 pos = COMPUTE_PLTGOT_POSITION(rel, got_addr, 0x2); return plt_addr + RISCV_PLT_OFFSET + pos * RISCV_PLT_ENTRY_SIZE; } static ut64 get_import_addr_loongarch(ELFOBJ *bin, RBinElfReloc *rel) { ut64 got_addr = bin->dyn_info.dt_pltgot; if (got_addr == R_BIN_ELF_ADDR_MAX) { return UT64_MAX; } ut64 plt_addr = get_got_entry (bin, rel); if (plt_addr == UT64_MAX) { return UT64_MAX; } ut64 pos = COMPUTE_PLTGOT_POSITION(rel, got_addr, 0x2); return plt_addr + LOONGARCH_PLT_OFFSET + pos * LOONGARCH_PLT_ENTRY_SIZE; } static ut64 get_import_addr_sparc(ELFOBJ *bin, RBinElfReloc *rel) { if (rel->type != R_SPARC_JMP_SLOT) { R_LOG_DEBUG ("Unknown sparc reloc type %d", rel->type); return UT64_MAX; } ut64 tmp = get_got_entry (bin, rel); return (tmp == UT64_MAX) ?
/* PPC resolver (reads the PLT base word and walks forward/backward depending on
 * endianness), plus the x86 full-RELRO fallback which scans .plt.got for the jmp
 * stub whose RIP-relative target matches the reloc. */
UT64_MAX : tmp + SPARC_OFFSET_PLT_ENTRY_FROM_GOT_ADDR; } static ut64 get_import_addr_ppc(ELFOBJ *bin, RBinElfReloc *rel) { ut64 plt_addr = bin->dyn_info.dt_pltgot; if (plt_addr == R_BIN_ELF_ADDR_MAX) { return UT64_MAX; } ut64 p_plt_addr = Elf_(r_bin_elf_v2p_new) (bin, plt_addr); if (p_plt_addr == UT64_MAX) { return UT64_MAX; } ut64 base = r_buf_read_ble32_at (bin->b, p_plt_addr, bin->endian); if (base == UT32_MAX) { return UT64_MAX; } ut64 nrel = get_num_relocs_dynamic_plt (bin); ut64 pos = COMPUTE_PLTGOT_POSITION(rel, plt_addr, 0x0); if (bin->endian) { base -= (nrel * 16); base += (pos * 16); return base; } base -= (nrel * 12) + 20; base += (pos * 8); return base; } static ut64 get_import_addr_x86_manual(ELFOBJ *bin, RBinElfReloc *rel) { ut64 got_addr = bin->dyn_info.dt_pltgot; if (got_addr == R_BIN_ELF_ADDR_MAX) { return UT64_MAX; } ut64 got_offset = Elf_(r_bin_elf_v2p_new) (bin, got_addr); if (got_offset == UT64_MAX) { return UT64_MAX; } //XXX HACK ALERT!!!! full relro?? try to fix it //will there always be .plt.got, what would happen if is .got.plt? RBinElfSection *s = get_section_by_name (bin, ".plt.got"); if (Elf_(r_bin_elf_has_relro) (bin) < R_ELF_PART_RELRO || !s) { return UT64_MAX; } ut8 buf[sizeof (Elf_(Addr))] = {0}; ut64 plt_addr = s->offset; ut64 plt_sym_addr; while (plt_addr + 2 + 4 < s->offset + s->size) { /*we try to locate the plt entry that correspond with the relocation since got does not point back to .plt.
/* x86_manual loop body (decodes the jmp immediate at plt_addr+2), the
 * .plt.sec-aware get_import_addr_x86(), and the machine-type dispatcher
 * get_import_addr() which looks up the reloc in rel_cache by symbol index. */
In this case it has the following form ff253a152000 JMP QWORD [RIP + 0x20153A] 6690 NOP ---- ff25ec9f0408 JMP DWORD [reloc.puts_236] plt_addr + 2 to remove jmp opcode and get the imm reading 4 and if RIP (plt_addr + 6) + imm == rel->offset return plt_addr, that will be our sym addr perhaps this hack doesn't work on 32 bits */ int res = r_buf_read_at (bin->b, plt_addr + 2, buf, sizeof (ut32)); if (res < 0) { return UT64_MAX; } size_t i = 0; plt_sym_addr = R_BIN_ELF_READWORD (buf, i); //relative address if ((plt_addr + 6 + Elf_(r_bin_elf_v2p) (bin, plt_sym_addr)) == rel->rva) { return plt_addr; } if (plt_sym_addr == rel->rva) { return plt_addr; } plt_addr += 8; } return UT64_MAX; } static ut64 get_import_addr_x86(ELFOBJ *bin, RBinElfReloc *rel) { ut64 tmp = get_got_entry (bin, rel); if (tmp == UT64_MAX) { return get_import_addr_x86_manual (bin, rel); } RBinElfSection *pltsec_section = get_section_by_name (bin, ".plt.sec"); if (pltsec_section) { ut64 got_addr = bin->dyn_info.dt_pltgot; ut64 pos = COMPUTE_PLTGOT_POSITION (rel, got_addr, 0x3); return pltsec_section->rva + pos * X86_PLT_ENTRY_SIZE; } return tmp + X86_OFFSET_PLT_ENTRY_FROM_GOT_ADDR; } static ut64 get_import_addr(ELFOBJ *bin, int sym) { if ((!bin->shdr || !bin->strtab) && !bin->phdr) { return UT64_MAX; } if (!bin->rel_cache) { return UT64_MAX; } // lookup the right rel/rela entry RBinElfReloc *rel = ht_up_find (bin->rel_cache, sym, NULL); if (!rel) { return UT64_MAX; } switch (bin->ehdr.e_machine) { case EM_ARM: case EM_AARCH64: return get_import_addr_arm (bin, rel); case EM_MIPS: // MIPS32 BIG ENDIAN relocs return get_import_addr_mips (bin, rel); case EM_VAX: // as beautiful as riscv <3 return get_import_addr_riscv (bin, rel); case EM_RISCV: return get_import_addr_riscv (bin, rel); case EM_SPARC: case EM_SPARCV9: case EM_SPARC32PLUS: return get_import_addr_sparc (bin, rel); case EM_PPC: case EM_PPC64: return get_import_addr_ppc (bin, rel); case EM_386: case EM_X86_64: return get_import_addr_x86 (bin,
rel); case EM_LOONGARCH: return get_import_addr_loongarch(bin, rel); default: eprintf ("Unsupported relocs type %" PFMT64u " for arch %d\n", (ut64) rel->type, bin->ehdr.e_machine); return UT64_MAX; } } int Elf_(r_bin_elf_has_nx)(ELFOBJ *bin) { r_return_val_if_fail (bin, 0); int i; if (bin && bin->phdr) { for (i = 0; i < bin->ehdr.e_phnum; i++) { if (bin->phdr[i].p_type == PT_GNU_STACK) { return (!(bin->phdr[i].p_flags & 1))? 1: 0; } } } return 0; } int Elf_(r_bin_elf_has_relro)(ELFOBJ *bin) { r_return_val_if_fail (bin, R_ELF_NO_RELRO); bool haveBindNow = false; bool haveGnuRelro = false; if (bin->dyn_info.dt_bind_now) { haveBindNow = true; } else if (bin->dyn_info.dt_flags != R_BIN_ELF_XWORD_MAX && bin->dyn_info.dt_flags != R_BIN_ELF_XWORD_MAX) { haveBindNow = bin->dyn_info.dt_flags_1 & DF_1_NOW; } if (bin->phdr) { size_t i; for (i = 0; i < bin->ehdr.e_phnum; i++) { if (bin->phdr[i].p_type == PT_GNU_RELRO) { haveGnuRelro = true; break; } } } if (haveGnuRelro) { if (haveBindNow) { return R_ELF_FULL_RELRO; } return R_ELF_PART_RELRO; } return R_ELF_NO_RELRO; } /* To compute the base address, one determines the memory address associated with the lowest p_vaddr value for a PT_LOAD segment. One then obtains the base address by truncating the memory address to the nearest multiple of the maximum page size */ ut64 Elf_(r_bin_elf_get_baddr)(ELFOBJ *bin) { ut64 tmp, base = UT64_MAX; if (!bin) { return 0; } if (bin->phdr) { size_t i; for (i = 0; i < bin->ehdr.e_phnum; i++) { if (bin->phdr[i].p_type == PT_LOAD) { tmp = (ut64)bin->phdr[i].p_vaddr & ELF_PAGE_MASK; tmp = tmp - (tmp % (1 << ELF_PAGE_SIZE)); if (tmp < base) { base = tmp; } } } } if (base == UT64_MAX && is_bin_etrel (bin)) { //we return our own base address for ET_REL type //we act as a loader for ELF return 0x08000000; } return base == UT64_MAX? 
/* get_baddr() tail, get_boffset() (lowest page-aligned PT_LOAD file offset),
 * and the init/fini/entry offset heuristics which sniff x86 "push imm32"
 * sequences near the entrypoint to recover __libc_csu_init/_fini addresses. */
0: base; } ut64 Elf_(r_bin_elf_get_boffset)(ELFOBJ *bin) { ut64 tmp, base = UT64_MAX; r_return_val_if_fail (bin, 0); if (!bin->phdr) { return 0; // TODO: should return ut64.max } size_t i; for (i = 0; i < bin->ehdr.e_phnum; i++) { if (bin->phdr[i].p_type == PT_LOAD) { tmp = (ut64)bin->phdr[i].p_offset & ELF_PAGE_MASK; tmp = tmp - (tmp % (1 << ELF_PAGE_SIZE)); if (tmp < base) { base = tmp; } } } return base == UT64_MAX? 0: base; } ut64 Elf_(r_bin_elf_get_init_offset)(ELFOBJ *bin) { ut64 entry = Elf_(r_bin_elf_get_entry_offset) (bin); ut8 buf[128]; if (!bin || entry == UT64_MAX) { return UT64_MAX; } if (r_buf_read_at (bin->b, entry + 16, buf, sizeof (buf)) < 1) { R_LOG_DEBUG ("read (init_offset)"); return 0; } if (buf[0] == 0x68) { // push // x86 only ut64 addr; memmove (buf, buf + 1, 4); addr = (ut64)r_read_le32 (buf); return Elf_(r_bin_elf_v2p) (bin, addr); } return 0; } ut64 Elf_(r_bin_elf_get_fini_offset)(ELFOBJ *bin) { r_return_val_if_fail (bin, UT64_MAX); ut64 entry = Elf_(r_bin_elf_get_entry_offset) (bin); if (entry == UT64_MAX) { return UT64_MAX; } ut8 buf[512]; if (r_buf_read_at (bin->b, entry + 11, buf, sizeof (buf)) == -1) { R_LOG_ERROR ("read (get_fini)"); return 0; } if (*buf == 0x68) { // push // x86/32 only memmove (buf, buf + 1, 4); ut64 addr = (ut64)r_read_le32 (buf); return Elf_(r_bin_elf_v2p) (bin, addr); } return 0; } ut64 Elf_(r_bin_elf_get_entry_offset)(ELFOBJ *bin) { r_return_val_if_fail (bin, UT64_MAX); ut64 entry = bin->ehdr.e_entry; if (!entry) { if (!Elf_(r_bin_elf_is_executable) (bin)) { return UT64_MAX; } entry = Elf_(r_bin_elf_get_section_offset)(bin, ".init.text"); if (entry != UT64_MAX) { return entry; } entry = Elf_(r_bin_elf_get_section_offset)(bin, ".text"); if (entry != UT64_MAX) { return entry; } return Elf_(r_bin_elf_get_section_offset)(bin, ".init"); } return Elf_(r_bin_elf_v2p) (bin, entry); } static ut64 getmainsymbol(ELFOBJ *bin) { struct r_bin_elf_symbol_t *symbol = Elf_(r_bin_elf_get_symbols) (bin); if (symbol) { size_t i;
/* getmainsymbol() tail and the start of get_main_offset(): a collection of
 * per-arch byte-pattern heuristics (ARM64, ARM thumb/non-thumb glibc preludes,
 * MIPS) that locate main() from the entrypoint's startup stub. */
for (i = 0; !symbol[i].last; i++) { if (!strcmp (symbol[i].name, "main")) { return symbol[i].offset; } } } return UT64_MAX; } ut64 Elf_(r_bin_elf_get_main_offset)(ELFOBJ *bin) { r_return_val_if_fail (bin, UT64_MAX); ut64 entry = Elf_(r_bin_elf_get_entry_offset) (bin); if (entry == UT64_MAX) { return UT64_MAX; } ut8 buf[256]; if (entry > bin->size || (entry + sizeof (buf)) > bin->size) { return UT64_MAX; } // unnecessary to read 512 bytes imho if (r_buf_read_at (bin->b, entry, buf, sizeof (buf)) < 1) { R_LOG_ERROR ("read (main)"); return UT64_MAX; } // ARM64 if (buf[0x18 + 3] == 0x58 && buf[0x2f] == 0x00) { ut32 entry_vaddr = Elf_(r_bin_elf_p2v) (bin, entry); ut32 main_addr = r_read_le32 (&buf[0x30]); if ((main_addr >> 16) == (entry_vaddr >> 16)) { return Elf_(r_bin_elf_v2p) (bin, main_addr); } } // TODO: Use arch to identify arch before memcmp's // ARM Glibc if (entry & 1) { int delta = 0; /* thumb entry points */ if (!memcmp (buf, "\xf0\x00\x0b\x4f\xf0\x00\x0e\x02\xbc\x6a\x46", 11)) { /* newer versions of gcc use push/pop */ delta = 0x28; } else if (!memcmp (buf, "\xf0\x00\x0b\x4f\xf0\x00\x0e\x5d\xf8\x04\x1b", 11)) { /* older versions of gcc (4.5.x) use ldr/str */ delta = 0x30; } if (delta) { ut64 pa = Elf_(r_bin_elf_v2p) (bin, r_read_le32 (&buf[delta-1]) & ~1); if (pa < r_buf_size (bin->b)) { return pa; } } } else { /* non-thumb entry points */ if (!memcmp (buf, "\x00\xb0\xa0\xe3\x00\xe0\xa0\xe3", 8)) { return Elf_(r_bin_elf_v2p) (bin, r_read_le32 (&buf[0x34]) & ~1); } if (!memcmp (buf, "\x24\xc0\x9f\xe5\x00\xb0\xa0\xe3", 8)) { return Elf_(r_bin_elf_v2p) (bin, r_read_le32 (&buf[0x30]) & ~1); } } // MIPS /* get .got, calculate offset of main symbol */ if (!memcmp (buf, "\x21\x00\xe0\x03\x01\x00\x11\x04", 8)) { /* assuming the startup code looks like got = gp-0x7ff0 got[index__libc_start_main] ( got[index_main] ); looking for the instruction generating the first argument to find main lw a0, offset(gp) */ ut64 got_offset; if ((got_offset =
/* MIPS got-based lookup continuation, then X86-CGC / X86-PIE / X86-NONPIE
 * heuristics (matching call/lea/mov opcode patterns around the entrypoint)
 * with a final fallback to the "main" symbol from the symbol table. */
Elf_(r_bin_elf_get_section_offset) (bin, ".got")) != -1 || (got_offset = Elf_(r_bin_elf_get_section_offset) (bin, ".got.plt")) != -1) { const ut64 gp = got_offset + 0x7ff0; size_t i, len = sizeof (buf) / sizeof (buf[0]); for (i = 0; i < len; i += 4) { const ut32 instr = r_read_le32 (&buf[i]); if ((instr & 0xffff0000) == 0x8f840000) { // lw a0, offset(gp) const short delta = instr & 0x0000ffff; r_buf_read_at (bin->b, /* got_entry_offset = */ gp + delta, buf, 4); return Elf_(r_bin_elf_v2p) (bin, r_read_le32 (&buf[0])); } } } return 0; } // X86-CGC if (buf[0] == 0xe8 && !memcmp (buf + 5, "\x50\xe8\x00\x00\x00\x00\xb8\x01\x00\x00\x00\x53", 12)) { size_t SIZEOF_CALL = 5; ut64 rel_addr = (ut64)((int)(buf[1] + (buf[2] << 8) + (buf[3] << 16) + (buf[4] << 24))); ut64 addr = Elf_(r_bin_elf_p2v)(bin, entry + SIZEOF_CALL); addr += rel_addr; return Elf_(r_bin_elf_v2p) (bin, addr); } // X86-PIE if (buf[0x00] == 0x48 && buf[0x1e] == 0x8d && buf[0x11] == 0xe8) { ut32 *pmain = (ut32*)(buf + 0x30); ut64 vmain = Elf_(r_bin_elf_p2v) (bin, (ut64)*pmain); ut64 ventry = Elf_(r_bin_elf_p2v) (bin, entry); if (vmain >> 16 == ventry >> 16) { return (ut64)vmain; } } // X86-PIE if (buf[0x1d] == 0x48 && buf[0x1e] == 0x8b) { if (!memcmp (buf, "\x31\xed\x49\x89", 4)) {// linux ut64 maddr, baddr; ut8 n32s[sizeof (ut32)] = {0}; maddr = entry + 0x24 + r_read_le32 (buf + 0x20); if (r_buf_read_at (bin->b, maddr, n32s, sizeof (ut32)) == -1) { R_LOG_ERROR ("read (maddr) 2"); return 0; } maddr = (ut64)r_read_le32 (&n32s[0]); baddr = (bin->ehdr.e_entry >> 16) << 16; if (bin->phdr) { baddr = Elf_(r_bin_elf_get_baddr) (bin); } maddr += baddr; return maddr; } } // X86-NONPIE #if R_BIN_ELF64 if (!memcmp (buf, "\x49\x89\xd9", 3) && buf[156] == 0xe8) { // openbsd return r_read_le32 (&buf[157]) + entry + 156 + 5; } if (!memcmp (buf+29, "\x48\xc7\xc7", 3)) { // linux ut64 addr = (ut64)r_read_le32 (&buf[29 + 3]); return Elf_(r_bin_elf_v2p) (bin, addr); } #else if (buf[23] == '\x68') { ut64 addr = (ut64)r_read_le32
/* get_main_offset() endgame: 32-bit push-imm fallback, the linux64 PIE prelude
 * scan (optionally skipping a leading endbr64), and sym.main lookup. Followed by
 * get_stripped() (SHT_SYMTAB / .gnu_debugdata check), intrp() (PT_INTERP string)
 * and the opening of is_static(). */
(&buf[23 + 1]); return Elf_(r_bin_elf_v2p) (bin, addr); } #endif /* linux64 pie main -- probably buggy in some cases */ int bo = 29; // Begin offset may vary depending on the entry prelude // endbr64 - fedora bins have this if (buf[0] == 0xf3 && buf[1] == 0x0f && buf[2] == 0x1e && buf[3] == 0xfa) { // Change begin offset if binary starts with 'endbr64' bo = 33; } if (buf[bo] == 0x48) { ut8 ch = buf[bo + 1]; if (ch == 0x8d) { // lea rdi, qword [rip-0x21c4] ut8 *p = buf + bo + 3; st32 maindelta = (st32)r_read_le32 (p); ut64 vmain = (ut64)(entry + bo + maindelta) + 7; ut64 ventry = Elf_(r_bin_elf_p2v) (bin, entry); if ((vmain >> 16) == (ventry >> 16)) { return (ut64)vmain; } } else if (ch == 0xc7) { // mov rdi, 0xADDR ut8 *p = buf + bo + 3; return (ut64)(ut32)r_read_le32 (p); } } /* find sym.main if possible */ { ut64 m = getmainsymbol (bin); if (m != UT64_MAX) { return m; } } return UT64_MAX; } bool Elf_(r_bin_elf_get_stripped)(ELFOBJ *bin) { if (!bin->shdr) { return false; } if (bin->g_sections) { size_t i; for (i = 0; !bin->g_sections[i].last; i++) { if (!strcmp (bin->g_sections[i].name, ".gnu_debugdata")) { return false; } } } size_t i; for (i = 0; i < bin->ehdr.e_shnum; i++) { if (bin->shdr[i].sh_type == SHT_SYMTAB) { return false; } } return true; } char *Elf_(r_bin_elf_intrp)(ELFOBJ *bin) { int i; if (!bin || !bin->phdr) { return NULL; } for (i = 0; i < bin->ehdr.e_phnum; i++) { if (bin->phdr[i].p_type == PT_INTERP) { ut64 addr = bin->phdr[i].p_offset; int sz = bin->phdr[i].p_filesz; sdb_num_set (bin->kv, "elf_header.intrp_addr", addr, 0); sdb_num_set (bin->kv, "elf_header.intrp_size", sz, 0); if (sz < 1 || sz > r_buf_size (bin->b)) { return NULL; } char *str = malloc (sz + 1); if (!str) { return NULL; } if (r_buf_read_at (bin->b, addr, (ut8*)str, sz) < 1) { R_LOG_ERROR ("read (main)"); free (str); return 0; } str[sz] = 0; sdb_set (bin->kv, "elf_header.intrp", str, 0); return str; } } return NULL; } bool Elf_(r_bin_elf_is_static)(ELFOBJ *bin) { size_t i; if
/* is_static() body (static == no PT_INTERP and no PT_DYNAMIC), EI_DATA encoding
 * string, has_va() (always true), and the e_machine -> radare2 arch-name map. */
(!bin->phdr) { return false; } for (i = 0; i < bin->ehdr.e_phnum; i++) { if (bin->phdr[i].p_type == PT_INTERP || bin->phdr[i].p_type == PT_DYNAMIC) { return false; } } return true; } char* Elf_(r_bin_elf_get_data_encoding)(ELFOBJ *bin) { switch (bin->ehdr.e_ident[EI_DATA]) { case ELFDATANONE: return strdup ("none"); case ELFDATA2LSB: return strdup ("2's complement, little endian"); case ELFDATA2MSB: return strdup ("2's complement, big endian"); default: return r_str_newf ("<unknown: %x>", bin->ehdr.e_ident[EI_DATA]); } } int Elf_(r_bin_elf_has_va)(ELFOBJ *bin) { return true; } char* Elf_(r_bin_elf_get_arch)(ELFOBJ *bin) { switch (bin->ehdr.e_machine) { case EM_ARC: case EM_ARC_A5: return strdup ("arc"); case EM_AVR: return strdup ("avr"); case EM_BA2_NON_STANDARD: case EM_BA2: return strdup ("ba2"); case EM_CRIS: return strdup ("cris"); case EM_68K: return strdup ("m68k"); case EM_MIPS: case EM_MIPS_RS3_LE: case EM_MIPS_X: return strdup ("mips"); case EM_MCST_ELBRUS: return strdup ("elbrus"); case EM_TRICORE: return strdup ("tricore"); case EM_RCE: return strdup ("mcore"); case EM_ARM: case EM_AARCH64: return strdup ("arm"); case EM_QDSP6: // EM_HEXAGON return strdup ("hexagon"); case EM_BLACKFIN: return strdup ("blackfin"); case EM_SPARC: case EM_SPARC32PLUS: case EM_SPARCV9: return strdup ("sparc"); case EM_PPC: case EM_PPC64: return strdup ("ppc"); case EM_PARISC: return strdup ("hppa"); case EM_PROPELLER: return strdup ("propeller"); case EM_MICROBLAZE: return strdup ("microblaze.gnu"); case EM_RISCV: return strdup ("riscv"); case EM_VAX: return strdup ("vax"); case EM_XTENSA: return strdup ("xtensa"); case EM_LANAI: return strdup ("lanai"); case EM_VIDEOCORE3: case EM_VIDEOCORE4: return strdup ("vc4"); case EM_MSP430: return strdup ("msp430"); case EM_SH: return strdup ("sh"); case EM_V800: return strdup ("v850"); case EM_V850: return strdup ("v850"); case EM_IA_64: return strdup ("ia64"); case EM_S390: return strdup ("s390"); case EM_KVX: return
/* get_arch() tail (default is "x86"), MIPS ABI detection (n64/n32/o32), MIPS ISA
 * level from e_flags, and get_head_flag() which concatenates cpu + abi strings. */
strdup("kvx"); case EM_LOONGARCH: return strdup ("loongarch"); default: return strdup ("x86"); } } char* Elf_(r_bin_elf_get_abi)(ELFOBJ *bin) { Elf_(Ehdr)* ehdr = (Elf_(Ehdr) *) &bin->ehdr; if (ehdr->e_machine == EM_MIPS) { if (is_elfclass64 (ehdr)) { return strdup ("n64"); } if (is_mips_n32 (ehdr)) { return strdup ("n32"); } if (is_mips_o32 (ehdr)) { return strdup ("o32"); } } return NULL; } char* Elf_(r_bin_elf_get_cpu)(ELFOBJ *bin) { if (bin->phdr && bin->ehdr.e_machine == EM_MIPS) { const ut32 mipsType = bin->ehdr.e_flags & EF_MIPS_ARCH; switch (mipsType) { case EF_MIPS_ARCH_1: return strdup ("mips1"); case EF_MIPS_ARCH_2: return strdup ("mips2"); case EF_MIPS_ARCH_3: return strdup ("mips3"); case EF_MIPS_ARCH_4: return strdup ("mips4"); case EF_MIPS_ARCH_5: return strdup ("mips5"); case EF_MIPS_ARCH_32: return strdup ("mips32"); case EF_MIPS_ARCH_64: return strdup ("mips64"); case EF_MIPS_ARCH_32R2: return strdup ("mips32r2"); case EF_MIPS_ARCH_64R2: return strdup ("mips64r2"); default : return strdup (" Unknown mips ISA"); } } return NULL; } char* Elf_(r_bin_elf_get_head_flag)(ELFOBJ *bin) { char *head_flag = NULL; char *str = Elf_(r_bin_elf_get_cpu) (bin); if (str) { head_flag = r_str_append (head_flag, str); free (str); } str = Elf_(r_bin_elf_get_abi) (bin); if (str) { head_flag = r_str_appendf (head_flag, " %s", str); free (str); } if (R_STR_ISEMPTY (head_flag)) { head_flag = r_str_append (head_flag, "unknown_flag"); } return head_flag; } // http://www.sco.com/developers/gabi/latest/ch4.eheader.html char* Elf_(r_bin_elf_get_machine_name)(ELFOBJ *bin) { switch (bin->ehdr.e_machine) { case EM_NONE: return strdup ("No machine"); case EM_M32: return strdup ("AT&T WE 32100"); case EM_SPARC: return strdup ("SUN SPARC"); case EM_386: return strdup ("Intel 80386"); case EM_68K: return strdup ("Motorola m68k family"); case EM_88K: return strdup ("Motorola m88k family"); case EM_860: return strdup ("Intel 80860"); case EM_MIPS: return strdup ("MIPS R3000"); case
/* get_machine_name() continuation: verbatim e_machine -> human-readable vendor
 * strings per the SCO gABI e_machine registry (function continues past this view). */
EM_S370: return strdup ("IBM System/370"); case EM_MIPS_RS3_LE: return strdup ("MIPS R3000 little-endian"); case EM_PARISC: return strdup ("HPPA"); case EM_VPP500: return strdup ("Fujitsu VPP500"); case EM_SPARC32PLUS: return strdup ("Sun's \"v8plus\""); case EM_960: return strdup ("Intel 80960"); case EM_PPC: return strdup ("PowerPC"); case EM_PPC64: return strdup ("PowerPC 64-bit"); case EM_S390: return strdup ("IBM S390"); case EM_V800: return strdup ("NEC V800 series"); case EM_FR20: return strdup ("Fujitsu FR20"); case EM_RH32: return strdup ("TRW RH-32"); case EM_RCE: return strdup ("Motorola RCE"); case EM_ARM: return strdup ("ARM"); case EM_BLACKFIN: return strdup ("Analog Devices Blackfin"); case EM_FAKE_ALPHA: return strdup ("Digital Alpha"); case EM_SH: return strdup ("Hitachi SH"); case EM_SPARCV9: return strdup ("SPARC v9 64-bit"); case EM_TRICORE: return strdup ("Siemens Tricore"); case EM_ARC: return strdup ("Argonaut RISC Core"); case EM_H8_300: return strdup ("Hitachi H8/300"); case EM_H8_300H: return strdup ("Hitachi H8/300H"); case EM_H8S: return strdup ("Hitachi H8S"); case EM_H8_500: return strdup ("Hitachi H8/500"); case EM_IA_64: return strdup ("Intel Merced"); case EM_MIPS_X: return strdup ("Stanford MIPS-X"); case EM_COLDFIRE: return strdup ("Motorola Coldfire"); case EM_68HC12: return strdup ("Motorola M68HC12"); case EM_MMA: return strdup ("Fujitsu MMA Multimedia Accelerator"); case EM_PCP: return strdup ("Siemens PCP"); case EM_NCPU: return strdup ("Sony nCPU embeeded RISC"); case EM_NDR1: return strdup ("Denso NDR1 microprocessor"); case EM_STARCORE: return strdup ("Motorola Start*Core processor"); case EM_ME16: return strdup ("Toyota ME16 processor"); case EM_ST100: return strdup ("STMicroelectronic ST100 processor"); case EM_TINYJ: return strdup ("Advanced Logic Corp.
Tinyj emb.fam"); case EM_X86_64: return strdup ("AMD x86-64 architecture"); case EM_LANAI: return strdup ("32bit LANAI architecture"); case EM_PDSP: return strdup ("Sony DSP Processor"); case EM_PDP10: return strdup ("Digital Equipment Corp. PDP-10"); case EM_PDP11: return strdup ("Digital Equipment Corp. PDP-11"); case EM_FX66: return strdup ("Siemens FX66 microcontroller"); case EM_ST9PLUS: return strdup ("STMicroelectronics ST9+ 8/16 mc"); case EM_ST7: return strdup ("STmicroelectronics ST7 8 bit mc"); case EM_68HC16: return strdup ("Motorola MC68HC16 microcontroller"); case EM_68HC11: return strdup ("Motorola MC68HC11 microcontroller"); case EM_68HC08: return strdup ("Motorola MC68HC08 microcontroller"); case EM_68HC05: return strdup ("Motorola MC68HC05 microcontroller"); case EM_SVX: return strdup ("Silicon Graphics SVx"); case EM_ST19: return strdup ("STMicroelectronics ST19 8 bit mc"); case EM_VAX: return strdup ("Digital VAX"); case EM_CRIS: return strdup ("Axis Communications 32-bit embedded processor"); case EM_JAVELIN: return strdup ("Infineon Technologies 32-bit embedded processor"); case EM_FIREPATH: return strdup ("Element 14 64-bit DSP Processor"); case EM_ZSP: return strdup ("LSI Logic 16-bit DSP Processor"); case EM_MMIX: return strdup ("Donald Knuth's educational 64-bit processor"); case EM_HUANY: return strdup ("Harvard University machine-independent object files"); case EM_PRISM: return strdup ("SiTera Prism"); case EM_AVR: return strdup ("Atmel AVR 8-bit microcontroller"); case EM_FR30: return strdup ("Fujitsu FR30"); case EM_D10V: return strdup ("Mitsubishi D10V"); case EM_D30V: return strdup ("Mitsubishi D30V"); case EM_V850: return strdup ("NEC v850"); case EM_M32R: return strdup ("Mitsubishi M32R"); case EM_MN10300: return strdup ("Matsushita MN10300"); case EM_MN10200: return strdup ("Matsushita MN10200"); case EM_PJ: return strdup ("picoJava"); case EM_OPENRISC: return strdup ("OpenRISC 32-bit embedded processor"); case EM_ARC_A5: return
strdup ("ARC Cores Tangent-A5"); case EM_XTENSA: return strdup ("Tensilica Xtensa Architecture"); case EM_AARCH64: return strdup ("ARM aarch64"); case EM_PROPELLER: return strdup ("Parallax Propeller"); case EM_MICROBLAZE: return strdup ("Xilinx MicroBlaze"); case EM_RISCV: return strdup ("RISC V"); case EM_VIDEOCORE3: return strdup ("VideoCore III"); case EM_VIDEOCORE4: return strdup ("VideoCore IV"); case EM_LATTICEMICO32: return strdup ("RISC processor for Lattice FPGA architecture"); case EM_SE_C17: return strdup ("Seiko Epson C17 family"); case EM_TI_C6000: return strdup ("The Texas Instruments TMS320C6000 DSP family"); case EM_TI_C2000: return strdup ("The Texas Instruments TMS320C2000 DSP family"); case EM_TI_C5500: return strdup ("The Texas Instruments TMS320C55x DSP family"); case EM_TI_ARP32: return strdup ("Texas Instruments Application Specific RISC Processor, 32bit fetch"); case EM_TI_PRU: return strdup ("Texas Instruments Programmable Realtime Unit"); case EM_MMDSP_PLUS: return strdup ("STMicroelectronics 64bit VLIW Data Signal Processor"); case EM_CYPRESS_M8C: return strdup ("Cypress M8C microprocessor"); case EM_R32C: return strdup ("Renesas R32C series microprocessors"); case EM_TRIMEDIA: return strdup ("NXP Semiconductors TriMedia architecture family"); case EM_QDSP6: return strdup ("QUALCOMM DSP6 Processor"); // Nonstandard case EM_8051: return strdup ("Intel 8051 and variants"); case EM_STXP7X: return strdup ("STMicroelectronics STxP7x family of configurable and extensible RISC processors"); case EM_NDS32: return strdup ("Andes Technology compact code size embedded RISC processor family"); case EM_ECOG1: return strdup ("Cyan Technology eCOG1X family"); // case EM_ECOG1X: return strdup ("Cyan Technology eCOG1X family"); // Nonstandard case EM_MAXQ30: return strdup ("Dallas Semiconductor MAXQ30 Core Micro-controllers"); case EM_XIMO16: return strdup ("New Japan Radio (NJR) 16-bit DSP Processor"); case EM_MANIK: return strdup ("M2000 Reconfigurable
RISC Microprocessor"); case EM_CRAYNV2: return strdup ("Cray Inc. NV2 vector architecture"); case EM_RX: return strdup ("Renesas RX family"); case EM_METAG: return strdup ("Imagination Technologies META processor architecture"); case EM_MCST_ELBRUS: return strdup ("MCST Elbrus general purpose hardware architecture"); case EM_ECOG16: return strdup ("Cyan Technology eCOG16 family"); case EM_CR16: return strdup ("National Semiconductor CompactRISC CR16 16-bit microprocessor"); case EM_ETPU: return strdup ("Freescale Extended Time Processing Unit"); case EM_SLE9X: return strdup ("Infineon Technologies SLE9X core"); case EM_L10M: return strdup ("Intel L10M"); case EM_K10M: return strdup ("Intel K10M"); // case EM_AARCH64: return strdup ("ARM 64-bit architecture (AARCH64)"); // Nonstandard case EM_AVR32: return strdup ("Atmel Corporation 32-bit microprocessor family"); case EM_STM8: return strdup ("STMicroeletronics STM8 8-bit microcontroller"); case EM_TILE64: return strdup ("Tilera TILE64 multicore architecture family"); case EM_TILEPRO: return strdup ("Tilera TILEPro multicore architecture family"); // case EM_MICROBLAZE: return strdup ("Xilinx MicroBlaze 32-bit RISC soft processor core"); // Nonstandard case EM_CUDA: return strdup ("NVIDIA CUDA architecture"); case EM_TILEGX: return strdup ("Tilera TILE-Gx multicore architecture family"); case EM_CLOUDSHIELD: return strdup ("CloudShield architecture family"); case EM_COREA_1ST: return strdup ("KIPO-KAIST Core-A 1st generation processor family"); case EM_COREA_2ND: return strdup ("KIPO-KAIST Core-A 2nd generation processor family"); case EM_ARC_COMPACT2: return strdup ("Synopsys ARCompact V2"); case EM_OPEN8: return strdup ("Open8 8-bit RISC soft processor core"); case EM_RL78: return strdup ("Renesas RL78 family"); case EM_VIDEOCORE5: return strdup ("Broadcom VideoCore V processor"); case EM_78KOR: return strdup ("Renesas 78KOR family"); // case EM_56800EX: return strdup ("Freescale 56800EX Digital Signal Controller
(DSC)"); // Nonstandard case EM_BA1: return strdup ("Beyond BA1 CPU architecture"); case EM_BA2_NON_STANDARD: case EM_BA2: return strdup ("Beyond BA2 CPU architecture"); case EM_XCORE: return strdup ("XMOS xCORE processor family"); case EM_MCHP_PIC: return strdup ("Microchip 8-bit PIC(r) family"); case EM_INTEL205: return strdup ("Reserved by Intel"); case EM_INTEL206: return strdup ("Reserved by Intel"); case EM_INTEL207: return strdup ("Reserved by Intel"); case EM_INTEL208: return strdup ("Reserved by Intel"); case EM_INTEL209: return strdup ("Reserved by Intel"); case EM_KM32: return strdup ("KM211 KM32 32-bit processor"); case EM_KMX32: return strdup ("KM211 KMX32 32-bit processor"); case EM_KMX16: return strdup ("KM211 KMX16 16-bit processor"); case EM_KMX8: return strdup ("KM211 KMX8 8-bit processor"); case EM_KVARC: return strdup ("KM211 KVARC processor"); case EM_CDP: return strdup ("Paneve CDP architecture family"); case EM_COGE: return strdup ("Cognitive Smart Memory Processor"); case EM_COOL: return strdup ("Bluechip Systems CoolEngine"); case EM_NORC: return strdup ("Nanoradio Optimized RISC"); case EM_CSR_KALIMBA: return strdup ("CSR Kalimba architecture family"); case EM_Z80: return strdup ("Zilog Z80"); case EM_VISIUM: return strdup ("Controls and Data Services VISIUMcore processor"); case EM_FT32: return strdup ("FTDI Chip FT32 high performance 32-bit RISC architecture"); case EM_MOXIE: return strdup ("Moxie processor family"); case EM_AMDGPU: return strdup ("AMD GPU architecture"); case EM_LOONGARCH: return strdup ("Loongson Loongarch"); default: return r_str_newf ("<unknown>: 0x%x", bin->ehdr.e_machine); } } char* Elf_(r_bin_elf_get_file_type)(ELFOBJ *bin) { r_return_val_if_fail (bin, NULL); ut32 e_type = (ut32)bin->ehdr.e_type; // cast to avoid warn in iphone-gcc, must be ut16 switch (e_type) { case ET_NONE: return strdup ("NONE (None)"); case ET_REL: return strdup ("REL (Relocatable file)"); case ET_EXEC: return strdup ("EXEC (Executable 
file)"); case ET_DYN: return strdup ("DYN (Shared object file)"); case ET_CORE: return strdup ("CORE (Core file)"); } if ((e_type >= ET_LOPROC) && (e_type <= ET_HIPROC)) { return r_str_newf ("Processor Specific: %x", e_type); } if ((e_type >= ET_LOOS) && (e_type <= ET_HIOS)) { return r_str_newf ("OS Specific: %x", e_type); } return r_str_newf ("<unknown>: %x", e_type); } char* Elf_(r_bin_elf_get_elf_class)(ELFOBJ *bin) { switch (bin->ehdr.e_ident[EI_CLASS]) { case ELFCLASSNONE: return strdup ("none"); case ELFCLASS32: return strdup ("ELF32"); case ELFCLASS64: return strdup ("ELF64"); default: return r_str_newf ("<unknown: %x>", bin->ehdr.e_ident[EI_CLASS]); } } int Elf_(r_bin_elf_get_bits)(ELFOBJ *bin) { /* Hack for ARCompact */ if (bin->ehdr.e_machine == EM_ARC_A5) { return 16; } /* Hack for Ps2 */ if (bin->phdr && bin->ehdr.e_machine == EM_MIPS) { const ut32 mipsType = bin->ehdr.e_flags & EF_MIPS_ARCH; if (bin->ehdr.e_type == ET_EXEC) { int i; bool haveInterp = false; for (i = 0; i < bin->ehdr.e_phnum; i++) { if (bin->phdr[i].p_type == PT_INTERP) { haveInterp = true; } } if (!haveInterp && mipsType == EF_MIPS_ARCH_3) { // Playstation2 Hack return 64; } } // TODO: show this specific asm.cpu somewhere in bininfo (mips1, mips2, mips3, mips32r2, ...) 
switch (mipsType) { case EF_MIPS_ARCH_1: case EF_MIPS_ARCH_2: case EF_MIPS_ARCH_3: case EF_MIPS_ARCH_4: case EF_MIPS_ARCH_5: case EF_MIPS_ARCH_32: return 32; case EF_MIPS_ARCH_64: return 64; case EF_MIPS_ARCH_32R2: return 32; case EF_MIPS_ARCH_64R2: return 64; break; } return 32; } /* Hack for Thumb */ if (bin->ehdr.e_machine == EM_ARM) { if (bin->ehdr.e_type != ET_EXEC) { struct r_bin_elf_symbol_t *symbol; if ((symbol = Elf_(r_bin_elf_get_symbols) (bin))) { int i = 0; for (i = 0; !symbol[i].last; i++) { ut64 paddr = symbol[i].offset; if (paddr & 1) { return 16; } } } } ut64 entry = Elf_(r_bin_elf_get_entry_offset) (bin); if (entry & 1) { return 16; } } switch (bin->ehdr.e_ident[EI_CLASS]) { case ELFCLASS32: return 32; case ELFCLASS64: return 64; case ELFCLASSNONE: default: return 32; // defaults } } static inline int noodle(ELFOBJ *bin, const char *s) { if (r_buf_size (bin->b) >= 64) { ut8 tmp[64] = {0}; if (r_buf_read_at (bin->b, r_buf_size (bin->b) - 64, tmp, 64) == 64) { return (bool)r_mem_mem (tmp, 64, (const ut8 *)s, strlen (s)); } } return false; } static inline bool needle(ELFOBJ *bin, const char *s) { if (bin->shstrtab) { ut32 len = bin->shstrtab_size; if (len > 4096) { len = 4096; // avoid slow loading .. can be buggy? 
} return (bool)r_mem_mem ((const ut8*)bin->shstrtab, len, (const ut8*)s, strlen (s)); } return false; } // TODO: must return const char * all those strings must be const char os[LINUX] or so char* Elf_(r_bin_elf_get_osabi_name)(ELFOBJ *bin) { size_t i; size_t num = bin->ehdr.e_shnum; const char *section_name = NULL; switch (bin->ehdr.e_ident[EI_OSABI]) { case ELFOSABI_LINUX: return strdup("linux"); case ELFOSABI_SOLARIS: return strdup("solaris"); case ELFOSABI_FREEBSD: return strdup("freebsd"); case ELFOSABI_HPUX: return strdup("hpux"); } if (bin->shdr && bin->shstrtab) { for (i = 0; i < num; i++) { if (bin->shdr[i].sh_type == SHT_NOTE && bin->shdr[i].sh_name < bin->shstrtab_size) { section_name = &bin->shstrtab[bin->shdr[i].sh_name]; if (!strcmp (section_name, ".note.openbsd.ident")) { return strdup ("openbsd"); } if (!strcmp (section_name, ".note.minix.ident")) { return strdup ("minix"); } if (!strcmp (section_name, ".note.netbsd.ident")) { return strdup ("netbsd"); } if (!strcmp (section_name, ".note.android.ident")) { return strdup ("android"); } } } } /* Hack to identify OS */ if (needle (bin, "freebsd")) { return strdup ("freebsd"); } if (noodle (bin, "BEOS:APP_VERSION")) { return strdup ("beos"); } if (needle (bin, "GNU")) { return strdup ("linux"); } return strdup ("linux"); } ut8 *Elf_(r_bin_elf_grab_regstate)(ELFOBJ *bin, int *len) { if (bin->phdr) { size_t i; int num = bin->ehdr.e_phnum; for (i = 0; i < num; i++) { if (bin->phdr[i].p_type != PT_NOTE) { continue; } int bits = Elf_(r_bin_elf_get_bits)(bin); int elf_nhdr_size = (bits == 64) ? 
sizeof (Elf64_Nhdr) : sizeof (Elf32_Nhdr); void *elf_nhdr = calloc (elf_nhdr_size, 1); bool regs_found = false; ut64 offset = 0; while (!regs_found) { ut32 n_descsz, n_namesz, n_type; int ret; ret = r_buf_read_at (bin->b, bin->phdr[i].p_offset + offset, elf_nhdr, elf_nhdr_size); if (ret != elf_nhdr_size) { R_LOG_DEBUG ("Cannot read NOTES hdr from CORE file"); free (elf_nhdr); return NULL; } if (bits == 64) { n_descsz = round_up (((Elf64_Nhdr *)elf_nhdr)->n_descsz); n_namesz = round_up (((Elf64_Nhdr *)elf_nhdr)->n_namesz); n_type = ((Elf64_Nhdr *)elf_nhdr)->n_type; } else { n_descsz = round_up (((Elf32_Nhdr *)elf_nhdr)->n_descsz); n_namesz = round_up (((Elf32_Nhdr *)elf_nhdr)->n_namesz); n_type = ((Elf32_Nhdr *)elf_nhdr)->n_type; } if (n_type == NT_PRSTATUS) { regs_found = true; free (elf_nhdr); } else { offset += elf_nhdr_size + n_descsz + n_namesz; } } int regdelta = 0; int regsize = 0; switch (bin->ehdr.e_machine) { case EM_AARCH64: regsize = reginf[AARCH64].regsize; regdelta = reginf[AARCH64].regdelta; break; case EM_ARM: regsize = reginf[ARM].regsize; regdelta = reginf[ARM].regdelta; break; case EM_386: regsize = reginf[X86].regsize; regdelta = reginf[X86].regdelta; break; case EM_X86_64: regsize = reginf[X86_64].regsize; regdelta = reginf[X86_64].regdelta; break; } ut8 *buf = malloc (regsize); if (r_buf_read_at (bin->b, bin->phdr[i].p_offset + offset + regdelta, buf, regsize) != regsize) { free (buf); R_LOG_DEBUG ("Cannot read register state from CORE file"); return NULL; } if (len) { *len = regsize; } return buf; } } R_LOG_DEBUG ("Cannot find NOTE section."); return NULL; } int Elf_(r_bin_elf_is_big_endian)(ELFOBJ *bin) { return (bin->ehdr.e_ident[EI_DATA] == ELFDATA2MSB); } /* XXX Init dt_strtab? 
*/ char *Elf_(r_bin_elf_get_rpath)(ELFOBJ *bin) { r_return_val_if_fail (bin, NULL); Elf_(Xword) val; if (!bin->phdr || !bin->strtab) { return NULL; } if (bin->dyn_info.dt_rpath != R_BIN_ELF_XWORD_MAX) { val = bin->dyn_info.dt_rpath; } else if (bin->dyn_info.dt_runpath != R_BIN_ELF_XWORD_MAX) { val = bin->dyn_info.dt_runpath; } else { return NULL; } if (val > bin->strtab_size) { return NULL; } return r_str_ndup (bin->strtab + val, ELF_STRING_LENGTH); } static bool has_valid_section_header(ELFOBJ *bin, size_t pos) { return bin->g_sections[pos].info < bin->ehdr.e_shnum && bin->shdr; } static void fix_rva_and_offset_relocable_file(ELFOBJ *bin, RBinElfReloc *r, size_t pos) { if (has_valid_section_header (bin, pos)) { size_t idx = bin->g_sections[pos].info; if (idx < bin->ehdr.e_shnum) { ut64 pa = bin->shdr[idx].sh_offset + r->offset; r->offset = pa; r->rva = Elf_(r_bin_elf_p2v) (bin, pa); } else { eprintf ("fix_rva_and_offset_reloc..: invalid index\n"); } } else { r->rva = r->offset; } } static void fix_rva_and_offset_exec_file(ELFOBJ *bin, RBinElfReloc *r) { r->rva = r->offset; r->offset = Elf_(r_bin_elf_v2p) (bin, r->offset); } static void fix_rva_and_offset(ELFOBJ *bin, RBinElfReloc *r, size_t pos) { if (is_bin_etrel (bin)) { fix_rva_and_offset_relocable_file (bin, r, pos); } else { fix_rva_and_offset_exec_file (bin, r); } } static bool read_reloc(ELFOBJ *bin, RBinElfReloc *r, Elf_(Xword) rel_mode, ut64 vaddr) { ut64 offset = Elf_(r_bin_elf_v2p_new) (bin, vaddr); if (offset == UT64_MAX) { return false; } size_t size_struct = get_size_rel_mode (rel_mode); ut8 buf[sizeof (Elf_(Rela))] = {0}; int res = r_buf_read_at (bin->b, offset, buf, size_struct); if (res != size_struct) { return false; } size_t i = 0; Elf_(Rela) reloc_info; reloc_info.r_offset = R_BIN_ELF_READWORD (buf, i); reloc_info.r_info = R_BIN_ELF_READWORD (buf, i); if (rel_mode == DT_RELA) { reloc_info.r_addend = R_BIN_ELF_READWORD (buf, i); r->addend = reloc_info.r_addend; } r->mode = rel_mode; r->last = 0; 
r->offset = reloc_info.r_offset; r->sym = ELF_R_SYM (reloc_info.r_info); r->type = ELF_R_TYPE (reloc_info.r_info); return true; } static size_t get_num_relocs_dynamic(ELFOBJ *bin) { size_t res = 0; if (bin->dyn_info.dt_relaent) { res += bin->dyn_info.dt_relasz / bin->dyn_info.dt_relaent; } if (bin->dyn_info.dt_relent) { res += bin->dyn_info.dt_relsz / bin->dyn_info.dt_relent; } return res + get_num_relocs_dynamic_plt (bin); } static bool sectionIsValid(ELFOBJ *bin, RBinElfSection *sect) { return (sect->offset + sect->size <= bin->size); } static Elf_(Xword) get_section_mode(ELFOBJ *bin, size_t pos) { if (r_str_startswith (bin->g_sections[pos].name, ".rela.")) { return DT_RELA; } if (r_str_startswith (bin->g_sections[pos].name, ".rel.")) { return DT_REL; } return 0; } static bool is_reloc_section(Elf_(Xword) rel_mode) { return rel_mode == DT_REL || rel_mode == DT_RELA; } static size_t get_num_relocs_sections(ELFOBJ *bin) { size_t i, size, ret = 0; Elf_(Xword) rel_mode; if (!bin->g_sections) { return 0; } for (i = 0; !bin->g_sections[i].last; i++) { if (!sectionIsValid (bin, &bin->g_sections[i])) { continue; } rel_mode = get_section_mode (bin, i); if (!is_reloc_section (rel_mode)) { continue; } size = get_size_rel_mode (rel_mode); ret += NUMENTRIES_ROUNDUP (bin->g_sections[i].size, size); } return ret; } static size_t get_num_relocs_approx(ELFOBJ *bin) { size_t total = get_num_relocs_dynamic (bin) + get_num_relocs_sections (bin); if (total > bin->size) { return bin->size / 2; } return total; } static size_t populate_relocs_record_from_dynamic(ELFOBJ *bin, RBinElfReloc *relocs, size_t pos, size_t num_relocs) { size_t offset; size_t size = get_size_rel_mode (bin->dyn_info.dt_pltrel); for (offset = 0; offset < bin->dyn_info.dt_pltrelsz && pos < num_relocs; offset += size, pos++) { if (!read_reloc (bin, relocs + pos, bin->dyn_info.dt_pltrel, bin->dyn_info.dt_jmprel + offset)) { break; } fix_rva_and_offset_exec_file (bin, relocs + pos); } for (offset = 0; offset < 
bin->dyn_info.dt_relasz && pos < num_relocs; offset += bin->dyn_info.dt_relaent, pos++) { if (!read_reloc (bin, relocs + pos, DT_RELA, bin->dyn_info.dt_rela + offset)) { break; } fix_rva_and_offset_exec_file (bin, relocs + pos); } for (offset = 0; offset < bin->dyn_info.dt_relsz && pos < num_relocs; offset += bin->dyn_info.dt_relent, pos++) { if (!read_reloc (bin, relocs + pos, DT_REL, bin->dyn_info.dt_rel + offset)) { break; } fix_rva_and_offset_exec_file (bin, relocs + pos); } return pos; } static size_t get_next_not_analysed_offset(ELFOBJ *bin, size_t section_vaddr, size_t offset) { size_t gvaddr = section_vaddr + offset; if (bin->dyn_info.dt_rela != R_BIN_ELF_ADDR_MAX && bin->dyn_info.dt_rela <= gvaddr && gvaddr < bin->dyn_info.dt_rela + bin->dyn_info.dt_relasz) { return bin->dyn_info.dt_rela + bin->dyn_info.dt_relasz - section_vaddr; } if (bin->dyn_info.dt_rel != R_BIN_ELF_ADDR_MAX && bin->dyn_info.dt_rel <= gvaddr && gvaddr < bin->dyn_info.dt_rel + bin->dyn_info.dt_relsz) { return bin->dyn_info.dt_rel + bin->dyn_info.dt_relsz - section_vaddr; } if (bin->dyn_info.dt_jmprel != R_BIN_ELF_ADDR_MAX && bin->dyn_info.dt_jmprel <= gvaddr && gvaddr < bin->dyn_info.dt_jmprel + bin->dyn_info.dt_pltrelsz) { return bin->dyn_info.dt_jmprel + bin->dyn_info.dt_pltrelsz - section_vaddr; } return offset; } static size_t populate_relocs_record_from_section(ELFOBJ *bin, RBinElfReloc *relocs, size_t pos, size_t num_relocs) { size_t size, i, j; Elf_(Xword) rel_mode; if (!bin->g_sections) { return pos; } for (i = 0; !bin->g_sections[i].last; i++) { rel_mode = get_section_mode (bin, i); if (!is_reloc_section (rel_mode) || bin->g_sections[i].size > bin->size || bin->g_sections[i].offset > bin->size) { continue; } size = get_size_rel_mode (rel_mode); for (j = get_next_not_analysed_offset (bin, bin->g_sections[i].rva, 0); j < bin->g_sections[i].size && pos < num_relocs; j = get_next_not_analysed_offset (bin, bin->g_sections[i].rva, j + size)) { if (!read_reloc (bin, relocs + pos, 
rel_mode, bin->g_sections[i].rva + j)) { break; } fix_rva_and_offset (bin, relocs + pos, i); pos++; } } return pos; } static RBinElfReloc *populate_relocs_record(ELFOBJ *bin) { size_t i = 0; size_t num_relocs = get_num_relocs_approx (bin); RBinElfReloc *relocs = R_NEWS0 (RBinElfReloc, num_relocs + 1); if (!relocs) { // In case we can't allocate enough memory for all the claimed // relocation entries, try to parse only the ones specified in // the dynamic segment. num_relocs = get_num_relocs_dynamic (bin); relocs = R_NEWS0 (RBinElfReloc, num_relocs + 1); if (!relocs) { return NULL; } } i = populate_relocs_record_from_dynamic (bin, relocs, i, num_relocs); i = populate_relocs_record_from_section (bin, relocs, i, num_relocs); relocs[i].last = 1; bin->g_reloc_num = i; return relocs; } RBinElfReloc* Elf_(r_bin_elf_get_relocs) (ELFOBJ *bin) { if (!bin) { return NULL; } if (!bin->g_relocs) { bin->g_relocs = populate_relocs_record (bin); } return bin->g_relocs; } RBinElfLib* Elf_(r_bin_elf_get_libs)(ELFOBJ *bin) { RBinElfLib *ret = NULL; Elf_(Off) *it = NULL; size_t k = 0; if (!bin || !bin->phdr || !bin->strtab || *(bin->strtab+1) == '0') { return NULL; } r_vector_foreach(&bin->dyn_info.dt_needed, it) { Elf_(Off) val = *it; RBinElfLib *r = realloc (ret, (k + 1) * sizeof (RBinElfLib)); if (!r) { r_sys_perror ("realloc (libs)"); free (ret); return NULL; } ret = r; if (val > bin->strtab_size) { free (ret); return NULL; } strncpy (ret[k].name, bin->strtab + val, ELF_STRING_LENGTH); ret[k].name[ELF_STRING_LENGTH - 1] = '\0'; ret[k].last = 0; if (ret[k].name[0]) { k++; } } RBinElfLib *r = realloc (ret, (k + 1) * sizeof (RBinElfLib)); if (!r) { r_sys_perror ("realloc (libs)"); free (ret); return NULL; } ret = r; ret[k].last = 1; return ret; } static void create_section_from_phdr(ELFOBJ *bin, RBinElfSection *ret, size_t *i, const char *name, ut64 addr, ut64 sz) { r_return_if_fail (bin && ret && i); if (!addr || addr == UT64_MAX) { return; } ret[*i].offset = Elf_(r_bin_elf_v2p_new) 
(bin, addr); ret[*i].rva = addr; ret[*i].size = sz; r_str_ncpy (ret[*i].name, name, R_ARRAY_SIZE (ret[*i].name) - 1); ret[*i].last = 0; *i = *i + 1; } static RBinElfSection *get_sections_from_phdr(ELFOBJ *bin) { RBinElfSection *ret; size_t num_sections = 0; ut64 reldyn = 0, relava = 0, pltgotva = 0, relva = 0; ut64 reldynsz = 0, relasz = 0, pltgotsz = 0; r_return_val_if_fail (bin && bin->phdr, NULL); if (!bin->ehdr.e_phnum) { return NULL; } if (bin->dyn_info.dt_rel != R_BIN_ELF_ADDR_MAX) { reldyn = bin->dyn_info.dt_rel; num_sections++; } if (bin->dyn_info.dt_rela != R_BIN_ELF_ADDR_MAX) { relva = bin->dyn_info.dt_rela; num_sections++; } if (bin->dyn_info.dt_relsz) { reldynsz = bin->dyn_info.dt_relsz; } if (bin->dyn_info.dt_relasz) { relasz = bin->dyn_info.dt_relasz; } if (bin->dyn_info.dt_pltgot != R_BIN_ELF_ADDR_MAX) { pltgotva = bin->dyn_info.dt_pltgot; num_sections++; } if (bin->dyn_info.dt_pltrelsz) { pltgotsz = bin->dyn_info.dt_pltrelsz; } if (bin->dyn_info.dt_jmprel != R_BIN_ELF_ADDR_MAX) { relava = bin->dyn_info.dt_jmprel; num_sections++; } ret = calloc (num_sections + 1, sizeof (RBinElfSection)); if (!ret) { return NULL; } size_t i = 0; create_section_from_phdr (bin, ret, &i, ".rel.dyn", reldyn, reldynsz); create_section_from_phdr (bin, ret, &i, ".rela.plt", relava, pltgotsz); create_section_from_phdr (bin, ret, &i, ".rel.plt", relva, relasz); create_section_from_phdr (bin, ret, &i, ".got.plt", pltgotva, pltgotsz); ret[i].last = 1; return ret; } RBinElfSection* Elf_(r_bin_elf_get_sections)(ELFOBJ *bin) { RBinElfSection *ret = NULL; char unknown_s[32], invalid_s[32]; int i, nidx, unknown_c=0, invalid_c=0; r_return_val_if_fail (bin, NULL); if (bin->g_sections) { return bin->g_sections; } if (!bin->shdr && bin->phdr) { //we don't give up search in phdr section return get_sections_from_phdr (bin); } if (!bin->shdr) { return NULL; } ut32 count = bin->ehdr.e_shnum; if (!(ret = calloc ((count + 1), sizeof (RBinElfSection)))) { return NULL; } for (i = 0; i < count; 
i++) { ret[i].offset = bin->shdr[i].sh_offset; ret[i].size = bin->shdr[i].sh_size; ret[i].align = bin->shdr[i].sh_addralign; ret[i].flags = bin->shdr[i].sh_flags; ret[i].link = bin->shdr[i].sh_link; ret[i].info = bin->shdr[i].sh_info; ret[i].type = bin->shdr[i].sh_type; if (is_bin_etrel (bin)) { ret[i].rva = bin->baddr + bin->shdr[i].sh_offset; } else { ret[i].rva = bin->shdr[i].sh_addr; } const int SHNAME = (int)bin->shdr[i].sh_name; const int SHSIZE = (int)bin->shstrtab_size; nidx = SHNAME; if (nidx < 0 || !bin->shstrtab_section || !bin->shstrtab_size || nidx > bin->shstrtab_size) { snprintf (invalid_s, sizeof (invalid_s), "invalid%d", invalid_c); strncpy (ret[i].name, invalid_s, sizeof (ret[i].name) - 1); invalid_c++; } else if (bin->shstrtab && (SHNAME > 0) && (SHNAME < SHSIZE)) { strncpy (ret[i].name, &bin->shstrtab[SHNAME], sizeof (ret[i].name) - 1); } else if (bin->shdr[i].sh_type == SHT_NULL) { //to follow the same behaviour as readelf ret[i].name[0] = '\0'; } else { snprintf (unknown_s, sizeof (unknown_s), "unknown%d", unknown_c); strncpy (ret[i].name, unknown_s, sizeof (ret[i].name) - 1); unknown_c++; } ret[i].name[ELF_STRING_LENGTH - 1] = '\0'; ret[i].last = 0; } ret[i].last = 1; return ret; } static bool is_special_arm_symbol(ELFOBJ *bin, Elf_(Sym) *sym, const char *name) { if (name[0] != '$') { return false; } switch (name[1]) { case 'a': case 't': case 'd': case 'x': return (name[2] == '\0' || name[2] == '.') && ELF_ST_TYPE (sym->st_info) == STT_NOTYPE && ELF_ST_BIND (sym->st_info) == STB_LOCAL && ELF_ST_VISIBILITY (sym->st_info) == STV_DEFAULT; default: return false; } } static bool is_special_symbol(ELFOBJ *bin, Elf_(Sym) *sym, const char *name) { switch (bin->ehdr.e_machine) { case EM_ARM: case EM_AARCH64: return is_special_arm_symbol (bin, sym, name); default: return false; } } static const char *bind2str(Elf_(Sym) *sym) { switch (ELF_ST_BIND (sym->st_info)) { case STB_LOCAL: return R_BIN_BIND_LOCAL_STR; case STB_GLOBAL: return 
R_BIN_BIND_GLOBAL_STR; case STB_WEAK: return R_BIN_BIND_WEAK_STR; case STB_NUM: return R_BIN_BIND_NUM_STR; case STB_LOOS: return R_BIN_BIND_LOOS_STR; case STB_HIOS: return R_BIN_BIND_HIOS_STR; case STB_LOPROC: return R_BIN_BIND_LOPROC_STR; case STB_HIPROC: return R_BIN_BIND_HIPROC_STR; default: return R_BIN_BIND_UNKNOWN_STR; } } static const char *type2str(ELFOBJ *bin, struct r_bin_elf_symbol_t *ret, Elf_(Sym) *sym) { if (bin && ret && is_special_symbol (bin, sym, ret->name)) { return R_BIN_TYPE_SPECIAL_SYM_STR; } switch (ELF_ST_TYPE (sym->st_info)) { case STT_NOTYPE: return R_BIN_TYPE_NOTYPE_STR; case STT_OBJECT: return R_BIN_TYPE_OBJECT_STR; case STT_FUNC: return R_BIN_TYPE_FUNC_STR; case STT_SECTION: return R_BIN_TYPE_SECTION_STR; case STT_FILE: return R_BIN_TYPE_FILE_STR; case STT_COMMON: return R_BIN_TYPE_COMMON_STR; case STT_TLS: return R_BIN_TYPE_TLS_STR; case STT_NUM: return R_BIN_TYPE_NUM_STR; case STT_LOOS: return R_BIN_TYPE_LOOS_STR; case STT_HIOS: return R_BIN_TYPE_HIOS_STR; case STT_LOPROC: return R_BIN_TYPE_LOPROC_STR; case STT_HIPROC: return R_BIN_TYPE_HIPROC_STR; default: return R_BIN_TYPE_UNKNOWN_STR; } } static void fill_symbol_bind_and_type(ELFOBJ *bin, struct r_bin_elf_symbol_t *ret, Elf_(Sym) *sym) { ret->bind = bind2str (sym); ret->type = type2str (bin, ret, sym); } static RBinElfSymbol* get_symbols_from_phdr(ELFOBJ *bin, int type) { Elf_(Sym) *sym = NULL; Elf_(Addr) addr_sym_table = 0; ut8 s[sizeof (Elf_(Sym))] = {0}; RBinElfSymbol *ret = NULL; int i, r, tsize, nsym, ret_ctr; ut64 toffset = 0, tmp_offset; ut32 size, sym_size = 0; if (!bin || !bin->phdr || !bin->ehdr.e_phnum) { return NULL; } if (bin->dyn_info.dt_symtab == R_BIN_ELF_ADDR_MAX || !bin->dyn_info.dt_syment) { return NULL; } addr_sym_table = Elf_(r_bin_elf_v2p) (bin, bin->dyn_info.dt_symtab); sym_size = bin->dyn_info.dt_syment; if (!sym_size) { goto beach; } //since ELF doesn't specify the symbol table size we may read until the end of the buffer nsym = (bin->size - addr_sym_table) 
/ sym_size; if (!UT32_MUL (&size, nsym, sizeof (Elf_ (Sym)))) { goto beach; } if (size < 1) { goto beach; } if (addr_sym_table > bin->size || addr_sym_table + size > bin->size) { goto beach; } if (nsym < 1) { return NULL; } // we reserve room for 4096 and grow as needed. size_t capacity1 = 4096; size_t capacity2 = 4096; sym = (Elf_(Sym)*) calloc (capacity1, sym_size); ret = (RBinElfSymbol *) calloc (capacity2, sizeof (struct r_bin_elf_symbol_t)); if (!sym || !ret) { goto beach; } for (i = 1, ret_ctr = 0; i < nsym; i++) { if (i >= capacity1) { // maybe grow // You take what you want, but you eat what you take. Elf_(Sym)* temp_sym = (Elf_(Sym)*) realloc (sym, (size_t)(capacity1 * GROWTH_FACTOR) * sym_size); if (!temp_sym) { goto beach; } sym = temp_sym; capacity1 = (size_t)(capacity1 * GROWTH_FACTOR); } if (ret_ctr >= capacity2) { // maybe grow RBinElfSymbol *temp_ret = realloc (ret, (size_t)(capacity2 * GROWTH_FACTOR) * sizeof (struct r_bin_elf_symbol_t)); if (!temp_ret) { goto beach; } ret = temp_ret; capacity2 = (size_t)(capacity2 * GROWTH_FACTOR); } // read in one entry r = r_buf_read_at (bin->b, addr_sym_table + i * sizeof (Elf_ (Sym)), s, sizeof (Elf_ (Sym))); if (r < 1) { goto beach; } int j = 0; #if R_BIN_ELF64 sym[i].st_name = READ32 (s, j); sym[i].st_info = READ8 (s, j); sym[i].st_other = READ8 (s, j); sym[i].st_shndx = READ16 (s, j); sym[i].st_value = READ64 (s, j); sym[i].st_size = READ64 (s, j); #else sym[i].st_name = READ32 (s, j); sym[i].st_value = READ32 (s, j); sym[i].st_size = READ32 (s, j); sym[i].st_info = READ8 (s, j); sym[i].st_other = READ8 (s, j); sym[i].st_shndx = READ16 (s, j); #endif bool is_sht_null = false; bool is_vaddr = false; // zero symbol is always empty // Examine entry and maybe store if (type == R_BIN_ELF_IMPORT_SYMBOLS && sym[i].st_shndx == SHT_NULL) { if (sym[i].st_value) { toffset = sym[i].st_value; } else if ((toffset = get_import_addr (bin, i)) == -1){ toffset = 0; } tsize = 16; } else if (type == R_BIN_ELF_ALL_SYMBOLS) { 
tsize = sym[i].st_size; toffset = (ut64) sym[i].st_value; is_sht_null = sym[i].st_shndx == SHT_NULL; } else { continue; } // since we don't know the size of the sym table in this case, // let's stop at the first invalid entry if (!strcmp (bind2str (&sym[i]), R_BIN_BIND_UNKNOWN_STR) || !strcmp (type2str (NULL, NULL, &sym[i]), R_BIN_TYPE_UNKNOWN_STR)) { goto done; } tmp_offset = Elf_(r_bin_elf_v2p_new) (bin, toffset); if (tmp_offset == UT64_MAX) { tmp_offset = toffset; is_vaddr = true; } if (sym[i].st_name + 2 > bin->strtab_size) { // Since we are reading beyond the symbol table what's happening // is that some entry is trying to dereference the strtab beyond its capacity // is not a symbol so is the end goto done; } ret[ret_ctr].offset = tmp_offset; ret[ret_ctr].size = tsize; { int rest = ELF_STRING_LENGTH - 1; int st_name = sym[i].st_name; int maxsize = R_MIN (bin->size, bin->strtab_size); if (st_name < 0 || st_name >= maxsize) { ret[ret_ctr].name[0] = 0; } else { const int len = __strnlen (bin->strtab + st_name, rest); memcpy (ret[ret_ctr].name, &bin->strtab[st_name], len); } } ret[ret_ctr].ordinal = i; ret[ret_ctr].in_shdr = false; ret[ret_ctr].name[ELF_STRING_LENGTH - 2] = '\0'; fill_symbol_bind_and_type (bin, &ret[ret_ctr], &sym[i]); ret[ret_ctr].is_sht_null = is_sht_null; ret[ret_ctr].is_vaddr = is_vaddr; ret[ret_ctr].last = 0; ret_ctr++; } done: // Size everything down to only what is used { nsym = i > 0? i: 1; Elf_(Sym) *temp_sym = (Elf_(Sym) *)realloc (sym, (size_t)(nsym * GROWTH_FACTOR) * sym_size); if (!temp_sym) { goto beach; } sym = temp_sym; } { ret_ctr = ret_ctr > 0? 
ret_ctr: 1; RBinElfSymbol *p = (RBinElfSymbol *)realloc (ret, (ret_ctr + 1) * sizeof (RBinElfSymbol)); if (!p) { goto beach; } ret = p; } ret[ret_ctr].last = 1; if (type == R_BIN_ELF_IMPORT_SYMBOLS && !bin->imports_by_ord_size) { bin->imports_by_ord_size = ret_ctr + 1; if (ret_ctr > 0) { bin->imports_by_ord = (RBinImport * *) calloc (ret_ctr + 1, sizeof (RBinImport*)); } else { bin->imports_by_ord = NULL; } } else if (type == R_BIN_ELF_ALL_SYMBOLS && !bin->symbols_by_ord_size && ret_ctr) { bin->symbols_by_ord_size = ret_ctr + 1; if (ret_ctr > 0) { bin->symbols_by_ord = (RBinSymbol **) calloc (ret_ctr + 1, sizeof (RBinSymbol*)); } else { bin->symbols_by_ord = NULL; } } free (sym); return ret; beach: free (sym); free (ret); return NULL; } static RBinElfSymbol *Elf_(r_bin_elf_get_phdr_symbols)(ELFOBJ *bin) { if (!bin) { return NULL; } if (bin->phdr_symbols) { return bin->phdr_symbols; } bin->phdr_symbols = get_symbols_from_phdr (bin, R_BIN_ELF_ALL_SYMBOLS); return bin->phdr_symbols; } static RBinElfSymbol *Elf_(r_bin_elf_get_phdr_imports)(ELFOBJ *bin) { r_return_val_if_fail (bin, NULL); if (!bin->phdr_imports) { bin->phdr_imports = get_symbols_from_phdr (bin, R_BIN_ELF_IMPORT_SYMBOLS); } return bin->phdr_imports; } static RBinElfSymbol *Elf_(get_phdr_symbols)(ELFOBJ *bin, int type) { return (type != R_BIN_ELF_IMPORT_SYMBOLS) ? 
Elf_(r_bin_elf_get_phdr_symbols) (bin) : Elf_(r_bin_elf_get_phdr_imports) (bin); } static int Elf_(fix_symbols)(ELFOBJ *bin, int nsym, int type, RBinElfSymbol **sym) { int count = 0; int result = -1; RBinElfSymbol *ret = *sym; RBinElfSymbol *phdr_symbols = Elf_(get_phdr_symbols) (bin, type); RBinElfSymbol *tmp, *p; HtUP *phd_offset_map = ht_up_new0 (); HtUP *phd_ordinal_map = ht_up_new0 (); if (phdr_symbols) { RBinElfSymbol *d = ret; while (!d->last) { ht_up_insert (phd_offset_map, d->offset, d); ht_up_insert (phd_ordinal_map, d->ordinal, d); d++; } p = phdr_symbols; while (!p->last) { /* find match in phdr */ d = ht_up_find (phd_offset_map, p->offset, NULL); if (!d) { d = ht_up_find (phd_ordinal_map, p->ordinal, NULL); } if (d) { p->in_shdr = true; if (*p->name && *d->name && r_str_startswith (d->name, "$")) { strcpy (d->name, p->name); } } p++; } p = phdr_symbols; while (!p->last) { if (!p->in_shdr) { count++; } p++; } /*Take those symbols that are not present in the shdr but yes in phdr*/ /*This should only should happen with fucked up binaries*/ if (count > 0) { /*what happens if a shdr says it has only one symbol? 
we should look anyway into phdr*/ tmp = (RBinElfSymbol*)realloc (ret, (nsym + count + 1) * sizeof (RBinElfSymbol)); if (!tmp) { result = -1; goto done; } ret = tmp; ret[nsym--].last = 0; p = phdr_symbols; while (!p->last) { if (!p->in_shdr) { memcpy (&ret[++nsym], p, sizeof (RBinElfSymbol)); } p++; } ret[nsym + 1].last = 1; } *sym = ret; result = nsym + 1; goto done; } result = nsym; done: ht_up_free (phd_offset_map); ht_up_free (phd_ordinal_map); return result; } static bool is_section_local_sym(ELFOBJ *bin, Elf_(Sym) *sym) { if (sym->st_name != 0) { return false; } if (ELF_ST_TYPE (sym->st_info) != STT_SECTION) { return false; } if (ELF_ST_BIND (sym->st_info) != STB_LOCAL) { return false; } if (!is_shidx_valid (bin, sym->st_shndx)) { return false; } Elf_(Word) sh_name = bin->shdr[sym->st_shndx].sh_name; return bin->shstrtab && sh_name < bin->shstrtab_size; } static void setsymord(ELFOBJ* eobj, ut32 ord, RBinSymbol *ptr) { if (!eobj->symbols_by_ord || ord >= eobj->symbols_by_ord_size) { return; } r_bin_symbol_free (eobj->symbols_by_ord[ord]); eobj->symbols_by_ord[ord] = ptr; } static void _set_arm_thumb_bits(struct Elf_(r_bin_elf_obj_t) *bin, RBinSymbol **sym) { int bin_bits = Elf_(r_bin_elf_get_bits) (bin); RBinSymbol *ptr = *sym; int len = strlen (ptr->name); if (ptr->name[0] == '$' && (len >= 2 && !ptr->name[2])) { switch (ptr->name[1]) { case 'a' : //arm ptr->bits = 32; break; case 't': //thumb ptr->bits = 16; if (ptr->vaddr & 1) { ptr->vaddr--; } if (ptr->paddr & 1) { ptr->paddr--; } break; case 'd': //data break; default: goto arm_symbol; } } else { arm_symbol: ptr->bits = bin_bits; if (bin_bits != 64) { ptr->bits = 32; if (ptr->paddr != UT64_MAX) { if (ptr->vaddr & 1) { ptr->vaddr--; ptr->bits = 16; } if (ptr->paddr & 1) { ptr->paddr--; ptr->bits = 16; } } } } } RBinSymbol *Elf_(_r_bin_elf_convert_symbol)(struct Elf_(r_bin_elf_obj_t) *bin, struct r_bin_elf_symbol_t *symbol, const char *namefmt) { ut64 paddr, vaddr; RBinSymbol *ptr = NULL; if 
(symbol->is_vaddr) { paddr = UT64_MAX; vaddr = symbol->offset; } else { paddr = symbol->offset; vaddr = Elf_(r_bin_elf_p2v_new) (bin, paddr); } if (!(ptr = R_NEW0 (RBinSymbol))) { return NULL; } ptr->name = symbol->name[0] ? r_str_newf (namefmt, &symbol->name[0]) : strdup (""); ptr->forwarder = "NONE"; ptr->bind = symbol->bind; ptr->type = symbol->type; ptr->is_imported = symbol->is_imported; ptr->paddr = paddr; ptr->vaddr = vaddr; ptr->size = symbol->size; ptr->ordinal = symbol->ordinal; // detect thumb if (bin->ehdr.e_machine == EM_ARM && *ptr->name) { _set_arm_thumb_bits (bin, &ptr); } return ptr; } static ut32 hashRBinElfSymbol(const void *obj) { const RBinElfSymbol *symbol = (const RBinElfSymbol *)obj; if (!symbol || !*symbol->name) { return 0; } int hash = sdb_hash (symbol->name); hash ^= sdb_hash (symbol->type); hash ^= (symbol->offset >> 32); hash ^= (symbol->offset & 0xffffffff); return hash; } static int cmp_RBinElfSymbol(const RBinElfSymbol *a, const RBinElfSymbol *b) { if (a->offset != b->offset) { return 1; } int result = strcmp (a->name, b->name); if (result != 0) { return result; } return strcmp (a->type, b->type); } static RBinElfSymbol* parse_gnu_debugdata(ELFOBJ *bin, size_t *ret_size) { if (ret_size) { *ret_size = 0; } if (bin->g_sections) { size_t i; for (i = 0; !bin->g_sections[i].last; i++) { if (!strcmp (bin->g_sections[i].name, ".gnu_debugdata")) { ut64 addr = bin->g_sections[i].offset; ut64 size = bin->g_sections[i].size; if (size < 10) { return false; } ut8 *data = malloc (size + 1); if (r_buf_read_at (bin->b, addr, data, size) == -1) { eprintf ("Cannot read%c\n", 10); } size_t osize; ut8 *odata = r_sys_unxz (data, size, &osize); if (odata) { RBuffer *newelf = r_buf_new_with_pointers (odata, osize, false); ELFOBJ* newobj = Elf_(r_bin_elf_new_buf) (newelf, false); RBinElfSymbol *symbol = NULL; if (newobj) { symbol = Elf_(r_bin_elf_get_symbols) (newobj); newobj->g_symbols = NULL; Elf_(r_bin_elf_free)(newobj); } r_buf_free (newelf); free 
(odata); if (ret_size) { *ret_size = i; } free (data); return symbol; } free (data); return NULL; } } } return NULL; } // TODO: return RList<RBinSymbol*> .. or run a callback with that symbol constructed, so we don't have to do it twice static RBinElfSymbol* Elf_(_r_bin_elf_get_symbols_imports)(ELFOBJ *bin, int type) { ut32 shdr_size; int tsize, nsym, ret_ctr = 0, i, j, r, k, newsize; ut64 toffset; ut32 size = 0; RBinElfSymbol *ret = NULL, *import_ret = NULL; RBinSymbol *import_sym_ptr = NULL; size_t ret_size = 0, prev_ret_size = 0, import_ret_ctr = 0; Elf_(Shdr) *strtab_section = NULL; Elf_(Sym) *sym = NULL; ut8 s[sizeof (Elf_(Sym))] = {0}; char *strtab = NULL; HtPP *symbol_map = NULL; HtPPOptions symbol_map_options = { .cmp = (HtPPListComparator)cmp_RBinElfSymbol, .hashfn = hashRBinElfSymbol, .dupkey = NULL, .calcsizeK = NULL, .calcsizeV = NULL, .freefn = NULL, .elem_size = sizeof (HtPPKv), }; if (!bin || !bin->shdr || !bin->ehdr.e_shnum || bin->ehdr.e_shnum == 0xffff) { return Elf_(get_phdr_symbols) (bin, type); } if (!UT32_MUL (&shdr_size, bin->ehdr.e_shnum, sizeof (Elf_(Shdr)))) { return NULL; } if (shdr_size + 8 > bin->size) { return NULL; } RBinElfSymbol *dbgsyms = parse_gnu_debugdata (bin, &ret_size); if (dbgsyms) { ret = dbgsyms; ret_ctr = ret_size; } else { ret_ctr = 0; ret_size = 0; } for (i = 0; i < bin->ehdr.e_shnum; i++) { if (((type & R_BIN_ELF_SYMTAB_SYMBOLS) && bin->shdr[i].sh_type == SHT_SYMTAB) || ((type & R_BIN_ELF_DYNSYM_SYMBOLS) && bin->shdr[i].sh_type == SHT_DYNSYM)) { if (bin->shdr[i].sh_link < 1) { /* oops. fix out of range pointers */ continue; } // hack to avoid asan cry if ((bin->shdr[i].sh_link * sizeof (Elf_(Shdr))) >= shdr_size) { /* oops. 
fix out of range pointers */ continue; } strtab_section = &bin->shdr[bin->shdr[i].sh_link]; if (strtab_section->sh_size > ST32_MAX || strtab_section->sh_size+8 > bin->size) { R_LOG_ERROR ("size (syms strtab)"); free (ret); free (strtab); return NULL; } if (!strtab) { if (!(strtab = (char *)calloc (1, 8 + strtab_section->sh_size))) { R_LOG_ERROR ("malloc (syms strtab)"); goto beach; } if (strtab_section->sh_offset > bin->size || strtab_section->sh_offset + strtab_section->sh_size > bin->size) { goto beach; } if (r_buf_read_at (bin->b, strtab_section->sh_offset, (ut8*)strtab, strtab_section->sh_size) == -1) { R_LOG_ERROR ("read (syms strtab)"); goto beach; } } newsize = 1 + bin->shdr[i].sh_size; if (newsize < 0 || newsize > bin->size) { R_LOG_ERROR ("invalid shdr %d size", i); goto beach; } nsym = (int)(bin->shdr[i].sh_size / sizeof (Elf_(Sym))); if (nsym < 0) { goto beach; } { ut64 sh_begin = bin->shdr[i].sh_offset; ut64 sh_end = sh_begin + bin->shdr[i].sh_size; if (sh_begin > bin->size) { goto beach; } if (sh_end > bin->size) { st64 newshsize = bin->size - sh_begin; nsym = (int)(newshsize / sizeof (Elf_(Sym))); } } if (!(sym = (Elf_(Sym) *)calloc (nsym, sizeof (Elf_(Sym))))) { R_LOG_ERROR ("calloc (syms)"); goto beach; } if (!UT32_MUL (&size, nsym, sizeof (Elf_(Sym)))) { goto beach; } if (size < 1 || size > bin->size) { goto beach; } if (bin->shdr[i].sh_offset > bin->size) { goto beach; } if (bin->shdr[i].sh_offset + size > bin->size) { goto beach; } for (j = 0; j < nsym; j++) { int k = 0; r = r_buf_read_at (bin->b, bin->shdr[i].sh_offset + j * sizeof (Elf_(Sym)), s, sizeof (Elf_(Sym))); if (r < 1) { R_LOG_ERROR ("read (sym)"); goto beach; } #if R_BIN_ELF64 sym[j].st_name = READ32 (s, k); sym[j].st_info = READ8 (s, k); sym[j].st_other = READ8 (s, k); sym[j].st_shndx = READ16 (s, k); sym[j].st_value = READ64 (s, k); sym[j].st_size = READ64 (s, k); #else sym[j].st_name = READ32 (s, k); sym[j].st_value = READ32 (s, k); sym[j].st_size = READ32 (s, k); sym[j].st_info = 
READ8 (s, k); sym[j].st_other = READ8 (s, k); sym[j].st_shndx = READ16 (s, k); #endif } void *rett = realloc (ret, (ret_size + nsym) * sizeof (RBinElfSymbol)); if (!rett) { R_LOG_ERROR ("Cannot allocate %d symbols.", (int)(nsym + ret_size)); goto beach; } ret = rett; memset (ret + ret_size, 0, nsym * sizeof (RBinElfSymbol)); prev_ret_size = ret_size; ret_size += nsym; symbol_map = ht_pp_new_opt (&symbol_map_options); for (k = 0; k < prev_ret_size; k++) { if (ret[k].name[0]) { ht_pp_insert (symbol_map, ret + k, ret + k); } } for (k = 1; k < nsym; k++) { bool is_sht_null = false; bool is_vaddr = false; bool is_imported = false; if (type == R_BIN_ELF_IMPORT_SYMBOLS) { if (sym[k].st_value) { toffset = sym[k].st_value; } else if ((toffset = get_import_addr (bin, k)) == -1) { toffset = 0; } tsize = 16; is_imported = sym[k].st_shndx == STN_UNDEF; } else { tsize = sym[k].st_size; toffset = (ut64)sym[k].st_value; is_sht_null = sym[k].st_shndx == SHT_NULL; } if (is_bin_etrel (bin)) { if (sym[k].st_shndx < bin->ehdr.e_shnum) { ret[ret_ctr].offset = sym[k].st_value + bin->shdr[sym[k].st_shndx].sh_offset; } } else { ret[ret_ctr].offset = Elf_(r_bin_elf_v2p_new) (bin, toffset); if (ret[ret_ctr].offset == UT64_MAX) { ret[ret_ctr].offset = toffset; is_vaddr = true; } } ret[ret_ctr].size = tsize; if (sym[k].st_name + 1 > strtab_section->sh_size) { R_LOG_DEBUG ("index out of strtab range (%"PFMT64d" / %"PFMT64d")\n", (ut64)sym[k].st_name, (ut64)strtab_section->sh_size); continue; } { int st_name = sym[k].st_name; int maxsize = R_MIN (r_buf_size (bin->b), strtab_section->sh_size); if (is_section_local_sym (bin, &sym[k])) { const char *shname = &bin->shstrtab[bin->shdr[sym[k].st_shndx].sh_name]; r_str_ncpy (ret[ret_ctr].name, shname, ELF_STRING_LENGTH - 1); } else if (st_name <= 0 || st_name >= maxsize) { ret[ret_ctr].name[0] = 0; } else { r_str_ncpy (ret[ret_ctr].name, &strtab[st_name], ELF_STRING_LENGTH - 1); ret[ret_ctr].type = type2str (bin, &ret[ret_ctr], &sym[k]); if (ht_pp_find 
(symbol_map, &ret[ret_ctr], NULL)) { memset (ret + ret_ctr, 0, sizeof (RBinElfSymbol)); continue; } } } ret[ret_ctr].ordinal = k; ret[ret_ctr].name[ELF_STRING_LENGTH - 1] = '\0'; fill_symbol_bind_and_type (bin, &ret[ret_ctr], &sym[k]); ret[ret_ctr].is_sht_null = is_sht_null; ret[ret_ctr].is_vaddr = is_vaddr; ret[ret_ctr].last = 0; ret[ret_ctr].is_imported = is_imported; ret_ctr++; if (type == R_BIN_ELF_IMPORT_SYMBOLS && is_imported) { import_ret_ctr++; } } R_FREE (strtab); R_FREE (sym); ht_pp_free (symbol_map); symbol_map = NULL; if (type == R_BIN_ELF_IMPORT_SYMBOLS) { break; } } } if (!ret) { return Elf_(get_phdr_symbols) (bin, type); } ret[ret_ctr].last = 1; // ugly dirty hack :D int max = -1; RBinElfSymbol *aux = NULL; nsym = Elf_(fix_symbols) (bin, ret_ctr, type, &ret); if (nsym == -1) { goto beach; } // Elf_(fix_symbols) may find additional symbols, some of which could be // imported symbols. Let's reserve additional space for them. r_warn_if_fail (nsym >= ret_ctr); import_ret_ctr += nsym - ret_ctr; aux = ret; while (!aux->last) { if ((int)aux->ordinal > max) { max = aux->ordinal; } aux++; } nsym = max; if (type == R_BIN_ELF_IMPORT_SYMBOLS) { R_FREE (bin->imports_by_ord); bin->imports_by_ord_size = nsym + 1; bin->imports_by_ord = (RBinImport**)calloc (R_MAX (1, nsym + 1), sizeof (RBinImport*)); R_FREE (bin->symbols_by_ord); bin->symbols_by_ord_size = nsym + 1; bin->symbols_by_ord = (RBinSymbol**)calloc (R_MAX (1, nsym + 1), sizeof (RBinSymbol*)); import_ret = calloc (import_ret_ctr + 1, sizeof (RBinElfSymbol)); if (!import_ret) { R_LOG_DEBUG ("Cannot allocate %d symbols", nsym); goto beach; } import_ret_ctr = 0; i = -1; while (!ret[++i].last) { if (!(import_sym_ptr = Elf_(_r_bin_elf_convert_symbol) (bin, &ret[i], "%s"))) { continue; } setsymord (bin, import_sym_ptr->ordinal, import_sym_ptr); if (ret[i].is_imported) { memcpy (&import_ret[import_ret_ctr], &ret[i], sizeof (RBinElfSymbol)); ++import_ret_ctr; } } import_ret[import_ret_ctr].last = 1; R_FREE (ret); 
return import_ret; } return ret; beach: free (ret); free (sym); free (strtab); ht_pp_free (symbol_map); return NULL; } RBinElfSymbol *Elf_(r_bin_elf_get_symbols)(ELFOBJ *bin) { if (!bin->g_symbols) { bin->g_symbols = Elf_(_r_bin_elf_get_symbols_imports) (bin, R_BIN_ELF_ALL_SYMBOLS); } return bin->g_symbols; } RBinElfSymbol *Elf_(r_bin_elf_get_imports)(ELFOBJ *bin) { if (!bin->g_imports) { bin->g_imports = Elf_(_r_bin_elf_get_symbols_imports) (bin, R_BIN_ELF_IMPORT_SYMBOLS); } return bin->g_imports; } RBinElfField* Elf_(r_bin_elf_get_fields)(ELFOBJ *bin) { RBinElfField *ret = NULL; int i = 0, j; if (!bin || !(ret = calloc ((bin->ehdr.e_phnum + 3 + 1), sizeof (RBinElfField)))) { return NULL; } strncpy (ret[i].name, "ehdr", ELF_STRING_LENGTH); ret[i].offset = 0; ret[i++].last = 0; strncpy (ret[i].name, "shoff", ELF_STRING_LENGTH); ret[i].offset = bin->ehdr.e_shoff; ret[i++].last = 0; strncpy (ret[i].name, "phoff", ELF_STRING_LENGTH); ret[i].offset = bin->ehdr.e_phoff; ret[i++].last = 0; for (j = 0; bin->phdr && j < bin->ehdr.e_phnum; i++, j++) { snprintf (ret[i].name, ELF_STRING_LENGTH, "phdr_%i", j); ret[i].offset = bin->phdr[j].p_offset; ret[i].last = 0; } ret[i].last = 1; return ret; } void Elf_(r_bin_elf_free)(ELFOBJ* bin) { if (!bin) { return; } free (bin->phdr); free (bin->shdr); free (bin->strtab); free (bin->shstrtab); free (bin->dynstr); r_vector_fini (&bin->dyn_info.dt_needed); //free (bin->strtab_section); size_t i; if (bin->imports_by_ord) { for (i = 0; i<bin->imports_by_ord_size; i++) { free (bin->imports_by_ord[i]); } free (bin->imports_by_ord); } if (bin->symbols_by_ord) { for (i = 0; i<bin->symbols_by_ord_size; i++) { r_bin_symbol_free (bin->symbols_by_ord[i]); } free (bin->symbols_by_ord); } r_buf_free (bin->b); if (bin->g_symbols != bin->phdr_symbols) { R_FREE (bin->phdr_symbols); } if (bin->g_imports != bin->phdr_imports) { R_FREE (bin->phdr_imports); } R_FREE (bin->g_sections); R_FREE (bin->g_symbols); R_FREE (bin->g_imports); R_FREE 
(bin->g_relocs); ht_up_free (bin->rel_cache); bin->rel_cache = NULL; sdb_free (bin->kv); free (bin); } ELFOBJ* Elf_(r_bin_elf_new_buf)(RBuffer *buf, bool verbose) { ELFOBJ *bin = R_NEW0 (ELFOBJ); if (bin) { bin->kv = sdb_new0 (); bin->size = r_buf_size (buf); bin->verbose = verbose; bin->b = r_buf_ref (buf); if (!elf_init (bin)) { Elf_(r_bin_elf_free) (bin); return NULL; } } return bin; } static int is_in_pphdr(Elf_(Phdr) *p, ut64 addr) { return addr >= p->p_offset && addr < p->p_offset + p->p_filesz; } static int is_in_vphdr(Elf_(Phdr) *p, ut64 addr) { return addr >= p->p_vaddr && addr < p->p_vaddr + p->p_filesz; } /* Deprecated temporarily. Use r_bin_elf_p2v_new in new code for now. */ ut64 Elf_(r_bin_elf_p2v) (ELFOBJ *bin, ut64 paddr) { size_t i; r_return_val_if_fail (bin, 0); if (!bin->phdr) { if (is_bin_etrel (bin)) { return bin->baddr + paddr; } return paddr; } for (i = 0; i < bin->ehdr.e_phnum; i++) { Elf_(Phdr) *p = &bin->phdr[i]; if (p->p_type == PT_LOAD && is_in_pphdr (p, paddr)) { if (!p->p_vaddr && !p->p_offset) { continue; } return p->p_vaddr + paddr - p->p_offset; } } return paddr; } /* Deprecated temporarily. Use r_bin_elf_v2p_new in new code for now. */ ut64 Elf_(r_bin_elf_v2p)(ELFOBJ *bin, ut64 vaddr) { r_return_val_if_fail (bin, 0); // UT64_MAX or vaddr? 
// r_return_val_if_fail (bin, UT64_MAX); if (!bin->phdr) { if (is_bin_etrel (bin)) { return vaddr - bin->baddr; } return vaddr; } size_t i; for (i = 0; i < bin->ehdr.e_phnum; i++) { Elf_(Phdr) *p = &bin->phdr[i]; if (p->p_type == PT_LOAD && is_in_vphdr (p, vaddr)) { if (!p->p_offset && !p->p_vaddr) { continue; } return p->p_offset + vaddr - p->p_vaddr; } } return vaddr; } /* converts a physical address to the virtual address, looking * at the program headers in the binary bin */ ut64 Elf_(r_bin_elf_p2v_new) (ELFOBJ *bin, ut64 paddr) { size_t i; r_return_val_if_fail (bin, UT64_MAX); if (!bin->phdr) { if (is_bin_etrel (bin)) { return bin->baddr + paddr; } return UT64_MAX; } for (i = 0; i < bin->ehdr.e_phnum; i++) { Elf_(Phdr) *p = &bin->phdr[i]; if (p->p_type == PT_LOAD && is_in_pphdr (p, paddr)) { return p->p_vaddr + paddr - p->p_offset; } } return UT64_MAX; } /* converts a virtual address to the relative physical address, looking * at the program headers in the binary bin */ ut64 Elf_(r_bin_elf_v2p_new) (ELFOBJ *bin, ut64 vaddr) { size_t i; r_return_val_if_fail (bin, UT64_MAX); if (!bin->phdr) { if (is_bin_etrel (bin)) { return vaddr - bin->baddr; } return UT64_MAX; } for (i = 0; i < bin->ehdr.e_phnum; i++) { Elf_(Phdr) *p = &bin->phdr[i]; if (p->p_type == PT_LOAD && is_in_vphdr (p, vaddr)) { return p->p_offset + vaddr - p->p_vaddr; } } return UT64_MAX; } static bool get_nt_file_maps(ELFOBJ *bin, RList *core_maps) { ut16 ph, ph_num = bin->ehdr.e_phnum; for (ph = 0; ph < ph_num; ph++) { Elf_(Phdr) *p = &bin->phdr[ph]; if (p->p_type == PT_NOTE) { int bits = Elf_(r_bin_elf_get_bits)(bin); int elf_nhdr_size = (bits == 64) ? sizeof (Elf64_Nhdr) : sizeof (Elf32_Nhdr); int size_of = (bits == 64) ? 
sizeof (ut64) : sizeof (ut32); void *elf_nhdr = calloc (elf_nhdr_size, 1); ut64 offset = 0; bool found = false; while (!found) { int ret; ut32 n_descsz, n_namesz, n_type; ret = r_buf_read_at (bin->b, bin->phdr[ph].p_offset + offset, elf_nhdr, elf_nhdr_size); if (ret != elf_nhdr_size) { eprintf ("Cannot read more NOTES header from CORE\n"); free (elf_nhdr); goto fail; } if (bits == 64) { n_descsz = round_up (((Elf64_Nhdr *)elf_nhdr)->n_descsz); n_namesz = round_up (((Elf64_Nhdr *)elf_nhdr)->n_namesz); n_type = ((Elf64_Nhdr *)elf_nhdr)->n_type; } else { n_descsz = round_up (((Elf32_Nhdr *)elf_nhdr)->n_descsz); n_namesz = round_up (((Elf32_Nhdr *)elf_nhdr)->n_namesz); n_type = ((Elf32_Nhdr *)elf_nhdr)->n_type; } if (n_type == NT_FILE) { found = true; offset += elf_nhdr_size + n_namesz; free (elf_nhdr); } else { offset += elf_nhdr_size + n_descsz + n_namesz; } } ut64 i = bin->phdr[ph].p_offset + offset; ut64 n_maps; if (bits == 64) { n_maps = BREAD64 (bin->b, i); (void)BREAD64 (bin->b, i); } else { n_maps = BREAD32 (bin->b, i); (void)BREAD32 (bin->b, i); } ut64 jump = ((size_of * 3) * n_maps) + i; int len_str = 0; while (n_maps > 0) { ut64 addr; if (bits == 64) { addr = BREAD64 (bin->b, i); } else { addr = BREAD32 (bin->b, i); } if (addr == UT64_MAX) { break; } char str[512] = {0}; r_buf_read_at (bin->b, jump + len_str, (ut8*)str, sizeof (str) - 1); str[sizeof (str) - 1] = 0; // null terminate string RListIter *iter; RBinMap *p; r_list_foreach (core_maps, iter, p) { if (p->addr == addr) { p->file = strdup (str); } } len_str += strlen (str) + 1; n_maps--; i += (size_of * 2); } } } return true; fail: return false; } static void r_bin_elf_map_free(RBinMap *map) { if (map) { free (map->file); free (map); } } RList *Elf_(r_bin_elf_get_maps)(ELFOBJ *bin) { ut16 ph, ph_num = bin->ehdr.e_phnum; //Skip PT_NOTE if (!bin->phdr) { return NULL; } RList *maps = r_list_newf ((RListFree)r_bin_elf_map_free); for (ph = 0; ph < ph_num; ph++) { Elf_(Phdr) *p = &bin->phdr[ph]; if 
(p->p_type == PT_LOAD) { RBinMap *map = R_NEW0 (RBinMap); if (map) { map->addr = p->p_vaddr; map->size = p->p_memsz; map->perms = p->p_flags; map->offset = p->p_offset; map->file = NULL; r_list_append (maps, map); } } } if (!r_list_empty (maps)) { if (!get_nt_file_maps (bin, maps)) { eprintf ("Could not retrieve the names of all maps from NT_FILE\n"); } } return maps; } char *Elf_(r_bin_elf_compiler)(ELFOBJ *bin) { RBinElfSection *section = get_section_by_name (bin, ".comment"); if (!section) { return NULL; } ut64 off = section->offset; ut32 sz = R_MIN (section->size, 128); if (sz < 1) { return NULL; } char *buf = malloc (sz + 1); if (!buf) { return NULL; } if (r_buf_read_at (bin->b, off, (ut8*)buf, sz) < 1) { free (buf); return NULL; } buf[sz] = 0; const size_t buflen = strlen (buf); char *nullbyte = buf + buflen; if (buflen != sz && nullbyte[1] && buflen < sz) { nullbyte[0] = ' '; } buf[sz] = 0; r_str_trim (buf); char * res = r_str_escape (buf); free (buf); return res; } bool Elf_(r_bin_elf_is_executable)(ELFOBJ *bin) { const int t = bin->ehdr.e_type; return t == ET_EXEC || t == ET_DYN; }
static int init_shdr(ELFOBJ *bin) { ut32 shdr_size; ut8 shdr[sizeof (Elf_(Shdr))] = {0}; size_t i, j, len; r_return_val_if_fail (bin && !bin->shdr, false); if (!UT32_MUL (&shdr_size, bin->ehdr.e_shnum, sizeof (Elf_(Shdr)))) { return false; } if (shdr_size < 1) { return false; } if (shdr_size > bin->size) { return false; } if (bin->ehdr.e_shoff > bin->size) { return false; } if (bin->ehdr.e_shoff + shdr_size > bin->size) { return false; } if (!(bin->shdr = R_NEWS0 (Elf_(Shdr), bin->ehdr.e_shnum))) { r_sys_perror ("malloc (shdr)"); return false; } sdb_num_set (bin->kv, "elf_shdr.offset", bin->ehdr.e_shoff, 0); sdb_num_set (bin->kv, "elf_shdr.size", sizeof (Elf_(Shdr)), 0); sdb_set (bin->kv, "elf_s_type.cparse", "enum elf_s_type {SHT_NULL=0,SHT_PROGBITS=1," "SHT_SYMTAB=2,SHT_STRTAB=3,SHT_RELA=4,SHT_HASH=5,SHT_DYNAMIC=6,SHT_NOTE=7," "SHT_NOBITS=8,SHT_REL=9,SHT_SHLIB=10,SHT_DYNSYM=11,SHT_LOOS=0x60000000," "SHT_HIOS=0x6fffffff,SHT_LOPROC=0x70000000,SHT_HIPROC=0x7fffffff};", 0); for (i = 0; i < bin->ehdr.e_shnum; i++) { j = 0; len = r_buf_read_at (bin->b, bin->ehdr.e_shoff + i * sizeof (Elf_(Shdr)), shdr, sizeof (Elf_(Shdr))); if (len < 1) { R_LOG_ERROR ("read (shdr) at 0x%" PFMT64x, (ut64) bin->ehdr.e_shoff); R_FREE (bin->shdr); return false; } bin->shdr[i].sh_name = READ32 (shdr, j); bin->shdr[i].sh_type = READ32 (shdr, j); bin->shdr[i].sh_flags = R_BIN_ELF_READWORD (shdr, j); bin->shdr[i].sh_addr = R_BIN_ELF_READWORD (shdr, j); bin->shdr[i].sh_offset = R_BIN_ELF_READWORD (shdr, j); bin->shdr[i].sh_size = R_BIN_ELF_READWORD (shdr, j); bin->shdr[i].sh_link = READ32 (shdr, j); bin->shdr[i].sh_info = READ32 (shdr, j); bin->shdr[i].sh_addralign = R_BIN_ELF_READWORD (shdr, j); bin->shdr[i].sh_entsize = R_BIN_ELF_READWORD (shdr, j); } #if R_BIN_ELF64 sdb_set (bin->kv, "elf_s_flags_64.cparse", "enum elf_s_flags_64 {SF64_None=0,SF64_Exec=1," "SF64_Alloc=2,SF64_Alloc_Exec=3,SF64_Write=4,SF64_Write_Exec=5," "SF64_Write_Alloc=6,SF64_Write_Alloc_Exec=7};", 0); sdb_set (bin->kv, 
"elf_shdr.format", "x[4]E[8]Eqqqxxqq name (elf_s_type)type" " (elf_s_flags_64)flags addr offset size link info addralign entsize", 0); #else sdb_set (bin->kv, "elf_s_flags_32.cparse", "enum elf_s_flags_32 {SF32_None=0,SF32_Exec=1," "SF32_Alloc=2,SF32_Alloc_Exec=3,SF32_Write=4,SF32_Write_Exec=5," "SF32_Write_Alloc=6,SF32_Write_Alloc_Exec=7};", 0); sdb_set (bin->kv, "elf_shdr.format", "x[4]E[4]Exxxxxxx name (elf_s_type)type" " (elf_s_flags_32)flags addr offset size link info addralign entsize", 0); #endif return true; // Usage example: // > td `k bin/cur/info/elf_s_type.cparse`; td `k bin/cur/info/elf_s_flags_64.cparse` // > pf `k bin/cur/info/elf_shdr.format` @ `k bin/cur/info/elf_shdr.offset` }
static int init_shdr(ELFOBJ *bin) { ut32 shdr_size; ut8 shdr[sizeof (Elf_(Shdr))] = {0}; size_t i, j, len; r_return_val_if_fail (bin && !bin->shdr, false); if (!UT32_MUL (&shdr_size, bin->ehdr.e_shnum, sizeof (Elf_(Shdr)))) { return false; } if (shdr_size < 1) { return false; } if (shdr_size > bin->size) { return false; } if (bin->ehdr.e_shoff > bin->size) { return false; } if (bin->ehdr.e_shoff + shdr_size > bin->size) { return false; } if (!(bin->shdr = R_NEWS0 (Elf_(Shdr), bin->ehdr.e_shnum))) { r_sys_perror ("malloc (shdr)"); return false; } sdb_num_set (bin->kv, "elf_shdr.offset", bin->ehdr.e_shoff, 0); sdb_num_set (bin->kv, "elf_shdr.size", sizeof (Elf_(Shdr)), 0); sdb_set (bin->kv, "elf_s_type.cparse", "enum elf_s_type {SHT_NULL=0,SHT_PROGBITS=1," "SHT_SYMTAB=2,SHT_STRTAB=3,SHT_RELA=4,SHT_HASH=5,SHT_DYNAMIC=6,SHT_NOTE=7," "SHT_NOBITS=8,SHT_REL=9,SHT_SHLIB=10,SHT_DYNSYM=11,SHT_LOOS=0x60000000," "SHT_HIOS=0x6fffffff,SHT_LOPROC=0x70000000,SHT_HIPROC=0x7fffffff};", 0); for (i = 0; i < bin->ehdr.e_shnum; i++) { j = 0; len = r_buf_read_at (bin->b, bin->ehdr.e_shoff + i * sizeof (Elf_(Shdr)), shdr, sizeof (Elf_(Shdr))); if (len < 1) { R_LOG_DEBUG ("read (shdr) at 0x%" PFMT64x, (ut64) bin->ehdr.e_shoff); R_FREE (bin->shdr); return false; } bin->shdr[i].sh_name = READ32 (shdr, j); bin->shdr[i].sh_type = READ32 (shdr, j); bin->shdr[i].sh_flags = R_BIN_ELF_READWORD (shdr, j); bin->shdr[i].sh_addr = R_BIN_ELF_READWORD (shdr, j); bin->shdr[i].sh_offset = R_BIN_ELF_READWORD (shdr, j); bin->shdr[i].sh_size = R_BIN_ELF_READWORD (shdr, j); bin->shdr[i].sh_link = READ32 (shdr, j); bin->shdr[i].sh_info = READ32 (shdr, j); bin->shdr[i].sh_addralign = R_BIN_ELF_READWORD (shdr, j); bin->shdr[i].sh_entsize = R_BIN_ELF_READWORD (shdr, j); } #if R_BIN_ELF64 sdb_set (bin->kv, "elf_s_flags_64.cparse", "enum elf_s_flags_64 {SF64_None=0,SF64_Exec=1," "SF64_Alloc=2,SF64_Alloc_Exec=3,SF64_Write=4,SF64_Write_Exec=5," "SF64_Write_Alloc=6,SF64_Write_Alloc_Exec=7};", 0); sdb_set (bin->kv, 
"elf_shdr.format", "x[4]E[8]Eqqqxxqq name (elf_s_type)type" " (elf_s_flags_64)flags addr offset size link info addralign entsize", 0); #else sdb_set (bin->kv, "elf_s_flags_32.cparse", "enum elf_s_flags_32 {SF32_None=0,SF32_Exec=1," "SF32_Alloc=2,SF32_Alloc_Exec=3,SF32_Write=4,SF32_Write_Exec=5," "SF32_Write_Alloc=6,SF32_Write_Alloc_Exec=7};", 0); sdb_set (bin->kv, "elf_shdr.format", "x[4]E[4]Exxxxxxx name (elf_s_type)type" " (elf_s_flags_32)flags addr offset size link info addralign entsize", 0); #endif return true; // Usage example: // > td `k bin/cur/info/elf_s_type.cparse`; td `k bin/cur/info/elf_s_flags_64.cparse` // > pf `k bin/cur/info/elf_shdr.format` @ `k bin/cur/info/elf_shdr.offset` }
{'added': [(127, '\t\tR_LOG_DEBUG ("read (magic)");'), (191, '\t\tR_LOG_DEBUG ("read (ehdr)");'), (260, '\t\t\tR_LOG_DEBUG ("read (phdr)");'), (400, '\t\t\tR_LOG_DEBUG ("read (shdr) at 0x%" PFMT64x, (ut64) bin->ehdr.e_shoff);'), (478, '\t\tR_LOG_DEBUG ("read (shstrtab) at 0x%" PFMT64x, (ut64) bin->shstrtab_section->sh_offset);'), (973, '\t\tR_LOG_DEBUG ("Cannot allocate memory (Check Elf_(Verdef))");'), (1801, '\t\tR_LOG_DEBUG ("read (init_offset)");')], 'deleted': [(127, '\t\tR_LOG_ERROR ("read (magic)");'), (191, '\t\tR_LOG_ERROR ("read (ehdr)");'), (260, '\t\t\tR_LOG_ERROR ("read (phdr)");'), (400, '\t\t\tR_LOG_ERROR ("read (shdr) at 0x%" PFMT64x, (ut64) bin->ehdr.e_shoff);'), (478, '\t\tR_LOG_ERROR ("read (shstrtab) at 0x%" PFMT64x, (ut64) bin->shstrtab_section->sh_offset);'), (973, '\t\tR_LOG_ERROR ("Cannot allocate memory (Check Elf_(Verdef))");'), (1801, '\t\tR_LOG_ERROR ("read (init_offset)");')]}
7
7
3,718
27,554
61
531
11
https://github.com/radareorg/radare2
CVE-2022-1714
CWE-787
2,282
ip_tables.c
C
check_compat_entry_size_and_hooks
/* * Packet matching code. * * Copyright (C) 1999 Paul `Rusty' Russell & Michael J. Neuling * Copyright (C) 2000-2005 Netfilter Core Team <coreteam@netfilter.org> * Copyright (C) 2006-2010 Patrick McHardy <kaber@trash.net> * * This program is free software; you can redistribute it and/or modify * it under the terms of the GNU General Public License version 2 as * published by the Free Software Foundation. */ #define pr_fmt(fmt) KBUILD_MODNAME ": " fmt #include <linux/cache.h> #include <linux/capability.h> #include <linux/skbuff.h> #include <linux/kmod.h> #include <linux/vmalloc.h> #include <linux/netdevice.h> #include <linux/module.h> #include <linux/icmp.h> #include <net/ip.h> #include <net/compat.h> #include <asm/uaccess.h> #include <linux/mutex.h> #include <linux/proc_fs.h> #include <linux/err.h> #include <linux/cpumask.h> #include <linux/netfilter/x_tables.h> #include <linux/netfilter_ipv4/ip_tables.h> #include <net/netfilter/nf_log.h> #include "../../netfilter/xt_repldata.h" MODULE_LICENSE("GPL"); MODULE_AUTHOR("Netfilter Core Team <coreteam@netfilter.org>"); MODULE_DESCRIPTION("IPv4 packet filter"); /*#define DEBUG_IP_FIREWALL*/ /*#define DEBUG_ALLOW_ALL*/ /* Useful for remote debugging */ /*#define DEBUG_IP_FIREWALL_USER*/ #ifdef DEBUG_IP_FIREWALL #define dprintf(format, args...) pr_info(format , ## args) #else #define dprintf(format, args...) #endif #ifdef DEBUG_IP_FIREWALL_USER #define duprintf(format, args...) pr_info(format , ## args) #else #define duprintf(format, args...) #endif #ifdef CONFIG_NETFILTER_DEBUG #define IP_NF_ASSERT(x) WARN_ON(!(x)) #else #define IP_NF_ASSERT(x) #endif #if 0 /* All the better to debug you with... */ #define static #define inline #endif void *ipt_alloc_initial_table(const struct xt_table *info) { return xt_alloc_initial_table(ipt, IPT); } EXPORT_SYMBOL_GPL(ipt_alloc_initial_table); /* Returns whether matches rule or not. 
*/ /* Performance critical - called for every packet */ static inline bool ip_packet_match(const struct iphdr *ip, const char *indev, const char *outdev, const struct ipt_ip *ipinfo, int isfrag) { unsigned long ret; #define FWINV(bool, invflg) ((bool) ^ !!(ipinfo->invflags & (invflg))) if (FWINV((ip->saddr&ipinfo->smsk.s_addr) != ipinfo->src.s_addr, IPT_INV_SRCIP) || FWINV((ip->daddr&ipinfo->dmsk.s_addr) != ipinfo->dst.s_addr, IPT_INV_DSTIP)) { dprintf("Source or dest mismatch.\n"); dprintf("SRC: %pI4. Mask: %pI4. Target: %pI4.%s\n", &ip->saddr, &ipinfo->smsk.s_addr, &ipinfo->src.s_addr, ipinfo->invflags & IPT_INV_SRCIP ? " (INV)" : ""); dprintf("DST: %pI4 Mask: %pI4 Target: %pI4.%s\n", &ip->daddr, &ipinfo->dmsk.s_addr, &ipinfo->dst.s_addr, ipinfo->invflags & IPT_INV_DSTIP ? " (INV)" : ""); return false; } ret = ifname_compare_aligned(indev, ipinfo->iniface, ipinfo->iniface_mask); if (FWINV(ret != 0, IPT_INV_VIA_IN)) { dprintf("VIA in mismatch (%s vs %s).%s\n", indev, ipinfo->iniface, ipinfo->invflags & IPT_INV_VIA_IN ? " (INV)" : ""); return false; } ret = ifname_compare_aligned(outdev, ipinfo->outiface, ipinfo->outiface_mask); if (FWINV(ret != 0, IPT_INV_VIA_OUT)) { dprintf("VIA out mismatch (%s vs %s).%s\n", outdev, ipinfo->outiface, ipinfo->invflags & IPT_INV_VIA_OUT ? " (INV)" : ""); return false; } /* Check specific protocol */ if (ipinfo->proto && FWINV(ip->protocol != ipinfo->proto, IPT_INV_PROTO)) { dprintf("Packet protocol %hi does not match %hi.%s\n", ip->protocol, ipinfo->proto, ipinfo->invflags & IPT_INV_PROTO ? " (INV)" : ""); return false; } /* If we have a fragment rule but the packet is not a fragment * then we return zero */ if (FWINV((ipinfo->flags&IPT_F_FRAG) && !isfrag, IPT_INV_FRAG)) { dprintf("Fragment rule but not fragment.%s\n", ipinfo->invflags & IPT_INV_FRAG ? 
" (INV)" : ""); return false; } return true; } static bool ip_checkentry(const struct ipt_ip *ip) { if (ip->flags & ~IPT_F_MASK) { duprintf("Unknown flag bits set: %08X\n", ip->flags & ~IPT_F_MASK); return false; } if (ip->invflags & ~IPT_INV_MASK) { duprintf("Unknown invflag bits set: %08X\n", ip->invflags & ~IPT_INV_MASK); return false; } return true; } static unsigned int ipt_error(struct sk_buff *skb, const struct xt_action_param *par) { net_info_ratelimited("error: `%s'\n", (const char *)par->targinfo); return NF_DROP; } /* Performance critical */ static inline struct ipt_entry * get_entry(const void *base, unsigned int offset) { return (struct ipt_entry *)(base + offset); } /* All zeroes == unconditional rule. */ /* Mildly perf critical (only if packet tracing is on) */ static inline bool unconditional(const struct ipt_ip *ip) { static const struct ipt_ip uncond; return memcmp(ip, &uncond, sizeof(uncond)) == 0; #undef FWINV } /* for const-correctness */ static inline const struct xt_entry_target * ipt_get_target_c(const struct ipt_entry *e) { return ipt_get_target((struct ipt_entry *)e); } #if IS_ENABLED(CONFIG_NETFILTER_XT_TARGET_TRACE) static const char *const hooknames[] = { [NF_INET_PRE_ROUTING] = "PREROUTING", [NF_INET_LOCAL_IN] = "INPUT", [NF_INET_FORWARD] = "FORWARD", [NF_INET_LOCAL_OUT] = "OUTPUT", [NF_INET_POST_ROUTING] = "POSTROUTING", }; enum nf_ip_trace_comments { NF_IP_TRACE_COMMENT_RULE, NF_IP_TRACE_COMMENT_RETURN, NF_IP_TRACE_COMMENT_POLICY, }; static const char *const comments[] = { [NF_IP_TRACE_COMMENT_RULE] = "rule", [NF_IP_TRACE_COMMENT_RETURN] = "return", [NF_IP_TRACE_COMMENT_POLICY] = "policy", }; static struct nf_loginfo trace_loginfo = { .type = NF_LOG_TYPE_LOG, .u = { .log = { .level = 4, .logflags = NF_LOG_MASK, }, }, }; /* Mildly perf critical (only if packet tracing is on) */ static inline int get_chainname_rulenum(const struct ipt_entry *s, const struct ipt_entry *e, const char *hookname, const char **chainname, const char 
**comment, unsigned int *rulenum) { const struct xt_standard_target *t = (void *)ipt_get_target_c(s); if (strcmp(t->target.u.kernel.target->name, XT_ERROR_TARGET) == 0) { /* Head of user chain: ERROR target with chainname */ *chainname = t->target.data; (*rulenum) = 0; } else if (s == e) { (*rulenum)++; if (s->target_offset == sizeof(struct ipt_entry) && strcmp(t->target.u.kernel.target->name, XT_STANDARD_TARGET) == 0 && t->verdict < 0 && unconditional(&s->ip)) { /* Tail of chains: STANDARD target (return/policy) */ *comment = *chainname == hookname ? comments[NF_IP_TRACE_COMMENT_POLICY] : comments[NF_IP_TRACE_COMMENT_RETURN]; } return 1; } else (*rulenum)++; return 0; } static void trace_packet(struct net *net, const struct sk_buff *skb, unsigned int hook, const struct net_device *in, const struct net_device *out, const char *tablename, const struct xt_table_info *private, const struct ipt_entry *e) { const struct ipt_entry *root; const char *hookname, *chainname, *comment; const struct ipt_entry *iter; unsigned int rulenum = 0; root = get_entry(private->entries, private->hook_entry[hook]); hookname = chainname = hooknames[hook]; comment = comments[NF_IP_TRACE_COMMENT_RULE]; xt_entry_foreach(iter, root, private->size - private->hook_entry[hook]) if (get_chainname_rulenum(iter, e, hookname, &chainname, &comment, &rulenum) != 0) break; nf_log_trace(net, AF_INET, hook, skb, in, out, &trace_loginfo, "TRACE: %s:%s:%s:%u ", tablename, chainname, comment, rulenum); } #endif static inline struct ipt_entry *ipt_next_entry(const struct ipt_entry *entry) { return (void *)entry + entry->next_offset; } /* Returns one of the generic firewall policies, like NF_ACCEPT. */ unsigned int ipt_do_table(struct sk_buff *skb, const struct nf_hook_state *state, struct xt_table *table) { unsigned int hook = state->hook; static const char nulldevname[IFNAMSIZ] __attribute__((aligned(sizeof(long)))); const struct iphdr *ip; /* Initializing verdict to NF_DROP keeps gcc happy. 
*/ unsigned int verdict = NF_DROP; const char *indev, *outdev; const void *table_base; struct ipt_entry *e, **jumpstack; unsigned int stackidx, cpu; const struct xt_table_info *private; struct xt_action_param acpar; unsigned int addend; /* Initialization */ stackidx = 0; ip = ip_hdr(skb); indev = state->in ? state->in->name : nulldevname; outdev = state->out ? state->out->name : nulldevname; /* We handle fragments by dealing with the first fragment as * if it was a normal packet. All other fragments are treated * normally, except that they will NEVER match rules that ask * things we don't know, ie. tcp syn flag or ports). If the * rule is also a fragment-specific rule, non-fragments won't * match it. */ acpar.fragoff = ntohs(ip->frag_off) & IP_OFFSET; acpar.thoff = ip_hdrlen(skb); acpar.hotdrop = false; acpar.net = state->net; acpar.in = state->in; acpar.out = state->out; acpar.family = NFPROTO_IPV4; acpar.hooknum = hook; IP_NF_ASSERT(table->valid_hooks & (1 << hook)); local_bh_disable(); addend = xt_write_recseq_begin(); private = table->private; cpu = smp_processor_id(); /* * Ensure we load private-> members after we've fetched the base * pointer. */ smp_read_barrier_depends(); table_base = private->entries; jumpstack = (struct ipt_entry **)private->jumpstack[cpu]; /* Switch to alternate jumpstack if we're being invoked via TEE. * TEE issues XT_CONTINUE verdict on original skb so we must not * clobber the jumpstack. * * For recursion via REJECT or SYNPROXY the stack will be clobbered * but it is no problem since absolute verdict is issued by these. 
*/ if (static_key_false(&xt_tee_enabled)) jumpstack += private->stacksize * __this_cpu_read(nf_skb_duplicated); e = get_entry(table_base, private->hook_entry[hook]); pr_debug("Entering %s(hook %u), UF %p\n", table->name, hook, get_entry(table_base, private->underflow[hook])); do { const struct xt_entry_target *t; const struct xt_entry_match *ematch; struct xt_counters *counter; IP_NF_ASSERT(e); if (!ip_packet_match(ip, indev, outdev, &e->ip, acpar.fragoff)) { no_match: e = ipt_next_entry(e); continue; } xt_ematch_foreach(ematch, e) { acpar.match = ematch->u.kernel.match; acpar.matchinfo = ematch->data; if (!acpar.match->match(skb, &acpar)) goto no_match; } counter = xt_get_this_cpu_counter(&e->counters); ADD_COUNTER(*counter, skb->len, 1); t = ipt_get_target(e); IP_NF_ASSERT(t->u.kernel.target); #if IS_ENABLED(CONFIG_NETFILTER_XT_TARGET_TRACE) /* The packet is traced: log it */ if (unlikely(skb->nf_trace)) trace_packet(state->net, skb, hook, state->in, state->out, table->name, private, e); #endif /* Standard target? */ if (!t->u.kernel.target->target) { int v; v = ((struct xt_standard_target *)t)->verdict; if (v < 0) { /* Pop from stack? */ if (v != XT_RETURN) { verdict = (unsigned int)(-v) - 1; break; } if (stackidx == 0) { e = get_entry(table_base, private->underflow[hook]); pr_debug("Underflow (this is normal) " "to %p\n", e); } else { e = jumpstack[--stackidx]; pr_debug("Pulled %p out from pos %u\n", e, stackidx); e = ipt_next_entry(e); } continue; } if (table_base + v != ipt_next_entry(e) && !(e->ip.flags & IPT_F_GOTO)) { jumpstack[stackidx++] = e; pr_debug("Pushed %p into pos %u\n", e, stackidx - 1); } e = get_entry(table_base, v); continue; } acpar.target = t->u.kernel.target; acpar.targinfo = t->data; verdict = t->u.kernel.target->target(skb, &acpar); /* Target might have changed stuff. 
*/ ip = ip_hdr(skb); if (verdict == XT_CONTINUE) e = ipt_next_entry(e); else /* Verdict */ break; } while (!acpar.hotdrop); pr_debug("Exiting %s; sp at %u\n", __func__, stackidx); xt_write_recseq_end(addend); local_bh_enable(); #ifdef DEBUG_ALLOW_ALL return NF_ACCEPT; #else if (acpar.hotdrop) return NF_DROP; else return verdict; #endif } /* Figures out from what hook each rule can be called: returns 0 if there are loops. Puts hook bitmask in comefrom. */ static int mark_source_chains(const struct xt_table_info *newinfo, unsigned int valid_hooks, void *entry0) { unsigned int hook; /* No recursion; use packet counter to save back ptrs (reset to 0 as we leave), and comefrom to save source hook bitmask */ for (hook = 0; hook < NF_INET_NUMHOOKS; hook++) { unsigned int pos = newinfo->hook_entry[hook]; struct ipt_entry *e = (struct ipt_entry *)(entry0 + pos); if (!(valid_hooks & (1 << hook))) continue; /* Set initial back pointer. */ e->counters.pcnt = pos; for (;;) { const struct xt_standard_target *t = (void *)ipt_get_target_c(e); int visited = e->comefrom & (1 << hook); if (e->comefrom & (1 << NF_INET_NUMHOOKS)) { pr_err("iptables: loop hook %u pos %u %08X.\n", hook, pos, e->comefrom); return 0; } e->comefrom |= ((1 << hook) | (1 << NF_INET_NUMHOOKS)); /* Unconditional return/END. */ if ((e->target_offset == sizeof(struct ipt_entry) && (strcmp(t->target.u.user.name, XT_STANDARD_TARGET) == 0) && t->verdict < 0 && unconditional(&e->ip)) || visited) { unsigned int oldpos, size; if ((strcmp(t->target.u.user.name, XT_STANDARD_TARGET) == 0) && t->verdict < -NF_MAX_VERDICT - 1) { duprintf("mark_source_chains: bad " "negative verdict (%i)\n", t->verdict); return 0; } /* Return: backtrack through the last big jump. 
*/ do { e->comefrom ^= (1<<NF_INET_NUMHOOKS); #ifdef DEBUG_IP_FIREWALL_USER if (e->comefrom & (1 << NF_INET_NUMHOOKS)) { duprintf("Back unset " "on hook %u " "rule %u\n", hook, pos); } #endif oldpos = pos; pos = e->counters.pcnt; e->counters.pcnt = 0; /* We're at the start. */ if (pos == oldpos) goto next; e = (struct ipt_entry *) (entry0 + pos); } while (oldpos == pos + e->next_offset); /* Move along one */ size = e->next_offset; e = (struct ipt_entry *) (entry0 + pos + size); e->counters.pcnt = pos; pos += size; } else { int newpos = t->verdict; if (strcmp(t->target.u.user.name, XT_STANDARD_TARGET) == 0 && newpos >= 0) { if (newpos > newinfo->size - sizeof(struct ipt_entry)) { duprintf("mark_source_chains: " "bad verdict (%i)\n", newpos); return 0; } /* This a jump; chase it. */ duprintf("Jump rule %u -> %u\n", pos, newpos); } else { /* ... this is a fallthru */ newpos = pos + e->next_offset; } e = (struct ipt_entry *) (entry0 + newpos); e->counters.pcnt = pos; pos = newpos; } } next: duprintf("Finished chain %u\n", hook); } return 1; } static void cleanup_match(struct xt_entry_match *m, struct net *net) { struct xt_mtdtor_param par; par.net = net; par.match = m->u.kernel.match; par.matchinfo = m->data; par.family = NFPROTO_IPV4; if (par.match->destroy != NULL) par.match->destroy(&par); module_put(par.match->me); } static int check_entry(const struct ipt_entry *e) { const struct xt_entry_target *t; if (!ip_checkentry(&e->ip)) return -EINVAL; if (e->target_offset + sizeof(struct xt_entry_target) > e->next_offset) return -EINVAL; t = ipt_get_target_c(e); if (e->target_offset + t->u.target_size > e->next_offset) return -EINVAL; return 0; } static int check_match(struct xt_entry_match *m, struct xt_mtchk_param *par) { const struct ipt_ip *ip = par->entryinfo; int ret; par->match = m->u.kernel.match; par->matchinfo = m->data; ret = xt_check_match(par, m->u.match_size - sizeof(*m), ip->proto, ip->invflags & IPT_INV_PROTO); if (ret < 0) { duprintf("check failed for 
`%s'.\n", par->match->name); return ret; } return 0; } static int find_check_match(struct xt_entry_match *m, struct xt_mtchk_param *par) { struct xt_match *match; int ret; match = xt_request_find_match(NFPROTO_IPV4, m->u.user.name, m->u.user.revision); if (IS_ERR(match)) { duprintf("find_check_match: `%s' not found\n", m->u.user.name); return PTR_ERR(match); } m->u.kernel.match = match; ret = check_match(m, par); if (ret) goto err; return 0; err: module_put(m->u.kernel.match->me); return ret; } static int check_target(struct ipt_entry *e, struct net *net, const char *name) { struct xt_entry_target *t = ipt_get_target(e); struct xt_tgchk_param par = { .net = net, .table = name, .entryinfo = e, .target = t->u.kernel.target, .targinfo = t->data, .hook_mask = e->comefrom, .family = NFPROTO_IPV4, }; int ret; ret = xt_check_target(&par, t->u.target_size - sizeof(*t), e->ip.proto, e->ip.invflags & IPT_INV_PROTO); if (ret < 0) { duprintf("check failed for `%s'.\n", t->u.kernel.target->name); return ret; } return 0; } static int find_check_entry(struct ipt_entry *e, struct net *net, const char *name, unsigned int size) { struct xt_entry_target *t; struct xt_target *target; int ret; unsigned int j; struct xt_mtchk_param mtpar; struct xt_entry_match *ematch; e->counters.pcnt = xt_percpu_counter_alloc(); if (IS_ERR_VALUE(e->counters.pcnt)) return -ENOMEM; j = 0; mtpar.net = net; mtpar.table = name; mtpar.entryinfo = &e->ip; mtpar.hook_mask = e->comefrom; mtpar.family = NFPROTO_IPV4; xt_ematch_foreach(ematch, e) { ret = find_check_match(ematch, &mtpar); if (ret != 0) goto cleanup_matches; ++j; } t = ipt_get_target(e); target = xt_request_find_target(NFPROTO_IPV4, t->u.user.name, t->u.user.revision); if (IS_ERR(target)) { duprintf("find_check_entry: `%s' not found\n", t->u.user.name); ret = PTR_ERR(target); goto cleanup_matches; } t->u.kernel.target = target; ret = check_target(e, net, name); if (ret) goto err; return 0; err: module_put(t->u.kernel.target->me); cleanup_matches: 
xt_ematch_foreach(ematch, e) {
        /* Only undo the j matches that were fully set up. */
        if (j-- == 0)
            break;
        cleanup_match(ematch, net);
    }

    xt_percpu_counter_free(e->counters.pcnt);

    return ret;
}

/* An underflow entry (per-hook default policy) must be unconditional and
 * use the STANDARD target with an absolute NF_DROP or NF_ACCEPT verdict.
 * Standard verdicts are encoded as -verdict - 1, hence the decode below. */
static bool
check_underflow(const struct ipt_entry *e)
{
    const struct xt_entry_target *t;
    unsigned int verdict;

    if (!unconditional(&e->ip))
        return false;
    t = ipt_get_target_c(e);
    if (strcmp(t->u.user.name, XT_STANDARD_TARGET) != 0)
        return false;
    verdict = ((struct xt_standard_target *)t)->verdict;
    verdict = -verdict - 1;
    return verdict == NF_DROP || verdict == NF_ACCEPT;
}

/* Validate alignment and basic size bounds of one entry in the
 * user-supplied blob, record which hook entry points / underflows it
 * corresponds to, and reset its counters/comefrom bookkeeping. */
static int
check_entry_size_and_hooks(struct ipt_entry *e,
                           struct xt_table_info *newinfo,
                           const unsigned char *base,
                           const unsigned char *limit,
                           const unsigned int *hook_entries,
                           const unsigned int *underflows,
                           unsigned int valid_hooks)
{
    unsigned int h;
    int err;

    if ((unsigned long)e % __alignof__(struct ipt_entry) != 0 ||
        (unsigned char *)e + sizeof(struct ipt_entry) >= limit) {
        duprintf("Bad offset %p\n", e);
        return -EINVAL;
    }

    if (e->next_offset
        < sizeof(struct ipt_entry) + sizeof(struct xt_entry_target)) {
        duprintf("checking: element %p size %u\n",
                 e, e->next_offset);
        return -EINVAL;
    }

    err = check_entry(e);
    if (err)
        return err;

    /* Check hooks & underflows */
    for (h = 0; h < NF_INET_NUMHOOKS; h++) {
        if (!(valid_hooks & (1 << h)))
            continue;
        if ((unsigned char *)e - base == hook_entries[h])
            newinfo->hook_entry[h] = hook_entries[h];
        if ((unsigned char *)e - base == underflows[h]) {
            if (!check_underflow(e)) {
                pr_err("Underflows must be unconditional and "
                       "use the STANDARD target with "
                       "ACCEPT/DROP\n");
                return -EINVAL;
            }
            newinfo->underflow[h] = underflows[h];
        }
    }

    /* Clear counters and comefrom */
    e->counters = ((struct xt_counters) { 0, 0 });
    e->comefrom = 0;
    return 0;
}

/* Tear down one fully-checked entry: destroy all matches, then the
 * target, dropping the module references taken during setup. */
static void
cleanup_entry(struct ipt_entry *e, struct net *net)
{
    struct xt_tgdtor_param par;
    struct xt_entry_target *t;
    struct xt_entry_match *ematch;

    /* Cleanup all matches */
    xt_ematch_foreach(ematch, e)
        cleanup_match(ematch, net);
    t = ipt_get_target(e);

    par.net = net;
    par.target =
t->u.kernel.target; par.targinfo = t->data; par.family = NFPROTO_IPV4; if (par.target->destroy != NULL) par.target->destroy(&par); module_put(par.target->me); xt_percpu_counter_free(e->counters.pcnt); } /* Checks and translates the user-supplied table segment (held in newinfo) */ static int translate_table(struct net *net, struct xt_table_info *newinfo, void *entry0, const struct ipt_replace *repl) { struct ipt_entry *iter; unsigned int i; int ret = 0; newinfo->size = repl->size; newinfo->number = repl->num_entries; /* Init all hooks to impossible value. */ for (i = 0; i < NF_INET_NUMHOOKS; i++) { newinfo->hook_entry[i] = 0xFFFFFFFF; newinfo->underflow[i] = 0xFFFFFFFF; } duprintf("translate_table: size %u\n", newinfo->size); i = 0; /* Walk through entries, checking offsets. */ xt_entry_foreach(iter, entry0, newinfo->size) { ret = check_entry_size_and_hooks(iter, newinfo, entry0, entry0 + repl->size, repl->hook_entry, repl->underflow, repl->valid_hooks); if (ret != 0) return ret; ++i; if (strcmp(ipt_get_target(iter)->u.user.name, XT_ERROR_TARGET) == 0) ++newinfo->stacksize; } if (i != repl->num_entries) { duprintf("translate_table: %u not %u entries\n", i, repl->num_entries); return -EINVAL; } /* Check hooks all assigned */ for (i = 0; i < NF_INET_NUMHOOKS; i++) { /* Only hooks which are valid */ if (!(repl->valid_hooks & (1 << i))) continue; if (newinfo->hook_entry[i] == 0xFFFFFFFF) { duprintf("Invalid hook entry %u %u\n", i, repl->hook_entry[i]); return -EINVAL; } if (newinfo->underflow[i] == 0xFFFFFFFF) { duprintf("Invalid underflow %u %u\n", i, repl->underflow[i]); return -EINVAL; } } if (!mark_source_chains(newinfo, repl->valid_hooks, entry0)) return -ELOOP; /* Finally, each sanity check must pass */ i = 0; xt_entry_foreach(iter, entry0, newinfo->size) { ret = find_check_entry(iter, net, repl->name, repl->size); if (ret != 0) break; ++i; } if (ret != 0) { xt_entry_foreach(iter, entry0, newinfo->size) { if (i-- == 0) break; cleanup_entry(iter, net); } return 
ret; } return ret; } static void get_counters(const struct xt_table_info *t, struct xt_counters counters[]) { struct ipt_entry *iter; unsigned int cpu; unsigned int i; for_each_possible_cpu(cpu) { seqcount_t *s = &per_cpu(xt_recseq, cpu); i = 0; xt_entry_foreach(iter, t->entries, t->size) { struct xt_counters *tmp; u64 bcnt, pcnt; unsigned int start; tmp = xt_get_per_cpu_counter(&iter->counters, cpu); do { start = read_seqcount_begin(s); bcnt = tmp->bcnt; pcnt = tmp->pcnt; } while (read_seqcount_retry(s, start)); ADD_COUNTER(counters[i], bcnt, pcnt); ++i; /* macro does multi eval of i */ } } } static struct xt_counters *alloc_counters(const struct xt_table *table) { unsigned int countersize; struct xt_counters *counters; const struct xt_table_info *private = table->private; /* We need atomic snapshot of counters: rest doesn't change (other than comefrom, which userspace doesn't care about). */ countersize = sizeof(struct xt_counters) * private->number; counters = vzalloc(countersize); if (counters == NULL) return ERR_PTR(-ENOMEM); get_counters(private, counters); return counters; } static int copy_entries_to_user(unsigned int total_size, const struct xt_table *table, void __user *userptr) { unsigned int off, num; const struct ipt_entry *e; struct xt_counters *counters; const struct xt_table_info *private = table->private; int ret = 0; const void *loc_cpu_entry; counters = alloc_counters(table); if (IS_ERR(counters)) return PTR_ERR(counters); loc_cpu_entry = private->entries; if (copy_to_user(userptr, loc_cpu_entry, total_size) != 0) { ret = -EFAULT; goto free_counters; } /* FIXME: use iterator macros --RR */ /* ... 
then go back and fix counters and names */ for (off = 0, num = 0; off < total_size; off += e->next_offset, num++){ unsigned int i; const struct xt_entry_match *m; const struct xt_entry_target *t; e = (struct ipt_entry *)(loc_cpu_entry + off); if (copy_to_user(userptr + off + offsetof(struct ipt_entry, counters), &counters[num], sizeof(counters[num])) != 0) { ret = -EFAULT; goto free_counters; } for (i = sizeof(struct ipt_entry); i < e->target_offset; i += m->u.match_size) { m = (void *)e + i; if (copy_to_user(userptr + off + i + offsetof(struct xt_entry_match, u.user.name), m->u.kernel.match->name, strlen(m->u.kernel.match->name)+1) != 0) { ret = -EFAULT; goto free_counters; } } t = ipt_get_target_c(e); if (copy_to_user(userptr + off + e->target_offset + offsetof(struct xt_entry_target, u.user.name), t->u.kernel.target->name, strlen(t->u.kernel.target->name)+1) != 0) { ret = -EFAULT; goto free_counters; } } free_counters: vfree(counters); return ret; } #ifdef CONFIG_COMPAT static void compat_standard_from_user(void *dst, const void *src) { int v = *(compat_int_t *)src; if (v > 0) v += xt_compat_calc_jump(AF_INET, v); memcpy(dst, &v, sizeof(v)); } static int compat_standard_to_user(void __user *dst, const void *src) { compat_int_t cv = *(int *)src; if (cv > 0) cv -= xt_compat_calc_jump(AF_INET, cv); return copy_to_user(dst, &cv, sizeof(cv)) ? 
-EFAULT : 0; } static int compat_calc_entry(const struct ipt_entry *e, const struct xt_table_info *info, const void *base, struct xt_table_info *newinfo) { const struct xt_entry_match *ematch; const struct xt_entry_target *t; unsigned int entry_offset; int off, i, ret; off = sizeof(struct ipt_entry) - sizeof(struct compat_ipt_entry); entry_offset = (void *)e - base; xt_ematch_foreach(ematch, e) off += xt_compat_match_offset(ematch->u.kernel.match); t = ipt_get_target_c(e); off += xt_compat_target_offset(t->u.kernel.target); newinfo->size -= off; ret = xt_compat_add_offset(AF_INET, entry_offset, off); if (ret) return ret; for (i = 0; i < NF_INET_NUMHOOKS; i++) { if (info->hook_entry[i] && (e < (struct ipt_entry *)(base + info->hook_entry[i]))) newinfo->hook_entry[i] -= off; if (info->underflow[i] && (e < (struct ipt_entry *)(base + info->underflow[i]))) newinfo->underflow[i] -= off; } return 0; } static int compat_table_info(const struct xt_table_info *info, struct xt_table_info *newinfo) { struct ipt_entry *iter; const void *loc_cpu_entry; int ret; if (!newinfo || !info) return -EINVAL; /* we dont care about newinfo->entries */ memcpy(newinfo, info, offsetof(struct xt_table_info, entries)); newinfo->initial_entries = 0; loc_cpu_entry = info->entries; xt_compat_init_offsets(AF_INET, info->number); xt_entry_foreach(iter, loc_cpu_entry, info->size) { ret = compat_calc_entry(iter, info, loc_cpu_entry, newinfo); if (ret != 0) return ret; } return 0; } #endif static int get_info(struct net *net, void __user *user, const int *len, int compat) { char name[XT_TABLE_MAXNAMELEN]; struct xt_table *t; int ret; if (*len != sizeof(struct ipt_getinfo)) { duprintf("length %u != %zu\n", *len, sizeof(struct ipt_getinfo)); return -EINVAL; } if (copy_from_user(name, user, sizeof(name)) != 0) return -EFAULT; name[XT_TABLE_MAXNAMELEN-1] = '\0'; #ifdef CONFIG_COMPAT if (compat) xt_compat_lock(AF_INET); #endif t = try_then_request_module(xt_find_table_lock(net, AF_INET, name), 
"iptable_%s", name); if (!IS_ERR_OR_NULL(t)) { struct ipt_getinfo info; const struct xt_table_info *private = t->private; #ifdef CONFIG_COMPAT struct xt_table_info tmp; if (compat) { ret = compat_table_info(private, &tmp); xt_compat_flush_offsets(AF_INET); private = &tmp; } #endif memset(&info, 0, sizeof(info)); info.valid_hooks = t->valid_hooks; memcpy(info.hook_entry, private->hook_entry, sizeof(info.hook_entry)); memcpy(info.underflow, private->underflow, sizeof(info.underflow)); info.num_entries = private->number; info.size = private->size; strcpy(info.name, name); if (copy_to_user(user, &info, *len) != 0) ret = -EFAULT; else ret = 0; xt_table_unlock(t); module_put(t->me); } else ret = t ? PTR_ERR(t) : -ENOENT; #ifdef CONFIG_COMPAT if (compat) xt_compat_unlock(AF_INET); #endif return ret; } static int get_entries(struct net *net, struct ipt_get_entries __user *uptr, const int *len) { int ret; struct ipt_get_entries get; struct xt_table *t; if (*len < sizeof(get)) { duprintf("get_entries: %u < %zu\n", *len, sizeof(get)); return -EINVAL; } if (copy_from_user(&get, uptr, sizeof(get)) != 0) return -EFAULT; if (*len != sizeof(struct ipt_get_entries) + get.size) { duprintf("get_entries: %u != %zu\n", *len, sizeof(get) + get.size); return -EINVAL; } t = xt_find_table_lock(net, AF_INET, get.name); if (!IS_ERR_OR_NULL(t)) { const struct xt_table_info *private = t->private; duprintf("t->private->number = %u\n", private->number); if (get.size == private->size) ret = copy_entries_to_user(private->size, t, uptr->entrytable); else { duprintf("get_entries: I've got %u not %u!\n", private->size, get.size); ret = -EAGAIN; } module_put(t->me); xt_table_unlock(t); } else ret = t ? 
PTR_ERR(t) : -ENOENT; return ret; } static int __do_replace(struct net *net, const char *name, unsigned int valid_hooks, struct xt_table_info *newinfo, unsigned int num_counters, void __user *counters_ptr) { int ret; struct xt_table *t; struct xt_table_info *oldinfo; struct xt_counters *counters; struct ipt_entry *iter; ret = 0; counters = vzalloc(num_counters * sizeof(struct xt_counters)); if (!counters) { ret = -ENOMEM; goto out; } t = try_then_request_module(xt_find_table_lock(net, AF_INET, name), "iptable_%s", name); if (IS_ERR_OR_NULL(t)) { ret = t ? PTR_ERR(t) : -ENOENT; goto free_newinfo_counters_untrans; } /* You lied! */ if (valid_hooks != t->valid_hooks) { duprintf("Valid hook crap: %08X vs %08X\n", valid_hooks, t->valid_hooks); ret = -EINVAL; goto put_module; } oldinfo = xt_replace_table(t, num_counters, newinfo, &ret); if (!oldinfo) goto put_module; /* Update module usage count based on number of rules */ duprintf("do_replace: oldnum=%u, initnum=%u, newnum=%u\n", oldinfo->number, oldinfo->initial_entries, newinfo->number); if ((oldinfo->number > oldinfo->initial_entries) || (newinfo->number <= oldinfo->initial_entries)) module_put(t->me); if ((oldinfo->number > oldinfo->initial_entries) && (newinfo->number <= oldinfo->initial_entries)) module_put(t->me); /* Get the old counters, and synchronize with replace */ get_counters(oldinfo, counters); /* Decrease module usage counts and free resource */ xt_entry_foreach(iter, oldinfo->entries, oldinfo->size) cleanup_entry(iter, net); xt_free_table_info(oldinfo); if (copy_to_user(counters_ptr, counters, sizeof(struct xt_counters) * num_counters) != 0) { /* Silent error, can't fail, new table is already in place */ net_warn_ratelimited("iptables: counters copy to user failed while replacing table\n"); } vfree(counters); xt_table_unlock(t); return ret; put_module: module_put(t->me); xt_table_unlock(t); free_newinfo_counters_untrans: vfree(counters); out: return ret; } static int do_replace(struct net *net, const 
void __user *user, unsigned int len)
{
    int ret;
    struct ipt_replace tmp;
    struct xt_table_info *newinfo;
    void *loc_cpu_entry;
    struct ipt_entry *iter;

    if (copy_from_user(&tmp, user, sizeof(tmp)) != 0)
        return -EFAULT;

    /* overflow check */
    if (tmp.num_counters >= INT_MAX / sizeof(struct xt_counters))
        return -ENOMEM;
    if (tmp.num_counters == 0)
        return -EINVAL;

    /* user-supplied name may lack a NUL terminator */
    tmp.name[sizeof(tmp.name)-1] = 0;

    newinfo = xt_alloc_table_info(tmp.size);
    if (!newinfo)
        return -ENOMEM;

    loc_cpu_entry = newinfo->entries;
    if (copy_from_user(loc_cpu_entry, user + sizeof(tmp),
                       tmp.size) != 0) {
        ret = -EFAULT;
        goto free_newinfo;
    }

    ret = translate_table(net, newinfo, loc_cpu_entry, &tmp);
    if (ret != 0)
        goto free_newinfo;

    duprintf("Translated table\n");

    ret = __do_replace(net, tmp.name, tmp.valid_hooks, newinfo,
                       tmp.num_counters, tmp.counters);
    if (ret)
        goto free_newinfo_untrans;
    return 0;

 free_newinfo_untrans:
    xt_entry_foreach(iter, loc_cpu_entry, newinfo->size)
        cleanup_entry(iter, net);
 free_newinfo:
    xt_free_table_info(newinfo);
    return ret;
}

/* IPT_SO_SET_ADD_COUNTERS handler: add userspace-supplied byte/packet
 * deltas to the per-cpu counters of every rule in the named table.
 * compat selects the 32-bit-on-64-bit request layout. */
static int
do_add_counters(struct net *net, const void __user *user,
                unsigned int len, int compat)
{
    unsigned int i;
    struct xt_counters_info tmp;
    struct xt_counters *paddc;
    unsigned int num_counters;
    const char *name;
    int size;
    void *ptmp;
    struct xt_table *t;
    const struct xt_table_info *private;
    int ret = 0;
    struct ipt_entry *iter;
    unsigned int addend;
#ifdef CONFIG_COMPAT
    struct compat_xt_counters_info compat_tmp;

    if (compat) {
        ptmp = &compat_tmp;
        size = sizeof(struct compat_xt_counters_info);
    } else
#endif
    {
        ptmp = &tmp;
        size = sizeof(struct xt_counters_info);
    }

    if (copy_from_user(ptmp, user, size) != 0)
        return -EFAULT;

#ifdef CONFIG_COMPAT
    if (compat) {
        num_counters = compat_tmp.num_counters;
        /* The name was copied verbatim from userspace and may not be
         * NUL-terminated; terminate it before xt_find_table_lock()
         * strcmp()s it (do_replace() already does the same). */
        compat_tmp.name[sizeof(compat_tmp.name) - 1] = '\0';
        name = compat_tmp.name;
    } else
#endif
    {
        num_counters = tmp.num_counters;
        /* Same userspace-termination guarantee issue as above. */
        tmp.name[sizeof(tmp.name) - 1] = '\0';
        name = tmp.name;
    }

    if (len != size + num_counters * sizeof(struct xt_counters))
        return -EINVAL;

    paddc = vmalloc(len - size);
    if (!paddc)
        return -ENOMEM;

    if (copy_from_user(paddc, user + size, len -
size) != 0) { ret = -EFAULT; goto free; } t = xt_find_table_lock(net, AF_INET, name); if (IS_ERR_OR_NULL(t)) { ret = t ? PTR_ERR(t) : -ENOENT; goto free; } local_bh_disable(); private = t->private; if (private->number != num_counters) { ret = -EINVAL; goto unlock_up_free; } i = 0; addend = xt_write_recseq_begin(); xt_entry_foreach(iter, private->entries, private->size) { struct xt_counters *tmp; tmp = xt_get_this_cpu_counter(&iter->counters); ADD_COUNTER(*tmp, paddc[i].bcnt, paddc[i].pcnt); ++i; } xt_write_recseq_end(addend); unlock_up_free: local_bh_enable(); xt_table_unlock(t); module_put(t->me); free: vfree(paddc); return ret; } #ifdef CONFIG_COMPAT struct compat_ipt_replace { char name[XT_TABLE_MAXNAMELEN]; u32 valid_hooks; u32 num_entries; u32 size; u32 hook_entry[NF_INET_NUMHOOKS]; u32 underflow[NF_INET_NUMHOOKS]; u32 num_counters; compat_uptr_t counters; /* struct xt_counters * */ struct compat_ipt_entry entries[0]; }; static int compat_copy_entry_to_user(struct ipt_entry *e, void __user **dstptr, unsigned int *size, struct xt_counters *counters, unsigned int i) { struct xt_entry_target *t; struct compat_ipt_entry __user *ce; u_int16_t target_offset, next_offset; compat_uint_t origsize; const struct xt_entry_match *ematch; int ret = 0; origsize = *size; ce = (struct compat_ipt_entry __user *)*dstptr; if (copy_to_user(ce, e, sizeof(struct ipt_entry)) != 0 || copy_to_user(&ce->counters, &counters[i], sizeof(counters[i])) != 0) return -EFAULT; *dstptr += sizeof(struct compat_ipt_entry); *size -= sizeof(struct ipt_entry) - sizeof(struct compat_ipt_entry); xt_ematch_foreach(ematch, e) { ret = xt_compat_match_to_user(ematch, dstptr, size); if (ret != 0) return ret; } target_offset = e->target_offset - (origsize - *size); t = ipt_get_target(e); ret = xt_compat_target_to_user(t, dstptr, size); if (ret) return ret; next_offset = e->next_offset - (origsize - *size); if (put_user(target_offset, &ce->target_offset) != 0 || put_user(next_offset, &ce->next_offset) != 0) 
return -EFAULT; return 0; } static int compat_find_calc_match(struct xt_entry_match *m, const char *name, const struct ipt_ip *ip, int *size) { struct xt_match *match; match = xt_request_find_match(NFPROTO_IPV4, m->u.user.name, m->u.user.revision); if (IS_ERR(match)) { duprintf("compat_check_calc_match: `%s' not found\n", m->u.user.name); return PTR_ERR(match); } m->u.kernel.match = match; *size += xt_compat_match_offset(match); return 0; } static void compat_release_entry(struct compat_ipt_entry *e) { struct xt_entry_target *t; struct xt_entry_match *ematch; /* Cleanup all matches */ xt_ematch_foreach(ematch, e) module_put(ematch->u.kernel.match->me); t = compat_ipt_get_target(e); module_put(t->u.kernel.target->me); } static int check_compat_entry_size_and_hooks(struct compat_ipt_entry *e, struct xt_table_info *newinfo, unsigned int *size, const unsigned char *base, const unsigned char *limit, const unsigned int *hook_entries, const unsigned int *underflows, const char *name) { struct xt_entry_match *ematch; struct xt_entry_target *t; struct xt_target *target; unsigned int entry_offset; unsigned int j; int ret, off, h; duprintf("check_compat_entry_size_and_hooks %p\n", e); if ((unsigned long)e % __alignof__(struct compat_ipt_entry) != 0 || (unsigned char *)e + sizeof(struct compat_ipt_entry) >= limit) { duprintf("Bad offset %p, limit = %p\n", e, limit); return -EINVAL; } if (e->next_offset < sizeof(struct compat_ipt_entry) + sizeof(struct compat_xt_entry_target)) { duprintf("checking: element %p size %u\n", e, e->next_offset); return -EINVAL; } /* For purposes of check_entry casting the compat entry is fine */ ret = check_entry((struct ipt_entry *)e); if (ret) return ret; off = sizeof(struct ipt_entry) - sizeof(struct compat_ipt_entry); entry_offset = (void *)e - (void *)base; j = 0; xt_ematch_foreach(ematch, e) { ret = compat_find_calc_match(ematch, name, &e->ip, &off); if (ret != 0) goto release_matches; ++j; } t = compat_ipt_get_target(e); target = 
xt_request_find_target(NFPROTO_IPV4, t->u.user.name, t->u.user.revision); if (IS_ERR(target)) { duprintf("check_compat_entry_size_and_hooks: `%s' not found\n", t->u.user.name); ret = PTR_ERR(target); goto release_matches; } t->u.kernel.target = target; off += xt_compat_target_offset(target); *size += off; ret = xt_compat_add_offset(AF_INET, entry_offset, off); if (ret) goto out; /* Check hooks & underflows */ for (h = 0; h < NF_INET_NUMHOOKS; h++) { if ((unsigned char *)e - base == hook_entries[h]) newinfo->hook_entry[h] = hook_entries[h]; if ((unsigned char *)e - base == underflows[h]) newinfo->underflow[h] = underflows[h]; } /* Clear counters and comefrom */ memset(&e->counters, 0, sizeof(e->counters)); e->comefrom = 0; return 0; out: module_put(t->u.kernel.target->me); release_matches: xt_ematch_foreach(ematch, e) { if (j-- == 0) break; module_put(ematch->u.kernel.match->me); } return ret; } static int compat_copy_entry_from_user(struct compat_ipt_entry *e, void **dstptr, unsigned int *size, const char *name, struct xt_table_info *newinfo, unsigned char *base) { struct xt_entry_target *t; struct xt_target *target; struct ipt_entry *de; unsigned int origsize; int ret, h; struct xt_entry_match *ematch; ret = 0; origsize = *size; de = (struct ipt_entry *)*dstptr; memcpy(de, e, sizeof(struct ipt_entry)); memcpy(&de->counters, &e->counters, sizeof(e->counters)); *dstptr += sizeof(struct ipt_entry); *size += sizeof(struct ipt_entry) - sizeof(struct compat_ipt_entry); xt_ematch_foreach(ematch, e) { ret = xt_compat_match_from_user(ematch, dstptr, size); if (ret != 0) return ret; } de->target_offset = e->target_offset - (origsize - *size); t = compat_ipt_get_target(e); target = t->u.kernel.target; xt_compat_target_from_user(t, dstptr, size); de->next_offset = e->next_offset - (origsize - *size); for (h = 0; h < NF_INET_NUMHOOKS; h++) { if ((unsigned char *)de - base < newinfo->hook_entry[h]) newinfo->hook_entry[h] -= origsize - *size; if ((unsigned char *)de - base < 
newinfo->underflow[h]) newinfo->underflow[h] -= origsize - *size; } return ret; } static int compat_check_entry(struct ipt_entry *e, struct net *net, const char *name) { struct xt_entry_match *ematch; struct xt_mtchk_param mtpar; unsigned int j; int ret = 0; e->counters.pcnt = xt_percpu_counter_alloc(); if (IS_ERR_VALUE(e->counters.pcnt)) return -ENOMEM; j = 0; mtpar.net = net; mtpar.table = name; mtpar.entryinfo = &e->ip; mtpar.hook_mask = e->comefrom; mtpar.family = NFPROTO_IPV4; xt_ematch_foreach(ematch, e) { ret = check_match(ematch, &mtpar); if (ret != 0) goto cleanup_matches; ++j; } ret = check_target(e, net, name); if (ret) goto cleanup_matches; return 0; cleanup_matches: xt_ematch_foreach(ematch, e) { if (j-- == 0) break; cleanup_match(ematch, net); } xt_percpu_counter_free(e->counters.pcnt); return ret; } static int translate_compat_table(struct net *net, const char *name, unsigned int valid_hooks, struct xt_table_info **pinfo, void **pentry0, unsigned int total_size, unsigned int number, unsigned int *hook_entries, unsigned int *underflows) { unsigned int i, j; struct xt_table_info *newinfo, *info; void *pos, *entry0, *entry1; struct compat_ipt_entry *iter0; struct ipt_entry *iter1; unsigned int size; int ret; info = *pinfo; entry0 = *pentry0; size = total_size; info->number = number; /* Init all hooks to impossible value. */ for (i = 0; i < NF_INET_NUMHOOKS; i++) { info->hook_entry[i] = 0xFFFFFFFF; info->underflow[i] = 0xFFFFFFFF; } duprintf("translate_compat_table: size %u\n", info->size); j = 0; xt_compat_lock(AF_INET); xt_compat_init_offsets(AF_INET, number); /* Walk through entries, checking offsets. 
*/ xt_entry_foreach(iter0, entry0, total_size) { ret = check_compat_entry_size_and_hooks(iter0, info, &size, entry0, entry0 + total_size, hook_entries, underflows, name); if (ret != 0) goto out_unlock; ++j; } ret = -EINVAL; if (j != number) { duprintf("translate_compat_table: %u not %u entries\n", j, number); goto out_unlock; } /* Check hooks all assigned */ for (i = 0; i < NF_INET_NUMHOOKS; i++) { /* Only hooks which are valid */ if (!(valid_hooks & (1 << i))) continue; if (info->hook_entry[i] == 0xFFFFFFFF) { duprintf("Invalid hook entry %u %u\n", i, hook_entries[i]); goto out_unlock; } if (info->underflow[i] == 0xFFFFFFFF) { duprintf("Invalid underflow %u %u\n", i, underflows[i]); goto out_unlock; } } ret = -ENOMEM; newinfo = xt_alloc_table_info(size); if (!newinfo) goto out_unlock; newinfo->number = number; for (i = 0; i < NF_INET_NUMHOOKS; i++) { newinfo->hook_entry[i] = info->hook_entry[i]; newinfo->underflow[i] = info->underflow[i]; } entry1 = newinfo->entries; pos = entry1; size = total_size; xt_entry_foreach(iter0, entry0, total_size) { ret = compat_copy_entry_from_user(iter0, &pos, &size, name, newinfo, entry1); if (ret != 0) break; } xt_compat_flush_offsets(AF_INET); xt_compat_unlock(AF_INET); if (ret) goto free_newinfo; ret = -ELOOP; if (!mark_source_chains(newinfo, valid_hooks, entry1)) goto free_newinfo; i = 0; xt_entry_foreach(iter1, entry1, newinfo->size) { ret = compat_check_entry(iter1, net, name); if (ret != 0) break; ++i; if (strcmp(ipt_get_target(iter1)->u.user.name, XT_ERROR_TARGET) == 0) ++newinfo->stacksize; } if (ret) { /* * The first i matches need cleanup_entry (calls ->destroy) * because they had called ->check already. The other j-i * entries need only release. 
 */
    /* --- tail of translate_compat_table(); the entry point begins above
     * this chunk.  Error path: release the compat entries that were
     * converted so far, undo the checks on the new-format copies, and
     * free the freshly built table. --- */
    int skip = i;

    j -= i;
    xt_entry_foreach(iter0, entry0, newinfo->size) {
        if (skip-- > 0)
            continue;
        if (j-- == 0)
            break;
        compat_release_entry(iter0);
    }
    xt_entry_foreach(iter1, entry1, newinfo->size) {
        if (i-- == 0)
            break;
        cleanup_entry(iter1, net);
    }
    xt_free_table_info(newinfo);
    return ret;
    }

    /* Success: hand the translated table back to the caller and free
     * the original compat-format buffer. */
    *pinfo = newinfo;
    *pentry0 = entry1;
    xt_free_table_info(info);
    return 0;

free_newinfo:
    xt_free_table_info(newinfo);
out:
    /* Release only the compat entries that passed check_compat_entry_size_and_hooks
     * (j counts them); the rest were never referenced. */
    xt_entry_foreach(iter0, entry0, total_size) {
        if (j-- == 0)
            break;
        compat_release_entry(iter0);
    }
    return ret;
out_unlock:
    xt_compat_flush_offsets(AF_INET);
    xt_compat_unlock(AF_INET);
    goto out;
}

/*
 * compat setsockopt(IPT_SO_SET_REPLACE) handler: copy the 32-bit
 * ipt_replace header and ruleset blob from userspace, translate the
 * compat layout to the native one, and install it via __do_replace().
 * Returns 0 on success or a negative errno.
 */
static int
compat_do_replace(struct net *net, void __user *user, unsigned int len)
{
    int ret;
    struct compat_ipt_replace tmp;
    struct xt_table_info *newinfo;
    void *loc_cpu_entry;
    struct ipt_entry *iter;

    if (copy_from_user(&tmp, user, sizeof(tmp)) != 0)
        return -EFAULT;

    /* overflow check: tmp.size and tmp.num_counters are user-controlled,
     * so bound them before they feed any allocation-size arithmetic. */
    if (tmp.size >= INT_MAX / num_possible_cpus())
        return -ENOMEM;
    if (tmp.num_counters >= INT_MAX / sizeof(struct xt_counters))
        return -ENOMEM;
    if (tmp.num_counters == 0)
        return -EINVAL;

    /* Force NUL-termination of the user-supplied table name. */
    tmp.name[sizeof(tmp.name)-1] = 0;

    newinfo = xt_alloc_table_info(tmp.size);
    if (!newinfo)
        return -ENOMEM;

    loc_cpu_entry = newinfo->entries;
    if (copy_from_user(loc_cpu_entry, user + sizeof(tmp),
               tmp.size) != 0) {
        ret = -EFAULT;
        goto free_newinfo;
    }

    ret = translate_compat_table(net, tmp.name, tmp.valid_hooks,
                     &newinfo, &loc_cpu_entry, tmp.size,
                     tmp.num_entries, tmp.hook_entry,
                     tmp.underflow);
    if (ret != 0)
        goto free_newinfo;

    duprintf("compat_do_replace: Translated table\n");

    ret = __do_replace(net, tmp.name, tmp.valid_hooks, newinfo,
               tmp.num_counters, compat_ptr(tmp.counters));
    if (ret)
        goto free_newinfo_untrans;
    return 0;

 free_newinfo_untrans:
    /* Table was translated but never installed: undo per-entry setup. */
    xt_entry_foreach(iter, loc_cpu_entry, newinfo->size)
        cleanup_entry(iter, net);
 free_newinfo:
    xt_free_table_info(newinfo);
    return ret;
}

/* compat setsockopt dispatcher; requires CAP_NET_ADMIN in the socket's
 * user namespace. */
static int
compat_do_ipt_set_ctl(struct sock *sk, int cmd, void __user *user,
              unsigned int len)
{
    int ret;

    if (!ns_capable(sock_net(sk)->user_ns, CAP_NET_ADMIN))
        return -EPERM;

    switch (cmd) {
    case IPT_SO_SET_REPLACE:
        ret = compat_do_replace(sock_net(sk), user, len);
        break;

    case IPT_SO_SET_ADD_COUNTERS:
        /* final arg 1 == compat counter layout */
        ret = do_add_counters(sock_net(sk), user, len, 1);
        break;

    default:
        duprintf("do_ipt_set_ctl: unknown request %i\n", cmd);
        ret = -EINVAL;
    }

    return ret;
}

/* Userspace ABI for compat IPT_SO_GET_ENTRIES: fixed header followed by
 * a variable-length array of compat entries. */
struct compat_ipt_get_entries {
    char name[XT_TABLE_MAXNAMELEN];
    compat_uint_t size;
    struct compat_ipt_entry entrytable[0];
};

/*
 * Copy the whole ruleset of @table to userspace in compat layout,
 * together with an atomic snapshot of the counters.
 */
static int
compat_copy_entries_to_user(unsigned int total_size, struct xt_table *table,
                void __user *userptr)
{
    struct xt_counters *counters;
    const struct xt_table_info *private = table->private;
    void __user *pos;
    unsigned int size;
    int ret = 0;
    unsigned int i = 0;
    struct ipt_entry *iter;

    counters = alloc_counters(table);
    if (IS_ERR(counters))
        return PTR_ERR(counters);

    pos = userptr;
    size = total_size;
    xt_entry_foreach(iter, private->entries, total_size) {
        ret = compat_copy_entry_to_user(iter, &pos,
                        &size, counters, i++);
        if (ret != 0)
            break;
    }

    vfree(counters);
    return ret;
}

/*
 * compat IPT_SO_GET_ENTRIES: validate the user-supplied length against
 * the table's compat-translated size, then dump the entries.
 * NOTE(review): *len is compared against sizeof(get) + get.size where
 * get.size is user-controlled; presumably callers rely on the earlier
 * *len < sizeof(get) check to avoid underflow -- verify the addition
 * cannot wrap on this kernel branch.
 */
static int
compat_get_entries(struct net *net, struct compat_ipt_get_entries __user *uptr,
           int *len)
{
    int ret;
    struct compat_ipt_get_entries get;
    struct xt_table *t;

    if (*len < sizeof(get)) {
        duprintf("compat_get_entries: %u < %zu\n", *len, sizeof(get));
        return -EINVAL;
    }

    if (copy_from_user(&get, uptr, sizeof(get)) != 0)
        return -EFAULT;

    if (*len != sizeof(struct compat_ipt_get_entries) + get.size) {
        duprintf("compat_get_entries: %u != %zu\n",
             *len, sizeof(get) + get.size);
        return -EINVAL;
    }

    xt_compat_lock(AF_INET);
    t = xt_find_table_lock(net, AF_INET, get.name);
    if (!IS_ERR_OR_NULL(t)) {
        const struct xt_table_info *private = t->private;
        struct xt_table_info info;
        duprintf("t->private->number = %u\n", private->number);
        /* Recompute the compat size and only copy if it matches what
         * userspace asked for. */
        ret = compat_table_info(private, &info);
        if (!ret && get.size == info.size) {
            ret = compat_copy_entries_to_user(private->size,
                              t, uptr->entrytable);
        } else if (!ret) {
            duprintf("compat_get_entries: I've got %u not %u!\n",
                 private->size, get.size);
            ret = -EAGAIN;
        }
        xt_compat_flush_offsets(AF_INET);
        module_put(t->me);
        xt_table_unlock(t);
    } else
        ret = t ? PTR_ERR(t) : -ENOENT;

    xt_compat_unlock(AF_INET);
    return ret;
}

static int do_ipt_get_ctl(struct sock *, int, void __user *, int *);

/* compat getsockopt dispatcher; falls through to the native handler for
 * commands with no compat-specific layout. */
static int
compat_do_ipt_get_ctl(struct sock *sk, int cmd, void __user *user, int *len)
{
    int ret;

    if (!ns_capable(sock_net(sk)->user_ns, CAP_NET_ADMIN))
        return -EPERM;

    switch (cmd) {
    case IPT_SO_GET_INFO:
        ret = get_info(sock_net(sk), user, len, 1);
        break;
    case IPT_SO_GET_ENTRIES:
        ret = compat_get_entries(sock_net(sk), user, len);
        break;
    default:
        ret = do_ipt_get_ctl(sk, cmd, user, len);
    }
    return ret;
}
#endif

/* Native setsockopt dispatcher; requires CAP_NET_ADMIN. */
static int
do_ipt_set_ctl(struct sock *sk, int cmd, void __user *user, unsigned int len)
{
    int ret;

    if (!ns_capable(sock_net(sk)->user_ns, CAP_NET_ADMIN))
        return -EPERM;

    switch (cmd) {
    case IPT_SO_SET_REPLACE:
        ret = do_replace(sock_net(sk), user, len);
        break;

    case IPT_SO_SET_ADD_COUNTERS:
        /* final arg 0 == native counter layout */
        ret = do_add_counters(sock_net(sk), user, len, 0);
        break;

    default:
        duprintf("do_ipt_set_ctl: unknown request %i\n", cmd);
        ret = -EINVAL;
    }

    return ret;
}

/* Native getsockopt dispatcher: table info, entry dump and match/target
 * revision queries. Requires CAP_NET_ADMIN. */
static int
do_ipt_get_ctl(struct sock *sk, int cmd, void __user *user, int *len)
{
    int ret;

    if (!ns_capable(sock_net(sk)->user_ns, CAP_NET_ADMIN))
        return -EPERM;

    switch (cmd) {
    case IPT_SO_GET_INFO:
        ret = get_info(sock_net(sk), user, len, 0);
        break;

    case IPT_SO_GET_ENTRIES:
        ret = get_entries(sock_net(sk), user, len);
        break;

    case IPT_SO_GET_REVISION_MATCH:
    case IPT_SO_GET_REVISION_TARGET: {
        struct xt_get_revision rev;
        int target;

        if (*len != sizeof(rev)) {
            ret = -EINVAL;
            break;
        }
        if (copy_from_user(&rev, user, sizeof(rev)) != 0) {
            ret = -EFAULT;
            break;
        }
        /* Force NUL-termination of the user-supplied name. */
        rev.name[sizeof(rev.name)-1] = 0;

        if (cmd == IPT_SO_GET_REVISION_TARGET)
            target = 1;
        else
            target = 0;

        /* Auto-load the extension module if it is not yet present. */
        try_then_request_module(xt_find_revision(AF_INET, rev.name,
                             rev.revision,
                             target, &ret),
                    "ipt_%s", rev.name);
        break;
    }

    default:
        duprintf("do_ipt_get_ctl: unknown request %i\n", cmd);
        ret = -EINVAL;
    }

    return ret;
}

/* Tear down @table: detach it from x_tables, destroy every entry and
 * drop the module reference taken for non-initial rules. */
static void __ipt_unregister_table(struct net *net, struct xt_table *table)
{
    struct xt_table_info *private;
    void *loc_cpu_entry;
    struct module *table_owner = table->me;
    struct ipt_entry *iter;

    private = xt_unregister_table(table);

    /* Decrease module usage counts and free resources */
    loc_cpu_entry = private->entries;
    xt_entry_foreach(iter, loc_cpu_entry, private->size)
        cleanup_entry(iter, net);
    if (private->number > private->initial_entries)
        module_put(table_owner);
    xt_free_table_info(private);
}

/*
 * Register an iptables table with its initial ruleset @repl and hook it
 * into the netfilter chains described by @ops.  On success *res points
 * at the live table; it is published with WRITE_ONCE before the hooks
 * are armed so packets arriving immediately see a valid pointer.
 */
int ipt_register_table(struct net *net, const struct xt_table *table,
               const struct ipt_replace *repl,
               const struct nf_hook_ops *ops, struct xt_table **res)
{
    int ret;
    struct xt_table_info *newinfo;
    struct xt_table_info bootstrap = {0};
    void *loc_cpu_entry;
    struct xt_table *new_table;

    newinfo = xt_alloc_table_info(repl->size);
    if (!newinfo)
        return -ENOMEM;

    loc_cpu_entry = newinfo->entries;
    memcpy(loc_cpu_entry, repl->entries, repl->size);

    ret = translate_table(net, newinfo, loc_cpu_entry, repl);
    if (ret != 0)
        goto out_free;

    new_table = xt_register_table(net, table, &bootstrap, newinfo);
    if (IS_ERR(new_table)) {
        ret = PTR_ERR(new_table);
        goto out_free;
    }

    /* set res now, will see skbs right after nf_register_net_hooks */
    WRITE_ONCE(*res, new_table);

    ret = nf_register_net_hooks(net, ops, hweight32(table->valid_hooks));
    if (ret != 0) {
        __ipt_unregister_table(net, new_table);
        *res = NULL;
    }

    return ret;

out_free:
    xt_free_table_info(newinfo);
    return ret;
}

/* Unhook and destroy a table registered with ipt_register_table(). */
void ipt_unregister_table(struct net *net, struct xt_table *table,
              const struct nf_hook_ops *ops)
{
    nf_unregister_net_hooks(net, ops, hweight32(table->valid_hooks));
    __ipt_unregister_table(net, table);
}

/* Returns 1 if the type and code is matched by the range, 0 otherwise */
/* test_type 0xFF acts as a wildcard matching any ICMP type. */
static inline bool
icmp_type_code_match(u_int8_t test_type, u_int8_t min_code, u_int8_t max_code,
             u_int8_t type, u_int8_t code,
             bool invert)
{
    return ((test_type == 0xFF) ||
        (type == test_type && code >= min_code && code <= max_code))
        ^ invert;
}
/*
 * Built-in "icmp" match: compare the packet's ICMP type/code against the
 * rule's configured range.  Hot path; runs under the table's read-side
 * protection.
 */
static bool
icmp_match(const struct sk_buff *skb, struct xt_action_param *par)
{
    const struct icmphdr *ic;
    struct icmphdr _icmph;
    const struct ipt_icmp *icmpinfo = par->matchinfo;

    /* Must not be a fragment. */
    if (par->fragoff != 0)
        return false;

    ic = skb_header_pointer(skb, par->thoff, sizeof(_icmph), &_icmph);
    if (ic == NULL) {
        /* We've been asked to examine this packet, and we
         * can't.  Hence, no choice but to drop.
         */
        duprintf("Dropping evil ICMP tinygram.\n");
        par->hotdrop = true;
        return false;
    }

    return icmp_type_code_match(icmpinfo->type,
                    icmpinfo->code[0],
                    icmpinfo->code[1],
                    ic->type, ic->code,
                    !!(icmpinfo->invflags&IPT_ICMP_INV));
}

/* Validate user-supplied icmp match data at rule-load time. */
static int icmp_checkentry(const struct xt_mtchk_param *par)
{
    const struct ipt_icmp *icmpinfo = par->matchinfo;

    /* Must specify no unknown invflags */
    return (icmpinfo->invflags & ~IPT_ICMP_INV) ? -EINVAL : 0;
}

/* Built-in targets: STANDARD (verdict, .target == NULL so the core
 * handles it inline) and ERROR (chain-name marker / load-time error). */
static struct xt_target ipt_builtin_tg[] __read_mostly = {
    {
        .name             = XT_STANDARD_TARGET,
        .targetsize       = sizeof(int),
        .family           = NFPROTO_IPV4,
#ifdef CONFIG_COMPAT
        .compatsize       = sizeof(compat_int_t),
        .compat_from_user = compat_standard_from_user,
        .compat_to_user   = compat_standard_to_user,
#endif
    },
    {
        .name             = XT_ERROR_TARGET,
        .target           = ipt_error,
        .targetsize       = XT_FUNCTION_MAXNAMELEN,
        .family           = NFPROTO_IPV4,
    },
};

/* get/setsockopt registration for the IPT_SO_* option range. */
static struct nf_sockopt_ops ipt_sockopts = {
    .pf         = PF_INET,
    .set_optmin = IPT_BASE_CTL,
    .set_optmax = IPT_SO_SET_MAX+1,
    .set        = do_ipt_set_ctl,
#ifdef CONFIG_COMPAT
    .compat_set = compat_do_ipt_set_ctl,
#endif
    .get_optmin = IPT_BASE_CTL,
    .get_optmax = IPT_SO_GET_MAX+1,
    .get        = do_ipt_get_ctl,
#ifdef CONFIG_COMPAT
    .compat_get = compat_do_ipt_get_ctl,
#endif
    .owner      = THIS_MODULE,
};

/* Built-in matches: only "icmp" lives in this module. */
static struct xt_match ipt_builtin_mt[] __read_mostly = {
    {
        .name       = "icmp",
        .match      = icmp_match,
        .matchsize  = sizeof(struct ipt_icmp),
        .checkentry = icmp_checkentry,
        .proto      = IPPROTO_ICMP,
        .family     = NFPROTO_IPV4,
    },
};

/* Per-network-namespace init: set up the IPv4 x_tables state. */
static int __net_init ip_tables_net_init(struct net *net)
{
    return xt_proto_init(net, NFPROTO_IPV4);
}

/* Per-network-namespace teardown. */
static void __net_exit ip_tables_net_exit(struct net *net)
{
    xt_proto_fini(net, NFPROTO_IPV4);
}

static struct pernet_operations ip_tables_net_ops = {
    .init = ip_tables_net_init,
    .exit = ip_tables_net_exit,
};

/*
 * Module init: register pernet state, built-in targets/matches, then the
 * sockopt interface.  The error labels unwind in exact reverse order of
 * registration -- keep them matched if anything is added here.
 */
static int __init ip_tables_init(void)
{
    int ret;

    ret = register_pernet_subsys(&ip_tables_net_ops);
    if (ret < 0)
        goto err1;

    /* No one else will be downing sem now, so we won't sleep */
    ret = xt_register_targets(ipt_builtin_tg, ARRAY_SIZE(ipt_builtin_tg));
    if (ret < 0)
        goto err2;
    ret = xt_register_matches(ipt_builtin_mt, ARRAY_SIZE(ipt_builtin_mt));
    if (ret < 0)
        goto err4;

    /* Register setsockopt */
    ret = nf_register_sockopt(&ipt_sockopts);
    if (ret < 0)
        goto err5;

    pr_info("(C) 2000-2006 Netfilter Core Team\n");
    return 0;

err5:
    xt_unregister_matches(ipt_builtin_mt, ARRAY_SIZE(ipt_builtin_mt));
err4:
    xt_unregister_targets(ipt_builtin_tg, ARRAY_SIZE(ipt_builtin_tg));
err2:
    unregister_pernet_subsys(&ip_tables_net_ops);
err1:
    return ret;
}

/* Module exit: mirror of ip_tables_init() in reverse order. */
static void __exit ip_tables_fini(void)
{
    nf_unregister_sockopt(&ipt_sockopts);

    xt_unregister_matches(ipt_builtin_mt, ARRAY_SIZE(ipt_builtin_mt));
    xt_unregister_targets(ipt_builtin_tg, ARRAY_SIZE(ipt_builtin_tg));
    unregister_pernet_subsys(&ip_tables_net_ops);
}

EXPORT_SYMBOL(ipt_register_table);
EXPORT_SYMBOL(ipt_unregister_table);
EXPORT_SYMBOL(ipt_do_table);
module_init(ip_tables_init);
module_exit(ip_tables_fini);
/*
 * Packet matching code.
 *
 * Copyright (C) 1999 Paul `Rusty' Russell & Michael J. Neuling
 * Copyright (C) 2000-2005 Netfilter Core Team <coreteam@netfilter.org>
 * Copyright (C) 2006-2010 Patrick McHardy <kaber@trash.net>
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 */
#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
#include <linux/cache.h>
#include <linux/capability.h>
#include <linux/skbuff.h>
#include <linux/kmod.h>
#include <linux/vmalloc.h>
#include <linux/netdevice.h>
#include <linux/module.h>
#include <linux/icmp.h>
#include <net/ip.h>
#include <net/compat.h>
#include <asm/uaccess.h>
#include <linux/mutex.h>
#include <linux/proc_fs.h>
#include <linux/err.h>
#include <linux/cpumask.h>

#include <linux/netfilter/x_tables.h>
#include <linux/netfilter_ipv4/ip_tables.h>
#include <net/netfilter/nf_log.h>
#include "../../netfilter/xt_repldata.h"

MODULE_LICENSE("GPL");
MODULE_AUTHOR("Netfilter Core Team <coreteam@netfilter.org>");
MODULE_DESCRIPTION("IPv4 packet filter");

/*#define DEBUG_IP_FIREWALL*/
/*#define DEBUG_ALLOW_ALL*/ /* Useful for remote debugging */
/*#define DEBUG_IP_FIREWALL_USER*/

/* dprintf: packet-path debug; duprintf: userspace-interface debug.
 * Both compile away unless the corresponding macro above is enabled. */
#ifdef DEBUG_IP_FIREWALL
#define dprintf(format, args...) pr_info(format , ## args)
#else
#define dprintf(format, args...)
#endif

#ifdef DEBUG_IP_FIREWALL_USER
#define duprintf(format, args...) pr_info(format , ## args)
#else
#define duprintf(format, args...)
#endif

#ifdef CONFIG_NETFILTER_DEBUG
#define IP_NF_ASSERT(x) WARN_ON(!(x))
#else
#define IP_NF_ASSERT(x)
#endif

#if 0
/* All the better to debug you with... */
#define static
#define inline
#endif

/* Build the boilerplate initial table (repldata template) for a new
 * iptables table registration. */
void *ipt_alloc_initial_table(const struct xt_table *info)
{
    return xt_alloc_initial_table(ipt, IPT);
}
EXPORT_SYMBOL_GPL(ipt_alloc_initial_table);

/* Returns whether matches rule or not.
 */
/* Performance critical - called for every packet */
/* Match a packet's IP header against the rule's address/interface/
 * protocol/fragment selectors.  FWINV applies the rule's invert flag
 * to each individual test. */
static inline bool
ip_packet_match(const struct iphdr *ip,
        const char *indev,
        const char *outdev,
        const struct ipt_ip *ipinfo,
        int isfrag)
{
    unsigned long ret;

#define FWINV(bool, invflg) ((bool) ^ !!(ipinfo->invflags & (invflg)))

    if (FWINV((ip->saddr&ipinfo->smsk.s_addr) != ipinfo->src.s_addr,
          IPT_INV_SRCIP) ||
        FWINV((ip->daddr&ipinfo->dmsk.s_addr) != ipinfo->dst.s_addr,
          IPT_INV_DSTIP)) {
        dprintf("Source or dest mismatch.\n");

        dprintf("SRC: %pI4. Mask: %pI4. Target: %pI4.%s\n",
            &ip->saddr, &ipinfo->smsk.s_addr, &ipinfo->src.s_addr,
            ipinfo->invflags & IPT_INV_SRCIP ? " (INV)" : "");
        dprintf("DST: %pI4 Mask: %pI4 Target: %pI4.%s\n",
            &ip->daddr, &ipinfo->dmsk.s_addr, &ipinfo->dst.s_addr,
            ipinfo->invflags & IPT_INV_DSTIP ? " (INV)" : "");
        return false;
    }

    ret = ifname_compare_aligned(indev, ipinfo->iniface,
                     ipinfo->iniface_mask);

    if (FWINV(ret != 0, IPT_INV_VIA_IN)) {
        dprintf("VIA in mismatch (%s vs %s).%s\n",
            indev, ipinfo->iniface,
            ipinfo->invflags & IPT_INV_VIA_IN ? " (INV)" : "");
        return false;
    }

    ret = ifname_compare_aligned(outdev, ipinfo->outiface,
                     ipinfo->outiface_mask);

    if (FWINV(ret != 0, IPT_INV_VIA_OUT)) {
        dprintf("VIA out mismatch (%s vs %s).%s\n",
            outdev, ipinfo->outiface,
            ipinfo->invflags & IPT_INV_VIA_OUT ? " (INV)" : "");
        return false;
    }

    /* Check specific protocol */
    if (ipinfo->proto &&
        FWINV(ip->protocol != ipinfo->proto, IPT_INV_PROTO)) {
        dprintf("Packet protocol %hi does not match %hi.%s\n",
            ip->protocol, ipinfo->proto,
            ipinfo->invflags & IPT_INV_PROTO ? " (INV)" : "");
        return false;
    }

    /* If we have a fragment rule but the packet is not a fragment
     * then we return zero */
    if (FWINV((ipinfo->flags&IPT_F_FRAG) && !isfrag, IPT_INV_FRAG)) {
        dprintf("Fragment rule but not fragment.%s\n",
            ipinfo->invflags & IPT_INV_FRAG ? " (INV)" : "");
        return false;
    }

    return true;
}

/* Load-time sanity check of the rule's IP selector flags. */
static bool
ip_checkentry(const struct ipt_ip *ip)
{
    if (ip->flags & ~IPT_F_MASK) {
        duprintf("Unknown flag bits set: %08X\n",
             ip->flags & ~IPT_F_MASK);
        return false;
    }
    if (ip->invflags & ~IPT_INV_MASK) {
        duprintf("Unknown invflag bits set: %08X\n",
             ip->invflags & ~IPT_INV_MASK);
        return false;
    }
    return true;
}

/* ERROR target handler: should never fire on a packet; drop and log. */
static unsigned int
ipt_error(struct sk_buff *skb, const struct xt_action_param *par)
{
    net_info_ratelimited("error: `%s'\n", (const char *)par->targinfo);
    return NF_DROP;
}

/* Performance critical */
/* Translate a byte offset within the table blob into an entry pointer. */
static inline struct ipt_entry *
get_entry(const void *base, unsigned int offset)
{
    return (struct ipt_entry *)(base + offset);
}

/* All zeroes == unconditional rule. */
/* Mildly perf critical (only if packet tracing is on) */
static inline bool unconditional(const struct ipt_ip *ip)
{
    static const struct ipt_ip uncond;

    return memcmp(ip, &uncond, sizeof(uncond)) == 0;
#undef FWINV
}

/* for const-correctness */
static inline const struct xt_entry_target *
ipt_get_target_c(const struct ipt_entry *e)
{
    return ipt_get_target((struct ipt_entry *)e);
}

#if IS_ENABLED(CONFIG_NETFILTER_XT_TARGET_TRACE)
/* Hook-index -> chain-name strings used in TRACE log lines. */
static const char *const hooknames[] = {
    [NF_INET_PRE_ROUTING]  = "PREROUTING",
    [NF_INET_LOCAL_IN]     = "INPUT",
    [NF_INET_FORWARD]      = "FORWARD",
    [NF_INET_LOCAL_OUT]    = "OUTPUT",
    [NF_INET_POST_ROUTING] = "POSTROUTING",
};

enum nf_ip_trace_comments {
    NF_IP_TRACE_COMMENT_RULE,
    NF_IP_TRACE_COMMENT_RETURN,
    NF_IP_TRACE_COMMENT_POLICY,
};

static const char *const comments[] = {
    [NF_IP_TRACE_COMMENT_RULE]   = "rule",
    [NF_IP_TRACE_COMMENT_RETURN] = "return",
    [NF_IP_TRACE_COMMENT_POLICY] = "policy",
};

static struct nf_loginfo trace_loginfo = {
    .type = NF_LOG_TYPE_LOG,
    .u = {
        .log = {
            .level = 4,
            .logflags = NF_LOG_MASK,
        },
    },
};

/* Mildly perf critical (only if packet tracing is on) */
/* Walk rules from the chain head towards @e, tracking the containing
 * chain name and the rule number; returns 1 once @e is reached. */
static inline int
get_chainname_rulenum(const struct ipt_entry *s, const struct ipt_entry *e,
              const char *hookname, const char **chainname,
              const char **comment, unsigned int *rulenum)
{
    const struct xt_standard_target *t = (void *)ipt_get_target_c(s);

    if (strcmp(t->target.u.kernel.target->name, XT_ERROR_TARGET) == 0) {
        /* Head of user chain: ERROR target with chainname */
        *chainname = t->target.data;
        (*rulenum) = 0;
    } else if (s == e) {
        (*rulenum)++;

        if (s->target_offset == sizeof(struct ipt_entry) &&
            strcmp(t->target.u.kernel.target->name,
               XT_STANDARD_TARGET) == 0 &&
           t->verdict < 0 &&
           unconditional(&s->ip)) {
            /* Tail of chains: STANDARD target (return/policy) */
            *comment = *chainname == hookname
                ? comments[NF_IP_TRACE_COMMENT_POLICY]
                : comments[NF_IP_TRACE_COMMENT_RETURN];
        }
        return 1;
    } else
        (*rulenum)++;

    return 0;
}

/* Emit a "TRACE: table:chain:comment:rulenum" log line for @e. */
static void trace_packet(struct net *net,
             const struct sk_buff *skb,
             unsigned int hook,
             const struct net_device *in,
             const struct net_device *out,
             const char *tablename,
             const struct xt_table_info *private,
             const struct ipt_entry *e)
{
    const struct ipt_entry *root;
    const char *hookname, *chainname, *comment;
    const struct ipt_entry *iter;
    unsigned int rulenum = 0;

    root = get_entry(private->entries, private->hook_entry[hook]);

    hookname = chainname = hooknames[hook];
    comment = comments[NF_IP_TRACE_COMMENT_RULE];

    xt_entry_foreach(iter, root, private->size - private->hook_entry[hook])
        if (get_chainname_rulenum(iter, e, hookname,
            &chainname, &comment, &rulenum) != 0)
            break;

    nf_log_trace(net, AF_INET, hook, skb, in, out, &trace_loginfo,
             "TRACE: %s:%s:%s:%u ",
             tablename, chainname, comment, rulenum);
}
#endif

/* Advance to the entry immediately following @entry in the blob. */
static inline
struct ipt_entry *ipt_next_entry(const struct ipt_entry *entry)
{
    return (void *)entry + entry->next_offset;
}

/* Returns one of the generic firewall policies, like NF_ACCEPT. */
/*
 * Core rule-traversal loop.  Runs with BH disabled under the
 * xt_write_recseq counter-consistency protocol.  Jumps/returns between
 * chains are implemented with the per-cpu jumpstack; the STANDARD
 * target (t->u.kernel.target->target == NULL) is interpreted inline.
 */
unsigned int
ipt_do_table(struct sk_buff *skb,
         const struct nf_hook_state *state,
         struct xt_table *table)
{
    unsigned int hook = state->hook;
    static const char nulldevname[IFNAMSIZ] __attribute__((aligned(sizeof(long))));
    const struct iphdr *ip;
    /* Initializing verdict to NF_DROP keeps gcc happy. */
    unsigned int verdict = NF_DROP;
    const char *indev, *outdev;
    const void *table_base;
    struct ipt_entry *e, **jumpstack;
    unsigned int stackidx, cpu;
    const struct xt_table_info *private;
    struct xt_action_param acpar;
    unsigned int addend;

    /* Initialization */
    stackidx = 0;
    ip = ip_hdr(skb);
    indev = state->in ? state->in->name : nulldevname;
    outdev = state->out ? state->out->name : nulldevname;
    /* We handle fragments by dealing with the first fragment as
     * if it was a normal packet.  All other fragments are treated
     * normally, except that they will NEVER match rules that ask
     * things we don't know, ie. tcp syn flag or ports).  If the
     * rule is also a fragment-specific rule, non-fragments won't
     * match it. */
    acpar.fragoff = ntohs(ip->frag_off) & IP_OFFSET;
    acpar.thoff   = ip_hdrlen(skb);
    acpar.hotdrop = false;
    acpar.net     = state->net;
    acpar.in      = state->in;
    acpar.out     = state->out;
    acpar.family  = NFPROTO_IPV4;
    acpar.hooknum = hook;

    IP_NF_ASSERT(table->valid_hooks & (1 << hook));
    local_bh_disable();
    addend = xt_write_recseq_begin();
    private = table->private;
    cpu = smp_processor_id();
    /*
     * Ensure we load private-> members after we've fetched the base
     * pointer.
     */
    smp_read_barrier_depends();
    table_base = private->entries;
    jumpstack  = (struct ipt_entry **)private->jumpstack[cpu];

    /* Switch to alternate jumpstack if we're being invoked via TEE.
     * TEE issues XT_CONTINUE verdict on original skb so we must not
     * clobber the jumpstack.
     *
     * For recursion via REJECT or SYNPROXY the stack will be clobbered
     * but it is no problem since absolute verdict is issued by these.
     */
    if (static_key_false(&xt_tee_enabled))
        jumpstack += private->stacksize * __this_cpu_read(nf_skb_duplicated);

    e = get_entry(table_base, private->hook_entry[hook]);

    pr_debug("Entering %s(hook %u), UF %p\n",
         table->name, hook,
         get_entry(table_base, private->underflow[hook]));

    do {
        const struct xt_entry_target *t;
        const struct xt_entry_match *ematch;
        struct xt_counters *counter;

        IP_NF_ASSERT(e);
        if (!ip_packet_match(ip, indev, outdev,
            &e->ip, acpar.fragoff)) {
 no_match:
            e = ipt_next_entry(e);
            continue;
        }

        xt_ematch_foreach(ematch, e) {
            acpar.match     = ematch->u.kernel.match;
            acpar.matchinfo = ematch->data;
            if (!acpar.match->match(skb, &acpar))
                goto no_match;
        }

        counter = xt_get_this_cpu_counter(&e->counters);
        ADD_COUNTER(*counter, skb->len, 1);

        t = ipt_get_target(e);
        IP_NF_ASSERT(t->u.kernel.target);

#if IS_ENABLED(CONFIG_NETFILTER_XT_TARGET_TRACE)
        /* The packet is traced: log it */
        if (unlikely(skb->nf_trace))
            trace_packet(state->net, skb, hook, state->in,
                     state->out, table->name, private, e);
#endif
        /* Standard target? */
        if (!t->u.kernel.target->target) {
            int v;

            v = ((struct xt_standard_target *)t)->verdict;
            if (v < 0) {
                /* Pop from stack? */
                if (v != XT_RETURN) {
                    /* Absolute verdict (ACCEPT/DROP/...). */
                    verdict = (unsigned int)(-v) - 1;
                    break;
                }
                if (stackidx == 0) {
                    /* RETURN from a base chain: take the
                     * hook's underflow (policy) entry. */
                    e = get_entry(table_base,
                        private->underflow[hook]);
                    pr_debug("Underflow (this is normal) "
                         "to %p\n", e);
                } else {
                    e = jumpstack[--stackidx];
                    pr_debug("Pulled %p out from pos %u\n",
                         e, stackidx);
                    e = ipt_next_entry(e);
                }
                continue;
            }
            if (table_base + v != ipt_next_entry(e) &&
                !(e->ip.flags & IPT_F_GOTO)) {
                /* Real jump (not goto, not fallthrough):
                 * remember where to return to. */
                jumpstack[stackidx++] = e;
                pr_debug("Pushed %p into pos %u\n",
                     e, stackidx - 1);
            }

            e = get_entry(table_base, v);
            continue;
        }

        acpar.target   = t->u.kernel.target;
        acpar.targinfo = t->data;

        verdict = t->u.kernel.target->target(skb, &acpar);
        /* Target might have changed stuff. */
        ip = ip_hdr(skb);
        if (verdict == XT_CONTINUE)
            e = ipt_next_entry(e);
        else
            /* Verdict */
            break;
    } while (!acpar.hotdrop);
    pr_debug("Exiting %s; sp at %u\n", __func__, stackidx);
    xt_write_recseq_end(addend);
    local_bh_enable();

#ifdef DEBUG_ALLOW_ALL
    return NF_ACCEPT;
#else
    if (acpar.hotdrop)
        return NF_DROP;
    else return verdict;
#endif
}

/* Figures out from what hook each rule can be called: returns 0 if
   there are loops.  Puts hook bitmask in comefrom. */
/* Load-time chain-graph walk; also rejects bad verdicts and jump
 * offsets beyond the table. */
static int
mark_source_chains(const struct xt_table_info *newinfo,
           unsigned int valid_hooks, void *entry0)
{
    unsigned int hook;

    /* No recursion; use packet counter to save back ptrs (reset
       to 0 as we leave), and comefrom to save source hook bitmask */
    for (hook = 0; hook < NF_INET_NUMHOOKS; hook++) {
        unsigned int pos = newinfo->hook_entry[hook];
        struct ipt_entry *e = (struct ipt_entry *)(entry0 + pos);

        if (!(valid_hooks & (1 << hook)))
            continue;

        /* Set initial back pointer. */
        e->counters.pcnt = pos;

        for (;;) {
            const struct xt_standard_target *t
                = (void *)ipt_get_target_c(e);
            int visited = e->comefrom & (1 << hook);

            if (e->comefrom & (1 << NF_INET_NUMHOOKS)) {
                pr_err("iptables: loop hook %u pos %u %08X.\n",
                       hook, pos, e->comefrom);
                return 0;
            }
            e->comefrom |= ((1 << hook) | (1 << NF_INET_NUMHOOKS));

            /* Unconditional return/END. */
            if ((e->target_offset == sizeof(struct ipt_entry) &&
                 (strcmp(t->target.u.user.name,
                     XT_STANDARD_TARGET) == 0) &&
                 t->verdict < 0 && unconditional(&e->ip)) ||
                visited) {
                unsigned int oldpos, size;

                if ((strcmp(t->target.u.user.name,
                        XT_STANDARD_TARGET) == 0) &&
                    t->verdict < -NF_MAX_VERDICT - 1) {
                    duprintf("mark_source_chains: bad "
                        "negative verdict (%i)\n",
                                t->verdict);
                    return 0;
                }

                /* Return: backtrack through the last
                   big jump. */
                do {
                    e->comefrom ^= (1<<NF_INET_NUMHOOKS);
#ifdef DEBUG_IP_FIREWALL_USER
                    if (e->comefrom
                        & (1 << NF_INET_NUMHOOKS)) {
                        duprintf("Back unset "
                             "on hook %u "
                             "rule %u\n",
                             hook, pos);
                    }
#endif
                    oldpos = pos;
                    pos = e->counters.pcnt;
                    e->counters.pcnt = 0;

                    /* We're at the start. */
                    if (pos == oldpos)
                        goto next;

                    e = (struct ipt_entry *)
                        (entry0 + pos);
                } while (oldpos == pos + e->next_offset);

                /* Move along one */
                size = e->next_offset;
                e = (struct ipt_entry *)
                    (entry0 + pos + size);
                e->counters.pcnt = pos;
                pos += size;
            } else {
                int newpos = t->verdict;

                if (strcmp(t->target.u.user.name,
                       XT_STANDARD_TARGET) == 0 &&
                    newpos >= 0) {
                    if (newpos > newinfo->size -
                        sizeof(struct ipt_entry)) {
                        duprintf("mark_source_chains: "
                            "bad verdict (%i)\n",
                                newpos);
                        return 0;
                    }
                    /* This a jump; chase it. */
                    duprintf("Jump rule %u -> %u\n",
                         pos, newpos);
                } else {
                    /* ... this is a fallthru */
                    newpos = pos + e->next_offset;
                }
                e = (struct ipt_entry *)
                    (entry0 + newpos);
                e->counters.pcnt = pos;
                pos = newpos;
            }
        }
next:
        duprintf("Finished chain %u\n", hook);
    }
    return 1;
}

/* Run a match's destructor (if any) and drop its module reference. */
static void cleanup_match(struct xt_entry_match *m, struct net *net)
{
    struct xt_mtdtor_param par;

    par.net       = net;
    par.match     = m->u.kernel.match;
    par.matchinfo = m->data;
    par.family    = NFPROTO_IPV4;
    if (par.match->destroy != NULL)
        par.match->destroy(&par);
    module_put(par.match->me);
}

/* Validate the user-supplied entry layout: IP selector flags and that
 * the target record fits between target_offset and next_offset. */
static int
check_entry(const struct ipt_entry *e)
{
    const struct xt_entry_target *t;

    if (!ip_checkentry(&e->ip))
        return -EINVAL;

    if (e->target_offset + sizeof(struct xt_entry_target) >
        e->next_offset)
        return -EINVAL;

    t = ipt_get_target_c(e);
    if (e->target_offset + t->u.target_size > e->next_offset)
        return -EINVAL;

    return 0;
}

/* Run xt_check_match() for one (already resolved) match extension. */
static int
check_match(struct xt_entry_match *m, struct xt_mtchk_param *par)
{
    const struct ipt_ip *ip = par->entryinfo;
    int ret;

    par->match     = m->u.kernel.match;
    par->matchinfo = m->data;

    ret = xt_check_match(par, m->u.match_size - sizeof(*m),
          ip->proto, ip->invflags & IPT_INV_PROTO);
    if (ret < 0) {
        duprintf("check failed for `%s'.\n", par->match->name);
        return ret;
    }
    return 0;
}

/* Resolve a match extension by name/revision (module-autoloading),
 * then validate it; drops the module ref again on failure. */
static int
find_check_match(struct xt_entry_match *m, struct xt_mtchk_param *par)
{
    struct xt_match *match;
    int ret;

    match = xt_request_find_match(NFPROTO_IPV4, m->u.user.name,
                      m->u.user.revision);
    if (IS_ERR(match)) {
        duprintf("find_check_match: `%s' not found\n", m->u.user.name);
        return PTR_ERR(match);
    }
    m->u.kernel.match = match;

    ret = check_match(m, par);
    if (ret)
        goto err;

    return 0;
err:
    module_put(m->u.kernel.match->me);
    return ret;
}

/* Run xt_check_target() for the entry's (already resolved) target. */
static int check_target(struct ipt_entry *e, struct net *net, const char *name)
{
    struct xt_entry_target *t = ipt_get_target(e);
    struct xt_tgchk_param par = {
        .net       = net,
        .table     = name,
        .entryinfo = e,
        .target    = t->u.kernel.target,
        .targinfo  = t->data,
        .hook_mask = e->comefrom,
        .family    = NFPROTO_IPV4,
    };
    int ret;

    ret = xt_check_target(&par, t->u.target_size - sizeof(*t),
          e->ip.proto, e->ip.invflags & IPT_INV_PROTO);
    if (ret < 0) {
        duprintf("check failed for `%s'.\n",
             t->u.kernel.target->name);
        return ret;
    }
    return 0;
}

/*
 * Fully validate one entry: allocate its per-cpu counter, resolve and
 * check every match, then resolve and check the target.  On failure,
 * unwind exactly the j matches that were set up.
 */
static int
find_check_entry(struct ipt_entry *e, struct net *net, const char *name,
         unsigned int size)
{
    struct xt_entry_target *t;
    struct xt_target *target;
    int ret;
    unsigned int j;
    struct xt_mtchk_param mtpar;
    struct xt_entry_match *ematch;

    e->counters.pcnt = xt_percpu_counter_alloc();
    if (IS_ERR_VALUE(e->counters.pcnt))
        return -ENOMEM;

    j = 0;
    mtpar.net       = net;
    mtpar.table     = name;
    mtpar.entryinfo = &e->ip;
    mtpar.hook_mask = e->comefrom;
    mtpar.family    = NFPROTO_IPV4;
    xt_ematch_foreach(ematch, e) {
        ret = find_check_match(ematch, &mtpar);
        if (ret != 0)
            goto cleanup_matches;
        ++j;
    }

    t = ipt_get_target(e);
    target = xt_request_find_target(NFPROTO_IPV4, t->u.user.name,
                    t->u.user.revision);
    if (IS_ERR(target)) {
        duprintf("find_check_entry: `%s' not found\n", t->u.user.name);
        ret = PTR_ERR(target);
        goto cleanup_matches;
    }
    t->u.kernel.target = target;

    ret = check_target(e, net, name);
    if (ret)
        goto err;

    return 0;
 err:
    module_put(t->u.kernel.target->me);
 cleanup_matches:
    xt_ematch_foreach(ematch, e) {
        if (j-- == 0)
            break;
        cleanup_match(ematch, net);
    }

    xt_percpu_counter_free(e->counters.pcnt);

    return ret;
}

/* An underflow (base-chain policy) must be an unconditional STANDARD
 * rule whose verdict is ACCEPT or DROP. */
static bool check_underflow(const struct ipt_entry *e)
{
    const struct xt_entry_target *t;
    unsigned int verdict;

    if (!unconditional(&e->ip))
        return false;
    t = ipt_get_target_c(e);
    if (strcmp(t->u.user.name, XT_STANDARD_TARGET) != 0)
        return false;
    verdict = ((struct xt_standard_target *)t)->verdict;
    verdict = -verdict - 1;
    return verdict == NF_DROP || verdict == NF_ACCEPT;
}

/*
 * Bounds- and alignment-check one entry of the user-supplied blob and
 * record which hook entry points / underflows it corresponds to.
 * e->next_offset is user-controlled; both it and the entry pointer are
 * checked against @limit before anything dereferences past @e.
 */
static int
check_entry_size_and_hooks(struct ipt_entry *e,
               struct xt_table_info *newinfo,
               const unsigned char *base,
               const unsigned char *limit,
               const unsigned int *hook_entries,
               const unsigned int *underflows,
               unsigned int valid_hooks)
{
    unsigned int h;
    int err;

    if ((unsigned long)e % __alignof__(struct ipt_entry) != 0 ||
        (unsigned char *)e + sizeof(struct ipt_entry) >= limit ||
        (unsigned char *)e + e->next_offset > limit) {
        duprintf("Bad offset %p\n", e);
        return -EINVAL;
    }

    if (e->next_offset
        < sizeof(struct ipt_entry) + sizeof(struct xt_entry_target)) {
        duprintf("checking: element %p size %u\n",
             e, e->next_offset);
        return -EINVAL;
    }

    err = check_entry(e);
    if (err)
        return err;

    /* Check hooks & underflows */
    for (h = 0; h < NF_INET_NUMHOOKS; h++) {
        if (!(valid_hooks & (1 << h)))
            continue;
        if ((unsigned char *)e - base == hook_entries[h])
            newinfo->hook_entry[h] = hook_entries[h];
        if ((unsigned char *)e - base == underflows[h]) {
            if (!check_underflow(e)) {
                pr_err("Underflows must be unconditional and "
                       "use the STANDARD target with "
                       "ACCEPT/DROP\n");
                return -EINVAL;
            }
            newinfo->underflow[h] = underflows[h];
        }
    }

    /* Clear counters and comefrom */
    e->counters = ((struct xt_counters) { 0, 0 });
    e->comefrom = 0;
    return 0;
}

/* Destroy one fully-initialised entry: matches, target, per-cpu
 * counter, and the module references held for each. */
static void
cleanup_entry(struct ipt_entry *e, struct net *net)
{
    struct xt_tgdtor_param par;
    struct xt_entry_target *t;
    struct xt_entry_match *ematch;

    /* Cleanup all matches */
    xt_ematch_foreach(ematch, e)
        cleanup_match(ematch, net);
    t = ipt_get_target(e);

    par.net      = net;
    par.target   = t->u.kernel.target;
    par.targinfo = t->data;
    par.family   = NFPROTO_IPV4;
    if (par.target->destroy != NULL)
        par.target->destroy(&par);
    module_put(par.target->me);
    xt_percpu_counter_free(e->counters.pcnt);
}

/* Checks and translates the user-supplied table segment (held in
   newinfo) */
/* Validation pipeline: per-entry size/hook checks, hook-coverage
 * checks, loop detection (mark_source_chains), then full per-entry
 * match/target checks with precise unwind on failure. */
static int
translate_table(struct net *net, struct xt_table_info *newinfo, void *entry0,
        const struct ipt_replace *repl)
{
    struct ipt_entry *iter;
    unsigned int i;
    int ret = 0;

    newinfo->size = repl->size;
    newinfo->number = repl->num_entries;

    /* Init all hooks to impossible value. */
    for (i = 0; i < NF_INET_NUMHOOKS; i++) {
        newinfo->hook_entry[i] = 0xFFFFFFFF;
        newinfo->underflow[i] = 0xFFFFFFFF;
    }

    duprintf("translate_table: size %u\n", newinfo->size);
    i = 0;
    /* Walk through entries, checking offsets. */
    xt_entry_foreach(iter, entry0, newinfo->size) {
        ret = check_entry_size_and_hooks(iter, newinfo, entry0,
                         entry0 + repl->size,
                         repl->hook_entry,
                         repl->underflow,
                         repl->valid_hooks);
        if (ret != 0)
            return ret;
        ++i;
        if (strcmp(ipt_get_target(iter)->u.user.name,
            XT_ERROR_TARGET) == 0)
            ++newinfo->stacksize;
    }

    if (i != repl->num_entries) {
        duprintf("translate_table: %u not %u entries\n",
             i, repl->num_entries);
        return -EINVAL;
    }

    /* Check hooks all assigned */
    for (i = 0; i < NF_INET_NUMHOOKS; i++) {
        /* Only hooks which are valid */
        if (!(repl->valid_hooks & (1 << i)))
            continue;
        if (newinfo->hook_entry[i] == 0xFFFFFFFF) {
            duprintf("Invalid hook entry %u %u\n",
                 i, repl->hook_entry[i]);
            return -EINVAL;
        }
        if (newinfo->underflow[i] == 0xFFFFFFFF) {
            duprintf("Invalid underflow %u %u\n",
                 i, repl->underflow[i]);
            return -EINVAL;
        }
    }

    if (!mark_source_chains(newinfo, repl->valid_hooks, entry0))
        return -ELOOP;

    /* Finally, each sanity check must pass */
    i = 0;
    xt_entry_foreach(iter, entry0, newinfo->size) {
        ret = find_check_entry(iter, net, repl->name, repl->size);
        if (ret != 0)
            break;
        ++i;
    }

    if (ret != 0) {
        /* Unwind only the i entries that passed find_check_entry. */
        xt_entry_foreach(iter, entry0, newinfo->size) {
            if (i-- == 0)
                break;
            cleanup_entry(iter, net);
        }
        return ret;
    }

    return ret;
}

/* Sum the per-cpu counters into @counters, using the xt_recseq
 * sequence counter for a torn-read-free 64-bit snapshot per cpu. */
static void
get_counters(const struct xt_table_info *t,
         struct xt_counters counters[])
{
    struct ipt_entry *iter;
    unsigned int cpu;
    unsigned int i;

    for_each_possible_cpu(cpu) {
        seqcount_t *s = &per_cpu(xt_recseq, cpu);

        i = 0;
        xt_entry_foreach(iter, t->entries, t->size) {
            struct xt_counters *tmp;
            u64 bcnt, pcnt;
            unsigned int start;

            tmp = xt_get_per_cpu_counter(&iter->counters, cpu);
            do {
                start = read_seqcount_begin(s);
                bcnt = tmp->bcnt;
                pcnt = tmp->pcnt;
            } while (read_seqcount_retry(s, start));

            ADD_COUNTER(counters[i], bcnt, pcnt);
            ++i; /* macro does multi eval of i */
        }
    }
}

/* Allocate and fill an atomic counter snapshot for @table.  Caller
 * owns the returned vmalloc'd array and must vfree() it. */
static struct xt_counters *alloc_counters(const struct xt_table *table)
{
    unsigned int countersize;
    struct xt_counters *counters;
    const struct xt_table_info *private = table->private;

    /* We need atomic snapshot of counters: rest doesn't change
       (other than comefrom, which userspace doesn't care
       about). */
    countersize = sizeof(struct xt_counters) * private->number;
    counters = vzalloc(countersize);

    if (counters == NULL)
        return ERR_PTR(-ENOMEM);

    get_counters(private, counters);

    return counters;
}

/*
 * Dump the native-format ruleset to userspace: bulk-copy the blob,
 * then patch in the counter snapshot and replace kernel pointers with
 * user-visible match/target names.
 */
static int
copy_entries_to_user(unsigned int total_size,
             const struct xt_table *table,
             void __user *userptr)
{
    unsigned int off, num;
    const struct ipt_entry *e;
    struct xt_counters *counters;
    const struct xt_table_info *private = table->private;
    int ret = 0;
    const void *loc_cpu_entry;

    counters = alloc_counters(table);
    if (IS_ERR(counters))
        return PTR_ERR(counters);

    loc_cpu_entry = private->entries;
    if (copy_to_user(userptr, loc_cpu_entry, total_size) != 0) {
        ret = -EFAULT;
        goto free_counters;
    }

    /* FIXME: use iterator macros --RR */
    /* ... then go back and fix counters and names */
    for (off = 0, num = 0; off < total_size; off += e->next_offset, num++){
        unsigned int i;
        const struct xt_entry_match *m;
        const struct xt_entry_target *t;

        e = (struct ipt_entry *)(loc_cpu_entry + off);
        if (copy_to_user(userptr + off
                 + offsetof(struct ipt_entry, counters),
                 &counters[num],
                 sizeof(counters[num])) != 0) {
            ret = -EFAULT;
            goto free_counters;
        }

        for (i = sizeof(struct ipt_entry);
             i < e->target_offset;
             i += m->u.match_size) {
            m = (void *)e + i;

            if (copy_to_user(userptr + off + i
                     + offsetof(struct xt_entry_match,
                            u.user.name),
                     m->u.kernel.match->name,
                     strlen(m->u.kernel.match->name)+1)
                != 0) {
                ret = -EFAULT;
                goto free_counters;
            }
        }

        t = ipt_get_target_c(e);
        if (copy_to_user(userptr + off + e->target_offset
                 + offsetof(struct xt_entry_target,
                        u.user.name),
                 t->u.kernel.target->name,
                 strlen(t->u.kernel.target->name)+1) != 0) {
            ret = -EFAULT;
            goto free_counters;
        }
    }

 free_counters:
    vfree(counters);
    return ret;
}

#ifdef CONFIG_COMPAT
/* Adjust a compat STANDARD verdict: positive verdicts are jump offsets
 * and must be shifted by the accumulated compat size delta. */
static void compat_standard_from_user(void *dst, const void *src)
{
    int v = *(compat_int_t *)src;

    if (v > 0)
        v += xt_compat_calc_jump(AF_INET, v);
    memcpy(dst, &v, sizeof(v));
}

/* Inverse of compat_standard_from_user for dumps back to userspace. */
static int compat_standard_to_user(void __user *dst, const void *src)
{
    compat_int_t cv = *(int *)src;

    if (cv > 0)
        cv -= xt_compat_calc_jump(AF_INET, cv);
    return copy_to_user(dst, &cv, sizeof(cv)) ? -EFAULT : 0;
}

/* Compute one entry's native-vs-compat size delta, record it in the
 * compat offset table, and shift any hook/underflow offsets after it. */
static int compat_calc_entry(const struct ipt_entry *e,
                 const struct xt_table_info *info,
                 const void *base, struct xt_table_info *newinfo)
{
    const struct xt_entry_match *ematch;
    const struct xt_entry_target *t;
    unsigned int entry_offset;
    int off, i, ret;

    off = sizeof(struct ipt_entry) - sizeof(struct compat_ipt_entry);
    entry_offset = (void *)e - base;
    xt_ematch_foreach(ematch, e)
        off += xt_compat_match_offset(ematch->u.kernel.match);
    t = ipt_get_target_c(e);
    off += xt_compat_target_offset(t->u.kernel.target);
    newinfo->size -= off;
    ret = xt_compat_add_offset(AF_INET, entry_offset, off);
    if (ret)
        return ret;

    for (i = 0; i < NF_INET_NUMHOOKS; i++) {
        if (info->hook_entry[i] &&
            (e < (struct ipt_entry *)(base + info->hook_entry[i])))
            newinfo->hook_entry[i] -= off;
        if (info->underflow[i] &&
            (e < (struct ipt_entry *)(base + info->underflow[i])))
            newinfo->underflow[i] -= off;
    }
    return 0;
}

/* Build a compat-sized view (sizes and hook offsets) of @info without
 * translating the entries themselves. */
static int compat_table_info(const struct xt_table_info *info,
                 struct xt_table_info *newinfo)
{
    struct ipt_entry *iter;
    const void *loc_cpu_entry;
    int ret;

    if (!newinfo || !info)
        return -EINVAL;

    /* we dont care about newinfo->entries */
    memcpy(newinfo, info, offsetof(struct xt_table_info, entries));
    newinfo->initial_entries = 0;
    loc_cpu_entry = info->entries;
    xt_compat_init_offsets(AF_INET, info->number);
    xt_entry_foreach(iter, loc_cpu_entry, info->size) {
        ret = compat_calc_entry(iter, info, loc_cpu_entry, newinfo);
        if (ret != 0)
            return ret;
    }
    return 0;
}
#endif

/*
 * IPT_SO_GET_INFO: report hook offsets, entry count and blob size for
 * the named table.  With @compat set, sizes are translated to the
 * 32-bit layout under the compat lock.
 */
static int get_info(struct net *net, void __user *user,
            const int *len, int compat)
{
    char name[XT_TABLE_MAXNAMELEN];
    struct xt_table *t;
    int ret;

    if (*len != sizeof(struct ipt_getinfo)) {
        duprintf("length %u != %zu\n", *len,
             sizeof(struct ipt_getinfo));
        return -EINVAL;
    }

    if (copy_from_user(name, user, sizeof(name)) != 0)
        return -EFAULT;

    /* Force NUL-termination of the user-supplied table name. */
    name[XT_TABLE_MAXNAMELEN-1] = '\0';
#ifdef CONFIG_COMPAT
    if (compat)
        xt_compat_lock(AF_INET);
#endif
    t = try_then_request_module(xt_find_table_lock(net, AF_INET, name),
                    "iptable_%s", name);
    if (!IS_ERR_OR_NULL(t)) {
        struct ipt_getinfo info;
        const struct xt_table_info *private = t->private;
#ifdef CONFIG_COMPAT
        struct xt_table_info tmp;

        if (compat) {
            ret = compat_table_info(private, &tmp);
            xt_compat_flush_offsets(AF_INET);
            private = &tmp;
        }
#endif
        memset(&info, 0, sizeof(info));
        info.valid_hooks = t->valid_hooks;
        memcpy(info.hook_entry, private->hook_entry,
               sizeof(info.hook_entry));
        memcpy(info.underflow, private->underflow,
               sizeof(info.underflow));
        info.num_entries = private->number;
        info.size = private->size;
        strcpy(info.name, name);

        if (copy_to_user(user, &info, *len) != 0)
            ret = -EFAULT;
        else
            ret = 0;

        xt_table_unlock(t);
        module_put(t->me);
    } else
        ret = t ? PTR_ERR(t) : -ENOENT;
#ifdef CONFIG_COMPAT
    if (compat)
        xt_compat_unlock(AF_INET);
#endif
    return ret;
}

/*
 * IPT_SO_GET_ENTRIES (native): dump the ruleset if the size userspace
 * announced matches the live table's size.
 * NOTE(review): as in compat_get_entries, *len is checked against
 * sizeof(get) + get.size with get.size user-controlled -- confirm the
 * addition cannot wrap on this branch.
 */
static int
get_entries(struct net *net, struct ipt_get_entries __user *uptr,
        const int *len)
{
    int ret;
    struct ipt_get_entries get;
    struct xt_table *t;

    if (*len < sizeof(get)) {
        duprintf("get_entries: %u < %zu\n", *len, sizeof(get));
        return -EINVAL;
    }
    if (copy_from_user(&get, uptr, sizeof(get)) != 0)
        return -EFAULT;
    if (*len != sizeof(struct ipt_get_entries) + get.size) {
        duprintf("get_entries: %u != %zu\n",
             *len, sizeof(get) + get.size);
        return -EINVAL;
    }

    t = xt_find_table_lock(net, AF_INET, get.name);
    if (!IS_ERR_OR_NULL(t)) {
        const struct xt_table_info *private = t->private;
        duprintf("t->private->number = %u\n", private->number);
        if (get.size == private->size)
            ret = copy_entries_to_user(private->size,
                           t, uptr->entrytable);
        else {
            duprintf("get_entries: I've got %u not %u!\n",
                 private->size, get.size);
            ret = -EAGAIN;
        }
        module_put(t->me);
        xt_table_unlock(t);
    } else
        ret = t ? PTR_ERR(t) : -ENOENT;

    return ret;
}

/*
 * Swap the named table's ruleset for @newinfo: atomically replace via
 * xt_replace_table(), snapshot the old counters for userspace, destroy
 * the old entries, and rebalance the table module's refcount against
 * the old/new rule counts.
 */
static int
__do_replace(struct net *net, const char *name, unsigned int valid_hooks,
         struct xt_table_info *newinfo, unsigned int num_counters,
         void __user *counters_ptr)
{
    int ret;
    struct xt_table *t;
    struct xt_table_info *oldinfo;
    struct xt_counters *counters;
    struct ipt_entry *iter;

    ret = 0;
    counters = vzalloc(num_counters * sizeof(struct xt_counters));
    if (!counters) {
        ret = -ENOMEM;
        goto out;
    }

    t = try_then_request_module(xt_find_table_lock(net, AF_INET, name),
                    "iptable_%s", name);
    if (IS_ERR_OR_NULL(t)) {
        ret = t ? PTR_ERR(t) : -ENOENT;
        goto free_newinfo_counters_untrans;
    }

    /* You lied! */
    if (valid_hooks != t->valid_hooks) {
        duprintf("Valid hook crap: %08X vs %08X\n",
             valid_hooks, t->valid_hooks);
        ret = -EINVAL;
        goto put_module;
    }

    oldinfo = xt_replace_table(t, num_counters, newinfo, &ret);
    if (!oldinfo)
        goto put_module;

    /* Update module usage count based on number of rules */
    duprintf("do_replace: oldnum=%u, initnum=%u, newnum=%u\n",
        oldinfo->number, oldinfo->initial_entries, newinfo->number);
    if ((oldinfo->number > oldinfo->initial_entries) ||
        (newinfo->number <= oldinfo->initial_entries))
        module_put(t->me);
    if ((oldinfo->number > oldinfo->initial_entries) &&
        (newinfo->number <= oldinfo->initial_entries))
        module_put(t->me);

    /* Get the old counters, and synchronize with replace */
    get_counters(oldinfo, counters);

    /* Decrease module usage counts and free resource */
    xt_entry_foreach(iter, oldinfo->entries, oldinfo->size)
        cleanup_entry(iter, net);

    xt_free_table_info(oldinfo);
    if (copy_to_user(counters_ptr, counters,
             sizeof(struct xt_counters) * num_counters) != 0) {
        /* Silent error, can't fail, new table is already in place */
        net_warn_ratelimited("iptables: counters copy to user failed while replacing table\n");
    }
    vfree(counters);
    xt_table_unlock(t);
    return ret;

 put_module:
    module_put(t->me);
    xt_table_unlock(t);
 free_newinfo_counters_untrans:
    vfree(counters);
 out:
    return ret;
}

static int
do_replace(struct net *net, const
void __user *user, unsigned int len) { int ret; struct ipt_replace tmp; struct xt_table_info *newinfo; void *loc_cpu_entry; struct ipt_entry *iter; if (copy_from_user(&tmp, user, sizeof(tmp)) != 0) return -EFAULT; /* overflow check */ if (tmp.num_counters >= INT_MAX / sizeof(struct xt_counters)) return -ENOMEM; if (tmp.num_counters == 0) return -EINVAL; tmp.name[sizeof(tmp.name)-1] = 0; newinfo = xt_alloc_table_info(tmp.size); if (!newinfo) return -ENOMEM; loc_cpu_entry = newinfo->entries; if (copy_from_user(loc_cpu_entry, user + sizeof(tmp), tmp.size) != 0) { ret = -EFAULT; goto free_newinfo; } ret = translate_table(net, newinfo, loc_cpu_entry, &tmp); if (ret != 0) goto free_newinfo; duprintf("Translated table\n"); ret = __do_replace(net, tmp.name, tmp.valid_hooks, newinfo, tmp.num_counters, tmp.counters); if (ret) goto free_newinfo_untrans; return 0; free_newinfo_untrans: xt_entry_foreach(iter, loc_cpu_entry, newinfo->size) cleanup_entry(iter, net); free_newinfo: xt_free_table_info(newinfo); return ret; } static int do_add_counters(struct net *net, const void __user *user, unsigned int len, int compat) { unsigned int i; struct xt_counters_info tmp; struct xt_counters *paddc; unsigned int num_counters; const char *name; int size; void *ptmp; struct xt_table *t; const struct xt_table_info *private; int ret = 0; struct ipt_entry *iter; unsigned int addend; #ifdef CONFIG_COMPAT struct compat_xt_counters_info compat_tmp; if (compat) { ptmp = &compat_tmp; size = sizeof(struct compat_xt_counters_info); } else #endif { ptmp = &tmp; size = sizeof(struct xt_counters_info); } if (copy_from_user(ptmp, user, size) != 0) return -EFAULT; #ifdef CONFIG_COMPAT if (compat) { num_counters = compat_tmp.num_counters; name = compat_tmp.name; } else #endif { num_counters = tmp.num_counters; name = tmp.name; } if (len != size + num_counters * sizeof(struct xt_counters)) return -EINVAL; paddc = vmalloc(len - size); if (!paddc) return -ENOMEM; if (copy_from_user(paddc, user + size, len - 
size) != 0) { ret = -EFAULT; goto free; } t = xt_find_table_lock(net, AF_INET, name); if (IS_ERR_OR_NULL(t)) { ret = t ? PTR_ERR(t) : -ENOENT; goto free; } local_bh_disable(); private = t->private; if (private->number != num_counters) { ret = -EINVAL; goto unlock_up_free; } i = 0; addend = xt_write_recseq_begin(); xt_entry_foreach(iter, private->entries, private->size) { struct xt_counters *tmp; tmp = xt_get_this_cpu_counter(&iter->counters); ADD_COUNTER(*tmp, paddc[i].bcnt, paddc[i].pcnt); ++i; } xt_write_recseq_end(addend); unlock_up_free: local_bh_enable(); xt_table_unlock(t); module_put(t->me); free: vfree(paddc); return ret; } #ifdef CONFIG_COMPAT struct compat_ipt_replace { char name[XT_TABLE_MAXNAMELEN]; u32 valid_hooks; u32 num_entries; u32 size; u32 hook_entry[NF_INET_NUMHOOKS]; u32 underflow[NF_INET_NUMHOOKS]; u32 num_counters; compat_uptr_t counters; /* struct xt_counters * */ struct compat_ipt_entry entries[0]; }; static int compat_copy_entry_to_user(struct ipt_entry *e, void __user **dstptr, unsigned int *size, struct xt_counters *counters, unsigned int i) { struct xt_entry_target *t; struct compat_ipt_entry __user *ce; u_int16_t target_offset, next_offset; compat_uint_t origsize; const struct xt_entry_match *ematch; int ret = 0; origsize = *size; ce = (struct compat_ipt_entry __user *)*dstptr; if (copy_to_user(ce, e, sizeof(struct ipt_entry)) != 0 || copy_to_user(&ce->counters, &counters[i], sizeof(counters[i])) != 0) return -EFAULT; *dstptr += sizeof(struct compat_ipt_entry); *size -= sizeof(struct ipt_entry) - sizeof(struct compat_ipt_entry); xt_ematch_foreach(ematch, e) { ret = xt_compat_match_to_user(ematch, dstptr, size); if (ret != 0) return ret; } target_offset = e->target_offset - (origsize - *size); t = ipt_get_target(e); ret = xt_compat_target_to_user(t, dstptr, size); if (ret) return ret; next_offset = e->next_offset - (origsize - *size); if (put_user(target_offset, &ce->target_offset) != 0 || put_user(next_offset, &ce->next_offset) != 0) 
return -EFAULT; return 0; } static int compat_find_calc_match(struct xt_entry_match *m, const char *name, const struct ipt_ip *ip, int *size) { struct xt_match *match; match = xt_request_find_match(NFPROTO_IPV4, m->u.user.name, m->u.user.revision); if (IS_ERR(match)) { duprintf("compat_check_calc_match: `%s' not found\n", m->u.user.name); return PTR_ERR(match); } m->u.kernel.match = match; *size += xt_compat_match_offset(match); return 0; } static void compat_release_entry(struct compat_ipt_entry *e) { struct xt_entry_target *t; struct xt_entry_match *ematch; /* Cleanup all matches */ xt_ematch_foreach(ematch, e) module_put(ematch->u.kernel.match->me); t = compat_ipt_get_target(e); module_put(t->u.kernel.target->me); } static int check_compat_entry_size_and_hooks(struct compat_ipt_entry *e, struct xt_table_info *newinfo, unsigned int *size, const unsigned char *base, const unsigned char *limit, const unsigned int *hook_entries, const unsigned int *underflows, const char *name) { struct xt_entry_match *ematch; struct xt_entry_target *t; struct xt_target *target; unsigned int entry_offset; unsigned int j; int ret, off, h; duprintf("check_compat_entry_size_and_hooks %p\n", e); if ((unsigned long)e % __alignof__(struct compat_ipt_entry) != 0 || (unsigned char *)e + sizeof(struct compat_ipt_entry) >= limit || (unsigned char *)e + e->next_offset > limit) { duprintf("Bad offset %p, limit = %p\n", e, limit); return -EINVAL; } if (e->next_offset < sizeof(struct compat_ipt_entry) + sizeof(struct compat_xt_entry_target)) { duprintf("checking: element %p size %u\n", e, e->next_offset); return -EINVAL; } /* For purposes of check_entry casting the compat entry is fine */ ret = check_entry((struct ipt_entry *)e); if (ret) return ret; off = sizeof(struct ipt_entry) - sizeof(struct compat_ipt_entry); entry_offset = (void *)e - (void *)base; j = 0; xt_ematch_foreach(ematch, e) { ret = compat_find_calc_match(ematch, name, &e->ip, &off); if (ret != 0) goto release_matches; ++j; } t = 
compat_ipt_get_target(e); target = xt_request_find_target(NFPROTO_IPV4, t->u.user.name, t->u.user.revision); if (IS_ERR(target)) { duprintf("check_compat_entry_size_and_hooks: `%s' not found\n", t->u.user.name); ret = PTR_ERR(target); goto release_matches; } t->u.kernel.target = target; off += xt_compat_target_offset(target); *size += off; ret = xt_compat_add_offset(AF_INET, entry_offset, off); if (ret) goto out; /* Check hooks & underflows */ for (h = 0; h < NF_INET_NUMHOOKS; h++) { if ((unsigned char *)e - base == hook_entries[h]) newinfo->hook_entry[h] = hook_entries[h]; if ((unsigned char *)e - base == underflows[h]) newinfo->underflow[h] = underflows[h]; } /* Clear counters and comefrom */ memset(&e->counters, 0, sizeof(e->counters)); e->comefrom = 0; return 0; out: module_put(t->u.kernel.target->me); release_matches: xt_ematch_foreach(ematch, e) { if (j-- == 0) break; module_put(ematch->u.kernel.match->me); } return ret; } static int compat_copy_entry_from_user(struct compat_ipt_entry *e, void **dstptr, unsigned int *size, const char *name, struct xt_table_info *newinfo, unsigned char *base) { struct xt_entry_target *t; struct xt_target *target; struct ipt_entry *de; unsigned int origsize; int ret, h; struct xt_entry_match *ematch; ret = 0; origsize = *size; de = (struct ipt_entry *)*dstptr; memcpy(de, e, sizeof(struct ipt_entry)); memcpy(&de->counters, &e->counters, sizeof(e->counters)); *dstptr += sizeof(struct ipt_entry); *size += sizeof(struct ipt_entry) - sizeof(struct compat_ipt_entry); xt_ematch_foreach(ematch, e) { ret = xt_compat_match_from_user(ematch, dstptr, size); if (ret != 0) return ret; } de->target_offset = e->target_offset - (origsize - *size); t = compat_ipt_get_target(e); target = t->u.kernel.target; xt_compat_target_from_user(t, dstptr, size); de->next_offset = e->next_offset - (origsize - *size); for (h = 0; h < NF_INET_NUMHOOKS; h++) { if ((unsigned char *)de - base < newinfo->hook_entry[h]) newinfo->hook_entry[h] -= origsize - *size; 
if ((unsigned char *)de - base < newinfo->underflow[h]) newinfo->underflow[h] -= origsize - *size; } return ret; } static int compat_check_entry(struct ipt_entry *e, struct net *net, const char *name) { struct xt_entry_match *ematch; struct xt_mtchk_param mtpar; unsigned int j; int ret = 0; e->counters.pcnt = xt_percpu_counter_alloc(); if (IS_ERR_VALUE(e->counters.pcnt)) return -ENOMEM; j = 0; mtpar.net = net; mtpar.table = name; mtpar.entryinfo = &e->ip; mtpar.hook_mask = e->comefrom; mtpar.family = NFPROTO_IPV4; xt_ematch_foreach(ematch, e) { ret = check_match(ematch, &mtpar); if (ret != 0) goto cleanup_matches; ++j; } ret = check_target(e, net, name); if (ret) goto cleanup_matches; return 0; cleanup_matches: xt_ematch_foreach(ematch, e) { if (j-- == 0) break; cleanup_match(ematch, net); } xt_percpu_counter_free(e->counters.pcnt); return ret; } static int translate_compat_table(struct net *net, const char *name, unsigned int valid_hooks, struct xt_table_info **pinfo, void **pentry0, unsigned int total_size, unsigned int number, unsigned int *hook_entries, unsigned int *underflows) { unsigned int i, j; struct xt_table_info *newinfo, *info; void *pos, *entry0, *entry1; struct compat_ipt_entry *iter0; struct ipt_entry *iter1; unsigned int size; int ret; info = *pinfo; entry0 = *pentry0; size = total_size; info->number = number; /* Init all hooks to impossible value. */ for (i = 0; i < NF_INET_NUMHOOKS; i++) { info->hook_entry[i] = 0xFFFFFFFF; info->underflow[i] = 0xFFFFFFFF; } duprintf("translate_compat_table: size %u\n", info->size); j = 0; xt_compat_lock(AF_INET); xt_compat_init_offsets(AF_INET, number); /* Walk through entries, checking offsets. 
*/ xt_entry_foreach(iter0, entry0, total_size) { ret = check_compat_entry_size_and_hooks(iter0, info, &size, entry0, entry0 + total_size, hook_entries, underflows, name); if (ret != 0) goto out_unlock; ++j; } ret = -EINVAL; if (j != number) { duprintf("translate_compat_table: %u not %u entries\n", j, number); goto out_unlock; } /* Check hooks all assigned */ for (i = 0; i < NF_INET_NUMHOOKS; i++) { /* Only hooks which are valid */ if (!(valid_hooks & (1 << i))) continue; if (info->hook_entry[i] == 0xFFFFFFFF) { duprintf("Invalid hook entry %u %u\n", i, hook_entries[i]); goto out_unlock; } if (info->underflow[i] == 0xFFFFFFFF) { duprintf("Invalid underflow %u %u\n", i, underflows[i]); goto out_unlock; } } ret = -ENOMEM; newinfo = xt_alloc_table_info(size); if (!newinfo) goto out_unlock; newinfo->number = number; for (i = 0; i < NF_INET_NUMHOOKS; i++) { newinfo->hook_entry[i] = info->hook_entry[i]; newinfo->underflow[i] = info->underflow[i]; } entry1 = newinfo->entries; pos = entry1; size = total_size; xt_entry_foreach(iter0, entry0, total_size) { ret = compat_copy_entry_from_user(iter0, &pos, &size, name, newinfo, entry1); if (ret != 0) break; } xt_compat_flush_offsets(AF_INET); xt_compat_unlock(AF_INET); if (ret) goto free_newinfo; ret = -ELOOP; if (!mark_source_chains(newinfo, valid_hooks, entry1)) goto free_newinfo; i = 0; xt_entry_foreach(iter1, entry1, newinfo->size) { ret = compat_check_entry(iter1, net, name); if (ret != 0) break; ++i; if (strcmp(ipt_get_target(iter1)->u.user.name, XT_ERROR_TARGET) == 0) ++newinfo->stacksize; } if (ret) { /* * The first i matches need cleanup_entry (calls ->destroy) * because they had called ->check already. The other j-i * entries need only release. 
*/ int skip = i; j -= i; xt_entry_foreach(iter0, entry0, newinfo->size) { if (skip-- > 0) continue; if (j-- == 0) break; compat_release_entry(iter0); } xt_entry_foreach(iter1, entry1, newinfo->size) { if (i-- == 0) break; cleanup_entry(iter1, net); } xt_free_table_info(newinfo); return ret; } *pinfo = newinfo; *pentry0 = entry1; xt_free_table_info(info); return 0; free_newinfo: xt_free_table_info(newinfo); out: xt_entry_foreach(iter0, entry0, total_size) { if (j-- == 0) break; compat_release_entry(iter0); } return ret; out_unlock: xt_compat_flush_offsets(AF_INET); xt_compat_unlock(AF_INET); goto out; } static int compat_do_replace(struct net *net, void __user *user, unsigned int len) { int ret; struct compat_ipt_replace tmp; struct xt_table_info *newinfo; void *loc_cpu_entry; struct ipt_entry *iter; if (copy_from_user(&tmp, user, sizeof(tmp)) != 0) return -EFAULT; /* overflow check */ if (tmp.size >= INT_MAX / num_possible_cpus()) return -ENOMEM; if (tmp.num_counters >= INT_MAX / sizeof(struct xt_counters)) return -ENOMEM; if (tmp.num_counters == 0) return -EINVAL; tmp.name[sizeof(tmp.name)-1] = 0; newinfo = xt_alloc_table_info(tmp.size); if (!newinfo) return -ENOMEM; loc_cpu_entry = newinfo->entries; if (copy_from_user(loc_cpu_entry, user + sizeof(tmp), tmp.size) != 0) { ret = -EFAULT; goto free_newinfo; } ret = translate_compat_table(net, tmp.name, tmp.valid_hooks, &newinfo, &loc_cpu_entry, tmp.size, tmp.num_entries, tmp.hook_entry, tmp.underflow); if (ret != 0) goto free_newinfo; duprintf("compat_do_replace: Translated table\n"); ret = __do_replace(net, tmp.name, tmp.valid_hooks, newinfo, tmp.num_counters, compat_ptr(tmp.counters)); if (ret) goto free_newinfo_untrans; return 0; free_newinfo_untrans: xt_entry_foreach(iter, loc_cpu_entry, newinfo->size) cleanup_entry(iter, net); free_newinfo: xt_free_table_info(newinfo); return ret; } static int compat_do_ipt_set_ctl(struct sock *sk, int cmd, void __user *user, unsigned int len) { int ret; if 
(!ns_capable(sock_net(sk)->user_ns, CAP_NET_ADMIN)) return -EPERM; switch (cmd) { case IPT_SO_SET_REPLACE: ret = compat_do_replace(sock_net(sk), user, len); break; case IPT_SO_SET_ADD_COUNTERS: ret = do_add_counters(sock_net(sk), user, len, 1); break; default: duprintf("do_ipt_set_ctl: unknown request %i\n", cmd); ret = -EINVAL; } return ret; } struct compat_ipt_get_entries { char name[XT_TABLE_MAXNAMELEN]; compat_uint_t size; struct compat_ipt_entry entrytable[0]; }; static int compat_copy_entries_to_user(unsigned int total_size, struct xt_table *table, void __user *userptr) { struct xt_counters *counters; const struct xt_table_info *private = table->private; void __user *pos; unsigned int size; int ret = 0; unsigned int i = 0; struct ipt_entry *iter; counters = alloc_counters(table); if (IS_ERR(counters)) return PTR_ERR(counters); pos = userptr; size = total_size; xt_entry_foreach(iter, private->entries, total_size) { ret = compat_copy_entry_to_user(iter, &pos, &size, counters, i++); if (ret != 0) break; } vfree(counters); return ret; } static int compat_get_entries(struct net *net, struct compat_ipt_get_entries __user *uptr, int *len) { int ret; struct compat_ipt_get_entries get; struct xt_table *t; if (*len < sizeof(get)) { duprintf("compat_get_entries: %u < %zu\n", *len, sizeof(get)); return -EINVAL; } if (copy_from_user(&get, uptr, sizeof(get)) != 0) return -EFAULT; if (*len != sizeof(struct compat_ipt_get_entries) + get.size) { duprintf("compat_get_entries: %u != %zu\n", *len, sizeof(get) + get.size); return -EINVAL; } xt_compat_lock(AF_INET); t = xt_find_table_lock(net, AF_INET, get.name); if (!IS_ERR_OR_NULL(t)) { const struct xt_table_info *private = t->private; struct xt_table_info info; duprintf("t->private->number = %u\n", private->number); ret = compat_table_info(private, &info); if (!ret && get.size == info.size) { ret = compat_copy_entries_to_user(private->size, t, uptr->entrytable); } else if (!ret) { duprintf("compat_get_entries: I've got %u not 
%u!\n", private->size, get.size); ret = -EAGAIN; } xt_compat_flush_offsets(AF_INET); module_put(t->me); xt_table_unlock(t); } else ret = t ? PTR_ERR(t) : -ENOENT; xt_compat_unlock(AF_INET); return ret; } static int do_ipt_get_ctl(struct sock *, int, void __user *, int *); static int compat_do_ipt_get_ctl(struct sock *sk, int cmd, void __user *user, int *len) { int ret; if (!ns_capable(sock_net(sk)->user_ns, CAP_NET_ADMIN)) return -EPERM; switch (cmd) { case IPT_SO_GET_INFO: ret = get_info(sock_net(sk), user, len, 1); break; case IPT_SO_GET_ENTRIES: ret = compat_get_entries(sock_net(sk), user, len); break; default: ret = do_ipt_get_ctl(sk, cmd, user, len); } return ret; } #endif static int do_ipt_set_ctl(struct sock *sk, int cmd, void __user *user, unsigned int len) { int ret; if (!ns_capable(sock_net(sk)->user_ns, CAP_NET_ADMIN)) return -EPERM; switch (cmd) { case IPT_SO_SET_REPLACE: ret = do_replace(sock_net(sk), user, len); break; case IPT_SO_SET_ADD_COUNTERS: ret = do_add_counters(sock_net(sk), user, len, 0); break; default: duprintf("do_ipt_set_ctl: unknown request %i\n", cmd); ret = -EINVAL; } return ret; } static int do_ipt_get_ctl(struct sock *sk, int cmd, void __user *user, int *len) { int ret; if (!ns_capable(sock_net(sk)->user_ns, CAP_NET_ADMIN)) return -EPERM; switch (cmd) { case IPT_SO_GET_INFO: ret = get_info(sock_net(sk), user, len, 0); break; case IPT_SO_GET_ENTRIES: ret = get_entries(sock_net(sk), user, len); break; case IPT_SO_GET_REVISION_MATCH: case IPT_SO_GET_REVISION_TARGET: { struct xt_get_revision rev; int target; if (*len != sizeof(rev)) { ret = -EINVAL; break; } if (copy_from_user(&rev, user, sizeof(rev)) != 0) { ret = -EFAULT; break; } rev.name[sizeof(rev.name)-1] = 0; if (cmd == IPT_SO_GET_REVISION_TARGET) target = 1; else target = 0; try_then_request_module(xt_find_revision(AF_INET, rev.name, rev.revision, target, &ret), "ipt_%s", rev.name); break; } default: duprintf("do_ipt_get_ctl: unknown request %i\n", cmd); ret = -EINVAL; } return 
ret; } static void __ipt_unregister_table(struct net *net, struct xt_table *table) { struct xt_table_info *private; void *loc_cpu_entry; struct module *table_owner = table->me; struct ipt_entry *iter; private = xt_unregister_table(table); /* Decrease module usage counts and free resources */ loc_cpu_entry = private->entries; xt_entry_foreach(iter, loc_cpu_entry, private->size) cleanup_entry(iter, net); if (private->number > private->initial_entries) module_put(table_owner); xt_free_table_info(private); } int ipt_register_table(struct net *net, const struct xt_table *table, const struct ipt_replace *repl, const struct nf_hook_ops *ops, struct xt_table **res) { int ret; struct xt_table_info *newinfo; struct xt_table_info bootstrap = {0}; void *loc_cpu_entry; struct xt_table *new_table; newinfo = xt_alloc_table_info(repl->size); if (!newinfo) return -ENOMEM; loc_cpu_entry = newinfo->entries; memcpy(loc_cpu_entry, repl->entries, repl->size); ret = translate_table(net, newinfo, loc_cpu_entry, repl); if (ret != 0) goto out_free; new_table = xt_register_table(net, table, &bootstrap, newinfo); if (IS_ERR(new_table)) { ret = PTR_ERR(new_table); goto out_free; } /* set res now, will see skbs right after nf_register_net_hooks */ WRITE_ONCE(*res, new_table); ret = nf_register_net_hooks(net, ops, hweight32(table->valid_hooks)); if (ret != 0) { __ipt_unregister_table(net, new_table); *res = NULL; } return ret; out_free: xt_free_table_info(newinfo); return ret; } void ipt_unregister_table(struct net *net, struct xt_table *table, const struct nf_hook_ops *ops) { nf_unregister_net_hooks(net, ops, hweight32(table->valid_hooks)); __ipt_unregister_table(net, table); } /* Returns 1 if the type and code is matched by the range, 0 otherwise */ static inline bool icmp_type_code_match(u_int8_t test_type, u_int8_t min_code, u_int8_t max_code, u_int8_t type, u_int8_t code, bool invert) { return ((test_type == 0xFF) || (type == test_type && code >= min_code && code <= max_code)) ^ invert; } 
static bool icmp_match(const struct sk_buff *skb, struct xt_action_param *par) { const struct icmphdr *ic; struct icmphdr _icmph; const struct ipt_icmp *icmpinfo = par->matchinfo; /* Must not be a fragment. */ if (par->fragoff != 0) return false; ic = skb_header_pointer(skb, par->thoff, sizeof(_icmph), &_icmph); if (ic == NULL) { /* We've been asked to examine this packet, and we * can't. Hence, no choice but to drop. */ duprintf("Dropping evil ICMP tinygram.\n"); par->hotdrop = true; return false; } return icmp_type_code_match(icmpinfo->type, icmpinfo->code[0], icmpinfo->code[1], ic->type, ic->code, !!(icmpinfo->invflags&IPT_ICMP_INV)); } static int icmp_checkentry(const struct xt_mtchk_param *par) { const struct ipt_icmp *icmpinfo = par->matchinfo; /* Must specify no unknown invflags */ return (icmpinfo->invflags & ~IPT_ICMP_INV) ? -EINVAL : 0; } static struct xt_target ipt_builtin_tg[] __read_mostly = { { .name = XT_STANDARD_TARGET, .targetsize = sizeof(int), .family = NFPROTO_IPV4, #ifdef CONFIG_COMPAT .compatsize = sizeof(compat_int_t), .compat_from_user = compat_standard_from_user, .compat_to_user = compat_standard_to_user, #endif }, { .name = XT_ERROR_TARGET, .target = ipt_error, .targetsize = XT_FUNCTION_MAXNAMELEN, .family = NFPROTO_IPV4, }, }; static struct nf_sockopt_ops ipt_sockopts = { .pf = PF_INET, .set_optmin = IPT_BASE_CTL, .set_optmax = IPT_SO_SET_MAX+1, .set = do_ipt_set_ctl, #ifdef CONFIG_COMPAT .compat_set = compat_do_ipt_set_ctl, #endif .get_optmin = IPT_BASE_CTL, .get_optmax = IPT_SO_GET_MAX+1, .get = do_ipt_get_ctl, #ifdef CONFIG_COMPAT .compat_get = compat_do_ipt_get_ctl, #endif .owner = THIS_MODULE, }; static struct xt_match ipt_builtin_mt[] __read_mostly = { { .name = "icmp", .match = icmp_match, .matchsize = sizeof(struct ipt_icmp), .checkentry = icmp_checkentry, .proto = IPPROTO_ICMP, .family = NFPROTO_IPV4, }, }; static int __net_init ip_tables_net_init(struct net *net) { return xt_proto_init(net, NFPROTO_IPV4); } static void 
__net_exit ip_tables_net_exit(struct net *net) { xt_proto_fini(net, NFPROTO_IPV4); } static struct pernet_operations ip_tables_net_ops = { .init = ip_tables_net_init, .exit = ip_tables_net_exit, }; static int __init ip_tables_init(void) { int ret; ret = register_pernet_subsys(&ip_tables_net_ops); if (ret < 0) goto err1; /* No one else will be downing sem now, so we won't sleep */ ret = xt_register_targets(ipt_builtin_tg, ARRAY_SIZE(ipt_builtin_tg)); if (ret < 0) goto err2; ret = xt_register_matches(ipt_builtin_mt, ARRAY_SIZE(ipt_builtin_mt)); if (ret < 0) goto err4; /* Register setsockopt */ ret = nf_register_sockopt(&ipt_sockopts); if (ret < 0) goto err5; pr_info("(C) 2000-2006 Netfilter Core Team\n"); return 0; err5: xt_unregister_matches(ipt_builtin_mt, ARRAY_SIZE(ipt_builtin_mt)); err4: xt_unregister_targets(ipt_builtin_tg, ARRAY_SIZE(ipt_builtin_tg)); err2: unregister_pernet_subsys(&ip_tables_net_ops); err1: return ret; } static void __exit ip_tables_fini(void) { nf_unregister_sockopt(&ipt_sockopts); xt_unregister_matches(ipt_builtin_mt, ARRAY_SIZE(ipt_builtin_mt)); xt_unregister_targets(ipt_builtin_tg, ARRAY_SIZE(ipt_builtin_tg)); unregister_pernet_subsys(&ip_tables_net_ops); } EXPORT_SYMBOL(ipt_register_table); EXPORT_SYMBOL(ipt_unregister_table); EXPORT_SYMBOL(ipt_do_table); module_init(ip_tables_init); module_exit(ip_tables_fini);
check_compat_entry_size_and_hooks(struct compat_ipt_entry *e, struct xt_table_info *newinfo, unsigned int *size, const unsigned char *base, const unsigned char *limit, const unsigned int *hook_entries, const unsigned int *underflows, const char *name) { struct xt_entry_match *ematch; struct xt_entry_target *t; struct xt_target *target; unsigned int entry_offset; unsigned int j; int ret, off, h; duprintf("check_compat_entry_size_and_hooks %p\n", e); if ((unsigned long)e % __alignof__(struct compat_ipt_entry) != 0 || (unsigned char *)e + sizeof(struct compat_ipt_entry) >= limit) { duprintf("Bad offset %p, limit = %p\n", e, limit); return -EINVAL; } if (e->next_offset < sizeof(struct compat_ipt_entry) + sizeof(struct compat_xt_entry_target)) { duprintf("checking: element %p size %u\n", e, e->next_offset); return -EINVAL; } /* For purposes of check_entry casting the compat entry is fine */ ret = check_entry((struct ipt_entry *)e); if (ret) return ret; off = sizeof(struct ipt_entry) - sizeof(struct compat_ipt_entry); entry_offset = (void *)e - (void *)base; j = 0; xt_ematch_foreach(ematch, e) { ret = compat_find_calc_match(ematch, name, &e->ip, &off); if (ret != 0) goto release_matches; ++j; } t = compat_ipt_get_target(e); target = xt_request_find_target(NFPROTO_IPV4, t->u.user.name, t->u.user.revision); if (IS_ERR(target)) { duprintf("check_compat_entry_size_and_hooks: `%s' not found\n", t->u.user.name); ret = PTR_ERR(target); goto release_matches; } t->u.kernel.target = target; off += xt_compat_target_offset(target); *size += off; ret = xt_compat_add_offset(AF_INET, entry_offset, off); if (ret) goto out; /* Check hooks & underflows */ for (h = 0; h < NF_INET_NUMHOOKS; h++) { if ((unsigned char *)e - base == hook_entries[h]) newinfo->hook_entry[h] = hook_entries[h]; if ((unsigned char *)e - base == underflows[h]) newinfo->underflow[h] = underflows[h]; } /* Clear counters and comefrom */ memset(&e->counters, 0, sizeof(e->counters)); e->comefrom = 0; return 0; out: 
module_put(t->u.kernel.target->me); release_matches: xt_ematch_foreach(ematch, e) { if (j-- == 0) break; module_put(ematch->u.kernel.match->me); } return ret; }
check_compat_entry_size_and_hooks(struct compat_ipt_entry *e, struct xt_table_info *newinfo, unsigned int *size, const unsigned char *base, const unsigned char *limit, const unsigned int *hook_entries, const unsigned int *underflows, const char *name) { struct xt_entry_match *ematch; struct xt_entry_target *t; struct xt_target *target; unsigned int entry_offset; unsigned int j; int ret, off, h; duprintf("check_compat_entry_size_and_hooks %p\n", e); if ((unsigned long)e % __alignof__(struct compat_ipt_entry) != 0 || (unsigned char *)e + sizeof(struct compat_ipt_entry) >= limit || (unsigned char *)e + e->next_offset > limit) { duprintf("Bad offset %p, limit = %p\n", e, limit); return -EINVAL; } if (e->next_offset < sizeof(struct compat_ipt_entry) + sizeof(struct compat_xt_entry_target)) { duprintf("checking: element %p size %u\n", e, e->next_offset); return -EINVAL; } /* For purposes of check_entry casting the compat entry is fine */ ret = check_entry((struct ipt_entry *)e); if (ret) return ret; off = sizeof(struct ipt_entry) - sizeof(struct compat_ipt_entry); entry_offset = (void *)e - (void *)base; j = 0; xt_ematch_foreach(ematch, e) { ret = compat_find_calc_match(ematch, name, &e->ip, &off); if (ret != 0) goto release_matches; ++j; } t = compat_ipt_get_target(e); target = xt_request_find_target(NFPROTO_IPV4, t->u.user.name, t->u.user.revision); if (IS_ERR(target)) { duprintf("check_compat_entry_size_and_hooks: `%s' not found\n", t->u.user.name); ret = PTR_ERR(target); goto release_matches; } t->u.kernel.target = target; off += xt_compat_target_offset(target); *size += off; ret = xt_compat_add_offset(AF_INET, entry_offset, off); if (ret) goto out; /* Check hooks & underflows */ for (h = 0; h < NF_INET_NUMHOOKS; h++) { if ((unsigned char *)e - base == hook_entries[h]) newinfo->hook_entry[h] = hook_entries[h]; if ((unsigned char *)e - base == underflows[h]) newinfo->underflow[h] = underflows[h]; } /* Clear counters and comefrom */ memset(&e->counters, 0, 
sizeof(e->counters)); e->comefrom = 0; return 0; out: module_put(t->u.kernel.target->me); release_matches: xt_ematch_foreach(ematch, e) { if (j-- == 0) break; module_put(ematch->u.kernel.match->me); } return ret; }
{'added': [(741, '\t (unsigned char *)e + sizeof(struct ipt_entry) >= limit ||'), (742, '\t (unsigned char *)e + e->next_offset > limit) {'), (1496, '\t (unsigned char *)e + sizeof(struct compat_ipt_entry) >= limit ||'), (1497, '\t (unsigned char *)e + e->next_offset > limit) {')], 'deleted': [(741, '\t (unsigned char *)e + sizeof(struct ipt_entry) >= limit) {'), (1495, '\t (unsigned char *)e + sizeof(struct compat_ipt_entry) >= limit) {')]}
4
2
1,816
11,251
73
503
12
https://github.com/torvalds/linux
CVE-2016-4998
CWE-119
132
bin_dyldcache.c
C
read_cache_accel
/* radare2 - LGPL - Copyright 2018-2022 - pancake, mrmacete, keegan */

#include <r_types.h>
#include <r_util.h>
#include <r_lib.h>
#include <r_bin.h>
#include <r_core.h>
#include <r_io.h>
#include <ht_pu.h>
// #include "../format/mach0/mach0_defines.h"
#define R_BIN_MACH064 1
#include "../format/mach0/mach0.h"
#include "objc/mach0_classes.h"

/* Bit 63 set on a raw slide-v3 pointer means it carries PAC info. */
#define R_IS_PTR_AUTHENTICATED(x) B_IS_SET(x, 63)
#define MAX_N_HDR 16

/* Common prefix shared by all version-specific rebase-info structs below;
 * code downcasts based on the `version` field. */
typedef struct {
	ut8 version;
	ut64 slide;
	ut8 *one_page_buf;
	ut32 page_size;
	ut64 start_of_data;
} RDyldRebaseInfo;

/* One file range [start, end) covered by a rebase info. */
typedef struct {
	ut64 start;
	ut64 end;
	RDyldRebaseInfo *info;
} RDyldRebaseInfosEntry;

typedef struct {
	RDyldRebaseInfosEntry *entries;
	size_t length;
} RDyldRebaseInfos;

/* Slide info v3 (arm64e caches, pointer authentication). */
typedef struct {
	ut8 version;
	ut64 slide;
	ut8 *one_page_buf;
	ut32 page_size;
	ut64 start_of_data;
	ut16 *page_starts;
	ut32 page_starts_count;
	ut64 delta_mask;
	ut32 delta_shift;
	ut64 auth_value_add;
} RDyldRebaseInfo3;

/* Slide info v2 and v4 (delta-chained pages with optional extras). */
typedef struct {
	ut8 version;
	ut64 slide;
	ut8 *one_page_buf;
	ut32 page_size;
	ut64 start_of_data;
	ut16 *page_starts;
	ut32 page_starts_count;
	ut16 *page_extras;
	ut32 page_extras_count;
	ut64 delta_mask;
	ut64 value_mask;
	ut32 delta_shift;
	ut64 value_add;
} RDyldRebaseInfo2;

/* Slide info v1 (toc + per-page bitmap entries). */
typedef struct {
	ut8 version;
	ut64 slide;
	ut8 *one_page_buf;
	ut32 page_size;
	ut64 start_of_data;
	ut16 *toc;
	ut32 toc_count;
	ut8 *entries;
	ut32 entries_size;
} RDyldRebaseInfo1;

/* Location of the local-symbols blob inside the cache file. */
typedef struct {
	ut64 local_symbols_offset;
	ut64 nlists_offset;
	ut64 nlists_count;
	ut64 strings_offset;
	ut64 strings_size;
} RDyldLocSym;

typedef struct _r_dyldcache {
	ut8 magic[8];
	cache_hdr_t *hdr;        // one header per sub-cache
	ut64 *hdr_offset;        // file offset of each header
	ut32 *maps_index;        // first map index of each sub-cache
	ut32 n_hdr;
	cache_map_t *maps;       // all mappings, concatenated
	ut32 n_maps;
	RList *bins;             // RDyldBinImage*
	RBuffer *buf;
	int (*original_io_read)(RIO *io, RIODesc *fd, ut8 *buf, int count);
	RDyldRebaseInfos *rebase_infos;
	cache_accel_t *accel;
	RDyldLocSym *locsym;
	objc_cache_opt_info *oi;
	bool objc_opt_info_loaded;
} RDyldCache;

typedef struct _r_bin_image {
	char *file;
	ut64 header_at;
	ut64 hdr_offset;
	ut64 symbols_off;
	ut64 va;
	ut32 nlist_start_index;
	ut32 nlist_count;
} RDyldBinImage;

static R_TH_LOCAL RList *pending_bin_files = NULL;

/* Translate a (slid) virtual address to a file offset using the cache maps.
 * Returns UT64_MAX when the address falls in no mapping. `offset`/`left`
 * (both optional) receive the offset into the mapping and the bytes
 * remaining in it. */
static ut64 va2pa(uint64_t addr, ut32 n_maps, cache_map_t *maps, RBuffer *cache_buf, ut64 slide, ut32 *offset, ut32 *left) {
	ut64 res = UT64_MAX;
	ut32 i;

	addr -= slide;

	for (i = 0; i < n_maps; i++) {
		if (addr >= maps[i].address && addr < maps[i].address + maps[i].size) {
			res = maps[i].fileOffset + addr - maps[i].address;
			if (offset) {
				*offset = addr - maps[i].address;
			}
			if (left) {
				*left = maps[i].size - (addr - maps[i].address);
			}
			break;
		}
	}

	return res;
}

static void free_bin(RDyldBinImage *bin) {
	if (!bin) {
		return;
	}
	R_FREE (bin->file);
	R_FREE (bin);
}

static void rebase_info3_free(RDyldRebaseInfo3 *rebase_info) {
	if (!rebase_info) {
		return;
	}
	R_FREE (rebase_info->page_starts);
	R_FREE (rebase_info);
}

static void rebase_info2_free(RDyldRebaseInfo2 *rebase_info) {
	if (!rebase_info) {
		return;
	}
	R_FREE (rebase_info->page_starts);
	R_FREE (rebase_info->page_extras);
	R_FREE (rebase_info);
}

static void rebase_info1_free(RDyldRebaseInfo1 *rebase_info) {
	if (!rebase_info) {
		return;
	}
	R_FREE (rebase_info->toc);
	R_FREE (rebase_info->entries);
	R_FREE (rebase_info);
}

/* Dispatch to the right destructor based on the version tag. */
static void rebase_info_free(RDyldRebaseInfo *rebase_info) {
	if (!rebase_info) {
		return;
	}

	R_FREE (rebase_info->one_page_buf);

	ut8 version = rebase_info->version;
	if (version == 1) {
		rebase_info1_free ((RDyldRebaseInfo1*) rebase_info);
	} else if (version == 2 || version == 4) {
		rebase_info2_free ((RDyldRebaseInfo2*) rebase_info);
	} else if (version == 3) {
		rebase_info3_free ((RDyldRebaseInfo3*) rebase_info);
	} else {
		R_FREE (rebase_info);
	}
}

/* Read the image table of one sub-cache header. `hdr_offset` (non-zero for
 * secondary sub-caches) is folded into each image's pathFileOffset so path
 * lookups work on the concatenated buffer. Caller frees the result. */
static cache_img_t *read_cache_images(RBuffer *cache_buf, cache_hdr_t *hdr, ut64 hdr_offset) {
	if (!cache_buf || !hdr) {
		return NULL;
	}
	if (!hdr->imagesCount || !hdr->imagesOffset || hdr->imagesOffset == UT32_MAX || hdr->imagesCount == UT32_MAX) {
		return NULL;
	}

	ut64 size = sizeof (cache_img_t) * hdr->imagesCount;
	cache_img_t *images = R_NEWS0 (cache_img_t, hdr->imagesCount);
	if (!images) {
		return NULL;
	}

	if (r_buf_fread_at (cache_buf, hdr->imagesOffset, (ut8*) images, "3l2i", hdr->imagesCount) != size) {
		R_FREE (images);
		return NULL;
	}

	if (hdr_offset) {
		ut32 i;
		for (i = 0; i < hdr->imagesCount; i++) {
			cache_img_t *img = &images[i];
			img->pathFileOffset += hdr_offset;
		}
	}

	return images;
}

/* Pair each cached bin image with its local-symbols entry by matching the
 * load address; advances through cache->bins in table order. */
static void match_bin_entries(RDyldCache *cache, void *entries) {
	r_return_if_fail (cache && cache->bins && entries);

	cache_img_t *imgs = read_cache_images (cache->buf, cache->hdr, 0);
	if (!imgs) {
		return;
	}

	RDyldBinImage *bin = NULL;
	RListIter *it = r_list_iterator (cache->bins);

	/* multi-file caches use the "large" (64-bit offset) entry layout */
	bool has_large_entries = cache->n_hdr > 1;

	ut32 i;
	for (i = 0; i < cache->hdr->imagesCount; i++) {
		cache_img_t *img = &imgs[i];
		if (!it) {
			break;
		}
		bin = it->data;
		if (!bin) {
			break;
		}
		if (bin && bin->va == img->address) {
			if (has_large_entries) {
				cache_locsym_entry_large_t *e = &((cache_locsym_entry_large_t *) entries)[i];
				bin->nlist_start_index = e->nlistStartIndex;
				bin->nlist_count = e->nlistCount;
			} else {
				cache_locsym_entry_t *e = &((cache_locsym_entry_t *) entries)[i];
				bin->nlist_start_index = e->nlistStartIndex;
				bin->nlist_count = e->nlistCount;
			}

			it = it->n;
		}
	}

	R_FREE (imgs);
}

/* Parse the local-symbols metadata of the first sub-cache that has one and
 * bind per-bin nlist ranges via match_bin_entries. Returns NULL when no
 * (valid) local symbols are present. */
static RDyldLocSym *r_dyld_locsym_new(RDyldCache *cache) {
	r_return_val_if_fail (cache && cache->buf, NULL);

	ut32 i;
	for (i = 0; i < cache->n_hdr; i++) {
		cache_hdr_t *hdr = &cache->hdr[i];
		if (!hdr || !hdr->localSymbolsSize || !hdr->localSymbolsOffset) {
			continue;
		}

		cache_locsym_info_t *info = NULL;
		void *entries = NULL;

		ut64 info_size = sizeof (cache_locsym_info_t);
		info = R_NEW0 (cache_locsym_info_t);
		if (!info) {
			goto beach;
		}
		if (r_buf_fread_at (cache->buf, hdr->localSymbolsOffset, (ut8*) info, "6i", 1) != info_size) {
			eprintf ("locsym err 01\n");
			goto beach;
		}
		if (info->entriesCount != cache->hdr->imagesCount) {
			eprintf ("locsym err 02\n");
			goto beach;
		}

		bool has_large_entries = cache->n_hdr > 1;
		if (has_large_entries) {
			ut64 entries_size = sizeof (cache_locsym_entry_large_t) * info->entriesCount;
			cache_locsym_entry_large_t *large_entries = R_NEWS0 (cache_locsym_entry_large_t, info->entriesCount);
			if (!large_entries) {
				goto beach;
			}
			if (r_buf_fread_at (cache->buf, hdr->localSymbolsOffset + info->entriesOffset, (ut8*) large_entries, "lii",
					info->entriesCount) != entries_size) {
				eprintf ("locsym err 03\n");
				goto beach;
			}
			entries = large_entries;
		} else {
			ut64 entries_size = sizeof (cache_locsym_entry_t) * info->entriesCount;
			cache_locsym_entry_t *regular_entries = R_NEWS0 (cache_locsym_entry_t, info->entriesCount);
			if (!regular_entries) {
				goto beach;
			}
			if (r_buf_fread_at (cache->buf, hdr->localSymbolsOffset + info->entriesOffset, (ut8*) regular_entries, "iii",
					info->entriesCount) != entries_size) {
				eprintf ("locsym err 04\n");
				goto beach;
			}
			entries = regular_entries;
		}
		RDyldLocSym * locsym = R_NEW0 (RDyldLocSym);
		if (!locsym) {
			goto beach;
		}

		match_bin_entries (cache, entries);

		locsym->local_symbols_offset = hdr->localSymbolsOffset;
		locsym->nlists_offset = info->nlistOffset;
		locsym->nlists_count = info->nlistCount;
		locsym->strings_offset = info->stringsOffset;
		locsym->strings_size = info->stringsSize;

		free (info);
		free (entries);

		return locsym;

beach:
		free (info);
		free (entries);

		eprintf ("dyldcache: malformed local symbols metadata\n");
		break;
	}
	return NULL;
}

/* Return the slide of the first rebase info that has one, or 0. */
static ut64 rebase_infos_get_slide(RDyldCache *cache) {
	if (!cache->rebase_infos || !cache->rebase_infos->length) {
		return 0;
	}

	size_t i;
	for (i = 0; i < cache->rebase_infos->length; i++) {
		if (cache->rebase_infos->entries[i].info) {
			return cache->rebase_infos->entries[i].info->slide;
		}
	}

	return 0;
}

/* Append the bin's slice of the shared local-symbols table to `symbols`,
 * deduplicating addresses through `hash`. */
static void symbols_from_locsym(RDyldCache *cache, RDyldBinImage *bin, RList *symbols, SetU *hash) {
	RDyldLocSym *locsym = cache->locsym;
	if (!locsym) {
		return;
	}

	/* reject ranges that escape the nlist table */
	if (bin->nlist_start_index >= locsym->nlists_count ||
			bin->nlist_start_index + bin->nlist_count > locsym->nlists_count) {
		eprintf ("dyldcache: malformed local symbol entry\n");
		return;
	}

	ut64 nlists_size = sizeof (struct MACH0_(nlist)) * bin->nlist_count;
	struct MACH0_(nlist) *nlists = R_NEWS0 (struct MACH0_(nlist), bin->nlist_count);
	if (!nlists) {
		return;
	}
	ut64 nlists_offset = locsym->local_symbols_offset + locsym->nlists_offset +
			bin->nlist_start_index * sizeof (struct MACH0_(nlist));
	if (r_buf_fread_at (cache->buf, nlists_offset, (ut8*) nlists, "iccsl", bin->nlist_count) != nlists_size) {
		free (nlists);
		return;
	}

	ut32 j;
	for (j = 0; j != bin->nlist_count; j++) {
		struct MACH0_(nlist) *nlist = &nlists[j];
		if (set_u_contains (hash, (ut64)nlist->n_value)) {
			continue;
		}
		set_u_add (hash, (ut64)nlist->n_value);
		if (nlist->n_strx >= locsym->strings_size) {
			continue;
		}
		RBinSymbol *sym = R_NEW0 (RBinSymbol);
		if (!sym) {
			break;
		}
		sym->type = "LOCAL";
		sym->vaddr = nlist->n_value;
		ut64 slide = rebase_infos_get_slide (cache);
		sym->paddr = va2pa (nlist->n_value, cache->n_maps, cache->maps, cache->buf, slide, NULL, NULL);

		char *symstr = r_buf_get_string (cache->buf, locsym->local_symbols_offset + locsym->strings_offset + nlist->n_strx);
		if (symstr) {
			sym->name = symstr;
		} else {
			static ut32 k = 0;
			sym->name = r_str_newf ("unk_local%d", k++);
		}

		r_list_append (symbols, sym);
	}
	free (nlists);
}

static void r_dyldcache_free(RDyldCache *cache) {
	if (!cache) {
		return;
	}

	r_list_free (cache->bins);
	cache->bins = NULL;
	r_buf_free (cache->buf);
	cache->buf = NULL;
	if (cache->rebase_infos) {
		size_t i; /* was int: length is a size_t, avoid signed/unsigned mix */
		for (i = 0; i < cache->rebase_infos->length; i++) {
			rebase_info_free (cache->rebase_infos->entries[i].info);
			cache->rebase_infos->entries[i].info = NULL;
		}
		R_FREE (cache->rebase_infos->entries);
		R_FREE (cache->rebase_infos);
	}
	R_FREE (cache->hdr);
	R_FREE (cache->maps);
	R_FREE (cache->maps_index);
	R_FREE (cache->hdr_offset);
	R_FREE (cache->accel);
	R_FREE (cache->locsym);
	R_FREE (cache->oi);
	R_FREE (cache);
}

/* va2pa wrapper plugged into each sub-bin's mach0 object; returns 0
 * (not UT64_MAX) on failure, per the mach0 callback contract. */
static ut64 bin_obj_va2pa(ut64 p, ut32 *offset, ut32 *left, RBinFile *bf) {
	if (!bf || !bf->o || !bf->o->bin_obj) {
		return 0;
	}

	RDyldCache *cache = (RDyldCache*) ((struct MACH0_(obj_t)*)bf->o->bin_obj)->user;
	if (!cache) {
		return 0;
	}

	ut64 slide = rebase_infos_get_slide (cache);
	ut64 res = va2pa (p, cache->n_maps, cache->maps, cache->buf, slide, offset, left);
	if (res == UT64_MAX) {
		res = 0;
	}
	return res;
}

/* Instantiate a mach0 object for one image inside the cache. */
static struct MACH0_(obj_t) *bin_to_mach0(RBinFile *bf, RDyldBinImage *bin) {
	if (!bin || !bf) {
		return NULL;
	}

	RDyldCache *cache = (RDyldCache*) bf->o->bin_obj;
	if (!cache) {
		return NULL;
	}

	RBuffer *buf = r_buf_new_slice (cache->buf, bin->hdr_offset, r_buf_size (cache->buf) - bin->hdr_offset);
	if (!buf) {
		return NULL;
	}

	struct MACH0_(opts_t) opts;
	MACH0_(opts_set_default) (&opts, bf);
	opts.header_at = bin->header_at - bin->hdr_offset;
	opts.symbols_off = bin->symbols_off;

	struct MACH0_(obj_t) *mach0 = MACH0_(new_buf) (buf, &opts);
	if (!mach0) {
		/* fix: previously dereferenced a NULL mach0 below */
		r_buf_free (buf);
		return NULL;
	}
	mach0->user = cache;
	mach0->va2pa = &bin_obj_va2pa;

	r_buf_free (buf);

	return mach0;
}

/* Convert VM_PROT bits (r=1,w=2,x=4) to radare2 perm bits (x=1,w=2,r=4). */
static int prot2perm(int x) {
	int r = 0;
	if (x & 1) {
		r |= 4;
	}
	if (x & 2) {
		r |= 2;
	}
	if (x & 4) {
		r |= 1;
	}
	return r;
}

/* Portable count-trailing-zeros for 64-bit values (returns 64 for 0). */
static ut32 dumb_ctzll(ut64 x) {
	ut64 result = 0;
	int i, j;
	for (i = 0; i < 64; i += 8) {
		ut8 byte = (x >> i) & 0xff;
		if (!byte) {
			result += 8;
		} else {
			for (j = 0; j < 8; j++) {
				if (!((byte >> j) & 1)) {
					result++;
				} else {
					break;
				}
			}
			break;
		}
	}
	return result;
}

/* Heuristically recover the cache slide by sampling __objc_classlist
 * pointers and aligning them against __objc_data. Returns 0 when no
 * sample matches (or for multi-file caches, which are never slid here). */
static ut64 estimate_slide(RBinFile *bf, RDyldCache *cache, ut64 value_mask, ut64 value_add) {
	ut64 slide = 0;
	if (cache->n_hdr > 1) {
		return slide;
	}
	ut64 *classlist = malloc (64);
	if (!classlist) {
		goto beach;
	}

	RListIter *iter;
	RDyldBinImage *bin;
	r_list_foreach (cache->bins, iter, bin) {
		bool found_sample = false;

		struct MACH0_(opts_t) opts = {0};
		opts.verbose = bf->rbin->verbose;
		opts.header_at = bin->header_at;
		opts.symbols_off = 0;

		struct MACH0_(obj_t) *mach0 = MACH0_(new_buf) (cache->buf, &opts);
		if (!mach0) {
			goto beach;
		}

		struct section_t *sections = NULL;
		if (!(sections = MACH0_(get_sections) (mach0))) {
			MACH0_(mach0_free) (mach0);
			goto beach;
		}

		int i;
		int incomplete = 2;
		int classlist_idx = 0, data_idx = 0;
		for (i = 0; !sections[i].last && incomplete; i++) {
			if (sections[i].size == 0) {
				continue;
			}
			if (strstr (sections[i].name, "__objc_classlist")) {
				incomplete--;
				classlist_idx = i;
				continue;
			}
			if (strstr (sections[i].name, "__objc_data")) {
				incomplete--;
				data_idx = i;
				continue;
			}
		}

		if (incomplete) {
			goto next_bin;
		}

		int classlist_sample_size = R_MIN (64, sections[classlist_idx].size);
		int n_classes = classlist_sample_size / 8;
		ut64 sect_offset = sections[classlist_idx].offset + bin->hdr_offset;

		/* fix: the read yields n_classes * 8 bytes; comparing against
		 * classlist_sample_size spuriously failed when the section size
		 * was not a multiple of 8 */
		if (r_buf_fread_at (cache->buf, sect_offset, (ut8*) classlist, "l", n_classes) != n_classes * 8) {
			goto next_bin;
		}

		ut64 data_addr = sections[data_idx].addr;
		ut64 data_tail = data_addr & 0xfff;
		ut64 data_tail_end = (data_addr + sections[data_idx].size) & 0xfff;
		for (i = 0; i < n_classes; i++) {
			ut64 cl_addr = (classlist[i] & value_mask) + value_add;
			ut64 cl_tail = cl_addr & 0xfff;
			if (cl_tail >= data_tail && cl_tail < data_tail_end) {
				ut64 off = cl_tail - data_tail;
				slide = ((cl_addr - off) & value_mask) - (data_addr & value_mask);
				found_sample = true;
				break;
			}
		}

next_bin:
		MACH0_(mach0_free) (mach0);
		R_FREE (sections);

		if (found_sample) {
			break;
		}
	}

beach:
	R_FREE (classlist);
	return slide;
}

/* Parse one slide-info blob (v1, v2/v4 or v3) into the matching
 * RDyldRebaseInfo*. `slide == UT64_MAX` asks for slide estimation. */
static RDyldRebaseInfo *get_rebase_info(RBinFile *bf, RDyldCache *cache, ut64 slideInfoOffset, ut64 slideInfoSize, ut64 start_of_data, ut64 slide) {
	ut8 *tmp_buf_1 = NULL;
	ut8 *tmp_buf_2 = NULL;
	ut8 *one_page_buf = NULL;
	RBuffer *cache_buf = cache->buf;

	ut64 offset = slideInfoOffset;
	ut32 slide_info_version = 0;
	if (r_buf_read_at (cache_buf, offset, (ut8*) &slide_info_version, 4) != 4) {
		return NULL;
	}

	if (slide_info_version == 3) {
		cache_slide3_t slide_info;
		ut64 size = sizeof (cache_slide3_t);
		if (r_buf_fread_at (cache_buf, offset, (ut8*) &slide_info, "4i1l", 1) < 20) {
			return NULL;
		}

		ut64 page_starts_offset = offset + size;
		ut64 page_starts_size = slide_info.page_starts_count * 2;

		if (page_starts_size + size > slideInfoSize) {
			return NULL;
		}

		if (page_starts_size > 0) {
			tmp_buf_1 = malloc (page_starts_size);
			if (!tmp_buf_1) {
				goto beach;
			}
			if (r_buf_fread_at (cache_buf, page_starts_offset, tmp_buf_1, "s", slide_info.page_starts_count) != page_starts_size) {
				goto beach;
			}
		}

		if (slide_info.page_size > 0) {
			one_page_buf = malloc (slide_info.page_size);
			if (!one_page_buf) {
				goto beach;
			}
		}

		RDyldRebaseInfo3 *rebase_info = R_NEW0 (RDyldRebaseInfo3);
		if (!rebase_info) {
			goto beach;
		}

		rebase_info->version = 3;
		rebase_info->delta_mask = 0x3ff8000000000000ULL;
		rebase_info->delta_shift = 51;
		rebase_info->start_of_data = start_of_data;
		rebase_info->page_starts = (ut16*) tmp_buf_1;
		rebase_info->page_starts_count = slide_info.page_starts_count;
		rebase_info->auth_value_add = slide_info.auth_value_add;
		rebase_info->page_size = slide_info.page_size;
		rebase_info->one_page_buf = one_page_buf;
		if (slide == UT64_MAX) {
			rebase_info->slide = estimate_slide (bf, cache, 0x7ffffffffffffULL, 0);
			if (rebase_info->slide) {
				eprintf ("dyldcache is slid: 0x%"PFMT64x"\n", rebase_info->slide);
			}
		} else {
			rebase_info->slide = slide;
		}

		return (RDyldRebaseInfo*) rebase_info;
	} else if (slide_info_version == 2 || slide_info_version == 4) {
		cache_slide2_t slide_info;
		ut64 size = sizeof (cache_slide2_t);
		if (r_buf_fread_at (cache_buf, offset, (ut8*) &slide_info, "6i2l", 1) != size) {
			return NULL;
		}

		if (slide_info.page_starts_offset == 0 ||
			slide_info.page_starts_offset > slideInfoSize ||
			slide_info.page_starts_offset + slide_info.page_starts_count * 2 > slideInfoSize) {
			return NULL;
		}

		if (slide_info.page_extras_offset == 0 ||
			slide_info.page_extras_offset > slideInfoSize ||
			slide_info.page_extras_offset + slide_info.page_extras_count * 2 > slideInfoSize) {
			return NULL;
		}

		if (slide_info.page_starts_count > 0) {
			ut64 size = slide_info.page_starts_count * 2;
			ut64 at = slideInfoOffset + slide_info.page_starts_offset;
			tmp_buf_1 = malloc (size);
			if (!tmp_buf_1) {
				goto beach;
			}
			if (r_buf_fread_at (cache_buf, at, tmp_buf_1, "s", slide_info.page_starts_count) != size) {
				goto beach;
			}
		}

		if (slide_info.page_extras_count > 0) {
			ut64 size = slide_info.page_extras_count * 2;
			ut64 at = slideInfoOffset + slide_info.page_extras_offset;
			tmp_buf_2 = malloc (size);
			if (!tmp_buf_2) {
				goto beach;
			}
			if (r_buf_fread_at (cache_buf, at, tmp_buf_2, "s", slide_info.page_extras_count) != size) {
				goto beach;
			}
		}

		if (slide_info.page_size > 0) {
			one_page_buf = malloc (slide_info.page_size);
			if (!one_page_buf) {
				goto beach;
			}
		}

		RDyldRebaseInfo2 *rebase_info = R_NEW0 (RDyldRebaseInfo2);
		if (!rebase_info) {
			goto beach;
		}

		rebase_info->version = slide_info_version;
		rebase_info->start_of_data = start_of_data;
		rebase_info->page_starts = (ut16*) tmp_buf_1;
		rebase_info->page_starts_count = slide_info.page_starts_count;
		rebase_info->page_extras = (ut16*) tmp_buf_2;
		rebase_info->page_extras_count = slide_info.page_extras_count;
		rebase_info->value_add = slide_info.value_add;
		rebase_info->delta_mask = slide_info.delta_mask;
		rebase_info->value_mask = ~rebase_info->delta_mask;
		rebase_info->delta_shift = dumb_ctzll (rebase_info->delta_mask) - 2;
		rebase_info->page_size = slide_info.page_size;
		rebase_info->one_page_buf = one_page_buf;
		if (slide == UT64_MAX) {
			rebase_info->slide = estimate_slide (bf, cache, rebase_info->value_mask, rebase_info->value_add);
			if (rebase_info->slide) {
				eprintf ("dyldcache is slid: 0x%"PFMT64x"\n", rebase_info->slide);
			}
		} else {
			rebase_info->slide = slide;
		}

		return (RDyldRebaseInfo*) rebase_info;
	} else if (slide_info_version == 1) {
		cache_slide1_t slide_info;
		ut64 size = sizeof (cache_slide1_t);
		if (r_buf_fread_at (cache_buf, offset, (ut8*) &slide_info, "6i", 1) != size) {
			return NULL;
		}

		if (slide_info.toc_offset == 0 ||
			slide_info.toc_offset > slideInfoSize ||
			slide_info.toc_offset + slide_info.toc_count * 2 > slideInfoSize) {
			return NULL;
		}

		if (slide_info.entries_offset == 0 ||
			slide_info.entries_offset > slideInfoSize ||
			slide_info.entries_offset + slide_info.entries_count * slide_info.entries_size > slideInfoSize) {
			return NULL;
		}

		if (slide_info.toc_count > 0) {
			ut64 size = slide_info.toc_count * 2;
			ut64 at = slideInfoOffset + slide_info.toc_offset;
			tmp_buf_1 = malloc (size);
			if (!tmp_buf_1) {
				goto beach;
			}
			if (r_buf_fread_at (cache_buf, at, tmp_buf_1, "s", slide_info.toc_count) != size) {
				goto beach;
			}
		}

		if (slide_info.entries_count > 0) {
			ut64 size = (ut64) slide_info.entries_count * (ut64) slide_info.entries_size;
			ut64 at = slideInfoOffset + slide_info.entries_offset;
			tmp_buf_2 = malloc (size);
			if (!tmp_buf_2) {
				goto beach;
			}
			if (r_buf_read_at (cache_buf, at, tmp_buf_2, size) != size) {
				goto beach;
			}
		}

		one_page_buf = malloc (4096);
		if (!one_page_buf) {
			goto beach;
		}

		RDyldRebaseInfo1 *rebase_info = R_NEW0 (RDyldRebaseInfo1);
		if (!rebase_info) {
			goto beach;
		}

		rebase_info->version = 1;
		rebase_info->start_of_data = start_of_data;
		rebase_info->one_page_buf = one_page_buf;
		rebase_info->page_size = 4096;
		rebase_info->toc = (ut16*) tmp_buf_1;
		rebase_info->toc_count = slide_info.toc_count;
		rebase_info->entries = tmp_buf_2;
		rebase_info->entries_size = slide_info.entries_size;
		if (slide == UT64_MAX) {
			rebase_info->slide = estimate_slide (bf, cache, UT64_MAX, 0);
			if (rebase_info->slide) {
				eprintf ("dyldcache is slid: 0x%"PFMT64x"\n", rebase_info->slide);
			}
		} else {
			rebase_info->slide = slide;
		}

		return (RDyldRebaseInfo*) rebase_info;
	} else {
		eprintf ("unsupported slide info version %d\n", slide_info_version);
		return NULL;
	}

beach:
	R_FREE (tmp_buf_1);
	R_FREE (tmp_buf_2);
	R_FREE (one_page_buf);
	return NULL;
}

/* Collect all rebase infos of the cache. Newer caches describe them in
 * per-mapping slide entries (raw header offsets 0x138/0x13c, see
 * dyld_cache_format.h); legacy caches keep a single blob referenced from
 * the main header. */
static RDyldRebaseInfos *get_rebase_infos(RBinFile *bf, RDyldCache *cache) {
	RDyldRebaseInfos *result = R_NEW0 (RDyldRebaseInfos);
	if (!result) {
		return NULL;
	}

	if (!cache->hdr->slideInfoOffset || !cache->hdr->slideInfoSize) {
		ut32 total_slide_infos = 0;
		ut32 n_slide_infos[MAX_N_HDR];

		ut32 i;
		for (i = 0; i < cache->n_hdr && i < MAX_N_HDR; i++) {
			ut64 hdr_offset = cache->hdr_offset[i];
			if ((n_slide_infos[i] = r_buf_read_le32_at (cache->buf, 0x13c + hdr_offset)) == UT32_MAX) {
				goto beach;
			}
			total_slide_infos += n_slide_infos[i];
		}

		if (!total_slide_infos) {
			goto beach;
		}

		RDyldRebaseInfosEntry * infos = R_NEWS0 (RDyldRebaseInfosEntry, total_slide_infos);
		if (!infos) {
			goto beach;
		}

		ut32 k = 0;
		for (i = 0; i < cache->n_hdr && i < MAX_N_HDR; i++) {
			ut64 hdr_offset = cache->hdr_offset[i];
			ut64 slide_infos_offset;
			if (!n_slide_infos[i]) {
				continue;
			}
			if ((slide_infos_offset = r_buf_read_le32_at (cache->buf, 0x138 + hdr_offset)) == UT32_MAX) {
				continue;
			}
			if (!slide_infos_offset) {
				continue;
			}

			slide_infos_offset += hdr_offset;

			ut32 j;
			RDyldRebaseInfo *prev_info = NULL;
			for (j = 0; j < n_slide_infos[i]; j++) {
				ut64 offset = slide_infos_offset + j * sizeof (cache_mapping_slide);
				cache_mapping_slide entry;
				if (r_buf_fread_at (cache->buf, offset, (ut8*)&entry, "6lii", 1) != sizeof (cache_mapping_slide)) {
					break;
				}

				if (entry.slideInfoOffset && entry.slideInfoSize) {
					infos[k].start = entry.fileOffset + hdr_offset;
					infos[k].end = infos[k].start + entry.size;
					/* reuse the previous slide so estimation runs once */
					ut64 slide = prev_info ? prev_info->slide : UT64_MAX;
					infos[k].info = get_rebase_info (bf, cache, entry.slideInfoOffset + hdr_offset, entry.slideInfoSize, entry.fileOffset + hdr_offset, slide);
					prev_info = infos[k].info;
					k++;
				}
			}
		}

		if (!k) {
			free (infos);
			goto beach;
		}

		if (k < total_slide_infos) {
			RDyldRebaseInfosEntry * pruned_infos = R_NEWS0 (RDyldRebaseInfosEntry, k);
			if (!pruned_infos) {
				free (infos);
				goto beach;
			}

			memcpy (pruned_infos, infos, sizeof (RDyldRebaseInfosEntry) * k);
			free (infos);
			infos = pruned_infos;
		}

		result->entries = infos;
		result->length = k;
		return result;
	}

	if (cache->hdr->mappingCount > 1) {
		RDyldRebaseInfosEntry * infos = R_NEWS0 (RDyldRebaseInfosEntry, 1);
		if (!infos) {
			goto beach;
		}

		infos[0].start = cache->maps[1].fileOffset;
		infos[0].end = infos[0].start + cache->maps[1].size;
		infos[0].info = get_rebase_info (bf, cache, cache->hdr->slideInfoOffset, cache->hdr->slideInfoSize, infos[0].start, UT64_MAX);

		result->entries = infos;
		result->length = 1;
		return result;
	}

beach:
	free (result);
	return NULL;
}
static bool check_magic(const char *magic) { return !strcmp (magic, "dyld_v1 arm64") || !strcmp (magic, "dyld_v1 arm64e") || !strcmp (magic, "dyld_v1 x86_64") || !strcmp (magic, "dyld_v1 x86_64h"); } static bool check_buffer(RBinFile *bf, RBuffer *buf) { if (r_buf_size (buf) < 32) { return false; } char hdr[17] = { 0 }; int rhdr = r_buf_read_at (buf, 0, (ut8 *)&hdr, sizeof (hdr) - 1); if (rhdr != sizeof (hdr) - 1) { return false; } return check_magic (hdr); } static cache_imgxtr_t *read_cache_imgextra(RBuffer *cache_buf, cache_hdr_t *hdr, cache_accel_t *accel) { if (!cache_buf || !hdr || !hdr->imagesCount || !accel || !accel->imageExtrasCount || !accel->imagesExtrasOffset) { return NULL; } ut64 size = sizeof (cache_imgxtr_t) * accel->imageExtrasCount; cache_imgxtr_t *images = R_NEWS0 (cache_imgxtr_t, accel->imageExtrasCount); if (!images) { return NULL; } if (r_buf_fread_at (cache_buf, accel->imagesExtrasOffset, (ut8*) images, "ll4i", accel->imageExtrasCount) != size) { R_FREE (images); return NULL; } return images; } static char *get_lib_name(RBuffer *cache_buf, cache_img_t *img) { char file[256]; char *lib_name = file; if (r_buf_read_at (cache_buf, img->pathFileOffset, (ut8*) &file, sizeof (file)) == sizeof (file)) { file[255] = 0; /*char * last_slash = strrchr (file, '/'); if (last_slash && *last_slash) { lib_name = last_slash + 1; }*/ return strdup (lib_name); } return strdup ("FAIL"); } static int string_contains(const void *a, const void *b) { return !strstr ((const char*) a, (const char*) b); } static HtPU *create_path_to_index(RBuffer *cache_buf, cache_img_t *img, cache_hdr_t *hdr) { HtPU *path_to_idx = ht_pu_new0 (); if (!path_to_idx) { return NULL; } size_t i; for (i = 0; i != hdr->imagesCount; i++) { char file[256]; if (r_buf_read_at (cache_buf, img[i].pathFileOffset, (ut8*) &file, sizeof (file)) != sizeof (file)) { continue; } file[sizeof (file) - 1] = 0; ht_pu_insert (path_to_idx, file, (ut64)i); const char versions_pattern[] = ".framework/Versions/"; 
char *versions = strstr (file, versions_pattern); if (versions) { char *next_slash = strchr (versions + 20, '/'); if (next_slash) { char *tail = strdup (next_slash); if (!tail) { break; } strcpy (versions + 10, tail); free (tail); ht_pu_insert (path_to_idx, file, (ut64)i); } } } return path_to_idx; } static void carve_deps_at_address(RDyldCache *cache, cache_img_t *img, HtPU *path_to_idx, ut64 address, int *deps, bool printing) { ut64 pa = va2pa (address, cache->n_maps, cache->maps, cache->buf, 0, NULL, NULL); if (pa == UT64_MAX) { return; } struct MACH0_(mach_header) mh; if (r_buf_fread_at (cache->buf, pa, (ut8*) &mh, "8i", 1) != sizeof (struct MACH0_(mach_header))) { return; } if (mh.magic != MH_MAGIC_64 || mh.sizeofcmds == 0) { return; } ut64 cmds_at = pa + sizeof (struct MACH0_(mach_header)); ut8 *cmds = malloc (mh.sizeofcmds + 1); if (!cmds || r_buf_read_at (cache->buf, cmds_at, cmds, mh.sizeofcmds) != mh.sizeofcmds) { goto beach; } cmds[mh.sizeofcmds] = 0; ut8 *cursor = cmds; ut8 *end = cmds + mh.sizeofcmds; while (cursor < end) { ut32 cmd = r_read_le32 (cursor); ut32 cmdsize = r_read_le32 (cursor + sizeof (ut32)); if (cmd == LC_LOAD_DYLIB || cmd == LC_LOAD_WEAK_DYLIB || cmd == LC_REEXPORT_DYLIB || cmd == LC_LOAD_UPWARD_DYLIB) { bool found; if (cursor + 24 >= end) { break; } const char *key = (const char *) cursor + 24; size_t dep_index = (size_t)ht_pu_find (path_to_idx, key, &found); if (!found || dep_index >= cache->hdr->imagesCount) { eprintf ("Warning: alien dep '%s'\n", key); continue; } deps[dep_index]++; if (printing) { eprintf ("-> %s\n", key); } } cursor += cmdsize; } beach: free (cmds); } static ut64 resolve_symbols_off(RDyldCache *cache, ut64 pa) { struct MACH0_(mach_header) mh; if (r_buf_fread_at (cache->buf, pa, (ut8*) &mh, "8i", 1) != sizeof (struct MACH0_(mach_header))) { return 0; } if (mh.magic != MH_MAGIC_64 || mh.sizeofcmds == 0) { return 0; } ut64 cmds_at = pa + sizeof (struct MACH0_(mach_header)); ut64 cursor = cmds_at; ut64 end = cursor 
+ mh.sizeofcmds; while (cursor < end) { ut32 cmd = r_buf_read_le32_at (cache->buf, cursor); if (cmd == UT32_MAX) { return 0; } ut32 cmdsize = r_buf_read_le32_at (cache->buf, cursor + sizeof (ut32)); if (cmdsize == UT32_MAX) { return 0; } if (cmd == LC_SEGMENT || cmd == LC_SEGMENT_64) { char segname[17]; segname[16] = 0; if (r_buf_read_at (cache->buf, cursor + 2 * sizeof (ut32), (ut8 *)segname, 16) != 16) { return 0; } if (!strncmp (segname, "__LINKEDIT", 16)) { ut64 vmaddr = r_buf_read_le64_at (cache->buf, cursor + 2 * sizeof (ut32) + 16); if (vmaddr == UT64_MAX) { return 0; } ut32 i,j; for (i = 0; i < cache->n_hdr; i++) { cache_hdr_t *hdr = &cache->hdr[i]; ut64 hdr_offset = cache->hdr_offset[i]; ut32 maps_index = cache->maps_index[i]; for (j = 0; j < hdr->mappingCount; j++) { ut64 map_start = cache->maps[maps_index + j].address; ut64 map_end = map_start + cache->maps[maps_index + j].size; if (vmaddr >= map_start && vmaddr < map_end) { return hdr_offset; } } } } } cursor += cmdsize; } return 0; } static RList *create_cache_bins(RBinFile *bf, RDyldCache *cache) { RList *bins = r_list_newf ((RListFree)free_bin); if (!bins) { return NULL; } char *target_libs = NULL; RList *target_lib_names = NULL; int *deps = NULL; target_libs = r_sys_getenv ("R_DYLDCACHE_FILTER"); if (target_libs) { target_lib_names = r_str_split_list (target_libs, ":", 0); if (!target_lib_names) { r_list_free (bins); return NULL; } deps = R_NEWS0 (int, cache->hdr->imagesCount); if (!deps) { r_list_free (bins); r_list_free (target_lib_names); return NULL; } } ut32 i; for (i = 0; i < cache->n_hdr; i++) { cache_hdr_t *hdr = &cache->hdr[i]; ut64 hdr_offset = cache->hdr_offset[i]; ut32 maps_index = cache->maps_index[i]; cache_img_t *img = read_cache_images (cache->buf, hdr, hdr_offset); if (!img) { goto next; } ut32 j; ut16 *depArray = NULL; cache_imgxtr_t *extras = NULL; if (target_libs) { HtPU *path_to_idx = NULL; if (cache->accel) { depArray = R_NEWS0 (ut16, cache->accel->depListCount); if (!depArray) 
{ goto next; } if (r_buf_fread_at (cache->buf, cache->accel->depListOffset, (ut8*) depArray, "s", cache->accel->depListCount) != cache->accel->depListCount * 2) { goto next; } extras = read_cache_imgextra (cache->buf, hdr, cache->accel); if (!extras) { goto next; } } else { path_to_idx = create_path_to_index (cache->buf, img, hdr); } for (j = 0; j < hdr->imagesCount; j++) { bool printing = !deps[j]; char *lib_name = get_lib_name (cache->buf, &img[j]); if (!lib_name) { break; } if (strstr (lib_name, "libobjc.A.dylib")) { deps[j]++; } if (!r_list_find (target_lib_names, lib_name, string_contains)) { R_FREE (lib_name); continue; } if (printing) { eprintf ("FILTER: %s\n", lib_name); } R_FREE (lib_name); deps[j]++; if (extras && depArray) { ut32 k; for (k = extras[j].dependentsStartArrayIndex; depArray[k] != 0xffff; k++) { ut16 dep_index = depArray[k] & 0x7fff; deps[dep_index]++; char *dep_name = get_lib_name (cache->buf, &img[dep_index]); if (!dep_name) { break; } if (printing) { eprintf ("-> %s\n", dep_name); } free (dep_name); } } else if (path_to_idx) { carve_deps_at_address (cache, img, path_to_idx, img[j].address, deps, printing); } } ht_pu_free (path_to_idx); R_FREE (depArray); R_FREE (extras); } for (j = 0; j < hdr->imagesCount; j++) { if (deps && !deps[j]) { continue; } ut64 pa = va2pa (img[j].address, hdr->mappingCount, &cache->maps[maps_index], cache->buf, 0, NULL, NULL); if (pa == UT64_MAX) { continue; } ut8 magicbytes[4]; r_buf_read_at (cache->buf, pa, magicbytes, 4); int magic = r_read_le32 (magicbytes); switch (magic) { case MH_MAGIC_64: { char file[256]; RDyldBinImage *bin = R_NEW0 (RDyldBinImage); if (!bin) { goto next; } bin->header_at = pa; bin->hdr_offset = hdr_offset; bin->symbols_off = resolve_symbols_off (cache, pa); bin->va = img[j].address; if (r_buf_read_at (cache->buf, img[j].pathFileOffset, (ut8*) &file, sizeof (file)) == sizeof (file)) { file[255] = 0; char *last_slash = strrchr (file, '/'); if (last_slash && *last_slash) { if (last_slash > 
file) { char *scan = last_slash - 1; while (scan > file && *scan != '/') { scan--; } if (*scan == '/') { bin->file = strdup (scan + 1); } else { bin->file = strdup (last_slash + 1); } } else { bin->file = strdup (last_slash + 1); } } else { bin->file = strdup (file); } } r_list_append (bins, bin); break; } default: eprintf ("Unknown sub-bin\n"); break; } } next: R_FREE (depArray); R_FREE (extras); R_FREE (img); } if (r_list_empty (bins)) { r_list_free (bins); bins = NULL; } R_FREE (deps); R_FREE (target_libs); r_list_free (target_lib_names); return bins; } static void rebase_bytes_v1(RDyldRebaseInfo1 *rebase_info, ut8 *buf, ut64 offset, int count, ut64 start_of_write) { int in_buf = 0; while (in_buf < count) { ut64 offset_in_data = offset - rebase_info->start_of_data; ut64 page_index = offset_in_data / rebase_info->page_size; ut64 page_offset = offset_in_data % rebase_info->page_size; ut64 to_next_page = rebase_info->page_size - page_offset; ut64 entry_index = page_offset / 32; ut64 offset_in_entry = (page_offset % 32) / 4; if (entry_index >= rebase_info->entries_size) { in_buf += to_next_page; offset += to_next_page; continue; } if (page_index >= rebase_info->toc_count) { break; } ut8 *entry = &rebase_info->entries[rebase_info->toc[page_index] * rebase_info->entries_size]; ut8 b = entry[entry_index]; if (b & (1 << offset_in_entry)) { ut64 value = r_read_le64 (buf + in_buf); value += rebase_info->slide; r_write_le64 (buf + in_buf, value); in_buf += 8; offset += 8; } else { in_buf += 4; offset += 4; } } } static void rebase_bytes_v2(RDyldRebaseInfo2 *rebase_info, ut8 *buf, ut64 offset, int count, ut64 start_of_write) { int in_buf = 0; while (in_buf < count) { ut64 offset_in_data = offset - rebase_info->start_of_data; ut64 page_index = offset_in_data / rebase_info->page_size; ut64 page_offset = offset_in_data % rebase_info->page_size; ut64 to_next_page = rebase_info->page_size - page_offset; if (page_index >= rebase_info->page_starts_count) { goto next_page; } ut16 
page_flag = rebase_info->page_starts[page_index]; if (page_flag == DYLD_CACHE_SLIDE_PAGE_ATTR_NO_REBASE) { goto next_page; } if (!(page_flag & DYLD_CACHE_SLIDE_PAGE_ATTR_EXTRA)) { ut64 first_rebase_off = rebase_info->page_starts[page_index] * 4; if (first_rebase_off >= page_offset && first_rebase_off < page_offset + count) { ut32 delta = 1; while (delta) { ut64 position = in_buf + first_rebase_off - page_offset; if (position >= count) { break; } ut64 raw_value = r_read_le64 (buf + position); delta = ((raw_value & rebase_info->delta_mask) >> rebase_info->delta_shift); if (position >= start_of_write) { ut64 new_value = raw_value & rebase_info->value_mask; if (new_value != 0) { new_value += rebase_info->value_add; new_value += rebase_info->slide; } r_write_le64 (buf + position, new_value); } first_rebase_off += delta; } } } next_page: in_buf += to_next_page; offset += to_next_page; } } static void rebase_bytes_v3(RDyldRebaseInfo3 *rebase_info, ut8 *buf, ut64 offset, int count, ut64 start_of_write) { int in_buf = 0; while (in_buf < count) { ut64 offset_in_data = offset - rebase_info->start_of_data; ut64 page_index = offset_in_data / rebase_info->page_size; ut64 page_offset = offset_in_data % rebase_info->page_size; ut64 to_next_page = rebase_info->page_size - page_offset; if (page_index >= rebase_info->page_starts_count) { goto next_page; } ut64 delta = rebase_info->page_starts[page_index]; if (delta == DYLD_CACHE_SLIDE_V3_PAGE_ATTR_NO_REBASE) { goto next_page; } ut64 first_rebase_off = delta; if (first_rebase_off >= page_offset && first_rebase_off < page_offset + count) { do { ut64 position = in_buf + first_rebase_off - page_offset; if (position >= count) { break; } ut64 raw_value = r_read_le64 (buf + position); delta = ((raw_value & rebase_info->delta_mask) >> rebase_info->delta_shift) * 8; if (position >= start_of_write) { ut64 new_value = 0; if (R_IS_PTR_AUTHENTICATED (raw_value)) { new_value = (raw_value & 0xFFFFFFFFULL) + rebase_info->auth_value_add; // TODO: 
don't throw auth info away } else { new_value = ((raw_value << 13) & 0xFF00000000000000ULL) | (raw_value & 0x7ffffffffffULL); new_value &= 0x00FFFFFFFFFFFFFFULL; } if (new_value != 0) { new_value += rebase_info->slide; } r_write_le64 (buf + position, new_value); } first_rebase_off += delta; } while (delta); } next_page: in_buf += to_next_page; offset += to_next_page; } } static RDyldRebaseInfo *rebase_info_by_range(RDyldRebaseInfos *infos, ut64 offset, int count) { int imid; int imin = 0; int imax = infos->length - 1; while (imin < imax) { imid = (imin + imax) / 2; RDyldRebaseInfosEntry *entry = &infos->entries[imid]; if ((entry->end) <= offset) { imin = imid + 1; } else { imax = imid; } } RDyldRebaseInfosEntry *minEntry = &infos->entries[imin]; if ((imax == imin) && (minEntry->start <= offset + count) && (minEntry->end >= offset)) { return minEntry->info; } return NULL; } static void rebase_bytes(RDyldRebaseInfo *rebase_info, ut8 *buf, ut64 offset, int count, ut64 start_of_write) { if (!rebase_info || !buf) { return; } if (rebase_info->version == 3) { rebase_bytes_v3 ((RDyldRebaseInfo3*) rebase_info, buf, offset, count, start_of_write); } else if (rebase_info->version == 2 || rebase_info->version == 4) { rebase_bytes_v2 ((RDyldRebaseInfo2*) rebase_info, buf, offset, count, start_of_write); } else if (rebase_info->version == 1) { rebase_bytes_v1 ((RDyldRebaseInfo1*) rebase_info, buf, offset, count, start_of_write); } } static int dyldcache_io_read(RIO *io, RIODesc *fd, ut8 *buf, int count) { r_return_val_if_fail (io, -1); RCore *core = (RCore*) io->corebind.core; if (!core || !core->bin || !core->bin->binfiles) { return -1; } RDyldCache *cache = NULL; RListIter *iter; RBinFile *bf; r_list_foreach (core->bin->binfiles, iter, bf) { if (bf->fd == fd->fd ) { if (!strncmp ((char*) bf->o->bin_obj, "dyldcac", 7)) { cache = bf->o->bin_obj; } else { cache = ((struct MACH0_(obj_t)*) bf->o->bin_obj)->user; } if (pending_bin_files) { RListIter *to_remove = r_list_contains 
(pending_bin_files, bf); if (to_remove) { r_list_delete (pending_bin_files, to_remove); if (r_list_empty (pending_bin_files)) { r_list_free (pending_bin_files); pending_bin_files = NULL; } } } break; } } if (!cache) { r_list_foreach (pending_bin_files, iter, bf) { if (bf->fd == fd->fd && bf->o) { if (!strncmp ((char*) bf->o->bin_obj, "dyldcac", 7)) { cache = bf->o->bin_obj; } else { cache = ((struct MACH0_(obj_t)*) bf->o->bin_obj)->user; } break; } } } if (!cache || !cache->original_io_read) { if (fd->plugin->read == &dyldcache_io_read) { return -1; } return fd->plugin->read (io, fd, buf, count); } RDyldRebaseInfo *rebase_info = rebase_info_by_range (cache->rebase_infos, io->off, count); int result = 0; if (rebase_info && count > 0) { ut64 offset_in_data = io->off - rebase_info->start_of_data; ut64 page_offset = offset_in_data % rebase_info->page_size; ut64 internal_offset = io->off & ~(rebase_info->page_size - 1); ut64 internal_end = io->off + count; int rounded_count = internal_end - internal_offset; ut8 *internal_buf = rebase_info->one_page_buf; if (rounded_count > rebase_info->page_size) { internal_buf = malloc (rounded_count); if (!internal_buf) { eprintf ("Cannot allocate memory for 'internal_buf'\n"); return -1; } } ut64 original_off = io->off; io->off = internal_offset; int internal_result = cache->original_io_read (io, fd, internal_buf, rounded_count); io->off = original_off; if (internal_result >= page_offset + count) { rebase_bytes (rebase_info, internal_buf, internal_offset, internal_result, page_offset); result = R_MIN (count, internal_result); memcpy (buf, internal_buf + page_offset, result); } else { eprintf ("ERROR rebasing\n"); result = cache->original_io_read (io, fd, buf, count); } if (internal_buf != rebase_info->one_page_buf) { R_FREE (internal_buf); } } else { result = cache->original_io_read (io, fd, buf, count); } return result; } static void swizzle_io_read(RDyldCache *cache, RIO *io) { if (!io || !io->desc || !io->desc->plugin) { return; } 
RIOPlugin *plugin = io->desc->plugin; cache->original_io_read = plugin->read; plugin->read = &dyldcache_io_read; } static cache_hdr_t *read_cache_header(RBuffer *cache_buf, ut64 offset) { if (!cache_buf) { return NULL; } cache_hdr_t *hdr = R_NEW0 (cache_hdr_t); if (!hdr) { return NULL; } ut64 size = sizeof (cache_hdr_t); if (r_buf_fread_at (cache_buf, offset, (ut8*) hdr, "16c4i7l16clii4l", 1) != size) { R_FREE (hdr); return NULL; } if (!check_magic (hdr->magic)) { R_FREE (hdr); return NULL; } if (!hdr->imagesCount && !hdr->imagesOffset) { hdr->imagesOffset = r_buf_read_le32_at (cache_buf, 0x1c0 + offset); hdr->imagesCount = r_buf_read_le32_at (cache_buf, 0x1c4 + offset); } return hdr; } static void populate_cache_headers(RDyldCache *cache) { cache->n_hdr = 0; RList *hdrs = r_list_newf (NULL); if (!hdrs) { return; } cache_hdr_t *h; ut64 offsets[MAX_N_HDR]; ut64 offset = 0; do { offsets[cache->n_hdr] = offset; h = read_cache_header (cache->buf, offset); if (!h) { break; } r_list_append (hdrs, h); ut64 size = h->codeSignatureOffset + h->codeSignatureSize; #define SHIFT_MAYBE(x) \ if (x) { \ x += offset; \ } SHIFT_MAYBE (h->mappingOffset); SHIFT_MAYBE (h->imagesOffset); SHIFT_MAYBE (h->codeSignatureOffset); SHIFT_MAYBE (h->slideInfoOffset); SHIFT_MAYBE (h->localSymbolsOffset); SHIFT_MAYBE (h->branchPoolsOffset); SHIFT_MAYBE (h->imagesTextOffset); offset += size; cache->n_hdr++; } while (cache->n_hdr < MAX_N_HDR); if (!cache->n_hdr) { goto beach; } cache->hdr = R_NEWS0 (cache_hdr_t, cache->n_hdr); if (!cache->hdr) { cache->n_hdr = 0; goto beach; } cache->hdr_offset = R_NEWS0 (ut64, cache->n_hdr); if (!cache->hdr_offset) { cache->n_hdr = 0; R_FREE (cache->hdr); goto beach; } memcpy (cache->hdr_offset, offsets, cache->n_hdr * sizeof (ut64)); ut32 i = 0; RListIter *iter; cache_hdr_t *item; r_list_foreach (hdrs, iter, item) { if (i >= cache->n_hdr) { break; } memcpy (&cache->hdr[i++], item, sizeof (cache_hdr_t)); } beach: r_list_free (hdrs); } static void 
populate_cache_maps(RDyldCache *cache) { r_return_if_fail (cache && cache->buf); ut32 i; ut32 n_maps = 0; for (i = 0; i < cache->n_hdr; i++) { cache_hdr_t *hdr = &cache->hdr[i]; if (!hdr->mappingCount || !hdr->mappingOffset) { continue; } n_maps += hdr->mappingCount; } cache_map_t *maps = NULL; if (n_maps != 0) { cache->maps_index = R_NEWS0 (ut32, cache->n_hdr); if (!cache->maps_index) { return; } maps = R_NEWS0 (cache_map_t, n_maps); } if (!maps) { cache->maps = NULL; cache->n_maps = 0; return; } ut32 next_map = 0; for (i = 0; i < cache->n_hdr; i++) { cache_hdr_t *hdr = &cache->hdr[i]; cache->maps_index[i] = next_map; if (!hdr->mappingCount || !hdr->mappingOffset) { continue; } ut64 size = sizeof (cache_map_t) * hdr->mappingCount; if (r_buf_fread_at (cache->buf, hdr->mappingOffset, (ut8*) &maps[next_map], "3l2i", hdr->mappingCount) != size) { continue; } ut32 j; ut64 hdr_offset = cache->hdr_offset[i]; for (j = 0; j < hdr->mappingCount; j++) { cache_map_t *map = &maps[next_map + j]; map->fileOffset += hdr_offset; } next_map += hdr->mappingCount; } cache->maps = maps; cache->n_maps = next_map; } static cache_accel_t *read_cache_accel(RBuffer *cache_buf, cache_hdr_t *hdr, cache_map_t *maps) { if (!cache_buf || !hdr || !hdr->accelerateInfoSize || !hdr->accelerateInfoAddr) { return NULL; } ut64 offset = va2pa (hdr->accelerateInfoAddr, hdr->mappingCount, maps, cache_buf, 0, NULL, NULL); if (!offset) { return NULL; } ut64 size = sizeof (cache_accel_t); cache_accel_t *accel = R_NEW0 (cache_accel_t); if (!accel) { return NULL; } if (r_buf_fread_at (cache_buf, offset, (ut8*) accel, "16il", 1) != size) { R_FREE (accel); return NULL; } accel->imagesExtrasOffset += offset; accel->bottomUpListOffset += offset; accel->dylibTrieOffset += offset; accel->initializersOffset += offset; accel->dofSectionsOffset += offset; accel->reExportListOffset += offset; accel->depListOffset += offset; accel->rangeTableOffset += offset; return accel; } static objc_cache_opt_info 
*get_objc_opt_info(RBinFile *bf, RDyldCache *cache) { objc_cache_opt_info *result = NULL; RListIter *iter; RDyldBinImage *bin; r_list_foreach (cache->bins, iter, bin) { if (strcmp (bin->file, "lib/libobjc.A.dylib")) { continue; } struct MACH0_(opts_t) opts = {0}; opts.verbose = bf->rbin->verbose; opts.header_at = bin->header_at; opts.symbols_off = 0; struct MACH0_(obj_t) *mach0 = MACH0_(new_buf) (cache->buf, &opts); if (!mach0) { goto beach; } struct section_t *sections = NULL; if (!(sections = MACH0_(get_sections) (mach0))) { MACH0_(mach0_free) (mach0); goto beach; } int i; ut64 scoffs_offset = 0; ut64 scoffs_size = 0; ut64 selrefs_offset = 0; ut64 selrefs_size = 0; ut8 remaining = 2; ut64 slide = rebase_infos_get_slide (cache); for (i = 0; !sections[i].last; i++) { if (sections[i].size == 0) { continue; } if (strstr (sections[i].name, "__objc_scoffs")) { scoffs_offset = va2pa (sections[i].addr, cache->n_maps, cache->maps, cache->buf, slide, NULL, NULL); scoffs_size = sections[i].size; remaining--; if (remaining == 0) { break; } } if (strstr (sections[i].name, "__DATA.__objc_selrefs")) { selrefs_offset = va2pa (sections[i].addr, cache->n_maps, cache->maps, cache->buf, slide, NULL, NULL); selrefs_size = sections[i].size; remaining--; if (remaining == 0) { break; } } } MACH0_(mach0_free) (mach0); R_FREE (sections); ut64 sel_string_base = 0; if (!scoffs_offset || scoffs_size < 40) { if (!selrefs_offset || !selrefs_size || cache->n_hdr == 1) { break; } ut64 cursor = selrefs_offset; ut64 end = cursor + selrefs_size; while (cursor < end) { ut64 sel_ptr = r_buf_read_le64_at (cache->buf, cursor); if (sel_ptr == UT64_MAX) { break; } ut64 sel_offset = va2pa (sel_ptr, cache->n_maps, cache->maps, cache->buf, slide, NULL, NULL); char * selector = r_buf_get_string (cache->buf, sel_offset); if (!selector) { break; } bool is_magic_selector = !strncmp (selector, "\xf0\x9f\xa4\xaf", 4); free (selector); if (is_magic_selector) { sel_string_base = sel_ptr; break; } cursor += 8; } if 
(sel_string_base == 0) { break; } } else { ut64 check = r_buf_read_le64_at (cache->buf, scoffs_offset); if (check != 2) { break; } sel_string_base = r_buf_read_le64_at (cache->buf, scoffs_offset + 8); if (sel_string_base == UT64_MAX) { break; } ut64 sel_string_end = r_buf_read_le64_at (cache->buf, scoffs_offset + 16); if (sel_string_end == sel_string_base || sel_string_end == UT64_MAX) { break; } } result = R_NEW0 (objc_cache_opt_info); if (!result) { break; } result->sel_string_base = sel_string_base; } beach: return result; } static bool load_buffer(RBinFile *bf, void **bin_obj, RBuffer *buf, ut64 loadaddr, Sdb *sdb) { RDyldCache *cache = R_NEW0 (RDyldCache); memcpy (cache->magic, "dyldcac", 7); cache->buf = r_buf_ref (buf); populate_cache_headers (cache); if (!cache->hdr) { r_dyldcache_free (cache); return false; } populate_cache_maps (cache); if (!cache->maps) { r_dyldcache_free (cache); return false; } cache->accel = read_cache_accel (cache->buf, cache->hdr, cache->maps); cache->bins = create_cache_bins (bf, cache); if (!cache->bins) { r_dyldcache_free (cache); return false; } cache->locsym = r_dyld_locsym_new (cache); cache->rebase_infos = get_rebase_infos (bf, cache); if (cache->rebase_infos) { if (!rebase_infos_get_slide (cache)) { if (!pending_bin_files) { pending_bin_files = r_list_new (); if (!pending_bin_files) { r_dyldcache_free (cache); return false; } } r_list_push (pending_bin_files, bf); swizzle_io_read (cache, bf->rbin->iob.io); } } *bin_obj = cache; return true; } static RList *entries(RBinFile *bf) { RBinAddr *ptr = NULL; RList *ret = r_list_newf (free); if (!ret) { return NULL; } if ((ptr = R_NEW0 (RBinAddr))) { r_list_append (ret, ptr); } return ret; } static RBinInfo *info(RBinFile *bf) { RBinInfo *ret = NULL; if (!bf || !bf->o) { return NULL; } RDyldCache *cache = (RDyldCache*) bf->o->bin_obj; if (!cache) { return NULL; } bool big_endian = 0; if (!(ret = R_NEW0 (RBinInfo))) { return NULL; } ret->file = strdup (bf->file); ret->bclass = strdup 
("dyldcache"); ret->rclass = strdup ("ios"); ret->os = strdup ("iOS"); if (strstr (cache->hdr->magic, "x86_64")) { ret->arch = strdup ("x86"); ret->bits = 64; } else { ret->arch = strdup ("arm"); ret->bits = strstr (cache->hdr->magic, "arm64")? 64: 32; } ret->machine = strdup (ret->arch); ret->subsystem = strdup ("xnu"); ret->type = strdup ("library-cache"); ret->has_va = true; ret->big_endian = big_endian; ret->dbg_info = 0; return ret; } #if 0 static void parse_mach0(RList *ret, ut64 paddr, RBinFile *bf) { // TODO } #endif static ut64 baddr(RBinFile *bf) { // XXX hardcoded return 0x180000000; } void symbols_from_bin(RDyldCache *cache, RList *ret, RBinFile *bf, RDyldBinImage *bin, SetU *hash) { struct MACH0_(obj_t) *mach0 = bin_to_mach0 (bf, bin); if (!mach0) { return; } // const RList*symbols = MACH0_(get_symbols_list) (mach0); const struct symbol_t *symbols = MACH0_(get_symbols) (mach0); if (!symbols) { return; } int i; for (i = 0; !symbols[i].last; i++) { if (!symbols[i].name || !symbols[i].name[0] || symbols[i].addr < 100) { continue; } if (strstr (symbols[i].name, "<redacted>")) { continue; } RBinSymbol *sym = R_NEW0 (RBinSymbol); if (!sym) { break; } sym->name = strdup (symbols[i].name); sym->vaddr = symbols[i].addr; sym->forwarder = "NONE"; sym->bind = (symbols[i].type == R_BIN_MACH0_SYMBOL_TYPE_LOCAL)? 
R_BIN_BIND_LOCAL_STR: R_BIN_BIND_GLOBAL_STR; sym->type = R_BIN_TYPE_FUNC_STR; sym->paddr = symbols[i].offset + bf->o->boffset; sym->size = symbols[i].size; sym->ordinal = i; set_u_add (hash, sym->vaddr); r_list_append (ret, sym); } MACH0_(mach0_free) (mach0); } static bool __is_data_section(const char *name) { if (strstr (name, "_cstring")) { return true; } if (strstr (name, "_os_log")) { return true; } if (strstr (name, "_objc_methname")) { return true; } if (strstr (name, "_objc_classname")) { return true; } if (strstr (name, "_objc_methtype")) { return true; } return false; } static void sections_from_bin(RList *ret, RBinFile *bf, RDyldBinImage *bin) { RDyldCache *cache = (RDyldCache*) bf->o->bin_obj; if (!cache) { return; } struct MACH0_(obj_t) *mach0 = bin_to_mach0 (bf, bin); if (!mach0) { return; } struct section_t *sections = NULL; if (!(sections = MACH0_(get_sections) (mach0))) { return; } ut64 slide = rebase_infos_get_slide (cache); int i; for (i = 0; !sections[i].last; i++) { RBinSection *ptr = R_NEW0 (RBinSection); if (!ptr) { break; } if (bin->file) { ptr->name = r_str_newf ("%s.%s", bin->file, (char*)sections[i].name); } else { ptr->name = r_str_newf ("%s", (char*)sections[i].name); } if (strstr (ptr->name, "la_symbol_ptr")) { int len = sections[i].size / 8; ptr->format = r_str_newf ("Cd %d[%d]", 8, len); } ptr->is_data = __is_data_section (ptr->name); ptr->size = sections[i].size; ptr->vsize = sections[i].vsize; ptr->vaddr = sections[i].addr; ptr->paddr = va2pa (sections[i].addr, cache->n_maps, cache->maps, cache->buf, slide, NULL, NULL); if (!ptr->vaddr) { ptr->vaddr = ptr->paddr; } ptr->perm = sections[i].perm; r_list_append (ret, ptr); } free (sections); MACH0_(mach0_free) (mach0); } static RList *sections(RBinFile *bf) { RDyldCache *cache = (RDyldCache*) bf->o->bin_obj; if (!cache) { return NULL; } RList *ret = r_list_newf (free); if (!ret) { return NULL; } RListIter *iter; RDyldBinImage *bin; r_list_foreach (cache->bins, iter, bin) { 
sections_from_bin (ret, bf, bin); } RBinSection *ptr = NULL; int i; for (i = 0; i < cache->n_maps; i++) { if (!(ptr = R_NEW0 (RBinSection))) { r_list_free (ret); return NULL; } ptr->name = r_str_newf ("cache_map.%d", i); ptr->size = cache->maps[i].size; ptr->vsize = ptr->size; ptr->paddr = cache->maps[i].fileOffset; ptr->vaddr = cache->maps[i].address; ptr->add = true; ptr->is_segment = true; ptr->perm = prot2perm (cache->maps[i].initProt); r_list_append (ret, ptr); } ut64 slide = rebase_infos_get_slide (cache); if (slide) { RBinSection *section; r_list_foreach (ret, iter, section) { section->vaddr += slide; } } return ret; } static RList *symbols(RBinFile *bf) { RDyldCache *cache = (RDyldCache*) bf->o->bin_obj; if (!cache) { return NULL; } RList *ret = r_list_newf (free); if (!ret) { return NULL; } RListIter *iter; RDyldBinImage *bin; r_list_foreach (cache->bins, iter, bin) { SetU *hash = set_u_new (); if (!hash) { r_list_free (ret); return NULL; } symbols_from_bin (cache, ret, bf, bin, hash); symbols_from_locsym (cache, bin, ret, hash); set_u_free (hash); } ut64 slide = rebase_infos_get_slide (cache); if (slide) { RBinSymbol *sym; r_list_foreach (ret, iter, sym) { sym->vaddr += slide; } } return ret; } /* static void unswizzle_io_read(RDyldCache *cache, RIO *io) { if (!io || !io->desc || !io->desc->plugin || !cache->original_io_read) { return; } RIOPlugin *plugin = io->desc->plugin; plugin->read = cache->original_io_read; cache->original_io_read = NULL; } */ static void destroy(RBinFile *bf) { RDyldCache *cache = (RDyldCache*) bf->o->bin_obj; // unswizzle_io_read (cache, bf->rbin->iob.io); // XXX io may be dead here r_dyldcache_free (cache); } static RList *classes(RBinFile *bf) { RDyldCache *cache = (RDyldCache*) bf->o->bin_obj; if (!cache) { return NULL; } RList *ret = r_list_newf (free); if (!ret) { return NULL; } if (!cache->objc_opt_info_loaded) { cache->oi = get_objc_opt_info (bf, cache); cache->objc_opt_info_loaded = true; } RListIter *iter; RDyldBinImage 
*bin; ut64 slide = rebase_infos_get_slide (cache); RBuffer *orig_buf = bf->buf; ut32 num_of_unnamed_class = 0; r_list_foreach (cache->bins, iter, bin) { struct MACH0_(obj_t) *mach0 = bin_to_mach0 (bf, bin); if (!mach0) { goto beach; } struct section_t *sections = NULL; if (!(sections = MACH0_(get_sections) (mach0))) { MACH0_(mach0_free) (mach0); goto beach; } int i; for (i = 0; !sections[i].last; i++) { if (sections[i].size == 0) { continue; } bool is_classlist = strstr (sections[i].name, "__objc_classlist"); bool is_catlist = strstr (sections[i].name, "__objc_catlist"); if (!is_classlist && !is_catlist) { continue; } ut8 *pointers = malloc (sections[i].size); if (!pointers) { continue; } ut64 offset = va2pa (sections[i].addr, cache->n_maps, cache->maps, cache->buf, slide, NULL, NULL); if (r_buf_read_at (cache->buf, offset, pointers, sections[i].size) < sections[i].size) { R_FREE (pointers); continue; } ut8 *cursor = pointers; ut8 *pointers_end = pointers + sections[i].size; for (; cursor < pointers_end; cursor += 8) { ut64 pointer_to_class = r_read_le64 (cursor); RBinClass *klass; if (!(klass = R_NEW0 (RBinClass)) || !(klass->methods = r_list_new ()) || !(klass->fields = r_list_new ())) { R_FREE (klass); R_FREE (pointers); R_FREE (sections); MACH0_(mach0_free) (mach0); goto beach; } bf->o->bin_obj = mach0; bf->buf = cache->buf; if (is_classlist) { MACH0_(get_class_t) (pointer_to_class, bf, klass, false, NULL, cache->oi); } else { MACH0_(get_category_t) (pointer_to_class, bf, klass, NULL, cache->oi); } bf->o->bin_obj = cache; bf->buf = orig_buf; if (!klass->name) { eprintf ("KLASS ERROR AT 0x%"PFMT64x", is_classlist %d\n", pointer_to_class, is_classlist); klass->name = r_str_newf ("UnnamedClass%u", num_of_unnamed_class); if (!klass->name) { R_FREE (klass); R_FREE (pointers); R_FREE (sections); MACH0_(mach0_free) (mach0); goto beach; } num_of_unnamed_class++; } r_list_append (ret, klass); } R_FREE (pointers); } R_FREE (sections); MACH0_(mach0_free) (mach0); } return 
ret; beach: r_list_free (ret); return NULL; } static void header(RBinFile *bf) { if (!bf || !bf->o) { return; } RDyldCache *cache = (RDyldCache*) bf->o->bin_obj; if (!cache) { return; } RBin *bin = bf->rbin; ut64 slide = rebase_infos_get_slide (cache); PrintfCallback p = bin->cb_printf; PJ *pj = pj_new (); if (!pj) { return; } pj_o (pj); pj_k (pj, "header"); pj_o (pj); pj_ks (pj, "magic", cache->hdr->magic); pj_kn (pj, "mappingOffset", cache->hdr->mappingOffset); pj_kn (pj, "mappingCount", cache->hdr->mappingCount); pj_kn (pj, "imagesOffset", cache->hdr->imagesOffset); pj_kn (pj, "imagesCount", cache->hdr->imagesCount); pj_kn (pj, "dyldBaseAddress", cache->hdr->dyldBaseAddress); pj_kn (pj, "codeSignatureOffset", cache->hdr->codeSignatureOffset); pj_kn (pj, "codeSignatureSize", cache->hdr->codeSignatureSize); pj_kn (pj, "slideInfoOffset", cache->hdr->slideInfoOffset); pj_kn (pj, "slideInfoSize", cache->hdr->slideInfoSize); pj_kn (pj, "localSymbolsOffset", cache->hdr->localSymbolsOffset); pj_kn (pj, "localSymbolsSize", cache->hdr->localSymbolsSize); char uuidstr[128]; r_hex_bin2str ((ut8*)cache->hdr->uuid, 16, uuidstr); pj_ks (pj, "uuid", uuidstr); pj_ks (pj, "cacheType", (cache->hdr->cacheType == 0) ? 
"development" : "production"); pj_kn (pj, "branchPoolsOffset", cache->hdr->branchPoolsOffset); pj_kn (pj, "branchPoolsCount", cache->hdr->branchPoolsCount); pj_kn (pj, "accelerateInfoAddr", cache->hdr->accelerateInfoAddr + slide); pj_kn (pj, "accelerateInfoSize", cache->hdr->accelerateInfoSize); pj_kn (pj, "imagesTextOffset", cache->hdr->imagesTextOffset); pj_kn (pj, "imagesTextCount", cache->hdr->imagesTextCount); pj_end (pj); if (cache->accel) { pj_k (pj, "accelerator"); pj_o (pj); pj_kn (pj, "version", cache->accel->version); pj_kn (pj, "imageExtrasCount", cache->accel->imageExtrasCount); pj_kn (pj, "imagesExtrasOffset", cache->accel->imagesExtrasOffset); pj_kn (pj, "bottomUpListOffset", cache->accel->bottomUpListOffset); pj_kn (pj, "dylibTrieOffset", cache->accel->dylibTrieOffset); pj_kn (pj, "dylibTrieSize", cache->accel->dylibTrieSize); pj_kn (pj, "initializersOffset", cache->accel->initializersOffset); pj_kn (pj, "initializersCount", cache->accel->initializersCount); pj_kn (pj, "dofSectionsOffset", cache->accel->dofSectionsOffset); pj_kn (pj, "dofSectionsCount", cache->accel->dofSectionsCount); pj_kn (pj, "reExportListOffset", cache->accel->reExportListOffset); pj_kn (pj, "reExportCount", cache->accel->reExportCount); pj_kn (pj, "depListOffset", cache->accel->depListOffset); pj_kn (pj, "depListCount", cache->accel->depListCount); pj_kn (pj, "rangeTableOffset", cache->accel->rangeTableOffset); pj_kn (pj, "rangeTableCount", cache->accel->rangeTableCount); pj_kn (pj, "dyldSectionAddr", cache->accel->dyldSectionAddr + slide); pj_end (pj); } if (cache->rebase_infos) { size_t i; pj_k (pj, "slideInfo"); pj_a (pj); for (i = 0; i < cache->rebase_infos->length; i++) { RDyldRebaseInfo * rebase_info = cache->rebase_infos->entries[i].info; pj_o (pj); pj_kn (pj, "start", cache->rebase_infos->entries[i].start); pj_kn (pj, "end", cache->rebase_infos->entries[i].end); if (rebase_info) { ut8 version = rebase_info->version; pj_kn (pj, "version", version); pj_kn (pj, "slide", 
slide); if (version == 3) { RDyldRebaseInfo3 *info3 = (RDyldRebaseInfo3*) rebase_info; pj_kn (pj, "page_starts_count", info3->page_starts_count); pj_kn (pj, "page_size", info3->page_size); pj_kn (pj, "auth_value_add", info3->auth_value_add); } else if (version == 2 || version == 4) { RDyldRebaseInfo2 *info2 = (RDyldRebaseInfo2*) rebase_info; pj_kn (pj, "page_starts_count", info2->page_starts_count); pj_kn (pj, "page_extras_count", info2->page_extras_count); pj_kn (pj, "delta_mask", info2->delta_mask); pj_kn (pj, "value_mask", info2->value_mask); pj_kn (pj, "value_add", info2->value_add); pj_kn (pj, "delta_shift", info2->delta_shift); pj_kn (pj, "page_size", info2->page_size); } else if (version == 1) { RDyldRebaseInfo1 *info1 = (RDyldRebaseInfo1*) rebase_info; pj_kn (pj, "toc_count", info1->toc_count); pj_kn (pj, "entries_size", info1->entries_size); pj_kn (pj, "page_size", 4096); } } pj_end (pj); } pj_end (pj); } if (cache->hdr->imagesTextCount) { pj_k (pj, "images"); pj_a (pj); ut64 total_size = cache->hdr->imagesTextCount * sizeof (cache_text_info_t); cache_text_info_t * text_infos = malloc (total_size); if (!text_infos) { goto beach; } if (r_buf_fread_at (cache->buf, cache->hdr->imagesTextOffset, (ut8*)text_infos, "16clii", cache->hdr->imagesTextCount) != total_size) { free (text_infos); goto beach; } size_t i; for (i = 0; i != cache->hdr->imagesTextCount; i++) { cache_text_info_t * text_info = &text_infos[i]; r_hex_bin2str ((ut8*)text_info->uuid, 16, uuidstr); pj_o (pj); pj_ks (pj, "uuid", uuidstr); pj_kn (pj, "address", text_info->loadAddress + slide); pj_kn (pj, "textSegmentSize", text_info->textSegmentSize); char file[256]; if (r_buf_read_at (cache->buf, text_info->pathOffset, (ut8*) &file, sizeof (file)) == sizeof (file)) { file[255] = 0; pj_ks (pj, "path", file); char *last_slash = strrchr (file, '/'); if (last_slash && *last_slash) { pj_ks (pj, "name", last_slash + 1); } else { pj_ks (pj, "name", file); } } pj_end (pj); } pj_end (pj); free (text_infos); 
} pj_end (pj); p ("%s", pj_string (pj)); beach: pj_free (pj); } RBinPlugin r_bin_plugin_dyldcache = { .name = "dyldcache", .desc = "dyldcache bin plugin", .license = "LGPL3", .load_buffer = &load_buffer, .entries = &entries, .baddr = &baddr, .symbols = &symbols, .sections = &sections, .check_buffer = &check_buffer, .destroy = &destroy, .classes = &classes, .header = &header, .info = &info, }; #ifndef R2_PLUGIN_INCORE R_API RLibStruct radare_plugin = { .type = R_LIB_TYPE_BIN, .data = &r_bin_plugin_dyldcache, .version = R2_VERSION }; #endif
/* radare2 - LGPL - Copyright 2018-2022 - pancake, mrmacete, keegan */ #include <r_types.h> #include <r_util.h> #include <r_lib.h> #include <r_bin.h> #include <r_core.h> #include <r_io.h> #include <ht_pu.h> // #include "../format/mach0/mach0_defines.h" #define R_BIN_MACH064 1 #include "../format/mach0/mach0.h" #include "objc/mach0_classes.h" #define R_IS_PTR_AUTHENTICATED(x) B_IS_SET(x, 63) #define MAX_N_HDR 16 typedef struct { ut8 version; ut64 slide; ut8 *one_page_buf; ut32 page_size; ut64 start_of_data; } RDyldRebaseInfo; typedef struct { ut64 start; ut64 end; RDyldRebaseInfo *info; } RDyldRebaseInfosEntry; typedef struct { RDyldRebaseInfosEntry *entries; size_t length; } RDyldRebaseInfos; typedef struct { ut8 version; ut64 slide; ut8 *one_page_buf; ut32 page_size; ut64 start_of_data; ut16 *page_starts; ut32 page_starts_count; ut64 delta_mask; ut32 delta_shift; ut64 auth_value_add; } RDyldRebaseInfo3; typedef struct { ut8 version; ut64 slide; ut8 *one_page_buf; ut32 page_size; ut64 start_of_data; ut16 *page_starts; ut32 page_starts_count; ut16 *page_extras; ut32 page_extras_count; ut64 delta_mask; ut64 value_mask; ut32 delta_shift; ut64 value_add; } RDyldRebaseInfo2; typedef struct { ut8 version; ut64 slide; ut8 *one_page_buf; ut32 page_size; ut64 start_of_data; ut16 *toc; ut32 toc_count; ut8 *entries; ut32 entries_size; } RDyldRebaseInfo1; typedef struct { ut64 local_symbols_offset; ut64 nlists_offset; ut64 nlists_count; ut64 strings_offset; ut64 strings_size; } RDyldLocSym; typedef struct _r_dyldcache { ut8 magic[8]; cache_hdr_t *hdr; ut64 *hdr_offset; ut32 *maps_index; ut32 n_hdr; cache_map_t *maps; ut32 n_maps; RList *bins; RBuffer *buf; int (*original_io_read)(RIO *io, RIODesc *fd, ut8 *buf, int count); RDyldRebaseInfos *rebase_infos; cache_accel_t *accel; RDyldLocSym *locsym; objc_cache_opt_info *oi; bool objc_opt_info_loaded; } RDyldCache; typedef struct _r_bin_image { char *file; ut64 header_at; ut64 hdr_offset; ut64 symbols_off; ut64 va; ut32 
nlist_start_index;
	ut32 nlist_count;
} RDyldBinImage;

static R_TH_LOCAL RList *pending_bin_files = NULL;

/* Translate virtual address `addr` (after subtracting `slide`) to a file
 * offset using the cache mappings. Returns UT64_MAX when the address falls
 * in no mapping. Optionally reports the offset into the matched mapping
 * (*offset) and the bytes remaining in it (*left). */
static ut64 va2pa(uint64_t addr, ut32 n_maps, cache_map_t *maps, RBuffer *cache_buf, ut64 slide, ut32 *offset, ut32 *left) {
	ut64 res = UT64_MAX;
	ut32 i;
	addr -= slide;
	for (i = 0; i < n_maps; i++) {
		if (addr >= maps[i].address && addr < maps[i].address + maps[i].size) {
			res = maps[i].fileOffset + addr - maps[i].address;
			if (offset) {
				*offset = addr - maps[i].address;
			}
			if (left) {
				*left = maps[i].size - (addr - maps[i].address);
			}
			break;
		}
	}
	return res;
}

/* Destructor for RDyldBinImage: frees the owned file path, then the image. */
static void free_bin(RDyldBinImage *bin) {
	if (!bin) {
		return;
	}
	R_FREE (bin->file);
	R_FREE (bin);
}

/* Free a v3 rebase info and its page_starts table. */
static void rebase_info3_free(RDyldRebaseInfo3 *rebase_info) {
	if (!rebase_info) {
		return;
	}
	R_FREE (rebase_info->page_starts);
	R_FREE (rebase_info);
}

/* Free a v2/v4 rebase info and its page_starts/page_extras tables. */
static void rebase_info2_free(RDyldRebaseInfo2 *rebase_info) {
	if (!rebase_info) {
		return;
	}
	R_FREE (rebase_info->page_starts);
	R_FREE (rebase_info->page_extras);
	R_FREE (rebase_info);
}

/* Free a v1 rebase info, its TOC and its entries buffer. */
static void rebase_info1_free(RDyldRebaseInfo1 *rebase_info) {
	if (!rebase_info) {
		return;
	}
	R_FREE (rebase_info->toc);
	R_FREE (rebase_info->entries);
	R_FREE (rebase_info);
}

/* Free the shared one_page_buf, then dispatch to the version-specific
 * destructor (1, 2/4 or 3); unknown versions free only the base struct. */
static void rebase_info_free(RDyldRebaseInfo *rebase_info) {
	if (!rebase_info) {
		return;
	}
	R_FREE (rebase_info->one_page_buf);
	ut8 version = rebase_info->version;
	if (version == 1) {
		rebase_info1_free ((RDyldRebaseInfo1*) rebase_info);
	} else if (version == 2 || version == 4) {
		rebase_info2_free ((RDyldRebaseInfo2*) rebase_info);
	} else if (version == 3) {
		rebase_info3_free ((RDyldRebaseInfo3*) rebase_info);
	} else {
		R_FREE (rebase_info);
	}
}

/* Read the image-info table for `hdr`. Rejects zero or UT32_MAX counts and
 * offsets; the caller later shifts pathFileOffset by hdr_offset for headers
 * that do not start at the beginning of the buffer. */
static cache_img_t *read_cache_images(RBuffer *cache_buf, cache_hdr_t *hdr, ut64 hdr_offset) {
	if (!cache_buf || !hdr) {
		return NULL;
	}
	if (!hdr->imagesCount || !hdr->imagesOffset || hdr->imagesOffset == UT32_MAX || hdr->imagesCount == UT32_MAX) {
		return NULL;
	}
	ut64 size = sizeof (cache_img_t) * hdr->imagesCount;
	cache_img_t *images = R_NEWS0 (cache_img_t,
hdr->imagesCount); if (!images) { return NULL; } if (r_buf_fread_at (cache_buf, hdr->imagesOffset, (ut8*) images, "3l2i", hdr->imagesCount) != size) { R_FREE (images); return NULL; } if (hdr_offset) { ut32 i; for (i = 0; i < hdr->imagesCount; i++) { cache_img_t *img = &images[i]; img->pathFileOffset += hdr_offset; } } return images; } static void match_bin_entries(RDyldCache *cache, void *entries) { r_return_if_fail (cache && cache->bins && entries); cache_img_t *imgs = read_cache_images (cache->buf, cache->hdr, 0); if (!imgs) { return; } RDyldBinImage *bin = NULL; RListIter *it = r_list_iterator (cache->bins); bool has_large_entries = cache->n_hdr > 1; ut32 i; for (i = 0; i < cache->hdr->imagesCount; i++) { cache_img_t *img = &imgs[i]; if (!it) { break; } bin = it->data; if (!bin) { break; } if (bin && bin->va == img->address) { if (has_large_entries) { cache_locsym_entry_large_t *e = &((cache_locsym_entry_large_t *) entries)[i]; bin->nlist_start_index = e->nlistStartIndex; bin->nlist_count = e->nlistCount; } else { cache_locsym_entry_t *e = &((cache_locsym_entry_t *) entries)[i]; bin->nlist_start_index = e->nlistStartIndex; bin->nlist_count = e->nlistCount; } it = it->n; } } R_FREE (imgs); } static RDyldLocSym *r_dyld_locsym_new(RDyldCache *cache) { r_return_val_if_fail (cache && cache->buf, NULL); ut32 i; for (i = 0; i < cache->n_hdr; i++) { cache_hdr_t *hdr = &cache->hdr[i]; if (!hdr || !hdr->localSymbolsSize || !hdr->localSymbolsOffset) { continue; } cache_locsym_info_t *info = NULL; void *entries = NULL; ut64 info_size = sizeof (cache_locsym_info_t); info = R_NEW0 (cache_locsym_info_t); if (!info) { goto beach; } if (r_buf_fread_at (cache->buf, hdr->localSymbolsOffset, (ut8*) info, "6i", 1) != info_size) { eprintf ("locsym err 01\n"); goto beach; } if (info->entriesCount != cache->hdr->imagesCount) { eprintf ("locsym err 02\n"); goto beach; } bool has_large_entries = cache->n_hdr > 1; if (has_large_entries) { ut64 entries_size = sizeof 
(cache_locsym_entry_large_t) * info->entriesCount; cache_locsym_entry_large_t *large_entries = R_NEWS0 (cache_locsym_entry_large_t, info->entriesCount); if (!large_entries) { goto beach; } if (r_buf_fread_at (cache->buf, hdr->localSymbolsOffset + info->entriesOffset, (ut8*) large_entries, "lii", info->entriesCount) != entries_size) { eprintf ("locsym err 03\n"); goto beach; } entries = large_entries; } else { ut64 entries_size = sizeof (cache_locsym_entry_t) * info->entriesCount; cache_locsym_entry_t *regular_entries = R_NEWS0 (cache_locsym_entry_t, info->entriesCount); if (!regular_entries) { goto beach; } if (r_buf_fread_at (cache->buf, hdr->localSymbolsOffset + info->entriesOffset, (ut8*) regular_entries, "iii", info->entriesCount) != entries_size) { eprintf ("locsym err 04\n"); goto beach; } entries = regular_entries; } RDyldLocSym * locsym = R_NEW0 (RDyldLocSym); if (!locsym) { goto beach; } match_bin_entries (cache, entries); locsym->local_symbols_offset = hdr->localSymbolsOffset; locsym->nlists_offset = info->nlistOffset; locsym->nlists_count = info->nlistCount; locsym->strings_offset = info->stringsOffset; locsym->strings_size = info->stringsSize; free (info); free (entries); return locsym; beach: free (info); free (entries); eprintf ("dyldcache: malformed local symbols metadata\n"); break; } return NULL; } static ut64 rebase_infos_get_slide(RDyldCache *cache) { if (!cache->rebase_infos || !cache->rebase_infos->length) { return 0; } size_t i; for (i = 0; i < cache->rebase_infos->length; i++) { if (cache->rebase_infos->entries[i].info) { return cache->rebase_infos->entries[i].info->slide; } } return 0; } static void symbols_from_locsym(RDyldCache *cache, RDyldBinImage *bin, RList *symbols, SetU *hash) { RDyldLocSym *locsym = cache->locsym; if (!locsym) { return; } if (bin->nlist_start_index >= locsym->nlists_count || bin->nlist_start_index + bin->nlist_count > locsym->nlists_count) { eprintf ("dyldcache: malformed local symbol entry\n"); return; } ut64 
nlists_size = sizeof (struct MACH0_(nlist)) * bin->nlist_count; struct MACH0_(nlist) *nlists = R_NEWS0 (struct MACH0_(nlist), bin->nlist_count); if (!nlists) { return; } ut64 nlists_offset = locsym->local_symbols_offset + locsym->nlists_offset + bin->nlist_start_index * sizeof (struct MACH0_(nlist)); if (r_buf_fread_at (cache->buf, nlists_offset, (ut8*) nlists, "iccsl", bin->nlist_count) != nlists_size) { free (nlists); return; } ut32 j; for (j = 0; j != bin->nlist_count; j++) { struct MACH0_(nlist) *nlist = &nlists[j]; if (set_u_contains (hash, (ut64)nlist->n_value)) { continue; } set_u_add (hash, (ut64)nlist->n_value); if (nlist->n_strx >= locsym->strings_size) { continue; } RBinSymbol *sym = R_NEW0 (RBinSymbol); if (!sym) { break; } sym->type = "LOCAL"; sym->vaddr = nlist->n_value; ut64 slide = rebase_infos_get_slide (cache); sym->paddr = va2pa (nlist->n_value, cache->n_maps, cache->maps, cache->buf, slide, NULL, NULL); char *symstr =r_buf_get_string (cache->buf, locsym->local_symbols_offset + locsym->strings_offset + nlist->n_strx); if (symstr) { sym->name = symstr; } else { static ut32 k = 0; sym->name = r_str_newf ("unk_local%d", k++); } r_list_append (symbols, sym); } free (nlists); } static void r_dyldcache_free(RDyldCache *cache) { if (!cache) { return; } r_list_free (cache->bins); cache->bins = NULL; r_buf_free (cache->buf); cache->buf = NULL; if (cache->rebase_infos) { int i; for (i = 0; i < cache->rebase_infos->length; i++) { rebase_info_free (cache->rebase_infos->entries[i].info); cache->rebase_infos->entries[i].info = NULL; } R_FREE (cache->rebase_infos->entries); R_FREE (cache->rebase_infos); } R_FREE (cache->hdr); R_FREE (cache->maps); R_FREE (cache->maps_index); R_FREE (cache->hdr_offset); R_FREE (cache->accel); R_FREE (cache->locsym); R_FREE (cache->oi); R_FREE (cache); } static ut64 bin_obj_va2pa(ut64 p, ut32 *offset, ut32 *left, RBinFile *bf) { if (!bf || !bf->o || !bf->o->bin_obj) { return 0; } RDyldCache *cache = (RDyldCache*) ((struct 
MACH0_(obj_t)*)bf->o->bin_obj)->user; if (!cache) { return 0; } ut64 slide = rebase_infos_get_slide (cache); ut64 res = va2pa (p, cache->n_maps, cache->maps, cache->buf, slide, offset, left); if (res == UT64_MAX) { res = 0; } return res; } static struct MACH0_(obj_t) *bin_to_mach0(RBinFile *bf, RDyldBinImage *bin) { if (!bin || !bf) { return NULL; } RDyldCache *cache = (RDyldCache*) bf->o->bin_obj; if (!cache) { return NULL; } RBuffer *buf = r_buf_new_slice (cache->buf, bin->hdr_offset, r_buf_size (cache->buf) - bin->hdr_offset); if (!buf) { return NULL; } struct MACH0_(opts_t) opts; MACH0_(opts_set_default) (&opts, bf); opts.header_at = bin->header_at - bin->hdr_offset; opts.symbols_off = bin->symbols_off; struct MACH0_(obj_t) *mach0 = MACH0_(new_buf) (buf, &opts); mach0->user = cache; mach0->va2pa = &bin_obj_va2pa; r_buf_free (buf); return mach0; } static int prot2perm(int x) { int r = 0; if (x & 1) { r |= 4; } if (x & 2) { r |= 2; } if (x & 4) { r |= 1; } return r; } static ut32 dumb_ctzll(ut64 x) { ut64 result = 0; int i, j; for (i = 0; i < 64; i += 8) { ut8 byte = (x >> i) & 0xff; if (!byte) { result += 8; } else { for (j = 0; j < 8; j++) { if (!((byte >> j) & 1)) { result++; } else { break; } } break; } } return result; } static ut64 estimate_slide(RBinFile *bf, RDyldCache *cache, ut64 value_mask, ut64 value_add) { ut64 slide = 0; if (cache->n_hdr > 1) { return slide; } ut64 *classlist = malloc (64); if (!classlist) { goto beach; } RListIter *iter; RDyldBinImage *bin; r_list_foreach (cache->bins, iter, bin) { bool found_sample = false; struct MACH0_(opts_t) opts = {0}; opts.verbose = bf->rbin->verbose; opts.header_at = bin->header_at; opts.symbols_off = 0; struct MACH0_(obj_t) *mach0 = MACH0_(new_buf) (cache->buf, &opts); if (!mach0) { goto beach; } struct section_t *sections = NULL; if (!(sections = MACH0_(get_sections) (mach0))) { MACH0_(mach0_free) (mach0); goto beach; } int i; int incomplete = 2; int classlist_idx = 0, data_idx = 0; for (i = 0; 
!sections[i].last && incomplete; i++) { if (sections[i].size == 0) { continue; } if (strstr (sections[i].name, "__objc_classlist")) { incomplete--; classlist_idx = i; continue; } if (strstr (sections[i].name, "__objc_data")) { incomplete--; data_idx = i; continue; } } if (incomplete) { goto next_bin; } int classlist_sample_size = R_MIN (64, sections[classlist_idx].size); int n_classes = classlist_sample_size / 8; ut64 sect_offset = sections[classlist_idx].offset + bin->hdr_offset; if (r_buf_fread_at (cache->buf, sect_offset, (ut8*) classlist, "l", n_classes) < classlist_sample_size) { goto next_bin; } ut64 data_addr = sections[data_idx].addr; ut64 data_tail = data_addr & 0xfff; ut64 data_tail_end = (data_addr + sections[data_idx].size) & 0xfff; for (i = 0; i < n_classes; i++) { ut64 cl_addr = (classlist[i] & value_mask) + value_add; ut64 cl_tail = cl_addr & 0xfff; if (cl_tail >= data_tail && cl_tail < data_tail_end) { ut64 off = cl_tail - data_tail; slide = ((cl_addr - off) & value_mask) - (data_addr & value_mask); found_sample = true; break; } } next_bin: MACH0_(mach0_free) (mach0); R_FREE (sections); if (found_sample) { break; } } beach: R_FREE (classlist); return slide; } static RDyldRebaseInfo *get_rebase_info(RBinFile *bf, RDyldCache *cache, ut64 slideInfoOffset, ut64 slideInfoSize, ut64 start_of_data, ut64 slide) { ut8 *tmp_buf_1 = NULL; ut8 *tmp_buf_2 = NULL; ut8 *one_page_buf = NULL; RBuffer *cache_buf = cache->buf; ut64 offset = slideInfoOffset; ut32 slide_info_version = 0; if (r_buf_read_at (cache_buf, offset, (ut8*) &slide_info_version, 4) != 4) { return NULL; } if (slide_info_version == 3) { cache_slide3_t slide_info; ut64 size = sizeof (cache_slide3_t); if (r_buf_fread_at (cache_buf, offset, (ut8*) &slide_info, "4i1l", 1) < 20) { return NULL; } ut64 page_starts_offset = offset + size; ut64 page_starts_size = slide_info.page_starts_count * 2; if (page_starts_size + size > slideInfoSize) { return NULL; } if (page_starts_size > 0) { tmp_buf_1 = malloc 
(page_starts_size); if (!tmp_buf_1) { goto beach; } if (r_buf_fread_at (cache_buf, page_starts_offset, tmp_buf_1, "s", slide_info.page_starts_count) != page_starts_size) { goto beach; } } if (slide_info.page_size > 0) { one_page_buf = malloc (slide_info.page_size); if (!one_page_buf) { goto beach; } } RDyldRebaseInfo3 *rebase_info = R_NEW0 (RDyldRebaseInfo3); if (!rebase_info) { goto beach; } rebase_info->version = 3; rebase_info->delta_mask = 0x3ff8000000000000ULL; rebase_info->delta_shift = 51; rebase_info->start_of_data = start_of_data; rebase_info->page_starts = (ut16*) tmp_buf_1; rebase_info->page_starts_count = slide_info.page_starts_count; rebase_info->auth_value_add = slide_info.auth_value_add; rebase_info->page_size = slide_info.page_size; rebase_info->one_page_buf = one_page_buf; if (slide == UT64_MAX) { rebase_info->slide = estimate_slide (bf, cache, 0x7ffffffffffffULL, 0); if (rebase_info->slide) { eprintf ("dyldcache is slid: 0x%"PFMT64x"\n", rebase_info->slide); } } else { rebase_info->slide = slide; } return (RDyldRebaseInfo*) rebase_info; } else if (slide_info_version == 2 || slide_info_version == 4) { cache_slide2_t slide_info; ut64 size = sizeof (cache_slide2_t); if (r_buf_fread_at (cache_buf, offset, (ut8*) &slide_info, "6i2l", 1) != size) { return NULL; } if (slide_info.page_starts_offset == 0 || slide_info.page_starts_offset > slideInfoSize || slide_info.page_starts_offset + slide_info.page_starts_count * 2 > slideInfoSize) { return NULL; } if (slide_info.page_extras_offset == 0 || slide_info.page_extras_offset > slideInfoSize || slide_info.page_extras_offset + slide_info.page_extras_count * 2 > slideInfoSize) { return NULL; } if (slide_info.page_starts_count > 0) { ut64 size = slide_info.page_starts_count * 2; ut64 at = slideInfoOffset + slide_info.page_starts_offset; tmp_buf_1 = malloc (size); if (!tmp_buf_1) { goto beach; } if (r_buf_fread_at (cache_buf, at, tmp_buf_1, "s", slide_info.page_starts_count) != size) { goto beach; } } if 
(slide_info.page_extras_count > 0) { ut64 size = slide_info.page_extras_count * 2; ut64 at = slideInfoOffset + slide_info.page_extras_offset; tmp_buf_2 = malloc (size); if (!tmp_buf_2) { goto beach; } if (r_buf_fread_at (cache_buf, at, tmp_buf_2, "s", slide_info.page_extras_count) != size) { goto beach; } } if (slide_info.page_size > 0) { one_page_buf = malloc (slide_info.page_size); if (!one_page_buf) { goto beach; } } RDyldRebaseInfo2 *rebase_info = R_NEW0 (RDyldRebaseInfo2); if (!rebase_info) { goto beach; } rebase_info->version = slide_info_version; rebase_info->start_of_data = start_of_data; rebase_info->page_starts = (ut16*) tmp_buf_1; rebase_info->page_starts_count = slide_info.page_starts_count; rebase_info->page_extras = (ut16*) tmp_buf_2; rebase_info->page_extras_count = slide_info.page_extras_count; rebase_info->value_add = slide_info.value_add; rebase_info->delta_mask = slide_info.delta_mask; rebase_info->value_mask = ~rebase_info->delta_mask; rebase_info->delta_shift = dumb_ctzll (rebase_info->delta_mask) - 2; rebase_info->page_size = slide_info.page_size; rebase_info->one_page_buf = one_page_buf; if (slide == UT64_MAX) { rebase_info->slide = estimate_slide (bf, cache, rebase_info->value_mask, rebase_info->value_add); if (rebase_info->slide) { eprintf ("dyldcache is slid: 0x%"PFMT64x"\n", rebase_info->slide); } } else { rebase_info->slide = slide; } return (RDyldRebaseInfo*) rebase_info; } else if (slide_info_version == 1) { cache_slide1_t slide_info; ut64 size = sizeof (cache_slide1_t); if (r_buf_fread_at (cache_buf, offset, (ut8*) &slide_info, "6i", 1) != size) { return NULL; } if (slide_info.toc_offset == 0 || slide_info.toc_offset > slideInfoSize || slide_info.toc_offset + slide_info.toc_count * 2 > slideInfoSize) { return NULL; } if (slide_info.entries_offset == 0 || slide_info.entries_offset > slideInfoSize || slide_info.entries_offset + slide_info.entries_count * slide_info.entries_size > slideInfoSize) { return NULL; } if (slide_info.toc_count 
> 0) { ut64 size = slide_info.toc_count * 2; ut64 at = slideInfoOffset + slide_info.toc_offset; tmp_buf_1 = malloc (size); if (!tmp_buf_1) { goto beach; } if (r_buf_fread_at (cache_buf, at, tmp_buf_1, "s", slide_info.toc_count) != size) { goto beach; } } if (slide_info.entries_count > 0) { ut64 size = (ut64) slide_info.entries_count * (ut64) slide_info.entries_size; ut64 at = slideInfoOffset + slide_info.entries_offset; tmp_buf_2 = malloc (size); if (!tmp_buf_2) { goto beach; } if (r_buf_read_at (cache_buf, at, tmp_buf_2, size) != size) { goto beach; } } one_page_buf = malloc (4096); if (!one_page_buf) { goto beach; } RDyldRebaseInfo1 *rebase_info = R_NEW0 (RDyldRebaseInfo1); if (!rebase_info) { goto beach; } rebase_info->version = 1; rebase_info->start_of_data = start_of_data; rebase_info->one_page_buf = one_page_buf; rebase_info->page_size = 4096; rebase_info->toc = (ut16*) tmp_buf_1; rebase_info->toc_count = slide_info.toc_count; rebase_info->entries = tmp_buf_2; rebase_info->entries_size = slide_info.entries_size; if (slide == UT64_MAX) { rebase_info->slide = estimate_slide (bf, cache, UT64_MAX, 0); if (rebase_info->slide) { eprintf ("dyldcache is slid: 0x%"PFMT64x"\n", rebase_info->slide); } } else { rebase_info->slide = slide; } return (RDyldRebaseInfo*) rebase_info; } else { eprintf ("unsupported slide info version %d\n", slide_info_version); return NULL; } beach: R_FREE (tmp_buf_1); R_FREE (tmp_buf_2); R_FREE (one_page_buf); return NULL; } static RDyldRebaseInfos *get_rebase_infos(RBinFile *bf, RDyldCache *cache) { RDyldRebaseInfos *result = R_NEW0 (RDyldRebaseInfos); if (!result) { return NULL; } if (!cache->hdr->slideInfoOffset || !cache->hdr->slideInfoSize) { ut32 total_slide_infos = 0; ut32 n_slide_infos[MAX_N_HDR]; ut32 i; for (i = 0; i < cache->n_hdr && i < MAX_N_HDR; i++) { ut64 hdr_offset = cache->hdr_offset[i]; if ((n_slide_infos[i] = r_buf_read_le32_at (cache->buf, 0x13c + hdr_offset)) == UT32_MAX) { goto beach; } total_slide_infos += 
n_slide_infos[i]; } if (!total_slide_infos) { goto beach; } RDyldRebaseInfosEntry * infos = R_NEWS0 (RDyldRebaseInfosEntry, total_slide_infos); if (!infos) { goto beach; } ut32 k = 0; for (i = 0; i < cache->n_hdr && i < MAX_N_HDR; i++) { ut64 hdr_offset = cache->hdr_offset[i]; ut64 slide_infos_offset; if (!n_slide_infos[i]) { continue; } if ((slide_infos_offset = r_buf_read_le32_at (cache->buf, 0x138 + hdr_offset)) == UT32_MAX) { continue; } if (!slide_infos_offset) { continue; } slide_infos_offset += hdr_offset; ut32 j; RDyldRebaseInfo *prev_info = NULL; for (j = 0; j < n_slide_infos[i]; j++) { ut64 offset = slide_infos_offset + j * sizeof (cache_mapping_slide); cache_mapping_slide entry; if (r_buf_fread_at (cache->buf, offset, (ut8*)&entry, "6lii", 1) != sizeof (cache_mapping_slide)) { break; } if (entry.slideInfoOffset && entry.slideInfoSize) { infos[k].start = entry.fileOffset + hdr_offset; infos[k].end = infos[k].start + entry.size; ut64 slide = prev_info ? prev_info->slide : UT64_MAX; infos[k].info = get_rebase_info (bf, cache, entry.slideInfoOffset + hdr_offset, entry.slideInfoSize, entry.fileOffset + hdr_offset, slide); prev_info = infos[k].info; k++; } } } if (!k) { free (infos); goto beach; } if (k < total_slide_infos) { RDyldRebaseInfosEntry * pruned_infos = R_NEWS0 (RDyldRebaseInfosEntry, k); if (!pruned_infos) { free (infos); goto beach; } memcpy (pruned_infos, infos, sizeof (RDyldRebaseInfosEntry) * k); free (infos); infos = pruned_infos; } result->entries = infos; result->length = k; return result; } if (cache->hdr->mappingCount > 1) { RDyldRebaseInfosEntry * infos = R_NEWS0 (RDyldRebaseInfosEntry, 1); if (!infos) { goto beach; } infos[0].start = cache->maps[1].fileOffset; infos[0].end = infos[0].start + cache->maps[1].size; infos[0].info = get_rebase_info (bf, cache, cache->hdr->slideInfoOffset, cache->hdr->slideInfoSize, infos[0].start, UT64_MAX); result->entries = infos; result->length = 1; return result; } beach: free (result); return NULL; } 
/* A cache is accepted iff its magic names a dyld_v1 arm64/x86_64 cache. */
static bool check_magic(const char *magic) {
	return !strcmp (magic, "dyld_v1 arm64")
		|| !strcmp (magic, "dyld_v1 arm64e")
		|| !strcmp (magic, "dyld_v1 x86_64")
		|| !strcmp (magic, "dyld_v1 x86_64h");
}

/* RBinPlugin.check_buffer: sniff the 16-byte magic at offset 0. */
static bool check_buffer(RBinFile *bf, RBuffer *buf) {
	if (r_buf_size (buf) < 32) {
		return false;
	}
	char hdr[17] = { 0 };
	int rhdr = r_buf_read_at (buf, 0, (ut8 *)&hdr, sizeof (hdr) - 1);
	if (rhdr != sizeof (hdr) - 1) {
		return false;
	}
	return check_magic (hdr);
}

/* Read the accelerator's per-image "extras" array (one record per image). */
static cache_imgxtr_t *read_cache_imgextra(RBuffer *cache_buf, cache_hdr_t *hdr, cache_accel_t *accel) {
	if (!cache_buf || !hdr || !hdr->imagesCount || !accel || !accel->imageExtrasCount || !accel->imagesExtrasOffset) {
		return NULL;
	}
	ut64 size = sizeof (cache_imgxtr_t) * accel->imageExtrasCount;
	cache_imgxtr_t *images = R_NEWS0 (cache_imgxtr_t, accel->imageExtrasCount);
	if (!images) {
		return NULL;
	}
	if (r_buf_fread_at (cache_buf, accel->imagesExtrasOffset, (ut8*) images, "ll4i", accel->imageExtrasCount) != size) {
		R_FREE (images);
		return NULL;
	}
	return images;
}

/* Heap-duplicate the (truncated to 255 chars) path of `img`; caller frees.
 * Returns strdup ("FAIL") when the path cannot be read. */
static char *get_lib_name(RBuffer *cache_buf, cache_img_t *img) {
	char file[256];
	char *lib_name = file;
	if (r_buf_read_at (cache_buf, img->pathFileOffset, (ut8*) &file, sizeof (file)) == sizeof (file)) {
		file[255] = 0;
		/*char * last_slash = strrchr (file, '/');
		if (last_slash && *last_slash) {
			lib_name = last_slash + 1;
		}*/
		return strdup (lib_name);
	}
	return strdup ("FAIL");
}

/* r_list_find comparator: 0 (match) when `b` is a substring of `a`. */
static int string_contains(const void *a, const void *b) {
	return !strstr ((const char*) a, (const char*) b);
}

/* Build a hashtable mapping image path -> image index, also inserting the
 * shortened "Foo.framework/Foo"-style alias for versioned framework paths. */
static HtPU *create_path_to_index(RBuffer *cache_buf, cache_img_t *img, cache_hdr_t *hdr) {
	HtPU *path_to_idx = ht_pu_new0 ();
	if (!path_to_idx) {
		return NULL;
	}
	size_t i;
	for (i = 0; i != hdr->imagesCount; i++) {
		char file[256];
		if (r_buf_read_at (cache_buf, img[i].pathFileOffset, (ut8*) &file, sizeof (file)) != sizeof (file)) {
			continue;
		}
		file[sizeof (file) - 1] = 0;
		ht_pu_insert (path_to_idx, file, (ut64)i);
		const char versions_pattern[] = ".framework/Versions/";
char *versions = strstr (file, versions_pattern); if (versions) { char *next_slash = strchr (versions + 20, '/'); if (next_slash) { char *tail = strdup (next_slash); if (!tail) { break; } strcpy (versions + 10, tail); free (tail); ht_pu_insert (path_to_idx, file, (ut64)i); } } } return path_to_idx; } static void carve_deps_at_address(RDyldCache *cache, cache_img_t *img, HtPU *path_to_idx, ut64 address, int *deps, bool printing) { ut64 pa = va2pa (address, cache->n_maps, cache->maps, cache->buf, 0, NULL, NULL); if (pa == UT64_MAX) { return; } struct MACH0_(mach_header) mh; if (r_buf_fread_at (cache->buf, pa, (ut8*) &mh, "8i", 1) != sizeof (struct MACH0_(mach_header))) { return; } if (mh.magic != MH_MAGIC_64 || mh.sizeofcmds == 0) { return; } ut64 cmds_at = pa + sizeof (struct MACH0_(mach_header)); ut8 *cmds = malloc (mh.sizeofcmds + 1); if (!cmds || r_buf_read_at (cache->buf, cmds_at, cmds, mh.sizeofcmds) != mh.sizeofcmds) { goto beach; } cmds[mh.sizeofcmds] = 0; ut8 *cursor = cmds; ut8 *end = cmds + mh.sizeofcmds; while (cursor < end) { ut32 cmd = r_read_le32 (cursor); ut32 cmdsize = r_read_le32 (cursor + sizeof (ut32)); if (cmd == LC_LOAD_DYLIB || cmd == LC_LOAD_WEAK_DYLIB || cmd == LC_REEXPORT_DYLIB || cmd == LC_LOAD_UPWARD_DYLIB) { bool found; if (cursor + 24 >= end) { break; } const char *key = (const char *) cursor + 24; size_t dep_index = (size_t)ht_pu_find (path_to_idx, key, &found); if (!found || dep_index >= cache->hdr->imagesCount) { eprintf ("Warning: alien dep '%s'\n", key); continue; } deps[dep_index]++; if (printing) { eprintf ("-> %s\n", key); } } cursor += cmdsize; } beach: free (cmds); } static ut64 resolve_symbols_off(RDyldCache *cache, ut64 pa) { struct MACH0_(mach_header) mh; if (r_buf_fread_at (cache->buf, pa, (ut8*) &mh, "8i", 1) != sizeof (struct MACH0_(mach_header))) { return 0; } if (mh.magic != MH_MAGIC_64 || mh.sizeofcmds == 0) { return 0; } ut64 cmds_at = pa + sizeof (struct MACH0_(mach_header)); ut64 cursor = cmds_at; ut64 end = cursor 
+ mh.sizeofcmds; while (cursor < end) { ut32 cmd = r_buf_read_le32_at (cache->buf, cursor); if (cmd == UT32_MAX) { return 0; } ut32 cmdsize = r_buf_read_le32_at (cache->buf, cursor + sizeof (ut32)); if (cmdsize == UT32_MAX) { return 0; } if (cmd == LC_SEGMENT || cmd == LC_SEGMENT_64) { char segname[17]; segname[16] = 0; if (r_buf_read_at (cache->buf, cursor + 2 * sizeof (ut32), (ut8 *)segname, 16) != 16) { return 0; } if (!strncmp (segname, "__LINKEDIT", 16)) { ut64 vmaddr = r_buf_read_le64_at (cache->buf, cursor + 2 * sizeof (ut32) + 16); if (vmaddr == UT64_MAX) { return 0; } ut32 i,j; for (i = 0; i < cache->n_hdr; i++) { cache_hdr_t *hdr = &cache->hdr[i]; ut64 hdr_offset = cache->hdr_offset[i]; ut32 maps_index = cache->maps_index[i]; for (j = 0; j < hdr->mappingCount; j++) { ut64 map_start = cache->maps[maps_index + j].address; ut64 map_end = map_start + cache->maps[maps_index + j].size; if (vmaddr >= map_start && vmaddr < map_end) { return hdr_offset; } } } } } cursor += cmdsize; } return 0; } static RList *create_cache_bins(RBinFile *bf, RDyldCache *cache) { RList *bins = r_list_newf ((RListFree)free_bin); ut16 *depArray = NULL; cache_imgxtr_t *extras = NULL; if (!bins) { return NULL; } char *target_libs = NULL; RList *target_lib_names = NULL; int *deps = NULL; target_libs = r_sys_getenv ("R_DYLDCACHE_FILTER"); if (target_libs) { target_lib_names = r_str_split_list (target_libs, ":", 0); if (!target_lib_names) { r_list_free (bins); return NULL; } deps = R_NEWS0 (int, cache->hdr->imagesCount); if (!deps) { r_list_free (bins); r_list_free (target_lib_names); return NULL; } } ut32 i; for (i = 0; i < cache->n_hdr; i++) { cache_hdr_t *hdr = &cache->hdr[i]; ut64 hdr_offset = cache->hdr_offset[i]; ut32 maps_index = cache->maps_index[i]; cache_img_t *img = read_cache_images (cache->buf, hdr, hdr_offset); if (!img) { goto next; } ut32 j; if (target_libs) { HtPU *path_to_idx = NULL; if (cache->accel) { depArray = R_NEWS0 (ut16, cache->accel->depListCount); if (!depArray) 
{ goto next; } if (r_buf_fread_at (cache->buf, cache->accel->depListOffset, (ut8*) depArray, "s", cache->accel->depListCount) != cache->accel->depListCount * 2) { goto next; } extras = read_cache_imgextra (cache->buf, hdr, cache->accel); if (!extras) { goto next; } } else { path_to_idx = create_path_to_index (cache->buf, img, hdr); } for (j = 0; j < hdr->imagesCount; j++) { bool printing = !deps[j]; char *lib_name = get_lib_name (cache->buf, &img[j]); if (!lib_name) { break; } if (strstr (lib_name, "libobjc.A.dylib")) { deps[j]++; } if (!r_list_find (target_lib_names, lib_name, string_contains)) { R_FREE (lib_name); continue; } if (printing) { eprintf ("FILTER: %s\n", lib_name); } R_FREE (lib_name); deps[j]++; if (extras && depArray) { ut32 k; for (k = extras[j].dependentsStartArrayIndex; depArray[k] != 0xffff; k++) { ut16 dep_index = depArray[k] & 0x7fff; deps[dep_index]++; char *dep_name = get_lib_name (cache->buf, &img[dep_index]); if (!dep_name) { break; } if (printing) { eprintf ("-> %s\n", dep_name); } free (dep_name); } } else if (path_to_idx) { carve_deps_at_address (cache, img, path_to_idx, img[j].address, deps, printing); } } ht_pu_free (path_to_idx); R_FREE (depArray); R_FREE (extras); } for (j = 0; j < hdr->imagesCount; j++) { if (deps && !deps[j]) { continue; } ut64 pa = va2pa (img[j].address, hdr->mappingCount, &cache->maps[maps_index], cache->buf, 0, NULL, NULL); if (pa == UT64_MAX) { continue; } ut8 magicbytes[4]; r_buf_read_at (cache->buf, pa, magicbytes, 4); int magic = r_read_le32 (magicbytes); switch (magic) { case MH_MAGIC_64: { char file[256]; RDyldBinImage *bin = R_NEW0 (RDyldBinImage); if (!bin) { goto next; } bin->header_at = pa; bin->hdr_offset = hdr_offset; bin->symbols_off = resolve_symbols_off (cache, pa); bin->va = img[j].address; if (r_buf_read_at (cache->buf, img[j].pathFileOffset, (ut8*) &file, sizeof (file)) == sizeof (file)) { file[255] = 0; char *last_slash = strrchr (file, '/'); if (last_slash && *last_slash) { if (last_slash > 
file) { char *scan = last_slash - 1; while (scan > file && *scan != '/') { scan--; } if (*scan == '/') { bin->file = strdup (scan + 1); } else { bin->file = strdup (last_slash + 1); } } else { bin->file = strdup (last_slash + 1); } } else { bin->file = strdup (file); } } r_list_append (bins, bin); break; } default: eprintf ("Unknown sub-bin\n"); break; } } next: R_FREE (depArray); R_FREE (extras); R_FREE (img); } if (r_list_empty (bins)) { r_list_free (bins); bins = NULL; } R_FREE (deps); R_FREE (target_libs); r_list_free (target_lib_names); return bins; } static void rebase_bytes_v1(RDyldRebaseInfo1 *rebase_info, ut8 *buf, ut64 offset, int count, ut64 start_of_write) { int in_buf = 0; while (in_buf < count) { ut64 offset_in_data = offset - rebase_info->start_of_data; ut64 page_index = offset_in_data / rebase_info->page_size; ut64 page_offset = offset_in_data % rebase_info->page_size; ut64 to_next_page = rebase_info->page_size - page_offset; ut64 entry_index = page_offset / 32; ut64 offset_in_entry = (page_offset % 32) / 4; if (entry_index >= rebase_info->entries_size) { in_buf += to_next_page; offset += to_next_page; continue; } if (page_index >= rebase_info->toc_count) { break; } ut8 *entry = &rebase_info->entries[rebase_info->toc[page_index] * rebase_info->entries_size]; ut8 b = entry[entry_index]; if (b & (1 << offset_in_entry)) { ut64 value = r_read_le64 (buf + in_buf); value += rebase_info->slide; r_write_le64 (buf + in_buf, value); in_buf += 8; offset += 8; } else { in_buf += 4; offset += 4; } } } static void rebase_bytes_v2(RDyldRebaseInfo2 *rebase_info, ut8 *buf, ut64 offset, int count, ut64 start_of_write) { int in_buf = 0; while (in_buf < count) { ut64 offset_in_data = offset - rebase_info->start_of_data; ut64 page_index = offset_in_data / rebase_info->page_size; ut64 page_offset = offset_in_data % rebase_info->page_size; ut64 to_next_page = rebase_info->page_size - page_offset; if (page_index >= rebase_info->page_starts_count) { goto next_page; } ut16 
page_flag = rebase_info->page_starts[page_index]; if (page_flag == DYLD_CACHE_SLIDE_PAGE_ATTR_NO_REBASE) { goto next_page; } if (!(page_flag & DYLD_CACHE_SLIDE_PAGE_ATTR_EXTRA)) { ut64 first_rebase_off = rebase_info->page_starts[page_index] * 4; if (first_rebase_off >= page_offset && first_rebase_off < page_offset + count) { ut32 delta = 1; while (delta) { ut64 position = in_buf + first_rebase_off - page_offset; if (position >= count) { break; } ut64 raw_value = r_read_le64 (buf + position); delta = ((raw_value & rebase_info->delta_mask) >> rebase_info->delta_shift); if (position >= start_of_write) { ut64 new_value = raw_value & rebase_info->value_mask; if (new_value != 0) { new_value += rebase_info->value_add; new_value += rebase_info->slide; } r_write_le64 (buf + position, new_value); } first_rebase_off += delta; } } } next_page: in_buf += to_next_page; offset += to_next_page; } } static void rebase_bytes_v3(RDyldRebaseInfo3 *rebase_info, ut8 *buf, ut64 offset, int count, ut64 start_of_write) { int in_buf = 0; while (in_buf < count) { ut64 offset_in_data = offset - rebase_info->start_of_data; ut64 page_index = offset_in_data / rebase_info->page_size; ut64 page_offset = offset_in_data % rebase_info->page_size; ut64 to_next_page = rebase_info->page_size - page_offset; if (page_index >= rebase_info->page_starts_count) { goto next_page; } ut64 delta = rebase_info->page_starts[page_index]; if (delta == DYLD_CACHE_SLIDE_V3_PAGE_ATTR_NO_REBASE) { goto next_page; } ut64 first_rebase_off = delta; if (first_rebase_off >= page_offset && first_rebase_off < page_offset + count) { do { ut64 position = in_buf + first_rebase_off - page_offset; if (position >= count) { break; } ut64 raw_value = r_read_le64 (buf + position); delta = ((raw_value & rebase_info->delta_mask) >> rebase_info->delta_shift) * 8; if (position >= start_of_write) { ut64 new_value = 0; if (R_IS_PTR_AUTHENTICATED (raw_value)) { new_value = (raw_value & 0xFFFFFFFFULL) + rebase_info->auth_value_add; // TODO: 
don't throw auth info away } else { new_value = ((raw_value << 13) & 0xFF00000000000000ULL) | (raw_value & 0x7ffffffffffULL); new_value &= 0x00FFFFFFFFFFFFFFULL; } if (new_value != 0) { new_value += rebase_info->slide; } r_write_le64 (buf + position, new_value); } first_rebase_off += delta; } while (delta); } next_page: in_buf += to_next_page; offset += to_next_page; } } static RDyldRebaseInfo *rebase_info_by_range(RDyldRebaseInfos *infos, ut64 offset, int count) { int imid; int imin = 0; int imax = infos->length - 1; while (imin < imax) { imid = (imin + imax) / 2; RDyldRebaseInfosEntry *entry = &infos->entries[imid]; if ((entry->end) <= offset) { imin = imid + 1; } else { imax = imid; } } RDyldRebaseInfosEntry *minEntry = &infos->entries[imin]; if ((imax == imin) && (minEntry->start <= offset + count) && (minEntry->end >= offset)) { return minEntry->info; } return NULL; } static void rebase_bytes(RDyldRebaseInfo *rebase_info, ut8 *buf, ut64 offset, int count, ut64 start_of_write) { if (!rebase_info || !buf) { return; } if (rebase_info->version == 3) { rebase_bytes_v3 ((RDyldRebaseInfo3*) rebase_info, buf, offset, count, start_of_write); } else if (rebase_info->version == 2 || rebase_info->version == 4) { rebase_bytes_v2 ((RDyldRebaseInfo2*) rebase_info, buf, offset, count, start_of_write); } else if (rebase_info->version == 1) { rebase_bytes_v1 ((RDyldRebaseInfo1*) rebase_info, buf, offset, count, start_of_write); } } static int dyldcache_io_read(RIO *io, RIODesc *fd, ut8 *buf, int count) { r_return_val_if_fail (io, -1); RCore *core = (RCore*) io->corebind.core; if (!core || !core->bin || !core->bin->binfiles) { return -1; } RDyldCache *cache = NULL; RListIter *iter; RBinFile *bf; r_list_foreach (core->bin->binfiles, iter, bf) { if (bf->fd == fd->fd ) { if (!strncmp ((char*) bf->o->bin_obj, "dyldcac", 7)) { cache = bf->o->bin_obj; } else { cache = ((struct MACH0_(obj_t)*) bf->o->bin_obj)->user; } if (pending_bin_files) { RListIter *to_remove = r_list_contains 
(pending_bin_files, bf); if (to_remove) { r_list_delete (pending_bin_files, to_remove); if (r_list_empty (pending_bin_files)) { r_list_free (pending_bin_files); pending_bin_files = NULL; } } } break; } } if (!cache) { r_list_foreach (pending_bin_files, iter, bf) { if (bf->fd == fd->fd && bf->o) { if (!strncmp ((char*) bf->o->bin_obj, "dyldcac", 7)) { cache = bf->o->bin_obj; } else { cache = ((struct MACH0_(obj_t)*) bf->o->bin_obj)->user; } break; } } } if (!cache || !cache->original_io_read) { if (fd->plugin->read == &dyldcache_io_read) { return -1; } return fd->plugin->read (io, fd, buf, count); } RDyldRebaseInfo *rebase_info = rebase_info_by_range (cache->rebase_infos, io->off, count); int result = 0; if (rebase_info && count > 0) { ut64 offset_in_data = io->off - rebase_info->start_of_data; ut64 page_offset = offset_in_data % rebase_info->page_size; ut64 internal_offset = io->off & ~(rebase_info->page_size - 1); ut64 internal_end = io->off + count; int rounded_count = internal_end - internal_offset; ut8 *internal_buf = rebase_info->one_page_buf; if (rounded_count > rebase_info->page_size) { internal_buf = malloc (rounded_count); if (!internal_buf) { eprintf ("Cannot allocate memory for 'internal_buf'\n"); return -1; } } ut64 original_off = io->off; io->off = internal_offset; int internal_result = cache->original_io_read (io, fd, internal_buf, rounded_count); io->off = original_off; if (internal_result >= page_offset + count) { rebase_bytes (rebase_info, internal_buf, internal_offset, internal_result, page_offset); result = R_MIN (count, internal_result); memcpy (buf, internal_buf + page_offset, result); } else { eprintf ("ERROR rebasing\n"); result = cache->original_io_read (io, fd, buf, count); } if (internal_buf != rebase_info->one_page_buf) { R_FREE (internal_buf); } } else { result = cache->original_io_read (io, fd, buf, count); } return result; } static void swizzle_io_read(RDyldCache *cache, RIO *io) { if (!io || !io->desc || !io->desc->plugin) { return; } 
RIOPlugin *plugin = io->desc->plugin; cache->original_io_read = plugin->read; plugin->read = &dyldcache_io_read; } static cache_hdr_t *read_cache_header(RBuffer *cache_buf, ut64 offset) { if (!cache_buf) { return NULL; } cache_hdr_t *hdr = R_NEW0 (cache_hdr_t); if (!hdr) { return NULL; } ut64 size = sizeof (cache_hdr_t); if (r_buf_fread_at (cache_buf, offset, (ut8*) hdr, "16c4i7l16clii4l", 1) != size) { R_FREE (hdr); return NULL; } if (!check_magic (hdr->magic)) { R_FREE (hdr); return NULL; } if (!hdr->imagesCount && !hdr->imagesOffset) { hdr->imagesOffset = r_buf_read_le32_at (cache_buf, 0x1c0 + offset); hdr->imagesCount = r_buf_read_le32_at (cache_buf, 0x1c4 + offset); } return hdr; } static void populate_cache_headers(RDyldCache *cache) { cache->n_hdr = 0; RList *hdrs = r_list_newf (NULL); if (!hdrs) { return; } cache_hdr_t *h; ut64 offsets[MAX_N_HDR]; ut64 offset = 0; do { offsets[cache->n_hdr] = offset; h = read_cache_header (cache->buf, offset); if (!h) { break; } r_list_append (hdrs, h); ut64 size = h->codeSignatureOffset + h->codeSignatureSize; #define SHIFT_MAYBE(x) \ if (x) { \ x += offset; \ } SHIFT_MAYBE (h->mappingOffset); SHIFT_MAYBE (h->imagesOffset); SHIFT_MAYBE (h->codeSignatureOffset); SHIFT_MAYBE (h->slideInfoOffset); SHIFT_MAYBE (h->localSymbolsOffset); SHIFT_MAYBE (h->branchPoolsOffset); SHIFT_MAYBE (h->imagesTextOffset); offset += size; cache->n_hdr++; } while (cache->n_hdr < MAX_N_HDR); if (!cache->n_hdr) { goto beach; } cache->hdr = R_NEWS0 (cache_hdr_t, cache->n_hdr); if (!cache->hdr) { cache->n_hdr = 0; goto beach; } cache->hdr_offset = R_NEWS0 (ut64, cache->n_hdr); if (!cache->hdr_offset) { cache->n_hdr = 0; R_FREE (cache->hdr); goto beach; } memcpy (cache->hdr_offset, offsets, cache->n_hdr * sizeof (ut64)); ut32 i = 0; RListIter *iter; cache_hdr_t *item; r_list_foreach (hdrs, iter, item) { if (i >= cache->n_hdr) { break; } memcpy (&cache->hdr[i++], item, sizeof (cache_hdr_t)); } beach: r_list_free (hdrs); } static void 
populate_cache_maps(RDyldCache *cache) {
	r_return_if_fail (cache && cache->buf);
	/* first pass: count mappings across all (sub)cache headers */
	ut32 i;
	ut32 n_maps = 0;
	for (i = 0; i < cache->n_hdr; i++) {
		cache_hdr_t *hdr = &cache->hdr[i];
		if (!hdr->mappingCount || !hdr->mappingOffset) {
			continue;
		}
		n_maps += hdr->mappingCount;
	}
	cache_map_t *maps = NULL;
	if (n_maps != 0) {
		/* maps_index[i] = index of header i's first mapping inside `maps` */
		cache->maps_index = R_NEWS0 (ut32, cache->n_hdr);
		if (!cache->maps_index) {
			return;
		}
		maps = R_NEWS0 (cache_map_t, n_maps);
	}
	if (!maps) {
		cache->maps = NULL;
		cache->n_maps = 0;
		return;
	}
	/* second pass: read each header's mapping table and rebase its file
	 * offsets to the subcache that header belongs to */
	ut32 next_map = 0;
	for (i = 0; i < cache->n_hdr; i++) {
		cache_hdr_t *hdr = &cache->hdr[i];
		cache->maps_index[i] = next_map;
		if (!hdr->mappingCount || !hdr->mappingOffset) {
			continue;
		}
		ut64 size = sizeof (cache_map_t) * hdr->mappingCount;
		if (r_buf_fread_at (cache->buf, hdr->mappingOffset, (ut8*) &maps[next_map], "3l2i", hdr->mappingCount) != size) {
			continue;
		}
		ut32 j;
		ut64 hdr_offset = cache->hdr_offset[i];
		for (j = 0; j < hdr->mappingCount; j++) {
			cache_map_t *map = &maps[next_map + j];
			map->fileOffset += hdr_offset;
		}
		next_map += hdr->mappingCount;
	}
	cache->maps = maps;
	cache->n_maps = next_map;
}

/* Read the acceleration-info blob (if the header advertises one) and turn
 * its internal relative offsets into absolute buffer offsets. */
static cache_accel_t *read_cache_accel(RBuffer *cache_buf, cache_hdr_t *hdr, cache_map_t *maps, int n_maps) {
	if (!cache_buf || !hdr || !hdr->accelerateInfoSize || !hdr->accelerateInfoAddr) {
		return NULL;
	}
	size_t mc = R_MIN (hdr->mappingCount, n_maps);
	ut64 offset = va2pa (hdr->accelerateInfoAddr, mc, maps, cache_buf, 0, NULL, NULL);
	if (!offset) {
		return NULL;
	}
	ut64 size = sizeof (cache_accel_t);
	cache_accel_t *accel = R_NEW0 (cache_accel_t);
	if (!accel) {
		return NULL;
	}
	if (r_buf_fread_at (cache_buf, offset, (ut8*) accel, "16il", 1) != size) {
		R_FREE (accel);
		return NULL;
	}
	accel->imagesExtrasOffset += offset;
	accel->bottomUpListOffset += offset;
	accel->dylibTrieOffset += offset;
	accel->initializersOffset += offset;
	accel->dofSectionsOffset += offset;
	accel->reExportListOffset += offset;
	accel->depListOffset += offset;
	accel->rangeTableOffset += offset;
	return accel;
}
static objc_cache_opt_info *get_objc_opt_info(RBinFile *bf, RDyldCache *cache) { objc_cache_opt_info *result = NULL; RListIter *iter; RDyldBinImage *bin; r_list_foreach (cache->bins, iter, bin) { if (strcmp (bin->file, "lib/libobjc.A.dylib")) { continue; } struct MACH0_(opts_t) opts = {0}; opts.verbose = bf->rbin->verbose; opts.header_at = bin->header_at; opts.symbols_off = 0; struct MACH0_(obj_t) *mach0 = MACH0_(new_buf) (cache->buf, &opts); if (!mach0) { goto beach; } struct section_t *sections = NULL; if (!(sections = MACH0_(get_sections) (mach0))) { MACH0_(mach0_free) (mach0); goto beach; } int i; ut64 scoffs_offset = 0; ut64 scoffs_size = 0; ut64 selrefs_offset = 0; ut64 selrefs_size = 0; ut8 remaining = 2; ut64 slide = rebase_infos_get_slide (cache); for (i = 0; !sections[i].last; i++) { if (sections[i].size == 0) { continue; } if (strstr (sections[i].name, "__objc_scoffs")) { scoffs_offset = va2pa (sections[i].addr, cache->n_maps, cache->maps, cache->buf, slide, NULL, NULL); scoffs_size = sections[i].size; remaining--; if (remaining == 0) { break; } } if (strstr (sections[i].name, "__DATA.__objc_selrefs")) { selrefs_offset = va2pa (sections[i].addr, cache->n_maps, cache->maps, cache->buf, slide, NULL, NULL); selrefs_size = sections[i].size; remaining--; if (remaining == 0) { break; } } } MACH0_(mach0_free) (mach0); R_FREE (sections); ut64 sel_string_base = 0; if (!scoffs_offset || scoffs_size < 40) { if (!selrefs_offset || !selrefs_size || cache->n_hdr == 1) { break; } ut64 cursor = selrefs_offset; ut64 end = cursor + selrefs_size; while (cursor < end) { ut64 sel_ptr = r_buf_read_le64_at (cache->buf, cursor); if (sel_ptr == UT64_MAX) { break; } ut64 sel_offset = va2pa (sel_ptr, cache->n_maps, cache->maps, cache->buf, slide, NULL, NULL); char * selector = r_buf_get_string (cache->buf, sel_offset); if (!selector) { break; } bool is_magic_selector = !strncmp (selector, "\xf0\x9f\xa4\xaf", 4); free (selector); if (is_magic_selector) { sel_string_base = sel_ptr; 
break; } cursor += 8; } if (sel_string_base == 0) { break; } } else { ut64 check = r_buf_read_le64_at (cache->buf, scoffs_offset); if (check != 2) { break; } sel_string_base = r_buf_read_le64_at (cache->buf, scoffs_offset + 8); if (sel_string_base == UT64_MAX) { break; } ut64 sel_string_end = r_buf_read_le64_at (cache->buf, scoffs_offset + 16); if (sel_string_end == sel_string_base || sel_string_end == UT64_MAX) { break; } } result = R_NEW0 (objc_cache_opt_info); if (!result) { break; } result->sel_string_base = sel_string_base; } beach: return result; } static bool load_buffer(RBinFile *bf, void **bin_obj, RBuffer *buf, ut64 loadaddr, Sdb *sdb) { RDyldCache *cache = R_NEW0 (RDyldCache); memcpy (cache->magic, "dyldcac", 7); cache->buf = r_buf_ref (buf); populate_cache_headers (cache); if (!cache->hdr) { r_dyldcache_free (cache); return false; } populate_cache_maps (cache); if (!cache->maps) { r_dyldcache_free (cache); return false; } cache->accel = read_cache_accel (cache->buf, cache->hdr, cache->maps, cache->n_maps); cache->bins = create_cache_bins (bf, cache); if (!cache->bins) { r_dyldcache_free (cache); return false; } cache->locsym = r_dyld_locsym_new (cache); cache->rebase_infos = get_rebase_infos (bf, cache); if (cache->rebase_infos) { if (!rebase_infos_get_slide (cache)) { if (!pending_bin_files) { pending_bin_files = r_list_new (); if (!pending_bin_files) { r_dyldcache_free (cache); return false; } } r_list_push (pending_bin_files, bf); swizzle_io_read (cache, bf->rbin->iob.io); } } *bin_obj = cache; return true; } static RList *entries(RBinFile *bf) { RBinAddr *ptr = NULL; RList *ret = r_list_newf (free); if (!ret) { return NULL; } if ((ptr = R_NEW0 (RBinAddr))) { r_list_append (ret, ptr); } return ret; } static RBinInfo *info(RBinFile *bf) { RBinInfo *ret = NULL; if (!bf || !bf->o) { return NULL; } RDyldCache *cache = (RDyldCache*) bf->o->bin_obj; if (!cache) { return NULL; } bool big_endian = 0; if (!(ret = R_NEW0 (RBinInfo))) { return NULL; } ret->file 
= strdup (bf->file); ret->bclass = strdup ("dyldcache"); ret->rclass = strdup ("ios"); ret->os = strdup ("iOS"); if (strstr (cache->hdr->magic, "x86_64")) { ret->arch = strdup ("x86"); ret->bits = 64; } else { ret->arch = strdup ("arm"); ret->bits = strstr (cache->hdr->magic, "arm64")? 64: 32; } ret->machine = strdup (ret->arch); ret->subsystem = strdup ("xnu"); ret->type = strdup ("library-cache"); ret->has_va = true; ret->big_endian = big_endian; ret->dbg_info = 0; return ret; } #if 0 static void parse_mach0(RList *ret, ut64 paddr, RBinFile *bf) { // TODO } #endif static ut64 baddr(RBinFile *bf) { // XXX hardcoded return 0x180000000; } void symbols_from_bin(RDyldCache *cache, RList *ret, RBinFile *bf, RDyldBinImage *bin, SetU *hash) { struct MACH0_(obj_t) *mach0 = bin_to_mach0 (bf, bin); if (!mach0) { return; } // const RList*symbols = MACH0_(get_symbols_list) (mach0); const struct symbol_t *symbols = MACH0_(get_symbols) (mach0); if (!symbols) { return; } int i; for (i = 0; !symbols[i].last; i++) { if (!symbols[i].name || !symbols[i].name[0] || symbols[i].addr < 100) { continue; } if (strstr (symbols[i].name, "<redacted>")) { continue; } RBinSymbol *sym = R_NEW0 (RBinSymbol); if (!sym) { break; } sym->name = strdup (symbols[i].name); sym->vaddr = symbols[i].addr; sym->forwarder = "NONE"; sym->bind = (symbols[i].type == R_BIN_MACH0_SYMBOL_TYPE_LOCAL)? 
R_BIN_BIND_LOCAL_STR: R_BIN_BIND_GLOBAL_STR; sym->type = R_BIN_TYPE_FUNC_STR; sym->paddr = symbols[i].offset + bf->o->boffset; sym->size = symbols[i].size; sym->ordinal = i; set_u_add (hash, sym->vaddr); r_list_append (ret, sym); } MACH0_(mach0_free) (mach0); } static bool __is_data_section(const char *name) { if (strstr (name, "_cstring")) { return true; } if (strstr (name, "_os_log")) { return true; } if (strstr (name, "_objc_methname")) { return true; } if (strstr (name, "_objc_classname")) { return true; } if (strstr (name, "_objc_methtype")) { return true; } return false; } static void sections_from_bin(RList *ret, RBinFile *bf, RDyldBinImage *bin) { RDyldCache *cache = (RDyldCache*) bf->o->bin_obj; if (!cache) { return; } struct MACH0_(obj_t) *mach0 = bin_to_mach0 (bf, bin); if (!mach0) { return; } struct section_t *sections = NULL; if (!(sections = MACH0_(get_sections) (mach0))) { return; } ut64 slide = rebase_infos_get_slide (cache); int i; for (i = 0; !sections[i].last; i++) { RBinSection *ptr = R_NEW0 (RBinSection); if (!ptr) { break; } if (bin->file) { ptr->name = r_str_newf ("%s.%s", bin->file, (char*)sections[i].name); } else { ptr->name = r_str_newf ("%s", (char*)sections[i].name); } if (strstr (ptr->name, "la_symbol_ptr")) { int len = sections[i].size / 8; ptr->format = r_str_newf ("Cd %d[%d]", 8, len); } ptr->is_data = __is_data_section (ptr->name); ptr->size = sections[i].size; ptr->vsize = sections[i].vsize; ptr->vaddr = sections[i].addr; ptr->paddr = va2pa (sections[i].addr, cache->n_maps, cache->maps, cache->buf, slide, NULL, NULL); if (!ptr->vaddr) { ptr->vaddr = ptr->paddr; } ptr->perm = sections[i].perm; r_list_append (ret, ptr); } free (sections); MACH0_(mach0_free) (mach0); } static RList *sections(RBinFile *bf) { RDyldCache *cache = (RDyldCache*) bf->o->bin_obj; if (!cache) { return NULL; } RList *ret = r_list_newf (free); if (!ret) { return NULL; } RListIter *iter; RDyldBinImage *bin; r_list_foreach (cache->bins, iter, bin) { 
sections_from_bin (ret, bf, bin); } RBinSection *ptr = NULL; int i; for (i = 0; i < cache->n_maps; i++) { if (!(ptr = R_NEW0 (RBinSection))) { r_list_free (ret); return NULL; } ptr->name = r_str_newf ("cache_map.%d", i); ptr->size = cache->maps[i].size; ptr->vsize = ptr->size; ptr->paddr = cache->maps[i].fileOffset; ptr->vaddr = cache->maps[i].address; ptr->add = true; ptr->is_segment = true; ptr->perm = prot2perm (cache->maps[i].initProt); r_list_append (ret, ptr); } ut64 slide = rebase_infos_get_slide (cache); if (slide) { RBinSection *section; r_list_foreach (ret, iter, section) { section->vaddr += slide; } } return ret; } static RList *symbols(RBinFile *bf) { RDyldCache *cache = (RDyldCache*) bf->o->bin_obj; if (!cache) { return NULL; } RList *ret = r_list_newf (free); if (!ret) { return NULL; } RListIter *iter; RDyldBinImage *bin; r_list_foreach (cache->bins, iter, bin) { SetU *hash = set_u_new (); if (!hash) { r_list_free (ret); return NULL; } symbols_from_bin (cache, ret, bf, bin, hash); symbols_from_locsym (cache, bin, ret, hash); set_u_free (hash); } ut64 slide = rebase_infos_get_slide (cache); if (slide) { RBinSymbol *sym; r_list_foreach (ret, iter, sym) { sym->vaddr += slide; } } return ret; } /* static void unswizzle_io_read(RDyldCache *cache, RIO *io) { if (!io || !io->desc || !io->desc->plugin || !cache->original_io_read) { return; } RIOPlugin *plugin = io->desc->plugin; plugin->read = cache->original_io_read; cache->original_io_read = NULL; } */ static void destroy(RBinFile *bf) { RDyldCache *cache = (RDyldCache*) bf->o->bin_obj; // unswizzle_io_read (cache, bf->rbin->iob.io); // XXX io may be dead here r_dyldcache_free (cache); } static RList *classes(RBinFile *bf) { RDyldCache *cache = (RDyldCache*) bf->o->bin_obj; if (!cache) { return NULL; } RList *ret = r_list_newf (free); if (!ret) { return NULL; } if (!cache->objc_opt_info_loaded) { cache->oi = get_objc_opt_info (bf, cache); cache->objc_opt_info_loaded = true; } RListIter *iter; RDyldBinImage 
*bin; ut64 slide = rebase_infos_get_slide (cache); RBuffer *orig_buf = bf->buf; ut32 num_of_unnamed_class = 0; r_list_foreach (cache->bins, iter, bin) { struct MACH0_(obj_t) *mach0 = bin_to_mach0 (bf, bin); if (!mach0) { goto beach; } struct section_t *sections = NULL; if (!(sections = MACH0_(get_sections) (mach0))) { MACH0_(mach0_free) (mach0); goto beach; } int i; for (i = 0; !sections[i].last; i++) { if (sections[i].size == 0) { continue; } bool is_classlist = strstr (sections[i].name, "__objc_classlist"); bool is_catlist = strstr (sections[i].name, "__objc_catlist"); if (!is_classlist && !is_catlist) { continue; } ut8 *pointers = malloc (sections[i].size); if (!pointers) { continue; } ut64 offset = va2pa (sections[i].addr, cache->n_maps, cache->maps, cache->buf, slide, NULL, NULL); if (r_buf_read_at (cache->buf, offset, pointers, sections[i].size) < sections[i].size) { R_FREE (pointers); continue; } ut8 *cursor = pointers; ut8 *pointers_end = pointers + sections[i].size; for (; cursor < pointers_end; cursor += 8) { ut64 pointer_to_class = r_read_le64 (cursor); RBinClass *klass; if (!(klass = R_NEW0 (RBinClass)) || !(klass->methods = r_list_new ()) || !(klass->fields = r_list_new ())) { R_FREE (klass); R_FREE (pointers); R_FREE (sections); MACH0_(mach0_free) (mach0); goto beach; } bf->o->bin_obj = mach0; bf->buf = cache->buf; if (is_classlist) { MACH0_(get_class_t) (pointer_to_class, bf, klass, false, NULL, cache->oi); } else { MACH0_(get_category_t) (pointer_to_class, bf, klass, NULL, cache->oi); } bf->o->bin_obj = cache; bf->buf = orig_buf; if (!klass->name) { eprintf ("KLASS ERROR AT 0x%"PFMT64x", is_classlist %d\n", pointer_to_class, is_classlist); klass->name = r_str_newf ("UnnamedClass%u", num_of_unnamed_class); if (!klass->name) { R_FREE (klass); R_FREE (pointers); R_FREE (sections); MACH0_(mach0_free) (mach0); goto beach; } num_of_unnamed_class++; } r_list_append (ret, klass); } R_FREE (pointers); } R_FREE (sections); MACH0_(mach0_free) (mach0); } return 
ret; beach: r_list_free (ret); return NULL; } static void header(RBinFile *bf) { if (!bf || !bf->o) { return; } RDyldCache *cache = (RDyldCache*) bf->o->bin_obj; if (!cache) { return; } RBin *bin = bf->rbin; ut64 slide = rebase_infos_get_slide (cache); PrintfCallback p = bin->cb_printf; PJ *pj = pj_new (); if (!pj) { return; } pj_o (pj); pj_k (pj, "header"); pj_o (pj); pj_ks (pj, "magic", cache->hdr->magic); pj_kn (pj, "mappingOffset", cache->hdr->mappingOffset); pj_kn (pj, "mappingCount", cache->hdr->mappingCount); pj_kn (pj, "imagesOffset", cache->hdr->imagesOffset); pj_kn (pj, "imagesCount", cache->hdr->imagesCount); pj_kn (pj, "dyldBaseAddress", cache->hdr->dyldBaseAddress); pj_kn (pj, "codeSignatureOffset", cache->hdr->codeSignatureOffset); pj_kn (pj, "codeSignatureSize", cache->hdr->codeSignatureSize); pj_kn (pj, "slideInfoOffset", cache->hdr->slideInfoOffset); pj_kn (pj, "slideInfoSize", cache->hdr->slideInfoSize); pj_kn (pj, "localSymbolsOffset", cache->hdr->localSymbolsOffset); pj_kn (pj, "localSymbolsSize", cache->hdr->localSymbolsSize); char uuidstr[128]; r_hex_bin2str ((ut8*)cache->hdr->uuid, 16, uuidstr); pj_ks (pj, "uuid", uuidstr); pj_ks (pj, "cacheType", (cache->hdr->cacheType == 0) ? 
"development" : "production"); pj_kn (pj, "branchPoolsOffset", cache->hdr->branchPoolsOffset); pj_kn (pj, "branchPoolsCount", cache->hdr->branchPoolsCount); pj_kn (pj, "accelerateInfoAddr", cache->hdr->accelerateInfoAddr + slide); pj_kn (pj, "accelerateInfoSize", cache->hdr->accelerateInfoSize); pj_kn (pj, "imagesTextOffset", cache->hdr->imagesTextOffset); pj_kn (pj, "imagesTextCount", cache->hdr->imagesTextCount); pj_end (pj); if (cache->accel) { pj_k (pj, "accelerator"); pj_o (pj); pj_kn (pj, "version", cache->accel->version); pj_kn (pj, "imageExtrasCount", cache->accel->imageExtrasCount); pj_kn (pj, "imagesExtrasOffset", cache->accel->imagesExtrasOffset); pj_kn (pj, "bottomUpListOffset", cache->accel->bottomUpListOffset); pj_kn (pj, "dylibTrieOffset", cache->accel->dylibTrieOffset); pj_kn (pj, "dylibTrieSize", cache->accel->dylibTrieSize); pj_kn (pj, "initializersOffset", cache->accel->initializersOffset); pj_kn (pj, "initializersCount", cache->accel->initializersCount); pj_kn (pj, "dofSectionsOffset", cache->accel->dofSectionsOffset); pj_kn (pj, "dofSectionsCount", cache->accel->dofSectionsCount); pj_kn (pj, "reExportListOffset", cache->accel->reExportListOffset); pj_kn (pj, "reExportCount", cache->accel->reExportCount); pj_kn (pj, "depListOffset", cache->accel->depListOffset); pj_kn (pj, "depListCount", cache->accel->depListCount); pj_kn (pj, "rangeTableOffset", cache->accel->rangeTableOffset); pj_kn (pj, "rangeTableCount", cache->accel->rangeTableCount); pj_kn (pj, "dyldSectionAddr", cache->accel->dyldSectionAddr + slide); pj_end (pj); } if (cache->rebase_infos) { size_t i; pj_k (pj, "slideInfo"); pj_a (pj); for (i = 0; i < cache->rebase_infos->length; i++) { RDyldRebaseInfo * rebase_info = cache->rebase_infos->entries[i].info; pj_o (pj); pj_kn (pj, "start", cache->rebase_infos->entries[i].start); pj_kn (pj, "end", cache->rebase_infos->entries[i].end); if (rebase_info) { ut8 version = rebase_info->version; pj_kn (pj, "version", version); pj_kn (pj, "slide", 
slide); if (version == 3) { RDyldRebaseInfo3 *info3 = (RDyldRebaseInfo3*) rebase_info; pj_kn (pj, "page_starts_count", info3->page_starts_count); pj_kn (pj, "page_size", info3->page_size); pj_kn (pj, "auth_value_add", info3->auth_value_add); } else if (version == 2 || version == 4) { RDyldRebaseInfo2 *info2 = (RDyldRebaseInfo2*) rebase_info; pj_kn (pj, "page_starts_count", info2->page_starts_count); pj_kn (pj, "page_extras_count", info2->page_extras_count); pj_kn (pj, "delta_mask", info2->delta_mask); pj_kn (pj, "value_mask", info2->value_mask); pj_kn (pj, "value_add", info2->value_add); pj_kn (pj, "delta_shift", info2->delta_shift); pj_kn (pj, "page_size", info2->page_size); } else if (version == 1) { RDyldRebaseInfo1 *info1 = (RDyldRebaseInfo1*) rebase_info; pj_kn (pj, "toc_count", info1->toc_count); pj_kn (pj, "entries_size", info1->entries_size); pj_kn (pj, "page_size", 4096); } } pj_end (pj); } pj_end (pj); } if (cache->hdr->imagesTextCount) { pj_k (pj, "images"); pj_a (pj); ut64 total_size = cache->hdr->imagesTextCount * sizeof (cache_text_info_t); cache_text_info_t * text_infos = malloc (total_size); if (!text_infos) { goto beach; } if (r_buf_fread_at (cache->buf, cache->hdr->imagesTextOffset, (ut8*)text_infos, "16clii", cache->hdr->imagesTextCount) != total_size) { free (text_infos); goto beach; } size_t i; for (i = 0; i != cache->hdr->imagesTextCount; i++) { cache_text_info_t * text_info = &text_infos[i]; r_hex_bin2str ((ut8*)text_info->uuid, 16, uuidstr); pj_o (pj); pj_ks (pj, "uuid", uuidstr); pj_kn (pj, "address", text_info->loadAddress + slide); pj_kn (pj, "textSegmentSize", text_info->textSegmentSize); char file[256]; if (r_buf_read_at (cache->buf, text_info->pathOffset, (ut8*) &file, sizeof (file)) == sizeof (file)) { file[255] = 0; pj_ks (pj, "path", file); char *last_slash = strrchr (file, '/'); if (last_slash && *last_slash) { pj_ks (pj, "name", last_slash + 1); } else { pj_ks (pj, "name", file); } } pj_end (pj); } pj_end (pj); free (text_infos); 
} pj_end (pj); p ("%s", pj_string (pj)); beach: pj_free (pj); } RBinPlugin r_bin_plugin_dyldcache = { .name = "dyldcache", .desc = "dyldcache bin plugin", .license = "LGPL3", .load_buffer = &load_buffer, .entries = &entries, .baddr = &baddr, .symbols = &symbols, .sections = &sections, .check_buffer = &check_buffer, .destroy = &destroy, .classes = &classes, .header = &header, .info = &info, }; #ifndef R2_PLUGIN_INCORE R_API RLibStruct radare_plugin = { .type = R_LIB_TYPE_BIN, .data = &r_bin_plugin_dyldcache, .version = R2_VERSION }; #endif
static cache_accel_t *read_cache_accel(RBuffer *cache_buf, cache_hdr_t *hdr, cache_map_t *maps) { if (!cache_buf || !hdr || !hdr->accelerateInfoSize || !hdr->accelerateInfoAddr) { return NULL; } ut64 offset = va2pa (hdr->accelerateInfoAddr, hdr->mappingCount, maps, cache_buf, 0, NULL, NULL); if (!offset) { return NULL; } ut64 size = sizeof (cache_accel_t); cache_accel_t *accel = R_NEW0 (cache_accel_t); if (!accel) { return NULL; } if (r_buf_fread_at (cache_buf, offset, (ut8*) accel, "16il", 1) != size) { R_FREE (accel); return NULL; } accel->imagesExtrasOffset += offset; accel->bottomUpListOffset += offset; accel->dylibTrieOffset += offset; accel->initializersOffset += offset; accel->dofSectionsOffset += offset; accel->reExportListOffset += offset; accel->depListOffset += offset; accel->rangeTableOffset += offset; return accel; }
static cache_accel_t *read_cache_accel(RBuffer *cache_buf, cache_hdr_t *hdr, cache_map_t *maps, int n_maps) { if (!cache_buf || !hdr || !hdr->accelerateInfoSize || !hdr->accelerateInfoAddr) { return NULL; } size_t mc = R_MIN (hdr->mappingCount, n_maps); ut64 offset = va2pa (hdr->accelerateInfoAddr, mc, maps, cache_buf, 0, NULL, NULL); if (!offset) { return NULL; } ut64 size = sizeof (cache_accel_t); cache_accel_t *accel = R_NEW0 (cache_accel_t); if (!accel) { return NULL; } if (r_buf_fread_at (cache_buf, offset, (ut8*) accel, "16il", 1) != size) { R_FREE (accel); return NULL; } accel->imagesExtrasOffset += offset; accel->bottomUpListOffset += offset; accel->dylibTrieOffset += offset; accel->initializersOffset += offset; accel->dofSectionsOffset += offset; accel->reExportListOffset += offset; accel->depListOffset += offset; accel->rangeTableOffset += offset; return accel; }
{'added': [(1147, '\tut16 *depArray = NULL;'), (1148, '\tcache_imgxtr_t *extras = NULL;'), (1737, 'static cache_accel_t *read_cache_accel(RBuffer *cache_buf, cache_hdr_t *hdr, cache_map_t *maps, int n_maps) {'), (1741, '\tsize_t mc = R_MIN (hdr->mappingCount, n_maps);'), (1742, '\tut64 offset = va2pa (hdr->accelerateInfoAddr, mc, maps, cache_buf, 0, NULL, NULL);'), (1898, '\tcache->accel = read_cache_accel (cache->buf, cache->hdr, cache->maps, cache->n_maps);')], 'deleted': [(1180, '\t\tut16 *depArray = NULL;'), (1181, '\t\tcache_imgxtr_t *extras = NULL;'), (1737, 'static cache_accel_t *read_cache_accel(RBuffer *cache_buf, cache_hdr_t *hdr, cache_map_t *maps) {'), (1741, ''), (1742, '\tut64 offset = va2pa (hdr->accelerateInfoAddr, hdr->mappingCount, maps, cache_buf, 0, NULL, NULL);'), (1898, '\tcache->accel = read_cache_accel (cache->buf, cache->hdr, cache->maps);')]}
6
6
2,145
14,485
27
182
8
https://github.com/radareorg/radare2
CVE-2022-0676
CWE-787
2,611
ast.c
C
decode_bytes_with_escapes
/* * This file includes functions to transform a concrete syntax tree (CST) to * an abstract syntax tree (AST). The main function is Ta3AST_FromNode(). * */ #include "Python.h" #include "Python-ast.h" #include "node.h" #include "ast.h" #include "token.h" #include <assert.h> #if PY_MINOR_VERSION < 4 #define PyErr_ProgramTextObject PyErr_ProgramText #define PyMem_RawMalloc PyMem_Malloc #define PyMem_RawRealloc PyMem_Realloc #define PyMem_RawFree PyMem_Free #endif static int validate_stmts(asdl_seq *); static int validate_exprs(asdl_seq *, expr_context_ty, int); static int validate_nonempty_seq(asdl_seq *, const char *, const char *); static int validate_stmt(stmt_ty); static int validate_expr(expr_ty, expr_context_ty); mod_ty string_object_to_c_ast(const char *s, PyObject *filename, int start, PyCompilerFlags *flags, int feature_version, PyArena *arena); static int validate_comprehension(asdl_seq *gens) { int i; if (!asdl_seq_LEN(gens)) { PyErr_SetString(PyExc_ValueError, "comprehension with no generators"); return 0; } for (i = 0; i < asdl_seq_LEN(gens); i++) { comprehension_ty comp = asdl_seq_GET(gens, i); if (!validate_expr(comp->target, Store) || !validate_expr(comp->iter, Load) || !validate_exprs(comp->ifs, Load, 0)) return 0; } return 1; } static int validate_slice(slice_ty slice) { switch (slice->kind) { case Slice_kind: return (!slice->v.Slice.lower || validate_expr(slice->v.Slice.lower, Load)) && (!slice->v.Slice.upper || validate_expr(slice->v.Slice.upper, Load)) && (!slice->v.Slice.step || validate_expr(slice->v.Slice.step, Load)); case ExtSlice_kind: { int i; if (!validate_nonempty_seq(slice->v.ExtSlice.dims, "dims", "ExtSlice")) return 0; for (i = 0; i < asdl_seq_LEN(slice->v.ExtSlice.dims); i++) if (!validate_slice(asdl_seq_GET(slice->v.ExtSlice.dims, i))) return 0; return 1; } case Index_kind: return validate_expr(slice->v.Index.value, Load); default: PyErr_SetString(PyExc_SystemError, "unknown slice node"); return 0; } } static int 
validate_keywords(asdl_seq *keywords) { int i; for (i = 0; i < asdl_seq_LEN(keywords); i++) if (!validate_expr(((keyword_ty)asdl_seq_GET(keywords, i))->value, Load)) return 0; return 1; } static int validate_args(asdl_seq *args) { int i; for (i = 0; i < asdl_seq_LEN(args); i++) { arg_ty arg = asdl_seq_GET(args, i); if (arg->annotation && !validate_expr(arg->annotation, Load)) return 0; } return 1; } static const char * expr_context_name(expr_context_ty ctx) { switch (ctx) { case Load: return "Load"; case Store: return "Store"; case Del: return "Del"; case AugLoad: return "AugLoad"; case AugStore: return "AugStore"; case Param: return "Param"; default: assert(0); return "(unknown)"; } } static int validate_arguments(arguments_ty args) { if (!validate_args(args->args)) return 0; if (args->vararg && args->vararg->annotation && !validate_expr(args->vararg->annotation, Load)) { return 0; } if (!validate_args(args->kwonlyargs)) return 0; if (args->kwarg && args->kwarg->annotation && !validate_expr(args->kwarg->annotation, Load)) { return 0; } if (asdl_seq_LEN(args->defaults) > asdl_seq_LEN(args->args)) { PyErr_SetString(PyExc_ValueError, "more positional defaults than args on arguments"); return 0; } if (asdl_seq_LEN(args->kw_defaults) != asdl_seq_LEN(args->kwonlyargs)) { PyErr_SetString(PyExc_ValueError, "length of kwonlyargs is not the same as " "kw_defaults on arguments"); return 0; } return validate_exprs(args->defaults, Load, 0) && validate_exprs(args->kw_defaults, Load, 1); } static int validate_constant(PyObject *value) { if (value == Py_None || value == Py_Ellipsis) return 1; if (PyLong_CheckExact(value) || PyFloat_CheckExact(value) || PyComplex_CheckExact(value) || PyBool_Check(value) || PyUnicode_CheckExact(value) || PyBytes_CheckExact(value)) return 1; if (PyTuple_CheckExact(value) || PyFrozenSet_CheckExact(value)) { PyObject *it; it = PyObject_GetIter(value); if (it == NULL) return 0; while (1) { PyObject *item = PyIter_Next(it); if (item == NULL) { if 
(PyErr_Occurred()) { Py_DECREF(it); return 0; } break; } if (!validate_constant(item)) { Py_DECREF(it); Py_DECREF(item); return 0; } Py_DECREF(item); } Py_DECREF(it); return 1; } return 0; } static int validate_expr(expr_ty exp, expr_context_ty ctx) { int check_ctx = 1; expr_context_ty actual_ctx; /* First check expression context. */ switch (exp->kind) { case Attribute_kind: actual_ctx = exp->v.Attribute.ctx; break; case Subscript_kind: actual_ctx = exp->v.Subscript.ctx; break; case Starred_kind: actual_ctx = exp->v.Starred.ctx; break; case Name_kind: actual_ctx = exp->v.Name.ctx; break; case List_kind: actual_ctx = exp->v.List.ctx; break; case Tuple_kind: actual_ctx = exp->v.Tuple.ctx; break; default: if (ctx != Load) { PyErr_Format(PyExc_ValueError, "expression which can't be " "assigned to in %s context", expr_context_name(ctx)); return 0; } check_ctx = 0; /* set actual_ctx to prevent gcc warning */ actual_ctx = 0; } if (check_ctx && actual_ctx != ctx) { PyErr_Format(PyExc_ValueError, "expression must have %s context but has %s instead", expr_context_name(ctx), expr_context_name(actual_ctx)); return 0; } /* Now validate expression. 
*/ switch (exp->kind) { case BoolOp_kind: if (asdl_seq_LEN(exp->v.BoolOp.values) < 2) { PyErr_SetString(PyExc_ValueError, "BoolOp with less than 2 values"); return 0; } return validate_exprs(exp->v.BoolOp.values, Load, 0); case BinOp_kind: return validate_expr(exp->v.BinOp.left, Load) && validate_expr(exp->v.BinOp.right, Load); case UnaryOp_kind: return validate_expr(exp->v.UnaryOp.operand, Load); case Lambda_kind: return validate_arguments(exp->v.Lambda.args) && validate_expr(exp->v.Lambda.body, Load); case IfExp_kind: return validate_expr(exp->v.IfExp.test, Load) && validate_expr(exp->v.IfExp.body, Load) && validate_expr(exp->v.IfExp.orelse, Load); case Dict_kind: if (asdl_seq_LEN(exp->v.Dict.keys) != asdl_seq_LEN(exp->v.Dict.values)) { PyErr_SetString(PyExc_ValueError, "Dict doesn't have the same number of keys as values"); return 0; } /* null_ok=1 for keys expressions to allow dict unpacking to work in dict literals, i.e. ``{**{a:b}}`` */ return validate_exprs(exp->v.Dict.keys, Load, /*null_ok=*/ 1) && validate_exprs(exp->v.Dict.values, Load, /*null_ok=*/ 0); case Set_kind: return validate_exprs(exp->v.Set.elts, Load, 0); #define COMP(NAME) \ case NAME ## _kind: \ return validate_comprehension(exp->v.NAME.generators) && \ validate_expr(exp->v.NAME.elt, Load); COMP(ListComp) COMP(SetComp) COMP(GeneratorExp) #undef COMP case DictComp_kind: return validate_comprehension(exp->v.DictComp.generators) && validate_expr(exp->v.DictComp.key, Load) && validate_expr(exp->v.DictComp.value, Load); case Yield_kind: return !exp->v.Yield.value || validate_expr(exp->v.Yield.value, Load); case YieldFrom_kind: return validate_expr(exp->v.YieldFrom.value, Load); case Await_kind: return validate_expr(exp->v.Await.value, Load); case Compare_kind: if (!asdl_seq_LEN(exp->v.Compare.comparators)) { PyErr_SetString(PyExc_ValueError, "Compare with no comparators"); return 0; } if (asdl_seq_LEN(exp->v.Compare.comparators) != asdl_seq_LEN(exp->v.Compare.ops)) { 
PyErr_SetString(PyExc_ValueError, "Compare has a different number " "of comparators and operands"); return 0; } return validate_exprs(exp->v.Compare.comparators, Load, 0) && validate_expr(exp->v.Compare.left, Load); case Call_kind: return validate_expr(exp->v.Call.func, Load) && validate_exprs(exp->v.Call.args, Load, 0) && validate_keywords(exp->v.Call.keywords); case Constant_kind: if (!validate_constant(exp->v.Constant.value)) { PyErr_Format(PyExc_TypeError, "got an invalid type in Constant: %s", Py_TYPE(exp->v.Constant.value)->tp_name); return 0; } return 1; case Num_kind: { PyObject *n = exp->v.Num.n; if (!PyLong_CheckExact(n) && !PyFloat_CheckExact(n) && !PyComplex_CheckExact(n)) { PyErr_SetString(PyExc_TypeError, "non-numeric type in Num"); return 0; } return 1; } case Str_kind: { PyObject *s = exp->v.Str.s; if (!PyUnicode_CheckExact(s)) { PyErr_SetString(PyExc_TypeError, "non-string type in Str"); return 0; } return 1; } case JoinedStr_kind: return validate_exprs(exp->v.JoinedStr.values, Load, 0); case FormattedValue_kind: if (validate_expr(exp->v.FormattedValue.value, Load) == 0) return 0; if (exp->v.FormattedValue.format_spec) return validate_expr(exp->v.FormattedValue.format_spec, Load); return 1; case Bytes_kind: { PyObject *b = exp->v.Bytes.s; if (!PyBytes_CheckExact(b)) { PyErr_SetString(PyExc_TypeError, "non-bytes type in Bytes"); return 0; } return 1; } case Attribute_kind: return validate_expr(exp->v.Attribute.value, Load); case Subscript_kind: return validate_slice(exp->v.Subscript.slice) && validate_expr(exp->v.Subscript.value, Load); case Starred_kind: return validate_expr(exp->v.Starred.value, ctx); case List_kind: return validate_exprs(exp->v.List.elts, ctx, 0); case Tuple_kind: return validate_exprs(exp->v.Tuple.elts, ctx, 0); /* These last cases don't have any checking. 
*/ case Name_kind: case NameConstant_kind: case Ellipsis_kind: return 1; default: PyErr_SetString(PyExc_SystemError, "unexpected expression"); return 0; } } static int validate_nonempty_seq(asdl_seq *seq, const char *what, const char *owner) { if (asdl_seq_LEN(seq)) return 1; PyErr_Format(PyExc_ValueError, "empty %s on %s", what, owner); return 0; } static int validate_assignlist(asdl_seq *targets, expr_context_ty ctx) { return validate_nonempty_seq(targets, "targets", ctx == Del ? "Delete" : "Assign") && validate_exprs(targets, ctx, 0); } static int validate_body(asdl_seq *body, const char *owner) { return validate_nonempty_seq(body, "body", owner) && validate_stmts(body); } static int validate_stmt(stmt_ty stmt) { int i; switch (stmt->kind) { case FunctionDef_kind: return validate_body(stmt->v.FunctionDef.body, "FunctionDef") && validate_arguments(stmt->v.FunctionDef.args) && validate_exprs(stmt->v.FunctionDef.decorator_list, Load, 0) && (!stmt->v.FunctionDef.returns || validate_expr(stmt->v.FunctionDef.returns, Load)); case ClassDef_kind: return validate_body(stmt->v.ClassDef.body, "ClassDef") && validate_exprs(stmt->v.ClassDef.bases, Load, 0) && validate_keywords(stmt->v.ClassDef.keywords) && validate_exprs(stmt->v.ClassDef.decorator_list, Load, 0); case Return_kind: return !stmt->v.Return.value || validate_expr(stmt->v.Return.value, Load); case Delete_kind: return validate_assignlist(stmt->v.Delete.targets, Del); case Assign_kind: return validate_assignlist(stmt->v.Assign.targets, Store) && validate_expr(stmt->v.Assign.value, Load); case AugAssign_kind: return validate_expr(stmt->v.AugAssign.target, Store) && validate_expr(stmt->v.AugAssign.value, Load); case AnnAssign_kind: if (stmt->v.AnnAssign.target->kind != Name_kind && stmt->v.AnnAssign.simple) { PyErr_SetString(PyExc_TypeError, "AnnAssign with simple non-Name target"); return 0; } return validate_expr(stmt->v.AnnAssign.target, Store) && (!stmt->v.AnnAssign.value || validate_expr(stmt->v.AnnAssign.value, 
Load)) && validate_expr(stmt->v.AnnAssign.annotation, Load); case For_kind: return validate_expr(stmt->v.For.target, Store) && validate_expr(stmt->v.For.iter, Load) && validate_body(stmt->v.For.body, "For") && validate_stmts(stmt->v.For.orelse); case AsyncFor_kind: return validate_expr(stmt->v.AsyncFor.target, Store) && validate_expr(stmt->v.AsyncFor.iter, Load) && validate_body(stmt->v.AsyncFor.body, "AsyncFor") && validate_stmts(stmt->v.AsyncFor.orelse); case While_kind: return validate_expr(stmt->v.While.test, Load) && validate_body(stmt->v.While.body, "While") && validate_stmts(stmt->v.While.orelse); case If_kind: return validate_expr(stmt->v.If.test, Load) && validate_body(stmt->v.If.body, "If") && validate_stmts(stmt->v.If.orelse); case With_kind: if (!validate_nonempty_seq(stmt->v.With.items, "items", "With")) return 0; for (i = 0; i < asdl_seq_LEN(stmt->v.With.items); i++) { withitem_ty item = asdl_seq_GET(stmt->v.With.items, i); if (!validate_expr(item->context_expr, Load) || (item->optional_vars && !validate_expr(item->optional_vars, Store))) return 0; } return validate_body(stmt->v.With.body, "With"); case AsyncWith_kind: if (!validate_nonempty_seq(stmt->v.AsyncWith.items, "items", "AsyncWith")) return 0; for (i = 0; i < asdl_seq_LEN(stmt->v.AsyncWith.items); i++) { withitem_ty item = asdl_seq_GET(stmt->v.AsyncWith.items, i); if (!validate_expr(item->context_expr, Load) || (item->optional_vars && !validate_expr(item->optional_vars, Store))) return 0; } return validate_body(stmt->v.AsyncWith.body, "AsyncWith"); case Raise_kind: if (stmt->v.Raise.exc) { return validate_expr(stmt->v.Raise.exc, Load) && (!stmt->v.Raise.cause || validate_expr(stmt->v.Raise.cause, Load)); } if (stmt->v.Raise.cause) { PyErr_SetString(PyExc_ValueError, "Raise with cause but no exception"); return 0; } return 1; case Try_kind: if (!validate_body(stmt->v.Try.body, "Try")) return 0; if (!asdl_seq_LEN(stmt->v.Try.handlers) && !asdl_seq_LEN(stmt->v.Try.finalbody)) { 
PyErr_SetString(PyExc_ValueError, "Try has neither except handlers nor finalbody"); return 0; } if (!asdl_seq_LEN(stmt->v.Try.handlers) && asdl_seq_LEN(stmt->v.Try.orelse)) { PyErr_SetString(PyExc_ValueError, "Try has orelse but no except handlers"); return 0; } for (i = 0; i < asdl_seq_LEN(stmt->v.Try.handlers); i++) { excepthandler_ty handler = asdl_seq_GET(stmt->v.Try.handlers, i); if ((handler->v.ExceptHandler.type && !validate_expr(handler->v.ExceptHandler.type, Load)) || !validate_body(handler->v.ExceptHandler.body, "ExceptHandler")) return 0; } return (!asdl_seq_LEN(stmt->v.Try.finalbody) || validate_stmts(stmt->v.Try.finalbody)) && (!asdl_seq_LEN(stmt->v.Try.orelse) || validate_stmts(stmt->v.Try.orelse)); case Assert_kind: return validate_expr(stmt->v.Assert.test, Load) && (!stmt->v.Assert.msg || validate_expr(stmt->v.Assert.msg, Load)); case Import_kind: return validate_nonempty_seq(stmt->v.Import.names, "names", "Import"); case ImportFrom_kind: if (stmt->v.ImportFrom.level < 0) { PyErr_SetString(PyExc_ValueError, "Negative ImportFrom level"); return 0; } return validate_nonempty_seq(stmt->v.ImportFrom.names, "names", "ImportFrom"); case Global_kind: return validate_nonempty_seq(stmt->v.Global.names, "names", "Global"); case Nonlocal_kind: return validate_nonempty_seq(stmt->v.Nonlocal.names, "names", "Nonlocal"); case Expr_kind: return validate_expr(stmt->v.Expr.value, Load); case AsyncFunctionDef_kind: return validate_body(stmt->v.AsyncFunctionDef.body, "AsyncFunctionDef") && validate_arguments(stmt->v.AsyncFunctionDef.args) && validate_exprs(stmt->v.AsyncFunctionDef.decorator_list, Load, 0) && (!stmt->v.AsyncFunctionDef.returns || validate_expr(stmt->v.AsyncFunctionDef.returns, Load)); case Pass_kind: case Break_kind: case Continue_kind: return 1; default: PyErr_SetString(PyExc_SystemError, "unexpected statement"); return 0; } } static int validate_stmts(asdl_seq *seq) { int i; for (i = 0; i < asdl_seq_LEN(seq); i++) { stmt_ty stmt = asdl_seq_GET(seq, 
i); if (stmt) { if (!validate_stmt(stmt)) return 0; } else { PyErr_SetString(PyExc_ValueError, "None disallowed in statement list"); return 0; } } return 1; } static int validate_exprs(asdl_seq *exprs, expr_context_ty ctx, int null_ok) { int i; for (i = 0; i < asdl_seq_LEN(exprs); i++) { expr_ty expr = asdl_seq_GET(exprs, i); if (expr) { if (!validate_expr(expr, ctx)) return 0; } else if (!null_ok) { PyErr_SetString(PyExc_ValueError, "None disallowed in expression list"); return 0; } } return 1; } int Ta3AST_Validate(mod_ty mod) { int res = 0; switch (mod->kind) { case Module_kind: res = validate_stmts(mod->v.Module.body); break; case Interactive_kind: res = validate_stmts(mod->v.Interactive.body); break; case Expression_kind: res = validate_expr(mod->v.Expression.body, Load); break; case Suite_kind: PyErr_SetString(PyExc_ValueError, "Suite is not valid in the CPython compiler"); break; default: PyErr_SetString(PyExc_SystemError, "impossible module node"); res = 0; break; } return res; } /* This is done here, so defines like "test" don't interfere with AST use above. */ #include "grammar.h" #include "parsetok.h" #include "graminit.h" /* Data structure used internally */ struct compiling { PyArena *c_arena; /* Arena for allocating memory. */ PyObject *c_filename; /* filename */ PyObject *c_normalize; /* Normalization function from unicodedata. */ PyObject *c_normalize_args; /* Normalization argument tuple. 
*/ int c_feature_version; /* Latest minior version of Python for allowed features */ }; static asdl_seq *seq_for_testlist(struct compiling *, const node *); static expr_ty ast_for_expr(struct compiling *, const node *); static stmt_ty ast_for_stmt(struct compiling *, const node *); static asdl_seq *ast_for_suite(struct compiling *, const node *); static asdl_seq *ast_for_exprlist(struct compiling *, const node *, expr_context_ty); static expr_ty ast_for_testlist(struct compiling *, const node *); static stmt_ty ast_for_classdef(struct compiling *, const node *, asdl_seq *); static stmt_ty ast_for_with_stmt(struct compiling *, const node *, int); static stmt_ty ast_for_for_stmt(struct compiling *, const node *, int); /* Note different signature for ast_for_call */ static expr_ty ast_for_call(struct compiling *, const node *, expr_ty); static PyObject *parsenumber(struct compiling *, const char *); static expr_ty parsestrplus(struct compiling *, const node *n); #define COMP_GENEXP 0 #define COMP_LISTCOMP 1 #define COMP_SETCOMP 2 static int init_normalization(struct compiling *c) { PyObject *m = PyImport_ImportModuleNoBlock("unicodedata"); if (!m) return 0; c->c_normalize = PyObject_GetAttrString(m, "normalize"); Py_DECREF(m); if (!c->c_normalize) return 0; c->c_normalize_args = Py_BuildValue("(sN)", "NFKC", Py_None); if (!c->c_normalize_args) { Py_CLEAR(c->c_normalize); return 0; } PyTuple_SET_ITEM(c->c_normalize_args, 1, NULL); return 1; } static identifier new_identifier(const char *n, struct compiling *c) { PyObject *id = PyUnicode_DecodeUTF8(n, strlen(n), NULL); if (!id) return NULL; /* PyUnicode_DecodeUTF8 should always return a ready string. */ assert(PyUnicode_IS_READY(id)); /* Check whether there are non-ASCII characters in the identifier; if so, normalize to NFKC. 
*/ if (!PyUnicode_IS_ASCII(id)) { PyObject *id2; if (!c->c_normalize && !init_normalization(c)) { Py_DECREF(id); return NULL; } PyTuple_SET_ITEM(c->c_normalize_args, 1, id); id2 = PyObject_Call(c->c_normalize, c->c_normalize_args, NULL); Py_DECREF(id); if (!id2) return NULL; id = id2; } PyUnicode_InternInPlace(&id); if (PyArena_AddPyObject(c->c_arena, id) < 0) { Py_DECREF(id); return NULL; } return id; } #define NEW_IDENTIFIER(n) new_identifier(STR(n), c) static string new_type_comment(const char *s, struct compiling *c) { return PyUnicode_DecodeUTF8(s, strlen(s), NULL); } #define NEW_TYPE_COMMENT(n) new_type_comment(STR(n), c) static int ast_error(struct compiling *c, const node *n, const char *errmsg) { PyObject *value, *errstr, *loc, *tmp; loc = PyErr_ProgramTextObject(c->c_filename, LINENO(n)); if (!loc) { Py_INCREF(Py_None); loc = Py_None; } tmp = Py_BuildValue("(OiiN)", c->c_filename, LINENO(n), n->n_col_offset, loc); if (!tmp) return 0; errstr = PyUnicode_FromString(errmsg); if (!errstr) { Py_DECREF(tmp); return 0; } value = PyTuple_Pack(2, errstr, tmp); Py_DECREF(errstr); Py_DECREF(tmp); if (value) { PyErr_SetObject(PyExc_SyntaxError, value); Py_DECREF(value); } return 0; } /* num_stmts() returns number of contained statements. Use this routine to determine how big a sequence is needed for the statements in a parse tree. Its raison d'etre is this bit of grammar: stmt: simple_stmt | compound_stmt simple_stmt: small_stmt (';' small_stmt)* [';'] NEWLINE A simple_stmt can contain multiple small_stmt elements joined by semicolons. If the arg is a simple_stmt, the number of small_stmt elements is returned. 
*/ static int num_stmts(const node *n) { int i, l; node *ch; switch (TYPE(n)) { case single_input: if (TYPE(CHILD(n, 0)) == NEWLINE) return 0; else return num_stmts(CHILD(n, 0)); case file_input: l = 0; for (i = 0; i < NCH(n); i++) { ch = CHILD(n, i); if (TYPE(ch) == stmt) l += num_stmts(ch); } return l; case stmt: return num_stmts(CHILD(n, 0)); case compound_stmt: return 1; case simple_stmt: return NCH(n) / 2; /* Divide by 2 to remove count of semi-colons */ case suite: /* suite: simple_stmt | NEWLINE [TYPE_COMMENT NEWLINE] INDENT stmt+ DEDENT */ if (NCH(n) == 1) return num_stmts(CHILD(n, 0)); else { i = 2; l = 0; if (TYPE(CHILD(n, 1)) == TYPE_COMMENT) i += 2; for (; i < (NCH(n) - 1); i++) l += num_stmts(CHILD(n, i)); return l; } default: { char buf[128]; sprintf(buf, "Non-statement found: %d %d", TYPE(n), NCH(n)); Py_FatalError(buf); } } assert(0); return 0; } /* Transform the CST rooted at node * to the appropriate AST */ mod_ty Ta3AST_FromNodeObject(const node *n, PyCompilerFlags *flags, PyObject *filename, int feature_version, PyArena *arena) { int i, j, k, num; asdl_seq *stmts = NULL; asdl_seq *type_ignores = NULL; stmt_ty s; node *ch; struct compiling c; mod_ty res = NULL; asdl_seq *argtypes = NULL; expr_ty ret, arg; c.c_arena = arena; /* borrowed reference */ c.c_filename = filename; c.c_normalize = NULL; c.c_normalize_args = NULL; c.c_feature_version = feature_version; if (TYPE(n) == encoding_decl) n = CHILD(n, 0); k = 0; switch (TYPE(n)) { case file_input: stmts = _Ta3_asdl_seq_new(num_stmts(n), arena); if (!stmts) goto out; for (i = 0; i < NCH(n) - 1; i++) { ch = CHILD(n, i); if (TYPE(ch) == NEWLINE) continue; REQ(ch, stmt); num = num_stmts(ch); if (num == 1) { s = ast_for_stmt(&c, ch); if (!s) goto out; asdl_seq_SET(stmts, k++, s); } else { ch = CHILD(ch, 0); REQ(ch, simple_stmt); for (j = 0; j < num; j++) { s = ast_for_stmt(&c, CHILD(ch, j * 2)); if (!s) goto out; asdl_seq_SET(stmts, k++, s); } } } /* Type ignores are stored under the ENDMARKER in 
file_input. */ ch = CHILD(n, NCH(n) - 1); REQ(ch, ENDMARKER); num = NCH(ch); type_ignores = _Ta3_asdl_seq_new(num, arena); if (!type_ignores) goto out; for (i = 0; i < num; i++) { type_ignore_ty ti = TypeIgnore(LINENO(CHILD(ch, i)), arena); if (!ti) goto out; asdl_seq_SET(type_ignores, i, ti); } res = Module(stmts, type_ignores, arena); break; case eval_input: { expr_ty testlist_ast; /* XXX Why not comp_for here? */ testlist_ast = ast_for_testlist(&c, CHILD(n, 0)); if (!testlist_ast) goto out; res = Expression(testlist_ast, arena); break; } case single_input: if (TYPE(CHILD(n, 0)) == NEWLINE) { stmts = _Ta3_asdl_seq_new(1, arena); if (!stmts) goto out; asdl_seq_SET(stmts, 0, Pass(n->n_lineno, n->n_col_offset, arena)); if (!asdl_seq_GET(stmts, 0)) goto out; res = Interactive(stmts, arena); } else { n = CHILD(n, 0); num = num_stmts(n); stmts = _Ta3_asdl_seq_new(num, arena); if (!stmts) goto out; if (num == 1) { s = ast_for_stmt(&c, n); if (!s) goto out; asdl_seq_SET(stmts, 0, s); } else { /* Only a simple_stmt can contain multiple statements. 
*/ REQ(n, simple_stmt); for (i = 0; i < NCH(n); i += 2) { if (TYPE(CHILD(n, i)) == NEWLINE) break; s = ast_for_stmt(&c, CHILD(n, i)); if (!s) goto out; asdl_seq_SET(stmts, i / 2, s); } } res = Interactive(stmts, arena); } break; case func_type_input: n = CHILD(n, 0); REQ(n, func_type); if (TYPE(CHILD(n, 1)) == typelist) { ch = CHILD(n, 1); /* this is overly permissive -- we don't pay any attention to * stars on the args -- just parse them into an ordered list */ num = 0; for (i = 0; i < NCH(ch); i++) { if (TYPE(CHILD(ch, i)) == test) num++; } argtypes = _Ta3_asdl_seq_new(num, arena); j = 0; for (i = 0; i < NCH(ch); i++) { if (TYPE(CHILD(ch, i)) == test) { arg = ast_for_expr(&c, CHILD(ch, i)); if (!arg) goto out; asdl_seq_SET(argtypes, j++, arg); } } } else argtypes = _Ta3_asdl_seq_new(0, arena); ret = ast_for_expr(&c, CHILD(n, NCH(n) - 1)); if (!ret) goto out; res = FunctionType(argtypes, ret, arena); break; default: PyErr_Format(PyExc_SystemError, "invalid node %d for Ta3AST_FromNode", TYPE(n)); goto out; } out: if (c.c_normalize) { Py_DECREF(c.c_normalize); PyTuple_SET_ITEM(c.c_normalize_args, 1, NULL); Py_DECREF(c.c_normalize_args); } return res; } mod_ty Ta3AST_FromNode(const node *n, PyCompilerFlags *flags, const char *filename_str, int feature_version, PyArena *arena) { mod_ty mod; PyObject *filename; filename = PyUnicode_DecodeFSDefault(filename_str); if (filename == NULL) return NULL; mod = Ta3AST_FromNodeObject(n, flags, filename, feature_version, arena); Py_DECREF(filename); return mod; } /* Return the AST repr. of the operator represented as syntax (|, ^, etc.) 
*/

/* Map a binary-operator token to its AST operator_ty.
   Returns (operator_ty)0 on error with an exception set. */
static operator_ty
get_operator(struct compiling *c, const node *n)
{
    switch (TYPE(n)) {
        case VBAR:
            return BitOr;
        case CIRCUMFLEX:
            return BitXor;
        case AMPER:
            return BitAnd;
        case LEFTSHIFT:
            return LShift;
        case RIGHTSHIFT:
            return RShift;
        case PLUS:
            return Add;
        case MINUS:
            return Sub;
        case STAR:
            return Mult;
        case AT:
            /* '@' (matrix multiply) is gated on the target feature version. */
            if (c->c_feature_version < 5) {
                ast_error(c, n,
                          "The '@' operator is only supported in Python 3.5 and greater");
                return (operator_ty)0;
            }
            return MatMult;
        case SLASH:
            return Div;
        case DOUBLESLASH:
            return FloorDiv;
        case PERCENT:
            return Mod;
        default:
            return (operator_ty)0;
    }
}

/* Names that may never be assignment targets. */
static const char * const FORBIDDEN[] = {
    "None",
    "True",
    "False",
    NULL,
};

/* Return 1 (and set SyntaxError) if `name` is forbidden as an assignment
   target.  full_checks=0 skips the FORBIDDEN table (only __debug__ is
   checked); callers use that when the parser already rejects keywords. */
static int
forbidden_name(struct compiling *c, identifier name, const node *n,
               int full_checks)
{
    assert(PyUnicode_Check(name));
    if (PyUnicode_CompareWithASCIIString(name, "__debug__") == 0) {
        ast_error(c, n, "assignment to keyword");
        return 1;
    }
    if (full_checks) {
        const char * const *p;
        for (p = FORBIDDEN; *p; p++) {
            if (PyUnicode_CompareWithASCIIString(name, *p) == 0) {
                ast_error(c, n, "assignment to keyword");
                return 1;
            }
        }
    }
    return 0;
}

/* Set the context ctx for expr_ty e, recursively traversing e.

   Only sets context for expr kinds that "can appear in assignment context"
   (according to ../Parser/Python.asdl).  For other expr kinds, it sets
   an appropriate syntax error and returns false.
*/

static int
set_context(struct compiling *c, expr_ty e, expr_context_ty ctx, const node *n)
{
    /* Set when e is a List/Tuple so the elements get the context too. */
    asdl_seq *s = NULL;
    /* If a particular expression type can't be used for assign / delete,
       set expr_name to its name and an error message will be generated.
    */
    const char* expr_name = NULL;

    /* The ast defines augmented store and load contexts, but the
       implementation here doesn't actually use them.  The code may be
       a little more complex than necessary as a result.  It also means
       that expressions in an augmented assignment have a Store context.
       Consider restructuring so that augmented assignment uses
       set_context(), too.
    */
    assert(ctx != AugStore && ctx != AugLoad);

    switch (e->kind) {
        case Attribute_kind:
            e->v.Attribute.ctx = ctx;
            if (ctx == Store && forbidden_name(c, e->v.Attribute.attr, n, 1))
                return 0;
            break;
        case Subscript_kind:
            e->v.Subscript.ctx = ctx;
            break;
        case Starred_kind:
            e->v.Starred.ctx = ctx;
            if (!set_context(c, e->v.Starred.value, ctx, n))
                return 0;
            break;
        case Name_kind:
            if (ctx == Store) {
                if (forbidden_name(c, e->v.Name.id, n, 0))
                    return 0; /* forbidden_name() calls ast_error() */
            }
            e->v.Name.ctx = ctx;
            break;
        case List_kind:
            e->v.List.ctx = ctx;
            s = e->v.List.elts;
            break;
        case Tuple_kind:
            e->v.Tuple.ctx = ctx;
            s = e->v.Tuple.elts;
            break;
        case Lambda_kind:
            expr_name = "lambda";
            break;
        case Call_kind:
            expr_name = "function call";
            break;
        case BoolOp_kind:
        case BinOp_kind:
        case UnaryOp_kind:
            expr_name = "operator";
            break;
        case GeneratorExp_kind:
            expr_name = "generator expression";
            break;
        case Yield_kind:
        case YieldFrom_kind:
            expr_name = "yield expression";
            break;
        case Await_kind:
            expr_name = "await expression";
            break;
        case ListComp_kind:
            expr_name = "list comprehension";
            break;
        case SetComp_kind:
            expr_name = "set comprehension";
            break;
        case DictComp_kind:
            expr_name = "dict comprehension";
            break;
        case Dict_kind:
        case Set_kind:
        case Num_kind:
        case Str_kind:
        case Bytes_kind:
        case JoinedStr_kind:
        case FormattedValue_kind:
            expr_name = "literal";
            break;
        case NameConstant_kind:
            expr_name = "keyword";
            break;
        case Ellipsis_kind:
            expr_name = "Ellipsis";
            break;
        case Compare_kind:
            expr_name = "comparison";
            break;
        case IfExp_kind:
            expr_name = "conditional expression";
            break;
        default:
            PyErr_Format(PyExc_SystemError,
                         "unexpected expression in assignment %d (line %d)",
                         e->kind, e->lineno);
            return 0;
    }
    /* Check for error string set by switch */
    if (expr_name) {
        char buf[300];
        PyOS_snprintf(buf, sizeof(buf),
                      "can't %s %s",
                      ctx == Store ?
"assign to" : "delete", expr_name); return ast_error(c, n, buf); } /* If the LHS is a list or tuple, we need to set the assignment context for all the contained elements. */ if (s) { int i; for (i = 0; i < asdl_seq_LEN(s); i++) { if (!set_context(c, (expr_ty)asdl_seq_GET(s, i), ctx, n)) return 0; } } return 1; } static operator_ty ast_for_augassign(struct compiling *c, const node *n) { REQ(n, augassign); n = CHILD(n, 0); switch (STR(n)[0]) { case '+': return Add; case '-': return Sub; case '/': if (STR(n)[1] == '/') return FloorDiv; else return Div; case '%': return Mod; case '<': return LShift; case '>': return RShift; case '&': return BitAnd; case '^': return BitXor; case '|': return BitOr; case '*': if (STR(n)[1] == '*') return Pow; else return Mult; case '@': if (c->c_feature_version < 5) { ast_error(c, n, "The '@' operator is only supported in Python 3.5 and greater"); return (operator_ty)0; } return MatMult; default: PyErr_Format(PyExc_SystemError, "invalid augassign: %s", STR(n)); return (operator_ty)0; } } static cmpop_ty ast_for_comp_op(struct compiling *c, const node *n) { /* comp_op: '<'|'>'|'=='|'>='|'<='|'!='|'in'|'not' 'in'|'is' |'is' 'not' */ REQ(n, comp_op); if (NCH(n) == 1) { n = CHILD(n, 0); switch (TYPE(n)) { case LESS: return Lt; case GREATER: return Gt; case EQEQUAL: /* == */ return Eq; case LESSEQUAL: return LtE; case GREATEREQUAL: return GtE; case NOTEQUAL: return NotEq; case NAME: if (strcmp(STR(n), "in") == 0) return In; if (strcmp(STR(n), "is") == 0) return Is; default: PyErr_Format(PyExc_SystemError, "invalid comp_op: %s", STR(n)); return (cmpop_ty)0; } } else if (NCH(n) == 2) { /* handle "not in" and "is not" */ switch (TYPE(CHILD(n, 0))) { case NAME: if (strcmp(STR(CHILD(n, 1)), "in") == 0) return NotIn; if (strcmp(STR(CHILD(n, 0)), "is") == 0) return IsNot; default: PyErr_Format(PyExc_SystemError, "invalid comp_op: %s %s", STR(CHILD(n, 0)), STR(CHILD(n, 1))); return (cmpop_ty)0; } } PyErr_Format(PyExc_SystemError, "invalid comp_op: has 
%d children", NCH(n)); return (cmpop_ty)0; } static asdl_seq * seq_for_testlist(struct compiling *c, const node *n) { /* testlist: test (',' test)* [','] testlist_star_expr: test|star_expr (',' test|star_expr)* [','] */ asdl_seq *seq; expr_ty expression; int i; assert(TYPE(n) == testlist || TYPE(n) == testlist_star_expr || TYPE(n) == testlist_comp); seq = _Ta3_asdl_seq_new((NCH(n) + 1) / 2, c->c_arena); if (!seq) return NULL; for (i = 0; i < NCH(n); i += 2) { const node *ch = CHILD(n, i); assert(TYPE(ch) == test || TYPE(ch) == test_nocond || TYPE(ch) == star_expr); expression = ast_for_expr(c, ch); if (!expression) return NULL; assert(i / 2 < seq->size); asdl_seq_SET(seq, i / 2, expression); } return seq; } static arg_ty ast_for_arg(struct compiling *c, const node *n) { identifier name; expr_ty annotation = NULL; node *ch; arg_ty ret; assert(TYPE(n) == tfpdef || TYPE(n) == vfpdef); ch = CHILD(n, 0); name = NEW_IDENTIFIER(ch); if (!name) return NULL; if (forbidden_name(c, name, ch, 0)) return NULL; if (NCH(n) == 3 && TYPE(CHILD(n, 1)) == COLON) { annotation = ast_for_expr(c, CHILD(n, 2)); if (!annotation) return NULL; } ret = arg(name, annotation, NULL, LINENO(n), n->n_col_offset, c->c_arena); if (!ret) return NULL; return ret; } /* returns -1 if failed to handle keyword only arguments returns new position to keep processing if successful (',' tfpdef ['=' test])* ^^^ start pointing here */ static int handle_keywordonly_args(struct compiling *c, const node *n, int start, asdl_seq *kwonlyargs, asdl_seq *kwdefaults) { PyObject *argname; node *ch; expr_ty expression, annotation; arg_ty arg; int i = start; int j = 0; /* index for kwdefaults and kwonlyargs */ if (kwonlyargs == NULL) { ast_error(c, CHILD(n, start), "named arguments must follow bare *"); return -1; } assert(kwdefaults != NULL); while (i < NCH(n)) { ch = CHILD(n, i); switch (TYPE(ch)) { case vfpdef: case tfpdef: if (i + 1 < NCH(n) && TYPE(CHILD(n, i + 1)) == EQUAL) { expression = ast_for_expr(c, CHILD(n, i + 
2)); if (!expression) goto error; asdl_seq_SET(kwdefaults, j, expression); i += 2; /* '=' and test */ } else { /* setting NULL if no default value exists */ asdl_seq_SET(kwdefaults, j, NULL); } if (NCH(ch) == 3) { /* ch is NAME ':' test */ annotation = ast_for_expr(c, CHILD(ch, 2)); if (!annotation) goto error; } else { annotation = NULL; } ch = CHILD(ch, 0); argname = NEW_IDENTIFIER(ch); if (!argname) goto error; if (forbidden_name(c, argname, ch, 0)) goto error; arg = arg(argname, annotation, NULL, LINENO(ch), ch->n_col_offset, c->c_arena); if (!arg) goto error; asdl_seq_SET(kwonlyargs, j++, arg); i += 1; /* the name */ if (i < NCH(n) && TYPE(CHILD(n, i)) == COMMA) i += 1; /* the comma, if present */ break; case TYPE_COMMENT: /* arg will be equal to the last argument processed */ arg->type_comment = NEW_TYPE_COMMENT(ch); i += 1; break; case DOUBLESTAR: return i; default: ast_error(c, ch, "unexpected node"); goto error; } } return i; error: return -1; } /* Create AST for argument list. */ static arguments_ty ast_for_arguments(struct compiling *c, const node *n) { /* This function handles both typedargslist (function definition) and varargslist (lambda definition). 
parameters: '(' [typedargslist] ')' typedargslist: (tfpdef ['=' test] (',' tfpdef ['=' test])* [',' [ '*' [tfpdef] (',' tfpdef ['=' test])* [',' ['**' tfpdef [',']]] | '**' tfpdef [',']]] | '*' [tfpdef] (',' tfpdef ['=' test])* [',' ['**' tfpdef [',']]] | '**' tfpdef [',']) tfpdef: NAME [':' test] varargslist: (vfpdef ['=' test] (',' vfpdef ['=' test])* [',' [ '*' [vfpdef] (',' vfpdef ['=' test])* [',' ['**' vfpdef [',']]] | '**' vfpdef [',']]] | '*' [vfpdef] (',' vfpdef ['=' test])* [',' ['**' vfpdef [',']]] | '**' vfpdef [','] ) vfpdef: NAME */ int i, j, k, nposargs = 0, nkwonlyargs = 0; int nposdefaults = 0, found_default = 0; asdl_seq *posargs, *posdefaults, *kwonlyargs, *kwdefaults; arg_ty vararg = NULL, kwarg = NULL; arg_ty arg; node *ch; if (TYPE(n) == parameters) { if (NCH(n) == 2) /* () as argument list */ return arguments(NULL, NULL, NULL, NULL, NULL, NULL, c->c_arena); n = CHILD(n, 1); } assert(TYPE(n) == typedargslist || TYPE(n) == varargslist); /* First count the number of positional args & defaults. The variable i is the loop index for this for loop and the next. The next loop picks up where the first leaves off. */ for (i = 0; i < NCH(n); i++) { ch = CHILD(n, i); if (TYPE(ch) == STAR) { /* skip star */ i++; if (i < NCH(n) && /* skip argument following star */ (TYPE(CHILD(n, i)) == tfpdef || TYPE(CHILD(n, i)) == vfpdef)) { i++; } break; } if (TYPE(ch) == DOUBLESTAR) break; if (TYPE(ch) == vfpdef || TYPE(ch) == tfpdef) nposargs++; if (TYPE(ch) == EQUAL) nposdefaults++; } /* count the number of keyword only args & defaults for keyword only args */ for ( ; i < NCH(n); ++i) { ch = CHILD(n, i); if (TYPE(ch) == DOUBLESTAR) break; if (TYPE(ch) == tfpdef || TYPE(ch) == vfpdef) nkwonlyargs++; } posargs = (nposargs ? _Ta3_asdl_seq_new(nposargs, c->c_arena) : NULL); if (!posargs && nposargs) return NULL; kwonlyargs = (nkwonlyargs ? _Ta3_asdl_seq_new(nkwonlyargs, c->c_arena) : NULL); if (!kwonlyargs && nkwonlyargs) return NULL; posdefaults = (nposdefaults ? 
_Ta3_asdl_seq_new(nposdefaults, c->c_arena) : NULL); if (!posdefaults && nposdefaults) return NULL; /* The length of kwonlyargs and kwdefaults are same since we set NULL as default for keyword only argument w/o default - we have sequence data structure, but no dictionary */ kwdefaults = (nkwonlyargs ? _Ta3_asdl_seq_new(nkwonlyargs, c->c_arena) : NULL); if (!kwdefaults && nkwonlyargs) return NULL; if (nposargs + nkwonlyargs > 255) { ast_error(c, n, "more than 255 arguments"); return NULL; } /* tfpdef: NAME [':' test] vfpdef: NAME */ i = 0; j = 0; /* index for defaults */ k = 0; /* index for args */ while (i < NCH(n)) { ch = CHILD(n, i); switch (TYPE(ch)) { case tfpdef: case vfpdef: /* XXX Need to worry about checking if TYPE(CHILD(n, i+1)) is anything other than EQUAL or a comma? */ /* XXX Should NCH(n) check be made a separate check? */ if (i + 1 < NCH(n) && TYPE(CHILD(n, i + 1)) == EQUAL) { expr_ty expression = ast_for_expr(c, CHILD(n, i + 2)); if (!expression) return NULL; assert(posdefaults != NULL); asdl_seq_SET(posdefaults, j++, expression); i += 2; found_default = 1; } else if (found_default) { ast_error(c, n, "non-default argument follows default argument"); return NULL; } arg = ast_for_arg(c, ch); if (!arg) return NULL; asdl_seq_SET(posargs, k++, arg); i += 1; /* the name */ if (i < NCH(n) && TYPE(CHILD(n, i)) == COMMA) i += 1; /* the comma, if present */ break; case STAR: if (i+1 >= NCH(n) || (i+2 == NCH(n) && (TYPE(CHILD(n, i+1)) == COMMA || TYPE(CHILD(n, i+1)) == TYPE_COMMENT))) { ast_error(c, CHILD(n, i), "named arguments must follow bare *"); return NULL; } ch = CHILD(n, i+1); /* tfpdef or COMMA */ if (TYPE(ch) == COMMA) { int res = 0; i += 2; /* now follows keyword only arguments */ if (i < NCH(n) && TYPE(CHILD(n, i)) == TYPE_COMMENT) { ast_error(c, CHILD(n, i), "bare * has associated type comment"); return NULL; } res = handle_keywordonly_args(c, n, i, kwonlyargs, kwdefaults); if (res == -1) return NULL; i = res; /* res has new position to process */ 
} else { vararg = ast_for_arg(c, ch); if (!vararg) return NULL; i += 2; /* the star and the name */ if (i < NCH(n) && TYPE(CHILD(n, i)) == COMMA) i += 1; /* the comma, if present */ if (i < NCH(n) && TYPE(CHILD(n, i)) == TYPE_COMMENT) { vararg->type_comment = NEW_TYPE_COMMENT(CHILD(n, i)); i += 1; } if (i < NCH(n) && (TYPE(CHILD(n, i)) == tfpdef || TYPE(CHILD(n, i)) == vfpdef)) { int res = 0; res = handle_keywordonly_args(c, n, i, kwonlyargs, kwdefaults); if (res == -1) return NULL; i = res; /* res has new position to process */ } } break; case DOUBLESTAR: ch = CHILD(n, i+1); /* tfpdef */ assert(TYPE(ch) == tfpdef || TYPE(ch) == vfpdef); kwarg = ast_for_arg(c, ch); if (!kwarg) return NULL; i += 2; /* the double star and the name */ if (i < NCH(n) && TYPE(CHILD(n, i)) == COMMA) i += 1; /* the comma, if present */ break; case TYPE_COMMENT: assert(i); if (kwarg) arg = kwarg; /* arg will be equal to the last argument processed */ arg->type_comment = NEW_TYPE_COMMENT(ch); i += 1; break; default: PyErr_Format(PyExc_SystemError, "unexpected node in varargslist: %d @ %d", TYPE(ch), i); return NULL; } } return arguments(posargs, vararg, kwonlyargs, kwdefaults, kwarg, posdefaults, c->c_arena); } static expr_ty ast_for_dotted_name(struct compiling *c, const node *n) { expr_ty e; identifier id; int lineno, col_offset; int i; REQ(n, dotted_name); lineno = LINENO(n); col_offset = n->n_col_offset; id = NEW_IDENTIFIER(CHILD(n, 0)); if (!id) return NULL; e = Name(id, Load, lineno, col_offset, c->c_arena); if (!e) return NULL; for (i = 2; i < NCH(n); i+=2) { id = NEW_IDENTIFIER(CHILD(n, i)); if (!id) return NULL; e = Attribute(e, id, Load, lineno, col_offset, c->c_arena); if (!e) return NULL; } return e; } static expr_ty ast_for_decorator(struct compiling *c, const node *n) { /* decorator: '@' dotted_name [ '(' [arglist] ')' ] NEWLINE */ expr_ty d = NULL; expr_ty name_expr; REQ(n, decorator); REQ(CHILD(n, 0), AT); REQ(RCHILD(n, -1), NEWLINE); name_expr = ast_for_dotted_name(c, 
CHILD(n, 1)); if (!name_expr) return NULL; if (NCH(n) == 3) { /* No arguments */ d = name_expr; name_expr = NULL; } else if (NCH(n) == 5) { /* Call with no arguments */ d = Call(name_expr, NULL, NULL, LINENO(n), n->n_col_offset, c->c_arena); if (!d) return NULL; name_expr = NULL; } else { d = ast_for_call(c, CHILD(n, 3), name_expr); if (!d) return NULL; name_expr = NULL; } return d; } static asdl_seq* ast_for_decorators(struct compiling *c, const node *n) { asdl_seq* decorator_seq; expr_ty d; int i; REQ(n, decorators); decorator_seq = _Ta3_asdl_seq_new(NCH(n), c->c_arena); if (!decorator_seq) return NULL; for (i = 0; i < NCH(n); i++) { d = ast_for_decorator(c, CHILD(n, i)); if (!d) return NULL; asdl_seq_SET(decorator_seq, i, d); } return decorator_seq; } static stmt_ty ast_for_funcdef_impl(struct compiling *c, const node *n, asdl_seq *decorator_seq, int is_async) { /* funcdef: 'def' NAME parameters ['->' test] ':' [TYPE_COMMENT] suite */ identifier name; arguments_ty args; asdl_seq *body; expr_ty returns = NULL; int name_i = 1; node *tc; string type_comment = NULL; if (is_async && c->c_feature_version < 5) { ast_error(c, n, "Async functions are only supported in Python 3.5 and greater"); return NULL; } REQ(n, funcdef); name = NEW_IDENTIFIER(CHILD(n, name_i)); if (!name) return NULL; if (forbidden_name(c, name, CHILD(n, name_i), 0)) return NULL; args = ast_for_arguments(c, CHILD(n, name_i + 1)); if (!args) return NULL; if (TYPE(CHILD(n, name_i+2)) == RARROW) { returns = ast_for_expr(c, CHILD(n, name_i + 3)); if (!returns) return NULL; name_i += 2; } if (TYPE(CHILD(n, name_i + 3)) == TYPE_COMMENT) { type_comment = NEW_TYPE_COMMENT(CHILD(n, name_i + 3)); name_i += 1; } body = ast_for_suite(c, CHILD(n, name_i + 3)); if (!body) return NULL; if (!type_comment && NCH(CHILD(n, name_i + 3)) > 1) { /* If the function doesn't have a type comment on the same line, check * if the suite has a type comment in it. 
*/ tc = CHILD(CHILD(n, name_i + 3), 1); if (TYPE(tc) == TYPE_COMMENT) type_comment = NEW_TYPE_COMMENT(tc); } if (is_async) return AsyncFunctionDef(name, args, body, decorator_seq, returns, type_comment, LINENO(n), n->n_col_offset, c->c_arena); else return FunctionDef(name, args, body, decorator_seq, returns, type_comment, LINENO(n), n->n_col_offset, c->c_arena); } static stmt_ty ast_for_async_funcdef(struct compiling *c, const node *n, asdl_seq *decorator_seq) { /* async_funcdef: ASYNC funcdef */ REQ(n, async_funcdef); REQ(CHILD(n, 0), ASYNC); REQ(CHILD(n, 1), funcdef); return ast_for_funcdef_impl(c, CHILD(n, 1), decorator_seq, 1 /* is_async */); } static stmt_ty ast_for_funcdef(struct compiling *c, const node *n, asdl_seq *decorator_seq) { /* funcdef: 'def' NAME parameters ['->' test] ':' suite */ return ast_for_funcdef_impl(c, n, decorator_seq, 0 /* is_async */); } static stmt_ty ast_for_async_stmt(struct compiling *c, const node *n) { /* async_stmt: ASYNC (funcdef | with_stmt | for_stmt) */ REQ(n, async_stmt); REQ(CHILD(n, 0), ASYNC); switch (TYPE(CHILD(n, 1))) { case funcdef: return ast_for_funcdef_impl(c, CHILD(n, 1), NULL, 1 /* is_async */); case with_stmt: return ast_for_with_stmt(c, CHILD(n, 1), 1 /* is_async */); case for_stmt: return ast_for_for_stmt(c, CHILD(n, 1), 1 /* is_async */); default: PyErr_Format(PyExc_SystemError, "invalid async stament: %s", STR(CHILD(n, 1))); return NULL; } } static stmt_ty ast_for_decorated(struct compiling *c, const node *n) { /* decorated: decorators (classdef | funcdef | async_funcdef) */ stmt_ty thing = NULL; asdl_seq *decorator_seq = NULL; REQ(n, decorated); decorator_seq = ast_for_decorators(c, CHILD(n, 0)); if (!decorator_seq) return NULL; assert(TYPE(CHILD(n, 1)) == funcdef || TYPE(CHILD(n, 1)) == async_funcdef || TYPE(CHILD(n, 1)) == classdef); if (TYPE(CHILD(n, 1)) == funcdef) { thing = ast_for_funcdef(c, CHILD(n, 1), decorator_seq); } else if (TYPE(CHILD(n, 1)) == classdef) { thing = ast_for_classdef(c, CHILD(n, 
1), decorator_seq); } else if (TYPE(CHILD(n, 1)) == async_funcdef) { thing = ast_for_async_funcdef(c, CHILD(n, 1), decorator_seq); } /* we count the decorators in when talking about the class' or * function's line number */ if (thing) { thing->lineno = LINENO(n); thing->col_offset = n->n_col_offset; } return thing; } static expr_ty ast_for_lambdef(struct compiling *c, const node *n) { /* lambdef: 'lambda' [varargslist] ':' test lambdef_nocond: 'lambda' [varargslist] ':' test_nocond */ arguments_ty args; expr_ty expression; if (NCH(n) == 3) { args = arguments(NULL, NULL, NULL, NULL, NULL, NULL, c->c_arena); if (!args) return NULL; expression = ast_for_expr(c, CHILD(n, 2)); if (!expression) return NULL; } else { args = ast_for_arguments(c, CHILD(n, 1)); if (!args) return NULL; expression = ast_for_expr(c, CHILD(n, 3)); if (!expression) return NULL; } return Lambda(args, expression, LINENO(n), n->n_col_offset, c->c_arena); } static expr_ty ast_for_ifexpr(struct compiling *c, const node *n) { /* test: or_test 'if' or_test 'else' test */ expr_ty expression, body, orelse; assert(NCH(n) == 5); body = ast_for_expr(c, CHILD(n, 0)); if (!body) return NULL; expression = ast_for_expr(c, CHILD(n, 2)); if (!expression) return NULL; orelse = ast_for_expr(c, CHILD(n, 4)); if (!orelse) return NULL; return IfExp(expression, body, orelse, LINENO(n), n->n_col_offset, c->c_arena); } /* Count the number of 'for' loops in a comprehension. Helper for ast_for_comprehension(). 
*/

/* Returns the number of nested 'for' clauses, or -1 on malformed input.
   Iterative goto-based walk down the comp_for/comp_iter chain. */
static int
count_comp_fors(struct compiling *c, const node *n)
{
    int n_fors = 0;
    int is_async;

  count_comp_for:
    is_async = 0;
    n_fors++;
    REQ(n, comp_for);
    if (TYPE(CHILD(n, 0)) == ASYNC) {
        is_async = 1;
    }
    /* ASYNC shifts every child index by one. */
    if (NCH(n) == (5 + is_async)) {
        n = CHILD(n, 4 + is_async);
    }
    else {
        return n_fors;
    }
  count_comp_iter:
    REQ(n, comp_iter);
    n = CHILD(n, 0);
    if (TYPE(n) == comp_for)
        goto count_comp_for;
    else if (TYPE(n) == comp_if) {
        if (NCH(n) == 3) {
            n = CHILD(n, 2);
            goto count_comp_iter;
        }
        else
            return n_fors;
    }

    /* Should never be reached */
    PyErr_SetString(PyExc_SystemError,
                    "logic error in count_comp_fors");
    return -1;
}

/* Count the number of 'if' statements in a comprehension.

   Helper for ast_for_comprehension().
*/

static int
count_comp_ifs(struct compiling *c, const node *n)
{
    int n_ifs = 0;

    while (1) {
        REQ(n, comp_iter);
        if (TYPE(CHILD(n, 0)) == comp_for)
            return n_ifs;
        n = CHILD(n, 0);
        REQ(n, comp_if);
        n_ifs++;
        if (NCH(n) == 2)
            return n_ifs;
        n = CHILD(n, 2);
    }
}

/* Build the sequence of comprehension clauses (targets, iterable, ifs)
   for a comp_for node.  Returns NULL with an exception set on error. */
static asdl_seq *
ast_for_comprehension(struct compiling *c, const node *n)
{
    int i, n_fors;
    asdl_seq *comps;

    n_fors = count_comp_fors(c, n);
    if (n_fors == -1)
        return NULL;

    comps = _Ta3_asdl_seq_new(n_fors, c->c_arena);
    if (!comps)
        return NULL;

    for (i = 0; i < n_fors; i++) {
        comprehension_ty comp;
        asdl_seq *t;
        expr_ty expression, first;
        node *for_ch;
        int is_async = 0;

        REQ(n, comp_for);

        if (TYPE(CHILD(n, 0)) == ASYNC) {
            is_async = 1;
        }

        /* Async comprehensions only allowed in Python 3.6 and greater */
        if (is_async && c->c_feature_version < 6) {
            ast_error(c, n,
                      "Async comprehensions are only supported in Python 3.6 and greater");
            return NULL;
        }

        for_ch = CHILD(n, 1 + is_async);
        t = ast_for_exprlist(c, for_ch, Store);
        if (!t)
            return NULL;
        expression = ast_for_expr(c, CHILD(n, 3 + is_async));
        if (!expression)
            return NULL;

        /* Check the # of children rather than the length of t, since
           (x for x, in ...) has 1 element in t, but still requires a Tuple. */
        first = (expr_ty)asdl_seq_GET(t, 0);
        if (NCH(for_ch) == 1)
            comp = comprehension(first, expression, NULL, is_async, c->c_arena);
        else
            comp = comprehension(Tuple(t, Store, first->lineno,
                                       first->col_offset, c->c_arena),
                                 expression, NULL, is_async, c->c_arena);
        if (!comp)
            return NULL;

        if (NCH(n) == (5 + is_async)) {
            int j, n_ifs;
            asdl_seq *ifs;

            n = CHILD(n, 4 + is_async);
            n_ifs = count_comp_ifs(c, n);
            if (n_ifs == -1)
                return NULL;

            ifs = _Ta3_asdl_seq_new(n_ifs, c->c_arena);
            if (!ifs)
                return NULL;

            for (j = 0; j < n_ifs; j++) {
                REQ(n, comp_iter);
                n = CHILD(n, 0);
                REQ(n, comp_if);

                expression = ast_for_expr(c, CHILD(n, 1));
                if (!expression)
                    return NULL;
                asdl_seq_SET(ifs, j, expression);
                if (NCH(n) == 3)
                    n = CHILD(n, 2);
            }
            /* on exit, must guarantee that n is a comp_for */
            if (TYPE(n) == comp_iter)
                n = CHILD(n, 0);
            comp->ifs = ifs;
        }
        asdl_seq_SET(comps, i, comp);
    }
    return comps;
}

/* Shared builder for generator expressions and list/set comprehensions;
   `type` selects the resulting node kind (COMP_* constants). */
static expr_ty
ast_for_itercomp(struct compiling *c, const node *n, int type)
{
    /* testlist_comp: (test|star_expr)
     *                ( comp_for | (',' (test|star_expr))* [','] ) */
    expr_ty elt;
    asdl_seq *comps;
    node *ch;

    assert(NCH(n) > 1);

    ch = CHILD(n, 0);
    elt = ast_for_expr(c, ch);
    if (!elt)
        return NULL;
    if (elt->kind == Starred_kind) {
        ast_error(c, ch, "iterable unpacking cannot be used in comprehension");
        return NULL;
    }

    comps = ast_for_comprehension(c, CHILD(n, 1));
    if (!comps)
        return NULL;

    if (type == COMP_GENEXP)
        return GeneratorExp(elt, comps, LINENO(n), n->n_col_offset, c->c_arena);
    else if (type == COMP_LISTCOMP)
        return ListComp(elt, comps, LINENO(n), n->n_col_offset, c->c_arena);
    else if (type == COMP_SETCOMP)
        return SetComp(elt, comps, LINENO(n), n->n_col_offset, c->c_arena);
    else
        /* Should never happen */
        return NULL;
}

/* Fills in the key, value pair corresponding to the dict element.  In case
 * of an unpacking, key is NULL.  *i is advanced by the number of ast
 * elements.  Iff successful, nonzero is returned.
*/
static int
ast_for_dictelement(struct compiling *c, const node *n, int *i,
                    expr_ty *key, expr_ty *value)
{
    expr_ty expression;
    if (TYPE(CHILD(n, *i)) == DOUBLESTAR) {
        /* '**' test — a dict unpacking entry: no key, consumes 2 children. */
        assert(NCH(n) - *i >= 2);

        expression = ast_for_expr(c, CHILD(n, *i + 1));
        if (!expression)
            return 0;
        *key = NULL;
        *value = expression;

        *i += 2;
    }
    else {
        /* test ':' test — a regular key/value pair: consumes 3 children. */
        assert(NCH(n) - *i >= 3);

        expression = ast_for_expr(c, CHILD(n, *i));
        if (!expression)
            return 0;
        *key = expression;

        REQ(CHILD(n, *i + 1), COLON);

        expression = ast_for_expr(c, CHILD(n, *i + 2));
        if (!expression)
            return 0;
        *value = expression;

        *i += 3;
    }
    return 1;
}

/* Parse a dict comprehension: the leading element supplies key and value,
   the rest of the node is the comp_for clause chain. */
static expr_ty
ast_for_dictcomp(struct compiling *c, const node *n)
{
    expr_ty key, value;
    asdl_seq *comps;
    int i = 0;

    if (!ast_for_dictelement(c, n, &i, &key, &value))
        return NULL;
    /* A comprehension head cannot be a '**' unpacking, so key is set. */
    assert(key);
    assert(NCH(n) - i >= 1);

    comps = ast_for_comprehension(c, CHILD(n, i));
    if (!comps)
        return NULL;

    return DictComp(key, value, comps, LINENO(n), n->n_col_offset, c->c_arena);
}

/* Parse a dict display {k: v, **m, ...} into a Dict node. */
static expr_ty
ast_for_dictdisplay(struct compiling *c, const node *n)
{
    int i;
    int j;
    int size;
    asdl_seq *keys, *values;

    size = (NCH(n) + 1) / 3; /* +1 in case no trailing comma */
    keys = _Ta3_asdl_seq_new(size, c->c_arena);
    if (!keys)
        return NULL;

    values = _Ta3_asdl_seq_new(size, c->c_arena);
    if (!values)
        return NULL;

    j = 0;
    for (i = 0; i < NCH(n); i++) {
        expr_ty key, value;

        /* ast_for_dictelement advances i past the element it consumed;
           the loop's i++ then skips the separating comma. */
        if (!ast_for_dictelement(c, n, &i, &key, &value))
            return NULL;
        asdl_seq_SET(keys, j, key);
        asdl_seq_SET(values, j, value);

        j++;
    }
    /* Shrink the (over-allocated) sequences to the elements actually seen. */
    keys->size = j;
    values->size = j;
    return Dict(keys, values, LINENO(n), n->n_col_offset, c->c_arena);
}

static expr_ty
ast_for_genexp(struct compiling *c, const node *n)
{
    assert(TYPE(n) == (testlist_comp) || TYPE(n) == (argument));
    return ast_for_itercomp(c, n, COMP_GENEXP);
}

static expr_ty
ast_for_listcomp(struct compiling *c, const node *n)
{
    assert(TYPE(n) == (testlist_comp));
    return ast_for_itercomp(c, n, COMP_LISTCOMP);
}

static expr_ty
ast_for_setcomp(struct compiling *c, const node *n)
{
    assert(TYPE(n) ==
(dictorsetmaker)); return ast_for_itercomp(c, n, COMP_SETCOMP); } static expr_ty ast_for_setdisplay(struct compiling *c, const node *n) { int i; int size; asdl_seq *elts; assert(TYPE(n) == (dictorsetmaker)); size = (NCH(n) + 1) / 2; /* +1 in case no trailing comma */ elts = _Ta3_asdl_seq_new(size, c->c_arena); if (!elts) return NULL; for (i = 0; i < NCH(n); i += 2) { expr_ty expression; expression = ast_for_expr(c, CHILD(n, i)); if (!expression) return NULL; asdl_seq_SET(elts, i / 2, expression); } return Set(elts, LINENO(n), n->n_col_offset, c->c_arena); } static expr_ty ast_for_atom(struct compiling *c, const node *n) { /* atom: '(' [yield_expr|testlist_comp] ')' | '[' [testlist_comp] ']' | '{' [dictmaker|testlist_comp] '}' | NAME | NUMBER | STRING+ | '...' | 'None' | 'True' | 'False' */ node *ch = CHILD(n, 0); switch (TYPE(ch)) { case NAME: { PyObject *name; const char *s = STR(ch); size_t len = strlen(s); if (len >= 4 && len <= 5) { if (!strcmp(s, "None")) return NameConstant(Py_None, LINENO(n), n->n_col_offset, c->c_arena); if (!strcmp(s, "True")) return NameConstant(Py_True, LINENO(n), n->n_col_offset, c->c_arena); if (!strcmp(s, "False")) return NameConstant(Py_False, LINENO(n), n->n_col_offset, c->c_arena); } name = new_identifier(s, c); if (!name) return NULL; /* All names start in Load context, but may later be changed. 
*/ return Name(name, Load, LINENO(n), n->n_col_offset, c->c_arena); } case STRING: { expr_ty str = parsestrplus(c, n); if (!str) { const char *errtype = NULL; if (PyErr_ExceptionMatches(PyExc_UnicodeError)) errtype = "unicode error"; else if (PyErr_ExceptionMatches(PyExc_ValueError)) errtype = "value error"; if (errtype) { char buf[128]; const char *s = NULL; PyObject *type, *value, *tback, *errstr; PyErr_Fetch(&type, &value, &tback); errstr = PyObject_Str(value); if (errstr) s = PyUnicode_AsUTF8(errstr); if (s) { PyOS_snprintf(buf, sizeof(buf), "(%s) %s", errtype, s); } else { PyErr_Clear(); PyOS_snprintf(buf, sizeof(buf), "(%s) unknown error", errtype); } Py_XDECREF(errstr); ast_error(c, n, buf); Py_DECREF(type); Py_XDECREF(value); Py_XDECREF(tback); } return NULL; } return str; } case NUMBER: { PyObject *pynum; const char *s = STR(ch); /* Underscores in numeric literals are only allowed in Python 3.6 or greater */ /* Check for underscores here rather than in parse_number so we can report a line number on error */ if (c->c_feature_version < 6 && strchr(s, '_') != NULL) { ast_error(c, ch, "Underscores in numeric literals are only supported in Python 3.6 and greater"); return NULL; } pynum = parsenumber(c, s); if (!pynum) return NULL; if (PyArena_AddPyObject(c->c_arena, pynum) < 0) { Py_DECREF(pynum); return NULL; } return Num(pynum, LINENO(n), n->n_col_offset, c->c_arena); } case ELLIPSIS: /* Ellipsis */ return Ellipsis(LINENO(n), n->n_col_offset, c->c_arena); case LPAR: /* some parenthesized expressions */ ch = CHILD(n, 1); if (TYPE(ch) == RPAR) return Tuple(NULL, Load, LINENO(n), n->n_col_offset, c->c_arena); if (TYPE(ch) == yield_expr) return ast_for_expr(c, ch); /* testlist_comp: test ( comp_for | (',' test)* [','] ) */ if ((NCH(ch) > 1) && (TYPE(CHILD(ch, 1)) == comp_for)) return ast_for_genexp(c, ch); return ast_for_testlist(c, ch); case LSQB: /* list (or list comprehension) */ ch = CHILD(n, 1); if (TYPE(ch) == RSQB) return List(NULL, Load, LINENO(n), 
n->n_col_offset, c->c_arena); REQ(ch, testlist_comp); if (NCH(ch) == 1 || TYPE(CHILD(ch, 1)) == COMMA) { asdl_seq *elts = seq_for_testlist(c, ch); if (!elts) return NULL; return List(elts, Load, LINENO(n), n->n_col_offset, c->c_arena); } else return ast_for_listcomp(c, ch); case LBRACE: { /* dictorsetmaker: ( ((test ':' test | '**' test) * (comp_for | (',' (test ':' test | '**' test))* [','])) | * ((test | '*' test) * (comp_for | (',' (test | '*' test))* [','])) ) */ expr_ty res; ch = CHILD(n, 1); if (TYPE(ch) == RBRACE) { /* It's an empty dict. */ return Dict(NULL, NULL, LINENO(n), n->n_col_offset, c->c_arena); } else { int is_dict = (TYPE(CHILD(ch, 0)) == DOUBLESTAR); if (NCH(ch) == 1 || (NCH(ch) > 1 && TYPE(CHILD(ch, 1)) == COMMA)) { /* It's a set display. */ res = ast_for_setdisplay(c, ch); } else if (NCH(ch) > 1 && TYPE(CHILD(ch, 1)) == comp_for) { /* It's a set comprehension. */ res = ast_for_setcomp(c, ch); } else if (NCH(ch) > 3 - is_dict && TYPE(CHILD(ch, 3 - is_dict)) == comp_for) { /* It's a dictionary comprehension. */ if (is_dict) { ast_error(c, n, "dict unpacking cannot be used in " "dict comprehension"); return NULL; } res = ast_for_dictcomp(c, ch); } else { /* It's a dictionary display. 
*/ res = ast_for_dictdisplay(c, ch); } if (res) { res->lineno = LINENO(n); res->col_offset = n->n_col_offset; } return res; } } default: PyErr_Format(PyExc_SystemError, "unhandled atom %d", TYPE(ch)); return NULL; } } static slice_ty ast_for_slice(struct compiling *c, const node *n) { node *ch; expr_ty lower = NULL, upper = NULL, step = NULL; REQ(n, subscript); /* subscript: test | [test] ':' [test] [sliceop] sliceop: ':' [test] */ ch = CHILD(n, 0); if (NCH(n) == 1 && TYPE(ch) == test) { /* 'step' variable hold no significance in terms of being used over other vars */ step = ast_for_expr(c, ch); if (!step) return NULL; return Index(step, c->c_arena); } if (TYPE(ch) == test) { lower = ast_for_expr(c, ch); if (!lower) return NULL; } /* If there's an upper bound it's in the second or third position. */ if (TYPE(ch) == COLON) { if (NCH(n) > 1) { node *n2 = CHILD(n, 1); if (TYPE(n2) == test) { upper = ast_for_expr(c, n2); if (!upper) return NULL; } } } else if (NCH(n) > 2) { node *n2 = CHILD(n, 2); if (TYPE(n2) == test) { upper = ast_for_expr(c, n2); if (!upper) return NULL; } } ch = CHILD(n, NCH(n) - 1); if (TYPE(ch) == sliceop) { if (NCH(ch) != 1) { ch = CHILD(ch, 1); if (TYPE(ch) == test) { step = ast_for_expr(c, ch); if (!step) return NULL; } } } return Slice(lower, upper, step, c->c_arena); } static expr_ty ast_for_binop(struct compiling *c, const node *n) { /* Must account for a sequence of expressions. How should A op B op C by represented? BinOp(BinOp(A, op, B), op, C). 
*/ int i, nops; expr_ty expr1, expr2, result; operator_ty newoperator; expr1 = ast_for_expr(c, CHILD(n, 0)); if (!expr1) return NULL; expr2 = ast_for_expr(c, CHILD(n, 2)); if (!expr2) return NULL; newoperator = get_operator(c, CHILD(n, 1)); if (!newoperator) return NULL; result = BinOp(expr1, newoperator, expr2, LINENO(n), n->n_col_offset, c->c_arena); if (!result) return NULL; nops = (NCH(n) - 1) / 2; for (i = 1; i < nops; i++) { expr_ty tmp_result, tmp; const node* next_oper = CHILD(n, i * 2 + 1); newoperator = get_operator(c, next_oper); if (!newoperator) return NULL; tmp = ast_for_expr(c, CHILD(n, i * 2 + 2)); if (!tmp) return NULL; tmp_result = BinOp(result, newoperator, tmp, LINENO(next_oper), next_oper->n_col_offset, c->c_arena); if (!tmp_result) return NULL; result = tmp_result; } return result; } static expr_ty ast_for_trailer(struct compiling *c, const node *n, expr_ty left_expr) { /* trailer: '(' [arglist] ')' | '[' subscriptlist ']' | '.' NAME subscriptlist: subscript (',' subscript)* [','] subscript: '.' '.' '.' | test | [test] ':' [test] [sliceop] */ REQ(n, trailer); if (TYPE(CHILD(n, 0)) == LPAR) { if (NCH(n) == 2) return Call(left_expr, NULL, NULL, LINENO(n), n->n_col_offset, c->c_arena); else return ast_for_call(c, CHILD(n, 1), left_expr); } else if (TYPE(CHILD(n, 0)) == DOT) { PyObject *attr_id = NEW_IDENTIFIER(CHILD(n, 1)); if (!attr_id) return NULL; return Attribute(left_expr, attr_id, Load, LINENO(n), n->n_col_offset, c->c_arena); } else { REQ(CHILD(n, 0), LSQB); REQ(CHILD(n, 2), RSQB); n = CHILD(n, 1); if (NCH(n) == 1) { slice_ty slc = ast_for_slice(c, CHILD(n, 0)); if (!slc) return NULL; return Subscript(left_expr, slc, Load, LINENO(n), n->n_col_offset, c->c_arena); } else { /* The grammar is ambiguous here. The ambiguity is resolved by treating the sequence as a tuple literal if there are no slice features. 
*/ int j; slice_ty slc; expr_ty e; int simple = 1; asdl_seq *slices, *elts; slices = _Ta3_asdl_seq_new((NCH(n) + 1) / 2, c->c_arena); if (!slices) return NULL; for (j = 0; j < NCH(n); j += 2) { slc = ast_for_slice(c, CHILD(n, j)); if (!slc) return NULL; if (slc->kind != Index_kind) simple = 0; asdl_seq_SET(slices, j / 2, slc); } if (!simple) { return Subscript(left_expr, ExtSlice(slices, c->c_arena), Load, LINENO(n), n->n_col_offset, c->c_arena); } /* extract Index values and put them in a Tuple */ elts = _Ta3_asdl_seq_new(asdl_seq_LEN(slices), c->c_arena); if (!elts) return NULL; for (j = 0; j < asdl_seq_LEN(slices); ++j) { slc = (slice_ty)asdl_seq_GET(slices, j); assert(slc->kind == Index_kind && slc->v.Index.value); asdl_seq_SET(elts, j, slc->v.Index.value); } e = Tuple(elts, Load, LINENO(n), n->n_col_offset, c->c_arena); if (!e) return NULL; return Subscript(left_expr, Index(e, c->c_arena), Load, LINENO(n), n->n_col_offset, c->c_arena); } } } static expr_ty ast_for_factor(struct compiling *c, const node *n) { expr_ty expression; expression = ast_for_expr(c, CHILD(n, 1)); if (!expression) return NULL; switch (TYPE(CHILD(n, 0))) { case PLUS: return UnaryOp(UAdd, expression, LINENO(n), n->n_col_offset, c->c_arena); case MINUS: return UnaryOp(USub, expression, LINENO(n), n->n_col_offset, c->c_arena); case TILDE: return UnaryOp(Invert, expression, LINENO(n), n->n_col_offset, c->c_arena); } PyErr_Format(PyExc_SystemError, "unhandled factor: %d", TYPE(CHILD(n, 0))); return NULL; } static expr_ty ast_for_atom_expr(struct compiling *c, const node *n) { int i, nch, start = 0; expr_ty e, tmp; REQ(n, atom_expr); nch = NCH(n); if (TYPE(CHILD(n, 0)) == AWAIT) { if (c->c_feature_version < 5) { ast_error(c, n, "Await expressions are only supported in Python 3.5 and greater"); return NULL; } start = 1; assert(nch > 1); } e = ast_for_atom(c, CHILD(n, start)); if (!e) return NULL; if (nch == 1) return e; if (start && nch == 2) { return Await(e, LINENO(n), n->n_col_offset, 
c->c_arena); } for (i = start + 1; i < nch; i++) { node *ch = CHILD(n, i); if (TYPE(ch) != trailer) break; tmp = ast_for_trailer(c, ch, e); if (!tmp) return NULL; tmp->lineno = e->lineno; tmp->col_offset = e->col_offset; e = tmp; } if (start) { /* there was an AWAIT */ return Await(e, LINENO(n), n->n_col_offset, c->c_arena); } else { return e; } } static expr_ty ast_for_power(struct compiling *c, const node *n) { /* power: atom trailer* ('**' factor)* */ expr_ty e; REQ(n, power); e = ast_for_atom_expr(c, CHILD(n, 0)); if (!e) return NULL; if (NCH(n) == 1) return e; if (TYPE(CHILD(n, NCH(n) - 1)) == factor) { expr_ty f = ast_for_expr(c, CHILD(n, NCH(n) - 1)); if (!f) return NULL; e = BinOp(e, Pow, f, LINENO(n), n->n_col_offset, c->c_arena); } return e; } static expr_ty ast_for_starred(struct compiling *c, const node *n) { expr_ty tmp; REQ(n, star_expr); tmp = ast_for_expr(c, CHILD(n, 1)); if (!tmp) return NULL; /* The Load context is changed later. */ return Starred(tmp, Load, LINENO(n), n->n_col_offset, c->c_arena); } /* Do not name a variable 'expr'! Will cause a compile error. 
*/ static expr_ty ast_for_expr(struct compiling *c, const node *n) { /* handle the full range of simple expressions test: or_test ['if' or_test 'else' test] | lambdef test_nocond: or_test | lambdef_nocond or_test: and_test ('or' and_test)* and_test: not_test ('and' not_test)* not_test: 'not' not_test | comparison comparison: expr (comp_op expr)* expr: xor_expr ('|' xor_expr)* xor_expr: and_expr ('^' and_expr)* and_expr: shift_expr ('&' shift_expr)* shift_expr: arith_expr (('<<'|'>>') arith_expr)* arith_expr: term (('+'|'-') term)* term: factor (('*'|'@'|'/'|'%'|'//') factor)* factor: ('+'|'-'|'~') factor | power power: atom_expr ['**' factor] atom_expr: [AWAIT] atom trailer* yield_expr: 'yield' [yield_arg] */ asdl_seq *seq; int i; loop: switch (TYPE(n)) { case test: case test_nocond: if (TYPE(CHILD(n, 0)) == lambdef || TYPE(CHILD(n, 0)) == lambdef_nocond) return ast_for_lambdef(c, CHILD(n, 0)); else if (NCH(n) > 1) return ast_for_ifexpr(c, n); /* Fallthrough */ case or_test: case and_test: if (NCH(n) == 1) { n = CHILD(n, 0); goto loop; } seq = _Ta3_asdl_seq_new((NCH(n) + 1) / 2, c->c_arena); if (!seq) return NULL; for (i = 0; i < NCH(n); i += 2) { expr_ty e = ast_for_expr(c, CHILD(n, i)); if (!e) return NULL; asdl_seq_SET(seq, i / 2, e); } if (!strcmp(STR(CHILD(n, 1)), "and")) return BoolOp(And, seq, LINENO(n), n->n_col_offset, c->c_arena); assert(!strcmp(STR(CHILD(n, 1)), "or")); return BoolOp(Or, seq, LINENO(n), n->n_col_offset, c->c_arena); case not_test: if (NCH(n) == 1) { n = CHILD(n, 0); goto loop; } else { expr_ty expression = ast_for_expr(c, CHILD(n, 1)); if (!expression) return NULL; return UnaryOp(Not, expression, LINENO(n), n->n_col_offset, c->c_arena); } case comparison: if (NCH(n) == 1) { n = CHILD(n, 0); goto loop; } else { expr_ty expression; asdl_int_seq *ops; asdl_seq *cmps; ops = _Ta3_asdl_int_seq_new(NCH(n) / 2, c->c_arena); if (!ops) return NULL; cmps = _Ta3_asdl_seq_new(NCH(n) / 2, c->c_arena); if (!cmps) { return NULL; } for (i = 1; i < 
NCH(n); i += 2) { cmpop_ty newoperator; newoperator = ast_for_comp_op(c, CHILD(n, i)); if (!newoperator) { return NULL; } expression = ast_for_expr(c, CHILD(n, i + 1)); if (!expression) { return NULL; } asdl_seq_SET(ops, i / 2, newoperator); asdl_seq_SET(cmps, i / 2, expression); } expression = ast_for_expr(c, CHILD(n, 0)); if (!expression) { return NULL; } return Compare(expression, ops, cmps, LINENO(n), n->n_col_offset, c->c_arena); } break; case star_expr: return ast_for_starred(c, n); /* The next five cases all handle BinOps. The main body of code is the same in each case, but the switch turned inside out to reuse the code for each type of operator. */ case expr: case xor_expr: case and_expr: case shift_expr: case arith_expr: case term: if (NCH(n) == 1) { n = CHILD(n, 0); goto loop; } return ast_for_binop(c, n); case yield_expr: { node *an = NULL; node *en = NULL; int is_from = 0; expr_ty exp = NULL; if (NCH(n) > 1) an = CHILD(n, 1); /* yield_arg */ if (an) { en = CHILD(an, NCH(an) - 1); if (NCH(an) == 2) { is_from = 1; exp = ast_for_expr(c, en); } else exp = ast_for_testlist(c, en); if (!exp) return NULL; } if (is_from) return YieldFrom(exp, LINENO(n), n->n_col_offset, c->c_arena); return Yield(exp, LINENO(n), n->n_col_offset, c->c_arena); } case factor: if (NCH(n) == 1) { n = CHILD(n, 0); goto loop; } return ast_for_factor(c, n); case power: return ast_for_power(c, n); default: PyErr_Format(PyExc_SystemError, "unhandled expr: %d", TYPE(n)); return NULL; } /* should never get here unless if error is set */ return NULL; } static expr_ty ast_for_call(struct compiling *c, const node *n, expr_ty func) { /* arglist: argument (',' argument)* [','] argument: ( test [comp_for] | '*' test | test '=' test | '**' test ) */ int i, nargs, nkeywords, ngens; int ndoublestars; asdl_seq *args; asdl_seq *keywords; REQ(n, arglist); nargs = 0; nkeywords = 0; ngens = 0; for (i = 0; i < NCH(n); i++) { node *ch = CHILD(n, i); if (TYPE(ch) == argument) { if (NCH(ch) == 1) nargs++; 
else if (TYPE(CHILD(ch, 1)) == comp_for) ngens++; else if (TYPE(CHILD(ch, 0)) == STAR) nargs++; else /* TYPE(CHILD(ch, 0)) == DOUBLESTAR or keyword argument */ nkeywords++; } } if (ngens > 1 || (ngens && (nargs || nkeywords))) { ast_error(c, n, "Generator expression must be parenthesized " "if not sole argument"); return NULL; } if (nargs + nkeywords + ngens > 255) { ast_error(c, n, "more than 255 arguments"); return NULL; } args = _Ta3_asdl_seq_new(nargs + ngens, c->c_arena); if (!args) return NULL; keywords = _Ta3_asdl_seq_new(nkeywords, c->c_arena); if (!keywords) return NULL; nargs = 0; /* positional arguments + iterable argument unpackings */ nkeywords = 0; /* keyword arguments + keyword argument unpackings */ ndoublestars = 0; /* just keyword argument unpackings */ for (i = 0; i < NCH(n); i++) { node *ch = CHILD(n, i); if (TYPE(ch) == argument) { expr_ty e; node *chch = CHILD(ch, 0); if (NCH(ch) == 1) { /* a positional argument */ if (nkeywords) { if (ndoublestars) { ast_error(c, chch, "positional argument follows " "keyword argument unpacking"); } else { ast_error(c, chch, "positional argument follows " "keyword argument"); } return NULL; } e = ast_for_expr(c, chch); if (!e) return NULL; asdl_seq_SET(args, nargs++, e); } else if (TYPE(chch) == STAR) { /* an iterable argument unpacking */ expr_ty starred; if (ndoublestars) { ast_error(c, chch, "iterable argument unpacking follows " "keyword argument unpacking"); return NULL; } e = ast_for_expr(c, CHILD(ch, 1)); if (!e) return NULL; starred = Starred(e, Load, LINENO(chch), chch->n_col_offset, c->c_arena); if (!starred) return NULL; asdl_seq_SET(args, nargs++, starred); } else if (TYPE(chch) == DOUBLESTAR) { /* a keyword argument unpacking */ keyword_ty kw; i++; e = ast_for_expr(c, CHILD(ch, 1)); if (!e) return NULL; kw = keyword(NULL, e, c->c_arena); asdl_seq_SET(keywords, nkeywords++, kw); ndoublestars++; } else if (TYPE(CHILD(ch, 1)) == comp_for) { /* the lone generator expression */ e = ast_for_genexp(c, 
ch); if (!e) return NULL; asdl_seq_SET(args, nargs++, e); } else { /* a keyword argument */ keyword_ty kw; identifier key, tmp; int k; /* chch is test, but must be an identifier? */ e = ast_for_expr(c, chch); if (!e) return NULL; /* f(lambda x: x[0] = 3) ends up getting parsed with * LHS test = lambda x: x[0], and RHS test = 3. * SF bug 132313 points out that complaining about a keyword * then is very confusing. */ if (e->kind == Lambda_kind) { ast_error(c, chch, "lambda cannot contain assignment"); return NULL; } else if (e->kind != Name_kind) { ast_error(c, chch, "keyword can't be an expression"); return NULL; } else if (forbidden_name(c, e->v.Name.id, ch, 1)) { return NULL; } key = e->v.Name.id; for (k = 0; k < nkeywords; k++) { tmp = ((keyword_ty)asdl_seq_GET(keywords, k))->arg; if (tmp && !PyUnicode_Compare(tmp, key)) { ast_error(c, chch, "keyword argument repeated"); return NULL; } } e = ast_for_expr(c, CHILD(ch, 2)); if (!e) return NULL; kw = keyword(key, e, c->c_arena); if (!kw) return NULL; asdl_seq_SET(keywords, nkeywords++, kw); } } } return Call(func, args, keywords, func->lineno, func->col_offset, c->c_arena); } static expr_ty ast_for_testlist(struct compiling *c, const node* n) { /* testlist_comp: test (comp_for | (',' test)* [',']) */ /* testlist: test (',' test)* [','] */ assert(NCH(n) > 0); if (TYPE(n) == testlist_comp) { if (NCH(n) > 1) assert(TYPE(CHILD(n, 1)) != comp_for); } else { assert(TYPE(n) == testlist || TYPE(n) == testlist_star_expr); } if (NCH(n) == 1) return ast_for_expr(c, CHILD(n, 0)); else { asdl_seq *tmp = seq_for_testlist(c, n); if (!tmp) return NULL; return Tuple(tmp, Load, LINENO(n), n->n_col_offset, c->c_arena); } } static stmt_ty ast_for_expr_stmt(struct compiling *c, const node *n) { int num; REQ(n, expr_stmt); /* expr_stmt: testlist_star_expr (annassign | augassign (yield_expr|testlist) | ('=' (yield_expr|testlist_star_expr))* [TYPE_COMMENT]) annassign: ':' test ['=' test] testlist_star_expr: (test|star_expr) (',' 
test|star_expr)* [','] augassign: '+=' | '-=' | '*=' | '@=' | '/=' | '%=' | '&=' | '|=' | '^=' | '<<=' | '>>=' | '**=' | '//=' test: ... here starts the operator precedence dance */ num = NCH(n); if (num == 1 || (num == 2 && TYPE(CHILD(n, 1)) == TYPE_COMMENT)) { expr_ty e = ast_for_testlist(c, CHILD(n, 0)); if (!e) return NULL; return Expr(e, LINENO(n), n->n_col_offset, c->c_arena); } else if (TYPE(CHILD(n, 1)) == augassign) { expr_ty expr1, expr2; operator_ty newoperator; node *ch = CHILD(n, 0); expr1 = ast_for_testlist(c, ch); if (!expr1) return NULL; if(!set_context(c, expr1, Store, ch)) return NULL; /* set_context checks that most expressions are not the left side. Augmented assignments can only have a name, a subscript, or an attribute on the left, though, so we have to explicitly check for those. */ switch (expr1->kind) { case Name_kind: case Attribute_kind: case Subscript_kind: break; default: ast_error(c, ch, "illegal expression for augmented assignment"); return NULL; } ch = CHILD(n, 2); if (TYPE(ch) == testlist) expr2 = ast_for_testlist(c, ch); else expr2 = ast_for_expr(c, ch); if (!expr2) return NULL; newoperator = ast_for_augassign(c, CHILD(n, 1)); if (!newoperator) return NULL; return AugAssign(expr1, newoperator, expr2, LINENO(n), n->n_col_offset, c->c_arena); } else if (TYPE(CHILD(n, 1)) == annassign) { expr_ty expr1, expr2, expr3; node *ch = CHILD(n, 0); node *deep, *ann = CHILD(n, 1); int simple = 1; /* AnnAssigns are only allowed in Python 3.6 or greater */ if (c->c_feature_version < 6) { ast_error(c, ch, "Variable annotation syntax is only supported in Python 3.6 and greater"); return NULL; } /* we keep track of parens to qualify (x) as expression not name */ deep = ch; while (NCH(deep) == 1) { deep = CHILD(deep, 0); } if (NCH(deep) > 0 && TYPE(CHILD(deep, 0)) == LPAR) { simple = 0; } expr1 = ast_for_testlist(c, ch); if (!expr1) { return NULL; } switch (expr1->kind) { case Name_kind: if (forbidden_name(c, expr1->v.Name.id, n, 0)) { return NULL; } 
expr1->v.Name.ctx = Store; break; case Attribute_kind: if (forbidden_name(c, expr1->v.Attribute.attr, n, 1)) { return NULL; } expr1->v.Attribute.ctx = Store; break; case Subscript_kind: expr1->v.Subscript.ctx = Store; break; case List_kind: ast_error(c, ch, "only single target (not list) can be annotated"); return NULL; case Tuple_kind: ast_error(c, ch, "only single target (not tuple) can be annotated"); return NULL; default: ast_error(c, ch, "illegal target for annotation"); return NULL; } if (expr1->kind != Name_kind) { simple = 0; } ch = CHILD(ann, 1); expr2 = ast_for_expr(c, ch); if (!expr2) { return NULL; } if (NCH(ann) == 2) { return AnnAssign(expr1, expr2, NULL, simple, LINENO(n), n->n_col_offset, c->c_arena); } else { ch = CHILD(ann, 3); expr3 = ast_for_expr(c, ch); if (!expr3) { return NULL; } return AnnAssign(expr1, expr2, expr3, simple, LINENO(n), n->n_col_offset, c->c_arena); } } else { int i, nch_minus_type, has_type_comment; asdl_seq *targets; node *value; expr_ty expression; string type_comment; /* a normal assignment */ REQ(CHILD(n, 1), EQUAL); has_type_comment = TYPE(CHILD(n, num - 1)) == TYPE_COMMENT; nch_minus_type = num - has_type_comment; targets = _Ta3_asdl_seq_new(nch_minus_type / 2, c->c_arena); if (!targets) return NULL; for (i = 0; i < nch_minus_type - 2; i += 2) { expr_ty e; node *ch = CHILD(n, i); if (TYPE(ch) == yield_expr) { ast_error(c, ch, "assignment to yield expression not possible"); return NULL; } e = ast_for_testlist(c, ch); if (!e) return NULL; /* set context to assign */ if (!set_context(c, e, Store, CHILD(n, i))) return NULL; asdl_seq_SET(targets, i / 2, e); } value = CHILD(n, nch_minus_type - 1); if (TYPE(value) == testlist_star_expr) expression = ast_for_testlist(c, value); else expression = ast_for_expr(c, value); if (!expression) return NULL; if (has_type_comment) type_comment = NEW_TYPE_COMMENT(CHILD(n, nch_minus_type)); else type_comment = NULL; return Assign(targets, expression, type_comment, LINENO(n), 
n->n_col_offset, c->c_arena); } }

/* Build a sequence of expr_ty from an exprlist node; when `context` is
   non-zero, apply it (Store/Del/...) to each parsed element. */
static asdl_seq *
ast_for_exprlist(struct compiling *c, const node *n, expr_context_ty context)
{
    asdl_seq *seq;
    int i;
    expr_ty e;

    REQ(n, exprlist);

    seq = _Ta3_asdl_seq_new((NCH(n) + 1) / 2, c->c_arena);
    if (!seq)
        return NULL;
    /* Step by 2 to skip the separating commas. */
    for (i = 0; i < NCH(n); i += 2) {
        e = ast_for_expr(c, CHILD(n, i));
        if (!e)
            return NULL;
        asdl_seq_SET(seq, i / 2, e);
        if (context && !set_context(c, e, context, CHILD(n, i)))
            return NULL;
    }
    return seq;
}

/* Build a Delete statement from a del_stmt node. */
static stmt_ty
ast_for_del_stmt(struct compiling *c, const node *n)
{
    asdl_seq *expr_list;

    /* del_stmt: 'del' exprlist */
    REQ(n, del_stmt);

    expr_list = ast_for_exprlist(c, CHILD(n, 1), Del);
    if (!expr_list)
        return NULL;
    return Delete(expr_list, LINENO(n), n->n_col_offset, c->c_arena);
}

/* Dispatch a flow_stmt node to the matching statement constructor. */
static stmt_ty
ast_for_flow_stmt(struct compiling *c, const node *n)
{
    /*
      flow_stmt: break_stmt | continue_stmt | return_stmt | raise_stmt
                 | yield_stmt
      break_stmt: 'break'
      continue_stmt: 'continue'
      return_stmt: 'return' [testlist]
      yield_stmt: yield_expr
      yield_expr: 'yield' testlist | 'yield' 'from' test
      raise_stmt: 'raise' [test [',' test [',' test]]]
    */
    node *ch;

    REQ(n, flow_stmt);
    ch = CHILD(n, 0);
    switch (TYPE(ch)) {
        case break_stmt:
            return Break(LINENO(n), n->n_col_offset, c->c_arena);
        case continue_stmt:
            return Continue(LINENO(n), n->n_col_offset, c->c_arena);
        case yield_stmt: { /* will reduce to yield_expr */
            expr_ty exp = ast_for_expr(c, CHILD(ch, 0));
            if (!exp)
                return NULL;
            return Expr(exp, LINENO(n), n->n_col_offset, c->c_arena);
        }
        case return_stmt:
            if (NCH(ch) == 1)
                return Return(NULL, LINENO(n), n->n_col_offset, c->c_arena);
            else {
                expr_ty expression = ast_for_testlist(c, CHILD(ch, 1));
                if (!expression)
                    return NULL;
                return Return(expression, LINENO(n), n->n_col_offset,
                              c->c_arena);
            }
        case raise_stmt:
            if (NCH(ch) == 1)
                return Raise(NULL, NULL, LINENO(n), n->n_col_offset,
                             c->c_arena);
            else if (NCH(ch) >= 2) {
                expr_ty cause = NULL;
                expr_ty expression = ast_for_expr(c, CHILD(ch, 1));
                if (!expression)
                    return NULL;
                if
(NCH(ch) == 4) { cause = ast_for_expr(c, CHILD(ch, 3)); if (!cause) return NULL; } return Raise(expression, cause, LINENO(n), n->n_col_offset, c->c_arena); } default: PyErr_Format(PyExc_SystemError, "unexpected flow_stmt: %d", TYPE(ch)); return NULL; } } static alias_ty alias_for_import_name(struct compiling *c, const node *n, int store) { /* import_as_name: NAME ['as' NAME] dotted_as_name: dotted_name ['as' NAME] dotted_name: NAME ('.' NAME)* */ identifier str, name; loop: switch (TYPE(n)) { case import_as_name: { node *name_node = CHILD(n, 0); str = NULL; name = NEW_IDENTIFIER(name_node); if (!name) return NULL; if (NCH(n) == 3) { node *str_node = CHILD(n, 2); str = NEW_IDENTIFIER(str_node); if (!str) return NULL; if (store && forbidden_name(c, str, str_node, 0)) return NULL; } else { if (forbidden_name(c, name, name_node, 0)) return NULL; } return alias(name, str, c->c_arena); } case dotted_as_name: if (NCH(n) == 1) { n = CHILD(n, 0); goto loop; } else { node *asname_node = CHILD(n, 2); alias_ty a = alias_for_import_name(c, CHILD(n, 0), 0); if (!a) return NULL; assert(!a->asname); a->asname = NEW_IDENTIFIER(asname_node); if (!a->asname) return NULL; if (forbidden_name(c, a->asname, asname_node, 0)) return NULL; return a; } break; case dotted_name: if (NCH(n) == 1) { node *name_node = CHILD(n, 0); name = NEW_IDENTIFIER(name_node); if (!name) return NULL; if (store && forbidden_name(c, name, name_node, 0)) return NULL; return alias(name, NULL, c->c_arena); } else { /* Create a string of the form "a.b.c" */ int i; size_t len; char *s; PyObject *uni; len = 0; for (i = 0; i < NCH(n); i += 2) /* length of string plus one for the dot */ len += strlen(STR(CHILD(n, i))) + 1; len--; /* the last name doesn't have a dot */ str = PyBytes_FromStringAndSize(NULL, len); if (!str) return NULL; s = PyBytes_AS_STRING(str); if (!s) return NULL; for (i = 0; i < NCH(n); i += 2) { char *sch = STR(CHILD(n, i)); strcpy(s, STR(CHILD(n, i))); s += strlen(sch); *s++ = '.'; } --s; *s = 
'\0'; uni = PyUnicode_DecodeUTF8(PyBytes_AS_STRING(str), PyBytes_GET_SIZE(str), NULL); Py_DECREF(str); if (!uni) return NULL; str = uni; PyUnicode_InternInPlace(&str); if (PyArena_AddPyObject(c->c_arena, str) < 0) { Py_DECREF(str); return NULL; } return alias(str, NULL, c->c_arena); } break; case STAR: str = PyUnicode_InternFromString("*"); if (PyArena_AddPyObject(c->c_arena, str) < 0) { Py_DECREF(str); return NULL; } return alias(str, NULL, c->c_arena); default: PyErr_Format(PyExc_SystemError, "unexpected import name: %d", TYPE(n)); return NULL; } PyErr_SetString(PyExc_SystemError, "unhandled import name condition"); return NULL; } static stmt_ty ast_for_import_stmt(struct compiling *c, const node *n) { /* import_stmt: import_name | import_from import_name: 'import' dotted_as_names import_from: 'from' (('.' | '...')* dotted_name | ('.' | '...')+) 'import' ('*' | '(' import_as_names ')' | import_as_names) */ int lineno; int col_offset; int i; asdl_seq *aliases; REQ(n, import_stmt); lineno = LINENO(n); col_offset = n->n_col_offset; n = CHILD(n, 0); if (TYPE(n) == import_name) { n = CHILD(n, 1); REQ(n, dotted_as_names); aliases = _Ta3_asdl_seq_new((NCH(n) + 1) / 2, c->c_arena); if (!aliases) return NULL; for (i = 0; i < NCH(n); i += 2) { alias_ty import_alias = alias_for_import_name(c, CHILD(n, i), 1); if (!import_alias) return NULL; asdl_seq_SET(aliases, i / 2, import_alias); } return Import(aliases, lineno, col_offset, c->c_arena); } else if (TYPE(n) == import_from) { int n_children; int idx, ndots = 0; alias_ty mod = NULL; identifier modname = NULL; /* Count the number of dots (for relative imports) and check for the optional module name */ for (idx = 1; idx < NCH(n); idx++) { if (TYPE(CHILD(n, idx)) == dotted_name) { mod = alias_for_import_name(c, CHILD(n, idx), 0); if (!mod) return NULL; idx++; break; } else if (TYPE(CHILD(n, idx)) == ELLIPSIS) { /* three consecutive dots are tokenized as one ELLIPSIS */ ndots += 3; continue; } else if (TYPE(CHILD(n, idx)) != 
DOT) { break; } ndots++; } idx++; /* skip over the 'import' keyword */ switch (TYPE(CHILD(n, idx))) { case STAR: /* from ... import * */ n = CHILD(n, idx); n_children = 1; break; case LPAR: /* from ... import (x, y, z) */ n = CHILD(n, idx + 1); n_children = NCH(n); break; case import_as_names: /* from ... import x, y, z */ n = CHILD(n, idx); n_children = NCH(n); if (n_children % 2 == 0) { ast_error(c, n, "trailing comma not allowed without" " surrounding parentheses"); return NULL; } break; default: ast_error(c, n, "Unexpected node-type in from-import"); return NULL; } aliases = _Ta3_asdl_seq_new((n_children + 1) / 2, c->c_arena); if (!aliases) return NULL; /* handle "from ... import *" special b/c there's no children */ if (TYPE(n) == STAR) { alias_ty import_alias = alias_for_import_name(c, n, 1); if (!import_alias) return NULL; asdl_seq_SET(aliases, 0, import_alias); } else { for (i = 0; i < NCH(n); i += 2) { alias_ty import_alias = alias_for_import_name(c, CHILD(n, i), 1); if (!import_alias) return NULL; asdl_seq_SET(aliases, i / 2, import_alias); } } if (mod != NULL) modname = mod->name; return ImportFrom(modname, aliases, ndots, lineno, col_offset, c->c_arena); } PyErr_Format(PyExc_SystemError, "unknown import statement: starts with command '%s'", STR(CHILD(n, 0))); return NULL; } static stmt_ty ast_for_global_stmt(struct compiling *c, const node *n) { /* global_stmt: 'global' NAME (',' NAME)* */ identifier name; asdl_seq *s; int i; REQ(n, global_stmt); s = _Ta3_asdl_seq_new(NCH(n) / 2, c->c_arena); if (!s) return NULL; for (i = 1; i < NCH(n); i += 2) { name = NEW_IDENTIFIER(CHILD(n, i)); if (!name) return NULL; asdl_seq_SET(s, i / 2, name); } return Global(s, LINENO(n), n->n_col_offset, c->c_arena); } static stmt_ty ast_for_nonlocal_stmt(struct compiling *c, const node *n) { /* nonlocal_stmt: 'nonlocal' NAME (',' NAME)* */ identifier name; asdl_seq *s; int i; REQ(n, nonlocal_stmt); s = _Ta3_asdl_seq_new(NCH(n) / 2, c->c_arena); if (!s) return NULL; for (i = 
1; i < NCH(n); i += 2) { name = NEW_IDENTIFIER(CHILD(n, i)); if (!name) return NULL; asdl_seq_SET(s, i / 2, name); } return Nonlocal(s, LINENO(n), n->n_col_offset, c->c_arena); } static stmt_ty ast_for_assert_stmt(struct compiling *c, const node *n) { /* assert_stmt: 'assert' test [',' test] */ REQ(n, assert_stmt); if (NCH(n) == 2) { expr_ty expression = ast_for_expr(c, CHILD(n, 1)); if (!expression) return NULL; return Assert(expression, NULL, LINENO(n), n->n_col_offset, c->c_arena); } else if (NCH(n) == 4) { expr_ty expr1, expr2; expr1 = ast_for_expr(c, CHILD(n, 1)); if (!expr1) return NULL; expr2 = ast_for_expr(c, CHILD(n, 3)); if (!expr2) return NULL; return Assert(expr1, expr2, LINENO(n), n->n_col_offset, c->c_arena); } PyErr_Format(PyExc_SystemError, "improper number of parts to 'assert' statement: %d", NCH(n)); return NULL; } static asdl_seq * ast_for_suite(struct compiling *c, const node *n) { /* suite: simple_stmt | NEWLINE [TYPE_COMMENT NEWLINE] INDENT stmt+ DEDENT */ asdl_seq *seq; stmt_ty s; int i, total, num, end, pos = 0; node *ch; REQ(n, suite); total = num_stmts(n); seq = _Ta3_asdl_seq_new(total, c->c_arena); if (!seq) return NULL; if (TYPE(CHILD(n, 0)) == simple_stmt) { n = CHILD(n, 0); /* simple_stmt always ends with a NEWLINE, and may have a trailing SEMI */ end = NCH(n) - 1; if (TYPE(CHILD(n, end - 1)) == SEMI) end--; /* loop by 2 to skip semi-colons */ for (i = 0; i < end; i += 2) { ch = CHILD(n, i); s = ast_for_stmt(c, ch); if (!s) return NULL; asdl_seq_SET(seq, pos++, s); } } else { i = 2; if (TYPE(CHILD(n, 1)) == TYPE_COMMENT) i += 2; for (; i < (NCH(n) - 1); i++) { ch = CHILD(n, i); REQ(ch, stmt); num = num_stmts(ch); if (num == 1) { /* small_stmt or compound_stmt with only one child */ s = ast_for_stmt(c, ch); if (!s) return NULL; asdl_seq_SET(seq, pos++, s); } else { int j; ch = CHILD(ch, 0); REQ(ch, simple_stmt); for (j = 0; j < NCH(ch); j += 2) { /* statement terminates with a semi-colon ';' */ if (NCH(CHILD(ch, j)) == 0) { assert((j + 
1) == NCH(ch)); break; } s = ast_for_stmt(c, CHILD(ch, j)); if (!s) return NULL; asdl_seq_SET(seq, pos++, s); } } } } assert(pos == seq->size); return seq; } static stmt_ty ast_for_if_stmt(struct compiling *c, const node *n) { /* if_stmt: 'if' test ':' suite ('elif' test ':' suite)* ['else' ':' suite] */ char *s; REQ(n, if_stmt); if (NCH(n) == 4) { expr_ty expression; asdl_seq *suite_seq; expression = ast_for_expr(c, CHILD(n, 1)); if (!expression) return NULL; suite_seq = ast_for_suite(c, CHILD(n, 3)); if (!suite_seq) return NULL; return If(expression, suite_seq, NULL, LINENO(n), n->n_col_offset, c->c_arena); } s = STR(CHILD(n, 4)); /* s[2], the third character in the string, will be 's' for el_s_e, or 'i' for el_i_f */ if (s[2] == 's') { expr_ty expression; asdl_seq *seq1, *seq2; expression = ast_for_expr(c, CHILD(n, 1)); if (!expression) return NULL; seq1 = ast_for_suite(c, CHILD(n, 3)); if (!seq1) return NULL; seq2 = ast_for_suite(c, CHILD(n, 6)); if (!seq2) return NULL; return If(expression, seq1, seq2, LINENO(n), n->n_col_offset, c->c_arena); } else if (s[2] == 'i') { int i, n_elif, has_else = 0; expr_ty expression; asdl_seq *suite_seq; asdl_seq *orelse = NULL; n_elif = NCH(n) - 4; /* must reference the child n_elif+1 since 'else' token is third, not fourth, child from the end. 
*/ if (TYPE(CHILD(n, (n_elif + 1))) == NAME && STR(CHILD(n, (n_elif + 1)))[2] == 's') { has_else = 1; n_elif -= 3; } n_elif /= 4; if (has_else) { asdl_seq *suite_seq2; orelse = _Ta3_asdl_seq_new(1, c->c_arena); if (!orelse) return NULL; expression = ast_for_expr(c, CHILD(n, NCH(n) - 6)); if (!expression) return NULL; suite_seq = ast_for_suite(c, CHILD(n, NCH(n) - 4)); if (!suite_seq) return NULL; suite_seq2 = ast_for_suite(c, CHILD(n, NCH(n) - 1)); if (!suite_seq2) return NULL; asdl_seq_SET(orelse, 0, If(expression, suite_seq, suite_seq2, LINENO(CHILD(n, NCH(n) - 6)), CHILD(n, NCH(n) - 6)->n_col_offset, c->c_arena)); /* the just-created orelse handled the last elif */ n_elif--; } for (i = 0; i < n_elif; i++) { int off = 5 + (n_elif - i - 1) * 4; asdl_seq *newobj = _Ta3_asdl_seq_new(1, c->c_arena); if (!newobj) return NULL; expression = ast_for_expr(c, CHILD(n, off)); if (!expression) return NULL; suite_seq = ast_for_suite(c, CHILD(n, off + 2)); if (!suite_seq) return NULL; asdl_seq_SET(newobj, 0, If(expression, suite_seq, orelse, LINENO(CHILD(n, off)), CHILD(n, off)->n_col_offset, c->c_arena)); orelse = newobj; } expression = ast_for_expr(c, CHILD(n, 1)); if (!expression) return NULL; suite_seq = ast_for_suite(c, CHILD(n, 3)); if (!suite_seq) return NULL; return If(expression, suite_seq, orelse, LINENO(n), n->n_col_offset, c->c_arena); } PyErr_Format(PyExc_SystemError, "unexpected token in 'if' statement: %s", s); return NULL; } static stmt_ty ast_for_while_stmt(struct compiling *c, const node *n) { /* while_stmt: 'while' test ':' suite ['else' ':' suite] */ REQ(n, while_stmt); if (NCH(n) == 4) { expr_ty expression; asdl_seq *suite_seq; expression = ast_for_expr(c, CHILD(n, 1)); if (!expression) return NULL; suite_seq = ast_for_suite(c, CHILD(n, 3)); if (!suite_seq) return NULL; return While(expression, suite_seq, NULL, LINENO(n), n->n_col_offset, c->c_arena); } else if (NCH(n) == 7) { expr_ty expression; asdl_seq *seq1, *seq2; expression = ast_for_expr(c, CHILD(n, 
1)); if (!expression) return NULL; seq1 = ast_for_suite(c, CHILD(n, 3)); if (!seq1) return NULL; seq2 = ast_for_suite(c, CHILD(n, 6)); if (!seq2) return NULL; return While(expression, seq1, seq2, LINENO(n), n->n_col_offset, c->c_arena); } PyErr_Format(PyExc_SystemError, "wrong number of tokens for 'while' statement: %d", NCH(n)); return NULL; } static stmt_ty ast_for_for_stmt(struct compiling *c, const node *n, int is_async) { asdl_seq *_target, *seq = NULL, *suite_seq; expr_ty expression; expr_ty target, first; const node *node_target; int has_type_comment; string type_comment; if (is_async && c->c_feature_version < 5) { ast_error(c, n, "Async for loops are only supported in Python 3.5 and greater"); return NULL; } /* for_stmt: 'for' exprlist 'in' testlist ':' [TYPE_COMMENT] suite ['else' ':' suite] */ REQ(n, for_stmt); has_type_comment = TYPE(CHILD(n, 5)) == TYPE_COMMENT; if (NCH(n) == 9 + has_type_comment) { seq = ast_for_suite(c, CHILD(n, 8 + has_type_comment)); if (!seq) return NULL; } node_target = CHILD(n, 1); _target = ast_for_exprlist(c, node_target, Store); if (!_target) return NULL; /* Check the # of children rather than the length of _target, since for x, in ... has 1 element in _target, but still requires a Tuple. 
*/ first = (expr_ty)asdl_seq_GET(_target, 0); if (NCH(node_target) == 1) target = first; else target = Tuple(_target, Store, first->lineno, first->col_offset, c->c_arena); expression = ast_for_testlist(c, CHILD(n, 3)); if (!expression) return NULL; suite_seq = ast_for_suite(c, CHILD(n, 5 + has_type_comment)); if (!suite_seq) return NULL; if (has_type_comment) type_comment = NEW_TYPE_COMMENT(CHILD(n, 5)); else type_comment = NULL; if (is_async) return AsyncFor(target, expression, suite_seq, seq, type_comment, LINENO(n), n->n_col_offset, c->c_arena); else return For(target, expression, suite_seq, seq, type_comment, LINENO(n), n->n_col_offset, c->c_arena); } static excepthandler_ty ast_for_except_clause(struct compiling *c, const node *exc, node *body) { /* except_clause: 'except' [test ['as' test]] */ REQ(exc, except_clause); REQ(body, suite); if (NCH(exc) == 1) { asdl_seq *suite_seq = ast_for_suite(c, body); if (!suite_seq) return NULL; return ExceptHandler(NULL, NULL, suite_seq, LINENO(exc), exc->n_col_offset, c->c_arena); } else if (NCH(exc) == 2) { expr_ty expression; asdl_seq *suite_seq; expression = ast_for_expr(c, CHILD(exc, 1)); if (!expression) return NULL; suite_seq = ast_for_suite(c, body); if (!suite_seq) return NULL; return ExceptHandler(expression, NULL, suite_seq, LINENO(exc), exc->n_col_offset, c->c_arena); } else if (NCH(exc) == 4) { asdl_seq *suite_seq; expr_ty expression; identifier e = NEW_IDENTIFIER(CHILD(exc, 3)); if (!e) return NULL; if (forbidden_name(c, e, CHILD(exc, 3), 0)) return NULL; expression = ast_for_expr(c, CHILD(exc, 1)); if (!expression) return NULL; suite_seq = ast_for_suite(c, body); if (!suite_seq) return NULL; return ExceptHandler(expression, e, suite_seq, LINENO(exc), exc->n_col_offset, c->c_arena); } PyErr_Format(PyExc_SystemError, "wrong number of children for 'except' clause: %d", NCH(exc)); return NULL; } static stmt_ty ast_for_try_stmt(struct compiling *c, const node *n) { const int nch = NCH(n); int n_except = (nch - 
3)/3; asdl_seq *body, *handlers = NULL, *orelse = NULL, *finally = NULL; REQ(n, try_stmt); body = ast_for_suite(c, CHILD(n, 2)); if (body == NULL) return NULL; if (TYPE(CHILD(n, nch - 3)) == NAME) { if (strcmp(STR(CHILD(n, nch - 3)), "finally") == 0) { if (nch >= 9 && TYPE(CHILD(n, nch - 6)) == NAME) { /* we can assume it's an "else", because nch >= 9 for try-else-finally and it would otherwise have a type of except_clause */ orelse = ast_for_suite(c, CHILD(n, nch - 4)); if (orelse == NULL) return NULL; n_except--; } finally = ast_for_suite(c, CHILD(n, nch - 1)); if (finally == NULL) return NULL; n_except--; } else { /* we can assume it's an "else", otherwise it would have a type of except_clause */ orelse = ast_for_suite(c, CHILD(n, nch - 1)); if (orelse == NULL) return NULL; n_except--; } } else if (TYPE(CHILD(n, nch - 3)) != except_clause) { ast_error(c, n, "malformed 'try' statement"); return NULL; } if (n_except > 0) { int i; /* process except statements to create a try ... except */ handlers = _Ta3_asdl_seq_new(n_except, c->c_arena); if (handlers == NULL) return NULL; for (i = 0; i < n_except; i++) { excepthandler_ty e = ast_for_except_clause(c, CHILD(n, 3 + i * 3), CHILD(n, 5 + i * 3)); if (!e) return NULL; asdl_seq_SET(handlers, i, e); } } assert(finally != NULL || asdl_seq_LEN(handlers)); return Try(body, handlers, orelse, finally, LINENO(n), n->n_col_offset, c->c_arena); } /* with_item: test ['as' expr] */ static withitem_ty ast_for_with_item(struct compiling *c, const node *n) { expr_ty context_expr, optional_vars = NULL; REQ(n, with_item); context_expr = ast_for_expr(c, CHILD(n, 0)); if (!context_expr) return NULL; if (NCH(n) == 3) { optional_vars = ast_for_expr(c, CHILD(n, 2)); if (!optional_vars) { return NULL; } if (!set_context(c, optional_vars, Store, n)) { return NULL; } } return withitem(context_expr, optional_vars, c->c_arena); } /* with_stmt: 'with' with_item (',' with_item)* ':' [TYPE_COMMENT] suite */ static stmt_ty ast_for_with_stmt(struct 
compiling *c, const node *n, int is_async) { int i, n_items, nch_minus_type, has_type_comment; asdl_seq *items, *body; string type_comment; if (is_async && c->c_feature_version < 5) { ast_error(c, n, "Async with statements are only supported in Python 3.5 and greater"); return NULL; } REQ(n, with_stmt); has_type_comment = TYPE(CHILD(n, NCH(n) - 2)) == TYPE_COMMENT; nch_minus_type = NCH(n) - has_type_comment; n_items = (nch_minus_type - 2) / 2; items = _Ta3_asdl_seq_new(n_items, c->c_arena); if (!items) return NULL; for (i = 1; i < nch_minus_type - 2; i += 2) { withitem_ty item = ast_for_with_item(c, CHILD(n, i)); if (!item) return NULL; asdl_seq_SET(items, (i - 1) / 2, item); } body = ast_for_suite(c, CHILD(n, NCH(n) - 1)); if (!body) return NULL; if (has_type_comment) type_comment = NEW_TYPE_COMMENT(CHILD(n, NCH(n) - 2)); else type_comment = NULL; if (is_async) return AsyncWith(items, body, type_comment, LINENO(n), n->n_col_offset, c->c_arena); else return With(items, body, type_comment, LINENO(n), n->n_col_offset, c->c_arena); } static stmt_ty ast_for_classdef(struct compiling *c, const node *n, asdl_seq *decorator_seq) { /* classdef: 'class' NAME ['(' arglist ')'] ':' suite */ PyObject *classname; asdl_seq *s; expr_ty call; REQ(n, classdef); if (NCH(n) == 4) { /* class NAME ':' suite */ s = ast_for_suite(c, CHILD(n, 3)); if (!s) return NULL; classname = NEW_IDENTIFIER(CHILD(n, 1)); if (!classname) return NULL; if (forbidden_name(c, classname, CHILD(n, 3), 0)) return NULL; return ClassDef(classname, NULL, NULL, s, decorator_seq, LINENO(n), n->n_col_offset, c->c_arena); } if (TYPE(CHILD(n, 3)) == RPAR) { /* class NAME '(' ')' ':' suite */ s = ast_for_suite(c, CHILD(n,5)); if (!s) return NULL; classname = NEW_IDENTIFIER(CHILD(n, 1)); if (!classname) return NULL; if (forbidden_name(c, classname, CHILD(n, 3), 0)) return NULL; return ClassDef(classname, NULL, NULL, s, decorator_seq, LINENO(n), n->n_col_offset, c->c_arena); } /* class NAME '(' arglist ')' ':' suite */ 
/* build up a fake Call node so we can extract its pieces */ { PyObject *dummy_name; expr_ty dummy; dummy_name = NEW_IDENTIFIER(CHILD(n, 1)); if (!dummy_name) return NULL; dummy = Name(dummy_name, Load, LINENO(n), n->n_col_offset, c->c_arena); call = ast_for_call(c, CHILD(n, 3), dummy); if (!call) return NULL; } s = ast_for_suite(c, CHILD(n, 6)); if (!s) return NULL; classname = NEW_IDENTIFIER(CHILD(n, 1)); if (!classname) return NULL; if (forbidden_name(c, classname, CHILD(n, 1), 0)) return NULL; return ClassDef(classname, call->v.Call.args, call->v.Call.keywords, s, decorator_seq, LINENO(n), n->n_col_offset, c->c_arena); } static stmt_ty ast_for_stmt(struct compiling *c, const node *n) { if (TYPE(n) == stmt) { assert(NCH(n) == 1); n = CHILD(n, 0); } if (TYPE(n) == simple_stmt) { assert(num_stmts(n) == 1); n = CHILD(n, 0); } if (TYPE(n) == small_stmt) { n = CHILD(n, 0); /* small_stmt: expr_stmt | del_stmt | pass_stmt | flow_stmt | import_stmt | global_stmt | nonlocal_stmt | assert_stmt */ switch (TYPE(n)) { case expr_stmt: return ast_for_expr_stmt(c, n); case del_stmt: return ast_for_del_stmt(c, n); case pass_stmt: return Pass(LINENO(n), n->n_col_offset, c->c_arena); case flow_stmt: return ast_for_flow_stmt(c, n); case import_stmt: return ast_for_import_stmt(c, n); case global_stmt: return ast_for_global_stmt(c, n); case nonlocal_stmt: return ast_for_nonlocal_stmt(c, n); case assert_stmt: return ast_for_assert_stmt(c, n); default: PyErr_Format(PyExc_SystemError, "unhandled small_stmt: TYPE=%d NCH=%d\n", TYPE(n), NCH(n)); return NULL; } } else { /* compound_stmt: if_stmt | while_stmt | for_stmt | try_stmt | funcdef | classdef | decorated | async_stmt */ node *ch = CHILD(n, 0); REQ(n, compound_stmt); switch (TYPE(ch)) { case if_stmt: return ast_for_if_stmt(c, ch); case while_stmt: return ast_for_while_stmt(c, ch); case for_stmt: return ast_for_for_stmt(c, ch, 0); case try_stmt: return ast_for_try_stmt(c, ch); case with_stmt: return ast_for_with_stmt(c, ch, 0); case 
funcdef: return ast_for_funcdef(c, ch, NULL); case classdef: return ast_for_classdef(c, ch, NULL); case decorated: return ast_for_decorated(c, ch); case async_stmt: return ast_for_async_stmt(c, ch); default: PyErr_Format(PyExc_SystemError, "unhandled small_stmt: TYPE=%d NCH=%d\n", TYPE(n), NCH(n)); return NULL; } } } static PyObject * parsenumber_raw(struct compiling *c, const char *s) { const char *end; long x; double dx; Py_complex compl; int imflag; assert(s != NULL); errno = 0; end = s + strlen(s) - 1; imflag = *end == 'j' || *end == 'J'; if (s[0] == '0') { x = (long) PyOS_strtoul(s, (char **)&end, 0); if (x < 0 && errno == 0) { return PyLong_FromString(s, (char **)0, 0); } } else x = PyOS_strtol(s, (char **)&end, 0); if (*end == '\0') { if (errno != 0) return PyLong_FromString(s, (char **)0, 0); return PyLong_FromLong(x); } /* XXX Huge floats may silently fail */ if (imflag) { compl.real = 0.; compl.imag = PyOS_string_to_double(s, (char **)&end, NULL); if (compl.imag == -1.0 && PyErr_Occurred()) return NULL; return PyComplex_FromCComplex(compl); } else { dx = PyOS_string_to_double(s, NULL, NULL); if (dx == -1.0 && PyErr_Occurred()) return NULL; return PyFloat_FromDouble(dx); } } static PyObject * parsenumber(struct compiling *c, const char *s) { char *dup, *end; PyObject *res = NULL; assert(s != NULL); if (strchr(s, '_') == NULL) { return parsenumber_raw(c, s); } /* Create a duplicate without underscores. */ dup = PyMem_Malloc(strlen(s) + 1); end = dup; for (; *s; s++) { if (*s != '_') { *end++ = *s; } } *end = '\0'; res = parsenumber_raw(c, dup); PyMem_Free(dup); return res; } static PyObject * decode_utf8(struct compiling *c, const char **sPtr, const char *end) { const char *s, *t; t = s = *sPtr; /* while (s < end && *s != '\\') s++; */ /* inefficient for u".." 
*/ while (s < end && (*s & 0x80)) s++; *sPtr = s; return PyUnicode_DecodeUTF8(t, s - t, NULL); } static PyObject * decode_unicode_with_escapes(struct compiling *c, const node *n, const char *s, size_t len) { PyObject *u; char *buf; char *p; const char *end; /* check for integer overflow */ if (len > SIZE_MAX / 6) return NULL; /* "ä" (2 bytes) may become "\U000000E4" (10 bytes), or 1:5 "\ä" (3 bytes) may become "\u005c\U000000E4" (16 bytes), or ~1:6 */ u = PyBytes_FromStringAndSize((char *)NULL, len * 6); if (u == NULL) return NULL; p = buf = PyBytes_AsString(u); end = s + len; while (s < end) { if (*s == '\\') { *p++ = *s++; if (*s & 0x80) { strcpy(p, "u005c"); p += 5; } } if (*s & 0x80) { /* XXX inefficient */ PyObject *w; int kind; void *data; Py_ssize_t len, i; w = decode_utf8(c, &s, end); if (w == NULL) { Py_DECREF(u); return NULL; } kind = PyUnicode_KIND(w); data = PyUnicode_DATA(w); len = PyUnicode_GET_LENGTH(w); for (i = 0; i < len; i++) { Py_UCS4 chr = PyUnicode_READ(kind, data, i); sprintf(p, "\\U%08x", chr); p += 10; } /* Should be impossible to overflow */ assert(p - buf <= Py_SIZE(u)); Py_DECREF(w); } else { *p++ = *s++; } } len = p - buf; s = buf; return PyUnicode_DecodeUnicodeEscape(s, len, NULL); } static PyObject * decode_bytes_with_escapes(struct compiling *c, const node *n, const char *s, size_t len) { return PyBytes_DecodeEscape(s, len, NULL, 0, NULL); } /* Compile this expression in to an expr_ty. Add parens around the expression, in order to allow leading spaces in the expression. */ static expr_ty fstring_compile_expr(const char *expr_start, const char *expr_end, struct compiling *c, const node *n) { int all_whitespace = 1; int kind; void *data; PyCompilerFlags cf; mod_ty mod; char *str; PyObject *o, *fstring_name; Py_ssize_t len; Py_ssize_t i; assert(expr_end >= expr_start); assert(*(expr_start-1) == '{'); assert(*expr_end == '}' || *expr_end == '!' 
|| *expr_end == ':'); /* We know there are no escapes here, because backslashes are not allowed, and we know it's utf-8 encoded (per PEP 263). But, in order to check that each char is not whitespace, we need to decode it to unicode. Which is unfortunate, but such is life. */ /* If the substring is all whitespace, it's an error. We need to catch this here, and not when we call PyParser_ASTFromString, because turning the expression '' in to '()' would go from being invalid to valid. */ /* Note that this code says an empty string is all whitespace. That's important. There's a test for it: f'{}'. */ o = PyUnicode_DecodeUTF8(expr_start, expr_end-expr_start, NULL); if (o == NULL) return NULL; len = PyUnicode_GET_LENGTH(o); kind = PyUnicode_KIND(o); data = PyUnicode_DATA(o); for (i = 0; i < len; i++) { if (!Py_UNICODE_ISSPACE(PyUnicode_READ(kind, data, i))) { all_whitespace = 0; break; } } Py_DECREF(o); if (all_whitespace) { ast_error(c, n, "f-string: empty expression not allowed"); return NULL; } /* Reuse len to be the length of the utf-8 input string. */ len = expr_end - expr_start; /* Allocate 3 extra bytes: open paren, close paren, null byte. */ str = PyMem_RawMalloc(len + 3); if (str == NULL) return NULL; str[0] = '('; memcpy(str+1, expr_start, len); str[len+1] = ')'; str[len+2] = 0; cf.cf_flags = PyCF_ONLY_AST; fstring_name = PyUnicode_FromString("<fstring>"); mod = string_object_to_c_ast(str, fstring_name, Py_eval_input, &cf, c->c_feature_version, c->c_arena); Py_DECREF(fstring_name); PyMem_RawFree(str); if (!mod) return NULL; return mod->v.Expression.body; } /* Return -1 on error. Return 0 if we reached the end of the literal. Return 1 if we haven't reached the end of the literal, but we want the caller to process the literal up to this point. Used for doubled braces. */ static int fstring_find_literal(const char **str, const char *end, int raw, PyObject **literal, int recurse_lvl, struct compiling *c, const node *n) { /* Get any literal string. 
It ends when we hit an un-doubled left brace (which isn't part of a unicode name escape such as "\N{EULER CONSTANT}"), or the end of the string. */ const char *literal_start = *str; const char *literal_end; int in_named_escape = 0; int result = 0; assert(*literal == NULL); for (; *str < end; (*str)++) { char ch = **str; if (!in_named_escape && ch == '{' && (*str)-literal_start >= 2 && *(*str-2) == '\\' && *(*str-1) == 'N') { in_named_escape = 1; } else if (in_named_escape && ch == '}') { in_named_escape = 0; } else if (ch == '{' || ch == '}') { /* Check for doubled braces, but only at the top level. If we checked at every level, then f'{0:{3}}' would fail with the two closing braces. */ if (recurse_lvl == 0) { if (*str+1 < end && *(*str+1) == ch) { /* We're going to tell the caller that the literal ends here, but that they should continue scanning. But also skip over the second brace when we resume scanning. */ literal_end = *str+1; *str += 2; result = 1; goto done; } /* Where a single '{' is the start of a new expression, a single '}' is not allowed. */ if (ch == '}') { ast_error(c, n, "f-string: single '}' is not allowed"); return -1; } } /* We're either at a '{', which means we're starting another expression; or a '}', which means we're at the end of this f-string (for a nested format_spec). */ break; } } literal_end = *str; assert(*str <= end); assert(*str == end || **str == '{' || **str == '}'); done: if (literal_start != literal_end) { if (raw) *literal = PyUnicode_DecodeUTF8Stateful(literal_start, literal_end-literal_start, NULL, NULL); else *literal = decode_unicode_with_escapes(c, n, literal_start, literal_end-literal_start); if (!*literal) return -1; } return result; } /* Forward declaration because parsing is recursive. */ static expr_ty fstring_parse(const char **str, const char *end, int raw, int recurse_lvl, struct compiling *c, const node *n); /* Parse the f-string at *str, ending at end. We know *str starts an expression (so it must be a '{'). 
Returns the FormattedValue node, which includes the expression, conversion character, and format_spec expression. Note that I don't do a perfect job here: I don't make sure that a closing brace doesn't match an opening paren, for example. It doesn't need to error on all invalid expressions, just correctly find the end of all valid ones. Any errors inside the expression will be caught when we parse it later. */ static int fstring_find_expr(const char **str, const char *end, int raw, int recurse_lvl, expr_ty *expression, struct compiling *c, const node *n) { /* Return -1 on error, else 0. */ const char *expr_start; const char *expr_end; expr_ty simple_expression; expr_ty format_spec = NULL; /* Optional format specifier. */ int conversion = -1; /* The conversion char. -1 if not specified. */ /* 0 if we're not in a string, else the quote char we're trying to match (single or double quote). */ char quote_char = 0; /* If we're inside a string, 1=normal, 3=triple-quoted. */ int string_type = 0; /* Keep track of nesting level for braces/parens/brackets in expressions. */ Py_ssize_t nested_depth = 0; /* Can only nest one level deep. */ if (recurse_lvl >= 2) { ast_error(c, n, "f-string: expressions nested too deeply"); return -1; } /* The first char must be a left brace, or we wouldn't have gotten here. Skip over it. */ assert(**str == '{'); *str += 1; expr_start = *str; for (; *str < end; (*str)++) { char ch; /* Loop invariants. */ assert(nested_depth >= 0); assert(*str >= expr_start && *str < end); if (quote_char) assert(string_type == 1 || string_type == 3); else assert(string_type == 0); ch = **str; /* Nowhere inside an expression is a backslash allowed. */ if (ch == '\\') { /* Error: can't include a backslash character, inside parens or strings or not. */ ast_error(c, n, "f-string expression part " "cannot include a backslash"); return -1; } if (quote_char) { /* We're inside a string. See if we're at the end. 
*/ /* This code needs to implement the same non-error logic as tok_get from tokenizer.c, at the letter_quote label. To actually share that code would be a nightmare. But, it's unlikely to change and is small, so duplicate it here. Note we don't need to catch all of the errors, since they'll be caught when parsing the expression. We just need to match the non-error cases. Thus we can ignore \n in single-quoted strings, for example. Or non-terminated strings. */ if (ch == quote_char) { /* Does this match the string_type (single or triple quoted)? */ if (string_type == 3) { if (*str+2 < end && *(*str+1) == ch && *(*str+2) == ch) { /* We're at the end of a triple quoted string. */ *str += 2; string_type = 0; quote_char = 0; continue; } } else { /* We're at the end of a normal string. */ quote_char = 0; string_type = 0; continue; } } } else if (ch == '\'' || ch == '"') { /* Is this a triple quoted string? */ if (*str+2 < end && *(*str+1) == ch && *(*str+2) == ch) { string_type = 3; *str += 2; } else { /* Start of a normal string. */ string_type = 1; } /* Start looking for the end of the string. */ quote_char = ch; } else if (ch == '[' || ch == '{' || ch == '(') { nested_depth++; } else if (nested_depth != 0 && (ch == ']' || ch == '}' || ch == ')')) { nested_depth--; } else if (ch == '#') { /* Error: can't include a comment character, inside parens or not. */ ast_error(c, n, "f-string expression part cannot include '#'"); return -1; } else if (nested_depth == 0 && (ch == '!' || ch == ':' || ch == '}')) { /* First, test for the special case of "!=". Since '=' is not an allowed conversion character, nothing is lost in this test. */ if (ch == '!' && *str+1 < end && *(*str+1) == '=') { /* This isn't a conversion character, just continue. */ continue; } /* Normal way out of this loop. */ break; } else { /* Just consume this char and loop around. */ } } expr_end = *str; /* If we leave this loop in a string or with mismatched parens, we don't care. 
We'll get a syntax error when compiling the expression. But, we can produce a better error message, so let's just do that.*/ if (quote_char) { ast_error(c, n, "f-string: unterminated string"); return -1; } if (nested_depth) { ast_error(c, n, "f-string: mismatched '(', '{', or '['"); return -1; } if (*str >= end) goto unexpected_end_of_string; /* Compile the expression as soon as possible, so we show errors related to the expression before errors related to the conversion or format_spec. */ simple_expression = fstring_compile_expr(expr_start, expr_end, c, n); if (!simple_expression) return -1; /* Check for a conversion char, if present. */ if (**str == '!') { *str += 1; if (*str >= end) goto unexpected_end_of_string; conversion = **str; *str += 1; /* Validate the conversion. */ if (!(conversion == 's' || conversion == 'r' || conversion == 'a')) { ast_error(c, n, "f-string: invalid conversion character: " "expected 's', 'r', or 'a'"); return -1; } } /* Check for the format spec, if present. */ if (*str >= end) goto unexpected_end_of_string; if (**str == ':') { *str += 1; if (*str >= end) goto unexpected_end_of_string; /* Parse the format spec. */ format_spec = fstring_parse(str, end, raw, recurse_lvl+1, c, n); if (!format_spec) return -1; } if (*str >= end || **str != '}') goto unexpected_end_of_string; /* We're at a right brace. Consume it. */ assert(*str < end); assert(**str == '}'); *str += 1; /* And now create the FormattedValue node that represents this entire expression with the conversion and format spec. */ *expression = FormattedValue(simple_expression, conversion, format_spec, LINENO(n), n->n_col_offset, c->c_arena); if (!*expression) return -1; return 0; unexpected_end_of_string: ast_error(c, n, "f-string: expecting '}'"); return -1; } /* Return -1 on error. Return 0 if we have a literal (possible zero length) and an expression (zero length if at the end of the string. 
Return 1 if we have a literal, but no expression, and we want the caller to call us again. This is used to deal with doubled braces. When called multiple times on the string 'a{{b{0}c', this function will return: 1. the literal 'a{' with no expression, and a return value of 1. Despite the fact that there's no expression, the return value of 1 means we're not finished yet. 2. the literal 'b' and the expression '0', with a return value of 0. The fact that there's an expression means we're not finished. 3. literal 'c' with no expression and a return value of 0. The combination of the return value of 0 with no expression means we're finished. */ static int fstring_find_literal_and_expr(const char **str, const char *end, int raw, int recurse_lvl, PyObject **literal, expr_ty *expression, struct compiling *c, const node *n) { int result; assert(*literal == NULL && *expression == NULL); /* Get any literal string. */ result = fstring_find_literal(str, end, raw, literal, recurse_lvl, c, n); if (result < 0) goto error; assert(result == 0 || result == 1); if (result == 1) /* We have a literal, but don't look at the expression. */ return 1; if (*str >= end || **str == '}') /* We're at the end of the string or the end of a nested f-string: no expression. The top-level error case where we expect to be at the end of the string but we're at a '}' is handled later. */ return 0; /* We must now be the start of an expression, on a '{'. */ assert(**str == '{'); if (fstring_find_expr(str, end, raw, recurse_lvl, expression, c, n) < 0) goto error; return 0; error: Py_CLEAR(*literal); return -1; } #define EXPRLIST_N_CACHED 64 typedef struct { /* Incrementally build an array of expr_ty, so be used in an asdl_seq. Cache some small but reasonably sized number of expr_ty's, and then after that start dynamically allocating, doubling the number allocated each time. Note that the f-string f'{0}a{1}' contains 3 expr_ty's: 2 FormattedValue's, and one Str for the literal 'a'. 
So you add expr_ty's about twice as fast as you add exressions in an f-string. */ Py_ssize_t allocated; /* Number we've allocated. */ Py_ssize_t size; /* Number we've used. */ expr_ty *p; /* Pointer to the memory we're actually using. Will point to 'data' until we start dynamically allocating. */ expr_ty data[EXPRLIST_N_CACHED]; } ExprList; #ifdef NDEBUG #define ExprList_check_invariants(l) #else static void ExprList_check_invariants(ExprList *l) { /* Check our invariants. Make sure this object is "live", and hasn't been deallocated. */ assert(l->size >= 0); assert(l->p != NULL); if (l->size <= EXPRLIST_N_CACHED) assert(l->data == l->p); } #endif static void ExprList_Init(ExprList *l) { l->allocated = EXPRLIST_N_CACHED; l->size = 0; /* Until we start allocating dynamically, p points to data. */ l->p = l->data; ExprList_check_invariants(l); } static int ExprList_Append(ExprList *l, expr_ty exp) { ExprList_check_invariants(l); if (l->size >= l->allocated) { /* We need to alloc (or realloc) the memory. */ Py_ssize_t new_size = l->allocated * 2; /* See if we've ever allocated anything dynamically. */ if (l->p == l->data) { Py_ssize_t i; /* We're still using the cached data. Switch to alloc-ing. */ l->p = PyMem_RawMalloc(sizeof(expr_ty) * new_size); if (!l->p) return -1; /* Copy the cached data into the new buffer. */ for (i = 0; i < l->size; i++) l->p[i] = l->data[i]; } else { /* Just realloc. */ expr_ty *tmp = PyMem_RawRealloc(l->p, sizeof(expr_ty) * new_size); if (!tmp) { PyMem_RawFree(l->p); l->p = NULL; return -1; } l->p = tmp; } l->allocated = new_size; assert(l->allocated == 2 * l->size); } l->p[l->size++] = exp; ExprList_check_invariants(l); return 0; } static void ExprList_Dealloc(ExprList *l) { ExprList_check_invariants(l); /* If there's been an error, or we've never dynamically allocated, do nothing. */ if (!l->p || l->p == l->data) { /* Do nothing. */ } else { /* We have dynamically allocated. Free the memory. 
*/ PyMem_RawFree(l->p); } l->p = NULL; l->size = -1; } static asdl_seq * ExprList_Finish(ExprList *l, PyArena *arena) { asdl_seq *seq; ExprList_check_invariants(l); /* Allocate the asdl_seq and copy the expressions in to it. */ seq = _Ta3_asdl_seq_new(l->size, arena); if (seq) { Py_ssize_t i; for (i = 0; i < l->size; i++) asdl_seq_SET(seq, i, l->p[i]); } ExprList_Dealloc(l); return seq; } /* The FstringParser is designed to add a mix of strings and f-strings, and concat them together as needed. Ultimately, it generates an expr_ty. */ typedef struct { PyObject *last_str; ExprList expr_list; } FstringParser; #ifdef NDEBUG #define FstringParser_check_invariants(state) #else static void FstringParser_check_invariants(FstringParser *state) { if (state->last_str) assert(PyUnicode_CheckExact(state->last_str)); ExprList_check_invariants(&state->expr_list); } #endif static void FstringParser_Init(FstringParser *state) { state->last_str = NULL; ExprList_Init(&state->expr_list); FstringParser_check_invariants(state); } static void FstringParser_Dealloc(FstringParser *state) { FstringParser_check_invariants(state); Py_XDECREF(state->last_str); ExprList_Dealloc(&state->expr_list); } /* Make a Str node, but decref the PyUnicode object being added. */ static expr_ty make_str_node_and_del(PyObject **str, struct compiling *c, const node* n) { PyObject *kind, *s = *str; const char *raw = STR(CHILD(n, 0)); /* currently Python allows up to 2 string modifiers */ char *ch, s_kind[3] = {0, 0, 0}; ch = s_kind; while (*raw && *raw != '\'' && *raw != '"') { *ch++ = *raw++; } kind = PyUnicode_FromString(s_kind); if (!kind) { return NULL; } *str = NULL; assert(PyUnicode_CheckExact(s)); if (PyArena_AddPyObject(c->c_arena, s) < 0) { Py_DECREF(s); return NULL; } return Str(s, kind, LINENO(n), n->n_col_offset, c->c_arena); } /* Add a non-f-string (that is, a regular literal string). str is decref'd. 
*/
static int
FstringParser_ConcatAndDel(FstringParser *state, PyObject *str)
{
    FstringParser_check_invariants(state);
    assert(PyUnicode_CheckExact(str));

    /* Zero-length strings contribute nothing; drop the reference. */
    if (PyUnicode_GET_LENGTH(str) == 0) {
        Py_DECREF(str);
        return 0;
    }

    if (!state->last_str) {
        /* We didn't have a string before, so just remember this one. */
        state->last_str = str;
    } else {
        /* Concatenate this with the previous string. */
        PyUnicode_AppendAndDel(&state->last_str, str);
        if (!state->last_str)
            return -1;
    }
    FstringParser_check_invariants(state);
    return 0;
}

/* Parse an f-string. The f-string is in *str to end, with no
   'f' or quotes.  Returns 0 on success, -1 on error (with the Python
   error set via ast_error). */
static int
FstringParser_ConcatFstring(FstringParser *state, const char **str,
                            const char *end, int raw, int recurse_lvl,
                            struct compiling *c, const node *n)
{
    FstringParser_check_invariants(state);

    /* Parse the f-string. */
    while (1) {
        PyObject *literal = NULL;
        expr_ty expression = NULL;

        /* If there's a zero length literal in front of the
           expression, literal will be NULL. If we're at the end of
           the f-string, expression will be NULL (unless result == 1,
           see below). */
        int result = fstring_find_literal_and_expr(str, end, raw, recurse_lvl,
                                                   &literal, &expression,
                                                   c, n);
        if (result < 0)
            return -1;

        /* Add the literal, if any.  Ownership of the (non-empty)
           literal transfers to state->last_str or ConcatAndDel. */
        if (!literal) {
            /* Do nothing. Just leave last_str alone (and possibly
               NULL). */
        } else if (!state->last_str) {
            state->last_str = literal;
            literal = NULL;
        } else {
            /* We have a literal, concatenate it. */
            assert(PyUnicode_GET_LENGTH(literal) != 0);
            if (FstringParser_ConcatAndDel(state, literal) < 0)
                return -1;
            literal = NULL;
        }
        assert(!state->last_str ||
               PyUnicode_GET_LENGTH(state->last_str) != 0);

        /* We've dealt with the literal now. It can't be leaked on further
           errors. */
        assert(literal == NULL);

        /* See if we should just loop around to get the next literal
           and expression, while ignoring the expression this
           time. This is used for un-doubling braces, as an
           optimization. */
        if (result == 1)
            continue;

        if (!expression)
            /* We're done with this f-string.
            */
            break;

        /* We know we have an expression. Convert any existing string
           to a Str node. */
        if (!state->last_str) {
            /* Do nothing. No previous literal. */
        } else {
            /* Convert the existing last_str literal to a Str node. */
            expr_ty str = make_str_node_and_del(&state->last_str, c, n);
            if (!str || ExprList_Append(&state->expr_list, str) < 0)
                return -1;
        }

        if (ExprList_Append(&state->expr_list, expression) < 0)
            return -1;
    }

    /* If recurse_lvl is zero, then we must be at the end of the
       string. Otherwise, we must be at a right brace. */

    /* NOTE(review): the end-1 here tolerates stopping one character
       short of the end at the top level -- presumably to account for
       the terminator; confirm against callers before tightening. */
    if (recurse_lvl == 0 && *str < end-1) {
        ast_error(c, n, "f-string: unexpected end of string");
        return -1;
    }
    if (recurse_lvl != 0 && **str != '}') {
        ast_error(c, n, "f-string: expecting '}'");
        return -1;
    }

    FstringParser_check_invariants(state);
    return 0;
}

/* Convert the partial state reflected in last_str and expr_list to an
   expr_ty. The expr_ty can be a Str, or a JoinedStr.  On error the
   parser state is deallocated and NULL is returned. */
static expr_ty
FstringParser_Finish(FstringParser *state, struct compiling *c,
                     const node *n)
{
    asdl_seq *seq;

    FstringParser_check_invariants(state);

    /* If we're just a constant string with no expressions, return
       that. */
    if(state->expr_list.size == 0) {
        if (!state->last_str) {
            /* Create a zero length string. */
            state->last_str = PyUnicode_FromStringAndSize(NULL, 0);
            if (!state->last_str)
                goto error;
        }
        return make_str_node_and_del(&state->last_str, c, n);
    }

    /* Create a Str node out of last_str, if needed. It will be the
       last node in our expression list. */
    if (state->last_str) {
        expr_ty str = make_str_node_and_del(&state->last_str, c, n);
        if (!str || ExprList_Append(&state->expr_list, str) < 0)
            goto error;
    }
    /* This has already been freed. */
    assert(state->last_str == NULL);

    seq = ExprList_Finish(&state->expr_list, c->c_arena);
    if (!seq)
        goto error;

    /* If there's only one expression, return it. Otherwise, we need
       to join them together.
     */
    if (seq->size == 1)
        return seq->elements[0];

    return JoinedStr(seq, LINENO(n), n->n_col_offset, c->c_arena);

error:
    FstringParser_Dealloc(state);
    return NULL;
}

/* Given an f-string (with no 'f' or quotes) that's in *str and ends
   at end, parse it into an expr_ty.  Return NULL on error.  Adjust
   str to point past the parsed portion. */
static expr_ty
fstring_parse(const char **str, const char *end, int raw, int recurse_lvl,
              struct compiling *c, const node *n)
{
    FstringParser state;

    FstringParser_Init(&state);
    if (FstringParser_ConcatFstring(&state, str, end, raw, recurse_lvl,
                                    c, n) < 0) {
        FstringParser_Dealloc(&state);
        return NULL;
    }

    return FstringParser_Finish(&state, c, n);
}

/* n is a Python string literal, including the bracketing quote
   characters, and r, b, u, &/or f prefixes (if any), and embedded
   escape sequences (if any). parsestr parses it, and sets *result to
   decoded Python string object.  If the string is an f-string, set
   *fstr and *fstrlen to the unparsed string object.  Return 0 if no
   errors occurred.
*/
static int
parsestr(struct compiling *c, const node *n, int *bytesmode, int *rawmode,
         PyObject **result, const char **fstr, Py_ssize_t *fstrlen)
{
    size_t len;
    const char *s = STR(n);
    int quote = Py_CHARMASK(*s);
    int fmode = 0;
    *bytesmode = 0;
    *rawmode = 0;
    *result = NULL;
    *fstr = NULL;
    /* Consume prefix characters (b/B, u/U, r/R, f/F) until we hit the
       opening quote or an unknown character.  The loop condition only
       stops early once both 'b' and 'r' have been seen; otherwise it
       exits via the break. */
    if (Py_ISALPHA(quote)) {
        while (!*bytesmode || !*rawmode) {
            if (quote == 'b' || quote == 'B') {
                quote = *++s;
                *bytesmode = 1;
            }
            else if (quote == 'u' || quote == 'U') {
                quote = *++s;
            }
            else if (quote == 'r' || quote == 'R') {
                quote = *++s;
                *rawmode = 1;
            }
            else if (quote == 'f' || quote == 'F') {
                quote = *++s;
                fmode = 1;
            }
            else {
                break;
            }
        }
    }

    /* fstrings are only allowed in Python 3.6 and greater */
    if (fmode && c->c_feature_version < 6) {
        ast_error(c, n, "Format strings are only supported in Python 3.6 and greater");
        return -1;
    }

    if (fmode && *bytesmode) {
        PyErr_BadInternalCall();
        return -1;
    }
    if (quote != '\'' && quote != '\"') {
        PyErr_BadInternalCall();
        return -1;
    }
    /* Skip the leading quote char. */
    s++;
    len = strlen(s);
    if (len > INT_MAX) {
        PyErr_SetString(PyExc_OverflowError,
                        "string to parse is too long");
        return -1;
    }
    /* NOTE(review): assumes the tokenizer guarantees a closing quote,
       i.e. len >= 1 here; with len == 0 the s[--len] read would be
       out of bounds -- confirm against callers. */
    if (s[--len] != quote) {
        /* Last quote char must match the first. */
        PyErr_BadInternalCall();
        return -1;
    }
    if (len >= 4 && s[0] == quote && s[1] == quote) {
        /* A triple quoted string. We've already skipped one quote at
           the start and one at the end of the string. Now skip the
           two at the start. */
        s += 2;
        len -= 2;

        /* And check that the last two match. */
        if (s[--len] != quote || s[--len] != quote) {
            PyErr_BadInternalCall();
            return -1;
        }
    }

    if (fmode) {
        /* Just return the bytes. The caller will parse the resulting
           string. */
        *fstr = s;
        *fstrlen = len;
        return 0;
    }

    /* Not an f-string. */
    /* Avoid invoking escape decoding routines if possible. */
    *rawmode = *rawmode || strchr(s, '\\') == NULL;
    if (*bytesmode) {
        /* Disallow non-ASCII characters.
        */
        const char *ch;
        for (ch = s; *ch; ch++) {
            if (Py_CHARMASK(*ch) >= 0x80) {
                ast_error(c, n, "bytes can only contain ASCII "
                          "literal characters.");
                return -1;
            }
        }
        if (*rawmode)
            *result = PyBytes_FromStringAndSize(s, len);
        else
            *result = decode_bytes_with_escapes(c, n, s, len);
    } else {
        if (*rawmode)
            *result = PyUnicode_DecodeUTF8Stateful(s, len, NULL, NULL);
        else
            *result = decode_unicode_with_escapes(c, n, s, len);
    }
    return *result == NULL ? -1 : 0;
}

/* Accepts a STRING+ atom, and produces an expr_ty node. Run through
   each STRING atom, and process it as needed. For bytes, just
   concatenate them together, and the result will be a Bytes node. For
   normal strings and f-strings, concatenate them together. The result
   will be a Str node if there were no f-strings; a FormattedValue
   node if there's just an f-string (with no leading or trailing
   literals), or a JoinedStr node if there are multiple f-strings or
   any literals involved. */
static expr_ty
parsestrplus(struct compiling *c, const node *n)
{
    int bytesmode = 0;
    PyObject *bytes_str = NULL;
    int i;

    FstringParser state;
    FstringParser_Init(&state);

    for (i = 0; i < NCH(n); i++) {
        int this_bytesmode;
        int this_rawmode;
        PyObject *s;
        const char *fstr;
        Py_ssize_t fstrlen = -1;  /* Silence a compiler warning. */

        REQ(CHILD(n, i), STRING);
        if (parsestr(c, CHILD(n, i), &this_bytesmode, &this_rawmode, &s,
                     &fstr, &fstrlen) != 0)
            goto error;

        /* Check that we're not mixing bytes with unicode. */
        if (i != 0 && bytesmode != this_bytesmode) {
            ast_error(c, n, "cannot mix bytes and nonbytes literals");
            /* s is NULL if the current string part is an f-string. */
            Py_XDECREF(s);
            goto error;
        }
        bytesmode = this_bytesmode;

        if (fstr != NULL) {
            int result;
            assert(s == NULL && !bytesmode);
            /* This is an f-string. Parse and concatenate it. */
            result = FstringParser_ConcatFstring(&state, &fstr, fstr+fstrlen,
                                                 this_rawmode, 0, c, n);
            if (result < 0)
                goto error;
        } else {
            /* A string or byte string. */
            assert(s != NULL && fstr == NULL);

            assert(bytesmode ?
                   PyBytes_CheckExact(s) :
                   PyUnicode_CheckExact(s));

            if (bytesmode) {
                /* For bytes, concat as we go. */
                if (i == 0) {
                    /* First time, just remember this value. */
                    bytes_str = s;
                } else {
                    PyBytes_ConcatAndDel(&bytes_str, s);
                    if (!bytes_str)
                        goto error;
                }
            } else {
                /* This is a regular string. Concatenate it. */
                if (FstringParser_ConcatAndDel(&state, s) < 0)
                    goto error;
            }
        }
    }
    if (bytesmode) {
        /* Just return the bytes object and we're done. */
        if (PyArena_AddPyObject(c->c_arena, bytes_str) < 0)
            goto error;
        return Bytes(bytes_str, LINENO(n), n->n_col_offset, c->c_arena);
    }

    /* We're not a bytes string, bytes_str should never have been set. */
    assert(bytes_str == NULL);

    return FstringParser_Finish(&state, c, n);

error:
    Py_XDECREF(bytes_str);
    FstringParser_Dealloc(&state);
    return NULL;
}
/*
 * This file includes functions to transform a concrete syntax tree
 * (CST) to an abstract syntax tree (AST). The main function is
 * Ta3AST_FromNode().
 *
 */
#include "Python.h"
#include "Python-ast.h"
#include "node.h"
#include "ast.h"
#include "token.h"
#include "pythonrun.h"

#include <assert.h>

// VS 2010 doesn't have <stdbool.h>...
typedef int bool;
#define false 0
#define true 1

/* Fallback for Python builds that don't expose _PyObject_FastCall:
   pack the argument array into a tuple and use PyObject_CallObject. */
#ifndef _PyObject_FastCall
static PyObject *
_PyObject_FastCall(PyObject *func, PyObject *const *args, int nargs)
{
    PyObject *t, *res;
    int i;

    t = PyTuple_New(nargs);
    if (t == NULL) {
        return NULL;
    }
    for (i = 0; i < nargs; i++) {
        if (PyTuple_SetItem(t, i, args[i]) < 0) {
            Py_DECREF(t);
            return NULL;
        }
    }
    res = PyObject_CallObject(func, t);
    Py_DECREF(t);
    return res;
}
#endif

/* Compatibility shims for Python < 3.6: emulate the newer decode
   entry points that also report the first invalid escape (which the
   older APIs cannot do, so it is always reported as NULL). */
#if PY_MINOR_VERSION < 6
#define _PyUnicode_EqualToASCIIString(a, b) (PyUnicode_CompareWithASCIIString((a), (b)) == 0)

static PyObject *
_PyBytes_DecodeEscape(const char *s, Py_ssize_t len, const char *errors, Py_ssize_t unicode, const char *recode_encoding, const char **first_invalid_escape)
{
    *first_invalid_escape = NULL;
    return PyBytes_DecodeEscape(s, len, errors, unicode, recode_encoding);
}

PyObject *
_PyUnicode_DecodeUnicodeEscape(const char *s, Py_ssize_t size, const char *errors, const char **first_invalid_escape)
{
    *first_invalid_escape = NULL;
    return PyUnicode_DecodeUnicodeEscape(s, size, errors);
}
#endif

/* AST validators: each returns 1 if the (sub)tree is well-formed and
   0 after setting a Python exception otherwise. */
static int validate_stmts(asdl_seq *);
static int validate_exprs(asdl_seq *, expr_context_ty, int);
static int validate_nonempty_seq(asdl_seq *, const char *, const char *);
static int validate_stmt(stmt_ty);
static int validate_expr(expr_ty, expr_context_ty);

mod_ty
string_object_to_c_ast(const char *s, PyObject *filename, int start,
                       PyCompilerFlags *flags, int feature_version,
                       PyArena *arena);

static int
validate_comprehension(asdl_seq *gens)
{
    int i;
    /* A comprehension must have at least one 'for' generator. */
    if (!asdl_seq_LEN(gens)) {
        PyErr_SetString(PyExc_ValueError, "comprehension with no generators");
        return 0;
    }
    for (i = 0; i < asdl_seq_LEN(gens); i++) {
        comprehension_ty comp = asdl_seq_GET(gens, i);
        if (!validate_expr(comp->target, Store) ||
            !validate_expr(comp->iter, Load) ||
            !validate_exprs(comp->ifs, Load, 0))
            return 0;
    }
    return 1;
}

static int
validate_slice(slice_ty slice)
{
    switch (slice->kind) {
    case Slice_kind:
        /* lower/upper/step are each optional. */
        return (!slice->v.Slice.lower || validate_expr(slice->v.Slice.lower, Load)) &&
            (!slice->v.Slice.upper || validate_expr(slice->v.Slice.upper, Load)) &&
            (!slice->v.Slice.step || validate_expr(slice->v.Slice.step, Load));
    case ExtSlice_kind: {
        int i;
        if (!validate_nonempty_seq(slice->v.ExtSlice.dims, "dims", "ExtSlice"))
            return 0;
        for (i = 0; i < asdl_seq_LEN(slice->v.ExtSlice.dims); i++)
            if (!validate_slice(asdl_seq_GET(slice->v.ExtSlice.dims, i)))
                return 0;
        return 1;
    }
    case Index_kind:
        return validate_expr(slice->v.Index.value, Load);
    default:
        PyErr_SetString(PyExc_SystemError, "unknown slice node");
        return 0;
    }
}

static int
validate_keywords(asdl_seq *keywords)
{
    int i;
    for (i = 0; i < asdl_seq_LEN(keywords); i++)
        if (!validate_expr(((keyword_ty)asdl_seq_GET(keywords, i))->value, Load))
            return 0;
    return 1;
}

static int
validate_args(asdl_seq *args)
{
    int i;
    for (i = 0; i < asdl_seq_LEN(args); i++) {
        arg_ty arg = asdl_seq_GET(args, i);
        if (arg->annotation && !validate_expr(arg->annotation, Load))
            return 0;
    }
    return 1;
}

/* Human-readable name of an expression context, for error messages. */
static const char *
expr_context_name(expr_context_ty ctx)
{
    switch (ctx) {
    case Load:
        return "Load";
    case Store:
        return "Store";
    case Del:
        return "Del";
    case AugLoad:
        return "AugLoad";
    case AugStore:
        return "AugStore";
    case Param:
        return "Param";
    default:
        abort();
    }
}

static int
validate_arguments(arguments_ty args)
{
    if (!validate_args(args->args))
        return 0;
    if (args->vararg && args->vararg->annotation
        && !validate_expr(args->vararg->annotation, Load)) {
            return 0;
    }
    if (!validate_args(args->kwonlyargs))
        return 0;
    if (args->kwarg && args->kwarg->annotation
        && !validate_expr(args->kwarg->annotation, Load)) {
            return 0;
    }
    if (asdl_seq_LEN(args->defaults) > asdl_seq_LEN(args->args)) {
        PyErr_SetString(PyExc_ValueError, "more positional defaults than args on arguments");
        return 0;
    }
    /* kw_defaults is parallel to kwonlyargs (NULL entries mean "no
       default"), so the lengths must match exactly. */
    if (asdl_seq_LEN(args->kw_defaults) != asdl_seq_LEN(args->kwonlyargs)) {
        PyErr_SetString(PyExc_ValueError, "length of kwonlyargs is not the same as "
                        "kw_defaults on arguments");
        return 0;
    }
    return validate_exprs(args->defaults, Load, 0) &&
        validate_exprs(args->kw_defaults, Load, 1);
}

/* Recursively check that a Constant node's payload is one of the
   allowed immutable types (including tuples/frozensets thereof). */
static int
validate_constant(PyObject *value)
{
    if (value == Py_None || value == Py_Ellipsis)
        return 1;

    if (PyLong_CheckExact(value)
            || PyFloat_CheckExact(value)
            || PyComplex_CheckExact(value)
            || PyBool_Check(value)
            || PyUnicode_CheckExact(value)
            || PyBytes_CheckExact(value))
        return 1;

    if (PyTuple_CheckExact(value) || PyFrozenSet_CheckExact(value)) {
        PyObject *it;

        it = PyObject_GetIter(value);
        if (it == NULL)
            return 0;

        while (1) {
            PyObject *item = PyIter_Next(it);
            if (item == NULL) {
                if (PyErr_Occurred()) {
                    Py_DECREF(it);
                    return 0;
                }
                break;
            }

            if (!validate_constant(item)) {
                Py_DECREF(it);
                Py_DECREF(item);
                return 0;
            }
            Py_DECREF(item);
        }

        Py_DECREF(it);
        return 1;
    }

    return 0;
}

static int
validate_expr(expr_ty exp, expr_context_ty ctx)
{
    int check_ctx = 1;
    expr_context_ty actual_ctx;

    /* First check expression context.
*/ switch (exp->kind) { case Attribute_kind: actual_ctx = exp->v.Attribute.ctx; break; case Subscript_kind: actual_ctx = exp->v.Subscript.ctx; break; case Starred_kind: actual_ctx = exp->v.Starred.ctx; break; case Name_kind: actual_ctx = exp->v.Name.ctx; break; case List_kind: actual_ctx = exp->v.List.ctx; break; case Tuple_kind: actual_ctx = exp->v.Tuple.ctx; break; default: if (ctx != Load) { PyErr_Format(PyExc_ValueError, "expression which can't be " "assigned to in %s context", expr_context_name(ctx)); return 0; } check_ctx = 0; /* set actual_ctx to prevent gcc warning */ actual_ctx = 0; } if (check_ctx && actual_ctx != ctx) { PyErr_Format(PyExc_ValueError, "expression must have %s context but has %s instead", expr_context_name(ctx), expr_context_name(actual_ctx)); return 0; } /* Now validate expression. */ switch (exp->kind) { case BoolOp_kind: if (asdl_seq_LEN(exp->v.BoolOp.values) < 2) { PyErr_SetString(PyExc_ValueError, "BoolOp with less than 2 values"); return 0; } return validate_exprs(exp->v.BoolOp.values, Load, 0); case BinOp_kind: return validate_expr(exp->v.BinOp.left, Load) && validate_expr(exp->v.BinOp.right, Load); case UnaryOp_kind: return validate_expr(exp->v.UnaryOp.operand, Load); case Lambda_kind: return validate_arguments(exp->v.Lambda.args) && validate_expr(exp->v.Lambda.body, Load); case IfExp_kind: return validate_expr(exp->v.IfExp.test, Load) && validate_expr(exp->v.IfExp.body, Load) && validate_expr(exp->v.IfExp.orelse, Load); case Dict_kind: if (asdl_seq_LEN(exp->v.Dict.keys) != asdl_seq_LEN(exp->v.Dict.values)) { PyErr_SetString(PyExc_ValueError, "Dict doesn't have the same number of keys as values"); return 0; } /* null_ok=1 for keys expressions to allow dict unpacking to work in dict literals, i.e. 
``{**{a:b}}`` */ return validate_exprs(exp->v.Dict.keys, Load, /*null_ok=*/ 1) && validate_exprs(exp->v.Dict.values, Load, /*null_ok=*/ 0); case Set_kind: return validate_exprs(exp->v.Set.elts, Load, 0); #define COMP(NAME) \ case NAME ## _kind: \ return validate_comprehension(exp->v.NAME.generators) && \ validate_expr(exp->v.NAME.elt, Load); COMP(ListComp) COMP(SetComp) COMP(GeneratorExp) #undef COMP case DictComp_kind: return validate_comprehension(exp->v.DictComp.generators) && validate_expr(exp->v.DictComp.key, Load) && validate_expr(exp->v.DictComp.value, Load); case Yield_kind: return !exp->v.Yield.value || validate_expr(exp->v.Yield.value, Load); case YieldFrom_kind: return validate_expr(exp->v.YieldFrom.value, Load); case Await_kind: return validate_expr(exp->v.Await.value, Load); case Compare_kind: if (!asdl_seq_LEN(exp->v.Compare.comparators)) { PyErr_SetString(PyExc_ValueError, "Compare with no comparators"); return 0; } if (asdl_seq_LEN(exp->v.Compare.comparators) != asdl_seq_LEN(exp->v.Compare.ops)) { PyErr_SetString(PyExc_ValueError, "Compare has a different number " "of comparators and operands"); return 0; } return validate_exprs(exp->v.Compare.comparators, Load, 0) && validate_expr(exp->v.Compare.left, Load); case Call_kind: return validate_expr(exp->v.Call.func, Load) && validate_exprs(exp->v.Call.args, Load, 0) && validate_keywords(exp->v.Call.keywords); case Constant_kind: if (!validate_constant(exp->v.Constant.value)) { PyErr_Format(PyExc_TypeError, "got an invalid type in Constant: %s", Py_TYPE(exp->v.Constant.value)->tp_name); return 0; } return 1; case Num_kind: { PyObject *n = exp->v.Num.n; if (!PyLong_CheckExact(n) && !PyFloat_CheckExact(n) && !PyComplex_CheckExact(n)) { PyErr_SetString(PyExc_TypeError, "non-numeric type in Num"); return 0; } return 1; } case Str_kind: { PyObject *s = exp->v.Str.s; if (!PyUnicode_CheckExact(s)) { PyErr_SetString(PyExc_TypeError, "non-string type in Str"); return 0; } return 1; } case JoinedStr_kind: return 
validate_exprs(exp->v.JoinedStr.values, Load, 0); case FormattedValue_kind: if (validate_expr(exp->v.FormattedValue.value, Load) == 0) return 0; if (exp->v.FormattedValue.format_spec) return validate_expr(exp->v.FormattedValue.format_spec, Load); return 1; case Bytes_kind: { PyObject *b = exp->v.Bytes.s; if (!PyBytes_CheckExact(b)) { PyErr_SetString(PyExc_TypeError, "non-bytes type in Bytes"); return 0; } return 1; } case Attribute_kind: return validate_expr(exp->v.Attribute.value, Load); case Subscript_kind: return validate_slice(exp->v.Subscript.slice) && validate_expr(exp->v.Subscript.value, Load); case Starred_kind: return validate_expr(exp->v.Starred.value, ctx); case List_kind: return validate_exprs(exp->v.List.elts, ctx, 0); case Tuple_kind: return validate_exprs(exp->v.Tuple.elts, ctx, 0); /* These last cases don't have any checking. */ case Name_kind: case NameConstant_kind: case Ellipsis_kind: return 1; default: PyErr_SetString(PyExc_SystemError, "unexpected expression"); return 0; } } static int validate_nonempty_seq(asdl_seq *seq, const char *what, const char *owner) { if (asdl_seq_LEN(seq)) return 1; PyErr_Format(PyExc_ValueError, "empty %s on %s", what, owner); return 0; } static int validate_assignlist(asdl_seq *targets, expr_context_ty ctx) { return validate_nonempty_seq(targets, "targets", ctx == Del ? 
"Delete" : "Assign") && validate_exprs(targets, ctx, 0); } static int validate_body(asdl_seq *body, const char *owner) { return validate_nonempty_seq(body, "body", owner) && validate_stmts(body); } static int validate_stmt(stmt_ty stmt) { int i; switch (stmt->kind) { case FunctionDef_kind: return validate_body(stmt->v.FunctionDef.body, "FunctionDef") && validate_arguments(stmt->v.FunctionDef.args) && validate_exprs(stmt->v.FunctionDef.decorator_list, Load, 0) && (!stmt->v.FunctionDef.returns || validate_expr(stmt->v.FunctionDef.returns, Load)); case ClassDef_kind: return validate_body(stmt->v.ClassDef.body, "ClassDef") && validate_exprs(stmt->v.ClassDef.bases, Load, 0) && validate_keywords(stmt->v.ClassDef.keywords) && validate_exprs(stmt->v.ClassDef.decorator_list, Load, 0); case Return_kind: return !stmt->v.Return.value || validate_expr(stmt->v.Return.value, Load); case Delete_kind: return validate_assignlist(stmt->v.Delete.targets, Del); case Assign_kind: return validate_assignlist(stmt->v.Assign.targets, Store) && validate_expr(stmt->v.Assign.value, Load); case AugAssign_kind: return validate_expr(stmt->v.AugAssign.target, Store) && validate_expr(stmt->v.AugAssign.value, Load); case AnnAssign_kind: if (stmt->v.AnnAssign.target->kind != Name_kind && stmt->v.AnnAssign.simple) { PyErr_SetString(PyExc_TypeError, "AnnAssign with simple non-Name target"); return 0; } return validate_expr(stmt->v.AnnAssign.target, Store) && (!stmt->v.AnnAssign.value || validate_expr(stmt->v.AnnAssign.value, Load)) && validate_expr(stmt->v.AnnAssign.annotation, Load); case For_kind: return validate_expr(stmt->v.For.target, Store) && validate_expr(stmt->v.For.iter, Load) && validate_body(stmt->v.For.body, "For") && validate_stmts(stmt->v.For.orelse); case AsyncFor_kind: return validate_expr(stmt->v.AsyncFor.target, Store) && validate_expr(stmt->v.AsyncFor.iter, Load) && validate_body(stmt->v.AsyncFor.body, "AsyncFor") && validate_stmts(stmt->v.AsyncFor.orelse); case While_kind: return 
validate_expr(stmt->v.While.test, Load) && validate_body(stmt->v.While.body, "While") && validate_stmts(stmt->v.While.orelse); case If_kind: return validate_expr(stmt->v.If.test, Load) && validate_body(stmt->v.If.body, "If") && validate_stmts(stmt->v.If.orelse); case With_kind: if (!validate_nonempty_seq(stmt->v.With.items, "items", "With")) return 0; for (i = 0; i < asdl_seq_LEN(stmt->v.With.items); i++) { withitem_ty item = asdl_seq_GET(stmt->v.With.items, i); if (!validate_expr(item->context_expr, Load) || (item->optional_vars && !validate_expr(item->optional_vars, Store))) return 0; } return validate_body(stmt->v.With.body, "With"); case AsyncWith_kind: if (!validate_nonempty_seq(stmt->v.AsyncWith.items, "items", "AsyncWith")) return 0; for (i = 0; i < asdl_seq_LEN(stmt->v.AsyncWith.items); i++) { withitem_ty item = asdl_seq_GET(stmt->v.AsyncWith.items, i); if (!validate_expr(item->context_expr, Load) || (item->optional_vars && !validate_expr(item->optional_vars, Store))) return 0; } return validate_body(stmt->v.AsyncWith.body, "AsyncWith"); case Raise_kind: if (stmt->v.Raise.exc) { return validate_expr(stmt->v.Raise.exc, Load) && (!stmt->v.Raise.cause || validate_expr(stmt->v.Raise.cause, Load)); } if (stmt->v.Raise.cause) { PyErr_SetString(PyExc_ValueError, "Raise with cause but no exception"); return 0; } return 1; case Try_kind: if (!validate_body(stmt->v.Try.body, "Try")) return 0; if (!asdl_seq_LEN(stmt->v.Try.handlers) && !asdl_seq_LEN(stmt->v.Try.finalbody)) { PyErr_SetString(PyExc_ValueError, "Try has neither except handlers nor finalbody"); return 0; } if (!asdl_seq_LEN(stmt->v.Try.handlers) && asdl_seq_LEN(stmt->v.Try.orelse)) { PyErr_SetString(PyExc_ValueError, "Try has orelse but no except handlers"); return 0; } for (i = 0; i < asdl_seq_LEN(stmt->v.Try.handlers); i++) { excepthandler_ty handler = asdl_seq_GET(stmt->v.Try.handlers, i); if ((handler->v.ExceptHandler.type && !validate_expr(handler->v.ExceptHandler.type, Load)) || 
!validate_body(handler->v.ExceptHandler.body, "ExceptHandler")) return 0; } return (!asdl_seq_LEN(stmt->v.Try.finalbody) || validate_stmts(stmt->v.Try.finalbody)) && (!asdl_seq_LEN(stmt->v.Try.orelse) || validate_stmts(stmt->v.Try.orelse)); case Assert_kind: return validate_expr(stmt->v.Assert.test, Load) && (!stmt->v.Assert.msg || validate_expr(stmt->v.Assert.msg, Load)); case Import_kind: return validate_nonempty_seq(stmt->v.Import.names, "names", "Import"); case ImportFrom_kind: if (stmt->v.ImportFrom.level < 0) { PyErr_SetString(PyExc_ValueError, "Negative ImportFrom level"); return 0; } return validate_nonempty_seq(stmt->v.ImportFrom.names, "names", "ImportFrom"); case Global_kind: return validate_nonempty_seq(stmt->v.Global.names, "names", "Global"); case Nonlocal_kind: return validate_nonempty_seq(stmt->v.Nonlocal.names, "names", "Nonlocal"); case Expr_kind: return validate_expr(stmt->v.Expr.value, Load); case AsyncFunctionDef_kind: return validate_body(stmt->v.AsyncFunctionDef.body, "AsyncFunctionDef") && validate_arguments(stmt->v.AsyncFunctionDef.args) && validate_exprs(stmt->v.AsyncFunctionDef.decorator_list, Load, 0) && (!stmt->v.AsyncFunctionDef.returns || validate_expr(stmt->v.AsyncFunctionDef.returns, Load)); case Pass_kind: case Break_kind: case Continue_kind: return 1; default: PyErr_SetString(PyExc_SystemError, "unexpected statement"); return 0; } } static int validate_stmts(asdl_seq *seq) { int i; for (i = 0; i < asdl_seq_LEN(seq); i++) { stmt_ty stmt = asdl_seq_GET(seq, i); if (stmt) { if (!validate_stmt(stmt)) return 0; } else { PyErr_SetString(PyExc_ValueError, "None disallowed in statement list"); return 0; } } return 1; } static int validate_exprs(asdl_seq *exprs, expr_context_ty ctx, int null_ok) { int i; for (i = 0; i < asdl_seq_LEN(exprs); i++) { expr_ty expr = asdl_seq_GET(exprs, i); if (expr) { if (!validate_expr(expr, ctx)) return 0; } else if (!null_ok) { PyErr_SetString(PyExc_ValueError, "None disallowed in expression list"); return 
0; } } return 1; } int Ta3AST_Validate(mod_ty mod) { int res = 0; switch (mod->kind) { case Module_kind: res = validate_stmts(mod->v.Module.body); break; case Interactive_kind: res = validate_stmts(mod->v.Interactive.body); break; case Expression_kind: res = validate_expr(mod->v.Expression.body, Load); break; case Suite_kind: PyErr_SetString(PyExc_ValueError, "Suite is not valid in the CPython compiler"); break; default: PyErr_SetString(PyExc_SystemError, "impossible module node"); res = 0; break; } return res; } /* This is done here, so defines like "test" don't interfere with AST use above. */ #include "grammar.h" #include "parsetok.h" #include "graminit.h" /* Data structure used internally */ struct compiling { PyArena *c_arena; /* Arena for allocating memory. */ PyObject *c_filename; /* filename */ PyObject *c_normalize; /* Normalization function from unicodedata. */ int c_feature_version; /* Latest minior version of Python for allowed features */ }; static asdl_seq *seq_for_testlist(struct compiling *, const node *); static expr_ty ast_for_expr(struct compiling *, const node *); static stmt_ty ast_for_stmt(struct compiling *, const node *); static asdl_seq *ast_for_suite(struct compiling *c, const node *n); static asdl_seq *ast_for_exprlist(struct compiling *, const node *, expr_context_ty); static expr_ty ast_for_testlist(struct compiling *, const node *); static stmt_ty ast_for_classdef(struct compiling *, const node *, asdl_seq *); static stmt_ty ast_for_with_stmt(struct compiling *, const node *, bool); static stmt_ty ast_for_for_stmt(struct compiling *, const node *, bool); /* Note different signature for ast_for_call */ static expr_ty ast_for_call(struct compiling *, const node *, expr_ty, bool); static PyObject *parsenumber(struct compiling *, const char *); static expr_ty parsestrplus(struct compiling *, const node *n); #define COMP_GENEXP 0 #define COMP_LISTCOMP 1 #define COMP_SETCOMP 2 static int init_normalization(struct compiling *c) { PyObject *m 
= PyImport_ImportModuleNoBlock("unicodedata"); if (!m) return 0; c->c_normalize = PyObject_GetAttrString(m, "normalize"); Py_DECREF(m); if (!c->c_normalize) return 0; return 1; } static identifier new_identifier(const char *n, struct compiling *c) { PyObject *id = PyUnicode_DecodeUTF8(n, strlen(n), NULL); if (!id) return NULL; /* PyUnicode_DecodeUTF8 should always return a ready string. */ assert(PyUnicode_IS_READY(id)); /* Check whether there are non-ASCII characters in the identifier; if so, normalize to NFKC. */ if (!PyUnicode_IS_ASCII(id)) { PyObject *id2; PyObject *form; PyObject *args[2]; _Py_IDENTIFIER(NFKC); if (!c->c_normalize && !init_normalization(c)) { Py_DECREF(id); return NULL; } form = _PyUnicode_FromId(&PyId_NFKC); if (form == NULL) { Py_DECREF(id); return NULL; } args[0] = form; args[1] = id; id2 = _PyObject_FastCall(c->c_normalize, args, 2); Py_DECREF(id); if (!id2) return NULL; if (!PyUnicode_Check(id2)) { PyErr_Format(PyExc_TypeError, "unicodedata.normalize() must return a string, not " "%.200s", Py_TYPE(id2)->tp_name); Py_DECREF(id2); return NULL; } id = id2; } PyUnicode_InternInPlace(&id); if (PyArena_AddPyObject(c->c_arena, id) < 0) { Py_DECREF(id); return NULL; } return id; } #define NEW_IDENTIFIER(n) new_identifier(STR(n), c) static string new_type_comment(const char *s, struct compiling *c) { return PyUnicode_DecodeUTF8(s, strlen(s), NULL); } #define NEW_TYPE_COMMENT(n) new_type_comment(STR(n), c) static int ast_error(struct compiling *c, const node *n, const char *errmsg) { PyObject *value, *errstr, *loc, *tmp; loc = PyErr_ProgramTextObject(c->c_filename, LINENO(n)); if (!loc) { Py_INCREF(Py_None); loc = Py_None; } tmp = Py_BuildValue("(OiiN)", c->c_filename, LINENO(n), n->n_col_offset, loc); if (!tmp) return 0; errstr = PyUnicode_FromString(errmsg); if (!errstr) { Py_DECREF(tmp); return 0; } value = PyTuple_Pack(2, errstr, tmp); Py_DECREF(errstr); Py_DECREF(tmp); if (value) { PyErr_SetObject(PyExc_SyntaxError, value); Py_DECREF(value); } 
return 0; } /* num_stmts() returns number of contained statements. Use this routine to determine how big a sequence is needed for the statements in a parse tree. Its raison d'etre is this bit of grammar: stmt: simple_stmt | compound_stmt simple_stmt: small_stmt (';' small_stmt)* [';'] NEWLINE A simple_stmt can contain multiple small_stmt elements joined by semicolons. If the arg is a simple_stmt, the number of small_stmt elements is returned. */ static int num_stmts(const node *n) { int i, l; node *ch; switch (TYPE(n)) { case single_input: if (TYPE(CHILD(n, 0)) == NEWLINE) return 0; else return num_stmts(CHILD(n, 0)); case file_input: l = 0; for (i = 0; i < NCH(n); i++) { ch = CHILD(n, i); if (TYPE(ch) == stmt) l += num_stmts(ch); } return l; case stmt: return num_stmts(CHILD(n, 0)); case compound_stmt: return 1; case simple_stmt: return NCH(n) / 2; /* Divide by 2 to remove count of semi-colons */ case suite: /* suite: simple_stmt | NEWLINE [TYPE_COMMENT NEWLINE] INDENT stmt+ DEDENT */ if (NCH(n) == 1) return num_stmts(CHILD(n, 0)); else { i = 2; l = 0; if (TYPE(CHILD(n, 1)) == TYPE_COMMENT) i += 2; for (; i < (NCH(n) - 1); i++) l += num_stmts(CHILD(n, i)); return l; } default: { char buf[128]; sprintf(buf, "Non-statement found: %d %d", TYPE(n), NCH(n)); Py_FatalError(buf); } } abort(); } /* Transform the CST rooted at node * to the appropriate AST */ mod_ty Ta3AST_FromNodeObject(const node *n, PyCompilerFlags *flags, PyObject *filename, int feature_version, PyArena *arena) { int i, j, k, num; asdl_seq *stmts = NULL; asdl_seq *type_ignores = NULL; stmt_ty s; node *ch; struct compiling c; mod_ty res = NULL; asdl_seq *argtypes = NULL; expr_ty ret, arg; c.c_arena = arena; /* borrowed reference */ c.c_filename = filename; c.c_normalize = NULL; c.c_feature_version = feature_version; if (TYPE(n) == encoding_decl) n = CHILD(n, 0); k = 0; switch (TYPE(n)) { case file_input: stmts = _Ta3_asdl_seq_new(num_stmts(n), arena); if (!stmts) goto out; for (i = 0; i < NCH(n) - 1; 
i++) { ch = CHILD(n, i); if (TYPE(ch) == NEWLINE) continue; REQ(ch, stmt); num = num_stmts(ch); if (num == 1) { s = ast_for_stmt(&c, ch); if (!s) goto out; asdl_seq_SET(stmts, k++, s); } else { ch = CHILD(ch, 0); REQ(ch, simple_stmt); for (j = 0; j < num; j++) { s = ast_for_stmt(&c, CHILD(ch, j * 2)); if (!s) goto out; asdl_seq_SET(stmts, k++, s); } } } /* Type ignores are stored under the ENDMARKER in file_input. */ ch = CHILD(n, NCH(n) - 1); REQ(ch, ENDMARKER); num = NCH(ch); type_ignores = _Ta3_asdl_seq_new(num, arena); if (!type_ignores) goto out; for (i = 0; i < num; i++) { type_ignore_ty ti = TypeIgnore(LINENO(CHILD(ch, i)), arena); if (!ti) goto out; asdl_seq_SET(type_ignores, i, ti); } res = Module(stmts, type_ignores, arena); break; case eval_input: { expr_ty testlist_ast; /* XXX Why not comp_for here? */ testlist_ast = ast_for_testlist(&c, CHILD(n, 0)); if (!testlist_ast) goto out; res = Expression(testlist_ast, arena); break; } case single_input: if (TYPE(CHILD(n, 0)) == NEWLINE) { stmts = _Ta3_asdl_seq_new(1, arena); if (!stmts) goto out; asdl_seq_SET(stmts, 0, Pass(n->n_lineno, n->n_col_offset, arena)); if (!asdl_seq_GET(stmts, 0)) goto out; res = Interactive(stmts, arena); } else { n = CHILD(n, 0); num = num_stmts(n); stmts = _Ta3_asdl_seq_new(num, arena); if (!stmts) goto out; if (num == 1) { s = ast_for_stmt(&c, n); if (!s) goto out; asdl_seq_SET(stmts, 0, s); } else { /* Only a simple_stmt can contain multiple statements. 
*/ REQ(n, simple_stmt); for (i = 0; i < NCH(n); i += 2) { if (TYPE(CHILD(n, i)) == NEWLINE) break; s = ast_for_stmt(&c, CHILD(n, i)); if (!s) goto out; asdl_seq_SET(stmts, i / 2, s); } } res = Interactive(stmts, arena); } break; case func_type_input: n = CHILD(n, 0); REQ(n, func_type); if (TYPE(CHILD(n, 1)) == typelist) { ch = CHILD(n, 1); /* this is overly permissive -- we don't pay any attention to * stars on the args -- just parse them into an ordered list */ num = 0; for (i = 0; i < NCH(ch); i++) { if (TYPE(CHILD(ch, i)) == test) num++; } argtypes = _Ta3_asdl_seq_new(num, arena); j = 0; for (i = 0; i < NCH(ch); i++) { if (TYPE(CHILD(ch, i)) == test) { arg = ast_for_expr(&c, CHILD(ch, i)); if (!arg) goto out; asdl_seq_SET(argtypes, j++, arg); } } } else argtypes = _Ta3_asdl_seq_new(0, arena); ret = ast_for_expr(&c, CHILD(n, NCH(n) - 1)); if (!ret) goto out; res = FunctionType(argtypes, ret, arena); break; default: PyErr_Format(PyExc_SystemError, "invalid node %d for Ta3AST_FromNode", TYPE(n)); goto out; } out: if (c.c_normalize) { Py_DECREF(c.c_normalize); } return res; } mod_ty Ta3AST_FromNode(const node *n, PyCompilerFlags *flags, const char *filename_str, int feature_version, PyArena *arena) { mod_ty mod; PyObject *filename; filename = PyUnicode_DecodeFSDefault(filename_str); if (filename == NULL) return NULL; mod = Ta3AST_FromNodeObject(n, flags, filename, feature_version, arena); Py_DECREF(filename); return mod; } /* Return the AST repr. of the operator represented as syntax (|, ^, etc.) 
*/ static operator_ty get_operator(struct compiling *c, const node *n) { switch (TYPE(n)) { case VBAR: return BitOr; case CIRCUMFLEX: return BitXor; case AMPER: return BitAnd; case LEFTSHIFT: return LShift; case RIGHTSHIFT: return RShift; case PLUS: return Add; case MINUS: return Sub; case STAR: return Mult; case AT: if (c->c_feature_version < 5) { ast_error(c, n, "The '@' operator is only supported in Python 3.5 and greater"); return (operator_ty)0; } return MatMult; case SLASH: return Div; case DOUBLESLASH: return FloorDiv; case PERCENT: return Mod; default: return (operator_ty)0; } } static const char * const FORBIDDEN[] = { "None", "True", "False", NULL, }; static int forbidden_name(struct compiling *c, identifier name, const node *n, int full_checks) { assert(PyUnicode_Check(name)); if (_PyUnicode_EqualToASCIIString(name, "__debug__")) { ast_error(c, n, "assignment to keyword"); return 1; } if (full_checks) { const char * const *p; for (p = FORBIDDEN; *p; p++) { if (_PyUnicode_EqualToASCIIString(name, *p)) { ast_error(c, n, "assignment to keyword"); return 1; } } } return 0; } /* Set the context ctx for expr_ty e, recursively traversing e. Only sets context for expr kinds that "can appear in assignment context" (according to ../Parser/Python.asdl). For other expr kinds, it sets an appropriate syntax error and returns false. */ static int set_context(struct compiling *c, expr_ty e, expr_context_ty ctx, const node *n) { asdl_seq *s = NULL; /* If a particular expression type can't be used for assign / delete, set expr_name to its name and an error message will be generated. */ const char* expr_name = NULL; /* The ast defines augmented store and load contexts, but the implementation here doesn't actually use them. The code may be a little more complex than necessary as a result. It also means that expressions in an augmented assignment have a Store context. Consider restructuring so that augmented assignment uses set_context(), too. 
*/ assert(ctx != AugStore && ctx != AugLoad); switch (e->kind) { case Attribute_kind: e->v.Attribute.ctx = ctx; if (ctx == Store && forbidden_name(c, e->v.Attribute.attr, n, 1)) return 0; break; case Subscript_kind: e->v.Subscript.ctx = ctx; break; case Starred_kind: e->v.Starred.ctx = ctx; if (!set_context(c, e->v.Starred.value, ctx, n)) return 0; break; case Name_kind: if (ctx == Store) { if (forbidden_name(c, e->v.Name.id, n, 0)) return 0; /* forbidden_name() calls ast_error() */ } e->v.Name.ctx = ctx; break; case List_kind: e->v.List.ctx = ctx; s = e->v.List.elts; break; case Tuple_kind: e->v.Tuple.ctx = ctx; s = e->v.Tuple.elts; break; case Lambda_kind: expr_name = "lambda"; break; case Call_kind: expr_name = "function call"; break; case BoolOp_kind: case BinOp_kind: case UnaryOp_kind: expr_name = "operator"; break; case GeneratorExp_kind: expr_name = "generator expression"; break; case Yield_kind: case YieldFrom_kind: expr_name = "yield expression"; break; case Await_kind: expr_name = "await expression"; break; case ListComp_kind: expr_name = "list comprehension"; break; case SetComp_kind: expr_name = "set comprehension"; break; case DictComp_kind: expr_name = "dict comprehension"; break; case Dict_kind: case Set_kind: case Num_kind: case Str_kind: case Bytes_kind: case JoinedStr_kind: case FormattedValue_kind: expr_name = "literal"; break; case NameConstant_kind: expr_name = "keyword"; break; case Ellipsis_kind: expr_name = "Ellipsis"; break; case Compare_kind: expr_name = "comparison"; break; case IfExp_kind: expr_name = "conditional expression"; break; default: PyErr_Format(PyExc_SystemError, "unexpected expression in assignment %d (line %d)", e->kind, e->lineno); return 0; } /* Check for error string set by switch */ if (expr_name) { char buf[300]; PyOS_snprintf(buf, sizeof(buf), "can't %s %s", ctx == Store ? 
"assign to" : "delete", expr_name); return ast_error(c, n, buf); } /* If the LHS is a list or tuple, we need to set the assignment context for all the contained elements. */ if (s) { int i; for (i = 0; i < asdl_seq_LEN(s); i++) { if (!set_context(c, (expr_ty)asdl_seq_GET(s, i), ctx, n)) return 0; } } return 1; } static operator_ty ast_for_augassign(struct compiling *c, const node *n) { REQ(n, augassign); n = CHILD(n, 0); switch (STR(n)[0]) { case '+': return Add; case '-': return Sub; case '/': if (STR(n)[1] == '/') return FloorDiv; else return Div; case '%': return Mod; case '<': return LShift; case '>': return RShift; case '&': return BitAnd; case '^': return BitXor; case '|': return BitOr; case '*': if (STR(n)[1] == '*') return Pow; else return Mult; case '@': if (c->c_feature_version < 5) { ast_error(c, n, "The '@' operator is only supported in Python 3.5 and greater"); return (operator_ty)0; } return MatMult; default: PyErr_Format(PyExc_SystemError, "invalid augassign: %s", STR(n)); return (operator_ty)0; } } static cmpop_ty ast_for_comp_op(struct compiling *c, const node *n) { /* comp_op: '<'|'>'|'=='|'>='|'<='|'!='|'in'|'not' 'in'|'is' |'is' 'not' */ REQ(n, comp_op); if (NCH(n) == 1) { n = CHILD(n, 0); switch (TYPE(n)) { case LESS: return Lt; case GREATER: return Gt; case EQEQUAL: /* == */ return Eq; case LESSEQUAL: return LtE; case GREATEREQUAL: return GtE; case NOTEQUAL: return NotEq; case NAME: if (strcmp(STR(n), "in") == 0) return In; if (strcmp(STR(n), "is") == 0) return Is; /* fall through */ default: PyErr_Format(PyExc_SystemError, "invalid comp_op: %s", STR(n)); return (cmpop_ty)0; } } else if (NCH(n) == 2) { /* handle "not in" and "is not" */ switch (TYPE(CHILD(n, 0))) { case NAME: if (strcmp(STR(CHILD(n, 1)), "in") == 0) return NotIn; if (strcmp(STR(CHILD(n, 0)), "is") == 0) return IsNot; /* fall through */ default: PyErr_Format(PyExc_SystemError, "invalid comp_op: %s %s", STR(CHILD(n, 0)), STR(CHILD(n, 1))); return (cmpop_ty)0; } } 
PyErr_Format(PyExc_SystemError, "invalid comp_op: has %d children", NCH(n)); return (cmpop_ty)0; } static asdl_seq * seq_for_testlist(struct compiling *c, const node *n) { /* testlist: test (',' test)* [','] testlist_star_expr: test|star_expr (',' test|star_expr)* [','] */ asdl_seq *seq; expr_ty expression; int i; assert(TYPE(n) == testlist || TYPE(n) == testlist_star_expr || TYPE(n) == testlist_comp); seq = _Ta3_asdl_seq_new((NCH(n) + 1) / 2, c->c_arena); if (!seq) return NULL; for (i = 0; i < NCH(n); i += 2) { const node *ch = CHILD(n, i); assert(TYPE(ch) == test || TYPE(ch) == test_nocond || TYPE(ch) == star_expr); expression = ast_for_expr(c, ch); if (!expression) return NULL; assert(i / 2 < seq->size); asdl_seq_SET(seq, i / 2, expression); } return seq; } static arg_ty ast_for_arg(struct compiling *c, const node *n) { identifier name; expr_ty annotation = NULL; node *ch; arg_ty ret; assert(TYPE(n) == tfpdef || TYPE(n) == vfpdef); ch = CHILD(n, 0); name = NEW_IDENTIFIER(ch); if (!name) return NULL; if (forbidden_name(c, name, ch, 0)) return NULL; if (NCH(n) == 3 && TYPE(CHILD(n, 1)) == COLON) { annotation = ast_for_expr(c, CHILD(n, 2)); if (!annotation) return NULL; } ret = arg(name, annotation, NULL, LINENO(n), n->n_col_offset, c->c_arena); if (!ret) return NULL; return ret; } /* returns -1 if failed to handle keyword only arguments returns new position to keep processing if successful (',' tfpdef ['=' test])* ^^^ start pointing here */ static int handle_keywordonly_args(struct compiling *c, const node *n, int start, asdl_seq *kwonlyargs, asdl_seq *kwdefaults) { PyObject *argname; node *ch; expr_ty expression, annotation; arg_ty arg; int i = start; int j = 0; /* index for kwdefaults and kwonlyargs */ if (kwonlyargs == NULL) { ast_error(c, CHILD(n, start), "named arguments must follow bare *"); return -1; } assert(kwdefaults != NULL); while (i < NCH(n)) { ch = CHILD(n, i); switch (TYPE(ch)) { case vfpdef: case tfpdef: if (i + 1 < NCH(n) && TYPE(CHILD(n, i + 1)) 
== EQUAL) { expression = ast_for_expr(c, CHILD(n, i + 2)); if (!expression) goto error; asdl_seq_SET(kwdefaults, j, expression); i += 2; /* '=' and test */ } else { /* setting NULL if no default value exists */ asdl_seq_SET(kwdefaults, j, NULL); } if (NCH(ch) == 3) { /* ch is NAME ':' test */ annotation = ast_for_expr(c, CHILD(ch, 2)); if (!annotation) goto error; } else { annotation = NULL; } ch = CHILD(ch, 0); argname = NEW_IDENTIFIER(ch); if (!argname) goto error; if (forbidden_name(c, argname, ch, 0)) goto error; arg = arg(argname, annotation, NULL, LINENO(ch), ch->n_col_offset, c->c_arena); if (!arg) goto error; asdl_seq_SET(kwonlyargs, j++, arg); i += 1; /* the name */ if (TYPE(CHILD(n, i)) == COMMA) i += 1; /* the comma, if present */ break; case TYPE_COMMENT: /* arg will be equal to the last argument processed */ arg->type_comment = NEW_TYPE_COMMENT(ch); i += 1; break; case DOUBLESTAR: return i; default: ast_error(c, ch, "unexpected node"); goto error; } } return i; error: return -1; } /* Create AST for argument list. */ static arguments_ty ast_for_arguments(struct compiling *c, const node *n) { /* This function handles both typedargslist (function definition) and varargslist (lambda definition). 
parameters: '(' [typedargslist] ')' typedargslist: (tfpdef ['=' test] (',' tfpdef ['=' test])* [',' [ '*' [tfpdef] (',' tfpdef ['=' test])* [',' ['**' tfpdef [',']]] | '**' tfpdef [',']]] | '*' [tfpdef] (',' tfpdef ['=' test])* [',' ['**' tfpdef [',']]] | '**' tfpdef [',']) tfpdef: NAME [':' test] varargslist: (vfpdef ['=' test] (',' vfpdef ['=' test])* [',' [ '*' [vfpdef] (',' vfpdef ['=' test])* [',' ['**' vfpdef [',']]] | '**' vfpdef [',']]] | '*' [vfpdef] (',' vfpdef ['=' test])* [',' ['**' vfpdef [',']]] | '**' vfpdef [','] ) vfpdef: NAME */ int i, j, k, nposargs = 0, nkwonlyargs = 0; int nposdefaults = 0, found_default = 0; asdl_seq *posargs, *posdefaults, *kwonlyargs, *kwdefaults; arg_ty vararg = NULL, kwarg = NULL; arg_ty arg; node *ch; if (TYPE(n) == parameters) { if (NCH(n) == 2) /* () as argument list */ return arguments(NULL, NULL, NULL, NULL, NULL, NULL, c->c_arena); n = CHILD(n, 1); } assert(TYPE(n) == typedargslist || TYPE(n) == varargslist); /* First count the number of positional args & defaults. The variable i is the loop index for this for loop and the next. The next loop picks up where the first leaves off. */ for (i = 0; i < NCH(n); i++) { ch = CHILD(n, i); if (TYPE(ch) == STAR) { /* skip star */ i++; if (i < NCH(n) && /* skip argument following star */ (TYPE(CHILD(n, i)) == tfpdef || TYPE(CHILD(n, i)) == vfpdef)) { i++; } break; } if (TYPE(ch) == DOUBLESTAR) break; if (TYPE(ch) == vfpdef || TYPE(ch) == tfpdef) nposargs++; if (TYPE(ch) == EQUAL) nposdefaults++; } /* count the number of keyword only args & defaults for keyword only args */ for ( ; i < NCH(n); ++i) { ch = CHILD(n, i); if (TYPE(ch) == DOUBLESTAR) break; if (TYPE(ch) == tfpdef || TYPE(ch) == vfpdef) nkwonlyargs++; } posargs = (nposargs ? _Ta3_asdl_seq_new(nposargs, c->c_arena) : NULL); if (!posargs && nposargs) return NULL; kwonlyargs = (nkwonlyargs ? _Ta3_asdl_seq_new(nkwonlyargs, c->c_arena) : NULL); if (!kwonlyargs && nkwonlyargs) return NULL; posdefaults = (nposdefaults ? 
_Ta3_asdl_seq_new(nposdefaults, c->c_arena) : NULL); if (!posdefaults && nposdefaults) return NULL; /* The length of kwonlyargs and kwdefaults are same since we set NULL as default for keyword only argument w/o default - we have sequence data structure, but no dictionary */ kwdefaults = (nkwonlyargs ? _Ta3_asdl_seq_new(nkwonlyargs, c->c_arena) : NULL); if (!kwdefaults && nkwonlyargs) return NULL; /* tfpdef: NAME [':' test] vfpdef: NAME */ i = 0; j = 0; /* index for defaults */ k = 0; /* index for args */ while (i < NCH(n)) { ch = CHILD(n, i); switch (TYPE(ch)) { case tfpdef: case vfpdef: /* XXX Need to worry about checking if TYPE(CHILD(n, i+1)) is anything other than EQUAL or a comma? */ /* XXX Should NCH(n) check be made a separate check? */ if (i + 1 < NCH(n) && TYPE(CHILD(n, i + 1)) == EQUAL) { expr_ty expression = ast_for_expr(c, CHILD(n, i + 2)); if (!expression) return NULL; assert(posdefaults != NULL); asdl_seq_SET(posdefaults, j++, expression); i += 2; found_default = 1; } else if (found_default) { ast_error(c, n, "non-default argument follows default argument"); return NULL; } arg = ast_for_arg(c, ch); if (!arg) return NULL; asdl_seq_SET(posargs, k++, arg); i += 1; /* the name */ if (TYPE(CHILD(n, i)) == COMMA) i += 1; /* the comma, if present */ break; case STAR: if (i+1 >= NCH(n) || (i+2 == NCH(n) && (TYPE(CHILD(n, i+1)) == COMMA || TYPE(CHILD(n, i+1)) == TYPE_COMMENT))) { ast_error(c, CHILD(n, i), "named arguments must follow bare *"); return NULL; } ch = CHILD(n, i+1); /* tfpdef or COMMA */ if (TYPE(ch) == COMMA) { int res = 0; i += 2; /* now follows keyword only arguments */ if (TYPE(CHILD(n, i)) == TYPE_COMMENT) { ast_error(c, CHILD(n, i), "bare * has associated type comment"); return NULL; } res = handle_keywordonly_args(c, n, i, kwonlyargs, kwdefaults); if (res == -1) return NULL; i = res; /* res has new position to process */ } else { vararg = ast_for_arg(c, ch); if (!vararg) return NULL; i += 2; /* the star and the name */ if (TYPE(CHILD(n, i)) 
== COMMA) i += 1; /* the comma, if present */ if (TYPE(CHILD(n, i)) == TYPE_COMMENT) { vararg->type_comment = NEW_TYPE_COMMENT(CHILD(n, i)); i += 1; } if (i < NCH(n) && (TYPE(CHILD(n, i)) == tfpdef || TYPE(CHILD(n, i)) == vfpdef)) { int res = 0; res = handle_keywordonly_args(c, n, i, kwonlyargs, kwdefaults); if (res == -1) return NULL; i = res; /* res has new position to process */ } } break; case DOUBLESTAR: ch = CHILD(n, i+1); /* tfpdef */ assert(TYPE(ch) == tfpdef || TYPE(ch) == vfpdef); kwarg = ast_for_arg(c, ch); if (!kwarg) return NULL; i += 2; /* the double star and the name */ if (TYPE(CHILD(n, i)) == COMMA) i += 1; /* the comma, if present */ break; case TYPE_COMMENT: assert(i); if (kwarg) arg = kwarg; /* arg will be equal to the last argument processed */ arg->type_comment = NEW_TYPE_COMMENT(ch); i += 1; break; default: PyErr_Format(PyExc_SystemError, "unexpected node in varargslist: %d @ %d", TYPE(ch), i); return NULL; } } return arguments(posargs, vararg, kwonlyargs, kwdefaults, kwarg, posdefaults, c->c_arena); } static expr_ty ast_for_dotted_name(struct compiling *c, const node *n) { expr_ty e; identifier id; int lineno, col_offset; int i; REQ(n, dotted_name); lineno = LINENO(n); col_offset = n->n_col_offset; id = NEW_IDENTIFIER(CHILD(n, 0)); if (!id) return NULL; e = Name(id, Load, lineno, col_offset, c->c_arena); if (!e) return NULL; for (i = 2; i < NCH(n); i+=2) { id = NEW_IDENTIFIER(CHILD(n, i)); if (!id) return NULL; e = Attribute(e, id, Load, lineno, col_offset, c->c_arena); if (!e) return NULL; } return e; } static expr_ty ast_for_decorator(struct compiling *c, const node *n) { /* decorator: '@' dotted_name [ '(' [arglist] ')' ] NEWLINE */ expr_ty d = NULL; expr_ty name_expr; REQ(n, decorator); REQ(CHILD(n, 0), AT); REQ(RCHILD(n, -1), NEWLINE); name_expr = ast_for_dotted_name(c, CHILD(n, 1)); if (!name_expr) return NULL; if (NCH(n) == 3) { /* No arguments */ d = name_expr; name_expr = NULL; } else if (NCH(n) == 5) { /* Call with no arguments */ 
d = Call(name_expr, NULL, NULL, LINENO(n), n->n_col_offset, c->c_arena); if (!d) return NULL; name_expr = NULL; } else { d = ast_for_call(c, CHILD(n, 3), name_expr, true); if (!d) return NULL; name_expr = NULL; } return d; } static asdl_seq* ast_for_decorators(struct compiling *c, const node *n) { asdl_seq* decorator_seq; expr_ty d; int i; REQ(n, decorators); decorator_seq = _Ta3_asdl_seq_new(NCH(n), c->c_arena); if (!decorator_seq) return NULL; for (i = 0; i < NCH(n); i++) { d = ast_for_decorator(c, CHILD(n, i)); if (!d) return NULL; asdl_seq_SET(decorator_seq, i, d); } return decorator_seq; } static stmt_ty ast_for_funcdef_impl(struct compiling *c, const node *n0, asdl_seq *decorator_seq, bool is_async) { /* funcdef: 'def' NAME parameters ['->' test] ':' [TYPE_COMMENT] suite */ const node * const n = is_async ? CHILD(n0, 1) : n0; identifier name; arguments_ty args; asdl_seq *body; expr_ty returns = NULL; int name_i = 1; node *tc; string type_comment = NULL; if (is_async && c->c_feature_version < 5) { ast_error(c, n, "Async functions are only supported in Python 3.5 and greater"); return NULL; } REQ(n, funcdef); name = NEW_IDENTIFIER(CHILD(n, name_i)); if (!name) return NULL; if (forbidden_name(c, name, CHILD(n, name_i), 0)) return NULL; args = ast_for_arguments(c, CHILD(n, name_i + 1)); if (!args) return NULL; if (TYPE(CHILD(n, name_i+2)) == RARROW) { returns = ast_for_expr(c, CHILD(n, name_i + 3)); if (!returns) return NULL; name_i += 2; } if (TYPE(CHILD(n, name_i + 3)) == TYPE_COMMENT) { type_comment = NEW_TYPE_COMMENT(CHILD(n, name_i + 3)); name_i += 1; } body = ast_for_suite(c, CHILD(n, name_i + 3)); if (!body) return NULL; if (!type_comment && NCH(CHILD(n, name_i + 3)) > 1) { /* If the function doesn't have a type comment on the same line, check * if the suite has a type comment in it. 
*/ tc = CHILD(CHILD(n, name_i + 3), 1); if (TYPE(tc) == TYPE_COMMENT) type_comment = NEW_TYPE_COMMENT(tc); } if (is_async) return AsyncFunctionDef(name, args, body, decorator_seq, returns, type_comment, LINENO(n0), n0->n_col_offset, c->c_arena); else return FunctionDef(name, args, body, decorator_seq, returns, type_comment, LINENO(n), n->n_col_offset, c->c_arena); } static stmt_ty ast_for_async_funcdef(struct compiling *c, const node *n, asdl_seq *decorator_seq) { /* async_funcdef: 'async' funcdef */ REQ(n, async_funcdef); REQ(CHILD(n, 0), NAME); assert(strcmp(STR(CHILD(n, 0)), "async") == 0); REQ(CHILD(n, 1), funcdef); return ast_for_funcdef_impl(c, n, decorator_seq, true /* is_async */); } static stmt_ty ast_for_funcdef(struct compiling *c, const node *n, asdl_seq *decorator_seq) { /* funcdef: 'def' NAME parameters ['->' test] ':' suite */ return ast_for_funcdef_impl(c, n, decorator_seq, false /* is_async */); } static stmt_ty ast_for_async_stmt(struct compiling *c, const node *n) { /* async_stmt: 'async' (funcdef | with_stmt | for_stmt) */ REQ(n, async_stmt); REQ(CHILD(n, 0), NAME); assert(strcmp(STR(CHILD(n, 0)), "async") == 0); switch (TYPE(CHILD(n, 1))) { case funcdef: return ast_for_funcdef_impl(c, n, NULL, true /* is_async */); case with_stmt: return ast_for_with_stmt(c, n, true /* is_async */); case for_stmt: return ast_for_for_stmt(c, n, true /* is_async */); default: PyErr_Format(PyExc_SystemError, "invalid async stament: %s", STR(CHILD(n, 1))); return NULL; } } static stmt_ty ast_for_decorated(struct compiling *c, const node *n) { /* decorated: decorators (classdef | funcdef | async_funcdef) */ stmt_ty thing = NULL; asdl_seq *decorator_seq = NULL; REQ(n, decorated); decorator_seq = ast_for_decorators(c, CHILD(n, 0)); if (!decorator_seq) return NULL; assert(TYPE(CHILD(n, 1)) == funcdef || TYPE(CHILD(n, 1)) == async_funcdef || TYPE(CHILD(n, 1)) == classdef); if (TYPE(CHILD(n, 1)) == funcdef) { thing = ast_for_funcdef(c, CHILD(n, 1), decorator_seq); } else 
if (TYPE(CHILD(n, 1)) == classdef) { thing = ast_for_classdef(c, CHILD(n, 1), decorator_seq); } else if (TYPE(CHILD(n, 1)) == async_funcdef) { thing = ast_for_async_funcdef(c, CHILD(n, 1), decorator_seq); } /* we count the decorators in when talking about the class' or * function's line number */ if (thing) { thing->lineno = LINENO(n); thing->col_offset = n->n_col_offset; } return thing; } static expr_ty ast_for_lambdef(struct compiling *c, const node *n) { /* lambdef: 'lambda' [varargslist] ':' test lambdef_nocond: 'lambda' [varargslist] ':' test_nocond */ arguments_ty args; expr_ty expression; if (NCH(n) == 3) { args = arguments(NULL, NULL, NULL, NULL, NULL, NULL, c->c_arena); if (!args) return NULL; expression = ast_for_expr(c, CHILD(n, 2)); if (!expression) return NULL; } else { args = ast_for_arguments(c, CHILD(n, 1)); if (!args) return NULL; expression = ast_for_expr(c, CHILD(n, 3)); if (!expression) return NULL; } return Lambda(args, expression, LINENO(n), n->n_col_offset, c->c_arena); } static expr_ty ast_for_ifexpr(struct compiling *c, const node *n) { /* test: or_test 'if' or_test 'else' test */ expr_ty expression, body, orelse; assert(NCH(n) == 5); body = ast_for_expr(c, CHILD(n, 0)); if (!body) return NULL; expression = ast_for_expr(c, CHILD(n, 2)); if (!expression) return NULL; orelse = ast_for_expr(c, CHILD(n, 4)); if (!orelse) return NULL; return IfExp(expression, body, orelse, LINENO(n), n->n_col_offset, c->c_arena); } /* Count the number of 'for' loops in a comprehension. Helper for ast_for_comprehension(). 
*/ static int count_comp_fors(struct compiling *c, const node *n) { int n_fors = 0; count_comp_for: n_fors++; REQ(n, comp_for); if (NCH(n) == 2) { REQ(CHILD(n, 0), NAME); assert(strcmp(STR(CHILD(n, 0)), "async") == 0); n = CHILD(n, 1); } else if (NCH(n) == 1) { n = CHILD(n, 0); } else { goto error; } if (NCH(n) == (5)) { n = CHILD(n, 4); } else { return n_fors; } count_comp_iter: REQ(n, comp_iter); n = CHILD(n, 0); if (TYPE(n) == comp_for) goto count_comp_for; else if (TYPE(n) == comp_if) { if (NCH(n) == 3) { n = CHILD(n, 2); goto count_comp_iter; } else return n_fors; } error: /* Should never be reached */ PyErr_SetString(PyExc_SystemError, "logic error in count_comp_fors"); return -1; } /* Count the number of 'if' statements in a comprehension. Helper for ast_for_comprehension(). */ static int count_comp_ifs(struct compiling *c, const node *n) { int n_ifs = 0; while (1) { REQ(n, comp_iter); if (TYPE(CHILD(n, 0)) == comp_for) return n_ifs; n = CHILD(n, 0); REQ(n, comp_if); n_ifs++; if (NCH(n) == 2) return n_ifs; n = CHILD(n, 2); } } static asdl_seq * ast_for_comprehension(struct compiling *c, const node *n) { int i, n_fors; asdl_seq *comps; n_fors = count_comp_fors(c, n); if (n_fors == -1) return NULL; comps = _Ta3_asdl_seq_new(n_fors, c->c_arena); if (!comps) return NULL; for (i = 0; i < n_fors; i++) { comprehension_ty comp; asdl_seq *t; expr_ty expression, first; node *for_ch; node *sync_n; int is_async = 0; REQ(n, comp_for); if (NCH(n) == 2) { is_async = 1; REQ(CHILD(n, 0), NAME); assert(strcmp(STR(CHILD(n, 0)), "async") == 0); sync_n = CHILD(n, 1); } else { sync_n = CHILD(n, 0); } REQ(sync_n, sync_comp_for); /* Async comprehensions only allowed in Python 3.6 and greater */ if (is_async && c->c_feature_version < 6) { ast_error(c, n, "Async comprehensions are only supported in Python 3.6 and greater"); return NULL; } for_ch = CHILD(sync_n, 1); t = ast_for_exprlist(c, for_ch, Store); if (!t) return NULL; expression = ast_for_expr(c, CHILD(sync_n, 3)); if 
(!expression) return NULL; /* Check the # of children rather than the length of t, since (x for x, in ...) has 1 element in t, but still requires a Tuple. */ first = (expr_ty)asdl_seq_GET(t, 0); if (NCH(for_ch) == 1) comp = comprehension(first, expression, NULL, is_async, c->c_arena); else comp = comprehension(Tuple(t, Store, first->lineno, first->col_offset, c->c_arena), expression, NULL, is_async, c->c_arena); if (!comp) return NULL; if (NCH(sync_n) == 5) { int j, n_ifs; asdl_seq *ifs; n = CHILD(sync_n, 4); n_ifs = count_comp_ifs(c, n); if (n_ifs == -1) return NULL; ifs = _Ta3_asdl_seq_new(n_ifs, c->c_arena); if (!ifs) return NULL; for (j = 0; j < n_ifs; j++) { REQ(n, comp_iter); n = CHILD(n, 0); REQ(n, comp_if); expression = ast_for_expr(c, CHILD(n, 1)); if (!expression) return NULL; asdl_seq_SET(ifs, j, expression); if (NCH(n) == 3) n = CHILD(n, 2); } /* on exit, must guarantee that n is a comp_for */ if (TYPE(n) == comp_iter) n = CHILD(n, 0); comp->ifs = ifs; } asdl_seq_SET(comps, i, comp); } return comps; } static expr_ty ast_for_itercomp(struct compiling *c, const node *n, int type) { /* testlist_comp: (test|star_expr) * ( comp_for | (',' (test|star_expr))* [','] ) */ expr_ty elt; asdl_seq *comps; node *ch; assert(NCH(n) > 1); ch = CHILD(n, 0); elt = ast_for_expr(c, ch); if (!elt) return NULL; if (elt->kind == Starred_kind) { ast_error(c, ch, "iterable unpacking cannot be used in comprehension"); return NULL; } comps = ast_for_comprehension(c, CHILD(n, 1)); if (!comps) return NULL; if (type == COMP_GENEXP) return GeneratorExp(elt, comps, LINENO(n), n->n_col_offset, c->c_arena); else if (type == COMP_LISTCOMP) return ListComp(elt, comps, LINENO(n), n->n_col_offset, c->c_arena); else if (type == COMP_SETCOMP) return SetComp(elt, comps, LINENO(n), n->n_col_offset, c->c_arena); else /* Should never happen */ return NULL; } /* Fills in the key, value pair corresponding to the dict element. In case * of an unpacking, key is NULL. 
/* Build a Dict node from a dictorsetmaker that is a plain display
   (no comprehension).  Handles both key:value pairs and '**' unpacking
   entries; for an unpacking entry ast_for_dictelement() stores a NULL
   key.  The key/value sequences are over-allocated and then trimmed
   to the actual element count. */
static expr_ty
ast_for_dictdisplay(struct compiling *c, const node *n)
{
    int i;
    int j;
    int size;
    asdl_seq *keys, *values;

    size = (NCH(n) + 1) / 3; /* +1 in case no trailing comma */
    keys = _Ta3_asdl_seq_new(size, c->c_arena);
    if (!keys)
        return NULL;

    values = _Ta3_asdl_seq_new(size, c->c_arena);
    if (!values)
        return NULL;

    j = 0;
    for (i = 0; i < NCH(n); i++) {
        expr_ty key, value;
        /* ast_for_dictelement() advances i past the element it parsed
           (2 children for '**' expr, 3 for key ':' value); the loop's
           i++ then skips the separating comma. */
        if (!ast_for_dictelement(c, n, &i, &key, &value))
            return NULL;
        asdl_seq_SET(keys, j, key);
        asdl_seq_SET(values, j, value);
        j++;
    }
    /* Trim both sequences to the number of elements actually parsed. */
    keys->size = j;
    values->size = j;
    return Dict(keys, values, LINENO(n), n->n_col_offset, c->c_arena);
}
ast_for_setcomp(struct compiling *c, const node *n) { assert(TYPE(n) == (dictorsetmaker)); return ast_for_itercomp(c, n, COMP_SETCOMP); } static expr_ty ast_for_setdisplay(struct compiling *c, const node *n) { int i; int size; asdl_seq *elts; assert(TYPE(n) == (dictorsetmaker)); size = (NCH(n) + 1) / 2; /* +1 in case no trailing comma */ elts = _Ta3_asdl_seq_new(size, c->c_arena); if (!elts) return NULL; for (i = 0; i < NCH(n); i += 2) { expr_ty expression; expression = ast_for_expr(c, CHILD(n, i)); if (!expression) return NULL; asdl_seq_SET(elts, i / 2, expression); } return Set(elts, LINENO(n), n->n_col_offset, c->c_arena); } static expr_ty ast_for_atom(struct compiling *c, const node *n) { /* atom: '(' [yield_expr|testlist_comp] ')' | '[' [testlist_comp] ']' | '{' [dictmaker|testlist_comp] '}' | NAME | NUMBER | STRING+ | '...' | 'None' | 'True' | 'False' */ node *ch = CHILD(n, 0); switch (TYPE(ch)) { case NAME: { PyObject *name; const char *s = STR(ch); size_t len = strlen(s); if (len >= 4 && len <= 5) { if (!strcmp(s, "None")) return NameConstant(Py_None, LINENO(n), n->n_col_offset, c->c_arena); if (!strcmp(s, "True")) return NameConstant(Py_True, LINENO(n), n->n_col_offset, c->c_arena); if (!strcmp(s, "False")) return NameConstant(Py_False, LINENO(n), n->n_col_offset, c->c_arena); } name = new_identifier(s, c); if (!name) return NULL; /* All names start in Load context, but may later be changed. 
*/ return Name(name, Load, LINENO(n), n->n_col_offset, c->c_arena); } case STRING: { expr_ty str = parsestrplus(c, n); if (!str) { const char *errtype = NULL; if (PyErr_ExceptionMatches(PyExc_UnicodeError)) errtype = "unicode error"; else if (PyErr_ExceptionMatches(PyExc_ValueError)) errtype = "value error"; if (errtype) { char buf[128]; const char *s = NULL; PyObject *type, *value, *tback, *errstr; PyErr_Fetch(&type, &value, &tback); errstr = PyObject_Str(value); if (errstr) s = PyUnicode_AsUTF8(errstr); if (s) { PyOS_snprintf(buf, sizeof(buf), "(%s) %s", errtype, s); } else { PyErr_Clear(); PyOS_snprintf(buf, sizeof(buf), "(%s) unknown error", errtype); } Py_XDECREF(errstr); ast_error(c, n, buf); Py_DECREF(type); Py_XDECREF(value); Py_XDECREF(tback); } return NULL; } return str; } case NUMBER: { PyObject *pynum; const char *s = STR(ch); /* Underscores in numeric literals are only allowed in Python 3.6 or greater */ /* Check for underscores here rather than in parse_number so we can report a line number on error */ if (c->c_feature_version < 6 && strchr(s, '_') != NULL) { ast_error(c, ch, "Underscores in numeric literals are only supported in Python 3.6 and greater"); return NULL; } pynum = parsenumber(c, STR(ch)); if (!pynum) return NULL; if (PyArena_AddPyObject(c->c_arena, pynum) < 0) { Py_DECREF(pynum); return NULL; } return Num(pynum, LINENO(n), n->n_col_offset, c->c_arena); } case ELLIPSIS: /* Ellipsis */ return Ellipsis(LINENO(n), n->n_col_offset, c->c_arena); case LPAR: /* some parenthesized expressions */ ch = CHILD(n, 1); if (TYPE(ch) == RPAR) return Tuple(NULL, Load, LINENO(n), n->n_col_offset, c->c_arena); if (TYPE(ch) == yield_expr) return ast_for_expr(c, ch); /* testlist_comp: test ( comp_for | (',' test)* [','] ) */ if ((NCH(ch) > 1) && (TYPE(CHILD(ch, 1)) == comp_for)) return ast_for_genexp(c, ch); return ast_for_testlist(c, ch); case LSQB: /* list (or list comprehension) */ ch = CHILD(n, 1); if (TYPE(ch) == RSQB) return List(NULL, Load, LINENO(n), 
n->n_col_offset, c->c_arena); REQ(ch, testlist_comp); if (NCH(ch) == 1 || TYPE(CHILD(ch, 1)) == COMMA) { asdl_seq *elts = seq_for_testlist(c, ch); if (!elts) return NULL; return List(elts, Load, LINENO(n), n->n_col_offset, c->c_arena); } else return ast_for_listcomp(c, ch); case LBRACE: { /* dictorsetmaker: ( ((test ':' test | '**' test) * (comp_for | (',' (test ':' test | '**' test))* [','])) | * ((test | '*' test) * (comp_for | (',' (test | '*' test))* [','])) ) */ expr_ty res; ch = CHILD(n, 1); if (TYPE(ch) == RBRACE) { /* It's an empty dict. */ return Dict(NULL, NULL, LINENO(n), n->n_col_offset, c->c_arena); } else { int is_dict = (TYPE(CHILD(ch, 0)) == DOUBLESTAR); if (NCH(ch) == 1 || (NCH(ch) > 1 && TYPE(CHILD(ch, 1)) == COMMA)) { /* It's a set display. */ res = ast_for_setdisplay(c, ch); } else if (NCH(ch) > 1 && TYPE(CHILD(ch, 1)) == comp_for) { /* It's a set comprehension. */ res = ast_for_setcomp(c, ch); } else if (NCH(ch) > 3 - is_dict && TYPE(CHILD(ch, 3 - is_dict)) == comp_for) { /* It's a dictionary comprehension. */ if (is_dict) { ast_error(c, n, "dict unpacking cannot be used in " "dict comprehension"); return NULL; } res = ast_for_dictcomp(c, ch); } else { /* It's a dictionary display. 
*/ res = ast_for_dictdisplay(c, ch); } if (res) { res->lineno = LINENO(n); res->col_offset = n->n_col_offset; } return res; } } default: PyErr_Format(PyExc_SystemError, "unhandled atom %d", TYPE(ch)); return NULL; } } static slice_ty ast_for_slice(struct compiling *c, const node *n) { node *ch; expr_ty lower = NULL, upper = NULL, step = NULL; REQ(n, subscript); /* subscript: test | [test] ':' [test] [sliceop] sliceop: ':' [test] */ ch = CHILD(n, 0); if (NCH(n) == 1 && TYPE(ch) == test) { /* 'step' variable hold no significance in terms of being used over other vars */ step = ast_for_expr(c, ch); if (!step) return NULL; return Index(step, c->c_arena); } if (TYPE(ch) == test) { lower = ast_for_expr(c, ch); if (!lower) return NULL; } /* If there's an upper bound it's in the second or third position. */ if (TYPE(ch) == COLON) { if (NCH(n) > 1) { node *n2 = CHILD(n, 1); if (TYPE(n2) == test) { upper = ast_for_expr(c, n2); if (!upper) return NULL; } } } else if (NCH(n) > 2) { node *n2 = CHILD(n, 2); if (TYPE(n2) == test) { upper = ast_for_expr(c, n2); if (!upper) return NULL; } } ch = CHILD(n, NCH(n) - 1); if (TYPE(ch) == sliceop) { if (NCH(ch) != 1) { ch = CHILD(ch, 1); if (TYPE(ch) == test) { step = ast_for_expr(c, ch); if (!step) return NULL; } } } return Slice(lower, upper, step, c->c_arena); } static expr_ty ast_for_binop(struct compiling *c, const node *n) { /* Must account for a sequence of expressions. How should A op B op C by represented? BinOp(BinOp(A, op, B), op, C). 
*/ int i, nops; expr_ty expr1, expr2, result; operator_ty newoperator; expr1 = ast_for_expr(c, CHILD(n, 0)); if (!expr1) return NULL; expr2 = ast_for_expr(c, CHILD(n, 2)); if (!expr2) return NULL; newoperator = get_operator(c, CHILD(n, 1)); if (!newoperator) return NULL; result = BinOp(expr1, newoperator, expr2, LINENO(n), n->n_col_offset, c->c_arena); if (!result) return NULL; nops = (NCH(n) - 1) / 2; for (i = 1; i < nops; i++) { expr_ty tmp_result, tmp; const node* next_oper = CHILD(n, i * 2 + 1); newoperator = get_operator(c, next_oper); if (!newoperator) return NULL; tmp = ast_for_expr(c, CHILD(n, i * 2 + 2)); if (!tmp) return NULL; tmp_result = BinOp(result, newoperator, tmp, LINENO(next_oper), next_oper->n_col_offset, c->c_arena); if (!tmp_result) return NULL; result = tmp_result; } return result; } static expr_ty ast_for_trailer(struct compiling *c, const node *n, expr_ty left_expr) { /* trailer: '(' [arglist] ')' | '[' subscriptlist ']' | '.' NAME subscriptlist: subscript (',' subscript)* [','] subscript: '.' '.' '.' | test | [test] ':' [test] [sliceop] */ REQ(n, trailer); if (TYPE(CHILD(n, 0)) == LPAR) { if (NCH(n) == 2) return Call(left_expr, NULL, NULL, LINENO(n), n->n_col_offset, c->c_arena); else return ast_for_call(c, CHILD(n, 1), left_expr, true); } else if (TYPE(CHILD(n, 0)) == DOT) { PyObject *attr_id = NEW_IDENTIFIER(CHILD(n, 1)); if (!attr_id) return NULL; return Attribute(left_expr, attr_id, Load, LINENO(n), n->n_col_offset, c->c_arena); } else { REQ(CHILD(n, 0), LSQB); REQ(CHILD(n, 2), RSQB); n = CHILD(n, 1); if (NCH(n) == 1) { slice_ty slc = ast_for_slice(c, CHILD(n, 0)); if (!slc) return NULL; return Subscript(left_expr, slc, Load, LINENO(n), n->n_col_offset, c->c_arena); } else { /* The grammar is ambiguous here. The ambiguity is resolved by treating the sequence as a tuple literal if there are no slice features. 
*/ int j; slice_ty slc; expr_ty e; int simple = 1; asdl_seq *slices, *elts; slices = _Ta3_asdl_seq_new((NCH(n) + 1) / 2, c->c_arena); if (!slices) return NULL; for (j = 0; j < NCH(n); j += 2) { slc = ast_for_slice(c, CHILD(n, j)); if (!slc) return NULL; if (slc->kind != Index_kind) simple = 0; asdl_seq_SET(slices, j / 2, slc); } if (!simple) { return Subscript(left_expr, ExtSlice(slices, c->c_arena), Load, LINENO(n), n->n_col_offset, c->c_arena); } /* extract Index values and put them in a Tuple */ elts = _Ta3_asdl_seq_new(asdl_seq_LEN(slices), c->c_arena); if (!elts) return NULL; for (j = 0; j < asdl_seq_LEN(slices); ++j) { slc = (slice_ty)asdl_seq_GET(slices, j); assert(slc->kind == Index_kind && slc->v.Index.value); asdl_seq_SET(elts, j, slc->v.Index.value); } e = Tuple(elts, Load, LINENO(n), n->n_col_offset, c->c_arena); if (!e) return NULL; return Subscript(left_expr, Index(e, c->c_arena), Load, LINENO(n), n->n_col_offset, c->c_arena); } } } static expr_ty ast_for_factor(struct compiling *c, const node *n) { expr_ty expression; expression = ast_for_expr(c, CHILD(n, 1)); if (!expression) return NULL; switch (TYPE(CHILD(n, 0))) { case PLUS: return UnaryOp(UAdd, expression, LINENO(n), n->n_col_offset, c->c_arena); case MINUS: return UnaryOp(USub, expression, LINENO(n), n->n_col_offset, c->c_arena); case TILDE: return UnaryOp(Invert, expression, LINENO(n), n->n_col_offset, c->c_arena); } PyErr_Format(PyExc_SystemError, "unhandled factor: %d", TYPE(CHILD(n, 0))); return NULL; } static expr_ty ast_for_atom_expr(struct compiling *c, const node *n) { int i, nch, start = 0; expr_ty e, tmp; REQ(n, atom_expr); nch = NCH(n); if (TYPE(CHILD(n, 0)) == AWAIT) { if (c->c_feature_version < 5) { ast_error(c, n, "Await expressions are only supported in Python 3.5 and greater"); return NULL; } start = 1; assert(nch > 1); } e = ast_for_atom(c, CHILD(n, start)); if (!e) return NULL; if (nch == 1) return e; if (start && nch == 2) { return Await(e, LINENO(n), n->n_col_offset, 
c->c_arena); } for (i = start + 1; i < nch; i++) { node *ch = CHILD(n, i); if (TYPE(ch) != trailer) break; tmp = ast_for_trailer(c, ch, e); if (!tmp) return NULL; tmp->lineno = e->lineno; tmp->col_offset = e->col_offset; e = tmp; } if (start) { /* there was an 'await' */ return Await(e, LINENO(n), n->n_col_offset, c->c_arena); } else { return e; } } static expr_ty ast_for_power(struct compiling *c, const node *n) { /* power: atom trailer* ('**' factor)* */ expr_ty e; REQ(n, power); e = ast_for_atom_expr(c, CHILD(n, 0)); if (!e) return NULL; if (NCH(n) == 1) return e; if (TYPE(CHILD(n, NCH(n) - 1)) == factor) { expr_ty f = ast_for_expr(c, CHILD(n, NCH(n) - 1)); if (!f) return NULL; e = BinOp(e, Pow, f, LINENO(n), n->n_col_offset, c->c_arena); } return e; } static expr_ty ast_for_starred(struct compiling *c, const node *n) { expr_ty tmp; REQ(n, star_expr); tmp = ast_for_expr(c, CHILD(n, 1)); if (!tmp) return NULL; /* The Load context is changed later. */ return Starred(tmp, Load, LINENO(n), n->n_col_offset, c->c_arena); } /* Do not name a variable 'expr'! Will cause a compile error. 
*/ static expr_ty ast_for_expr(struct compiling *c, const node *n) { /* handle the full range of simple expressions test: or_test ['if' or_test 'else' test] | lambdef test_nocond: or_test | lambdef_nocond or_test: and_test ('or' and_test)* and_test: not_test ('and' not_test)* not_test: 'not' not_test | comparison comparison: expr (comp_op expr)* expr: xor_expr ('|' xor_expr)* xor_expr: and_expr ('^' and_expr)* and_expr: shift_expr ('&' shift_expr)* shift_expr: arith_expr (('<<'|'>>') arith_expr)* arith_expr: term (('+'|'-') term)* term: factor (('*'|'@'|'/'|'%'|'//') factor)* factor: ('+'|'-'|'~') factor | power power: atom_expr ['**' factor] atom_expr: ['await'] atom trailer* yield_expr: 'yield' [yield_arg] */ asdl_seq *seq; int i; loop: switch (TYPE(n)) { case test: case test_nocond: if (TYPE(CHILD(n, 0)) == lambdef || TYPE(CHILD(n, 0)) == lambdef_nocond) return ast_for_lambdef(c, CHILD(n, 0)); else if (NCH(n) > 1) return ast_for_ifexpr(c, n); /* Fallthrough */ case or_test: case and_test: if (NCH(n) == 1) { n = CHILD(n, 0); goto loop; } seq = _Ta3_asdl_seq_new((NCH(n) + 1) / 2, c->c_arena); if (!seq) return NULL; for (i = 0; i < NCH(n); i += 2) { expr_ty e = ast_for_expr(c, CHILD(n, i)); if (!e) return NULL; asdl_seq_SET(seq, i / 2, e); } if (!strcmp(STR(CHILD(n, 1)), "and")) return BoolOp(And, seq, LINENO(n), n->n_col_offset, c->c_arena); assert(!strcmp(STR(CHILD(n, 1)), "or")); return BoolOp(Or, seq, LINENO(n), n->n_col_offset, c->c_arena); case not_test: if (NCH(n) == 1) { n = CHILD(n, 0); goto loop; } else { expr_ty expression = ast_for_expr(c, CHILD(n, 1)); if (!expression) return NULL; return UnaryOp(Not, expression, LINENO(n), n->n_col_offset, c->c_arena); } case comparison: if (NCH(n) == 1) { n = CHILD(n, 0); goto loop; } else { expr_ty expression; asdl_int_seq *ops; asdl_seq *cmps; ops = _Ta3_asdl_int_seq_new(NCH(n) / 2, c->c_arena); if (!ops) return NULL; cmps = _Ta3_asdl_seq_new(NCH(n) / 2, c->c_arena); if (!cmps) { return NULL; } for (i = 1; i < 
NCH(n); i += 2) { cmpop_ty newoperator; newoperator = ast_for_comp_op(c, CHILD(n, i)); if (!newoperator) { return NULL; } expression = ast_for_expr(c, CHILD(n, i + 1)); if (!expression) { return NULL; } asdl_seq_SET(ops, i / 2, newoperator); asdl_seq_SET(cmps, i / 2, expression); } expression = ast_for_expr(c, CHILD(n, 0)); if (!expression) { return NULL; } return Compare(expression, ops, cmps, LINENO(n), n->n_col_offset, c->c_arena); } break; case star_expr: return ast_for_starred(c, n); /* The next five cases all handle BinOps. The main body of code is the same in each case, but the switch turned inside out to reuse the code for each type of operator. */ case expr: case xor_expr: case and_expr: case shift_expr: case arith_expr: case term: if (NCH(n) == 1) { n = CHILD(n, 0); goto loop; } return ast_for_binop(c, n); case yield_expr: { node *an = NULL; node *en = NULL; int is_from = 0; expr_ty exp = NULL; if (NCH(n) > 1) an = CHILD(n, 1); /* yield_arg */ if (an) { en = CHILD(an, NCH(an) - 1); if (NCH(an) == 2) { is_from = 1; exp = ast_for_expr(c, en); } else exp = ast_for_testlist(c, en); if (!exp) return NULL; } if (is_from) return YieldFrom(exp, LINENO(n), n->n_col_offset, c->c_arena); return Yield(exp, LINENO(n), n->n_col_offset, c->c_arena); } case factor: if (NCH(n) == 1) { n = CHILD(n, 0); goto loop; } return ast_for_factor(c, n); case power: return ast_for_power(c, n); default: PyErr_Format(PyExc_SystemError, "unhandled expr: %d", TYPE(n)); return NULL; } /* should never get here unless if error is set */ return NULL; } static expr_ty ast_for_call(struct compiling *c, const node *n, expr_ty func, bool allowgen) { /* arglist: argument (',' argument)* [','] argument: ( test [comp_for] | '*' test | test '=' test | '**' test ) */ int i, nargs, nkeywords; int ndoublestars; asdl_seq *args; asdl_seq *keywords; REQ(n, arglist); nargs = 0; nkeywords = 0; for (i = 0; i < NCH(n); i++) { node *ch = CHILD(n, i); if (TYPE(ch) == argument) { if (NCH(ch) == 1) nargs++; else 
if (TYPE(CHILD(ch, 1)) == comp_for) { nargs++; if (!allowgen) { ast_error(c, ch, "invalid syntax"); return NULL; } if (NCH(n) > 1) { ast_error(c, ch, "Generator expression must be parenthesized"); return NULL; } } else if (TYPE(CHILD(ch, 0)) == STAR) nargs++; else /* TYPE(CHILD(ch, 0)) == DOUBLESTAR or keyword argument */ nkeywords++; } } args = _Ta3_asdl_seq_new(nargs, c->c_arena); if (!args) return NULL; keywords = _Ta3_asdl_seq_new(nkeywords, c->c_arena); if (!keywords) return NULL; nargs = 0; /* positional arguments + iterable argument unpackings */ nkeywords = 0; /* keyword arguments + keyword argument unpackings */ ndoublestars = 0; /* just keyword argument unpackings */ for (i = 0; i < NCH(n); i++) { node *ch = CHILD(n, i); if (TYPE(ch) == argument) { expr_ty e; node *chch = CHILD(ch, 0); if (NCH(ch) == 1) { /* a positional argument */ if (nkeywords) { if (ndoublestars) { ast_error(c, chch, "positional argument follows " "keyword argument unpacking"); } else { ast_error(c, chch, "positional argument follows " "keyword argument"); } return NULL; } e = ast_for_expr(c, chch); if (!e) return NULL; asdl_seq_SET(args, nargs++, e); } else if (TYPE(chch) == STAR) { /* an iterable argument unpacking */ expr_ty starred; if (ndoublestars) { ast_error(c, chch, "iterable argument unpacking follows " "keyword argument unpacking"); return NULL; } e = ast_for_expr(c, CHILD(ch, 1)); if (!e) return NULL; starred = Starred(e, Load, LINENO(chch), chch->n_col_offset, c->c_arena); if (!starred) return NULL; asdl_seq_SET(args, nargs++, starred); } else if (TYPE(chch) == DOUBLESTAR) { /* a keyword argument unpacking */ keyword_ty kw; i++; e = ast_for_expr(c, CHILD(ch, 1)); if (!e) return NULL; kw = keyword(NULL, e, c->c_arena); asdl_seq_SET(keywords, nkeywords++, kw); ndoublestars++; } else if (TYPE(CHILD(ch, 1)) == comp_for) { /* the lone generator expression */ e = ast_for_genexp(c, ch); if (!e) return NULL; asdl_seq_SET(args, nargs++, e); } else { /* a keyword argument */ 
keyword_ty kw; identifier key, tmp; int k; /* chch is test, but must be an identifier? */ e = ast_for_expr(c, chch); if (!e) return NULL; /* f(lambda x: x[0] = 3) ends up getting parsed with * LHS test = lambda x: x[0], and RHS test = 3. * SF bug 132313 points out that complaining about a keyword * then is very confusing. */ if (e->kind == Lambda_kind) { ast_error(c, chch, "lambda cannot contain assignment"); return NULL; } else if (e->kind != Name_kind) { ast_error(c, chch, "keyword can't be an expression"); return NULL; } else if (forbidden_name(c, e->v.Name.id, ch, 1)) { return NULL; } key = e->v.Name.id; for (k = 0; k < nkeywords; k++) { tmp = ((keyword_ty)asdl_seq_GET(keywords, k))->arg; if (tmp && !PyUnicode_Compare(tmp, key)) { ast_error(c, chch, "keyword argument repeated"); return NULL; } } e = ast_for_expr(c, CHILD(ch, 2)); if (!e) return NULL; kw = keyword(key, e, c->c_arena); if (!kw) return NULL; asdl_seq_SET(keywords, nkeywords++, kw); } } } return Call(func, args, keywords, func->lineno, func->col_offset, c->c_arena); } static expr_ty ast_for_testlist(struct compiling *c, const node* n) { /* testlist_comp: test (comp_for | (',' test)* [',']) */ /* testlist: test (',' test)* [','] */ assert(NCH(n) > 0); if (TYPE(n) == testlist_comp) { if (NCH(n) > 1) assert(TYPE(CHILD(n, 1)) != comp_for); } else { assert(TYPE(n) == testlist || TYPE(n) == testlist_star_expr); } if (NCH(n) == 1) return ast_for_expr(c, CHILD(n, 0)); else { asdl_seq *tmp = seq_for_testlist(c, n); if (!tmp) return NULL; return Tuple(tmp, Load, LINENO(n), n->n_col_offset, c->c_arena); } } static stmt_ty ast_for_expr_stmt(struct compiling *c, const node *n) { int num; REQ(n, expr_stmt); /* expr_stmt: testlist_star_expr (annassign | augassign (yield_expr|testlist) | ('=' (yield_expr|testlist_star_expr))* [TYPE_COMMENT]) annassign: ':' test ['=' test] testlist_star_expr: (test|star_expr) (',' test|star_expr)* [','] augassign: '+=' | '-=' | '*=' | '@=' | '/=' | '%=' | '&=' | '|=' | '^=' | '<<=' 
| '>>=' | '**=' | '//=' test: ... here starts the operator precedence dance */ num = NCH(n); if (num == 1 || (num == 2 && TYPE(CHILD(n, 1)) == TYPE_COMMENT)) { expr_ty e = ast_for_testlist(c, CHILD(n, 0)); if (!e) return NULL; return Expr(e, LINENO(n), n->n_col_offset, c->c_arena); } else if (TYPE(CHILD(n, 1)) == augassign) { expr_ty expr1, expr2; operator_ty newoperator; node *ch = CHILD(n, 0); expr1 = ast_for_testlist(c, ch); if (!expr1) return NULL; if(!set_context(c, expr1, Store, ch)) return NULL; /* set_context checks that most expressions are not the left side. Augmented assignments can only have a name, a subscript, or an attribute on the left, though, so we have to explicitly check for those. */ switch (expr1->kind) { case Name_kind: case Attribute_kind: case Subscript_kind: break; default: ast_error(c, ch, "illegal expression for augmented assignment"); return NULL; } ch = CHILD(n, 2); if (TYPE(ch) == testlist) expr2 = ast_for_testlist(c, ch); else expr2 = ast_for_expr(c, ch); if (!expr2) return NULL; newoperator = ast_for_augassign(c, CHILD(n, 1)); if (!newoperator) return NULL; return AugAssign(expr1, newoperator, expr2, LINENO(n), n->n_col_offset, c->c_arena); } else if (TYPE(CHILD(n, 1)) == annassign) { expr_ty expr1, expr2, expr3; node *ch = CHILD(n, 0); node *deep, *ann = CHILD(n, 1); int simple = 1; /* AnnAssigns are only allowed in Python 3.6 or greater */ if (c->c_feature_version < 6) { ast_error(c, ch, "Variable annotation syntax is only supported in Python 3.6 and greater"); return NULL; } /* we keep track of parens to qualify (x) as expression not name */ deep = ch; while (NCH(deep) == 1) { deep = CHILD(deep, 0); } if (NCH(deep) > 0 && TYPE(CHILD(deep, 0)) == LPAR) { simple = 0; } expr1 = ast_for_testlist(c, ch); if (!expr1) { return NULL; } switch (expr1->kind) { case Name_kind: if (forbidden_name(c, expr1->v.Name.id, n, 0)) { return NULL; } expr1->v.Name.ctx = Store; break; case Attribute_kind: if (forbidden_name(c, expr1->v.Attribute.attr, 
n, 1)) { return NULL; } expr1->v.Attribute.ctx = Store; break; case Subscript_kind: expr1->v.Subscript.ctx = Store; break; case List_kind: ast_error(c, ch, "only single target (not list) can be annotated"); return NULL; case Tuple_kind: ast_error(c, ch, "only single target (not tuple) can be annotated"); return NULL; default: ast_error(c, ch, "illegal target for annotation"); return NULL; } if (expr1->kind != Name_kind) { simple = 0; } ch = CHILD(ann, 1); expr2 = ast_for_expr(c, ch); if (!expr2) { return NULL; } if (NCH(ann) == 2) { return AnnAssign(expr1, expr2, NULL, simple, LINENO(n), n->n_col_offset, c->c_arena); } else { ch = CHILD(ann, 3); expr3 = ast_for_expr(c, ch); if (!expr3) { return NULL; } return AnnAssign(expr1, expr2, expr3, simple, LINENO(n), n->n_col_offset, c->c_arena); } } else { int i, nch_minus_type, has_type_comment; asdl_seq *targets; node *value; expr_ty expression; string type_comment; /* a normal assignment */ REQ(CHILD(n, 1), EQUAL); has_type_comment = TYPE(CHILD(n, num - 1)) == TYPE_COMMENT; nch_minus_type = num - has_type_comment; targets = _Ta3_asdl_seq_new(nch_minus_type / 2, c->c_arena); if (!targets) return NULL; for (i = 0; i < nch_minus_type - 2; i += 2) { expr_ty e; node *ch = CHILD(n, i); if (TYPE(ch) == yield_expr) { ast_error(c, ch, "assignment to yield expression not possible"); return NULL; } e = ast_for_testlist(c, ch); if (!e) return NULL; /* set context to assign */ if (!set_context(c, e, Store, CHILD(n, i))) return NULL; asdl_seq_SET(targets, i / 2, e); } value = CHILD(n, nch_minus_type - 1); if (TYPE(value) == testlist_star_expr) expression = ast_for_testlist(c, value); else expression = ast_for_expr(c, value); if (!expression) return NULL; if (has_type_comment) type_comment = NEW_TYPE_COMMENT(CHILD(n, nch_minus_type)); else type_comment = NULL; return Assign(targets, expression, type_comment, LINENO(n), n->n_col_offset, c->c_arena); } } static asdl_seq * ast_for_exprlist(struct compiling *c, const node *n, 
expr_context_ty context) { asdl_seq *seq; int i; expr_ty e; REQ(n, exprlist); seq = _Ta3_asdl_seq_new((NCH(n) + 1) / 2, c->c_arena); if (!seq) return NULL; for (i = 0; i < NCH(n); i += 2) { e = ast_for_expr(c, CHILD(n, i)); if (!e) return NULL; asdl_seq_SET(seq, i / 2, e); if (context && !set_context(c, e, context, CHILD(n, i))) return NULL; } return seq; } static stmt_ty ast_for_del_stmt(struct compiling *c, const node *n) { asdl_seq *expr_list; /* del_stmt: 'del' exprlist */ REQ(n, del_stmt); expr_list = ast_for_exprlist(c, CHILD(n, 1), Del); if (!expr_list) return NULL; return Delete(expr_list, LINENO(n), n->n_col_offset, c->c_arena); } static stmt_ty ast_for_flow_stmt(struct compiling *c, const node *n) { /* flow_stmt: break_stmt | continue_stmt | return_stmt | raise_stmt | yield_stmt break_stmt: 'break' continue_stmt: 'continue' return_stmt: 'return' [testlist] yield_stmt: yield_expr yield_expr: 'yield' testlist | 'yield' 'from' test raise_stmt: 'raise' [test [',' test [',' test]]] */ node *ch; REQ(n, flow_stmt); ch = CHILD(n, 0); switch (TYPE(ch)) { case break_stmt: return Break(LINENO(n), n->n_col_offset, c->c_arena); case continue_stmt: return Continue(LINENO(n), n->n_col_offset, c->c_arena); case yield_stmt: { /* will reduce to yield_expr */ expr_ty exp = ast_for_expr(c, CHILD(ch, 0)); if (!exp) return NULL; return Expr(exp, LINENO(n), n->n_col_offset, c->c_arena); } case return_stmt: if (NCH(ch) == 1) return Return(NULL, LINENO(n), n->n_col_offset, c->c_arena); else { expr_ty expression = ast_for_testlist(c, CHILD(ch, 1)); if (!expression) return NULL; return Return(expression, LINENO(n), n->n_col_offset, c->c_arena); } case raise_stmt: if (NCH(ch) == 1) return Raise(NULL, NULL, LINENO(n), n->n_col_offset, c->c_arena); else if (NCH(ch) >= 2) { expr_ty cause = NULL; expr_ty expression = ast_for_expr(c, CHILD(ch, 1)); if (!expression) return NULL; if (NCH(ch) == 4) { cause = ast_for_expr(c, CHILD(ch, 3)); if (!cause) return NULL; } return Raise(expression, 
cause, LINENO(n), n->n_col_offset, c->c_arena); } /* fall through */ default: PyErr_Format(PyExc_SystemError, "unexpected flow_stmt: %d", TYPE(ch)); return NULL; } } static alias_ty alias_for_import_name(struct compiling *c, const node *n, int store) { /* import_as_name: NAME ['as' NAME] dotted_as_name: dotted_name ['as' NAME] dotted_name: NAME ('.' NAME)* */ identifier str, name; loop: switch (TYPE(n)) { case import_as_name: { node *name_node = CHILD(n, 0); str = NULL; name = NEW_IDENTIFIER(name_node); if (!name) return NULL; if (NCH(n) == 3) { node *str_node = CHILD(n, 2); str = NEW_IDENTIFIER(str_node); if (!str) return NULL; if (store && forbidden_name(c, str, str_node, 0)) return NULL; } else { if (forbidden_name(c, name, name_node, 0)) return NULL; } return alias(name, str, c->c_arena); } case dotted_as_name: if (NCH(n) == 1) { n = CHILD(n, 0); goto loop; } else { node *asname_node = CHILD(n, 2); alias_ty a = alias_for_import_name(c, CHILD(n, 0), 0); if (!a) return NULL; assert(!a->asname); a->asname = NEW_IDENTIFIER(asname_node); if (!a->asname) return NULL; if (forbidden_name(c, a->asname, asname_node, 0)) return NULL; return a; } break; case dotted_name: if (NCH(n) == 1) { node *name_node = CHILD(n, 0); name = NEW_IDENTIFIER(name_node); if (!name) return NULL; if (store && forbidden_name(c, name, name_node, 0)) return NULL; return alias(name, NULL, c->c_arena); } else { /* Create a string of the form "a.b.c" */ int i; size_t len; char *s; PyObject *uni; len = 0; for (i = 0; i < NCH(n); i += 2) /* length of string plus one for the dot */ len += strlen(STR(CHILD(n, i))) + 1; len--; /* the last name doesn't have a dot */ str = PyBytes_FromStringAndSize(NULL, len); if (!str) return NULL; s = PyBytes_AS_STRING(str); if (!s) return NULL; for (i = 0; i < NCH(n); i += 2) { char *sch = STR(CHILD(n, i)); strcpy(s, STR(CHILD(n, i))); s += strlen(sch); *s++ = '.'; } --s; *s = '\0'; uni = PyUnicode_DecodeUTF8(PyBytes_AS_STRING(str), PyBytes_GET_SIZE(str), NULL); 
Py_DECREF(str); if (!uni) return NULL; str = uni; PyUnicode_InternInPlace(&str); if (PyArena_AddPyObject(c->c_arena, str) < 0) { Py_DECREF(str); return NULL; } return alias(str, NULL, c->c_arena); } break; case STAR: str = PyUnicode_InternFromString("*"); if (!str) return NULL; if (PyArena_AddPyObject(c->c_arena, str) < 0) { Py_DECREF(str); return NULL; } return alias(str, NULL, c->c_arena); default: PyErr_Format(PyExc_SystemError, "unexpected import name: %d", TYPE(n)); return NULL; } PyErr_SetString(PyExc_SystemError, "unhandled import name condition"); return NULL; } static stmt_ty ast_for_import_stmt(struct compiling *c, const node *n) { /* import_stmt: import_name | import_from import_name: 'import' dotted_as_names import_from: 'from' (('.' | '...')* dotted_name | ('.' | '...')+) 'import' ('*' | '(' import_as_names ')' | import_as_names) */ int lineno; int col_offset; int i; asdl_seq *aliases; REQ(n, import_stmt); lineno = LINENO(n); col_offset = n->n_col_offset; n = CHILD(n, 0); if (TYPE(n) == import_name) { n = CHILD(n, 1); REQ(n, dotted_as_names); aliases = _Ta3_asdl_seq_new((NCH(n) + 1) / 2, c->c_arena); if (!aliases) return NULL; for (i = 0; i < NCH(n); i += 2) { alias_ty import_alias = alias_for_import_name(c, CHILD(n, i), 1); if (!import_alias) return NULL; asdl_seq_SET(aliases, i / 2, import_alias); } return Import(aliases, lineno, col_offset, c->c_arena); } else if (TYPE(n) == import_from) { int n_children; int idx, ndots = 0; alias_ty mod = NULL; identifier modname = NULL; /* Count the number of dots (for relative imports) and check for the optional module name */ for (idx = 1; idx < NCH(n); idx++) { if (TYPE(CHILD(n, idx)) == dotted_name) { mod = alias_for_import_name(c, CHILD(n, idx), 0); if (!mod) return NULL; idx++; break; } else if (TYPE(CHILD(n, idx)) == ELLIPSIS) { /* three consecutive dots are tokenized as one ELLIPSIS */ ndots += 3; continue; } else if (TYPE(CHILD(n, idx)) != DOT) { break; } ndots++; } idx++; /* skip over the 'import' 
keyword */ switch (TYPE(CHILD(n, idx))) { case STAR: /* from ... import * */ n = CHILD(n, idx); n_children = 1; break; case LPAR: /* from ... import (x, y, z) */ n = CHILD(n, idx + 1); n_children = NCH(n); break; case import_as_names: /* from ... import x, y, z */ n = CHILD(n, idx); n_children = NCH(n); if (n_children % 2 == 0) { ast_error(c, n, "trailing comma not allowed without" " surrounding parentheses"); return NULL; } break; default: ast_error(c, n, "Unexpected node-type in from-import"); return NULL; } aliases = _Ta3_asdl_seq_new((n_children + 1) / 2, c->c_arena); if (!aliases) return NULL; /* handle "from ... import *" special b/c there's no children */ if (TYPE(n) == STAR) { alias_ty import_alias = alias_for_import_name(c, n, 1); if (!import_alias) return NULL; asdl_seq_SET(aliases, 0, import_alias); } else { for (i = 0; i < NCH(n); i += 2) { alias_ty import_alias = alias_for_import_name(c, CHILD(n, i), 1); if (!import_alias) return NULL; asdl_seq_SET(aliases, i / 2, import_alias); } } if (mod != NULL) modname = mod->name; return ImportFrom(modname, aliases, ndots, lineno, col_offset, c->c_arena); } PyErr_Format(PyExc_SystemError, "unknown import statement: starts with command '%s'", STR(CHILD(n, 0))); return NULL; } static stmt_ty ast_for_global_stmt(struct compiling *c, const node *n) { /* global_stmt: 'global' NAME (',' NAME)* */ identifier name; asdl_seq *s; int i; REQ(n, global_stmt); s = _Ta3_asdl_seq_new(NCH(n) / 2, c->c_arena); if (!s) return NULL; for (i = 1; i < NCH(n); i += 2) { name = NEW_IDENTIFIER(CHILD(n, i)); if (!name) return NULL; asdl_seq_SET(s, i / 2, name); } return Global(s, LINENO(n), n->n_col_offset, c->c_arena); } static stmt_ty ast_for_nonlocal_stmt(struct compiling *c, const node *n) { /* nonlocal_stmt: 'nonlocal' NAME (',' NAME)* */ identifier name; asdl_seq *s; int i; REQ(n, nonlocal_stmt); s = _Ta3_asdl_seq_new(NCH(n) / 2, c->c_arena); if (!s) return NULL; for (i = 1; i < NCH(n); i += 2) { name = NEW_IDENTIFIER(CHILD(n, i)); 
if (!name) return NULL; asdl_seq_SET(s, i / 2, name); } return Nonlocal(s, LINENO(n), n->n_col_offset, c->c_arena); } static stmt_ty ast_for_assert_stmt(struct compiling *c, const node *n) { /* assert_stmt: 'assert' test [',' test] */ REQ(n, assert_stmt); if (NCH(n) == 2) { expr_ty expression = ast_for_expr(c, CHILD(n, 1)); if (!expression) return NULL; return Assert(expression, NULL, LINENO(n), n->n_col_offset, c->c_arena); } else if (NCH(n) == 4) { expr_ty expr1, expr2; expr1 = ast_for_expr(c, CHILD(n, 1)); if (!expr1) return NULL; expr2 = ast_for_expr(c, CHILD(n, 3)); if (!expr2) return NULL; return Assert(expr1, expr2, LINENO(n), n->n_col_offset, c->c_arena); } PyErr_Format(PyExc_SystemError, "improper number of parts to 'assert' statement: %d", NCH(n)); return NULL; } static asdl_seq * ast_for_suite(struct compiling *c, const node *n) { /* suite: simple_stmt | NEWLINE [TYPE_COMMENT NEWLINE] INDENT stmt+ DEDENT */ asdl_seq *seq; stmt_ty s; int i, total, num, end, pos = 0; node *ch; REQ(n, suite); total = num_stmts(n); seq = _Ta3_asdl_seq_new(total, c->c_arena); if (!seq) return NULL; if (TYPE(CHILD(n, 0)) == simple_stmt) { n = CHILD(n, 0); /* simple_stmt always ends with a NEWLINE, and may have a trailing SEMI */ end = NCH(n) - 1; if (TYPE(CHILD(n, end - 1)) == SEMI) end--; /* loop by 2 to skip semi-colons */ for (i = 0; i < end; i += 2) { ch = CHILD(n, i); s = ast_for_stmt(c, ch); if (!s) return NULL; asdl_seq_SET(seq, pos++, s); } } else { i = 2; if (TYPE(CHILD(n, 1)) == TYPE_COMMENT) i += 2; for (; i < (NCH(n) - 1); i++) { ch = CHILD(n, i); REQ(ch, stmt); num = num_stmts(ch); if (num == 1) { /* small_stmt or compound_stmt with only one child */ s = ast_for_stmt(c, ch); if (!s) return NULL; asdl_seq_SET(seq, pos++, s); } else { int j; ch = CHILD(ch, 0); REQ(ch, simple_stmt); for (j = 0; j < NCH(ch); j += 2) { /* statement terminates with a semi-colon ';' */ if (NCH(CHILD(ch, j)) == 0) { assert((j + 1) == NCH(ch)); break; } s = ast_for_stmt(c, CHILD(ch, j)); 
if (!s) return NULL; asdl_seq_SET(seq, pos++, s); } } } } assert(pos == seq->size); return seq; } static stmt_ty ast_for_if_stmt(struct compiling *c, const node *n) { /* if_stmt: 'if' test ':' suite ('elif' test ':' suite)* ['else' ':' suite] */ char *s; REQ(n, if_stmt); if (NCH(n) == 4) { expr_ty expression; asdl_seq *suite_seq; expression = ast_for_expr(c, CHILD(n, 1)); if (!expression) return NULL; suite_seq = ast_for_suite(c, CHILD(n, 3)); if (!suite_seq) return NULL; return If(expression, suite_seq, NULL, LINENO(n), n->n_col_offset, c->c_arena); } s = STR(CHILD(n, 4)); /* s[2], the third character in the string, will be 's' for el_s_e, or 'i' for el_i_f */ if (s[2] == 's') { expr_ty expression; asdl_seq *seq1, *seq2; expression = ast_for_expr(c, CHILD(n, 1)); if (!expression) return NULL; seq1 = ast_for_suite(c, CHILD(n, 3)); if (!seq1) return NULL; seq2 = ast_for_suite(c, CHILD(n, 6)); if (!seq2) return NULL; return If(expression, seq1, seq2, LINENO(n), n->n_col_offset, c->c_arena); } else if (s[2] == 'i') { int i, n_elif, has_else = 0; expr_ty expression; asdl_seq *suite_seq; asdl_seq *orelse = NULL; n_elif = NCH(n) - 4; /* must reference the child n_elif+1 since 'else' token is third, not fourth, child from the end. 
*/ if (TYPE(CHILD(n, (n_elif + 1))) == NAME && STR(CHILD(n, (n_elif + 1)))[2] == 's') { has_else = 1; n_elif -= 3; } n_elif /= 4; if (has_else) { asdl_seq *suite_seq2; orelse = _Ta3_asdl_seq_new(1, c->c_arena); if (!orelse) return NULL; expression = ast_for_expr(c, CHILD(n, NCH(n) - 6)); if (!expression) return NULL; suite_seq = ast_for_suite(c, CHILD(n, NCH(n) - 4)); if (!suite_seq) return NULL; suite_seq2 = ast_for_suite(c, CHILD(n, NCH(n) - 1)); if (!suite_seq2) return NULL; asdl_seq_SET(orelse, 0, If(expression, suite_seq, suite_seq2, LINENO(CHILD(n, NCH(n) - 6)), CHILD(n, NCH(n) - 6)->n_col_offset, c->c_arena)); /* the just-created orelse handled the last elif */ n_elif--; } for (i = 0; i < n_elif; i++) { int off = 5 + (n_elif - i - 1) * 4; asdl_seq *newobj = _Ta3_asdl_seq_new(1, c->c_arena); if (!newobj) return NULL; expression = ast_for_expr(c, CHILD(n, off)); if (!expression) return NULL; suite_seq = ast_for_suite(c, CHILD(n, off + 2)); if (!suite_seq) return NULL; asdl_seq_SET(newobj, 0, If(expression, suite_seq, orelse, LINENO(CHILD(n, off)), CHILD(n, off)->n_col_offset, c->c_arena)); orelse = newobj; } expression = ast_for_expr(c, CHILD(n, 1)); if (!expression) return NULL; suite_seq = ast_for_suite(c, CHILD(n, 3)); if (!suite_seq) return NULL; return If(expression, suite_seq, orelse, LINENO(n), n->n_col_offset, c->c_arena); } PyErr_Format(PyExc_SystemError, "unexpected token in 'if' statement: %s", s); return NULL; } static stmt_ty ast_for_while_stmt(struct compiling *c, const node *n) { /* while_stmt: 'while' test ':' suite ['else' ':' suite] */ REQ(n, while_stmt); if (NCH(n) == 4) { expr_ty expression; asdl_seq *suite_seq; expression = ast_for_expr(c, CHILD(n, 1)); if (!expression) return NULL; suite_seq = ast_for_suite(c, CHILD(n, 3)); if (!suite_seq) return NULL; return While(expression, suite_seq, NULL, LINENO(n), n->n_col_offset, c->c_arena); } else if (NCH(n) == 7) { expr_ty expression; asdl_seq *seq1, *seq2; expression = ast_for_expr(c, CHILD(n, 
1)); if (!expression) return NULL; seq1 = ast_for_suite(c, CHILD(n, 3)); if (!seq1) return NULL; seq2 = ast_for_suite(c, CHILD(n, 6)); if (!seq2) return NULL; return While(expression, seq1, seq2, LINENO(n), n->n_col_offset, c->c_arena); } PyErr_Format(PyExc_SystemError, "wrong number of tokens for 'while' statement: %d", NCH(n)); return NULL; } static stmt_ty ast_for_for_stmt(struct compiling *c, const node *n0, bool is_async) { const node * const n = is_async ? CHILD(n0, 1) : n0; asdl_seq *_target, *seq = NULL, *suite_seq; expr_ty expression; expr_ty target, first; const node *node_target; int has_type_comment; string type_comment; if (is_async && c->c_feature_version < 5) { ast_error(c, n, "Async for loops are only supported in Python 3.5 and greater"); return NULL; } /* for_stmt: 'for' exprlist 'in' testlist ':' [TYPE_COMMENT] suite ['else' ':' suite] */ REQ(n, for_stmt); has_type_comment = TYPE(CHILD(n, 5)) == TYPE_COMMENT; if (NCH(n) == 9 + has_type_comment) { seq = ast_for_suite(c, CHILD(n, 8 + has_type_comment)); if (!seq) return NULL; } node_target = CHILD(n, 1); _target = ast_for_exprlist(c, node_target, Store); if (!_target) return NULL; /* Check the # of children rather than the length of _target, since for x, in ... has 1 element in _target, but still requires a Tuple. 
*/ first = (expr_ty)asdl_seq_GET(_target, 0); if (NCH(node_target) == 1) target = first; else target = Tuple(_target, Store, first->lineno, first->col_offset, c->c_arena); expression = ast_for_testlist(c, CHILD(n, 3)); if (!expression) return NULL; suite_seq = ast_for_suite(c, CHILD(n, 5 + has_type_comment)); if (!suite_seq) return NULL; if (has_type_comment) type_comment = NEW_TYPE_COMMENT(CHILD(n, 5)); else type_comment = NULL; if (is_async) return AsyncFor(target, expression, suite_seq, seq, type_comment, LINENO(n0), n0->n_col_offset, c->c_arena); else return For(target, expression, suite_seq, seq, type_comment, LINENO(n), n->n_col_offset, c->c_arena); } static excepthandler_ty ast_for_except_clause(struct compiling *c, const node *exc, node *body) { /* except_clause: 'except' [test ['as' test]] */ REQ(exc, except_clause); REQ(body, suite); if (NCH(exc) == 1) { asdl_seq *suite_seq = ast_for_suite(c, body); if (!suite_seq) return NULL; return ExceptHandler(NULL, NULL, suite_seq, LINENO(exc), exc->n_col_offset, c->c_arena); } else if (NCH(exc) == 2) { expr_ty expression; asdl_seq *suite_seq; expression = ast_for_expr(c, CHILD(exc, 1)); if (!expression) return NULL; suite_seq = ast_for_suite(c, body); if (!suite_seq) return NULL; return ExceptHandler(expression, NULL, suite_seq, LINENO(exc), exc->n_col_offset, c->c_arena); } else if (NCH(exc) == 4) { asdl_seq *suite_seq; expr_ty expression; identifier e = NEW_IDENTIFIER(CHILD(exc, 3)); if (!e) return NULL; if (forbidden_name(c, e, CHILD(exc, 3), 0)) return NULL; expression = ast_for_expr(c, CHILD(exc, 1)); if (!expression) return NULL; suite_seq = ast_for_suite(c, body); if (!suite_seq) return NULL; return ExceptHandler(expression, e, suite_seq, LINENO(exc), exc->n_col_offset, c->c_arena); } PyErr_Format(PyExc_SystemError, "wrong number of children for 'except' clause: %d", NCH(exc)); return NULL; } static stmt_ty ast_for_try_stmt(struct compiling *c, const node *n) { const int nch = NCH(n); int n_except = (nch - 
3)/3; asdl_seq *body, *handlers = NULL, *orelse = NULL, *finally = NULL; REQ(n, try_stmt); body = ast_for_suite(c, CHILD(n, 2)); if (body == NULL) return NULL; if (TYPE(CHILD(n, nch - 3)) == NAME) { if (strcmp(STR(CHILD(n, nch - 3)), "finally") == 0) { if (nch >= 9 && TYPE(CHILD(n, nch - 6)) == NAME) { /* we can assume it's an "else", because nch >= 9 for try-else-finally and it would otherwise have a type of except_clause */ orelse = ast_for_suite(c, CHILD(n, nch - 4)); if (orelse == NULL) return NULL; n_except--; } finally = ast_for_suite(c, CHILD(n, nch - 1)); if (finally == NULL) return NULL; n_except--; } else { /* we can assume it's an "else", otherwise it would have a type of except_clause */ orelse = ast_for_suite(c, CHILD(n, nch - 1)); if (orelse == NULL) return NULL; n_except--; } } else if (TYPE(CHILD(n, nch - 3)) != except_clause) { ast_error(c, n, "malformed 'try' statement"); return NULL; } if (n_except > 0) { int i; /* process except statements to create a try ... except */ handlers = _Ta3_asdl_seq_new(n_except, c->c_arena); if (handlers == NULL) return NULL; for (i = 0; i < n_except; i++) { excepthandler_ty e = ast_for_except_clause(c, CHILD(n, 3 + i * 3), CHILD(n, 5 + i * 3)); if (!e) return NULL; asdl_seq_SET(handlers, i, e); } } assert(finally != NULL || asdl_seq_LEN(handlers)); return Try(body, handlers, orelse, finally, LINENO(n), n->n_col_offset, c->c_arena); } /* with_item: test ['as' expr] */ static withitem_ty ast_for_with_item(struct compiling *c, const node *n) { expr_ty context_expr, optional_vars = NULL; REQ(n, with_item); context_expr = ast_for_expr(c, CHILD(n, 0)); if (!context_expr) return NULL; if (NCH(n) == 3) { optional_vars = ast_for_expr(c, CHILD(n, 2)); if (!optional_vars) { return NULL; } if (!set_context(c, optional_vars, Store, n)) { return NULL; } } return withitem(context_expr, optional_vars, c->c_arena); } /* with_stmt: 'with' with_item (',' with_item)* ':' [TYPE_COMMENT] suite */ static stmt_ty ast_for_with_stmt(struct 
compiling *c, const node *n0, bool is_async) { const node * const n = is_async ? CHILD(n0, 1) : n0; int i, n_items, nch_minus_type, has_type_comment; asdl_seq *items, *body; string type_comment; if (is_async && c->c_feature_version < 5) { ast_error(c, n, "Async with statements are only supported in Python 3.5 and greater"); return NULL; } REQ(n, with_stmt); has_type_comment = TYPE(CHILD(n, NCH(n) - 2)) == TYPE_COMMENT; nch_minus_type = NCH(n) - has_type_comment; n_items = (nch_minus_type - 2) / 2; items = _Ta3_asdl_seq_new(n_items, c->c_arena); if (!items) return NULL; for (i = 1; i < nch_minus_type - 2; i += 2) { withitem_ty item = ast_for_with_item(c, CHILD(n, i)); if (!item) return NULL; asdl_seq_SET(items, (i - 1) / 2, item); } body = ast_for_suite(c, CHILD(n, NCH(n) - 1)); if (!body) return NULL; if (has_type_comment) type_comment = NEW_TYPE_COMMENT(CHILD(n, NCH(n) - 2)); else type_comment = NULL; if (is_async) return AsyncWith(items, body, type_comment, LINENO(n0), n0->n_col_offset, c->c_arena); else return With(items, body, type_comment, LINENO(n), n->n_col_offset, c->c_arena); } static stmt_ty ast_for_classdef(struct compiling *c, const node *n, asdl_seq *decorator_seq) { /* classdef: 'class' NAME ['(' arglist ')'] ':' suite */ PyObject *classname; asdl_seq *s; expr_ty call; REQ(n, classdef); if (NCH(n) == 4) { /* class NAME ':' suite */ s = ast_for_suite(c, CHILD(n, 3)); if (!s) return NULL; classname = NEW_IDENTIFIER(CHILD(n, 1)); if (!classname) return NULL; if (forbidden_name(c, classname, CHILD(n, 3), 0)) return NULL; return ClassDef(classname, NULL, NULL, s, decorator_seq, LINENO(n), n->n_col_offset, c->c_arena); } if (TYPE(CHILD(n, 3)) == RPAR) { /* class NAME '(' ')' ':' suite */ s = ast_for_suite(c, CHILD(n, 5)); if (!s) return NULL; classname = NEW_IDENTIFIER(CHILD(n, 1)); if (!classname) return NULL; if (forbidden_name(c, classname, CHILD(n, 3), 0)) return NULL; return ClassDef(classname, NULL, NULL, s, decorator_seq, LINENO(n), n->n_col_offset, 
c->c_arena); } /* class NAME '(' arglist ')' ':' suite */ /* build up a fake Call node so we can extract its pieces */ { PyObject *dummy_name; expr_ty dummy; dummy_name = NEW_IDENTIFIER(CHILD(n, 1)); if (!dummy_name) return NULL; dummy = Name(dummy_name, Load, LINENO(n), n->n_col_offset, c->c_arena); call = ast_for_call(c, CHILD(n, 3), dummy, false); if (!call) return NULL; } s = ast_for_suite(c, CHILD(n, 6)); if (!s) return NULL; classname = NEW_IDENTIFIER(CHILD(n, 1)); if (!classname) return NULL; if (forbidden_name(c, classname, CHILD(n, 1), 0)) return NULL; return ClassDef(classname, call->v.Call.args, call->v.Call.keywords, s, decorator_seq, LINENO(n), n->n_col_offset, c->c_arena); } static stmt_ty ast_for_stmt(struct compiling *c, const node *n) { if (TYPE(n) == stmt) { assert(NCH(n) == 1); n = CHILD(n, 0); } if (TYPE(n) == simple_stmt) { assert(num_stmts(n) == 1); n = CHILD(n, 0); } if (TYPE(n) == small_stmt) { n = CHILD(n, 0); /* small_stmt: expr_stmt | del_stmt | pass_stmt | flow_stmt | import_stmt | global_stmt | nonlocal_stmt | assert_stmt */ switch (TYPE(n)) { case expr_stmt: return ast_for_expr_stmt(c, n); case del_stmt: return ast_for_del_stmt(c, n); case pass_stmt: return Pass(LINENO(n), n->n_col_offset, c->c_arena); case flow_stmt: return ast_for_flow_stmt(c, n); case import_stmt: return ast_for_import_stmt(c, n); case global_stmt: return ast_for_global_stmt(c, n); case nonlocal_stmt: return ast_for_nonlocal_stmt(c, n); case assert_stmt: return ast_for_assert_stmt(c, n); default: PyErr_Format(PyExc_SystemError, "unhandled small_stmt: TYPE=%d NCH=%d\n", TYPE(n), NCH(n)); return NULL; } } else { /* compound_stmt: if_stmt | while_stmt | for_stmt | try_stmt | funcdef | classdef | decorated | async_stmt */ node *ch = CHILD(n, 0); REQ(n, compound_stmt); switch (TYPE(ch)) { case if_stmt: return ast_for_if_stmt(c, ch); case while_stmt: return ast_for_while_stmt(c, ch); case for_stmt: return ast_for_for_stmt(c, ch, 0); case try_stmt: return 
ast_for_try_stmt(c, ch); case with_stmt: return ast_for_with_stmt(c, ch, 0); case funcdef: return ast_for_funcdef(c, ch, NULL); case classdef: return ast_for_classdef(c, ch, NULL); case decorated: return ast_for_decorated(c, ch); case async_stmt: return ast_for_async_stmt(c, ch); default: PyErr_Format(PyExc_SystemError, "unhandled small_stmt: TYPE=%d NCH=%d\n", TYPE(n), NCH(n)); return NULL; } } } static PyObject * parsenumber_raw(struct compiling *c, const char *s) { const char *end; long x; double dx; Py_complex compl; int imflag; assert(s != NULL); errno = 0; end = s + strlen(s) - 1; imflag = *end == 'j' || *end == 'J'; if (s[0] == '0') { x = (long) PyOS_strtoul(s, (char **)&end, 0); if (x < 0 && errno == 0) { return PyLong_FromString(s, (char **)0, 0); } } else x = PyOS_strtol(s, (char **)&end, 0); if (*end == '\0') { if (errno != 0) return PyLong_FromString(s, (char **)0, 0); return PyLong_FromLong(x); } /* XXX Huge floats may silently fail */ if (imflag) { compl.real = 0.; compl.imag = PyOS_string_to_double(s, (char **)&end, NULL); if (compl.imag == -1.0 && PyErr_Occurred()) return NULL; return PyComplex_FromCComplex(compl); } else { dx = PyOS_string_to_double(s, NULL, NULL); if (dx == -1.0 && PyErr_Occurred()) return NULL; return PyFloat_FromDouble(dx); } } static PyObject * parsenumber(struct compiling *c, const char *s) { char *dup, *end; PyObject *res = NULL; assert(s != NULL); if (strchr(s, '_') == NULL) { return parsenumber_raw(c, s); } /* Create a duplicate without underscores. */ dup = PyMem_Malloc(strlen(s) + 1); if (dup == NULL) { return PyErr_NoMemory(); } end = dup; for (; *s; s++) { if (*s != '_') { *end++ = *s; } } *end = '\0'; res = parsenumber_raw(c, dup); PyMem_Free(dup); return res; } static PyObject * decode_utf8(struct compiling *c, const char **sPtr, const char *end) { const char *s, *t; t = s = *sPtr; /* while (s < end && *s != '\\') s++; */ /* inefficient for u".." 
*/ while (s < end && (*s & 0x80)) s++; *sPtr = s; return PyUnicode_DecodeUTF8(t, s - t, NULL); } static int warn_invalid_escape_sequence(struct compiling *c, const node *n, unsigned char first_invalid_escape_char) { PyObject *msg = PyUnicode_FromFormat("invalid escape sequence \\%c", first_invalid_escape_char); if (msg == NULL) { return -1; } if (PyErr_WarnExplicitObject(PyExc_DeprecationWarning, msg, c->c_filename, LINENO(n), NULL, NULL) < 0) { if (PyErr_ExceptionMatches(PyExc_DeprecationWarning)) { const char *s; /* Replace the DeprecationWarning exception with a SyntaxError to get a more accurate error report */ PyErr_Clear(); s = PyUnicode_AsUTF8(msg); if (s != NULL) { ast_error(c, n, s); } } Py_DECREF(msg); return -1; } Py_DECREF(msg); return 0; } static PyObject * decode_unicode_with_escapes(struct compiling *c, const node *n, const char *s, size_t len) { PyObject *v, *u; char *buf; char *p; const char *end; const char *first_invalid_escape; /* check for integer overflow */ if (len > SIZE_MAX / 6) return NULL; /* "ä" (2 bytes) may become "\U000000E4" (10 bytes), or 1:5 "\ä" (3 bytes) may become "\u005c\U000000E4" (16 bytes), or ~1:6 */ u = PyBytes_FromStringAndSize((char *)NULL, len * 6); if (u == NULL) return NULL; p = buf = PyBytes_AsString(u); end = s + len; while (s < end) { if (*s == '\\') { *p++ = *s++; if (s >= end || *s & 0x80) { strcpy(p, "u005c"); p += 5; if (s >= end) break; } } if (*s & 0x80) { /* XXX inefficient */ PyObject *w; int kind; void *data; Py_ssize_t len, i; w = decode_utf8(c, &s, end); if (w == NULL) { Py_DECREF(u); return NULL; } kind = PyUnicode_KIND(w); data = PyUnicode_DATA(w); len = PyUnicode_GET_LENGTH(w); for (i = 0; i < len; i++) { Py_UCS4 chr = PyUnicode_READ(kind, data, i); sprintf(p, "\\U%08x", chr); p += 10; } /* Should be impossible to overflow */ assert(p - buf <= PyBytes_GET_SIZE(u)); Py_DECREF(w); } else { *p++ = *s++; } } len = p - buf; s = buf; v = _PyUnicode_DecodeUnicodeEscape(s, len, NULL, &first_invalid_escape); 
if (v != NULL && first_invalid_escape != NULL) { if (warn_invalid_escape_sequence(c, n, *first_invalid_escape) < 0) { /* We have not decref u before because first_invalid_escape points inside u. */ Py_XDECREF(u); Py_DECREF(v); return NULL; } } Py_XDECREF(u); return v; } static PyObject * decode_bytes_with_escapes(struct compiling *c, const node *n, const char *s, size_t len) { const char *first_invalid_escape; PyObject *result = _PyBytes_DecodeEscape(s, len, NULL, 0, NULL, &first_invalid_escape); if (result == NULL) return NULL; if (first_invalid_escape != NULL) { if (warn_invalid_escape_sequence(c, n, *first_invalid_escape) < 0) { Py_DECREF(result); return NULL; } } return result; } /* Shift locations for the given node and all its children by adding `lineno` and `col_offset` to existing locations. */ static void fstring_shift_node_locations(node *n, int lineno, int col_offset) { int i; n->n_col_offset = n->n_col_offset + col_offset; for (i = 0; i < NCH(n); ++i) { if (n->n_lineno && n->n_lineno < CHILD(n, i)->n_lineno) { /* Shifting column offsets unnecessary if there's been newlines. */ col_offset = 0; } fstring_shift_node_locations(CHILD(n, i), lineno, col_offset); } n->n_lineno = n->n_lineno + lineno; } /* Fix locations for the given node and its children. `parent` is the enclosing node. `n` is the node which locations are going to be fixed relative to parent. `expr_str` is the child node's string representation, including braces. */ static void fstring_fix_node_location(const node *parent, node *n, char *expr_str) { char *substr = NULL; char *start; int lines = LINENO(parent) - 1; int cols = parent->n_col_offset; /* Find the full fstring to fix location information in `n`. 
*/ while (parent && parent->n_type != STRING) parent = parent->n_child; if (parent && parent->n_str) { substr = strstr(parent->n_str, expr_str); if (substr) { start = substr; while (start > parent->n_str) { if (start[0] == '\n') break; start--; } cols += substr - start; /* Fix lineno in mulitline strings. */ while ((substr = strchr(substr + 1, '\n'))) lines--; } } fstring_shift_node_locations(n, lines, cols); } /* Compile this expression in to an expr_ty. Add parens around the expression, in order to allow leading spaces in the expression. */ static expr_ty fstring_compile_expr(const char *expr_start, const char *expr_end, struct compiling *c, const node *n) { PyCompilerFlags cf; node *mod_n; mod_ty mod; char *str; Py_ssize_t len; const char *s; PyObject *fstring_name; assert(expr_end >= expr_start); assert(*(expr_start-1) == '{'); assert(*expr_end == '}' || *expr_end == '!' || *expr_end == ':'); /* If the substring is all whitespace, it's an error. We need to catch this here, and not when we call PyParser_SimpleParseStringFlagsFilename, because turning the expression '' in to '()' would go from being invalid to valid. */ for (s = expr_start; s != expr_end; s++) { char c = *s; /* The Python parser ignores only the following whitespace characters (\r already is converted to \n). */ if (!(c == ' ' || c == '\t' || c == '\n' || c == '\f')) { break; } } if (s == expr_end) { ast_error(c, n, "f-string: empty expression not allowed"); return NULL; } len = expr_end - expr_start; /* Allocate 3 extra bytes: open paren, close paren, null byte. */ str = PyMem_RawMalloc(len + 3); if (str == NULL) { PyErr_NoMemory(); return NULL; } str[0] = '('; memcpy(str+1, expr_start, len); str[len+1] = ')'; str[len+2] = 0; cf.cf_flags = PyCF_ONLY_AST; mod_n = PyParser_SimpleParseStringFlagsFilename(str, "<fstring>", Py_eval_input, 0); if (!mod_n) { PyMem_RawFree(str); return NULL; } /* Reuse str to find the correct column offset. 
*/ str[0] = '{'; str[len+1] = '}'; fstring_fix_node_location(n, mod_n, str); fstring_name = PyUnicode_FromString("<fstring>"); mod = string_object_to_c_ast(str, fstring_name, Py_eval_input, &cf, c->c_feature_version, c->c_arena); Py_DECREF(fstring_name); PyMem_RawFree(str); Ta3Node_Free(mod_n); if (!mod) return NULL; return mod->v.Expression.body; } /* Return -1 on error. Return 0 if we reached the end of the literal. Return 1 if we haven't reached the end of the literal, but we want the caller to process the literal up to this point. Used for doubled braces. */ static int fstring_find_literal(const char **str, const char *end, int raw, PyObject **literal, int recurse_lvl, struct compiling *c, const node *n) { /* Get any literal string. It ends when we hit an un-doubled left brace (which isn't part of a unicode name escape such as "\N{EULER CONSTANT}"), or the end of the string. */ const char *s = *str; const char *literal_start = s; int result = 0; assert(*literal == NULL); while (s < end) { char ch = *s++; if (!raw && ch == '\\' && s < end) { ch = *s++; if (ch == 'N') { if (s < end && *s++ == '{') { while (s < end && *s++ != '}') { } continue; } break; } if (ch == '{' && warn_invalid_escape_sequence(c, n, ch) < 0) { return -1; } } if (ch == '{' || ch == '}') { /* Check for doubled braces, but only at the top level. If we checked at every level, then f'{0:{3}}' would fail with the two closing braces. */ if (recurse_lvl == 0) { if (s < end && *s == ch) { /* We're going to tell the caller that the literal ends here, but that they should continue scanning. But also skip over the second brace when we resume scanning. */ *str = s + 1; result = 1; goto done; } /* Where a single '{' is the start of a new expression, a single '}' is not allowed. 
*/ if (ch == '}') { *str = s - 1; ast_error(c, n, "f-string: single '}' is not allowed"); return -1; } } /* We're either at a '{', which means we're starting another expression; or a '}', which means we're at the end of this f-string (for a nested format_spec). */ s--; break; } } *str = s; assert(s <= end); assert(s == end || *s == '{' || *s == '}'); done: if (literal_start != s) { if (raw) *literal = PyUnicode_DecodeUTF8Stateful(literal_start, s - literal_start, NULL, NULL); else *literal = decode_unicode_with_escapes(c, n, literal_start, s - literal_start); if (!*literal) return -1; } return result; } /* Forward declaration because parsing is recursive. */ static expr_ty fstring_parse(const char **str, const char *end, int raw, int recurse_lvl, struct compiling *c, const node *n); /* Parse the f-string at *str, ending at end. We know *str starts an expression (so it must be a '{'). Returns the FormattedValue node, which includes the expression, conversion character, and format_spec expression. Note that I don't do a perfect job here: I don't make sure that a closing brace doesn't match an opening paren, for example. It doesn't need to error on all invalid expressions, just correctly find the end of all valid ones. Any errors inside the expression will be caught when we parse it later. */ static int fstring_find_expr(const char **str, const char *end, int raw, int recurse_lvl, expr_ty *expression, struct compiling *c, const node *n) { /* Return -1 on error, else 0. */ const char *expr_start; const char *expr_end; expr_ty simple_expression; expr_ty format_spec = NULL; /* Optional format specifier. */ int conversion = -1; /* The conversion char. -1 if not specified. */ /* 0 if we're not in a string, else the quote char we're trying to match (single or double quote). */ char quote_char = 0; /* If we're inside a string, 1=normal, 3=triple-quoted. */ int string_type = 0; /* Keep track of nesting level for braces/parens/brackets in expressions. 
*/ Py_ssize_t nested_depth = 0; /* Can only nest one level deep. */ if (recurse_lvl >= 2) { ast_error(c, n, "f-string: expressions nested too deeply"); return -1; } /* The first char must be a left brace, or we wouldn't have gotten here. Skip over it. */ assert(**str == '{'); *str += 1; expr_start = *str; for (; *str < end; (*str)++) { char ch; /* Loop invariants. */ assert(nested_depth >= 0); assert(*str >= expr_start && *str < end); if (quote_char) assert(string_type == 1 || string_type == 3); else assert(string_type == 0); ch = **str; /* Nowhere inside an expression is a backslash allowed. */ if (ch == '\\') { /* Error: can't include a backslash character, inside parens or strings or not. */ ast_error(c, n, "f-string expression part " "cannot include a backslash"); return -1; } if (quote_char) { /* We're inside a string. See if we're at the end. */ /* This code needs to implement the same non-error logic as tok_get from tokenizer.c, at the letter_quote label. To actually share that code would be a nightmare. But, it's unlikely to change and is small, so duplicate it here. Note we don't need to catch all of the errors, since they'll be caught when parsing the expression. We just need to match the non-error cases. Thus we can ignore \n in single-quoted strings, for example. Or non-terminated strings. */ if (ch == quote_char) { /* Does this match the string_type (single or triple quoted)? */ if (string_type == 3) { if (*str+2 < end && *(*str+1) == ch && *(*str+2) == ch) { /* We're at the end of a triple quoted string. */ *str += 2; string_type = 0; quote_char = 0; continue; } } else { /* We're at the end of a normal string. */ quote_char = 0; string_type = 0; continue; } } } else if (ch == '\'' || ch == '"') { /* Is this a triple quoted string? */ if (*str+2 < end && *(*str+1) == ch && *(*str+2) == ch) { string_type = 3; *str += 2; } else { /* Start of a normal string. */ string_type = 1; } /* Start looking for the end of the string. 
*/ quote_char = ch; } else if (ch == '[' || ch == '{' || ch == '(') { nested_depth++; } else if (nested_depth != 0 && (ch == ']' || ch == '}' || ch == ')')) { nested_depth--; } else if (ch == '#') { /* Error: can't include a comment character, inside parens or not. */ ast_error(c, n, "f-string expression part cannot include '#'"); return -1; } else if (nested_depth == 0 && (ch == '!' || ch == ':' || ch == '}')) { /* First, test for the special case of "!=". Since '=' is not an allowed conversion character, nothing is lost in this test. */ if (ch == '!' && *str+1 < end && *(*str+1) == '=') { /* This isn't a conversion character, just continue. */ continue; } /* Normal way out of this loop. */ break; } else { /* Just consume this char and loop around. */ } } expr_end = *str; /* If we leave this loop in a string or with mismatched parens, we don't care. We'll get a syntax error when compiling the expression. But, we can produce a better error message, so let's just do that.*/ if (quote_char) { ast_error(c, n, "f-string: unterminated string"); return -1; } if (nested_depth) { ast_error(c, n, "f-string: mismatched '(', '{', or '['"); return -1; } if (*str >= end) goto unexpected_end_of_string; /* Compile the expression as soon as possible, so we show errors related to the expression before errors related to the conversion or format_spec. */ simple_expression = fstring_compile_expr(expr_start, expr_end, c, n); if (!simple_expression) return -1; /* Check for a conversion char, if present. */ if (**str == '!') { *str += 1; if (*str >= end) goto unexpected_end_of_string; conversion = **str; *str += 1; /* Validate the conversion. */ if (!(conversion == 's' || conversion == 'r' || conversion == 'a')) { ast_error(c, n, "f-string: invalid conversion character: " "expected 's', 'r', or 'a'"); return -1; } } /* Check for the format spec, if present. 
*/ if (*str >= end) goto unexpected_end_of_string; if (**str == ':') { *str += 1; if (*str >= end) goto unexpected_end_of_string; /* Parse the format spec. */ format_spec = fstring_parse(str, end, raw, recurse_lvl+1, c, n); if (!format_spec) return -1; } if (*str >= end || **str != '}') goto unexpected_end_of_string; /* We're at a right brace. Consume it. */ assert(*str < end); assert(**str == '}'); *str += 1; /* And now create the FormattedValue node that represents this entire expression with the conversion and format spec. */ *expression = FormattedValue(simple_expression, conversion, format_spec, LINENO(n), n->n_col_offset, c->c_arena); if (!*expression) return -1; return 0; unexpected_end_of_string: ast_error(c, n, "f-string: expecting '}'"); return -1; } /* Return -1 on error. Return 0 if we have a literal (possible zero length) and an expression (zero length if at the end of the string. Return 1 if we have a literal, but no expression, and we want the caller to call us again. This is used to deal with doubled braces. When called multiple times on the string 'a{{b{0}c', this function will return: 1. the literal 'a{' with no expression, and a return value of 1. Despite the fact that there's no expression, the return value of 1 means we're not finished yet. 2. the literal 'b' and the expression '0', with a return value of 0. The fact that there's an expression means we're not finished. 3. literal 'c' with no expression and a return value of 0. The combination of the return value of 0 with no expression means we're finished. */ static int fstring_find_literal_and_expr(const char **str, const char *end, int raw, int recurse_lvl, PyObject **literal, expr_ty *expression, struct compiling *c, const node *n) { int result; assert(*literal == NULL && *expression == NULL); /* Get any literal string. 
*/ result = fstring_find_literal(str, end, raw, literal, recurse_lvl, c, n); if (result < 0) goto error; assert(result == 0 || result == 1); if (result == 1) /* We have a literal, but don't look at the expression. */ return 1; if (*str >= end || **str == '}') /* We're at the end of the string or the end of a nested f-string: no expression. The top-level error case where we expect to be at the end of the string but we're at a '}' is handled later. */ return 0; /* We must now be the start of an expression, on a '{'. */ assert(**str == '{'); if (fstring_find_expr(str, end, raw, recurse_lvl, expression, c, n) < 0) goto error; return 0; error: Py_CLEAR(*literal); return -1; } #define EXPRLIST_N_CACHED 64 typedef struct { /* Incrementally build an array of expr_ty, so be used in an asdl_seq. Cache some small but reasonably sized number of expr_ty's, and then after that start dynamically allocating, doubling the number allocated each time. Note that the f-string f'{0}a{1}' contains 3 expr_ty's: 2 FormattedValue's, and one Str for the literal 'a'. So you add expr_ty's about twice as fast as you add exressions in an f-string. */ Py_ssize_t allocated; /* Number we've allocated. */ Py_ssize_t size; /* Number we've used. */ expr_ty *p; /* Pointer to the memory we're actually using. Will point to 'data' until we start dynamically allocating. */ expr_ty data[EXPRLIST_N_CACHED]; } ExprList; #ifdef NDEBUG #define ExprList_check_invariants(l) #else static void ExprList_check_invariants(ExprList *l) { /* Check our invariants. Make sure this object is "live", and hasn't been deallocated. */ assert(l->size >= 0); assert(l->p != NULL); if (l->size <= EXPRLIST_N_CACHED) assert(l->data == l->p); } #endif static void ExprList_Init(ExprList *l) { l->allocated = EXPRLIST_N_CACHED; l->size = 0; /* Until we start allocating dynamically, p points to data. 
*/ l->p = l->data; ExprList_check_invariants(l); } static int ExprList_Append(ExprList *l, expr_ty exp) { ExprList_check_invariants(l); if (l->size >= l->allocated) { /* We need to alloc (or realloc) the memory. */ Py_ssize_t new_size = l->allocated * 2; /* See if we've ever allocated anything dynamically. */ if (l->p == l->data) { Py_ssize_t i; /* We're still using the cached data. Switch to alloc-ing. */ l->p = PyMem_RawMalloc(sizeof(expr_ty) * new_size); if (!l->p) return -1; /* Copy the cached data into the new buffer. */ for (i = 0; i < l->size; i++) l->p[i] = l->data[i]; } else { /* Just realloc. */ expr_ty *tmp = PyMem_RawRealloc(l->p, sizeof(expr_ty) * new_size); if (!tmp) { PyMem_RawFree(l->p); l->p = NULL; return -1; } l->p = tmp; } l->allocated = new_size; assert(l->allocated == 2 * l->size); } l->p[l->size++] = exp; ExprList_check_invariants(l); return 0; } static void ExprList_Dealloc(ExprList *l) { ExprList_check_invariants(l); /* If there's been an error, or we've never dynamically allocated, do nothing. */ if (!l->p || l->p == l->data) { /* Do nothing. */ } else { /* We have dynamically allocated. Free the memory. */ PyMem_RawFree(l->p); } l->p = NULL; l->size = -1; } static asdl_seq * ExprList_Finish(ExprList *l, PyArena *arena) { asdl_seq *seq; ExprList_check_invariants(l); /* Allocate the asdl_seq and copy the expressions in to it. */ seq = _Ta3_asdl_seq_new(l->size, arena); if (seq) { Py_ssize_t i; for (i = 0; i < l->size; i++) asdl_seq_SET(seq, i, l->p[i]); } ExprList_Dealloc(l); return seq; } /* The FstringParser is designed to add a mix of strings and f-strings, and concat them together as needed. Ultimately, it generates an expr_ty. 
*/ typedef struct { PyObject *last_str; ExprList expr_list; int fmode; } FstringParser; #ifdef NDEBUG #define FstringParser_check_invariants(state) #else static void FstringParser_check_invariants(FstringParser *state) { if (state->last_str) assert(PyUnicode_CheckExact(state->last_str)); ExprList_check_invariants(&state->expr_list); } #endif static void FstringParser_Init(FstringParser *state) { state->last_str = NULL; state->fmode = 0; ExprList_Init(&state->expr_list); FstringParser_check_invariants(state); } static void FstringParser_Dealloc(FstringParser *state) { FstringParser_check_invariants(state); Py_XDECREF(state->last_str); ExprList_Dealloc(&state->expr_list); } /* Make a Str node, but decref the PyUnicode object being added. */ static expr_ty make_str_node_and_del(PyObject **str, struct compiling *c, const node* n) { PyObject *kind, *s = *str; const char *raw = STR(CHILD(n, 0)); /* currently Python allows up to 2 string modifiers */ char *ch, s_kind[3] = {0, 0, 0}; ch = s_kind; while (*raw && *raw != '\'' && *raw != '"') { *ch++ = *raw++; } kind = PyUnicode_FromString(s_kind); if (!kind) { return NULL; } *str = NULL; assert(PyUnicode_CheckExact(s)); if (PyArena_AddPyObject(c->c_arena, s) < 0) { Py_DECREF(s); return NULL; } return Str(s, kind, LINENO(n), n->n_col_offset, c->c_arena); } /* Add a non-f-string (that is, a regular literal string). str is decref'd. */ static int FstringParser_ConcatAndDel(FstringParser *state, PyObject *str) { FstringParser_check_invariants(state); assert(PyUnicode_CheckExact(str)); if (PyUnicode_GET_LENGTH(str) == 0) { Py_DECREF(str); return 0; } if (!state->last_str) { /* We didn't have a string before, so just remember this one. */ state->last_str = str; } else { /* Concatenate this with the previous string. */ PyUnicode_AppendAndDel(&state->last_str, str); if (!state->last_str) return -1; } FstringParser_check_invariants(state); return 0; } /* Parse an f-string. The f-string is in *str to end, with no 'f' or quotes. 
*/ static int FstringParser_ConcatFstring(FstringParser *state, const char **str, const char *end, int raw, int recurse_lvl, struct compiling *c, const node *n) { FstringParser_check_invariants(state); state->fmode = 1; /* Parse the f-string. */ while (1) { PyObject *literal = NULL; expr_ty expression = NULL; /* If there's a zero length literal in front of the expression, literal will be NULL. If we're at the end of the f-string, expression will be NULL (unless result == 1, see below). */ int result = fstring_find_literal_and_expr(str, end, raw, recurse_lvl, &literal, &expression, c, n); if (result < 0) return -1; /* Add the literal, if any. */ if (!literal) { /* Do nothing. Just leave last_str alone (and possibly NULL). */ } else if (!state->last_str) { /* Note that the literal can be zero length, if the input string is "\\\n" or "\\\r", among others. */ state->last_str = literal; literal = NULL; } else { /* We have a literal, concatenate it. */ assert(PyUnicode_GET_LENGTH(literal) != 0); if (FstringParser_ConcatAndDel(state, literal) < 0) return -1; literal = NULL; } /* We've dealt with the literal now. It can't be leaked on further errors. */ assert(literal == NULL); /* See if we should just loop around to get the next literal and expression, while ignoring the expression this time. This is used for un-doubling braces, as an optimization. */ if (result == 1) continue; if (!expression) /* We're done with this f-string. */ break; /* We know we have an expression. Convert any existing string to a Str node. */ if (!state->last_str) { /* Do nothing. No previous literal. */ } else { /* Convert the existing last_str literal to a Str node. */ expr_ty str = make_str_node_and_del(&state->last_str, c, n); if (!str || ExprList_Append(&state->expr_list, str) < 0) return -1; } if (ExprList_Append(&state->expr_list, expression) < 0) return -1; } /* If recurse_lvl is zero, then we must be at the end of the string. Otherwise, we must be at a right brace. 
*/ if (recurse_lvl == 0 && *str < end-1) { ast_error(c, n, "f-string: unexpected end of string"); return -1; } if (recurse_lvl != 0 && **str != '}') { ast_error(c, n, "f-string: expecting '}'"); return -1; } FstringParser_check_invariants(state); return 0; } /* Convert the partial state reflected in last_str and expr_list to an expr_ty. The expr_ty can be a Str, or a JoinedStr. */ static expr_ty FstringParser_Finish(FstringParser *state, struct compiling *c, const node *n) { asdl_seq *seq; FstringParser_check_invariants(state); /* If we're just a constant string with no expressions, return that. */ if (!state->fmode) { assert(!state->expr_list.size); if (!state->last_str) { /* Create a zero length string. */ state->last_str = PyUnicode_FromStringAndSize(NULL, 0); if (!state->last_str) goto error; } return make_str_node_and_del(&state->last_str, c, n); } /* Create a Str node out of last_str, if needed. It will be the last node in our expression list. */ if (state->last_str) { expr_ty str = make_str_node_and_del(&state->last_str, c, n); if (!str || ExprList_Append(&state->expr_list, str) < 0) goto error; } /* This has already been freed. */ assert(state->last_str == NULL); seq = ExprList_Finish(&state->expr_list, c->c_arena); if (!seq) goto error; return JoinedStr(seq, LINENO(n), n->n_col_offset, c->c_arena); error: FstringParser_Dealloc(state); return NULL; } /* Given an f-string (with no 'f' or quotes) that's in *str and ends at end, parse it into an expr_ty. Return NULL on error. Adjust str to point past the parsed portion. 
*/ static expr_ty fstring_parse(const char **str, const char *end, int raw, int recurse_lvl, struct compiling *c, const node *n) { FstringParser state; FstringParser_Init(&state); if (FstringParser_ConcatFstring(&state, str, end, raw, recurse_lvl, c, n) < 0) { FstringParser_Dealloc(&state); return NULL; } return FstringParser_Finish(&state, c, n); } /* n is a Python string literal, including the bracketing quote characters, and r, b, u, &/or f prefixes (if any), and embedded escape sequences (if any). parsestr parses it, and sets *result to decoded Python string object. If the string is an f-string, set *fstr and *fstrlen to the unparsed string object. Return 0 if no errors occurred. */ static int parsestr(struct compiling *c, const node *n, int *bytesmode, int *rawmode, PyObject **result, const char **fstr, Py_ssize_t *fstrlen) { size_t len; const char *s = STR(n); int quote = Py_CHARMASK(*s); int fmode = 0; *bytesmode = 0; *rawmode = 0; *result = NULL; *fstr = NULL; if (Py_ISALPHA(quote)) { while (!*bytesmode || !*rawmode) { if (quote == 'b' || quote == 'B') { quote = *++s; *bytesmode = 1; } else if (quote == 'u' || quote == 'U') { quote = *++s; } else if (quote == 'r' || quote == 'R') { quote = *++s; *rawmode = 1; } else if (quote == 'f' || quote == 'F') { quote = *++s; fmode = 1; } else { break; } } } /* fstrings are only allowed in Python 3.6 and greater */ if (fmode && c->c_feature_version < 6) { ast_error(c, n, "Format strings are only supported in Python 3.6 and greater"); return -1; } if (fmode && *bytesmode) { PyErr_BadInternalCall(); return -1; } if (quote != '\'' && quote != '\"') { PyErr_BadInternalCall(); return -1; } /* Skip the leading quote char. */ s++; len = strlen(s); if (len > INT_MAX) { PyErr_SetString(PyExc_OverflowError, "string to parse is too long"); return -1; } if (s[--len] != quote) { /* Last quote char must match the first. 
*/ PyErr_BadInternalCall(); return -1; } if (len >= 4 && s[0] == quote && s[1] == quote) { /* A triple quoted string. We've already skipped one quote at the start and one at the end of the string. Now skip the two at the start. */ s += 2; len -= 2; /* And check that the last two match. */ if (s[--len] != quote || s[--len] != quote) { PyErr_BadInternalCall(); return -1; } } if (fmode) { /* Just return the bytes. The caller will parse the resulting string. */ *fstr = s; *fstrlen = len; return 0; } /* Not an f-string. */ /* Avoid invoking escape decoding routines if possible. */ *rawmode = *rawmode || strchr(s, '\\') == NULL; if (*bytesmode) { /* Disallow non-ASCII characters. */ const char *ch; for (ch = s; *ch; ch++) { if (Py_CHARMASK(*ch) >= 0x80) { ast_error(c, n, "bytes can only contain ASCII " "literal characters."); return -1; } } if (*rawmode) *result = PyBytes_FromStringAndSize(s, len); else *result = decode_bytes_with_escapes(c, n, s, len); } else { if (*rawmode) *result = PyUnicode_DecodeUTF8Stateful(s, len, NULL, NULL); else *result = decode_unicode_with_escapes(c, n, s, len); } return *result == NULL ? -1 : 0; } /* Accepts a STRING+ atom, and produces an expr_ty node. Run through each STRING atom, and process it as needed. For bytes, just concatenate them together, and the result will be a Bytes node. For normal strings and f-strings, concatenate them together. The result will be a Str node if there were no f-strings; a FormattedValue node if there's just an f-string (with no leading or trailing literals), or a JoinedStr node if there are multiple f-strings or any literals involved. */ static expr_ty parsestrplus(struct compiling *c, const node *n) { int bytesmode = 0; PyObject *bytes_str = NULL; int i; FstringParser state; FstringParser_Init(&state); for (i = 0; i < NCH(n); i++) { int this_bytesmode; int this_rawmode; PyObject *s; const char *fstr; Py_ssize_t fstrlen = -1; /* Silence a compiler warning. 
*/ REQ(CHILD(n, i), STRING); if (parsestr(c, CHILD(n, i), &this_bytesmode, &this_rawmode, &s, &fstr, &fstrlen) != 0) goto error; /* Check that we're not mixing bytes with unicode. */ if (i != 0 && bytesmode != this_bytesmode) { ast_error(c, n, "cannot mix bytes and nonbytes literals"); /* s is NULL if the current string part is an f-string. */ Py_XDECREF(s); goto error; } bytesmode = this_bytesmode; if (fstr != NULL) { int result; assert(s == NULL && !bytesmode); /* This is an f-string. Parse and concatenate it. */ result = FstringParser_ConcatFstring(&state, &fstr, fstr+fstrlen, this_rawmode, 0, c, n); if (result < 0) goto error; } else { /* A string or byte string. */ assert(s != NULL && fstr == NULL); assert(bytesmode ? PyBytes_CheckExact(s) : PyUnicode_CheckExact(s)); if (bytesmode) { /* For bytes, concat as we go. */ if (i == 0) { /* First time, just remember this value. */ bytes_str = s; } else { PyBytes_ConcatAndDel(&bytes_str, s); if (!bytes_str) goto error; } } else { /* This is a regular string. Concatenate it. */ if (FstringParser_ConcatAndDel(&state, s) < 0) goto error; } } } if (bytesmode) { /* Just return the bytes object and we're done. */ if (PyArena_AddPyObject(c->c_arena, bytes_str) < 0) goto error; return Bytes(bytes_str, LINENO(n), n->n_col_offset, c->c_arena); } /* We're not a bytes string, bytes_str should never have been set. */ assert(bytes_str == NULL); return FstringParser_Finish(&state, c, n); error: Py_XDECREF(bytes_str); FstringParser_Dealloc(&state); return NULL; }
decode_bytes_with_escapes(struct compiling *c, const node *n, const char *s, size_t len) { return PyBytes_DecodeEscape(s, len, NULL, 0, NULL); }
decode_bytes_with_escapes(struct compiling *c, const node *n, const char *s, size_t len) { const char *first_invalid_escape; PyObject *result = _PyBytes_DecodeEscape(s, len, NULL, 0, NULL, &first_invalid_escape); if (result == NULL) return NULL; if (first_invalid_escape != NULL) { if (warn_invalid_escape_sequence(c, n, *first_invalid_escape) < 0) { Py_DECREF(result); return NULL; } } return result; }
{'added': [(11, '#include "pythonrun.h"'), (15, "// VS 2010 doesn't have <stdbool.h>..."), (16, 'typedef int bool;'), (17, '#define false 0'), (18, '#define true 1'), (19, ''), (20, '#ifndef _PyObject_FastCall'), (21, 'static PyObject *'), (22, '_PyObject_FastCall(PyObject *func, PyObject *const *args, int nargs)'), (23, '{'), (24, ' PyObject *t, *res;'), (25, ' int i;'), (26, ''), (27, ' t = PyTuple_New(nargs);'), (28, ' if (t == NULL) {'), (29, ' return NULL;'), (30, ' }'), (31, ' for (i = 0; i < nargs; i++) {'), (32, ' if (PyTuple_SetItem(t, i, args[i]) < 0) {'), (33, ' Py_DECREF(t);'), (34, ' return NULL;'), (35, ' }'), (36, ' }'), (37, ' res = PyObject_CallObject(func, t);'), (38, ' Py_DECREF(t);'), (39, ' return res;'), (40, '}'), (41, '#endif'), (42, ''), (43, '#if PY_MINOR_VERSION < 6'), (44, '#define _PyUnicode_EqualToASCIIString(a, b) (PyUnicode_CompareWithASCIIString((a), (b)) == 0)'), (45, ''), (46, 'static PyObject *'), (47, '_PyBytes_DecodeEscape(const char *s,'), (48, ' Py_ssize_t len,'), (49, ' const char *errors,'), (50, ' Py_ssize_t unicode,'), (51, ' const char *recode_encoding,'), (52, ' const char **first_invalid_escape)'), (53, '{'), (54, ' *first_invalid_escape = NULL;'), (55, ' return PyBytes_DecodeEscape(s, len, errors, unicode, recode_encoding);'), (56, '}'), (57, ''), (58, 'PyObject *'), (59, '_PyUnicode_DecodeUnicodeEscape(const char *s,'), (60, ' Py_ssize_t size,'), (61, ' const char *errors,'), (62, ' const char **first_invalid_escape)'), (63, '{'), (64, ' *first_invalid_escape = NULL;'), (65, ' return PyUnicode_DecodeUnicodeEscape(s, size, errors);'), (66, '}'), (163, ' abort();'), (657, 'static asdl_seq *ast_for_suite(struct compiling *c, const node *n);'), (663, 'static stmt_ty ast_for_with_stmt(struct compiling *, const node *, bool);'), (664, 'static stmt_ty ast_for_for_stmt(struct compiling *, const node *, bool);'), (667, 'static expr_ty ast_for_call(struct compiling *, const node *, expr_ty, bool);'), (701, ' PyObject *form;'), 
(702, ' PyObject *args[2];'), (703, ' _Py_IDENTIFIER(NFKC);'), (708, ' form = _PyUnicode_FromId(&PyId_NFKC);'), (709, ' if (form == NULL) {'), (710, ' Py_DECREF(id);'), (711, ' return NULL;'), (712, ' }'), (713, ' args[0] = form;'), (714, ' args[1] = id;'), (715, ' id2 = _PyObject_FastCall(c->c_normalize, args, 2);'), (719, ' if (!PyUnicode_Check(id2)) {'), (720, ' PyErr_Format(PyExc_TypeError,'), (721, ' "unicodedata.normalize() must return a string, not "'), (722, ' "%.200s",'), (723, ' Py_TYPE(id2)->tp_name);'), (724, ' Py_DECREF(id2);'), (725, ' return NULL;'), (726, ' }'), (835, ' abort();'), (843, ' PyObject *filename, int feature_version,'), (844, ' PyArena *arena)'), (906, ' goto out;'), (907, ' asdl_seq_SET(type_ignores, i, ti);'), (1009, ' int feature_version, PyArena *arena)'), (1075, ' if (_PyUnicode_EqualToASCIIString(name, "__debug__")) {'), (1082, ' if (_PyUnicode_EqualToASCIIString(name, *p)) {'), (1298, ' /* fall through */'), (1313, ' /* fall through */'), (1440, ' if (TYPE(CHILD(n, i)) == COMMA)'), (1577, ' if (TYPE(CHILD(n, i)) == COMMA)'), (1593, ' if (TYPE(CHILD(n, i)) == TYPE_COMMENT) {'), (1609, ' i += 2; /* the star and the name */'), (1610, ' if (TYPE(CHILD(n, i)) == COMMA)'), (1611, ' i += 1; /* the comma, if present */'), (1613, ' if (TYPE(CHILD(n, i)) == TYPE_COMMENT) {'), (1635, ' if (TYPE(CHILD(n, i)) == COMMA)'), (1717, ' d = ast_for_call(c, CHILD(n, 3), name_expr, true);'), (1748, 'ast_for_funcdef_impl(struct compiling *c, const node *n0,'), (1749, ' asdl_seq *decorator_seq, bool is_async)'), (1752, ' const node * const n = is_async ? 
CHILD(n0, 1) : n0;'), (1763, ' "Async functions are only supported in Python 3.5 and greater");'), (1802, ' type_comment, LINENO(n0), n0->n_col_offset, c->c_arena);'), (1805, ' type_comment, LINENO(n), n->n_col_offset, c->c_arena);'), (1811, " /* async_funcdef: 'async' funcdef */"), (1813, ' REQ(CHILD(n, 0), NAME);'), (1814, ' assert(strcmp(STR(CHILD(n, 0)), "async") == 0);'), (1817, ' return ast_for_funcdef_impl(c, n, decorator_seq,'), (1818, ' true /* is_async */);'), (1826, ' false /* is_async */);'), (1833, " /* async_stmt: 'async' (funcdef | with_stmt | for_stmt) */"), (1835, ' REQ(CHILD(n, 0), NAME);'), (1836, ' assert(strcmp(STR(CHILD(n, 0)), "async") == 0);'), (1840, ' return ast_for_funcdef_impl(c, n, NULL,'), (1841, ' true /* is_async */);'), (1843, ' return ast_for_with_stmt(c, n,'), (1844, ' true /* is_async */);'), (1847, ' return ast_for_for_stmt(c, n,'), (1848, ' true /* is_async */);'), (1953, ' if (NCH(n) == 2) {'), (1954, ' REQ(CHILD(n, 0), NAME);'), (1955, ' assert(strcmp(STR(CHILD(n, 0)), "async") == 0);'), (1956, ' n = CHILD(n, 1);'), (1958, ' else if (NCH(n) == 1) {'), (1959, ' n = CHILD(n, 0);'), (1960, ' }'), (1961, ' else {'), (1962, ' goto error;'), (1963, ' }'), (1964, ' if (NCH(n) == (5)) {'), (1965, ' n = CHILD(n, 4);'), (1984, ' error:'), (2033, ' node *sync_n;'), (2038, ' if (NCH(n) == 2) {'), (2040, ' REQ(CHILD(n, 0), NAME);'), (2041, ' assert(strcmp(STR(CHILD(n, 0)), "async") == 0);'), (2042, ' sync_n = CHILD(n, 1);'), (2044, ' else {'), (2045, ' sync_n = CHILD(n, 0);'), (2046, ' }'), (2047, ' REQ(sync_n, sync_comp_for);'), (2056, ' for_ch = CHILD(sync_n, 1);'), (2060, ' expression = ast_for_expr(c, CHILD(sync_n, 3));'), (2077, ' if (NCH(sync_n) == 5) {'), (2081, ' n = CHILD(sync_n, 4);'), (2352, ' pynum = parsenumber(c, STR(ch));'), (2575, ' return ast_for_call(c, CHILD(n, 1), left_expr, true);'), (2704, " /* there was an 'await' */"), (2769, " atom_expr: ['await'] atom trailer*"), (2917, 'ast_for_call(struct compiling *c, const 
node *n, expr_ty func, bool allowgen)'), (2924, ' int i, nargs, nkeywords;'), (2938, ' else if (TYPE(CHILD(ch, 1)) == comp_for) {'), (2939, ' nargs++;'), (2940, ' if (!allowgen) {'), (2941, ' ast_error(c, ch, "invalid syntax");'), (2942, ' return NULL;'), (2943, ' }'), (2944, ' if (NCH(n) > 1) {'), (2945, ' ast_error(c, ch, "Generator expression must be parenthesized");'), (2946, ' return NULL;'), (2947, ' }'), (2948, ' }'), (2957, ' args = _Ta3_asdl_seq_new(nargs, c->c_arena);'), (3109, ''), (3381, ' /* fall through */'), (3493, ' if (!str)'), (3494, ' return NULL;'), (3926, 'ast_for_for_stmt(struct compiling *c, const node *n0, bool is_async)'), (3928, ' const node * const n = is_async ? CHILD(n0, 1) : n0;'), (3978, ' return AsyncFor(target, expression, suite_seq, seq, type_comment,'), (3979, ' LINENO(n0), n0->n_col_offset,'), (3982, ' return For(target, expression, suite_seq, seq, type_comment,'), (3983, ' LINENO(n), n->n_col_offset,'), (4131, 'ast_for_with_stmt(struct compiling *c, const node *n0, bool is_async)'), (4133, ' const node * const n = is_async ? 
CHILD(n0, 1) : n0;'), (4170, ' return AsyncWith(items, body, type_comment, LINENO(n0), n0->n_col_offset, c->c_arena);'), (4194, ' return ClassDef(classname, NULL, NULL, s, decorator_seq,'), (4195, ' LINENO(n), n->n_col_offset, c->c_arena);'), (4199, ' s = ast_for_suite(c, CHILD(n, 5));'), (4207, ' return ClassDef(classname, NULL, NULL, s, decorator_seq,'), (4208, ' LINENO(n), n->n_col_offset, c->c_arena);'), (4220, ' call = ast_for_call(c, CHILD(n, 3), dummy, false);'), (4367, ' if (dup == NULL) {'), (4368, ' return PyErr_NoMemory();'), (4369, ' }'), (4393, 'static int'), (4394, 'warn_invalid_escape_sequence(struct compiling *c, const node *n,'), (4395, ' unsigned char first_invalid_escape_char)'), (4396, '{'), (4397, ' PyObject *msg = PyUnicode_FromFormat("invalid escape sequence \\\\%c",'), (4398, ' first_invalid_escape_char);'), (4399, ' if (msg == NULL) {'), (4400, ' return -1;'), (4401, ' }'), (4402, ' if (PyErr_WarnExplicitObject(PyExc_DeprecationWarning, msg,'), (4403, ' c->c_filename, LINENO(n),'), (4404, ' NULL, NULL) < 0)'), (4405, ' {'), (4406, ' if (PyErr_ExceptionMatches(PyExc_DeprecationWarning)) {'), (4407, ' const char *s;'), (4408, ''), (4409, ' /* Replace the DeprecationWarning exception with a SyntaxError'), (4410, ' to get a more accurate error report */'), (4411, ' PyErr_Clear();'), (4412, ''), (4413, ' s = PyUnicode_AsUTF8(msg);'), (4414, ' if (s != NULL) {'), (4415, ' ast_error(c, n, s);'), (4416, ' }'), (4417, ' }'), (4418, ' Py_DECREF(msg);'), (4419, ' return -1;'), (4420, ' }'), (4421, ' Py_DECREF(msg);'), (4422, ' return 0;'), (4423, '}'), (4424, ''), (4429, ' PyObject *v, *u;'), (4433, ' const char *first_invalid_escape;'), (4448, ' if (s >= end || *s & 0x80) {'), (4451, ' if (s >= end)'), (4452, ' break;'), (4474, ' assert(p - buf <= PyBytes_GET_SIZE(u));'), (4483, ' v = _PyUnicode_DecodeUnicodeEscape(s, len, NULL, &first_invalid_escape);'), (4484, ''), (4485, ' if (v != NULL && first_invalid_escape != NULL) {'), (4486, ' if 
(warn_invalid_escape_sequence(c, n, *first_invalid_escape) < 0) {'), (4487, ' /* We have not decref u before because first_invalid_escape points'), (4488, ' inside u. */'), (4489, ' Py_XDECREF(u);'), (4490, ' Py_DECREF(v);'), (4491, ' return NULL;'), (4492, ' }'), (4493, ' }'), (4494, ' Py_XDECREF(u);'), (4495, ' return v;'), (4502, ' const char *first_invalid_escape;'), (4503, ' PyObject *result = _PyBytes_DecodeEscape(s, len, NULL, 0, NULL,'), (4504, ' &first_invalid_escape);'), (4505, ' if (result == NULL)'), (4506, ' return NULL;'), (4507, ''), (4508, ' if (first_invalid_escape != NULL) {'), (4509, ' if (warn_invalid_escape_sequence(c, n, *first_invalid_escape) < 0) {'), (4510, ' Py_DECREF(result);'), (4511, ' return NULL;'), (4512, ' }'), (4513, ' }'), (4514, ' return result;'), (4515, '}'), (4516, ''), (4517, '/* Shift locations for the given node and all its children by adding `lineno`'), (4518, ' and `col_offset` to existing locations. */'), (4519, 'static void fstring_shift_node_locations(node *n, int lineno, int col_offset)'), (4520, '{'), (4521, ' int i;'), (4522, ' n->n_col_offset = n->n_col_offset + col_offset;'), (4523, ' for (i = 0; i < NCH(n); ++i) {'), (4524, ' if (n->n_lineno && n->n_lineno < CHILD(n, i)->n_lineno) {'), (4525, " /* Shifting column offsets unnecessary if there's been newlines. 
*/"), (4526, ' col_offset = 0;'), (4527, ' }'), (4528, ' fstring_shift_node_locations(CHILD(n, i), lineno, col_offset);'), (4529, ' }'), (4530, ' n->n_lineno = n->n_lineno + lineno;'), (4531, '}'), (4532, ''), (4533, '/* Fix locations for the given node and its children.'), (4534, ''), (4535, ' `parent` is the enclosing node.'), (4536, ' `n` is the node which locations are going to be fixed relative to parent.'), (4537, " `expr_str` is the child node's string representation, including braces."), (4538, '*/'), (4539, 'static void'), (4540, 'fstring_fix_node_location(const node *parent, node *n, char *expr_str)'), (4541, '{'), (4542, ' char *substr = NULL;'), (4543, ' char *start;'), (4544, ' int lines = LINENO(parent) - 1;'), (4545, ' int cols = parent->n_col_offset;'), (4546, ' /* Find the full fstring to fix location information in `n`. */'), (4547, ' while (parent && parent->n_type != STRING)'), (4548, ' parent = parent->n_child;'), (4549, ' if (parent && parent->n_str) {'), (4550, ' substr = strstr(parent->n_str, expr_str);'), (4551, ' if (substr) {'), (4552, ' start = substr;'), (4553, ' while (start > parent->n_str) {'), (4554, " if (start[0] == '\\n')"), (4555, ' break;'), (4556, ' start--;'), (4557, ' }'), (4558, ' cols += substr - start;'), (4559, ' /* Fix lineno in mulitline strings. */'), (4560, " while ((substr = strchr(substr + 1, '\\n')))"), (4561, ' lines--;'), (4562, ' }'), (4563, ' }'), (4564, ' fstring_shift_node_locations(n, lines, cols);'), (4575, ' node *mod_n;'), (4579, ' const char *s;'), (4580, ' PyObject *fstring_name;'), (4586, " /* If the substring is all whitespace, it's an error. We need to catch this"), (4587, ' here, and not when we call PyParser_SimpleParseStringFlagsFilename,'), (4588, " because turning the expression '' in to '()' would go from being invalid"), (4589, ' to valid. 
*/'), (4590, ' for (s = expr_start; s != expr_end; s++) {'), (4591, ' char c = *s;'), (4592, ' /* The Python parser ignores only the following whitespace'), (4593, ' characters (\\r already is converted to \\n). */'), (4594, " if (!(c == ' ' || c == '\\t' || c == '\\n' || c == '\\f')) {"), (4598, ' if (s == expr_end) {'), (4606, ' if (str == NULL) {'), (4607, ' PyErr_NoMemory();'), (4609, ' }'), (4617, ' mod_n = PyParser_SimpleParseStringFlagsFilename(str, "<fstring>",'), (4618, ' Py_eval_input, 0);'), (4619, ' if (!mod_n) {'), (4620, ' PyMem_RawFree(str);'), (4621, ' return NULL;'), (4622, ' }'), (4623, ' /* Reuse str to find the correct column offset. */'), (4624, " str[0] = '{';"), (4625, " str[len+1] = '}';"), (4626, ' fstring_fix_node_location(n, mod_n, str);'), (4633, ' Ta3Node_Free(mod_n);'), (4656, ' const char *s = *str;'), (4657, ' const char *literal_start = s;'), (4661, ' while (s < end) {'), (4662, ' char ch = *s++;'), (4663, " if (!raw && ch == '\\\\' && s < end) {"), (4664, ' ch = *s++;'), (4665, " if (ch == 'N') {"), (4666, " if (s < end && *s++ == '{') {"), (4667, " while (s < end && *s++ != '}') {"), (4668, ' }'), (4669, ' continue;'), (4670, ' }'), (4671, ' break;'), (4672, ' }'), (4673, " if (ch == '{' && warn_invalid_escape_sequence(c, n, ch) < 0) {"), (4674, ' return -1;'), (4675, ' }'), (4676, ' }'), (4677, " if (ch == '{' || ch == '}') {"), (4682, ' if (s < end && *s == ch) {'), (4686, ' *str = s + 1;'), (4694, ' *str = s - 1;'), (4702, ' s--;'), (4706, ' *str = s;'), (4707, ' assert(s <= end);'), (4708, " assert(s == end || *s == '{' || *s == '}');"), (4710, ' if (literal_start != s) {'), (4713, ' s - literal_start,'), (4717, ' s - literal_start);'), (5129, ' int fmode;'), (5148, ' state->fmode = 0;'), (5222, ' state->fmode = 1;'), (5244, ' /* Note that the literal can be zero length, if the'), (5245, ' input string is "\\\\\\n" or "\\\\\\r", among others. 
*/'), (5314, ' if (!state->fmode) {'), (5315, ' assert(!state->expr_list.size);')], 'deleted': [(14, '#if PY_MINOR_VERSION < 4'), (15, '#define PyErr_ProgramTextObject PyErr_ProgramText'), (17, '#define PyMem_RawMalloc PyMem_Malloc'), (18, '#define PyMem_RawRealloc PyMem_Realloc'), (19, '#define PyMem_RawFree PyMem_Free'), (115, ' assert(0);'), (116, ' return "(unknown)";'), (604, ' PyObject *c_normalize_args; /* Normalization argument tuple. */'), (611, 'static asdl_seq *ast_for_suite(struct compiling *, const node *);'), (617, 'static stmt_ty ast_for_with_stmt(struct compiling *, const node *, int);'), (618, 'static stmt_ty ast_for_for_stmt(struct compiling *, const node *, int);'), (621, 'static expr_ty ast_for_call(struct compiling *, const node *, expr_ty);'), (640, ' c->c_normalize_args = Py_BuildValue("(sN)", "NFKC", Py_None);'), (641, ' if (!c->c_normalize_args) {'), (642, ' Py_CLEAR(c->c_normalize);'), (643, ' return 0;'), (644, ' }'), (645, ' PyTuple_SET_ITEM(c->c_normalize_args, 1, NULL);'), (665, ' PyTuple_SET_ITEM(c->c_normalize_args, 1, id);'), (666, ' id2 = PyObject_Call(c->c_normalize, c->c_normalize_args, NULL);'), (778, ' assert(0);'), (779, ' return 0;'), (787, ' PyObject *filename, int feature_version,'), (788, ' PyArena *arena)'), (804, ' c.c_normalize_args = NULL;'), (851, ' goto out;'), (852, ' asdl_seq_SET(type_ignores, i, ti);'), (948, ' PyTuple_SET_ITEM(c.c_normalize_args, 1, NULL);'), (949, ' Py_DECREF(c.c_normalize_args);'), (956, ' int feature_version, PyArena *arena)'), (1022, ' if (PyUnicode_CompareWithASCIIString(name, "__debug__") == 0) {'), (1029, ' if (PyUnicode_CompareWithASCIIString(name, *p) == 0) {'), (1385, ' if (i < NCH(n) && TYPE(CHILD(n, i)) == COMMA)'), (1489, ' if (nposargs + nkwonlyargs > 255) {'), (1490, ' ast_error(c, n, "more than 255 arguments");'), (1491, ' return NULL;'), (1492, ' }'), (1493, ''), (1527, ' if (i < NCH(n) && TYPE(CHILD(n, i)) == COMMA)'), (1543, ' if (i < NCH(n) && TYPE(CHILD(n, i)) == 
TYPE_COMMENT) {'), (1559, ' i += 2; /* the star and the name */'), (1560, ' if (i < NCH(n) && TYPE(CHILD(n, i)) == COMMA)'), (1561, ' i += 1; /* the comma, if present */'), (1563, ' if (i < NCH(n) && TYPE(CHILD(n, i)) == TYPE_COMMENT) {'), (1585, ' if (i < NCH(n) && TYPE(CHILD(n, i)) == COMMA)'), (1667, ' d = ast_for_call(c, CHILD(n, 3), name_expr);'), (1698, 'ast_for_funcdef_impl(struct compiling *c, const node *n,'), (1699, ' asdl_seq *decorator_seq, int is_async)'), (1712, ' "Async functions are only supported in Python 3.5 and greater");'), (1751, ' type_comment, LINENO(n),'), (1752, ' n->n_col_offset, c->c_arena);'), (1755, ' type_comment, LINENO(n),'), (1756, ' n->n_col_offset, c->c_arena);'), (1762, ' /* async_funcdef: ASYNC funcdef */'), (1764, ' REQ(CHILD(n, 0), ASYNC);'), (1767, ' return ast_for_funcdef_impl(c, CHILD(n, 1), decorator_seq,'), (1768, ' 1 /* is_async */);'), (1776, ' 0 /* is_async */);'), (1783, ' /* async_stmt: ASYNC (funcdef | with_stmt | for_stmt) */'), (1785, ' REQ(CHILD(n, 0), ASYNC);'), (1789, ' return ast_for_funcdef_impl(c, CHILD(n, 1), NULL,'), (1790, ' 1 /* is_async */);'), (1792, ' return ast_for_with_stmt(c, CHILD(n, 1),'), (1793, ' 1 /* is_async */);'), (1796, ' return ast_for_for_stmt(c, CHILD(n, 1),'), (1797, ' 1 /* is_async */);'), (1898, ' int is_async;'), (1901, ' is_async = 0;'), (1904, ' if (TYPE(CHILD(n, 0)) == ASYNC) {'), (1905, ' is_async = 1;'), (1907, ' if (NCH(n) == (5 + is_async)) {'), (1908, ' n = CHILD(n, 4 + is_async);'), (1979, ' if (TYPE(CHILD(n, 0)) == ASYNC) {'), (1990, ' for_ch = CHILD(n, 1 + is_async);'), (1994, ' expression = ast_for_expr(c, CHILD(n, 3 + is_async));'), (2011, ' if (NCH(n) == (5 + is_async)) {'), (2015, ' n = CHILD(n, 4 + is_async);'), (2286, ' pynum = parsenumber(c, s);'), (2509, ' return ast_for_call(c, CHILD(n, 1), left_expr);'), (2638, ' /* there was an AWAIT */'), (2703, ' atom_expr: [AWAIT] atom trailer*'), (2851, 'ast_for_call(struct compiling *c, const node *n, expr_ty func)'), 
(2858, ' int i, nargs, nkeywords, ngens;'), (2867, ' ngens = 0;'), (2873, ' else if (TYPE(CHILD(ch, 1)) == comp_for)'), (2874, ' ngens++;'), (2882, ' if (ngens > 1 || (ngens && (nargs || nkeywords))) {'), (2883, ' ast_error(c, n, "Generator expression must be parenthesized "'), (2884, ' "if not sole argument");'), (2885, ' return NULL;'), (2886, ' }'), (2888, ' if (nargs + nkeywords + ngens > 255) {'), (2889, ' ast_error(c, n, "more than 255 arguments");'), (2890, ' return NULL;'), (2891, ' }'), (2892, ''), (2893, ' args = _Ta3_asdl_seq_new(nargs + ngens, c->c_arena);'), (3858, 'ast_for_for_stmt(struct compiling *c, const node *n, int is_async)'), (3909, ' return AsyncFor(target, expression, suite_seq, seq,'), (3910, ' type_comment, LINENO(n), n->n_col_offset,'), (3913, ' return For(target, expression, suite_seq, seq,'), (3914, ' type_comment, LINENO(n), n->n_col_offset,'), (4062, 'ast_for_with_stmt(struct compiling *c, const node *n, int is_async)'), (4100, ' return AsyncWith(items, body, type_comment, LINENO(n), n->n_col_offset, c->c_arena);'), (4124, ' return ClassDef(classname, NULL, NULL, s, decorator_seq, LINENO(n),'), (4125, ' n->n_col_offset, c->c_arena);'), (4129, ' s = ast_for_suite(c, CHILD(n,5));'), (4137, ' return ClassDef(classname, NULL, NULL, s, decorator_seq, LINENO(n),'), (4138, ' n->n_col_offset, c->c_arena);'), (4150, ' call = ast_for_call(c, CHILD(n, 3), dummy);'), (4324, ' PyObject *u;'), (4342, ' if (*s & 0x80) {'), (4366, ' assert(p - buf <= Py_SIZE(u));'), (4375, ' return PyUnicode_DecodeUnicodeEscape(s, len, NULL);'), (4382, ' return PyBytes_DecodeEscape(s, len, NULL, 0, NULL);'), (4392, ' int all_whitespace = 1;'), (4393, ' int kind;'), (4394, ' void *data;'), (4398, ' PyObject *o, *fstring_name;'), (4400, ' Py_ssize_t i;'), (4406, ' /* We know there are no escapes here, because backslashes are not allowed,'), (4407, " and we know it's utf-8 encoded (per PEP 263). 
But, in order to check"), (4408, ' that each char is not whitespace, we need to decode it to unicode.'), (4409, ' Which is unfortunate, but such is life. */'), (4410, ''), (4411, " /* If the substring is all whitespace, it's an error. We need to catch"), (4412, ' this here, and not when we call PyParser_ASTFromString, because turning'), (4413, " the expression '' in to '()' would go from being invalid to valid. */"), (4414, " /* Note that this code says an empty string is all whitespace. That's"), (4415, " important. There's a test for it: f'{}'. */"), (4416, ' o = PyUnicode_DecodeUTF8(expr_start, expr_end-expr_start, NULL);'), (4417, ' if (o == NULL)'), (4418, ' return NULL;'), (4419, ' len = PyUnicode_GET_LENGTH(o);'), (4420, ' kind = PyUnicode_KIND(o);'), (4421, ' data = PyUnicode_DATA(o);'), (4422, ' for (i = 0; i < len; i++) {'), (4423, ' if (!Py_UNICODE_ISSPACE(PyUnicode_READ(kind, data, i))) {'), (4424, ' all_whitespace = 0;'), (4428, ' Py_DECREF(o);'), (4429, ' if (all_whitespace) {'), (4434, ' /* Reuse len to be the length of the utf-8 input string. 
*/'), (4438, ' if (str == NULL)'), (4475, ' const char *literal_start = *str;'), (4476, ' const char *literal_end;'), (4477, ' int in_named_escape = 0;'), (4481, ' for (; *str < end; (*str)++) {'), (4482, ' char ch = **str;'), (4483, " if (!in_named_escape && ch == '{' && (*str)-literal_start >= 2 &&"), (4484, " *(*str-2) == '\\\\' && *(*str-1) == 'N') {"), (4485, ' in_named_escape = 1;'), (4486, " } else if (in_named_escape && ch == '}') {"), (4487, ' in_named_escape = 0;'), (4488, " } else if (ch == '{' || ch == '}') {"), (4493, ' if (*str+1 < end && *(*str+1) == ch) {'), (4497, ' literal_end = *str+1;'), (4498, ' *str += 2;'), (4516, ' literal_end = *str;'), (4517, ' assert(*str <= end);'), (4518, " assert(*str == end || **str == '{' || **str == '}');"), (4520, ' if (literal_start != literal_end) {'), (4523, ' literal_end-literal_start,'), (4527, ' literal_end-literal_start);'), (5060, ' assert(!state->last_str ||'), (5061, ' PyUnicode_GET_LENGTH(state->last_str) != 0);'), (5121, ' if(state->expr_list.size == 0) {'), (5145, " /* If there's only one expression, return it. Otherwise, we need"), (5146, ' to join them together. */'), (5147, ' if (seq->size == 1)'), (5148, ' return seq->elements[0];'), (5149, '')]}
360
171
4,475
28,946
5
36
1
https://github.com/python/typed_ast
CVE-2019-19274
CWE-125
3,185
readelf.c
C
doshn
/* * Copyright (c) Christos Zoulas 2003. * All Rights Reserved. * * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following conditions * are met: * 1. Redistributions of source code must retain the above copyright * notice immediately at the beginning of the file, without modification, * this list of conditions, and the following disclaimer. * 2. Redistributions in binary form must reproduce the above copyright * notice, this list of conditions and the following disclaimer in the * documentation and/or other materials provided with the distribution. * * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE FOR * ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF * SUCH DAMAGE. 
*/ #include "file.h" #ifndef lint FILE_RCSID("@(#)$File: readelf.c,v 1.113 2014/12/11 14:10:53 christos Exp $") #endif #ifdef BUILTIN_ELF #include <string.h> #include <ctype.h> #include <stdlib.h> #ifdef HAVE_UNISTD_H #include <unistd.h> #endif #include "readelf.h" #include "magic.h" #ifdef ELFCORE private int dophn_core(struct magic_set *, int, int, int, off_t, int, size_t, off_t, int *); #endif private int dophn_exec(struct magic_set *, int, int, int, off_t, int, size_t, off_t, int *, int); private int doshn(struct magic_set *, int, int, int, off_t, int, size_t, off_t, int *, int, int); private size_t donote(struct magic_set *, void *, size_t, size_t, int, int, size_t, int *); #define ELF_ALIGN(a) ((((a) + align - 1) / align) * align) #define isquote(c) (strchr("'\"`", (c)) != NULL) private uint16_t getu16(int, uint16_t); private uint32_t getu32(int, uint32_t); private uint64_t getu64(int, uint64_t); #define MAX_PHNUM 128 #define MAX_SHNUM 32768 #define SIZE_UNKNOWN ((off_t)-1) private int toomany(struct magic_set *ms, const char *name, uint16_t num) { if (file_printf(ms, ", too many %s header sections (%u)", name, num ) == -1) return -1; return 0; } private uint16_t getu16(int swap, uint16_t value) { union { uint16_t ui; char c[2]; } retval, tmpval; if (swap) { tmpval.ui = value; retval.c[0] = tmpval.c[1]; retval.c[1] = tmpval.c[0]; return retval.ui; } else return value; } private uint32_t getu32(int swap, uint32_t value) { union { uint32_t ui; char c[4]; } retval, tmpval; if (swap) { tmpval.ui = value; retval.c[0] = tmpval.c[3]; retval.c[1] = tmpval.c[2]; retval.c[2] = tmpval.c[1]; retval.c[3] = tmpval.c[0]; return retval.ui; } else return value; } private uint64_t getu64(int swap, uint64_t value) { union { uint64_t ui; char c[8]; } retval, tmpval; if (swap) { tmpval.ui = value; retval.c[0] = tmpval.c[7]; retval.c[1] = tmpval.c[6]; retval.c[2] = tmpval.c[5]; retval.c[3] = tmpval.c[4]; retval.c[4] = tmpval.c[3]; retval.c[5] = tmpval.c[2]; retval.c[6] = 
tmpval.c[1]; retval.c[7] = tmpval.c[0]; return retval.ui; } else return value; } #define elf_getu16(swap, value) getu16(swap, value) #define elf_getu32(swap, value) getu32(swap, value) #define elf_getu64(swap, value) getu64(swap, value) #define xsh_addr (clazz == ELFCLASS32 \ ? (void *)&sh32 \ : (void *)&sh64) #define xsh_sizeof (clazz == ELFCLASS32 \ ? sizeof(sh32) \ : sizeof(sh64)) #define xsh_size (size_t)(clazz == ELFCLASS32 \ ? elf_getu32(swap, sh32.sh_size) \ : elf_getu64(swap, sh64.sh_size)) #define xsh_offset (off_t)(clazz == ELFCLASS32 \ ? elf_getu32(swap, sh32.sh_offset) \ : elf_getu64(swap, sh64.sh_offset)) #define xsh_type (clazz == ELFCLASS32 \ ? elf_getu32(swap, sh32.sh_type) \ : elf_getu32(swap, sh64.sh_type)) #define xsh_name (clazz == ELFCLASS32 \ ? elf_getu32(swap, sh32.sh_name) \ : elf_getu32(swap, sh64.sh_name)) #define xph_addr (clazz == ELFCLASS32 \ ? (void *) &ph32 \ : (void *) &ph64) #define xph_sizeof (clazz == ELFCLASS32 \ ? sizeof(ph32) \ : sizeof(ph64)) #define xph_type (clazz == ELFCLASS32 \ ? elf_getu32(swap, ph32.p_type) \ : elf_getu32(swap, ph64.p_type)) #define xph_offset (off_t)(clazz == ELFCLASS32 \ ? elf_getu32(swap, ph32.p_offset) \ : elf_getu64(swap, ph64.p_offset)) #define xph_align (size_t)((clazz == ELFCLASS32 \ ? (off_t) (ph32.p_align ? \ elf_getu32(swap, ph32.p_align) : 4) \ : (off_t) (ph64.p_align ? \ elf_getu64(swap, ph64.p_align) : 4))) #define xph_filesz (size_t)((clazz == ELFCLASS32 \ ? elf_getu32(swap, ph32.p_filesz) \ : elf_getu64(swap, ph64.p_filesz))) #define xnh_addr (clazz == ELFCLASS32 \ ? (void *)&nh32 \ : (void *)&nh64) #define xph_memsz (size_t)((clazz == ELFCLASS32 \ ? elf_getu32(swap, ph32.p_memsz) \ : elf_getu64(swap, ph64.p_memsz))) #define xnh_sizeof (clazz == ELFCLASS32 \ ? sizeof nh32 \ : sizeof nh64) #define xnh_type (clazz == ELFCLASS32 \ ? elf_getu32(swap, nh32.n_type) \ : elf_getu32(swap, nh64.n_type)) #define xnh_namesz (clazz == ELFCLASS32 \ ? 
elf_getu32(swap, nh32.n_namesz) \ : elf_getu32(swap, nh64.n_namesz)) #define xnh_descsz (clazz == ELFCLASS32 \ ? elf_getu32(swap, nh32.n_descsz) \ : elf_getu32(swap, nh64.n_descsz)) #define prpsoffsets(i) (clazz == ELFCLASS32 \ ? prpsoffsets32[i] \ : prpsoffsets64[i]) #define xcap_addr (clazz == ELFCLASS32 \ ? (void *)&cap32 \ : (void *)&cap64) #define xcap_sizeof (clazz == ELFCLASS32 \ ? sizeof cap32 \ : sizeof cap64) #define xcap_tag (clazz == ELFCLASS32 \ ? elf_getu32(swap, cap32.c_tag) \ : elf_getu64(swap, cap64.c_tag)) #define xcap_val (clazz == ELFCLASS32 \ ? elf_getu32(swap, cap32.c_un.c_val) \ : elf_getu64(swap, cap64.c_un.c_val)) #ifdef ELFCORE /* * Try larger offsets first to avoid false matches * from earlier data that happen to look like strings. */ static const size_t prpsoffsets32[] = { #ifdef USE_NT_PSINFO 104, /* SunOS 5.x (command line) */ 88, /* SunOS 5.x (short name) */ #endif /* USE_NT_PSINFO */ 100, /* SunOS 5.x (command line) */ 84, /* SunOS 5.x (short name) */ 44, /* Linux (command line) */ 28, /* Linux 2.0.36 (short name) */ 8, /* FreeBSD */ }; static const size_t prpsoffsets64[] = { #ifdef USE_NT_PSINFO 152, /* SunOS 5.x (command line) */ 136, /* SunOS 5.x (short name) */ #endif /* USE_NT_PSINFO */ 136, /* SunOS 5.x, 64-bit (command line) */ 120, /* SunOS 5.x, 64-bit (short name) */ 56, /* Linux (command line) */ 40, /* Linux (tested on core from 2.4.x, short name) */ 16, /* FreeBSD, 64-bit */ }; #define NOFFSETS32 (sizeof prpsoffsets32 / sizeof prpsoffsets32[0]) #define NOFFSETS64 (sizeof prpsoffsets64 / sizeof prpsoffsets64[0]) #define NOFFSETS (clazz == ELFCLASS32 ? 
NOFFSETS32 : NOFFSETS64) /* * Look through the program headers of an executable image, searching * for a PT_NOTE section of type NT_PRPSINFO, with a name "CORE" or * "FreeBSD"; if one is found, try looking in various places in its * contents for a 16-character string containing only printable * characters - if found, that string should be the name of the program * that dropped core. Note: right after that 16-character string is, * at least in SunOS 5.x (and possibly other SVR4-flavored systems) and * Linux, a longer string (80 characters, in 5.x, probably other * SVR4-flavored systems, and Linux) containing the start of the * command line for that program. * * SunOS 5.x core files contain two PT_NOTE sections, with the types * NT_PRPSINFO (old) and NT_PSINFO (new). These structs contain the * same info about the command name and command line, so it probably * isn't worthwhile to look for NT_PSINFO, but the offsets are provided * above (see USE_NT_PSINFO), in case we ever decide to do so. The * NT_PRPSINFO and NT_PSINFO sections are always in order and adjacent; * the SunOS 5.x file command relies on this (and prefers the latter). * * The signal number probably appears in a section of type NT_PRSTATUS, * but that's also rather OS-dependent, in ways that are harder to * dissect with heuristics, so I'm not bothering with the signal number. * (I suppose the signal number could be of interest in situations where * you don't have the binary of the program that dropped core; if you * *do* have that binary, the debugger will probably tell you what * signal it was.) 
*/ #define OS_STYLE_SVR4 0 #define OS_STYLE_FREEBSD 1 #define OS_STYLE_NETBSD 2 private const char os_style_names[][8] = { "SVR4", "FreeBSD", "NetBSD", }; #define FLAGS_DID_CORE 0x01 #define FLAGS_DID_NOTE 0x02 #define FLAGS_DID_BUILD_ID 0x04 #define FLAGS_DID_CORE_STYLE 0x08 #define FLAGS_IS_CORE 0x10 private int dophn_core(struct magic_set *ms, int clazz, int swap, int fd, off_t off, int num, size_t size, off_t fsize, int *flags) { Elf32_Phdr ph32; Elf64_Phdr ph64; size_t offset, len; unsigned char nbuf[BUFSIZ]; ssize_t bufsize; if (size != xph_sizeof) { if (file_printf(ms, ", corrupted program header size") == -1) return -1; return 0; } /* * Loop through all the program headers. */ for ( ; num; num--) { if (pread(fd, xph_addr, xph_sizeof, off) == -1) { file_badread(ms); return -1; } off += size; if (fsize != SIZE_UNKNOWN && xph_offset > fsize) { /* Perhaps warn here */ continue; } if (xph_type != PT_NOTE) continue; /* * This is a PT_NOTE section; loop through all the notes * in the section. */ len = xph_filesz < sizeof(nbuf) ? xph_filesz : sizeof(nbuf); if ((bufsize = pread(fd, nbuf, len, xph_offset)) == -1) { file_badread(ms); return -1; } offset = 0; for (;;) { if (offset >= (size_t)bufsize) break; offset = donote(ms, nbuf, offset, (size_t)bufsize, clazz, swap, 4, flags); if (offset == 0) break; } } return 0; } #endif static void do_note_netbsd_version(struct magic_set *ms, int swap, void *v) { uint32_t desc; (void)memcpy(&desc, v, sizeof(desc)); desc = elf_getu32(swap, desc); if (file_printf(ms, ", for NetBSD") == -1) return; /* * The version number used to be stuck as 199905, and was thus * basically content-free. 
Newer versions of NetBSD have fixed * this and now use the encoding of __NetBSD_Version__: * * MMmmrrpp00 * * M = major version * m = minor version * r = release ["",A-Z,Z[A-Z] but numeric] * p = patchlevel */ if (desc > 100000000U) { uint32_t ver_patch = (desc / 100) % 100; uint32_t ver_rel = (desc / 10000) % 100; uint32_t ver_min = (desc / 1000000) % 100; uint32_t ver_maj = desc / 100000000; if (file_printf(ms, " %u.%u", ver_maj, ver_min) == -1) return; if (ver_rel == 0 && ver_patch != 0) { if (file_printf(ms, ".%u", ver_patch) == -1) return; } else if (ver_rel != 0) { while (ver_rel > 26) { if (file_printf(ms, "Z") == -1) return; ver_rel -= 26; } if (file_printf(ms, "%c", 'A' + ver_rel - 1) == -1) return; } } } static void do_note_freebsd_version(struct magic_set *ms, int swap, void *v) { uint32_t desc; (void)memcpy(&desc, v, sizeof(desc)); desc = elf_getu32(swap, desc); if (file_printf(ms, ", for FreeBSD") == -1) return; /* * Contents is __FreeBSD_version, whose relation to OS * versions is defined by a huge table in the Porter's * Handbook. This is the general scheme: * * Releases: * Mmp000 (before 4.10) * Mmi0p0 (before 5.0) * Mmm0p0 * * Development branches: * Mmpxxx (before 4.6) * Mmp1xx (before 4.10) * Mmi1xx (before 5.0) * M000xx (pre-M.0) * Mmm1xx * * M = major version * m = minor version * i = minor version increment (491000 -> 4.10) * p = patchlevel * x = revision * * The first release of FreeBSD to use ELF by default * was version 3.0. 
*/ if (desc == 460002) { if (file_printf(ms, " 4.6.2") == -1) return; } else if (desc < 460100) { if (file_printf(ms, " %d.%d", desc / 100000, desc / 10000 % 10) == -1) return; if (desc / 1000 % 10 > 0) if (file_printf(ms, ".%d", desc / 1000 % 10) == -1) return; if ((desc % 1000 > 0) || (desc % 100000 == 0)) if (file_printf(ms, " (%d)", desc) == -1) return; } else if (desc < 500000) { if (file_printf(ms, " %d.%d", desc / 100000, desc / 10000 % 10 + desc / 1000 % 10) == -1) return; if (desc / 100 % 10 > 0) { if (file_printf(ms, " (%d)", desc) == -1) return; } else if (desc / 10 % 10 > 0) { if (file_printf(ms, ".%d", desc / 10 % 10) == -1) return; } } else { if (file_printf(ms, " %d.%d", desc / 100000, desc / 1000 % 100) == -1) return; if ((desc / 100 % 10 > 0) || (desc % 100000 / 100 == 0)) { if (file_printf(ms, " (%d)", desc) == -1) return; } else if (desc / 10 % 10 > 0) { if (file_printf(ms, ".%d", desc / 10 % 10) == -1) return; } } } private size_t donote(struct magic_set *ms, void *vbuf, size_t offset, size_t size, int clazz, int swap, size_t align, int *flags) { Elf32_Nhdr nh32; Elf64_Nhdr nh64; size_t noff, doff; #ifdef ELFCORE int os_style = -1; #endif uint32_t namesz, descsz; unsigned char *nbuf = CAST(unsigned char *, vbuf); char sbuf[512]; if (xnh_sizeof + offset > size) { /* * We're out of note headers. */ return xnh_sizeof + offset; } (void)memcpy(xnh_addr, &nbuf[offset], xnh_sizeof); offset += xnh_sizeof; namesz = xnh_namesz; descsz = xnh_descsz; if ((namesz == 0) && (descsz == 0)) { /* * We're out of note headers. */ return (offset >= size) ? offset : size; } if (namesz & 0x80000000) { (void)file_printf(ms, ", bad note name size 0x%lx", (unsigned long)namesz); return 0; } if (descsz & 0x80000000) { (void)file_printf(ms, ", bad note description size 0x%lx", (unsigned long)descsz); return 0; } noff = offset; doff = ELF_ALIGN(offset + namesz); if (offset + namesz > size) { /* * We're past the end of the buffer. 
*/ return doff; } offset = ELF_ALIGN(doff + descsz); if (doff + descsz > size) { /* * We're past the end of the buffer. */ return (offset >= size) ? offset : size; } if ((*flags & (FLAGS_DID_NOTE|FLAGS_DID_BUILD_ID)) == (FLAGS_DID_NOTE|FLAGS_DID_BUILD_ID)) goto core; if (namesz == 5 && strcmp((char *)&nbuf[noff], "SuSE") == 0 && xnh_type == NT_GNU_VERSION && descsz == 2) { file_printf(ms, ", for SuSE %d.%d", nbuf[doff], nbuf[doff + 1]); } if (namesz == 4 && strcmp((char *)&nbuf[noff], "GNU") == 0 && xnh_type == NT_GNU_VERSION && descsz == 16) { uint32_t desc[4]; (void)memcpy(desc, &nbuf[doff], sizeof(desc)); if (file_printf(ms, ", for GNU/") == -1) return size; switch (elf_getu32(swap, desc[0])) { case GNU_OS_LINUX: if (file_printf(ms, "Linux") == -1) return size; break; case GNU_OS_HURD: if (file_printf(ms, "Hurd") == -1) return size; break; case GNU_OS_SOLARIS: if (file_printf(ms, "Solaris") == -1) return size; break; case GNU_OS_KFREEBSD: if (file_printf(ms, "kFreeBSD") == -1) return size; break; case GNU_OS_KNETBSD: if (file_printf(ms, "kNetBSD") == -1) return size; break; default: if (file_printf(ms, "<unknown>") == -1) return size; } if (file_printf(ms, " %d.%d.%d", elf_getu32(swap, desc[1]), elf_getu32(swap, desc[2]), elf_getu32(swap, desc[3])) == -1) return size; *flags |= FLAGS_DID_NOTE; return size; } if (namesz == 4 && strcmp((char *)&nbuf[noff], "GNU") == 0 && xnh_type == NT_GNU_BUILD_ID && (descsz == 16 || descsz == 20)) { uint8_t desc[20]; uint32_t i; if (file_printf(ms, ", BuildID[%s]=", descsz == 16 ? 
"md5/uuid" : "sha1") == -1) return size; (void)memcpy(desc, &nbuf[doff], descsz); for (i = 0; i < descsz; i++) if (file_printf(ms, "%02x", desc[i]) == -1) return size; *flags |= FLAGS_DID_BUILD_ID; } if (namesz == 4 && strcmp((char *)&nbuf[noff], "PaX") == 0 && xnh_type == NT_NETBSD_PAX && descsz == 4) { static const char *pax[] = { "+mprotect", "-mprotect", "+segvguard", "-segvguard", "+ASLR", "-ASLR", }; uint32_t desc; size_t i; int did = 0; (void)memcpy(&desc, &nbuf[doff], sizeof(desc)); desc = elf_getu32(swap, desc); if (desc && file_printf(ms, ", PaX: ") == -1) return size; for (i = 0; i < __arraycount(pax); i++) { if (((1 << i) & desc) == 0) continue; if (file_printf(ms, "%s%s", did++ ? "," : "", pax[i]) == -1) return size; } } if (namesz == 7 && strcmp((char *)&nbuf[noff], "NetBSD") == 0) { switch (xnh_type) { case NT_NETBSD_VERSION: if (descsz == 4) { do_note_netbsd_version(ms, swap, &nbuf[doff]); *flags |= FLAGS_DID_NOTE; return size; } break; case NT_NETBSD_MARCH: if (file_printf(ms, ", compiled for: %.*s", (int)descsz, (const char *)&nbuf[doff]) == -1) return size; break; case NT_NETBSD_CMODEL: if (file_printf(ms, ", compiler model: %.*s", (int)descsz, (const char *)&nbuf[doff]) == -1) return size; break; default: if (file_printf(ms, ", note=%u", xnh_type) == -1) return size; break; } return size; } if (namesz == 8 && strcmp((char *)&nbuf[noff], "FreeBSD") == 0) { if (xnh_type == NT_FREEBSD_VERSION && descsz == 4) { do_note_freebsd_version(ms, swap, &nbuf[doff]); *flags |= FLAGS_DID_NOTE; return size; } } if (namesz == 8 && strcmp((char *)&nbuf[noff], "OpenBSD") == 0 && xnh_type == NT_OPENBSD_VERSION && descsz == 4) { if (file_printf(ms, ", for OpenBSD") == -1) return size; /* Content of note is always 0 */ *flags |= FLAGS_DID_NOTE; return size; } if (namesz == 10 && strcmp((char *)&nbuf[noff], "DragonFly") == 0 && xnh_type == NT_DRAGONFLY_VERSION && descsz == 4) { uint32_t desc; if (file_printf(ms, ", for DragonFly") == -1) return size; 
(void)memcpy(&desc, &nbuf[doff], sizeof(desc)); desc = elf_getu32(swap, desc); if (file_printf(ms, " %d.%d.%d", desc / 100000, desc / 10000 % 10, desc % 10000) == -1) return size; *flags |= FLAGS_DID_NOTE; return size; } core: /* * Sigh. The 2.0.36 kernel in Debian 2.1, at * least, doesn't correctly implement name * sections, in core dumps, as specified by * the "Program Linking" section of "UNIX(R) System * V Release 4 Programmer's Guide: ANSI C and * Programming Support Tools", because my copy * clearly says "The first 'namesz' bytes in 'name' * contain a *null-terminated* [emphasis mine] * character representation of the entry's owner * or originator", but the 2.0.36 kernel code * doesn't include the terminating null in the * name.... */ if ((namesz == 4 && strncmp((char *)&nbuf[noff], "CORE", 4) == 0) || (namesz == 5 && strcmp((char *)&nbuf[noff], "CORE") == 0)) { os_style = OS_STYLE_SVR4; } if ((namesz == 8 && strcmp((char *)&nbuf[noff], "FreeBSD") == 0)) { os_style = OS_STYLE_FREEBSD; } if ((namesz >= 11 && strncmp((char *)&nbuf[noff], "NetBSD-CORE", 11) == 0)) { os_style = OS_STYLE_NETBSD; } #ifdef ELFCORE if ((*flags & FLAGS_DID_CORE) != 0) return size; if (os_style != -1 && (*flags & FLAGS_DID_CORE_STYLE) == 0) { if (file_printf(ms, ", %s-style", os_style_names[os_style]) == -1) return size; *flags |= FLAGS_DID_CORE_STYLE; } switch (os_style) { case OS_STYLE_NETBSD: if (xnh_type == NT_NETBSD_CORE_PROCINFO) { uint32_t signo; /* * Extract the program name. It is at * offset 0x7c, and is up to 32-bytes, * including the terminating NUL. */ if (file_printf(ms, ", from '%.31s'", file_printable(sbuf, sizeof(sbuf), (const char *)&nbuf[doff + 0x7c])) == -1) return size; /* * Extract the signal number. It is at * offset 0x08. 
*/ (void)memcpy(&signo, &nbuf[doff + 0x08], sizeof(signo)); if (file_printf(ms, " (signal %u)", elf_getu32(swap, signo)) == -1) return size; *flags |= FLAGS_DID_CORE; return size; } break; default: if (xnh_type == NT_PRPSINFO && *flags & FLAGS_IS_CORE) { size_t i, j; unsigned char c; /* * Extract the program name. We assume * it to be 16 characters (that's what it * is in SunOS 5.x and Linux). * * Unfortunately, it's at a different offset * in various OSes, so try multiple offsets. * If the characters aren't all printable, * reject it. */ for (i = 0; i < NOFFSETS; i++) { unsigned char *cname, *cp; size_t reloffset = prpsoffsets(i); size_t noffset = doff + reloffset; size_t k; for (j = 0; j < 16; j++, noffset++, reloffset++) { /* * Make sure we're not past * the end of the buffer; if * we are, just give up. */ if (noffset >= size) goto tryanother; /* * Make sure we're not past * the end of the contents; * if we are, this obviously * isn't the right offset. */ if (reloffset >= descsz) goto tryanother; c = nbuf[noffset]; if (c == '\0') { /* * A '\0' at the * beginning is * obviously wrong. * Any other '\0' * means we're done. */ if (j == 0) goto tryanother; else break; } else { /* * A nonprintable * character is also * wrong. */ if (!isprint(c) || isquote(c)) goto tryanother; } } /* * Well, that worked. */ /* * Try next offsets, in case this match is * in the middle of a string. */ for (k = i + 1 ; k < NOFFSETS ; k++) { size_t no; int adjust = 1; if (prpsoffsets(k) >= prpsoffsets(i)) continue; for (no = doff + prpsoffsets(k); no < doff + prpsoffsets(i); no++) adjust = adjust && isprint(nbuf[no]); if (adjust) i = k; } cname = (unsigned char *) &nbuf[doff + prpsoffsets(i)]; for (cp = cname; *cp && isprint(*cp); cp++) continue; /* * Linux apparently appends a space at the end * of the command line: remove it. 
*/ while (cp > cname && isspace(cp[-1])) cp--; if (file_printf(ms, ", from '%.*s'", (int)(cp - cname), cname) == -1) return size; *flags |= FLAGS_DID_CORE; return size; tryanother: ; } } break; } #endif return offset; } /* SunOS 5.x hardware capability descriptions */ typedef struct cap_desc { uint64_t cd_mask; const char *cd_name; } cap_desc_t; static const cap_desc_t cap_desc_sparc[] = { { AV_SPARC_MUL32, "MUL32" }, { AV_SPARC_DIV32, "DIV32" }, { AV_SPARC_FSMULD, "FSMULD" }, { AV_SPARC_V8PLUS, "V8PLUS" }, { AV_SPARC_POPC, "POPC" }, { AV_SPARC_VIS, "VIS" }, { AV_SPARC_VIS2, "VIS2" }, { AV_SPARC_ASI_BLK_INIT, "ASI_BLK_INIT" }, { AV_SPARC_FMAF, "FMAF" }, { AV_SPARC_FJFMAU, "FJFMAU" }, { AV_SPARC_IMA, "IMA" }, { 0, NULL } }; static const cap_desc_t cap_desc_386[] = { { AV_386_FPU, "FPU" }, { AV_386_TSC, "TSC" }, { AV_386_CX8, "CX8" }, { AV_386_SEP, "SEP" }, { AV_386_AMD_SYSC, "AMD_SYSC" }, { AV_386_CMOV, "CMOV" }, { AV_386_MMX, "MMX" }, { AV_386_AMD_MMX, "AMD_MMX" }, { AV_386_AMD_3DNow, "AMD_3DNow" }, { AV_386_AMD_3DNowx, "AMD_3DNowx" }, { AV_386_FXSR, "FXSR" }, { AV_386_SSE, "SSE" }, { AV_386_SSE2, "SSE2" }, { AV_386_PAUSE, "PAUSE" }, { AV_386_SSE3, "SSE3" }, { AV_386_MON, "MON" }, { AV_386_CX16, "CX16" }, { AV_386_AHF, "AHF" }, { AV_386_TSCP, "TSCP" }, { AV_386_AMD_SSE4A, "AMD_SSE4A" }, { AV_386_POPCNT, "POPCNT" }, { AV_386_AMD_LZCNT, "AMD_LZCNT" }, { AV_386_SSSE3, "SSSE3" }, { AV_386_SSE4_1, "SSE4.1" }, { AV_386_SSE4_2, "SSE4.2" }, { 0, NULL } }; private int doshn(struct magic_set *ms, int clazz, int swap, int fd, off_t off, int num, size_t size, off_t fsize, int *flags, int mach, int strtab) { Elf32_Shdr sh32; Elf64_Shdr sh64; int stripped = 1; size_t nbadcap = 0; void *nbuf; off_t noff, coff, name_off; uint64_t cap_hw1 = 0; /* SunOS 5.x hardware capabilites */ uint64_t cap_sf1 = 0; /* SunOS 5.x software capabilites */ char name[50]; if (size != xsh_sizeof) { if (file_printf(ms, ", corrupted section header size") == -1) return -1; return 0; } /* Read offset of 
name section to be able to read section names later */ if (pread(fd, xsh_addr, xsh_sizeof, off + size * strtab) == -1) { file_badread(ms); return -1; } name_off = xsh_offset; for ( ; num; num--) { /* Read the name of this section. */ if (pread(fd, name, sizeof(name), name_off + xsh_name) == -1) { file_badread(ms); return -1; } name[sizeof(name) - 1] = '\0'; if (strcmp(name, ".debug_info") == 0) stripped = 0; if (pread(fd, xsh_addr, xsh_sizeof, off) == -1) { file_badread(ms); return -1; } off += size; /* Things we can determine before we seek */ switch (xsh_type) { case SHT_SYMTAB: #if 0 case SHT_DYNSYM: #endif stripped = 0; break; default: if (fsize != SIZE_UNKNOWN && xsh_offset > fsize) { /* Perhaps warn here */ continue; } break; } /* Things we can determine when we seek */ switch (xsh_type) { case SHT_NOTE: if ((nbuf = malloc(xsh_size)) == NULL) { file_error(ms, errno, "Cannot allocate memory" " for note"); return -1; } if (pread(fd, nbuf, xsh_size, xsh_offset) == -1) { file_badread(ms); free(nbuf); return -1; } noff = 0; for (;;) { if (noff >= (off_t)xsh_size) break; noff = donote(ms, nbuf, (size_t)noff, xsh_size, clazz, swap, 4, flags); if (noff == 0) break; } free(nbuf); break; case SHT_SUNW_cap: switch (mach) { case EM_SPARC: case EM_SPARCV9: case EM_IA_64: case EM_386: case EM_AMD64: break; default: goto skip; } if (nbadcap > 5) break; if (lseek(fd, xsh_offset, SEEK_SET) == (off_t)-1) { file_badseek(ms); return -1; } coff = 0; for (;;) { Elf32_Cap cap32; Elf64_Cap cap64; char cbuf[/*CONSTCOND*/ MAX(sizeof cap32, sizeof cap64)]; if ((coff += xcap_sizeof) > (off_t)xsh_size) break; if (read(fd, cbuf, (size_t)xcap_sizeof) != (ssize_t)xcap_sizeof) { file_badread(ms); return -1; } if (cbuf[0] == 'A') { #ifdef notyet char *p = cbuf + 1; uint32_t len, tag; memcpy(&len, p, sizeof(len)); p += 4; len = getu32(swap, len); if (memcmp("gnu", p, 3) != 0) { if (file_printf(ms, ", unknown capability %.3s", p) == -1) return -1; break; } p += strlen(p) + 1; tag = *p++; 
memcpy(&len, p, sizeof(len)); p += 4; len = getu32(swap, len); if (tag != 1) { if (file_printf(ms, ", unknown gnu" " capability tag %d", tag) == -1) return -1; break; } // gnu attributes #endif break; } (void)memcpy(xcap_addr, cbuf, xcap_sizeof); switch (xcap_tag) { case CA_SUNW_NULL: break; case CA_SUNW_HW_1: cap_hw1 |= xcap_val; break; case CA_SUNW_SF_1: cap_sf1 |= xcap_val; break; default: if (file_printf(ms, ", with unknown capability " "0x%" INT64_T_FORMAT "x = 0x%" INT64_T_FORMAT "x", (unsigned long long)xcap_tag, (unsigned long long)xcap_val) == -1) return -1; if (nbadcap++ > 2) coff = xsh_size; break; } } /*FALLTHROUGH*/ skip: default: break; } } if (file_printf(ms, ", %sstripped", stripped ? "" : "not ") == -1) return -1; if (cap_hw1) { const cap_desc_t *cdp; switch (mach) { case EM_SPARC: case EM_SPARC32PLUS: case EM_SPARCV9: cdp = cap_desc_sparc; break; case EM_386: case EM_IA_64: case EM_AMD64: cdp = cap_desc_386; break; default: cdp = NULL; break; } if (file_printf(ms, ", uses") == -1) return -1; if (cdp) { while (cdp->cd_name) { if (cap_hw1 & cdp->cd_mask) { if (file_printf(ms, " %s", cdp->cd_name) == -1) return -1; cap_hw1 &= ~cdp->cd_mask; } ++cdp; } if (cap_hw1) if (file_printf(ms, " unknown hardware capability 0x%" INT64_T_FORMAT "x", (unsigned long long)cap_hw1) == -1) return -1; } else { if (file_printf(ms, " hardware capability 0x%" INT64_T_FORMAT "x", (unsigned long long)cap_hw1) == -1) return -1; } } if (cap_sf1) { if (cap_sf1 & SF1_SUNW_FPUSED) { if (file_printf(ms, (cap_sf1 & SF1_SUNW_FPKNWN) ? ", uses frame pointer" : ", not known to use frame pointer") == -1) return -1; } cap_sf1 &= ~SF1_SUNW_MASK; if (cap_sf1) if (file_printf(ms, ", with unknown software capability 0x%" INT64_T_FORMAT "x", (unsigned long long)cap_sf1) == -1) return -1; } return 0; } /* * Look through the program headers of an executable image, searching * for a PT_INTERP section; if one is found, it's dynamically linked, * otherwise it's statically linked. 
*/ private int dophn_exec(struct magic_set *ms, int clazz, int swap, int fd, off_t off, int num, size_t size, off_t fsize, int *flags, int sh_num) { Elf32_Phdr ph32; Elf64_Phdr ph64; const char *linking_style = "statically"; const char *interp = ""; unsigned char nbuf[BUFSIZ]; char ibuf[BUFSIZ]; ssize_t bufsize; size_t offset, align, len; if (size != xph_sizeof) { if (file_printf(ms, ", corrupted program header size") == -1) return -1; return 0; } for ( ; num; num--) { if (pread(fd, xph_addr, xph_sizeof, off) == -1) { file_badread(ms); return -1; } off += size; bufsize = 0; align = 4; /* Things we can determine before we seek */ switch (xph_type) { case PT_DYNAMIC: linking_style = "dynamically"; break; case PT_NOTE: if (sh_num) /* Did this through section headers */ continue; if (((align = xph_align) & 0x80000000UL) != 0 || align < 4) { if (file_printf(ms, ", invalid note alignment 0x%lx", (unsigned long)align) == -1) return -1; align = 4; } /*FALLTHROUGH*/ case PT_INTERP: len = xph_filesz < sizeof(nbuf) ? xph_filesz : sizeof(nbuf); bufsize = pread(fd, nbuf, len, xph_offset); if (bufsize == -1) { file_badread(ms); return -1; } break; default: if (fsize != SIZE_UNKNOWN && xph_offset > fsize) { /* Maybe warn here? */ continue; } break; } /* Things we can determine when we seek */ switch (xph_type) { case PT_INTERP: if (bufsize && nbuf[0]) { nbuf[bufsize - 1] = '\0'; interp = (const char *)nbuf; } else interp = "*empty*"; break; case PT_NOTE: /* * This is a PT_NOTE section; loop through all the notes * in the section. 
*/ offset = 0; for (;;) { if (offset >= (size_t)bufsize) break; offset = donote(ms, nbuf, offset, (size_t)bufsize, clazz, swap, align, flags); if (offset == 0) break; } break; default: break; } } if (file_printf(ms, ", %s linked", linking_style) == -1) return -1; if (interp[0]) if (file_printf(ms, ", interpreter %s", file_printable(ibuf, sizeof(ibuf), interp)) == -1) return -1; return 0; } protected int file_tryelf(struct magic_set *ms, int fd, const unsigned char *buf, size_t nbytes) { union { int32_t l; char c[sizeof (int32_t)]; } u; int clazz; int swap; struct stat st; off_t fsize; int flags = 0; Elf32_Ehdr elf32hdr; Elf64_Ehdr elf64hdr; uint16_t type, phnum, shnum; if (ms->flags & (MAGIC_MIME|MAGIC_APPLE)) return 0; /* * ELF executables have multiple section headers in arbitrary * file locations and thus file(1) cannot determine it from easily. * Instead we traverse thru all section headers until a symbol table * one is found or else the binary is stripped. * Return immediately if it's not ELF (so we avoid pipe2file unless needed). */ if (buf[EI_MAG0] != ELFMAG0 || (buf[EI_MAG1] != ELFMAG1 && buf[EI_MAG1] != OLFMAG1) || buf[EI_MAG2] != ELFMAG2 || buf[EI_MAG3] != ELFMAG3) return 0; /* * If we cannot seek, it must be a pipe, socket or fifo. */ if((lseek(fd, (off_t)0, SEEK_SET) == (off_t)-1) && (errno == ESPIPE)) fd = file_pipe2file(ms, fd, buf, nbytes); if (fstat(fd, &st) == -1) { file_badread(ms); return -1; } if (S_ISREG(st.st_mode) || st.st_size != 0) fsize = st.st_size; else fsize = SIZE_UNKNOWN; clazz = buf[EI_CLASS]; switch (clazz) { case ELFCLASS32: #undef elf_getu #define elf_getu(a, b) elf_getu32(a, b) #undef elfhdr #define elfhdr elf32hdr #include "elfclass.h" case ELFCLASS64: #undef elf_getu #define elf_getu(a, b) elf_getu64(a, b) #undef elfhdr #define elfhdr elf64hdr #include "elfclass.h" default: if (file_printf(ms, ", unknown class %d", clazz) == -1) return -1; break; } return 0; } #endif
/* * Copyright (c) Christos Zoulas 2003. * All Rights Reserved. * * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following conditions * are met: * 1. Redistributions of source code must retain the above copyright * notice immediately at the beginning of the file, without modification, * this list of conditions, and the following disclaimer. * 2. Redistributions in binary form must reproduce the above copyright * notice, this list of conditions and the following disclaimer in the * documentation and/or other materials provided with the distribution. * * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE FOR * ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF * SUCH DAMAGE. 
*/ #include "file.h" #ifndef lint FILE_RCSID("@(#)$File: readelf.c,v 1.114 2014/12/11 14:19:36 christos Exp $") #endif #ifdef BUILTIN_ELF #include <string.h> #include <ctype.h> #include <stdlib.h> #ifdef HAVE_UNISTD_H #include <unistd.h> #endif #include "readelf.h" #include "magic.h" #ifdef ELFCORE private int dophn_core(struct magic_set *, int, int, int, off_t, int, size_t, off_t, int *); #endif private int dophn_exec(struct magic_set *, int, int, int, off_t, int, size_t, off_t, int *, int); private int doshn(struct magic_set *, int, int, int, off_t, int, size_t, off_t, int *, int, int); private size_t donote(struct magic_set *, void *, size_t, size_t, int, int, size_t, int *); #define ELF_ALIGN(a) ((((a) + align - 1) / align) * align) #define isquote(c) (strchr("'\"`", (c)) != NULL) private uint16_t getu16(int, uint16_t); private uint32_t getu32(int, uint32_t); private uint64_t getu64(int, uint64_t); #define MAX_PHNUM 128 #define MAX_SHNUM 32768 #define SIZE_UNKNOWN ((off_t)-1) private int toomany(struct magic_set *ms, const char *name, uint16_t num) { if (file_printf(ms, ", too many %s header sections (%u)", name, num ) == -1) return -1; return 0; } private uint16_t getu16(int swap, uint16_t value) { union { uint16_t ui; char c[2]; } retval, tmpval; if (swap) { tmpval.ui = value; retval.c[0] = tmpval.c[1]; retval.c[1] = tmpval.c[0]; return retval.ui; } else return value; } private uint32_t getu32(int swap, uint32_t value) { union { uint32_t ui; char c[4]; } retval, tmpval; if (swap) { tmpval.ui = value; retval.c[0] = tmpval.c[3]; retval.c[1] = tmpval.c[2]; retval.c[2] = tmpval.c[1]; retval.c[3] = tmpval.c[0]; return retval.ui; } else return value; } private uint64_t getu64(int swap, uint64_t value) { union { uint64_t ui; char c[8]; } retval, tmpval; if (swap) { tmpval.ui = value; retval.c[0] = tmpval.c[7]; retval.c[1] = tmpval.c[6]; retval.c[2] = tmpval.c[5]; retval.c[3] = tmpval.c[4]; retval.c[4] = tmpval.c[3]; retval.c[5] = tmpval.c[2]; retval.c[6] = 
tmpval.c[1]; retval.c[7] = tmpval.c[0]; return retval.ui; } else return value; } #define elf_getu16(swap, value) getu16(swap, value) #define elf_getu32(swap, value) getu32(swap, value) #define elf_getu64(swap, value) getu64(swap, value) #define xsh_addr (clazz == ELFCLASS32 \ ? (void *)&sh32 \ : (void *)&sh64) #define xsh_sizeof (clazz == ELFCLASS32 \ ? sizeof(sh32) \ : sizeof(sh64)) #define xsh_size (size_t)(clazz == ELFCLASS32 \ ? elf_getu32(swap, sh32.sh_size) \ : elf_getu64(swap, sh64.sh_size)) #define xsh_offset (off_t)(clazz == ELFCLASS32 \ ? elf_getu32(swap, sh32.sh_offset) \ : elf_getu64(swap, sh64.sh_offset)) #define xsh_type (clazz == ELFCLASS32 \ ? elf_getu32(swap, sh32.sh_type) \ : elf_getu32(swap, sh64.sh_type)) #define xsh_name (clazz == ELFCLASS32 \ ? elf_getu32(swap, sh32.sh_name) \ : elf_getu32(swap, sh64.sh_name)) #define xph_addr (clazz == ELFCLASS32 \ ? (void *) &ph32 \ : (void *) &ph64) #define xph_sizeof (clazz == ELFCLASS32 \ ? sizeof(ph32) \ : sizeof(ph64)) #define xph_type (clazz == ELFCLASS32 \ ? elf_getu32(swap, ph32.p_type) \ : elf_getu32(swap, ph64.p_type)) #define xph_offset (off_t)(clazz == ELFCLASS32 \ ? elf_getu32(swap, ph32.p_offset) \ : elf_getu64(swap, ph64.p_offset)) #define xph_align (size_t)((clazz == ELFCLASS32 \ ? (off_t) (ph32.p_align ? \ elf_getu32(swap, ph32.p_align) : 4) \ : (off_t) (ph64.p_align ? \ elf_getu64(swap, ph64.p_align) : 4))) #define xph_filesz (size_t)((clazz == ELFCLASS32 \ ? elf_getu32(swap, ph32.p_filesz) \ : elf_getu64(swap, ph64.p_filesz))) #define xnh_addr (clazz == ELFCLASS32 \ ? (void *)&nh32 \ : (void *)&nh64) #define xph_memsz (size_t)((clazz == ELFCLASS32 \ ? elf_getu32(swap, ph32.p_memsz) \ : elf_getu64(swap, ph64.p_memsz))) #define xnh_sizeof (clazz == ELFCLASS32 \ ? sizeof nh32 \ : sizeof nh64) #define xnh_type (clazz == ELFCLASS32 \ ? elf_getu32(swap, nh32.n_type) \ : elf_getu32(swap, nh64.n_type)) #define xnh_namesz (clazz == ELFCLASS32 \ ? 
elf_getu32(swap, nh32.n_namesz) \ : elf_getu32(swap, nh64.n_namesz)) #define xnh_descsz (clazz == ELFCLASS32 \ ? elf_getu32(swap, nh32.n_descsz) \ : elf_getu32(swap, nh64.n_descsz)) #define prpsoffsets(i) (clazz == ELFCLASS32 \ ? prpsoffsets32[i] \ : prpsoffsets64[i]) #define xcap_addr (clazz == ELFCLASS32 \ ? (void *)&cap32 \ : (void *)&cap64) #define xcap_sizeof (clazz == ELFCLASS32 \ ? sizeof cap32 \ : sizeof cap64) #define xcap_tag (clazz == ELFCLASS32 \ ? elf_getu32(swap, cap32.c_tag) \ : elf_getu64(swap, cap64.c_tag)) #define xcap_val (clazz == ELFCLASS32 \ ? elf_getu32(swap, cap32.c_un.c_val) \ : elf_getu64(swap, cap64.c_un.c_val)) #ifdef ELFCORE /* * Try larger offsets first to avoid false matches * from earlier data that happen to look like strings. */ static const size_t prpsoffsets32[] = { #ifdef USE_NT_PSINFO 104, /* SunOS 5.x (command line) */ 88, /* SunOS 5.x (short name) */ #endif /* USE_NT_PSINFO */ 100, /* SunOS 5.x (command line) */ 84, /* SunOS 5.x (short name) */ 44, /* Linux (command line) */ 28, /* Linux 2.0.36 (short name) */ 8, /* FreeBSD */ }; static const size_t prpsoffsets64[] = { #ifdef USE_NT_PSINFO 152, /* SunOS 5.x (command line) */ 136, /* SunOS 5.x (short name) */ #endif /* USE_NT_PSINFO */ 136, /* SunOS 5.x, 64-bit (command line) */ 120, /* SunOS 5.x, 64-bit (short name) */ 56, /* Linux (command line) */ 40, /* Linux (tested on core from 2.4.x, short name) */ 16, /* FreeBSD, 64-bit */ }; #define NOFFSETS32 (sizeof prpsoffsets32 / sizeof prpsoffsets32[0]) #define NOFFSETS64 (sizeof prpsoffsets64 / sizeof prpsoffsets64[0]) #define NOFFSETS (clazz == ELFCLASS32 ? 
NOFFSETS32 : NOFFSETS64) /* * Look through the program headers of an executable image, searching * for a PT_NOTE section of type NT_PRPSINFO, with a name "CORE" or * "FreeBSD"; if one is found, try looking in various places in its * contents for a 16-character string containing only printable * characters - if found, that string should be the name of the program * that dropped core. Note: right after that 16-character string is, * at least in SunOS 5.x (and possibly other SVR4-flavored systems) and * Linux, a longer string (80 characters, in 5.x, probably other * SVR4-flavored systems, and Linux) containing the start of the * command line for that program. * * SunOS 5.x core files contain two PT_NOTE sections, with the types * NT_PRPSINFO (old) and NT_PSINFO (new). These structs contain the * same info about the command name and command line, so it probably * isn't worthwhile to look for NT_PSINFO, but the offsets are provided * above (see USE_NT_PSINFO), in case we ever decide to do so. The * NT_PRPSINFO and NT_PSINFO sections are always in order and adjacent; * the SunOS 5.x file command relies on this (and prefers the latter). * * The signal number probably appears in a section of type NT_PRSTATUS, * but that's also rather OS-dependent, in ways that are harder to * dissect with heuristics, so I'm not bothering with the signal number. * (I suppose the signal number could be of interest in situations where * you don't have the binary of the program that dropped core; if you * *do* have that binary, the debugger will probably tell you what * signal it was.) 
*/ #define OS_STYLE_SVR4 0 #define OS_STYLE_FREEBSD 1 #define OS_STYLE_NETBSD 2 private const char os_style_names[][8] = { "SVR4", "FreeBSD", "NetBSD", }; #define FLAGS_DID_CORE 0x01 #define FLAGS_DID_NOTE 0x02 #define FLAGS_DID_BUILD_ID 0x04 #define FLAGS_DID_CORE_STYLE 0x08 #define FLAGS_IS_CORE 0x10 private int dophn_core(struct magic_set *ms, int clazz, int swap, int fd, off_t off, int num, size_t size, off_t fsize, int *flags) { Elf32_Phdr ph32; Elf64_Phdr ph64; size_t offset, len; unsigned char nbuf[BUFSIZ]; ssize_t bufsize; if (size != xph_sizeof) { if (file_printf(ms, ", corrupted program header size") == -1) return -1; return 0; } /* * Loop through all the program headers. */ for ( ; num; num--) { if (pread(fd, xph_addr, xph_sizeof, off) < (ssize_t)xph_sizeof) { file_badread(ms); return -1; } off += size; if (fsize != SIZE_UNKNOWN && xph_offset > fsize) { /* Perhaps warn here */ continue; } if (xph_type != PT_NOTE) continue; /* * This is a PT_NOTE section; loop through all the notes * in the section. */ len = xph_filesz < sizeof(nbuf) ? xph_filesz : sizeof(nbuf); if ((bufsize = pread(fd, nbuf, len, xph_offset)) == -1) { file_badread(ms); return -1; } offset = 0; for (;;) { if (offset >= (size_t)bufsize) break; offset = donote(ms, nbuf, offset, (size_t)bufsize, clazz, swap, 4, flags); if (offset == 0) break; } } return 0; } #endif static void do_note_netbsd_version(struct magic_set *ms, int swap, void *v) { uint32_t desc; (void)memcpy(&desc, v, sizeof(desc)); desc = elf_getu32(swap, desc); if (file_printf(ms, ", for NetBSD") == -1) return; /* * The version number used to be stuck as 199905, and was thus * basically content-free. 
Newer versions of NetBSD have fixed * this and now use the encoding of __NetBSD_Version__: * * MMmmrrpp00 * * M = major version * m = minor version * r = release ["",A-Z,Z[A-Z] but numeric] * p = patchlevel */ if (desc > 100000000U) { uint32_t ver_patch = (desc / 100) % 100; uint32_t ver_rel = (desc / 10000) % 100; uint32_t ver_min = (desc / 1000000) % 100; uint32_t ver_maj = desc / 100000000; if (file_printf(ms, " %u.%u", ver_maj, ver_min) == -1) return; if (ver_rel == 0 && ver_patch != 0) { if (file_printf(ms, ".%u", ver_patch) == -1) return; } else if (ver_rel != 0) { while (ver_rel > 26) { if (file_printf(ms, "Z") == -1) return; ver_rel -= 26; } if (file_printf(ms, "%c", 'A' + ver_rel - 1) == -1) return; } } } static void do_note_freebsd_version(struct magic_set *ms, int swap, void *v) { uint32_t desc; (void)memcpy(&desc, v, sizeof(desc)); desc = elf_getu32(swap, desc); if (file_printf(ms, ", for FreeBSD") == -1) return; /* * Contents is __FreeBSD_version, whose relation to OS * versions is defined by a huge table in the Porter's * Handbook. This is the general scheme: * * Releases: * Mmp000 (before 4.10) * Mmi0p0 (before 5.0) * Mmm0p0 * * Development branches: * Mmpxxx (before 4.6) * Mmp1xx (before 4.10) * Mmi1xx (before 5.0) * M000xx (pre-M.0) * Mmm1xx * * M = major version * m = minor version * i = minor version increment (491000 -> 4.10) * p = patchlevel * x = revision * * The first release of FreeBSD to use ELF by default * was version 3.0. 
*/ if (desc == 460002) { if (file_printf(ms, " 4.6.2") == -1) return; } else if (desc < 460100) { if (file_printf(ms, " %d.%d", desc / 100000, desc / 10000 % 10) == -1) return; if (desc / 1000 % 10 > 0) if (file_printf(ms, ".%d", desc / 1000 % 10) == -1) return; if ((desc % 1000 > 0) || (desc % 100000 == 0)) if (file_printf(ms, " (%d)", desc) == -1) return; } else if (desc < 500000) { if (file_printf(ms, " %d.%d", desc / 100000, desc / 10000 % 10 + desc / 1000 % 10) == -1) return; if (desc / 100 % 10 > 0) { if (file_printf(ms, " (%d)", desc) == -1) return; } else if (desc / 10 % 10 > 0) { if (file_printf(ms, ".%d", desc / 10 % 10) == -1) return; } } else { if (file_printf(ms, " %d.%d", desc / 100000, desc / 1000 % 100) == -1) return; if ((desc / 100 % 10 > 0) || (desc % 100000 / 100 == 0)) { if (file_printf(ms, " (%d)", desc) == -1) return; } else if (desc / 10 % 10 > 0) { if (file_printf(ms, ".%d", desc / 10 % 10) == -1) return; } } } private size_t donote(struct magic_set *ms, void *vbuf, size_t offset, size_t size, int clazz, int swap, size_t align, int *flags) { Elf32_Nhdr nh32; Elf64_Nhdr nh64; size_t noff, doff; #ifdef ELFCORE int os_style = -1; #endif uint32_t namesz, descsz; unsigned char *nbuf = CAST(unsigned char *, vbuf); char sbuf[512]; if (xnh_sizeof + offset > size) { /* * We're out of note headers. */ return xnh_sizeof + offset; } (void)memcpy(xnh_addr, &nbuf[offset], xnh_sizeof); offset += xnh_sizeof; namesz = xnh_namesz; descsz = xnh_descsz; if ((namesz == 0) && (descsz == 0)) { /* * We're out of note headers. */ return (offset >= size) ? offset : size; } if (namesz & 0x80000000) { (void)file_printf(ms, ", bad note name size 0x%lx", (unsigned long)namesz); return 0; } if (descsz & 0x80000000) { (void)file_printf(ms, ", bad note description size 0x%lx", (unsigned long)descsz); return 0; } noff = offset; doff = ELF_ALIGN(offset + namesz); if (offset + namesz > size) { /* * We're past the end of the buffer. 
*/ return doff; } offset = ELF_ALIGN(doff + descsz); if (doff + descsz > size) { /* * We're past the end of the buffer. */ return (offset >= size) ? offset : size; } if ((*flags & (FLAGS_DID_NOTE|FLAGS_DID_BUILD_ID)) == (FLAGS_DID_NOTE|FLAGS_DID_BUILD_ID)) goto core; if (namesz == 5 && strcmp((char *)&nbuf[noff], "SuSE") == 0 && xnh_type == NT_GNU_VERSION && descsz == 2) { file_printf(ms, ", for SuSE %d.%d", nbuf[doff], nbuf[doff + 1]); } if (namesz == 4 && strcmp((char *)&nbuf[noff], "GNU") == 0 && xnh_type == NT_GNU_VERSION && descsz == 16) { uint32_t desc[4]; (void)memcpy(desc, &nbuf[doff], sizeof(desc)); if (file_printf(ms, ", for GNU/") == -1) return size; switch (elf_getu32(swap, desc[0])) { case GNU_OS_LINUX: if (file_printf(ms, "Linux") == -1) return size; break; case GNU_OS_HURD: if (file_printf(ms, "Hurd") == -1) return size; break; case GNU_OS_SOLARIS: if (file_printf(ms, "Solaris") == -1) return size; break; case GNU_OS_KFREEBSD: if (file_printf(ms, "kFreeBSD") == -1) return size; break; case GNU_OS_KNETBSD: if (file_printf(ms, "kNetBSD") == -1) return size; break; default: if (file_printf(ms, "<unknown>") == -1) return size; } if (file_printf(ms, " %d.%d.%d", elf_getu32(swap, desc[1]), elf_getu32(swap, desc[2]), elf_getu32(swap, desc[3])) == -1) return size; *flags |= FLAGS_DID_NOTE; return size; } if (namesz == 4 && strcmp((char *)&nbuf[noff], "GNU") == 0 && xnh_type == NT_GNU_BUILD_ID && (descsz == 16 || descsz == 20)) { uint8_t desc[20]; uint32_t i; if (file_printf(ms, ", BuildID[%s]=", descsz == 16 ? 
"md5/uuid" : "sha1") == -1) return size; (void)memcpy(desc, &nbuf[doff], descsz); for (i = 0; i < descsz; i++) if (file_printf(ms, "%02x", desc[i]) == -1) return size; *flags |= FLAGS_DID_BUILD_ID; } if (namesz == 4 && strcmp((char *)&nbuf[noff], "PaX") == 0 && xnh_type == NT_NETBSD_PAX && descsz == 4) { static const char *pax[] = { "+mprotect", "-mprotect", "+segvguard", "-segvguard", "+ASLR", "-ASLR", }; uint32_t desc; size_t i; int did = 0; (void)memcpy(&desc, &nbuf[doff], sizeof(desc)); desc = elf_getu32(swap, desc); if (desc && file_printf(ms, ", PaX: ") == -1) return size; for (i = 0; i < __arraycount(pax); i++) { if (((1 << i) & desc) == 0) continue; if (file_printf(ms, "%s%s", did++ ? "," : "", pax[i]) == -1) return size; } } if (namesz == 7 && strcmp((char *)&nbuf[noff], "NetBSD") == 0) { switch (xnh_type) { case NT_NETBSD_VERSION: if (descsz == 4) { do_note_netbsd_version(ms, swap, &nbuf[doff]); *flags |= FLAGS_DID_NOTE; return size; } break; case NT_NETBSD_MARCH: if (file_printf(ms, ", compiled for: %.*s", (int)descsz, (const char *)&nbuf[doff]) == -1) return size; break; case NT_NETBSD_CMODEL: if (file_printf(ms, ", compiler model: %.*s", (int)descsz, (const char *)&nbuf[doff]) == -1) return size; break; default: if (file_printf(ms, ", note=%u", xnh_type) == -1) return size; break; } return size; } if (namesz == 8 && strcmp((char *)&nbuf[noff], "FreeBSD") == 0) { if (xnh_type == NT_FREEBSD_VERSION && descsz == 4) { do_note_freebsd_version(ms, swap, &nbuf[doff]); *flags |= FLAGS_DID_NOTE; return size; } } if (namesz == 8 && strcmp((char *)&nbuf[noff], "OpenBSD") == 0 && xnh_type == NT_OPENBSD_VERSION && descsz == 4) { if (file_printf(ms, ", for OpenBSD") == -1) return size; /* Content of note is always 0 */ *flags |= FLAGS_DID_NOTE; return size; } if (namesz == 10 && strcmp((char *)&nbuf[noff], "DragonFly") == 0 && xnh_type == NT_DRAGONFLY_VERSION && descsz == 4) { uint32_t desc; if (file_printf(ms, ", for DragonFly") == -1) return size; 
(void)memcpy(&desc, &nbuf[doff], sizeof(desc)); desc = elf_getu32(swap, desc); if (file_printf(ms, " %d.%d.%d", desc / 100000, desc / 10000 % 10, desc % 10000) == -1) return size; *flags |= FLAGS_DID_NOTE; return size; } core: /* * Sigh. The 2.0.36 kernel in Debian 2.1, at * least, doesn't correctly implement name * sections, in core dumps, as specified by * the "Program Linking" section of "UNIX(R) System * V Release 4 Programmer's Guide: ANSI C and * Programming Support Tools", because my copy * clearly says "The first 'namesz' bytes in 'name' * contain a *null-terminated* [emphasis mine] * character representation of the entry's owner * or originator", but the 2.0.36 kernel code * doesn't include the terminating null in the * name.... */ if ((namesz == 4 && strncmp((char *)&nbuf[noff], "CORE", 4) == 0) || (namesz == 5 && strcmp((char *)&nbuf[noff], "CORE") == 0)) { os_style = OS_STYLE_SVR4; } if ((namesz == 8 && strcmp((char *)&nbuf[noff], "FreeBSD") == 0)) { os_style = OS_STYLE_FREEBSD; } if ((namesz >= 11 && strncmp((char *)&nbuf[noff], "NetBSD-CORE", 11) == 0)) { os_style = OS_STYLE_NETBSD; } #ifdef ELFCORE if ((*flags & FLAGS_DID_CORE) != 0) return size; if (os_style != -1 && (*flags & FLAGS_DID_CORE_STYLE) == 0) { if (file_printf(ms, ", %s-style", os_style_names[os_style]) == -1) return size; *flags |= FLAGS_DID_CORE_STYLE; } switch (os_style) { case OS_STYLE_NETBSD: if (xnh_type == NT_NETBSD_CORE_PROCINFO) { uint32_t signo; /* * Extract the program name. It is at * offset 0x7c, and is up to 32-bytes, * including the terminating NUL. */ if (file_printf(ms, ", from '%.31s'", file_printable(sbuf, sizeof(sbuf), (const char *)&nbuf[doff + 0x7c])) == -1) return size; /* * Extract the signal number. It is at * offset 0x08. 
*/ (void)memcpy(&signo, &nbuf[doff + 0x08], sizeof(signo)); if (file_printf(ms, " (signal %u)", elf_getu32(swap, signo)) == -1) return size; *flags |= FLAGS_DID_CORE; return size; } break; default: if (xnh_type == NT_PRPSINFO && *flags & FLAGS_IS_CORE) { size_t i, j; unsigned char c; /* * Extract the program name. We assume * it to be 16 characters (that's what it * is in SunOS 5.x and Linux). * * Unfortunately, it's at a different offset * in various OSes, so try multiple offsets. * If the characters aren't all printable, * reject it. */ for (i = 0; i < NOFFSETS; i++) { unsigned char *cname, *cp; size_t reloffset = prpsoffsets(i); size_t noffset = doff + reloffset; size_t k; for (j = 0; j < 16; j++, noffset++, reloffset++) { /* * Make sure we're not past * the end of the buffer; if * we are, just give up. */ if (noffset >= size) goto tryanother; /* * Make sure we're not past * the end of the contents; * if we are, this obviously * isn't the right offset. */ if (reloffset >= descsz) goto tryanother; c = nbuf[noffset]; if (c == '\0') { /* * A '\0' at the * beginning is * obviously wrong. * Any other '\0' * means we're done. */ if (j == 0) goto tryanother; else break; } else { /* * A nonprintable * character is also * wrong. */ if (!isprint(c) || isquote(c)) goto tryanother; } } /* * Well, that worked. */ /* * Try next offsets, in case this match is * in the middle of a string. */ for (k = i + 1 ; k < NOFFSETS ; k++) { size_t no; int adjust = 1; if (prpsoffsets(k) >= prpsoffsets(i)) continue; for (no = doff + prpsoffsets(k); no < doff + prpsoffsets(i); no++) adjust = adjust && isprint(nbuf[no]); if (adjust) i = k; } cname = (unsigned char *) &nbuf[doff + prpsoffsets(i)]; for (cp = cname; *cp && isprint(*cp); cp++) continue; /* * Linux apparently appends a space at the end * of the command line: remove it. 
*/ while (cp > cname && isspace(cp[-1])) cp--; if (file_printf(ms, ", from '%.*s'", (int)(cp - cname), cname) == -1) return size; *flags |= FLAGS_DID_CORE; return size; tryanother: ; } } break; } #endif return offset; } /* SunOS 5.x hardware capability descriptions */ typedef struct cap_desc { uint64_t cd_mask; const char *cd_name; } cap_desc_t; static const cap_desc_t cap_desc_sparc[] = { { AV_SPARC_MUL32, "MUL32" }, { AV_SPARC_DIV32, "DIV32" }, { AV_SPARC_FSMULD, "FSMULD" }, { AV_SPARC_V8PLUS, "V8PLUS" }, { AV_SPARC_POPC, "POPC" }, { AV_SPARC_VIS, "VIS" }, { AV_SPARC_VIS2, "VIS2" }, { AV_SPARC_ASI_BLK_INIT, "ASI_BLK_INIT" }, { AV_SPARC_FMAF, "FMAF" }, { AV_SPARC_FJFMAU, "FJFMAU" }, { AV_SPARC_IMA, "IMA" }, { 0, NULL } }; static const cap_desc_t cap_desc_386[] = { { AV_386_FPU, "FPU" }, { AV_386_TSC, "TSC" }, { AV_386_CX8, "CX8" }, { AV_386_SEP, "SEP" }, { AV_386_AMD_SYSC, "AMD_SYSC" }, { AV_386_CMOV, "CMOV" }, { AV_386_MMX, "MMX" }, { AV_386_AMD_MMX, "AMD_MMX" }, { AV_386_AMD_3DNow, "AMD_3DNow" }, { AV_386_AMD_3DNowx, "AMD_3DNowx" }, { AV_386_FXSR, "FXSR" }, { AV_386_SSE, "SSE" }, { AV_386_SSE2, "SSE2" }, { AV_386_PAUSE, "PAUSE" }, { AV_386_SSE3, "SSE3" }, { AV_386_MON, "MON" }, { AV_386_CX16, "CX16" }, { AV_386_AHF, "AHF" }, { AV_386_TSCP, "TSCP" }, { AV_386_AMD_SSE4A, "AMD_SSE4A" }, { AV_386_POPCNT, "POPCNT" }, { AV_386_AMD_LZCNT, "AMD_LZCNT" }, { AV_386_SSSE3, "SSSE3" }, { AV_386_SSE4_1, "SSE4.1" }, { AV_386_SSE4_2, "SSE4.2" }, { 0, NULL } }; private int doshn(struct magic_set *ms, int clazz, int swap, int fd, off_t off, int num, size_t size, off_t fsize, int *flags, int mach, int strtab) { Elf32_Shdr sh32; Elf64_Shdr sh64; int stripped = 1; size_t nbadcap = 0; void *nbuf; off_t noff, coff, name_off; uint64_t cap_hw1 = 0; /* SunOS 5.x hardware capabilites */ uint64_t cap_sf1 = 0; /* SunOS 5.x software capabilites */ char name[50]; ssize_t namesize; if (size != xsh_sizeof) { if (file_printf(ms, ", corrupted section header size") == -1) return -1; return 0; } /* 
Read offset of name section to be able to read section names later */ if (pread(fd, xsh_addr, xsh_sizeof, off + size * strtab) < (ssize_t)xsh_sizeof) { file_badread(ms); return -1; } name_off = xsh_offset; for ( ; num; num--) { /* Read the name of this section. */ if ((namesize = pread(fd, name, sizeof(name) - 1, name_off + xsh_name)) == -1) { file_badread(ms); return -1; } name[namesize] = '\0'; if (strcmp(name, ".debug_info") == 0) stripped = 0; if (pread(fd, xsh_addr, xsh_sizeof, off) < (ssize_t)xsh_sizeof) { file_badread(ms); return -1; } off += size; /* Things we can determine before we seek */ switch (xsh_type) { case SHT_SYMTAB: #if 0 case SHT_DYNSYM: #endif stripped = 0; break; default: if (fsize != SIZE_UNKNOWN && xsh_offset > fsize) { /* Perhaps warn here */ continue; } break; } /* Things we can determine when we seek */ switch (xsh_type) { case SHT_NOTE: if ((nbuf = malloc(xsh_size)) == NULL) { file_error(ms, errno, "Cannot allocate memory" " for note"); return -1; } if (pread(fd, nbuf, xsh_size, xsh_offset) < (ssize_t)xsh_size) { file_badread(ms); free(nbuf); return -1; } noff = 0; for (;;) { if (noff >= (off_t)xsh_size) break; noff = donote(ms, nbuf, (size_t)noff, xsh_size, clazz, swap, 4, flags); if (noff == 0) break; } free(nbuf); break; case SHT_SUNW_cap: switch (mach) { case EM_SPARC: case EM_SPARCV9: case EM_IA_64: case EM_386: case EM_AMD64: break; default: goto skip; } if (nbadcap > 5) break; if (lseek(fd, xsh_offset, SEEK_SET) == (off_t)-1) { file_badseek(ms); return -1; } coff = 0; for (;;) { Elf32_Cap cap32; Elf64_Cap cap64; char cbuf[/*CONSTCOND*/ MAX(sizeof cap32, sizeof cap64)]; if ((coff += xcap_sizeof) > (off_t)xsh_size) break; if (read(fd, cbuf, (size_t)xcap_sizeof) != (ssize_t)xcap_sizeof) { file_badread(ms); return -1; } if (cbuf[0] == 'A') { #ifdef notyet char *p = cbuf + 1; uint32_t len, tag; memcpy(&len, p, sizeof(len)); p += 4; len = getu32(swap, len); if (memcmp("gnu", p, 3) != 0) { if (file_printf(ms, ", unknown capability 
%.3s", p) == -1) return -1; break; } p += strlen(p) + 1; tag = *p++; memcpy(&len, p, sizeof(len)); p += 4; len = getu32(swap, len); if (tag != 1) { if (file_printf(ms, ", unknown gnu" " capability tag %d", tag) == -1) return -1; break; } // gnu attributes #endif break; } (void)memcpy(xcap_addr, cbuf, xcap_sizeof); switch (xcap_tag) { case CA_SUNW_NULL: break; case CA_SUNW_HW_1: cap_hw1 |= xcap_val; break; case CA_SUNW_SF_1: cap_sf1 |= xcap_val; break; default: if (file_printf(ms, ", with unknown capability " "0x%" INT64_T_FORMAT "x = 0x%" INT64_T_FORMAT "x", (unsigned long long)xcap_tag, (unsigned long long)xcap_val) == -1) return -1; if (nbadcap++ > 2) coff = xsh_size; break; } } /*FALLTHROUGH*/ skip: default: break; } } if (file_printf(ms, ", %sstripped", stripped ? "" : "not ") == -1) return -1; if (cap_hw1) { const cap_desc_t *cdp; switch (mach) { case EM_SPARC: case EM_SPARC32PLUS: case EM_SPARCV9: cdp = cap_desc_sparc; break; case EM_386: case EM_IA_64: case EM_AMD64: cdp = cap_desc_386; break; default: cdp = NULL; break; } if (file_printf(ms, ", uses") == -1) return -1; if (cdp) { while (cdp->cd_name) { if (cap_hw1 & cdp->cd_mask) { if (file_printf(ms, " %s", cdp->cd_name) == -1) return -1; cap_hw1 &= ~cdp->cd_mask; } ++cdp; } if (cap_hw1) if (file_printf(ms, " unknown hardware capability 0x%" INT64_T_FORMAT "x", (unsigned long long)cap_hw1) == -1) return -1; } else { if (file_printf(ms, " hardware capability 0x%" INT64_T_FORMAT "x", (unsigned long long)cap_hw1) == -1) return -1; } } if (cap_sf1) { if (cap_sf1 & SF1_SUNW_FPUSED) { if (file_printf(ms, (cap_sf1 & SF1_SUNW_FPKNWN) ? 
", uses frame pointer" : ", not known to use frame pointer") == -1) return -1; } cap_sf1 &= ~SF1_SUNW_MASK; if (cap_sf1) if (file_printf(ms, ", with unknown software capability 0x%" INT64_T_FORMAT "x", (unsigned long long)cap_sf1) == -1) return -1; } return 0; } /* * Look through the program headers of an executable image, searching * for a PT_INTERP section; if one is found, it's dynamically linked, * otherwise it's statically linked. */ private int dophn_exec(struct magic_set *ms, int clazz, int swap, int fd, off_t off, int num, size_t size, off_t fsize, int *flags, int sh_num) { Elf32_Phdr ph32; Elf64_Phdr ph64; const char *linking_style = "statically"; const char *interp = ""; unsigned char nbuf[BUFSIZ]; char ibuf[BUFSIZ]; ssize_t bufsize; size_t offset, align, len; if (size != xph_sizeof) { if (file_printf(ms, ", corrupted program header size") == -1) return -1; return 0; } for ( ; num; num--) { if (pread(fd, xph_addr, xph_sizeof, off) < (ssize_t)xph_sizeof) { file_badread(ms); return -1; } off += size; bufsize = 0; align = 4; /* Things we can determine before we seek */ switch (xph_type) { case PT_DYNAMIC: linking_style = "dynamically"; break; case PT_NOTE: if (sh_num) /* Did this through section headers */ continue; if (((align = xph_align) & 0x80000000UL) != 0 || align < 4) { if (file_printf(ms, ", invalid note alignment 0x%lx", (unsigned long)align) == -1) return -1; align = 4; } /*FALLTHROUGH*/ case PT_INTERP: len = xph_filesz < sizeof(nbuf) ? xph_filesz : sizeof(nbuf); bufsize = pread(fd, nbuf, len, xph_offset); if (bufsize == -1) { file_badread(ms); return -1; } break; default: if (fsize != SIZE_UNKNOWN && xph_offset > fsize) { /* Maybe warn here? */ continue; } break; } /* Things we can determine when we seek */ switch (xph_type) { case PT_INTERP: if (bufsize && nbuf[0]) { nbuf[bufsize - 1] = '\0'; interp = (const char *)nbuf; } else interp = "*empty*"; break; case PT_NOTE: /* * This is a PT_NOTE section; loop through all the notes * in the section. 
*/ offset = 0; for (;;) { if (offset >= (size_t)bufsize) break; offset = donote(ms, nbuf, offset, (size_t)bufsize, clazz, swap, align, flags); if (offset == 0) break; } break; default: break; } } if (file_printf(ms, ", %s linked", linking_style) == -1) return -1; if (interp[0]) if (file_printf(ms, ", interpreter %s", file_printable(ibuf, sizeof(ibuf), interp)) == -1) return -1; return 0; } protected int file_tryelf(struct magic_set *ms, int fd, const unsigned char *buf, size_t nbytes) { union { int32_t l; char c[sizeof (int32_t)]; } u; int clazz; int swap; struct stat st; off_t fsize; int flags = 0; Elf32_Ehdr elf32hdr; Elf64_Ehdr elf64hdr; uint16_t type, phnum, shnum; if (ms->flags & (MAGIC_MIME|MAGIC_APPLE)) return 0; /* * ELF executables have multiple section headers in arbitrary * file locations and thus file(1) cannot determine it from easily. * Instead we traverse thru all section headers until a symbol table * one is found or else the binary is stripped. * Return immediately if it's not ELF (so we avoid pipe2file unless needed). */ if (buf[EI_MAG0] != ELFMAG0 || (buf[EI_MAG1] != ELFMAG1 && buf[EI_MAG1] != OLFMAG1) || buf[EI_MAG2] != ELFMAG2 || buf[EI_MAG3] != ELFMAG3) return 0; /* * If we cannot seek, it must be a pipe, socket or fifo. */ if((lseek(fd, (off_t)0, SEEK_SET) == (off_t)-1) && (errno == ESPIPE)) fd = file_pipe2file(ms, fd, buf, nbytes); if (fstat(fd, &st) == -1) { file_badread(ms); return -1; } if (S_ISREG(st.st_mode) || st.st_size != 0) fsize = st.st_size; else fsize = SIZE_UNKNOWN; clazz = buf[EI_CLASS]; switch (clazz) { case ELFCLASS32: #undef elf_getu #define elf_getu(a, b) elf_getu32(a, b) #undef elfhdr #define elfhdr elf32hdr #include "elfclass.h" case ELFCLASS64: #undef elf_getu #define elf_getu(a, b) elf_getu64(a, b) #undef elfhdr #define elfhdr elf64hdr #include "elfclass.h" default: if (file_printf(ms, ", unknown class %d", clazz) == -1) return -1; break; } return 0; } #endif
doshn(struct magic_set *ms, int clazz, int swap, int fd, off_t off, int num,
    size_t size, off_t fsize, int *flags, int mach, int strtab)
{
	Elf32_Shdr sh32;
	Elf64_Shdr sh64;
	int stripped = 1;
	size_t nbadcap = 0;
	void *nbuf;
	off_t noff, coff, name_off;
	uint64_t cap_hw1 = 0;	/* SunOS 5.x hardware capabilities */
	uint64_t cap_sf1 = 0;	/* SunOS 5.x software capabilities */
	char name[50];
	ssize_t namesize;

	if (size != xsh_sizeof) {
		if (file_printf(ms, ", corrupted section header size") == -1)
			return -1;
		return 0;
	}

	/*
	 * Read the string-table section header (index 'strtab') first so
	 * we know where section names live.  Demand the full header: a
	 * short read would leave sh32/sh64 partially uninitialized, so
	 * '< xsh_sizeof' (not just '== -1') is treated as a bad read.
	 */
	if (pread(fd, xsh_addr, xsh_sizeof, off + size * strtab) <
	    (ssize_t)xsh_sizeof) {
		file_badread(ms);
		return -1;
	}
	name_off = xsh_offset;

	for ( ; num; num--) {
		/*
		 * Read the name of this section.  Read at most
		 * sizeof(name) - 1 bytes and terminate at the number of
		 * bytes actually read, so a short read near EOF never
		 * leaves uninitialized stack bytes in 'name'.
		 */
		if ((namesize = pread(fd, name, sizeof(name) - 1,
		    name_off + xsh_name)) == -1) {
			file_badread(ms);
			return -1;
		}
		name[namesize] = '\0';
		if (strcmp(name, ".debug_info") == 0)
			stripped = 0;

		/* Read this section header itself; again require it whole. */
		if (pread(fd, xsh_addr, xsh_sizeof, off) <
		    (ssize_t)xsh_sizeof) {
			file_badread(ms);
			return -1;
		}
		off += size;

		/* Things we can determine before we seek */
		switch (xsh_type) {
		case SHT_SYMTAB:
#if 0
		case SHT_DYNSYM:
#endif
			stripped = 0;
			break;
		default:
			if (fsize != SIZE_UNKNOWN && xsh_offset > fsize) {
				/* Perhaps warn here */
				continue;
			}
			break;
		}

		/* Things we can determine when we seek */
		switch (xsh_type) {
		case SHT_NOTE:
			if ((nbuf = malloc(xsh_size)) == NULL) {
				file_error(ms, errno, "Cannot allocate memory"
				    " for note");
				return -1;
			}
			/*
			 * A truncated note section must not be parsed:
			 * insist on all xsh_size bytes before handing the
			 * buffer to donote().
			 */
			if (pread(fd, nbuf, xsh_size, xsh_offset) <
			    (ssize_t)xsh_size) {
				file_badread(ms);
				free(nbuf);
				return -1;
			}

			noff = 0;
			for (;;) {
				if (noff >= (off_t)xsh_size)
					break;
				noff = donote(ms, nbuf, (size_t)noff,
				    xsh_size, clazz, swap, 4, flags);
				if (noff == 0)
					break;
			}
			free(nbuf);
			break;
		case SHT_SUNW_cap:
			switch (mach) {
			case EM_SPARC:
			case EM_SPARCV9:
			case EM_IA_64:
			case EM_386:
			case EM_AMD64:
				break;
			default:
				goto skip;
			}

			/* Give up after a handful of bogus capabilities. */
			if (nbadcap > 5)
				break;
			if (lseek(fd, xsh_offset, SEEK_SET) == (off_t)-1) {
				file_badseek(ms);
				return -1;
			}
			coff = 0;
			for (;;) {
				Elf32_Cap cap32;
				Elf64_Cap cap64;
				char cbuf[/*CONSTCOND*/
				    MAX(sizeof cap32, sizeof cap64)];
				if ((coff += xcap_sizeof) > (off_t)xsh_size)
					break;
				if (read(fd, cbuf, (size_t)xcap_sizeof) !=
				    (ssize_t)xcap_sizeof) {
					file_badread(ms);
					return -1;
				}
				if (cbuf[0] == 'A') {
#ifdef notyet
					char *p = cbuf + 1;
					uint32_t len, tag;
					memcpy(&len, p, sizeof(len));
					p += 4;
					len = getu32(swap, len);
					if (memcmp("gnu", p, 3) != 0) {
						if (file_printf(ms,
						    ", unknown capability %.3s",
						    p) == -1)
							return -1;
						break;
					}
					p += strlen(p) + 1;
					tag = *p++;
					memcpy(&len, p, sizeof(len));
					p += 4;
					len = getu32(swap, len);
					if (tag != 1) {
						if (file_printf(ms,
						    ", unknown gnu"
						    " capability tag %d", tag)
						    == -1)
							return -1;
						break;
					}
					// gnu attributes
#endif
					break;
				}
				(void)memcpy(xcap_addr, cbuf, xcap_sizeof);
				switch (xcap_tag) {
				case CA_SUNW_NULL:
					break;
				case CA_SUNW_HW_1:
					cap_hw1 |= xcap_val;
					break;
				case CA_SUNW_SF_1:
					cap_sf1 |= xcap_val;
					break;
				default:
					if (file_printf(ms,
					    ", with unknown capability "
					    "0x%" INT64_T_FORMAT "x = 0x%"
					    INT64_T_FORMAT "x",
					    (unsigned long long)xcap_tag,
					    (unsigned long long)xcap_val)
					    == -1)
						return -1;
					if (nbadcap++ > 2)
						coff = xsh_size;
					break;
				}
			}
			/*FALLTHROUGH*/
		skip:
		default:
			break;
		}
	}

	if (file_printf(ms, ", %sstripped", stripped ? "" : "not ") == -1)
		return -1;
	if (cap_hw1) {
		const cap_desc_t *cdp;
		switch (mach) {
		case EM_SPARC:
		case EM_SPARC32PLUS:
		case EM_SPARCV9:
			cdp = cap_desc_sparc;
			break;
		case EM_386:
		case EM_IA_64:
		case EM_AMD64:
			cdp = cap_desc_386;
			break;
		default:
			cdp = NULL;
			break;
		}
		if (file_printf(ms, ", uses") == -1)
			return -1;
		if (cdp) {
			while (cdp->cd_name) {
				if (cap_hw1 & cdp->cd_mask) {
					if (file_printf(ms,
					    " %s", cdp->cd_name) == -1)
						return -1;
					cap_hw1 &= ~cdp->cd_mask;
				}
				++cdp;
			}
			if (cap_hw1)
				if (file_printf(ms,
				    " unknown hardware capability 0x%"
				    INT64_T_FORMAT "x",
				    (unsigned long long)cap_hw1) == -1)
					return -1;
		} else {
			if (file_printf(ms,
			    " hardware capability 0x%" INT64_T_FORMAT "x",
			    (unsigned long long)cap_hw1) == -1)
				return -1;
		}
	}
	if (cap_sf1) {
		if (cap_sf1 & SF1_SUNW_FPUSED) {
			if (file_printf(ms,
			    (cap_sf1 & SF1_SUNW_FPKNWN)
			    ? ", uses frame pointer"
			    : ", not known to use frame pointer") == -1)
				return -1;
		}
		cap_sf1 &= ~SF1_SUNW_MASK;
		if (cap_sf1)
			if (file_printf(ms,
			    ", with unknown software capability 0x%"
			    INT64_T_FORMAT "x",
			    (unsigned long long)cap_sf1) == -1)
				return -1;
	}
	return 0;
}
doshn(struct magic_set *ms, int clazz, int swap, int fd, off_t off, int num,
    size_t size, off_t fsize, int *flags, int mach, int strtab)
{
	Elf32_Shdr sh32;
	Elf64_Shdr sh64;
	int stripped = 1;	/* assume stripped until proven otherwise */
	size_t nbadcap = 0;	/* count of unrecognized SunOS capabilities */
	void *nbuf;
	off_t noff, coff, name_off;
	uint64_t cap_hw1 = 0;	/* SunOS 5.x hardware capabilites */
	uint64_t cap_sf1 = 0;	/* SunOS 5.x software capabilites */
	char name[50];
	ssize_t namesize;

	/* The on-disk header size must match the class's Shdr size. */
	if (size != xsh_sizeof) {
		if (file_printf(ms, ", corrupted section header size") == -1)
			return -1;
		return 0;
	}

	/*
	 * Read offset of name section to be able to read section names later.
	 * NOTE(review): a short read (< xsh_sizeof) is treated the same as
	 * a failed read, so the Shdr is never parsed partially initialized.
	 */
	if (pread(fd, xsh_addr, xsh_sizeof, off + size * strtab) <
	    (ssize_t)xsh_sizeof) {
		file_badread(ms);
		return -1;
	}
	name_off = xsh_offset;

	for ( ; num; num--) {
		/*
		 * Read the name of this section.  At most sizeof(name)-1
		 * bytes are read and the string is terminated at the byte
		 * count actually returned, so 'name' is always a valid
		 * NUL-terminated string even on a short read near EOF.
		 */
		if ((namesize = pread(fd, name, sizeof(name) - 1,
		    name_off + xsh_name)) == -1) {
			file_badread(ms);
			return -1;
		}
		name[namesize] = '\0';
		/* Presence of .debug_info means the binary is not stripped. */
		if (strcmp(name, ".debug_info") == 0)
			stripped = 0;

		if (pread(fd, xsh_addr, xsh_sizeof, off) <
		    (ssize_t)xsh_sizeof) {
			file_badread(ms);
			return -1;
		}
		off += size;

		/* Things we can determine before we seek */
		switch (xsh_type) {
		case SHT_SYMTAB:
#if 0
		case SHT_DYNSYM:
#endif
			stripped = 0;
			break;
		default:
			if (fsize != SIZE_UNKNOWN && xsh_offset > fsize) {
				/* Perhaps warn here */
				continue;
			}
			break;
		}

		/* Things we can determine when we seek */
		switch (xsh_type) {
		case SHT_NOTE:
			if ((nbuf = malloc(xsh_size)) == NULL) {
				file_error(ms, errno, "Cannot allocate memory"
				    " for note");
				return -1;
			}
			/* Require the whole note section; no partial parse. */
			if (pread(fd, nbuf, xsh_size, xsh_offset) <
			    (ssize_t)xsh_size) {
				file_badread(ms);
				free(nbuf);
				return -1;
			}

			/* Walk every note entry in the section. */
			noff = 0;
			for (;;) {
				if (noff >= (off_t)xsh_size)
					break;
				noff = donote(ms, nbuf, (size_t)noff,
				    xsh_size, clazz, swap, 4, flags);
				if (noff == 0)
					break;
			}
			free(nbuf);
			break;
		case SHT_SUNW_cap:
			/* Capabilities only mean something on these CPUs. */
			switch (mach) {
			case EM_SPARC:
			case EM_SPARCV9:
			case EM_IA_64:
			case EM_386:
			case EM_AMD64:
				break;
			default:
				goto skip;
			}

			if (nbadcap > 5)
				break;
			if (lseek(fd, xsh_offset, SEEK_SET) ==
			    (off_t)-1) {
				file_badseek(ms);
				return -1;
			}
			coff = 0;
			for (;;) {
				Elf32_Cap cap32;
				Elf64_Cap cap64;
				/* Big enough for either class's Cap entry. */
				char cbuf[/*CONSTCOND*/
				    MAX(sizeof cap32, sizeof cap64)];
				if ((coff += xcap_sizeof) > (off_t)xsh_size)
					break;
				if (read(fd, cbuf, (size_t)xcap_sizeof) !=
				    (ssize_t)xcap_sizeof) {
					file_badread(ms);
					return -1;
				}
				/* 'A' marks a GNU-attributes blob, not a Cap. */
				if (cbuf[0] == 'A') {
#ifdef notyet
					char *p = cbuf + 1;
					uint32_t len, tag;
					memcpy(&len, p, sizeof(len));
					p += 4;
					len = getu32(swap, len);
					if (memcmp("gnu", p, 3) != 0) {
						if (file_printf(ms,
						    ", unknown capability %.3s", p) == -1)
							return -1;
						break;
					}
					p += strlen(p) + 1;
					tag = *p++;
					memcpy(&len, p, sizeof(len));
					p += 4;
					len = getu32(swap, len);
					if (tag != 1) {
						if (file_printf(ms, ", unknown gnu"
						    " capability tag %d", tag) == -1)
							return -1;
						break;
					}
					// gnu attributes
#endif
					break;
				}
				(void)memcpy(xcap_addr, cbuf, xcap_sizeof);
				switch (xcap_tag) {
				case CA_SUNW_NULL:
					break;
				case CA_SUNW_HW_1:
					cap_hw1 |= xcap_val;
					break;
				case CA_SUNW_SF_1:
					cap_sf1 |= xcap_val;
					break;
				default:
					if (file_printf(ms,
					    ", with unknown capability "
					    "0x%" INT64_T_FORMAT "x = 0x%"
					    INT64_T_FORMAT "x",
					    (unsigned long long)xcap_tag,
					    (unsigned long long)xcap_val) == -1)
						return -1;
					/* Too much junk: stop scanning caps. */
					if (nbadcap++ > 2)
						coff = xsh_size;
					break;
				}
			}
			/*FALLTHROUGH*/
		skip:
		default:
			break;
		}
	}

	if (file_printf(ms, ", %sstripped", stripped ? "" : "not ") == -1)
		return -1;
	/* Report accumulated SunOS hardware capabilities, if any. */
	if (cap_hw1) {
		const cap_desc_t *cdp;
		switch (mach) {
		case EM_SPARC:
		case EM_SPARC32PLUS:
		case EM_SPARCV9:
			cdp = cap_desc_sparc;
			break;
		case EM_386:
		case EM_IA_64:
		case EM_AMD64:
			cdp = cap_desc_386;
			break;
		default:
			cdp = NULL;
			break;
		}
		if (file_printf(ms, ", uses") == -1)
			return -1;
		if (cdp) {
			/* Name each known bit, then dump leftovers in hex. */
			while (cdp->cd_name) {
				if (cap_hw1 & cdp->cd_mask) {
					if (file_printf(ms,
					    " %s", cdp->cd_name) == -1)
						return -1;
					cap_hw1 &= ~cdp->cd_mask;
				}
				++cdp;
			}
			if (cap_hw1)
				if (file_printf(ms,
				    " unknown hardware capability 0x%"
				    INT64_T_FORMAT "x",
				    (unsigned long long)cap_hw1) == -1)
					return -1;
		} else {
			if (file_printf(ms,
			    " hardware capability 0x%" INT64_T_FORMAT "x",
			    (unsigned long long)cap_hw1) == -1)
				return -1;
		}
	}
	/* Report accumulated SunOS software capabilities, if any. */
	if (cap_sf1) {
		if (cap_sf1 & SF1_SUNW_FPUSED) {
			if (file_printf(ms,
			    (cap_sf1 & SF1_SUNW_FPKNWN)
			    ? ", uses frame pointer"
			    : ", not known to use frame pointer") == -1)
				return -1;
		}
		cap_sf1 &= ~SF1_SUNW_MASK;
		if (cap_sf1)
			if (file_printf(ms,
			    ", with unknown software capability 0x%"
			    INT64_T_FORMAT "x",
			    (unsigned long long)cap_sf1) == -1)
				return -1;
	}
	return 0;
}
{'added': [(30, 'FILE_RCSID("@(#)$File: readelf.c,v 1.114 2014/12/11 14:19:36 christos Exp $")'), (322, '\t\tif (pread(fd, xph_addr, xph_sizeof, off) < (ssize_t)xph_sizeof) {'), (931, '\tssize_t namesize;'), (940, '\tif (pread(fd, xsh_addr, xsh_sizeof, off + size * strtab) < (ssize_t)xsh_sizeof) {'), (948, '\t\tif ((namesize = pread(fd, name, sizeof(name) - 1, name_off + xsh_name)) == -1) {'), (952, "\t\tname[namesize] = '\\0';"), (956, '\t\tif (pread(fd, xsh_addr, xsh_sizeof, off) < (ssize_t)xsh_sizeof) {'), (986, '\t\t\tif (pread(fd, nbuf, xsh_size, xsh_offset) < (ssize_t)xsh_size) {'), (1182, '\t\tif (pread(fd, xph_addr, xph_sizeof, off) < (ssize_t)xph_sizeof) {')], 'deleted': [(30, 'FILE_RCSID("@(#)$File: readelf.c,v 1.113 2014/12/11 14:10:53 christos Exp $")'), (322, '\t\tif (pread(fd, xph_addr, xph_sizeof, off) == -1) {'), (939, '\tif (pread(fd, xsh_addr, xsh_sizeof, off + size * strtab) == -1) {'), (947, '\t\tif (pread(fd, name, sizeof(name), name_off + xsh_name) == -1) {'), (951, "\t\tname[sizeof(name) - 1] = '\\0';"), (955, '\t\tif (pread(fd, xsh_addr, xsh_sizeof, off) == -1) {'), (985, '\t\t\tif (pread(fd, nbuf, xsh_size, xsh_offset) == -1) {'), (1181, '\t\tif (pread(fd, xph_addr, xph_sizeof, off) == -1) {')]}
9
8
887
5,436
216
1,085
64
https://github.com/file/file
CVE-2014-9653
CWE-20
2,636
rxe_mr.c
C
mem_check_range
/* * Copyright (c) 2016 Mellanox Technologies Ltd. All rights reserved. * Copyright (c) 2015 System Fabric Works, Inc. All rights reserved. * * This software is available to you under a choice of one of two * licenses. You may choose to be licensed under the terms of the GNU * General Public License (GPL) Version 2, available from the file * COPYING in the main directory of this source tree, or the * OpenIB.org BSD license below: * * Redistribution and use in source and binary forms, with or * without modification, are permitted provided that the following * conditions are met: * * - Redistributions of source code must retain the above * copyright notice, this list of conditions and the following * disclaimer. * * - Redistributions in binary form must reproduce the above * copyright notice, this list of conditions and the following * disclaimer in the documentation and/or other materials * provided with the distribution. * * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE * SOFTWARE. */ #include "rxe.h" #include "rxe_loc.h" /* * lfsr (linear feedback shift register) with period 255 */ static u8 rxe_get_key(void) { static u32 key = 1; key = key << 1; key |= (0 != (key & 0x100)) ^ (0 != (key & 0x10)) ^ (0 != (key & 0x80)) ^ (0 != (key & 0x40)); key &= 0xff; return key; } int mem_check_range(struct rxe_mem *mem, u64 iova, size_t length) { switch (mem->type) { case RXE_MEM_TYPE_DMA: return 0; case RXE_MEM_TYPE_MR: case RXE_MEM_TYPE_FMR: return ((iova < mem->iova) || ((iova + length) > (mem->iova + mem->length))) ? 
-EFAULT : 0; default: return -EFAULT; } } #define IB_ACCESS_REMOTE (IB_ACCESS_REMOTE_READ \ | IB_ACCESS_REMOTE_WRITE \ | IB_ACCESS_REMOTE_ATOMIC) static void rxe_mem_init(int access, struct rxe_mem *mem) { u32 lkey = mem->pelem.index << 8 | rxe_get_key(); u32 rkey = (access & IB_ACCESS_REMOTE) ? lkey : 0; if (mem->pelem.pool->type == RXE_TYPE_MR) { mem->ibmr.lkey = lkey; mem->ibmr.rkey = rkey; } mem->lkey = lkey; mem->rkey = rkey; mem->state = RXE_MEM_STATE_INVALID; mem->type = RXE_MEM_TYPE_NONE; mem->map_shift = ilog2(RXE_BUF_PER_MAP); } void rxe_mem_cleanup(void *arg) { struct rxe_mem *mem = arg; int i; if (mem->umem) ib_umem_release(mem->umem); if (mem->map) { for (i = 0; i < mem->num_map; i++) kfree(mem->map[i]); kfree(mem->map); } } static int rxe_mem_alloc(struct rxe_dev *rxe, struct rxe_mem *mem, int num_buf) { int i; int num_map; struct rxe_map **map = mem->map; num_map = (num_buf + RXE_BUF_PER_MAP - 1) / RXE_BUF_PER_MAP; mem->map = kmalloc_array(num_map, sizeof(*map), GFP_KERNEL); if (!mem->map) goto err1; for (i = 0; i < num_map; i++) { mem->map[i] = kmalloc(sizeof(**map), GFP_KERNEL); if (!mem->map[i]) goto err2; } WARN_ON(!is_power_of_2(RXE_BUF_PER_MAP)); mem->map_shift = ilog2(RXE_BUF_PER_MAP); mem->map_mask = RXE_BUF_PER_MAP - 1; mem->num_buf = num_buf; mem->num_map = num_map; mem->max_buf = num_map * RXE_BUF_PER_MAP; return 0; err2: for (i--; i >= 0; i--) kfree(mem->map[i]); kfree(mem->map); err1: return -ENOMEM; } int rxe_mem_init_dma(struct rxe_dev *rxe, struct rxe_pd *pd, int access, struct rxe_mem *mem) { rxe_mem_init(access, mem); mem->pd = pd; mem->access = access; mem->state = RXE_MEM_STATE_VALID; mem->type = RXE_MEM_TYPE_DMA; return 0; } int rxe_mem_init_user(struct rxe_dev *rxe, struct rxe_pd *pd, u64 start, u64 length, u64 iova, int access, struct ib_udata *udata, struct rxe_mem *mem) { int entry; struct rxe_map **map; struct rxe_phys_buf *buf = NULL; struct ib_umem *umem; struct scatterlist *sg; int num_buf; void *vaddr; int err; umem = 
ib_umem_get(pd->ibpd.uobject->context, start, length, access, 0); if (IS_ERR(umem)) { pr_warn("err %d from rxe_umem_get\n", (int)PTR_ERR(umem)); err = -EINVAL; goto err1; } mem->umem = umem; num_buf = umem->nmap; rxe_mem_init(access, mem); err = rxe_mem_alloc(rxe, mem, num_buf); if (err) { pr_warn("err %d from rxe_mem_alloc\n", err); ib_umem_release(umem); goto err1; } WARN_ON(!is_power_of_2(umem->page_size)); mem->page_shift = ilog2(umem->page_size); mem->page_mask = umem->page_size - 1; num_buf = 0; map = mem->map; if (length > 0) { buf = map[0]->buf; for_each_sg(umem->sg_head.sgl, sg, umem->nmap, entry) { vaddr = page_address(sg_page(sg)); if (!vaddr) { pr_warn("null vaddr\n"); err = -ENOMEM; goto err1; } buf->addr = (uintptr_t)vaddr; buf->size = umem->page_size; num_buf++; buf++; if (num_buf >= RXE_BUF_PER_MAP) { map++; buf = map[0]->buf; num_buf = 0; } } } mem->pd = pd; mem->umem = umem; mem->access = access; mem->length = length; mem->iova = iova; mem->va = start; mem->offset = ib_umem_offset(umem); mem->state = RXE_MEM_STATE_VALID; mem->type = RXE_MEM_TYPE_MR; return 0; err1: return err; } int rxe_mem_init_fast(struct rxe_dev *rxe, struct rxe_pd *pd, int max_pages, struct rxe_mem *mem) { int err; rxe_mem_init(0, mem); /* In fastreg, we also set the rkey */ mem->ibmr.rkey = mem->ibmr.lkey; err = rxe_mem_alloc(rxe, mem, max_pages); if (err) goto err1; mem->pd = pd; mem->max_buf = max_pages; mem->state = RXE_MEM_STATE_FREE; mem->type = RXE_MEM_TYPE_MR; return 0; err1: return err; } static void lookup_iova( struct rxe_mem *mem, u64 iova, int *m_out, int *n_out, size_t *offset_out) { size_t offset = iova - mem->iova + mem->offset; int map_index; int buf_index; u64 length; if (likely(mem->page_shift)) { *offset_out = offset & mem->page_mask; offset >>= mem->page_shift; *n_out = offset & mem->map_mask; *m_out = offset >> mem->map_shift; } else { map_index = 0; buf_index = 0; length = mem->map[map_index]->buf[buf_index].size; while (offset >= length) { offset -= 
length; buf_index++; if (buf_index == RXE_BUF_PER_MAP) { map_index++; buf_index = 0; } length = mem->map[map_index]->buf[buf_index].size; } *m_out = map_index; *n_out = buf_index; *offset_out = offset; } } void *iova_to_vaddr(struct rxe_mem *mem, u64 iova, int length) { size_t offset; int m, n; void *addr; if (mem->state != RXE_MEM_STATE_VALID) { pr_warn("mem not in valid state\n"); addr = NULL; goto out; } if (!mem->map) { addr = (void *)(uintptr_t)iova; goto out; } if (mem_check_range(mem, iova, length)) { pr_warn("range violation\n"); addr = NULL; goto out; } lookup_iova(mem, iova, &m, &n, &offset); if (offset + length > mem->map[m]->buf[n].size) { pr_warn("crosses page boundary\n"); addr = NULL; goto out; } addr = (void *)(uintptr_t)mem->map[m]->buf[n].addr + offset; out: return addr; } /* copy data from a range (vaddr, vaddr+length-1) to or from * a mem object starting at iova. Compute incremental value of * crc32 if crcp is not zero. caller must hold a reference to mem */ int rxe_mem_copy(struct rxe_mem *mem, u64 iova, void *addr, int length, enum copy_direction dir, u32 *crcp) { int err; int bytes; u8 *va; struct rxe_map **map; struct rxe_phys_buf *buf; int m; int i; size_t offset; u32 crc = crcp ? (*crcp) : 0; if (length == 0) return 0; if (mem->type == RXE_MEM_TYPE_DMA) { u8 *src, *dest; src = (dir == to_mem_obj) ? addr : ((void *)(uintptr_t)iova); dest = (dir == to_mem_obj) ? ((void *)(uintptr_t)iova) : addr; if (crcp) *crcp = crc32_le(*crcp, src, length); memcpy(dest, src, length); return 0; } WARN_ON(!mem->map); err = mem_check_range(mem, iova, length); if (err) { err = -EFAULT; goto err1; } lookup_iova(mem, iova, &m, &i, &offset); map = mem->map + m; buf = map[0]->buf + i; while (length > 0) { u8 *src, *dest; va = (u8 *)(uintptr_t)buf->addr + offset; src = (dir == to_mem_obj) ? addr : va; dest = (dir == to_mem_obj) ? 
va : addr; bytes = buf->size - offset; if (bytes > length) bytes = length; if (crcp) crc = crc32_le(crc, src, bytes); memcpy(dest, src, bytes); length -= bytes; addr += bytes; offset = 0; buf++; i++; if (i == RXE_BUF_PER_MAP) { i = 0; map++; buf = map[0]->buf; } } if (crcp) *crcp = crc; return 0; err1: return err; } /* copy data in or out of a wqe, i.e. sg list * under the control of a dma descriptor */ int copy_data( struct rxe_dev *rxe, struct rxe_pd *pd, int access, struct rxe_dma_info *dma, void *addr, int length, enum copy_direction dir, u32 *crcp) { int bytes; struct rxe_sge *sge = &dma->sge[dma->cur_sge]; int offset = dma->sge_offset; int resid = dma->resid; struct rxe_mem *mem = NULL; u64 iova; int err; if (length == 0) return 0; if (length > resid) { err = -EINVAL; goto err2; } if (sge->length && (offset < sge->length)) { mem = lookup_mem(pd, access, sge->lkey, lookup_local); if (!mem) { err = -EINVAL; goto err1; } } while (length > 0) { bytes = length; if (offset >= sge->length) { if (mem) { rxe_drop_ref(mem); mem = NULL; } sge++; dma->cur_sge++; offset = 0; if (dma->cur_sge >= dma->num_sge) { err = -ENOSPC; goto err2; } if (sge->length) { mem = lookup_mem(pd, access, sge->lkey, lookup_local); if (!mem) { err = -EINVAL; goto err1; } } else { continue; } } if (bytes > sge->length - offset) bytes = sge->length - offset; if (bytes > 0) { iova = sge->addr + offset; err = rxe_mem_copy(mem, iova, addr, bytes, dir, crcp); if (err) goto err2; offset += bytes; resid -= bytes; length -= bytes; addr += bytes; } } dma->sge_offset = offset; dma->resid = resid; if (mem) rxe_drop_ref(mem); return 0; err2: if (mem) rxe_drop_ref(mem); err1: return err; } int advance_dma_data(struct rxe_dma_info *dma, unsigned int length) { struct rxe_sge *sge = &dma->sge[dma->cur_sge]; int offset = dma->sge_offset; int resid = dma->resid; while (length) { unsigned int bytes; if (offset >= sge->length) { sge++; dma->cur_sge++; offset = 0; if (dma->cur_sge >= dma->num_sge) return -ENOSPC; } 
bytes = length; if (bytes > sge->length - offset) bytes = sge->length - offset; offset += bytes; resid -= bytes; length -= bytes; } dma->sge_offset = offset; dma->resid = resid; return 0; } /* (1) find the mem (mr or mw) corresponding to lkey/rkey * depending on lookup_type * (2) verify that the (qp) pd matches the mem pd * (3) verify that the mem can support the requested access * (4) verify that mem state is valid */ struct rxe_mem *lookup_mem(struct rxe_pd *pd, int access, u32 key, enum lookup_type type) { struct rxe_mem *mem; struct rxe_dev *rxe = to_rdev(pd->ibpd.device); int index = key >> 8; if (index >= RXE_MIN_MR_INDEX && index <= RXE_MAX_MR_INDEX) { mem = rxe_pool_get_index(&rxe->mr_pool, index); if (!mem) goto err1; } else { goto err1; } if ((type == lookup_local && mem->lkey != key) || (type == lookup_remote && mem->rkey != key)) goto err2; if (mem->pd != pd) goto err2; if (access && !(access & mem->access)) goto err2; if (mem->state != RXE_MEM_STATE_VALID) goto err2; return mem; err2: rxe_drop_ref(mem); err1: return NULL; } int rxe_mem_map_pages(struct rxe_dev *rxe, struct rxe_mem *mem, u64 *page, int num_pages, u64 iova) { int i; int num_buf; int err; struct rxe_map **map; struct rxe_phys_buf *buf; int page_size; if (num_pages > mem->max_buf) { err = -EINVAL; goto err1; } num_buf = 0; page_size = 1 << mem->page_shift; map = mem->map; buf = map[0]->buf; for (i = 0; i < num_pages; i++) { buf->addr = *page++; buf->size = page_size; buf++; num_buf++; if (num_buf == RXE_BUF_PER_MAP) { map++; buf = map[0]->buf; num_buf = 0; } } mem->iova = iova; mem->va = iova; mem->length = num_pages << mem->page_shift; mem->state = RXE_MEM_STATE_VALID; return 0; err1: return err; }
/* * Copyright (c) 2016 Mellanox Technologies Ltd. All rights reserved. * Copyright (c) 2015 System Fabric Works, Inc. All rights reserved. * * This software is available to you under a choice of one of two * licenses. You may choose to be licensed under the terms of the GNU * General Public License (GPL) Version 2, available from the file * COPYING in the main directory of this source tree, or the * OpenIB.org BSD license below: * * Redistribution and use in source and binary forms, with or * without modification, are permitted provided that the following * conditions are met: * * - Redistributions of source code must retain the above * copyright notice, this list of conditions and the following * disclaimer. * * - Redistributions in binary form must reproduce the above * copyright notice, this list of conditions and the following * disclaimer in the documentation and/or other materials * provided with the distribution. * * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE * SOFTWARE. 
*/ #include "rxe.h" #include "rxe_loc.h" /* * lfsr (linear feedback shift register) with period 255 */ static u8 rxe_get_key(void) { static u32 key = 1; key = key << 1; key |= (0 != (key & 0x100)) ^ (0 != (key & 0x10)) ^ (0 != (key & 0x80)) ^ (0 != (key & 0x40)); key &= 0xff; return key; } int mem_check_range(struct rxe_mem *mem, u64 iova, size_t length) { switch (mem->type) { case RXE_MEM_TYPE_DMA: return 0; case RXE_MEM_TYPE_MR: case RXE_MEM_TYPE_FMR: if (iova < mem->iova || length > mem->length || iova > mem->iova + mem->length - length) return -EFAULT; return 0; default: return -EFAULT; } } #define IB_ACCESS_REMOTE (IB_ACCESS_REMOTE_READ \ | IB_ACCESS_REMOTE_WRITE \ | IB_ACCESS_REMOTE_ATOMIC) static void rxe_mem_init(int access, struct rxe_mem *mem) { u32 lkey = mem->pelem.index << 8 | rxe_get_key(); u32 rkey = (access & IB_ACCESS_REMOTE) ? lkey : 0; if (mem->pelem.pool->type == RXE_TYPE_MR) { mem->ibmr.lkey = lkey; mem->ibmr.rkey = rkey; } mem->lkey = lkey; mem->rkey = rkey; mem->state = RXE_MEM_STATE_INVALID; mem->type = RXE_MEM_TYPE_NONE; mem->map_shift = ilog2(RXE_BUF_PER_MAP); } void rxe_mem_cleanup(void *arg) { struct rxe_mem *mem = arg; int i; if (mem->umem) ib_umem_release(mem->umem); if (mem->map) { for (i = 0; i < mem->num_map; i++) kfree(mem->map[i]); kfree(mem->map); } } static int rxe_mem_alloc(struct rxe_dev *rxe, struct rxe_mem *mem, int num_buf) { int i; int num_map; struct rxe_map **map = mem->map; num_map = (num_buf + RXE_BUF_PER_MAP - 1) / RXE_BUF_PER_MAP; mem->map = kmalloc_array(num_map, sizeof(*map), GFP_KERNEL); if (!mem->map) goto err1; for (i = 0; i < num_map; i++) { mem->map[i] = kmalloc(sizeof(**map), GFP_KERNEL); if (!mem->map[i]) goto err2; } WARN_ON(!is_power_of_2(RXE_BUF_PER_MAP)); mem->map_shift = ilog2(RXE_BUF_PER_MAP); mem->map_mask = RXE_BUF_PER_MAP - 1; mem->num_buf = num_buf; mem->num_map = num_map; mem->max_buf = num_map * RXE_BUF_PER_MAP; return 0; err2: for (i--; i >= 0; i--) kfree(mem->map[i]); kfree(mem->map); err1: 
return -ENOMEM; } int rxe_mem_init_dma(struct rxe_dev *rxe, struct rxe_pd *pd, int access, struct rxe_mem *mem) { rxe_mem_init(access, mem); mem->pd = pd; mem->access = access; mem->state = RXE_MEM_STATE_VALID; mem->type = RXE_MEM_TYPE_DMA; return 0; } int rxe_mem_init_user(struct rxe_dev *rxe, struct rxe_pd *pd, u64 start, u64 length, u64 iova, int access, struct ib_udata *udata, struct rxe_mem *mem) { int entry; struct rxe_map **map; struct rxe_phys_buf *buf = NULL; struct ib_umem *umem; struct scatterlist *sg; int num_buf; void *vaddr; int err; umem = ib_umem_get(pd->ibpd.uobject->context, start, length, access, 0); if (IS_ERR(umem)) { pr_warn("err %d from rxe_umem_get\n", (int)PTR_ERR(umem)); err = -EINVAL; goto err1; } mem->umem = umem; num_buf = umem->nmap; rxe_mem_init(access, mem); err = rxe_mem_alloc(rxe, mem, num_buf); if (err) { pr_warn("err %d from rxe_mem_alloc\n", err); ib_umem_release(umem); goto err1; } WARN_ON(!is_power_of_2(umem->page_size)); mem->page_shift = ilog2(umem->page_size); mem->page_mask = umem->page_size - 1; num_buf = 0; map = mem->map; if (length > 0) { buf = map[0]->buf; for_each_sg(umem->sg_head.sgl, sg, umem->nmap, entry) { vaddr = page_address(sg_page(sg)); if (!vaddr) { pr_warn("null vaddr\n"); err = -ENOMEM; goto err1; } buf->addr = (uintptr_t)vaddr; buf->size = umem->page_size; num_buf++; buf++; if (num_buf >= RXE_BUF_PER_MAP) { map++; buf = map[0]->buf; num_buf = 0; } } } mem->pd = pd; mem->umem = umem; mem->access = access; mem->length = length; mem->iova = iova; mem->va = start; mem->offset = ib_umem_offset(umem); mem->state = RXE_MEM_STATE_VALID; mem->type = RXE_MEM_TYPE_MR; return 0; err1: return err; } int rxe_mem_init_fast(struct rxe_dev *rxe, struct rxe_pd *pd, int max_pages, struct rxe_mem *mem) { int err; rxe_mem_init(0, mem); /* In fastreg, we also set the rkey */ mem->ibmr.rkey = mem->ibmr.lkey; err = rxe_mem_alloc(rxe, mem, max_pages); if (err) goto err1; mem->pd = pd; mem->max_buf = max_pages; mem->state = 
RXE_MEM_STATE_FREE; mem->type = RXE_MEM_TYPE_MR; return 0; err1: return err; } static void lookup_iova( struct rxe_mem *mem, u64 iova, int *m_out, int *n_out, size_t *offset_out) { size_t offset = iova - mem->iova + mem->offset; int map_index; int buf_index; u64 length; if (likely(mem->page_shift)) { *offset_out = offset & mem->page_mask; offset >>= mem->page_shift; *n_out = offset & mem->map_mask; *m_out = offset >> mem->map_shift; } else { map_index = 0; buf_index = 0; length = mem->map[map_index]->buf[buf_index].size; while (offset >= length) { offset -= length; buf_index++; if (buf_index == RXE_BUF_PER_MAP) { map_index++; buf_index = 0; } length = mem->map[map_index]->buf[buf_index].size; } *m_out = map_index; *n_out = buf_index; *offset_out = offset; } } void *iova_to_vaddr(struct rxe_mem *mem, u64 iova, int length) { size_t offset; int m, n; void *addr; if (mem->state != RXE_MEM_STATE_VALID) { pr_warn("mem not in valid state\n"); addr = NULL; goto out; } if (!mem->map) { addr = (void *)(uintptr_t)iova; goto out; } if (mem_check_range(mem, iova, length)) { pr_warn("range violation\n"); addr = NULL; goto out; } lookup_iova(mem, iova, &m, &n, &offset); if (offset + length > mem->map[m]->buf[n].size) { pr_warn("crosses page boundary\n"); addr = NULL; goto out; } addr = (void *)(uintptr_t)mem->map[m]->buf[n].addr + offset; out: return addr; } /* copy data from a range (vaddr, vaddr+length-1) to or from * a mem object starting at iova. Compute incremental value of * crc32 if crcp is not zero. caller must hold a reference to mem */ int rxe_mem_copy(struct rxe_mem *mem, u64 iova, void *addr, int length, enum copy_direction dir, u32 *crcp) { int err; int bytes; u8 *va; struct rxe_map **map; struct rxe_phys_buf *buf; int m; int i; size_t offset; u32 crc = crcp ? (*crcp) : 0; if (length == 0) return 0; if (mem->type == RXE_MEM_TYPE_DMA) { u8 *src, *dest; src = (dir == to_mem_obj) ? addr : ((void *)(uintptr_t)iova); dest = (dir == to_mem_obj) ? 
((void *)(uintptr_t)iova) : addr; if (crcp) *crcp = crc32_le(*crcp, src, length); memcpy(dest, src, length); return 0; } WARN_ON(!mem->map); err = mem_check_range(mem, iova, length); if (err) { err = -EFAULT; goto err1; } lookup_iova(mem, iova, &m, &i, &offset); map = mem->map + m; buf = map[0]->buf + i; while (length > 0) { u8 *src, *dest; va = (u8 *)(uintptr_t)buf->addr + offset; src = (dir == to_mem_obj) ? addr : va; dest = (dir == to_mem_obj) ? va : addr; bytes = buf->size - offset; if (bytes > length) bytes = length; if (crcp) crc = crc32_le(crc, src, bytes); memcpy(dest, src, bytes); length -= bytes; addr += bytes; offset = 0; buf++; i++; if (i == RXE_BUF_PER_MAP) { i = 0; map++; buf = map[0]->buf; } } if (crcp) *crcp = crc; return 0; err1: return err; } /* copy data in or out of a wqe, i.e. sg list * under the control of a dma descriptor */ int copy_data( struct rxe_dev *rxe, struct rxe_pd *pd, int access, struct rxe_dma_info *dma, void *addr, int length, enum copy_direction dir, u32 *crcp) { int bytes; struct rxe_sge *sge = &dma->sge[dma->cur_sge]; int offset = dma->sge_offset; int resid = dma->resid; struct rxe_mem *mem = NULL; u64 iova; int err; if (length == 0) return 0; if (length > resid) { err = -EINVAL; goto err2; } if (sge->length && (offset < sge->length)) { mem = lookup_mem(pd, access, sge->lkey, lookup_local); if (!mem) { err = -EINVAL; goto err1; } } while (length > 0) { bytes = length; if (offset >= sge->length) { if (mem) { rxe_drop_ref(mem); mem = NULL; } sge++; dma->cur_sge++; offset = 0; if (dma->cur_sge >= dma->num_sge) { err = -ENOSPC; goto err2; } if (sge->length) { mem = lookup_mem(pd, access, sge->lkey, lookup_local); if (!mem) { err = -EINVAL; goto err1; } } else { continue; } } if (bytes > sge->length - offset) bytes = sge->length - offset; if (bytes > 0) { iova = sge->addr + offset; err = rxe_mem_copy(mem, iova, addr, bytes, dir, crcp); if (err) goto err2; offset += bytes; resid -= bytes; length -= bytes; addr += bytes; } } 
dma->sge_offset = offset; dma->resid = resid; if (mem) rxe_drop_ref(mem); return 0; err2: if (mem) rxe_drop_ref(mem); err1: return err; } int advance_dma_data(struct rxe_dma_info *dma, unsigned int length) { struct rxe_sge *sge = &dma->sge[dma->cur_sge]; int offset = dma->sge_offset; int resid = dma->resid; while (length) { unsigned int bytes; if (offset >= sge->length) { sge++; dma->cur_sge++; offset = 0; if (dma->cur_sge >= dma->num_sge) return -ENOSPC; } bytes = length; if (bytes > sge->length - offset) bytes = sge->length - offset; offset += bytes; resid -= bytes; length -= bytes; } dma->sge_offset = offset; dma->resid = resid; return 0; } /* (1) find the mem (mr or mw) corresponding to lkey/rkey * depending on lookup_type * (2) verify that the (qp) pd matches the mem pd * (3) verify that the mem can support the requested access * (4) verify that mem state is valid */ struct rxe_mem *lookup_mem(struct rxe_pd *pd, int access, u32 key, enum lookup_type type) { struct rxe_mem *mem; struct rxe_dev *rxe = to_rdev(pd->ibpd.device); int index = key >> 8; if (index >= RXE_MIN_MR_INDEX && index <= RXE_MAX_MR_INDEX) { mem = rxe_pool_get_index(&rxe->mr_pool, index); if (!mem) goto err1; } else { goto err1; } if ((type == lookup_local && mem->lkey != key) || (type == lookup_remote && mem->rkey != key)) goto err2; if (mem->pd != pd) goto err2; if (access && !(access & mem->access)) goto err2; if (mem->state != RXE_MEM_STATE_VALID) goto err2; return mem; err2: rxe_drop_ref(mem); err1: return NULL; } int rxe_mem_map_pages(struct rxe_dev *rxe, struct rxe_mem *mem, u64 *page, int num_pages, u64 iova) { int i; int num_buf; int err; struct rxe_map **map; struct rxe_phys_buf *buf; int page_size; if (num_pages > mem->max_buf) { err = -EINVAL; goto err1; } num_buf = 0; page_size = 1 << mem->page_shift; map = mem->map; buf = map[0]->buf; for (i = 0; i < num_pages; i++) { buf->addr = *page++; buf->size = page_size; buf++; num_buf++; if (num_buf == RXE_BUF_PER_MAP) { map++; buf = 
map[0]->buf; num_buf = 0; } } mem->iova = iova; mem->va = iova; mem->length = num_pages << mem->page_shift; mem->state = RXE_MEM_STATE_VALID; return 0; err1: return err; }
int mem_check_range(struct rxe_mem *mem, u64 iova, size_t length) { switch (mem->type) { case RXE_MEM_TYPE_DMA: return 0; case RXE_MEM_TYPE_MR: case RXE_MEM_TYPE_FMR: return ((iova < mem->iova) || ((iova + length) > (mem->iova + mem->length))) ? -EFAULT : 0; default: return -EFAULT; } }
int mem_check_range(struct rxe_mem *mem, u64 iova, size_t length) { switch (mem->type) { case RXE_MEM_TYPE_DMA: return 0; case RXE_MEM_TYPE_MR: case RXE_MEM_TYPE_FMR: if (iova < mem->iova || length > mem->length || iova > mem->iova + mem->length - length) return -EFAULT; return 0; default: return -EFAULT; } }
{'added': [(62, '\t\tif (iova < mem->iova ||'), (63, '\t\t length > mem->length ||'), (64, '\t\t iova > mem->iova + mem->length - length)'), (65, '\t\t\treturn -EFAULT;'), (66, '\t\treturn 0;')], 'deleted': [(62, '\t\treturn ((iova < mem->iova) ||'), (63, '\t\t\t((iova + length) > (mem->iova + mem->length))) ?'), (64, '\t\t\t-EFAULT : 0;')]}
5
3
467
2,702
14
75
6
https://github.com/torvalds/linux
CVE-2016-8636
CWE-190